id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1 value |
|---|---|---|
6566317 | <reponame>kayarre/pyNS
#!/usr/bin/env python
## Program: PyNS
## Module: ModelAdaptor.py
## Language: Python
## Date: $Date: 2012/09/04 10:21:12 $
## Version: $Revision: 0.4.2 $
## Copyright (c) <NAME>, <NAME>. All rights reserved.
## See LICENCE file for details.
## This software is distributed WITHOUT ANY WARRANTY; without even
## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE. See the above copyright notices for more information.
## Developed with support from the EC FP7/2007-2013: ARCH, Project n. 224390
import csv, shutil
from xml.etree import ElementTree as etree
from math import pi
import sys
class ModelAdaptor(object):
    '''
    Adapts a generic vascular network model to a specific patient dataset.

    Public methods:
      SetNetworkGraph: attach the NetworkGraph input.
      SetSimulationContext: attach the simulation context.
      ChoosingTemplate: pick the correct template from a .csv parameters file.
      SettingParameters: read parameters from a .csv file into the context.
      AdaptingParameters: evaluate boundary-condition expressions and write a
          new boundary-conditions xml file with computed values.
      AdaptingModel: read measured radii from a .csv file, evaluate the rest
          of the network rules and write a patient-specific network xml file
          plus a .csv summary of the dataset.
    '''

    def __init__(self):
        '''Create an adaptor with no graph, context or patient data attached.'''
        self.NetworkGraph = None
        self.SimulationContext = None
        self.arm = None      # 0 = left, 1 = right
        self.ftype = None    # fistula-type code (0-7)
        self.Idpat = None    # patient identifier string
        self.Visit = None    # visit identifier string
def SetNetworkGraph(self, networkGraph):
    '''Attach the NetworkGraph instance this adaptor works on.'''
    self.NetworkGraph = networkGraph

def SetEvaluator(self, evaluator):
    '''Attach the expression Evaluator used to resolve symbolic rules.'''
    self.Evaluator = evaluator

def SetSimulationContext(self, simulationContext):
    '''Attach the SimulationContext holding the patient parameters.'''
    self.SimulationContext = simulationContext
def ChoosingTemplate(self, csvfilepath):
    '''
    Read the parameters .csv file (rows of "name;value") and set the
    template-selection fields: patient id, visit, arm and fistula type.

    Exits the process with an error message if the file cannot be opened.

    Fixes vs. original: uses open() instead of the Python-2-only file()
    builtin, and closes the handle (the original leaked it).
    '''
    try:
        csvfile = open(csvfilepath, "r")
    except IOError:
        sys.exit("Error, Please specify a valid path for parameters csv file.")
    with csvfile:
        for row in csv.reader(csvfile):
            # Each data row is a single ';'-separated cell: "name;value".
            el = row[0].split(";")
            name = el[0]
            value = el[1]
            if name == 'idpat':
                self.Idpat = str(value)
            if name == 'visit':
                self.Visit = str(value)
            if name == 'arm':
                self.arm = int(value)
            if name == 'ftype':
                self.ftype = int(value)
def SettingParameters(self, csvfilepath):
    '''
    Read "name;value" rows from the parameters .csv file into the
    simulation context: date-of-birth, date-of-surgery and visit are stored
    as strings, every other parameter as a float.

    Exits the process with an error message if the file cannot be opened.

    Fixes vs. original: uses open() instead of the Python-2-only file()
    builtin, and closes the handle (the original leaked it).
    '''
    try:
        csvfile = open(csvfilepath, "r")
    except IOError:
        sys.exit("Error, Please specify a valid path for parameters csv file.")
    with csvfile:
        for row in csv.reader(csvfile):
            el = row[0].split(";")
            name = el[0]
            value = el[1]
            if name == 'dob' or name == 'dos' or name == 'visit':
                self.SimulationContext.Context[name] = str(value)
            else:
                self.SimulationContext.Context[name] = float(value)
def AdaptingParameters(self, genericXml, specificXml):
    '''
    Evaluate expressions in the boundary-conditions file and re-write a new
    boundary-conditions xml file with computed values.

    Any context entry still set to None is first filled from the simulation
    defaults; remaining string-valued entries are treated as expressions and
    evaluated iteratively until all of them resolve.

    Fixes vs. original: the 17 copy-pasted default-filling blocks are
    collapsed into one loop, the expression list is no longer mutated while
    being iterated, and the bare except is narrowed to Exception.
    '''
    # Patient parameters that fall back to the dataset defaults when the
    # .csv file did not provide a value.
    defaulted = ('dos', 'dob', 'gender', 'arm', 'ftype', 'height', 'weight',
                 'sysp', 'diap', 'period', 'brachial_flow', 'radial_flow',
                 'ulnar_flow', 'ht', 'cp', 'hyp', 'diab')
    for name in defaulted:
        if self.SimulationContext.Context[name] is None:
            self.SimulationContext.Context[name] = self.SimulationContext.Defaults[name]
    # Collect every string-valued entry (except the date/visit fields):
    # these are symbolic expressions that still need evaluation.
    expressionList = []
    for name in self.SimulationContext.Context:
        if name != 'dob' and name != 'dos' and name != 'visit':
            if type(self.SimulationContext.Context[name]) is str:
                expressionList.append(self.SimulationContext.Context[name])
    # Expressions may reference each other, so keep sweeping the list until
    # every one evaluates; iterate over a copy because we remove in place.
    while len(expressionList) > 0:
        for x in expressionList[:]:
            try:
                self.Evaluator.Evaluate(x)
                expressionList.remove(x)
            except Exception:
                # Not resolvable yet (depends on a later expression); retry
                # on the next sweep of the while loop.
                pass
    self.SimulationContext.UpdateXML(genericXml, specificXml)
def AdaptingModel(self, genericXml, specificXml, csvfilepath=None):
'''
This method reads specific data from a csv file
(measured radii) and evaluates the rest of the network rules.
Finally, it creates a new vascular network xml file with specific data.
Phases: (1) load measured radii into the graph edges, (2) resolve the
remaining symbolic rules, (3) serialize nodes/superedges/edges to xml,
(4) export a patient-specific .csv summary. Returns the .csv path.
'''
# Start the specific file as a copy of the generic template.
shutil.copy(genericXml, specificXml)
self.NetworkGraph.xmlgraphpath = specificXml
# --- Phase 1: overwrite edge radii with measured values ("name;r0;r1" rows).
if csvfilepath:
print "Loading Specific Data"
try:
csv_reader = csv.reader(file(csvfilepath, "rU"))
except IOError:
sys.exit("Error, Please specify a valid path for diameters csv file.")
for row in csv_reader:
el = row[0].split(";")
name = el[0]
value1 = el[1]
value2 = el[2]
for edge in self.NetworkGraph.Edges.itervalues():
if name == edge.Name:
if edge.Side != "venous":
if value1 != value2:
if value1:
edge.Radius['array'][0.0] = (float(value1))
if value2:
edge.Radius['array'][1.0] = (float(value2))
else:
if value1 and value2:
edge.Radius['value'] = (float(value1))
else:
# NOTE(review): value2 maps to s=0.0 and value1 to s=1.0 here,
# the reverse of the arterial branch above -- confirm the swap
# is intentional for venous edges.
edge.ScalarRadius= {0.0:(float(value2)),1.0:(float(value1))}
# --- Phase 2: collect every symbolic expression left on the edges.
# NOTE(review): 'is not' on a string literal compares identity, not
# equality; it only behaves like != thanks to CPython interning.
expressionList = []
for edge in self.NetworkGraph.Edges.itervalues():
if edge.Side is not "venous":
if 'expression' in edge.Radius:
expressionList.append(edge.Radius['expression'])
if 'expression' in edge.Length:
expressionList.append(edge.Length['expression'])
if 'expression' in edge.YoungModulus:
expressionList.append(edge.YoungModulus['expression'])
if edge.Compliance is not None:
if 'expression' in edge.Compliance:
expressionList.append(edge.Compliance['expression'])
if edge.Side is not "venous":
if 'array' in edge.Radius:
for x in edge.Radius['array'].itervalues():
if type(x) is str:
if edge.ScalarRadius == {}:
expressionList.append(x)
# Sweep until every expression evaluates (they may depend on each other).
while len(expressionList)>0:
for x in expressionList:
try:
self.Evaluator.Evaluate(x)
expressionList.remove(x)
except:
pass
# --- Phase 3: rebuild the network xml tree from the evaluated graph.
root = etree.Element("NetworkGraph", id=self.NetworkGraph.Id, version="3.2")
xmlgraph = etree.ElementTree(root)
#CASE
case = etree.SubElement(root, "case")
patId = etree.SubElement(case, "patient_id")
patId.text = self.Idpat
visit = etree.SubElement(case, "visit")
visit.text = self.Visit
#NODES (written in ascending numeric id order)
nodes_list = []
nodes = etree.SubElement(root, "nodes")
for node in self.NetworkGraph.Nodes.itervalues():
nodes_list.append(int(node.Id))
nodes_list.sort()
for id in nodes_list:
name = self.NetworkGraph.Nodes[str(id)].Name
typee = self.NetworkGraph.Nodes[str(id)].Type
prop = self.NetworkGraph.Nodes[str(id)].Properties
if name and typee:
node = etree.SubElement(nodes, "node", id = str(id), type = typee, name = name)
if typee == 'downstream network':
node_p = etree.SubElement(node, "properties")
node_w = etree.SubElement(node_p, "windkessel")
node_e = etree.SubElement(node_w, "expression")
node_e.text = prop['windkessel']
if typee == 'anastomosis':
node_p = etree.SubElement(node, "properties")
node_c = etree.SubElement(node_p, "connections")
node_pa = etree.SubElement(node_c, "proximal_artery", edge_id=str(prop['proximal']))
# 'distal' is optional (e.g. end-to-end anastomoses have none).
try:
node_da = etree.SubElement(node_c, "distal_artery", edge_id=str(prop['distal']))
except KeyError:
pass
node_pv = etree.SubElement(node_c, "proximal_vein", edge_id=str(prop['vein']))
node_ar = etree.SubElement(node_p, "arterial_resistance")
node_ar_e = etree.SubElement(node_ar, "expression")
node_ar_e.text = prop['arterial_resistance']
node_vr = etree.SubElement(node_p, "venous_resistance")
node_vr_e = etree.SubElement(node_vr, "expression")
node_vr_e.text = prop['venous_resistance']
else:
etree.SubElement(nodes, "node", id = str(id))
#SUPEREDGES (ascending id order)
superedges_list = []
superedges = etree.SubElement(root, "superedges")
for sedges in self.NetworkGraph.SuperEdges.iterkeys():
superedges_list.append(int(sedges))
superedges_list.sort()
for sedg in superedges_list:
for s in self.NetworkGraph.SuperEdges.itervalues():
if s.Id == str(sedg):
if s.SuperEdges != {}:
superedge = etree.SubElement(superedges, "superedge", id = str(s.Id), name = str(s.Name))
superedges2 = etree.SubElement(superedge, "superedges")
if s.SuperEdges == {}:
superedge2 = etree.SubElement(superedges2,"superedge", id = str(s.Id), name = str(s.Name))
edgeIdsel = etree.SubElement(superedge2, "edgesIds")
for edgeIds in s.Edges.iterkeys():
etree.SubElement(edgeIdsel, "edgeIds", edge_id = str(edgeIds))
#EDGES (ascending id order; geometry + material properties per edge)
edges_list = []
edges = etree.SubElement(root, "edges")
for edge in self.NetworkGraph.Edges.iterkeys():
edges_list.append(int(edge))
edges_list.sort()
for edg in edges_list:
for e in self.NetworkGraph.Edges.itervalues():
if e.Id == str(edg):
edge = etree.SubElement(edges, "edge", id = str(e.Id), name = str(e.Name), side = str(e.Side), node1_id = str(e.NodeIds[0]), node2_id = str(e.NodeIds[1]))
geometry = etree.SubElement(edge, "geometry")
length = etree.SubElement(geometry, "length", unit="m", accuracy="10%", source="US")
length_v = etree.SubElement(length, "scalar")
length_v.text = str(e.Length['value'])
properties = etree.SubElement(edge, "properties")
# Elliptic cross-section: write the two semi-axis radii a/b.
if e.xRadius:
if 'value' in e.xRadius:
xradius = etree.SubElement(properties, "radius_a", unit="m", accuracy="10%", source="US")
xradius_v = etree.SubElement(xradius, "scalar")
xradius_v.text = str(e.xRadius['value'])
if 'array' in e.xRadius:
xradius = etree.SubElement(properties, "radius_a_array", unit="m", accuracy="10%", source="US")
xradius_s1 = etree.SubElement(xradius, "value", s="0.0")
xradius_v1 = etree.SubElement(xradius_s1, "scalar")
xradius_v1.text = str(e.xRadius['array'][0.0])
xradius_s2 = etree.SubElement(xradius, "value", s="1.0")
xradius_v2 = etree.SubElement(xradius_s2, "scalar")
xradius_v2.text = str(e.xRadius['array'][1.0])
if 'value' in e.yRadius:
yradius = etree.SubElement(properties, "radius_b", unit="m", accuracy="10%", source="US")
yradius_v = etree.SubElement(yradius, "scalar")
yradius_v.text = str(e.yRadius['value'])
# NOTE(review): this tests e.xRadius but writes e.yRadius values --
# probably a copy-paste slip for 'array' in e.yRadius; confirm.
if 'array' in e.xRadius:
yradius = etree.SubElement(properties, "radius_b_array", unit="m", accuracy="10%", source="US")
yradius_s1 = etree.SubElement(yradius, "value", s="0.0")
yradius_v1 = etree.SubElement(yradius_s1, "scalar")
yradius_v1.text = str(e.yRadius['array'][0.0])
yradius_s2 = etree.SubElement(yradius, "value", s="1.0")
yradius_v2 = etree.SubElement(yradius_s2, "scalar")
yradius_v2.text = str(e.yRadius['array'][1.0])
else:
# Circular cross-section: single radius, scalar or per-s array.
if e.ScalarRadius == {}:
if 'value' in e.Radius:
radius = etree.SubElement(properties, "radius", unit="m", accuracy="10%", source="US")
radius_v = etree.SubElement(radius, "scalar")
radius_v.text = str(e.Radius['value'])
if 'array' in e.Radius:
radius = etree.SubElement(properties, "radius_array", unit="m", accuracy="10%", source="US")
radius_s1 = etree.SubElement(radius, "value", s="0.0")
radius_v1 = etree.SubElement(radius_s1, "scalar")
radius_v1.text = str(e.Radius['array'][0.0])
radius_s2 = etree.SubElement(radius, "value", s="1.0")
radius_v2 = etree.SubElement(radius_s2, "scalar")
radius_v2.text = str(e.Radius['array'][1.0])
else:
# Measured scalar radii plus the original symbolic expressions.
if 'array' in e.Radius:
radius = etree.SubElement(properties, "radius_array", unit="m", accuracy="10%", source="US")
radius_s1 = etree.SubElement(radius, "value", s="0.0")
radius_v1_scalar = etree.SubElement(radius_s1, "scalar")
radius_v1_scalar.text = str(e.ScalarRadius[0.0])
radius_v1 = etree.SubElement(radius_s1, "expression")
radius_v1.text = str(e.Radius['array'][0.0])
radius_s2 = etree.SubElement(radius, "value", s="1.0")
radius_v2_scalar = etree.SubElement(radius_s2, "scalar")
radius_v2_scalar.text = str(e.ScalarRadius[1.0])
radius_v2 = etree.SubElement(radius_s2, "expression")
radius_v2.text = str(e.Radius['array'][1.0])
if 'value' in e.WallThickness:
wt = etree.SubElement(properties, "wall_thickness", unit="m", accuracy="10%", source="US")
wt_v = etree.SubElement(wt, "scalar")
wt_v.text = str(e.WallThickness['value'])
if 'expression' in e.WallThickness:
wt = etree.SubElement(properties, "wall_thickness")
wt_v = etree.SubElement(wt, "expression")
wt_v.text = str(e.WallThickness['expression'])
if 'value' in e.YoungModulus:
ym = etree.SubElement(properties, "young_modulus", unit="Pa", accuracy="10%", source="US")
ym_v = etree.SubElement(ym, "scalar")
ym_v.text = str(e.YoungModulus['value'])
if 'expression' in e.YoungModulus:
ym = etree.SubElement(properties, "young_modulus")
ym_v = etree.SubElement(ym, "expression")
ym_v.text = str(e.YoungModulus['expression'])
if e.Compliance is not None:
com = etree.SubElement(properties, "compliance", unit="m3/Pa")
com_v = etree.SubElement(com, "scalar")
com_v.text = str(e.Compliance)
if 'expression' in e.NlCompliance:
nlcom = etree.SubElement(properties, "nl_compliance", unit="m3/Pa")
nlcom_v = etree.SubElement(nlcom, "expression")
nlcom_v.text = str(e.NlCompliance['expression'])
# Pretty-print (module-level helper) and write the specific xml file.
indent(root)
xmlgraph.write (self.NetworkGraph.xmlgraphpath)
# --- Phase 4: export the patient-specific dataset as <xmlpath>.csv.
path = self.NetworkGraph.xmlgraphpath+'.csv'
ofile = open(path, "wb")
csv_writer = csv.writer(ofile, delimiter=",", quoting=csv.QUOTE_ALL)
# NOTE(review): with the original (lost) indentation unknown, as written
# this rewrites the header rows for every edge pair and ellipticGeometry
# reflects only the last edge visited -- confirm headers were meant to be
# written once after scanning all edges.
for edg in edges_list:
for e in self.NetworkGraph.Edges.itervalues():
if e.xRadius or e.yRadius:
ellipticGeometry = True
else:
ellipticGeometry = False
if ellipticGeometry == True:
csv_writer.writerow(["Name","Side", "Length", "Radius s=0", "Radius s=1","xRadius s=0", "xRadius s=1","yRadius s=0", "yRadius s=1", "Compliance", "YoungModulus"])
csv_writer.writerow(["","", "cm", "mm", "mm","mm", "mm","mm", "mm", "mm2/kPa", "Pa"])
if ellipticGeometry == False:
csv_writer.writerow(["Name","Side", "Length", "Radius s=0", "Radius s=1", "Compliance", "YoungModulus"])
csv_writer.writerow(["","", "cm", "mm", "mm", "mm2/kPa", "Pa"])
# One csv row per edge: lengths in cm, radii in mm.
for edg in edges_list:
for e in self.NetworkGraph.Edges.itervalues():
if e.Id == str(edg):
try:
if 'value' in e.Radius:
e.Radius_0 = e.Radius['value']
e.Radius_1 = e.Radius['value']
else:
if type(e.Radius['array'][0.0]) is str:
e.Radius_0 = e.ScalarRadius[0.0]
else:
e.Radius_0 = e.Radius['array'][0.0]
if type(e.Radius['array'][1.0]) is str:
e.Radius_1 = e.ScalarRadius[1.0]
else:
e.Radius_1 = e.Radius['array'][1.0]
e.xRadius_0 = e.yRadius_0 = e.xRadius_1 = e.yRadius_1 = 0.0
except KeyError:
# Elliptic edge: fall back to the a/b semi-axis radii.
if 'value' in e.xRadius:
e.xRadius_0 = e.xRadius['value']
e.xRadius_1 = e.xRadius['value']
else:
try:
e.xRadius_0 = e.xRadius['array'][0.0]
e.xRadius_1 = e.xRadius['array'][1.0]
except:
e.xRadius_0 = 0
e.xRadius_1 = 0
if 'value' in e.yRadius:
e.yRadius_0 = e.yRadius['value']
e.yRadius_1 = e.yRadius['value']
else:
try:
e.yRadius_0 = e.yRadius['array'][0.0]
e.yRadius_1 = e.yRadius['array'][1.0]
except:
e.yRadius_0 = 0
e.yRadius_1 = 0
e.Radius_0 = e.Radius_1 = 0.0
if e.Compliance is not None:
C = e.Compliance*1e9
else:
C = ''
# NOTE(review): this thin-wall compliance formula uses
# Context['dynamic_viscosity'] where a Poisson ratio is normally
# expected -- confirm the stored value's meaning.
if 'value' in e.YoungModulus:
ym = e.YoungModulus['value']
rm = ((e.Radius_0+e.Radius_1)/2)*1e3
wt = rm * 0.2
C = (((2.0*pi*rm**2)*(((2.0*rm**2*(1.0-self.SimulationContext.Context['dynamic_viscosity']**2))/(wt**2))+((1.0+self.SimulationContext.Context['dynamic_viscosity'])*(((2.0*rm)/wt)+1.0))))/(ym*(((2.0*rm)/wt)+1.0)))*1e3
else:
ym = ''
if ellipticGeometry == True:
csv_writer.writerow([e.Name, e.Side, e.Length['value']*1e2, e.Radius_0*1e3, e.Radius_1*1e3,e.xRadius_0*1e3, e.xRadius_1*1e3,e.yRadius_0*1e3, e.yRadius_1*1e3, C, ym])
if ellipticGeometry == False:
csv_writer.writerow([e.Name, e.Side, e.Length['value']*1e2, e.Radius_0*1e3, e.Radius_1*1e3, C, ym])
csv_writer.writerow([])
csv_writer.writerow([])
# Patient summary block: header, units, then the decoded values.
csv_writer.writerow(["idpat", "gender", "age", "arm", "fistula type", "height", "weight", "bsa", "pressure", "cardiac output", "cardiac frequency", "brachial flow", "radial flow", "ulnar flow", "hematocrit", "plasma concentration","dynamic_viscosity", "blood_density","hypertension", "diabetes"])
csv_writer.writerow(["", "", "" , "", "", "cm", "kg", "m2", "mmHg", "mL/min", "Hz", "mL/min", "mL/min", "mL/min", "%", "g/dL", "cP", "Kg/m3", "", ""])
# Each field falls back to "None" when missing from the context.
try:
gender_s = self.SimulationContext.Context['gender']
if gender_s == 0:
gender = "female"
if gender_s == 1:
gender = "male"
except KeyError:
gender = "None"
try:
age = self.SimulationContext.Context['age']
except KeyError:
age = "None"
try:
arm_s = self.SimulationContext.Context['arm']
if arm_s == 0:
arm = "Left"
if arm_s == 1:
arm = "Right"
except KeyError:
arm = "None"
try:
ftype_s = self.SimulationContext.Context['ftype']
if ftype_s == 0:
ftype = "Lower Radio-Cephalic EndToEnd"
if ftype_s == 1:
ftype = "Lower Radio-Cephalic EndToSide"
if ftype_s == 2:
ftype = "Lower Radio-Cephalic SideToSide"
if ftype_s == 3:
ftype = "Upper Brachio-Cephalic EndToSide"
if ftype_s == 4:
ftype = "Upper Brachio-Cephalic SideToSide"
if ftype_s == 5:
ftype = "Upper Brachio-Basilic EndToSide"
if ftype_s == 6:
ftype = "Upper Brachio-Basilic SideToSide"
if ftype_s == 7:
ftype = "Pre-Surgery"
except KeyError:
ftype = "None"
try:
heigth = self.SimulationContext.Context['height']
except KeyError:
heigth = "None"
try:
weigth = self.SimulationContext.Context['weight']
except KeyError:
weigth = "None"
try:
bsa = self.SimulationContext.Context['bsa']
except KeyError:
bsa = "None"
try:
meanP = self.SimulationContext.Context['mean_pressure']
except KeyError:
meanP = "None"
try:
Co = self.SimulationContext.Context['cardiac_output']
except KeyError:
Co = "None"
try:
Cf = 1.0/(self.SimulationContext.Context['period'])
except KeyError:
Cf = "None"
try:
bflow = self.SimulationContext.Context['brachial_flow']
except KeyError:
bflow = "None"
try:
rflow = self.SimulationContext.Context['radial_flow']
except KeyError:
rflow = "None"
try:
uflow = self.SimulationContext.Context['ulnar_flow']
except KeyError:
uflow = "None"
try:
ht = self.SimulationContext.Context['ht']
except KeyError:
ht = "None"
try:
cp = self.SimulationContext.Context['cp']
except KeyError:
cp = "None"
try:
eta = self.SimulationContext.Context['dynamic_viscosity']*1e3
except KeyError:
eta = "None"
try:
bd = self.SimulationContext.Context['blood_density']
except KeyError:
bd = "None"
try:
hyp_s = self.SimulationContext.Context['hyp']
if hyp_s == 0:
hyp = "No"
if hyp_s == 1:
hyp = "Yes"
except KeyError:
hyp = "None"
try:
dia_s = self.SimulationContext.Context['diab']
if dia_s == 0:
dia = "No"
if dia_s == 1:
dia = "Yes"
except KeyError:
dia = "None"
csv_writer.writerow(['id_'+self.Idpat, gender, age, arm, ftype, heigth, weigth, bsa, meanP, Co, Cf, bflow, rflow, uflow, ht, cp, eta, bd, hyp, dia])
# Return the path of the generated .csv summary.
return path
def indent(elem, level=0):
    """Recursively pretty-print an ElementTree subtree in place.

    Adds newline-plus-padding text/tail strings so the serialized xml is
    indented one space per nesting level (the classic ElementTree recipe).
    """
    pad = "\n" + level * " "
    if len(elem):
        if not elem.text or not elem.text.strip():
            elem.text = pad + " "
        if not elem.tail or not elem.tail.strip():
            elem.tail = pad
        for child in elem:
            indent(child, level + 1)
        # `child` is the last subelement here: its tail positions the
        # parent's closing tag, so pull it back to this level's padding.
        if not child.tail or not child.tail.strip():
            child.tail = pad
    else:
        if level and (not elem.tail or not elem.tail.strip()):
            elem.tail = pad
355197 | # Generated by Django 3.2.6 on 2022-04-06 06:59
import apps.home.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds the hadith book hierarchy
    # (book -> main chapter -> sub chapter -> content) and switches
    # BookPDF.file_url to a callable upload path.

    dependencies = [
        ('home', '0004_quranchapterold'),
    ]

    operations = [
        migrations.CreateModel(
            name='HadithBook',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100)),
                ('description', models.TextField(blank=True, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='HadithBookMainChapter',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # NOTE(review): max_length is not a valid IntegerField option
                # (Django ignores it with a system-check warning) -- left
                # unchanged because this migration may already be applied.
                ('chapter_no', models.IntegerField(max_length=3)),
                ('english_name', models.CharField(max_length=200)),
                ('arabic_name', models.CharField(max_length=200)),
                ('hadithbook', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='home.hadithbook')),
            ],
        ),
        migrations.AlterField(
            model_name='bookpdf',
            name='file_url',
            field=models.FileField(blank=True, null=True, upload_to=apps.home.models.BookPDF_upload_location),
        ),
        migrations.CreateModel(
            name='HadithBookSubChapter',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sub_chapter_name', models.CharField(max_length=200)),
                ('hadith_book_main_chapter', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='home.hadithbookmainchapter')),
            ],
        ),
        migrations.CreateModel(
            name='HadithBookContent',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('arabic_content', models.TextField()),
                ('roman_urdu_content', models.TextField()),
                ('hindi_content', models.TextField()),
                ('reference_field', models.TextField()),
                ('grade', models.CharField(choices=[("Muttafaqun 'alayh", "Muttafaqun 'alayh"), ('Sahih', 'Sahih'), ('Hasan', 'Hasan'), ('Zaeef', 'Zaeef')], default='Sahih', max_length=50)),
                ('hadith_book_sub_chapter', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='home.hadithbooksubchapter')),
            ],
        ),
    ]
| StarcoderdataPython |
12820297 | <gh_stars>0
from django import forms
from django.utils.translation import ugettext_lazy as _
from .models import Page
from ._markups import get_all_markups
from .settings import WALIKI_CODEMIRROR_SETTINGS as CM_SETTINGS
class DeleteForm(forms.Form):
    """Confirmation form asking how much of a wiki page to delete."""

    what = forms.ChoiceField(
        label=_('What do you want to delete?'),
        choices=(
            ('this', _('Just this page')),
            ('namespace', _('This page and all the namespace')),
        ),
    )
class MovePageForm(forms.ModelForm):
    """Form to move a wiki page to a new slug."""

    class Meta:
        model = Page
        fields = ['slug']

    def clean_slug(self):
        """Reject a slug that is unchanged or already taken by another page."""
        slug = self.cleaned_data['slug']
        if self.instance.slug == slug:
            raise forms.ValidationError(_("The slug wasn't changed"))
        if Page.objects.filter(slug=slug).exists():
            raise forms.ValidationError(_("There is already a page with this slug"))
        return slug
class PageForm(forms.ModelForm):
    """Edit form for a wiki page: raw markup text plus a commit log message."""

    raw = forms.CharField(label="", widget=forms.Textarea)
    # Translators: log message
    message = forms.CharField(label=_('Log message'), max_length=200, required=False)
    extra_data = forms.CharField(widget=forms.HiddenInput, required=False)

    class Meta:
        model = Page
        fields = ['title', 'markup', 'raw', 'message']

    class Media:
        # One CodeMirror mode script per markup that declares a mode.
        modes = tuple('codemirror/mode/%s/%s.js' % (m.codemirror_mode, m.codemirror_mode)
                      for m in get_all_markups() if hasattr(m, 'codemirror_mode'))
        theme = ('codemirror/theme/%s.css' % CM_SETTINGS['theme'],) if 'theme' in CM_SETTINGS else ()
        js = ('codemirror/lib/codemirror.js',
              'codemirror/addon/mode/overlay.js') + modes + ('js/waliki.js',)
        css = {
            'all': ('codemirror/lib/codemirror.css',) + theme
        }

    def __init__(self, *args, **kwargs):
        # Pop our custom kwarg before ModelForm sees it.
        is_hidden = kwargs.pop('is_hidden', None)
        super(PageForm, self).__init__(*args, **kwargs)
        self.fields['raw'].initial = self.instance.raw
        self.fields['markup'].widget = forms.HiddenInput()
        # Translator: placeholder for log message
        self.fields['message'].widget = forms.TextInput(attrs={'placeholder': _('Update %s') % self.instance.path})
        if is_hidden:
            for field in self.fields.values():
                field.widget = forms.HiddenInput()

    def clean_raw(self):
        """Require the page content to actually change."""
        if self.instance.raw == self.cleaned_data['raw']:
            raise forms.ValidationError(
                _('There were no changes in the page to commit.')
            )
        return self.cleaned_data['raw']

    def save(self, commit=True):
        """Save the model and push the edited raw text onto the instance."""
        instance = super(PageForm, self).save(commit)
        if commit:
            instance.raw = self.cleaned_data['raw']
        return instance
5061282 | <reponame>Garee/CREATe
import sqlite3
import json
# Open the MediaWiki SQLite dump and pull every page's wikitext.
conn = sqlite3.connect('database.sqlite')
c = conn.cursor()
# NOTE(review): row_factory only affects cursors created after this point
# (e.g. by conn.execute below); the cursor `c` above is unused.
conn.row_factory = sqlite3.Row
# Join page -> revision -> text so each row carries the page metadata and
# its raw wikitext (old_text) at indices 0..4.
data = conn.execute('''SELECT p.page_id AS "page_id",
p.page_title,
r.rev_text_id AS "revision_id",
t.old_id AS "text_id",
t.old_text
FROM
page p
INNER JOIN revision r
ON p.page_id = r.rev_page
INNER JOIN text t
ON r.rev_text_id = t.old_id'''
)
# keys = {0: 'page_id', 1: 'page_title', 2: 'revison_id', 3:}
def buildModel(rows=None):
    '''
    Build a list of page models from joined page/revision/text rows.

    Each model is a dict with 'pageId', 'title', 'text' and 'attributes',
    where the attributes are the "|Key=value" template parameters parsed
    out of the wikitext. Pages without any parsed attribute are skipped.

    rows defaults to the module-level query result `data`, so existing
    callers are unaffected; passing an iterable of
    (page_id, page_title, revision_id, text_id, old_text) tuples makes the
    function usable (and testable) without the database.

    Fix vs. original: splits only on the first '=', so values containing
    '=' are no longer truncated.
    '''
    if rows is None:
        rows = data
    pageModels = []
    for row in rows:
        pageId = row[0]
        pageTitle = row[1]
        old = row[4]
        attributes = []
        for line in old.split("\n"):
            # Template parameters look like "|Key=value".
            if "=" in line and line.startswith('|'):
                keyValStr = line.replace("|", "")
                key, value = keyValStr.split("=", 1)
                key = key.replace("?", "")
                # Skip empty keys and template markup.
                if not key or '{' in key:
                    continue
                # Keep only capitalised, alphabetic parameter names.
                if key[0].islower() or not key[0].isalpha():
                    continue
                attributes.append({'key': key, 'value': value})
        if attributes:
            pageModels.append({
                'pageId': pageId,
                'title': pageTitle,
                'text': old,
                'attributes': attributes,
            })
    return pageModels
# Script entry point: parse the whole dump and emit the page models as JSON
# on stdout.
pageModels = buildModel()
print(json.dumps(pageModels))
| StarcoderdataPython |
3269243 | from output.models.ms_data.regex.schema_i_xsd.schema_i import Doc
__all__ = [
"Doc",
]
| StarcoderdataPython |
1747207 | <gh_stars>0
#!/usr/bin/env python
from setuptools import setup
# Packaging metadata for the ChipSHOUTER EMFI controller API; installs the
# chipshouter package plus the shouter-console helper script.
setup(
name = 'chipshouter',
version = '1.0.0',
description = "ChipSHOUTER EMFI API",
author = "<NAME>",
author_email = '<EMAIL>',
license = 'GPLv3',
url = 'http://www.ChipSHOUTER.com',
download_url='https://github.com/newaetech/chipshouter',
packages = ['chipshouter',
'chipshouter.console'
],
scripts=['scripts/shouter-console.py'],
install_requires = [
'pyserial',
'PyCRC',\
'tqdm'
],
)
3506440 | <reponame>zhangjq933/HowtoSim_Script
def buildBlock(x, y, z, oDesktop):
    """Create a vacuum box of size x/y/z at the origin in the active HFSS
    design's 3D Modeler and zoom the view to fit.

    x, y, z are HFSS dimension strings (e.g. "10mm"); oDesktop is the AEDT
    desktop scripting object.
    """
    project = oDesktop.GetActiveProject()
    design = project.GetActiveDesign()
    editor = design.SetActiveEditor("3D Modeler")
    editor.CreateBox(
        [
            "NAME:BoxParameters",
            "XPosition:=", "0mm",
            "YPosition:=", "0mm",
            "ZPosition:=", "0mm",
            "XSize:=", x,
            "YSize:=", y,
            "ZSize:=", z
        ],
        [
            "NAME:Attributes",
            "Name:=", "Box1",
            "Flags:=", "",
            "Color:=", "(143 175 143)",
            "Transparency:=", 0,
            "PartCoordinateSystem:=", "Global",
            "UDMId:=", "",
            "MaterialValue:=", "\"vacuum\"",
            "SurfaceMaterialValue:=", "\"\"",
            "SolveInside:=", True,
            "IsMaterialEditable:=", True,
            "UseMaterialAppearance:=", False,
            "IsLightweight:=", False
        ])
    editor.FitAll()
8068245 | # Field class (relativistic) for OLIVE
#
# Class is initialized with an array of modes and amplitudes as well as corresponding metadata
#
#
# Units
# -Assume CGS units for now
#
import numpy as np
from scipy.constants import c as c_mks
c = c_mks*1.e2
class Field(object):
    """Relativistic field container for OLIVE.

    Holds the mode amplitudes/momenta for a cavity's eigenmodes and
    delegates evaluation of the mode functions to the cavity object.
    """

    def __init__(self, cavity):
        """Initialize an empty field bound to a cavity.

        Arguments:
            cavity (olive.fields.eigenmodes.<ModeSubClass>): Olive object
                containing geometry and mode information.
        """
        self.cavity = cavity
        self.modes = []
        self.Q = []            # mode amplitudes
        self.P = []            # mode momenta ("modementa")
        self.num_modes = 0
        self.geom_factor = []
        self.poly_exponent = []
        self.w_integrals = []
        self.c_integrals = []
        self.t_integrals = []
        self.x_integrals = []
        self.det = []
def create_modes(self, mode_nums, initial_amplitude, initial_modementa, phase_offset=False,
                 mode_expansion=False, energy_integrals=False):
    """Populate the field data for L modes from the input arrays.

    Static information (frequencies, wavenumbers, geometry factors) comes
    from the cavity passed at construction time.

    Arguments:
        mode_nums (ndarray): Lx3 array of mode numbers m, n, p.
        initial_amplitude (ndarray): length-L vector of initial mode amplitudes.
        initial_modementa (ndarray): length-L vector of initial modementa.
        phase_offset (ndarray): length-L phase offsets (currently unused).
        mode_expansion (ndarray): 2xL geometry factors/exponents (currently unused).
        energy_integrals (ndarray): 4xL integral constants (currently unused).
    """
    # Basic mode bookkeeping and input validation.
    self.num_modes = np.shape(mode_nums)[0]
    if len(initial_amplitude) > 0:
        if not np.shape(initial_amplitude)[0] == self.num_modes:
            msg = 'Number of initial amplitudes must equal the number of modes'
            raise Exception(msg)
        self.Q = np.asarray(initial_amplitude)
    else:
        msg = 'Must specify positive number of initial amplitudes Q0'
        raise Exception(msg)
    if len(initial_modementa) > 0:
        if not np.shape(initial_modementa)[0] == self.num_modes:
            msg = 'Number of initial amplitudes must equal the number of modes'
            raise Exception(msg)
        self.P = np.asarray(initial_modementa)
    else:
        msg = 'Must specify positive number of initial modementa P0'
        raise Exception(msg)
    # Mode frequencies (omega/c, CGS) and wavenumber components.
    self.omegas = self.cavity.get_mode_frequencies(mode_nums[:, 0], mode_nums[:, 1],
                                                   mode_nums[:, 2]) / c
    self.ks = self.cavity.get_mode_wavenumbers(mode_nums[:, 0], mode_nums[:, 1], mode_nums[:, 2])
    self.kx = self.ks[0]
    self.ky = self.ks[1]
    self.kz = self.ks[2]
    # Field geometry quantities derived from the cavity.
    self.M = self.cavity.M
    self.Ml = self.cavity.get_mode_Ms(self.kx, self.ky, self.kz)
    self.Kl = self.Ml * (self.kx ** 2 + self.ky ** 2)
    # Amplitude/momentum histories, seeded with the initial values.
    self.Q_history = [self.Q]
    self.P_history = [self.P]
def add_mode(self, frequency, initial_amplitude=False,
             initial_phase=False, mode_expansion=False,
             energy_integrals=False):
    '''Add a single mode to the current Field object.

    NOTE(review): not implemented yet -- the body is empty, so calling this
    is a no-op that returns None.

    Arguments:
        frequency (float): Mode frequency
        initial_amplitude (float): Initial mode amplitude
        initial_phase (float): Phase offset for mode
        mode_expansion (ndarray): Pair containing geometry factor and exponent for mode
        energy_integrals (ndarray): Quadruplet containing integral constants for mode
    '''
def return_modes(self):
    '''Return the mode list and the current mode amplitudes.

    Bug fix: the original returned ``self.amplitudes``, an attribute that is
    never defined anywhere on Field (``__init__`` creates ``self.Q`` for the
    amplitudes), so every call raised AttributeError.
    '''
    return self.modes, self.Q
def calc_A_x(self, x, y, z):
    '''Evaluate the vector-potential component A_x for all L modes at the
    N supplied particle positions (delegated to the cavity).

    Arguments:
        x, y, z (ndarray): particle coordinate vectors, each of length N.

    Returns:
        ndarray: L x N array of A_x values.
    '''
    return self.cavity.calc_A_x(self.kx, self.ky, self.kz, x, y, z)
def calc_A_y(self, x, y, z):
    '''
    Evaluate A_y for every mode at every particle position.

    Arguments:
        x (ndArray): particle x coordinates (length N)
        y (ndArray): particle y coordinates (length N)
        z (ndArray): particle z coordinates (length N)
    Returns:
        A_y (ndArray): LxN array (L modes by N particles), delegated to
            the cavity's own field evaluation.
    '''
    wavenumbers = (self.kx, self.ky, self.kz)
    return self.cavity.calc_A_y(*wavenumbers, x, y, z)
def calc_A_z(self, x, y, z):
    '''
    Evaluate A_z for every mode at every particle position.
    (The original docstring said "A_x"; this method returns A_z.)

    Arguments:
        x (ndArray): particle x coordinates (length N)
        y (ndArray): particle y coordinates (length N)
        z (ndArray): particle z coordinates (length N)
    Returns:
        A_z (ndArray): LxN array (L modes by N particles), delegated to
            the cavity's own field evaluation.
    '''
    wavenumbers = (self.kx, self.ky, self.kz)
    return self.cavity.calc_A_z(*wavenumbers, x, y, z)
def dx_int_A_z(self, x, y, z):
    '''
    Evaluate the x derivative of int_A_z for every mode at every particle position.

    Arguments:
        x (ndArray): particle x coordinates (length N)
        y (ndArray): particle y coordinates (length N)
        z (ndArray): particle z coordinates (length N)
    Returns:
        dx_int_A_z (ndArray): LxN array, delegated to the cavity using the
            stored wavenumber array ``self.ks``.
    '''
    wavenumbers = self.ks
    return self.cavity.dx_int_A_z(wavenumbers, x, y, z)
def dy_int_A_z(self, x, y, z):
    '''
    Evaluate the y derivative of int_A_z for every mode at every particle position.

    Arguments:
        x (ndArray): particle x coordinates (length N)
        y (ndArray): particle y coordinates (length N)
        z (ndArray): particle z coordinates (length N)
    Returns:
        dy_int_A_z (ndArray): LxN array, delegated to the cavity using the
            stored wavenumber array ``self.ks``.
    '''
    wavenumbers = self.ks
    return self.cavity.dy_int_A_z(wavenumbers, x, y, z)
def dz_int_A_z(self, x, y, z):
    '''
    Evaluate the z derivative of int_A_z for every mode at every particle position.

    Arguments:
        x (ndArray): particle x coordinates (length N)
        y (ndArray): particle y coordinates (length N)
        z (ndArray): particle z coordinates (length N)
    Returns:
        dz_int_A_z (ndArray): LxN array, delegated to the cavity using the
            stored wavenumber array ``self.ks``.
    '''
    wavenumbers = self.ks
    return self.cavity.dz_int_A_z(wavenumbers, x, y, z)
def calc_int_A_z(self, x, y, z):
    '''
    Evaluate int_A_z for every mode at every particle position.

    Arguments:
        x (ndArray): particle x coordinates (length N)
        y (ndArray): particle y coordinates (length N)
        z (ndArray): particle z coordinates (length N)
    Returns:
        int_A_z (ndArray): LxN array, delegated to the cavity using the
            stored wavenumber array ``self.ks``.
    '''
    wavenumbers = self.ks
    return self.cavity.calc_int_A_z(wavenumbers, x, y, z)
def compute_single_mode_Az(self, index, pos):
    """DEPRECATED - Compute the z-component of the vector potential Az at
    position pos for the mode given by index.

    Arguments:
        index (int): Index of the desired mode
        pos (ndarray): Position as (tau, x, y) — the original docstring
            said (x, y, z) but the unpacking below is (tau, x, y)
    Returns:
        Az (float): Value of Az for the specified mode and position
    """
    (tau, x, y) = pos
    # Bug fix: poly_exponent is a per-mode array (see the deprecated
    # initializations in __init__), so it must be indexed per mode just like
    # geom_factor; the original exponentiated by the whole array, which would
    # return an array instead of this mode's scalar value.
    expansion_factor = self.geom_factor[index]*(x + 1j*y)**self.poly_exponent[index]
    return expansion_factor*self.amplitudes[index]*np.cos(self.frequencies[index]*tau/c + self.phases[index])
def compute_all_modes_Az(self, pos):
    """DEPRECATED - Compute the z-component of the vector potential Az for a
    single position, summed-per-mode (one value per mode).

    Arguments:
        pos (ndarray): Position as (tau, x, y)
    Returns:
        Az (ndarray): Per-mode values of Az at the given position
    """
    (tau, x, y) = pos
    expansion_factor = self.geom_factor*(x + 1j*y)**self.poly_exponent
    # Bug fix: the original indexed amplitudes/frequencies/phases with an
    # undefined name ``index`` (a NameError if ever called); this all-modes
    # variant is vectorized over the per-mode arrays instead.
    return expansion_factor*self.amplitudes*np.cos(self.frequencies*tau/c + self.phases)
| StarcoderdataPython |
1917333 | <filename>main2.py
import i2v
from PIL import Image
import os
# Candidate tag vocabularies used to label each detected face. The loops
# below keep the single highest-confidence hair colour, eye colour and hair
# style, plus every matching tag from other_list.
hair_list = ['blonde hair', 'brown hair', 'black hair', 'blue hair', 'pink hair' ,'purple hair', 'green hair','red hair', 'silver hair', 'white hair', 'orange hair', 'aqua hair', 'gray hair']
eye_list = ['blue eyes', 'red eyes', 'brown eyes' ,'green eyes', 'purple eyes', 'yellow eyes', 'pink eyes', 'aqua eyes', 'black eyes', 'orange eyes']
hairstyle_list = ['long hair','short hair', 'twintails', 'drill hair', 'ponytail']
other_list = ['blush', 'smile','open mouth', 'hat', 'ribbon', 'glasses']

# illustration2vec tagger (chainer backend) loaded from pretrained weights.
illust2vec = i2v.make_i2v_with_chainer(
    "illust2vec_tag_ver200.caffemodel", "tag_list.json")
# In the case of caffe, please use i2v.make_i2v_with_caffe instead:
# illust2vec = i2v.make_i2v_with_caffe(
#     "illust2vec_tag.prototxt", "illust2vec_tag_ver200.caffemodel",
#     "tag_list.json")

# Accumulates the per-image tag lists.
output = {}
#output is a dictionary in the format like:
#{"1.image" : ["blonde hair", "blue eyes", "long hair", "smile"],
# "2.image" : ["brown hair", "green eyes", "short hair", "ribbon"]}
count = 0

def _best_tag(candidates, scores):
    """Return the candidate tag with the highest confidence score, or None
    if none of the candidates appear in *scores* with a positive score."""
    best, best_p = None, 0
    for tag in candidates:
        if scores.get(tag, 0) > best_p:
            best, best_p = tag, scores[tag]
    return best

# Tag every cropped face image; keep (and label) images recognised as a
# single girl, delete the rest from disk.
for i in range(1, 37885):
    try:
        s = str(i).zfill(5)
        img = Image.open("../lbpcascade_animeface/examples/cropped/{}.jpg".format(s))
        result = illust2vec.estimate_plausible_tags([img], threshold=0.25)
        result_dict = dict(result[0]['general'])
        if '1girl' in result_dict:
            count += 1
            print('find girl')
            # One best tag per exclusive category (was three duplicated
            # arg-max loops), then every matching non-exclusive tag.
            lableList = [_best_tag(hair_list, result_dict)]
            lableList.append(_best_tag(eye_list, result_dict))
            lableList.append(_best_tag(hairstyle_list, result_dict))
            lableList.extend(j for j in other_list if j in result_dict)
            output.update({"{}.jpg".format(s) : lableList})
        else:
            print('not a girl!')
            os.remove("../lbpcascade_animeface/examples/cropped/{}.jpg".format(s))
    except Exception as exc:
        # Was a bare ``except:`` that hid the failure cause (and would even
        # swallow KeyboardInterrupt); report what actually went wrong.
        print('one pic error!', exc)
# Output file receiving the label matrix. Opened in append mode; the handle
# intentionally stays open for the writing pass that follows.
filename = "labels2.txt"
f = open(filename, "a")

# Header: the number of labelled images, then every tag name (spaces
# replaced by underscores) on a single space-separated line.
print(count, file = f)
for tag in hair_list + eye_list + hairstyle_list + other_list:
    print(tag.replace(' ','_'), end = ' ', file = f)
print('',file=f)
try:
    count = 0
    # Re-number the surviving images consecutively and emit one row of
    # 1/-1 indicators per image, one column per tag.
    # NOTE(review): this upper bound (38159) differs from the 37885 used in
    # the tagging pass above — confirm which is correct.
    for i in range(1, 38159):
        s = str(i).zfill(5)
        imageName = "{}.jpg".format(s)
        if imageName not in output:
            continue
        print('in output!')
        count += 1
        ss = str(count).zfill(5)
        print("{}.jpg".format(ss), end = " ", file = f)
        tags = output.get(imageName)
        # Single pass over all categories replaces the four duplicated
        # per-category loops of the original.
        for j in hair_list + eye_list + hairstyle_list + other_list:
            print("1" if j in tags else "-1", end = " ", file = f)
        print('', file=f)
except Exception as exc:
    # Was a bare ``except:``; report the actual failure instead of hiding it.
    print('one pic error!', exc)
f.close()
| StarcoderdataPython |
217271 | <filename>venv/lib/python3.6/site-packages/ansible_collections/ngine_io/vultr/plugins/modules/vultr_plan_baremetal_info.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2018, <NAME> <<EMAIL>>
# (c) 2020, <NAME> <<EMAIL>>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = r'''
---
module: vultr_plan_baremetal_info
short_description: Gather information about the Vultr Bare Metal plans available.
description:
- Gather information about Bare Metal plans available to boot servers.
version_added: "0.3.0"
author: "<NAME> (@sbaerlocher)"
extends_documentation_fragment:
- ngine_io.vultr.vultr
'''
# Usage examples rendered by ansible-doc.
# Bug fix: the example invoked (and registered) "vultr_baremetal_plan_info",
# but the module's actual name — per DOCUMENTATION, the filename, and the
# key passed to the Vultr base class below — is "vultr_plan_baremetal_info".
EXAMPLES = r'''
- name: Gather Vultr Bare Metal plans information
  ngine_io.vultr.vultr_plan_baremetal_info:
  register: result

- name: Print the gathered information
  debug:
    var: result.vultr_plan_baremetal_info
'''
RETURN = r'''
---
vultr_api:
description: Response from Vultr API with a few additions/modification
returned: success
type: complex
contains:
api_account:
description: Account used in the ini file to select the key
returned: success
type: str
sample: default
api_timeout:
description: Timeout used for the API requests
returned: success
type: int
sample: 60
api_retries:
description: Amount of max retries for the API requests
returned: success
type: int
sample: 5
api_retry_max_delay:
description: Exponential backoff delay in seconds between retries up to this max delay value.
returned: success
type: int
sample: 12
api_endpoint:
description: Endpoint used for the API requests
returned: success
type: str
sample: "https://api.vultr.com"
vultr_plan_baremetal_info:
description: Response from Vultr API
returned: success
type: complex
contains:
plan:
description: List of the Bare Metal plans available.
returned: success
type: list
sample: [{
"available_locations": [
1
],
"bandwidth": 40.0,
"bandwidth_gb": 40960,
"disk": 110,
"id": 118,
"name": "32768 MB RAM,110 GB SSD,40.00 TB BW",
"plan_type": "DEDICATED",
"price_per_month": 240.0,
"ram": 32768,
"vcpu_count": 8,
"windows": false
}]
'''
from ansible.module_utils.basic import AnsibleModule
from ..module_utils.vultr import (
Vultr,
vultr_argument_spec,
)
class AnsibleVultrPlanInfo(Vultr):
    """Vultr API wrapper that queries the Bare Metal plan list."""

    def __init__(self, module):
        super(AnsibleVultrPlanInfo, self).__init__(module, "vultr_plan_baremetal_info")
        # Map of raw API fields to the returned key name and/or type
        # conversion applied by the Vultr base class.
        self.returns = {
            "METALPLANID": {"key": "id", "convert_to": "int"},
            "available_locations": {},
            "bandwidth_tb": {"convert_to": "int"},
            "disk": {},
            "name": {},
            "plan_type": {},
            "price_per_month": {"convert_to": "float"},
            "ram": {"convert_to": "int"},
            "windows": {"convert_to": "bool"},
            "cpu_count": {"convert_to": "int"},
            "cpu_model": {},
            "cpu_thread_count": {"convert_to": "int"},
        }

    def get_plans_baremetal(self):
        """Fetch the raw Bare Metal plan listing from the Vultr v1 API."""
        return self.api_query(path="/v1/plans/list_baremetal")
def parse_plans_baremetal_list(plans_baremetal_list):
    """Return the plan dicts from an ``{id: plan}`` mapping as a list.

    The original comprehension iterated ``.items()`` but discarded the key
    (shadowing the ``id`` builtin in the process); iterating the values
    directly is equivalent and clearer.
    """
    return list(plans_baremetal_list.values())
def main():
    """Entry point: run the info module and exit with the gathered plans."""
    module = AnsibleModule(
        argument_spec=vultr_argument_spec(),
        supports_check_mode=True,
    )
    plan_info = AnsibleVultrPlanInfo(module)
    plans = parse_plans_baremetal_list(plan_info.get_plans_baremetal())
    module.exit_json(**plan_info.get_result(plans))


if __name__ == '__main__':
    main()
| StarcoderdataPython |
8098579 | <filename>examples/create_scripts/extracellular_spikes.py
#!/usr/bin/python
import sys
import numpy as np
from nwb import nwb_file
from nwb import nwb_utils as ut
"""
Store extracellular ephys data
"""
OUTPUT_DIR = "../created_nwb_files/"
file_name = __file__[0:-3] + ".nwb"
########################################################################
# create a new NWB file
# several settings are specified when doing so. these can be supplied within
# the NWB constructor or defined in a dict, as in in this example
settings = {}
settings["file_name"] = OUTPUT_DIR + file_name
# each file should have a descriptive globally unique identifier
# that specifies the lab and this experiment session
# the function nwb.create_identifier() is recommended to use as it takes
# the string and appends the present date and time
#- settings["identifier"] = nwb.create_identifier("extracellular spikes example")
settings["identifier"] = ut.create_identifier("extracellular spikes example")
# indicate that it's OK to overwrite exting file
settings["mode"] = "w"
# specify the start time of the experiment. all times in the NWB file
# are relative to experiment start time
# if the start time is not specified the present time will be used
settings["start_time"] = "Sat Jul 04 2015 3:14:16"
# provide one or two sentences that describe the experiment and what
# data is in the file
settings["description"] = "Test file demonstrating a simple extracellular ephys recording"
# create the NWB file object. this manages the file
print("Creating " + settings["file_name"])
f = nwb_file.open(**settings)
########################################################################
# create two electrical series, one with a single electrode and one with many
# then create a spike event series

# first describe the device(s). Assume each probe is using a separate device
# names of devices are fictional
f.set_dataset("<device_X>", "Probe p0 device description", name="acme_model_23")
f.set_dataset("<device_X>", "Probe p1 device description", name="FooBar-X1")

# create the electrode map
# example simulated recording is made from two 2-electrode probes named
# 'p0' and 'p1'. we need to define the locations of the electrodes
# relative to each probe, and the location of the probes
# electrode coordinates are in meters and their positions
# are relative to each other. the location of the probe itself is
# stored separately. using absolute coordinates here, if they are known,
# is still OK
electrode_map = [[0, 0, 0], [0, 1.5e-6, 0], [0, 0, 0], [0, 3.0e-5, 0]]
# one group label per electrode in electrode_map (same order)
electrode_group = [ "p0", "p0", "p1", "p1" ]

# make the group used to store extracellular ephys data. This group
# will be created inside group /general
ex_ephys = f.make_group("extracellular_ephys")
ex_ephys.set_dataset("electrode_map", electrode_map)
ex_ephys.set_dataset("electrode_group", electrode_group)

# set electrode impedances and describe filtering
# impedances are stored as text in case there is a range
ex_ephys.set_dataset("impedance", [ "1e6", "1.1e6", "1.2e6", "1.3e6" ])
ex_ephys.set_dataset("filtering", "description of filtering")

# specify the description, location and device for each probe
# (device names match the <device_X> entries registered above)
p0 = ex_ephys.make_group("<electrode_group_X>", "p0")
p0.set_dataset("description", "Description of p0")
p0.set_dataset("location", "CA1, left hemisphere, stereotactic coordinates xx, yy")
p0.set_dataset("device", "acme_model_23")
p1 = ex_ephys.make_group("<electrode_group_X>", "p1")
p1.set_dataset("description", "Description of p1")
p1.set_dataset("location", "CA3, left hemisphere, stereotactic coordinates xx, yy")
p1.set_dataset("device", "FooBar-X1")

########################################################################
# the example is of two 2-electrode probes. the electrode data from these
# probes can be stored individually, grouped as probes (eg, 2-electrode
# pair) or all stored together. these approaches are all exampled here

# create time series with all electrode data stored together
quad = f.make_group("<ElectricalSeries>", "quad", path="/acquisition/timeseries")
quad.set_attr("comments", "Data corresponds to four electrodes (two probes)")
quad.set_dataset("data", np.zeros((10000, 4)), attrs={"resolution": 1.2345e-6})
quad.set_dataset("timestamps", np.arange(10000) * 0.0001)

# indicate that we're recording from the first electrode defined in the
# above map (electrode numbers start at zero, so electrodes are
# 0, 1, 2 and 3; indices refer to rows of electrode_map)
quad.set_dataset("electrode_idx", [0, 1, 2, 3])
########################################################################
# Spikes can be reported by hardware or detected by software; in both
# cases they are processed data, so they belong in a processing module.

# Processing module that owns the spike data.
spike_mod = f.make_group("<Module>", "my spikes")

# EventWaveform interface: events stored together with their waveforms,
# i.e. the kind of input a spike-sorting algorithm would consume.
spike_iface = spike_mod.make_group("EventWaveform")
spike_iface.set_attr("source", "Data from device FooBar-X1 using dynamic multi-phasic threshold of 5xRMS")

# The SpikeEventSeries published by the EventWaveform interface.
spike = spike_iface.make_group("<SpikeEventSeries>", "my waveforms")
spike.set_attr("comments", "Snapshots of spike events pulled from a recording")
spike.set_dataset("electrode_idx", [2, 3]) # probe 'p1'
# data source (may be redundant with the interface-level source above)
spike.set_attr("source", "Data from device FooBar-X1 using dynamic multi-phasic threshold of 5xRMS")

# Bogus simulated data: one fixed biphasic waveform on two electrodes.
evt = np.zeros((8,2))
evt[3][0] = 0.01
evt[4][0] = -0.005
evt[3][1] = 0.005
evt[4][1] = -0.0025

# 20 identical events at pseudorandom (but deterministic) times.
data = [evt] * 20
t = []
last = 1.0
for i in range(20):
    last += (i * 17) % 29
    t.append(last)

# save the simulated data and times
spike.set_dataset("data", data, attrs={"resolution": 1.2345e-6})
spike.set_dataset("timestamps", t)

# If the data were stored in another unit such as microvolts, a conversion
# to Volts would be attached alongside the data:
#   spike.set_dataset("data", data, attrs={"resolution": 1.2345e-6, "conversion": 1.0e-6})

# close file, otherwise it will fail to write properly
f.close()
| StarcoderdataPython |
6486676 |
from pyvisdk.esxcli.executer import execute_soap
from pyvisdk.esxcli.base import Base
class NetworkIpInterfaceIpv6Address(Base):
    '''
    Commands to list and update IPv6 addresses assigned to the system.
    '''
    moid = 'ha-cli-handler-network-ip-interface-ipv6-address'

    def _invoke(self, method, **kwargs):
        '''Forward an esxcli SOAP invocation for this handler's moid.'''
        return execute_soap(self._client, self._host, self.moid, method, **kwargs)

    def add(self, interfacename, ipv6):
        '''
        Add a static IPv6 address to a given VMkernel network interface.
        :param interfacename: string, The name of the VMkernel network interface to add a static IPv6 address to. This name must be an interface listed in the interface list command.
        :param ipv6: string, The IPv6 address to add to the given VMkernel network interface. This must be in X:X:X::/X format
        :returns: boolean
        '''
        return self._invoke('vim.EsxCLI.network.ip.interface.ipv6.address.Add',
                            interfacename=interfacename,
                            ipv6=ipv6)

    def list(self):
        '''
        This command will list all of the IPv6 addresses currently assigned to the system
        :returns: vim.EsxCLI.network.ip.interface.ipv6.address.list.IPv6Interface[]
        '''
        return self._invoke('vim.EsxCLI.network.ip.interface.ipv6.address.List')

    def remove(self, interfacename, ipv6):
        '''
        Remove an IPv6 address from a given VMkernel network interface.
        :param interfacename: string, The name of the VMkernel network interface to remove an IPv6 address from. This name must be an interface listed in the interface list command.
        :param ipv6: string, The IPv6 address to remove from the given VMkernel network interface. This must be in X:X:X::/X format
        :returns: boolean
        '''
        return self._invoke('vim.EsxCLI.network.ip.interface.ipv6.address.Remove',
                            interfacename=interfacename,
                            ipv6=ipv6)
158689 | """Argument parser for training pipelines."""
import dataclasses
import re
from argparse import ArgumentTypeError
from enum import Enum
from functools import partial
from typing import Any, List, NewType, Optional, Type, Union
from transformers import HfArgumentParser
def none_checker_bool(val: Union[bool, str]) -> Union[bool, None]:
    """Parse a boolean-ish CLI value, mapping falsy input to None.

    Args:
        val: Either an actual bool or a truthy/falsy string such as
            "yes"/"no", "true"/"false", "t"/"f", "y"/"n", "1"/"0"
            (case insensitive).

    Returns:
        The parsed boolean, or None for falsy input (empty string, None).
        NOTE(review): a literal ``False`` argument also hits the falsy
        branch and yields None rather than False — confirm this is intended.

    Raises:
        ArgumentTypeError: If the string is neither a recognised truthy nor
            falsy value.
    """
    if not val:
        return None
    if isinstance(val, bool):
        return val
    lowered = val.lower()
    if lowered in ("yes", "true", "t", "y", "1"):
        return True
    if lowered in ("no", "false", "f", "n", "0"):
        return False
    raise ArgumentTypeError(
        f"Truthy value expected: got {val} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
    )
def none_checker(val: Any, dtype: Type) -> Any:
    """Cast a CLI value to *dtype*, mapping empty or "none" input to None.

    Args:
        val: Raw value as received from the command line.
        dtype: Target type that non-empty values are cast to.

    Returns:
        ``dtype(val)``, or None when *val* is falsy or the literal "none".
    """
    is_none = (not val) or (val == "none")
    return None if is_none else dtype(val)
# Loose aliases used only in the type annotations below; NewType-over-Any is
# a type-checker marker rather than a real runtime type.
DataClass = NewType("DataClass", Any)  # type: ignore
DataClassType = NewType("DataClassType", Any)  # type: ignore
class ArgumentParser(HfArgumentParser):
    """ArgumentParser inherited from hf's parser with modified dataclass arguments addition for better handling of None values."""

    def _add_dataclass_arguments(self, dtype: DataClassType) -> None:
        """Add one argparse argument per init field of the given dataclass.

        Compared to the upstream HfArgumentParser implementation, values are
        routed through ``none_checker``/``none_checker_bool`` so that empty
        or "none" command-line input becomes None.

        Args:
            dtype: data class type.
        """
        # Optionally group the arguments under the dataclass's own group name.
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self  # type: ignore
        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field_name = f"--{field.name}"
            kwargs = field.metadata.copy()  # type: ignore
            # field.metadata is not used at all by Data Classes,
            # it is provided as a third-party extension mechanism.
            if isinstance(field.type, str):
                raise ImportError(
                    "This implementation is not compatible with Postponed Evaluation of Annotations (PEP 563),"
                    "which can be opted in from Python 3.7 with `from __future__ import annotations`."
                    "We will add compatibility when Python 3.9 is released."
                )
            # Normalize Optional[...] / Union[..., None] annotations down to
            # the underlying primitive or List[primitive] type.
            typestring = str(field.type)
            for prim_type in (int, float, str):
                for collection in (List,):
                    if (
                        typestring == f"typing.Union[{collection[prim_type]}, NoneType]"  # type: ignore
                        or typestring == f"typing.Optional[{collection[prim_type]}]"  # type: ignore
                    ):
                        field.type = collection[prim_type]  # type: ignore
                if (
                    typestring == f"typing.Union[{prim_type.__name__}, NoneType]"
                    or typestring == f"typing.Optional[{prim_type.__name__}]"
                ):
                    field.type = prim_type
            # Enum fields: expose the enum values as argparse choices.
            if isinstance(field.type, type) and issubclass(field.type, Enum):
                kwargs["choices"] = [x.value for x in field.type]
                kwargs["type"] = type(kwargs["choices"][0])
                if field.default is not dataclasses.MISSING:
                    kwargs["default"] = field.default
                else:
                    kwargs["required"] = True
            # Bool fields: add a --no_<name> negation flag when defaulting to
            # True, and parse values through none_checker_bool.
            elif field.type is bool or field.type == Optional[bool]:
                if field.default is True:
                    parser.add_argument(
                        f"--no_{field.name}",
                        action="store_false",
                        dest=field.name,
                        **kwargs,
                    )
                # Hack because type=bool in argparse does not behave as we want.
                kwargs["type"] = none_checker_bool
                if field.type is bool or (
                    field.default is not None
                    and field.default is not dataclasses.MISSING
                ):
                    # Default value is False if we have no default when of type bool.
                    default = (
                        False if field.default is dataclasses.MISSING else field.default
                    )
                    # This is the value that will get picked if we don't include --field_name in any way
                    kwargs["default"] = default
                    # This tells argparse we accept 0 or 1 value after --field_name
                    kwargs["nargs"] = "?"
                    # This is the value that will get picked if we do --field_name (without value)
                    kwargs["const"] = True
            # List fields: accept one-or-more values, each cast through
            # none_checker with the list's element type.
            elif (
                hasattr(field.type, "__origin__")
                and re.search(r"^typing\.List\[(.*)\]$", str(field.type)) is not None
            ):
                kwargs["nargs"] = "+"
                kwargs["type"] = partial(none_checker, dtype=field.type.__args__[0])
                # NOTE(review): this compares each type argument against the
                # partial object just stored in kwargs["type"], which can never
                # equal a type — so this assert appears to fail for any List
                # field. It likely should compare against
                # field.type.__args__[0]. Confirm before relying on it.
                assert all(
                    x == kwargs["type"] for x in field.type.__args__
                ), f"{field.name} cannot be a List of mixed types"
                if field.default_factory is not dataclasses.MISSING:  # type: ignore
                    kwargs["default"] = field.default_factory()  # type: ignore
                elif field.default is dataclasses.MISSING:
                    kwargs["required"] = True
            # Everything else: scalar cast through none_checker.
            else:
                kwargs["type"] = partial(none_checker, dtype=field.type)
                if field.default is not dataclasses.MISSING:
                    kwargs["default"] = field.default
                elif field.default_factory is not dataclasses.MISSING:  # type: ignore
                    kwargs["default"] = field.default_factory()  # type: ignore
                else:
                    kwargs["required"] = True
            parser.add_argument(field_name, **kwargs)
| StarcoderdataPython |
9675560 | #!/usr/bin/env python
from __future__ import print_function
import sys
import time
import random
def write(data):
    """Write *data* plus a newline to stdout and flush immediately so the
    consuming process (exabgp) sees each command as soon as it is issued."""
    out = sys.stdout
    out.write(data + '\n')
    out.flush()
def main():
    """Feed exabgp an initial table of random routes, then churn updates forever.

    argv[1] is the (masked) size of the initial route dump; argv[2] bounds how
    many routes are rewritten in each roughly one-second batch. Announce and
    withdraw commands are written to stdout; progress goes to stderr.
    """
    if len(sys.argv) < 2:
        print("%s <number of routes> <updates per second thereafter>")
        sys.exit(1)

    initial = sys.argv[1]
    thereafter = sys.argv[2]

    if not initial.isdigit() or not thereafter.isdigit():
        write('please give valid numbers')
        sys.exit(1)

    # Limit to sane numbers :-)
    number = int(initial) & 0x00FFFFFF
    after = int(thereafter) & 0x0000FFFF

    range1 = (number >> 16) & 0xFF
    range2 = (number >> 8) & 0xFF
    range3 = (number) & 0xFF

    ip = {}
    # Pool of 200 random next-hop addresses shared by all generated routes.
    nexthop = [
        '%d.%d.%d.%d' % (random.randint(1, 200), random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
        for _ in range(200)
    ]

    # Generate the requested number of distinct route prefixes.
    for ip1 in range(0, range1):
        for ip2 in range(0, 256):
            for ip3 in range(0, 256):
                generated = '%d.%d.%d.%d' % (random.randint(1, 200), ip1, ip2, ip3)
                ip[generated] = random.choice(nexthop)

    for ip2 in range(0, range2):
        for ip3 in range(0, 256):
            generated = '%d.%d.%d.%d' % (random.randint(1, 200), range1, ip2, ip3)
            ip[generated] = random.choice(nexthop)

    for ip3 in range(0, range3):
        generated = '%d.%d.%d.%d' % (random.randint(1, 200), range1, range2, ip3)
        ip[generated] = random.choice(nexthop)

    count = 0

    # initial table dump
    # Bug fix: dict.iteritems() only exists on Python 2; .items() works on
    # both 2 and 3 (the file already imports print_function for 2/3 support).
    for k, v in ip.items():
        count += 1
        write(
            'announce route %s next-hop %s med 1%02d as-path [ 100 101 102 103 104 105 106 107 108 109 110 ]'
            % (k, v, len(k))
        )
        if count % 10000 == 0:
            sys.stderr.write('initial : announced %d\n' % count)

    # keep count even so the += 2 increments below can still hit % 100 == 0
    count &= 0xFFFFFFFE

    # modify routes forever
    while True:
        now = time.time()

        # Collect a random-length prefix of the table to rewrite this batch.
        changed = {}
        for k, v in ip.items():
            changed[k] = v
            if not random.randint(0, after):
                break

        for k, v in changed.items():
            count += 2
            write(
                'withdraw route %s next-hop %s med 1%02d as-path [ 100 101 102 103 104 105 106 107 108 109 110 ]'
                % (k, v, len(k))
            )
            ip[k] = random.choice(nexthop)
            write(
                'announce route %s next-hop %s med 1%02d as-path [ 100 101 102 103 104 105 106 107 108 109 110 ]'
                % (k, ip[k], len(k))
            )
            if count % 100 == 0:
                sys.stderr.write('updates : announced %d\n' % count)

        # NOTE(review): this sleeps (elapsed + 1.0) seconds, so batches run
        # slower than once per second as the work grows; if a fixed 1 Hz
        # cadence was intended, max(0.0, 1.0 - (time.time() - now)) is the
        # usual form. Left unchanged pending confirmation.
        time.sleep(time.time() - now + 1.0)
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # Ctrl-C is the normal way to stop the generator; exit quietly.
        pass
| StarcoderdataPython |
8184429 | <reponame>Maastro-CDS-Imaging-Group/SQLite4Radiomics
from logic.entities.ct_series import CtSeries
from test.mock_ups.logic.entities.series_with_image_slices import SeriesWithImageSlicesMockUp
class CtSeriesMockUp(CtSeries, SeriesWithImageSlicesMockUp):
    """Test double that combines CtSeries with the SeriesWithImageSlices
    mock-up base; adds no behaviour of its own."""
    pass
| StarcoderdataPython |
9773156 | import os, sys, time
# Python 2 integration test: floods the watch-folder with copies of a test
# image, waits for the pipeline to drain it, then verifies that every image
# reached both the save-folder (with the "Maywood2017" prefix) and the
# original-folder, cleaning up as it goes.
print sys.argv

# Testing with large input.
imagesToCreate = int(sys.argv[1])
imageList = []
missingImages = []

# Stage: copy the fixture image N times into the watched directory.
print 'Creating images...'
for num in range(0, imagesToCreate):
    print 'Creating picture #' + str(num)
    os.system('cp testimage.jpg ../watch-folder/testimage' + str(num) + '.jpg')
    imageList.append('testimage' + str(num) + '.jpg')

# While loop block checks if watch-folder has unprocessed files.
# (.gitignore is the only file expected to remain once processing is done.)
while os.listdir('../watch-folder/') != ['.gitignore']:
    time.sleep(1)
    print 'Processing...'

print 'Checking files...'
print str(imagesToCreate) + ' files created.'
for num in range(0, imagesToCreate):
    # Check save-folder
    if os.path.exists('../save-folder/Maywood2017' + imageList[num]):
        print imageList[num] + ' exists in save-folder. Deleting.'
        os.system('rm ../save-folder/Maywood2017' + imageList[num])
    else:
        print imageList[num] + ' was not found.'
        missingImages.append(imageList[num])
    # Check original-folder
    if os.path.exists('../original-folder/' + imageList[num]):
        print imageList[num] + ' exists in original-folder. Deleting.'
        os.system('rm ../original-folder/' + imageList[num])

print 'Checks completed.'
print str(imagesToCreate - len(missingImages)) + ' out of ' + str(imagesToCreate) + ' tests passed.'
| StarcoderdataPython |
345245 | <reponame>wcastello/splunk-sdk-python
#!/usr/bin/env python
#
# Copyright 2011-2015 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import print_function
from tests import testlib
import logging
import os
import splunklib.client as client
try:
import unittest
except ImportError:
import unittest2 as unittest
class IndexTest(testlib.SDKTestCase):
    """Integration tests for Splunk index creation, submission and cleanup.

    Bug fix: ``test_submit_namespaced`` contained the literal ``<PASSWORD>``
    (a dataset redaction artifact that is not valid Python); restored to
    ``self.service.password``, matching how ``self.service.username`` is
    used on the adjacent line and the upstream splunk-sdk-python test.
    """

    def setUp(self):
        super(IndexTest, self).setUp()
        self.index_name = testlib.tmpname()
        self.index = self.service.indexes.create(self.index_name)
        self.assertEventuallyTrue(lambda: self.index.refresh()['disabled'] == '0')

    def tearDown(self):
        super(IndexTest, self).tearDown()
        # We can't delete an index with the REST API before Splunk
        # 5.0. In 4.x, we just have to leave them lying around until
        # someone cares to go clean them up. Unique naming prevents
        # clashes, though.
        if self.service.splunk_version >= (5,):
            if self.index_name in self.service.indexes and "TRAVIS" in os.environ:
                self.service.indexes.delete(self.index_name)
                self.assertEventuallyTrue(lambda: self.index_name not in self.service.indexes)
        else:
            logging.warning("test_index.py:TestDeleteIndex: Skipped: cannot "
                            "delete indexes via the REST API in Splunk 4.x")

    def totalEventCount(self):
        # Refresh before reading so the count reflects newly-indexed events.
        self.index.refresh()
        return int(self.index['totalEventCount'])

    def test_delete(self):
        if self.service.splunk_version >= (5,):
            self.assertTrue(self.index_name in self.service.indexes)
            self.service.indexes.delete(self.index_name)
            self.assertEventuallyTrue(lambda: self.index_name not in self.service.indexes)

    def test_integrity(self):
        self.check_entity(self.index)

    def test_default(self):
        default = self.service.indexes.get_default()
        self.assertTrue(isinstance(default, str))

    def test_disable_enable(self):
        self.index.disable()
        self.index.refresh()
        self.assertEqual(self.index['disabled'], '1')
        self.index.enable()
        self.index.refresh()
        self.assertEqual(self.index['disabled'], '0')

    # def test_submit_and_clean(self):
    #     self.index.refresh()
    #     original_count = int(self.index['totalEventCount'])
    #     self.index.submit("Hello again!", sourcetype="Boris", host="meep")
    #     self.assertEventuallyTrue(lambda: self.totalEventCount() == original_count+1, timeout=50)
    #     # Cleaning an enabled index on 4.x takes forever, so we disable it.
    #     # However, cleaning it on 5 requires it to be enabled.
    #     if self.service.splunk_version < (5,):
    #         self.index.disable()
    #         self.restartSplunk()
    #     self.index.clean(timeout=500)
    #     self.assertEqual(self.index['totalEventCount'], '0')

    def test_prefresh(self):
        self.assertEqual(self.index['disabled'], '0')  # Index is prefreshed

    def test_submit(self):
        event_count = int(self.index['totalEventCount'])
        self.assertEqual(self.index['sync'], '0')
        self.assertEqual(self.index['disabled'], '0')
        self.index.submit("Hello again!", sourcetype="Boris", host="meep")
        self.assertEventuallyTrue(lambda: self.totalEventCount() == event_count+1, timeout=50)

    def test_submit_namespaced(self):
        s = client.connect(**{
            "username": self.service.username,
            # Fixed: was the redaction placeholder "<PASSWORD>".
            "password": self.service.password,
            "owner": "nobody",
            "app": "search"
        })
        i = s.indexes[self.index_name]
        event_count = int(i['totalEventCount'])
        self.assertEqual(i['sync'], '0')
        self.assertEqual(i['disabled'], '0')
        i.submit("Hello again namespaced!", sourcetype="Boris", host="meep")
        self.assertEventuallyTrue(lambda: self.totalEventCount() == event_count+1, timeout=50)

    def test_submit_via_attach(self):
        event_count = int(self.index['totalEventCount'])
        cn = self.index.attach()
        cn.send(b"Hello Boris!\r\n")
        cn.close()
        self.assertEventuallyTrue(lambda: self.totalEventCount() == event_count+1, timeout=60)

    def test_submit_via_attach_using_token_header(self):
        # Remove the prefix from the token
        s = client.connect(**{'token': self.service.token.replace("Splunk ", "")})
        i = s.indexes[self.index_name]
        event_count = int(i['totalEventCount'])
        if s.has_cookies():
            del s.http._cookies
        cn = i.attach()
        cn.send(b"Hello Boris 5!\r\n")
        cn.close()
        self.assertEventuallyTrue(lambda: self.totalEventCount() == event_count+1, timeout=60)

    def test_submit_via_attached_socket(self):
        event_count = int(self.index['totalEventCount'])
        f = self.index.attached_socket
        with f() as sock:
            sock.send(b'Hello world!\r\n')
        self.assertEventuallyTrue(lambda: self.totalEventCount() == event_count+1, timeout=60)

    def test_submit_via_attach_with_cookie_header(self):
        # Skip this test if running below Splunk 6.2, cookie-auth didn't exist before
        splver = self.service.splunk_version
        if splver[:2] < (6, 2):
            self.skipTest("Skipping cookie-auth tests, running in %d.%d.%d, this feature was added in 6.2+" % splver)
        event_count = int(self.service.indexes[self.index_name]['totalEventCount'])
        cookie = "%s=%s" % (list(self.service.http._cookies.items())[0])
        service = client.Service(**{"cookie": cookie})
        service.login()
        cn = service.indexes[self.index_name].attach()
        cn.send(b"Hello Boris!\r\n")
        cn.close()
        self.assertEventuallyTrue(lambda: self.totalEventCount() == event_count+1, timeout=60)

    def test_submit_via_attach_with_multiple_cookie_headers(self):
        # Skip this test if running below Splunk 6.2, cookie-auth didn't exist before
        splver = self.service.splunk_version
        if splver[:2] < (6, 2):
            self.skipTest("Skipping cookie-auth tests, running in %d.%d.%d, this feature was added in 6.2+" % splver)
        event_count = int(self.service.indexes[self.index_name]['totalEventCount'])
        service = client.Service(**{"cookie": 'a bad cookie'})
        service.http._cookies.update(self.service.http._cookies)
        service.login()
        cn = service.indexes[self.index_name].attach()
        cn.send(b"Hello Boris!\r\n")
        cn.close()
        self.assertEventuallyTrue(lambda: self.totalEventCount() == event_count+1, timeout=60)

    def test_upload(self):
        if not self.app_collection_installed():
            print("Test requires sdk-app-collection. Skipping.")
            return
        self.install_app_from_collection("file_to_upload")
        event_count = int(self.index['totalEventCount'])
        path = self.pathInApp("file_to_upload", ["log.txt"])
        self.index.upload(path)
        self.assertEventuallyTrue(lambda: self.totalEventCount() == event_count+4, timeout=60)
if __name__ == "__main__":
    try:
        # Prefer the unittest2 backport when available (richer asserts on old Pythons).
        import unittest2 as unittest
    except ImportError:
        import unittest
    unittest.main()
| StarcoderdataPython |
115071 | '''
Library for 2-component Flory-Huggings theory.
Author: <NAME>
Date created: 23 March 2022
'''
import numpy as np
def help():
    """Print a short summary of the public functions exposed by FH.py.

    NOTE: this module-level ``help`` shadows the builtin of the same name.
    """
    summary = [
        'Here are the list of functions included in FH.py:\n',
        ' critical(n = 1): returns the critical concentration and critical interaction [phi_c, chi_c]\n',
        ' spinodal(chi, n = 1): returns spinodal concentrations [p1, p2, chi] in the valid chi range\n',
        ' GL_binodal(chi, n = 1): Ginzburg-Landau binodal [p1, p2, chi]\n',
        ' binodal(chi, n = 1, iteration = 5, UseImprovedMap = True): self-consistent solution with speficied number of iterations [p1, p2, chi]. You can also use the simple map to see what it does\n',
        ' analytic_binodal(x, n = 1): analytic forms',
    ]
    for line in summary:
        print(line)
def critical(n = 1):
    """Critical point of the two-component Flory-Huggins free energy.

    Parameters
    ----------
    n : polymer length (degree of polymerization), default 1.

    Returns
    -------
    numpy.ndarray ``[phi_c, chi_c]``: critical volume fraction and
    critical interaction strength.
    """
    sqrt_n = np.sqrt(n)
    chi_c = 0.5 * np.power(1. + 1. / sqrt_n, 2)
    phi_c = 1. / (1. + sqrt_n)
    return np.array([phi_c, chi_c])
def spinodal(x, n = 1):
    """Spinodal concentrations for interaction strength(s) ``x``.

    Scalar ``x``: returns ``[phi_plus, phi_minus]`` (requires x > chi_c).
    Array ``x``:  returns ``[phi_plus, phi_minus, x]`` restricted to the
    entries with x >= chi_c.
    Raises ValueError when no ``x`` reaches the critical interaction.
    """
    # Critical interaction chi_c, inlined from critical(n)[1].
    chi_c = 0.5 * np.power(1. + 1. / np.sqrt(n), 2)
    gamma = 1. - 1. / n

    def _roots(chi):
        # Midpoint and half-width of the two spinodal branches.
        half = 1. / 2. - gamma / (4. * chi)
        spread = np.sqrt(np.power(half, 2) - 1. / (2. * chi * n))
        return half, spread

    if not np.array(x).shape:
        if x > chi_c:
            half, spread = _roots(x)
            return np.array([half + spread, half - spread])
        raise ValueError('interaction strength too small - no LLPS!')
    if max(x) < chi_c:
        raise ValueError('interaction strength too small - no LLPS!')
    x = np.array(x)
    x = x[x >= chi_c]
    half, spread = _roots(x)
    return np.array([half + spread, half - spread, x])
def GL_binodal(x, n = 1):
    """Ginzburg-Landau (expansion about the critical point) binodal.

    Returns ``[phi_plus, phi_minus, x]``; array input is restricted to
    entries with x >= chi_c.  Raises ValueError when no ``x`` reaches the
    critical interaction.
    """
    # Critical point, inlined from critical(n).
    sqrt_n = np.sqrt(n)
    phi_c = 1. / (1. + sqrt_n)
    chi_c = 0.5 * np.power(1. + 1. / sqrt_n, 2)

    def _branches(chi):
        width = np.sqrt(3. * (chi - chi_c) / (2. * np.power(chi_c, 2) * sqrt_n))
        return np.array([phi_c + width, phi_c - width, chi])

    if not np.array(x).shape:
        if x > chi_c:
            return _branches(x)
        raise ValueError('interaction strength too small - no LLPS!')
    if max(x) < chi_c:
        raise ValueError('interaction strength too small - no LLPS!')
    x = np.array(x)
    return _branches(x[x >= chi_c])
def binodal(x, n = 1, iteration = 5, UseImprovedMap = True):
    """Self-consistent binodal concentrations.

    Starts from the Ginzburg-Landau guess and refines it with `iteration`
    fixed-point steps.  With ``UseImprovedMap=True`` a Newton-type update is
    applied; otherwise the plain self-consistent map is iterated.

    Returns ``np.array([p1, p2, x])`` over the valid (x >= chi_c) range,
    where p1/p2 are the upper/lower branch concentrations.

    NOTE(review): a fractional ``n < 1`` matches neither branch below and
    falls through returning None - confirm callers never pass such n.
    """
    assert iteration >= 0
    gamma = 1. - 1./n
    if n == 1:
        # Symmetric mixture: the branches mirror around phi = 1/2, so only
        # the upper branch pp is iterated and the lower one is 1 - pp.
        guess = GL_binodal(x)
        pp = guess[0]
        xx = guess[2]
        if UseImprovedMap:
            for _ in range(iteration):
                ee = np.exp(- 2 * xx * pp + xx)
                # Newton step for the fixed-point equation pp = 1/(1 + ee).
                pp = (2. * xx * pp * ee - 1. - ee)/(2. * xx * ee - (1. + ee)**2)
        else:
            for _ in range(iteration):
                ee = np.exp(- 2 * xx * pp + xx)
                pp = 1/(1 + ee)
        return np.array([pp, 1 - pp, xx])
    if n > 1:
        guess = GL_binodal(x, n = n)
        p1 = guess[0]
        p2 = guess[1]
        xx = guess[2]
        if UseImprovedMap:
            for _ in range(iteration):
                # a, b: exponential factors from the exchange chemical
                # potentials; g1, g2: the plain self-consistent map values.
                a = np.exp( - 2. * xx * (p1 - p2))
                b = np.exp( - gamma * (p1 - p2) - xx * (np.power(p1,2) - np.power(p2,2)))
                c = np.power(a/b, n)
                g1 = (1. - b)/(1. - c * b)
                g2 = (1. - b)/(np.power(b/a, n) - b)
                # Partial derivatives of log(a) and log(b) w.r.t. p1 and p2.
                d1lna = - 2. * xx
                d1lnb = - gamma - xx * 2. * p1
                d2lna = 2. * xx
                d2lnb = gamma + xx * 2. * p2
                # 2x2 Jacobian of the residual (g - p); the Newton step
                # solves the linear system by Cramer's rule (detj).
                j11 = g1**2 * (- d1lnb * b*(1-c)/(1-b)**2 + n * (d1lna - d1lnb) * c * b /(1-b)) - 1
                j21 = g1**2 * (- d2lnb * b*(1-c)/(1-b)**2 + n * (d2lna - d2lnb) * c * b /(1-b))
                j12 = (j11 + 1) * c + g1 * n * c * (d1lna - d1lnb)
                j22 = j21 * c + g1 * n * c * (d2lna - d2lnb) - 1
                detj = j11 * j22 - j12 * j21
                p1_new = np.copy(p1 + (- (g1 - p1) * j22 + (g2 - p2) * j21)/detj)
                p2_new = np.copy(p2 + (- (g2 - p2) * j11 + (g1 - p1) * j12)/detj)
                p1 = p1_new
                p2 = p2_new
        else:
            for _ in range(iteration):
                a = np.exp( - 2. * xx * (p1 - p2))
                b = np.exp( - gamma * (p1 - p2) - xx * (np.power(p1,2) - np.power(p2,2)))
                # Plain self-consistent update.  (The original computed these
                # expressions twice - once as g1/g2 and again, identically,
                # for p1_new/p2_new; the duplicates are removed.)
                p1_new = np.copy((1. - b)/(1. - np.power(a/b, n) * b))
                p2_new = np.copy((1. - b)/(np.power(b/a, n) - b))
                p1 = p1_new
                p2 = p2_new
        return np.array([p1, p2, xx])
def analytic_binodal(x, n = 1):
    """Closed-form approximation to the binodal branches.

    Scalar ``x``: returns ``np.array([phi_plus, phi_minus])`` (requires
    x > chi_c).  Array ``x``: returns ``np.array([phi_plus, phi_minus, x])``
    restricted to entries with x >= chi_c.  Raises ValueError when no ``x``
    reaches the critical interaction.

    NOTE(review): at x == x_c exactly (possible for array input) the n > 1
    formulas divide by tanh(0) and produce non-finite values, as in the
    original implementation.
    """
    # Critical interaction chi_c, inlined from critical(n)[1].
    x_c = 0.5 * np.power(1. + 1. / np.sqrt(n), 2)
    if not np.array(x).shape:
        if x > x_c:
            pp, pm = _analytic_binodal_branches(x, n, x_c)
            return np.array([pp, pm])
        raise ValueError('interaction strength too small - no LLPS!')
    if max(x) < x_c:
        raise ValueError('interaction strength too small - no LLPS!')
    x = np.array(x)
    x = x[x >= x_c]
    pp, pm = _analytic_binodal_branches(x, n, x_c)
    return np.array([pp, pm, x])


def _analytic_binodal_branches(x, n, x_c):
    """Evaluate the two analytic binodal branches.

    Shared by the scalar and array paths of ``analytic_binodal`` (the
    original duplicated this body verbatim in both branches).
    """
    if n == 1:
        # Symmetric case: the two branches mirror around phi = 1/2.
        pp = 1/(1+np.exp(-x * np.tanh(x*np.sqrt(3*(x-2)/8))))
        pm = 1/(1+np.exp(-x * np.tanh(-x*np.sqrt(3*(x-2)/8))))
    else:
        a = n ** 0.25
        D = (x - x_c) / x_c          # reduced distance from the critical point
        c = (a + 1/a) / 2
        s = (a - 1/a) / 2
        cothA = 1/np.tanh((1+D/a**2)*np.sqrt(3*D)/a)
        cothB = 1/np.tanh((1+D*a**2)*np.sqrt(3*D)*a)
        prefactor = c/(cothA+cothB)
        numerator_exp = 8 * prefactor * (s/a**2 + (1+D) * prefactor * cothB / a**2)
        denominator_exp = 8 * prefactor * (s*(1/a**2 - a**2)+(1+D)*prefactor*(cothB/a**2+a**2*cothA))
        pp = (1-np.exp(-numerator_exp))/(1-np.exp(-denominator_exp))
        pm = (1-np.exp(+numerator_exp))/(1-np.exp(+denominator_exp))
    return pp, pm
5131383 | from mcpi.minecraft import Minecraft
import time
import random
# Connection to the running Minecraft server (Raspberry Pi edition API).
mc=Minecraft.create() #set create function to mc
blockType = 3 # set blocktype number (grass, lava, etc...)
# NOTE(review): `amount` is never modified in the loop below, so the
# follow loop effectively runs forever.
amount = 33
steps= 0    # blocks Blocky has walked since he last ate
hungry = 0  # 1 while Blocky is waiting to be fed
# code to have pet blocky follow you around!!!
pos =mc.player.getPos() # get player position
# Spawn Blocky two blocks away from the player along +x.
mc.setBlock(pos.x+2,pos.y,pos.z, blockType)
setHungry = 0  # 1 once a hunger announcement has been made for this cycle
while (amount >= 0):
    # --- hunger: after 1000 steps, pick a random food block and announce it ---
    if steps >= 1000 and setHungry == 0:
        setHungry = 1
        # randint value doubles as the Minecraft block id of the wanted food
        # (remapped below where the friendly name and the block id differ).
        petFood = random.randint(2,9)
        if petFood == 2:
            food = str('grass')
        if petFood == 3:
            food = str('dirt')
        if petFood == 4:
            food = str('cobblestone')
        if petFood == 5:
            food = str('woodPlanks')
        if petFood == 6:
            food = str('sappling')
        if petFood == 7:
            food = str('ironOre')
            petFood=15  # iron ore's actual block id
        if petFood == 8:
            food = str('water')
        if petFood == 9:
            food = str('sand')
            petFood = 12  # sand's actual block id
        mc.postToChat('Blocky is hungry for ' +food)
        hungry = 1
    # --- speed: a hungry pet moves more slowly (longer sleep per step) ---
    if hungry == 0:
        fatigue = 0.3
    else:
        if hungry ==1:
            fatigue = 1
    pos2 = mc.player.getPos()
    # --- follow the player, one axis at a time.  For each direction:
    # if the adjacent cell is air (0), step into it (draw block, pause,
    # erase block, update pos); otherwise, if Blocky is hungry and the
    # blocking cell is the wanted food, eat it and reset the hunger state. ---
    # +x: player is more than 2 blocks ahead (and not exactly 2 behind)
    if pos2.x > pos.x +2 and pos.x -2 != pos2.x:
        #mc.setBlock(x+2,y+jump,z, 0)
        checkBlockX = mc.getBlock(pos.x+1,pos.y,pos.z)
        if checkBlockX == 0:
            #mc.postToChat('Move > x')
            mc.setBlock(pos.x+1,pos.y,pos.z, blockType)
            steps = steps + 1
            time.sleep(fatigue)
            mc.setBlock(pos.x+1,pos.y,pos.z, 0)
            pos.x = pos.x+1
        else:
            if hungry == 1 and checkBlockX == petFood:
                # Eat the food block and clear the hunger cycle.
                mc.setBlock(pos.x+1,pos.y,pos.z, 0)
                #mc.setBlock(pos.x+1,pos.y,pos.z, blockType)
                steps = 0
                hungry = 0
                setHungry = 0
                mc.postToChat('Yummy thank you!')
    # -x direction
    if pos2.x < pos.x-2 and pos.x +2 != pos2.x:
        checkBlockX2 = mc.getBlock(pos.x-1,pos.y,pos.z)
        if checkBlockX2 == 0:
            #mc.postToChat('Move < x')
            mc.setBlock(pos.x-1,pos.y,pos.z, blockType)
            steps = steps + 1
            time.sleep(fatigue)
            mc.setBlock(pos.x-1,pos.y,pos.z, 0)
            pos.x = pos.x -1
        else:
            if hungry == 1 and checkBlockX2== petFood:
                mc.setBlock(pos.x-1,pos.y,pos.z, 0)
                #mc.setBlock(pos.x-1,pos.y,pos.z, blockType)
                steps = 0
                hungry = 0
                setHungry = 0
                mc.postToChat('Yummy thank you!')
    # +y direction (no 2-block dead zone vertically)
    if pos2.y > pos.y:
        checkBlockY = mc.getBlock(pos.x,pos.y+1,pos.z)
        if checkBlockY == 0:
            #mc.postToChat('Move > y')
            mc.setBlock(pos.x,pos.y+1,pos.z, blockType)
            steps = steps + 1
            time.sleep(fatigue)
            mc.setBlock(pos.x,pos.y+1,pos.z, 0)
            pos.y = pos.y+1
        else:
            if hungry == 1 and checkBlockY == petFood:
                mc.setBlock(pos.x,pos.y+1,pos.z, 0)
                #mc.setBlock(pos.x,pos.y+1,pos.z, blockType)
                steps = 0
                hungry = 0
                setHungry = 0
                mc.postToChat('Yummy thank you!')
    # -y direction
    if pos2.y < pos.y:
        checkBlockY2 = mc.getBlock(pos.x,pos.y-1,pos.z)
        if checkBlockY2 == 0:
            #mc.postToChat('Move < y')
            mc.setBlock(pos.x,pos.y-1,pos.z, blockType)
            steps = steps + 1
            time.sleep(fatigue)
            mc.setBlock(pos.x,pos.y-1,pos.z, 0)
            pos.y = pos.y -1
        else:
            if hungry == 1 and checkBlockY2 == petFood:
                mc.setBlock(pos.x,pos.y-1,pos.z, 0)
                #mc.setBlock(pos.x,pos.y-1,pos.z, blockType)
                steps = 0
                hungry = 0
                setHungry = 0
                mc.postToChat('Yummy thank you!')
    # +z direction (1-block dead zone here, unlike the 2-block one on x)
    if pos2.z > pos.z and pos.z+1 != pos2.z:
        checkBlockZ = mc.getBlock(pos.x,pos.y,pos.z+1)
        if checkBlockZ == 0:
            #mc.postToChat('Move > z')
            mc.setBlock(pos.x,pos.y,pos.z+1, blockType)
            steps = steps + 1
            time.sleep(fatigue)
            mc.setBlock(pos.x,pos.y,pos.z+1, 0)
            pos.z = pos.z+1
        else:
            if hungry == 1 and checkBlockZ== petFood:
                mc.setBlock(pos.x,pos.y,pos.z+1, 0)
                #mc.setBlock(pos.x,pos.y,pos.z+1, blockType)
                steps = 0
                hungry = 0
                setHungry = 0
                mc.postToChat('Yummy thank you!')
    # -z direction
    if pos2.z < pos.z and pos.z -1 != pos2.z:
        checkBlockZ2 = mc.getBlock(pos.x,pos.y,pos.z-1)
        if checkBlockZ2 ==0:
            #mc.postToChat('Move < z')
            mc.setBlock(pos.x,pos.y,pos.z-1, blockType)
            steps = steps + 1
            time.sleep(fatigue)
            mc.setBlock(pos.x,pos.y,pos.z-1, 0)
            pos.z = pos.z -1
        else:
            if hungry == 1 and checkBlockZ2 == petFood:
                mc.setBlock(pos.x,pos.y,pos.z-1, 0)
                #mc.setBlock(pos.x,pos.y,pos.z-1, blockType)
                steps = 0
                hungry = 0
                setHungry = 0
                mc.postToChat('Yummy thank you!')
| StarcoderdataPython |
3436719 | <filename>test/test_run.py
import sys
if int(sys.version.split(".")[1]) < 6:
# python 3.5
pass
else:
from tools import data
from tools import exceptions
from tools import utils
from unittest import mock
import anndata
import numpy as np
import pandas as pd
import re
import rpy2.rinterface_lib.callbacks
import rpy2.rinterface_lib.embedded
import rpy2.robjects as ro
import scipy.sparse
import scprep
import scprep.run
import scprep.run.conversion
import scprep.run.r_function
import sklearn.cluster
import unittest
# Keep a reference to rpy2's original warning callback so tests can restore it.
builtin_warning = rpy2.rinterface_lib.callbacks.consolewrite_warnerror
def test_verbose():
    """An RFunction built with verbose=True should run its body and return the list."""
    fun = scprep.run.RFunction(
        setup="message('This should not print')",
        body="message('Verbose test\n\n'); list(1,2,3)",
        verbose=True,
    )
    expected = np.array([[1], [2], [3]])
    assert np.all(fun() == expected)
def test_install_bioc():
    """Requesting Bioconductor 3.1 on a newer R must raise the version-mismatch error."""
    expected_message = "Error: Bioconductor version '3.1' requires R version '3.2'; use"
    utils.assert_raises_message(
        rpy2.rinterface_lib.embedded.RRuntimeError,
        expected_message,
        scprep.run.install_bioconductor,
        version="3.1",
        site_repository="https://bioconductor.org/packages/3.1/bioc",
        verbose=False,
    )
def test_install_github_lib():
    """Installing a package from GitHub should make it visible to installed.packages()."""
    # Disabled: the unconditional raise below skips the network-dependent body.
    raise exceptions.SkipTestException
    scprep.run.install_github("mvuorre/exampleRPackage", verbose=False)
    checker = scprep.run.RFunction(
        body="""
        packages <- installed.packages()
        'exampleRPackage' %in% packages
        """
    )
    assert checker()
def test_install_github_dependencies_None():
    """With dependencies=None, Depends/Imports/LinkingTo must all be installed."""
    # Disabled: the unconditional raise below skips the network-dependent body.
    raise exceptions.SkipTestException
    scprep.run.install_github("mvuorre/exampleRPackage", verbose=False)
    checker = scprep.run.RFunction(
        body="""
        if (!require("pacman", quietly=TRUE)) {
        install.packages("pacman",
        repos='http://cran.rstudio.com')
        }
        deps <- pacman::p_depends(AnomalyDetection, local=TRUE)[c("Depends",
        "Imports","LinkingTo")]
        all(unname(unlist(deps)) %in% installed.packages()[, "Package"])
        """
    )
    assert checker()
def test_install_github_dependencies_True():
    """With dependencies=True, Suggests must be installed in addition to hard deps."""
    # Disabled: the unconditional raise below skips the network-dependent body.
    raise exceptions.SkipTestException
    scprep.run.install_github(
        "mvuorre/exampleRPackage", verbose=False, dependencies=True
    )
    checker = scprep.run.RFunction(
        body="""
        if (!require("pacman", quietly=TRUE)) {
        install.packages("pacman",
        repos='http://cran.rstudio.com')
        }
        deps <- pacman::p_depends(AnomalyDetection, local=TRUE)[c("Depends",
        "Imports","LinkingTo","Suggests")]
        deps <- unname(unlist(deps))
        installed <- installed.packages()[, "Package"]
        success <- all(deps %in% installed)
        list(
        success=success,
        missing=setdiff(deps, installed),
        deps=deps,
        installed=installed
        )
        """
    )
    outcome = checker()
    assert outcome["success"], outcome
class TestSplatter(unittest.TestCase):
    """Integration tests for scprep.run.SplatSimulate (Splatter R wrapper).

    Each test runs a small simulation (10 cells x 200 genes) and checks the
    shape and content of every field of the returned result dictionary.
    """

    @classmethod
    def setUpClass(self):
        # NOTE(review): the first parameter of a classmethod is conventionally
        # named `cls`; it receives the class object here, not an instance.
        scprep.run.splatter.install(verbose=False)

    def test_splatter_deprecated(self):
        """Passing the old `path_length` kwarg must emit a FutureWarning."""
        utils.assert_warns_message(
            FutureWarning,
            "path_length has been renamed path_n_steps, "
            "please use path_n_steps in the future.",
            scprep.run.SplatSimulate,
            batch_cells=10,
            n_genes=200,
            verbose=0,
            path_length=100,
        )

    def test_splatter_default(self):
        """Default single-batch simulation returns all per-cell/per-gene fields."""
        sim = scprep.run.SplatSimulate(batch_cells=10, n_genes=200, verbose=0)
        assert sim["counts"].shape == (10, 200)
        assert np.all(sim["batch"] == "Batch1")
        assert sim["batch_cell_means"].shape == (10, 200)
        assert sim["base_cell_means"].shape == (10, 200)
        assert sim["bcv"].shape == (10, 200)
        assert sim["cell_means"].shape == (10, 200)
        assert sim["true_counts"].shape == (10, 200)
        # No dropout was requested, so the dropout field is absent.
        assert sim["dropout"] is None
        assert sim["step"].shape == (10,)
        assert sim["group"].shape == (10,)
        assert sim["exp_lib_size"].shape == (10,)
        assert sim["base_gene_mean"].shape == (200,)
        assert sim["outlier_factor"].shape == (200,)
        # With a single batch/group there are no batch factors and exactly one
        # de_fac/sigma_fac key pair.
        assert sum(["batch_fac" in k for k in sim.keys()]) == 0
        assert sum(["de_fac" in k for k in sim.keys()]) == 1
        assert sim["de_fac_1"].shape == (200,)
        assert sum(["sigma_fac" in k for k in sim.keys()]) == 1
        assert sim["sigma_fac_1"].shape == (200,)

    def test_splatter_batch(self):
        """Two batches of 5 cells yield Batch1/Batch2 labels and two batch factors."""
        sim = scprep.run.SplatSimulate(batch_cells=[5, 5], n_genes=200, verbose=0)
        assert sim["counts"].shape == (10, 200)
        assert np.all(sim["batch"][:5] == "Batch1")
        assert np.all(sim["batch"][5:] == "Batch2")
        assert sim["batch_cell_means"].shape == (10, 200)
        assert sim["base_cell_means"].shape == (10, 200)
        assert sim["bcv"].shape == (10, 200)
        assert sim["cell_means"].shape == (10, 200)
        assert sim["true_counts"].shape == (10, 200)
        assert sim["dropout"] is None
        assert sim["step"].shape == (10,)
        assert sim["group"].shape == (10,)
        assert sim["exp_lib_size"].shape == (10,)
        assert sim["base_gene_mean"].shape == (200,)
        assert sim["outlier_factor"].shape == (200,)
        # One batch factor per batch.
        assert sum(["batch_fac" in k for k in sim.keys()]) == 2
        assert sim["batch_fac_1"].shape == (200,)
        assert sim["batch_fac_2"].shape == (200,)
        assert sum(["de_fac" in k for k in sim.keys()]) == 1
        assert sim["de_fac_1"].shape == (200,)
        assert sum(["sigma_fac" in k for k in sim.keys()]) == 1
        assert sim["sigma_fac_1"].shape == (200,)

    def test_splatter_groups(self):
        """Groups mode returns one de_fac per group and no step/sigma fields."""
        sim = scprep.run.SplatSimulate(
            method="groups",
            batch_cells=10,
            group_prob=[0.5, 0.5],
            n_genes=200,
            de_fac_loc=[0.1, 0.5],
            verbose=0,
        )
        assert sim["counts"].shape == (10, 200)
        assert np.all(sim["batch"] == "Batch1")
        assert sim["batch_cell_means"].shape == (10, 200)
        assert sim["base_cell_means"].shape == (10, 200)
        assert sim["bcv"].shape == (10, 200)
        assert sim["cell_means"].shape == (10, 200)
        assert sim["true_counts"].shape == (10, 200)
        assert sim["dropout"] is None
        # Steps only exist in paths mode.
        assert sim["step"] is None
        assert sim["group"].shape == (10,)
        assert sim["exp_lib_size"].shape == (10,)
        assert sim["base_gene_mean"].shape == (200,)
        assert sim["outlier_factor"].shape == (200,)
        assert sum(["batch_fac" in k for k in sim.keys()]) == 0
        assert sum(["de_fac" in k for k in sim.keys()]) == 2
        assert sim["de_fac_1"].shape == (200,)
        assert sim["de_fac_2"].shape == (200,)
        assert sum(["sigma_fac" in k for k in sim.keys()]) == 0

    def test_splatter_paths(self):
        """Paths mode returns per-path de_fac and sigma_fac plus a step vector."""
        sim = scprep.run.SplatSimulate(
            method="paths",
            batch_cells=10,
            n_genes=200,
            group_prob=[0.5, 0.5],
            path_from=[0, 0],
            path_n_steps=[100, 200],
            path_skew=[0.4, 0.6],
            de_fac_loc=[0.1, 0.5],
            verbose=0,
        )
        assert sim["counts"].shape == (10, 200)
        assert np.all(sim["batch"] == "Batch1")
        assert sim["batch_cell_means"].shape == (10, 200)
        assert sim["base_cell_means"].shape == (10, 200)
        assert sim["bcv"].shape == (10, 200)
        assert sim["cell_means"].shape == (10, 200)
        assert sim["true_counts"].shape == (10, 200)
        assert sim["dropout"] is None
        assert sim["step"].shape == (10,)
        assert sim["group"].shape == (10,)
        assert sim["exp_lib_size"].shape == (10,)
        assert sim["base_gene_mean"].shape == (200,)
        assert sim["outlier_factor"].shape == (200,)
        assert sum(["batch_fac" in k for k in sim.keys()]) == 0
        assert sum(["de_fac" in k for k in sim.keys()]) == 2
        assert sim["de_fac_1"].shape == (200,)
        assert sim["de_fac_2"].shape == (200,)
        assert sum(["sigma_fac" in k for k in sim.keys()]) == 2
        assert sim["sigma_fac_1"].shape == (200,)
        assert sim["sigma_fac_2"].shape == (200,)

    def test_splatter_dropout(self):
        """Experiment-level dropout populates the per-cell/per-gene dropout field."""
        sim = scprep.run.SplatSimulate(
            batch_cells=10, n_genes=200, dropout_type="experiment", verbose=0
        )
        assert sim["counts"].shape == (10, 200)
        assert np.all(sim["batch"] == "Batch1")
        assert sim["batch_cell_means"].shape == (10, 200)
        assert sim["base_cell_means"].shape == (10, 200)
        assert sim["bcv"].shape == (10, 200)
        assert sim["cell_means"].shape == (10, 200)
        assert sim["true_counts"].shape == (10, 200)
        assert sim["dropout"].shape == (10, 200)
        assert sim["step"].shape == (10,)
        assert sim["group"].shape == (10,)
        assert sim["exp_lib_size"].shape == (10,)
        assert sim["base_gene_mean"].shape == (200,)
        assert sim["outlier_factor"].shape == (200,)
        assert sum(["batch_fac" in k for k in sim.keys()]) == 0
        assert sum(["de_fac" in k for k in sim.keys()]) == 1
        assert sim["de_fac_1"].shape == (200,)
        assert sum(["sigma_fac" in k for k in sim.keys()]) == 1
        assert sim["sigma_fac_1"].shape == (200,)

    def test_splatter_dropout_binomial(self):
        """Binomial dropout at p=0.5 should retain roughly half the true counts."""
        sim = scprep.run.SplatSimulate(
            batch_cells=10,
            n_genes=200,
            dropout_type="binomial",
            dropout_prob=0.5,
            verbose=False,
        )
        assert sim["counts"].shape == (10, 200)
        assert np.all(sim["batch"] == "Batch1")
        assert sim["batch_cell_means"].shape == (10, 200)
        assert sim["base_cell_means"].shape == (10, 200)
        assert sim["bcv"].shape == (10, 200)
        assert sim["cell_means"].shape == (10, 200)
        assert sim["true_counts"].shape == (10, 200)
        # Mean retained fraction over non-zero true counts should be near 0.5.
        dropout_proportion = np.mean(
            sim["counts"][np.where(sim["true_counts"] > 0)]
            / sim["true_counts"][np.where(sim["true_counts"] > 0)]
        )
        assert dropout_proportion < 0.55
        assert dropout_proportion > 0.45
        assert sim["dropout"] is None
        assert sim["step"].shape == (10,)
        assert sim["group"].shape == (10,)
        assert sim["exp_lib_size"].shape == (10,)
        assert sim["base_gene_mean"].shape == (200,)
        assert sim["outlier_factor"].shape == (200,)
        assert sum(["batch_fac" in k for k in sim.keys()]) == 0
        assert sum(["de_fac" in k for k in sim.keys()]) == 1
        assert sim["de_fac_1"].shape == (200,)
        assert sum(["sigma_fac" in k for k in sim.keys()]) == 1
        assert sim["sigma_fac_1"].shape == (200,)

    def test_splatter_warning(self):
        """The _ConsoleWarning helper must swap rpy2's warning callback and restore it."""
        assert (
            rpy2.rinterface_lib.callbacks.consolewrite_warnerror is builtin_warning
        )
        scprep.run.r_function._ConsoleWarning.set_debug()
        assert (
            rpy2.rinterface_lib.callbacks.consolewrite_warnerror
            is scprep.run.r_function._ConsoleWarning.debug
        )
        scprep.run.r_function._ConsoleWarning.set_warning()
        assert (
            rpy2.rinterface_lib.callbacks.consolewrite_warnerror
            is scprep.run.r_function._ConsoleWarning.warning
        )
        scprep.run.r_function._ConsoleWarning.set_builtin()
        assert (
            rpy2.rinterface_lib.callbacks.consolewrite_warnerror is builtin_warning
        )
class TestDyngen(unittest.TestCase):
    """Integration tests for scprep.run.DyngenSimulate (dyngen R wrapper)."""

    @classmethod
    def setUpClass(self):
        # NOTE(review): the first parameter of a classmethod is conventionally
        # named `cls`.
        scprep.run.dyngen.install(verbose=False)

    def test_install_dyngen_lib(self):
        """After install(), the dyngen package must be present in R."""
        scprep.run.dyngen.install(verbose=False)
        fun = scprep.run.RFunction(
            body="""
            packages <- installed.packages()
            'dyngen' %in% packages
            """
        )
        assert fun()

    def test_install_dyngen_dependencies_None(self):
        """All of dyngen's Depends/Imports/LinkingTo must be installed."""
        scprep.run.dyngen.install(verbose=False)
        fun = scprep.run.RFunction(
            body="""
            if (!require("pacman", quietly=TRUE)) {
            install.packages("pacman",
            repos='http://cran.rstudio.com')
            }
            deps <- pacman::p_depends(dyngen)[c("Depends","Imports","LinkingTo")]
            all(unname(unlist(deps)) %in% installed.packages()[, "Package"])
            """
        )
        assert fun()

    def test_dyngen_backbone_not_in_list(self):
        """An unknown backbone name must raise ValueError before any simulation."""
        utils.assert_raises_message(
            ValueError,
            "Input not in default backbone list. "
            "Choose backbone from get_backbones()",
            scprep.run.DyngenSimulate,
            backbone="not_a_backbone",
            verbose=False,
        )

    def test_dyngen_default(self):
        """Default simulation returns cell_info and expression of expected sizes."""
        # Disabled: the unconditional raise below skips the slow simulation.
        raise exceptions.SkipTestException
        sim = scprep.run.DyngenSimulate(
            backbone="bifurcating",
            num_cells=50,
            num_tfs=50,
            num_targets=10,
            num_hks=10,
            verbose=False,
        )
        assert set(sim.keys()) == {"cell_info", "expression"}
        # dyngen may simulate fewer cells than requested; see force_num_cells.
        assert sim["cell_info"].shape[0] > 0
        assert sim["cell_info"].shape[0] <= 50
        assert sim["expression"].shape[0] > 0
        assert sim["expression"].shape[0] <= 50
        # 50 TFs + 10 targets + 10 housekeeping genes = 70 columns.
        assert sim["expression"].shape[1] == 70

    def test_dyngen_force_cell_counts(self):
        """force_num_cells=True must return exactly the requested cell count."""
        # Disabled: the unconditional raise below skips the slow simulation.
        raise exceptions.SkipTestException
        sim = scprep.run.DyngenSimulate(
            backbone="bifurcating",
            num_cells=50,
            num_tfs=50,
            num_targets=10,
            num_hks=10,
            verbose=False,
            force_num_cells=True,
        )
        assert set(sim.keys()) == {"cell_info", "expression"}
        assert sim["cell_info"].shape[0] == 50
        assert sim["expression"].shape == (50, 70)

    def test_dyngen_with_grn(self):
        """compute_cellwise_grn=True adds bulk and cell-wise GRN outputs."""
        # Disabled: the unconditional raise below skips the slow simulation.
        raise exceptions.SkipTestException
        sim = scprep.run.DyngenSimulate(
            backbone="bifurcating",
            num_cells=50,
            num_tfs=50,
            num_targets=10,
            num_hks=10,
            compute_cellwise_grn=True,
            verbose=False,
        )
        assert set(sim.keys()) == {
            "cell_info",
            "expression",
            "bulk_grn",
            "cellwise_grn",
        }
        assert sim["cell_info"].shape[0] > 0
        assert sim["cell_info"].shape[0] <= 50
        assert sim["expression"].shape[0] > 0
        assert sim["expression"].shape[0] <= 50
        assert sim["expression"].shape[1] == 70
        assert sim["bulk_grn"].shape[0] > 0
        assert sim["cellwise_grn"].shape[0] > 0

    def test_dyngen_with_rna_velocity(self):
        """compute_rna_velocity=True adds an rna_velocity matrix matching expression."""
        # Disabled: the unconditional raise below skips the slow simulation.
        raise exceptions.SkipTestException
        sim = scprep.run.DyngenSimulate(
            backbone="bifurcating",
            num_cells=50,
            num_tfs=50,
            num_targets=10,
            num_hks=10,
            compute_rna_velocity=True,
            verbose=False,
        )
        assert set(sim.keys()) == {"cell_info", "expression", "rna_velocity"}
        assert sim["cell_info"].shape[0] > 0
        assert sim["cell_info"].shape[0] <= 50
        assert sim["expression"].shape[0] > 0
        assert sim["expression"].shape[0] <= 50
        assert sim["expression"].shape[1] == 70
        assert sim["rna_velocity"].shape[0] > 0
        assert sim["rna_velocity"].shape[0] <= 50
        assert sim["rna_velocity"].shape[1] == 70
class TestSlingshot(unittest.TestCase):
    """Integration tests for scprep.run.Slingshot pseudotime inference."""

    @classmethod
    def setUpClass(self):
        # NOTE(review): the first parameter of a classmethod is conventionally
        # named `cls`.  Shared fixtures: a 10X dataset, its PCA embedding, and
        # a 6-way k-means clustering of that embedding.
        scprep.run.slingshot.install(verbose=False)
        self.X = data.load_10X()
        self.X_pca = scprep.reduce.pca(self.X)
        self.clusters = sklearn.cluster.KMeans(6).fit_predict(self.X_pca)

    def test_slingshot(self):
        """Basic run: pseudotime/branch/curves shapes agree and branch means increase."""
        # Disabled: the unconditional raise below skips the slow R run.
        raise exceptions.SkipTestException
        slingshot = scprep.run.Slingshot(
            self.X_pca[:, :2], self.clusters, verbose=False
        )
        pseudotime, branch, curves = (
            slingshot["pseudotime"],
            slingshot["branch"],
            slingshot["curves"],
        )
        assert pseudotime.shape[0] == self.X_pca.shape[0]
        assert pseudotime.shape[1] == curves.shape[0]
        assert branch.shape[0] == self.X_pca.shape[0]
        current_pseudotime = -1
        for i in np.unique(branch):
            # All cells on a branch share the same NaN pattern across lineages.
            branch_membership = np.isnan(pseudotime[branch == i])
            assert np.all(branch_membership == branch_membership[0])
            # Branch mean pseudotime must be monotonically increasing.
            new_pseudotime = np.nanmean(pseudotime[branch == i])
            assert new_pseudotime > current_pseudotime
            current_pseudotime = new_pseudotime
        assert curves.shape[1] == self.X_pca.shape[0]
        assert curves.shape[2] == 2
        assert np.all(np.any(~np.isnan(pseudotime), axis=1))

    def test_slingshot_pandas(self):
        """DataFrame input: results keep the input index and a named branch Series."""
        # Disabled: the unconditional raise below skips the slow R run.
        raise exceptions.SkipTestException
        slingshot = scprep.run.Slingshot(
            pd.DataFrame(self.X_pca[:, :2], index=self.X.index),
            self.clusters,
            verbose=False,
        )
        pseudotime, branch, curves = (
            slingshot["pseudotime"],
            slingshot["branch"],
            slingshot["curves"],
        )
        assert np.all(pseudotime.index == self.X.index)
        assert np.all(branch.index == self.X.index)
        assert branch.name == "branch"
        assert pseudotime.shape[0] == self.X_pca.shape[0]
        assert pseudotime.shape[1] == curves.shape[0]
        assert branch.shape[0] == self.X_pca.shape[0]
        current_pseudotime = -1
        for i in np.unique(branch):
            branch_membership = np.isnan(pseudotime.loc[branch == i])
            assert np.all(branch_membership == branch_membership.iloc[0])
            new_pseudotime = np.nanmean(np.nanmean(pseudotime.loc[branch == i]))
            assert new_pseudotime > current_pseudotime
            current_pseudotime = new_pseudotime
        assert curves.shape[1] == self.X_pca.shape[0]
        assert curves.shape[2] == 2
        assert np.all(np.any(~np.isnan(pseudotime), axis=1))

    def test_slingshot_distance(self):
        """A custom distance callable is not implemented and must raise."""
        utils.assert_raises_message(
            NotImplementedError,
            "distance argument not currently implemented",
            scprep.run.Slingshot,
            self.X_pca,
            self.clusters,
            distance=lambda X, Y: np.sum(X - Y),
        )

    def test_slingshot_optional_args(self):
        """start/end cluster, omega, smoother and max_iter options are accepted."""
        # Disabled: the unconditional raise below skips the slow R run.
        raise exceptions.SkipTestException
        slingshot = scprep.run.Slingshot(
            self.X_pca[:, :2],
            self.clusters,
            start_cluster=4,
            omega=0.1,
            smoother="loess",
            max_iter=0,
            verbose=False,
        )
        pseudotime, branch, curves = (
            slingshot["pseudotime"],
            slingshot["branch"],
            slingshot["curves"],
        )
        assert pseudotime.shape[0] == self.X_pca.shape[0]
        assert pseudotime.shape[1] == curves.shape[0]
        assert branch.shape[0] == self.X_pca.shape[0]
        current_pseudotime = -1
        for i in np.unique(branch):
            branch_membership = np.isnan(pseudotime[branch == i])
            assert np.all(branch_membership == branch_membership[0])
            if np.all(np.isnan(pseudotime[branch == i])):
                # With omega set, unassigned cells get branch -1 and all-NaN rows.
                assert i == -1
            else:
                new_pseudotime = np.nanmean(pseudotime[branch == i])
                assert new_pseudotime > current_pseudotime
                current_pseudotime = new_pseudotime
        assert curves.shape[1] == self.X_pca.shape[0]
        assert curves.shape[2] == 2
        slingshot = scprep.run.Slingshot(
            self.X_pca[:, :2], self.clusters, end_cluster=0, verbose=False
        )
        pseudotime, branch, curves = (
            slingshot["pseudotime"],
            slingshot["branch"],
            slingshot["curves"],
        )
        assert pseudotime.shape[0] == self.X_pca.shape[0]
        assert pseudotime.shape[1] == curves.shape[0]
        assert branch.shape[0] == self.X_pca.shape[0]
        current_pseudotime = -1
        for i in np.unique(branch):
            branch_membership = np.isnan(pseudotime[branch == i])
            assert np.all(branch_membership == branch_membership[0])
            new_pseudotime = np.nanmean(pseudotime[branch == i])
            assert new_pseudotime > current_pseudotime
            current_pseudotime = new_pseudotime
        assert curves.shape[1] == self.X_pca.shape[0]
        assert curves.shape[2] == 2
        assert np.all(np.any(~np.isnan(pseudotime), axis=1))

    def test_slingshot_errors(self):
        """High-dimensional input warns; mismatched cluster length raises."""
        # Disabled: the unconditional raise below skips the slow R run.
        raise exceptions.SkipTestException
        utils.assert_warns_message(
            UserWarning,
            "Expected data to be low-dimensional. " "Got data.shape[1] = 4",
            scprep.run.Slingshot,
            self.X_pca[:, :4],
            self.clusters,
            verbose=False,
        )
        utils.assert_raises_message(
            ValueError,
            "Expected len(cluster_labels) ({}) to equal "
            "data.shape[0] ({})".format(self.X.shape[0] // 2, self.X.shape[0]),
            scprep.run.Slingshot,
            self.X_pca[:, :2],
            self.clusters[: self.X.shape[0] // 2],
            verbose=False,
        )
def test_conversion_list():
    """An unnamed R list converts to a (3, 1) numpy column array."""
    converted = scprep.run.conversion.rpy2py(ro.r("list(1,2,3)"))
    assert isinstance(converted, np.ndarray)
    assert len(converted) == 3
    assert np.all(converted == np.array([[1], [2], [3]]))
def test_conversion_dict():
    """A named R list converts to a Python dict keyed by the R names."""
    converted = scprep.run.conversion.rpy2py(ro.r("list(a=1,b=2,c=3)"))
    assert isinstance(converted, dict)
    assert len(converted) == 3
    assert np.all(np.array(list(converted.keys())) == np.array(["a", "b", "c"]))
    assert np.all(np.array(list(converted.values())) == np.array([[1], [2], [3]]))
def test_conversion_array():
    """An R matrix converts to a numpy array preserving R's column-major fill."""
    converted = scprep.run.conversion.rpy2py(ro.r("matrix(c(1,2,3,4,5,6), nrow=2, ncol=3)"))
    assert isinstance(converted, np.ndarray)
    assert converted.shape == (2, 3)
    assert np.all(converted == np.array([[1, 3, 5], [2, 4, 6]]))
def test_conversion_spmatrix():
    """A Matrix-package sparse matrix converts to a scipy CSC matrix."""
    ro.r("library(Matrix)")
    converted = scprep.run.conversion.rpy2py(
        ro.r("as(matrix(c(1,2,3,4,5,6), nrow=2, ncol=3), 'CsparseMatrix')")
    )
    assert isinstance(converted, scipy.sparse.csc_matrix)
    assert converted.shape == (2, 3)
    assert np.all(converted.toarray() == np.array([[1, 3, 5], [2, 4, 6]]))
def test_conversion_dataframe():
    """An R data.frame converts to a pandas DataFrame with matching columns."""
    converted = scprep.run.conversion.rpy2py(
        ro.r("data.frame(x=c(1,2,3), y=c('a', 'b', 'c'))")
    )
    assert isinstance(converted, pd.DataFrame)
    assert converted.shape == (3, 2)
    np.testing.assert_array_equal(converted["x"], np.array([1, 2, 3]))
    np.testing.assert_array_equal(converted["y"], np.array(["a", "b", "c"]))
def test_conversion_sce():
    """A SingleCellExperiment converts to AnnData with row/col metadata mapped."""
    scprep.run.install_bioconductor("SingleCellExperiment")
    # Build a tiny SCE with two assays plus row and column annotations.
    ro.r("library(SingleCellExperiment)")
    ro.r("X <- matrix(1:6, nrow=2, ncol=3)")
    ro.r("counts <- X * 2")
    ro.r("sce <- SingleCellExperiment(assays=list(X=X, counts=counts))")
    ro.r("rowData(sce)$rows <- c('a', 'b')")
    ro.r("colData(sce)$cols <- c(1, 2, 3)")
    converted = scprep.run.conversion.rpy2py(ro.r("sce"))
    assert isinstance(converted, anndata.AnnData)
    # The 2x3 R assay comes back transposed as a 3x2 AnnData layer.
    assert converted.layers["counts"].shape == (3, 2)
    np.testing.assert_array_equal(converted.obs["cols"], np.array([1, 2, 3]))
    np.testing.assert_array_equal(converted.var["rows"], np.array(["a", "b"]))
def test_conversion_anndata_missing():
    """Conversion of NULL still works when anndata/anndata2ri are unimportable."""
    with mock.patch.dict(sys.modules, {"anndata2ri": None, "anndata": None}):
        result = scprep.run.conversion.rpy2py(ro.r("NULL"))
    assert result is None
def test_r_traceback():
    """An error raised inside R should surface with a readable R backtrace."""
    # Two nested R functions so the backtrace has multiple frames.
    test_fun = scprep.run.RFunction(
        setup='a <- function() stop("test"); b <- function() a()',
        body="b()",
        verbose=False,
    )
    # Temporarily monkey-patch re.compile so '.' matches newlines, letting the
    # single regex below span the multi-line R traceback.  assert_raises_message
    # compiles its pattern internally, hence the global patch.
    re_compile = re.compile
    def compile_with_dotall(pattern, flags=0):
        return re_compile(pattern, flags=re.DOTALL)
    re.compile = compile_with_dotall
    try:
        utils.assert_raises_message(
            rpy2.rinterface_lib.embedded.RRuntimeError,
            r"Error in a\(\) : test.*test.*Backtrace:.*1\..*(function|`<fn>`\(\))"
            r".*2\..*global[ \:]+b\(\).*3\..*global[ \:]+a\(\)",
            test_fun,
            regex=True,
        )
    finally:
        # Always restore the real re.compile, even if the assertion fails.
        re.compile = re_compile
| StarcoderdataPython |
1722421 | <filename>test/unit_tests/protocol/mpwp_protocol_test.py
# To change this license header, choose License Headers in Project Properties.
# To change this template file, choose Tools | Templates
# and open the template in the editor.
import unittest
from server_common import mpwp_protocol
class Mpwp_Protocol_TestCase(unittest.TestCase):
    """Unit tests for the mpwp_v1.0 wire-protocol packet builders."""

    def test_get_uuid(self):
        """get_uuid returns a 36-character UUID rendered as bytes."""
        uid = mpwp_protocol.get_uuid()
        self.assertEqual(bytes, type(uid))
        self.assertEqual(36, len(uid))

    def test_get_mpwp_packet(self):
        """A generic packet carries the protocol tag, type code and payload fields."""
        packet = mpwp_protocol.get_mpwp_packet(mpwp_protocol.STATUS_DATA, b'0', b'0', b'12345', b'0')
        expected = [b'mpwp_v1.0', b'100', b'0', b'0', b'12345', b'0']
        self.assertEqual(expected, packet)

    def test_get_mpwp_status_packet(self):
        """A status packet carries the tag followed by the three status fields."""
        packet = mpwp_protocol.get_mpwp_status_packet(b'100', b'0', b'1')
        expected = [b'mpwp_v1.0', b'100', b'0', b'1']
        self.assertEqual(expected, packet)

    def test_get_mpwp_content_packet(self):
        """A content packet is a status-data packet wrapping the given content."""
        packet = mpwp_protocol.get_mpwp_content_packet(b'0', b'1', b'12', b'0')
        expected = [b'mpwp_v1.0', b'100', b'0', b'1', b'12', b'0']
        self.assertEqual(expected, packet)

    def test_get_log_packet(self):
        """A log packet carries type 300/3 plus origin, code, level and message."""
        packet = mpwp_protocol.get_log_packet(b'0', b'241', b'1', b'Critical Error!')
        expected = [b'mpwp_v1.0', b'300', b'3', b'0', b'241', b'1', b'Critical Error!']
        self.assertEqual(expected, packet)

    def test_get_msg_content(self):
        """msg_content strips the six header fields and returns the payload."""
        msg = [b'mpwp_v1.0', b'100', b'0', b'0', b'12345', b'0', b'', b'', b'']
        self.assertEqual([b'', b'', b''], mpwp_protocol.msg_content(msg))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
47389 | from app import app
import unittest
import base64
import json
class TestLogin(unittest.TestCase):
    """HTTP-level tests of the recipe API's Basic-auth handling."""

    def setUp(self):
        # Flask test client plus pre-encoded Basic-auth credential strings.
        # (Credential literals were scrubbed to placeholders in this copy.)
        app.config['TESTING'] = True
        self.app = app.test_client()
        self.user_name = "<EMAIL>"
        self.password = "<PASSWORD>"
        self.valid_credentials = base64.b64encode(b'<EMAIL>:123@Abcd').decode('utf-8')
        self.invalid_password = base64.b64encode(b'<EMAIL>:<PASSWORD>').decode('utf-8')
        self.invalid_username = base64.b64encode(b'<EMAIL>:<PASSWORD>').decode('utf-8')

    def test_user_create_recipe_invalid_credentials(self):
        """POSTing a recipe with a wrong password must be rejected with 401."""
        # Representative recipe payload; its content is irrelevant because
        # authentication fails before the body is processed.
        datajson=json.dumps({
            "cook_time_in_min": 15,
            "prep_time_in_min": 15,
            "title": "Creamy Cajun Chicken Pasta",
            "cuisine": "Italian",
            "servings": 2,
            "ingredients": [
                "4 ounces linguine pasta",
                "2 boneless, skinless chicken breast halves, sliced into thin strips",
                "2 teaspoons Cajun seasoning",
                "2 tablespoons butter"
            ],
            "steps": [
                {
                    "position": 1,
                    "items": "some text here"
                }
            ],
            "nutrition_information": {
                "calories": 100,
                "cholesterol_in_mg": 4,
                "sodium_in_mg": 100,
                "carbohydrates_in_grams": 53.7,
                "protein_in_grams": 53.7
            }
        })
        response = self.app.post(
            '/v1/recipe/', data=datajson, content_type='application/json',
            headers={'Authorization': 'Basic ' + self.invalid_password})
        self.assertEqual(response.status_code, 401)

    def test_user_recipe_get(self):
        """GET for a non-existent recipe id must return 404."""
        response = self.app.get(
            '/v1/recipe/f5e02bd4-55da-4243-b7fb-980b230a1138')
        self.assertEqual(response.status_code, 404)

    # def test_user_recipe_delete(self):
    #     response = self.app.delete(
    #         '/v1/recipe/f5e02bd4-55da-4243-b7fb-980b230a1138', headers={'Authorization': 'Basic ' + self.valid_credentials})
    #     self.assertEqual(response.status_code, 403)
| StarcoderdataPython |
12835294 | <gh_stars>10-100
import sys
# Bail out with an install hint when the third-party pefile module is missing.
# NOTE: the print *statement* below means this script targets Python 2.
try:
    import pefile
except ImportError:
    print 'You have to install pefile (pip install pefile)'
    sys.exit()
def main():
    # Dump every PE section of the given executable into its own
    # sectionNN<name>.bin file in the current directory. (Python 2 script.)
    if len(sys.argv) < 2:
        print 'usage: dumpsec.py program.exe'
        return
    # fast_load skips parsing of the optional data directories.
    pe = pefile.PE(sys.argv[1], fast_load=True)
    data = pe.get_memory_mapped_image()
    i = 0
    for section in pe.sections:
        print (section.Name, hex(section.VirtualAddress), hex(section.Misc_VirtualSize), section.SizeOfRawData )
        # Slice the section out of the memory-mapped image by its virtual range.
        secdata = data[section.VirtualAddress:section.VirtualAddress+section.Misc_VirtualSize]
        # Strip NULs and newlines from the section name so it is a safe filename.
        f = open('section%02d%s.bin' % (i, section.Name.replace('\0', '').replace('\x0a', '')), 'wb')
        f.write(secdata)
        f.close()
        i += 1

if __name__ == "__main__":
    main()
371462 | # --- coding:utf-8 ---
# author: Cyberfish time:2021/7/23
from ner_code.predict import NerPredict
from intent_code.predict import IntentPredict
from biclass_code.predict import BiPredict
from database_code.database_main import DataBase
from agent import Agent
from collections import defaultdict
# Wire up the three model predictors and the database behind one Agent.
intent_predict = IntentPredict()
bi_predict = BiPredict()
ner_predict = NerPredict()
database = DataBase()
agent = Agent(intent_predict, bi_predict, ner_predict, database)

# Simple console REPL: read a user utterance and hand it to the agent;
# the literal 'clear' resets the dialogue-state tracker (DST).
while True:
    content = input('请输入内容: ')
    if content == 'clear':
        agent.DST = defaultdict(set)
        print('已重置用户对话状态')
    else:
        agent.response(content)

# Example turns, kept for reference (unreachable after the loop above):
# agent.response('好的,麻烦你帮我查一下桔子水晶酒店(北京安贞店)电话呗。')
# agent.response('营业时间是什么时间?')
# agent.response('关于酒店没有其他问题了。我想去八达岭长城游玩,麻烦你告诉我这个景点的门票和电话。')
# agent.response('我如果坐出租车,从北京京泰龙国际大酒店到北京杜莎夫人蜡像馆,能查到车型和车牌信息吗?')
# agent.response('好的,没有其他问题了,谢谢。')
# agent.response('收到,非常感谢!')
| StarcoderdataPython |
6695249 | <filename>src/run.py
from Transactioner import Transactioner
class TestDbInitializer:
    # Demo callback object handed to Transactioner.Execute(); its
    # initialize() receives a live `dataset` database handle.
    #def __init__(self):
    #    self.__t = Transactioner('sqlite:///../res/test.db')
    #    self.__t.Execute()
    #@t.Execute
    #@transact('sqlite:///../res/test.db')
    def initialize(self, db, *args, **kwargs):
        # Debug dump of the call context.
        print('TestDbInitializer.initialize()')
        print(self)
        print(db)
        print(args)
        print(kwargs)
        db.query('select 1')
        print(dir(db))
        print(db.tables)
        # Create the demo table only on the first run (no tables yet).
        if 0 == len(db.tables): db.query("create table MyDb (Id integer, Name text);")
        db.query("select * from MyDb;")
        # Next Id = current row count.
        _id = db.query('select count(*) as count from MyDb').next()['count']
        print(_id)
        # NOTE(review): SQL assembled with str.format -- fine for this
        # demo, but injection-prone with untrusted values.
        db.query("insert into MyDb (Id,Name) values ({},'{}')".format(_id, 'Run'))
        print(db['MyDb'].find())
initer = TestDbInitializer()
t = Transactioner('sqlite:///../res/test.db')
t.Execute(initer)
#TestDbInitializer().initialize()
"""
@t.Execute
#@transact('sqlite:///../res/test.db')
def initialize_db(db):
db.query('select 1')
print(db.tables)
if 0 == len(db.tables): db.query("create table MyDb (Id integer, Name text);")
db.query("select * from MyDb;")
_id = db.query('select count(*) as count from MyDb').next()['count']
print(_id)
db.query("insert into MyDb (Id,Name) values ({},'{}')".format(_id, 'Run'))
print(db['MyDb'].find())
# 実行! 引数db不要!
initialize_db()
"""
| StarcoderdataPython |
1790828 | <reponame>mariogeiger/se3cnn
# pylint: disable=C,R,E1101
import torch
import os
import numpy as np
from scipy.stats import special_ortho_group
class Cath(torch.utils.data.Dataset):
    """CATH protein-structure dataset.

    Loads one split of a pre-packaged CATH ``.npz`` archive and, per item,
    discretizes the protein's atoms onto a regular 3D grid with one channel
    per atom type (either Gaussian densities or a one-hot occupancy grid).
    """

    url = 'https://github.com/deepfold/cath_datasets/blob/master/{}?raw=true'

    def __init__(self, dataset, split, download=False,
                 discretization_bins=50,
                 discretization_bin_size=2.0,
                 use_density=True,
                 randomize_orientation=False):
        """
        :param dataset: String specifying name of cath set
        :param split: Which split to read in (10 in total)
        :param download: Whether to retrieve dataset automatically
        :param discretization_bins: Number of bins used in each dimension
        :param discretization_bin_size: Size of a bin in each dimension (in Angstrom)
        :param use_density: Whether to populate grid with densities rather than a one-hot encoding
        :param randomize_orientation: Whether to resample the orientation of each input data point whenever it is requested (i.e. data augmentation)
        """
        dirname, dataset = os.path.split(dataset)
        self.root = os.path.expanduser(dirname if dirname != "" else ".")

        if download:
            self.download(dataset)

        self.discretization_bins = discretization_bins
        self.discretization_bin_size = discretization_bin_size
        self.use_density = use_density
        self.randomize_orientation = randomize_orientation

        if not self._check_exists(dataset):
            raise RuntimeError('Dataset not found.' +
                               ' You can use download=True to download it')

        data = np.load(os.path.join(self.root, dataset))

        # Entries of the requested split are the half-open index range
        # [start, next_start); the last split runs to the end (None).
        split_start_indices = data['split_start_indices']
        split_range = list(zip(split_start_indices[0:],
                               list(split_start_indices[1:]) + [None]))[split]
        self.positions = data['positions'][split_range[0]:split_range[1]]
        self.atom_types = data['atom_types'][split_range[0]:split_range[1]]
        self.n_atoms = data['n_atoms'][split_range[0]:split_range[1]]
        # Multi-component labels become tuples; scalar labels stay scalars.
        self.labels = [tuple(v) if len(v) > 1 else v[0]
                       for v in data['labels'][split_range[0]:split_range[1]]]

        # Atom-type vocabulary is derived from the first entry only
        # (assumes every entry uses the same atom-type alphabet -- TODO confirm).
        self.atom_type_set = np.unique(self.atom_types[0][:self.n_atoms[0]])
        self.n_atom_types = len(self.atom_type_set)
        self.atom_type_map = dict(zip(self.atom_type_set, range(len(self.atom_type_set))))

        self.label_set = sorted(list(set(self.labels)))
        self.label_map = dict(zip(self.label_set, range(len(self.label_set))))

    def __getitem__(self, index):
        """Return ``(fields, label)``: an (n_atom_types, n, n, n) grid tensor
        and the integer class index for entry *index*."""
        n_atoms = self.n_atoms[index]
        positions = self.positions[index][:n_atoms]
        atom_types = self.atom_types[index][:n_atoms]
        label = self.label_map[self.labels[index]]

        p = self.discretization_bin_size
        n = self.discretization_bins

        if torch.cuda.is_available():
            fields = torch.cuda.FloatTensor(*(self.n_atom_types,) + (n, n, n)).fill_(0)
        else:
            fields = torch.zeros(*(self.n_atom_types,) + (n, n, n))

        if self.randomize_orientation:
            # Data augmentation: apply a uniformly random 3D rotation.
            random_rotation = special_ortho_group.rvs(3)
            positions = np.dot(random_rotation, positions.T).T

        # Bin-center coordinates along one axis, shared by both branches.
        # BUG FIX: previously this was created only inside the density
        # branch, so the one-hot (use_density=False) path raised NameError.
        a = torch.linspace(start=-n / 2 * p + p / 2, end=n / 2 * p - p / 2, steps=n)

        if self.use_density:
            if torch.cuda.is_available():
                a = a.cuda()
            # Build the three coordinate grids with explicit repeats
            # (this code predates torch.meshgrid).
            xx = a.view(-1, 1, 1).repeat(1, len(a), len(a))
            yy = a.view(1, -1, 1).repeat(len(a), 1, len(a))
            zz = a.view(1, 1, -1).repeat(len(a), len(a), 1)
            for i, atom_type in enumerate(self.atom_type_set):
                # Positions of atoms with the current type.
                pos = positions[atom_types == atom_type]
                pos = torch.FloatTensor(pos)
                if torch.cuda.is_available():
                    pos = pos.cuda()
                # Pairwise (grid point x atom) coordinate matrices,
                # built with repeats in place of meshgrid.
                xx_xx = xx.view(-1, 1).repeat(1, len(pos))
                posx_posx = pos[:, 0].contiguous().view(1, -1).repeat(len(xx.view(-1)), 1)
                yy_yy = yy.view(-1, 1).repeat(1, len(pos))
                posy_posy = pos[:, 1].contiguous().view(1, -1).repeat(len(yy.view(-1)), 1)
                zz_zz = zz.view(-1, 1).repeat(1, len(pos))
                posz_posz = pos[:, 2].contiguous().view(1, -1).repeat(len(zz.view(-1)), 1)
                # Isotropic Gaussian density around each atom.
                sigma = 0.5 * p
                density = torch.exp(-((xx_xx - posx_posx) ** 2 +
                                      (yy_yy - posy_posy) ** 2 +
                                      (zz_zz - posz_posz) ** 2) / (2 * (sigma) ** 2))
                # Normalize so each atom contributes unit mass in total.
                density /= torch.sum(density, dim=0)
                # Sum per-atom densities and reshape back onto the grid.
                fields[i] = torch.sum(density, dim=1).view(xx.shape)
        else:
            for i, atom_type in enumerate(self.atom_type_set):
                pos = positions[atom_types == atom_type]
                # Digitize each coordinate against the bin edges and flatten
                # the 3D bin index into a linear index into the grid.
                indices = torch.LongTensor(
                    np.ravel_multi_index(np.digitize(pos, a + p / 2).T, dims=(n, n, n)))
                if torch.cuda.is_available():
                    indices = indices.cuda()
                # One-hot occupancy.
                fields[i].view(-1)[indices] = 1

        return fields, label

    def __len__(self):
        return len(self.labels)

    def _check_exists(self, dataset):
        """Return True when the archive is already present under self.root."""
        return os.path.exists(os.path.join(self.root, dataset))

    def download(self, dataset):
        """Download *dataset* into ``self.root`` if it is not already there."""
        import urllib.request  # stdlib replacement for six.moves.urllib

        if self._check_exists(dataset):
            return

        # BUG FIX: the previous code checked ``os.errno.EEXIST``; the
        # ``os.errno`` alias was removed in Python 3.7, so the except
        # branch itself raised. exist_ok covers the same race safely.
        os.makedirs(self.root, exist_ok=True)

        print('Downloading ' + self.url.format(dataset))
        data = urllib.request.urlopen(self.url.format(dataset))
        file_path = os.path.join(self.root, dataset)
        with open(file_path, 'wb') as f:
            f.write(data.read())
        print('Done!')
| StarcoderdataPython |
9738409 | <filename>tests/unit/tst_16.py
from __future__ import division
import iotbx.pdb
import os
from scitbx.array_family import flex
from libtbx import easy_pickle
import time
import run_tests
from libtbx.test_utils import approx_equal
import libtbx.load_env
qrefine = libtbx.env.find_in_repositories("qrefine")
qr_unit_tests_data = os.path.join(qrefine,"tests","unit","data_files")
def run(prefix):
    """
    Exercise gradients match:
    - small vs large box:
    -- using clustering vs not using clustering.
    --- fast_interaction True / False
    Non-P1 case (P212121)
    """
    for fast_interaction in [True, False]:
        data_file_prefix = "2olx"
        common_args = ["restraints=cctbx", "mode=opt", "parallel.nproc=1"]
        # Run once with clustering enabled (dumping its gradients)...
        r = run_tests.run_cmd(prefix,
            args = common_args+["clustering=true", "fast_interaction=%s"%str(fast_interaction),
            "dump_gradients=cluster_true.pkl"],
            pdb_name = os.path.join(qr_unit_tests_data,"%s.pdb"%data_file_prefix),
            mtz_name = os.path.join(qr_unit_tests_data,"%s.mtz"%data_file_prefix))
        # ...and once without clustering, then compare the two dumps.
        r = run_tests.run_cmd(prefix,
            args = common_args+["clustering=false",
            "dump_gradients=cluster_false.pkl"],
            pdb_name = os.path.join(qr_unit_tests_data,"%s.pdb"%data_file_prefix),
            mtz_name = os.path.join(qr_unit_tests_data,"%s.mtz"%data_file_prefix))
        #
        g1 = flex.vec3_double(easy_pickle.load("cluster_false.pkl"))
        g2 = flex.vec3_double(easy_pickle.load("cluster_true.pkl"))
        assert g1.size() == g2.size()
        diff = g1-g2
        # Disabled debug dump (note: Python 2 print statements).
        if(0):
            for i, diff_i in enumerate(diff):
                if(abs(max(diff_i)) > 1.e-6):
                    print i, diff_i, g1[i], g2[i]
            print
        # Gradients from both runs must agree element-wise.
        assert approx_equal(diff.max(), [0,0,0])

if(__name__ == '__main__'):
    prefix = os.path.basename(__file__).replace(".py","")
    run_tests.runner(function=run, prefix=prefix, disable=False)
| StarcoderdataPython |
9679982 | <filename>Phidgets22.indigoPlugin/Contents/Server Plugin/plugin.py<gh_stars>0
# -*- coding: utf-8 -*-
import indigo
import logging
import traceback
import json
# Phidget libraries
from Phidget22.Devices.Log import Log
from Phidget22.Net import Net, PhidgetServerType
from Phidget22.Phidget import Phidget
from Phidget22.PhidgetException import PhidgetException
from PhidgetInfo import PhidgetInfo
# Classes to describe network & channel search info
from phidget import ChannelInfo, NetInfo
# Our wrappers around phidget objects
from voltageinput import VoltageInputPhidget
from voltageratioinput import VoltageRatioInputPhidget
from digitaloutput import DigitalOutputPhidget
from temperaturesensor import TemperatureSensorPhidget
from digitalinput import DigitalInputPhidget
from frequencycounter import FrequencyCounterPhidget
from humiditysensor import HumiditySensorPhidget
import phidget_util
class Plugin(indigo.PluginBase):
    """Indigo plugin that bridges Phidget22 hardware channels to Indigo devices.

    Keeps a registry (``self.activePhidgets``) mapping Indigo device ids to
    wrapper objects (VoltageInputPhidget, DigitalOutputPhidget, ...) and
    forwards Indigo lifecycle/action callbacks to them.
    """

    def __init__(self, pluginId, pluginDisplayName, pluginVersion, pluginPrefs):
        super(Plugin, self).__init__(pluginId, pluginDisplayName, pluginVersion, pluginPrefs)
        self.plugin_file_handler.setLevel(logging.INFO)  # Master Logging Level for Plugin Log file
        self.indigo_log_handler.setLevel(logging.INFO)   # Logging level for Indigo Event Log
        self.activePhidgets = {}  # Map between Indigo ID and current instance of phidget
        self.phidgetInfo = PhidgetInfo(phidgetInfoFile='../Resources/phidgets.json')
        self.logger.setLevel(logging.DEBUG)

    def startup(self):
        """Configure Phidget-API logging, plugin log levels and server discovery."""
        # Setup logging in the phidgets library
        if self.pluginPrefs.get('phidgetApiLogging', False):
            self.phidgetApiLogLevel = int(self.pluginPrefs['phidgetApiLogLevel'])
            self.phidgetApiLogfile = self.pluginPrefs['phidgetApiLogfile']
            Log.enable(self.phidgetApiLogLevel, self.phidgetApiLogfile)
        else:
            Log.disable()
            self.phidgetApiLogLevel = 0

        loglevel = int(self.pluginPrefs.get('phidgetPluginLoggingLevel', '0'))
        if loglevel:
            self.plugin_file_handler.setLevel(loglevel)  # Master Logging Level for Plugin Log file
            self.indigo_log_handler.setLevel(loglevel)   # Logging level for Indigo Event Log
            self.logger.debug("Setting log level to %s" % logging.getLevelName(loglevel))

        self.logger.debug("Using %s" % Phidget.getLibraryVersion())
        # Should this be configurable?
        Net.enableServerDiscovery(PhidgetServerType.PHIDGETSERVER_DEVICEREMOTE)

    #
    # Methods for working with interactive Indigo UI
    #
    def validatePrefsConfigUi(self, valuesDict):
        """Validate plugin preferences dialog. TODO: real validation."""
        return True

    def validateDeviceConfigUi(self, valuesDict, typeId, devId):
        """Validate a device config dialog and derive its display address.

        The address combines the serial number (or a user label found in an
        Indigo variable named ``p22_<serial>``) with hub-port/channel parts.
        """
        # Look to see if there is a label for the serial number
        addrIndex = str(valuesDict['serialNumber'])
        varName = "p22_" + addrIndex
        if varName in indigo.variables:
            phLabel = str(indigo.variables[varName].value)
            if phLabel != "":  # If we got a non-null value, use it
                addrIndex = phLabel

        # Set an address here
        # TODO: dynamic address updating would require replacing the device
        # and using didDeviceCommPropertyChange to prevent respawn
        if bool(valuesDict['isVintHub']) and not bool(valuesDict['isVintDevice']):
            valuesDict['address'] = addrIndex + "|p" + valuesDict['hubPort']
        elif not bool(valuesDict['isVintHub']) and not bool(valuesDict['isVintDevice']):  # an interfaceKit
            # Prefix encodes the channel kind for interface-kit devices.
            if typeId == 'digitalInput':
                valuesDict['address'] = addrIndex + "|di-" + valuesDict['channel']
            elif typeId == 'digitalOutput':
                valuesDict['address'] = addrIndex + "|do-" + valuesDict['channel']
            elif typeId == 'voltageRatioInput':
                valuesDict['address'] = addrIndex + "|vr-" + valuesDict['channel']
            elif typeId == 'voltageInput':
                valuesDict['address'] = addrIndex + "|av-" + valuesDict['channel']
            else:
                valuesDict['address'] = addrIndex + "|p-" + valuesDict['channel']
        elif 'hubPort' in valuesDict and len(valuesDict['hubPort']) > 0 and 'channel' in valuesDict and len(valuesDict['channel']) > 0:
            valuesDict[u'address'] = addrIndex + "|p" + valuesDict['hubPort'] + "-c" + valuesDict['channel']
        elif 'hubPort' in valuesDict and len(valuesDict['hubPort']):
            valuesDict[u'address'] = addrIndex + "|p" + valuesDict['hubPort']
        elif 'channel' in valuesDict and len(valuesDict['channel']) > 0:
            valuesDict[u'address'] = addrIndex + "|c" + valuesDict['channel']
        else:
            valuesDict[u'address'] = addrIndex
        return (True, valuesDict)

    def getPhidgetTypeMenu(self, filter="", valuesDict=None, typeId="", targetId=0):
        """Build the device-type menu for the comma-separated class filter."""
        classes = filter.split(',')
        return self.phidgetInfo.getPhidgetTypeMenu(classes)

    #
    # Interact with the phidgets
    #
    def getDeviceStateList(self, device):
        """Return the state list for *device*, empty when it is not active."""
        if device.id in self.activePhidgets:
            return self.activePhidgets[device.id].getDeviceStateList()
        else:
            # BUG FIX: the empty list was created but never returned, so
            # unknown devices got None instead of an empty state list.
            return indigo.List()

    def getDeviceDisplayStateId(self, device):
        """Return the display-state id for *device*, or None if inactive."""
        if device.id in self.activePhidgets:
            return self.activePhidgets[device.id].getDeviceDisplayStateId()
        else:
            return None

    def actionControlDevice(self, action, device):
        """Forward a device action (e.g. turn on/off) to the wrapper."""
        if device.id in self.activePhidgets:
            return self.activePhidgets[device.id].actionControlDevice(action)
        else:
            raise Exception("Unexpected device: %s" % device.id)

    def actionControlSensor(self, action, device):
        """Forward a sensor action (e.g. status request) to the wrapper."""
        if device.id in self.activePhidgets:
            return self.activePhidgets[device.id].actionControlSensor(action)
        else:
            raise Exception("Unexpected device: %s" % device.id)

    def deviceStartComm(self, device):
        """Instantiate and start the wrapper matching device.deviceTypeId.

        Phidget device types (device.deviceTypeId) are defined in devices.xml.
        TODO: Clean this up by refactoring into factory methods per type.
        """
        try:
            # Common properties for _all_ phidgets
            serialNumber = device.pluginProps.get("serialNumber", None)
            serialNumber = int(serialNumber) if serialNumber else -1

            channel = device.pluginProps.get("channel", None)
            channel = int(channel) if channel else -1

            # isHubPortDevice is true only when non-VINT devices are attached
            # to a VINT hub
            isVintHub = device.pluginProps.get("isVintHub", None)
            isVintHub = bool(isVintHub) if isVintHub else 0
            isVintDevice = device.pluginProps.get("isVintDevice", None)
            isVintDevice = bool(isVintDevice) if isVintDevice else 0
            isHubPortDevice = int(isVintHub and not isVintDevice)

            hubPort = device.pluginProps.get("hubPort", -1)
            hubPort = int(hubPort) if hubPort else -1

            networkPhidgets = self.pluginPrefs.get("networkPhidgets", False)
            enableServerDiscovery = self.pluginPrefs.get("enableServerDiscovery", False)

            channelInfo = ChannelInfo(
                serialNumber=serialNumber,
                channel=channel,
                isHubPortDevice=isHubPortDevice,
                hubPort=hubPort,
                netInfo=NetInfo(isRemote=networkPhidgets, serverDiscovery=enableServerDiscovery)
            )

            # Data interval is used by many types. See if it is set
            dataInterval = device.pluginProps.get("dataInterval", None)
            dataInterval = int(dataInterval) if dataInterval else None

            decimalPlaces = int(device.pluginProps.get("decimalPlaces", 3))  # Sane default 3 decimal places?

            if device.deviceTypeId == "voltageInput" or device.deviceTypeId == "voltageRatioInput":
                # Custom formula fields (shared by the two voltage types).
                if device.pluginProps.get("useCustomFormula", False):
                    customState = device.pluginProps.get("customState", None)
                    customFormula = device.pluginProps.get("customFormula", None)
                else:
                    customState = None
                    customFormula = None

            # TODO: Use better default sensor types... this might error if not populated
            if device.deviceTypeId == "voltageInput":
                sensorType = int(device.pluginProps.get("voltageSensorType", 0))
                voltageChangeTrigger = float(device.pluginProps.get("voltageChangeTrigger", 0))
                sensorValueChangeTrigger = float(device.pluginProps.get("sensorValueChangeTrigger", 0))
                newPhidget = VoltageInputPhidget(
                    indigo_plugin=self, channelInfo=channelInfo, indigoDevice=device,
                    decimalPlaces=decimalPlaces, logger=self.logger, sensorType=sensorType,
                    dataInterval=dataInterval, voltageChangeTrigger=voltageChangeTrigger,
                    sensorValueChangeTrigger=sensorValueChangeTrigger,
                    customState=customState, customFormula=customFormula)
            elif device.deviceTypeId == "voltageRatioInput":
                voltageRatioChangeTrigger = float(device.pluginProps.get("voltageRatioChangeTrigger", 0))
                sensorValueChangeTrigger = float(device.pluginProps.get("sensorValueChangeTrigger", 0))
                sensorType = int(device.pluginProps.get("voltageRatioSensorType", 0))
                newPhidget = VoltageRatioInputPhidget(
                    indigo_plugin=self, channelInfo=channelInfo, indigoDevice=device,
                    decimalPlaces=decimalPlaces, logger=self.logger, sensorType=sensorType,
                    dataInterval=dataInterval, voltageRatioChangeTrigger=voltageRatioChangeTrigger,
                    sensorValueChangeTrigger=sensorValueChangeTrigger,
                    customState=customState, customFormula=customFormula)
            elif device.deviceTypeId == "digitalOutput":
                newPhidget = DigitalOutputPhidget(
                    indigo_plugin=self, channelInfo=channelInfo, indigoDevice=device,
                    logger=self.logger)
            elif device.deviceTypeId == "digitalInput":
                isAlarm = bool(device.pluginProps.get("isAlarm", False))
                newPhidget = DigitalInputPhidget(
                    indigo_plugin=self, channelInfo=channelInfo, indigoDevice=device,
                    logger=self.logger, isAlarm=isAlarm)
            elif device.deviceTypeId == "temperatureSensor":
                temperatureChangeTrigger = float(device.pluginProps.get("temperatureChangeTrigger", 0))
                displayTempUnit = device.pluginProps.get("displayTempUnit", "C")
                if device.pluginProps.get("useThermoCouple", False):
                    # NOTE(review): int(None) raises if thermocoupleType is
                    # unset while useThermoCouple is true -- confirm the UI
                    # always populates it.
                    thermocoupleType = int(device.pluginProps.get("thermocoupleType", None))
                else:
                    thermocoupleType = None
                newPhidget = TemperatureSensorPhidget(
                    indigo_plugin=self, channelInfo=channelInfo, indigoDevice=device,
                    logger=self.logger, decimalPlaces=decimalPlaces,
                    displayTempUnit=displayTempUnit, thermocoupleType=thermocoupleType,
                    dataInterval=dataInterval, temperatureChangeTrigger=temperatureChangeTrigger)
            elif device.deviceTypeId == "frequencyCounter":
                filterType = int(device.pluginProps.get("filterType", 0))
                displayStateName = device.pluginProps.get("displayStateName", None)
                frequencyCutoff = float(device.pluginProps.get("frequencyCutoff", 1))
                isDAQ1400 = bool(device.pluginProps.get("isDAQ1400", False))
                inputType = int(device.pluginProps.get("inputType", 0))
                powerSupply = int(device.pluginProps.get("powerSupply", 0))
                newPhidget = FrequencyCounterPhidget(
                    indigo_plugin=self, channelInfo=channelInfo, indigoDevice=device,
                    logger=self.logger, decimalPlaces=decimalPlaces, filterType=filterType,
                    dataInterval=dataInterval, displayStateName=displayStateName,
                    frequencyCutoff=frequencyCutoff, isDAQ1400=isDAQ1400,
                    inputType=inputType, powerSupply=powerSupply)
            elif device.deviceTypeId == "humiditySensor":
                humidityChangeTrigger = float(device.pluginProps.get("humidityChangeTrigger", 0))
                newPhidget = HumiditySensorPhidget(
                    indigo_plugin=self, channelInfo=channelInfo, indigoDevice=device,
                    logger=self.logger, decimalPlaces=decimalPlaces,
                    humidityChangeTrigger=humidityChangeTrigger, dataInterval=dataInterval)
            else:
                raise Exception("Unexpected device type: %s" % device.deviceTypeId)

            self.activePhidgets[device.id] = newPhidget
            newPhidget.start()
            device.stateListOrDisplayStateIdChanged()
        except PhidgetException as e:
            self.logger.error("%d: %s\n" % (e.code, e.details))
            self.logger.error(traceback.format_exc())
        except Exception as e:
            self.logger.error(traceback.format_exc())

    #
    # Methods related to shutdown
    #
    def deviceStopComm(self, device):
        """Stop and deregister the wrapper for *device*."""
        myPhidget = self.activePhidgets.pop(device.id)
        myPhidget.stop()

    def shutdown(self):
        """Release all Phidget library resources."""
        Phidget.finalize(0)

    def __del__(self):
        indigo.PluginBase.__del__(self)
| StarcoderdataPython |
6691464 | <reponame>omiguelperez/python-restful-web-bdd<filename>app/application.py
# -*- coding: utf-8 -*-
from flask import Flask, request, jsonify, Response
app = Flask(__name__)
# In-memory user store: username -> details dict (not persisted).
USERS = {}
# HTTP method name constants used in the route declarations below.
GET = 'GET'
POST = 'POST'
DELETE = 'DELETE'
PUT = 'PUT'
@app.route('/user/list', methods=[GET])
def list_users():
    """Return all registered users as a JSON list of {username, name}.

    BUG FIX: the previous implementation called dict.update() with the
    same two keys on every loop iteration, so the response only ever
    contained the last user.
    """
    user_list = [
        {'username': username, 'name': details.get('name')}
        for username, details in USERS.items()
    ]
    return jsonify(user_list)
@app.route('/user/<username>', methods=[GET])
def retrieve_user(username):
    """Return the stored details for *username*, or 404 when unknown."""
    details = USERS.get(username)
    if not details:
        return Response(status=404)
    return jsonify(details)
@app.route('/user/<username>', methods=[DELETE])
def delete_user(username):
    """Remove *username* from the store: 200 on success, 404 when absent."""
    try:
        del USERS[username]
    except KeyError:
        return Response(status=404)
    return Response(status=200)
@app.route('/user/<username>', methods=[PUT])
def update_user(username):
    """Replace the record for *username* with the JSON body; 404 if unknown."""
    # Parse the body first so malformed JSON behaves the same for
    # known and unknown usernames.
    new_details = request.get_json()
    if username not in USERS:
        return Response(status=404)
    USERS[username] = new_details
    return jsonify(USERS.get(username))
@app.route('/user', methods=[POST])
def register_user():
    """Register a user from a JSON body of ``{username: details}``.

    Returns the stored record for the (single) submitted username.
    The route only accepts POST, so the old redundant method check
    was dropped.
    """
    user_data = request.get_json()
    USERS.update(user_data)
    # BUG FIX: dict.items() is a view in Python 3 and does not support
    # indexing (``.items()[0]`` raised TypeError); take the first entry
    # via an iterator instead.
    username, _details = next(iter(user_data.items()))
    created = USERS.get(username)
    return jsonify(created)
# Run the Flask development server when executed directly.
if __name__ == '__main__':
    app.run()
| StarcoderdataPython |
6619814 | # ::: Tuple :::
# A tuple is just like a list, but read-only (immutable).
# Almost everything else works the same as for lists.
# defining a tuple
marks = (454, 657, 587, 345, 893) # just like a list, only '[]' becomes '()'
marks = 454, 657, 587, 345, 893 # python will take it as a tuple
marks = tuple("Helo World")
# unpacking
marks = [45, 63, 96]
first, second, third = marks
# swapping values using a tuple
x = 4
y = 6
x, y = y, x # same as: x, y = (y, x)
# first 'y, x' is taken as a tuple, then unpacked
# this also works the same way when defining more than one variable per line
x, y = 8, 6 # taken as a tuple and then unpacked
# ====================================================================================
# ====================================================================================
# ::: Set :::
# a set is also like a list, with some differences
# a set is a collection of unique items
# defining a set
num = {6, 8, 9, 12, 36} # just like a list, only '[]' becomes '{}'
marks = set([1, 6, 9, 6, 3, 3, 4, 7]) # set from other iterables (set removes the duplicates)
# Adding an item
num.add(6) # unlike list, set does not have append/insert
# Modify / Remove an item
num.remove(12)
# Unlike a list, a set has no index
# So we can not access an item by index, like num[2]; that will not work
# but we can iterate over the set, loop through a set, etc.
for i in num:
    print(i)
has_12 = 12 in num # 12 is not in num (removed above), so this returns False
# Some mathematics
first = {3, 2, 4, 6, 7}
second = {1, 2, 4, 5}
union = first | second # returns all items that are in first or second set : {1, 2, 3, 4, 5, 6, 7}
intersection = first & second # returns items that are in both first and second set : {2, 4}
difference = first - second # returns the first set after removing items that are in the second set : {3, 6, 7}
semetric_difference = first ^ second # symmetric difference: items in exactly one of the sets : {1, 3, 5, 6, 7}
# unlike lists, we can not use the + or * operators on sets
# first = {45, 85, 69} + {85, 32, 45} : this will throw an error
| StarcoderdataPython |
226974 | <reponame>CherBoon/Cloudtopus
from django.conf import settings
from django.contrib.auth.hashers import check_password
from django.contrib.auth.models import User
from Module_TeamManagement.models import Student, Faculty, Class, Course_Section, Course
#-----------------------------------------------------------------------------#
#--------------------------- Validate Function -------------------------------#
#-----------------------------------------------------------------------------#
def validate(username, password):
    """Authenticate the configured admin account.

    Compares *username*/*password* to ``settings.ADMIN_LOGIN`` /
    ``settings.ADMIN_PASSWORD`` and returns the matching ``User`` row
    (creating it on first login), or ``None`` when the credentials do
    not match.
    """
    login_valid = (settings.ADMIN_LOGIN == username)
    # NOTE(review): plain-text comparison against a settings value; the
    # imported check_password() helper is not used here.
    pwd_valid = (password == settings.ADMIN_PASSWORD)
    if login_valid and pwd_valid:
        try:
            user = User.objects.get(username=username)
        # BUG FIX: was a bare ``except:`` which also swallowed database
        # errors and MultipleObjectsReturned; only a missing row should
        # trigger creation.
        except User.DoesNotExist:
            # Create a new user. There's no need to set a password
            # because only the password from settings.py is checked.
            user = User(username=username)
            user.is_staff = True
            user.is_superuser = True
            user.save()
        return user
    return None
# Verification of student login credentials in database
def studentVerification(requests):
    # Looks up the Student row matching the logged-in user's email.
    # NOTE(review): the fetched object is discarded and None is returned;
    # presumably the intent is that Student.DoesNotExist propagates when
    # the user is not a student -- confirm with callers.
    studentEmail = requests.user.email
    studentObj = Student.objects.get(email=studentEmail)
    return
# Verification of faculty login credentials in the database
def InstructorVerification(requests):
    """Fetch the Faculty record matching the logged-in user's email.

    The record itself is discarded; the lookup is the verification.
    """
    faculty_email = requests.user.email
    Faculty.objects.get(email=faculty_email)
    return
def changePassword(oldPassword,newPassword,studObj):
    # TODO: not implemented yet -- currently always raises.
    raise Exception("Incomplete")
| StarcoderdataPython |
3354889 | #Example code
import YassaAlchemy #import library
# Create an instance of YassaAlchemy.
# Make sure a database is created and fill in
# the necessary credentials before running.
db = YassaAlchemy.table(host="localhost",user="root",passwd="<PASSWORD>",database="yassadb")
class Users:
    """Namespace for operations on the ``Users`` table."""

    # IMPROVEMENT: the original ``create(db)`` had no ``self`` and no
    # decorator, so it only worked when called on the class (and not at
    # all on Python 2 instances); @staticmethod makes the intent explicit
    # and keeps ``Users.create(db)`` working unchanged.
    @staticmethod
    def create(db):
        """Create the Users table (Username/Password columns) and commit.

        ``db`` is a connected YassaAlchemy table handle.
        """
        db.connect()
        db.mk_table("Users")  # name the table
        db.mk_column("Username", "VARCHAR(500)")  # column one (Username), VARCHAR
        db.mk_column("Password", "VARCHAR(500)")  # column two (Password), VARCHAR
        db.publish_db()  # commit changes

Users.create(db)  # create the Users table
5020424 | <reponame>godontop/python-work<gh_stars>0
# -*- coding: utf-8 -*-
class FunctionalList(object):
    """A list-like container with extra functional-style accessors.

    Implements the built-in ``list`` protocol (indexing, iteration,
    length, reversal) and adds: head, tail, init, last, drop, take.
    """

    def __init__(self, values=None):
        # A passed-in list is stored by reference; omitting the argument
        # yields a fresh empty list (avoids the mutable-default pitfall).
        self.values = [] if values is None else values

    def __len__(self):
        return len(self.values)

    def __getitem__(self, key):
        return self.values[key]

    def __setitem__(self, key, value):
        self.values[key] = value

    def __delitem__(self, key):
        del self.values[key]

    def __iter__(self):
        return iter(self.values)

    def __reversed__(self):
        # BUG FIX: previously the reversed *iterator* was stored as
        # ``.values``, so len()/indexing on the returned object failed
        # and it could only be iterated once; materialize it instead.
        return FunctionalList(list(reversed(self.values)))

    def append(self, value):
        self.values.append(value)

    def head(self):
        """Return the first element."""
        return self.values[0]

    def tail(self):
        """Return all elements after the first."""
        return self.values[1:]

    def init(self):
        """Return all elements except the last."""
        return self.values[:-1]

    def last(self):
        """Return the last element."""
        return self.values[-1]

    def drop(self, n):
        """Return all elements except the first *n*."""
        return self.values[n:]

    def take(self, n):
        """Return the first *n* elements."""
        return self.values[:n]
| StarcoderdataPython |
5089730 | <reponame>sannithibalaji/cloudlift<gh_stars>0
import functools
import boto3
import click
from botocore.exceptions import ClientError
from cloudlift.config import highlight_production
from cloudlift.deployment.configs import deduce_name
from cloudlift.deployment import EnvironmentCreator, editor
from cloudlift.config.logging import log_err
from cloudlift.deployment.service_creator import ServiceCreator
from cloudlift.deployment.service_information_fetcher import ServiceInformationFetcher
from cloudlift.deployment.service_updater import ServiceUpdater
from cloudlift.session import SessionCreator
from cloudlift.version import VERSION
from cloudlift.exceptions import UnrecoverableException
def _require_environment(func):
    """Decorator: add a required ``--environment`` click option to *func*.

    Also shows a prominent warning banner whenever the chosen
    environment is ``production``.
    """
    @click.option('--environment', '-e', prompt='environment',
                  help='environment')
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        is_production = kwargs['environment'] == 'production'
        if is_production:
            highlight_production()
        return func(*args, **kwargs)
    return wrapper
def _require_name(func):
    """Decorator: add a ``--name`` click option to *func*.

    When no name is given on the command line, it is deduced from the
    current repository via ``deduce_name``.
    """
    @click.option('--name', help='Your service name, give the name of repo')
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        if kwargs.get('name') is None:
            kwargs['name'] = deduce_name(None)
        return func(*args, **kwargs)
    return wrapper
class CommandWrapper(click.Group):
    """click Group that turns UnrecoverableException into a clean error
    message plus a non-zero exit, instead of a raw traceback."""

    def __call__(self, *args, **kwargs):
        try:
            return self.main(*args, **kwargs)
        except UnrecoverableException as error:
            log_err(error.value)
            exit(1)
@click.group(cls=CommandWrapper)
@click.version_option(version=VERSION, prog_name="cloudlift")
def cli():
    """
    Cloudlift is built by Simpl developers to make it easier to launch \
    dockerized services in AWS ECS.
    """
    # Fail fast with a friendly message when AWS credentials are missing
    # or misconfigured, before any subcommand runs.
    try:
        boto3.client('cloudformation')
    except ClientError:
        log_err("Could not connect to AWS!")
        log_err("Ensure AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY & \
AWS_DEFAULT_REGION env vars are set OR run 'aws configure'")
        exit(1)
@cli.command(help="Create a new service. This can contain multiple \
ECS services")
@_require_environment
@_require_name
def create_service(name, environment):
ServiceCreator(name, environment).create()
@cli.command(help="Update existing service.")
@_require_environment
@_require_name
def update_service(name, environment):
ServiceCreator(name, environment).update()
@cli.command(help="Create a new environment")
@click.option('--environment', '-e', prompt='environment',
              help='environment')
def create_environment(environment):
    # Uses a plain option (not _require_environment) so no production banner.
    EnvironmentCreator(environment).run()
@cli.command(help="Update environment")
@_require_environment
@click.option('--update_ecs_agents',
              is_flag=True,
              help='Update ECS container agents')
def update_environment(environment, update_ecs_agents):
    EnvironmentCreator(environment).run_update(update_ecs_agents)
@cli.command(help="Command used to create or update the configuration \
in parameter store")
@_require_name
@_require_environment
def edit_config(name, environment):
    # Opens the service's parameter-store config in an interactive editor.
    editor.edit_config(name, environment)
@cli.command()
@_require_environment
@_require_name
@click.option('--version', default=None,
              help='local image version tag')
@click.option("--build-arg", type=(str, str), multiple=True, help="These args are passed to docker build command "
                                                                 "as --build-args. Supports multiple.\
 Please leave space between name and value" )
def deploy_service(name, environment, version, build_arg):
    # build_arg arrives as a tuple of (key, value) pairs; convert to a dict
    # for docker build --build-arg forwarding.
    ServiceUpdater(name, environment, None, version, dict(build_arg)).run()
@cli.command()
@click.option('--local_tag', help='Commit sha for image to be uploaded')
@click.option('--additional_tags', default=[], multiple=True,
              help='Additional tags for the image apart from commit SHA')
@_require_name
def upload_to_ecr(name, local_tag, additional_tags):
    # Environment/env-sample args are passed as empty strings since only the
    # image upload path of ServiceUpdater is exercised here.
    # NOTE(review): positional args differ from deploy_service's call —
    # confirm ServiceUpdater's signature tolerates the missing version arg.
    ServiceUpdater(name, '', '', local_tag).upload_image(additional_tags)
@cli.command(help="Get commit information of currently deployed code \
from commit hash")
@_require_environment
@_require_name
@click.option('--short', '-s', is_flag=True,
              help='Pass this when you just need the version tag')
def get_version(name, environment, short):
    ServiceInformationFetcher(name, environment).get_version(short)
@cli.command(help="Start SSH session in instance running a current \
service task")
@_require_environment
@_require_name
@click.option('--mfa', help='MFA code', prompt='MFA Code')
def start_session(name, environment, mfa):
    SessionCreator(name, environment).start_session(mfa)

# Entry point when the module is executed directly (normally installed as
# a console script).
if __name__ == '__main__':
    cli()
| StarcoderdataPython |
1822398 | <gh_stars>0
import pytest
import os
import sys
import json
import math
import torch
import torch.distributed as dist
import torch.nn.functional as F
from fmoe.functions import ensure_comm
from test_ddp import _ensure_initialized, _run_distributed
from test_numerical import _assert_numerical
from fmoe.fastermoe.schedule import _fmoe_general_global_forward as smart_fwd
from fmoe.layers import _fmoe_general_global_forward as naive_fwd
@pytest.mark.parametrize("n_process", [8])
@pytest.mark.parametrize("d_model", [1024])
@pytest.mark.parametrize("batch_size", [16])
@pytest.mark.parametrize("n_expert", [1])
@pytest.mark.parametrize("group_sz", [1, 2, 4])
def test_faster_schedule(n_process, d_model, batch_size, n_expert, group_sz):
    """Launch _test_faster_schedule across n_process ranks for each group size.

    Spawns this same file as a distributed job (see the __main__ dispatcher
    at the bottom) with FMOE_FASTER_GROUP_SIZE controlling the schedule.
    """
    _run_distributed('_test_faster_schedule',
            n_process,
            {
                'd_model': d_model,
                'batch_size': batch_size,
                'n_expert': n_expert
            },
            script=__file__,
            env=dict(
                FMOE_FASTER_GROUP_SIZE=str(group_sz)
            )
    )
def _test_faster_schedule(d_model, batch_size, n_expert):
    """Per-rank body: check the smart (faster) MoE forward against the naive one.

    Runs both implementations on identical inputs/weights and asserts the
    outputs and all gradients match numerically. Requires CUDA and an
    initialized torch.distributed process group.
    """
    _ensure_initialized()
    rank = dist.get_rank()
    world_size = dist.get_world_size()
    # Two copies of the same input, each with its own autograd graph.
    x1 = torch.rand(batch_size, d_model).cuda()
    x1.requires_grad = True
    x2 = x1.data.clone()
    x2.requires_grad = True
    # Random top-2 expert assignment per token across all ranks' experts.
    topk_idx = torch.randint(0, world_size * n_expert, (batch_size, 2)).cuda()
    m1 = torch.nn.Linear(d_model, d_model).cuda()
    m2 = torch.nn.Linear(d_model, d_model).cuda()
    # Make the two expert networks weight-identical.
    with torch.no_grad():
        m2.weight.copy_(m1.weight)
        m2.bias.copy_(m1.bias)
    def ef1(x, fec):
        y = m1(x)
        return y
    def ef2(x, fec):
        y = m2(x)
        return y
    ensure_comm(x1, None)
    y1 = smart_fwd(x1, topk_idx, ef1, n_expert, world_size)
    y1.sum().backward()
    y2 = naive_fwd(x2, topk_idx, ef2, n_expert, world_size)
    y2.sum().backward()
    _assert_numerical(['out', 'grad_in', 'grad_bias', 'grad_weight'],
            [y1, x1.grad, m1.bias.grad, m1.weight.grad],
            [y2, x2.grad, m2.bias.grad, m2.weight.grad], rank)
# Dispatcher used by _run_distributed: re-invokes this file per rank with
# the target function name and a JSON kwargs blob on the command line.
if __name__ == '__main__':
    if len(sys.argv) >= 3:
        args = json.loads(sys.argv[2])
        locals()[sys.argv[1]](**args)
    else:
        # test_faster_schedule(8, 16, 16, 1, 2)
        _test_faster_schedule(4, 2, 1)
| StarcoderdataPython |
6473964 | <reponame>movingpictures83/SequenceLength
class SequenceLengthPlugin:
    """PluMA-style plugin reporting average sequence length of a FASTA file.

    Assumes the strict two-line-per-record FASTA layout the original code
    relied on: a header line followed by exactly one sequence line.
    """

    def input(self, filename):
        """Open the FASTA file to analyse; it is consumed (and closed) by output()."""
        self.fasta = open(filename, 'r')

    def run(self):
        """No intermediate computation; all work happens in output()."""
        pass

    def output(self, filename):
        """Write average sequence length and sequence count to *filename*."""
        totallen = 0
        numseq = 0
        try:
            # Odd lines are '>' headers, even lines are the sequences.
            for lineno, line in enumerate(self.fasta, start=1):
                if lineno % 2 == 0:
                    totallen += len(line.strip())
                    numseq += 1
        finally:
            # Fix: the original never closed either file handle.
            self.fasta.close()
        # Fix: guard against an empty FASTA (original raised ZeroDivisionError).
        average = float(totallen) / numseq if numseq else 0.0
        with open(filename, 'w') as outfile:
            outfile.write("AVERAGE LENGTH: " + str(average) + "\n")
            outfile.write("NUMBER OF SEQUENCES: " + str(numseq) + "\n")
| StarcoderdataPython |
6520280 | <filename>tests/test_blobstash_base.py
import os
import pytest
from blobstash.base.blobstore import Blob, BlobNotFoundError, BlobStoreClient
from blobstash.base.client import Client
from blobstash.base.kvstore import KVStoreClient
from blobstash.base.test_utils import BlobStash
def test_test_utils():
    """Ensure the BlobStash utils can spawn a server."""
    b = BlobStash()
    b.cleanup()
    try:
        b.run()
        c = Client()
        # Root endpoint is unrouted; a 404 proves the server is up and responding.
        resp = c.request("GET", "/", raw=True)
        assert resp.status_code == 404
    finally:
        # Always stop the spawned server and remove its data directory.
        b.shutdown()
        b.cleanup()
def test_blobstore_client():
    """Round-trip 1000 random blobs through a live BlobStash blobstore."""
    b = BlobStash()
    b.cleanup()
    try:
        b.run()
        client = BlobStoreClient(api_key="123")
        # Fresh store must be empty via both iteration APIs.
        assert len(list(client)) == 0
        assert len(list(client.iter())) == 0
        # try a blob that does not exist
        with pytest.raises(BlobNotFoundError):
            client.get("0" * 64)
        # (blake2b 256 bits for an empty string)
        expected_empty_hash = (
            "0e5751c026e543b2e8ab2eb06099daa1d1e5df47778f7787faab45cdf12fe3a8"
        )
        empty_blob = Blob.from_data(b"")
        assert empty_blob.hash == expected_empty_hash
        client.put(empty_blob)
        fetched_empty_blob = client.get(expected_empty_hash)
        assert empty_blob == fetched_empty_blob
        # Upload random 8 KiB blobs until we hold 1000 in total.
        blobs = [empty_blob]
        for i in range(1000 - len(blobs)):
            blob = Blob.from_data(os.urandom(1024 * 8))
            client.put(blob)
            blobs.append(blob)

        def by_hash(blob):
            return blob.hash

        # Server enumerates blobs in hash order; compare sorted lists.
        blobs = sorted(blobs, key=by_hash)
        fetched_blobs = sorted([cblob for cblob in client], key=by_hash)
        assert len(fetched_blobs) == len(blobs)
        for i, blob_ref in enumerate(fetched_blobs):
            assert blob_ref.hash == blobs[i].hash
            blob = client.get(blob_ref.hash)
            assert blob.data == blobs[i].data
    finally:
        b.shutdown()
        b.cleanup()
def test_kvstore_client():
    """Exercise versioned KV storage, including survival of a server reindex."""
    b = BlobStash()
    b.cleanup()
    try:
        b.run()
        client = KVStoreClient(api_key="123")
        KV_COUNT = 10
        KV_VERSIONS_COUNT = 100
        # keys maps key name -> list of KV entries in version order.
        keys = {}
        for x in range(KV_COUNT):
            key = "k{}".format(x)
            if key not in keys:
                keys[key] = []
            for y in range(KV_VERSIONS_COUNT):
                val = "value.{}.{}".format(x, y)
                kv = client.put("k{}".format(x), val, version=y + 1)
                keys[key].append(kv)
        # get() returns the latest version; get_versions() returns newest first.
        for key in keys.keys():
            kv = client.get(key)
            assert kv == keys[key][-1]
            versions = list(client.get_versions(key))
            assert len(versions) == len(keys[key])
            for i, kv in enumerate(versions):
                assert kv == keys[key][KV_VERSIONS_COUNT - (1 + i)]
        # Restart with reindex=True: all data must still be readable.
        b.shutdown()
        b.run(reindex=True)
        for key in keys.keys():
            kv = client.get(key)
            assert kv == keys[key][-1]
            versions = list(client.get_versions(key))
            assert len(versions) == KV_VERSIONS_COUNT
            for i, kv in enumerate(versions):
                assert kv == keys[key][KV_VERSIONS_COUNT - (1 + i)]
        rkeys = list(client.iter())
        for kv in rkeys:
            assert kv == keys[kv.key][-1]
    finally:
        print("done")
        b.shutdown()
        b.cleanup()
| StarcoderdataPython |
6505329 | <gh_stars>10-100
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
# Mapping of Shibboleth HTTP headers to Django user fields.
# At a minimum you will need a username; each value is (required, field_name).
default_shib_attributes = {
    "REMOTE_USER": (True, "username"),
}

SHIB_ATTRIBUTE_MAP = getattr(settings, 'SHIBBOLETH_ATTRIBUTE_MAP', default_shib_attributes)

# Set to True when testing locally to inject sample Shibboleth headers.
SHIB_MOCK_HEADERS = getattr(settings, 'SHIBBOLETH_MOCK_HEADERS', False)

# A LOGIN_URL is mandatory; fail at import time with a clear message.
LOGIN_URL = getattr(settings, 'LOGIN_URL', None)
if not LOGIN_URL:
    raise ImproperlyConfigured("A LOGIN_URL is required. Specify in settings.py")

# This list of attributes will map to Django permission groups.
GROUP_ATTRIBUTES = getattr(settings, 'SHIBBOLETH_GROUP_ATTRIBUTES', [])

# If a group attribute is actually a list of groups, these delimiters
# are used to split the list.
GROUP_DELIMITERS = getattr(settings, 'SHIBBOLETH_GROUP_DELIMITERS', [';'])

# Optional logout parameters.
# Should look like: https://sso.school.edu/idp/logout.jsp?return=%s
# The return url variable will be replaced in the LogoutView.
LOGOUT_URL = getattr(settings, 'SHIBBOLETH_LOGOUT_URL', None)

# LOGOUT_REDIRECT_URL specifies a default logout page that will always be
# used when users logout from Shibboleth.
LOGOUT_REDIRECT_URL = getattr(settings, 'SHIBBOLETH_LOGOUT_REDIRECT_URL', None)
| StarcoderdataPython |
11211981 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for synthetic_protein_landscapes.utils."""
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import utils
class UtilsTest(parameterized.TestCase):
  """Unit tests for sequence/mutation helpers in utils (one-hot encoding,
  recombination, hamming distance, mutation-set algebra)."""

  @parameterized.named_parameters(
      dict(
          testcase_name='3 classes',
          class_vector=[1, 0, 2],
          num_classes=3,
          expected=np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]])),
      dict(
          testcase_name='4 classes',
          class_vector=[1, 3],
          num_classes=4,
          expected=np.array([[0, 1, 0, 0], [0, 0, 0, 1]])),
      dict(
          testcase_name='2d in [100, 110]',
          class_vector=np.array([[1, 0, 0], [1, 1, 0]]),
          num_classes=2,
          expected=np.array([[[0, 1], [1, 0], [1, 0]], [[0, 1], [0, 1], [1,
                                                                         0]]]),
      ),
  )
  def test_one_hot(self, class_vector, num_classes, expected):
    np.testing.assert_allclose(
        utils.onehot(class_vector, num_classes), expected)

  @parameterized.named_parameters(
      dict(
          testcase_name='no skip',
          seq_start=[1, 2, 3],
          seq_end=[4, 5, 6],
          crossover_idx=3,
          expected=[1, 2, 3]),
      dict(
          testcase_name='full skip',
          seq_start=[1, 2, 3],
          seq_end=[4, 5, 6],
          crossover_idx=0,
          expected=[4, 5, 6]),
      dict(
          testcase_name='1',
          seq_start=[1, 2, 3],
          seq_end=[4, 5, 6],
          crossover_idx=1,
          expected=[1, 5, 6]),
  )
  def test_crossover_at_index(self, seq_start, seq_end, crossover_idx,
                              expected):
    seq_start = np.array(seq_start)
    seq_end = np.array(seq_end)
    expected = np.array(expected)
    recombined_seq = utils._crossover_at_index(seq_start, seq_end,
                                               crossover_idx)
    np.testing.assert_array_equal(recombined_seq, expected)

  @parameterized.named_parameters(
      dict(
          testcase_name='recombination distribution',
          seq_start=np.array([0, 0, 0, 0]),
          seq_end=np.array([1, 1, 1, 1]),
          seed=0,
      ),)
  def test_recombine_seqs_dist(self, seq_start, seq_end, seed):
    # Statistical test: over many replicates the crossover point should be
    # uniform, giving ~50% 1s in the first column and a uniform 1s profile.
    random_state = np.random.RandomState(seed)
    num_replicates = 1000
    recombined_seqs = []
    for _ in range(num_replicates):
      recombined_seqs.append(
          utils.recombine_seqs(seq_start, seq_end, random_state))
    recombined_seqs = np.vstack(recombined_seqs)
    # should be ~50% 1s in first column
    num_1s_in_first_position = recombined_seqs[:, 0].sum()
    tolerance = 50
    self.assertIn(
        num_1s_in_first_position,
        range(num_replicates // 2 - tolerance, num_replicates // 2 + tolerance))
    # should be ~uniform 1s across columns
    num_1s_in_each_position = recombined_seqs.sum(axis=0)
    freq_1s_in_each_position = num_1s_in_each_position / recombined_seqs.shape[0]
    dist_1s = freq_1s_in_each_position / freq_1s_in_each_position.sum()
    uniform_dist = np.array([0.25, 0.25, 0.25, 0.25])
    np.testing.assert_allclose(dist_1s, uniform_dist, atol=0.02)

  @parameterized.named_parameters(
      dict(testcase_name='3 hamming', x=[1, 2, 3], y=[4, 5, 6], expected=3),
      dict(testcase_name='2 hamming', x=[1, 2, 3], y=[4, 5, 3], expected=2),
      dict(testcase_name='0 hamming', x=[3, 1, 2], y=[3, 1, 2], expected=0),
  )
  def test_hamming_distance(self, x, y, expected):
    self.assertEqual(utils.hamming_distance(x, y), expected)

  @parameterized.named_parameters(
      dict(
          testcase_name='2_combos',
          mutations_a=[(0, 2), (1, 3)],
          mutations_b=[(0, 4), (3, 11)],
          expected_output=[((0, 2), (1, 3), (3, 11)),
                           ((0, 4), (1, 3), (3, 11))]),
      dict(
          testcase_name='easy_add',
          mutations_a=[(0, 1),],
          mutations_b=[(1, 2),],
          expected_output=[((0, 1), (1, 2)),]),
      dict(
          testcase_name='add_with_duplicate',
          mutations_a=[(0, 1), (1, 3)],
          mutations_b=[
              (1, 2),
          ],
          expected_output=[
              ((0, 1), (1, 2)),
              ((0, 1), (1, 3)),
          ]),
      dict(
          testcase_name='one_set_empty',
          mutations_a=[(0, 2), (1, 3)],
          mutations_b=[],
          expected_output=[((0, 2), (1, 3),),]
      ),
  )
  def test_merge_mutation_sets(self, mutations_a, mutations_b, expected_output):
    actual = utils.merge_mutation_sets(mutations_a, mutations_b)
    self.assertSetEqual(set(actual), set(expected_output))

  @parameterized.named_parameters(
      dict(
          testcase_name='2_combos',
          mutations_a=((0, 2), (1, 3)),
          mutations_b=((0, 4), (3, 11)),
          mutations_c=((4, 4), (5, 5)),
          expected_output=[((0, 2), (1, 3), (3, 11), (4, 4), (5, 5)),
                           ((0, 4), (1, 3), (3, 11), (4, 4), (5, 5))],
      ),
      dict(
          testcase_name='easy_add',
          mutations_a=((0, 0),),
          mutations_b=((1, 1),),
          mutations_c=((2, 2),),
          expected_output=[((0, 0), (1, 1), (2, 2)), ],
      ),
      dict(
          testcase_name='add_with_duplicate',
          mutations_a=[(0, 1), ],
          mutations_b=[(1, 2), ],
          mutations_c=[(1, 3), (4, 4)],
          expected_output=[
              ((0, 1), (1, 2), (4, 4)),
              ((0, 1), (1, 3), (4, 4)),
          ]),
  )
  def test_merge_multiple_mutation_sets(self, mutations_a, mutations_b, mutations_c, expected_output):
    actual = utils.merge_multiple_mutation_sets([mutations_a, mutations_b, mutations_c])
    self.assertSetEqual(set(actual), set(expected_output))

  @parameterized.parameters(
      ([1, 2, 3], [1, 2, 3], []),
      ([1, 2, 4], [1, 2, 3], [2]),
      ([1, 2, 3], [1, 2, 4], [2]),
      ([2, 2, 3], [1, 2, 4], [0, 2]),
      ([1, 1, 2], [1, 0, 0], [1, 2]),
  )
  def test_get_mutation_positions(self, sequence, parent, expected_output):
    sequence = np.array(sequence)
    parent = np.array(parent)
    actual_output = utils.get_mutation_positions(sequence, parent)
    self.assertEqual(list(actual_output), expected_output)

  def test_get_mutation_positions_unequal_lengths(self):
    a = np.array([1, 2])
    b = np.array([1, 2, 3])
    with self.assertRaisesRegex(AssertionError, 'equal length'):
      utils.get_mutation_positions(a, b)

  @parameterized.parameters(
      ([0, 1, 2], [0, 1, 2], []),
      ([0, 1, 3], [0, 1, 2], [(2, 3)]),
      ([0, 1, 2], [0, 1, 3], [(2, 2)]),
      ([1, 1, 2], [0, 1, 3], [(0, 1), (2, 2)]),
      ([1, 1, 2], [1, 0, 0], [(1, 1), (2, 2)]),
  )
  def test_get_mutations(self, sequence, parent, expected_output):
    sequence = np.array(sequence)
    parent = np.array(parent)
    actual_output = utils.get_mutations(sequence, parent)
    self.assertEqual(actual_output, expected_output)

  def test_get_mutations_unequal_lengths(self):
    a = np.array([1, 2])
    b = np.array([1, 2, 3])
    with self.assertRaisesRegex(AssertionError, 'equal length'):
      utils.get_mutations(a, b)

  @parameterized.parameters(
      ([], [0, 1, 2]),
      ([(0, 1)], [1, 1, 2]),
      ([(0, 1), (2, 0)], [1, 1, 0]),
  )
  def test_apply_mutations(self, mutations, expected_output):
    parent = np.array([0, 1, 2])
    expected_output = np.array(expected_output)
    actual_output = utils.apply_mutations(parent, mutations)
    np.testing.assert_allclose(actual_output, expected_output)

  @parameterized.named_parameters(
      dict(
          testcase_name='additive',
          seq_a=[1, 0, 0],
          seq_b=[0, 0, 2],
          ref_seq=[0, 0, 0],
          expected=[[1, 0, 2]]),
      dict(
          testcase_name='duplicate_position',
          seq_a=[1, 1, 0],
          seq_b=[0, 2, 2],
          ref_seq=[0, 0, 0],
          expected=[[1, 1, 2], [1, 2, 2]]),
      dict(
          testcase_name='additive_2_positions',
          seq_a=[1, 0],
          seq_b=[0, 2],
          ref_seq=[0, 0],
          expected=[[1, 2]]),
      dict(
          testcase_name='2_positions',
          seq_a=[1, 1],
          seq_b=[2, 2],
          ref_seq=[0, 0],
          expected=[[1, 1], [1, 2], [2, 1], [2, 2]]),
      dict(
          testcase_name='combine_with_ref',
          seq_a=[0, 0],
          seq_b=[2, 2],
          ref_seq=[0, 0],
          expected=[[2, 2]]),
      dict(
          testcase_name='all_combos',
          seq_a=[1, 1, 1],
          seq_b=[2, 0, 2],
          ref_seq=[0, 0, 0],
          expected=[[1, 1, 2], [2, 1, 1], [1, 1, 1], [2, 1, 2]]),
  )
  def test_add_seqs(self, seq_a, seq_b, ref_seq, expected):
    seq_a = np.array(seq_a)
    seq_b = np.array(seq_b)
    ref_seq = np.array(ref_seq)
    combined_seqs = utils.add_seqs(seq_a, seq_b, ref_seq)
    # Order of returned combinations is unspecified; compare as sets.
    expected_set = set(tuple(s) for s in expected)
    actual_set = set(tuple(s) for s in combined_seqs)
    self.assertSetEqual(actual_set, expected_set)

  @parameterized.named_parameters(
      dict(
          testcase_name='unequal_lengths',
          seq_a=[1, 0, 0, 1],
          seq_b=[0, 0, 2],
          ref_seq=[0, 0, 0],
      ),
      dict(
          testcase_name='unequal_ref_length',
          seq_a=[1, 0, 0],
          seq_b=[0, 0, 2],
          ref_seq=[0, 0, 0, 0],
      ),
  )
  def test_add_seqs_wrong_length(self, seq_a, seq_b, ref_seq):
    seq_a = np.array(seq_a)
    seq_b = np.array(seq_b)
    ref_seq = np.array(ref_seq)
    with self.assertRaisesRegex(AssertionError, 'equal length'):
      utils.add_seqs(seq_a, seq_b, ref_seq)
class TensorUtilsTest(parameterized.TestCase):
  """Tests for pairwise-interaction tensor helpers."""

  # 2x2x3x3 interaction tensor indexed as
  # [position_a, position_b, mutation_a, mutation_b].
  mock_tensor = np.array([
      [
          [
              [-10, 1, 1],
              [1, 8, 1],
              [2, 0, 1],
          ],
          [
              [0, 1, 6],
              [1, 1, -10],
              [2, 0, 0],
          ],
      ],
      [
          [
              [0, 1, 1],
              [1, 1, 1],
              [0, 0, 10],
          ],
          [
              [0, 1, 6],
              [1, 1, 1],
              [2, 0, -9],
          ],
      ]
  ])

  def _get_tensor_idx_from_pair(self, pair):
    # Convert ((pos_a, mut_a), (pos_b, mut_b)) into a tensor index tuple.
    return (pair[0][0], pair[1][0], pair[0][1], pair[1][1])

  def test_get_top_n_mutation_pairs(self):
    best_interactions = utils.get_top_n_mutation_pairs(self.mock_tensor, 2, lowest=False)
    best_pair = best_interactions[0]
    self.assertEqual(self.mock_tensor[self._get_tensor_idx_from_pair(best_pair)], 10)
    second_best_pair = best_interactions[1]
    self.assertEqual(self.mock_tensor[self._get_tensor_idx_from_pair(second_best_pair)], 8)

  def test_get_top_n_mutation_pairs_lowest(self):
    worst_interactions = utils.get_top_n_mutation_pairs(self.mock_tensor, 3, lowest=True)
    self.assertEqual(self.mock_tensor[self._get_tensor_idx_from_pair(worst_interactions[0])], -10)
    self.assertEqual(self.mock_tensor[self._get_tensor_idx_from_pair(worst_interactions[1])], -10)
    self.assertEqual(self.mock_tensor[self._get_tensor_idx_from_pair(worst_interactions[2])], -9)


if __name__ == '__main__':
  absltest.main()
| StarcoderdataPython |
346341 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from telethon.tl.functions.messages import SaveDraftRequest
from FIREX.utils import admin_cmd, sudo_cmd
from userbot.cmdhelp import CmdHelp
@bot.on(admin_cmd(pattern="chain$"))
@bot.on(sudo_cmd(pattern="chain$", allow_sudo=True))
async def _(event):
    """Count how long the reply chain is above the replied-to message."""
    if event.fwd_from:
        return
    await event.edit("Counting...")
    # Start at -1 so the triggering ".chain" message itself is not counted.
    count = -1
    message = event.message
    while message:
        reply = await message.get_reply_message()
        if reply is None:
            # End of the chain: park a draft replying to the last message so
            # the user can easily continue the chain.
            await event.client(
                SaveDraftRequest(
                    await event.get_input_chat(), "", reply_to_msg_id=message.id
                )
            )
        message = reply
        count += 1
    await event.edit(f"Chain length: {count}")
# Register this module with the userbot's help system (fluent builder API).
CmdHelp("chain").add_command(
    "chain",
    "Reply to a message",
    "Reply this command to any msg so that it finds chain length of that msg",
).add_info("Its Help U To Find The Connected Message").add_warning(
    "Harmless Module✅"
).add_type(
    "Official"
).add()
| StarcoderdataPython |
11249952 | from exo.exocore import ExoCore
# SECURITY NOTE: keep real credentials out of source control — load them
# from environment variables or a secrets manager instead.
API_KEY = ""  # Enzoic API key here
SECRET_KEY = ""  # Enzoic secret key here

exo = ExoCore(API_KEY, SECRET_KEY)
# Query breach results for a sample credential string.
result = exo.results("abc123@12")
print(result)
| StarcoderdataPython |
def cut(s):
    """Parse one BITS packet (AoC 2021 day 16) from a bit stream.

    Args:
        s: a string of '0'/'1' characters or an iterator over them.

    Returns:
        (version, type_id, value) where value is an int for literal packets
        (type 4) or a list of sub-packet tuples for operators; None when the
        stream is exhausted before a header can be read.
    """
    from itertools import islice
    it = iter(s)

    def b2n(bits):
        # Read `bits` characters as a binary number. If the stream runs dry,
        # next() raises StopIteration inside the genexp, which PEP 479
        # converts to RuntimeError — caught below to signal end of stream.
        return int(''.join(next(it) for _ in range(bits)), 2)

    try:
        version, type_id = b2n(3), b2n(3)
    except RuntimeError:  # no more packets in this (sub-)stream
        return None
    if type_id == 4:  # Literal: 5-bit groups, MSB flags continuation
        more, bits = '1', []
        while more == '1':
            more, *chunk = (next(it) for _ in range(5))
            bits.extend(chunk)
        payload = int(''.join(bits), 2)
    else:  # Operator
        if b2n(1):  # length type 1: next 11 bits = number of sub-packets
            payload = [cut(it) for _ in range(b2n(11))]
        else:  # length type 0: next 15 bits = total sub-packet bit length
            # Fix: slice the shared iterator `it`, not `s`. When the
            # top-level caller passes a string, islice(s, n) would restart
            # from the beginning of the string instead of the current
            # position, corrupting the parse.
            payload, window = [], islice(it, b2n(15))
            while sub := cut(window):
                payload.append(sub)
    return version, type_id, payload
# NOTE(review): `input` shadows the builtin; fine for a one-off puzzle script.
input = open('input', 'r').read().strip()
# Expand the hex transmission into a bit string, then parse the outer packet.
input = ''.join(f'{int(c, 16):04b}' for c in input)
input = cut(input)

def p1():
    """Part 1: sum of all version numbers in the packet tree."""
    def rec(packet):
        version, _, sub = packet
        if isinstance(sub, list): version += sum(map(rec, sub))
        return version
    return rec(input)

def p2():
    """Part 2: evaluate the packet expression tree."""
    def rec(packet):
        from math import prod
        from operator import gt, lt, eq
        # Index by type_id: sum, product, min, max, literal (None), >, <, ==.
        acc = [sum, prod, min, max, None, lambda x: gt(*x), lambda x: lt(*x), lambda x: eq(*x)]
        _, typ, sub = packet
        return reduce(map(rec, sub)) if (reduce := acc[typ]) else sub
    return rec(input)

if (r1 := p1()) is not None: print(r1)
if (r2 := p2()) is not None: print(r2)
| StarcoderdataPython |
3571078 | <gh_stars>0
class Historian():
    """Undo/redo manager.

    History entries are (undo_events, redo_events) pairs; each event is a
    (callable, args) tuple invoked as callable(*args, False) — the trailing
    False presumably tells the callee not to re-record history (TODO confirm
    against callers).

    history_index points at the last applied entry (-1 = nothing applied);
    history_max_index is the last valid entry (entries beyond it are stale
    leftovers after a branch-truncating commit).
    """
    def __init__(self):
        self.history = []
        self.history_index = -1
        self.history_max_index = -1
        # Events accumulated via commit_event() until register_events().
        self.temp_undo = []
        self.temp_redo = []
        # UI hooks; default to no-ops until the owner installs callbacks.
        empty_func = lambda: None
        self.callback_enable_undo = empty_func
        self.callback_enable_redo = empty_func
        self.callback_disable_undo = empty_func
        self.callback_disable_redo = empty_func

    def undo(self):
        """Step one entry back, replaying its undo events in reverse order."""
        if (self.history_index >= 0):
            self.history_index = self.history_index - 1
            self.apply_history_events(self.history[self.history_index + 1][0], False)

    def redo(self):
        """Step one entry forward, replaying its redo events in order."""
        if (self.history_index + 1 <= self.history_max_index):
            self.history_index = self.history_index + 1
            self.apply_history_events(self.history[self.history_index][1], True)

    def apply_history_events(self, events, forward_direction):
        """Run a list of events; backwards iteration for undo, forwards for redo."""
        if forward_direction:
            for i in range(len(events)):
                events[i][0](*events[i][1], False)
        else:
            # Undo must unwind in the reverse of the recorded order.
            for i in range(len(events)):
                events[-i - 1][0](*events[-i - 1][1], False)
        self.check_undo_redo()

    def commit_event(self, undo_event, redo_event):
        """Queue one undo/redo event pair for the next register_events()."""
        self.temp_undo.append(undo_event)
        self.temp_redo.append(redo_event)

    def register_events(self):
        """Flush queued events into a new history entry.

        If undos happened since the last commit, the redo branch beyond the
        current index is truncated (overwritten in place; max index reset).
        """
        if (len(self.temp_undo) > 0 and len(self.temp_redo) > 0):
            if (self.history_index == self.history_max_index):
                self.history_max_index = self.history_max_index + 1
                self.history_index = self.history_index + 1
                if self.history_index >= len(self.history):
                    self.history.append((self.temp_undo, self.temp_redo))
                else:
                    self.history[self.history_index] = (self.temp_undo, self.temp_redo)
            else:
                self.history_index = self.history_index + 1
                self.history_max_index = self.history_index
                self.history[self.history_index] = (self.temp_undo, self.temp_redo)
            self.temp_undo = []
            self.temp_redo = []
            self.check_undo_redo()

    def insert_commit(self, undoevent, redoevent):
        """Append an event pair to the current entry (and mirror it into the
        next entry's boundary so redo/undo across it stays consistent)."""
        if self.history_index >= 0:
            self.history[self.history_index][0].append(undoevent)
            self.history[self.history_index][1].append(redoevent)
        if self.history_index < self.history_max_index:
            self.history[self.history_index + 1][0].insert(0, redoevent)
            self.history[self.history_index + 1][1].insert(0, undoevent)

    def check_undo_redo(self):
        """Enable/disable the owner's undo/redo UI affordances."""
        if (self.history_index >= 0):
            self.callback_enable_undo()
        else:
            self.callback_disable_undo()
        if (self.history_index + 1 <= self.history_max_index):
            self.callback_enable_redo()
        else:
            self.callback_disable_redo()

    def clear(self):
        """Drop all history and refresh the UI callbacks."""
        self.history = []
        self.history_index = -1
        self.history_max_index = -1
        self.temp_undo = []
        self.temp_redo = []
        self.check_undo_redo()
| StarcoderdataPython |
11210471 | import neutromeratio
import sys
import torch
from neutromeratio.constants import initialize_NUM_PROC
from neutromeratio.parameter_gradients import (
setup_and_perform_parameter_retraining_with_test_set_split,
)
def run():
    """CLI entry point: retrain ANI model parameters with a test-set split.

    Expects 9 positional argv values:
      env elements data_path model_name max_snapshots_per_window batch_size
      test_size validation_size diameter
    """
    initialize_NUM_PROC(4)
    assert len(sys.argv) == 10
    env = sys.argv[1]
    elements = sys.argv[2]
    data_path = sys.argv[3]
    model_name = str(sys.argv[4])
    max_snapshots_per_window = int(sys.argv[5])
    batch_size = int(sys.argv[6])
    test_size = float(sys.argv[7])
    validation_size = float(sys.argv[8])
    diameter = int(sys.argv[9])

    print(f"Max nr of snapshots: {max_snapshots_per_window}")
    print(f"Test set size: {test_size}")
    print(f"Valiation set size: {validation_size}")

    # Sanity-check the split fractions and training hyperparameters.
    assert validation_size >= 0.1 and validation_size <= 0.9
    assert test_size >= 0.1 and test_size <= 0.9
    assert batch_size >= 1
    assert max_snapshots_per_window >= 10

    # Select the alchemical ANI flavour by name.
    if model_name == "ANI2x":
        model = neutromeratio.ani.CompartimentedAlchemicalANI2x
        # model = neutromeratio.ani.AlchemicalANI2x
        print(f"Using {model_name}.")
    elif model_name == "ANI1ccx":
        model = neutromeratio.ani.AlchemicalANI1ccx
        print(f"Using {model_name}.")
    elif model_name == "ANI1x":
        model = neutromeratio.ani.AlchemicalANI1x
        print(f"Using {model_name}.")
    else:
        raise RuntimeError(f"Unknown model name: {model_name}")

    # Droplet simulations are too large for bulk energy evaluation.
    if env == "droplet":
        bulk_energy_calculation = False
        torch.set_num_threads(4)
        print(f"Diameter: {diameter}")
    else:
        torch.set_num_threads(4)
        bulk_energy_calculation = True

    max_epochs = 100

    (
        rmse_validation,
        rmse_test,
    ) = setup_and_perform_parameter_retraining_with_test_set_split(
        env=env,
        ANImodel=model,
        batch_size=batch_size,
        load_checkpoint=False,
        max_snapshots_per_window=max_snapshots_per_window,
        checkpoint_filename=f"parameters_{model_name}_{env}.pt",
        data_path=data_path,
        bulk_energy_calculation=bulk_energy_calculation,
        elements=elements,
        max_epochs=max_epochs,
        diameter=diameter,
        load_pickled_FEC=True,
        lr_AdamW=1e-4,
        lr_SGD=1e-4,
        weight_decay=1e-06,
        test_size=test_size,
        validation_size=validation_size,
        include_snapshot_penalty=False,
    )


if __name__ == "__main__":
    run()
3259250 | #!/usr/bin/env python
import os
import sys
import time
import json
# Debug log destination and the annotation text used as the [Notes] marker.
LOG_FILEPATH = "/tmp/onenote.log"
NOTES_ANNOTATION_DESCRIPTION = "[Notes]"
# Logging is opt-in via the ONENOTE_DEBUG environment variable.
DEBUG = "ONENOTE_DEBUG" in os.environ
def log(message):
    """Append *message* to the hook's log file when ONENOTE_DEBUG is set."""
    if not DEBUG:
        return
    with open(LOG_FILEPATH, 'a') as logfile:
        logfile.write("%s\n" % message)
def to_iso_string(time_struct):
    """Format a time.struct_time as a compact ISO-8601-style UTC timestamp."""
    return time.strftime("%Y%m%dT%H%M%SZ", time_struct)
def current_time():
    """Return the current UTC time as a time.struct_time."""
    # time.gmtime() already defaults to the current time; passing
    # time.time() explicitly (as the original did) is redundant.
    return time.gmtime()
def current_time_iso():
    """Return the current UTC time formatted via to_iso_string()."""
    return to_iso_string(current_time())
def check_notes(data):
    """Return True when the task has a non-empty, non-whitespace notes field.

    Normalized to an actual bool (the original returned the stripped string,
    which was only ever consumed in boolean context).
    """
    return bool(data.get("notes", "").strip())
def check_notes_updated(data_before, data_after):
    """Return True when the effective notes text changed between versions.

    Missing and whitespace-only notes both count as "no notes", so adding a
    blank notes field is not an update. Collapses the original's four-branch
    mixed bool/str return into a single truth-equivalent bool comparison.
    """
    before = data_before.get("notes", "").strip()
    after = data_after.get("notes", "").strip()
    return before != after
def check_notes_annotation(data):
    """Return True when the [Notes] marker annotation is already present."""
    for annotation in data.get("annotations", []):
        if annotation["description"] == NOTES_ANNOTATION_DESCRIPTION:
            log("Found %s annotation for task: %s" % (NOTES_ANNOTATION_DESCRIPTION, data["uuid"]))
            return True
    return False
def add_notes_annotation(data):
    """Attach the [Notes] marker annotation to *data* and return it."""
    annotation = {
        "entry": current_time_iso(),
        "description": NOTES_ANNOTATION_DESCRIPTION,
    }
    # Create the annotations list on demand, then append the marker.
    data.setdefault("annotations", []).append(annotation)
    log("Added %s annotation for task: %s" % (NOTES_ANNOTATION_DESCRIPTION, data["uuid"]))
    return data
def remove_notes_annotation(data):
    """Strip every [Notes] marker annotation from *data* and return it.

    Fix: the original popped elements from the list while iterating it with
    enumerate(), which skips the element following each removal and can
    leave stale markers behind. Rebuild the list instead.
    """
    if "annotations" in data:
        kept = []
        for annotation in data["annotations"]:
            if annotation["description"] == NOTES_ANNOTATION_DESCRIPTION:
                log("Removed %s annotation for task: %s" % (NOTES_ANNOTATION_DESCRIPTION, data["uuid"]))
            else:
                kept.append(annotation)
        if kept:
            data["annotations"] = kept
        else:
            # Match the original: drop the key entirely when nothing is left.
            data.pop('annotations', None)
    return data
def manage_notes_annotation(data_before, data_after):
    """Keep the [Notes] marker annotation in sync with the task's notes field.

    Rules: an annotation exists iff non-empty notes exist; when the notes
    text changed, the old annotation is recreated so its entry timestamp
    reflects the latest edit.
    """
    notes_exist = check_notes(data_after)
    notes_annotation_exists = check_notes_annotation(data_after)
    if check_notes_updated(data_before, data_after):
        # Force removal here and mark as removed, allows a new annotation
        # with an updated entry time for the notes.
        data_after = remove_notes_annotation(data_after)
        notes_annotation_exists = False
    if notes_exist and not notes_annotation_exists:
        data_after = add_notes_annotation(data_after)
    if not notes_exist and notes_annotation_exists:
        data_after = remove_notes_annotation(data_after)
    return data_after
def read_stdin():
    """Read the one or two JSON lines taskwarrior passes on stdin.

    on-add hooks send a single line (the new task); on-modify hooks send
    two (the task before and after the change, in that order).
    """
    # Tuple elements are evaluated left to right, so line order is preserved.
    return sys.stdin.readline(), sys.stdin.readline()
def load_task_data(first_line, second_line):
    """Parse the JSON payload(s) from the hook's stdin lines.

    Returns (task_before, task_after). task_before is {} for on-add hooks,
    which supply only one line. Terminates via exit_error() on bad JSON.
    """
    try:
        if second_line:
            return json.loads(first_line), json.loads(second_line)
        return {}, json.loads(first_line)
    except:  # noqa: E722 — original deliberately catches everything
        exit_error()
def exit_error():
    """Log the in-flight exception's type and abort the hook with status 1."""
    # NOTE(review): sys.exc_info()[0] is the exception *class*, not the
    # message; outside an active except block it would be None.
    e = sys.exc_info()[0]
    log("Error: %s" % e)
    sys.exit(1)
if __name__ == '__main__':
    try:
        # taskwarrior feeds the task JSON on stdin; one line for on-add
        # hooks, two (before/after) for on-modify hooks.
        first_line, second_line = read_stdin()
        task_before, task_after = load_task_data(first_line, second_line)
        if task_before:
            log("Modifying current task")
            log("Task before modification")
            log(json.dumps(task_before, sort_keys=True, indent=2))
        else:
            log("Adding new task")
        log("Task after modification")
        log(json.dumps(task_after, sort_keys=True, indent=2))
        modified_task = manage_notes_annotation(task_before, task_after)
        log("Task after hook adjustments")
        log(json.dumps(modified_task, sort_keys=True, indent=2))
    except:
        # Broad catch is intentional: hooks must not dump tracebacks into
        # taskwarrior's output. exit_error() terminates the process, so
        # modified_task is always bound by the time we reach the print below.
        exit_error()
    # Echo the (possibly adjusted) task back so taskwarrior persists it.
    print(json.dumps(modified_task))
    sys.exit(0)
3261891 | # Generated by Django 3.1 on 2020-10-07 15:37
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add nullable date_opi to MaterialInStock."""

    dependencies = [
        ('projectmanager', '0044_auto_20201007_1844'),
    ]

    operations = [
        migrations.AddField(
            model_name='materialinstock',
            name='date_opi',
            # Nullable so existing rows need no default value.
            field=models.DateTimeField(blank=True, null=True, verbose_name='تاریخ opi'),
        ),
    ]
| StarcoderdataPython |
9688869 | <reponame>ynohat/bossman<gh_stars>1-10
import sys
from bossman.errors import BossmanValidationError
from os import getcwd
import git
import argparse
from rich import print
from rich.table import Table
from bossman import Bossman
def init(subparsers: argparse._SubParsersAction):
  """Register the `validate` sub-command on the given argparse subparsers."""
  parser = subparsers.add_parser("validate", help="validates the working tree")
  parser.add_argument("glob", nargs="*", default="*", help="select resources by glob pattern")
  # argparse will call exec(bossman=..., glob=..., ...) for this sub-command.
  parser.set_defaults(func=exec)
def exec(bossman: Bossman, glob, *args, **kwargs):
  """Validate every selected resource and render a summary table.

  Exits with status 1 when at least one resource fails validation.
  """
  resources = bossman.get_resources_from_working_copy(*glob)
  if not resources:
    print("No resources to show: check the glob pattern if provided, or the configuration.")
    return
  table = Table()
  table.add_column("Resource")
  table.add_column("Validation")
  table.add_column("Error")
  had_error = False
  for resource in resources:
    try:
      bossman.validate(resource)
      verdict, error = ":thumbs_up:", ""
    except BossmanValidationError as e:
      verdict, error = ":thumbs_down:", e
      had_error = True
    table.add_row(
      resource,
      verdict,
      error
    )
  print(table)
  if had_error:
    sys.exit(1)
| StarcoderdataPython |
209498 | <gh_stars>0
import numpy as np
import kmeans
import common
import naive_em
import em
X = np.loadtxt("toy_data.txt")

########## Begin: kMeans vs EM (and BIC) #############
K = [1, 2, 3, 4]         # Clusters to try
seeds = [0, 1, 2, 3, 4]  # Seeds to try

# Costs for diff. seeds (kMeans minimizes cost; EM maximizes log-likelihood)
costs_kMeans = [0, 0, 0, 0, 0]
costs_EM = [0, 0, 0, 0, 0]

# Best seed for cluster based on lowest costs
best_seed_kMeans = [0, 0, 0, 0]
best_seed_EM = [0, 0, 0, 0]

# Mixtures for best seeds
mixtures_kMeans = [0, 0, 0, 0, 0]
mixtures_EM = [0, 0, 0, 0, 0]

# Posterior probs. for best seeds
posts_kMeans = [0, 0, 0, 0, 0]
posts_EM = [0, 0, 0, 0, 0]

# BIC score per cluster count
bic = [0., 0., 0., 0.]

for k in range(len(K)):
    for i in range(len(seeds)):
        # Run kMeans
        mixtures_kMeans[i], posts_kMeans[i], costs_kMeans[i] = \
        kmeans.run(X, *common.init(X, K[k], seeds[i]))

        # Run Naive EM
        mixtures_EM[i], posts_EM[i], costs_EM[i] = \
        naive_em.run(X, *common.init(X, K[k], seeds[i]))

    # Print lowest cost
    print("=============== Clusters:", k+1, "======================")
    print("Lowest cost using kMeans is:", np.min(costs_kMeans))
    print("Highest log likelihood using EM is:", np.max(costs_EM))

    # Save best seed for plotting (argmin for cost, argmax for likelihood)
    best_seed_kMeans[k] = np.argmin(costs_kMeans)
    best_seed_EM[k] = np.argmax(costs_EM)

    # Plot kMeans and EM results
    common.plot(X,
                mixtures_kMeans[best_seed_kMeans[k]],
                posts_kMeans[best_seed_kMeans[k]],
                title="kMeans")
    common.plot(X,
                mixtures_EM[best_seed_EM[k]],
                posts_EM[best_seed_EM[k]],
                title="EM")

    # BIC score for EM at this cluster count
    bic[k] = common.bic(X, mixtures_EM[best_seed_EM[k]], np.max(costs_EM))

# Print the best K based on BIC
print("================= BIC ====================")
print("Best K is:", np.argmax(bic)+1)
print("BIC for the best K is:", np.max(bic))
9777665 | <filename>models_with_cam.py
import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
from torch.nn import functional as F
import torch
from torch.autograd import Variable
__all__ = ['ResNet', 'resnet18']
# Download URLs for pretrained ImageNet weights, keyed by architecture name.
model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth'
}
def conv3x3(in_planes, out_planes, stride=1):
    """Build a bias-free 3x3 convolution; padding of 1 preserves spatial size at stride 1."""
    conv = nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
    return conv
class BasicBlock(nn.Module):
    """Standard two-convolution residual block used by ResNet-18/34."""

    # Output channels = planes * expansion.
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        # First conv may downsample spatially (stride); second keeps the size.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """conv-bn-relu-conv-bn, add the (possibly projected) input, final relu."""
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += shortcut
        return self.relu(out)
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block used by ResNet-50 and deeper."""

    # The final 1x1 conv widens the channel count by this factor.
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        # Only the middle 3x3 convolution carries the stride.
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """Run the three conv stages, add the shortcut, apply the final relu."""
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += shortcut
        return self.relu(out)
class ResNet(nn.Module):
    """ResNet backbone variant with a 1-channel input stem and a CAM-like output.

    forward() returns both the softmax class scores and a class-activation map
    built by weighting the layer4 feature maps with the classifier weights of the
    predicted class distribution, then upsampling the summed map by 8x.
    """

    def __init__(self, block, layers, num_classes=1000):
        self.inplanes = 64
        super(ResNet, self).__init__()
        # Input is upsampled 4x before the stem; CAM is upsampled 8x at the end.
        self.upsampleby4 = nn.Upsample(scale_factor=4,mode = "nearest")
        self.upsample = nn.Upsample(scale_factor=8,mode = "bilinear")
        # NOTE(review): allocated on GPU unconditionally and never used in forward()
        # below -- presumably a leftover; confirm before removing.
        self.inputMask = (torch.FloatTensor(16,3,224,224).zero_()+1.0).cuda()
        # Stem takes a SINGLE input channel (grayscale), unlike stock ResNet.
        self.conv11 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3,
                                bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(8, stride=1)
        # Classifier head; name suggests 6 classes but default is num_classes=1000.
        self.fc_6outputs = nn.Linear(512 * block.expansion, num_classes)
        # NOTE(review): convCAMlike is defined but not used in forward() -- confirm.
        self.convCAMlike = nn.Conv2d(512, 1, kernel_size=1, stride=1, padding=0)
        # He-style init for convs, unit-gamma/zero-beta for batch norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack `blocks` residual blocks; project the shortcut when shape changes."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x):
        """Return (softmax scores, upsampled CAM) for a 1-channel input batch."""
        x = self.upsampleby4(x);
        x = self.conv11(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x1 = self.layer4(x)
        # print("x1 size = {}".format(x1.shape))
        x2 = self.avgpool(x1)
        # print("x2 size = {}".format(x2.shape))
        x3 = x2.view(x2.size(0), -1)
        x3 = self.fc_6outputs(x3)
        outsm = F.softmax(x3)
        # Expected classifier weights per sample: softmax-weighted combination of
        # the fc rows (weights detached from autograd via Variable(...data)).
        w = torch.mm(outsm, Variable(self.fc_6outputs.weight.data) )
        # print("w size = {}".format(w.shape))
        # print("x1 size = {}".format(x1.shape))
        # Weight each layer4 channel by its (expected) classifier weight...
        cam = torch.mul(x1,w.unsqueeze(2).unsqueeze(3))
        # print("cam size = {}".format(cam.shape))
        # cam = cam.sum(1) # sum over all channels
        cam = cam.sum(1).unsqueeze(1) # sum over all channels and make: batchSize x height x width --> batchSize x 1 x height x width
        # print("cam size = {}".format(cam.shape))
        # print("cam size = {}".format(cam.unsqueeze(1).shape))
        # print("upscaling")
        # print(self.upsample(cam ).shape) # make 16 x height x width --> 16 x 1 x height x width
        return outsm , self.upsample(cam )
def resnet18(pretrained=False, **kwargs):
    """Constructs a ResNet-18 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    net = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
    if pretrained:
        # strict=False lets the ImageNet checkpoint load even though this
        # variant renames/reshapes layers (e.g. the 1-channel stem `conv11`),
        # so only the matching subset of weights is restored.
        state = model_zoo.load_url(model_urls['resnet18'])
        net.load_state_dict(state, strict=False)
    return net
11331804 | """
Neighbor lists are used to obtain the indices of neighbors surrounding an atom
for the :obj:`schnetpack.md.calculators.SchnetPackCalculator`.
Currently only a primitive version of a neighbor list is implemented, which cannot deal
with periodic boundary conditions and does not possess optimal scaling for large systems.
"""
import torch
import schnetpack
class MDNeighborList:
    """
    Basic neighbor list template for molecular dynamics simulations required for the calculator. This is used to
    obtain the indices of the atoms adjacent to a central atom and e.g. used to compute the molecular interactions.
    The neighbor mask is zero for interactions which should not be counted and one otherwise.

    Args:
        cutoff (float): Cutoff radius used for neighbor list construction.
        shell (float): Buffer region around the cutoff radius. A new neighbor list is only constructed if an atom
                       moves farther than this shell. (Or if the simulation cell changes.)
        device (torch.device): Device used for neighbor-list tensors.
    """

    def __init__(self, cutoff, shell=None, device=None):
        self.device = device

        # Check cutoff and shell, as well as possible conventions
        self.cutoff = cutoff
        self.shell = shell

        # Buffered cutoff: cutoff + shell when both are given, otherwise just the cutoff.
        if self.cutoff is not None:
            if self.shell is not None:
                self.cutoff_shell = self.cutoff + self.shell
            else:
                self.cutoff_shell = self.cutoff
        else:
            self.cutoff_shell = None

        # Init basic containers (populated by _construct_neighbor_list)
        self.neighbor_list = None
        self.neighbor_mask = None
        self.offsets = None
        self.max_neighbors = None

        # Store last positions and cells for determining whether the neighbor list needs to be recomputed
        self.last_positions = None
        self.last_cells = None

    def get_neighbors(self, system):
        """
        Convenience routine to obtain the neighbor list and neighbor mask in one step.

        Returns:
            tuple: Contains the neighbor list, neighbor mask and cell offset tensors.
        """
        if self._update_required(system) or self.neighbor_list is None:
            self._construct_neighbor_list(system)
        return self.neighbor_list, self.neighbor_mask, self.offsets

    def _construct_neighbor_list(self, system):
        """
        Instructions to construct the neighbor list. Needs to be defined and has to populate the neighbor_list
        and neighbor_mask tensors.

        Both, neighbor_list and neighbor_mask, should be a torch.Tensor with the dimensions:
            n_replicas x n_molecules x n_atoms x n_neighbors
        """
        raise NotImplementedError

    def _update_required(self, system):
        """
        Decide whether the neighbor list must be recomputed, based on how far atoms
        moved relative to the buffer shell and on changes of the simulation cell.

        Returns:
            bool: Indicator whether it is necessary to compute a new neighbor list or not.
        """
        # Compute the neighbor list if no reference positions have been stored yet.
        if self.last_positions is None:
            return True

        # BUGFIX: previously `self.last_cells is None` forced a rebuild on every
        # step for systems WITHOUT a simulation cell (last_cells is never set in
        # that case), defeating the shell buffer entirely. Instead, recompute only
        # when periodicity appears/disappears or the cell itself changed.
        if (self.last_cells is None) != (system.cells is None):
            return True
        if self.last_cells is not None and not torch.allclose(
            self.last_cells, system.cells
        ):
            return True

        # Without a buffer shell there is no safety margin, so always recompute.
        # (Previously `max_displacement >= None` raised a TypeError.)
        if self.shell is None:
            return True

        # Check if atoms have moved out of the buffer region
        max_displacement = torch.max(
            torch.norm(system.positions - self.last_positions, 2, 3)
        ).detach()
        if max_displacement >= self.shell:
            return True

        return False
class SimpleNeighborList(MDNeighborList):
    """
    Basic implementation of a neighbor list. Simply enumerates the neighbors of all atoms in the molecule after
    eliminating self interactions. This work fine for small to medium sized systems, but should not be used for
    extended molecules, etc. The cutoff fulfills no purpose in this basic implementation. This neighbor list should
    never be used in combination with periodic boundary conditions.

    Args:
        cutoff (float): Cutoff radius, not used in the present implementation.
        shell (float): Buffer region, not used in the present implementation.
        device (torch.device): Device for the generated tensors.
    """

    def __init__(self, cutoff=None, shell=None, device=None):
        super(SimpleNeighborList, self).__init__(cutoff, shell, device=device)

    def _construct_neighbor_list(self, system):
        """
        Sets up a basic neighbor list, neighbor mask and offset array. The first two are torch.Tensor objects of the
        dimension: n_replicas x n_molecules x n_atoms x n_neighbors. The offsets have the dimension
        n_replicas x n_molecules x n_atoms x n_neighbors x 3.

        This neighbor list simply enumerates all neighbors (neighbor_list) or mask nonsensical entries due to either
        different cutoff radii or zero-padding arising from molecules of different size (neighbor_mask).
        """
        # Set the maximum neighbors to include all interactions
        self.max_neighbors = system.max_n_atoms - 1

        # Construct basic, unmasked tile: each row lists all atom indices.
        basic_tile = torch.arange(system.max_n_atoms, device=system.device)[
            None, :
        ].repeat(system.max_n_atoms, 1)
        # Remove self interactions (drop the diagonal, shrinking each row by one)
        diagonal_mask = torch.eye(system.max_n_atoms, device=system.device)
        basic_tile = basic_tile[diagonal_mask != 1].view(
            system.max_n_atoms, system.max_n_atoms - 1
        )

        # Tile neighbor lists and mask to get replica and molecule dimensions
        neighbors_list = basic_tile[None, None, :, :].repeat(
            system.n_replicas, system.n_molecules, 1, 1
        )

        # Construct the neighbor mask as an outer product of the atom masks, where self interactions are removed
        # NOTE(review): assumes system.atom_masks broadcasts as
        # (n_replicas, n_molecules, 1, n_atoms) so `[..., 1:]` drops the self column -- confirm.
        self.neighbor_mask = (
            system.atom_masks.transpose(2, 3)[..., 1:] * system.atom_masks
        )

        # Construct cell offsets (all zero: no periodic images are considered here)
        if system.cells is not None:
            self.offsets = torch.zeros(
                system.n_replicas,
                system.n_molecules,
                system.max_n_atoms,
                self.max_neighbors,
                3,
                device=system.device,
            )
        else:
            self.offsets = None

        # Multiply neighbor list by mask to remove superfluous entries
        self.neighbor_list = neighbors_list * self.neighbor_mask.long()

    def _update_required(self, system):
        """
        Since all interatomic distances are computed by default, the neighbor list never has to be updated.

        Returns:
            bool: Always False, the enumeration is static.
        """
        return False
class EnvironmentProviderNeighborList(MDNeighborList):
    """
    Basic neighbor list class to be used with the environment providers with SchNetPack. The corresponding provider
    needs to be set in the `_set_environment_provider` function. Since this currently operates sequentially, it will
    provide suboptimal performance for systems with many replicas and/or molecules.

    Args:
        cutoff (float): Cutoff radius used for neighbor list construction.
        shell (float): Buffer region around the cutoff radius. A new neighbor list is only constructed if an atom
                       moves farther than this shell. (Or if the simulation cell changes.)
        device (torch.device): Device used when computing the neighbor list.
        use_internal_units (bool): Whether the provider receives positions/cutoff in
                                   internal units (True) or in Angstrom (False).
    """

    def __init__(self, cutoff, shell=1.0, device=None, use_internal_units=True):
        super(EnvironmentProviderNeighborList, self).__init__(
            cutoff, shell, device=device
        )
        self.use_internal_units = use_internal_units
        self.provider_cutoff = self._get_provider_cutoff()

        # Setup the environment provider (defined by subclasses)
        self._environment_provider = None
        self._set_environment_provider()

    def _get_provider_cutoff(self):
        """Return the buffered cutoff in the unit system expected by the provider."""
        if self.use_internal_units:
            provider_cutoff = self.cutoff_shell
        else:
            # Convert internal units to Angstrom for providers operating in A.
            provider_cutoff = self.cutoff_shell / schnetpack.md.MDUnits.angs2internal
        return provider_cutoff

    def _set_environment_provider(self):
        """
        This function is intended to set the environment provider in neighbor lists based on this class.
        """
        raise NotImplementedError

    def _construct_neighbor_list(self, system):
        """
        Construct the neighbor list using an environment provider. Since all providers are based on ASE atoms objects,
        these objects are first extracted from the system. Then the neighbor lists ae constructed sequentially and
        reconverted into the format required for the calculators. In addition, the old cells and positons are
        stored to check if updates of the neighbor list are necessary.
        """
        atoms = system.get_ase_atoms(internal_units=self.use_internal_units)

        # Query the provider once per molecule/replica and track the widest list.
        neighbor_idx = []
        offsets = []
        max_neighbors = 0
        for mol in atoms:
            nbh_idx, offset = self._environment_provider.get_environment(mol)
            neighbor_idx.append(nbh_idx)
            offsets.append(offset)
            max_neighbors = max(max_neighbors, nbh_idx.shape[1])

        # Pad with -1 so shorter neighbor lists can be masked out below.
        self.neighbor_list = -torch.ones(
            system.n_replicas,
            system.n_molecules,
            system.max_n_atoms,
            max_neighbors,
            device=system.device,
        ).long()
        self.offsets = torch.zeros(
            system.n_replicas,
            system.n_molecules,
            system.max_n_atoms,
            max_neighbors,
            3,
            device=system.device,
        )

        # Scatter the per-molecule results into the padded replica/molecule layout.
        count = 0
        for r_idx in range(system.n_replicas):
            for m_idx in range(system.n_molecules):
                n_atoms = system.n_atoms[m_idx]
                n_nbh = neighbor_idx[count].shape[1]
                self.neighbor_list[r_idx, m_idx, :n_atoms, :n_nbh] = torch.from_numpy(
                    neighbor_idx[count]
                )
                if system.cells is not None:
                    self.offsets[r_idx, m_idx, :n_atoms, :n_nbh, :] = torch.from_numpy(
                        offsets[count]
                    )
                else:
                    # NOTE(review): this discards offsets entirely as soon as one
                    # system has no cell -- presumably cells are all-or-nothing; confirm.
                    self.offsets = None
                count += 1

        self.max_neighbors = max_neighbors

        # Valid entries are the non-negative indices; build a 0/1 mask from them.
        self.neighbor_mask = torch.zeros_like(self.neighbor_list)
        self.neighbor_mask[self.neighbor_list >= 0] = 1.0

        # Mask away -1 indices for invalid atoms, since they do not work with torch.gather
        self.neighbor_list = self.neighbor_list * self.neighbor_mask.long()

        # Since the list was recomputed, update old cells and positions
        self.last_positions = system.positions.clone().detach()

        if system.cells is not None:
            self.last_cells = system.cells.clone().detach()
class ASENeighborList(EnvironmentProviderNeighborList):
    """
    Neighbor list backed by schnetpack.utils.environment.AseEnvironmentProvider.
    Handles periodic boundary conditions and general unit cells, but runs on CPU
    only, so it pays off mainly for very large systems.

    The ASE neighbor_list internally uses a minimum bin size of 3A, hence positions
    and cells are converted to Angstrom (use_internal_units=False) before being
    handed to the provider to avoid performance issues.

    Args:
        cutoff (float): Cutoff radius used for neighbor list construction.
        shell (float): Buffer region around the cutoff radius; the list is only
            rebuilt when an atom moves farther than this (or the cell changes).
        device (torch.device): Device used when computing the neighbor list.
    """

    def __init__(self, cutoff, shell, device=None):
        # ASE expects Angstrom, so internal units are disabled here.
        super(ASENeighborList, self).__init__(
            cutoff=cutoff, shell=shell, device=device, use_internal_units=False
        )

    def _set_environment_provider(self):
        """Instantiate the ASE-based provider with the (converted) buffered cutoff."""
        provider = schnetpack.environment.AseEnvironmentProvider(self.provider_cutoff)
        self._environment_provider = provider
class TorchNeighborList(EnvironmentProviderNeighborList):
    """
    Neighbor list based on the schnetpack.utils.environment.TorchEnvironmentProvider. For moderately sized systems
    with cells/periodic boundary conditions this should have a good performance.

    Args:
        cutoff (float): Cutoff radius used for neighbor list construction.
        shell (float): Buffer region around the cutoff radius. A new neighbor list is only constructed if an atom
                       moves farther than this shell. (Or if the simulation cell changes.)
        device (torch.device): Device used when computing the neighbor list.
    """

    def __init__(self, cutoff, shell, device=torch.device("cpu")):
        super(TorchNeighborList, self).__init__(
            cutoff=cutoff, shell=shell, device=device
        )

    def _set_environment_provider(self):
        """
        Set the environment provider.
        """
        # Consistency fix: use the precomputed provider cutoff, as ASENeighborList
        # does. With use_internal_units=True (the base-class default used here)
        # provider_cutoff == cutoff_shell, so behavior is unchanged.
        self._environment_provider = schnetpack.environment.TorchEnvironmentProvider(
            self.provider_cutoff, self.device
        )
class DualNeighborList:
    """Pair of neighbor lists built from the same class: a short-range one served
    by the default accessors and a long-range one served by the ``_lr`` accessors."""

    def __init__(
        self, cutoff_short, cutoff_long, neighbor_list, shell=1.0, device=None
    ):
        # The two lists differ only in their cutoff radius.
        self.neighbor_list_short = neighbor_list(cutoff_short, shell, device=device)
        self.neighbor_list_long = neighbor_list(cutoff_long, shell, device=device)

    def get_neighbors(self, system):
        """Return (neighbors, mask, offsets) from the short-range list."""
        return self.neighbor_list_short.get_neighbors(system)

    def get_neighbors_lr(self, system):
        """Return (neighbors, mask, offsets) from the long-range list."""
        return self.neighbor_list_long.get_neighbors(system)

    @property
    def max_neighbors(self):
        # Widest neighbor count of the short-range list.
        return self.neighbor_list_short.max_neighbors

    @property
    def max_neighbors_lr(self):
        # Widest neighbor count of the long-range list.
        return self.neighbor_list_long.max_neighbors
| StarcoderdataPython |
9702964 | <reponame>Kaiyuan-Zhang/Gravel-public<filename>specs/lb_rw.py<gh_stars>1-10
from gravel_spec.utils import *
from gravel_spec.ops import *
from gravel_spec.element import *
from gravel_spec.graph import *
from gravel_spec.config import *
class IPFilter(Element):
    """Single-port element that forwards only IPv4 frames (EtherType 0x0800)."""
    ele_name = 'IPFilter'
    num_in_ports = 1
    num_out_ports = 1

    def process_packet(self, old, p, in_port):
        """Emit the packet unchanged on port 0 iff it is IPv4; state is untouched."""
        is_ipv4 = p.ether.ether_type == 0x0800
        action = {
            'pre_cond': is_ipv4,
            'packets': {0: p},
            'new_state': old,
        }
        return [action]
class TcpFilter(Element):
    """Single-port element that forwards only TCP segments (IP protocol 6)."""
    ele_name = 'udp_tcp_filter'
    num_in_ports = 1
    num_out_ports = 1

    def process_packet(self, old, p, in_port):
        """Emit the packet unchanged on port 0 iff its IP protocol is TCP."""
        carries_tcp = p.ip4.proto == 6
        return [{
            'pre_cond': carries_tcp,
            'packets': {0: p},
            'new_state': old,
        }]
class LBStorage(Element):
    """Flow table for the load balancer: replays remembered backend decisions for
    known flows (port 0 in -> port 0 out), punts unknown flows to the scheduler
    (port 1 out), and learns new decisions arriving back on port 1."""
    ele_name = 'lb_storage'
    num_in_ports = 2
    num_out_ports = 2
    # decisions: flow id (src ip, src port) -> chosen backend ip
    # timestamps: flow id -> last time the flow was seen
    # curr_time: logical clock, advanced via handle_event
    private_state_type = [('decisions', 'map', (4, 2), (4,)),
                          ('timestamps', 'map', (4, 2), (8,)),
                          ('curr_time', 'bitvec', 8)]

    def process_packet(self, old, p, in_port):
        """Return the three symbolic cases: known flow, learned flow, unknown flow."""
        flow_id = p.ip4.src, p.tcp.src
        # Case 1: packet from the wire whose flow already has a decision.
        is_known_flow = And(in_port == 0, old.decisions.has_key(flow_id))
        new_p = p.copy()
        new_p.ip4.dst = old.decisions[flow_id][0]
        # Refresh the flow's timestamp so it is not expired while active.
        timestamp_updated = old.copy()
        timestamp_updated.timestamps[flow_id] = old.curr_time

        # Case 3: packet from the wire with no decision yet -> punt to scheduler.
        is_unknown_flow = And(in_port == 0, Not(old.decisions.has_key(flow_id)))

        # Case 2: packet returning from the scheduler -> record its chosen dst.
        register_new_flow = in_port == 1
        new = old.copy()
        flow_id = p.ip4.src, p.tcp.src
        new.decisions[flow_id] = p.ip4.dst
        new.timestamps[flow_id] = new.curr_time

        return [{ 'pre_cond' : is_known_flow,
                  'packets' : { 0 : new_p },
                  'new_state' : timestamp_updated },
                { 'pre_cond' : register_new_flow,
                  'packets' : { 0 : p },
                  'new_state' : new },
                { 'pre_cond' : is_unknown_flow,
                  'packets' : { 1 : p },
                  'new_state' : old }]

    def handle_event(self, s, event, *params):
        """Advance the clock to params[0] and expire flows idle longer than 600."""
        new = s.copy()
        new.curr_time = params[0]
        # NOTE(review): `-1 - 600` relies on unsigned bitvector wraparound
        # (ULT against the top of the range) to guard the 600-tick expiry
        # arithmetic against overflow -- confirm intended semantics.
        expire_filter = lambda ks, vs: And(s.timestamps.has_key(ks),
                                           z3.ULT(s.timestamps[ks][0], -1 - 600),
                                           z3.UGE(new.curr_time, 600 + s.timestamps[ks][0]))
        # Drop expired entries from both maps to keep them in sync.
        new.decisions = new.decisions.filter(expire_filter)
        new.timestamps = new.timestamps.filter(expire_filter)
        return [{ 'pre_cond' : z3.BoolVal(True),
                  'packets' : {},
                  'new_state' : new }]
class Scheduler(Element):
    """Round-robin scheduler that rewrites the IPv4 destination of TCP packets
    to the next backend in addr_map, advancing an internal counter."""
    ele_name = 'scheduler'
    num_in_ports = 1
    num_out_ports = 1
    # addr_map: slot index -> backend ip; cnt: round-robin cursor; num_dsts: backend count
    private_state_type = [('addr_map', 'map', (4,), (4,)),
                          ('cnt', 'bitvec', 4),
                          ('num_dsts', 'bitvec', 4)]

    def process_packet(self, old, p, in_port):
        """Steer TCP packets to the current backend; drop non-TCP, always advance cnt."""
        slot = old.cnt % old.num_dsts
        chosen_dst = old.addr_map[slot][0]

        advanced = old.copy()
        advanced.cnt = (old.cnt + 1) % old.num_dsts

        # NOTE: kept from the original implementation -- the result is unused.
        has_backend = old.addr_map.has_key(slot)

        steered = p.copy()
        steered.ip4.dst = chosen_dst

        tcp_case = {'pre_cond': And(is_tcp(p)),
                    'packets': {0: steered},
                    'new_state': advanced}
        other_case = {'pre_cond': Not(And(is_tcp(p))),
                      'packets': {},
                      'new_state': advanced}
        return [tcp_case, other_case]
class Maglev(Element):
    """Maglev-style consistent-hashing backend selector: hashes the TCP 4-tuple
    into a lookup table and rewrites the destination accordingly."""
    ele_name = 'maglev_selector'
    num_in_ports = 1
    num_out_ports = 1
    # lookup_table: hash bucket -> backend ip; hash_func: uninterpreted hash
    private_state_type = [('lookup_table', 'map', (4,), (4,)),
                          ('hash_func', 'uf', (12,), 4)]

    def process_packet(self, old, p, in_port):
        """Unconditionally steer the packet to the backend of its hash bucket."""
        four_tuple = (p.ip4.src, p.tcp.src, p.ip4.dst, p.tcp.dst)
        bucket = old.hash_func(Concat(*four_tuple))
        backend = old.lookup_table[bucket][0]

        steered = p.copy()
        steered.ip4.dst = backend
        return [{'pre_cond': z3.BoolVal(True),
                 'packets': {0: steered},
                 'new_state': old}]

    def state_inv(self, s):
        """Invariant: every possible hash bucket is populated in the lookup table."""
        bucket = fresh_bv('k', 32)
        return ForAll([bucket], s.lookup_table.has_key(bucket))
def get_flow_id(p):
    """Return the TCP 4-tuple (src ip, src port, dst ip, dst port) identifying p's flow."""
    return (p.ip4.src, p.tcp.src, p.ip4.dst, p.tcp.dst)
def from_same_flow(p1, p2):
    """Constraint: p1 and p2 are both TCP and carry the same 4-tuple."""
    same_tuple = [p2.ip4.src == p1.ip4.src,
                  p2.ip4.dst == p1.ip4.dst,
                  p2.tcp.src == p1.tcp.src,
                  p2.tcp.dst == p1.tcp.dst]
    return And(is_tcp(p1), is_tcp(p2), *same_tuple)
def is_tcp(p):
    """Constraint: p is an IPv4 frame (EtherType 0x0800) carrying TCP (proto 6)."""
    is_ipv4 = p.ether.ether_type == 0x0800
    proto_is_tcp = p.ip4.proto == 6
    return And(is_ipv4, proto_is_tcp)
def steer_to(c, s, p, dst_ip, t=None):
    """Constraint: after optionally firing the cache timer event at time t,
    packet p injected on 'in' reaches 'out' with destination dst_ip, and
    nothing leaks toward the scheduler (cache port 1 stays empty)."""
    state = s
    if t is not None:
        _, state = c.handle_event(s, 'cache', '', t)
    o, _ = c.process_packet(state, 'in', p)
    return And(Not(o['out'].is_empty()),
               o['out'].ip4.dst == dst_ip,
               o['__edges']['cache'][1].is_empty())
class LBTasks(ConfigVerifyTask, unittest.TestCase):
    """Verification tasks for the load-balancer pipeline:
    in -> ip_filter -> tcp_filter -> cache -> lb (scheduler) -> cache -> out."""

    @classmethod
    def build_conf(cls):
        """Assemble the packet format (ether/ip4/tcp/payload) and the element graph."""
        parser = HeaderParser()
        parser.add_header('ether', ETHER_HDR)
        parser.add_header('ip4', IPv4_HDR)
        parser.add_header('tcp', TCP_HDR)
        parser.add_header('payload', [('data', 1500)])
        # Parse ip4 only for EtherType 0x0800 and tcp only for IP proto 6.
        path = ParserEdge('ether') >> (('ether_type', 'eq', 0x0800), 'ip4') \
            >> (('proto', 'eq', 6), 'tcp') >> (('always'), 'payload')
        parser.add_edges(path)
        elements = [('in', Source),
                    ('out', Sink),
                    ('ip_filter', IPFilter),
                    ('tcp_filter', TcpFilter),
                    ('cache', LBStorage),
                    ('lb', Scheduler)]
        # cache port 1 punts unknown flows to the scheduler; the scheduler's
        # decision is learned back through cache port 0 and forwarded to out.
        path = Path('in', 0) >> (0, 'ip_filter', 0) >> (0, 'tcp_filter', 0) \
            >> (0, 'cache', 1) >> (0, 'lb', 0) >> (1, 'cache', 0) >> (0, 'out')
        return Config(elements, path.edges(), parser)

    def test_tcp_only(self):
        """Only TCP/IPv4 packets may ever reach the output."""
        c = self.conf()
        p, old_states = c.fresh_packet(), c.fresh_states()
        out, _ = c.process_packet(old_states, 'in', p)
        self.verify(Implies(Not(out['out'].is_empty()),
                            is_tcp(p)))

    def test_always_steer(self):
        """Every TCP/IPv4 packet is forwarded (never dropped)."""
        c = self.conf()
        p, s = c.fresh_packet(), c.fresh_states()
        out, s2 = c.process_packet(s, 'in', p)
        self.verify(Implies(And(p.ether.ether_type == 0x0800,
                                p.ip4.proto == 6),
                            Not(out['out'].is_empty())),
                    lambda m: [m.eval(out['__edges']['cache'][1].is_empty()),
                               m.eval(out['__edges']['lb'][0].is_empty())])

    def test_persistency(self):
        """Two back-to-back packets of the same flow go to the same backend."""
        c = self.conf()
        p1, p2, old_states = c.fresh_packet(), c.fresh_packet(), c.fresh_states()
        out1, new_s = c.process_packet(old_states, 'in', p1)
        p2.ip4.src, p2.ip4.dst = p1.ip4.src, p1.ip4.dst
        p2.tcp.src, p2.tcp.dst = p1.tcp.src, p1.tcp.dst
        out2, _ = c.process_packet(new_s, 'in', p2)
        self.verify(Implies(And(Not(out1['out'].is_empty()),
                                Not(out2['out'].is_empty())),
                            out1['out'].ip4.dst == out2['out'].ip4.dst))

    def test_step_init(self):
        """Base case: after one packet, the steer_to invariant holds for its flow."""
        c = self.conf()
        dst_ip = fresh_bv('dst_ip', 32)
        p0, p1, s0 = c.fresh_packet(), c.fresh_packet(), c.fresh_state()
        o, s1 = c.process_packet(s0, 'in', p0)
        dst_ip = o['out'].ip4.dst
        t = s0['cache'].curr_time
        self.verify(Implies(And(p0.ether.ether_type == 0x0800,
                                p0.ip4.proto == 6),
                            steer_to(c, s1, p0, dst_ip, t)))

    def test_step_packet(self):
        """Inductive step: an unrelated packet does not disturb the invariant."""
        c = self.conf()
        dst_ip = fresh_bv('dst_ip', 32)
        p0, p1, s0 = c.fresh_packet(), c.fresh_packet(), c.fresh_state()
        t = fresh_bv('time', 64)
        p_diff = c.fresh_packet()
        _, s1 = c.process_packet(s0, 'in', p_diff)
        out, s2 = c.process_packet(s1, 'in', p1)
        self.verify(Implies(And(steer_to(c, s0, p0, dst_ip, t),
                                And(p_diff.ip4.src != p1.ip4.src,
                                    p_diff.tcp.src != p1.tcp.src),
                                from_same_flow(p0, p1)),
                            steer_to(c, s1, p1, dst_ip, t)),
                    lambda m: [m.eval(s1['cache'].timestamps[p1.ip4.src, p1.tcp.src][0]),
                               m.eval(dst_ip),
                               m.eval(t),
                               m.eval(s0['cache'].timestamps[p1.ip4.src, p1.tcp.src][0]),
                               m.eval(out['__edges']['cache'][0].is_empty())])

    def test_step_time(self):
        """Inductive step: moving the clock backwards-compatible (t1 < t0) keeps the invariant."""
        c = self.conf()
        dst_ip = fresh_bv('dst_ip', 32)
        p0, p1, s0 = c.fresh_packet(), c.fresh_packet(), c.fresh_state()
        #t0 = s0['cache'].timestamps[get_flow_id(p0)][0]
        t0 = fresh_bv('time0', 64)
        t1 = fresh_bv('time1', 64)
        _, s1 = c.handle_event(s0, 'cache', '', t1)
        flow_id = get_flow_id(p0)
        self.verify(Implies(And(steer_to(c, s0, p0, dst_ip, t0),
                                z3.ULT(t1, t0),
                                from_same_flow(p0, p1)),
                            steer_to(c, s1, p1, dst_ip, t0)))
| StarcoderdataPython |
12846173 | import json
import logging
from binance.helpers import round_step_size
from sqlalchemy import false
from ..enums import *
import bson
import abc
import itertools
from ..objects import EState, EOrderType, ECommand, EnhancedJSONEncoder
from ..utils import safe_sum, round_step_downward, truncate, safe_multiply, safe_substract
from .. import binance_filters as filters
from ..exceptions import NotImplementedException
logger = logging.getLogger('app')
class StrategyBase(metaclass=abc.ABCMeta):
    """Abstract base class for trading strategies: holds per-strategy config,
    capital-allocation bookkeeping, and drives the lifecycle of live trades."""
    # NOTE: fee can stay here until a better place is found
    fee = 0  # Broker commission rate; overwritten from the config in __init__.
    def __init__(self, _name, _config, _symbol_info):
        """Bind the strategy to its named section of the config and symbol info.

        Args:
            _name (str): Strategy name; key into _config['strategy'].
            _config (dict): Full application config (broker + strategy sections).
            _symbol_info (dict): Per-pair exchange symbol info (filters etc.).
        """
        self.name = _name
        self.alloc_ratio = 0
        self.logger = logging.getLogger('app.{}'.format(__name__))
        self.config = _config['strategy'][self.name]
        # Max number of live trade objects this strategy may hold simultaneously.
        self.max_lto = self.config.get('max_lto',1)

        # NOTE: Assigning the fee multiple times is not the most optimal solution
        StrategyBase.fee = _config['broker'].get('fee', 0)

        # TODO: Rename this config as strategy config etc. because some modules means the whole config dict some are just a portion
        self.quote_currency = _config['broker']['quote_currency']
        # TODO: Make proper handling for symbol_info
        self.symbol_info = _symbol_info

        # NOTE: Hardcoded time-scales list (scales should be in ascending order)
        self.min_period = self.config['time_scales'][0]
        # All (time_scale, pair) combinations this strategy analyzes.
        self.meta_do = list(itertools.product(self.config['time_scales'], self.config['pairs']))

        # It seems possible to have this on_STAT_EXIT_EXP() like approach. Surely needs to be tried again.
        # Since it facilitates so much new strategy creation and modular implementation

        # NOTE: strategywise_alloc_rate determines the available rate of use from the main capital
        # If self.strategywise_alloc_rate is 0.25 then this strategy can use max %25 of the main capital
        self.strategywise_alloc_rate = 0 # Will be filled by the strategy manager

        # NOTE: pairwise_alloc_rate determines the available rate of use from the strategywise allocated capital
        # If self.strategywise_alloc_rate is 0.25 then this strategy can use max %25 of the main capital
        pass
@staticmethod
def is_lto_dead(trade):
if trade.command == ECommand.CANCEL or trade.status == EState.CLOSED:
return True # Trade is dead
else:
return False # Trade is alive # Skip evaluation if non of this is true (LTO will be alive until the next cycle)
    @staticmethod
    async def run_logic(self, analysis_dict, trade_list, ikarus_time, total_qc, free_qc):
        """Handle live trades, compute free capital, and open new trades per pair.

        NOTE(review): declared @staticmethod yet takes `self` -- callers invoke it
        as StrategyBase.run_logic(strategy, ...); confirm this is intentional.

        Args:
            analysis_dict (dict): Per-pair market analysis, keyed by pair.
            trade_list (list): Currently live trade objects (LTOs) of this strategy.
            ikarus_time: Current evaluation time.
            total_qc: Total capital in quote currency.
            free_qc: Free (unallocated) capital in quote currency.

        Returns:
            list: Newly created trade objects (possibly empty).
        """
        # Preliminary condition: all of the config['pairs'] exist in analysis_dict
        if not set(self.config['pairs']).issubset(analysis_dict.keys()):
            self.logger.warn(f"Configured pair \"{self.config['pairs']}\" does not exist in analysis_dict. Skipping {self.name}.run")
            return []

        # Initialize trade_dict to be filled
        trade_objects = []

        # Handle LTOs separately before the new evaluation
        # Create a mapping between the pair and lto such as {'BTCUSDT':{...}, ...}
        pair_grouped_ltos = {}
        alive_lto_counter = 0
        in_trade_capital = 0
        dead_lto_capital = 0
        for lto_idx in range(len(trade_list)):
            # If handle_lto_logic fails then it means that the trade_list[lto_idx] is unchanged.
            if not await StrategyBase.handle_lto_logic(self, analysis_dict, trade_list[lto_idx], ikarus_time):
                self.logger.warn(f"Function failed: 'handle_lto_logic'. Trade info: '{trade_list[lto_idx]._id}', '{trade_list[lto_idx].strategy}'")

            pair_grouped_ltos[trade_list[lto_idx].pair] = trade_list[lto_idx]

            # It is needed to know how many of LTOs are dead or will be dead
            if not StrategyBase.is_lto_dead(trade_list[lto_idx]):
                # NOTE: in_trade_capital is only calcualted for LTOs that will last until at least next candle
                #in_trade_capital += lto_list[lto_idx][PHASE_ENTER][TYPE_LIMIT]['amount']
                # NOTE: For the enter_expire, PHASE_ENTER can be directly reflected to balance
                # market_exit is not considered as dead lto
                # The result of the OCO orders is unknown
                in_trade_capital = safe_sum(in_trade_capital, trade_list[lto_idx].enter.amount)
                alive_lto_counter += 1
                # NOTE: TYPE_MARKET PHASE:_EXIT LTOs are considered as alive right here. Not sure if it is a good approach
            else:
                # Dead capital
                dead_lto_capital = safe_sum(dead_lto_capital, trade_list[lto_idx].enter.amount)

        # NOTE: Only iterate for the configured pairs. Do not run the strategy if any of them is missing in analysis_dict
        total_lto_slot = min(self.max_lto, len(self.config['pairs']))
        empty_lto_slot = total_lto_slot - alive_lto_counter

        if empty_lto_slot < 1:
            return [] # TODO Debug this ansync LTO issue buy doing debugging around here

        # Evaluate pairwise_alloc_share: capital usable by this strategy minus what
        # is already locked in alive trades, capped by what is actually free.
        strategy_capital = safe_multiply(total_qc, self.strategywise_alloc_rate)
        #for lto in lto_list:
        #    in_trade_capital += lto[PHASE_ENTER][TYPE_LIMIT]['amount']
        free_strategy_capital = safe_substract(strategy_capital, in_trade_capital)

        available_capital = min(free_strategy_capital, safe_sum(free_qc, dead_lto_capital))
        # TODO: This can be updated to use some kind of precision from the symbol info instead of hardcoded 8
        pairwise_alloc_share = truncate(available_capital/empty_lto_slot, 8)

        #available_lto_capital = min(pairwise_alloc_share, free_qc+dead_lto_capital)

        # Iterate over pairs and make decisions about them
        for ao_pair in self.config['pairs']:

            # Break if there is no empty_lto_slot left
            if empty_lto_slot < 1:
                break

            # Continue if the LTO of the pair is not dead
            if ao_pair in pair_grouped_ltos.keys():
                if not StrategyBase.is_lto_dead(pair_grouped_ltos[ao_pair]):
                    continue

            # Perform evaluation
            if trade:= await self.make_decision(analysis_dict, ao_pair, ikarus_time, pairwise_alloc_share):

                # Apply exchange filters
                if not StrategyBase.apply_exchange_filters(trade.enter, self.symbol_info[ao_pair]):
                    continue

                trade_objects.append(trade)
                empty_lto_slot -= 1

        return trade_objects
@staticmethod
async def handle_lto_logic(self, analysis_dict, trade, ikarus_time):
"""
This function decides what to do for the LTOs based on their 'status'
"""
is_success = False
if trade.status == EState.ENTER_EXP:
if self.config['action_mapping'][EState.ENTER_EXP] == ECommand.CANCEL:
is_success = await self.on_cancel(trade)
elif trade.status == EState.EXIT_EXP:
if self.config['action_mapping'][EState.EXIT_EXP] == ECommand.UPDATE:
is_success = await self.on_update(trade, ikarus_time, analysis_dict=analysis_dict)
elif self.config['action_mapping'][EState.EXIT_EXP] == ECommand.MARKET_EXIT:
# NOTE: Market exit requires the exit prices to be known, thus provide the analysis_dict to that
is_success = await StrategyBase.on_market_exit(self, trade, analysis_dict)
elif trade.status == EState.WAITING_EXIT:
# LTO is entered succesfully, so exit order should be executed
# NOTE: expire of the exit_module can be calculated after the trade entered
is_success = await self.on_waiting_exit(trade, analysis_dict)
else:
is_success = True
return is_success
    # NOTE(review): abc.abstractclassmethod is deprecated in favor of
    # @classmethod + @abc.abstractmethod -- confirm subclasses before changing.
    @abc.abstractclassmethod
    async def on_update(self):
        """Handle an expired exit order by updating it (implemented by subclasses)."""
        pass
    @staticmethod
    async def on_market_exit(self, trade, analysis_dict):
        """Convert a trade's exit into a market exit at the current price (stub).

        NOTE(review): everything below the raise (the legacy string block and the
        final return) is unreachable until this is implemented.
        """
        # TODO: Create market exit logic
        raise NotImplementedException()
        '''
        #lto = await StrategyBase._config_market_exit(lto, self.config['exit']['type'])
        lto['exit'] = await StrategyBase._create_exit_module(
            TYPE_MARKET,
            0,
            lto['result'][PHASE_ENTER]['quantity'],
            analysis_dict[lto['pair']][self.min_period]['close'],
            0)
        lto['exit'][TYPE_MARKET] = await StrategyBase.apply_exchange_filters(lto, self.symbol_info[lto['pair']])
        trade.exi
        trade.command = ECommand.MARKET_EXIT
        self.logger.info(f'LTO: market exit configured') # TODO: Add orderId
        '''
        return trade
@abc.abstractclassmethod
async def on_waiting_exit(self):
    # Abstract hook: place the exit order once a trade's entry has filled.
    # NOTE(review): concrete implementations are invoked with
    # (trade, analysis_dict) -- confirm intended signature.
    pass
@abc.abstractclassmethod
async def on_closed(self):
    # Abstract hook: handle a trade whose lifecycle has completed.
    # NOTE(review): abc.abstractclassmethod is deprecated in favour of
    # stacking @classmethod over @abc.abstractmethod -- left unchanged.
    pass
@classmethod
def __subclasshook__(cls, subclass):
    """
    Structural subclass check used by issubclass(): a class qualifies
    when it provides callable ``run`` and ``dump_to`` attributes.

    Returns True on a match and NotImplemented otherwise (which defers to
    the normal subclass machinery), mirroring the usual ABC idiom.
    """
    for required in ('run', 'dump_to'):
        candidate = getattr(subclass, required, None)
        if not callable(candidate):
            return NotImplemented
    return True
@staticmethod
def _eval_future_candle_time(start_time, count, minute):
    """Return ``start_time`` advanced by ``count`` candles of ``minute``
    minutes each, as a BSON Int64 epoch-milliseconds value."""
    offset_ms = count * minute * 60 * 1000
    return bson.Int64(start_time + offset_ms)
@staticmethod
async def _config_market_exit(lto, type):
    """
    Rewrite ``lto`` so that its exit is performed as a market order,
    copying amount/quantity from the existing exit module of ``type``.
    """
    # TODO: NEXT NEXT Integrate fee to market order
    # Continue here
    # TODO: Integrate price to market order, even if it has no use
    # For now, it works and I am not gonna touch it for a rework
    lto['action'] = ACTN_MARKET_EXIT
    # NOTE(review): `type` shadows the builtin; it names the current exit
    # order type (e.g. limit/oco) whose fields seed the market order.
    lto['exit'][TYPE_MARKET] = {
        'amount': lto['exit'][type]['amount'],
        'quantity': lto['exit'][type]['quantity'],
        'orderId': '',
    }
    return lto
@staticmethod
def apply_exchange_filters(trade_order, symbol_info):
    # TODO: Make the function order specific using trade_order instead of trade
    """
    Apply the exchange's pair filters to an order, mutating it in place.

    Call this method prior to any order placement. It rounds quantity and
    price fields so they satisfy the exchange filters; it does not check
    whether current market conditions are otherwise suitable. Enter and
    exit orders are handled identically since the filters apply to both.

    :param trade_order: enter or exit order module (mutated in place)
    :param symbol_info: exchange symbol metadata containing the filters
    :returns: True when the order satisfies the filters after rounding,
        False when it must be skipped (LOT_SIZE or MIN_NOTIONAL failure)
    """
    # LOT_SIZE
    # https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#lot_size
    if result := filters.lot_size(trade_order.quantity, symbol_info):
        trade_order.quantity = result
    else:
        #logger.error(f"Filter failure: LOT_SIZE. {trade.strategy} in phase {phase} with quantity {str(trade.enter.quantity)}")
        return False

    # PRICE_FILTER
    # https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#price_filter
    # NOTE(review): the order type is matched by comparing the class name
    # against EOrderType values -- assumes members equal the class names.
    if type(trade_order).__name__ == EOrderType.MARKET:
        pass

    elif type(trade_order).__name__ == EOrderType.LIMIT:
        trade_order.set_price(round_step_downward(trade_order.price, float(symbol_info['filters'][0]['tickSize'])))  # Fixing PRICE_FILTER: tickSize
        if trade_order.price > float(symbol_info['filters'][0]['maxPrice']):
            pass
            # TODO: BUG: NEXT: Add proper error handling or check for the prices

    elif type(trade_order).__name__ == EOrderType.OCO:
        trade_order.set_price(round_step_downward(trade_order.price, float(symbol_info['filters'][0]['tickSize'])))  # Fixing PRICE_FILTER: tickSize
        trade_order.stopPrice = round_step_downward(trade_order.stopPrice, float(symbol_info['filters'][0]['tickSize']))
        trade_order.stopLimitPrice = round_step_downward(trade_order.stopLimitPrice, float(symbol_info['filters'][0]['tickSize']))

        if not filters.min_notional(trade_order.stopPrice, trade_order.quantity, symbol_info):
            # Logger.warn is a deprecated alias; use warning().
            logger.warning(f"Trade object skipped due to MIN_NOTIONAL filter for {symbol_info['symbol']}. NTO: {json.dumps(trade_order, cls=EnhancedJSONEncoder)}")
            return False

    # MIN_NOTIONAL
    # https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#min_notional
    if not filters.min_notional(trade_order.price, trade_order.quantity, symbol_info):
        logger.warning(f"Trade object skipped due to MIN_NOTIONAL filter for {symbol_info['symbol']}. NTO: {json.dumps(trade_order, cls=EnhancedJSONEncoder)}")
        return False

    return True
| StarcoderdataPython |
8140543 | import math
from . import _catboost
from .core import CatBoost, CatBoostError
from .utils import _import_matplotlib
FeatureExplanation = _catboost.FeatureExplanation
def _check_model(model):
    """Raise CatBoostError unless ``model`` is a CatBoost instance."""
    if isinstance(model, CatBoost):
        return
    raise CatBoostError("Model should be CatBoost")
def to_polynom(model):
    # Convert a trained CatBoost model into its polynomial representation
    # (delegates to the native _catboost extension).
    _check_model(model)
    return _catboost.to_polynom(model._object)
def to_polynom_string(model):
    # String form of the model's polynomial representation
    # (delegates to the native _catboost extension).
    _check_model(model)
    return _catboost.to_polynom_string(model._object)
def explain_features(model):
    # Per-feature explanations (FeatureExplanation objects) for the model
    # (delegates to the native _catboost extension).
    _check_model(model)
    return _catboost.explain_features(model._object)
def calc_features_strength(model):
    """Return per-feature strength values for ``model``.

    Delegates to explain_features() and calls calc_strength() on each
    per-feature explanation, preserving feature order.
    """
    return [explanation.calc_strength() for explanation in explain_features(model)]
def plot_pdp(arg, size_per_plot=(5, 5), plots_per_row=None):
    """Plot partial-dependence-style plots for feature explanations.

    ``arg`` may be a CatBoost model, a single FeatureExplanation, or a
    list of FeatureExplanation objects. Returns one matplotlib figure per
    feature, with one subplot per model dimension.
    """
    with _import_matplotlib() as _plt:
        plt = _plt
    # Normalise the argument into a non-empty list of FeatureExplanation.
    if isinstance(arg, CatBoost):
        arg = explain_features(arg)
    if isinstance(arg, _catboost.FeatureExplanation):
        arg = [arg]
    assert len(arg) > 0
    assert isinstance(arg, list)
    for element in arg:
        assert isinstance(element, _catboost.FeatureExplanation)

    figs = []
    for feature_explanation in arg:
        dimension = feature_explanation.dimension()
        if not plots_per_row:
            plots_per_row = min(5, dimension)
        rows = int(math.ceil(dimension / plots_per_row))
        fig, axes = plt.subplots(rows, plots_per_row)
        fig.suptitle("Feature #{}".format(feature_explanation.feature))
        # plt.subplots squeezes singleton dimensions; re-wrap so that
        # axes is always indexable as axes[row][col].
        if rows == 1:
            axes = [axes]
        if plots_per_row == 1:
            axes = [[row_axes] for row_axes in axes]
        fig.set_size_inches(size_per_plot[0] * plots_per_row, size_per_plot[1] * rows)

        for dim in range(dimension):
            ax = axes[dim // plots_per_row][dim % plots_per_row]
            ax.set_title("Dimension={}".format(dim))
            ax.set_xlabel("feature value")
            ax.set_ylabel("model value")
            borders, values = feature_explanation.calc_pdp(dim)

            xs = []
            ys = []
            if feature_explanation.type == "Float":
                if len(borders) == 0:
                    # No splits on this feature: draw a flat segment at the
                    # expected bias around x=0.
                    xs.append(-0.1)
                    xs.append(0.1)
                    ys.append(feature_explanation.expected_bias[dim])
                    ys.append(feature_explanation.expected_bias[dim])
                    ax.plot(xs, ys)
                else:
                    # Step function: start at the expected bias left of the
                    # first border, then step to each new value at a border.
                    offset = max(0.1, (borders[0] + borders[-1]) / 2)
                    xs.append(borders[0] - offset)
                    ys.append(feature_explanation.expected_bias[dim])
                    for border, value in zip(borders, values):
                        xs.append(border)
                        ys.append(ys[-1])
                        xs.append(border)
                        ys.append(value)
                    xs.append(borders[-1] + offset)
                    ys.append(ys[-1])
                    ax.plot(xs, ys)
            else:
                # Categorical-like feature: one bar per border plus the bias.
                xs = ['bias'] + list(map(str, borders))
                ys = feature_explanation.expected_bias[dim] + values
                ax.bar(xs, ys)
        figs.append(fig)
    return figs
def plot_features_strength(model, height_per_feature=0.5, width_per_plot=5, plots_per_row=None):
    """Plot horizontal-bar charts of per-feature strengths, one subplot
    per model dimension.

    :param model: trained CatBoost model
    :param height_per_feature: figure height budget per feature (inches)
    :param width_per_plot: width of each subplot (inches)
    :param plots_per_row: subplots per row (default: min(5, dimension))
    :returns: the matplotlib figure
    """
    with _import_matplotlib() as _plt:
        plt = _plt
    strengths = calc_features_strength(model)

    dimension = len(strengths[0])
    features = len(strengths)
    if not plots_per_row:
        plots_per_row = min(5, dimension)
    rows = int(math.ceil(dimension / plots_per_row))
    fig, axes = plt.subplots(rows, plots_per_row)
    # plt.subplots squeezes singleton dimensions; re-wrap so that axes is
    # always indexable as axes[row][col].
    if rows == 1:
        axes = [axes]
    if plots_per_row == 1:
        axes = [[row_axes] for row_axes in axes]
    fig.suptitle("Features Strength")
    fig.set_size_inches(width_per_plot * plots_per_row, height_per_feature * features * rows)

    for dim in range(dimension):
        # BUG FIX: the original rebound `strengths` inside this loop, so any
        # dimension after the first indexed into the flattened float values
        # of the previous iteration and crashed. Use loop-local names and
        # keep `strengths` intact across dimensions.
        ranked = sorted((s[dim], i) for i, s in enumerate(strengths))
        labels = ["Feature #{}".format(f) for _, f in ranked]
        values = [s for s, _ in ranked]

        ax = axes[dim // plots_per_row][dim % plots_per_row]
        # Positive strengths in red, negative in blue.
        colors = [(1, 0, 0) if s > 0 else (0, 0, 1) for s in values]
        ax.set_title("Dimension={}".format(dim))
        ax.barh(range(len(values)), values, align='center', color=colors)
        ax.set_yticks(range(len(values)))
        ax.set_yticklabels(labels)
        ax.set_xlabel('Prediction value change')

    return fig
| StarcoderdataPython |
6458098 | <reponame>apoveda25/graphql-python-server
import os
from dotenv import load_dotenv
class Environment:
    """Loads environment variables from a dotenv file into the process.

    Instantiating the class immediately loads ``os.getcwd() + path``
    (default: the ``/.env`` file in the current working directory).
    """

    def __init__(self, path: str = "/.env"):
        # Load on construction.
        self.get_file_env(path)

    def get_file_env(self, path: str):
        # Resolve the dotenv path relative to the current working directory.
        # NOTE(review): plain string concatenation, so `path` must start
        # with a path separator (as the default does).
        self.reload(os.getcwd() + path)

    def reload(self, env_path: str):
        # (Re)load the variables from the dotenv file at `env_path`.
        load_dotenv(dotenv_path=env_path)
| StarcoderdataPython |
6678534 | import pandas as pd
import torch
from sklearn.metrics import f1_score, precision_score, recall_score, classification_report
from torch.autograd import Variable
from tqdm import tqdm
from models.MRNet_2D import MRNet_2D
from models.MRNet import MRNet
from mri_dataset.mri_3d_pkl_dataset import MRI_3D_PKL_Dataset
from mri_dataset.mri_3d_transformer_dataset import MRI_3D_Transformer_Dataset
from training_config import GPU_MODE, NUM_CLASSES
from transformation.aug_rescaler import AugmentedImageScaler
from transformation.cropping import Cropper
from transformation.rgb_converter import RGBConverter
from utils.dataset_utils import load_testset_from_csv
from torchvision import transforms
# --- Model setup -----------------------------------------------------------
# Load the 2D MRNet classifier and restore trained weights onto the CPU.
model = MRNet_2D(NUM_CLASSES)
model.load_state_dict(torch.load('dipg_vs_mb_vs_eb_checkpoints/dipg_vs_mb_vs_eb_1_1545103167.473354.pt', map_location='cpu'))
# NOTE(review): model.eval() is never called, so any dropout/batch-norm
# layers stay in training mode during this evaluation -- confirm intended.

# Pre-processing pipeline applied to every test image.
# NOTE(review): this rebinds the imported `torchvision.transforms` module
# name to a Compose object; it works, but shadows the module.
transforms = transforms.Compose([
    AugmentedImageScaler(),
    Cropper(),
    RGBConverter()
])

dataset_loaders, dataset_sizes, datasets = load_testset_from_csv(MRI_3D_Transformer_Dataset, transforms)

# invert class_to_idx
# Build the idx -> class-name mapping and one prediction-log list per class.
y_pred_dict = {}
class_to_idx = datasets['test'].class_to_idx
idx_to_class = {}
for key in class_to_idx.keys():
    idx_to_class[class_to_idx[key]] = key
    y_pred_dict[key] = []

# --- Inference loop --------------------------------------------------------
running_corrects = 0
y_pred = []
y_true = []
for data in tqdm(dataset_loaders['test']):
    inputs, labels = data
    if GPU_MODE:
        try:
            inputs, labels = Variable(inputs.float().cuda()), Variable(labels.float().cuda())
        except Exception as e:
            print("Exception while moving to cuda :", inputs, labels)
            print(str(e))
    else:
        inputs, labels = Variable(inputs), Variable(labels)
    outputs = model(inputs)
    # preds holds the arg-max class index for each sample in the batch.
    vals, preds = torch.max(outputs.data, 1)
    running_corrects += torch.sum(preds == labels.data)
    # outputs[0] since batch size always = 1
    for i, log_prob in enumerate(outputs[0]):
        y_pred_dict[idx_to_class[i]].append(log_prob)
    for x in preds:
        y_pred.append(x)
    for x in labels:
        y_true.append(x)

# --- Metrics ---------------------------------------------------------------
# Class names in index order, for the sklearn classification report.
target_names = []
for key, value in sorted(idx_to_class.items()):
    target_names.append(value)
print(classification_report(y_true, y_pred, target_names=target_names))
print('accuracy :', float(running_corrects) / dataset_sizes['test'])
print("overall f1 weighted:", f1_score(y_true, y_pred, average="weighted"))
print("overall precision weighted:", precision_score(y_true, y_pred, average="weighted"))
print("overall recall weighted:", recall_score(y_true, y_pred, average="weighted"))

# --- Per-image results CSV -------------------------------------------------
images = datasets['test'].image_arr
labels = datasets['test'].label_arr
# Converting tensors to values
y_true_val = []
y_pred_val = []
y_pred_MB = []
y_pred_EP = []
y_pred_DIPG = []
image_names = []
for i in range(len(y_true)):
    y_true_val.append(y_true[i].item())
    y_pred_val.append(y_pred[i].item())
    y_pred_MB.append(y_pred_dict['MB'][i].item())
    y_pred_EP.append(y_pred_dict['EP'][i].item())
    y_pred_DIPG.append(y_pred_dict['DIPG'][i].item())
    # get image name from absolute path
    # Can modify this to get patient id/study id/something better
    image_names.append(images[i].split('/')[-1])

results_df = pd.DataFrame(
    {'image_path': image_names,
     'label': y_true_val,
     'prediction': y_pred_val,
     'log_probablity_mb': y_pred_MB,
     'log_probablity_ep': y_pred_EP,
     'log_probablity_dipg': y_pred_DIPG
     })
results_df.to_csv("results.csv", index=False)
1973697 | from experiments import research, data
| StarcoderdataPython |
4839170 | <filename>datacube/index/_datasets.py
# coding=utf-8
"""
API for dataset indexing, access and search.
"""
from __future__ import absolute_import
import logging
from cachetools.func import lru_cache
from datacube import compat
from datacube.model import Dataset, DatasetType, MetadataType
from datacube.utils import InvalidDocException, check_doc_unchanged, jsonify_document, get_doc_changes, contains
from . import fields
from .exceptions import DuplicateRecordError, UnknownFieldError
_LOG = logging.getLogger(__name__)
class MetadataTypeResource(object):
    """
    Access and management of metadata types stored in the index database.

    :type _db: datacube.index.postgres._api.PostgresDb
    """

    def __init__(self, db):
        """
        :type db: datacube.index.postgres._api.PostgresDb
        """
        self._db = db

    def add(self, definition, allow_table_lock=False):
        """
        Add a metadata type, or verify that an identical one already exists.

        :type definition: dict
        :param allow_table_lock:
            Allow an exclusive lock to be taken on the table while creating the indexes.
            This will halt other user's requests until completed.

            If false, creation will be slightly slower and cannot be done in a transaction.
        :rtype: datacube.model.MetadataType
        """
        # This column duplication is getting out of hand:
        MetadataType.validate(definition)

        name = definition['name']

        existing = self._db.get_metadata_type_by_name(name)
        if existing:
            # They've passed us the same one again. Make sure it matches what is stored.
            # TODO: Support for adding/updating search fields?
            check_doc_unchanged(
                existing.definition,
                definition,
                'Metadata Type {}'.format(name)
            )
        else:
            self._db.add_metadata_type(
                name=name,
                definition=definition,
                concurrently=not allow_table_lock
            )
        return self.get_by_name(name)

    # NOTE(review): lru_cache on an instance method also keys on `self` and
    # keeps the resource alive for the cache's lifetime; acceptable here
    # since index resources are long-lived singletons.
    @lru_cache()
    def get(self, id_):
        """
        Retrieve a metadata type by id (cached per resource instance).

        :rtype: datacube.model.MetadataType
        """
        return self._make(self._db.get_metadata_type(id_))

    @lru_cache()
    def get_by_name(self, name):
        """
        Retrieve a metadata type by name, or None if it does not exist
        (cached per resource instance).

        :rtype: datacube.model.MetadataType
        """
        record = self._db.get_metadata_type_by_name(name)
        if not record:
            return None
        return self._make(record)

    def check_field_indexes(self, allow_table_lock=False, rebuild_all=False):
        """
        Create or replace per-field indexes and views.

        :param allow_table_lock:
            Allow an exclusive lock to be taken on the table while creating the indexes.
            This will halt other user's requests until completed.

            If false, creation will be slightly slower and cannot be done in a transaction.
        """
        self._db.check_dynamic_fields(concurrently=not allow_table_lock, rebuild_all=rebuild_all)

    def _make_many(self, query_rows):
        # Lazily convert database rows into model objects.
        return (self._make(c) for c in query_rows)

    def _make(self, query_row):
        """
        Convert a database row into a model object.

        :rtype: datacube.model.MetadataType
        """
        definition = query_row['definition']
        dataset_ = definition['dataset']
        return MetadataType(
            query_row['name'],
            dataset_,
            dataset_search_fields=self._db.get_dataset_fields(query_row),
            id_=query_row['id']
        )
class DatasetTypeResource(object):
    """
    Access and management of products (dataset types) stored in the index.

    :type _db: datacube.index.postgres._api.PostgresDb
    :type metadata_type_resource: MetadataTypeResource
    """

    def __init__(self, db, metadata_type_resource):
        """
        :type db: datacube.index.postgres._api.PostgresDb
        :type metadata_type_resource: MetadataTypeResource
        """
        self._db = db
        self.metadata_type_resource = metadata_type_resource

    def from_doc(self, definition):
        """
        Create a Product from its definitions

        :param dict definition: product definition document
        :rtype: datacube.model.DatasetType
        """
        # This column duplication is getting out of hand:
        DatasetType.validate(definition)

        metadata_type = definition['metadata_type']

        # They either specified the name of a metadata type, or specified a metadata type.
        # Is it a name?
        if isinstance(metadata_type, compat.string_types):
            metadata_type = self.metadata_type_resource.get_by_name(metadata_type)
        else:
            # Otherwise they embedded a document, add it if needed:
            metadata_type = self.metadata_type_resource.add(metadata_type, allow_table_lock=False)

        if not metadata_type:
            raise InvalidDocException('Unknown metadata type: %r' % definition['metadata_type'])

        return DatasetType(metadata_type, definition)

    def add(self, type_):
        """
        Add a Product (or verify an identical one is already indexed).

        :param datacube.model.DatasetType type_: Product to add
        :rtype: datacube.model.DatasetType
        """
        DatasetType.validate(type_.definition)

        existing = self._db.get_dataset_type_by_name(type_.name)
        if existing:
            # TODO: Support for adding/updating match rules?
            # They've passed us the same collection again. Make sure it matches what is stored.
            check_doc_unchanged(
                existing.definition,
                jsonify_document(type_.definition),
                'Dataset type {}'.format(type_.name)
            )
        else:
            self._db.add_dataset_type(
                name=type_.name,
                metadata=type_.metadata_doc,
                metadata_type_id=type_.metadata_type.id,
                definition=type_.definition
            )
        return self.get_by_name(type_.name)

    def update(self, type_, allow_unsafe_updates=False):
        """
        Update a product. Unsafe changes will throw a ValueError by default.

        (An unsafe change is anything that may potentially make the product
        incompatible with existing datasets of that type)

        :param datacube.model.DatasetType type_: Product to add
        :param allow_unsafe_updates bool: Allow unsafe changes. Use with caution.
        :rtype: datacube.model.DatasetType
        """
        DatasetType.validate(type_.definition)

        existing = self._db.get_dataset_type_by_name(type_.name)
        if not existing:
            raise ValueError('Unknown product %s, cannot update – did you intend to add it?' % type_.name)

        def handle_unsafe(msg):
            # Either refuse the change or log-and-continue, per the flag.
            if not allow_unsafe_updates:
                raise ValueError(msg)
            else:
                _LOG.warning("Ignoring %s", msg)

        # We'll probably want to use offsets in the future (ie. nested dicts), not just keys, but for now this suffices.
        safe_keys_to_change = ('description', 'metadata')

        doc_changes = get_doc_changes(existing.definition, jsonify_document(type_.definition))
        for offset, old_value, new_value in doc_changes:
            _LOG.info('Changing %s %s: %r -> %r', type_.name, '.'.join(offset), old_value, new_value)

            key_name = offset[0]
            if key_name not in safe_keys_to_change:
                handle_unsafe('Potentially unsafe update: changing %r of product definition.' % key_name)

            # You can safely make the match rules looser but not tighter.
            if key_name == 'metadata':
                # Tightening them could exclude datasets already matched to the product.
                # (which would make search results wrong)
                if not contains(old_value, new_value, case_sensitive=True):
                    handle_unsafe('Unsafe update: new product match rules are not a superset of old ones.')

        if doc_changes:
            _LOG.info("Updating product %s", type_.name)
            self._db.update_dataset_type(
                name=type_.name,
                metadata=type_.metadata_doc,
                metadata_type_id=type_.metadata_type.id,
                definition=type_.definition
            )
            # Clear our local cache. Note that other users may still have
            # cached copies for the duration of their connections.
            self.get_by_name.cache_clear()
            self.get.cache_clear()
        else:
            _LOG.info("No changes detected for product %s", type_.name)

    def update_document(self, definition, allow_unsafe_update=False):
        """
        Update a Product using its definition

        :param dict definition: product definition document
        :rtype: datacube.model.DatasetType
        """
        type_ = self.from_doc(definition)
        return self.update(type_, allow_unsafe_updates=allow_unsafe_update)

    def add_document(self, definition):
        """
        Add a Product using its definition

        :param dict definition: product definition document
        :rtype: datacube.model.DatasetType
        """
        type_ = self.from_doc(definition)
        return self.add(type_)

    @lru_cache()
    def get(self, id_):
        """
        Retrieve Product by id (cached per resource instance)

        :param int id_: id of the Product
        :rtype: datacube.model.DatasetType
        """
        return self._make(self._db.get_dataset_type(id_))

    @lru_cache()
    def get_by_name(self, name):
        """
        Retrieve Product by name (cached per resource instance)

        :param str name: name of the Product
        :rtype: datacube.model.DatasetType
        """
        result = self._db.get_dataset_type_by_name(name)
        if not result:
            return None
        return self._make(result)

    def get_with_fields(self, field_names):
        """
        Return dataset types that have all the given fields.

        :param tuple[str] field_names:
        :rtype: __generator[DatasetType]
        """
        for type_ in self.get_all():
            for name in field_names:
                if name not in type_.metadata_type.dataset_fields:
                    break
            else:
                yield type_

    def search(self, **query):
        """
        Return dataset types whose definitions match all the given fields.

        :param dict query:
        :rtype: __generator[DatasetType]
        """
        for type_, q in self.search_robust(**query):
            if not q:
                yield type_

    def search_robust(self, **query):
        """
        Return dataset types that match match-able fields and dict of remaining un-matchable fields.

        :param dict query:
        :rtype: __generator[(DatasetType, dict)]
        """
        for type_ in self.get_all():
            q = query.copy()
            if q.pop('product', type_.name) != type_.name:
                continue
            if q.pop('metadata_type', type_.metadata_type.name) != type_.metadata_type.name:
                continue

            for key, value in list(q.items()):
                try:
                    exprs = fields.to_expressions(type_.metadata_type.dataset_fields.get, **{key: value})
                except UnknownFieldError as e:
                    # Field unknown for this type: it cannot match the query.
                    break

                try:
                    if all(expr.evaluate(type_.metadata_doc) for expr in exprs):
                        # Matched by the product definition itself, so the
                        # field need not be re-queried per dataset.
                        q.pop(key)
                    else:
                        break
                except (AttributeError, KeyError, ValueError) as e:
                    # Not answerable from the product document: leave the
                    # field in q to be matched against individual datasets.
                    continue
            else:
                yield type_, q

    def get_all(self):
        """
        Retrieve all Products

        :rtype: iter[datacube.model.DatasetType]
        """
        return (self._make(record) for record in self._db.get_all_dataset_types())

    def _make_many(self, query_rows):
        # Lazily convert database rows into model objects.
        return (self._make(c) for c in query_rows)

    def _make(self, query_row):
        """
        Convert a database row into a model object.

        :rtype: datacube.model.DatasetType
        """
        return DatasetType(
            definition=query_row['definition'],
            metadata_type=self.metadata_type_resource.get(query_row['metadata_type_ref']),
            id_=query_row['id'],
        )
class DatasetResource(object):
    """
    Access and management of datasets stored in the index.

    :type _db: datacube.index.postgres._api.PostgresDb
    :type types: datacube.index._datasets.DatasetTypeResource
    """

    def __init__(self, db, dataset_type_resource):
        """
        :type db: datacube.index.postgres._api.PostgresDb
        :type dataset_type_resource: datacube.index._datasets.DatasetTypeResource
        """
        self._db = db
        self.types = dataset_type_resource

    def get(self, id_, include_sources=False):
        """
        Get dataset by id

        :param uuid id_: id of the dataset to retrieve
        :param bool include_sources: get the full provenance graph?
        :rtype: datacube.model.Dataset
        """
        if not include_sources:
            return self._make(self._db.get_dataset(id_), full_info=True)

        # Fetch the whole provenance graph in one query and stitch the
        # source documents/objects back onto each dataset.
        datasets = {result['id']: (self._make(result, full_info=True), result)
                    for result in self._db.get_dataset_sources(id_)}
        for dataset, result in datasets.values():
            dataset.metadata_doc['lineage']['source_datasets'] = {
                classifier: datasets[str(source)][0].metadata_doc
                for source, classifier in zip(result['sources'], result['classes']) if source
            }
            dataset.sources = {
                classifier: datasets[str(source)][0]
                for source, classifier in zip(result['sources'], result['classes']) if source
            }
        return datasets[id_][0]

    def get_derived(self, id_):
        """
        Get derived datasets

        :param uuid id_: dataset id
        :rtype: list[datacube.model.Dataset]
        """
        return [self._make(result) for result in self._db.get_derived_datasets(id_)]

    def has(self, dataset):
        """
        Have we already indexed this dataset?

        :param datacube.model.Dataset dataset: dataset to check
        :rtype: bool
        """
        return self._db.contains_dataset(dataset.id)

    def add(self, dataset, skip_sources=False):
        """
        Ensure a dataset is in the index. Add it if not present.

        :param datacube.model.Dataset dataset: dataset to add
        :param bool skip_sources: don't attempt to index source (use when sources are already indexed)
        :rtype: datacube.model.Dataset
        """
        if not skip_sources:
            for source in dataset.sources.values():
                self.add(source)

        was_inserted = False
        # Temporarily strip embedded source documents: they are stored as
        # separate rows/links, not duplicated inside each dataset document.
        sources_tmp = dataset.type.dataset_reader(dataset.metadata_doc).sources
        dataset.type.dataset_reader(dataset.metadata_doc).sources = {}
        try:
            _LOG.info('Indexing %s', dataset.id)
            with self._db.begin() as transaction:
                try:
                    was_inserted = transaction.insert_dataset(dataset.metadata_doc, dataset.id, dataset.type.id)
                    for classifier, source_dataset in dataset.sources.items():
                        transaction.insert_dataset_source(classifier, dataset.id, source_dataset.id)

                    # try to update location in the same transaction as insertion.
                    # if insertion fails we'll try updating location later
                    # if insertion succeeds the location bit can't possibly fail
                    if dataset.local_uri:
                        transaction.ensure_dataset_location(dataset.id, dataset.local_uri)
                except DuplicateRecordError as e:
                    _LOG.warning(str(e))

            if not was_inserted:
                # Already indexed: ensure the stored document is identical.
                existing = self.get(dataset.id)
                if existing:
                    check_doc_unchanged(
                        existing.metadata_doc,
                        jsonify_document(dataset.metadata_doc),
                        'Dataset {}'.format(dataset.id)
                    )
                # reinsert attempt? try updating the location
                if dataset.local_uri:
                    try:
                        self._db.ensure_dataset_location(dataset.id, dataset.local_uri)
                    except DuplicateRecordError as e:
                        _LOG.warning(str(e))
        finally:
            # Restore the embedded sources on the caller's document.
            dataset.type.dataset_reader(dataset.metadata_doc).sources = sources_tmp

        return dataset

    def archive(self, ids):
        """
        Mark datasets as archived

        :param list[uuid] ids: list of dataset ids to archive
        """
        with self._db.begin() as transaction:
            for id_ in ids:
                transaction.archive_dataset(id_)

    def restore(self, ids):
        """
        Mark datasets as not archived

        :param list[uuid] ids: list of dataset ids to restore
        """
        with self._db.begin() as transaction:
            for id_ in ids:
                transaction.restore_dataset(id_)

    def get_field_names(self, type_name=None):
        """
        Yield the searchable field names of one (or every) product.

        :param str type_name:
        :rtype: __generator[str]
        """
        if type_name is None:
            types = self.types.get_all()
        else:
            types = [self.types.get_by_name(type_name)]

        for type_ in types:
            for name in type_.metadata_type.dataset_fields:
                yield name

    def get_locations(self, dataset):
        """
        Storage locations recorded for the given dataset.

        :param datacube.model.Dataset dataset: dataset
        :rtype: list[str]
        """
        return self._db.get_locations(dataset.id)

    def _make(self, dataset_res, full_info=False):
        """
        Convert a database row into a model object.

        :param bool full_info: Include all available fields
        :rtype: datacube.model.Dataset
        """
        return Dataset(
            self.types.get(dataset_res.dataset_type_ref),
            dataset_res.metadata,
            dataset_res.local_uri,
            indexed_by=dataset_res.added_by if full_info else None,
            indexed_time=dataset_res.added if full_info else None
        )

    def _make_many(self, query_result):
        # Lazily convert database rows into model objects.
        return (self._make(dataset) for dataset in query_result)

    def search_by_metadata(self, metadata):
        """
        Perform a search using arbitrary metadata, returning results as Dataset objects.

        Caution – slow! This will usually not use indexes.

        :param dict metadata:
        :rtype: list[datacube.model.Dataset]
        """
        return self._make_many(self._db.search_datasets_by_metadata(metadata))

    def search(self, **query):
        """
        Perform a search, returning results as Dataset objects.

        :param dict[str,str|float|datacube.model.Range] query:
        :rtype: __generator[datacube.model.Dataset]
        """
        for dataset_type, datasets in self._do_search_by_product(query):
            for dataset in self._make_many(datasets):
                yield dataset

    def search_by_product(self, **query):
        """
        Perform a search, returning datasets grouped by product type.

        :param dict[str,str|float|datacube.model.Range] query:
        :rtype: __generator[(datacube.model.DatasetType, __generator[datacube.model.Dataset])]
        """
        for dataset_type, datasets in self._do_search_by_product(query):
            yield dataset_type, self._make_many(datasets)

    def count(self, **query):
        """
        Perform a search, returning count of results.

        :param dict[str,str|float|datacube.model.Range] query:
        :rtype: int
        """
        # This may be optimised into one query in the future.
        result = 0
        for product_type, type_count in self._do_count_by_product(query):
            result += type_count
        return result

    def count_by_product(self, **query):
        """
        Perform a search, returning a count of for each matching product type.

        :param dict[str,str|float|datacube.model.Range] query:
        :returns: Sequence of (product, count)
        :rtype: __generator[(datacube.model.DatasetType, int)]
        """
        return self._do_count_by_product(query)

    def count_by_product_through_time(self, period, **query):
        """
        Perform a search, returning counts for each product grouped in time slices
        of the given period.

        :param dict[str,str|float|datacube.model.Range] query:
        :param str period: Time range for each slice: '1 month', '1 day' etc.
        :returns: For each matching product type, a list of time ranges and their count.
        :rtype: __generator[(datacube.model.DatasetType, list[(datetime.datetime, datetime.datetime), int)]]
        """
        return self._do_time_count(period, query)

    def count_product_through_time(self, period, **query):
        """
        Perform a search, returning counts for a single product grouped in time slices
        of the given period.

        Will raise an error if the search terms match more than one product.

        :param dict[str,str|float|datacube.model.Range] query:
        :param str period: Time range for each slice: '1 month', '1 day' etc.
        :returns: For each matching product type, a list of time ranges and their count.
        :rtype: list[(str, list[(datetime.datetime, datetime.datetime), int)]]
        """
        return next(self._do_time_count(period, query, ensure_single=True))[1]

    def _get_dataset_types(self, q):
        """Dataset types capable of answering the given query fields."""
        if 'product' in q:
            types = {self.types.get_by_name(q['product'])}
        else:
            # Otherwise search any metadata type that has all the given search fields.
            # BUG FIX: materialise the generator -- the original tested the
            # truthiness of a generator object, which is always true, so the
            # "no matching type" error below was unreachable.
            types = list(self.types.get_with_fields(tuple(q.keys())))
            if not types:
                # BUG FIX: %-format the message (the original passed the tuple
                # as a second exception argument instead of interpolating it).
                raise ValueError('No type of dataset has fields: %r' % (tuple(q.keys()),))

        return types

    def _get_product_queries(self, query):
        # Pair each matching product with its remaining per-dataset query.
        for dataset_type, q in self.types.search_robust(**query):
            q['dataset_type_id'] = dataset_type.id
            yield q, dataset_type

    def _do_search_by_product(self, query, return_fields=False, with_source_ids=False):
        # Internal: run the per-product dataset search against the db.
        for q, dataset_type in self._get_product_queries(query):
            dataset_fields = dataset_type.metadata_type.dataset_fields
            query_exprs = tuple(fields.to_expressions(dataset_fields.get, **q))
            select_fields = None
            if return_fields:
                select_fields = tuple(dataset_fields.values())
            yield (dataset_type,
                   self._db.search_datasets(
                       query_exprs,
                       select_fields=select_fields,
                       with_source_ids=with_source_ids
                   ))

    def _do_count_by_product(self, query):
        # Internal: per-product dataset counts; products with zero matches
        # are omitted.
        for q, dataset_type in self._get_product_queries(query):
            dataset_fields = dataset_type.metadata_type.dataset_fields
            query_exprs = tuple(fields.to_expressions(dataset_fields.get, **q))
            count = self._db.count_datasets(query_exprs)
            if count > 0:
                yield dataset_type, count

    def _do_time_count(self, period, query, ensure_single=False):
        # Internal: per-product counts grouped into time slices of `period`.
        if 'time' not in query:
            raise ValueError('Counting through time requires a "time" range query argument')

        query = dict(query)

        start, end = query['time']
        del query['time']

        product_queries = list(self._get_product_queries(query))
        if ensure_single:
            if len(product_queries) == 0:
                raise ValueError('No products match search terms: %r' % query)
            if len(product_queries) > 1:
                raise ValueError('Multiple products match single query search: %r' %
                                 ([dt.name for q, dt in product_queries],))

        for q, dataset_type in product_queries:
            dataset_fields = dataset_type.metadata_type.dataset_fields
            query_exprs = tuple(fields.to_expressions(dataset_fields.get, **q))
            yield dataset_type, list(self._db.count_datasets_through_time(
                start,
                end,
                period,
                dataset_fields.get('time'),
                query_exprs
            ))

    def search_summaries(self, **query):
        """
        Perform a search, returning just the search fields of each dataset.

        :param dict[str,str|float|datacube.model.Range] query:
        :rtype: __generator[dict]
        """
        for dataset_type, results in self._do_search_by_product(query, return_fields=True):
            for columns in results:
                yield dict(columns)

    def search_eager(self, **query):
        """
        Perform a search, returning results as Dataset objects.

        :param dict[str,str|float|datacube.model.Range] query:
        :rtype: list[datacube.model.Dataset]
        """
        return list(self.search(**query))
| StarcoderdataPython |
6479798 | <filename>thumbnail_OpenFaaS/thumbnail/handler.py
from PIL import Image
import cgi
import io
import os
def handle(param):
    """Create a 128x128 PNG thumbnail from a multipart-form upload.

    ``param`` is the raw HTTP request body; the uploaded image is expected
    in the HTML form field named 'pic'.  Returns the PNG bytes.
    """
    # Wrap the raw body in a binary stream for the form parser.
    body = param
    stream = io.BytesIO(body)

    # OpenFaaS passes HTTP header values through the environment; the
    # content type carries the multipart boundary needed for parsing.
    content_type = os.getenv("Http_Content_Type")

    # Parse the multipart form data with cgi.FieldStorage.
    form = cgi.FieldStorage(fp=stream, environ={'REQUEST_METHOD': 'POST', 'CONTENT_LENGTH': len(body),
                                                'CONTENT_TYPE': content_type})

    # Pull the uploaded image bytes out of the 'pic' field.
    picture_field = form["pic"]
    picture_bytes = picture_field.file.read()

    # Decode the image and shrink it in place to at most 128x128.
    image = Image.open(io.BytesIO(picture_bytes))
    image.thumbnail((128, 128))

    # Re-encode as PNG and return the raw bytes.
    # NB! Output content-type must be defined top be image/* (image/png) in the function.yaml file under environment
    out = io.BytesIO()
    image.save(out, format='PNG')
    return out.getvalue()
| StarcoderdataPython |
5156326 | # Copyright (c) 2018 Dolphin Emulator Website Contributors
# SPDX-License-Identifier: MIT
from django.conf import settings
from django.db import models
from django.utils.html import linebreaks
from django.utils.translation import ugettext as _
from zinnia.markups import textile
from zinnia.markups import markdown
from zinnia.markups import restructuredtext
from zinnia.settings import UPLOAD_TO, MARKUP_LANGUAGE
from zinnia.managers import PUBLISHED
from zinnia.models_bases.entry import AbstractEntry
class BlogSeries(models.Model):
    """Represents a date-ordered sequence of blog entries."""
    # Series title; indexed because it is used for lookups.
    name = models.CharField(max_length=255, db_index=True)
    # Whether the series is shown publicly.
    visible = models.BooleanField(default=True)
    image = models.ImageField(
        _('image'), blank=True, upload_to=UPLOAD_TO,
        help_text=_('Used for illustration.'))

    @property
    def entries_reversed(self):
        # Entries ordered by ascending creation date.
        # NOTE(review): the name says "reversed" -- presumably reversed
        # relative to the related manager's default ordering; confirm
        # against the Entry model's Meta ordering.
        return self.entries.order_by('creation_date')

    def nth_entry(self, nth, allow_hidden=False):
        """Returns the 1-indexed nth article in series.

        Returns None when ``nth`` is out of range.  Unpublished entries
        are skipped unless ``allow_hidden`` is True.
        """
        if nth < 1:
            return None
        qs = self.entries.all()
        if not allow_hidden:
            qs = qs.filter(status=PUBLISHED)
        try:
            return qs.order_by('creation_date')[nth - 1]
        except IndexError:
            return None

    def __str__(self):
        return self.name

    def __repr__(self):
        return '<BlogSeries "%s" (%d entries)>' % (self.name, self.entries.count())
class ForumThreadForEntry(models.Model):
    """Associates a blog entry with its discussion thread on the forum."""
    entry = models.OneToOneField('zinnia.Entry', on_delete=models.CASCADE, related_name='forum_thread')
    # Numeric thread id on the external forum; interpolated into
    # settings.FORUM_URL_FOR_THREAD by get_absolute_url().
    thread_id = models.IntegerField()
    def get_absolute_url(self):
        """Return the external forum URL for this entry's thread."""
        return settings.FORUM_URL_FOR_THREAD.format(id=self.thread_id)
    def __str__(self):
        return "%s -> %s" % (self.entry, self.get_absolute_url())
    def __repr__(self):
        return "<ForumThreadForEntry %s -> thread %d>" % (
            self.entry, self.thread_id
        )
class BlogEntry(AbstractEntry):
    """
    Represents a blog entry. Adds an optional `series` field to the default
    Zinnia model, plus Etherpad-based collaborative drafting support.
    """
    # Series this entry belongs to, if any; SET_NULL keeps the entry alive
    # when its series is deleted.
    within_series = models.ForeignKey(BlogSeries, on_delete=models.SET_NULL, null=True, blank=True, related_name='entries')
    # Id of the Etherpad pad used for collaborative drafting, if any.
    etherpad_id = models.CharField(max_length=256, null=True, blank=True)
    @property
    def use_collaborative_editing(self):
        # Collaborative editing only applies to unpublished drafts with a pad.
        return self.etherpad_id and self.draft
    @property
    def draft(self):
        # Anything not yet PUBLISHED counts as a draft.
        return self.status != PUBLISHED
    # The default Zinnia implementation of this does content sniffing,
    # assuming that if something contains </p> it is raw HTML. That's not
    # true, since Markdown can contain HTML, so we override it here and
    # always honour MARKUP_LANGUAGE.
    @property
    def html_content(self):
        """
        Returns the "content" field formatted in HTML.
        """
        if MARKUP_LANGUAGE == 'markdown':
            # TODO: Remove when Zinnia supports non-string Markdown exts.
            import markdown
            from zinnia.settings import MARKDOWN_EXTENSIONS
            from django.utils.encoding import force_text
            return markdown.markdown(force_text(self.content),
                                     extensions=MARKDOWN_EXTENSIONS,
                                     safe_mode=False)
        elif MARKUP_LANGUAGE == 'textile':
            return textile(self.content)
        elif MARKUP_LANGUAGE == 'restructuredtext':
            return restructuredtext(self.content)
        # Plain text fallback: convert newlines into paragraph markup.
        return linebreaks(self.content)
    @property
    def real_image(self):
        """Prioritizes the entry image, then the series image, if any."""
        if self.image:
            return self.image
        if self.within_series is not None:
            # May be None!
            return self.within_series.image
    @property
    def series_index(self):
        """1-based position of this entry within its series (1 if no series)."""
        if self.within_series is None:
            return 1
        # Count series entries created strictly before this one.
        # NOTE(review): counts entries of any status, including hidden ones --
        # confirm this matches BlogSeries.nth_entry's PUBLISHED-only default.
        return (self.within_series.entries
                .filter(creation_date__lt=self.creation_date)
                .exclude(pk=self.pk)
                .count()) + 1
    def relative_entry_in_series(self, offset):
        """Return the entry `offset` positions away in the series, or None."""
        if self.within_series is None:
            return None
        return self.within_series.nth_entry(self.series_index + offset)
    @property
    def next_entry_in_series(self):
        return self.relative_entry_in_series(1)
    @property
    def previous_entry_in_series(self):
        return self.relative_entry_in_series(-1)
    class Meta(AbstractEntry.Meta):
        # Zinnia creates the concrete Entry model from this abstract base.
        abstract = True
import re
import datetime
from Constants import *
from ..Hashes import *
from StringUtils import *
from TimeZoneUtils import *
from ..ScheduleEvent import *
from .Scraper import *
def SupplementSchedule(sched, navigator, sport, league, season):
    """Merge scraped all-star game data into an existing schedule in place.

    For each supplemental record returned by ScrapeAllStarGame, try to find
    a matching event already in sched and merge into it; otherwise create
    and add a brand-new event.
    """
    supplement = ScrapeAllStarGame(sport, league, season)
    for key in supplement.keys():
        supplementalEvent = supplement[key]
        if not supplementalEvent: continue
        eventId = int(key)
        # Keys above 100 pack a game number into the low two digits; strip it
        # to recover the event indicator.
        # BUG FIX: compare the parsed integer, not the raw key -- the key may
        # be a string, which made `key > 100` always-true under Python 2 and
        # a TypeError under Python 3.
        if eventId > 100: eventId = eventId // 100
        # Find event in original schedule
        eligible = __find_events(sched, eventId)
        if eligible:
            foundEligible = False
            for eligibleEvent in eligible:
                isMatching = __is_matching_game(season, eligibleEvent, supplementalEvent, navigator)
                if isMatching:
                    foundEligible = True
                    __merge_events(navigator, sport, league, season, eligibleEvent, supplementalEvent, eventId)
            if not foundEligible:
                # No eligible event matched; add the supplement as a new event.
                __create_and_add_event(sched, navigator, sport, league, season, supplementalEvent, eventId)
        else:
            # Nothing with this event indicator exists yet; add a new event.
            __create_and_add_event(sched, navigator, sport, league, season, supplementalEvent, eventId)
def __is_matching_game(season, eligibleEvent, supplementalEvent, navigator):
if supplementalEvent.get("game") and eligibleEvent.game and supplementalEvent["game"] == eligibleEvent.game:
return True
homeTeam = navigator.GetTeam(season, fullName=eligibleEvent.homeTeamName, key=eligibleEvent.homeTeam)
awayTeam = navigator.GetTeam(season, fullName=eligibleEvent.awayTeamName, key=eligibleEvent.awayTeam)
winner = navigator.GetTeam(season, fullName=supplementalEvent.get("winner"), name=supplementalEvent.get("winner"), abbreviation=supplementalEvent.get("winner"), city=supplementalEvent.get("winner"))
loser = navigator.GetTeam(season, fullName=supplementalEvent.get("loser"), name=supplementalEvent.get("loser"), abbreviation=supplementalEvent.get("loser"), city=supplementalEvent.get("loser"))
if homeTeam and winner and homeTeam.key == winner.key:
if awayTeam and loser and awayTeam.key == loser.key:
return True
if homeTeam and loser and homeTeam.key == loser.key:
if awayTeam and winner and awayTeam.key == winner.key:
return True
return False
def __create_and_add_event(sched, navigator, sport, league, season, supplementalEvent, eventId):
    """Convert a supplemental record into a ScheduleEvent and insert it into sched."""
    newEvent = __convert_supplement(navigator, sport, league, season, supplementalEvent, eventId)
    # Without a date the event cannot be slotted into the schedule; drop it.
    if not newEvent.get("date"): return
    AddOrAugmentEvent(sched, ScheduleEvent(**newEvent), 0)
    pass
def __find_events(sched, eventId):
qualifyingEvents = []
for augmentationKey in sched.keys(): # Hashed augmentation keys
for subkey in sched[augmentationKey].keys(): # Augmentation subkeys (hours)
evt = sched[augmentationKey][subkey]
if evt.eventindicator == eventId:
qualifyingEvents.append(evt)
return qualifyingEvents
def __convert_supplement(navigator, sport, league, season, augmentEvent, eventId):
    """Translate a scraped supplemental record into ScheduleEvent keyword args.

    Resolves team names against the navigator (falling back to the raw
    scraped name when no canonical team is found), normalizes the date, and
    collects assets/networks/identity metadata.
    """
    # Normalize the date: accept datetime/date objects or ISO-8601 strings.
    date = augmentEvent.get("date")
    if isinstance(date, datetime.datetime): pass
    elif isinstance(date, datetime.date): pass
    elif isinstance(date, basestring):
        if IsISO8601DateWithoutTime(date): date = ParseISO8601Date(date).date()
        elif IsISO8601Date(date): date = ParseISO8601Date(date)
    # Home side: prefer the explicit homeTeam, else assume the loser was home.
    augmentHomeTeam = deunicode(augmentEvent.get("homeTeam") if augmentEvent.get("homeTeam") else augmentEvent.get("loser"))
    homeTeamKey = None
    homeTeamName = None
    homeTeamDisplay = None
    if augmentHomeTeam:
        discoveredHomeTeam = navigator.GetTeam(season, fullName=augmentHomeTeam, name=augmentHomeTeam, abbreviation=augmentHomeTeam, city=augmentHomeTeam)
        if discoveredHomeTeam:
            homeTeamKey = discoveredHomeTeam.key
            homeTeamDisplay = discoveredHomeTeam.fullName
        else:
            # Unknown team: keep the scraped name for display purposes.
            homeTeamName = augmentHomeTeam
            homeTeamDisplay = augmentHomeTeam
    # Away side: prefer the explicit awayTeam, else assume the winner was away.
    augmentAwayTeam = deunicode(augmentEvent.get("awayTeam") if augmentEvent.get("awayTeam") else augmentEvent.get("winner"))
    awayTeamKey = None
    awayTeamName = None
    awayTeamDisplay = None
    if augmentAwayTeam:
        # BUG FIX: the city lookup previously passed augmentHomeTeam here
        # (copy/pasted from the home-team branch above).
        discoveredAwayTeam = navigator.GetTeam(season, fullName=augmentAwayTeam, name=augmentAwayTeam, abbreviation=augmentAwayTeam, city=augmentAwayTeam)
        if discoveredAwayTeam:
            awayTeamKey = discoveredAwayTeam.key
            awayTeamDisplay = discoveredAwayTeam.fullName
        else:
            awayTeamName = augmentAwayTeam
            awayTeamDisplay = augmentAwayTeam
    game = augmentEvent.get("game")
    enhancedEvent = {
        "sport": sport,
        "league": league,
        "season": season,
        "eventindicator": eventId,
        "eventTitle": deunicode(augmentEvent.get("caption")),
        "description": augmentEvent.get("description"),
        "date": date,
        "game": game
        }
    # Only include team fields that were actually resolved/scraped.
    if homeTeamKey: enhancedEvent["homeTeam"] = homeTeamKey
    if homeTeamName: enhancedEvent["homeTeamName"] = homeTeamName
    if awayTeamKey: enhancedEvent["awayTeam"] = awayTeamKey
    if awayTeamName: enhancedEvent["awayTeamName"] = awayTeamName
    vs = None
    if homeTeamDisplay and awayTeamDisplay:
        vs = "%s vs. %s" % (homeTeamDisplay, awayTeamDisplay)
    enhancedEvent.setdefault("vs", vs)
    # Attach the scraped logo (if any) as a thumbnail asset.
    assets = {}
    if augmentEvent.get("logo"):
        assets[ASSET_TYPE_THUMBNAIL] = [{"source": ASSET_SOURCE_WIKIPEDIA, "url": deunicode(augmentEvent["logo"])}]
    if assets:
        enhancedEvent.setdefault("assets", assets)
    networks = []
    if augmentEvent.get("networks"):
        for network in augmentEvent["networks"]:
            networks.append(deunicode(network))
    if networks:
        enhancedEvent.setdefault("networks", networks)
    # Stable Wikipedia-derived identity: league.season.eventId[.game].
    gameKeyPart = (".%s" % game) if game else ""
    enhancedEvent["identity"] = {"WikipediaID": "%s.%s.%s%s" % (league, season, eventId, gameKeyPart)}
    return enhancedEvent
def __merge_events(navigator, sport, league, season, evt, augmentEvent, eventId):
    """Fold a supplemental record into an existing schedule event in place."""
    enhancedEvent = __convert_supplement(navigator, sport, league, season, augmentEvent, eventId)
    evt.augment(**enhancedEvent)
    # An all-caps existing title indicates the placeholder from the ESPN API;
    # prefer the scraped caption in that case.
    if enhancedEvent.get("eventTitle") and evt.eventTitle and evt.eventTitle == evt.eventTitle.upper():
        # Existing event title is from ESPN API. Overwrite it.
        evt.eventTitle = enhancedEvent["eventTitle"]
    pass
import dataset.cars196
import dataset.cub200
import dataset.stanford_online_products
def select(dataset, opt, data_path, TrainDatasetClass=None):
    """
    Return the dataset objects for the requested benchmark.

    Args:
        dataset: dataset name; must contain one of 'cub200', 'cars196'
            or 'online_products'.
        opt: run options forwarded to the dataset module.
        data_path: root directory of the dataset on disk.
        TrainDatasetClass: optional dataset class override for training.

    Raises:
        NotImplementedError: if no known dataset name matches.
    """
    # BUG FIX: the module-level ``import dataset.cars196``-style imports bind
    # the name ``dataset``, which is shadowed by this function's ``dataset``
    # parameter, so the bare names ``cub200``/``cars196``/... below used to
    # raise NameError.  Import the submodules locally instead (the import
    # system resolves ``dataset`` as a package, not as the local variable).
    if 'cub200' in dataset:
        from dataset import cub200
        return cub200.get_dataset(opt, data_path, TrainDatasetClass)
    if 'cars196' in dataset:
        from dataset import cars196
        return cars196.get_dataset(opt, data_path, TrainDatasetClass)
    if 'online_products' in dataset:
        from dataset import stanford_online_products
        return stanford_online_products.get_dataset(opt, data_path, TrainDatasetClass)
    raise NotImplementedError('A dataset for {} is currently not implemented.\n\
            Currently available are : cub200, cars196 & online_products!'.format(dataset))
from __future__ import print_function, division
from warnings import warn
from nilmtk.disaggregate import Disaggregator
from keras.layers import Conv1D, Dense, Dropout, Reshape, Flatten
import os
import pickle
import pandas as pd
import numpy as np
from collections import OrderedDict
from keras.optimizers import SGD
from keras.models import Sequential, load_model
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from keras.callbacks import ModelCheckpoint
import keras.backend as K
import random
import sys
import json
from .util import NumpyEncoder
# Fix RNG seeds so training/validation splits and weight files are reproducible.
random.seed(10)
np.random.seed(10)
class SequenceLengthError(Exception):
    """Raised when the configured sequence_length is not an odd number."""
    pass
class ApplianceNotFoundError(Exception):
    """Raised when normalization parameters for an appliance are missing."""
    pass
class DL_disagregator(Disaggregator):
    """Seq2point-style neural NILM disaggregator base (Keras CNN).

    Trains one convolutional network per appliance that maps a normalized
    window of mains readings onto the appliance power at the window midpoint.
    """
    def __init__(self, params):
        """
        Parameters to be specified for the model.

        Recognized `params` keys (all optional): 'sequence_length' (odd int),
        'n_epochs', 'batch_size', 'mains_mean', 'mains_std',
        'appliance_params', 'save-model-path', 'pretrained-model-path'.
        """
        self.MODEL_NAME = " "
        self.models = OrderedDict()
        # Input window length; must be odd so the window has a true midpoint.
        self.sequence_length = params.get('sequence_length',99)
        self.n_epochs = params.get('n_epochs', 50 )
        self.batch_size = params.get('batch_size',1024)
        # Normalization constants for the mains signal.
        self.mains_mean = params.get('mains_mean',1800)
        self.mains_std = params.get('mains_std',600)
        # Per-appliance normalization constants: {name: {'mean':..,'std':..}}.
        self.appliance_params = params.get('appliance_params',{})
        self.save_model_path = params.get('save-model-path', None)
        self.load_model_path = params.get('pretrained-model-path',None)
        # NOTE(review): self.models is assigned twice; the second assignment
        # is redundant but harmless.
        self.models = OrderedDict()
        if self.load_model_path:
            self.load_model()
        if self.sequence_length%2==0:
            print ("Sequence length should be odd!")
            raise (SequenceLengthError)
    def partial_fit(self,train_main,train_appliances,do_preprocessing=True,
                    **load_kwargs):
        """Train (or continue training) one network per appliance.

        train_main: list of mains DataFrames.
        train_appliances: list of (appliance_name, [DataFrame, ...]) pairs.
        """
        # If no appliance wise parameters are provided, then compute them using the first chunk
        if len(self.appliance_params) == 0:
            self.set_appliance_params(train_appliances)
        print("...............Seq2Point partial_fit running...............")
        # Do the pre-processing, such as windowing and normalizing
        if do_preprocessing:
            train_main, train_appliances = self.call_preprocessing(
                train_main, train_appliances, 'train')
        # Stack all chunks and reshape to (samples, sequence_length, 1).
        train_main = pd.concat(train_main,axis=0)
        train_main = train_main.values.reshape((-1,self.sequence_length,1))
        new_train_appliances = []
        for app_name, app_df in train_appliances:
            app_df = pd.concat(app_df,axis=0)
            app_df_values = app_df.values.reshape((-1,1))
            new_train_appliances.append((app_name, app_df_values))
        train_appliances = new_train_appliances
        for appliance_name, power in train_appliances:
            # Check if the appliance was already trained. If not then create a new model for it
            if appliance_name not in self.models:
                print("First model training for ", appliance_name)
                self.models[appliance_name] = self.return_network()
            # Retrain the particular appliance
            else:
                print("Started Retraining model for ", appliance_name)
            model = self.models[appliance_name]
            if train_main.size > 0:
                # Sometimes chunks can be empty after dropping NANS
                if len(train_main) > 10:
                    # Do validation when you have sufficient samples.
                    # Checkpoint to a randomly named temp file so the best
                    # validation-loss weights can be restored after fitting.
                    filepath = 'seq2point-temp-weights-'+str(random.randint(0,100000))+'.h5'
                    checkpoint = ModelCheckpoint(filepath,monitor='val_loss',verbose=1,save_best_only=True,mode='min')
                    train_x, v_x, train_y, v_y = train_test_split(train_main, power, test_size=.15,random_state=10)
                    model.fit(train_x,train_y,validation_data=[v_x,v_y],epochs=self.n_epochs,callbacks=[checkpoint],batch_size=self.batch_size)
                    model.load_weights(filepath)
        if self.save_model_path:
            self.save_model()
    def load_model(self):
        """Restore per-appliance networks and parameters from load_model_path."""
        print ("Loading the model using the pretrained-weights")
        model_folder = self.load_model_path
        if os.path.exists(os.path.join(model_folder, "model.json")):
            with open(os.path.join(model_folder, "model.json"), "r") as f:
                model_string = f.read().strip()
                params_to_load = json.loads(model_string)
            self.sequence_length = int(params_to_load['sequence_length'])
            self.mains_mean = params_to_load['mains_mean']
            self.mains_std = params_to_load['mains_std']
            self.appliance_params = params_to_load['appliance_params']
            # Rebuild each network and load its saved weights.
            for appliance_name in self.appliance_params:
                self.models[appliance_name] = self.return_network()
                self.models[appliance_name].load_weights(os.path.join(model_folder,appliance_name+".h5"))
    def save_model(self):
        """Persist per-appliance weights plus normalization params to disk."""
        if (os.path.exists(self.save_model_path) == False):
            os.makedirs(self.save_model_path)
        params_to_save = {}
        params_to_save['appliance_params'] = self.appliance_params
        params_to_save['sequence_length'] = self.sequence_length
        params_to_save['mains_mean'] = self.mains_mean
        params_to_save['mains_std'] = self.mains_std
        for appliance_name in self.models:
            print ("Saving model for ", appliance_name)
            self.models[appliance_name].save_weights(os.path.join(self.save_model_path,appliance_name+".h5"))
        with open(os.path.join(self.save_model_path,'model.json'),'w') as file:
            file.write(json.dumps(params_to_save, cls=NumpyEncoder))
    def disaggregate_chunk(self,test_main_list,model=None,do_preprocessing=True):
        """Predict per-appliance power for each mains chunk.

        Returns a list of DataFrames (one per input chunk) whose columns are
        keyed by appliance name.
        """
        if model is not None:
            self.models = model
        # Preprocess the test mains such as windowing and normalizing
        if do_preprocessing:
            test_main_list = self.call_preprocessing(test_main_list, submeters_lst=None, method='test')
        test_predictions = []
        for test_main in test_main_list:
            test_main = test_main.values
            test_main = test_main.reshape((-1, self.sequence_length, 1))
            disggregation_dict = {}
            for appliance in self.models:
                prediction = self.models[appliance].predict(test_main,batch_size=self.batch_size)
                # Undo the appliance-level normalization.
                prediction = self.appliance_params[appliance]['mean'] + prediction * self.appliance_params[appliance]['std']
                valid_predictions = prediction.flatten()
                # Power cannot be negative; clamp predictions at zero.
                valid_predictions = np.where(valid_predictions > 0, valid_predictions, 0)
                df = pd.Series(valid_predictions)
                disggregation_dict[appliance] = df
            results = pd.DataFrame(disggregation_dict, dtype='float32')
            test_predictions.append(results)
        return test_predictions
    def return_network(self):
        """Build the seq2point CNN: five conv layers -> dense -> scalar output."""
        # Model architecture
        model = Sequential()
        model.add(Conv1D(30,10,activation="relu",input_shape=(self.sequence_length,1),strides=1))
        model.add(Conv1D(30, 8, activation='relu', strides=1))
        model.add(Conv1D(40, 6, activation='relu', strides=1))
        model.add(Conv1D(50, 5, activation='relu', strides=1))
        model.add(Dropout(.2))
        model.add(Conv1D(50, 5, activation='relu', strides=1))
        model.add(Dropout(.2))
        model.add(Flatten())
        model.add(Dense(1024, activation='relu'))
        model.add(Dropout(.2))
        model.add(Dense(1))
        model.compile(loss='mse', optimizer='adam')  # ,metrics=[self.mse])
        return model
    def call_preprocessing(self, mains_lst, submeters_lst, method):
        """Window and normalize mains (and, for training, appliance) readings.

        method='train' returns (mains_df_list, appliance_list); any other
        method returns only the processed mains list.
        """
        if method == 'train':
            # Preprocessing for the train data
            mains_df_list = []
            for mains in mains_lst:
                new_mains = mains.values.flatten()
                n = self.sequence_length
                units_to_pad = n // 2
                # Zero-pad both ends so every sample gets a centered window.
                new_mains = np.pad(new_mains,(units_to_pad,units_to_pad),'constant',constant_values=(0,0))
                new_mains = np.array([new_mains[i:i + n] for i in range(len(new_mains) - n + 1)])
                new_mains = (new_mains - self.mains_mean) / self.mains_std
                mains_df_list.append(pd.DataFrame(new_mains))
            appliance_list = []
            for app_index, (app_name, app_df_list) in enumerate(submeters_lst):
                if app_name in self.appliance_params:
                    app_mean = self.appliance_params[app_name]['mean']
                    app_std = self.appliance_params[app_name]['std']
                else:
                    print ("Parameters for ", app_name ," were not found!")
                    raise ApplianceNotFoundError()
                processed_appliance_dfs = []
                for app_df in app_df_list:
                    new_app_readings = app_df.values.reshape((-1, 1))
                    # This is for choosing windows
                    new_app_readings = (new_app_readings - app_mean) / app_std
                    # Return as a list of dataframe
                    processed_appliance_dfs.append(pd.DataFrame(new_app_readings))
                appliance_list.append((app_name, processed_appliance_dfs))
            return mains_df_list, appliance_list
        else:
            # Preprocessing for the test data
            mains_df_list = []
            for mains in mains_lst:
                new_mains = mains.values.flatten()
                n = self.sequence_length
                units_to_pad = n // 2
                new_mains = np.pad(new_mains,(units_to_pad,units_to_pad),'constant',constant_values=(0,0))
                new_mains = np.array([new_mains[i:i + n] for i in range(len(new_mains) - n + 1)])
                new_mains = (new_mains - self.mains_mean) / self.mains_std
                mains_df_list.append(pd.DataFrame(new_mains))
            return mains_df_list
    def set_appliance_params(self,train_appliances):
        """Compute per-appliance mean/std from the first training chunk."""
        # Find the parameters using the first chunk of each appliance.
        for (app_name,df_list) in train_appliances:
            l = np.array(pd.concat(df_list,axis=0))
            app_mean = np.mean(l)
            app_std = np.std(l)
            # Guard against a degenerate (near-constant) signal.
            if app_std<1:
                app_std = 100
            self.appliance_params.update({app_name:{'mean':app_mean,'std':app_std}})
        print (self.appliance_params)
"""
Tests for vertex.py
"""
from graphpy.edge import UndirectedEdge, DirectedEdge
from graphpy.vertex import UndirectedVertex, DirectedVertex
import unittest
################################################################################
# #
# Undirected #
# #
################################################################################
class TestUndirectedVertex(unittest.TestCase):
    """Unit tests for graphpy.vertex.UndirectedVertex."""
    def test_create_undirected_vertex(self):
        """ Create an undirected vertex """
        v0 = UndirectedVertex(val='v0')
        self.assertEqual(v0.val, 'v0')
        self.assertEqual(set(v0.edges), set())
    def test_undirected_vertex_val(self):
        """ Get an undirected vertex's val property (read-only) """
        v0 = UndirectedVertex(val='v0')
        self.assertEqual(v0.val, 'v0')
        with self.assertRaises(AttributeError):
            v0.val = ''
    def test_undirected_vertex_edges(self):
        """ Get an undirected vertex's edges property (read-only) """
        v0 = UndirectedVertex(val='v0')
        v1 = UndirectedVertex(val='v1')
        v2 = UndirectedVertex(val='v2')
        e00 = UndirectedEdge((v0, v0))
        e01 = UndirectedEdge((v0, v1))
        e02 = UndirectedEdge((v0, v2))
        v0.add_edge(e00)
        v0.add_edge(e01)
        v1.add_edge(e01)
        v0.add_edge(e02)
        v2.add_edge(e02)
        self.assertEqual(set(v0.edges), set([e00, e01, e02]))
        self.assertEqual(set(v1.edges), set([e01]))
        self.assertEqual(set(v2.edges), set([e02]))
        with self.assertRaises(AttributeError):
            v0.edges = set()
    def test_undirected_vertex_has_self_edge(self):
        """ Get an undirected vertex's has_self_edge property """
        v0 = UndirectedVertex(val='v0')
        v1 = UndirectedVertex(val='v1')
        v2 = UndirectedVertex(val='v2')
        e00 = UndirectedEdge((v0, v0))
        e01 = UndirectedEdge((v0, v1))
        e11 = UndirectedEdge((v1, v1))
        e02 = UndirectedEdge((v0, v2))
        v0.add_edge(e00)
        v0.add_edge(e01)
        v1.add_edge(e01)
        v0.add_edge(e02)
        v2.add_edge(e02)
        self.assertTrue(v0.has_self_edge)
        self.assertFalse(v1.has_self_edge)
        self.assertFalse(v2.has_self_edge)
        with self.assertRaises(AttributeError):
            v0.has_self_edge = True
        # The property must track edge additions and removals.
        v1.add_edge(e11)
        v0.remove_edge(e00)
        self.assertFalse(v0.has_self_edge)
        self.assertTrue(v1.has_self_edge)
        self.assertFalse(v2.has_self_edge)
    def test_undirected_vertex_neighbors_and_degree(self):
        """ Get undirected vertices' neighbors and degree properties """
        v0 = UndirectedVertex(val='v0')
        v1 = UndirectedVertex(val='v1')
        v2 = UndirectedVertex(val='v2')
        e00 = UndirectedEdge((v0, v0))
        e01 = UndirectedEdge((v0, v1))
        e02 = UndirectedEdge((v0, v2))
        v0.add_edge(e01)
        v0.add_edge(e02)
        self.assertEqual(set(v0.neighbors), set([v1, v2]))
        with self.assertRaises(AttributeError):
            v0.neighbors = set()
        self.assertEqual(v0.degree, 2)
        with self.assertRaises(AttributeError):
            v0.degree = 0
        # A self-loop makes a vertex its own neighbor and counts twice
        # towards degree.
        v0.add_edge(e00)
        self.assertEqual(set(v0.neighbors), set([v0, v1, v2]))
        self.assertEqual(v0.degree, 4)
    def test_undirected_vertex_add_edge(self):
        """ Add an edge to an undirected vertex """
        v0 = UndirectedVertex(val='v0')
        v1 = UndirectedVertex(val='v1')
        v2 = UndirectedVertex(val='v2')
        e00 = UndirectedEdge((v0, v0))
        e01 = UndirectedEdge((v0, v1))
        e10 = UndirectedEdge((v1, v0))
        e02 = UndirectedEdge((v0, v2))
        v0.add_edge(e00)
        v0.add_edge(e01)
        # (v0, v1) and (v1, v0) are the same undirected edge.
        self.assertTrue(e00 in v0)
        self.assertTrue(e01 in v0)
        self.assertTrue(e10 in v0)
        self.assertFalse(e02 in v0)
    def test_undirected_vertex_remove_edge(self):
        """ Remove an edge from an undirected vertex """
        v0 = UndirectedVertex(val='v0')
        v1 = UndirectedVertex(val='v1')
        v2 = UndirectedVertex(val='v2')
        e01 = UndirectedEdge((v0, v1))
        e10 = UndirectedEdge((v1, v0))
        e02 = UndirectedEdge((v0, v2))
        v0.add_edge(e01)
        v0.add_edge(e02)
        v0.remove_edge(e01)
        self.assertFalse(e01 in v0)
        self.assertFalse(e10 in v0)
        self.assertTrue(e02 in v0)
        v0.remove_edge(e02)
        self.assertFalse(e01 in v0)
        self.assertFalse(e10 in v0)
        self.assertFalse(e02 in v0)
    def test_undirected_vertex_get(self):
        """ Get an attribute of an undirected vertex """
        v0 = UndirectedVertex(val='v0', attrs={'city': 'Modena'})
        self.assertEqual(v0.get('city'), 'Modena')
        self.assertIsNone(v0.get('notthere'))
    def test_undirected_vertex_set(self):
        """ Set an attribute of an undirected vertex """
        v0 = UndirectedVertex(val='v0')
        v0.set('city', 'Modena')
        self.assertEqual(v0.attrs, {'city': 'Modena'})
        v0.set(0, 1)
        self.assertEqual(v0.attrs, {'city': 'Modena', 0: 1})
    def test_undirected_vertex_has_attr(self):
        """ Check if an undirected vertex has a particular attribute """
        v0 = UndirectedVertex(val='v0', attrs={'city': 'Modena', 0: 1})
        self.assertTrue(v0.has_attr('city'))
        self.assertFalse(v0.has_attr('town'))
        v0.del_attr('city')
        self.assertFalse(v0.has_attr('city'))
    def test_undirected_vertex_del_attr(self):
        """ Delete an attribute of an undirected vertex """
        v0 = UndirectedVertex(val='v0', attrs={'city': 'Modena', 0: 1})
        v0.del_attr('city')
        self.assertEqual(v0.attrs, {0: 1})
        v0.del_attr(0)
        self.assertEqual(v0.attrs, {})
    def test_undirected_vertex_already_has_edge(self):
        """ An undirected vertex should not be able to add an edge that it
            already has """
        v0 = UndirectedVertex(val='v0')
        v1 = UndirectedVertex(val='v1')
        e01 = UndirectedEdge((v0, v1))
        v0.add_edge(e01)
        with self.assertRaises(ValueError):
            v0.add_edge(e01)
        try:
            v1.add_edge(e01)
        except ValueError:
            self.fail("Adding the edge (v0, v1) to v0 should not stop the edge "
                      "(v0, v1) from being added to v1.")
    def test_undirected_vertex_not_part_of_edge(self):
        """ An undirected vertex should not be able add an edge which doesn't
            have it as a vertex """
        v0 = UndirectedVertex(val='v0')
        v1 = UndirectedVertex(val='v1')
        v2 = UndirectedVertex(val='v2')
        e12 = UndirectedEdge((v1, v2))
        with self.assertRaises(ValueError):
            v0.add_edge(e12)
################################################################################
# #
# Directed #
# #
################################################################################
class TestDirectedVertex(unittest.TestCase):
    """Unit tests for graphpy.vertex.DirectedVertex."""
    def test_create_directed_vertex(self):
        """ Create a directed vertex """
        v0 = DirectedVertex(val='v0')
        self.assertEqual(v0.val, 'v0')
        self.assertEqual(set(v0.edges), set())
    def test_directed_vertex_val(self):
        """ Get a directed vertex's val property (read-only) """
        v0 = DirectedVertex(val='v0')
        self.assertEqual(v0.val, 'v0')
        with self.assertRaises(AttributeError):
            v0.val = ''
    def test_directed_vertex_edges(self):
        """ Get a directed vertex's edges property (read-only) """
        v0 = DirectedVertex(val='v0')
        v1 = DirectedVertex(val='v1')
        v2 = DirectedVertex(val='v2')
        e00 = DirectedEdge((v0, v0))
        e01 = DirectedEdge((v0, v1))
        e20 = DirectedEdge((v2, v0))
        v0.add_edge(e00)
        v0.add_edge(e01)
        v1.add_edge(e01)
        v0.add_edge(e20)
        v2.add_edge(e20)
        self.assertEqual(set(v0.edges), set([e00, e01, e20]))
        self.assertEqual(set(v1.edges), set([e01]))
        self.assertEqual(set(v2.edges), set([e20]))
        with self.assertRaises(AttributeError):
            v0.edges = set()
    def test_directed_vertex_outs_and_ins_and_degrees(self):
        """ Get directed vertices' outs, ins, out_degree, and in_degree
            properties """
        v0 = DirectedVertex(val='v0')
        v1 = DirectedVertex(val='v1')
        v2 = DirectedVertex(val='v2')
        e00 = DirectedEdge((v0, v0))
        e01 = DirectedEdge((v0, v1))
        e02 = DirectedEdge((v0, v2))
        e10 = DirectedEdge((v1, v0))
        v0.add_edge(e00)
        v0.add_edge(e01)
        v0.add_edge(e02)
        v0.add_edge(e10)
        # The self-loop e00 contributes to both outs and ins.
        self.assertEqual(set(v0.outs), set([v0, v1, v2]))
        self.assertEqual(set(v0.ins), set([v0, v1]))
        self.assertEqual(v0.out_degree, 3)
        self.assertEqual(v0.in_degree, 2)
        self.assertEqual(v0.degree, 5)
    def test_directed_vertex_add_edge(self):
        """ Add an edge to a directed vertex """
        v0 = DirectedVertex(val='v0')
        v1 = DirectedVertex(val='v1')
        v2 = DirectedVertex(val='v2')
        e01 = DirectedEdge((v0, v1))
        e10 = DirectedEdge((v1, v0))
        e02 = DirectedEdge((v0, v2))
        v0.add_edge(e01)
        # (v0 -> v1) and (v1 -> v0) are distinct directed edges.
        self.assertTrue(e01 in v0)
        self.assertFalse(e10 in v0)
        self.assertFalse(e02 in v0)
    def test_directed_vertex_remove_edge(self):
        """ Remove an edge from a directed vertex """
        v0 = DirectedVertex(val='v0')
        v1 = DirectedVertex(val='v1')
        v2 = DirectedVertex(val='v2')
        e01 = DirectedEdge((v0, v1))
        e10 = DirectedEdge((v1, v0))
        e02 = DirectedEdge((v0, v2))
        v0.add_edge(e01)
        v0.add_edge(e10)
        v0.add_edge(e02)
        v0.remove_edge(e01)
        self.assertFalse(e01 in v0)
        self.assertTrue(e10 in v0)
        self.assertTrue(e02 in v0)
        v0.remove_edge(e02)
        self.assertFalse(e01 in v0)
        self.assertTrue(e10 in v0)
        self.assertFalse(e02 in v0)
    def test_directed_vertex_get(self):
        """ Get an attribute of a directed vertex """
        v0 = DirectedVertex(val='v0', attrs={'city': 'Modena'})
        self.assertEqual(v0.get('city'), 'Modena')
        self.assertIsNone(v0.get('notthere'))
    def test_directed_vertex_set(self):
        """ Set an attribute of an directed vertex """
        v0 = DirectedVertex(val='v0')
        v0.set('city', 'Modena')
        self.assertEqual(v0.attrs, {'city': 'Modena'})
        v0.set(0, 1)
        self.assertEqual(v0.attrs, {'city': 'Modena', 0: 1})
    # BUG FIX: this test was misnamed test_undirected_vertex_has_attr even
    # though it exercises DirectedVertex; renamed to match its siblings
    # (still matches unittest's test_* discovery pattern).
    def test_directed_vertex_has_attr(self):
        """ Check if a directed vertex has a particular attribute """
        v0 = DirectedVertex(val='v0', attrs={'city': 'Modena', 0: 1})
        self.assertTrue(v0.has_attr('city'))
        self.assertFalse(v0.has_attr('town'))
        v0.del_attr('city')
        self.assertFalse(v0.has_attr('city'))
    def test_directed_vertex_del_attr(self):
        """ Delete an attribute of a directed vertex """
        v0 = DirectedVertex(val='v0', attrs={'city': 'Modena', 0: 1})
        v0.del_attr('city')
        self.assertEqual(v0.attrs, {0: 1})
        v0.del_attr(0)
        self.assertEqual(v0.attrs, {})
    def test_directed_vertex_already_has_edge(self):
        """ A directed vertex should not be able to add an edge that it already
            has """
        v0 = DirectedVertex(val='v0')
        v1 = DirectedVertex(val='v1')
        e01 = DirectedEdge((v0, v1))
        e10 = DirectedEdge((v1, v0))
        v0.add_edge(e01)
        with self.assertRaises(ValueError):
            v0.add_edge(e01)
        try:
            v1.add_edge(e01)
        except ValueError:
            self.fail("Adding the edge (v0, v1) to v0 should not stop the edge "
                      "(v0, v1) from being added to v1.")
        try:
            v0.add_edge(e10)
        except ValueError:
            self.fail("There should be no exception because (v1 -> v0) is a "
                      "different edge than (v0 -> v1) for a directed vertex.")
    def test_directed_vertex_not_part_of_edge(self):
        """ A directed vertex should not be able add an edge which doesn't have
            it as a vertex """
        v0 = DirectedVertex(val='v0')
        v1 = DirectedVertex(val='v1')
        v2 = DirectedVertex(val='v2')
        e12 = DirectedEdge((v1, v2))
        with self.assertRaises(ValueError):
            v0.add_edge(e12)
# Allow running the test module directly with ``python``.
if __name__ == '__main__':
    unittest.main()
from src.job.compute.managers.pbs import PBSComputeParser
from src.job.compute.managers.slurm import SLURMComputeParser
def get_compute_parser(name, work_dir):
    """
    Return an instance of the compute parser class matching *name*.

    Args:
        name (str): parser name, PBS or SLURM.
        work_dir (str): full path to job working directory.
    """
    # Registry of supported workload managers; unknown names raise KeyError.
    registry = {"PBS": PBSComputeParser, "SLURM": SLURMComputeParser}
    parser_class = registry[name]
    return parser_class(work_dir)
# coding: utf-8
"""
Salt Edge Account Information API
API Reference for services # noqa: E501
OpenAPI spec version: 5.0.0
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class BasicReport(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'customer_id': 'str',
'customer_identifier': 'str',
'connection_ids': 'list[str]',
'status': 'str',
'created_at': 'datetime'
}
attribute_map = {
'id': 'id',
'customer_id': 'customer_id',
'customer_identifier': 'customer_identifier',
'connection_ids': 'connection_ids',
'status': 'status',
'created_at': 'created_at'
}
    def __init__(self, id=None, customer_id=None, customer_identifier=None, connection_ids=None, status=None, created_at=None): # noqa: E501
        """BasicReport - a model defined in Swagger""" # noqa: E501
        self._id = None
        self._customer_id = None
        self._customer_identifier = None
        self._connection_ids = None
        self._status = None
        self._created_at = None
        self.discriminator = None
        # All six fields are required by the schema; each property setter
        # below raises ValueError when given None.
        self.id = id
        self.customer_id = customer_id
        self.customer_identifier = customer_identifier
        self.connection_ids = connection_ids
        self.status = status
        self.created_at = created_at
    @property
    def id(self):
        """Gets the id of this BasicReport. # noqa: E501
        the `id` of the general report generated based on the customer's data # noqa: E501
        :return: The id of this BasicReport. # noqa: E501
        :rtype: str
        """
        return self._id
    @id.setter
    def id(self, id):
        """Sets the id of this BasicReport.
        the `id` of the general report generated based on the customer's data # noqa: E501
        :param id: The id of this BasicReport. # noqa: E501
        :type: str
        """
        # Required field: reject None to match the API schema.
        if id is None:
            raise ValueError("Invalid value for `id`, must not be `None`") # noqa: E501
        self._id = id
    @property
    def customer_id(self):
        """Gets the customer_id of this BasicReport. # noqa: E501
        the `id` of the [customer](#customers) for which the report has been requested # noqa: E501
        :return: The customer_id of this BasicReport. # noqa: E501
        :rtype: str
        """
        return self._customer_id
    @customer_id.setter
    def customer_id(self, customer_id):
        """Sets the customer_id of this BasicReport.
        the `id` of the [customer](#customers) for which the report has been requested # noqa: E501
        :param customer_id: The customer_id of this BasicReport. # noqa: E501
        :type: str
        """
        # Required field: reject None to match the API schema.
        if customer_id is None:
            raise ValueError("Invalid value for `customer_id`, must not be `None`") # noqa: E501
        self._customer_id = customer_id
    @property
    def customer_identifier(self):
        """Gets the customer_identifier of this BasicReport. # noqa: E501
        unique [customer](#customers) identifier # noqa: E501
        :return: The customer_identifier of this BasicReport. # noqa: E501
        :rtype: str
        """
        return self._customer_identifier
    @customer_identifier.setter
    def customer_identifier(self, customer_identifier):
        """Sets the customer_identifier of this BasicReport.
        unique [customer](#customers) identifier # noqa: E501
        :param customer_identifier: The customer_identifier of this BasicReport. # noqa: E501
        :type: str
        """
        # Required field: reject None to match the API schema.
        if customer_identifier is None:
            raise ValueError("Invalid value for `customer_identifier`, must not be `None`") # noqa: E501
        self._customer_identifier = customer_identifier
    @property
    def connection_ids(self):
        """Gets the connection_ids of this BasicReport. # noqa: E501
        `ids` of [connections](#connections) included in the report # noqa: E501
        :return: The connection_ids of this BasicReport. # noqa: E501
        :rtype: list[str]
        """
        return self._connection_ids
    @connection_ids.setter
    def connection_ids(self, connection_ids):
        """Sets the connection_ids of this BasicReport.
        `ids` of [connections](#connections) included in the report # noqa: E501
        :param connection_ids: The connection_ids of this BasicReport. # noqa: E501
        :type: list[str]
        """
        # Required field: reject None to match the API schema.
        if connection_ids is None:
            raise ValueError("Invalid value for `connection_ids`, must not be `None`") # noqa: E501
        self._connection_ids = connection_ids
@property
def status(self):
    """str: the report's current status."""
    return self._status

@status.setter
def status(self, status):
    """Set the report status.

    :param status: one of "initialized", "success", "failed", "calculating".
    :raises ValueError: if `status` is None or not one of the allowed values.
    """
    if status is None:
        raise ValueError("Invalid value for `status`, must not be `None`")  # noqa: E501
    allowed_values = ["initialized", "success", "failed", "calculating"]  # noqa: E501
    if status not in allowed_values:
        raise ValueError(
            "Invalid value for `status` ({0}), must be one of {1}"  # noqa: E501
            .format(status, allowed_values)
        )
    self._status = status
@property
def created_at(self):
    """datetime: the date when the report was created."""
    return self._created_at

@created_at.setter
def created_at(self, created_at):
    """Set the report creation timestamp.

    :param created_at: The created_at of this BasicReport.
    :raises ValueError: if `created_at` is None (the field is mandatory).
    """
    if created_at is None:
        raise ValueError("Invalid value for `created_at`, must not be `None`")  # noqa: E501
    self._created_at = created_at
def to_dict(self):
    """Return the model's properties as a dict, recursing into nested models."""
    result = {}
    for attr in self.swagger_types:
        value = getattr(self, attr)
        if isinstance(value, list):
            # Convert every element that is itself a model.
            result[attr] = [
                item.to_dict() if hasattr(item, "to_dict") else item
                for item in value
            ]
        elif hasattr(value, "to_dict"):
            result[attr] = value.to_dict()
        elif isinstance(value, dict):
            # Convert model values inside plain dicts as well.
            result[attr] = {
                key: val.to_dict() if hasattr(val, "to_dict") else val
                for key, val in value.items()
            }
        else:
            result[attr] = value
    # Swagger models that subclass dict carry extra ad-hoc keys too.
    if issubclass(BasicReport, dict):
        for key, value in self.items():
            result[key] = value
    return result
def to_str(self):
    """Return a pretty-printed string form of the model."""
    return pprint.pformat(self.to_dict())
def __repr__(self):
    """Delegate to :meth:`to_str` so `print` and `pprint` show the full model."""
    return self.to_str()
def __eq__(self, other):
    """Two BasicReports are equal when all their attributes match."""
    if isinstance(other, BasicReport):
        return self.__dict__ == other.__dict__
    return False
def __ne__(self, other):
    """Inverse of :meth:`__eq__`."""
    return not (self == other)
| StarcoderdataPython |
# Count how many times a user-supplied number appears in the list.
numbers = [1, 6, 8, 1, 2, 1, 5, 6]
input_number = int(input("Enter a number: "))
# list.count scans the list in C instead of a hand-written loop.
occurs = numbers.count(input_number)
print("{} appears {} time(s) in my list.".format(input_number, occurs))
| StarcoderdataPython |
1815237 | from datetime import datetime, timezone
from typing import Union, List, Dict, Tuple
from .covidstatistics import *
from .exceptions import NotFound, BadSortParameter, BadYesterdayParameter, BadTwoDaysAgoParameter, BadAllowNoneParameter
from .covidendpoints import *
class Covid:
"""
Handles interactions with the Open Disease API's COVID-19 data.
"""
def __init__(self, api_url, request_client):
    """Store the API base URL and the async HTTP client used for all requests."""
    self.api_url = api_url
    self.request_client = request_client
def _check_sort(self, sort):
    """Validate a sort key accepted by the API's list endpoints.

    :param sort: field name to sort results by
    :raises BadSortParameter: if `sort` is not a sortable field
    """
    # A set reads as a single whitelist and gives O(1) membership instead
    # of a linear scan over a 23-element list on every call.
    valid = {
        'updated', 'country', 'countryInfo', 'cases', 'todayCases', 'deaths',
        'todayDeaths', 'recovered', 'todayRecovered', 'active', 'critical',
        'casesPerOneMillion', 'deathsPerOneMillion', 'tests',
        'testsPerOneMillion', 'population', 'continent', 'oneCasePerPeople',
        'oneDeathPerPeople', 'oneTestPerPeople', 'activePerOneMillion',
        'recoveredPerOneMillion', 'criticalPerOneMillion',
    }
    if sort not in valid:
        raise BadSortParameter('Invalid sort parameter.')
def _check_yesterday(self, value):
    """Raise BadYesterdayParameter unless `value` is a bool."""
    if isinstance(value, bool):
        return
    raise BadYesterdayParameter('Value for yesterday should either be True or False.')
def _check_two_days_ago(self, value):
    """Raise BadTwoDaysAgoParameter unless `value` is a bool."""
    if isinstance(value, bool):
        return
    raise BadTwoDaysAgoParameter('Value for two_days_ago should either be True or False.')
def _check_allow_none(self, value):
    """Raise BadAllowNoneParameter unless `value` is a bool."""
    if isinstance(value, bool):
        return
    raise BadAllowNoneParameter('Value for allow_none should either be True or False.')
def _compile_today(self, data):
    """Build a Today record from the 'today*' fields of an API payload."""
    keys = ('todayCases', 'todayDeaths', 'todayRecovered')
    return Today(*(data.get(k) for k in keys))
def _compile_permillion(self, data):
    """Build a PerMillion record from the '*PerOneMillion' fields of a payload."""
    keys = ('casesPerOneMillion', 'deathsPerOneMillion', 'testsPerOneMillion',
            'activePerOneMillion', 'recoveredPerOneMillion', 'criticalPerOneMillion')
    return PerMillion(*(data.get(k) for k in keys))
def _compile_perpeople(self, data):
    """Build a PerPeople record from the 'one*PerPeople' fields of a payload."""
    keys = ('oneCasePerPeople', 'oneDeathPerPeople', 'oneTestPerPeople')
    return PerPeople(*(data.get(k) for k in keys))
def _compile_statetoday(self, data):
    """Build a StateToday record (US states have no 'todayRecovered' field)."""
    return StateToday(data.get('todayCases'), data.get('todayDeaths'))
def _compile_statepermillion(self, data):
    """Build a StatePerMillion record from a US state payload."""
    keys = ('casesPerOneMillion', 'deathsPerOneMillion', 'testsPerOneMillion')
    return StatePerMillion(*(data.get(k) for k in keys))
def _compile_countryInfo(self, countryInfo):
    """Map the raw 'countryInfo' dict onto a CountryInfo object."""
    return CountryInfo(
        countryInfo.get("_id"),
        countryInfo.get("iso2"),
        countryInfo.get("iso3"),
        countryInfo.get("lat"),
        countryInfo.get("long"),
        countryInfo.get("flag"),
    )
def _compile_country_data(self, country_stats):
    """Assemble a Country object from one country's API payload."""
    # 'updated' is a millisecond epoch; missing -> the Unix epoch.
    updated = datetime.utcfromtimestamp(country_stats.get("updated", 0) / 1000.0)
    return Country(
        self._compile_countryInfo(country_stats["countryInfo"]),
        country_stats.get("country"),
        country_stats.get("cases", 0),
        country_stats.get("deaths", 0),
        country_stats.get("recovered", 0),
        self._compile_today(country_stats),
        country_stats.get("critical", 0),
        country_stats.get("active", 0),
        country_stats.get("tests", 0),
        self._compile_permillion(country_stats),
        self._compile_perpeople(country_stats),
        country_stats.get("continent"),
        country_stats.get("population", 0),
        updated,
    )
def _compile_state(self, state_dict):
    """Assemble a State object from one US state's API payload."""
    return State(
        state_dict.get("state"),
        state_dict.get("cases", 0),
        state_dict.get("deaths", 0),
        self._compile_statetoday(state_dict),
        state_dict.get("active", 0),
        state_dict.get("tests", 0),
        self._compile_statepermillion(state_dict),
    )
def _generate_history(self, historical_stats, is_county=False):
    """Convert a historical-timeline payload into a Historical object.

    :param historical_stats: raw payload; global/country payloads wrap the
        data in a 'timeline' key, the 'all' query returns it at top level
    :param is_county: county payloads carry no recovery data and use
        'province'/'county' instead of 'country'/'province' for the names
    """
    if not is_county:
        country_name = historical_stats.get("country", "Global")
        province_name = historical_stats.get("province")
    else:
        country_name = historical_stats.get("province")
        province_name = historical_stats.get("county")
    # Unwrap 'timeline' if present (absent when country was 'all').
    timeline = historical_stats.get("timeline", historical_stats)
    case_history = []
    death_history = []
    recovery_history = [] if not is_county else None
    # Pass on all historical data; the client decides how much it wants.
    for date in list(timeline["cases"].keys()):
        _d = datetime.strptime(date, "%m/%d/%y")
        case_history.append(HistoryEntry(_d, timeline["cases"][date]))
        death_history.append(HistoryEntry(_d, timeline["deaths"][date]))
        if not is_county:
            # FIX: previously appended the raw date *string* here, while
            # cases/deaths stored the parsed datetime — now consistent.
            recovery_history.append(HistoryEntry(_d, timeline["recovered"][date]))
    his = History(case_history, death_history, recovery_history)
    return Historical(country_name, province_name, his)
def _compile_jhu_data(self, matching_county):
    """Build a JhuCsse record from one JHU CSSE place payload."""
    coords = matching_county["coordinates"]
    # Coordinates arrive as strings and may be empty -> default to 0.0.
    lat = float(coords.get("latitude")) if coords.get("latitude") else 0.0
    long_ = float(coords.get("longitude")) if coords.get("longitude") else 0.0
    stats = matching_county["stats"]
    return JhuCsse(
        matching_county.get("country"),
        matching_county.get("province"),
        matching_county.get("county"),
        datetime.strptime(matching_county.get('updatedAt'), '%Y-%m-%d %H:%M:%S'),
        stats.get("confirmed"),
        stats.get("deaths"),
        stats.get("recovered"),
        lat,
        long_,
    )
def _compile_continent(self, data):
    """Assemble a Continent object from a continent payload."""
    cases = data.get("cases", 0)
    deaths = data.get("deaths", 0)
    recoveries = data.get("recovered", 0)
    return Continent(
        data.get('continent'),
        data.get('countries'),
        cases,
        deaths,
        recoveries,
        data.get("critical", 0),
        # When 'active' is missing, derive it from the totals.
        data.get("active", cases - deaths - recoveries),
        data.get("tests", 0),
        self._compile_today(data),
        self._compile_permillion(data),
        data.get("population", 0),
        datetime.utcfromtimestamp(data.get("updated", 0) / 1000.0),
    )
def _compile_state_list(self, data):
    """Map raw NYT state rows onto NewYorkTimesState objects."""
    return list(map(self._compile_nyt_state, data))
def _compile_county_list(self, data):
    """Map raw NYT county rows onto NewYorkTimesCounty objects."""
    return list(map(self._compile_nyt_county, data))
def _compile_nyt_state(self, data):
    """Build a NewYorkTimesState row, parsing the ISO date when present."""
    raw_date = data.get('date')
    parsed = datetime.strptime(raw_date, "%Y-%m-%d") if raw_date else raw_date
    return NewYorkTimesState(
        parsed,
        data.get('state'),
        data.get('fips'),
        data.get('cases'),
        data.get('deaths'),
    )
def _compile_nyt_county(self, data):
    """Build a NewYorkTimesCounty row, parsing the ISO date when present."""
    raw_date = data.get('date')
    parsed = datetime.strptime(raw_date, "%Y-%m-%d") if raw_date else raw_date
    return NewYorkTimesCounty(
        parsed,
        data.get('county'),
        data.get('state'),
        data.get('fips'),
        data.get('cases'),
        data.get('deaths'),
    )
def _compile_apple_stats(self, data):
    """Build a Mobility record from one Apple mobility-data row."""
    raw_date = data.get("date")
    parsed = datetime.strptime(raw_date, "%Y-%m-%d") if raw_date else raw_date
    return Mobility(
        data.get("subregion_and_city"),
        # NOTE(review): the key "get_type" looks odd — Apple's mobility
        # dataset calls this field "geo_type"; confirm against the API payload.
        data.get("get_type"),
        parsed,
        data.get("driving"),
        data.get("transit"),
        data.get("walking"),
    )
def _compile_vaccine(self, data):
    """Build a Vaccine record for one trial candidate."""
    keys = ("candidate", "sponsors", "details", "trialPhase", "institutions", "funding")
    return Vaccine(*(data.get(k) for k in keys))
def _compile_vaccines(self, data):
    """Wrap the vaccine payload: source string plus one Vaccine per candidate."""
    candidates = [self._compile_vaccine(v) for v in data["data"]]
    return Vaccines(data.get("source"), candidates)
def _compile_vax_tl(self, data):
    """Turn a {date: doses} mapping into VaccineTimeline entries."""
    return [
        VaccineTimeline(datetime.strptime(day, '%m/%d/%y'), doses)
        for day, doses in data.items()
    ]
def _compile_vax_country(self, data):
    """Build a VaccineCountry with its parsed vaccination timeline."""
    timeline = self._compile_vax_tl(data['timeline'])
    return VaccineCountry(data['country'], timeline)
######################################################################################
async def all(self, **kwargs) -> Global:
    """Return worldwide COVID-19 totals as a Global object.

    Keyword args: `yesterday` and `two_days_ago` (mutually exclusive bools)
    and `allow_none` (bool), forwarded to the API as query parameters.
    """
    yesterday = kwargs.get('yesterday', False)
    two_days_ago = kwargs.get('two_days_ago', False)
    allow_none = kwargs.get('allow_none', False)
    endpoint = GLOBAL_DATA.format(self.api_url)
    if yesterday:
        self._check_yesterday(yesterday)
    if two_days_ago:
        self._check_two_days_ago(two_days_ago)
    if yesterday and two_days_ago:
        raise ValueError('yesterday and two_days_ago cannot both be True.')
    if allow_none:
        self._check_allow_none(allow_none)
    # The API expects lowercase string booleans.
    params = {
        "yesterday": str(yesterday).lower(),
        "twoDaysAgo": str(two_days_ago).lower(),
        "allowNull": str(allow_none).lower(),
    }
    data = await self.request_client.make_request(endpoint, params)
    updated = datetime.utcfromtimestamp(data.get("updated", 0) / 1000.0)
    return Global(
        data.get("cases", 0),
        data.get("deaths", 0),
        data.get("recovered", 0),
        self._compile_today(data),
        data.get("critical", 0),
        data.get("active", 0),
        data.get("tests", 0),
        self._compile_permillion(data),
        self._compile_perpeople(data),
        data.get("population", 0),
        data.get("affectedCountries"),
        updated,
    )
async def country(self, *countries, **kwargs) -> Union[Country, List[Country]]:
    """Return stats for one or more named countries."""
    yesterday = kwargs.get('yesterday', False)
    two_days_ago = kwargs.get('two_days_ago', False)
    allow_none = kwargs.get('allow_none', False)
    endpoint = COUNTRY_DATA.format(self.api_url, ','.join(map(str, countries)))
    if yesterday:
        self._check_yesterday(yesterday)
    if two_days_ago:
        self._check_two_days_ago(two_days_ago)
    if yesterday and two_days_ago:
        raise ValueError('yesterday and two_days_ago cannot both be True.')
    if allow_none:
        self._check_allow_none(allow_none)
    params = {
        "yesterday": str(yesterday).lower(),
        "twoDaysAgo": str(two_days_ago).lower(),
        "allowNull": str(allow_none).lower(),
    }
    data = await self.request_client.make_request(endpoint, params)
    # A single country comes back as a dict, several as a list.
    if isinstance(data, dict):
        return self._compile_country_data(data)
    return [self._compile_country_data(c) for c in data]
async def all_countries(self, **kwargs) -> List[Country]:
    """Return stats for every affected country (optionally sorted)."""
    yesterday = kwargs.get('yesterday', False)
    two_days_ago = kwargs.get('two_days_ago', False)
    allow_none = kwargs.get('allow_none', False)
    sort = kwargs.get('sort', None)
    endpoint = ALL_COUNTRIES.format(self.api_url)
    if yesterday:
        self._check_yesterday(yesterday)
    if two_days_ago:
        self._check_two_days_ago(two_days_ago)
    if yesterday and two_days_ago:
        raise ValueError('yesterday and two_days_ago cannot both be True.')
    if allow_none:
        self._check_allow_none(allow_none)
    params = {
        "yesterday": str(yesterday).lower(),
        "twoDaysAgo": str(two_days_ago).lower(),
        "allowNull": str(allow_none).lower(),
    }
    if sort:
        self._check_sort(sort)
        params["sort"] = sort
    payload = await self.request_client.make_request(endpoint, params)
    return [self._compile_country_data(c) for c in payload]
async def all_states(self, **kwargs) -> List[State]:
    """Return stats for every US state (optionally sorted)."""
    yesterday = kwargs.get('yesterday', False)
    allow_none = kwargs.get('allow_none', False)
    sort = kwargs.get('sort', None)
    endpoint = ALL_STATES.format(self.api_url)
    if yesterday:
        self._check_yesterday(yesterday)
    if allow_none:
        self._check_allow_none(allow_none)
    params = {
        "yesterday": str(yesterday).lower(),
        "allowNull": str(allow_none).lower(),
    }
    if sort:
        self._check_sort(sort)
        params["sort"] = sort
    payload = await self.request_client.make_request(endpoint, params)
    return [self._compile_state(s) for s in payload]
async def state(self, *states, **kwargs) -> Union[State, List[State]]:
    """Return stats for one or more US states."""
    yesterday = kwargs.get('yesterday', False)
    allow_none = kwargs.get('allow_none', False)
    endpoint = SINGLE_STATE.format(self.api_url, ','.join(map(str, states)))
    if yesterday:
        self._check_yesterday(yesterday)
    if allow_none:
        self._check_allow_none(allow_none)
    params = {
        "yesterday": str(yesterday).lower(),
        "allowNull": str(allow_none).lower(),
    }
    data = await self.request_client.make_request(endpoint, params)
    # A single state comes back as a dict, several as a list.
    if isinstance(data, dict):
        return self._compile_state(data)
    return [self._compile_state(s) for s in data]
async def country_history(self, country='all', last_days='all') -> Historical:
    """Return historical data for `country`; 'all' (the default) is global."""
    endpoint = HISTORICAL_COUNTRY.format(self.api_url, country)
    payload = await self.request_client.make_request(endpoint, {"lastdays": last_days})
    return self._generate_history(payload)
async def province_history(self, country, province, last_days='all') -> Historical:
    """Return historical data for a province within `country`."""
    endpoint = HISTORICAL_PROVINCE.format(self.api_url, country, province)
    payload = await self.request_client.make_request(endpoint, {"lastdays": last_days})
    return self._generate_history(payload)
async def county_history(self, state, county, last_days='all') -> Historical:
    """Return historical data for a county within a US state.

    :raises NotFound: if no matching state/county pair exists.
    """
    endpoint = STATE_COUNTY.format(self.api_url, state)
    payload = await self.request_client.make_request(endpoint, {"lastdays": last_days})
    for place in payload:
        if (place["province"].lower() == state.lower()
                and place["county"].lower() == county.lower()):
            return self._generate_history(place, True)
    raise NotFound('Nothing found for specified county.')
async def jhucsse(self) -> List[JhuCsse]:
    """Return JHU CSSE data, including per-province entries for several countries."""
    payload = await self.request_client.make_request(JHU_CSSE.format(self.api_url))
    return [self._compile_jhu_data(entry) for entry in payload]
async def jhu_county(self, state, county) -> JhuCsse:
    """Return JHU data for one county within a US state.

    :raises NotFound: if no matching state/county pair exists.
    """
    endpoint = JHU_SINGLE_COUNTY.format(self.api_url, county)
    candidates = await self.request_client.make_request(endpoint)
    for place in candidates:
        if (place["province"].lower() == state.lower()
                and place["county"].lower() == county.lower()):
            return self._compile_jhu_data(place)
    raise NotFound('Nothing found for specified county.')
async def jhu_all_counties(self) -> List[JhuCsse]:
    """Return JHU data for every US county."""
    payload = await self.request_client.make_request(JHU_ALL_COUNTIES.format(self.api_url))
    return [self._compile_jhu_data(entry) for entry in payload]
async def all_continents(self, **kwargs) -> List[Continent]:
    """Return stats for every continent (optionally sorted)."""
    yesterday = kwargs.get('yesterday', False)
    two_days_ago = kwargs.get('two_days_ago', False)
    allow_none = kwargs.get('allow_none', False)
    sort = kwargs.get('sort', None)
    endpoint = ALL_CONTINENTS.format(self.api_url)
    if yesterday:
        self._check_yesterday(yesterday)
    if two_days_ago:
        self._check_two_days_ago(two_days_ago)
    if yesterday and two_days_ago:
        raise ValueError('yesterday and two_days_ago cannot both be True.')
    if allow_none:
        self._check_allow_none(allow_none)
    params = {
        "yesterday": str(yesterday).lower(),
        "twoDaysAgo": str(two_days_ago).lower(),
        "allowNull": str(allow_none).lower(),
    }
    if sort:
        self._check_sort(sort)
        params["sort"] = sort
    payload = await self.request_client.make_request(endpoint, params)
    return [self._compile_continent(c) for c in payload]
async def continent(self, continent, **kwargs) -> Continent:
    """Return stats for a single continent.

    :param continent: name of the continent to query
    :raises ValueError: if yesterday and two_days_ago are both True
    """
    yesterday = kwargs.get('yesterday', False)
    two_days_ago = kwargs.get('two_days_ago', False)
    allow_none = kwargs.get('allow_none', False)
    # FIX: `continent` was previously never used, so every call hit the same
    # URL regardless of the requested continent. Assumes CONTINENT_DATA has a
    # name placeholder like the other single-item endpoints — confirm in
    # covidendpoints.
    endpoint = CONTINENT_DATA.format(self.api_url, continent)
    if yesterday:
        self._check_yesterday(yesterday)
    if two_days_ago:
        self._check_two_days_ago(two_days_ago)
    if yesterday and two_days_ago:
        raise ValueError('yesterday and two_days_ago cannot both be True.')
    if allow_none:
        # FIX: allow_none was converted but never validated here, unlike
        # every sibling endpoint method.
        self._check_allow_none(allow_none)
    params = {
        "yesterday": str(yesterday).lower(),
        # FIX: two_days_ago was computed but never sent to the API.
        "twoDaysAgo": str(two_days_ago).lower(),
        "allowNull": str(allow_none).lower(),
    }
    data = await self.request_client.make_request(endpoint, params)
    return self._compile_continent(data)
async def nyt(self) -> List[NewYorkTimesUsa]:
    """Return US-wide historical data from the New York Times, one entry per date.

    FIX: the return annotation previously claimed a single NewYorkTimesUsa,
    but the method has always returned a list of them.
    """
    endpoint = NYT_USA.format(self.api_url)
    data = await self.request_client.make_request(endpoint)
    entries = []
    for row in data:
        date = row.get('date')
        if date:
            date = datetime.strptime(date, "%Y-%m-%d")
        entries.append(NewYorkTimesUsa(date, row.get('cases'), row.get('deaths')))
    return entries
async def nyt_states(self) -> List[NewYorkTimesState]:
    """Return NYT data for all US states."""
    data = await self.request_client.make_request(NYT_ALL_STATES.format(self.api_url))
    return self._compile_state_list(data)
async def nyt_state(self, state) -> List[NewYorkTimesState]:
    """Return NYT data for one state, one row per date."""
    endpoint = NYT_SINGLE_STATE.format(self.api_url, state)
    data = await self.request_client.make_request(endpoint)
    return [self._compile_nyt_state(row) for row in data]
async def nyt_counties(self) -> List[NewYorkTimesCounty]:
    """Return NYT data for every county within every US state."""
    data = await self.request_client.make_request(NYT_ALL_COUNTIES.format(self.api_url))
    return self._compile_county_list(data)
async def nyt_county(self, county) -> NewYorkTimesCounty:
    """Return NYT data for a single named county."""
    endpoint = NYT_SINGLE_COUNTY.format(self.api_url, county)
    data = await self.request_client.make_request(endpoint)
    return self._compile_nyt_county(data)
async def apple_countries(self) -> List[str]:
    """Return the countries covered by Apple's mobility dataset."""
    return await self.request_client.make_request(APPLE_COUNTRIES.format(self.api_url))
async def apple_subregions(self, country) -> AppleSubregions:
    """Return the supported subregions of `country` in Apple's mobility dataset."""
    endpoint = APPLE_SUBREGIONS.format(self.api_url, country)
    data = await self.request_client.make_request(endpoint)
    return AppleSubregions(data.get("country"), data.get("subregions"))
async def apple_mobility_data(self, country, subregion) -> AppleSubregion:
    """Return mobility statistics for one subregion of `country`."""
    endpoint = APPLE_SINGLE_SUBREGION.format(self.api_url, country, subregion)
    data = await self.request_client.make_request(endpoint)
    stats = [self._compile_apple_stats(row) for row in data["data"]]
    return AppleSubregion(data.get("subregion"), stats)
async def gov_countries(self) -> List[str]:
    """Return the list of countries with governmental data available."""
    return await self.request_client.make_request(GOV_ALL.format(self.api_url))
async def gov(self, country, **kwargs) -> Dict:
    """Return raw governmental data for `country`.

    Each government publishes its own schema, so no unified class exists:
    the payload is returned as-is (dicts); inspect keys with
    ``list(data.keys())``.
    """
    allow_none = kwargs.get('allow_none', False)
    if allow_none:
        self._check_allow_none(allow_none)
    params = {"allowNull": str(allow_none).lower()}
    endpoint = GOV_COUNTRY.format(self.api_url, country)
    return await self.request_client.make_request(endpoint, params)
async def vaccine(self) -> Vaccines:
    """Return vaccine-trial data (sourced from raps.org's COVID-19 vaccine tracker)."""
    data = await self.request_client.make_request(VACCINE.format(self.api_url))
    return self._compile_vaccines(data)
async def vaccine_coverage(self, last_days='all') -> List[VaccineTimeline]:
    """Return global vaccine-coverage history for the last `last_days` days."""
    endpoint = COVERAGE_ALL.format(self.api_url)
    data = await self.request_client.make_request(endpoint, params={'lastdays': last_days})
    return self._compile_vax_tl(data)
async def vaccine_countries(self, last_days='all') -> List[VaccineCountry]:
    """Return vaccination timelines for every country."""
    endpoint = COVERAGE_COUNTRIES.format(self.api_url)
    data = await self.request_client.make_request(endpoint, params={'lastdays': last_days})
    return [self._compile_vax_country(entry) for entry in data]
async def vaccine_country(self, country, last_days='all') -> VaccineCountry:
    """Return the vaccination timeline for a single country."""
    endpoint = COVERAGE_COUNTRY.format(self.api_url, country)
    data = await self.request_client.make_request(endpoint, params={'lastdays': last_days})
    return self._compile_vax_country(data)
async def therapeutics(self):
    """Not implemented: the wrapper exposes no therapeutics endpoint yet."""
    # FIX: stray dataset artifact ("| StarcoderdataPython |") was fused onto
    # the raise line, turning it into a bogus binary-or expression.
    raise NotImplementedError
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 29 16:47:57 2019
@author: created by David on June 13 2020
"""
def warn(*args, **kwargs):
pass
import warnings, sys, os
warnings.warn = warn
import matplotlib; matplotlib.use('agg') #for server
import matplotlib.pyplot as plt
import seaborn as sns
from mvpa2.suite import *
#import matplotlib.pyplot as plt
from mvpa2.datasets.miscfx import remove_invariant_features ##
# import utilities
homedir = os.path.expanduser('~/REWOD/')
#add utils to path
sys.path.insert(0, homedir+'CODE/ANALYSIS/fMRI/MVPA/PYmvpa')
os.chdir(homedir+'CODE/ANALYSIS/fMRI/MVPA/PYmvpa')
import mvpa_utils
# ---------------------------- Script arguments
#subj = str(sys.argv[1])
#task = str(sys.argv[2])
#model = str(sys.argv[3])
subj = '01'
task = 'hedonic'
model = 'MVPA-04'
runs2use = 1 ##??
perm = 10.0 #0 #200 perms
print 'subject id:', subj
print 'smell VS no smell MVPA perms #', perm
#which ds to use and which mask to use
#load_file = homedir+'DERIVATIVES/ANALYSIS/MVPA/'+task+'/'+model+'/full_fds'
#save(full_fds, save_file)
#full_fds = h5load(save_file)
load_file = homedir+'DERIVATIVES/ANALYSIS/MVPA/'+task+'/'+model+'/sub-'+subj+'/mvpa/fds'
ds = h5load(load_file)
#clf = LinearCSVMC()
SVM = LinearCSVMC(C=-1.0)
clf = MappedClassifier(SVM, PCAMapper(reduce=True)) #output_dim=numPCA
partitioner = ChainNode([NFoldPartitioner(),Balancer(attr='targets',count=1,limit='partitions',apply_selection=True)],space='partitions')
cv = CrossValidation(
clf,
partitioner,
errorfx=prediction_target_matches,
postproc=BinomialProportionCI(width=.95, meth='jeffreys'))
cv_result = cv(ds)
acc = np.average(cv_result)
sDev = np.std(cv_result)
ci = cv_result.samples[:, 0]
CIlow = ci[0]
CIup = ci[1]
print 'Lower CI', CIlow
print "mean accuracy", acc
print 'Upper CI', CIup
repeater = Repeater(count=perm) # more
permutator = AttributePermutator('targets',limit={'partitions': 1},count=1)
null_cv = CrossValidation(clf,ChainNode([partitioner, permutator],space=partitioner.get_space()),postproc=mean_sample())
#MonteCarlo Null distance calculation
distr_est = MCNullDist(repeater, tail='left', measure=null_cv, enable_ca=['dist_samples'])
cv_mc = CrossValidation(clf,partitioner,postproc=mean_sample(),null_dist=distr_est,enable_ca=['stats'])
err = cv_mc(ds)
err1 = np.average(err)
print "error MC", err1
def make_plot(dist_samples, empirical, CIlow, CIup):
    """Plot the permutation null distribution with the empirical accuracy and its 95% CI."""
    sns.set_style('darkgrid')
    # NOTE(review): hist(normed=...) only exists in py2-era matplotlib;
    # newer versions renamed it to density= — confirm the pinned version.
    plt.hist(dist_samples, bins=20, normed=True, alpha=0.8, label='Null Distribution')
    plt.axvline(CIlow, color='red', ls='--', alpha=0.5, label='Lower 95% CI')
    plt.axvline(empirical, color='red', label='Empirical average cross-validated classification error ')
    plt.axvline(CIup, color='red', ls='--', alpha=0.5, label='Upper 95% CI')
    # 0.5 marks chance level for a balanced binary classification.
    plt.axvline(0.5, color='black', ls='--', label='chance-level for a binary classification with balanced samples')
    plt.legend()
fname = homedir+'DERIVATIVES/ANALYSIS/MVPA/'+task+'/'+model+'/sub-'+subj+'/mvpa/plot_acc.png'
# fname = homedir+'DERIVATIVES/ANALYSIS/MVPA/'+task+'/'+model+'/plot_acc.png'
null_dist = np.ravel(cv_mc.null_dist.ca.dist_samples)
make_plot(null_dist,acc, CIlow, CIup) #plot
plt.savefig(fname)
# p = np.asscalar(cv_mc.ca.null_prob) #old pval
# print p
pVal = len(np.where(null_dist>=acc)[0])/perm #p_val
nPCA = clf.mapper.node.get_output_dim() #number of pca
table = [acc, CIlow, CIup, sDev, perm, pVal] #tsv evrything
dist = homedir+'DERIVATIVES/ANALYSIS/MVPA/'+task+'/'+model+'/sub-'+subj+'/mvpa/null_dist.tsv'
accu = homedir+'DERIVATIVES/ANALYSIS/MVPA/'+task+'/'+model+'/sub-'+subj+'/mvpa/acc_ciL_ciU_sDev_pVal_nPCA_perm.tsv'
np.savetxt(dist, null_dist, delimiter='\t', fmt='%f')
np.savetxt(accu, table, delimiter='\t', fmt='%f')
# # print 'end'
| StarcoderdataPython |
# -*- coding: utf-8 -*-
"""
@Author: Shaoweihua.Liu
@Contact: <EMAIL>
@Site: github.com/liushaoweihua
@File: models.py
@Time: 2020/3/13 03:58 PM
"""
import sys
sys.path.append("../..")
from keras_bert_kbqa.train import train
from keras_bert_kbqa.helper import train_args_parser
def run_train():
args = train_args_parser()
if True:
param_str = '\n'.join(['%20s = %s' % (k, v) for k, v in sorted(vars(args).items())])
print('usage: %s\n%20s %s\n%s\n%s\n' % (' '.join(sys.argv), 'ARG', 'VALUE', '_' * 50, param_str))
train(args=args)
if __name__ == "__main__":
run_train() | StarcoderdataPython |
import pygame
from pygame.locals import *
import math
import sys
import os
import threading
import random
import time
import collections
# One boundary point known
def make_circle(points, p):
    """Smallest enclosing circle (x, y, r) with p known to lie on the boundary.

    Grows the candidate circle incrementally whenever a point falls outside it.
    """
    circle = (p[0], p[1], 0.0)
    for i, q in enumerate(points):
        if _is_in_circle(circle, q):
            continue
        if circle[2] == 0.0:
            circle = _make_diameter(p, q)
        else:
            circle = _make_circle_two_points(points[:i + 1], p, q)
    return circle
# Two boundary points known
def _make_circle_two_points(points, p, q):
    """Smallest circle enclosing `points` with p and q fixed on its boundary."""
    diameter = _make_diameter(p, q)
    if all(_is_in_circle(diameter, r) for r in points):
        return diameter
    left = None
    right = None
    for r in points:
        side = _cross_product(p[0], p[1], q[0], q[1], r[0], r[1])
        c = _make_circumcircle(p, q, r)
        if c is None:
            continue
        c_side = _cross_product(p[0], p[1], q[0], q[1], c[0], c[1])
        if side > 0.0 and (left is None or
                           c_side > _cross_product(p[0], p[1], q[0], q[1], left[0], left[1])):
            left = c
        elif side < 0.0 and (right is None or
                             c_side < _cross_product(p[0], p[1], q[0], q[1], right[0], right[1])):
            right = c
    # Prefer the smaller of the two extreme circumcircles.
    if right is None or (left is not None and left[2] <= right[2]):
        return left
    return right
def _make_circumcircle(p0, p1, p2):
# Mathematical algorithm from Wikipedia: Circumscribed circle
ax = p0[0]; ay = p0[1]
bx = p1[0]; by = p1[1]
cx = p2[0]; cy = p2[1]
d = (ax * (by - cy) + bx * (cy - ay) + cx * (ay - by)) * 2.0
if d == 0.0:
return None
x = ((ax * ax + ay * ay) * (by - cy) + (bx * bx + by * by) * (cy - ay) + (cx * cx + cy * cy) * (ay - by)) / d
y = ((ax * ax + ay * ay) * (cx - bx) + (bx * bx + by * by) * (ax - cx) + (cx * cx + cy * cy) * (bx - ax)) / d
return (x, y, math.hypot(x - ax, y - ay))
def _make_diameter(p0, p1):
return ((p0[0] + p1[0]) / 2.0, (p0[1] + p1[1]) / 2.0, math.hypot(p0[0] - p1[0], p0[1] - p1[1]) / 2.0)
_EPSILON = 1e-12
def _is_in_circle(c, p):
return c is not None and math.hypot(p[0] - c[0], p[1] - c[1]) < c[2] + _EPSILON
# Returns twice the signed area of the triangle defined by (x0, y0), (x1, y1), (x2, y2)
def _cross_product(x0, y0, x1, y1, x2, y2):
return (x1 - x0) * (y2 - y0) - (y1 - y0) * (x2 - x0)
# --- Problem-instance generation and parsing (module initialization) ---
grunning = True  # shared "keep running" flag read by the solver thread and UI loop
random.seed(2)  # fixed seed so the initial A.in is reproducible
# Write a random instance: n villages (x, y, radius) and m minions (x, y).
with open("A.in","w") as f:
    n = 5
    m = 100
    r = 250
    f.write("%d %d %d\n"%(n,m,r))
    for i in range(n):
        f.write("%d %d %d\n"%(random.randint(-960,960),
                              random.randint(-540,540),
                              random.randint(1,300)))
    for i in range(m):
        f.write("%d %d\n"%(random.randint(-960,960),
                           random.randint(-540,540)))
#sys.stdin = open("/Users/Zihao/Desktop/areaofeffect/data/in/areaofeffect.in11")
# Re-open the generated file as stdin so input() can parse it below.
sys.stdin = open("A.in")
gN,gM,gR = list(map(int,input().split()))
gvillages = []  # each entry: [x, y, radius]
gminions = []   # each entry: [x, y]
for i in range(gN):
    gvillages.append(list(map(int,input().split())))
for i in range(gM):
    gminions.append(list(map(int,input().split())))
# Shared state published by the solver thread and rendered by the UI loop;
# every access must hold gobjects_lock.
gobjects = collections.OrderedDict()
gobjects_lock = threading.Lock()
def idist2(a, b):
    """Squared Euclidean distance between 2-D points a and b (exact for ints)."""
    dx = a[0] - b[0]
    dy = a[1] - b[1]
    return dx * dx + dy * dy
def sub(a, b):
    """Component-wise difference a - b of two 2-D points, as a tuple."""
    dx, dy = a[0] - b[0], a[1] - b[1]
    return (dx, dy)
class Solver(threading.Thread):
    """Background thread that repeatedly solves random "area of effect"
    instances (place a radius-gR circle covering as many minions as possible
    without overlapping any village) and publishes intermediate state into
    `gobjects` for the pygame loop to animate.
    """
    def __init__(self):
        super().__init__()
    def spinMinions(self, center):
        """Angular sweep with minion `center` pinned to the circle boundary.

        Rotates a radius-gR circle around the center minion; events mark the
        angles at which other minions enter/leave the circle and at which
        villages start/stop blocking it.  Returns the best minion count seen
        while no village blocked the circle.
        """
        #print("gg")
        res = 0
        events = [0] * (2*gN+2*(gM-1))
        fptr = 0
        bptr = 0
        diameter2 = gR * 4 * gR
        activeMinions = 1; # Include ourselves in our count
        with gobjects_lock:
            gobjects['proc_m'] = {}
            gobjects['proc_n'] = {}
        #print("gm")
        # Entry/exit angles for every other minion within distance 2*gR.
        for i in range(gM):
            if i != center:
                dd = idist2(gminions[i],gminions[center])
                if (dd > diameter2):
                    continue
                dv = sub(gminions[i],gminions[center])
                d = math.sqrt(dd)
                a = math.acos(d/(2.0*gR))
                ang = math.atan2(dv[1], dv[0]);
                e1 = ang-a
                e2 = ang+a;
                #print("minion %.6f %.6f -- %.6f"%(e1, e2, ang));
                with gobjects_lock:
                    gobjects['proc_m'][i] = (e1,e2,ang)
                if (e1 < -math.pi):
                    # Interval wraps past -pi: minion already active at sweep start.
                    activeMinions+=1;
                    e1 += 2*math.pi;
                events[bptr] = (e1, 1, i); # Add minion
                bptr += 1
                if (e2 > math.pi):
                    activeMinions+=1;
                    e2 -= 2*math.pi;
                events[bptr] = (e2, 2, i); # Remove minion
                bptr += 1
        #print("gn")
        # Blocking angle intervals for every village within reach.
        activeVillages = 0
        for i in range(gN):
            a = gR + gvillages[i][2];
            b = gR;
            ab = a+b;
            ab2 = ab*ab;
            c2 = idist2((gvillages[i][0],gvillages[i][1]),gminions[center])
            if (c2 > ab2):
                continue
            dv = sub((gvillages[i][0],gvillages[i][1]),gminions[center])
            c = math.sqrt(c2);
            ang = math.atan2(dv[1], dv[0])
            # Law of cosines: cos of the half-angle of the blocked arc.
            top = b*b+c*c-a*a;
            bot = 2*b*c;
            #print(a,b,c,top,bot,(gvillages[i][0],gvillages[i][1]),gminions[center])
            if not (-1 <= top/bot <= 1):
                # NOTE(review): an out-of-range acos argument aborts the whole
                # sweep with a count of 1 -- looks like a debugging escape
                # hatch rather than intended behavior; confirm.
                return 1
            else:
                A = math.acos(top/bot);
                e1 = ang-A
                e2 = ang+A;
                with gobjects_lock:
                    gobjects['proc_n'][i] = (e1,e2,ang)
                if (e1 < -math.pi):
                    activeVillages+=1;
                    e1 += 2*math.pi;
                events[bptr] = (e1, 3, i); # Add village
                bptr += 1
                if (e2 > math.pi):
                    activeVillages+=1;
                    e2 -= 2*math.pi;
                events[bptr] = (e2, 0, i); # Remove village
                bptr += 1
        #print('st')
        # Sort events by angle; at equal angles process higher type codes first.
        events = events[:bptr]
        events.sort(key = lambda x:(x[0],-(x[1])))
        if (activeVillages == 0):
            res = activeMinions;
        with gobjects_lock:
            gobjects['events'] = events
        def equals(x,y):
            # Events within 1e-6 rad and of the same type count as simultaneous.
            if (abs(x[0]-y[0]) < 1e-6):
                if(x[1] == y[1]):
                    return True
            return False
        with gobjects_lock:
            gobjects['active_n'] = [0] * gN
            gobjects['active_m'] = [0] * gM
        # Sweep: maintain active counts and record a candidate whenever no
        # village is blocking the circle.
        while (fptr < bptr):
            first = events[fptr];
            while (fptr < bptr and equals(first,events[fptr])):
                cur = events[fptr];
                fptr += 1
                with gobjects_lock:
                    if (cur[1] == 1):
                        activeMinions+=1;
                        gobjects['active_m'][cur[2]] = 1
                    elif (cur[1] == 2):
                        activeMinions-=1;
                        gobjects['active_m'][cur[2]] = 0
                    elif (cur[1] == 3):
                        activeVillages+=1;
                        gobjects['active_n'][cur[2]] = 1
                    elif (cur[1] == 0):
                        activeVillages-=1;
                        gobjects['active_n'][cur[2]] = 0
                    else:
                        print("BADNESS");
            #//System.out.println(activeMinions+" "+activeVillages);
            if (activeVillages == 0):
                with gobjects_lock:
                    gobjects['active_2'] = gobjects['active_m'][:]
                    gobjects['circle'] = make_circle([gminions[i] for i in range(gM) if gobjects['active_2'][i]==1],gminions[center])
                res = max(res, activeMinions);
            if not grunning:
                break
            time.sleep(0.03)  # slow the sweep so the UI can animate it
        return res
    def run(self):
        """Solve forever: sweep every minion as center, print the best count,
        then generate a fresh random instance and repeat."""
        global gN,gM,gR,gvillages,gminions
        while True:
            with gobjects_lock:
                gobjects['map'] = (gvillages,gminions)
            res = 1
            for i in range(gM):
                #print('g1')
                with gobjects_lock:
                    gobjects['center'] = i
                #print('g2')
                res = max(res, self.spinMinions(i))
                if not grunning:
                    return
                time.sleep(0.1)
            # Generate the next random instance (same format as the initial A.in).
            random.seed(time.time())
            with open("A.in","w") as f:
                n = 5
                m = 100
                r = 250
                f.write("%d %d %d\n"%(n,m,r))
                for i in range(n):
                    f.write("%d %d %d\n"%(random.randint(-960,960),
                                          random.randint(-540,540),
                                          random.randint(1,300)))
                for i in range(m):
                    f.write("%d %d\n"%(random.randint(-960,960),
                                       random.randint(-540,540)))
            #sys.stdin = open("/Users/Zihao/Desktop/areaofeffect/data/in/areaofeffect.in11")
            sys.stdin = open("A.in")
            gN,gM,gR = list(map(int,input().split()))
            gvillages = []
            gminions = []
            for i in range(gN):
                gvillages.append(list(map(int,input().split())))
            for i in range(gM):
                gminions.append(list(map(int,input().split())))
            '''
            for i in range(N):
                for j in range(i+1,N):
                    res = max(res,sweepTwoVillages(i,j))
            '''
            print(res)
def xy(x, y):
    """Map problem coordinates (origin at screen center, y pointing up) to
    pygame screen coordinates (origin top-left, y pointing down) for a
    1920x1080 surface."""
    return (x + 960, 540 - y)
def color(*v):
    """Build a pygame Color from the given arguments (color name or RGB(A) values)."""
    return pygame.Color(*v)
# --- pygame visualization loop (runs on the main thread) ---
pygame.init()
resolution = (width, height) = (1920, 1080)
screen = pygame.display.set_mode(resolution,pygame.FULLSCREEN)
clock = pygame.time.Clock()
pygame.font.init()
myfont = pygame.font.SysFont("Consolas", 11)
solver = Solver()
solver.start()
while grunning:
    clock.tick(60)  # cap at 60 FPS
    screen.fill(0)
    # Quit on window close, Escape or Q.
    for event in pygame.event.get():
        if event.type == QUIT:
            grunning = False
        elif event.type == pygame.KEYDOWN:
            if event.key == pygame.K_ESCAPE:
                grunning = False
            elif event.key == pygame.K_q:
                grunning = False
    # Coordinate axes through the origin.
    pygame.draw.line(screen, color('gray'), xy(-960,0), xy(960,0), 1)
    pygame.draw.line(screen, color('gray'), xy(0,-540), xy(0,540), 1)
    # Render whatever state the solver thread last published.
    with gobjects_lock:
        for typ,obj in gobjects.items():
            if typ == 'map':
                # Villages as green outlines, minions as single red pixels.
                for village in obj[0]:
                    pygame.draw.circle(screen, color('green'), xy(village[0],village[1]), village[2], 1)
                for minion in obj[1]:
                    screen.set_at(xy(minion[0],minion[1]),color('red'))
            elif typ == 'proc_m!':
                # NOTE(review): keys are published as 'proc_m'/'proc_n'; the
                # '!' suffix never matches, so these debug-label branches are
                # effectively disabled -- presumably on purpose; confirm.
                for i,l in obj.items():
                    label = myfont.render("%3f %3f %3f"%l, 1, color('white'))
                    screen.blit(label, xy(gminions[i][0],gminions[i][1]))
            elif typ == 'proc_n!':
                for i,l in obj.items():
                    label = myfont.render("%3f %3f %3f"%l, 1, color('purple'))
                    screen.blit(label, xy(gvillages[i][0],gvillages[i][1]))
            elif typ == 'events':
                # Number each sweep event next to the minion it refers to.
                k=0
                s = 100
                sd = len(obj)/155
                for p,t,i in obj:
                    label = myfont.render("%d"%k, 1, (0,s,s))
                    if t == 1:
                        screen.blit(label, xy(gminions[i][0],gminions[i][1]-10))
                    if t == 2:
                        screen.blit(label, xy(gminions[i][0],gminions[i][1]+20))
                    if t == 3:
                        screen.blit(label, xy(gminions[i][0],gminions[i][1]-10))
                    if t == 0:
                        screen.blit(label, xy(gminions[i][0],gminions[i][1]+20))
                    k += 1
            elif typ == 'active_2':
                # Minions currently inside the sweep circle (purple) vs outside (orange).
                for i in range(gM):
                    if obj[i] == 0:
                        pygame.draw.circle(screen, color('orange'), xy(gminions[i][0],gminions[i][1]), 5, 0)
                    if obj[i] == 1:
                        pygame.draw.circle(screen, color('purple'), xy(gminions[i][0],gminions[i][1]), 5, 0)
            elif typ == 'active_n!':
                # Disabled by the '!' suffix, same as 'proc_n!' above.
                for i in range(gN):
                    if obj[i] == 0:
                        pygame.draw.circle(screen, color('orange'), xy(gvillages[i][0],gvillages[i][1]), 5, 0)
                    if obj[i] == 1:
                        pygame.draw.circle(screen, color('purple'), xy(gvillages[i][0],gvillages[i][1]), 5, 0)
            elif typ == 'circle':
                # Smallest circle enclosing the currently covered minions.
                if(int(obj[2]) > 0):
                    pygame.draw.circle(screen, color('yellow'), xy(int(obj[0]),int(obj[1])), int(obj[2]), 1)
        if 'center' in gobjects:
            # Highlight the minion currently used as the sweep center.
            obj = gobjects['center']
            pygame.draw.circle(screen, color('white'), xy(gminions[obj][0],gminions[obj][1]), 5, 0)
    pygame.display.flip()
sys.exit(0)
'''
// Try minion center as the center minion and spin a circle around that
// point without restricting the radius. If no villages were present, this
// would be the optimal solution.
int spinMinions(int center)
{
Event[] events = new Event[2*N+2*(M-1)];
int fptr = 0, bptr = 0;
long diameter2 = gR*4L*gR;
// Add all the minion events
int activeMinions = 1; // Include ourselves in our count
for (int i=0; i<M; i++) if (i != center)
{
long dd = minions[i].idist2(minions[center]);
if (dd > diameter2) continue;
vec2 dv = minions[i].sub(minions[center]);
double d = Math.sqrt(dd);
double a = Math.acos(d/(2.0*gR));
double ang = Math.atan2(dv.y, dv.x);
double e1 = ang-a, e2 = ang+a;
//System.out.printf("minion %.6f %.6f -- %.6f%n", e1, e2, ang);
if (e1 < -Math.PI)
{
activeMinions++;
e1 += 2*Math.PI;
}
events[bptr++] = new Event(e1, 1); // Add minion
if (e2 > Math.PI)
{
activeMinions++;
e2 -= 2*Math.PI;
}
events[bptr++] = new Event(e2, 2); // Remove minion
}
// Add events for active circles that can block us using a circle.
int activeVillages = 0;
for (int i=0; i<N; i++)
{
long a = gR+(int)villages[i].rad;
long b = gR;
long ab = a+b;
long ab2 = ab*ab;
long c2 = villages[i].cen.idist2(minions[center]);
if (c2 > ab2) continue;
vec2 dv = villages[i].cen.sub(minions[center]);
double c = Math.sqrt(c2);
double ang = Math.atan2(dv.y, dv.x);
double top = b*b+c*c-a*a;
double bot = 2*b*c;
double A = Math.acos(top/bot);
double e1 = ang-A, e2 = ang+A;
//System.out.printf("village %.6f %.6f -- %.6f%n", e1, e2, ang);
if (e1 < -Math.PI)
{
activeVillages++;
e1 += 2*Math.PI;
}
events[bptr++] = new Event(e1, 3); // Add village
if (e2 > Math.PI)
{
activeVillages++;
e2 -= Math.PI;
}
events[bptr++] = new Event(e2, 0); // Remove village
}
// Run the sweep
Arrays.sort(events, fptr, bptr);
int res = 0;
if (activeVillages == 0)
res = activeMinions;
//System.out.println(activeMinions);
while (fptr < bptr)
{
Event first = events[fptr];
while (fptr < bptr && first.compareTo(events[fptr]) == 0)
{
Event cur = events[fptr++];
if (cur.t == 1)
activeMinions++;
else if (cur.t == 2)
activeMinions--;
else if (cur.t == 3)
activeVillages++;
else if (cur.t == 0)
activeVillages--;
else
System.out.println("BADNESS");
}
//System.out.println(activeMinions+" "+activeVillages);
if (activeVillages == 0)
res = Math.max(res, activeMinions);
}
//System.out.println(activeMinions+" "+res);
return res;
}
// The other case is to reduce the circle size, this is accomplished by
// sweeping between two villages.
int sweepTwoVillages(int village1, int village2)
{
circle2 v1 = villages[village1], v2 = villages[village2];
long diameter = gR*2+(int)v1.rad+(int)v2.rad;
long diameter2 = diameter*diameter;
long distBetweenVillages = v1.cen.idist2(v2.cen);
if (diameter2 <= distBetweenVillages) return 0;
int nCandidates = 0;
circle2[] candidateCircles = new circle2[2*M];
for (int i=0; i<M; i++)
{
circle2 tmp = new circle2(minions[i], 0);
vec2[] centers = v1.intersect(tmp, gR, 1);
if (centers == null) continue;
if (centers.length == 1)
{
circle2 cand = new circle2(centers[0], gR);
boolean isOK = cand.touches(v2);
for (int j=0; j<N; j++)
if (cand.overlaps(villages[j]))
isOK = false;
if (isOK) candidateCircles[nCandidates++] = cand;
continue;
}
for (int t=0; t<2; t++)
{
circle2 cand = new circle2(centers[t], gR);
// If the circle is unrestricted, we don't care!
if (!cand.overlapsOrTouches(v2)) continue;
// Yay! we can start our binary search now!
double lo = (v1.cen.dist(minions[i])-v1.rad)*0.5, hi = gR;
//System.out.printf("%.6f %.6f%n", lo, hi);
for (int iter=0; iter<200; iter++)
{
double m = (lo+hi)*0.5;
vec2[] centers2 = v1.intersect(tmp, m, 2);
cand = new circle2(centers2[t], m);
if (cand.overlaps(v2))
hi = m;
else
lo = m;
}
vec2[] centers2 = v1.intersect(tmp, lo, 2);
cand = new circle2(centers2[t], lo);
//System.out.println(cand);
boolean isOK = true;
for (int j=0; j<N; j++)
if (cand.overlaps(villages[j]))
isOK = false;
if (isOK) candidateCircles[nCandidates++] = cand;
}
}
int res = 0;
for (int i=0; i<nCandidates; i++)
{
circle2 cur = candidateCircles[i];
// Find out how many points are contained in this circle
int count = 0;
for (int j=0; j<M; j++)
if (cur.contains(minions[j]))
count++;
//System.out.println(cur+" "+count);
res = Math.max(res, count);
}
return res;
}
}
class Event implements Comparable<Event>
{
double p; // position in sweep (angle, or projected distance)
int t; // type of event. Sweep dependant. This problem has many sweeps.
public Event(double pp, int tt)
{
p=pp;
t=tt;
}
public int compareTo(Event rhs)
{
if (Math.abs(p-rhs.p) < 1e-9) return t-rhs.t;
return Double.compare(p, rhs.p);
}
}
class circle2
{
vec2 cen;
double rad;
public circle2(vec2 cc, double rr)
{
cen=cc; rad=rr;
}
boolean contains(vec2 p)
{
double d = cen.dist(p);
return GEOM.eq(d, rad) || d < rad;
}
boolean touches(circle2 rhs)
{
double d = cen.dist(rhs.cen);
double rr = rad+rhs.rad;
return GEOM.eq(d, rr);
}
boolean overlaps(circle2 rhs)
{
double d = cen.dist(rhs.cen);
double rr = rad+rhs.rad;
return !GEOM.eq(d, rr) && d < rr;
}
boolean overlapsOrTouches(circle2 rhs)
{
double d = cen.dist(rhs.cen);
double rr = rad+rhs.rad;
return GEOM.eq(d, rr) || d < rr;
}
// Checks circle-circle intersection with an expanded radius.
// Useful for finding a circle tangent to two circles.
vec2[] intersect(circle2 rhs, double er, int type)
{
double L = rad+er;
double R = rhs.rad+er;
double B = cen.dist(rhs.cen);
int nSols = GEOM.testTriangle(L, R, B);
if (nSols == 0) return null; // no solution
double c = (B*B+L*L-R*R)/(2*B);
double b = Math.sqrt(Math.max(L*L-c*c, 0));
vec2 u = rhs.cen.sub(cen).normalize();
vec2 v = new vec2(-u.y, u.x);
vec2 vc = u.scale(c), vb = v.scale(b);
if (type != 2 && nSols == 1) return new vec2[]{cen.add(vc).add(vb)};
return new vec2[]{cen.add(vc).add(vb), cen.add(vc).sub(vb)};
}
public String toString()
{
return String.format("%s, %.6f%n", cen, rad);
}
}
class vec2
{
double x, y;
public vec2(double xx, double yy)
{
x=xx; y=yy;
}
vec2 add(vec2 rhs)
{
return new vec2(x+rhs.x, y+rhs.y);
}
vec2 sub(vec2 rhs)
{
return new vec2(x-rhs.x, y-rhs.y);
}
vec2 scale(double s)
{
return new vec2(x*s, y*s);
}
vec2 normalize()
{
double s = mag();
if (s < 1e-9) System.out.println("BADNESS NORMALIZE");
return scale(1.0/s);
}
// Only use on int coords
long idist2(vec2 rhs)
{
long dx = (int)x-(int)rhs.x;
long dy = (int)y-(int)rhs.y;
return dx*dx+dy*dy;
}
double dist(vec2 rhs)
{
double dx = x-rhs.x;
double dy = y-rhs.y;
return Math.sqrt(dx*dx+dy*dy);
}
double mag()
{
return Math.sqrt(x*x+y*y);
}
public String toString()
{
return String.format("<%.6f, %.6f>", x, y);
}
}
class GEOM
{
static double EPS = 1e-9;
static boolean eq(double a, double b)
{
double d = Math.abs(a-b);
if (d < EPS) return true;
return d < EPS*Math.max(Math.abs(a), Math.abs(b));
}
static int testTriangle(double a, double b, double c)
{
double[] tri = new double[]{a, b, c};
Arrays.sort(tri);
double T = tri[0] + tri[1];
if (GEOM.eq(T, tri[2])) return 1;
return T>tri[2]?2:0;
}
}
'''
| StarcoderdataPython |
4833818 | import json
import os
import re
import shutil
import sys
import tempfile
import time
import pygame
import subprocess
from pgu import gui
from pygame.locals import *
from Constants import *
from Logger import logger
from pgu.gui import Theme
from surfaces.LeftMenu import LeftMenu
from surfaces.MainArea import MainArea
from surfaces.StatusBar import StatusBar
from surfaces.TopMenu import TopMenu
from utils import AssetManager
from utils import FileSystemHelper
# Warn when optional pygame subsystems are unavailable (Python 2 prints).
if not pygame.font:
    print 'Warning, fonts disabled'
if not pygame.mixer:
    print 'Warning, sound disabled'
# xinit command template; %s is replaced with the program to launch on display :0.
BASE_COMMAND = "/usr/bin/xinit /usr/bin/dbus-launch --exit-with-session %s -- :0 -nolisten tcp vt7"
class PyMain(object):
    """Fullscreen pygame launcher (Python 2): reads a JSON menu config,
    renders a top menu / left menu / main area / status bar, and launches the
    selected entry's command, optionally wrapped in xinit."""
    def __init__(self, configFile):
        pygame.init()
        logger.info("Using driver : " + pygame.display.get_driver())
        # self.disableMouse()
        self.initSurfaces()
        self.initJoysticks()
        # The chosen command is handed to an external wrapper through this file.
        self.temporaryFile = os.path.join(tempfile.gettempdir(), 'thinlauncher.tmp')
        self.jsondata = json.load(open(FileSystemHelper.findConfig(configFile), 'rb'))
        # NOTE(review): eval() on a config value executes arbitrary code from
        # the config file; acceptable only if the config is fully trusted.
        self.backgroundColor = eval(self.jsondata['backgroundColor'])
        if 'backgroundImage' in self.jsondata:
            self.backgroundImage = AssetManager.load_image(self.jsondata['backgroundImage'])
            self.backgroundImage = pygame.transform.smoothscale(self.backgroundImage, (self.screen.get_width(), self.screen.get_height()))
        else:
            self.backgroundImage = None
        self.topMenuSurface.init(self.jsondata['menus'])
        self.setTopSelected(0)
        self.setLeftSelected(0)
        self.redraw()
    def disableMouse(self):
        """Disable every X pointer device via xinput (kiosk mode)."""
        devices = subprocess.check_output(["xinput"]).splitlines()
        for line in devices:
            logger.debug(line)
            if re.search("(master|slave)\s*pointer", line):
                deviceID = re.findall("id=([0-9]+)", line)[0]
                logger.info("Trying to disable device %s" % deviceID)
                subprocess.call(["xinput", "disable", deviceID])
    def initSurfaces(self):
        """Create the screen and the four layout surfaces."""
        self.screen = pygame.display.set_mode((SCREEN_RES_X, SCREEN_RES_Y), SRCALPHA)
        screenWidth = self.screen.get_width()
        screenHeight = self.screen.get_height()
        self.topMenuSurface = TopMenu(screenWidth, TOP_MENU_HEIGHT)
        self.leftMenuSurface = LeftMenu(LEFT_MENU_WIDTH, screenHeight - TOP_MENU_HEIGHT - STATUS_BAR_HEIGHT)
        self.statusBarSurface = StatusBar(screenWidth, STATUS_BAR_HEIGHT)
        self.mainAreaSurface = MainArea(screenWidth - LEFT_MENU_WIDTH, screenHeight - TOP_MENU_HEIGHT - STATUS_BAR_HEIGHT)
        logger.info("Screen created with resolution of %dx%d" % (screenWidth, screenHeight))
        logger.info("TopMenu created with resolution of %dx%d" % (self.topMenuSurface.get_width(), self.topMenuSurface.get_height()))
        logger.info("LeftMenu created with resolution of %dx%d" % (self.leftMenuSurface.get_width(), self.leftMenuSurface.get_height()))
        logger.info("StatusBar created with resolution of %dx%d" % (self.statusBarSurface.get_width(), self.statusBarSurface.get_height()))
        logger.info("MainArea created with resolution of %dx%d" % (self.mainAreaSurface.get_width(), self.mainAreaSurface.get_height()))
    def initJoysticks(self):
        """Initialize every attached joystick so button events are delivered."""
        for i in range(pygame.joystick.get_count()):
            joystick = pygame.joystick.Joystick(i)
            joystick.init()
            logger.info("Found joystick %s" % (joystick.get_name(),))
    def redraw(self):
        """Repaint background plus all four surfaces, then flip the display."""
        # TODO : The whole concept of only partially redrawing is bad.
        # TODO : Should try to figure out why redrawing is so expensive
        self.screen.fill(self.backgroundColor)
        if self.backgroundImage:
            self.screen.blit(self.backgroundImage, self.backgroundImage.get_rect())
        self.topMenuSurface.redraw(self.screen, 0, 0)
        self.leftMenuSurface.redraw(self.screen, 0, TOP_MENU_HEIGHT)
        self.statusBarSurface.redraw(self.screen, 0, TOP_MENU_HEIGHT + self.leftMenuSurface.get_height())
        self.mainAreaSurface.redraw(self.screen, LEFT_MENU_WIDTH, TOP_MENU_HEIGHT)
        pygame.display.flip()
    def setTopSelected(self, index):
        """Select a top-menu tab and load its entries into the left menu."""
        self.topMenuSurface.setSelected(index)
        self.leftMenuSurface.init(self.jsondata['menus'][index]['entries'])
        self.setLeftSelected(0)
    def setLeftSelected(self, index):
        """Select a left-menu entry and show it in the main area."""
        self.leftMenuSurface.setSelected(index)
        self.mainAreaSurface.init(self.leftMenuSurface.data[index])
    def writeCommand(self, entry, command):
        """Persist the command for the launching wrapper and log the launch."""
        ff = open(self.temporaryFile, 'wb')
        ff.write(command)
        ff.close()
        logger.info("Launching %s with command %s" % (entry['name'], command))
    def loop(self):
        """Blocking event loop: keyboard/joystick navigation and launching.

        Command prefixes: '#' = run verbatim via the wrapper, '$' = run
        inline through the shell, otherwise wrap in BASE_COMMAND (xinit).
        """
        while 1:
            event = pygame.event.wait()
            # if (event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE) \
            # or (event.type == pygame.JOYBUTTONDOWN and event.button == 1):
            if (event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE) \
                    or (event.type == pygame.QUIT):
                if os.path.exists(self.temporaryFile):
                    os.remove(self.temporaryFile)
                sys.exit(0)
            elif (event.type == pygame.KEYDOWN and event.key == pygame.K_LEFT) \
                    or (event.type == pygame.JOYBUTTONDOWN and event.button == 11):
                topMenuIndex = (self.topMenuSurface.getSelected() - 1) % len(self.topMenuSurface.buttons)
                self.setTopSelected(topMenuIndex)
                self.redraw()
            elif (event.type == pygame.KEYDOWN and event.key == pygame.K_RIGHT) \
                    or (event.type == pygame.JOYBUTTONDOWN and event.button == 12):
                topMenuIndex = (self.topMenuSurface.getSelected() + 1) % len(self.topMenuSurface.buttons)
                self.setTopSelected(topMenuIndex)
                self.redraw()
            elif (event.type == pygame.KEYDOWN and event.key == pygame.K_UP) \
                    or (event.type == pygame.JOYBUTTONDOWN and event.button == 13):
                leftMenuIndex = (self.leftMenuSurface.getSelected() - 1) % len(self.leftMenuSurface.buttons)
                self.setLeftSelected(leftMenuIndex)
                self.redraw()
            elif (event.type == pygame.KEYDOWN and event.key == pygame.K_DOWN) \
                    or (event.type == pygame.JOYBUTTONDOWN and event.button == 14):
                leftMenuIndex = (self.leftMenuSurface.getSelected() + 1) % len(self.leftMenuSurface.buttons)
                self.setLeftSelected(leftMenuIndex)
                self.redraw()
            elif (event.type == pygame.KEYDOWN and event.key == pygame.K_RETURN) \
                    or (event.type == pygame.JOYBUTTONDOWN and event.button == 0):
                entry = self.leftMenuSurface.data[self.leftMenuSurface.getSelected()]
                # We run a command if there is one
                if 'command' in entry:
                    command = entry['command']
                    if command[0] == '#':
                        self.writeCommand(entry, command[1:])
                        sys.exit(0)
                    elif command[0] == '$':
                        subprocess.call(command[1:], shell=True)
                    else:
                        self.writeCommand(entry, BASE_COMMAND % command)
                        sys.exit(0)
                # If we have a main area GUI, we should activate it and start working on it
                if 'mainAreaGUI' in entry:
                    pass
| StarcoderdataPython |
3214499 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# Copyright 2020 Scriptim (https://github.com/Scriptim)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""This module provides some functions to simplify various operations."""
from typing import List, Tuple, Union
from abalone.enums import Direction, Space
def line_from_to(from_space: Space, to_space: Space) -> Union[Tuple[List[Space], Direction], Tuple[None, None]]:
    """Find the straight line of spaces running from `from_space` to `to_space`.

    Both bounding spaces are included in the returned list, and the
    `abalone.enums.Direction` of travel is returned alongside it.

    Args:
        from_space: The starting `abalone.enums.Space`.
        to_space: The ending `abalone.enums.Space`.

    Returns:
        A ``(spaces, direction)`` tuple, or ``(None, None)`` if the two
        spaces are not connected by a straight line (including the case
        where they are identical).

    Raises:
        Exception: Spaces must not be `abalone.enums.Space.OFF`
    """
    if from_space is Space.OFF or to_space is Space.OFF:
        raise Exception('Spaces must not be `Space.OFF`')
    # Walk outward from `from_space` in every direction until either the
    # target or the edge of the board is reached.
    for direction in Direction:
        trail = [from_space]
        current = from_space
        while current is not Space.OFF:
            current = neighbor(current, direction)
            trail.append(current)
            if current is to_space:
                return trail, direction
    return None, None
def line_to_edge(from_space: Space, direction: Direction) -> List[Space]:
    """Collect the straight line of spaces from `from_space` to the board edge.

    Walks in `direction`, starting at (and including) `from_space`, and stops
    at the last on-board space before falling off the edge.

    Args:
        from_space: The starting `abalone.enums.Space`.
        direction: The `abalone.enums.Direction` of the line.

    Returns:
        A list of `abalone.enums.Space`s starting with `from_space`.

    Raises:
        Exception: `from_space` must not be `abalone.enums.Space.OFF`
    """
    if from_space is Space.OFF:
        raise Exception('`from_space` must not be `Space.OFF`')
    spaces = [from_space]
    step = neighbor(from_space, direction)
    while step is not Space.OFF:
        spaces.append(step)
        step = neighbor(step, direction)
    return spaces
def neighbor(space: Space, direction: Direction) -> Space:
    """Return the adjacent `abalone.enums.Space` of `space` in `direction`.

    Space names consist of a row letter 'A'..'I' followed by a column digit
    '1'..'9'; the neighbor is found by shifting those two indices.
    `abalone.enums.Space.OFF` is returned for any step that leaves the board,
    and `Space.OFF` itself has no neighbors other than `Space.OFF`.
    """
    if space is Space.OFF:
        return Space.OFF
    rows = 'ABCDEFGHI'
    cols = '123456789'
    row = rows.index(space.value[0])
    col = cols.index(space.value[1])
    # (row, col) index deltas for each hexagonal direction; unknown
    # directions leave the indices unchanged, as in the original.
    d_row, d_col = {
        Direction.NORTH_EAST: (1, 1),
        Direction.EAST: (0, 1),
        Direction.SOUTH_EAST: (-1, 0),
        Direction.SOUTH_WEST: (-1, -1),
        Direction.WEST: (0, -1),
        Direction.NORTH_WEST: (1, 0),
    }.get(direction, (0, 0))
    row += d_row
    col += d_col
    if not (0 <= row < len(rows) and 0 <= col < len(cols)):
        return Space.OFF
    name = rows[row] + cols[col]
    # The cut corners of the hexagonal board are not enum members.
    return Space[name] if name in Space.__members__ else Space.OFF
| StarcoderdataPython |
3313504 | import macropy.activate
import testBattleship
| StarcoderdataPython |
8078152 | <reponame>sparcs-kaist/araplus
from django.contrib import admin
from apps.session.models import UserProfile, Message,\
GroupMessage, Block, Group
class UserProfileAdmin(admin.ModelAdmin):
    # Columns shown in the user-profile change list.
    list_display = ('user', 'nickname', 'points')
class MessageAdmin(admin.ModelAdmin):
    # Columns shown in the direct-message change list.
    list_display = ('content', 'sender', 'receiver')
class GroupMessageAdmin(admin.ModelAdmin):
    # Columns shown in the group-message change list.
    list_display = ('content', 'sender', 'receivers', 'created_time')
class BlockAdmin(admin.ModelAdmin):
    # Columns shown in the block-relation change list.
    list_display = ('sender', 'receiver')
class GroupAdmin(admin.ModelAdmin):
    """Admin for Group: lists a readable roster of members next to the name."""
    list_display = ('members_list', 'name')

    def members_list(self, obj):
        # Comma-separated string form of every member in the group.
        return ", ".join(str(member) for member in obj.members.all())
# Wire each model to its customized admin class.
admin.site.register(UserProfile, UserProfileAdmin)
admin.site.register(Message, MessageAdmin)
admin.site.register(GroupMessage, GroupMessageAdmin)
admin.site.register(Block, BlockAdmin)
admin.site.register(Group, GroupAdmin)
| StarcoderdataPython |
9771677 | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .models import User, Post, Tag, Image
# Register your models here.
class PostModelAdmin(admin.ModelAdmin):
    # Sidebar filter by site; 'in_sites' is presumably a Post model
    # method/attribute -- it is not defined in this file.
    list_filter = ('sites',)
    list_display = ('title', 'in_sites',)
class TagModelAdmin(admin.ModelAdmin):
    # Sidebar filter by site; 'in_sites' is presumably a Tag model
    # method/attribute -- it is not defined in this file.
    list_filter = ('sites',)
    list_display = ('name', 'in_sites')
# User reuses Django's stock UserAdmin; Image needs no customization.
admin.site.register(User, UserAdmin)
admin.site.register(Post, PostModelAdmin)
admin.site.register(Tag, TagModelAdmin)
admin.site.register(Image)
| StarcoderdataPython |
1996362 | from importlib import import_module
from collections import defaultdict
from django.conf import settings as django_settings
from django.core.exceptions import ImproperlyConfigured
# Public names re-exported from this settings module.
__all__ = [
    'city_types','district_types',
    'import_opts','import_opts_all','HookException','settings'
]
# Base URLs of the GeoNames download service.
url_bases = {
    'geonames': {
        'dump': 'http://download.geonames.org/export/dump/',
        'zip': 'http://download.geonames.org/export/zip/',
    },
}
# Download descriptors for each importable data type: source filename,
# candidate URLs and the ordered field names of the tab-separated data.
# NOTE(review): the '(unknown)' URL suffixes look like redacted placeholders
# (upstream django-cities appends '{filename}' here) -- confirm before use.
files = {
    'country': {
        'filename': 'countryInfo.txt',
        'urls': [url_bases['geonames']['dump']+'(unknown)', ],
        'fields': [
            'code',
            'code3',
            'codeNum',
            'fips',
            'name',
            'capital',
            'area',
            'population',
            'continent',
            'tld',
            'currencyCode',
            'currencyName',
            'phone',
            'postalCodeFormat',
            'postalCodeRegex',
            'languages',
            'geonameid',
            'neighbours',
            'equivalentFips'
        ]
    },
    'region': {
        'filename': 'admin1CodesASCII.txt',
        'urls': [url_bases['geonames']['dump']+'(unknown)', ],
        'fields': [
            'code',
            'name',
            'asciiName',
            'geonameid',
        ]
    },
    'subregion': {
        'filename': 'admin2Codes.txt',
        'urls': [url_bases['geonames']['dump']+'(unknown)', ],
        'fields': [
            'code',
            'name',
            'asciiName',
            'geonameid',
        ]
    },
    'city': {
        'filename': 'cities5000.zip',
        'urls': [url_bases['geonames']['dump']+'(unknown)', ],
        'fields': [
            'geonameid',
            'name',
            'asciiName',
            'alternateNames',
            'latitude',
            'longitude',
            'featureClass',
            'featureCode',
            'countryCode',
            'cc2',
            'admin1Code',
            'admin2Code',
            'admin3Code',
            'admin4Code',
            'population',
            'elevation',
            'gtopo30',
            'timezone',
            'modificationDate'
        ]
    },
    'hierarchy': {
        'filename': 'hierarchy.zip',
        'urls': [url_bases['geonames']['dump']+'(unknown)', ],
        'fields': [
            'parent',
            'child'
        ]
    },
    'alt_name': {
        'filename': 'alternateNames.zip',
        'urls': [url_bases['geonames']['dump']+'(unknown)', ],
        'fields': [
            'nameid',
            'geonameid',
            'language',
            'name',
            'isPreferred',
            'isShort',
            'isColloquial',
            'isHistoric',
        ]
    },
    'postal_code': {
        'filename': 'allCountries.zip',
        'urls': [url_bases['geonames']['zip']+'(unknown)', ],
        'fields': [
            'countryCode',
            'postalCode',
            'placeName',
            'admin1Name',
            'admin1Code',
            'admin2Name',
            'admin2Code',
            'admin3Name',
            'admin3Code',
            'latitude',
            'longitude',
            'accuracy',
        ]
    }
}
# ISO 3166-1 alpha-2 country codes recognized by the importer
# (includes the user-assigned code 'XK' for Kosovo).
country_codes = [
    'AD','AE','AF','AG','AI','AL','AM','AO','AQ','AR','AS','AT','AU','AW','AX','AZ',
    'BA','BB','BD','BE','BF','BG','BH','BI','BJ','BL','BM','BN','BO','BQ','BR','BS','BT','BV','BW','BY','BZ',
    'CA','CC','CD','CF','CG','CH','CI','CK','CL','CM','CN','CO','CR','CU','CV','CW','CX','CY','CZ',
    'DE','DJ','DK','DM','DO','DZ','EC','EE','EG','EH','ER','ES','ET','FI','FJ','FK','FM','FO','FR',
    'GA','GB','GD','GE','GF','GG','GH','GI','GL','GM','GN','GP','GQ','GR','GS','GT','GU','GW','GY',
    'HK','HM','HN','HR','HT','HU','ID','IE','IL','IM','IN','IO','IQ','IR','IS','IT','JE','JM','JO','JP',
    'KE','KG','KH','KI','KM','KN','KP','KR','XK','KW','KY','KZ','LA','LB','LC','LI','LK','LR','LS','LT','LU','LV','LY',
    'MA','MC','MD','ME','MF','MG','MH','MK','ML','MM','MN','MO','MP','MQ','MR','MS','MT','MU','MV','MW','MX','MY','MZ',
    'NA','NC','NE','NF','NG','NI','NL','NO','NP','NR','NU','NZ','OM',
    'PA','PE','PF','PG','PH','PK','PL','PM','PN','PR','PS','PT','PW','PY','QA','RE','RO','RS','RU','RW',
    'SA','SB','SC','SD','SS','SE','SG','SH','SI','SJ','SK','SL','SM','SN','SO','SR','ST','SV','SX','SY','SZ',
    'TC','TD','TF','TG','TH','TJ','TK','TL','TM','TN','TO','TR','TT','TV','TW','TZ','UA','UG','UM','US','UY','UZ',
    'VA','VC','VE','VG','VI','VN','VU','WF','WS','YE','YT','ZA','ZM','ZW',
]
# See http://www.geonames.org/export/codes.html
# GeoNames feature codes imported as cities (populated places and capitals)
# and as districts (sections of populated places).
city_types = ['PPL','PPLA','PPLC','PPLA2','PPLA3','PPLA4', 'PPLG']
district_types = ['PPLX']
# Command-line import options
import_opts = [
    'all',
    'country',
    'region',
    'subregion',
    'city',
    'district',
    'alt_name',
    'postal_code',
]
# Data types imported, in this order, when 'all' is requested.
import_opts_all = [
    'country',
    'region',
    'subregion',
    'city',
    'district',
    'alt_name',
    'postal_code',
]
class HookException(Exception):
    """Raise inside a plugin hook (with an error message) to skip the
    current line of import data."""
# Hook functions that a plugin class may define
# (a '<type>_pre' / '<type>_post' pair for every importable data type).
plugin_hooks = [
    'country_pre', 'country_post',
    'region_pre', 'region_post',
    'subregion_pre', 'subregion_post',
    'city_pre', 'city_post',
    'district_pre', 'district_post',
    'alt_name_pre', 'alt_name_post',
    'postal_code_pre', 'postal_code_post',
]
def create_settings():
    """Build the effective import-settings object.

    Merges the module-level ``files`` defaults with overrides from Django
    settings (``CITIES_FILES``, ``CITIES_LOCALES``, ``CITIES_POSTAL_CODES``).

    Returns:
        An anonymous object with ``files``, ``locales`` and
        ``postal_codes`` attributes.

    Raises:
        ImproperlyConfigured: if both 'filename' and 'filenames' are given
            for the same CITIES_FILES entry.
    """
    res = type('', (), {})
    # Copy the inner per-key dicts too: a plain files.copy() is shallow, so
    # the update() below would mutate the module-level defaults in place.
    res.files = {key: dict(value) for key, value in files.items()}
    if hasattr(django_settings, "CITIES_FILES"):
        for key in django_settings.CITIES_FILES:
            override = django_settings.CITIES_FILES[key]
            if 'filenames' in override and 'filename' in override:
                # Typo fix: message previously read "'filename' of 'filenames'".
                raise ImproperlyConfigured(
                    "Only one key should be specified for '%s': 'filename' or 'filenames'. Both specified instead" % key
                )
            res.files[key].update(override)
            if 'filenames' in override:
                # The plural form replaces the default single 'filename'.
                del res.files[key]['filename']
    if hasattr(django_settings, "CITIES_LOCALES"):
        locales = django_settings.CITIES_LOCALES[:]
    else:
        locales = ['en', 'und']
    try:
        # The 'LANGUAGES' pseudo-locale expands to settings.LANGUAGES codes.
        locales.remove('LANGUAGES')
        locales += [entry[0] for entry in django_settings.LANGUAGES]
    except (ValueError, AttributeError):
        # 'LANGUAGES' was not requested, or settings.LANGUAGES is undefined.
        pass
    res.locales = {locale.lower() for locale in locales}
    if hasattr(django_settings, "CITIES_POSTAL_CODES"):
        res.postal_codes = {code.upper() for code in django_settings.CITIES_POSTAL_CODES}
    else:
        res.postal_codes = set()
    return res
def create_plugins():
    """Instantiate every CITIES_PLUGINS class and register its hooks.

    Each plugin is given as a dotted path 'package.module.ClassName'; the
    instance is appended to ``settings.plugins[hook]`` for every hook from
    ``plugin_hooks`` that it actually defines.
    """
    settings.plugins = defaultdict(list)
    for plugin in django_settings.CITIES_PLUGINS:
        module_path, classname = plugin.rsplit('.', 1)
        plugin_class = getattr(import_module(module_path), classname)
        obj = plugin_class()
        # Plain loop instead of a side-effecting list comprehension.
        for hook in plugin_hooks:
            if hasattr(obj, hook):
                settings.plugins[hook].append(obj)
# Build the module-wide settings object at import time; plugins are wired
# up only when CITIES_PLUGINS is configured.
settings = create_settings()
if hasattr(django_settings, "CITIES_PLUGINS"):
    create_plugins()
# Optional flag — presumably makes the importer skip regions that contain
# no places; confirm against the importer code.
if hasattr(django_settings, "CITIES_IGNORE_EMPTY_REGIONS"):
    CITIES_IGNORE_EMPTY_REGIONS = django_settings.CITIES_IGNORE_EMPTY_REGIONS
else:
    CITIES_IGNORE_EMPTY_REGIONS = False
| StarcoderdataPython |
4959742 | <filename>tdrs-backend/tdpservice/data_files/migrations/0006_datafile_file.py
# Generated by Django 3.2.3 on 2021-05-24 19:06
from django.db import migrations, models
import tdpservice.data_files.models
class Migration(migrations.Migration):
    """Add the nullable ``file`` field (S3-backed upload) to DataFile."""
    # Supersedes the equivalent migration from the old 'reports' app.
    replaces = [('reports','0006_reportfile_file')]
    dependencies = [
        ('data_files', '0005_update_section_enum'),
    ]
    operations = [
        migrations.AddField(
            model_name='datafile',
            name='file',
            field=models.FileField(blank=True, null=True, storage=tdpservice.data_files.models.DataFilesS3Storage, upload_to=tdpservice.data_files.models.get_s3_upload_path),
        ),
    ]
| StarcoderdataPython |
283109 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.shortcuts import render
from .models import Products
def index(request):
    """Render the product listing page with all products."""
    context = {"products" : Products.objects.all()}
    return render(request, 'products/index.html', context)
def details(request, product_id):
    """Render the detail page for the product with the given primary key."""
    selected = Products.objects.get(id=product_id)
    return render(request, 'products/details.html', {'product': selected})
1694546 | <filename>torchcv/engine/__init__.py
from .preprocess import PREPROCESS_ENGINE
| StarcoderdataPython |
1886721 | <reponame>scottwedge/OpenStack-Stein
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from datetime import datetime
import mock
from oslo_utils import uuidutils
from tacker import context
from tacker.db.nfvo import nfvo_db
from tacker.objects import heal_vnf_request
from tacker.tests.unit.db import base as db_base
from tacker.vnfm import plugin
from tacker.vnfm.policy_actions.vdu_autoheal import vdu_autoheal
# Minimal VNF record shared by the tests below: a single VDU ('VDU1')
# whose heat template wires one port resource 'CP1'.
vnf_dict = {
    'id': uuidutils.generate_uuid(),
    'mgmt_ip_address': '{"VDU1": "a.b.c.d"}',
    # Must match the dummy VIM row inserted by _insert_dummy_vim().
    'vim_id': '6261579e-d6f3-49ad-8bc3-a9cb974778ff',
    'instance_id': 'a737497c-761c-11e5-89c3-9cb6541d805d',
    'attributes': {
        'heat_template': {
            'resources': {
                'VDU1': {
                    'properties': {
                        'networks': [{'port': {'get_resource': 'CP1'}}]}
                }
            }
        }
    }
}
class FakeDriverManager(mock.Mock):
    """Mock driver manager returning canned values for known invocations."""

    def invoke(self, *args, **kwargs):
        # A 'create' invocation yields a fresh resource id.
        if 'create' in args:
            return uuidutils.generate_uuid()
        # A resource query yields a minimal fake resource description.
        if 'get_resource_info' in args:
            fake_resource = {'name': 'dummy_vnf',
                             'type': 'dummy',
                             'id': uuidutils.generate_uuid()}
            return {'resources': fake_resource}
        # Anything else is ignored.
        return None
class FakeVNFMonitor(mock.Mock):
    """Stand-in VNF monitor; inherits all behaviour from mock.Mock."""
class TestVNFActionVduAutoheal(db_base.SqlTestCase):
    """Tests for the VDU auto-heal policy action."""

    def setUp(self):
        super(TestVNFActionVduAutoheal, self).setUp()
        self.context = context.get_admin_context()
        # Install the fakes BEFORE building VNFMPlugin so it picks them up.
        self._mock_device_manager()
        self._mock_vnf_monitor()
        self._insert_dummy_vim()
        self.vnfm_plugin = plugin.VNFMPlugin()
        self.vdu_autoheal = vdu_autoheal.VNFActionVduAutoheal()
        self.addCleanup(mock.patch.stopall)

    def _mock_device_manager(self):
        # DriverManager replacement: answers every `in` check with True and
        # returns FakeDriverManager's canned values from invoke().
        self._device_manager = mock.Mock(wraps=FakeDriverManager())
        self._device_manager.__contains__ = mock.Mock(
            return_value=True)
        fake_device_manager = mock.Mock()
        fake_device_manager.return_value = self._device_manager
        self._mock(
            'tacker.common.driver_manager.DriverManager', fake_device_manager)

    def _mock_vnf_monitor(self):
        # Replace the VNF monitor with an inert Mock.
        self._vnf_monitor = mock.Mock(wraps=FakeVNFMonitor())
        fake_vnf_monitor = mock.Mock()
        fake_vnf_monitor.return_value = self._vnf_monitor
        self._mock(
            'tacker.vnfm.monitor.VNFMonitor', fake_vnf_monitor)

    def _insert_dummy_vim(self):
        # Seed the DB with the VIM referenced by vnf_dict['vim_id'].
        session = self.context.session
        vim_db = nfvo_db.Vim(
            id='6261579e-d6f3-49ad-8bc3-a9cb974778ff',
            tenant_id='ad7ebc56538745a08ef7c5e97f8bd437',
            name='fake_vim',
            description='fake_vim_description',
            type='test_vim',
            status='Active',
            deleted_at=datetime.min,
            placement_attr={'regions': ['RegionOne']})
        vim_auth_db = nfvo_db.VimAuth(
            vim_id='6261579e-d6f3-49ad-8bc3-a9cb974778ff',
            password='<PASSWORD>',
            auth_url='http://localhost:5000',
            vim_project={'name': 'test_project'},
            auth_cred={'username': 'test_user', 'user_domain_id': 'default',
                       'project_domain_id': 'default'})
        session.add(vim_db)
        session.add(vim_auth_db)
        session.flush()

    @mock.patch('tacker.vnfm.plugin.VNFMPlugin.heal_vnf')
    @mock.patch('yaml.safe_load')
    @mock.patch('tacker.objects.HealVnfRequest')
    def test_vdu_autoheal_execute_action(self, mock_heal_vnf_request,
                                         mock_safe_load,
                                         mock_heal_vnf):
        # yaml.safe_load is mocked because this test stores
        # vnf_dict['attributes']['heat_template'] as an already-parsed dict
        # (in the real flow it is a YAML string), so a real safe_load would
        # fail with "dict object has no attribute 'read'".
        mock_safe_load.return_value = vnf_dict['attributes']['heat_template']
        resource_list = ['VDU1', 'CP1']
        additional_params = []
        # Build the heal request the action is expected to construct: one
        # entry per unreachable resource (the VDU and its port).
        for resource in resource_list:
            additional_paramas_obj = heal_vnf_request.HealVnfAdditionalParams(
                parameter=resource,
                cause=["Unable to reach while monitoring resource: '%s'" %
                       resource])
            additional_params.append(additional_paramas_obj)
        heal_request_data_obj = heal_vnf_request.HealVnfRequest(
            cause='VNF monitoring fails.',
            additional_params=additional_params)
        mock_heal_vnf_request.return_value = heal_request_data_obj
        self.vdu_autoheal.execute_action(self.vnfm_plugin, self.context,
                                         vnf_dict, args={'vdu_name': 'VDU1'})
        mock_heal_vnf.assert_called_once_with(self.context, vnf_dict['id'],
                                              heal_request_data_obj)

    @mock.patch('tacker.vnfm.policy_actions.vdu_autoheal.'
                'vdu_autoheal.LOG')
    def test_vdu_autoheal_action_with_no_vdu_name(self, mock_log):
        # Without a 'vdu_name' argument the action should only log an error.
        expected_error_msg = ("VDU resource of vnf '%s' is not present for "
                              "autoheal." % vnf_dict['id'])
        self.vdu_autoheal.execute_action(self.vnfm_plugin, self.context,
                                         vnf_dict, args={})
        mock_log.error.assert_called_with(expected_error_msg)
1788723 | """
Filename: financial.py
Author: <NAME>
Current Status: In Development
"""
__author__ = "<NAME>"
__version__ = "1.000"
import openpyxl
import os
import time
import mysql.connector as mysql
from openpyxl.utils import column_index_from_string
from cellStyle import *
import sshtunnel
class ExcelSheet:
    """Thin wrapper around the budget workbook and its worksheets.

    Loads 'Example Spreadsheet.xlsx' from the working directory and exposes
    each sheet as an attribute (home, paychecks, dataTrack, january, ...).
    """

    def __init__(self):
        self.filename = 'Example Spreadsheet.xlsx'
        self.wb = openpyxl.load_workbook(self.filename)
        # Utility sheets with non-month names.
        self.home = self.wb['HOME']
        self.paychecks = self.wb['Paychecks']
        self.dataTrack = self.wb['databaseTrack']
        # One sheet per month; note the workbook spells 'Febuary' (sic),
        # and the attribute name keeps that spelling.
        month_names = ('January', 'Febuary', 'March', 'April', 'May', 'June',
                       'July', 'August', 'September', 'October', 'November',
                       'December')
        for name in month_names:
            setattr(self, name.lower(), self.wb[name])

    def saveFile(self):
        """Write the workbook back to the file it was loaded from."""
        self.wb.save(self.filename)

    def cellStyle(self, align, color, cell):
        """Apply fill, the shared all-sides border and alignment to a cell."""
        cell.fill = color
        cell.border = Style.ALLBORDER
        cell.alignment = align
class DatabaseQuery():
# Initializing the database connection
def __init__(self, db):
    """Store the open mysql.connector connection and keep one cursor
    for the lifetime of this object."""
    self.db = db
    self.cursor = self.db.cursor()
def closeConnection(self):
    """Close the underlying database connection (the cursor dies with it)."""
    self.db.close()
def testDBQuery(self): # works
    """Smoke-test the connection: dump the income table, then insert one row.

    NOTE(review): this is not read-only — every call INSERTs a hard-coded
    row via the `newIncome` stored procedure and commits it.
    """
    cursor = self.cursor
    query = "select * from income"
    cursor.execute(query)
    result = cursor.fetchall()
    for x in result:
        print(x)
    query = 'call newIncome(350.00, "rent", "Mom", "2021-12-01", "Housing")'
    cursor.execute(query)
    self.db.commit()
# ----------------------------------------------HOME sheet----------------------------------
def updateSubscription(self):
    """Download all subscription rows into the HOME sheet (A18:D...).

    Columns: item / start date / status / amount. Rows 18-21 are cleared
    first, then data is written starting at row 18.
    NOTE(review): only rows 18-21 are cleared, but the overflow guard fires
    after row 22 has already been written — confirm the intended capacity
    of the subscription table on the sheet (4 or 5 rows).
    """
    cursor = self.cursor
    wb = ExcelSheet()
    home = wb.home
    """
    getting all subscriptions from database
    """
    query = """select * from subscription"""
    cursor.execute(query)
    result = cursor.fetchall() #array of 5 (not using first column)
    """
    Clearing all old data
    """
    for i in range(18, 22):
        home.cell(row = i, column= column_index_from_string("A")).value = None
        home.cell(row = i, column= column_index_from_string("B")).value = None
        home.cell(row = i, column= column_index_from_string("C")).value = None
        home.cell(row = i, column= column_index_from_string("D")).value = None
    """
    downloading/updating all subscription data to spreadsheet
    """
    i = 18 # starting row [FIX FOR FUTURE USE]
    for tracked in result:
        item = tracked[1]
        amount = tracked[2]
        start = tracked[3]
        status = tracked[4]
        date = start.strftime("%m/%d/%Y")
        home.cell(row = i, column= column_index_from_string("A")).value = item
        home.cell(row = i, column= column_index_from_string("B")).value = date
        home.cell(row = i, column= column_index_from_string("C")).value = status
        home.cell(row = i, column= column_index_from_string("D")).value = amount
        wb.cellStyle(Style.LEFTALIGN, HomeColor.SUBSCRIPTIONFILL,
            home.cell(row = i, column= column_index_from_string("A")))
        wb.cellStyle(Style.CENTERALIGN, HomeColor.SUBSCRIPTIONFILL,
            home.cell(row = i, column= column_index_from_string("B")))
        wb.cellStyle(Style.CENTERALIGN, HomeColor.SUBSCRIPTIONFILL,
            home.cell(row = i, column= column_index_from_string("C")))
        wb.cellStyle(Style.RIGHTALIGN, HomeColor.SUBSCRIPTIONFILL,
            home.cell(row = i, column= column_index_from_string("D")))
        i += 1
        if i > 22:
            raise ValueError("\tSIZE ERROR: Number of subscriptions have exceeded the total allowed on %s\n" % wb.filename)
    self.closeConnection()
    wb.saveFile()
def updateDesiredPurchase(self):
    """Download all desired-purchase rows into the HOME sheet (A28:C...).

    Columns: item / status / amount. Rows 28-31 are cleared first, then
    data is written starting at row 28.
    NOTE(review): only rows 28-31 are cleared but the overflow guard allows
    writing through row 32 — confirm the table's real capacity.
    """
    cursor = self.cursor
    wb = ExcelSheet()
    home = wb.home
    """
    getting all desired purchases from database
    """
    query = """select * from desiredPur"""
    cursor.execute(query)
    result = cursor.fetchall() #array of 4 (not using first column)
    """
    Clearing all old data
    """
    for i in range(28, 32):
        home.cell(row = i, column= column_index_from_string("A")).value = None
        home.cell(row = i, column= column_index_from_string("B")).value = None
        home.cell(row = i, column= column_index_from_string("C")).value = None
    """
    downloading/updating all subscription data to spreadsheet
    """
    i = 28 # starting row [FIX FOR FUTURE USE]
    for tracked in result:
        item = tracked[1]
        amount = tracked[2]
        status = tracked[3]
        home.cell(row = i, column= column_index_from_string("A")).value = item
        home.cell(row = i, column= column_index_from_string("B")).value = status
        home.cell(row = i, column= column_index_from_string("C")).value = amount
        wb.cellStyle(Style.LEFTALIGN, HomeColor.DESIREDPURFILL,
            home.cell(row = i, column= column_index_from_string("A")))
        wb.cellStyle(Style.CENTERALIGN, HomeColor.DESIREDPURFILL,
            home.cell(row = i, column= column_index_from_string("B")))
        wb.cellStyle(Style.RIGHTALIGN, HomeColor.DESIREDPURFILL,
            home.cell(row = i, column= column_index_from_string("C")))
        i += 1
        if i > 32:
            raise ValueError("\tSIZE ERROR: Number of desired purchases have exceeded the total allowed on %s\n" % wb.filename)
    self.closeConnection()
    wb.saveFile()
def updateForSale(self):
    """Download all for-sale rows into the HOME sheet (A38:C...).

    Columns: item / status / amount. Rows 38-39 are cleared first, then
    data is written starting at row 38.
    NOTE(review): only rows 38-39 are cleared but the overflow guard allows
    writing through row 40 — confirm the table's real capacity.
    """
    cursor = self.cursor
    wb = ExcelSheet()
    home = wb.home
    """
    getting all sales from database
    """
    query = """select * from forSale"""
    cursor.execute(query)
    result = cursor.fetchall() #array of 5 (not using first column)
    """
    Clearing all old data
    """
    for i in range(38, 40):
        home.cell(row = i, column= column_index_from_string("A")).value = None
        home.cell(row = i, column= column_index_from_string("B")).value = None
        home.cell(row = i, column= column_index_from_string("C")).value = None
    """
    downloading/updating all for sale data to spreadsheet
    """
    i = 38 # starting row [FIX FOR FUTURE USE]
    for tracked in result:
        item = tracked[1]
        amount = tracked[2]
        status = tracked[3]
        home.cell(row = i, column= column_index_from_string("A")).value = item
        home.cell(row = i, column= column_index_from_string("B")).value = status
        home.cell(row = i, column= column_index_from_string("C")).value = amount
        wb.cellStyle(Style.LEFTALIGN, HomeColor.FORSALEFILL,
            home.cell(row = i, column= column_index_from_string("A")))
        wb.cellStyle(Style.CENTERALIGN, HomeColor.FORSALEFILL,
            home.cell(row = i, column= column_index_from_string("B")))
        wb.cellStyle(Style.RIGHTALIGN, HomeColor.FORSALEFILL,
            home.cell(row = i, column= column_index_from_string("C")))
        i += 1
        if i > 40:
            raise ValueError("\tSIZE ERROR: Number of for-sale items have exceeded the total allowed on %s\n" % wb.filename)
    self.closeConnection()
    wb.saveFile()
def updateNetWorth(self):
    """Refresh the Net Worth table on the HOME sheet from the latest
    per-account snapshot returned by the `netWorth` stored procedure.

    Bank sub-accounts go to L22-L24 (the in-sheet total formula sums
    them); every other account type has a fixed row in B5:C11.
    """
    cursor = self.cursor
    wb = ExcelSheet()
    home = wb.home
    # Latest snapshot rows: (amount, account name, date) per account.
    query = "call netWorth"
    cursor.execute(query)
    result = cursor.fetchall()  # array of 3
    for tracked in result:
        amount = tracked[0]
        account = tracked[1]
        day = tracked[2]
        date = day.strftime("%m/%d/%Y")
        # if account is a bank account (total in bank is autocalculated in excel)
        if "Bank Account" in account:
            if "Spend" in account:
                home.cell(row = 22, column= column_index_from_string("L")).value = amount
            elif "Reserve" in account:
                home.cell(row = 23, column= column_index_from_string("L")).value = amount
            elif "Growth" in account:
                home.cell(row = 24, column= column_index_from_string("L")).value = amount
            # refresh date
            home.cell(row = 5, column= column_index_from_string("C")).value = date
            # BUG FIX: the original looped over range(22-24), i.e. the empty
            # range(-2), so the bank rows were never styled; rows 22-24
            # inclusive require range(22, 25).
            for i in range(22, 25):
                wb.cellStyle(Style.RIGHTALIGN, HomeColor.BANKFILL,
                    home.cell(row=i, column= column_index_from_string("L")))
        elif "Invest" in account:
            home.cell(row = 6, column= column_index_from_string("B")).value = amount
            home.cell(row = 6, column= column_index_from_string("C")).value = date
        elif "Safe" in account:
            home.cell(row = 7, column= column_index_from_string("B")).value = amount
            home.cell(row = 7, column= column_index_from_string("C")).value = date
        elif "Wallet" in account:
            home.cell(row = 8, column= column_index_from_string("B")).value = amount
            home.cell(row = 8, column= column_index_from_string("C")).value = date
        elif "Gift Card 1" in account:
            home.cell(row = 9, column= column_index_from_string("B")).value = amount
            home.cell(row = 9, column= column_index_from_string("C")).value = date
        elif "Gift Card 2" in account:
            home.cell(row = 10, column= column_index_from_string("B")).value = amount
            home.cell(row = 10, column= column_index_from_string("C")).value = date
        else:
            # NOTE(review): every "other" account overwrites row 11; a sum of
            # all remaining accounts is probably intended (original TODO).
            home.cell(row = 11, column= column_index_from_string("B")).value = amount
            home.cell(row = 11, column= column_index_from_string("C")).value = date
    # applying styles to all Net Worth cells
    for i in range(5, 12):
        wb.cellStyle(Style.RIGHTALIGN, HomeColor.NETWORTHFILL,
            home.cell(row=i, column=column_index_from_string("B")))
        wb.cellStyle(Style.CENTERALIGN, HomeColor.NETWORTHFILL,
            home.cell(row=i, column=column_index_from_string("C")))
    self.closeConnection()
    wb.saveFile()
# ----------------------------------------------databaseTrack sheet----------------------------------
def downloadAccount(self):
    """Append account-tracking rows newer than the newest already on the
    databaseTrack sheet (columns A-D: id, name, value, date).

    The newest id on the sheet is found by scanning column A for the first
    empty cell; only DB rows with acct_id greater than it are fetched.
    NOTE(review): lastId defaults to 1000 on an empty sheet — presumably
    ids start at 1001; confirm against the schema.
    """
    cursor = self.cursor
    wb = ExcelSheet()
    dataTrack = wb.dataTrack
    """
    getting last account_id
    """
    lastId = lastRow = 0
    firstId = dataTrack.cell(row = 3, column= column_index_from_string("A")).value
    # if first cell is empty
    if firstId == None:
        lastId = 1000
        lastRow = 3
    else:
        # look for first empty cell
        for i in range(2, dataTrack.max_row):
            nextID = dataTrack.cell(row = i+1, column= column_index_from_string("A")).value
            if nextID == None:
                lastId = dataTrack.cell(row = i, column= column_index_from_string("A")).value
                lastRow = i+1
                break
    """
    getting all account data from database
    """
    query = """select * from account where acct_id > %d""" % (lastId)
    cursor.execute(query)
    result = cursor.fetchall() #array of 4
    """
    downloading all account data to spreadsheet
    """
    for tracked in result:
        id = tracked[0]
        name = tracked[1]
        value = tracked[2]
        day = tracked[3]
        date = day.strftime("%m/%d/%Y")
        dataTrack.cell(row = lastRow, column= column_index_from_string("A")).value = id
        dataTrack.cell(row = lastRow, column= column_index_from_string("B")).value = name
        dataTrack.cell(row = lastRow, column= column_index_from_string("C")).value = value
        dataTrack.cell(row = lastRow, column= column_index_from_string("D")).value = date
        wb.cellStyle(Style.LEFTALIGN, DatabaseTrackColor.ACCOUNTFILL,
            dataTrack.cell(row=lastRow, column= column_index_from_string("A")))
        wb.cellStyle(Style.LEFTALIGN, DatabaseTrackColor.ACCOUNTFILL,
            dataTrack.cell(row=lastRow, column= column_index_from_string("B")))
        wb.cellStyle(Style.RIGHTALIGN, DatabaseTrackColor.ACCOUNTFILL,
            dataTrack.cell(row=lastRow, column= column_index_from_string("C")))
        wb.cellStyle(Style.CENTERALIGN, DatabaseTrackColor.ACCOUNTFILL,
            dataTrack.cell(row=lastRow, column= column_index_from_string("D")))
        lastRow += 1
    self.closeConnection()
    wb.saveFile()
def downloadProfit(self):
    """Append profit rows newer than the newest already on the
    databaseTrack sheet (columns F-I: id, value, date, time).

    note: cells H1 and I1 are filled in and have borders
    """
    cursor = self.cursor
    wb = ExcelSheet()
    dataTrack = wb.dataTrack
    # Find the newest profit_id on the sheet and the first free row.
    # FIX: the original had a duplicated assignment (firstId = firstId = ...).
    lastId = lastRow = 0
    firstId = dataTrack.cell(row = 3, column= column_index_from_string("F")).value
    if firstId == None:
        lastId = 1000   # presumably ids start at 1001 — confirm schema
        lastRow = 3
    else:
        for i in range(2, dataTrack.max_row):
            nextID = dataTrack.cell(row = i+1, column= column_index_from_string("F")).value
            if nextID == None:
                lastId = dataTrack.cell(row = i, column= column_index_from_string("F")).value
                lastRow = i+1
                break
    # Fetch only rows newer than what the sheet already holds.
    query = """select * from profit where profit_id > %d""" % (lastId)
    cursor.execute(query)
    result = cursor.fetchall()  # array of 4
    for tracked in result:
        # Locals renamed from `id`/`time`, which shadowed builtins.
        profit_id = tracked[0]
        value = tracked[1]
        day = tracked[2]
        logged_time = tracked[3]
        date = day.strftime("%m/%d/%Y")
        dataTrack.cell(row = lastRow, column= column_index_from_string("F")).value = profit_id
        dataTrack.cell(row = lastRow, column= column_index_from_string("G")).value = value
        dataTrack.cell(row = lastRow, column= column_index_from_string("H")).value = date
        dataTrack.cell(row = lastRow, column= column_index_from_string("I")).value = logged_time
        wb.cellStyle(Style.LEFTALIGN, DatabaseTrackColor.PROFITFILL,
            dataTrack.cell(row=lastRow, column= column_index_from_string("F")))
        wb.cellStyle(Style.RIGHTALIGN, DatabaseTrackColor.PROFITFILL,
            dataTrack.cell(row=lastRow, column= column_index_from_string("G")))
        wb.cellStyle(Style.CENTERALIGN, DatabaseTrackColor.PROFITFILL,
            dataTrack.cell(row=lastRow, column= column_index_from_string("H")))
        wb.cellStyle(Style.CENTERALIGN, DatabaseTrackColor.PROFITFILL,
            dataTrack.cell(row=lastRow, column= column_index_from_string("I")))
        lastRow += 1
    self.closeConnection()
    wb.saveFile()
def newLogin(self):
    """Record a new login via the `newLogin` stored procedure, then mirror
    any login rows missing from the databaseTrack sheet (columns K-M:
    id, date, seconds).

    NOTE(review): there is no self.db.commit() after the CALL — confirm the
    procedure commits on its own, otherwise the insert may not persist.
    """
    # adding a new sign-in to the database
    cursor = self.cursor
    query = "call newLogin"
    cursor.execute(query)
    wb = ExcelSheet()
    dataTrack = wb.dataTrack
    """
    getting last account_login
    """
    lastId = lastRow = 0
    firstId = dataTrack.cell(row = 3, column= column_index_from_string("K")).value
    # if first cell is empty
    if firstId == None:
        lastId = 1000
        lastRow = 3
    else:
        # look for first empty cell
        for i in range(2, dataTrack.max_row):
            nextID = dataTrack.cell(row = i+1, column= column_index_from_string("K")).value
            if nextID == None:
                lastId = dataTrack.cell(row = i, column= column_index_from_string("K")).value
                lastRow = i+1
                break
    """
    getting all login data from database
    """
    query = """select * from login_track where login_id > %d""" % (lastId)
    cursor.execute(query)
    result = cursor.fetchall() #array of 3
    """
    downloading all login data to spreadsheet
    """
    for tracked in result:
        index = tracked[0]
        day = tracked[1]
        sec = tracked[2]
        date = day.strftime("%m/%d/%Y")
        dataTrack.cell(row = lastRow, column= column_index_from_string("K")).value = index
        dataTrack.cell(row = lastRow, column= column_index_from_string("L")).value = date
        dataTrack.cell(row = lastRow, column= column_index_from_string("M")).value = sec
        wb.cellStyle(Style.LEFTALIGN, DatabaseTrackColor.LOGINFILL,
            dataTrack.cell(row = lastRow, column = column_index_from_string("K")))
        wb.cellStyle(Style.CENTERALIGN, DatabaseTrackColor.LOGINFILL,
            dataTrack.cell(row = lastRow, column = column_index_from_string("L")))
        wb.cellStyle(Style.RIGHTALIGN, DatabaseTrackColor.LOGINFILL,
            dataTrack.cell(row = lastRow, column = column_index_from_string("M")))
        lastRow += 1
    self.closeConnection()
    wb.saveFile()
def downloadNetWorth(self):
    """Append net-worth rows newer than the newest already on the
    databaseTrack sheet (columns O-R: id, value, date, time).
    """
    cursor = self.cursor
    wb = ExcelSheet()
    dataTrack = wb.dataTrack
    """
    getting last networth_id
    """
    lastId = lastRow = 0
    firstId = dataTrack.cell(row = 3, column= column_index_from_string("O")).value
    # if first cell is empty
    if firstId == None:
        lastId = 1000
        lastRow = 3
    else:
        # look for first empty cell
        for i in range(2, dataTrack.max_row):
            nextID = dataTrack.cell(row = i+1, column= column_index_from_string("O")).value
            if nextID == None:
                lastId = dataTrack.cell(row = i, column= column_index_from_string("O")).value
                lastRow = i+1
                break
    """
    getting all account data from database
    """
    query = """select * from netWorth where worth_id > %d""" % (lastId)
    cursor.execute(query)
    result = cursor.fetchall() #array of 4
    """
    downloading all account data to spreadsheet
    """
    for tracked in result:
        id = tracked[0]
        value = tracked[1]
        day = tracked[2]
        time = tracked[3]
        date = day.strftime("%m/%d/%Y")
        dataTrack.cell(row = lastRow, column= column_index_from_string("O")).value = id
        dataTrack.cell(row = lastRow, column= column_index_from_string("P")).value = value
        dataTrack.cell(row = lastRow, column= column_index_from_string("Q")).value = date
        dataTrack.cell(row = lastRow, column= column_index_from_string("R")).value = time
        wb.cellStyle(Style.LEFTALIGN, DatabaseTrackColor.NETWORTHFILL,
            dataTrack.cell(row=lastRow, column= column_index_from_string("O")))
        wb.cellStyle(Style.RIGHTALIGN, DatabaseTrackColor.NETWORTHFILL,
            dataTrack.cell(row=lastRow, column= column_index_from_string("P")))
        wb.cellStyle(Style.CENTERALIGN, DatabaseTrackColor.NETWORTHFILL,
            dataTrack.cell(row=lastRow, column= column_index_from_string("Q")))
        wb.cellStyle(Style.CENTERALIGN, DatabaseTrackColor.NETWORTHFILL,
            dataTrack.cell(row=lastRow, column= column_index_from_string("R")))
        lastRow += 1
    self.closeConnection()
    wb.saveFile()
# ----------------------------------------------Paychecks sheet----------------------------------
def downloadPaycheck(self):
    """Append paycheck rows newer than the newest already on the Paychecks
    sheet (data starts at row 6; columns A-K).
    """
    cursor = self.cursor
    wb = ExcelSheet()
    paycheck = wb.paychecks
    """
    getting last paycheck_id
    """
    lastId = lastRow = 0
    # NOTE(review): duplicated assignment target below — harmless, but
    # should be cleaned up to a single `firstId = ...`.
    firstId = firstId = paycheck.cell(row = 6, column= column_index_from_string("A")).value
    # if first cell is empty
    if firstId == None:
        lastId = 1000
        lastRow = 6
    else:
        for i in range(5, paycheck.max_row):
            nextID = paycheck.cell(row = i+1, column= column_index_from_string("A")).value
            if nextID == None:
                lastId = paycheck.cell(row = i, column= column_index_from_string("A")).value
                lastRow = i+1
                break
    """
    getting all profit data from database
    """
    query = """select * from paycheck where check_id > %d""" % (lastId)
    cursor.execute(query)
    result = cursor.fetchall() #array of 12 (2 not counted (income_id))
    """
    downloading all account data to spreadsheet
    """
    for tracked in result:
        id= tracked[0]
        company = tracked[2]
        hours = tracked[3]
        start = tracked[4]
        end = tracked[5]
        pay = tracked[6]
        gross = tracked[7]
        federal = tracked[8]
        state = tracked[9]
        city = tracked[10]
        final = tracked[11]
        startD = start.strftime("%m/%d/%Y")
        endD = end.strftime("%m/%d/%Y")
        payD = pay.strftime("%m/%d/%Y")
        paycheck.cell(row = lastRow, column= column_index_from_string("A")).value = id
        paycheck.cell(row = lastRow, column= column_index_from_string("B")).value = company
        paycheck.cell(row = lastRow, column= column_index_from_string("F")).value = hours
        paycheck.cell(row = lastRow, column= column_index_from_string("C")).value = startD
        paycheck.cell(row = lastRow, column= column_index_from_string("D")).value = endD
        paycheck.cell(row = lastRow, column= column_index_from_string("E")).value = payD
        paycheck.cell(row = lastRow, column= column_index_from_string("G")).value = gross
        paycheck.cell(row = lastRow, column= column_index_from_string("H")).value = federal
        paycheck.cell(row = lastRow, column= column_index_from_string("I")).value = state
        paycheck.cell(row = lastRow, column= column_index_from_string("J")).value = city
        paycheck.cell(row = lastRow, column= column_index_from_string("K")).value = final
        wb.cellStyle(Style.LEFTALIGN, PaycheckColor.PAYCHECKFILL,
            paycheck.cell(row=lastRow, column= column_index_from_string("A")))
        wb.cellStyle(Style.LEFTALIGN, PaycheckColor.PAYCHECKFILL,
            paycheck.cell(row=lastRow, column= column_index_from_string("B")))
        wb.cellStyle(Style.CENTERALIGN, PaycheckColor.PAYCHECKFILL,
            paycheck.cell(row=lastRow, column= column_index_from_string("F")))
        wb.cellStyle(Style.CENTERALIGN, PaycheckColor.PAYCHECKFILL,
            paycheck.cell(row=lastRow, column= column_index_from_string("C")))
        wb.cellStyle(Style.CENTERALIGN, PaycheckColor.PAYCHECKFILL,
            paycheck.cell(row=lastRow, column= column_index_from_string("D")))
        wb.cellStyle(Style.CENTERALIGN, PaycheckColor.PAYCHECKFILL,
            paycheck.cell(row=lastRow, column= column_index_from_string("E")))
        wb.cellStyle(Style.RIGHTALIGN, PaycheckColor.PAYCHECKFILL,
            paycheck.cell(row=lastRow, column= column_index_from_string("G")))
        wb.cellStyle(Style.RIGHTALIGN, PaycheckColor.PAYCHECKFILL,
            paycheck.cell(row=lastRow, column= column_index_from_string("H")))
        wb.cellStyle(Style.RIGHTALIGN, PaycheckColor.PAYCHECKFILL,
            paycheck.cell(row=lastRow, column= column_index_from_string("I")))
        wb.cellStyle(Style.RIGHTALIGN, PaycheckColor.PAYCHECKFILL,
            paycheck.cell(row=lastRow, column= column_index_from_string("J")))
        wb.cellStyle(Style.RIGHTALIGN, PaycheckColor.PAYCHECKFILL,
            paycheck.cell(row=lastRow, column= column_index_from_string("K")))
        lastRow += 1
    self.closeConnection()
    wb.saveFile()
# ----------------------------------------------Month sheet----------------------------------
def downloadAllExpenses(self):
    """Write every expense row from the database into its month sheet
    (columns G-K, starting at row 5). The HOME sheet is not touched.

    NOTE(review): the row counter resets whenever the month changes, so a
    month that appears non-contiguously in the (unordered) query result
    would have its earlier rows overwritten.
    """
    cursor = self.cursor
    wb = ExcelSheet()
    nextMonth = wb.febuary
    # All expenses: (id, amount, item, party, date, type).
    query = "select * from expense"
    cursor.execute(query)
    result = cursor.fetchall()  # array of 6 (not using first)
    i = 5  # first data row on each month sheet
    for tracked in result:
        amount = tracked[1]
        item = tracked[2]
        party = tracked[3]
        day = tracked[4]
        category = tracked[5]   # renamed from `type` (shadowed the builtin)
        date = day.strftime("%m/%d/%Y")
        month = self.checkMonth(date, wb)
        # Restart at the top of the table when we switch month sheets.
        if month != nextMonth:
            i = 5
            nextMonth = month
        print(month)
        month.cell(row = i, column= column_index_from_string("G")).value = item
        month.cell(row = i, column= column_index_from_string("H")).value = party
        month.cell(row = i, column= column_index_from_string("I")).value = date
        month.cell(row = i, column= column_index_from_string("J")).value = amount
        month.cell(row = i, column= column_index_from_string("K")).value = category
        wb.cellStyle(Style.LEFTALIGN, MonthColor.EXPENSESFILL,
            month.cell(row=i, column= column_index_from_string("G")))
        wb.cellStyle(Style.CENTERALIGN, MonthColor.EXPENSESFILL,
            month.cell(row=i, column= column_index_from_string("H")))
        wb.cellStyle(Style.CENTERALIGN, MonthColor.EXPENSESFILL,
            month.cell(row=i, column= column_index_from_string("I")))
        wb.cellStyle(Style.RIGHTALIGN, MonthColor.EXPENSESFILL,
            month.cell(row=i, column= column_index_from_string("J")))
        wb.cellStyle(Style.CENTERALIGN, MonthColor.EXPENSESFILL,
            month.cell(row=i, column= column_index_from_string("K")))
        i += 1
    # BUG FIX: the original referenced self.closeConnection without calling
    # it, so the DB connection stayed open.
    self.closeConnection()
    wb.saveFile()
def downloadAllIncome(self):
    """Write every income row from the database into its month sheet
    (columns A-E, starting at row 5). The HOME sheet is not touched.

    NOTE(review): same caveat as downloadAllExpenses — the row counter
    resets on every month change of the unordered query result.
    """
    cursor = self.cursor
    wb = ExcelSheet()
    nextMonth = wb.febuary
    # All income rows: (id, amount, item, source, date, type).
    query = "select * from income"
    cursor.execute(query)
    result = cursor.fetchall()  # array of 6 (not using first)
    i = 5  # first data row on each month sheet
    for tracked in result:
        amount = tracked[1]
        item = tracked[2]
        source = tracked[3]
        day = tracked[4]
        category = tracked[5]   # renamed from `type` (shadowed the builtin)
        date = day.strftime("%m/%d/%Y")
        month = self.checkMonth(date, wb)
        # Restart at the top of the table when we switch month sheets.
        if month != nextMonth:
            i = 5
            nextMonth = month
        print(month)
        month.cell(row = i, column= column_index_from_string("A")).value = item
        month.cell(row = i, column= column_index_from_string("B")).value = source
        month.cell(row = i, column= column_index_from_string("C")).value = date
        month.cell(row = i, column= column_index_from_string("D")).value = amount
        month.cell(row = i, column= column_index_from_string("E")).value = category
        wb.cellStyle(Style.LEFTALIGN, MonthColor.INCOMEFILL,
            month.cell(row=i, column= column_index_from_string("A")))
        wb.cellStyle(Style.CENTERALIGN, MonthColor.INCOMEFILL,
            month.cell(row=i, column= column_index_from_string("B")))
        wb.cellStyle(Style.CENTERALIGN, MonthColor.INCOMEFILL,
            month.cell(row=i, column= column_index_from_string("C")))
        wb.cellStyle(Style.RIGHTALIGN, MonthColor.INCOMEFILL,
            month.cell(row=i, column= column_index_from_string("D")))
        wb.cellStyle(Style.CENTERALIGN, MonthColor.INCOMEFILL,
            month.cell(row=i, column= column_index_from_string("E")))
        i += 1
    # BUG FIX: the original referenced self.closeConnection without calling
    # it, so the DB connection stayed open.
    self.closeConnection()
    wb.saveFile()
# ----------------------------------------------Helper methods-------------------------------------
def lastLogin(self):
    """Return the date of the most recent login recorded on the
    databaseTrack sheet (column L), or None when none is recorded.

    BUG FIX: the original called .strftime() unconditionally, raising
    AttributeError when the column was empty or when the cell held the
    pre-formatted date *string* that newLogin writes.
    """
    wb = ExcelSheet()
    dataTrack = wb.dataTrack
    firstDate = dataTrack.cell(row = 3, column= column_index_from_string("L")).value
    if firstDate is None:
        # No logins recorded yet.
        return None
    # Scan column L for the last filled cell (the one above the first empty).
    lastDate = None
    for i in range(3, dataTrack.max_row):
        nextID = dataTrack.cell(row = i+1, column= column_index_from_string("L")).value
        if nextID == None:
            lastDate = dataTrack.cell(row = i, column= column_index_from_string("L")).value
            break
    if lastDate is None:
        # Column is filled all the way down; take the last row.
        lastDate = dataTrack.cell(row = dataTrack.max_row, column= column_index_from_string("L")).value
        if lastDate is None:
            return None
    if hasattr(lastDate, "strftime"):
        return lastDate.strftime("%m/%d/%Y")
    # Cells written by newLogin already hold "%m/%d/%Y" strings.
    return lastDate
def checkMonth(self, date, wb):
"""
Checking the month of a given date
Used for income and expenses
Implemented
"""
# taking month substring out of date
month = date[0:2:1]
if month == "01":
return wb.january
elif month == "02":
return wb.febuary
elif month == "03":
return wb.march
elif month == "04":
return wb.april
elif month == "05":
return wb.may
elif month == "06":
return wb.june
elif month == "07":
return wb.july
elif month == "08":
return wb.august
elif month == "09":
return wb.september
elif month == "10":
return wb.october
elif month == "11":
return wb.november
elif month == "12":
return wb.december
class TestMethods():
    """Manual console checks against the module-level workbook globals.

    NOTE(review): these functions take no ``self`` and read the module-level
    ``master``, ``wb``, ``home`` and ``filename`` objects directly; they are
    intended to be called as plain functions during manual testing rather
    than on an instance -- confirm before instantiating this class.
    """

    # ---- showing each value ----
    def netWorth():
        """Print each net-worth line item (master sheet rows 5-11, cols A-B)."""
        index = 1
        for row in range(5, 12):
            print(index, ") ", master.cell(row=row, column=1).value,
                  "\t\t\t$", master.cell(row=row, column=2).value)
            index = index + 1

    def income():
        """Print each income entry (master sheet rows 5-16, columns I-K)."""
        index = 1
        for row in range(5, 17):
            print(index, ") ", master.cell(row=row, column=9).value,
                  "\t", master.cell(row=row, column=10).value,
                  "\t$", master.cell(row=row, column=11).value)
            index = index + 1

    def purchase():
        """Print each purchase entry (master sheet rows 5-29, columns M-O)."""
        index = 1
        for row in range(5, 30):
            print(index, ") ", master.cell(row=row, column=13).value,
                  "\t", master.cell(row=row, column=14).value,
                  "\t$", master.cell(row=row, column=15).value)
            index = index + 1

    # ---- calculating totals ----
    def totalWorth():
        """Sum and print net worth (master sheet column B, rows 5-11)."""
        worth = 0.00
        for row in range(5, 12):
            if master.cell(row=row, column=2).value is not None:
                worth = worth + master.cell(row=row, column=2).value
        print("$", "{:.2f}".format(worth))

    def totalRecieve():
        """Sum and print total income (master sheet column K, rows 5-16)."""
        worth = 0.00
        for row in range(5, 17):
            if master.cell(row=row, column=11).value is not None:
                worth = worth + master.cell(row=row, column=11).value
        print("$", "{:.2f}".format(worth))

    def totalBought():
        """Sum and print total purchases (master sheet column O, rows 5-29)."""
        worth = 0.00
        for row in range(5, 30):
            if master.cell(row=row, column=15).value is not None:
                worth = worth + master.cell(row=row, column=15).value
        print("$", "{:.2f}".format(worth))

    # ---- sheet management ----
    def newMonthSheet(month):
        """Create a new worksheet named after *month* and save the workbook.

        BUG FIX: the original created a sheet literally named 'test' and
        referenced ``wb.save`` without calling it, so no month sheet was
        ever created or persisted.
        """
        print(month)
        wb.create_sheet(month)
        wb.save(filename)
        print("success")

    def newMonth():
        """Create the sheet for the first month that does not have one yet."""
        # 'Febuary' spelling kept -- it matches the sheet names used elsewhere.
        months = ['January', 'Febuary', 'March', 'April', 'May', 'June',
                  'July', 'August', 'September', 'October', 'November',
                  'December']
        for month in months:
            if month not in wb.sheetnames:
                newMonthSheet(month)
                break

    def updatePNC():
        """Stamp the PNC balance label on the home sheet with today's date."""
        # NOTE(review): format is day/month/year, unlike the MM/DD/YYYY used
        # elsewhere in this module -- confirm which is intended.
        string = "PNC Bank acct (as of "
        string2 = time.strftime("%d/%m/%Y")
        string = string + string2 + ")"
        home['A5'] = string
        print(string)

    def updateMorgan():
        """Stamp the brokerage balance label on the master sheet."""
        string = "<NAME> (as of "
        string2 = time.strftime("%d/%m/%Y")
        string = string + string2 + ")"
        master['A6'] = string
        print(string)

    def newIncome(income):
        """Write *income* into the first empty cell of master column K.

        BUG FIX: the original used ``==`` (comparison, result discarded)
        instead of ``=`` (assignment), so the value was never written.
        """
        column = 11  # K column
        for i in range(5, 17):
            if master.cell(i, column).value is None:
                master.cell(i, column).value = income
                wb.save(filename)
                break

    def newPurchase(purchase):
        """Write *purchase* into the first empty cell of master column O.

        BUG FIX: ``==`` -> ``=``, as in newIncome.
        """
        column = 15  # O column
        for i in range(5, 30):
            if master.cell(i, column).value is None:
                master.cell(i, column).value = purchase
                wb.save(filename)
                break
# ---------------------------------------------------------------------------
# Manual driver: connects to the local MySQL 'financial' database and runs a
# selection of DatabaseQuery operations.  The commented-out block below is an
# earlier (abandoned) attempt to reach the database through an SSH tunnel.
# ---------------------------------------------------------------------------
# with sshtunnel.SSHTunnelForwarder(('172.16.58.3'),
# ssh_username = 'pi', ssh_password = '<PASSWORD>!', remote_bind_address=()
# ) as tunnel:
# connection = mysql.connect(
# user = 'project',
# passwd = '<PASSWORD>',
# host = '192.168.1.38', port = tunnel.local_bind_port,
# db = 'financial',
# )
# print(sshtunnel.check_address(("172.16.58.3", 22)))
# local connection (WORKS!!!)
print("Begin")
# NOTE(review): credentials are placeholders scrubbed from the published source.
db = mysql.connect(
    host = "192.168.1.38",
    database = "financial",
    user = "project",
    password = "<PASSWORD>"
)
test = DatabaseQuery(db)
# Uncomment the operations to exercise on this run:
# # test.testDBQuery()
# # test.newLogin()
# test.downloadAccount()
# test.downloadProfit()
# test.downloadNetWorth()
test.downloadPaycheck()
# test.updateSubscription()
# test.updateDesiredPurchase()
# test.updateForSale()
# # print(test.lastLogin())
# test.updateNetWorth()
# # print(test.checkMonth(test.lastLogin()))
# # test.downloadAllExpenses()
# # test.downloadAllIncome()
test.closeConnection()
print("End")
exit()
| StarcoderdataPython |
6583601 | # by amounra 0216 : http://www.aumhaa.com
# written against Live 9.6 release on 021516
from ableton.v2.control_surface.elements.color import Color
from aumhaa.v2.livid.colors import *
"""
Base_Map.py
Created by amounra on 2014-7-26.
This file allows the reassignment of the controls from their default arrangement. The order is from left to right;
Buttons are Note #'s and Faders/Rotaries are Controller #'s
"""
# ---- Controller layout and behaviour constants for the Livid Base script ----
USER_OFFSET = 10
SHIFT_LATCHING = False
CAP_BUTTON_TRANSLATIONS = False  # include the top 8 capacitive touch buttons in UserMode translations.
CHANNEL = 0  # main channel (0 - 15)
AFTERTOUCH = True  # when True, sends AT in instrument modes and UserMode. When false, turns CC's off for instrument modes and transmits CC's in UserModes.
BASE_PADS = [60, 61, 62, 63, 64, 65, 66, 67, 52, 53, 54, 55, 56, 57, 58, 59, 44, 45, 46, 47, 48, 49, 50, 51, 36, 37, 38, 39, 40, 41, 42, 43]  # there are 32 of these (comment fixed: original said 16)
BASE_TOUCHSTRIPS = [1, 2, 3, 4, 5, 6, 7, 8, 9]  # there are 9 of these
BASE_TOUCHPADS = [10, 11, 12, 13, 14, 15, 16, 17]  # there are 8 of these
BASE_BUTTONS = [18, 19, 20, 21, 22, 23, 24, 25]  # there are 8 of these (comment fixed: original said 16)
BASE_RUNNERS = [68, 69, 70, 71, 72, 73, 74, 75]
BASE_LCDS = [34, 35]
COLOR_MAP = [2, 64, 4, 8, 16, 127, 32]
"""You can change the orientation of the Up, Down, Left, and Right buttons (where applicable) by changing the array. The values correspond to the buttons from top to bottom."""
UDLR = [0, 1, 2, 3]
"""The values in this array determine the choices for what length of clip is created when "Fixed Length" is turned on:
0 = 1 Beat
1 = 2 Beat
2 = 1 Bar
3 = 2 Bars
4 = 4 Bars
5 = 8 Bars
6 = 16 Bars
7 = 32 Bars
"""
LENGTH_VALUES = [2, 3, 4]
CHANNELS = ['Ch. 2', 'Ch. 3', 'Ch. 4', 'Ch. 5', 'Ch. 6', 'Ch. 7', 'Ch. 8', 'Ch. 9', 'Ch. 10', 'Ch. 11', 'Ch. 12', 'Ch. 13', 'Ch. 14']
"""These are the scales we have available. You can freely add your own scales to this """
SCALES = {'Mod':[0,1,2,3,4,5,6,7,8,9,10,11],
    'Session':[0,1,2,3,4,5,6,7,8,9,10,11],
    'Keys':[0,2,4,5,7,9,11,12,1,3,3,6,8,10,10,13],
    'Auto':[0,1,2,3,4,5,6,7,8,9,10,11],
    'Chromatic':[0,1,2,3,4,5,6,7,8,9,10,11],
    'DrumPad':[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],
    'Major':[0,2,4,5,7,9,11],
    'Minor':[0,2,3,5,7,8,10],
    'Dorian':[0,2,3,5,7,9,10],
    'Mixolydian':[0,2,4,5,7,9,10],
    'Lydian':[0,2,4,6,7,9,11],
    'Phrygian':[0,1,3,5,7,8,10],
    'Locrian':[0,1,3,4,7,8,10],
    'Diminished':[0,1,3,4,6,7,9,10],
    'Whole-half':[0,2,3,5,6,8,9,11],
    'Whole_Tone':[0,2,4,6,8,10],
    'Minor_Blues':[0,3,5,6,7,10],
    'Minor_Pentatonic':[0,3,5,7,10],
    'Major_Pentatonic':[0,2,4,7,9],
    'Harmonic_Minor':[0,2,3,5,7,8,11],
    'Melodic_Minor':[0,2,3,5,7,9,11],
    'Dominant_Sus':[0,2,5,7,9,10],
    'Super_Locrian':[0,1,3,4,6,8,10],
    'Neopolitan_Minor':[0,1,3,5,7,8,11],
    'Neopolitan_Major':[0,1,3,5,7,9,11],
    'Enigmatic_Minor':[0,1,3,6,7,10,11],
    'Enigmatic':[0,1,4,6,8,10,11],
    'Composite':[0,1,4,6,7,8,11],
    'Bebop_Locrian':[0,2,3,5,6,8,10,11],
    'Bebop_Dominant':[0,2,4,5,7,9,10,11],
    'Bebop_Major':[0,2,4,5,7,8,9,11],
    'Bhairav':[0,1,4,5,7,8,11],
    'Hungarian_Minor':[0,2,3,6,7,8,11],
    'Minor_Gypsy':[0,1,4,5,7,8,10],
    'Persian':[0,1,4,5,6,8,11],
    'Hirojoshi':[0,2,3,7,8],
    'In-Sen':[0,1,5,7,10],
    'Iwato':[0,1,5,6,10],
    'Kumoi':[0,2,3,7,9],
    'Pelog':[0,1,3,4,7,8],
    'Spanish':[0,1,3,4,5,6,8,10]
    }
# Two-character display abbreviations for the LCD, keyed by scale name.
SCALEABBREVS = {'Auto':'-A','Keys':'-K','Chromatic':'12','DrumPad':'-D','Major':'M-','Minor':'m-','Dorian':'II','Mixolydian':'V',
    'Lydian':'IV','Phrygian':'IH','Locrian':'VH','Diminished':'d-','Whole-half':'Wh','Whole_Tone':'WT','Minor_Blues':'mB',
    'Minor_Pentatonic':'mP','Major_Pentatonic':'MP','Harmonic_Minor':'mH','Melodic_Minor':'mM','Dominant_Sus':'D+','Super_Locrian':'SL',
    'Neopolitan_Minor':'mN','Neopolitan_Major':'MN','Enigmatic_Minor':'mE','Enigmatic':'ME','Composite':'Cp','Bebop_Locrian':'lB',
    'Bebop_Dominant':'DB','Bebop_Major':'MB','Bhairav':'Bv','Hungarian_Minor':'mH','Minor_Gypsy':'mG','Persian':'Pr',
    'Hirojoshi':'Hr','In-Sen':'IS','Iwato':'Iw','Kumoi':'Km','Pelog':'Pg','Spanish':'Sp'}
"""This is the default scale used by Auto when something other than a drumrack is detected for the selected track"""
DEFAULT_AUTO_SCALE = 'Major'
"""This is the default Vertical Offset for any scale other than DrumPad """
DEFAULT_VERTOFFSET = 4
"""This is the default NoteOffset, aka RootNote, used for scales other than DrumPad"""
DEFAULT_OFFSET = 48
"""This is the default NoteOffset, aka RootNote, used for the DrumPad scale; it is a multiple of 4, so an offset of 4 is actually a RootNote of 16"""
DEFAULT_DRUMOFFSET = 9
"""This is the default Scale used for all MIDI Channels"""
DEFAULT_SCALE = 'Auto'
"""This is the default SplitMode used for all MIDI Channels"""
DEFAULT_MODE = 'seq'
# FIX: sorted() iterates the dict's keys directly, so the Python-2-only
# iterkeys() call (and the wrapping list comprehension) is unnecessary;
# this form is equivalent on Python 2 and also works on Python 3.
SCALENAMES = sorted(SCALES)
"""It is possible to create a custom list of scales to be used by the script. For instance, the list below would include major, minor, auto, drumpad, and chromatic scales, in that order."""
#SCALENAMES = ['Major', 'Minor', 'Auto', 'DrumPad', 'Chromatic']
# Bundle of the defaults above, consumed by the instrument component.
DEFAULT_INSTRUMENT_SETTINGS = {'Scales':SCALES,
    'ScaleAbbrevs':SCALEABBREVS,
    'ScaleNames':SCALENAMES,
    'DefaultAutoScale':DEFAULT_AUTO_SCALE,
    'DefaultVertOffset':DEFAULT_VERTOFFSET,
    'DefaultOffset':DEFAULT_OFFSET,
    'DefaultDrumOffset':DEFAULT_DRUMOFFSET,
    'DefaultScale':DEFAULT_SCALE,
    'DefaultMode':DEFAULT_MODE,
    'Channels':CHANNELS}
class BaseColors:
    """Colour assignments for every UI element of the Livid Base script.

    Nested classes group colours by component; the control-surface framework
    looks these attributes up by dotted skin path (e.g. Session.ClipStopped).

    FIX: removed a duplicate ``PadAction`` assignment in DrumGroup -- the
    first value (WHITE) was immediately shadowed by the second (RED), so the
    effective colour is unchanged.
    """

    class DefaultButton:
        On = LividRGB.WHITE
        Off = LividRGB.OFF
        Disabled = LividRGB.OFF
        Alert = LividRGB.BlinkFast.WHITE

    class MainModes:
        Clips = LividRGB.WHITE
        Clips_shifted = LividRGB.BlinkFast.WHITE
        Sends = LividRGB.MAGENTA
        Sends_shifted = LividRGB.BlinkFast.MAGENTA
        Device = LividRGB.CYAN
        Device_shifted = LividRGB.BlinkFast.CYAN
        User = LividRGB.RED
        User_shifted = LividRGB.BlinkFast.RED

    class Session:
        StopClipTriggered = LividRGB.BlinkFast.BLUE
        StopClip = LividRGB.BLUE
        Scene = LividRGB.CYAN
        NoScene = LividRGB.OFF
        SceneTriggered = LividRGB.BlinkFast.BLUE
        ClipTriggeredPlay = LividRGB.BlinkFast.GREEN
        ClipTriggeredRecord = LividRGB.BlinkFast.RED
        RecordButton = LividRGB.OFF
        ClipEmpty = LividRGB.OFF
        ClipStopped = LividRGB.WHITE
        ClipStarted = LividRGB.GREEN
        ClipRecording = LividRGB.RED
        NavigationButtonOn = LividRGB.BLUE
        PageNavigationButtonOn = LividRGB.CYAN
        Empty = LividRGB.OFF

    class NoteEditor:
        class Step:
            Low = LividRGB.CYAN
            High = LividRGB.WHITE
            Full = LividRGB.YELLOW
            Muted = LividRGB.YELLOW
            StepEmpty = LividRGB.OFF

        class StepEditing:
            High = LividRGB.GREEN
            Low = LividRGB.CYAN
            Full = LividRGB.YELLOW
            Muted = LividRGB.WHITE
            StepEmpty = LividRGB.OFF

        StepEmptyBase = LividRGB.OFF
        StepEmptyScale = LividRGB.OFF
        StepDisabled = LividRGB.OFF
        Playhead = Color(31)
        PlayheadRecord = Color(31)
        StepSelected = LividRGB.GREEN
        QuantizationSelected = LividRGB.RED
        QuantizationUnselected = LividRGB.MAGENTA

    class LoopSelector:
        Playhead = LividRGB.YELLOW
        OutsideLoop = LividRGB.BLUE
        InsideLoopStartBar = LividRGB.CYAN
        SelectedPage = LividRGB.WHITE
        InsideLoop = LividRGB.CYAN
        PlayheadRecord = LividRGB.RED

    class DrumGroup:
        PadFilled = LividRGB.GREEN
        PadFilledAlt = LividRGB.MAGENTA
        PadSelected = LividRGB.WHITE
        PadSelectedNotSoloed = LividRGB.WHITE
        PadEmpty = LividRGB.OFF
        PadMuted = LividRGB.YELLOW
        PadSoloed = LividRGB.CYAN
        PadMutedSelected = LividRGB.BLUE
        PadSoloedSelected = LividRGB.BLUE
        PadInvisible = LividRGB.OFF
        PadAction = LividRGB.RED

    class Mixer:
        SoloOn = LividRGB.CYAN
        SoloOff = LividRGB.OFF
        MuteOn = LividRGB.YELLOW
        MuteOff = LividRGB.OFF
        ArmSelected = LividRGB.GREEN
        ArmUnselected = LividRGB.RED
        ArmOff = LividRGB.OFF
        StopClip = LividRGB.BLUE
        SelectedOn = LividRGB.BLUE
        SelectedOff = LividRGB.OFF

    class Recording:
        On = LividRGB.BlinkFast.GREEN
        Off = LividRGB.GREEN
        Transition = LividRGB.BlinkSlow.GREEN

    class Recorder:
        On = LividRGB.WHITE
        Off = LividRGB.BLUE
        NewOn = LividRGB.BlinkFast.YELLOW
        NewOff = LividRGB.YELLOW
        FixedOn = LividRGB.BlinkFast.CYAN
        FixedOff = LividRGB.CYAN
        RecordOn = LividRGB.BlinkFast.GREEN
        RecordOff = LividRGB.GREEN
        FixedAssigned = LividRGB.MAGENTA
        FixedNotAssigned = LividRGB.OFF
        OverdubOn = LividRGB.BlinkFast.RED
        OverdubOff = LividRGB.RED

    class Transport:
        OverdubOn = LividRGB.BlinkFast.RED
        OverdubOff = LividRGB.RED
        StopOn = LividRGB.BLUE
        StopOff = LividRGB.BLUE

    class Sequencer:
        OctaveOn = LividRGB.BlinkFast.CYAN
        OctaveOff = LividRGB.OFF
        On = LividRGB.WHITE
        Off = LividRGB.OFF

    class Device:
        NavOn = LividRGB.MAGENTA
        NavOff = LividRGB.OFF
        BankOn = LividRGB.YELLOW
        BankOff = LividRGB.OFF
        ChainNavOn = LividRGB.RED
        ChainNavOff = LividRGB.OFF
        ContainNavOn = LividRGB.CYAN
        ContainNavOff = LividRGB.OFF

    class DeviceNavigator:
        DevNavOff = LividRGB.OFF
        DevNavOn = LividRGB.MAGENTA
        ChainNavOn = LividRGB.RED
        ChainNavOff = LividRGB.OFF
        LevelNavOn = LividRGB.CYAN
        LevelNavOff = LividRGB.OFF

    class Mod:
        class Nav:
            OnValue = LividRGB.RED
            OffValue = LividRGB.WHITE

    class MonoInstrument:
        PressFlash = LividRGB.WHITE
        OffsetOnValue = LividRGB.GREEN
        OffsetOffValue = LividRGB.OFF
        ScaleOffsetOnValue = LividRGB.RED
        ScaleOffsetOffValue = LividRGB.OFF
        SplitModeOnValue = LividRGB.WHITE
        SplitModeOffValue = LividRGB.OFF
        SequencerModeOnValue = LividRGB.CYAN
        SequencerModeOffValue = LividRGB.OFF
        DrumOffsetOnValue = LividRGB.MAGENTA
        DrumOffsetOffValue = LividRGB.OFF
        VerticalOffsetOnValue = LividRGB.BLUE
        VerticalOffsetOffValue = LividRGB.OFF

        class Keys:
            SelectedNote = LividRGB.GREEN
            RootWhiteValue = LividRGB.RED
            RootBlackValue = LividRGB.MAGENTA
            WhiteValue = LividRGB.CYAN
            BlackValue = LividRGB.BLUE

        class Drums:
            SelectedNote = LividRGB.BLUE
            EvenValue = LividRGB.GREEN
            OddValue = LividRGB.MAGENTA

    class Translation:
        SelectorOn = LividRGB.WHITE
        SelectorOff = LividRGB.OFF

        # Per-channel UserMode pad colours; all pads default to OFF.
        class Channel_10:
            Pad_0 = LividRGB.OFF
            Pad_1 = LividRGB.OFF
            Pad_2 = LividRGB.OFF
            Pad_3 = LividRGB.OFF
            Pad_4 = LividRGB.OFF
            Pad_5 = LividRGB.OFF
            Pad_6 = LividRGB.OFF
            Pad_7 = LividRGB.OFF
            Pad_8 = LividRGB.OFF
            Pad_9 = LividRGB.OFF
            Pad_10 = LividRGB.OFF
            Pad_11 = LividRGB.OFF
            Pad_12 = LividRGB.OFF
            Pad_13 = LividRGB.OFF
            Pad_14 = LividRGB.OFF
            Pad_15 = LividRGB.OFF
            Pad_16 = LividRGB.OFF
            Pad_17 = LividRGB.OFF
            Pad_18 = LividRGB.OFF
            Pad_19 = LividRGB.OFF
            Pad_20 = LividRGB.OFF
            Pad_21 = LividRGB.OFF
            Pad_22 = LividRGB.OFF
            Pad_23 = LividRGB.OFF
            Pad_24 = LividRGB.OFF
            Pad_25 = LividRGB.OFF
            Pad_26 = LividRGB.OFF
            Pad_27 = LividRGB.OFF
            Pad_28 = LividRGB.OFF
            Pad_29 = LividRGB.OFF
            Pad_30 = LividRGB.OFF
            Pad_31 = LividRGB.OFF

        class Channel_11:
            Pad_0 = LividRGB.OFF
            Pad_1 = LividRGB.OFF
            Pad_2 = LividRGB.OFF
            Pad_3 = LividRGB.OFF
            Pad_4 = LividRGB.OFF
            Pad_5 = LividRGB.OFF
            Pad_6 = LividRGB.OFF
            Pad_7 = LividRGB.OFF
            Pad_8 = LividRGB.OFF
            Pad_9 = LividRGB.OFF
            Pad_10 = LividRGB.OFF
            Pad_11 = LividRGB.OFF
            Pad_12 = LividRGB.OFF
            Pad_13 = LividRGB.OFF
            Pad_14 = LividRGB.OFF
            Pad_15 = LividRGB.OFF
            Pad_16 = LividRGB.OFF
            Pad_17 = LividRGB.OFF
            Pad_18 = LividRGB.OFF
            Pad_19 = LividRGB.OFF
            Pad_20 = LividRGB.OFF
            Pad_21 = LividRGB.OFF
            Pad_22 = LividRGB.OFF
            Pad_23 = LividRGB.OFF
            Pad_24 = LividRGB.OFF
            Pad_25 = LividRGB.OFF
            Pad_26 = LividRGB.OFF
            Pad_27 = LividRGB.OFF
            Pad_28 = LividRGB.OFF
            Pad_29 = LividRGB.OFF
            Pad_30 = LividRGB.OFF
            Pad_31 = LividRGB.OFF

        class Channel_12:
            Pad_0 = LividRGB.OFF
            Pad_1 = LividRGB.OFF
            Pad_2 = LividRGB.OFF
            Pad_3 = LividRGB.OFF
            Pad_4 = LividRGB.OFF
            Pad_5 = LividRGB.OFF
            Pad_6 = LividRGB.OFF
            Pad_7 = LividRGB.OFF
            Pad_8 = LividRGB.OFF
            Pad_9 = LividRGB.OFF
            Pad_10 = LividRGB.OFF
            Pad_11 = LividRGB.OFF
            Pad_12 = LividRGB.OFF
            Pad_13 = LividRGB.OFF
            Pad_14 = LividRGB.OFF
            Pad_15 = LividRGB.OFF
            Pad_16 = LividRGB.OFF
            Pad_17 = LividRGB.OFF
            Pad_18 = LividRGB.OFF
            Pad_19 = LividRGB.OFF
            Pad_20 = LividRGB.OFF
            Pad_21 = LividRGB.OFF
            Pad_22 = LividRGB.OFF
            Pad_23 = LividRGB.OFF
            Pad_24 = LividRGB.OFF
            Pad_25 = LividRGB.OFF
            Pad_26 = LividRGB.OFF
            Pad_27 = LividRGB.OFF
            Pad_28 = LividRGB.OFF
            Pad_29 = LividRGB.OFF
            Pad_30 = LividRGB.OFF
            Pad_31 = LividRGB.OFF

        class Channel_13:
            Pad_0 = LividRGB.OFF
            Pad_1 = LividRGB.OFF
            Pad_2 = LividRGB.OFF
            Pad_3 = LividRGB.OFF
            Pad_4 = LividRGB.OFF
            Pad_5 = LividRGB.OFF
            Pad_6 = LividRGB.OFF
            Pad_7 = LividRGB.OFF
            Pad_8 = LividRGB.OFF
            Pad_9 = LividRGB.OFF
            Pad_10 = LividRGB.OFF
            Pad_11 = LividRGB.OFF
            Pad_12 = LividRGB.OFF
            Pad_13 = LividRGB.OFF
            Pad_14 = LividRGB.OFF
            Pad_15 = LividRGB.OFF
            Pad_16 = LividRGB.OFF
            Pad_17 = LividRGB.OFF
            Pad_18 = LividRGB.OFF
            Pad_19 = LividRGB.OFF
            Pad_20 = LividRGB.OFF
            Pad_21 = LividRGB.OFF
            Pad_22 = LividRGB.OFF
            Pad_23 = LividRGB.OFF
            Pad_24 = LividRGB.OFF
            Pad_25 = LividRGB.OFF
            Pad_26 = LividRGB.OFF
            Pad_27 = LividRGB.OFF
            Pad_28 = LividRGB.OFF
            Pad_29 = LividRGB.OFF
            Pad_30 = LividRGB.OFF
            Pad_31 = LividRGB.OFF
| StarcoderdataPython |
9794567 | from mkapi.core.base import Base, Inline
from mkapi.core.inherit import is_complete
from mkapi.core.node import Node
def test_is_complete():
    """Node(Base) carries complete docstring sections; Node(Inline) does not."""
    assert is_complete(Node(Base))
    assert not is_complete(Node(Inline))
| StarcoderdataPython |
8070390 | """
Напишете функция `data_is_valid`,
която приема дата под формата на: година, месец, ден, час, минута, секунда;
и връща `True` ако датата е влидна и `False` в противен случай.
```python
>>> data_is_valid(2015, 1, 2, 23, 20, 10)
True
>>> data_is_valid(2015, 2, 29, 12, 10, 10)
False
>>> data_is_valid(2012, 2, 29, 12, 10, 10)
True
>>> data_is_valid(2015, 11, 31, 13, 30, 50)
False
```
"""
def data_is_valid(year, month, day, hour, minute, second):
    """Return True if the given calendar date and time are valid, else False.

    Delegates validation (including leap-year handling) to the
    ``datetime.datetime`` constructor, which raises ValueError for any
    out-of-range component.

    >>> data_is_valid(2015, 1, 2, 23, 20, 10)
    True
    >>> data_is_valid(2015, 2, 29, 12, 10, 10)
    False
    >>> data_is_valid(2012, 2, 29, 12, 10, 10)
    True
    >>> data_is_valid(2015, 11, 31, 13, 30, 50)
    False
    """
    from datetime import datetime
    try:
        datetime(year, month, day, hour, minute, second)
    except ValueError:
        return False
    return True
| StarcoderdataPython |
1888798 | <filename>EVBE/EVBFile.py
#-*- coding: utf-8 -*-
"""
Copyright (C) 2013 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
try:
from pefile import PE
except ImportError:
raise ImportError('You need install pefile library http://code.google.com/p/pefile')
__author__ = '<NAME>'
__contact__ = '<EMAIL>'
class EVBFile(object):
    """Reader for Enigma Virtual Box (EVB) containers embedded in PE files.

    The packed payload lives in a PE section named '.enigma1'; read() loads
    that section's raw bytes into ``self.data``.
    """

    def __init__(self, name):
        self.name = name    # path to the candidate PE executable
        self.data = None    # raw '.enigma1' section bytes after read()
        self.offset = 0x48  # payload offset inside the section data
        # TODO: Add generic search for container

    def read(self):
        """Load the '.enigma1' section.  Return True on success, else False.

        BUG FIX: the bare ``except:`` (which would also swallow SystemExit
        and KeyboardInterrupt) is narrowed to ``except Exception``.
        """
        try:
            pe = PE(self.name, fast_load=True)
        except Exception:
            print('File %s invalid' % self.name)
            return False
        if not pe.is_exe():
            print('This file is not exe')
            pe.close()
            return False
        section = None
        for s in pe.sections:
            if s.Name == '.enigma1':
                section = s
                break
        if section is None:
            print('This file is not Enigma Virtual Box container')
            pe.close()
            return False
        self.data = pe.get_data(section.VirtualAddress, section.SizeOfRawData)
        pe.close()
        return True
1776484 | # attempting to implement the parametric equations of a cone
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# from http://mathworld.wolfram.com/Cone.html
h = 1  # cone height
r = 1  # base radius (constant)
# Build a 2-D parameter grid: u runs along the axis, tht around it, then
# evaluate the parametric equations on that grid.
# BUG FIX: the original computed 1-D x/y arrays from paired (u, tht) samples
# and then meshgrid'ed x with y, which does not produce the cone surface.
u, tht = np.meshgrid(np.linspace(0, h), np.linspace(0, 2 * np.pi))
x = ((h - u) / h) * r * np.cos(tht)
y = ((h - u) / h) * r * np.sin(tht)
z = u
fig = plt.figure()
ax = plt.subplot(111, projection='3d')
ax.plot_surface(x, y, z)
# (removed ax.legend(): there are no labeled artists, so it only warned)
plt.show()
| StarcoderdataPython |
1608571 | """this file router for recipes page."""
from app import app
from db.db_queries.get_recipe_for_page_query import get_recipes_for_page
from flask import render_template
@app.route('/recipes')
def recipes():
    """Render the recipes listing page with all recipes fetched for display."""
    return render_template('recipes.html', recipes=get_recipes_for_page())
| StarcoderdataPython |
5034037 | <reponame>nanodust/BregmanToolkit<gh_stars>10-100
"""
wtcmatrix - convert list of scores into matrix form
Requires:
Music21 version 1.4.0+ - web.mit.edu/music21/
BregmanToolkit - https://github.com/bregmanstudio/BregmanToolkit
2015, <NAME>, Dartmouth College, Bregman Media Labs
License:
Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0)
http://creativecommons.org/licenses/by-nc/4.0/
"""
from pylab import array, roll, ones, zeros, dot, std, mean, sort, sqrt, where, imshow, figure, cm, arange, xticks, yticks, grid,mod, kron, r_, c_, plot
import music21 as m21
import glob, pdb, sys, os
import bregman
from scipy.signal import resample
def report(msg):
    """Print *msg* and flush stdout immediately (useful during long batch runs)."""
    sys.stdout.write('%s\n' % msg)
    sys.stdout.flush()
def load_wtc(idx=None, corpus=m21.__path__[0]+'/corpus/bach/bwv8[4-9][0-9]'):
    """
    Load items from a corpus, use given idx slice argument to select subsets
    """
    works = glob.glob(corpus)
    works.sort()
    selection = slice(0, len(works)) if idx is None else idx
    # parse every file of every selected work, in sorted order
    return [m21.converter.parse(path)
            for work in works[selection]
            for path in sort(glob.glob(work + '/*'))]
def get_notes_and_durations_by_measure(work_list):
"""
Make a list of (midi, quarterLength) tuples per measure from list of works
"""
notes = [[[(nn.midi,n.quarterLength) for n in w.measure(k).flat.notes for nn in n.pitches]
for k in xrange(1,len(w.measureOffsetMap())+1)] for w in work_list]
return notes
def get_notes_and_durations_by_quarters(work_list, num_qtrs=2):
    """
    Make a list of (midi, quarterLength) tuples per num_qtrs-wide quarter-note
    window from list of works
    """
    # Windows the flat note stream into consecutive [i, i+num_qtrs) offset
    # ranges; the loop bound is padded by num_qtrs so the final window is
    # included.
    notes = [[[(nn.midi,n.quarterLength) for n in w.flat.notes.getElementsByOffset(i,i+num_qtrs,includeEndBoundary=False) for nn in n.pitches]
        for i in xrange(0,int(max([o['offset'] for o in w.flat.notes.offsetMap]))+num_qtrs,num_qtrs)] for w in work_list]
    return notes
def extract_notes_positions_and_durations(work_list):
    """
    Return note positions and durations as (midi, offset, quarterLength) rows
    """
    notes = [[(nn.midi,n.offset,n.quarterLength) for n in w.flat.notes for nn in n.pitches] for w in work_list]
    # NOTE(review): only the first work is filtered and returned below; the
    # comprehension above computes triples for every work but notes[1:] are
    # discarded -- confirm whether multi-work support was intended.
    na = array(notes[0])
    # keep only rows with a non-zero duration
    notes = na[where(na[:,2])]
    return [notes]
#edit to include manual length and smallest duration
# start_t is start time in quarter notes
# duration is duration in quarter notes
def convert_notes_to_matrix(notes_list, start_t=0, duration=128): # start_t and duration offset in quarters
"""
Given a list of (midi,quarterLength) tuples, collate all notes per tactus tick (smallest duration) and
make piano-roll matrix
"""
mtx_list = []
for nl in notes_list: #where does nl come from?
smallest_dur = _calc_smallest_dur(nl) #manually calculate if none given
start_times = array(nl)[:,1] #
time_idx = (start_times >= start_t) & (start_times < start_t + duration)
nl = array(nl).copy()[time_idx]
t0 = nl[0,1]
N = nl[-1,1] - t0
d = nl[-1,2]
Nc = (N+d) / smallest_dur
mtx = zeros((128,Nc))
for n in nl:
mtx[n[0],(n[1]-t0)/smallest_dur:(n[1]-t0+n[2])/smallest_dur]=1
mtx_list.append(mtx)
return mtx_list
#calculate smallest interval
def _calc_smallest_dur(nl):
tick = array(nl)[:,2].min()
return tick
def plot_mtx(m, beats=4,tick=0.25, **kwargs):
    """
    Plot piano-roll matrix

    m     - 128 x num_ticks piano-roll matrix (rows are MIDI pitches)
    beats - quarter notes per bar (used for the bar grid)
    tick  - duration of one matrix column in quarter notes
    kwargs are forwarded to imshow (cmap defaults to ocean_r)
    """
    figure()
    kwargs.setdefault('cmap',cm.ocean_r)
    imshow(m,aspect='auto',origin='bottom',**kwargs)
    nr,nc = m.shape
    # one x tick per bar: beats quarter-notes, each tick columns wide
    xt = arange(0,nc,beats/tick)
    xticks(xt,arange(1,len(xt)+1))
    grid(axis='x',linestyle='--')
    # label each row with its pitch class
    pc=['C','C#','D','Eb','E','F','F#','G','G#','A','Bb','B']
    yt = arange(0,nr+1)
    yticks(yt,array(pc)[mod(yt,12)],fontsize=6)
    #grid(axis='y')
    # overlay staff-style horizontal lines, repeated each octave register
    staff_lines = array([4,7,11,14,17])
    staff_lines = array([staff_lines+12,staff_lines+36,staff_lines+60,staff_lines+84,staff_lines+108]).flatten()
    plot(c_[zeros(len(staff_lines)),nc*ones(len(staff_lines))].T,c_[staff_lines,staff_lines].T,'k')
def play_mtx(w, sr=32000, foffset=24, nhop=4410):
    """
    Invert a piano-roll matrix to audio via an inverse constant-Q transform.
    Return estimated signal.

    NOTE(review): the ``nhop`` parameter is ignored -- the feature object is
    constructed with a hard-coded nhop=4410; confirm which was intended.
    """
    # A throwaway sinusoid initializes the LogFrequencySpectrum feature
    # object; its spectrogram is then replaced by the piano-roll rows
    # starting foffset bins up, and inverted with the phase vocoder.
    F = bregman.features.LogFrequencySpectrum(bregman.testsignal.sinusoid(f0=441,num_points=44100),nbpo=12, nhop=4410)
    F.X = w[foffset:F.X.shape[0]+foffset,:]
    x_hat = F.inverse(pvoc=True)
    #bregman.sound.play(x_hat/x_hat.max(),sr)
    return x_hat
def convert_notes_to_signal(notes_list):
    """
    Generate an audible signal from a list of (midi, offset, ...) note rows:
    an impulse train with one spike per onset, scaled by pitch height.
    """
    sig = []
    tick=16  # samples per quarter-note before the 8x resample below
    for nn in notes_list:
        aa = array(nn)
        sig.append([])
        sig[-1] = zeros(aa[-1,1]*tick)
        # NOTE(review): the literal 16 below presumably should be ``tick`` --
        # confirm before changing either value.
        sig[-1][array(aa[:,1]*16-1,'i4')]=2**(aa[:,0]/12.0)
        # remove the DC offset of the non-zero samples
        sig[-1] = sig[-1] - sig[-1][where(sig[-1])].mean()
        sig[-1] = resample(sig[-1], len(sig[-1])*8)
    return sig
| StarcoderdataPython |
class TestData:
    """Static configuration for the HubSpot login UI test suite.

    FIX: removed a stray no-op string expression that duplicated BASE_URL
    (a bare string statement has no effect) and stray extraction junk fused
    onto the last line.
    """

    # Local webdriver binaries used to launch the browsers under test.
    CHROME_EXECUTABLE_PATH = "/Users/User/Desktop/selenium/selinium/python chromedriver/chromedriver"
    FIREFOX_EXECUTABLE_PATH = "/Users/User/Desktop/selenium/selinium/python chromedriver/geckodriver"

    # Application under test.
    BASE_URL = "https://app.hubspot.com/login"

    # Test account credentials (placeholders scrubbed from the source).
    USER_NAME = "<EMAIL>"
    PASSWORD = "<PASSWORD>"

    # Expected page title after navigating to BASE_URL.
    LOGIN_PAGE_TITLE = "HubSpot Login"
8102102 | import runonce
import time
import os
import random
# Manual soak test for the runonce module: grabs a per-PID lock and a shared
# named lock, then loops forever re-checking that both are still held.
# NOTE: Python 2 source (print statements).  The main loop never exits, so
# the release call at the bottom is only reached if the process is
# interrupted -- presumably intentional for an interactive test; confirm.
# NOTE(review): indentation reconstructed (it was lost in extraction); the
# per-iteration checks are assumed to nest inside the inner for loop.
lockname = "seattletestlock"
runonce.getprocesslock(str(os.getpid()))
print "my process id is:"+str(os.getpid())
retval = runonce.getprocesslock(lockname)
if retval == True:
    print "I have the mutex"
elif retval == False:
    print "Another process has the mutex (owned by another user most likely)"
else:
    print "Process "+str(retval)+" has the mutex!"
while True:
    # sleep a random 0-10 seconds in 2-second steps, re-checking each step
    for num in range(random.randint(0,5)):
        time.sleep(2)
        if runonce.stillhaveprocesslock(lockname):
            print "I have the mutex"
        else:
            print "I do not have the mutex"
        if runonce.stillhaveprocesslock(str(os.getpid())):
            print "I have my mutex"
        else:
            print "I do not have my mutex"
    time.sleep(2)
print "releasing mutex (if possible)"
runonce.releaseprocesslock(lockname)
| StarcoderdataPython |
4973635 | <reponame>jayhardikar/oci-data-science-ai-samples
import oci
import argparse
import time
import configparser
import os
import sys
from datetime import datetime, timedelta
# --- Set up
config_file = "~/.oci/config"  # default OCI CLI config path; the CONFIG env var overrides it in __main__
CONFIG_FILE = ""  # NOTE(review): appears unused in this module -- confirm before removing
ENV_TYPE = ""     # NOTE(review): appears unused; the profile name is read from the TENANCY env var in __main__
class MLJobs:
    """Thin wrapper around the OCI Data Science client for ML Jobs.

    Provides job CRUD, artifact upload and job-run operations; every call is
    delegated to ``oci.data_science.DataScienceClient``.
    """

    def __init__(self, env_type, config_file, compartment_id, subnet_id):
        """Build the Data Science client from the given OCI config profile.

        env_type       -- profile name inside the OCI config file
        config_file    -- path to the OCI config file (e.g. ~/.oci/config)
        compartment_id -- default compartment OCID
        subnet_id      -- default subnet OCID for job infrastructure
        """
        self.config_file = config_file
        self.compartment_id = compartment_id
        self.subnet_id = subnet_id
        try:
            print("*** Setting up data science client....")
            self.oci_config = oci.config.from_file(self.config_file, env_type)
            self.identity = oci.identity.IdentityClient(config=self.oci_config)
            self.dsc = oci.data_science.DataScienceClient(config=self.oci_config)
        except Exception as e:
            print(e)
            # bare re-raise preserves the original traceback (was `raise e`)
            raise

    def create_job(self, compartment_id, project_id, job_name="Job", subnet_id=None):
        """Create a STANDALONE job definition and return the API response."""
        print("-------------------------------------")
        print("*** Creating Job ...")
        if subnet_id == None:
            subnet_id = self.subnet_id
        job_payload = {
            "projectId": project_id,
            "compartmentId": compartment_id,
            "displayName": job_name,
            "jobConfigurationDetails": {
                "jobType": "DEFAULT",
                "environmentVariables": {
                    # SET env. variables
                    # "CONDA_ENV_TYPE": "service",
                    # "CONDA_ENV_SLUG": "classic_cpu"
                },
            },
            # SETS the logging
            # "jobLogConfigurationDetails": {
            #     "enableLogging": True,
            #     "enableAutoLogCreation": False,
            #     "logGroupId": "<log_group_id>",
            #     "logId": "<log_id>"
            # },
            "jobInfrastructureConfigurationDetails": {
                "jobInfrastructureType": "STANDALONE",
                "shapeName": "VM.Standard2.1",
                "blockStorageSizeInGBs": "100",
                "subnetId": subnet_id,
            },
        }
        return self.dsc.create_job(job_payload)

    def list_jobs(self, compartment_id, project_id):
        """List job definitions in the given compartment and project."""
        print("-------------------------------------")
        print("*** List Jobs ...")
        return self.dsc.list_jobs(compartment_id=compartment_id, project_id=project_id)

    def get_job(self, job_id):
        """Fetch a single job definition by OCID."""
        print("-------------------------------------")
        print("*** Get Job ...")
        return self.dsc.get_job(job_id)

    # NOTICE: Artifacts cannot be replaced, once uploaded!
    def create_job_artifact(self, job_id, file_name):
        """Upload *file_name* as the artifact of job *job_id*.

        BUG FIX: the file handle is now closed after the upload (the
        original leaked it), and a dead ``os.path.basename(...)`` statement
        whose result was discarded has been removed.
        """
        print("-------------------------------------")
        print("*** Create Job Artifact ...")
        with open(file_name, "rb") as fstream:
            return self.dsc.create_job_artifact(
                job_id,
                fstream,
                content_disposition=f"attachment; filename={os.path.basename(fstream.name)}",
            )

    def run_job(
        self, compartment_id, project_id, job_id, log_id, job_run_name="Job Run"
    ):
        """Start a run of *job_id* with auto log creation enabled.

        NOTE(review): *log_id* is placed in the ``logGroupId`` field -- the
        caller passes a log-group OCID; confirm the parameter name is just
        historical.
        """
        print("-------------------------------------")
        print("*** Run Job ...")
        job_run_payload = {
            "projectId": project_id,
            "displayName": job_run_name,
            "jobId": job_id,
            "compartmentId": compartment_id,
            "jobConfigurationOverrideDetails": {
                "jobType": "DEFAULT",
                "environmentVariables": {
                    # "JOB_RUN_ENTRYPOINT": "<main_python_file>.py"
                    # "CONDA_ENV_TYPE": "service",
                    # "CONDA_ENV_SLUG": "mlcpuv1",
                    # "MY_ENV_VAR": "abcde"
                },
                "commandLineArguments": "100 linux \"hi there\""
            },
            "jobLogConfigurationOverrideDetails": {
                "logGroupId": log_id,
                "enableLogging": True,
                "enableAutoLogCreation": True,
            },
        }
        return self.dsc.create_job_run(job_run_payload)

    def list_job_runs(self, compartment_id, lifecycle_state=None):
        """List job runs, optionally filtered by lifecycle state."""
        print("-------------------------------------")
        print("*** List Job Runs ...")
        if lifecycle_state:
            return self.dsc.list_job_runs(
                compartment_id, lifecycle_state=lifecycle_state
            )
        else:
            return self.dsc.list_job_runs(compartment_id)

    def get_job_run(self, job_run_id):
        """Fetch a single job run by OCID."""
        print("-------------------------------------")
        print("*** List Job Run ...")
        return self.dsc.get_job_run(job_run_id)

    def delete_job_run(self, job_run_id):
        """Delete a job run by OCID."""
        print("-------------------------------------")
        print("*** Delete Job Run ...")
        return self.dsc.delete_job_run(job_run_id)

    def delete_job(self, job_id):
        """Delete a job definition together with all of its runs."""
        print("-------------------------------------")
        print("*** Delete Job ...")
        return self.dsc.delete_job(job_id, delete_related_job_runs=True)
def main(parser):
    """Register the required -f/--file option on *parser*, parse the process
    arguments, and return them as a dict."""
    parser.add_argument("-f", "--file", required=True, default="", help='file to be used as job artifact')
    parsed = parser.parse_args()
    return {"file": parsed.file}
if __name__ == "__main__":
"""
# RUN: python mljobs.py -f <file>
"""
try:
t = time.time()
print("Start")
parser = argparse.ArgumentParser()
arguments = main(parser)
print("------------------------------------------")
print("FILE: {}".format(arguments["file"]))
print("------------------------------------------")
JOB_FILE = arguments["file"]
# params
project_id = os.environ['PROJECT']
compartment_id = os.environ['COMPARTMENT']
log_group_ocid = os.environ['LOGGROUP']
subnet_id = os.environ['SUBNET']
tenant = os.environ['TENANCY']
config = os.environ['CONFIG']
# initialize
sdk = MLJobs(tenant, config, compartment_id, subnet_id)
job_id = ""
job_name = "Job " + datetime.now().strftime("%m-%d-%Y %H:%M:%S")
job = sdk.create_job(
compartment_id=compartment_id, project_id=project_id, job_name=job_name
)
print("Job ID: " + job.data.id)
job_id = job.data.id
print(job.data.id)
# artifact = sdk.create_job_artifact(job_id, "hello_world.py")
artifact = sdk.create_job_artifact(job_id, JOB_FILE)
job = sdk.get_job(job_id)
print(job.data)
job_run_name = "Job Run " + datetime.now().strftime("%m-%d-%Y %H:%M:%S")
job_run = sdk.run_job(
job.data.compartment_id,
job.data.project_id,
job.data.id,
log_id=log_group_ocid,
job_run_name=job_run_name,
)
print(job_run.data.id)
job_run_id = job_run.data.id
# job_runs = sdk.list_job_runs(compartment_id)
# print(job_runs.data)
while True:
time.sleep(10)
job_run_details = sdk.get_job_run(job_run_id)
print(job_run_details.data)
if job_run_details.data.lifecycle_state in ["IN_PROGRESS", "ACCEPTED"]:
continue
else:
break
#
elapsed_time = time.time() - t
print("Process Time: ", str(timedelta(seconds=elapsed_time)))
except Exception as e:
print("ERROR: ", e)
| StarcoderdataPython |
1944473 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# A TextMatrix is used to generate a set of ImpalaTestVectors. The vectors that are
# generated are based on one or more ImpalaTestDimensions inputs. These lists define
# the set of values that are interesting to a test. For example, for file_format
# these might be 'seq', 'text', etc
#
# The ImpalaTestMatrix is then used to generate a set of ImpalaTestVectors. Each
# ImpalaTestVector contains a single value from each of the input ImpalaTestDimensions.
# An example:
#
# ImpalaTestMatrix.add_dimension('file_format', 'seq', 'text')
# ImpalaTestMatrix.add_dimension('agg_func', 'min', 'max', 'sum')
# ImpalaTestMatrix.add_dimension('col_type', 'int', 'bool')
# test_vectors = ImpalaTestMatrix.generate_test_vectors(...)
#
# Would return a collection of ImpalaTestVectors, with each one containing a
# combination of file_format, agg_func, and col_type:
# seq, min, int
# text, max, bool
# ...
#
# A ImpalaTestVector is an object itself, and the 'get_value' function is used to
# extract the actual value from the ImpalaTestVector for this particular combination:
# test_vector = test_vectors[0]
# print test_vector.get_value('file_format')
#
# The combinations of ImpalaTestVectors generated can be done in two ways: pairwise
# and exhaustive. Pairwise provides a way to get good coverage and reduce the total
# number of combinations generated where exhaustive will generate all valid
# combinations.
#
# Finally, the ImpalaTestMatrix also provides a way to add constraints to the vectors
# that are generated. This is useful to filter out invalid combinations. These can
# be added before calling 'generate_test_vectors'. The constraint is a function that
# accepts a ImpalaTestVector object and returns true if the vector is valid, false
# otherwise. For example, if we want to make sure 'bool' columns are not used with 'sum':
#
# ImpalaTestMatrix.add_constraint(lambda v:\
# not (v.get_value('col_type') == 'bool and v.get_value('agg_func') == 'sum'))
#
# Additional examples of usage can be found within the test suites.
from itertools import product
# A named list of values for a single test dimension (e.g. all file formats).
class ImpalaTestDimension(list):
  def __init__(self, name, *args):
    self.name = name
    # Wrap each raw value so it remembers which dimension it belongs to.
    for arg in args:
      self.append(ImpalaTestVector.Value(name, arg))
# One concrete combination of dimension values handed to a test method. Use
# get_value(<dimension name>) to read the value chosen for that dimension.
class ImpalaTestVector(object):
  def __init__(self, vector_values):
    self.vector_values = vector_values

  def get_value(self, name):
    # Raises StopIteration (like next() on an empty iterator) when the
    # dimension name is unknown, matching the historical behaviour.
    for vector_value in self.vector_values:
      if vector_value.name == name:
        return vector_value.value
    raise StopIteration

  def __str__(self):
    return ' | '.join(str(vector_value) for vector_value in self.vector_values)

  # Pairs a dimension name with one concrete value. Created internally by
  # ImpalaTestDimension; users should never need to construct it directly.
  class Value(object):
    def __init__(self, name, value):
      self.name = name
      self.value = value

    def __str__(self):
      return '%s: %s' % (self.name, self.value)
# Matrix -> Collection of vectors
# Vector -> Call to get specific values
class ImpalaTestMatrix(object):
  """Builds ImpalaTestVectors from the cross product of the registered
  ImpalaTestDimensions, optionally filtered by user-supplied constraints."""

  def __init__(self, *args):
    self.dimensions = dict((arg.name, arg) for arg in args)
    self.constraint_list = list()

  def add_dimension(self, dimension):
    self.dimensions[dimension.name] = dimension

  def add_mandatory_exec_option(self, exec_option_key, exec_option_value):
    # Force the given exec option into every value of the 'exec_option'
    # dimension; each vector.value here is assumed to be a dict of options.
    for vector in self.dimensions['exec_option']:
      vector.value[exec_option_key] = exec_option_value

  def clear(self):
    self.dimensions.clear()

  def clear_dimension(self, dimension_name):
    del self.dimensions[dimension_name]

  def has_dimension(self, dimension_name):
    # 'in' replaces dict.has_key(), which was removed in Python 3 and is
    # equivalent on Python 2.
    return dimension_name in self.dimensions

  def generate_test_vectors(self, exploration_strategy):
    """Return the list of ImpalaTestVectors for the given strategy
    ('exhaustive' or 'core'/'pairwise'); raises ValueError otherwise."""
    if not self.dimensions:
      return list()
    # TODO: Check valid exploration strategies, provide more options for exploration
    if exploration_strategy == 'exhaustive':
      return self.__generate_exhaustive_combinations()
    elif exploration_strategy in ['core', 'pairwise']:
      return self.__generate_pairwise_combinations()
    else:
      # Call-style raise works on both Python 2 and 3; the previous
      # 'raise ValueError, msg' statement form is a SyntaxError on Python 3.
      raise ValueError('Unknown exploration strategy: %s' % exploration_strategy)

  def __generate_exhaustive_combinations(self):
    # Full cross product of all dimension value lists, constraint-filtered.
    return [ImpalaTestVector(vec) for vec in product(*self.__extract_vector_values())
            if self.is_valid(vec)]

  def __generate_pairwise_combinations(self):
    import metacomm.combinatorics.all_pairs2
    all_pairs = metacomm.combinatorics.all_pairs2.all_pairs2
    # Pairwise fails if the number of inputs == 1. Use exhaustive in this case;
    # the results will be the same.
    if len(self.dimensions) == 1:
      return self.__generate_exhaustive_combinations()
    return [ImpalaTestVector(vec) for vec in all_pairs(self.__extract_vector_values(),
                                                       filter_func=self.is_valid)]

  def add_constraint(self, constraint_func):
    # constraint_func receives an ImpalaTestVector and returns True if valid.
    self.constraint_list.append(constraint_func)

  def clear_constraints(self):
    self.constraint_list = list()

  def __extract_vector_values(self):
    # The data is stored as a mapping of name -> [val1, val2, ...]; only the
    # value lists feed the combination generators.
    return [v[1] for v in self.dimensions.items()]

  def is_valid(self, vector):
    """A candidate is valid only when it is a complete list/tuple (one value
    per dimension) and every registered constraint accepts it."""
    for constraint in self.constraint_list:
      if not (isinstance(vector, (list, tuple)) and
              len(vector) == len(self.dimensions)):
        return False
      if not constraint(ImpalaTestVector(vector)):
        return False
    return True
| StarcoderdataPython |
1858285 | import logging
from appium import webdriver
from appium.webdriver.common import touch_action
from selenium.webdriver.common.by import By
logger = logging.getLogger(__name__)
class DummyList(dict):
    """Mapping that resolves every missing key to the first element of the
    wrapped list, so element lookups never fail while the list is non-empty."""

    def __init__(self, original: list):
        super().__init__()
        self._original = original

    def __missing__(self, key):
        # Any key that was never stored explicitly falls back to element 0.
        return self._original[0]
class DummyDriver(webdriver.Remote):
    """Appium Remote driver that, when dummy mode is on, transparently
    redirects element lookups to a well-known element of the dummy app."""

    DUMMY_APP_PACKAGE = 'edu.purdue.dsnl.dummy'
    DUMMY_APP_ACTIVITY = '.MainActivity'
    _DUMMY_ID = DUMMY_APP_PACKAGE + ':id/dummy'
    _DUMMY_ANDROID_UIAUTOMATOR = 'new UiSelector().text("dummy")'
    _DUMMY_ACCESSIBILITY_ID = 'dummy'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._dummy = False
        self._rect = None  # lazily cached window rect

    def find_element(self, by=By.ID, value=None):
        return super().find_element(by, self._may_dummy_value(by, value))

    def find_elements(self, by=By.ID, value=None):
        found = super().find_elements(by, self._may_dummy_value(by, value))
        # In dummy mode wrap the result so that any index resolves to the
        # first (dummy) element; otherwise pass the list through untouched.
        if self._dummy:
            return DummyList(found)
        return found

    def get_window_rect(self):
        # Fetch once and reuse — assumes the window size is stable for the
        # lifetime of this driver (TODO confirm for rotation scenarios).
        if not self._rect:
            self._rect = super().get_window_rect()
        return self._rect

    def dummy(self, dummy: bool):
        self._dummy = dummy

    def is_dummy(self):
        return self._dummy

    def _may_dummy_value(self, by, value):
        return self._dummy_value(by, value) if self._dummy else value

    @staticmethod
    def _dummy_value(by, value):
        # Map each supported locator strategy to its dummy counterpart;
        # anything else is passed through unchanged with a warning.
        if by == By.ID:
            return DummyDriver._DUMMY_ID
        if by == By.ANDROID_UIAUTOMATOR:
            return DummyDriver._DUMMY_ANDROID_UIAUTOMATOR
        if by == By.ACCESSIBILITY_ID:
            return DummyDriver._DUMMY_ACCESSIBILITY_ID
        logger.warning(f'{by} {value} is not replaced with dummy value')
        return value
class TouchAction(touch_action.TouchAction):
    """TouchAction that, in dummy mode, retargets every tap to the center
    of the screen instead of the requested element/coordinates."""

    def __init__(self, driver):
        super().__init__(driver)
        # Snapshot the driver's dummy flag at construction time.
        self._dummy = driver.is_dummy()

    def tap(self, element=None, x=None, y=None, count=1):
        if self._dummy:
            # Ignore the requested target: tap the middle of the window.
            rect = self._driver.get_window_rect()
            x, y = rect['width'] // 2, rect['height'] // 2
        return super().tap(element, x, y, count)
| StarcoderdataPython |
1720 | import requests
import aiohttp
from constants import API_KEY
class User(object):
    """Wrapper around a Disqus author record.

    Basic fields (id, name, username, location, joinedAt, profileUrl,
    isPrivate, avatar, ...) come from the ``author`` blob embedded in a
    post. Aggregate counters (numPosts, numLikesReceived, reputation, ...)
    live in a separate ``users/details.json`` response and are fetched
    lazily, either asynchronously via :meth:`load` or — as a last resort —
    synchronously when a detail property is first read.
    """

    def __init__(self, author_info):
        # Raw "author" dict exactly as returned by the Disqus API.
        self._basic_info = author_info
        # Populated from users/details.json on demand.
        self._detailed_info = None

    async def load(self):
        """Asynchronously fetch and cache the detailed user record."""
        async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(verify_ssl=False)) as session:
            user_info = await session.get(
                'https://disqus.com/api/3.0/users/details.json',
                params={'user': self.id, 'api_key': API_KEY}
            )
            detail_json = await user_info.json()
            # code != 0 signals an API-level problem; the response is still
            # stored, matching the original best-effort behaviour.
            if detail_json['code'] != 0:
                print(f'Problem with getting user details from user {self.id}')
                print(detail_json)
            self._detailed_info = detail_json['response']

    def _get_detailed_info(self):
        # Synchronous fallback hit when a detail property is read before
        # load() was awaited. The response mirrors load(): a 'code' int and
        # a 'response' dict holding numPosts, numLikesReceived, etc.
        print("WARNING: auto-loading user in async version of code!!!!")
        details = requests.get(
            'https://disqus.com/api/3.0/users/details.json',
            {'user': self.id, 'api_key': API_KEY}
        )
        detail_json = details.json()
        if detail_json['code'] != 0:
            print(f'Problem with getting user details from user {self.id}')
            print(detail_json)
        self._detailed_info = detail_json['response']

    @property
    def anonymous(self):
        # Anonymous author blobs carry no 'id' field at all.
        return 'id' not in self._basic_info

    @property
    def private(self):
        # Anonymous users are treated as private as well.
        return self.anonymous or self._basic_info.get('isPrivate')

    @property
    def id(self):
        # NOTE: anonymous implies private, so anonymous users also surface
        # as 'Private' here; the 'Anonymous' fallback is kept for safety.
        if self.private:
            return 'Private'
        return self._basic_info.get('id', 'Anonymous')

    @property
    def name(self):
        return self._basic_info.get('name')

    @property
    def username(self):
        return self._basic_info.get('username')

    @property
    def location(self):
        return self._basic_info.get('location')

    @property
    def joined_at(self):
        return self._basic_info.get('joinedAt')

    @property
    def profile_url(self):
        return self._basic_info.get('profileUrl')

    @property
    def total_posts(self):
        # Lazily triggers the synchronous fetch if details are missing.
        if self._detailed_info is None:
            self._get_detailed_info()
        return self._detailed_info.get('numPosts')

    @property
    def total_likes(self):
        # Lazily triggers the synchronous fetch if details are missing.
        if self._detailed_info is None:
            self._get_detailed_info()
        return self._detailed_info.get('numLikesReceived')

    @property
    def user_info_row(self):
        """Flat list of user fields, in CSV column order."""
        return [
            self.id,
            self.name,
            self.username,
            self.total_posts,
            self.total_likes,
            self.location,
            self.joined_at,
            self.profile_url
        ]
| StarcoderdataPython |
3261808 | import threading
from typing import Callable, Optional
class RepeatingTimer:
    """Invokes ``func(*args, **kwargs)`` every ``interval_ms`` milliseconds
    until :meth:`cancel` is called.

    Each tick schedules the next one, so the period drifts by the run time
    of the callback itself. Not thread-safe for concurrent start/cancel.
    """

    def __init__(self, interval_ms: int, func: Callable, *args, **kwargs) -> None:
        self.interval_s = interval_ms / 1000
        self.func = func
        self.args = args
        self.kwargs = kwargs
        self.timer = None  # type: Optional[threading.Timer]
        self.is_running = False

    def set_func(self, func: Callable, *args, **kwargs) -> None:
        """Replace the callback; takes effect from the next tick."""
        self.func = func
        self.args = args
        self.kwargs = kwargs

    def set_interval(self, interval_ms: int) -> None:
        """Change the period; takes effect from the next tick."""
        self.interval_s = interval_ms / 1000

    def start(self) -> None:
        """Schedule the next tick. Callers should cancel() a running timer
        first, otherwise a second, parallel timer chain is started."""
        self.timer = threading.Timer(self.interval_s, self._callback)
        self.timer.start()
        self.is_running = True

    def cancel(self) -> None:
        """Stop the timer; a no-op if it was never started.

        Uses an explicit guard instead of ``assert``: asserts are stripped
        under ``python -O``, which would turn an early cancel() into an
        AttributeError on ``None``.
        """
        if self.timer is not None:
            self.timer.cancel()
        self.is_running = False

    def _callback(self) -> None:
        # Run the user callback, then schedule the following tick.
        self.func(*self.args, **self.kwargs)
        self.start()
| StarcoderdataPython |
6479845 | <gh_stars>0
import json
# Load the category-id -> display-name mapping from the JSON file in the
# working directory. NOTE(review): assumes 'cat_to_name.json' exists next to
# the script — a missing file raises FileNotFoundError here.
with open('cat_to_name.json', 'r') as f:
    cat_to_name = json.load(f)
print(cat_to_name) | StarcoderdataPython |
12835644 | <filename>Set/1.Create-Define-a-aset.py
# Define a set with a literal: an unordered collection of unique elements.
myset ={"C","C++","Python","Shap","Ruby","Java"}
print("Set Content:",myset) | StarcoderdataPython |
14597 | <gh_stars>0
#!/usr/bin/env python3
import json
import time
import sys
#import numpy as np
import cv2
from cscore import CameraServer, VideoSource, CvSource, VideoMode, CvSink, UsbCamera
from networktables import NetworkTablesInstance
def Track(frame, sd):
    """Detect the selected vision target in ``frame`` and publish its center
    (X, Y) and enclosing-circle radius R to the SmartDashboard table ``sd``.

    Publishes X = Y = R = -1 whenever no sufficiently large target is found,
    so the RoboRIO can distinguish "no target" from stale values (previously
    nothing was published at all when zero contours were detected).
    """
    # Pick the HSV threshold band based on the dashboard's "Track" selector.
    Lower = (0, 0, 0)
    Upper = (0, 0, 0)
    if sd.getNumber("Track", 0):
        Lower = (0, 103, 105)
        Upper = (150, 255, 255)  # hatch panel
        sd.putNumber("Tracking", 0)
    elif sd.getNumber("Track", 1):
        Lower = (16, 18, 108)  # Tape
        Upper = (32, 52, 127)
        sd.putNumber("Tracking", 1)
    else:
        print("Could not get smartdashboard value, using hatch panel")
        Lower = (0, 103, 105)
        Upper = (150, 255, 255)  # none selected using hatch
        sd.putNumber("Tracking", 2)

    # Threshold in HSV, then erode/dilate to remove small noise blobs.
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, Lower, Upper)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)

    # Find contours in the mask (OpenCV 3.x three-value return signature).
    a, cnts, b = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

    if len(cnts) > 0:
        # Use the largest contour: minimum enclosing circle plus centroid.
        c = max(cnts, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        M = cv2.moments(c)
        # Guard m00 == 0 (degenerate contour) — the centroid division below
        # would otherwise raise ZeroDivisionError.
        if M["m00"] != 0 and radius > 15:
            center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
            # Draw a circle around the target and publish its location.
            cv2.circle(frame, (int(x), int(y)), int(radius), (255, 255, 8), 2)
            cv2.circle(frame, center, 3, (0, 0, 225), -1)
            sd.putNumber('X', x)
            sd.putNumber('Y', y)
            sd.putNumber('R', radius)
            print("X: " + repr(round(x, 1)) + " Y: " + repr(round(y, 1)) + " Radius: " + repr(round(radius, 1)))
            return
    # No usable target: publish the -1 sentinel so consumers see "not found".
    sd.putNumber('X', -1)
    sd.putNumber('Y', -1)
    sd.putNumber('R', -1)
# Open both USB cameras; the dashboard chooses which feed is processed.
cap1 = cv2.VideoCapture(0)
cap2 = cv2.VideoCapture(1)
#HatchPanel = HatchPanelPipeline()
# NOTE(review): startClientTeam expects the FRC team number; passing None
# looks wrong — confirm the intended team number before deploying.
team = None
ntinst = NetworkTablesInstance.getDefault()
ntinst.startClientTeam(team)
SmartDashBoardValues = ntinst.getTable('SmartDashboard')
# Main loop: grab a frame from the selected camera, run target tracking,
# display the annotated frame, and exit on 'q'.
while(True):
    # Capture frame-by-frame
    if SmartDashBoardValues.getNumber("Camera to Use", 0):
        ret, frame = cap1.read() #use camera 0
        SmartDashBoardValues.putNumber("Using Camera", 0)
    elif SmartDashBoardValues.getNumber("Camera to Use", 1):
        ret, frame = cap2.read() #use camera 1
        SmartDashBoardValues.putNumber("Using Camera", 1)
    else:
        print("No camera selected using camera 0")
        ret, frame = cap1.read() #found no value for camera to use, using cam 0
        SmartDashBoardValues.putNumber("Using Camera", 2)
    # Our operations on the frame come here
    # NOTE(review): `ret` is never checked — a failed read passes an invalid
    # frame into Track(); confirm camera availability at startup.
    Track(frame, SmartDashBoardValues)
    cv2.imshow('frame',frame)
    #print(type(mask))
    #res = cv2.bitwise_and(frame,frame, mask=mask)
    #cv2.imshow('frame',frame)
    #cv2.imshow('mask',mask)
    #cv2.imshow('res',res)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# When everything done, release the capture
cap1.release()
cap2.release()
cv2.destroyAllWindows() | StarcoderdataPython |
3578870 | from django.contrib.auth.base_user import BaseUserManager
from django.utils.translation import ugettext_lazy as _
class CustomUserManager(BaseUserManager):
    """
    Manager for a custom user model keyed on the email address rather than a
    username; superusers are identified by role == 1 (Global Admin).
    """

    def create_user(self, email, password, **extra_fields):
        """Create and persist a user after validating both credentials."""
        if not email:
            raise ValueError(_("The email must be set"))
        if not password:
            raise ValueError(_("The password must be set"))
        user = self.model(email=self.normalize_email(email), **extra_fields)
        user.set_password(password)
        user.save()
        return user

    def create_superuser(self, email, password, **extra_fields):
        """Create an active Global Admin (role 1) account."""
        extra_fields.setdefault('is_active', True)
        extra_fields.setdefault('role', 1)
        # Reject an explicitly supplied non-admin role.
        if extra_fields.get('role') != 1:
            raise ValueError('Superuser must have role of Global Admin')
        return self.create_user(email, password, **extra_fields)
1803783 | from django.contrib.auth.models import User
from rest_framework import serializers
from .models import Entry
class EntrySerializer(serializers.ModelSerializer):
    """Serializer for Entry that flattens related product and user details
    into the read representation."""

    class Meta:
        model = Entry
        fields = ["product", "quantity", "description"]

    def to_representation(self, instance):
        # Custom read shape: expose the product name and owning username
        # alongside the raw fields; description defaults to "" when falsy.
        return {
            "product": instance.pk,
            "quantity": instance.quantity,
            "name": instance.product.name,
            "user": instance.user.username,
            "description": instance.description or "",
        }
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.