id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
28245 | import numpy as np
class perceptron(object):
    """Rosenblatt perceptron binary classifier.

    Parameters:
        eta: learning rate (typically between 0.0 and 1.0).
        n_iter: number of passes (epochs) over the training data.

    Attributes set by fit():
        w_: 1d-array of weights after fitting; w_[0] is the bias unit.
        errors_: list with the number of weight updates per epoch.
    """

    def __init__(self, eta, n_iter):
        self.eta = eta
        self.n_iter = n_iter

    def fit(self, x, y):
        """Fit the weights to training data.

        x: ndarray of shape (n_samples, n_features), training vectors.
        y: ndarray of shape (n_samples,), labels in {-1, 1}.
        Returns self.
        """
        # One weight per feature plus the bias in w_[0].
        self.w_ = np.zeros(np.shape(x)[1] + 1)
        self.errors_ = []
        for _ in range(self.n_iter):
            errors = 0
            for xi, yi in zip(x, y):
                # Perceptron rule: delta_w = eta * (target - prediction).
                # The original computed eta * (prediction - target), which
                # pushes the weights in the wrong direction and prevents
                # convergence even on linearly separable data.
                update = self.eta * (yi - self.predict(xi))
                self.w_[1:] += update * xi
                self.w_[0] += update
                errors += int(update != 0.0)
            self.errors_.append(errors)
            # (per-epoch debug print of errors_ removed)
        return self

    def net_input(self, x):
        """Calculate the net input w . x + bias."""
        return np.dot(x, self.w_[1:]) + self.w_[0]

    def predict(self, x):
        """Return the class label (+1 or -1) via the unit step function."""
        return np.where(self.net_input(x) >= 0.0, 1, -1)
def net_input(self,x):
'''
calculate net input
'''
return np.dot(x,self.w_[1:])+self.w_[0]
def predict(self,x):
'''
positive function
'''
return np.where(self.net_input(x)>=0.0,1,-1)
# painting
import matplotlib.pyplot as plt
# from perception import perceptron
# read data as DataFrame
import pandas as pd
import numpy as np
import os
import random

# Build a synthetic 150-sample data set: four numeric columns drawn from
# uniform distributions plus a random two-class label column ('a' / 'b').
a = np.random.uniform(6.0, 7.0, 150)
b = np.random.uniform(2.0, 4.0, 150)
c = np.random.uniform(5.0, 5.5, 150)
d = np.random.uniform(1.5, 2.5, 150)
q = []
for i in range(150):
    e = np.random.choice(['a', 'b'])
    q.append(e)
dic = {'0': a, '1': b, '2': c, '3': d, '4': q}
df = pd.DataFrame(dic)

# First 100 rows: labels mapped to {-1, 1}, features taken from columns 0 and 2.
y = df.iloc[0:100, 4].values
y = np.where(y == 'b', -1, 1)
x = df.iloc[0:100, [0, 2]].values

# Scatter plot of the two (pseudo) classes.
plt.scatter(x[:50, 0], x[:50, 1], color='red', marker='o', label='setosa')
plt.scatter(x[50:100, 0], x[50:100, 1], color='green', marker='x', label='versicolor')
plt.xlabel('petal length')
plt.ylabel('sepal length')
plt.legend(loc='upper right')
plt.show()

# Train the perceptron and plot the number of updates per epoch.
ppn = perceptron(eta=1, n_iter=10000)
ppn.fit(x, y)
plt.plot(range(1, len(ppn.errors_) + 1), ppn.errors_, marker='o', color='red')
plt.xlabel('epochs')
# Axis label typo fixed ('miscalssifications' -> 'misclassifications').
plt.ylabel('number of misclassifications')
plt.show()
| StarcoderdataPython |
1797994 | #!/usr/bin/env python
#coding: utf-8
import os
import re
import json
import time
import subprocess
import threading
from datetime import datetime
import psutil
import requests
# Load-generator agents: each test server host runs an HTTP agent on this
# port that executes one benchmark round when POSTed to /test.
TEST_SERVER_HOSTS = ['192.168.40.215', '192.168.40.91']
TEST_SERVER_PORT = 8999
TEST_REQ_TMPL = 'http://%(host)s:%(port)d/test'
# Machine hosting the web apps under test.
APP_SERVER_IP = '192.168.3.235'
APP_SERVER_PATH_TMPL = 'http://%(ip)s:%(port)d/hello'
# Template of the request the agents should issue (unused here; presumably
# filled in by the agent side -- confirm against the agent code).
TARGET_REQUEST = {
    'path_tmpl': '',
    'headers': {
    },
    'params' : {
    }
}
# Benchmark matrix: every app is run with each worker count in PROCESSES_LST
# and hit with each per-agent concurrency in CONCURRENTS for SECONDS seconds.
SECONDS = 10
CONCURRENTS = [400, 600, 800, 1000, 1600]
PROCESSES_LST = [1, 4, 8, 16, 32]
HEADERS = {'Content-type': 'application/json', 'Accept': 'text/plain'}
# Regexes that extract metrics from the agent's text report (the lines match
# siege-style output -- "Availability", "Transaction rate" -- verify).
REGEXPS = {
    'availability(%)' : r'^Availability.*\b(\d+\.\d+)\b.*',
    'transaction-rate(trans/sec)': r'^Transaction rate.*\b(\d+\.\d+)\b.*'
}
# Accumulated configuration plus per-app results; dumped as JSON at the end.
SUMMARY = {
    'INFO': {
        'TAG' : 'None',
        'SECONDS': SECONDS,
        'CONCURRENTS': CONCURRENTS,
        'PROCESSES_LST': PROCESSES_LST,
        'TEST_SERVER_HOSTS': TEST_SERVER_HOSTS,
        'APP_SERVER_IP' : APP_SERVER_IP
    },
    'tests': [
        {
            'app': 'test_http.go',
            'cmd_tmpl': './webapps/test_http.bin -port=%(port)d -size=%(processes)d 2>/dev/null 1>/dev/null',
            'port' : 9001,
            'results': []
        },
        {
            'app': 'test_martini.go',
            'cmd_tmpl': './webapps/test_martini.bin -port=%(port)d -size=%(processes)d 2>/dev/null 1>/dev/null',
            'port': 9002,
            'results': []
        },
        {
            'app': 'test_tornado.py',
            'port': 8001,
            'cmd_tmpl': './webapps/test_tornado.py --port=%(port)d --processes=%(processes)d 2>/dev/null 1>/dev/null',
            'results': []
        },
        {
            'app': 'test_webpy_gevent.py',
            'port': 8002,
            'cmd_tmpl': 'cd webapps && gunicorn -k gevent -w %(processes)d -b 0.0.0.0:%(port)d test_webpy_gevent:wsgiapp 2>/dev/null 1>/dev/null',
            'results': []
        }
    ]
}
# Timestamp used in log lines and in the results file name.
time_now = lambda: datetime.now().strftime("%m-%d_%H:%M:%S")
# Guards the shared `results` lists that worker threads append to.
results_lock = threading.Lock()
def kill_proc_tree(pid, including_parent=True):
    """Kill all descendants of `pid`, then (optionally) the process itself.

    Processes that disappear between enumeration and kill are ignored.
    """
    root = psutil.Process(pid)
    victims = root.children(recursive=True)
    if including_parent:
        victims.append(root)
    for proc in victims:
        try:
            proc.kill()
        except psutil.NoSuchProcess:
            pass
def ping(url):
    # Health-check `url`; return True only for an HTTP 200 response.
    status = False
    req = None
    try:
        req = requests.get(url, verify=False, timeout=2)
    except Exception as e:
        print 'Ping failed:', url, e
        # Back off so a slow-starting app server gets a chance to come up.
        time.sleep(30)
    if req and req.status_code == 200:
        status = True
    return status
def extract_test(data):
    # Parse the agent's raw text report (`data['output']`) and pull out the
    # metrics defined in REGEXPS as floats; keep the raw output too.
    output = data['output']
    result = {
        'output': output
    }
    for line in output.split('\n'):
        for name, regexp in REGEXPS.iteritems():
            m = re.match(regexp, line)
            if m:
                match_result = m.groups()[0]
                result[name] = float(match_result)
                # A line matches at most one metric; move on to the next line.
                break
    return result
def test_request(results, url, data, timeout):
    # Ask one agent to run a benchmark round; retry up to 3 times on timeout,
    # then append the parsed result to the shared `results` list.
    retry = 3
    resp_data = None
    while retry > 0:
        try:
            req = requests.post(url, headers=HEADERS, data=json.dumps(data), timeout=timeout)
            resp_data = req.json()
            retry = 0  # success: leave the retry loop
        except requests.Timeout as e:
            print (3-retry), e
            retry -= 1
    if resp_data:
        result = extract_test(resp_data)
        # `results` is shared with the other agent threads.
        results_lock.acquire()
        results.append(result)
        results_lock.release()
def merge_test(datas):
    """Aggregate per-agent results: sum every REGEXPS metric into a
    ``<name>_TOTAL`` entry and join the raw reports. Returns None when
    there is nothing to merge."""
    if not datas:
        return None
    metric_keys = list(REGEXPS.keys())
    merged = {}
    for metric in metric_keys:
        merged[metric + '_TOTAL'] = 0
    raw_outputs = []
    for entry in datas:
        raw_outputs.append(entry['output'])
        for metric in metric_keys:
            if metric in entry:
                merged[metric + '_TOTAL'] += entry[metric]
    merged['output'] = '\n\n'.join(raw_outputs)
    return merged
def do_test(app_url, concurrent, seconds=20):
    # Fan one benchmark round out to every agent in parallel, wait for all
    # of them, and merge their reports into a single result dict.
    data = {
        'url': app_url,
        'concurrent': concurrent,
        'seconds': seconds,
    }
    # Give the remote agent headroom beyond the benchmark duration itself.
    timeout = seconds + 10
    results = []
    threads = []
    for host in TEST_SERVER_HOSTS:
        port = TEST_SERVER_PORT
        # TEST_REQ_TMPL is filled from the local variables `host` and `port`.
        test_req_url = TEST_REQ_TMPL % locals()
        t = threading.Thread(target=test_request, args=(results, test_req_url, data, timeout))
        t.start()
        threads.append(t)
    [t.join() for t in threads]
    return merge_test(results)
def gen_server_results(cmd_tmpl, port, app_url):
    # For each worker count: start the app server, health-check it, run the
    # full concurrency matrix against it, then kill its whole process tree.
    for processes in PROCESSES_LST:
        # cmd_tmpl is filled from the locals `port` and `processes`.
        cmd = cmd_tmpl % locals()
        print 'Server:', cmd
        p = subprocess.Popen(cmd, shell=True,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        # Give the server a moment to bind its port before pinging it.
        time.sleep(0.5)
        if not ping(app_url):
            # Record the failure as a sentinel result and skip this config.
            yield {
                'processes': processes,
                'concurrent': -1,
                'output': 'PingError'
            }
            kill_proc_tree(p.pid)
            continue
        for concurrent in CONCURRENTS:
            result = do_test(app_url, concurrent, seconds=SECONDS)
            result['processes'] = processes
            # Total offered load = per-agent concurrency times agent count.
            result['concurrent'] = concurrent * len(TEST_SERVER_HOSTS)
            yield result
        kill_proc_tree(p.pid)
        # Let sockets drain before launching the next configuration.
        time.sleep(3)
def main():
def cmp_res(a, b):
c1, c2 = a['concurrent'], b['concurrent']
if c1 > c2: return 1
if c1 < c2: return -1
p1, p2 = a['processes'], b['processes']
if p1 > p2: return 1
if p1 <= p2: return -1
for info in SUMMARY['tests']:
cmd_tmpl = info['cmd_tmpl']
port = info['port']
ip = APP_SERVER_IP
app_url = APP_SERVER_PATH_TMPL % locals()
results = info['results']
print 'Section:', info['app'], app_url
print time_now()
print '=================='
for result in gen_server_results(cmd_tmpl, port, app_url):
print 'section: {0}, processes: {1}, concurrent: {2}'.format(info['app'], result['processes'], result['concurrent'])
output = result.pop('output')
print '--------------------'
print output
print '--------------------'
print time_now(), info['app']
print '----------------------------------------\n'
results.append(result)
results.sort(cmp=cmp_res)
print '======================================================\n\n'
with open(os.path.join('results', '{0}_summary.json'.format(time_now())), 'w') as f:
f.write(json.dumps(SUMMARY, indent=4))
if __name__ == '__main__':
main()
| StarcoderdataPython |
140286 | <gh_stars>0
from math import *
import os
class Servo:
    """A single hobby servo driven through the ServoBlaster device file."""

    def __init__(self, number):
        self.number = number  # ServoBlaster channel number
        self.angle = 0        # last commanded angle in degrees (-90..+90)

    def set_servo_angle(self, angle):
        """Store the target angle (degrees) and push it to the hardware."""
        self.angle = angle
        self.write_value_to_hardware()

    def get_servo_angle(self):
        """Return the last commanded angle in degrees."""
        return self.angle

    def write_value_to_hardware(self):
        # Compensate for 500us - 2500us representing +/- 90 degrees, and
        # ServoBlaster is in units of 10us: 150 (=1500us) is centre, with
        # 100/90 units per degree.  Floor division keeps the delay an int
        # (true division would produce a float and a fractional command).
        pwm_delay = 150 + (self.angle * 100 // 90)
        if os.path.exists("/dev/servoblaster"):
            # Send to PWM output.  The device is opened in binary mode, so
            # the command string must be encoded; writing a str to a "wb"
            # handle raises TypeError on Python 3.
            servo_command = "%u=%u\n" % (self.number, pwm_delay)
            with open("/dev/servoblaster", "wb") as servo_device:
                servo_device.write(servo_command.encode("ascii"))
| StarcoderdataPython |
13007 | <reponame>BramKaashoek/commercetools-python-sdk
import typing
from commercetools import schemas, types
from commercetools.services import abstract
from commercetools.typing import OptionalListStr
__all__ = ["TypeService"]
class TypeDeleteSchema(abstract.AbstractDeleteSchema):
    # Query-parameter schema for Type delete requests; inherits everything.
    pass
class TypeQuerySchema(abstract.AbstractQuerySchema):
    # Query-parameter schema for Type queries; inherits everything.
    pass
class TypeService(abstract.AbstractService):
    """CRUD access to the commercetools ``types`` endpoint.

    Each method maps to one HTTP call; the optional ``expand`` argument is
    forwarded as the platform's reference-expansion query parameter.
    """

    def get_by_id(self, id: str, expand: OptionalListStr = None) -> types.Type:
        """Fetch a single Type by its id."""
        query_params = {}
        if expand:
            query_params["expand"] = expand
        return self._client._get(f"types/{id}", query_params, schemas.TypeSchema)

    def get_by_key(self, key: str, expand: OptionalListStr = None) -> types.Type:
        """Fetch a single Type by its user-defined key."""
        query_params = {}
        if expand:
            query_params["expand"] = expand
        return self._client._get(f"types/key={key}", query_params, schemas.TypeSchema)

    def query(
        self,
        where: OptionalListStr = None,
        sort: OptionalListStr = None,
        expand: OptionalListStr = None,
        limit: int = None,
        offset: int = None,
    ) -> types.TypePagedQueryResponse:
        """Query Types with optional predicates, sorting and paging."""
        # The schema drops None values and serialises the rest for the URL.
        params = TypeQuerySchema().dump(
            {
                "where": where,
                "sort": sort,
                "expand": expand,
                "limit": limit,
                "offset": offset,
            }
        )
        return self._client._get("types", params, schemas.TypePagedQueryResponseSchema)

    def create(
        self, draft: types.TypeDraft, expand: OptionalListStr = None
    ) -> types.Type:
        """Create a new Type from the given draft."""
        query_params = {}
        if expand:
            query_params["expand"] = expand
        return self._client._post(
            "types", query_params, draft, schemas.TypeDraftSchema, schemas.TypeSchema
        )

    def update_by_id(
        self,
        id: str,
        version: int,
        actions: typing.List[types.TypeUpdateAction],
        expand: OptionalListStr = None,
        *,
        force_update: bool = False,
    ) -> types.Type:
        """Apply update actions to the Type identified by id.

        ``version`` is the optimistic-concurrency version the update is based
        on; ``force_update`` is forwarded to the client (presumably to retry
        or override a version conflict -- confirm in AbstractService).
        """
        query_params = {}
        if expand:
            query_params["expand"] = expand
        update_action = types.TypeUpdate(version=version, actions=actions)
        return self._client._post(
            endpoint=f"types/{id}",
            params=query_params,
            data_object=update_action,
            request_schema_cls=schemas.TypeUpdateSchema,
            response_schema_cls=schemas.TypeSchema,
            force_update=force_update,
        )

    def update_by_key(
        self,
        key: str,
        version: int,
        actions: typing.List[types.TypeUpdateAction],
        expand: OptionalListStr = None,
        *,
        force_update: bool = False,
    ) -> types.Type:
        """Apply update actions to the Type identified by key (see update_by_id)."""
        query_params = {}
        if expand:
            query_params["expand"] = expand
        update_action = types.TypeUpdate(version=version, actions=actions)
        return self._client._post(
            endpoint=f"types/key={key}",
            params=query_params,
            data_object=update_action,
            request_schema_cls=schemas.TypeUpdateSchema,
            response_schema_cls=schemas.TypeSchema,
            force_update=force_update,
        )

    def delete_by_id(
        self,
        id: str,
        version: int,
        expand: OptionalListStr = None,
        *,
        force_delete: bool = False,
    ) -> types.Type:
        """Delete the Type identified by id at the given version."""
        params = {"version": version}
        if expand:
            params["expand"] = expand
        query_params = TypeDeleteSchema().dump(params)
        return self._client._delete(
            endpoint=f"types/{id}",
            params=query_params,
            response_schema_cls=schemas.TypeSchema,
            force_delete=force_delete,
        )

    def delete_by_key(
        self,
        key: str,
        version: int,
        expand: OptionalListStr = None,
        *,
        force_delete: bool = False,
    ) -> types.Type:
        """Delete the Type identified by key at the given version."""
        params = {"version": version}
        if expand:
            params["expand"] = expand
        query_params = TypeDeleteSchema().dump(params)
        return self._client._delete(
            endpoint=f"types/key={key}",
            params=query_params,
            response_schema_cls=schemas.TypeSchema,
            force_delete=force_delete,
        )
| StarcoderdataPython |
117971 | import torch
from torch import nn
from torch.nn import functional as F
from torch.distributions.uniform import Uniform
from networks.layers.non_linear import NonLinear, NonLinearType
from networks.layers.conv_bn import ConvBN
class DropConnect(nn.Module):
    def __init__(self, survival_prob):
        """
        A module that implements drop connection (stochastic depth).
        :param survival_prob: the probability that a sample's connection survives
        """
        super(DropConnect, self).__init__()
        self.survival_prob = survival_prob
        self.u = Uniform(0, 1)

    def forward(self, x):
        """
        The forward function of the DropConnect module: during training,
        randomly zero whole samples and rescale survivors by 1/survival_prob;
        at eval time this is the identity.
        :param x: Input tensor x of shape (batch, C, H, W)
        :return: A tensor after drop connection, same shape as x
        """
        if not self.training:
            return x
        # floor(U(0,1) + p) is a Bernoulli(p) draw per sample.  Sample on
        # x's device instead of the original hard-coded .cuda() so the
        # module also works on CPU and picks the right GPU in multi-device
        # setups.
        random_tensor = self.u.sample([x.shape[0], 1, 1, 1]).to(x.device)
        random_tensor = random_tensor + self.survival_prob
        binary_tensor = torch.floor(random_tensor)
        return x * binary_tensor / self.survival_prob
class GlobalAvgPool2d(nn.Module):
    """Average-pool each feature map down to a single 1x1 value."""

    def __init__(self):
        super(GlobalAvgPool2d, self).__init__()

    def forward(self, x):
        """
        Pool over the full spatial extent: (N, C, H, W) -> (N, C, 1, 1).
        :param x: Input tensor x
        :return: The globally averaged tensor
        """
        height, width = x.shape[2], x.shape[3]
        return F.avg_pool2d(x, (height, width))
class SEBlock(nn.Module):
    """Squeeze-and-Excitation block: reweight channels by globally pooled stats."""

    def __init__(self, nc, in_channels, reduce_channels):
        """
        :param nc: input network controller
        :param in_channels: the number of input channels
        :param reduce_channels: bottleneck width of the excitation path
        """
        super(SEBlock, self).__init__()
        self.gap = GlobalAvgPool2d()
        squeeze_layers = [
            ConvBN(nc, in_channels, reduce_channels, 1, disable_bn=True),
            NonLinear(nc, reduce_channels, NonLinearType.SWISH),
        ]
        excite_layers = [
            ConvBN(nc, reduce_channels, in_channels, 1, disable_bn=True),
            NonLinear(nc, in_channels, NonLinearType.SIGMOID),
        ]
        self.conv_reduce = nn.Sequential(*squeeze_layers)
        self.conv_expand = nn.Sequential(*excite_layers)

    def forward(self, x):
        """
        Scale `x` channel-wise by the learned sigmoid gate.
        :param x: Input tensor x
        :return: A tensor after the SE block
        """
        gate = self.conv_expand(self.conv_reduce(self.gap(x)))
        return x * gate
class ConvBNNonLinear(nn.Sequential):
    def __init__(self, nc, in_planes, out_planes, kernel_size=3, stride=1, groups=1, nl_type=NonLinearType.RELU6,
                 batch_norm_epsilon=1e-5, batch_norm_momentum=0.1, tf_padding=False):
        """
        A joint block of 2d convolution with batch normalization and non linear function modules
        with HMQ quantization of both the convolution weights and activation function
        :param nc: The network quantization controller
        :param in_planes: The number of input channels
        :param out_planes: The number of output channels
        :param kernel_size: The kernel size
        :param stride: The convolution stride
        :param groups: The convolution group size
        :param nl_type: enum that state the non-linear type.
        :param batch_norm_epsilon: The batch normalization epsilon
        :param batch_norm_momentum: The batch normalization momentum
        :param tf_padding: Use TensorFlow padding (for EfficientNet)
        """
        # Default is symmetric "same" padding; with tf_padding the amount is
        # kernel_size - stride, matching TensorFlow's convention -- presumably
        # ConvBN then applies it asymmetrically; confirm in ConvBN.
        padding = kernel_size - stride if tf_padding else (kernel_size - 1) // 2
        super(ConvBNNonLinear, self).__init__(
            ConvBN(nc, in_planes, out_planes, kernel_size, stride, padding, group=groups,
                   batch_norm_epsilon=batch_norm_epsilon, batch_norm_momentum=batch_norm_momentum,
                   tf_padding=tf_padding),
            NonLinear(nc, out_planes, nl_type)
        )
class InvertedResidual(nn.Module):
    def __init__(self, nc, inp, oup, stride, expand_ratio, kernel_size=3, nl_type=NonLinearType.RELU6, se_ratio=0,
                 survival_prob=0, batch_norm_epsilon=1e-5, batch_norm_momentum=0.1, tf_padding=False):
        """
        A Inverted Residual block use in Efficient-Net
        :param nc: The network quantization controller
        :param inp: The number of input channels
        :param oup: The number of output channels
        :param stride: The depth wise convolution stride
        :param expand_ratio: The block expand ratio for depth-wise convolution
        :param kernel_size: The kernel size
        :param nl_type: enum that state the non-linear type.
        :param se_ratio: the ratio between the number of input channel and mid channels in SE Bloock
        :param survival_prob: the probability if connection survival
        :param batch_norm_epsilon: The batch normalization epsilon
        :param batch_norm_momentum: The batch normalization momentum
        :param tf_padding: Use TensorFlow padding (for EfficientNet)
        """
        super(InvertedResidual, self).__init__()
        self.stride = stride
        assert stride in [1, 2]
        hidden_dim = int(round(inp * expand_ratio))
        # The skip connection is only valid when spatial size and channel
        # count are unchanged.
        self.use_res_connect = self.stride == 1 and inp == oup
        self.kernel_size = kernel_size
        layers = []
        if expand_ratio != 1:
            # pw: 1x1 pointwise expansion to hidden_dim channels
            layers.append(ConvBNNonLinear(nc, inp, hidden_dim, kernel_size=1, nl_type=nl_type,
                                          batch_norm_epsilon=batch_norm_epsilon,
                                          batch_norm_momentum=batch_norm_momentum))
        # Depth-wise convolution (groups == channels).
        layers.append(
            ConvBNNonLinear(nc, hidden_dim, hidden_dim, kernel_size=kernel_size, stride=stride, groups=hidden_dim,
                            nl_type=nl_type, batch_norm_epsilon=batch_norm_epsilon,
                            batch_norm_momentum=batch_norm_momentum, tf_padding=tf_padding))
        if se_ratio != 0:
            # Squeeze-excite bottleneck sized relative to the *input* channels.
            layers.append(SEBlock(nc, hidden_dim, int(inp * se_ratio)))
        # Linear 1x1 projection back to oup channels (no non-linearity).
        layers.append(ConvBNNonLinear(nc, hidden_dim, oup, kernel_size=1, stride=1, nl_type=NonLinearType.IDENTITY,
                                      batch_norm_epsilon=batch_norm_epsilon,
                                      batch_norm_momentum=batch_norm_momentum))
        if survival_prob != 0 and self.use_res_connect:
            # Stochastic depth only makes sense on the residual branch.
            layers.append(DropConnect(survival_prob))
        self.conv = nn.Sequential(*layers)
        # Quantization point for the block output.
        self.output_q = NonLinear(nc, oup, nl_type=NonLinearType.IDENTITY)

    def forward(self, x):
        """
        The forward function of the InvertedResidual module
        :param x: Input tensor x
        :return: A tensor after InvertedResidual
        """
        if self.use_res_connect:
            y = self.conv(x)
            return self.output_q(x + y)
        else:
            x = self.conv(x)
            return self.output_q(x)
class RepeatedInvertedResidual(nn.Module):
    def __init__(self, nc, n_repeat, in_channels, out_channels, stride_first, expand_ratio, kernel_size=3,
                 nl_type=NonLinearType.RELU6,
                 se_ratio=0,
                 survival_prob_start=0, drop_rate=0, batch_norm_epsilon=1e-5, batch_norm_momentum=0.1,
                 tf_padding=False):
        """
        A stack of `n_repeat` InvertedResidual blocks sharing one
        configuration.  Only the first block changes the channel count
        (in_channels -> out_channels) and uses `stride_first`; later blocks
        keep out_channels with stride 1.  When both `survival_prob_start`
        and `drop_rate` are positive, each block's survival probability
        decays linearly with its index (stochastic depth).
        :param nc: The network quantization controller
        :param n_repeat: number of stacked blocks
        :param in_channels: The number of input channels
        :param out_channels: The number of output channels
        :param stride_first: The depth wise convolution stride in the first block
        :param expand_ratio: The block expand ratio for depth-wise convolution
        :param kernel_size: The kernel size
        :param nl_type: enum that state the non-linear type.
        :param se_ratio: ratio of input channels used for the SE bottleneck
        :param survival_prob_start: survival probability of the first block
        :param drop_rate: per-block decrement of the survival probability
        :param batch_norm_epsilon: The batch normalization epsilon
        :param batch_norm_momentum: The batch normalization momentum
        :param tf_padding: Use TensorFlow padding (for EfficientNet)
        """
        super(RepeatedInvertedResidual, self).__init__()
        stages = []
        for idx in range(n_repeat):
            is_first = idx == 0
            if survival_prob_start > 0 and drop_rate > 0:
                survival = survival_prob_start - drop_rate * float(idx)
            else:
                survival = 0
            stages.append(InvertedResidual(
                nc,
                in_channels if is_first else out_channels,
                out_channels,
                stride_first if is_first else 1,
                expand_ratio,
                kernel_size=kernel_size,
                nl_type=nl_type,
                se_ratio=se_ratio,
                survival_prob=survival,
                batch_norm_epsilon=batch_norm_epsilon,
                batch_norm_momentum=batch_norm_momentum,
                tf_padding=tf_padding,
            ))
        self.blocks = nn.Sequential(*stages)

    def forward(self, x):
        """Run `x` through every stacked block in order."""
        return self.blocks(x)
| StarcoderdataPython |
1747193 | <reponame>dirtysalt/pyorc
import re
from typing import Mapping, Tuple, Dict
from types import MappingProxyType
from pyorc._pyorc import _schema_from_string
from .enums import TypeKind
class TypeDescription:
    """Base class for ORC type descriptions.

    Subclasses set ``name`` (the ORC type string) and ``kind`` (a TypeKind
    value); instances track their column id and optional string attributes.
    """

    name = ""
    kind = -1

    def __init__(self) -> None:
        self._column_id = 0
        self._attributes = {}

    def __str__(self) -> str:
        return self.name

    @property
    def attributes(self) -> Dict[str, str]:
        """The type's key/value attributes (both keys and values are str)."""
        return self._attributes

    def set_attributes(self, val) -> None:
        """Replace the attribute mapping after validating it is Dict[str, str].

        :raises TypeError: if ``val`` is not a dict, or any key/value is not str.
        """
        if not isinstance(val, dict):
            raise TypeError("The attributes must be a dictionary")
        # Use loop names that do not shadow `val` itself; the original genexpr
        # rebound `val` as its loop variable, which worked but was misleading.
        # The error message typo ("dictinoary"/"must be string") is also fixed.
        if not all(isinstance(k, str) and isinstance(v, str) for k, v in val.items()):
            raise TypeError(
                "All keys and values in the attributes dictionary must be strings"
            )
        self._attributes = val

    @property
    def column_id(self) -> int:
        """The column id assigned to this node in the schema tree."""
        return self._column_id

    def set_column_id(self, val: int) -> int:
        """Assign ``val`` as this node's column id and return the last id used."""
        self._column_id = val
        return self._column_id

    def find_column_id(self, dotted_key: str) -> int:
        """Leaf types have no children, so any lookup fails with KeyError."""
        raise KeyError(dotted_key)

    @staticmethod
    def from_string(schema: str) -> "TypeDescription":
        """Parse an ORC schema string into a TypeDescription tree."""
        return _schema_from_string(schema)
# Primitive (leaf) ORC types: each simply pairs an ORC type name with its
# TypeKind and carries no extra state beyond TypeDescription.
class Boolean(TypeDescription):
    name = "boolean"
    kind = TypeKind.BOOLEAN


class TinyInt(TypeDescription):
    name = "tinyint"
    kind = TypeKind.BYTE


class SmallInt(TypeDescription):
    name = "smallint"
    kind = TypeKind.SHORT


class Int(TypeDescription):
    name = "int"
    kind = TypeKind.INT


class BigInt(TypeDescription):
    name = "bigint"
    kind = TypeKind.LONG


class Float(TypeDescription):
    name = "float"
    kind = TypeKind.FLOAT


class Double(TypeDescription):
    name = "double"
    kind = TypeKind.DOUBLE


class String(TypeDescription):
    name = "string"
    kind = TypeKind.STRING


class Binary(TypeDescription):
    name = "binary"
    kind = TypeKind.BINARY


class Timestamp(TypeDescription):
    name = "timestamp"
    kind = TypeKind.TIMESTAMP


class TimestampInstant(TypeDescription):
    name = "timestamp with local time zone"
    kind = TypeKind.TIMESTAMP_INSTANT


class Date(TypeDescription):
    name = "date"
    kind = TypeKind.DATE
class Char(TypeDescription):
    """Fixed-length character type, rendered e.g. as ``char(10)``."""

    name = "char"
    kind = TypeKind.CHAR

    def __init__(self, max_length: int) -> None:
        self.max_length = max_length
        super().__init__()

    def __str__(self) -> str:
        return f"{self.__class__.name}({self.max_length})"
class VarChar(TypeDescription):
    """Variable-length character type with an upper bound, e.g. ``varchar(64)``."""

    name = "varchar"
    kind = TypeKind.VARCHAR

    def __init__(self, max_length: int) -> None:
        super().__init__()
        self.max_length = max_length

    def __str__(self) -> str:
        return f"{self.__class__.name}({self.max_length})"
class Decimal(TypeDescription):
    """Fixed-point decimal with the given precision and scale."""

    name = "decimal"
    kind = TypeKind.DECIMAL

    def __init__(self, precision: int, scale: int) -> None:
        super().__init__()
        self.precision = precision
        self.scale = scale

    def __str__(self) -> str:
        return f"{self.__class__.name}({self.precision},{self.scale})"
class Union(TypeDescription):
    """ORC uniontype composed of the given member types."""

    name = "uniontype"
    kind = TypeKind.UNION

    def __init__(self, *cont_types) -> None:
        super().__init__()
        if not all(isinstance(member, TypeDescription) for member in cont_types):
            raise TypeError("Invalid container type for Union")
        self.__cont_types = cont_types

    def __str__(self):
        member_list = ",".join(str(member) for member in self.__cont_types)
        return f"{self.__class__.name}<{member_list}>"

    def __getitem__(self, idx: int) -> TypeDescription:
        return self.__cont_types[idx]

    def set_column_id(self, val: int) -> int:
        """Number this node, then every member depth-first; return the last id."""
        self._column_id = val
        for member in self.__cont_types:
            val = member.set_column_id(val + 1)
        return val

    @property
    def cont_types(self) -> Tuple[TypeDescription]:
        return self.__cont_types
class Array(TypeDescription):
    """ORC list type holding elements of a single contained type."""

    name = "array"
    kind = TypeKind.LIST

    def __init__(self, cont_type: TypeDescription) -> None:
        super().__init__()
        if not isinstance(cont_type, TypeDescription):
            raise TypeError("Array's container type must be a TypeDescription instance")
        self.__type = cont_type

    def __str__(self) -> str:
        return f"{self.__class__.name}<{self.__type}>"

    def set_column_id(self, val: int) -> int:
        """Number this node, then the element subtree; return the last id."""
        self._column_id = val
        return self.__type.set_column_id(val + 1)

    @property
    def type(self) -> TypeDescription:
        return self.__type
class Map(TypeDescription):
    """ORC map type parameterised by a key type and a value type."""

    name = "map"
    kind = TypeKind.MAP

    def __init__(self, key: TypeDescription, value: TypeDescription) -> None:
        super().__init__()
        # Validate key first, then value, matching the original raise order.
        for arg, label in ((key, "key"), (value, "value")):
            if not isinstance(arg, TypeDescription):
                raise TypeError("Map's %s type must be a TypeDescription instance" % label)
        self.__key = key
        self.__value = value

    def __str__(self) -> str:
        return f"{self.__class__.name}<{self.__key},{self.__value}>"

    def set_column_id(self, val: int) -> int:
        """Number this node, then the key subtree, then the value subtree."""
        self._column_id = val
        val = self.__key.set_column_id(val + 1)
        return self.__value.set_column_id(val + 1)

    @property
    def key(self) -> TypeDescription:
        return self.__key

    @property
    def value(self) -> TypeDescription:
        return self.__value
class Struct(TypeDescription):
    # Composite ORC struct: an ordered mapping of field name -> field type.
    name = "struct"
    kind = TypeKind.STRUCT

    def __init__(self, **fields) -> None:
        super().__init__()
        for fld in fields.values():
            if not isinstance(fld, TypeDescription):
                raise TypeError(
                    "Struct's field type must be a TypeDescription instance"
                )
        self.__fields = fields
        # Number the whole subtree now; a struct is typically the schema root.
        self.set_column_id(0)

    def __str__(self) -> str:
        return "{name}<{fields}>".format(
            name=self.__class__.name,
            fields=",".join(
                "{field}:{type}".format(field=key, type=str(val))
                for key, val in self.__fields.items()
            ),
        )

    def __getitem__(self, key: str) -> TypeDescription:
        return self.__fields[key]

    def set_column_id(self, val: int) -> int:
        # Depth-first pre-order numbering: this node takes `val`, each field
        # subtree continues from the previous subtree's last id.
        self._column_id = val
        for fld in self.__fields.values():
            val = fld.set_column_id(val + 1)
        return val

    def find_column_id(self, dotted_key: str) -> int:
        # Resolve a dotted path such as "a.b.c" to the column id of the leaf;
        # raises KeyError if any segment is missing.
        this = self
        # Allow to use backtick for escaping column names with dot.
        for key in re.findall(r"[^\.`]+|`[^`]*`", dotted_key):
            this = this[key.replace("`", "")]
        return this.column_id

    @property
    def fields(self) -> Mapping[str, TypeDescription]:
        # Read-only view so callers cannot desynchronise the column ids.
        return MappingProxyType(self.__fields)
| StarcoderdataPython |
1757479 | #!/usr/bin/python
import argparse

# Accept zero or more integers on the command line (nargs='*' allows an
# empty list, in which case the sum is 0) and print their total.
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('num', type=int, nargs='*')
parsed = arg_parser.parse_args()
total = sum(parsed.num)
print(f"The sum of values is {total}")
1745125 | <reponame>joskid/vardbg
from pathlib import Path
from PIL import Image, ImageDraw, ImageFont
from .config import Config
from .gif_encoder import GIFEncoder
from .opencv_encoder import OpenCVEncoder
from .text_format import irepr
from .text_painter import TextPainter
from .webp_encoder import WebPEncoder
WATERMARK = "Generated by vardbg"
SAMPLE_CHARS = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ "
class FrameRenderer:
RED = 0
GREEN = 1
BLUE = 2
def __init__(self, path, config_path):
# Config
self.cfg = Config(config_path)
# Video encoder
ext = Path(path).suffix.lower()[1:]
if ext == "mp4":
self.encoder = OpenCVEncoder(path, "mp4v", self.cfg.fps, self.cfg.w, self.cfg.h)
elif ext == "gif":
self.encoder = GIFEncoder(path, self.cfg.fps)
elif ext == "webp":
self.encoder = WebPEncoder(path, self.cfg.fps)
else:
raise ValueError(f"Unrecognized file extension '{ext}'")
# Drawing context
self.draw = None
# Fonts
self.body_font = ImageFont.truetype(*self.cfg.font_body)
self.body_bold_font = ImageFont.truetype(*self.cfg.font_body_bold)
self.caption_font = ImageFont.truetype(*self.cfg.font_caption)
self.head_font = ImageFont.truetype(*self.cfg.font_heading)
self.intro_font = ImageFont.truetype(*self.cfg.font_intro)
# Whether the watermark has been drawn on this frame
self._watermark_drawn = False
# Sizes and positions to be calculated later
# Code body size
self.line_height = None
self.body_cols = None
self._body_rows = None
self.body_rows = None
# Output body start position
self.out_x = None
self.out_y = None
# Output body size
self.out_cols = None
self.out_rows = None
# Variable body start positions
self.vars_x = None
self.vars_y = None
self.ovars_x = None
self.ovars_y = None
# Variable body size
self.vars_cols = None
self.vars_rows = None
self.ovars_cols = None
self.ovars_rows = None
# Per-frame positions
self.last_var_x = None
self.last_var_y = None
self.ref_var_x = None
self.ref_var_y = None
# Current video frame (image)
self.frame = None
# Text size cache
self.text_size_cache = {}
# Prepare base frame
self.base_frame = None
self.prepare_base_frame()
# Write intro (if necessary)
if self.cfg.intro_text and self.cfg.intro_time:
self.write_intro()
def text_size(self, text, factor=10, **kwargs):
cache_key = (text, kwargs.get("font", None))
if cache_key in self.text_size_cache:
return self.text_size_cache[cache_key]
else:
# Multiply string and divide by the factor to get a more precise width
w, h = self.draw.textsize(text * factor, **kwargs)
w /= factor
# Save to cache and return
sizes = (w, h)
self.text_size_cache[cache_key] = sizes
return sizes
def calc_sizes(self):
# Calculate text sizes
w = self.text_size(SAMPLE_CHARS, font=self.body_font)[0] / len(SAMPLE_CHARS)
hw, hh = self.text_size("A", font=self.head_font)
_, mh = self.text_size("`^Ag", font=self.body_font)
_, ch = self.text_size("1p", font=self.caption_font)
# Code body size
self.line_height = mh * self.cfg.line_height
self.body_cols = int((self.cfg.var_x - self.cfg.sect_padding * 2) / w)
self._body_rows = (self.cfg.out_y - self.cfg.sect_padding * 2 - ch) / self.line_height
self.body_rows = int(self._body_rows)
# Output body start position
self.out_x = self.cfg.sect_padding
self.out_y = self.cfg.out_y + self.cfg.head_padding * 2 + hh
# Output body size
self.out_cols = self.body_cols
self.out_rows = round((self.cfg.h - self.out_y) / self.line_height)
# Variable body start positions
# Top-left X and Y for last variable section
self.vars_x = self.cfg.var_x + self.cfg.sect_padding
self.vars_y = self.cfg.head_padding * 2 + hh
# Columns and rows for last variable section
self.vars_cols = int((self.cfg.w - self.cfg.var_x - self.cfg.sect_padding * 2) / w)
self.vars_rows = int((self.cfg.ovar_y - self.cfg.head_padding * 2 - hh) / self.line_height)
# Top-left X and Y for other variables section
self.ovars_x = self.vars_x
self.ovars_y = self.cfg.ovar_y + self.vars_y
# Columns and rows for other variables section
self.ovars_cols = self.vars_cols
ovars_h = self.cfg.h - self.cfg.ovar_y
self.ovars_rows = int((ovars_h - self.cfg.sect_padding * 2) / self.line_height)
def get_color(self, col):
if col == self.RED:
return self.cfg.red
elif col == self.GREEN:
return self.cfg.green
else:
return self.cfg.blue
def draw_text_center(self, x, y, text, font, color):
w, h = self.text_size(text, font=font)
self.draw.text((x - w / 2, y - h / 2), text, font=font, fill=color)
def prepare_base_frame(self):
# Create new empty frame
self.new_frame(from_base=False)
# Draw output section
# Horizontal divider at 4/5 height
self.draw.line(((0, self.cfg.out_y), (self.cfg.var_x, self.cfg.out_y)), fill=self.cfg.fg_divider, width=1)
# Label horizontally centered and padded
out_center_x = self.cfg.var_x / 2
out_y = self.cfg.out_y + self.cfg.head_padding
self.draw_text_center(
out_center_x, out_y, "Output", self.head_font, self.cfg.fg_heading,
)
# Draw variable section
# Vertical divider at 2/3 width
self.draw.line(((self.cfg.var_x, 0), (self.cfg.var_x, self.cfg.h)), fill=self.cfg.fg_divider, width=1)
# Label horizontally centered in the variable section and vertically padded
var_center_x = self.cfg.var_x + ((self.cfg.w - self.cfg.var_x) / 2)
self.draw_text_center(var_center_x, self.cfg.head_padding, "Last Variable", self.head_font, self.cfg.fg_heading)
# Draw other variables section
# Horizontal divider at 1/3 height
self.draw.line(
((self.cfg.var_x, self.cfg.ovar_y), (self.cfg.w, self.cfg.ovar_y)), fill=self.cfg.fg_divider, width=1
)
# Label similar to the first, but in the others section instead
ovar_label_y = self.cfg.ovar_y + self.cfg.head_padding
self.draw_text_center(var_center_x, ovar_label_y, "Other Variables", self.head_font, self.cfg.fg_heading)
# Populate sizes and positions
self.calc_sizes()
# Save frame as base and reset current frame
self.base_frame = self.frame
self.frame = None
def new_frame(self, from_base=True):
# Create image
if from_base:
self.frame = self.base_frame.copy()
else:
self.frame = Image.new("RGB", (self.cfg.w, self.cfg.h), self.cfg.bg)
# Create drawing context
self.draw = ImageDraw.Draw(self.frame)
# Reset watermark drawn flag
self._watermark_drawn = False
def start_frame(self):
self.new_frame()
def finish_frame(self, var_state):
# Bail out if there's no frame to finish
if self.frame is None:
return
# Draw variable state (if available)
if var_state is not None:
self.draw_variables(var_state)
if self.cfg.watermark and not self._watermark_drawn:
self.draw_watermark()
self._watermark_drawn = True
self.encoder.write(self.frame)
def write_intro(self):
# Render frame
self.new_frame(from_base=False)
x = self.cfg.w / 2
y = self.cfg.h / 2
self.draw_text_center(x, y, self.cfg.intro_text, self.intro_font, self.cfg.fg_heading)
# Repeatedly write frame
frames = round(self.cfg.intro_time * self.cfg.fps)
for _ in range(frames):
self.finish_frame(None)
def draw_code(self, lines, cur_line):
    """Draw the visible window of source lines with the current line highlighted.

    Args:
        lines: per-line sequences of (token, text) pairs (pre-tokenized source).
        cur_line: 1-based number of the line currently being executed.
    """
    cur_idx = cur_line - 1
    # Construct list of (line, highlighted) tuples
    hlines = [(line, i == cur_idx) for i, line in enumerate(lines)]
    # Calculate start and end display indexes with an equivalent number of lines on both sides for context
    ctx_side_lines = (self._body_rows - 1) / 2
    start_idx = round(cur_idx - ctx_side_lines)
    end_idx = round(cur_idx + ctx_side_lines)
    # Accommodate for situations where not enough lines are available at the beginning
    if start_idx < 0:
        end_idx += abs(start_idx)
        start_idx = 0
    # Slice selected section
    display_lines = hlines[start_idx:end_idx]
    # Construct painter over the code section (left of the variable divider)
    x_start = self.cfg.sect_padding
    y_start = self.cfg.sect_padding + self.line_height
    x_end = self.cfg.var_x - self.cfg.sect_padding
    painter = TextPainter(self, x_start, y_start, self.body_cols, self.body_rows, x_end=x_end, show_truncate=False)
    # Render processed lines; only the highlighted line gets a background color
    # (fix: the loop index from enumerate() was unused)
    for line, highlighted in display_lines:
        bg_color = self.cfg.highlight if highlighted else None
        for token, text in line:
            painter.write(text, bg_color=bg_color, **self.cfg.styles[token])
def draw_output(self, lines):
    """Draw the most recent program output lines in the output section."""
    # Keep only as many trailing lines as the section can display
    visible = lines[-self.out_rows:]
    painter = TextPainter(self, self.out_x, self.out_y, self.out_cols, self.out_rows)
    painter.write("\n".join(visible))
def draw_exec(self, nr_times, cur, avg, total):
    """Draw the execution-statistics caption just above the output section."""
    plural = "s" if nr_times != 1 else ""
    text = f"Line executed {nr_times} time{plural} — current time elapsed: {cur}, average: {avg}, total: {total}"
    # Anchor the caption so its bottom edge sits above the output-area padding
    _, text_h = self.text_size(text, font=self.caption_font)
    pos = (self.cfg.sect_padding, self.cfg.out_y - self.cfg.sect_padding - text_h)
    self.draw.text(pos, text, font=self.caption_font)
def draw_last_var(self, state):
    """Draw the "Last Variable" panel: name, colored action, and value text."""
    painter = TextPainter(self, self.vars_x, self.vars_y, self.vars_cols, self.vars_rows)
    # Draw variable name
    painter.write(state.name + " ")
    # Draw action with color; remember where it ends so draw_var_ref can start its line there
    self.last_var_x, self.last_var_y = painter.write(state.action + " ", bold=True, color=state.color)
    painter.new_line()
    # Draw remaining text
    painter.write(state.text)
def draw_other_vars(self, state):
    """Draw the "Other Variables" panel listing each variable's value history."""
    painter = TextPainter(self, self.ovars_x, self.ovars_y, self.ovars_cols, self.ovars_rows)
    # Draw text: one bulleted history list per (non-ignored) variable
    for idx, (var, values) in enumerate(state.other_history):
        if values.ignored:
            continue
        # Blank line between consecutive variable entries
        if idx > 0:
            painter.write("\n\n")
        painter.write(var.name + ":")
        for v_idx, value in enumerate(values):  # sourcery off
            painter.write("\n \u2022 ")
            # Reference highlighting for latest value and matching variables only;
            # its end position is saved so draw_var_ref can connect a line to it
            if var.name == state.ref and v_idx == len(values) - 1:
                v_pos = irepr(painter, value.value, state.value, bold=True, color=state.color, return_pos="H")
                self.ref_var_x, self.ref_var_y = v_pos
            else:
                irepr(painter, value.value)
def draw_var_ref(self, state):
    """Draw a polyline connecting the last variable's action to the referenced value."""
    # Calculate X position to route the line on
    # It should be as short as possible while not obscuring any variables or exceeding the scene width
    right_line_x = min(
        max(self.last_var_x, self.ref_var_x) + self.cfg.sect_padding, self.cfg.w - self.cfg.sect_padding / 2
    )
    # Only the character height is needed (fix: the width was assigned to an unused local)
    _, sh = self.text_size(" ", font=self.body_font)
    # Draw the polyline: right from the action, down, left to above the value, then onto it
    self.draw.line(
        (
            (self.last_var_x, self.last_var_y),
            (right_line_x, self.last_var_y),
            (right_line_x, self.ref_var_y - sh),
            (self.ref_var_x, self.ref_var_y - sh),
            (self.ref_var_x, self.ref_var_y),
        ),
        fill=state.color,
        width=2,
    )
def draw_variables(self, state):
    """Draw both variable panels, then the reference line if one applies.

    draw_other_vars/draw_last_var must run first: they record the endpoint
    coordinates that draw_var_ref connects.
    """
    self.draw_other_vars(state)
    self.draw_last_var(state)
    # Only draw the connecting line when the last variable references another one
    if state.ref is not None:
        self.draw_var_ref(state)
def draw_watermark(self):
    """Draw the watermark text anchored to the bottom-right corner."""
    # Offset the bottom-right anchor by the rendered text size so the text fits inside
    text_w, text_h = self.text_size(WATERMARK, font=self.caption_font)
    x = self.cfg.w - self.cfg.sect_padding - text_w
    y = self.cfg.h - self.cfg.sect_padding - text_h
    self.draw.text((x, y), WATERMARK, fill=self.cfg.fg_watermark, font=self.caption_font)
def close(self, var_state):
    """Flush the final frame (with its variable state) and stop the encoder."""
    # Finish final frame
    self.finish_frame(var_state)
    # Close encoder
    self.encoder.stop()
| StarcoderdataPython |
1644104 | from typing import List, Union
from indico.queries import (
RetrieveStorageObject,
GraphQLRequest,
JobStatus,
CreateModelGroup,
ModelGroupPredict,
CreateStorageURLs
)
from indico.types import Dataset, ModelGroup
from indico import IndicoClient
from indico.errors import IndicoRequestError
from indico_toolkit.types import Predictions
from indico_toolkit import ToolkitStatusError, retry
class IndicoWrapper:
    """
    Class for shared API functionality
    """

    def __init__(self, client: IndicoClient):
        """
        Create indico client with user provided arguments

        args:
            client (IndicoClient): instantiated Indico Client object
        """
        self.client = client

    def train_model(
        self,
        dataset: Dataset,
        model_name: str,
        source_col: str,
        target_col: str,
        wait: bool = False,
    ) -> ModelGroup:
        """
        Train an Indico model

        Args:
            dataset (Dataset): A dataset object (should represent an uploaded CSV dataset)
            model_name (str): the name for your model
            source_col (str): the csv column that contained the text
            target_col (str): the csv column that contained the labels
            wait (bool, optional): Wait for the model to finish training. Defaults to False.

        Returns:
            ModelGroup: Model group object
        """
        return self.client.call(
            CreateModelGroup(
                name=model_name,
                dataset_id=dataset.id,
                source_column_id=dataset.datacolumn_by_name(source_col).id,
                labelset_id=dataset.labelset_by_name(target_col).id,
                wait=wait,
            )
        )

    @retry((IndicoRequestError, ConnectionError))
    def get_storage_object(self, storage_url):
        """Retrieve the object at *storage_url* (retried on transient request errors)."""
        return self.client.call(RetrieveStorageObject(storage_url))

    def create_storage_urls(self, file_paths: List[str]) -> List[str]:
        """Upload local files and return their storage URLs."""
        return self.client.call(CreateStorageURLs(files=file_paths))

    def get_job_status(self, job_id: int, wait: bool = True):
        """Return the status of an async job, optionally blocking until it settles."""
        return self.client.call(JobStatus(id=job_id, wait=wait))

    @retry((IndicoRequestError, ConnectionError))
    def graphQL_request(self, graphql_query: str, variables: dict = None):
        """Execute a raw GraphQL query (retried on transient request errors)."""
        return self.client.call(
            GraphQLRequest(query=graphql_query, variables=variables)
        )

    def get_predictions_with_model_id(
        self,
        model_id: int,
        samples: List[str],
        load: bool = True,
        options: dict = None,
        wait: bool = True,
    ) -> Union[int, List[Predictions]]:
        """
        Submit samples directly to a model. Note: documents must already be in raw text.

        Args:
            model_id (int): The model ID to submit to
            samples (List[str]): A list containing the text samples you want to submit
            load (bool, optional): Set to False if you are submitting for object detection. Defaults to True.
            options (dict, optional): Model Prediction options. Defaults to None.
            wait (bool, optional): Wait for predictions to finish. Defaults to True.

        Returns: if wait is False, returns the job ID, else returns a list of Predictions where each
        Predictions is either type Classifications or Extractions depending on your model.
        """
        job = self.client.call(ModelGroupPredict(model_id, samples, load, options))
        # Fix: idiomatic truthiness test instead of comparing to False with ==
        if not wait:
            return job.id
        status = self.get_job_status(job.id, wait=True)
        if status.status != "SUCCESS":
            raise ToolkitStatusError(
                f"Predictions Failed, {status.status}: {status.result}"
            )
        return [Predictions.get_obj(i) for i in status.result]
| StarcoderdataPython |
1685082 | """Sensor representation for mytoyota"""
import logging
from mytoyota.const import CLOSED, INCAR, LOCKED, OFF, STATE, WARNING
_LOGGER: logging.Logger = logging.getLogger(__package__)
class Hood:
    """Representation of the hood of the car."""

    warning: bool = False
    closed: bool = True

    def __init__(self, hood):
        self.warning = hood.get(WARNING)
        self.closed = hood.get(CLOSED)

    def __str__(self) -> str:
        return str(self.as_dict())

    def as_dict(self) -> dict:
        """Return the hood state as a dict."""
        return vars(self)
class Door:
    """Representation of a single door."""

    warning: bool = False
    closed: bool = True
    locked: bool = True

    def __init__(self, door) -> None:
        self.warning = door.get(WARNING)
        self.closed = door.get(CLOSED)
        self.locked = door.get(LOCKED)

    def __str__(self) -> str:
        return str(self.as_dict())

    def as_dict(self) -> dict:
        """Return the door state as a dict."""
        return {WARNING: self.warning, CLOSED: self.closed, LOCKED: self.locked}
class Doors:
    """Represent all car doors."""

    driverseat: Door
    passengerseat: Door
    rightrearseat: Door
    leftrearseat: Door
    trunk: Door
    warning: bool = False

    def __init__(self, doors: dict):
        self.warning = doors[WARNING]
        self.driverseat = Door(doors.get("driverSeatDoor", None))
        self.passengerseat = Door(doors.get("passengerSeatDoor", None))
        self.rightrearseat = Door(doors.get("rearRightSeatDoor", None))
        self.leftrearseat = Door(doors.get("rearLeftSeatDoor", None))
        self.trunk = Door(doors.get("backDoor", None))

    def __str__(self) -> str:
        return str(self.as_dict())

    def as_dict(self) -> dict:
        """Return as dict."""
        return {
            WARNING: self.warning,
            "driverseat": self.driverseat.as_dict(),
            "passengerseat": self.passengerseat.as_dict(),
            "rightrearseat": self.rightrearseat.as_dict(),
            # Fix: previously serialized rightrearseat under the leftrearseat key
            "leftrearseat": self.leftrearseat.as_dict(),
            "trunk": self.trunk.as_dict(),
        }
class Window:
    """Representation of a single window."""

    warning: bool = False
    state: str = None

    def __init__(self, window) -> None:
        self.warning = window.get(WARNING)
        self.state = window.get(STATE)

    def __str__(self) -> str:
        return str(self.as_dict())

    def as_dict(self) -> dict:
        """Return the window state as a dict."""
        return {WARNING: self.warning, STATE: self.state}
class Windows:
    """Represent all car windows."""

    driverseat: Window
    passengerseat: Window
    rightrearseat: Window
    leftrearseat: Window
    warning: bool = False

    def __init__(self, windows):
        self.warning = windows[WARNING]
        self.driverseat = Window(windows.get("driverSeatWindow", None))
        self.passengerseat = Window(windows.get("passengerSeatWindow", None))
        self.rightrearseat = Window(windows.get("rearRightSeatWindow", None))
        self.leftrearseat = Window(windows.get("rearLeftSeatWindow", None))

    def __str__(self) -> str:
        return str(self.as_dict())

    def as_dict(self) -> dict:
        """Return as dict."""
        return {
            WARNING: self.warning,
            "driverseat": self.driverseat.as_dict(),
            "passengerseat": self.passengerseat.as_dict(),
            "rightrearseat": self.rightrearseat.as_dict(),
            # Fix: previously serialized rightrearseat under the leftrearseat key
            "leftrearseat": self.leftrearseat.as_dict(),
        }
class Light:
    """Representation of a single light."""

    warning: bool = False
    off: bool = True

    def __init__(self, light):
        self.warning = light.get(WARNING)
        self.off = light.get(OFF)

    def __str__(self) -> str:
        return str(self.as_dict())

    def as_dict(self) -> dict:
        """Return the light state as a dict."""
        return {WARNING: self.warning, OFF: self.off}
class Lights:
    """Represent all car lights."""

    front: Light
    back: Light
    hazard: Light
    warning: bool = False

    def __init__(self, lights):
        self.warning = lights.get(WARNING, None)
        self.front = Light(lights.get("headLamp", None))
        self.back = Light(lights.get("tailLamp", None))
        self.hazard = Light(lights.get("hazardLamp", None))

    def __str__(self) -> str:
        return str(self.as_dict())

    def as_dict(self) -> dict:
        """Return as dict."""
        return {
            "warning": self.warning,
            "front": self.front.as_dict(),
            "back": self.back.as_dict(),
            # Fix: previously serialized self.back under the hazard key
            "hazard": self.hazard.as_dict(),
        }
class Key:
    """Representation of the ignition key."""

    warning: bool = False
    in_car: bool = False

    def __init__(self, key):
        self.warning = key.get(WARNING)
        self.in_car = key.get(INCAR)

    def __str__(self) -> str:
        return str(self.as_dict())

    def as_dict(self) -> dict:
        """Return the key state as a dict."""
        return vars(self)
| StarcoderdataPython |
91611 | <filename>open_spiel/python/games/optimal_stopping_game_config.py
from typing import List
import numpy as np
import pyspiel
from open_spiel.python.games.optimal_stopping_game_config_base import OptimalStoppingGameConfigBase
class OptimalStoppingGameConfig(OptimalStoppingGameConfigBase):
    """Configuration DTO for the optimal stopping game, plus pyspiel game-type/info factories."""

    def __init__(self, p: float = 0.001, T_max: int = 5, L: int = 3, R_ST: int = 100, R_SLA: int = 10,
                 R_COST: int = -50, R_INT: int = -100, obs: str = "",
                 obs_dist: str = "", obs_dist_intrusion: str = "", initial_belief: str = "", use_beliefs: bool = False):
        """
        DTO class representing the configuration of the optimal stopping game

        :param p: the probability that the attacker is detected at any time-step
        :param T_max: the maximum length of the game (could be infinite)
        :param L: the number of stop actions of the defender
        :param R_ST: constant for defining the reward function
        :param R_SLA: constant for defining the reward function
        :param R_COST: constant for defining the reward function
        :param R_INT: constant for defining the reward function
        :param obs: the list of observations
        :param obs_dist_intrusion: the observation distribution
        :param initial_belief: the initial belief
        :param use_beliefs: boolean flag whether to use beliefs or not. If this is false, use observations instead.
        """
        super(OptimalStoppingGameConfig, self).__init__(
            p=p,T_max=T_max,L=L,R_ST=R_ST,R_SLA=R_SLA,R_COST=R_COST,R_INT=R_INT, obs=obs, obs_dist=obs_dist,
            obs_dist_intrusion=obs_dist_intrusion, initial_belief=initial_belief, use_beliefs=use_beliefs)

    def create_game_type(self) -> pyspiel.GameType:
        """
        :return: GameType object
        """
        # Simultaneous-move, imperfect-information, zero-sum game declaration for pyspiel
        return pyspiel.GameType(
            short_name="python_optimal_stopping_game",
            long_name="Python Optimal Stopping Game",
            dynamics=pyspiel.GameType.Dynamics.SIMULTANEOUS,
            chance_mode=pyspiel.GameType.ChanceMode.EXPLICIT_STOCHASTIC,
            information=pyspiel.GameType.Information.IMPERFECT_INFORMATION,
            utility=pyspiel.GameType.Utility.ZERO_SUM,
            reward_model=pyspiel.GameType.RewardModel.REWARDS,
            max_num_players=self.num_players,
            min_num_players=self.num_players,
            provides_information_state_string=True,
            provides_information_state_tensor=False,
            provides_observation_string=True,
            provides_observation_tensor=True,
            provides_factored_observation_string=True,
            parameter_specification=self.params)

    def create_game_info(self) -> pyspiel.GameInfo:
        """
        :return: GameInfo object
        """
        # Utility bounds are the reward constants scaled by 10; chance outcomes
        # cover every observation plus one extra outcome
        return pyspiel.GameInfo(
            num_distinct_actions=len(self.get_actions()),
            max_chance_outcomes=len(self.obs) + 1,
            num_players=self.num_players,
            min_utility=self.R_INT*10,
            max_utility=self.R_ST*10,
            utility_sum=0.0,
            max_game_length=self.T_max)

    @staticmethod
    def from_params_dict(params_dict: dict) -> "OptimalStoppingGameConfig":
        """
        Creates a config object from a user-supplied dict with parameters

        :param params_dict: the dict with parameters
        :return: a config object corresponding to the parameters in the dict
        """
        return OptimalStoppingGameConfig(
            p=params_dict["p"], T_max=params_dict["T_max"], L=params_dict["L"], R_ST=params_dict["R_ST"],
            R_SLA=params_dict["R_SLA"], R_COST=params_dict["R_COST"], R_INT=params_dict["R_INT"],
            obs=params_dict["obs"],
            obs_dist_intrusion=params_dict["obs_dist_intrusion"],
            obs_dist=params_dict["obs_dist"], initial_belief=params_dict["initial_belief"],
            use_beliefs=params_dict["use_beliefs"]
        )
| StarcoderdataPython |
1652305 | import numpy as np
import time
from pykin.kinematics.transform import Transform
JOINT_TYPE_MAP = {'revolute' : 'revolute',
'fixed' : 'fixed',
'prismatic' : 'prismatic'}
LINK_TYPE_MAP = {'cylinder' : 'cylinder',
'sphere' : 'sphere',
'box' : 'box',
'mesh' : 'mesh'}
LINK_TYPES = ['box', 'cylinder', 'sphere', 'capsule', 'mesh']
class ShellColors:
    """ANSI escape codes for coloring and styling terminal output."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKCYAN = '\033[96m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'  # reset all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
class Baxter:
    """Baxter-specific helpers: fixed offsets for the visual elbow/forearm links."""

    # Fixed transforms from the lower shoulder/elbow link frames to the
    # corresponding visual link frames (rot is a quaternion, pos in meters)
    left_e0_fixed_offset = Transform(rot=[0.5, 0.5, 0.5, 0.5], pos=[0.107, 0., 0. ])
    left_w0_fixed_offset = Transform(rot=[0.5, 0.5, 0.5, 0.5], pos=[0.088, 0., 0. ])
    right_e0_fixed_offset = Transform(rot=[0.5, 0.5, 0.5, 0.5], pos=[0.107, 0., 0. ])
    right_w0_fixed_offset = Transform(rot=[0.5, 0.5, 0.5, 0.5], pos=[0.088, 0., 0. ])

    @staticmethod
    def add_visual_link(link_transforms, f):
        """Insert visual-link transforms derived from the lower shoulder/elbow frames.

        Args:
            link_transforms (dict): link name -> transform, updated in place
            f: frame whose link name determines which visual link (if any) is added
        """
        if "left_lower_shoulder" in f.link.name:
            link_transforms["left_upper_elbow_visual"] = np.dot(link_transforms["left_lower_shoulder"],
                                                                Baxter.left_e0_fixed_offset)
        if "left_lower_elbow" in f.link.name:
            link_transforms["left_upper_forearm_visual"] = np.dot(link_transforms["left_lower_elbow"],
                                                                  Baxter.left_w0_fixed_offset)
        if "right_lower_shoulder" in f.link.name:
            link_transforms["right_upper_elbow_visual"] = np.dot(link_transforms["right_lower_shoulder"],
                                                                 Baxter.right_e0_fixed_offset)
        if "right_lower_elbow" in f.link.name:
            link_transforms["right_upper_forearm_visual"] = np.dot(link_transforms["right_lower_elbow"],
                                                                   Baxter.right_w0_fixed_offset)
def convert_thetas_to_dict(active_joint_names, thetas):
    """
    Convert a sequence of joint angles to a {joint name: angle} mapping.

    (Fix: the previous docstring was copy-pasted from a collision-check helper.)

    Args:
        active_joint_names (list): actuated joint names
        thetas (sequence of float or dict): joint angles; returned unchanged
            if already a dict

    Returns:
        thetas (dict): Dictionary of actuated joint angles
    """
    if not isinstance(thetas, dict):
        assert len(active_joint_names) == len(thetas
            ), f"""the number of robot joint's angle is {len(active_joint_names)},
            but the number of input joint's angle is {len(thetas)}"""
        # zip pairs each joint name with its angle in order
        thetas = dict(zip(active_joint_names, thetas))
    return thetas
def logging_time(original_fn):
    """
    Decorator that prints the wall-clock running time of the wrapped function.
    """
    from functools import wraps

    # Fix: preserve the wrapped function's name/docstring with functools.wraps
    @wraps(original_fn)
    def wrapper_fn(*args, **kwargs):
        start_time = time.time()
        result = original_fn(*args, **kwargs)
        end_time = time.time()
        print(f"WorkingTime[{original_fn.__name__}]: {end_time-start_time:.4f} sec\n")
        return result
    return wrapper_fn
def convert_transform(origin):
    """
    Return *origin* as a Transform, substituting an identity Transform for None.

    Args:
        origin (None or Transform): offset of object

    Returns:
        Transform: identity Transform when origin is None, otherwise a copy
    """
    if origin is None:
        return Transform()
    return Transform(rot=origin.rot, pos=origin.pos)
def convert_string_to_narray(str_input):
    """
    Parse a whitespace-separated string of numbers into a float ndarray.

    Args:
        str_input (str or None): string of numbers, or None

    Returns:
        np.array: parsed values; None when the input is None
    """
    if str_input is None:
        return None
    return np.array(str_input.split(), dtype=float)
def calc_pose_error(tar_pose, cur_pose, EPS):
    """
    Compute a 6x1 pose error (position on top, orientation below).

    Args:
        tar_pose (np.array): target pose (4x4 homogeneous transform)
        cur_pose (np.array): current pose (4x4 homogeneous transform)
        EPS (float): epsilon forwarded to rot_to_omega

    Returns:
        np.array: 6x1 stacked [position error; orientation error]
    """
    # Translation difference between the two poses
    pos_err = np.array([tar_pose[:3, -1] - cur_pose[:3, -1]])
    # Relative rotation from current to target, expressed in the current frame
    rot_err = np.dot(cur_pose[:3, :3].T, tar_pose[:3, :3])
    # Map the rotation error to an angular error vector and rotate it out of the current frame
    w_err = np.dot(cur_pose[:3, :3], rot_to_omega(rot_err, EPS))
    return np.vstack((pos_err.T, w_err))
def rot_to_omega(R, EPS):
    """Convert a 3x3 rotation matrix to a 3x1 angular error vector (referred p36)."""
    # Stack the skew-symmetric part of R as a column vector
    skew = np.array(
        [[R[2, 1] - R[1, 2]],
         [R[0, 2] - R[2, 0]],
         [R[1, 0] - R[0, 1]]]
    )
    magnitude = np.linalg.norm(skew)
    if magnitude > EPS:
        # Generic case: scale the axis by the rotation angle
        return np.dot(np.arctan2(magnitude, np.trace(R) - 1) / magnitude, skew)
    if R[0, 0] > 0 and R[1, 1] > 0 and R[2, 2] > 0:
        # Near-identity rotation: no error
        return np.zeros((3, 1))
    # Degenerate case: rotation by ~pi about some axis
    return np.dot(np.pi / 2, np.array([[R[0, 0] + 1], [R[1, 1] + 1], [R[2, 2] + 1]]))
def limit_joints(joint_angles, lower, upper):
    """
    Clamp each joint angle into [lower[i], upper[i]] in place.

    Limits are only applied when both bounds are provided.

    Args:
        joint_angles (sequence of float): joint angles (mutated in place)
        lower (sequence of float): lower limit
        upper (sequence of float): upper limit

    Returns:
        joint_angles (sequence of float): the (possibly clamped) joint angles
    """
    if lower is not None and upper is not None:
        for i, angle in enumerate(joint_angles):
            joint_angles[i] = min(max(angle, lower[i]), upper[i])
    return joint_angles
| StarcoderdataPython |
82588 | <reponame>BloomTech-Labs/Quick-Slack-ds
"""empty message
Revision ID: 57f3951597c0
Revises:
Create Date: 2020-01-23 23:11:22.197394
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '<KEY>0'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the admins, channel, message and reply tables.

    channel must be created before message and reply, since both declare a
    foreign key on channel.channel_id.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('admins',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('username', sa.String(length=24), nullable=False),
        sa.Column('password', sa.String(length=24), nullable=False),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('username')
    )
    op.create_table('channel',
        sa.Column('db_id', sa.Integer(), nullable=False),
        sa.Column('channel_id', sa.String(length=10), nullable=True),
        sa.Column('channel_name', sa.String(length=100), nullable=True),
        sa.PrimaryKeyConstraint('db_id'),
        sa.UniqueConstraint('channel_id')
    )
    op.create_table('message',
        sa.Column('db_id', sa.Integer(), nullable=False),
        sa.Column('message_id', sa.String(length=36), nullable=True),
        sa.Column('user_id', sa.String(length=20), nullable=True),
        sa.Column('ts', sa.Float(), nullable=False),
        sa.Column('reply_count', sa.Integer(), nullable=True),
        sa.Column('text', sa.Text(), nullable=True),
        sa.Column('channel_id', sa.String(length=10), nullable=False),
        sa.ForeignKeyConstraint(['channel_id'], ['channel.channel_id'], ),
        sa.PrimaryKeyConstraint('db_id')
    )
    op.create_table('reply',
        sa.Column('db_id', sa.Integer(), nullable=False),
        sa.Column('message_id', sa.String(length=36), nullable=True),
        sa.Column('user_id', sa.String(length=20), nullable=True),
        sa.Column('ts', sa.Float(), nullable=True),
        sa.Column('text', sa.Text(), nullable=True),
        sa.Column('thread_ts', sa.Float(), nullable=True),
        sa.Column('channel_id', sa.String(length=10), nullable=False),
        sa.ForeignKeyConstraint(['channel_id'], ['channel.channel_id'], ),
        sa.PrimaryKeyConstraint('db_id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop all tables created by upgrade().

    Dependent tables (reply, message) are dropped before channel, which they
    reference via foreign keys.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('reply')
    op.drop_table('message')
    op.drop_table('channel')
    op.drop_table('admins')
    # ### end Alembic commands ###
| StarcoderdataPython |
72602 | <filename>src/worker/worker_initializer.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import queue
from flask import Flask
from helpers import unmunge_request, munge_response
from worker.blueprints.api import blueprint as api
from worker.config_handler import ConfigHandler
from worker.tool_config_parser import ToolConfigParser
from worker.worker_class import Worker
from worker.msg_out import Out
from worker.msg_in import MsgIn
from worker.mountcheck_thread import MountcheckThread, is_mounted
from exceptions.storage_not_mounted_error import StorageNotMountedError
from exceptions.configuration_exception import ConfigurationException
from exceptions.incorrect_config_file_error import IncorrectConfigFileError
class WorkerInitializer:
    """Reads the worker configuration, wires up the worker objects, registers
    with the master, and runs the Flask API app."""

    def __init__(self, config_path: str, copytool_path: str):
        """Parse the config file and build the worker objects.

        :raises IncorrectConfigFileError: if config_path does not exist or is invalid.
        """
        if not os.path.exists(config_path):
            raise (IncorrectConfigFileError("No file was found at " + config_path))
        self._storagedict = dict()  # Contains key value dict for each key(i.e. alias)
        self._storages = dict()  # Contains actual storages for each key(i.e. alias)
        self.parse_configfile(config_path)
        self.create_objects()
        # Create singleton object for future use
        ToolConfigParser(copytool_path)

    def parse_configfile(self, config_path: str):
        """Load all worker settings from the configuration file into attributes.

        :raises IncorrectConfigFileError: wraps any ConfigurationException from the handler.
        """
        try:
            self._config_handler = ConfigHandler(config_path)
        except ConfigurationException as exc:
            raise IncorrectConfigFileError('The configuration file contains mistakes') from exc
        self._worker_name = self._config_handler.get_worker_name()
        self._worker_address = self._config_handler.get_worker_address()
        self._master_address = self._config_handler.get_master_address()
        self._timeout = self._config_handler.get_reconnect_timeout()
        self._frequency = self._config_handler.get_reconnect_frequency()
        self._authentification_token = self._config_handler.get_authentification_token()
        self.mountpoints = self._config_handler.get_mountpoints()

    def create_objects(self, ):
        """Initialize all the Objects according to the attributes read in configfile.

        :raises StorageNotMountedError: If a Storage is not mounted at its given mountpath.
        """
        # first we create MsgOut:
        self._msg_out = Out(self._master_address, self._authentification_token, self._timeout,
                            self._frequency)
        self._worker = Worker(self._worker_name, self._msg_out, self.mountpoints)

    def register_to_master(self):
        """Announce this worker to the master and create the inbound message handler."""
        # NOTE(review): 'aliases' is built here but never used — confirm before removing
        aliases = []
        for key, storage in self._storages.items():
            aliases.append(key)
        # the order is important!
        self._msg_out.register(self._master_address, self._authentification_token,
                               self._worker_name,
                               self._worker_address, self.mountpoints, self._worker.status)
        self._msg_in = MsgIn(self._authentification_token, self._worker_address, self._msg_out,
                             self._worker)

    def run_app(self):
        """Start the Flask app serving the worker API at the configured address."""
        app = Flask(__name__)
        # Worker address has the form "<scheme>://<host>:<port>" — split off host and port
        _dummy, _host, _port = self._worker_address.split(':')
        app.config['DEBUG'] = False
        app.config['MSGIN'] = self._msg_in
        app.config['token'] = self._authentification_token
        app.register_blueprint(api)
        # Munge/unmunge every request/response for authentication
        app.before_request(unmunge_request)
        app.after_request(munge_response)
        app.run(host=_host.strip('/'), port=int(_port))
| StarcoderdataPython |
39352 | <filename>orglearn/mind_map/backend/graphviz.py
import colour
import graphviz
from orglearn.mind_map.backend.backend import Backend
class Graphviz(Backend):
    """Mind-map backend that renders an org tree to a Graphviz graph (PDF output)."""

    def __init__(self, *args, **kwargs):
        # Tags used to filter nodes out of the map
        # NOTE(review): ignore_shallow_tags is stored but not used in this class — confirm
        self.ignore_shallow_tags = set(kwargs.get("ignore_shallow_tags_list", []))
        self.ignore_tags = set(kwargs.get("ignore_tags_list", []))
        # Base of the per-depth color gradient (fades towards white, see convert())
        self.base_color = colour.Color("green")

    def convert(self, tree, stream, **kwargs):
        """Render *tree* as a Graphviz digraph and write it next to *stream*'s path."""
        # TODO: Maybe create heading from file name
        # self.dot = graphviz.Digraph(comment='asd')
        self.dot = graphviz.Digraph(comment="asd")
        # Global graph/node/edge styling (commented lines are alternative tunings)
        # self.dot.attr(size='6,6')
        # self.dot.attr('graph', size='8.3,11.7!')
        # self.dot.attr('graph', size='11.7,8.3!')
        # self.dot.attr('graph', page='8.3,11.7!')
        # self.dot.attr('graph', page='11.7,8.3!')
        # self.dot.attr('graph', ratio='auto')
        self.dot.attr("graph", ratio="scale")
        self.dot.attr("graph", splines="spline")
        self.dot.attr("node", shape="box")
        self.dot.attr("graph", overlap="false")
        self.dot.attr("edge", arrowhead="vee", arrowtail="vee", arrowsize="0.75")
        # self.dot.attr('graph', mindist='5.0')
        # self.dot.engine = "neato"
        # self.dot.engine = "circo"
        self.dot.engine = "fdp"
        # self.dot.attr('graph', ratio='0.2')
        # self.dot.attr('graph', K='100')
        # self.dot.attr('graph', maxiter='100')
        try:
            # Try to set the center node text to a org file title comment
            tree.root.heading = tree._special_comments["TITLE"][0]
        except KeyError:
            tree.root.heading = "MAP"
        # Generate color gradient based on the depht of the org tree
        max_depth = 1
        for child in tree:
            max_depth = max(max_depth, child.level + 1)
        self.colors = list(self.base_color.range_to(colour.Color("white"), max_depth))
        self._process_node(tree.root)
        # TODO: Add option to split on highest level into files
        # TODO: Cannot take stream
        self.dot.render(stream.name)

    def _process_node(self, tree_node):
        """Create a map node from tree node and proccess its children."""
        # TODO(mato): What to do with a node body
        # First construct the current node
        # (commented variants below are earlier per-level shape experiments)
        # if tree_node.level == 0:
        #     self.dot.node(self._create_id(tree_node), tree_node.heading, shape='star', color='black')
        # elif tree_node.level == 1:
        #     self.dot.node(self._create_id(tree_node), tree_node.heading, shape='doublecircle')
        # else:
        # if tree_node.level == 0:
        #     self.dot.attr('node', shape='diamond', style='filled', color='lightgray')
        # else:
        #     self.dot.attr('node', shape='ellipse', color='black')
        # height: 0.5
        # width: 0.75
        # Shrink nodes geometrically with depth and color them by depth
        scale = 0.80 ** tree_node.level
        height = str(1 * scale)
        width = str(2 * scale)
        self.dot.attr(
            "node",
            height=height,
            width=width,
            style="filled",
            fillcolor=self.colors[tree_node.level].get_hex_l(),
        )
        self.dot.node(self._create_id(tree_node), tree_node.heading)
        # If node has a parrent, create a link to it
        if tree_node.parent is not None:
            self.dot.edge(
                self._create_id(tree_node.parent), self._create_id(tree_node)
            )  # , constraint='false')
        # Process all children of this node, skipping ignored tags
        for c in tree_node.children:
            if not self.ignore_tags.intersection(c.tags):
                self._process_node(c)

    def _create_id(self, node):
        """Hash the node to create identifier to reference nodes."""
        # ID combines parent heading, level and own heading to disambiguate duplicates
        # TODO: We should double escape the '\' characters
        try:
            return (
                self._normalize_heading(node.parent.heading)
                + "%"
                + str(node.level)
                + "%"
                + self._normalize_heading(node.heading)
            )
        except AttributeError:
            # Root node has no parent heading
            return str(node.level) + "%" + self._normalize_heading(node.heading)

    def _normalize_heading(self, heading):
        """Normalize heading for dot format. Essentialy remove all ':' from headings."""
        return heading.replace(":", "")

    def get_ext(self):
        """Return the filename suffix used for rendered output."""
        # Graphviz automatically appends the '.pdf'
        # And we don't want to colide with `pdf` command so prepend the '-map'
        # This results in: "<filename>-map.pdf"
        return "-map"
| StarcoderdataPython |
100240 | # -*- coding: utf-8 -*-
# Copyright (c) <NAME>. All Rights Reserved.
# Distributed under the MIT License. See LICENSE file for more info.
import threading
import time
from asyncframes import Frame, Event, sleep
from asyncframes.pyqt5_eventloop import EventLoop
class Thread(threading.Thread):
    """Worker thread that posts an asyncframes Event when its work completes."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.event = Event('Thread.event')

    def run(self):
        # Simulate one second of blocking work, then signal the awaiting frame
        time.sleep(1)
        self.event.post()


@Frame
async def print_dots():
    """Print a dot every 0.1 s forever, as a progress indicator."""
    while True:
        await sleep(0.1)
        print(".", end="")


@Frame
async def main_frame():
    """Start the worker thread and print dots until its completion event fires."""
    print("start")
    t = Thread()
    t.start()
    print_dots()
    await t.event
    print("done")


# Drive the main frame on a PyQt5-backed asyncframes event loop
loop = EventLoop()
loop.run(main_frame)
| StarcoderdataPython |
3343833 | <reponame>yiyin/neurodriver<filename>neurokernel/LPU/NDComponents/DendriteModels/__init__.py
import os
import fnmatch
__all__ = []

# Directory containing this package's model implementations
NDC_dir = os.path.dirname(__file__)
for root, dirnames, filenames in os.walk(NDC_dir):
    mod_imp = False
    for f in fnmatch.filter(filenames, "*.py"):
        # Skip package __init__ files and Base* abstract implementations
        if '__init__' != f[:8] and 'Base' != f[:4]:
            # Extend the package search path once per subdirectory that has modules
            if root != NDC_dir and not mod_imp:
                __path__.append(root)
                mod_imp = True
            # Export the module name (filename without the .py suffix)
            __all__.append(f[:-3])
| StarcoderdataPython |
3218137 | <filename>flit_core/flit_core/tests/test_buildapi.py
from contextlib import contextmanager
import os
import os.path as osp
import tarfile
from testpath import assert_isfile, assert_isdir
from testpath.tempdir import TemporaryDirectory
import zipfile
from flit_core import buildapi
samples_dir = osp.join(osp.dirname(__file__), 'samples')
@contextmanager
def cwd(directory):
    """Context manager that runs its body with *directory* as the working directory."""
    original = os.getcwd()
    os.chdir(directory)
    try:
        yield
    finally:
        # Always restore the previous working directory, even on error
        os.chdir(original)
def test_get_build_requires():
    # Both PEP 517 hooks report the sample project's build requirements
    expected = ["requests >= 2.18", "docutils"]
    with cwd(osp.join(samples_dir, 'pep517')):
        assert buildapi.get_requires_for_build_wheel() == expected
        assert buildapi.get_requires_for_build_sdist() == expected


def test_build_wheel():
    # Building a wheel produces a valid .whl zip archive in the target dir
    with TemporaryDirectory() as td, cwd(osp.join(samples_dir, 'pep517')):
        filename = buildapi.build_wheel(td)
        assert filename.endswith('.whl'), filename
        assert_isfile(osp.join(td, filename))
        assert zipfile.is_zipfile(osp.join(td, filename))


def test_build_sdist():
    # Building an sdist produces a valid .tar.gz archive in the target dir
    with TemporaryDirectory() as td, cwd(osp.join(samples_dir, 'pep517')):
        filename = buildapi.build_sdist(td)
        assert filename.endswith('.tar.gz'), filename
        assert_isfile(osp.join(td, filename))
        assert tarfile.is_tarfile(osp.join(td, filename))


def test_prepare_metadata_for_build_wheel():
    # The metadata hook creates a .dist-info directory containing METADATA
    with TemporaryDirectory() as td, cwd(osp.join(samples_dir, 'pep517')):
        dirname = buildapi.prepare_metadata_for_build_wheel(td)
        assert dirname.endswith('.dist-info'), dirname
        assert_isdir(osp.join(td, dirname))
        assert_isfile(osp.join(td, dirname, 'METADATA'))
| StarcoderdataPython |
1754369 | <filename>xCave/osm.py
import json
import numpy as np
import os
import requests
from itertools import tee, izip
from math import atan2, cos, radians, sin, sqrt
from os.path import basename, exists, isfile
from os import makedirs
from operator import itemgetter
from scipy.spatial import ConvexHull#, Delaunay
from sys import exit
from xml.etree import ElementTree
class OSMapi:
    """Get OSM files spanning in a regular grid through defined area using
    OpenStreet Maps API.

    Note: this class is Python 2 code (print statement, itertools.izip).
    Each frame has to span at least .00045*3 to maintain good resolution."""

    # bbox query: left (min lon), bottom (min lat), right (max lon), top (max lat)
    API_QUERY = "http://www.openstreetmap.org/api/0.6/map" + \
        "?bbox=%.6f,%.6f,%.6f,%.6f"
    # Maximum span (in degrees) of each grid cell produced by split_region()
    SCALING_FACTOR = 0.00135

    def __init__(self, name, left, bottom, right, top, scaling_factor=None):
        # Validate the bounding box; sys.exit on malformed input
        if left > right:
            exit("left > right")
        if bottom > top:
            exit("bottom > top")
        self.name = name
        self.left = left
        self.bottom = bottom
        self.right = right
        self.top = top
        if scaling_factor is not None:
            self.SCALING_FACTOR = scaling_factor
        self.blocks = None
        # NOTE(review): defines pairwise() on first instantiation and injects it
        # into the module namespace via `global` — a module-level helper would be clearer
        global pairwise

        def pairwise(iterable):
            """s -> (s0,s1), (s1,s2), (s2, s3), ..."""
            a, b = tee(iterable)
            next(b, None)
            return izip(a, b)

    def split_region(self):
        """Split the bounding box into a grid of cells no larger than SCALING_FACTOR per side."""
        self.blocks = []
        # Latitude breakpoints
        y = np.abs(self.top - self.bottom)
        y_factor = np.ceil(y/self.SCALING_FACTOR)
        if y_factor != 1:
            y_steps = np.linspace(self.bottom, self.top, y_factor)
        else:
            y_steps = [self.bottom, self.top]
        # Longitude breakpoints
        x = np.abs(self.right - self.left)
        x_factor = np.ceil(x/self.SCALING_FACTOR)
        if x_factor != 1:
            x_steps = np.linspace(self.left, self.right, x_factor)
        else:
            x_steps = [self.left, self.right]
        # Each cell is (left, bottom, right, top) from consecutive breakpoints
        for l, r in pairwise(x_steps):
            for b, t in pairwise(y_steps):
                self.blocks.append((l, b, r, t))

    def get_osm(self):
        """Download one .osm file per grid cell (or a single file if split_region was not called)."""
        if self.blocks:
            it = self.blocks
            # Suffix each file with its block index: "<name>_<i>.osm"
            iterator = ["_%d" % i for i in range(len(it))]
        else:
            it = [(self.left, self.bottom, self.right, self.top)]
            iterator = [""]
        for i, j in zip(it, iterator):
            print self.API_QUERY % (i[0], i[1], i[2], i[3])
            r = requests.get(self.API_QUERY % (i[0], i[1], i[2], i[3]))
            with open(self.name + j + ".osm", "w") as of:
                of.write(r.content)
class OSMinterface:
KML_TEMPLATE = """<?xml version="1.0" encoding="UTF-8"?>
<kml xmlns="http://www.opengis.net/kml/2.2" xmlns:gx="http://www.google.com/kml/ext/2.2" xmlns:kml="http://www.opengis.net/kml/2.2" xmlns:atom="http://www.w3.org/2005/Atom">
<Document>
<name>%(name)s</name>
<open>1</open>
<LookAt>
<longitude>%(centreLon)s</longitude> <!--2.616655-->
<latitude>%(centreLat)s</latitude> <!--51.461505-->
<altitude>0</altitude>
<heading>0</heading>
<tilt>0</tilt>
<range>%(range)s</range> <!--50 (InMeters)-->
</LookAt>
<StyleMap id="m_ylw-pushpin">
<Pair>
<key>normal</key>
<styleUrl>#s_ylw-pushpin</styleUrl>
</Pair>
<Pair>
<key>highlight</key>
<styleUrl>#s_ylw-pushpin_hl</styleUrl>
</Pair>
</StyleMap>
<Style id="s_ylw-pushpin">
<IconStyle>
<scale>1.1</scale>
<Icon>
<href>http://maps.google.com/mapfiles/kml/pushpin/ylw-pushpin.png</href>
</Icon>
<hotSpot x="20" y="2" xunits="pixels" yunits="pixels"/>
</IconStyle>
<LineStyle>
<color>00ffffff</color>
</LineStyle>
<PolyStyle>
<color>00ffffff</color>
</PolyStyle>
</Style>
<Style id="s_ylw-pushpin_hl">
<IconStyle>
<scale>1.3</scale>
<Icon>
<href>http://maps.google.com/mapfiles/kml/pushpin/ylw-pushpin.png</href>
</Icon>
<hotSpot x="20" y="2" xunits="pixels" yunits="pixels"/>
</IconStyle>
<LineStyle>
<color>00ffffff</color>
</LineStyle>
<PolyStyle>
<color>00ffffff</color>
</PolyStyle>
</Style>
<Placemark>
<name>Region</name>
<styleUrl>#m_ylw-pushpin</styleUrl>
<Polygon>
<tessellate>1</tessellate>
<outerBoundaryIs>
<LinearRing>
<coordinates>
%(maxlon)s,%(maxlat)s,0 <!-- -2.61643,51.46168,0 -->
%(maxlon)s,%(minlat)s,0 <!-- -2.61643,51.46133,0 -->
%(minlon)s,%(minlat)s,0 <!-- -2.61688,51.46133,0 -->
%(minlon)s,%(maxlat)s,0 <!-- -2.61688,51.46168,0 -->
%(maxlon)s,%(maxlat)s,0 <!-- -2.61643,51.46168,0 -->
</coordinates>
</LinearRing>
</outerBoundaryIs>
</Polygon>
</Placemark>
</Document>
</kml>"""
RANGE_FACTOR = 1.3 # 1.2
MIN_SPAN_LON = 0.0004
MIN_SPAN_LAT = 0.0004
NUMBER_OF_CLOSEST_OBJECTS_TO_EXTRACT_BUILDING = 5
NUMBER_OF_CLOSEST_OBJECTS_TO_EXTRACT_ROAD = 2
def __init__(self, filename):
# Map bounds
self.bounds = None
# Objects on the map
self.objects = None
# Mapping from object id to object type
self.types = None
# Mapping from id to coordinates; tuples of format (lat,lon)
self.mapping = None
if isfile(filename):
self.filename = filename
else:
print "OSM file not given"
exit(1)
def read(self):
"""Read in OSM file into Python structure"""
e = ElementTree.parse(self.filename).getroot()
# Get map bounds
self.bounds = e.find("bounds").attrib
# Get mappings
self.mapping = {}
for i in e.iter("node"):
self.mapping[i.attrib["id"]] = (i.attrib["lat"], i.attrib["lon"])
# Get objects
self.objects = {}
self.types = {}
for i in e.iter("way"):
object_type = None
for j in i.iter("tag"):
# TODO: add more object types than *building* and *highway*
if j.attrib["k"] == "building":
self.types[i.attrib["id"]] = "building"
object_type = True
break
elif j.attrib["k"] == "highway":
self.types[i.attrib["id"]] = "highway"
object_type = True
break
if object_type is None:
continue
self.objects[i.attrib["id"]] = []
for j in i.iter("nd"):
self.objects[i.attrib["id"]].append(j.attrib["ref"])
def geo_distance(self, (lat1, lon1), (lat2, lon2)):
"""Calculate geo-distance between 2 points (in meters)."""
# approximate radius of earth in meters
R = 6373000.0
lat1 = radians(lat1)
lon1 = radians(lon1)
lat2 = radians(lat2)
lon2 = radians(lon2)
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2
c = 2 * atan2(sqrt(a), sqrt(1 - a))
distance = R * c
return distance
def save_object_to_klm(self, o, dirname="", max_lat=None, max_lon=None, max_range=None):
"""Save object as KLM file."""
if dirname:
dirname += "/"
kml_filename = o+".kml"
area = self.get_simple_bounding_box(o)
area["name"] = o
area["centreLat"], area["centreLon"] = self.get_centre(area)
lon = abs(float(area["maxlon"]) - float(area["minlon"]))
lat = abs(float(area["maxlat"]) - float(area["minlat"]))
if max_lat is not None and max_lon is not None:
# Maximise lat
diff = (max_lat - lat) / 2.0
area["minlat"] = str(float(area["minlat"]) - diff)
area["maxlat"] = str(float(area["maxlat"]) + diff)
# Maximise lon
diff = (max_lon - lon) / 2.0
area["minlon"] = str(float(area["minlon"]) - diff)
area["maxlon"] = str(float(area["maxlon"]) + diff)
else:
# Enforce minimum size of the area
if lon < self.MIN_SPAN_LON:
diff = (self.MIN_SPAN_LON - lon) / 2.0
lon = self.MIN_SPAN_LON
area["minlon"] = str(float(area["minlon"]) - diff)
area["maxlon"] = str(float(area["maxlon"]) + diff)
if lat < self.MIN_SPAN_LAT:
diff = (self.MIN_SPAN_LAT - lat) / 2.0
lat = self.MIN_SPAN_LAT
area["minlat"] = str(float(area["minlat"]) - diff)
area["maxlat"] = str(float(area["maxlat"]) + diff)
# 1m per each 0.00001 of difference in altitude or longitude
l = max(lon, lat)
if max_range is None:
area["range"] = int(self.RANGE_FACTOR*int(l/0.000008))
else:
area["range"] = max_range
kml = self.KML_TEMPLATE % area
with open(dirname + kml_filename, "w") as kml_file:
kml_file.write(kml)
def save_klm_per_object(self, centroids=[]):
"""Save every object in the OSM file into KLM."""
# Create a directory for the (closest) objects
dirname = self.filename[:-4] + "_objects"
if not exists(dirname):
makedirs(dirname)
# Get centers for each object
object_centres = {}
for o in self.types:
cLat, cLon = self.get_centre(self.get_simple_bounding_box(o))
object_centres[o] = (cLat, cLon)
# Save objects nearest to centroids to KLM
# 5 closest *buildings* and 2 *roads*
if centroids:
objects_to_extract = []
for centroid in centroids:
c_dist = []
# Get distances form objects
for o in object_centres:
c_dist.append(
(self.geo_distance(centroid, object_centres[o]), o)
)
# Sort on distance
c_dist.sort()
# Select top NUMBER_OF_CLOSEST_OBJECTS_TO_EXTRACT_BUILDING and
# ROAD
b, r = 0, 0
for i in c_dist:
if self.types[i[1]] == "building" and \
b < self.NUMBER_OF_CLOSEST_OBJECTS_TO_EXTRACT_BUILDING:
objects_to_extract.append(i[1])
b += 1
elif self.types[i[1]] == "highway" and \
b < self.NUMBER_OF_CLOSEST_OBJECTS_TO_EXTRACT_ROAD:
objects_to_extract.append(i[1])
r += 1
else:
break
# Remove duplicates from the lsit
objects_to_extract = list(set(objects_to_extract))
max_range = -1.
max_lon = -1.
max_lat = -1.
for o in objects_to_extract:
area = self.get_simple_bounding_box(o)
area["centreLat"], area["centreLon"] = self.get_centre(area)
lon = abs(float(area["maxlon"]) - float(area["minlon"]))
lat = abs(float(area["maxlat"]) - float(area["minlat"]))
max_lat = max(max_lat, lat)
max_lon = max(max_lon, lon)
# 1m per each 0.00001 of difference in altitude or longitude
l = max(max_lon, max_lat)
max_range = int(self.RANGE_FACTOR*int(l/0.000008))
# Memorise range, x and y sizes in JSON format
with open(os.path.join(dirname, "region_spec.json"), "w") as region_spec:
json.dump(
{
"max_lat": max_lat,
"max_lon": max_lon,
"max_range": max_range
},
region_spec,
sort_keys=True,
indent=2,
separators=(',', ': ')
)
for o in objects_to_extract:
self.save_object_to_klm(o, dirname, max_lat, max_lon, max_range)
# Save all objects to KLM
else:
for o in self.types:
self.save_object_to_klm(o, dirname)
def save_as_kml(self):
"Save area extracted from OSM file into KML file."
area = self.bounds.copy()
area["centreLat"], area["centreLon"] = self.get_centre()
# 1m per each 0.00001 of difference in altitude or longitude
lon = abs(float(area["maxlon"]) - float(area["minlon"]))
lat = abs(float(area["maxlat"]) - float(area["minlat"]))
l = max(lon, lat)
area["range"] = int(self.RANGE_FACTOR*int(l/0.000008))
kml_filename = self.filename[:-3]+"kml"
area["name"] = basename(kml_filename)
kml = self.KML_TEMPLATE % area
with open(kml_filename, "w") as kml_file:
kml_file.write(kml)
def id_to_coordinates(self, id):
"""Translate id into coordinates."""
if id in self.mapping:
return self.mapping[id]
else:
print "Unknown object id: ", id
return None
def get_centre(self, obj=None):
if obj is None:
obj = self.bounds
c_lat = (float(obj["maxlat"])+float(obj["minlat"]))/2
c_lon = (float(obj["maxlon"])+float(obj["minlon"]))/2
return c_lat, c_lon
def dist(self, a, b):
a_lat, a_lon = a[0], a[1]
b_lat, b_lon = b[0], b[1]
return sqrt((a_lat-b_lat)**2 + (a_lon-b_lon)**2)
def location_in_map(self, loc):
"""Is `loc` on this map?"""
lat, lon = loc[0], loc[1]
if float(self.bounds["minlat"]) < lat < float(self.bounds["maxlat"]) \
and float(self.bounds["minlon"]) < lon < \
float(self.bounds["maxlon"]):
return True
else:
return False
def location_distance_from_map(self, loc):
"""Distance between `loc` and map's centre of mass."""
c_lat, c_lon = self.get_centre()
return self.dist(loc, (c_lat, c_lon))
def location_in_object(self, loc):
in_object = {}
for i in self.objects:
object_vertices = []
for j in self.objects[i]:
object_vertices.append(self.id_to_coordinates(j))
# hull = ConvexHull(np.array(object_vertices))
# if not isinstance(hull, Delaunay):
# hull = Delaunay(hull)
# in_hull = hull.find_simplex([loc])>=0
# in_object[i] = in_hull[0]
hull = ConvexHull(np.array(object_vertices))
new_points = np.append(object_vertices, np.array([loc]), axis=0)
new_hull = ConvexHull(new_points)
if list(hull.vertices) == list(new_hull.vertices):
in_object[i] = True
else:
in_object[i] = False
return in_object
def location_distance_from_objects(self, loc):
"""Distance between `loc` and the closes object in the map."""
distances = {}
for i in self.objects:
object_centre_of_mass = None
object_vertices = []
for j in self.objects[i]:
object_vertices.append((float(self.id_to_coordinates(j)[0]),
float(self.id_to_coordinates(j)[1])))
object_centre_of_mass = np.mean(object_vertices, axis=0)
object_centre_of_mass = (object_centre_of_mass[0],
object_centre_of_mass[1])
distances[i] = self.dist(loc, object_centre_of_mass)
return distances
# TODO: do you prefer a complicated polygon or simple bounding box?
def get_simple_bounding_box(self, id):
"""Produce simple (max/min) bounding box of a selected object."""
coordinates = []
for i in self.objects[id]:
coordinates.append(self.id_to_coordinates(i))
return {
"minlat": min(coordinates, key=itemgetter(0))[0],
"maxlat": max(coordinates, key=itemgetter(0))[0],
"minlon": min(coordinates, key=itemgetter(1))[1],
"maxlon": max(coordinates, key=itemgetter(1))[1]
}
def get_bounding_box(self, id):
"""Produce best fitted bounding box of a selected object; i.e. polygon
generalisation's smallest surrounding rectangle."""
# from qhull_2d import *
# from min_bounding_rect import *
# Get all blocks
# blocks = []
pass
| StarcoderdataPython |
1614447 | <gh_stars>0
from keras import backend as K
from .tools import stretch_array
import tensorflow as tf
def build_network(X_nodes, X_edges, X_nodes_in_out,
                  X_messages_in, X_messages_out, message_passers,
                  state_updater, readout, ndim_features_nodes, fake_message_const, steps):
    # Message-passing network built with TF1-style symbolic ops:
    # `steps` rounds of (edge message -> max aggregation -> node update),
    # then `readout` maps the final node states to the output.
    for step in range(steps):
        # Per edge: concatenate [source node feats, target node feats, edge feats]
        # and run the step-specific message function.
        messages = message_passers[step](
            K.concatenate(
                [
                    K.reshape(K.gather(reference=X_nodes, indices=X_nodes_in_out), shape=(-1, 2 * ndim_features_nodes)),
                    X_edges
                ], axis=1
            )
        )
        # Append a sentinel row so padded indices in X_messages_in/out have
        # something to gather.  NOTE(review): the tf.where below suggests
        # fake_message_const is an infinite-valued sentinel that is zeroed
        # before the max-aggregation — confirm against its definition.
        messages = K.concatenate([messages, fake_message_const], axis=0)
        messages = tf.where(tf.is_inf(messages), tf.zeros_like(messages), messages)
        # Max-pool incoming and outgoing messages per node.
        messages_aggregated_in = K.max(K.gather(reference=messages, indices=X_messages_in), axis=1)
        messages_aggregated_out = K.max(K.gather(reference=messages, indices=X_messages_out), axis=1)
        ## For GRU-based state_updater
        #_, X_nodes = state_updater(
        #    inputs=K.concatenate([messages_aggregated_in, messages_aggregated_out], axis=1),
        #    state=X_nodes
        #)
        ## For dense state_updater
        X_nodes = state_updater(K.concatenate([messages_aggregated_in, messages_aggregated_out, X_nodes], axis=1))
    return readout(X_nodes)
def run_train(X_cluster_graph, X_predictions, optimizer, sess,
              ndim_features_nodes, ndim_features_edges, placeholders, metrics=[]):
    """Run one training step in `sess` and return (predictions, metric values)."""
    feed = {
        placeholders['X_nodes']: stretch_array(X_cluster_graph['X_cluster_nodes'], ndim_features_nodes),
        placeholders['X_edges']: stretch_array(X_cluster_graph['X_cluster_edges'], ndim_features_edges),
        placeholders['X_labels']: X_cluster_graph['Y_cluster_labels'],
        placeholders['X_nodes_in_out']: X_cluster_graph['X_cluster_in_out'],
        placeholders['X_messages_in']: X_cluster_graph['X_cluster_messages_in'],
        placeholders['X_messages_out']: X_cluster_graph['X_cluster_messages_out'],
        K.learning_phase(): 1,  # training mode
    }
    fetched = sess.run([optimizer, X_predictions] + metrics, feed_dict=feed)
    # fetched[0] is the optimizer op's result, which is not needed.
    return fetched[1], fetched[2:]
def run_test(X_cluster_graph, X_predictions, sess,
             ndim_features_nodes, ndim_features_edges, placeholders, metrics=[]):
    """Evaluate the network in `sess` (no gradient step) and return (predictions, metric values)."""
    feed = {
        placeholders['X_nodes']: stretch_array(X_cluster_graph['X_cluster_nodes'], ndim_features_nodes),
        placeholders['X_edges']: stretch_array(X_cluster_graph['X_cluster_edges'], ndim_features_edges),
        placeholders['X_labels']: X_cluster_graph['Y_cluster_labels'],
        placeholders['X_nodes_in_out']: X_cluster_graph['X_cluster_in_out'],
        placeholders['X_messages_in']: X_cluster_graph['X_cluster_messages_in'],
        placeholders['X_messages_out']: X_cluster_graph['X_cluster_messages_out'],
        K.learning_phase(): 0,  # inference mode
    }
    fetched = sess.run([X_predictions] + metrics, feed_dict=feed)
    return fetched[0], fetched[1:]
| StarcoderdataPython |
1743854 | import os
import matplotlib.pyplot as plt
import numpy as np
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import Reshape
from tensorflow.keras.models import Model
from mnistData import MNIST
# Absolute project paths (hard-coded to the author's machine).
PATH = os.path.abspath("C:/Users/Jan/Dropbox/_Coding/UdemyGAN")
IMAGES_PATH = os.path.join(PATH, "Chapter7_Autoencoder/images")
# Load MNIST once at import time; labels are discarded (unsupervised task).
mnist_data = MNIST()
x_train, _ = mnist_data.get_train_set()
x_test, _ = mnist_data.get_test_set()
# Corrupt the images with additive Gaussian noise (scale 0.1): the
# autoencoder will be trained to map noisy images back to clean ones.
x_train_noise = x_train + 0.1 * np.random.normal(size=x_train.shape)
x_test_noise = x_test + 0.1 * np.random.normal(size=x_test.shape)
def build_autoencoder():
    """Build a fully-connected denoising autoencoder for 28x28x1 images.

    Encoder: 784 -> 256 -> 128 -> 100, decoder mirrors it back to 784,
    all ReLU except a final sigmoid; output reshaped to the image shape.
    """
    latent_units = 100
    img_shape = (28, 28, 1)
    input_img = Input(shape=img_shape)
    x = Flatten()(input_img)
    # Encoder: shrink towards the latent code.
    for units in (256, 128, latent_units):
        x = Dense(units=units)(x)
        x = Activation("relu")(x)
    # Decoder: expand back towards the pixel count.
    for units in (128, 256):
        x = Dense(units=units)(x)
        x = Activation("relu")(x)
    x = Dense(units=np.prod(img_shape))(x)
    x = Activation("sigmoid")(x)
    output_img = Reshape(target_shape=img_shape)(x)
    model = Model(inputs=input_img, outputs=output_img)
    model.summary()
    return model
def run_autoencoder(model):
    """Train `model` to denoise MNIST, then reconstruct 10 noisy test images.

    Returns (noisy inputs, reconstructions) for plotting.
    """
    model.compile(optimizer="adam", loss="mse")
    model.fit(
        x=x_train_noise,
        y=x_train,
        epochs=10,
        batch_size=128,
        validation_data=(x_test_noise, x_test),
    )
    noisy_samples = x_test_noise[:10]
    reconstructions = model.predict(x=noisy_samples)
    return noisy_samples, reconstructions
def plot_imgs(test_imgs, decoded_imgs):
    """Plot 10 noisy inputs (top row) above their reconstructions (bottom row)."""
    plt.figure(figsize=(12, 6))
    for idx in range(10):
        plt.subplot(2, 10, idx + 1)
        plt.imshow(test_imgs[idx].reshape(28, 28), cmap="gray")
        plt.subplot(2, 10, idx + 11)
        plt.imshow(decoded_imgs[idx].reshape(28, 28), cmap="gray")
    plt.savefig(os.path.join(IMAGES_PATH, "denoise_autoencoder.png"))
if __name__ == "__main__":
    # Build, train, and visualise the denoising autoencoder.
    autoencoder = build_autoencoder()
    noisy, reconstructed = run_autoencoder(autoencoder)
    plot_imgs(noisy, reconstructed)
| StarcoderdataPython |
188913 | # -*- coding: UTF-8 -*-
from typing import Union
import torch
import torch.nn as nn
import numpy as np
from .tn_module import _TNBase
__all__ = ["_TNConvNd"]
class _TNConvNd(_TNBase):
    def __init__(self, in_shape: Union[list, np.ndarray], out_shape: Union[list, np.ndarray],
                 ranks: Union[list, np.ndarray], kernel_size: Union[int, tuple], stride=1, padding=0, bias=True):
        # FIX: docstrings below are raw strings; the originals contained
        # invalid escape sequences (\i, \m, \t from LaTeX markup) which
        # raise DeprecationWarnings and will become SyntaxErrors.
        r"""Tensor Decomposition Convolution.

        Parameters
        ----------
        in_shape : Union[list, numpy.ndarray]
                 1-D param :math:`\in \mathbb{R}^m`. The decomposition shape of channel in
        out_shape : Union[list, numpy.ndarray]
                 1-D param :math:`\in \mathbb{R}^n`. The decomposition shape of channel out
        ranks : Union[list, numpy.ndarray]
                 1-D param :math:`\in \mathbb{R}^r`. The ranks of the decomposition
        kernel_size : Union[int, tuple]
                 The convolutional kernel size
        stride : int
                 The length of stride
        padding : int
                 The size of padding
        bias : bool
                 use bias of convolution or not. ``True`` to use, and ``False`` to not use
        """
        # A single int means a square kernel.
        if isinstance(kernel_size, int):
            kernel_size = (kernel_size, kernel_size)
        self.kernel_size = kernel_size
        self.padding = padding
        self.stride = stride
        super(_TNConvNd, self).__init__(in_shape=in_shape, out_shape=out_shape, ranks=ranks, bias=bias)
    def forward(self, inputs: torch.Tensor):
        r"""Tensor convolutional forwarding method.

        Parameters
        ----------
        inputs : torch.Tensor
                 tensor :math:`\in \mathbb{R}^{b \times C \times H \times W}`

        Returns
        -------
        torch.Tensor
            tensor :math:`\in \mathbb{R}^{b \times C' \times H' \times W'}`
        """
        # inputs: [b, C, H, W] -> res: [b, H', W', C']  (contraction defined
        # by the subclass via tn_contract)
        res = self.tn_contract(inputs)
        if self.bias is not None:
            res = torch.add(self.bias, res)
        # res: [b, H', W', C'] -> res: [b, C', H', W']
        # FIX: the original called .contiguous() twice in a row; the second
        # call was a no-op and has been removed.
        res = res.permute(0, 3, 1, 2).contiguous()
        return res
| StarcoderdataPython |
144661 |
import numpy as np
import re
from rdkit import Chem
if __name__ == "__main__":
    import sys

    # Parse each SMILES string given on the command line with RDKit.
    for smiles in sys.argv[1:]:
        mol = Chem.MolFromSmiles(smiles)
3375956 | <reponame>losolio/website<filename>content_notes/migrations/0002_citation.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import modelcluster.fields
import wagtail.wagtailcore.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the Citation model: a sortable, rich-text citation attached to an
    # ArticlePage via a modelcluster ParentalKey.

    dependencies = [
        ('articles', '0063_auto_20150930_1924'),
        ('content_notes', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Citation',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # sort_order is managed by Wagtail's orderable machinery.
                ('sort_order', models.IntegerField(null=True, editable=False, blank=True)),
                ('text', wagtail.wagtailcore.fields.RichTextField()),
                ('article', modelcluster.fields.ParentalKey(related_name='citation_links', to='articles.ArticlePage')),
            ],
            options={
                'ordering': ['sort_order'],
                'abstract': False,
            },
        ),
    ]
| StarcoderdataPython |
40614 | # Advent of Code 2015
#
# From https://adventofcode.com/2015/day/12
import json
import re
filename = ''
# Part 1: regex-scan the raw text for every (possibly negative) integer.
# Each entry in `data` is the list of number strings found on one line.
data = [re.findall(r'(-?\d+)', row.strip()) for row in open(f'../inputs/Advent2015_12(unknown).json', 'r')]
# BUG FIX: the original summed only the FIRST match per line (int(x[0])),
# which under-counts whenever a line contains more than one number — the
# usual case for single-line JSON input.  Sum every match instead.
print(f"AoC 2015 Day 12, Part 1 answer is {sum(int(n) for row in data for n in row)}")
# Part 2 needs the parsed structure, so load the JSON properly.
with open(f'../inputs/Advent2015_12(unknown).json', 'r') as read_file:
    data = json.load(read_file)
def parse_level(level):
    """Recursively sum the numeric content of a parsed JSON structure.

    Ints (and digit-only strings) in lists, and in dict keys and values,
    are accumulated.  Any dict that contains "red" as a key or value
    contributes 0, including everything nested beneath it (AoC 2015 day 12,
    part 2 rule).
    """
    def _numeric(value):
        # ints count directly; strings count only if purely digits
        # (note: negative number *strings* therefore do not count).
        return isinstance(value, int) or (isinstance(value, str) and value.isdigit())

    total = 0
    if isinstance(level, dict):
        # "red" anywhere in this dict disqualifies the whole subtree.
        if 'red' in level or 'red' in level.values():
            return 0
        for key, value in level.items():
            if _numeric(key):
                total += int(key)
            if _numeric(value):
                total += int(value)
            if isinstance(value, (dict, list)):
                total += parse_level(value)
    elif isinstance(level, list):
        for item in level:
            if _numeric(item):
                total += int(item)
            elif isinstance(item, (dict, list)):
                total += parse_level(item)
    return total
# Part 2: same sum, but dicts containing "red" are excluded entirely.
print(f"AoC 2015 Day 12, Part 2 answer is {parse_level(data)}")
| StarcoderdataPython |
3365197 | <gh_stars>1-10
import time
import board
import neopixel
# Drive the 10 on-board NeoPixels at 10% brightness.
pixels = neopixel.NeoPixel(board.NEOPIXEL, 10, brightness=.1)
# Colors
BLACK = (0, 0, 0)
RED = (255, 0, 0)
PINK = (255, 100, 120)
ORANGE = (255, 100, 0)
YELLOW = (255, 255, 0)
GREEN = (0, 255, 0)
CYAN = (0, 255, 255)
PURPLE = (255, 0, 255)
BLUE = (0, 0, 255)
LIGHT_BLUE = (80, 200, 175)
WHITE = (255, 255, 255)
pixels = pixels  # NOTE(review): self-assignment is a no-op leftover
# Start with all pixels off.
pixels.fill(BLACK)
pixels.show()
# Forever: sweep RED across the strip, pause, then sweep GREEN.
while True:
    for i in range(len(pixels)):
        pixels[i] = RED
        time.sleep(.05)  # 50 ms per pixel gives a chase effect
    time.sleep(1)
    for i in range(len(pixels)):
        pixels[i] = GREEN
        time.sleep(.05)
    time.sleep(1)
3280659 | <filename>RRDGraphs/rrd_8years.py
import time
import matplotlib.pyplot as plt
import matplotlib.dates as mdate
import numpy as np
import rrdtool
# --- Query window: seconds back from "now" ---
start = 252288000  # ~8 years
end = 0
# Clamp nonsensical endpoints to small sane defaults.
if int(end) <= 0:
    end = 2
if int(start) <= 0:
    start = 600
epochTimeNow = int(time.time()-1)
# Fetch averaged samples from the RRD for the requested window.
data = rrdtool.fetch('/home/bca/rrdtoolfilesave/powerCapturenew.rrd', 'AVERAGE',
                                                            '--start', f'-{start}',
                                                            '--end', f'-{end}')
# rrdtool reports unknown samples as None; converting with dtype=float turns
# them into NaN so they can be zeroed for plotting.
# BUG FIX: the original called the nonexistent ndarray method
# ``values.isnan()`` (AttributeError) and left a stray ``breakpoint()``
# that halted the script.  Both are fixed/removed here.
values = np.array(data[2], dtype=float)
values[np.isnan(values)] = 0
epochEndTime = epochTimeNow - int(end)
epochStartTime = epochTimeNow - int(start)
# NOTE(review): this builds one timestamp per second, but rrdtool normally
# returns samples at a coarser step (data[0][2]); lengths of `timeseries`
# and `values` may disagree — confirm against the RRD's step size.
timeseries = np.zeros(shape=((epochEndTime-epochStartTime + 1), 1))
for i in range (epochEndTime - epochStartTime + 1):
    timeseries[i] = epochStartTime + 7200 + i  # +7200: timezone offset?
fig, ax = plt.subplots()
# Convert epoch seconds to matplotlib date numbers for plot_date.
timeseries = mdate.epoch2num(timeseries)
ax.plot_date(timeseries, values, linestyle = '-', marker = '', label=f'AllThePower')
timeseriesFormat = '%d-%m-%y %H:%M:%S'
timeseriesFormatted = mdate.DateFormatter(timeseriesFormat)
ax.xaxis.set_major_formatter(timeseriesFormatted)
fig.autofmt_xdate()
plt.ylim(bottom = 0)
StartTime = time.strftime('%Y-%m-%d [%H:%M:%S]', time.localtime(epochStartTime))
EndTime = time.strftime('%Y-%m-%d [%H:%M:%S]', time.localtime(epochEndTime))
plt.ylabel('Watt')
plt.title(f'Time range: {StartTime} - {EndTime}')
plt.tight_layout()
plt.legend()
plt.show()
plt.close()
| StarcoderdataPython |
3235700 | from recent import module
| StarcoderdataPython |
47049 | import os
import pytest
import csv_diff
import logging
import torch
from unit_tests.t_utils import remove_tmp_dir, create_tmp_dir, __data_testing_dir__, __tmp_dir__
from ivadomed.loader import utils as imed_loader_utils
from ivadomed.loader import loader as imed_loader
logger = logging.getLogger(__name__)
def setup_function():
    # pytest hook: fresh scratch directory before every test in this module.
    create_tmp_dir()
@pytest.mark.parametrize('loader_parameters', [{
    "path_data": [os.path.join(__data_testing_dir__, "microscopy_png")],
    "bids_config": "ivadomed/config/config_bids.json",
    "target_suffix": [["_seg-myelin-manual", "_seg-axon-manual"]],
    "extensions": [".png"],
    "roi_params": {"suffix": None, "slice_filter_roi": None},
    "contrast_params": {"contrast_lst": []}
    }])
def test_bids_df_microscopy_png(loader_parameters):
    """
    Test for microscopy png file format
    Test for _sessions.tsv and _scans.tsv files
    Test for target_suffix as a nested list
    Test for when no contrast_params are provided
    """
    # Build the dataframe from the BIDS dataset, including derivatives.
    bids_df = imed_loader_utils.BidsDataframe(loader_parameters, __tmp_dir__, derivatives=True)
    df_test = bids_df.df.drop(columns=['path'])
    # TODO: modify df_ref.csv file in data-testing dataset to include "participant_id"
    # and "sample_id" columns, then delete next line
    df_test = df_test.drop(columns=['participant_id', 'sample_id'])
    # Normalise row order so the CSV diff is deterministic.
    df_test = df_test.sort_values(by=['filename']).reset_index(drop=True)
    csv_ref = os.path.join(loader_parameters["path_data"][0], "df_ref.csv")
    csv_test = os.path.join(loader_parameters["path_data"][0], "df_test.csv")
    df_test.to_csv(csv_test, index=False)
    # Compare against the reference CSV; any difference fails the test.
    # NOTE(review): the open() handles here are never closed explicitly.
    diff = csv_diff.compare(csv_diff.load_csv(open(csv_ref)), csv_diff.load_csv(open(csv_test)))
    assert diff == {'added': [], 'removed': [], 'changed': [], 'columns_added': [], 'columns_removed': []}
@pytest.mark.parametrize('loader_parameters', [{
    "path_data": [__data_testing_dir__],
    "target_suffix": ["_seg-manual"],
    "extensions": [],
    "roi_params": {"suffix": None, "slice_filter_roi": None},
    "contrast_params": {"contrast_lst": ["T1w", "T2w"]}
    }])
def test_bids_df_anat(loader_parameters):
    """
    Test for MRI anat nii.gz file format
    Test for when no file extensions are provided
    Test for multiple target_suffix
    TODO: modify test and "df_ref.csv" file in data-testing dataset to test behavior when "roi_suffix" is not None
    """
    bids_df = imed_loader_utils.BidsDataframe(loader_parameters, __tmp_dir__, derivatives = True)
    df_test = bids_df.df.drop(columns=['path'])
    # TODO: modify df_ref.csv file in data-testing dataset to include "participant_id"
    # column then delete next line
    df_test = df_test.drop(columns=['participant_id'])
    # Normalise row order so the CSV diff is deterministic.
    df_test = df_test.sort_values(by=['filename']).reset_index(drop=True)
    csv_ref = os.path.join(loader_parameters["path_data"][0], "df_ref.csv")
    csv_test = os.path.join(loader_parameters["path_data"][0], "df_test.csv")
    df_test.to_csv(csv_test, index=False)
    # NOTE(review): the open() handles here are never closed explicitly.
    diff = csv_diff.compare(csv_diff.load_csv(open(csv_ref)), csv_diff.load_csv(open(csv_test)))
    assert diff == {'added': [], 'removed': [], 'changed': [],
                    'columns_added': [], 'columns_removed': []}
# TODO: add a test to ensure the loader can read in multiple entries in path_data
@pytest.mark.parametrize('seg_pair', [
    {"input": torch.rand((2, 5, 5))},
    {"input": torch.rand((1, 5, 5))},
    {"input": torch.rand((5, 5, 5, 5))},
    # The two cases below zero out some channels up front (channel masks
    # applied before the transpose puts channels first).
    {"input": (torch.rand((5, 5, 5, 3)) * torch.tensor([1, 0, 1], dtype=torch.float)).transpose(0, -1)},
    {"input": (torch.rand((7, 7, 4)) * torch.tensor([1, 0, 0, 0], dtype=torch.float)).transpose(0, -1)}
])
def test_dropout_input(seg_pair):
    # dropout_input may blank channels but should keep at least one.
    n_channels = seg_pair['input'].size(0)
    seg_pair = imed_loader.dropout_input(seg_pair)
    # A channel counts as "empty" when it holds a single constant value.
    empty_channels = [len(torch.unique(input_data)) == 1 for input_data in seg_pair['input']]
    # If multichannel
    if n_channels > 1:
        # Verify that there is still at least one channel remaining
        # NOTE(review): this assertion is vacuous — sum(empty_channels) can
        # never exceed n_channels.  "At least one remaining" would be
        # `< n_channels`, but some inputs above start with pre-zeroed
        # channels, so tightening it could fail; confirm intended contract
        # of dropout_input before changing.
        assert sum(empty_channels) <= n_channels
    else:
        assert sum(empty_channels) == 0
def teardown_function():
    # pytest hook: clean the scratch directory after every test.
    remove_tmp_dir()
| StarcoderdataPython |
186645 | #!/usr/bin/env python3
'''
This script converts MongoDB records for the Workspace Shock backend into records for the
workspace S3 backend.
The script does not alter the Shock backend records and may be re-run multiple times without issue.
To run:
1) Start the workspace at least once with the S3 backend enabled to create the appropriate
MongoDB indexes.
2) fill in the configuration variables for mongo DB below and run the script normally.
'''
###### CONFIGURATION VARIABLES ######
CONFIG_MONGO_HOST = 'localhost'
CONFIG_MONGO_DATABASE = 'workspace'
#CONFIG_MONGO_DATABASE = 'workspace_conv_test'
#CONFIG_MONGO_DATABASE = 'workspace_conv_test_many_recs'
CONFIG_MONGO_USER = ''
CONFIG_MONGO_PWD = ''
#### END CONFIGURATION VARIABLES ####
from pymongo.mongo_client import MongoClient
COLLECTION_SHOCK = 'shock_nodeMap'
COLLECTION_S3 = 's3_objects'
KEY_SHOCK_CHKSUM = 'chksum'
KEY_SHOCK_NODE = 'node'
KEY_SHOCK_SORTED = 'sorted'
KEY_S3_CHKSUM = 'chksum'
KEY_S3_KEY = 'key'
KEY_S3_SORTED = 'sorted'
'''
Potential improvement: allow filtering by > object id. Then you can run this script while
the workspace is up (since it could take hours), figure out the last object id processed, bring
the ws down, and then run with the object id filter to just process the new records.
'''
def main():
    # Convert every Shock backend record into an S3 backend record,
    # upserting on checksum so re-runs are idempotent.
    if CONFIG_MONGO_USER:
        client = MongoClient(CONFIG_MONGO_HOST, authSource=CONFIG_MONGO_DATABASE,
                             username=CONFIG_MONGO_USER, password=CONFIG_MONGO_PWD)
    else:
        client = MongoClient(CONFIG_MONGO_HOST)
    db = client[CONFIG_MONGO_DATABASE]
    ttl = db[COLLECTION_SHOCK].count_documents({})
    count = 0
    lastPrint = 'Processed {}/{} records'.format(count, ttl)
    print(lastPrint, end='', flush=True)
    for o in db[COLLECTION_SHOCK].find():
        # Upsert keyed on checksum: existing S3 records are overwritten with
        # the same data, so the script is safe to re-run.
        db[COLLECTION_S3].update_one(
            {KEY_S3_CHKSUM: o[KEY_SHOCK_CHKSUM]},
            {'$set': {
                KEY_S3_KEY: toS3Key(o[KEY_SHOCK_NODE]),
                KEY_S3_SORTED: True if o.get(KEY_SHOCK_SORTED) else False}},
            upsert=True)
        count += 1
        # Progress display: overwrite the previous line with backspaces
        # every 100 records.
        if count % 100 == 0:
            backspace = '\b' * len(lastPrint)
            lastPrint = 'Processed {}/{} records'.format(count, ttl)
            print(backspace + lastPrint, end='', flush=True)
    # Final count, with a trailing newline.
    backspace = '\b' * len(lastPrint)
    lastPrint = 'Processed {}/{} records'.format(count, ttl)
    print(backspace + lastPrint)
def toS3Key(node):
    """Map a Shock node id to its sharded S3 key: aa/bb/cc/<full id>."""
    return '/'.join((node[0:2], node[2:4], node[4:6], node))
if __name__ == '__main__':  # allow importing without side effects
    main()
| StarcoderdataPython |
3307945 | #
#
#
# import my stuff
from myscripts import write_batchfile
from myscripts import commit_batchfiles
from myscripts import show_jobs
# path to PASC_inference library
library_path = "~/soft/PASC_inference";
# image_path = [image_dir]/[begin]_[width]_[height].bin
image_name = "C_noise_medium";
#image_name = "C_noise_large";
image_dir = "data/illia_image";
# dimensions of images to compute [width,height]
#dimensions = [[640,202]];
dimensions = [[1683,374]];
# used penalty
epssqrs = [1e-6, 2e-6, 3e-6, 4e-6, 5e-6, 6e-6, 7e-6, 8e-6, 9e-6, 1e-5, 2e-5, 3e-5, 4e-5, 5e-5, 6e-5, 7e-5, 8e-5, 9e-5, 1e-4, 5e-4, 1e-3, 1e-2];
#epssqrs = [1e-6, 1e-5, 1e-4, 1e-3, 1e-2];
# how many clusters to use (value of K)
Ks = [2];
# on how many nodes to compute
Ns = [1];
# name of problem (name of compiled program)
problem_name = "test_image";
# common parameters
problem_parameters = "--test_Theta=0.5 --test_Theta=0.51 --test_cutdata=true --test_scaledata=false --test_annealing=10 --tssolver_debugmode=2 --spgqpsolver_maxit=10000 --tssolver_maxit=100 --spgqpsolver_debugmode=0 --test_shortinfo=true";
# the upper estimation of computing time
problem_time = "00:40:00";
# machine parameters
architecture = "GPU1";
Ngpu = 1;
Nthreads = 1;
# prepare batchscripts
print "Preparing batch scripts: %s (Nthreads=%d, Ngpu=%d)" % (architecture,Nthreads,Ngpu)
batchfile_list = [];
for dimension in dimensions:
for epssqr in epssqrs:
for K in Ks:
for N in Ns:
image_path = "%s/%s_%s_%s.bin" % (image_dir,image_name,dimension[0],dimension[1]);
problem_name_full = "%s_%s_w%s_h%s_epssqr%.16f_K%s_arch%s_N%s_Nthreads%s_Ngpu%s" % (problem_name,image_name,dimension[0],dimension[1],epssqr,K,architecture,N,Nthreads,Ngpu)
print " - %s: %s" % (problem_name, problem_name_full);
problem_parameters_full = "%s --test_image_filename=\"%s\" --test_image_out=\"%s\" --test_width=%s --test_height=%s --test_epssqr=%.16f --test_K=%s --test_shortinfo_header='image_name,width,height,epssqr,K,architecture,N,Nthreads,Ngpu,' --test_shortinfo_values='%s,%d,%d,%.16f,%d,%s,%d,%d,%d,' --test_shortinfo_filename='shortinfo/%s.txt'" % (problem_parameters, image_path, problem_name_full, dimension[0], dimension[1], epssqr, K, image_name, dimension[0], dimension[1], epssqr, K, architecture, N, Nthreads, Ngpu, problem_name_full);
batchfile_name = write_batchfile(problem_name, problem_name_full, problem_time, problem_parameters_full, library_path, architecture, N, Nthreads, Ngpu);
batchfile_list.append(batchfile_name);
# run bash scripts
commit_batchfiles(batchfile_list, "c11", "normal")
# show the queue
show_jobs("c11")
| StarcoderdataPython |
4825069 | from typing import Union, IO
from pathlib import Path
# Anything accepted as a "file": a path string, a pathlib.Path, or an
# already-open file-like object.
FilePathOrBuffer = Union[str, Path, IO]
Buffer = IO | StarcoderdataPython |
167327 | #!/usr/bin/env python3.6
# Create an HTML page listing all the ad hoc queries in Redmine
import sys
import jinja2
from jinja2 import Template
import re
import string
from optparse import OptionParser
import csv
def main():
    """Render the ad hoc query TSV listing into an HTML page.

    Command line: -i <listing.tsv> -t <template.html> -o <output.html>.
    Returns a negative code on bad arguments (the caller passes it to
    sys.exit); returns None on success.
    """
    usage = "usage: %prog -i ad_hoc_listing_file -t ad_hoc_listing_template_file -o project_listing_html_file"
    parser = OptionParser(usage=usage)
    parser.add_option("-i", "--input", dest="al_file", help="Add hoc listing TSV input file.")
    parser.add_option("-t", "--template", dest="template", help="Add hoc listing HTML template file.")
    parser.add_option("-o", "--output", dest="alh_file", help="Add hoc listing HTML output file.")
    (options, args) = parser.parse_args()
    # Validate required options; each failure returns a distinct code.
    if not options.al_file:
        print ("Please specify the add hoc listing TSV input file. (-i input)")
        return - 1
    if not options.template:
        print ("Please specify the add hoc listing HTML template file (-t output)")
        return - 2
    if not options.alh_file:
        print ("Please specify the add hoc listing HTML output file (-o output)")
        return - 3
    if (len(args) > 0):
        print ("Too many input arguments")
        return - 4
    al_file = options.al_file
    template = options.template
    alh_file = options.alh_file
    # The template path is used as the loader root; get_template("")
    # then resolves to that file itself.
    loader = jinja2.FileSystemLoader(template)
    env = jinja2.Environment(loader=loader)
    tm = env.get_template("")
    # FIX: open files with context managers so handles are always closed
    # (the original leaked the TSV handle and the output handle on error).
    with open(al_file) as tsv_handle:
        rows = list(csv.reader(tsv_handle, delimiter='\t'))
    html = tm.render(rows=rows)
    with open(alh_file, "w") as fd_out:
        fd_out.write(html)
# Script entry point: the process exit status mirrors main()'s return code.
if __name__ == "__main__":
    sys.exit(main())
| StarcoderdataPython |
173551 | # -*- coding: utf-8 -*-
""" Generic vault related helpers """
import os
import pathlib
from typing import Any
import chameleon
from rumps import MenuItem
from shellescape import quote
def generate_launchagent(profile_name: str) -> str:
    """Render the launchd agent plist XML for *profile_name*.

    Loads the ``launchagent.pt`` Chameleon template from the package's
    ``templates`` directory and substitutes the profile name into it.
    """
    template_dir = os.path.join(os.path.dirname(__file__), "../templates")
    loader: chameleon.PageTemplateLoader = chameleon.PageTemplateLoader(template_dir)
    agent_template: Any = loader['launchagent.pt']
    return agent_template(profile=profile_name)
class Vault:
    """Menu-bar driven wrapper around ``aws-vault`` actions.

    Tracks which AWS profile is currently active and manages a
    per-profile launchd agent that runs the aws-vault metadata server.
    """
    def __init__(self) -> None:
        """Start with no active profile (the literal string 'None')."""
        self.current_profile = 'None'
    def start_metadata_server(self, sender: MenuItem) -> None:
        """Load the launchd agent for the clicked profile, starting its
        metadata server; the agent plist is generated on first use.

        @param sender: menu item whose title is the profile name
        @type sender: MenuItem
        @return: None
        @rtype: None
        """
        self.current_profile = sender.title
        launchctl_path = pathlib.PosixPath(
            f'~/Library/LaunchAgents/local.aws_vault_{quote(sender.title)}.plist'  # noqa
        ).expanduser().__str__()
        # Write the agent plist only if this profile has never been loaded.
        if not os.path.exists(launchctl_path):
            with open(launchctl_path, "w") as launchctl_file:
                launchctl_file.write(
                    generate_launchagent(self.current_profile)
                )
        os.system(f'launchctl load -w {launchctl_path}')  # nosec
    def stop_metadata_server(self, sender: Any) -> None:  # pylint: disable=unused-argument # noqa
        """Unload the launchd agent of the currently active profile,
        delete its plist, and mark no profile as active.

        @param sender: rumps sender (unused)
        @type sender: Any
        @return: None
        @rtype: None
        """
        launchctl_path = pathlib.PosixPath(
            f'~/Library/LaunchAgents/local.aws_vault_{quote(self.current_profile)}.plist'  # noqa
        ).expanduser()
        os.system(f'launchctl unload {launchctl_path}')  # nosec
        self.current_profile = 'None'
        os.remove(launchctl_path)
    def console_login(self, sender: Any) -> None:
        """Open an AWS console session via ``aws-vault login`` for the
        selected profile and remember it as the current profile.

        @param sender: menu item whose title is the profile name
        @type sender: Any
        @return: None
        @rtype: None
        """
        self.current_profile = sender.title
        os.system(f"aws-vault login {quote(sender.title)}")  # nosec
1688905 | <reponame>vinissimus/pytest-mp
import pytest
@pytest.mark.parametrize('use_mp', (False, True))
def test_group_info_marker_kwargs_from_args(testdir, use_mp):
    """Positional mp_group args populate the mp_group_info marker kwargs;
    ungrouped tests default to group 'ungrouped' / strategy 'free'.
    Exercised both with and without the --mp flag.
    """
    testdir.makepyfile("""
        import pytest
        @pytest.mark.mp_group('One')
        def test_one(request):
            kwargs = request.node.get_closest_marker('mp_group_info').kwargs
            assert kwargs['group'] == 'One'
            assert kwargs['strategy'] == 'free'
        @pytest.mark.mp_group('One')
        def test_two(request):
            kwargs = request.node.get_closest_marker('mp_group_info').kwargs
            assert kwargs['group'] == 'One'
            assert kwargs['strategy'] == 'free'
        @pytest.mark.mp_group('Two', 'serial')
        def test_three(request):
            kwargs = request.node.get_closest_marker('mp_group_info').kwargs
            assert kwargs['group'] == 'Two'
            assert kwargs['strategy'] == 'serial'
        def test_four(request):
            kwargs = request.node.get_closest_marker('mp_group_info').kwargs
            assert kwargs['group'] == 'ungrouped'
            assert kwargs['strategy'] == 'free'
    """)
    result = testdir.runpytest('--mp' if use_mp else '')
    result.assert_outcomes(passed=4)
    assert result.ret == 0
@pytest.mark.parametrize('use_mp', (False, True))
def test_group_info_marker_kwargs_from_kwargs(testdir, use_mp):
    """Keyword mp_group args (group=/strategy=) populate the
    mp_group_info marker kwargs exactly like positional args do.
    """
    testdir.makepyfile("""
        import pytest
        @pytest.mark.mp_group(group='One')
        def test_one(request):
            kwargs = request.node.get_closest_marker('mp_group_info').kwargs
            assert kwargs['group'] == 'One'
            assert kwargs['strategy'] == 'free'
        @pytest.mark.mp_group(group='One')
        def test_two(request):
            kwargs = request.node.get_closest_marker('mp_group_info').kwargs
            assert kwargs['group'] == 'One'
            assert kwargs['strategy'] == 'free'
        @pytest.mark.mp_group(group='Two', strategy='serial')
        def test_three(request):
            kwargs = request.node.get_closest_marker('mp_group_info').kwargs
            assert kwargs['group'] == 'Two'
            assert kwargs['strategy'] == 'serial'
        def test_four(request):
            kwargs = request.node.get_closest_marker('mp_group_info').kwargs
            assert kwargs['group'] == 'ungrouped'
            assert kwargs['strategy'] == 'free'
    """)
    result = testdir.runpytest('--mp' if use_mp else '')
    result.assert_outcomes(passed=4)
    assert result.ret == 0
@pytest.mark.parametrize('use_mp', (False, True))
def test_group_info_marker_kwargs_from_args_and_kwargs(testdir, use_mp):
    """Mixing positional and keyword mp_group args works, and a later
    member of a group inherits the strategy set by an earlier member.
    """
    testdir.makepyfile("""
        import pytest
        @pytest.mark.mp_group('One', strategy='serial')
        def test_one(request):
            kwargs = request.node.get_closest_marker('mp_group_info').kwargs
            assert kwargs['group'] == 'One'
            assert kwargs['strategy'] == 'serial'
        @pytest.mark.mp_group(group='One')
        def test_two(request):
            kwargs = request.node.get_closest_marker('mp_group_info').kwargs
            assert kwargs['group'] == 'One'
            assert kwargs['strategy'] == 'serial'  # inherited
        def test_three(request):
            kwargs = request.node.get_closest_marker('mp_group_info').kwargs
            assert kwargs['group'] == 'ungrouped'
            assert kwargs['strategy'] == 'free'
    """)
    result = testdir.runpytest('--mp' if use_mp else '')
    result.assert_outcomes(passed=3)
    assert result.ret == 0
def test_multiple_groups_disallowed_args(testdir):
    """A test may not belong to two groups: conflicting class-level and
    function-level positional mp_group markers abort the whole run
    (pytest internal-error exit code 3).
    """
    testdir.makepyfile("""
        import pytest
        @pytest.mark.mp_group('One')
        class TestClass(object):
            @pytest.mark.mp_group('Two')
            def test_one(self):
                assert True
    """)
    result = testdir.runpytest('--mp')
    result.stdout.fnmatch_lines(['*Exception: Detected too many mp_group values for test_one',
                                 '*= no tests ran in * seconds =*'])
    assert result.ret == 3
def test_multiple_groups_disallowed_args_and_kwargs(testdir):
    """Same conflict as above, but with one positional and one keyword
    mp_group value.
    """
    # It isn't possible to account for just kwargs since they will be set to highest
    # decorated value (overwritten)
    testdir.makepyfile("""
        import pytest
        @pytest.mark.mp_group('One')
        class TestClass(object):
            @pytest.mark.mp_group(group='Two')
            def test_one(self, request):
                assert True
    """)
    result = testdir.runpytest('--mp')
    result.stdout.fnmatch_lines(['*Exception: Detected too many mp_group values for test_one',
                                 '*= no tests ran in * seconds =*'])
    assert result.ret == 3
def test_confirm_ordering_by_group_strategy(testdir):
    """Smoke test mixing all four strategies (isolated_serial,
    isolated_free, serial, free); 1 + 10 + 5 + 10 = 26 tests must pass.
    """
    # TODO: actually assert the execution *ordering*, not just the outcome.
    testdir.makepyfile("""
        import pytest
        @pytest.mark.mp_group('IsoSerial', 'isolated_serial')
        def test_a_isolated_serial():
            assert True
        @pytest.mark.mp_group('IsoFree', 'isolated_free')
        @pytest.mark.parametrize('val', range(10))
        def test_b_isolated_free(val):
            assert True
        @pytest.mark.mp_group('Serial', 'serial')
        @pytest.mark.parametrize('val', range(5))
        def test_c_serial(val):
            assert True
        @pytest.mark.mp_group('Free', 'free')
        @pytest.mark.parametrize('val', range(10))
        def test_d_free(val):
            assert True
    """)
    result = testdir.runpytest('-vs', '--mp')
    result.assert_outcomes(passed=26)
    assert result.ret == 0
| StarcoderdataPython |
1655654 | <gh_stars>0
from django.shortcuts import render
from django.views.generic.detail import DetailView
from django.views.generic.list import ListView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse_lazy
from .models import Tablet, Seal
from .forms import TabletForm, SealForm
class SealDetailView(DetailView):
    """Read-only detail page for a single Seal."""
    model = Seal
    template_name = 'tablets/seal_detail.html'
class SealCreate(CreateView):
    """Login-protected form view for creating a Seal."""
    model = Seal
    template_name = 'tablets/seal_create.html'
    form_class = SealForm
    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        """Require an authenticated user before handling any request."""
        return super(SealCreate, self).dispatch(*args, **kwargs)
class SealUpdate(UpdateView):
    """Login-protected form view for editing a Seal (reuses the create template)."""
    model = Seal
    form_class = SealForm
    template_name = 'tablets/seal_create.html'
    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        """Require an authenticated user before handling any request."""
        return super(SealUpdate, self).dispatch(*args, **kwargs)
class SealDelete(DeleteView):
    """Login-protected confirmation view that deletes a Seal and
    redirects to the tablet browsing list."""
    model = Seal
    template_name = 'vocabs/confirm_delete.html'
    success_url = reverse_lazy('browsing:browse_tablets')
    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        """Require an authenticated user before handling any request."""
        return super(SealDelete, self).dispatch(*args, **kwargs)
class TabletDetailView(DetailView):
    """Read-only detail page for a single Tablet."""
    model = Tablet
    template_name = 'tablets/tablet_detail.html'
class TabletCreate(CreateView):
    """Login-protected form view for creating a Tablet."""
    model = Tablet
    template_name = 'tablets/tablet_create.html'
    form_class = TabletForm
    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        """Require an authenticated user before handling any request."""
        return super(TabletCreate, self).dispatch(*args, **kwargs)
class TabletUpdate(UpdateView):
    """Login-protected form view for editing a Tablet (reuses the create template)."""
    model = Tablet
    form_class = TabletForm
    template_name = 'tablets/tablet_create.html'
    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        """Require an authenticated user before handling any request."""
        return super(TabletUpdate, self).dispatch(*args, **kwargs)
class TabletDelete(DeleteView):
    """Login-protected confirmation view that deletes a Tablet and
    redirects to the tablet browsing list."""
    model = Tablet
    template_name = 'vocabs/confirm_delete.html'
    success_url = reverse_lazy('browsing:browse_tablets')
    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        """Require an authenticated user before handling any request."""
        return super(TabletDelete, self).dispatch(*args, **kwargs)
| StarcoderdataPython |
193330 | #%%
import cv2;
from pathlib import Path
from dotenv import find_dotenv, load_dotenv
# not used in this stub but often useful for finding various files
#project_dir = Path(__file__).resolve().parents[2]
# find .env automagically by walking up directories until it's found, then
# load up the .env entries as environment variables
load_dotenv(find_dotenv())
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
import seaborn as sns
import pandas as pd
from scipy import ndimage
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib import colors
import seaborn as sns
img = cv2.imread("C:\\Users\\Helldragger\\Documents\\projects\\MetaWatch\\MetaWatch\\src\\features\\original.jpg");
# we cut the image to keep only the interesting part: the overlay.
#%%
bgdModel = np.zeros((1,65),np.float64)
fgdModel = np.zeros((1,65),np.float64)
newmask = cv.imread('C:\\Users\\Helldragger\\Documents\\projects\\MetaWatch\\MetaWatch\\src\\features\\overlaymaskheroes.png',0)
# wherever it is marked white (sure foreground), change mask=1
# wherever it is marked black (sure background), change mask=0
mask[newmask == 0] = 0
mask[newmask == 255] = 1
mask, bgdModel, fgdModel = cv.grabCut(img,mask,None,bgdModel,fgdModel,5,cv.GC_INIT_WITH_MASK)
mask = np.where((mask==2)|(mask==0),0,1).astype('uint8')
img2 = img*mask[:,:,np.newaxis]
#%%
plt.imshow(img),plt.colorbar(),plt.show()
plt.imshow(img2),plt.colorbar(),plt.show()
cv2.imshow("cut", img2);
cv2.imshow("original", img);
cv2.waitKey(0)
croppedImg = img2[125:210, 35:1892];
plt.imshow(croppedImg),plt.colorbar(),plt.show()
cv2.imshow("cropped", croppedImg);
cv2.waitKey(0)
#%%
# read data about a single hero
src1_mask = cv2.imread('C:\\Users\\Helldragger\\Documents\\projects\\MetaWatch\\MetaWatch\\src\\features\\maskheroA1.png',0);
src1_mask = cv2.cvtColor(src1_mask,cv2.COLOR_GRAY2RGB)
masked_image = cv2.bitwise_and(img, src1_mask)
cv2.imshow("hero maked", masked_image);
cv2.waitKey(0)
#%%
#cropping the hhero image
y,x = masked_image[:,:,1].nonzero() # get the nonzero alpha coordinates
minx = np.min(x)
miny = np.min(y)
maxx = np.max(x)
maxy = np.max(y)
cropImg = masked_image[miny:maxy, minx:maxx]
#cv2.imwrite("cropped.png", cropImg)
cv2.imshow("cropped", cropImg)
cv2.waitKey(0)
#%%
# here we load various health bars and try to quantify their content: health, shield, armor, death symbol.
imgs = {};
srcs = [
"frame377_hero_A1_health",
"frame377_hero_A2_health",
"frame377_hero_A3_health",
"frame377_hero_A4_health",
"frame377_hero_A5_health",
"frame377_hero_A6_health",
"frame377_hero_B1_health",
"frame377_hero_B2_health",
"frame377_hero_B3_health",
"frame377_hero_B4_health",
"frame377_hero_B5_health",
"frame377_hero_B6_health",
]
for src in srcs:
imgs[src] = cv2.imread('C:\\Users\\Helldragger\\Documents\\projects\\MetaWatch\\MetaWatch\\src\\data\\tests\\'+src+'.jpg',-1);
#src1_mask = cv2.imread('C:\\Users\\Helldragger\\Documents\\projects\\MetaWatch\\MetaWatch\\src\\features\\maskheroA1.png',0);
#%%
# here we create an histogram for each image
# idea: counting the amount of health using the percentage of specific colours in the health zone.
img = imgs["frame377_hero_B1_health"];
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB )
img = cv2.cvtColor(img, cv2.COLOR_RGB2HLS )
channels = ('H','L','S')
colors = ("B", 'G', 'R')
for i,chann in enumerate(channels):
histr = cv2.calcHist(img, [i], None, [256], [1, 256], True, False)
histr /= max(histr)
plt.plot(histr,color = colors[i])
plt.xlim([0,256])
plt.show()
plt.imshow(img),plt.colorbar(),plt.show()
#for img in imgs.values():
#%%
# Reducing the amount of colors to 5 colors:
reducedimg = imgs["frame377_hero_B1_health"];
#cv2.cvtColor(reducedimg, cv2.COLOR_HLS2BGR )
plt.imshow(reducedimg),plt.colorbar(),plt.show()
reducedimg = reducedimg // 64
reducedimg = reducedimg * 64
plt.imshow(reducedimg),plt.colorbar(),plt.show()
#%%
img = imgs["frame377_hero_B1_health"];
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB )
pixel_colors = img.reshape((np.shape(img)[0]*np.shape(img)[1], 3))
norm = colors.Normalize(vmin=-1.,vmax=1.)
norm.autoscale(pixel_colors)
pixel_colors = norm(pixel_colors).tolist()
hlsImg = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
h, l, s = cv2.split(hlsImg);
#plt.imshow(img),plt.colorbar()
fig = plt.figure()
ax1 = fig.add_subplot(2, 1, 1, projection="3d")
ax1.scatter(h.flatten(), l.flatten(), s.flatten(), facecolors=pixel_colors, marker=".")
ax1.set_xlabel("Hue")
ax1.set_ylabel("Luminosity")
ax1.set_zlabel("Saturation")
ax2 = fig.add_subplot(2, 1, 2)
ax2.imshow(img)
plt.show()
#%%
# trying to filter out noise colors:
img = imgs["frame377_hero_B1_health"];
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB )
hlsImg = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
h, l, s = cv2.split(hlsImg);
fig = plt.figure()
ax = fig.add_subplot(4, 1, 1)
ax.imshow(img)
ax.set_label("pre filtering");
h, l, s = cv2.split(hlsImg);
ax = fig.add_subplot(4, 1, 2)
ax.imshow(h)
ax.set_label("Hue");
ax = fig.add_subplot(4, 1, 3)
ax.imshow(l)
ax.set_label("Luminance");
ax = fig.add_subplot(4, 1, 4)
ax.imshow(s)
ax.set_label("Saturation");
plt.show()
#%%
def getRGBImg(name):
    """Return the cached test image *name* converted from BGR to RGB
    (matplotlib expects RGB channel order)."""
    return cv2.cvtColor(imgs[name], cv2.COLOR_BGR2RGB)
def showHealthDetected(img):
    """Zero out dark pixels (luminance < 100 in HLS) of an RGB health-bar
    image, show the original, the H/L/S channels and the filtered result,
    and return the filtered image converted back to RGB.
    """
    hlsImg = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
    # Drop everything below the luminance threshold; the bright notches
    # of the health bar survive the filter.
    hlsImg[hlsImg[:,:,1] < 100] = 0
    #hlsImg[:,:,0] = 126
    h, l, s = cv2.split(hlsImg);
    fig = plt.figure()
    ax = fig.add_subplot(5, 1, 1)
    ax.imshow(img)
    ax = fig.add_subplot(5, 1, 2)
    ax.imshow(h)
    ax.set_label("Hue");
    ax = fig.add_subplot(5, 1, 3)
    ax.imshow(l)
    ax.set_label("Luminance");
    ax = fig.add_subplot(5, 1, 4)
    ax.imshow(s)
    ax.set_label("Saturation");
    resultimg = cv2.cvtColor(hlsImg, cv2.COLOR_HLS2RGB)
    ax = fig.add_subplot(5, 1, 5)
    ax.imshow(resultimg)
    plt.show()
    return resultimg
analyzedImgs = {}
for imgSrc in imgs.keys():
analyzedImgs[imgSrc] = showHealthDetected(getRGBImg(imgSrc))
#%%
# recenter every img.
# calculate the histogram of pixels > 0 per line.
def showLineHistograms(img):
    """Plot, per image row, how many pixels are non-zero.

    WARNING: mutates *img* in place (binarises it to 0/1).
    """
    # Binarise: any detected pixel counts as 1.
    img[img > 0] = 1
    Ysize = len(img[:,0,0])  # NOTE(review): computed but never used
    # Row-wise count of active pixels (summed across the width axis).
    hist = img.sum(axis=1)
    fig = plt.figure()
    plt.plot(hist)
    plt.show()
hist = np.zeros((22));
observations = np.array([]);
for imgSrc in imgs.keys():
img = cv.cvtColor(analyzedImgs[imgSrc], cv2.COLOR_RGB2GRAY)
imgSum = img.sum(axis=1);
hist[: len(imgSum)] += imgSum
observations = np.concatenate((observations, np.where(imgSum > 0)[0]) );
hist /= max(hist)
fig = plt.figure()
plt.plot(hist)
plt.show()
sns.distplot(observations)
plt.show()
#%%
# here we try to detect the correct amount of notches on any image
# the image is passed through a gradient filter to show only the variations.
# this gradient filter is then
def detectLabels(img):
    """Label connected components (4-connectivity) in *img*.

    Returns the scipy ``(labeled_array, num_features)`` pair.
    Fixed: the original ignored its argument and labelled the module-level
    ``imgGrad`` instead; it also used the removed
    ``ndimage.measurements`` namespace.
    """
    return ndimage.label(img)


def calculateLabellingErrorRate(gradImg, expected):
    """Return detected-component count minus *expected* (0 means perfect)."""
    return detectLabels(gradImg)[1] - expected
notchesExpected = {
"frame377_hero_A1_health":23,
"frame377_hero_A2_health":24,
"frame377_hero_A3_health":8,
"frame377_hero_A4_health":10,
"frame377_hero_A5_health":2,
"frame377_hero_A6_health":6,
"frame377_hero_B1_health":24,
"frame377_hero_B2_health":8,
"frame377_hero_B3_health":10,
"frame377_hero_B4_health":8,
"frame377_hero_B5_health":1,
"frame377_hero_B6_health":8,
}
gradientThreshold = 30
bin_amount = abs(min(256, 256))
gradientThresholds = np.linspace(0,256,bin_amount) // 1
#%%
errors = np.zeros(shape=(len(notchesExpected), bin_amount))
j = 0
for lowGradientThreshold in gradientThresholds:
i = 0
for src in imgs.keys():
expected = notchesExpected[src];
img = imgs[src];
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB )
imgGrad = ndimage.morphology.morphological_gradient(img[:,:,0], size=(1,2))
imgGrad[imgGrad<lowGradientThreshold] = 0
labels, count = ndimage.measurements.label(imgGrad)
errors[i, j] = abs(calculateLabellingErrorRate(img, expected))
i += 1
j+=1
sns.heatmap(errors)
plt.plot()
plt.figure()
errorsSummed = errors.sum(axis=0);
errorsSummed
sns.lineplot(data=errorsSummed)
plt.plot()
#plot.figure()
bestThreshold = np.where(errorsSummed == min(errorsSummed))[0][-1];
print("best high pass gradient threshold: ",bestThreshold, "\n\terror count:", errorsSummed[bestThreshold])
# low pass gradient gave a best score of 60 errors, not enough. combining both systems gave a best score of 38. we will keep the simple high pass.
#%%
# best system, simple high filter, with a threshold of 52 or around 50
#%%
img = imgs["frame377_hero_B1_health"];
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB )
plt.imshow(img),plt.colorbar(),plt.show()
imgGrad = ndimage.morphology.morphological_gradient(img[:,:,0], size=(1,2))
plt.imshow(imgGrad),plt.colorbar(),plt.show()
imgGrad[imgGrad<30] = 0
plt.imshow(imgGrad),plt.colorbar(),plt.show()
label, num_features = ndimage.measurements.label(imgGrad)
print("###RGB mode on gradient")
print(num_features, " features detected / 24.")
HLSImg = cv.cvtColor(analyzedImgs[imgSrc], cv.COLOR_RGB2HLS)
label, num_features = ndimage.measurements.label(HLSImg[:,:,0])
print("###HLSImg using Hue")
print(num_features, " features detected / 24.")
label, num_features = ndimage.measurements.label(HLSImg[:,:,1])
print("###HLSImg using Luminosity")
print(num_features, " features detected / 24.")
label, num_features = ndimage.measurements.label(HLSImg[:,:,2])
print("###HLSImg using Saturation")
print(num_features, " features detected / 24.")
#%%
# here we try to differentiate notches types.
notchesTypesExpected = {
"frame377_hero_A1_health":{"white":20,"yellow":3, "blue":0, "red":0},
"frame377_hero_A2_health":{"white":17,"yellow":7, "blue":0, "red":0},
"frame377_hero_A3_health":{"white":8,"yellow":0, "blue":0, "red":0},
"frame377_hero_A4_health":{"white":8,"yellow":2, "blue":0, "red":0},
"frame377_hero_A5_health":{"white":2,"yellow":0, "blue":0, "red":0},
"frame377_hero_A6_health":{"white":2,"yellow":0, "blue":4, "red":0},
"frame377_hero_B1_health":{"white":20,"yellow":4, "blue":0, "red":0},
"frame377_hero_B2_health":{"white":8,"yellow":0, "blue":0, "red":0},
"frame377_hero_B3_health":{"white":8,"yellow":2, "blue":0, "red":0},
"frame377_hero_B4_health":{"white":8,"yellow":0, "blue":0, "red":0},
"frame377_hero_B5_health":{"white":0,"yellow":0, "blue":0, "red":1},
"frame377_hero_B6_health":{"white":8,"yellow":0, "blue":0, "red":0},
}
#%%
img = imgs["frame377_hero_A6_health"];
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB )
originalimg = img
#plt.imshow(img),plt.colorbar(),plt.show()
imgHLS = cv2.cvtColor(img, cv2.COLOR_RGB2HLS )
#plt.imshow(imgHLS[:,:,1]),plt.colorbar(),plt.show()
#imgHLS[:,:,1] = ndimage.grey_erosion(imgHLS[:,:,1], size=(1,2));
imgGrad = ndimage.morphology.morphological_gradient(imgHLS[:,:,1], size=(1,4))
#plt.imshow(imgGrad),plt.colorbar(),plt.show()
imgGrad = ndimage.grey_erosion(imgGrad, size=(2,2));
#plt.imshow(imgGrad),plt.colorbar(),plt.show()
imgGrad = ndimage.grey_dilation(imgGrad, size=(2,2));
#plt.imshow(imgGrad),plt.colorbar(),plt.show()
imgGrad[imgGrad<52] = 0
#plt.imshow(imgGrad),plt.colorbar(),plt.show()
imgHLS[:,:,1] = imgGrad;
imgHLS[imgHLS[:,:,1] == 0] = 0
img = cv2.cvtColor(imgHLS, cv2.COLOR_HLS2RGB )
#plt.imshow(img),plt.colorbar(),plt.show()
labels, count = ndimage.label(imgGrad)
#plt.imshow(labels),plt.colorbar(),plt.show()
#detect colors
#plt.imshow(imgHLS[:,:,0]),plt.colorbar(),plt.show()
#plt.imshow(imgHLS[:,:,1]),plt.colorbar(),plt.show()
#plt.imshow(imgHLS[:,:,2]),plt.colorbar(),plt.show()
colors = {"white":0,"yellow":0, "blue":0, "red":0}
errors = np.zeros((len(notchesTypesExpected.keys()), 4))
i = 0
for imgKey in notchesTypesExpected.keys():
colors = {"white":0,"yellow":0, "blue":0, "red":0}
expectedColors = notchesTypesExpected[imgKey]
img = imgs[imgKey];
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB )
imgGrad = ndimage.morphology.morphological_gradient(img[:,:,0], size=(1,2))
imgGrad[imgGrad<52] = 0
labels, count = ndimage.measurements.label(imgGrad)
for label in range(1, count+1):
labelMeanRGB = np.array(img[labels==label, :].mean(axis=0))
best_dist = -1
best_color = ""
for color in COLOR_VALUES.keys():
curr_dist = np.sqrt(np.sum((COLOR_VALUES[color] - labelMeanRGB) ** 2))
if best_dist == -1 or curr_dist < best_dist:
best_dist = curr_dist
best_color = color
print(i,": ",labelMeanRGB," => ", best_color)
colors[best_color] += 1
# error detection
j=0
for color in colors.keys():
errors[i, j] += abs(expectedColors[color] - colors[color])
print(j,"=",color)
j+=1
i+=1
sns.heatmap(data=errors)
print("total errors:", errors.sum(axis=0))
#%%
#objective: find the best color codes to diminish errors in their case.
#%%
for i in range(1, count+1):
labelRGB = originalimg[labels==i, :]
labelMeanRGB = np.array(labelRGB.mean(axis=0))
best_dist = -1
best_color = ""
print("cluster ",i)
for color in COLOR_VALUES.keys():
curr_dist = np.sqrt(np.sum((COLOR_VALUES[color] - labelMeanRGB) ** 2))
print(color," => ",curr_dist)
if best_dist == -1 or curr_dist < best_dist:
best_dist = curr_dist
best_color = color
colors[best_color] += 1
print(colors)
#detectedimg = np.zeros(originalimg.shape)
#detectedimg = originalimg[labels != 0]
#plt.imshow(detectedimg),plt.colorbar(),plt.show()
#showHealthDetected(img)
#%%
labelHLS = imgHLS[labels==1, :]
labelMeanHLS = np.array(labelHLS.mean(axis=0))
labelMeanHLS[1] = labelMeanHLS[1]/256
plt.imshow(labelHLS),plt.colorbar(),plt.show()
plt.imshow([labelMeanHLS]),plt.colorbar(),plt.show()
distToRed = abs(labelMeanHLS[0] - RED_HUE)
distToYellow = abs(labelMeanHLS[0] - ORANGE_HUE)
distToBlue = abs(labelMeanHLS[0] - BLUE_HUE)
distToWhite = abs(labelMeanHLS[1] - 100)
#%%
labelRGB = originalimg[labels==1]
#plt.imshow(labelRGB),plt.colorbar(),plt.show()
labelMeanRGB = np.array([labelRGB.mean(axis=0)])
labelMeanRGB = labelMeanRGB / 255
#plt.imshow(labelMeanRGB),plt.colorbar(),plt.show()
plt.imshow([labelMeanRGB]),plt.colorbar(),plt.show()
#%%
# # reading the current results.
results = pd.read_csv("C:\\Users\\Helldragger\\Documents\\projects\\MetaWatch\\MetaWatch\\data\\temp\\2.csv")
# barlett, kinda clean (4-6 anomalies)
# bohman, blackmanharris, nuttall = same
# rolling window:
# [2.s] 120 clean( 1 anomaly)
# [1.5] 90 semi clean (3 anomalies).
# [1.s] 60 semi clean ( 4 - 6 death anomalies)
# [.5s] 30 ugly (10+)
res2 = results.groupby(['team', "hero"])["health", "armor", "shield", "death"].rolling(120).mean().reset_index()#.unstack(['team', "hero"])
res2["frame"] = res2["level_2"] // 12
res2.loc[res2.death > 0, "death"] = 1
res2 = res2.drop("level_2", axis=1)
res3 = pd.melt(res2, ['team', "hero", "frame", "death"])
#sns.relplot(x="frame", y="value", hue='variable', col="team", kind="line", data=res3, row="hero")
plt.style.use("seaborn-colorblind")
fig, axes = plt.subplots(6,2, figsize=(1920,1080), dpi=400)
i = 0
for team in res2.team.unique():
j = 0
for hero in res2.hero.unique():
frames = res2.loc[(res2.team==team) & (res2.hero == hero), "frame"]
health = res2.loc[(res2.team==team) & (res2.hero == hero), "health"]
shield = res2.loc[(res2.team==team) & (res2.hero == hero), "shield"]
armor = res2.loc[(res2.team==team) & (res2.hero == hero), "armor"]
axes[j,i].stackplot(frames,
health,
armor,
shield,cmap=plt.get_cmap("Accent"))
j+=1
i+=1
#plt.title('Recorded Game Statistics')
plt.show()
fig, axes = plt.subplots(6,2)
i = 0
for team in res2.team.unique():
j = 0
for hero in res2.hero.unique():
current_data = res2.loc[(res2.team==team) & (res2.hero == hero)]
frames = current_data.frame
daed_frames = (current_data.health < 25) & (current_data.armor < 25) & (current_data.death == 1)
axes[j,i].stackplot(frames,daed_frames, cmap=plt.get_cmap("Accent"))
j+=1
i+=1
#%%
# merging
class DeathInterval:
    """A contiguous run of 'dead' frames, doubly linked to its neighbours."""

    def __init__(self, start: int, previous=None, next=None):
        """Create a zero-length interval anchored at frame *start*."""
        self.start = start
        self.end = start
        self.previous = previous
        self.next = next

    def span(self):
        """Number of frames covered: end - start (0 for a fresh interval)."""
        return self.end - self.start
fig, axes = plt.subplots(6,2)
i = 0
for team in res2.team.unique():
j = 0
for hero in res2.hero.unique():
current_data = res2.loc[(res2.team==team) & (res2.hero == hero)]
frames = current_data.frame
# daed_frames = (current_data.health < 25) & (current_data.armor < 25) & (current_data.death == 1)
spawned_frames = (current_data.health >= 25)
axes[j,i].stackplot(frames,spawned_frames)
for frame in daed_frames:
pass # TODO merge small intervals with little distance together, in order to clean the death reports.
j+=1
i+=1
#%%
# reading ultimate values
import pytesseract
def ocr_core(filename):
    """Run Tesseract OCR on the image at *filename* and return the text.

    The image is loaded with OpenCV, converted to grayscale, and
    recognised with the custom 'Koverwatch' traineddata.
    """
    # OpenCV (not Pillow) loads the image; grayscale input helps Tesseract.
    text = pytesseract.image_to_string(cv2.cvtColor(cv2.imread(filename), cv2.COLOR_BGR2GRAY ), lang="Koverwatch")
    return text
#%%
import pytesseract
path = "C:/Users/Helldragger/Documents/projects/MetaWatch/MetaWatch/src/features/OCRTEST.png"
img = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2GRAY )
plt.imshow(img)
plt.show()
srcTri = np.array( [[0, 0], [img.shape[1] - 1, 0], [0, img.shape[0] - 1]] ).astype(np.float32)
dstTri = np.array( [[0, 0], [img.shape[1] - 1, 0], [img.shape[1]*0.08, img.shape[0] - 1]] ).astype(np.float32)
warp_mat = cv.getAffineTransform(srcTri, dstTri)
img = cv.warpAffine(img, warp_mat, (img.shape[1], img.shape[0]))
plt.imshow(img)
plt.show()
thresh = 200
maxValue = 255
img = cv2.threshold(img, thresh, maxValue, cv2.THRESH_BINARY )[1]
plt.imshow(img)
plt.show()
text = pytesseract.image_to_string(img, config="digits", lang="Koverwatch")
print("img text: '{}'".format(text))
#%%
import pytesseract
import numpy as np
import cv2
from matplotlib import pyplot as plt
import seaborn as sns
def convertUltimateImgToText(img):
    """OCR the ultimate-charge percentage out of an RGB crop.

    Pipeline: grayscale -> affine shear -> binary threshold -> Tesseract
    in digits mode.  Displays each intermediate stage with matplotlib and
    returns the recognised text.
    """
    plt.imshow(img);
    plt.show();
    img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY )
    plt.imshow(img);
    plt.show();
    # Shear the image horizontally by ~19% of its width; presumably this
    # straightens the game's italic digit font for OCR -- TODO confirm.
    srcTri = np.array( [[0, 0], [img.shape[1] - 1, 0], [0, img.shape[0] - 1]] ).astype(np.float32)
    dstTri = np.array( [[0, 0], [img.shape[1] - 1, 0], [img.shape[1]*0.19, img.shape[0] - 1]] ).astype(np.float32)
    warp_mat = cv2.getAffineTransform(srcTri, dstTri)
    img = cv2.warpAffine(img, warp_mat, (img.shape[1], img.shape[0]))
    plt.imshow(img);
    plt.show();
    # Keep only near-white pixels (the digits) before recognition.
    thresh = 200
    maxValue = 255
    img = cv2.threshold(img, thresh, maxValue, cv2.THRESH_BINARY )[1]
    plt.imshow(img);
    plt.show();
    text = pytesseract.image_to_string(img, config="digits", lang="Koverwatch")
    print("img ult text: '{}'".format(text))
    return text
def test_UltimateParsing(testSources):
    """Measure ultimate-OCR accuracy over fixture images named
    ``COLOR_expectedValue.png`` and plot an error heatmap
    (rows = expected values, columns = colours).
    """
    # test format: COLOR_expectedValue.png
    expected = {}
    testedColors = set();
    testedValues = set();
    colorToIndex = {}
    valueToIndex = {}
    colorLabels = []
    valueLabels = []
    # Build colour/value -> heatmap-index mappings in first-seen order.
    for src in testSources:
        color, value = src.split("_");
        expected[src] = {"value":value, "color":color};
        if color not in testedColors:
            colorToIndex[color] = len(testedColors)
            testedColors.add(color)
            colorLabels.append(color)
        if value not in testedValues:
            valueToIndex[value] = len(testedValues)
            testedValues.add(value)
            valueLabels.append(value)
    imgs = {}  # NOTE(review): populated nowhere -- appears to be dead
    errors = np.zeros((len(testedValues), len(testedColors)))
    i = 0
    legendPrinted = False
    for src in expected.keys():
        ultimateImg = cv2.imread('src/data/tests/Ultimate/'+src+'.png',-1);
        ultimateImg = cv2.cvtColor(ultimateImg, cv2.COLOR_BGR2RGB )
        # NOTE(review): this assert runs *after* cvtColor, which would
        # already have raised on a failed imread -- check the ordering.
        assert ultimateImg is not None
        value = convertUltimateImgToText(ultimateImg)
        if value != expected[src]["value"]:
            errors[valueToIndex[expected[src]["value"]], colorToIndex[expected[src]["color"]]] += 1
    sns.heatmap(data=errors, xticklabels=colorLabels, yticklabels=valueLabels)#.get_figure()#.savefig("src/data/tests/Ultimate/Tests_ErrorHeatmap.png")
    plt.show()
    totalErrors = errors.sum(axis=0).sum()
    print("total color errors:", errors.sum(axis=0))
    print("total values errors:", errors.sum(axis=1))
    print("total errors:", errors.sum(axis=0).sum())
    ultimateAcc = 1 - (totalErrors / len(testSources))
    print("Ultimate detection accuracy: ", ultimateAcc)
    #assert ultimateAcc >= 0.90 # 10% d'erreur
FULL_TEST_SOURCES = [
"BLUE_0",
"BLUE_1",
"BLUE_14",
"BLUE_15",
"BLUE_16",
"BLUE_24",
"BLUE_25",
"GREEN_13",
"GREEN_16",
"GREEN_21",
"GREEN_22",
"GREEN_39",
"GREEN_42",
"GREY_0",
"GREY_11",
"GREY_13",
"GREY_34",
"ORANGE_0",
"RED_10",
"RED_26",
"WHITE_0",
"WHITE_1",
"WHITE_8",
"WHITE_21",
"WHITE_24",
"WHITE_34"
]
ACTUAL_TEST_SOURCES = [
"BLUE_0",
"BLUE_1",
"BLUE_14",
"BLUE_15",
"BLUE_16",
"BLUE_24",
"BLUE_25",
]
test_UltimateParsing(ACTUAL_TEST_SOURCES)
#%%
from os import listdir
from os.path import isfile, join
imgDir = "src/data/tests/Ultimate/"
onlyfiles = [f for f in listdir(imgDir) if isfile(join(imgDir, f))]
imgHeatmap = None;
lastImg = None;
for fileSrc in onlyfiles:
img = cv.imread(imgDir+"/"+fileSrc)
img = cv.cvtColor(img, cv.COLOR_BGR2GRAY);
if imgHeatmap is None:
imgHeatmap=np.zeros(img.shape)
lastImg = img
if img.shape != imgHeatmap.shape:
continue
else:
imgHeatmap = imgHeatmap + img
sns.heatmap(imgHeatmap);
plt.show();
thresholedHeatmap = imgHeatmap
thresholedHeatmap[thresholedHeatmap> 110000] = 0
sns.heatmap(thresholedHeatmap);
plt.show();
#%%
plt.show();
cv.imshow("heatmap",imgHeatmap)
cv.waitKey(0) | StarcoderdataPython |
4829834 | <filename>FATERUI/common/__init__.py
#!/usr/bin/env python2.7
# coding=utf-8
'''
@date = '15/3/23'
@author = 'xiekun'
@email = '<EMAIL>'
'''
from .camera import Camera
from .camera import cameramanage
# from camera.camera_factory import *
from .infrared.infrared import Infrared
# from camera import CameraProcess, ReceivePictureThread
| StarcoderdataPython |
43019 | import os
class PathManager:
    """Class-level registry of the application's input/output folders.

    All state lives on the class itself so any module can configure or
    query paths without passing an instance around.  Optional UI labels
    are kept in sync whenever a folder path changes.
    """

    # UI label widgets mirroring the configured folders (set via setters).
    input_folder_label = None
    output_folder_label = None

    # Absolute folder paths and import-file metadata.
    _input_folder_path = None
    _output_folder_path = None
    _import_file_path = None
    _import_file_style = None

    @classmethod
    def set_input_folder_label(cls, label):
        """Register the label widget that displays the input folder."""
        cls.input_folder_label = label

    @classmethod
    def set_output_folder_label(cls, label):
        """Register the label widget that displays the output folder."""
        cls.output_folder_label = label

    @classmethod
    def get_input_path(cls, file_name=None):
        """Return the input folder, or the path of *file_name* inside it."""
        if file_name is None:
            return cls._input_folder_path
        return os.path.join(cls._input_folder_path, file_name)

    @classmethod
    def get_output_path(cls, file_name=None):
        """Return the output folder, or the path of *file_name* inside it."""
        if file_name is None:
            return cls._output_folder_path
        return os.path.join(cls._output_folder_path, file_name)

    @classmethod
    def set_input_path(cls, path):
        """Store *path* (absolutized) as the input folder; refresh the label."""
        cls._input_folder_path = os.path.abspath(path)
        if cls.input_folder_label is not None:
            cls.input_folder_label.text = f"Input folder: {cls._input_folder_path}"

    @classmethod
    def set_output_path(cls, path):
        """Store *path* (absolutized) as the output folder; refresh the label."""
        cls._output_folder_path = os.path.abspath(path)
        if cls.output_folder_label is not None:
            cls.output_folder_label.text = f"Output folder: {cls._output_folder_path}"

    @classmethod
    def input_path_exists(cls, path=None):
        """True if the input folder (or *path* inside it) exists."""
        return os.path.exists(cls.get_input_path(path))

    @classmethod
    def output_path_exists(cls, path=None):
        """True if the output folder (or *path* inside it) exists.

        *path* now defaults to None for consistency with
        input_path_exists (backward compatible for existing callers).
        """
        return os.path.exists(cls.get_output_path(path))

    @classmethod
    def open_input_file(cls, file_name, mode):
        """Open *file_name* from the input folder (UTF-8, '\\n' newlines)."""
        return cls._open_file(cls.get_input_path(file_name), mode)

    @classmethod
    def open_output_file(cls, file_name, mode):
        """Open *file_name* in the output folder (UTF-8, '\\n' newlines)."""
        return cls._open_file(cls.get_output_path(file_name), mode)

    @classmethod
    def set_import_file(cls, file_name, style):
        """Remember the absolutized import-file path and its style tag."""
        cls._import_file_path = os.path.abspath(file_name)
        cls._import_file_style = style

    @classmethod
    def get_import_path(cls):
        """Return the last path recorded by set_import_file (absolute)."""
        return cls._import_file_path

    @classmethod
    def get_import_style(cls):
        """Return the style tag recorded by set_import_file."""
        return cls._import_file_style

    @classmethod
    def _open_file(cls, file_name, mode):
        """Open *file_name* with the project's canonical text settings."""
        return open(file_name, mode, encoding='utf-8', newline='\n')
1712090 | from pydivert import WinDivert
from threading import Thread
from requests import Session
from time import sleep
def DivertRST():
    # Endlessly intercept empty TCP segments arriving from port 443, clear
    # their RST flag and re-inject them, so server-sent resets never reach
    # the local stack.
    while True:
        with WinDivert("tcp.SrcPort == 443 and tcp.PayloadLength == 0") as w:
            try:
                for packet in w:
                    packet.tcp.rst = False
                    w.send(packet)
            except:
                # NOTE(review): bare except silently restarts the capture
                # loop on any error; the `with` block closes w again on
                # exit -- confirm double-close is harmless for WinDivert
                # handles.
                w.close()
def DivertDecorator(func):
    """Decorator that keeps the WinDivert RST-stripping loop (DivertRST)
    running in a background thread while the wrapped function executes.

    :param func: the callable to wrap.
    :return: a wrapper with the same call signature as ``func``.
    """
    def wrapper(*args, **kwargs):
        # DivertRST loops forever; run it as a daemon thread so it does not
        # keep the process alive after the request finishes.
        divert_thread = Thread(target=DivertRST, daemon=True)
        divert_thread.start()
        # Call the wrapped function exactly once and return its result.
        # The original started func in a second thread AND called it again
        # directly, executing every decorated request twice.
        return func(*args, **kwargs)
    return wrapper
class SniSession:
    """Thin wrapper around ``requests.Session`` whose state-changing verbs
    run with the WinDivert RST-stripping loop active (via DivertDecorator).

    All calls delegate to the wrapped ``self.sni_session``.
    """

    def __init__(self):
        self.sni_session = Session()

    def request(self, method, url,
                params=None, data=None, headers=None, cookies=None, files=None,
                auth=None, timeout=None, allow_redirects=True, proxies=None,
                hooks=None, stream=None, verify=None, cert=None, json=None):
        """Delegate to Session.request, forwarding the caller's arguments.

        Bug fix: the original passed literal ``None``/``True`` for every
        keyword, silently discarding params, data, headers, auth, timeout,
        etc. supplied by the caller.
        """
        return self.sni_session.request(
            method, url,
            params=params, data=data, headers=headers, cookies=cookies,
            files=files, auth=auth, timeout=timeout,
            allow_redirects=allow_redirects, proxies=proxies, hooks=hooks,
            stream=stream, verify=verify, cert=cert, json=json)

    @DivertDecorator
    def get(self, url, **kwargs):
        return self.sni_session.get(url, **kwargs)

    def options(self, url, **kwargs):
        return self.sni_session.options(url, **kwargs)

    def head(self, url, **kwargs):
        return self.sni_session.head(url, **kwargs)

    @DivertDecorator
    def post(self, url, data=None, json=None, **kwargs):
        return self.sni_session.post(url, data, json, **kwargs)

    @DivertDecorator
    def put(self, url, data=None, **kwargs):
        return self.sni_session.put(url, data, **kwargs)

    @DivertDecorator
    def patch(self, url, data=None, **kwargs):
        return self.sni_session.patch(url, data, **kwargs)

    @DivertDecorator
    def delete(self, url, **kwargs):
        return self.sni_session.delete(url, **kwargs)

    @DivertDecorator
    def send(self, request, **kwargs):
        return self.sni_session.send(request, **kwargs)

    def merge_environment_settings(self, url, proxies, stream, verify, cert):
        return self.sni_session.merge_environment_settings(url, proxies, stream, verify, cert)

    def get_adapter(self, url):
        return self.sni_session.get_adapter(url)

    def close(self):
        return self.sni_session.close()

    def mount(self, prefix, adpater):
        # NOTE(review): keeps the original's 'adpater' parameter-name typo
        # so keyword callers do not break.
        return self.sni_session.mount(prefix, adpater)

    def __getstate__(self):
        # Delegate to the wrapped session.  The original called
        # self.__getstate__() recursively, raising RecursionError on any
        # pickling attempt.
        return self.sni_session.__getstate__()

    def __setstate__(self, state):
        # Same recursion bug as __getstate__ in the original; restore the
        # wrapped session's state instead.
        self.sni_session = Session()
        return self.sni_session.__setstate__(state)
| StarcoderdataPython |
1787675 | import datetime
import os
import json
# environment variables must be set
# Environment-driven test configuration.
# NOTE(review): os.getenv returns a *string* when the variable is set, so
# TEST_USE_STATIC_DATA is either the bool True (default) or a string --
# confirm how callers test it before tightening the type.  The port
# defaults are ints while env overrides would be strings.
TEST_USE_STATIC_DATA = os.getenv('TEST_USE_STATIC_DATA', True)
test_api_key_search = os.getenv('TEST_API_KEY_SEARCH')
test_api_key_stream = os.getenv('TEST_API_KEY_STREAM')

# Expected number of ads in the static test data set.
NUMBER_OF_ADS = 1495
DAWN_OF_TIME = '1971-01-01T00:00:01'
current_time_stamp = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S")

headers_search = {'api-key': test_api_key_search, 'accept': 'application/json'}
headers_stream = {'api-key': test_api_key_stream, 'accept': 'application/json'}

test_url_search = os.getenv('TEST_URL_SEARCH', 'http://localhost')
test_port_search = os.getenv('TEST_PORT_SEARCH', 5000)
# Bug fix: the stream endpoint previously read the *_SEARCH variables
# (copy-paste), so a deployment could never point the stream API at a
# different host/port.  Defaults are unchanged, so behaviour is identical
# when the variables are unset.
test_url_stream = os.getenv('TEST_URL_STREAM', 'http://localhost')
test_port_stream = os.getenv('TEST_PORT_STREAM', 5000)

SEARCH_URL = f"{test_url_search}:{test_port_search}"
STREAM_URL = f"{test_url_stream}:{test_port_stream}"

# Lower-cased phrases used to flag "remote work" ads.
REMOTE_MATCH_PHRASES = [y.lower() for y in ["Arbeta på distans", "Arbete på distans", "Jobba på distans", "Arbeta hemifrån",
                                            "Arbetar hemifrån", "Jobba hemifrån", "Jobb hemifrån", "remote work", "jobba tryggt hemifrån"]]
REMOTE_PHRASES_FOR_SWAGGER = json.dumps(REMOTE_MATCH_PHRASES, ensure_ascii=False)
def format_remote_phrases_for_swagger(phrases=None):
    """Return phrases as a comma-separated list of double-quoted strings,
    e.g. '"remote work", "jobba hemifrån"', for display in Swagger docs.

    :param phrases: optional iterable of phrases; defaults to
        REMOTE_MATCH_PHRASES when omitted (backward compatible --
        existing callers pass no arguments).
    :return: the formatted string; empty string for an empty iterable.
    """
    if phrases is None:
        phrases = REMOTE_MATCH_PHRASES
    # join produces exactly what the original append-then-rstrip loop did.
    return ', '.join(f'"{p}"' for p in phrases)
| StarcoderdataPython |
3277920 | <reponame>spowlas/sarpy
from algorithm_toolkit import app
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(description='Development Server Help')
    parser.add_argument(
        "-d",
        "--debug",
        action="store_true",
        dest="debug_mode",
        help="run in debug mode (for use with PyCharm)",
        # Bug fix: store_true combined with default=True made the -d flag a
        # no-op (debug mode was unconditionally on).  Pass -d to enable it.
        default=False
    )
    parser.add_argument(
        "-p",
        "--port",
        dest="port",
        help="port of server (default:%(default)s)",
        type=int,
        default=5000
    )
    cmd_args = parser.parse_args()

    # Build the run() options; in debug mode the framework's own debugger
    # and reloader are disabled, presumably so IDE breakpoints are hit
    # directly -- the option names mirror Flask's app.run().
    app_options = {"port": cmd_args.port}
    if cmd_args.debug_mode:
        app_options["debug"] = True
        app_options["use_debugger"] = False
        app_options["use_reloader"] = False

    app.run(**app_options)
| StarcoderdataPython |
1621734 | <gh_stars>1-10
"""Example with a device defined in pyvisa-sim
==============================================
"""
from fluidlab.interfaces import PhysicalInterfaceType, set_default_interface
from fluidlab.interfaces.visa_inter import set_default_pyvisa_backend
from fluidlab.instruments.drivers import Driver
from fluidlab.instruments.features import (
QueryCommand,
StringValue,
FloatValue,
BoolValue,
)
set_default_interface(PhysicalInterfaceType.Serial, "VISAInterface")
set_default_pyvisa_backend("@sim")
class Device2(Driver):
    """Simple instrument driver

    The instrument is defined in pyvisa-sim (as "device 2").
    """
# Attach the instrument's features to the class at import time: an identity
# query plus rail/voltage/current/output values mapped to their SCPI-style
# command strings, with value limits enforced by the feature objects.
Device2._build_class_with_features(
    [
        QueryCommand("get_idn", doc="Get identity", command_str="*IDN?"),
        StringValue(
            "rail",
            doc="A string rail",
            command_set="INST",
            valid_values=["P6V", "P25V", "N25V"],
        ),
        FloatValue(
            "voltage",
            doc="The voltage (in V)",
            command_set=":VOLT:IMM:AMPL",
            limits=[0, 6],
        ),
        FloatValue(
            "current",
            doc="The current (in A)",
            command_set=":CURR:IMM:AMPL",
            limits=[0, 6],
        ),
        BoolValue("output_enabled", doc="Output enabled", command_set="OUTP"),
    ]
)
if __name__ == "__main__":
    # Open the simulated instrument on serial resource ASRL2 (the "@sim"
    # pyvisa backend is selected at module import above).
    dev = Device2("ASRL2::INSTR")
| StarcoderdataPython |
3284438 | <gh_stars>0
#
# <NAME>, <NAME>
# 10/09/2018
#
import tensorflow as tf
import numpy as np
def iou(prediction, mask, name):
    """Mean precision of the predicted label map against ``mask`` at the
    competition's thresholds (TF1 graph-mode metric op named ``name``)."""
    # Compute the argmax for the output to match mask shape
    prediction = tf.expand_dims(tf.argmax(prediction, axis=-1), axis=-1)
    # Thresholds as specified by competition
    thresholds = np.arange(0.5, 0.95, 0.05, dtype='float32')
    # Compute precision at thresholds.
    # NOTE(review): tf.metrics.precision_at_thresholds returns a
    # (precision, update_op) pair; reduce_mean below therefore averages
    # over BOTH tensors -- confirm that is intended rather than averaging
    # the precision values alone.
    precision = tf.metrics.precision_at_thresholds(mask, prediction, thresholds)
    # Compute the mean for this step
    mean_iou = tf.reduce_mean(precision, name=name)
    return mean_iou
def cross_entropy(prediction, mask, num_classes, name):
    """Mean binary cross-entropy between ``prediction`` and the one-hot
    encoding of the integer ``mask``, as a TF op named ``name``."""
    # One-hot encode the (squeezed) mask so it matches the prediction's
    # per-class layout.
    one_hot_mask = tf.one_hot(tf.squeeze(mask), num_classes, dtype=tf.float32)
    # Per-element binary cross-entropy, then averaged over every element.
    per_element = tf.keras.backend.binary_crossentropy(one_hot_mask, prediction)
    return tf.reduce_mean(per_element, name=name)
| StarcoderdataPython |
1771616 | <filename>scripts/build_all.py
# Copyright 2019-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import argparse
import os
import subprocess
VERSION = '1.13.1'
REPO = 'sagemaker-tensorflow-scriptmode'
PY2_CPU_BINARY = 'https://s3-us-west-2.amazonaws.com/tensorflow-aws/1.13/AmazonLinux/cpu/latest-patch-latest-patch/tensorflow-1.13.1-cp27-cp27mu-linux_x86_64.whl' # noqa
PY3_CPU_BINARY = 'https://s3-us-west-2.amazonaws.com/tensorflow-aws/1.13/AmazonLinux/cpu/latest-patch-latest-patch/tensorflow-1.13.1-cp36-cp36m-linux_x86_64.whl' # noqa
PY2_GPU_BINARY = 'https://s3-us-west-2.amazonaws.com/tensorflow-aws/1.13/AmazonLinux/gpu/latest-patch-latest-patch/tensorflow-1.13.1-cp27-cp27mu-linux_x86_64.whl' # noqa
PY3_GPU_BINARY = 'https://s3-us-west-2.amazonaws.com/tensorflow-aws/1.13/AmazonLinux/gpu/latest-patch-latest-patch/tensorflow-1.13.1-cp36-cp36m-linux_x86_64.whl' # noqa
DEV_ACCOUNT = '142577830533'
REGION = 'us-west-2'
def _parse_args(argv=None):
    """Parse the image-build options.

    :param argv: optional argument list; defaults to ``sys.argv[1:]``.
        Added (backward compatibly) so the parser can be exercised in
        tests without touching the process arguments.
    :return: argparse.Namespace with account, region, version, the four
        TensorFlow wheel URLs and the ECR repo name.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--account', type=str, default=DEV_ACCOUNT)
    parser.add_argument('--region', type=str, default=REGION)
    parser.add_argument('--version', type=str, default=VERSION)
    parser.add_argument('--py2-cpu-binary', type=str, default=PY2_CPU_BINARY)
    parser.add_argument('--py3-cpu-binary', type=str, default=PY3_CPU_BINARY)
    parser.add_argument('--py2-gpu-binary', type=str, default=PY2_GPU_BINARY)
    parser.add_argument('--py3-gpu-binary', type=str, default=PY3_GPU_BINARY)
    parser.add_argument('--repo', type=str, default=REPO)
    return parser.parse_args(argv)
args = _parse_args()
# Map build variant -> TensorFlow wheel URL (possibly overridden on the CLI).
binaries = {
    'py2-cpu': args.py2_cpu_binary,
    'py3-cpu': args.py3_cpu_binary,
    'py2-gpu': args.py2_gpu_binary,
    'py3-gpu': args.py3_gpu_binary
}
build_dir = os.path.join('docker', args.version)
# Run docker-login so we can pull the cached image
login_cmd = subprocess.check_output(
    'aws ecr get-login --no-include-email --registry-id {}'.format(args.account).split())
# NOTE(review): the format string below has no '{}' placeholder, so the
# command is never actually printed.
print('Executing docker login command: '.format(login_cmd))
subprocess.check_call(login_cmd.split())
# Build one image per (cpu/gpu, py2/py3) combination, using the previously
# pushed image as a Docker build cache, then delete the downloaded wheel.
for arch in ['cpu', 'gpu']:
    for py_version in ['2', '3']:
        binary_url = binaries['py{}-{}'.format(py_version, arch)]
        binary_file = os.path.basename(binary_url)
        cmd = 'wget -O {}/{} {}'.format(build_dir, binary_file, binary_url)
        print('Downloading binary file: {}'.format(cmd))
        subprocess.check_call(cmd.split())
        tag = '{}-{}-py{}'.format(args.version, arch, py_version)
        prev_image_uri = '{}.dkr.ecr.{}.amazonaws.com/{}:{}'.format(args.account, args.region, args.repo, tag)
        dockerfile = os.path.join(build_dir, 'Dockerfile.{}'.format(arch))
        # Locate the framework-support sdist inside the build dir (shell
        # glob expansion is why shell=True is needed here).
        tar_file_name = subprocess.check_output('ls {}/sagemaker_tensorflow_container*'.format(build_dir),
                                                shell=True).strip().decode('ascii')
        print('framework_support_installable is {}'.format(os.path.basename(tar_file_name)))
        build_cmd = 'docker build -f {} --cache-from {} --build-arg framework_support_installable={} ' \
                    '--build-arg py_version={} --build-arg framework_installable={} ' \
                    '-t {}:{} {}'.format(dockerfile, prev_image_uri, os.path.basename(tar_file_name), py_version,
                                         binary_file, args.repo, tag, build_dir)
        print('Building docker image: {}'.format(build_cmd))
        subprocess.check_call(build_cmd.split())
        print('Deleting binary file {}'.format(binary_file))
        subprocess.check_call('rm {}'.format(os.path.join(build_dir, binary_file)).split())
| StarcoderdataPython |
3253129 | <reponame>jesnyder/MeasuredStress<gh_stars>0
from c0101_retrieve_ref import retrieve_ref
from c0101_retrieve_ref import retrieve_ref_color
from c0101_retrieve_ref import retrieve_sensor_unit
from c0102_timestamp import timestamp_source
from c0103_trim_record_to_max import trim_record_to_max
from c0104_plot_timestamp import plot_timestamp
from c0105_find_records import find_records
from c0106_record_to_summary import record_to_summary
from c0107_decide_inclusion import decide_inclusion
from c0108_save_meta import save_meta
from c0109_retrieve_meta import retrieve_meta
from c0110_find_temp_end import find_temp_end
from c0111_retrieve_analyzed import retrieve_analyzed
from c0112_plot_truncate import plot_truncate
from c0113_plot_acc import plot_acc
import os
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def find_paired_duration():
    """Compute each record's duration and store it in the study meta file.

    For every study returned by retrieve_ref('study_list'), reads the meta
    DataFrame, fills a new 'recordDuration' column with
    (recordEnd - recordBegin) / 60 per record (begin/end are presumably
    epoch seconds, giving minutes -- TODO confirm), and saves the meta
    file back via save_meta.
    """
    print("begin find_paired_duration")
    study_list = retrieve_ref('study_list')
    for study in study_list:
        df_meta = retrieve_meta(study)
        # print(df_meta)
        source_path = list(df_meta['source_path'])
        # Add an empty column to be filled record by record.
        df_meta['recordDuration'] = [None] * len(source_path)
        for record in source_path:
            # Locate this record's row index, then save the value there.
            i = df_meta[ df_meta['source_path'] == record].index.values[0]
            print('i = ' + str(i))
            recordBegin = int(df_meta.loc[i, 'recordBegin' ] )
            print('recordBegin = ' + str(recordBegin))
            recordEnd = int(df_meta.loc[i, 'recordEnd' ] )
            print('recordEnd = ' + str(recordEnd))
            recordDuration = round((recordEnd - recordBegin)/60 , 4)
            df_meta.loc[i, 'recordDuration' ] = recordDuration
            print('recordDuration = ' + str(recordDuration))
        save_meta(study, df_meta)
        print('df_meta = ')
        print(df_meta)
| StarcoderdataPython |
3289274 | <filename>test.py<gh_stars>10-100
import argparse
import glob
import os
import PIL.Image as pil
import cv2
from crossView import model, CrossViewTransformer, CycledViewProjection
import numpy as np
import torch
from torchvision import transforms
from easydict import EasyDict as edict
import matplotlib.pyplot as PLT
def get_args():
    """Parse the testing command line and return it as an attribute-style
    dict (easydict) -- same options and defaults as before."""
    parser = argparse.ArgumentParser(description="Testing options")
    parser.add_argument("--image_path", type=str, required=True,
                        help="path to folder of images")
    parser.add_argument("--model_path", type=str, required=True,
                        help="path to MonoLayout model")
    parser.add_argument("--ext", type=str, default="png",
                        help="extension of images in the folder")
    parser.add_argument("--out_dir", type=str,
                        default="output directory to save topviews")
    parser.add_argument("--type", type=str, default="static/dynamic/both")
    parser.add_argument("--view", type=str, default=1, help="view number")
    parser.add_argument("--split", type=str,
                        choices=["argo", "3Dobject", "odometry", "raw"],
                        help="Data split for training/validation")
    return edict(vars(parser.parse_args()))
def save_topview(idx, tv, name_dest_im):
    """Binarize a two-channel top-view prediction and write it to
    ``name_dest_im``: pixels where channel 1 beats channel 0 become 255,
    everything else 0.  The destination directory is created on demand.
    (``idx`` is unused; kept for call-site compatibility.)"""
    scores = tv.squeeze().cpu().numpy()
    top_view = np.zeros((scores.shape[1], scores.shape[2]))
    top_view[scores[1] > scores[0]] = 255
    os.makedirs(os.path.dirname(name_dest_im), exist_ok=True)
    cv2.imwrite(name_dest_im, top_view)
    print("Saved prediction to {}".format(name_dest_im))
def test(args):
    """Load the trained crossView networks from ``args.model_path`` and
    predict a binarized top-view map for every image found under
    ``args.image_path``, writing PNGs to ``args.out_dir``/``args.type``.
    Requires a CUDA device.
    """
    models = {}
    device = torch.device("cuda")
    # --- encoder: its checkpoint also carries the expected input size ---
    encoder_path = os.path.join(args.model_path, "encoder.pth")
    encoder_dict = torch.load(encoder_path, map_location=device)
    feed_height = encoder_dict["height"]
    feed_width = encoder_dict["width"]
    models["encoder"] = model.Encoder(18, feed_width, feed_height, False)
    # Keep only checkpoint entries present in the freshly built module.
    filtered_dict_enc = {
        k: v for k,
        v in encoder_dict.items() if k in models["encoder"].state_dict()}
    models["encoder"].load_state_dict(filtered_dict_enc)
    # --- cycled view projection module ---
    CVP_path = os.path.join(args.model_path, "CycledViewProjection.pth")
    CVP_dict = torch.load(CVP_path, map_location=device)
    models['CycledViewProjection'] = CycledViewProjection(in_dim=8)
    filtered_dict_cvp = {
        k: v for k,
        v in CVP_dict.items() if k in models["CycledViewProjection"].state_dict()}
    models["CycledViewProjection"].load_state_dict(filtered_dict_cvp)
    # --- cross-view transformer ---
    CVT_path = os.path.join(args.model_path, "CrossViewTransformer.pth")
    CVT_dict = torch.load(CVT_path, map_location=device)
    models['CrossViewTransformer'] = CrossViewTransformer(128)
    filtered_dict_cvt = {
        k: v for k,
        v in CVT_dict.items() if k in models["CrossViewTransformer"].state_dict()}
    models["CrossViewTransformer"].load_state_dict(filtered_dict_cvt)
    # --- main decoder ---
    decoder_path = os.path.join(args.model_path, "decoder.pth")
    DEC_dict = torch.load(decoder_path, map_location=device)
    models["decoder"] = model.Decoder(
        models["encoder"].resnet_encoder.num_ch_enc)
    filtered_dict_dec = {
        k: v for k,
        v in DEC_dict.items() if k in models["decoder"].state_dict()}
    models["decoder"].load_state_dict(filtered_dict_dec)
    # --- decoder for the transformed (projected) features ---
    transform_decoder_path = os.path.join(args.model_path, "transform_decoder.pth")
    TRDEC_dict = torch.load(transform_decoder_path, map_location=device)
    models["transform_decoder"] = model.Decoder(
        models["encoder"].resnet_encoder.num_ch_enc)
    filtered_dict_trdec = {
        k: v for k,
        v in TRDEC_dict.items() if k in models["transform_decoder"].state_dict()}
    models["transform_decoder"].load_state_dict(filtered_dict_trdec)
    # Move everything to the GPU and switch to inference mode.
    for key in models.keys():
        models[key].to(device)
        models[key].eval()
    # Resolve the list of input images and the output directory.
    if os.path.isfile(args.image_path):
        # Only testing on a single image
        paths = [args.image_path]
        output_directory = os.path.dirname(args.image_path)
    elif os.path.isdir(args.image_path):
        # Searching folder for images
        if args.split == "argo":
            paths = glob.glob(os.path.join(
                args.image_path, '*/ring_front_center/*.{}'.format(args.ext)))
        else:
            paths = glob.glob(os.path.join(
                args.image_path, '*.{}'.format(args.ext)))
        output_directory = args.out_dir
        try:
            os.mkdir(output_directory)
        except BaseException:
            pass
    else:
        raise Exception(
            "Can not find args.image_path: {}".format(
                args.image_path))
    print("-> Predicting on {:d} test images".format(len(paths)))
    # PREDICTING ON EACH IMAGE IN TURN
    with torch.no_grad():
        for idx, image_path in enumerate(paths):
            # Load image and preprocess
            input_image = pil.open(image_path).convert('RGB')
            original_width, original_height = input_image.size
            input_image = input_image.resize(
                (feed_width, feed_height), pil.LANCZOS)
            input_image = transforms.ToTensor()(input_image).unsqueeze(0)
            # PREDICTION: encode, project to top view, fuse with the
            # cross-view transformer, then decode to a 2-channel map.
            input_image = input_image.to(device)
            features = models["encoder"](input_image)
            transform_feature, retransform_features = models["CycledViewProjection"](features)
            features = models["CrossViewTransformer"](features, transform_feature, retransform_features)
            output_name = os.path.splitext(os.path.basename(image_path))[0]
            print("Processing {:d} of {:d} images- ".format(idx + 1, len(paths)))
            tv = models["decoder"](features, is_training=False)
            # transform_tv is computed but only tv is saved below.
            transform_tv = models["transform_decoder"](transform_feature, is_training=False)
            save_topview(
                idx,
                tv,
                os.path.join(
                    args.out_dir,
                    args.type,
                    "{}.png".format(output_name)))
    print('-> Done!')
| StarcoderdataPython |
3252907 |
from ntpath import join
from posixpath import dirname
import numpy as np
import pandas as pd
class CopperModel :
    """Yearly world copper market model.

    Tracks reserves, stock, production and price year by year, driven by a
    demand DataFrame (column 'Demand', indexed by year).  Units are
    presumably megatonnes for quantities and $/t for prices -- TODO confirm
    against the data files.
    """

    # Keys expected in the ``param`` dict passed to __init__.
    DEMAND = 'copper_demand'
    YEAR_START = 'year_start'
    YEAR_END = 'year_end'
    ANNUAL_EXTRACTION = 'annual_extraction'
    INITIAL_RESERVE = 'initial_copper_reserve'
    INITIAL_STOCK = 'initial_copper_stock'
    COPPER_STOCK = 'copper_stock'
    COPPER_PRICE = 'copper_price'
    COPPER_RESERVE = 'copper_reserve'
    PRODUCTION = 'production'
    def __init__(self, param):
        # param: dict holding the keys declared above.
        self.param = param
        self.set_data()
        data_dir = join(dirname(dirname(__file__)), 'sos_wrapping', 'data')
        # Yearly time series, filled in by compute().
        self.copper_stock = pd.DataFrame(columns = ['Year', 'Stock']) #pd.read_csv(join(data_dir, 'copper_previous_stock.csv'))
        self.copper_reserve = pd.DataFrame(columns = ['Year', 'Reserve']) #DataFrame with the world's reserve updated each year
        self.copper_prod_price = pd.DataFrame(columns = ['Year','Price/t', 'Total Price'])#DataFrame with prod price updated each year
        self.copper_prod = pd.DataFrame(columns = ['Year', 'Extraction', 'World Production', 'Cumulated World Production', 'Ratio'])
    def set_data(self):
        # Unpack the parameter dict into attributes.
        self.year_start = self.param[self.YEAR_START]
        self.year_end = self.param[self.YEAR_END]
        self.annual_extraction = self.param[self.ANNUAL_EXTRACTION]
        self.initial_copper_reserve = self.param[self.INITIAL_RESERVE]
        self.initial_copper_stock = self.param[self.INITIAL_STOCK]
        self.copper_demand = self.param[self.DEMAND]
    def compute(self, copper_demand, period_of_exploitation):
        """Run the model over ``period_of_exploitation`` (iterable of years),
        updating reserves, stock/production and price for each year."""
        self.create_dataframe(copper_demand)
        for year in period_of_exploitation :
            # reserves update
            self.compute_copper_reserve(year)
            # stock and production update
            self.compute_copper_stock_and_production(year)
            # determines yearly production price
            self.compute_copper_price(year)
    #### computation steps
    def create_dataframe(self, copper_demand) :
        # NOTE(review): the year range 2020-2100 is hard-coded here while
        # year_start/year_end come from the parameters -- confirm they agree.
        self.copper_demand = copper_demand
        years = np.arange(2020, 2101, 1)
        # filling the column Year
        self.copper_demand['Year'] = years
        self.copper_prod_price['Year'] = years
        self.copper_reserve['Year'] = years
        self.copper_prod['Year'] = years
        self.copper_stock['Year'] = years
        # put the years as index
        self.copper_demand.index = self.copper_demand['Year'].values
        self.copper_prod_price.index = self.copper_prod_price['Year'].values
        self.copper_stock.index = self.copper_stock['Year'].values
        self.copper_reserve.index = self.copper_reserve['Year'].values
        self.copper_prod.index = self.copper_prod['Year'].values
        # initializing every column
        self.copper_prod['Extraction'] = self.annual_extraction
        self.copper_prod['World Production'] = np.linspace(0,0,len(years))
        self.copper_prod['Cumulated World Production'] = np.linspace(0,0,len(years))
        self.copper_prod['Ratio'] = np.linspace(0,0,len(years))
        self.copper_reserve['Reserve'] = np.linspace(0,0,len(years))
        self.copper_prod_price['Total Price'] = np.linspace(0,0,len(years))
        self.copper_stock['Stock'] = np.linspace(0,0,len(years))
        self.copper_prod_price['Price/t'] = np.linspace(0,0,len(years))
    def compute_copper_reserve(self, year):
        """Deduct this year's extraction from the reserve, throttling or
        capping extraction when the reserve runs low, and record the
        extraction/demand ratio (capped at 1)."""
        copper_extraction = self.copper_prod.loc[year, 'Extraction']
        if year == self.year_start :
            remainig_copper = self.initial_copper_reserve
        else :
            remainig_copper = self.copper_reserve.loc[year -1, 'Reserve']
        # If we want to extract more than what is available, we only extract the available
        if remainig_copper < copper_extraction :
            self.copper_reserve.loc[year, 'Reserve']= 0
            self.copper_prod.loc[year, 'Extraction']= remainig_copper
        # If the reserves fall too low, we diminish the extraction
        elif remainig_copper < 500 :
            self.copper_prod.loc[year, 'Extraction'] = 0.95 * self.copper_prod.loc[year , 'Extraction']
            self.copper_reserve.loc[year, 'Reserve'] = remainig_copper - self.copper_prod.loc[year, 'Extraction']
        else :
            self.copper_reserve.loc[year, 'Reserve'] = remainig_copper - copper_extraction
        # Ratio stays at its initial 0 when demand is zero.
        if self.copper_demand.loc[year, 'Demand'] != 0 :
            ratio = self.copper_prod.loc[year, 'Extraction'] / self.copper_demand.loc[year, 'Demand']
            self.copper_prod.loc[year, 'Ratio'] = min(1, ratio)
    def compute_copper_stock_and_production(self, year) :
        """Roll the stock forward with this year's extraction and demand,
        and record the effective world production."""
        if year == self.year_start :
            old_stock = self.initial_copper_stock #self.INITIAL_STOCK['default']
        else :
            old_stock = self.copper_stock.at[year -1, 'Stock']
        extraction = self.copper_prod.at[year, 'Extraction']
        copper_demand = self.copper_demand.at[year, 'Demand']
        # Stock of the previous year plus the extracted minerals, to which we remove the copper demand
        #If the demand is too much and exceeds the stock, then there is no more stock
        new_stock = old_stock + extraction - copper_demand
        if new_stock < 0 :
            self.copper_stock.at[year, 'Stock']= 0
            #If no more Stock, the production is the extracted copper plus what remained of the previous stock (both can be null)
            self.copper_prod.at[year, 'World Production'] = extraction + old_stock
        else :
            self.copper_stock.at[year, 'Stock'] = new_stock
            #if there is still stock, it means the demand was satisfied
            self.copper_prod.at[year, 'World Production'] = copper_demand
        if year == self.year_start :
            self.copper_prod.at[year, 'Cumulated World Production'] = self.copper_prod.at[year, 'World Production']
        else :
            self.copper_prod.at[year, 'Cumulated World Production'] = self.copper_prod.at[year -1, 'Cumulated World Production'] + self.copper_prod.at[year, 'World Production']
    def compute_copper_price (self, year) :
        """Set this year's unit price (first year fixed at 9780, then +1%
        whenever extraction trails demand by more than 5) and the total
        production value."""
        if year == self.year_start :
            self.copper_prod_price.loc[year, 'Price/t'] = 9780
        else :
            # when there is too much of a difference between the demand and the effective extraction, the prices rise
            if self.copper_demand.loc[year, 'Demand'] - self.copper_prod.loc[year, 'Extraction'] > 5 :
                self.copper_prod_price.loc[year, 'Price/t'] = self.copper_prod_price.loc[year - 1, 'Price/t'] * 1.01
            else :
                self.copper_prod_price.loc[year, 'Price/t'] = self.copper_prod_price.loc[year - 1, 'Price/t']
        self.copper_prod_price.loc[year, 'Total Price']= self.copper_prod.loc[year, 'World Production'] * self.copper_prod_price.loc[year, 'Price/t'] *1000 #conversion Mt
| StarcoderdataPython |
11228 | <gh_stars>1-10
{
"targets": [
{
"target_name": "cclust",
"sources": [ "./src/heatmap_clustering_js_module.cpp" ],
'dependencies': ['bonsaiclust']
},
{
'target_name': 'bonsaiclust',
'type': 'static_library',
'sources': [ 'src/cluster.c' ],
'cflags': ['-fPIC', '-I', '-pedantic', '-Wall']
}
]
}
| StarcoderdataPython |
1786914 | <filename>quizzes/quiz1_1.py
# Ask five yes/no questions and count the affirmative ("sim") answers.
# The five copy-pasted question blocks of the original are collapsed into a
# loop; the prompts ("Pergunta 1" .. "Pergunta 5") are unchanged.
contador = 0
for numero in range(1, 6):
    resposta = input(f"Pergunta {numero}")
    if resposta == "sim":
        contador += 1

# Classify by the number of "sim" answers; counts of 0, 1 and 5 fall
# through to the final branch, exactly as in the original chain.
if contador == 2:
    print("Alguma coisa")
elif 3 <= contador <= 4:
    print("outra coisa")
else:
    print("...")
| StarcoderdataPython |
106979 | <filename>nuxeo/client.py
# coding: utf-8
import atexit
import json
import logging
from typing import Any, Dict, Optional, Tuple, Type, Union
from warnings import warn
import requests
from requests.adapters import HTTPAdapter
from urllib3 import __version__ as urllib3_version
from urllib3.util.retry import Retry
from . import (
__version__,
comments,
directories,
documents,
groups,
operations,
tasks,
uploads,
users,
workflows,
)
from .auth.base import AuthBase
from .auth import BasicAuth, TokenAuth
from .constants import (
CHUNK_SIZE,
DEFAULT_API_PATH,
DEFAULT_APP_NAME,
DEFAULT_URL,
IDEMPOTENCY_KEY,
MAX_RETRY,
RETRY_BACKOFF_FACTOR,
RETRY_METHODS,
RETRY_STATUS_CODES,
TIMEOUT_CONNECT,
TIMEOUT_READ,
)
from .exceptions import (
BadQuery,
Conflict,
Forbidden,
HTTPError,
OngoingRequestError,
Unauthorized,
)
from .tcp import TCPKeepAliveHTTPSAdapter
from .utils import json_helper, log_response
AuthType = Optional[Union[Tuple[str, str], AuthBase]]
logger = logging.getLogger(__name__)
# urllib3 1.26 renamed Retry's ``method_whitelist`` parameter to
# ``allowed_methods``; pick the keyword matching the installed version.
# NOTE(review): this is a lexicographic *string* comparison, so e.g.
# "1.9.0" compares greater than "1.26.0" -- confirm the supported urllib3
# range makes this safe.
if urllib3_version < "1.26.0":
    DEFAULT_RETRY = Retry(
        total=MAX_RETRY,
        backoff_factor=RETRY_BACKOFF_FACTOR,
        method_whitelist=RETRY_METHODS,
        status_forcelist=RETRY_STATUS_CODES,
        raise_on_status=False,
    )
else:
    DEFAULT_RETRY = Retry(
        total=MAX_RETRY,
        backoff_factor=RETRY_BACKOFF_FACTOR,
        allowed_methods=RETRY_METHODS,
        status_forcelist=RETRY_STATUS_CODES,
        raise_on_status=False,
    )
# Custom exception to raise based on the HTTP status code
# (default will be HTTPError)
HTTP_ERROR = {
    requests.codes.conflict: Conflict,
    requests.codes.forbidden: Forbidden,
    requests.codes.unauthorized: Unauthorized,
}
class NuxeoClient(object):
"""
The HTTP client used by Nuxeo.
:param auth: An authentication object passed to Requests
:param host: The url of the Nuxeo Platform
:param api_path: The API path appended to the host url
:param chunk_size: The size of the chunks for blob download
:param kwargs: kwargs passed to :func:`NuxeoClient.request`
"""
def __init__(
self,
auth=None, # type: AuthType
host=DEFAULT_URL, # type: str
api_path=DEFAULT_API_PATH, # type: str
chunk_size=CHUNK_SIZE, # type: int
**kwargs, # type: Any
):
# type: (...) -> None
self.auth = BasicAuth(*auth) if isinstance(auth, tuple) else auth
self.host = host
self.api_path = api_path
self.chunk_size = chunk_size
version = kwargs.pop("version", "")
app_name = kwargs.pop("app_name", DEFAULT_APP_NAME)
self.headers = {
"X-Application-Name": app_name,
"X-Client-Version": version,
"User-Agent": app_name + "/" + version,
"Accept": "application/json, */*",
}
self.schemas = kwargs.get("schemas", "*")
self.repository = kwargs.pop("repository", "default")
self._session = requests.sessions.Session()
self._session.hooks["response"] = [log_response]
cookies = kwargs.pop("cookies", None)
if cookies:
self._session.cookies = cookies
self._session.stream = True
self.client_kwargs = kwargs
atexit.register(self.on_exit)
# Cache for the server information
self._server_info = None
# Ensure the host is well formatted
if not self.host.endswith("/"):
self.host += "/"
# The retry adapter
self.retries = kwargs.pop("retries", None) or DEFAULT_RETRY
# Install the retries mecanism
self.enable_retry()
def __repr__(self):
# type: () -> str
return f"{type(self).__name__}<host={self.host!r}, version={self.server_version!r}>"
def __str__(self):
# type: () -> str
return repr(self)
def on_exit(self):
# type: () -> None
self._session.close()
def enable_retry(self):
# type: () -> None
""" Set a max retry for all connection errors with an adaptative backoff. """
self._session.mount(
"https://", TCPKeepAliveHTTPSAdapter(max_retries=self.retries)
)
self._session.mount("http://", HTTPAdapter(max_retries=self.retries))
def disable_retry(self):
# type: () -> None
"""
Restore default mount points to disable the eventual retry
adapters set with .enable_retry().
"""
self._session.close()
self._session.mount("https://", TCPKeepAliveHTTPSAdapter())
self._session.mount("http://", HTTPAdapter())
def query(
self,
query, # type: str
params=None, # type: Dict[str, Any]
):
"""
Query the server with the specified NXQL query.
Additional qery parameters can be set via the `params` argument:
>>> nuxeo.client.query('NXSQL query', params={'properties': '*'})
You can find what parameters to tweak under the `Repository.Query`
operation details.
"""
data = {"query": query}
if params:
data.update(params)
url = self.api_path + "/search/lang/NXQL/execute"
return self.request("GET", url, params=data).json()
def set(self, repository=None, schemas=None):
# type: (Optional[str], Optional[str]) -> NuxeoClient
"""
Set the repository and/or the schemas for the requests.
:return: The client instance after adding the settings
"""
if repository:
self.repository = repository
if schemas:
if isinstance(schemas, list):
schemas = ",".join(schemas)
self.schemas = schemas
return self
def request(
self,
method, # type: str
path, # type: str
headers=None, # type: Optional[Dict[str, str]]
data=None, # type: Optional[Any]
raw=False, # type: bool
**kwargs, # type: Any
):
# type: (...) -> Union[requests.Response, Any]
"""
Send a request to the Nuxeo server.
:param method: the HTTP method
:param path: the path to append to the host
:param headers: the headers for the HTTP request
:param data: data to put in the body
:param raw: if True, don't parse the data to JSON
:param kwargs: other parameters accepted by
:func:`requests.request`
:return: the HTTP response
"""
if method not in (
"GET",
"HEAD",
"POST",
"PUT",
"DELETE",
"CONNECT",
"OPTIONS",
"TRACE",
):
raise BadQuery("method parameter is not a valid HTTP method.")
# Construct the full URL without double slashes
url = self.host + path.lstrip("/")
if "adapter" in kwargs:
url = f"{url}/@{kwargs.pop('adapter')}"
kwargs.update(self.client_kwargs)
# Set the default value to `object` to allow someone
# to set `timeout` to `None`.
if kwargs.get("timeout", object) is object:
kwargs["timeout"] = (TIMEOUT_CONNECT, TIMEOUT_READ)
headers = headers or {}
if "Content-Type" not in headers:
headers["Content-Type"] = kwargs.pop("content_type", "application/json")
headers.update(
{"X-NXDocumentProperties": self.schemas, "X-NXRepository": self.repository}
)
enrichers = kwargs.pop("enrichers", None)
if enrichers:
headers["enrichers-document"] = ", ".join(enrichers)
headers.update(self.headers)
self._check_headers_and_params_format(headers, kwargs.get("params") or {})
if data and not isinstance(data, bytes) and not raw:
data = json.dumps(data, default=json_helper)
# Set the default value to `object` to allow someone
# to set `default` to `None`.
default = kwargs.pop("default", object)
# Allow to pass a custom authentication class
auth = kwargs.pop("auth", None) or self.auth
_kwargs = {k: v for k, v in kwargs.items() if k != "params"}
logged_params = kwargs.get("params", data if not raw else {})
logger.debug(
(
f"Calling {method} {url!r} with headers={headers!r},"
f" params={logged_params!r}, kwargs={_kwargs!r}"
f" and cookies={self._session.cookies!r}"
)
)
exc = None
try:
resp = self._session.request(
method, url, headers=headers, auth=auth, data=data, **kwargs
)
resp.raise_for_status()
except Exception as exc:
if default is object:
raise self._handle_error(exc)
resp = default
finally:
# Explicitly break a reference cycle
exc = None
del exc
return resp
def _check_headers_and_params_format(self, headers, params):
# type: (Dict[str, Any], Dict[str, Any]) -> None
"""Check headers and params keys for dots or underscores and throw a warning if one is found."""
msg = "{!r} {} should not contain '_' nor '.'. Replace with '-' to get rid of that warning."
for key in headers.keys():
if "_" in key or "." in key:
warn(msg.format(key, "header"), DeprecationWarning, 2)
if not isinstance(params, dict):
return
for key in params.keys():
if "_" in key or "." in key:
warn(msg.format(key, "param"), DeprecationWarning, 2)
def request_auth_token(
self,
device_id, # type: str
permission, # type: str
app_name=DEFAULT_APP_NAME, # type: str
device=None, # type: Optional[str]
revoke=False, # type: bool
):
# type: (...) -> str
"""
Request a token for the user.
It should only be used if you want to get a Nuxeo token from a Basic Auth.
:param device_id: device identifier
:param permission: read/write permissions
:param app_name: application name
:param device: optional device description
:param revoke: revoke the token
"""
auth = TokenAuth("")
token = auth.request_token(
self,
device_id,
permission,
app_name=app_name,
device=device,
revoke=revoke,
auth=self.auth,
)
# Use the (potentially re-newed) token from now on
if not revoke:
self.auth = auth
return token
def is_reachable(self):
# type: () -> bool
""" Check if the Nuxeo Platform is reachable. """
response = self.request("GET", "runningstatus", default=False)
if isinstance(response, requests.Response):
return response.ok
else:
return bool(response)
    def server_info(self, force=False):
        # type: (bool) -> Dict[str, str]
        """
        Retrieve server information (cached after the first successful call).

        :param bool force: Force information renewal.
        :return: the "default" repository description fetched from
                 *json/cmis*, or None if it could never be fetched.
        """
        if force or self._server_info is None:
            try:
                response = self.request("GET", "json/cmis")
                self._server_info = response.json()["default"]
            except Exception:
                # Best effort: keep the previous value (possibly None) and log.
                logger.warning(
                    "Invalid response data when called server_info()", exc_info=True
                )
        return self._server_info
@property
def server_version(self):
# type: () -> str
""" Return the server version or "unknown". """
try:
return self.server_info()["productVersion"]
except Exception:
return "unknown"
    @staticmethod
    def _handle_error(error):
        # type: (Exception) -> Exception
        """
        Map a low-level exception to the richest error type available.

        :param error: The error to handle
        :return: the exception the caller should raise (never raises itself)
        """
        # Only HTTP errors carry a response worth parsing.
        if not isinstance(error, requests.HTTPError):
            return error
        response = error.response
        error_data = {}
        try:
            # Nuxeo errors usually ship a JSON payload with the details.
            error_data.update(response.json())
        except ValueError:
            # Non-JSON body: keep the raw payload.
            # NOTE(review): response.content is bytes, not str — confirm
            # downstream formatting copes with that.
            error_data["message"] = response.content
        finally:
            error_data["status"] = response.status_code
        if not error_data.get("message", ""):
            error_data["message"] = response.reason
        status = error_data["status"]
        # A 409 carrying an idempotency key means the same request is still
        # being processed server-side.
        request_uid = response.headers.get(IDEMPOTENCY_KEY, "")
        if status == 409 and request_uid:
            return OngoingRequestError(request_uid)
        return HTTP_ERROR.get(status, HTTPError).parse(error_data)
class Nuxeo(object):
    """
    Instantiate the client and all the API Endpoints.

    :param auth: the authenticator
    :param host: the host URL
    :param app_name: the name of the application using the client
    :param version: the client version string
    :param client: the client class
    :param kwargs: any other argument to forward to every requests calls
    """
    def __init__(
        self,
        auth=None,  # type: Optional[Tuple[str, str]]
        host=DEFAULT_URL,  # type: str
        app_name=DEFAULT_APP_NAME,  # type: str
        version=__version__,  # type: str
        client=NuxeoClient,  # type: Type[NuxeoClient]
        **kwargs,  # type: Any
    ):
        # type: (...) -> None
        # The low-level HTTP client shared by every endpoint below.
        self.client = client(
            auth, host=host, app_name=app_name, version=version, **kwargs
        )
        self.comments = comments.API(self.client)
        self.operations = operations.API(self.client)
        self.directories = directories.API(self.client)
        self.groups = groups.API(self.client)
        self.tasks = tasks.API(self.client)
        self.uploads = uploads.API(self.client)
        self.users = users.API(self.client)
        # Workflows need the tasks endpoint; documents aggregate several APIs.
        self.workflows = workflows.API(self.client, self.tasks)
        self.documents = documents.API(
            self.client, self.operations, self.workflows, self.comments
        )
    def __repr__(self):
        # type: () -> str
        return f"{type(self).__name__}<version={__version__!r}, client={self.client!r}>"
    def __str__(self):
        # type: () -> str
        return repr(self)
    def can_use(self, operation):
        # type: (str) -> bool
        """Return a boolean to let the caller know if the given *operation* can be used."""
        return operation in self.operations.operations
| StarcoderdataPython |
3279531 | <reponame>erezsh/runtype
from datetime import datetime
from unittest import TestCase
from typing import List, Dict
from runtype import dataclass, String, Int, Dispatch
class TestCasts(TestCase):
    """Tests for runtype's ``check_types='cast'`` coercion behaviour."""
    def test_typing_cast(self):
        # Ints are cast from strings; the Int(min=0) constraint still applies.
        @dataclass(check_types='cast')
        class P:
            a: Int(min=0) = None
        assert P(10)
        assert P(10).a == 10
        assert P(0).a == 0
        assert P().a == None
        self.assertRaises(TypeError, P, -3)
        assert P('10').a == 10
        assert P('0').a == 0
        assert P('+3').a == 3
        self.assertRaises(TypeError, P, '-3')
    def test_dates(self):
        # datetime fields accept datetime objects and ISO-format strings.
        @dataclass(check_types='cast')
        class A:
            a: datetime
        d = datetime.now()
        assert A(d).a.toordinal() == d.toordinal()
        assert A(d.isoformat()).a.toordinal() == d.toordinal()
        self.assertRaises(TypeError, A, 'bla')
    def test_cast_dict(self):
        # Nested dataclasses are cast from plain dicts; extra or missing
        # keys must raise.
        @dataclass
        class Point:
            x: float
            y: float
        @dataclass(check_types='cast')
        class Rect:
            start: Point
            end: Point
        start = {'x': 10.0, 'y': 10.0}
        end = {'x': 3.14, 'y': 234.3}
        rect = {'start': start, 'end': end}
        r = Rect(**rect)
        assert r.json() == rect, (dict(r), rect)
        self.assertRaises(TypeError, Rect, start={'x': 10.0, 'y': 10.0, 'z': 42.2}, end=end)
        self.assertRaises(TypeError, Rect, start={'x': 10.0}, end=end)
        # Typed generic dicts: both keys and values must match the hints.
        @dataclass(check_types='cast')
        class A:
            a: dict
            b: Dict[float, String] = None
        A({})
        A({1: 2})
        A({1: 2}, {1.1: 'a'})
        A({1: 2}, {1: 'a'})
        A({1: 2}, None)
        self.assertRaises(TypeError, A, [1])
        self.assertRaises(TypeError, A, {}, {'b': 'c'})
        self.assertRaises(TypeError, A, {}, {3: 2})
    def test_cast_generic(self):
        # Lists of castable dataclasses are cast element-wise.
        @dataclass(check_types='cast')
        class Point:
            x: float
            y: float
        @dataclass(check_types='cast')
        class Polygon:
            points: List[Point]
        p1 = {'x': 1, 'y': 2}
        p2 = {'x': 2, 'y': 3}
        Polygon([p1, p2])
    def test_custom_casts(self):
        # A ``cast_from`` classmethod customizes how values are coerced.
        @dataclass
        class Name:
            first: str
            last: str = None
            @classmethod
            def cast_from(cls, s: str):
                return cls(*s.split())
        @dataclass(check_types='cast')
        class Person:
            name: Name
        p = Person("<NAME>")
        assert p.name.first == 'Albert'
        assert p.name.last == 'Einstein'
        p = Person("Dodo")
        assert p.name.first == 'Dodo'
        assert p.name.last == None
    def test_custom_casts2(self):
        # ``cast_from`` can be overloaded per input type via Dispatch.
        dp = Dispatch()
        @dataclass
        class Name:
            first: str
            last: str = None
            @classmethod
            @dp
            def cast_from(cls, s: str):
                return cls(*s.split())
            @classmethod
            @dp
            def cast_from(cls, s: (tuple, list)):
                return cls(*s)
        @dataclass(check_types='cast')
        class Person:
            name: Name
            bla: int = None
        assert Person("<NAME>") == Person(('Albert', 'Einstein'), None) == Person(['Albert', 'Einstein'])
| StarcoderdataPython |
1628622 | <reponame>watxaut-alpha/joke-app<filename>src/api/src/db/jokes.py
import datetime
import pandas as pd
import sqlalchemy.exc
from sqlalchemy.engine import Engine
import src.db.core as db
def get_random_joke() -> pd.DataFrame:
    """Pick one random sendable joke (``do_send`` unset or true)."""
    connection = db.get_jokes_app_connection()
    return db.get_random_element(
        connection, "jokes_to_send", where="do_send is null or do_send != false"
    )
def __get_sql_jokes(limit, from_author, sent_from):
    """Build the SELECT that fetches the next *limit* unsent, sendable jokes.

    :param limit: maximum number of rows to return
    :param from_author: True keeps only user-submitted jokes,
                        False keeps only scraped ones
    :param sent_from: delivery channel used to filter already-sent jokes
    :return: the SQL text

    NOTE(review): *limit* and *sent_from* are interpolated straight into
    the SQL text; keep them internal/trusted (no user input) or switch to
    parameterized queries.
    """
    if from_author:
        aux_sql = "jokes_to_send.author is not null"
    else:
        aux_sql = "jokes_to_send.author is null"
    sql_author = """
    select
        *
    from
        jokes_to_send
    where
        jokes_to_send.id not in (select joke_id from sent_jokes where sent_from='{sent_from}') and
        (jokes_to_send.do_send is null or jokes_to_send.do_send != false) and
        {author}
    order by created_at desc
    limit {limit}
    """.format(
        limit=limit, author=aux_sql, sent_from=sent_from
    )
    return sql_author
def get_joke_not_sent_by_pfm_already(conn: Engine, limit=1, sent_from="mail") -> pd.DataFrame:
    """Fetch up to *limit* unsent jokes, preferring user-submitted ones.

    Falls back to scraped jokes once every authored joke has been sent;
    returns an empty frame when both pools are exhausted.
    """
    for from_author in (True, False):
        sql = __get_sql_jokes(limit, from_author=from_author, sent_from=sent_from)
        jokes = db.execute_read(conn, sql)
        if not jokes.empty:
            break
    return jokes
def get_5_next_jokes_to_send():
    """Return the next five unsent jokes for the mail channel."""
    connection = db.get_jokes_app_connection()
    return get_joke_not_sent_by_pfm_already(connection, limit=5, sent_from="mail")
def check_user_exists(user_id: str):
    """Return True when *user_id* matches a Telegram user or a mail user.

    NOTE(review): *user_id* is interpolated straight into the SQL text;
    make sure it never carries untrusted input (parameterized queries
    would be safer).
    """
    sql_telegram = "select user_id from users_telegram where user_id='{user_id}'".format(user_id=user_id)
    sql_mail = "select id_hash from users_mail where id_hash='{user_id}'".format(user_id=user_id)
    conn = db.get_jokes_app_connection()
    in_telegram = not db.execute_read(conn, sql_telegram).empty
    in_mail = not db.execute_read(conn, sql_mail).empty
    # At least one match means the id is genuine.
    return in_mail or in_telegram
def check_joke_id_exists(joke_id):
    """Return True when a joke with this primary key exists."""
    sql = "select id from jokes_to_send where id={joke_id}".format(joke_id=joke_id)
    return not db.execute_read(db.get_jokes_app_connection(), sql).empty
def insert_rating_joke(user_id: str, joke_id: int, rating: float, source: str) -> bool:
    """Store a new rating row.

    :return: False when the user id or the joke id is unknown, else True.
    """
    if not (check_user_exists(user_id) and check_joke_id_exists(joke_id)):
        return False
    record = {
        "user_id": user_id,
        "joke_id": joke_id,
        "rating": rating,
        # Timestamps are timezone-naive local time, ISO formatted.
        "created_at": datetime.datetime.now().isoformat(),
        "source": source,
    }
    db.add_record(db.get_jokes_app_connection(), "ratings", record)
    return True
def upsert_rating_joke(user_id: str, joke_id: int, rating: float, source: str) -> bool:
    """Insert a rating, or update it when the (user, joke) pair exists.

    :return: True on success; False for unknown ids or a failed statement.

    NOTE(review): values are interpolated straight into the SQL text —
    keep them trusted or move to parameterized queries.
    """
    if check_user_exists(user_id) and check_joke_id_exists(joke_id):
        try:
            sql = """
            INSERT INTO ratings (user_id, joke_id, rating, created_at, source)
            VALUES ('{user_id}', {joke_id}, {rating}, '{created_at}', '{source}')
            ON CONFLICT (user_id, joke_id)
            DO UPDATE
              SET rating = {rating};
            """.format(
                user_id=user_id,
                joke_id=joke_id,
                rating=rating,
                created_at=datetime.datetime.now().isoformat(),
                source=source,
            )
            conn = db.get_jokes_app_connection()
            db.execute_update(conn, sql)
        except sqlalchemy.exc.ProgrammingError:
            # Malformed statement (e.g. unexpected characters in the values).
            return False
        return True
    else:
        return False  # id user does not exist in DB
def upsert_joke_tag(user_id: [str, int], joke_id: int, tag_id: int):
    """Attach *tag_id* to a joke for a user; duplicate tags are ignored.

    NOTE(review): values are interpolated straight into the SQL text —
    keep them trusted or move to parameterized queries.
    """
    sql = """
    INSERT INTO joke_tags (user_id, joke_id, tag_id, created_at)
    VALUES ('{user_id}', {joke_id}, {tag_id}, '{created_at}')
    ON CONFLICT (user_id, joke_id, tag_id)
    DO NOTHING;
    """.format(user_id=user_id, joke_id=joke_id, tag_id=tag_id,
               created_at=datetime.datetime.now().isoformat())
    db.execute_update(db.get_jokes_app_connection(), sql)
def put_joke_db(joke: str, author: str, author_email: str) -> None:
    """Persist a user-submitted joke with empty rating and tags."""
    record = {
        "joke": joke,
        "author": author,
        "author_email": author_email,
        "rating": None,
        "tags": None,
        "created_at": datetime.datetime.now().isoformat(),
    }
    db.add_record(db.get_jokes_app_connection(), "jokes_to_send", record)
def get_joke(joke_id: int):
    """Return the joke row for *joke_id*, or an empty frame on SQL error."""
    sql = f"select * from jokes_to_send where id = {joke_id}"
    conn = db.get_jokes_app_connection()
    try:
        return db.execute_read(conn, sql)
    except sqlalchemy.exc.SQLAlchemyError:
        return pd.DataFrame()
def delete_joke(joke_id: int):
    """Delete the joke row; returns False when the statement fails."""
    sql = f"delete from jokes_to_send where id = {joke_id}"
    try:
        db.execute_update(db.get_jokes_app_connection(), sql)
    except sqlalchemy.exc.SQLAlchemyError:
        return False
    return True
def put_sent_joke_db(conn: Engine, joke_id: int, sent_from: str) -> None:
    """Record that a joke was delivered through the *sent_from* channel."""
    record = {"joke_id": joke_id, "sent_from": sent_from,
              "created_at": datetime.datetime.now().isoformat()}
    db.add_record(conn, "sent_jokes", record)
def get_tags():
    """Return every tag row as ``{row_index: row_dict}``."""
    frame = db.execute_read(db.get_jokes_app_connection(), "select * from tags")
    return frame.to_dict(orient="index")
def get_untagged_joke():
    """Pick one random joke that has no tag attached yet."""
    connection = db.get_jokes_app_connection()
    untagged = "id not in (select joke_id from joke_tags group by joke_id)"
    return db.get_random_element(connection, "jokes_to_send", where=untagged)
| StarcoderdataPython |
4810456 | # protocol type
# Protocol identifiers supported by the client.
GRN_PROTO_GQTP = "gqtp"
GRN_PROTO_HTTP = "http"
# gqtp: fixed size (bytes) of a GQTP packet header.
GQTP_HEADER_SIZE = 24
# groonga status
# NOTE(review): values below appear to mirror groonga's grn_rc return
# codes, with 0 for success and errors counting down from 65535
# (negative grn_rc values cast to uint16) — confirm against the groonga
# headers before relying on specific codes.
GRN_STATUS_SUCCESS = 0
GRN_STATUS_END_OF_DATA = 1
GRN_STATUS_UNKNOWN_ERROR = 65535
GRN_STATUS_OPERATION_NOT_PERMITTED = 65534
GRN_STATUS_NO_SUCH_FILE_OR_DIRECTORY = 65533
GRN_STATUS_NO_SUCH_PROCESS = 65532
GRN_STATUS_INTERRUPTED_FUNCTION_CALL = 65531
GRN_STATUS_INPUT_OUTPUT_ERROR = 65530
GRN_STATUS_NO_SUCH_DEVICE_OR_ADDRESS = 65529
GRN_STATUS_ARG_LIST_TOO_LONG = 65528
GRN_STATUS_EXEC_FORMAT_ERROR = 65527
GRN_STATUS_BAD_FILE_DESCRIPTOR = 65526
GRN_STATUS_NO_CHILD_PROCESSES = 65525
GRN_STATUS_RESOURCE_TEMPORARILY_UNAVAILABLE = 65524
GRN_STATUS_NOT_ENOUGH_SPACE = 65523
GRN_STATUS_PERMISSION_DENIED = 65522
GRN_STATUS_BAD_ADDRESS = 65521
GRN_STATUS_RESOURCE_BUSY = 65520
GRN_STATUS_FILE_EXISTS = 65519
GRN_STATUS_IMPROPER_LINK = 65518
GRN_STATUS_NO_SUCH_DEVICE = 65517
GRN_STATUS_NOT_A_DIRECTORY = 65516
GRN_STATUS_IS_A_DIRECTORY = 65515
GRN_STATUS_INVALID_ARGUMENT = 65514
GRN_STATUS_TOO_MANY_OPEN_FILES_IN_SYSTEM = 65513
GRN_STATUS_TOO_MANY_OPEN_FILES = 65512
GRN_STATUS_INAPPROPRIATE_I_O_CONTROL_OPERATION = 65511
GRN_STATUS_FILE_TOO_LARGE = 65510
GRN_STATUS_NO_SPACE_LEFT_ON_DEVICE = 65509
GRN_STATUS_INVALID_SEEK = 65508
GRN_STATUS_READ_ONLY_FILE_SYSTEM = 65507
GRN_STATUS_TOO_MANY_LINKS = 65506
GRN_STATUS_BROKEN_PIPE = 65505
GRN_STATUS_DOMAIN_ERROR = 65504
GRN_STATUS_RESULT_TOO_LARGE = 65503
GRN_STATUS_RESOURCE_DEADLOCK_AVOIDED = 65502
GRN_STATUS_NO_MEMORY_AVAILABLE = 65501
GRN_STATUS_FILENAME_TOO_LONG = 65500
GRN_STATUS_NO_LOCKS_AVAILABLE = 65499
GRN_STATUS_FUNCTION_NOT_IMPLEMENTED = 65498
GRN_STATUS_DIRECTORY_NOT_EMPTY = 65497
GRN_STATUS_ILLEGAL_BYTE_SEQUENCE = 65496
GRN_STATUS_SOCKET_NOT_INITIALIZED = 65495
GRN_STATUS_OPERATION_WOULD_BLOCK = 65494
GRN_STATUS_ADDRESS_IS_NOT_AVAILABLE = 65493
GRN_STATUS_NETWORK_IS_DOWN = 65492
GRN_STATUS_NO_BUFFER = 65491
GRN_STATUS_SOCKET_IS_ALREADY_CONNECTED = 65490
GRN_STATUS_SOCKET_IS_NOT_CONNECTED = 65489
GRN_STATUS_SOCKET_IS_ALREADY_SHUTDOWNED = 65488
GRN_STATUS_OPERATION_TIMEOUT = 65487
GRN_STATUS_CONNECTION_REFUSED = 65486
GRN_STATUS_RANGE_ERROR = 65485
GRN_STATUS_TOKENIZER_ERROR = 65484
GRN_STATUS_FILE_CORRUPT = 65483
GRN_STATUS_INVALID_FORMAT = 65482
GRN_STATUS_OBJECT_CORRUPT = 65481
GRN_STATUS_TOO_MANY_SYMBOLIC_LINKS = 65480
GRN_STATUS_NOT_SOCKET = 65479
GRN_STATUS_OPERATION_NOT_SUPPORTED = 65478
GRN_STATUS_ADDRESS_IS_IN_USE = 65477
GRN_STATUS_ZLIB_ERROR = 65476
GRN_STATUS_LZO_ERROR = 65475
GRN_STATUS_STACK_OVER_FLOW = 65474
GRN_STATUS_SYNTAX_ERROR = 65473
GRN_STATUS_RETRY_MAX = 65472
GRN_STATUS_INCOMPATIBLE_FILE_FORMAT = 65471
GRN_STATUS_UPDATE_NOT_ALLOWED = 65470
GRN_STATUS_TOO_SMALL_OFFSET = 65469
GRN_STATUS_TOO_LARGE_OFFSET = 65468
GRN_STATUS_TOO_SMALL_LIMIT = 65467
GRN_STATUS_CAS_ERROR = 65466
GRN_STATUS_UNSUPPORTED_COMMAND_VERSION = 65465
| StarcoderdataPython |
4823038 | """Queue implementation using two stacks."""
from data_structure.stack.oop_stack import Stack
from data_structure.exceptions.collection_exeption import CollectionIsEmptyExeption
from data_structure.exceptions.error_messages import queue_is_empty
class Queue(object):
    """FIFO queue implemented with two LIFO stacks.

    Elements are pushed onto ``_put_stack``; on dequeue they are moved
    (reversed) onto ``_extract_stack``, which restores FIFO order with
    amortized O(1) operations.
    """
    def __init__(self):
        """Create an empty queue backed by two stacks."""
        self._put_stack = Stack()
        self._extract_stack = Stack()
    def __str__(self):
        """Return a string representation of the contents of a queue.
        Returns:
            str (string): contents of a queue
        """
        # NOTE(review): the extract stack holds its items in reversed
        # (dequeue) order, so whether this reads in queue order depends on
        # Stack.__str__ — confirm.
        return str(self._extract_stack) + str(self._put_stack)
    def __len__(self):
        """Return the number of elements in the queue.
        Returns:
            size (int): queue size
        """
        return self.size
    @property
    def is_empty(self):
        """Return True if queue is empty.
        Returns:
            size (bool): true if the queue is empty, else false
        """
        return self._extract_stack.is_empty and self._put_stack.is_empty
    @property
    def size(self):
        """Return the number of elements in the queue.
        Returns:
            size (int): queue size
        """
        return self._put_stack.size + self._extract_stack.size
    def enqueue(self, element):
        """Add element to the back of the queue.
        Args:
            element: element to add to the queue
        """
        self._put_stack.push(element)
    def dequeue(self):
        """Extract the oldest item from the queue.
        Returns:
            item: front item of the queue
        Raises:
            CollectionIsEmptyExeption: if queue is empty
        """
        # Refill the extract stack only when it is empty; popping every
        # pending item over reverses them, so the oldest ends up on top.
        if self._extract_stack.is_empty:
            while not self._put_stack.is_empty:
                self._extract_stack.push(self._put_stack.pop())
        if self._extract_stack.is_empty:
            raise CollectionIsEmptyExeption(queue_is_empty())
        return self._extract_stack.pop()
| StarcoderdataPython |
1797089 | <reponame>kiyoon/PyVideoAI
import torch
import numpy as np
from torch import nn
class BatchRelationalModule(nn.Module):
    """Relation-Network block aggregating pairwise relations between objects.

    Input is a batch of object feature sets (B, num_objects, input_feature_dim)
    — or a list of variable-length (num_objects, input_feature_dim) tensors —
    and the output is one ``num_units``-dim embedding per batch element.
    """
    def __init__(self, input_feature_dim, use_coordinates=False, num_layers=2, num_units=64):
        # input_feature_dim: per-object feature size.
        # use_coordinates: append a scalar object index to each feature vector.
        # num_layers / num_units: depth and width of the relational g-network.
        super(BatchRelationalModule, self).__init__()
        self.input_feature_dim = input_feature_dim
        self.block_dict = nn.ModuleDict()
        self.first_time = True  # NOTE(review): never read afterwards — dead state?
        self.use_coordinates = use_coordinates
        self.num_layers = num_layers
        self.num_units = num_units
        self.build_block()
    def build_block(self):
        """Build the layers by tracing a dummy (4, 4, D) forward pass.

        The Linear layers are sized from the shapes observed during the
        trace, so this must run before the first real forward call (the
        constructor takes care of that).
        """
        print("Assuming input is of size (b=4, num_object=4, input_feature_dim=%d)" % self.input_feature_dim)
        out_img = torch.zeros((4, 4, self.input_feature_dim))
        """g"""
        b, length, c = out_img.shape
        print(out_img.shape)
        # x_flat = (64 x 25 x 24)
        if self.use_coordinates:
            # One scalar index per object, broadcast over the batch.
            self.coord_tensor = []
            for i in range(length):
                self.coord_tensor.append(torch.Tensor(np.array([i])))
            self.coord_tensor = torch.stack(self.coord_tensor, dim=0).unsqueeze(0)
            if self.coord_tensor.shape[0] != out_img.shape[0]:
                self.coord_tensor = self.coord_tensor[0].unsqueeze(0).repeat([out_img.shape[0], 1, 1])
            out_img = torch.cat([out_img, self.coord_tensor], dim=2)
        # Build every ordered pair (i, j) of object features.
        x_i = torch.unsqueeze(out_img, 1)  # (1xh*wxc)
        x_i = x_i.repeat(1, length, 1, 1)  # (h*wxh*wxc)
        x_j = torch.unsqueeze(out_img, 2)  # (h*wx1xc)
        x_j = x_j.repeat(1, 1, length, 1)  # (h*wxh*wxc)
        # concatenate all together
        per_location_feature = torch.cat([x_i, x_j], 3)  # (h*wxh*wx2*c)
        out = per_location_feature.view(
            per_location_feature.shape[0] * per_location_feature.shape[1] * per_location_feature.shape[2],
            per_location_feature.shape[3])
        print(out.shape)
        # g-network: num_layers Linear+LeakyReLU pairs, created on the fly.
        for idx_layer in range(self.num_layers):
            self.block_dict['g_fcc_{}'.format(idx_layer)] = nn.Linear(out.shape[1], out_features=self.num_units, bias=True)
            out = self.block_dict['g_fcc_{}'.format(idx_layer)].forward(out)
            self.block_dict['LeakyReLU_{}'.format(idx_layer)] = nn.LeakyReLU()
            out = self.block_dict['LeakyReLU_{}'.format(idx_layer)].forward(out)
        # reshape again and sum over both pair axes
        print(out.shape)
        out = out.view(per_location_feature.shape[0], per_location_feature.shape[1], per_location_feature.shape[2], -1)
        out = out.sum(1).sum(1)
        print('here', out.shape)
        """f"""
        # f-network: post-processing and output projections.
        self.post_processing_layer = nn.Linear(in_features=out.shape[1], out_features=self.num_units)
        out = self.post_processing_layer.forward(out)
        self.block_dict['LeakyReLU_post_processing'] = nn.LeakyReLU()
        out = self.block_dict['LeakyReLU_post_processing'].forward(out)
        self.output_layer = nn.Linear(in_features=out.shape[1], out_features=self.num_units)
        out = self.output_layer.forward(out)
        self.block_dict['LeakyReLU_output'] = nn.LeakyReLU()
        out = self.block_dict['LeakyReLU_output'].forward(out)
        print('Block built with output volume shape', out.shape)
    def forward(self, x_img):
        """Compute one relational embedding per batch element.

        :param x_img: tensor (B, num_objects, D), or a list of tensors
                      (num_objects_i, D) with varying object counts
        :return: tensor (B, num_units)
        """
        if isinstance(x_img, list):
            # variable length feature count: process each sample's pair
            # matrix, then run the g-network over all pairs at once.
            batch_size = len(x_img)
            length_per_batch = []
            batch_to_g_size = 0  # size of batch that will go into the g network
            for x_img1 in x_img:
                length, c = x_img1.shape
                length_per_batch.append(length)
                batch_to_g_size += length
            out = torch.Tensor()
            out_list = [None] * batch_size
            for b, x_img1 in enumerate(x_img):
                out_img = x_img1
                """g"""
                length, c = out_img.shape
                if self.use_coordinates:
                    if self.coord_tensor.shape[0] != out_img.shape[0]:
                        self.coord_tensor = self.coord_tensor[0].unsqueeze(0).repeat([out_img.shape[0], 1, 1])
                    #print(self.coord_tensor)
                    out_img = torch.cat([out_img, self.coord_tensor.to(x_img1.device)], dim=2)
                # x_flat = (64 x 25 x 24)
                # print('out_img', out_img.shape)
                x_i = torch.unsqueeze(out_img, 0)  # (1xh*wxc)
                x_i = x_i.repeat(length, 1, 1)  # (h*wxh*wxc)
                x_j = torch.unsqueeze(out_img, 1)  # (h*wx1xc)
                x_j = x_j.repeat(1, length, 1)  # (h*wxh*wxc)
                # concatenate all together
                per_location_feature = torch.cat([x_i, x_j], 2)  # (h*wxh*wx2*c)
                out_list[b] = per_location_feature.view(
                    per_location_feature.shape[0] * per_location_feature.shape[1],
                    per_location_feature.shape[2])
            out = torch.cat(out_list)
            #print(out.shape)
            for idx_layer in range(self.num_layers):
                out = self.block_dict['g_fcc_{}'.format(idx_layer)].forward(out)
                out = self.block_dict['LeakyReLU_{}'.format(idx_layer)].forward(out)
            #print(out.shape)
            # reshape again and sum: each sample contributed length**2 pairs.
            out_idx = 0
            #out_list = [None] * batch_size
            for b in range(batch_size):
                out_list[b] = out[out_idx:out_idx+length_per_batch[b] ** 2].view(length_per_batch[b], length_per_batch[b], -1)
                out_list[b] = out_list[b].sum(0).sum(0).unsqueeze(0)
                out_idx += length_per_batch[b] ** 2
            out = torch.cat(out_list, 0)
            #print(out.shape)
            """f"""
            out = self.post_processing_layer.forward(out)
            out = self.block_dict['LeakyReLU_post_processing'].forward(out)
            out = self.output_layer.forward(out)
            out = self.block_dict['LeakyReLU_output'].forward(out)
            # print('Block built with output volume shape', out.shape)
            return out
        else:
            # constant feature count
            out_img = x_img
            """g"""
            b, length, c = out_img.shape
            if self.use_coordinates:
                if self.coord_tensor.shape[0] != out_img.shape[0]:
                    self.coord_tensor = self.coord_tensor[0].unsqueeze(0).repeat([out_img.shape[0], 1, 1])
                #print(self.coord_tensor)
                out_img = torch.cat([out_img, self.coord_tensor.to(x_img.device)], dim=2)
            # x_flat = (64 x 25 x 24)
            # print('out_img', out_img.shape)
            x_i = torch.unsqueeze(out_img, 1)  # (1xh*wxc)
            x_i = x_i.repeat(1, length, 1, 1)  # (h*wxh*wxc)
            x_j = torch.unsqueeze(out_img, 2)  # (h*wx1xc)
            x_j = x_j.repeat(1, 1, length, 1)  # (h*wxh*wxc)
            # concatenate all together
            per_location_feature = torch.cat([x_i, x_j], 3)  # (h*wxh*wx2*c)
            out = per_location_feature.view(
                per_location_feature.shape[0] * per_location_feature.shape[1] * per_location_feature.shape[2],
                per_location_feature.shape[3])
            for idx_layer in range(self.num_layers):
                out = self.block_dict['g_fcc_{}'.format(idx_layer)].forward(out)
                out = self.block_dict['LeakyReLU_{}'.format(idx_layer)].forward(out)
            # reshape again and sum
            # print(out.shape)
            out = out.view(per_location_feature.shape[0], per_location_feature.shape[1], per_location_feature.shape[2], -1)
            #out = out.sum(1).sum(1)
            # NOTE(review): this dense branch mean-pools over the pair axes
            # while the list branch and build_block sum-pool — confirm which
            # aggregation is intended; they scale differently with length.
            out = out.mean(1).mean(1)
            """f"""
            out = self.post_processing_layer.forward(out)
            out = self.block_dict['LeakyReLU_post_processing'].forward(out)
            out = self.output_layer.forward(out)
            out = self.block_dict['LeakyReLU_output'].forward(out)
            # print('Block built with output volume shape', out.shape)
            return out
| StarcoderdataPython |
56226 | import unittest
import pandas as pd
class TestDataFrameStats(unittest.TestCase):
    """Sanity checks on the bounds of the sample DataFrame."""

    def setUp(self):
        # Build the 4-row fixture shared by every test.
        self.df = pd.DataFrame(data={'data': [0, 1, 2, 3]})

    def test_min(self):
        smallest = self.df.min().values[0]
        self.assertGreaterEqual(smallest, 0)

    def test_max(self):
        largest = self.df.max().values[0]
        self.assertLessEqual(largest, 100)
58530 | # Copyright 2017 The Tulsi Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for bazel_build_events.py."""
import io
import json
import unittest
import bazel_build_events
ROOT_ID = {'foo': 'bar'}
CHILD_ID = {'foo': 'child'}
GRANDCHILD_ID = {'foo': 'grandchild'}
ROOT_EVENT_DICT = {
'id': ROOT_ID,
'children': [CHILD_ID],
'progress': {
'stdout': 'Hello',
'stderr': 'World',
},
'namedSetOfFiles': {
'files': [{'uri': 'file:///dir/file.txt'}],
},
}
CHILD_EVENT_DICT = {
'id': CHILD_ID,
'progress': {
'stderr': 'Hello!',
},
}
CHILD_WITHOUT_ID_EVENT_DICT = {
'progress': {
'stderr': 'Hello!',
},
}
CHILD_EVENT_WITH_CHILD_DICT = {
'id': CHILD_ID,
'children': [{'foo': 'grandchild'}],
}
GRANDCHILD_EVENT_DICT = {
'id': GRANDCHILD_ID,
'progress': {
'stderr': 'Hello from the grandchild!',
},
}
class TestFileLineReader(unittest.TestCase):
  """Covers incremental, line-by-line reading of a growing file."""

  def testMultiLine(self):
    stream = io.StringIO()
    stream.write(u'First Line.\nSecond Line.\nThird Line.\n')
    stream.seek(0)
    line_reader = bazel_build_events._FileLineReader(stream)
    for expected in ('First Line.\n', 'Second Line.\n', 'Third Line.\n'):
      self.assertEqual(line_reader.check_for_changes(), expected)
    self.assertIsNone(line_reader.check_for_changes())

  def testLineRescans(self):
    stream = io.StringIO()
    line_reader = bazel_build_events._FileLineReader(stream)
    self.assertIsNone(line_reader.check_for_changes())
    # A partial line (no trailing newline yet) must not be reported.
    stream.write(u'Line')
    stream.seek(0)
    self.assertIsNone(line_reader.check_for_changes())
    stream.seek(0, 2)
    partial_pos = stream.tell()
    stream.write(u'!\n')
    stream.seek(partial_pos)
    # Once the newline lands, the whole line is returned.
    self.assertEqual(line_reader.check_for_changes(), 'Line!\n')
    self.assertIsNone(line_reader.check_for_changes())
class TestBazelBuildEvents(unittest.TestCase):
  """Covers parsing of a single build-event dictionary."""

  def testBuildEventParsing(self):
    event = bazel_build_events.BazelBuildEvent(ROOT_EVENT_DICT)
    self.assertEqual(event.stdout, 'Hello')
    self.assertEqual(event.stderr, 'World')
    # file:// URIs are reduced to plain paths.
    self.assertEqual(event.files, ['/dir/file.txt'])
class TestBazelBuildEventsWatcher(unittest.TestCase):
  """Covers the watcher turning JSON lines into build events."""

  def testWatcherBuildEvent(self):
    stream = io.StringIO()
    watcher = bazel_build_events.BazelBuildEventsWatcher(stream)
    stream.write(json.dumps(ROOT_EVENT_DICT) + u'\n')
    stream.seek(0)
    events = watcher.check_for_new_events()
    self.assertEqual(len(events), 1)
    event = events[0]
    self.assertEqual(event.stdout, 'Hello')
    self.assertEqual(event.stderr, 'World')
    self.assertEqual(event.files, ['/dir/file.txt'])
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
159131 | <reponame>misteraverin/flake8-annotations-coverage
def foo():
    # Lint fixture: intentionally has no annotations at all.
    pass
def bar(*args, kwonly_arg: str = None):
    # Lint fixture: partially annotated on purpose (return annotation
    # missing; the plain ``str`` hint with a None default is deliberate).
    pass
| StarcoderdataPython |
19220 | <reponame>DSciLab/mlutils
from typing import Callable, Optional, Union, Tuple, List
import torch
from torch import nn
from cfg import Opts
from torch import Tensor
from torch.nn import functional as F
from mlutils import LogitToPreds
EPS = 1.0e-8
__all__ = ['IOULoss', 'GDiceLoss', 'SoftDiceLoss',
'CrossEntropyLoss', 'BCELossWithLogits',
'GDiceCELoss', 'GDiceBCELoss', 'SoftDiceCELoss',
'SoftDiceBCELoss', 'DeepSupervisedLoss',
'LossPicker']
def softmax_helper(inp: Tensor) -> Tensor:
    """Softmax over the channel axis (dim 1)."""
    return inp.softmax(dim=1)
def onehot(inp: Tensor, num_classes: int,
           with_channel: Optional[bool]=False) -> Tensor:
    """Convert an integer label tensor into a one-hot float tensor.

    :param inp: labels of shape (B, spatial...) — or (B, 1, spatial...)
                when ``with_channel`` is True
    :param num_classes: size C of the one-hot axis
    :param with_channel: whether ``inp`` already carries a channel axis
    :return: float tensor of shape (B, C, spatial...)
    """
    index = inp if with_channel else inp.unsqueeze(1)
    target_shape = list(index.shape)
    target_shape[1] = num_classes
    result = torch.zeros(target_shape, dtype=torch.float, device=index.device)
    result.scatter_(1, index.type(torch.int64), 1)
    return result
def flatten(inp: Tensor, with_class: Optional[bool]=False) -> Tensor:
    """
    :param inp: input tensor with shape (B, C, spatial_shape)
    :param with_class: keep the class axis as a separate dimension
    :return: (B, C, prod(spatial_shape)) when ``with_class`` is True,
             otherwise (B, C * prod(spatial_shape))
    """
    batch = inp.size(0)
    if with_class:
        return inp.view(batch, inp.size(1), -1)
    return inp.view(batch, -1)
def flatten_with_class(inp: Tensor) -> Tensor:
    """
    :param inp: tensor of shape (B, C, spatial_shape)
    :return: tensor of shape (C, B * prod(spatial_shape))
    """
    by_class = inp.transpose(0, 1).contiguous()
    return by_class.view(by_class.size(0), -1)
def iou_loss(pred: Tensor, gt: Tensor,
             smooth: Optional[float]=0.01,
             ignore_label: Optional[int]=None) -> Tensor:
    """
    Per-sample IOU (Jaccard) loss.

    :param pred: probabilities after the last activation, shape (B, C, spatial)
    :param gt: one-hot ground truth, shape (B, C, spatial)
    :param smooth: additive smoothing, stabilises empty-mask samples
    :param ignore_label: channel index to exclude from the loss
    :return: ``1 - IOU`` per sample, shape (B,)
    """
    assert pred.shape == gt.shape
    if ignore_label is not None:
        # Drop the ignored channel and re-stack along dim=1 so the layout
        # stays (B, C-1, spatial).  The previous default (dim=0) stacking
        # put the class axis in front of the batch axis, so the reduction
        # below was computed per class instead of per sample.
        pred = torch.stack([v for i, v in enumerate(torch.unbind(pred, dim=1))
                            if i != ignore_label], dim=1)
        gt = torch.stack([v for i, v in enumerate(torch.unbind(gt, dim=1))
                          if i != ignore_label], dim=1)
    pred = flatten(pred)
    gt = flatten(gt)
    tp = (pred * gt).sum(-1)
    fp = (pred * (1 - gt)).sum(-1)
    fn = ((1 - pred) * gt).sum(-1)
    iou = (tp + smooth) / (tp + fp + fn + EPS + smooth)
    return 1.0 - iou
def generalized_dice_loss(pred: Tensor, gt: Tensor,
                          smooth: Optional[float]=0.01,
                          with_weight: Optional[bool]=True,
                          ignore_label: Optional[int]=None) -> Tensor:
    """
    Per-sample Generalized Dice loss (Sudre et al., 2017).

    :param pred: probabilities after the last activation, shape (B, C, spatial)
    :param gt: one-hot ground truth, shape (B, C, spatial)
    :param smooth: additive smoothing term
    :param with_weight: weight each class by the inverse squared class
                        volume, balancing rare classes
    :param ignore_label: channel index to exclude from the loss
    :return: ``1 - GDice`` per sample, shape (B,)
    """
    assert pred.shape == gt.shape
    if ignore_label is not None:
        # Re-stack along dim=1 to keep the (B, C-1, spatial) layout; the
        # previous default (dim=0) stacking swapped batch and class axes.
        pred = torch.stack([v for i, v in enumerate(torch.unbind(pred, dim=1))
                            if i != ignore_label], dim=1)
        gt = torch.stack([v for i, v in enumerate(torch.unbind(gt, dim=1))
                          if i != ignore_label], dim=1)
    pred = flatten(pred, with_class=True)
    gt = flatten(gt, with_class=True)
    if with_weight:
        # Class weights: inverse squared class volume over the whole batch.
        gt_class_flatten = flatten_with_class(gt).sum(-1)
        class_weight = 1.0 / (gt_class_flatten * gt_class_flatten + EPS)
        intersect = ((pred * gt).sum(-1) * class_weight.unsqueeze(0)).sum(-1)
        # The denominator must carry the same class weights as the
        # numerator; unweighted, a perfect prediction scored ~1 in loss.
        denominator = ((pred.sum(-1) + gt.sum(-1))
                       * class_weight.unsqueeze(0)).sum(-1)
    else:
        intersect = (pred * gt).sum([-2, -1])
        denominator = pred.sum([-2, -1]) + gt.sum([-2, -1])
    # Dice carries a factor of two: 2|A∩B| / (|A| + |B|).  Without it a
    # perfect prediction only reached dice = 0.5 (loss 0.5 instead of 0).
    return 1.0 - (2.0 * intersect + smooth) / (denominator + EPS + smooth)
def soft_dice_loss(pred: Tensor, gt: Tensor,
                   ignore_label: Optional[int]=None) -> Tensor:
    """
    Per-sample soft Dice loss derived from the IOU loss.

    With dice = 2*IOU / (1 + IOU) and the IOU *loss* L = 1 - IOU, the dice
    loss is 1 - dice = L / (2 - L).  The previous 2L / (1 + L) expression
    over-estimated the loss (e.g. it returned 0.67 instead of 0.33 at
    IOU = 0.5).

    :param pred: probabilities after the last activation, shape (B, C, spatial)
    :param gt: one-hot ground truth, shape (B, C, spatial)
    :param ignore_label: channel index to exclude from the loss
    :return: dice loss per sample, shape (B,)
    """
    iou = iou_loss(pred, gt, ignore_label=ignore_label)
    return iou / (2.0 - iou)
class IOULoss(nn.Module):
    """IOU (Jaccard) loss module; wraps :func:`iou_loss`."""

    def __init__(self, opt: Opts,
                 activation: Optional[Callable]=None,
                 ignore_label: Optional[int]=None) -> None:
        super().__init__()
        self.ignore_label = ignore_label
        # Default to the project-wide logit-to-probability mapping.
        self.activation = activation if activation is not None else LogitToPreds(opt)

    def forward(self, logit: Tensor, gt: Tensor, *,
                reduction: Optional[str]='mean') -> Tensor:
        probs = self.activation(logit)
        loss = iou_loss(probs, gt, ignore_label=self.ignore_label)
        if reduction == 'none':
            return loss
        if reduction == 'mean':
            return loss.mean()
        raise ValueError(f'Unrecognized reduction method ({reduction}).')
class GDiceLoss(nn.Module):
    """Generalized Dice loss module; wraps :func:`generalized_dice_loss`."""

    def __init__(self, opt: Opts,
                 activation: Optional[Callable]=None,
                 with_weight: Optional[bool]=False,
                 ignore_label: Optional[int]=None) -> None:
        super().__init__()
        self.with_weight = with_weight
        self.ignore_label = ignore_label
        # Default to the project-wide logit-to-probability mapping.
        self.activation = activation if activation is not None else LogitToPreds(opt)

    def forward(self, logit: Tensor, gt: Tensor, *,
                onehoted: Optional[bool]=False,
                reduction: Optional[str]='mean') -> Tensor:
        if onehoted:
            onehoted_gt = gt
        else:
            # gt may arrive with or without an explicit channel axis.
            has_channel = gt.ndim == logit.ndim
            onehoted_gt = onehot(gt, logit.size(1), with_channel=has_channel)
        loss = generalized_dice_loss(self.activation(logit), onehoted_gt,
                                     with_weight=self.with_weight,
                                     ignore_label=self.ignore_label)
        if reduction == 'none':
            return loss
        if reduction == 'mean':
            return loss.mean()
        raise ValueError(f'Unrecognized reduction method ({reduction}).')
class SoftDiceLoss(nn.Module):
    """Soft Dice loss module; wraps :func:`soft_dice_loss`."""

    def __init__(self, opt: Opts,
                 activation: Optional[Callable]=None,
                 ignore_label: int=None,
                 *args, **kwargs) -> None:
        super().__init__()
        self.ignore_label = ignore_label
        # Default to the project-wide logit-to-probability mapping.
        self.activation = activation if activation is not None else LogitToPreds(opt)

    def forward(self, logit: Tensor, gt: Tensor, *,
                onehoted: Optional[bool]=False,
                reduction: Optional[str]='mean') -> Tensor:
        if onehoted:
            onehoted_gt = gt
        else:
            # gt may arrive with or without an explicit channel axis.
            has_channel = gt.ndim == logit.ndim
            onehoted_gt = onehot(gt, logit.size(1), with_channel=has_channel)
        loss = soft_dice_loss(self.activation(logit), onehoted_gt,
                              ignore_label=self.ignore_label)
        if reduction == 'none':
            return loss
        if reduction == 'mean':
            return loss.mean()
        raise ValueError(f'Unrecognized reduction method ({reduction}).')
class CrossEntropyLoss(nn.Module):
    """Thin wrapper around ``F.cross_entropy`` with per-sample 'none' mode."""

    def forward(self, logit: Tensor, gt: Tensor, *,
                reduction: Optional[str]='mean',
                ignore_label: Optional[int]=None) -> Tensor:
        """Compute CE; gt is a label map with one dim fewer than logit."""
        assert logit.ndim == gt.ndim + 1
        # -100 is F.cross_entropy's built-in "ignore nothing" default index.
        ignore_index = -100 if ignore_label is None else ignore_label
        per_elem = F.cross_entropy(logit, gt, reduction='none',
                                   ignore_index=ignore_index)
        if reduction == 'mean':
            return per_elem.mean()
        if reduction == 'none':
            # Collapse all non-batch dims -> one loss value per sample.
            return per_elem.mean(list(range(1, per_elem.ndim)))
        raise ValueError(
            f'Unrecognized reduction method ({reduction}).')
class BCELossWithLogits(nn.Module):
    """Binary cross-entropy on raw logits, with optional channel masking."""

    def forward(self, logit: Tensor, gt: Tensor, *,
                reduction: Optional[str]='mean',
                ignore_label: Optional[int]=None) -> Tensor:
        """Compute BCE-with-logits; gt must match logit's shape exactly."""
        assert logit.shape == gt.shape
        if ignore_label is not None:
            # Drop the ignored channel. Note: the surviving channels are
            # restacked along dim 0 (matching the historical behavior of
            # this module), which only matters for reduction='none'.
            logit = torch.stack(
                [v for i, v in enumerate(torch.unbind(logit, dim=1))
                 if i != ignore_label])
            gt = torch.stack(
                [v for i, v in enumerate(torch.unbind(gt, dim=1))
                 if i != ignore_label])
        per_elem = F.binary_cross_entropy_with_logits(logit, gt,
                                                      reduction='none')
        if reduction == 'mean':
            return per_elem.mean()
        if reduction == 'none':
            # Collapse all non-leading dims -> one loss value per row.
            return per_elem.mean(list(range(1, per_elem.ndim)))
        raise ValueError(
            f'Unrecognized reduction method ({reduction}).')
class GDiceCELoss(nn.Module):
    """Weighted sum of generalized Dice loss and cross-entropy."""

    def __init__(self, opt: Opts, dice_weight: Optional[float]=1.0,
                 ce_weight: Optional[float]=1.0,
                 ignore_label: Optional[int]=None,
                 *args, **kwargs) -> None:
        super().__init__()
        self.dice_weight = dice_weight
        self.ce_weight = ce_weight
        self.ignore_label = ignore_label
        # Dice term runs on softmax probabilities; CE consumes raw logits.
        self.dice_loss = GDiceLoss(opt, activation=softmax_helper,
                                   ignore_label=self.ignore_label,
                                   *args, **kwargs)
        self.ce_loss = CrossEntropyLoss()

    def forward(self, logit: Tensor, gt: Tensor, *,
                reduction: Optional[str]='mean') -> Tensor:
        """Return dice_weight * dice + ce_weight * cross-entropy."""
        # cross_entropy expects a label map without the channel dimension.
        ce_target = gt.squeeze(1) if logit.ndim == gt.ndim else gt
        dice_term = self.dice_loss(logit, gt.float(), reduction=reduction)
        ce_term = self.ce_loss(logit, ce_target.long(), reduction=reduction,
                               ignore_label=self.ignore_label)
        return self.dice_weight * dice_term + self.ce_weight * ce_term
class GDiceBCELoss(nn.Module):
    """Weighted sum of generalized Dice loss and binary cross-entropy."""

    def __init__(self, opt: Opts, dice_weight: Optional[float]=1.0,
                 ce_weight: Optional[float]=1.0,
                 ignore_label: Optional[int]=None,
                 *args, **kwargs) -> None:
        super().__init__()
        self.ignore_label = ignore_label
        self.dice_weight = dice_weight
        self.ce_weight = ce_weight
        # Sigmoid activation: channels are treated independently.
        self.dice_loss = GDiceLoss(opt, activation=torch.sigmoid,
                                   ignore_label=self.ignore_label,
                                   *args, **kwargs)
        self.ce_loss = BCELossWithLogits()

    def forward(self, logit: Tensor, gt: Tensor, *,
                reduction: Optional[str]='mean') -> Tensor:
        """Return dice_weight * dice + ce_weight * BCE on one-hot targets."""
        # One-hot encode once and share between both loss terms.
        target = onehot(gt, logit.size(1),
                        with_channel=(gt.ndim == logit.ndim))
        dice_term = self.dice_loss(logit, target, onehoted=True,
                                   reduction=reduction)
        ce_term = self.ce_loss(logit, target, reduction=reduction,
                               ignore_label=self.ignore_label)
        return self.dice_weight * dice_term + self.ce_weight * ce_term
class SoftDiceCELoss(GDiceCELoss):
    """GDiceCELoss variant whose Dice term is the plain soft Dice loss."""

    def __init__(self, opt: Opts, dice_weight: Optional[float]=1.0,
                 ce_weight: Optional[float]=1.0,
                 ignore_label: Optional[int]=None,
                 *args, **kwargs) -> None:
        super().__init__(opt, dice_weight=dice_weight, ce_weight=ce_weight,
                         ignore_label=ignore_label, *args, **kwargs)
        # Replace the generalized Dice term set up by the parent class.
        self.dice_loss = SoftDiceLoss(opt, activation=softmax_helper,
                                      ignore_label=ignore_label,
                                      *args, **kwargs)
class SoftDiceBCELoss(GDiceBCELoss):
    """GDiceBCELoss variant whose Dice term is the plain soft Dice loss."""

    def __init__(self, opt: Opts, dice_weight: Optional[float]=1.0,
                 ce_weight: Optional[float]=1.0,
                 ignore_label: Optional[int]=None,
                 *args, **kwargs) -> None:
        super().__init__(opt, dice_weight=dice_weight, ce_weight=ce_weight,
                         ignore_label=ignore_label, *args, **kwargs)
        # Replace the generalized Dice term set up by the parent class.
        self.dice_loss = SoftDiceLoss(opt, activation=torch.sigmoid,
                                      ignore_label=ignore_label,
                                      *args, **kwargs)
class DeepSupervisedLoss(nn.Module):
    """Apply one loss function at several outputs and combine with weights."""

    def __init__(self, loss_fn: Callable,
                 weights: Union[List, Tuple]) -> None:
        super().__init__()
        self.loss_fn = loss_fn
        self.weights = weights

    def forward(self, logits: Union[Tuple, List],
                gts: Union[Tuple, List],
                **kwargs) -> Tensor:
        """Return sum_i weights[i] * loss_fn(logits[i], gts[i], **kwargs)."""
        assert len(logits) == len(gts)
        assert len(logits) == len(self.weights)
        return sum(weight * self.loss_fn(logit, gt, **kwargs)
                   for logit, gt, weight in zip(logits, gts, self.weights))
class LossPicker(object):
    """Factory that instantiates the loss module named by ``opt.loss``."""

    def __init__(self, opt: Opts, *args, **kwargs) -> None:
        super().__init__()
        assert opt.loss in _loss_dict_.keys(), \
            f'{opt.loss} not in {_loss_dict_.keys()}'
        loss_cls = _loss_dict_[opt.loss]
        self.loss_fn = loss_cls(opt, *args, **kwargs)

    def __call__(self, *args, **kwargs) -> Tensor:
        """Delegate straight to the wrapped loss module."""
        return self.loss_fn(*args, **kwargs)
# Registry of selectable loss modules, keyed by the name given in opt.loss.
_loss_dict_ = {
    'IOULoss': IOULoss,
    'GDiceLoss': GDiceLoss,
    # BUG FIX: this key previously mapped to SoftDiceBCELoss, so requesting
    # 'SoftDiceLoss' silently constructed the combined Dice+BCE loss instead.
    'SoftDiceLoss': SoftDiceLoss,
    'CrossEntropyLoss': CrossEntropyLoss,
    'BCELossWithLogits': BCELossWithLogits,
    'GDiceCELoss': GDiceCELoss,
    'GDiceBCELoss': GDiceBCELoss,
    'SoftDiceCELoss': SoftDiceCELoss,
    'SoftDiceBCELoss': SoftDiceBCELoss
}
| StarcoderdataPython |
#!/pxrpythonsubst
#
# Copyright 2017 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
from pxr import Sdf, Pcp, Tf
import unittest, os
class TestPcpCache(unittest.TestCase):
    """Basic Pcp.Cache coverage: composition, target schemas, reload."""

    def test_Basic(self):
        """Compose a reference chain and check target-schema error handling."""
        # Create a PcpCache for a reference chain, but do not perform any
        # actual composition before querying its used layers. Verify that
        # this does not crash.
        path = 'BasicReference/root.sdf'  # renamed from 'file' (builtin name)
        self.assertTrue(os.path.isfile(path))
        layer = Sdf.Layer.FindOrOpen(path)
        self.assertTrue(layer)
        lsi = Pcp.LayerStackIdentifier(layer)
        self.assertTrue(lsi)
        pcpCache = Pcp.Cache(lsi)
        self.assertTrue(pcpCache)
        pcpCache.GetUsedLayers()
        # Create a PcpCache with a target schema, ensuring that layers
        # without the correct target will be marked invalid during
        # composition.
        pcpCache = Pcp.Cache(lsi, targetSchema='sdf')
        (pi, _) = pcpCache.ComputePrimIndex('/PrimWithReferences')
        self.assertTrue(pi.IsValid())
        self.assertEqual(len(pi.localErrors), 0)
        # XXX: Sdf currently emits coding errors when it cannot find a file
        #      format for the given target. I'm not sure this should be the
        #      case; it probably should treat it as though the file didn't
        #      exist, which does not emit errors.
        pcpCache = Pcp.Cache(lsi, targetSchema='Presto')
        with self.assertRaises(Tf.ErrorException):
            pcpCache.ComputePrimIndex('/PrimWithReferences')
        # Should be two local errors corresponding to invalid asset paths,
        # since this prim has two references to layers with a different
        # target schema.
        (pi, _) = pcpCache.ComputePrimIndex('/PrimWithReferences')
        self.assertTrue(pi.IsValid())
        self.assertEqual(len(pi.localErrors), 2)
        # BUG FIX: the original passed all() a *list containing a generator*
        # (all([(... for e in ...)])), which is always True, so the error
        # types were never actually checked. Pass the generator directly.
        self.assertTrue(all(isinstance(e, Pcp.ErrorInvalidAssetPath)
                            for e in pi.localErrors))

    def test_PcpCacheReloadSessionLayers(self):
        """Reload() must not drop specs authored on session sublayers."""
        rootLayer = Sdf.Layer.CreateAnonymous()
        sessionRootLayer = Sdf.Layer.CreateAnonymous()
        self.assertTrue(sessionRootLayer)
        sessionSubLayer = Sdf.Layer.CreateAnonymous()
        self.assertTrue(sessionSubLayer)
        sessionRootLayer.subLayerPaths.append(sessionSubLayer.identifier)
        # Author something to the sublayer. Keep a reference to the spec for
        # the duration of the test.
        primSpec = Sdf.PrimSpec(sessionSubLayer, "Root", Sdf.SpecifierDef)
        self.assertTrue(sessionSubLayer.GetPrimAtPath("/Root"))
        # Create the Pcp structures.
        lsi = Pcp.LayerStackIdentifier(rootLayer, sessionRootLayer)
        self.assertTrue(lsi)
        pcpCache = Pcp.Cache(lsi)
        self.assertTrue(pcpCache)
        pcpCache.ComputeLayerStack(lsi)
        # Now reload and make sure that the spec on the sublayer stays
        # intact.
        pcpCache.Reload()
        self.assertTrue(sessionSubLayer.GetPrimAtPath("/Root"))
# Allow running this test file directly with the standard unittest runner.
if __name__ == "__main__":
    unittest.main()
| StarcoderdataPython |
# ----- Info ------------------------------------------------------------------
__author__ = '<NAME> <<EMAIL>>'
# ----- Imports ---------------------------------------------------------------
from tinyAPI.base.services.table_builder.exception \
import TableBuilderException
from tinyAPI.base.services.table_builder.mysql \
import _MySQLDateTimeColumn, \
_MySQLNumericColumn, \
_MySQLStringColumn
import tinyAPI
import unittest
# ----- Tests -----------------------------------------------------------------
class TableBuilderMySQLTestCase(unittest.TestCase):
    def test_add_column_dupe_exceptions(self):
        """Adding a second column with an existing name must raise."""
        try:
            tinyAPI.Table('db', 'abc').bit('def').bint('def')
            self.fail('Was able to add two columns with the same name.');
        except TableBuilderException as e:
            self.assertEqual('the column "def" already exists',
                             e.get_message())

    def test_numeric_column_bit(self):
        """bit column renders every modifier plus the default value."""
        self.assertEqual(
            "abcdef bit unsigned zerofill not null "
            + "auto_increment unique default \'1\'",
            _MySQLNumericColumn('abcdef')
                .integer_type(_MySQLNumericColumn.TYPE_BIT)
                .default_value(1)
                .auto_increment()
                .not_null()
                .unique()
                .unsigned()
                .zero_fill()
                .get_definition())

    def test_numeric_column_bint(self):
        """bigint(n) column renders every modifier plus the default value."""
        self.assertEqual(
            "abcdef bigint(13) unsigned zerofill not null "
            + "auto_increment unique default \'1\'",
            _MySQLNumericColumn('abcdef')
                .integer_type(_MySQLNumericColumn.TYPE_BIGINT, 13)
                .default_value(1)
                .auto_increment()
                .not_null()
                .unique()
                .unsigned()
                .zero_fill()
                .get_definition())

    def test_numeric_column_mint(self):
        """mediumint(n) column renders every modifier plus the default."""
        self.assertEqual(
            "abcdef mediumint(13) unsigned zerofill not null "
            + "auto_increment unique default \'1\'",
            _MySQLNumericColumn('abcdef')
                .integer_type(_MySQLNumericColumn.TYPE_MEDIUMINT, 13)
                .default_value(1)
                .auto_increment()
                .not_null()
                .unique()
                .unsigned()
                .zero_fill()
                .get_definition())

    def test_numeric_column_int(self):
        """int(n) column renders every modifier plus the default value."""
        self.assertEqual(
            "abcdef int(13) unsigned zerofill not null "
            + "auto_increment unique default \'1\'",
            _MySQLNumericColumn('abcdef')
                .integer_type(_MySQLNumericColumn.TYPE_INT, 13)
                .default_value(1)
                .auto_increment()
                .not_null()
                .unique()
                .unsigned()
                .zero_fill()
                .get_definition())

    def test_numeric_column_sint(self):
        """smallint(n) column renders every modifier plus the default."""
        self.assertEqual(
            "abcdef smallint(13) unsigned zerofill not null "
            + "auto_increment unique default \'1\'",
            _MySQLNumericColumn('abcdef')
                .integer_type(_MySQLNumericColumn.TYPE_SMALLINT, 13)
                .default_value(1)
                .auto_increment()
                .not_null()
                .unique()
                .unsigned()
                .zero_fill()
                .get_definition())

    def test_numeric_column_tint(self):
        """tinyint(n) column renders every modifier plus the default."""
        self.assertEqual(
            "abcdef tinyint(13) unsigned zerofill not null "
            + "auto_increment unique default \'1\'",
            _MySQLNumericColumn('abcdef')
                .integer_type(_MySQLNumericColumn.TYPE_TINYINT, 13)
                .default_value(1)
                .auto_increment()
                .not_null()
                .unique()
                .unsigned()
                .zero_fill()
                .get_definition())

    def test_numeric_column_dec(self):
        """decimal(p, s) column renders precision/scale and modifiers."""
        self.assertEqual(
            "abcdef decimal(12, 34) unsigned zerofill not null "
            + "auto_increment unique default \'1\'",
            _MySQLNumericColumn('abcdef')
                .decimal_type(_MySQLNumericColumn.TYPE_DECIMAL, 12, 34)
                .default_value(1)
                .auto_increment()
                .not_null()
                .unique()
                .unsigned()
                .zero_fill()
                .get_definition())

    def test_numeric_column_float(self):
        """float(n) column renders precision, modifiers and float default."""
        self.assertEqual(
            "abcdef float(12) unsigned zerofill not null "
            + "auto_increment unique default \'1.0\'",
            _MySQLNumericColumn('abcdef')
                .float_type(12)
                .default_value(1.0)
                .auto_increment()
                .not_null()
                .unique()
                .unsigned()
                .zero_fill()
                .get_definition())
    def test_table_engine_exceptions(self):
        """Unknown storage engines are rejected."""
        try:
            tinyAPI.Table('db', 'abc').engine('def')
            self.fail('Was able to set the engine to an invalid value.')
        except TableBuilderException as e:
            self.assertEqual('the engine "def" is invalid', e.get_message())

    def test_table_get_definition_exceptions(self):
        """A table with no columns cannot produce a definition."""
        try:
            tinyAPI.Table('db', 'abc').get_definition()
            self.fail('Was able to get table definition even though no '
                      + 'columns were provided.')
        except TableBuilderException as e:
            self.assertEqual(
                'the table cannot be defined because it has no columns',
                e.get_message())

    def test_table_simple(self):
        """Single id column with an explicitly selected engine."""
        text = '''create table abc
(
id bigint unsigned not null auto_increment unique
) engine = innodb default charset = utf8 collate = utf8_unicode_ci;'''
        self.assertEqual(text,
            tinyAPI.Table('db', 'abc')
                .engine('InnoDB')
                .id('id', True, True)
                .get_definition())

    def test_table_multi_numeric_columns(self):
        """Several numeric columns render in declaration order."""
        text = '''create table abc
(
id bigint unsigned not null auto_increment unique,
def tinyint(1) default null,
ghi float(12) default null
) engine = myisam default charset = utf8 collate = utf8_unicode_ci;'''
        self.assertEqual(text,
            tinyAPI.Table('db', 'abc')
                .engine('myisam')
                .id('id', True, True)
                .bool('def')
                .float('ghi', False, 12)
                .get_definition())

    def test_table_calling_set_attribute(self):
        """Positional unsigned/zerofill flags on int() are honored."""
        text = '''create table abc
(
def int default null,
ghi int unsigned zerofill default null
) engine = innodb default charset = utf8 collate = utf8_unicode_ci;'''
        self.assertEqual(text,
            tinyAPI.Table('db', 'abc')
                .int('def')
                .int('ghi', False, None, True, True)
                .get_definition())

    def test_table_helper_attribute_methods(self):
        """uk()/ai()/defv() apply to the most recently added column."""
        text = '''create table abc
(
def int default null,
ghi int unique default null,
jkl int auto_increment default null,
mno int default '123'
) engine = innodb default charset = utf8 collate = utf8_unicode_ci;'''
        self.assertEqual(text,
            tinyAPI.Table('db', 'abc')
                .int('def')
                .int('ghi')
                .uk()
                .int('jkl')
                .ai()
                .int('mno')
                .defv(123)
                .get_definition())

    def test_table_active_column_is_primary_key(self):
        """pk() with no arguments marks the active column as primary key."""
        text = '''create table abc
(
def int default null primary key
) engine = innodb default charset = utf8 collate = utf8_unicode_ci;'''
        self.assertEqual(text,
            tinyAPI.Table('db', 'abc')
                .int('def')
                .pk()
                .get_definition())

    def test_temporary_table(self):
        """temp() switches the DDL to 'create temporary table'."""
        text = '''create temporary table abc
(
id bigint unsigned not null auto_increment unique
) engine = innodb default charset = utf8 collate = utf8_unicode_ci;'''
        self.assertEqual(text,
            tinyAPI.Table('db', 'abc')
                .temp()
                .id('id', True, True)
                .get_definition())
    def test_table_composite_primary_key_exceptions(self):
        """A composite PK referencing an undefined column must raise."""
        try:
            tinyAPI.Table('db', 'abc') \
                .int('def') \
                .pk(['def', 'ghi']) \
                .get_definition()
            self.fail('Was able to get the definition for a table even '
                      + 'though one of the columns in the primary key did not '
                      + 'exist.')
        except TableBuilderException as e:
            self.assertEqual(
                'column "ghi" cannot be used in primary key because it has not '
                + 'been defined', e.get_message())

    def test_table_composite_primary_key(self):
        """A composite PK renders as a named 'primary key' clause."""
        text = '''create table abc
(
def int default null,
ghi int default null,
primary key abc_pk (def, ghi)
) engine = innodb default charset = utf8 collate = utf8_unicode_ci;'''
        self.assertEqual(text,
            tinyAPI.Table('db', 'abc')
                .int('def')
                .int('ghi')
                .pk(['def', 'ghi'])
                .get_definition())

    def test_table_composite_unique_key_exceptions(self):
        """A composite UK referencing an undefined column must raise."""
        try:
            tinyAPI.Table('db', 'abc') \
                .int('def') \
                .uk(['def', 'ghi']) \
                .get_definition()
            self.fail('Was able to get the definition for a table even '
                      + 'though one of the columns in a unique key did not '
                      + 'exist.')
        except TableBuilderException as e:
            self.assertEqual(
                'column "ghi" cannot be used in unique key because it has not '
                + 'been defined', e.get_message())

    def test_table_one_composite_unique_key(self):
        """A composite UK renders as a numbered 'unique key' clause."""
        text = '''create table abc
(
def int default null,
ghi int default null,
jkl int default null,
unique key abc_0_uk (def, ghi)
) engine = innodb default charset = utf8 collate = utf8_unicode_ci;'''
        self.assertEqual(text,
            tinyAPI.Table('db', 'abc')
                .int('def')
                .int('ghi')
                .int('jkl')
                .uk(['def', 'ghi'])
                .get_definition())

    def test_table_multiple_composite_unique_keys(self):
        """Multiple composite UKs are numbered sequentially."""
        text = '''create table abc
(
def int default null,
ghi int default null,
jkl int default null,
unique key abc_0_uk (def, ghi),
unique key abc_1_uk (ghi, jkl)
) engine = innodb default charset = utf8 collate = utf8_unicode_ci;'''
        self.assertEqual(text,
            tinyAPI.Table('db', 'abc')
                .int('def')
                .int('ghi')
                .int('jkl')
                .uk(['def', 'ghi'])
                .uk(['ghi', 'jkl'])
                .get_definition())
    def test_date_time_column_type_exception(self):
        """date_time_type rejects unknown type IDs."""
        try:
            _MySQLDateTimeColumn('abc').date_time_type(-1)
            self.fail('Was able to set a date time type even though the type '
                      + 'ID provided was invalid.')
        except TableBuilderException as e:
            self.assertEqual('the type ID provided was invalid',
                             e.get_message())

    def test_date_time_column_date(self):
        """date column renders modifiers and the default value."""
        self.assertEqual(
            "abcdef date not null unique default \'1\'",
            _MySQLDateTimeColumn('abcdef')
                .date_time_type(_MySQLDateTimeColumn.TYPE_DATE)
                .default_value(1)
                .not_null()
                .unique()
                .get_definition())

    def test_date_time_column_datetime(self):
        """datetime column renders modifiers and the default value."""
        self.assertEqual(
            "abcdef datetime not null unique default \'1\'",
            _MySQLDateTimeColumn('abcdef')
                .date_time_type(_MySQLDateTimeColumn.TYPE_DATETIME)
                .default_value(1)
                .not_null()
                .unique()
                .get_definition())

    def test_date_time_column_timestamp(self):
        """timestamp column renders modifiers and the default value."""
        self.assertEqual(
            "abcdef timestamp not null unique default \'1\'",
            _MySQLDateTimeColumn('abcdef')
                .date_time_type(_MySQLDateTimeColumn.TYPE_TIMESTAMP)
                .default_value(1)
                .not_null()
                .unique()
                .get_definition())

    def test_date_time_column_time(self):
        """time column renders modifiers and the default value."""
        self.assertEqual(
            "abcdef time not null unique default \'1\'",
            _MySQLDateTimeColumn('abcdef')
                .date_time_type(_MySQLDateTimeColumn.TYPE_TIME)
                .default_value(1)
                .not_null()
                .unique()
                .get_definition())

    def test_date_time_column_year_2(self):
        """year(2) column honors the explicit precision."""
        self.assertEqual(
            "abcdef year(2) not null unique default \'1\'",
            _MySQLDateTimeColumn('abcdef')
                .date_time_type(_MySQLDateTimeColumn.TYPE_YEAR)
                .precision(2)
                .default_value(1)
                .not_null()
                .unique()
                .get_definition())

    def test_date_time_column_year_4(self):
        """year(4) column honors the explicit precision."""
        self.assertEqual(
            "abcdef year(4) not null unique default \'1\'",
            _MySQLDateTimeColumn('abcdef')
                .date_time_type(_MySQLDateTimeColumn.TYPE_YEAR)
                .precision(4)
                .default_value(1)
                .not_null()
                .unique()
                .get_definition())

    def test_table_date_columns_year_2(self):
        """Table-level helpers for each date type, with a 2-digit year."""
        text = '''create table abc
(
def date not null,
ghi datetime not null,
jkl timestamp not null,
mno time not null,
pqr year(2) not null
) engine = innodb default charset = utf8 collate = utf8_unicode_ci;'''
        self.assertEqual(text,
            tinyAPI.Table('db', 'abc')
                .dt('def', True)
                .dtt('ghi', True)
                .ts('jkl', True)
                .ti('mno', True)
                .yr('pqr', True, 2)
                .get_definition())

    def test_table_date_columns_year_4(self):
        """Table-level helpers for each date type, with a 4-digit year."""
        text = '''create table abc
(
def date not null,
ghi datetime not null,
jkl timestamp not null,
mno time not null,
pqr year(4) not null
) engine = innodb default charset = utf8 collate = utf8_unicode_ci;'''
        self.assertEqual(text,
            tinyAPI.Table('db', 'abc')
                .dt('def', True)
                .dtt('ghi', True)
                .ts('jkl', True)
                .ti('mno', True)
                .yr('pqr', True, 4)
                .get_definition())

    def test_table_created(self):
        """created() adds a non-null date_created datetime column."""
        text = '''create table abc
(
id bigint unsigned not null auto_increment unique,
date_created datetime not null
) engine = innodb default charset = utf8 collate = utf8_unicode_ci;'''
        self.assertEqual(text,
            tinyAPI.Table('db', 'abc')
                .id('id', True, True)
                .created()
                .get_definition())

    def test_table_updated(self):
        """updated() adds an auto-updating date_updated timestamp column."""
        text = '''create table abc
(
id bigint unsigned not null auto_increment unique,
date_updated timestamp default '2000-01-01 00:00:00' on update current_timestamp
) engine = innodb default charset = utf8 collate = utf8_unicode_ci;'''
        self.assertEqual(text,
            tinyAPI.Table('db', 'abc')
                .id('id', True, True)
                .updated()
                .get_definition())
    def test_string_validate_type_id_exceptions(self):
        """Every string-type setter rejects an unknown type ID."""
        try:
            _MySQLStringColumn('abc').binary_type(-1)
            self.fail('Was able to set binary type even though the ID provided '
                      + 'was invalid.')
        except TableBuilderException as e:
            self.assertEqual('the type ID provided was invalid',
                             e.get_message())
        try:
            _MySQLStringColumn('abc').blob_type(-1)
            self.fail('Was able to set blob type even though the ID provided '
                      + 'was invalid.')
        except TableBuilderException as e:
            self.assertEqual('the type ID provided was invalid',
                             e.get_message())
        try:
            _MySQLStringColumn('abc').char_type(-1)
            self.fail('Was able to set char type even though the ID provided '
                      + 'was invalid.')
        except TableBuilderException as e:
            self.assertEqual('the type ID provided was invalid',
                             e.get_message())
        try:
            _MySQLStringColumn('abc').list_type(-1)
            self.fail('Was able to set list type even though the ID provided '
                      + 'was invalid.')
        except TableBuilderException as e:
            self.assertEqual('the type ID provided was invalid',
                             e.get_message())
        try:
            _MySQLStringColumn('abc').text_type(-1)
            self.fail('Was able to set text type even though the ID provided '
                      + 'was invalid.')
        except TableBuilderException as e:
            self.assertEqual('the type ID provided was invalid',
                             e.get_message())

    def test_string_blob_type_exceptions(self):
        """A length is only legal on the plain blob type."""
        for type_id in [_MySQLStringColumn.TYPE_TINYBLOB,
                        _MySQLStringColumn.TYPE_MEDIUMBLOB,
                        _MySQLStringColumn.TYPE_LONGBLOB]:
            try:
                _MySQLStringColumn('abc').blob_type(type_id, 15)
                self.fail('Was able to specify length even though it is not '
                          + 'allowed for a non-blob column.')
            except TableBuilderException as e:
                self.assertEqual(
                    'you can only specify the length if the column is blob',
                    e.get_message())

    def test_string_text_type_exceptions(self):
        """A length is only legal on the plain text type."""
        for type_id in [_MySQLStringColumn.TYPE_TINYTEXT,
                        _MySQLStringColumn.TYPE_MEDIUMTEXT,
                        _MySQLStringColumn.TYPE_LONGTEXT]:
            try:
                _MySQLStringColumn('abc').text_type(type_id, 15)
                self.fail('Was able to specify length even though it is not '
                          + 'allowed for a non-text column.')
            except TableBuilderException as e:
                self.assertEqual(
                    'you can only specify the length if the column is text',
                    e.get_message())

    def test_string_binary_binary(self):
        """binary(n) ignores charset/collation settings."""
        self.assertEqual(
            "abc binary(15) default null",
            _MySQLStringColumn('abc')
                .binary_type(_MySQLStringColumn.TYPE_BINARY, 15)
                .charset('def')
                .collation('ghi')
                .get_definition())

    def test_string_binary_varbinary(self):
        """varbinary(n) ignores charset/collation settings."""
        self.assertEqual(
            "abc varbinary(15) default null",
            _MySQLStringColumn('abc')
                .binary_type(_MySQLStringColumn.TYPE_VARBINARY, 15)
                .charset('def')
                .collation('ghi')
                .get_definition())

    def test_string_blob_tinyblob(self):
        """tinyblob ignores charset/collation settings."""
        self.assertEqual(
            "abc tinyblob default null",
            _MySQLStringColumn('abc')
                .binary_type(_MySQLStringColumn.TYPE_TINYBLOB)
                .charset('def')
                .collation('ghi')
                .get_definition())

    def test_string_blob_blob(self):
        """blob(n) ignores charset/collation settings."""
        self.assertEqual(
            "abc blob(15) default null",
            _MySQLStringColumn('abc')
                .binary_type(_MySQLStringColumn.TYPE_BLOB, 15)
                .charset('def')
                .collation('ghi')
                .get_definition())

    def test_string_blob_mediumblob(self):
        """mediumblob ignores charset/collation settings."""
        self.assertEqual(
            "abc mediumblob default null",
            _MySQLStringColumn('abc')
                .binary_type(_MySQLStringColumn.TYPE_MEDIUMBLOB)
                .charset('def')
                .collation('ghi')
                .get_definition())

    def test_string_blob_longblob(self):
        """longblob ignores charset/collation settings."""
        self.assertEqual(
            "abc longblob default null",
            _MySQLStringColumn('abc')
                .binary_type(_MySQLStringColumn.TYPE_LONGBLOB)
                .charset('def')
                .collation('ghi')
                .get_definition())

    def test_string_char_char(self):
        """char(n) includes charset and collation in the definition."""
        self.assertEqual(
            "abc char(15) character set def collate ghi default null",
            _MySQLStringColumn('abc')
                .char_type(_MySQLStringColumn.TYPE_CHAR, 15)
                .charset('def')
                .collation('ghi')
                .get_definition())

    def test_string_char_varchar(self):
        """varchar(n) includes charset and collation in the definition."""
        self.assertEqual(
            "abc varchar(15) character set def collate ghi default null",
            _MySQLStringColumn('abc')
                .char_type(_MySQLStringColumn.TYPE_VARCHAR, 15)
                .charset('def')
                .collation('ghi')
                .get_definition())

    def test_string_list_enum(self):
        """enum(...) renders quoted values plus charset/collation."""
        self.assertEqual(
            "abc enum('x', 'y') character set def collate ghi default null",
            _MySQLStringColumn('abc')
                .list_type(_MySQLStringColumn.TYPE_ENUM, ['x', 'y'])
                .charset('def')
                .collation('ghi')
                .get_definition())

    def test_string_list_set(self):
        """set(...) renders quoted values plus charset/collation."""
        self.assertEqual(
            "abc set('x', 'y') character set def collate ghi default null",
            _MySQLStringColumn('abc')
                .list_type(_MySQLStringColumn.TYPE_SET, ['x', 'y'])
                .charset('def')
                .collation('ghi')
                .get_definition())
    def test_string_types_in_table(self):
        """Every table-level string helper renders the expected column."""
        text = '''create table abc
(
def char(15) character set utf8 collate utf8_unicode_ci not null,
ghi varchar(16) character set utf8 collate utf8_unicode_ci not null,
jkl binary(17) not null,
mno varbinary(18) not null,
pqr tinyblob not null,
stu blob(19) not null,
vwx mediumblob not null,
yza longblob not null,
bcd tinytext character set utf8 collate utf8_unicode_ci not null,
efg text(20) character set utf8 collate utf8_unicode_ci not null,
hij mediumtext character set utf8 collate utf8_unicode_ci not null,
klm longtext character set utf8 collate utf8_unicode_ci not null,
nop enum('a', 'b') character set utf8 collate utf8_unicode_ci not null,
qrs set('c', 'd') character set utf8 collate utf8_unicode_ci not null
) engine = innodb default charset = utf8 collate = utf8_unicode_ci;'''
        self.assertEqual(text,
            tinyAPI.Table('db', 'abc')
                .char('def', 15, True)
                .vchar('ghi', 16, True)
                .bin('jkl', 17, True)
                .vbin('mno', 18, True)
                .tblob('pqr', True)
                .blob('stu', 19, True)
                .mblob('vwx', True)
                .lblob('yza', True)
                .ttext('bcd', True)
                .text('efg', True, 20)
                .mtext('hij', True)
                .ltext('klm', True)
                .enum('nop', ['a', 'b'], True)
                .set('qrs', ['c', 'd'], True)
                .get_definition())

    def test_ref_table_exceptions(self):
        """RefTable enforces naming, unique IDs and unique display orders."""
        try:
            tinyAPI.RefTable('db', 'abc');
            self.fail('Was able to create a reference table even though the '
                      + 'table name was non-standard.')
        except TableBuilderException as e:
            self.assertEqual(
                'the name of the reference table must contain "_ref_"',
                e.get_message())
        try:
            tinyAPI.RefTable('db', 'abc_ref_def').add(1, 'a').add(1, 'b')
            self.fail('Was able to create a reference table even though a '
                      + 'duplicate ID value was used.')
        except TableBuilderException as e:
            self.assertEqual('the ID "1" is already defined', e.get_message())
        try:
            tinyAPI.RefTable('db', 'abc_ref_def').add(1, 'a', 1).add(2, 'b', 1)
            self.fail('Was able to create a reference table even though a '
                      + 'duplicate display order was used.')
        except TableBuilderException as e:
            self.assertEqual('the display order "1" is already defined',
                             e.get_message())

    def test_ref_table(self):
        """RefTable emits the table DDL plus ordered insert statements."""
        table_definition = '''create table abc_ref_def
(
id bigint unsigned not null auto_increment primary key,
value varchar(100) character set utf8 collate utf8_unicode_ci not null,
display_order int default null
) engine = innodb default charset = utf8 collate = utf8_unicode_ci;'''
        insert_statements = '''insert into abc_ref_def
(
id,
value,
display_order
)
values
(
'1',
'one',
'1'
);
commit;
insert into abc_ref_def
(
id,
value,
display_order
)
values
(
'2',
'two',
'2'
);
commit;
insert into abc_ref_def
(
id,
value,
display_order
)
values
(
'3',
'three',
'3'
);
commit;
'''
        ref_table = tinyAPI.RefTable('db', 'abc_ref_def') \
            .add(1, 'one', 1) \
            .add(2, 'two', 2) \
            .add(3, 'three', 3)
        self.assertEqual(table_definition, ref_table.get_definition())
        actual = ''
        for insert_statement in ref_table.get_insert_statements():
            actual += insert_statement + "\n"
        self.assertEqual(insert_statements, actual)
    def test_table_ai_active_column_is_set_exceptions(self):
        """ai() before any column is defined must raise."""
        try:
            tinyAPI.Table('db', 'abc').ai()
            self.fail('Was able to set a column as auto-increment even though '
                      + 'no column was defined.')
        except TableBuilderException as e:
            self.assertEqual(
                'call to "ai" invalid until column is defined',
                e.get_message())
def test_table_defv_active_column_is_set_exceptions(self):
try:
tinyAPI.Table('db', 'abc').defv(1)
self.fail('Was able to set the default value for a olumn even '
+ 'though no column was defined.')
except TableBuilderException as e:
self.assertEqual(
'call to "defv" invalid until column is defined',
e.get_message())
    def test_table_pk_active_column_is_set_exceptions(self):
        """pk() before any column is defined must raise."""
        try:
            tinyAPI.Table('db', 'abc').pk()
            self.fail('Was able to set a column as primary key even though no '
                      + 'column was defined.')
        except TableBuilderException as e:
            self.assertEqual(
                'call to "pk" invalid until column is defined',
                e.get_message())

    def test_table_uk_active_column_is_set_exceptions(self):
        """uk() before any column is defined must raise."""
        try:
            tinyAPI.Table('db', 'abc').uk()
            self.fail('Was able to set a column as a unique key even though no '
                      + 'column was defined.')
        except TableBuilderException as e:
            self.assertEqual(
                'call to "uk" invalid until column is defined',
                e.get_message())

    def test_table_fk_active_column_is_set_exceptions(self):
        """fk() before any column is defined must raise."""
        try:
            tinyAPI.Table('db', 'abc').fk('def')
            self.fail('Was able to set a column as a foreign key even though '
                      + 'no column was defined.')
        except TableBuilderException as e:
            self.assertEqual(
                'call to "fk" invalid until column is defined',
                e.get_message())

    def test_table_idx_active_column_is_set_exceptions(self):
        """idx() before any column is defined must raise."""
        try:
            tinyAPI.Table('db', 'abc').idx()
            self.fail('Was able to set a column as an index even though no '
                      + 'column was defined.')
        except TableBuilderException as e:
            self.assertEqual(
                'call to "idx" invalid until column is defined',
                e.get_message())

    def test_table_foreign_key_and_dependencies_active_column(self):
        """fk() on the active column records FK DDL and a table dependency."""
        text = '''create table abc
(
id bigint unsigned not null auto_increment unique
) engine = innodb default charset = utf8 collate = utf8_unicode_ci;'''
        table = tinyAPI.Table('db', 'abc') \
            .id('id', True, True) \
            .fk('def')
        self.assertEqual(text, table.get_definition())
        text = ''' alter table abc
add constraint abc_0_fk
foreign key (id)
references def (id)
on delete cascade'''
        fks = table.get_foreign_key_definitions()
        self.assertEqual(1, len(fks))
        self.assertEqual(text, fks[0])
        deps = table.get_dependencies()
        self.assertEqual(1, len(deps))
        self.assertTrue('def' in deps)

    def test_foreign_key_full_definition(self):
        """Multi-column FK with explicit local and remote column lists."""
        text = ''' alter table abc
add constraint abc_0_fk
foreign key (col_a, col_b)
references def (col_c, col_d)'''
        fks = tinyAPI.Table('db', 'abc') \
            .int('col_a') \
            .int('col_b') \
            .fk('def', False, ['col_a', 'col_b'], ['col_c', 'col_d']) \
            .get_foreign_key_definitions()
        self.assertEqual(1, len(fks))
        self.assertEqual(text, fks[0])

    def test_table_foreign_key_exceptions(self):
        """fk() validates that referenced local columns exist."""
        try:
            tinyAPI.Table('db', 'abc').fk('def', True, ['ghi'])
            self.fail('Was able to create a foreign key even though the column '
                      + 'provided did not exist.')
        except TableBuilderException as e:
            self.assertEqual(
                'column "ghi" cannot be used in foreign key because it has '
                + 'not been defined', e.get_message())
def test_table_index_exceptions(self):
try:
tinyAPI.Table('db', 'abc').idx(['def'])
self.fail('Was able to create an index even though the column '
+ 'provided did not exist.')
except TableBuilderException as e:
self.assertEqual(
'column "def" cannot be used in index because it has not been '
+ 'defined', e.get_message())
try:
tinyAPI.Table('db', 'abc').int('col_a').idx(['col_a x'])
self.fail('Was able to create an indexed with an invalid column '
+ 'modifier for asc/desc.');
except TableBuilderException as e:
self.assertEqual(
'columns can only be modified using "asc" or "desc"',
e.get_message())
def test_table_getting_index_definitions(self):
table = tinyAPI.Table('db', 'abc') \
.int('col_a') \
.int('col_b') \
.idx() \
.idx(['col_a asc', 'col_b desc'])
indexes = table.get_index_definitions()
self.assertEqual(2, len(indexes))
self.assertEqual(
"create index abc_0_idx\n on abc\n (col_b)",
indexes[0])
self.assertEqual(
"create index abc_1_idx\n on abc\n"
+ " (col_a asc, col_b desc)",
indexes[1])
def test_text_type_with_no_length(self):
self.assertEqual(
'abc text character set utf8 collate utf8_unicode_ci default null',
_MySQLStringColumn('abc')
.text_type(_MySQLStringColumn.TYPE_TEXT)
.get_definition())
    def test_getting_db_name_from_table(self):
        """A Table should report the database name it was constructed with."""
        self.assertEqual('db',
                         tinyAPI.Table('db', 'abc_ref_def').get_db_name())
    def test_getting_db_name_from_ref_table(self):
        """A RefTable should report the database name it was constructed with."""
        self.assertEqual('db',
                         tinyAPI.RefTable('db', 'abc_ref_def').get_db_name())
    def test_latitude_column_exceptions(self):
        """lat() must reject names other than "latitude" / "lat_*"."""
        try:
            tinyAPI.Table('db', 'abc').lat('invalid')
            self.fail('Was able to create a latitude column with an invalid '
                      + 'name.')
        except TableBuilderException as e:
            self.assertEqual(
                'latitude column must be named "latitude" or start with '
                + '"lat_"',
                e.get_message())
    def test_latitude_column(self):
        """lat() maps to float(53); the second argument toggles "not null"."""
        text = '''create table abc
(
    latitude float(53) default null
) engine = innodb default charset = utf8 collate = utf8_unicode_ci;'''
        self.assertEqual(
            text,
            tinyAPI.Table('db', 'abc').lat('latitude').get_definition())
        text = '''create table abc
(
    lat_x float(53) not null
) engine = innodb default charset = utf8 collate = utf8_unicode_ci;'''
        self.assertEqual(
            text,
            tinyAPI.Table('db', 'abc').lat('lat_x', True).get_definition())
    def test_longitude_column_exceptions(self):
        """long() must reject names other than "longitude" / "long_*"."""
        try:
            tinyAPI.Table('db', 'abc').long('invalid')
            self.fail('Was able to create a longitude column with an invalid '
                      + 'name.')
        except TableBuilderException as e:
            self.assertEqual(
                'longitude column must be named "longitude" or start with '
                + '"long_"',
                e.get_message())
    def test_longitude_column(self):
        """long() maps to float(53); the second argument toggles "not null"."""
        text = '''create table abc
(
    longitude float(53) default null
) engine = innodb default charset = utf8 collate = utf8_unicode_ci;'''
        self.assertEqual(
            text,
            tinyAPI.Table('db', 'abc').long('longitude').get_definition())
        text = '''create table abc
(
    long_x float(53) not null
) engine = innodb default charset = utf8 collate = utf8_unicode_ci;'''
        self.assertEqual(
            text,
            tinyAPI.Table('db', 'abc').long('long_x', True).get_definition())
    def test_email_column_exceptions(self):
        """email() must reject names other than "email_address" / "em_*"."""
        try:
            tinyAPI.Table('db', 'abc').email('invalid')
            self.fail('Was able to create an email column with an invalid '
                      + 'name.')
        except TableBuilderException as e:
            self.assertEqual(
                'email column must be named "email_address" or start with '
                + '"em_"',
                e.get_message())
    def test_email_column(self):
        """em_* columns map to varchar(100) with utf8 collation."""
        text = '''create table abc
(
    em_address varchar(100) character set utf8 collate utf8_unicode_ci not null
) engine = innodb default charset = utf8 collate = utf8_unicode_ci;'''
        self.assertEqual(
            text,
            tinyAPI.Table('db', 'abc') \
                .email('em_address', True) \
                .get_definition())
    def test_view_exceptions(self):
        """get_definition() without tbl() must raise TableBuilderException."""
        try:
            tinyAPI.View('db', 'abc').get_definition()
            self.fail('Was able to get the definition for a view even though '
                      + 'no base table was provided.')
        except TableBuilderException as e:
            self.assertEqual(
                'a base table was not configured for the view "abc"',
                e.get_message())
    def test_view(self):
        """tbl() sets the base table of "create view ... as select *"."""
        text = '''create view abc
    as select *
    from def'''
        self.assertEqual(
            text,
            tinyAPI.View('db', 'abc')
                .tbl('def')
                .get_definition())
    def test_money_column(self):
        """money() maps to float(53); the second argument toggles "not null"."""
        text = '''create table abc
(
    def float(53) not null
) engine = innodb default charset = utf8 collate = utf8_unicode_ci;'''
        self.assertEqual(
            text,
            tinyAPI.Table('db', 'abc')
                .money('def', True)
                .get_definition())
    def test_date_created_with_precision(self):
        """created(6) should emit a fractional-seconds datetime column."""
        text = '''create table abc
(
    date_created datetime(6) not null
) engine = innodb default charset = utf8 collate = utf8_unicode_ci;'''
        self.assertEqual(
            text,
            tinyAPI.Table('db', 'abc') \
                .created(6) \
                .get_definition())
    def test_date_updated_with_precision(self):
        """updated(6) should emit a self-updating fractional timestamp column."""
        text = '''create table abc
(
    date_updated timestamp(6) default current_timestamp(6) on update current_timestamp(6)
) engine = innodb default charset = utf8 collate = utf8_unicode_ci;'''
        self.assertEqual(
            text,
            tinyAPI.Table('db', 'abc') \
                .updated(6) \
                .get_definition())
# ----- Main ------------------------------------------------------------------
if __name__ == '__main__':
    # Allow the test module to be executed directly with the unittest runner.
    unittest.main()
| StarcoderdataPython |
1663864 | <filename>main/construct-binary-tree-from-inorder-and-postorder-traversal/construct-binary-tree-from-preorder-and-inorder-traversal-scratch.py
# Definition for a binary tree node.
class TreeNode:
    """Node of a binary tree: a value plus left/right child links."""

    def __init__(self, x):
        """Create a leaf node holding value *x* with no children."""
        self.val = x
        self.left = self.right = None
class Solution:
    def buildTree(self, preorder, inorder):
        """
        Reconstruct a binary tree from its preorder and inorder traversals.

        :type preorder: List[int]
        :type inorder: List[int]
        :rtype: TreeNode

        Requires unique node values (the inorder index map relies on it).
        Runs in O(n) time and O(n) extra space.
        """
        # Map each value to its inorder position for O(1) subtree splits.
        in_indices = {val: i for i, val in enumerate(inorder)}

        def build(size, pre_start, in_start):
            # Build the `size`-node subtree whose preorder segment starts at
            # `pre_start` and whose inorder segment starts at `in_start`.
            # Bug fix: removed the debug print() that fired on every
            # recursive call, polluting stdout and dominating the runtime.
            if not size:
                return None
            assert size > 0
            val = preorder[pre_start]
            in_index = in_indices[val]
            assert in_start <= in_index < in_start + size
            left_size = in_index - in_start
            root = TreeNode(val)
            root.left = build(left_size, pre_start + 1, in_start)
            root.right = build(
                size - left_size - 1, pre_start + left_size + 1, in_index + 1)
            return root

        assert len(preorder) == len(inorder)
        return build(len(preorder), 0, 0)
# Smoke test: rebuild the LeetCode example tree and show the root node.
s = Solution()
preorder0 = [3,9,20,15,7]
inorder0 = [9,3,15,20,7]
root0 = s.buildTree(preorder0, inorder0)
print(root0)  # TreeNode has no __repr__, so this prints the default object repr
| StarcoderdataPython |
3232031 | <filename>lib/BloatAPI.py
'''
Author: <NAME>
Email: <EMAIL>
Most the functions are based on the paper:
- 'Bounds and Perturbation Bounds for the Matrix Exponential'
by Bo Kagstrom
- 'The Sensitivity of the Matrix Exponential'
by <NAME>
- 'Norms of Interval Matrices'
by <NAME>, <NAME> and <NAME>
- Linear Dynamical System: dot{x} = (A+E)x; where E is the perturbation.
- Given a perturbation, the bloating factor according to which e^{At} is to be
bloated to accomodate e^{(A+B)t} is computed.
'''
import os,sys
PROJECT_ROOT = os.environ['ULS_ROOT_DIR']
sys.path.append(PROJECT_ROOT)
from Parameters import *
import numpy as np
import numpy.linalg as LA
import scipy.linalg as SLA
from sympy.matrices import *
import math
from random import seed
from random import random
class BloatKagstrom:
    '''
    Necessary APIs required to bloat the Reachable
    Set according to the given uncertainty. Based on the paper
    'Bounds and Perturbation Bounds for the Matrix Exponential'
    by Bo Kagstrom.
    '''
    def __init__(self, matA, matE):
        self.A=matA # Matrix A of size n*n, represented as a numpy array.
        self.E=matE # Matrix E, represents error.
        '''
        Following dictionary data-structure has been used to represent
        the error matrix E:
        {
            (i,j): [a,b]
        }
        Indicating E[i][j] can pick any value within the range [a,b]
        '''
        self.n=self.A.shape[0] # Dimension of the System
    @staticmethod
    def computeP(x,n):
        '''
        compute p_{n-1}(x) according to the paper
        'Bounds and Perturbation Bounds for the Matrix Exponential'
        (truncated exponential series: sum_{k=0}^{n-2} x^k / k!)
        '''
        s=0
        for k in range(n-1):
            xk=x**k
            k_fact=math.factorial(k)
            s=s+(xk/k_fact)
        return s
    def intervalNorm(self,p='slow'):
        '''
        Computes the interval norm of
        E based on Theorem 7/10 of the
        paper 'Norms of Interval Matrices'
        '''
        norm=IntervalNorm(self.E,self.n,p).getNorm()
        return norm
    def decompose(self):
        '''
        Decompose A=QTQ^H
        Q: Unitary
        T=lam+M
        lam: Diagonal
        M: Strict Upper Triangular
        This functions returns: (Q,lam,M)
        IMPORTANT: The M is not minimal
        norm in this implementation
        '''
        # NOTE(review): lam is allocated real-valued, so any imaginary part of
        # a complex eigenvalue on T's diagonal is dropped -- presumably A is
        # assumed to admit a real Schur form here; confirm with callers.
        (T,Q)=SLA.schur(self.A)
        lam=np.zeros((self.n,self.n))
        for i in range(self.n):
            lam[i][i]=T[i][i]
            T[i][i]=0
        return (Q,lam,T)
    def computeBloatingFactor(self,t,p='slow'):
        '''
        Computes the Relative Error Bound
        as per 4.14 (Table 4.1) in the paper
        'Bounds and Perturbation Bounds for the Matrix Exponential'
        '''
        (Q,lam,M)=self.decompose()
        normE=self.intervalNorm(p)
        normM=IntervalNorm.spectralNorm(M)
        bloatFactor=BloatKagstrom.computeP(normM*t,self.n)*(np.exp(BloatKagstrom.computeP(normM*t,self.n)*normE*t)-1)
        return bloatFactor
    def computeBloatingFactorWithTime(self,start,n,step,p='slow'):
        '''
        Computes the Relative Error Bound
        as per 4.14 (Table 4.1) in the paper
        'Bounds and Perturbation Bounds for the Matrix Exponential'
        with respect to time.
        Returns (timeAxis, fAxis): n+1 samples starting at `start`,
        spaced by `step`.
        '''
        #print("++++Kagstrom1++++")
        (Q,lam,M)=self.decompose()
        normE=self.intervalNorm(p)
        normM=IntervalNorm.spectralNorm(M)
        #print("p(n-1): ",BloatKagstrom.computeP(normM*20,self.n))
        #print("Norm of E: ",normE)
        #print("Norm of M: ", normM)
        #print("")
        timeAxis=[]
        fAxis=[]
        t=start
        it=0
        while True:
            bloatFactor=BloatKagstrom.computeP(normM*t,self.n)*(np.exp(BloatKagstrom.computeP(normM*t,self.n)*normE*t)-1)
            timeAxis.append(t)
            #print("Time ",t,": ",bloatFactor)
            fAxis.append(bloatFactor)
            t=t+step
            it=it+1
            if (it>n):
                break
        return (timeAxis,fAxis)
    def computeBloatingFactor2(self,t,p='slow'):
        '''
        Computes the Relative Error Bound
        as per 4.12 (Table 4.1) in the paper
        'Bounds and Perturbation Bounds for the Matrix Exponential'
        '''
        (S,N,l,ep)=BloatKagstrom.JNFDecomp(self.A)
        D=BloatKagstrom.getD(l,ep,N)
        K=BloatKagstrom.computeK(np.matmul(S,D))
        normE=self.intervalNorm(p)
        bloat=K*np.exp(ep*t)*(np.exp(K*normE*t)-1)
        return bloat
    def computeBloatingFactor2WithTime(self,start,n,step,p='slow'):
        '''
        Computes the Relative Error Bound
        as per 4.12 (Table 4.1) in the paper
        'Bounds and Perturbation Bounds for the Matrix Exponential'
        with respect to time.
        '''
        #print("++++Kagstrom2++++")
        (S,N,l,ep)=BloatKagstrom.JNFDecomp(self.A)
        D=BloatKagstrom.getD(l,ep,N)
        K=BloatKagstrom.computeK(np.matmul(S,D))
        #print("K(SD): ",K)
        #print("epsilon: ",ep)
        normE=self.intervalNorm(p)
        #print("Norm E: ",normE)
        timeAxis=[]
        fAxis=[]
        t=start
        it=0
        while True:
            bloatFactor=K*np.exp(ep*t)*(np.exp(K*normE*t)-1)
            timeAxis.append(t)
            #print("Time ",t,": ",bloatFactor)
            fAxis.append(bloatFactor)
            t=t+step
            it=it+1
            if (it>n):
                break
        return (timeAxis,fAxis)
    @staticmethod
    def computeK(S):
        '''
        Computes K(S) as given in the paper
        The Sensitivity of the Matrix Exponential
        by Kagstrom
        K(S)=||S||*||S^-1||
        (the spectral condition number of S)
        '''
        #S=S.astype('float')
        K=IntervalNorm.spectralNorm(S)*(IntervalNorm.spectralNorm(LA.inv(S)))
        #print("K(SD): ",K)
        return K
    @staticmethod
    def JNFDecomp(A):
        '''
        Decomposes the matrix A to Jordan Normal Form
        A=SJS^-1
        returns S,N,len,ep
        Where N: has 1 in the same positions (i,i+1) as J
        len: countJordanBlocks(J), the length of Jordan Blocks
        ep: 0 < ep < -alpha(A). alpha(A) is the maximum eigen value (real part) [Yet to implement]
        '''
        a=Matrix(A)
        (s,j)=a.jordan_form()
        #print(s,j)
        #exit(0)
        S=np.array(s)
        J=np.array(j)
        S=S.astype('complex')
        J=J.astype('complex')
        #print("S: \n",S)
        #print("J: \n",J)
        N=np.copy(J)
        for i in range(N.shape[0]):
            N[i][i]=0
        # N keeps only the super-diagonal ones of J (nilpotent part).
        N=N.astype('float')
        #print("N: \n",N)
        ep=BloatKagstrom.getEpsilon(A,J)
        #print("epsilon: ",ep)
        return (S,N,BloatKagstrom.countJordanBlocks(J),ep)
    @staticmethod
    def countJordanBlocks(J):
        '''
        Counts the lengths of each Jordan Blocks
        (a new block starts wherever the super-diagonal entry is not 1)
        '''
        n=J.shape[0]
        l=[]
        c=0
        for i in range(n-1):
            if J[i][i+1]!=1:
                l.append(c+1)
                c=0
            else:
                c=c+1
        if sum(l)!=n:
            l.append(c+1)
        return l
    @staticmethod
    def getD(leng,epsilon,N):
        '''
        Returns the matrix according to the given length leng
        as given in the paper The Sensitivity of the Matrix Exponential
        by Kagstrom
        D is block-diagonal with entries 1, delta, delta^2, ... per block.
        '''
        if (epsilon==0):
            delta=0
        elif (LA.norm(N,ord='fro')==0):
            print("Exception!!")
            delta=0
        else:
            delta=min(1,epsilon/LA.norm(N,ord='fro'))
        n=sum(leng)
        D=np.zeros((n,n))
        ind=0
        for c in leng:
            for j in range(c):
                D[ind][ind]=delta**j
                ind=ind+1
        #print("delta: ",delta)
        #print("D: \n",D)
        return D
    @staticmethod
    def getEpsilon(A,J):
        '''
        Returns the proper value of epsilon
        '''
        # NOTE(review): the eigenvalue recorded for a finished block is
        # J[i-1][i-1]; for a block that ends at position i this looks like it
        # should be J[i][i] -- confirm against the intended block bookkeeping.
        n=J.shape[0]
        l=[]
        c=0
        for i in range(n-1):
            if J[i][i+1]!=1:
                l.append((J[i-1][i-1],c+1))
                c=0
            else:
                c=c+1
        sm=0
        for i in l:
            sm=sm+i[1]
        if sm!=n:
            l.append((J[n-1][n-1],c+1))
        mx=-9999
        ind=-1
        for lam in l:
            r=lam[0].real+(math.cos(math.pi/(lam[1])))
            if r>mx:
                ind=lam
                mx=r
        if ind[1]==1:
            print("BINGO!!")
            return 0
        elif max(LA.eig(A)[0].real)<0:
            print("NEGATIVE EIGENVALUE!!")
            return -max(LA.eig(A)[0].real)-0.00001
        else:
            print("RANDOM")
            return 0.05
class BloatLoan:
    '''
    Necessary APIs required to bloat the Reachable
    Set according to the given uncertainty. Based on the paper
    'The Sensitivity of the Matrix Exponential'
    by <NAME>
    '''
    def __init__(self, matA, matE):
        self.A=matA # Matrix A of size n*n, represented as a numpy array.
        self.E=matE # Matrix E, represents error.
        '''
        Following dictionary data-structure has been used to represent
        the error matrix E:
        {
            (i,j): [a,b]
        }
        Indicating E[i][j] can pick any value within the range [a,b]
        '''
        self.n=self.A.shape[0] # Dimension of the System
    def intervalNorm(self,p='slow'):
        '''
        Computes the interval norm of
        E based on Theorem 7 of the
        paper 'Norms of Interval Matrices'
        '''
        norm=IntervalNorm(self.E,self.n,p).getNorm()
        return norm
    def computeBloatingFactor(self,t,p='slow'):
        '''
        Computes the Relative Error Bound
        as per Theorem 2 in the paper
        'The Sensitivity of the Matrix Exponential'
        i.e. t * ||E|| * exp((mu(A) - alpha(A) + ||E||) * t)
        '''
        normE=self.intervalNorm(p)
        alphaA=self.computeAlpha()
        muA=self.computeMu()
        ePow=(muA-alphaA+normE)*t
        bloatFactor=t*normE*np.exp(ePow)
        return bloatFactor
    @staticmethod
    def computeBloatingFactorTest(normE,alphaA,muA,t):
        '''
        Computes the Relative Error Bound
        as per Theorem 2 in the paper
        'The Sensitivity of the Matrix Exponential'
        (same formula as computeBloatingFactor, with the norms supplied
        directly -- used for quick manual experiments)
        '''
        ePow=(muA-alphaA+normE)*t
        bloatFactor=t*normE*np.exp(ePow)
        return bloatFactor.real
    def computeBloatingFactorWithTime(self,start,n,step,p='slow'):
        '''
        Computes the Relative Error Bound
        as per Theorem 2 in the paper
        'The Sensitivity of the Matrix Exponential'
        with respect to time.
        Returns (timeAxis, fAxis): n+1 samples starting at `start`,
        spaced by `step`.
        '''
        #print("++++Loan++++")
        normE=self.intervalNorm(p)
        alphaA=self.computeAlpha()
        muA=self.computeMu()
        ePow=(muA-alphaA+normE)
        timeAxis=[]
        fAxis=[]
        t=start
        it=0
        while True:
            bloatFactor=(t*normE*np.exp(ePow*t)).real
            timeAxis.append(t)
            #print("Time ",t,": ",bloatFactor)
            fAxis.append(bloatFactor)
            t=t+step
            it=it+1
            if (it>n):
                break
        return (timeAxis,fAxis)
    def computeAlpha(self):
        '''
        Computes alpha(A) as per the paper 'The Sensitivity of the Matrix Exponential'
        by Chales Van Loan
        (spectral abscissa: largest real part of A's eigenvalues)
        '''
        print("Alpha(A): ",max(LA.eig(self.A)[0].real))
        return max(LA.eig(self.A)[0].real)
    def conjugateFactor(self):
        '''
        Returns the following:
        (A*+(A/2))
        i.e. the Hermitian part (A* + A) / 2
        '''
        AStar=self.A.conjugate().transpose()
        return ((AStar+self.A)/2)
    def computeMu(self):
        '''
        Computes mu(A) as per the paper 'The Sensitivity of the Matrix Exponential'
        by <NAME>
        (logarithmic norm: largest eigenvalue of the Hermitian part)
        '''
        print("Mu(A): ",max(LA.eig(self.conjugateFactor())[0]))
        return (max(LA.eig(self.conjugateFactor())[0]))
class IntervalNorm:
    """Norm bounds for an interval matrix.

    Implements Theorem 7 (exact 2-norm via sign enumeration) and
    Theorem 10 (Frobenius upper bound) of 'Norms of Interval Matrices'.
    The interval matrix is supplied as a dict mapping (row, col) -> [lo, hi];
    entries that are absent are exactly zero.
    """

    def __init__(self, matrix, s, p='slow'):
        """Record the interval entries, the dimension *s* and the pace *p*."""
        self.A = matrix
        self.pace = p
        self.n = s

    def getNorm(self):
        """Dispatch: exact 2-norm for 'slow', Frobenius bound otherwise."""
        return self.intervalNorm2() if self.pace.lower() == 'slow' else self.frobeniusNorm()

    def centerify(self):
        """Split A = [Ac - delta, Ac + delta] into midpoint and radius matrices."""
        Ac = np.zeros((self.n, self.n))
        delta = np.zeros((self.n, self.n))
        for (row, col), (lo, hi) in self.A.items():
            Ac[row][col] = (lo + hi) / 2
            delta[row][col] = hi - Ac[row][col]
        return (Ac, delta)

    @staticmethod
    def generateSignBits(n, size, axis):
        """Return the n-th +/-1 sign pattern (from n's binary digits).

        axis == 0 yields a (1, size) row vector, otherwise a (size, 1) column.
        """
        bits = [1.0 if ch == '1' else -1.0 for ch in np.binary_repr(n, size)]
        vec = np.array(bits)
        return vec.reshape((1, size)) if axis == 0 else vec.reshape((size, 1))

    def intervalNorm2(self):
        """Exact interval 2-norm by exhaustive sign enumeration (Theorem 7).

        Exponential in the dimension: 4^n spectral-norm evaluations.
        """
        Ac, delta = self.centerify()
        best = -9999
        for i in range(2 ** self.n):
            col_signs = IntervalNorm.generateSignBits(i, self.n, 1)
            for j in range(2 ** self.n):
                row_signs = IntervalNorm.generateSignBits(j, self.n, 0)
                candidate = IntervalNorm.spectralNorm(
                    Ac + np.matmul(col_signs, row_signs) * delta)
                if candidate > best:
                    best = candidate
        return best

    @staticmethod
    def spectralNorm(matA):
        """Matrix 2-norm (largest singular value) of matA."""
        return LA.norm(matA, ord=2)

    def frobeniusNorm(self):
        """Fast Frobenius upper bound (Theorem 10): || |Ac| + delta ||_F."""
        Ac, delta = self.centerify()
        return LA.norm(abs(Ac) + delta, ord='fro')
# Tester --------------
# Dead code: both guards below are hard-coded False; flip one to True to run
# a manual smoke test of the bloating-factor computations.
if False:
    A=np.array([
    [1,0,0,1,0],
    [0,1,0,0,1],
    [0,0,1,0,0],
    [1,0,0,1,1],
    [1,0,0,0,1],
    ])
    E={
    (0,2): [-0.2,0.2],
    (3,2): [-0.1,0.1]
    }
    b=BloatKagstrom(A,E)
    print("A: \n",A)
    print("E: \n",E)
    print()
    print("-----Kagstrom (4.12) Illustration-------")
    print(b.computeBloatingFactor2WithTime(0,5,1))
if False:
    q=BloatLoan.computeBloatingFactorTest(4.8714495e-122,0,14.102002,20)
    print(q)
| StarcoderdataPython |
1765585 | import os
from deepblast.dataset.utils import state_f, revstate_f
import pandas as pd
import numpy as np
from collections import Counter
def read_mali(root, tool='manual', report_ids=False):
    """ Reads in all alignments.

    Parameters
    ----------
    root : path
        Path to root directory
    tool : str
        Specifies which tools alignments should be extracted for.
    report_ids : bool
        If True, also report query/hit ids, the pdb id and the directory.

    Returns
    -------
    pd.DataFrame
        Three columns, one for each sequence and the resulting alignment.
        If `report_ids` is specified, then the pdb id and the query/hit
        ids are also reported as additional columns.
    """
    res = []
    pdbs = []
    dirs = []
    for path, directories, files in os.walk(root):
        for f in files:
            if '.ali' in f and tool in f and ('manual2' not in f):
                fname = os.path.join(path, f)
                # Bug fix: use a context manager so the file handle is closed
                # (the original leaked one handle per alignment file).
                with open(fname) as handle:
                    lines = handle.readlines()
                X = lines[0].rstrip().upper()
                Y = lines[1].rstrip().upper()
                S = ''.join(
                    list(map(revstate_f, map(state_f, list(zip(X, Y))))))
                res.append((X.replace('-', ''), Y.replace('-', ''), S))
                pdbs.append(os.path.basename(f).split(f'.{tool}.ali')[0])
                dirs.append(os.path.basename(path))
    res = pd.DataFrame(res)
    if report_ids:
        # Bug fix: np.str was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin str is the supported spelling.
        res['query_id'] = np.arange(len(res)).astype(str)
        res['hit_id'] = (np.arange(len(res)) + len(res)).astype(str)
        res['pdb'] = pdbs
        res['dir'] = dirs
    return res
def _mammoth_strip(x):
y = ''.join(x.split(' ')[1:])
return y.rstrip()
def read_mali_mammoth(root, report_ids=False):
    """ Reads in all mammoth alignments.

    Parameters
    ----------
    root : path
        Path to root directory
    report_ids : bool
        If True, also report query/hit ids and the pdb id.

    Returns
    -------
    pd.DataFrame
        Three columns, one for each sequence and the resulting alignment.
        If `report_ids` is specified, then the pdb id and the query/hit
        ids are also reported as additional columns.
    """
    res = []
    pdbs = []
    for path, directories, files in os.walk(root):
        for f in files:
            if '.ali' in f:
                fname = os.path.join(path, f)
                # Bug fix: close the file handle via a context manager
                # (the original leaked one handle per alignment file).
                with open(fname) as handle:
                    contents = handle.readlines()
                pred = list(filter(lambda x: 'Prediction ' in x, contents))
                expr = list(filter(lambda x: 'Experiment ' in x, contents))
                # Mammoth interleaves sequence/structure lines; keep every
                # other line (sequence rows only).
                idx = np.arange(len(pred)) % 2 == 0
                pred = list(np.array(pred)[idx])
                X = ''.join(list(map(_mammoth_strip, pred)))
                expr = list(np.array(expr)[~idx])
                Y = ''.join(list(map(_mammoth_strip, expr)))
                X, Y = X.replace('.', '-'), Y.replace('.', '-')
                X, Y = X.rstrip().upper(), Y.rstrip().upper()
                S = ''.join(
                    list(map(revstate_f, map(state_f, list(zip(X, Y))))))
                res.append((X.replace('-', ''), Y.replace('-', ''), S))
                pdbs.append(os.path.basename(f).split('.mammoth.ali')[0])
    res = pd.DataFrame(res)
    if report_ids:
        # Bug fix: np.str was removed in NumPy 1.24; use the builtin str.
        res['query_id'] = np.arange(len(res)).astype(str)
        res['hit_id'] = (np.arange(len(res)) + len(res)).astype(str)
        res['pdb'] = pdbs
    return res
def get_mali_structure_stats(root):
    """ Reads in the manual alignments and obtains stats.

    Parameters
    ----------
    root : path
        Path to root directory

    Returns
    -------
    pd.DataFrame
        alpha residues
        beta residues

    NOTE(review): requires Biopython plus the external `mkdssp` binary on
    PATH; only the first model of each PDB is analysed (the second-model
    code below is commented out -- presumably work in progress).
    """
    from Bio.PDB import PDBParser
    from Bio.PDB.DSSP import DSSP
    res = []
    tool = 'manual'
    for path, directories, files in os.walk(root):
        for f in files:
            if '.pdb' in f and tool in f:
                fname = os.path.join(path, f)
                parser = PDBParser()
                # ids = os.path.basename(fname).split('_')
                structs = parser.get_structure('', fname)
                dssp1 = DSSP(structs[0], fname, dssp='mkdssp')
                # Secondary-structure class per residue (H, E, C, ...).
                classes1 = list(map(lambda x: x[2], dssp1))
                len1 = len(classes1)
                classes1 = pd.Series(Counter(classes1))
                classes1.index = list(map(lambda x: 'x' + x, classes1.index))
                pdb_name = os.path.basename(f).split('.')[0]
                # stats = pd.concat((classes1, classes2))
                stats = classes1
                stats['pdb'] = pdb_name
                stats['path'] = fname
                stats['xlen'] = len1
                # dssp2 = DSSP(structs[1], fname, dssp='mkdssp')
                # classes2 = list(map(lambda x: x[2], dssp2))
                # len2 = len(classes2)
                # classes2 = pd.Series(Counter(classes2))
                # classes2.index = list(map(lambda x: 'y' + x, classes2.index))
                # stats['ylen'] = len2
                res.append(stats)
    res = pd.DataFrame(res)
    return res
| StarcoderdataPython |
1633822 | import pytest
# noinspection PyProtectedMember
from infoblox import _settings
@pytest.mark.parametrize(('setting_name', 'setting_type'), [
    ('DEFAULT_CONNECT_TIMEOUT', float),
    ('DEFAULT_READ_TIMEOUT', float),
    ('DEFAULT_MAX_RETRIES', int),
    ('DEFAULT_BACKOFF_FACTOR', float)
])
def test_settings_presence_and_type(setting_name, setting_type):
    """Each default setting must exist on _settings with the expected type."""
    assert hasattr(_settings, setting_name)
    assert isinstance(getattr(_settings, setting_name), setting_type)
| StarcoderdataPython |
3295594 | from datetime import datetime
from typing import Optional
from uuid import UUID
from server.schemas.base import BoilerplateBaseModel
class ShopToPriceBase(BoilerplateBaseModel):
    """Shared fields for a price offered by a shop.

    Each ``use_*`` flag indicates whether the corresponding quantity tier
    is offered; the matching value field holds that tier's price (or None).
    """
    active: bool
    new: bool
    price_id: UUID
    shop_id: UUID
    category_id: UUID
    kind_id: Optional[UUID] = None
    product_id: Optional[UUID] = None
    use_half: bool = False
    half: Optional[float] = None
    use_one: bool = False
    one: Optional[float] = None
    # Bug fix: these two were annotated as non-optional ``float``/``bool``
    # while defaulting to None.  Pydantic v1 silently coerced that to
    # Optional, mypy rejects it, and pydantic v2 raises at class creation --
    # make the optionality explicit (defaults unchanged).
    two_five: Optional[float] = None
    use_two_five: Optional[bool] = None
    use_five: bool = False
    five: Optional[float] = None
    use_joint: bool = False
    joint: Optional[float] = None
    use_piece: bool = False
    piece: Optional[float] = None
class ShopToPriceAvailability(BoilerplateBaseModel):
    """Minimal payload used to toggle a shop price's availability."""
    active: bool
# Properties to receive via API on creation
class ShopToPriceCreate(ShopToPriceBase):
    """Payload accepted when creating a shop-price link; no extra fields."""
    pass
# Properties to receive via API on update
class ShopToPriceUpdate(ShopToPriceBase):
    """Payload accepted when updating a shop-price link; no extra fields."""
    pass
class ShopToPriceInDBBase(ShopToPriceBase):
    """Database representation: adds the surrogate key and audit timestamps."""
    id: UUID
    created_at: datetime
    modified_at: datetime
    class Config:
        # Allow construction directly from ORM row objects.
        orm_mode = True
# Additional properties to return via API
class ShopToPriceSchema(ShopToPriceInDBBase):
    """Full shop-price object as returned by the API."""
    pass
| StarcoderdataPython |
3213387 | <reponame>IvanTodorovBG/SoftUni
import re
# Read the entry list, then accumulate digit points per racer from each
# input line until the "end of race" sentinel; finally print the podium.
participants = input().split(", ")
points = {}
letters = re.compile(r"[a-zA-Z]")
digits = re.compile(r"[0-9]")
line = input()
while line != "end of race":
    # A racer's name is the concatenation of every letter on the line.
    racer = "".join(letters.findall(line))
    if racer in participants:
        gained = sum(int(d) for d in digits.findall(line))
        points[racer] = points.get(racer, 0) + gained
    line = input()
# Stable sort by descending score preserves first-seen order on ties.
ranking = [name for name, _ in sorted(points.items(), key=lambda item: -item[1])]
print(f"1st place: {ranking[0]}")
print(f"2nd place: {ranking[1]}")
print(f"3rd place: {ranking[2]}")
| StarcoderdataPython |
1733252 | """Tests for spiketools.stats.permutations"""
from spiketools.stats.permutations import *
###################################################################################################
###################################################################################################
def test_vec_perm():
    """vec_perm should return the permuted data as a numpy array."""
    data = np.array([[1, 2, 3], [4, 5, 6]])
    out = vec_perm(data)
    assert isinstance(out, np.ndarray)
def test_compute_empirical_pvalue(tdata):
    """The empirical p-value against surrogates should be a float."""
    p_value = compute_empirical_pvalue(1.5, tdata)
    assert isinstance(p_value, float)
def test_zscore_to_surrogates(tdata):
    """The z-score against surrogates should be a float."""
    zscore = zscore_to_surrogates(1.5, tdata)
    assert isinstance(zscore, float)
| StarcoderdataPython |
3277939 | '''set_stall_detection(stop_when_stalled)
Turns stall detection on or off.
Stall detection senses when a motor has been blocked and can’t move. If stall detection has been enabled and a motor is blocked, the motor will be powered off after two seconds and the current motor command will be interrupted. If stall detection has been disabled, the motor will keep trying to run and programs will "get stuck" until the motor is no longer blocked.
Stall detection is enabled by default.
Parameters
stop_when_stalled
Choose "true" to enable stall detection or "false" to disable it.
Type:boolean
Values:True or False
Default:True
Errors
TypeError
stop_when_stalled is not a boolean.
RuntimeError
The motor has been disconnected from the Port.
Example
'''
from spike import Motor
# Example from the docstring above: disable stall detection on port A.
motor = Motor('A')
motor.set_stall_detection(False)
#motor.run_for_rotations(2)
# The program will never proceed to here if the motor is stalled | StarcoderdataPython |
1715607 | <filename>Harpe-website/website/contrib/communication/admin.py
# -*- coding: utf-8 -*-
from django.conf import settings
from django.contrib import admin
#from django.utils.translation import ugettext_lazy as _
#from webcore.utils.admin import AdminThumbnailMixin
#from grappellifit.admin import TranslationAdmin
from website.contrib.communication.models import *
class ClientCalculationAdmin(admin.ModelAdmin):
    # Columns shown in the changelist for client calculation records.
    list_display = ("client","analysepeptide","status","send_hour","recive_hour","analyse_status")
admin.site.register(ClientCalculation,ClientCalculationAdmin)
class ClientAdmin(admin.ModelAdmin):
    # Columns shown in the changelist; filter sidebar on the active flag.
    list_display = ("ip","port","owner","server","is_active","ram","version")
    list_filter = ("is_active",)
admin.site.register(Client,ClientAdmin)
class HarpeServerAdmin(admin.ModelAdmin):
    list_display = ("name","version","ip","port","is_active")
    # NOTE(review): if re-enabled, ("is_active") is a plain string, not a
    # tuple -- it would need a trailing comma: ("is_active",)
    #list_editable = ("is_active")
admin.site.register(HarpeServer,HarpeServerAdmin)
| StarcoderdataPython |
1702659 | from pytest import raises
from Lexer import Lexer
from Token import Token, TokenTypes
def test_repr():
    """repr() should bracket the character at the lexer's current position."""
    assert repr(Lexer('1 + 3')) == '<Lexer [1] + 3>'
def test_advance():
    """advance() should move the cursor one character, ending at EOF."""
    lexer = Lexer('1 + 3')
    lexer.advance()
    assert repr(lexer) == '<Lexer 1[ ]+ 3>'
    lexer.advance()
    assert repr(lexer) == '<Lexer 1 [+] 3>'
    lexer.advance()
    assert repr(lexer) == '<Lexer 1 +[ ]3>'
    lexer.advance()
    assert repr(lexer) == '<Lexer 1 + [3]>'
    lexer.advance()
    assert repr(lexer) == '<Lexer EOF>'
def test_peek():
    """peek() must look one character ahead without consuming input."""
    lexer = Lexer('1 + 3')
    assert lexer.peek() == ' '
    lexer.advance()
    assert lexer.peek() == '+'
    lexer.advance()
    assert lexer.peek() == ' '
    lexer.advance()
    assert lexer.peek() == '3'
    lexer.advance()
    # Bug fix: compare with == rather than `is`.  Identity checks against a
    # str literal are implementation-defined and raise a SyntaxWarning on
    # CPython >= 3.8.  At end of input peek() returns the empty string.
    assert lexer.peek() == ''
    lexer.advance()
def test_next_token():
    """next_token() should yield INT/ADD/INT then EOF for '1+ 3'."""
    lexer = Lexer('1+ 3')
    assert lexer.next_token() == Token(TokenTypes.INT, 1)
    assert lexer.next_token() == Token(TokenTypes.ADD)
    assert lexer.next_token() == Token(TokenTypes.INT, 3)
    assert lexer.next_token() == Token(TokenTypes.EOF)
def test_empty_program():
    """An empty input should start at EOF and produce only the EOF token."""
    lexer = Lexer('')
    assert repr(lexer) == '<Lexer EOF>'
    assert lexer.next_token() == Token(TokenTypes.EOF)
def test_bad_input():
    """An unrecognised character should make next_token() raise."""
    lexer = Lexer('&')
    with raises(Exception):
        lexer.next_token()
def test_parse_integer():
    """A multi-digit run should be lexed as a single INT token."""
    assert Lexer('173').next_token() == Token(TokenTypes.INT, 173)
def test_skip_whitespace():
    """Runs of whitespace between tokens should be skipped entirely."""
    lexer = Lexer('1  +3   9')
    assert lexer.next_token() == Token(TokenTypes.INT, 1)
    assert lexer.next_token() == Token(TokenTypes.ADD)
    assert lexer.next_token() == Token(TokenTypes.INT, 3)
    assert lexer.next_token() == Token(TokenTypes.INT, 9)
    assert lexer.next_token() == Token(TokenTypes.EOF)
def test_math_symbols():
    """All four arithmetic operators should map to their token types."""
    lexer = Lexer('+ - * /')
    assert lexer.next_token() == Token(TokenTypes.ADD)
    assert lexer.next_token() == Token(TokenTypes.SUB)
    assert lexer.next_token() == Token(TokenTypes.MUL)
    assert lexer.next_token() == Token(TokenTypes.DIV)
def test_negative_numbers():
    """A leading minus should fold into the integer literal that follows."""
    lexer = Lexer('-3 * -2')
    assert lexer.next_token() == Token(TokenTypes.INT, -3)
    assert lexer.next_token() == Token(TokenTypes.MUL)
    assert lexer.next_token() == Token(TokenTypes.INT, -2)
def test_minus_as_final_token_crash():
    """A trailing bare '-' must lex as SUB rather than crashing the lexer."""
    lexer = Lexer('-')
    assert lexer.next_token() == Token(TokenTypes.SUB)
| StarcoderdataPython |
1616640 | <filename>choose_nodes/lazy.py
#!/usr/bin/env python3
import numpy as np
def lazy(N, n):
    '''
    Pick the first n of N nodes as the measured set.

    Arguments:
        1. N: Total number of nodes
        2. n: Number of measure nodes

    Returns:
        1. measure_id: Measured node indices (0 .. n-1)
        2. hidden_id:  Hidden node indices (n .. N-1)
    '''
    assert type(N) is int and N > 0, "N must be a positive integer"
    assert type(n) is int and 0 < n < N, "n must be a positive integer less than N"
    measured = np.arange(n)
    hidden = np.arange(n, N)
    return measured, hidden
| StarcoderdataPython |
1776935 | <filename>satori.web/satori/web/setup.py
# vim:ts=4:sts=4:sw=4:expandtab
"""Takes care of settings required by Django. Import this module before django.*
"""
import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'satori.web.settings'
from django.core.management import setup_environ
from satori.web import settings
setup_environ(settings)
| StarcoderdataPython |
1761397 | import os
import csv
# Paths are relative to the script's working directory.
csv_path = os.path.join("..", "Resources", "PyBank", "budget_data.csv")
output_path = os.path.join("..", "Analysis", "PyBank_Analysis.txt")

with open(csv_path) as csv_file:
    csv_read = csv.reader(csv_file)
    next(csv_read)  # skip the header row
    months = 0
    total = 0
    total_ch = 0
    prev_rev = 0
    inc = [0,'']   # [greatest increase, month label]
    dec = [0,'']   # [greatest decrease, month label]
    for i, row in enumerate(csv_read):
        months += 1
        rev = int(row[1])
        total += rev
        # Month-over-month change; the first row has no predecessor.
        change = rev - prev_rev
        prev_rev = rev
        if i == 0:
            change = 0
        total_ch += change
        # Greatest Increase in Profits
        if change > inc[0]:
            inc[0] = change
            inc[1] = row[0]
        # Greatest Decrease in Profits
        if change < dec[0]:
            dec[0] = change
            dec[1] = row[0]

output = (
f'\n Financial Analysis\n\
----------------------------\n\
Total Months: {months}\n\
Total: ${total:,}\n\
Average Change: ${total_ch/(months-1):,.2f}\n\
Greatest Increase in Profits: {inc[1]} (${inc[0]:,})\n\
Greatest Decrease in Profits: {dec[1]} (${dec[0]:,})\n'
)

# Bug fix: the original used open(...).write(...), which leaks the file
# handle and relies on GC to flush; a context manager closes it promptly.
with open(output_path, 'w') as report:
    report.write(output)
print(output)
print(output) | StarcoderdataPython |
3375023 | <filename>cli-image-processing/src/controller.py
import os
import settings
import cv2
import time
import requests
class App:
original_images = []
processed_images =[]
original_images_paths = []
cv_images = []
resized_images = []
grayscale_image = []
canny_edge_detection = []
def __init__(self):
settings.load_env()
print("CLI image processing app started. Awating images...")
def run(self):
self.original_images = self.get_original_images()
self.get_processed()
self.make_request(os.getenv("GREEN_LED"))
if self.compute_diff():
self.compute_abs_path_to_original_images()
self.cv_read_images()
self.cv_process_and_write_resize()
self.cv_process_and_write_grayscale()
self.cv_process_and_write_canny()
self.write_processed()
self.make_request(os.getenv("GREEN_LED"))
def write_processed(self):
path_to_processed_manifest = os.getenv("PROCESSED_IMAGES_MANIFEST")
stream = open(path_to_processed_manifest, 'a')
if self.original_images:
for image in self.original_images:
stream.write(image + ", ")
print ("Image {} was processed successfully".format(image))
stream.close()
def get_processed(self):
path_to_processed_manifest = os.getenv("PROCESSED_IMAGES_MANIFEST")
stream = open(path_to_processed_manifest, 'r')
data = stream.read().split(", ")
self.processed_images = data
def compute_diff(self):
diff = list(filter(lambda x: x not in self.processed_images, self.original_images))
if len(diff) > 0:
self.make_request(os.getenv("YELLOW_LED"))
self.original_images = diff
for image in self.original_images:
print("Image {} was found. Proceeding...".format(image))
time.sleep(0.5)
print("Stand by. Working ...")
time.sleep(0.5)
return True
else:
print("Awaiting new images to be processed...")
return False
def get_path_to_dir(self, env_path_var):
abs_path = os.path.abspath(env_path_var)
if not abs_path:
raise FileNotFoundError
return abs_path
def get_original_images(self):
self.original_images = os.listdir(self.get_path_to_dir(os.getenv("ORIGINAL_IMAGES_DIR")))
if not self.original_images:
print("There are no images to be processed")
return self.original_images
def compute_abs_path_to_original_images(self):
if self.original_images:
abs_path_to_original_images_folder = self.get_path_to_dir(os.getenv("ORIGINAL_IMAGES_DIR"))
for image in self.original_images:
self.original_images_paths.append(abs_path_to_original_images_folder + '/' + image)
def cv_read_images(self):
if self.original_images_paths:
for image in self.original_images_paths:
self.cv_images.append(cv2.imread(image, 1))
def cv_process_and_write_resize(self):
if self.cv_images:
for image in self.cv_images:
self.resized_images.append(cv2.resize(image, (int(os.getenv("WIDTH")), int(os.getenv("HEIGHT")))))
if self.resized_images:
i = 0
for image in self.resized_images:
write_name = os.getenv("PROCESSED_RESIZED_IMAGE_DIR") + "/_resized_" + self.original_images[i]
cv2.imwrite(write_name, image)
i += 1
if i == len(self.original_images):
break
def cv_process_and_write_grayscale(self):
    """Convert every cached image to grayscale and write it out."""
    if self.cv_images:
        for image in self.cv_images:
            self.grayscale_image.append(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY))
    if self.grayscale_image:
        # zip stops at the shorter sequence, replacing the original's
        # manual counter and explicit break.
        for name, image in zip(self.original_images, self.grayscale_image):
            write_name = os.getenv("PROCESSED_GRAYSCALE_IMAGE_DIR") + "/_grayscale_" + name
            cv2.imwrite(write_name, image)
def cv_process_and_write_canny(self):
    """Run Canny edge detection on every cached image and write it out."""
    if self.cv_images:
        # Read the thresholds once instead of per image.
        min_val = int(os.getenv("CANNY_MIN_VAL"))
        max_val = int(os.getenv("CANNY_MAX_VAL"))
        for image in self.cv_images:
            self.canny_edge_detection.append(cv2.Canny(image, min_val, max_val))
    if self.canny_edge_detection:
        # zip stops at the shorter sequence, replacing the original's
        # manual counter and explicit break.
        for name, image in zip(self.original_images, self.canny_edge_detection):
            write_name = os.getenv("PROCESSED_CANNY_EDGE_DETECTION_IMAGE_DIR") + "/_canny_" + name
            cv2.imwrite(write_name, image)
def make_request(self, route):
    """Issue a GET to the LED indicator service for the given route."""
    target = "http://{}/{}".format(os.getenv("INDICATOR_IP"), route)
    requests.get(target)
| StarcoderdataPython |
from math import factorial

# Modulus as an exact integer literal; int(1e9 + 7) happened to be exact,
# but round-tripping through a float is fragile and non-idiomatic.
mod = 10 ** 9 + 7

# Read n from stdin and print n! modulo 1e9+7.
n = int(input())
ans = factorial(n)
print(ans % mod)
| StarcoderdataPython |
3377477 | """
Use of this source code is governed by the MIT license found in the LICENSE file.
Base for serial or socket connections
"""
class StickConnection(object):
    """Abstract base for a Plugwise stick connection (serial or socket)."""

    def open_port(self) -> bool:
        """Initialize the connection; subclasses must override."""
        raise NotImplementedError

    def is_connected(self):
        """Return the current state of the connection; subclasses must override."""
        raise NotImplementedError

    def read_thread_alive(self):
        """Return whether the reader thread is running; subclasses must override."""
        raise NotImplementedError

    def write_thread_alive(self):
        """Return whether the writer thread is running; subclasses must override."""
        raise NotImplementedError

    def send(self, message, callback=None):
        """Queue *message* for sending; subclasses must override."""
        raise NotImplementedError

    def close_port(self):
        """Disconnect; subclasses must override."""
        raise NotImplementedError
| StarcoderdataPython |
4818815 | <reponame>Milo-Goodfellow-Work/GuildsDevelopmentBuild
from django.apps import AppConfig
class FullsearchConfig(AppConfig):
    """Django app-registry configuration for the FullSearch application."""
    # Dotted label Django uses to locate the app package.
    name = 'FullSearch'
| StarcoderdataPython |
164262 | from django.shortcuts import render
from django.core import serializers
from . models import Sensor, Devices, Online, Speedtest
import json
from django.http import HttpResponse
from django.views.decorators.http import require_GET
# Create your views here.
def index(request):
    """Render the landing page with all Sensor readings."""
    context = {'results': Sensor.objects.all()}
    return render(request, 'temprature/index.html', context)
def devices(request):
    """Render the devices page with all Devices rows."""
    context = {'results': Devices.objects.all()}
    return render(request, 'temprature/devices.html', context)
def sensor(request):
    """Render the sensor page with all Sensor rows."""
    context = {'results': Sensor.objects.all()}
    return render(request, 'temprature/sensor.html', context)
def online(request):
    """Render the online-status page with all Online rows."""
    context = {'results': Online.objects.all()}
    return render(request, 'temprature/online.html', context)
def speedtest(request):
    """Render the speed-test page with all Speedtest rows."""
    context = {'results': Speedtest.objects.all()}
    return render(request, 'temprature/speedtest.html', context)
| StarcoderdataPython |
1620348 | # Copyright 2016 Brocade Communications Systems, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ipaddress import ip_interface
from ne_base import NosDeviceAction
from ne_base import log_exceptions
import sys
class CreateVrrpe(NosDeviceAction):
    """
    Implements the logic to enable VRRP-E and configure the VIP and VMAC on
    VDX switches.

    This action achieves the following:
    1. Enable VRRP-E v4/v6
    2. Create the VRRP-E extended group
    3. Associate the VIP and VMAC address
    4. Enable short-path forwarding
    """

    def run(self, mgmt_ip, username, password, intf_type, intf_name, rbridge_id, vrid, virtual_ip):
        """Connect to the device and drive the helpers that apply the desired state."""
        try:
            self.setup_connection(host=mgmt_ip, user=username, passwd=password)
        except Exception as e:
            # NOTE(review): e.message is Python 2 only; this module also uses
            # `unicode` below, so it presumably targets Python 2 — confirm.
            self.logger.error(e.message)
            sys.exit(-1)
        changes = self.switch_operation(intf_type, intf_name, rbridge_id, virtual_ip, vrid)
        return changes

    @log_exceptions
    def switch_operation(self, intf_type, intf_name, rbridge_id, virtual_ip, vrid):
        """Apply the VRRP-E configuration, fanning out per rbridge when needed."""
        changes = {}
        with self.pmgr(conn=self.conn, auth_snmp=self.auth_snmp) as device:
            self.logger.info('successfully connected to %s to Enable'
                             ' VRRPE Configs', self.host)
            # When the platform supports rbridges and none was supplied,
            # target the vLAG pair members discovered from the device.
            if device.suports_rbridge and rbridge_id is None:
                rbridge_id = self.vlag_pair(device)
            if rbridge_id:
                for rb_id in rbridge_id:
                    self.validate_supports_rbridge(device, rb_id)
                    changes = self._create_vrrpe(device, intf_type, intf_name,
                                                 rb_id, virtual_ip, vrid)
            else:
                self.validate_supports_rbridge(device, rbridge_id)
                changes = self._create_vrrpe(device, intf_type, intf_name,
                                             rbridge_id, virtual_ip, vrid)
            return changes

    def _create_vrrpe(self, device, intf_type, intf_name, rbridge_id, virtual_ip, vrid):
        """Run the per-rbridge configuration steps; each step gates the next."""
        changes = {}
        changes['pre_validation'] = self._check_requirements(
            device, intf_type=intf_type, intf_name=intf_name,
            rbridge_id=rbridge_id, vrid=vrid, virtual_ip=virtual_ip)
        # _check_requirements returns the IP version as a string, or '' when
        # the config already exists (idempotent) or validation failed.
        if changes['pre_validation'] != '':
            ip_version = int(changes['pre_validation'])
            changes['start_vrrpe'] = self._start_vrrpe(
                device,
                rbridge_id=rbridge_id,
                ip_version=ip_version)
            changes['vrrpe_vip'] = self._create_vrrpe_vip(
                device,
                intf_type=intf_type, intf_name=intf_name,
                rbridge_id=rbridge_id,
                virtual_ip=virtual_ip,
                vrid=vrid,
                ip_version=ip_version)
            if changes['vrrpe_vip']:
                changes['vrrpe_vmac'] = self._create_vrrpe_vmac(
                    device,
                    intf_type=intf_type, intf_name=intf_name,
                    rbridge_id=rbridge_id,
                    vrid=vrid,
                    ip_version=ip_version)
                if changes['vrrpe_vmac']:
                    changes['vrrpe_spf'] = self._create_vrrpe_spf(
                        device,
                        intf_type=intf_type, intf_name=intf_name,
                        rbridge_id=rbridge_id,
                        vrid=vrid,
                        ip_version=ip_version)
        self.logger.info(
            'closing connection to %s after Enabling VRRPE - all done!',
            self.host)
        return changes

    def _check_requirements(self, device, intf_type, intf_name, vrid, rbridge_id,
                            virtual_ip):
        """Verify whether the VRRP-E configs are pre-configured.

        Returns the IP version of *virtual_ip* as a string, or '' when the
        association already exists; exits the process on hard validation
        failures.
        """
        # Verify the VIP address is a valid bare address (no netmask).
        try:
            # NOTE(review): `unicode` is a Python 2 builtin.
            tmp_ip = ip_interface(unicode(virtual_ip))
            ip_version = tmp_ip.version
        except ValueError:
            self.logger.error('Invalid Virtual IP Address %s' % virtual_ip)
            raise ValueError('Invalid Virtual IP Address %s' % virtual_ip)
        if len(unicode(virtual_ip).split("/")) != 1:
            raise ValueError(
                'Pass VIP address without netmask %s' %
                virtual_ip)
        """
        # Check if the VRRP-E/VRRPV3 is pre-existing
        version_to_validate = 6 if ip_version == 4 else 4
        proto = device.services.vrrpe(get=True, ip_version=int(version_to_validate),
                                      rbridge_id=rbridge_id)
        if proto['ipv%s_vrrpe' % version_to_validate]:
            raise ValueError('Device is pre-configured with ip version %s' %
                             version_to_validate)
        """
        # validate supported interface type for vrrpe
        device.interface.vrrpe_supported_intf(intf_type=intf_type)
        # Verify if the VRRPE configs pre-exist on the requested interface.
        if intf_type == 've':
            vlan_list = device.interface.ve_interfaces(rbridge_id=rbridge_id)
            config = self._validate_vip_vrid(device, vlan_list, 'Ve',
                                             intf_name, ip_version, virtual_ip, vrid, rbridge_id)
            if not config[0]:
                self.logger.error('Ve %s is not available' % intf_name)
                raise ValueError('Ve %s is not present on the device' % (intf_name))
        if intf_type == 'ethernet':
            eth_list = device.interface.get_eth_l3_interfaces()
            config = self._validate_vip_vrid(device, eth_list, 'eth',
                                             intf_name, ip_version, virtual_ip, vrid, rbridge_id)
            if not config[0]:
                self.logger.error('eth l3 intf %s is not available' % intf_name)
                raise ValueError('eth l3 intf %s is not present on the device' % (intf_name))
        # config = (intf_present, ip_version_or_empty, idempotent_check).
        # NOTE(review): `config` is only bound for 've'/'ethernet'; presumably
        # vrrpe_supported_intf rejects other types earlier — confirm.
        if str(config[1]) == '' and config[2] is False:
            sys.exit(-1)
        return str(config[1])

    def _validate_vip_vrid(self, device, intf_list, intf_type, intf_name,
                           ip_version, virtual_ip, vrid, rbridge_id):
        """
        Validate whether the VIP and VRID are already present.

        Returns (intf_present, ip_version_or_empty, idempotent_check):
        ip_version is blanked to '' whenever a conflicting or pre-existing
        association is found.
        """
        intf_present = False
        idempotent_check = False
        if intf_type == 'Ve':
            int_type = 've'
        else:
            int_type = 'ethernet'
        for each_intf in intf_list:
            if intf_type in each_intf['if-name']:
                # NOTE(review): tmp_ip_version is computed but never used.
                tmp_ip_version = ip_version
                if tmp_ip_version == '':
                    tmp_ip_version = 4
                # Existing VIP/VRID associations on this interface.
                vip_get = device.interface.vrrpe_vip(
                    get=True, int_type=int_type,
                    name=each_intf['if-name'].split()[1],
                    rbridge_id=rbridge_id)
                if each_intf['if-name'].split()[1] == intf_name:
                    intf_present = True
                    for each_entry in vip_get:
                        if self._is_same_vip(each_entry['vip'], virtual_ip) \
                                and each_entry['vrid'] == vrid:
                            # Same VIP + same VRID on the target interface:
                            # nothing to do (idempotent).
                            self.logger.info(
                                'VRRP Extended group %s & associations '
                                'are pre-existing in %s %s' %
                                (vrid, intf_type, intf_name))
                            ip_version = ''
                            idempotent_check = True
                        elif self._is_same_vip(each_entry['vip'], virtual_ip)\
                                and each_entry['vrid'] != vrid:
                            self.logger.error(
                                'VIP %s is associated to a different '
                                'VRRPE group %s in %s %s' %
                                (virtual_ip, each_entry['vrid'],
                                 intf_type, intf_name))
                            ip_version = ''
                        elif not self._is_same_vip(each_entry['vip'], virtual_ip) \
                                and each_entry['vrid'] == vrid:
                            self.logger.error(
                                'VRID %s is either associated to '
                                'a different IP %s or there is no '
                                'association existing in %s %s' %
                                (vrid, each_entry['vip'], intf_type, intf_name))
                            ip_version = ''
                elif each_intf['if-name'].split()[1] != intf_name:
                    # Conflicts on *other* interfaces of the same type.
                    for each_entry in vip_get:
                        if self._is_same_vip(each_entry['vip'], virtual_ip) \
                                and each_entry['vrid'] == vrid:
                            self.logger.error(
                                'VRRP-E group %s & associations are'
                                ' pre-existing on different %s %s' %
                                (vrid, intf_type, each_intf['if-name'].split()[1]))
                            ip_version = ''
                        elif self._is_same_vip(each_entry['vip'], virtual_ip) \
                                and each_entry['vrid'] != vrid:
                            self.logger.error('VIP %s is already part of'
                                              ' a different %s %s' %
                                              (virtual_ip, intf_type,
                                               each_intf['if-name'].split()[1]))
                            ip_version = ''
        return (intf_present, ip_version, idempotent_check)

    def _is_same_vip(self, vip_list, vip):
        """
        Check whether *vip* is present in *vip_list*, which may be a single
        string or an iterable of strings.
        """
        if(type(vip_list).__name__ == 'str'):
            if(vip_list == vip):
                return True
            else:
                return False
        else:
            for each_vip in vip_list:
                if(each_vip == vip):
                    return True
            return False

    def _start_vrrpe(self, device, rbridge_id, ip_version):
        """ Start the VRRPE service globally"""
        self.logger.info('Start the VRRPE v-%s service globally', ip_version)
        device.services.vrrpe(
            rbridge_id=rbridge_id,
            ip_version=str(ip_version))
        return True

    def _create_vrrpe_vip(self, device, intf_type, intf_name, rbridge_id,
                          virtual_ip, vrid, ip_version):
        """ Create the VRRPE extender group and associate the VIP """
        self.logger.info('Create the VRRPE extender group %s'
                         ' and associate the VIP service %s',
                         vrid, virtual_ip)
        device.interface.vrrpe_vrid(int_type=intf_type,
                                    name=intf_name,
                                    vrid=vrid,
                                    version=ip_version,
                                    rbridge_id=rbridge_id)
        device.interface.vrrpe_vip(name=intf_name, int_type=intf_type,
                                   vip=virtual_ip,
                                   vrid=vrid, rbridge_id=rbridge_id,
                                   version=int(ip_version))
        return True

    def _create_vrrpe_vmac(self, device, intf_type, intf_name, vrid,
                           rbridge_id, ip_version):
        """ Associate the VMAC to the extender group"""
        try:
            self.logger.info('Associating the VMAC to the extender '
                             'group %s', vrid)
            device.interface.vrrpe_vmac(int_type=intf_type, vrid=vrid,
                                        rbridge_id=rbridge_id,
                                        name=intf_name, version=int(ip_version))
        except (ValueError, KeyError):
            self.logger.exception('Unable to set VRRPe VMAC %s',
                                  vrid)
            raise ValueError('Unable to set VRRPe VMAC %s',
                             vrid)
        return True

    def _create_vrrpe_spf(self, device, intf_type, intf_name, rbridge_id,
                          vrid, ip_version):
        """ Enable short path forwarding on the extender group"""
        try:
            self.logger.info('Enable SPF on the extender group %s', vrid)
            device.interface.vrrpe_spf_basic(int_type=intf_type, vrid=vrid,
                                             name=intf_name,
                                             rbridge_id=rbridge_id, version=ip_version)
        except (ValueError, KeyError):
            self.logger.exception('Invalid input values vrid,intf_name '
                                  '%s %s' % (vrid, intf_name))
            raise ValueError('Invalid input values vrid,intf_name '
                             '%s %s' % (vrid, intf_name))
        return True
| StarcoderdataPython |
144792 | <reponame>0x008800/Sandbox
import urllib.request

# Fetch the page; the context manager closes the connection even on errors
# (the original called response.close() manually and would leak on failure).
with urllib.request.urlopen('https://google.com/') as response:
    mystr = response.read().decode("utf8")

# Write with an explicit encoding so the output does not depend on the
# platform locale; the original also shadowed the builtin name `file`.
with open('file.txt', 'w', encoding='utf8') as out_file:
    out_file.write(mystr)

print(mystr)
| StarcoderdataPython |
171187 | import matplotlib.pyplot as plt
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
import numpy as np
import skimage.io as io
import ipdb;pdb=ipdb.set_trace
from collections import OrderedDict
# markdown format output
def _print_name_value(name_value, full_arch_name):
names = name_value.keys()
values = name_value.values()
num_values = len(name_value)
print(
'| Arch ' +
' '.join(['| {}'.format(name) for name in names]) +
' |'
)
print('|---' * (num_values+1) + '|')
if len(full_arch_name) > 15:
full_arch_name = full_arch_name[:8] + '...'
print(
'| ' + full_arch_name + ' ' +
' '.join(['| {:.3f}'.format(value) for value in values]) +
' |'
)
# Ground-truth and detection annotation files (COCO keypoints format).
gt_anns = 'data/coco/annotations/person_keypoints_val2017.json'
# dt_anns = '/home/xyliu/2D_pose/deep-high-resolution-net.pytorch/person_keypoints.json'
dt_anns = '/home/xyliu/2D_pose/simple-pose-estimation/person_keypoints.json'
annType = 'keypoints'
# Load ground truth, attach detections, and run the standard COCO evaluation.
cocoGt=COCO(gt_anns)
cocoDt=cocoGt.loadRes(dt_anns)
cocoEval = COCOeval(cocoGt,cocoDt,annType)
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
# Pair each summary stat with its conventional AP/AR label, in order.
stats_names = ['AP', 'Ap .5', 'AP .75', 'AP (M)', 'AP (L)', 'AR', 'AR .5', 'AR .75', 'AR (M)', 'AR (L)']
info_str = []
for ind, name in enumerate(stats_names):
    info_str.append((name, cocoEval.stats[ind]))
name_values = OrderedDict(info_str)
model_name = 'openpose'
# Print one markdown table per result set (name_values is a dict here, so
# the list branch is effectively dead — kept for parity with callers that
# might pass a list).
if isinstance(name_values, list):
    for name_value in name_values:
        _print_name_value(name_value, model_name)
else:
    _print_name_value(name_values, model_name)
| StarcoderdataPython |
1727036 | <filename>hrflow/hrflow/job/__init__.py<gh_stars>1-10
from .parsing import JobParsing
from .indexing import JobIndexing
from .embedding import JobEmbedding
from .searching import JobSearching
from .scoring import JobScoring
from .reasoning import JobReasoning
class Job(object):
    """Facade bundling every job-related API helper behind one client."""

    def __init__(self, client):
        self.client = client
        # Each helper wraps one API surface and shares the same client.
        self.parsing = JobParsing(self.client)
        self.indexing = JobIndexing(self.client)
        self.embedding = JobEmbedding(self.client)
        self.searching = JobSearching(self.client)
        self.scoring = JobScoring(self.client)
        self.reasoning = JobReasoning(self.client)
| StarcoderdataPython |
4822310 | #from airflow import DAG
#from airflow.operators.python import PythonOperator
#from airflow.utils.dates import days_ago
import sqlite3
import pandas as pd
# Default DAG args (used by the commented-out Airflow wiring below).
default_args = {'owner': 'airflow'}
# Base project directory and the derived file locations for the ETL steps.
path = "C:\\Users\\joaoa\\Documents\\bootcamp"
path_db_producao = path+"\\data\\imoveis_prod.db"  # production (source) DB
path_db_datawarehouse = path+"\\data\\imoveis_dw.db"  # data-warehouse (target) DB
path_temp_csv = path+"\\data\\dataset.csv"  # staging CSV between steps
#dag = DAG(dag_id='data_pipeline', default_args=default_args, schedule_interval='@daily',start_date=days_ago(2))
def _extract():
    """Extract rental-listing data from the production DB into the staging CSV."""
    # Connect to the production database.
    connect_db_imoveis = sqlite3.connect(path_db_producao)
    try:
        # Select the denormalized listing data (city and state joined in).
        dataset_df = pd.read_sql_query(r"""
            SELECT CIDADE.NOME as 'cidade'
            ,ESTADO.NOME as 'estado'
            ,IMOVEIS.AREA as 'area'
            ,IMOVEIS.NUM_QUARTOS
            ,IMOVEIS.NUM_BANHEIROS
            ,IMOVEIS.NUM_ANDARES
            ,IMOVEIS.ACEITA_ANIMAIS
            ,IMOVEIS.MOBILIA
            ,IMOVEIS.VALOR_ALUGUEL
            ,IMOVEIS.VALOR_CONDOMINIO
            ,IMOVEIS.VALOR_IPTU
            ,IMOVEIS.VALOR_SEGURO_INCENDIO
            FROM IMOVEIS INNER JOIN CIDADE
            ON IMOVEIS.CODIGO_CIDADE = CIDADE.CODIGO
            INNER JOIN ESTADO
            ON CIDADE.CODIGO_ESTADO = ESTADO.CODIGO;
            """,
            connect_db_imoveis
        )
        # Export the data to the staging area.
        dataset_df.to_csv(
            path_temp_csv,
            index=False
        )
    finally:
        # Always release the connection, even if the query fails
        # (the original leaked it on any exception).
        connect_db_imoveis.close()
    return None
def _transform():
    """Normalize the staged CSV in place: encode booleans, fix city names."""
    df = pd.read_csv(path_temp_csv)
    # Encode yes/no attributes as 1/0.
    df.aceita_animais.replace({'acept': 1, 'not acept': 0}, inplace=True)
    df.mobilia.replace({'furnished': 1, 'not furnished': 0}, inplace=True)
    # Clean up records: placeholder floor counts and city-name spelling.
    df.num_andares.replace({'-': 1}, inplace=True)
    df.cidade = df.cidade.str.title()
    df.cidade.replace({'Sao Paulo': 'São Paulo',
                       'Rio Janeiro': 'Rio de Janeiro'}, inplace=True)
    # Overwrite the staging file with the transformed data.
    df.to_csv(path_temp_csv, index=False)
    return None
def _load():
    """Load the staged CSV into the data-warehouse `imoveis` table."""
    # Connect to the data-warehouse database.
    connect_db_imoveis_dw = sqlite3.connect(path_db_datawarehouse)
    try:
        # Read the transformed data from the staging CSV.
        dataset_df = pd.read_csv(path_temp_csv)
        # Replace the target table with the fresh data.
        dataset_df.to_sql("imoveis", connect_db_imoveis_dw,
                          if_exists="replace", index=False)
    finally:
        # The original never closed this connection; always release it.
        connect_db_imoveis_dw.close()
    return None
#---------------------------------------------
# ETL
#----------------------------------------------
# Airflow operator wiring (kept for when this runs as a DAG):
# extract_task = PythonOperator(task_id="extract", python_callable=_extract, dag=dag)
# transform_task = PythonOperator(task_id="transform", python_callable=_transform, dag=dag)
# load_task = PythonOperator(task_id="load", python_callable=_load, dag=dag)
# extract_task >> transform_task >> load_task

# Guard the manual pipeline so importing this module (e.g. from Airflow)
# does not run the ETL as a side effect.
if __name__ == '__main__':
    # Extract
    _extract()
    # Transform
    _transform()
    # Load
    _load()
| StarcoderdataPython |
1732017 | import logging
from abc import ABC
from typing import Any, Dict, Type, Union
from torch.optim import Optimizer
from torch.tensor import Tensor
from torch.utils.data import DataLoader, TensorDataset
from melbe.collections.pipelines.torch.configs import TorchConfig
from melbe.data import PREDICTIONS, TEXT_SENTENCE, LIST_SENTENCE
from melbe.configs import ClassConfig, MelbeConfig, select
from melbe.pipelines import Pipeline, PipelineInputs, PipelinePredictions
# Aliases for a factory group: a single class, a mapping of names to
# classes, or None (not configured).
MODELS_CLASS = SCHEDULERS_CLASS = MODULE_CLASS = Union[Type, Dict[str, Type], None]
OPTIMIZERS_CLASS = Union[Type[Optimizer], Dict[str, Type[Optimizer]], None]
class TorchInputs:
    """Tensor bundle fed to a model: ids, attention mask, optional labels."""

    input_ids: Tensor = ...
    attention_mask: Tensor = ...
    labels: Tensor = ...

    def __init__(self, input_ids: Tensor, attention_mask: Tensor, labels: Tensor = None):
        # Plain attribute assignment; labels stays None for inference inputs.
        self.labels = labels
        self.attention_mask = attention_mask
        self.input_ids = input_ids
class TorchPredictions(PipelinePredictions):
    """Pipeline predictions extended with raw logits and final predictions."""

    logits: Tensor = ...
    preds: Tensor = ...

    def __init__(self, logits: Tensor = None, preds: Tensor = None, *args, **kwargs):
        # Let the base class consume any remaining arguments first.
        super().__init__(*args, **kwargs)
        self.preds = preds
        self.logits = logits
class TorchPipelineInputs(PipelineInputs):
    """Per-phase (train/validate/test/predict) bundle of TorchInputs."""

    train: TorchInputs = ...
    validate: TorchInputs = ...
    test: TorchInputs = ...
    predict: TorchInputs = ...

    def __init__(self, **kwargs):
        # Delegates entirely to PipelineInputs; the annotations above only
        # narrow the expected attribute types.
        super().__init__(**kwargs)
class TorchDataLoaders:
    """Holder for the train/validate/test/predict DataLoader instances."""

    train: DataLoader = ...
    validate: DataLoader = ...
    test: DataLoader = ...
    predict: DataLoader = ...

    def __init__(self,
                 train: DataLoader = None,
                 validate: DataLoader = None,
                 test: DataLoader = None,
                 predict: DataLoader = None):
        # None means "not configured for this phase".
        self.predict = predict
        self.test = test
        self.validate = validate
        self.train = train
class TorchClasses:
    """Caller-supplied factory-class overrides for models/optimizers/schedulers."""

    models: MODELS_CLASS = ...
    optimizers: OPTIMIZERS_CLASS = ...
    schedulers: SCHEDULERS_CLASS = ...

    def __init__(self,
                 models: MODELS_CLASS = None,
                 optimizers: OPTIMIZERS_CLASS = None,
                 schedulers: SCHEDULERS_CLASS = None):
        # None means "use whatever the config declares".
        self.schedulers = schedulers
        self.optimizers = optimizers
        self.models = models
class Torch(Pipeline, ABC):
    """Abstract torch-backed pipeline: wires config, factories, and loaders."""

    config: TorchConfig = ...
    classes: TorchClasses = ...
    models: Union[Any, Dict[str, Any]] = ...
    optimizers: Union[Optimizer, Dict[str, Optimizer]] = ...
    schedulers: Union[Any, Dict[str, Any], None] = ...
    inputs: TorchPipelineInputs = None
    data_loaders: TorchDataLoaders = None
    predictions: TorchPredictions = None
    checkpoint: Dict[str, Any] = ...

    def __init__(self,
                 melbe_config: MelbeConfig,
                 models: MODELS_CLASS = None,
                 optimizers: OPTIMIZERS_CLASS = None,
                 schedulers: SCHEDULERS_CLASS = None,
                 **kwargs):
        super().__init__(melbe_config)
        # `select` narrows kwargs to the keys belonging to TorchConfig.
        self.config = TorchConfig(melbe_config=melbe_config, **select(kwargs, TorchConfig.name))
        self.classes = TorchClasses(models, optimizers, schedulers)

    def setup(self):
        """Instantiate models/optimizers/schedulers from their configs."""
        self.setup_module('models', required=True)
        self.setup_module('optimizers', required=True)
        self.setup_module('schedulers')
        return self

    def setup_module(self, name: str, required: bool = False) -> None:
        """Instantiate the module group *name* ('models', 'optimizers', ...).

        Caller-supplied factory classes (self.classes) override the classes
        declared in the config before instantiation; the result is stored as
        the attribute *name* on self (a dict for named groups).
        """
        factory_module: MODULE_CLASS = getattr(self.classes, name)
        config_module: Union[ClassConfig, Dict[ClassConfig]] = getattr(self.config, name)
        if factory_module is not None:
            # Override the configured class(es) with the caller-provided ones.
            if isinstance(factory_module, dict):
                for key, comp in factory_module.items():
                    config_module[key].override(comp)
            else:
                config_module.override(factory_module)
        if isinstance(config_module, dict):
            # Named group: one instance per entry.
            setattr(self, name, {})
            for key, comp in config_module.items():
                getattr(self, name)[key] = comp(**comp.kwargs)
        elif config_module.cls is not None:
            setattr(self, name, config_module(**config_module.kwargs))
        elif required:
            error = f'At least one module of "{name}" is required but none provided.'
            logging.error(error)
            raise RuntimeError(error)
        else:
            setattr(self, name, None)

    def prepare(self):
        """Build the input tensors and wrap them into DataLoaders."""
        self.build_inputs()
        self.build_data_loaders()
        return self

    def build_data_loaders(self):
        """Create DataLoaders for whichever phases have inputs available."""
        def build(inputs: TorchInputs) -> TensorDataset:
            # input_ids: torch.LongTensor of shape (batch_size, sequence_length)
            # attention_mask: torch.FloatTensor of shape (batch_size, sequence_length)
            # labels: torch.LongTensor of shape (batch_size, sequence_length)
            if inputs.labels is not None:
                return TensorDataset(inputs.input_ids,
                                     inputs.attention_mask,
                                     inputs.labels)
            else:
                return TensorDataset(inputs.input_ids,
                                     inputs.attention_mask)
        logging.info('Building data loaders...')
        self.data_loaders = TorchDataLoaders()
        # Prediction inputs take precedence; otherwise build per requested task.
        if self.inputs.predict is not None:
            self.data_loaders.predict = DataLoader(build(self.inputs.predict), **self.config.data_loaders.predict)
        else:
            if 'train' in self.tasks and self.inputs.train is not None:
                self.data_loaders.train = DataLoader(build(self.inputs.train), **self.config.data_loaders.train)
                if self.inputs.validate is not None:
                    self.data_loaders.validate = DataLoader(build(self.inputs.validate),
                                                            **self.config.data_loaders.validate)
            else:
                self.data_loaders.train = None
                self.data_loaders.validate = None
            if 'test' in self.tasks and self.inputs.test is not None:
                self.data_loaders.test = DataLoader(build(self.inputs.test), **self.config.data_loaders.test)
        logging.info('Building data loaders completed.')
        return self

    def predict(self, **kwargs) -> TorchPredictions:
        """Run the base pipeline prediction and return the torch predictions."""
        super().predict(**kwargs)
        return self.predictions

    def process_predictions(self, preds: Tensor) -> None:
        """Map flat per-sentence predictions back onto the document structure."""
        if self.predictions.type == 'text' or self.predictions.type == 'words':
            # Single sentence input.
            self.predictions.results = self.cast_prediction_sentence(self.documents[0][0], preds[0], 0)
        elif self.predictions.type == 'document':
            # One document: one prediction per sentence.
            self.predictions.results = [self.cast_prediction_sentence(self.documents[0][i], preds[i], i)
                                        for i in range(len(preds))]
        elif self.predictions.type == 'documents':
            # Multiple documents: nested result lists over a flat pred index.
            self.predictions.results, pred_i = [], 0
            for document in self.documents:
                self.predictions.results.append([])
                for sentence in document:
                    self.predictions.results[-1].append(self.cast_prediction_sentence(sentence,
                                                                                      preds[pred_i],
                                                                                      pred_i))
                    pred_i += 1

    def cast_prediction_sentence(self,
                                 sentence: Union[TEXT_SENTENCE, LIST_SENTENCE],
                                 preds: Tensor,
                                 pred_i: int) -> PREDICTIONS:
        """Convert one sentence's raw predictions; implemented by subclasses."""
        ...
| StarcoderdataPython |
4822301 | <gh_stars>1-10
import os
from os import path
from importlib import import_module
from flask import Flask
from flask import url_for
from flask_login import LoginManager
import sentry_sdk
from sentry_sdk.integrations.flask import FlaskIntegration
from api.views.new_cases import new_cases_views
from api.views.redirects import redirects_views
from api.views.webhook import webhook_views
login_manager = LoginManager()
def register_blueprints(app_instance):
    """Attach every API and app blueprint to *app_instance*."""
    # Explicit API blueprints, registered first.
    for views in (new_cases_views, redirects_views, webhook_views):
        app_instance.register_blueprint(views)
    # App blueprints discovered by module name under app.<name>.routes.
    module_names = (
        "additional",
        "base",
        "data",
        "forms",
        "home",
        "public",
        "tables",
        "ui",
    )
    for module_name in module_names:
        module = import_module("app.{}.routes".format(module_name))
        app_instance.register_blueprint(module.blueprint)
def apply_themes(flask_app):
    """
    Add support for themes.

    If DEFAULT_THEME is set then all calls to
        url_for('static', filename='')
    will modify the url to include the theme name.

    The theme parameter can be set directly in url_for as well:
        ex. url_for('static', filename='', theme='')

    If the file cannot be found in the /static/<theme>/ location then
    the url will not be modified and the file is expected to be
    in the default /static/ location.
    """
    @flask_app.context_processor
    def override_url_for():
        # Replace url_for in Jinja templates with the theme-aware version.
        return dict(url_for=_generate_url_for_theme)

    def _generate_url_for_theme(endpoint, **values):
        if endpoint.endswith("static"):
            # Explicit theme= argument wins over the configured default.
            theme = values.get("theme", None) or flask_app.config.get(
                "DEFAULT_THEME", None
            )
            if theme:
                theme_file = "{}/{}".format(theme, values.get("filename", ""))
                # Only rewrite when the themed file actually exists on disk.
                if path.isfile(path.join(flask_app.static_folder, theme_file)):
                    values["filename"] = theme_file
            # Outside development, prefix static URLs with STATIC_HOST
            # (presumably a CDN host — confirm deployment config).
            if flask_app.env != "development":
                return (
                    f"{os.getenv('STATIC_HOST')}{url_for(endpoint, **values)}"
                )
        return url_for(endpoint, **values)
def create_app():
    """Build and configure the Flask application instance."""
    flask_app = Flask(__name__, static_folder="app/base/static",)
    # Secrets and toggles come from the environment; SECRET_KEY is mandatory.
    flask_app.config["SECRET_KEY"] = os.environ["SECRET_KEY"]
    flask_app.config["LOGIN_DISABLED"] = os.getenv("LOGIN_DISABLED")
    login_manager.init_app(flask_app)
    register_blueprints(flask_app)
    apply_themes(flask_app)
    return flask_app
# Enable Sentry error reporting only when a DSN is configured.
SENTRY_DSN = os.getenv("SENTRY_DSN")
if SENTRY_DSN:
    sentry_sdk.init(
        dsn=os.environ["SENTRY_DSN"], integrations=[FlaskIntegration()]
    )
# Module-level WSGI application object (used by WSGI servers / `flask run`).
app = create_app()
if __name__ == "__main__":
    # Development entry point; HOST/PORT/DEBUG come from the environment.
    app.run(
        host=os.getenv("HOST", "127.0.0.1"),
        port=int(os.environ.get("PORT", 5000)),
        debug=os.getenv("DEBUG", False),
    )
| StarcoderdataPython |
1633591 | from output.models.nist_data.atomic.duration.schema_instance.nistschema_sv_iv_atomic_duration_enumeration_5_xsd.nistschema_sv_iv_atomic_duration_enumeration_5 import (
NistschemaSvIvAtomicDurationEnumeration5,
NistschemaSvIvAtomicDurationEnumeration5Type,
)
# Explicit public API of this re-export module.
__all__ = [
    "NistschemaSvIvAtomicDurationEnumeration5",
    "NistschemaSvIvAtomicDurationEnumeration5Type",
]
| StarcoderdataPython |
3257059 | <reponame>ifwe/digsby
import logging
# Alias Logger.debug_s to the standard Logger.debug (presumably other digsby
# modules call debug_s — verify before removing).
logging.Logger.debug_s = logging.Logger.debug
import wx
from gui.browser.webkit import WebKitWindow
def test_webkit_unicode():
    """Show a WebKit frame and inject a non-ASCII string after 500 ms."""
    frame = wx.Frame(None)
    webkit = WebKitWindow(frame, initialContents = 'test')
    #webkit.RunScript('document.write("test");')

    def write_unicode():
        webkit.RunScript(u'document.write("<p>abcd\u1234</p>");')

    # Delay the script so the page has time to finish loading first.
    wx.CallLater(500, write_unicode)
    frame.Show()
def main():
    """Spin up a minimal wx app, build the test frame, and run the loop."""
    app = wx.PySimpleApp()
    test_webkit_unicode()
    app.MainLoop()

if __name__ == '__main__':
    main()
3215985 | import sys
import json
from subprocess import Popen, PIPE
try:
import yaml
except ImportError:
print('Unable to import YAML module: please install PyYAML', file=sys.stderr)
sys.exit(1)
class Reporter(object):
    """Collect and report error messages with optional location prefixes."""

    def __init__(self):
        """Constructor."""
        super(Reporter, self).__init__()
        self.messages = []

    def check_field(self, filename, name, values, key, expected):
        """Check that dictionary *values* maps *key* to *expected*."""
        if key not in values:
            self.add(filename, '{0} does not contain {1}', name, key)
        elif values[key] != expected:
            self.add(filename, '{0} {1} is {2} not {3}', name, key, values[key], expected)

    def check(self, condition, location, fmt, *args):
        """Append error if *condition* is not met."""
        if not condition:
            self.add(location, fmt, *args)

    def add(self, location, fmt, *args):
        """Append error unilaterally.

        *location* may be None (no prefix), a filename string, or a
        (filename, line_number) tuple.
        """
        if location is None:
            # `is None` replaces the original isinstance(location, type(None)).
            coords = ''
        elif isinstance(location, str):
            coords = '{0}: '.format(location)
        elif isinstance(location, tuple):
            filename, line_number = location
            coords = '{0}:{1}: '.format(filename, line_number)
        else:
            # Programmer error: an unsupported location type was passed.
            assert False, 'Unknown location "{0}"/{1}'.format(location, type(location))
        self.messages.append(coords + fmt.format(*args))

    def report(self, stream=None):
        """Report all messages, sorted, to *stream* (default: sys.stdout).

        The default is resolved at call time so later redirection of
        sys.stdout is respected (the original bound it at definition time).
        """
        if stream is None:
            stream = sys.stdout
        if not self.messages:
            return
        for m in sorted(self.messages):
            print(m, file=stream)
def read_markdown(parser, path):
    """
    Get YAML and AST for Markdown file, returning
    {'metadata':yaml, 'metadata_len':N, 'text':text, 'lines':[(i, line, len)], 'doc':doc}.
    """
    # Split and extract YAML (if present).
    with open(path, 'r') as reader:
        body = reader.read()
    metadata_raw, metadata_yaml, body = split_metadata(path, body)
    # Split into lines. Line numbers are offset by the YAML header length so
    # they match positions in the original file, not the stripped body.
    metadata_len = 0 if metadata_raw is None else metadata_raw.count('\n')
    lines = [(metadata_len+i+1, line, len(line)) for (i, line) in enumerate(body.split('\n'))]
    # Parse Markdown via the external Ruby helper, which reads the body on
    # stdin and emits the parse tree as JSON on stdout.
    # NOTE(review): shell=True with an interpolated path is fine for trusted
    # local use, but quote or avoid the shell if `parser` can contain spaces.
    cmd = 'ruby {0}'.format(parser)
    p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, close_fds=True, universal_newlines=True)
    stdout_data, stderr_data = p.communicate(body)
    doc = json.loads(stdout_data)
    return {
        'metadata': metadata_yaml,
        'metadata_len': metadata_len,
        'text': body,
        'lines': lines,
        'doc': doc
    }
def split_metadata(path, text):
    """
    Split *text* into (raw metadata, parsed metadata, body).

    Returns (None, None, text) when the document has no '---'-delimited
    YAML front matter. Exits the process when the header is not valid YAML.
    """
    metadata_raw = None
    metadata_yaml = None
    # (The original also initialized an unused `metadata_len` here.)
    pieces = text.split('---', 2)
    if len(pieces) == 3:
        metadata_raw = pieces[1]
        text = pieces[2]
        try:
            # NOTE(review): yaml.load without an explicit Loader constructs
            # arbitrary Python objects; prefer yaml.safe_load if headers may
            # come from untrusted files.
            metadata_yaml = yaml.load(metadata_raw)
        except yaml.YAMLError as e:
            print('Unable to parse YAML header in {0}:\n{1}'.format(path, e), file=sys.stderr)
            sys.exit(1)
    return metadata_raw, metadata_yaml, text
def load_yaml(filename):
    """
    Wrapper around YAML loading so that 'import yaml' and error
    handling is only needed in one place.
    """
    with open(filename, 'r') as reader:
        # NOTE(review): yaml.load without an explicit Loader constructs
        # arbitrary Python objects; prefer yaml.safe_load for untrusted files.
        return yaml.load(reader)
| StarcoderdataPython |
1635251 | # -*- encoding: utf-8 -*-
'''
HubbleStack Nova-to-Splunk returner
:maintainer: HubbleStack
:platform: All
:requires: SaltStack
Deliver HubbleStack Nova result data into Splunk using the HTTP
event collector. Required config/pillar settings:
.. code-block:: yaml
hubblestack:
returner:
splunk:
- token: <KEY>
indexer: splunk-indexer.domain.tld
index: hubble
sourcetype_nova: hubble_audit
You can also add a `custom_fields` argument, which is a list of keys to add to events
using the results of config.get(<custom_field>). These new keys will be prefixed
with 'custom_' to prevent conflicts. The values of these keys should be
strings, do not choose grains or pillar values with complex values or they will
be skipped:
.. code-block:: yaml
hubblestack:
returner:
splunk:
- token: <KEY>
indexer: splunk-indexer.domain.tld
index: hubble
sourcetype_nova: hubble_audit
custom_fields:
- site
- product_group
'''
import socket
# Import AWS details
from aws_details import get_aws_details
# Imports for http event forwarder
import requests
import json
import time
import logging
# Maximum payload size in bytes per event-collector batch (presumably the
# flush threshold used by http_event_collector — confirm).
_max_content_bytes = 100000
# Whether to verify TLS certificates when posting to the HTTP event collector.
http_event_collector_SSL_verify = False
# Extra debug output from the event-collector helper.
http_event_collector_debug = False
log = logging.getLogger(__name__)
# Module-level collector handle. NOTE(review): returner() assigns a *local*
# `hec`, so this global appears to stay None — verify intent.
hec = None
def returner(ret):
    """
    Deliver a hubble.audit job return to every configured Splunk HEC endpoint.

    *ret* is a salt job return dict ('return', 'id', 'jid').  For each
    configured endpoint one event is batched per Failure/Success check plus
    one summary Compliance event, then the batch is flushed.

    NOTE: Python 2 code (``iteritems``, ``dict.keys()[0]`` indexing).
    """
    opts_list = _get_options()
    # Get aws details (AMI/instance/account ids when running on EC2).
    aws = get_aws_details()
    for opts in opts_list:
        log.info('Options: %s' % json.dumps(opts))
        http_event_collector_key = opts['token']
        http_event_collector_host = opts['indexer']
        http_event_collector_port = opts['port']
        hec_ssl = opts['http_event_server_ssl']
        proxy = opts['proxy']
        timeout = opts['timeout']
        custom_fields = opts['custom_fields']
        # Set up the collector for this endpoint.
        hec = http_event_collector(http_event_collector_key, http_event_collector_host, http_event_port=http_event_collector_port, http_event_server_ssl=hec_ssl, proxy=proxy, timeout=timeout)
        # st = 'salt:hubble:nova'
        data = ret['return']
        minion_id = ret['id']
        jid = ret['jid']
        fqdn = __grains__['fqdn']
        # Sometimes fqdn is blank. If it is, replace it with minion_id
        fqdn = fqdn if fqdn else minion_id
        master = __grains__['master']
        # Resolve a usable source IP: prefer fqdn_ip4, fall back to the
        # first ipv4, and skip loopback addresses when possible.
        try:
            fqdn_ip4 = __grains__['fqdn_ip4'][0]
        except IndexError:
            fqdn_ip4 = __grains__['ipv4'][0]
        if fqdn_ip4.startswith('127.'):
            for ip4_addr in __grains__['ipv4']:
                if ip4_addr and not ip4_addr.startswith('127.'):
                    fqdn_ip4 = ip4_addr
                    break
        if __grains__['master']:
            master = __grains__['master']
        else:
            master = socket.gethostname()  # We *are* the master, so use our hostname
        if not isinstance(data, dict):
            log.error('Data sent to splunk_nova_return was not formed as a '
                      'dict:\n{0}'.format(data))
            return
        # One event per failed check.
        for fai in data.get('Failure', []):
            check_id = fai.keys()[0]
            payload = {}
            event = {}
            event.update({'check_result': 'Failure'})
            event.update({'check_id': check_id})
            event.update({'job_id': jid})
            if not isinstance(fai[check_id], dict):
                # Bare string result: treat it as the description.
                event.update({'description': fai[check_id]})
            elif 'description' in fai[check_id]:
                # Copy every detail field of the check except its tag.
                for key, value in fai[check_id].iteritems():
                    if key not in ['tag']:
                        event[key] = value
            event.update({'master': master})
            event.update({'minion_id': minion_id})
            event.update({'dest_host': fqdn})
            event.update({'dest_ip': fqdn_ip4})
            # Attach EC2 identity when running on AWS.
            if aws['aws_account_id'] is not None:
                event.update({'aws_ami_id': aws['aws_ami_id']})
                event.update({'aws_instance_id': aws['aws_instance_id']})
                event.update({'aws_account_id': aws['aws_account_id']})
            # Only flat string/list config values are forwarded; any other
            # type is silently skipped (see module docstring).
            for custom_field in custom_fields:
                custom_field_name = 'custom_' + custom_field
                custom_field_value = __salt__['config.get'](custom_field, '')
                if isinstance(custom_field_value, str):
                    event.update({custom_field_name: custom_field_value})
                elif isinstance(custom_field_value, list):
                    custom_field_value = ','.join(custom_field_value)
                    event.update({custom_field_name: custom_field_value})
            payload.update({'host': fqdn})
            payload.update({'index': opts['index']})
            payload.update({'sourcetype': opts['sourcetype']})
            payload.update({'event': event})
            hec.batchEvent(payload)
        # One event per successful check (same shape as failures).
        for suc in data.get('Success', []):
            check_id = suc.keys()[0]
            payload = {}
            event = {}
            event.update({'check_result': 'Success'})
            event.update({'check_id': check_id})
            event.update({'job_id': jid})
            if not isinstance(suc[check_id], dict):
                event.update({'description': suc[check_id]})
            elif 'description' in suc[check_id]:
                for key, value in suc[check_id].iteritems():
                    if key not in ['tag']:
                        event[key] = value
            event.update({'master': master})
            event.update({'minion_id': minion_id})
            event.update({'dest_host': fqdn})
            event.update({'dest_ip': fqdn_ip4})
            if aws['aws_account_id'] is not None:
                event.update({'aws_ami_id': aws['aws_ami_id']})
                event.update({'aws_instance_id': aws['aws_instance_id']})
                event.update({'aws_account_id': aws['aws_account_id']})
            for custom_field in custom_fields:
                custom_field_name = 'custom_' + custom_field
                custom_field_value = __salt__['config.get'](custom_field, '')
                if isinstance(custom_field_value, str):
                    event.update({custom_field_name: custom_field_value})
                elif isinstance(custom_field_value, list):
                    custom_field_value = ','.join(custom_field_value)
                    event.update({custom_field_name: custom_field_value})
            payload.update({'host': fqdn})
            payload.update({'sourcetype': opts['sourcetype']})
            payload.update({'index': opts['index']})
            payload.update({'event': event})
            hec.batchEvent(payload)
        # Single summary event with the overall compliance percentage.
        if data.get('Compliance', None):
            payload = {}
            event = {}
            event.update({'job_id': jid})
            event.update({'compliance_percentage': data['Compliance']})
            event.update({'master': master})
            event.update({'minion_id': minion_id})
            event.update({'dest_host': fqdn})
            event.update({'dest_ip': fqdn_ip4})
            if aws['aws_account_id'] is not None:
                event.update({'aws_ami_id': aws['aws_ami_id']})
                event.update({'aws_instance_id': aws['aws_instance_id']})
                event.update({'aws_account_id': aws['aws_account_id']})
            for custom_field in custom_fields:
                custom_field_name = 'custom_' + custom_field
                custom_field_value = __salt__['config.get'](custom_field, '')
                if isinstance(custom_field_value, str):
                    event.update({custom_field_name: custom_field_value})
                elif isinstance(custom_field_value, list):
                    custom_field_value = ','.join(custom_field_value)
                    event.update({custom_field_name: custom_field_value})
            payload.update({'host': fqdn})
            payload.update({'sourcetype': opts['sourcetype']})
            payload.update({'index': opts['index']})
            payload.update({'event': event})
            hec.batchEvent(payload)
        hec.flushBatch()
    return
def event_return(event):
    '''
    When called from the master via event_return.

    Note that presently the master won't see returners in file_roots/_returners
    so you need to put it in a returners/ subdirectory and configure
    custom_modules in your master config.

    Filters the event stream down to salt job returns for hubble.audit and
    forwards each matching payload to the standard returner().
    '''
    for item in event:
        # Only salt job events are of interest to hubble.
        if 'salt/job/' not in item['tag']:
            continue
        # Only returns from hubble.audit are relevant.
        if item['data']['fun'] != 'hubble.audit':
            continue
        log.debug('Logging event: %s' % str(item))
        returner(item['data'])  # Call the standard returner
    return
def _get_options():
    """
    Build the list of Splunk endpoint option dicts from config/pillar.

    Prefers the modern 'hubblestack:returner:splunk' layout (one or many
    endpoints); falls back to the legacy flat 'hubblestack:nova:returner:
    splunk:*' keys.  Returns a list of option dicts, or None when the
    legacy keys are missing/malformed.
    """
    if __salt__['config.get']('hubblestack:returner:splunk'):
        splunk_opts = []
        returner_opts = __salt__['config.get']('hubblestack:returner:splunk')
        # A single endpoint may be given as a bare dict; normalize to list.
        if not isinstance(returner_opts, list):
            returner_opts = [returner_opts]
        for opt in returner_opts:
            processed = {}
            processed['token'] = opt.get('token')
            processed['indexer'] = opt.get('indexer')
            processed['port'] = str(opt.get('port', '8088'))
            processed['index'] = opt.get('index')
            processed['custom_fields'] = opt.get('custom_fields', [])
            processed['sourcetype'] = opt.get('sourcetype_nova', 'hubble_audit')
            processed['http_event_server_ssl'] = opt.get('hec_ssl', True)
            processed['proxy'] = opt.get('proxy', {})
            processed['timeout'] = opt.get('timeout', 9.05)
            splunk_opts.append(processed)
        return splunk_opts
    else:
        try:
            token = __salt__['config.get']('hubblestack:nova:returner:splunk:token').strip()
            indexer = __salt__['config.get']('hubblestack:nova:returner:splunk:indexer')
            sourcetype = __salt__['config.get']('hubblestack:nova:returner:splunk:sourcetype')
            index = __salt__['config.get']('hubblestack:nova:returner:splunk:index')
            # BUG FIX: previously read from the 'hubblestack:nebula:' config
            # namespace; this is the nova returner, so use the nova keys.
            custom_fields = __salt__['config.get']('hubblestack:nova:returner:splunk:custom_fields', [])
        except Exception:
            # Narrowed from a bare ``except:`` which also swallowed
            # SystemExit/KeyboardInterrupt. Missing/malformed legacy config
            # (e.g. .strip() on a non-string) means "not configured".
            return None
        splunk_opts = {'token': token, 'indexer': indexer, 'sourcetype': sourcetype, 'index': index, 'custom_fields': custom_fields}
        hec_ssl = __salt__['config.get']('hubblestack:nova:returner:splunk:hec_ssl', True)
        splunk_opts['http_event_server_ssl'] = hec_ssl
        splunk_opts['proxy'] = __salt__['config.get']('hubblestack:nova:returner:splunk:proxy', {})
        splunk_opts['timeout'] = __salt__['config.get']('hubblestack:nova:returner:splunk:timeout', 9.05)
        return [splunk_opts]
def send_splunk(event, index_override=None, sourcetype_override=None):
    # Batch a single *event* onto the module-level collector, optionally
    # overriding the configured index/sourcetype.
    # NOTE(review): this reads a global ``opts`` that is never defined at
    # module level, and the module-global ``hec`` is initialised to None and
    # never rebound (returner() only assigns a local) — calling this as-is
    # would raise NameError/AttributeError. Confirm intended wiring.
    # Get Splunk Options
    # init the payload
    payload = {}
    # Set up the event metadata
    if index_override is None:
        payload.update({'index': opts['index']})
    else:
        payload.update({'index': index_override})
    if sourcetype_override is None:
        payload.update({'sourcetype': opts['sourcetype']})
    else:
        payload.update({'sourcetype': sourcetype_override})
    # Add the event
    payload.update({'event': event})
    log.info('Payload: %s' % json.dumps(payload))
    # fire it off (batched; delivery happens on the next flushBatch())
    hec.batchEvent(payload)
    return True
# Thanks to <NAME> for the http_event_collector class (https://github.com/georgestarcher/)
# Default batch max size to match splunk's default limits for max byte
# See http_input stanza in limits.conf; note in testing I had to limit to 100,000 to avoid http event collector breaking connection
# Auto flush will occur if next event payload will exceed limit
class http_event_collector:
def __init__(self, token, http_event_server, host='', http_event_port='8088', http_event_server_ssl=True, max_bytes=_max_content_bytes, proxy=None, timeout=9.05):
self.timeout = timeout
self.token = token
self.batchEvents = []
self.maxByteLength = max_bytes
self.currentByteLength = 0
self.server_uri = []
if proxy and http_event_server_ssl:
self.proxy = {'https': 'https://{0}'.format(proxy)}
elif proxy:
self.proxy = {'http': 'http://{0}'.format(proxy)}
else:
self.proxy = {}
# Set host to specified value or default to localhostname if no value provided
if host:
self.host = host
else:
self.host = socket.gethostname()
# Build and set server_uri for http event collector
# Defaults to SSL if flag not passed
# Defaults to port 8088 if port not passed
servers = http_event_server
if not isinstance(servers, list):
servers = [servers]
for server in servers:
if http_event_server_ssl:
self.server_uri.append(['https://%s:%s/services/collector/event' % (server, http_event_port), True])
else:
self.server_uri.append(['http://%s:%s/services/collector/event' % (server, http_event_port), True])
if http_event_collector_debug:
print self.token
print self.server_uri
def sendEvent(self, payload, eventtime=''):
# Method to immediately send an event to the http event collector
headers = {'Authorization': 'Splunk ' + self.token}
# If eventtime in epoch not passed as optional argument use current system time in epoch
if not eventtime:
eventtime = str(int(time.time()))
# Fill in local hostname if not manually populated
if 'host' not in payload:
payload.update({'host': self.host})
# Update time value on payload if need to use system time
data = {'time': eventtime}
data.update(payload)
# send event to http event collector
r = requests.post(self.server_uri, data=json.dumps(data), headers=headers, verify=http_event_collector_SSL_verify, proxies=self.proxy)
# Print debug info if flag set
if http_event_collector_debug:
log.debug(r.text)
log.debug(data)
def batchEvent(self, payload, eventtime=''):
# Method to store the event in a batch to flush later
# Fill in local hostname if not manually populated
if 'host' not in payload:
payload.update({'host': self.host})
payloadLength = len(json.dumps(payload))
if (self.currentByteLength + payloadLength) > self.maxByteLength:
self.flushBatch()
# Print debug info if flag set
if http_event_collector_debug:
print 'auto flushing'
else:
self.currentByteLength = self.currentByteLength + payloadLength
# If eventtime in epoch not passed as optional argument use current system time in epoch
if not eventtime:
eventtime = str(int(time.time()))
# Update time value on payload if need to use system time
data = {'time': eventtime}
data.update(payload)
self.batchEvents.append(json.dumps(data))
def flushBatch(self):
# Method to flush the batch list of events
if len(self.batchEvents) > 0:
headers = {'Authorization': 'Splunk ' + self.token}
self.server_uri = [x for x in self.server_uri if x[1] is not False]
for server in self.server_uri:
try:
r = requests.post(server[0], data=' '.join(self.batchEvents), headers=headers, verify=http_event_collector_SSL_verify, proxies=self.proxy, timeout=self.timeout)
r.raise_for_status()
server[1] = True
break
except requests.exceptions.RequestException:
log.info('Request to splunk server "%s" failed. Marking as bad.' % server[0])
server[1] = False
except Exception as e:
log.error('Request to splunk threw an error: {0}'.format(e))
self.batchEvents = []
self.currentByteLength = 0
| StarcoderdataPython |
3218939 | """
Adafruit BME280 temp/press/hum
"""
from datetime import datetime
import logging
from threading import Event, Thread
import time
import Adafruit_BME280
from sensor_feed.sensor_multi import MultiSensorDevice, ChildSensor
LOGGER = logging.getLogger(__name__)
class BME280Sensor(MultiSensorDevice):
    """Adafruit BME280 I2C temperature/pressure/humidity sensor."""

    device_name = 'bem280'

    def __init__(self, *args, **kwargs):
        """Register the three child sensors and open the I2C device."""
        super(BME280Sensor, self).__init__(*args, **kwargs)
        self._children = [
            ChildSensor(self, 'temp', 'temp', 'degC'),
            ChildSensor(self, 'relative humidity', 'rhum', '%'),
            ChildSensor(self, 'baromatric pressure', 'pressure', 'Pa'),
        ]
        self._device = Adafruit_BME280.BME280()

    def enqueue_values(self, timestamp):
        """Read one sample per channel and push it onto each child's queue."""
        readings = [
            self._device.read_temperature(),
            self._device.read_humidity(),
            self._device.read_pressure(),
        ]
        for sensor, reading in zip(self._children, readings):
            try:
                queue = self.queues[sensor]
            except KeyError:
                # Sensor is not currently running; drop the sample.
                continue
            queue.put((timestamp, reading))
| StarcoderdataPython |
3354159 | # Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from pathlib import Path
from typing import List, NamedTuple, Optional
from ros_cross_compile.docker_client import DockerClient
from ros_cross_compile.platform import Platform
"""
A NamedTuple that collects the customizations for each stage passed in by the user.
As such, the documentation for each customization can be found by looking at the
argparse options in ros_cross_compile.py.
"""
PipelineStageConfigOptions = NamedTuple('PipelineStageConfigOptions',
[('skip_rosdep_collection', bool),
('skip_rosdep_keys', List[str]),
('custom_script', Optional[Path]),
('custom_data_dir', Optional[Path]),
('custom_setup_script', Optional[Path])])
class PipelineStage(ABC):
    """Interface to represent a stage of the cross compile pipeline.

    Concrete stages must provide a name at construction time and implement
    __call__ to perform their work.
    """
    @abstractmethod
    def __init__(self, name):
        # Subclasses must call this with a unique, human-readable stage
        # name; it is exposed read-only via the `name` property.
        self._name = name

    @property
    def name(self):
        # Read-only stage name set at construction time.
        return self._name

    @abstractmethod
    def __call__(self, platform: Platform, docker_client: DockerClient, ros_workspace_dir: Path,
                 customizations: PipelineStageConfigOptions):
        # Execute the stage against the given target platform and workspace,
        # using *docker_client* for any container operations and
        # *customizations* for the user-supplied per-stage options.
        raise NotImplementedError
| StarcoderdataPython |
3378150 | import math
def parse(z):
    """Placeholder parser: returns its argument unchanged."""
    # Removed a dead local (`ast = None`) that was assigned but never read.
    return z


print(parse('z'))
# Smoke-check that math is importable/usable; result intentionally unused.
math.log(42)
| StarcoderdataPython |
125377 | import json
import re
import ast
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split, GridSearchCV, StratifiedKFold, cross_val_score
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, log_loss
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.metrics import r2_score
from sklearn.model_selection import cross_val_score
# Precomputed per-person score table keyed by 'tmdb_id', used by get_score().
# NOTE(review): module-level I/O — importing this module requires the CSV to
# exist at this relative path.
people_path = pd.read_csv("../data/processed/people_transformation/people_cast_list.csv")
## 1.Dataset Builder
def convert_output_id(output):
    """Extract the first TMDB movie id from each dict entry that has results."""
    return [
        entry["movie_results"][0]["id"]
        for entry in output
        if isinstance(entry, dict) and entry["movie_results"]
    ]
def get_transformed_json(path):
    """
    Load the JSON list stored at *path*, dropping entries that recorded a
    requests failure (stringified exception classes) during scraping.

    Returns the surviving entries in their original order.
    """
    # Stringified exception types written into the file by the scraper.
    # A tuple (not a set) because membership must use == — some entries
    # are dicts, which are unhashable.
    error_markers = (
        "<class 'requests.exceptions.ReadTimeout'>",
        "<class 'requests.exceptions.ConnectionError'>",
    )
    with open(path) as json_file:
        data = json.load(json_file)
    # Previously also built an unused index list (id_remove); dropped.
    return [entry for entry in data if entry not in error_markers]
## 2.1 Pre_transformation
def from_json_to_array(df, column, regex):
    """Replace each cell of *column* with the list of *regex* matches in its string form."""
    pattern = rf"{regex}"

    def _extract(cell):
        return re.findall(pattern, str(cell))

    df[column] = df[column].apply(_extract)
def split_credits_column(df):
    """Split the combined 'credits' column into separate 'cast' and 'crew' columns."""
    credits = df["credits"]
    df["cast"] = credits.apply(lambda blob: string_to_dictionary(blob, "cast"))
    df["crew"] = credits.apply(lambda blob: string_to_dictionary(blob, "crew"))
    # The combined column is redundant once split.
    df.drop("credits", axis=1, inplace=True)
## 2.2 People Pre Pre_transformation
def unique_values(df_list):
    """Return the set of all numeric id strings found across the input strings."""
    ids = set()
    for value in df_list:
        ids.update(re.findall(r'\b\d+\b', value))
    return ids
## 4 Data Wrangling
def create_new_columns(df, column):
    """One-hot encode the word tokens of *column* into new '<column>_<token>' columns."""
    # Gather every token across the column, then create zeroed indicator
    # columns (ordered by frequency) and mark them per row.
    tokens = []
    for cell in df[column]:
        tokens.extend(re.findall(r'\b\w+\b', cell))
    counts = get_value_counts(tokens)
    columns_to_zero(df, counts, column)
    validate_column(df, column)
def get_average_people(df, df_list, year):
    # For each row, parse the cast-member ids out of the stringified list and
    # overwrite the 'cast' column with the mean of their per-year scores.
    # NOTE(review): assumes df uses the default 0..n-1 integer index so that
    # positional i matches the .loc label — verify against callers.
    ids_list = [re.findall(r'\b\d+\b', value) for value in df_list]
    for i in range(len(df_list)):
        df.loc[i, "cast"] = np.mean(get_score(ids_list[i], year[i]))
## Modeling
def predict(model, X_train, y_train, X_test, y_test, model_text):
    # Fit *model* on the training split, plot a confusion-matrix heatmap for
    # the held-out test predictions (saved under ../data/exports/), and
    # return the cross-validated metrics table from baseline_report().
    model.fit(X_train, y_train)
    y_pred_test = model.predict(X_test)
    cf_matrix = confusion_matrix(y_test, y_pred_test)
    plot_confusion_matrix(cf_matrix, model_text)
    return baseline_report(model, X_train, X_test, y_train, y_test, model_text)
def predict_linear(model, X_test, X_train, y_train, X, y, model_text):
    """
    Fit a regression *model*, report cross-validated R^2 on the training
    data, and plot measured vs. predicted revenue.

    The scatter plot is saved to
    ../data/exports/<model_text>_regression_scatter.png.
    """
    model.fit(X_train, y_train)
    # BUG FIX: predictions were previously made on X_test but scored and
    # plotted against y_train (mismatched samples and lengths). Score and
    # plot the training set consistently.
    y_pred = model.predict(X_train)
    score = r2_score(y_train, y_pred)
    print('Train R2: ', score)
    scores = cross_val_score(model,
                             X_train,
                             y_train,
                             cv=5,
                             scoring='r2')
    print('CV Mean: ', np.mean(scores))
    print('STD: ', np.std(scores))
    fig, ax = plt.subplots()
    ax.scatter(y_train, y_pred)
    # Reference y = x line over the full target range.
    ax.plot([y.min(), y.max()], [y.min(), y.max()], 'k--', lw=4)
    # BUG FIX: both axes were labelled 'Predicted revenue'.
    ax.set_xlabel('Measured revenue')
    ax.set_ylabel('Predicted revenue')
    plt.title('Measured versus predicted revenue')
    plt.savefig(f"../data/exports/{model_text}_regression_scatter.png")
    plt.show()
# Private functions
## Generics
def isNaN(num):
    # NOTE(review): despite the name, this returns True when *num* is NOT
    # NaN (NaN is the only value for which x == x is False). Callers in
    # this module rely on the inverted meaning, so do not "fix" it blindly.
    return num == num
# Flatten one level of nesting: [[a, b], [c]] -> [a, b, c].
flatten = lambda l: [item for sublist in l for item in sublist]
def string_to_dictionary(data, key):
    """Parse a python-literal string and return its *key* entry, or NaN for empty/NaN input."""
    # Empty/None input is falsy; NaN fails the data == data check.
    if not data or data != data:
        return np.NaN
    literal = ast.literal_eval(data)
    # Round-trip through JSON to normalise to plain JSON-compatible types.
    normalised = json.loads(json.dumps(literal))
    return normalised[key]
## Encode our categorical values
def get_value_counts(names):
    """
    Count case-folded occurrences of each name and return a dict ordered
    by descending count (ties keep first-seen order).

    The parameter was renamed from ``list``, which shadowed the builtin.
    """
    counts = {}
    for name in names:
        key = name.lower()
        counts[key] = counts.get(key, 0) + 1
    return dict(sorted(counts.items(), key=lambda kv: kv[1], reverse=True))
def columns_to_zero(df, sorted_list, column):
    """Add a zero-filled indicator column '<column>_<key>' for every key of *sorted_list*."""
    # The previous implementation juggled a manual iter()/next() and
    # positional list(sorted_list)[i] indexing; iterating the dict keys
    # directly is equivalent (same insertion order) and clearer.
    for key in sorted_list:
        df[f"{column}_{key}"] = 0
def validate_column(df, column):
    """Set the matching '<column>_<token>' indicator to 1 for every token in each row."""
    for idx, row in df.iterrows():
        for token in re.findall(r'\b\w+\b', row[column]):
            df.loc[idx, f"{column}_{token.lower()}"] = 1
## Retrieve average cast value from df
def get_score(ids, year):
    # Look up each person id in the module-level people_path table and
    # collect the mean of their non-NaN per-year score values.
    # NOTE(review): to_dict('r') is the deprecated shorthand for
    # to_dict('records'); newer pandas versions reject it.
    values = []
    for id_ in ids:
        dict_people = people_path[people_path["tmdb_id"] == int(id_)].to_dict('r')
        if len(dict_people) > 0:
            values.append(get_mean_value(get_values_people(dict_people, year)))
    return values
def get_mean_value(array):
    """Return the arithmetic mean of *array*, or 0 for an empty sequence."""
    if not array:
        return 0
    return sum(array) / len(array)
def get_values_people(dictionary, year):
    # Pull the non-NaN yearly score values out of the first record.
    # NOTE(review): when a column matching *year* is found, the function
    # returns immediately with whatever has been accumulated so far; when no
    # column matches it falls off the end and returns None. Callers feed the
    # result to get_mean_value(), which would crash on None — confirm the
    # intended behaviour before relying on this.
    values = []
    for i,k in dictionary[0].items():
        if i != "tmd<PRE>b_id" if False else "tmdb_id":
            pass
        if i != "tmdb_id":
            # '{0:g}'.format strips trailing '.0' so '2005.0' equals '2005'.
            if '{0:g}'.format(float(i)) == str(year):
                if isNaN(k):  # isNaN() is inverted: True when k is a real value
                    values.append(k)
                return values
            else:
                if isNaN(k):
                    values.append(k)
## Model visualization
def plot_confusion_matrix(cf_matrix, model_text):
    # Render a 2x2 confusion-matrix heatmap where each cell is annotated
    # with its label, raw count and percentage of all samples, then save it
    # to ../data/exports/<model_text>_classification_heatmap.png.
    group_names = ["True Neg", "False Pos", "False Neg", "True Pos"]
    group_counts = ["{0:0.0f}".format(value) for value in cf_matrix.flatten()]
    group_percentages = ["{0:.2%}".format(value) for value in
                         cf_matrix.flatten()/np.sum(cf_matrix)]
    labels = [f"{v1}\n{v2}\n{v3}" for v1, v2, v3 in zip(group_names,group_counts,group_percentages)]
    # Reshape the flat annotation list back to the 2x2 matrix layout.
    labels = np.asarray(labels).reshape(2,2)
    sns_heatmap = sns.heatmap(cf_matrix, annot=labels, fmt="", cmap='Blues')
    sns_heatmap.figure.savefig(f"../data/exports/{model_text}_classification_heatmap.png")
def baseline_report(model, X_train, X_test, y_train, y_test, name):
    """
    The function takes the model, the split data and the name and returns
    the dataframe with the scores of the models with training and test data.

    Each metric (accuracy/precision/recall/f1/roc_auc) is the mean over a
    5-fold stratified cross-validation; log-loss is computed on direct
    predictions instead.
    """
    strat_k_fold = StratifiedKFold(n_splits=5, shuffle=True)
    model.fit(X_train, y_train)
    # Cross-validated metrics on the training split.
    accuracy = np.mean(cross_val_score(model, X_train, y_train, cv=strat_k_fold, scoring='accuracy'))
    precision = np.mean(cross_val_score(model, X_train, y_train, cv=strat_k_fold, scoring='precision'))
    recall = np.mean(cross_val_score(model, X_train, y_train, cv=strat_k_fold, scoring='recall'))
    f1score = np.mean(cross_val_score(model, X_train, y_train, cv=strat_k_fold, scoring='f1'))
    rocauc = np.mean(cross_val_score(model, X_train, y_train, cv=strat_k_fold, scoring='roc_auc'))
    y_pred_train = model.predict(X_train)
    logloss = log_loss(y_train, y_pred_train)
    df_model_train = pd.DataFrame({'data' : 'training',
                                   'model' : [name],
                                   'accuracy' : [accuracy],
                                   'precision' : [precision],
                                   'recall' : [recall],
                                   'f1score' : [f1score],
                                   'rocauc' : [rocauc],
                                   'logloss' : [logloss]})
    # Same metrics recomputed on the held-out test split.
    accuracy = np.mean(cross_val_score(model, X_test, y_test, cv=strat_k_fold, scoring='accuracy'))
    precision = np.mean(cross_val_score(model, X_test, y_test, cv=strat_k_fold, scoring='precision'))
    recall = np.mean(cross_val_score(model, X_test, y_test, cv=strat_k_fold, scoring='recall'))
    f1score = np.mean(cross_val_score(model, X_test, y_test, cv=strat_k_fold, scoring='f1'))
    rocauc = np.mean(cross_val_score(model, X_test, y_test, cv=strat_k_fold, scoring='roc_auc'))
    y_pred_test = model.predict(X_test)
    logloss = log_loss(y_test, y_pred_test) # SVC & LinearSVC unable to use cvs
    df_model_test = pd.DataFrame({'data' : 'test',
                                  'model' : [name],
                                  'accuracy' : [accuracy],
                                  'precision' : [precision],
                                  'recall' : [recall],
                                  'f1score' : [f1score],
                                  'rocauc' : [rocauc],
                                  'logloss' : [logloss]}) # timetaken: to be used for comparison later
    # Stack the training and test rows into one comparison table.
    df_model = pd.concat([df_model_train, df_model_test], ignore_index=True)
    return df_model
| StarcoderdataPython |
4833139 | from mock import Mock
from mock import patch
import pytest
import typing # NOQA
from optuna import distributions
from optuna import samplers
from optuna import storages
from optuna.study import create_study
from optuna.trial import FixedTrial
from optuna.trial import Trial
# Run each test once with the in-memory storage and once with an SQLite RDB
# storage backend.
parametrize_storage = pytest.mark.parametrize(
    'storage_init_func',
    [storages.InMemoryStorage, lambda: storages.RDBStorage('sqlite:///:memory:')]
)


# Each of the three tests below stubs the sampler so that successive sample()
# calls return 1., 2., 3.; a repeated parameter name must return the cached
# first value, while a new name consumes another sample.
@parametrize_storage
def test_suggest_uniform(storage_init_func):
    # type: (typing.Callable[[], storages.BaseStorage]) -> None
    mock = Mock()
    mock.side_effect = [1., 2., 3.]
    sampler = samplers.RandomSampler()
    with patch.object(sampler, 'sample', mock) as mock_object:
        study = create_study(storage_init_func(), sampler=sampler)
        trial = Trial(study, study.storage.create_new_trial_id(study.study_id))
        distribution = distributions.UniformDistribution(low=0., high=3.)
        assert trial._suggest('x', distribution) == 1. # Test suggesting a param.
        assert trial._suggest('x', distribution) == 1. # Test suggesting the same param.
        assert trial._suggest('y', distribution) == 3. # Test suggesting a different param.
        assert trial.params == {'x': 1., 'y': 3.}
        assert mock_object.call_count == 3


@parametrize_storage
def test_suggest_discrete_uniform(storage_init_func):
    # type: (typing.Callable[[], storages.BaseStorage]) -> None
    mock = Mock()
    mock.side_effect = [1., 2., 3.]
    sampler = samplers.RandomSampler()
    with patch.object(sampler, 'sample', mock) as mock_object:
        study = create_study(storage_init_func(), sampler=sampler)
        trial = Trial(study, study.storage.create_new_trial_id(study.study_id))
        distribution = distributions.DiscreteUniformDistribution(low=0., high=3., q=1.)
        assert trial._suggest('x', distribution) == 1. # Test suggesting a param.
        assert trial._suggest('x', distribution) == 1. # Test suggesting the same param.
        assert trial._suggest('y', distribution) == 3. # Test suggesting a different param.
        assert trial.params == {'x': 1., 'y': 3.}
        assert mock_object.call_count == 3


@parametrize_storage
def test_suggest_int(storage_init_func):
    # type: (typing.Callable[[], storages.BaseStorage]) -> None
    mock = Mock()
    mock.side_effect = [1, 2, 3]
    sampler = samplers.RandomSampler()
    with patch.object(sampler, 'sample', mock) as mock_object:
        study = create_study(storage_init_func(), sampler=sampler)
        trial = Trial(study, study.storage.create_new_trial_id(study.study_id))
        distribution = distributions.IntUniformDistribution(low=0, high=3)
        assert trial._suggest('x', distribution) == 1 # Test suggesting a param.
        assert trial._suggest('x', distribution) == 1 # Test suggesting the same param.
        assert trial._suggest('y', distribution) == 3 # Test suggesting a different param.
        assert trial.params == {'x': 1, 'y': 3}
        assert mock_object.call_count == 3
# FixedTrial behaviour: every suggest_* call must return the pre-seeded value
# for a known parameter name and raise ValueError for an unknown one.
def test_fixed_trial_suggest_uniform():
    # type: () -> None
    trial = FixedTrial({'x': 1.})
    assert trial.suggest_uniform('x', -100., 100.) == 1.
    with pytest.raises(ValueError):
        trial.suggest_uniform('y', -100., 100.)


def test_fixed_trial_suggest_loguniform():
    # type: () -> None
    trial = FixedTrial({'x': 1.})
    assert trial.suggest_loguniform('x', 0., 1.) == 1.
    with pytest.raises(ValueError):
        trial.suggest_loguniform('y', 0., 1.)


def test_fixed_trial_suggest_discrete_uniform():
    # type: () -> None
    trial = FixedTrial({'x': 1.})
    assert trial.suggest_discrete_uniform('x', 0., 1., 0.1) == 1.
    with pytest.raises(ValueError):
        trial.suggest_discrete_uniform('y', 0., 1., 0.1)


def test_fixed_trial_suggest_int():
    # type: () -> None
    trial = FixedTrial({'x': 1})
    assert trial.suggest_int('x', 0, 10) == 1
    with pytest.raises(ValueError):
        trial.suggest_int('y', 0, 10)


def test_fixed_trial_suggest_categorical():
    # type: () -> None
    trial = FixedTrial({'x': 1})
    assert trial.suggest_categorical('x', [0, 1, 2, 3]) == 1
    with pytest.raises(ValueError):
        trial.suggest_categorical('y', [0, 1, 2, 3])


# User and system attributes are plain set/get round-trips on a FixedTrial.
def test_fixed_trial_user_attrs():
    # type: () -> None
    trial = FixedTrial({'x': 1})
    trial.set_user_attr('data', 'MNIST')
    assert trial.user_attrs['data'] == 'MNIST'


def test_fixed_trial_system_attrs():
    # type: () -> None
    trial = FixedTrial({'x': 1})
    trial.set_system_attr('system_message', 'test')
    assert trial.system_attrs['system_message'] == 'test'


def test_fixed_trial_params():
    # type: () -> None
    params = {'x': 1}
    trial = FixedTrial(params)
    assert trial.params == params


def test_fixed_trial_report():
    # type: () -> None
    # FixedTrial ignores reported values.
    trial = FixedTrial({})
    trial.report(1.0, 1)
    trial.report(2.0)


def test_fixed_trial_should_prune():
    # type: () -> None
    # FixedTrial never prunes trials.
    assert FixedTrial({}).should_prune(1) is False
| StarcoderdataPython |
3221756 | <filename>oregami/sark_bc.py
import sark
##########################################################
# Adding backwards compatability for sark before python3 #
##########################################################
# Sark started using start_ea, end_ea instead of start_ea, end_ea
# We will make the old sark (for python 2.7) support the new names as well
# Notice - we do not support backwards compatability to IDA before 7, which uses different APIs
# Old (python2-era) sark exposes startEA/endEA only; detect it by the absence
# of the renamed attribute on sark.Function.
need_bc = ('start_ea' not in dir(sark.Function))
if need_bc:
    # Monkeypatch the new-style snake_case names onto the old classes as
    # read-only properties delegating to the camelCase originals.
    sark.Function.start_ea = property(lambda self: self.startEA)
    sark.Function.end_ea = property(lambda self: self.endEA)
    sark.Line.start_ea = property(lambda self: self.startEA)
    sark.Line.end_ea = property(lambda self: self.endEA)
    sark.code.segment.Segment.start_ea = property(lambda self: self.startEA)
    sark.code.segment.Segment.end_ea = property(lambda self: self.endEA)
# CodeBlock inherits from idaapi.BasicBlock, and therefore contains both
# start_ea and startEA (and same for end) - so no need to fix
| StarcoderdataPython |
1783369 | <gh_stars>1-10
import hashlib

# One fresh hash object per algorithm, in a fixed order.
m = [algo() for algo in (hashlib.sha256, hashlib.sha512, hashlib.blake2b,
                         hashlib.blake2s, hashlib.sha3_256, hashlib.sha3_512)]
input = b"testByteArray"
# Feed the same message to every algorithm and print its uppercase hex digest.
for n in m:
    n.update(input)
    print(n.hexdigest().upper())
| StarcoderdataPython |
107397 | #!/usr/bin/env python
"""JARVIS 2 helper script
Usage:
run.py -j [-s] [NAME]
run.py [-d]
Options:
-h --help Show usage
-d --debug Run app in debug mode
-j --job Run a job, will prompt if NAME is not given
-s --json Print job output as JSON
"""
from __future__ import print_function
import os
import signal
from docopt import docopt
from main import app, queues, sched
def _teardown(signal, frame):
    # SIGINT handler: stop the scheduler, wake every worker queue with a
    # None sentinel so consumers exit, and clear the registry.
    sched.shutdown(wait=False)
    for queue in queues.values():
        queue.put(None)
    queues.clear()
    # Let the interrupt bubble up so that Flask/Werkzeug see it
    raise KeyboardInterrupt
def _run_job(name=None, print_json=False):
    # Run a single job by name and print its output; prompts interactively
    # when no name is given. Exits with status 1 on unknown job or missing
    # config. NOTE: uses raw_input, i.e. Python 2.
    import json
    import sys
    from flask import Flask
    from jobs import load_jobs
    from pprint import pprint
    # Build a throwaway Flask app just to load the JARVIS_SETTINGS config.
    _app = Flask(__name__)
    _app.config.from_envvar('JARVIS_SETTINGS')
    conf = _app.config['JOBS']
    jobs = load_jobs()
    if name is None or len(name) == 0:
        names = ' '.join(jobs.keys())
        name = raw_input('Name of the job to run [%s]: ' % (names,)).lower()
    cls = jobs.get(name)
    if cls is None:
        print('No such job: %s' % (name,))
        sys.exit(1)
    job_conf = conf.get(name)
    if job_conf is None:
        print('No config found for job: %s' % (name,))
        sys.exit(1)
    job = cls(job_conf)
    data = job.get()
    if print_json:
        print(json.dumps(data, indent=2))
    else:
        pprint(data)
def _run_app(debug=False):
    # Start the Flask app on 0.0.0.0:$PORT (default 5000) with a SIGINT
    # handler that tears down the scheduler/queues before exiting.
    app.debug = debug
    # if not debug:
    #     import logging
    #     log = logging.getLogger('werkzeug')
    #     log.setLevel(logging.ERROR)
    signal.signal(signal.SIGINT, _teardown)
    port = int(os.environ.get('PORT', 5000))
    # Reloader disabled so the scheduler isn't started twice.
    app.run(host='0.0.0.0', port=port, use_reloader=False, threaded=True)
def main():
    """Entry point: dispatch to the job runner or the web app per CLI flags."""
    opts = docopt(__doc__)
    if not opts['--job']:
        _run_app(opts['--debug'])
    else:
        _run_job(opts['NAME'], opts['--json'])
if __name__ == '__main__':
main()
| StarcoderdataPython |
def swap(i, j):
    """Exchange the characters at positions *i* and *j* of the global `line` list."""
    # Tuple unpacking replaces the manual three-step temp-variable swap.
    line[i], line[j] = line[j], line[i]
# Read "<n> <t>" (queue length and seconds to simulate) and the queue itself
# as a string of 'B' (boy) / 'G' (girl) characters.
a=input()
line=input()
a=a.split(' ')
num=a[0]  # NOTE(review): never used; the length is implied by len(line)
time=int(a[1])
# Work on the queue as a mutable list of characters.
line=[i for i in line]
# Each second, every boy standing directly in front of a girl swaps with
# her; flagBoy tracks "a boy is immediately before this position" and is
# cleared after a swap so each boy moves at most once per second.
while(time>0):
    time-=1
    flagBoy=False
    for i in range(len(line)):
        if(line[i]=='B'):
            flagBoy=True
        if(flagBoy and line[i]=='G'):
            swap(i-1,i)
            flagBoy=False
# Print the final arrangement after all seconds have elapsed.
print(''.join(line))
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.