max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
util/version.py | DevEliran/news-aggregator | 0 | 6621551 | """
Current Fuse version
"""
VERSION = "1.0.1"
| """
Current Fuse version
"""
VERSION = "1.0.1"
| en | 0.797927 | Current Fuse version | 1.052147 | 1 |
variants/gan-weightnorm/model.py | Robin-ML/gan | 2 | 6621552 | import torch
import torch.nn as nn
import torch.nn.functional as F
import modules
class Discriminator(nn.Module):
    '''
    Convolutional GAN discriminator built from weight-normalized residue blocks.

    The network downsamples a (3, h_in, w_in) image by a factor of 2 per
    feature level and finishes with a single weight-normalized convolution
    whose kernel spans the whole remaining feature map, yielding one raw
    score per input image.
    '''
    def __init__(self, w_in, h_in, num_features, num_blocks):
        # w_in, h_in: input image width/height in pixels.
        # num_features: output channel count per level.
        # num_blocks: number of residue blocks per level (same length as num_features).
        super(Discriminator, self).__init__()
        f_prev = 3
        w = w_in
        h = h_in
        self.net = nn.Sequential()
        for i in range(len(num_features)):
            f = num_features[i]
            if i == len(num_features) - 1:
                # last level: no padding; the final conv absorbs the remaining extent
                pad_w = 0
                pad_h = 0
            else:
                # pad by 1 when the dimension is 2 (mod 4) so that the strided
                # block halves it to an even size for the next level
                if (w % 4 == 2):
                    pad_w = 1
                else:
                    pad_w = 0
                if (h % 4 == 2):
                    pad_h = 1
                else:
                    pad_h = 0
            for j in range(num_blocks[i]):
                if j == 0:
                    # first block of each level downsamples (stride 2) and changes channels
                    self.net.add_module('level_{0}_block_{1}'.format(i, j), modules.ResidueBlock(f_prev, f, 2, pad_h, pad_w))
                else:
                    self.net.add_module('level_{0}_block_{1}'.format(i, j), modules.ResidueBlock(f, f, 1, 0, 0))
            f_prev = f
            # track the spatial size produced by the stride-2 block above
            w = (w + pad_w * 2) // 2
            h = (h + pad_h * 2) // 2
        # kernel of size (h, w) collapses the remaining spatial extent to 1x1
        self.final = modules.WeightNormalizedConv2d(f_prev, 1, (h, w), 1, 0, scale = True, bias = True)
    def forward(self, input):
        # one unbounded score per batch element
        return self.final(self.net(input)).contiguous().view(input.size(0))
class Generator(nn.Module):
    '''
    Convolutional GAN generator mirroring the Discriminator.

    A latent code of length `code_size` is mapped by a weight-normalized
    linear layer onto the smallest feature map, then upsampled by transposed
    residue blocks (one stride-2 block per level) until the output reaches
    (3, h_out, w_out). Outputs are squashed to [0, 1] with a sigmoid.
    '''
    def __init__(self, w_out, h_out, num_features, num_blocks, code_size):
        super(Generator, self).__init__()
        # Precompute, level by level, the padding that the transposed blocks
        # need so the upsampling path exactly inverts the discriminator's
        # downsampling arithmetic (pad when the dimension is 2 mod 4).
        pad_w = []
        pad_h = []
        w = w_out
        h = h_out
        for i in range(len(num_features) - 1):
            if (w % 4 == 2):
                pad_w.append(1)
                w = (w + 2) // 2
            else:
                pad_w.append(0)
                w = w // 2
            if (h % 4 == 2):
                pad_h.append(1)
                h = (h + 2) // 2
            else:
                pad_h.append(0)
                h = h // 2
        # deepest level always halves without padding
        w = w // 2
        h = h // 2
        pad_w.append(0)
        pad_h.append(0)
        self.net = nn.Sequential()
        self.initial_fc = modules.WeightNormalizedLinear(code_size, num_features[-1] * h * w, scale = True, bias = True, init_factor = 0.01)
        self.initial_size = (num_features[-1], h, w)
        self.initial_prelu = nn.PReLU(num_features[-1])
        for i in range(len(num_features)):
            level = len(num_features) - 1 - i
            f = num_features[level]
            if level == 0:
                # last upsampling level emits RGB
                f_next = 3
            else:
                f_next = num_features[level - 1]
            for j in range(num_blocks[level]):
                if j == num_blocks[level] - 1:
                    # final block of the level upsamples (stride 2) and changes channels
                    self.net.add_module('level_{0}_block_{1}'.format(level, j), modules.ResidueBlockTranspose(f, f_next, 2, pad_h[level], pad_w[level], gen_last_block = (level == 0)))
                else:
                    self.net.add_module('level_{0}_block_{1}'.format(level, j), modules.ResidueBlockTranspose(f, f, 1, 0, 0))
    def forward(self, input):
        # torch.sigmoid replaces the deprecated F.sigmoid (removed from
        # torch.nn.functional in newer PyTorch releases); numerically identical.
        return torch.sigmoid(self.net(self.initial_prelu(self.initial_fc(input).contiguous().view(input.size(0), *self.initial_size))))
| import torch
import torch.nn as nn
import torch.nn.functional as F
import modules
class Discriminator(nn.Module):
def __init__(self, w_in, h_in, num_features, num_blocks):
super(Discriminator, self).__init__()
f_prev = 3
w = w_in
h = h_in
self.net = nn.Sequential()
for i in range(len(num_features)):
f = num_features[i]
if i == len(num_features) - 1:
pad_w = 0
pad_h = 0
else:
if (w % 4 == 2):
pad_w = 1
else:
pad_w = 0
if (h % 4 == 2):
pad_h = 1
else:
pad_h = 0
for j in range(num_blocks[i]):
if j == 0:
self.net.add_module('level_{0}_block_{1}'.format(i, j), modules.ResidueBlock(f_prev, f, 2, pad_h, pad_w))
else:
self.net.add_module('level_{0}_block_{1}'.format(i, j), modules.ResidueBlock(f, f, 1, 0, 0))
f_prev = f
w = (w + pad_w * 2) // 2
h = (h + pad_h * 2) // 2
self.final = modules.WeightNormalizedConv2d(f_prev, 1, (h, w), 1, 0, scale = True, bias = True)
def forward(self, input):
return self.final(self.net(input)).contiguous().view(input.size(0))
class Generator(nn.Module):
def __init__(self, w_out, h_out, num_features, num_blocks, code_size):
super(Generator, self).__init__()
pad_w = []
pad_h = []
w = w_out
h = h_out
for i in range(len(num_features) - 1):
if (w % 4 == 2):
pad_w.append(1)
w = (w + 2) // 2
else:
pad_w.append(0)
w = w // 2
if (h % 4 == 2):
pad_h.append(1)
h = (h + 2) // 2
else:
pad_h.append(0)
h = h // 2
w = w // 2
h = h // 2
pad_w.append(0)
pad_h.append(0)
self.net = nn.Sequential()
self.initial_fc = modules.WeightNormalizedLinear(code_size, num_features[-1] * h * w, scale = True, bias = True, init_factor = 0.01)
self.initial_size = (num_features[-1], h, w)
self.initial_prelu = nn.PReLU(num_features[-1])
for i in range(len(num_features)):
level = len(num_features) - 1 - i
f = num_features[level]
if level == 0:
f_next = 3
else:
f_next = num_features[level - 1]
for j in range(num_blocks[level]):
if j == num_blocks[level] - 1:
self.net.add_module('level_{0}_block_{1}'.format(level, j), modules.ResidueBlockTranspose(f, f_next, 2, pad_h[level], pad_w[level], gen_last_block = (level == 0)))
else:
self.net.add_module('level_{0}_block_{1}'.format(level, j), modules.ResidueBlockTranspose(f, f, 1, 0, 0))
def forward(self, input):
return F.sigmoid(self.net(self.initial_prelu(self.initial_fc(input).contiguous().view(input.size(0), *self.initial_size))))
| none | 1 | 2.409473 | 2 | |
containerd/types/descriptor_pb2.py | neuro-inc/platform-container-runtime | 0 | 6621553 | <gh_stars>0
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: containerd/types/descriptor.proto
"""Generated protocol buffer code."""
from google.protobuf import (
descriptor as _descriptor,
message as _message,
reflection as _reflection,
symbol_database as _symbol_database,
)
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from github.com.gogo.protobuf.gogoproto import (
gogo_pb2 as github_dot_com_dot_gogo_dot_protobuf_dot_gogoproto_dot_gogo__pb2,
)
DESCRIPTOR = _descriptor.FileDescriptor(
name="containerd/types/descriptor.proto",
package="containerd.types",
syntax="proto3",
serialized_options=b"Z0github.com/containerd/containerd/api/types;types",
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n!containerd/types/descriptor.proto\x12\x10\x63ontainerd.types\x1a-github.com/gogo/protobuf/gogoproto/gogo.proto"\xea\x01\n\nDescriptor\x12\x12\n\nmedia_type\x18\x01 \x01(\t\x12\x42\n\x06\x64igest\x18\x02 \x01(\tB2\xda\xde\x1f*github.com/opencontainers/go-digest.Digest\xc8\xde\x1f\x00\x12\x0c\n\x04size\x18\x03 \x01(\x03\x12\x42\n\x0b\x61nnotations\x18\x05 \x03(\x0b\x32-.containerd.types.Descriptor.AnnotationsEntry\x1a\x32\n\x10\x41nnotationsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\x32Z0github.com/containerd/containerd/api/types;typesX\x00\x62\x06proto3',
dependencies=[
github_dot_com_dot_gogo_dot_protobuf_dot_gogoproto_dot_gogo__pb2.DESCRIPTOR,
],
)
_DESCRIPTOR_ANNOTATIONSENTRY = _descriptor.Descriptor(
name="AnnotationsEntry",
full_name="containerd.types.Descriptor.AnnotationsEntry",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="key",
full_name="containerd.types.Descriptor.AnnotationsEntry.key",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="value",
full_name="containerd.types.Descriptor.AnnotationsEntry.value",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=b"8\001",
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=287,
serialized_end=337,
)
_DESCRIPTOR = _descriptor.Descriptor(
name="Descriptor",
full_name="containerd.types.Descriptor",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="media_type",
full_name="containerd.types.Descriptor.media_type",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="digest",
full_name="containerd.types.Descriptor.digest",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=b"\332\336\037*github.com/opencontainers/go-digest.Digest\310\336\037\000",
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="size",
full_name="containerd.types.Descriptor.size",
index=2,
number=3,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="annotations",
full_name="containerd.types.Descriptor.annotations",
index=3,
number=5,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[
_DESCRIPTOR_ANNOTATIONSENTRY,
],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=103,
serialized_end=337,
)
_DESCRIPTOR_ANNOTATIONSENTRY.containing_type = _DESCRIPTOR
_DESCRIPTOR.fields_by_name["annotations"].message_type = _DESCRIPTOR_ANNOTATIONSENTRY
DESCRIPTOR.message_types_by_name["Descriptor"] = _DESCRIPTOR
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Descriptor = _reflection.GeneratedProtocolMessageType(
"Descriptor",
(_message.Message,),
{
"AnnotationsEntry": _reflection.GeneratedProtocolMessageType(
"AnnotationsEntry",
(_message.Message,),
{
"DESCRIPTOR": _DESCRIPTOR_ANNOTATIONSENTRY,
"__module__": "containerd.types.descriptor_pb2"
# @@protoc_insertion_point(class_scope:containerd.types.Descriptor.AnnotationsEntry)
},
),
"DESCRIPTOR": _DESCRIPTOR,
"__module__": "containerd.types.descriptor_pb2"
# @@protoc_insertion_point(class_scope:containerd.types.Descriptor)
},
)
_sym_db.RegisterMessage(Descriptor)
_sym_db.RegisterMessage(Descriptor.AnnotationsEntry)
DESCRIPTOR._options = None
_DESCRIPTOR_ANNOTATIONSENTRY._options = None
_DESCRIPTOR.fields_by_name["digest"]._options = None
# @@protoc_insertion_point(module_scope)
| # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: containerd/types/descriptor.proto
"""Generated protocol buffer code."""
from google.protobuf import (
descriptor as _descriptor,
message as _message,
reflection as _reflection,
symbol_database as _symbol_database,
)
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from github.com.gogo.protobuf.gogoproto import (
gogo_pb2 as github_dot_com_dot_gogo_dot_protobuf_dot_gogoproto_dot_gogo__pb2,
)
DESCRIPTOR = _descriptor.FileDescriptor(
name="containerd/types/descriptor.proto",
package="containerd.types",
syntax="proto3",
serialized_options=b"Z0github.com/containerd/containerd/api/types;types",
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n!containerd/types/descriptor.proto\x12\x10\x63ontainerd.types\x1a-github.com/gogo/protobuf/gogoproto/gogo.proto"\xea\x01\n\nDescriptor\x12\x12\n\nmedia_type\x18\x01 \x01(\t\x12\x42\n\x06\x64igest\x18\x02 \x01(\tB2\xda\xde\x1f*github.com/opencontainers/go-digest.Digest\xc8\xde\x1f\x00\x12\x0c\n\x04size\x18\x03 \x01(\x03\x12\x42\n\x0b\x61nnotations\x18\x05 \x03(\x0b\x32-.containerd.types.Descriptor.AnnotationsEntry\x1a\x32\n\x10\x41nnotationsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\x32Z0github.com/containerd/containerd/api/types;typesX\x00\x62\x06proto3',
dependencies=[
github_dot_com_dot_gogo_dot_protobuf_dot_gogoproto_dot_gogo__pb2.DESCRIPTOR,
],
)
_DESCRIPTOR_ANNOTATIONSENTRY = _descriptor.Descriptor(
name="AnnotationsEntry",
full_name="containerd.types.Descriptor.AnnotationsEntry",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="key",
full_name="containerd.types.Descriptor.AnnotationsEntry.key",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="value",
full_name="containerd.types.Descriptor.AnnotationsEntry.value",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=b"8\001",
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=287,
serialized_end=337,
)
_DESCRIPTOR = _descriptor.Descriptor(
name="Descriptor",
full_name="containerd.types.Descriptor",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="media_type",
full_name="containerd.types.Descriptor.media_type",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="digest",
full_name="containerd.types.Descriptor.digest",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=b"\332\336\037*github.com/opencontainers/go-digest.Digest\310\336\037\000",
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="size",
full_name="containerd.types.Descriptor.size",
index=2,
number=3,
type=3,
cpp_type=2,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="annotations",
full_name="containerd.types.Descriptor.annotations",
index=3,
number=5,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[
_DESCRIPTOR_ANNOTATIONSENTRY,
],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=103,
serialized_end=337,
)
_DESCRIPTOR_ANNOTATIONSENTRY.containing_type = _DESCRIPTOR
_DESCRIPTOR.fields_by_name["annotations"].message_type = _DESCRIPTOR_ANNOTATIONSENTRY
DESCRIPTOR.message_types_by_name["Descriptor"] = _DESCRIPTOR
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Descriptor = _reflection.GeneratedProtocolMessageType(
"Descriptor",
(_message.Message,),
{
"AnnotationsEntry": _reflection.GeneratedProtocolMessageType(
"AnnotationsEntry",
(_message.Message,),
{
"DESCRIPTOR": _DESCRIPTOR_ANNOTATIONSENTRY,
"__module__": "containerd.types.descriptor_pb2"
# @@protoc_insertion_point(class_scope:containerd.types.Descriptor.AnnotationsEntry)
},
),
"DESCRIPTOR": _DESCRIPTOR,
"__module__": "containerd.types.descriptor_pb2"
# @@protoc_insertion_point(class_scope:containerd.types.Descriptor)
},
)
_sym_db.RegisterMessage(Descriptor)
_sym_db.RegisterMessage(Descriptor.AnnotationsEntry)
DESCRIPTOR._options = None
_DESCRIPTOR_ANNOTATIONSENTRY._options = None
_DESCRIPTOR.fields_by_name["digest"]._options = None
# @@protoc_insertion_point(module_scope) | en | 0.379362 | # Generated by the protocol buffer compiler. DO NOT EDIT! # source: containerd/types/descriptor.proto Generated protocol buffer code. # @@protoc_insertion_point(imports) # @@protoc_insertion_point(class_scope:containerd.types.Descriptor.AnnotationsEntry) # @@protoc_insertion_point(class_scope:containerd.types.Descriptor) # @@protoc_insertion_point(module_scope) | 1.134596 | 1 |
ror/slope_constraints.py | jakub-tomczak/ror | 0 | 6621554 | import logging
from ror.Relation import Relation
from ror.Dataset import Dataset
from typing import List, Tuple
from ror.Constraint import Constraint, ConstraintVariable, ConstraintVariablesSet, ValueConstraintVariable
import numpy as np
# difference of 2 values greater than DIFF_EPS indicates that they are different
DIFF_EPS = 1e-10
def check_preconditions(data: Dataset) -> bool:
    '''
    Tell whether *data* holds enough alternatives (at least 3) to build
    slope constraints; log and refuse otherwise.
    '''
    enough = len(data.alternatives) >= 3
    if not enough:
        logging.info('number of alternatives is lower than 3, skipping slope constraint')
    return enough
def _create_slope_constraint(
        alternative_index: int,
        data: Dataset,
        criterion_name: str,
        relation: Relation,
        alternatives: List[str],
        alternative_scores: List[float]) -> Tuple[Constraint, Constraint]:
    '''
    Returns slope constraint or None if there would be division by 0 (in case when g_i(l) == g_i(l-1) or g_i(l-1) == g_i(l-2))
    Slope constraint is meeting the requirement | z - w | <= rho
    This constraint minimizes the differences between 2 consecutive characteristic points.
    This constraint requires partial utility function to be monotonic, non-decreasing

    :param alternative_index: index l of the third point; the pair uses points l, l-1 and l-2
    :param data: dataset supplying the optional fixed delta value
    :param criterion_name: criterion whose partial utility is being constrained
    :param relation: relation (typically '<=') applied to both constraints
    :param alternatives: alternative names used to build variable names
    :param alternative_scores: scores of all alternatives on this criterion
    :return: a (first, second) pair of Constraint objects, or None when two
        consecutive points coincide on this criterion
    '''
    first_diff = alternative_scores[alternative_index] - alternative_scores[alternative_index-1]
    # check if the 2 following points are not in the same place
    if abs(first_diff) < DIFF_EPS:
        logging.debug(
            f'Criterion {criterion_name} for alternative {alternatives[alternative_index]} has the same value ({alternative_scores[alternative_index-1]}) as alternative {alternatives[alternative_index-1]} on this criterion.')
        return None
    first_coeff = 1 / (first_diff)
    second_diff = alternative_scores[alternative_index-1] - alternative_scores[alternative_index-2]
    # check if the 2 following points are not in the same place
    if abs(second_diff) < DIFF_EPS:
        # fixed: log the score (alternative_scores[...]) rather than the
        # alternative name, matching the analogous message above
        logging.debug(
            f'Criterion {criterion_name} for alternative {alternatives[alternative_index-1]} has the same value ({alternative_scores[alternative_index-2]}) as alternative {alternatives[alternative_index-2]} on this criterion.')
        return None
    second_coeff = 1 / (second_diff)
    # delta is either a free model variable (coefficient -1) or a value fixed by the dataset
    delta_constraint = ConstraintVariable(
        "delta",
        -1.0
    ) if data.delta is None else ValueConstraintVariable(
        data.delta
    )
    # create constraint
    first_constraint = Constraint(ConstraintVariablesSet([
        ConstraintVariable(
            Constraint.create_variable_name(
                'u', criterion_name, alternatives[alternative_index]),
            first_coeff,
            alternatives[alternative_index]
        ),
        ConstraintVariable(
            Constraint.create_variable_name(
                'u', criterion_name, alternatives[alternative_index-1]),
            -first_coeff,
            # fixed: previously passed the whole `alternatives` list; this
            # variable belongs to the single alternative l-1 (cf. the
            # symmetric term in second_constraint below)
            alternatives[alternative_index-1]
        ),
        ConstraintVariable(
            Constraint.create_variable_name(
                'u', criterion_name, alternatives[alternative_index-1]),
            -second_coeff,
            alternatives[alternative_index-1]
        ),
        ConstraintVariable(
            Constraint.create_variable_name(
                'u', criterion_name, alternatives[alternative_index-2]),
            second_coeff,
            alternatives[alternative_index-2]
        ),
        delta_constraint
    ]), relation, Constraint.create_variable_name("first_slope", criterion_name, alternative_index))
    # the mirror constraint with all slope coefficients negated; together the
    # pair bounds the absolute slope difference by delta
    second_constraint = Constraint(ConstraintVariablesSet([
        ConstraintVariable(
            Constraint.create_variable_name(
                'u', criterion_name, alternatives[alternative_index]),
            -first_coeff,
            alternatives[alternative_index]
        ),
        ConstraintVariable(
            Constraint.create_variable_name(
                'u', criterion_name, alternatives[alternative_index-1]),
            first_coeff,
            alternatives[alternative_index-1]
        ),
        ConstraintVariable(
            Constraint.create_variable_name(
                'u', criterion_name, alternatives[alternative_index-1]),
            second_coeff,
            alternatives[alternative_index-1]
        ),
        ConstraintVariable(
            Constraint.create_variable_name(
                'u', criterion_name, alternatives[alternative_index-2]),
            -second_coeff,
            alternatives[alternative_index-2]
        ),
        delta_constraint
    ]), relation, Constraint.create_variable_name("second_slope", criterion_name, alternative_index))
    return (first_constraint, second_constraint)
def create_slope_constraints(data: Dataset, relation: Relation = None) -> List[Constraint]:
    '''
    Build slope constraints for every criterion in *data*.

    For each criterion, constraints are generated for each triple of
    consecutive alternatives; triples where two consecutive points coincide
    on the criterion are skipped (see _create_slope_constraint). With 'm'
    usable alternatives per criterion this yields up to 2 * (m - 2)
    constraints per criterion. Returns an empty list when fewer than 3
    alternatives are available.
    '''
    if not check_preconditions(data):
        return []
    rel = Relation('<=') if relation is None else relation
    result: List[Constraint] = []
    names = data.alternatives
    for column, (criterion, _) in enumerate(data.criteria):
        scores = data.matrix[:, column]
        for index in range(2, len(names)):
            pair = _create_slope_constraint(
                index, data, criterion, rel, names, scores
            )
            if pair is not None:
                result.extend(pair)
    return result
| import logging
from ror.Relation import Relation
from ror.Dataset import Dataset
from typing import List, Tuple
from ror.Constraint import Constraint, ConstraintVariable, ConstraintVariablesSet, ValueConstraintVariable
import numpy as np
# difference of 2 values greater than DIFF_EPS indicates that they are different
DIFF_EPS = 1e-10
def check_preconditions(data: Dataset) -> bool:
if len(data.alternatives) < 3:
logging.info('number of alternatives is lower than 3, skipping slope constraint')
return False
return True
def _create_slope_constraint(
alternative_index: int,
data: Dataset,
criterion_name: str,
relation: Relation,
alternatives: List[str],
alternative_scores: List[float]) -> Tuple[Constraint, Constraint]:
'''
Returns slope constraint or None if there would be division by 0 (in case when g_i(l) == g_i(l-1) or g_i(l-1) == g_i(l-2))
Slope constraint is meeting the requirement | z - w | <= rho
This constraint minimizes the differences between 2 consecutive characteristic points.
This constraint requires partial utility function to be monotonic, non-decreasing
'''
first_diff = alternative_scores[alternative_index] - alternative_scores[alternative_index-1]
# check if the 2 following points are not in the same place
if abs(first_diff) < DIFF_EPS:
logging.debug(
f'Criterion {criterion_name} for alternative {alternatives[alternative_index]} has the same value ({alternative_scores[alternative_index-1]}) as alternative {alternatives[alternative_index-1]} on this criterion.')
return None
first_coeff = 1 / (first_diff)
second_diff = alternative_scores[alternative_index-1] - alternative_scores[alternative_index-2]
# check if the 2 following points are not in the same place
if abs(second_diff) < DIFF_EPS:
logging.debug(
f'Criterion {criterion_name} for alternative {alternatives[alternative_index-1]} has the same value ({alternatives[alternative_index-2]}) as alternative {alternatives[alternative_index-2]} on this criterion.')
return None
second_coeff = 1 / (second_diff)
delta_constraint = ConstraintVariable(
"delta",
-1.0
) if data.delta is None else ValueConstraintVariable(
data.delta
)
# create constraint
first_constraint = Constraint(ConstraintVariablesSet([
ConstraintVariable(
Constraint.create_variable_name(
'u', criterion_name, alternatives[alternative_index]),
first_coeff,
alternatives[alternative_index]
),
ConstraintVariable(
Constraint.create_variable_name(
'u', criterion_name, alternatives[alternative_index-1]),
-first_coeff,
alternatives
),
ConstraintVariable(
Constraint.create_variable_name(
'u', criterion_name, alternatives[alternative_index-1]),
-second_coeff,
alternatives[alternative_index-1]
),
ConstraintVariable(
Constraint.create_variable_name(
'u', criterion_name, alternatives[alternative_index-2]),
second_coeff,
alternatives[alternative_index-2]
),
delta_constraint
]), relation, Constraint.create_variable_name("first_slope", criterion_name, alternative_index))
second_constraint = Constraint(ConstraintVariablesSet([
ConstraintVariable(
Constraint.create_variable_name(
'u', criterion_name, alternatives[alternative_index]),
-first_coeff,
alternatives[alternative_index]
),
ConstraintVariable(
Constraint.create_variable_name(
'u', criterion_name, alternatives[alternative_index-1]),
first_coeff,
alternatives[alternative_index-1]
),
ConstraintVariable(
Constraint.create_variable_name(
'u', criterion_name, alternatives[alternative_index-1]),
second_coeff,
alternatives[alternative_index-1]
),
ConstraintVariable(
Constraint.create_variable_name(
'u', criterion_name, alternatives[alternative_index-2]),
-second_coeff,
alternatives[alternative_index-2]
),
delta_constraint
]), relation, Constraint.create_variable_name("second_slope", criterion_name, alternative_index))
return (first_constraint, second_constraint)
def create_slope_constraints(data: Dataset, relation: Relation = None) -> List[Constraint]:
'''
Returns slope constraints for all alternatives except the ones that have duplicated
values in the criterion space.
So the number of constraints will be
2 x criteria + (m-2)*2
where 'm' is the number of alternatives without duplicated data on each criterion
and 'criteria' is the number of criteria in the data.
'''
if not check_preconditions(data):
return []
if relation is None:
relation = Relation('<=')
constraints = []
for criterion_index, (criterion_name, _) in enumerate(data.criteria):
alternative_score_on_criterion = data.matrix[:, criterion_index]
for l in range(2, len(data.alternatives)):
slope_constraints = _create_slope_constraint(
l, data, criterion_name, relation, data.alternatives, alternative_score_on_criterion
)
if slope_constraints is not None:
first_constraint, second_constraint = slope_constraints
constraints.append(first_constraint)
constraints.append(second_constraint)
return constraints
| en | 0.855065 | # difference of 2 values greater than DIFF_EPS indicates that they are different Returns slope constraint or None if there would be division by 0 (in case when g_i(l) == g_i(l-1) or g_i(l-1) == g_i(l-2)) Slope constraint is meeting the requirement | z - w | <= rho This constraint minimizes the differences between 2 consecutive characteristic points. This constraint requires partial utility function to be monotonic, non-decreasing # check if the 2 following points are not in the same place # check if the 2 following points are not in the same place # create constraint Returns slope constraints for all alternatives except the ones that have duplicated values in the criterion space. So the number of constraints will be 2 x criteria + (m-2)*2 where 'm' is the number of alternatives without duplicated data on each criterion and 'criteria' is the number of criteria in the data. | 2.570142 | 3 |
src/main.py | westernmagic/outer_ear | 0 | 6621555 | #!/usr/bin/env python
'''
Outer ear simulator
Author: <NAME> <<EMAIL>>
Version: 1.0.0
Data: 2019-09-09
'''
from typing import Tuple
import numpy as np
import scipy.io.wavfile as wav
import scipy.signal as ss
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from pysofaconventions import SOFAFile
def main() -> None:
    '''
    Entry point: parse CLI arguments, read the input WAV file, apply the
    selected stages of the outer/middle-ear model, and write the result.
    '''
    args = arg_parser().parse_args()
    data, f_s = read(args.input_file)
    # Each processing stage is optional and can be disabled with --no-<stage>.
    if args.head:
        data = head(data, args.sofa, args.azimuth, args.elevation)
    if args.canal:
        data = canal(data, f_s, args.l, args.d)
    if args.middle:
        data = middle(data)
    wav.write(args.output_file, f_s, data)
def head(data : np.ndarray, sofa : SOFAFile, azimuth : float, elevation : float):
    '''
    Apply effects of the head (HRTF)

    Convolves the input signal with the left/right head-related impulse
    responses of the SOFA measurement closest to the requested direction,
    returning a two-channel array (one row per sample, columns = L/R).
    '''
    from scipy.spatial import KDTree
    s = get_sofa(sofa)
    pos = s.getVariableValue('SourcePosition')
    # find closest position to requested azimuth and elevation
    # TODO: consider normalizing position units to eg. degrees
    # NOTE(review): the query point fixes the third coordinate to 1 —
    # presumably (azimuth, elevation, distance) with unit distance; confirm
    # against the convention of the SOFA file actually used.
    index = KDTree(pos).query([azimuth, elevation, 1])[1]
    hrir = s.getDataIR()[index, :, :]
    data = data.T
    # hrir[0] / hrir[1]: left / right ear impulse responses
    left = ss.fftconvolve(data, hrir[0])
    right = ss.fftconvolve(data, hrir[1])
    # stack and swap axes to get shape (samples, 2) as expected by wav.write
    output = np.asarray([left, right]).swapaxes(-1, 0)
    return output
def canal(input : np.ndarray, f_s: int, l : float, d : float):
    '''
    Apply effects of the ear canal.

    The canal is treated as a stopped-pipe resonator: for each odd mode
    (1, 3, 5) a first-order Butterworth band-pass around the resonant
    frequency is applied and the filtered signal is added back onto the
    input with a fixed gain (same approach as 'Matlab Auditory Periphery
    (MAP)'). The input array is modified in place and also returned.

    l and d are the canal length and diameter in millimetres.
    '''
    assert f_s > 0
    assert l >= 0
    assert d >= 0
    speed_of_sound = 343   # m/s
    resonance_gain = 10
    filter_order = 1
    nyquist = f_s / 2
    for mode in [1, 3, 5]:
        # stopped-pipe resonant frequency for this mode (end-corrected length in metres)
        resonant_freq = (mode * speed_of_sound) / (4 * l / 1000 + 0.4 * d / 1000)
        # band edges chosen so that for the first mode they coincide with the MAP parameters
        low_edge = (resonant_freq - 1500) / nyquist
        high_edge = (resonant_freq + 500) / nyquist
        b, a = ss.butter(filter_order, [low_edge, high_edge], btype = 'band')
        input += resonance_gain * ss.lfilter(b, a, input)
    return input
def middle(input):
    '''
    Apply the effects of the middle ear.

    Only two mechanisms are modelled: the power transmitted across the
    air/cochlear-fluid impedance mismatch, and the ossicular amplification
    (eardrum-to-oval-window area ratio times the malleus lever arm). The
    result is the input scaled by a constant gain.
    '''
    # acoustic impedances, kg m^-2 s^-1
    impedance_air = 414
    impedance_fluid = 1.48e6
    # effective areas, mm^2
    area_eardrum = 60
    area_oval_window = 3.2
    malleus_lever = 1.3
    # amplitude reflection coefficient at the boundary; its square is the
    # reflected power fraction
    mismatch = (impedance_air - impedance_fluid) / (impedance_air + impedance_fluid)
    transmission = 1 - mismatch ** 2
    return input * transmission * (area_eardrum / area_oval_window) * malleus_lever
def arg_parser() -> ArgumentParser:
    '''
    Build the command-line parser.

    Three on/off stage switches (--head/--no-head, --canal/--no-canal,
    --middle/--no-middle, all enabled by default), HRTF source selection
    (--sofa, -a, -e), ear-canal geometry (-l, -d), and the positional
    input/output WAV paths.
    '''
    parser = ArgumentParser(
        formatter_class = ArgumentDefaultsHelpFormatter
    )
    # stage toggles: each pair writes the same dest, default True
    parser.add_argument(
        '--head',
        help = 'Consider head effects',
        dest = 'head',
        action = 'store_true'
    )
    parser.add_argument(
        '--no-head',
        dest = 'head',
        action = 'store_false'
    )
    parser.set_defaults(head = True)
    parser.add_argument(
        '--canal',
        help = 'Consider ear canal effects',
        dest = 'canal',
        action = 'store_true'
    )
    parser.add_argument(
        '--no-canal',
        dest = 'canal',
        action = 'store_false'
    )
    parser.set_defaults(canal = True)
    parser.add_argument(
        '--middle',
        help = 'Consider middle ear effects',
        dest = 'middle',
        action = 'store_true'
    )
    parser.add_argument(
        '--no-middle',
        dest = 'middle',
        action = 'store_false'
    )
    parser.set_defaults(middle = True)
    # HRTF source: URL (http/https/file) or local path, see get_sofa()
    parser.add_argument(
        '--sofa',
        help = 'HTRF Sofa file',
        default = 'http://sofacoustics.org/data/database/cipic/subject_003.sofa'
    )
    parser.add_argument(
        '-a', '--azimuth',
        help = 'Azimuth of source in SOFA file units',
        default = 0,
        type = float
    )
    parser.add_argument(
        '-e', '--elevation',
        help = 'Elevation of source in SOFA file units',
        default = 0,
        type = float
    )
    # ear canal geometry used by canal()
    parser.add_argument(
        '-l',
        help = 'Ear canal length in mm',
        default = 22,
        type = float
    )
    parser.add_argument(
        '-d',
        help = 'Ear canal diameter in mm',
        default = 7,
        type = float
    )
    parser.add_argument(
        'input_file',
        help = 'Input file'
    )
    parser.add_argument(
        'output_file',
        help = 'Output file'
    )
    return parser
def read(filename : str) -> Tuple[np.ndarray, float]:
    '''
    Read a WAV file, normalize its samples to floats and collapse to mono
    (first channel only). Returns (samples, sample_rate). Exits the process
    on an unsupported sample format or dimensionality.
    '''
    f_s, data = wav.read(filename)
    # normalization divisor per signed integer sample format
    int_scale = {'int16': 32767, 'int32': 2147483647}
    dtype_name = data.dtype.name
    if dtype_name == 'uint8':
        # 8-bit WAV is unsigned; shift so the samples are zero-centred
        data = data / 255 - 0.5
    elif dtype_name in int_scale:
        data = data / int_scale[dtype_name]
    elif dtype_name == 'float32':
        data = 1.0 * data
    else:
        eprint(f'Input error: data.dtype = {data.dtype}')
        exit(1)
    if data.ndim == 2:
        # stereo (or multi-channel): keep only the first channel
        data = data[:, 0]
    elif data.ndim != 1:
        eprint(f'Input error: data.ndim = {data.ndim}')
        exit(1)
    return data, f_s
def get_sofa(url : str) -> SOFAFile:
    '''
    Open a SOFA (HRTF) file from an http(s) URL, a file:// URL or a plain path.

    Remote files are downloaded to a temporary file first.
    '''
    if url.startswith(('http://', 'https://')):
        import requests
        from tempfile import NamedTemporaryFile
        r = requests.get(url)
        r.raise_for_status()
        # Bug fix: the old `with NamedTemporaryFile()` deleted the file on
        # scope exit while SOFAFile may still need it, and never flushed the
        # buffered write, so SOFAFile could see a truncated file.
        # delete=False keeps the file alive for the returned SOFAFile
        # (at the cost of leaving a temp file behind).
        f = NamedTemporaryFile(delete=False)
        f.write(r.content)
        f.flush()
        f.close()
        return SOFAFile(f.name, 'r')
    if url.startswith('file://'):
        url = url[7:]   # strip the scheme, keep the path
    return SOFAFile(url, 'r')
def eprint(*args, **kwargs):
    '''print(), but to standard error.'''
    import sys
    print(*args, file=sys.stderr, **kwargs)
if __name__ == "__main__":
    # Run the simulator only when executed as a script, not on import.
    main()
| #!/usr/bin/env python
'''
Outer ear simulator
Author: <NAME> <<EMAIL>>
Version: 1.0.0
Date: 2019-09-09
'''
from typing import Tuple
import numpy as np
import scipy.io.wavfile as wav
import scipy.signal as ss
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from pysofaconventions import SOFAFile
def main() -> None:
    '''
    Entry point: read a WAV file, apply the selected ear stages, write the result.
    '''
    args = arg_parser().parse_args()
    samples, f_s = read(args.input_file)
    # Optional stages applied in physical order: head -> ear canal -> middle ear.
    stages = [
        (args.head, lambda s: head(s, args.sofa, args.azimuth, args.elevation)),
        (args.canal, lambda s: canal(s, f_s, args.l, args.d)),
        (args.middle, middle),
    ]
    for enabled, stage in stages:
        if enabled:
            samples = stage(samples)
    wav.write(args.output_file, f_s, samples)
def head(data : np.ndarray, sofa : SOFAFile, azimuth : float, elevation : float):
    '''
    Apply effects of the head (HRTF)

    Convolves the mono input with the left/right head-related impulse
    responses from the SOFA file, picked for the source position closest to
    the requested azimuth/elevation, and returns a stereo (samples, 2) array.
    '''
    from scipy.spatial import KDTree
    s = get_sofa(sofa)
    # NOTE(review): rows are presumably (azimuth, elevation, distance) -- confirm.
    pos = s.getVariableValue('SourcePosition')
    # find closest position to requested azimuth and elevation
    # TODO: consider normalizing position units to eg. degrees
    # The third (distance) coordinate is fixed at 1 for the nearest-neighbour query.
    index = KDTree(pos).query([azimuth, elevation, 1])[1]
    # Impulse responses for the chosen position, one row per receiver (ear).
    hrir = s.getDataIR()[index, :, :]
    data = data.T
    left = ss.fftconvolve(data, hrir[0])     # receiver 0: presumably left ear -- confirm
    right = ss.fftconvolve(data, hrir[1])    # receiver 1: presumably right ear -- confirm
    # Stack and swap axes to (samples, 2) so scipy.io.wavfile writes stereo.
    output = np.asarray([left, right]).swapaxes(-1, 0)
    return output
def canal(input : np.ndarray, f_s: int, l : float, d : float):
    '''
    Apply effects of the ear canal

    Modeled as band-pass resonances of a stopped pipe (first three odd
    modes), as in 'Matlab Auditory Periphery (MAP)'.

    input: float sample array; modified in place and also returned
    f_s:   sample rate in Hz
    l, d:  ear canal length / diameter in mm (must not both be 0)
    '''
    assert f_s > 0
    assert l >= 0
    assert d >= 0
    v = 343        # speed of sound, m/s
    gain = 10      # boost applied to each resonance band
    order = 1      # Butterworth order per band
    f_nyq = f_s / 2
    for n in [1, 3, 5]:
        # 'Stopped pipe' resonator; resonating frequency of mode n
        f_r = (n * v) / (4 * l / 1000 + 0.4 * d / 1000)
        # bandpass cut offsets somewhat chosen s.t. for the first mode, they coincide with the parameters from MAP
        lowcut = f_r - 1500  # Hz
        highcut = f_r + 500  # Hz
        # Bug fix: butter() requires 0 < Wn < 1; the old code passed raw
        # normalized edges, which raised for low sample rates or the higher
        # modes. Clamp to the open interval and skip modes whose band falls
        # entirely outside the representable range.
        low = max(lowcut / f_nyq, 1e-6)
        high = min(highcut / f_nyq, 1 - 1e-6)
        if low >= high:
            continue
        b, a = ss.butter(order, [low, high], btype = 'band')
        input += gain * ss.lfilter(b, a, input)
    return input
def middle(input):
    '''
    Apply the effects of the middle ear

    Modelled solely as the air/fluid impedance mismatch, the eardrum to
    oval-window area ratio, and the ossicular (malleus) lever.
    '''
    z_air = 414          # kg m^-2 s^-1
    z_water = 1.48e6     # kg m^-2 s^-1
    A_eardrum = 60       # mm^2
    A_oval = 3.2         # mm^2
    lever_malleus = 1.3
    # Power reflection coefficient at the air/fluid boundary; the rest is
    # transmitted into the inner ear.
    reflection = ((z_air - z_water) / (z_air + z_water)) ** 2
    transmission = 1 - reflection
    return input * transmission * (A_eardrum / A_oval) * lever_malleus
def arg_parser() -> ArgumentParser:
    '''
    Build the command line parser for the outer ear simulator.
    '''
    parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
    # Paired --x / --no-x switches; every stage is enabled by default.
    toggles = [
        ('head', 'Consider head effects'),
        ('canal', 'Consider ear canal effects'),
        ('middle', 'Consider middle ear effects'),
    ]
    for name, description in toggles:
        parser.add_argument('--' + name, help=description, dest=name,
                            action='store_true')
        parser.add_argument('--no-' + name, dest=name, action='store_false')
        parser.set_defaults(**{name: True})
    parser.add_argument(
        '--sofa',
        help='HTRF Sofa file',
        default='http://sofacoustics.org/data/database/cipic/subject_003.sofa')
    parser.add_argument('-a', '--azimuth',
                        help='Azimuth of source in SOFA file units',
                        default=0, type=float)
    parser.add_argument('-e', '--elevation',
                        help='Elevation of source in SOFA file units',
                        default=0, type=float)
    parser.add_argument('-l', help='Ear canal length in mm',
                        default=22, type=float)
    parser.add_argument('-d', help='Ear canal diameter in mm',
                        default=7, type=float)
    parser.add_argument('input_file', help='Input file')
    parser.add_argument('output_file', help='Output file')
    return parser
def read(filename : str) -> Tuple[np.ndarray, float]:
    '''
    Read a WAV file and normalize its samples to a float array.

    Only the first channel of a multi-channel file is kept.
    Exits the process on an unsupported sample format or layout.
    '''
    f_s, samples = wav.read(filename)
    # Per-dtype normalization towards the [-1, 1] range.
    normalizers = {
        'uint8': lambda s: s / 255 - 0.5,       # unsigned 8 bit, centred
        'int16': lambda s: s / 32767,
        'int32': lambda s: s / 2147483647,
        'float32': lambda s: 1.0 * s,           # already float; widen to float64
    }
    normalize = normalizers.get(str(samples.dtype))
    if normalize is None:
        eprint(f'Input error: data.dtype = {samples.dtype}')
        exit(1)
    samples = normalize(samples)
    if samples.ndim == 2:
        samples = samples[:, 0]                 # stereo: keep the first channel
    elif samples.ndim != 1:
        eprint(f'Input error: data.ndim = {samples.ndim}')
        exit(1)
    return samples, f_s
def get_sofa(url : str) -> SOFAFile:
    '''
    Open a SOFA (HRTF) file from an http(s) URL, a file:// URL or a plain path.

    Remote files are downloaded to a temporary file first.
    '''
    if url.startswith(('http://', 'https://')):
        import requests
        from tempfile import NamedTemporaryFile
        r = requests.get(url)
        r.raise_for_status()
        # Bug fix: the old `with NamedTemporaryFile()` deleted the file on
        # scope exit while SOFAFile may still need it, and never flushed the
        # buffered write, so SOFAFile could see a truncated file.
        # delete=False keeps the file alive for the returned SOFAFile
        # (at the cost of leaving a temp file behind).
        f = NamedTemporaryFile(delete=False)
        f.write(r.content)
        f.flush()
        f.close()
        return SOFAFile(f.name, 'r')
    if url.startswith('file://'):
        url = url[7:]   # strip the scheme, keep the path
    return SOFAFile(url, 'r')
def eprint(*args, **kwargs):
    '''print(), but to standard error.'''
    import sys
    print(*args, file=sys.stderr, **kwargs)
if __name__ == "__main__":
    # Run the simulator only when executed as a script, not on import.
    main()
| en | 0.775138 | #!/usr/bin/env python Outer ear simulator Author: <NAME> <<EMAIL>> Version: 1.0.0 Data: 2019-09-09 Apply effects of the head (HRTF) # find closest position to requested azimuth and elevation # TODO: consider normalizing position units to eg. degrees Apply effects of the ear canal Modeled as a bandpass filter, as in 'Matlab Auditory Periphery (MAP)' # 'Stopped pipe' resonator; resonating frequency # bandpass cut offsets somewhat chosen s.t. for the first mode, they coincide with the parameters from MAP # Hz # Hz Apply the effects of the middle ear Modelled soley as impedence mismatch and lever # kg m^-2 s^-1 # kg m^-2 s^-1 # mm^2 # mm^2 Read WAV file and normalize to float array # mono | 2.487645 | 2 |
main.py | ProfessorBeekums/mtg-deck-stats | 0 | 6621556 | import json
import random
import sys
import time
import deck_stats.deck as deck
def analyze(deck_json):
    """Simulate many opening hands for a deck and print land-count statistics.

    deck_json: parsed deck description accepted by deck.Deck.
    """
    from collections import Counter
    my_deck = deck.Deck(deck_json)
    # don't show dozens/hundreds of hands with less than 1% chance of occuring
    num_hands_to_print = 9000
    total_runs = 10000
    opening_hand_mana = Counter()
    for step in range(total_runs):
        if step < total_runs - 1:
            print("Running simulation... [%d] out of [%d]\r" % (step, total_runs,), end="")
        else:
            print("Running simulation... [%d] out of [%d]" % (total_runs, total_runs,))
        # Fix for the old TODO: random.shuffle mutated my_deck.cards;
        # random.sample draws without replacement and leaves the deck intact.
        # NOTE(review): 6 cards kept from the original code; a Magic opening
        # hand is normally 7 -- confirm before changing.
        opening_hand = random.sample(my_deck.cards, 6)
        # Count lands in the opening hand, grouped by their mana key.
        land_counts = Counter(
            card.get_mana_key() for card in opening_hand
            if isinstance(card, deck.LandCard)
        )
        # Build a stable, human readable key for this land distribution.
        opening_hand_mana_key = ', '.join(
            '%d %s lands' % (count, mana_key)
            for mana_key, count in sorted(land_counts.items())
        )
        opening_hand_mana[opening_hand_mana_key] += 1
    print("Simulation was completed!!!")
    # Print distributions from most to least common, stopping once the
    # printed hands cover the bulk of the runs.
    printed = 0
    for key, count in opening_hand_mana.most_common():
        label = key if key else ' no lands'
        print(count, " hands with ", label)
        printed += count
        if printed >= num_hands_to_print:
            break
if __name__ == "__main__":
    full_file_path = sys.argv[1]
    # Bug fix: use a context manager so the deck file is closed
    # (the original leaked the file handle).
    with open(full_file_path, 'r') as deck_json_file:
        deck_json = json.loads(deck_json_file.read())
    analyze(deck_json)
import random
import sys
import time
import deck_stats.deck as deck
def analyze(deck_json):
    """Simulate many opening hands for a deck and print land-count statistics.

    deck_json: parsed deck description accepted by deck.Deck.
    """
    from collections import Counter
    my_deck = deck.Deck(deck_json)
    # don't show dozens/hundreds of hands with less than 1% chance of occuring
    num_hands_to_print = 9000
    total_runs = 10000
    opening_hand_mana = Counter()
    for step in range(total_runs):
        if step < total_runs - 1:
            print("Running simulation... [%d] out of [%d]\r" % (step, total_runs,), end="")
        else:
            print("Running simulation... [%d] out of [%d]" % (total_runs, total_runs,))
        # Fix for the old TODO: random.shuffle mutated my_deck.cards;
        # random.sample draws without replacement and leaves the deck intact.
        # NOTE(review): 6 cards kept from the original code; a Magic opening
        # hand is normally 7 -- confirm before changing.
        opening_hand = random.sample(my_deck.cards, 6)
        # Count lands in the opening hand, grouped by their mana key.
        land_counts = Counter(
            card.get_mana_key() for card in opening_hand
            if isinstance(card, deck.LandCard)
        )
        # Build a stable, human readable key for this land distribution.
        opening_hand_mana_key = ', '.join(
            '%d %s lands' % (count, mana_key)
            for mana_key, count in sorted(land_counts.items())
        )
        opening_hand_mana[opening_hand_mana_key] += 1
    print("Simulation was completed!!!")
    # Print distributions from most to least common, stopping once the
    # printed hands cover the bulk of the runs.
    printed = 0
    for key, count in opening_hand_mana.most_common():
        label = key if key else ' no lands'
        print(count, " hands with ", label)
        printed += count
        if printed >= num_hands_to_print:
            break
if __name__ == "__main__":
full_file_path = sys.argv[1]
deck_json_file = open(full_file_path, 'r')
deck_json = deck_json_file.read()
deck_json = json.loads(deck_json)
analyze(deck_json) | en | 0.884228 | # don't show dozens/hundreds of hands with less than 1% chance of occuring # TODO do we want to clone? shuffle modifies original # TODO do we need true randomness? Does this match Magic Arena's algorithm for randomness? # use shuffle instead of sample so we can see what next turns will look like # count mana in opening hand # now make an appropriate key based on the mana # count = mana_counts[mana_count_key] | 3.299881 | 3 |
src/reminder/models.py | arnulfojr/sanic-persistance-patterns | 0 | 6621557 |
class MixinModel(dict):
    """Base class for dict-backed models; subclasses must provide a table schema."""

    # Table name placeholder; subclasses override it.
    __tablename__ = 'mixin_model'

    @classmethod
    def schema(cls):
        """Return the table-creation schema for this model."""
        # Bug fix: `raise NotImplemented` raised a TypeError at runtime
        # (NotImplemented is a sentinel value, not an exception class).
        raise NotImplementedError
class Reminder(MixinModel):
    """Reminder object."""

    __tablename__ = 'reminders'

    @classmethod
    def schema(cls):
        """DynamoDB table schema: a single string hash key named 'id'."""
        key = {'AttributeName': 'id'}
        return {
            'TableName': cls.__tablename__,
            'AttributeDefinitions': [dict(key, AttributeType='S')],
            'KeySchema': [dict(key, KeyType='HASH')],
            'ProvisionedThroughput': {
                'ReadCapacityUnits': 10,
                'WriteCapacityUnits': 10,
            },
        }
|
class MixinModel(dict):
    """Base class for dict-backed models; subclasses must provide a table schema."""

    # Table name placeholder; subclasses override it.
    __tablename__ = 'mixin_model'

    @classmethod
    def schema(cls):
        """Return the table-creation schema for this model."""
        # Bug fix: `raise NotImplemented` raised a TypeError at runtime
        # (NotImplemented is a sentinel value, not an exception class).
        raise NotImplementedError
class Reminder(MixinModel):
    """Reminder object."""

    __tablename__ = 'reminders'

    @classmethod
    def schema(cls):
        """DynamoDB table schema: a single string hash key named 'id'."""
        key = {'AttributeName': 'id'}
        return {
            'TableName': cls.__tablename__,
            'AttributeDefinitions': [dict(key, AttributeType='S')],
            'KeySchema': [dict(key, KeyType='HASH')],
            'ProvisionedThroughput': {
                'ReadCapacityUnits': 10,
                'WriteCapacityUnits': 10,
            },
        }
| en | 0.653751 | Reminder object. | 2.456501 | 2 |
memory_reader/stat_mappings.py | sparkie3/MF_run_counter | 43 | 6621558 | <filename>memory_reader/stat_mappings.py
import csv
from init import media_path
def load_stat_map():
    """Load media/stat_map.csv into a dict keyed by the integer stat ID."""
    # Fix: the csv module documentation requires the file to be opened with
    # newline='' so embedded newlines inside quoted fields are parsed correctly.
    with open(media_path + 'stat_map.csv', 'r', newline='') as fo:
        return {int(row['ID']): row for row in csv.DictReader(fo)}
# Skill-tab stat value -> display name. IDs stride by 8 per character class
# (three tabs each for Ama, Sorc, Nec, Pala, Barb, Druid, Assa).
SKILLTABS = {
    0: 'Bow Skills (Ama)',
    1: 'PM Skills (Ama)',
    2: 'Java Skills (Ama)',
    8: 'Fire Skills (Sorc)',
    9: 'Light Skills (Sorc)',
    10: 'Cold Skills (Sorc)',
    16: 'Curse Skills (Nec)',
    17: 'PB Skills (Nec)',
    18: 'Summon Skills (Nec)',
    24: 'Combat Skills (Pala)',
    25: 'Offensive Skills (Pala)',
    26: 'Defensive Skills (Pala)',
    32: 'Combat Skills (Barb)',
    33: 'Mastery Skills (Barb)',
    34: 'Warcry Skills (Barb)',
    40: 'Summon Skills (Druid)',
    41: 'Shapeshifting Skills (Druid)',
    42: 'Ele Skills (Druid)',
    48: 'Trap Skills (Assa)',
    49: 'Shadow Skills (Assa)',
    50: 'Martial Skills (Assa)',
}
# Character-class index -> "+N to <class> Skills" display name.
CLASSSKILLS = {
    0: 'Amazon Skills',
    1: 'Sorceress Skills',
    2: 'Necromancer Skills',
    3: 'Paladin Skills',
    4: 'Barbarian Skills',
    5: 'Druid Skills',
    6: 'Assassin Skills',
}
# Elemental-skills sub-id -> display name. Only id 1 (Fire) has a proper
# name here; the others look like unmapped placeholders.
ELEMENTALSKILLS = {
    0: 'ELEMENTAL_SKILLS_0',
    1: 'Fire Skills',
    2: 'ELEMENTAL_SKILLS_2',
    3: 'ELEMENTAL_SKILLS_3',
}
SKILLS = {
0: 'Attack',
1: 'Kick',
2: 'Throw',
3: 'Unsummon',
4: 'Left Hand Throw',
5: 'Left Hand Swing',
6: 'Magic Arrow',
7: 'Fire Arrow',
8: 'Inner Sight',
9: 'Critical Strike',
10: 'Jab',
11: 'Cold Arrow',
12: 'Multiple Shot',
13: 'Dodge',
14: 'Power Strike',
15: 'Poison Javelin',
16: 'Exploding Arrow',
17: 'Slow Missiles',
18: 'Avoid',
19: 'Impale',
20: 'Lightning Bolt',
21: 'Ice Arrow',
22: 'Guided Arrow',
23: 'Penetrate',
24: 'Charged Strike',
25: 'Plague Javelin',
26: 'Strafe',
27: 'Immolation Arrow',
28: 'Dopplezon',
29: 'Evade',
30: 'Fend',
31: 'Freezing Arrow',
32: 'Valkyrie',
33: 'Pierce',
34: 'Lightning Strike',
35: 'Lightning Fury',
36: 'Fire Bolt',
37: 'Warmth',
38: 'Charged Bolt',
39: 'Ice Bolt',
40: 'Frozen Armor',
41: 'Inferno',
42: 'Static Field',
43: 'Telekinesis',
44: 'Frost Nova',
45: 'Ice Blast',
46: 'Blaze',
47: 'Fire Ball',
48: 'Nova',
49: 'Lightning',
50: 'Shiver Armor',
51: 'Fire Wall',
52: 'Enchant',
53: 'Chain Lightning',
54: 'Teleport',
55: 'Glacial Spike',
56: 'Meteor',
57: 'Thunder Storm',
58: 'Energy Shield',
59: 'Blizzard',
60: 'Chilling Armor',
61: 'Fire Mastery',
62: 'Hydra',
63: 'Lightning Mastery',
64: 'Frozen Orb',
65: 'Cold Mastery',
66: 'Amplify Damage',
67: 'Teeth',
68: 'Bone Armor',
69: 'Skeleton Mastery',
70: 'Raise Skeleton',
71: 'Dim Vision',
72: 'Weaken',
73: 'Poison Dagger',
74: 'Corpse Explosion',
75: 'Clay Golem',
76: 'Iron Maiden',
77: 'Terror',
78: 'Bone Wall',
79: 'Golem Mastery',
80: 'Raise Skeletal Mage',
81: 'Confuse',
82: 'Life Tap',
83: 'Poison Explosion',
84: 'Bone Spear',
85: 'Blood Golem',
86: 'Attract',
87: 'Decrepify',
88: 'Bone Prison',
89: 'Summon Resist',
90: 'Iron Golem',
91: 'Lower Resist',
92: 'Poison Nova',
93: 'Bone Spirit',
94: 'Fire Golem',
95: 'Revive',
96: 'Sacrifice',
97: 'Smite',
98: 'Might',
99: 'Prayer',
100: 'Resist Fire',
101: 'Holy Bolt',
102: 'Holy Fire',
103: 'Thorns',
104: 'Defiance',
105: 'Resist Cold',
106: 'Zeal',
107: 'Charge',
108: 'Blessed Aim',
109: 'Cleansing',
110: 'Resist Lightning',
111: 'Vengeance',
112: 'Blessed Hammer',
113: 'Concentration',
114: 'Holy Freeze',
115: 'Vigor',
116: 'Conversion',
117: 'Holy Shield',
118: 'Holy Shock',
119: 'Sanctuary',
120: 'Meditation',
121: 'Fist of the Heavens',
122: 'Fanaticism',
123: 'Conviction',
124: 'Redemption',
125: 'Salvation',
126: 'Bash',
127: 'Sword Mastery',
128: 'Axe Mastery',
129: 'Mace Mastery',
130: 'Howl',
131: 'Find Potion',
132: 'Leap',
133: 'Double Swing',
134: 'Pole Arm Mastery',
135: 'Throwing Mastery',
136: 'Spear Mastery',
137: 'Taunt',
138: 'Shout',
139: 'Stun',
140: 'Double Throw',
141: 'Increased Stamina',
142: 'Find Item',
143: 'Leap Attack',
144: 'Concentrate',
145: 'Iron Skin',
146: 'Battle Cry',
147: 'Frenzy',
148: 'Increased Speed',
149: 'Battle Orders',
150: 'Grim Ward',
151: 'Whirlwind',
152: 'Berserk',
153: 'Natural Resistance',
154: 'War Cry',
155: 'Battle Command',
156: 'Fire Hit',
157: 'UnHolyBolt',
158: 'SkeletonRaise',
159: 'MaggotEgg',
160: 'ShamanFire',
161: 'MagottUp',
162: 'MagottDown',
163: 'MagottLay',
164: 'AndrialSpray',
165: 'Jump',
166: 'Swarm Move',
167: 'Nest',
168: 'Quick Strike',
169: 'VampireFireball',
170: 'VampireFirewall',
171: 'VampireMeteor',
172: 'GargoyleTrap',
173: 'SpiderLay',
174: 'VampireHeal',
175: 'VampireRaise',
176: 'Submerge',
177: 'FetishAura',
178: 'FetishInferno',
179: 'ZakarumHeal',
180: 'Emerge',
181: 'Resurrect',
182: 'Bestow',
183: 'MissileSkill1',
184: 'MonTeleport',
185: 'PrimeLightning',
186: 'PrimeBolt',
187: 'PrimeBlaze',
188: 'PrimeFirewall',
189: 'PrimeSpike',
190: 'PrimeIceNova',
191: 'PrimePoisonball',
192: 'PrimePoisonNova',
193: 'DiabLight',
194: 'DiabCold',
195: 'DiabFire',
196: 'FingerMageSpider',
197: 'DiabWall',
198: 'DiabRun',
199: 'DiabPrison',
200: 'PoisonBallTrap',
201: 'AndyPoisonBolt',
202: 'HireableMissile',
203: 'DesertTurret',
204: 'ArcaneTower',
205: 'MonBlizzard',
206: 'Mosquito',
207: 'CursedBallTrapRight',
208: 'CursedBallTrapLeft',
209: 'MonFrozenArmor',
210: 'MonBoneArmor',
211: 'MonBoneSpirit',
212: 'MonCurseCast',
213: 'HellMeteor',
214: 'RegurgitatorEat',
215: 'MonFrenzy',
216: 'QueenDeath',
217: 'Scroll of Identify',
218: 'Book of Identify',
219: 'Scroll of Townportal',
220: 'Book of Townportal',
221: 'Raven',
222: 'Plague Poppy',
223: 'Wearwolf',
224: 'Shape Shifting',
225: 'Firestorm',
226: 'Oak Sage',
227: 'Summon Spirit Wolf',
228: 'Wearbear',
229: 'Molten Boulder',
230: 'Arctic Blast',
231: 'Cycle of Life',
232: 'Feral Rage',
233: 'Maul',
234: 'Eruption',
235: 'Cyclone Armor',
236: 'Heart of Wolverine',
237: 'Summon Fenris',
238: 'Rabies',
239: 'Fire Claws',
240: 'Twister',
241: 'Vines',
242: 'Hunger',
243: 'Shock Wave',
244: 'Volcano',
245: 'Tornado',
246: 'Spirit of Barbs',
247: 'Summon Grizzly',
248: 'Fury',
249: 'Armageddon',
250: 'Hurricane',
251: 'Fire Trauma',
252: 'Claw Mastery',
253: 'Psychic Hammer',
254: 'Tiger Strike',
255: 'Dragon Talon',
256: 'Shock Field',
257: 'Blade Sentinel',
258: 'Quickness',
259: 'Fists of Fire',
260: 'Dragon Claw',
261: 'Charged Bolt Sentry',
262: 'Wake of Fire Sentry',
263: 'Weapon Block',
264: 'Cloak of Shadows',
265: 'Cobra Strike',
266: 'Blade Fury',
267: 'Fade',
268: 'Shadow Warrior',
269: 'Claws of Thunder',
270: 'Dragon Tail',
271: 'Lightning Sentry',
272: 'Inferno Sentry',
273: 'Mind Blast',
274: 'Blades of Ice',
275: 'Dragon Flight',
276: 'Death Sentry',
277: 'Blade Shield',
278: 'Venom',
279: 'Shadow Master',
280: 'Royal Strike',
281: 'Wake Of Destruction Sentry',
282: 'Imp Inferno',
283: 'Imp Fireball',
284: 'Baal Taunt',
285: 'Baal Corpse Explode',
286: 'Baal Monster Spawn',
287: 'Catapult Charged Ball',
288: 'Catapult Spike Ball',
289: 'Suck Blood',
290: 'Cry Help',
291: 'Healing Vortex',
292: 'Teleport 2',
293: 'Self-resurrect',
294: 'Vine Attack',
295: 'Overseer Whip',
296: 'Barbs Aura',
297: 'Wolverine Aura',
298: 'Oak Sage Aura',
299: 'Imp Fire Missile',
300: 'Impregnate',
301: 'Siege Beast Stomp',
302: 'MinionSpawner',
303: 'CatapultBlizzard',
304: 'CatapultPlague',
305: 'CatapultMeteor',
306: 'BoltSentry',
307: 'CorpseCycler',
308: 'DeathMaul',
309: 'Defense Curse',
310: 'Blood Mana',
311: 'mon inferno sentry',
312: 'mon death sentry',
313: 'sentry lightning',
314: 'fenris rage',
315: 'Baal Tentacle',
316: 'Baal Nova',
317: 'Baal Inferno',
318: 'Baal Cold Missiles',
319: 'MegademonInferno',
320: 'EvilHutSpawner',
321: 'CountessFirewall',
322: 'ImpBolt',
323: 'Horror Arctic Blast',
324: 'death sentry ltng',
325: 'VineCycler',
326: 'BearSmite',
327: 'Resurrect2',
328: 'BloodLordFrenzy',
329: 'Baal Teleport',
330: 'Imp Teleport',
331: 'Baal Clone Teleport',
332: 'ZakarumLightning',
333: 'VampireMissile',
334: 'MephistoMissile',
335: 'DoomKnightMissile',
336: 'RogueMissile',
337: 'HydraMissile',
338: 'NecromageMissile',
339: 'MonBow',
340: 'MonFireArrow',
341: 'MonColdArrow',
342: 'MonExplodingArrow',
343: 'MonFreezingArrow',
344: 'MonPowerStrike',
345: 'SuccubusBolt',
346: 'MephFrostNova',
347: 'MonIceSpear',
348: 'ShamanIce',
349: 'Diablogeddon',
350: 'Delerium Change',
351: 'NihlathakCorpseExplosion',
352: 'SerpentCharge',
353: 'Trap Nova',
354: 'UnHolyBoltEx',
355: 'ShamanFireEx',
356: 'Imp Fire Missile Ex'
}
STAT_MAP = load_stat_map()
| <filename>memory_reader/stat_mappings.py
import csv
from init import media_path
def load_stat_map():
    """Load media/stat_map.csv into a dict keyed by the integer stat ID."""
    # Fix: the csv module documentation requires the file to be opened with
    # newline='' so embedded newlines inside quoted fields are parsed correctly.
    with open(media_path + 'stat_map.csv', 'r', newline='') as fo:
        return {int(row['ID']): row for row in csv.DictReader(fo)}
SKILLTABS = {
0: 'Bow Skills (Ama)',
1: 'PM Skills (Ama)',
2: 'Java Skills (Ama)',
8: 'Fire Skills (Sorc)',
9: 'Light Skills (Sorc)',
10: 'Cold Skills (Sorc)',
16: 'Curse Skills (Nec)',
17: 'PB Skills (Nec)',
18: 'Summon Skills (Nec)',
24: 'Combat Skills (Pala)',
25: 'Offensive Skills (Pala)',
26: 'Defensive Skills (Pala)',
32: 'Combat Skills (Barb)',
33: 'Mastery Skills (Barb)',
34: 'Warcry Skills (Barb)',
40: 'Summon Skills (Druid)',
41: 'Shapeshifting Skills (Druid)',
42: 'Ele Skills (Druid)',
48: 'Trap Skills (Assa)',
49: 'Shadow Skills (Assa)',
50: 'Martial Skills (Assa)',
}
CLASSSKILLS = {
0: 'Amazon Skills',
1: 'Sorceress Skills',
2: 'Necromancer Skills',
3: 'Paladin Skills',
4: 'Barbarian Skills',
5: 'Druid Skills',
6: 'Assassin Skills',
}
ELEMENTALSKILLS = {
0: 'ELEMENTAL_SKILLS_0',
1: 'Fire Skills',
2: 'ELEMENTAL_SKILLS_2',
3: 'ELEMENTAL_SKILLS_3',
}
SKILLS = {
0: 'Attack',
1: 'Kick',
2: 'Throw',
3: 'Unsummon',
4: 'Left Hand Throw',
5: 'Left Hand Swing',
6: 'Magic Arrow',
7: 'Fire Arrow',
8: 'Inner Sight',
9: 'Critical Strike',
10: 'Jab',
11: 'Cold Arrow',
12: 'Multiple Shot',
13: 'Dodge',
14: 'Power Strike',
15: 'Poison Javelin',
16: 'Exploding Arrow',
17: 'Slow Missiles',
18: 'Avoid',
19: 'Impale',
20: 'Lightning Bolt',
21: 'Ice Arrow',
22: 'Guided Arrow',
23: 'Penetrate',
24: 'Charged Strike',
25: 'Plague Javelin',
26: 'Strafe',
27: 'Immolation Arrow',
28: 'Dopplezon',
29: 'Evade',
30: 'Fend',
31: 'Freezing Arrow',
32: 'Valkyrie',
33: 'Pierce',
34: 'Lightning Strike',
35: 'Lightning Fury',
36: 'Fire Bolt',
37: 'Warmth',
38: 'Charged Bolt',
39: 'Ice Bolt',
40: 'Frozen Armor',
41: 'Inferno',
42: 'Static Field',
43: 'Telekinesis',
44: 'Frost Nova',
45: 'Ice Blast',
46: 'Blaze',
47: 'Fire Ball',
48: 'Nova',
49: 'Lightning',
50: 'Shiver Armor',
51: 'Fire Wall',
52: 'Enchant',
53: 'Chain Lightning',
54: 'Teleport',
55: 'Glacial Spike',
56: 'Meteor',
57: 'Thunder Storm',
58: 'Energy Shield',
59: 'Blizzard',
60: 'Chilling Armor',
61: 'Fire Mastery',
62: 'Hydra',
63: 'Lightning Mastery',
64: 'Frozen Orb',
65: 'Cold Mastery',
66: 'Amplify Damage',
67: 'Teeth',
68: 'Bone Armor',
69: 'Skeleton Mastery',
70: 'Raise Skeleton',
71: 'Dim Vision',
72: 'Weaken',
73: 'Poison Dagger',
74: 'Corpse Explosion',
75: 'Clay Golem',
76: 'Iron Maiden',
77: 'Terror',
78: 'Bone Wall',
79: 'Golem Mastery',
80: 'Raise Skeletal Mage',
81: 'Confuse',
82: 'Life Tap',
83: 'Poison Explosion',
84: 'Bone Spear',
85: 'Blood Golem',
86: 'Attract',
87: 'Decrepify',
88: 'Bone Prison',
89: 'Summon Resist',
90: 'Iron Golem',
91: 'Lower Resist',
92: 'Poison Nova',
93: 'Bone Spirit',
94: 'Fire Golem',
95: 'Revive',
96: 'Sacrifice',
97: 'Smite',
98: 'Might',
99: 'Prayer',
100: 'Resist Fire',
101: 'Holy Bolt',
102: 'Holy Fire',
103: 'Thorns',
104: 'Defiance',
105: 'Resist Cold',
106: 'Zeal',
107: 'Charge',
108: 'Blessed Aim',
109: 'Cleansing',
110: 'Resist Lightning',
111: 'Vengeance',
112: 'Blessed Hammer',
113: 'Concentration',
114: 'Holy Freeze',
115: 'Vigor',
116: 'Conversion',
117: 'Holy Shield',
118: 'Holy Shock',
119: 'Sanctuary',
120: 'Meditation',
121: 'Fist of the Heavens',
122: 'Fanaticism',
123: 'Conviction',
124: 'Redemption',
125: 'Salvation',
126: 'Bash',
127: 'Sword Mastery',
128: 'Axe Mastery',
129: 'Mace Mastery',
130: 'Howl',
131: 'Find Potion',
132: 'Leap',
133: 'Double Swing',
134: 'Pole Arm Mastery',
135: 'Throwing Mastery',
136: 'Spear Mastery',
137: 'Taunt',
138: 'Shout',
139: 'Stun',
140: 'Double Throw',
141: 'Increased Stamina',
142: 'Find Item',
143: 'Leap Attack',
144: 'Concentrate',
145: 'Iron Skin',
146: 'Battle Cry',
147: 'Frenzy',
148: 'Increased Speed',
149: 'Battle Orders',
150: 'Grim Ward',
151: 'Whirlwind',
152: 'Berserk',
153: 'Natural Resistance',
154: 'War Cry',
155: 'Battle Command',
156: 'Fire Hit',
157: 'UnHolyBolt',
158: 'SkeletonRaise',
159: 'MaggotEgg',
160: 'ShamanFire',
161: 'MagottUp',
162: 'MagottDown',
163: 'MagottLay',
164: 'AndrialSpray',
165: 'Jump',
166: 'Swarm Move',
167: 'Nest',
168: 'Quick Strike',
169: 'VampireFireball',
170: 'VampireFirewall',
171: 'VampireMeteor',
172: 'GargoyleTrap',
173: 'SpiderLay',
174: 'VampireHeal',
175: 'VampireRaise',
176: 'Submerge',
177: 'FetishAura',
178: 'FetishInferno',
179: 'ZakarumHeal',
180: 'Emerge',
181: 'Resurrect',
182: 'Bestow',
183: 'MissileSkill1',
184: 'MonTeleport',
185: 'PrimeLightning',
186: 'PrimeBolt',
187: 'PrimeBlaze',
188: 'PrimeFirewall',
189: 'PrimeSpike',
190: 'PrimeIceNova',
191: 'PrimePoisonball',
192: 'PrimePoisonNova',
193: 'DiabLight',
194: 'DiabCold',
195: 'DiabFire',
196: 'FingerMageSpider',
197: 'DiabWall',
198: 'DiabRun',
199: 'DiabPrison',
200: 'PoisonBallTrap',
201: 'AndyPoisonBolt',
202: 'HireableMissile',
203: 'DesertTurret',
204: 'ArcaneTower',
205: 'MonBlizzard',
206: 'Mosquito',
207: 'CursedBallTrapRight',
208: 'CursedBallTrapLeft',
209: 'MonFrozenArmor',
210: 'MonBoneArmor',
211: 'MonBoneSpirit',
212: 'MonCurseCast',
213: 'HellMeteor',
214: 'RegurgitatorEat',
215: 'MonFrenzy',
216: 'QueenDeath',
217: 'Scroll of Identify',
218: 'Book of Identify',
219: 'Scroll of Townportal',
220: 'Book of Townportal',
221: 'Raven',
222: 'Plague Poppy',
223: 'Wearwolf',
224: 'Shape Shifting',
225: 'Firestorm',
226: 'Oak Sage',
227: 'Summon Spirit Wolf',
228: 'Wearbear',
229: 'Molten Boulder',
230: 'Arctic Blast',
231: 'Cycle of Life',
232: 'Feral Rage',
233: 'Maul',
234: 'Eruption',
235: 'Cyclone Armor',
236: 'Heart of Wolverine',
237: 'Summon Fenris',
238: 'Rabies',
239: 'Fire Claws',
240: 'Twister',
241: 'Vines',
242: 'Hunger',
243: 'Shock Wave',
244: 'Volcano',
245: 'Tornado',
246: 'Spirit of Barbs',
247: 'Summon Grizzly',
248: 'Fury',
249: 'Armageddon',
250: 'Hurricane',
251: 'Fire Trauma',
252: 'Claw Mastery',
253: 'Psychic Hammer',
254: 'Tiger Strike',
255: 'Dragon Talon',
256: 'Shock Field',
257: 'Blade Sentinel',
258: 'Quickness',
259: 'Fists of Fire',
260: 'Dragon Claw',
261: 'Charged Bolt Sentry',
262: 'Wake of Fire Sentry',
263: 'Weapon Block',
264: 'Cloak of Shadows',
265: 'Cobra Strike',
266: 'Blade Fury',
267: 'Fade',
268: 'Shadow Warrior',
269: 'Claws of Thunder',
270: 'Dragon Tail',
271: 'Lightning Sentry',
272: 'Inferno Sentry',
273: 'Mind Blast',
274: 'Blades of Ice',
275: 'Dragon Flight',
276: 'Death Sentry',
277: 'Blade Shield',
278: 'Venom',
279: 'Shadow Master',
280: 'Royal Strike',
281: 'Wake Of Destruction Sentry',
282: 'Imp Inferno',
283: 'Imp Fireball',
284: 'Baal Taunt',
285: 'Baal Corpse Explode',
286: 'Baal Monster Spawn',
287: 'Catapult Charged Ball',
288: 'Catapult Spike Ball',
289: 'Suck Blood',
290: 'Cry Help',
291: 'Healing Vortex',
292: 'Teleport 2',
293: 'Self-resurrect',
294: 'Vine Attack',
295: 'Overseer Whip',
296: 'Barbs Aura',
297: 'Wolverine Aura',
298: 'Oak Sage Aura',
299: 'Imp Fire Missile',
300: 'Impregnate',
301: 'Siege Beast Stomp',
302: 'MinionSpawner',
303: 'CatapultBlizzard',
304: 'CatapultPlague',
305: 'CatapultMeteor',
306: 'BoltSentry',
307: 'CorpseCycler',
308: 'DeathMaul',
309: 'Defense Curse',
310: 'Blood Mana',
311: 'mon inferno sentry',
312: 'mon death sentry',
313: 'sentry lightning',
314: 'fenris rage',
315: 'Baal Tentacle',
316: 'Baal Nova',
317: 'Baal Inferno',
318: 'Baal Cold Missiles',
319: 'MegademonInferno',
320: 'EvilHutSpawner',
321: 'CountessFirewall',
322: 'ImpBolt',
323: 'Horror Arctic Blast',
324: 'death sentry ltng',
325: 'VineCycler',
326: 'BearSmite',
327: 'Resurrect2',
328: 'BloodLordFrenzy',
329: 'Baal Teleport',
330: 'Imp Teleport',
331: 'Baal Clone Teleport',
332: 'ZakarumLightning',
333: 'VampireMissile',
334: 'MephistoMissile',
335: 'DoomKnightMissile',
336: 'RogueMissile',
337: 'HydraMissile',
338: 'NecromageMissile',
339: 'MonBow',
340: 'MonFireArrow',
341: 'MonColdArrow',
342: 'MonExplodingArrow',
343: 'MonFreezingArrow',
344: 'MonPowerStrike',
345: 'SuccubusBolt',
346: 'MephFrostNova',
347: 'MonIceSpear',
348: 'ShamanIce',
349: 'Diablogeddon',
350: 'Delerium Change',
351: 'NihlathakCorpseExplosion',
352: 'SerpentCharge',
353: 'Trap Nova',
354: 'UnHolyBoltEx',
355: 'ShamanFireEx',
356: 'Imp Fire Missile Ex'
}
STAT_MAP = load_stat_map()
| none | 1 | 3.230723 | 3 | |
server/tank.py | jacobrec/little-tanks | 0 | 6621559 | <reponame>jacobrec/little-tanks
import json
class Tank:
    """A player tank: a client connection plus position and heading."""

    def __init__(self, conn, pos, angle):
        self.conn = conn      # connection used to push state to the client
        self.pos = pos
        self.angle = angle

    def send_update(self):
        # NOTE(review): passes the Tank object itself; the connection is
        # presumably expected to stringify it via __str__ -- confirm.
        self.conn.write_message(self)

    def __str__(self):
        state = {"pos": self.pos, "angle": self.angle}
        return json.dumps(state)
| import json
class Tank:
def __init__(self, conn, pos, angle):
self.conn = conn
self.pos = pos
self.angle = angle
def send_update(self):
self.conn.write_message(self)
def __str__(self):
return json.dumps({
"pos": self.pos,
"angle": self.angle
}) | none | 1 | 2.952393 | 3 | |
prcdns/__init__.py | Kiterepo/prc-dns | 52 | 6621560 | from . import index, white_domain
| from . import index, white_domain
| none | 1 | 1.038747 | 1 | |
history_generator/plan.py | ReedOei/History-Generator | 19 | 6621561 | <gh_stars>10-100
class Plan:
    """Base class for a nation's plan; subclasses override build_plan()."""

    def __init__(self, parent, nation):
        self.parent = parent
        self.nation = nation

    def build_plan(self):
        # The base plan does nothing; subclasses provide the real behaviour.
        return None
| class Plan:
def __init__(self, parent, nation):
self.parent = parent
self.nation = nation
def build_plan(self):
return | none | 1 | 2.648461 | 3 | |
lcm/workflows/graphflow/task/lcm_sync_rest_task.py | onap/vfc-nfvo-lcm | 4 | 6621562 | # Copyright 2018 ZTE Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import json
from lcm.workflows.graphflow.task.sync_rest_task import SyncRestTask
from lcm.pub.utils import restcall
logger = logging.getLogger(__name__)
class LcmSyncRestTask(SyncRestTask):
    """Synchronous REST task that routes its calls through the MSB."""

    def call_rest(self, url, method, content):
        """Issue a REST request via the MSB.

        Returns a (status, parsed_json_body) tuple.
        NOTE(review): assumes restcall.req_by_msb returns a triple whose
        element 1 is the body and element 2 the status -- confirm.
        """
        ret = restcall.req_by_msb(url, method, content)
        logger.debug("call_rest result %s" % ret)
        # json.loads is the idiomatic replacement for json.JSONDecoder().decode().
        return ret[2], json.loads(ret[1])
| # Copyright 2018 ZTE Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import json
from lcm.workflows.graphflow.task.sync_rest_task import SyncRestTask
from lcm.pub.utils import restcall
logger = logging.getLogger(__name__)
class LcmSyncRestTask(SyncRestTask):
    """Synchronous REST task that routes its calls through the MSB."""

    def call_rest(self, url, method, content):
        """Issue a REST request via the MSB.

        Returns a (status, parsed_json_body) tuple.
        NOTE(review): assumes restcall.req_by_msb returns a triple whose
        element 1 is the body and element 2 the status -- confirm.
        """
        ret = restcall.req_by_msb(url, method, content)
        logger.debug("call_rest result %s" % ret)
        # json.loads is the idiomatic replacement for json.JSONDecoder().decode().
        return ret[2], json.loads(ret[1])
| en | 0.857076 | # Copyright 2018 ZTE Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. | 1.851094 | 2 |
mytest004.py | ShuailingZhao/mccnn | 0 | 6621563 | <reponame>ShuailingZhao/mccnn<gh_stars>0
#!/usr/bin/python
def printme( str ):
    # Python 2 only: uses the `print` statement. Also shadows the builtin
    # name `str`; prints the given value and returns None.
    print str;
    return;
| #!/usr/bin/python
def printme( str ):
    # Echo the given value to stdout.  This is a Python 2 print
    # statement; the module will not parse under Python 3.
    print str;
    return;
Level25.py | z-Wind/Python_Challenge | 0 | 6621564 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""http://www.pythonchallenge.com/pc/hex/lake.html"""
__author__ = "子風"
__copyright__ = "Copyright 2015, Sun All rights reserved"
__version__ = "1.0.0"
import get_challenge
import wave
# Download the 25 "lake" wave files for this challenge level and keep them
# open as wave readers (lake1.wav .. lake25.wav, indices 1..25 inclusive).
# NOTE(review): the handles are never closed here -- presumably later code
# reads their frames; confirm downstream usage.
wavs = [wave.open(get_challenge.download('butter', 'fly', 'http://www.pythonchallenge.com/pc/hex/lake%d.wav' % i)) for i in range(1, 26)]
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""http://www.pythonchallenge.com/pc/hex/lake.html"""
__author__ = "子風"
__copyright__ = "Copyright 2015, Sun All rights reserved"
__version__ = "1.0.0"
import get_challenge
import wave
wavs = [wave.open(get_challenge.download('butter', 'fly', 'http://www.pythonchallenge.com/pc/hex/lake%d.wav' % i)) for i in range(1, 26)] | en | 0.309463 | #!/usr/bin/env python # -*- coding: utf-8 -*- http://www.pythonchallenge.com/pc/hex/lake.html | 2.568923 | 3 |
cart/views.py | saptarsi96/FreshExpress | 0 | 6621565 | <filename>cart/views.py
from django.http.response import HttpResponse
from django.shortcuts import render, get_object_or_404, redirect
from cart.cart import Cart
from store.models import Product
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_POST
from cart.forms import CartForm
# Create your views here.
def displayitems(request):
    """Render the storefront page with every product in the catalogue.

    The stray debug ``print(result)`` from the original was removed.
    """
    result = Product.objects.all()
    return render(request, 'index5.html', {"items": result})
def order1(request):
    """Placeholder view returning a static congratulation message."""
    message = "you are too good"
    return HttpResponse(message)
def order(request):
    """Render a static page listing five sample grocery orders.

    The data is hard-coded demo content.  The five lists from the
    original differed only in the daal quantity, and the fifth order
    deliberately omitted 'Surf-Excel'; both quirks are preserved.
    """
    common = ['Ariel', 'Toothpaste', 'Mouth-wash', 'Axe perfume', '5 Kg flour']
    orderlist = {
        "first": ['1kg arhard daal', 'Surf-Excel'] + common,
        "second": ['2kg arhard daal', 'Surf-Excel'] + common,
        "third": ['3kg arhard daal', 'Surf-Excel'] + common,
        # Key casing ('Fourth' vs lower-case siblings) kept as in the
        # original -- templates may depend on it.
        "Fourth": ['4kg arhard daal', 'Surf-Excel'] + common,
        "fifth": ['5kg arhard daal'] + common,
    }
    context = {'li': orderlist}
    return render(request, 'index4.html', context)
@login_required
@require_POST
def add_to_cart(request):
    """Add a product (id + quantity from the POSTed CartForm) to the
    session cart, then redirect to the cart page.

    An invalid form is silently ignored (no error message is queued).
    """
    cart = Cart(request)
    form = CartForm(request.POST)
    if form.is_valid():
        product_id = form.cleaned_data['product_id']
        quantity = form.cleaned_data['quantity']
        # Only products currently flagged as available can be added.
        # (Field name 'availibility' is misspelled in the model.)
        product = get_object_or_404(Product, id=product_id, availibility=True)
        cart.add(product_id, product.price, quantity)
        messages.success(request, f'{product.name} added to cart.')
    # NOTE(review): source indentation was lost; the redirect is placed at
    # function level so invalid forms still return a response -- confirm
    # against the original file.
    return redirect('cart:cart_details')
@login_required
def cart_details(request):
    """Show the cart contents with per-line totals and the grand total."""
    cart = Cart(request)
    # Cart keys are product ids (strings); fetch the matching products.
    products = Product.objects.filter(pk__in=cart.cart.keys())
    productkeys = list(cart.cart.keys())
    # Space-separated id list passed to the template -- presumably for a
    # bulk-checkout form; confirm template usage.
    productlist = ' '.join(map(str, productkeys))
    def map_function(p):
        # One display row per product, pre-filling the quantity form.
        pid = str(p.id)
        q = cart.cart[pid]['quantity']
        return {'product': p, 'quantity': q, 'total': p.price*q, 'form': CartForm(initial={'quantity': q, 'product_id': pid})}
    cart_items = map(map_function, products)
    return render(request, 'cart/cart_details.html', {'cart_items': cart_items, 'total': cart.get_total_price(), 'productlist': productlist})
@login_required
def remove_from_cart(request, id):
    """Drop the product with the given id from the session cart."""
    Cart(request).remove(str(id))
    return redirect('cart:cart_details')
@login_required
def clear_cart(request):
    """Empty the session cart entirely, then go back to the cart page."""
    Cart(request).clear()
    return redirect('cart:cart_details')
| <filename>cart/views.py
from django.http.response import HttpResponse
from django.shortcuts import render, get_object_or_404, redirect
from cart.cart import Cart
from store.models import Product
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_POST
from cart.forms import CartForm
# Create your views here.
def displayitems(request):
result = Product.objects.all()
print(result)
return render(request, 'index5.html', {"items": result})
def order1(request):
return HttpResponse("you are too good")
def order(request):
orderlist = {}
items1 = ['1kg arhard daal', 'Surf-Excel', 'Ariel',
'Toothpaste', 'Mouth-wash', 'Axe perfume', '5 Kg flour']
items2 = ['2kg arhard daal', 'Surf-Excel', 'Ariel',
'Toothpaste', 'Mouth-wash', 'Axe perfume', '5 Kg flour']
items3 = ['3kg arhard daal', 'Surf-Excel', 'Ariel',
'Toothpaste', 'Mouth-wash', 'Axe perfume', '5 Kg flour']
items4 = ['4kg arhard daal', 'Surf-Excel', 'Ariel',
'Toothpaste', 'Mouth-wash', 'Axe perfume', '5 Kg flour']
items5 = ['5kg arhard daal', 'Ariel', 'Toothpaste',
'Mouth-wash', 'Axe perfume', '5 Kg flour']
orderlist["first"] = items1
orderlist["second"] = items2
orderlist["third"] = items3
orderlist["Fourth"] = items4
orderlist["fifth"] = items5
context = {'li': orderlist}
return render(request, 'index4.html', context)
@login_required
@require_POST
def add_to_cart(request):
cart = Cart(request)
form = CartForm(request.POST)
if form.is_valid():
product_id = form.cleaned_data['product_id']
quantity = form.cleaned_data['quantity']
product = get_object_or_404(Product, id=product_id, availibility=True)
cart.add(product_id, product.price, quantity)
messages.success(request, f'{product.name} added to cart.')
return redirect('cart:cart_details')
@login_required
def cart_details(request):
cart = Cart(request)
products = Product.objects.filter(pk__in=cart.cart.keys())
productkeys = list(cart.cart.keys())
productlist = ' '.join(map(str, productkeys))
def map_function(p):
pid = str(p.id)
q = cart.cart[pid]['quantity']
return {'product': p, 'quantity': q, 'total': p.price*q, 'form': CartForm(initial={'quantity': q, 'product_id': pid})}
cart_items = map(map_function, products)
return render(request, 'cart/cart_details.html', {'cart_items': cart_items, 'total': cart.get_total_price(), 'productlist': productlist})
@login_required
def remove_from_cart(request, id):
cart = Cart(request)
cart.remove(str(id))
return redirect('cart:cart_details')
@login_required
def clear_cart(request):
cart = Cart(request)
cart.clear()
return redirect('cart:cart_details')
| en | 0.968116 | # Create your views here. | 2.118001 | 2 |
kubuculum/statistics/stats_splitter/stats_splitter.py | manojtpillai/kubuculum | 3 | 6621566 |
import logging
import os
import kubuculum.statistics.util_functions
from kubuculum import util_functions
logger = logging.getLogger (__name__)
class stats_splitter:
    """Fan-out statistics collector.

    Instantiates one handler per configured statistics module and
    forwards start/gather/stop to each of them.
    """

    def __init__(self, run_dir, params_dict, globals):
        """Build one sub-module handle per entry in 'module_list'."""
        # Directory this module lives in.
        self.dirpath = os.path.dirname(os.path.abspath(__file__))
        # Parameters scoped to statistics -> stats_splitter.
        self.params = util_functions.get_modparams(
            params_dict, ['statistics', 'stats_splitter'])
        self.modhandles = []
        for stats_dict in self.params['module_list']:
            # Each entry is a one-item mapping: {module_name: params}.
            stats_module, stats_module_params = list(stats_dict.items())[0]
            handle = kubuculum.statistics.util_functions.create_object(
                stats_module, run_dir, params_dict, globals)
            handle.update_params(stats_module_params)
            self.modhandles.append(handle)
        logger.debug(f"statistics enabled: {self.params['module_list']}")

    def start(self):
        """Start collection on every sub-module."""
        for handle in self.modhandles:
            handle.start()

    def gather(self, tag=""):
        """Ask every sub-module to record a sample labelled with *tag*."""
        for handle in self.modhandles:
            handle.gather(tag)

    def stop(self):
        """Stop collection on every sub-module."""
        for handle in self.modhandles:
            handle.stop()
|
import logging
import os
import kubuculum.statistics.util_functions
from kubuculum import util_functions
logger = logging.getLogger (__name__)
class stats_splitter:
def __init__ (self, run_dir, params_dict, globals):
# get directory pathname for module
self.dirpath = os.path.dirname (os.path.abspath (__file__))
# update params
labels_path = ['statistics', 'stats_splitter']
self.params = util_functions.get_modparams (params_dict, labels_path)
self.modhandles = []
for stats_dict in self.params['module_list']:
# stats_dict is of the form: stats_module: {dict_of_params}
(stats_module, stats_module_params) = \
list (stats_dict.items())[0]
handle = kubuculum.statistics.util_functions.create_object \
(stats_module, run_dir, params_dict, globals)
handle.update_params (stats_module_params)
self.modhandles.append (handle)
logger.debug (f"statistics enabled: {self.params['module_list']}")
def start (self):
for handle in self.modhandles:
handle.start ()
def gather (self, tag=""):
for handle in self.modhandles:
handle.gather (tag)
def stop (self):
for handle in self.modhandles:
handle.stop ()
| en | 0.272233 | # get directory pathname for module # update params # stats_dict is of the form: stats_module: {dict_of_params} | 2.297344 | 2 |
fem/base_app/gui/main_window/base_beta_menu.py | mjredmond/FEMApp | 1 | 6621567 | <reponame>mjredmond/FEMApp
from __future__ import print_function, absolute_import
import sys
import os.path
from qtpy import QtGui, QtCore, QtWidgets
from fem.base_app.configuration import BaseConfiguration
from fem.utilities import BaseObject
class BaseBetaMenu(BaseObject):
    """Adds a "Check Beta Release" entry to the main window's menu bar
    when a separate beta executable is available on disk."""

    # Subclasses may override this with an app-specific configuration class.
    BaseConfiguration = BaseConfiguration

    def __init__(self, main_window):
        self.main_window = main_window
        self.menu_bar = self.main_window.menuBar()
        """:type: QtWidgets.QMenuBar"""
        self.config = self.BaseConfiguration.instance()
        self.beta_file = self.config.beta_file()
        try:
            # Beta is offered only if the configured file exists and we are
            # not already running that very executable.
            self.beta_available = os.path.isfile(self.beta_file) and sys.executable != self.beta_file
        except TypeError:
            # beta_file() returned a non-path (e.g. None): no beta configured.
            self.beta_available = False
        if self.beta_available:
            self.beta_menu = self.menu_bar.addMenu("&Check Beta Release")
            self.beta_version = self.beta_menu.addAction("Beta Release Available!")
            self.beta_version.triggered.connect(self._beta_version)

    def _beta_version(self, *args):
        """Menu handler: launch the beta executable and block until it exits."""
        if not self.beta_available:
            return
        import subprocess
        # Run from the beta's own directory so its relative resources resolve.
        p = subprocess.Popen([self.beta_file], cwd=os.path.dirname(self.beta_file))
        p.wait()

    @classmethod
    def copy_cls(cls):
        """Return a fresh subclass with the same name, so per-application
        customisation does not mutate the shared base class."""
        class _Tmp(cls):
            pass
        _Tmp.__name__ = cls.__name__
        return _Tmp
| from __future__ import print_function, absolute_import
import sys
import os.path
from qtpy import QtGui, QtCore, QtWidgets
from fem.base_app.configuration import BaseConfiguration
from fem.utilities import BaseObject
class BaseBetaMenu(BaseObject):
BaseConfiguration = BaseConfiguration
def __init__(self, main_window):
self.main_window = main_window
self.menu_bar = self.main_window.menuBar()
""":type: QtWidgets.QMenuBar"""
self.config = self.BaseConfiguration.instance()
self.beta_file = self.config.beta_file()
try:
self.beta_available = os.path.isfile(self.beta_file) and sys.executable != self.beta_file
except TypeError:
self.beta_available = False
if self.beta_available:
self.beta_menu = self.menu_bar.addMenu("&Check Beta Release")
self.beta_version = self.beta_menu.addAction("Beta Release Available!")
self.beta_version.triggered.connect(self._beta_version)
def _beta_version(self, *args):
if not self.beta_available:
return
import subprocess
p = subprocess.Popen([self.beta_file], cwd=os.path.dirname(self.beta_file))
p.wait()
@classmethod
def copy_cls(cls):
class _Tmp(cls):
pass
_Tmp.__name__ = cls.__name__
return _Tmp | en | 0.202157 | :type: QtWidgets.QMenuBar | 2.169497 | 2 |
Tools/Scenarios/strip_code_tex.py | ErQing/Nova | 212 | 6621568 | #!/usr/bin/env python3
import re
from luaparser import astnodes
from nova_script_parser import (get_node_name, normalize_dialogue,
parse_chapters, walk_functions)
# Input scenario script and the TeX file produced from it.
in_filename = 'scenario.txt'
out_filename = 'scenario_no_code.tex'
# (english_fragment, chinese_translation) pairs used to localize asset
# names for the human-readable script.
translate_data = [
    ('room', '房间'),
]
# Longest keys first so longer matches win over their prefixes.
translate_data = sorted(translate_data, key=lambda x: len(x[0]), reverse=True)
# Compiled once at import time; the original recompiled both patterns on
# every call.
_CAMEL_BOUNDARY_RE = re.compile('(.)([A-Z][a-z]+)')
_LOWER_UPPER_RE = re.compile('([a-z0-9])([A-Z])')


def camel_to_snake(s):
    """Convert a CamelCase / mixedCase identifier to snake_case.

    >>> camel_to_snake('CamelCase')
    'camel_case'
    """
    s = _CAMEL_BOUNDARY_RE.sub(r'\1_\2', s)
    s = _LOWER_UPPER_RE.sub(r'\1_\2', s)
    return s.lower()
def translate(s):
    """Localize an asset name: snake_case it, apply the translation
    table, then strip the underscores.

    Raises AssertionError (with the offending string) if any ASCII
    letter survives, i.e. the translation table is missing an entry.
    """
    s = camel_to_snake(s)
    # translate_data is sorted longest-key-first, so longer English
    # fragments are replaced before their prefixes.
    for x, y in translate_data:
        s = s.replace(x, y)
    s = s.replace('_', '')
    assert not any('A' <= c <= 'Z' or 'a' <= c <= 'z' for c in s), s
    return s
def parse_code(code, f):
    """Scan a Lua code block and extract the last background image and
    background music it sets.

    Returns (bg_name, bgm_name); either is None when the block does not
    change it.  *f* is unused here -- kept for the caller's convention.
    """
    bg_name = None
    bgm_name = None
    for func_name, args, _ in walk_functions(code):
        if (func_name in [
                'show', 'trans', 'trans2', 'trans_fade', 'trans_left',
                'trans_right', 'trans_up', 'trans_down'
        ] and args and get_node_name(args[0]) == 'bg'
                and isinstance(args[1], astnodes.String)
                and not args[1].s.startswith('chapter')):
            # Plain background change: bg:show('name') and friends.
            # Chapter title cards are skipped.
            bg_name = args[1].s
        elif (func_name == 'show_loop' and args
                and get_node_name(args[0]) == 'bg'):
            # Looping background: the name is the first field of a Lua table.
            bg_name = args[1].fields[0].value.s
        elif func_name == 'timeline':
            bg_name = args[0].s
        elif (func_name in ['play', 'fade_in'] and args
                and get_node_name(args[0]) == 'bgm'):
            bgm_name = args[1].s
    return bg_name, bgm_name
# Single-pass escape table.  The command-style escapes end in '{}' so a
# letter immediately following them is not absorbed into the command name:
# 'a\b' must become 'a\textbackslash{}b', not the invalid
# 'a\textbackslashb' the old sequential-replace version produced (same
# fix for '~' and '^').
_TEX_ESCAPES = str.maketrans({
    '\\': r'\textbackslash{}',
    '&': r'\&', '%': r'\%', '$': r'\$', '#': r'\#',
    '_': r'\_', '{': r'\{', '}': r'\}', ' ': r'\ ',
    '~': r'\textasciitilde{}',
    '^': r'\textasciicircum{}',
})


def normalize_tex(s):
    """Escape *s* for inclusion in a TeX document.

    Special characters are escaped in a single translate() pass (which
    also prevents one replacement's output being re-escaped by a later
    one), newlines become forced TeX line breaks, and blank source
    lines become paragraph breaks.
    """
    s = s.translate(_TEX_ESCAPES)
    s = s.replace('\n', ' \\\\\n')
    # A blank source line produced two consecutive forced breaks; turn
    # that back into a TeX paragraph break.
    s = s.replace(' \\\\\n \\\\\n', '\n\n')
    return s
def main():
    """Parse the scenario script and emit a code-free TeX rendition.

    Backgrounds are printed in orange, music cues in blue, character
    names in light gray; each chapter starts a new page.
    """
    with open(in_filename, 'r', encoding='utf-8') as f:
        chapters = parse_chapters(f)
    with open(out_filename, 'w', encoding='utf-8', newline='\n') as f:
        # Document preamble (A4, CJK support, no paragraph indent).
        f.write(r"""\documentclass{article}
\usepackage[a4paper,left=1in,right=1in,top=1in,bottom=1in]{geometry}
\usepackage[hidelinks]{hyperref}
\usepackage{xcolor}
\usepackage{xeCJK}
\setlength{\parindent}{0pt}
\setlength{\parskip}{1ex}
""")
        f.write('\\begin{document}\n\n')
        for chapter_name, entries, _, _ in chapters:
            # Progress indicator on the console.
            print(chapter_name)
            chapter_name = normalize_tex(chapter_name)
            f.write(f'\\section{{{chapter_name}}}\n\n')
            for code, chara_name, dialogue in entries:
                bg_name, bgm_name = parse_code(code, f)
                if bg_name:
                    bg_name = normalize_tex(translate(bg_name))
                    f.write(f'{{\\color{{orange}} 场景:{bg_name}}}\n\n')
                if bgm_name:
                    bgm_name = normalize_tex(translate(bgm_name))
                    f.write(f'{{\\color{{blue}} 音乐:{bgm_name}}}\n\n')
                # Keep voice-acting TODO markers; drop other annotations.
                dialogue = normalize_dialogue(dialogue, keep_todo=['配音'])
                if dialogue:
                    dialogue = normalize_tex(dialogue)
                    if chara_name:
                        chara_name = normalize_tex(chara_name)
                        f.write(
                            f'{{\\color{{lightgray}} {chara_name}}}{dialogue}\n\n'
                        )
                    else:
                        f.write(dialogue + '\n\n')
            f.write('\\newpage\n\n')
        # NOTE(review): source indentation was lost; this write is placed
        # inside the 'with' block (writing after close would raise).
        f.write('\\end{document}\n')


if __name__ == '__main__':
    main()
| #!/usr/bin/env python3
import re
from luaparser import astnodes
from nova_script_parser import (get_node_name, normalize_dialogue,
parse_chapters, walk_functions)
in_filename = 'scenario.txt'
out_filename = 'scenario_no_code.tex'
translate_data = [
('room', '房间'),
]
translate_data = sorted(translate_data, key=lambda x: len(x[0]), reverse=True)
def camel_to_snake(s):
s = re.compile('(.)([A-Z][a-z]+)').sub(r'\1_\2', s)
s = re.compile('([a-z0-9])([A-Z])').sub(r'\1_\2', s)
s = s.lower()
return s
def translate(s):
s = camel_to_snake(s)
for x, y in translate_data:
s = s.replace(x, y)
s = s.replace('_', '')
assert not any('A' <= c <= 'Z' or 'a' <= c <= 'z' for c in s), s
return s
def parse_code(code, f):
bg_name = None
bgm_name = None
for func_name, args, _ in walk_functions(code):
if (func_name in [
'show', 'trans', 'trans2', 'trans_fade', 'trans_left',
'trans_right', 'trans_up', 'trans_down'
] and args and get_node_name(args[0]) == 'bg'
and isinstance(args[1], astnodes.String)
and not args[1].s.startswith('chapter')):
bg_name = args[1].s
elif (func_name == 'show_loop' and args
and get_node_name(args[0]) == 'bg'):
bg_name = args[1].fields[0].value.s
elif func_name == 'timeline':
bg_name = args[0].s
elif (func_name in ['play', 'fade_in'] and args
and get_node_name(args[0]) == 'bgm'):
bgm_name = args[1].s
return bg_name, bgm_name
def normalize_tex(s):
s = s.replace('\\', '\\textbackslash')
for x in ' &%$#_{}':
s = s.replace(x, '\\' + x)
s = s.replace('~', '\\textasciitilde')
s = s.replace('^', '\\textasciicircum')
s = s.replace('\n', ' \\\\\n')
s = s.replace(' \\\\\n \\\\\n', '\n\n')
return s
def main():
with open(in_filename, 'r', encoding='utf-8') as f:
chapters = parse_chapters(f)
with open(out_filename, 'w', encoding='utf-8', newline='\n') as f:
f.write(r"""\documentclass{article}
\usepackage[a4paper,left=1in,right=1in,top=1in,bottom=1in]{geometry}
\usepackage[hidelinks]{hyperref}
\usepackage{xcolor}
\usepackage{xeCJK}
\setlength{\parindent}{0pt}
\setlength{\parskip}{1ex}
""")
f.write('\\begin{document}\n\n')
for chapter_name, entries, _, _ in chapters:
print(chapter_name)
chapter_name = normalize_tex(chapter_name)
f.write(f'\\section{{{chapter_name}}}\n\n')
for code, chara_name, dialogue in entries:
bg_name, bgm_name = parse_code(code, f)
if bg_name:
bg_name = normalize_tex(translate(bg_name))
f.write(f'{{\\color{{orange}} 场景:{bg_name}}}\n\n')
if bgm_name:
bgm_name = normalize_tex(translate(bgm_name))
f.write(f'{{\\color{{blue}} 音乐:{bgm_name}}}\n\n')
dialogue = normalize_dialogue(dialogue, keep_todo=['配音'])
if dialogue:
dialogue = normalize_tex(dialogue)
if chara_name:
chara_name = normalize_tex(chara_name)
f.write(
f'{{\\color{{lightgray}} {chara_name}}}{dialogue}\n\n'
)
else:
f.write(dialogue + '\n\n')
f.write('\\newpage\n\n')
f.write('\\end{document}\n')
if __name__ == '__main__':
main()
| en | 0.137602 | #!/usr/bin/env python3 #_{}': \documentclass{article} \usepackage[a4paper,left=1in,right=1in,top=1in,bottom=1in]{geometry} \usepackage[hidelinks]{hyperref} \usepackage{xcolor} \usepackage{xeCJK} \setlength{\parindent}{0pt} \setlength{\parskip}{1ex} | 2.811481 | 3 |
src/import_mat.py | JVini98/Synthetic_ECG | 0 | 6621569 | import scipy
from scipy import signal
from scipy.io import loadmat
import pandas as pd
import os
import shutil
import matplotlib.pyplot as plt
import numpy as np
# --- configuration -------------------------------------------------------
out_dir = "/home/jvini/PycharmProjects/TFG_ECG/formated_data_AF_filtered"
os.makedirs(out_dir, exist_ok=True)

# Reference labels: one (record_name, class) row per recording; class 'N'
# marks the normal-rhythm records we keep.
df = pd.read_csv(r'/home/jvini/PycharmProjects/TFG_ECG/training2017/REFERENCE-original.csv')
categories = df.values

af_files_counter = 1

# 5th-order Butterworth band-pass 0.5-100 Hz plus a 50 Hz notch filter
# (mains-hum removal), both designed for the 300 Hz sampling rate.
b, a = signal.butter(5, [0.5, 100], fs=300, btype='band')
b2, a2 = signal.iirnotch(50, 30, 300)

for i in range(1, 8528):
    if categories.item((i - 1, 1)) != 'N':
        continue
    # Record names are zero-padded to five digits (A00001 .. A08527);
    # replaces the original's four-branch elif chain.
    var = f'A{i:05d}'
    ecg = loadmat(f'/home/jvini/PycharmProjects/TFG_ECG/training2017/{var}.mat')
    ecg_array = ecg['val'][0]
    if ecg_array.size < 5000:
        # Too short to yield the fixed 5000-sample window.
        continue
    # Zero-phase filtering: band-pass, then notch.
    filtered_ecg = signal.filtfilt(b, a, ecg_array)
    filtered_ecg = signal.filtfilt(b2, a2, filtered_ecg)
    if filtered_ecg.size >= 6000:
        # Long enough to drop the first 1000 (possibly transient) samples
        # before taking the window -- same rule as the original branches.
        filtered_ecg = filtered_ecg[1000:]
    # 'with' closes the file even on error (the original leaked the handle
    # and only flushed it).  One sample per line, first 5000 samples.
    with open(f'{out_dir}/{10001 + af_files_counter * 10}.asc', "w") as out_file:
        out_file.writelines(f'{sample}\n' for sample in filtered_ecg[:5000])
    af_files_counter = af_files_counter + 1
# (Removed: dead commented-out shutil.copy duplication block and unused
# matplotlib preview lines from the original.)
#plt.show() | import scipy
from scipy import signal
from scipy.io import loadmat
import pandas as pd
import os
import shutil
import matplotlib.pyplot as plt
import numpy as np
out_dir = "/home/jvini/PycharmProjects/TFG_ECG/formated_data_AF_filtered"
os.makedirs(out_dir, exist_ok=True)
df = pd.read_csv(r'/home/jvini/PycharmProjects/TFG_ECG/training2017/REFERENCE-original.csv')
categories = df.values
af_files_counter = 1
b, a = signal.butter(5,[0.5,100],fs = 300, btype='band')
b2, a2 = signal.iirnotch(50,30,300)
for i in range(1, 8528):
if categories.item((i-1, 1)) == 'N':
if i < 10:
var = f'A0000{i}'
elif 10 <= i < 100:
var = f'A000{i}'
elif 100 <= i < 1000:
var = f'A00{i}'
elif 1000 <= i:
var = f'A0{i}'
ecg = loadmat(f'/home/jvini/PycharmProjects/TFG_ECG/training2017/{var}.mat')
ecg_array = ecg['val'][0]
if 5000 <= ecg_array.size :
filtered_ecg = signal.filtfilt(b,a,ecg_array)
filtered_ecg = signal.filtfilt(b2,a2,filtered_ecg)
file = open(f'{out_dir}/{10001 + af_files_counter*10}.asc', "w")
if filtered_ecg.size < 6000:
for i ,line in enumerate(filtered_ecg):
if i == 5000: break
file.write(str(line))
file.write("\n")
file.flush()
elif filtered_ecg.size >=6000:
filtered_ecg = filtered_ecg[1000:]
for i ,line in enumerate(filtered_ecg):
if i == 5000: break
file.write(str(line))
file.write("\n")
file.flush()
af_files_counter = af_files_counter + 1
"""
shutil.copy(f'{out_dir}/{10001 + af_files_counter * 10}.asc',
f'{out_dir}/{10002 + af_files_counter * 10}.asc')
shutil.copy(f'{out_dir}/{10001 + af_files_counter * 10}.asc',
f'{out_dir}/{10003 + af_files_counter * 10}.asc')
shutil.copy(f'{out_dir}/{10001 + af_files_counter * 10}.asc',
f'{out_dir}/{10004 + af_files_counter * 10}.asc')
shutil.copy(f'{out_dir}/{10001 + af_files_counter * 10}.asc',
f'{out_dir}/{10005 + af_files_counter * 10}.asc')
"""
#plt.figure()
#plt.plot(ecg_array)
#plt.show() | sr | 0.203548 | shutil.copy(f'{out_dir}/{10001 + af_files_counter * 10}.asc', f'{out_dir}/{10002 + af_files_counter * 10}.asc') shutil.copy(f'{out_dir}/{10001 + af_files_counter * 10}.asc', f'{out_dir}/{10003 + af_files_counter * 10}.asc') shutil.copy(f'{out_dir}/{10001 + af_files_counter * 10}.asc', f'{out_dir}/{10004 + af_files_counter * 10}.asc') shutil.copy(f'{out_dir}/{10001 + af_files_counter * 10}.asc', f'{out_dir}/{10005 + af_files_counter * 10}.asc') #plt.figure() #plt.plot(ecg_array) #plt.show() | 2.309929 | 2 |
tasks/func/_tree.py | AntonObersteiner/python-lessons | 0 | 6621570 | import turtle
# Draw as fast as possible: no animation, no per-step delay, manual
# screen updates (flushed explicitly via turtle.update()).
turtle.speed(0)
turtle.delay(0)
turtle.tracer(0, 0)
# Interactive drawing parameters (adjusted at runtime via key bindings).
angle = 20           # branch spread in degrees
length = 50          # base segment length in pixels
inner = .9 * length  # NOTE: fixed at startup; not recomputed when 'length' changes
shrink = .8          # per-depth-level length multiplier
leaf_width = 5       # extra width of the leaf triangles
def segment(depth = 0, max_depth = 5):
    """Recursively draw one branch of the tree from the current turtle
    pose; at max_depth draw a filled triangular leaf instead."""
    if depth == max_depth:
        # Leaf color: green channel scales with depth (always 1.0 here,
        # since this branch only runs at depth == max_depth).
        turtle.fillcolor(0, depth / max_depth, 0)
        turtle.begin_fill()
        turtle.right(30)
        turtle.forward(20)
        turtle.left(120)
        turtle.forward(20 + leaf_width)
        turtle.left(120)
        turtle.forward(20)
        # Net rotation is a full 360 degrees, restoring the heading.
        turtle.left(150)
        turtle.end_fill()
        return
    # Debug trace: indented depth ("Tiefe" = German for depth).
    print(f"{' '*depth}Tiefe: {depth}")
    # Branch lengths shrink geometrically with depth.
    factor = shrink ** depth
    # Right sub-branch...
    turtle.right(angle)
    turtle.forward(length * factor)
    segment(depth + 1, max_depth)
    # ...retreat only by 'inner' (slightly less than 'length'), then the
    # left sub-branch, then return along the full segment.
    turtle.backward(inner * factor)
    turtle.left(2 * angle)
    turtle.forward(inner * factor)
    segment(depth + 1, max_depth)
    turtle.backward(length * factor)
    turtle.right(angle)
def increment_angle():
    """Key handler: widen the branch angle by one degree and redraw."""
    global angle
    angle += 1
    redraw()


def decrement_angle():
    """Key handler: narrow the branch angle by one degree and redraw."""
    global angle
    angle -= 1
    redraw()


def increment_length():
    """Key handler: grow the base segment length by 1% and redraw."""
    global length
    length *= 1.01
    redraw()


def decrement_length():
    """Key handler: shrink the base segment length by 1% and redraw."""
    global length
    length /= 1.01
    redraw()


def increment_leaf_width():
    """Key handler: make the leaves one pixel wider and redraw."""
    global leaf_width
    leaf_width += 1
    redraw()


def decrement_leaf_width():
    """Key handler: make the leaves one pixel narrower and redraw."""
    global leaf_width
    leaf_width -= 1
    redraw()
def redraw():
    """Clear the screen, redraw the whole tree with the current
    parameters, then flush the pending turtle updates."""
    turtle.home()
    turtle.clear()
    segment(0, 5)
    turtle.update()
if __name__ == '__main__':
    # Keyboard controls: +/- adjust the branch angle, * and / scale the
    # segment length, Up/Down change the leaf width.
    turtle.onkeypress(increment_angle, 'plus')
    turtle.onkeypress(decrement_angle, 'minus')
    turtle.onkeypress(increment_length, 'asterisk')
    turtle.onkeypress(decrement_length, 'slash')
    turtle.onkeypress(increment_leaf_width, 'Up')
    turtle.onkeypress(decrement_leaf_width, 'Down')
    turtle.listen()
    redraw()
    # Keep the window alive until the user presses ENTER in the console.
    input("[ENTER] to quit")
| import turtle
turtle.speed(0)
turtle.delay(0)
turtle.tracer(0, 0)
angle = 20
length = 50
inner = .9 * length
shrink = .8
leaf_width = 5
def segment(depth = 0, max_depth = 5):
if depth == max_depth:
turtle.fillcolor(0, depth / max_depth, 0)
turtle.begin_fill()
turtle.right(30)
turtle.forward(20)
turtle.left(120)
turtle.forward(20 + leaf_width)
turtle.left(120)
turtle.forward(20)
turtle.left(150)
turtle.end_fill()
return
print(f"{' '*depth}Tiefe: {depth}")
factor = shrink ** depth
turtle.right(angle)
turtle.forward(length * factor)
segment(depth + 1, max_depth)
turtle.backward(inner * factor)
turtle.left(2 * angle)
turtle.forward(inner * factor)
segment(depth + 1, max_depth)
turtle.backward(length * factor)
turtle.right(angle)
def increment_angle(): global angle; angle += 1; redraw()
def decrement_angle(): global angle; angle -= 1; redraw()
def increment_length(): global length; length *= 1.01; redraw()
def decrement_length(): global length; length /= 1.01; redraw()
def increment_leaf_width(): global leaf_width; leaf_width += 1; redraw()
def decrement_leaf_width(): global leaf_width; leaf_width -= 1; redraw()
def redraw():
turtle.home()
turtle.clear()
segment(0, 5)
turtle.update()
if __name__ == '__main__':
turtle.onkeypress(increment_angle, 'plus')
turtle.onkeypress(decrement_angle, 'minus')
turtle.onkeypress(increment_length, 'asterisk')
turtle.onkeypress(decrement_length, 'slash')
turtle.onkeypress(increment_leaf_width, 'Up')
turtle.onkeypress(decrement_leaf_width, 'Down')
turtle.listen()
redraw()
input("[ENTER] to quit")
| none | 1 | 3.660294 | 4 | |
2018/05/alchemical_reduction.py | GeoffRiley/AdventOfCode | 2 | 6621571 | <filename>2018/05/alchemical_reduction.py
def react_all(new_str):
    """Fully react a polymer string.

    Adjacent units of the same type but opposite polarity ('aA' / 'Aa')
    annihilate each other, repeatedly, until no reaction remains.

    Single O(n) stack pass instead of the original's repeated
    full-string sweeps; unlike the original, it also handles an empty
    input (the old version raised IndexError on '').
    """
    stack = []
    for unit in new_str:
        # Same letter, different case -> the pair reacts and vanishes.
        if stack and stack[-1] != unit and stack[-1].lower() == unit.lower():
            stack.pop()
        else:
            stack.append(unit)
    return ''.join(stack)
def alchemical_reduction_part_1(inp):
    """Length of the fully reacted polymer (first input line)."""
    return len(react_all(inp[0]))
def alchemical_reduction_part_2(inp):
    """Shortest fully-reacted polymer obtainable by first removing every
    unit of one letter (both cases) from the input.

    Computes only the reacted *lengths* per candidate letter instead of
    keeping all the reacted strings in memory like the original did.
    Still raises ValueError on an empty polymer (min of empty iterable),
    matching the original's behaviour.
    """
    polymer = inp[0]
    return min(
        len(react_all(polymer.replace(c, '').replace(c.upper(), '')))
        for c in set(polymer.lower())
    )
if __name__ == '__main__':
    # Puzzle input: a single polymer string on one line.
    with open('input.txt') as chem_file:
        chem_strings = chem_file.read().splitlines(keepends=False)
    print(f'Day 5, part 1: {alchemical_reduction_part_1(chem_strings)}')
    print(f'Day 5, part 2: {alchemical_reduction_part_2(chem_strings)}')
# Expected answers for this author's puzzle input:
# Day 5, part 1: 11540
# Day 5, part 2: 6918
| <filename>2018/05/alchemical_reduction.py
def react_all(new_str):
done = False
while not done:
done = True
old_str = new_str
last_char = old_str[0]
new_str = ''
skip = 0
for char in old_str[1:]:
if skip > 0:
skip -= 1
last_char = char
continue
if last_char != char and last_char.lower() == char.lower():
done = False
skip = 1
continue
new_str += last_char
last_char = char
if skip == 0:
new_str += last_char
return new_str
def alchemical_reduction_part_1(inp):
new_str = react_all(inp[0])
return len(new_str)
def alchemical_reduction_part_2(inp):
inp: str = inp[0]
char_set = set(inp.lower())
counts = dict()
for char in char_set:
tmp = inp.replace(char, '').replace(char.upper(), '')
counts[char] = react_all(tmp)
result = min(counts.items(), key=lambda x: len(x[1]))
return len(result[1])
if __name__ == '__main__':
with open('input.txt') as chem_file:
chem_strings = chem_file.read().splitlines(keepends=False)
print(f'Day 5, part 1: {alchemical_reduction_part_1(chem_strings)}')
print(f'Day 5, part 2: {alchemical_reduction_part_2(chem_strings)}')
# Day 5, part 1: 11540
# Day 5, part 2: 6918
| en | 0.435693 | # Day 5, part 1: 11540 # Day 5, part 2: 6918 | 3.304758 | 3 |
sdk/python/pulumi_scaleway/vpc_private_network.py | stack72/pulumi-scaleway | 6 | 6621572 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['VpcPrivateNetworkArgs', 'VpcPrivateNetwork']
@pulumi.input_type
class VpcPrivateNetworkArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None,
project_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
zone: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a VpcPrivateNetwork resource.
:param pulumi.Input[str] name: The name of the private network. If not provided it will be randomly generated.
:param pulumi.Input[str] project_id: `project_id`) The ID of the project the private network is associated with.
:param pulumi.Input[Sequence[pulumi.Input[str]]] tags: The tags associated with the private network.
:param pulumi.Input[str] zone: `zone`) The zone in which the private network should be created.
"""
if name is not None:
pulumi.set(__self__, "name", name)
if project_id is not None:
pulumi.set(__self__, "project_id", project_id)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if zone is not None:
pulumi.set(__self__, "zone", zone)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the private network. If not provided it will be randomly generated.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="projectId")
def project_id(self) -> Optional[pulumi.Input[str]]:
"""
`project_id`) The ID of the project the private network is associated with.
"""
return pulumi.get(self, "project_id")
@project_id.setter
def project_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project_id", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The tags associated with the private network.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter
def zone(self) -> Optional[pulumi.Input[str]]:
"""
`zone`) The zone in which the private network should be created.
"""
return pulumi.get(self, "zone")
@zone.setter
def zone(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "zone", value)
@pulumi.input_type
class _VpcPrivateNetworkState:
def __init__(__self__, *,
created_at: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
organization_id: Optional[pulumi.Input[str]] = None,
project_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
updated_at: Optional[pulumi.Input[str]] = None,
zone: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering VpcPrivateNetwork resources.
:param pulumi.Input[str] created_at: The date and time of the creation of the private network
:param pulumi.Input[str] name: The name of the private network. If not provided it will be randomly generated.
:param pulumi.Input[str] organization_id: The organization ID the private network is associated with.
:param pulumi.Input[str] project_id: `project_id`) The ID of the project the private network is associated with.
:param pulumi.Input[Sequence[pulumi.Input[str]]] tags: The tags associated with the private network.
:param pulumi.Input[str] updated_at: The date and time of the last update of the private network
:param pulumi.Input[str] zone: `zone`) The zone in which the private network should be created.
"""
if created_at is not None:
pulumi.set(__self__, "created_at", created_at)
if name is not None:
pulumi.set(__self__, "name", name)
if organization_id is not None:
pulumi.set(__self__, "organization_id", organization_id)
if project_id is not None:
pulumi.set(__self__, "project_id", project_id)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if updated_at is not None:
pulumi.set(__self__, "updated_at", updated_at)
if zone is not None:
pulumi.set(__self__, "zone", zone)
@property
@pulumi.getter(name="createdAt")
def created_at(self) -> Optional[pulumi.Input[str]]:
"""
The date and time of the creation of the private network
"""
return pulumi.get(self, "created_at")
@created_at.setter
def created_at(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "created_at", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the private network. If not provided it will be randomly generated.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="organizationId")
def organization_id(self) -> Optional[pulumi.Input[str]]:
"""
The organization ID the private network is associated with.
"""
return pulumi.get(self, "organization_id")
@organization_id.setter
def organization_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "organization_id", value)
@property
@pulumi.getter(name="projectId")
def project_id(self) -> Optional[pulumi.Input[str]]:
"""
`project_id`) The ID of the project the private network is associated with.
"""
return pulumi.get(self, "project_id")
@project_id.setter
def project_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project_id", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The tags associated with the private network.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="updatedAt")
def updated_at(self) -> Optional[pulumi.Input[str]]:
"""
The date and time of the last update of the private network
"""
return pulumi.get(self, "updated_at")
@updated_at.setter
def updated_at(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "updated_at", value)
@property
@pulumi.getter
def zone(self) -> Optional[pulumi.Input[str]]:
"""
`zone`) The zone in which the private network should be created.
"""
return pulumi.get(self, "zone")
@zone.setter
def zone(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "zone", value)
class VpcPrivateNetwork(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
name: Optional[pulumi.Input[str]] = None,
project_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
zone: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Creates and manages Scaleway VPC Private Networks.
For more information, see [the documentation](https://developers.scaleway.com/en/products/vpc/api/#private-networks-ac2df4).
## Example
```python
import pulumi
import pulumi_scaleway as scaleway
pn_priv = scaleway.VpcPrivateNetwork("pnPriv", tags=[
"demo",
"terraform",
])
```
## Import
Private networks can be imported using the `{zone}/{id}`, e.g. bash
```sh
$ pulumi import scaleway:index/vpcPrivateNetwork:VpcPrivateNetwork vpc_demo fr-par-1/11111111-1111-1111-1111-111111111111
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] name: The name of the private network. If not provided it will be randomly generated.
:param pulumi.Input[str] project_id: `project_id`) The ID of the project the private network is associated with.
:param pulumi.Input[Sequence[pulumi.Input[str]]] tags: The tags associated with the private network.
:param pulumi.Input[str] zone: `zone`) The zone in which the private network should be created.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[VpcPrivateNetworkArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Creates and manages Scaleway VPC Private Networks.
For more information, see [the documentation](https://developers.scaleway.com/en/products/vpc/api/#private-networks-ac2df4).
## Example
```python
import pulumi
import pulumi_scaleway as scaleway
pn_priv = scaleway.VpcPrivateNetwork("pnPriv", tags=[
"demo",
"terraform",
])
```
## Import
Private networks can be imported using the `{zone}/{id}`, e.g. bash
```sh
$ pulumi import scaleway:index/vpcPrivateNetwork:VpcPrivateNetwork vpc_demo fr-par-1/11111111-1111-1111-1111-111111111111
```
:param str resource_name: The name of the resource.
:param VpcPrivateNetworkArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(VpcPrivateNetworkArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
name: Optional[pulumi.Input[str]] = None,
project_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
zone: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = VpcPrivateNetworkArgs.__new__(VpcPrivateNetworkArgs)
__props__.__dict__["name"] = name
__props__.__dict__["project_id"] = project_id
__props__.__dict__["tags"] = tags
__props__.__dict__["zone"] = zone
__props__.__dict__["created_at"] = None
__props__.__dict__["organization_id"] = None
__props__.__dict__["updated_at"] = None
super(VpcPrivateNetwork, __self__).__init__(
'scaleway:index/vpcPrivateNetwork:VpcPrivateNetwork',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
created_at: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
organization_id: Optional[pulumi.Input[str]] = None,
project_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
updated_at: Optional[pulumi.Input[str]] = None,
zone: Optional[pulumi.Input[str]] = None) -> 'VpcPrivateNetwork':
"""
Get an existing VpcPrivateNetwork resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] created_at: The date and time of the creation of the private network
:param pulumi.Input[str] name: The name of the private network. If not provided it will be randomly generated.
:param pulumi.Input[str] organization_id: The organization ID the private network is associated with.
:param pulumi.Input[str] project_id: `project_id`) The ID of the project the private network is associated with.
:param pulumi.Input[Sequence[pulumi.Input[str]]] tags: The tags associated with the private network.
:param pulumi.Input[str] updated_at: The date and time of the last update of the private network
:param pulumi.Input[str] zone: `zone`) The zone in which the private network should be created.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _VpcPrivateNetworkState.__new__(_VpcPrivateNetworkState)
__props__.__dict__["created_at"] = created_at
__props__.__dict__["name"] = name
__props__.__dict__["organization_id"] = organization_id
__props__.__dict__["project_id"] = project_id
__props__.__dict__["tags"] = tags
__props__.__dict__["updated_at"] = updated_at
__props__.__dict__["zone"] = zone
return VpcPrivateNetwork(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="createdAt")
def created_at(self) -> pulumi.Output[str]:
"""
The date and time of the creation of the private network
"""
return pulumi.get(self, "created_at")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the private network. If not provided it will be randomly generated.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="organizationId")
def organization_id(self) -> pulumi.Output[str]:
"""
The organization ID the private network is associated with.
"""
return pulumi.get(self, "organization_id")
@property
@pulumi.getter(name="projectId")
def project_id(self) -> pulumi.Output[str]:
"""
`project_id`) The ID of the project the private network is associated with.
"""
return pulumi.get(self, "project_id")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
The tags associated with the private network.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="updatedAt")
def updated_at(self) -> pulumi.Output[str]:
"""
The date and time of the last update of the private network
"""
return pulumi.get(self, "updated_at")
@property
@pulumi.getter
def zone(self) -> pulumi.Output[str]:
"""
`zone`) The zone in which the private network should be created.
"""
return pulumi.get(self, "zone")
| # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['VpcPrivateNetworkArgs', 'VpcPrivateNetwork']
@pulumi.input_type
class VpcPrivateNetworkArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None,
project_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
zone: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a VpcPrivateNetwork resource.
:param pulumi.Input[str] name: The name of the private network. If not provided it will be randomly generated.
:param pulumi.Input[str] project_id: `project_id`) The ID of the project the private network is associated with.
:param pulumi.Input[Sequence[pulumi.Input[str]]] tags: The tags associated with the private network.
:param pulumi.Input[str] zone: `zone`) The zone in which the private network should be created.
"""
if name is not None:
pulumi.set(__self__, "name", name)
if project_id is not None:
pulumi.set(__self__, "project_id", project_id)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if zone is not None:
pulumi.set(__self__, "zone", zone)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the private network. If not provided it will be randomly generated.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="projectId")
def project_id(self) -> Optional[pulumi.Input[str]]:
"""
`project_id`) The ID of the project the private network is associated with.
"""
return pulumi.get(self, "project_id")
@project_id.setter
def project_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project_id", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The tags associated with the private network.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter
def zone(self) -> Optional[pulumi.Input[str]]:
"""
`zone`) The zone in which the private network should be created.
"""
return pulumi.get(self, "zone")
@zone.setter
def zone(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "zone", value)
@pulumi.input_type
class _VpcPrivateNetworkState:
def __init__(__self__, *,
created_at: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
organization_id: Optional[pulumi.Input[str]] = None,
project_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
updated_at: Optional[pulumi.Input[str]] = None,
zone: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering VpcPrivateNetwork resources.
:param pulumi.Input[str] created_at: The date and time of the creation of the private network
:param pulumi.Input[str] name: The name of the private network. If not provided it will be randomly generated.
:param pulumi.Input[str] organization_id: The organization ID the private network is associated with.
:param pulumi.Input[str] project_id: `project_id`) The ID of the project the private network is associated with.
:param pulumi.Input[Sequence[pulumi.Input[str]]] tags: The tags associated with the private network.
:param pulumi.Input[str] updated_at: The date and time of the last update of the private network
:param pulumi.Input[str] zone: `zone`) The zone in which the private network should be created.
"""
if created_at is not None:
pulumi.set(__self__, "created_at", created_at)
if name is not None:
pulumi.set(__self__, "name", name)
if organization_id is not None:
pulumi.set(__self__, "organization_id", organization_id)
if project_id is not None:
pulumi.set(__self__, "project_id", project_id)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if updated_at is not None:
pulumi.set(__self__, "updated_at", updated_at)
if zone is not None:
pulumi.set(__self__, "zone", zone)
@property
@pulumi.getter(name="createdAt")
def created_at(self) -> Optional[pulumi.Input[str]]:
"""
The date and time of the creation of the private network
"""
return pulumi.get(self, "created_at")
@created_at.setter
def created_at(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "created_at", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the private network. If not provided it will be randomly generated.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="organizationId")
def organization_id(self) -> Optional[pulumi.Input[str]]:
"""
The organization ID the private network is associated with.
"""
return pulumi.get(self, "organization_id")
@organization_id.setter
def organization_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "organization_id", value)
@property
@pulumi.getter(name="projectId")
def project_id(self) -> Optional[pulumi.Input[str]]:
"""
`project_id`) The ID of the project the private network is associated with.
"""
return pulumi.get(self, "project_id")
@project_id.setter
def project_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project_id", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The tags associated with the private network.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="updatedAt")
def updated_at(self) -> Optional[pulumi.Input[str]]:
"""
The date and time of the last update of the private network
"""
return pulumi.get(self, "updated_at")
@updated_at.setter
def updated_at(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "updated_at", value)
@property
@pulumi.getter
def zone(self) -> Optional[pulumi.Input[str]]:
"""
`zone`) The zone in which the private network should be created.
"""
return pulumi.get(self, "zone")
@zone.setter
def zone(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "zone", value)
class VpcPrivateNetwork(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
name: Optional[pulumi.Input[str]] = None,
project_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
zone: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Creates and manages Scaleway VPC Private Networks.
For more information, see [the documentation](https://developers.scaleway.com/en/products/vpc/api/#private-networks-ac2df4).
## Example
```python
import pulumi
import pulumi_scaleway as scaleway
pn_priv = scaleway.VpcPrivateNetwork("pnPriv", tags=[
"demo",
"terraform",
])
```
## Import
Private networks can be imported using the `{zone}/{id}`, e.g. bash
```sh
$ pulumi import scaleway:index/vpcPrivateNetwork:VpcPrivateNetwork vpc_demo fr-par-1/11111111-1111-1111-1111-111111111111
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] name: The name of the private network. If not provided it will be randomly generated.
:param pulumi.Input[str] project_id: `project_id`) The ID of the project the private network is associated with.
:param pulumi.Input[Sequence[pulumi.Input[str]]] tags: The tags associated with the private network.
:param pulumi.Input[str] zone: `zone`) The zone in which the private network should be created.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[VpcPrivateNetworkArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Creates and manages Scaleway VPC Private Networks.
For more information, see [the documentation](https://developers.scaleway.com/en/products/vpc/api/#private-networks-ac2df4).
## Example
```python
import pulumi
import pulumi_scaleway as scaleway
pn_priv = scaleway.VpcPrivateNetwork("pnPriv", tags=[
"demo",
"terraform",
])
```
## Import
Private networks can be imported using the `{zone}/{id}`, e.g. bash
```sh
$ pulumi import scaleway:index/vpcPrivateNetwork:VpcPrivateNetwork vpc_demo fr-par-1/11111111-1111-1111-1111-111111111111
```
:param str resource_name: The name of the resource.
:param VpcPrivateNetworkArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(VpcPrivateNetworkArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
name: Optional[pulumi.Input[str]] = None,
project_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
zone: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = VpcPrivateNetworkArgs.__new__(VpcPrivateNetworkArgs)
__props__.__dict__["name"] = name
__props__.__dict__["project_id"] = project_id
__props__.__dict__["tags"] = tags
__props__.__dict__["zone"] = zone
__props__.__dict__["created_at"] = None
__props__.__dict__["organization_id"] = None
__props__.__dict__["updated_at"] = None
super(VpcPrivateNetwork, __self__).__init__(
'scaleway:index/vpcPrivateNetwork:VpcPrivateNetwork',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
created_at: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
organization_id: Optional[pulumi.Input[str]] = None,
project_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
updated_at: Optional[pulumi.Input[str]] = None,
zone: Optional[pulumi.Input[str]] = None) -> 'VpcPrivateNetwork':
"""
Get an existing VpcPrivateNetwork resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] created_at: The date and time of the creation of the private network
:param pulumi.Input[str] name: The name of the private network. If not provided it will be randomly generated.
:param pulumi.Input[str] organization_id: The organization ID the private network is associated with.
:param pulumi.Input[str] project_id: `project_id`) The ID of the project the private network is associated with.
:param pulumi.Input[Sequence[pulumi.Input[str]]] tags: The tags associated with the private network.
:param pulumi.Input[str] updated_at: The date and time of the last update of the private network
:param pulumi.Input[str] zone: `zone`) The zone in which the private network should be created.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _VpcPrivateNetworkState.__new__(_VpcPrivateNetworkState)
__props__.__dict__["created_at"] = created_at
__props__.__dict__["name"] = name
__props__.__dict__["organization_id"] = organization_id
__props__.__dict__["project_id"] = project_id
__props__.__dict__["tags"] = tags
__props__.__dict__["updated_at"] = updated_at
__props__.__dict__["zone"] = zone
return VpcPrivateNetwork(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="createdAt")
def created_at(self) -> pulumi.Output[str]:
"""
The date and time of the creation of the private network
"""
return pulumi.get(self, "created_at")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the private network. If not provided it will be randomly generated.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="organizationId")
def organization_id(self) -> pulumi.Output[str]:
"""
The organization ID the private network is associated with.
"""
return pulumi.get(self, "organization_id")
@property
@pulumi.getter(name="projectId")
def project_id(self) -> pulumi.Output[str]:
"""
`project_id`) The ID of the project the private network is associated with.
"""
return pulumi.get(self, "project_id")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
The tags associated with the private network.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="updatedAt")
def updated_at(self) -> pulumi.Output[str]:
"""
The date and time of the last update of the private network
"""
return pulumi.get(self, "updated_at")
@property
@pulumi.getter
def zone(self) -> pulumi.Output[str]:
"""
`zone`) The zone in which the private network should be created.
"""
return pulumi.get(self, "zone")
| en | 0.744905 | # coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** The set of arguments for constructing a VpcPrivateNetwork resource. :param pulumi.Input[str] name: The name of the private network. If not provided it will be randomly generated. :param pulumi.Input[str] project_id: `project_id`) The ID of the project the private network is associated with. :param pulumi.Input[Sequence[pulumi.Input[str]]] tags: The tags associated with the private network. :param pulumi.Input[str] zone: `zone`) The zone in which the private network should be created. The name of the private network. If not provided it will be randomly generated. `project_id`) The ID of the project the private network is associated with. The tags associated with the private network. `zone`) The zone in which the private network should be created. Input properties used for looking up and filtering VpcPrivateNetwork resources. :param pulumi.Input[str] created_at: The date and time of the creation of the private network :param pulumi.Input[str] name: The name of the private network. If not provided it will be randomly generated. :param pulumi.Input[str] organization_id: The organization ID the private network is associated with. :param pulumi.Input[str] project_id: `project_id`) The ID of the project the private network is associated with. :param pulumi.Input[Sequence[pulumi.Input[str]]] tags: The tags associated with the private network. :param pulumi.Input[str] updated_at: The date and time of the last update of the private network :param pulumi.Input[str] zone: `zone`) The zone in which the private network should be created. The date and time of the creation of the private network The name of the private network. If not provided it will be randomly generated. The organization ID the private network is associated with. 
`project_id`) The ID of the project the private network is associated with. The tags associated with the private network. The date and time of the last update of the private network `zone`) The zone in which the private network should be created. Creates and manages Scaleway VPC Private Networks. For more information, see [the documentation](https://developers.scaleway.com/en/products/vpc/api/#private-networks-ac2df4). ## Example ```python import pulumi import pulumi_scaleway as scaleway pn_priv = scaleway.VpcPrivateNetwork("pnPriv", tags=[ "demo", "terraform", ]) ``` ## Import Private networks can be imported using the `{zone}/{id}`, e.g. bash ```sh $ pulumi import scaleway:index/vpcPrivateNetwork:VpcPrivateNetwork vpc_demo fr-par-1/11111111-1111-1111-1111-111111111111 ``` :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] name: The name of the private network. If not provided it will be randomly generated. :param pulumi.Input[str] project_id: `project_id`) The ID of the project the private network is associated with. :param pulumi.Input[Sequence[pulumi.Input[str]]] tags: The tags associated with the private network. :param pulumi.Input[str] zone: `zone`) The zone in which the private network should be created. Creates and manages Scaleway VPC Private Networks. For more information, see [the documentation](https://developers.scaleway.com/en/products/vpc/api/#private-networks-ac2df4). ## Example ```python import pulumi import pulumi_scaleway as scaleway pn_priv = scaleway.VpcPrivateNetwork("pnPriv", tags=[ "demo", "terraform", ]) ``` ## Import Private networks can be imported using the `{zone}/{id}`, e.g. bash ```sh $ pulumi import scaleway:index/vpcPrivateNetwork:VpcPrivateNetwork vpc_demo fr-par-1/11111111-1111-1111-1111-111111111111 ``` :param str resource_name: The name of the resource. 
:param VpcPrivateNetworkArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. Get an existing VpcPrivateNetwork resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] created_at: The date and time of the creation of the private network :param pulumi.Input[str] name: The name of the private network. If not provided it will be randomly generated. :param pulumi.Input[str] organization_id: The organization ID the private network is associated with. :param pulumi.Input[str] project_id: `project_id`) The ID of the project the private network is associated with. :param pulumi.Input[Sequence[pulumi.Input[str]]] tags: The tags associated with the private network. :param pulumi.Input[str] updated_at: The date and time of the last update of the private network :param pulumi.Input[str] zone: `zone`) The zone in which the private network should be created. The date and time of the creation of the private network The name of the private network. If not provided it will be randomly generated. The organization ID the private network is associated with. `project_id`) The ID of the project the private network is associated with. The tags associated with the private network. The date and time of the last update of the private network `zone`) The zone in which the private network should be created. | 2.343566 | 2 |
problems/daily_challenge/2021_03_03_missing_number/py/submissions/set_sol.py | phunc20/leetcode | 0 | 6621573 | <reponame>phunc20/leetcode
class Solution:
def missingNumber(self, nums: List[int]) -> int:
n = len(nums)
return (set(range(n+1)) - set(nums)).pop()
| class Solution:
def missingNumber(self, nums: List[int]) -> int:
n = len(nums)
return (set(range(n+1)) - set(nums)).pop() | none | 1 | 3.07829 | 3 | |
helloworld.py | mamonu/gh_Actions_CI | 0 | 6621574 | <reponame>mamonu/gh_Actions_CI<filename>helloworld.py
def add(a,b):
c = a + b
#duh!
return c
| def add(a,b):
c = a + b
#duh!
return c | none | 1 | 2.392347 | 2 | |
api.py | macgyvercsehdev/api_gerencianet | 0 | 6621575 | <filename>api.py
from requests.auth import HTTPBasicAuth
from requests import request, post
from dotenv import load_dotenv
from os import getenv
load_dotenv('.env')
def _token():
response = post(
url='%s/oauth/token' % getenv('URL_PROD'),
auth=HTTPBasicAuth(
getenv('CLIENT_ID_PROD'),
getenv('CLIENT_SECRET_PROD')
),
json={
'grant_type': 'client_credentials'
},
cert=getenv('CERTIFICADO_PROD'),
)
return response.json()['access_token']
def api_gerencianet(method, endpoint, **kwargs):
return request(
method,
'%s/%s' % (getenv('URL_PROD'), endpoint),
headers={
'Authorization': f"Bearer {_token()}",
},
cert=getenv('CERTIFICADO_PROD'),
**kwargs,
)
| <filename>api.py
from requests.auth import HTTPBasicAuth
from requests import request, post
from dotenv import load_dotenv
from os import getenv
load_dotenv('.env')
def _token():
response = post(
url='%s/oauth/token' % getenv('URL_PROD'),
auth=HTTPBasicAuth(
getenv('CLIENT_ID_PROD'),
getenv('CLIENT_SECRET_PROD')
),
json={
'grant_type': 'client_credentials'
},
cert=getenv('CERTIFICADO_PROD'),
)
return response.json()['access_token']
def api_gerencianet(method, endpoint, **kwargs):
return request(
method,
'%s/%s' % (getenv('URL_PROD'), endpoint),
headers={
'Authorization': f"Bearer {_token()}",
},
cert=getenv('CERTIFICADO_PROD'),
**kwargs,
)
| none | 1 | 2.650244 | 3 | |
app/services/nomics/rest_api_to_db/currencies/controller.py | Tinitto/crypto-exchange | 0 | 6621576 | <reponame>Tinitto/crypto-exchange<filename>app/services/nomics/rest_api_to_db/currencies/controller.py
"""
Controller for getting all currencies supported by Nomics
https://nomics.com/docs/#operation/getCurrencies
"""
from typing import Type, List
from judah.destinations.database.model import DatabaseBaseModel
from judah.transformers.base import BaseTransformer
from .destination.model import Currencies
from .source import CurrenciesDataset
from ..abstract.controllers.bulk import NomicsBulkRestAPIToDatabaseController
from ..abstract.sources.bulk import NomicsBulkRestApiSource
class ControllerForCurrencies(NomicsBulkRestAPIToDatabaseController):
"""
The controller for getting all supported currencies from Nomics
"""
destination_model_class: Type[DatabaseBaseModel] = Currencies
source_class: Type[NomicsBulkRestApiSource] = CurrenciesDataset
interval_in_milliseconds: int = 24 * 60 * 60 * 1000 # 1 day
transformer_classes: List[Type[BaseTransformer]] = []
| """
Controller for getting all currencies supported by Nomics
https://nomics.com/docs/#operation/getCurrencies
"""
from typing import Type, List
from judah.destinations.database.model import DatabaseBaseModel
from judah.transformers.base import BaseTransformer
from .destination.model import Currencies
from .source import CurrenciesDataset
from ..abstract.controllers.bulk import NomicsBulkRestAPIToDatabaseController
from ..abstract.sources.bulk import NomicsBulkRestApiSource
class ControllerForCurrencies(NomicsBulkRestAPIToDatabaseController):
"""
The controller for getting all supported currencies from Nomics
"""
destination_model_class: Type[DatabaseBaseModel] = Currencies
source_class: Type[NomicsBulkRestApiSource] = CurrenciesDataset
interval_in_milliseconds: int = 24 * 60 * 60 * 1000 # 1 day
transformer_classes: List[Type[BaseTransformer]] = [] | en | 0.857901 | Controller for getting all currencies supported by Nomics https://nomics.com/docs/#operation/getCurrencies The controller for getting all supported currencies from Nomics # 1 day | 2.063257 | 2 |
app/ui/main_ui_page.py | leepan1991/onvif_device_manager_python | 3 | 6621577 | <filename>app/ui/main_ui_page.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import device_manager_setup
from app.http.http_utils import update_ip, update_device_time, get_default_gateway_ip
import ipaddress
try:
import Tkinter as tk
except ImportError:
import tkinter as tk
import tkinter.messagebox
try:
import ttk
py3 = False
except ImportError:
import tkinter.ttk as ttk
py3 = True
def vp_start_gui():
'''Starting point when module is the main routine.'''
global val, w, root
root = tk.Tk()
top = Toplevel1(root)
device_manager_setup.init(root, top)
root.mainloop()
w = None
def create_Toplevel1(rt, *args, **kwargs):
'''Starting point when module is imported by another module.
Correct form of call: 'create_Toplevel1(root, *args, **kwargs)' .'''
global w, w_win, root
# rt = root
root = rt
w = tk.Toplevel(root)
top = Toplevel1(w)
device_manager_setup.init(w, top, *args, **kwargs)
return (w, top)
def destroy_Toplevel1():
global w
w.destroy()
w = None
class Toplevel1:
def __init__(self, top=None):
'''This class configures and populates the toplevel window.
top is the toplevel containing window.'''
_bgcolor = '#d9d9d9' # X11 color: 'gray85'
_fgcolor = '#000000' # X11 color: 'black'
_compcolor = '#d9d9d9' # X11 color: 'gray85'
_ana1color = '#d9d9d9' # X11 color: 'gray85'
_ana2color = '#ececec' # Closest X11 color: 'gray92'
self.style = ttk.Style()
if sys.platform == "win32":
self.style.theme_use('winnative')
self.style.configure('.', background=_bgcolor)
self.style.configure('.', foreground=_fgcolor)
self.style.configure('.', font="TkDefaultFont")
self.style.map('.', background=
[('selected', _compcolor), ('active', _ana2color)])
top.geometry("600x450+650+150")
top.minsize(148, 1)
top.maxsize(3204, 2405)
top.resizable(1, 1)
top.title("Onvif Device Manager")
top.configure(background="#d9d9d9")
top.configure(highlightbackground="#d9d9d9")
top.configure(highlightcolor="black")
self.Frame1 = tk.Frame(top)
self.Frame1.place(relx=0.05, rely=0.044, relheight=0.278, relwidth=0.9)
self.Frame1.configure(relief='groove')
self.Frame1.configure(borderwidth="2")
self.Frame1.configure(relief="groove")
self.Frame1.configure(background="#d9d9d9")
self.Frame1.configure(highlightbackground="#d9d9d9")
self.Frame1.configure(highlightcolor="black")
self.Label1 = tk.Label(self.Frame1)
self.Label1.place(relx=0.259, rely=0.064, height=27, width=260)
self.Label1.configure(activebackground="#f9f9f9")
self.Label1.configure(activeforeground="black")
self.Label1.configure(background="#d9d9d9")
self.Label1.configure(disabledforeground="#a3a3a3")
self.Label1.configure(foreground="#000000")
self.Label1.configure(highlightbackground="#d9d9d9")
self.Label1.configure(highlightcolor="black")
self.Label1.configure(text='''Thanks for choosing Device Manager''')
self.Labelframe1 = tk.LabelFrame(self.Frame1)
self.Labelframe1.place(relx=0.056, rely=0.24, relheight=0.648
, relwidth=0.889)
self.Labelframe1.configure(relief='groove')
self.Labelframe1.configure(foreground="black")
self.Labelframe1.configure(text='''Functions''')
self.Labelframe1.configure(background="#d9d9d9")
self.Labelframe1.configure(highlightbackground="#d9d9d9")
self.Labelframe1.configure(highlightcolor="black")
self.Label2 = tk.Label(self.Labelframe1)
self.Label2.place(relx=0.125, rely=0.247, height=20, width=270
, bordermode='ignore')
self.Label2.configure(activebackground="#f9f9f9")
self.Label2.configure(activeforeground="black")
self.Label2.configure(background="#d9d9d9")
self.Label2.configure(disabledforeground="#a3a3a3")
self.Label2.configure(foreground="#000000")
self.Label2.configure(highlightbackground="#d9d9d9")
self.Label2.configure(highlightcolor="black")
self.Label2.configure(text='''1. Sync computer datetime to device''')
self.Label3 = tk.Label(self.Labelframe1)
self.Label3.place(relx=0.125, rely=0.617, height=20, width=255
, bordermode='ignore')
self.Label3.configure(activebackground="#f9f9f9")
self.Label3.configure(activeforeground="black")
self.Label3.configure(background="#d9d9d9")
self.Label3.configure(disabledforeground="#a3a3a3")
self.Label3.configure(foreground="#000000")
self.Label3.configure(highlightbackground="#d9d9d9")
self.Label3.configure(highlightcolor="black")
self.Label3.configure(text='''2. Update device network settings''')
self.Label4 = tk.Label(top)
self.Label4.place(relx=0.083, rely=0.330, height=20, width=100)
self.Label4.configure(activebackground="#f9f9f9")
self.Label4.configure(activeforeground="black")
self.Label4.configure(background="#d9d9d9")
self.Label4.configure(disabledforeground="#a3a3a3")
self.Label4.configure(foreground="#000000")
self.Label4.configure(highlightbackground="#d9d9d9")
self.Label4.configure(highlightcolor="black")
self.Label4.configure(text='''Current IP''')
self.Label5 = tk.Label(top)
self.Label5.place(relx=0.417, rely=0.330, height=20, width=100)
self.Label5.configure(activebackground="#f9f9f9")
self.Label5.configure(activeforeground="black")
self.Label5.configure(background="#d9d9d9")
self.Label5.configure(disabledforeground="#a3a3a3")
self.Label5.configure(foreground="#000000")
self.Label5.configure(highlightbackground="#d9d9d9")
self.Label5.configure(highlightcolor="black")
self.Label5.configure(text='''Username''')
self.Label6 = tk.Label(top)
self.Label6.place(relx=0.733, rely=0.330, height=20, width=100)
self.Label6.configure(activebackground="#f9f9f9")
self.Label6.configure(activeforeground="black")
self.Label6.configure(background="#d9d9d9")
self.Label6.configure(disabledforeground="#a3a3a3")
self.Label6.configure(foreground="#000000")
self.Label6.configure(highlightbackground="#d9d9d9")
self.Label6.configure(highlightcolor="black")
self.Label6.configure(text='''Password''')
self.Text1 = tk.Text(top)
self.Text1.place(relx=0.05, rely=0.370, relheight=0.060, relwidth=0.25)
self.Text1.configure(background="white")
self.Text1.configure(font="TkTextFont")
self.Text1.configure(foreground="black")
self.Text1.configure(highlightbackground="#d9d9d9")
self.Text1.configure(highlightcolor="black")
self.Text1.configure(insertbackground="black")
self.Text1.configure(selectbackground="blue")
self.Text1.configure(selectforeground="white")
self.Text1.configure(wrap="word")
self.Text2 = tk.Text(top)
self.Text2.place(relx=0.375, rely=0.370, relheight=0.060, relwidth=0.25)
self.Text2.configure(background="white")
self.Text2.configure(font="TkTextFont")
self.Text2.configure(foreground="black")
self.Text2.configure(highlightbackground="#d9d9d9")
self.Text2.configure(highlightcolor="black")
self.Text2.configure(insertbackground="black")
self.Text2.configure(selectbackground="blue")
self.Text2.configure(selectforeground="white")
self.Text2.configure(wrap="word")
self.Text3 = tk.Text(top)
self.Text3.place(relx=0.7, rely=0.370, relheight=0.060, relwidth=0.25)
self.Text3.configure(background="white")
self.Text3.configure(font="TkTextFont")
self.Text3.configure(foreground="black")
self.Text3.configure(highlightbackground="#d9d9d9")
self.Text3.configure(highlightcolor="black")
self.Text3.configure(insertbackground="black")
self.Text3.configure(selectbackground="blue")
self.Text3.configure(selectforeground="white")
self.Text3.configure(wrap="word")
self.TSeparator1 = ttk.Separator(top)
self.TSeparator1.place(relx=0.0, rely=0.450, relwidth=1.0)
self.Label8 = tk.Label(top)
self.Label8.place(relx=0.017, rely=0.470, height=20, width=252)
self.Label8.configure(activebackground="#f9f9f9")
self.Label8.configure(activeforeground="black")
self.Label8.configure(background="#d9d9d9")
self.Label8.configure(disabledforeground="#a3a3a3")
self.Label8.configure(foreground="#000000")
self.Label8.configure(highlightbackground="#d9d9d9")
self.Label8.configure(highlightcolor="black")
self.Label8.configure(text='''Please click the following button to''')
self.Label10 = tk.Label(top)
self.Label10.place(relx=0.017, rely=0.510, height=20, width=245)
self.Label10.configure(activebackground="#f9f9f9")
self.Label10.configure(activeforeground="black")
self.Label10.configure(background="#d9d9d9")
self.Label10.configure(disabledforeground="#a3a3a3")
self.Label10.configure(foreground="#000000")
self.Label10.configure(highlightbackground="#d9d9d9")
self.Label10.configure(highlightcolor="black")
self.Label10.configure(text='''sync computer datetime to device''')
self.Label9 = tk.Label(top)
self.Label9.place(relx=0.017, rely=0.550, height=20, width=360)
self.Label9.configure(activebackground="#f9f9f9")
self.Label9.configure(activeforeground="black")
self.Label9.configure(background="#d9d9d9")
self.Label9.configure(disabledforeground="#a3a3a3")
self.Label9.configure(foreground="#000000")
self.Label9.configure(highlightbackground="#d9d9d9")
self.Label9.configure(highlightcolor="black")
self.Label9.configure(text='''and update datetime format to MM-dd-yyyy HH:mm:ss''')
self.TButton1 = ttk.Button(top)
self.TButton1.place(relx=0.7, rely=0.480, height=40, width=118)
self.TButton1.configure(takefocus="")
self.TButton1.configure(text='''Sync DateTime''')
self.TButton1.configure(command=self.update_timezone_and_datetime)
################IP
self.TSeparator2 = ttk.Separator(top)
self.TSeparator2.place(relx=0.0, rely=0.600, relwidth=1.0)
self.Label7 = tk.Label(top)
self.Label7.place(relx=0.067, rely=0.635, height=20, width=122)
self.Label7.configure(activebackground="#f9f9f9")
self.Label7.configure(activeforeground="black")
self.Label7.configure(background="#d9d9d9")
self.Label7.configure(disabledforeground="#a3a3a3")
self.Label7.configure(foreground="#000000")
self.Label7.configure(highlightbackground="#d9d9d9")
self.Label7.configure(highlightcolor="black")
self.Label7.configure(text='''New IP Address *''')
self.Text4 = tk.Text(top)
self.Text4.place(relx=0.283, rely=0.630, relheight=0.060, relwidth=0.30)
self.Text4.configure(background="white")
self.Text4.configure(font="TkTextFont")
self.Text4.configure(foreground="black")
self.Text4.configure(highlightbackground="#d9d9d9")
self.Text4.configure(highlightcolor="black")
self.Text4.configure(insertbackground="black")
self.Text4.configure(selectbackground="blue")
self.Text4.configure(selectforeground="white")
self.Text4.configure(wrap="word")
self.Label11 = tk.Label(top)
self.Label11.place(relx=0.067, rely=0.730, height=20, width=122)
self.Label11.configure(activebackground="#f9f9f9")
self.Label11.configure(activeforeground="black")
self.Label11.configure(background="#d9d9d9")
self.Label11.configure(disabledforeground="#a3a3a3")
self.Label11.configure(foreground="#000000")
self.Label11.configure(highlightbackground="#d9d9d9")
self.Label11.configure(highlightcolor="black")
self.Label11.configure(text='''Subnet Mask *''')
self.Text5 = tk.Text(top)
self.Text5.place(relx=0.283, rely=0.720, relheight=0.060, relwidth=0.30)
self.Text5.bind("<FocusIn>", self.get_sub_mask)
self.Text5.configure(background="white")
self.Text5.configure(font="TkTextFont")
self.Text5.configure(foreground="black")
self.Text5.configure(highlightbackground="#d9d9d9")
self.Text5.configure(highlightcolor="black")
self.Text5.configure(insertbackground="black")
self.Text5.configure(selectbackground="blue")
self.Text5.configure(selectforeground="white")
self.Text5.configure(wrap="word")
self.Label12 = tk.Label(top)
self.Label12.place(relx=0.067, rely=0.820, height=20, width=122)
self.Label12.configure(activebackground="#f9f9f9")
self.Label12.configure(activeforeground="black")
self.Label12.configure(background="#d9d9d9")
self.Label12.configure(disabledforeground="#a3a3a3")
self.Label12.configure(foreground="#000000")
self.Label12.configure(highlightbackground="#d9d9d9")
self.Label12.configure(highlightcolor="black")
self.Label12.configure(text='''Default Gateway *''')
self.Text6 = tk.Text(top)
self.Text6.place(relx=0.283, rely=0.810, relheight=0.060, relwidth=0.30)
self.Text6.bind("<FocusIn>", self.get_default_gateway)
self.Text6.configure(background="white")
self.Text6.configure(font="TkTextFont")
self.Text6.configure(foreground="black")
self.Text6.configure(highlightbackground="#d9d9d9")
self.Text6.configure(highlightcolor="black")
self.Text6.configure(insertbackground="black")
self.Text6.configure(selectbackground="blue")
self.Text6.configure(selectforeground="white")
self.Text6.configure(wrap="word")
self.TButton2 = ttk.Button(top)
self.TButton2.place(relx=0.7, rely=0.700, height=40, width=118)
self.TButton2.configure(takefocus="")
self.TButton2.configure(text='''Update IP''')
self.TButton2.configure(command=self.update_ip)
self.menubar = tk.Menu(top, font="TkMenuFont", bg=_bgcolor, fg=_fgcolor)
top.configure(menu=self.menubar)
def get_sub_mask(self, event):
try:
if len(self.Text4.get("1.0", 'end-1c')) > 0:
net = ipaddress.ip_network(self.Text4.get("1.0", 'end-1c') + '/24', strict=False)
if len(self.Text5.get("1.0", 'end-1c')) > 0:
self.Text5.delete('1.0', 'end')
self.Text5.insert(1.0, str(net.netmask))
else:
print("please input new ip address to get sub mask")
except Exception as e:
print(e)
self.Text5.delete('1.0', 'end')
def get_default_gateway(self, event):
try:
if len(self.Text4.get("1.0", 'end-1c')) > 0:
default_gateway_ip = get_default_gateway_ip(self.Text4.get("1.0", 'end-1c'))
if default_gateway_ip is not None:
if len(self.Text6.get("1.0", 'end-1c')) > 0:
self.Text6.delete('1.0', 'end')
self.Text6.insert(1.0, default_gateway_ip)
else:
print("default_gateway_ip is None")
else:
print("please input new ip address to get default gateway")
except Exception as e:
print(e)
self.Text6.delete('1.0', 'end')
def update_timezone_and_datetime(self):
try:
result = update_device_time(
self.Text1.get("1.0", 'end-1c'),
self.Text2.get("1.0", 'end-1c'),
self.Text3.get("1.0", 'end-1c'))
if result == 'success':
tkinter.messagebox.showinfo("Information", "Update Device time success")
else:
tkinter.messagebox.showinfo("Information", "Update Device time failed")
except Exception as e:
print(e)
tkinter.messagebox.showerror("Error", "Please check input and connection then try again")
def update_ip(self):
try:
if len(self.Text4.get("1.0", 'end-1c')) > 0 and len(self.Text5.get("1.0", 'end-1c')) > 0 and len(
self.Text6.get("1.0", 'end-1c')) > 0:
result = update_ip(
self.Text1.get("1.0", 'end-1c'),
self.Text2.get("1.0", 'end-1c'),
self.Text3.get("1.0", 'end-1c'),
self.Text4.get("1.0", 'end-1c'),
self.Text6.get("1.0", 'end-1c'))
if result == 'success':
tkinter.messagebox.showinfo("Information", "IP Address Updated Successfully.")
else:
tkinter.messagebox.showinfo("Information", "Failed to Update IP Address.")
else:
tkinter.messagebox.showinfo("Information", "Please confirm whether the information is complete")
except Exception as e:
print(e)
tkinter.messagebox.showerror("Error", "Please check input and connection then try again")
if __name__ == '__main__':
vp_start_gui()
| <filename>app/ui/main_ui_page.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import device_manager_setup
from app.http.http_utils import update_ip, update_device_time, get_default_gateway_ip
import ipaddress
try:
import Tkinter as tk
except ImportError:
import tkinter as tk
import tkinter.messagebox
try:
import ttk
py3 = False
except ImportError:
import tkinter.ttk as ttk
py3 = True
def vp_start_gui():
'''Starting point when module is the main routine.'''
global val, w, root
root = tk.Tk()
top = Toplevel1(root)
device_manager_setup.init(root, top)
root.mainloop()
w = None
def create_Toplevel1(rt, *args, **kwargs):
'''Starting point when module is imported by another module.
Correct form of call: 'create_Toplevel1(root, *args, **kwargs)' .'''
global w, w_win, root
# rt = root
root = rt
w = tk.Toplevel(root)
top = Toplevel1(w)
device_manager_setup.init(w, top, *args, **kwargs)
return (w, top)
def destroy_Toplevel1():
global w
w.destroy()
w = None
class Toplevel1:
def __init__(self, top=None):
'''This class configures and populates the toplevel window.
top is the toplevel containing window.'''
_bgcolor = '#d9d9d9' # X11 color: 'gray85'
_fgcolor = '#000000' # X11 color: 'black'
_compcolor = '#d9d9d9' # X11 color: 'gray85'
_ana1color = '#d9d9d9' # X11 color: 'gray85'
_ana2color = '#ececec' # Closest X11 color: 'gray92'
self.style = ttk.Style()
if sys.platform == "win32":
self.style.theme_use('winnative')
self.style.configure('.', background=_bgcolor)
self.style.configure('.', foreground=_fgcolor)
self.style.configure('.', font="TkDefaultFont")
self.style.map('.', background=
[('selected', _compcolor), ('active', _ana2color)])
top.geometry("600x450+650+150")
top.minsize(148, 1)
top.maxsize(3204, 2405)
top.resizable(1, 1)
top.title("Onvif Device Manager")
top.configure(background="#d9d9d9")
top.configure(highlightbackground="#d9d9d9")
top.configure(highlightcolor="black")
self.Frame1 = tk.Frame(top)
self.Frame1.place(relx=0.05, rely=0.044, relheight=0.278, relwidth=0.9)
self.Frame1.configure(relief='groove')
self.Frame1.configure(borderwidth="2")
self.Frame1.configure(relief="groove")
self.Frame1.configure(background="#d9d9d9")
self.Frame1.configure(highlightbackground="#d9d9d9")
self.Frame1.configure(highlightcolor="black")
self.Label1 = tk.Label(self.Frame1)
self.Label1.place(relx=0.259, rely=0.064, height=27, width=260)
self.Label1.configure(activebackground="#f9f9f9")
self.Label1.configure(activeforeground="black")
self.Label1.configure(background="#d9d9d9")
self.Label1.configure(disabledforeground="#a3a3a3")
self.Label1.configure(foreground="#000000")
self.Label1.configure(highlightbackground="#d9d9d9")
self.Label1.configure(highlightcolor="black")
self.Label1.configure(text='''Thanks for choosing Device Manager''')
self.Labelframe1 = tk.LabelFrame(self.Frame1)
self.Labelframe1.place(relx=0.056, rely=0.24, relheight=0.648
, relwidth=0.889)
self.Labelframe1.configure(relief='groove')
self.Labelframe1.configure(foreground="black")
self.Labelframe1.configure(text='''Functions''')
self.Labelframe1.configure(background="#d9d9d9")
self.Labelframe1.configure(highlightbackground="#d9d9d9")
self.Labelframe1.configure(highlightcolor="black")
self.Label2 = tk.Label(self.Labelframe1)
self.Label2.place(relx=0.125, rely=0.247, height=20, width=270
, bordermode='ignore')
self.Label2.configure(activebackground="#f9f9f9")
self.Label2.configure(activeforeground="black")
self.Label2.configure(background="#d9d9d9")
self.Label2.configure(disabledforeground="#a3a3a3")
self.Label2.configure(foreground="#000000")
self.Label2.configure(highlightbackground="#d9d9d9")
self.Label2.configure(highlightcolor="black")
self.Label2.configure(text='''1. Sync computer datetime to device''')
self.Label3 = tk.Label(self.Labelframe1)
self.Label3.place(relx=0.125, rely=0.617, height=20, width=255
, bordermode='ignore')
self.Label3.configure(activebackground="#f9f9f9")
self.Label3.configure(activeforeground="black")
self.Label3.configure(background="#d9d9d9")
self.Label3.configure(disabledforeground="#a3a3a3")
self.Label3.configure(foreground="#000000")
self.Label3.configure(highlightbackground="#d9d9d9")
self.Label3.configure(highlightcolor="black")
self.Label3.configure(text='''2. Update device network settings''')
self.Label4 = tk.Label(top)
self.Label4.place(relx=0.083, rely=0.330, height=20, width=100)
self.Label4.configure(activebackground="#f9f9f9")
self.Label4.configure(activeforeground="black")
self.Label4.configure(background="#d9d9d9")
self.Label4.configure(disabledforeground="#a3a3a3")
self.Label4.configure(foreground="#000000")
self.Label4.configure(highlightbackground="#d9d9d9")
self.Label4.configure(highlightcolor="black")
self.Label4.configure(text='''Current IP''')
self.Label5 = tk.Label(top)
self.Label5.place(relx=0.417, rely=0.330, height=20, width=100)
self.Label5.configure(activebackground="#f9f9f9")
self.Label5.configure(activeforeground="black")
self.Label5.configure(background="#d9d9d9")
self.Label5.configure(disabledforeground="#a3a3a3")
self.Label5.configure(foreground="#000000")
self.Label5.configure(highlightbackground="#d9d9d9")
self.Label5.configure(highlightcolor="black")
self.Label5.configure(text='''Username''')
self.Label6 = tk.Label(top)
self.Label6.place(relx=0.733, rely=0.330, height=20, width=100)
self.Label6.configure(activebackground="#f9f9f9")
self.Label6.configure(activeforeground="black")
self.Label6.configure(background="#d9d9d9")
self.Label6.configure(disabledforeground="#a3a3a3")
self.Label6.configure(foreground="#000000")
self.Label6.configure(highlightbackground="#d9d9d9")
self.Label6.configure(highlightcolor="black")
self.Label6.configure(text='''Password''')
self.Text1 = tk.Text(top)
self.Text1.place(relx=0.05, rely=0.370, relheight=0.060, relwidth=0.25)
self.Text1.configure(background="white")
self.Text1.configure(font="TkTextFont")
self.Text1.configure(foreground="black")
self.Text1.configure(highlightbackground="#d9d9d9")
self.Text1.configure(highlightcolor="black")
self.Text1.configure(insertbackground="black")
self.Text1.configure(selectbackground="blue")
self.Text1.configure(selectforeground="white")
self.Text1.configure(wrap="word")
self.Text2 = tk.Text(top)
self.Text2.place(relx=0.375, rely=0.370, relheight=0.060, relwidth=0.25)
self.Text2.configure(background="white")
self.Text2.configure(font="TkTextFont")
self.Text2.configure(foreground="black")
self.Text2.configure(highlightbackground="#d9d9d9")
self.Text2.configure(highlightcolor="black")
self.Text2.configure(insertbackground="black")
self.Text2.configure(selectbackground="blue")
self.Text2.configure(selectforeground="white")
self.Text2.configure(wrap="word")
self.Text3 = tk.Text(top)
self.Text3.place(relx=0.7, rely=0.370, relheight=0.060, relwidth=0.25)
self.Text3.configure(background="white")
self.Text3.configure(font="TkTextFont")
self.Text3.configure(foreground="black")
self.Text3.configure(highlightbackground="#d9d9d9")
self.Text3.configure(highlightcolor="black")
self.Text3.configure(insertbackground="black")
self.Text3.configure(selectbackground="blue")
self.Text3.configure(selectforeground="white")
self.Text3.configure(wrap="word")
self.TSeparator1 = ttk.Separator(top)
self.TSeparator1.place(relx=0.0, rely=0.450, relwidth=1.0)
self.Label8 = tk.Label(top)
self.Label8.place(relx=0.017, rely=0.470, height=20, width=252)
self.Label8.configure(activebackground="#f9f9f9")
self.Label8.configure(activeforeground="black")
self.Label8.configure(background="#d9d9d9")
self.Label8.configure(disabledforeground="#a3a3a3")
self.Label8.configure(foreground="#000000")
self.Label8.configure(highlightbackground="#d9d9d9")
self.Label8.configure(highlightcolor="black")
self.Label8.configure(text='''Please click the following button to''')
self.Label10 = tk.Label(top)
self.Label10.place(relx=0.017, rely=0.510, height=20, width=245)
self.Label10.configure(activebackground="#f9f9f9")
self.Label10.configure(activeforeground="black")
self.Label10.configure(background="#d9d9d9")
self.Label10.configure(disabledforeground="#a3a3a3")
self.Label10.configure(foreground="#000000")
self.Label10.configure(highlightbackground="#d9d9d9")
self.Label10.configure(highlightcolor="black")
self.Label10.configure(text='''sync computer datetime to device''')
self.Label9 = tk.Label(top)
self.Label9.place(relx=0.017, rely=0.550, height=20, width=360)
self.Label9.configure(activebackground="#f9f9f9")
self.Label9.configure(activeforeground="black")
self.Label9.configure(background="#d9d9d9")
self.Label9.configure(disabledforeground="#a3a3a3")
self.Label9.configure(foreground="#000000")
self.Label9.configure(highlightbackground="#d9d9d9")
self.Label9.configure(highlightcolor="black")
self.Label9.configure(text='''and update datetime format to MM-dd-yyyy HH:mm:ss''')
self.TButton1 = ttk.Button(top)
self.TButton1.place(relx=0.7, rely=0.480, height=40, width=118)
self.TButton1.configure(takefocus="")
self.TButton1.configure(text='''Sync DateTime''')
self.TButton1.configure(command=self.update_timezone_and_datetime)
################IP
self.TSeparator2 = ttk.Separator(top)
self.TSeparator2.place(relx=0.0, rely=0.600, relwidth=1.0)
self.Label7 = tk.Label(top)
self.Label7.place(relx=0.067, rely=0.635, height=20, width=122)
self.Label7.configure(activebackground="#f9f9f9")
self.Label7.configure(activeforeground="black")
self.Label7.configure(background="#d9d9d9")
self.Label7.configure(disabledforeground="#a3a3a3")
self.Label7.configure(foreground="#000000")
self.Label7.configure(highlightbackground="#d9d9d9")
self.Label7.configure(highlightcolor="black")
self.Label7.configure(text='''New IP Address *''')
self.Text4 = tk.Text(top)
self.Text4.place(relx=0.283, rely=0.630, relheight=0.060, relwidth=0.30)
self.Text4.configure(background="white")
self.Text4.configure(font="TkTextFont")
self.Text4.configure(foreground="black")
self.Text4.configure(highlightbackground="#d9d9d9")
self.Text4.configure(highlightcolor="black")
self.Text4.configure(insertbackground="black")
self.Text4.configure(selectbackground="blue")
self.Text4.configure(selectforeground="white")
self.Text4.configure(wrap="word")
self.Label11 = tk.Label(top)
self.Label11.place(relx=0.067, rely=0.730, height=20, width=122)
self.Label11.configure(activebackground="#f9f9f9")
self.Label11.configure(activeforeground="black")
self.Label11.configure(background="#d9d9d9")
self.Label11.configure(disabledforeground="#a3a3a3")
self.Label11.configure(foreground="#000000")
self.Label11.configure(highlightbackground="#d9d9d9")
self.Label11.configure(highlightcolor="black")
self.Label11.configure(text='''Subnet Mask *''')
self.Text5 = tk.Text(top)
self.Text5.place(relx=0.283, rely=0.720, relheight=0.060, relwidth=0.30)
self.Text5.bind("<FocusIn>", self.get_sub_mask)
self.Text5.configure(background="white")
self.Text5.configure(font="TkTextFont")
self.Text5.configure(foreground="black")
self.Text5.configure(highlightbackground="#d9d9d9")
self.Text5.configure(highlightcolor="black")
self.Text5.configure(insertbackground="black")
self.Text5.configure(selectbackground="blue")
self.Text5.configure(selectforeground="white")
self.Text5.configure(wrap="word")
self.Label12 = tk.Label(top)
self.Label12.place(relx=0.067, rely=0.820, height=20, width=122)
self.Label12.configure(activebackground="#f9f9f9")
self.Label12.configure(activeforeground="black")
self.Label12.configure(background="#d9d9d9")
self.Label12.configure(disabledforeground="#a3a3a3")
self.Label12.configure(foreground="#000000")
self.Label12.configure(highlightbackground="#d9d9d9")
self.Label12.configure(highlightcolor="black")
self.Label12.configure(text='''Default Gateway *''')
self.Text6 = tk.Text(top)
self.Text6.place(relx=0.283, rely=0.810, relheight=0.060, relwidth=0.30)
self.Text6.bind("<FocusIn>", self.get_default_gateway)
self.Text6.configure(background="white")
self.Text6.configure(font="TkTextFont")
self.Text6.configure(foreground="black")
self.Text6.configure(highlightbackground="#d9d9d9")
self.Text6.configure(highlightcolor="black")
self.Text6.configure(insertbackground="black")
self.Text6.configure(selectbackground="blue")
self.Text6.configure(selectforeground="white")
self.Text6.configure(wrap="word")
self.TButton2 = ttk.Button(top)
self.TButton2.place(relx=0.7, rely=0.700, height=40, width=118)
self.TButton2.configure(takefocus="")
self.TButton2.configure(text='''Update IP''')
self.TButton2.configure(command=self.update_ip)
self.menubar = tk.Menu(top, font="TkMenuFont", bg=_bgcolor, fg=_fgcolor)
top.configure(menu=self.menubar)
def get_sub_mask(self, event):
try:
if len(self.Text4.get("1.0", 'end-1c')) > 0:
net = ipaddress.ip_network(self.Text4.get("1.0", 'end-1c') + '/24', strict=False)
if len(self.Text5.get("1.0", 'end-1c')) > 0:
self.Text5.delete('1.0', 'end')
self.Text5.insert(1.0, str(net.netmask))
else:
print("please input new ip address to get sub mask")
except Exception as e:
print(e)
self.Text5.delete('1.0', 'end')
def get_default_gateway(self, event):
try:
if len(self.Text4.get("1.0", 'end-1c')) > 0:
default_gateway_ip = get_default_gateway_ip(self.Text4.get("1.0", 'end-1c'))
if default_gateway_ip is not None:
if len(self.Text6.get("1.0", 'end-1c')) > 0:
self.Text6.delete('1.0', 'end')
self.Text6.insert(1.0, default_gateway_ip)
else:
print("default_gateway_ip is None")
else:
print("please input new ip address to get default gateway")
except Exception as e:
print(e)
self.Text6.delete('1.0', 'end')
def update_timezone_and_datetime(self):
try:
result = update_device_time(
self.Text1.get("1.0", 'end-1c'),
self.Text2.get("1.0", 'end-1c'),
self.Text3.get("1.0", 'end-1c'))
if result == 'success':
tkinter.messagebox.showinfo("Information", "Update Device time success")
else:
tkinter.messagebox.showinfo("Information", "Update Device time failed")
except Exception as e:
print(e)
tkinter.messagebox.showerror("Error", "Please check input and connection then try again")
def update_ip(self):
try:
if len(self.Text4.get("1.0", 'end-1c')) > 0 and len(self.Text5.get("1.0", 'end-1c')) > 0 and len(
self.Text6.get("1.0", 'end-1c')) > 0:
result = update_ip(
self.Text1.get("1.0", 'end-1c'),
self.Text2.get("1.0", 'end-1c'),
self.Text3.get("1.0", 'end-1c'),
self.Text4.get("1.0", 'end-1c'),
self.Text6.get("1.0", 'end-1c'))
if result == 'success':
tkinter.messagebox.showinfo("Information", "IP Address Updated Successfully.")
else:
tkinter.messagebox.showinfo("Information", "Failed to Update IP Address.")
else:
tkinter.messagebox.showinfo("Information", "Please confirm whether the information is complete")
except Exception as e:
print(e)
tkinter.messagebox.showerror("Error", "Please check input and connection then try again")
if __name__ == '__main__':
vp_start_gui()
| en | 0.666746 | #! /usr/bin/env python # -*- coding: utf-8 -*- Starting point when module is the main routine. Starting point when module is imported by another module. Correct form of call: 'create_Toplevel1(root, *args, **kwargs)' . # rt = root This class configures and populates the toplevel window. top is the toplevel containing window. # X11 color: 'gray85' # X11 color: 'black' # X11 color: 'gray85' # X11 color: 'gray85' # Closest X11 color: 'gray92' Thanks for choosing Device Manager Functions 1. Sync computer datetime to device 2. Update device network settings Current IP Username Password Please click the following button to sync computer datetime to device and update datetime format to MM-dd-yyyy HH:mm:ss Sync DateTime ################IP New IP Address * Subnet Mask * Default Gateway * Update IP | 2.450505 | 2 |
reason/metrics/_accuracy.py | alisoltanirad/Reason | 1 | 6621578 | <gh_stars>1-10
def accuracy(y_true, y_pred):
"""Accuracy score function.
Easy-to-use word tokenize function.
Example:
>>> from reason.metrics import accuracy
>>> accuracy(y_true, y_pred)
0.9358
Args:
y_true (list): Real labels.
y_pred (list): Predicted labels returned by classifier.
Returns:
float: Accuracy score.
"""
length = len(y_true)
correct = 0
for i in range(length):
if y_true[i] == y_pred[i]:
correct += 1
return float('{:.4f}'.format(correct / length))
| def accuracy(y_true, y_pred):
"""Accuracy score function.
Easy-to-use word tokenize function.
Example:
>>> from reason.metrics import accuracy
>>> accuracy(y_true, y_pred)
0.9358
Args:
y_true (list): Real labels.
y_pred (list): Predicted labels returned by classifier.
Returns:
float: Accuracy score.
"""
length = len(y_true)
correct = 0
for i in range(length):
if y_true[i] == y_pred[i]:
correct += 1
return float('{:.4f}'.format(correct / length)) | en | 0.670671 | Accuracy score function. Easy-to-use word tokenize function. Example: >>> from reason.metrics import accuracy >>> accuracy(y_true, y_pred) 0.9358 Args: y_true (list): Real labels. y_pred (list): Predicted labels returned by classifier. Returns: float: Accuracy score. | 3.391712 | 3 |
day10-11/code/threads.py | liuchunhuicanfly/learning-python | 4 | 6621579 | # encoding: utf-8
from threading import currentThread, Thread, Lock
from time import time, sleep
from random import randint
# def download_task(filename):
# print('线程 %s 开始下载%s...' % (currentThread().name, filename))
# time_to_download = randint(5, 10)
# sleep(time_to_download)
# print('线程 %s 下载完成! 耗费了%d秒' % (currentThread().name, time_to_download))
# 单线程
# def main():
# start_time = time()
# print('线程 %s is running...' % currentThread().name)
# t = Thread(target = download_task, args = ('test1.txt',), name = 'DownloadThread')
# t.start()
# t.join()
# end_time = time()
# print('线程 %s ended. 共耗时 %.2f' % (currentThread().name, end_time - start_time))
"""
线程 MainThread is running...
线程 DownloadThread 开始下载test1.txt...
线程 DownloadThread 下载完成! 耗费了8秒
线程 MainThread ended. 共耗时 8.00
"""
# 多线程
# def main():
# start_time = time()
# print('线程 %s is running...' % currentThread().name)
# t1 = Thread(target = download_task, args = ('test1.txt',), name = 'DownloadThread1')
# t1.start()
# t2 = Thread(target = download_task, args = ('test2.txt',), name = 'DownloadThread2')
# t2.start()
# t1.join()
# t2.join()
# end_time = time()
# print('线程 %s ended. 共耗时 %.2f' % (currentThread().name, end_time - start_time))
"""
线程 MainThread is running...
线程 DownloadThread1 开始下载test1.txt...
线程 DownloadThread2 开始下载test2.txt...
线程 DownloadThread1 下载完成! 耗费了6秒
线程 DownloadThread2 下载完成! 耗费了8秒
线程 MainThread ended. 共耗时 8.00
"""
# 使用继承创建线程
# class DownloadTask(Thread):
# def __init__(self, filename, threadname):
# super().__init__()
# self._filename = filename
# self._name = threadname
# def run(self):
# print('线程 %s 开始下载%s...' % (currentThread().name, self._filename))
# time_to_download = randint(5, 10)
# sleep(time_to_download)
# print('线程 %s 下载完成! 耗费了%d秒' % (currentThread().name, time_to_download))
# def main():
# start_time = time()
# print('线程 %s is running...' % currentThread().name)
# t1 = DownloadTask('test1.txt', 'DownloadThread1')
# t1.start()
# t2 = DownloadTask('test2.txt', 'DownloadThread2')
# t2.start()
# t1.join()
# t2.join()
# end_time = time()
# print('线程 %s ended. 共耗时 %.2f' % (currentThread().name, end_time - start_time))
"""
线程 MainThread is running...
线程 DownloadThread1 开始下载test1.txt...
线程 DownloadThread2 开始下载test2.txt...
线程 DownloadThread1 下载完成! 耗费了5秒
线程 DownloadThread2 下载完成! 耗费了9秒
线程 MainThread ended. 共耗时 9.00
"""
# Lock
class Account(object):
    """Bank account whose balance is protected by a lock.

    ``deposit`` deliberately sleeps between reading and writing the
    balance to widen the race window (this file is a threading demo);
    the lock makes the read-modify-write sequence atomic, so 1000
    concurrent deposits of 1 always end at 1000.
    """

    def __init__(self):
        super(Account, self).__init__()
        self._balance = 0    # current balance, guarded by _lock
        self._lock = Lock()  # serializes concurrent deposits

    def deposit(self, money):
        """Atomically add ``money`` to the balance."""
        # ``with`` acquires the lock and guarantees release even if the
        # body raises -- equivalent to acquire()/try/finally/release().
        with self._lock:
            new_balance = self._balance + money
            sleep(0.01)  # widen the race window on purpose (teaching demo)
            self._balance = new_balance

    @property
    def balance(self):
        """Current balance (a single int read, no lock needed)."""
        return self._balance
class AddMoneyThread(Thread):
    """Worker thread that deposits a fixed amount into a shared account."""

    def __init__(self, name, account, money):
        # Pass the name to Thread's own constructor instead of poking
        # the private ``_name`` attribute after the fact.
        super().__init__(name=name)
        self._account = account  # shared Account-like object
        self._money = money      # amount deposited when the thread runs

    def run(self):
        print('线程%s running...' % currentThread().name)
        self._account.deposit(self._money)
def main():
    """Spawn 1000 deposit threads against one account and report totals."""
    begin = time()
    shared_account = Account()
    workers = []
    for index in range(1000):
        worker = AddMoneyThread('Thread%s' % str(index + 1), shared_account, 1)
        workers.append(worker)
        worker.start()
    # Wait for every deposit to finish before reading the balance.
    for worker in workers:
        worker.join()
    finish = time()
    print('共耗时: %.2f' % (finish - begin))
    print('账户余额为: %d元' % shared_account.balance)
"""
无锁
线程Thread1 running...
线程Thread2 running...
....
线程Thread999 running...
线程Thread1000 running...
共耗时: 0.20
账户余额为: 1元
有锁
线程Thread1 running...
线程Thread2 running...
....
线程Thread999 running...
线程Thread1000 running...
共耗时: 11.68
账户余额为: 1000元
"""
if __name__ == '__main__':
main()
| # encoding: utf-8
from threading import currentThread, Thread, Lock
from time import time, sleep
from random import randint
# def download_task(filename):
# print('线程 %s 开始下载%s...' % (currentThread().name, filename))
# time_to_download = randint(5, 10)
# sleep(time_to_download)
# print('线程 %s 下载完成! 耗费了%d秒' % (currentThread().name, time_to_download))
# 单线程
# def main():
# start_time = time()
# print('线程 %s is running...' % currentThread().name)
# t = Thread(target = download_task, args = ('test1.txt',), name = 'DownloadThread')
# t.start()
# t.join()
# end_time = time()
# print('线程 %s ended. 共耗时 %.2f' % (currentThread().name, end_time - start_time))
"""
线程 MainThread is running...
线程 DownloadThread 开始下载test1.txt...
线程 DownloadThread 下载完成! 耗费了8秒
线程 MainThread ended. 共耗时 8.00
"""
# 多线程
# def main():
# start_time = time()
# print('线程 %s is running...' % currentThread().name)
# t1 = Thread(target = download_task, args = ('test1.txt',), name = 'DownloadThread1')
# t1.start()
# t2 = Thread(target = download_task, args = ('test2.txt',), name = 'DownloadThread2')
# t2.start()
# t1.join()
# t2.join()
# end_time = time()
# print('线程 %s ended. 共耗时 %.2f' % (currentThread().name, end_time - start_time))
"""
线程 MainThread is running...
线程 DownloadThread1 开始下载test1.txt...
线程 DownloadThread2 开始下载test2.txt...
线程 DownloadThread1 下载完成! 耗费了6秒
线程 DownloadThread2 下载完成! 耗费了8秒
线程 MainThread ended. 共耗时 8.00
"""
# 使用继承创建线程
# class DownloadTask(Thread):
# def __init__(self, filename, threadname):
# super().__init__()
# self._filename = filename
# self._name = threadname
# def run(self):
# print('线程 %s 开始下载%s...' % (currentThread().name, self._filename))
# time_to_download = randint(5, 10)
# sleep(time_to_download)
# print('线程 %s 下载完成! 耗费了%d秒' % (currentThread().name, time_to_download))
# def main():
# start_time = time()
# print('线程 %s is running...' % currentThread().name)
# t1 = DownloadTask('test1.txt', 'DownloadThread1')
# t1.start()
# t2 = DownloadTask('test2.txt', 'DownloadThread2')
# t2.start()
# t1.join()
# t2.join()
# end_time = time()
# print('线程 %s ended. 共耗时 %.2f' % (currentThread().name, end_time - start_time))
"""
线程 MainThread is running...
线程 DownloadThread1 开始下载test1.txt...
线程 DownloadThread2 开始下载test2.txt...
线程 DownloadThread1 下载完成! 耗费了5秒
线程 DownloadThread2 下载完成! 耗费了9秒
线程 MainThread ended. 共耗时 9.00
"""
# Lock
class Account(object):
"""docstring for Account"""
# 无锁
# def __init__(self):
# super(Account, self).__init__()
# self._balance = 0
# def deposit(self, money):
# new_balance = self._balance + money
# sleep(0.1)
# self._balance = new_balance
# 有锁
def __init__(self):
super(Account, self).__init__()
self._balance = 0
self._lock = Lock()
def deposit(self, money):
# 先获取锁才能执行后续的代码
self._lock.acquire()
try:
new_balance = self._balance + money
sleep(0.01)
self._balance = new_balance
finally:
# 在finally中执行释放锁的操作保证正常异常锁都能释放
self._lock.release()
@property
def balance(self):
return self._balance
class AddMoneyThread(Thread):
def __init__(self, name, account, money):
super().__init__()
self._name = name
self._account = account
self._money = money
def run(self):
print('线程%s running...' % currentThread().name)
self._account.deposit(self._money)
def main():
start_time = time()
account = Account()
threads = []
for _ in range(1000):
t = AddMoneyThread('Thread%s' % str(_ + 1), account, 1)
threads.append(t)
t.start()
for t in threads:
t.join()
end_time = time()
print('共耗时: %.2f' % (end_time - start_time))
print('账户余额为: %d元' % account.balance)
"""
无锁
线程Thread1 running...
线程Thread2 running...
....
线程Thread999 running...
线程Thread1000 running...
共耗时: 0.20
账户余额为: 1元
有锁
线程Thread1 running...
线程Thread2 running...
....
线程Thread999 running...
线程Thread1000 running...
共耗时: 11.68
账户余额为: 1000元
"""
if __name__ == '__main__':
main()
| en | 0.293581 | # encoding: utf-8 # def download_task(filename): # print('线程 %s 开始下载%s...' % (currentThread().name, filename)) # time_to_download = randint(5, 10) # sleep(time_to_download) # print('线程 %s 下载完成! 耗费了%d秒' % (currentThread().name, time_to_download)) # 单线程 # def main(): # start_time = time() # print('线程 %s is running...' % currentThread().name) # t = Thread(target = download_task, args = ('test1.txt',), name = 'DownloadThread') # t.start() # t.join() # end_time = time() # print('线程 %s ended. 共耗时 %.2f' % (currentThread().name, end_time - start_time)) 线程 MainThread is running... 线程 DownloadThread 开始下载test1.txt... 线程 DownloadThread 下载完成! 耗费了8秒 线程 MainThread ended. 共耗时 8.00 # 多线程 # def main(): # start_time = time() # print('线程 %s is running...' % currentThread().name) # t1 = Thread(target = download_task, args = ('test1.txt',), name = 'DownloadThread1') # t1.start() # t2 = Thread(target = download_task, args = ('test2.txt',), name = 'DownloadThread2') # t2.start() # t1.join() # t2.join() # end_time = time() # print('线程 %s ended. 共耗时 %.2f' % (currentThread().name, end_time - start_time)) 线程 MainThread is running... 线程 DownloadThread1 开始下载test1.txt... 线程 DownloadThread2 开始下载test2.txt... 线程 DownloadThread1 下载完成! 耗费了6秒 线程 DownloadThread2 下载完成! 耗费了8秒 线程 MainThread ended. 共耗时 8.00 # 使用继承创建线程 # class DownloadTask(Thread): # def __init__(self, filename, threadname): # super().__init__() # self._filename = filename # self._name = threadname # def run(self): # print('线程 %s 开始下载%s...' % (currentThread().name, self._filename)) # time_to_download = randint(5, 10) # sleep(time_to_download) # print('线程 %s 下载完成! 耗费了%d秒' % (currentThread().name, time_to_download)) # def main(): # start_time = time() # print('线程 %s is running...' % currentThread().name) # t1 = DownloadTask('test1.txt', 'DownloadThread1') # t1.start() # t2 = DownloadTask('test2.txt', 'DownloadThread2') # t2.start() # t1.join() # t2.join() # end_time = time() # print('线程 %s ended. 
共耗时 %.2f' % (currentThread().name, end_time - start_time)) 线程 MainThread is running... 线程 DownloadThread1 开始下载test1.txt... 线程 DownloadThread2 开始下载test2.txt... 线程 DownloadThread1 下载完成! 耗费了5秒 线程 DownloadThread2 下载完成! 耗费了9秒 线程 MainThread ended. 共耗时 9.00 # Lock docstring for Account # 无锁 # def __init__(self): # super(Account, self).__init__() # self._balance = 0 # def deposit(self, money): # new_balance = self._balance + money # sleep(0.1) # self._balance = new_balance # 有锁 # 先获取锁才能执行后续的代码 # 在finally中执行释放锁的操作保证正常异常锁都能释放 无锁 线程Thread1 running... 线程Thread2 running... .... 线程Thread999 running... 线程Thread1000 running... 共耗时: 0.20 账户余额为: 1元 有锁 线程Thread1 running... 线程Thread2 running... .... 线程Thread999 running... 线程Thread1000 running... 共耗时: 11.68 账户余额为: 1000元 | 3.582916 | 4 |
modules/sr/robot/vision/__init__.py | 13ros27/competition-simulator | 0 | 6621580 | from .api import tokens_from_objects
from .polar import PolarCoord, polar_from_cartesian
from .tokens import Face, Orientation
from .vectors import Vector
__all__ = (
'Face',
'Vector',
'PolarCoord',
'Orientation',
'tokens_from_objects',
'polar_from_cartesian',
)
| from .api import tokens_from_objects
from .polar import PolarCoord, polar_from_cartesian
from .tokens import Face, Orientation
from .vectors import Vector
__all__ = (
'Face',
'Vector',
'PolarCoord',
'Orientation',
'tokens_from_objects',
'polar_from_cartesian',
)
| none | 1 | 1.411338 | 1 | |
os_v4_hek/defs/tag_.py | holy-crust/reclaimer | 0 | 6621581 | <reponame>holy-crust/reclaimer
from ...os_v3_hek.defs.tag_ import *
| from ...os_v3_hek.defs.tag_ import * | none | 1 | 1.197808 | 1 | |
project/views/user.py | DanielGrams/gsevp | 1 | 6621582 | <gh_stars>1-10
from flask import render_template
from flask_security import auth_required
from project import app
from project.models import AdminUnitInvitation
from project.views.utils import get_invitation_access_result
@app.route("/profile")
@auth_required()
def profile():
    """Render the profile page for the authenticated user."""
    return render_template("profile.html")
@app.route("/user/organization-invitations/<int:id>")
def user_organization_invitation(id):
    """Show a single organization invitation, gated by invitation e-mail."""
    invitation = AdminUnitInvitation.query.get_or_404(id)
    access_check = get_invitation_access_result(invitation.email)
    # A truthy access result is a response (e.g. redirect) that takes
    # precedence over rendering the page.
    return access_check or render_template("user/organization_invitations.html")
@app.route("/user/organization-invitations")
@app.route("/user/organization-invitations/<path:path>")
@auth_required()
def user_organization_invitations(path=None):
    """Render the organization-invitation list for the logged-in user."""
    # ``path`` is accepted (catch-all route) but unused -- presumably
    # to let deeper URLs serve the same page; TODO confirm with caller.
    return render_template("user/organization_invitations.html")
| from flask import render_template
from flask_security import auth_required
from project import app
from project.models import AdminUnitInvitation
from project.views.utils import get_invitation_access_result
@app.route("/profile")
@auth_required()
def profile():
return render_template("profile.html")
@app.route("/user/organization-invitations/<int:id>")
def user_organization_invitation(id):
invitation = AdminUnitInvitation.query.get_or_404(id)
result = get_invitation_access_result(invitation.email)
if result:
return result
return render_template("user/organization_invitations.html")
@app.route("/user/organization-invitations")
@app.route("/user/organization-invitations/<path:path>")
@auth_required()
def user_organization_invitations(path=None):
return render_template("user/organization_invitations.html") | none | 1 | 2.075438 | 2 | |
tests.py | oneassure-tech/onepipepy | 0 | 6621583 | import unittest
from src.onepipepy import *
#from models import *
from config import Config
from datetime import datetime
class PDTest(unittest.TestCase):
    """Integration tests for the onepipepy Pipedrive wrapper.

    NOTE(review): these tests call the live Pipedrive API using the key
    from ``Config.PD_API_KEY`` -- they are integration tests, not unit
    tests, and will fail offline or with an invalid key.
    """
    # One shared API client, created at class-definition time.
    api = API(Config.PD_API_KEY)
    # Scratch dict shared by the ordered helper steps of test_deals
    # (holds the id of the deal created by add_deal).
    # NOTE(review): the name shadows the builtin ``vars`` in this scope.
    vars = dict()
    def test_search_person(self):
        """search_items with item_types="person" returns a Person."""
        self.assertIsInstance(
            self.api.search.search_items(
                term="Shreyans",
                item_types="person"
            ),
            Person
        )
    def test_search_deal(self):
        """search_items with item_types="deal" returns a Deal."""
        self.assertIsInstance(
            self.api.search.search_items(
                term="Shreyans",
                item_types="deal"
            ),
            Deal
        )
    def test_add_person(self):
        """Creating a person returns a Person model."""
        self.assertIsInstance(
            self.api.person.add_person(
                data=dict(
                    name="Shreyans",
                    phone="9686421633"
                )
            ),
            Person
        )
    def test_add_org(self):
        """Creating an organization returns an Organization model."""
        self.assertIsInstance(
            self.api.org.add_org(
                name="Shreyans - 9686421633"
            ),
            Organization
        )
    # The methods below intentionally lack the test_ prefix: they are
    # ordered steps driven by test_deals, not standalone tests.
    def add_deal(self):
        """Create a deal and remember its id for the later steps."""
        deal = self.api.deal.add_deal(
            title="Shreyans - 9686421633"
        )
        self.vars["deal_id"] = deal.data["id"]
        self.assertIsInstance(
            deal,
            Deal
        )
    def add_deal_v2(self):
        """Create a deal together with its person and organization."""
        self.assertIsInstance(
            self.api.deal.add_deal_v2(
                title="Shreyans - 9686421633",
                person=dict(
                    name="Shreyans",
                    phone=9686421633
                ),
                org=dict(
                    name="Shreyans - 9686421633"
                )
            ),
            Deal
        )
    def update_deal(self):
        """Rename the deal created by add_deal."""
        self.assertIsInstance(
            self.api.deal.update_deal(
                id=self.vars["deal_id"],
                data=dict(
                    title="Shreyans - new -deal"
                )
            ),
            Deal
        )
    def get_deal_by_id(self):
        """Fetch the stored deal back by its id."""
        self.assertIsInstance(
            self.api.deal.get_deal_by_id(
                id=self.vars["deal_id"]
            ),
            Deal
        )
    def add_activity_to_deal(self):
        """Attach an activity due today to the stored deal."""
        self.assertIsInstance(
            self.api.activity.add_activity(
                deal_id=self.vars["deal_id"],
                data=dict(
                    subject="Test activity",
                    due_date=datetime.today().strftime('%Y-%m-%d'),
                )
            ),
            # NOTE(review): "Activites" is the spelling this module
            # imports and uses; do not "fix" it here without the library.
            Activites
        )
    def test_deals(self):
        """End-to-end deal lifecycle: create, update, fetch, annotate."""
        self.add_deal()
        self.add_deal_v2()
        self.update_deal()
        self.get_deal_by_id()
        self.add_activity_to_deal()
if __name__ == "__main__":
unittest.main()
| import unittest
from src.onepipepy import *
#from models import *
from config import Config
from datetime import datetime
class PDTest(unittest.TestCase):
api = API(Config.PD_API_KEY)
vars = dict()
def test_search_person(self):
self.assertIsInstance(
self.api.search.search_items(
term="Shreyans",
item_types="person"
),
Person
)
def test_search_deal(self):
self.assertIsInstance(
self.api.search.search_items(
term="Shreyans",
item_types="deal"
),
Deal
)
def test_add_person(self):
self.assertIsInstance(
self.api.person.add_person(
data=dict(
name="Shreyans",
phone="9686421633"
)
),
Person
)
def test_add_org(self):
self.assertIsInstance(
self.api.org.add_org(
name="Shreyans - 9686421633"
),
Organization
)
def add_deal(self):
deal = self.api.deal.add_deal(
title="Shreyans - 9686421633"
)
self.vars["deal_id"] = deal.data["id"]
self.assertIsInstance(
deal,
Deal
)
def add_deal_v2(self):
self.assertIsInstance(
self.api.deal.add_deal_v2(
title="Shreyans - 9686421633",
person=dict(
name="Shreyans",
phone=9686421633
),
org=dict(
name="Shreyans - 9686421633"
)
),
Deal
)
def update_deal(self):
self.assertIsInstance(
self.api.deal.update_deal(
id=self.vars["deal_id"],
data=dict(
title="Shreyans - new -deal"
)
),
Deal
)
def get_deal_by_id(self):
self.assertIsInstance(
self.api.deal.get_deal_by_id(
id=self.vars["deal_id"]
),
Deal
)
def add_activity_to_deal(self):
self.assertIsInstance(
self.api.activity.add_activity(
deal_id=self.vars["deal_id"],
data=dict(
subject="Test activity",
due_date=datetime.today().strftime('%Y-%m-%d'),
)
),
Activites
)
def test_deals(self):
self.add_deal()
self.add_deal_v2()
self.update_deal()
self.get_deal_by_id()
self.add_activity_to_deal()
if __name__ == "__main__":
unittest.main()
| en | 0.506495 | #from models import * | 2.812447 | 3 |
static_setup.py | kongwf5813/ANARCI | 0 | 6621584 | <filename>static_setup.py<gh_stars>0
#!/usr/bin/env python3
import shutil, os
# Start from a clean slate: drop any previous build directory so stale
# artifacts are not packaged into this install.
if os.path.isdir("build"):
    shutil.rmtree("build/")
from distutils.core import setup
# Static install of ANARCI: ships pre-built HMM data files and the
# bundled muscle binaries alongside the Python package.
setup(name='anarci',
      version='1.3',
      description='Antibody Numbering and Receptor ClassIfication',
      author='<NAME>',
      author_email='<EMAIL>',
      url='http://opig.stats.ox.ac.uk/webapps/ANARCI',
      packages=['anarci'],
      package_dir={'anarci': 'lib/python/anarci'},
      # Pre-built profile HMM database used at runtime.
      package_data={'anarci': ['dat/HMMs/ALL.hmm',
                               'dat/HMMs/ALL.hmm.h3f',
                               'dat/HMMs/ALL.hmm.h3i',
                               'dat/HMMs/ALL.hmm.h3m',
                               'dat/HMMs/ALL.hmm.h3p']},
      scripts=['bin/ANARCI'],
      # Bundled alignment binaries (Linux and macOS builds of muscle).
      data_files = [ ('bin', ['bin/muscle', 'bin/muscle_macOS']) ]
      )
#!/usr/bin/env python3
import shutil, os
if os.path.isdir("build"):
shutil.rmtree("build/")
from distutils.core import setup
setup(name='anarci',
version='1.3',
description='Antibody Numbering and Receptor ClassIfication',
author='<NAME>',
author_email='<EMAIL>',
url='http://opig.stats.ox.ac.uk/webapps/ANARCI',
packages=['anarci'],
package_dir={'anarci': 'lib/python/anarci'},
package_data={'anarci': ['dat/HMMs/ALL.hmm',
'dat/HMMs/ALL.hmm.h3f',
'dat/HMMs/ALL.hmm.h3i',
'dat/HMMs/ALL.hmm.h3m',
'dat/HMMs/ALL.hmm.h3p']},
scripts=['bin/ANARCI'],
data_files = [ ('bin', ['bin/muscle', 'bin/muscle_macOS']) ]
) | fr | 0.221828 | #!/usr/bin/env python3 | 1.459869 | 1 |
stage1/rubberdecode.py | fishilico/sstic-2015 | 0 | 6621585 | <filename>stage1/rubberdecode.py
#!/usr/bin/env python3
"""Decode the Rubber Ducky inject.bin compiled script"""
import struct
import sys

# Map a (keycode, modifier) pair to the character/token it types.
# Modifier 0 is the plain key; modifier 2 is the same key with Shift
# held, as the lowercase/uppercase and digit/symbol pairs below show.
OM2C = {
    (0x1e, 0): '1', (0x1e, 2): '!',
    (0x1f, 0): '2', (0x1f, 2): '@',
    (0x20, 0): '3', (0x20, 2): '#',
    (0x21, 0): '4', (0x21, 2): '$',
    (0x22, 0): '5', (0x22, 2): '%',
    (0x23, 0): '6', (0x23, 2): '^',
    (0x24, 0): '7', (0x24, 2): '&',
    (0x25, 0): '8', (0x25, 2): '*',
    (0x26, 0): '9', (0x26, 2): '(',
    (0x27, 0): '0', (0x27, 2): ')',
    (0x28, 0): '[ENTER]\n', (0x29, 0): '[ESC]\n',
    (0x2b, 0): '\t', (0x2c, 0): ' ',
    (0x2d, 0): '-', (0x2d, 2): '_',
    (0x2e, 0): '=', (0x2e, 2): '+',
    (0x2f, 0): '[', (0x2f, 2): '{',
    (0x30, 0): ']', (0x30, 2): '}',
    (0x31, 0): '\\', (0x31, 2): '|',
    (0x33, 0): ':', (0x33, 2): ';',
    (0x34, 0): "'", (0x34, 2): '"',
    (0x35, 0): '`', (0x35, 2): '~',
    (0x36, 0): ',', (0x36, 2): '<',
    (0x37, 0): '.', (0x37, 2): '>',
    (0x38, 0): '/', (0x38, 2): '?',
}
# Alphabet: keycodes 4..29 map to 'a'..'z' (shifted -> uppercase)
for i in range(26):
    OM2C[(i + 4, 0)] = chr(ord('a') + i)
    OM2C[(i + 4, 2)] = chr(ord('A') + i)

delay_time = 0
with open('inject.bin', 'rb') as f:
    while True:
        injectdata = f.read(2)
        if len(injectdata) == 0:
            break
        opcode, modifier = struct.unpack('BB', injectdata)
        # "DELAY" is encoded with successive opcode-0 commands
        if opcode == 0:
            delay_time += modifier
            continue
        if delay_time:
            print("[DELAY {}]".format(delay_time))
            delay_time = 0
        # WIN key + letter is encoded with modifier 8
        if modifier & 8:
            c = OM2C.get((opcode, modifier & ~8))
            if c is not None:
                print("[WIN {}]".format(c))
                continue
        # Bug fix: OM2C.get() returns None for an unmapped pair and
        # sys.stdout.write(None) raises TypeError.  Emit a visible
        # placeholder instead of crashing mid-decode.
        char = OM2C.get((opcode, modifier))
        if char is None:
            char = '[?{:#04x},{:#04x}]'.format(opcode, modifier)
        sys.stdout.write(char)
# Bug fix: flush a trailing delay if the file ends with DELAY commands.
if delay_time:
    print("[DELAY {}]".format(delay_time))
| <filename>stage1/rubberdecode.py
#!/usr/bin/env python3
"""Decode the Rubber Ducky inject.bin compiled script"""
import struct
import sys
# Build a (opcode, modifier)-to-char dictonary
OM2C = {
(0x1e, 0): '1', (0x1e, 2): '!',
(0x1f, 0): '2', (0x1f, 2): '@',
(0x20, 0): '3', (0x20, 2): '#',
(0x21, 0): '4', (0x21, 2): '$',
(0x22, 0): '5', (0x22, 2): '%',
(0x23, 0): '6', (0x23, 2): '^',
(0x24, 0): '7', (0x24, 2): '&',
(0x25, 0): '8', (0x25, 2): '*',
(0x26, 0): '9', (0x26, 2): '(',
(0x27, 0): '0', (0x27, 2): ')',
(0x28, 0): '[ENTER]\n', (0x29, 0): '[ESC]\n',
(0x2b, 0): '\t', (0x2c, 0): ' ',
(0x2d, 0): '-', (0x2d, 2): '_',
(0x2e, 0): '=', (0x2e, 2): '+',
(0x2f, 0): '[', (0x2f, 2): '{',
(0x30, 0): ']', (0x30, 2): '}',
(0x31, 0): '\\', (0x31, 2): '|',
(0x33, 0): ':', (0x33, 2): ';',
(0x34, 0): "'", (0x34, 2): '"',
(0x35, 0): '`', (0x35, 2): '~',
(0x36, 0): ',', (0x36, 2): '<',
(0x37, 0): '.', (0x37, 2): '>',
(0x38, 0): '/', (0x38, 2): '?',
}
# Alphabet
for i in range(26):
OM2C[(i + 4, 0)] = chr(ord('a') + i)
OM2C[(i + 4, 2)] = chr(ord('A') + i)
delay_time = 0
with open('inject.bin', 'rb') as f:
while True:
injectdata = f.read(2)
if len(injectdata) == 0:
break
opcode, modifier = struct.unpack('BB', injectdata)
# "DELAY" is encoded with successive opcode-0 commands
if opcode == 0:
delay_time += modifier
continue
if delay_time:
print("[DELAY {}]".format(delay_time))
delay_time = 0
# WIN key + letter is encoded with modifier 8
if modifier & 8:
c = OM2C.get((opcode, modifier & ~8))
if c is not None:
print("[WIN {}]".format(c))
continue
sys.stdout.write(OM2C.get((opcode, modifier)))
| en | 0.69451 | #!/usr/bin/env python3 Decode the Rubber Ducky inject.bin compiled script # Build a (opcode, modifier)-to-char dictonary # Alphabet # "DELAY" is encoded with successive opcode-0 commands # WIN key + letter is encoded with modifier 8 | 2.820625 | 3 |
algorithms/BeamLstmWrapper.py | keith-leung/cis667-secretary-problem | 0 | 6621586 | <gh_stars>0
import random
import numpy as np
import torch as tr
import math
import pickle
# Define a small LSTM recurrent neural network with linear hidden-to-output layer
class BeamLstmWrapper():
    """Secretary-problem decision policy backed by a pickled LSTM.

    Loads a vocabulary, a token dictionary and a torch network from
    disk, then replays the full candidate history through the network
    on every ``decide`` call.
    """
    def __init__(self, modelname='', word_path='', dictionary_path = '', net_path= ''):
        self._name = modelname
        self._words = []
        self.dictionary = {}
        # Vocabulary: index -> token (pickled list).
        with open(word_path, 'rb') as handle1:
            self._words = pickle.load(handle1) # like_people is a list with data
        # Token dictionary: token -> network input (pickled mapping).
        with open(dictionary_path, 'rb') as handle2:
            self.dictionary = pickle.load(handle2) # like_people is a list with data
        self.net = None
        self.net = tr.load(net_path)  # pre-trained torch model
        # Candidate values rounded into the 0..10 token range.
        self.norm_hist_candidates = []
        # Raw candidate values in arrival order.
        self.list_historical_candidates = []
        pass
    def name(self):
        """Return the model name supplied at construction time."""
        return self._name
    def __str__(self, self_print=False, print_nodes=False):
        # NOTE(review): extra parameters on __str__ deviate from the
        # protocol; plain str(obj) still works through the defaults.
        str_result = "Search nodes not applicable\r\n"
        if self_print is not None and True == self_print:
            print(str_result)
        return str_result
    ## return true or false , selected index
    def decide(self, current_index, current_value):
        """Accept or reject ``current_value``; returns (accept, value).

        NOTE(review): heavily instrumented with prints, and several
        computed values (final_word, final_val, prob_sum, final_prob)
        are currently unused by the final decision.
        """
        # Map the raw value into the 0..10 token range used by the vocab.
        dt2 = round(self.norm(current_value, 0, 10))
        self.norm_hist_candidates.append(dt2)
        self.list_historical_candidates.append(current_value)
        # real prediction
        current_sentence = self.norm_hist_candidates # ['3', '5', '7', '4', '3', '2']
        v = None  # recurrent state threaded through the time steps
        # print(current_sentence)
        # keep the final(last word) prediction
        final_word = None
        final_y = None
        final_y_args = None
        final_val = 0
        final_prob = 0
        for c in current_sentence:
            x = self.dictionary[c]
            print('x={x}'.format(x=x))
            print('----------------------------------------')
            # One LSTM step: output distribution y and new state v.
            y, v = self.net(self.dictionary[c], v)
            y = y.squeeze() # ignore singleton dimensions for time-step/example
            y.argmax()  # NOTE(review): result discarded; recomputed below
            w = y.argmax()
            print('y={y}, v={v}, w={w}'.format(y=y, v=v, w=w))
            print('----------------------------------------')
            word = self._words[w]
            print('word={word} + w={w} ++ {words}'.format(word=word, w=w, words=self._words))
            print('----------------------------------------')
            prob = y[w]
            print(word, prob.item())
            print('----------------------------------------')
            final_word = word
            final_y = y
        final_y_args = np.argpartition(y, -5)
        # NOTE(review): argpartition returns ALL indices (partitioned
        # around the 5 largest), so the loop below sums over every
        # index, not just the top five -- confirm intent.
        prob_sum = np.sum(final_y_args)
        #in this line, the y has different probabilities,
        #but get 5 largest and calculate the expected value as the prediction
        for arg in final_y_args:
            word2 = self._words[arg] #word is string, we need integer
            word_f = float(word2)
            word_exp = word_f * final_y[arg]
            final_val += word_exp
        # all the probabilities need to be normalized to be the summation of 1
        final_prob = prob.item()
        # x is the dictionary value of the LAST history token, treated
        # here as an index into the seen-candidate list -- TODO confirm.
        if x is not None and y is not None and x > 0 and x < len(self.list_historical_candidates):
            # x is the most likely value in the appeared candidates
            value = self.list_historical_candidates[x]
            if current_value >= value:
                return True, current_value
        # fake implementation to ensure LSTM algorithm integration
        return False, current_value
    def norm(self, dt, left, right):
        """Linearly map ``dt`` (assumed in 0..100 -- TODO confirm) into [left, right]."""
        dt2 = dt / 100.0
        range = right - left  # NOTE(review): shadows the builtin ``range`` locally
        return left + (range * dt2)
| import random
import numpy as np
import torch as tr
import math
import pickle
# Define a small LSTM recurrent neural network with linear hidden-to-output layer
class BeamLstmWrapper():
def __init__(self, modelname='', word_path='', dictionary_path = '', net_path= ''):
self._name = modelname
self._words = []
self.dictionary = {}
with open(word_path, 'rb') as handle1:
self._words = pickle.load(handle1) # like_people is a list with data
with open(dictionary_path, 'rb') as handle2:
self.dictionary = pickle.load(handle2) # like_people is a list with data
self.net = None
self.net = tr.load(net_path)
self.norm_hist_candidates = []
self.list_historical_candidates = []
pass
def name(self):
return self._name
def __str__(self, self_print=False, print_nodes=False):
str_result = "Search nodes not applicable\r\n"
if self_print is not None and True == self_print:
print(str_result)
return str_result
## return true or false , selected index
def decide(self, current_index, current_value):
dt2 = round(self.norm(current_value, 0, 10))
self.norm_hist_candidates.append(dt2)
self.list_historical_candidates.append(current_value)
# real prediction
current_sentence = self.norm_hist_candidates # ['3', '5', '7', '4', '3', '2']
v = None
# print(current_sentence)
# keep the final(last word) prediction
final_word = None
final_y = None
final_y_args = None
final_val = 0
final_prob = 0
for c in current_sentence:
x = self.dictionary[c]
print('x={x}'.format(x=x))
print('----------------------------------------')
y, v = self.net(self.dictionary[c], v)
y = y.squeeze() # ignore singleton dimensions for time-step/example
y.argmax()
w = y.argmax()
print('y={y}, v={v}, w={w}'.format(y=y, v=v, w=w))
print('----------------------------------------')
word = self._words[w]
print('word={word} + w={w} ++ {words}'.format(word=word, w=w, words=self._words))
print('----------------------------------------')
prob = y[w]
print(word, prob.item())
print('----------------------------------------')
final_word = word
final_y = y
final_y_args = np.argpartition(y, -5)
prob_sum = np.sum(final_y_args)
#in this line, the y has different probabilities,
#but get 5 largest and calculate the expected value as the prediction
for arg in final_y_args:
word2 = self._words[arg] #word is string, we need integer
word_f = float(word2)
word_exp = word_f * final_y[arg]
final_val += word_exp
# all the probabilities need to be normalized to be the summation of 1
final_prob = prob.item()
if x is not None and y is not None and x > 0 and x < len(self.list_historical_candidates):
# x is the most likely value in the appeared candidates
value = self.list_historical_candidates[x]
if current_value >= value:
return True, current_value
# fake implementation to ensure LSTM algorithm integration
return False, current_value
def norm(self, dt, left, right):
dt2 = dt / 100.0
range = right - left
return left + (range * dt2) | en | 0.854151 | # Define a small LSTM recurrent neural network with linear hidden-to-output layer # like_people is a list with data # like_people is a list with data ## return true or false , selected index # real prediction # ['3', '5', '7', '4', '3', '2'] # print(current_sentence) # keep the final(last word) prediction # ignore singleton dimensions for time-step/example #in this line, the y has different probabilities, #but get 5 largest and calculate the expected value as the prediction #word is string, we need integer # all the probabilities need to be normalized to be the summation of 1 # x is the most likely value in the appeared candidates # fake implementation to ensure LSTM algorithm integration | 2.992014 | 3 |
test/conftest.py | zli117/Evolution | 4 | 6621587 | <reponame>zli117/Evolution
from typing import Tuple
import pytest
from evolution.encoding.base import IdentityOperation
from evolution.encoding.base import MaxPool2D
from evolution.encoding.base import PointConv2D
from evolution.encoding.base import Vertex
from evolution.encoding.mutable_edge import MutableEdge
@pytest.fixture()
def basic_graph_no_v12() -> Tuple[MutableEdge, Vertex, Vertex, Vertex,
                                  Vertex]:
    """Build a small identity-edge graph WITHOUT a vertex1->vertex2 link.

    Resulting wiring (all edges are IdentityOperation):
        input -> vertex1 -> output
        input -> vertex2 -> output
        input -> vertex4          (dead end: no outgoing edge)
        vertex3 -> output         (not reachable from the input)
    """
    complex_operation = MutableEdge((PointConv2D((1, 4)), MaxPool2D()))
    vertex1 = Vertex()
    vertex2 = Vertex()
    vertex3 = Vertex()
    vertex4 = Vertex()
    edge1 = IdentityOperation()
    edge2 = IdentityOperation()
    edge3 = IdentityOperation()
    edge4 = IdentityOperation()
    edge5 = IdentityOperation()
    edge6 = IdentityOperation()
    # Replace the input vertex's default wiring with our three edges.
    complex_operation.input_vertex.out_bound_edges.clear()
    complex_operation.input_vertex.out_bound_edges.extend([edge1, edge2, edge3])
    edge1.end_vertex = vertex1
    edge2.end_vertex = vertex2
    edge3.end_vertex = vertex4
    vertex1.out_bound_edges.append(edge6)
    edge6.end_vertex = complex_operation.output_vertex
    vertex2.out_bound_edges.append(edge4)
    edge4.end_vertex = complex_operation.output_vertex
    vertex3.out_bound_edges.append(edge5)
    edge5.end_vertex = complex_operation.output_vertex
    return complex_operation, vertex1, vertex2, vertex3, vertex4
@pytest.fixture()
def basic_graph(basic_graph_no_v12) -> Tuple[MutableEdge, Vertex, Vertex,
                                             Vertex, Vertex]:
    """Extend the base fixture with an identity edge from vertex1 to vertex2."""
    operation, v1, v2, v3, v4 = basic_graph_no_v12
    bridge = IdentityOperation()
    bridge.end_vertex = v2
    v1.out_bound_edges.append(bridge)
    return operation, v1, v2, v3, v4
| from typing import Tuple
import pytest
from evolution.encoding.base import IdentityOperation
from evolution.encoding.base import MaxPool2D
from evolution.encoding.base import PointConv2D
from evolution.encoding.base import Vertex
from evolution.encoding.mutable_edge import MutableEdge
@pytest.fixture()
def basic_graph_no_v12() -> Tuple[MutableEdge, Vertex, Vertex, Vertex,
Vertex]:
complex_operation = MutableEdge((PointConv2D((1, 4)), MaxPool2D()))
vertex1 = Vertex()
vertex2 = Vertex()
vertex3 = Vertex()
vertex4 = Vertex()
edge1 = IdentityOperation()
edge2 = IdentityOperation()
edge3 = IdentityOperation()
edge4 = IdentityOperation()
edge5 = IdentityOperation()
edge6 = IdentityOperation()
complex_operation.input_vertex.out_bound_edges.clear()
complex_operation.input_vertex.out_bound_edges.extend([edge1, edge2, edge3])
edge1.end_vertex = vertex1
edge2.end_vertex = vertex2
edge3.end_vertex = vertex4
vertex1.out_bound_edges.append(edge6)
edge6.end_vertex = complex_operation.output_vertex
vertex2.out_bound_edges.append(edge4)
edge4.end_vertex = complex_operation.output_vertex
vertex3.out_bound_edges.append(edge5)
edge5.end_vertex = complex_operation.output_vertex
return complex_operation, vertex1, vertex2, vertex3, vertex4
@pytest.fixture()
def basic_graph(basic_graph_no_v12) -> Tuple[MutableEdge, Vertex, Vertex,
Vertex, Vertex]:
complex_operation, vertex1, vertex2, vertex3, vertex4 = basic_graph_no_v12
edge = IdentityOperation()
vertex1.out_bound_edges.append(edge)
edge.end_vertex = vertex2
return complex_operation, vertex1, vertex2, vertex3, vertex4 | none | 1 | 2.188356 | 2 | |
tests/GenPro/genetic_algorithm/individuals/test_basic.py | Hispar/procedural_generation | 0 | 6621588 | <filename>tests/GenPro/genetic_algorithm/individuals/test_basic.py
# -*- coding: utf-8 -*-
# Python imports
# 3rd Party imports
import pytest
# App imports
from src.GenPro.genetic_algorithm.individuals.basic import BasicIndividual
def test_individual_basic_fitness():
    """fitness() is abstract on the base individual and must raise."""
    individual = BasicIndividual()
    with pytest.raises(NotImplementedError):
        individual.fitness()
def test_individual_basic_genes():
    """A default-constructed individual has no genes."""
    individual = BasicIndividual()
    assert individual.genes() == []
def test_individual_basic_with_genes():
    """Keyword arguments become named genes readable as attributes."""
    individual = BasicIndividual(gen1=1, gen2=2)
    assert individual.genes() == ['gen1', 'gen2']
    assert individual.gen1 == 1
    assert individual.gen2 == 2
| <filename>tests/GenPro/genetic_algorithm/individuals/test_basic.py
# -*- coding: utf-8 -*-
# Python imports
# 3rd Party imports
import pytest
# App imports
from src.GenPro.genetic_algorithm.individuals.basic import BasicIndividual
def test_individual_basic_fitness():
individual = BasicIndividual()
with pytest.raises(NotImplementedError):
individual.fitness()
def test_individual_basic_genes():
individual = BasicIndividual()
assert individual.genes() == []
def test_individual_basic_with_genes():
individual = BasicIndividual(gen1=1, gen2=2)
assert individual.genes() == ['gen1', 'gen2']
assert individual.gen1 == 1
assert individual.gen2 == 2
| en | 0.750028 | # -*- coding: utf-8 -*- # Python imports # 3rd Party imports # App imports | 2.390129 | 2 |
tests/blessclient/awsmfautils_test.py | mwpeterson/python-blessclient | 115 | 6621589 | import blessclient.awsmfautils as awsmfautils
import os
import datetime
def test_unset_token():
os.environ['AWS_ACCESS_KEY_ID'] = 'foo'
os.environ['AWS_SESSION_TOKEN'] = 'foo'
os.environ['AWS_SECURITY_TOKEN'] = 'foo'
awsmfautils.unset_token()
assert 'AWS_ACCESS_KEY_ID' not in os.environ
assert 'AWS_SECRET_ACCESS_KEY' not in os.environ
assert 'AWS_SESSION_TOKEN' not in os.environ
assert 'AWS_SECURITY_TOKEN' not in os.environ
def test_get_serial(mock):
list_mfa_devices = {
u'MFADevices': [{
u'UserName': 'foobar',
u'SerialNumber': 'arn:aws:iam::000000000000:mfa/foobar',
u'EnableDate': datetime.datetime.utcnow()
}],
u'IsTruncated': False,
'ResponseMetadata': {
'RetryAttempts': 0,
'HTTPStatusCode': 200,
'RequestId': '85d05b5b-d2ca-11e6-96b6-8503a2da6360',
'HTTPHeaders': {
'x-amzn-requestid': '85d05b5b-d2ca-11e6-96b6-8503a2da6360',
'date': 'Wed, 04 Jan 2017 22:09:54 GMT',
'content-length': '528',
'content-type': 'text/xml'}
}
}
iam_client = mock.Mock()
iam_client.list_mfa_devices.return_value = list_mfa_devices
serial = awsmfautils.get_serial(iam_client, 'foobar')
iam_client.list_mfa_devices.assert_called_once_with(UserName='foobar')
assert serial == 'arn:aws:iam::000000000000:mfa/foobar'
def test_get_role_arn():
norole = awsmfautils.get_role_arn(
'arn:aws:iam::000000000000:user/foobar', None)
assert norole == ''
rolebar = awsmfautils.get_role_arn(
'arn:aws:iam::000000000000:user/foobar', 'rolebar')
assert rolebar == 'arn:aws:iam::000000000000:role/rolebar'
rolebar_acct = awsmfautils.get_role_arn(
'arn:aws:iam::000000000000:user/foobar', 'rolebar', '111111111111')
assert rolebar_acct == 'arn:aws:iam::111111111111:role/rolebar'
| import blessclient.awsmfautils as awsmfautils
import os
import datetime
def test_unset_token():
os.environ['AWS_ACCESS_KEY_ID'] = 'foo'
os.environ['AWS_SESSION_TOKEN'] = 'foo'
os.environ['AWS_SECURITY_TOKEN'] = 'foo'
awsmfautils.unset_token()
assert 'AWS_ACCESS_KEY_ID' not in os.environ
assert 'AWS_SECRET_ACCESS_KEY' not in os.environ
assert 'AWS_SESSION_TOKEN' not in os.environ
assert 'AWS_SECURITY_TOKEN' not in os.environ
def test_get_serial(mock):
list_mfa_devices = {
u'MFADevices': [{
u'UserName': 'foobar',
u'SerialNumber': 'arn:aws:iam::000000000000:mfa/foobar',
u'EnableDate': datetime.datetime.utcnow()
}],
u'IsTruncated': False,
'ResponseMetadata': {
'RetryAttempts': 0,
'HTTPStatusCode': 200,
'RequestId': '85d05b5b-d2ca-11e6-96b6-8503a2da6360',
'HTTPHeaders': {
'x-amzn-requestid': '85d05b5b-d2ca-11e6-96b6-8503a2da6360',
'date': 'Wed, 04 Jan 2017 22:09:54 GMT',
'content-length': '528',
'content-type': 'text/xml'}
}
}
iam_client = mock.Mock()
iam_client.list_mfa_devices.return_value = list_mfa_devices
serial = awsmfautils.get_serial(iam_client, 'foobar')
iam_client.list_mfa_devices.assert_called_once_with(UserName='foobar')
assert serial == 'arn:aws:iam::000000000000:mfa/foobar'
def test_get_role_arn():
norole = awsmfautils.get_role_arn(
'arn:aws:iam::000000000000:user/foobar', None)
assert norole == ''
rolebar = awsmfautils.get_role_arn(
'arn:aws:iam::000000000000:user/foobar', 'rolebar')
assert rolebar == 'arn:aws:iam::000000000000:role/rolebar'
rolebar_acct = awsmfautils.get_role_arn(
'arn:aws:iam::000000000000:user/foobar', 'rolebar', '111111111111')
assert rolebar_acct == 'arn:aws:iam::111111111111:role/rolebar'
| none | 1 | 2.064958 | 2 | |
jsb/plugs/common/remind.py | NURDspace/jsonbot | 1 | 6621590 | # jsb/plugs/common/remind.py
#
#
""" remind people .. say txt when somebody gets active """
## jsb imports
from jsb.utils.generic import getwho
from jsb.lib.commands import cmnds
from jsb.lib.examples import examples
from jsb.lib.callbacks import callbacks
from jsb.lib.persist import PlugPersist
## basic imports
import time
import os
## Remind-class
class Remind(PlugPersist):
""" remind object """
def __init__(self, name):
PlugPersist.__init__(self, name)
def add(self, who, data):
""" add a remind txt """
if not self.data.has_key(who):
self.data[who] = []
self.data[who].append(data)
self.save()
def wouldremind(self, userhost):
""" check if there is a remind for userhost """
try:
reminds = self.data[userhost]
if reminds == None or reminds == []:
return False
except KeyError:
return False
return True
def remind(self, bot, userhost):
""" send a user all registered reminds """
reminds = self.data[userhost]
if not reminds:
return
for i in reminds:
ttime = None
try:
(tonick, fromnick, txt, ttime) = i
except ValueError:
(tonick, fromnick, txt) = i
txtformat = '[%s] %s wants to remind you of: %s'
if ttime:
timestr = time.ctime(ttime)
else:
timestr = None
bot.saynocb(tonick, txtformat % (timestr, fromnick, txt))
bot.saynocb(fromnick, '[%s] reminded %s of: %s' % (timestr, tonick, txt))
try: del self.data[userhost]
except KeyError: pass
self.save()
## defines
remind = Remind('remind.data')
assert remind
## callbacks
def preremind(bot, ievent):
""" remind precondition """
return remind.wouldremind(ievent.userhost)
def remindcb(bot, ievent):
""" remind callbacks """
remind.remind(bot, ievent.userhost)
callbacks.add('PRIVMSG', remindcb, preremind, threaded=True)
callbacks.add('JOIN', remindcb, preremind, threaded=True)
callbacks.add('MESSAGE', remindcb, preremind, threaded=True)
callbacks.add('WEB', remindcb, preremind, threaded=True)
callbacks.add('TORNADO', remindcb, preremind, threaded=True)
## remind command
def handle_remind(bot, ievent):
""" arguments: <nick> <txt> - add a remind for a user, as soon as he/she gets online or says something the txt will be send. """
try: who = ievent.args[0] ; txt = ' '.join(ievent.args[1:])
except IndexError: ievent.missing('<nick> <txt>') ; return
if not txt: ievent.missing('<nick> <txt>') ; return
userhost = getwho(bot, who)
if not userhost: ievent.reply("can't find userhost for %s" % who) ; return
else:
remind.add(userhost, [who, ievent.nick, txt, time.time()])
ievent.reply("remind for %s added" % who)
cmnds.add('remind', handle_remind, ['OPER', 'USER', 'GUEST'], allowqueue=False)
examples.add('remind', 'remind <nick> <txt>', 'remind dunker check the bot !')
| # jsb/plugs/common/remind.py
#
#
""" remind people .. say txt when somebody gets active """
## jsb imports
from jsb.utils.generic import getwho
from jsb.lib.commands import cmnds
from jsb.lib.examples import examples
from jsb.lib.callbacks import callbacks
from jsb.lib.persist import PlugPersist
## basic imports
import time
import os
## Remind-class
class Remind(PlugPersist):
""" remind object """
def __init__(self, name):
PlugPersist.__init__(self, name)
def add(self, who, data):
""" add a remind txt """
if not self.data.has_key(who):
self.data[who] = []
self.data[who].append(data)
self.save()
def wouldremind(self, userhost):
""" check if there is a remind for userhost """
try:
reminds = self.data[userhost]
if reminds == None or reminds == []:
return False
except KeyError:
return False
return True
def remind(self, bot, userhost):
""" send a user all registered reminds """
reminds = self.data[userhost]
if not reminds:
return
for i in reminds:
ttime = None
try:
(tonick, fromnick, txt, ttime) = i
except ValueError:
(tonick, fromnick, txt) = i
txtformat = '[%s] %s wants to remind you of: %s'
if ttime:
timestr = time.ctime(ttime)
else:
timestr = None
bot.saynocb(tonick, txtformat % (timestr, fromnick, txt))
bot.saynocb(fromnick, '[%s] reminded %s of: %s' % (timestr, tonick, txt))
try: del self.data[userhost]
except KeyError: pass
self.save()
## defines
remind = Remind('remind.data')
assert remind
## callbacks
def preremind(bot, ievent):
""" remind precondition """
return remind.wouldremind(ievent.userhost)
def remindcb(bot, ievent):
""" remind callbacks """
remind.remind(bot, ievent.userhost)
callbacks.add('PRIVMSG', remindcb, preremind, threaded=True)
callbacks.add('JOIN', remindcb, preremind, threaded=True)
callbacks.add('MESSAGE', remindcb, preremind, threaded=True)
callbacks.add('WEB', remindcb, preremind, threaded=True)
callbacks.add('TORNADO', remindcb, preremind, threaded=True)
## remind command
def handle_remind(bot, ievent):
""" arguments: <nick> <txt> - add a remind for a user, as soon as he/she gets online or says something the txt will be send. """
try: who = ievent.args[0] ; txt = ' '.join(ievent.args[1:])
except IndexError: ievent.missing('<nick> <txt>') ; return
if not txt: ievent.missing('<nick> <txt>') ; return
userhost = getwho(bot, who)
if not userhost: ievent.reply("can't find userhost for %s" % who) ; return
else:
remind.add(userhost, [who, ievent.nick, txt, time.time()])
ievent.reply("remind for %s added" % who)
cmnds.add('remind', handle_remind, ['OPER', 'USER', 'GUEST'], allowqueue=False)
examples.add('remind', 'remind <nick> <txt>', 'remind dunker check the bot !')
| en | 0.648243 | # jsb/plugs/common/remind.py # # remind people .. say txt when somebody gets active ## jsb imports ## basic imports ## Remind-class remind object add a remind txt check if there is a remind for userhost send a user all registered reminds ## defines ## callbacks remind precondition remind callbacks ## remind command arguments: <nick> <txt> - add a remind for a user, as soon as he/she gets online or says something the txt will be send. | 2.470037 | 2 |
django/backend/nt_search/migrations/0004_remove_response_alignments.py | joeytab/ginkgo-project | 0 | 6621591 | # Generated by Django 3.2.7 on 2021-11-30 02:53
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('nt_search', '0003_alter_response_alignments'),
]
operations = [
migrations.RemoveField(
model_name='response',
name='alignments',
),
]
| # Generated by Django 3.2.7 on 2021-11-30 02:53
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('nt_search', '0003_alter_response_alignments'),
]
operations = [
migrations.RemoveField(
model_name='response',
name='alignments',
),
]
| en | 0.870599 | # Generated by Django 3.2.7 on 2021-11-30 02:53 | 1.350754 | 1 |
isaac-run-selector.py | gaizkadc/isaac-run-selector | 0 | 6621592 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Imports
import random
# Mode selector
def select_mode ():
mode = {
1: "Normal Run",
2: "Challenge"
}
mode_key = random.randint(1,2)
print mode[mode_key]
return mode_key
# Character selector
def select_character ():
character = {
1: "Isaac",
2: "Magdalene",
3: "Cain",
4: "Judas",
5: "???",
6: "Eve",
7: "Samson",
8: "Azazel",
9: "Lazarus",
10: "Eden",
11: "Lilith",
12: "Apollyon"
}
character_key = random.randint(1,12)
print "├─ "+character[character_key]
# Difficulty selector
def select_difficulty():
difficulty = {
1: "Normal",
2: "Hard",
3: "Greed"
}
difficulty_key = random.randint(1,3)
print "└── "+difficulty[difficulty_key]
# Select challenge
def select_challenge ():
challenge = {
1: "Pitch Black",
2: "High Brow",
3: "Head Trauma",
4: "Darkness Falls",
5: "The Tank",
6: "Solar System",
7: "Suicide King",
8: "Cat Got Your Tongue",
9: "Demo Man",
10: "Cursed!",
11: "Glass Cannon",
12: "When Life Gives You Lemons",
13: "Beans!",
14: "It's In The Cards",
15: "Slow Roll",
16: "Computer Savvy",
17: "Waka Waka",
18: "The Host",
19: "The Family Man",
20: "Purist",
21: "XXXXXXXXL",
22: "SPEED!",
23: "Blue Bomber",
24: "PAY TO PLAY",
25: "Have a Heart",
26: "I RULE!",
27: "BRAINS!",
28: "PRIDE DAY!",
29: "Onan's Streak",
30: "The Guardian",
31: "Backasswards",
32: "<NAME>",
33: "<NAME>",
34: "Ultra Hard",
35: "Pong"
}
challenge_key = random.randint(1,35)
print "└─ "+challenge[challenge_key]
# Main
mode = select_mode()
if mode == 1:
select_character()
select_difficulty()
else:
select_challenge()
print "\nHave fun!" | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Imports
import random
# Mode selector
def select_mode ():
mode = {
1: "Normal Run",
2: "Challenge"
}
mode_key = random.randint(1,2)
print mode[mode_key]
return mode_key
# Character selector
def select_character ():
character = {
1: "Isaac",
2: "Magdalene",
3: "Cain",
4: "Judas",
5: "???",
6: "Eve",
7: "Samson",
8: "Azazel",
9: "Lazarus",
10: "Eden",
11: "Lilith",
12: "Apollyon"
}
character_key = random.randint(1,12)
print "├─ "+character[character_key]
# Difficulty selector
def select_difficulty():
difficulty = {
1: "Normal",
2: "Hard",
3: "Greed"
}
difficulty_key = random.randint(1,3)
print "└── "+difficulty[difficulty_key]
# Select challenge
def select_challenge ():
challenge = {
1: "Pitch Black",
2: "High Brow",
3: "Head Trauma",
4: "Darkness Falls",
5: "The Tank",
6: "Solar System",
7: "Suicide King",
8: "Cat Got Your Tongue",
9: "Demo Man",
10: "Cursed!",
11: "Glass Cannon",
12: "When Life Gives You Lemons",
13: "Beans!",
14: "It's In The Cards",
15: "Slow Roll",
16: "Computer Savvy",
17: "Waka Waka",
18: "The Host",
19: "The Family Man",
20: "Purist",
21: "XXXXXXXXL",
22: "SPEED!",
23: "Blue Bomber",
24: "PAY TO PLAY",
25: "Have a Heart",
26: "I RULE!",
27: "BRAINS!",
28: "PRIDE DAY!",
29: "Onan's Streak",
30: "The Guardian",
31: "Backasswards",
32: "<NAME>",
33: "<NAME>",
34: "Ultra Hard",
35: "Pong"
}
challenge_key = random.randint(1,35)
print "└─ "+challenge[challenge_key]
# Main
mode = select_mode()
if mode == 1:
select_character()
select_difficulty()
else:
select_challenge()
print "\nHave fun!" | en | 0.651361 | #!/usr/bin/python # -*- coding: utf-8 -*- # Imports # Mode selector # Character selector # Difficulty selector # Select challenge # Main | 3.454755 | 3 |
iconservice/iconscore/icx.py | bayeshack2016/icon-service | 52 | 6621593 | <filename>iconservice/iconscore/icx.py
# -*- coding: utf-8 -*-
# Copyright 2018 ICON Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from .icon_score_constant import STR_FALLBACK
from .icon_score_context_util import IconScoreContextUtil
from .internal_call import InternalCall
from ..base.address import GOVERNANCE_SCORE_ADDRESS
if TYPE_CHECKING:
from .icon_score_context import IconScoreContext
from ..base.address import Address
class Icx(object):
"""Class for handling ICX coin transfer
These functions are intended to be used for SCORE development.
"""
def __init__(self, context: 'IconScoreContext', address: 'Address') -> None:
"""Constructor
"""
self._context = context
self._address = address
def transfer(self, addr_to: 'Address', amount: int) -> None:
"""
transfer the amount of icx to the given 'addr_to'
If failed, an exception will be raised
:param addr_to: receiver address
:param amount: the amount of icx to transfer (unit: loop)
"""
InternalCall.other_external_call(self._context, self._address, addr_to, amount, STR_FALLBACK)
def send(self, addr_to: 'Address', amount: int) -> bool:
"""
transfer the amount of icx to the given 'addr_to'
:param addr_to: receiver address
:param amount: the amount of icx to transfer (unit: loop)
:return: True(success) False(failed)
"""
try:
self.transfer(addr_to, amount)
if not addr_to.is_contract and self._is_icx_send_defective():
return False
return True
except:
return False
def get_balance(self, address: 'Address') -> int:
"""
Returns the ICX balance of given address
:param address: address
:return: ICX balance of given address
"""
return InternalCall.icx_get_balance(self._context, address)
# noinspection PyBroadException
def _is_icx_send_defective(self) -> bool:
try:
governance_score = IconScoreContextUtil.get_builtin_score(
self._context, GOVERNANCE_SCORE_ADDRESS)
if hasattr(governance_score, 'getVersion'):
version = governance_score.getVersion()
return version == '0.0.2'
except BaseException:
pass
return False
| <filename>iconservice/iconscore/icx.py
# -*- coding: utf-8 -*-
# Copyright 2018 ICON Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from .icon_score_constant import STR_FALLBACK
from .icon_score_context_util import IconScoreContextUtil
from .internal_call import InternalCall
from ..base.address import GOVERNANCE_SCORE_ADDRESS
if TYPE_CHECKING:
from .icon_score_context import IconScoreContext
from ..base.address import Address
class Icx(object):
"""Class for handling ICX coin transfer
These functions are intended to be used for SCORE development.
"""
def __init__(self, context: 'IconScoreContext', address: 'Address') -> None:
"""Constructor
"""
self._context = context
self._address = address
def transfer(self, addr_to: 'Address', amount: int) -> None:
"""
transfer the amount of icx to the given 'addr_to'
If failed, an exception will be raised
:param addr_to: receiver address
:param amount: the amount of icx to transfer (unit: loop)
"""
InternalCall.other_external_call(self._context, self._address, addr_to, amount, STR_FALLBACK)
def send(self, addr_to: 'Address', amount: int) -> bool:
"""
transfer the amount of icx to the given 'addr_to'
:param addr_to: receiver address
:param amount: the amount of icx to transfer (unit: loop)
:return: True(success) False(failed)
"""
try:
self.transfer(addr_to, amount)
if not addr_to.is_contract and self._is_icx_send_defective():
return False
return True
except:
return False
def get_balance(self, address: 'Address') -> int:
"""
Returns the ICX balance of given address
:param address: address
:return: ICX balance of given address
"""
return InternalCall.icx_get_balance(self._context, address)
# noinspection PyBroadException
def _is_icx_send_defective(self) -> bool:
try:
governance_score = IconScoreContextUtil.get_builtin_score(
self._context, GOVERNANCE_SCORE_ADDRESS)
if hasattr(governance_score, 'getVersion'):
version = governance_score.getVersion()
return version == '0.0.2'
except BaseException:
pass
return False
| en | 0.827194 | # -*- coding: utf-8 -*- # Copyright 2018 ICON Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Class for handling ICX coin transfer These functions are intended to be used for SCORE development. Constructor transfer the amount of icx to the given 'addr_to' If failed, an exception will be raised :param addr_to: receiver address :param amount: the amount of icx to transfer (unit: loop) transfer the amount of icx to the given 'addr_to' :param addr_to: receiver address :param amount: the amount of icx to transfer (unit: loop) :return: True(success) False(failed) Returns the ICX balance of given address :param address: address :return: ICX balance of given address # noinspection PyBroadException | 2.277047 | 2 |
view_real_results.py | crislmfroes/Parallel-Manipulation-DRL | 2 | 6621594 | from matplotlib import pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import pickle
import os
path = os.path.dirname(os.path.abspath(__file__))
list_dir = os.listdir(path + '/real_results/')
threshold_x = 10
threshold_y = 30
threshold = 10
STAGE = 4
c = 7
def antispike(old_list_x, old_list_y):
new_list_x = list()
new_list_y = list()
for index in range(1, len(old_list_x)):
if abs(old_list_x[index] - old_list_x[index-1]) < threshold and abs(old_list_y[index] - old_list_y[index-1]) < threshold:
new_list_x.append(old_list_x[index])
new_list_y.append(old_list_y[index])
return new_list_x, new_list_y
def antispike2(old_list_x, old_list_y):
pivot_x = old_list_x[0]
pivot_y = old_list_y[0]
new_list_x = list()
new_list_y = list()
for index in range(1, len(old_list_x)):
if abs(old_list_x[index] - pivot_x) < threshold and abs(old_list_y[index] - pivot_y) < threshold:
pivot_x = old_list_x[index]
pivot_y = old_list_y[index]
new_list_x.append(old_list_x[index])
new_list_y.append(old_list_y[index])
return new_list_x, new_list_y
def open_test_data(i):
return open(path + '/real_results/PDSRL_P_Sl_episode{}'.format(i), 'rb')
stage = mpimg.imread(path+'/media/stage_{}_real.png'.format(STAGE))
data = list()
for i in range(1, 15):
with open_test_data(i) as f:
data.append(pickle.load(f))
color = {0: 'firebrick', 1: 'tomato', 2: 'peru', 3: 'gold', 4: 'dodgerblue', 5: 'springgreen', 6: 'indigo', 7: 'deeppink'}
data = np.array(data)[[0, 1, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13]]
size = len(data)
plt.imshow(stage)
rewards = list()
times = list()
for i in range(size):
rewards.append(1 if data[i][0] == 20 else 0)
times.append(data[i][1])
print(rewards)
print(times)
print('')
print('Valores testados:', size)
print('Time mean:', np.mean(times), 'std:', np.std(times))
print('Sucess rate:', (sum(rewards)/size) * 100, '%')
for l in range(size):
x = []
y = []
for x_n, y_n in data[l][4]:
x.append(x_n)
y.append(y_n)
x = np.array(x)
x = x / 1.7
x += 10
y = np.array(y)
y = y / 1.4
y -= 10
x, y = antispike(x, y)
#x, y = antispike(x, y)
plt.plot(x, y, color=color[c], linestyle='-', linewidth=2)
plt.show()
| from matplotlib import pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import pickle
import os
path = os.path.dirname(os.path.abspath(__file__))
list_dir = os.listdir(path + '/real_results/')
threshold_x = 10
threshold_y = 30
threshold = 10
STAGE = 4
c = 7
def antispike(old_list_x, old_list_y):
new_list_x = list()
new_list_y = list()
for index in range(1, len(old_list_x)):
if abs(old_list_x[index] - old_list_x[index-1]) < threshold and abs(old_list_y[index] - old_list_y[index-1]) < threshold:
new_list_x.append(old_list_x[index])
new_list_y.append(old_list_y[index])
return new_list_x, new_list_y
def antispike2(old_list_x, old_list_y):
pivot_x = old_list_x[0]
pivot_y = old_list_y[0]
new_list_x = list()
new_list_y = list()
for index in range(1, len(old_list_x)):
if abs(old_list_x[index] - pivot_x) < threshold and abs(old_list_y[index] - pivot_y) < threshold:
pivot_x = old_list_x[index]
pivot_y = old_list_y[index]
new_list_x.append(old_list_x[index])
new_list_y.append(old_list_y[index])
return new_list_x, new_list_y
def open_test_data(i):
return open(path + '/real_results/PDSRL_P_Sl_episode{}'.format(i), 'rb')
stage = mpimg.imread(path+'/media/stage_{}_real.png'.format(STAGE))
data = list()
for i in range(1, 15):
with open_test_data(i) as f:
data.append(pickle.load(f))
color = {0: 'firebrick', 1: 'tomato', 2: 'peru', 3: 'gold', 4: 'dodgerblue', 5: 'springgreen', 6: 'indigo', 7: 'deeppink'}
data = np.array(data)[[0, 1, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13]]
size = len(data)
plt.imshow(stage)
rewards = list()
times = list()
for i in range(size):
rewards.append(1 if data[i][0] == 20 else 0)
times.append(data[i][1])
print(rewards)
print(times)
print('')
print('Valores testados:', size)
print('Time mean:', np.mean(times), 'std:', np.std(times))
print('Sucess rate:', (sum(rewards)/size) * 100, '%')
for l in range(size):
x = []
y = []
for x_n, y_n in data[l][4]:
x.append(x_n)
y.append(y_n)
x = np.array(x)
x = x / 1.7
x += 10
y = np.array(y)
y = y / 1.4
y -= 10
x, y = antispike(x, y)
#x, y = antispike(x, y)
plt.plot(x, y, color=color[c], linestyle='-', linewidth=2)
plt.show()
| en | 0.156207 | #x, y = antispike(x, y) | 2.403385 | 2 |
Curso_Python_3_UDEMY/banco_dados/sqllite.py | DanilooSilva/Cursos_de_Python | 0 | 6621595 | <reponame>DanilooSilva/Cursos_de_Python<gh_stars>0
from sqlite3 import connect, ProgrammingError, Row
tabela_grupo = '''CREATE TABLE IF NOT EXISTS GRUPOS(
ID INTEGER PRIMARY KEY AUTOINCREMENT,
DESCRICAO VARCHAR(30)
)'''
tabela_contatos = """
CREATE TABLE IF NOT EXISTS CONTATOS (
ID INTEGER PRIMARY KEY AUTOINCREMENT,
NOME VARCHAR(50),
TEL VARCHAR(40),
IDGRUPO INTEGER,
FOREIGN KEY (IDGRUPO) REFERENCES GRUPOS (ID)
)
"""
insert_gurpos = 'INSERT INTO GRUPOS (DESCRICAO) VALUES (?)'
select_grupos = 'SELECT ID, DESCRICAO FROM GRUPOS'
insert_contatos = 'INSERT INTO CONTATOS (NOME, TEL, IDGRUPO) VALUES (?, ?, ?)'
select = """
SELECT B.DESCRICAO AS GRUPO, A.NOME AS CONTATO
FROM CONTATOS A
INNER JOIN GRUPOS B ON A.IDGRUPO = B.ID
ORDER BY GRUPO, CONTATO
"""
try:
conexao = connect(':memory:')
conexao.row_factory = Row
cursor = conexao.cursor()
cursor.execute(tabela_grupo)
cursor.execute(tabela_contatos)
cursor.executemany(insert_gurpos, (('Casa',), ('Trabalho',)))
cursor.execute(select_grupos)
grupos = {row['DESCRICAO']: row['ID'] for row in cursor.fetchall()}
contatos = (
('Danilo', 124, grupos['Casa']),
('Maria', 352, grupos['Casa']),
('Scarlett', 557, grupos['Trabalho']),
('Allanys', 785, None),
('Mel', 879, None),
('Ohara', 597, None),
)
cursor.executemany(insert_contatos, contatos)
cursor.execute(select)
for contato in cursor:
print(contato['CONTATO'], contato['GRUPO'])
except ProgrammingError as e:
print(f'Erro: {e.msg}') | from sqlite3 import connect, ProgrammingError, Row
tabela_grupo = '''CREATE TABLE IF NOT EXISTS GRUPOS(
ID INTEGER PRIMARY KEY AUTOINCREMENT,
DESCRICAO VARCHAR(30)
)'''
tabela_contatos = """
CREATE TABLE IF NOT EXISTS CONTATOS (
ID INTEGER PRIMARY KEY AUTOINCREMENT,
NOME VARCHAR(50),
TEL VARCHAR(40),
IDGRUPO INTEGER,
FOREIGN KEY (IDGRUPO) REFERENCES GRUPOS (ID)
)
"""
insert_gurpos = 'INSERT INTO GRUPOS (DESCRICAO) VALUES (?)'
select_grupos = 'SELECT ID, DESCRICAO FROM GRUPOS'
insert_contatos = 'INSERT INTO CONTATOS (NOME, TEL, IDGRUPO) VALUES (?, ?, ?)'
select = """
SELECT B.DESCRICAO AS GRUPO, A.NOME AS CONTATO
FROM CONTATOS A
INNER JOIN GRUPOS B ON A.IDGRUPO = B.ID
ORDER BY GRUPO, CONTATO
"""
try:
conexao = connect(':memory:')
conexao.row_factory = Row
cursor = conexao.cursor()
cursor.execute(tabela_grupo)
cursor.execute(tabela_contatos)
cursor.executemany(insert_gurpos, (('Casa',), ('Trabalho',)))
cursor.execute(select_grupos)
grupos = {row['DESCRICAO']: row['ID'] for row in cursor.fetchall()}
contatos = (
('Danilo', 124, grupos['Casa']),
('Maria', 352, grupos['Casa']),
('Scarlett', 557, grupos['Trabalho']),
('Allanys', 785, None),
('Mel', 879, None),
('Ohara', 597, None),
)
cursor.executemany(insert_contatos, contatos)
cursor.execute(select)
for contato in cursor:
print(contato['CONTATO'], contato['GRUPO'])
except ProgrammingError as e:
print(f'Erro: {e.msg}') | en | 0.257806 | CREATE TABLE IF NOT EXISTS GRUPOS( ID INTEGER PRIMARY KEY AUTOINCREMENT, DESCRICAO VARCHAR(30) ) CREATE TABLE IF NOT EXISTS CONTATOS ( ID INTEGER PRIMARY KEY AUTOINCREMENT, NOME VARCHAR(50), TEL VARCHAR(40), IDGRUPO INTEGER, FOREIGN KEY (IDGRUPO) REFERENCES GRUPOS (ID) ) SELECT B.DESCRICAO AS GRUPO, A.NOME AS CONTATO FROM CONTATOS A INNER JOIN GRUPOS B ON A.IDGRUPO = B.ID ORDER BY GRUPO, CONTATO | 3.697391 | 4 |
kyu_6/unique_in_order/unique_in_order.py | pedrocodacyorg2/codewars | 1 | 6621596 | <filename>kyu_6/unique_in_order/unique_in_order.py
# Created by <NAME>.
# GitHub: https://github.com/ikostan
# LinkedIn: https://www.linkedin.com/in/egor-kostan/
from typing import Iterable, List
def unique_in_order(iterable: Iterable) -> list:
"""
Takes as argument a sequence and returns a list
of items without any elements with the same value
next to each other and preserving the original
order of elements.
:param iterable:
:return:
"""
result: List = []
for i in iterable:
if len(result) == 0 or i != result[-1]:
result.append(i)
return result
| <filename>kyu_6/unique_in_order/unique_in_order.py
# Created by <NAME>.
# GitHub: https://github.com/ikostan
# LinkedIn: https://www.linkedin.com/in/egor-kostan/
from typing import Iterable, List
def unique_in_order(iterable: Iterable) -> list:
"""
Takes as argument a sequence and returns a list
of items without any elements with the same value
next to each other and preserving the original
order of elements.
:param iterable:
:return:
"""
result: List = []
for i in iterable:
if len(result) == 0 or i != result[-1]:
result.append(i)
return result
| en | 0.720248 | # Created by <NAME>. # GitHub: https://github.com/ikostan # LinkedIn: https://www.linkedin.com/in/egor-kostan/ Takes as argument a sequence and returns a list of items without any elements with the same value next to each other and preserving the original order of elements. :param iterable: :return: | 4.007282 | 4 |
backend/api/geoutils.py | hrbonz/wechat_hackathon_AQ | 2 | 6621597 | # -*- coding: utf-8 -*-
from geopy.distance import vincenty
import Geohash
def hash2tag(geohash):
return Geohash.decode(geohash.rstrip("0"))
def get_city(geotag):
# FIXME(stefan.berder): get to use baidu backend to resolve city
# g = geocoder.baidu()
# return g.city
return "shanghai"
def get_closest(geotag, neighbors):
closest_locname = None
mindist = None
for (locname, geohash) in neighbors.items():
loc_geotag = hash2tag(geohash)
dist = vincenty(geotag, loc_geotag).km
if mindist is None:
mindist = dist
if dist <= mindist:
mindist = dist
closest_locname = locname
return (locname, mindist)
| # -*- coding: utf-8 -*-
from geopy.distance import vincenty
import Geohash
def hash2tag(geohash):
return Geohash.decode(geohash.rstrip("0"))
def get_city(geotag):
# FIXME(stefan.berder): get to use baidu backend to resolve city
# g = geocoder.baidu()
# return g.city
return "shanghai"
def get_closest(geotag, neighbors):
closest_locname = None
mindist = None
for (locname, geohash) in neighbors.items():
loc_geotag = hash2tag(geohash)
dist = vincenty(geotag, loc_geotag).km
if mindist is None:
mindist = dist
if dist <= mindist:
mindist = dist
closest_locname = locname
return (locname, mindist)
| en | 0.598972 | # -*- coding: utf-8 -*- # FIXME(stefan.berder): get to use baidu backend to resolve city # g = geocoder.baidu() # return g.city | 3.122829 | 3 |
Exercise05/5-35.py | ywyz/IntroducingToProgrammingUsingPython | 0 | 6621598 | <gh_stars>0
'''
@Date: 2019-11-06 19:34:16
@Author: ywyz
@LastModifiedBy: ywyz
@Github: https://github.com/ywyz
@LastEditors: ywyz
@LastEditTime: 2019-11-06 19:35:11
'''
for i in range(1, 10001):
k = 0
for j in range(1, i):
if (i % j == 0):
k += j
if k == i:
print(k)
| '''
@Date: 2019-11-06 19:34:16
@Author: ywyz
@LastModifiedBy: ywyz
@Github: https://github.com/ywyz
@LastEditors: ywyz
@LastEditTime: 2019-11-06 19:35:11
'''
# Search 1..10000 for perfect numbers: numbers equal to the sum of their
# proper divisors (e.g. 6 = 1 + 2 + 3). Prints 6, 28, 496 and 8128.
for number in range(1, 10001):
    divisor_sum = sum(d for d in range(1, number) if number % d == 0)
    if divisor_sum == number:
        print(divisor_sum)
streamlit_app.py | pipegalera/BasketballReference-Webscraper | 0 | 6621599 | from functions_app import *
# Page header and author credit.
st.markdown(" # :basketball: NBA Data Scraper :basketball:")
st.subheader('Web App by [Pipe Galera](https://www.pipegalera.com/)')
########################### Lists and Dictionaries ###########################
# Season label looks like "YYYY-YYYY"; characters 5+ hold the end year.
current_season = int(start_of_the_season_indicator()[5:])
seasons_dict, seasons_list = get_seasons_dict(1950, current_season+1)
########################### Data Scraper ###############################
# Input form: a seasons multi-select (defaults to the first 22 entries),
# a data-type selector and a submit button.
# NOTE(review): stats_dict is presumably provided by functions_app via the
# star import at the top of the file — confirm.
with st.form('Form'):
    selected_seasons = st.multiselect('NBA Seasons:', seasons_list, seasons_list[:22])
    selected_stats_type = st.selectbox('Data:', list(stats_dict.keys()))
    submit = st.form_submit_button(label='Submit')
    if submit:
        # Dispatch on the selected data type and scrape accordingly.
        if selected_stats_type == 'Teams statistics':
            with st.spinner('Loading...'):
                df = loading_teams_data(seasons_dict, selected_seasons)
                df_header = 'Team stats for the ' + str(len(selected_seasons)) + ' selected seasons'
        elif selected_stats_type == 'Players salary (only available from 1990 on)':
            with st.spinner('Loading...'):
                df = nba_salaries(seasons_dict, selected_seasons)
                df_header = 'Player stats for the ' + str(len(selected_seasons)) + ' selected seasons'
        else:
            with st.spinner('Loading...'):
                df = loading_players_data(seasons_dict, stats_dict, selected_seasons, selected_stats_type)
                df_header = 'Player stats for the ' + str(len(selected_seasons)) + ' selected seasons'
        # Render the scraped table plus source attribution.
        st.subheader(df_header)
        st.write(df)
        st.markdown("**Source:** Real-time scraped from [Basketball-reference.com](https://www.basketball-reference.com/). Salaries data comes from [Hoopshype.com](https://hoopshype.com/salaries/)")
        st.markdown("---")
        # Download links centered in the middle of three columns.
        # NOTE(review): st.beta_columns is deprecated in modern Streamlit —
        # consider st.columns when upgrading.
        column1, column2, column3 = st.beta_columns(3)
        with column2:
            st.markdown(link_csv(df), unsafe_allow_html=True)
            st.markdown(link_excel(df), unsafe_allow_html=True)
    else:
        pass
| from functions_app import *
# Page header and author credit.
st.markdown(" # :basketball: NBA Data Scraper :basketball:")
st.subheader('Web App by [Pipe Galera](https://www.pipegalera.com/)')
########################### Lists and Dictionaries ###########################
# Season label looks like "YYYY-YYYY"; characters 5+ hold the end year.
current_season = int(start_of_the_season_indicator()[5:])
seasons_dict, seasons_list = get_seasons_dict(1950, current_season+1)
########################### Data Scraper ###############################
# Input form: a seasons multi-select (defaults to the first 22 entries),
# a data-type selector and a submit button.
# NOTE(review): stats_dict is presumably provided by functions_app via the
# star import at the top of the file — confirm.
with st.form('Form'):
    selected_seasons = st.multiselect('NBA Seasons:', seasons_list, seasons_list[:22])
    selected_stats_type = st.selectbox('Data:', list(stats_dict.keys()))
    submit = st.form_submit_button(label='Submit')
    if submit:
        # Dispatch on the selected data type and scrape accordingly.
        if selected_stats_type == 'Teams statistics':
            with st.spinner('Loading...'):
                df = loading_teams_data(seasons_dict, selected_seasons)
                df_header = 'Team stats for the ' + str(len(selected_seasons)) + ' selected seasons'
        elif selected_stats_type == 'Players salary (only available from 1990 on)':
            with st.spinner('Loading...'):
                df = nba_salaries(seasons_dict, selected_seasons)
                df_header = 'Player stats for the ' + str(len(selected_seasons)) + ' selected seasons'
        else:
            with st.spinner('Loading...'):
                df = loading_players_data(seasons_dict, stats_dict, selected_seasons, selected_stats_type)
                df_header = 'Player stats for the ' + str(len(selected_seasons)) + ' selected seasons'
        # Render the scraped table plus source attribution.
        st.subheader(df_header)
        st.write(df)
        st.markdown("**Source:** Real-time scraped from [Basketball-reference.com](https://www.basketball-reference.com/). Salaries data comes from [Hoopshype.com](https://hoopshype.com/salaries/)")
        st.markdown("---")
        # Download links centered in the middle of three columns.
        # NOTE(review): st.beta_columns is deprecated in modern Streamlit —
        # consider st.columns when upgrading.
        column1, column2, column3 = st.beta_columns(3)
        with column2:
            st.markdown(link_csv(df), unsafe_allow_html=True)
            st.markdown(link_excel(df), unsafe_allow_html=True)
    else:
        pass
| de | 0.645552 | # :basketball: NBA Data Scraper :basketball:") ########################### Lists and Dictionaries ########################### ########################### Data Scraper ############################### | 3.355426 | 3 |
fcrepo_verify/constants.py | awoods/fcrepo-import-export-verify | 5 | 6621600 | <reponame>awoods/fcrepo-import-export-verify
__author__ = 'dbernstein'
# Maps RDF serialization MIME types to the file extension used on export.
EXT_MAP = {"application/ld+json": ".json",
           "application/n-triples": ".nt",
           "application/rdf+xml": ".xml",
           "text/n3": ".n3",
           "text/rdf+n3": ".n3",
           "text/plain": ".txt",
           "text/turtle": ".ttl",
           "application/x-turtle": ".ttl"
           }
# LDP / Fedora predicate URIs used when interpreting repository triples.
LDP_NON_RDF_SOURCE = "http://www.w3.org/ns/ldp#NonRDFSource"
LDP_CONTAINS = "http://www.w3.org/ns/ldp#contains"
FEDORA_HAS_VERSION = "http://fedora.info/definitions/v4/repository#hasVersion"
FEDORA_HAS_VERSIONS = \
    "http://fedora.info/definitions/v4/repository#hasVersions"
# File extensions distinguishing internally vs externally stored binaries.
EXT_BINARY_INTERNAL = ".binary"
EXT_BINARY_EXTERNAL = ".external"
# Payload directory inside a BagIt bag.
BAG_DATA_DIR = "/data"
# HTTP Prefer header asking Fedora for a minimal RDF representation.
MINIMAL_HEADER = {"Prefer": "return=minimal"}
__author__ = 'dbernstein'

# Maps RDF serialization MIME types to the file extension used on export.
EXT_MAP = {
    "application/ld+json": ".json",
    "application/n-triples": ".nt",
    "application/rdf+xml": ".xml",
    "text/n3": ".n3",
    "text/rdf+n3": ".n3",
    "text/plain": ".txt",
    "text/turtle": ".ttl",
    "application/x-turtle": ".ttl",
}

# LDP / Fedora predicate URIs used when interpreting repository triples.
LDP_NON_RDF_SOURCE = "http://www.w3.org/ns/ldp#NonRDFSource"
LDP_CONTAINS = "http://www.w3.org/ns/ldp#contains"
FEDORA_HAS_VERSION = "http://fedora.info/definitions/v4/repository#hasVersion"
FEDORA_HAS_VERSIONS = "http://fedora.info/definitions/v4/repository#hasVersions"

# File extensions distinguishing internally vs externally stored binaries.
EXT_BINARY_INTERNAL = ".binary"
EXT_BINARY_EXTERNAL = ".external"

# Payload directory inside a BagIt bag.
BAG_DATA_DIR = "/data"

# HTTP Prefer header asking Fedora for a minimal RDF representation.
MINIMAL_HEADER = {"Prefer": "return=minimal"}
Problems/Study Plans/Algorithm/Algorithm I/20_merge_two_sorted_lists.py | andor2718/LeetCode | 1 | 6621601 | <gh_stars>1-10
# https://leetcode.com/problems/merge-two-sorted-lists/
from typing import Optional
# Definition for singly-linked list.
class ListNode:
    """A node of a singly-linked list."""

    def __init__(self, val=0, next=None):
        # Payload and pointer to the following node (None terminates the list).
        self.val = val
        self.next = next

    def __repr__(self):
        # Render the whole chain, e.g. "1->2->None".
        return '{}->{}'.format(self.val, self.next)
class Solution:
    def mergeTwoLists(
        self, l1: Optional[ListNode], l2: Optional[ListNode]
    ) -> Optional[ListNode]:
        """Merge two sorted linked lists into one sorted list.

        Nodes are relinked in place; only the dummy head is newly allocated.
        Ties keep nodes from ``l1`` first, so the merge is stable.
        """
        dummy = ListNode()
        current = dummy
        while l1 and l2:
            if l2.val < l1.val:
                current.next, l2 = l2, l2.next
            else:
                current.next, l1 = l1, l1.next
            current = current.next
        # Append whichever list still has nodes left.
        current.next = l1 if l1 else l2
        return dummy.next
| # https://leetcode.com/problems/merge-two-sorted-lists/
from typing import Optional
# Definition for singly-linked list.
class ListNode:
    """A node of a singly-linked list."""

    def __init__(self, val=0, next=None):
        # Payload and pointer to the following node (None terminates the list).
        self.val = val
        self.next = next

    def __repr__(self):
        # Render the whole chain, e.g. "1->2->None".
        return '{}->{}'.format(self.val, self.next)
class Solution:
    def mergeTwoLists(
        self, l1: Optional[ListNode], l2: Optional[ListNode]
    ) -> Optional[ListNode]:
        """Merge two sorted linked lists into one sorted list.

        Nodes are relinked in place; only the dummy head is newly allocated.
        Ties keep nodes from ``l1`` first, so the merge is stable.
        """
        dummy = ListNode()
        current = dummy
        while l1 and l2:
            if l2.val < l1.val:
                current.next, l2 = l2, l2.next
            else:
                current.next, l1 = l1, l1.next
            current = current.next
        # Append whichever list still has nodes left.
        current.next = l1 if l1 else l2
        return dummy.next
sunpy/net/tests/test_hek.py | drewleonard42/sunpy | 0 | 6621602 | <gh_stars>0
# -*- coding: utf-8 -*-
# Author: <NAME> <<EMAIL>>
#pylint: disable=W0613
import pytest
from sunpy.net import hek
from sunpy.net import attr
# Fixture: comparison-operator wrapper around the HEK "foo" query parameter.
@pytest.fixture
def foostrwrap(request):
    return hek.attrs._StringParamAttrWrapper("foo")
# ANDing two different event types can never match a single event, so the
# attr layer must reject the combination with a TypeError.
def test_eventtype_collide():
    with pytest.raises(TypeError):
        hek.attrs.AR & hek.attrs.CE
    with pytest.raises(TypeError):
        (hek.attrs.AR & hek.attrs.Time((2011, 1, 1),
                                       (2011, 1, 2))) & hek.attrs.CE
    with pytest.raises(TypeError):
        (hek.attrs.AR | hek.attrs.Time((2011, 1, 1),
                                       (2011, 1, 2))) & hek.attrs.CE
# ORing event types is allowed and joins their query items with a comma.
def test_eventtype_or():
    assert (hek.attrs.AR | hek.attrs.CE).item == "ar,ce"
# A raw _ParamAttr serializes to exactly one query-parameter dict.
def test_paramattr():
    res = hek.attrs.walker.create(hek.attrs._ParamAttr("foo", "=", "bar"), {})
    assert len(res) == 1
    assert res[0] == {'value0': 'bar', 'op0': '=', 'param0': 'foo'}
# Each Python comparison on the string wrapper must map to the matching HEK
# operator string ('=', '<', '>', '<=', '>=', '!=', 'like').
def test_stringwrapper_eq(foostrwrap):
    res = hek.attrs.walker.create(foostrwrap == "bar", {})
    assert len(res) == 1
    assert res[0] == {'value0': 'bar', 'op0': '=', 'param0': 'foo'}
def test_stringwrapper_lt(foostrwrap):
    res = hek.attrs.walker.create(foostrwrap < "bar", {})
    assert len(res) == 1
    assert res[0] == {'value0': 'bar', 'op0': '<', 'param0': 'foo'}
def test_stringwrapper_gt(foostrwrap):
    res = hek.attrs.walker.create(foostrwrap > "bar", {})
    assert len(res) == 1
    assert res[0] == {'value0': 'bar', 'op0': '>', 'param0': 'foo'}
def test_stringwrapper_le(foostrwrap):
    res = hek.attrs.walker.create(foostrwrap <= "bar", {})
    assert len(res) == 1
    assert res[0] == {'value0': 'bar', 'op0': '<=', 'param0': 'foo'}
def test_stringwrapper_ge(foostrwrap):
    res = hek.attrs.walker.create(foostrwrap >= "bar", {})
    assert len(res) == 1
    assert res[0] == {'value0': 'bar', 'op0': '>=', 'param0': 'foo'}
def test_stringwrapper_ne(foostrwrap):
    res = hek.attrs.walker.create(foostrwrap != "bar", {})
    assert len(res) == 1
    assert res[0] == {'value0': 'bar', 'op0': '!=', 'param0': 'foo'}
def test_stringwrapper_like(foostrwrap):
    res = hek.attrs.walker.create(foostrwrap.like("bar"), {})
    assert len(res) == 1
    assert res[0] == {'value0': 'bar', 'op0': 'like', 'param0': 'foo'}
# The walker must reject attribute types it has no registered handler for.
def test_err_dummyattr_create():
    with pytest.raises(TypeError):
        hek.attrs.walker.create(attr.DummyAttr(), {})
def test_err_dummyattr_apply():
    with pytest.raises(TypeError):
        hek.attrs.walker.apply(attr.DummyAttr(), {})
@pytest.mark.remote_data
def test_hek_client():
    """Round-trip query against the live HEK service.

    Searches for flare (FL) events in a fixed time window and checks that
    item access and ``.get`` agree, and that a missing key yields None.
    """
    startTime = '2011/08/09 07:23:56'
    endTime = '2011/08/09 12:40:29'
    eventType = 'FL'
    hekTime = hek.attrs.Time(startTime, endTime)
    hekEvent = hek.attrs.EventType(eventType)
    h = hek.HEKClient()
    hek_query = h.search(hekTime, hekEvent)
    assert hek_query[0]['event_peaktime'] == hek_query[0].get('event_peaktime')
    # Idiom fix: compare against None with ``is``, not ``==``.
    assert hek_query[0].get('') is None
| # -*- coding: utf-8 -*-
# Author: <NAME> <<EMAIL>>
#pylint: disable=W0613
import pytest
from sunpy.net import hek
from sunpy.net import attr
# Fixture: comparison-operator wrapper around the HEK "foo" query parameter.
@pytest.fixture
def foostrwrap(request):
    return hek.attrs._StringParamAttrWrapper("foo")
# ANDing two different event types can never match a single event, so the
# attr layer must reject the combination with a TypeError.
def test_eventtype_collide():
    with pytest.raises(TypeError):
        hek.attrs.AR & hek.attrs.CE
    with pytest.raises(TypeError):
        (hek.attrs.AR & hek.attrs.Time((2011, 1, 1),
                                       (2011, 1, 2))) & hek.attrs.CE
    with pytest.raises(TypeError):
        (hek.attrs.AR | hek.attrs.Time((2011, 1, 1),
                                       (2011, 1, 2))) & hek.attrs.CE
# ORing event types is allowed and joins their query items with a comma.
def test_eventtype_or():
    assert (hek.attrs.AR | hek.attrs.CE).item == "ar,ce"
# A raw _ParamAttr serializes to exactly one query-parameter dict.
def test_paramattr():
    res = hek.attrs.walker.create(hek.attrs._ParamAttr("foo", "=", "bar"), {})
    assert len(res) == 1
    assert res[0] == {'value0': 'bar', 'op0': '=', 'param0': 'foo'}
# Each Python comparison on the string wrapper must map to the matching HEK
# operator string ('=', '<', '>', '<=', '>=', '!=', 'like').
def test_stringwrapper_eq(foostrwrap):
    res = hek.attrs.walker.create(foostrwrap == "bar", {})
    assert len(res) == 1
    assert res[0] == {'value0': 'bar', 'op0': '=', 'param0': 'foo'}
def test_stringwrapper_lt(foostrwrap):
    res = hek.attrs.walker.create(foostrwrap < "bar", {})
    assert len(res) == 1
    assert res[0] == {'value0': 'bar', 'op0': '<', 'param0': 'foo'}
def test_stringwrapper_gt(foostrwrap):
    res = hek.attrs.walker.create(foostrwrap > "bar", {})
    assert len(res) == 1
    assert res[0] == {'value0': 'bar', 'op0': '>', 'param0': 'foo'}
def test_stringwrapper_le(foostrwrap):
    res = hek.attrs.walker.create(foostrwrap <= "bar", {})
    assert len(res) == 1
    assert res[0] == {'value0': 'bar', 'op0': '<=', 'param0': 'foo'}
def test_stringwrapper_ge(foostrwrap):
    res = hek.attrs.walker.create(foostrwrap >= "bar", {})
    assert len(res) == 1
    assert res[0] == {'value0': 'bar', 'op0': '>=', 'param0': 'foo'}
def test_stringwrapper_ne(foostrwrap):
    res = hek.attrs.walker.create(foostrwrap != "bar", {})
    assert len(res) == 1
    assert res[0] == {'value0': 'bar', 'op0': '!=', 'param0': 'foo'}
def test_stringwrapper_like(foostrwrap):
    res = hek.attrs.walker.create(foostrwrap.like("bar"), {})
    assert len(res) == 1
    assert res[0] == {'value0': 'bar', 'op0': 'like', 'param0': 'foo'}
# The walker must reject attribute types it has no registered handler for.
def test_err_dummyattr_create():
    with pytest.raises(TypeError):
        hek.attrs.walker.create(attr.DummyAttr(), {})
def test_err_dummyattr_apply():
    with pytest.raises(TypeError):
        hek.attrs.walker.apply(attr.DummyAttr(), {})
@pytest.mark.remote_data
def test_hek_client():
    """Round-trip query against the live HEK service.

    Searches for flare (FL) events in a fixed time window and checks that
    item access and ``.get`` agree, and that a missing key yields None.
    """
    startTime = '2011/08/09 07:23:56'
    endTime = '2011/08/09 12:40:29'
    eventType = 'FL'
    hekTime = hek.attrs.Time(startTime, endTime)
    hekEvent = hek.attrs.EventType(eventType)
    h = hek.HEKClient()
    hek_query = h.search(hekTime, hekEvent)
    assert hek_query[0]['event_peaktime'] == hek_query[0].get('event_peaktime')
    # Idiom fix: compare against None with ``is``, not ``==``.
    assert hek_query[0].get('') is None
airflow/contrib/operators/postgres_to_csv_operator.py | katerinekhh/airflow | 0 | 6621603 | import csv
import sqlparse
from airflow.models import BaseOperator
from airflow.hooks.postgres_hook import PostgresHook
from airflow.utils.decorators import apply_defaults
class PostgresToCsvOperator(BaseOperator):
    """
    Executes sql code in a specific Postgres database and creates a .csv file
    with selected data. CSV headers will match column names from sql select statement by default.
    Or can be passed as a parameter.

    :param sql: the sql code to be executed. (templated)
    :type sql: Can receive a str representing a sql statement,
        a list of str (sql statements), or reference to a template file.
        Template reference are recognized by str ending in '.sql'
    :param postgres_conn_id: reference to a specific postgres database
    :type postgres_conn_id: str
    :param csv_file_path: path to csv file, which will be created with selected data.
    :type csv_file_path: str
    :param parameters: (optional) the parameters to render the SQL query with.
        (default value: None, treated as an empty dict)
    :type parameters: dict
    :param headers: list of column names for csv file, if they should not match default headers
        corresponding column names from sql select statement.
        (default value: None)
    :type headers: list[str]
    :param increment: if True, creates a value for %(last_updated_value)s parameter.
        WHERE clause in your sql should contain such parameter.
        (default value: False)
        'incremental sql' used for executing last_updated_value:
        'SELECT MAX({{ task.last_modified_fname }}) FROM {{ task.destination_table }}'
    :type increment: bool
    :param destination_table: table name, from where to select last updated value.
    :type destination_table: str
    :param last_modified_fname: column name to refer to in 'incremental sql'.
    :type last_modified_fname: str
    :param destination_conn_id: reference to a specific postgres database to execute 'incremental sql'.
    :type destination_conn_id: str
    :param default_last_updated_value: default last_updated_value, if None is selected.
        (default value: '1970-01-01 00:00:00+00:00')
    :type default_last_updated_value: str/int
    """
    template_fields = [
        'sql', 'last_updated_sql',
        'destination_table', 'last_modified_fname',
    ]
    template_ext = ['.sql']
    @apply_defaults
    def __init__(  # noqa: CFQ002
            self,
            csv_file_path: str,
            parameters=None,
            sql='',
            postgres_conn_id='',
            destination_conn_id='',
            destination_table='',
            last_modified_fname='',
            headers=None,
            increment=False,
            default_last_updated_value='1970-01-01 00:00:00+00:00',
            *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.sql = sql
        self.postgres_conn_id = postgres_conn_id
        self.destination_conn_id = destination_conn_id
        self.csv_file_path = csv_file_path
        # Bug fix: the default used to be a shared mutable dict (``parameters={}``)
        # that execute() mutated, leaking state between operator instances.
        self.parameters = parameters if parameters is not None else {}
        self.increment = increment
        self.headers = headers
        self.destination_table = destination_table
        self.last_modified_fname = last_modified_fname
        # Rendered through template_fields against this task's attributes.
        self.last_updated_sql = 'SELECT MAX({{ task.last_modified_fname }}) FROM {{ task.destination_table }}'
        self.default_last_updated_value = default_last_updated_value
        if self.parameters and not isinstance(self.parameters, dict):
            raise SyntaxError("Argument 'parameters' must be type - dict")
        if self.increment:
            if not self.last_modified_fname:
                raise SyntaxError("Argument 'last_modified_fname' is required for incremental select")
            if not self.destination_table:
                raise SyntaxError("Argument 'destination_table' is required for incremental select")
    @staticmethod
    def _parse_sql_field_to_csv_header(sql_field) -> str:
        # Derive a csv header from one field of a SELECT list: strip quoting,
        # honor "expr AS alias", drop a "table." prefix, and fall back to the
        # token after a space (implicit alias).
        csv_header = sql_field.lower().strip().replace('\"', '')
        if ' as ' in csv_header:
            csv_header = csv_header.split(' as ')[-1]
        if '.' in csv_header:
            csv_header = csv_header.split('.')[1]
        if ' ' in csv_header:
            csv_header = csv_header.split(' ')[1]
        return csv_header
    def execute(self, context):  # noqa: C901
        # Entry point invoked by the Airflow executor.
        if self.increment:
            last_updated_value = self._extract_last_updated_value()
            self.parameters.update({'last_updated_value': last_updated_value})
        hook = PostgresHook(postgres_conn_id=self.postgres_conn_id)
        results = hook.get_records(sql=self.sql, parameters=self.parameters)
        if not results:
            # Best-effort: a headers-only csv is still written below.
            self.log.info('No data extracted')
        if not self.headers:
            self.headers = self._get_csv_headers_from_sql()
        self._create_csv(results, self.headers)
    def _extract_last_updated_value(self) -> str:
        # Read MAX(last_modified_fname) from the destination table so that the
        # main query can select only rows updated since the previous run.
        hook = PostgresHook(postgres_conn_id=self.destination_conn_id)
        last_updated_field = hook.get_first(sql=self.last_updated_sql)[0]
        if not last_updated_field:
            self.log.info(
                f'Last event value not found, using default value - {self.default_last_updated_value}',
            )
            return self.default_last_updated_value
        self.log.info(f'Last event value was {last_updated_field}')
        return last_updated_field
    def _create_csv(self, results: list, headers: list) -> None:
        # ``newline=''`` is required by the csv module to avoid spurious blank
        # rows on platforms using \r\n line endings.
        with open(self.csv_file_path, 'w', newline='') as csv_file:
            writer_headers = csv.DictWriter(csv_file, fieldnames=headers)
            writer_headers.writeheader()
            writer = csv.writer(csv_file)
            for row in results:
                writer.writerow(row)
        self.log.info('Finished creating csv file')
    def _get_csv_headers_from_sql(self) -> list:
        # Parse the SELECT list of self.sql and turn each selected field into
        # a csv header name.
        parsed_sql = sqlparse.parse(self.sql)[0].tokens
        parsed_sql_fields = []
        for token in parsed_sql:
            if not isinstance(token, sqlparse.sql.IdentifierList):
                continue
            for field in token.get_identifiers():
                parsed_sql_fields.append(field.value)
        headers = []
        for sql_field in parsed_sql_fields:
            csv_header = self._parse_sql_field_to_csv_header(sql_field)
            headers.append(csv_header)
        return headers
import sqlparse
from airflow.models import BaseOperator
from airflow.hooks.postgres_hook import PostgresHook
from airflow.utils.decorators import apply_defaults
class PostgresToCsvOperator(BaseOperator):
"""
Executes sql code in a specific Postgres database and creates a .csv file
with selected data. CSV headers will match column names from sql select statement by default.
Or can be passed as a parameter.
:param sql: the sql code to be executed. (templated)
:type sql: Can receive a str representing a sql statement,
a list of str (sql statements), or reference to a template file.
Template reference are recognized by str ending in '.sql'
:param postgres_conn_id: reference to a specific postgres database
:type postgres_conn_id: str
:param csv_file_path: path to csv file, which will be created with selected data.
:type csv_file_path: str
:param parameters: (optional) the parameters to render the SQL query with.
(default value: None)
:type parameters: dict
:param headers: list of column names for csv file, if they should not match default headers
corresponding column names from sql select statement.
(default value: None)
:type headers: list[str]
:param increment: if True, creates a value for %(last_updated_value)s parameter.
WHERE clause in your sql should contain such parameter.
(default value: False)
'incremental sql' used for executing last_updated_value:
'SELECT MAX({{ task.last_modified_fname }}) FROM {{ task.destination_table }}'
:type increment: bool
:param destination_table: table name, from where to select last updated value.
:type destination_table: str
:param last_modified_fname: column name to refer to in 'incremental sql'.
:type last_modified_fname: str
:param destination_conn_id: reference to a specific postgres database to execute 'incremental sql'.
:type destination_conn_id: str
:param default_last_updated_value: default last_updated_value, if None is selected.
(default value: '1970-01-01 00:00:00+00:00')
:type default_last_updated_value: str/int
"""
template_fields = [
'sql', 'last_updated_sql',
'destination_table', 'last_modified_fname',
]
template_ext = ['.sql']
@apply_defaults
def __init__( # noqa: CFQ002
self,
csv_file_path: str,
parameters={},
sql='',
postgres_conn_id='',
destination_conn_id='',
destination_table='',
last_modified_fname='',
headers=None,
increment=False,
default_last_updated_value='1970-01-01 00:00:00+00:00',
*args, **kwargs):
super().__init__(*args, **kwargs)
self.sql = sql
self.postgres_conn_id = postgres_conn_id
self.destination_conn_id = destination_conn_id
self.csv_file_path = csv_file_path
self.parameters = parameters
self.increment = increment
self.headers = headers
self.destination_table = destination_table
self.last_modified_fname = last_modified_fname
self.last_updated_sql = 'SELECT MAX({{ task.last_modified_fname }}) FROM {{ task.destination_table }}'
self.default_last_updated_value = default_last_updated_value
if self.parameters and not isinstance(self.parameters, dict):
raise SyntaxError(f"Argument 'parameters' must be type - dict")
if self.increment:
if not self.last_modified_fname:
raise SyntaxError("Argument 'last_modified_fname' is required for incremental select")
if not self.destination_table:
raise SyntaxError("Argument 'destination_table' is required for incremental select")
@staticmethod
def _parse_sql_field_to_csv_header(sql_field) -> str:
csv_header = sql_field.lower().strip().replace('\"', '')
if ' as ' in csv_header:
csv_header = csv_header.split(' as ')[-1]
if '.' in csv_header:
csv_header = csv_header.split('.')[1]
if ' ' in csv_header:
csv_header = csv_header.split(' ')[1]
return csv_header
def execute(self, context): # noqa: C901
if self.increment:
last_updated_value = self._extract_last_updated_value()
self.parameters.update({'last_updated_value': last_updated_value})
hook = PostgresHook(postgres_conn_id=self.postgres_conn_id)
results = hook.get_records(sql=self.sql, parameters=self.parameters)
if not results:
self.log.info('No data extracted')
if not self.headers:
self.headers = self._get_csv_headers_from_sql()
self._create_csv(results, self.headers)
def _extract_last_updated_value(self) -> str:
hook = PostgresHook(postgres_conn_id=self.destination_conn_id)
last_updated_field = hook.get_first(sql=self.last_updated_sql)[0]
if not last_updated_field:
self.log.info(
f'Last event value not found, ' + (
f'using default value - {self.default_last_updated_value}'),
)
return self.default_last_updated_value
self.log.info(f'Last event value was {last_updated_field}')
return last_updated_field
def _create_csv(self, results: list, headers: list) -> None:
with open(self.csv_file_path, 'w') as csv_file:
writer_headers = csv.DictWriter(csv_file, fieldnames=headers)
writer_headers.writeheader()
writer = csv.writer(csv_file)
for row in results:
writer.writerow(row)
self.log.info('Finished creating csv file')
def _get_csv_headers_from_sql(self) -> list:
parsed_sql = sqlparse.parse(self.sql)[0].tokens
parsed_sql_fields = []
for token in parsed_sql:
if not isinstance(token, sqlparse.sql.IdentifierList):
continue
for field in token.get_identifiers():
parsed_sql_fields.append(field.value)
headers = []
for sql_field in parsed_sql_fields:
csv_header = self._parse_sql_field_to_csv_header(sql_field)
headers.append(csv_header)
return headers | en | 0.479902 | Executes sql code in a specific Postgres database and creates a .csv file with selected data. CSV headers will match column names from sql select statement by default. Or can be passed as a parameter. :param sql: the sql code to be executed. (templated) :type sql: Can receive a str representing a sql statement, a list of str (sql statements), or reference to a template file. Template reference are recognized by str ending in '.sql' :param postgres_conn_id: reference to a specific postgres database :type postgres_conn_id: str :param csv_file_path: path to csv file, which will be created with selected data. :type csv_file_path: str :param parameters: (optional) the parameters to render the SQL query with. (default value: None) :type parameters: dict :param headers: list of column names for csv file, if they should not match default headers corresponding column names from sql select statement. (default value: None) :type headers: list[str] :param increment: if True, creates a value for %(last_updated_value)s parameter. WHERE clause in your sql should contain such parameter. (default value: False) 'incremental sql' used for executing last_updated_value: 'SELECT MAX({{ task.last_modified_fname }}) FROM {{ task.destination_table }}' :type increment: bool :param destination_table: table name, from where to select last updated value. :type destination_table: str :param last_modified_fname: column name to refer to in 'incremental sql'. :type last_modified_fname: str :param destination_conn_id: reference to a specific postgres database to execute 'incremental sql'. :type destination_conn_id: str :param default_last_updated_value: default last_updated_value, if None is selected. (default value: '1970-01-01 00:00:00+00:00') :type default_last_updated_value: str/int # noqa: CFQ002 # noqa: C901 | 2.916676 | 3 |
common/data/split.py | alainjungo/reliability-challenges-uncertainty | 56 | 6621604 | <gh_stars>10-100
import json
import operator
import numpy as np
import sklearn.model_selection as model_selection
import common.utils.filehelper as fh
def split_subjects(subjects: list, sizes: tuple) -> tuple:
    """Split ``subjects`` into consecutive train/valid(/test) partitions.

    :param subjects: ordered list of subject identifiers.
    :param sizes: 2- or 3-tuple of absolute counts (ints summing to
        ``len(subjects)``) or fractions (floats summing to 1).
    :return: ``(train, valid)`` or ``(train, valid, test)`` lists.
    """
    nb_total = len(subjects)
    counts = _normalize_sizes(sizes, nb_total)
    nb_train, nb_valid = counts[0], counts[1]
    train_subjects = subjects[:nb_train]
    valid_subjects = subjects[nb_train:nb_train + nb_valid]
    ret = [train_subjects, valid_subjects]
    if len(counts) == 3:
        # Bug fix: ``subjects[-nb_test:]`` returned the *whole* list when
        # nb_test was 0; slicing from the front is safe for every count.
        test_subjects = subjects[nb_train + nb_valid:]
        ret.append(test_subjects)
    return tuple(ret)
def split_subjects_k_fold(subjects: list, k: int) -> list:
    """Partition ``subjects`` into k equally sized (train, valid) folds.

    :param subjects: ordered list of subject identifiers; its length must be
        a multiple of ``k``.
    :param k: number of folds.
    :return: list of ``(train_subjects, valid_subjects)`` tuples, one per fold.
    :raises ValueError: if ``len(subjects)`` is not a multiple of ``k``.
    """
    total = len(subjects)
    if total % k != 0:
        raise ValueError('Number of subjects ({}) must be a multiple of k ({})'.format(total, k))
    fold_size = total // k
    folds = []
    for start in range(0, total, fold_size):
        end = start + fold_size
        validation = subjects[start:end]
        # Training set is everything outside the validation slice.
        training = subjects[:start] + subjects[end:]
        folds.append((training, validation))
    return folds
def split_subject_k_fold_stratified(subjects: list, stratification: list, k: int) -> list:
    """Create k stratified (train, valid) folds of subject names.

    :param subjects: sequence of subject identifiers.
    :param stratification: per-subject class labels used to stratify folds.
    :param k: number of folds.
    :return: list of ``(train_names, valid_names)`` tuples.
    """
    # note: folds may not be of same size
    select = model_selection.StratifiedKFold(n_splits=k)
    folds = []
    for train_indices, valid_indices in select.split(subjects, stratification):
        # NOTE(review): itemgetter with a *single* index returns a bare item,
        # not a 1-tuple — confirm callers handle folds of size 1.
        train_names = operator.itemgetter(*train_indices)(subjects)
        valid_names = operator.itemgetter(*valid_indices)(subjects)
        folds.append((train_names, valid_names))
    return folds
def create_stratified_shuffled_split(subjects: list, stratification: list, counts: tuple, seed=100):
    """Shuffle and split subjects into stratified train/valid(/test) sets.

    :param subjects: sequence of subject identifiers.
    :param stratification: per-subject class labels to stratify by.
    :param counts: (train, valid) or (train, valid, test) set sizes, passed
        through to sklearn's ``train_test_split``.
    :param seed: random state for reproducible shuffling.
    :return: ``(train, valid)`` or ``(train, valid, test)`` subject lists.
    """
    valid_cnt = counts[1]
    # First split off the validation set; the remainder keeps its labels so
    # it can be stratified again below.
    res = model_selection.train_test_split(subjects, stratification, test_size=valid_cnt, random_state=seed,
                                           shuffle=True, stratify=np.asarray(stratification))
    tt_subjects, valid_subjects = res[:2]
    tt_stratification, _ = res[2:]
    if len(counts) == 3:
        test_cnt = counts[2]
        # Second split: carve the test set out of the remaining subjects.
        res = model_selection.train_test_split(tt_subjects, test_size=test_cnt, random_state=seed,
                                               shuffle=True, stratify=np.asarray(tt_stratification))
        train_subjects, test_subjects = res
        return train_subjects, valid_subjects, test_subjects
    else:
        train_subjects = tt_subjects
        return train_subjects, valid_subjects
def save_split(file: str, train_subjects: list, valid_subjects: list, test_subjects: list = None):
    """Write a train/valid/test split to ``file`` as JSON (overwriting it)."""
    fh.remove_if_exists(file)
    write_dict = {'train': train_subjects, 'valid': valid_subjects, 'test': test_subjects}
    with open(file, 'w') as f:
        json.dump(write_dict, f)
def load_split(file: str, k=None):
    """Read a split previously written by :func:`save_split`.

    :param file: path of the JSON split file.
    :param k: optional fold index; if given, the stored entries are treated
        as per-fold lists and entry ``k`` of each is returned.
    :return: ``(train_subjects, valid_subjects, test_subjects)``; the test
        entry is ``[]`` when ``k`` is given but no test data was stored.
    """
    with open(file, 'r') as f:
        split_data = json.load(f)
    train_subjects = split_data['train']
    valid_subjects = split_data['valid']
    test_subjects = split_data['test']
    if k is not None:
        train_subjects = train_subjects[k]
        valid_subjects = valid_subjects[k]
        test_subjects = [] if test_subjects is None else test_subjects[k]
    return train_subjects, valid_subjects, test_subjects
def _normalize_sizes(sizes, nb_total):
if isinstance(sizes[0], int):
if nb_total != sum(sizes):
raise ValueError('int sizes ({}) do not sum to number of subjects ({})'.format(sizes, nb_total))
nb_train = sizes[0]
nb_valid = sizes[1]
elif isinstance(sizes[0], float):
if sum(sizes) != 1.0:
raise ValueError('float sizes ({}) do not sum up to 1'.format(sizes))
nb_train = int(nb_total * sizes[0])
nb_valid = int(nb_total * sizes[1])
else:
raise ValueError('size values must be float or int, found {}'.format(type(sizes[0])))
counts = [nb_train, nb_valid]
with_test = len(sizes) == 3
if with_test:
nb_test = nb_total - nb_train - nb_valid
counts.append(nb_test)
return tuple(counts)
| import json
import operator
import numpy as np
import sklearn.model_selection as model_selection
import common.utils.filehelper as fh
def split_subjects(subjects: list, sizes: tuple) -> tuple:
    """Split ``subjects`` into consecutive train/valid(/test) partitions.

    :param subjects: ordered list of subject identifiers.
    :param sizes: 2- or 3-tuple of absolute counts (ints summing to
        ``len(subjects)``) or fractions (floats summing to 1).
    :return: ``(train, valid)`` or ``(train, valid, test)`` lists.
    """
    nb_total = len(subjects)
    counts = _normalize_sizes(sizes, nb_total)
    nb_train, nb_valid = counts[0], counts[1]
    train_subjects = subjects[:nb_train]
    valid_subjects = subjects[nb_train:nb_train + nb_valid]
    ret = [train_subjects, valid_subjects]
    if len(counts) == 3:
        # Bug fix: ``subjects[-nb_test:]`` returned the *whole* list when
        # nb_test was 0; slicing from the front is safe for every count.
        test_subjects = subjects[nb_train + nb_valid:]
        ret.append(test_subjects)
    return tuple(ret)
def split_subjects_k_fold(subjects: list, k: int) -> list:
    """Partition ``subjects`` into k equally sized (train, valid) folds.

    :param subjects: ordered list of subject identifiers; its length must be
        a multiple of ``k``.
    :param k: number of folds.
    :return: list of ``(train_subjects, valid_subjects)`` tuples, one per fold.
    :raises ValueError: if ``len(subjects)`` is not a multiple of ``k``.
    """
    total = len(subjects)
    if total % k != 0:
        raise ValueError('Number of subjects ({}) must be a multiple of k ({})'.format(total, k))
    fold_size = total // k
    folds = []
    for start in range(0, total, fold_size):
        end = start + fold_size
        validation = subjects[start:end]
        # Training set is everything outside the validation slice.
        training = subjects[:start] + subjects[end:]
        folds.append((training, validation))
    return folds
def split_subject_k_fold_stratified(subjects: list, stratification: list, k: int) -> list:
    """Create k stratified (train, valid) folds of subject names.

    :param subjects: sequence of subject identifiers.
    :param stratification: per-subject class labels used to stratify folds.
    :param k: number of folds.
    :return: list of ``(train_names, valid_names)`` tuples.
    """
    # note: folds may not be of same size
    select = model_selection.StratifiedKFold(n_splits=k)
    folds = []
    for train_indices, valid_indices in select.split(subjects, stratification):
        # NOTE(review): itemgetter with a *single* index returns a bare item,
        # not a 1-tuple — confirm callers handle folds of size 1.
        train_names = operator.itemgetter(*train_indices)(subjects)
        valid_names = operator.itemgetter(*valid_indices)(subjects)
        folds.append((train_names, valid_names))
    return folds
def create_stratified_shuffled_split(subjects: list, stratification: list, counts: tuple, seed=100):
    """Shuffle and split subjects into stratified train/valid(/test) sets.

    :param subjects: sequence of subject identifiers.
    :param stratification: per-subject class labels to stratify by.
    :param counts: (train, valid) or (train, valid, test) set sizes, passed
        through to sklearn's ``train_test_split``.
    :param seed: random state for reproducible shuffling.
    :return: ``(train, valid)`` or ``(train, valid, test)`` subject lists.
    """
    valid_cnt = counts[1]
    # First split off the validation set; the remainder keeps its labels so
    # it can be stratified again below.
    res = model_selection.train_test_split(subjects, stratification, test_size=valid_cnt, random_state=seed,
                                           shuffle=True, stratify=np.asarray(stratification))
    tt_subjects, valid_subjects = res[:2]
    tt_stratification, _ = res[2:]
    if len(counts) == 3:
        test_cnt = counts[2]
        # Second split: carve the test set out of the remaining subjects.
        res = model_selection.train_test_split(tt_subjects, test_size=test_cnt, random_state=seed,
                                               shuffle=True, stratify=np.asarray(tt_stratification))
        train_subjects, test_subjects = res
        return train_subjects, valid_subjects, test_subjects
    else:
        train_subjects = tt_subjects
        return train_subjects, valid_subjects
def save_split(file: str, train_subjects: list, valid_subjects: list, test_subjects: list = None):
    """Write a train/valid/test split to ``file`` as JSON (overwriting it)."""
    fh.remove_if_exists(file)
    write_dict = {'train': train_subjects, 'valid': valid_subjects, 'test': test_subjects}
    with open(file, 'w') as f:
        json.dump(write_dict, f)
def load_split(file: str, k=None):
    """Read a split previously written by :func:`save_split`.

    :param file: path of the JSON split file.
    :param k: optional fold index; if given, the stored entries are treated
        as per-fold lists and entry ``k`` of each is returned.
    :return: ``(train_subjects, valid_subjects, test_subjects)``; the test
        entry is ``[]`` when ``k`` is given but no test data was stored.
    """
    with open(file, 'r') as f:
        split_data = json.load(f)
    train_subjects = split_data['train']
    valid_subjects = split_data['valid']
    test_subjects = split_data['test']
    if k is not None:
        train_subjects = train_subjects[k]
        valid_subjects = valid_subjects[k]
        test_subjects = [] if test_subjects is None else test_subjects[k]
    return train_subjects, valid_subjects, test_subjects
def _normalize_sizes(sizes, nb_total):
if isinstance(sizes[0], int):
if nb_total != sum(sizes):
raise ValueError('int sizes ({}) do not sum to number of subjects ({})'.format(sizes, nb_total))
nb_train = sizes[0]
nb_valid = sizes[1]
elif isinstance(sizes[0], float):
if sum(sizes) != 1.0:
raise ValueError('float sizes ({}) do not sum up to 1'.format(sizes))
nb_train = int(nb_total * sizes[0])
nb_valid = int(nb_total * sizes[1])
else:
raise ValueError('size values must be float or int, found {}'.format(type(sizes[0])))
counts = [nb_train, nb_valid]
with_test = len(sizes) == 3
if with_test:
nb_test = nb_total - nb_train - nb_valid
counts.append(nb_test)
return tuple(counts) | en | 0.866307 | # note: folds may not be of same size | 2.921885 | 3 |
Scripts/core/assertions.py | velocist/TS4CheatsInfo | 0 | 6621605 | <filename>Scripts/core/assertions.py
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Core\assertions.py
# Compiled at: 2015-02-04 23:14:34
# Size of source mod 2**32: 4799 bytes
import functools
from sims4.collections import ListSet
from sims4.repr_utils import standard_repr
import sims4.log
logger = sims4.log.Logger('Assertions')
ENABLE_INTRUSIVE_ASSERTIONS = False
def not_recursive(func):
    """Identity decorator: returns ``func`` unchanged.

    NOTE(review): this is a decompiled release build with
    ENABLE_INTRUSIVE_ASSERTIONS = False — presumably the real recursion guard
    only exists when intrusive assertions are enabled.
    """
    return func
def not_recursive_gen(func):
    """Identity decorator for generator functions: returns ``func`` unchanged.

    NOTE(review): like ``not_recursive``, the guarding implementation appears
    compiled out in this build.
    """
    return func
def hot_path(fn):
    """Identity decorator: returns ``fn`` unchanged (marker only in this build)."""
    return fn
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Core\assertions.py
# Compiled at: 2015-02-04 23:14:34
# Size of source mod 2**32: 4799 bytes
import functools
from sims4.collections import ListSet
from sims4.repr_utils import standard_repr
import sims4.log
logger = sims4.log.Logger('Assertions')
ENABLE_INTRUSIVE_ASSERTIONS = False
def not_recursive(func):
return func
def not_recursive_gen(func):
return func
def hot_path(fn):
return fn | en | 0.519486 | # uncompyle6 version 3.7.4 # Python bytecode 3.7 (3394) # Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)] # Embedded file name: T:\InGame\Gameplay\Scripts\Core\assertions.py # Compiled at: 2015-02-04 23:14:34 # Size of source mod 2**32: 4799 bytes | 1.679607 | 2 |
src/Chap14_Problem.py | falconlee236/CodingTheMatrix-Answer | 0 | 6621606 | <reponame>falconlee236/CodingTheMatrix-Answer
from mat import Mat
from vec import Vec
from solver import solve
print("# Probem 14.16.2")
# Probem 14.16.2
def find_move_helper(A, r):
    # Solve A*w = e_r for w, where e_r is the standard basis vector that is 1
    # at row label r and 0 elsewhere (Problem 14.16.2).
    return solve(A, Vec(A.D[0], {r: 1}))
A = Mat(({1, 2, 3}, {1, 2, 3}), {(1, 1): 1, (1, 2): 1, (2, 2): 1, (2, 3): 1, (3, 1): 1, (3, 3): 1})
print("# Problem 14.16.3")
# Problem 14.16.3
def find_move_direction(A, x, r):
    # Problem 14.16.3: the move direction depends only on A and r; the current
    # point x is accepted for API symmetry but is not used here.
    return find_move_helper(A, r)
x = Vec({1, 2, 3}, {1: 2, 2: 4, 3: 6})
print("# Problem 14.16.4")
# Problem 14.16.4
def find_move(A, x, r):
    """Search for a step size sigma so that x + sigma*w stays non-negative.

    Tries integer step sizes 0..99 along the direction w and returns the first
    sigma for which every entry of x + sigma*w is >= 0 and either w is strictly
    positive everywhere or some entry has (numerically) reached zero.

    NOTE(review): returns None implicitly if no such sigma is found within 100
    steps.  ``10e-10`` equals 1e-9 — possibly ``1e-10`` was intended.  The
    lambda parameter ``x`` shadows the function argument ``x``.
    """
    w = find_move_direction(A, x, r)
    sigma = 0
    for i in range(100):
        sigma = i
        test = list((x + sigma * w).f.values())
        if min(test) >= 0 and (min(w.f.values()) > 0 or len(list(filter(lambda x: x < 10e-10, test))) > 0):
            return sigma
print(find_move(A, x, 3))
| from mat import Mat
from vec import Vec
from solver import solve
print("# Probem 14.16.2")
# Probem 14.16.2
def find_move_helper(A, r):
return solve(A, Vec(A.D[0], {r: 1}))
A = Mat(({1, 2, 3}, {1, 2, 3}), {(1, 1): 1, (1, 2): 1, (2, 2): 1, (2, 3): 1, (3, 1): 1, (3, 3): 1})
print("# Problem 14.16.3")
# Problem 14.16.3
def find_move_direction(A, x, r):
return find_move_helper(A, r)
x = Vec({1, 2, 3}, {1: 2, 2: 4, 3: 6})
print("# Problem 14.16.4")
# Problem 14.16.4
def find_move(A, x, r):
w = find_move_direction(A, x, r)
sigma = 0
for i in range(100):
sigma = i
test = list((x + sigma * w).f.values())
if min(test) >= 0 and (min(w.f.values()) > 0 or len(list(filter(lambda x: x < 10e-10, test))) > 0):
return sigma
print(find_move(A, x, 3)) | en | 0.490311 | # Probem 14.16.2 # Problem 14.16.3 # Problem 14.16.4 | 3.037705 | 3 |
shin/apps.py | Hasun-Shin/Hasun-Shin.github.io | 0 | 6621607 | from django.apps import AppConfig
class ShinConfig(AppConfig):
    """Django application configuration for the 'shin' app."""
    name = 'shin'
| from django.apps import AppConfig
class ShinConfig(AppConfig):
name = 'shin'
| none | 1 | 1.036423 | 1 | |
anyway/database.py | AlonMaor14/anyway | 1 | 6621608 | from anyway.app_and_db import db
Base = db.Model
| from anyway.app_and_db import db
Base = db.Model
| none | 1 | 1.168609 | 1 | |
decred/tests/unit/dcr/test_vsp_unit.py | JoeGruffins/tinydecred | 0 | 6621609 | """
Copyright (c) 2020, the Decred developers
See LICENSE for details
"""
import time
import pytest
from decred import DecredError
from decred.dcr import vsp
from decred.dcr.nets import mainnet
from decred.util import encode
def test_result_is_success():
    """resultIsSuccess must be True only for a dict whose status is 'success'."""
    cases = (
        (dict(status="success"), True),
        (dict(status="fail"), False),
        (dict(), False),
        ("success", False),
        ("abcd", False),
        ("", False),
        (0, False),
        (True, False),
        (None, False),
    )
    for res, expected in cases:
        assert vsp.resultIsSuccess(res) == expected
purchaseInfo = {
"PoolAddress": "TsbyH2p611jSWnvUAq3erSsRYnCxBg3nT2S",
"PoolFees": 0.5,
"Script": "512103af3c24d005ca8b755e7167617f3a5b4c60a65f8318a7fcd1b0cacb1ab"
"d2a97fc21027b81bc16954e28adb832248140eb58bedb6078ae5f4dabf21fde5a8ab7135c"
"b652ae",
"TicketAddress": "Tcbvn2hiEAXBDwUPDLDG2SxF9iANMKhdVev",
"VoteBits": 5,
"VoteBitsVersion": 0,
}
def assertPiIsEqual(pi):
    """Assert that ``pi`` matches the module-level ``purchaseInfo`` fixture."""
    field_map = (
        ("poolAddress", "PoolAddress"),
        ("poolFees", "PoolFees"),
        ("script", "Script"),
        ("ticketAddress", "TicketAddress"),
        ("voteBits", "VoteBits"),
        ("voteBitsVersion", "VoteBitsVersion"),
    )
    for attr, key in field_map:
        assert getattr(pi, attr) == purchaseInfo[key]
def test_purchase_info_parse():
    """Parsing the fixture dict must populate all fields and stamp the time."""
    now = int(time.time())
    pi = vsp.PurchaseInfo.parse(purchaseInfo)
    assertPiIsEqual(pi)
    # parse() stamps the object with the current unix time.
    assert isinstance(pi.unixTimestamp, int) and pi.unixTimestamp >= now
def test_purchase_info_blobbing():
pi = vsp.PurchaseInfo.parse(purchaseInfo)
b = vsp.PurchaseInfo.blob(pi)
assert isinstance(b, bytearray)
rePi = vsp.PurchaseInfo.unblob(b)
assertPiIsEqual(rePi)
ts = rePi.unixTimestamp
assert isinstance(ts, int) and ts == pi.unixTimestamp
# bad version
bCopy = encode.ByteArray(b, copy=True)
bCopy[0] = 255
with pytest.raises(NotImplementedError):
vsp.PurchaseInfo.unblob(bCopy.bytes())
# too long
bCopy = encode.ByteArray(b, copy=True)
bCopy += b"\x00"
with pytest.raises(DecredError):
vsp.PurchaseInfo.unblob(bCopy.bytes())
poolStats = {
"AllMempoolTix": 12,
"APIVersionsSupported": [1, 2],
"BlockHeight": 368781,
"Difficulty": 88.50820708,
"Expired": 3,
"Immature": 0,
"Live": 28,
"Missed": 349,
"OwnMempoolTix": 0,
"PoolSize": 5759,
"ProportionLive": 0.004861955200555652,
"ProportionMissed": 0.3216589861751152,
"Revoked": 349,
"TotalSubsidy": 293.10719669,
"Voted": 736,
"Network": "testnet3",
"PoolEmail": "<EMAIL>",
"PoolFees": 0.5,
"PoolStatus": "Open",
"UserCount": 44,
"UserCountActive": 34,
"Version": "1.6.0-pre",
}
def test_pool_stats():
ps = vsp.PoolStats(poolStats)
assert ps.allMempoolTix == poolStats["AllMempoolTix"]
assert ps.apiVersionsSupported == poolStats["APIVersionsSupported"]
assert ps.blockHeight == poolStats["BlockHeight"]
assert ps.difficulty == poolStats["Difficulty"]
assert ps.expired == poolStats["Expired"]
assert ps.immature == poolStats["Immature"]
assert ps.live == poolStats["Live"]
assert ps.missed == poolStats["Missed"]
assert ps.ownMempoolTix == poolStats["OwnMempoolTix"]
assert ps.poolSize == poolStats["PoolSize"]
assert ps.proportionLive == poolStats["ProportionLive"]
assert ps.proportionMissed == poolStats["ProportionMissed"]
assert ps.revoked == poolStats["Revoked"]
assert ps.totalSubsidy == poolStats["TotalSubsidy"]
assert ps.voted == poolStats["Voted"]
assert ps.network == poolStats["Network"]
assert ps.poolEmail == poolStats["PoolEmail"]
assert ps.poolFees == poolStats["PoolFees"]
assert ps.poolStatus == poolStats["PoolStatus"]
assert ps.userCount == poolStats["UserCount"]
assert ps.userCountActive == poolStats["UserCountActive"]
assert ps.version == poolStats["Version"]
now = int(time.time())
votingServiceProvider = {
"url": "https://www.dcrstakedinner.com/",
"apiKey": (
"<KEY>"
"<KEY>"
"XMiOjQ2fQ.PEb000_TjQuBYxjRdh-VOaXMdV2GUw3_ZyIyp_tfpFE"
),
"netName": "testnet3",
"purchaseInfo": vsp.PurchaseInfo.parse(purchaseInfo),
}
def assertVspIsEqual(pool):
assert pool.url == votingServiceProvider["url"]
assert pool.apiKey == votingServiceProvider["apiKey"]
assert pool.netParams.Name == votingServiceProvider["netName"]
assertPiIsEqual(pool.purchaseInfo)
def test_vsp_init():
    """Constructing a VotingServiceProvider must retain all constructor args."""
    pool = vsp.VotingServiceProvider(**votingServiceProvider)
    assertVspIsEqual(pool)
    ts = pool.purchaseInfo.unixTimestamp
    # The purchase-info timestamp was stamped when the fixture was parsed.
    assert isinstance(ts, int) and ts >= now
def test_vsp_blobbing():
pool = vsp.VotingServiceProvider(**votingServiceProvider)
b = vsp.VotingServiceProvider.blob(pool)
assert isinstance(b, bytearray)
rePool = vsp.VotingServiceProvider.unblob(b)
assertVspIsEqual(rePool)
ts = rePool.purchaseInfo.unixTimestamp
assert isinstance(ts, int) and ts == pool.purchaseInfo.unixTimestamp
# bad version
bCopy = encode.ByteArray(b, copy=True)
bCopy[0] = 255
with pytest.raises(NotImplementedError):
vsp.VotingServiceProvider.unblob(bCopy.bytes())
# too long
bCopy = encode.ByteArray(b, copy=True)
bCopy += b"\x00"
with pytest.raises(DecredError):
vsp.VotingServiceProvider.unblob(bCopy.bytes())
def test_vsp_serialize():
    """serialize() must equal the ByteArray form of the blobbed provider."""
    pool = vsp.VotingServiceProvider(**votingServiceProvider)
    b = vsp.VotingServiceProvider.blob(pool)
    assert pool.serialize() == encode.ByteArray(b)
vspProviders = {
"Staked": {
"APIEnabled": True,
"APIVersionsSupported": [1, 2],
"Network": "mainnet",
"URL": "https://decred.staked.us",
"Launched": 1543433400,
"LastUpdated": 1582020568,
"Immature": 0,
"Live": 141,
"Voted": 2730,
"Missed": 10,
"PoolFees": 5,
"ProportionLive": 0.0034847511245118877,
"ProportionMissed": 0.0036496350364963502,
"UserCount": 229,
"UserCountActive": 106,
"Version": "1.4.0-pre+dev",
},
"Golf": {
"APIEnabled": True,
"APIVersionsSupported": [1, 2],
"Network": "mainnet",
"URL": "https://stakepool.dcrstats.com",
"Launched": 1464167340,
"LastUpdated": 1582020568,
"Immature": 21,
"Live": 768,
"Voted": 148202,
"Missed": 154,
"PoolFees": 5,
"ProportionLive": 0.01898077208244773,
"ProportionMissed": 0,
"UserCount": 6005,
"UserCountActive": 2751,
"Version": "1.5.0-pre",
},
"Hotel": {
"APIEnabled": True,
"APIVersionsSupported": [1, 2],
"Network": "mainnet",
"URL": "https://stake.decredbrasil.com",
"Launched": 1464463860,
"LastUpdated": 1582020568,
"Immature": 41,
"Live": 607,
"Voted": 48135,
"Missed": 49,
"PoolFees": 5,
"ProportionLive": 0.015002842383647644,
"ProportionMissed": 0.0010169350821849577,
"UserCount": 1607,
"UserCountActive": 968,
"Version": "1.5.0",
},
"November": {
"APIEnabled": True,
"APIVersionsSupported": [1, 2],
"Network": "mainnet",
"URL": "https://decred.raqamiya.net",
"Launched": 1513878600,
"LastUpdated": 1582020568,
"Immature": 5,
"Live": 334,
"Voted": 15720,
"Missed": 50,
"PoolFees": 1,
"ProportionLive": 0.008255270767937913,
"ProportionMissed": 0.0031705770450221942,
"UserCount": 261,
"UserCountActive": 114,
"Version": "1.5.0-pre",
},
"Ray": {
"APIEnabled": True,
"APIVersionsSupported": [1, 2],
"Network": "mainnet",
"URL": "https://dcrpos.idcray.com",
"Launched": 1518446640,
"LastUpdated": 1582020569,
"Immature": 50,
"Live": 1108,
"Voted": 36974,
"Missed": 298,
"PoolFees": 2,
"ProportionLive": 0.027385748535554512,
"ProportionMissed": 0.007995277956643057,
"UserCount": 137,
"UserCountActive": 70,
"Version": "1.4.0-pre+dev",
},
}
def test_vsp_providers(http_get_post):
    """providers() must return one entry per pool in the stubbed listing."""
    http_get_post("https://api.decred.org/?c=gsd", vspProviders)
    providers = vsp.VotingServiceProvider.providers(mainnet)
    assert len(providers) == 5
def test_vsp_api_path():
    """apiPath must build <base-url>/api/v2/<command> from the pool URL."""
    pool = vsp.VotingServiceProvider(**votingServiceProvider)
    path = pool.apiPath("stakeinfo")
    assert path == "https://www.dcrstakedinner.com/api/v2/stakeinfo"
def test_vsp_headers():
    """headers must carry the API key as a Bearer authorization header."""
    pool = vsp.VotingServiceProvider(**votingServiceProvider)
    headers = pool.headers()
    assert headers == {"Authorization": "Bearer " + votingServiceProvider["apiKey"]}
def test_vsp_validate():
pool = vsp.VotingServiceProvider(**votingServiceProvider)
# correct address
addr = "<KEY>"
pool.validate(addr)
# valid but wrong address
addr = "<KEY>"
with pytest.raises(DecredError):
pool.validate(addr)
# invalid address
addr = "ASDF"
with pytest.raises(DecredError):
pool.validate(addr)
# no address
addr = ""
with pytest.raises(DecredError):
pool.validate(addr)
def test_vsp_authorize(http_get_post):
pool = vsp.VotingServiceProvider(**votingServiceProvider)
success = {"status": "success", "data": purchaseInfo}
addressNotSet = {
"status": "error",
"code": 9,
"message": "no address submitted",
}
# ok
addr = "<KEY>"
http_get_post(pool.apiPath("getpurchaseinfo"), success)
pool.authorize(addr)
# address not submitted
addr = "<KEY>"
http_get_post(pool.apiPath("getpurchaseinfo"), addressNotSet)
http_get_post(pool.apiPath("getpurchaseinfo"), success)
http_get_post((pool.apiPath("address"), repr({"UserPubKeyAddr": addr})), success)
pool.authorize(addr)
# other error
systemErr = {"status": "error", "code": 14, "message": "system error"}
addr = "<KEY>"
http_get_post(pool.apiPath("getpurchaseinfo"), systemErr)
with pytest.raises(DecredError):
pool.authorize(addr)
# wrong address
addr = "<KEY>"
http_get_post(pool.apiPath("getpurchaseinfo"), systemErr)
with pytest.raises(DecredError):
pool.authorize(addr)
def test_vsp_get_purchase_info(http_get_post):
pool = vsp.VotingServiceProvider(**votingServiceProvider)
success = {"status": "success", "data": purchaseInfo}
addressNotSet = {
"status": "error",
"code": 9,
"message": "no address submitted",
}
# ok
http_get_post(pool.apiPath("getpurchaseinfo"), success)
pool.getPurchaseInfo()
assert not pool.err
# error
http_get_post(pool.apiPath("getpurchaseinfo"), addressNotSet)
with pytest.raises(DecredError):
pool.getPurchaseInfo()
assert pool.err
def test_vsp_update_purchase_info(http_get_post):
pool = vsp.VotingServiceProvider(**votingServiceProvider)
success = {"status": "success", "data": purchaseInfo}
# updated
pool.purchaseInfo.unixTimestamp = 0
http_get_post(pool.apiPath("getpurchaseinfo"), success)
pool.updatePurchaseInfo()
assert pool.purchaseInfo.unixTimestamp != 0
# not updated
# within the update threshhold
before = int(time.time() - vsp.PURCHASE_INFO_LIFE / 2)
pool.purchaseInfo.unixTimestamp = before
pool.updatePurchaseInfo()
assert pool.purchaseInfo.unixTimestamp == before
def test_vsp_get_stats(http_get_post):
pool = vsp.VotingServiceProvider(**votingServiceProvider)
success = {"status": "success", "data": poolStats}
# ok
http_get_post(pool.apiPath("stats"), success)
pool.getStats()
# pool error
systemErr = {"status": "error", "code": 14, "message": "system error"}
http_get_post(pool.apiPath("stats"), systemErr)
with pytest.raises(DecredError):
pool.getStats()
def test_vsp_set_vote_bits(http_get_post):
pool = vsp.VotingServiceProvider(**votingServiceProvider)
success = {"status": "success", "data": "ok"}
# votebits are 5
assert pool.purchaseInfo.voteBits == 5
# ok
http_get_post((pool.apiPath("voting"), repr({"VoteBits": 7})), success)
pool.setVoteBits(7)
# set to 7
assert pool.purchaseInfo.voteBits == 7
# pool error
systemErr = {"status": "error", "code": 14, "message": "system error"}
http_get_post((pool.apiPath("voting"), repr({"VoteBits": 3})), systemErr)
with pytest.raises(DecredError):
pool.setVoteBits(3)
# no change
assert pool.purchaseInfo.voteBits == 7
| """
Copyright (c) 2020, the Decred developers
See LICENSE for details
"""
import time
import pytest
from decred import DecredError
from decred.dcr import vsp
from decred.dcr.nets import mainnet
from decred.util import encode
def test_result_is_success():
# (res, isSuccess)
tests = [
(dict(status="success"), True),
(dict(status="fail"), False),
(dict(), False),
("success", False),
("abcd", False),
("", False),
(0, False),
(True, False),
(None, False),
]
for res, isSuccess in tests:
assert vsp.resultIsSuccess(res) == isSuccess
purchaseInfo = {
"PoolAddress": "TsbyH2p611jSWnvUAq3erSsRYnCxBg3nT2S",
"PoolFees": 0.5,
"Script": "512103af3c24d005ca8b755e7167617f3a5b4c60a65f8318a7fcd1b0cacb1ab"
"d2a97fc21027b81bc16954e28adb832248140eb58bedb6078ae5f4dabf21fde5a8ab7135c"
"b652ae",
"TicketAddress": "Tcbvn2hiEAXBDwUPDLDG2SxF9iANMKhdVev",
"VoteBits": 5,
"VoteBitsVersion": 0,
}
def assertPiIsEqual(pi):
assert pi.poolAddress == purchaseInfo["PoolAddress"]
assert pi.poolFees == purchaseInfo["PoolFees"]
assert pi.script == purchaseInfo["Script"]
assert pi.ticketAddress == purchaseInfo["TicketAddress"]
assert pi.voteBits == purchaseInfo["VoteBits"]
assert pi.voteBitsVersion == purchaseInfo["VoteBitsVersion"]
def test_purchase_info_parse():
now = int(time.time())
pi = vsp.PurchaseInfo.parse(purchaseInfo)
assertPiIsEqual(pi)
assert isinstance(pi.unixTimestamp, int) and pi.unixTimestamp >= now
def test_purchase_info_blobbing():
pi = vsp.PurchaseInfo.parse(purchaseInfo)
b = vsp.PurchaseInfo.blob(pi)
assert isinstance(b, bytearray)
rePi = vsp.PurchaseInfo.unblob(b)
assertPiIsEqual(rePi)
ts = rePi.unixTimestamp
assert isinstance(ts, int) and ts == pi.unixTimestamp
# bad version
bCopy = encode.ByteArray(b, copy=True)
bCopy[0] = 255
with pytest.raises(NotImplementedError):
vsp.PurchaseInfo.unblob(bCopy.bytes())
# too long
bCopy = encode.ByteArray(b, copy=True)
bCopy += b"\x00"
with pytest.raises(DecredError):
vsp.PurchaseInfo.unblob(bCopy.bytes())
poolStats = {
"AllMempoolTix": 12,
"APIVersionsSupported": [1, 2],
"BlockHeight": 368781,
"Difficulty": 88.50820708,
"Expired": 3,
"Immature": 0,
"Live": 28,
"Missed": 349,
"OwnMempoolTix": 0,
"PoolSize": 5759,
"ProportionLive": 0.004861955200555652,
"ProportionMissed": 0.3216589861751152,
"Revoked": 349,
"TotalSubsidy": 293.10719669,
"Voted": 736,
"Network": "testnet3",
"PoolEmail": "<EMAIL>",
"PoolFees": 0.5,
"PoolStatus": "Open",
"UserCount": 44,
"UserCountActive": 34,
"Version": "1.6.0-pre",
}
def test_pool_stats():
ps = vsp.PoolStats(poolStats)
assert ps.allMempoolTix == poolStats["AllMempoolTix"]
assert ps.apiVersionsSupported == poolStats["APIVersionsSupported"]
assert ps.blockHeight == poolStats["BlockHeight"]
assert ps.difficulty == poolStats["Difficulty"]
assert ps.expired == poolStats["Expired"]
assert ps.immature == poolStats["Immature"]
assert ps.live == poolStats["Live"]
assert ps.missed == poolStats["Missed"]
assert ps.ownMempoolTix == poolStats["OwnMempoolTix"]
assert ps.poolSize == poolStats["PoolSize"]
assert ps.proportionLive == poolStats["ProportionLive"]
assert ps.proportionMissed == poolStats["ProportionMissed"]
assert ps.revoked == poolStats["Revoked"]
assert ps.totalSubsidy == poolStats["TotalSubsidy"]
assert ps.voted == poolStats["Voted"]
assert ps.network == poolStats["Network"]
assert ps.poolEmail == poolStats["PoolEmail"]
assert ps.poolFees == poolStats["PoolFees"]
assert ps.poolStatus == poolStats["PoolStatus"]
assert ps.userCount == poolStats["UserCount"]
assert ps.userCountActive == poolStats["UserCountActive"]
assert ps.version == poolStats["Version"]
now = int(time.time())
votingServiceProvider = {
"url": "https://www.dcrstakedinner.com/",
"apiKey": (
"<KEY>"
"<KEY>"
"XMiOjQ2fQ.PEb000_TjQuBYxjRdh-VOaXMdV2GUw3_ZyIyp_tfpFE"
),
"netName": "testnet3",
"purchaseInfo": vsp.PurchaseInfo.parse(purchaseInfo),
}
def assertVspIsEqual(pool):
assert pool.url == votingServiceProvider["url"]
assert pool.apiKey == votingServiceProvider["apiKey"]
assert pool.netParams.Name == votingServiceProvider["netName"]
assertPiIsEqual(pool.purchaseInfo)
def test_vsp_init():
pool = vsp.VotingServiceProvider(**votingServiceProvider)
assertVspIsEqual(pool)
ts = pool.purchaseInfo.unixTimestamp
assert isinstance(ts, int) and ts >= now
def test_vsp_blobbing():
pool = vsp.VotingServiceProvider(**votingServiceProvider)
b = vsp.VotingServiceProvider.blob(pool)
assert isinstance(b, bytearray)
rePool = vsp.VotingServiceProvider.unblob(b)
assertVspIsEqual(rePool)
ts = rePool.purchaseInfo.unixTimestamp
assert isinstance(ts, int) and ts == pool.purchaseInfo.unixTimestamp
# bad version
bCopy = encode.ByteArray(b, copy=True)
bCopy[0] = 255
with pytest.raises(NotImplementedError):
vsp.VotingServiceProvider.unblob(bCopy.bytes())
# too long
bCopy = encode.ByteArray(b, copy=True)
bCopy += b"\x00"
with pytest.raises(DecredError):
vsp.VotingServiceProvider.unblob(bCopy.bytes())
def test_vsp_serialize():
pool = vsp.VotingServiceProvider(**votingServiceProvider)
b = vsp.VotingServiceProvider.blob(pool)
assert pool.serialize() == encode.ByteArray(b)
vspProviders = {
"Staked": {
"APIEnabled": True,
"APIVersionsSupported": [1, 2],
"Network": "mainnet",
"URL": "https://decred.staked.us",
"Launched": 1543433400,
"LastUpdated": 1582020568,
"Immature": 0,
"Live": 141,
"Voted": 2730,
"Missed": 10,
"PoolFees": 5,
"ProportionLive": 0.0034847511245118877,
"ProportionMissed": 0.0036496350364963502,
"UserCount": 229,
"UserCountActive": 106,
"Version": "1.4.0-pre+dev",
},
"Golf": {
"APIEnabled": True,
"APIVersionsSupported": [1, 2],
"Network": "mainnet",
"URL": "https://stakepool.dcrstats.com",
"Launched": 1464167340,
"LastUpdated": 1582020568,
"Immature": 21,
"Live": 768,
"Voted": 148202,
"Missed": 154,
"PoolFees": 5,
"ProportionLive": 0.01898077208244773,
"ProportionMissed": 0,
"UserCount": 6005,
"UserCountActive": 2751,
"Version": "1.5.0-pre",
},
"Hotel": {
"APIEnabled": True,
"APIVersionsSupported": [1, 2],
"Network": "mainnet",
"URL": "https://stake.decredbrasil.com",
"Launched": 1464463860,
"LastUpdated": 1582020568,
"Immature": 41,
"Live": 607,
"Voted": 48135,
"Missed": 49,
"PoolFees": 5,
"ProportionLive": 0.015002842383647644,
"ProportionMissed": 0.0010169350821849577,
"UserCount": 1607,
"UserCountActive": 968,
"Version": "1.5.0",
},
"November": {
"APIEnabled": True,
"APIVersionsSupported": [1, 2],
"Network": "mainnet",
"URL": "https://decred.raqamiya.net",
"Launched": 1513878600,
"LastUpdated": 1582020568,
"Immature": 5,
"Live": 334,
"Voted": 15720,
"Missed": 50,
"PoolFees": 1,
"ProportionLive": 0.008255270767937913,
"ProportionMissed": 0.0031705770450221942,
"UserCount": 261,
"UserCountActive": 114,
"Version": "1.5.0-pre",
},
"Ray": {
"APIEnabled": True,
"APIVersionsSupported": [1, 2],
"Network": "mainnet",
"URL": "https://dcrpos.idcray.com",
"Launched": 1518446640,
"LastUpdated": 1582020569,
"Immature": 50,
"Live": 1108,
"Voted": 36974,
"Missed": 298,
"PoolFees": 2,
"ProportionLive": 0.027385748535554512,
"ProportionMissed": 0.007995277956643057,
"UserCount": 137,
"UserCountActive": 70,
"Version": "1.4.0-pre+dev",
},
}
def test_vsp_providers(http_get_post):
http_get_post("https://api.decred.org/?c=gsd", vspProviders)
providers = vsp.VotingServiceProvider.providers(mainnet)
assert len(providers) == 5
def test_vsp_api_path():
pool = vsp.VotingServiceProvider(**votingServiceProvider)
path = pool.apiPath("stakeinfo")
assert path == "https://www.dcrstakedinner.com/api/v2/stakeinfo"
def test_vsp_headers():
pool = vsp.VotingServiceProvider(**votingServiceProvider)
headers = pool.headers()
assert headers == {"Authorization": "Bearer " + votingServiceProvider["apiKey"]}
def test_vsp_validate():
pool = vsp.VotingServiceProvider(**votingServiceProvider)
# correct address
addr = "<KEY>"
pool.validate(addr)
# valid but wrong address
addr = "<KEY>"
with pytest.raises(DecredError):
pool.validate(addr)
# invalid address
addr = "ASDF"
with pytest.raises(DecredError):
pool.validate(addr)
# no address
addr = ""
with pytest.raises(DecredError):
pool.validate(addr)
def test_vsp_authorize(http_get_post):
pool = vsp.VotingServiceProvider(**votingServiceProvider)
success = {"status": "success", "data": purchaseInfo}
addressNotSet = {
"status": "error",
"code": 9,
"message": "no address submitted",
}
# ok
addr = "<KEY>"
http_get_post(pool.apiPath("getpurchaseinfo"), success)
pool.authorize(addr)
# address not submitted
addr = "<KEY>"
http_get_post(pool.apiPath("getpurchaseinfo"), addressNotSet)
http_get_post(pool.apiPath("getpurchaseinfo"), success)
http_get_post((pool.apiPath("address"), repr({"UserPubKeyAddr": addr})), success)
pool.authorize(addr)
# other error
systemErr = {"status": "error", "code": 14, "message": "system error"}
addr = "<KEY>"
http_get_post(pool.apiPath("getpurchaseinfo"), systemErr)
with pytest.raises(DecredError):
pool.authorize(addr)
# wrong address
addr = "<KEY>"
http_get_post(pool.apiPath("getpurchaseinfo"), systemErr)
with pytest.raises(DecredError):
pool.authorize(addr)
def test_vsp_get_purchase_info(http_get_post):
pool = vsp.VotingServiceProvider(**votingServiceProvider)
success = {"status": "success", "data": purchaseInfo}
addressNotSet = {
"status": "error",
"code": 9,
"message": "no address submitted",
}
# ok
http_get_post(pool.apiPath("getpurchaseinfo"), success)
pool.getPurchaseInfo()
assert not pool.err
# error
http_get_post(pool.apiPath("getpurchaseinfo"), addressNotSet)
with pytest.raises(DecredError):
pool.getPurchaseInfo()
assert pool.err
def test_vsp_update_purchase_info(http_get_post):
pool = vsp.VotingServiceProvider(**votingServiceProvider)
success = {"status": "success", "data": purchaseInfo}
# updated
pool.purchaseInfo.unixTimestamp = 0
http_get_post(pool.apiPath("getpurchaseinfo"), success)
pool.updatePurchaseInfo()
assert pool.purchaseInfo.unixTimestamp != 0
# not updated
# within the update threshhold
before = int(time.time() - vsp.PURCHASE_INFO_LIFE / 2)
pool.purchaseInfo.unixTimestamp = before
pool.updatePurchaseInfo()
assert pool.purchaseInfo.unixTimestamp == before
def test_vsp_get_stats(http_get_post):
pool = vsp.VotingServiceProvider(**votingServiceProvider)
success = {"status": "success", "data": poolStats}
# ok
http_get_post(pool.apiPath("stats"), success)
pool.getStats()
# pool error
systemErr = {"status": "error", "code": 14, "message": "system error"}
http_get_post(pool.apiPath("stats"), systemErr)
with pytest.raises(DecredError):
pool.getStats()
def test_vsp_set_vote_bits(http_get_post):
pool = vsp.VotingServiceProvider(**votingServiceProvider)
success = {"status": "success", "data": "ok"}
# votebits are 5
assert pool.purchaseInfo.voteBits == 5
# ok
http_get_post((pool.apiPath("voting"), repr({"VoteBits": 7})), success)
pool.setVoteBits(7)
# set to 7
assert pool.purchaseInfo.voteBits == 7
# pool error
systemErr = {"status": "error", "code": 14, "message": "system error"}
http_get_post((pool.apiPath("voting"), repr({"VoteBits": 3})), systemErr)
with pytest.raises(DecredError):
pool.setVoteBits(3)
# no change
assert pool.purchaseInfo.voteBits == 7
| en | 0.783004 | Copyright (c) 2020, the Decred developers See LICENSE for details # (res, isSuccess) # bad version # too long # bad version # too long # correct address # valid but wrong address # invalid address # no address # ok # address not submitted # other error # wrong address # ok # error # updated # not updated # within the update threshhold # ok # pool error # votebits are 5 # ok # set to 7 # pool error # no change | 2.292958 | 2 |
line_counter.py | CedricFauth/LineCounter | 0 | 6621610 | import sys
import pathlib
def arg_help():
    """Print usage instructions when the command-line arguments are invalid."""
    given = str(sys.argv[1:])
    print("Wrong input format: " + given + "\n")
    print("Please use something like: python3 line_counter.py [PATH] [FILENAME]")
    print("I.e. python3 line_counter.py /home/user/javaproject *.java")
def main():
    """Count non-empty, non-comment lines of all files matching sys.argv[2]
    under the directory sys.argv[1], printing per-file and total counts.

    Comment handling is a heuristic: blank lines, lines starting with '//'
    and lines inside '/* ... */' blocks are skipped (the line containing the
    closing '*/' is still counted, matching the original behaviour).
    """
    matches = pathlib.Path(sys.argv[1]).glob('**/' + sys.argv[2])
    files = [str(x) for x in matches if x.is_file()]
    total_lines = 0
    try:
        for file in files:
            file_lines = 0
            # Reset per file: an unbalanced '/*' in one file must not suppress
            # counting in every file that follows.
            in_comment = False
            # 'with' guarantees the handle is closed even on decode errors.
            with open(file) as src:
                for line in src:
                    line = line.replace(" ", "").replace("\t", "")
                    if line != "\n" and "//" != line[0:2]:
                        if "/*" in line:
                            in_comment = True
                        if "*/" in line:
                            in_comment = False
                        if not in_comment:
                            file_lines += 1
            total_lines += file_lines
            print("Reading " + str(file) + "\nLines: " + str(file_lines))
        print("\nFiles: " + str(len(files)))
        print("\nTotal lines: " + str(total_lines) + "\n")
    except UnicodeDecodeError:
        print("Error: Cannot read " + str(sys.argv[2]) + " files")
# Entry point: expects exactly two arguments (search path and file pattern);
# otherwise prints the usage text.
if __name__ == '__main__':
    if(len(sys.argv) == 3):
        main()
    else:
        arg_help()
| import sys
import pathlib
def arg_help():
print("Wrong input format: " + str(sys.argv[1:]) + "\n")
print("Please use something like: python3 line_counter.py [PATH] [FILENAME]")
print("I.e. python3 line_counter.py /home/user/javaproject *.java")
def main():
p = pathlib.Path(sys.argv[1]).glob('**/' + sys.argv[2])
files = [str(x) for x in p if x.is_file()]
#print("\nFiles: " + str(files) + "\n")
total_lines = 0
try:
in_comment = False
for file in files:
file_lines = 0
for line in open(file):
line = line.replace(" ", "").replace("\t", "")
if(line != "\n" and "//" != line[0:2]):
if "/*" in line:
in_comment = True
if "*/" in line:
in_comment = False
if not in_comment:
#print("Line: " + line)
file_lines +=1
total_lines += file_lines
print("Reading " + str(file) + "\nLines: " + str(file_lines))
print("\nFiles: " + str(len(files)))
print("\nTotal lines: " + str(total_lines) + "\n")
except UnicodeDecodeError:
print("Error: Cannot read " + str(sys.argv[2]) + " files")
if __name__ == '__main__':
if(len(sys.argv) == 3):
main()
else:
arg_help()
| en | 0.3905 | #print("\nFiles: " + str(files) + "\n") #print("Line: " + line) | 3.342638 | 3 |
code/python/archive/c0200_chart_patents.py | jesnyder/allogenic | 1 | 6621611 | import os
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from c0101_retrieve_clinical import retrieve_clinical
from c0201_query_patents import query_patents
def chart_patents():
    """Plot yearly counts of allogenic vs autologous MSC clinical trials.

    Reads the two pre-downloaded ClinicalTrials.gov CSV exports, counts the
    trials starting each year via count_per_year(), and writes the comparison
    chart to 'patents.png'.
    """
    query_patents()
    # Retrieval from ClinicalTrials.gov is done offline; see
    # retrieve_clinical() and the search URL it was built from.
    # retrieve_clinical(clinical_gov_url)
    alloFile = 'allogenicANDmesencymalClinicalGov.csv'
    autoFile = 'autologousANDmesencymalClinicalGov.csv'
    fig = plt.figure()
    ax = plt.subplot(111)
    # One color per series and one legend label per series: labelling both the
    # scatter and the plot call produced duplicate legend entries, and the
    # allogenic scatter/line colors disagreed (blue markers, red line).
    df_allo = count_per_year(alloFile)
    plt.scatter(df_allo['year'], df_allo['count'], color=[1, 0, 0])
    plt.plot(df_allo['year'], df_allo['count'], color=[1, 0, 0], label='allogenic')
    df_auto = count_per_year(autoFile)
    plt.scatter(df_auto['year'], df_auto['count'], color=[0, 0, 1])
    plt.plot(df_auto['year'], df_auto['count'], color=[0, 0, 1], label='autologous')
    ax.legend(loc='center left')
    plt.title('Clinical Trials of MSC')
    plt.savefig('patents.png', bbox_inches='tight')
def count_per_year(refFile):
    """Count rows per start year (2000-2024) in a ClinicalTrials.gov CSV.

    Args:
        refFile: file name inside the 'metadata' directory; the CSV must have
            a 'Start Date' column whose values end with the year.

    Returns:
        DataFrame with string 'year' and integer 'count' columns; the frame
        is also printed for inspection.
    """
    ref_file = os.path.join('metadata', refFile)
    trials = pd.read_csv(ref_file)
    # The year is the last whitespace-separated token of the start date.
    trials['Start Year'] = [str(d).split(' ')[-1] for d in list(trials["Start Date"])]
    year_labels = []
    year_counts = []
    for y in np.arange(2000, 2025, 1):
        label = str(y)
        year_labels.append(label)
        year_counts.append(len(list(trials[trials['Start Year'] == label]['Start Year'])))
    df_return = pd.DataFrame()
    df_return['year'] = year_labels
    df_return['count'] = year_counts
    print(df_return)
    return(df_return)
| import os
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from c0101_retrieve_clinical import retrieve_clinical
from c0201_query_patents import query_patents
def chart_patents():
    """Chart yearly counts of allogenic vs. autologous MSC clinical trials.

    Runs the patent query, then plots per-year trial counts from the two
    ClinicalTrials.gov CSV exports and saves the figure to 'patents.png'.
    """
    query_patents()
    # clinical_gov_url = 'https://clinicaltrials.gov/ct2/results?cond=&term=&type=&rslt=&age_v=&gndr=&intr=allogenic+AND+msc&titles=&outc=&spons=&lead=&id=&cntry=&state=&city=&dist=&locn=&rsub=&strd_s=&strd_e=&prcd_s=&prcd_e=&sfpd_s=&sfpd_e=&rfpd_s=&rfpd_e=&lupd_s=&lupd_e=&sort='
    # retrieve_clinical(clinical_gov_url)
    ref_path = os.path.join( 'metadata')  # NOTE(review): unused; count_per_year builds its own path
    alloFile = 'allogenicANDmesencymalClinicalGov.csv'
    autoFile = 'autologousANDmesencymalClinicalGov.csv'
    fig = plt.figure()
    ax = plt.subplot(111)
    # Allogenic series: blue scatter points, red connecting line
    df_return = count_per_year(alloFile)
    plt.scatter(df_return['year'], df_return['count'], color = [0,0,1], label = 'allogenic')
    plt.plot(df_return['year'], df_return['count'], color = [1,0,0], label = 'allogenic')
    # Autologous series: blue scatter points and blue line
    df_return = count_per_year(autoFile)
    plt.scatter(df_return['year'], df_return['count'], color = [0,0,1], label = 'autologous')
    plt.plot(df_return['year'], df_return['count'], color = [0,0,1], label = 'autologous')
    ax.legend(loc = 'center left')
    plt.title('Clinical Trials of MSC')
    plt.savefig('patents.png', bbox_inches='tight')
def count_per_year(refFile):
    """Count clinical trials per start year for one ClinicalTrials.gov export.

    refFile: CSV file name inside the local 'metadata' directory; must have a
    "Start Date" column.  Returns a DataFrame with 'year' (str, 2000-2024)
    and 'count' (int) columns.
    """
    ref_path = os.path.join( 'metadata')
    ref_file = os.path.join(ref_path, refFile)
    dfAllo = pd.read_csv(ref_file)
    startAllo = list(dfAllo["Start Date"])
    years = []
    for start in startAllo:
        # str() guards against NaN dates; year is the last whitespace token
        start = str(start)
        fullDate = start.split(' ')
        year = fullDate[-1]
        years.append(year)
    dfAllo['Start Year'] = years
    # print(years)
    unique_years, unique_counts = [], []
    for year in np.arange(2000, 2025, 1):
        year = str(year)
        df = dfAllo  # NOTE(review): dead assignment, overwritten on the next line
        df = dfAllo[ dfAllo['Start Year']==year]
        unique_years.append(year)
        unique_counts.append(len(list(df['Start Year'])))
    df_return = pd.DataFrame()
    df_return['year'] = unique_years
    df_return['count'] = unique_counts
    print(df_return)
    return(df_return)
| en | 0.456956 | # clinical_gov_url = 'https://clinicaltrials.gov/ct2/results?cond=&term=&type=&rslt=&age_v=&gndr=&intr=allogenic+AND+msc&titles=&outc=&spons=&lead=&id=&cntry=&state=&city=&dist=&locn=&rsub=&strd_s=&strd_e=&prcd_s=&prcd_e=&sfpd_s=&sfpd_e=&rfpd_s=&rfpd_e=&lupd_s=&lupd_e=&sort=' # retrieve_clinical(clinical_gov_url) # print(years) | 2.545362 | 3 |
test/sam_quest_tests.py | roryj/samquest | 3 | 6621612 | import unittest
from src.sam_quest import handle_game_state
from src.models import RequestType
from test_resources import get_game_state_table, MockTwitterApi
from moto import mock_dynamodb2
@mock_dynamodb2
class TestSAMQuest(unittest.TestCase):
    """End-to-end smoke test: create, join, start and play a game via tweets."""

    def test_tweet_processing(self):
        print('Im here!')
        twitter_api = MockTwitterApi()
        game_table = get_game_state_table()
        print(game_table.attribute_definitions)

        def make_tweet(status_id, reply_id, request_type, hashtags=None):
            # Build a tweet payload with the shared user/message fields.
            payload = {
                'user_name': 'rory_jacob',
                'status_message': 'Hello! Its me! Testing!',
                'status_id': status_id,
                'in_reply_to_status_id': reply_id,
                'request_type': str(request_type),
            }
            if hashtags is not None:
                payload['hashtags'] = hashtags
            return payload

        tweets = [
            make_tweet(1, 2, RequestType.CREATE_GAME),
            make_tweet(5, 100, RequestType.JOIN_GAME),
            make_tweet(5, 100, RequestType.START_GAME),
            make_tweet(5, 100, RequestType.MAKE_SELECTION, hashtags=['ReadNote']),
        ]
        handle_game_state(tweets, twitter_api, game_table)
        result = game_table.scan()
        print('Result: ' + str(result))
# Allow running this test module directly: `python sam_quest_tests.py`
if __name__ == '__main__':
    unittest.main()
| import unittest
from src.sam_quest import handle_game_state
from src.models import RequestType
from test_resources import get_game_state_table, MockTwitterApi
from moto import mock_dynamodb2
@mock_dynamodb2
class TestSAMQuest(unittest.TestCase):
def test_tweet_processing(self):
print('Im here!')
twitter_api = MockTwitterApi()
dynamodb_table = get_game_state_table()
print(dynamodb_table.attribute_definitions)
create_tweet = {
'user_name': 'rory_jacob',
'status_message': 'Hello! Its me! Testing!',
'status_id': 1,
'in_reply_to_status_id': 2,
'request_type': str(RequestType.CREATE_GAME)
}
join_tweet = {
'user_name': 'rory_jacob',
'status_message': 'Hello! Its me! Testing!',
'status_id': 5,
'in_reply_to_status_id': 100,
'request_type': str(RequestType.JOIN_GAME)
}
start_tweet = {
'user_name': 'rory_jacob',
'status_message': 'Hello! Its me! Testing!',
'status_id': 5,
'in_reply_to_status_id': 100,
'request_type': str(RequestType.START_GAME)
}
play_tweet = {
'user_name': 'rory_jacob',
'status_message': 'Hello! Its me! Testing!',
'status_id': 5,
'in_reply_to_status_id': 100,
'request_type': str(RequestType.MAKE_SELECTION),
'hashtags': ['ReadNote']
}
handle_game_state([create_tweet, join_tweet, start_tweet, play_tweet], twitter_api, dynamodb_table)
result = dynamodb_table.scan()
print('Result: ' + str(result))
if __name__ == '__main__':
unittest.main()
| none | 1 | 2.402904 | 2 | |
cli.py | asmodehn/crypy | 2 | 6621613 | <filename>cli.py
import cmd
import os
import random
import sys
class StackableCmd(cmd.Cmd):
    """Base interpreter whose prompt ends in '>' and which supports nesting."""

    def __init__(self, prompt, completekey='tab', stdin=None, stdout=None):
        self.prompt = f"{prompt}>"
        super().__init__(completekey=completekey, stdin=stdin, stdout=stdout)

    def precmd(self, command_line):
        """Pre-dispatch hook; the input line is forwarded untouched."""
        return command_line

    def postcmd(self, should_stop, command_line):
        """Post-dispatch hook; the stop flag is forwarded untouched."""
        return should_stop

    def preloop(self):
        """No setup by default; subclasses override."""
        pass

    def postloop(self):
        """No teardown by default; subclasses override."""
        pass

    def do_exit(self, arg):
        """Terminate the current command loop."""
        return True

    def do_EOF(self, arg):
        # BROKEN : Closes stdin
        # TODO : fixit
        return True
class Trader(StackableCmd):
    """Interpreter level representing an open trading position."""

    def preloop(self):
        # Announce entry into the nested trading loop.
        print("entering position")

    def postloop(self):
        # Announce that the position level is being left.
        print("exiting position")
class Holder(StackableCmd):
    """Interpreter level for managing assets; can open nested trades."""

    def preloop(self):
        print("managing assets")

    def do_trade(self, pair="EUR/ETH"):
        """Drop into a Trader loop for *pair*, reading from a duplicated stdin."""
        duplicated_fd = os.dup(sys.stdin.fileno())
        with open(duplicated_fd, sys.stdin.mode) as stdin:
            trader = Trader(self.prompt + pair, stdin=stdin)
            trader.cmdloop(f"Position on {pair}")
# prototype of command user interface
class Desk(StackableCmd):
    """Top-level interpreter: watch pairs, invest in assets, open trades."""

    def do_watch(self, pair="EUR/ETH"):
        """Display the given pair (placeholder)."""
        print(f"displaying {pair}")

    def do_invest(self, asset="EUR"):
        """Drop into a Holder loop for *asset* with a random demo balance."""
        with open(os.dup(sys.stdin.fileno()), sys.stdin.mode) as stdin:
            holder = Holder(self.prompt + asset, stdin=stdin)
            balance = random.randint(0, 255)
            holder.cmdloop(f"Assets : {balance} {asset}")

    def do_trade(self, pair="EUR/ETH"):
        """Drop into a Trader loop for *pair*."""
        with open(os.dup(sys.stdin.fileno()), sys.stdin.mode) as stdin:
            trader = Trader(self.prompt + pair, stdin=stdin)
            trader.cmdloop("Trading EUR/ETH")
if __name__ == '__main__':
    # Use the exchange name from the command line, defaulting to "kraken".
    # An explicit argv-length check replaces the former broad
    # `except Exception`, which also masked unrelated errors raised
    # inside the Desk() constructor itself.
    exchange = sys.argv[1] if len(sys.argv) > 1 else "kraken"
    d = Desk(exchange)
    d.cmdloop("Welcome !")
| <filename>cli.py
import cmd
import os
import random
import sys
class StackableCmd(cmd.Cmd):
def __init__(self, prompt, completekey='tab', stdin=None, stdout=None):
self.prompt = prompt + ">"
super().__init__(completekey=completekey, stdin=stdin, stdout=stdout)
def precmd(self, line):
return line
def postcmd(self, stop, line):
return stop
def preloop(self):
pass
def postloop(self):
pass
def do_exit(self, arg):
return True
def do_EOF(self, arg):
# BROKEN : Closes stdin
# TODO : fixit
return True
class Trader(StackableCmd):
def preloop(self):
print("entering position")
def postloop(self):
print("exiting position")
class Holder(StackableCmd):
def preloop(self):
print("managing assets")
def do_trade(self, pair="EUR/ETH"):
with open(os.dup(sys.stdin.fileno()), sys.stdin.mode) as stdin:
t = Trader(self.prompt + pair, stdin=stdin)
t.cmdloop(f"Position on {pair}")
# prototype of command user interface
class Desk(StackableCmd):
def do_watch(self, pair="EUR/ETH"):
print(f"displaying {pair}")
def do_invest(self, asset="EUR"):
with open(os.dup(sys.stdin.fileno()), sys.stdin.mode) as stdin:
h = Holder(self.prompt + asset, stdin=stdin)
c = random.randint(0,255)
h.cmdloop(f"Assets : {c} {asset}")
def do_trade(self, pair="EUR/ETH"):
with open(os.dup(sys.stdin.fileno()), sys.stdin.mode) as stdin:
t = Trader(self.prompt + pair, stdin=stdin)
t.cmdloop("Trading EUR/ETH")
if __name__ == '__main__':
try:
d = Desk(sys.argv[1])
except Exception:
d = Desk("kraken")
d.cmdloop("Welcome !")
| en | 0.541559 | # BROKEN : Closes stdin # TODO : fixit # prototype of command user interface | 2.969778 | 3 |
hic/test_hic.py | zelhar/mg21 | 0 | 6621614 | <reponame>zelhar/mg21
import straw
import numpy as np
from scipy.sparse import coo_matrix
import scipy.sparse as sparse
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib import cm
#https://colab.research.google.com/drive/1548GgZe7ndeZseaIQ1YQxnB5rMZWSsSj
straw.straw?
res = 100000*5
spmat = straw.straw(
"KR",
"../../mnt/Yiftach_Kolb_project_hic_genome_reconstruction/191-98_hg19_no_hap_EBV_MAPQ30_merged.hic",
"1", "1",
unit="BP",
binsize=res,
)
for i in range(10):
print("{0}\t{1}\t{2}".format(spmat[0][i], spmat[1][i], spmat[2][i]))
n = np.max(spmat[0])
m = np.max(spmat[1])
n = max(n,m)
n
243199373 // res
#x = coo_matrix((spmat[2], (spmat[1], spmat[0])), shape=(n+1,n+1))
I = np.array(spmat[0][:])/res
J = np.array(spmat[1][:])/res
V = np.array(spmat[2][:])
sz=int(n/res)+1
M = coo_matrix((V,(I,J)),shape=(sz,sz))
#M = sparse.coo_matrix((V,(I,J)),shape=(sz,sz)).tocsr()
plt.ion()
x = M.toarray()
x[(np.isnan(x))] = 0
plt.matshow(np.log(x))
plt.colormaps()
plt.matshow(np.log10(x), cmap=cm.hot)
marks = np.zeros_like(x)
marks
plt.cla()
#marks = np.tri(sz, sz, -1)*500
#plt.matshow(np.log(marks))
marks = np.zeros(sz)
marks[192419497//res] = sz
marks[249250621//res] = sz
plt.plot(np.arange(sz), marks)
#plt.imshow(25500*np.log(x))
#plt.imshow(x)
plt.show()
plt.cla()
plt.close()
#sns.heatmap(np.log(x))
def getMatrixAsFlattenedVector(normalization, filepath, chrom, resolution, dozscore=False):
    """Build one long 1-D vector from per-chromosome Hi-C contact matrices.

    For every chromosome in the module-level ``chrs`` list, a contact map is
    fetched via straw, symmetrised, flattened, and appended to the running
    vector.  If ``dozscore`` is True the final vector is z-scored.

    NOTE(review): binning uses the module-level ``res``/``chrs``/``chr_sizes``
    globals rather than the ``resolution`` argument (which is only passed on
    to straw) — confirm intent.
    NOTE(review): every iteration queries ``chrom`` instead of the loop
    chromosome ``i``; this looks like a bug but is left unchanged here. TODO.
    """
    for i in chrs:
        result = straw.straw(normalization, filepath, chrom, chrom, 'BP', resolution)
        I = np.array(result[0][:]) / res
        J = np.array(result[1][:]) / res
        V = np.array(result[2][:])
        sz = int(chr_sizes[str(i)] / res) + 1
        M = sparse.coo_matrix((V, (I, J)), shape=(sz, sz)).tocsr()
        # make symmetric instead of upper triangular
        N = M + M.T - sparse.diags(M.diagonal(), dtype=int)
        A = N.reshape(1, sz * sz)
        # Bug fix: the original used `i is not 1` (object identity); value
        # equality is intended — identity comparisons on ints are unreliable.
        if i != 1:
            vector = np.concatenate([vector, A.toarray().flatten()])
        else:
            vector = A.toarray().flatten()
    if dozscore:
        vector = stats.zscore(vector)
    return vector
| import straw
import numpy as np
from scipy.sparse import coo_matrix
import scipy.sparse as sparse
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib import cm
#https://colab.research.google.com/drive/1548GgZe7ndeZseaIQ1YQxnB5rMZWSsSj
straw.straw?
res = 100000*5
spmat = straw.straw(
"KR",
"../../mnt/Yiftach_Kolb_project_hic_genome_reconstruction/191-98_hg19_no_hap_EBV_MAPQ30_merged.hic",
"1", "1",
unit="BP",
binsize=res,
)
for i in range(10):
print("{0}\t{1}\t{2}".format(spmat[0][i], spmat[1][i], spmat[2][i]))
n = np.max(spmat[0])
m = np.max(spmat[1])
n = max(n,m)
n
243199373 // res
#x = coo_matrix((spmat[2], (spmat[1], spmat[0])), shape=(n+1,n+1))
I = np.array(spmat[0][:])/res
J = np.array(spmat[1][:])/res
V = np.array(spmat[2][:])
sz=int(n/res)+1
M = coo_matrix((V,(I,J)),shape=(sz,sz))
#M = sparse.coo_matrix((V,(I,J)),shape=(sz,sz)).tocsr()
plt.ion()
x = M.toarray()
x[(np.isnan(x))] = 0
plt.matshow(np.log(x))
plt.colormaps()
plt.matshow(np.log10(x), cmap=cm.hot)
marks = np.zeros_like(x)
marks
plt.cla()
#marks = np.tri(sz, sz, -1)*500
#plt.matshow(np.log(marks))
marks = np.zeros(sz)
marks[192419497//res] = sz
marks[249250621//res] = sz
plt.plot(np.arange(sz), marks)
#plt.imshow(25500*np.log(x))
#plt.imshow(x)
plt.show()
plt.cla()
plt.close()
#sns.heatmap(np.log(x))
def getMatrixAsFlattenedVector(normalization, filepath, chrom, resolution, dozscore=False):
for i in chrs:
result = straw.straw(normalization, filepath, chrom, chrom, 'BP', resolution)
I=np.array(result[0][:])/res
J=np.array(result[1][:])/res
V=np.array(result[2][:])
sz=int(chr_sizes[str(i)]/res)+1
M=sparse.coo_matrix((V,(I,J)),shape=(sz,sz)).tocsr()
# make symmetric instead of upper triangular
N=M+M.T-sparse.diags(M.diagonal(),dtype=int)
A=N.reshape(1,sz*sz)
if (i is not 1):
vector = np.concatenate([vector, A.toarray().flatten()])
else:
vector = A.toarray().flatten()
if dozscore:
vector = stats.zscore(vector)
return vector | en | 0.224252 | #https://colab.research.google.com/drive/1548GgZe7ndeZseaIQ1YQxnB5rMZWSsSj #x = coo_matrix((spmat[2], (spmat[1], spmat[0])), shape=(n+1,n+1)) #M = sparse.coo_matrix((V,(I,J)),shape=(sz,sz)).tocsr() #marks = np.tri(sz, sz, -1)*500 #plt.matshow(np.log(marks)) #plt.imshow(25500*np.log(x)) #plt.imshow(x) #sns.heatmap(np.log(x)) # make symmetric instead of upper triangular | 2.076137 | 2 |
sentence-reading/question_frame.py | michalovsky/knowlegde-based-ai-mini-projects | 0 | 6621615 | <reponame>michalovsky/knowlegde-based-ai-mini-projects<gh_stars>0
class QuestionFrame:
    """Frame holding the parsed pieces of a question sentence."""

    def __init__(self, question_words: list, subjects: list, noun: str):
        self.question_words = question_words  # interrogative words found
        self.subjects = subjects              # subject phrases
        self.noun = noun                      # head noun of the question

    def __str__(self):
        parts = (self.question_words, self.subjects, self.noun)
        return "question words: {}, subjects: {}, noun: {}".format(*parts)
| class QuestionFrame:
def __init__(self, question_words: list, subjects: list, noun: str):
self.question_words = question_words
self.subjects = subjects
self.noun = noun
def __str__(self):
return f"question words: {self.question_words}, subjects: {self.subjects}, noun: {self.noun}" | none | 1 | 3.270805 | 3 | |
torchmeta/transforms/target_transforms.py | brando90/pytorch-meta | 1,704 | 6621616 | from torchvision.transforms import Compose, Resize, ToTensor
import PIL
class SegmentationPairTransform(object):
    """Resize an (image, mask) pair to a square and convert both to tensors.

    The mask is resized with nearest-neighbour interpolation so that label
    ids are never blended by the resampling.
    """

    def __init__(self, target_size):
        square = (target_size, target_size)
        self.image_transform = Compose([Resize(square), ToTensor()])
        self.mask_transform = Compose([
            Resize(square, interpolation=PIL.Image.NEAREST),
            ToTensor(),
        ])

    def __call__(self, image, mask):
        return self.image_transform(image), self.mask_transform(mask)
class TargetTransform(object):
    """Abstract callable applied to dataset targets; subclasses implement __call__."""

    def __call__(self, target):
        raise NotImplementedError()

    def __repr__(self):
        # Report the concrete subclass name.
        return type(self).__name__
class DefaultTargetTransform(TargetTransform):
    """Map (label, augmentation) targets to (label, augmentation_index).

    Index 0 is reserved for the un-augmented class (augmentation is None);
    each entry of ``class_augmentations`` gets index i + 1.
    """

    def __init__(self, class_augmentations):
        super(DefaultTargetTransform, self).__init__()
        self.class_augmentations = class_augmentations
        self._augmentations = {aug: idx + 1
                               for idx, aug in enumerate(class_augmentations)}
        self._augmentations[None] = 0

    def __call__(self, target):
        assert isinstance(target, tuple) and len(target) == 2
        label, augmentation = target
        return (label, self._augmentations[augmentation])
| from torchvision.transforms import Compose, Resize, ToTensor
import PIL
class SegmentationPairTransform(object):
def __init__(self, target_size):
self.image_transform = Compose([Resize((target_size, target_size)), ToTensor()])
self.mask_transform = Compose([Resize((target_size, target_size),
interpolation=PIL.Image.NEAREST),
ToTensor()])
def __call__(self, image, mask):
image = self.image_transform(image)
mask = self.mask_transform(mask)
return image, mask
class TargetTransform(object):
def __call__(self, target):
raise NotImplementedError()
def __repr__(self):
return str(self.__class__.__name__)
class DefaultTargetTransform(TargetTransform):
def __init__(self, class_augmentations):
super(DefaultTargetTransform, self).__init__()
self.class_augmentations = class_augmentations
self._augmentations = dict((augmentation, i + 1)
for (i, augmentation) in enumerate(class_augmentations))
self._augmentations[None] = 0
def __call__(self, target):
assert isinstance(target, tuple) and len(target) == 2
label, augmentation = target
return (label, self._augmentations[augmentation])
| none | 1 | 2.737738 | 3 | |
app.py | webclinic017/alpha-2 | 2 | 6621617 | # ----------------------------------------------------------------------------#
# Imports
# ----------------------------------------------------------------------------#
# Flask stuffs
from flask import Flask, render_template, request, redirect, flash, url_for, session
# from flask_debugtoolbar import DebugToolbarExtension
# SQL stuffs
from flask_sqlalchemy import SQLAlchemy
# from sqlalchemy.ext.declarative import declarative_base
# Logging for Flask
import logging
from logging import Formatter, FileHandler
# Flask Login manager
from flask_login import LoginManager, UserMixin, login_user, logout_user, login_required
from werkzeug.security import generate_password_hash, check_password_hash
from werkzeug.utils import secure_filename
# Flask AP Scheduler
from flask_apscheduler import APScheduler
# AI-TB
# from aitblib.basic import Basic
from aitblib import helpers
from aitblib import runners
from aitblib import enrichments
from aitblib import charting
from aitblib import ai
from aitblib.Flask_forms import LoginForm, RegisterForm, ForgotForm, SetupForm
# System
import os
from shutil import copyfile
import oyaml as yaml
import ccxt
from datetime import datetime
# Testing only
import sys
# Remember these two
# print('This is error output', file=sys.stderr)
# print('This is standard output', file=sys.stdout)
# ----------------------------------------------------------------------------#
# App Config.
# ----------------------------------------------------------------------------#
# if os.environ.get("WERKZEUG_RUN_MAIN") == "true":
# Init and config Flask
app = Flask(__name__)
app.config.from_pyfile('conf/flask.py')
app.config.from_pyfile('conf/db-default.py')
# Setup global variables
confPath = app.root_path + os.path.sep + 'conf' + os.path.sep
dataPath = app.root_path + os.path.sep + 'data' + os.path.sep
logPath = app.root_path + os.path.sep + 'logs' + os.path.sep
statPath = app.root_path + os.path.sep + 'static' + os.path.sep
upPath = app.root_path + os.path.sep + 'tmp' + os.path.sep + 'uploads' + os.path.sep
# Add custom Jinja2-filter
def ffname(text):
    """Jinja filter: return *text* with its file extension stripped."""
    root, _extension = os.path.splitext(text)
    return root
def u2d(utc):
    """Jinja filter: millisecond UTC timestamp -> 'YYYY-MM-DD' (or '').

    Accepts anything int()-convertible; returns an empty string for values
    that cannot be parsed so templates render blanks instead of erroring.
    """
    try:
        return datetime.utcfromtimestamp(int(utc) / 1000).strftime('%Y-%m-%d')
    except (TypeError, ValueError, OverflowError, OSError):
        # Narrowed from `except BaseException`, which also swallowed
        # KeyboardInterrupt/SystemExit.
        return ''
app.add_template_filter(ffname)
app.add_template_filter(u2d)
# Custom DB setup
if os.path.exists(confPath + 'db.py'):
app.config.from_pyfile('conf/db.py')
# Init and start Login
login_manager = LoginManager()
login_manager.login_view = 'login'
login_manager.init_app(app)
# Init SQLAlchemy
db = SQLAlchemy(app)
# Initialize SQLAlchemy Object
class User(UserMixin, db.Model):
    """Login account row; UserMixin supplies the flask-login API."""
    id = db.Column(db.Integer, primary_key=True)  # primary keys are required by SQLAlchemy
    email = db.Column(db.String(100), unique=True)  # unique login identifier
    password = db.Column(db.String(100))  # presumably a werkzeug password hash — TODO confirm at registration site
    name = db.Column(db.String(100))  # display name
# Add tables if not added
try:
user = User.query.first()
except BaseException:
# No tables found set them up!
db.create_all()
print('Setting up Tables...', file=sys.stderr)
# This needs to be here for flask-login to work
@login_manager.user_loader
def load_user(user_id):
    """flask-login callback: reload a User from the session-stored id."""
    return User.query.get(int(user_id))
# Overwrite weird url for redirect Do Not Remove
@login_manager.unauthorized_handler
def unauthorized_callback():
    """Send anonymous visitors to /login (replaces the default '?next=' URL)."""
    return redirect('/login')
# APScheduler
# Configuration Object
class ConfigAPS(object):
    """Flask-APScheduler configuration (SCHEDULER_* keys)."""
    SCHEDULER_API_ENABLED = True  # expose the scheduler REST API
    SCHEDULER_JOB_DEFAULTS = {
        'coalesce': True,            # collapse missed runs into a single run
        'misfire_grace_time': 5,     # seconds a job may start late
        'max_instances': 1           # never run the same job concurrently
    }
# Test Job
# if os.environ.get("WERKZEUG_RUN_MAIN") == "true":
# Init Scheduler
scheduler = APScheduler()
# Config APS
app.config.from_object(ConfigAPS())
scheduler.init_app(app)
# Init used libraries
RunThe = runners.Runner(app.root_path, db)
AI = ai.AI(app.root_path, db)
# Data Download
@scheduler.task('interval', id='downData', seconds=30)
def downData():
    """Background job (every 30 s): run pending market-data downloads."""
    RunThe.dataDownload(True)
@scheduler.task('interval', id='upData', seconds=5)
def upData():
    """Background job (every 5 s): process pending data uploads."""
    RunThe.dataUpload()
@scheduler.task('interval', id='bkTest', seconds=5)
def bkTest():
    """Background job (every 5 s): run any queued backtests."""
    RunThe.backTest()
# Sentiment
@scheduler.task('cron', id='gTrend', hour='*')
def gTrend():
    """Hourly cron job: refresh Google Trends sentiment data."""
    RunThe.googleTrends()
@scheduler.task('cron', id='sentiRSS', hour='*')
def sentiRSS():
    """Hourly cron job: pull and score configured RSS feeds."""
    RunThe.sentiRSS()
# Train AIs
@scheduler.task('interval', id='trainAI', seconds=15)
def trainAI():
    """Background job (every 15 s): train any ANN flagged for training."""
    AI.trainANN()
# Minute by minute
@scheduler.task('cron', id='minuteJob', minute='*')
def minuteJob():
    """Placeholder cron job firing every minute (currently a no-op)."""
    # print('MinuteByMinute', file=sys.stdout)
    pass
# Hourly
# @scheduler.task('cron', id='hourlyjob', hour='*')
# def hourlyjob():
# print('Hourly', file=sys.stdout)
# # Daily
# @scheduler.task('cron', id='dailyjob', day='*')
# def dailyjob():
# print('Daily', file=sys.stdout)
# # Weekly
# @scheduler.task('cron', id='weeklyjob', week='*', day_of_week='sun')
# def weeklyjob():
# print('Weekly', file=sys.stdout)
scheduler.start()
# Automatically tear down SQLAlchemy.
@app.teardown_request
def shutdown_session(exception=None):
    """Release the scoped SQLAlchemy session after every request."""
    db.session.remove()
# Init Helper Class
do = helpers.Helper(app.root_path, db)
en = enrichments.Enrichment()
ch = charting.Chart(app.root_path, db)
# ----------------------------------------------------------------------------#
# Controllers.
# ----------------------------------------------------------------------------#
@app.route('/')
@login_required
def home():
    """Dashboard: show counts of connections, data feeds, samples and nuggets."""
    listings = {
        'con': do.listCfgFiles('conn'),
        'data': do.listCfgFiles('data'),
        'samples': do.listDataFiles('samples'),
        'nuggets': do.listDataFiles('nuggets'),
    }
    dataCounts = {key: len(files) for key, files in listings.items()}
    return render_template('pages/home.html', dataCounts=dataCounts)
@app.route('/connections', methods=['GET', 'POST'])
@login_required
def connections():
    """List exchange connections and handle the add/info/delete actions.

    POST requests carry an 'action' field selecting the operation; GET
    renders the list page.  NOTE(review): a POST with an unknown action
    falls through and returns None (Flask error) — confirm intended.
    """
    if request.method == 'POST':
        # Connection page wants something
        act = request.form['action']
        if act == 'add':
            # First page of adding Connection
            return render_template('pages/connections-add.html', action=act)
        if act == 'add2':
            # Second page of adding Connection
            mark = request.form['market']
            if mark == 'crypto':
                # Offer every exchange ccxt supports
                ex = ccxt.exchanges
                return render_template('pages/connections-add.html', action=act, market=mark, exch=ex, len=len(ex))
            if mark == 'forex':
                return render_template('pages/connections-add.html', action=act, market=mark)
        if act == 'fin':
            # Setup of exchange has finished create the connection
            ex = request.form['exchSel']
            market = request.form['market']
            if market == 'crypto':
                do.createCryptoCon(ex)
            return redirect("/connections")
        if act == 'info':
            # Create temp exchange instance based on post data
            ex = request.form['ex']
            return do.createCryptoInfo(ex)
        if act == 'fullinfo':
            con = request.form['con']
            # Create pathname and load connection config
            cfname = confPath + 'conn' + os.path.sep + con + '.yml'
            with open(cfname, 'r') as file:
                cfdata = yaml.full_load(file)
            # Create table in html
            cftable = "<table>"
            for key in cfdata:
                cftable = cftable + "<tr><th>" + str(key) + "</th><td>" + str(cfdata[key]) + "</td></tr>"
            cftable = cftable + "</table>"
            return cftable
        if act == 'delete':
            # Delete connection
            flash('Connection Deleted!', 'important')
            # Delete file
            delfile = confPath + 'conn' + os.path.sep + request.form['con'] + '.yml'
            os.remove(delfile)
            return redirect("/connections")
    else:
        connections = do.allCfgs('conn')
        return render_template('pages/connections.html', connections=connections)
@app.route('/data', methods=['GET', 'POST'])
@login_required
def data():
    """List data feeds/samples and handle add, sample, toggle, delete, upload.

    POST requests carry an 'action' field selecting the operation; GET
    renders the list page with parsed sample metadata.
    """
    if request.method == 'POST':
        # Data page wants something
        act = request.form['action']
        cons = do.listCfgFiles('conn')
        cons = list(map(lambda x: x.replace('.yml', ''), cons))
        if act == 'add':
            # Add data page
            return render_template('pages/data-add.html', cons=cons)
        if act == 'gitquotes':
            # Get a list of quotes available from selected connection
            con = request.form['con']
            # Return HTML for quote select box
            return do.gitCryptoQuotes(con)
        if act == 'gitpairs':
            # Get a list of pairs with the selected quote
            con = request.form['con']
            quote = request.form['quote']
            # Return HTML for pairs select box
            return do.gitCryptoPairs(con, quote)
        if act == 'fin':
            # Setup of data has finished create the data YAML
            con = request.form['conSel']
            quote = request.form['quoteSel']
            symb = request.form['symbSel']
            start = request.form['start']
            do.createCryptoData(con, quote, symb, start)
            return redirect("/data")
        if act == 'sample':
            # Extract a sample (date range / timeframe / columns) from a feed
            data = request.form['data']
            fromdate = request.form['fromdate']
            todate = request.form['todate']
            timeframe = request.form['timeframe']
            selection = request.form['selection']
            do.createSample(data, fromdate, todate, timeframe, selection)
            return redirect("/data")
        if act == 'delete':
            # Delete file
            delfile = confPath + 'data' + os.path.sep + request.form['id'] + '.yml'
            os.remove(delfile)
            return redirect("/data")
        if act == 'enable':
            id = request.form['id']
            # Read Config file
            dCfgFile = do.readCfgFile('data', id + '.yml')
            # Flip enabled if needed
            if request.form['status'] == 'true':
                dCfgFile['enabled'] = True
            else:
                dCfgFile['enabled'] = False
            do.writeCfgFile('data', id, dCfgFile)
            return redirect("/data")
        if act == 'delete-sample':
            # Delete file
            delfile = dataPath + 'samples' + os.path.sep + request.form['id'] + '.pkl'
            os.remove(delfile)
            return redirect("/data")
        if act == 'upload':
            id = request.form['id']
            # If no files sent
            if 'file' not in request.files:
                flash('No file part')
                return redirect(request.url)
            file = request.files['file']
            # If filename empty. User sent page with file
            if file.filename == '':
                flash('No selected file')
                return redirect(request.url)
            # Test secure filename
            filename = secure_filename(file.filename)
            # Split into filename and extension
            nom, ext = os.path.splitext(filename)
            # Save file under the record id, keeping the uploaded extension
            file.save(upPath + id + ext)
            return 'Success'
    else:
        data = do.allCfgs('data')
        # List samples in folder ignoring .keep files
        samDatafiles = do.listDataFiles('samples')
        # Create data info array
        samples = []
        info = {}
        # Iterate through each file
        for dfile in samDatafiles:
            # Sample filenames encode: con_base_quote_timeframe_from_to
            dstr = os.path.splitext(dfile)[0]
            parts = dstr.split('_')
            # print(parts,file=sys.stderr)
            info = {'id': dstr, 'con': parts[0], 'symb': parts[1] + '/' + parts[2], 'timeframe': parts[3], 'from': int(parts[4]), 'to': int(parts[5])}
            samples.append(info)
        return render_template('pages/data.html', data=data, samples=samples)
@app.route('/alchemy-enrich', methods=['GET', 'POST'])
@login_required
def alchemyenrich():
    """List, create and delete enrichment definitions (conf/enrich/*.yml)."""
    if request.method == 'POST':
        # Data page wants something
        act = request.form['action']
        if act == 'add':
            # Show the "new enrichment" form with all available indicators
            enlist = en.listIndi()
            return render_template('pages/alchemy-enrich-add.html', enlist=enlist)
        if act == 'fin':
            # Persist the chosen indicators as a small YAML document.
            # (Dropped the unused `enriches = request.form['enriches']`
            # local; getlist() already returns the full list of values.)
            enname = request.form['enname']
            enrichlist = request.form.getlist('enriches')
            enstr = 'enname: ' + enname + "\n"
            enstr = enstr + 'riches: ' + ', '.join(enrichlist) + "\n"
            enstr = enstr + 'total: ' + str(len(enrichlist)) + "\n"
            do.writeCfgFile('enrich', enname, enstr)
            return redirect("/alchemy-enrich")
        if act == 'delete':
            # Delete file
            delfile = confPath + 'enrich' + os.path.sep + request.form['enname'] + '.yml'
            os.remove(delfile)
            return redirect("/alchemy-enrich")
    else:
        enriches = do.allCfgs('enrich')
        return render_template('pages/alchemy-enrich.html', enriches=enriches)
@app.route('/alchemy-nugs', methods=['GET', 'POST'])
@login_required
def alchemynugs():
    """List, create and delete 'nugget' datasets built from samples."""
    if request.method == 'POST':
        # Data page wants something
        act = request.form['action']
        if act == 'add':
            # Gather everything the "new nugget" form needs
            samplist = do.listDataFiles('samples')
            samples = do.samplesInfo(samplist)
            enrichlist = do.listCfgFiles('enrich')
            enrichments = [os.path.splitext(x)[0] for x in enrichlist]
            depens = en.listDepen()
            nanas = en.listNaN()
            return render_template('pages/alchemy-nugs-add.html', samples=samples, enrichments=enrichments, depens=depens, nanas=nanas)
        if act == 'fin':
            # Build the nugget: sample + independent/dependent vars + NaN policy
            sample = request.form['sample']
            indie = request.form['indie']
            depen = request.form['depen']
            nana = request.form['nana']
            do.createNugget(sample, indie, depen, nana)
            return redirect("/alchemy-nugs")
        if act == 'delete':
            # Delete file
            delfile = dataPath + 'nuggets' + os.path.sep + request.form['id'] + '.pkl'
            os.remove(delfile)
            return redirect("/alchemy-nugs")
    else:
        # List samples in folder ignoring .keep files
        nugfiles = do.listDataFiles('nuggets')
        # Pull nuggets info from above files
        nuggets = do.nuggetsInfo(nugfiles)
        return render_template('pages/alchemy-nugs.html', nuggets=nuggets)
@app.route('/observe', methods=['GET', 'POST'])
@login_required
def observe():
    """Render charts (nugget view / correlation / feature importance) for a nugget."""
    if request.method == 'POST':
        # Observe page wants something
        act = request.form['action']
        nugget = request.form['nugget']
        # Default to empty chart fragments so an unknown action cannot raise
        # a NameError below (previously script/div were only bound inside the
        # three recognised branches).
        script, div = '', ''
        if act == 'viewNug':
            script, div = ch.viewNugget(nugget)
        if act == 'viewCorr':
            script, div = ch.viewCorr(nugget)
        if act == 'viewFeat':
            script, div = ch.viewFeat(nugget)
        # List samples in folder ignoring .keep files
        nugfiles = do.listDataFiles('nuggets')
        # Pull nuggets info from above files
        nuggets = do.nuggetsInfo(nugfiles)
        return render_template('pages/observe.html', selected=nugget, nuggets=nuggets, script=script, div=div)
    else:
        # List nuggets in folder ignoring .keep files
        nugfiles = do.listDataFiles('nuggets')
        # Pull nuggets info from above files
        nuggets = do.nuggetsInfo(nugfiles)
        return render_template('pages/observe.html', nuggets=nuggets)
@app.route('/ai-ann', methods=['GET', 'POST'])
@login_required
def aiann():
    """List, create, train and delete ANN model definitions."""
    if request.method == 'POST':
        # ANN page wants something
        act = request.form['action']
        if act == 'add':
            # List nuggets in folder ignoring .keep files
            nugfiles = do.listDataFiles('nuggets')
            # Pull nuggets info from above files
            nuggets = do.nuggetsInfo(nugfiles)
            return render_template('pages/ai-ann-add.html', nuggets=nuggets)
        if act == 'fin':
            # Collect the model definition from the form
            nugget = request.form['nugget']
            nom = request.form['nom']
            scaler = request.form['scaler']
            try:
                # Optional checkbox; absent from the form when unchecked
                scarcity = request.form['scarcity']
            except BaseException:
                scarcity = "0"
            testsplit = request.form['testsplit']
            # Layers
            inputlayerunits = request.form['inputlayerunits']
            hiddenlayers = request.form['hiddenlayers']
            hiddenlayerunits = request.form['hiddenlayerunits']
            # Fitting
            optimizer = request.form['optimizer']
            loss = request.form['loss']
            metrics = request.form['metrics']
            batchsize = request.form['batchsize']
            epoch = request.form['epoch']
            do.createANN(nugget, nom, testsplit, scaler, scarcity, inputlayerunits, hiddenlayers, hiddenlayerunits, optimizer, loss, metrics, batchsize, epoch)
            return redirect("/ai-ann")
        if act == 'train':
            # Flag the ANN so the trainAI background job picks it up
            id = request.form['id']
            do.turnANNon(id)
            return redirect("/ai-ann")
        if act == 'delete':
            # Delete configuration files
            os.remove(confPath + 'aiann' + os.path.sep + request.form['id'] + '.yml')
            # Delete data files
            os.remove(dataPath + 'aiann' + os.path.sep + request.form['id'] + '.tf')
            os.remove(dataPath + 'aiann' + os.path.sep + request.form['id'] + '.pkl')
            os.remove(dataPath + 'aiann' + os.path.sep + request.form['id'] + '_sorted.pkl')
            # Delete static files
            os.remove(statPath + 'charts' + os.path.sep + request.form['id'] + '_acc.png')
            os.remove(statPath + 'charts' + os.path.sep + request.form['id'] + '_loss.png')
            return redirect("/ai-ann")
    else:
        anns = do.allCfgs('aiann')
        return render_template('pages/ai-ann.html', anns=anns)
@app.route('/sent-rss', methods=['GET', 'POST'])
@login_required
def sentrss():
    """List, create, toggle and delete RSS sentiment feeds."""
    if request.method == 'POST':
        # Sent RSS page wants something
        act = request.form['action']
        if act == 'add':
            return render_template('pages/sent-rss-add.html')
        if act == 'fin':
            do.createRSSFeed(request.form.to_dict())
            return redirect("/sent-rss")
        if act == 'delete':
            # Delete configuration file
            os.remove(confPath + 'sentrss' + os.path.sep + request.form['id'] + '.yml')
            return redirect("/sent-rss")
        if act == 'enable':
            id = request.form['id']
            # Read Config file
            dCfgFile = do.readCfgFile('sentrss', id + '.yml')
            # Flip enabled if needed
            if request.form['status'] == 'true':
                dCfgFile['enabled'] = True
            else:
                dCfgFile['enabled'] = False
            do.writeCfgFile('sentrss', id, dCfgFile)
            return redirect("/sent-rss")
    else:
        rssfeeds = do.allCfgs('sentrss')
        return render_template('pages/sent-rss.html', rssfeeds=rssfeeds)
@app.route('/sent-trend', methods=['GET', 'POST'])
@login_required
def senttrend():
    """List, create, toggle and delete Google Trends sentiment queries."""
    if request.method == 'POST':
        # Sent RSS page wants something
        act = request.form['action']
        if act == 'add':
            return render_template('pages/sent-trend-add.html')
        if act == 'fin':
            do.createGoogleTrend(request.form.to_dict())
            return redirect("/sent-trend")
        if act == 'delete':
            # Delete configuration file
            os.remove(confPath + 'senttrend' + os.path.sep + request.form['id'] + '.yml')
            return redirect("/sent-trend")
        if act == 'enable':
            id = request.form['id']
            # Read Config file
            dCfgFile = do.readCfgFile('senttrend', id + '.yml')
            # Flip enabled if needed
            if request.form['status'] == 'true':
                dCfgFile['enabled'] = True
            else:
                dCfgFile['enabled'] = False
            do.writeCfgFile('senttrend', id, dCfgFile)
            return redirect("/sent-trend")
    else:
        trends = do.allCfgs('senttrend')
        return render_template('pages/sent-trend.html', trends=trends)
@app.route('/sent-twit', methods=['GET', 'POST'])
@login_required
def senttwit():
    """List, create and delete Twitter sentiment feed configs."""
    if request.method == 'POST':
        # Sent Twitter page wants something
        act = request.form['action']
        if act == 'add':
            return render_template('pages/sent-twit-add.html')
        if act == 'fin':
            do.createTwitterFeed(request.form.to_dict())
            return redirect("/sent-twit")
        if act == 'delete':
            # Delete configuration file
            os.remove(confPath + 'senttwit' + os.path.sep + request.form['id'] + '.yml')
            return redirect("/sent-twit")
    else:
        twitfeeds = do.allCfgs('senttwit')
        return render_template('pages/sent-twit.html', twitfeeds=twitfeeds)
@app.route('/sent-nlp', methods=['GET', 'POST'])
@login_required
def sentnlp():
    """View or change which AI model is configured for sentiment NLP."""
    if request.method == 'POST':
        # Sent NLP page wants something
        act = request.form['action']
        if act == 'changeAI':
            sentai = do.readCfgFile('sentnlp', 'sent-ai.yml')
            sentai['ai'] = request.form['ai']
            do.writeCfgFile('sentnlp', 'sent-ai', sentai)
            return redirect("/sent-nlp")
    else:
        sentai = do.readCfgFile('sentnlp', 'sent-ai.yml')
        return render_template('pages/sent-nlp.html', ai=sentai)
@app.route('/backtest', methods=['GET', 'POST'])
@login_required
def backt():
    """List, create, run and delete backtests (config, data and report files)."""
    if request.method == 'POST':
        # Backtest page wants something
        act = request.form['action']
        if act == 'add':
            # List data in folder ignoring .keep files
            datafiles = do.listCfgFiles('data')
            aifiles = do.listCfgFiles('aiann')
            enfiles = do.listCfgFiles('enrich')
            return render_template('pages/backtest-add.html', datas=datafiles, ais=aifiles, ens=enfiles)
        if act == 'fin':
            do.createBacktest(request.form.to_dict())
            return redirect("/backtest")
        if act == 'run':
            id = request.form['id']
            do.turnBTon(id)
            return redirect("/backtest")
        if act == 'delete':
            # Delete configuration file
            os.remove(confPath + 'bt' + os.path.sep + request.form['id'] + '.yml')
            # Delete data files (results/exit/report may not exist if never run)
            os.remove(dataPath + 'bt' + os.path.sep + request.form['id'] + '.py')
            os.remove(dataPath + 'bt' + os.path.sep + request.form['id'] + '_entry.pkl')
            os.remove(dataPath + 'bt' + os.path.sep + request.form['id'] + '_native.pkl')
            if os.path.exists(dataPath + 'bt' + os.path.sep + request.form['id'] + '_results.csv'):
                os.remove(dataPath + 'bt' + os.path.sep + request.form['id'] + '_results.csv')
            if os.path.exists(dataPath + 'bt' + os.path.sep + request.form['id'] + '_exit.pkl'):
                os.remove(dataPath + 'bt' + os.path.sep + request.form['id'] + '_exit.pkl')
            # Delete static files
            if os.path.exists(statPath + 'bt' + os.path.sep + request.form['id'] + '_chart.html'):
                os.remove(statPath + 'bt' + os.path.sep + request.form['id'] + '_chart.html')
            if os.path.exists(statPath + 'bt' + os.path.sep + request.form['id'] + '_report.html'):
                os.remove(statPath + 'bt' + os.path.sep + request.form['id'] + '_report.html')
            return redirect("/backtest")
    else:
        bktests = do.allCfgs('bt')
        return render_template('pages/backtest.html', bktests=bktests)
@app.route('/trading')
@login_required
def trading():
    """Render the trading page."""
    return render_template('pages/trading.html')
@app.route('/ops-db')
@login_required
def opsdb():
    """Render the operations database page."""
    return render_template('pages/ops-db.html')
@app.route('/ops-run')
@login_required
def opsrun():
    """Render the runners page; maps runner display name -> log filename."""
    runners = {'Data Downloader (Aggressive)': 'dataDownloadAggro.log',
               'Data Uploader': 'dataUpload.log',
               'ANN Training': 'trainANN.log'}
    return render_template('pages/ops-run.html', runners=runners)
@app.route('/ops-users')
@login_required
def opsusers():
    """Render the user management page."""
    return render_template('pages/ops-users.html')
@app.route('/changelogs')
@login_required
def changelogs():
    """Render the changelogs page."""
    return render_template('pages/changelogs.html')
# ----------------------------------------------------------------------------#
# Login and Registration Templates
# ----------------------------------------------------------------------------#
# User templates
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Render the login form and authenticate the user on submit."""
    logform = LoginForm()
    name = request.form.get('name')
    # email = request.form.get('email')
    password = request.form.get('password')
    # remember = True if request.form.get('remember') else False
    if logform.validate_on_submit():
        # Check for existence of username
        user = User.query.filter_by(name=name).first()
        # Check if user actually exists and then
        # take the user supplied password, hash it, and compare it to the hashed password in database
        if not user or not check_password_hash(user.password, password):
            flash('Please check your login details and try again.')
            return redirect(url_for('login'))  # if user doesn't exist or password is wrong, reload the page
        login_user(user)
        return redirect(url_for('home'))
    return render_template('forms/login.html', form=logform)
@app.route("/logout")
def logout():
    """Log the current user out and return to the login page."""
    # Clear flashes
    session.pop('_flashes', None)
    logout_user()
    return redirect(url_for('login'))
@app.route('/register', methods=['GET', 'POST'])
def register():
    """Render the registration form and create a new user on submit."""
    form = RegisterForm()
    if form.validate_on_submit():
        # Get variables
        email = request.form.get('email')
        name = request.form.get('name')
        password = request.form.get('password')
        # Check for existing user and push back to register page if exists
        user = User.query.filter_by(email=email).first()
        if user:
            flash('Please check your login details and try again.')
            return redirect(url_for('register'))
        # Create a new user object of User with the above data
        new_user = User(email=email, name=name, password=generate_password_hash(password, method='sha256'))
        # Add this new user to the database
        db.session.add(new_user)
        db.session.commit()
        # Form finished successfully go to login
        return redirect('/login')
    return render_template('forms/register.html', form=form)
@app.route('/forgot')
def forgot():
    """Render the forgotten-password form (display only)."""
    form = ForgotForm(request.form)
    return render_template('forms/forgot.html', form=form)
# Log streamer
@app.route('/logstream/<alog>')
@login_required
def logstream(alog):
    """Return the contents of the named log file under logPath as plain text."""
    # NOTE(review): alog is joined to logPath unchecked; Flask's default
    # <alog> converter rejects '/', but confirm no '..'-style names can reach here.
    def generate(alog):
        # Yields the whole file in a single chunk
        with open(logPath + alog) as f:
            yield f.read()
    if os.path.exists(logPath + alog):
        return app.response_class(generate(alog), mimetype='text/plain')
    else:
        return 'Log file empty...'
# Setup
@app.route('/setup', methods=['GET', 'POST'])
def syssetup():
    """First-run setup: write the DB connection string to conf/db.py and load it."""
    form = SetupForm()
    if form.validate_on_submit():
        # Get variables
        dbtype = request.form.get('dbtype')
        hostname = request.form.get('hostname')
        database = request.form.get('database')
        uname = request.form.get('uname')
        password = request.form.get('password')
        # Create connection string
        # NOTE(review): credentials are persisted in plain text in conf/db.py
        conString = "SQLALCHEMY_DATABASE_URI = '" + dbtype + '://' + uname + ':' + password + '@' + hostname + '/' + database + "'"
        # Write to file
        with open(confPath + 'db.py', 'w') as f:
            f.write(conString)
        app.config.from_pyfile('conf/db.py')
        # Form finished successfully go to login
        return redirect('/setup')
    return render_template('forms/setup.html', form=form)
# Error handlers.
@app.errorhandler(500)
def internal_error(error):
    """Render the custom 500 page."""
    # db_session.rollback()
    return render_template('errors/500.html'), 500
@app.errorhandler(404)
def not_found_error(error):
    """Render the custom 404 page."""
    return render_template('errors/404.html'), 404
# Outside debug mode, write INFO-and-above application log records to error.log
if not app.debug:
    file_handler = FileHandler('error.log')
    file_handler.setFormatter(
        Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]')
    )
    app.logger.setLevel(logging.INFO)
    file_handler.setLevel(logging.INFO)
    app.logger.addHandler(file_handler)
    app.logger.info('errors')
# ----------------------------------------------------------------------------#
# Launch.
# ----------------------------------------------------------------------------#
# Default port:
if __name__ == '__main__':
    # Init debugger
    # toolbar = DebugToolbarExtension(app)
    # Overwrite config for flask-debugtoolbar
    # app.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False
    app.config['DEBUG'] = True
    app.config['UPLOAD_FOLDER'] = 'tmp'
    # Clear down all current run locks so crashed runs don't block new ones
    do.clearRunLocks()
    # Logging options DEBUG INFO WARNING ERROR CRITICAL
    # app.logger.setLevel(logging.CRITICAL)
    logging.getLogger('apscheduler').setLevel(logging.ERROR)
    # Create NLP configs from the shipped default if they don't exist
    if not os.path.exists(confPath + 'sentnlp' + os.path.sep + 'sent-ai.yml'):
        copyfile(confPath + 'sentnlp' + os.path.sep + 'sent-ai-def.yml', confPath + 'sentnlp' + os.path.sep + 'sent-ai.yml')
    # Run App
    # app.run(use_reloader=False) # threaded=False breaks APScheduler
    app.run()
# Or specify port manually:
'''
if __name__ == '__main__':
port = int(os.environ.get('PORT', 5000))
app.run(host='0.0.0.0', port=port)
'''
| # ----------------------------------------------------------------------------#
# Imports
# ----------------------------------------------------------------------------#
# Flask stuffs
from flask import Flask, render_template, request, redirect, flash, url_for, session
# from flask_debugtoolbar import DebugToolbarExtension
# SQL stuffs
from flask_sqlalchemy import SQLAlchemy
# from sqlalchemy.ext.declarative import declarative_base
# Logging for Flask
import logging
from logging import Formatter, FileHandler
# Flask Login manager
from flask_login import LoginManager, UserMixin, login_user, logout_user, login_required
from werkzeug.security import generate_password_hash, check_password_hash
from werkzeug.utils import secure_filename
# Flask AP Scheduler
from flask_apscheduler import APScheduler
# AI-TB
# from aitblib.basic import Basic
from aitblib import helpers
from aitblib import runners
from aitblib import enrichments
from aitblib import charting
from aitblib import ai
from aitblib.Flask_forms import LoginForm, RegisterForm, ForgotForm, SetupForm
# System
import os
from shutil import copyfile
import oyaml as yaml
import ccxt
from datetime import datetime
# Testing only
import sys
# Remember these two
# print('This is error output', file=sys.stderr)
# print('This is standard output', file=sys.stdout)
# ----------------------------------------------------------------------------#
# App Config.
# ----------------------------------------------------------------------------#
# if os.environ.get("WERKZEUG_RUN_MAIN") == "true":
# Init and config Flask
app = Flask(__name__)
app.config.from_pyfile('conf/flask.py')
app.config.from_pyfile('conf/db-default.py')
# Setup global variables
# Absolute paths (with trailing separator) used throughout the route handlers
confPath = app.root_path + os.path.sep + 'conf' + os.path.sep
dataPath = app.root_path + os.path.sep + 'data' + os.path.sep
logPath = app.root_path + os.path.sep + 'logs' + os.path.sep
statPath = app.root_path + os.path.sep + 'static' + os.path.sep
upPath = app.root_path + os.path.sep + 'tmp' + os.path.sep + 'uploads' + os.path.sep
# Add custom Jinja2-filter
def ffname(text):
    """Jinja2 filter: return *text* with its file extension removed."""
    base, _ext = os.path.splitext(text)
    return base
def u2d(utc):
    """Jinja2 filter: millisecond UTC timestamp -> 'YYYY-MM-DD' string.

    Returns '' for empty/non-numeric/out-of-range input so templates can
    render missing dates as blank cells.
    """
    try:
        return datetime.utcfromtimestamp(int(utc) / 1000).strftime('%Y-%m-%d')
    except (TypeError, ValueError, OverflowError, OSError):
        # Narrowed from BaseException: the blanket handler also swallowed
        # KeyboardInterrupt/SystemExit. These four cover everything int()
        # and utcfromtimestamp() raise for bad input.
        return ''
# Register the custom filters with Jinja2
app.add_template_filter(ffname)
app.add_template_filter(u2d)
# Custom DB setup: conf/db.py (written by /setup) overrides db-default.py
if os.path.exists(confPath + 'db.py'):
    app.config.from_pyfile('conf/db.py')
# Init and start Login
login_manager = LoginManager()
login_manager.login_view = 'login'
login_manager.init_app(app)
# Init SQLAlchemy
db = SQLAlchemy(app)
# Initialize SQLAlchemy Object
class User(UserMixin, db.Model):
    """Application user account (flask-login compatible via UserMixin)."""
    id = db.Column(db.Integer, primary_key=True)  # primary keys are required by SQLAlchemy
    email = db.Column(db.String(100), unique=True)
    password = db.Column(db.String(100))  # stores the werkzeug password hash, not plaintext
    name = db.Column(db.String(100))
# Add tables if not added: probe with a query and create schema on failure
try:
    user = User.query.first()
except BaseException:
    # No tables found set them up!
    db.create_all()
    print('Setting up Tables...', file=sys.stderr)
# This needs to be here for flask-login to work
# This needs to be here for flask-login to work
@login_manager.user_loader
def load_user(user_id):
    """flask-login callback: resolve a session user id to a User row."""
    return User.query.get(int(user_id))
# Overwrite weird url for redirect Do Not Remove
@login_manager.unauthorized_handler
def unauthorized_callback():
    """Send unauthenticated requests to the login page."""
    return redirect('/login')
# APScheduler
# Configuration Object
class ConfigAPS(object):
    """APScheduler settings: coalesce missed runs, single instance per job."""
    SCHEDULER_API_ENABLED = True
    SCHEDULER_JOB_DEFAULTS = {
        'coalesce': True,
        'misfire_grace_time': 5,
        'max_instances': 1
    }
# Test Job
# if os.environ.get("WERKZEUG_RUN_MAIN") == "true":
# Init Scheduler
# Init Scheduler
scheduler = APScheduler()
# Config APS
app.config.from_object(ConfigAPS())
scheduler.init_app(app)
# Init used libraries (background runner and ANN trainer share the app's DB)
RunThe = runners.Runner(app.root_path, db)
AI = ai.AI(app.root_path, db)
# Data Download
# Data Download
@scheduler.task('interval', id='downData', seconds=30)
def downData():
    """Every 30s: run the (aggressive) market-data download."""
    RunThe.dataDownload(True)
@scheduler.task('interval', id='upData', seconds=5)
def upData():
    """Every 5s: push downloaded data into the database."""
    RunThe.dataUpload()
@scheduler.task('interval', id='bkTest', seconds=5)
def bkTest():
    """Every 5s: pick up and execute pending backtests."""
    RunThe.backTest()
# Sentiment
@scheduler.task('cron', id='gTrend', hour='*')
def gTrend():
    """Hourly: refresh Google Trends sentiment data."""
    RunThe.googleTrends()
@scheduler.task('cron', id='sentiRSS', hour='*')
def sentiRSS():
    """Hourly: refresh RSS sentiment data."""
    RunThe.sentiRSS()
# Train AIs
@scheduler.task('interval', id='trainAI', seconds=15)
def trainAI():
    """Every 15s: pick up and train any queued ANNs."""
    AI.trainANN()
# Minute by minute
@scheduler.task('cron', id='minuteJob', minute='*')
def minuteJob():
    """Every minute: placeholder job, currently a no-op."""
    # print('MinuteByMinute', file=sys.stdout)
    pass
# Hourly
# @scheduler.task('cron', id='hourlyjob', hour='*')
# def hourlyjob():
# print('Hourly', file=sys.stdout)
# # Daily
# @scheduler.task('cron', id='dailyjob', day='*')
# def dailyjob():
# print('Daily', file=sys.stdout)
# # Weekly
# @scheduler.task('cron', id='weeklyjob', week='*', day_of_week='sun')
# def weeklyjob():
# print('Weekly', file=sys.stdout)
scheduler.start()
# Automatically tear down SQLAlchemy.
@app.teardown_request
def shutdown_session(exception=None):
    """Remove the scoped SQLAlchemy session after every request."""
    db.session.remove()
# Init Helper Class (config/data helpers, enrichment catalogue, chart builder)
do = helpers.Helper(app.root_path, db)
en = enrichments.Enrichment()
ch = charting.Chart(app.root_path, db)
# ----------------------------------------------------------------------------#
# Controllers.
# ----------------------------------------------------------------------------#
@app.route('/')
@login_required
def home():
    """Dashboard: counts of connections, data configs, samples and nuggets."""
    # Create files lists for config files
    dataCounts = {'con': len(do.listCfgFiles('conn')),
                  'data': len(do.listCfgFiles('data')),
                  'samples': len(do.listDataFiles('samples')),
                  'nuggets': len(do.listDataFiles('nuggets'))}
    # Render page
    return render_template('pages/home.html', dataCounts=dataCounts)
@app.route('/connections', methods=['GET', 'POST'])
@login_required
def connections():
    """List, create, inspect and delete exchange connections (YAML in conf/conn)."""
    if request.method == 'POST':
        # Connection page wants something
        act = request.form['action']
        if act == 'add':
            # First page of adding Connection
            return render_template('pages/connections-add.html', action=act)
        if act == 'add2':
            # Second page of adding Connection
            mark = request.form['market']
            if mark == 'crypto':
                ex = ccxt.exchanges
                return render_template('pages/connections-add.html', action=act, market=mark, exch=ex, len=len(ex))
            if mark == 'forex':
                return render_template('pages/connections-add.html', action=act, market=mark)
        if act == 'fin':
            # Setup of exchange has finished create the connection
            ex = request.form['exchSel']
            market = request.form['market']
            if market == 'crypto':
                do.createCryptoCon(ex)
            return redirect("/connections")
        if act == 'info':
            # Create temp exchange instance based on post data
            ex = request.form['ex']
            return do.createCryptoInfo(ex)
        if act == 'fullinfo':
            con = request.form['con']
            # Create pathname and load connection config
            cfname = confPath + 'conn' + os.path.sep + con + '.yml'
            with open(cfname, 'r') as file:
                cfdata = yaml.full_load(file)
            # Create table in html returned to the page via AJAX
            # NOTE(review): values are interpolated unescaped; confirm config
            # values can never contain user-controlled HTML.
            cftable = "<table>"
            for key in cfdata:
                cftable = cftable + "<tr><th>" + str(key) + "</th><td>" + str(cfdata[key]) + "</td></tr>"
            cftable = cftable + "</table>"
            return cftable
        if act == 'delete':
            # Delete connection
            flash('Connection Deleted!', 'important')
            # Delete file
            delfile = confPath + 'conn' + os.path.sep + request.form['con'] + '.yml'
            os.remove(delfile)
            return redirect("/connections")
    else:
        connections = do.allCfgs('conn')
        return render_template('pages/connections.html', connections=connections)
@app.route('/data', methods=['GET', 'POST'])
@login_required
def data():
    """Manage market data configs and samples: add/list/enable/delete/upload."""
    if request.method == 'POST':
        # Data page wants something
        act = request.form['action']
        # Connection names with the .yml suffix stripped, for select boxes
        cons = do.listCfgFiles('conn')
        cons = list(map(lambda x: x.replace('.yml', ''), cons))
        if act == 'add':
            # Add data page
            return render_template('pages/data-add.html', cons=cons)
        if act == 'gitquotes':
            # Get a list of quotes available from selected connection
            con = request.form['con']
            # Return HTML for quote select box
            return do.gitCryptoQuotes(con)
        if act == 'gitpairs':
            # Get a list of pairs with the selected quote
            con = request.form['con']
            quote = request.form['quote']
            # Return HTML for pairs select box
            return do.gitCryptoPairs(con, quote)
        if act == 'fin':
            # Setup of data has finished create the data YAML
            con = request.form['conSel']
            quote = request.form['quoteSel']
            symb = request.form['symbSel']
            start = request.form['start']
            do.createCryptoData(con, quote, symb, start)
            return redirect("/data")
        if act == 'sample':
            # Extract a sample (pickle) from an existing data config
            data = request.form['data']
            fromdate = request.form['fromdate']
            todate = request.form['todate']
            timeframe = request.form['timeframe']
            selection = request.form['selection']
            do.createSample(data, fromdate, todate, timeframe, selection)
            return redirect("/data")
        if act == 'delete':
            # Delete file
            delfile = confPath + 'data' + os.path.sep + request.form['id'] + '.yml'
            os.remove(delfile)
            return redirect("/data")
        if act == 'enable':
            id = request.form['id']
            # Read Config file
            dCfgFile = do.readCfgFile('data', id + '.yml')
            # Flip enabled if needed
            if request.form['status'] == 'true':
                dCfgFile['enabled'] = True
            else:
                dCfgFile['enabled'] = False
            do.writeCfgFile('data', id, dCfgFile)
            return redirect("/data")
        if act == 'delete-sample':
            # Delete file
            delfile = dataPath + 'samples' + os.path.sep + request.form['id'] + '.pkl'
            os.remove(delfile)
            return redirect("/data")
        if act == 'upload':
            id = request.form['id']
            # If no files sent
            if 'file' not in request.files:
                flash('No file part')
                return redirect(request.url)
            file = request.files['file']
            # If filename empty. User sent page with file
            if file.filename == '':
                flash('No selected file')
                return redirect(request.url)
            # Test secure filename
            filename = secure_filename(file.filename)
            # Split into filename and extension; stored name is <id><ext>
            nom, ext = os.path.splitext(filename)
            # Save file
            file.save(upPath + id + ext)
            return 'Success'
    else:
        data = do.allCfgs('data')
        # List samples in folder ignoring .keep files
        samDatafiles = do.listDataFiles('samples')
        # Create data info array
        samples = []
        info = {}
        # Iterate through each file; filename encodes
        # <con>_<base>_<quote>_<timeframe>_<fromMs>_<toMs>
        for dfile in samDatafiles:
            dstr = os.path.splitext(dfile)[0]
            parts = dstr.split('_')
            # print(parts,file=sys.stderr)
            info = {'id': dstr, 'con': parts[0], 'symb': parts[1] + '/' + parts[2], 'timeframe': parts[3], 'from': int(parts[4]), 'to': int(parts[5])}
            samples.append(info)
        return render_template('pages/data.html', data=data, samples=samples)
@app.route('/alchemy-enrich', methods=['GET', 'POST'])
@login_required
def alchemyenrich():
    """List, create and delete enrichment configurations (YAML in conf/enrich)."""
    if request.method == 'POST':
        # Enrichment page wants something
        act = request.form['action']
        if act == 'add':
            # Show the add form with every available indicator
            enlist = en.listIndi()
            return render_template('pages/alchemy-enrich-add.html', enlist=enlist)
        if act == 'fin':
            # Build the YAML text for the new enrichment config.
            # getlist() collects all checked 'enriches' boxes directly (and,
            # unlike the previous request.form['enriches'] access, does not
            # 400 when none are selected).
            enname = request.form['enname']
            enrichlist = request.form.getlist('enriches')
            enstr = 'enname: ' + enname + "\n"
            enstr = enstr + 'riches: ' + ', '.join(enrichlist) + "\n"
            enstr = enstr + 'total: ' + str(len(enrichlist)) + "\n"
            do.writeCfgFile('enrich', enname, enstr)
            return redirect("/alchemy-enrich")
        if act == 'delete':
            # Delete file
            delfile = confPath + 'enrich' + os.path.sep + request.form['enname'] + '.yml'
            os.remove(delfile)
            return redirect("/alchemy-enrich")
    else:
        enriches = do.allCfgs('enrich')
        return render_template('pages/alchemy-enrich.html', enriches=enriches)
@app.route('/alchemy-nugs', methods=['GET', 'POST'])
@login_required
def alchemynugs():
    """List, create and delete nuggets (enriched sample pickles in data/nuggets)."""
    if request.method == 'POST':
        # Nugget page wants something
        act = request.form['action']
        if act == 'add':
            # Gather everything the add form needs: samples, enrichments,
            # dependent-variable options and NaN-handling strategies
            samplist = do.listDataFiles('samples')
            samples = do.samplesInfo(samplist)
            enrichlist = do.listCfgFiles('enrich')
            enrichments = [os.path.splitext(x)[0] for x in enrichlist]
            depens = en.listDepen()
            nanas = en.listNaN()
            return render_template('pages/alchemy-nugs-add.html', samples=samples, enrichments=enrichments, depens=depens, nanas=nanas)
        if act == 'fin':
            sample = request.form['sample']
            indie = request.form['indie']
            depen = request.form['depen']
            nana = request.form['nana']
            do.createNugget(sample, indie, depen, nana)
            return redirect("/alchemy-nugs")
        if act == 'delete':
            # Delete file
            delfile = dataPath + 'nuggets' + os.path.sep + request.form['id'] + '.pkl'
            os.remove(delfile)
            return redirect("/alchemy-nugs")
    else:
        # List samples in folder ignoring .keep files
        nugfiles = do.listDataFiles('nuggets')
        # Pull nuggets info from above files
        nuggets = do.nuggetsInfo(nugfiles)
        return render_template('pages/alchemy-nugs.html', nuggets=nuggets)
@app.route('/observe', methods=['GET', 'POST'])
@login_required
def observe():
    """Render nugget charts: raw view, correlation matrix or feature ranking."""
    if request.method == 'POST':
        # Observe page wants something
        act = request.form['action']
        nugget = request.form['nugget']
        # Default to empty chart markup: previously script/div were only
        # bound inside the recognised branches, so an unknown action raised
        # NameError (HTTP 500) at render time.
        script, div = '', ''
        if act == 'viewNug':
            script, div = ch.viewNugget(nugget)
        if act == 'viewCorr':
            script, div = ch.viewCorr(nugget)
        if act == 'viewFeat':
            script, div = ch.viewFeat(nugget)
        # List samples in folder ignoring .keep files
        nugfiles = do.listDataFiles('nuggets')
        # Pull nuggets info from above files
        nuggets = do.nuggetsInfo(nugfiles)
        return render_template('pages/observe.html', selected=nugget, nuggets=nuggets, script=script, div=div)
    else:
        # List nuggets in folder ignoring .keep files
        nugfiles = do.listDataFiles('nuggets')
        # Pull nuggets info from above files
        nuggets = do.nuggetsInfo(nugfiles)
        return render_template('pages/observe.html', nuggets=nuggets)
@app.route('/ai-ann', methods=['GET', 'POST'])
@login_required
def aiann():
    """List, create, train and delete ANN configurations and their artifacts."""
    if request.method == 'POST':
        # ANN page wants something
        act = request.form['action']
        if act == 'add':
            # List nuggets in folder ignoring .keep files
            nugfiles = do.listDataFiles('nuggets')
            # Pull nuggets info from above files
            nuggets = do.nuggetsInfo(nugfiles)
            return render_template('pages/ai-ann-add.html', nuggets=nuggets)
        if act == 'fin':
            nugget = request.form['nugget']
            nom = request.form['nom']
            scaler = request.form['scaler']
            # Checkbox is absent from the form when unticked, hence the fallback
            try:
                scarcity = request.form['scarcity']
            except BaseException:
                scarcity = "0"
            testsplit = request.form['testsplit']
            # Layers
            inputlayerunits = request.form['inputlayerunits']
            hiddenlayers = request.form['hiddenlayers']
            hiddenlayerunits = request.form['hiddenlayerunits']
            # Fitting
            optimizer = request.form['optimizer']
            loss = request.form['loss']
            metrics = request.form['metrics']
            batchsize = request.form['batchsize']
            epoch = request.form['epoch']
            do.createANN(nugget, nom, testsplit, scaler, scarcity, inputlayerunits, hiddenlayers, hiddenlayerunits, optimizer, loss, metrics, batchsize, epoch)
            return redirect("/ai-ann")
        if act == 'train':
            id = request.form['id']
            do.turnANNon(id)
            return redirect("/ai-ann")
        if act == 'delete':
            # Delete configuration files
            os.remove(confPath + 'aiann' + os.path.sep + request.form['id'] + '.yml')
            # Delete data files (model, scaler, sorted nugget)
            os.remove(dataPath + 'aiann' + os.path.sep + request.form['id'] + '.tf')
            os.remove(dataPath + 'aiann' + os.path.sep + request.form['id'] + '.pkl')
            os.remove(dataPath + 'aiann' + os.path.sep + request.form['id'] + '_sorted.pkl')
            # Delete static files (training charts)
            os.remove(statPath + 'charts' + os.path.sep + request.form['id'] + '_acc.png')
            os.remove(statPath + 'charts' + os.path.sep + request.form['id'] + '_loss.png')
            return redirect("/ai-ann")
    else:
        anns = do.allCfgs('aiann')
        return render_template('pages/ai-ann.html', anns=anns)
@app.route('/sent-rss', methods=['GET', 'POST'])
@login_required
def sentrss():
    """List, create, delete and enable/disable sentiment RSS feed configs."""
    if request.method == 'POST':
        # Sent RSS page wants something
        act = request.form['action']
        if act == 'add':
            return render_template('pages/sent-rss-add.html')
        if act == 'fin':
            do.createRSSFeed(request.form.to_dict())
            return redirect("/sent-rss")
        if act == 'delete':
            # Delete configuration file
            os.remove(confPath + 'sentrss' + os.path.sep + request.form['id'] + '.yml')
            return redirect("/sent-rss")
        if act == 'enable':
            id = request.form['id']
            # Read Config file
            dCfgFile = do.readCfgFile('sentrss', id + '.yml')
            # Flip enabled if needed
            if request.form['status'] == 'true':
                dCfgFile['enabled'] = True
            else:
                dCfgFile['enabled'] = False
            do.writeCfgFile('sentrss', id, dCfgFile)
            return redirect("/sent-rss")
    else:
        rssfeeds = do.allCfgs('sentrss')
        return render_template('pages/sent-rss.html', rssfeeds=rssfeeds)
@app.route('/sent-trend', methods=['GET', 'POST'])
@login_required
def senttrend():
    """List, create, delete and enable/disable Google Trends configs."""
    if request.method == 'POST':
        # Sent Trend page wants something
        act = request.form['action']
        if act == 'add':
            return render_template('pages/sent-trend-add.html')
        if act == 'fin':
            do.createGoogleTrend(request.form.to_dict())
            return redirect("/sent-trend")
        if act == 'delete':
            # Delete configuration file
            os.remove(confPath + 'senttrend' + os.path.sep + request.form['id'] + '.yml')
            return redirect("/sent-trend")
        if act == 'enable':
            id = request.form['id']
            # Read Config file
            dCfgFile = do.readCfgFile('senttrend', id + '.yml')
            # Flip enabled if needed
            if request.form['status'] == 'true':
                dCfgFile['enabled'] = True
            else:
                dCfgFile['enabled'] = False
            do.writeCfgFile('senttrend', id, dCfgFile)
            return redirect("/sent-trend")
    else:
        trends = do.allCfgs('senttrend')
        return render_template('pages/sent-trend.html', trends=trends)
@app.route('/sent-twit', methods=['GET', 'POST'])
@login_required
def senttwit():
    """List, create and delete Twitter sentiment feed configs."""
    if request.method == 'POST':
        # Sent Twitter page wants something
        act = request.form['action']
        if act == 'add':
            return render_template('pages/sent-twit-add.html')
        if act == 'fin':
            do.createTwitterFeed(request.form.to_dict())
            return redirect("/sent-twit")
        if act == 'delete':
            # Delete configuration file
            os.remove(confPath + 'senttwit' + os.path.sep + request.form['id'] + '.yml')
            return redirect("/sent-twit")
    else:
        twitfeeds = do.allCfgs('senttwit')
        return render_template('pages/sent-twit.html', twitfeeds=twitfeeds)
@app.route('/sent-nlp', methods=['GET', 'POST'])
@login_required
def sentnlp():
    """View or change which AI model is configured for sentiment NLP."""
    if request.method == 'POST':
        # Sent NLP page wants something
        act = request.form['action']
        if act == 'changeAI':
            sentai = do.readCfgFile('sentnlp', 'sent-ai.yml')
            sentai['ai'] = request.form['ai']
            do.writeCfgFile('sentnlp', 'sent-ai', sentai)
            return redirect("/sent-nlp")
    else:
        sentai = do.readCfgFile('sentnlp', 'sent-ai.yml')
        return render_template('pages/sent-nlp.html', ai=sentai)
@app.route('/backtest', methods=['GET', 'POST'])
@login_required
def backt():
    """List, create, run and delete backtests (config, data and report files)."""
    if request.method == 'POST':
        # Backtest page wants something
        act = request.form['action']
        if act == 'add':
            # List data in folder ignoring .keep files
            datafiles = do.listCfgFiles('data')
            aifiles = do.listCfgFiles('aiann')
            enfiles = do.listCfgFiles('enrich')
            return render_template('pages/backtest-add.html', datas=datafiles, ais=aifiles, ens=enfiles)
        if act == 'fin':
            do.createBacktest(request.form.to_dict())
            return redirect("/backtest")
        if act == 'run':
            id = request.form['id']
            do.turnBTon(id)
            return redirect("/backtest")
        if act == 'delete':
            # Delete configuration file
            os.remove(confPath + 'bt' + os.path.sep + request.form['id'] + '.yml')
            # Delete data files (results/exit/report may not exist if never run)
            os.remove(dataPath + 'bt' + os.path.sep + request.form['id'] + '.py')
            os.remove(dataPath + 'bt' + os.path.sep + request.form['id'] + '_entry.pkl')
            os.remove(dataPath + 'bt' + os.path.sep + request.form['id'] + '_native.pkl')
            if os.path.exists(dataPath + 'bt' + os.path.sep + request.form['id'] + '_results.csv'):
                os.remove(dataPath + 'bt' + os.path.sep + request.form['id'] + '_results.csv')
            if os.path.exists(dataPath + 'bt' + os.path.sep + request.form['id'] + '_exit.pkl'):
                os.remove(dataPath + 'bt' + os.path.sep + request.form['id'] + '_exit.pkl')
            # Delete static files
            if os.path.exists(statPath + 'bt' + os.path.sep + request.form['id'] + '_chart.html'):
                os.remove(statPath + 'bt' + os.path.sep + request.form['id'] + '_chart.html')
            if os.path.exists(statPath + 'bt' + os.path.sep + request.form['id'] + '_report.html'):
                os.remove(statPath + 'bt' + os.path.sep + request.form['id'] + '_report.html')
            return redirect("/backtest")
    else:
        bktests = do.allCfgs('bt')
        return render_template('pages/backtest.html', bktests=bktests)
@app.route('/trading')
@login_required
def trading():
    """Render the trading page."""
    return render_template('pages/trading.html')
@app.route('/ops-db')
@login_required
def opsdb():
    """Render the operations database page."""
    return render_template('pages/ops-db.html')
@app.route('/ops-run')
@login_required
def opsrun():
    """Render the runners page; maps runner display name -> log filename."""
    runners = {'Data Downloader (Aggressive)': 'dataDownloadAggro.log',
               'Data Uploader': 'dataUpload.log',
               'ANN Training': 'trainANN.log'}
    return render_template('pages/ops-run.html', runners=runners)
@app.route('/ops-users')
@login_required
def opsusers():
    """Render the user management page."""
    return render_template('pages/ops-users.html')
@app.route('/changelogs')
@login_required
def changelogs():
    """Render the changelogs page."""
    return render_template('pages/changelogs.html')
# ----------------------------------------------------------------------------#
# Login and Registration Templates
# ----------------------------------------------------------------------------#
# User templates
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Render the login form and authenticate the user on submit."""
    logform = LoginForm()
    name = request.form.get('name')
    # email = request.form.get('email')
    password = request.form.get('password')
    # remember = True if request.form.get('remember') else False
    if logform.validate_on_submit():
        # Check for existence of username
        user = User.query.filter_by(name=name).first()
        # Check if user actually exists and then
        # take the user supplied password, hash it, and compare it to the hashed password in database
        if not user or not check_password_hash(user.password, password):
            flash('Please check your login details and try again.')
            return redirect(url_for('login'))  # if user doesn't exist or password is wrong, reload the page
        login_user(user)
        return redirect(url_for('home'))
    return render_template('forms/login.html', form=logform)
@app.route("/logout")
def logout():
    """Log the current user out and return to the login page."""
    # Clear flashes
    session.pop('_flashes', None)
    logout_user()
    return redirect(url_for('login'))
@app.route('/register', methods=['GET', 'POST'])
def register():
    """Render the registration form and create a new user on submit."""
    form = RegisterForm()
    if form.validate_on_submit():
        # Get variables
        email = request.form.get('email')
        name = request.form.get('name')
        password = request.form.get('password')
        # Check for existing user and push back to register page if exists
        user = User.query.filter_by(email=email).first()
        if user:
            flash('Please check your login details and try again.')
            return redirect(url_for('register'))
        # Create a new user object of User with the above data
        new_user = User(email=email, name=name, password=generate_password_hash(password, method='sha256'))
        # Add this new user to the database
        db.session.add(new_user)
        db.session.commit()
        # Form finished successfully go to login
        return redirect('/login')
    return render_template('forms/register.html', form=form)
@app.route('/forgot')
def forgot():
    """Render the forgotten-password form (display only)."""
    form = ForgotForm(request.form)
    return render_template('forms/forgot.html', form=form)
# Log streamer
@app.route('/logstream/<alog>')
@login_required
def logstream(alog):
    """Return the contents of the named log file under logPath as plain text."""
    # NOTE(review): alog is joined to logPath unchecked; Flask's default
    # <alog> converter rejects '/', but confirm no '..'-style names can reach here.
    def generate(alog):
        # Yields the whole file in a single chunk
        with open(logPath + alog) as f:
            yield f.read()
    if os.path.exists(logPath + alog):
        return app.response_class(generate(alog), mimetype='text/plain')
    else:
        return 'Log file empty...'
# Setup
@app.route('/setup', methods=['GET', 'POST'])
def syssetup():
    """First-run setup: write the DB connection string to conf/db.py and load it."""
    form = SetupForm()
    if form.validate_on_submit():
        # Get variables
        dbtype = request.form.get('dbtype')
        hostname = request.form.get('hostname')
        database = request.form.get('database')
        uname = request.form.get('uname')
        password = request.form.get('password')
        # Create connection string
        # NOTE(review): credentials are persisted in plain text in conf/db.py
        conString = "SQLALCHEMY_DATABASE_URI = '" + dbtype + '://' + uname + ':' + password + '@' + hostname + '/' + database + "'"
        # Write to file
        with open(confPath + 'db.py', 'w') as f:
            f.write(conString)
        app.config.from_pyfile('conf/db.py')
        # Form finished successfully go to login
        return redirect('/setup')
    return render_template('forms/setup.html', form=form)
# Error handlers.
@app.errorhandler(500)
def internal_error(error):
    """Render the custom 500 page."""
    # db_session.rollback()
    return render_template('errors/500.html'), 500
@app.errorhandler(404)
def not_found_error(error):
    """Render the custom 404 page."""
    return render_template('errors/404.html'), 404
# In production (debug off), mirror INFO-and-above app log records to a file.
if not app.debug:
    file_handler = FileHandler('error.log')
    file_handler.setFormatter(
        Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]')
    )
    # Both the logger and the handler must be at INFO for records to reach the file.
    app.logger.setLevel(logging.INFO)
    file_handler.setLevel(logging.INFO)
    app.logger.addHandler(file_handler)
    app.logger.info('errors')
# ----------------------------------------------------------------------------#
# Launch.
# ----------------------------------------------------------------------------#
# Default port:
if __name__ == '__main__':
    # Init debugger
    # toolbar = DebugToolbarExtension(app)
    # Overwrite config for flask-debugtoolbar
    # app.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False
    # NOTE(review): DEBUG is hard-coded on — disable before deploying.
    app.config['DEBUG'] = True
    app.config['UPLOAD_FOLDER'] = 'tmp'
    # Clear down all current run locks
    do.clearRunLocks()
    # Logging options DEBUG INFO WARNING ERROR CRITICAL
    # app.logger.setLevel(logging.CRITICAL)
    logging.getLogger('apscheduler').setLevel(logging.ERROR)
    # Create NLP configs if they don't exist
    if not os.path.exists(confPath + 'sentnlp' + os.path.sep + 'sent-ai.yml'):
        copyfile(confPath + 'sentnlp' + os.path.sep + 'sent-ai-def.yml', confPath + 'sentnlp' + os.path.sep + 'sent-ai.yml')
    # Run App
    # app.run(use_reloader=False) # threaded=False breaks APScheduler
    app.run()
# Or specify port manually:
'''
if __name__ == '__main__':
    port = int(os.environ.get('PORT', 5000))
    app.run(host='0.0.0.0', port=port)
'''
| en | 0.569561 | # ----------------------------------------------------------------------------# # Imports # ----------------------------------------------------------------------------# # Flask stuffs # from flask_debugtoolbar import DebugToolbarExtension # SQL stuffs # from sqlalchemy.ext.declarative import declarative_base # Logging for Flask # Flask Login manager # Flask AP Scheduler # AI-TB # from aitblib.basic import Basic # System # Testing only # Remember these two # print('This is error output', file=sys.stderr) # print('This is standard output', file=sys.stdout) # ----------------------------------------------------------------------------# # App Config. # ----------------------------------------------------------------------------# # if os.environ.get("WERKZEUG_RUN_MAIN") == "true": # Init and config Flask # Setup global variables # Add custom Jinja2-filter # Custom DB setup # Init and start Login # Init SQLAlchemy # Initialize SQLAlchemy Object # primary keys are required by SQLAlchemy # Add tables if not added # No tables found set them up! # This needs to be here for flask-login to work # Overwrite weird url for redirect Do Not Remove # APScheduler # Configuration Object # Test Job # if os.environ.get("WERKZEUG_RUN_MAIN") == "true": # Init Scheduler # Config APS # Init used libraries # Data Download # Sentiment # Train AIs # Minute by minute # print('MinuteByMinute', file=sys.stdout) # Hourly # @scheduler.task('cron', id='hourlyjob', hour='*') # def hourlyjob(): # print('Hourly', file=sys.stdout) # # Daily # @scheduler.task('cron', id='dailyjob', day='*') # def dailyjob(): # print('Daily', file=sys.stdout) # # Weekly # @scheduler.task('cron', id='weeklyjob', week='*', day_of_week='sun') # def weeklyjob(): # print('Weekly', file=sys.stdout) # Automatically tear down SQLAlchemy. # Init Helper Class # ----------------------------------------------------------------------------# # Controllers. 
# ----------------------------------------------------------------------------# # Create files lists for config files # Render page # Connection page wants something # First page of adding Connection # Second page of adding Connection # Setup of exchange has finished create the connection # Create temp exchange instance based on post data # Create pathname and load connection config # Create table in html # Delete connection # Delete file # Data page wants something # Add data page # Get a list of quotes available from selected connection # Return HTML for quote select box # Get a list of pairs with the selected quote # Return HTML for pairs select box # Setup of data has finished create the data YAML # Setup of data has finished create the data YAML # Delete file # Read Config file # Flip enabled if needed # Delete file # If no files sent # If filename empty. User sent page with file # Test secure filename # Split into filename and extension # Save file # List samples in folder ignoring .keep files # Create data info array # Iterate through each file # print(parts,file=sys.stderr) # Data page wants something # Add data page # Delete file # Data page wants something # Delete file # List samples in folder ignoring .keep files # Pull nuggets info from above files # Observe page wants something # List samples in folder ignoring .keep files # Pull nuggets info from above files # List nuggets in folder ignoring .keep files # Pull nuggets info from above files # ANN page wants something # List nuggets in folder ignoring .keep files # Pull nuggets info from above files # Layers # Fitting # Delete configuration files # Delete data files # Delete static files # Sent RSS page wants something # Delete configuration file # Read Config file # Flip enabled if needed # Sent RSS page wants something # Delete configuration file # Read Config file # Flip enabled if needed # Sent RSS page wants something # Delete configuration file # Sent NLP page wants something # ANN page wants 
something # List data in folder ignoring .keep files # Delete configuration file # Delete data files # Delete static files # ----------------------------------------------------------------------------# # Login and Registration Templates # ----------------------------------------------------------------------------# # User templates # email = request.form.get('email') # remember = True if request.form.get('remember') else False # Check for existence of username # Check if user actually exists and then # take the user supplied password, hash it, and compare it to the hashed password in database # if user doesn't exist or password is wrong, reload the page # Clear flashes # Get variables # Check for existsing user and push back to register page if exists # Create a new user object of User with the above data # Add this new user to the database # Form finished successfully go to login # Log streamer # Setup # Get variables # Create connection string # Write to file # Form finished successfully go to login # Error handlers. # db_session.rollback() # ----------------------------------------------------------------------------# # Launch. # ----------------------------------------------------------------------------# # Default port: # Init debugger # toolbar = DebugToolbarExtension(app) # Overwrite config for flask-debugtoolbar # app.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False # Clear down all current run locks # Logging options DEBUG INFO WARNING ERROR CRITICAL # app.logger.setLevel(logging.CRITICAL) # Create NLP configs if they don't exist # Run App # app.run(use_reloader=False) # threaded=False breaks APScheduler # Or specify port manually: if __name__ == '__main__': port = int(os.environ.get('PORT', 5000)) app.run(host='0.0.0.0', port=port) | 1.82119 | 2 |
process_history.py | LulutasoAI/Extract_portfolio_info | 0 | 6621618 | import pickle_around
from matplotlib import pyplot as plt
import datetime
import matplotlib
if __name__ == "__main__":
data_loaded = pickle_around.load_object()
historical_assets_JPY = []
Data_date = []
for date in data_loaded:
print("The data of ",date)
extracted = data_loaded[date]
#print(type(extracted[0]))
USD_amount = extracted[1]
JPY_amount = extracted[2]
Data_date.append(datetime.datetime.strptime(date,"%Y%m%d"))
print(extracted[0]," Stock prices of that day.")
print("The total asset in USD : ",USD_amount,"USD")
print("The total asset in JPY : ",JPY_amount,"JPY")
print("USDJPY rate at that day : ",(int(JPY_amount)/round(USD_amount,2)))
historical_assets_JPY.append(int(JPY_amount))
#print(type(Data_date))
Date_data = matplotlib.dates.date2num(Data_date)
plt.plot(Data_date,historical_assets_JPY)
plt.xlabel("Date")
plt.ylabel("Asset in JPY")
plt.tight_layout()
plt.title("Your Asset history in JPY")
plt.show() | import pickle_around
from matplotlib import pyplot as plt
import datetime
import matplotlib
if __name__ == "__main__":
data_loaded = pickle_around.load_object()
historical_assets_JPY = []
Data_date = []
for date in data_loaded:
print("The data of ",date)
extracted = data_loaded[date]
#print(type(extracted[0]))
USD_amount = extracted[1]
JPY_amount = extracted[2]
Data_date.append(datetime.datetime.strptime(date,"%Y%m%d"))
print(extracted[0]," Stock prices of that day.")
print("The total asset in USD : ",USD_amount,"USD")
print("The total asset in JPY : ",JPY_amount,"JPY")
print("USDJPY rate at that day : ",(int(JPY_amount)/round(USD_amount,2)))
historical_assets_JPY.append(int(JPY_amount))
#print(type(Data_date))
Date_data = matplotlib.dates.date2num(Data_date)
plt.plot(Data_date,historical_assets_JPY)
plt.xlabel("Date")
plt.ylabel("Asset in JPY")
plt.tight_layout()
plt.title("Your Asset history in JPY")
plt.show() | en | 0.242665 | #print(type(extracted[0])) #print(type(Data_date)) | 3.146996 | 3 |
food_ke/entailment/train.py | IBPA/FoodAtlas | 1 | 6621619 | # -*- coding: utf-8 -*-
"""Model training methods.
Authors:
<NAME> - <EMAIL>
<NAME> - <EMAIL>
Todo:
* Docstring
* Batch size for predictions arg.
* move early stopping code block as a callable function.
* scalable approach for setting class distribution. we can add a additional
column to indicate template type.
"""
import logging
import time
from copy import deepcopy
from typing import Optional
import click
import pandas as pd
import torch
import wandb
from imblearn.under_sampling import RandomUnderSampler
from transformers import (
AdamW,
AutoModelForSequenceClassification,
AutoModelWithHeads,
AutoTokenizer,
)
from food_ke.entailment.constants import WHOLE_PLANT_TOKEN
from food_ke.entailment.dataset import EntailmentDataset
# Module-wide logging format: file, line and function name on every record.
logging.basicConfig(
    format="[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s",
    level=logging.DEBUG,
)
# Module-level device used by train/validate/predict helpers below.
if torch.cuda.is_available():
    device = torch.device("cuda")
    logging.info("using CUDA")
else:
    device = torch.device("cpu")
    logging.info("using CPU")
def set_class_distribution(
    df: pd.DataFrame, ratio: dict, food_part_only: bool = True
) -> pd.DataFrame:
    """
    Balances the class distribution in df by undersampling. Currently only
    supports df with a single hypothesis template type.

    Parameters
    ----------
    df : pd.DataFrame
        Data to balance. Must contain a ``gold_label`` column.
    ratio : dict
        Dict with keys of class name and values of desired ratio in the
        output data. Values must sum to 1 (within floating-point tolerance).
    food_part_only : bool
        Whether the data contains only food part templates. Only supports
        True for now.

    Returns
    -------
    pd.DataFrame
        Data with balanced class distribution.

    Raises
    ------
    ValueError
        If the ratios do not sum to 1.
    NotImplementedError
        If ``food_part_only`` is False.
    """
    # Compare with a tolerance: ratios such as 1/3 + 1/3 + 1/3 do not sum
    # to exactly 1.0 in floating point and were previously rejected.
    if abs(sum(ratio.values()) - 1.0) > 1e-9:
        raise ValueError("Class ratios must sum to 1, got %r" % (ratio,))
    if not food_part_only:
        raise NotImplementedError("Only supports food part templates for now")
    # Classes requested with ratio 0 are dropped outright.
    classes_zero = [k for k, v in ratio.items() if v == 0]
    df = df[~df["gold_label"].isin(classes_zero)]
    ratio = {k: v for k, v in ratio.items() if v > 0}
    class_distr = df["gold_label"].value_counts()
    # Scale the requested ratios so the rarest class keeps all of its rows
    # and every other class is undersampled relative to it.
    mult_factor = class_distr.min()
    argmin = class_distr.idxmin()
    ratio_ints = {
        k: int(v * mult_factor / ratio[argmin]) for k, v in ratio.items()
    }
    rus = RandomUnderSampler(random_state=42, sampling_strategy=ratio_ints)
    X_res, y_res = rus.fit_resample(
        df.drop(["gold_label"], axis=1), df["gold_label"]
    )
    # Reassemble the resampled features and labels into a single frame.
    return pd.concat(
        [
            pd.DataFrame(
                X_res, columns=df.drop(["gold_label"], axis=1).columns
            ),
            pd.DataFrame(y_res, columns=["gold_label"]),
        ],
        axis=1,
    )
def load_data(data, class_distribution: dict = None):
    """Attach a ``row_id`` column and optionally rebalance the classes.

    Logs the ``gold_label`` value counts before (and, when a distribution is
    given, after) rebalancing via :func:`set_class_distribution`.
    """
    data["row_id"] = data.index
    logging.info("original value counts")
    logging.info(data.gold_label.value_counts())
    if not class_distribution:
        return data
    data = set_class_distribution(data, class_distribution)
    logging.info("resampled value counts")
    logging.info(data.gold_label.value_counts())
    return data
# def load_data(
# train_data_location: str, val_data_location: str, class_distribution: dict
# ):
# train_df = pd.read_csv(train_data_location, encoding="latin-1")
# eval_df = pd.read_csv(val_data_location, encoding="latin-1")
# train_df["row_id"] = train_df.index
# eval_df["row_id"] = eval_df.index
# print("train and eval original value counts")
# print(train_df.gold_label.value_counts())
# print(eval_df.gold_label.value_counts())
# print("\n\n")
# train_df_res = set_class_distribution(train_df, class_distribution)
# eval_df_res = set_class_distribution(eval_df, class_distribution)
# print("resampled value counts")
# print(train_df_res.gold_label.value_counts())
# print(eval_df_res.gold_label.value_counts())
# print("\n\n")
# if (
# not len(
# set(train_df_res.orig_idx).intersection(set(eval_df_res.orig_idx))
# )
# == 0
# ):
# raise ValueError(
# "train_df and eval_df have overlapping original example indices"
# )
# return train_df_res, eval_df_res
def load_model(
    model_name,
    tokenizer_name,
    adapter_name=None,
    optimizer_kwargs: dict = None,
):
    """Load a sequence-classification model, its tokenizer and an AdamW optimizer.

    Args:
        model_name: HF model identifier; "mnli"/"biobert" substrings select
            the sequence-classification head, otherwise an adapter-capable
            ``AutoModelWithHeads`` is used.
        tokenizer_name: HF tokenizer identifier.
        adapter_name: optional adapter to load and activate.
        optimizer_kwargs: optional kwargs forwarded to ``AdamW``.

    Returns:
        Tuple of ``(model, tokenizer, optimizer)`` with the model moved to
        the module-level ``device``.
    """
    if "mnli" in model_name and "biobert" in model_name:
        model = AutoModelForSequenceClassification.from_pretrained(
            model_name, num_labels=3
        )
    elif "mnli" in model_name:
        model = AutoModelForSequenceClassification.from_pretrained(model_name)
    else:
        model = AutoModelWithHeads.from_pretrained(model_name)
    if adapter_name:
        # Try the default adapter source first; fall back to the HF hub if
        # that lookup fails for any reason (deliberate broad fallback).
        try:
            adapter = model.load_adapter(adapter_name, source=None)
        except Exception:
            adapter = model.load_adapter(adapter_name, source="hf")
        model.active_adapters = adapter
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)
    if optimizer_kwargs is not None:
        optimizer = AdamW(model.parameters(), **optimizer_kwargs)
    else:
        optimizer = AdamW(model.parameters())
    model = model.to(device)
    return model, tokenizer, optimizer
def multi_acc(y_pred, y_test):
    """Return the fraction of rows whose argmax class matches *y_test* (as a tensor)."""
    predicted_labels = torch.log_softmax(y_pred, dim=1).argmax(dim=1)
    num_correct = (predicted_labels == y_test).sum().float()
    return num_correct / float(y_test.size(0))
def validate(val_loader, optimizer, model, flatten_neutral_contradicts):
    """Run one evaluation pass over *val_loader*.

    Returns ``(val_loss, val_acc, total_predictions)``, where
    ``total_predictions`` is a list of
    ``(row_id, predicted_label, score_class0, score_class1)`` tuples.
    When ``flatten_neutral_contradicts`` is True, label 2 is collapsed
    into label 1 before computing loss/accuracy.
    """
    model.eval()
    total_val_acc = 0
    total_val_loss = 0
    total_predictions = []
    with torch.no_grad():
        for batch_idx, (
            pair_token_ids,
            mask_ids,
            seg_ids,
            y,
            row_ids,
        ) in enumerate(val_loader):
            if flatten_neutral_contradicts:
                y[y == 2] = 1
            # NOTE(review): zeroing gradients inside a no_grad eval loop
            # looks unnecessary — confirm the optimizer is needed here.
            optimizer.zero_grad()
            pair_token_ids = pair_token_ids.to(device)
            mask_ids = mask_ids.to(device)
            # seg_ids are moved to the device but never passed to the model.
            seg_ids = seg_ids.to(device)
            labels = y.to(device)
            loss, predictions = model(
                pair_token_ids, attention_mask=mask_ids, labels=labels
            ).values()
            acc = multi_acc(predictions, labels)
            prediction_scores = torch.softmax(predictions, dim=1)
            prediction_labels = prediction_scores.argmax(dim=1)
            # Only the scores of the first two classes are recorded.
            total_predictions += list(
                zip(
                    row_ids.tolist(),
                    prediction_labels.tolist(),
                    prediction_scores[:, 0].tolist(),
                    prediction_scores[:, 1].tolist(),
                )
            )
            total_val_loss += loss.item()
            total_val_acc += acc.item()
    # Averages are per-batch (assumes a non-empty loader).
    val_acc = total_val_acc / len(val_loader)
    val_loss = total_val_loss / len(val_loader)
    return val_loss, val_acc, total_predictions
def log(
    train_loss=None,
    train_acc=None,
    val_loss=None,
    val_acc=None,
    epoch=None,
    steps=None,
    examples=None,
):
    """Send any provided train/val metrics to wandb, tagged with step info.

    Each metric is logged only when it is not ``None``; ``epoch`` is
    reported 1-based when given.
    """
    # Guard the 1-based conversion: epoch defaults to None and
    # ``None + 1`` would raise TypeError.
    stepinfo = {
        "batch": steps,
        "epoch": None if epoch is None else epoch + 1,
        "example": examples,
    }
    if train_loss is not None:
        wandb.log({**{"train_loss": train_loss}, **stepinfo})
    if val_loss is not None:
        wandb.log({**{"val_loss": val_loss}, **stepinfo})
    if train_acc is not None:
        wandb.log({**{"train_acc": train_acc}, **stepinfo})
    if val_acc is not None:
        wandb.log({**{"val_acc": val_acc}, **stepinfo})
def train(
    model,
    train_loader,
    val_loader,
    optimizer,
    epochs: int,
    validate_every_steps: Optional[int] = None,
    validate_every_examples: Optional[int] = None,
    early_stopping: bool = False,
    patience: Optional[int] = None,
    stopping_threshold: Optional[float] = None,
    flatten_neutral_contradicts: bool = False,
    device=device,
    checkpoint_dir: str = None,
    adapter_dir: str = None,
    adapter_name: str = None,
    adapter_checkpoint_name: str = None,
    prediction_file: str = None,
):
    """Fine-tune *model* on *train_loader*, validating against *val_loader*.

    Validation runs every ``validate_every_steps`` batches or every
    ``validate_every_examples`` examples (mutually exclusive); when neither
    is set it runs once per epoch. Optional early stopping halts training
    after ``patience`` consecutive validations whose loss improved by less
    than ``stopping_threshold``. The model with the best validation accuracy
    is kept and, when the corresponding paths are given, saved as a
    checkpoint and/or adapter; the final epoch's validation predictions can
    be written to ``prediction_file``.

    Args:
        validate_every_steps (int): validate after seeing this many
            steps/batches
        validate_every_examples (int): validate after seeing this many
            examples
    """
    if (
        validate_every_steps is not None
        and validate_every_examples is not None
    ):
        raise ValueError(
            "validate_every_examples and validate_every_steps are mutually "
            "exclusive"
        )
    if early_stopping and (patience is None or stopping_threshold is None):
        raise ValueError(
            "patience and stopping_threshold must be provided if "
            "early_stopping is True."
        )
    model = model.to(device)
    best_model = deepcopy(model)
    best_val_acc = 0
    steps_since_val = 0
    examples_since_val = 0
    total_steps = 0
    total_examples = 0
    # Early stopping statistics.
    if early_stopping:
        is_early_stopping = False
        count_patient = 0
        val_loss_last = float("inf")
        val_losses_early_stopping = []
    for epoch in range(epochs):
        start = time.time()
        model.train()
        total_train_loss = 0
        total_train_acc = 0
        for batch_idx, (pair_token_ids, mask_ids, seg_ids, y, _) in enumerate(
            train_loader
        ):
            if flatten_neutral_contradicts:
                y[y == 2] = 1
            optimizer.zero_grad()
            pair_token_ids = pair_token_ids.to(device)
            mask_ids = mask_ids.to(device)
            seg_ids = seg_ids.to(device)
            labels = y.to(device)
            loss, prediction = model(
                pair_token_ids, attention_mask=mask_ids, labels=labels
            ).values()
            acc = multi_acc(prediction, labels)
            loss.backward()
            optimizer.step()
            total_train_loss += loss.item()
            total_train_acc += acc.item()
            steps_since_val += 1
            batch_size = len(pair_token_ids)
            examples_since_val += batch_size
            total_steps += 1
            total_examples += batch_size
            # Mid-epoch validation, counted in batches.
            if validate_every_steps is not None:
                if steps_since_val > validate_every_steps:
                    val_loss, val_acc, _ = validate(
                        val_loader,
                        optimizer,
                        model,
                        flatten_neutral_contradicts,
                    )
                    steps_since_val = examples_since_val = 0
                    log(
                        val_loss=val_loss,
                        val_acc=val_acc,
                        epoch=epoch,
                        steps=total_steps,
                        examples=total_examples,
                    )
                    # Early stopping: count consecutive validations with
                    # less than stopping_threshold improvement.
                    if epoch > 0 and early_stopping:
                        if val_loss + stopping_threshold > val_loss_last:
                            if count_patient == 0:
                                val_losses_early_stopping.append(val_loss_last)
                            count_patient += 1
                            val_losses_early_stopping.append(val_loss)
                            if count_patient == patience:
                                is_early_stopping = True
                                print(
                                    f"{patience} consecutive steps "
                                    f"({validate_every_steps} batches) with "
                                    f"less than {stopping_threshold} "
                                    "improvement. Last consecutive losses: "
                                    f"{val_losses_early_stopping}. "
                                    f"Performing early stopping."
                                )
                        else:
                            count_patient = 0
                            val_losses_early_stopping = []
                        val_loss_last = val_loss
            # Mid-epoch validation, counted in examples.
            elif validate_every_examples is not None:
                if examples_since_val > validate_every_examples:
                    val_loss, val_acc, _ = validate(
                        val_loader,
                        optimizer,
                        model,
                        flatten_neutral_contradicts,
                    )
                    steps_since_val = examples_since_val = 0
                    log(
                        val_loss=val_loss,
                        val_acc=val_acc,
                        epoch=epoch,
                        steps=total_steps,
                        examples=total_examples,
                    )
                    if epoch > 0 and early_stopping:
                        if val_loss + stopping_threshold > val_loss_last:
                            if count_patient == 0:
                                val_losses_early_stopping.append(val_loss_last)
                            count_patient += 1
                            val_losses_early_stopping.append(val_loss)
                            if count_patient == patience:
                                is_early_stopping = True
                                print(
                                    f"{patience} consecutive steps "
                                    f"({validate_every_examples} examples) "
                                    f"with less than {stopping_threshold} "
                                    "improvement. Last consecutive losses: "
                                    f"{val_losses_early_stopping}. "
                                    f"Performing early stopping."
                                )
                        else:
                            count_patient = 0
                            val_losses_early_stopping = []
                        val_loss_last = val_loss
            if early_stopping and is_early_stopping:
                break
        # Use actual trained batch num if early stopping happened within
        # steps/examples.
        train_acc = total_train_acc / min(batch_idx + 1, len(train_loader))
        train_loss = total_train_loss / min(batch_idx + 1, len(train_loader))
        val_loss, val_acc, predictions = validate(
            val_loader, optimizer, model, flatten_neutral_contradicts
        )
        # Apply early stopping to epochs by default.
        if (
            epoch > 0
            and early_stopping
            and (
                validate_every_steps is None
                and validate_every_examples is None
            )
        ):
            if val_loss + stopping_threshold > val_loss_last:
                if count_patient == 0:
                    val_losses_early_stopping.append(val_loss_last)
                count_patient += 1
                val_losses_early_stopping.append(val_loss)
                if count_patient == patience:
                    is_early_stopping = True
                    print(
                        f"{patience} consecutive epochs with less than "
                        f"{stopping_threshold} improvement. "
                        f"Last consecutive losses: "
                        f"{val_losses_early_stopping}. "
                        "Performing early stopping."
                    )
            else:
                count_patient = 0
                val_losses_early_stopping = []
            val_loss_last = val_loss
        end = time.time()
        hours, rem = divmod(end - start, 3600)
        minutes, seconds = divmod(rem, 60)
        print(
            f"Epoch {epoch+1}: train_loss: {train_loss:.4f} "
            f"train_acc: {train_acc:.4f} | val_loss: {val_loss:.4f} "
            f"val_acc: {val_acc:.4f}"
        )
        print(
            "{:0>2}:{:0>2}:{:05.2f}".format(int(hours), int(minutes), seconds)
        )
        log(train_loss, train_acc, val_loss, val_acc, epoch)
        # Track the checkpoint with the best validation accuracy.
        if val_acc > best_val_acc:
            best_val_acc = val_acc
            best_model = deepcopy(model)
        if early_stopping and is_early_stopping:
            break
    if checkpoint_dir is not None:
        print(f"Saving the best model to {checkpoint_dir}")
        best_model.save_pretrained(checkpoint_dir)
    if adapter_dir is not None and adapter_checkpoint_name is not None:
        best_model.save_adapter(adapter_dir, adapter_checkpoint_name)
    # Storing validation predictions.
    if prediction_file is not None:
        df_predictions = pd.DataFrame(
            predictions, columns=["row_id", "label", "proba_0", "proba_1"]
        )
        df_predictions.to_csv(prediction_file, index=False)
def get_prediction(
    model,
    tokenizer,
    premise,
    hypothesis,
    label,
    flatten_neutral_contradicts: bool = False,
    device=device,
):
    """Predict the entailment label for a single premise/hypothesis pair.

    Wraps the pair in a one-row :class:`EntailmentDataset`, runs the model,
    and maps the predicted index back through ``inv_label_dict``.
    """
    ex_df = pd.DataFrame(
        [
            {
                "sentence1": premise,
                "sentence2": hypothesis,
                "gold_label": label,
                "row_id": 0,
            }
        ]
    )
    ex_dataset = EntailmentDataset(train_df=ex_df, tokenizer=tokenizer)
    ex_loader, _ = ex_dataset.get_data_loaders(batch_size=len(ex_df))
    (pair_token_ids, mask_ids, seg_ids, y, _) = next(iter(ex_loader))
    pair_token_ids = pair_token_ids.to(device)
    mask_ids = mask_ids.to(device)
    if flatten_neutral_contradicts:
        y[y == 2] = 1
    y = y.to(device)
    loss, prediction = model(
        pair_token_ids, attention_mask=mask_ids, labels=y
    ).values()
    # NOTE(review): the logits are negated before argmax, which selects the
    # LOWEST-scoring class — validate() above uses softmax(prediction).argmax
    # without negation. Confirm this sign flip is intentional.
    y_pred = torch.log_softmax(-prediction, dim=1).argmax(dim=1)
    return ex_dataset.inv_label_dict[y_pred.item()]
def get_batch_predictions(
    model,
    tokenizer,
    premises,
    hypotheses,
    labels=None,
    flatten_neutral_contradicts: bool = False,
    device=device,
    return_probas=False,
):
    """Predict entailment labels (or probabilities) for parallel lists of pairs.

    Returns a DataFrame with the decoded input and either a ``predicted``
    label column (``return_probas=False``) or rounded probability columns
    ``proba_entails``/``proba_not_entails`` (``return_probas=True``).
    """
    ex_df = pd.DataFrame(
        {
            "sentence1": premises,
            "sentence2": hypotheses,
            "gold_label": labels,
            "row_id": list(range(len(premises))),
        }
    )
    ex_dataset = EntailmentDataset(train_df=ex_df, tokenizer=tokenizer)
    # Fixed batch size of 24; order preserved because shuffle=False.
    ex_loader, _ = ex_dataset.get_data_loaders(batch_size=24, shuffle=False)
    results_lst = []
    for pair_token_ids, mask_ids, seg_ids, y, _ in iter(ex_loader):
        if flatten_neutral_contradicts:
            y[y == 2] = 1
        pair_token_ids = pair_token_ids.to(device)
        mask_ids = mask_ids.to(device)
        y = y.to(device)
        loss, prediction = model(
            pair_token_ids, attention_mask=mask_ids, labels=y
        ).values()
        if not return_probas:
            # NOTE(review): logits are negated before argmax (selects the
            # lowest-scoring class), unlike validate() above — confirm.
            y_pred = torch.log_softmax(-prediction, dim=1).argmax(dim=1)
            results = pd.DataFrame(
                [
                    tokenizer.batch_decode(pair_token_ids),
                    y.tolist(),
                    y_pred.tolist(),
                ]
            ).T
            results.columns = ["input", "gold_label", "predicted"]
            results_lst.append(results)
        else:
            probas = torch.softmax(prediction, dim=1)
            results = pd.DataFrame(
                [tokenizer.batch_decode(pair_token_ids), y.tolist()]
            ).T
            results.columns = ["input", "gold_label"]
            results[["proba_entails", "proba_not_entails"]] = (
                probas.detach().cpu().numpy().round(3)
            )
            results_lst.append(results)
    return pd.concat(results_lst, axis=0)
@click.command()
@click.option("--model_name")
@click.option(
    "--train-data-location",
    default="/root/food_ke/data/entailment_data/"
    "entailment_train_augmented.csv",
)
@click.option(
    "--val-data-location",
    default="/root/food_ke/data/entailment_data/entailment_val.csv",
)
@click.option("--checkpoint-dir", default=None)
@click.option("--adapter-dir", default=None)
@click.option("--adapter-name", default=None)
@click.option("--adapter-checkpoint-name", default=None)
@click.option("--epochs", default=2, type=int)
@click.option("--early-stopping", default=False, type=bool)
@click.option("--validate-every-steps", default=None, type=int)
@click.option("--validate-every-examples", default=None, type=int)
@click.option("--patience", default=3, type=int)
@click.option("--stopping-threshold", default=1e-5, type=float)
@click.option("--prediction-file", default=None)
@click.option("--learning-rate", default=2e-5)
@click.option("--batch-size", default=24)
@click.option("--augmentation_strategies", default="all")
@click.option("--train_num_samples", default=25)
@click.option("--ratio-entailment", type=float, default=0.5)
@click.option("--ratio-neutral", type=float, default=0.5)
@click.option("--ratio-contradiction", type=float, default=0.0)
def main(
    model_name: str,
    train_data_location: str,
    val_data_location: str,
    epochs: int,
    early_stopping: bool,
    validate_every_steps: int,
    validate_every_examples: int,
    patience: int,
    stopping_threshold: float,
    prediction_file: str,
    learning_rate: float,
    batch_size: int,
    augmentation_strategies: str,
    train_num_samples: int,
    ratio_entailment: float,
    ratio_neutral: float,
    ratio_contradiction: float,
    adapter_name: str = None,
    adapter_checkpoint_name: str = None,
    checkpoint_dir: str = None,
    adapter_dir: str = None,
):
    """CLI entry point: load/balance data, init wandb and the model, train."""
    # Check data class distribution ratios.
    if ratio_entailment < 0 or ratio_neutral < 0 or ratio_contradiction < 0:
        raise ValueError("Ratios must be non-negative.")
    # NOTE(review): exact float comparison — ratios such as 1/3 each may
    # fail this check; consider a tolerance.
    if ratio_entailment + ratio_neutral + ratio_contradiction != 1.0:
        raise ValueError("Ratios must sum to 1.")
    class_distribution = {
        'entailment': ratio_entailment,
        'neutral': ratio_neutral,
        'contradiction': ratio_contradiction
    }
    # Train data is rebalanced; validation data keeps its natural distribution.
    train_df_res = load_data(
        pd.read_csv(train_data_location, encoding="latin-1"),
        class_distribution,
    )
    eval_df_res = load_data(pd.read_csv(val_data_location, encoding="latin-1"))
    # Guard against train/val leakage via shared original example indices.
    if (
        not len(
            set(train_df_res.orig_idx).intersection(set(eval_df_res.orig_idx))
        )
        == 0
    ):
        raise ValueError(
            "train_df and eval_df have overlapping original example indices"
        )
    wandb.init(
        project="food_ke_entailment",
        entity="food_ke",
        config={
            "model_name": model_name,
            "adapter_name": adapter_name,
            "epochs": epochs,
            "learning_rate": learning_rate,
            "batch_size": batch_size,
            "augmentation_strategies": augmentation_strategies,
            "num_samples_unaugmented": len(train_df_res.orig_idx.unique()),
            "validate_every_steps": validate_every_steps,
            "validate_every_examples": validate_every_examples,
        },
    )
    model, tokenizer, optimizer = load_model(
        model_name=model_name,
        tokenizer_name=model_name,
        adapter_name=adapter_name,
        optimizer_kwargs={"lr": learning_rate, "correct_bias": True},
    )
    dataset = EntailmentDataset(
        train_df=train_df_res, val_df=eval_df_res, tokenizer=tokenizer
    )
    train_loader, val_loader = dataset.get_data_loaders(batch_size=batch_size)
    train(
        model,
        train_loader,
        val_loader,
        optimizer,
        epochs=epochs,
        flatten_neutral_contradicts=True,
        checkpoint_dir=checkpoint_dir,
        adapter_dir=adapter_dir,
        adapter_name=adapter_name,
        adapter_checkpoint_name=adapter_checkpoint_name,
        early_stopping=early_stopping,
        validate_every_steps=validate_every_steps,
        validate_every_examples=validate_every_examples,
        patience=patience,
        stopping_threshold=stopping_threshold,
        prediction_file=prediction_file,
    )
if __name__ == "__main__":
main()
| # -*- coding: utf-8 -*-
"""Model training methods.
Authors:
<NAME> - <EMAIL>
<NAME> - <EMAIL>
Todo:
* Docstring
* Batch size for predictions arg.
* move early stopping code block as a callable function.
* scalable approach for setting class distribution. we can add a additional
column to indicate template type.
"""
import logging
import time
from copy import deepcopy
from typing import Optional
import click
import pandas as pd
import torch
import wandb
from imblearn.under_sampling import RandomUnderSampler
from transformers import (
AdamW,
AutoModelForSequenceClassification,
AutoModelWithHeads,
AutoTokenizer,
)
from food_ke.entailment.constants import WHOLE_PLANT_TOKEN
from food_ke.entailment.dataset import EntailmentDataset
# NOTE(review): duplicate copy of the logging/device setup defined earlier.
logging.basicConfig(
    format="[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s",
    level=logging.DEBUG,
)
# Module-level device used by the helpers below.
if torch.cuda.is_available():
    device = torch.device("cuda")
    logging.info("using CUDA")
else:
    device = torch.device("cpu")
    logging.info("using CPU")
# NOTE(review): duplicate of the set_class_distribution defined earlier in this file.
def set_class_distribution(
    df: pd.DataFrame, ratio: dict, food_part_only: bool = True
) -> pd.DataFrame:
    """
    Balances the class distribution in df by undersampling. Currenlty only supports df with single hypothesis template type.
    Parameters
    ----------
    df : pd.DataFrame
        Data to balance.
    ratio : dict
        Dict with keys of class name and values of desired ratio in the output data.
    food_part_only : bool
        Whether the data contains only food part templates. Only supports true for now.
    Returns
    -------
    pd.DataFrame
        Data with balanced class distribution
    """
    # Exact float equality: ratios must sum to precisely 1.0.
    if not sum(ratio.values()) == 1:
        raise ValueError
    if not food_part_only:
        raise NotImplementedError("Only supports food part templates for now")
    # df = df.fillna({"food_part": WHOLE_PLANT_TOKEN})
    # new_df_list = []
    # X_part_conc = df[~df["food_part"].str.contains(WHOLE_PLANT_TOKEN)]
    # X_part_conc = X_part_conc[~X_part_conc["conc_unit"].isna()]
    # X_base = df[df["food_part"].str.contains(WHOLE_PLANT_TOKEN)]
    # X_base = X_base[X_base["conc_unit"].isna()]
    # X_conc = df.drop(list(X_part_conc.index) + list(X_base.index), axis=0)
    # X_conc = X_conc[~X_conc["conc_unit"].isna()]
    # X_part = df.drop(list(X_part_conc.index) + list(X_base.index), axis=0)
    # X_part = X_part[X_part["conc_unit"].isna()]
    # for i, X in enumerate([X_part_conc, X_base, X_conc, X_part]):
    # # Skip if only contains one class.
    # if class_distr.shape[0] < 2:
    # print(f"Skipped {i}-th template since it only contains one class.")
    # # continue
    # Drop classes requested with ratio 0, then undersample the rest so the
    # rarest class keeps all its rows.
    classes_zero = [k for k, v in ratio.items() if v == 0]
    df = df[~df["gold_label"].isin(classes_zero)]
    ratio = {k: v for k, v in ratio.items() if v > 0}
    class_distr = df["gold_label"].value_counts()
    mult_factor = class_distr.min()
    argmin = class_distr.idxmin()
    ratio_ints = {
        k: int(v * mult_factor / ratio[argmin]) for k, v in ratio.items()
    }
    rus = RandomUnderSampler(random_state=42, sampling_strategy=ratio_ints)
    X_res, y_res = rus.fit_resample(
        df.drop(["gold_label"], axis=1), df["gold_label"]
    )
    # Reassemble resampled features and labels into one frame.
    return pd.concat(
        [
            pd.DataFrame(
                X_res, columns=df.drop(["gold_label"], axis=1).columns
            ),
            pd.DataFrame(y_res, columns=["gold_label"]),
        ],
        axis=1,
    )
# NOTE(review): duplicate of the load_data defined earlier in this file.
def load_data(data, class_distribution: dict = None):
    """Attach a ``row_id`` column and optionally rebalance the classes."""
    data["row_id"] = data.index
    logging.info("original value counts")
    logging.info(data.gold_label.value_counts())
    if class_distribution:
        data = set_class_distribution(data, class_distribution)
        logging.info("resampled value counts")
        logging.info(data.gold_label.value_counts())
    return data
# def load_data(
# train_data_location: str, val_data_location: str, class_distribution: dict
# ):
# train_df = pd.read_csv(train_data_location, encoding="latin-1")
# eval_df = pd.read_csv(val_data_location, encoding="latin-1")
# train_df["row_id"] = train_df.index
# eval_df["row_id"] = eval_df.index
# print("train and eval original value counts")
# print(train_df.gold_label.value_counts())
# print(eval_df.gold_label.value_counts())
# print("\n\n")
# train_df_res = set_class_distribution(train_df, class_distribution)
# eval_df_res = set_class_distribution(eval_df, class_distribution)
# print("resampled value counts")
# print(train_df_res.gold_label.value_counts())
# print(eval_df_res.gold_label.value_counts())
# print("\n\n")
# if (
# not len(
# set(train_df_res.orig_idx).intersection(set(eval_df_res.orig_idx))
# )
# == 0
# ):
# raise ValueError(
# "train_df and eval_df have overlapping original example indices"
# )
# return train_df_res, eval_df_res
def load_model(
model_name,
tokenizer_name,
adapter_name=None,
optimizer_kwargs: dict = None,
):
if "mnli" in model_name and "biobert" in model_name:
model = AutoModelForSequenceClassification.from_pretrained(
model_name, num_labels=3
)
elif "mnli" in model_name:
model = AutoModelForSequenceClassification.from_pretrained(model_name)
else:
model = AutoModelWithHeads.from_pretrained(model_name)
pass
if adapter_name:
try:
adapter = model.load_adapter(adapter_name, source=None)
except Exception:
adapter = model.load_adapter(adapter_name, source="hf")
model.active_adapters = adapter
tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)
if optimizer_kwargs is not None:
optimizer = AdamW(model.parameters(), **optimizer_kwargs)
else:
optimizer = AdamW(model.parameters())
model = model.to(device)
return model, tokenizer, optimizer
def multi_acc(y_pred, y_test):
acc = (
torch.log_softmax(y_pred, dim=1).argmax(dim=1) == y_test
).sum().float() / float(y_test.size(0))
return acc
def validate(val_loader, optimizer, model, flatten_neutral_contradicts):
model.eval()
total_val_acc = 0
total_val_loss = 0
total_predictions = []
with torch.no_grad():
for batch_idx, (
pair_token_ids,
mask_ids,
seg_ids,
y,
row_ids,
) in enumerate(val_loader):
if flatten_neutral_contradicts:
y[y == 2] = 1
optimizer.zero_grad()
pair_token_ids = pair_token_ids.to(device)
mask_ids = mask_ids.to(device)
seg_ids = seg_ids.to(device)
labels = y.to(device)
loss, predictions = model(
pair_token_ids, attention_mask=mask_ids, labels=labels
).values()
acc = multi_acc(predictions, labels)
prediction_scores = torch.softmax(predictions, dim=1)
prediction_labels = prediction_scores.argmax(dim=1)
total_predictions += list(
zip(
row_ids.tolist(),
prediction_labels.tolist(),
prediction_scores[:, 0].tolist(),
prediction_scores[:, 1].tolist(),
)
)
total_val_loss += loss.item()
total_val_acc += acc.item()
val_acc = total_val_acc / len(val_loader)
val_loss = total_val_loss / len(val_loader)
return val_loss, val_acc, total_predictions
def log(
train_loss=None,
train_acc=None,
val_loss=None,
val_acc=None,
epoch=None,
steps=None,
examples=None,
):
stepinfo = {"batch": steps, "epoch": epoch + 1, "example": examples}
if train_loss is not None:
wandb.log({**{"train_loss": train_loss}, **stepinfo})
if val_loss is not None:
wandb.log({**{"val_loss": val_loss}, **stepinfo})
if train_acc is not None:
wandb.log({**{"train_acc": train_acc}, **stepinfo})
if val_acc is not None:
wandb.log({**{"val_acc": val_acc}, **stepinfo})
def train(
model,
train_loader,
val_loader,
optimizer,
epochs: int,
validate_every_steps: Optional[int] = None,
validate_every_examples: Optional[int] = None,
early_stopping: bool = False,
patience: Optional[int] = None,
stopping_threshold: Optional[float] = None,
flatten_neutral_contradicts: bool = False,
device=device,
checkpoint_dir: str = None,
adapter_dir: str = None,
adapter_name: str = None,
adapter_checkpoint_name: str = None,
prediction_file: str = None,
):
"""TODO: Finish docstring.
Args:
validate_every_steps (int): validate after seeing this many
steps/batches
validate_every_examples (int): validate after seeing this many
examples
"""
if (
validate_every_steps is not None
and validate_every_examples is not None
):
raise ValueError(
"validate_every_examples and validate_every_steps are mutually "
"exclusive"
)
if early_stopping and (patience is None or stopping_threshold is None):
raise ValueError(
"patience and stopping_threshold must be provided if "
"early_stopping is True."
)
model = model.to(device)
best_model = deepcopy(model)
best_val_acc = 0
steps_since_val = 0
examples_since_val = 0
total_steps = 0
total_examples = 0
# Early stopping statistics.
if early_stopping:
is_early_stopping = False
count_patient = 0
val_loss_last = float("inf")
val_losses_early_stopping = []
for epoch in range(epochs):
start = time.time()
model.train()
total_train_loss = 0
total_train_acc = 0
for batch_idx, (pair_token_ids, mask_ids, seg_ids, y, _) in enumerate(
train_loader
):
if flatten_neutral_contradicts:
y[y == 2] = 1
optimizer.zero_grad()
pair_token_ids = pair_token_ids.to(device)
mask_ids = mask_ids.to(device)
seg_ids = seg_ids.to(device)
labels = y.to(device)
loss, prediction = model(
pair_token_ids, attention_mask=mask_ids, labels=labels
).values()
acc = multi_acc(prediction, labels)
loss.backward()
optimizer.step()
total_train_loss += loss.item()
total_train_acc += acc.item()
steps_since_val += 1
batch_size = len(pair_token_ids)
examples_since_val += batch_size
total_steps += 1
total_examples += batch_size
if validate_every_steps is not None:
if steps_since_val > validate_every_steps:
val_loss, val_acc, _ = validate(
val_loader,
optimizer,
model,
flatten_neutral_contradicts,
)
steps_since_val = examples_since_val = 0
log(
val_loss=val_loss,
val_acc=val_acc,
epoch=epoch,
steps=total_steps,
examples=total_examples,
)
if epoch > 0 and early_stopping:
if val_loss + stopping_threshold > val_loss_last:
if count_patient == 0:
val_losses_early_stopping.append(val_loss_last)
count_patient += 1
val_losses_early_stopping.append(val_loss)
if count_patient == patience:
is_early_stopping = True
print(
f"{patience} consecutive steps "
f"({validate_every_steps} batches) with "
f"less than {stopping_threshold} "
"improvement. Last consecutive losses: "
f"{val_losses_early_stopping}. "
f"Performing early stopping."
)
else:
count_patient = 0
val_losses_early_stopping = []
val_loss_last = val_loss
elif validate_every_examples is not None:
if examples_since_val > validate_every_examples:
val_loss, val_acc, _ = validate(
val_loader,
optimizer,
model,
flatten_neutral_contradicts,
)
steps_since_val = examples_since_val = 0
log(
val_loss=val_loss,
val_acc=val_acc,
epoch=epoch,
steps=total_steps,
examples=total_examples,
)
if epoch > 0 and early_stopping:
if val_loss + stopping_threshold > val_loss_last:
if count_patient == 0:
val_losses_early_stopping.append(val_loss_last)
count_patient += 1
val_losses_early_stopping.append(val_loss)
if count_patient == patience:
is_early_stopping = True
print(
f"{patience} consecutive steps "
f"({validate_every_examples} examples) "
f"with less than {stopping_threshold} "
"improvement. Last consecutive losses: "
f"{val_losses_early_stopping}. "
f"Performing early stopping."
)
else:
count_patient = 0
val_losses_early_stopping = []
val_loss_last = val_loss
if early_stopping and is_early_stopping:
break
# Use actual trained batch num if early stopping happened within
# steps/examples.
train_acc = total_train_acc / min(batch_idx + 1, len(train_loader))
train_loss = total_train_loss / min(batch_idx + 1, len(train_loader))
val_loss, val_acc, predictions = validate(
val_loader, optimizer, model, flatten_neutral_contradicts
)
# Apply early stopping to epochs by default.
if (
epoch > 0
and early_stopping
and (
validate_every_steps is None
and validate_every_examples is None
)
):
if val_loss + stopping_threshold > val_loss_last:
if count_patient == 0:
val_losses_early_stopping.append(val_loss_last)
count_patient += 1
val_losses_early_stopping.append(val_loss)
if count_patient == patience:
is_early_stopping = True
print(
f"{patience} consecutive epochs with less than "
f"{stopping_threshold} improvement. "
f"Last consecutive losses: "
f"{val_losses_early_stopping}. "
"Performing early stopping."
)
else:
count_patient = 0
val_losses_early_stopping = []
val_loss_last = val_loss
end = time.time()
hours, rem = divmod(end - start, 3600)
minutes, seconds = divmod(rem, 60)
print(
f"Epoch {epoch+1}: train_loss: {train_loss:.4f} "
f"train_acc: {train_acc:.4f} | val_loss: {val_loss:.4f} "
f"val_acc: {val_acc:.4f}"
)
print(
"{:0>2}:{:0>2}:{:05.2f}".format(int(hours), int(minutes), seconds)
)
log(train_loss, train_acc, val_loss, val_acc, epoch)
if val_acc > best_val_acc:
best_val_acc = val_acc
best_model = deepcopy(model)
if early_stopping and is_early_stopping:
break
if checkpoint_dir is not None:
print(f"Saving the best model to {checkpoint_dir}")
best_model.save_pretrained(checkpoint_dir)
if adapter_dir is not None and adapter_checkpoint_name is not None:
best_model.save_adapter(adapter_dir, adapter_checkpoint_name)
# Storing validation predictions.
if prediction_file is not None:
df_predictions = pd.DataFrame(
predictions, columns=["row_id", "label", "proba_0", "proba_1"]
)
df_predictions.to_csv(prediction_file, index=False)
def get_prediction(
model,
tokenizer,
premise,
hypothesis,
label,
flatten_neutral_contradicts: bool = False,
device=device,
):
ex_df = pd.DataFrame(
[
{
"sentence1": premise,
"sentence2": hypothesis,
"gold_label": label,
"row_id": 0,
}
]
)
ex_dataset = EntailmentDataset(train_df=ex_df, tokenizer=tokenizer)
ex_loader, _ = ex_dataset.get_data_loaders(batch_size=len(ex_df))
(pair_token_ids, mask_ids, seg_ids, y, _) = next(iter(ex_loader))
pair_token_ids = pair_token_ids.to(device)
mask_ids = mask_ids.to(device)
if flatten_neutral_contradicts:
y[y == 2] = 1
y = y.to(device)
loss, prediction = model(
pair_token_ids, attention_mask=mask_ids, labels=y
).values()
y_pred = torch.log_softmax(-prediction, dim=1).argmax(dim=1)
return ex_dataset.inv_label_dict[y_pred.item()]
def get_batch_predictions(
model,
tokenizer,
premises,
hypotheses,
labels=None,
flatten_neutral_contradicts: bool = False,
device=device,
return_probas=False,
):
ex_df = pd.DataFrame(
{
"sentence1": premises,
"sentence2": hypotheses,
"gold_label": labels,
"row_id": list(range(len(premises))),
}
)
ex_dataset = EntailmentDataset(train_df=ex_df, tokenizer=tokenizer)
ex_loader, _ = ex_dataset.get_data_loaders(batch_size=24, shuffle=False)
results_lst = []
for pair_token_ids, mask_ids, seg_ids, y, _ in iter(ex_loader):
if flatten_neutral_contradicts:
y[y == 2] = 1
pair_token_ids = pair_token_ids.to(device)
mask_ids = mask_ids.to(device)
y = y.to(device)
loss, prediction = model(
pair_token_ids, attention_mask=mask_ids, labels=y
).values()
if not return_probas:
y_pred = torch.log_softmax(-prediction, dim=1).argmax(dim=1)
results = pd.DataFrame(
[
tokenizer.batch_decode(pair_token_ids),
y.tolist(),
y_pred.tolist(),
]
).T
results.columns = ["input", "gold_label", "predicted"]
results_lst.append(results)
else:
probas = torch.softmax(prediction, dim=1)
results = pd.DataFrame(
[tokenizer.batch_decode(pair_token_ids), y.tolist()]
).T
results.columns = ["input", "gold_label"]
results[["proba_entails", "proba_not_entails"]] = (
probas.detach().cpu().numpy().round(3)
)
results_lst.append(results)
return pd.concat(results_lst, axis=0)
@click.command()
@click.option("--model_name")
@click.option(
"--train-data-location",
default="/root/food_ke/data/entailment_data/"
"entailment_train_augmented.csv",
)
@click.option(
"--val-data-location",
default="/root/food_ke/data/entailment_data/entailment_val.csv",
)
@click.option("--checkpoint-dir", default=None)
@click.option("--adapter-dir", default=None)
@click.option("--adapter-name", default=None)
@click.option("--adapter-checkpoint-name", default=None)
@click.option("--epochs", default=2, type=int)
@click.option("--early-stopping", default=False, type=bool)
@click.option("--validate-every-steps", default=None, type=int)
@click.option("--validate-every-examples", default=None, type=int)
@click.option("--patience", default=3, type=int)
@click.option("--stopping-threshold", default=1e-5, type=float)
@click.option("--prediction-file", default=None)
@click.option("--learning-rate", default=2e-5)
@click.option("--batch-size", default=24)
@click.option("--augmentation_strategies", default="all")
@click.option("--train_num_samples", default=25)
@click.option("--ratio-entailment", type=float, default=0.5)
@click.option("--ratio-neutral", type=float, default=0.5)
@click.option("--ratio-contradiction", type=float, default=0.0)
def main(
model_name: str,
train_data_location: str,
val_data_location: str,
epochs: int,
early_stopping: bool,
validate_every_steps: int,
validate_every_examples: int,
patience: int,
stopping_threshold: float,
prediction_file: str,
learning_rate: float,
batch_size: int,
augmentation_strategies: str,
train_num_samples: int,
ratio_entailment: float,
ratio_neutral: float,
ratio_contradiction: float,
adapter_name: str = None,
adapter_checkpoint_name: str = None,
checkpoint_dir: str = None,
adapter_dir: str = None,
):
# Check data class distribution ratios.
if ratio_entailment < 0 or ratio_neutral < 0 or ratio_contradiction < 0:
raise ValueError("Ratios must be non-negative.")
if ratio_entailment + ratio_neutral + ratio_contradiction != 1.0:
raise ValueError("Ratios must sum to 1.")
class_distribution = {
'entailment': ratio_entailment,
'neutral': ratio_neutral,
'contradiction': ratio_contradiction
}
train_df_res = load_data(
pd.read_csv(train_data_location, encoding="latin-1"),
class_distribution,
)
eval_df_res = load_data(pd.read_csv(val_data_location, encoding="latin-1"))
if (
not len(
set(train_df_res.orig_idx).intersection(set(eval_df_res.orig_idx))
)
== 0
):
raise ValueError(
"train_df and eval_df have overlapping original example indices"
)
wandb.init(
project="food_ke_entailment",
entity="food_ke",
config={
"model_name": model_name,
"adapter_name": adapter_name,
"epochs": epochs,
"learning_rate": learning_rate,
"batch_size": batch_size,
"augmentation_strategies": augmentation_strategies,
"num_samples_unaugmented": len(train_df_res.orig_idx.unique()),
"validate_every_steps": validate_every_steps,
"validate_every_examples": validate_every_examples,
},
)
model, tokenizer, optimizer = load_model(
model_name=model_name,
tokenizer_name=model_name,
adapter_name=adapter_name,
optimizer_kwargs={"lr": learning_rate, "correct_bias": True},
)
dataset = EntailmentDataset(
train_df=train_df_res, val_df=eval_df_res, tokenizer=tokenizer
)
train_loader, val_loader = dataset.get_data_loaders(batch_size=batch_size)
train(
model,
train_loader,
val_loader,
optimizer,
epochs=epochs,
flatten_neutral_contradicts=True,
checkpoint_dir=checkpoint_dir,
adapter_dir=adapter_dir,
adapter_name=adapter_name,
adapter_checkpoint_name=adapter_checkpoint_name,
early_stopping=early_stopping,
validate_every_steps=validate_every_steps,
validate_every_examples=validate_every_examples,
patience=patience,
stopping_threshold=stopping_threshold,
prediction_file=prediction_file,
)
if __name__ == "__main__":
main()
| en | 0.58877 | # -*- coding: utf-8 -*- Model training methods. Authors: <NAME> - <EMAIL> <NAME> - <EMAIL> Todo: * Docstring * Batch size for predictions arg. * move early stopping code block as a callable function. * scalable approach for setting class distribution. we can add a additional column to indicate template type. Balances the class distribution in df by undersampling. Currenlty only supports df with single hypothesis template type. Parameters ---------- df : pd.DataFrame Data to balance. ratio : dict Dict with keys of class name and values of desired ratio in the output data. food_part_only : bool Whether the data contains only food part templates. Only supports true for now. Returns ------- pd.DataFrame Data with balanced class distribution # df = df.fillna({"food_part": WHOLE_PLANT_TOKEN}) # new_df_list = [] # X_part_conc = df[~df["food_part"].str.contains(WHOLE_PLANT_TOKEN)] # X_part_conc = X_part_conc[~X_part_conc["conc_unit"].isna()] # X_base = df[df["food_part"].str.contains(WHOLE_PLANT_TOKEN)] # X_base = X_base[X_base["conc_unit"].isna()] # X_conc = df.drop(list(X_part_conc.index) + list(X_base.index), axis=0) # X_conc = X_conc[~X_conc["conc_unit"].isna()] # X_part = df.drop(list(X_part_conc.index) + list(X_base.index), axis=0) # X_part = X_part[X_part["conc_unit"].isna()] # for i, X in enumerate([X_part_conc, X_base, X_conc, X_part]): # # Skip if only contains one class. 
# if class_distr.shape[0] < 2: # print(f"Skipped {i}-th template since it only contains one class.") # # continue # def load_data( # train_data_location: str, val_data_location: str, class_distribution: dict # ): # train_df = pd.read_csv(train_data_location, encoding="latin-1") # eval_df = pd.read_csv(val_data_location, encoding="latin-1") # train_df["row_id"] = train_df.index # eval_df["row_id"] = eval_df.index # print("train and eval original value counts") # print(train_df.gold_label.value_counts()) # print(eval_df.gold_label.value_counts()) # print("\n\n") # train_df_res = set_class_distribution(train_df, class_distribution) # eval_df_res = set_class_distribution(eval_df, class_distribution) # print("resampled value counts") # print(train_df_res.gold_label.value_counts()) # print(eval_df_res.gold_label.value_counts()) # print("\n\n") # if ( # not len( # set(train_df_res.orig_idx).intersection(set(eval_df_res.orig_idx)) # ) # == 0 # ): # raise ValueError( # "train_df and eval_df have overlapping original example indices" # ) # return train_df_res, eval_df_res TODO: Finish docstring. Args: validate_every_steps (int): validate after seeing this many steps/batches validate_every_examples (int): validate after seeing this many examples # Early stopping statistics. # Use actual trained batch num if early stopping happened within # steps/examples. # Apply early stopping to epochs by default. # Storing validation predictions. # Check data class distribution ratios. | 2.426257 | 2 |
6homework.py | Dendzz/Hilel | 0 | 6621620 | txt_first = "Hi"
big = txt_first.capitalize()
print(type(big))
print(big)
txt_second = "HeLLo, AnD WeLcome To My World! 123 "
low = txt_second.casefold()
print(low)
txt_third = "banana"
centre = txt_third.center(20)
print(centre)
txt_fourth = "I love apples, apple are my favorite fruit,аррle"
count = txt_fourth.count("apple")
print(count)
txt_fifth = "My name is Ståleпв"
decode = txt_fifth.encode()
print(decode)
txt_sixth = "Hello, welcome to my world...."
bool_form = txt_sixth.endswith(".....")
print(bool_form)
txt_seventh = "H\td\tdd\tsdd\ts\tldd\tddod"
space_bars = txt_seventh.expandtabs(3)
print(space_bars)
txt_eith = "Hello, welcomew to my world."
searcher = txt_eith.find("w")
print(searcher)
txt_neith = "For only {price:.2f} dollars! Only at {day:.2f} day"
print(txt_neith.format(price = 49, day = 2))
txt_ten = "Hello, welcome to my world."
also_searcher = txt_ten.index("w")
print(also_searcher)
txt_eleven = "Company12п."
alphanumeric_test = txt_eleven.isalnum()
print(alphanumeric_test)
txt_twelve = "CompanyX2"
letters_checker = txt_twelve.isalpha()
print(letters_checker)
txt_thierteen= "\u0211"
unicode_checker = txt_thierteen.isdecimal()
print(unicode_checker)
txt_fourteen = "5d0800"
number_test = txt_fourteen.isdigit()
print(number_test)
txt_fifeteen = "5d0800"
identify_cheacker = txt_fifeteen.isidentifier()
print(identify_cheacker)
txt_sixteen = "hello World!"
lower_checker = txt_sixteen.islower()
print(lower_checker)
txt_seventeen = "5655s43"
numeric_test_second = txt_seventeen.isnumeric()
print(numeric_test_second)
txt_eitheen = "Hello! Are you #1аыва.ю.э=-0фыё~!@?"
printable_test = txt_eitheen.isprintable()
print(printable_test)
txt_nineteen = " s "
space_bars_checker = txt_nineteen.isspace()
print(space_bars_checker)
txt_twenty = "hello, And Welcome To My World!"
start_every_word_big = txt_twenty.istitle()
print(start_every_word_big)
txt_twenty_one = "ThIS IS NOW!"
letters_capital_checker = txt_twenty_one.isupper()
print(letters_capital_checker)
myTuple = ("John", ' ', "Peter", "Vicky")
fill_spaces = "/".join(myTuple)
print(fill_spaces)
txt_twenty_two = "banana"
words_longer = txt_twenty_two.ljust(-20)
print(words_longer, "is my favorite fruit.")
txt_twenty_three = "Hello my FRIENDS"
do_word_lower = txt_twenty_three.lower()
print(do_word_lower)
txt_twenty_four = " banana "
remove_space_right_sides = txt_twenty_four.lstrip()
print("of all fruits", remove_space_right_sides, "is my favorite")
txt_twenty_five = "Hello Sam!"
replace_letter = txt_twenty_five.maketrans("e", "g")
print(txt_twenty_five.translate(replace_letter))
txt_twenty_six = "I could eat bananas all day"
divides_words = txt_twenty_six.partition("bananas")
print(divides_words)
txt_twenty_seven = "I like bananas"
replace_words = txt_twenty_seven.replace("bananas", "bananas")
print(replace_words)
txt_twenty_eith = "Mi casa, su casa."
last_word_searcher = txt_twenty_eith.rfind("asa")
print(last_word_searcher)
txt_twenty_nine = "Mi casa, su casa."
same_like_last = txt_twenty_nine.rindex("asa")
print(same_like_last)
txt_thirty = "bananas"
make_words_longer_left = txt_thirty.rjust(20)
print(make_words_longer_left, "is my favorite fruit.")
txt_thirty_one = "I cousssssld eat bananas all day, bananas are my favorssssite fruit"
again_divides_words = txt_thirty_one.rpartition("bananas")
print(again_divides_words)
txt_thirty_two = "apple, banana, cherry"
split = txt_thirty_two.rsplit(", ")
print(split)
txt_thirty_three = " s banana s "
remove_space_left_sides = txt_thirty_three.rstrip()
print("of all fruits", remove_space_left_sides, "is my favorite")
txt_thirty_four = "welcome s s to the jungle"
split_each_word = txt_thirty_four.split()
print(split_each_word)
txt_thirty_five = "Thank you \nfor the music\nWelcome to the\n jungle"
split_words_with_slashs = txt_thirty_five.splitlines()
print(split_words_with_slashs)
txt_thirty_six = "Hello, welcome to my world."
first_word_checker = txt_thirty_six.startswith("Hells")
print(first_word_checker)
txt_thirty_seven = " s banana s "
remove_space_both_sides = txt_thirty_seven.strip()
print("of all fruits", remove_space_both_sides, "is my favorite")
txt_thirty_eight = "Hello My Name Is PETER"
swap_capital_lower_letters = txt_thirty_eight.swapcase()
print(swap_capital_lower_letters)
txt_thirty_nine = "Welcome to my world"
make_first_letter_of_words_big = txt_thirty_nine.title()
print(make_first_letter_of_words_big)
replace_letter_ascii_code = {72: 48}
txt_fourtee = "Hello Sam!"
print(txt_fourtee.translate(replace_letter_ascii_code))
txt_fourtee_one = "HelGGGlo my friends"
every_letter_upper = txt_fourtee_one.upper()
print(every_letter_upper)
txt_fourtee_two = "50"
fill_with_zero = txt_fourtee_two.zfill(10)
print(x) | txt_first = "Hi"
big = txt_first.capitalize()
print(type(big))
print(big)
txt_second = "HeLLo, AnD WeLcome To My World! 123 "
low = txt_second.casefold()
print(low)
txt_third = "banana"
centre = txt_third.center(20)
print(centre)
txt_fourth = "I love apples, apple are my favorite fruit,аррle"
count = txt_fourth.count("apple")
print(count)
txt_fifth = "My name is Ståleпв"
decode = txt_fifth.encode()
print(decode)
txt_sixth = "Hello, welcome to my world...."
bool_form = txt_sixth.endswith(".....")
print(bool_form)
txt_seventh = "H\td\tdd\tsdd\ts\tldd\tddod"
space_bars = txt_seventh.expandtabs(3)
print(space_bars)
txt_eith = "Hello, welcomew to my world."
searcher = txt_eith.find("w")
print(searcher)
txt_neith = "For only {price:.2f} dollars! Only at {day:.2f} day"
print(txt_neith.format(price = 49, day = 2))
txt_ten = "Hello, welcome to my world."
also_searcher = txt_ten.index("w")
print(also_searcher)
txt_eleven = "Company12п."
alphanumeric_test = txt_eleven.isalnum()
print(alphanumeric_test)
txt_twelve = "CompanyX2"
letters_checker = txt_twelve.isalpha()
print(letters_checker)
txt_thierteen= "\u0211"
unicode_checker = txt_thierteen.isdecimal()
print(unicode_checker)
txt_fourteen = "5d0800"
number_test = txt_fourteen.isdigit()
print(number_test)
txt_fifeteen = "5d0800"
identify_cheacker = txt_fifeteen.isidentifier()
print(identify_cheacker)
txt_sixteen = "hello World!"
lower_checker = txt_sixteen.islower()
print(lower_checker)
txt_seventeen = "5655s43"
numeric_test_second = txt_seventeen.isnumeric()
print(numeric_test_second)
txt_eitheen = "Hello! Are you #1аыва.ю.э=-0фыё~!@?"
printable_test = txt_eitheen.isprintable()
print(printable_test)
txt_nineteen = " s "
space_bars_checker = txt_nineteen.isspace()
print(space_bars_checker)
txt_twenty = "hello, And Welcome To My World!"
start_every_word_big = txt_twenty.istitle()
print(start_every_word_big)
txt_twenty_one = "ThIS IS NOW!"
letters_capital_checker = txt_twenty_one.isupper()
print(letters_capital_checker)
myTuple = ("John", ' ', "Peter", "Vicky")
fill_spaces = "/".join(myTuple)
print(fill_spaces)
txt_twenty_two = "banana"
words_longer = txt_twenty_two.ljust(-20)
print(words_longer, "is my favorite fruit.")
txt_twenty_three = "Hello my FRIENDS"
do_word_lower = txt_twenty_three.lower()
print(do_word_lower)
txt_twenty_four = " banana "
remove_space_right_sides = txt_twenty_four.lstrip()
print("of all fruits", remove_space_right_sides, "is my favorite")
txt_twenty_five = "Hello Sam!"
replace_letter = txt_twenty_five.maketrans("e", "g")
print(txt_twenty_five.translate(replace_letter))
txt_twenty_six = "I could eat bananas all day"
divides_words = txt_twenty_six.partition("bananas")
print(divides_words)
txt_twenty_seven = "I like bananas"
replace_words = txt_twenty_seven.replace("bananas", "bananas")
print(replace_words)
txt_twenty_eith = "Mi casa, su casa."
last_word_searcher = txt_twenty_eith.rfind("asa")
print(last_word_searcher)
txt_twenty_nine = "Mi casa, su casa."
same_like_last = txt_twenty_nine.rindex("asa")
print(same_like_last)
txt_thirty = "bananas"
make_words_longer_left = txt_thirty.rjust(20)
print(make_words_longer_left, "is my favorite fruit.")
txt_thirty_one = "I cousssssld eat bananas all day, bananas are my favorssssite fruit"
again_divides_words = txt_thirty_one.rpartition("bananas")
print(again_divides_words)
txt_thirty_two = "apple, banana, cherry"
split = txt_thirty_two.rsplit(", ")
print(split)
txt_thirty_three = " s banana s "
remove_space_left_sides = txt_thirty_three.rstrip()
print("of all fruits", remove_space_left_sides, "is my favorite")
txt_thirty_four = "welcome s s to the jungle"
split_each_word = txt_thirty_four.split()
print(split_each_word)
txt_thirty_five = "Thank you \nfor the music\nWelcome to the\n jungle"
split_words_with_slashs = txt_thirty_five.splitlines()
print(split_words_with_slashs)
txt_thirty_six = "Hello, welcome to my world."
first_word_checker = txt_thirty_six.startswith("Hells")
print(first_word_checker)
txt_thirty_seven = " s banana s "
remove_space_both_sides = txt_thirty_seven.strip()
print("of all fruits", remove_space_both_sides, "is my favorite")
txt_thirty_eight = "Hello My Name Is PETER"
swap_capital_lower_letters = txt_thirty_eight.swapcase()
print(swap_capital_lower_letters)
txt_thirty_nine = "Welcome to my world"
make_first_letter_of_words_big = txt_thirty_nine.title()
print(make_first_letter_of_words_big)
replace_letter_ascii_code = {72: 48}
txt_fourtee = "Hello Sam!"
print(txt_fourtee.translate(replace_letter_ascii_code))
txt_fourtee_one = "HelGGGlo my friends"
every_letter_upper = txt_fourtee_one.upper()
print(every_letter_upper)
txt_fourtee_two = "50"
fill_with_zero = txt_fourtee_two.zfill(10)
print(x) | en | 0.526464 | #1аыва.ю.э=-0фыё~!@?" | 3.373992 | 3 |
Python/06 - Itertools/itertools.permutations().py | sohammanjrekar/HackerRank | 0 | 6621621 | """
Problem: https://www.hackerrank.com/challenges/itertools-permutations/problem
Author: <NAME>
"""
import itertools
S = list(map(str, input().split()))
string1 = sorted(S[0])
number = int(S[1])
print(*list(map("".join, itertools.permutations(string1,number))), sep="\n") | """
Problem: https://www.hackerrank.com/challenges/itertools-permutations/problem
Author: <NAME>
"""
import itertools
S = list(map(str, input().split()))
string1 = sorted(S[0])
number = int(S[1])
print(*list(map("".join, itertools.permutations(string1,number))), sep="\n") | en | 0.671099 | Problem: https://www.hackerrank.com/challenges/itertools-permutations/problem
Author: <NAME> | 3.703074 | 4 |
legacy_python/raw_svg/render_cairo.py | smrfeld/convolution-calculator | 0 | 6621622 | from typing import List, Dict
from .path import *
def render_cairo(context, ids_to_paths: Dict[str,List[Path]]):
# Draw paths
for paths in ids_to_paths.values():
for path in paths:
# Draw
context.move_to(path.pts[0][0], path.pts[0][1])
for ipt in range(1,len(path.pts)):
context.line_to(path.pts[ipt][0], path.pts[ipt][1])
# Stroke and fill
if path.fill:
context.set_source_rgba(path.fill_col[0],path.fill_col[1],path.fill_col[2],path.fill_col[3])
if path.stroke:
context.fill_preserve()
else:
context.fill()
if path.stroke:
context.set_line_width(path.line_width)
context.set_source_rgba(path.line_col[0],path.line_col[1],path.line_col[2],path.line_col[3])
context.stroke()
| from typing import List, Dict
from .path import *
def render_cairo(context, ids_to_paths: Dict[str,List[Path]]):
# Draw paths
for paths in ids_to_paths.values():
for path in paths:
# Draw
context.move_to(path.pts[0][0], path.pts[0][1])
for ipt in range(1,len(path.pts)):
context.line_to(path.pts[ipt][0], path.pts[ipt][1])
# Stroke and fill
if path.fill:
context.set_source_rgba(path.fill_col[0],path.fill_col[1],path.fill_col[2],path.fill_col[3])
if path.stroke:
context.fill_preserve()
else:
context.fill()
if path.stroke:
context.set_line_width(path.line_width)
context.set_source_rgba(path.line_col[0],path.line_col[1],path.line_col[2],path.line_col[3])
context.stroke()
| en | 0.826408 | # Draw paths # Draw # Stroke and fill | 2.550251 | 3 |
Scripts/generate_grid.py | Analytics-for-a-Better-World/GPBP_Analytics_Tools | 1 | 6621623 | <gh_stars>1-10
def generate_grid_in_polygon(spacing, polygon):
import numpy as np
from shapely.geometry import Point,Polygon
from shapely.ops import cascaded_union
import geopandas as gpd
''' This Function generates evenly spaced points within the given GeoDataFrame.
The parameter 'spacing' defines the distance between the points in coordinate units. '''
# Convert the GeoDataFrame to a single polygon
poly_in = cascaded_union([poly for poly in polygon.geometry])
# Get the bounds of the polygon
minx, miny, maxx, maxy = poly_in.bounds
# Square around the country with the min, max polygon bounds
# Now generate the entire grid
x_coords = list(np.arange(np.floor(minx), int(np.ceil(maxx)), spacing))
y_coords = list(np.arange(np.floor(miny), int(np.ceil(maxy)), spacing))
grid = [Point(x) for x in zip(np.meshgrid(x_coords, y_coords)[0].flatten(), np.meshgrid(x_coords, y_coords)[1].flatten())]
grid_df = gpd.GeoDataFrame(grid)
grid_df.columns = ['geometry']
grid_df = grid_df.set_crs(epsg=3763)
extracted_grid = gpd.clip(grid_df, polygon)
extracted_grid1 = extracted_grid.to_crs(epsg=4326)
return (extracted_grid1) | def generate_grid_in_polygon(spacing, polygon):
import numpy as np
from shapely.geometry import Point,Polygon
from shapely.ops import cascaded_union
import geopandas as gpd
''' This Function generates evenly spaced points within the given GeoDataFrame.
The parameter 'spacing' defines the distance between the points in coordinate units. '''
# Convert the GeoDataFrame to a single polygon
poly_in = cascaded_union([poly for poly in polygon.geometry])
# Get the bounds of the polygon
minx, miny, maxx, maxy = poly_in.bounds
# Square around the country with the min, max polygon bounds
# Now generate the entire grid
x_coords = list(np.arange(np.floor(minx), int(np.ceil(maxx)), spacing))
y_coords = list(np.arange(np.floor(miny), int(np.ceil(maxy)), spacing))
grid = [Point(x) for x in zip(np.meshgrid(x_coords, y_coords)[0].flatten(), np.meshgrid(x_coords, y_coords)[1].flatten())]
grid_df = gpd.GeoDataFrame(grid)
grid_df.columns = ['geometry']
grid_df = grid_df.set_crs(epsg=3763)
extracted_grid = gpd.clip(grid_df, polygon)
extracted_grid1 = extracted_grid.to_crs(epsg=4326)
return (extracted_grid1) | en | 0.702639 | This Function generates evenly spaced points within the given GeoDataFrame. The parameter 'spacing' defines the distance between the points in coordinate units. # Convert the GeoDataFrame to a single polygon # Get the bounds of the polygon # Square around the country with the min, max polygon bounds # Now generate the entire grid | 3.324155 | 3 |
XdaPy/api/google.py | CyboLabs/XdaPy | 2 | 6621624 | <filename>XdaPy/api/google.py
# Copyright 2015 cybojenix <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Based on https://developers.google.com/accounts/docs/OAuth2ForDevices
from ..base import XdaBase
from ..decorators import check_session
class Google(XdaBase):
    """Handles all the requests for getting a Google access token.

    To use this, you must first create a project at the developer
    console (https://console.developers.google.com).

    Go to "APIs & auth", and disable all APIs. Then go to
    credentials, click "Create new Client ID", and select
    "Installed application" -> "Other".

    Your Client ID and Client Secret can then be added to the session
    by doing `x.session.google_session.set_client("id", "secret")`.

    Based on https://developers.google.com/accounts/docs/OAuth2ForDevices
    """

    # Token endpoint shared by get_tokens() and refresh_tokens().
    TOKEN_URL = "/o/oauth2/token"

    def __init__(self, xda):
        super(Google, self).__init__(xda)
        self.host = "accounts.google.com"  # OAuth calls target Google, not the XDA host
        self.scope = "email profile"       # scopes requested in the device flow
        self.session = self.xda.session.google_session

    def _post(self, url, body):
        """POST `body` (form-encoded) to `url` on the Google OAuth host.

        Shared plumbing for the three endpoint methods below; returns the
        decoded JSON response produced by the request layer.
        """
        return self.xda.requests.basic_enc_request(
            "POST", url, body=body, host=self.host)

    @check_session(["client_id"])
    def get_user_code(self):
        """Get required data for getting a token.

        Gets a JSON object with device/user codes, the verification
        url, and the interval for polling times.

        See Also:
            https://developers.google.com/accounts/docs/OAuth2ForDevices#obtainingacode
        """
        body = {"client_id": self.session.client_id,
                "scope": self.scope}
        return self._post("/o/oauth2/device/code", body)

    @check_session(["client_id", "client_secret"])
    def get_tokens(self, device_code):
        """Get access and refresh tokens for an authorised device code.

        Gets a JSON object with the access/refresh tokens.

        See Also:
            https://developers.google.com/accounts/docs/OAuth2ForDevices#obtainingatoken
        """
        body = {"client_id": self.session.client_id,
                "client_secret": self.session.client_secret,
                "code": device_code,
                "grant_type": "http://oauth.net/grant_type/device/1.0"}
        return self._post(self.TOKEN_URL, body)

    @check_session(["client_id", "client_secret", "refresh_token"])
    def refresh_tokens(self):
        """Refresh the access token.

        Gets a JSON object with the new access token in.

        See Also:
            https://developers.google.com/accounts/docs/OAuth2ForDevices#refreshtoken
        """
        body = {"client_id": self.session.client_id,
                "client_secret": self.session.client_secret,
                "refresh_token": self.session.refresh_token,
                "grant_type": "refresh_token"}
        return self._post(self.TOKEN_URL, body)
| <filename>XdaPy/api/google.py
# Copyright 2015 cybojenix <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Based on https://developers.google.com/accounts/docs/OAuth2ForDevices
from ..base import XdaBase
from ..decorators import check_session
class Google(XdaBase):
    """Google OAuth2 "limited-input device" flow endpoints.

    Requires a project created in the Google developer console
    (https://console.developers.google.com): under "APIs & auth"
    disable all APIs, then under credentials create a new Client ID of
    type "Installed application" -> "Other".

    Register the resulting Client ID / Client Secret with
    `x.session.google_session.set_client("id", "secret")`.
    """
    def __init__(self, xda):
        super(Google, self).__init__(xda)
        # OAuth endpoints live on Google's host, not the XDA API host.
        self.host = "accounts.google.com"
        self.scope = "email profile"
        self.session = self.xda.session.google_session

    @check_session(["client_id"])
    def get_user_code(self):
        """Request device/user codes to start the device flow.

        Returns a JSON object carrying the device and user codes, the
        verification url, and the polling interval.

        See Also:
            https://developers.google.com/accounts/docs/OAuth2ForDevices#obtainingacode
        """
        payload = {
            "client_id": self.session.client_id,
            "scope": self.scope,
        }
        return self.xda.requests.basic_enc_request(
            "POST", "/o/oauth2/device/code", body=payload, host=self.host)

    @check_session(["client_id", "client_secret"])
    def get_tokens(self, device_code):
        """Exchange an authorised device code for access/refresh tokens.

        Returns a JSON object with the access/refresh tokens.

        See Also:
            https://developers.google.com/accounts/docs/OAuth2ForDevices#obtainingatoken
        """
        payload = {
            "client_id": self.session.client_id,
            "client_secret": self.session.client_secret,
            "code": device_code,
            "grant_type": "http://oauth.net/grant_type/device/1.0",
        }
        return self.xda.requests.basic_enc_request(
            "POST", "/o/oauth2/token", body=payload, host=self.host)

    @check_session(["client_id", "client_secret", "refresh_token"])
    def refresh_tokens(self):
        """Obtain a fresh access token from the stored refresh token.

        Returns a JSON object with the new access token in.

        See Also:
            https://developers.google.com/accounts/docs/OAuth2ForDevices#refreshtoken
        """
        payload = {
            "client_id": self.session.client_id,
            "client_secret": self.session.client_secret,
            "refresh_token": self.session.refresh_token,
            "grant_type": "refresh_token",
        }
        return self.xda.requests.basic_enc_request(
            "POST", "/o/oauth2/token", body=payload, host=self.host)
| en | 0.790426 | # Copyright 2015 cybojenix <<EMAIL>> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Based on https://developers.google.com/accounts/docs/OAuth2ForDevices Handles all the requests for getting Google access token to use this, you must first create a project at the developer console (https://console.developers.google.com). go to "APIs & auth", and disable all APIs. then go to credentials, click "Create new Client ID", and select "Installed application" -> "Other". your Client ID and Client Secret can then be added to the session by doing `x.session.google_session.set_client("id", "secret")`. Get required data for getting a token gets a JSON object with device/user codes, the verification url, and the interval for polling times. See Also: https://developers.google.com/accounts/docs/OAuth2ForDevices#obtainingacode get access and refresh tokens gets a JSON object with the access/refresh tokens. See Also: https://developers.google.com/accounts/docs/OAuth2ForDevices#obtainingatoken refresh the access token gets a JSON object with the access token in. See Also: https://developers.google.com/accounts/docs/OAuth2ForDevices#refreshtoken | 2.80469 | 3 |
Python/URI 1012.py | carvalhopedro22/Programacao-URI-Online-Judge | 0 | 6621625 | <filename>Python/URI 1012.py
# URI Online Judge 1012: read three floats A, B, C from one line and print
# the areas of five shapes, each rounded to three decimal places.
# Convert once up front instead of re-applying float() at every use.
A, B, C = [float(x) for x in input().split()]

PI = 3.14159  # constant mandated by the problem statement (not math.pi)

tret = (A * C) / 2.0        # right triangle with base A and height C
circ = PI * (C * C)         # circle of radius C
trap = ((A + B) * C) / 2.0  # trapezium with bases A, B and height C
quad = B * B                # square with side B
ret = A * B                 # rectangle with sides A and B (printed below)

print("TRIANGULO: {:.3f}".format(tret))
print("CIRCULO: {:.3f}".format(circ))
print("TRAPEZIO: {:.3f}".format(trap))
print("QUADRADO: {:.3f}".format(quad))
print("RETANGULO: {:.3f}".format(ret)) | <filename>Python/URI 1012.py
# URI 1012: one input line "A B C"; report the areas of five figures.
entrada = input().split(" ")
a = float(entrada[0])
b = float(entrada[1])
c = float(entrada[2])

# Same parenthesisation as the reference solution so rounding matches.
area_triangulo = (a * c) / 2.0
area_circulo = 3.14159 * (c * c)
area_trapezio = ((a + b) * c) / 2.0
quad = b * b
ret = a * b  # used by the RETANGULO print that follows

print("TRIANGULO: {:.3f}".format(area_triangulo))
print("CIRCULO: {:.3f}".format(area_circulo))
print("TRAPEZIO: {:.3f}".format(area_trapezio))
print("QUADRADO: {:.3f}".format(quad))
print("RETANGULO: {:.3f}".format(ret)) | en | 0.379734 | # ou A,B,C = [float(x) for x in input().split()] | 3.713243 | 4 |
cochlear/__init__.py | bburan/cochlear | 0 | 6621626 | import logging.config
# Register a custom "TRACE" severity (numerically below DEBUG) and expose it
# as Logger.trace() on every logger instance.
TRACE_LEVEL_NUM = 5
logging.addLevelName(TRACE_LEVEL_NUM, "TRACE")


def trace(self, message, *args, **kws):
    """Emit *message* at TRACE level if the logger is enabled for it."""
    if not self.isEnabledFor(TRACE_LEVEL_NUM):
        return
    # Logger._log expects the positional arguments as a single tuple ('args').
    self._log(TRACE_LEVEL_NUM, message, args, **kws)


logging.Logger.trace = trace
def configure_logging(filename=None):
    """Install the package-wide logging configuration.

    A DEBUG-level console handler with a terse "<name> - <message>" format
    is always attached to the root logger; when *filename* is given, a
    DEBUG-level file handler with a timestamped format is added as well.
    """
    handlers = {
        # This is what gets printed out to the console
        'console': {
            'class': 'logging.StreamHandler',
            'formatter': 'simple',
            'level': 'DEBUG',
        },
    }
    root_handlers = ['console']
    if filename is not None:
        handlers['file'] = {
            'class': 'logging.FileHandler',
            'formatter': 'time',
            'filename': filename,
            'level': 'DEBUG',
        }
        root_handlers.append('file')
    logging.config.dictConfig({
        'version': 1,
        'formatters': {
            'time': {'format': '[%(asctime)s] :: %(name)s - %(levelname)s'
                               ' - %(message)s'},
            'simple': {'format': '%(name)s - %(message)s'},
        },
        'handlers': handlers,
        'loggers': {
            '__main__': {'level': 'DEBUG'},
            'neurogen.calibration': {'level': 'ERROR'},
            'cochlear.calibration': {'level': 'ERROR'},
            'experiment': {'level': 'ERROR'},
            'cochlear': {'level': 'DEBUG'},
            'cochlear.dpoae_experiment': {'level': 'DEBUG'},
            'cochlear.nidaqmx': {'level': 'DEBUG'},
        },
        'root': {'handlers': root_handlers},
    })
| import logging.config
# Extend the stdlib logging module with a TRACE level (5), one step more
# verbose than DEBUG (10), usable as logger.trace(...).
TRACE_LEVEL_NUM = 5
logging.addLevelName(TRACE_LEVEL_NUM, "TRACE")


def trace(self, message, *args, **kws):
    """Logger method for TRACE-level messages.

    Logger._log takes the message arguments as one tuple ('args'),
    not unpacked — hence the bare ``args`` below.
    """
    enabled = self.isEnabledFor(TRACE_LEVEL_NUM)
    if enabled:
        self._log(TRACE_LEVEL_NUM, message, args, **kws)


# Make the new method available on every Logger instance.
setattr(logging.Logger, "trace", trace)
def configure_logging(filename=None):
    """Set up console (and optionally file) logging via dictConfig.

    Parameters:
        filename: when not None, DEBUG output is additionally appended to
            this file using a timestamped format.
    """
    config = {
        'version': 1,
        'formatters': {
            'time': {
                'format': '[%(asctime)s] :: %(name)s - %(levelname)s - %(message)s',
            },
            'simple': {
                'format': '%(name)s - %(message)s',
            },
        },
        'handlers': {
            # Console output uses the short format.
            'console': {
                'class': 'logging.StreamHandler',
                'formatter': 'simple',
                'level': 'DEBUG',
            },
        },
        'loggers': {
            '__main__': {'level': 'DEBUG'},
            'neurogen.calibration': {'level': 'ERROR'},
            'cochlear.calibration': {'level': 'ERROR'},
            'experiment': {'level': 'ERROR'},
            'cochlear': {'level': 'DEBUG'},
            'cochlear.dpoae_experiment': {'level': 'DEBUG'},
            'cochlear.nidaqmx': {'level': 'DEBUG'},
        },
        'root': {'handlers': ['console']},
    }
    if filename is not None:
        file_handler = {
            'class': 'logging.FileHandler',
            'formatter': 'time',
            'filename': filename,
            'level': 'DEBUG',
        }
        config['handlers']['file'] = file_handler
        config['root']['handlers'] = ['console', 'file']
    logging.config.dictConfig(config)
| en | 0.935583 | # Set up a verbose debugger level for tracing # Yes, logger takes its '*args' as 'args'. # This is what gets printed out to the console | 2.764943 | 3 |
ismo/submit/defaults/commands.py | kjetil-lye/iterative_surrogate_optimization | 6 | 6621627 | <gh_stars>1-10
from ismo.submit import Command
class Commands(object):
    """Default factory for the shell commands of an ISMO pipeline stage.

    This class is meant to be inherited from and then you can override
    whatever methods you want.  Subclasses must implement
    :meth:`do_evolve`, which runs the actual simulator/evaluator.

    Two file-naming schemes exist: one file per iteration
    (``output_append=False``) or shared files that every stage appends to
    (``output_append=True``).
    """
    def __init__(self,
                 *,
                 training_parameter_config_file,
                 optimize_target_file,
                 optimize_target_class,
                 dimension,
                 number_of_output_values=1,
                 python_command='python',
                 prefix='',
                 starting_sample=0,
                 optimization_parameter_file=None,
                 optimizer_name='L-BFGS-B',
                 objective_parameter_file=None,
                 sample_generator_name='monte-carlo',
                 output_append=False,
                 reuse_model=False,
                 optimization_results_filename=None,
                 do_not_draw_new_samples=False,
                 save_loss_function=False
                 ):
        """Store the pipeline configuration and derive file-name templates.

        :param training_parameter_config_file: config file handed to ``ismo.bin.train``.
        :param optimize_target_file: Python module containing the objective.
        :param optimize_target_class: class name of the objective in that module.
        :param dimension: dimension of the parameter space to sample.
        :param number_of_output_values: number of scalar outputs (one surrogate
            model is trained per output).
        :param python_command: interpreter used to launch the ``ismo.bin.*`` modules.
        :param prefix: prefix prepended to every generated file name.
        :param starting_sample: index offset of the first generated sample.
        :param optimization_parameter_file: optional extra optimizer config file.
        :param optimizer_name: optimizer name passed through (default 'L-BFGS-B').
        :param objective_parameter_file: optional extra objective config file.
        :param sample_generator_name: sample generator identifier (e.g. 'monte-carlo').
        :param output_append: if True, stages append to shared files instead of
            writing per-iteration files.
        :param reuse_model: if True, training continues from the previous model.
        :param optimization_results_filename: optional file for raw optimizer output.
        :param do_not_draw_new_samples: if True, later optimize stages reuse samples.
        :param save_loss_function: if True, training also stores the loss history.
        """
        self.prefix = prefix
        self.output_append = output_append
        self.reuse_model = reuse_model
        # File-name templates.  The '{}'/'{iteration_number}'/'{value_number}'
        # placeholders are filled in by the stage methods below; in append
        # mode the iteration placeholder is dropped because all iterations
        # share one file.
        if not self.output_append:
            self.parameter_for_optimization_basename = prefix + 'parameters_for_optimization_{}.txt'
            self.parameter_basename = prefix + 'parameters_{}.txt'
            self.model_file_basename = prefix + 'model_{iteration_number}_{value_number}.h5'
            self.values_basename = prefix + 'values_{iteration_number}_{value_number}.txt'
            self.objective_basename = prefix + 'objective_{}.txt'
        else:
            self.parameter_for_optimization_basename = prefix + 'parameters.txt'
            self.parameter_basename = prefix + 'parameters.txt'
            self.model_file_basename = prefix + 'model_{value_number}.h5'
            self.values_basename = prefix + 'values_{value_number}.txt'
            self.objective_basename = prefix + 'objective.txt'
        self.loss_function_basename = prefix + '_loss_{iteration_number}_{value_number}.npy'
        self.python_command = python_command
        self.training_parameter_config_file = training_parameter_config_file
        self.optimize_target_file = optimize_target_file
        self.optimize_target_class = optimize_target_class
        # Scheduler wall-clock limits (hours) for the two long-running stages.
        self.training_wait_time_in_hours = 24
        self.optimize_wait_time_in_hours = 24
        self.number_of_output_values = number_of_output_values
        self.dimension = dimension
        self.starting_sample = starting_sample
        # Bookkeeping for append mode: total samples drawn so far and the
        # size of the most recent batch (see add_start_end_values).
        self.number_of_samples_generated = starting_sample
        self.number_of_generated_samples_in_last_batch = 0
        self.additional_optimizer_arguments = {'optimizer_name': optimizer_name}
        if optimization_parameter_file is not None:
            self.additional_optimizer_arguments['optimization_parameter_file'] = optimization_parameter_file
        self.additional_objective_arguments = {}
        if objective_parameter_file is not None:
            self.additional_objective_arguments['objective_parameter_file'] = objective_parameter_file
        self.sample_generator_name = sample_generator_name
        self.optimization_results_filename = optimization_results_filename
        self.do_not_draw_new_samples = do_not_draw_new_samples
        self.save_loss_function = save_loss_function
    def add_start_end_values(self, command):
        """In append mode, add --start/--end marking the newest batch's rows.

        The half-open range [start, end) selects, within the shared files,
        the rows produced by the most recent ``generate_samples`` call
        (offsets are relative to ``starting_sample``).  No-op when not in
        append mode.
        """
        if not self.output_append:
            return command
        start = self.number_of_samples_generated - self.number_of_generated_samples_in_last_batch - self.starting_sample
        end = self.number_of_samples_generated - self.starting_sample
        command = command.with_long_arguments(start=start, end=end)
        command = command.with_boolean_argument('output_append')
        return command
    def __run_python_module(self, module):
        """Return a Command that runs ``<python_command> -m <module>``."""
        return Command([self.python_command, "-m", module])
    def train(self, submitter, iteration_number):
        """Submit one training job per output value for this iteration.

        In per-iteration mode the training data is the union of all
        parameter/value files produced so far (iterations 0..iteration_number).
        """
        command = self.__run_python_module("ismo.bin.train")
        # NOTE(review): 'command' is reused and re-assigned across the loop
        # below, so arguments may accumulate on the same Command across
        # value_number iterations — confirm with_long_arguments' semantics.
        for value_number in range(self.number_of_output_values):
            if not self.output_append:
                input_parameters_files = [self.parameter_basename.format(i) for i in range(iteration_number + 1)]
                input_values_files = [self.values_basename.format(iteration_number=i,
                                                                  value_number=value_number) for i in
                                      range(iteration_number + 1)]
            else:
                input_parameters_files = [self.parameter_basename]
                input_values_files = [self.values_basename.format(value_number=value_number)]
            output_model_file = self.model_file_basename.format(iteration_number=iteration_number,
                                                                value_number=value_number)
            command = command.with_long_arguments(
                input_parameters_file=input_parameters_files,
                input_values_file=input_values_files,
                simple_configuration_file=self.training_parameter_config_file,
                output_model_file=output_model_file,
            )
            if self.save_loss_function:
                command = command.with_long_arguments(
                    save_loss_output_file=self.loss_function_basename.format(iteration_number=iteration_number,
                                                                             value_number=value_number))
            if self.reuse_model:
                command = command.with_boolean_argument('reuse_model')
            submitter(command, wait_time_in_hours=self.training_wait_time_in_hours)
    def generate_samples(self, submitter, iteration_number, *, number_of_samples):
        """Submit a sample-generation job and update the sample counters.

        Iteration 0 writes directly to the training parameter file; later
        iterations write to the "parameters for optimization" file instead,
        since those samples are starting points for the optimizer.
        """
        command = self.__run_python_module("ismo.bin.generate_samples")
        if iteration_number == 0:
            output_parameters_file = self.parameter_basename.format(iteration_number)
        else:
            output_parameters_file = self.parameter_for_optimization_basename.format(iteration_number)
        command = command.with_long_arguments(number_of_samples=number_of_samples,
                                              output_file=output_parameters_file,
                                              dimension=self.dimension,
                                              start=self.number_of_samples_generated,
                                              generator=self.sample_generator_name)
        if self.output_append:
            command = command.with_boolean_argument('output_append')
        submitter(command)
        # Advance the counters used by add_start_end_values for append mode.
        self.number_of_samples_generated += number_of_samples
        self.number_of_generated_samples_in_last_batch = number_of_samples
    def optimize(self, submitter, iteration_number):
        """Submit the surrogate-based optimization job for this iteration.

        Uses the models trained in the previous iteration and writes the
        optimized parameters for the current one.
        """
        command = self.__run_python_module("ismo.bin.optimize")
        input_parameters_file = self.parameter_basename.format(iteration_number - 1)
        output_parameters_file = self.parameter_basename.format(iteration_number)
        models = [self.model_file_basename.format(iteration_number=iteration_number - 1, value_number=k)
                  for k in range(self.number_of_output_values)]
        command = command.with_long_arguments(output_parameters_file=output_parameters_file,
                                              input_model_files=models,
                                              objective_python_module=self.optimize_target_file,
                                              objective_python_class=self.optimize_target_class,
                                              input_parameters_file=input_parameters_file,
                                              **self.additional_optimizer_arguments,
                                              **self.additional_objective_arguments)
        if self.do_not_draw_new_samples and iteration_number > 1:
            # NOTE(review): a list is passed here while other call sites pass a
            # plain string to with_boolean_argument — confirm both are accepted.
            command = command.with_boolean_argument(["do_not_draw_new_samples"])
        if self.optimization_results_filename is not None:
            command = command.with_long_arguments(optimization_result_filename=self.optimization_results_filename)
        command = self.add_start_end_values(command)
        submitter(command, wait_time_in_hours=self.optimize_wait_time_in_hours)
    def evolve(self, submitter, iteration_number):
        """Run the simulator (via do_evolve) and then evaluate the objective.

        str.format ignores unused keyword arguments, so the iteration_number
        keyword is harmlessly dropped from the templates in append mode.
        """
        input_parameters_file = self.parameter_basename.format(iteration_number)
        output_value_files = [self.values_basename.format(iteration_number=iteration_number, value_number=k)
                              for k in range(self.number_of_output_values)]
        self.do_evolve(submitter,
                       iteration_number=iteration_number,
                       input_parameters_file=input_parameters_file,
                       output_value_files=output_value_files
                       )
        # evaluate the objective
        objective_output = self.objective_basename.format(iteration_number)
        objective_eval = self.__run_python_module("ismo.bin.evaluate_objective")
        objective_eval = objective_eval.with_long_arguments(input_values_files=output_value_files,
                                                            objective_python_module=self.optimize_target_file,
                                                            objective_python_class=self.optimize_target_class,
                                                            output_objective_file=objective_output,
                                                            **self.additional_objective_arguments
                                                            )
        submitter(objective_eval)
    def do_evolve(self, submitter,
                  *,
                  iteration_number: int,
                  input_parameters_file: str,
                  output_value_files: list):
        """Run the simulator for the given parameters; must be overridden."""
        raise NotImplementedError('do_evolve needs to be implemented in a subclass of ismo.submit.defaults.Commands')
| from ismo.submit import Command
class Commands(object):
    """Default factory for the shell commands of an ISMO pipeline stage.

    This class is meant to be inherited from and then you can override
    whatever methods you want.  Subclasses must implement
    :meth:`do_evolve`.  Two file-naming schemes exist: one file per
    iteration (``output_append=False``) or shared files that every stage
    appends to (``output_append=True``).
    """
    def __init__(self,
                 *,
                 training_parameter_config_file,
                 optimize_target_file,
                 optimize_target_class,
                 dimension,
                 number_of_output_values=1,
                 python_command='python',
                 prefix='',
                 starting_sample=0,
                 optimization_parameter_file=None,
                 optimizer_name='L-BFGS-B',
                 objective_parameter_file=None,
                 sample_generator_name='monte-carlo',
                 output_append=False,
                 reuse_model=False,
                 optimization_results_filename=None,
                 do_not_draw_new_samples=False,
                 save_loss_function=False
                 ):
        """Store the pipeline configuration and derive file-name templates."""
        self.prefix = prefix
        self.output_append = output_append
        self.reuse_model = reuse_model
        # File-name templates; in append mode the iteration placeholder is
        # dropped because all iterations share one file.
        if not self.output_append:
            self.parameter_for_optimization_basename = prefix + 'parameters_for_optimization_{}.txt'
            self.parameter_basename = prefix + 'parameters_{}.txt'
            self.model_file_basename = prefix + 'model_{iteration_number}_{value_number}.h5'
            self.values_basename = prefix + 'values_{iteration_number}_{value_number}.txt'
            self.objective_basename = prefix + 'objective_{}.txt'
        else:
            self.parameter_for_optimization_basename = prefix + 'parameters.txt'
            self.parameter_basename = prefix + 'parameters.txt'
            self.model_file_basename = prefix + 'model_{value_number}.h5'
            self.values_basename = prefix + 'values_{value_number}.txt'
            self.objective_basename = prefix + 'objective.txt'
        self.loss_function_basename = prefix + '_loss_{iteration_number}_{value_number}.npy'
        self.python_command = python_command
        self.training_parameter_config_file = training_parameter_config_file
        self.optimize_target_file = optimize_target_file
        self.optimize_target_class = optimize_target_class
        # Scheduler wall-clock limits (hours) for the long-running stages.
        self.training_wait_time_in_hours = 24
        self.optimize_wait_time_in_hours = 24
        self.number_of_output_values = number_of_output_values
        self.dimension = dimension
        self.starting_sample = starting_sample
        # Append-mode bookkeeping (see add_start_end_values).
        self.number_of_samples_generated = starting_sample
        self.number_of_generated_samples_in_last_batch = 0
        self.additional_optimizer_arguments = {'optimizer_name': optimizer_name}
        if optimization_parameter_file is not None:
            self.additional_optimizer_arguments['optimization_parameter_file'] = optimization_parameter_file
        self.additional_objective_arguments = {}
        if objective_parameter_file is not None:
            self.additional_objective_arguments['objective_parameter_file'] = objective_parameter_file
        self.sample_generator_name = sample_generator_name
        self.optimization_results_filename = optimization_results_filename
        self.do_not_draw_new_samples = do_not_draw_new_samples
        self.save_loss_function = save_loss_function
    def add_start_end_values(self, command):
        """In append mode, add --start/--end marking the newest batch's rows.

        The half-open range [start, end) selects the rows of the most recent
        ``generate_samples`` batch within the shared files; offsets are
        relative to ``starting_sample``.  No-op when not in append mode.
        """
        if not self.output_append:
            return command
        start = self.number_of_samples_generated - self.number_of_generated_samples_in_last_batch - self.starting_sample
        end = self.number_of_samples_generated - self.starting_sample
        command = command.with_long_arguments(start=start, end=end)
        command = command.with_boolean_argument('output_append')
        return command
    def __run_python_module(self, module):
        """Return a Command that runs ``<python_command> -m <module>``."""
        return Command([self.python_command, "-m", module])
    def train(self, submitter, iteration_number):
        """Submit one training job per output value for this iteration."""
        command = self.__run_python_module("ismo.bin.train")
        # NOTE(review): 'command' is re-assigned across the loop, so arguments
        # may accumulate on the same Command for successive value_numbers —
        # confirm with_long_arguments' semantics.
        for value_number in range(self.number_of_output_values):
            if not self.output_append:
                # Train on the union of all data produced so far.
                input_parameters_files = [self.parameter_basename.format(i) for i in range(iteration_number + 1)]
                input_values_files = [self.values_basename.format(iteration_number=i,
                                                                  value_number=value_number) for i in
                                      range(iteration_number + 1)]
            else:
                input_parameters_files = [self.parameter_basename]
                input_values_files = [self.values_basename.format(value_number=value_number)]
            output_model_file = self.model_file_basename.format(iteration_number=iteration_number,
                                                                value_number=value_number)
            command = command.with_long_arguments(
                input_parameters_file=input_parameters_files,
                input_values_file=input_values_files,
                simple_configuration_file=self.training_parameter_config_file,
                output_model_file=output_model_file,
            )
            if self.save_loss_function:
                command = command.with_long_arguments(
                    save_loss_output_file=self.loss_function_basename.format(iteration_number=iteration_number,
                                                                             value_number=value_number))
            if self.reuse_model:
                command = command.with_boolean_argument('reuse_model')
            submitter(command, wait_time_in_hours=self.training_wait_time_in_hours)
    def generate_samples(self, submitter, iteration_number, *, number_of_samples):
        """Submit a sample-generation job and update the sample counters.

        Iteration 0 writes to the training parameter file; later iterations
        write optimizer starting points to the "for optimization" file.
        """
        command = self.__run_python_module("ismo.bin.generate_samples")
        if iteration_number == 0:
            output_parameters_file = self.parameter_basename.format(iteration_number)
        else:
            output_parameters_file = self.parameter_for_optimization_basename.format(iteration_number)
        command = command.with_long_arguments(number_of_samples=number_of_samples,
                                              output_file=output_parameters_file,
                                              dimension=self.dimension,
                                              start=self.number_of_samples_generated,
                                              generator=self.sample_generator_name)
        if self.output_append:
            command = command.with_boolean_argument('output_append')
        submitter(command)
        # Advance counters used by add_start_end_values for append mode.
        self.number_of_samples_generated += number_of_samples
        self.number_of_generated_samples_in_last_batch = number_of_samples
    def optimize(self, submitter, iteration_number):
        """Submit the surrogate-based optimization job for this iteration,
        using the models trained in the previous iteration."""
        command = self.__run_python_module("ismo.bin.optimize")
        input_parameters_file = self.parameter_basename.format(iteration_number - 1)
        output_parameters_file = self.parameter_basename.format(iteration_number)
        models = [self.model_file_basename.format(iteration_number=iteration_number - 1, value_number=k)
                  for k in range(self.number_of_output_values)]
        command = command.with_long_arguments(output_parameters_file=output_parameters_file,
                                              input_model_files=models,
                                              objective_python_module=self.optimize_target_file,
                                              objective_python_class=self.optimize_target_class,
                                              input_parameters_file=input_parameters_file,
                                              **self.additional_optimizer_arguments,
                                              **self.additional_objective_arguments)
        if self.do_not_draw_new_samples and iteration_number > 1:
            # NOTE(review): list here vs plain string elsewhere — confirm
            # with_boolean_argument accepts both.
            command = command.with_boolean_argument(["do_not_draw_new_samples"])
        if self.optimization_results_filename is not None:
            command = command.with_long_arguments(optimization_result_filename=self.optimization_results_filename)
        command = self.add_start_end_values(command)
        submitter(command, wait_time_in_hours=self.optimize_wait_time_in_hours)
    def evolve(self, submitter, iteration_number):
        """Run the simulator (via do_evolve) and then evaluate the objective."""
        input_parameters_file = self.parameter_basename.format(iteration_number)
        output_value_files = [self.values_basename.format(iteration_number=iteration_number, value_number=k)
                              for k in range(self.number_of_output_values)]
        self.do_evolve(submitter,
                       iteration_number=iteration_number,
                       input_parameters_file=input_parameters_file,
                       output_value_files=output_value_files
                       )
        # evaluate the objective
        objective_output = self.objective_basename.format(iteration_number)
        objective_eval = self.__run_python_module("ismo.bin.evaluate_objective")
        objective_eval = objective_eval.with_long_arguments(input_values_files=output_value_files,
                                                            objective_python_module=self.optimize_target_file,
                                                            objective_python_class=self.optimize_target_class,
                                                            output_objective_file=objective_output,
                                                            **self.additional_objective_arguments
                                                            )
        submitter(objective_eval)
    def do_evolve(self, submitter,
                  *,
                  iteration_number: int,
                  input_parameters_file: str,
                  output_value_files: list):
        """Run the simulator for the given parameters; must be overridden."""
raise NotImplementedError('do_evolve needs to be implemented in a subclass of ismo.submit.defaults.Commands') | en | 0.95686 | This class is meant to be inherited from and then you can override whatever methods you want # evaluate the objective | 2.807742 | 3 |
rtl/alu.py | bonfireprocessor/bonfire-core | 0 | 6621628 | """
RISC-V ALU
(c) 2019 The Bonfire Project
License: See LICENSE
"""
from myhdl import *
from rtl.barrel_shifter import shift_pipelined
from rtl.instructions import ArithmeticFunct3 as f3
class AluBundle:
    """Signal bundle and myhdl generators for a RISC-V integer ALU.

    The bundle carries the operand/function inputs, the result output and
    the comparison flags; :meth:`alu` instantiates the combinational logic
    and the (optional) shifter.
    """
    def __init__(self,xlen=32):
        # ALU Inputs
        self.funct3_i = Signal(modbv(0)[3:])   # RISC-V funct3 field selecting the operation
        self.funct7_6_i = Signal(bool(0))      # bit 30 of the instruction (SUB / SRA variant select)
        self.op1_i = Signal(modbv(0)[xlen:])
        self.op2_i = Signal(modbv(0)[xlen:])
        # ALU Outputs
        self.res_o = Signal(modbv(0)[xlen:])
        self.flag_ge = Signal(bool(0)) # Only valid when ALU is subtracting : op1>=op2 (signed)
        self.flag_uge = Signal(bool(0)) # Only valid when ALU is subtracting : op1>=op2 (unsigned)
        self.flag_equal = Signal(bool(0)) # op1==op2 (always valid)
        # Control Signals
        self.en_i=Signal(bool(0))      # operation enable/request
        self.busy_o=Signal(bool(0))    # high while a multi-cycle shift is in flight
        self.valid_o=Signal(bool(0))   # result on res_o is valid
        # Constants
        self.xlen = xlen               # data-path width in bits
    @block
    def adder(self,subtract_i,result_o,ge_o,uge_o):
        """Adder/subtractor with signed/unsigned greater-or-equal flags.

        subtract_i : bool, do subtract (two's complement: invert op2 and
                     feed subtract_i as carry-in)
        result_o   : modbv[xlen:] add/subtract result
        ge_o       : bool output, signed op1 >= op2 (valid when subtracting)
        uge_o      : bool output, unsigned op1 >= op2 (valid when subtracting)
        """
        res = Signal(modbv(0)[self.xlen+1:]) ## accomodate for carry bit
        @always_comb
        def do_add():
            op_b = modbv(0)[self.xlen:]
            # Two's complement subtraction: op1 + ~op2 + 1.
            if subtract_i:
                op_b[:] = ~self.op2_i
            else:
                op_b[:] = self.op2_i
            # for i in range(self.xlen):
            #     op_b[i] = self.op2_i[i] ^ subtract_i
            res.next = self.op1_i + op_b + subtract_i
        @always_comb
        def adder_output():
            result_o.next = res[self.xlen:]
            # Carry-out of a subtraction means "no borrow", i.e. op1 >= op2
            # unsigned.  The signed comparison additionally accounts for the
            # operand sign bits.
            carry = res[len(res)-1]
            s1 = self.op1_i[len(self.op1_i)-1]
            s2 = self.op2_i[len(self.op2_i)-1]
            uge_o.next = carry
            ge_o.next = (s1 and s2 and carry) or (not s1 and not s2 and carry ) or ( not s1 and s2 )
        return instances()
    @block
    def alu(self,clock,reset, c_shifter_mode="none"):
        """ALU logic generator.

        c_shifter_mode:
           "none" : Don't implement shifts
           "comb" : Single cycle barrel shifter
           "pipelined" : 2-cycle barrel shifter
           "behavioral" : Implement shift with Python operators
        """
        assert ( c_shifter_mode=="none" or c_shifter_mode=="comb" or c_shifter_mode=="pipelined" or c_shifter_mode=="behavioral")
        #assert ( c_shifter_mode=="none" or c_shifter_mode=="behavioral")
        shifter_out = Signal(modbv(0)[self.xlen:])
        shift_valid = Signal(bool(0))
        # Only driven in "pipelined" mode; stays 0 (never busy) otherwise.
        shift_busy = Signal(bool(0))
        alu_valid = Signal(bool(0))
        # Adder interface
        subtract = Signal(bool(0))
        adder_out = Signal(modbv(0)[self.xlen:])
        flag_ge = Signal(bool(0))
        flag_uge = Signal(bool(0))
        add_inst=self.adder(subtract,adder_out,flag_ge,flag_uge)
        if c_shifter_mode=="behavioral":
            # Simulation-only shifter using Python shift operators.
            # NOTE(review): shift_valid here depends only on funct3_i, not on
            # en_i, so valid_o can assert without an enabled operation in this
            # mode — confirm this is intended for simulation.
            @always_comb
            def shift():
                if self.funct3_i==f3.RV32_F3_SLL:
                    shifter_out.next = self.op1_i << self.op2_i[5:]
                    shift_valid.next=True
                elif self.funct3_i==f3.RV32_F3_SRL_SRA:
                    # funct7 bit 6 selects arithmetic (sign-extending) shift.
                    shifter_out.next = ( self.op1_i.signed() if self.funct7_6_i else self.op1_i ) >> self.op2_i[5:]
                    shift_valid.next=True
                else:
                    shift_valid.next=False
        elif c_shifter_mode=="comb" or c_shifter_mode=="pipelined":
            # Synthesizable barrel shifter; 0 extra stages in "comb" mode,
            # 3 in "pipelined" mode.
            fill_v = Signal(bool(0))
            shift_en = Signal(bool(0))
            shift_ready = Signal(bool(0))
            shift_right = Signal(bool(0))
            shift_amount=Signal(intbv(0)[5:])
            shift_inst=shift_pipelined(clock,reset,self.op1_i,shifter_out,shift_amount, \
                                       shift_right,fill_v,shift_en,shift_ready, 3 if c_shifter_mode=="pipelined" else 0 )
            @always_comb
            def shift_comb():
                shift_valid.next = shift_ready
                shift_amount.next = self.op2_i[5:0]
                if self.funct3_i==f3.RV32_F3_SLL:
                    shift_right.next=False
                    fill_v.next = False
                    shift_en.next = self.en_i
                elif self.funct3_i==f3.RV32_F3_SRL_SRA:
                    shift_right.next = True
                    # Arithmetic right shift fills with the operand's sign bit.
                    fill_v.next = self.funct7_6_i and self.op1_i[self.xlen-1]
                    shift_en.next = self.en_i
                else:
                    shift_right.next = False
                    fill_v.next = False
                    shift_en.next = False
            if c_shifter_mode=="pipelined":
                @always_comb
                def shift_pipelined_comb():
                    # Busy while a shift has been requested but is not ready.
                    shift_busy.next = shift_en and not shift_ready
        @always_comb
        def set_subtract():
            """
            The only case the ALU is not subtracting is when there is really
            an add instruction; everything else (SUB, SLT/SLTU, branches via
            the flags) needs the subtraction result/flags.
            """
            subtract.next = not (self.en_i and self.funct3_i==f3.RV32_F3_ADD_SUB and not self.funct7_6_i)
        @always_comb
        def comb():
            # Result multiplexer; shift results take priority when valid.
            alu_valid.next=False
            if shift_valid:
                self.res_o.next = shifter_out
                alu_valid.next = True
            elif self.funct3_i==f3.RV32_F3_ADD_SUB:
                self.res_o.next = adder_out
                alu_valid.next = self.en_i
            elif self.funct3_i==f3.RV32_F3_OR:
                self.res_o.next = self.op1_i | self.op2_i
                alu_valid.next = self.en_i
            elif self.funct3_i==f3.RV32_F3_AND:
                self.res_o.next = self.op1_i & self.op2_i
                alu_valid.next=self.en_i
            elif self.funct3_i==f3.RV32_F3_XOR:
                self.res_o.next = self.op1_i ^ self.op2_i
                alu_valid.next=self.en_i
            elif self.funct3_i==f3.RV32_F3_SLT:
                # set-less-than: 1 when op1 < op2 signed, i.e. not (op1 >= op2)
                self.res_o.next = not flag_ge
                alu_valid.next=self.en_i
            elif self.funct3_i==f3.RV32_F3_SLTU:
                self.res_o.next = not flag_uge
                alu_valid.next=self.en_i
            # elif not c_shifter_mode=="pipelined" and ( self.funct3_i==f3.RV32_F3_SLL or self.funct3_i==f3.RV32_F3_SRL_SRA):
            #     self.res_o.next = shifter_out.val
            #     alu_valid.next = True
            else:
                #assert not self.en_i, "Invalid funct3_i"
                self.res_o.next = 0
            # Comparator outputs (flags are only meaningful while subtracting,
            # see set_subtract above).
            self.flag_ge.next = flag_ge
            self.flag_uge.next = flag_uge
            self.flag_equal.next = self.op1_i == self.op2_i
        @always_comb
        def valid_ctrl():
            self.valid_o.next= alu_valid
        @always_seq(clock.posedge,reset=reset)
        def busy_ctrl():
            # Registered so busy_o lags the combinational shift_busy by a cycle.
            self.busy_o.next = shift_busy
        return instances()
| """
RISC-V ALU
(c) 2019 The Bonfire Project
License: See LICENSE
"""
from myhdl import *
from rtl.barrel_shifter import shift_pipelined
from rtl.instructions import ArithmeticFunct3 as f3
class AluBundle:
    def __init__(self,xlen=32):
        # ALU Inputs
        self.funct3_i = Signal(modbv(0)[3:])   # RISC-V funct3 field selecting the operation
        self.funct7_6_i = Signal(bool(0))      # instruction bit 30 (SUB / SRA variant select)
        self.op1_i = Signal(modbv(0)[xlen:])
        self.op2_i = Signal(modbv(0)[xlen:])
        # ALU Outputs
        self.res_o = Signal(modbv(0)[xlen:])
        self.flag_ge = Signal(bool(0)) # Only valid when ALU is subtracting : op1>=op2 (signed)
        self.flag_uge = Signal(bool(0)) # Only valid when ALU is subtracting : op1>=op2 (unsigned)
        self.flag_equal = Signal(bool(0)) # op1==op2 (always valid)
        # Control Signals
        self.en_i=Signal(bool(0))      # operation enable/request
        self.busy_o=Signal(bool(0))    # high while a multi-cycle shift is in flight
        self.valid_o=Signal(bool(0))   # result on res_o is valid
        # Constants
        self.xlen = xlen               # data-path width in bits
    @block
    def adder(self,subtract_i,result_o,ge_o,uge_o):
        """Adder/subtractor with signed/unsigned greater-or-equal flags.

        subtract_i : bool, do subtract (two's complement: invert op2 and
                     feed subtract_i as carry-in)
        result_o   : modbv[xlen:] add/subtract result
        ge_o       : bool output, signed op1 >= op2 (valid when subtracting)
        uge_o      : bool output, unsigned op1 >= op2 (valid when subtracting)
        """
        res = Signal(modbv(0)[self.xlen+1:]) ## accomodate for carry bit
        @always_comb
        def do_add():
            op_b = modbv(0)[self.xlen:]
            # Two's complement subtraction: op1 + ~op2 + 1.
            if subtract_i:
                op_b[:] = ~self.op2_i
            else:
                op_b[:] = self.op2_i
            # for i in range(self.xlen):
            #     op_b[i] = self.op2_i[i] ^ subtract_i
            res.next = self.op1_i + op_b + subtract_i
        @always_comb
        def adder_output():
            result_o.next = res[self.xlen:]
            # Carry-out of a subtraction means "no borrow" => op1 >= op2
            # unsigned; the signed compare also folds in the sign bits.
            carry = res[len(res)-1]
            s1 = self.op1_i[len(self.op1_i)-1]
            s2 = self.op2_i[len(self.op2_i)-1]
            uge_o.next = carry
            ge_o.next = (s1 and s2 and carry) or (not s1 and not s2 and carry ) or ( not s1 and s2 )
        return instances()
@block
def alu(self,clock,reset, c_shifter_mode="none"):
"""
c_shifter_mode:
"none" : Don't implement shifts
"comb" : Single cycle barrel shifter
"pipelined" : 2-cycle barrel shifter
"behavioral" : Implement shift with Python operators
"""
assert ( c_shifter_mode=="none" or c_shifter_mode=="comb" or c_shifter_mode=="pipelined" or c_shifter_mode=="behavioral")
#assert ( c_shifter_mode=="none" or c_shifter_mode=="behavioral")
shifter_out = Signal(modbv(0)[self.xlen:])
shift_valid = Signal(bool(0))
shift_busy = Signal(bool(0))
alu_valid = Signal(bool(0))
# Adder interface
subtract = Signal(bool(0))
adder_out = Signal(modbv(0)[self.xlen:])
flag_ge = Signal(bool(0))
flag_uge = Signal(bool(0))
add_inst=self.adder(subtract,adder_out,flag_ge,flag_uge)
if c_shifter_mode=="behavioral":
@always_comb
def shift():
if self.funct3_i==f3.RV32_F3_SLL:
shifter_out.next = self.op1_i << self.op2_i[5:]
shift_valid.next=True
elif self.funct3_i==f3.RV32_F3_SRL_SRA:
shifter_out.next = ( self.op1_i.signed() if self.funct7_6_i else self.op1_i ) >> self.op2_i[5:]
shift_valid.next=True
else:
shift_valid.next=False
elif c_shifter_mode=="comb" or c_shifter_mode=="pipelined":
fill_v = Signal(bool(0))
shift_en = Signal(bool(0))
shift_ready = Signal(bool(0))
shift_right = Signal(bool(0))
shift_amount=Signal(intbv(0)[5:])
shift_inst=shift_pipelined(clock,reset,self.op1_i,shifter_out,shift_amount, \
shift_right,fill_v,shift_en,shift_ready, 3 if c_shifter_mode=="pipelined" else 0 )
@always_comb
def shift_comb():
shift_valid.next = shift_ready
shift_amount.next = self.op2_i[5:0]
if self.funct3_i==f3.RV32_F3_SLL:
shift_right.next=False
fill_v.next = False
shift_en.next = self.en_i
elif self.funct3_i==f3.RV32_F3_SRL_SRA:
shift_right.next = True
fill_v.next = self.funct7_6_i and self.op1_i[self.xlen-1]
shift_en.next = self.en_i
else:
shift_right.next = False
fill_v.next = False
shift_en.next = False
if c_shifter_mode=="pipelined":
@always_comb
def shift_pipelined_comb():
shift_busy.next = shift_en and not shift_ready
@always_comb
def set_subtract():
"""
The only case the ALU is not subtracting is when there is really an add instruction
"""
subtract.next = not (self.en_i and self.funct3_i==f3.RV32_F3_ADD_SUB and not self.funct7_6_i)
@always_comb
def comb():
alu_valid.next=False
if shift_valid:
self.res_o.next = shifter_out
alu_valid.next = True
elif self.funct3_i==f3.RV32_F3_ADD_SUB:
self.res_o.next = adder_out
alu_valid.next = self.en_i
elif self.funct3_i==f3.RV32_F3_OR:
self.res_o.next = self.op1_i | self.op2_i
alu_valid.next = self.en_i
elif self.funct3_i==f3.RV32_F3_AND:
self.res_o.next = self.op1_i & self.op2_i
alu_valid.next=self.en_i
elif self.funct3_i==f3.RV32_F3_XOR:
self.res_o.next = self.op1_i ^ self.op2_i
alu_valid.next=self.en_i
elif self.funct3_i==f3.RV32_F3_SLT:
self.res_o.next = not flag_ge
alu_valid.next=self.en_i
elif self.funct3_i==f3.RV32_F3_SLTU:
self.res_o.next = not flag_uge
alu_valid.next=self.en_i
# elif not c_shifter_mode=="pipelined" and ( self.funct3_i==f3.RV32_F3_SLL or self.funct3_i==f3.RV32_F3_SRL_SRA):
# self.res_o.next = shifter_out.val
# alu_valid.next = True
else:
#assert not self.en_i, "Invalid funct3_i"
self.res_o.next = 0
# Comparator outputs
self.flag_ge.next = flag_ge
self.flag_uge.next = flag_uge
self.flag_equal.next = self.op1_i == self.op2_i
@always_comb
def valid_ctrl():
self.valid_o.next= alu_valid
@always_seq(clock.posedge,reset=reset)
def busy_ctrl():
self.busy_o.next = shift_busy
return instances()
| en | 0.571613 | RISC-V ALU (c) 2019 The Bonfire Project License: See LICENSE # ALU Inputs # ALU Outputs # Only valid when ALU is subtracting : op1>=op2 (signed) # Only valid when when ALU is subtracting : op1>=op2 (unsigned) # op1==op2 # Control Signals # Constants subrtact_i : bool do subtract result_o : modbv[32:] add/subtract result ge_o : bool output signed greater or equal uge_o : bool output, unsgined greater or equal ## accomodate for carry bit # for i in range(self.xlen): # op_b[i] = self.op2_i[i] ^ subtract_i c_shifter_mode: "none" : Don't implement shifts "comb" : Single cycle barrel shifter "pipelined" : 2-cycle barrel shifter "behavioral" : Implement shift with Python operators #assert ( c_shifter_mode=="none" or c_shifter_mode=="behavioral") # Adder interface The only case the ALU is not subtracting is when there is really an add instruction # elif not c_shifter_mode=="pipelined" and ( self.funct3_i==f3.RV32_F3_SLL or self.funct3_i==f3.RV32_F3_SRL_SRA): # self.res_o.next = shifter_out.val # alu_valid.next = True #assert not self.en_i, "Invalid funct3_i" # Comparator outputs | 2.392525 | 2 |
FunctionCheck.py | StefanTitusGlover/IA-Flood-warning-System-Group-25 | 0 | 6621629 | <filename>FunctionCheck.py
from floodsystem.geo import station_history
from floodsystem.Plot import plot_water_levels
from floodsystem.stationdata import build_station_list, update_water_levels
from floodsystem.flood import stations_highest_rel_level
from floodsystem.datafetcher import fetch_measure_levels
from floodsystem.Analysis import polyfit, gradient_polyfit
from floodsystem.Plot import plot_water_level_with_fit
from floodsystem.station import consistent_typical_range_stations
stations = build_station_list()
update_water_levels(stations)
stationlist = consistent_typical_range_stations(stations)
# Fetch each station's history once and fit a degree-4 polynomial to it.
# Previously a sticky `check` flag was set False by the first failure and
# never reset, silently skipping every later station, and each successful
# station's history was fetched twice.
for station in stationlist:
    try:
        station_profile, dates, levels = station_history(station.name, 2)
    except Exception:
        continue  # skip only the station whose history could not be fetched
    poly = polyfit(dates, levels, 4)
    grad = gradient_polyfit(dates, levels, 4)
    print(poly, grad)
| <filename>FunctionCheck.py
from floodsystem.geo import station_history
from floodsystem.Plot import plot_water_levels
from floodsystem.stationdata import build_station_list, update_water_levels
from floodsystem.flood import stations_highest_rel_level
from floodsystem.datafetcher import fetch_measure_levels
from floodsystem.Analysis import polyfit, gradient_polyfit
from floodsystem.Plot import plot_water_level_with_fit
from floodsystem.station import consistent_typical_range_stations
stations = build_station_list()
update_water_levels(stations)
stationlist = consistent_typical_range_stations(stations)
check = True
for station in stationlist:
try:
station_profile,dates,levels = station_history(station.name,2)
except:
check = False
if check == True:
station_profile,dates,levels = station_history(station.name,2)
poly = polyfit(dates,levels,4)
grad = gradient_polyfit(dates,levels,4)
print(poly,grad)
| none | 1 | 2.703089 | 3 | |
src/tengi/command/param.py | luckybots/tengi | 2 | 6621630 | <gh_stars>1-10
from typing import Any
import argparse
class CommandParam:
    """Description of a single ``--long-option`` command parameter.

    Collects everything needed to register the option on an
    ``argparse.ArgumentParser`` later via :meth:`add_to_parser`.
    """

    def __init__(self, name: str, help_str: str, param_type: Any, nargs=None):
        # Only long-form options ("--name") are supported.
        assert name.startswith('--')
        self.name = name
        self.help_str = help_str
        self.param_type = param_type
        self.nargs = nargs

    def add_to_parser(self, parser: argparse.ArgumentParser):
        """Register this parameter on *parser* as an optional argument."""
        options = {
            'type': self.param_type,
            'nargs': self.nargs,
            'metavar': '',  # keep the generated usage line compact
            'help': self.help_str,
        }
        parser.add_argument(self.name, **options)
| from typing import Any
import argparse
class CommandParam:
def __init__(self, name: str, help_str: str, param_type: Any, nargs=None):
assert name.startswith('--')
self.name = name
self.help_str = help_str
self.param_type = param_type
self.nargs = nargs
def add_to_parser(self, parser: argparse.ArgumentParser):
parser.add_argument(self.name,
type=self.param_type,
nargs=self.nargs,
metavar='',
help=self.help_str) | none | 1 | 2.961925 | 3 | |
src/Argument_Parser_Template.py | Nirlov24/kushs-utils-tool | 0 | 6621631 | <reponame>Nirlov24/kushs-utils-tool
"""
Argument parser template
"""
import argparse
parser = argparse.ArgumentParser(description='Your application description')
# simple argument (mandatory)
parser.add_argument('a', help='some description')
# cast positional argument to int
parser.add_argument('b', type=int, help='some description')
# option (optional)
parser.add_argument('-r', help='some description')
# set silent=True if this option available
parser.add_argument('-s', '--silent', action='store_true', default=False, help='some description')
# parse arguments/options to an object args
args = parser.parse_args()
# call the arguments/options
print(args.a)
print(args.b)
print(args.r)
print(args.s)
print(args.silent) | """
Argument parser template
"""
import argparse
parser = argparse.ArgumentParser(description='Your application description')
# simple argument (mandatory)
parser.add_argument('a', help='some description')
# cast positional argument to int
parser.add_argument('b', type=int, help='some description')
# option (optional)
parser.add_argument('-r', help='some description')
# set silent=True if this option available
parser.add_argument('-s', '--silent', action='store_true', default=False, help='some description')
# parse arguments/options to an object args
args = parser.parse_args()
# call the arguments/options
print(args.a)
print(args.b)
print(args.r)
print(args.s)
print(args.silent) | en | 0.216656 | Argument parser template # simple argument (mandatory) # cast positional argument to int # option (optional) # set silent=True if this option available # parse arguments/options to an object args # call the arguments/options | 3.611272 | 4 |
convert_to_jpeg.py | marmig0404/StyleGAN2-Tensorflow-2.0 | 0 | 6621632 | <reponame>marmig0404/StyleGAN2-Tensorflow-2.0
"""
convert_to_jpeg.py directory
Used to convert a directory of images to jpg format
<NAME> (marmig0404) 2021
"""
import os
import sys
import PIL.Image as Image
source_dir = sys.argv[1]
for (dirpath, dirnames, filenames) in os.walk(os.path.abspath(source_dir)):
print(filenames)
for file in filenames:
infile = os.path.join(source_dir, file)
f, e = os.path.splitext(infile)
outfile = f + ".jpg"
if infile != outfile:
try:
with Image.open(infile) as im:
im.save(outfile)
except OSError:
print("cannot convert", infile)
| """
convert_to_jpeg.py directory
Used to convert a directory of images to jpg format
<NAME> (marmig0404) 2021
"""
import os
import sys
import PIL.Image as Image
source_dir = sys.argv[1]
for (dirpath, dirnames, filenames) in os.walk(os.path.abspath(source_dir)):
print(filenames)
for file in filenames:
infile = os.path.join(source_dir, file)
f, e = os.path.splitext(infile)
outfile = f + ".jpg"
if infile != outfile:
try:
with Image.open(infile) as im:
im.save(outfile)
except OSError:
print("cannot convert", infile) | en | 0.303089 | convert_to_jpeg.py directory Used to convert a directory of images to jpg format <NAME> (marmig0404) 2021 | 3.50763 | 4 |
Modulo_1/semana2/Estructura-de-Datos/set/conjunto-clear.py | rubens233/cocid_python | 0 | 6621633 | s = {1, 2, 3, 4}
s.clear()
print(s)
| s = {1, 2, 3, 4}
s.clear()
print(s)
| none | 1 | 2.192397 | 2 | |
gmconfig/utils/basicimporter.py | GeekMasher/GMConfig | 0 | 6621634 | from gmconfig.configuration import Configuration
from gmconfig.loaders.load import loadFile
from gmconfig.utils.litemerge import liteMerge
def basicImporter(obj: dict) -> dict:
    """ This is a slow but effective way of importing content

    Recursively resolves every ``import`` key found in *obj* (see
    :func:`_import`) and returns the merged result.
    """
    return _import(obj)
def _import(obj: dict) -> dict:
    # Recursively copy *obj* into a Configuration, stripping every
    # "import" key and merging the referenced file(s) into the result.
    new_obj = Configuration()
    import_value = None
    for key, value in obj.items():
        new_obj[key] = value
        if key == "import":
            # Remember the import target(s) but keep the key out of the copy.
            import_value = value
            new_obj.pop(key)
        if isinstance(value, dict):
            # Nested sections may carry their own imports.
            new_obj[key] = _import(value)
    if import_value is not None:
        # Imported files are merged after the literal keys; a single path
        # or a list of paths is accepted.
        if isinstance(import_value, str):
            new_obj.merge(loadFile(import_value))
        elif isinstance(import_value, list):
            for imp_path in import_value:
                new_obj.merge(loadFile(imp_path))
    return new_obj
| from gmconfig.configuration import Configuration
from gmconfig.loaders.load import loadFile
from gmconfig.utils.litemerge import liteMerge
def basicImporter(obj: dict) -> dict:
""" This is a slow but effective way of importing content
"""
return _import(obj)
def _import(obj: dict) -> dict:
new_obj = Configuration()
import_value = None
for key, value in obj.items():
new_obj[key] = value
if key == "import":
import_value = value
new_obj.pop(key)
if isinstance(value, dict):
new_obj[key] = _import(value)
if import_value is not None:
if isinstance(import_value, str):
new_obj.merge(loadFile(import_value))
elif isinstance(import_value, list):
for imp_path in import_value:
new_obj.merge(loadFile(imp_path))
return new_obj
| en | 0.901356 | This is a slow but effective way of importing content | 2.378735 | 2 |
src/slack_delete_channel_history.py | x-blood/slack-delete-channel-history | 1 | 6621635 | import urllib.request
import urllib.parse
import datetime
import json
import time
import os
from datetime import timedelta
def lambda_handler(event, context):
    """Delete Slack messages older than EXPIRED_DATE days from a channel.

    Fetches up to MAX_DELETABLE_OBJECT_COUNT messages older than the
    cut-off from TARGET_CHANNEL_ID, deletes them one by one, then posts a
    summary message to the channel.

    Fixes: the previous version called print('... : %s', value), which
    prints a literal "%s" instead of formatting, and it logged the raw
    Slack token (a secret) to the log stream.
    """
    print('Start lambda_handler')
    token = os.environ['SLACK_DELETE_CHANNEL_HISTORY_APP_TOKEN']
    # Never log the secret itself -- only its length for troubleshooting.
    print('env token length : %d' % len(token))
    channel = event['TARGET_CHANNEL_ID']
    print('env channel : %s' % channel)
    count = event['MAX_DELETABLE_OBJECT_COUNT']
    print('env count : %s' % count)
    expired_date = event['EXPIRED_DATE']
    print('env expired_date : %s' % expired_date)

    # Epoch timestamp of the newest message old enough to delete.
    now = datetime.datetime.now()
    target_datetime = now - timedelta(days=expired_date)
    epoch_time = target_datetime.timestamp()
    print('epoch_time : %s' % epoch_time)

    hist_url = "https://slack.com/api/conversations.history"
    delete_url = "https://slack.com/api/chat.delete"
    post_url = "https://slack.com/api/chat.postMessage"

    def call_api(url, params):
        # POST form-encoded params to a Slack Web API endpoint and return
        # the raw response body.
        req = urllib.request.Request(url)
        req.data = urllib.parse.urlencode(params).encode('ascii')
        res = urllib.request.urlopen(req)
        return res.read()

    body = call_api(hist_url, {
        'channel': channel,
        'token': token,
        'latest': epoch_time,
        'limit': count,
    })
    data = json.loads(body)

    deleted_count = 0
    for m in data['messages']:
        print(m)
        body = call_api(delete_url, {
            'channel': channel,
            'token': token,
            'ts': m["ts"],
        })
        print(body)
        deleted_count += 1
        time.sleep(2)  # stay under Slack's rate limit

    # Post a summary of what was removed.
    call_api(post_url, {
        'channel': channel,
        'token': token,
        'text': "%d日前の通知情報を自動的に削除しました。 *`削除した件数:%d`* " % (expired_date, deleted_count)
    })
| import urllib.request
import urllib.parse
import datetime
import json
import time
import os
from datetime import timedelta
def lambda_handler(event, context):
print('Start lambda_handler')
token = os.environ['SLACK_DELETE_CHANNEL_HISTORY_APP_TOKEN']
print('env token : %s', token)
channel = event['TARGET_CHANNEL_ID']
print('env channel : %s', channel)
count = event['MAX_DELETABLE_OBJECT_COUNT']
print('env count : %s', count)
expired_date = event['EXPIRED_DATE']
print('env expired_date : %s', expired_date)
now = datetime.datetime.now()
delta = timedelta(days=+expired_date)
target_datetime = now - delta
epoch_time = target_datetime.timestamp()
print('epoch_time : %s' % epoch_time)
hist_url = "https://slack.com/api/conversations.history"
delete_url = "https://slack.com/api/chat.delete"
post_url = "https://slack.com/api/chat.postMessage"
hist_params = {
'channel': channel,
'token': token,
'latest': epoch_time,
'limit': count
}
req = urllib.request.Request(hist_url)
hist_params = urllib.parse.urlencode(hist_params).encode('ascii')
req.data = hist_params
res = urllib.request.urlopen(req)
body = res.read()
data = json.loads(body)
deleted_count = 0
for m in data['messages']:
print(m)
delete_params = {
'channel': channel,
'token': token,
'ts': m["ts"]
}
req = urllib.request.Request(delete_url)
delete_params = urllib.parse.urlencode(delete_params).encode('ascii')
req.data = delete_params
res = urllib.request.urlopen(req)
body = res.read()
print(body)
deleted_count += 1
time.sleep(2)
req = urllib.request.Request(post_url)
post_params = {
'channel': channel,
'token': token,
'text': "%d日前の通知情報を自動的に削除しました。 *`削除した件数:%d`* " % (expired_date, deleted_count)
}
post_params = urllib.parse.urlencode(post_params).encode('ascii')
req.data = post_params
_ = urllib.request.urlopen(req)
| none | 1 | 2.52485 | 3 | |
run-pollination.py | fossabot/natcap-invest-docker | 0 | 6621636 | <reponame>fossabot/natcap-invest-docker
# coding=UTF-8
# hardcoded demo runner script for the pollination model
import time
import sys
import os
import logging
import natcap.invest.pollination
logging.basicConfig(stream=sys.stdout, level=logging.WARN)
def now():
return int(time.time() * 1000.0)
start_ms = now()
print('[INFO] starting up')
args = {
u'farm_vector_path': u'/data/pollination/farms.shp',
u'guild_table_path': u'/data/pollination/guild_table.csv',
u'landcover_biophysical_table_path': u'/data/pollination/landcover_biophysical_table.csv',
u'landcover_raster_path': u'/data/pollination/landcover.tif',
u'results_suffix': u'',
u'workspace_dir': u'/workspace/pollination',
}
if __name__ == '__main__':
ptvsd_enable = os.getenv('PTVSD_ENABLE', default=0)
if ptvsd_enable == '1':
print('[INFO] Remote debugging, via ptvsd, is enabled')
# somewhat following https://vinta.ws/code/remotely-debug-a-python-app-inside-a-docker-container-in-visual-studio-code.html
import ptvsd
ptvsd_port = int(os.getenv('PTVSD_PORT', default=3000))
ptvsd.enable_attach(address=('0.0.0.0', ptvsd_port))
print('[INFO] ptvsd is started (port=%d), waiting for you to attach...' % ptvsd_port)
ptvsd.wait_for_attach()
print('[INFO] debugger is attached, breakpointing so you can set your own breakpoints')
breakpoint()
print('[INFO] starting execution of pollination model')
natcap.invest.pollination.execute(args)
elapsed_time = now() - start_ms
print('[INFO] finished execution of pollination model, elapsed time {}ms'.format(elapsed_time))
| # coding=UTF-8
# hardcoded demo runner script for the pollination model
import time
import sys
import os
import logging
import natcap.invest.pollination
logging.basicConfig(stream=sys.stdout, level=logging.WARN)
def now():
return int(time.time() * 1000.0)
start_ms = now()
print('[INFO] starting up')
args = {
u'farm_vector_path': u'/data/pollination/farms.shp',
u'guild_table_path': u'/data/pollination/guild_table.csv',
u'landcover_biophysical_table_path': u'/data/pollination/landcover_biophysical_table.csv',
u'landcover_raster_path': u'/data/pollination/landcover.tif',
u'results_suffix': u'',
u'workspace_dir': u'/workspace/pollination',
}
if __name__ == '__main__':
ptvsd_enable = os.getenv('PTVSD_ENABLE', default=0)
if ptvsd_enable == '1':
print('[INFO] Remote debugging, via ptvsd, is enabled')
# somewhat following https://vinta.ws/code/remotely-debug-a-python-app-inside-a-docker-container-in-visual-studio-code.html
import ptvsd
ptvsd_port = int(os.getenv('PTVSD_PORT', default=3000))
ptvsd.enable_attach(address=('0.0.0.0', ptvsd_port))
print('[INFO] ptvsd is started (port=%d), waiting for you to attach...' % ptvsd_port)
ptvsd.wait_for_attach()
print('[INFO] debugger is attached, breakpointing so you can set your own breakpoints')
breakpoint()
print('[INFO] starting execution of pollination model')
natcap.invest.pollination.execute(args)
elapsed_time = now() - start_ms
print('[INFO] finished execution of pollination model, elapsed time {}ms'.format(elapsed_time)) | en | 0.685762 | # coding=UTF-8 # hardcoded demo runner script for the pollination model # somewhat following https://vinta.ws/code/remotely-debug-a-python-app-inside-a-docker-container-in-visual-studio-code.html | 1.986831 | 2 |
mara_app/views.py | alexeyegorov/mara-app | 15 | 6621637 | <filename>mara_app/views.py
"""Mara admin views"""
import copy
import functools
import html
import sys
import types
import typing
import flask
from mara_app import monkey_patch
from mara_page import acl, navigation, response, _, bootstrap, xml
blueprint = flask.Blueprint('mara_app', __name__, url_prefix='/mara-app', static_folder='static')
acl_resource = acl.AclResource('Configuration')
def _config_modules(with_functions=True):
    """Gathers all configuration modules and their functions

    Scans every loaded module for a ``MARA_CONFIG_MODULES`` attribute (a
    list of modules, or a callable returning one). Returns a dict mapping
    config module names to their docstring and -- when *with_functions*
    is true -- each config function's doc, current value and (if monkey
    patched) the name of the replacing function.
    """
    import inspect
    config_modules = {}
    # Iterate over a copy: calling config functions below can import
    # modules and thereby mutate sys.modules.
    for name, module in copy.copy(sys.modules).items():
        if 'MARA_CONFIG_MODULES' in dir(module):
            modules = getattr(module, 'MARA_CONFIG_MODULES')
            if isinstance(modules, typing.Callable):
                modules = modules()
            assert (isinstance(modules, typing.Iterable))
            for config_module in modules:
                assert (isinstance(config_module, types.ModuleType))
                config_modules[config_module.__name__] = {'doc': config_module.__doc__, 'functions': {}}
                if with_functions:
                    for member_name, member in config_module.__dict__.items():
                        if inspect.isfunction(member):
                            # Evaluate the config function; failures are
                            # reported in place of the value.
                            try:
                                value = member()
                            except Exception:
                                value = 'error calling function'
                            # The project may have replaced this function
                            # via monkey patching.
                            new_function = monkey_patch.REPLACED_FUNCTIONS.get(
                                config_module.__name__ + '.' + member_name, '')
                            config_modules[config_module.__name__]['functions'][member_name] \
                                = {'doc': member.__doc__ or '', 'value': value, 'new_function': new_function}
    return config_modules
@blueprint.route('/configuration')
def configuration_page():
    """Renders one card per config module, listing each config function
    with its documentation, its monkey-patch replacement (if any) and its
    current value. Values are only shown to users with the required ACL
    permission."""
    import pprint
    from . import app
    # gather all config functions by package
    current_user_has_permission = acl.current_user_has_permission(acl_resource)
    return response.Response(
        html=[(bootstrap.card(id=module_name,
                              header_left=html.escape(module_name),
                              body=[_.p[_.em[html.escape(str(config['doc']))]],
                                    bootstrap.table(
                                        [],
                                        [_.tr[
                                            _.td[_.tt[html.escape(function_name).replace('_', '_<wbr/>')],
                                                 [_.br, ' ⟵ ', _.tt[html.escape(function['new_function'])
                                                     .replace('.', '<wbr/>.').replace('_', '_<wbr/>')]]
                                                 if function['new_function'] else ''],
                                            _.td[_.em[html.escape(function['doc'])]],
                                            _.td[
                                                _.pre[html.escape(pprint.pformat(function['value']))]
                                                if current_user_has_permission
                                                else acl.inline_permission_denied_message()
                                            ]] for function_name, function in config['functions'].items()])
                                    ]) if config['functions'] else '')
              for module_name, config in sorted(_config_modules().items())],
        title='Mara Configuration')
def package_configs_navigation_entry():
    """Builds the navigation entry for the configuration page: one child
    entry (an anchor link) per discovered config module."""
    return navigation.NavigationEntry(
        label='Package Configs', icon='cogs', rank=100,
        description='Package config functions with project replacements',
        uri_fn=lambda: flask.url_for('mara_app.configuration_page'),
        children=[
            navigation.NavigationEntry(
                label=module_name, icon='list', description=config['doc'],
                # Bind module_name as a default argument so every lambda
                # keeps its own value (late-binding closure pitfall).
                uri_fn=lambda _module_name=module_name: flask.url_for('mara_app.configuration_page',
                                                                      _anchor=_module_name))
            for module_name, config in sorted(_config_modules(with_functions=False).items())]
    )
@blueprint.route('/navigation-bar')
@functools.lru_cache(maxsize=None)
def navigation_bar() -> [str]:
    """Renders the navigation sidebar HTML. The result is cached for the
    process lifetime (the route takes no parameters)."""
    from . import app

    # The navigation sidebar is loaded asynchronously for better rendering experience
    def render_entries(entries: [navigation.NavigationEntry] = [], level: int = 1):
        # Render the visible entries, ordered by rank, at the given depth.
        def render_entry(entry: navigation.NavigationEntry, level: int = 1):
            attrs = {}
            if entry.children:
                # Entries with children toggle open/closed instead of linking.
                attrs['onClick'] = 'toggleNavigationEntry(this)'
            else:
                attrs['href'] = entry.uri_fn()
            if entry.description:
                attrs.update({'title': entry.description, 'data-toggle': 'tooltip',
                              'data-container': 'body', 'data-placement': 'right'})
            # Sub-levels start hidden and are revealed by the toggle above.
            return _.div(class_='mara-nav-entry level-' + str(level),
                         style='display:none' if level > 1 else '')[
                _.a(**attrs)[
                    _.div(class_='mara-nav-entry-icon fa fa-fw fa-' + entry.icon + (' fa-lg' if level == 1 else ''))[
                        ''] if entry.icon else '',
                    _.div(class_='mara-nav-entry-text')[entry.label.replace('_', '_<wbr>')],
                    _.div(class_='mara-caret fa fa-caret-down')[''] if entry.children else ''],
                render_entries(entry.children, level + 1)
            ]

        return [functools.partial(render_entry, level=level)(entry)
                for entry in sorted([entry for entry in entries if entry.visible], key=lambda x: x.rank)]

    return flask.Response(''.join(list(xml.render(render_entries(app.combine_navigation_entries().children)))))
| <filename>mara_app/views.py
"""Mara admin views"""
import copy
import functools
import html
import sys
import types
import typing
import flask
from mara_app import monkey_patch
from mara_page import acl, navigation, response, _, bootstrap, xml
blueprint = flask.Blueprint('mara_app', __name__, url_prefix='/mara-app', static_folder='static')
acl_resource = acl.AclResource('Configuration')
def _config_modules(with_functions=True):
"""Gathers all configuration modules and their functions"""
import inspect
config_modules = {}
for name, module in copy.copy(sys.modules).items():
if 'MARA_CONFIG_MODULES' in dir(module):
modules = getattr(module, 'MARA_CONFIG_MODULES')
if isinstance(modules, typing.Callable):
modules = modules()
assert (isinstance(modules, typing.Iterable))
for config_module in modules:
assert (isinstance(config_module, types.ModuleType))
config_modules[config_module.__name__] = {'doc': config_module.__doc__, 'functions': {}}
if with_functions:
for member_name, member in config_module.__dict__.items():
if inspect.isfunction(member):
try:
value = member()
except Exception:
value = 'error calling function'
new_function = monkey_patch.REPLACED_FUNCTIONS.get(
config_module.__name__ + '.' + member_name, '')
config_modules[config_module.__name__]['functions'][member_name] \
= {'doc': member.__doc__ or '', 'value': value, 'new_function': new_function}
return config_modules
@blueprint.route('/configuration')
def configuration_page():
import pprint
from . import app
# gather all config functions by package
current_user_has_permission = acl.current_user_has_permission(acl_resource)
return response.Response(
html=[(bootstrap.card(id=module_name,
header_left=html.escape(module_name),
body=[_.p[_.em[html.escape(str(config['doc']))]],
bootstrap.table(
[],
[_.tr[
_.td[_.tt[html.escape(function_name).replace('_', '_<wbr/>')],
[_.br, ' ⟵ ', _.tt[html.escape(function['new_function'])
.replace('.', '<wbr/>.').replace('_', '_<wbr/>')]]
if function['new_function'] else ''],
_.td[_.em[html.escape(function['doc'])]],
_.td[
_.pre[html.escape(pprint.pformat(function['value']))]
if current_user_has_permission
else acl.inline_permission_denied_message()
]] for function_name, function in config['functions'].items()])
]) if config['functions'] else '')
for module_name, config in sorted(_config_modules().items())],
title='Mara Configuration')
def package_configs_navigation_entry():
return navigation.NavigationEntry(
label='Package Configs', icon='cogs', rank=100,
description='Package config functions with project replacements',
uri_fn=lambda: flask.url_for('mara_app.configuration_page'),
children=[
navigation.NavigationEntry(
label=module_name, icon='list', description=config['doc'],
uri_fn=lambda _module_name=module_name: flask.url_for('mara_app.configuration_page',
_anchor=_module_name))
for module_name, config in sorted(_config_modules(with_functions=False).items())]
)
@blueprint.route('/navigation-bar')
@functools.lru_cache(maxsize=None)
def navigation_bar() -> [str]:
from . import app
# The navigation sidebar is loaded asynchronously for better rendering experience
def render_entries(entries: [navigation.NavigationEntry] = [], level: int = 1):
def render_entry(entry: navigation.NavigationEntry, level: int = 1):
attrs = {}
if entry.children:
attrs['onClick'] = 'toggleNavigationEntry(this)'
else:
attrs['href'] = entry.uri_fn()
if entry.description:
attrs.update({'title': entry.description, 'data-toggle': 'tooltip',
'data-container': 'body', 'data-placement': 'right'})
return _.div(class_='mara-nav-entry level-' + str(level),
style='display:none' if level > 1 else '')[
_.a(**attrs)[
_.div(class_='mara-nav-entry-icon fa fa-fw fa-' + entry.icon + (' fa-lg' if level == 1 else ''))[
''] if entry.icon else '',
_.div(class_='mara-nav-entry-text')[entry.label.replace('_', '_<wbr>')],
_.div(class_='mara-caret fa fa-caret-down')[''] if entry.children else ''],
render_entries(entry.children, level + 1)
]
return [functools.partial(render_entry, level=level)(entry)
for entry in sorted([entry for entry in entries if entry.visible], key=lambda x: x.rank)]
return flask.Response(''.join(list(xml.render(render_entries(app.combine_navigation_entries().children)))))
| en | 0.81992 | Mara admin views Gathers all configuration modules and their functions # gather all config functions by package # The navigation sidebar is loaded asynchronously for better rendering experience | 2.149203 | 2 |
tests/base/__init__.py | reitermarkus/proxmoxer | 0 | 6621638 | __author__ = "<NAME>"
__copyright__ = "(c) <NAME> 2013-2017"
__license__ = "MIT"
| __author__ = "<NAME>"
__copyright__ = "(c) <NAME> 2013-2017"
__license__ = "MIT"
| none | 1 | 0.971415 | 1 | |
Python3/718.py | rakhi2001/ecom7 | 854 | 6621639 | __________________________________________________________________________________________________
sample 180 ms submission
class Solution:
    def findLength(self, A: List[int], B: List[int]) -> int:
        """Return the length of the longest common contiguous subarray.

        Binary-searches the answer length k, testing feasibility with a
        polynomial rolling hash (Rabin-Karp) over all k-windows.
        NOTE(review): hash matches are not verified against the actual
        subarrays, so a collision could in principle yield a too-large
        answer.
        """
        # dp
        """
        m, n = len(A), len(B)
        # dp[i][j]: max common prefix length of A[:(i + 1)], B[:(j + 1)]
        dp = [
            [0] * n
            for _ in range(m)
        ]
        max_len = 0
        for j in range(n):
            dp[0][j] = int(A[0] == B[j])
            max_len = max(max_len, dp[0][j])
        for i in range(m):
            dp[i][0] = int(A[i] == B[0])
            max_len = max(max_len, dp[i][0])
        for i in range(1, m):
            for j in range(1, n):
                if A[i] == B[j]:
                    dp[i][j] = dp[i - 1][j - 1] + 1
                    max_len = max(max_len, dp[i][j])
        return max_len
        """
        # binary search
        m, n = len(A), len(B)

        def check(k):
            # Does a common subarray of length k exist? (k == 0: trivially yes)
            if k == 0:
                return True
            # calculating hash values of k-subarray in O(len) time
            # hs[i] = hash(A[i:(i + k)])
            #       = sum(A[i + j] * (P ** (k - j - 1)) for j in range(k)) % M
            # hs[i + 1] = hash(A[(i + 1):(i + k + 1)])
            #           = ((hs[i] - A[i] * (P ** (k - 1))) * P + A[i + k]) % M
            P, M = 113, 10**9 + 7
            pows = [1] * k
            for j in range(1, k):
                pows[j] = (pows[j - 1] * P) % M
            # Rolling hashes of all k-windows of A.
            h = 0
            for j in range(k):
                h = (h + A[j] * pows[k - j - 1]) % M
            hs = {h}
            for i in range(1, m - k + 1):
                h = (((h - A[i - 1] * pows[k - 1]) * P) + A[i + k - 1]) % M
                hs.add(h)
            # Slide over B looking for a matching window hash.
            h = 0
            for j in range(k):
                h = (h + B[j] * pows[k - j - 1]) % M
            if h in hs:
                return True
            for i in range(1, n - k + 1):
                h = (((h - B[i - 1] * pows[k - 1]) * P) + B[i + k - 1]) % M
                if h in hs:
                    return True
            return False

        # Binary search the largest feasible k; invariant: check(l) holds,
        # check(r) does not.
        l, r = 0, min(m, n) + 1
        while l + 1 < r:
            k = (l + r) >> 1
            if check(k):
                l = k
            else:
                r = k
        return l
__________________________________________________________________________________________________
sample 13636 kb submission
class Solution:
    def findLength(self, A: List[int], B: List[int]) -> int:
        """Length of the longest common contiguous subarray of A and B.

        Encodes each array as a string (one character per element) and
        binary-searches the answer length, testing feasibility with a set
        of all substrings of that length.
        """
        s1 = ''.join(map(chr, A))
        s2 = ''.join(map(chr, B))

        def has_common(k):
            # Does a common substring of length k exist?
            seen = {s1[i:i + k] for i in range(len(s1) - k + 1)}
            return any(s2[j:j + k] in seen for j in range(len(s2) - k + 1))

        lo, hi = 0, min(len(s1), len(s2)) + 1
        while lo < hi:
            mid = (lo + hi) // 2
            if has_common(mid):
                lo = mid + 1
            else:
                hi = mid
        return lo - 1
__________________________________________________________________________________________________
| __________________________________________________________________________________________________
sample 180 ms submission
class Solution:
def findLength(self, A: List[int], B: List[int]) -> int:
# dp
"""
m, n = len(A), len(B)
# dp[i][j]: max common prefix length of A[:(i + 1)], B[:(j + 1)]
dp = [
[0] * n
for _ in range(m)
]
max_len = 0
for j in range(n):
dp[0][j] = int(A[0] == B[j])
max_len = max(max_len, dp[0][j])
for i in range(m):
dp[i][0] = int(A[i] == B[0])
max_len = max(max_len, dp[i][0])
for i in range(1, m):
for j in range(1, n):
if A[i] == B[j]:
dp[i][j] = dp[i - 1][j - 1] + 1
max_len = max(max_len, dp[i][j])
return max_len
"""
# binary search
m, n = len(A), len(B)
def check(k):
if k == 0:
return True
# calculating hash values of k-subarray in O(len) time
# hs[i] = hash(A[i:(i + k)])
# = sum(A[i + j] * (P ** (k - j - 1)) for j in range(k)) % M
# hs[i + 1] = hash(A[(i + 1):(i + k + 1)])
# = ((hs[i] - A[i] * (P ** (k - 1))) * P + A[i + k]) % M
P, M = 113, 10**9 + 7
pows = [1] * k
for j in range(1, k):
pows[j] = (pows[j - 1] * P) % M
h = 0
for j in range(k):
h = (h + A[j] * pows[k - j - 1]) % M
hs = {h}
for i in range(1, m - k + 1):
h = (((h - A[i - 1] * pows[k - 1]) * P) + A[i + k - 1]) % M
hs.add(h)
h = 0
for j in range(k):
h = (h + B[j] * pows[k - j - 1]) % M
if h in hs:
return True
for i in range(1, n - k + 1):
h = (((h - B[i - 1] * pows[k - 1]) * P) + B[i + k - 1]) % M
if h in hs:
return True
return False
l, r = 0, min(m, n) + 1
while l + 1 < r:
k = (l + r) >> 1
if check(k):
l = k
else:
r = k
return l
__________________________________________________________________________________________________
sample 13636 kb submission
class Solution:
    def findLength(self, A: List[int], B: List[int]) -> int:
        """Length of the longest contiguous run appearing in both A and B.

        Works on string encodings of the arrays (one character per
        element) and bisects on the answer: feasibility of a run length
        is monotone, so the largest feasible length is found with
        O(log n) feasibility tests.
        """
        a = ''.join(map(chr, A))
        b = ''.join(map(chr, B))

        def feasible(size):
            # Collect every window of `a`, then scan `b`'s windows for a hit.
            seen = set()
            for start in range(len(a) - size + 1):
                seen.add(a[start:start + size])
            for start in range(len(b) - size + 1):
                if b[start:start + size] in seen:
                    return True
            return False

        left, right = 0, min(len(a), len(b)) + 1
        # `left` converges to the first infeasible size; answer is one less.
        while left < right:
            middle = (left + right) // 2
            if feasible(middle):
                left = middle + 1
            else:
                right = middle
        return left - 1
__________________________________________________________________________________________________
| en | 0.425622 | # dp m, n = len(A), len(B) # dp[i][j]: max common prefix length of A[:(i + 1)], B[:(j + 1)] dp = [ [0] * n for _ in range(m) ] max_len = 0 for j in range(n): dp[0][j] = int(A[0] == B[j]) max_len = max(max_len, dp[0][j]) for i in range(m): dp[i][0] = int(A[i] == B[0]) max_len = max(max_len, dp[i][0]) for i in range(1, m): for j in range(1, n): if A[i] == B[j]: dp[i][j] = dp[i - 1][j - 1] + 1 max_len = max(max_len, dp[i][j]) return max_len # binary search # calculating hash values of k-subarray in O(len) time # hs[i] = hash(A[i:(i + k)]) # = sum(A[i + j] * (P ** (k - j - 1)) for j in range(k)) % M # hs[i + 1] = hash(A[(i + 1):(i + k + 1)]) # = ((hs[i] - A[i] * (P ** (k - 1))) * P + A[i + k]) % M | 3.391951 | 3 |
eva_storage/jvc/jvc.py | jaehobang/cs7643_project | 0 | 6621640 | <filename>eva_storage/jvc/jvc.py
"""
In this file, we implement a wrapper around the whole process
"""
from eva_storage.jvc.encoder import Encoder
from eva_storage.jvc.decoder import Decoder
from eva_storage.jvc.preprocessor import Preprocessor
from loaders.seattle_loader import SeattleLoader
import os
"""
Notes:
Preprocessor: self.hierarchy_save_dir = os.path.join('/nethome/jbang36/eva_jaeho/data/frame_hierarchy', video_type,
video_name + '.npy')
Decoder: self.video_base_path = '/nethome/jbang36/eva_jaeho/data/'
self.hierarchy_base_path = '/nethome/jbang36/eva_jaeho/data/frame_hierarchy'
"""
class JVC:
def __init__(self, loader = None):
self.preprocessor = Preprocessor()
### TODO: we have to keep modifying the video_type, video_name variables.... or we can just manage all that here??
self.encoder = Encoder()
self.decoder = Decoder() ## if user doesn't supply a loader, we load the default loader
self.base_directory = '/nethome/jbang36/eva_jaeho/data'
self.images = None
self.directories = {}
if loader is None:
self.loader = SeattleLoader()
def preprocess_default(self, images, video_type, video_name, **kwargs):
"""
Function used when images are already given
:param images:
:return:
"""
hierarchy_save_dir = os.path.join(self.base_directory, 'frame_hierarchy', video_type, video_name + '.npy')
proposed_cluster_count = len(images) // 100 if len(images) // 100 > 0 else len(images)
cluster_count = kwargs.get('cluster_count', proposed_cluster_count)
stopping_point = kwargs.get('stopping_point', proposed_cluster_count)
self.hierarchy = self.preprocessor.run_final(images, hierarchy_save_dir, cluster_count=cluster_count,
stopping_point=stopping_point)
hierarchy = self.hierarchy
self.directories['hierarchy'] = hierarchy_save_dir
return sorted(hierarchy[:cluster_count])
def preprocess(self, video_type, video_name, **kwargs):
extension = kwargs.get('extension', '.mp4')
### this is just the name of the video
self.original_video_directory = os.path.join(self.base_directory, video_type, video_name + extension)
video_directory = self.original_video_directory
hierarchy_save_dir = os.path.join(self.base_directory, 'frame_hierarchy', video_type, video_name + '.npy')
self.images = self.loader.load_images(video_directory)
images = self.images
proposed_cluster_count = len(images) // 100 if len(images) // 100 > 0 else len(images)
cluster_count = kwargs.get('cluster_count', proposed_cluster_count)
stopping_point = kwargs.get('stopping_point', proposed_cluster_count)
self.hierarchy = self.preprocessor.run_final(images, hierarchy_save_dir, cluster_count = cluster_count, stopping_point = stopping_point)
hierarchy = self.hierarchy
##update the directories
self.directories['hierarchy'] = hierarchy_save_dir
self.directories['video_dir'] = video_directory
return sorted(hierarchy[:cluster_count]) ## we want to sort the examples chosen for evaluation
def decode(self, video_type, jvc_video_name, hierarchy_name, **kwargs):
sample_count = kwargs.get('sample_count', 100) ## TODO: make sure the decoder takes care of edge cases
video_directory = os.path.join( self.base_directory, video_type, jvc_video_name + '.mp4')
hierarchy_directory = os.path.join( self.base_directory, 'frame_hierarchy', video_type, hierarchy_name + '.npy')
iframe_indices_directory = os.path.join( self.base_directory, 'iframe_indices', video_type, jvc_video_name + '.npy')
extracted_images = self.decoder.run(video_directory, hierarchy_directory, iframe_indices_directory, number_of_samples = sample_count)
return extracted_images
def encode(self, video_type, jvc_video_name, **kwargs):
save_directory = os.path.join( self.base_directory, video_type, jvc_video_name + '.mp4')
iframe_indices_save_directory = os.path.join( self.base_directory, 'iframe_indices', video_type, jvc_video_name + '.npy')
self.encoder.run(self.images, self.hierarchy, self.original_video_directory, save_directory, iframe_indices_save_directory)
self.jvc_video_directory = save_directory
self.directories['jvc_video_dir'] = self.jvc_video_directory
self.directories['iframe_indices_dir'] = iframe_indices_save_directory
return
if __name__ == "__main__":
jvc = JVC()
jvc.preprocess()
jvc.encode()
jvc.decode()
| <filename>eva_storage/jvc/jvc.py
"""
In this file, we implement a wrapper around the whole process
"""
from eva_storage.jvc.encoder import Encoder
from eva_storage.jvc.decoder import Decoder
from eva_storage.jvc.preprocessor import Preprocessor
from loaders.seattle_loader import SeattleLoader
import os
"""
Notes:
Preprocessor: self.hierarchy_save_dir = os.path.join('/nethome/jbang36/eva_jaeho/data/frame_hierarchy', video_type,
video_name + '.npy')
Decoder: self.video_base_path = '/nethome/jbang36/eva_jaeho/data/'
self.hierarchy_base_path = '/nethome/jbang36/eva_jaeho/data/frame_hierarchy'
"""
class JVC:
def __init__(self, loader = None):
self.preprocessor = Preprocessor()
### TODO: we have to keep modifying the video_type, video_name variables.... or we can just manage all that here??
self.encoder = Encoder()
self.decoder = Decoder() ## if user doesn't supply a loader, we load the default loader
self.base_directory = '/nethome/jbang36/eva_jaeho/data'
self.images = None
self.directories = {}
if loader is None:
self.loader = SeattleLoader()
def preprocess_default(self, images, video_type, video_name, **kwargs):
"""
Function used when images are already given
:param images:
:return:
"""
hierarchy_save_dir = os.path.join(self.base_directory, 'frame_hierarchy', video_type, video_name + '.npy')
proposed_cluster_count = len(images) // 100 if len(images) // 100 > 0 else len(images)
cluster_count = kwargs.get('cluster_count', proposed_cluster_count)
stopping_point = kwargs.get('stopping_point', proposed_cluster_count)
self.hierarchy = self.preprocessor.run_final(images, hierarchy_save_dir, cluster_count=cluster_count,
stopping_point=stopping_point)
hierarchy = self.hierarchy
self.directories['hierarchy'] = hierarchy_save_dir
return sorted(hierarchy[:cluster_count])
def preprocess(self, video_type, video_name, **kwargs):
extension = kwargs.get('extension', '.mp4')
### this is just the name of the video
self.original_video_directory = os.path.join(self.base_directory, video_type, video_name + extension)
video_directory = self.original_video_directory
hierarchy_save_dir = os.path.join(self.base_directory, 'frame_hierarchy', video_type, video_name + '.npy')
self.images = self.loader.load_images(video_directory)
images = self.images
proposed_cluster_count = len(images) // 100 if len(images) // 100 > 0 else len(images)
cluster_count = kwargs.get('cluster_count', proposed_cluster_count)
stopping_point = kwargs.get('stopping_point', proposed_cluster_count)
self.hierarchy = self.preprocessor.run_final(images, hierarchy_save_dir, cluster_count = cluster_count, stopping_point = stopping_point)
hierarchy = self.hierarchy
##update the directories
self.directories['hierarchy'] = hierarchy_save_dir
self.directories['video_dir'] = video_directory
return sorted(hierarchy[:cluster_count]) ## we want to sort the examples chosen for evaluation
def decode(self, video_type, jvc_video_name, hierarchy_name, **kwargs):
sample_count = kwargs.get('sample_count', 100) ## TODO: make sure the decoder takes care of edge cases
video_directory = os.path.join( self.base_directory, video_type, jvc_video_name + '.mp4')
hierarchy_directory = os.path.join( self.base_directory, 'frame_hierarchy', video_type, hierarchy_name + '.npy')
iframe_indices_directory = os.path.join( self.base_directory, 'iframe_indices', video_type, jvc_video_name + '.npy')
extracted_images = self.decoder.run(video_directory, hierarchy_directory, iframe_indices_directory, number_of_samples = sample_count)
return extracted_images
def encode(self, video_type, jvc_video_name, **kwargs):
save_directory = os.path.join( self.base_directory, video_type, jvc_video_name + '.mp4')
iframe_indices_save_directory = os.path.join( self.base_directory, 'iframe_indices', video_type, jvc_video_name + '.npy')
self.encoder.run(self.images, self.hierarchy, self.original_video_directory, save_directory, iframe_indices_save_directory)
self.jvc_video_directory = save_directory
self.directories['jvc_video_dir'] = self.jvc_video_directory
self.directories['iframe_indices_dir'] = iframe_indices_save_directory
return
if __name__ == "__main__":
jvc = JVC()
jvc.preprocess()
jvc.encode()
jvc.decode()
| en | 0.518865 | In this file, we implement a wrapper around the whole process Notes: Preprocessor: self.hierarchy_save_dir = os.path.join('/nethome/jbang36/eva_jaeho/data/frame_hierarchy', video_type, video_name + '.npy') Decoder: self.video_base_path = '/nethome/jbang36/eva_jaeho/data/' self.hierarchy_base_path = '/nethome/jbang36/eva_jaeho/data/frame_hierarchy' ### TODO: we have to keep modifying the video_type, video_name variables.... or we can just manage all that here?? ## if user doesn't supply a loader, we load the default loader Function used when images are already given :param images: :return: ### this is just the name of the video ##update the directories ## we want to sort the examples chosen for evaluation ## TODO: make sure the decoder takes care of edge cases | 2.303981 | 2 |
durak.py | arteum33/HW_Lesson_9_full_version | 0 | 6621641 | <reponame>arteum33/HW_Lesson_9_full_version<gh_stars>0
import random
# масти
SPADES = '♠'
HEARTS = '♥'
DIAMS = '♦'
CLUBS = '♣'
# достоинтсва карт
NOMINALS = ['6', '7', '8', '9', '10', 'J', 'Q', 'K', 'A']
# поиск индекса по достоинству
NAME_TO_VALUE = {n: i for i, n in enumerate(NOMINALS)}
# карт в руке при раздаче
CARDS_IN_HAND_MAX = 6
N_PLAYERS = 2
# эталонная колода (каждая масть по каждому номиналу) - 36 карт
DECK = [(nom, suit) for nom in NOMINALS for suit in [SPADES, HEARTS, DIAMS, CLUBS]]
class Player:
def __init__(self, index, cards):
self.index = index
self.cards = list(map(tuple, cards))
def take_cards_from_deck(self, deck: list):
lack = max(0, CARDS_IN_HAND_MAX - len(self.cards))
n = min(len(deck), lack)
self.add_cards(deck[:n])
del deck[:n]
return self
def sort_hand(self):
self.cards.sort(key=lambda c: (NAME_TO_VALUE[c[0]], c[1]))
return self
def add_cards(self, cards):
self.cards += list(cards)
self.sort_hand()
return self
def __repr__(self):
return f"Player{self.cards!r}"
def take_card(self, card):
self.cards.remove(card)
@property
def n_cards(self):
return len(self.cards)
def __getitem__(self, item):
return self.cards[item]
def rotate(l, n):
return l[n:] + l[:n]
class Durak:
NORMAL = 'normal'
TOOK_CARDS = 'не отблися и забрал карту'
GAME_OVER = 'game_over'
def __init__(self, rng: random.Random = None):
self.attacker_index = 0
self.rng = rng or random.Random()
self.deck = list(DECK)
self.rng.shuffle(self.deck)
self.players = [Player(i, []).take_cards_from_deck(self.deck)
for i in range(N_PLAYERS)]
self.trump = self.deck[0][1]
self.field = {} # atack card: defend card
self.winner = None
def card_match(self, card1, card2):
if card1 is None or card2 is None:
return False
n1, _ = card1
n2, _ = card2
return n1 == n2
def can_beat(self, card1, card2):
nom1, suit1 = card1
nom2, suit2 = card2
nom1 = NAME_TO_VALUE[nom1]
nom2 = NAME_TO_VALUE[nom2]
if suit2 == self.trump:
return suit1 != self.trump or nom2 > nom1
elif suit1 == suit2:
return nom2 > nom1
else:
return False
def can_add_to_field(self, card):
if not self.field:
return True
for attack_card, defend_card in self.field.items():
if self.card_match(attack_card, card) or self.card_match(defend_card, card):
return True
return False
@property
def attacking_cards(self):
return list(filter(bool, self.field.keys()))
@property
def defending_cards(self):
return list(filter(bool, self.field.values()))
@property
def any_unbeated_card(self):
return any(c is None for c in self.defending_cards)
@property
def current_player(self):
return self.players[self.attacker_index]
@property
def opponent_player(self):
return self.players[(self.attacker_index + 1) % N_PLAYERS]
def attack(self, card):
assert not self.winner
if not self.can_add_to_field(card):
return False
cur, opp = self.current_player, self.opponent_player
cur.take_card(card)
self.field[card] = None
return True
def defend(self, attacking_card, defending_card):
assert not self.winner
if self.field[attacking_card] is not None:
return False
if self.can_beat(attacking_card, defending_card):
self.field[attacking_card] = defending_card
self.opponent_player.take_card(defending_card)
return True
return False
def attack_succeed(self):
return any(def_card is None for _, def_card in self.field.items())
def defend_variants(self, card):
unbeaten_cards = [c for c in self.field.keys() if self.field[c] is None]
return [i for i, att_card in enumerate(unbeaten_cards) if self.can_beat(att_card, card)]
def finish_turn(self):
assert not self.winner
took_cards = False
if self.attack_succeed():
self._take_all_field()
took_cards = True
else:
self.field = {}
for p in rotate(self.players, self.attacker_index):
p.take_cards_from_deck(self.deck)
if not self.deck:
self.winner = p.index
return self.GAME_OVER
if took_cards:
return self.TOOK_CARDS
else:
self.attacker_index = self.opponent_player.index
return self.NORMAL
def _take_all_field(self):
cards = self.attacking_cards + self.defending_cards
self.opponent_player.add_cards(cards)
self.field = {} | import random
# масти
SPADES = '♠'
HEARTS = '♥'
DIAMS = '♦'
CLUBS = '♣'
# достоинтсва карт
NOMINALS = ['6', '7', '8', '9', '10', 'J', 'Q', 'K', 'A']
# поиск индекса по достоинству
NAME_TO_VALUE = {n: i for i, n in enumerate(NOMINALS)}
# карт в руке при раздаче
CARDS_IN_HAND_MAX = 6
N_PLAYERS = 2
# эталонная колода (каждая масть по каждому номиналу) - 36 карт
DECK = [(nom, suit) for nom in NOMINALS for suit in [SPADES, HEARTS, DIAMS, CLUBS]]
class Player:
def __init__(self, index, cards):
self.index = index
self.cards = list(map(tuple, cards))
def take_cards_from_deck(self, deck: list):
lack = max(0, CARDS_IN_HAND_MAX - len(self.cards))
n = min(len(deck), lack)
self.add_cards(deck[:n])
del deck[:n]
return self
def sort_hand(self):
self.cards.sort(key=lambda c: (NAME_TO_VALUE[c[0]], c[1]))
return self
def add_cards(self, cards):
self.cards += list(cards)
self.sort_hand()
return self
def __repr__(self):
return f"Player{self.cards!r}"
def take_card(self, card):
self.cards.remove(card)
@property
def n_cards(self):
return len(self.cards)
def __getitem__(self, item):
return self.cards[item]
def rotate(l, n):
return l[n:] + l[:n]
class Durak:
NORMAL = 'normal'
TOOK_CARDS = 'не отблися и забрал карту'
GAME_OVER = 'game_over'
def __init__(self, rng: random.Random = None):
self.attacker_index = 0
self.rng = rng or random.Random()
self.deck = list(DECK)
self.rng.shuffle(self.deck)
self.players = [Player(i, []).take_cards_from_deck(self.deck)
for i in range(N_PLAYERS)]
self.trump = self.deck[0][1]
self.field = {} # atack card: defend card
self.winner = None
def card_match(self, card1, card2):
if card1 is None or card2 is None:
return False
n1, _ = card1
n2, _ = card2
return n1 == n2
def can_beat(self, card1, card2):
nom1, suit1 = card1
nom2, suit2 = card2
nom1 = NAME_TO_VALUE[nom1]
nom2 = NAME_TO_VALUE[nom2]
if suit2 == self.trump:
return suit1 != self.trump or nom2 > nom1
elif suit1 == suit2:
return nom2 > nom1
else:
return False
def can_add_to_field(self, card):
if not self.field:
return True
for attack_card, defend_card in self.field.items():
if self.card_match(attack_card, card) or self.card_match(defend_card, card):
return True
return False
@property
def attacking_cards(self):
return list(filter(bool, self.field.keys()))
@property
def defending_cards(self):
return list(filter(bool, self.field.values()))
@property
def any_unbeated_card(self):
return any(c is None for c in self.defending_cards)
@property
def current_player(self):
return self.players[self.attacker_index]
@property
def opponent_player(self):
return self.players[(self.attacker_index + 1) % N_PLAYERS]
def attack(self, card):
assert not self.winner
if not self.can_add_to_field(card):
return False
cur, opp = self.current_player, self.opponent_player
cur.take_card(card)
self.field[card] = None
return True
def defend(self, attacking_card, defending_card):
assert not self.winner
if self.field[attacking_card] is not None:
return False
if self.can_beat(attacking_card, defending_card):
self.field[attacking_card] = defending_card
self.opponent_player.take_card(defending_card)
return True
return False
def attack_succeed(self):
return any(def_card is None for _, def_card in self.field.items())
def defend_variants(self, card):
unbeaten_cards = [c for c in self.field.keys() if self.field[c] is None]
return [i for i, att_card in enumerate(unbeaten_cards) if self.can_beat(att_card, card)]
def finish_turn(self):
assert not self.winner
took_cards = False
if self.attack_succeed():
self._take_all_field()
took_cards = True
else:
self.field = {}
for p in rotate(self.players, self.attacker_index):
p.take_cards_from_deck(self.deck)
if not self.deck:
self.winner = p.index
return self.GAME_OVER
if took_cards:
return self.TOOK_CARDS
else:
self.attacker_index = self.opponent_player.index
return self.NORMAL
def _take_all_field(self):
cards = self.attacking_cards + self.defending_cards
self.opponent_player.add_cards(cards)
self.field = {} | ru | 0.944878 | # масти # достоинтсва карт # поиск индекса по достоинству # карт в руке при раздаче # эталонная колода (каждая масть по каждому номиналу) - 36 карт # atack card: defend card | 3.42408 | 3 |
src/riski/_raster.py | GFDRR/RISKi | 0 | 6621642 | from typing import Dict, List
from types import MethodType
import os
import re
import inspect
import psycopg2 as pg
import riski as ri
from riski._utils import load_settings, generate_config
def _test():
pass | from typing import Dict, List
from types import MethodType
import os
import re
import inspect
import psycopg2 as pg
import riski as ri
from riski._utils import load_settings, generate_config
def _test():
pass | none | 1 | 1.498357 | 1 | |
xfer/utils.py | 0xflotus/xfer | 244 | 6621643 | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# ==============================================================================
import numpy as np
import os
import mxnet as mx
import json
from .constants import serialization_constants as consts
from .constants import repurposer_keys as keys
def sklearn_model_to_dict(target_model):
output_dict = {}
import copy
# model_dict contains all attributes of model
model_dict = copy.deepcopy(target_model.__dict__)
for k in model_dict:
# Replace any numpy array with [data_type_as_str, array_as_list]
# e.g np.array([1,2]) -> ['int', [1,2]]
if isinstance(model_dict[k], np.ndarray):
type_data = str(model_dict[k].dtype)
model_dict[k] = [type_data, model_dict[k].tolist()]
# Replace any tuple with ['tuple', tuple_as_list]
# e.g (1,2) -> ['tuple', [1,2]]
if isinstance(model_dict[k], tuple):
model_dict[k] = [keys.TUPLE, list(model_dict[k])]
output_dict[keys.MODEL] = {}
# Model params are public attributes
output_dict[keys.MODEL][keys.PARAMS] = target_model.get_params()
# Serialise all private attributes
output_dict[keys.MODEL][keys.ATTRS] = {}
for k in model_dict:
# Serialize private parameters as attributes
if k[-1] == '_' or k[0] == '_':
output_dict[keys.MODEL][keys.ATTRS][k] = model_dict[k]
return output_dict
def sklearn_model_from_dict(model_class, input_dict):
# Initialize model with serialized model parameters
model = model_class(**input_dict[keys.MODEL][keys.PARAMS])
# Set model attributes
for k in input_dict[keys.MODEL][keys.ATTRS]:
# Unpack tuples and np.arrays that were serialised as lists
if isinstance(input_dict[keys.MODEL][keys.ATTRS][k], list) \
and isinstance(input_dict[keys.MODEL][keys.ATTRS][k][0], str) \
and type(input_dict[keys.MODEL][keys.ATTRS][k][1]) == list:
if input_dict[keys.MODEL][keys.ATTRS][k][0] == keys.TUPLE:
setattr(model, k, tuple(input_dict[keys.MODEL][keys.ATTRS][k][1]))
else:
type_data = 'np.' + input_dict[keys.MODEL][keys.ATTRS][k][0]
type_data = eval(type_data)
setattr(model, k, np.array(input_dict[keys.MODEL][keys.ATTRS][k][1], dtype=type_data))
else:
setattr(model, k, input_dict[keys.MODEL][keys.ATTRS][k])
return model
def _assert_repurposer_file_exists(repurposer_file_list):
for file_name in repurposer_file_list:
if not os.path.isfile(file_name):
raise NameError('Cannot find repurposer file ({})'.format(file_name))
def save_mxnet_model(model, file_path_prefix, epoch, provide_data=None, provide_label=None):
if not model.binded:
if provide_data is None or provide_label is None:
raise ValueError("provide_data and provide_label are required because mxnet module is not binded")
model.bind(data_shapes=provide_data, label_shapes=provide_label)
model.save_checkpoint(file_path_prefix, epoch)
def save_json(file_prefix, output_dict):
with open(file_prefix + consts.JSON_SUFFIX, mode='w') as fp:
json.dump(obj=output_dict, fp=fp)
def serialize_ctx_fn(context_function):
if context_function == mx.cpu:
return keys.CPU
elif context_function == mx.gpu:
return keys.GPU
else:
raise ValueError('Unexpected context function {}'.format(context_function))
def deserialize_ctx_fn(context_function):
if context_function == keys.CPU:
return mx.cpu
elif context_function == keys.GPU:
return mx.gpu
else:
raise ValueError('Unexpected context function {}'.format(context_function))
| # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# ==============================================================================
import numpy as np
import os
import mxnet as mx
import json
from .constants import serialization_constants as consts
from .constants import repurposer_keys as keys
def sklearn_model_to_dict(target_model):
output_dict = {}
import copy
# model_dict contains all attributes of model
model_dict = copy.deepcopy(target_model.__dict__)
for k in model_dict:
# Replace any numpy array with [data_type_as_str, array_as_list]
# e.g np.array([1,2]) -> ['int', [1,2]]
if isinstance(model_dict[k], np.ndarray):
type_data = str(model_dict[k].dtype)
model_dict[k] = [type_data, model_dict[k].tolist()]
# Replace any tuple with ['tuple', tuple_as_list]
# e.g (1,2) -> ['tuple', [1,2]]
if isinstance(model_dict[k], tuple):
model_dict[k] = [keys.TUPLE, list(model_dict[k])]
output_dict[keys.MODEL] = {}
# Model params are public attributes
output_dict[keys.MODEL][keys.PARAMS] = target_model.get_params()
# Serialise all private attributes
output_dict[keys.MODEL][keys.ATTRS] = {}
for k in model_dict:
# Serialize private parameters as attributes
if k[-1] == '_' or k[0] == '_':
output_dict[keys.MODEL][keys.ATTRS][k] = model_dict[k]
return output_dict
def sklearn_model_from_dict(model_class, input_dict):
# Initialize model with serialized model parameters
model = model_class(**input_dict[keys.MODEL][keys.PARAMS])
# Set model attributes
for k in input_dict[keys.MODEL][keys.ATTRS]:
# Unpack tuples and np.arrays that were serialised as lists
if isinstance(input_dict[keys.MODEL][keys.ATTRS][k], list) \
and isinstance(input_dict[keys.MODEL][keys.ATTRS][k][0], str) \
and type(input_dict[keys.MODEL][keys.ATTRS][k][1]) == list:
if input_dict[keys.MODEL][keys.ATTRS][k][0] == keys.TUPLE:
setattr(model, k, tuple(input_dict[keys.MODEL][keys.ATTRS][k][1]))
else:
type_data = 'np.' + input_dict[keys.MODEL][keys.ATTRS][k][0]
type_data = eval(type_data)
setattr(model, k, np.array(input_dict[keys.MODEL][keys.ATTRS][k][1], dtype=type_data))
else:
setattr(model, k, input_dict[keys.MODEL][keys.ATTRS][k])
return model
def _assert_repurposer_file_exists(repurposer_file_list):
for file_name in repurposer_file_list:
if not os.path.isfile(file_name):
raise NameError('Cannot find repurposer file ({})'.format(file_name))
def save_mxnet_model(model, file_path_prefix, epoch, provide_data=None, provide_label=None):
if not model.binded:
if provide_data is None or provide_label is None:
raise ValueError("provide_data and provide_label are required because mxnet module is not binded")
model.bind(data_shapes=provide_data, label_shapes=provide_label)
model.save_checkpoint(file_path_prefix, epoch)
def save_json(file_prefix, output_dict):
with open(file_prefix + consts.JSON_SUFFIX, mode='w') as fp:
json.dump(obj=output_dict, fp=fp)
def serialize_ctx_fn(context_function):
if context_function == mx.cpu:
return keys.CPU
elif context_function == mx.gpu:
return keys.GPU
else:
raise ValueError('Unexpected context function {}'.format(context_function))
def deserialize_ctx_fn(context_function):
if context_function == keys.CPU:
return mx.cpu
elif context_function == keys.GPU:
return mx.gpu
else:
raise ValueError('Unexpected context function {}'.format(context_function))
| en | 0.768433 | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://www.apache.org/licenses/LICENSE-2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. # ============================================================================== # model_dict contains all attributes of model # Replace any numpy array with [data_type_as_str, array_as_list] # e.g np.array([1,2]) -> ['int', [1,2]] # Replace any tuple with ['tuple', tuple_as_list] # e.g (1,2) -> ['tuple', [1,2]] # Model params are public attributes # Serialise all private attributes # Serialize private parameters as attributes # Initialize model with serialized model parameters # Set model attributes # Unpack tuples and np.arrays that were serialised as lists | 2.176381 | 2 |
eliza/Eliza.py | arsatis/nlp-eliza | 0 | 6621644 | from eliza.controller.commands.CommandParser import CommandParser
from eliza.controller.util.PorterStemmer import PorterStemmer
class Eliza:
__name = 'Eliza'
__responsePrefix = __name + ': '
__inputPrefix = 'You: '
def __init__(self):
print(self.__responsePrefix + "Hello! I'm " + self.__name + '. How can I help you today?')
def respond(self, userInput):
ps = PorterStemmer()
arr = []
for token in userInput.split():
arr += [ps.stem(token)]
userInput = ' '.join(map(str, arr)) # user input as a string, after stemming
print(self.__responsePrefix + CommandParser.parse(userInput))
#
def run(self):
userInput = input(self.__inputPrefix).lower()
while not (CommandParser.checkIfExit(userInput)):
self.respond(userInput)
userInput = input(self.__inputPrefix).lower()
else:
print(self.__responsePrefix + 'bye!') | from eliza.controller.commands.CommandParser import CommandParser
from eliza.controller.util.PorterStemmer import PorterStemmer
class Eliza:
__name = 'Eliza'
__responsePrefix = __name + ': '
__inputPrefix = 'You: '
def __init__(self):
print(self.__responsePrefix + "Hello! I'm " + self.__name + '. How can I help you today?')
def respond(self, userInput):
ps = PorterStemmer()
arr = []
for token in userInput.split():
arr += [ps.stem(token)]
userInput = ' '.join(map(str, arr)) # user input as a string, after stemming
print(self.__responsePrefix + CommandParser.parse(userInput))
#
def run(self):
userInput = input(self.__inputPrefix).lower()
while not (CommandParser.checkIfExit(userInput)):
self.respond(userInput)
userInput = input(self.__inputPrefix).lower()
else:
print(self.__responsePrefix + 'bye!') | en | 0.924358 | # user input as a string, after stemming # | 3.187156 | 3 |
tests/conftest.py | shushpanov/async-jaeger | 0 | 6621645 | <gh_stars>0
import mock
import pytest
from async_jaeger import ConstSampler, Tracer
@pytest.fixture(scope='function')
def tracer():
reporter = mock.MagicMock()
sampler = ConstSampler(True)
return Tracer(
service_name='test_service_1', reporter=reporter, sampler=sampler
)
| import mock
import pytest
from async_jaeger import ConstSampler, Tracer
@pytest.fixture(scope='function')
def tracer():
reporter = mock.MagicMock()
sampler = ConstSampler(True)
return Tracer(
service_name='test_service_1', reporter=reporter, sampler=sampler
) | none | 1 | 2.046048 | 2 | |
__main__.py | David-Lor/FastAPI-Pydantic-SQLAlchemy-PetShelter-API | 1 | 6621646 | <filename>__main__.py
from pet_shelter_api import run
run()
| <filename>__main__.py
from pet_shelter_api import run
run()
| none | 1 | 0.828807 | 1 | |
data/train/python/4128e2da4777bbc0e5da663a212927a855d29ff1main.py | harshp8l/deep-learning-lang-detection | 84 | 6621647 | <filename>data/train/python/4128e2da4777bbc0e5da663a212927a855d29ff1main.py
from Tests import runTest
from Controller import *
from Domain import *
from Repository import *
from Repository.file_repository import client_file
from Repository.file_repository import movie_file
from Repository.file_repository import rent_file
from Validators import *
from UI.UI import UI
# Composition root: instantiate repositories, validators, file persistence,
# controllers, and the UI.
# NOTE(review): each ``x = x.x()`` line rebinds an imported *module* name to an
# instance (e.g. ``movie_repository`` no longer refers to the module after the
# first line). This only works because every module is used exactly once before
# being shadowed -- fragile, but preserved here as-is.
movie_repository = movie_repository.movie_repository()
movie_validator = movie_validator.movie_validator()
client_repository = client_repository.client_repository()
client_validator = client_validator.client_validator()
rent_repository = rent_repository.rent_repository()
rent_validator = rent_validator.rent_validator()
# File-backed persistence, one text file per repository.
clients_file = client_file("clients.txt", client_repository)
movies_file = movie_file("movies.txt", movie_repository)
rents_file = rent_file("rents.txt", rent_repository)
# Populate the repositories from disk (the returned lists are kept but unused).
client_l = clients_file.loadFromFile()
movie_l = movies_file.loadFromFile()
rent_l = rents_file.loadFromFile()
# Controllers wrap repository + validator pairs; the rent controller also
# needs both entity repositories for cross-checks.
movie_controller = movie_controller.movie_controller(movie_repository, movie_validator)
client_controller = client_controller.client_controller(client_repository, client_validator)
rent_controller = rent_controller.rent_controller(rent_repository, rent_validator, movie_repository, client_repository)
# Wire everything into the console UI; ui.main() on the next line starts it.
ui = UI(client_controller, movie_controller, rent_controller, clients_file, movies_file, rents_file)
ui.main() | <filename>data/train/python/4128e2da4777bbc0e5da663a212927a855d29ff1main.py
from Tests import runTest
from Controller import *
from Domain import *
from Repository import *
from Repository.file_repository import client_file
from Repository.file_repository import movie_file
from Repository.file_repository import rent_file
from Validators import *
from UI.UI import UI
# Composition root: instantiate repositories, validators, file persistence,
# controllers, and the UI.
# NOTE(review): each ``x = x.x()`` line rebinds an imported *module* name to an
# instance (e.g. ``movie_repository`` no longer refers to the module after the
# first line). This only works because every module is used exactly once before
# being shadowed -- fragile, but preserved here as-is.
movie_repository = movie_repository.movie_repository()
movie_validator = movie_validator.movie_validator()
client_repository = client_repository.client_repository()
client_validator = client_validator.client_validator()
rent_repository = rent_repository.rent_repository()
rent_validator = rent_validator.rent_validator()
# File-backed persistence, one text file per repository.
clients_file = client_file("clients.txt", client_repository)
movies_file = movie_file("movies.txt", movie_repository)
rents_file = rent_file("rents.txt", rent_repository)
# Populate the repositories from disk (the returned lists are kept but unused).
client_l = clients_file.loadFromFile()
movie_l = movies_file.loadFromFile()
rent_l = rents_file.loadFromFile()
# Controllers wrap repository + validator pairs; the rent controller also
# needs both entity repositories for cross-checks.
movie_controller = movie_controller.movie_controller(movie_repository, movie_validator)
client_controller = client_controller.client_controller(client_repository, client_validator)
rent_controller = rent_controller.rent_controller(rent_repository, rent_validator, movie_repository, client_repository)
# Wire everything into the console UI; ui.main() on the next line starts it.
ui = UI(client_controller, movie_controller, rent_controller, clients_file, movies_file, rents_file)
ui.main() | none | 1 | 2.100312 | 2 | |
buildbulk.py | x-squared/chem-mov | 0 | 6621648 | from ase import *
from ase.build import bulk
from ase.visualize import view
a1 = bulk('Al', 'fcc', a=3.567)
view(a1) | from ase import *
from ase.build import bulk
from ase.visualize import view
# Build a bulk fcc aluminium cell and open it in the ASE GUI viewer.
# NOTE(review): 3.567 Å is the diamond-carbon lattice constant; fcc Al is
# usually ~4.05 Å -- confirm the intended element/lattice parameter.
a1 = bulk('Al', 'fcc', a=3.567)
view(a1)
scripts/fig_param.py | jennhsiao/ideotype | 2 | 6621649 | """Fig. Param."""
import os
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import preprocessing
from palettable.colorbrewer.sequential import YlGnBu_8
# Load the sampled parameter sets and point at the figure output directory.
df_params = pd.read_csv(
    '/home/disk/eos8/ach315/upscale/params/param_fixpd.csv')
outpath = '/home/disk/eos8/ach315/upscale/figs/'
# Min-max scale each parameter column to [0, 1] so all parameters share one
# color scale, then transpose so parameters become heatmap rows.
x = df_params.values
minmax_scale = preprocessing.MinMaxScaler()
x_scaled = minmax_scale.fit_transform(x)
df_scaled = pd.DataFrame(x_scaled).transpose()
# Row labels: one per perturbed model parameter.
df_scaled.index = ['g1',
                   'Vcmax',
                   'Jmax',
                   'phyf',
                   'SG',
                   'gleaf',
                   'LTAR',
                   'LM',
                   'LAF',
                   'gdd',
                   'pop']
# All params: wide heatmap of every sampled parameter set.
fig, ax = plt.subplots(figsize=(30, 5))
ax = sns.heatmap(df_scaled, cmap=YlGnBu_8.mpl_colormap)
# Light tick labels; keep parameter names horizontal for readability.
plt.xticks(fontweight='light', fontsize=12)
plt.yticks(rotation=0, fontweight='light', fontsize=12)
cbar = ax.collections[0].colorbar
cbar.ax.tick_params(labelsize=10)
fig.subplots_adjust(left=0.2, bottom=0.45)
plt.savefig(os.path.join(outpath, 'params_all.png'),
            format='png', dpi=800)
# Small params fig: same heatmap restricted to the first 15 parameter sets.
fig, ax = plt.subplots(figsize=(5, 5))
df_sub = df_scaled.iloc[:, :15]
ax = sns.heatmap(df_sub, cmap=YlGnBu_8.mpl_colormap)
plt.xticks(fontweight='light', fontsize=12)
plt.yticks(rotation=0, fontweight='light', fontsize=12)
cbar = ax.collections[0].colorbar
cbar.ax.tick_params(labelsize=10)
fig.subplots_adjust(left=0.2, bottom=0.45)
plt.savefig(os.path.join(outpath, 'params_small.png'),
            format='png', dpi=800)
| """Fig. Param."""
import os
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import preprocessing
from palettable.colorbrewer.sequential import YlGnBu_8
# Load the sampled parameter sets and point at the figure output directory.
df_params = pd.read_csv(
    '/home/disk/eos8/ach315/upscale/params/param_fixpd.csv')
outpath = '/home/disk/eos8/ach315/upscale/figs/'
# Min-max scale each parameter column to [0, 1] so all parameters share one
# color scale, then transpose so parameters become heatmap rows.
x = df_params.values
minmax_scale = preprocessing.MinMaxScaler()
x_scaled = minmax_scale.fit_transform(x)
df_scaled = pd.DataFrame(x_scaled).transpose()
# Row labels: one per perturbed model parameter.
df_scaled.index = ['g1',
                   'Vcmax',
                   'Jmax',
                   'phyf',
                   'SG',
                   'gleaf',
                   'LTAR',
                   'LM',
                   'LAF',
                   'gdd',
                   'pop']
# All params: wide heatmap of every sampled parameter set.
fig, ax = plt.subplots(figsize=(30, 5))
ax = sns.heatmap(df_scaled, cmap=YlGnBu_8.mpl_colormap)
# Light tick labels; keep parameter names horizontal for readability.
plt.xticks(fontweight='light', fontsize=12)
plt.yticks(rotation=0, fontweight='light', fontsize=12)
cbar = ax.collections[0].colorbar
cbar.ax.tick_params(labelsize=10)
fig.subplots_adjust(left=0.2, bottom=0.45)
plt.savefig(os.path.join(outpath, 'params_all.png'),
            format='png', dpi=800)
# Small params fig: same heatmap restricted to the first 15 parameter sets.
fig, ax = plt.subplots(figsize=(5, 5))
df_sub = df_scaled.iloc[:, :15]
ax = sns.heatmap(df_sub, cmap=YlGnBu_8.mpl_colormap)
plt.xticks(fontweight='light', fontsize=12)
plt.yticks(rotation=0, fontweight='light', fontsize=12)
cbar = ax.collections[0].colorbar
cbar.ax.tick_params(labelsize=10)
fig.subplots_adjust(left=0.2, bottom=0.45)
plt.savefig(os.path.join(outpath, 'params_small.png'),
            format='png', dpi=800)
| en | 0.214496 | Fig. Param. # All params # Small params fig | 2.235598 | 2 |
LollypopCatToy.py | bytedreamer/LollypopCatToy | 1 | 6621650 | <reponame>bytedreamer/LollypopCatToy<gh_stars>1-10
from flask import render_template, make_response
from flask.ext.recaptcha import ReCaptcha
from uuid import uuid4, UUID
from application import create_app, add_to_queue, socketio, activate_cat_toy
__author__ = '<NAME>'
# Application factory plus the reCAPTCHA extension bound to the app.
app = create_app()
reCaptcha = ReCaptcha(app)
@app.route('/')
def index():
    """Serve the landing page."""
    return render_template('home.html')
@app.route('/register', methods=['POST'])
def register():
    """Register a visitor.

    On a valid CAPTCHA, queue a new session key and render the register
    page; otherwise fall back to the home page.
    """
    if not reCaptcha.verify():
        return render_template('home.html')
    session_key = uuid4()
    add_to_queue(session_key)
    return render_template('register.html', key=session_key)
@app.route('/play/<key>/<int:gpio_number>', methods=['POST'])
def play(key, gpio_number):
    """Activate the cat toy on *gpio_number* for the session *key*.

    Returns an empty 204 (No Content) response. ``UUID(key)`` raises
    ValueError for a malformed key, which Flask surfaces as a 500.
    """
    activate_cat_toy(UUID(key), gpio_number)
    return make_response('', 204)
@socketio.on('connect', namespace='/queue')
def queue_connect():
    """Log a Socket.IO client connecting on the /queue namespace."""
    print('Client connected')
@socketio.on('disconnect', namespace='/queue')
def queue_disconnect():
    """Log a Socket.IO client disconnecting from the /queue namespace."""
    print('Client disconnected')
if __name__ == '__main__':
    # host='0.0.0.0' exposes the server on all interfaces (LAN access).
    socketio.run(app, host='0.0.0.0')
| from flask import render_template, make_response
from flask.ext.recaptcha import ReCaptcha
from uuid import uuid4, UUID
from application import create_app, add_to_queue, socketio, activate_cat_toy
__author__ = '<NAME>'
# Application factory plus the reCAPTCHA extension bound to the app.
app = create_app()
reCaptcha = ReCaptcha(app)
@app.route('/')
def index():
    """Serve the landing page."""
    return render_template('home.html')
@app.route('/register', methods=['POST'])
def register():
    """Register a visitor.

    On a valid CAPTCHA, queue a new session key and render the register
    page; otherwise fall back to the home page.
    """
    if not reCaptcha.verify():
        return render_template('home.html')
    session_key = uuid4()
    add_to_queue(session_key)
    return render_template('register.html', key=session_key)
@app.route('/play/<key>/<int:gpio_number>', methods=['POST'])
def play(key, gpio_number):
    """Activate the cat toy on *gpio_number* for the session *key*.

    Returns an empty 204 (No Content) response. ``UUID(key)`` raises
    ValueError for a malformed key, which Flask surfaces as a 500.
    """
    activate_cat_toy(UUID(key), gpio_number)
    return make_response('', 204)
@socketio.on('connect', namespace='/queue')
def queue_connect():
    """Log a Socket.IO client connecting on the /queue namespace."""
    print('Client connected')
@socketio.on('disconnect', namespace='/queue')
def queue_disconnect():
    """Log a Socket.IO client disconnecting from the /queue namespace."""
    print('Client disconnected')
if __name__ == '__main__':
    # host='0.0.0.0' exposes the server on all interfaces (LAN access).
    socketio.run(app, host='0.0.0.0')