hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ce89144b73358eced1c79754fb5f490421cab763 | 878 | py | Python | python/lib/Lib/compiler/__init__.py | truthiswill/intellij-community | fff88cfb0dc168eea18ecb745d3e5b93f57b0b95 | [
"Apache-2.0"
] | 69 | 2015-01-16T13:12:55.000Z | 2022-02-14T12:55:27.000Z | python/lib/Lib/compiler/__init__.py | truthiswill/intellij-community | fff88cfb0dc168eea18ecb745d3e5b93f57b0b95 | [
"Apache-2.0"
] | 3 | 2019-07-19T18:02:02.000Z | 2021-04-25T06:35:42.000Z | python/lib/Lib/compiler/__init__.py | truthiswill/intellij-community | fff88cfb0dc168eea18ecb745d3e5b93f57b0b95 | [
"Apache-2.0"
] | 32 | 2015-02-06T12:10:32.000Z | 2019-06-18T03:21:36.000Z | """Package for parsing and compiling Python source code
There are several functions defined at the top level that are imported
from modules contained in the package.
parse(buf, mode="exec") -> AST
Converts a string containing Python source code to an abstract
syntax tree (AST). The AST is defined in compiler.ast.
parseFile(path) -> AST
The same as parse(open(path))
walk(ast, visitor, verbose=None)
Does a pre-order walk over the ast using the visitor instance.
See compiler.visitor for details.
compile(source, filename, mode, flags=None, dont_inherit=None)
Returns a code object. A replacement for the builtin compile() function.
compileFile(filename)
Generates a .pyc file by compiling filename.
"""
from compiler.transformer import parse, parseFile
from compiler.visitor import walk
from compiler.pycodegen import compile, compileFile
| 32.518519 | 77 | 0.763098 |
4ab54dc1535e7e8a9eb016ac3cd1194eee7dca71 | 12,016 | py | Python | sdks/python/client/argo_workflows/model/io_argoproj_workflow_v1alpha1_backoff.py | BearerPipelineTest/argo-workflows | ecd91b1c4215a2ab8742f7c43eaade98a1d47eba | [
"Apache-2.0"
] | 1 | 2022-02-24T01:45:03.000Z | 2022-02-24T01:45:03.000Z | sdks/python/client/argo_workflows/model/io_argoproj_workflow_v1alpha1_backoff.py | BearerPipelineTest/argo-workflows | ecd91b1c4215a2ab8742f7c43eaade98a1d47eba | [
"Apache-2.0"
] | 18 | 2022-02-01T23:09:58.000Z | 2022-03-31T23:28:41.000Z | sdks/python/client/argo_workflows/model/io_argoproj_workflow_v1alpha1_backoff.py | BearerPipelineTest/argo-workflows | ecd91b1c4215a2ab8742f7c43eaade98a1d47eba | [
"Apache-2.0"
] | null | null | null | """
Argo Workflows API
Argo Workflows is an open source container-native workflow engine for orchestrating parallel jobs on Kubernetes. For more information, please see https://argoproj.github.io/argo-workflows/ # noqa: E501
The version of the OpenAPI document: VERSION
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from argo_workflows.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from argo_workflows.exceptions import ApiAttributeError
class IoArgoprojWorkflowV1alpha1Backoff(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'duration': (str,), # noqa: E501
'factor': (str,), # noqa: E501
'max_duration': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'duration': 'duration', # noqa: E501
'factor': 'factor', # noqa: E501
'max_duration': 'maxDuration', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""IoArgoprojWorkflowV1alpha1Backoff - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
duration (str): Duration is the amount to back off. Default unit is seconds, but could also be a duration (e.g. \"2m\", \"1h\"). [optional] # noqa: E501
factor (str): [optional] # noqa: E501
max_duration (str): MaxDuration is the maximum amount of time allowed for the backoff strategy. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""IoArgoprojWorkflowV1alpha1Backoff - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
duration (str): Duration is the amount to back off. Default unit is seconds, but could also be a duration (e.g. \"2m\", \"1h\"). [optional] # noqa: E501
factor (str): [optional] # noqa: E501
max_duration (str): MaxDuration is the maximum amount of time allowed for the backoff strategy. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| 45.515152 | 206 | 0.578063 |
da5ead6eba8086ff0b074b158eca6688c1632f4e | 8,170 | py | Python | tensor_rl/planning/ValueIterationClass.py | umd-huang-lab/reinforcement-learning-via-spectral-methods | c7bd04d7eea6869807ed70af76960dcc542b0a82 | [
"MIT"
] | null | null | null | tensor_rl/planning/ValueIterationClass.py | umd-huang-lab/reinforcement-learning-via-spectral-methods | c7bd04d7eea6869807ed70af76960dcc542b0a82 | [
"MIT"
] | null | null | null | tensor_rl/planning/ValueIterationClass.py | umd-huang-lab/reinforcement-learning-via-spectral-methods | c7bd04d7eea6869807ed70af76960dcc542b0a82 | [
"MIT"
] | null | null | null | # Python imports.
from __future__ import print_function
from collections import defaultdict
import random
# Check python version for queue module.
import sys
if sys.version_info[0] < 3:
import Queue as queue
else:
import queue
# Other imports.
from tensor_rl.planning.PlannerClass import Planner
class ValueIteration(Planner):
def __init__(self, mdp, name="value_iter", delta=0.0001, max_iterations=500, sample_rate=3):
'''
Args:
mdp (MDP)
delta (float): After an iteration if VI, if no change more than @\delta has occurred, terminates.
max_iterations (int): Hard limit for number of iterations.
sample_rate (int): Determines how many samples from @mdp to take to estimate T(s' | s, a).
horizon (int): Number of steps before terminating.
'''
Planner.__init__(self, mdp, name=name)
self.delta = delta
self.max_iterations = max_iterations
self.sample_rate = sample_rate
self.value_func = defaultdict(float)
self.reachability_done = False
self.has_computed_matrix = False
self.bellman_backups = 0
self.trans_dict = defaultdict(lambda:defaultdict(lambda:defaultdict(float)))
def _compute_matrix_from_trans_func(self):
if self.has_computed_matrix:
self._compute_reachable_state_space()
# We've already run this, just return.
return
# K: state
# K: a
# K: s_prime
# V: prob
for s in self.get_states():
for a in self.actions:
for sample in range(self.sample_rate):
s_prime = self.transition_func(s, a)
self.trans_dict[s][a][s_prime] += 1.0 / self.sample_rate
self.has_computed_matrix = True
def get_gamma(self):
return self.mdp.get_gamma()
def get_num_states(self):
if not self.reachability_done:
self._compute_reachable_state_space()
return len(self.states)
def get_states(self):
if self.reachability_done:
return list(self.states)
else:
self._compute_reachable_state_space()
return list(self.states)
def get_value(self, s):
'''
Args:
s (State)
Returns:
(float)
'''
return self._compute_max_qval_action_pair(s)[0]
def get_q_value(self, s, a):
'''
Args:
s (State)
a (str): action
Returns:
(float): The Q estimate given the current value function @self.value_func.
'''
# Compute expected value.
expected_val = 0
for s_prime in self.trans_dict[s][a].keys():
print(s, a, s_prime, self.trans_dict[s][a][s_prime] * self.reward_func(s, a, s_prime) + self.gamma * self.trans_dict[s][a][s_prime] * self.value_func[s_prime])
expected_val += self.trans_dict[s][a][s_prime] * self.reward_func(s, a, s_prime) + self.gamma * self.trans_dict[s][a][s_prime] * self.value_func[s_prime]
return expected_val
def _compute_reachable_state_space(self):
'''
Summary:
Starting with @self.start_state, determines all reachable states
and stores them in self.states.
'''
if self.reachability_done:
return
state_queue = queue.Queue()
state_queue.put(self.init_state)
self.states.add(self.init_state)
while not state_queue.empty():
s = state_queue.get()
for a in self.actions:
for samples in range(self.sample_rate): # Take @sample_rate samples to estimate E[V]
next_state = self.transition_func(s,a)
if next_state not in self.states:
self.states.add(next_state)
state_queue.put(next_state)
self.reachability_done = True
def run_vi(self):
'''
Returns:
(tuple):
1. (int): num iterations taken.
2. (float): value.
Summary:
Runs ValueIteration and fills in the self.value_func.
'''
# Algorithm bookkeeping params.
iterations = 0
max_diff = float("inf")
self._compute_matrix_from_trans_func()
state_space = self.get_states()
self.bellman_backups = 0
# Main loop.
while max_diff > self.delta and iterations < self.max_iterations:
max_diff = 0
for s in state_space:
self.bellman_backups += 1
if s.is_terminal():
continue
max_q = float("-inf")
for a in self.actions:
q_s_a = self.get_q_value(s, a)
max_q = q_s_a if q_s_a > max_q else max_q
# Check terminating condition.
max_diff = max(abs(self.value_func[s] - max_q), max_diff)
# Update value.
self.value_func[s] = max_q
iterations += 1
value_of_init_state = self._compute_max_qval_action_pair(self.init_state)[0]
self.has_planned = True
return iterations, value_of_init_state
def get_num_backups_in_recent_run(self):
if self.has_planned:
return self.bellman_backups
else:
print("Warning: asking for num Bellman backups, but VI has not been run.")
return 0
def print_value_func(self):
for key in self.value_func.keys():
print(key, ":", self.value_func[key])
def plan(self, state=None, horizon=50):
'''
Args:
state (State)
horizon (int)
Returns:
(list): List of actions
'''
state = self.mdp.get_init_state() if state is None else state
if self.has_planned is False:
print("Warning: VI has not been run. Plan will be random.")
action_seq = []
state_seq = [state]
steps = 0
while (not state.is_terminal()) and steps < horizon:
next_action = self._get_max_q_action(state)
action_seq.append(next_action)
state = self.transition_func(state, next_action)
state_seq.append(state)
steps += 1
return action_seq, state_seq
def _get_max_q_action(self, state):
'''
Args:
state (State)
Returns:
(str): The action with the max q value in the given @state.
'''
return self._compute_max_qval_action_pair(state)[1]
def get_max_q_actions(self, state):
'''
Args:
state (State)
Returns:
(list): List of actions with the max q value in the given @state.
'''
max_q_val = self.get_value(state)
best_action_list = []
# Find best action (action w/ current max predicted Q value)
for action in self.actions:
q_s_a = self.get_q_value(state, action)
if q_s_a == max_q_val:
best_action_list.append(action)
return best_action_list
def policy(self, state):
'''
Args:
state (State)
Returns:
(str): Action
Summary:
For use in a FixedPolicyAgent.
'''
return self._get_max_q_action(state)
def _compute_max_qval_action_pair(self, state):
'''
Args:
state (State)
Returns:
(tuple) --> (float, str): where the float is the Qval, str is the action.
'''
# Grab random initial action in case all equal
max_q_val = float("-inf")
best_action = self.actions[0]
# Find best action (action w/ current max predicted Q value)
for action in self.actions:
q_s_a = self.get_q_value(state, action)
if q_s_a > max_q_val:
max_q_val = q_s_a
best_action = action
return max_q_val, best_action
| 30.599251 | 172 | 0.566463 |
f6270cf4070be0bd94f3ea0a479f5616677a7877 | 9,100 | py | Python | CRAFT/craft_utils.py | PaddleEdu/OCR-models-PaddlePaddle | 1a62dcf4b647310b505fa5e4a18bbd8d27c39dfd | [
"Apache-2.0"
] | 12 | 2021-05-10T13:47:32.000Z | 2021-07-30T08:59:53.000Z | CRAFT/craft_utils.py | maxpark/OCR-models-PaddlePaddle | 1a62dcf4b647310b505fa5e4a18bbd8d27c39dfd | [
"Apache-2.0"
] | 4 | 2021-05-16T11:28:32.000Z | 2021-07-23T07:41:44.000Z | CRAFT/craft_utils.py | maxpark/OCR-models-PaddlePaddle | 1a62dcf4b647310b505fa5e4a18bbd8d27c39dfd | [
"Apache-2.0"
] | 4 | 2021-05-12T16:32:03.000Z | 2021-11-17T23:18:39.000Z | """
Copyright (c) 2019-present NAVER Corp.
MIT License
"""
# -*- coding: utf-8 -*-
import numpy as np
import cv2
import math
""" auxilary functions """
# unwarp corodinates
def warpCoord(Minv, pt):
out = np.matmul(Minv, (pt[0], pt[1], 1))
return np.array([out[0]/out[2], out[1]/out[2]])
""" end of auxilary functions """
def getDetBoxes_core(textmap, linkmap, text_threshold, link_threshold, low_text):
# prepare data
linkmap = linkmap.copy()
textmap = textmap.copy()
img_h, img_w = textmap.shape
""" labeling method """
ret, text_score = cv2.threshold(textmap, low_text, 1, 0)
ret, link_score = cv2.threshold(linkmap, link_threshold, 1, 0)
text_score_comb = np.clip(text_score + link_score, 0, 1)
nLabels, labels, stats, centroids = cv2.connectedComponentsWithStats(text_score_comb.astype(np.uint8), connectivity=4)
det = []
mapper = []
for k in range(1,nLabels):
# size filtering
size = stats[k, cv2.CC_STAT_AREA]
if size < 10: continue
# thresholding
if np.max(textmap[labels==k]) < text_threshold: continue
# make segmentation map
segmap = np.zeros(textmap.shape, dtype=np.uint8)
segmap[labels==k] = 255
segmap[np.logical_and(link_score==1, text_score==0)] = 0 # remove link area
x, y = stats[k, cv2.CC_STAT_LEFT], stats[k, cv2.CC_STAT_TOP]
w, h = stats[k, cv2.CC_STAT_WIDTH], stats[k, cv2.CC_STAT_HEIGHT]
niter = int(math.sqrt(size * min(w, h) / (w * h)) * 2)
sx, ex, sy, ey = x - niter, x + w + niter + 1, y - niter, y + h + niter + 1
# boundary check
if sx < 0 : sx = 0
if sy < 0 : sy = 0
if ex >= img_w: ex = img_w
if ey >= img_h: ey = img_h
kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(1 + niter, 1 + niter))
segmap[sy:ey, sx:ex] = cv2.dilate(segmap[sy:ey, sx:ex], kernel)
# make box
np_contours = np.roll(np.array(np.where(segmap!=0)),1,axis=0).transpose().reshape(-1,2)
rectangle = cv2.minAreaRect(np_contours)
box = cv2.boxPoints(rectangle)
# align diamond-shape
w, h = np.linalg.norm(box[0] - box[1]), np.linalg.norm(box[1] - box[2])
box_ratio = max(w, h) / (min(w, h) + 1e-5)
if abs(1 - box_ratio) <= 0.1:
l, r = min(np_contours[:,0]), max(np_contours[:,0])
t, b = min(np_contours[:,1]), max(np_contours[:,1])
box = np.array([[l, t], [r, t], [r, b], [l, b]], dtype=np.float32)
# make clock-wise order
startidx = box.sum(axis=1).argmin()
box = np.roll(box, 4-startidx, 0)
box = np.array(box)
det.append(box)
mapper.append(k)
return det, labels, mapper
def getPoly_core(boxes, labels, mapper, linkmap):
# configs
num_cp = 5
max_len_ratio = 0.7
expand_ratio = 1.45
max_r = 2.0
step_r = 0.2
polys = []
for k, box in enumerate(boxes):
# size filter for small instance
w, h = int(np.linalg.norm(box[0] - box[1]) + 1), int(np.linalg.norm(box[1] - box[2]) + 1)
if w < 10 or h < 10:
polys.append(None); continue
# warp image
tar = np.float32([[0,0],[w,0],[w,h],[0,h]])
M = cv2.getPerspectiveTransform(box, tar)
word_label = cv2.warpPerspective(labels, M, (w, h), flags=cv2.INTER_NEAREST)
try:
Minv = np.linalg.inv(M)
except:
polys.append(None); continue
# binarization for selected label
cur_label = mapper[k]
word_label[word_label != cur_label] = 0
word_label[word_label > 0] = 1
""" Polygon generation """
# find top/bottom contours
cp = []
max_len = -1
for i in range(w):
region = np.where(word_label[:,i] != 0)[0]
if len(region) < 2 : continue
cp.append((i, region[0], region[-1]))
length = region[-1] - region[0] + 1
if length > max_len: max_len = length
# pass if max_len is similar to h
if h * max_len_ratio < max_len:
polys.append(None); continue
# get pivot points with fixed length
tot_seg = num_cp * 2 + 1
seg_w = w / tot_seg # segment width
pp = [None] * num_cp # init pivot points
cp_section = [[0, 0]] * tot_seg
seg_height = [0] * num_cp
seg_num = 0
num_sec = 0
prev_h = -1
for i in range(0,len(cp)):
(x, sy, ey) = cp[i]
if (seg_num + 1) * seg_w <= x and seg_num <= tot_seg:
# average previous segment
if num_sec == 0: break
cp_section[seg_num] = [cp_section[seg_num][0] / num_sec, cp_section[seg_num][1] / num_sec]
num_sec = 0
# reset variables
seg_num += 1
prev_h = -1
# accumulate center points
cy = (sy + ey) * 0.5
cur_h = ey - sy + 1
cp_section[seg_num] = [cp_section[seg_num][0] + x, cp_section[seg_num][1] + cy]
num_sec += 1
if seg_num % 2 == 0: continue # No polygon area
if prev_h < cur_h:
pp[int((seg_num - 1)/2)] = (x, cy)
seg_height[int((seg_num - 1)/2)] = cur_h
prev_h = cur_h
# processing last segment
if num_sec != 0:
cp_section[-1] = [cp_section[-1][0] / num_sec, cp_section[-1][1] / num_sec]
# pass if num of pivots is not sufficient or segment widh is smaller than character height
if None in pp or seg_w < np.max(seg_height) * 0.25:
polys.append(None); continue
# calc median maximum of pivot points
half_char_h = np.median(seg_height) * expand_ratio / 2
# calc gradiant and apply to make horizontal pivots
new_pp = []
for i, (x, cy) in enumerate(pp):
dx = cp_section[i * 2 + 2][0] - cp_section[i * 2][0]
dy = cp_section[i * 2 + 2][1] - cp_section[i * 2][1]
if dx == 0: # gradient if zero
new_pp.append([x, cy - half_char_h, x, cy + half_char_h])
continue
rad = - math.atan2(dy, dx)
c, s = half_char_h * math.cos(rad), half_char_h * math.sin(rad)
new_pp.append([x - s, cy - c, x + s, cy + c])
# get edge points to cover character heatmaps
isSppFound, isEppFound = False, False
grad_s = (pp[1][1] - pp[0][1]) / (pp[1][0] - pp[0][0]) + (pp[2][1] - pp[1][1]) / (pp[2][0] - pp[1][0])
grad_e = (pp[-2][1] - pp[-1][1]) / (pp[-2][0] - pp[-1][0]) + (pp[-3][1] - pp[-2][1]) / (pp[-3][0] - pp[-2][0])
for r in np.arange(0.5, max_r, step_r):
dx = 2 * half_char_h * r
if not isSppFound:
line_img = np.zeros(word_label.shape, dtype=np.uint8)
dy = grad_s * dx
p = np.array(new_pp[0]) - np.array([dx, dy, dx, dy])
cv2.line(line_img, (int(p[0]), int(p[1])), (int(p[2]), int(p[3])), 1, thickness=1)
if np.sum(np.logical_and(word_label, line_img)) == 0 or r + 2 * step_r >= max_r:
spp = p
isSppFound = True
if not isEppFound:
line_img = np.zeros(word_label.shape, dtype=np.uint8)
dy = grad_e * dx
p = np.array(new_pp[-1]) + np.array([dx, dy, dx, dy])
cv2.line(line_img, (int(p[0]), int(p[1])), (int(p[2]), int(p[3])), 1, thickness=1)
if np.sum(np.logical_and(word_label, line_img)) == 0 or r + 2 * step_r >= max_r:
epp = p
isEppFound = True
if isSppFound and isEppFound:
break
# pass if boundary of polygon is not found
if not (isSppFound and isEppFound):
polys.append(None); continue
# make final polygon
poly = []
poly.append(warpCoord(Minv, (spp[0], spp[1])))
for p in new_pp:
poly.append(warpCoord(Minv, (p[0], p[1])))
poly.append(warpCoord(Minv, (epp[0], epp[1])))
poly.append(warpCoord(Minv, (epp[2], epp[3])))
for p in reversed(new_pp):
poly.append(warpCoord(Minv, (p[2], p[3])))
poly.append(warpCoord(Minv, (spp[2], spp[3])))
# add to final result
polys.append(np.array(poly))
return polys
def getDetBoxes(textmap, linkmap, text_threshold, link_threshold, low_text, poly=False):
boxes, labels, mapper = getDetBoxes_core(textmap, linkmap, text_threshold, link_threshold, low_text)
if poly:
polys = getPoly_core(boxes, labels, mapper, linkmap)
else:
polys = [None] * len(boxes)
return boxes, polys
def adjustResultCoordinates(polys, ratio_w, ratio_h, ratio_net = 2):
if len(polys) > 0:
polys = np.array(polys)
for k in range(len(polys)):
if polys[k] is not None:
polys[k] *= (ratio_w * ratio_net, ratio_h * ratio_net)
return polys
| 37.142857 | 122 | 0.538022 |
91929608b0845bc870f3aec7748f38f1578cef35 | 843 | py | Python | frontend/features/steps/home_step.py | mauriciochaves/minicursopythonnordeste | 246059f1404fc3737554ec4b389170b4cf473ac4 | [
"MIT"
] | 1 | 2019-07-21T14:56:06.000Z | 2019-07-21T14:56:06.000Z | frontend/features/steps/home_step.py | mauriciochaves/minicursopythonnordeste | 246059f1404fc3737554ec4b389170b4cf473ac4 | [
"MIT"
] | null | null | null | frontend/features/steps/home_step.py | mauriciochaves/minicursopythonnordeste | 246059f1404fc3737554ec4b389170b4cf473ac4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from behave import *
from features.pages.home_page import HomePage
use_step_matcher("re")
@given("I open: https://en.wiktionary.org/prepare by the first time")
def step_impl(context):
context.home_page = HomePage(context.driver)
context.home_page.navigate_page(context.config.userdata['url'])
@when("I am on homepage")
def step_impl(context):
context.home_page.assertPage()
@then("I type on look up search text: (?P<searchTxt>.+)")
def step_impl(context, searchTxt):
context.home_page.type_lookUp(searchTxt)
@then("I click on lookup search button")
def step_impl(context, searchTxt):
context.home_page.click_lookUpSearch()
@then("I look up the definition of the word (?P<searchTxt>.+)")
def step_impl(context, searchTxt):
context.home_page.realize_search(searchTxt) | 30.107143 | 69 | 0.740214 |
a1b3ac3d72b806c96786de2ba8aa11032d80c350 | 1,916 | py | Python | gimel-dataapi/gimel-core/src/main/scripts/tools/bin/hbase/hbase_ddl_tool.py | talshimoni/gimel | c8d190e463bafa2e43bfbaafd14b5acc8c5ad50e | [
"Apache-2.0"
] | 222 | 2018-04-08T16:41:49.000Z | 2022-02-20T16:29:02.000Z | gimel-dataapi/gimel-core/src/main/scripts/tools/bin/hbase/hbase_ddl_tool.py | talshimoni/gimel | c8d190e463bafa2e43bfbaafd14b5acc8c5ad50e | [
"Apache-2.0"
] | 173 | 2018-04-08T16:50:59.000Z | 2022-01-21T19:34:22.000Z | gimel-dataapi/gimel-core/src/main/scripts/tools/bin/hbase/hbase_ddl_tool.py | talshimoni/gimel | c8d190e463bafa2e43bfbaafd14b5acc8c5ad50e | [
"Apache-2.0"
] | 102 | 2018-04-08T16:45:06.000Z | 2022-02-01T11:24:57.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import subprocess
import sys
import os
hbase_table=os.environ["HBASE_TABLE"]
hbase_namespace="default"
hiveDatabase = os.environ["HIVE_DB"]
if len(hbase_table.split(":"))==2:
hbase_namespace=hbase_table.split(":")[0]
hbase_table=hbase_table.split(":")[1]
if (hiveDatabase is ""):
hiveDatabase = "default"
alldata=set()
count=0
for line in sys.stdin:
count +=1
# sys.stdout.write("\r%d-%d" % (len(alldata),count))
alldata.add(line.split(",")[0]),
alldata.remove('\n')
allcolumns = []
for column in alldata:
if not column.startswith(">"):
allcolumns.append(column)
ddl="""
CREATE EXTERNAL TABLE IF NOT EXISTS """+hiveDatabase+""".pc_hbase_"""+ hbase_namespace+"_"+hbase_table +"""
(
`"""+ hbase_table +"_key` string"+"""
""" + '\n'.join(",`"+column.split(":")[1]+"` string" for column in allcolumns) + """
)
STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
WITH SERDEPROPERTIES ("hbase.columns.mapping" =
":key"""+ ''.join(","+column for column in allcolumns)+'\")'+"""
TBLPROPERTIES ("hbase.table.name" = """ + '"'+hbase_namespace+":"+hbase_table+'");'
print(ddl+"\n\n")
| 33.034483 | 107 | 0.702505 |
dd47d0a3acb4969eafb747bd9b2a9ffdb8fe9f84 | 835 | py | Python | audio/utils.py | JonnoFTW/markov-img-gen | 71676df8a066010231684f49eb31d79c15962bd3 | [
"CC0-1.0"
] | 6 | 2019-01-06T06:03:30.000Z | 2022-02-05T17:27:32.000Z | audio/utils.py | JonnoFTW/markov-img-gen | 71676df8a066010231684f49eb31d79c15962bd3 | [
"CC0-1.0"
] | 1 | 2021-09-08T12:50:38.000Z | 2021-09-10T01:38:47.000Z | audio/utils.py | JonnoFTW/markov-img-gen | 71676df8a066010231684f49eb31d79c15962bd3 | [
"CC0-1.0"
] | 1 | 2018-03-19T18:31:19.000Z | 2018-03-19T18:31:19.000Z | from scipy.io import wavfile
import os
import subprocess as sp
import numpy as np
def readwav(fname):
"""
Reads in 2 channel file and converts it to mono PCM
:param fname:
:return: the file as an array of pcm data
"""
wavname = fname.split('.')[0] + '.wav'
outname = 'generated.' + wavname
ffmpeg = 'ffmpeg'
if not os.path.exists(wavname):
print(sp.check_output([ffmpeg, '-i', fname, wavname]))
fs, data = wavfile.read(wavname)
data = (data.sum(axis=1) / 2).astype(np.int16)
return fs, data, outname
def train_test_split(x, y, test_size=0.33):
    """Partition the paired arrays ``x`` and ``y`` into train/test splits.

    The first ``(1 - test_size)`` fraction of rows (no shuffling) becomes the
    training set; the remainder is the test set.

    :param x: feature array; must expose ``shape`` and support slicing
    :param y: label array with the same number of rows as ``x``
    :param test_size: fraction of rows reserved for the test split
    :return: tuple ``(x_train, x_test, y_train, y_test)``
    :raises ValueError: if ``x`` and ``y`` have differing row counts
    """
    if x.shape[0] != y.shape[0]:
        raise ValueError("x and y must both have same number of rows")
    cut = int(x.shape[0] * (1 - test_size))
    x_train, x_test = x[:cut], x[cut:]
    y_train, y_test = y[:cut], y[cut:]
    return x_train, x_test, y_train, y_test
| 25.30303 | 70 | 0.631138 |
7fc8547e1f93e03c437f4c12c50893ef62e93c92 | 1,295 | py | Python | Hello_8bitPixels.py | TechnoTanuki/Python_BMP | d6f7e7a4b74f7d6e8761d618c156d37c97726038 | [
"MIT"
] | 3 | 2022-02-24T15:46:43.000Z | 2022-03-30T13:17:03.000Z | Hello_8bitPixels.py | TechnoTanuki/Python_BMP | d6f7e7a4b74f7d6e8761d618c156d37c97726038 | [
"MIT"
] | null | null | null | Hello_8bitPixels.py | TechnoTanuki/Python_BMP | d6f7e7a4b74f7d6e8761d618c156d37c97726038 | [
"MIT"
] | null | null | null | notice = """
Plot 256 color pixels Demo
-----------------------------------
| Copyright 2022 by Joel C. Alcarez |
| [joelalcarez1975@gmail.com] |
|-----------------------------------|
| We make absolutely no warranty |
| of any kind, expressed or implied |
|-----------------------------------|
| This graphics library outputs |
| to a bitmap file. |
-----------------------------------
"""
from Python_BMP.BITMAPlib import(
newBMP,
plotxybit,
saveBMP
)
import subprocess as proc
from os import path
def main():
    """Render a 512x512 256-color (x AND y) bitmap, save it and open an editor."""
    print(notice)
    imgedt = 'mspaint' # replace with another editor if Unix
    rootdir = path.dirname(__file__) # get path of this script
    side = 512 # square bitmap dimensions
    bmp = newBMP(side, side, 8) # 8 bits per pixel -> 256-color palette
    for col in range(side):
        for row in range(side):
            # pixel value is the bitwise AND of the coordinates, clamped to a byte
            plotxybit(bmp, col, row, (col & row) & 0xFF)
    file = 'Hello8bitPixels.bmp' #file name
    saveBMP(file, bmp) #dump the bytearray to disk
    print('Saved to %s in %s\nAll done close %s to finish' % (file, rootdir, imgedt))
    retcode = proc.call([imgedt, file]) # block until the editor is closed

if __name__=="__main__":
        main()
| 32.375 | 67 | 0.488031 |
b7841098220244b5cda4cc7b11be9166f8769791 | 1,133 | py | Python | custom_packages/CustomNeuralNetworks/test_CustomNeuralNetworks/test_unet.py | davidelomeo/mangroves_deep_learning | 27ce24fe183b65f054c1d6b41417a64355cd0c9c | [
"MIT"
] | null | null | null | custom_packages/CustomNeuralNetworks/test_CustomNeuralNetworks/test_unet.py | davidelomeo/mangroves_deep_learning | 27ce24fe183b65f054c1d6b41417a64355cd0c9c | [
"MIT"
] | null | null | null | custom_packages/CustomNeuralNetworks/test_CustomNeuralNetworks/test_unet.py | davidelomeo/mangroves_deep_learning | 27ce24fe183b65f054c1d6b41417a64355cd0c9c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This script tests the function that builds the Un-Net model. The test
# itself does not look for numerical values but checks if the model
# returns am object or not. This is because there are several tests
# within the UNet class that checks if the input parameters are valid
# and returns None if they are not. The test, therefor, simply checks
# if these preliminary tests work as intended.
#
# Author: Davide Lomeo
# Email: davide.lomeo20@imperial.ac.uk
# GitHub: https://github.com/acse-2020/acse2020-acse9-finalreport-acse-dl1420-3
# Date: 16 July 2021
# Version: 1.0
from CustomNeuralNetworks import unet
def test_UNet():
    """Check that UNet.build_model rejects bad shapes and accepts a valid one.

    build_model performs its own parameter validation and returns None on
    invalid input, so this test only inspects whether an object came back.
    """
    builder = unet.UNet(7)
    bad_width = builder.build_model((256, 250, 12))    # width not valid
    bad_channels = builder.build_model((256, 256, -12))  # negative channel count
    bad_size = builder.build_model((300, 300, 12))     # unsupported spatial size
    valid_model = builder.build_model((256, 256, 12))  # well-formed input
    assert bad_width is None
    assert bad_channels is None
    assert bad_size is None
    assert valid_model is not None
    return
| 32.371429 | 79 | 0.736981 |
2bfe22267b5d461cc28ef2ab4bc69469a27f1447 | 24,734 | py | Python | test/functional/feature_csv_activation.py | Kingourd/bitcoin | bd5670eafcfbdf2b0bcb2c74655f860bac8eef45 | [
"MIT"
] | 1 | 2021-09-12T18:52:22.000Z | 2021-09-12T18:52:22.000Z | test/functional/feature_csv_activation.py | Kingourd/bitcoin | bd5670eafcfbdf2b0bcb2c74655f860bac8eef45 | [
"MIT"
] | null | null | null | test/functional/feature_csv_activation.py | Kingourd/bitcoin | bd5670eafcfbdf2b0bcb2c74655f860bac8eef45 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2015-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test CSV soft fork activation.
This soft fork will activate the following BIPS:
BIP 68 - nSequence relative lock times
BIP 112 - CHECKSEQUENCEVERIFY
BIP 113 - MedianTimePast semantics for nLockTime
mine 83 blocks whose coinbases will be used to generate inputs for our tests
mine 344 blocks and seed block chain with the 83 inputs used for our tests at height 427
mine 2 blocks and verify soft fork not yet activated
mine 1 block and test that soft fork is activated (rules enforced for next block)
Test BIP 113 is enforced
Mine 4 blocks so next height is 580 and test BIP 68 is enforced for time and height
Mine 1 block so next height is 581 and test BIP 68 now passes time but not height
Mine 1 block so next height is 582 and test BIP 68 now passes time and height
Test that BIP 112 is enforced
Various transactions will be used to test that the BIPs rules are not enforced before the soft fork activates
And that after the soft fork activates transactions pass and fail as they should according to the rules.
For each BIP, transactions of versions 1 and 2 will be tested.
----------------
BIP 113:
bip113tx - modify the nLocktime variable
BIP 68:
bip68txs - 16 txs with nSequence relative locktime of 10 with various bits set as per the relative_locktimes below
BIP 112:
bip112txs_vary_nSequence - 16 txs with nSequence relative_locktimes of 10 evaluated against 10 OP_CSV OP_DROP
bip112txs_vary_nSequence_9 - 16 txs with nSequence relative_locktimes of 9 evaluated against 10 OP_CSV OP_DROP
bip112txs_vary_OP_CSV - 16 txs with nSequence = 10 evaluated against varying {relative_locktimes of 10} OP_CSV OP_DROP
bip112txs_vary_OP_CSV_9 - 16 txs with nSequence = 9 evaluated against varying {relative_locktimes of 10} OP_CSV OP_DROP
bip112tx_special - test negative argument to OP_CSV
bip112tx_emptystack - test empty stack (= no argument) OP_CSV
"""
from itertools import product
import time
from test_framework.blocktools import (
CSV_ACTIVATION_HEIGHT,
create_block,
create_coinbase,
)
from test_framework.p2p import P2PDataStore
from test_framework.script import (
CScript,
OP_CHECKSEQUENCEVERIFY,
OP_DROP,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
softfork_active,
)
from test_framework.wallet import (
MiniWallet,
MiniWalletMode,
)
# Number of testing transactions: 1 BIP113 tx, 16 BIP68 txs, 66 BIP112 txs (see comments above)
TESTING_TX_COUNT = 83
# Number of coinbase blocks we need to generate as inputs for our txs
COINBASE_BLOCK_COUNT = TESTING_TX_COUNT

BASE_RELATIVE_LOCKTIME = 10
SEQ_DISABLE_FLAG = 1 << 31     # BIP68: relative lock-time disabled when set
SEQ_RANDOM_HIGH_BIT = 1 << 25  # bit outside the BIP68 mask, varied to test masking
SEQ_TYPE_FLAG = 1 << 22        # BIP68: time-based (rather than height-based) lock
SEQ_RANDOM_LOW_BIT = 1 << 18   # bit outside the BIP68 mask, varied to test masking


def relative_locktime(sdf, srhb, stf, srlb):
    """Return a relative locktime of 10 with the requested flag bits OR-ed in."""
    flag_bits = (
        (SEQ_DISABLE_FLAG if sdf else 0)
        | (SEQ_RANDOM_HIGH_BIT if srhb else 0)
        | (SEQ_TYPE_FLAG if stf else 0)
        | (SEQ_RANDOM_LOW_BIT if srlb else 0)
    )
    return BASE_RELATIVE_LOCKTIME | flag_bits
def all_rlt_txs(txs):
    """Extract the raw transaction object from each test-transaction dict."""
    result = []
    for entry in txs:
        result.append(entry['tx'])
    return result
class BIP68_112_113Test(BitcoinTestFramework):
    """Functional test of BIP68/BIP112/BIP113 enforcement around CSV activation.

    Drives a single node through the scenario described in the module
    docstring: seed inputs, verify pre-fork leniency, then verify post-fork
    enforcement for both version 1 and version 2 transactions.
    """
    def set_test_params(self):
        # Single node on a fresh chain so block heights are deterministic.
        self.num_nodes = 1
        self.setup_clean_chain = True
        self.extra_args = [[
            '-peertimeout=999999', # bump because mocktime might cause a disconnect otherwise
            '-whitelist=noban@127.0.0.1',
            '-par=1', # Use only one script thread to get the exact reject reason for testing
        ]]
        self.supports_cli = False
    def create_self_transfer_from_utxo(self, input_tx):
        """Create (but do not broadcast) a tx spending input_tx's output back to the wallet."""
        utxo = self.miniwallet.get_utxo(txid=input_tx.rehash(), mark_as_spent=False)
        tx = self.miniwallet.create_self_transfer(from_node=self.nodes[0], utxo_to_spend=tx_utxo)['tx'] if False else self.miniwallet.create_self_transfer(from_node=self.nodes[0], utxo_to_spend=utxo)['tx']
        return tx
    def create_bip112special(self, input, txversion):
        """Create a tx whose scriptSig prepends <-1> OP_CSV OP_DROP (negative locktime arg)."""
        tx = self.create_self_transfer_from_utxo(input)
        tx.nVersion = txversion
        self.miniwallet.sign_tx(tx)
        tx.vin[0].scriptSig = CScript([-1, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(tx.vin[0].scriptSig)))
        return tx
    def create_bip112emptystack(self, input, txversion):
        """Create a tx whose scriptSig prepends OP_CSV with no argument (empty stack)."""
        tx = self.create_self_transfer_from_utxo(input)
        tx.nVersion = txversion
        self.miniwallet.sign_tx(tx)
        tx.vin[0].scriptSig = CScript([OP_CHECKSEQUENCEVERIFY] + list(CScript(tx.vin[0].scriptSig)))
        return tx
    def send_generic_input_tx(self, coinbases):
        """Spend one coinbase output from `coinbases` via the miniwallet and broadcast it."""
        input_txid = self.nodes[0].getblock(coinbases.pop(), 2)['tx'][0]['txid']
        utxo_to_spend = self.miniwallet.get_utxo(txid=input_txid)
        return self.miniwallet.send_self_transfer(from_node=self.nodes[0], utxo_to_spend=utxo_to_spend)['tx']
    def create_bip68txs(self, bip68inputs, txversion, locktime_delta=0):
        """Returns a list of bip68 transactions with different bits set."""
        txs = []
        assert len(bip68inputs) >= 16
        # One tx per combination of the four sequence flag bits.
        for i, (sdf, srhb, stf, srlb) in enumerate(product(*[[True, False]] * 4)):
            locktime = relative_locktime(sdf, srhb, stf, srlb)
            tx = self.create_self_transfer_from_utxo(bip68inputs[i])
            tx.nVersion = txversion
            tx.vin[0].nSequence = locktime + locktime_delta
            self.miniwallet.sign_tx(tx)
            tx.rehash()
            txs.append({'tx': tx, 'sdf': sdf, 'stf': stf})
        return txs
    def create_bip112txs(self, bip112inputs, varyOP_CSV, txversion, locktime_delta=0):
        """Returns a list of bip112 transactions with different bits set."""
        txs = []
        assert len(bip112inputs) >= 16
        # One tx per combination of the four sequence flag bits.
        for i, (sdf, srhb, stf, srlb) in enumerate(product(*[[True, False]] * 4)):
            locktime = relative_locktime(sdf, srhb, stf, srlb)
            tx = self.create_self_transfer_from_utxo(bip112inputs[i])
            if varyOP_CSV: # if varying OP_CSV, nSequence is fixed
                tx.vin[0].nSequence = BASE_RELATIVE_LOCKTIME + locktime_delta
            else: # vary nSequence instead, OP_CSV is fixed
                tx.vin[0].nSequence = locktime + locktime_delta
            tx.nVersion = txversion
            self.miniwallet.sign_tx(tx)
            if varyOP_CSV:
                tx.vin[0].scriptSig = CScript([locktime, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(tx.vin[0].scriptSig)))
            else:
                tx.vin[0].scriptSig = CScript([BASE_RELATIVE_LOCKTIME, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(tx.vin[0].scriptSig)))
            tx.rehash()
            txs.append({'tx': tx, 'sdf': sdf, 'stf': stf})
        return txs
    def generate_blocks(self, number):
        """Create `number` empty test blocks on the tip, advancing mock time 600s each."""
        test_blocks = []
        for _ in range(number):
            block = self.create_test_block([])
            test_blocks.append(block)
            self.last_block_time += 600
            self.tip = block.sha256
            self.tipheight += 1
        return test_blocks
    def create_test_block(self, txs):
        """Build and solve a version-4 block on the current tip containing `txs`."""
        block = create_block(self.tip, create_coinbase(self.tipheight + 1), self.last_block_time + 600)
        block.nVersion = 4
        block.vtx.extend(txs)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()
        return block
    def send_blocks(self, blocks, success=True, reject_reason=None):
        """Sends blocks to test node. Syncs and verifies that tip has advanced to most recent block.

        Call with success = False if the tip shouldn't advance to the most recent block."""
        self.helper_peer.send_blocks_and_test(blocks, self.nodes[0], success=success, reject_reason=reject_reason)
    def run_test(self):
        """End-to-end scenario: seed inputs, verify pre-fork acceptance, then
        verify BIP113/BIP68/BIP112 enforcement after CSV activation."""
        self.helper_peer = self.nodes[0].add_p2p_connection(P2PDataStore())
        self.miniwallet = MiniWallet(self.nodes[0], mode=MiniWalletMode.RAW_P2PK)
        self.log.info("Generate blocks in the past for coinbase outputs.")
        long_past_time = int(time.time()) - 600 * 1000 # enough to build up to 1000 blocks 10 minutes apart without worrying about getting into the future
        self.nodes[0].setmocktime(long_past_time - 100) # enough so that the generated blocks will still all be before long_past_time
        self.coinbase_blocks = self.generate(self.miniwallet, COINBASE_BLOCK_COUNT) # blocks generated for inputs
        self.nodes[0].setmocktime(0) # set time back to present so yielded blocks aren't in the future as we advance last_block_time
        self.tipheight = COINBASE_BLOCK_COUNT # height of the next block to build
        self.last_block_time = long_past_time
        self.tip = int(self.nodes[0].getbestblockhash(), 16)
        # Activation height is hardcoded
        # We advance to block height five below BIP112 activation for the following tests
        test_blocks = self.generate_blocks(CSV_ACTIVATION_HEIGHT - 5 - COINBASE_BLOCK_COUNT)
        self.send_blocks(test_blocks)
        assert not softfork_active(self.nodes[0], 'csv')
        # Inputs at height = 431
        #
        # Put inputs for all tests in the chain at height 431 (tip now = 430) (time increases by 600s per block)
        # Note we reuse inputs for v1 and v2 txs so must test these separately
        # 16 normal inputs
        bip68inputs = []
        for _ in range(16):
            bip68inputs.append(self.send_generic_input_tx(self.coinbase_blocks))
        # 2 sets of 16 inputs with 10 OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
        bip112basicinputs = []
        for _ in range(2):
            inputs = []
            for _ in range(16):
                inputs.append(self.send_generic_input_tx(self.coinbase_blocks))
            bip112basicinputs.append(inputs)
        # 2 sets of 16 varied inputs with (relative_lock_time) OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
        bip112diverseinputs = []
        for _ in range(2):
            inputs = []
            for _ in range(16):
                inputs.append(self.send_generic_input_tx(self.coinbase_blocks))
            bip112diverseinputs.append(inputs)
        # 1 special input with -1 OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
        bip112specialinput = self.send_generic_input_tx(self.coinbase_blocks)
        # 1 special input with (empty stack) OP_CSV (actually will be prepended to spending scriptSig)
        bip112emptystackinput = self.send_generic_input_tx(self.coinbase_blocks)
        # 1 normal input
        bip113input = self.send_generic_input_tx(self.coinbase_blocks)
        self.nodes[0].setmocktime(self.last_block_time + 600)
        inputblockhash = self.generate(self.nodes[0], 1)[0] # 1 block generated for inputs to be in chain at height 431
        self.nodes[0].setmocktime(0)
        self.tip = int(inputblockhash, 16)
        self.tipheight += 1
        self.last_block_time += 600
        assert_equal(len(self.nodes[0].getblock(inputblockhash, True)["tx"]), TESTING_TX_COUNT + 1)
        # 2 more version 4 blocks
        test_blocks = self.generate_blocks(2)
        self.send_blocks(test_blocks)
        assert_equal(self.tipheight, CSV_ACTIVATION_HEIGHT - 2)
        self.log.info(f"Height = {self.tipheight}, CSV not yet active (will activate for block {CSV_ACTIVATION_HEIGHT}, not {CSV_ACTIVATION_HEIGHT - 1})")
        assert not softfork_active(self.nodes[0], 'csv')
        # Test both version 1 and version 2 transactions for all tests
        # BIP113 test transaction will be modified before each use to put in appropriate block time
        bip113tx_v1 = self.create_self_transfer_from_utxo(bip113input)
        bip113tx_v1.vin[0].nSequence = 0xFFFFFFFE
        bip113tx_v1.nVersion = 1
        bip113tx_v2 = self.create_self_transfer_from_utxo(bip113input)
        bip113tx_v2.vin[0].nSequence = 0xFFFFFFFE
        bip113tx_v2.nVersion = 2
        # For BIP68 test all 16 relative sequence locktimes
        bip68txs_v1 = self.create_bip68txs(bip68inputs, 1)
        bip68txs_v2 = self.create_bip68txs(bip68inputs, 2)
        # For BIP112 test:
        # 16 relative sequence locktimes of 10 against 10 OP_CSV OP_DROP inputs
        bip112txs_vary_nSequence_v1 = self.create_bip112txs(bip112basicinputs[0], False, 1)
        bip112txs_vary_nSequence_v2 = self.create_bip112txs(bip112basicinputs[0], False, 2)
        # 16 relative sequence locktimes of 9 against 10 OP_CSV OP_DROP inputs
        bip112txs_vary_nSequence_9_v1 = self.create_bip112txs(bip112basicinputs[1], False, 1, -1)
        bip112txs_vary_nSequence_9_v2 = self.create_bip112txs(bip112basicinputs[1], False, 2, -1)
        # sequence lock time of 10 against 16 (relative_lock_time) OP_CSV OP_DROP inputs
        bip112txs_vary_OP_CSV_v1 = self.create_bip112txs(bip112diverseinputs[0], True, 1)
        bip112txs_vary_OP_CSV_v2 = self.create_bip112txs(bip112diverseinputs[0], True, 2)
        # sequence lock time of 9 against 16 (relative_lock_time) OP_CSV OP_DROP inputs
        bip112txs_vary_OP_CSV_9_v1 = self.create_bip112txs(bip112diverseinputs[1], True, 1, -1)
        bip112txs_vary_OP_CSV_9_v2 = self.create_bip112txs(bip112diverseinputs[1], True, 2, -1)
        # -1 OP_CSV OP_DROP input
        bip112tx_special_v1 = self.create_bip112special(bip112specialinput, 1)
        bip112tx_special_v2 = self.create_bip112special(bip112specialinput, 2)
        # (empty stack) OP_CSV input
        bip112tx_emptystack_v1 = self.create_bip112emptystack(bip112emptystackinput, 1)
        bip112tx_emptystack_v2 = self.create_bip112emptystack(bip112emptystackinput, 2)
        self.log.info("TESTING")
        self.log.info("Pre-Soft Fork Tests. All txs should pass.")
        self.log.info("Test version 1 txs")
        success_txs = []
        # BIP113 tx, -1 CSV tx and empty stack CSV tx should succeed
        bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
        self.miniwallet.sign_tx(bip113tx_v1)
        success_txs.append(bip113tx_v1)
        success_txs.append(bip112tx_special_v1)
        success_txs.append(bip112tx_emptystack_v1)
        # add BIP 68 txs
        success_txs.extend(all_rlt_txs(bip68txs_v1))
        # add BIP 112 with seq=10 txs
        success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v1))
        success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v1))
        # try BIP 112 with seq=9 txs
        success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v1))
        success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v1))
        self.send_blocks([self.create_test_block(success_txs)])
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        self.log.info("Test version 2 txs")
        success_txs = []
        # BIP113 tx, -1 CSV tx and empty stack CSV tx should succeed
        bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
        self.miniwallet.sign_tx(bip113tx_v2)
        success_txs.append(bip113tx_v2)
        success_txs.append(bip112tx_special_v2)
        success_txs.append(bip112tx_emptystack_v2)
        # add BIP 68 txs
        success_txs.extend(all_rlt_txs(bip68txs_v2))
        # add BIP 112 with seq=10 txs
        success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v2))
        success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v2))
        # try BIP 112 with seq=9 txs
        success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v2))
        success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v2))
        self.send_blocks([self.create_test_block(success_txs)])
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        # 1 more version 4 block to get us to height 432 so the fork should now be active for the next block
        assert not softfork_active(self.nodes[0], 'csv')
        test_blocks = self.generate_blocks(1)
        self.send_blocks(test_blocks)
        assert softfork_active(self.nodes[0], 'csv')
        self.log.info("Post-Soft Fork Tests.")
        self.log.info("BIP 113 tests")
        # BIP 113 tests should now fail regardless of version number if nLockTime isn't satisfied by new rules
        bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
        self.miniwallet.sign_tx(bip113tx_v1)
        bip113tx_v1.rehash()
        bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
        self.miniwallet.sign_tx(bip113tx_v2)
        bip113tx_v2.rehash()
        for bip113tx in [bip113tx_v1, bip113tx_v2]:
            self.send_blocks([self.create_test_block([bip113tx])], success=False, reject_reason='bad-txns-nonfinal')
        # BIP 113 tests should now pass if the locktime is < MTP
        bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 - 1 # < MTP of prior block
        self.miniwallet.sign_tx(bip113tx_v1)
        bip113tx_v1.rehash()
        bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 - 1 # < MTP of prior block
        self.miniwallet.sign_tx(bip113tx_v2)
        bip113tx_v2.rehash()
        for bip113tx in [bip113tx_v1, bip113tx_v2]:
            self.send_blocks([self.create_test_block([bip113tx])])
            self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        # Next block height = 437 after 4 blocks of random version
        test_blocks = self.generate_blocks(4)
        self.send_blocks(test_blocks)
        self.log.info("BIP 68 tests")
        self.log.info("Test version 1 txs - all should still pass")
        success_txs = []
        success_txs.extend(all_rlt_txs(bip68txs_v1))
        self.send_blocks([self.create_test_block(success_txs)])
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        self.log.info("Test version 2 txs")
        # All txs with SEQUENCE_LOCKTIME_DISABLE_FLAG set pass
        bip68success_txs = [tx['tx'] for tx in bip68txs_v2 if tx['sdf']]
        self.send_blocks([self.create_test_block(bip68success_txs)])
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        # All txs without flag fail as we are at delta height = 8 < 10 and delta time = 8 * 600 < 10 * 512
        bip68timetxs = [tx['tx'] for tx in bip68txs_v2 if not tx['sdf'] and tx['stf']]
        for tx in bip68timetxs:
            self.send_blocks([self.create_test_block([tx])], success=False, reject_reason='bad-txns-nonfinal')
        bip68heighttxs = [tx['tx'] for tx in bip68txs_v2 if not tx['sdf'] and not tx['stf']]
        for tx in bip68heighttxs:
            self.send_blocks([self.create_test_block([tx])], success=False, reject_reason='bad-txns-nonfinal')
        # Advance one block to 438
        test_blocks = self.generate_blocks(1)
        self.send_blocks(test_blocks)
        # Height txs should fail and time txs should now pass 9 * 600 > 10 * 512
        bip68success_txs.extend(bip68timetxs)
        self.send_blocks([self.create_test_block(bip68success_txs)])
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        for tx in bip68heighttxs:
            self.send_blocks([self.create_test_block([tx])], success=False, reject_reason='bad-txns-nonfinal')
        # Advance one block to 439
        test_blocks = self.generate_blocks(1)
        self.send_blocks(test_blocks)
        # All BIP 68 txs should pass
        bip68success_txs.extend(bip68heighttxs)
        self.send_blocks([self.create_test_block(bip68success_txs)])
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        self.log.info("BIP 112 tests")
        self.log.info("Test version 1 txs")
        # -1 OP_CSV tx and (empty stack) OP_CSV tx should fail
        self.send_blocks([self.create_test_block([bip112tx_special_v1])], success=False,
                         reject_reason='non-mandatory-script-verify-flag (Negative locktime)')
        self.send_blocks([self.create_test_block([bip112tx_emptystack_v1])], success=False,
                         reject_reason='non-mandatory-script-verify-flag (Operation not valid with the current stack size)')
        # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 1 txs should still pass
        success_txs = [tx['tx'] for tx in bip112txs_vary_OP_CSV_v1 if tx['sdf']]
        success_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_9_v1 if tx['sdf']]
        self.send_blocks([self.create_test_block(success_txs)])
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        # If SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV, version 1 txs should now fail
        fail_txs = all_rlt_txs(bip112txs_vary_nSequence_v1)
        fail_txs += all_rlt_txs(bip112txs_vary_nSequence_9_v1)
        fail_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_v1 if not tx['sdf']]
        fail_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_9_v1 if not tx['sdf']]
        for tx in fail_txs:
            self.send_blocks([self.create_test_block([tx])], success=False,
                             reject_reason='non-mandatory-script-verify-flag (Locktime requirement not satisfied)')
        self.log.info("Test version 2 txs")
        # -1 OP_CSV tx and (empty stack) OP_CSV tx should fail
        self.send_blocks([self.create_test_block([bip112tx_special_v2])], success=False,
                         reject_reason='non-mandatory-script-verify-flag (Negative locktime)')
        self.send_blocks([self.create_test_block([bip112tx_emptystack_v2])], success=False,
                         reject_reason='non-mandatory-script-verify-flag (Operation not valid with the current stack size)')
        # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 2 txs should pass (all sequence locks are met)
        success_txs = [tx['tx'] for tx in bip112txs_vary_OP_CSV_v2 if tx['sdf']]
        success_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_9_v2 if tx['sdf']]
        self.send_blocks([self.create_test_block(success_txs)])
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        # SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV for all remaining txs ##
        # All txs with nSequence 9 should fail either due to earlier mismatch or failing the CSV check
        fail_txs = all_rlt_txs(bip112txs_vary_nSequence_9_v2)
        fail_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_9_v2 if not tx['sdf']]
        for tx in fail_txs:
            self.send_blocks([self.create_test_block([tx])], success=False,
                             reject_reason='non-mandatory-script-verify-flag (Locktime requirement not satisfied)')
        # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in nSequence, tx should fail
        fail_txs = [tx['tx'] for tx in bip112txs_vary_nSequence_v2 if tx['sdf']]
        for tx in fail_txs:
            self.send_blocks([self.create_test_block([tx])], success=False,
                             reject_reason='non-mandatory-script-verify-flag (Locktime requirement not satisfied)')
        # If sequencelock types mismatch, tx should fail
        fail_txs = [tx['tx'] for tx in bip112txs_vary_nSequence_v2 if not tx['sdf'] and tx['stf']]
        fail_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_v2 if not tx['sdf'] and tx['stf']]
        for tx in fail_txs:
            self.send_blocks([self.create_test_block([tx])], success=False,
                             reject_reason='non-mandatory-script-verify-flag (Locktime requirement not satisfied)')
        # Remaining txs should pass, just test masking works properly
        success_txs = [tx['tx'] for tx in bip112txs_vary_nSequence_v2 if not tx['sdf'] and not tx['stf']]
        success_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_v2 if not tx['sdf'] and not tx['stf']]
        self.send_blocks([self.create_test_block(success_txs)])
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        # Additional test, of checking that comparison of two time types works properly
        time_txs = []
        for tx in [tx['tx'] for tx in bip112txs_vary_OP_CSV_v2 if not tx['sdf'] and tx['stf']]:
            tx.vin[0].nSequence = BASE_RELATIVE_LOCKTIME | SEQ_TYPE_FLAG
            self.miniwallet.sign_tx(tx)
            tx.rehash()
            time_txs.append(tx)
        self.send_blocks([self.create_test_block(time_txs)])
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# Script entry point: run the functional test via the framework's main() driver.
if __name__ == '__main__':
    BIP68_112_113Test().main()
| 50.374745 | 155 | 0.688688 |
f0be92933f5c1898a35c67e43696ae5e848d7eae | 4,799 | py | Python | stog/data/token_indexers/token_characters_indexer.py | emorynlp/levi-graph-amr-parser | f71f1056c13181b8db31d6136451fb8d57114819 | [
"Apache-2.0"
] | 9 | 2021-07-12T22:05:47.000Z | 2022-02-22T03:10:14.000Z | stog/data/token_indexers/token_characters_indexer.py | emorynlp/levi-graph-amr-parser | f71f1056c13181b8db31d6136451fb8d57114819 | [
"Apache-2.0"
] | 4 | 2021-08-31T08:28:37.000Z | 2022-03-28T05:52:14.000Z | stog/data/token_indexers/token_characters_indexer.py | emorynlp/levi-graph-amr-parser | f71f1056c13181b8db31d6136451fb8d57114819 | [
"Apache-2.0"
] | null | null | null | from typing import Dict, List
import itertools
from overrides import overrides
from stog.utils.checks import ConfigurationError
from stog.utils.string import pad_sequence_to_length
from stog.data.tokenizers.token import Token
from stog.data.token_indexers.token_indexer import TokenIndexer
from stog.data.vocabulary import Vocabulary
from stog.data.tokenizers.character_tokenizer import CharacterTokenizer
@TokenIndexer.register("characters")
class TokenCharactersIndexer(TokenIndexer[List[int]]):
    """
    This :class:`TokenIndexer` represents tokens as lists of character indices.
    Parameters
    ----------
    namespace : ``str``, optional (default=``token_characters``)
        We will use this namespace in the :class:`Vocabulary` to map the characters in each token
        to indices.
    character_tokenizer : ``CharacterTokenizer``, optional (default=``CharacterTokenizer()``)
        We use a :class:`CharacterTokenizer` to handle splitting tokens into characters, as it has
        options for byte encoding and other things.  The default here is to instantiate a
        ``CharacterTokenizer`` with its default parameters, which uses unicode characters and
        retains casing.
    """
    # pylint: disable=no-self-use
    def __init__(self,
                 namespace: str = 'token_characters',
                 character_tokenizer: CharacterTokenizer = CharacterTokenizer()) -> None:
        self._namespace = namespace
        self._character_tokenizer = character_tokenizer
    def count_vocab_items(self, token: Token, counter: Dict[str, Dict[str, int]]):
        """Count each character of ``token`` into ``counter`` under our namespace."""
        if token.text is None:
            raise ConfigurationError('TokenCharactersIndexer needs a tokenizer that retains text')
        for character in self._character_tokenizer.tokenize(token.text):
            # If `text_id` is set on the character token (e.g., if we're using byte encoding), we
            # will not be using the vocab for this character.
            if getattr(character, 'text_id', None) is None:
                counter[self._namespace][character.text] += 1
    def tokens_to_indices(self,
                          tokens: List[Token],
                          vocabulary: Vocabulary,
                          index_name: str) -> Dict[str, List[List[int]]]:
        """Convert each token to a list of character indices, keyed by ``index_name``."""
        indices: List[List[int]] = []
        for token in tokens:
            token_indices: List[int] = []
            if token.text is None:
                raise ConfigurationError('TokenCharactersIndexer needs a tokenizer that retains text')
            for character in self._character_tokenizer.tokenize(token.text):
                if getattr(character, 'text_id', None) is not None:
                    # `text_id` being set on the token means that we aren't using the vocab, we just
                    # use this id instead.
                    index = character.text_id
                else:
                    index = vocabulary.get_token_index(character.text, self._namespace)
                token_indices.append(index)
            indices.append(token_indices)
        return {index_name: indices}
    def get_padding_lengths(self, token: List[int]) -> Dict[str, int]:
        """A single token's padding dimension is the number of characters it contains."""
        return {'num_token_characters': len(token)}
    def get_padding_token(self) -> List[int]:
        """The padding token is an empty character list."""
        return []
    def pad_token_sequence(self,
                           tokens: Dict[str, List[List[int]]],
                           desired_num_tokens: Dict[str, int],
                           padding_lengths: Dict[str, int]) -> Dict[str, List[List[int]]]:
        """Pad (or truncate) to ``desired_num_tokens`` tokens of ``num_token_characters`` chars each."""
        # Pad the tokens.
        # tokens has only one key...
        key = list(tokens.keys())[0]
        padded_tokens = pad_sequence_to_length(
            tokens[key], desired_num_tokens[key],
            default_value=self.get_padding_token
        )
        # Pad the characters within the tokens.
        desired_token_length = padding_lengths['num_token_characters']
        longest_token: List[int] = max(tokens[key], key=len, default=[])
        padding_value = 0
        if desired_token_length > len(longest_token):
            # Since we want to pad to greater than the longest token, we add a
            # "dummy token" so we can take advantage of the fast implementation of itertools.zip_longest.
            padded_tokens.append([padding_value] * desired_token_length)
        # pad the list of lists to the longest sublist, appending 0's
        padded_tokens = list(zip(*itertools.zip_longest(*padded_tokens, fillvalue=padding_value)))
        if desired_token_length > len(longest_token):
            # Removes the "dummy token".
            padded_tokens.pop()
        # Truncates all the tokens to the desired length, and return the result.
        return {key: [list(token[:desired_token_length]) for token in padded_tokens]}
| 45.704762 | 105 | 0.646593 |
e69e67d259938c8ebc95410acce6dc9dd732c4d2 | 840 | py | Python | ihome/web_html.py | tianxinyueming/iHome-python | 7cc6089d12ccebe148d46218c69889d83bd1511f | [
"MIT"
] | 1 | 2020-12-15T07:44:48.000Z | 2020-12-15T07:44:48.000Z | ihome/web_html.py | tianxinyueming/iHome-python | 7cc6089d12ccebe148d46218c69889d83bd1511f | [
"MIT"
] | null | null | null | ihome/web_html.py | tianxinyueming/iHome-python | 7cc6089d12ccebe148d46218c69889d83bd1511f | [
"MIT"
] | null | null | null | # coding: utf-8
from flask import Blueprint, current_app, make_response
from flask_wtf import csrf
# Blueprint that serves the static front-end html files
html = Blueprint("web_html", __name__)
# 127.0.0.1:5000/
# 127.0.0.1:5000/index.html
# 127.0.0.1:5000/register.html
# 127.0.0.1:5000/favicon.ico  # site icon that browsers request automatically
@html.route("/<re(r'.*'):html_file_name>")
def get_html(html_file_name):
    """Serve a static html file (or the favicon)."""
    # An empty html_file_name means the request path was "/", i.e. the home page
    if not html_file_name:
        html_file_name = "index.html"
    # Everything except favicon.ico lives under the static html/ directory
    if html_file_name != "favicon.ico":
        html_file_name = "html/" + html_file_name
    # Generate a CSRF token value
    csrf_token = csrf.generate_csrf()
    # Flask helper that builds a response from a file in the static folder
    resp = make_response(current_app.send_static_file(html_file_name))
    # Put the token in a cookie so the front end can send it back with forms
    resp.set_cookie("csrf_token", csrf_token)
    return resp
return resp | 24.705882 | 70 | 0.7 |
10ff688161d965820c56a570d8782ce1f5cedee2 | 2,221 | py | Python | irrigator_pro/uga/models.py | warnes/irrigatorpro | 4838f8832bdbf87f394a0298adc5dabfc26e82e8 | [
"MIT"
] | null | null | null | irrigator_pro/uga/models.py | warnes/irrigatorpro | 4838f8832bdbf87f394a0298adc5dabfc26e82e8 | [
"MIT"
] | null | null | null | irrigator_pro/uga/models.py | warnes/irrigatorpro | 4838f8832bdbf87f394a0298adc5dabfc26e82e8 | [
"MIT"
] | null | null | null | from django.db import models
from farms.models import *
class UGAProbeData(models.Model):
    """One reading from a UGA soil-moisture probe node.

    Maps onto the pre-existing 'fields'.'data' table of the external
    'ugatifton' database (see ``Meta.managed = False`` below); Django
    never creates or migrates this table.
    """
    id = models.AutoField (db_column="data_id", primary_key=True)
    # Timestamp of the reading (field name shadows the stdlib module name).
    datetime = models.DateTimeField(db_column="dt" )
    # Identification of the field, node, and probe radio.
    field_code = models.IntegerField (db_column="fieldid" )
    node_code = models.IntegerField (db_column="nodeid" )
    radio_id = models.CharField (db_column="netaddr", max_length=10 )
    # Battery state of the probe node.
    battery_voltage = models.FloatField (db_column="batt" )
    battery_percent = models.FloatField (db_column="battlife" )
    # Soil-moisture sensor readings sm1..sm3; presumably soil water
    # potential at 8/16/24 inch depth -- TODO confirm with sensor docs.
    soil_potential_8 = models.FloatField (db_column="sm1" )
    soil_potential_16 = models.FloatField (db_column="sm2" )
    soil_potential_24 = models.FloatField (db_column="sm3" )
    # Temperature readings from the board and two thermocouples.
    circuit_board_temp = models.FloatField (db_column="boardtemp" )
    thermocouple_1_temp = models.FloatField (db_column="temp1" )
    thermocouple_2_temp = models.FloatField (db_column="temp2" )
    minutes_awake = models.IntegerField (db_column="awake" )
    # Database alias used by the project's router -- TODO confirm router usage.
    __database__ = "ugatifton"
    class Meta:
        managed = False
        db_table = 'fields"."data' # hack to access 'data' table within schema 'fields'
    def __unicode__(self):
        # Python 2-style display string (Django 1.x); summarizes one reading.
        return u"RadioID '%s' at '%s': (%f, %f, %f) (%d, %d)" % (self.radio_id,
                                                                 self.datetime,
                                                                 self.soil_potential_8,
                                                                 self.soil_potential_16,
                                                                 self.soil_potential_24,
                                                                 self.thermocouple_1_temp,
                                                                 self.thermocouple_2_temp)
def get_all_radio_ids():
    """
    Return a sorted list of all distinct probe radio_ids.
    """
    distinct_rows = UGAProbeData.objects.values('radio_id').distinct()
    return sorted(row['radio_id'] for row in distinct_rows)
| 52.880952 | 89 | 0.517335 |
eb177681b211bd25acfe721848ba2e8e4bf4cad6 | 538 | py | Python | Ocular/items.py | Ocular/Ocular | a0919e0a2676774fd4ed8d617a76a2574aedee3c | [
"MIT"
] | 1 | 2015-04-05T10:42:57.000Z | 2015-04-05T10:42:57.000Z | Ocular/items.py | Ocular/Ocular | a0919e0a2676774fd4ed8d617a76a2574aedee3c | [
"MIT"
] | null | null | null | Ocular/items.py | Ocular/Ocular | a0919e0a2676774fd4ed8d617a76a2574aedee3c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class OcularItem(scrapy.Item):
    """Scrapy item holding one scraped record.

    All attributes are plain ``scrapy.Field`` declarations; values are
    filled in by the spiders at crawl time.
    """
    # define the fields for your item here like:
    # name = scrapy.Field()
    guid = scrapy.Field()
    id = scrapy.Field()
    title = scrapy.Field()
    url = scrapy.Field()
    time = scrapy.Field()
    version = scrapy.Field()
    author = scrapy.Field()
    image = scrapy.Field()  # spacing normalized (was "image=scrapy.Field()")
    tag = scrapy.Field()
    extra = scrapy.Field()
| 22.416667 | 51 | 0.641264 |
b080f2cee3f65b6228b1b3527fe0d040cdc035c2 | 4,361 | py | Python | app/modules/users/routes.py | ezequiaspedro/SBF-Api | 547322505ed4f50bdf7dc86a341eee0a667f0a4c | [
"MIT"
] | null | null | null | app/modules/users/routes.py | ezequiaspedro/SBF-Api | 547322505ed4f50bdf7dc86a341eee0a667f0a4c | [
"MIT"
] | null | null | null | app/modules/users/routes.py | ezequiaspedro/SBF-Api | 547322505ed4f50bdf7dc86a341eee0a667f0a4c | [
"MIT"
] | 1 | 2021-06-12T01:36:17.000Z | 2021-06-12T01:36:17.000Z | # Standard Imports
from fastapi import APIRouter
from fastapi import HTTPException
from fastapi import Depends
from sqlalchemy.orm import Session
# Database Import
from app.db.engine import get_db
# Typing Imports
from typing import List
# Exception imports
from sqlalchemy.exc import IntegrityError
# Authentication Imports
from ..users.models import User
from app.core.auth import manager
# User Schemas
from .services import UserService
from .schemas import UserCreate
from .schemas import UserUpdate
from .schemas import UserResponse
# Router holding all /users endpoints defined below.
route = APIRouter()
# Module-level service singleton used by every handler in this module.
user_service = UserService()
@route.get("/users/", response_model=List[UserResponse])
def get_all_users(db: Session = Depends(get_db), auth_user: User=Depends(manager)):
    """
    ## Retrieve a list of users.

    ### Raises:
        > HTTPException: Raises 401 if the user is not an admin.

    ### Returns:
        > List[UserResponse]: A List of users response models.
    """
    # Admin-only endpoint: reject non-admin callers up front.
    # (idiom fix: "== False" replaced by "not", per PEP 8 / flake8 E712)
    if not auth_user.admin:
        raise HTTPException(status_code=401, detail="Access permitted only for admins")
    return user_service.fetch_all(db)
@route.get("/users/{id}", response_model=UserResponse)
def get_one_user(id: int, db: Session = Depends(get_db), auth_user: User=Depends(manager)):
    """
    ## Retrieve one user.

    ### Args:
        > id (int): The user ID.

    ### Raises:
        > HTTPException: Raises 404 if user was not found.
        > HTTPException: Raises 401 if the user is not an admin.

    ### Returns:
        > UserResponse: The user response model.
    """
    # Admin-only endpoint.
    if auth_user.admin == False:
        raise HTTPException(status_code=401, detail="Access permitted only for admins")
    found = user_service.fetch(db, id)
    if not found:
        raise HTTPException(status_code=404, detail="User was not found.")
    return found
@route.post("/users/", response_model=UserResponse)
def create_user(user: UserCreate, db: Session = Depends(get_db), auth_user: User=Depends(manager)):
    """
    ## Creates an user.

    ### Args:
        > user (UserCreate): The user model.

    ### Raises:
        > HTTPException: Raises 401 if the user is not an admin.
        > HTTPException: Raises 422 if the email is already in use.

    ### Returns:
        > UserResponse: The user response model.
    """
    # Admin-only endpoint.
    if not auth_user.admin:
        raise HTTPException(status_code=401, detail="Access permitted only for admins")
    try:
        return user_service.create(db, user)
    except IntegrityError as err:
        # Duplicate e-mail is the only integrity violation translated
        # into a client-facing 422 response.
        if "email" in repr(err):
            raise HTTPException(status_code=422, detail="Já existe um usuário com este email cadastrado.")
        # BUG FIX: previously any other IntegrityError fell through and the
        # handler implicitly returned None, which FastAPI then failed to
        # serialize as UserResponse, masking the real error. Re-raise it.
        raise
@route.patch("/users/{id}", response_model=UserResponse)
def update_user(id: int, user: UserUpdate, db: Session = Depends(get_db), auth_user: User=Depends(manager)):
    """
    ## Edits an user by id.

    ### Args:
        > id (int): The user ID.
        > user (UserUpdate): The user model.

    ### Raises:
        > HTTPException: Raises 404 if user was not found.
        > HTTPException: Raises 401 if the user is not an admin.

    ### Returns:
        > UserResponse: The user response model.
    """
    # Admin-only endpoint.
    if auth_user.admin == False:
        raise HTTPException(status_code=401, detail="Access permitted only for admins")
    updated = user_service.update(db, id, user)
    if not updated:
        raise HTTPException(status_code=404, detail="User was not found.")
    return updated
@route.delete("/users/{id}", response_model=UserResponse)
def delete_user(id: int, db: Session = Depends(get_db), auth_user: User=Depends(manager)):
    """
    ## Deletes an user by id.

    ### Args:
        > id (int): The user ID.

    ### Raises:
        > HTTPException: Raises 404 if user was not found.
        > HTTPException: Raises 401 if the user is not an admin.

    ### Returns:
        > UserResponse: The user response model.
    """
    # Admin-only endpoint.
    if auth_user.admin == False:
        raise HTTPException(status_code=401, detail="Access permitted only for admins")
    deleted = user_service.delete(db, id)
    if not deleted:
        raise HTTPException(status_code=404, detail="User was not found.")
    return deleted
143fcb665f71f9cfbbfdab294a8a88f75f6f3496 | 408 | py | Python | snippets/urls.py | pplmx/LearningDjango | c1d9c6e70aff91fe2dd4c9a5f175032c868d25ca | [
"MIT"
] | null | null | null | snippets/urls.py | pplmx/LearningDjango | c1d9c6e70aff91fe2dd4c9a5f175032c868d25ca | [
"MIT"
] | null | null | null | snippets/urls.py | pplmx/LearningDjango | c1d9c6e70aff91fe2dd4c9a5f175032c868d25ca | [
"MIT"
] | null | null | null | from django.urls import path, include
from rest_framework.routers import DefaultRouter
from snippets import views
# Create a router and register our viewsets with it.
# DefaultRouter also provides the browsable API root view; each register()
# call mounts list/detail routes for the viewset under the given prefix.
router = DefaultRouter()
router.register(r"snippets", views.SnippetViewSet)
router.register(r"users", views.UserViewSet)
# The API URLs are now determined automatically by the router.
urlpatterns = [
    path("", include(router.urls)),
]
| 27.2 | 62 | 0.779412 |
08f7cc93b1161cb35eaf17a7a572b6b015b170d7 | 123 | py | Python | python/testData/intentions/typeInDocstring7.py | jnthn/intellij-community | 8fa7c8a3ace62400c838e0d5926a7be106aa8557 | [
"Apache-2.0"
] | 2 | 2019-04-28T07:48:50.000Z | 2020-12-11T14:18:08.000Z | python/testData/intentions/typeInDocstring7.py | Cyril-lamirand/intellij-community | 60ab6c61b82fc761dd68363eca7d9d69663cfa39 | [
"Apache-2.0"
] | 173 | 2018-07-05T13:59:39.000Z | 2018-08-09T01:12:03.000Z | python/testData/intentions/typeInDocstring7.py | Cyril-lamirand/intellij-community | 60ab6c61b82fc761dd68363eca7d9d69663cfa39 | [
"Apache-2.0"
] | 2 | 2020-03-15T08:57:37.000Z | 2020-04-07T04:48:14.000Z | def foo(b<caret>ar, baz):
"""Do foo.
:param bar: something
:return: something else
"""
return bar + baz | 20.5 | 27 | 0.569106 |
b90d0bafe1a35626f244ee3f43a00830b3135f7a | 1,240 | py | Python | Python/email_file/send_photo.py | yishantao/DailyPractice | ee26859af3faf48e63d6c2850db1d895a8a88fb1 | [
"MIT"
] | null | null | null | Python/email_file/send_photo.py | yishantao/DailyPractice | ee26859af3faf48e63d6c2850db1d895a8a88fb1 | [
"MIT"
] | null | null | null | Python/email_file/send_photo.py | yishantao/DailyPractice | ee26859af3faf48e63d6c2850db1d895a8a88fb1 | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
import smtplib
from email.mime.image import MIMEImage
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.header import Header
# NOTE(review): account credentials are hard-coded below; move them to
# environment variables or a config file before sharing this script.
sender = 'isyishantao@163.com'
password = 'yishantao0825'  # client authorization code, set after enabling the mailbox's SMTP service
receivers = ['769338809@qq.com']
# Build a multipart/related message: an HTML body plus one inline image.
message = MIMEMultipart('related')
message['From'] = sender
message['To'] = receivers[0]
subject = 'Python邮件测试'
message['Subject'] = Header(subject, 'utf-8')
messageAlternative = MIMEMultipart('alternative')
message.attach(messageAlternative)
mail_msg = """
<html>
  <body>
    <p>Python邮件发送测试</p>
    <p><a href="https://www.python.org">Python官方网站</a></p>
    <p>图片演示:</p>
    <p><img src="cid:image1" alt="image1"></p>
  </body>
</html>
"""
messageAlternative.attach(MIMEText(mail_msg, 'html', 'utf-8'))
# BUG FIX: use a context manager so the image file is closed even if
# MIMEImage() raises (the original fp.close() could be skipped).
with open('book.png', 'rb') as fp:
    msgImage = MIMEImage(fp.read())
# Content-ID referenced by the <img src="cid:image1"> tag in the HTML body.
msgImage.add_header('Content-ID', '<image1>')
message.attach(msgImage)
try:
    # Remote (non-local) SMTP server: connect over SSL.
    smtp_object = smtplib.SMTP_SSL('smtp.163.com', 465)
    smtp_object.login(sender, password)
    smtp_object.sendmail(sender, receivers, message.as_string())
    print('邮件发送成功!')
except smtplib.SMTPException as e:
    print('无法发送邮件case:%s' % e)
| 24.8 | 64 | 0.715323 |
f232d7533f4effe5161611bc3e0862ab87c5f305 | 2,587 | py | Python | official/vision/beta/projects/simclr/configs/simclr_test.py | akshit-protonn/models | 38c8c6fe4144c93d6aadd19981c2b90570c29eba | [
"Apache-2.0"
] | 8 | 2021-12-30T06:07:14.000Z | 2022-02-10T14:49:13.000Z | official/vision/beta/projects/simclr/configs/simclr_test.py | akshit-protonn/models | 38c8c6fe4144c93d6aadd19981c2b90570c29eba | [
"Apache-2.0"
] | 62 | 2021-06-09T00:47:27.000Z | 2021-09-24T09:06:58.000Z | official/vision/beta/projects/simclr/configs/simclr_test.py | akshit-protonn/models | 38c8c6fe4144c93d6aadd19981c2b90570c29eba | [
"Apache-2.0"
] | 2 | 2021-06-13T10:26:17.000Z | 2021-08-28T08:40:01.000Z | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for simclr."""
# pylint: disable=unused-import
from absl.testing import parameterized
import tensorflow as tf
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.vision.beta.projects.simclr.common import registry_imports # pylint: disable=unused-import
from official.vision.beta.projects.simclr.configs import simclr as exp_cfg
class SimCLRConfigTest(tf.test.TestCase, parameterized.TestCase):
  """Smoke-tests the registered SimCLR experiment configurations."""

  @parameterized.parameters(
      'simclr_pretraining_imagenet', 'simclr_finetuning_imagenet')
  def test_simclr_configs(self, config_name):
    config = exp_factory.get_exp_config(config_name)
    self.assertIsInstance(config, cfg.ExperimentConfig)
    # BUG FIX: the original compared against 'simclr_pretrain_imagenet',
    # which never matches the parameterized name
    # 'simclr_pretraining_imagenet', so the pretrain-task assertion below
    # was dead code.
    if config_name == 'simclr_pretraining_imagenet':
      self.assertIsInstance(config.task, exp_cfg.SimCLRPretrainTask)
    elif config_name == 'simclr_finetuning_imagenet':
      self.assertIsInstance(config.task, exp_cfg.SimCLRFinetuneTask)
    self.assertIsInstance(config.task.model,
                          exp_cfg.SimCLRModel)
    self.assertIsInstance(config.task.train_data, exp_cfg.DataConfig)
    # A config with train_data.is_training unset must fail validation.
    config.task.train_data.is_training = None
    with self.assertRaises(KeyError):
      config.validate()
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
  tf.test.main()
| 41.063492 | 105 | 0.745265 |
f0f27d5da2b16d6aac0b8818127dec40bbadf176 | 1,368 | py | Python | jdcloud_sdk/services/vm/models/InstanceTemplateCustomData.py | jdcloud-apigateway/jdcloud-sdk-python | 0886769bcf1fb92128a065ff0f4695be099571cc | [
"Apache-2.0"
] | 14 | 2018-04-19T09:53:56.000Z | 2022-01-27T06:05:48.000Z | jdcloud_sdk/services/vm/models/InstanceTemplateCustomData.py | jdcloud-apigateway/jdcloud-sdk-python | 0886769bcf1fb92128a065ff0f4695be099571cc | [
"Apache-2.0"
] | 15 | 2018-09-11T05:39:54.000Z | 2021-07-02T12:38:02.000Z | jdcloud_sdk/services/vm/models/InstanceTemplateCustomData.py | jdcloud-apigateway/jdcloud-sdk-python | 0886769bcf1fb92128a065ff0f4695be099571cc | [
"Apache-2.0"
] | 33 | 2018-04-20T05:29:16.000Z | 2022-02-17T09:10:05.000Z | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class InstanceTemplateCustomData(object):
    def __init__(self, id=None, metadata=None, userdata=None):
        """
        :param id: (Optional) Instance template ID.
        :param metadata: (Optional) User-defined metadata.
            Specified as key-value pairs, queryable from inside the instance
            via the metadata service. At most 40 pairs are supported; keys up
            to 256 characters, values up to 16 KB, case-insensitive.
            Note: keys must not end with a hyphen (-), otherwise the key is
            ignored.
        :param userdata: (Optional) Custom script.
            Currently only launch scripts (`launch-script`) are supported;
            the content must be `base64` encoded and at most 16 KB before
            encoding.
            **Linux**: `bash` and `python` are supported; before encoding the
            content must start with `#!/bin/bash` or `#!/usr/bin/env python`
            respectively as its first line.
            **Windows**: `bat` and `powershell` are supported; before
            encoding the content must be wrapped in `<cmd></cmd>` or
            `<powershell></powershell>` as its first and last lines.
        """
        self.id = id
        self.metadata = metadata
        self.userdata = userdata
| 36 | 95 | 0.714912 |
e153bec67455aa1eb690dacdf0eebd09826efa8a | 6,317 | py | Python | TrainingExtensions/tensorflow/src/python/aimet_tensorflow/common/operation.py | quic-akhobare/aimet | 1811a0ef58a75d103e173731b436876ee5dc4c49 | [
"BSD-3-Clause"
] | null | null | null | TrainingExtensions/tensorflow/src/python/aimet_tensorflow/common/operation.py | quic-akhobare/aimet | 1811a0ef58a75d103e173731b436876ee5dc4c49 | [
"BSD-3-Clause"
] | null | null | null | TrainingExtensions/tensorflow/src/python/aimet_tensorflow/common/operation.py | quic-akhobare/aimet | 1811a0ef58a75d103e173731b436876ee5dc4c49 | [
"BSD-3-Clause"
] | 1 | 2021-03-06T18:40:33.000Z | 2021-03-06T18:40:33.000Z | # /usr/bin/env python3.5
# -*- mode: python -*-
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2019, Qualcomm Innovation Center, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# @@-COPYRIGHT-END-@@
# =============================================================================
""" Tf Operation class and utilities """
from typing import List
import tensorflow as tf
import aimet_common.connected_graph.operation
from aimet_tensorflow.common.product import Product
class OpWithMetaInfoType:
    """ Data type to hold info on connected graph op as tf.Operation with input and output tensors as tf.Tensor types"""
    def __init__(self, conn_op, in_tensor: tf.Tensor, out_tensor: tf.Tensor):
        """
        :param conn_op: connected-graph op; its get_module() supplies the underlying tf.Operation
        :param in_tensor: tensor feeding the op
        :param out_tensor: tensor produced by the op
        """
        self.op = conn_op.get_module()
        self.in_tensor = in_tensor
        self.out_tensor = out_tensor
class Op(aimet_common.connected_graph.operation.Op):
    """ Subclass Op inherited from aimet_common.connected_graph.operation.Op """

    def __init__(self, name: str, dotted_name: str, output_shape: tf.TensorShape,
                 is_anonymous: bool, op_type: str, pattern_type, internal_ops: List[tf.Operation]):
        """
        Create a TensorFlow-specific connected-graph operation.

        :param name: operation name
        :param dotted_name: dotted operation name
        :param output_shape: shape of the operation's output product
        :param is_anonymous: True if the operation is anonymous
        :param op_type: operation type string
        :param pattern_type: pattern that matched this operation
        :param internal_ops: tf operations internal to this operation
        """
        super().__init__(name, dotted_name, output_shape, is_anonymous, op_type)
        self._output_op_node = None
        self._parameters = {}
        self._attributes = {}
        self._pattern_type = pattern_type
        self._internal_ops = internal_ops

    @property
    def output_op_node(self):
        """ tf op node producing this operation's output """
        return self._output_op_node

    @output_op_node.setter
    def output_op_node(self, op: tf.Operation):
        """ Record the tf op node producing this operation's output """
        self._output_op_node = op

    @property
    def pattern_type(self):
        """ Pattern type that was matched for this operation """
        return self._pattern_type

    @property
    def internal_ops(self) -> List[tf.Operation]:
        """ tf operations internal to the module behind this operation """
        return self._internal_ops

    def get_attribute(self, attribute_name: str):
        """ Look up an attribute by name; returns None when absent """
        return self._attributes.get(attribute_name)

    def add_attribute(self, attribute_name: str, attribute):
        """ Store an attribute under the given name """
        self._attributes[attribute_name] = attribute

    def add_param(self, param: str, product: Product):
        """ Register the product backing the named parameter """
        self._parameters[param] = product

    def get_param_product(self, param: str):
        """ Product for the named parameter, or None when unknown """
        return self._parameters.get(param)

    def get_tf_op_with_io_tensor(self) -> OpWithMetaInfoType:
        """
        Package this connected-graph op as OpWithMetaInfoType: the
        tf.Operation together with its input and output tensors.

        :return: OpWithMetaInfoType for this op
        """
        first_input_product = self.get_input_products()[0]
        input_tensor = first_input_product.tensor_dict[self]
        # The connected graph inserts an explicit branch op whenever an
        # output feeds more than one tf consumer, so exactly one consumer
        # is expected here.
        output_product = self.output
        assert len(output_product.consumers) == 1
        sole_consumer = output_product.consumers[0]
        output_tensor = output_product.tensor_dict[sole_consumer]
        return OpWithMetaInfoType(self, input_tensor, output_tensor)

    def get_input_product_index_of_parent(self, parent_op) -> int:
        """
        Index of the input product that connects parent_op to this op.

        :param parent_op: Parent op
        :return: index of the matching input product, or None if not found
        """
        for index, candidate in enumerate(self.get_input_products()):
            if candidate.producer == parent_op:
                return index
        return None
| 42.682432 | 120 | 0.680545 |
c40f3390116a677374fa93654d3101220562472b | 315 | py | Python | 1.-FlujoDeControl/fechas.py | vgdobon/python | d4826692856a4328d02c377a69ce73387baaf6e3 | [
"MIT"
] | null | null | null | 1.-FlujoDeControl/fechas.py | vgdobon/python | d4826692856a4328d02c377a69ce73387baaf6e3 | [
"MIT"
] | null | null | null | 1.-FlujoDeControl/fechas.py | vgdobon/python | d4826692856a4328d02c377a69ce73387baaf6e3 | [
"MIT"
] | null | null | null | year = int(input("Año: "))
import datetime

month = int(input("Mes: "))
day = int(input("Dia: "))


def _fecha_valida(y, m, d):
    """Return True when 1900 < y < 2021 and (y, m, d) is a real calendar date."""
    if not 1900 < y < 2021:
        return False
    try:
        datetime.date(y, m, d)
    except ValueError:
        # BUG FIX: the old "0 < day < 32" check accepted impossible dates
        # such as February 31; datetime.date rejects them.
        return False
    return True


while not _fecha_valida(year, month, day):
    print("Escribe un año correcto")
    year = int(input("Año: "))
    month = int(input("Mes: "))
    day = int(input("Dia: "))
print(year,"-",month,"-",day,sep="")
e23d76752cc11881faaf6aa0bef1ae72f920c7ca | 3,934 | py | Python | dev/Report/NEW_plot_velprofile.py | aakash30jan/Couette-Poiseuille_FlowCode | 3110d5d818cb8fdfb4959e58d9dcbc48db325122 | [
"CC-BY-4.0"
] | 9 | 2019-01-05T09:05:05.000Z | 2021-11-22T19:04:14.000Z | dev/Report/NEW_plot_velprofile.py | aakash30jan/Couette-Poiseuille_FlowCode | 3110d5d818cb8fdfb4959e58d9dcbc48db325122 | [
"CC-BY-4.0"
] | null | null | null | dev/Report/NEW_plot_velprofile.py | aakash30jan/Couette-Poiseuille_FlowCode | 3110d5d818cb8fdfb4959e58d9dcbc48db325122 | [
"CC-BY-4.0"
] | 3 | 2020-02-28T03:44:34.000Z | 2020-09-10T05:32:54.000Z | ###################
##################
#H*UTAU1/KIN_VIS,VW/UTAU1 #first line for plotting y+ against u+
#(YPAD(I)*UTAU1)/KIN_VIS,U(I)/UTAU2 #all lines for plotting y+ against u+
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 11}) #Font 11
def bc_selector(CASE):
    """Return the boundary conditions for one Couette-Poiseuille case.

    :param CASE: experiment case number, 1-18
    :returns: tuple (VW, H, DPDX) -- wall velocity, channel height and
              streamwise pressure gradient for that case
    :raises ValueError: for an unknown case number (the original printed a
             message with a Python 2 print statement and then crashed with
             NameError on the return)
    """
    # (VW, H, DPDX) per case -- values transcribed unchanged from the
    # original 72-line if/elif chain.
    boundary_conditions = {
        1:  (12.84, 66E-3, 0.000),
        2:  (12.84, 66E-3, -0.808),
        3:  (12.84, 66E-3, -1.486),
        4:  (12.84, 66E-3, -1.510),
        5:  (12.84, 66E-3, -1.960),
        6:  (8.59,  66E-3, -1.430),
        7:  (17.08, 101E-3, -3.548),
        8:  (12.84, 101E-3, -2.323),
        9:  (8.59,  101E-3, -1.212),
        10: (12.84, 66E-3, -4.830),
        11: (12.84, 66E-3, -7.500),
        12: (12.84, 66E-3, -14.30),
        13: (12.84, 66E-3, -18.50),
        14: (8.59,  66E-3, -20.80),
        15: (0.000, 66E-3, -13.14),
        16: (1.8,   30E-3, -0.48),
        17: (3.09,  30E-3, -0.60),
        18: (3.75,  30E-3, -0.66),
    }
    try:
        case_VW, case_H, case_DPDX = boundary_conditions[CASE]
    except KeyError:
        raise ValueError("Invalid Case Selected: %r" % (CASE,))
    return case_VW, case_H, case_DPDX
def plot(CASE):
    """Overlay the simulated velocity profile for one case on the current figure.

    Loads '../Simulated_Data/G6/Case_NN_sim.dat', shifts the profile up by
    (CASE - 1) so all cases stack on one plot, and draws it.

    :param CASE: experiment case number, 1-18
    :returns: shape of the loaded simulation data array
    """
    # BUG FIX: the original body was corrupted by IPython paste artifacts
    # ("...:" line prefixes), making it syntactically invalid; this is the
    # same logic with the artifacts removed.
    simstyles = ['-r', '-', '-', '-g', '-', '-', '-b', '-', '-',
                 '-y', '-', '-', '-m', '-', '-', '-k', '-', '-']
    # Kept (unused) for the disabled experimental-data overlay below.
    expstyles = ['*r', '*', '*', '*g', '*', '*', '*b', '*', '*',
                 '*y', '*', '*', '*m', '*', '*', '*k', '*', '*']
    if CASE < 10:
        caseChar = '0' + str(CASE)
    else:
        caseChar = str(CASE)
    simDataDir1 = '../Simulated_Data/G6/'
    expDataDir = '../Experimental_Data/'
    simDataFile = simDataDir1 + 'Case_' + caseChar + '_sim.dat'
    case_sim = np.loadtxt(simDataFile)
    # Offset each profile vertically by (CASE - 1) so the cases don't overlap.
    yvalue = case_sim[1:100, 1] + (CASE - 1)
    plt.plot(case_sim[1:100, 0], yvalue, simstyles[CASE - 1], label='Sim. Case ' + caseChar)
    # Experimental-data overlay, disabled in the original:
    # expDataFile = expDataDir + 'Case_' + caseChar + '_exp.dat'
    # case_exp = np.loadtxt(expDataFile)
    # plt.plot(case_exp[:, 0], case_exp[:, 1], expstyles[CASE - 1], label='Exp. Case ' + caseChar)
    return case_sim.shape
# Plot the simulated velocity profile for every case (1-18) on one figure.
for i in range(1,19):
    CASE=i
    plot(CASE)
# Decorate the shared figure, save it as EPS, then display it.
plt.xlabel('y/2h')
plt.ylabel('Velocity')
plt.title('Velocity Profile(NOT ACTUAL VALUES)')
plt.legend()
plt.savefig('Compare_VelocityProfile_ALL.eps')
plt.show()
| 31.472 | 101 | 0.448907 |
1447640d36912c97e36302182ca9c6fb383891c7 | 6,117 | py | Python | chainerui/views/result.py | chainer/chainerui | 91c5c26d9154a008079dbb0bcbf69b5590d105f7 | [
"MIT"
] | 185 | 2017-12-15T09:24:07.000Z | 2022-01-20T11:20:13.000Z | chainerui/views/result.py | chainer/chainerui | 91c5c26d9154a008079dbb0bcbf69b5590d105f7 | [
"MIT"
] | 191 | 2017-12-15T09:14:52.000Z | 2022-02-17T14:09:19.000Z | chainerui/views/result.py | chainer/chainerui | 91c5c26d9154a008079dbb0bcbf69b5590d105f7 | [
"MIT"
] | 29 | 2017-12-15T09:40:45.000Z | 2022-03-13T11:21:11.000Z | import datetime
from flask import jsonify
from flask import request
from flask.views import MethodView
from chainerui.database import db
from chainerui.models.project import Project
from chainerui.models.result import Result
from chainerui.tasks import collect_results
from chainerui.tasks import crawl_result
class ResultAPI(MethodView):
    """Flask MethodView exposing CRUD endpoints for training results.

    Handlers are routed with a ``project_id`` (and sometimes a result
    ``id``) URL parameter and respond with JSON.  A missing project or
    result is reported with HTTP 404 and the message
    'No interface defined for URL.'.
    """
    def get(self, id=None, project_id=None):
        """Return one result (``id`` given) or all results of a project.

        Without ``id``: if a ``path_name`` query parameter is present, the
        single result with that path is returned; otherwise new results are
        collected, each listed result is crawled for fresh data, and the
        serialized list (with at most ``logs_limit`` sampled logs each) is
        returned.
        """
        logs_limit = request.args.get('logs_limit', default=-1, type=int)
        # NOTE(review): ``type=bool`` is applied to the raw query string, so
        # any non-empty value (including "false") parses as True -- confirm
        # this is intended.
        is_unregistered = request.args.get(
            'is_unregistered', default=False, type=bool)
        project = db.session.query(Project).filter_by(
            id=project_id).first()
        if project is None:
            return jsonify({
                'project': None,
                'message': 'No interface defined for URL.'
            }), 404
        if id is None:
            # Lookup by explicit result path takes precedence over listing.
            path = request.args.get('path_name', default=None)
            if path is not None:
                result = db.session.query(Result).filter_by(
                    path_name=path).first()
                if result is None:
                    return jsonify({
                        'result': None,
                        'message': 'Result path \'%s\' is not found' % path
                    }), 400
                return jsonify({'result': result.serialize})
            # Discover any newly-appeared results before listing.
            collect_results(project)
            results = db.session.query(Result).\
                filter_by(project_id=project_id).\
                filter_by(is_unregistered=is_unregistered).\
                all()
            # NOTE: To improve performance, aggregate commit phase. By set
            # `commit=False`, implicit transaction is not closed, UPDATE query
            # is not committed. Consequently a process of serializing does not
            # have to call SELECT query again.
            for result in results:
                crawl_result(result, commit=False)
            db.session.commit()
            rs = [r.serialize_with_sampled_logs(logs_limit) for r in results]
            return jsonify({'results': rs})
        else:
            # Single-result fetch: only registered results are visible here.
            result = db.session.query(Result).\
                filter_by(id=id).\
                filter_by(is_unregistered=False).\
                first()
            if result is None:
                return jsonify({
                    'result': None,
                    'message': 'No interface defined for URL.'
                }), 404
            result = crawl_result(result)
            return jsonify({
                'result': result.serialize_with_sampled_logs(logs_limit)
            })
    def post(self, project_id=None):
        """Register a new result from a JSON body ``{'result': {...}}``.

        ``pathName`` is required and must not already be registered;
        ``name``, ``crawlable`` and ``logModifiedAt`` (epoch seconds) are
        optional.  Only the new result's id is returned, to keep the
        response small.
        """
        project = db.session.query(Project).filter_by(id=project_id).first()
        if project is None:
            return jsonify({
                'project': None,
                'message': 'No interface defined for URL.'
            }), 404
        data = request.get_json()
        result_json = data.get('result')
        path = result_json.get('pathName', '')
        if path == '':
            return jsonify({
                'result': None,
                'message': 'Path of the result is not set.'
            }), 400
        # Result paths are unique; reject duplicates with HTTP 400.
        result = db.session.query(Result).filter_by(path_name=path).first()
        if result is not None:
            return jsonify({
                'result': None,
                'message': 'Result path \'%s\' already registered.' % path
            }), 400
        name = result_json.get('name', None)
        crawlable = result_json.get('crawlable', True)
        log_modified_at = result_json.get('logModifiedAt', None)
        if log_modified_at is not None:
            # NOTE(review): fromtimestamp() interprets the value in the
            # server's local timezone -- confirm clients send it accordingly.
            log_modified_at = datetime.datetime.fromtimestamp(log_modified_at)
        result = Result.create(
            path_name=path, name=name, project_id=project_id,
            log_modified_at=log_modified_at, crawlable=crawlable)
        # don't return all data to reduce data size
        return jsonify({
            'result': {'id': result.id}
        })
    def put(self, id, project_id=None):
        """Update a single result's ``name`` and/or ``isUnregistered`` flag."""
        result = db.session.query(Result).filter_by(id=id).first()
        if result is None:
            response = jsonify({
                'result': None, 'message': 'No interface defined for URL.'
            })
            return response, 404
        request_json = request.get_json()
        request_result = request_json.get('result')
        name = request_result.get('name', None)
        if name is not None:
            result.name = name
        is_unregistered = request_result.get('isUnregistered', None)
        if is_unregistered is not None:
            result.is_unregistered = is_unregistered
        db.session.add(result)
        db.session.commit()
        return jsonify({'result': result.serialize})
    def patch(self, project_id=None):
        """Bulk-update ``isUnregistered`` for several results in one commit.

        Items without an ``id`` or with an unknown id are skipped silently;
        the response lists only the results that were actually touched.
        """
        request_json = request.get_json()
        request_results = request_json.get('results')
        responses = []
        for request_item in request_results:
            # NOTE(review): ``id`` shadows the builtin inside this loop.
            id = request_item.get('id', None)
            if id is None:
                continue
            result = db.session.query(Result).filter_by(id=id).first()
            if result is None:
                continue
            is_unregistered = request_item.get('isUnregistered', None)
            if is_unregistered is not None:
                result.is_unregistered = is_unregistered
            db.session.add(result)
            responses.append({
                'id': result.id,
                'is_unregistered': result.is_unregistered,
            })
        db.session.commit()
        return jsonify({'results': responses})
    def delete(self, id, project_id=None):
        """Delete the result with the given ``id`` and return its snapshot."""
        result = db.session.query(Result).filter_by(id=id).first()
        if result is None:
            response = jsonify({
                'result': None, 'message': 'No interface defined for URL.'
            })
            return response, 404
        db.session.delete(result)
        db.session.commit()
        return jsonify({'result': result.serialize})
| 33.79558 | 78 | 0.558117 |
74785e8cf27a7755beb611bb81d9b32164aceb63 | 1,912 | py | Python | promise12.py | zyody/vnet.pytorch | 20113f022aff784fbb4f267a8a21dab7abaf3015 | [
"BSD-3-Clause"
] | 3 | 2018-08-11T12:51:56.000Z | 2019-06-26T08:13:44.000Z | promise12.py | zyody/vnet.pytorch | 20113f022aff784fbb4f267a8a21dab7abaf3015 | [
"BSD-3-Clause"
] | null | null | null | promise12.py | zyody/vnet.pytorch | 20113f022aff784fbb4f267a8a21dab7abaf3015 | [
"BSD-3-Clause"
] | 2 | 2019-03-27T11:16:04.000Z | 2022-03-22T03:03:29.000Z | import torch
import torch.utils.data as data
import numpy as np
class PROMISE12(data.Dataset):
    """PROMISE12 prostate-MRI dataset keyed by file name.

    ``images`` (and optionally ``GT``) are dicts mapping '*.mhd' file names
    to raw 3-D volumes; samples are addressed by position among the dict's
    keys (insertion order).
    """

    def __init__(self, mode, images, GT=None, transform=None, GT_transform=None):
        if images is None:
            raise RuntimeError("images must be set")
        self.mode = mode
        self.images = images
        self.GT = GT
        self.transform = transform
        self.GT_transform = GT_transform

    @staticmethod
    def _prepare(volume):
        """Reshape a raw (x, y, z) volume to a (1, z, y, x) float32 array."""
        x_dim, y_dim, z_dim = volume.shape
        return volume.reshape((1, z_dim, y_dim, x_dim)).astype(np.float32)

    def __getitem__(self, index):
        """
        Args:
            index (int): position of the sample among the images dict's keys.

        Returns:
            tuple: (image, id) or (image, GT, id) in train/test mode,
            (image, id) in infer mode; None for any other mode.
        """
        if self.mode in ("train", "test"):
            sample_id = list(self.images.keys())[index]
            volume = self._prepare(self.images[sample_id])
            if self.transform is not None:
                # The transform itself is not applied (as in the original);
                # the array is only wrapped as a tensor.
                volume = torch.from_numpy(volume)
            if self.GT is None:
                return volume, sample_id
            label = self.GT[sample_id[:-4] + '_segmentation' + '.mhd']
            if self.GT_transform is not None:
                label = self.GT_transform(label)
            return volume, label, sample_id
        if self.mode == "infer":
            sample_id = list(self.images.keys())[index]
            return self._prepare(self.images[sample_id]), sample_id

    def __len__(self):
        return len(self.images)
| 34.763636 | 90 | 0.527197 |
0a1ba0c4807a5ef3cec03d276d9ca24adb1aef05 | 14,844 | py | Python | tests/pools/test_pool_puzzles_lifecycle.py | Fibo-Network/fibo-blockchain | 34471efc081a52443e874749bb8ea3dc50b59891 | [
"Apache-2.0"
] | null | null | null | tests/pools/test_pool_puzzles_lifecycle.py | Fibo-Network/fibo-blockchain | 34471efc081a52443e874749bb8ea3dc50b59891 | [
"Apache-2.0"
] | null | null | null | tests/pools/test_pool_puzzles_lifecycle.py | Fibo-Network/fibo-blockchain | 34471efc081a52443e874749bb8ea3dc50b59891 | [
"Apache-2.0"
] | null | null | null | import copy
from typing import List
from unittest import TestCase
from blspy import AugSchemeMPL, G1Element, G2Element, PrivateKey
from fibo.types.blockchain_format.program import Program
from fibo.types.blockchain_format.sized_bytes import bytes32
from fibo.types.blockchain_format.coin import Coin
from fibo.types.coin_spend import CoinSpend
from fibo.types.spend_bundle import SpendBundle
from fibo.util.ints import uint64, uint32
from fibo.consensus.default_constants import DEFAULT_CONSTANTS
from fibo.wallet.puzzles.p2_delegated_puzzle_or_hidden_puzzle import (
puzzle_for_pk,
solution_for_conditions,
calculate_synthetic_secret_key,
DEFAULT_HIDDEN_PUZZLE_HASH,
)
from fibo.wallet.puzzles.p2_conditions import puzzle_for_conditions
from fibo.wallet.puzzles import singleton_top_layer
from fibo.pools.pool_wallet_info import PoolState
from fibo.pools.pool_puzzles import (
create_waiting_room_inner_puzzle,
create_pooling_inner_puzzle,
create_p2_singleton_puzzle,
create_absorb_spend,
create_travel_spend,
get_most_recent_singleton_coin_from_coin_spend,
get_delayed_puz_info_from_launcher_spend,
SINGLETON_MOD_HASH,
launcher_id_to_p2_puzzle_hash,
is_pool_singleton_inner_puzzle,
get_pubkey_from_member_inner_puzzle,
solution_to_extra_data,
uncurry_pool_waitingroom_inner_puzzle,
get_seconds_and_delayed_puzhash_from_p2_singleton_puzzle,
)
from tests.util.key_tool import KeyTool
from tests.clvm.test_puzzles import (
public_key_for_index,
secret_exponent_for_index,
)
from tests.clvm.coin_store import CoinStore, CoinTimestamp, BadSpendBundleError
"""
This test suite aims to test:
- fibo.pools.pool_puzzles.py
- fibo.wallet.puzzles.pool_member_innerpuz.clvm
- fibo.wallet.puzzles.pool_waiting_room_innerpuz.clvm
"""
# Helper function
def sign_delegated_puz(del_puz: Program, coin: Coin) -> G2Element:
    """Sign *del_puz* for *coin* with the synthetic secret key of test key index 1."""
    base_key = PrivateKey.from_bytes(
        secret_exponent_for_index(1).to_bytes(32, "big"),
    )
    synthetic_key = calculate_synthetic_secret_key(base_key, DEFAULT_HIDDEN_PUZZLE_HASH)
    # AGG_SIG_ME message: delegated puzzle hash + coin id + network extra data.
    message = del_puz.get_tree_hash() + coin.name() + DEFAULT_CONSTANTS.AGG_SIG_ME_ADDITIONAL_DATA
    return AugSchemeMPL.sign(synthetic_key, message)
class TestPoolPuzzles(TestCase):
    def test_pool_lifecycle(self):
        """Walk a pool singleton through its full lifecycle: launch, travel,
        absorb rewards, enter/leave the waiting room, plus negative tests.

        Fix: the negative tests previously passed silently when the spend
        unexpectedly succeeded; each now fails explicitly via a try/else.
        """
        # START TESTS
        # Generate starting info
        key_lookup = KeyTool()
        sk: PrivateKey = PrivateKey.from_bytes(
            secret_exponent_for_index(1).to_bytes(32, "big"),
        )
        pk: G1Element = G1Element.from_bytes(public_key_for_index(1, key_lookup))
        starting_puzzle: Program = puzzle_for_pk(pk)
        starting_ph: bytes32 = starting_puzzle.get_tree_hash()
        # Get our starting standard coin created
        START_AMOUNT: uint64 = 1023
        coin_db = CoinStore()
        time = CoinTimestamp(10000000, 1)
        coin_db.farm_coin(starting_ph, time, START_AMOUNT)
        starting_coin: Coin = next(coin_db.all_unspent_coins())
        # LAUNCHING
        # Create the escaping inner puzzle
        GENESIS_CHALLENGE = bytes32.fromhex("ccd5bb71183532bff220ba46c268991a3ff07eb358e8255a65c30a2dce0e5fbb")
        launcher_coin = singleton_top_layer.generate_launcher_coin(
            starting_coin,
            START_AMOUNT,
        )
        DELAY_TIME = uint64(60800)
        DELAY_PH = starting_ph
        launcher_id = launcher_coin.name()
        relative_lock_height: uint32 = uint32(5000)
        # use a dummy pool state
        pool_state = PoolState(
            owner_pubkey=pk,
            pool_url="",
            relative_lock_height=relative_lock_height,
            state=3,  # farming to pool
            target_puzzle_hash=starting_ph,
            version=1,
        )
        # create a new dummy pool state for travelling
        target_pool_state = PoolState(
            owner_pubkey=pk,
            pool_url="",
            relative_lock_height=relative_lock_height,
            state=2,  # Leaving pool
            target_puzzle_hash=starting_ph,
            version=1,
        )
        # Standard format comment
        comment = Program.to([("p", bytes(pool_state)), ("t", DELAY_TIME), ("h", DELAY_PH)])
        pool_wr_innerpuz: bytes32 = create_waiting_room_inner_puzzle(
            starting_ph,
            relative_lock_height,
            pk,
            launcher_id,
            GENESIS_CHALLENGE,
            DELAY_TIME,
            DELAY_PH,
        )
        pool_wr_inner_hash = pool_wr_innerpuz.get_tree_hash()
        pooling_innerpuz: Program = create_pooling_inner_puzzle(
            starting_ph,
            pool_wr_inner_hash,
            pk,
            launcher_id,
            GENESIS_CHALLENGE,
            DELAY_TIME,
            DELAY_PH,
        )
        # Driver tests
        assert is_pool_singleton_inner_puzzle(pooling_innerpuz)
        assert is_pool_singleton_inner_puzzle(pool_wr_innerpuz)
        assert get_pubkey_from_member_inner_puzzle(pooling_innerpuz) == pk
        # Generating launcher information
        conditions, launcher_coinsol = singleton_top_layer.launch_conditions_and_coinsol(
            starting_coin, pooling_innerpuz, comment, START_AMOUNT
        )
        # Creating solution for standard transaction
        delegated_puzzle: Program = puzzle_for_conditions(conditions)
        full_solution: Program = solution_for_conditions(conditions)
        starting_coinsol = CoinSpend(
            starting_coin,
            starting_puzzle,
            full_solution,
        )
        # Create the spend bundle
        sig: G2Element = sign_delegated_puz(delegated_puzzle, starting_coin)
        spend_bundle = SpendBundle(
            [starting_coinsol, launcher_coinsol],
            sig,
        )
        # Spend it!
        coin_db.update_coin_store_for_spend_bundle(
            spend_bundle,
            time,
            DEFAULT_CONSTANTS.MAX_BLOCK_COST_CLVM,
        )
        # Test that we can retrieve the extra data
        assert get_delayed_puz_info_from_launcher_spend(launcher_coinsol) == (DELAY_TIME, DELAY_PH)
        assert solution_to_extra_data(launcher_coinsol) == pool_state
        # TEST TRAVEL AFTER LAUNCH
        # fork the state
        fork_coin_db: CoinStore = copy.deepcopy(coin_db)
        post_launch_coinsol, _ = create_travel_spend(
            launcher_coinsol,
            launcher_coin,
            pool_state,
            target_pool_state,
            GENESIS_CHALLENGE,
            DELAY_TIME,
            DELAY_PH,
        )
        # Spend it!
        fork_coin_db.update_coin_store_for_spend_bundle(
            SpendBundle([post_launch_coinsol], G2Element()),
            time,
            DEFAULT_CONSTANTS.MAX_BLOCK_COST_CLVM,
        )
        # HONEST ABSORB
        time = CoinTimestamp(10000030, 2)
        # create the farming reward
        p2_singleton_puz: Program = create_p2_singleton_puzzle(
            SINGLETON_MOD_HASH,
            launcher_id,
            DELAY_TIME,
            DELAY_PH,
        )
        p2_singleton_ph: bytes32 = p2_singleton_puz.get_tree_hash()
        assert uncurry_pool_waitingroom_inner_puzzle(pool_wr_innerpuz) == (
            starting_ph,
            relative_lock_height,
            pk,
            p2_singleton_ph,
        )
        assert launcher_id_to_p2_puzzle_hash(launcher_id, DELAY_TIME, DELAY_PH) == p2_singleton_ph
        assert get_seconds_and_delayed_puzhash_from_p2_singleton_puzzle(p2_singleton_puz) == (DELAY_TIME, DELAY_PH)
        coin_db.farm_coin(p2_singleton_ph, time, 1750000000000)
        coin_sols: List[CoinSpend] = create_absorb_spend(
            launcher_coinsol,
            pool_state,
            launcher_coin,
            2,
            GENESIS_CHALLENGE,
            DELAY_TIME,
            DELAY_PH,  # height
        )
        # Spend it!
        coin_db.update_coin_store_for_spend_bundle(
            SpendBundle(coin_sols, G2Element()),
            time,
            DEFAULT_CONSTANTS.MAX_BLOCK_COST_CLVM,
        )
        # ABSORB A NON EXISTENT REWARD (Negative test)
        last_coinsol: CoinSpend = list(
            filter(
                lambda e: e.coin.amount == START_AMOUNT,
                coin_sols,
            )
        )[0]
        coin_sols: List[CoinSpend] = create_absorb_spend(
            last_coinsol,
            pool_state,
            launcher_coin,
            2,
            GENESIS_CHALLENGE,
            DELAY_TIME,
            DELAY_PH,  # height
        )
        # filter for only the singleton solution
        singleton_coinsol: CoinSpend = list(
            filter(
                lambda e: e.coin.amount == START_AMOUNT,
                coin_sols,
            )
        )[0]
        # Spend it and hope it fails!
        try:
            coin_db.update_coin_store_for_spend_bundle(
                SpendBundle([singleton_coinsol], G2Element()),
                time,
                DEFAULT_CONSTANTS.MAX_BLOCK_COST_CLVM,
            )
        except BadSpendBundleError as e:
            assert str(e) == "condition validation failure Err.ASSERT_ANNOUNCE_CONSUMED_FAILED"
        else:
            # Previously a successful spend here went unnoticed.
            raise AssertionError("absorbing a non-existent reward unexpectedly succeeded")
        # SPEND A NON-REWARD P2_SINGLETON (Negative test)
        # create the dummy coin
        non_reward_p2_singleton = Coin(
            bytes32(32 * b"3"),
            p2_singleton_ph,
            uint64(1337),
        )
        coin_db._add_coin_entry(non_reward_p2_singleton, time)
        # construct coin solution for the p2_singleton coin
        bad_coinsol = CoinSpend(
            non_reward_p2_singleton,
            p2_singleton_puz,
            Program.to(
                [
                    pooling_innerpuz.get_tree_hash(),
                    non_reward_p2_singleton.name(),
                ]
            ),
        )
        # Spend it and hope it fails!
        try:
            coin_db.update_coin_store_for_spend_bundle(
                SpendBundle([singleton_coinsol, bad_coinsol], G2Element()),
                time,
                DEFAULT_CONSTANTS.MAX_BLOCK_COST_CLVM,
            )
        except BadSpendBundleError as e:
            assert str(e) == "condition validation failure Err.ASSERT_ANNOUNCE_CONSUMED_FAILED"
        else:
            raise AssertionError("spending a non-reward p2_singleton unexpectedly succeeded")
        # ENTER WAITING ROOM
        # find the singleton
        singleton = get_most_recent_singleton_coin_from_coin_spend(last_coinsol)
        # get the relevant coin solution
        travel_coinsol, _ = create_travel_spend(
            last_coinsol,
            launcher_coin,
            pool_state,
            target_pool_state,
            GENESIS_CHALLENGE,
            DELAY_TIME,
            DELAY_PH,
        )
        # Test that we can retrieve the extra data
        assert solution_to_extra_data(travel_coinsol) == target_pool_state
        # sign the serialized state
        data = Program.to(bytes(target_pool_state)).get_tree_hash()
        sig: G2Element = AugSchemeMPL.sign(
            sk,
            (data + singleton.name() + DEFAULT_CONSTANTS.AGG_SIG_ME_ADDITIONAL_DATA),
        )
        # Spend it!
        coin_db.update_coin_store_for_spend_bundle(
            SpendBundle([travel_coinsol], sig),
            time,
            DEFAULT_CONSTANTS.MAX_BLOCK_COST_CLVM,
        )
        # ESCAPE TOO FAST (Negative test)
        # find the singleton
        singleton = get_most_recent_singleton_coin_from_coin_spend(travel_coinsol)
        # get the relevant coin solution
        return_coinsol, _ = create_travel_spend(
            travel_coinsol,
            launcher_coin,
            target_pool_state,
            pool_state,
            GENESIS_CHALLENGE,
            DELAY_TIME,
            DELAY_PH,
        )
        # sign the serialized target state
        sig = AugSchemeMPL.sign(
            sk,
            (data + singleton.name() + DEFAULT_CONSTANTS.AGG_SIG_ME_ADDITIONAL_DATA),
        )
        # Spend it and hope it fails!
        try:
            coin_db.update_coin_store_for_spend_bundle(
                SpendBundle([return_coinsol], sig),
                time,
                DEFAULT_CONSTANTS.MAX_BLOCK_COST_CLVM,
            )
        except BadSpendBundleError as e:
            assert str(e) == "condition validation failure Err.ASSERT_HEIGHT_RELATIVE_FAILED"
        else:
            raise AssertionError("escaping the waiting room before the lock height unexpectedly succeeded")
        # ABSORB WHILE IN WAITING ROOM
        time = CoinTimestamp(10000060, 3)
        # create the farming reward
        coin_db.farm_coin(p2_singleton_ph, time, 1750000000000)
        # generate relevant coin solutions
        coin_sols: List[CoinSpend] = create_absorb_spend(
            travel_coinsol,
            target_pool_state,
            launcher_coin,
            3,
            GENESIS_CHALLENGE,
            DELAY_TIME,
            DELAY_PH,  # height
        )
        # Spend it!
        coin_db.update_coin_store_for_spend_bundle(
            SpendBundle(coin_sols, G2Element()),
            time,
            DEFAULT_CONSTANTS.MAX_BLOCK_COST_CLVM,
        )
        # LEAVE THE WAITING ROOM
        time = CoinTimestamp(20000000, 10000)
        # find the singleton
        singleton_coinsol: CoinSpend = list(
            filter(
                lambda e: e.coin.amount == START_AMOUNT,
                coin_sols,
            )
        )[0]
        singleton: Coin = get_most_recent_singleton_coin_from_coin_spend(singleton_coinsol)
        # get the relevant coin solution
        return_coinsol, _ = create_travel_spend(
            singleton_coinsol,
            launcher_coin,
            target_pool_state,
            pool_state,
            GENESIS_CHALLENGE,
            DELAY_TIME,
            DELAY_PH,
        )
        # Test that we can retrieve the extra data
        assert solution_to_extra_data(return_coinsol) == pool_state
        # sign the serialized target state
        data = Program.to([pooling_innerpuz.get_tree_hash(), START_AMOUNT, bytes(pool_state)]).get_tree_hash()
        sig: G2Element = AugSchemeMPL.sign(
            sk,
            (data + singleton.name() + DEFAULT_CONSTANTS.AGG_SIG_ME_ADDITIONAL_DATA),
        )
        # Spend it!
        coin_db.update_coin_store_for_spend_bundle(
            SpendBundle([return_coinsol], sig),
            time,
            DEFAULT_CONSTANTS.MAX_BLOCK_COST_CLVM,
        )
        # ABSORB ONCE MORE FOR GOOD MEASURE
        time = CoinTimestamp(20000000, 10005)
        # create the farming reward
        coin_db.farm_coin(p2_singleton_ph, time, 1750000000000)
        coin_sols: List[CoinSpend] = create_absorb_spend(
            return_coinsol,
            pool_state,
            launcher_coin,
            10005,
            GENESIS_CHALLENGE,
            DELAY_TIME,
            DELAY_PH,  # height
        )
        # Spend it!
        coin_db.update_coin_store_for_spend_bundle(
            SpendBundle(coin_sols, G2Element()),
            time,
            DEFAULT_CONSTANTS.MAX_BLOCK_COST_CLVM,
        )
| 35.511962 | 115 | 0.628335 |
207fd47a891b33d42002da7cbc4ca5274738c8f7 | 4,765 | py | Python | misc/rbm_pytorch.py | tsurubee/bmpy | 52cc3c41ee18c54c975edca3b7cda416177e5edc | [
"MIT"
] | null | null | null | misc/rbm_pytorch.py | tsurubee/bmpy | 52cc3c41ee18c54c975edca3b7cda416177e5edc | [
"MIT"
] | null | null | null | misc/rbm_pytorch.py | tsurubee/bmpy | 52cc3c41ee18c54c975edca3b7cda416177e5edc | [
"MIT"
] | null | null | null | import time
import numpy as np
import torch
import sqapy
class RBM:
    """Bernoulli restricted Boltzmann machine.

    Supports two trainers: classic contrastive divergence ("cd") and
    simulated quantum annealing via the sqapy package ("sqa").

    Attributes:
        weight: (n_visible, n_hidden) coupling matrix.
        b: visible-layer bias vector of length n_visible.
        c: hidden-layer bias vector of length n_hidden.
        energy_records: mean energy per epoch from the last train() call.
    """
    def __init__(self, n_visible=784, n_hidden=2, alpha=0.01, device='cpu'):
        self.n_visible = n_visible
        self.n_hidden = n_hidden
        self.alpha = alpha  # learning rate for parameter updates
        self.device = device
        self.data = None
        # All parameters start uniformly distributed in [-1, 1).
        self.weight = torch.FloatTensor(self.n_visible, self.n_hidden).uniform_(-1, 1).to(self.device)
        self.b = torch.FloatTensor(self.n_visible).uniform_(-1, 1).to(self.device)
        self.c = torch.FloatTensor(self.n_hidden).uniform_(-1, 1).to(self.device)
        self.energy_records = []
    def train(self, data, n_epochs=2, n_CD=1, sampler="cd"):
        """Fit the model to *data*.

        Args:
            data: sequence of binary visible vectors (one per sample).
            n_epochs: number of passes over the data.
            n_CD: Gibbs steps per update (contrastive divergence only).
            sampler: "cd" or "sqa".

        Raises:
            ValueError: for an unknown *sampler* (previously this was
                silently ignored and "Training finished" still printed).
        """
        if sampler not in ("cd", "sqa"):
            raise ValueError("unknown sampler {!r}; expected 'cd' or 'sqa'".format(sampler))
        self.energy_records.clear()
        self.data = data
        if sampler == "cd":
            self.__contrastive_divergence(self.data, n_epochs, n_CD)
        else:
            self.__sqa(self.data, n_epochs)
        print("Training finished")
    def sample(self, n_iter=5, v_init=None):
        """Run *n_iter* alternating Gibbs steps; return the final (v, h) pair.

        Args:
            n_iter: number of Gibbs sweeps, must be >= 1.
            v_init: optional starting visible configuration (random if omitted).

        Raises:
            ValueError: if n_iter < 1 (previously an UnboundLocalError on h_t).
        """
        if n_iter < 1:
            raise ValueError("n_iter must be >= 1")
        if v_init is None:
            v_init = torch.randint(2, size=(1, self.n_visible)).float().to(self.device)
        v_t = v_init.view(self.n_visible)
        for _ in range(n_iter):
            h_t = self.__forward(v_t)
            v_t = self.__backward(h_t)
        return v_t, h_t
    def __sqa(self, data, n_epochs, batch_size=10000):
        """Train with sqapy's simulated-quantum-annealing sampler, one batch mean per step."""
        train_time = []
        for e in range(n_epochs):
            self.energy_list = []
            start = time.time()
            for i in range(0, data.shape[0], batch_size):
                batch = data[i:i + batch_size]
                if len(batch) != batch_size:
                    break  # drop the trailing partial batch
                v_0 = batch.mean(axis=0)
                h0_sampled = self.__forward(v_0)
                b = self.b.numpy()
                c = self.c.numpy()
                weight = self.weight.numpy()
                model = sqapy.BipartiteGraph(b, c, weight)
                sampler = sqapy.SQASampler(model, steps=100)
                _, states = sampler.sample()
                # The sampler returns one flat state: visibles first, then hiddens.
                v_sampled = torch.from_numpy(np.array(states[0][:len(self.b)])).float()
                h_sampled = torch.from_numpy(np.array(states[0][len(self.b):])).float()
                self.__update_params(v_0, v_sampled, h0_sampled, h_sampled)
                self.energy_list.append(self._energy(v_0, h_sampled).item())
            end = time.time()
            avg_energy = np.mean(self.energy_list)
            print("[epoch {}] takes {:.2f}s, average energy={}".format(
                e, end - start, avg_energy))
            self.energy_records.append(avg_energy)
            train_time.append(end - start)
        print("Average Training Time: {:.2f}".format(np.mean(train_time)))
    def __contrastive_divergence(self, data, n_epochs, n_CD):
        """Classic CD-k training: k Gibbs steps starting from each data vector."""
        train_time = []
        for e in range(n_epochs):
            self.energy_list = []
            start = time.time()
            for v_0 in data:
                h0_sampled = self.__forward(v_0)
                h_sampled = h0_sampled
                for _ in range(n_CD):
                    v_sampled = self.__backward(h_sampled)
                    h_sampled = self.__forward(v_sampled)
                self.__update_params(v_0, v_sampled, h0_sampled, h_sampled)
                self.energy_list.append(self._energy(v_0, h_sampled).item())
            end = time.time()
            avg_energy = np.mean(self.energy_list)
            print("[epoch {}] takes {:.2f}s, average energy={}".format(
                e, end - start, avg_energy))
            self.energy_records.append(avg_energy)
            train_time.append(end - start)
        print("Average Training Time: {:.2f}".format(np.mean(train_time)))
    def __update_params(self, v_0, v_sampled, h0, h_sampled):
        # Gradient step on the difference between data and model statistics.
        self.weight += self.alpha * \
            (torch.matmul(v_0.view(-1, 1), h0.view(1, -1)) -
             torch.matmul(v_sampled.view(-1, 1), h_sampled.view(1, -1)))
        self.b += self.alpha * (v_0 - v_sampled)
        self.c += self.alpha * (h0 - h_sampled)
    def __forward(self, v):
        # P(h=1 | v), then a Bernoulli draw.
        p_h = torch.sigmoid(
            torch.matmul(torch.t(self.weight), v) + self.c)
        return self.__sampling(p_h)
    def __backward(self, h):
        # P(v=1 | h), then a Bernoulli draw.
        p_v = torch.sigmoid(torch.matmul(self.weight, h) + self.b)
        return self.__sampling(p_v)
    def __sampling(self, p):
        # Element-wise Bernoulli sample: 1 where rand <= p, else 0.
        dim = p.shape[0]
        true_list = torch.rand(dim).to(self.device) <= p
        sampled = torch.zeros(dim).to(self.device)
        sampled[true_list] = 1
        return sampled
    def _energy(self, v, h):
        # E(v, h) = -b.v - c.h - v^T W h
        return - torch.dot(self.b, v) - torch.dot(self.c, h) \
            - torch.matmul(torch.matmul(torch.t(v), self.weight), h)
| 39.380165 | 102 | 0.562644 |
a5a4c85ce4fa5d768f17e54130c1d7f5b95d492f | 2,499 | py | Python | chess/main.py | underscoredam/friday-classes | 4e2988edd3802a7d5dac554fd12c94eb85c7d116 | [
"MIT"
] | null | null | null | chess/main.py | underscoredam/friday-classes | 4e2988edd3802a7d5dac554fd12c94eb85c7d116 | [
"MIT"
] | null | null | null | chess/main.py | underscoredam/friday-classes | 4e2988edd3802a7d5dac554fd12c94eb85c7d116 | [
"MIT"
] | null | null | null | import numpy as np
# Declare constants
# Numeric piece codes stored on the board; white pieces are positive,
# black pieces use the negated values.
PAWN = 1
KNIGHT = 6
ROOK = 5
KING = 2
QUEEN = 3
BISHOP = 4
# Maps algebraic-notation piece letters (P, K, Q, B, R, N) to the codes above.
letter_mapper = {
    'P': 1,
    'K': 2,
    'Q': 3,
    'B': 4,
    'R': 5,
    'N': 6,
}
def print_board(board):
    """Print the board flipped vertically so rank 1 appears at the bottom."""
    print(np.flip(board, axis=0))
def game_is_complete(board, turn):
    """Returns whether the game has ended."""
    # TODO: implement end-of-game detection (checkmate/stalemate); until then
    # the main loop never terminates on its own.
    return False
def transform_location(location):
    """Convert algebraic coordinates (e.g. 'd2') to (row, col) matrix indices."""
    file_letter, rank_digit = location
    row = int(rank_digit) - 1
    col = ord(file_letter) - ord('a')
    return (row, col)
def transform_letter_to_piece(letter):
    """Map a piece letter (any case) to its numeric piece code."""
    return letter_mapper[letter.upper()]
def transform_notation(notation):
    """Parse a move like 'Nd2c4', 'Nd2xc4' or 'Nd2xc4+'.

    Returns:
        tuple: (piece_code, start, end, is_capture, is_check) where start/end
        are (row, col) matrix coordinates.
    """
    piece = transform_letter_to_piece(notation[0])
    start = transform_location(notation[1:3])
    # Use '==' (value equality) instead of 'is' (object identity): the old
    # identity check only worked because CPython happens to intern one-char
    # string slices, which is an implementation detail.
    is_capture = (notation[3] == 'x')
    if is_capture:
        end = notation[4:6]
    else:
        end = notation[3:5]
    end = transform_location(end)
    is_check = (notation[-1] == '+')
    return piece, start, end, is_capture, is_check
def init():
    """Initializes a new board and set the pieces.

    Returns:
        dict: an "object" that contains the turn and the board
    """
    # Plain int: the np.int alias was deprecated and then removed in
    # NumPy 1.24, so dtype=np.int crashes on current NumPy; dtype=int is
    # the documented equivalent.
    board = np.zeros(shape=(8, 8), dtype=int)
    board[0] = [ROOK, KNIGHT, BISHOP, QUEEN, KING, BISHOP, KNIGHT, ROOK]
    board[1] = [PAWN] * 8
    board[7] = -board[0]  # black mirrors white with negated codes
    board[6] = -board[1]
    turn = 1  # 1 or -1. 1 for white, -1 for black
    state = {
        "turn": turn,
        "board": board
    }
    return state
def move_piece(board, initial_pos, final_pos):
    """Return a copy of *board* with the piece at initial_pos moved to final_pos."""
    updated = np.array(board)  # work on a copy; the caller's board is untouched
    moving_piece = updated[initial_pos[0]][initial_pos[1]]
    updated[initial_pos[0]][initial_pos[1]] = 0
    updated[final_pos[0]][final_pos[1]] = moving_piece
    return updated
def main():
    """Interactive two-player loop: print the board, read a move, apply it."""
    state = init()
    turn = state['turn']
    board = state['board']
    while not game_is_complete(board, turn):
        print_board(board)
        if turn == 1:
            print('White to move: ')
        else:
            print('Black to move: ')
        notation = input()
        piece, start, end, is_capture, is_check = transform_notation(notation)
        piece = turn * piece  # Set the color for this piece
        # NOTE(review): piece/is_capture/is_check are parsed but never
        # validated against the board before the move is applied.
        board = move_piece(board, start, end)
        turn = -turn  # alternate sides
main()
| 18.931818 | 102 | 0.593838 |
d99c8fc3619daae2e3cbb18671c5cd6b7c9a0466 | 1,527 | py | Python | cabinet/exceptions.py | crgwilson/cabinet | b09e4a45656495ccd168d2644edee3106005909f | [
"MIT"
] | null | null | null | cabinet/exceptions.py | crgwilson/cabinet | b09e4a45656495ccd168d2644edee3106005909f | [
"MIT"
] | null | null | null | cabinet/exceptions.py | crgwilson/cabinet | b09e4a45656495ccd168d2644edee3106005909f | [
"MIT"
] | null | null | null | from logging import getLogger as get_logger
from typing import Any
logger = get_logger(__name__)
class CabinetException(Exception):
    """Base class for all cabinet errors; logs the error as soon as it is built."""
    def __init__(self, *args: Any, **kwargs: Any):
        # Forward positional args to Exception so e.args (and pickling) work;
        # previously they were silently dropped.
        super().__init__(*args)
        # NOTE(review): logger.exception() expects to run inside an `except`
        # block; at construction time there is usually no active exception,
        # so the attached traceback logs as "NoneType: None" — confirm
        # whether logger.error is the intended call.
        logger.exception(str(self))
class MalformedAuthHeader(CabinetException):
    """Raised when an Authorization header cannot be parsed."""
    def __init__(self, header: str) -> None:
        # The attribute must exist before super().__init__() runs, because
        # the base class stringifies the exception while logging it.
        self.header = header
        super().__init__()
    def __str__(self) -> str:
        return "Received request with malformed auth header {}".format(self.header)
class UnsupportedAuthType(CabinetException):
    """Raised for an Authorization scheme this service does not support."""
    def __init__(self, auth_type: str) -> None:
        # Attribute first: the base class stringifies self while logging.
        self.auth_type = auth_type
        super().__init__()
    def __str__(self) -> str:
        return "Received request with unsupported auth type {}".format(self.auth_type)
class MissingConfiguration(CabinetException):
    """Raised when a required key is absent from the app configuration."""
    def __init__(self, key: str) -> None:
        # Attribute first: the base class stringifies self while logging.
        self.key = key
        super().__init__()
    def __str__(self) -> str:
        return "Required key {} not found in cabinet app configuration".format(self.key)
class InvalidToken(CabinetException):
    """Raised when a request carries an auth token that fails validation."""
    def __init__(self, token: str) -> None:
        # Attribute first: the base class stringifies self while logging.
        self.token = token
        super().__init__()
    def __str__(self) -> str:
        return "Received request with invalid auth token {}".format(self.token)
class IncorrectUsernameOrPassword(CabinetException):
    """Raised when a login attempt supplies bad credentials."""
    def __init__(self, username: str) -> None:
        # Attribute first: the base class stringifies self while logging.
        self.username = username
        super().__init__()
    def __str__(self) -> str:
        return "Failed login attempt for user {} - Incorrect Username or Password".format(self.username)
| 27.763636 | 96 | 0.675835 |
670f5163f4236a1558a8459afe15db4ab3d01fb1 | 4,155 | py | Python | Nets/TTNetParallel.py | AndresOtero/TensorDecompositionMachineLearning | 455f16b405ec9d031999b0ebf9c5a68d3c20b233 | [
"MIT"
] | 3 | 2021-06-11T02:46:06.000Z | 2021-08-17T02:59:30.000Z | Nets/TTNetParallel.py | AndresOtero/TensorDecompositionMachineLearning | 455f16b405ec9d031999b0ebf9c5a68d3c20b233 | [
"MIT"
] | null | null | null | Nets/TTNetParallel.py | AndresOtero/TensorDecompositionMachineLearning | 455f16b405ec9d031999b0ebf9c5a68d3c20b233 | [
"MIT"
] | null | null | null | from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
from Utils import Constant
from Utils.RanksFactory import RanksFactory
from Utils.TensorTools import group_divisions
class FirstKernelTensorTrain(nn.Module):
    """First tensor-train core: a bias-free linear map from m features to rank r_j."""
    def __init__(self, m, r_j):
        super().__init__()
        self.fc1 = nn.Linear(m, r_j, bias=False)
        self.m = m
        self.r_j = r_j
    def forward(self, tensor):
        # Input: (..., m) feature tensor; output: (..., r_j), rectified.
        return F.relu(self.fc1(tensor))
class FeatureMap(nn.Module):
    """Maps each n-dimensional input vector to an m-dimensional ReLU feature vector."""
    def __init__(self, n, m, amount_of_division, batch_size):
        super().__init__()
        self.m = m
        self.n = n
        self.amount_of_division = amount_of_division
        self.batch_size = batch_size
        self.fc1 = nn.Linear(self.n, self.m)
    def forward(self, tensor):
        # Collapse every leading dimension so each n-vector is mapped independently.
        trailing = tensor.size()[-1]
        flat = tensor.contiguous().view(-1, trailing)
        return F.relu(self.fc1(flat))
class TTKernel(nn.Module):
    """Inner tensor-train core: bilinear map (r_i, m) -> r_j followed by ReLU."""
    def __init__(self, r_i, m, r_j):
        super().__init__()
        self.fc1 = nn.Bilinear(r_i, m, r_j, bias=False)
    def forward(self, input_tensor_1, input_tensor_2):
        return F.relu(self.fc1(input_tensor_1, input_tensor_2))
class TTNetParallel(nn.Module):
    """Parallel tensor-train network: one chain of TT kernels per output
    category, all fed from a shared FeatureMap over the input divisions.
    """
    def __init__(self, net_params):
        super(TTNetParallel, self).__init__()
        self.kernels = []
        self.dicc_kernels = {}  # category -> ordered list of that category's kernels
        self.ranks = RanksFactory.create_tensor_train_parallel_ranks(net_params)
        self.m = net_params.get_m()
        self.n = net_params.get_n()
        self.amount_of_divisions=net_params.get_amount_of_divisions()
        self.batch_size=net_params.get_batch_size()
        self.feature_map = FeatureMap(self.n, self.m, self.amount_of_divisions, self.batch_size)
        # NOTE(review): this overwrites the value fetched via
        # get_amount_of_divisions() above — confirm both always agree.
        self.amount_of_divisions = net_params.amount_of_divisions
        for category in range(net_params.categories):
            first_kernel = FirstKernelTensorTrain(self.m, self.ranks[Constant.SECOND])
            self.dicc_kernels[category] = [first_kernel]
            self.kernels.append(first_kernel)
            for r in range(Constant.THIRD, len(self.ranks)):
                r_i = self.ranks[r - 1]
                r_j = self.ranks[r]
                kernel = TTKernel(r_i, self.m, r_j)
                self.dicc_kernels[category].append(kernel)
                self.kernels.append(kernel)
        # Wrapping in nn.Sequential registers every kernel as a submodule so
        # their parameters are visible to the optimizer.
        self.net = nn.Sequential(*self.kernels)
    def forward(self, tensor):
        featured_tensor = self.feature_map(tensor)
        division_divided_tensors = group_divisions(featured_tensor, self.amount_of_divisions)
        outputs = []
        for category in self.dicc_kernels:
            # Chain this category's kernels: each consumes the previous
            # output plus the next input division.
            division = 0
            amount_of_kernels = len(self.dicc_kernels[category])
            first_kernel = self.dicc_kernels[category][0]
            kernel_input = first_kernel(division_divided_tensors[division])
            for n_kernel in range(1, amount_of_kernels):
                division += 1
                division_divided_input = division_divided_tensors[division]
                kernel = self.dicc_kernels[category][n_kernel]
                kernel_input = kernel(kernel_input, division_divided_input)
            outputs.append(kernel_input)
        concatenated_outputs = torch.cat(outputs, 1)  # Concatenate columns into tensor
        return F.log_softmax(concatenated_outputs, dim=1)
    @staticmethod
    def get_number_of_parameters(n, m, r, d, categories):
        # Analytic parameter count matching the construction in __init__.
        feature_map_size = m * n + m
        first_kernel_size = m * r
        middle_kernels_size = ((d - 2) * r * m * r)
        last_kernel_size = r * m
        total = feature_map_size+(first_kernel_size+middle_kernels_size+last_kernel_size)*categories
        #print("TTNetParallel",total)
        return total
8b2015e1ca6ce09d1e0a020af172ef622b1fa9fb | 7,590 | py | Python | tensorpack/dataflow/remote.py | dan-anghel/tensorpack | 86fcffbc167e2b703b9abd17d41388311c90fe7c | [
"Apache-2.0"
] | 1 | 2021-09-25T15:36:07.000Z | 2021-09-25T15:36:07.000Z | tensorpack/dataflow/remote.py | dan-anghel/tensorpack | 86fcffbc167e2b703b9abd17d41388311c90fe7c | [
"Apache-2.0"
] | 7 | 2019-12-16T21:58:30.000Z | 2022-02-10T00:17:01.000Z | tensorpack/dataflow/remote.py | dan-anghel/tensorpack | 86fcffbc167e2b703b9abd17d41388311c90fe7c | [
"Apache-2.0"
] | 2 | 2019-09-04T00:02:29.000Z | 2020-07-06T20:27:04.000Z | # -*- coding: utf-8 -*-
# File: remote.py
import multiprocessing as mp
import time
from collections import deque
import tqdm
from six.moves import range
from ..utils import logger
from ..utils.concurrency import DIE
from ..utils.serialize import dumps, loads
from ..utils.utils import get_tqdm_kwargs
from .base import DataFlow, DataFlowReentrantGuard
try:
import zmq
except ImportError:
logger.warn("Error in 'import zmq'. remote feature won't be available")
__all__ = []
else:
__all__ = ['send_dataflow_zmq', 'RemoteDataZMQ']
def send_dataflow_zmq(df, addr, hwm=50, format=None, bind=False):
    """
    Run DataFlow and send data to a ZMQ socket addr.
    It will serialize and send each datapoint to this address with a PUSH socket.
    This function never returns.

    Args:
        df (DataFlow): Will infinitely loop over the DataFlow.
        addr: a ZMQ socket endpoint.
        hwm (int): ZMQ high-water mark (buffer size)
        format (str): The serialization format.
             Default format uses :mod:`utils.serialize`.
             This format works with :class:`dataflow.RemoteDataZMQ`.
             An alternate format is 'zmq_ops', used by https://github.com/tensorpack/zmq_ops
             and :class:`input_source.ZMQInput`.
        bind (bool): whether to bind or connect to the endpoint address.
    """
    # 'zmq_op' is accepted as an alias of 'zmq_ops'.
    assert format in [None, 'zmq_op', 'zmq_ops']
    if format is None:
        dump_fn = dumps
    else:
        from zmq_ops import dump_arrays
        dump_fn = dump_arrays
    ctx = zmq.Context()
    socket = ctx.socket(zmq.PUSH)
    socket.set_hwm(hwm)
    if bind:
        socket.bind(addr)
    else:
        socket.connect(addr)
    try:
        df.reset_state()
        logger.info("Serving data to {} with {} format ...".format(
            addr, 'default' if format is None else 'zmq_ops'))
        INTERVAL = 200
        # Rolling window of per-send latencies, reported every INTERVAL steps.
        q = deque(maxlen=INTERVAL)
        try:
            total = len(df)
        except NotImplementedError:
            total = 0
        tqdm_args = get_tqdm_kwargs(leave=True, smoothing=0.8)
        tqdm_args['bar_format'] = tqdm_args['bar_format'] + "{postfix}"
        while True:
            with tqdm.trange(total, **tqdm_args) as pbar:
                for dp in df:
                    start = time.time()
                    socket.send(dump_fn(dp), copy=False)
                    q.append(time.time() - start)
                    pbar.update(1)
                    if pbar.n % INTERVAL == 0:
                        avg = "{:.3f}".format(sum(q) / len(q))
                        pbar.set_postfix({'AvgSendLat': avg})
    finally:
        # LINGER=0 so close() does not block on unsent messages.
        logger.info("Exiting send_dataflow_zmq ...")
        socket.setsockopt(zmq.LINGER, 0)
        socket.close()
        if not ctx.closed:
            ctx.destroy(0)
class RemoteDataZMQ(DataFlow):
    """
    Produce data from ZMQ PULL socket(s).
    It is the receiver-side counterpart of :func:`send_dataflow_zmq`, which uses :mod:`tensorpack.utils.serialize`
    for serialization.
    See http://tensorpack.readthedocs.io/tutorial/efficient-dataflow.html#distributed-dataflow

    Attributes:
        cnt1, cnt2 (int): number of data points received from addr1 and addr2
    """
    def __init__(self, addr1, addr2=None, hwm=50, bind=True):
        """
        Args:
            addr1,addr2 (str): addr of the zmq endpoint to connect to.
                Use both if you need two protocols (e.g. both IPC and TCP).
                I don't think you'll ever need 3.
            hwm (int): ZMQ high-water mark (buffer size)
            bind (bool): whether to connect or bind the endpoint
        """
        assert addr1
        self._addr1 = addr1
        self._addr2 = addr2
        self._hwm = int(hwm)
        # Guard: this DataFlow cannot be iterated by two callers at once.
        self._guard = DataFlowReentrantGuard()
        self._bind = bind
    def reset_state(self):
        # Receive counters are (re)initialized per reset.
        self.cnt1 = 0
        self.cnt2 = 0
    def bind_or_connect(self, socket, addr):
        if self._bind:
            socket.bind(addr)
        else:
            socket.connect(addr)
    def __iter__(self):
        with self._guard:
            try:
                ctx = zmq.Context()
                if self._addr2 is None:
                    # Single-endpoint mode: one PULL socket, simple recv loop.
                    socket = ctx.socket(zmq.PULL)
                    socket.set_hwm(self._hwm)
                    self.bind_or_connect(socket, self._addr1)
                    while True:
                        dp = loads(socket.recv(copy=False))
                        yield dp
                        self.cnt1 += 1
                else:
                    # Dual-endpoint mode: poll both sockets and drain
                    # whichever has data ready.
                    socket1 = ctx.socket(zmq.PULL)
                    socket1.set_hwm(self._hwm)
                    self.bind_or_connect(socket1, self._addr1)
                    socket2 = ctx.socket(zmq.PULL)
                    socket2.set_hwm(self._hwm)
                    self.bind_or_connect(socket2, self._addr2)
                    poller = zmq.Poller()
                    poller.register(socket1, zmq.POLLIN)
                    poller.register(socket2, zmq.POLLIN)
                    while True:
                        evts = poller.poll()
                        for sock, evt in evts:
                            dp = loads(sock.recv(copy=False))
                            yield dp
                            if sock == socket1:
                                self.cnt1 += 1
                            else:
                                self.cnt2 += 1
            finally:
                # Destroy the context (and its sockets) even when the
                # generator is closed early by the consumer.
                ctx.destroy(linger=0)
# for internal use only
def dump_dataflow_to_process_queue(df, size, nr_consumer):
    """
    Convert a DataFlow to a :class:`multiprocessing.Queue`.
    The DataFlow will only be reset in the spawned process.

    Args:
        df (DataFlow): the DataFlow to dump.
        size (int): size of the queue
        nr_consumer (int): number of consumer of the queue.
            The producer will add this many of ``DIE`` sentinel to the end of the queue.

    Returns:
        tuple(queue, process):
            The process will take data from ``df`` and fill
            the queue, once you start it. Each element in the queue is (idx,
            dp). idx can be the ``DIE`` sentinel when ``df`` is exhausted.
    """
    q = mp.Queue(size)
    class EnqueProc(mp.Process):
        # Producer process: fills the queue with (index, datapoint) pairs.
        def __init__(self, df, q, nr_consumer):
            super(EnqueProc, self).__init__()
            self.df = df
            self.q = q
        def run(self):
            self.df.reset_state()
            try:
                for idx, dp in enumerate(self.df):
                    self.q.put((idx, dp))
            finally:
                # Always emit one DIE sentinel per consumer so every
                # consumer can terminate, even after an exception.
                for _ in range(nr_consumer):
                    self.q.put((DIE, None))
    proc = EnqueProc(df, q, nr_consumer)
    return q, proc
if __name__ == '__main__':
    # Manual benchmark: run one process with --task send and another with
    # --task recv against the same endpoint to exercise the ZMQ path.
    from argparse import ArgumentParser
    from .raw import FakeData
    from .common import TestDataSpeed
    """
    Test the multi-producer single-consumer model
    """
    parser = ArgumentParser()
    parser.add_argument('-t', '--task', choices=['send', 'recv'], required=True)
    parser.add_argument('-a', '--addr1', required=True)
    parser.add_argument('-b', '--addr2', default=None)
    args = parser.parse_args()
    # tcp addr like "tcp://127.0.0.1:8877"
    # ipc addr like "ipc://@ipc-test"
    if args.task == 'send':
        # use random=True to make it slow and cpu-consuming
        ds = FakeData([(128, 244, 244, 3)], 1000, random=True)
        send_dataflow_zmq(ds, args.addr1)
    else:
        ds = RemoteDataZMQ(args.addr1, args.addr2)
        logger.info("Each DP is 73.5MB")
        TestDataSpeed(ds).start_test()
| 33.436123 | 114 | 0.559025 |
b53a27c2d3cac9269092b153139120d7a6c8f757 | 358 | py | Python | python/examples/classification_demo.py | zeyiwen/gbdt | 35ddc80c6fc2d072478f8505312fd6fc34dcdd9c | [
"Apache-2.0"
] | 639 | 2019-01-26T14:59:28.000Z | 2022-03-30T09:36:29.000Z | python/examples/classification_demo.py | zeyiwen/gbdt | 35ddc80c6fc2d072478f8505312fd6fc34dcdd9c | [
"Apache-2.0"
] | 73 | 2019-02-15T14:19:04.000Z | 2022-03-10T07:05:25.000Z | python/examples/classification_demo.py | zeyiwen/gbdt | 35ddc80c6fc2d072478f8505312fd6fc34dcdd9c | [
"Apache-2.0"
] | 92 | 2019-02-04T17:13:49.000Z | 2021-12-30T17:30:41.000Z | import sys
sys.path.append("../")
from thundergbm import TGBMClassifier
from sklearn.datasets import load_digits
from sklearn.metrics import accuracy_score
if __name__ == '__main__':
    # Smoke test: train ThunderGBM on the scikit-learn digits dataset.
    x, y = load_digits(return_X_y=True)
    clf = TGBMClassifier()
    clf.fit(x, y)
    # NOTE(review): accuracy is measured on the training data itself, so
    # this checks the pipeline runs, not generalization.
    y_pred = clf.predict(x)
    accuracy = accuracy_score(y, y_pred)
print(accuracy) | 25.571429 | 42 | 0.723464 |
3d62ddfe514c938c48b68f015fe7a0933a2b0914 | 999 | py | Python | e2e/tests/utils.py | fantastic001/parkingmap | 6bd780a0c50628a9bac182005c130f30af5372c7 | [
"MIT"
] | null | null | null | e2e/tests/utils.py | fantastic001/parkingmap | 6bd780a0c50628a9bac182005c130f30af5372c7 | [
"MIT"
] | 1 | 2022-01-21T23:35:26.000Z | 2022-01-21T23:35:26.000Z | e2e/tests/utils.py | PSW-ISA-G4-MNS/PSW-ISA | f96858c64b91f7b61e8995e59154d6df36f3a970 | [
"MIT"
] | null | null | null | from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait # available since 2.4.0
from selenium.webdriver.support import expected_conditions as EC # available since 2.26.0
import os
import time
def get_driver(base_url="http://192.168.33.10:300/frontend/",
               driver_path="chromedriver.exe"):
    """Start a Chrome session pointed at the application under test.

    The frontend URL and chromedriver path were previously hard-coded;
    they are now parameters whose defaults are the old values, so every
    existing caller keeps working.

    :param base_url: URL opened after the browser starts.
    :param driver_path: path to the chromedriver executable.
    :return: the live WebDriver instance.
    """
    driver = webdriver.Chrome(driver_path)
    driver.get(base_url)
    time.sleep(2)
    try:
        # Best effort: clear any session left over from a previous test.
        logout(driver)
    except Exception as e:
        # No "Log out" link (nobody was logged in) -- fine for a fresh start.
        print(e)
    return driver
def logout(driver):
    """Log out through the "Log out" link, pausing for the UI to settle."""
    link = driver.find_element_by_link_text("Log out")
    time.sleep(2)
    link.click()
    time.sleep(2)
def login(driver, email, password):
    """Open the login form and submit the given credentials."""
    login_link = driver.find_element_by_link_text("Login")
    time.sleep(1)
    login_link.click()
    time.sleep(2)
    # The first two .form-control inputs are email and password.
    fields = driver.find_elements_by_class_name("form-control")
    fields[0].send_keys(email)
    fields[1].send_keys(password)
    driver.find_element_by_class_name("btn-primary").click()
time.sleep(3) | 30.272727 | 89 | 0.718719 |
bb8ba07d0c9a3b62734ccb5bf8ae258b736d112c | 1,078 | py | Python | desafio/desafio058.py | henriquekirchheck/Curso-em-video-Python | 1a29f68515313af85c8683f626ba35f8fcdd10e7 | [
"MIT"
] | null | null | null | desafio/desafio058.py | henriquekirchheck/Curso-em-video-Python | 1a29f68515313af85c8683f626ba35f8fcdd10e7 | [
"MIT"
] | null | null | null | desafio/desafio058.py | henriquekirchheck/Curso-em-video-Python | 1a29f68515313af85c8683f626ba35f8fcdd10e7 | [
"MIT"
] | null | null | null | # Melhore o jogo do DESAFIO 28 onde o computador vai “pensar” em um número entre 0 e 10. Só que agora o jogador vai tentar adivinhar até acertar, mostrando no final quantos palpites foram necessários para vencer
from random import randint as random
# NOTE(review): aliasing randint as 'random' shadows the usual module name.
# Secret number the player has to guess.
cpu = random(1, 10)
player = None
win = False
cheat = False
counter = 0
while(win == False):
    if(cheat == True):
        # Cheat mode: reveal the secret number (ANSI blue) every round.
        print(f'O numero da cpu é \033[1;34m{cpu}\033[m \n')
    player = int(input('Adivinhe um numero de 1 a 10: '))
    if(player == 8822464613):
        # Magic number toggles cheat mode; this guess is not counted.
        cheat = True
        print('''
+=========================+
Você ativou o cheat secreto
+=========================+
        ''')
    elif(player == cpu):
        counter = counter + 1
        if(counter == 1):
            # Singular wording for exactly one attempt.
            print(f'\nParabens, você ganhou!!!\nVocê precisou de {counter} tentativa')
        else:
            print(f'\nParabens, você ganhou!!!\nVocê precisou de {counter} tentativas')
        win = True
    else:
        counter = counter + 1
        print('\nInfelismente você perdeu, tente novamente\n')
| 32.666667 | 211 | 0.576067 |
ad5e2e5bfc34147531af124c0450fa2253034816 | 7,091 | py | Python | seleniumbase/fixtures/page_utils.py | aoruilin/SeleniumBase | 304fd7a23661ebf561da47d4cd8f7365dba519ca | [
"MIT"
] | null | null | null | seleniumbase/fixtures/page_utils.py | aoruilin/SeleniumBase | 304fd7a23661ebf561da47d4cd8f7365dba519ca | [
"MIT"
] | null | null | null | seleniumbase/fixtures/page_utils.py | aoruilin/SeleniumBase | 304fd7a23661ebf561da47d4cd8f7365dba519ca | [
"MIT"
] | null | null | null | """
This module contains useful utility methods.
这个模块包含有用的实用方法。
"""
import codecs
import re
import requests
def get_domain_url(url):
    """Reduce a full URL to its scheme + host.

    Example: "https://blog.xkcd.com/2014/07/22/what-if-book-tour/"
    becomes "https://blog.xkcd.com". Inputs that are not http(s) URLs
    are returned unchanged.
    """
    if not url.startswith(("http://", "https://")):
        return url
    parts = url.split('://')
    host = parts[1].split('/')[0]
    return parts[0] + '://' + host
def is_xpath_selector(selector):
    """Heuristic: selectors beginning with '/', './' or '(' are XPath."""
    return selector.startswith(('/', './', '('))
def is_link_text_selector(selector):
    """True if the selector carries a link-text prefix."""
    return selector.startswith(('link=', 'link_text=', 'text='))
def is_partial_link_text_selector(selector):
    """True if the selector carries a partial-link-text prefix."""
    return selector.startswith(
        ('partial_link=', 'partial_link_text=', 'partial_text='))
def is_name_selector(selector):
    """True if the selector carries the 'name=' prefix."""
    return selector.startswith('name=')
def get_link_text_from_selector(selector):
    """Strip the link-text prefix ('link=', 'link_text=' or 'text=').

    Selectors without a recognized prefix are returned unchanged.
    """
    for marker in ('link=', 'link_text=', 'text='):
        if selector.startswith(marker):
            return selector.split(marker)[1]
    return selector
def get_partial_link_text_from_selector(selector):
    """Strip the partial-link-text prefix from a selector.

    Selectors without a recognized prefix are returned unchanged.
    """
    for marker in ('partial_link=', 'partial_link_text=', 'partial_text='):
        if selector.startswith(marker):
            return selector.split(marker)[1]
    return selector
def get_name_from_selector(selector):
    """Strip the 'name=' prefix from a name selector, if present."""
    if not selector.startswith('name='):
        return selector
    return selector.split('name=')[1]
# Compiled once at import time instead of on every call; the pattern is a
# loop/hot-path invariant, so there is no reason to rebuild it per URL.
_VALID_URL_RE = re.compile(
    r'^(?:http)s?://'  # http:// or https://
    r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+'
    r'(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'  # domain...
    r'localhost|'  # localhost...
    r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'  # ...or ip
    r'(?::\d+)?'  # optional port
    r'(?:/?|[/?]\S+)$', re.IGNORECASE)


def is_valid_url(url):
    """Return True if *url* looks like an HTTP(S) URL.

    'about:blank' and 'data:,' are also accepted because browsers report
    them for empty pages.
    """
    return bool(_VALID_URL_RE.match(url)) or url in ('about:blank', 'data:,')
def _get_unique_links(page_url, soup):
"""
Returns all unique links.
Includes:
"a"->"href", "img"->"src", "link"->"href", and "script"->"src" links.
"""
if not page_url.startswith("http://") and (
not page_url.startswith("https://")):
return []
prefix = 'http:'
if page_url.startswith('https:'):
prefix = 'https:'
simple_url = page_url.split('://')[1]
base_url = simple_url.split('/')[0]
full_base_url = prefix + "//" + base_url
raw_links = []
raw_unique_links = []
# Get "href" from all "a" tags
links = soup.find_all('a')
for link in links:
raw_links.append(link.get('href'))
# Get "src" from all "img" tags
img_links = soup.find_all('img')
for img_link in img_links:
raw_links.append(img_link.get('src'))
# Get "href" from all "link" tags
links = soup.find_all('link')
for link in links:
raw_links.append(link.get('href'))
# Get "src" from all "script" tags
img_links = soup.find_all('script')
for img_link in img_links:
raw_links.append(img_link.get('src'))
for link in raw_links:
if link not in raw_unique_links:
raw_unique_links.append(link)
unique_links = []
for link in raw_unique_links:
if link and len(link) > 1:
if link.startswith('//'):
link = prefix + link
elif link.startswith('/'):
link = full_base_url + link
elif link.startswith('./'):
link = full_base_url + link[1:]
elif link.startswith('#'):
link = full_base_url + link
elif '//' not in link:
link = full_base_url + "/" + link
else:
pass
unique_links.append(link)
return unique_links
def _get_link_status_code(link, allow_redirects=False, timeout=5):
    """ Get the status code of a link.
        If the timeout is exceeded, will return a 404.
        For a list of available status codes, see:
        https://en.wikipedia.org/wiki/List_of_HTTP_status_codes

        :param link: URL to probe with an HTTP GET.
        :param allow_redirects: passed through to requests.get().
        :param timeout: per-request timeout in seconds.
    """
    status_code = None
    try:
        response = requests.get(
            link, allow_redirects=allow_redirects, timeout=timeout)
        status_code = response.status_code
    except Exception:
        # Deliberate catch-all: any failure (timeout, DNS, SSL, refused
        # connection) is reported as a plain 404 to the caller.
        status_code = 404
    return status_code
def _print_unique_links_with_status_codes(page_url, soup):
    """ Finds all unique links in the html of the page source
        and then prints out those links with their status codes.
        Format:  ["link"  ->  "status_code"]  (per line)
        Page links include those obtained from:
        "a"->"href", "img"->"src", "link"->"href", and "script"->"src".
    """
    links = _get_unique_links(page_url, soup)
    for link in links:
        # One HTTP GET per link; slow pages are capped by the helper's timeout.
        status_code = _get_link_status_code(link)
        print(link, " -> ", status_code)
def _download_file_to(file_url, destination_folder, new_file_name=None):
    """Download *file_url* into *destination_folder*.

    The file keeps its remote name unless *new_file_name* is given.
    """
    file_name = new_file_name or file_url.split('/')[-1]
    response = requests.get(file_url)
    with open(destination_folder + '/' + file_name, "wb") as out_file:
        out_file.write(response.content)
def _save_data_as(data, destination_folder, file_name):
out_file = codecs.open(destination_folder + '/' + file_name, "w+")
out_file.writelines(data)
out_file.close()
def make_css_match_first_element_only(selector):
    """Append ':first' so the CSS selector matches only the first element.

    Nothing is appended when the final token already carries a
    pseudo-class or when ':contains' appears anywhere in the selector.
    """
    final_token = selector.split(' ')[-1]
    if ':' in final_token or ':contains' in selector:
        return selector
    return selector + ':first'
| 29.794118 | 78 | 0.611338 |
96c61bc2e96ecd4ed8436911d2acf6fb1834682f | 5,241 | py | Python | tests/ClientServer/validation_tests/pys2opc/examples/3-multi-connection-multi-request.py | monate/s2opc | 2fd493febf1c8351a74615acea11783feecf9f4f | [
"Apache-2.0"
] | null | null | null | tests/ClientServer/validation_tests/pys2opc/examples/3-multi-connection-multi-request.py | monate/s2opc | 2fd493febf1c8351a74615acea11783feecf9f4f | [
"Apache-2.0"
] | null | null | null | tests/ClientServer/validation_tests/pys2opc/examples/3-multi-connection-multi-request.py | monate/s2opc | 2fd493febf1c8351a74615acea11783feecf9f4f | [
"Apache-2.0"
] | 1 | 2020-04-28T08:32:27.000Z | 2020-04-28T08:32:27.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Licensed to Systerel under one or more contributor license
# agreements. See the NOTICE file distributed with this work
# for additional information regarding copyright ownership.
# Systerel licenses this file to you under the Apache
# License, Version 2.0 (the "License"); you may not use this
# file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example script: this script illustrate how to hanlde multiple connections and asynchronous requests.
Some connections are secured and some unsecured. A subscription is made on the secured connection.
"""
import time
from pys2opc import PyS2OPC, BaseConnectionHandler
from _connection_configuration import configuration_parameters_no_subscription, configuration_parameters_subscription, configuration_parameters_security, join_configs
NODES_A = ['s=BRD.NC_000.VP_96.TM.TSEN1.PTSE_TS1_DELTAP_P20_RAW',
's=BRD.NC_000.VP_96.TM.TF.PMC2_TF_MODE_MPPT_RAW']
NODES_B = ['s=BRD.NC_000.VP_96.TC.OBC_TC_LOAD_NTEL.CHIFFRE03_RAW',
's=BRD.NC_000.VP_96.TC.MC2_TC_MODE_SELECT_GS.MC2_AR_ID_MODE_SELECT_GS_RAW',
's=BRD.NC_000.VP_96.TM.TMAI.POBC_MA_CALL_PERIOD_RAW',
's=BRD.NC_000.VP_96.TM.TSEN2.PTSE_TS2_DP_SIGN_D20_RAW']
class PrintSubs(BaseConnectionHandler):
    """Connection handler that prints every subscription data change."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Human-readable label used to tell subscription connections apart.
        self.tag = ''
    def on_datachanged(self, nodeId, dataValue):
        # Library callback fired for each notification on a subscribed node;
        # prints the new value together with the server timestamp.
        print('  Data changed on connection "{}", "{}" -> {}, '.format(self.tag, nodeId, dataValue.variant) + time.ctime(dataValue.timestampServer))
if __name__ == '__main__':
    # Build four client configurations (secured/unsecured x with/without
    # subscription) inside the library's initialize() scope.
    with PyS2OPC.initialize():
        config_unsec_nosub = PyS2OPC.add_configuration_unsecured(**configuration_parameters_no_subscription)
        config_sec_nosub = PyS2OPC.add_configuration_secured(**join_configs(configuration_parameters_no_subscription, configuration_parameters_security))
        config_unsec_sub = PyS2OPC.add_configuration_unsecured(**configuration_parameters_subscription)
        config_sec_sub = PyS2OPC.add_configuration_secured(**join_configs(configuration_parameters_subscription, configuration_parameters_security))
        PyS2OPC.mark_configured()
    connections = [PyS2OPC.connect(config, PrintSubs) for config in (config_unsec_nosub, config_sec_nosub,
                                                                     config_unsec_sub, config_sec_sub)]
    conn_unsec_nosub, conn_sec_nosub, conn_unsec_sub, conn_sec_sub = connections
    conn_unsec_sub.tag = 'unsecure'
    conn_sec_sub.tag = 'secure'
    try:
        # Add node to subscriptions. This is always synchronous.
        # On secured connection, call the function twice.
        conn_sec_sub.add_nodes_to_subscription(NODES_A)
        conn_sec_sub.add_nodes_to_subscription(NODES_B)
        # On unsecured connection, calls it once.
        conn_unsec_sub.add_nodes_to_subscription(NODES_A + NODES_B)
        # Reads
        # On secured connection, make two "simultaneous" asynchronous reads.
        readA = conn_sec_nosub.read_nodes(NODES_A, bWaitResponse=False)
        readB = conn_sec_nosub.read_nodes(NODES_B, bWaitResponse=False)
        # On unsecured connection, make a synchronous read.
        respRead = conn_unsec_nosub.read_nodes(NODES_A + NODES_B)
        # readA and readB are Requests. Manually wait on the responses and display them.
        t0 = time.time()
        respA = conn_sec_nosub.get_response(readA)
        while respA is None:
            respA = conn_sec_nosub.get_response(readA)
            if time.time() - t0 > 1.:  # Wait at most 1 second
                break
        respB = conn_sec_nosub.get_response(readB)
        while respB is None:
            respB = conn_sec_nosub.get_response(readB)
            if time.time() - t0 > 1.:
                break
        assert respA is not None and respB is not None
        # Cross-check: the async (secured) and sync (unsecured) reads must
        # have observed the same values for every node.
        for node, dvAsynch, dvSynch in zip(NODES_A+NODES_B, readA.response.results+readB.response.results, respRead.results):
            assert dvAsynch.variant == dvSynch.variant, 'Read on secured and unsecured connection yielded different values.'
            print('  Value of {} is {}, timestamp is {}'.format(node, str(dvAsynch.variant), time.ctime(dvAsynch.timestampSource)))
        # Waits at least a publish cycle before quitting, otherwise the callback may never be called
        time.sleep(configuration_parameters_subscription['publish_period']/1000)
    finally:
        # Always clean all connections, even when there is a problem
        for connection in connections:
            connection.disconnect()
| 51.382353 | 166 | 0.70063 |
d68b21505db71aa3d3c2856f212ebafecd030304 | 196 | py | Python | tests/test_pandas.py | kgizdov/awkward-0.x | 11953017a5495527169d3a031a5e866904d824a2 | [
"BSD-3-Clause"
] | 224 | 2018-07-01T00:28:27.000Z | 2020-11-16T11:00:25.000Z | tests/test_pandas.py | kgizdov/awkward-0.x | 11953017a5495527169d3a031a5e866904d824a2 | [
"BSD-3-Clause"
] | 196 | 2018-07-12T06:48:42.000Z | 2020-11-01T17:18:51.000Z | tests/test_pandas.py | kgizdov/awkward-0.x | 11953017a5495527169d3a031a5e866904d824a2 | [
"BSD-3-Clause"
] | 42 | 2018-06-28T11:36:55.000Z | 2020-10-23T03:24:31.000Z | import pytest
pandas = pytest.importorskip("pandas")
@pytest.fixture
def awkward0_pandas():
    """Provide awkward0's pandas-integration submodule to tests.

    Imported lazily inside the fixture so test collection does not fail
    before the importorskip guards have run.
    """
    from awkward0 import pandas
    return pandas
def test_import_pandas(awkward0_pandas):
    # Requesting the fixture is the whole test: it passes iff the
    # awkward0.pandas submodule imports successfully.
    pass
| 15.076923 | 40 | 0.760204 |
75d4b20a78a8c1109db237c182fb8eb3eff825be | 6,278 | py | Python | src/oci/apm_traces/models/query_result_row.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
] | 249 | 2017-09-11T22:06:05.000Z | 2022-03-04T17:09:29.000Z | src/oci/apm_traces/models/query_result_row.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
] | 228 | 2017-09-11T23:07:26.000Z | 2022-03-23T10:58:50.000Z | src/oci/apm_traces/models/query_result_row.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
] | 224 | 2017-09-27T07:32:43.000Z | 2022-03-25T16:55:42.000Z | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class QueryResultRow(object):
    """One row of an APM trace query result.

    The row's values live in ``query_result_row_data``; add-on metadata
    used for drill-downs lives in ``query_result_row_metadata``.
    """

    def __init__(self, **kwargs):
        """
        Initializes a new QueryResultRow object with values from keyword
        arguments. Supported kwargs (matching this class's properties):

        :param query_result_row_data: dict(str, object)
        :param query_result_row_metadata: dict(str, object)
        """
        # Type table and JSON wire-name table consumed by the SDK's
        # (de)serialization helpers.
        self.swagger_types = {
            'query_result_row_data': 'dict(str, object)',
            'query_result_row_metadata': 'dict(str, object)'
        }
        self.attribute_map = {
            'query_result_row_data': 'queryResultRowData',
            'query_result_row_metadata': 'queryResultRowMetadata'
        }
        self._query_result_row_data = None
        self._query_result_row_metadata = None

    @property
    def query_result_row_data(self):
        """
        **[Required]** Gets the query_result_row_data of this QueryResultRow.

        A map from column name, alias, or aggregate-function column name
        (as specified in the query's show clause) to the actual value of
        that attribute for this row. The datatype of each value is
        described by the corresponding queryResultRowTypeSummary entry of
        the accompanying query metadata.

        :rtype: dict(str, object)
        """
        return self._query_result_row_data

    @query_result_row_data.setter
    def query_result_row_data(self, value):
        """
        Sets the query_result_row_data of this QueryResultRow.

        :param value: map of column/alias name to row value.
        :type: dict(str, object)
        """
        self._query_result_row_data = value

    @property
    def query_result_row_metadata(self):
        """
        **[Required]** Gets the query_result_row_metadata of this QueryResultRow.

        A map of key/value pairs carrying metadata or add-on data (for
        example, drill-down information) for the values held in
        query_result_row_data.

        :rtype: dict(str, object)
        """
        return self._query_result_row_metadata

    @query_result_row_metadata.setter
    def query_result_row_metadata(self, value):
        """
        Sets the query_result_row_metadata of this QueryResultRow.

        :param value: map of metadata key/value pairs for this row.
        :type: dict(str, object)
        """
        self._query_result_row_metadata = value

    def __repr__(self):
        return formatted_flat_dict(self)

    def __eq__(self, other):
        # Attribute-wise equality; None never equals a row.
        if other is None:
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self == other
| 51.459016 | 245 | 0.728257 |
90358251fb835f9aeccc8f08a923266802cc2ce3 | 125 | py | Python | SeedApp/reports/urls.py | lutherchikumba/SeedApp | 87b8e22bb256f6574cdf28f03e73a7200df54378 | [
"MIT"
] | null | null | null | SeedApp/reports/urls.py | lutherchikumba/SeedApp | 87b8e22bb256f6574cdf28f03e73a7200df54378 | [
"MIT"
] | null | null | null | SeedApp/reports/urls.py | lutherchikumba/SeedApp | 87b8e22bb256f6574cdf28f03e73a7200df54378 | [
"MIT"
] | null | null | null | from django.urls import path
from . import views
# Route table for the reports app: /reports/ renders the reports view.
urlpatterns = [
    path('reports/', views.reports_view, name='reports'),
] | 20.833333 | 57 | 0.712 |
9fec373acdac97b149d6883c06f8ee70b778ddfa | 986 | py | Python | tradingviewalerts/__init__.py | tistaharahap/tradingview-alerts | c332b211d913b3abaac3f6c7fd9ae680e85a6ada | [
"MIT"
] | null | null | null | tradingviewalerts/__init__.py | tistaharahap/tradingview-alerts | c332b211d913b3abaac3f6c7fd9ae680e85a6ada | [
"MIT"
] | null | null | null | tradingviewalerts/__init__.py | tistaharahap/tradingview-alerts | c332b211d913b3abaac3f6c7fd9ae680e85a6ada | [
"MIT"
] | 3 | 2020-08-15T12:19:49.000Z | 2022-01-28T06:22:21.000Z | from sanic import Sanic
from sanic.request import Request
from sanic.response import text, HTTPResponse
from os import environ
from tradingviewalerts.forwarders.base import BaseForwarder
from tradingviewalerts.forwarders.telegram import TelegramForwarder
from tradingviewalerts.forwarders.rabbitmq import RabbitMQForwarder
# Valid options are ['telegram', 'rabbitmq'] - More coming soon
# Read once at import time; defaults to the Telegram forwarder.
FORWARDER = environ.get('FORWARDER', 'telegram')
def get_forwarder(fwd: str) -> BaseForwarder:
    """Instantiate the forwarder selected by name.

    :param fwd: one of 'telegram' or 'rabbitmq'.
    :raises ValueError: for an unknown forwarder name. Previously
        ``forwarders.get(fwd)`` returned None and the call site crashed
        with a cryptic ``TypeError: 'NoneType' object is not callable``.
    """
    forwarders = {
        'telegram': TelegramForwarder,
        'rabbitmq': RabbitMQForwarder
    }
    try:
        forwarder_cls = forwarders[fwd]
    except KeyError:
        raise ValueError(
            f"Unknown forwarder {fwd!r}; expected one of {sorted(forwarders)}")
    return forwarder_cls()
async def webhook_handler(request: Request) -> HTTPResponse:
    """Receive a TradingView webhook and forward its JSON payload.

    Always responds with plain-text 'ok' so TradingView does not retry.
    """
    forwarder = get_forwarder(FORWARDER)
    await forwarder.send_notification(message=request.json)
    return text('ok')
def create_app() -> Sanic:
    """Build the Sanic application exposing POST /webhook."""
    app = Sanic('TradingView Alerts')
    app.add_route(webhook_handler, '/webhook', methods=['POST'])
return app
| 25.282051 | 67 | 0.749493 |
2d0a0b45419fdf04a95386056d3d4db14ee7edf9 | 16,776 | py | Python | tests/unit/fake_api.py | peter-slovak/docker-py | 3076a9ac40b91458f7e95e3c6167e1bbb92682b1 | [
"Apache-2.0"
] | 1 | 2021-07-14T15:01:30.000Z | 2021-07-14T15:01:30.000Z | tests/unit/fake_api.py | peter-slovak/docker-py | 3076a9ac40b91458f7e95e3c6167e1bbb92682b1 | [
"Apache-2.0"
] | null | null | null | tests/unit/fake_api.py | peter-slovak/docker-py | 3076a9ac40b91458f7e95e3c6167e1bbb92682b1 | [
"Apache-2.0"
] | 1 | 2020-01-23T17:13:45.000Z | 2020-01-23T17:13:45.000Z | from . import fake_stat
from docker import constants
CURRENT_VERSION = 'v{0}'.format(constants.DEFAULT_DOCKER_API_VERSION)
# Stable fake identifiers; the canned responses below and the unit tests
# that consume them must agree on these values.
FAKE_CONTAINER_ID = '3cc2351ab11b'
FAKE_IMAGE_ID = 'e9aa60c60128'
FAKE_EXEC_ID = 'd5d177f121dc'
FAKE_NETWORK_ID = '33fb6a3462b8'
FAKE_IMAGE_NAME = 'test_image'
FAKE_TARBALL_PATH = '/path/to/tarball'
FAKE_REPO_NAME = 'repo'
FAKE_TAG_NAME = 'tag'
FAKE_FILE_NAME = 'file'
FAKE_URL = 'myurl'
FAKE_PATH = '/path'
FAKE_VOLUME_NAME = 'perfectcherryblossom'
FAKE_NODE_ID = '24ifsmvkjbyhk'
# Each method is prefixed with HTTP method (get, post...)
# for clarity and readability
def get_fake_raw_version():
    """Canned payload for the unversioned GET /version endpoint."""
    payload = {
        "ApiVersion": "1.18",
        "GitCommit": "fake-commit",
        "GoVersion": "go1.3.3",
        "Version": "1.5.0",
    }
    return 200, payload
def get_fake_version():
    """Canned payload for the versioned /version endpoint."""
    return 200, {
        'GoVersion': '1',
        'Version': '1.1.1',
        'GitCommit': 'deadbeef+CHANGES',
    }
def get_fake_info():
    """Canned payload for GET /info."""
    info = {
        'Containers': 1,
        'Images': 1,
        'Debug': False,
        'MemoryLimit': False,
        'SwapLimit': False,
        'IPv4Forwarding': True,
    }
    return 200, info
def post_fake_auth():
    """Canned payload for POST /auth (successful login)."""
    return 200, {'Status': 'Login Succeeded',
                 'IdentityToken': '9cbaf023786cd7'}
def get_fake_ping():
    # Plain-text "OK" mirrors the real GET /_ping endpoint.
    return 200, "OK"
def get_fake_search():
    """Canned payload for GET /images/search (one hit)."""
    hits = [{'Name': 'busybox', 'Description': 'Fake Description'}]
    return 200, hits
def get_fake_images():
    """Canned payload for GET /images/json (one fake busybox image)."""
    image = {
        'Id': FAKE_IMAGE_ID,
        'Created': '2 days ago',
        'Repository': 'busybox',
        'RepoTags': ['busybox:latest', 'busybox:1.0'],
    }
    return 200, [image]
def get_fake_image_history():
    """Canned payload for GET /images/*/history (two fake layers)."""
    layers = [
        {"Id": "b750fe79269d", "Created": 1364102658,
         "CreatedBy": "/bin/bash"},
        {"Id": "27cf78414709", "Created": 1364068391,
         "CreatedBy": ""},
    ]
    return 200, layers
def post_fake_import_image():
    """Canned payload for the image import endpoint."""
    return 200, 'Import messages...'
def get_fake_containers():
    """Canned payload for GET /containers/json (one fake container)."""
    container = {
        'Id': FAKE_CONTAINER_ID,
        'Image': 'busybox:latest',
        'Created': '2 days ago',
        'Command': 'true',
        'Status': 'fake status',
    }
    return 200, [container]
def post_fake_start_container():
    """Canned payload for POST /containers/*/start."""
    return 200, {'Id': FAKE_CONTAINER_ID}
def post_fake_resize_container():
    """Canned payload for POST /containers/*/resize."""
    return 200, {'Id': FAKE_CONTAINER_ID}
def post_fake_create_container():
    """Canned payload for POST /containers/create."""
    return 200, {'Id': FAKE_CONTAINER_ID}
def get_fake_inspect_container(tty=False):
    """Canned payload for GET /containers/*/json.

    :param tty: echoed into Config.Tty so tests can simulate both tty
        and non-tty containers.
    """
    status_code = 200
    response = {
        'Id': FAKE_CONTAINER_ID,
        'Config': {'Privileged': True, 'Tty': tty},
        'ID': FAKE_CONTAINER_ID,
        'Image': 'busybox:latest',
        'Name': 'foobar',
        "State": {
            "Status": "running",
            "Running": True,
            "Pid": 0,
            "ExitCode": 0,
            "StartedAt": "2013-09-25T14:01:18.869545111+02:00",
            "Ghost": False
        },
        "MacAddress": "02:42:ac:11:00:0a"
    }
    return status_code, response
def get_fake_inspect_image():
    """Canned payload for GET /images/*/json."""
    status_code = 200
    response = {
        'Id': FAKE_IMAGE_ID,
        'Parent': "27cf784147099545",
        'Created': "2013-03-23T22:24:18.818426-07:00",
        'Container': FAKE_CONTAINER_ID,
        'ContainerConfig':
        {
            "Hostname": "",
            "User": "",
            "Memory": 0,
            "MemorySwap": 0,
            "AttachStdin": False,
            "AttachStdout": False,
            "AttachStderr": False,
            "PortSpecs": "",
            "Tty": True,
            "OpenStdin": True,
            "StdinOnce": False,
            "Env": "",
            "Cmd": ["/bin/bash"],
            "Dns": "",
            "Image": "base",
            "Volumes": "",
            "VolumesFrom": "",
            "WorkingDir": ""
        },
        'Size': 6823592
    }
    return status_code, response
def get_fake_insert_image():
    """Canned payload for the (legacy) image insert endpoint."""
    return 200, {'StatusCode': 0}
def get_fake_wait():
    """Canned payload for POST /containers/*/wait (clean exit)."""
    return 200, {'StatusCode': 0}
def get_fake_logs():
    """Canned payload for GET /containers/*/logs.

    Each record is framed with the docker 8-byte stream header
    (stream type + big-endian payload length).
    """
    frames = (b'\x01\x00\x00\x00\x00\x00\x00\x11Flowering Nights\n'
              b'\x01\x00\x00\x00\x00\x00\x00\x10(Sakuya Iyazoi)\n')
    return 200, frames
def get_fake_diff():
    """Canned payload for GET /containers/*/changes."""
    return 200, [{'Path': '/test', 'Kind': 1}]
def get_fake_events():
    """Canned payload for GET /events (a single 'stop' event)."""
    event = {'status': 'stop', 'id': FAKE_CONTAINER_ID,
             'from': FAKE_IMAGE_ID, 'time': 1423247867}
    return 200, [event]
def get_fake_export():
    """Canned payload for GET /containers/*/export."""
    return 200, 'Byte Stream....'
def post_fake_exec_create():
    """Canned payload for POST /containers/*/exec."""
    return 200, {'Id': FAKE_EXEC_ID}
def post_fake_exec_start():
    """Canned payload for POST /exec/*/start (framed directory listing)."""
    frames = (b'\x01\x00\x00\x00\x00\x00\x00\x11bin\nboot\ndev\netc\n'
              b'\x01\x00\x00\x00\x00\x00\x00\x12lib\nmnt\nproc\nroot\n'
              b'\x01\x00\x00\x00\x00\x00\x00\x0csbin\nusr\nvar\n')
    return 200, frames
def post_fake_exec_resize():
    """Canned payload for POST /exec/*/resize (201 Created, empty body)."""
    return 201, ''
def get_fake_exec_inspect():
    """Canned payload for GET /exec/*/json."""
    # Reuses get_fake_inspect_container() so the embedded 'Container'
    # matches the shape of the container-inspect fake.
    return 200, {
        'OpenStderr': True,
        'OpenStdout': True,
        'Container': get_fake_inspect_container()[1],
        'Running': False,
        'ProcessConfig': {
            'arguments': ['hello world'],
            'tty': False,
            'entrypoint': 'echo',
            'privileged': False,
            'user': ''
        },
        'ExitCode': 0,
        'ID': FAKE_EXEC_ID,
        'OpenStdin': False
    }
def post_fake_stop_container():
    """Canned payload for POST /containers/*/stop."""
    return 200, {'Id': FAKE_CONTAINER_ID}
def post_fake_kill_container():
    """Canned payload for POST /containers/*/kill."""
    return 200, {'Id': FAKE_CONTAINER_ID}
def post_fake_pause_container():
    """Canned payload for POST /containers/*/pause."""
    return 200, {'Id': FAKE_CONTAINER_ID}
def post_fake_unpause_container():
    """Canned payload for POST /containers/*/unpause."""
    return 200, {'Id': FAKE_CONTAINER_ID}
def post_fake_restart_container():
    """Canned payload for POST /containers/*/restart."""
    return 200, {'Id': FAKE_CONTAINER_ID}
def post_fake_rename_container():
    """Canned payload for POST /containers/*/rename (204, no body)."""
    return 204, None
def delete_fake_remove_container():
    """Canned payload for DELETE /containers/*."""
    return 200, {'Id': FAKE_CONTAINER_ID}
def post_fake_image_create():
    """Canned payload for POST /images/create (pull)."""
    return 200, {'Id': FAKE_IMAGE_ID}
def delete_fake_remove_image():
    """Canned payload for DELETE /images/*."""
    return 200, {'Id': FAKE_IMAGE_ID}
def get_fake_get_image():
    """Canned payload for GET /images/*/get (tarball export)."""
    return 200, 'Byte Stream....'
def post_fake_load_image():
    """Canned payload for POST /images/load."""
    return 200, {'Id': FAKE_IMAGE_ID}
def post_fake_commit():
    """Canned payload for POST /commit."""
    return 200, {'Id': FAKE_CONTAINER_ID}
def post_fake_push():
    """Canned payload for POST /images/*/push."""
    return 200, {'Id': FAKE_IMAGE_ID}
def post_fake_build_container():
    """Canned payload for POST /build."""
    return 200, {'Id': FAKE_CONTAINER_ID}
def post_fake_tag_image():
    """Canned payload for POST /images/*/tag."""
    return 200, {'Id': FAKE_IMAGE_ID}
def get_fake_stats():
    """Canned payload for GET /containers/*/stats."""
    status_code = 200
    # The large stats document lives in fake_stat to keep this module short.
    response = fake_stat.OBJ
    return status_code, response
def get_fake_top():
    """Canned payload for GET /containers/*/top (one sleeping process)."""
    titles = ['UID', 'PID', 'PPID', 'C', 'STIME', 'TTY', 'TIME', 'CMD']
    processes = [
        ['root', '26501', '6907', '0', '10:32', 'pts/55', '00:00:00',
         'sleep 60'],
    ]
    return 200, {'Processes': processes, 'Titles': titles}
def get_fake_volume_list():
    """Canned payload for GET /volumes (two local volumes)."""
    def _volume(name):
        # Both fakes share the same driver/mountpoint scheme.
        return {
            'Name': name,
            'Driver': 'local',
            'Mountpoint': '/var/lib/docker/volumes/' + name,
            'Scope': 'local',
        }
    volumes = [_volume('perfectcherryblossom'),
               _volume('subterraneananimism')]
    return 200, {'Volumes': volumes}
def get_fake_volume():
    """Fake volume inspect: one local volume carrying a sample label."""
    volume = {
        'Name': 'perfectcherryblossom',
        'Driver': 'local',
        'Mountpoint': '/var/lib/docker/volumes/perfectcherryblossom',
        'Labels': {'com.example.some-label': 'some-value'},
        'Scope': 'local'
    }
    return 200, volume


def fake_remove_volume():
    """Fake volume removal: 204 with an empty body."""
    return 204, None
def post_fake_update_container():
    """Fake container update: success with an empty warning list."""
    status, body = 200, {'Warnings': []}
    return status, body


def post_fake_update_node():
    """Fake swarm node update: success with an empty body."""
    status, body = 200, None
    return status, body
def get_fake_network_list():
    """Fake GET /networks: a single default bridge network with one
    attached container."""
    endpoint = {
        "EndpointID": "ed2419a97c1d99",
        "MacAddress": "02:42:ac:11:00:02",
        "IPv4Address": "172.17.0.2/16",
        "IPv6Address": ""
    }
    bridge = {
        "Name": "bridge",
        "Id": FAKE_NETWORK_ID,
        "Scope": "local",
        "Driver": "bridge",
        "EnableIPv6": False,
        "Internal": False,
        "IPAM": {
            "Driver": "default",
            "Config": [{"Subnet": "172.17.0.0/16"}]
        },
        "Containers": {FAKE_CONTAINER_ID: endpoint},
        "Options": {
            "com.docker.network.bridge.default_bridge": "true",
            "com.docker.network.bridge.enable_icc": "true",
            "com.docker.network.bridge.enable_ip_masquerade": "true",
            "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0",
            "com.docker.network.bridge.name": "docker0",
            "com.docker.network.driver.mtu": "1500"
        }
    }
    return 200, [bridge]


def get_fake_network():
    """Fake network inspect: the first entry of the fake network list."""
    _, networks = get_fake_network_list()
    return 200, networks[0]


def post_fake_network():
    """Fake network create: 201 with the new network's id."""
    return 201, {"Id": FAKE_NETWORK_ID, "Warnings": []}


def delete_fake_network():
    """Fake network removal: 204 with an empty body."""
    return 204, None


def post_fake_network_connect():
    """Fake connect of a container to a network."""
    return 200, None


def post_fake_network_disconnect():
    """Fake disconnect of a container from a network."""
    return 200, None
# Maps real api url to fake response callback
# The prefix is the pseudo-scheme docker-py uses for the local daemon
# socket; it differs between unix sockets and Windows named pipes.
prefix = 'http+docker://localunixsocket'
if constants.IS_WINDOWS_PLATFORM:
    prefix = 'http+docker://localnpipe'

# Keys are either a plain URL string (matched regardless of HTTP method)
# or a (url, method) tuple; values are the zero-argument handlers above,
# each returning (status_code, body).
fake_responses = {
    '{0}/version'.format(prefix):
    get_fake_raw_version,
    '{1}/{0}/version'.format(CURRENT_VERSION, prefix):
    get_fake_version,
    '{1}/{0}/info'.format(CURRENT_VERSION, prefix):
    get_fake_info,
    '{1}/{0}/auth'.format(CURRENT_VERSION, prefix):
    post_fake_auth,
    '{1}/{0}/_ping'.format(CURRENT_VERSION, prefix):
    get_fake_ping,
    '{1}/{0}/images/search'.format(CURRENT_VERSION, prefix):
    get_fake_search,
    '{1}/{0}/images/json'.format(CURRENT_VERSION, prefix):
    get_fake_images,
    '{1}/{0}/images/test_image/history'.format(CURRENT_VERSION, prefix):
    get_fake_image_history,
    # NOTE(review): this images/create key is repeated further down with
    # post_fake_image_create, which silently overrides this entry -- the
    # post_fake_import_image handler is therefore never reachable. Confirm
    # which handler is intended before deduplicating.
    '{1}/{0}/images/create'.format(CURRENT_VERSION, prefix):
    post_fake_import_image,
    '{1}/{0}/containers/json'.format(CURRENT_VERSION, prefix):
    get_fake_containers,
    '{1}/{0}/containers/3cc2351ab11b/start'.format(CURRENT_VERSION, prefix):
    post_fake_start_container,
    '{1}/{0}/containers/3cc2351ab11b/resize'.format(CURRENT_VERSION, prefix):
    post_fake_resize_container,
    '{1}/{0}/containers/3cc2351ab11b/json'.format(CURRENT_VERSION, prefix):
    get_fake_inspect_container,
    '{1}/{0}/containers/3cc2351ab11b/rename'.format(CURRENT_VERSION, prefix):
    post_fake_rename_container,
    '{1}/{0}/images/e9aa60c60128/tag'.format(CURRENT_VERSION, prefix):
    post_fake_tag_image,
    '{1}/{0}/containers/3cc2351ab11b/wait'.format(CURRENT_VERSION, prefix):
    get_fake_wait,
    '{1}/{0}/containers/3cc2351ab11b/logs'.format(CURRENT_VERSION, prefix):
    get_fake_logs,
    '{1}/{0}/containers/3cc2351ab11b/changes'.format(CURRENT_VERSION, prefix):
    get_fake_diff,
    '{1}/{0}/containers/3cc2351ab11b/export'.format(CURRENT_VERSION, prefix):
    get_fake_export,
    '{1}/{0}/containers/3cc2351ab11b/update'.format(CURRENT_VERSION, prefix):
    post_fake_update_container,
    '{1}/{0}/containers/3cc2351ab11b/exec'.format(CURRENT_VERSION, prefix):
    post_fake_exec_create,
    '{1}/{0}/exec/d5d177f121dc/start'.format(CURRENT_VERSION, prefix):
    post_fake_exec_start,
    '{1}/{0}/exec/d5d177f121dc/json'.format(CURRENT_VERSION, prefix):
    get_fake_exec_inspect,
    '{1}/{0}/exec/d5d177f121dc/resize'.format(CURRENT_VERSION, prefix):
    post_fake_exec_resize,
    '{1}/{0}/containers/3cc2351ab11b/stats'.format(CURRENT_VERSION, prefix):
    get_fake_stats,
    '{1}/{0}/containers/3cc2351ab11b/top'.format(CURRENT_VERSION, prefix):
    get_fake_top,
    '{1}/{0}/containers/3cc2351ab11b/stop'.format(CURRENT_VERSION, prefix):
    post_fake_stop_container,
    '{1}/{0}/containers/3cc2351ab11b/kill'.format(CURRENT_VERSION, prefix):
    post_fake_kill_container,
    '{1}/{0}/containers/3cc2351ab11b/pause'.format(CURRENT_VERSION, prefix):
    post_fake_pause_container,
    '{1}/{0}/containers/3cc2351ab11b/unpause'.format(CURRENT_VERSION, prefix):
    post_fake_unpause_container,
    '{1}/{0}/containers/3cc2351ab11b/restart'.format(CURRENT_VERSION, prefix):
    post_fake_restart_container,
    '{1}/{0}/containers/3cc2351ab11b'.format(CURRENT_VERSION, prefix):
    delete_fake_remove_container,
    # Duplicate of the images/create key above -- this entry wins.
    '{1}/{0}/images/create'.format(CURRENT_VERSION, prefix):
    post_fake_image_create,
    '{1}/{0}/images/e9aa60c60128'.format(CURRENT_VERSION, prefix):
    delete_fake_remove_image,
    '{1}/{0}/images/e9aa60c60128/get'.format(CURRENT_VERSION, prefix):
    get_fake_get_image,
    '{1}/{0}/images/load'.format(CURRENT_VERSION, prefix):
    post_fake_load_image,
    '{1}/{0}/images/test_image/json'.format(CURRENT_VERSION, prefix):
    get_fake_inspect_image,
    '{1}/{0}/images/test_image/insert'.format(CURRENT_VERSION, prefix):
    get_fake_insert_image,
    '{1}/{0}/images/test_image/push'.format(CURRENT_VERSION, prefix):
    post_fake_push,
    '{1}/{0}/commit'.format(CURRENT_VERSION, prefix):
    post_fake_commit,
    '{1}/{0}/containers/create'.format(CURRENT_VERSION, prefix):
    post_fake_create_container,
    '{1}/{0}/build'.format(CURRENT_VERSION, prefix):
    post_fake_build_container,
    '{1}/{0}/events'.format(CURRENT_VERSION, prefix):
    get_fake_events,
    # Method-qualified entries: the same URL can behave differently per verb.
    ('{1}/{0}/volumes'.format(CURRENT_VERSION, prefix), 'GET'):
    get_fake_volume_list,
    ('{1}/{0}/volumes/create'.format(CURRENT_VERSION, prefix), 'POST'):
    get_fake_volume,
    ('{1}/{0}/volumes/{2}'.format(
        CURRENT_VERSION, prefix, FAKE_VOLUME_NAME
    ), 'GET'):
    get_fake_volume,
    ('{1}/{0}/volumes/{2}'.format(
        CURRENT_VERSION, prefix, FAKE_VOLUME_NAME
    ), 'DELETE'):
    fake_remove_volume,
    ('{1}/{0}/nodes/{2}/update?version=1'.format(
        CURRENT_VERSION, prefix, FAKE_NODE_ID
    ), 'POST'):
    post_fake_update_node,
    ('{1}/{0}/networks'.format(CURRENT_VERSION, prefix), 'GET'):
    get_fake_network_list,
    ('{1}/{0}/networks/create'.format(CURRENT_VERSION, prefix), 'POST'):
    post_fake_network,
    ('{1}/{0}/networks/{2}'.format(
        CURRENT_VERSION, prefix, FAKE_NETWORK_ID
    ), 'GET'):
    get_fake_network,
    ('{1}/{0}/networks/{2}'.format(
        CURRENT_VERSION, prefix, FAKE_NETWORK_ID
    ), 'DELETE'):
    delete_fake_network,
    ('{1}/{0}/networks/{2}/connect'.format(
        CURRENT_VERSION, prefix, FAKE_NETWORK_ID
    ), 'POST'):
    post_fake_network_connect,
    ('{1}/{0}/networks/{2}/disconnect'.format(
        CURRENT_VERSION, prefix, FAKE_NETWORK_ID
    ), 'POST'):
    post_fake_network_disconnect,
}
| 27.233766 | 78 | 0.614986 |
c6f13ef052168cfa7c584aa4b90c78a07962c5f4 | 11,053 | py | Python | recipes/folly/all/conanfile.py | Aypahyo/conan-center-index | c41d64960c66d3d81274d4189534f6fcb7bc4a36 | [
"MIT"
] | null | null | null | recipes/folly/all/conanfile.py | Aypahyo/conan-center-index | c41d64960c66d3d81274d4189534f6fcb7bc4a36 | [
"MIT"
] | null | null | null | recipes/folly/all/conanfile.py | Aypahyo/conan-center-index | c41d64960c66d3d81274d4189534f6fcb7bc4a36 | [
"MIT"
] | null | null | null | import os
from conans import ConanFile, CMake, tools
from conans.tools import Version
from conans.errors import ConanInvalidConfiguration
from conan.tools.microsoft import is_msvc
required_conan_version = ">=1.45.0"  # minimum Conan client version this recipe needs
class FollyConan(ConanFile):
    """Conan recipe for Folly, Facebook's open-source C++ component library."""

    name = "folly"
    description = "An open-source C++ components library developed and used at Facebook"
    topics = ("facebook", "components", "core", "efficiency")
    url = "https://github.com/conan-io/conan-center-index"
    homepage = "https://github.com/facebook/folly"
    license = "Apache-2.0"
    settings = "os", "arch", "compiler", "build_type"
    options = {"shared": [True, False], "fPIC": [True, False]}
    default_options = {"shared": False, "fPIC": True}
    exports_sources = ["CMakeLists.txt", "patches/*"]
    generators = "cmake", "cmake_find_package"
    _cmake = None  # cached CMake helper created lazily by _configure_cmake()

    @property
    def _source_subfolder(self):
        return "source_subfolder"

    @property
    def _minimum_cpp_standard(self):
        # Folly requires C++17 from the 2022.01.31.00 release onwards.
        return 17 if tools.Version(self.version) >= "2022.01.31.00" else 14

    @property
    def _minimum_compilers_version(self):
        # Oldest compiler versions able to build the required C++ standard.
        return {
            "Visual Studio": "15",
            "gcc": "5",
            "clang": "6",
            "apple-clang": "8",
        } if self._minimum_cpp_standard == 14 else {
            "gcc": "7",
            "Visual Studio": "16",
            "clang": "6",
            "apple-clang": "10",
        }

    def config_options(self):
        # fPIC is meaningless on Windows.
        if self.settings.os == "Windows":
            del self.options.fPIC

    def configure(self):
        # Shared builds are always position independent; drop the option.
        if self.options.shared:
            del self.options.fPIC

    def source(self):
        tools.get(**self.conan_data["sources"][self.version], destination=self._source_subfolder, strip_root=True)

    # FIXME: Freeze max. CMake version at 3.16.2 to fix the Linux build
    def build_requirements(self):
        self.build_requires("cmake/3.16.9")

    def requirements(self):
        self.requires("boost/1.76.0")
        self.requires("bzip2/1.0.8")
        self.requires("double-conversion/3.1.5")
        self.requires("gflags/2.2.2")
        self.requires("glog/0.4.0")
        self.requires("libevent/2.1.12")
        self.requires("openssl/1.1.1n")
        self.requires("lz4/1.9.3")
        self.requires("snappy/1.1.8")
        self.requires("zlib/1.2.11")
        self.requires("zstd/1.4.9")
        if not is_msvc(self):
            self.requires("libdwarf/20191104")
        self.requires("libsodium/1.0.18")
        self.requires("xz_utils/5.2.5")
        # FIXME: Causing compilation issues on clang: self.requires("jemalloc/5.2.1")
        if self.settings.os == "Linux":
            self.requires("libiberty/9.1.0")
            self.requires("libunwind/1.5.0")
        if Version(self.version) >= "2020.08.10.00":
            self.requires("fmt/7.0.3")

    @property
    def _required_boost_components(self):
        return ["context", "filesystem", "program_options", "regex", "system", "thread"]

    def validate(self):
        """Reject configurations Folly cannot be built with."""
        if self.settings.compiler.get_safe("cppstd"):
            tools.check_min_cppstd(self, self._minimum_cpp_standard)
        min_version = self._minimum_compilers_version.get(str(self.settings.compiler))
        if not min_version:
            self.output.warn("{} recipe lacks information about the {} compiler support.".format(self.name, self.settings.compiler))
        else:
            if tools.Version(self.settings.compiler.version) < min_version:
                raise ConanInvalidConfiguration("{} requires C++{} support. The current compiler {} {} does not support it.".format(
                    self.name, self._minimum_cpp_standard, self.settings.compiler, self.settings.compiler.version))

        # Fix: compare with tools.Version instead of a raw string comparison
        # so the version ordering is semantic, not lexicographic.
        if tools.Version(self.version) < "2022.01.31.00" and self.settings.os != "Linux":
            raise ConanInvalidConfiguration("Conan support for non-Linux platforms starts with Folly version 2022.01.31.00")

        if self.settings.os == "Macos" and self.settings.arch != "x86_64":
            raise ConanInvalidConfiguration("Conan currently requires a 64bit target architecture for Folly on Macos")

        if self.settings.os == "Windows" and self.settings.arch != "x86_64":
            raise ConanInvalidConfiguration("Folly requires a 64bit target architecture on Windows")

        if self.settings.os in ["Macos", "Windows"] and self.options.shared:
            raise ConanInvalidConfiguration("Folly could not be built on {} as shared library".format(self.settings.os))

        if self.version == "2020.08.10.00" and self.settings.compiler == "clang" and self.options.shared:
            raise ConanInvalidConfiguration("Folly could not be built by clang as a shared library")

        if self.options["boost"].header_only:
            raise ConanInvalidConfiguration("Folly could not be built with a header only Boost")

        miss_boost_required_comp = any(getattr(self.options["boost"], "without_{}".format(boost_comp), True) for boost_comp in self._required_boost_components)
        if miss_boost_required_comp:
            raise ConanInvalidConfiguration("Folly requires these boost components: {}".format(", ".join(self._required_boost_components)))
        # NOTE: the original recipe repeated the compiler min-version check
        # verbatim at the end of this method; the duplicate was removed.

    def _configure_cmake(self):
        # Build (once) and cache the CMake helper with all folly options set.
        if not self._cmake:
            self._cmake = CMake(self)
            if tools.cross_building(self):
                # Pre-seed try_run() results that cannot execute while
                # cross-building.
                self._cmake.definitions["FOLLY_HAVE_UNALIGNED_ACCESS_EXITCODE"] = "0"
                self._cmake.definitions["FOLLY_HAVE_UNALIGNED_ACCESS_EXITCODE__TRYRUN_OUTPUT"] = ""
                self._cmake.definitions["FOLLY_HAVE_LINUX_VDSO_EXITCODE"] = "0"
                self._cmake.definitions["FOLLY_HAVE_LINUX_VDSO_EXITCODE__TRYRUN_OUTPUT"] = ""
                self._cmake.definitions["FOLLY_HAVE_WCHAR_SUPPORT_EXITCODE"] = "0"
                self._cmake.definitions["FOLLY_HAVE_WCHAR_SUPPORT_EXITCODE__TRYRUN_OUTPUT"] = ""
                self._cmake.definitions["HAVE_VSNPRINTF_ERRORS_EXITCODE"] = "0"
                self._cmake.definitions["HAVE_VSNPRINTF_ERRORS_EXITCODE__TRYRUN_OUTPUT"] = ""
            self._cmake.definitions["CMAKE_POSITION_INDEPENDENT_CODE"] = self.options.get_safe("fPIC", True)
            self._cmake.definitions["CXX_STD"] = self.settings.compiler.get_safe("cppstd") or "c++{}".format(self._minimum_cpp_standard)
            if self.settings.compiler == "Visual Studio":
                self._cmake.definitions["MSVC_ENABLE_ALL_WARNINGS"] = False
                self._cmake.definitions["MSVC_USE_STATIC_RUNTIME"] = "MT" in self.settings.compiler.runtime
            self._cmake.configure()
        return self._cmake

    def build(self):
        for patch in self.conan_data.get("patches", {}).get(self.version, []):
            tools.patch(**patch)
        cmake = self._configure_cmake()
        cmake.build()

    def package(self):
        self.copy(pattern="LICENSE", dst="licenses", src=self._source_subfolder)
        cmake = self._configure_cmake()
        cmake.install()
        # Drop build-system leftovers that must not be packaged.
        tools.rmdir(os.path.join(self.package_folder, "lib", "cmake"))
        tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))

    def package_info(self):
        self.cpp_info.filenames["cmake_find_package"] = "folly"
        self.cpp_info.filenames["cmake_find_package_multi"] = "folly"
        self.cpp_info.names["cmake_find_package"] = "Folly"
        self.cpp_info.names["cmake_find_package_multi"] = "Folly"
        self.cpp_info.names["pkg_config"] = "libfolly"
        self.cpp_info.components["libfolly"].names["cmake_find_package"] = "folly"
        self.cpp_info.components["libfolly"].names["cmake_find_package_multi"] = "folly"
        self.cpp_info.components["libfolly"].names["pkg_config"] = "libfolly"

        # The set of produced libraries depends on the folly release (the
        # exception-tracer libraries only exist in newer Linux builds).
        if Version(self.version) == "2019.10.21.00":
            self.cpp_info.components["libfolly"].libs = [
                "follybenchmark",
                "folly_test_util",
                "folly"
            ]
        elif Version(self.version) >= "2020.08.10.00":
            if self.settings.os == "Linux":
                self.cpp_info.components["libfolly"].libs = [
                    "folly_exception_counter",
                    "folly_exception_tracer",
                    "folly_exception_tracer_base",
                    "folly_test_util",
                    "follybenchmark",
                    "folly"
                ]
            else:
                self.cpp_info.components["libfolly"].libs = [
                    "folly_test_util",
                    "follybenchmark",
                    "folly"
                ]

        self.cpp_info.components["libfolly"].requires = [
            "boost::boost",
            "bzip2::bzip2",
            "double-conversion::double-conversion",
            "gflags::gflags",
            "glog::glog",
            "libevent::libevent",
            "lz4::lz4",
            "openssl::openssl",
            "snappy::snappy",
            "zlib::zlib",
            "zstd::zstd",
            "libsodium::libsodium",
            "xz_utils::xz_utils"
        ]
        if not is_msvc(self):
            self.cpp_info.components["libfolly"].requires.append("libdwarf::libdwarf")
        if self.settings.os == "Linux":
            self.cpp_info.components["libfolly"].requires.extend(["libiberty::libiberty", "libunwind::libunwind"])
            self.cpp_info.components["libfolly"].system_libs.extend(["pthread", "dl", "rt"])
        if Version(self.version) >= "2020.08.10.00":
            self.cpp_info.components["libfolly"].requires.append("fmt::fmt")
        if self.settings.os == "Linux":
            self.cpp_info.components["libfolly"].defines.extend(["FOLLY_HAVE_ELF", "FOLLY_HAVE_DWARF"])
        elif self.settings.os == "Windows":
            self.cpp_info.components["libfolly"].system_libs.extend(["ws2_32", "iphlpapi", "crypt32"])

        # Some clang/libstdc++ and old apple-clang/libc++ combinations need
        # libatomic linked explicitly.
        if (self.settings.os == "Linux" and self.settings.compiler == "clang" and
                self.settings.compiler.libcxx == "libstdc++") or \
           (self.settings.os == "Macos" and self.settings.compiler == "apple-clang" and
                Version(self.settings.compiler.version.value) == "9.0" and self.settings.compiler.libcxx == "libc++"):
            self.cpp_info.components["libfolly"].system_libs.append("atomic")

        if self.settings.os == "Macos" and self.settings.compiler == "apple-clang" and Version(self.settings.compiler.version.value) >= "11.0":
            self.cpp_info.components["libfolly"].system_libs.append("c++abi")
330ce6bd51c33c7f01e7f6d11a2a6788f4dd6539 | 14,407 | py | Python | shared_code/pact/opg_pact/tests/test_ci_workflow.py | ministryofjustice/opg-data | 6b08f5a9928823909e22506a06a8a532496a5d75 | [
"RSA-MD"
] | null | null | null | shared_code/pact/opg_pact/tests/test_ci_workflow.py | ministryofjustice/opg-data | 6b08f5a9928823909e22506a06a8a532496a5d75 | [
"RSA-MD"
] | 3 | 2020-04-16T14:56:52.000Z | 2021-01-11T09:26:21.000Z | shared_code/pact/opg_pact/tests/test_ci_workflow.py | ministryofjustice/opg-data | 6b08f5a9928823909e22506a06a8a532496a5d75 | [
"RSA-MD"
] | 2 | 2020-12-18T10:59:42.000Z | 2021-04-11T06:26:07.000Z | import requests
import pytest
from pact_provider.check_pact_deployable import PactDeploymentCheck
provider_base_url = "http://local.mock:5000"
provider_custom_header = "Authorization: asdf1234567890"
pact_broker_url = "http://local.broker:9292"
broker_user_name = "admin"
broker_secret_name = "local" # pactbroker_admin
consumer_pacticipant = "OPGExampleApp"
provider_pacticipant = "OPGExampleAPI"
api_version = "v1"
git_commit_consumer = "x123456"
git_commit_consumer_new = "y123456"
git_commit_provider = "a123456"
git_commit_provider_new = "b123456"
broker_password = "password"
headers = {"Content-Type": "application/json"}
file = "contract.json"
file_new = "contract_new.json"
@pytest.fixture(autouse=True)
def mock_env_setup(monkeypatch):
    # Pin the API version for every test. NOTE(review): the
    # PactDeploymentCheck objects below are constructed at import time,
    # before this fixture ever runs -- confirm they read API_VERSION lazily.
    monkeypatch.setenv("API_VERSION", "v1")


# Checker for the current consumer/provider commit pair.
pact_check = PactDeploymentCheck(
    provider_base_url,
    provider_custom_header,
    pact_broker_url,
    broker_user_name,
    broker_secret_name,
    consumer_pacticipant,
    provider_pacticipant,
    api_version,
    git_commit_consumer,
    git_commit_provider,
)

# Same checker but pretending a newer consumer commit was pushed.
pact_check_new_consumer = PactDeploymentCheck(
    provider_base_url,
    provider_custom_header,
    pact_broker_url,
    broker_user_name,
    broker_secret_name,
    consumer_pacticipant,
    provider_pacticipant,
    api_version,
    git_commit_consumer_new,
    git_commit_provider,
)

# Same checker but pretending a newer provider commit was pushed.
pact_check_new_provider = PactDeploymentCheck(
    provider_base_url,
    provider_custom_header,
    pact_broker_url,
    broker_user_name,
    broker_secret_name,
    consumer_pacticipant,
    provider_pacticipant,
    api_version,
    git_commit_consumer,
    git_commit_provider_new,
)
def create_pact(
    git_commit_consumer,
    provider_pacticipant,
    consumer_pacticipant,
    file,
    broker_user_name="admin",
    broker_password="password",
    headers=None,
):
    """Publish the pact contract in *file* to the broker for the given
    consumer version.

    Returns True when the broker answered with a success status.
    """
    # Fix: a mutable dict default argument is shared between all calls;
    # use a None sentinel instead (behaviour otherwise unchanged).
    if headers is None:
        headers = {"Content-Type": "application/json"}
    full_url = f"{pact_broker_url}/pacts/provider/{provider_pacticipant}/consumer/{consumer_pacticipant}/version/{git_commit_consumer}"
    # Fix: the original passed data=open(file, "rb") and never closed the
    # handle; the context manager guarantees it is released.
    with open(file, "rb") as contract:
        pact_response = requests.put(
            full_url,
            data=contract,
            auth=(broker_user_name, broker_password),
            headers=headers,
        )
    return pact_response.status_code < 399
def delete_pact(
    consumer_pacticipant,
    provider_pacticipant,
    broker_user_name="admin",
    broker_password="password",
    headers=None,
):
    """Delete both pacticipants (and their pacts) from the broker.

    Returns True only when both DELETE requests succeeded.
    """
    # Fix: replace the shared mutable dict default with a None sentinel.
    if headers is None:
        headers = {"Content-Type": "application/json"}
    auth = (broker_user_name, broker_password)
    all_deleted = True
    for pacticipant in (consumer_pacticipant, provider_pacticipant):
        response = requests.delete(
            f"{pact_broker_url}/pacticipants/{pacticipant}",
            auth=auth,
            headers=headers,
        )
        all_deleted = all_deleted and response.status_code < 399
    return all_deleted
def _put_tag(
    pacticipant,
    version,
    tag,
    broker_user_name,
    broker_password,
    headers,
):
    """PUT *tag* onto *version* of *pacticipant*; True on HTTP success."""
    # Shared implementation for the three public tag helpers below; also
    # fixes their shared-mutable-dict default argument.
    if headers is None:
        headers = {"Content-Type": "application/json"}
    response = requests.put(
        f"{pact_broker_url}/pacticipants/{pacticipant}/versions/{version}/tags/{tag}",
        auth=(broker_user_name, broker_password),
        headers=headers,
    )
    return response.status_code < 399


def tag_consumer_pact(
    git_commit_consumer,
    consumer_pacticipant,
    broker_user_name="admin",
    broker_password="password",
    headers=None,
):
    """Tag the consumer version as 'v1' (candidate for verification)."""
    return _put_tag(consumer_pacticipant, git_commit_consumer, "v1",
                    broker_user_name, broker_password, headers)


def tag_prod_consumer_pact(
    git_commit_consumer,
    consumer_pacticipant,
    broker_user_name="admin",
    broker_password="password",
    headers=None,
):
    """Tag the consumer version as 'v1_production' (deployed to prod)."""
    return _put_tag(consumer_pacticipant, git_commit_consumer, "v1_production",
                    broker_user_name, broker_password, headers)


def tag_prod_provider_pact(
    git_commit_provider,
    provider_pacticipant,
    broker_user_name="admin",
    broker_password="password",
    headers=None,
):
    """Tag the provider version as 'v1_production' (deployed to prod)."""
    return _put_tag(provider_pacticipant, git_commit_provider, "v1_production",
                    broker_user_name, broker_password, headers)
# PROVIDER SIDE WITH NO EXISTING CONSUMER
@pytest.mark.pact_test
def test_provider_no_consumer():
    # With no consumer pact published, the provider verification has
    # nothing to verify and must fail the build.
    delete_pact(consumer_pacticipant, provider_pacticipant)
    message, fail_build, actual_message = pact_check.provider_can_i_deploy()
    assert message == "Failure! No verification processed"
    assert fail_build


# PROVIDER SIDE WITH CONSUMER NO TAG
@pytest.mark.pact_test
def test_provider_consumer_no_tag():
    # A published but untagged consumer pact is invisible to the
    # tag-filtered provider verification, so it still fails.
    delete_pact(consumer_pacticipant, provider_pacticipant)
    create_pact(git_commit_consumer, provider_pacticipant, consumer_pacticipant, file)
    message, fail_build, actual_message = pact_check.provider_can_i_deploy()
    assert message == "Failure! No verification processed"
    assert fail_build


# PROVIDER SIDE WITH CONSUMER WITH V1 TAG
@pytest.mark.pact_test
def test_provider_consumer_v1():
    # A v1-tagged (but not production-tagged) consumer pact verifies, with
    # a warning that the check ran against a non-production tag.
    delete_pact(consumer_pacticipant, provider_pacticipant)
    create_pact(git_commit_consumer, provider_pacticipant, consumer_pacticipant, file)
    tag_consumer_pact(git_commit_consumer, consumer_pacticipant)
    message, fail_build, actual_message = pact_check.provider_can_i_deploy()
    assert (
        message
        == "Provider Side 'Can I Deploy' Successful but against non production tag"
    )
    assert not fail_build


# PROVIDER SIDE WITH CONSUMER WITH V1_PRODUCTION TAG
@pytest.mark.pact_test
def test_provider_consumer_v1_production():
    # With the consumer tagged as deployed to production, the provider's
    # can-i-deploy succeeds cleanly.
    delete_pact(consumer_pacticipant, provider_pacticipant)
    create_pact(git_commit_consumer, provider_pacticipant, consumer_pacticipant, file)
    tag_consumer_pact(git_commit_consumer, consumer_pacticipant)
    tag_prod_consumer_pact(git_commit_consumer, consumer_pacticipant)
    message, fail_build, actual_message = pact_check.provider_can_i_deploy()
    assert message == "Provider Side 'Can I Deploy' Successful"
    assert not fail_build
# CONSUMER SIDE WITH NO EXISTING PROVIDER
@pytest.mark.pact_test
def test_consumer_no_provider():
    # No provider version has ever been verified/tagged, so the consumer
    # cannot deploy.
    delete_pact(consumer_pacticipant, provider_pacticipant)
    create_pact(git_commit_consumer, provider_pacticipant, consumer_pacticipant, file)
    tag_consumer_pact(git_commit_consumer, consumer_pacticipant)
    message, fail_build, actual_message = pact_check.consumer_can_i_deploy()
    assert (
        message
        == "Consumer Side 'Can I Deploy' Failed! No matching provider pact with v1_production tag!"
    )
    assert fail_build


# CONSUMER SIDE V1 TAG PROVIDER SIDE V1 TAG (not possible for provider to not have tag)
@pytest.mark.pact_test
def test_consumer_v1_provider_v1():
    # The provider verified but was never tagged v1_production, so the
    # consumer still cannot deploy.
    delete_pact(consumer_pacticipant, provider_pacticipant)
    create_pact(git_commit_consumer, provider_pacticipant, consumer_pacticipant, file)
    tag_consumer_pact(git_commit_consumer, consumer_pacticipant)
    pact_check.provider_can_i_deploy()
    message, fail_build, actual_message = pact_check.consumer_can_i_deploy()
    assert (
        message
        == "Consumer Side 'Can I Deploy' Failed! No matching provider pact with v1_production tag!"
    )
    assert fail_build


# CONSUMER SIDE TAG V1 WITH PROVIDER TAG V1_PRODUCTION
@pytest.mark.pact_test
def test_consumer_v1_provider_v1_production():
    # Verified provider tagged v1_production: the consumer may deploy.
    delete_pact(consumer_pacticipant, provider_pacticipant)
    create_pact(git_commit_consumer, provider_pacticipant, consumer_pacticipant, file)
    tag_consumer_pact(git_commit_consumer, consumer_pacticipant)
    pact_check.provider_can_i_deploy()
    tag_prod_provider_pact(git_commit_provider, provider_pacticipant)
    message, fail_build, actual_message = pact_check.consumer_can_i_deploy()
    assert message == "Consumer Side 'Can I Deploy' Successful"
    assert not fail_build
# CONSUMER SIDE CREATES WORKING PACT THEN CHANGES PACT SPEC
@pytest.mark.pact_test
def test_working_to_broken():
    # Establish a fully deployable pact first ...
    delete_pact(consumer_pacticipant, provider_pacticipant)
    create_pact(git_commit_consumer, provider_pacticipant, consumer_pacticipant, file)
    tag_consumer_pact(git_commit_consumer, consumer_pacticipant)
    pact_check.provider_can_i_deploy()
    tag_prod_provider_pact(git_commit_provider, provider_pacticipant)
    message, fail_build, actual_message = pact_check.consumer_can_i_deploy()
    assert message == "Consumer Side 'Can I Deploy' Successful"
    assert not fail_build
    # ... then push an incompatible contract from a newer consumer commit;
    # that commit must not be deployable against the production provider.
    create_pact(
        git_commit_consumer_new, provider_pacticipant, consumer_pacticipant, file_new
    )
    tag_consumer_pact(git_commit_consumer_new, consumer_pacticipant)
    (
        message,
        fail_build,
        actual_message,
    ) = pact_check_new_consumer.consumer_can_i_deploy()
    assert message == "Consumer Side 'Can I Deploy' Failed!"
    assert (
        actual_message
        == "The verification for the pact between version y123456 of OPGExampleApp and the latest version of OPGExampleAPI with tag v1_production (a123456) failed"
    )


# CONSUMER CREATES WORKING PACT THEN COMMITS SAME PACT SPEC
@pytest.mark.pact_test
def test_repush_same_pact():
    # Establish a deployable pact ...
    delete_pact(consumer_pacticipant, provider_pacticipant)
    create_pact(git_commit_consumer, provider_pacticipant, consumer_pacticipant, file)
    tag_consumer_pact(git_commit_consumer, consumer_pacticipant)
    pact_check.provider_can_i_deploy()
    tag_prod_provider_pact(git_commit_provider, provider_pacticipant)
    message, fail_build, actual_message = pact_check.consumer_can_i_deploy()
    assert message == "Consumer Side 'Can I Deploy' Successful"
    assert not fail_build
    # ... then republish the identical contract from a newer consumer
    # commit; the existing verification carries over and deploy succeeds.
    create_pact(
        git_commit_consumer_new, provider_pacticipant, consumer_pacticipant, file
    )
    tag_consumer_pact(git_commit_consumer_new, consumer_pacticipant)
    (
        message,
        fail_build,
        actual_message,
    ) = pact_check_new_consumer.consumer_can_i_deploy()
    assert message == "Consumer Side 'Can I Deploy' Successful"
    assert (
        actual_message
        == "All required verification results are published and successful"
    )
# CONSUMER CREATES NON WORKING PACT THEN WORKING PACT THEN PROVIDER TRIES TO DEPLOY
@pytest.mark.pact_test
def test_latest_working_pact():
    # First consumer commit publishes a broken contract ...
    delete_pact(consumer_pacticipant, provider_pacticipant)
    create_pact(
        git_commit_consumer, provider_pacticipant, consumer_pacticipant, file_new
    )
    tag_consumer_pact(git_commit_consumer, consumer_pacticipant)
    pact_check.provider_can_i_deploy()
    tag_prod_provider_pact(git_commit_provider, provider_pacticipant)
    message, fail_build, actual_message = pact_check.consumer_can_i_deploy()
    assert message == "Consumer Side 'Can I Deploy' Failed!"
    # ... a later consumer commit publishes the working contract, so both
    # the new consumer and a new provider commit become deployable.
    create_pact(
        git_commit_consumer_new, provider_pacticipant, consumer_pacticipant, file
    )
    tag_consumer_pact(git_commit_consumer_new, consumer_pacticipant)
    (
        message,
        fail_build,
        actual_message,
    ) = pact_check_new_consumer.consumer_can_i_deploy()
    assert message == "Consumer Side 'Can I Deploy' Successful"
    assert (
        actual_message
        == "All required verification results are published and successful"
    )
    (
        message,
        fail_build,
        actual_message,
    ) = pact_check_new_provider.provider_can_i_deploy()
    assert (
        message
        == "Provider Side 'Can I Deploy' Successful but against non production tag"
    )
    assert (
        actual_message
        == "All required verification results are published and successful"
    )


# CONSUMER CREATES WORKING PACT THEN NON WORKING THEN PROVIDER TRIES TO DEPLOY
@pytest.mark.pact_test
def test_latest_broken_pact():
    # Working contract first ...
    delete_pact(consumer_pacticipant, provider_pacticipant)
    create_pact(git_commit_consumer, provider_pacticipant, consumer_pacticipant, file)
    tag_consumer_pact(git_commit_consumer, consumer_pacticipant)
    pact_check.provider_can_i_deploy()
    tag_prod_provider_pact(git_commit_provider, provider_pacticipant)
    message, fail_build, actual_message = pact_check.consumer_can_i_deploy()
    assert message == "Consumer Side 'Can I Deploy' Successful"
    # ... then a breaking contract on the latest consumer commit: both the
    # new consumer and the provider verification must fail.
    create_pact(
        git_commit_consumer_new, provider_pacticipant, consumer_pacticipant, file_new
    )
    tag_consumer_pact(git_commit_consumer_new, consumer_pacticipant)
    (
        message,
        fail_build,
        actual_message,
    ) = pact_check_new_consumer.consumer_can_i_deploy()
    assert message == "Consumer Side 'Can I Deploy' Failed!"
    message, fail_build, actual_message = pact_check.provider_can_i_deploy()
    assert message == "Failure! 1 interaction, 1 failure"
    assert actual_message == "Failed Verification Step"


# CONSUMER CREATES WORKING PACT IN PROD BUT NON WORKING IN BRANCH THEN PROVIDER TRIES TO DEPLOY
@pytest.mark.pact_test
def test_old_working_prod_pact():
    # Working contract deployed to production ...
    delete_pact(consumer_pacticipant, provider_pacticipant)
    create_pact(git_commit_consumer, provider_pacticipant, consumer_pacticipant, file)
    tag_consumer_pact(git_commit_consumer, consumer_pacticipant)
    pact_check.provider_can_i_deploy()
    tag_prod_provider_pact(git_commit_provider, provider_pacticipant)
    message, fail_build, actual_message = pact_check.consumer_can_i_deploy()
    assert message == "Consumer Side 'Can I Deploy' Successful"
    tag_prod_consumer_pact(git_commit_consumer, consumer_pacticipant)
    # ... a branch commit publishes a broken contract: that branch cannot
    # deploy, but the provider (checked against production) still can.
    create_pact(
        git_commit_consumer_new, provider_pacticipant, consumer_pacticipant, file_new
    )
    tag_consumer_pact(git_commit_consumer_new, consumer_pacticipant)
    (
        message,
        fail_build,
        actual_message,
    ) = pact_check_new_consumer.consumer_can_i_deploy()
    assert message == "Consumer Side 'Can I Deploy' Failed!"
    message, fail_build, actual_message = pact_check.provider_can_i_deploy()
    assert message == "Provider Side 'Can I Deploy' Successful"
    assert (
        actual_message
        == "All required verification results are published and successful"
    )
| 35.927681 | 163 | 0.758104 |
3c76ae542a7cf79d14381a43a1af70207fab4ddf | 11,553 | py | Python | test/main.py | ghl1024/RedisShake | 0d72f6e6687d28f4b2218609d12f3a5bbf2ad0f0 | [
"MIT"
] | 2,224 | 2019-03-19T09:49:45.000Z | 2022-03-31T10:10:20.000Z | test/main.py | ghl1024/RedisShake | 0d72f6e6687d28f4b2218609d12f3a5bbf2ad0f0 | [
"MIT"
] | 313 | 2019-03-20T12:43:44.000Z | 2022-03-31T01:48:13.000Z | test/main.py | ghl1024/RedisShake | 0d72f6e6687d28f4b2218609d12f3a5bbf2ad0f0 | [
"MIT"
] | 460 | 2019-03-20T07:31:42.000Z | 2022-03-31T10:31:44.000Z | import os
import shutil
import time
import redis
import redistrib.command
import requests
from colorama import Fore, Style
import launcher
def green_print(string):
    """Print *string* to stdout in green, resetting the color afterwards."""
    text = str(string)
    print(f"{Fore.GREEN}{text}{Style.RESET_ALL}")
def wait():
    # Block the current thread forever (handy to keep the spawned servers
    # alive for manual inspection); only an external interrupt breaks out.
    while True:
        time.sleep(1024)
DIR = "." # RedisShake/test
BASE_CONF_PATH = "../conf/redis-shake.conf"
SHAKE_EXE = "../bin/redis-shake.darwin"
USED_PORT = []
METRIC_URL = "http://127.0.0.1:9320/metric"
def get_port():
    """Return a free TCP port number (>= 20000).

    Ports currently in LISTEN state (per ``netstat``) and ports already
    handed out via the module-level ``USED_PORT`` list are skipped; the
    chosen port is appended to ``USED_PORT`` so later calls never reuse it.
    """
    cmd = "netstat -ntl |grep -v Active| grep -v Proto|awk '{print $4}'|awk -F: '{print $NF}'"
    proc = os.popen(cmd).read()
    # Bug fix: the original stored the ports as *strings*, so the integer
    # membership test below could never match and listening ports were not
    # actually avoided. Parse them into ints (ignoring blank lines).
    proc_ports = {int(p) for p in proc.split("\n") if p.strip().isdigit()}
    port = 20000
    while port in proc_ports or port in USED_PORT:
        port += 1
    USED_PORT.append(port)
    return port
def get_work_dir(port):
    """Create and return ``<DIR>/tmp/<port>``, recreating it if it exists."""
    base = f"{DIR}/tmp"
    os.makedirs(base, exist_ok=True)
    work_dir = f"{base}/{port}"
    if os.path.exists(work_dir):
        shutil.rmtree(work_dir)
    os.makedirs(work_dir)
    return work_dir
def test_work_dir():
    """Smoke test: creating a work dir and printing its path must not raise."""
    path = get_work_dir(1234)
    print(path)
def load_conf(file_path):
    """Parse a redis-shake style ``key = value`` config file into a dict.

    Blank lines and lines starting with ``#`` are skipped; whitespace
    around keys and values is stripped.
    """
    conf = {}
    with open(file_path, "r") as fp:
        for line in fp:
            line = line.strip()
            if line.startswith('#') or line == "":
                continue
            # Split on the first '=' only, so values may themselves contain
            # '=' (passwords, URLs); the original unbounded split raised
            # ValueError on such lines.
            key, val = line.split('=', 1)
            conf[key.strip()] = val.strip()
    return conf
def save_conf(conf, file_path):
    """Write *conf* to *file_path* as one ``key=value`` line per mapping."""
    lines = [f"{k}={v}\n" for k, v in conf.items()]
    with open(file_path, "w") as fp:
        fp.writelines(lines)
class Redis:
    """A redis-server child process plus, after wait_start(), a client to it."""

    def __init__(self, port, work_dir, cluster_enable=False):
        # NOTE(review): "--cluster-enabled yes" is passed as a single argv
        # element, unlike the other flag/value pairs -- confirm that
        # launcher.Launcher splits or tolerates it.
        if cluster_enable:
            self.server = launcher.Launcher(
                ["redis-server", "--logfile", "redis.log", "--port", str(port), "--cluster-enabled yes"], work_dir)
        else:
            self.server = launcher.Launcher(["redis-server", "--logfile", "redis.log", "--port", str(port)], work_dir)
        self.server.fire()
        self.client = None  # populated by wait_start() once the server is ready
        self.port = port
        self.work_dir = work_dir

    def wait_start(self):
        # Busy-wait for the server's log file to appear and report
        # readiness, then attach a redis client.
        log_file = f"{self.work_dir}/redis.log"
        while not os.path.exists(log_file):
            time.sleep(0.3)
        with open(log_file, "r") as f:
            while "Ready to accept connections" not in f.readline():
                time.sleep(0.1)
        self.client = redis.Redis(port=self.port)
        print(f"Redis start at {self.port}.")

    def stop(self):
        # Terminate the underlying redis-server process.
        self.server.stop()
def get_redis():
    """Launch one standalone redis-server and return it once it accepts connections."""
    new_port = get_port()
    instance = Redis(new_port, get_work_dir(f"redis_{new_port}"))
    instance.wait_start()
    return instance
def get_cluster_redis(num):
    """Launch *num* cluster-enabled redis-server processes.

    All servers are fired first and only then waited on, so they boot
    concurrently.

    :param num: how many nodes to start
    :return: ``(port_list, r_list)`` — ports and Redis wrappers, same order
    """
    port_list = []
    r_list = []
    for _ in range(num):
        node_port = get_port()
        node = Redis(node_port, get_work_dir(f"redis_cluster_{node_port}"), cluster_enable=True)
        r_list.append(node)
        port_list.append(node_port)
    for node in r_list:
        node.wait_start()
    return port_list, r_list
def test_sync_standalone2standalone():
    """End-to-end sync test: standalone Redis -> standalone Redis.

    Both instances are pre-populated with 1024 keys under distinct prefixes;
    after a full sync the target must hold the union (2048 keys).
    """
    r1 = get_redis()
    r2 = get_redis()
    # 1024 keys of 1024 bytes each, prefixed per instance so the key sets of
    # source and target do not overlap.
    r1.client.execute_command(f"DEBUG POPULATE 1024 prefix_{r1.port} 1024")
    r2.client.execute_command(f"DEBUG POPULATE 1024 prefix_{r2.port} 1024")
    conf = load_conf(BASE_CONF_PATH)
    conf["source.address"] = f"127.0.0.1:{r1.port}"
    conf["target.address"] = f"127.0.0.1:{r2.port}"
    conf["source.password_raw"] = ""
    conf["target.password_raw"] = ""
    work_dir = get_work_dir("sync_standalone2standalone")
    conf_path = f"{work_dir}/redis-shake.conf"
    save_conf(conf, conf_path)
    shake = launcher.Launcher([SHAKE_EXE, "-conf", "redis-shake.conf", "-type", "sync"], work_dir)
    shake.fire()
    time.sleep(3)  # give redis-shake time to complete the full-sync phase
    ret = requests.get(METRIC_URL)
    assert ret.json()[0]["FullSyncProgress"] == 100
    print("sync successful!")
    source_cnt = int(r1.client.execute_command("dbsize"))
    target_cnt = int(r2.client.execute_command("dbsize"))
    print(f"source_cnt: {source_cnt}, target_cnt: {target_cnt}")
    # target ends with its own 1024 keys plus the 1024 synced source keys
    assert source_cnt == target_cnt / 2 == 1024
    r1.stop()
    r2.stop()
    shake.stop()
# DEBUG POPULATE count [prefix] [size]
def test_sync_cluster2cluster():
    """End-to-end sync test: 3-node Redis cluster -> 3-node Redis cluster."""
    # redis start
    port_list, r_list = get_cluster_redis(6)
    print(f"redis cluster nodes:", port_list)
    # populate data
    for r in r_list:
        r.client.execute_command(f"DEBUG POPULATE 1024 prefix_{r.port} 1024")
    # the first three nodes form the source cluster, the last three the target
    redistrib.command.create([('127.0.0.1', port_list[0]),
                              ('127.0.0.1', port_list[1]),
                              ('127.0.0.1', port_list[2])], max_slots=16384)
    print(f"redis cluster source:", port_list[:3])
    redistrib.command.create([('127.0.0.1', port_list[3]),
                              ('127.0.0.1', port_list[4]),
                              ('127.0.0.1', port_list[5])], max_slots=16384)
    print(f"redis cluster target:", port_list[3:])
    conf = load_conf(BASE_CONF_PATH)
    conf["source.type"] = f"cluster"
    conf["source.address"] = f"127.0.0.1:{port_list[0]};127.0.0.1:{port_list[1]};127.0.0.1:{port_list[2]}"
    conf["source.password_raw"] = ""
    conf["target.type"] = f"cluster"
    conf["target.address"] = f"127.0.0.1:{port_list[3]};127.0.0.1:{port_list[4]};127.0.0.1:{port_list[5]}"
    conf["target.password_raw"] = ""
    conf["target.dbmap"] = ""
    conf["key_exists"] = "rewrite"
    work_dir = get_work_dir("sync_cluster2cluster")
    conf_path = f"{work_dir}/redis-shake.conf"
    save_conf(conf, conf_path)
    shake = launcher.Launcher([SHAKE_EXE, "-conf", "redis-shake.conf", "-type", "sync"], work_dir)
    shake.fire()
    time.sleep(3)  # give redis-shake time to complete the full-sync phase
    ret = requests.get(METRIC_URL)
    assert ret.json()[0]["FullSyncProgress"] == 100
    print("sync successful!")
    source_cnt = 0
    for r in r_list[:3]:
        source_cnt += int(r.client.execute_command("dbsize"))
    target_cnt = 0
    for r in r_list[3:]:
        target_cnt += int(r.client.execute_command("dbsize"))
    print(f"source_cnt: {source_cnt}, target_cnt: {target_cnt}")
    # each cluster starts with 3*1024 keys; the target ends with its own plus
    # the synced source keys
    assert source_cnt == target_cnt / 2 == 1024 * 3
    for r in r_list:
        r.stop()
    shake.stop()
def test_sync_standalone2cluster():
    """End-to-end sync test: standalone Redis -> 3-node Redis cluster."""
    r = get_redis()
    r.client.execute_command(f"DEBUG POPULATE 1024 prefix_{r.port} 1024")
    port_list, r_list = get_cluster_redis(3)
    # pre-populate each target node so the final count check can verify that
    # existing keys are preserved alongside the synced ones
    for r_ in r_list:
        r_.client.execute_command(f"DEBUG POPULATE 1024 prefix_{r_.port} 1024")
    print(f"redis source:", r.port)
    redistrib.command.create([('127.0.0.1', port_list[0]),
                              ('127.0.0.1', port_list[1]),
                              ('127.0.0.1', port_list[2])], max_slots=16384)
    print(f"redis cluster target:", port_list)
    conf = load_conf(BASE_CONF_PATH)
    conf["source.type"] = f"standalone"
    conf["source.address"] = f"127.0.0.1:{r.port}"
    conf["source.password_raw"] = ""
    conf["target.type"] = f"cluster"
    conf["target.address"] = f"127.0.0.1:{port_list[0]};127.0.0.1:{port_list[1]};127.0.0.1:{port_list[2]}"
    conf["target.password_raw"] = ""
    conf["target.dbmap"] = ""
    conf["key_exists"] = "rewrite"
    work_dir = get_work_dir("sync_standalone2cluster")
    conf_path = f"{work_dir}/redis-shake.conf"
    save_conf(conf, conf_path)
    shake = launcher.Launcher([SHAKE_EXE, "-conf", "redis-shake.conf", "-type", "sync"], work_dir)
    shake.fire()
    time.sleep(3)  # give redis-shake time to complete the full-sync phase
    ret = requests.get(METRIC_URL)
    assert ret.json()[0]["FullSyncProgress"] == 100
    print("sync successful!")
    source_cnt = int(r.client.execute_command("dbsize"))
    target_cnt = 0
    for r_ in r_list:
        target_cnt += int(r_.client.execute_command("dbsize"))
    print(f"source_cnt: {source_cnt}, target_cnt: {target_cnt}")
    # target cluster holds its own 3*1024 keys plus the 1024 synced keys
    assert source_cnt == target_cnt / 4 == 1024
    r.stop()
    for r_ in r_list:
        r_.stop()
    shake.stop()
def action_sync_standalone2standalone_bigdata():
    """Prepare (but do not run) a big-data standalone->standalone sync.

    Starts a heavily populated source and an empty target, writes a
    ready-to-use redis-shake.conf, then blocks forever so redis-shake can be
    launched by hand against it.
    """
    source = get_redis()
    target = get_redis()
    source.client.execute_command(f"DEBUG POPULATE 1000000 prefix_{source.port} 10")  # ~4GB RAM
    conf = load_conf(BASE_CONF_PATH)
    conf.update({
        "source.address": f"127.0.0.1:{source.port}",
        "target.address": f"127.0.0.1:{target.port}",
        "source.password_raw": "",
        "target.password_raw": "",
        "key_exists": "rewrite",
    })
    work_dir = get_work_dir("action_sync_standalone2standalone_bigdata")
    save_conf(conf, f"{work_dir}/redis-shake.conf")
    print("need run redis-shake manually, and command+c to shutdown main.py")
    wait()
def action_sync_standalone2cluster():
    """Prepare (but do not run) a standalone -> cluster sync for manual testing.

    Starts the servers and writes a ready-to-use redis-shake.conf, then
    blocks forever so redis-shake can be launched by hand against it.
    """
    r = get_redis()
    port_list, r_list = get_cluster_redis(3)
    print(f"redis source:", r.port)
    redistrib.command.create([('127.0.0.1', port_list[0]),
                              ('127.0.0.1', port_list[1]),
                              ('127.0.0.1', port_list[2])], max_slots=16384)
    print(f"redis cluster target:", port_list)
    conf = load_conf(BASE_CONF_PATH)
    conf["source.type"] = f"standalone"
    conf["source.address"] = f"127.0.0.1:{r.port}"
    conf["source.password_raw"] = ""
    conf["target.type"] = f"cluster"
    conf["target.address"] = f"127.0.0.1:{port_list[0]};127.0.0.1:{port_list[1]};127.0.0.1:{port_list[2]}"
    conf["target.password_raw"] = ""
    conf["target.dbmap"] = ""
    conf["key_exists"] = "rewrite"
    work_dir = get_work_dir("action_sync_standalone2cluster")
    conf_path = f"{work_dir}/redis-shake.conf"
    save_conf(conf, conf_path)
    print("need run redis-shake manually, and command+c to shutdown main.py")
    wait()
def test_sync_select_db(target_db=-1):
    """Verify syncing of non-default source DBs.

    With ``target_db == -1`` (the redis-shake default) keys keep their source
    DB number on the target; otherwise everything lands in ``target_db``.
    """
    r1 = get_redis()
    r2 = get_redis()
    # seed keys 0..9 into source DB 1 before starting the sync
    r1.client.execute_command("select", "1")
    for i in range(10):
        r1.client.set(str(i), "v")
    conf = load_conf(BASE_CONF_PATH)
    conf["source.address"] = f"127.0.0.1:{r1.port}"
    conf["target.address"] = f"127.0.0.1:{r2.port}"
    conf["source.password_raw"] = ""
    conf["target.password_raw"] = ""
    conf["target.db"] = target_db
    work_dir = get_work_dir("test_sync_select_db_with_target_db")
    conf_path = f"{work_dir}/redis-shake.conf"
    save_conf(conf, conf_path)
    shake = launcher.Launcher([SHAKE_EXE, "-conf", "redis-shake.conf", "-type", "sync"], work_dir)
    shake.fire()
    time.sleep(3)  # give redis-shake time to complete the full-sync phase
    ret = requests.get(METRIC_URL)
    assert ret.json()[0]["FullSyncProgress"] == 100
    # write keys 10..19 during the incremental phase, from a second source DB
    r1.client.execute_command("select", "2" if target_db == -1 else target_db)
    for i in range(10, 20):
        r1.client.set(str(i), "v20")
    time.sleep(1)  # allow the incremental change to propagate
    # verify the full-sync keys (source DB 1 -> DB 1 or target_db)
    r2.client.execute_command("select", "1" if target_db == -1 else target_db)
    for i in range(10):
        assert r2.client.get(str(i)) == b'v'
    # verify the incremental keys (source DB 2 -> DB 2 or target_db)
    r2.client.execute_command("select", "2" if target_db == -1 else target_db)
    for i in range(10, 20):
        assert r2.client.get(str(i)) == b'v20'
    print("sync successful!")
    r1.stop()
    r2.stop()
    shake.stop()
if __name__ == '__main__':
    SHAKE_EXE = os.path.abspath(SHAKE_EXE)
    # best-effort cleanup of servers left over from a previous run
    os.system("killall -9 redis-server")
    # FIX: rmtree raised FileNotFoundError on a fresh checkout where ./tmp
    # does not exist yet; ignore_errors makes the cleanup best-effort.
    shutil.rmtree(f"{DIR}/tmp", ignore_errors=True)
    green_print("----------- test_sync_select_db --------")
    test_sync_select_db()
    green_print("----------- test_sync_select_db with target db--------")
    test_sync_select_db(3)
    green_print("----------- test_sync_standalone2standalone --------")
    test_sync_standalone2standalone()
    green_print("----------- test_sync_cluster2cluster --------")
    test_sync_cluster2cluster()
    green_print("----------- test_sync_standalone2cluster --------")
    test_sync_standalone2cluster()
    # action_sync_standalone2standalone_bigdata()
    # action_sync_standalone2cluster()
| 32.635593 | 118 | 0.620964 |
f85d9d0e4bc201ff422b6ac4a676eb4ef37460a3 | 5,183 | py | Python | saleor/graphql/checkout/types.py | jdmueller/ArmoniaSaleor | 1d7c1e9bb697325cee3d007b3ea811f25c4086d9 | [
"BSD-3-Clause"
] | 2 | 2019-06-08T04:07:41.000Z | 2019-07-21T15:25:16.000Z | saleor/graphql/checkout/types.py | jdmueller/ArmoniaSaleor | 1d7c1e9bb697325cee3d007b3ea811f25c4086d9 | [
"BSD-3-Clause"
] | 2 | 2019-07-02T13:39:49.000Z | 2019-07-07T09:38:27.000Z | saleor/graphql/checkout/types.py | jdmueller/ArmoniaSaleor | 1d7c1e9bb697325cee3d007b3ea811f25c4086d9 | [
"BSD-3-Clause"
] | 1 | 2019-05-02T17:30:49.000Z | 2019-05-02T17:30:49.000Z | import graphene
import graphene_django_optimizer as gql_optimizer
from django.conf import settings
from ...checkout import models
from ...core.taxes import zero_taxed_money
from ...core.taxes.interface import (
calculate_checkout_line_total,
calculate_checkout_shipping,
calculate_checkout_subtotal,
calculate_checkout_total,
)
from ..core.connection import CountableDjangoObjectType
from ..core.types.money import TaxedMoney
from ..giftcard.types import GiftCard
from ..order.utils import applicable_shipping_methods
from ..payment.enums import PaymentGatewayEnum
from ..shipping.types import ShippingMethod
class CheckoutLine(CountableDjangoObjectType):
    """GraphQL type for one checkout line (a product variant plus quantity)."""

    total_price = graphene.Field(
        TaxedMoney,
        description="The sum of the checkout line price, taxes and discounts.",
    )
    requires_shipping = graphene.Boolean(
        description="Indicates whether the item need to be delivered."
    )

    class Meta:
        only_fields = ["id", "quantity", "variant"]
        description = "Represents an item in the checkout."
        interfaces = [graphene.relay.Node]
        model = models.CheckoutLine
        filter_fields = ["id"]

    @staticmethod
    def resolve_total_price(self, info):
        # Price math (taxes + discounts) is delegated to the taxes interface;
        # discounts come from the request context.
        return calculate_checkout_line_total(
            checkout_line=self, discounts=info.context.discounts
        )

    @staticmethod
    def resolve_requires_shipping(root: models.CheckoutLine, *_args):
        # True when the underlying variant represents a physical product.
        return root.is_shipping_required()
class Checkout(CountableDjangoObjectType):
    """GraphQL type exposing a checkout (cart) plus computed pricing fields.

    All money fields are resolved lazily through the pluggable taxes
    interface, with discounts taken from the request context.
    """

    available_shipping_methods = graphene.List(
        ShippingMethod,
        required=True,
        description="Shipping methods that can be used with this order.",
    )
    available_payment_gateways = graphene.List(
        PaymentGatewayEnum,
        description="List of available payment gateways.",
        required=True,
    )
    email = graphene.String(description="Email of a customer", required=True)
    gift_cards = gql_optimizer.field(
        graphene.List(
            GiftCard, description="List of gift cards associated with this checkout"
        ),
        model_field="gift_cards",
    )
    is_shipping_required = graphene.Boolean(
        description="Returns True, if checkout requires shipping.", required=True
    )
    lines = gql_optimizer.field(
        graphene.List(
            CheckoutLine,
            description=(
                "A list of checkout lines, each containing information about "
                "an item in the checkout."
            ),
        ),
        model_field="lines",
    )
    shipping_price = graphene.Field(
        TaxedMoney,
        description="The price of the shipping, with all the taxes included.",
    )
    subtotal_price = graphene.Field(
        TaxedMoney,
        description="The price of the checkout before shipping, with taxes included.",
    )
    total_price = graphene.Field(
        TaxedMoney,
        description=(
            "The sum of the the checkout line prices, with all the taxes,"
            "shipping costs, and discounts included."
        ),
    )

    class Meta:
        only_fields = [
            "billing_address",
            "created",
            "discount_amount",
            "discount_name",
            "gift_cards",
            "is_shipping_required",
            "last_change",
            "note",
            "quantity",
            "shipping_address",
            "shipping_method",
            "token",
            "translated_discount_name",
            "user",
            "voucher_code",
        ]
        description = "Checkout object"
        model = models.Checkout
        interfaces = [graphene.relay.Node]
        filter_fields = ["token"]

    @staticmethod
    def resolve_total_price(root: models.Checkout, info):
        # Deduct any gift card balance from the taxed total, clamping at zero
        # so gift cards can never produce a negative amount due.
        taxed_total = (
            calculate_checkout_total(checkout=root, discounts=info.context.discounts)
            - root.get_total_gift_cards_balance()
        )
        return max(taxed_total, zero_taxed_money())

    @staticmethod
    def resolve_subtotal_price(root: models.Checkout, info):
        return calculate_checkout_subtotal(
            checkout=root, discounts=info.context.discounts
        )

    @staticmethod
    def resolve_shipping_price(root: models.Checkout, info):
        return calculate_checkout_shipping(
            checkout=root, discounts=info.context.discounts
        )

    @staticmethod
    def resolve_lines(root: models.Checkout, *_args):
        # Prefetch variants to avoid one query per line when lines resolve
        # their variant field.
        return root.lines.prefetch_related("variant")

    @staticmethod
    def resolve_available_shipping_methods(root: models.Checkout, info):
        # Shipping method eligibility is filtered by the gross subtotal.
        price = calculate_checkout_subtotal(
            checkout=root, discounts=info.context.discounts
        )
        return applicable_shipping_methods(root, price.gross.amount)

    @staticmethod
    def resolve_available_payment_gateways(_: models.Checkout, _info):
        # Gateways are configured site-wide in settings, not per checkout.
        return settings.CHECKOUT_PAYMENT_GATEWAYS.keys()

    @staticmethod
    def resolve_gift_cards(root: models.Checkout, _info):
        return root.gift_cards.all()

    @staticmethod
    def resolve_is_shipping_required(root: models.Checkout, _info):
        return root.is_shipping_required()
83ae36d704f0960f65e1dc8c14dba07be1fd2c7f | 2,236 | py | Python | apache/airflow/providers/clickhouse/hooks/ClickhouseHook.py | klimenkoIv/apache-airflow-providers-clickhouse | 11852d8b9ca6c2f8798dee931d393ea2a19f5032 | [
"Apache-2.0"
] | 1 | 2021-11-09T10:59:44.000Z | 2021-11-09T10:59:44.000Z | apache/airflow/providers/clickhouse/hooks/ClickhouseHook.py | klimenkoIv/apache-airflow-providers-clickhouse | 11852d8b9ca6c2f8798dee931d393ea2a19f5032 | [
"Apache-2.0"
] | 3 | 2021-11-06T23:59:37.000Z | 2022-02-25T06:14:16.000Z | apache/airflow/providers/clickhouse/hooks/ClickhouseHook.py | klimenkoIv/apache-airflow-providers-clickhouse | 11852d8b9ca6c2f8798dee931d393ea2a19f5032 | [
"Apache-2.0"
] | null | null | null | from typing import Dict, Any, Iterable, Union
from airflow.hooks.dbapi import DbApiHook
from airflow.models.connection import Connection
from clickhouse_driver import Client
class ClickhouseHook(DbApiHook):
    """Airflow hook that talks to ClickHouse through ``clickhouse_driver``.

    @author = klimenko.iv@gmail.com
    """

    def bulk_dump(self, table, tmp_file):
        # Part of the DbApiHook interface; bulk dump is not implemented.
        pass

    def bulk_load(self, table, tmp_file):
        # Part of the DbApiHook interface; bulk load is not implemented.
        pass

    conn_name_attr = 'click_conn_id'
    default_conn_name = 'click_default'
    conn_type = 'clickhouse'
    hook_name = 'ClickHouse'
    database = ''

    @staticmethod
    def get_ui_field_behaviour() -> Dict:
        """Returns custom field behaviour for the Airflow connection form."""
        return {
            "hidden_fields": ['extra'],
            "relabeling": {'schema': 'Database'},
        }

    def get_conn(self, conn_name_attr: str = None) -> Client:
        """Create and return a connected ``clickhouse_driver.Client``.

        Connection extras (``extra_dejson``) are passed through as additional
        Client keyword arguments.

        :param conn_name_attr: optional override for the attribute holding the
            Airflow connection id (defaults to ``click_conn_id``)
        """
        if conn_name_attr:
            self.conn_name_attr = conn_name_attr
        conn: Connection = self.get_connection(getattr(self, self.conn_name_attr))
        host: str = conn.host
        # 9000 is the ClickHouse native-protocol default port
        port: int = int(conn.port) if conn.port else 9000
        user: str = conn.login
        password: str = conn.password
        database: str = conn.schema
        click_kwargs = conn.extra_dejson.copy()
        if password is None:
            password = ''
        click_kwargs.update(port=port)
        click_kwargs.update(user=user)
        click_kwargs.update(password=password)
        if database:
            click_kwargs.update(database=database)
        result = Client(host or 'localhost', **click_kwargs)
        result.connection.connect()
        return result

    def run(self, sql: Union[str, Iterable[str]], parameters: dict = None,
            with_column_types: bool = True, **kwargs) -> Any:
        """Execute one or more SQL statements; return the last statement's result.

        :param sql: a single statement or an iterable of statements
        :param parameters: accepted for DbApiHook signature compatibility but
            currently NOT forwarded to the driver (see commented line below)
        :param with_column_types: ask the driver to also return column metadata
        """
        if isinstance(sql, str):
            queries = (sql,)
        else:
            # FIX: a non-string iterable previously fell through without ever
            # assigning ``queries`` and raised NameError on the loop below.
            queries = sql
        client = self.get_conn()
        result = None
        index = 0
        for query in queries:
            index += 1
            self.log.info("Query_%s to database : %s", index, query)
            result = client.execute(
                query=query,
                # params=parameters,
                with_column_types=with_column_types,
            )
            self.log.info("Query_%s completed", index)
        return result
| 29.813333 | 82 | 0.59839 |
b2bb4617ea51d4db66a449a2ee222ddc815233f8 | 3,735 | py | Python | prediction.py | khzhan11/medical_tool_detection_2021 | bbf4814d7b83879a753d0593780395e6be8251fd | [
"MIT"
] | null | null | null | prediction.py | khzhan11/medical_tool_detection_2021 | bbf4814d7b83879a753d0593780395e6be8251fd | [
"MIT"
] | null | null | null | prediction.py | khzhan11/medical_tool_detection_2021 | bbf4814d7b83879a753d0593780395e6be8251fd | [
"MIT"
] | null | null | null | import argparse
import tensorflow as tf
import numpy as np
import pathlib
import cv2
import datetime
from object_detection.utils import ops as utils_ops
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
# Patches to make the TF1-era object_detection utilities run under TF2:
# patch tf1 into `utils.ops` (the helpers expect the tf.compat.v1 API surface)
utils_ops.tf = tf.compat.v1
# Patch the location of gfile (moved to tf.io.gfile in TF2)
tf.gfile = tf.io.gfile
def run_inference_for_single_image(model, image):
    """Run the detection model on one image array.

    :param model: loaded TF SavedModel exposing a 'serving_default' signature
    :param image: HxWxC image array (RGB conversion is done by the caller)
    :return: dict of numpy arrays — detection_boxes / detection_classes /
        detection_scores, num_detections, and (for mask models)
        detection_masks_reframed
    """
    #Convert image to numpy array
    image = np.asarray(image)
    #Convert numpy array to tensor
    input_tensor = tf.convert_to_tensor(image)
    # The model expects a batch of images, so add an axis with `tf.newaxis`.
    input_tensor = input_tensor[tf.newaxis, ...]
    # Run inference
    model_fn = model.signatures['serving_default']
    output_dict = model_fn(input_tensor)
    # All outputs are batches tensors.
    # Convert to numpy arrays, and take index [0] to remove the batch dimension.
    # We're only interested in the first num_detections.
    num_detections = int(output_dict.pop('num_detections'))
    output_dict = {key: value[0, :num_detections].numpy()
                   for key, value in output_dict.items()}
    output_dict['num_detections'] = num_detections
    # detection_classes should be ints.
    output_dict['detection_classes'] = output_dict['detection_classes'].astype(np.int64)
    # Handle models with masks:
    if 'detection_masks' in output_dict:
        # Reframe the the bbox mask to the image size.
        detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
            output_dict['detection_masks'], output_dict['detection_boxes'],
            image.shape[0], image.shape[1])
        # Binarize the reframed masks at a 0.5 threshold.
        detection_masks_reframed = tf.cast(detection_masks_reframed > 0.5,
                                           tf.uint8)
        output_dict['detection_masks_reframed'] = detection_masks_reframed.numpy()
    return output_dict
def visualize_inference(model, image_np):
    """Run detection on *image_np* and draw boxes/labels/masks onto it.

    Draws in place on *image_np* and returns the same array.
    NOTE(review): relies on the module-level ``category_index`` created under
    ``__main__``, so this function only works when the file runs as a script.
    """
    # Actual detection.
    output_dict = run_inference_for_single_image(model, image_np)
    # Visualization of the results of a detection.
    vis_util.visualize_boxes_and_labels_on_image_array(
        image_np,
        output_dict['detection_boxes'],
        output_dict['detection_classes'],
        output_dict['detection_scores'],
        category_index,
        instance_masks=output_dict.get('detection_masks_reframed', None),
        use_normalized_coordinates=True,
        line_thickness=3)
    return image_np
if __name__ == '__main__':
    arg = argparse.ArgumentParser()
    arg.add_argument("--input", required=True)
    arg.add_argument("--output")
    args = vars(arg.parse_args())

    model_dir = './Model/'  # ADD MODEL DIR
    model = tf.saved_model.load(str(model_dir))

    # List of the strings that is used to add correct label for each box.
    label_path = './Model/label_map.pbtxt'  # ADD LABEL MAP
    category_index = label_map_util.create_category_index_from_labelmap(label_path, use_display_name=True)

    input_image_dir = pathlib.Path(args["input"])
    input_img_paths = sorted(input_image_dir.glob("*.jpg"))
    # FIX: --output is optional; previously a missing value crashed with
    # "unsupported operand" on the None + str concatenation below.  Fall back
    # to writing results next to the inputs.
    output_dir = args["output"] or args["input"]
    for image_path in input_img_paths:
        img = cv2.imread(str(image_path))
        # FIX: skip unreadable files instead of crashing later on a None image
        # (the old try/except only logged the cvtColor failure and fell through).
        if img is None:
            print(f"Error reading image: {image_path}")
            continue
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        image_np = np.array(img)
        output_img = visualize_inference(model, image_np)
        output_img = cv2.cvtColor(output_img, cv2.COLOR_RGB2BGR)
        cv2.imshow('Detection', output_img)
        cv2.waitKey(1)  # FIX: give HighGUI a chance to actually paint the window
        # FIX: use Path.stem so names containing extra dots keep their full base
        # name (the old `split(".")` raised on "a.b.jpg").
        cv2.imwrite(f"{output_dir}/{image_path.stem}_output.jpg", output_img)
    print("Done")
| 36.262136 | 106 | 0.699866 |
bdbdade4c868a2ba72d3cf38ec34c20a02ed5b2d | 9,905 | py | Python | orthogonal-planes/ROS_node/catkin_ws/src/openni2/openni2_launch/doc/conf.py | tamaslevente/trai | 4bf68463b941f305d9b25a9374b6c2a2d51a8046 | [
"MIT"
] | null | null | null | orthogonal-planes/ROS_node/catkin_ws/src/openni2/openni2_launch/doc/conf.py | tamaslevente/trai | 4bf68463b941f305d9b25a9374b6c2a2d51a8046 | [
"MIT"
] | null | null | null | orthogonal-planes/ROS_node/catkin_ws/src/openni2/openni2_launch/doc/conf.py | tamaslevente/trai | 4bf68463b941f305d9b25a9374b6c2a2d51a8046 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# openni2_launch documentation build configuration file, created by
# sphinx-quickstart on Fri Nov 3 15:35:05 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import catkin_pkg.package
# Resolve the catkin package root (one directory above doc/) and parse its
# package.xml so the docs can reuse the package's version string below.
catkin_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
catkin_package = catkin_pkg.package.parse_package(os.path.join(catkin_dir, catkin_pkg.package.PACKAGE_MANIFEST_FILENAME))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# Sphinx extensions enabled for this project.
# NOTE(review): 'sphinx.ext.pngmath' was removed in Sphinx 1.8; building with a
# modern Sphinx requires switching to 'sphinx.ext.imgmath' — confirm the
# targeted Sphinx version before changing.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.pngmath',
    'sphinx.ext.ifconfig',
    'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'openni2_launch'
copyright = u'2017, Julius Kammerl, Michael Ferguson'
author = u'Julius Kammerl, Michael Ferguson'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = catkin_package.version
# The full version, including alpha/beta/rc tags.
release = catkin_package.version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['.build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['.static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'openni2_launchdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'openni2_launch.tex', u'openni2\\_launch Documentation',
u'Julius Kammerl, Michael Ferguson', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'openni2_launch', u'openni2_launch Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'openni2_launch', u'openni2_launch Documentation',
author, 'openni2_launch', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| 32.906977 | 121 | 0.721656 |
47a0b818d2ab2e20d4ddd57dfaa22d5d0cf419ad | 6,096 | py | Python | setup.py | onceclick/sentry | 2d554649d59275338b102610a68191624f6f77db | [
"BSD-3-Clause"
] | 1 | 2021-08-10T06:07:13.000Z | 2021-08-10T06:07:13.000Z | setup.py | qlg/sentry | 2d554649d59275338b102610a68191624f6f77db | [
"BSD-3-Clause"
] | null | null | null | setup.py | qlg/sentry | 2d554649d59275338b102610a68191624f6f77db | [
"BSD-3-Clause"
] | 1 | 2017-04-08T04:09:18.000Z | 2017-04-08T04:09:18.000Z | #!/usr/bin/env python
"""
Sentry
======
Sentry is a realtime event logging and aggregation platform. It specializes
in monitoring errors and extracting all the information needed to do a proper
post-mortem without any of the hassle of the standard user feedback loop.
Sentry is a Server
------------------
The Sentry package, at its core, is just a simple server and web UI. It will
handle authentication clients (such as `Raven
<https://github.com/getsentry/raven-python>`_)
and all of the logic behind storage and aggregation.
That said, Sentry is not limited to Python. The primary implementation is in
Python, but it contains a full API for sending events from any language, in
any application.
:copyright: (c) 2011-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
# if sys.version_info[:2] != (2, 7):
# print 'Error: Sentry requires Python 2.7'
# sys.exit(1)
import os
import os.path
import sys
from distutils.command.build import build as BuildCommand
from setuptools import setup, find_packages
from setuptools.command.sdist import sdist as SDistCommand
from setuptools.command.develop import develop as DevelopCommand
ROOT = os.path.realpath(os.path.join(os.path.dirname(
sys.modules['__main__'].__file__)))
# Add Sentry to path so we can import distutils
sys.path.insert(0, os.path.join(ROOT, 'src'))
from sentry.utils.distutils import (
BuildAssetsCommand, BuildIntegrationDocsCommand
)
# The version of sentry
VERSION = '8.12.0.dev0'
# Hack to prevent stupid "TypeError: 'NoneType' object is not callable" error
# in multiprocessing/util.py _exit_function when running `python
# setup.py test` (see
# http://www.eby-sarna.com/pipermail/peak/2010-May/003357.html)
for m in ('multiprocessing', 'billiard'):
    try:
        __import__(m)
    except ImportError:
        pass
# When SENTRY_LIGHT_BUILD=1, the asset/doc build steps below are skipped.
IS_LIGHT_BUILD = os.environ.get('SENTRY_LIGHT_BUILD') == '1'
dev_requires = [
'Babel',
'flake8>=2.6,<2.7',
'pycodestyle>=2.0,<2.1',
'isort>=4.2.2,<4.3.0',
]
tests_require = [
# cassandra
'blist',
# TODO(dcramer): figure out why Travis needs this
'cassandra-driver<=3.5.0',
'casscache',
'cqlsh',
# /cassandra
'datadog',
'pytest-cov>=1.8.0,<1.9.0',
'pytest-timeout>=0.5.0,<0.6.0',
'pytest-xdist>=1.11.0,<1.12.0',
'python-coveralls',
'responses',
]
install_requires = [
'boto3>=1.4.1,<1.5',
'celery>=3.1.8,<3.1.19',
'click>=5.0,<7.0',
# 'cryptography>=1.3,<1.4',
'cssutils>=0.9.9,<0.10.0',
'Django>=1.6.0,<1.7',
'django-bitfield>=1.7.0,<1.8.0',
'django-crispy-forms>=1.4.0,<1.5.0',
'django-debug-toolbar>=1.3.2,<1.4.0',
'django-jsonfield>=0.9.13,<0.9.14',
'django-picklefield>=0.3.0,<0.4.0',
'django-sudo>=2.1.0,<3.0.0',
'django-templatetag-sugar>=0.1.0',
'djangorestframework>=2.3.8,<2.4.0',
'email-reply-parser>=0.2.0,<0.3.0',
'enum34>=0.9.18,<1.2.0',
'exam>=0.5.1',
# broken on python3
'hiredis>=0.1.0,<0.2.0',
'honcho>=0.7.0,<0.8.0',
'kombu==3.0.35',
'lxml>=3.4.1',
'ipaddress>=1.0.16,<1.1.0',
'libsourcemap>=0.5.0,<0.6.0',
'mock>=0.8.0,<1.1',
'oauth2>=1.5.167',
'percy>=0.2.5',
'petname>=1.7,<1.8',
'Pillow>=3.2.0,<3.3.0',
'progressbar2>=3.10,<3.11',
'psycopg2>=2.6.0,<2.7.0',
'pytest>=2.6.4,<2.7.0',
'pytest-django>=2.9.1,<2.10.0',
'pytest-html>=1.9.0,<1.10.0',
'python-dateutil>=2.0.0,<3.0.0',
'python-memcached>=1.53,<2.0.0',
'python-openid>=2.2',
'PyYAML>=3.11,<3.12',
'raven>=5.29.0,<6.0.0',
'redis>=2.10.3,<2.11.0',
'requests[security]>=2.9.1,<2.13.0',
'selenium==3.0.0b3',
'simplejson>=3.2.0,<3.9.0',
'six>=1.10.0,<1.11.0',
'setproctitle>=1.1.7,<1.2.0',
'statsd>=3.1.0,<3.2.0',
'structlog==16.1.0',
'South==1.0.1',
'symsynd>=1.3.0,<2.0.0',
'toronado>=0.0.11,<0.1.0',
'ua-parser>=0.6.1,<0.8.0',
'urllib3>=1.14,<1.17',
'uwsgi>2.0.0,<2.1.0',
'rb>=1.6.0,<2.0.0',
'qrcode>=5.2.2,<6.0.0',
'python-u2flib-server>=4.0.1,<4.1.0',
]
class SentrySDistCommand(SDistCommand):
    # If we are not a light build we want to also execute build_assets as
    # part of our source build pipeline.
    # NOTE: this conditional runs at class-creation time, so the environment
    # variable is read once, when setup.py is imported.
    if not IS_LIGHT_BUILD:
        sub_commands = SDistCommand.sub_commands + \
            [('build_assets', None), ('build_integration_docs', None)]
class SentryBuildCommand(BuildCommand):
    """``build`` command that also builds static assets and integration docs.

    The extra steps are skipped for light builds (SENTRY_LIGHT_BUILD=1).
    """

    def run(self):
        # Run the standard distutils build first.
        BuildCommand.run(self)
        if IS_LIGHT_BUILD:
            return
        for command_name in ('build_assets', 'build_integration_docs'):
            self.run_command(command_name)
class SentryDevelopCommand(DevelopCommand):
    """``develop`` command that also builds static assets and integration docs.

    The extra steps are skipped for light builds (SENTRY_LIGHT_BUILD=1).
    """

    def run(self):
        # Run the standard setuptools develop step first.
        DevelopCommand.run(self)
        if IS_LIGHT_BUILD:
            return
        for command_name in ('build_assets', 'build_integration_docs'):
            self.run_command(command_name)
# Map distutils/setuptools command names to Sentry's extended implementations.
cmdclass = {
    'sdist': SentrySDistCommand,
    'develop': SentryDevelopCommand,
    'build': SentryBuildCommand,
    'build_assets': BuildAssetsCommand,
    'build_integration_docs': BuildIntegrationDocsCommand,
}
setup(
name='sentry',
version=VERSION,
author='Sentry',
author_email='hello@sentry.io',
url='https://sentry.io',
description='A realtime logging and aggregation server.',
long_description=open(os.path.join(ROOT, 'README.rst')).read(),
package_dir={'': 'src'},
packages=find_packages('src'),
zip_safe=False,
install_requires=install_requires,
extras_require={
'dev': dev_requires,
'postgres': install_requires,
'tests': tests_require,
},
cmdclass=cmdclass,
license='BSD',
include_package_data=True,
entry_points={
'console_scripts': [
'sentry = sentry.runner:main',
],
'flake8.extension': [
],
},
classifiers=[
'Framework :: Django',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Operating System :: POSIX :: Linux',
'Topic :: Software Development'
],
)
| 27.835616 | 77 | 0.62664 |
89965c155e93e71ac2949ba10726486a9ef424b3 | 5,212 | py | Python | data/shelling/shelling_extended.py | CarrKnight/nofreelunch | 0400f750063215d06659a203f777b21da2e99231 | [
"MIT"
] | 2 | 2019-08-05T15:21:42.000Z | 2021-04-06T15:41:40.000Z | data/shelling/shelling_extended.py | CarrKnight/nofreelunch | 0400f750063215d06659a203f777b21da2e99231 | [
"MIT"
] | null | null | null | data/shelling/shelling_extended.py | CarrKnight/nofreelunch | 0400f750063215d06659a203f777b21da2e99231 | [
"MIT"
] | null | null | null | import os
import pandas as pd
import pyNetLogo
from multiprocessing import Pool
from numpy import random
import numpy
# -- Experiment configuration -------------------------------------------------
NUMBER_OF_PROCESSORS = 2
NUMBER_OF_RUNS = 5000
# NetLogo reporters sampled at every tick of every run.
VARIABLES_TO_TRACK = [
    "percent-similar",
    "percent-similar-Red",
    "percent-similar-Blue",
    "percent-unhappy",
    "percent-unhappy-Red",
    "percent-unhappy-Blue",
    "percent-clustering-Red",
]
# Ticks at which point-in-time snapshots of the reporters are recorded.
SNAPSHOT_TIME = [50, 100, 150, 200, 250]
TOT_STEPS = 301
# Extra NetLogo commands issued before/after "setup" (none enabled by default).
PRE_SETUP_COMMANDS = [
    # "set number-of-firms 100"
]
POST_SETUP_COMMANDS = [
    # "setup",
    # "setup-patches",
    # "allocate"
]
SPINUP_GO_CALLS = 0
CSV_RESULT_NAME = 'shelling.csv'
CSV_PARAM_NAME = 'params_' + CSV_RESULT_NAME
PATH_TO_NLOGO = 'modified.nlogo'
# Sampled parameter space: 'names', 'bounds' and 'round' are index-aligned;
# 'round' marks parameters whose sampled values are rounded to integers.
PROBLEM_DEFINITION = {
    'num_vars': 4,
    'names': ['random-seed',
              'density',
              '%-similar-wanted',
              'radiusNeighborhood'],
    'bounds': [[1, 100000],
               [50, 99],
               [25, 75],
               [1, 5]],
    'round': [False,
              True,
              True,
              True],
}
##create link and start the model
def initializer(modelfile):
    '''Initialize a worker subprocess of the multiprocessing Pool.

    Opens a (headless) NetLogo link, stores it in the module-global
    ``netlogo`` so that run_simulation can use it, and loads the model.

    Parameters
    ----------
    modelfile : str
        Absolute path to the .nlogo model file.
    '''
    # we need to set the instantiated netlogo
    # link as a global so run_simulation can
    # use it
    global netlogo
    netlogo = pyNetLogo.NetLogoLink(gui=False,
                                    # netlogo_home="/home/carrknight/Downloads/netlogo-5.3.1-64",
                                    netlogo_home="/opt/netlogo",
                                    # netlogo_version="5"
                                    netlogo_version="6.1"
                                    )
    netlogo.load_model(modelfile)
def get_trend(data):
    """Return the slope of a degree-1 least-squares fit to *data*.

    The x-axis is the implicit time step 0..len(data)-1, so the result
    summarizes whether a tracked NetLogo reporter is rising or falling.

    Parameters
    ----------
    data : sequence of float

    Returns
    -------
    float
        The fitted slope.
    """
    # Use the builtin ``float`` dtype: the ``numpy.float`` alias was
    # deprecated in NumPy 1.20 and removed in 1.24 (AttributeError).
    x = numpy.arange(0, len(data), dtype=float)
    y = numpy.array(data, dtype=float)
    # polyfit returns coefficients highest degree first; index 0 is the slope.
    z = numpy.polyfit(x, y, 1)
    return z[0]
#### single simulation
def run_simulation(experiment):
    """Run one NetLogo replication and summarize the tracked reporters.

    Uses the module-global ``netlogo`` link created by ``initializer``.

    Parameters
    ----------
    experiment : dict
        Maps parameter names to sampled values; 'random-seed' is applied
        with NetLogo's dedicated syntax, everything else via ``set``.

    Returns
    -------
    pandas.Series
        mean/min/last/sd/trend/max aggregates plus per-snapshot values for
        every variable in VARIABLES_TO_TRACK.
    """
    for command in PRE_SETUP_COMMANDS:
        netlogo.command(command)
    print("starting")
    # Set the input parameters.
    for key, value in experiment.items():
        if key == 'random-seed':
            # The NetLogo random seed requires a different syntax.
            netlogo.command('random-seed {}'.format(value))
        else:
            # Otherwise, assume the input parameters are global variables.
            netlogo.command('set {0} {1}'.format(key, value))
    netlogo.command('setup')
    for command in POST_SETUP_COMMANDS:
        netlogo.command(command)
    print("setup done")
    # If you need to spin up the model before collecting data, do it now.
    for _ in range(SPINUP_GO_CALLS):
        netlogo.command("go")
    # Presumably one column per tracked reporter, one row per tick
    # (pyNetLogo repeat_report) -- TODO confirm against pyNetLogo docs.
    counts = netlogo.repeat_report(VARIABLES_TO_TRACK, TOT_STEPS)

    def _summarize(prefix, func):
        # Collapse each reporter's time series with *func*, prefixing names.
        summary = counts.apply(lambda column: func(column.values))
        summary.rename(index=lambda name: prefix + name, inplace=True)
        return summary

    # Same aggregates and concatenation order as before the refactoring:
    # mean, min, last, sd, trend, max.
    results = pd.concat([
        _summarize("mean_", numpy.mean),
        _summarize("min_", numpy.min),
        _summarize("last_", lambda values: values[len(values) - 1]),
        _summarize("sd_", numpy.std),
        _summarize("trend_", get_trend),
        _summarize("max_", numpy.max),
    ])
    # Point-in-time snapshots of every reporter.
    for snapshot in SNAPSHOT_TIME:
        snap = _summarize("snap_" + str(snapshot) + "_",
                          lambda values: values[snapshot])
        results = pd.concat([results, snap])
    print("simulation done!")
    return results
if __name__ == '__main__':
    modelfile = os.path.abspath(PATH_TO_NLOGO)
    problem = PROBLEM_DEFINITION
    # we want completely random values or we are going to get smart enough
    # classifiers that figure out where the parameters are concentrated!
    param_values = {}
    for name, bound, rounded in zip(problem['names'],problem['bounds'],problem['round']):
        param_values[name] = random.uniform(bound[0], bound[1], NUMBER_OF_RUNS)
        if rounded:
            param_values[name] = numpy.round(param_values[name])
    experiments = pd.DataFrame(param_values)
    # Each worker opens its own NetLogo link via ``initializer``; runs are
    # distributed over the pool and each returns one summary Series.
    with Pool(NUMBER_OF_PROCESSORS, initializer=initializer, initargs=(modelfile,)) as executor:
        results = []
        for entry in executor.map(run_simulation, experiments.to_dict('records')):
            results.append(entry)
    # Persist inputs and outputs as row-aligned CSV files.
    results = pd.DataFrame(results)
    print(results)
    experiments.to_csv(CSV_PARAM_NAME, index=False)
    results.to_csv(CSV_RESULT_NAME, index=False)
| 27.145833 | 97 | 0.595932 |
b5418a5d8b8cc3bce49b20a1bd0215c299bb9323 | 20,512 | py | Python | flatcat/io.py | MilesQLi/flatcat | 123a3cbe60bb9c9ea31e6bf5f3d55dbdd88f24fd | [
"BSD-2-Clause"
] | null | null | null | flatcat/io.py | MilesQLi/flatcat | 123a3cbe60bb9c9ea31e6bf5f3d55dbdd88f24fd | [
"BSD-2-Clause"
] | null | null | null | flatcat/io.py | MilesQLi/flatcat | 123a3cbe60bb9c9ea31e6bf5f3d55dbdd88f24fd | [
"BSD-2-Clause"
] | 2 | 2018-04-04T17:46:44.000Z | 2018-04-17T21:40:19.000Z | from __future__ import unicode_literals
import collections
import datetime
import logging
import re
import sys
import bz2
import codecs
import gzip
import locale
import os
import tarfile
from contextlib import contextmanager
import morfessor
from . import get_version
from .categorizationscheme import get_categories, CategorizedMorph
from .exception import InvalidCategoryError
from .flatcat import FlatcatModel
from .utils import _generator_progress, _is_string
PY3 = sys.version_info.major == 3
if PY3:
from io import BytesIO as StringIO
else:
from StringIO import StringIO
_logger = logging.getLogger(__name__)
class FlatcatIO(morfessor.MorfessorIO):
"""Definition for all input and output files. Also handles all
encoding issues.
The only state this class has is the separators used in the data.
Therefore, the same class instance can be used for initializing multiple
files.
Extends Morfessor Baseline data file formats to include category tags.
"""
    def __init__(self,
                 encoding=None,
                 construction_separator=' + ',
                 comment_start='#',
                 compound_separator='\s+',
                 analysis_separator=',',
                 category_separator='/',
                 strict=True):
        # Separator handling for compounds/constructions/comments is done by
        # the Morfessor Baseline IO base class; FlatCat adds separators for
        # alternative analyses and for morph/category tags, plus strictness.
        super(FlatcatIO, self).__init__(
            encoding=encoding,
            construction_separator=construction_separator,
            comment_start=comment_start,
            compound_separator=compound_separator,
            atom_separator=None)
        # Separates alternative analyses of the same compound on one line.
        self.analysis_separator = analysis_separator
        # Separates a morph from its category tag (e.g. "talo/STM").
        self.category_separator = category_separator
        # When True, unknown category tags raise InvalidCategoryError.
        self._strict = strict
        self._version = get_version()
    def write_tarball_model_file(self, file_name, model):
        """Save *model* as a .tar.gz archive with members 'params',
        'analysis' and (when supervised) 'annotations'."""
        _logger.info("Saving model as tarball...")
        if '.tar.gz' not in file_name:
            _logger.warn('Tarball model misleadingly named: {}'.format(
                file_name))
        with TarGzModel(file_name, 'w') as tarmodel:
            with tarmodel.newmember('params') as member:
                self.write_parameter_file(member,
                                          model.get_params())
            with tarmodel.newmember('analysis') as member:
                self.write_segmentation_file(member,
                                             model.segmentations)
            # Annotations only exist for semi-supervised models.
            if model._supervised:
                with tarmodel.newmember('annotations') as member:
                    self.write_annotations_file(
                        member,
                        model.annotations,
                        construction_sep=' ',
                        output_tags=True)
    def read_tarball_model_file(self, file_name, model=None):
        """Read model from a tarball.

        If *model* is None a fresh FlatcatModel is created; otherwise the
        archive contents are loaded into the given model. Members may arrive
        in any order; unknown member names are only warned about.
        """
        if model is None:
            model = FlatcatModel()
        with TarGzModel(file_name, 'r') as tarmodel:
            for (name, fobj) in tarmodel.members():
                if name == 'params':
                    model.set_params(
                        self.read_parameter_file(fobj))
                elif name == 'analysis':
                    model.add_corpus_data(
                        self.read_segmentation_file(fobj))
                elif name == 'annotations':
                    model.add_annotations(
                        self.read_annotations_file(fobj))
                else:
                    _logger.warn(
                        'Unknown model component {}'.format(name))
        return model
    def write_segmentation_file(self, file_name, segmentations,
                                construction_sep=None,
                                output_tags=True,
                                comment_string=''):
        """Write segmentation file.
        File format (single line, wrapped only for pep8):
        <count> <construction1><cat_sep><category1><cons_sep>...
        <constructionN><cat_sep><categoryN>

        *segmentations* is an iterable of (count, morphs) pairs;
        *comment_string* is appended to the version header line.
        """
        construction_sep = (construction_sep if construction_sep
                            else self.construction_separator)
        _logger.info("Saving analysis to '%s'..." % file_name)
        output_morph = _make_morph_formatter(
            self.category_separator, output_tags)
        with self._open_text_file_write(file_name) as file_obj:
            # Header records the producing version and a timestamp.
            d = datetime.datetime.now().replace(microsecond=0)
            file_obj.write(
                '# Output from Morfessor {}{}, {!s}\n'.format(
                    get_version(), comment_string, d))
            for count, morphs in segmentations:
                s = construction_sep.join(
                    [output_morph(m) for m in morphs])
                file_obj.write('{} {}\n'.format(count, s))
        _logger.info("Done.")
    def read_segmentation_file(self, file_name):
        """Read segmentation file.
        see docstring for write_segmentation_file for file format.

        Yields (count, tuple_of_CategorizedMorph) pairs; when the first
        column is not an integer the count defaults to 1.
        """
        _logger.info("Reading segmentations from '%s'..." % file_name)
        re_space = re.compile(r'\s+')
        for line in self._read_text_file(file_name):
            count, analysis = re_space.split(line, 1)
            try:
                count = int(count)
            except ValueError:
                # first column was compound instead of count
                count = 1
            cmorphs = []
            for morph_cat in analysis.split(self.construction_separator):
                cmorphs.append(self._morph_or_cmorph(morph_cat))
            yield(count, tuple(cmorphs))
        _logger.info("Done.")
    def read_annotations_file(self, file_name, construction_sep=' ',
                              analysis_sep=None):
        """Read an annotations file.
        Each line has the format:
        <compound> <constr1> <constr2>... <constrN>, <constr1>...<constrN>, ...
        Returns a defaultdict mapping a compound to a list of analyses.

        Repeated compounds accumulate: their analyses are extended, not
        replaced.
        """
        analysis_sep = (analysis_sep if analysis_sep
                        else self.analysis_separator)
        annotations = collections.defaultdict(list)
        _logger.info("Reading annotations from '%s'..." % file_name)
        for line in self._read_text_file(file_name):
            compound, analyses_line = line.split(None, 1)
            analysis = self.read_annotation(analyses_line,
                                            construction_sep,
                                            analysis_sep)
            annotations[compound].extend(analysis)
        _logger.info("Done.")
        return annotations
    def write_annotations_file(self,
                               file_name,
                               annotations,
                               construction_sep=' ',
                               analysis_sep=None,
                               output_tags=False):
        """Write annotations (compound -> alternative analyses) as
        tab-separated lines, sorted by compound."""
        _logger.info("Writing annotations to '%s'..." % file_name)
        def _annotation_func(item):
            # Adapt an annotations item to write_formatted_file's
            # (count, compound, alternatives, logp, clogp) contract.
            (compound, annotation) = item
            try:
                # Annotation objects expose .alternatives ...
                alts = annotation.alternatives
            except AttributeError:
                # ... but a plain list of analyses is accepted too.
                alts = annotation
            return (1, compound, alts, 0, 0)
        self.write_formatted_file(
            file_name,
            '{compound}\t{analysis}\n',
            sorted(annotations.items()),
            _annotation_func,
            analysis_sep=analysis_sep,
            output_tags=output_tags,
            construction_sep=construction_sep)
        _logger.info("Done.")
    def read_combined_file(self, file_name, annotation_prefix='<',
                           construction_sep=' ',
                           analysis_sep=','):
        """Reads a file that combines unannotated word tokens
        and annotated data.
        The formats are the same as for files containing only one of the
        mentioned types of data, except that lines with annotations are
        additionally prefixed with a special symbol.

        Yields (is_annotation, count, compound, analysis_or_atoms) tuples.
        For annotated lines only the first alternative analysis is used.
        """
        for line in self._read_text_file(file_name):
            if line.startswith(annotation_prefix):
                analysis = self.read_annotation(
                    line[len(annotation_prefix):],
                    construction_sep=construction_sep,
                    analysis_sep=analysis_sep)[0]
                # The compound is reconstructed from its morphs.
                compound = ''.join([x.morph for x in analysis])
                yield (True, 1, compound, analysis)
            else:
                for compound in self.compound_sep_re.split(line):
                    if len(compound) > 0:
                        yield (False, 1, compound, self._split_atoms(compound))
def write_lexicon_file(self, file_name, lexicon):
"""Write to a Lexicon file all constructions
and their emission counts.
"""
_logger.info("Saving model lexicon to '%s'..." % file_name)
with self._open_text_file_write(file_name) as file_obj:
for (construction, counts) in lexicon:
count = sum(counts)
file_obj.write('{}\t{}\t{}\n'.format(count,
construction,
'\t'.join('{}'.format(x)
for x in counts)))
_logger.info("Done.")
    def write_formatted_file(self,
                             file_name,
                             line_format,
                             data,
                             data_func,
                             newline_func=None,
                             output_newlines=False,
                             output_tags=False,
                             construction_sep=None,
                             analysis_sep=None,
                             category_sep=None,
                             filter_tags=None,
                             filter_len=3):
        """Writes a file in the specified format.
        Formatting is flexible: even formats that cannot be read by
        FlatCat can be specified.

        *data_func* maps each item of *data* to a
        (count, compound, alternatives, logp, clogp) tuple; *line_format*
        may reference analysis/compound/count/logprob/clogprob/num_morphs/
        num_nonmorphemes/num_letters. *newline_func* marks items that act
        as line breaks; *filter_tags* drops short morphs with those tags.
        """
        construction_sep = (construction_sep if construction_sep
                            else self.construction_separator)
        analysis_sep = (analysis_sep if analysis_sep    # FIXME
                        else self.analysis_separator)
        category_sep = (category_sep if category_sep
                        else self.category_separator)
        output_morph = _make_morph_formatter(category_sep, output_tags)
        with self._open_text_file_write(file_name) as fobj:
            for item in _generator_progress(data):
                if newline_func is not None and newline_func(item):
                    if output_newlines:
                        fobj.write("\n")
                    continue
                (count, compound, alternatives, logp, clogp) = data_func(item)
                analysis = []
                # Morph statistics are only well defined when there is a
                # single (unambiguous) analysis.
                if len(alternatives) == 1:
                    constructions = alternatives[0]
                    num_morphs = len(constructions)
                    num_nonmorphemes = sum(1 for cmorph in constructions
                                           if cmorph.category == 'ZZZ')
                    num_letters = sum(len(cmorph.morph)
                                      for cmorph in constructions)
                else:
                    num_morphs = None
                    num_nonmorphemes = None
                    num_letters = None
                for constructions in alternatives:
                    if filter_tags is not None:
                        # Keep filtered-tag morphs only if they are long.
                        constructions = [cmorph for cmorph in constructions
                                         if cmorph.category not in filter_tags
                                         or len(cmorph) > filter_len]
                    constructions = [output_morph(cmorph)
                                     for cmorph in constructions]
                    analysis.append(construction_sep.join(constructions))
                analysis = analysis_sep.join(analysis)
                fobj.write(line_format.format(
                    analysis=analysis,
                    compound=compound,
                    count=count,
                    logprob=logp,
                    clogprob=clogp,
                    num_morphs=num_morphs,
                    num_nonmorphemes=num_nonmorphemes,
                    num_letters=num_letters))
def read_annotation(self, line, construction_sep, analysis_sep=None):
if analysis_sep is not None:
analyses = line.split(analysis_sep)
else:
analyses = [line]
out = []
for analysis in analyses:
analysis = analysis.strip()
segments = analysis.split(construction_sep)
out.append(tuple(self._morph_or_cmorph(x) for x in segments))
return out
def _morph_or_cmorph(self, morph_cat):
"""Parses a string describing a morph, either tagged
or not tagged, returing a CategorizedMorph.
"""
parts = morph_cat.rsplit(self.category_separator, 1)
morph = parts[0].strip()
if len(parts) == 1:
category = None
else:
category = parts[1]
if self._strict and category not in get_categories():
raise InvalidCategoryError(category)
cmorph = CategorizedMorph(morph, category)
return cmorph
#### This can be removed once it finds its way to Baseline ####
#
    def _open_text_file_write(self, file_name_or_obj):
        """Open a file for writing with the appropriate compression/encoding

        Accepts either a path ('-' for stdout, '.gz'/'.bz2' for compressed
        output) or an already-open binary file object, which is wrapped in
        an encoding writer.
        """
        if _is_string(file_name_or_obj):
            file_name = file_name_or_obj
            if file_name == '-':
                file_obj = sys.stdout
                # On Python 3, stdout already handles text encoding itself.
                if PY3:
                    return file_obj
            elif file_name.endswith('.gz'):
                file_obj = gzip.open(file_name, 'wb')
            elif file_name.endswith('.bz2'):
                file_obj = bz2.BZ2File(file_name, 'wb')
            else:
                file_obj = open(file_name, 'wb')
        else:
            file_obj = file_name_or_obj
        if self.encoding is None:
            # Take encoding from locale if not set so far
            self.encoding = locale.getpreferredencoding()
        return codecs.getwriter(self.encoding)(file_obj)
    def _open_text_file_read(self, file_name_or_obj):
        """Open a file for reading with the appropriate compression/encoding

        Accepts either a path ('-' for stdin, '.gz'/'.bz2' for compressed
        input) or an already-open binary file object.
        """
        if _is_string(file_name_or_obj):
            file_name = file_name_or_obj
            if file_name == '-':
                if PY3:
                    return sys.stdin
                else:
                    # Python 2 stdin yields bytes; wrap it so iteration
                    # yields decoded unicode lines.
                    class StdinUnicodeReader:
                        def __init__(self, encoding):
                            self.encoding = encoding
                            if self.encoding is None:
                                self.encoding = locale.getpreferredencoding()
                        def __iter__(self):
                            return self
                        def next(self):
                            l = sys.stdin.readline()
                            if not l:
                                raise StopIteration()
                            return l.decode(self.encoding)
                    return StdinUnicodeReader(self.encoding)
            else:
                if file_name.endswith('.gz'):
                    file_obj = gzip.open(file_name, 'rb')
                elif file_name.endswith('.bz2'):
                    file_obj = bz2.BZ2File(file_name, 'rb')
                else:
                    file_obj = open(file_name, 'rb')
        else:
            file_obj = file_name_or_obj
        if self.encoding is None:
            self.encoding = locale.getpreferredencoding()
        if self.encoding is None:
            # Try to determine encoding if not set so far
            # NOTE(review): when a file *object* was passed in, file_name is
            # undefined here, so this branch would raise NameError -- confirm.
            self.encoding = self._find_encoding(file_name)
        inp = codecs.getreader(self.encoding)(file_obj)
        return inp
# straight copypasta
    def _read_text_file(self, file_name, raw=False):
        """Read a text file with the appropriate compression and encoding.
        Comments and empty lines are skipped unless raw is True.

        Yields one (optionally lowercased) line at a time. Ctrl-C while
        reading stdin ends the stream gracefully instead of raising.
        """
        inp = self._open_text_file_read(file_name)
        try:
            for line in inp:
                line = line.rstrip()
                if not raw and \
                   (len(line) == 0 or line.startswith(self.comment_start)):
                    continue
                if self.lowercase:
                    yield line.lower()
                else:
                    yield line
        except KeyboardInterrupt:
            if file_name == '-':
                _logger.info("Finished reading from stdin")
                return
            else:
                raise
# straight copypasta
def read_parameter_file(self, file_name):
"""Read learned or estimated parameters from a file"""
params = {}
line_re = re.compile(r'^(.*)\s*:\s*(.*)$')
for line in self._read_text_file(file_name):
m = line_re.match(line.rstrip())
if m:
key = m.group(1)
val = m.group(2)
try:
val = float(val)
except ValueError:
pass
params[key] = val
return params
class TarGzMember(object):
    """File-like object that writes itself into the tarfile on closing"""
    def __init__(self, arcname, tarmodel):
        # Name of the member inside the archive.
        self.arcname = arcname
        # Owning TarGzModel (provides the open tarfile object).
        self.tarmodel = tarmodel
        # In-memory buffer; created on __enter__.
        self.strio = None
    def __enter__(self):
        self.strio = StringIO()
        return self
    def __exit__(self, typ, value, trace):
        self.close()
    def close(self):
        # Idempotent: a second close must not re-add the member.
        if self.strio.closed:
            return
        # tarfile needs the member size up front, so measure the buffer,
        # rewind it, then stream it into the archive.
        info = tarfile.TarInfo(name=self.arcname)
        self.strio.seek(0, os.SEEK_END)
        info.size = self.strio.tell()
        self.strio.seek(0)
        self.tarmodel.tarfobj.addfile(tarinfo=info, fileobj=self.strio)
        self.strio.close()
    def write(self, *args, **kwargs):
        # Delegate writes to the in-memory buffer.
        self.strio.write(*args, **kwargs)
    def __repr__(self):
        return '{} in {}'.format(
            self.arcname, self.tarmodel.filename)
class TarGzModel(object):
    """A wrapper to hide the ugliness of the tarfile API.
    Both TarGzModel itself and the method newmember are context managers:
    Writing a model requires a nested with statement.
    """
    def __init__(self, filename, mode):
        # mode: 'w' writes a new gzipped archive; anything else reads one.
        self.filename = filename
        if mode == 'w':
            self.mode = 'w|gz'
        else:
            self.mode = 'r|gz'
        self.tarfobj = None
    def __enter__(self):
        self.tarfobj = tarfile.open(self.filename, self.mode)
        return self
    def __exit__(self, typ, value, trace):
        self.tarfobj.close()
    def newmember(self, arcname):
        """Receive a new member to the .tar.gz archive.
        Arguments:
            arcname - the name of the file within the archive.
        Returns:
            a file-like object into which the contents can be written.
            This is a context manager: use a "with" statement.
        """
        assert 'w' in self.mode
        return TarGzMember(arcname, self)
    def members(self):
        """Generates the (name, contents) pairs for each file in
        the archive.
        The contents are in the form of file-like objects.
        The files are generated in the order they are in the archive:
        the recipient must be able to handle them in an arbitrary order.
        """
        assert 'r' in self.mode
        while True:
            # Stream mode ('r|gz') only allows sequential access.
            info = self.tarfobj.next()
            if info is None:
                break
            fobj = self.tarfobj.extractfile(info)
            yield (info.name, fobj)
            # The file object is only valid until the next iteration.
            fobj.close()
#
#### End of stuff belonging in Baseline ####
def _make_morph_formatter(category_sep, output_tags):
if output_tags:
def output_morph(cmorph):
if cmorph.category is None:
return cmorph.morph
return '{}{}{}'.format(cmorph.morph,
category_sep,
cmorph.category)
else:
def output_morph(cmorph):
try:
return cmorph.morph
except AttributeError:
return cmorph
return output_morph
| 37.705882 | 79 | 0.536174 |
96fe32afef5f63f8535b96d4ebffbe21c1175821 | 674 | py | Python | src/items/legendary.py | bonetou/GildedRose-Refactoring-Kata | 564e9d41f9233e60412d550bcef4ba18b4a2ace8 | [
"MIT"
] | null | null | null | src/items/legendary.py | bonetou/GildedRose-Refactoring-Kata | 564e9d41f9233e60412d550bcef4ba18b4a2ace8 | [
"MIT"
] | null | null | null | src/items/legendary.py | bonetou/GildedRose-Refactoring-Kata | 564e9d41f9233e60412d550bcef4ba18b4a2ace8 | [
"MIT"
] | 1 | 2021-08-15T17:00:48.000Z | 2021-08-15T17:00:48.000Z | from src.items.helpers.constants import MIN_QUALITY_DEFAULT_VALUE
from src.items.exceptions.invalid_quality_value import InvalidQualityValue
from src.items.base import Item
class LegendaryItem(Item):
    """An item whose quality never changes and must be non-negative."""

    def __init__(self, name: str, sell_in: int, quality: int):
        super().__init__(name, sell_in, quality)
        self.is_valid_quality_legendary(self.get_quality())

    def update_quality(self):
        # Legendary items never degrade or improve.
        pass

    def is_valid_quality_legendary(self, quality_value):
        """Return *quality_value* if valid, else raise InvalidQualityValue."""
        if quality_value < MIN_QUALITY_DEFAULT_VALUE:
            raise InvalidQualityValue(
                'Legendary quality value must be greater than or equal to 0')
        return quality_value
| 33.7 | 74 | 0.734421 |
bd4688ce600077304977fcb01b68e8f92ff27901 | 66 | py | Python | book1/ch01/exercise_1_4.py | dragancvetic/py_training | f27fa021e630fa16882a0438e009a73e11d9515b | [
"MIT"
] | null | null | null | book1/ch01/exercise_1_4.py | dragancvetic/py_training | f27fa021e630fa16882a0438e009a73e11d9515b | [
"MIT"
] | null | null | null | book1/ch01/exercise_1_4.py | dragancvetic/py_training | f27fa021e630fa16882a0438e009a73e11d9515b | [
"MIT"
] | null | null | null | # has to be run as "python exercise_1_4.py"
print "Hello world"
| 13.2 | 43 | 0.712121 |
0f5d182b2db9b6e4c2466fd51f76ce90234594fe | 8,267 | py | Python | tests/components/geocaching/test_config_flow.py | eyager1/core | c0ae31d86c841107930cf471fd60d65b5c163f16 | [
"Apache-2.0"
] | null | null | null | tests/components/geocaching/test_config_flow.py | eyager1/core | c0ae31d86c841107930cf471fd60d65b5c163f16 | [
"Apache-2.0"
] | 17 | 2021-11-24T06:24:25.000Z | 2022-03-31T06:23:29.000Z | tests/components/geocaching/test_config_flow.py | eyager1/core | c0ae31d86c841107930cf471fd60d65b5c163f16 | [
"Apache-2.0"
] | null | null | null | """Test the Geocaching config flow."""
from collections.abc import Awaitable, Callable
from http import HTTPStatus
from unittest.mock import MagicMock
from aiohttp.test_utils import TestClient
from homeassistant.components.geocaching.const import (
DOMAIN,
ENVIRONMENT,
ENVIRONMENT_URLS,
)
from homeassistant.config_entries import (
DEFAULT_DISCOVERY_UNIQUE_ID,
SOURCE_INTEGRATION_DISCOVERY,
SOURCE_REAUTH,
SOURCE_USER,
)
from homeassistant.const import CONF_CLIENT_ID, CONF_CLIENT_SECRET
from homeassistant.core import HomeAssistant
from homeassistant.data_entry_flow import RESULT_TYPE_ABORT, RESULT_TYPE_EXTERNAL_STEP
from homeassistant.helpers import config_entry_oauth2_flow
from homeassistant.setup import async_setup_component
from . import CLIENT_ID, CLIENT_SECRET, REDIRECT_URI
from tests.common import MockConfigEntry
from tests.test_util.aiohttp import AiohttpClientMocker
CURRENT_ENVIRONMENT_URLS = ENVIRONMENT_URLS[ENVIRONMENT]
async def setup_geocaching_component(hass: HomeAssistant) -> bool:
    """Set up the Geocaching component with manual OAuth credentials."""
    config = {
        DOMAIN: {
            CONF_CLIENT_ID: CLIENT_ID,
            CONF_CLIENT_SECRET: CLIENT_SECRET,
        },
    }
    return await async_setup_component(hass, DOMAIN, config)
async def test_full_flow(
    hass: HomeAssistant,
    hass_client_no_auth: Callable[[], Awaitable[TestClient]],
    aioclient_mock: AiohttpClientMocker,
    current_request_with_host: None,
    mock_geocaching_config_flow: MagicMock,
    mock_setup_entry: MagicMock,
) -> None:
    """Check full flow."""
    assert await setup_geocaching_component(hass)
    # Ensure integration is discovered when manual implementation is configured
    flows = hass.config_entries.flow.async_progress()
    assert len(flows) == 1
    assert "context" in flows[0]
    assert flows[0]["context"]["source"] == SOURCE_INTEGRATION_DISCOVERY
    assert flows[0]["context"]["unique_id"] == DEFAULT_DISCOVERY_UNIQUE_ID
    # Start a user-initiated config flow.
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_USER}
    )
    assert "flow_id" in result
    # Encode the OAuth state JWT that the external step embeds in the URL.
    # pylint: disable=protected-access
    state = config_entry_oauth2_flow._encode_jwt(
        hass,
        {
            "flow_id": result["flow_id"],
            "redirect_uri": REDIRECT_URI,
        },
    )
    assert result.get("type") == RESULT_TYPE_EXTERNAL_STEP
    assert result.get("step_id") == "auth"
    assert result.get("url") == (
        f"{CURRENT_ENVIRONMENT_URLS['authorize_url']}?response_type=code&client_id={CLIENT_ID}"
        f"&redirect_uri={REDIRECT_URI}"
        f"&state={state}&scope=*"
    )
    # Simulate the user returning from the authorization page.
    client = await hass_client_no_auth()
    resp = await client.get(f"/auth/external/callback?code=abcd&state={state}")
    assert resp.status == HTTPStatus.OK
    assert resp.headers["content-type"] == "text/html; charset=utf-8"
    # Mock the token endpoint so the flow can complete the code exchange.
    aioclient_mock.post(
        CURRENT_ENVIRONMENT_URLS["token_url"],
        json={
            "access_token": "mock-access-token",
            "token_type": "bearer",
            "expires_in": 3599,
            "refresh_token": "mock-refresh_token",
        },
    )
    await hass.config_entries.flow.async_configure(result["flow_id"])
    assert len(hass.config_entries.async_entries(DOMAIN)) == 1
    assert len(mock_setup_entry.mock_calls) == 1
async def test_existing_entry(
    hass: HomeAssistant,
    hass_client_no_auth: Callable[[], Awaitable[TestClient]],
    aioclient_mock: AiohttpClientMocker,
    current_request_with_host: None,
    mock_geocaching_config_flow: MagicMock,
    mock_setup_entry: MagicMock,
    mock_config_entry: MockConfigEntry,
) -> None:
    """Check existing entry."""
    assert await setup_geocaching_component(hass)
    # Pre-register an entry so the flow runs against an existing account.
    mock_config_entry.add_to_hass(hass)
    assert len(hass.config_entries.async_entries(DOMAIN)) == 1
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_USER}
    )
    assert "flow_id" in result
    # pylint: disable=protected-access
    state = config_entry_oauth2_flow._encode_jwt(
        hass,
        {
            "flow_id": result["flow_id"],
            "redirect_uri": REDIRECT_URI,
        },
    )
    client = await hass_client_no_auth()
    resp = await client.get(f"/auth/external/callback?code=abcd&state={state}")
    assert resp.status == HTTPStatus.OK
    assert resp.headers["content-type"] == "text/html; charset=utf-8"
    aioclient_mock.post(
        CURRENT_ENVIRONMENT_URLS["token_url"],
        json={
            "access_token": "mock-access-token",
            "token_type": "bearer",
            "expires_in": 3599,
            "refresh_token": "mock-refresh_token",
        },
    )
    await hass.config_entries.flow.async_configure(result["flow_id"])
    # Completing the flow must not create a duplicate entry.
    assert len(hass.config_entries.async_entries(DOMAIN)) == 1
async def test_oauth_error(
    hass: HomeAssistant,
    hass_client_no_auth: Callable[[], Awaitable[TestClient]],
    aioclient_mock: AiohttpClientMocker,
    current_request_with_host: None,
    mock_geocaching_config_flow: MagicMock,
    mock_setup_entry: MagicMock,
) -> None:
    """Check if aborted when oauth error occurs."""
    assert await setup_geocaching_component(hass)
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_USER}
    )
    assert "flow_id" in result
    # pylint: disable=protected-access
    state = config_entry_oauth2_flow._encode_jwt(
        hass,
        {
            "flow_id": result["flow_id"],
            "redirect_uri": REDIRECT_URI,
        },
    )
    assert result.get("type") == RESULT_TYPE_EXTERNAL_STEP
    client = await hass_client_no_auth()
    resp = await client.get(f"/auth/external/callback?code=abcd&state={state}")
    assert resp.status == HTTPStatus.OK
    # No user information is returned from API
    mock_geocaching_config_flow.update.return_value.user = None
    aioclient_mock.post(
        CURRENT_ENVIRONMENT_URLS["token_url"],
        json={
            "access_token": "mock-access-token",
            "token_type": "bearer",
            "expires_in": 3599,
            "refresh_token": "mock-refresh_token",
        },
    )
    result2 = await hass.config_entries.flow.async_configure(result["flow_id"])
    # The flow must abort and neither create an entry nor set one up.
    assert result2.get("type") == RESULT_TYPE_ABORT
    assert result2.get("reason") == "oauth_error"
    assert len(hass.config_entries.async_entries(DOMAIN)) == 0
    assert len(mock_setup_entry.mock_calls) == 0
async def test_reauthentication(
    hass: HomeAssistant,
    hass_client_no_auth: Callable[[], Awaitable[TestClient]],
    aioclient_mock: AiohttpClientMocker,
    current_request_with_host: None,
    mock_geocaching_config_flow: MagicMock,
    mock_setup_entry: MagicMock,
    mock_config_entry: MockConfigEntry,
) -> None:
    """Test Geocaching reauthentication."""
    # Start from an already-configured entry, then trigger a reauth flow.
    mock_config_entry.add_to_hass(hass)
    assert await setup_geocaching_component(hass)
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_REAUTH}
    )
    # Exactly one flow (the reauth confirmation step) should be pending.
    flows = hass.config_entries.flow.async_progress()
    assert len(flows) == 1
    assert "flow_id" in flows[0]
    # Confirm the reauth step to move on to the external OAuth step.
    result = await hass.config_entries.flow.async_configure(flows[0]["flow_id"], {})
    assert "flow_id" in result
    # pylint: disable=protected-access
    state = config_entry_oauth2_flow._encode_jwt(
        hass,
        {
            "flow_id": result["flow_id"],
            "redirect_uri": "https://example.com/auth/external/callback",
        },
    )
    # Simulate the browser returning from the provider with an auth code.
    client = await hass_client_no_auth()
    resp = await client.get(f"/auth/external/callback?code=abcd&state={state}")
    assert resp.status == HTTPStatus.OK
    assert resp.headers["content-type"] == "text/html; charset=utf-8"
    # Mock a successful token exchange.
    aioclient_mock.post(
        CURRENT_ENVIRONMENT_URLS["token_url"],
        json={
            "access_token": "mock-access-token",
            "token_type": "bearer",
            "expires_in": 3599,
            "refresh_token": "mock-refresh_token",
        },
    )
    await hass.config_entries.flow.async_configure(result["flow_id"])
    # Reauth updates the existing entry; still exactly one, and it is
    # (re)loaded once.
    assert len(hass.config_entries.async_entries(DOMAIN)) == 1
    assert len(mock_setup_entry.mock_calls) == 1
| 32.167315 | 95 | 0.683319 |
8ca9915341e3360d3ed7a2f74eb8a033bedc45fb | 2,938 | py | Python | plato_based/HandcraftedPolicy.py | dertilo/dialogue-systems | 3530bdfae2a75360e3b09841cfff773647027d6e | [
"MIT"
] | null | null | null | plato_based/HandcraftedPolicy.py | dertilo/dialogue-systems | 3530bdfae2a75360e3b09841cfff773647027d6e | [
"MIT"
] | null | null | null | plato_based/HandcraftedPolicy.py | dertilo/dialogue-systems | 3530bdfae2a75360e3b09841cfff773647027d6e | [
"MIT"
] | null | null | null | import Ontology
from State import SlotFillingDialogueState
from DialoguePolicy import DialoguePolicy
from dialog_action_classes import DialogueAct, DialogueActItem, Operator
from copy import deepcopy
import random
"""
HandcraftedPolicy is a rule-based system policy, developed as a baseline and as
a quick way to perform sanity checks and debug a Conversational Agent.
It will try to fill unfilled slots, then suggest an item, and answer any
requests from the user.
"""
def get_value(item_in_focus, requested_slot):
    """Look up ``requested_slot`` in ``item_in_focus``.

    Falls back to the string "not available" when the slot is missing
    or holds a falsy value (e.g. ``None`` or an empty string).
    """
    return item_in_focus.get(requested_slot) or "not available"
def build_inform_act(dialogue_state: SlotFillingDialogueState):
    """Answer the user's pending slot request with a single inform act.

    Clears ``dialogue_state.requested_slot`` as a side effect, since the
    request is being addressed here.
    """
    slot = dialogue_state.requested_slot
    # Reset the request because we are answering it now.
    dialogue_state.requested_slot = ""
    slot_value = get_value(dialogue_state.item_in_focus, slot)
    return [
        DialogueAct("inform", [DialogueActItem(slot, Operator.EQ, slot_value)])
    ]
def make_request(unfilled_slots):
    """Pick one of the unfilled slots at random and ask the user for it."""
    chosen = random.choice(unfilled_slots)
    return [DialogueAct("request", [DialogueActItem(chosen, Operator.EQ, "")])]
def make_offer(ds):
    """Offer the item in focus by name, followed by inform acts for its
    filled slots (skipping the bookkeeping slots and falsy values)."""
    if "name" in ds.item_in_focus:
        offer_name = ds.item_in_focus["name"]
    else:
        offer_name = "unknown"
    acts = [DialogueAct("offer", [DialogueActItem("name", Operator.EQ, offer_name)])]
    for slot in ds.slots_filled:
        # "requested" is bookkeeping; "id"/"name" are already part of the offer.
        if slot == "requested" or slot in ("id", "name"):
            continue
        if not ds.slots_filled[slot]:
            continue
        acts.append(build_inform(slot, ds))
    return acts
def build_inform(slot, ds: SlotFillingDialogueState):
    """Build a single inform act for ``slot`` from the item in focus,
    defaulting to "no info" when the slot is absent."""
    slot_value = ds.item_in_focus.get(slot, "no info")
    return DialogueAct("inform", [DialogueActItem(slot, Operator.EQ, slot_value)])
class HandcraftedPolicy:
    """Rule-based baseline policy: fill slots, then offer an item, then
    answer the user's requests."""

    def __init__(self, ontology: Ontology.Ontology):
        super().__init__()
        self.ontology = ontology

    def next_action(self, ds: SlotFillingDialogueState):
        """Return the system dialogue acts for the current state."""
        if ds.is_terminal_state:
            return [DialogueAct("bye", [DialogueActItem("", Operator.EQ, "")])]
        if ds.requested_slot != "" and ds.item_in_focus and ds.system_made_offer:
            return build_inform_act(ds)
        return self.request_slots_or_make_offer(ds)

    def request_slots_or_make_offer(self, ds: SlotFillingDialogueState):
        """Request a missing system-requestable slot, otherwise offer the
        item in focus, otherwise give up with "canthelp"."""
        unfilled = [
            slot
            for slot in self.ontology.ontology["system_requestable"]
            if ds.slots_filled[slot] is None
        ]
        if unfilled:
            return make_request(unfilled)
        if ds.item_in_focus:
            return make_offer(ds)
        return [DialogueAct("canthelp", [])]
| 32.285714 | 87 | 0.686862 |
3fd4c1349722ebd6973ab7aee0139befd73c5ec1 | 1,883 | py | Python | otenki-bot.py | koba0819/Discord-OtenkiBOT | 50b04de21e898f9cb18765af6273ee35a2cee2e7 | [
"MIT"
] | null | null | null | otenki-bot.py | koba0819/Discord-OtenkiBOT | 50b04de21e898f9cb18765af6273ee35a2cee2e7 | [
"MIT"
] | null | null | null | otenki-bot.py | koba0819/Discord-OtenkiBOT | 50b04de21e898f9cb18765af6273ee35a2cee2e7 | [
"MIT"
] | null | null | null | import discord
import requests
import json
import settings
# Load the city-name -> citycode lookup table once at startup.
# Use a context manager so the file handle is closed; the original
# called open() and left the handle open for the life of the process.
with open('citycode.json', encoding='utf-8') as json_file:
    citycode = json.load(json_file)

token = settings.key
client = discord.Client()
@client.event
async def on_ready():
    """Log the bot's identity once the Discord connection is up."""
    for line in (
        '-------------',
        'logged in',
        'BOT Name: {}'.format(client.user.name),
        'BOT ID: {}'.format(client.user.id),
        '-------------',
    ):
        print(line)
@client.event
async def on_message(message):
    """Reply with a 3-day weather forecast when a known city name is posted."""
    if message.author.bot:  # ignore messages from bots (including ourselves)
        return
    # Only react to messages that exactly match a city in citycode.json.
    # The original indexed the dict directly (`citycode[message.content]`),
    # which raised KeyError for any other message and then compared a dict
    # entry against the message string — a membership test is what was meant.
    if message.content not in citycode:
        return
    code = citycode[message.content]["code"]
    url = 'http://weather.livedoor.com/forecast/webservice/json/v1?city=%s' % code
    d = requests.get(url).json()
    for i in range(3):
        forecast = d['forecasts'][i]
        max_temp = forecast['temperature']['max']
        min_temp = forecast['temperature']['min']
        # Same embed either way; only the temperature field differs,
        # so build it once instead of duplicating the whole block.
        embed = discord.Embed(title=forecast['dateLabel'] + ", " + forecast['date'] + ' ' + d['location']['city'] + "の天気")
        embed.add_field(name="天候", value=forecast['telop'])
        if max_temp is not None and min_temp is not None:
            embed.add_field(name="気温", value="最高" + max_temp['celsius'] + "度 最低" + min_temp['celsius'] + "度")
        else:
            embed.add_field(name="気温", value="気温は不明です")
        embed.set_thumbnail(url=forecast['image']['url'])
        await message.channel.send(embed=embed)
client.run(token) | 36.921569 | 149 | 0.576739 |
18f6062e69f2adfe9a6fc67f5dffb7f360cfdccf | 17,123 | py | Python | sncosmo/utils.py | jasminelujia/sncosmo | 6ca3be6a52f7a096b874e181c21b93f711610f12 | [
"BSD-3-Clause"
] | 1 | 2019-03-27T09:46:46.000Z | 2019-03-27T09:46:46.000Z | sncosmo/utils.py | jasminelujia/sncosmo | 6ca3be6a52f7a096b874e181c21b93f711610f12 | [
"BSD-3-Clause"
] | null | null | null | sncosmo/utils.py | jasminelujia/sncosmo | 6ca3be6a52f7a096b874e181c21b93f711610f12 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import, division
from collections import OrderedDict
import os
import sys
import math
import warnings
import socket
import codecs
import numpy as np
from scipy import integrate, optimize
from astropy.extern import six
def dict_to_array(d):
    """Convert a dictionary of lists (or single values) to a structured
    numpy.ndarray.

    Scalar values are broadcast to the length of the longest column.
    """

    # Convert all lists/values to 1-d arrays, in order to let numpy
    # figure out the necessary size of the string arrays.
    new_d = OrderedDict()
    for key in d:
        new_d[key] = np.atleast_1d(d[key])

    # Determine dtype of output array.
    # (dict.items() replaces the obsolete Py2 `six.iteritems` shim.)
    dtype = [(key, arr.dtype) for key, arr in new_d.items()]

    # Initialize ndarray and then fill it column by column.
    col_len = max(len(v) for v in new_d.values())
    result = np.empty(col_len, dtype=dtype)
    for key in new_d:
        result[key] = new_d[key]

    return result
def format_value(value, error=None, latex=False):
    """Return a string representing value and uncertainty.

    If latex=True, use '\\pm' and '\\times'.
    """

    if latex:
        plus_minus = '\\pm'
        exp_template = ' \\times 10^{{{0:d}}}'
    else:
        plus_minus = '+/-'
        exp_template = ' x 10^{0:d}'

    # Decimal exponent of the first significant digit of the value.
    magnitude = abs(value)
    if magnitude == 0.:
        first_digit = 0
    else:
        first_digit = int(math.floor(math.log10(magnitude)))

    # Decimal exponent of the last significant digit, taken from the
    # error when one is given.
    if error is None or error == 0.:
        last_digit = first_digit - 6  # Pretend there are 7 significant figures.
    else:
        last_digit = int(math.floor(math.log10(error)))

    # Use exponential notation if
    # value > 1000 and error > 1000 or value < 0.01
    if (first_digit > 2 and last_digit > 2) or first_digit < -2:
        value = value / 10 ** first_digit
        if error is not None:
            error = error / 10 ** first_digit
        precision = max(0, first_digit - last_digit + 1)
        suffix = exp_template.format(first_digit)
    else:
        precision = max(0, -last_digit + 1)
        suffix = ''

    if error is None:
        body = ('{0:.' + str(precision) + 'f}').format(value)
    else:
        body = (('{0:.' + str(precision) + 'f} {1:s} {2:.' + str(precision) + 'f}')
                .format(value, plus_minus, error))

    if suffix != '':
        body = '({0})'.format(body)

    return body + suffix
class Result(dict):
    """Represents an optimization result.

    Notes
    -----
    This is a cut and paste from scipy, normally imported with `from
    scipy.optimize import Result`. However, it isn't available in
    scipy 0.9 (or possibly 0.10), so it is included here.

    Since this class is essentially a subclass of dict with attribute
    accessors, one can see which attributes are available using the
    `keys()` method.

    Deprecated attributes can be added via, e.g.:

    >>> res = Result(a=1, b=2)
    >>> res.__dict__['deprecated']['c'] = (2, "Use b instead")
    """

    def __init__(self, *args, **kwargs):
        # The deprecation table lives on the instance __dict__ so it does
        # not collide with the mapping contents themselves.
        self.__dict__['deprecated'] = {}
        dict.__init__(self, *args, **kwargs)

    def __getitem__(self, name):
        # Fall back to the deprecation table for absent keys, emitting
        # the recorded warning before returning the stored value.
        try:
            return dict.__getitem__(self, name)
        except KeyError:
            value, message = self.__dict__['deprecated'][name]
            warnings.warn(message)
            return value

    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)

    __setattr__ = dict.__setitem__
    __delattr__ = dict.__delitem__

    def __repr__(self):
        if not self.keys():
            return self.__class__.__name__ + "()"
        width = max(len(key) for key in self.keys()) + 1
        return '\n'.join(key.rjust(width) + ': ' + repr(val)
                         for key, val in self.items())
def _integral_diff(x, pdf, a, q):
"""Return difference between q and the integral of the function `pdf`
between a and x. This is used for solving for the ppf."""
return integrate.quad(pdf, a, x)[0] - q
def ppf(pdf, x, a, b):
    """Percent-point function (inverse cdf), given the probability
    distribution function pdf and limits a, b.

    Parameters
    ----------
    pdf : callable
        Probability distribution function
    x : array_like
        Points at which to evaluate the ppf
    a, b : float
        Limits (can be -np.inf, np.inf, assuming pdf has finite integral).

    Returns
    -------
    numpy.ndarray
        Same shape as ``x``; for each quantile in ``x``, the point where
        the normalized CDF of ``pdf`` equals that quantile.
    """

    # Geometric growth factor used to expand the bracketing interval
    # when a limit is infinite.
    FACTOR = 10.

    if not b > a:
        raise ValueError('b must be greater than a')

    # integral of pdf between a and b (used to normalize the quantiles,
    # so pdf need not integrate to exactly 1)
    tot = integrate.quad(pdf, a, b)[0]

    # initialize result array; flatten x so we can loop, restoring the
    # shape at the end
    x = np.asarray(x)
    shape = x.shape
    x = np.ravel(x)
    result = np.zeros(len(x))

    for i in range(len(x)):
        cumsum = x[i] * tot  # target cumulative sum
        left = a
        right = b

        # Need finite limits for the solver.
        # For infinite upper or lower limits, find finite limits such that
        # cdf(left) < cumsum < cdf(right), growing the bracket by FACTOR
        # each iteration.
        if left == -np.inf:
            left = -FACTOR
            while integrate.quad(pdf, a, left)[0] > cumsum:
                right = left
                left *= FACTOR
        if right == np.inf:
            right = FACTOR
            while integrate.quad(pdf, a, right)[0] < cumsum:
                left = right
                right *= FACTOR

        # Root of (integral from a to t) - cumsum within the bracket.
        result[i] = optimize.brentq(_integral_diff, left, right,
                                    args=(pdf, a, cumsum))

    return result.reshape(shape)
class Interp1D(object):
    """Linear interpolation over regularly spaced samples.

    ``y`` holds the sample values at ``n`` evenly spaced points from
    ``xmin`` to ``xmax`` inclusive.
    """

    def __init__(self, xmin, xmax, y):
        self._xmin = xmin
        self._xmax = xmax
        self._n = len(y)
        self._xstep = (xmax - xmin) / (self._n - 1)
        self._y = y

    def __call__(self, x):
        """works only in range [xmin, xmax)"""
        position = (x - self._xmin) / self._xstep
        index = int(position)
        frac = position - index
        # Linear blend between the two samples bracketing x.
        return (1. - frac) * self._y[index] + frac * self._y[index + 1]
def _download_file(remote_url, target):
    """
    Accepts a URL, downloads the file to a given open file object.

    This is a modified version of astropy.utils.data.download_file that
    downloads to an open file object instead of a cache directory.

    Raises urllib's HTTPError/URLError (with the requested URL appended
    to the message) on failure.
    """

    from contextlib import closing
    from astropy.extern.six.moves.urllib.request import urlopen, Request
    from astropy.extern.six.moves.urllib.error import URLError, HTTPError
    from astropy.utils.console import ProgressBarOrSpinner
    from . import conf

    timeout = conf.remote_timeout
    download_block_size = 32768

    try:
        # Pretend to be a web browser (IE 6.0). Some servers that we download
        # from forbid access from programs.
        headers = {'User-Agent': 'Mozilla/5.0',
                   'Accept': ('text/html,application/xhtml+xml,'
                              'application/xml;q=0.9,*/*;q=0.8')}
        req = Request(remote_url, headers=headers)
        with closing(urlopen(req, timeout=timeout)) as remote:
            # get size of remote if available (for use in progress bar)
            info = remote.info()
            size = None
            if 'Content-Length' in info:
                try:
                    # Malformed Content-Length is ignored; we just lose
                    # the progress-bar total.
                    size = int(info['Content-Length'])
                except ValueError:
                    pass

            dlmsg = "Downloading {0}".format(remote_url)
            with ProgressBarOrSpinner(size, dlmsg) as p:
                # Stream the body to `target` in fixed-size blocks,
                # updating the progress bar as we go.
                bytes_read = 0
                block = remote.read(download_block_size)
                while block:
                    target.write(block)
                    bytes_read += len(block)
                    p.update(bytes_read)
                    block = remote.read(download_block_size)

    # Append a more informative error message to HTTPErrors, URLErrors.
    except HTTPError as e:
        e.msg = "{}. requested URL: {!r}".format(e.msg, remote_url)
        raise
    except URLError as e:
        # NOTE(review): errno 8 appears to be a name-resolution failure
        # here — confirm; only then is the URL appended to the reason.
        append_msg = (hasattr(e, 'reason') and hasattr(e.reason, 'errno') and
                      e.reason.errno == 8)
        if append_msg:
            msg = "{0}. requested URL: {1}".format(e.reason.strerror,
                                                   remote_url)
            e.reason.strerror = msg
            e.reason.args = (e.reason.errno, msg)

        raise e

    # This isn't supposed to happen, but occasionally a socket.timeout gets
    # through. It's supposed to be caught in `urrlib2` and raised in this
    # way, but for some reason in mysterious circumstances it doesn't. So
    # we'll just re-raise it here instead.
    except socket.timeout as e:
        # add the requested URL to the message (normally just 'timed out')
        e.args = ('requested URL {!r} timed out'.format(remote_url),)
        raise URLError(e)
def download_file(remote_url, local_name):
    """
    Download a remote file to local path, unzipping if the URL ends in '.gz'.

    Parameters
    ----------
    remote_url : str
        The URL of the file to download

    local_name : str
        Absolute path filename of target file.

    Raises
    ------
    URLError (from urllib2 on PY2, urllib.request on PY3)
        Whenever there's a problem getting the remote file.
    """

    from astropy.extern.six.moves.urllib.error import HTTPError, URLError

    # ensure target directory exists
    dn = os.path.dirname(local_name)
    if not os.path.exists(dn):
        os.makedirs(dn)

    if remote_url.endswith(".gz"):
        import io
        import gzip

        # Download the compressed payload into memory, then write the
        # decompressed bytes to the target file.
        buf = io.BytesIO()
        _download_file(remote_url, buf)
        buf.seek(0)

        f = gzip.GzipFile(fileobj=buf, mode='rb')
        with open(local_name, 'wb') as target:
            target.write(f.read())
        f.close()

    else:
        try:
            with open(local_name, 'wb') as target:
                _download_file(remote_url, target)
        # Explicit BaseException replaces the original bare `except:` —
        # same semantics (also catches KeyboardInterrupt, so a partial
        # file is removed on Ctrl-C too), without the lint suppression.
        except BaseException:
            # in case of error downloading, remove file.
            if os.path.exists(local_name):
                os.remove(local_name)
            raise
def download_dir(remote_url, dirname):
    """
    Download a remote tar file to a local directory.

    Parameters
    ----------
    remote_url : str
        The URL of the file to download

    dirname : str
        Directory in which to place contents of tarfile. Created if it
        doesn't exist.

    Raises
    ------
    URLError (from urllib2 on PY2, urllib.request on PY3)
        Whenever there's a problem getting the remote file.
    """
    import io
    import tarfile

    if not os.path.exists(dirname):
        os.makedirs(dirname)

    # gzip-compressed tarball or plain tar, judged from the URL suffix.
    mode = 'r:gz' if remote_url.endswith(".gz") else None

    # Fetch the whole archive into an in-memory buffer first.
    archive = io.BytesIO()
    _download_file(remote_url, archive)
    archive.seek(0)

    # NOTE(review): extractall on an untrusted archive can write outside
    # `dirname` (path traversal); consider the `filter=` argument on
    # Python >= 3.12.
    tar = tarfile.open(fileobj=archive, mode=mode)
    tar.extractall(path=dirname)
    tar.close()
    archive.close()  # closing the tarfile does not close the buffer
class DataMirror(object):
    """Lazy fetcher for remote data.

    When asked for local absolute path to a file or directory, DataMirror
    checks if the file or directory exists locally and, if so, returns it.

    If it doesn't exist, it first determines where to get it from.
    It first downloads the file ``{remote_root}/redirects.json`` and checks
    it for a redirect from ``{relative_path}`` to a full URL. If no redirect
    exists, it uses ``{remote_root}/{relative_path}`` as the URL.

    It then downloads the URL to ``{rootdir}/{relative_path}``.
    For directories, ``.tar.gz`` is appended to the
    ``{relative_path}`` before the above is done and then the
    directory is unpacked locally.

    Parameters
    ----------
    rootdir : str or callable
        The local root directory, or a callable that returns the local root
        directory given no parameters. (The result of the call is cached.)
        Using a callable allows one to customize the discovery of the root
        directory (e.g., from a config file), and to defer that discovery
        until it is needed.

    remote_root : str
        Root URL of the remote server.
    """

    def __init__(self, rootdir, remote_root):
        # Normalize so URLs can be built by plain concatenation.
        if not remote_root.endswith('/'):
            remote_root = remote_root + '/'

        # _checked_rootdir caches the validated root directory;
        # _redirects caches the parsed redirects.json (fetched lazily).
        self._checked_rootdir = None
        self._rootdir = rootdir
        self._remote_root = remote_root

        self._redirects = None

    def rootdir(self):
        """Return the path to the local data directory, ensuring that it
        exists"""

        if self._checked_rootdir is None:

            # If the supplied value is a string, use it. Otherwise
            # assume it is a callable that returns a string)
            rootdir = (self._rootdir
                       if isinstance(self._rootdir, six.string_types)
                       else self._rootdir())

            # Check existence
            if not os.path.isdir(rootdir):
                raise Exception("data directory {!r} not an existing "
                                "directory".format(rootdir))

            # Cache value for future calls
            self._checked_rootdir = rootdir

        return self._checked_rootdir

    def _fetch_redirects(self):
        # Download and parse {remote_root}/redirects.json once.
        from astropy.extern.six.moves.urllib.request import urlopen
        import json

        f = urlopen(self._remote_root + "redirects.json")
        # codecs reader decodes the byte stream so json.load gets text.
        reader = codecs.getreader("utf-8")
        self._redirects = json.load(reader(f))
        f.close()

    def _get_url(self, remote_relpath):
        # Resolve a relative path to a full URL, honoring redirects.
        if self._redirects is None:
            self._fetch_redirects()

        if remote_relpath in self._redirects:
            return self._redirects[remote_relpath]
        else:
            return self._remote_root + remote_relpath

    def abspath(self, relpath, isdir=False):
        """Return absolute path to file or directory, ensuring that it exists.

        If ``isdir``, look for ``{relpath}.tar.gz`` on the remote server and
        unpackage it.

        Otherwise, just look for ``{relpath}``. If redirect points to a gz, it
        will be uncompressed."""

        abspath = os.path.join(self.rootdir(), relpath)

        if not os.path.exists(abspath):
            if isdir:
                url = self._get_url(relpath + ".tar.gz")

                # Download and unpack a directory.
                download_dir(url, os.path.dirname(abspath))

                # ensure that tarfile unpacked into the expected directory
                if not os.path.exists(abspath):
                    raise RuntimeError("Tarfile not unpacked into expected "
                                       "subdirectory. Please file an issue.")
            else:
                url = self._get_url(relpath)
                download_file(url, abspath)

        return abspath
def alias_map(aliased, aliases, required=()):
    """For each key in ``aliases``, find the item in ``aliased`` matching
    exactly one of the corresponding items in ``aliases``.

    Parameters
    ----------
    aliased : list of str
        Input keys, will be values in output map.
    aliases : dict of sets
        Dictionary where keys are "canonical name" and values are sets of
        possible aliases.
    required : list_like
        Keys in ``aliases`` that are considered required. An error is raised
        if no alias is found in ``aliased``.

    Returns
    -------
    Example::

        >>> aliases = {'a':set(['a', 'a_']), 'b':set(['b', 'b_'])}
        >>> alias_map(['A', 'B_', 'foo'], aliases)
        {'a': 'A', 'b': 'B_'}
    """
    # Case-insensitive lookup back to the original spelling.
    original_by_lower = {name.lower(): name for name in aliased}
    lowered_names = set(original_by_lower)

    result = {}
    for canonical, candidates in aliases.items():
        matches = lowered_names & candidates
        if len(matches) == 1:
            result[canonical] = original_by_lower[matches.pop()]
        elif not matches and canonical in required:
            raise ValueError('no alias found for {!r} (possible '
                             'case-independent aliases: {})'.format(
                                 canonical,
                                 ', '.join(repr(ka) for ka in candidates)))
        elif len(matches) > 1:
            raise ValueError('multiple aliases found for {!r}: {}'
                             .format(canonical,
                                     ', '.join(repr(a) for a in matches)))
    return result
def integration_grid(low, high, target_spacing):
    """Divide the range between `start` and `stop` into uniform bins
    with spacing less than or equal to `target_spacing` and return the
    bin midpoints and the actual spacing."""

    width = high - low
    # Smallest number of equal bins whose spacing does not exceed the target.
    nbins = int(math.ceil(width / target_spacing))
    spacing = width / nbins
    midpoints = np.arange(low + 0.5 * spacing, high, spacing)

    return midpoints, spacing
warned = []  # names already warned about, shared across warn_once calls


def warn_once(name, depver, rmver, extra=None):
    """Emit a deprecation warning for ``name`` at most once per process."""
    global warned
    if name in warned:
        return
    message = ("{} is deprecated in sncosmo {} "
               "and will be removed in sncosmo {}".format(name, depver, rmver))
    if extra is not None:
        message = message + " " + extra
    warnings.warn(message, stacklevel=2)
    warned.append(name)
| 31.076225 | 79 | 0.585937 |
c2547657a381a6a77fcb061bd427540862400fb6 | 25,042 | py | Python | allennlp/state_machines/transition_functions/basic_transition_function.py | tony-tong-punchh/allennlp | 9a13ab570025a0c1659986009d2abddb2e652020 | [
"Apache-2.0"
] | 24 | 2019-09-16T00:10:54.000Z | 2021-09-08T19:31:51.000Z | allennlp/state_machines/transition_functions/basic_transition_function.py | TalSchuster/allennlp-MultiLang | dbb28b939652491d2f633326edccca2cd0e528c8 | [
"Apache-2.0"
] | 3 | 2020-03-24T16:45:36.000Z | 2021-02-02T21:57:00.000Z | allennlp/state_machines/transition_functions/basic_transition_function.py | TalSchuster/allennlp-MultiLang | dbb28b939652491d2f633326edccca2cd0e528c8 | [
"Apache-2.0"
] | 10 | 2019-12-06T11:32:37.000Z | 2022-01-06T15:39:09.000Z | from collections import defaultdict
from typing import Any, Dict, List, Set, Tuple
from overrides import overrides
import torch
from torch.nn.modules.rnn import LSTM, LSTMCell
from torch.nn.modules.linear import Linear
from allennlp.modules import Attention
from allennlp.nn import util, Activation
from allennlp.state_machines.states import RnnStatelet, GrammarBasedState
from allennlp.state_machines.transition_functions.transition_function import TransitionFunction
class BasicTransitionFunction(TransitionFunction[GrammarBasedState]):
"""
This is a typical transition function for a state-based decoder. We use an LSTM to track
decoder state, and at every timestep we compute an attention over the input question/utterance
to help in selecting the action. All actions have an embedding, and we use a dot product
between a predicted action embedding and the allowed actions to compute a distribution over
actions at each timestep.
We allow the first action to be predicted separately from everything else. This is optional,
and is because that's how the original WikiTableQuestions semantic parser was written. The
intuition is that maybe you want to predict the type of your output program outside of the
typical LSTM decoder (or maybe Jayant just didn't realize this could be treated as another
action...).
Parameters
----------
encoder_output_dim : ``int``
action_embedding_dim : ``int``
input_attention : ``Attention``
activation : ``Activation``, optional (default=relu)
The activation that gets applied to the decoder LSTM input and to the action query.
predict_start_type_separately : ``bool``, optional (default=True)
If ``True``, we will predict the initial action (which is typically the base type of the
logical form) using a different mechanism than our typical action decoder. We basically
just do a projection of the hidden state, and don't update the decoder RNN.
num_start_types : ``int``, optional (default=None)
If ``predict_start_type_separately`` is ``True``, this is the number of start types that
are in the grammar. We need this so we can construct parameters with the right shape.
This is unused if ``predict_start_type_separately`` is ``False``.
add_action_bias : ``bool``, optional (default=True)
If ``True``, there has been a bias dimension added to the embedding of each action, which
gets used when predicting the next action. We add a dimension of ones to our predicted
action vector in this case to account for that.
dropout : ``float`` (optional, default=0.0)
num_layers: ``int``, (optional, default=1)
The number of layers in the decoder LSTM.
"""
    def __init__(self,
                 encoder_output_dim: int,
                 action_embedding_dim: int,
                 input_attention: Attention,
                 activation: Activation = Activation.by_name('relu')(),
                 predict_start_type_separately: bool = True,
                 num_start_types: int = None,
                 add_action_bias: bool = True,
                 dropout: float = 0.0,
                 num_layers: int = 1) -> None:
        super().__init__()
        self._input_attention = input_attention
        self._add_action_bias = add_action_bias
        self._activation = activation
        self._num_layers = num_layers

        # Optional dedicated predictor for the very first action (the
        # "start type"); a plain projection of the encoder hidden state.
        self._predict_start_type_separately = predict_start_type_separately
        if predict_start_type_separately:
            self._start_type_predictor = Linear(encoder_output_dim, num_start_types)
            self._num_start_types = num_start_types
        else:
            self._start_type_predictor = None
            self._num_start_types = None

        # Decoder output dim needs to be the same as the encoder output dim since we initialize the
        # hidden state of the decoder with the final hidden state of the encoder.
        output_dim = encoder_output_dim
        input_dim = output_dim
        # Our decoder input will be the concatenation of the decoder hidden state and the previous
        # action embedding, and we'll project that down to the decoder's `input_dim`, which we
        # arbitrarily set to be the same as `output_dim`.
        self._input_projection_layer = Linear(output_dim + action_embedding_dim, input_dim)
        # Before making a prediction, we'll compute an attention over the input given our updated
        # hidden state. Then we concatenate those with the decoder state and project to
        # `action_embedding_dim` to make a prediction.
        self._output_projection_layer = Linear(output_dim + encoder_output_dim, action_embedding_dim)
        if self._num_layers > 1:
            self._decoder_cell = LSTM(input_dim, output_dim, self._num_layers)
        else:
            # We use a ``LSTMCell`` if we just have one layer because it is slightly faster since we are
            # just running the LSTM for one step each time.
            self._decoder_cell = LSTMCell(input_dim, output_dim)

        # Identity function when dropout is disabled, so callers can apply
        # self._dropout unconditionally.
        if dropout > 0:
            self._dropout = torch.nn.Dropout(p=dropout)
        else:
            self._dropout = lambda x: x
@overrides
def take_step(self,
state: GrammarBasedState,
max_actions: int = None,
allowed_actions: List[Set[int]] = None) -> List[GrammarBasedState]:
if self._predict_start_type_separately and not state.action_history[0]:
# The wikitables parser did something different when predicting the start type, which
# is our first action. So in this case we break out into a different function. We'll
# ignore max_actions on our first step, assuming there aren't that many start types.
return self._take_first_step(state, allowed_actions)
# Taking a step in the decoder consists of three main parts. First, we'll construct the
# input to the decoder and update the decoder's hidden state. Second, we'll use this new
# hidden state (and maybe other information) to predict an action. Finally, we will
# construct new states for the next step. Each new state corresponds to one valid action
# that can be taken from the current state, and they are ordered by their probability of
# being selected.
updated_state = self._update_decoder_state(state)
batch_results = self._compute_action_probabilities(state,
updated_state['hidden_state'],
updated_state['attention_weights'],
updated_state['predicted_action_embeddings'])
new_states = self._construct_next_states(state,
updated_state,
batch_results,
max_actions,
allowed_actions)
return new_states
    def _update_decoder_state(self, state: GrammarBasedState) -> Dict[str, torch.Tensor]:
        """Advance the decoder LSTM and attention one step for the whole group.

        Returns a dict with the new hidden state / memory cell, the
        attention over the question, and the predicted action embeddings
        used for scoring actions.
        """
        # For updating the decoder, we're doing a bunch of tensor operations that can be batched
        # without much difficulty. So, we take all group elements and batch their tensors together
        # before doing these decoder operations.
        group_size = len(state.batch_indices)
        attended_question = torch.stack([rnn_state.attended_input for rnn_state in state.rnn_state])
        # Multi-layer LSTM state is stacked on dim 1 (layer-first layout);
        # the single-layer LSTMCell takes plain (group, dim) tensors.
        if self._num_layers > 1:
            hidden_state = torch.stack([rnn_state.hidden_state for rnn_state in state.rnn_state], 1)
            memory_cell = torch.stack([rnn_state.memory_cell for rnn_state in state.rnn_state], 1)
        else:
            hidden_state = torch.stack([rnn_state.hidden_state for rnn_state in state.rnn_state])
            memory_cell = torch.stack([rnn_state.memory_cell for rnn_state in state.rnn_state])

        previous_action_embedding = torch.stack([rnn_state.previous_action_embedding
                                                 for rnn_state in state.rnn_state])

        # (group_size, decoder_input_dim)
        projected_input = self._input_projection_layer(torch.cat([attended_question,
                                                                  previous_action_embedding], -1))
        decoder_input = self._activation(projected_input)
        if self._num_layers > 1:
            # The LSTM wants a (seq_len=1, group, dim) input for the one step.
            _, (hidden_state, memory_cell) = self._decoder_cell(decoder_input.unsqueeze(0),
                                                                (hidden_state, memory_cell))
        else:
            hidden_state, memory_cell = self._decoder_cell(decoder_input, (hidden_state, memory_cell))
        hidden_state = self._dropout(hidden_state)

        # (group_size, encoder_output_dim)
        encoder_outputs = torch.stack([state.rnn_state[0].encoder_outputs[i] for i in state.batch_indices])
        encoder_output_mask = torch.stack([state.rnn_state[0].encoder_output_mask[i] for i in state.batch_indices])

        # Attend over the question with the top-layer hidden state, then
        # project [hidden; attended] down to an action embedding.
        if self._num_layers > 1:
            attended_question, attention_weights = self.attend_on_question(hidden_state[-1],
                                                                           encoder_outputs,
                                                                           encoder_output_mask)
            action_query = torch.cat([hidden_state[-1], attended_question], dim=-1)
        else:
            attended_question, attention_weights = self.attend_on_question(hidden_state,
                                                                           encoder_outputs,
                                                                           encoder_output_mask)
            action_query = torch.cat([hidden_state, attended_question], dim=-1)

        # (group_size, action_embedding_dim)
        projected_query = self._activation(self._output_projection_layer(action_query))
        predicted_action_embeddings = self._dropout(projected_query)
        if self._add_action_bias:
            # NOTE: It's important that this happens right before the dot product with the action
            # embeddings. Otherwise this isn't a proper bias. We do it here instead of right next
            # to the `.mm` below just so we only do it once for the whole group.
            ones = predicted_action_embeddings.new([[1] for _ in range(group_size)])
            predicted_action_embeddings = torch.cat([predicted_action_embeddings, ones], dim=-1)
        return {
                'hidden_state': hidden_state,
                'memory_cell': memory_cell,
                'attended_question': attended_question,
                'attention_weights': attention_weights,
                'predicted_action_embeddings': predicted_action_embeddings,
                }
    def _compute_action_probabilities(self,
                                      state: GrammarBasedState,
                                      hidden_state: torch.Tensor,
                                      attention_weights: torch.Tensor,
                                      predicted_action_embeddings: torch.Tensor
                                      ) -> Dict[int, List[Tuple[int, Any, Any, Any, List[int]]]]:
        """Score every valid action for every group element.

        Returns a dict mapping batch index -> list of
        (group_index, total_log_probs, step_log_probs,
        output_action_embeddings, action_ids) tuples.
        """
        # We take a couple of extra arguments here because subclasses might use them.
        # pylint: disable=unused-argument,no-self-use

        # In this section we take our predicted action embedding and compare it to the available
        # actions in our current state (which might be different for each group element). For
        # computing action scores, we'll forget about doing batched / grouped computation, as it
        # adds too much complexity and doesn't speed things up, anyway, with the operations we're
        # doing here. This means we don't need any action masks, as we'll only get the right
        # lengths for what we're computing.

        group_size = len(state.batch_indices)
        actions = state.get_valid_actions()

        batch_results: Dict[int, List[Tuple[int, Any, Any, Any, List[int]]]] = defaultdict(list)
        for group_index in range(group_size):
            instance_actions = actions[group_index]
            predicted_action_embedding = predicted_action_embeddings[group_index]
            action_embeddings, output_action_embeddings, action_ids = instance_actions['global']
            # This is just a matrix product between a (num_actions, embedding_dim) matrix and an
            # (embedding_dim, 1) matrix.
            action_logits = action_embeddings.mm(predicted_action_embedding.unsqueeze(-1)).squeeze(-1)
            current_log_probs = torch.nn.functional.log_softmax(action_logits, dim=-1)

            # This is now the total score for each state after taking each action. We're going to
            # sort by this later, so it's important that this is the total score, not just the
            # score for the current action.
            log_probs = state.score[group_index] + current_log_probs
            batch_results[state.batch_indices[group_index]].append((group_index,
                                                                    log_probs,
                                                                    current_log_probs,
                                                                    output_action_embeddings,
                                                                    action_ids))
        return batch_results
    def _construct_next_states(self,
                               state: GrammarBasedState,
                               updated_rnn_state: Dict[str, torch.Tensor],
                               batch_action_probs: Dict[int, List[Tuple[int, Any, Any, Any, List[int]]]],
                               max_actions: int,
                               allowed_actions: List[Set[int]]):
        """
        Expand ``state`` into a list of single-element successor states, one per
        (group element, action) pair that survives the ``allowed_actions`` filter and,
        when ``max_actions`` is set, the per-batch-instance top-k cut.
        """
        # pylint: disable=no-self-use

        # We'll yield a bunch of states here that all have a `group_size` of 1, so that the
        # learning algorithm can decide how many of these it wants to keep, and it can just regroup
        # them later, as that's a really easy operation.
        #
        # We first define a `make_state` method, as in the logic that follows we want to create
        # states in a couple of different branches, and we don't want to duplicate the
        # state-creation logic.  This method creates a closure using variables from the method, so
        # it doesn't make sense to pull it out of here.

        # Each group index here might get accessed multiple times, and doing the slicing operation
        # each time is more expensive than doing it once upfront.  These three lines give about a
        # 10% speedup in training time.
        group_size = len(state.batch_indices)

        # For a multi-layer RNN the group dimension is dim 1; otherwise dim 0 — TODO confirm
        # against the RNN state layout used by the encoder.
        chunk_index = 1 if self._num_layers > 1 else 0
        hidden_state = [x.squeeze(chunk_index)
                        for x in updated_rnn_state['hidden_state'].chunk(group_size, chunk_index)]
        memory_cell = [x.squeeze(chunk_index)
                       for x in updated_rnn_state['memory_cell'].chunk(group_size, chunk_index)]
        attended_question = [x.squeeze(0) for x in updated_rnn_state['attended_question'].chunk(group_size, 0)]

        def make_state(group_index: int,
                       action: int,
                       new_score: torch.Tensor,
                       action_embedding: torch.Tensor) -> GrammarBasedState:
            # Build the RnnStatelet for the successor, carrying over the encoder outputs/mask.
            new_rnn_state = RnnStatelet(hidden_state[group_index],
                                        memory_cell[group_index],
                                        action_embedding,
                                        attended_question[group_index],
                                        state.rnn_state[group_index].encoder_outputs,
                                        state.rnn_state[group_index].encoder_output_mask)
            batch_index = state.batch_indices[group_index]
            # Recover the considered actions / probabilities recorded for this group element.
            for i, _, current_log_probs, _, actions in batch_action_probs[batch_index]:
                if i == group_index:
                    considered_actions = actions
                    probabilities = current_log_probs.exp().cpu()
                    break
            return state.new_state_from_group_index(group_index,
                                                    action,
                                                    new_score,
                                                    new_rnn_state,
                                                    considered_actions,
                                                    probabilities,
                                                    updated_rnn_state['attention_weights'])

        new_states = []
        for _, results in batch_action_probs.items():
            if allowed_actions and not max_actions:
                # If we're given a set of allowed actions, and we're not just keeping the top k of
                # them, we don't need to do any sorting, so we can speed things up quite a bit.
                for group_index, log_probs, _, action_embeddings, actions in results:
                    for log_prob, action_embedding, action in zip(log_probs, action_embeddings, actions):
                        if action in allowed_actions[group_index]:
                            new_states.append(make_state(group_index, action, log_prob, action_embedding))
            else:
                # In this case, we need to sort the actions.  We'll do that on CPU, as it's easier,
                # and our action list is on the CPU, anyway.
                group_indices = []
                group_log_probs: List[torch.Tensor] = []
                group_action_embeddings = []
                group_actions = []
                for group_index, log_probs, _, action_embeddings, actions in results:
                    group_indices.extend([group_index] * len(actions))
                    group_log_probs.append(log_probs)
                    group_action_embeddings.append(action_embeddings)
                    group_actions.extend(actions)
                log_probs = torch.cat(group_log_probs, dim=0)
                action_embeddings = torch.cat(group_action_embeddings, dim=0)
                log_probs_cpu = log_probs.data.cpu().numpy().tolist()
                # Tuple layout: (cpu score for sorting, group index, tensor score, embedding, action id).
                batch_states = [(log_probs_cpu[i],
                                 group_indices[i],
                                 log_probs[i],
                                 action_embeddings[i],
                                 group_actions[i])
                                for i in range(len(group_actions))
                                if (not allowed_actions or
                                    group_actions[i] in allowed_actions[group_indices[i]])]
                # We use a key here to make sure we're not trying to compare anything on the GPU.
                batch_states.sort(key=lambda x: x[0], reverse=True)
                if max_actions:
                    batch_states = batch_states[:max_actions]
                for _, group_index, log_prob, action_embedding, action in batch_states:
                    new_states.append(make_state(group_index, action, log_prob, action_embedding))
        return new_states
    def _take_first_step(self,
                         state: GrammarBasedState,
                         allowed_actions: List[Set[int]] = None) -> List[GrammarBasedState]:
        """
        Special-cased first decoding step: predict the start type directly from the initial
        hidden state (the decoder RNN is not advanced), returning one successor state per
        allowed start action.
        """
        # We'll just do a projection from the current hidden state (which was initialized with the
        # final encoder output) to the number of start actions that we have, normalize those
        # logits, and use that as our score.  We end up duplicating some of the logic from
        # `_compute_new_states` here, but we do things slightly differently, and it's easier to
        # just copy the parts we need than to try to re-use that code.

        # (group_size, hidden_dim)
        hidden_state = torch.stack([rnn_state.hidden_state for rnn_state in state.rnn_state])
        # (group_size, num_start_type)
        start_action_logits = self._start_type_predictor(hidden_state)
        log_probs = torch.nn.functional.log_softmax(start_action_logits, dim=-1)
        sorted_log_probs, sorted_actions = log_probs.sort(dim=-1, descending=True)

        sorted_actions = sorted_actions.detach().cpu().numpy().tolist()
        if state.debug_info is not None:
            probs_cpu = log_probs.exp().detach().cpu().numpy().tolist()
        else:
            probs_cpu = [None] * len(state.batch_indices)

        # state.get_valid_actions() will return a list that is consistently sorted, so as along as
        # the set of valid start actions never changes, we can just match up the log prob indices
        # above with the position of each considered action, and we're good.
        valid_actions = state.get_valid_actions()
        considered_actions = [actions['global'][2] for actions in valid_actions]
        if len(considered_actions[0]) != self._num_start_types:
            raise RuntimeError("Calculated wrong number of initial actions.  Expected "
                               f"{self._num_start_types}, found {len(considered_actions[0])}.")

        best_next_states: Dict[int, List[Tuple[int, int, int]]] = defaultdict(list)
        for group_index, (batch_index, group_actions) in enumerate(zip(state.batch_indices, sorted_actions)):
            for action_index, action in enumerate(group_actions):
                # `action` is currently the index in `log_probs`, not the actual action ID.  To get
                # the action ID, we need to go through `considered_actions`.
                action = considered_actions[group_index][action]
                if allowed_actions is not None and action not in allowed_actions[group_index]:
                    # This happens when our _decoder trainer_ wants us to only evaluate certain
                    # actions, likely because they are the gold actions in this state.  We just skip
                    # emitting any state that isn't allowed by the trainer, because constructing the
                    # new state can be expensive.
                    continue
                best_next_states[batch_index].append((group_index, action_index, action))

        new_states = []
        for batch_index, best_states in sorted(best_next_states.items()):
            for group_index, action_index, action in best_states:
                # We'll yield a bunch of states here that all have a `group_size` of 1, so that the
                # learning algorithm can decide how many of these it wants to keep, and it can just
                # regroup them later, as that's a really easy operation.
                new_score = state.score[group_index] + sorted_log_probs[group_index, action_index]

                # This part is different from `_compute_new_states` - we're just passing through
                # the previous RNN state, as predicting the start type wasn't included in the
                # decoder RNN in the original model.
                new_rnn_state = state.rnn_state[group_index]
                new_state = state.new_state_from_group_index(group_index,
                                                             action,
                                                             new_score,
                                                             new_rnn_state,
                                                             considered_actions[group_index],
                                                             probs_cpu[group_index],
                                                             None)
                new_states.append(new_state)
        return new_states
    def attend_on_question(self,
                           query: torch.Tensor,
                           encoder_outputs: torch.Tensor,
                           encoder_output_mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Given a query (which is typically the decoder hidden state), compute an attention over the
        output of the question encoder, and return a weighted sum of the question representations
        given this attention.  We also return the attention weights themselves.

        This is a simple computation, but we have it as a separate method so that the ``forward``
        method on the main parser module can call it on the initial hidden state, to simplify the
        logic in ``take_step``.

        Returns a tuple of (attended_question, question_attention_weights).
        """
        # (group_size, question_length)
        question_attention_weights = self._input_attention(query,
                                                           encoder_outputs,
                                                           encoder_output_mask)
        # (group_size, encoder_output_dim)
        attended_question = util.weighted_sum(encoder_outputs, question_attention_weights)
        return attended_question, question_attention_weights
| 60.634383 | 115 | 0.604704 |
576fc900258ca39a664eba1aac186f35dec84d65 | 1,483 | py | Python | Thomas/Notebooks/track1.py | VigneshBaskar/gent-commuters | d631828cdae7e5af8ae106c08348b013b93617cd | [
"Apache-2.0"
] | 2 | 2018-03-19T16:10:44.000Z | 2018-04-27T13:51:37.000Z | Thomas/Notebooks/track1.py | VigneshBaskar/gent-commuters | d631828cdae7e5af8ae106c08348b013b93617cd | [
"Apache-2.0"
] | null | null | null | Thomas/Notebooks/track1.py | VigneshBaskar/gent-commuters | d631828cdae7e5af8ae106c08348b013b93617cd | [
"Apache-2.0"
] | null | null | null | bpm = 130.0 # beat per minute
beat_per_bar = 8
steps_per_beat=4
bar_per_emphasis = 2
n_bars_buildup = 8
progression = [
{"part":"intro", "buildup": 1.0, "duration": 4 , "tracks":[("base", "no") , ("chords", "no") , ("drums1", "no") , ("drums2", "no") , ("melody1", "yes"), ("melody2", "no") ]},
{"part":"verse", "buildup": 1.0, "duration": 12, "tracks":[("base", "yes"), ("chords", "yes"), ("drums1", "yes"), ("drums2", "no") , ("melody1", "yes"), ("melody2", "no") ]},
{"part":"pause", "buildup": 1.0, "duration": 4, "tracks":[("base", "yes"), ("chords", "no") , ("drums1", "no") , ("drums2", "no") , ("melody1", "no") , ("melody2", "no") ]},
{"part":"verse", "buildup": 1.0, "duration": 12, "tracks":[("base", "yes"), ("chords", "yes"), ("drums1", "yes"), ("drums2", "yes"), ("melody1", "yes"), ("melody2", "yes")]},
{"part":"verse", "buildup": 0.5, "duration": 6, "tracks":[("base", "yes"), ("chords", "yes"), ("drums1", "yes"), ("drums2", "yes"), ("melody1", "yes"), ("melody2", "yes")]},
{"part":"verse", "buildup": 0.0, "duration": 6, "tracks":[("base", "yes"), ("chords", "yes"), ("drums1", "yes"), ("drums2", "no") , ("melody1", "yes"), ("melody2", "no") ]}
]
def track_primer(drum, bar):
    """Return the one-bar (8-step) note pattern for a drum track.

    Each step is a tuple of MIDI percussion note numbers to trigger
    (35 kick, 42 closed hi-hat, 46 open hi-hat, 49 crash).

    :param drum: which pattern to build, either "drum1" or "drum2"
    :param bar: dict with a boolean "final_bar" key; on the final bar the
                closing hi-hat (42) is replaced by a crash (49)
    :raises ValueError: if ``drum`` is not a known track name
    """
    clonk = 49 if bar["final_bar"] else 42
    if drum == "drum1":
        return [(35,), (), (42,), (), (35,), (), (clonk,), ()]
    if drum == "drum2":
        return [(46,), (42,), (46, 42,), (42,), (46,), (42,), (46, clonk,), (42,)]
    # The original code fell through here and raised UnboundLocalError on
    # `return primer`; fail with an explicit, descriptive error instead.
    raise ValueError("unknown drum track: %r" % (drum,))
3f2632a1163aec9d4f499432ec6d8ad63e107829 | 20,846 | py | Python | ai/game/hw1cs561s16.py | thammegowda/algos | a23f6129c2f031c86314a489c4a8a26f9c7130bf | [
"Apache-2.0"
] | 3 | 2015-10-25T06:25:01.000Z | 2017-02-03T01:51:49.000Z | ai/game/hw1cs561s16.py | thammegowda/algos | a23f6129c2f031c86314a489c4a8a26f9c7130bf | [
"Apache-2.0"
] | null | null | null | ai/game/hw1cs561s16.py | thammegowda/algos | a23f6129c2f031c86314a489c4a8a26f9c7130bf | [
"Apache-2.0"
] | 1 | 2016-08-17T16:37:48.000Z | 2016-08-17T16:37:48.000Z | # -*- coding: utf-8 -*-
# Author : ThammeGowda Narayanaswamy
# USC ID : 2074-6694-39
# Session: Spring 2016
# Course : USC CSCI 561 Foundations of Artificial Intelligence
# Topic : Home work 1 : Squirrel Fight Game Strategy
#
# NOTE: Refer to CS561Spring2016HW1.pdf for the game rules
from __future__ import print_function
import argparse
from os import path
from decimal import Decimal
import string
# Sentinel scores for the minimax search (Decimal infinities compare correctly
# against the integer board scores).
POS_INFINITY = Decimal("inf")
NEG_INFINITY = Decimal("-inf")
# Board coordinate labels: columns A-E, rows 1-5.
# NOTE(review): `string.uppercase` exists only in Python 2; under Python 3 this
# would need `string.ascii_uppercase` — confirm the intended interpreter.
COL_NAMES = [i for i in string.uppercase[0:5]]
ROW_NAMES = [str(i) for i in range(1, 6)]
# Output file names required by the assignment.
STATE_OUT_FILE = "next_state.txt"
LOG_OUT_FILE = "traverse_log.txt"
TRACE_OUT_FILE = "trace_state.txt"
class SquirrelProblem(object):
    '''
    Squirrel Problem stated by Home work 1 of USC CSCI 561 - Spring 2016

    Holds the board costs, the current occupancy state, and the strategy
    configuration parsed from the input file, and implements the game moves
    plus the various search strategies (greedy BFS, minimax, alpha-beta,
    and full-game simulation).
    '''

    def __init__(self, prob_file):
        """
        Parse the assignment input file.

        Line 1 is the strategy number; for strategies 1-3 lines 2-3 are the
        player piece and cutoff depth, for strategy 4 (simulation) lines 2-7
        describe both players.  The next 5 lines are the board cell values and
        the final 5 lines are the initial occupancy ('X', 'O' or '*').
        """
        self.board_size = 5 # Fixed, as per description
        self.empty_cell = '*' # Fixed, as per description
        # Maps a piece to its opponent's piece.
        self.opponent = lambda piece: 'O' if piece == 'X' else 'X'
        with open(prob_file) as f:
            lines = f.readlines()
            count = 0
            self.strategy = int(lines[0].strip())
            if self.strategy < 4:
                self.my_piece = lines[1].strip()
                self.opp_piece = self.opponent(self.my_piece)
                self.cutoff = int(lines[2].strip())
                count = 3
            else:
                # Strategy 4: two configured players take alternating turns.
                self.first_player = lines[1].strip()
                self.first_player_algo = int(lines[2].strip())
                self.first_player_cut_off = int(lines[3].strip())
                self.second_player = lines[4].strip()
                self.second_player_algo = int(lines[5].strip())
                self.second_player_cut_off = int(lines[6].strip())
                count = 7
            # lines are board scores
            # n x n board. each cell has value
            self.costs = [[int(j) for j in lines[count + i].strip().split()]
                          for i in range(self.board_size)]
            count += self.board_size
            # lines 8 to 12 are positions
            # # n x n board. each cell has playerSign
            self.state = [[j for j in lines[count + i].strip()]
                          for i in range(self.board_size)]

    def print_state(self, state, debug=True, fileName=None, stdOut=True):
        '''
        Prints current state of the board to console

        :param state: board occupancy to render
        :param debug: when True, renders "cost|piece" per cell
        :param fileName: optional path to also write the rendering to
        :param stdOut: when True, prints the rendering
        :return: the rendered string (rows joined by CRLF)
        '''
        # NOTE(review): the non-debug branch reads `self.state[i][j]`, not the
        # `state` parameter — these coincide for all current callers, but a
        # caller passing a different state would get misleading output; confirm.
        out_format = lambda i, j: ' %2d|%s' % (self.costs[i][j], state[i][j])\
            if debug else self.state[i][j]
        res = ""
        for i in range(self.board_size):
            for j in range(self.board_size):
                res += out_format(i, j)
            res += "\r\n"
        if fileName:
            with open(fileName, 'w') as w:
                w.write(res)
        if stdOut:
            print(res)
        return res

    def eval_score(self, state, player):
        '''
        Evaluates the score of game at any given state.
        The game score = my score - opponent score
        :param state: state of game
        :return: game score which is an integer
        '''
        score = 0
        opp_player = self.opponent(player)
        for (i,j) in self.all_cells():
            if state[i][j] == player:
                score += self.costs[i][j]
            elif state[i][j] == opp_player:
                score -= self.costs[i][j]
            #else it is a free node
        return score

    def determineNextStateHeuristic(self, state, player):
        """
        Determines heuristics for the next possible state of given player from any given state
        :param state: the current state
        :param player: the player who has a move
        :return: heuristic (board-sized matrix; None for occupied cells)
        """
        opp_piece = self.opponent(player)
        heuristic = [[None for j in range(self.board_size)] for i in range(self.board_size)]
        # the heuristic is computed without actually making the move
        # this is done using delta with current score
        current_score = self.eval_score(state, player)
        for i, j in self.all_cells():
            if state[i][j] == self.empty_cell:  # empty cell else: it's owned by somebody
                heuristic[i][j] = current_score + self.costs[i][j]
                # checking if this can also be a raid
                # checking if this can be a raid
                adjacent_cells = [(i-1, j), (i+1, j), (i, j-1), (i, j+1)]
                raid = False
                opp_loss = 0
                for x, y in adjacent_cells:
                    if 0 <= x < self.board_size and 0 <= y < self.board_size:
                        if state[x][y] == opp_piece:
                            opp_loss += self.costs[x][y]
                        elif state[x][y] == player:
                            raid = True
                if raid:
                    # for all the raids, the new score goes up by 2 times the raided cell
                    # its 2 times because the foe loses it and we gain it. Thus difference is large by 2 times
                    heuristic[i][j] += 2 * opp_loss
        return heuristic

    def all_cells(self):
        """
        Yields indices for all possibles cells in the board
        :return: (row, column) indices from top left to bottom right
        """
        for i in range(self.board_size):
            for j in range(self.board_size):
                yield (i, j)

    def all_empty_cells(self, state):
        """Returns the list of (row, col) positions not yet owned by any player."""
        res = []
        for i, j in self.all_cells():
            if state[i][j] == self.empty_cell:
                res.append((i, j))
        return res

    def is_terminal_state(self, state):
        """
        checks if the game state is terminal
        :param state: game state
        :return: boolean True if the game is complete, False if further moves are possible
        """
        #state is complete if all cells are occupied
        for i, j in self.all_cells():
            if state[i][j] == self.empty_cell:
                return False
        return True

    def move_to_cell(self, state, row, col, player_piece):
        '''
        Takes over a specified cell

        A move onto a cell adjacent to one of the player's own cells is a
        "raid": all adjacent opponent cells are captured as well.

        :param state: state (mutated in place)
        :param row: the row number
        :param col: column number
        :param player_piece: player piece
        :return: list of triples (row, col, piece);
            NOTE return list can be used for reverting the state by undoing the moves
        '''
        oppPiece = self.opponent(player_piece)
        undoLog = []
        if state[row][col] == self.empty_cell:
            # player owns this cell now. so it is no longer empty
            # NOTE(review): this line writes to `self.state` rather than the
            # `state` parameter — callers always pass self.state today; confirm
            # before reusing with a scratch board.
            self.state[row][col] = player_piece
            undoLog.append((row, col, self.empty_cell))
            # checking if this can be a raid
            adjacent_cells = [(row-1, col), (row+1, col), (row, col-1), (row, col+1)]
            raid = False
            oppCells = []
            for i, j in adjacent_cells:
                if 0 <= i < self.board_size and 0 <= j < self.board_size:
                    if state[i][j] == oppPiece:
                        oppCells.append((i, j))
                    elif state[i][j] == player_piece:
                        raid = True
            if raid:
                # capture every adjacent opponent cell
                for x, y in oppCells:
                    state[x][y] = player_piece
                    undoLog.append((x, y, oppPiece))
        else:
            raise Exception("I don't break Game Rules! The cell is not empty")
        return undoLog

    def apply_moves(self, state, actions):
        """
        Applies a sequence of moves on a state

        Used with the undo log from move_to_cell to revert a move in place.

        :param state: the initial state (mutated in place)
        :param actions: sequence of moves (row, col, player)
        :return:
        """
        for action in actions:
            state[action[0]][action[1]] = action[2]

    def greedy_bfs(self, player):
        '''
        Performs next move by using Greedy best first search strategy
        :return: the undo log of the applied move, or None when no cell is free
        '''
        heuristic = self.determineNextStateHeuristic(self.state, player)
        maxVal = NEG_INFINITY
        pos = None
        # Find greedy best first position
        for i, j in self.all_cells():
            if heuristic[i][j] != None and heuristic[i][j] > maxVal:
                maxVal = heuristic[i][j]
                pos = (i, j)
        if pos:
            return self.move_to_cell(self.state, pos[0], pos[1], player)
        #else: no available slot, die

    def mini_max(self, logfile):
        """Makes the next move for my_piece using minimax, logging the traversal."""
        root = MiniMaxSolver(self, self.my_piece, self.opp_piece, self.cutoff, logfile)\
            .solve(self.state)
        if root.next_move:
            move = root.next_move
            self.move_to_cell(self.state, move.pos[0], move.pos[1], move.piece)

    def alpha_beta_pruning(self, logfile):
        """Makes the next move for my_piece using alpha-beta pruning, logging the traversal."""
        root = AlphaBetaSolver(self, self.my_piece, self.opp_piece, self.cutoff, logfile).solve(self.state)
        if root.next_move:
            move = root.next_move
            self.move_to_cell(self.state, move.pos[0], move.pos[1], move.piece)

    def simulate(self):
        """
        Plays a full game between the two configured players (strategy 4),
        writing the board after each move to TRACE_OUT_FILE.
        """
        count = 0
        with open(TRACE_OUT_FILE, 'w') as out:
            while not self.is_terminal_state(self.state):
                # Players alternate: first player moves on even counts.
                if count % 2 == 0:
                    self.your_turn(self.first_player_algo, self.first_player, self.second_player, self.first_player_cut_off)
                else:
                    self.your_turn(self.second_player_algo, self.second_player, self.first_player, self.second_player_cut_off)
                if count > 0:
                    out.write("\r\n")
                res = self.print_state(self.state, debug=False, stdOut=False).strip()
                out.write(res)
                count += 1

    def your_turn(self, strategy, player, opponent, cutoff):
        """Performs one move for `player` using the given strategy number (1-3)."""
        if strategy == 1:
            self.greedy_bfs(player)
        elif strategy == 2:
            move = MiniMaxSolver(self, player, opponent, cutoff).solve(self.state).next_move
            self.move_to_cell(self.state, move.pos[0], move.pos[1], move.piece)
            pass
        elif strategy == 3:
            move = AlphaBetaSolver(self, player, opponent, cutoff).solve(self.state).next_move
            self.move_to_cell(self.state, move.pos[0], move.pos[1], move.piece)
        else:
            raise Exception("Invalid state")

    def play_game(self, algorithm):
        '''
        Makes the next move as per the algorithm
        :param algorithm: the strategy for next move
            (1 greedy BFS, 2 minimax, 3 alpha-beta, 4 full simulation)
        :return:
        '''
        if algorithm == 1:
            self.greedy_bfs(self.my_piece)
        elif algorithm == 2:
            with open(LOG_OUT_FILE, 'w') as logfile:
                self.mini_max(logfile)
        elif algorithm == 3:
            with open(LOG_OUT_FILE, 'w') as logfile:
                self.alpha_beta_pruning(logfile)
        elif algorithm == 4:
            self.simulate()
        else:
            raise Exception("Algorithm %d is unknown!" % algorithm)

    def read_state_file(self, fileName, n):
        """
        Reads game state from a file
        :param fileName: path to file
        :param n: the matrix/board size
        :return: nxn matrix having game state
        """
        with open(fileName) as f:
            return [[j for j in next(f).strip()] for _ in range(n)]

    def are_states_same(self, state1, state2):
        """
        Returns True if give two states are same
        :param state1: first state
        :param state2: second state
        :return: True if states are same; false otherwise
        """
        for i, j in self.all_cells():
            if state1[i][j] != state2[i][j]:
                return False
        return True
class Node(object):
    """A node in the minimax/alpha-beta search tree.

    Tracks the score reached at this node, the board position (row, col) the
    move occupies, which piece moved, the depth in the tree, and the chosen
    `next_move` child along the optimal line.
    """

    def __init__(self, score, pos, piece, depth=0, parent=None):
        self.parent = parent
        self.children = None      # lazily created list of child Nodes
        self.score = score        # best score found for this subtree
        self.pos = pos            # (row, col) of the move; (None, None) for root
        self.piece = piece        # 'X' or 'O'; None for root
        self.depth = depth
        self.next_move = None     # child on the optimal line

    def add_child(self, node):
        """Attach `node` as a child, setting its parent and depth."""
        node.parent = self
        node.depth = self.depth + 1
        if self.children is None:
            self.children = []
        self.children.append(node)

    def pretty_print(self, prefix="", isTail=True):
        """Print the subtree rooted here as an ASCII-art tree (for debugging)."""
        name = "%3s %s" % (self.score, self.pos)
        print(prefix + ("└── " if isTail else "├── ") + name)
        if self.children:
            # BUG FIX: the recursive calls previously referenced a nonexistent
            # name-mangled `__prettyPrint` method, raising AttributeError for
            # any node that actually had children.
            formatstring = "    " if isTail else "│   "
            for child in self.children[:-1]:
                child.pretty_print(prefix + formatstring, False)
            self.children[-1].pretty_print(prefix + formatstring, True)
class MiniMaxSolver(object):
    """
    Depth-limited minimax solver for the squirrel game.

    `max_player` is the piece being optimized for; `min_player` is the
    opponent.  When `logfile` is given, every node visit is appended in the
    "Node,Depth,Value" CSV format required by the assignment's traverse log.
    """

    def __init__(self, problem, max_player, min_player, cutoff, logfile=None):
        self.problem = problem
        self.logfile = logfile
        # Board evaluation is always from the max player's point of view.
        self.eval = lambda state : problem.eval_score(state, max_player)
        self.max_player = max_player
        self.min_player = min_player
        self.maxdepth = cutoff

    def solve(self, state):
        """Runs minimax from `state`; returns the root Node (see root.next_move)."""
        # first turn is my players'. My player is maxPlayer
        if self.logfile:
            self.logfile.write("Node,Depth,Value")
        root = Node(NEG_INFINITY, (None, None), None)
        self.maximize(state, root)
        return root

    def log(self, node, alphaBeta=False):
        """Appends one traverse-log row for `node` (CRLF-separated)."""
        if self.logfile:
            self.logfile.write("\r\n" + MiniMaxSolver.format_log(node, alphaBeta))

    @staticmethod
    def format_log(node, alpha_beta=False):
        """Formats a node as "Name,Depth,Value[,Alpha,Beta]" — e.g. "B3,1,5"."""
        name = "root" if node.depth == 0\
            else "%s%s" % (COL_NAMES[node.pos[1]], ROW_NAMES[node.pos[0]])
        res = "%s,%s,%s" % (name, node.depth, node.score)
        if alpha_beta:
            res += ",%s,%s" % (node.alpha, node.beta)
        return res

    def maximize(self, state, parent):
        """MAX ply: fills parent.score/next_move with the best child for max_player."""
        cells = self.problem.all_empty_cells(state)
        if parent.depth == self.maxdepth or not cells:
            # Cutoff depth reached or no moves left: evaluate the leaf.
            parent.score = self.eval(state)
        else:
            self.log(parent)
            for x, (i, j) in enumerate(cells):
                moves = self.problem.move_to_cell(state, i, j, self.max_player) # max's move
                child = Node(POS_INFINITY, (i, j), self.max_player)
                parent.add_child(child)
                child.score = self.minimize(state, child) # turn goes to min player
                if child.score > parent.score:
                    parent.score = child.score
                    parent.next_move = child
                if x < len(cells) - 1: # for all except the last one
                    self.log(parent)
                # Undo the move so the next sibling starts from the same state.
                self.problem.apply_moves(state, moves)
        # Final log for this node (also covers the leaf case).
        self.log(parent)
        return parent.score

    def minimize(self, state, parent):
        """MIN ply: fills parent.score/next_move with the best child for min_player."""
        cells = self.problem.all_empty_cells(state)
        if parent.depth == self.maxdepth or not cells:
            parent.score = self.eval(state)
        else:
            self.log(parent)
            for x, (i, j) in enumerate(cells):
                moves = self.problem.move_to_cell(state, i, j, self.min_player) # min's move
                child = Node(NEG_INFINITY, (i, j), self.min_player)
                parent.add_child(child)
                self.maximize(state, child) # turn goes to max, depth reduced by 1
                if child.score < parent.score:
                    parent.score = child.score
                    parent.next_move = child
                if x < len(cells) -1: # for all except the last one
                    self.log(parent)
                # Undo the move so the next sibling starts from the same state.
                self.problem.apply_moves(state, moves)
        self.log(parent)
        return parent.score
class AlphaBetaSolver(MiniMaxSolver):
    """
    Minimax with alpha-beta pruning.  Inherits logging helpers from
    MiniMaxSolver; the traverse log additionally records alpha and beta.
    """

    def solve(self, state):
        """Runs alpha-beta from `state`; returns the root Node (see root.next_move)."""
        if self.logfile:
            self.logfile.write("Node,Depth,Value,Alpha,Beta")
        root = Node(NEG_INFINITY, (None, None), None) # this node for the next move, which is maximizer
        # The worst possible value for him is -Infinity
        root.alpha = NEG_INFINITY # Max value, we dont know yet, so -Infinity
        root.beta = POS_INFINITY # Min value, we dont know yet, so +Infinity
        self.maximize(state, root)
        return root

    def maximize(self, state, parent):
        """MAX ply with pruning: stops expanding once a child's score >= beta."""
        cells = self.problem.all_empty_cells(state)
        if parent.depth == self.maxdepth or not cells:
            parent.score = self.eval(state)
        else:
            self.log(parent, True)
            for x, (i, j) in enumerate(cells):
                moves = self.problem.move_to_cell(state, i, j, self.max_player) # max's move
                child = Node(POS_INFINITY, (i, j), self.max_player) # this node is for the next move, which is minimizer
                # The worst possible value for him is +infinity
                child.alpha = parent.alpha # Inherit alpha beta
                child.beta = parent.beta
                parent.add_child(child) #dept gets incremented
                self.minimize(state, child) # turn goes to min player
                self.problem.apply_moves(state, moves) #undo
                if child.score > parent.score:
                    parent.score = child.score
                    parent.next_move = child
                if child.score >= parent.beta: # intuition : Min player (parent) wont let this happen
                    break
                if child.score > parent.alpha:
                    parent.alpha = child.score
                if x < len(cells) - 1: # for all except the last one
                    self.log(parent, True)
        self.log(parent, True)
        return parent.score

    def minimize(self, state, parent):
        """MIN ply with pruning: stops expanding once a child's score <= alpha."""
        cells = self.problem.all_empty_cells(state)
        if parent.depth == self.maxdepth or not cells:
            parent.score = self.eval(state)
        else:
            self.log(parent, True)
            for x, (i, j) in enumerate(cells):
                moves = self.problem.move_to_cell(state, i, j, self.min_player) # min's move
                child = Node(NEG_INFINITY, (i, j), self.min_player)
                child.alpha = parent.alpha
                child.beta = parent.beta
                parent.add_child(child)
                self.maximize(state, child) # turn goes to max, depth reduced by 1
                self.problem.apply_moves(state, moves)
                if child.score < parent.score:
                    parent.score = child.score
                    parent.next_move = child
                if child.score <= parent.alpha:
                    break
                if child.score < parent.beta:
                    parent.beta = child.score
                if x < len(cells) -1: # for all except the last one
                    self.log(parent, True)
        self.log(parent, True)
        return parent.score
if __name__ == '__main__':
    # CLI entry point: runs the configured strategy on the input file, writes
    # the resulting board to STATE_OUT_FILE, and optionally checks the result
    # (and traverse log, for strategies 2-3) against expected test files.
    parser = argparse.ArgumentParser(description='CSCI-561 - HW 1 Solutions - by Thamme Gowda N.')
    parser.add_argument('-i','--input', help='Input File', required=True)
    parser.add_argument('-t','--test', action="store_true", help='Auto detect tests in directory')
    parser.add_argument('-tf','--testfile', required = False, help='Use this test file')
    args = vars(parser.parse_args())

    prob = SquirrelProblem(args['input'])
    prob.play_game(prob.strategy)
    prob.print_state(debug=False, state=prob.state, fileName=STATE_OUT_FILE, stdOut=False)

    # below is for testing
    testfile = None
    testLogFile = None
    if args['test']: # test was requested
        # Look for expected next-state / traverse-log files beside the input.
        tmp = path.join(path.dirname(path.abspath(args['input'])), STATE_OUT_FILE)
        if path.exists(tmp): # see if there is a test file
            testfile = tmp
        tmp = path.join(path.dirname(path.abspath(args['input'])), LOG_OUT_FILE)
        if path.exists(tmp): # see if there is a test file
            testLogFile = tmp
    #print("Score X : %s" % prob.eval_score(prob.state, player='X'))
    #print("Score O : %s" % prob.eval_score(prob.state, player='O'))
    if 'testfile' in args and args['testfile']:
        # An explicitly supplied test file overrides the auto-detected one.
        testfile = args['testfile']
    if testfile:
        terminalState = prob.read_state_file(testfile, prob.board_size)
        res = prob.are_states_same(prob.state, terminalState)
        print("Next State Same ?: %s" % res)
        if not res:
            print("Error:\n Expected state:\n")
            prob.print_state(terminalState)
            print("But actual state:\n")
            prob.print_state(prob.state)
    if 2 <= prob.strategy <= 3 and testLogFile:
        # Traverse logs only exist for minimax / alpha-beta runs.
        import filecmp
        res = filecmp.cmp(testLogFile, LOG_OUT_FILE)
        print("Log Matched ? %s " % res)
| 39.858509 | 126 | 0.550753 |
1c60eee6e9af40c2f54578e2239261783f2a8d46 | 11,427 | py | Python | analysis/analysis_utils.py | eduardofv/lang_model_eval | d89dbe9fe291f0befb9701e8dc4cea4154cf9d45 | [
"MIT"
] | 1 | 2020-03-19T20:20:40.000Z | 2020-03-19T20:20:40.000Z | analysis/analysis_utils.py | eduardofv/lang_model_eval | d89dbe9fe291f0befb9701e8dc4cea4154cf9d45 | [
"MIT"
] | null | null | null | analysis/analysis_utils.py | eduardofv/lang_model_eval | d89dbe9fe291f0befb9701e8dc4cea4154cf9d45 | [
"MIT"
] | null | null | null | """Utility functions to be used by analysis tasks."""
import os
import re
import collections
import json
import numpy as np
import pandas as pd
import matplotlib as mpl
from matplotlib import pyplot as plt
import seaborn as sns
#File Utils
def load_metadata(directory):
    """Load the metadata file 'experiment_metadata.json' from experiment dir"""
    metadata_path = f"{directory}/experiment_metadata.json"
    try:
        with open(metadata_path) as handle:
            return json.load(handle)
    except IOError as ex:
        # Missing/unreadable metadata is reported but not fatal.
        print(ex)
        return None
def validate_required_fields(metadata):
    """Check if the metadata can be used on analysis. This is evolving.

    :param metadata: dict loaded from an experiment_metadata.json file
    :return: True when every required key is present, False otherwise
    """
    required = ['RESULTS-BALANCED_ACCURACY']
    # Idiomatic replacement for the original manual `val & (...)` accumulation.
    return all(field in metadata for field in required)
def load_result_directories(directories):
    """
    Walks a directory list in which every directory contains experiment
    results, loading a metadata file for each directory.
    Validates and returns the metadata dict
    """
    # Collect the immediate sub-directories of every search directory.
    candidate_dirs = []
    for search_dir in directories:
        _, children, _ = list(os.walk(search_dir, followlinks=True))[0]
        candidate_dirs += [f"{search_dir}{child}" for child in children]

    # Keep only the directories whose metadata loads and validates.
    results = {}
    for candidate in candidate_dirs:
        metadata = load_metadata(candidate)
        if metadata is None or not validate_required_fields(metadata):
            print(f"WARNING: Invalid metadata: {candidate}")
            continue
        results[candidate] = metadata
    return results
#Data Utils
def load_full_dataset(results):
    """Converts a results dict to a DataFrame"""
    metas = [results[k] for k in results.keys()]

    def column(extract):
        # Build one DataFrame column by applying `extract` to every metadata dict.
        return [extract(m) for m in metas]

    return pd.DataFrame({
        # Experiment and Environment
        'runid': column(lambda m: m['EXPERIMENT-RUNID']),
        'version': column(lambda m: m['EXPERIMENT-VERSION']),
        'GPU': column(lambda m: gpu(m)),
        # Data
        'dataset': column(lambda m: m['DATA-DATASET_FN']),
        'rows_to_load': column(lambda m: rows_to_load(m)),
        'training_set_size': column(lambda m: training_set_size(m)),
        'test_set_size': column(lambda m: test_set_size(m)),
        'max_seq_len': column(lambda m: max_seq_len(m)),
        'output_dim': column(lambda m: m['DATA-OUTPUT_DIM']),
        # Model and training
        'lm': column(lambda m: lm(m)),
        'batch_size': column(lambda m: m['TRAIN-BATCH_SIZE']),
        'learning_rate': column(lambda m: learning_rate(m)),
        # Results
        'training_time': column(lambda m: training_time(m)),
        'bac': column(lambda m: m['RESULTS-BALANCED_ACCURACY']),
        'min_loss': column(lambda m: min_loss(m)),
        'last_loss': column(lambda m: last_loss(m)),
        'total_epochs': column(lambda m: epochs(m)),
        'best_epoch': column(lambda m: best_epoch(m)),
        'val_loss': column(lambda m: val_loss(m)),
        'test_bac': column(lambda m: test_bac(m)),
    })
def warn_if_experiments_differ(data, must_be_unique):
    """
    Sends a warning if a field which is expected to have one unique value
    has more. Use to inspect the dataframe
    """
    for field in must_be_unique:
        distinct = data[field].unique()
        if len(distinct) == 1:
            continue
        print(f"WARNING: There are experiments with different {field}:")
        print(distinct)
def extract_type_from_nnlm(data):
    """
    Extract the version for the NNLM collection of Language Models
    https://tfhub.dev/google/collections/nnlm/1
    """
    # e.g. 'nnlm-en-dim128-with-normalization' -> 'en-dim128'
    nnlm_pattern = re.compile(r'nnlm-(e.-.*)-w.*')
    return data['lm'].str.extract(nnlm_pattern)
def average_list_metric(
        data,
        metric_field,
        dimension_field,
        ignore_trailing=True):
    """
    Averages "dimension_field" values of list field (metric_field).
    This is used on experiments results that are a list, for instance,
    val_loss or accuracy. This can be used to plot learning curves.

    The argument 'ignore_trailing' will stop averaging values on the list
    with the least arguments. For example,
        if ignore_trailing is True: [1, 1, 3] [1, 2] will produce [1, 1,5]
        if ignore_trailing is False: [1, 1, 3] [1, 2] will produce [1, 1,5, 3]

    :return: DataFrame with one row per distinct dimension value, holding the
             averaged metric list in `metric_field`
    """
    def val_or_none(list_, index):
        # Safe positional access: None when the list is shorter than `index`.
        if index < len(list_):
            return list_[index]
        return None

    if not ignore_trailing:
        print("WARNING: ignore_trailing set to False. "
              "Last epochs for some dimensions may be misleading.")

    # Group the metric lists by their dimension value.
    values = collections.defaultdict(list)
    for _, row in data.iterrows():
        values[row[dimension_field]].append(row[metric_field])

    aggregated = collections.defaultdict(list)
    for dim, metrics in values.items():
        aggregated[dimension_field].append(dim)
        vals = []
        # With ignore_trailing, only average up to the shortest list; otherwise
        # go to the longest and skip the missing (None) entries per position.
        if ignore_trailing:
            epochs = min([len(x) for x in metrics])
        else:
            epochs = max([len(x) for x in metrics])
        for index in range(epochs):
            if not ignore_trailing:
                m = [val_or_none(x, index) for x in metrics]
                m = list(filter(lambda x: x is not None, m))
                val = sum(m) / len(m)
            else:
                val = sum([x[index] for x in metrics]) / len(metrics)
            vals.append(val)
        aggregated[metric_field].append(vals)
    return pd.DataFrame(aggregated)
# Graph utils
# from https://stackoverflow.com/questions/1855884/determine-font-color-based-on-background-color
def contrast_color(color, blackish='black', whiteish='whitesmoke'):
    """Pick a readable text color (dark or light) for an RGB(A) background."""
    # Perceived luminance with the ITU-R BT.601 weights; index instead of
    # unpacking so RGBA tuples (e.g. from get_facecolor) also work.
    luminance = 0.299 * color[0] + 0.587 * color[1] + 0.114 * color[2]
    return blackish if luminance > 0.6 else whiteish
def colors_by_value(values, color_space='muted', return_legend_handles=False):
    """Map each categorical value in *values* to a palette color.

    Returns the per-row color list; when return_legend_handles is True,
    returns a (colors, handles) tuple with matplotlib legend patches.
    """
    categories = sorted(values.unique())
    palette = sns.color_palette(color_space, len(categories))
    color_of = dict(zip(categories, palette))
    colors = [color_of[val] for val in values]
    if not return_legend_handles:
        return colors
    handles = [mpl.patches.Patch(color=col, label=cat)
               for cat, col in color_of.items()]
    return (colors, handles)
# TODO: Review
# https://matplotlib.org/3.1.3/gallery/statistics/barchart_demo.html
# for better handling of the text position.
# Write the blog post afterwards with the solution:
##http://eduardofv.com/wp-admin/post.php?post=517&action=edit
#https://colab.research.google.com/drive/1kwKuOwim7ngYmFSRjkVYMi5_K6WA9vmD
def annotate_barh(ax, fontsize='x-small'):
    """Adds value labels inside the bars of a barh plot"""
    # Force a draw so text extents can be measured against real renderer state.
    plt.draw()
    for patch in ax.patches:
        label = f"{patch.get_width():1.4f}"
        p_x = patch.get_width()
        p_y = patch.get_y()
        # Put an invisible text to measure its dimensions (alpha=0 hides it).
        txt = plt.text(p_x, p_y, label, fontsize=fontsize, alpha=0.0)
        # Convert the label's pixel bounding box into data coordinates.
        bbox = txt.get_window_extent().transformed(ax.transData.inverted())
        t_w = bbox.width * 1.1
        t_h = bbox.height
        # Vertically center the label within the bar.
        p_y += (patch.get_height() - t_h)/1.5
        if t_w > 0.9 * patch.get_width():
            # Label wider than the bar: draw it outside, at the bar's end.
            plt.text(p_x, p_y, label, fontsize=fontsize)
        else:
            # Label fits: draw it inside the bar with a contrasting color.
            p_x -= t_w
            col = contrast_color(patch.get_facecolor())
            plt.text(p_x, p_y, label, fontsize=fontsize, color=col)
def plot_learning_curve(data, curve_field, dimension_field):
    """Plot one learning curve per row of *data*.

    Each row's *curve_field* holds a list of per-epoch values; the row's
    *dimension_field* value is used as its legend label and color key.
    """
    axes = plt.axes()
    ordered = data.sort_values(dimension_field)
    ordered['color'] = colors_by_value(ordered[dimension_field])
    for _, sample in ordered.iterrows():
        plt.plot(sample[curve_field],
                 label=sample[dimension_field],
                 color=sample['color'])
    axes.legend()
# Config object field parsing
# These methods convert the values from the metadata to standard values that
# can be used in the analysis. They fill non-existent values, select the
# correct fields, set defaults, etc.
#pylint: disable=C0116
def learning_rate(metadata):
    """Return the optimizer's learning rate, or None when not recorded."""
    try:
        return metadata['MODEL-OPTIMIZER_FULL_CONFIG']['learning_rate']
    except KeyError:
        print("WARNING: Actual learning_rate not found")
        return None
def training_time(metadata):
    """Return the recorded wall-clock training time, or None."""
    return metadata.get('EXPERIMENT-TRAINING_TOOK')
def lm(metadata):
    """Return a short language-model name parsed from the metadata.

    TF-Hub URLs are reduced to their model segment; HuggingFace model names
    are returned as-is; None when neither field is present.
    """
    if 'TFHUB-EMB_MODEL' in metadata:
        url = metadata['TFHUB-EMB_MODEL']
        match = re.search("https://tfhub.dev/google/([^/]+)/.$", url)
        if match is None:
            print(f"WARNING: LM could not be parsed from {url}")
            return "LM Not Found"
        return match.group(1)
    if 'HUG-EMB_MODEL' in metadata:
        return metadata['HUG-EMB_MODEL']
    return None
def gpu(metadata):
    """Return the GPU model name from the environment info, if available."""
    env = metadata['EXPERIMENT-ENVIRONMENT']
    if env[4] != 'GPU: available':
        return "GPU: Not available"
    # env[5] looks like "GPU: <name> (<details>)" -> keep just <name>.
    return env[5].split('(')[0].split(":")[1].strip()
def max_seq_len(metadata):
    """Return the configured max sequence length, or 'Full' when unlimited."""
    # HuggingFace setting takes precedence over the BERT-specific one.
    for key in ('MODEL-HUG_MAX_SEQ_LEN', 'MODEL-BERT_MAX_SEQ_LEN'):
        if key in metadata:
            return metadata[key]
    return 'Full'
def rows_to_load(metadata):
    """Return how many dataset rows were loaded ('All' when unrestricted)."""
    return metadata.get('DATA-ROWS_TO_LOAD', "All")
def min_loss(metadata):
    """Return the best (lowest) validation loss of the first history, or None."""
    histories = metadata.get('RESULTS-HISTORIES')
    if not histories:
        return None
    return min(histories[0]['val_loss'])
def last_loss(metadata):
    """Return the final validation loss of the first history, or None."""
    histories = metadata.get('RESULTS-HISTORIES')
    if not histories:
        return None
    return histories[0]['val_loss'][-1]
def epochs(metadata):
    """Return the number of trained epochs, or 'NA' when unknown."""
    histories = metadata.get('RESULTS-HISTORIES')
    if histories:
        return len(histories[0]['loss'])
    return 'NA'
def best_epoch(metadata):
    """Return the 1-based epoch with the lowest validation loss, or 'NA'."""
    histories = metadata.get('RESULTS-HISTORIES')
    if histories and 'val_loss' in histories[0]:
        # +1 converts the 0-based argmin index to a 1-based epoch number.
        return np.argmin(histories[0]['val_loss']) + 1
    return 'NA'
def training_set_size(metadata):
    """Return the number of training samples used in the experiment."""
    return metadata['DATA-TRAINING_SET_SIZE']
def test_set_size(metadata):
return metadata['DATA-TEST_SET_SIZE']
def test_bac(metadata):
val = None
try:
val = metadata['RESULTS-HISTORIES'][0]['test_bac']
except KeyError:
print(f"WARNING: test_bac not found in Histories")
return val
def val_loss(metadata):
    """Return the validation-loss list from the first history, or None."""
    try:
        return metadata['RESULTS-HISTORIES'][0]['val_loss']
    except (KeyError, IndexError):
        # IndexError covers an empty histories list, matching the guards in
        # min_loss/last_loss. The warning string needs no f-prefix (no
        # placeholders).
        print("WARNING: val_loss not found in Histories")
        return None
| 35.933962 | 96 | 0.651615 |
729edd111bc9e4b6b6fd230c210dbe6c51060d2b | 466 | py | Python | packages/python/plotly/plotly/validators/layout/xaxis/rangeselector/_activecolor.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/validators/layout/xaxis/rangeselector/_activecolor.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/validators/layout/xaxis/rangeselector/_activecolor.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | import _plotly_utils.basevalidators
class ActivecolorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Color validator for ``layout.xaxis.rangeselector.activecolor``."""

    def __init__(
        self,
        plotly_name="activecolor",
        parent_name="layout.xaxis.rangeselector",
        **kwargs,
    ):
        super(ActivecolorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Callers may override edit_type; "plot" is the default here.
            edit_type=kwargs.pop("edit_type", "plot"),
            **kwargs,
        )
| 27.411765 | 72 | 0.633047 |
f5f7956e9c5ff667021ad10ccdbb0eb47192b31f | 19,401 | py | Python | tests/components/input_select/test_init.py | basicpail/core | 5cc54618c5af3f75c08314bf2375cc7ac40d2b7e | [
"Apache-2.0"
] | 11 | 2018-02-16T15:35:47.000Z | 2020-01-14T15:20:00.000Z | tests/components/input_select/test_init.py | jagadeeshvenkatesh/core | 1bd982668449815fee2105478569f8e4b5670add | [
"Apache-2.0"
] | 87 | 2020-07-06T22:22:54.000Z | 2022-03-31T06:01:46.000Z | tests/components/input_select/test_init.py | jagadeeshvenkatesh/core | 1bd982668449815fee2105478569f8e4b5670add | [
"Apache-2.0"
] | 11 | 2020-12-16T13:48:14.000Z | 2022-02-01T00:28:05.000Z | """The tests for the Input select component."""
# pylint: disable=protected-access
from unittest.mock import patch
import pytest
from homeassistant.components.input_select import (
ATTR_OPTION,
ATTR_OPTIONS,
CONF_INITIAL,
DOMAIN,
SERVICE_SELECT_FIRST,
SERVICE_SELECT_LAST,
SERVICE_SELECT_NEXT,
SERVICE_SELECT_OPTION,
SERVICE_SELECT_PREVIOUS,
SERVICE_SET_OPTIONS,
)
from homeassistant.const import (
ATTR_EDITABLE,
ATTR_ENTITY_ID,
ATTR_FRIENDLY_NAME,
ATTR_ICON,
ATTR_NAME,
SERVICE_RELOAD,
)
from homeassistant.core import Context, State
from homeassistant.exceptions import Unauthorized
from homeassistant.helpers import entity_registry as er
from homeassistant.loader import bind_hass
from homeassistant.setup import async_setup_component
from tests.common import mock_restore_cache
@pytest.fixture
def storage_setup(hass, hass_storage):
    """Return an async initializer that seeds storage and sets up the domain."""

    async def _storage(items=None, config=None):
        # Default storage content: a single editable entity "from_storage".
        if items is None:
            hass_storage[DOMAIN] = {
                "key": DOMAIN,
                "version": 1,
                "data": {
                    "items": [
                        {
                            "id": "from_storage",
                            "name": "from storage",
                            "options": ["storage option 1", "storage option 2"],
                        }
                    ]
                },
            }
        else:
            hass_storage[DOMAIN] = {
                "key": DOMAIN,
                "version": 1,
                "data": {"items": items},
            }
        # Default YAML config is empty so only storage entities are created.
        if config is None:
            config = {DOMAIN: {}}
        return await async_setup_component(hass, DOMAIN, config)

    return _storage
@bind_hass
def select_option(hass, entity_id, option):
    """Set value of input_select.

    This is a legacy helper method. Do not use it for new tests.
    """
    service_data = {ATTR_ENTITY_ID: entity_id, ATTR_OPTION: option}
    call = hass.services.async_call(DOMAIN, SERVICE_SELECT_OPTION, service_data)
    hass.async_create_task(call)
@bind_hass
def select_next(hass, entity_id):
    """Set next value of input_select.

    This is a legacy helper method. Do not use it for new tests.
    """
    call = hass.services.async_call(
        DOMAIN, SERVICE_SELECT_NEXT, {ATTR_ENTITY_ID: entity_id}
    )
    hass.async_create_task(call)
@bind_hass
def select_previous(hass, entity_id):
    """Set previous value of input_select.

    This is a legacy helper method. Do not use it for new tests.
    """
    call = hass.services.async_call(
        DOMAIN, SERVICE_SELECT_PREVIOUS, {ATTR_ENTITY_ID: entity_id}
    )
    hass.async_create_task(call)
@bind_hass
def select_first(hass, entity_id):
    """Set first value of input_select.

    This is a legacy helper method. Do not use it for new tests.
    """
    call = hass.services.async_call(
        DOMAIN, SERVICE_SELECT_FIRST, {ATTR_ENTITY_ID: entity_id}
    )
    hass.async_create_task(call)
@bind_hass
def select_last(hass, entity_id):
    """Set last value of input_select.

    This is a legacy helper method. Do not use it for new tests.
    """
    call = hass.services.async_call(
        DOMAIN, SERVICE_SELECT_LAST, {ATTR_ENTITY_ID: entity_id}
    )
    hass.async_create_task(call)
async def test_config(hass):
    """Test that invalid configurations are rejected at setup."""
    invalid_configs = [
        None,
        {},
        {"name with space": None},
        # {'bad_options': {'options': None}},
        {"bad_initial": {"options": [1, 2], "initial": 3}},
    ]
    # Setup must fail for every invalid config.
    for cfg in invalid_configs:
        assert not await async_setup_component(hass, DOMAIN, {DOMAIN: cfg})
async def test_select_option(hass):
    """Test select_option methods."""
    assert await async_setup_component(
        hass,
        DOMAIN,
        {DOMAIN: {"test_1": {"options": ["some option", "another option"]}}},
    )
    entity_id = "input_select.test_1"
    # Without an explicit initial value, the first option is selected.
    state = hass.states.get(entity_id)
    assert state.state == "some option"
    select_option(hass, entity_id, "another option")
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state.state == "another option"
    # Selecting an unknown option must leave the state unchanged.
    select_option(hass, entity_id, "non existing option")
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state.state == "another option"
async def test_select_next(hass):
    """Test select_next methods."""
    assert await async_setup_component(
        hass,
        DOMAIN,
        {
            DOMAIN: {
                "test_1": {
                    "options": ["first option", "middle option", "last option"],
                    "initial": "middle option",
                }
            }
        },
    )
    entity_id = "input_select.test_1"
    state = hass.states.get(entity_id)
    assert state.state == "middle option"
    select_next(hass, entity_id)
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state.state == "last option"
    # Advancing past the last option wraps around to the first one.
    select_next(hass, entity_id)
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state.state == "first option"
async def test_select_previous(hass):
    """Test select_previous methods."""
    assert await async_setup_component(
        hass,
        DOMAIN,
        {
            DOMAIN: {
                "test_1": {
                    "options": ["first option", "middle option", "last option"],
                    "initial": "middle option",
                }
            }
        },
    )
    entity_id = "input_select.test_1"
    state = hass.states.get(entity_id)
    assert state.state == "middle option"
    select_previous(hass, entity_id)
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state.state == "first option"
    # Going back past the first option wraps around to the last one.
    select_previous(hass, entity_id)
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state.state == "last option"
async def test_select_first_last(hass):
    """Test select_first and _last methods."""
    assert await async_setup_component(
        hass,
        DOMAIN,
        {
            DOMAIN: {
                "test_1": {
                    "options": ["first option", "middle option", "last option"],
                    "initial": "middle option",
                }
            }
        },
    )
    entity_id = "input_select.test_1"
    state = hass.states.get(entity_id)
    assert state.state == "middle option"
    # Jump straight to the first, then straight to the last option.
    select_first(hass, entity_id)
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state.state == "first option"
    select_last(hass, entity_id)
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state.state == "last option"
async def test_config_options(hass):
    """Test configuration options."""
    count_start = len(hass.states.async_entity_ids())
    test_2_options = ["Good Option", "Better Option", "Best Option"]
    assert await async_setup_component(
        hass,
        DOMAIN,
        {
            DOMAIN: {
                "test_1": {"options": [1, 2]},
                "test_2": {
                    "name": "Hello World",
                    "icon": "mdi:work",
                    "options": test_2_options,
                    "initial": "Better Option",
                },
            }
        },
    )
    # Exactly two new entities must have been created.
    assert count_start + 2 == len(hass.states.async_entity_ids())
    state_1 = hass.states.get("input_select.test_1")
    state_2 = hass.states.get("input_select.test_2")
    assert state_1 is not None
    assert state_2 is not None
    # Non-string options are coerced to strings; no initial -> first option.
    assert state_1.state == "1"
    assert state_1.attributes.get(ATTR_OPTIONS) == ["1", "2"]
    assert ATTR_ICON not in state_1.attributes
    assert state_2.state == "Better Option"
    assert state_2.attributes.get(ATTR_OPTIONS) == test_2_options
    assert state_2.attributes.get(ATTR_FRIENDLY_NAME) == "Hello World"
    assert state_2.attributes.get(ATTR_ICON) == "mdi:work"
async def test_set_options_service(hass):
    """Test set_options service."""
    assert await async_setup_component(
        hass,
        DOMAIN,
        {
            DOMAIN: {
                "test_1": {
                    "options": ["first option", "middle option", "last option"],
                    "initial": "middle option",
                }
            }
        },
    )
    entity_id = "input_select.test_1"
    state = hass.states.get(entity_id)
    assert state.state == "middle option"
    # Replacing the option list resets the state to the first new option.
    data = {ATTR_OPTIONS: ["test1", "test2"], "entity_id": entity_id}
    await hass.services.async_call(DOMAIN, SERVICE_SET_OPTIONS, data)
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state.state == "test1"
    # Options from the old list are no longer selectable.
    select_option(hass, entity_id, "first option")
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state.state == "test1"
    select_option(hass, entity_id, "test2")
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state.state == "test2"
async def test_restore_state(hass):
    """Ensure states are restored on startup."""
    mock_restore_cache(
        hass,
        (
            State("input_select.s1", "last option"),
            State("input_select.s2", "bad option"),
        ),
    )
    options = {"options": ["first option", "middle option", "last option"]}
    await async_setup_component(hass, DOMAIN, {DOMAIN: {"s1": options, "s2": options}})
    # A valid cached state is restored as-is.
    state = hass.states.get("input_select.s1")
    assert state
    assert state.state == "last option"
    # An invalid cached state falls back to the first option.
    state = hass.states.get("input_select.s2")
    assert state
    assert state.state == "first option"
async def test_initial_state_overrules_restore_state(hass):
    """Ensure a configured initial value wins over the restored state."""
    mock_restore_cache(
        hass,
        (
            State("input_select.s1", "last option"),
            State("input_select.s2", "bad option"),
        ),
    )
    options = {
        "options": ["first option", "middle option", "last option"],
        "initial": "middle option",
    }
    await async_setup_component(hass, DOMAIN, {DOMAIN: {"s1": options, "s2": options}})
    # Both entities start at the configured initial, ignoring the cache.
    state = hass.states.get("input_select.s1")
    assert state
    assert state.state == "middle option"
    state = hass.states.get("input_select.s2")
    assert state
    assert state.state == "middle option"
async def test_input_select_context(hass, hass_admin_user):
    """Test that input_select context works."""
    assert await async_setup_component(
        hass,
        "input_select",
        {
            "input_select": {
                "s1": {"options": ["first option", "middle option", "last option"]}
            }
        },
    )
    state = hass.states.get("input_select.s1")
    assert state is not None
    await hass.services.async_call(
        "input_select",
        "select_next",
        {"entity_id": state.entity_id},
        True,
        Context(user_id=hass_admin_user.id),
    )
    # The resulting state change must carry the calling user's context.
    state2 = hass.states.get("input_select.s1")
    assert state2 is not None
    assert state.state != state2.state
    assert state2.context.user_id == hass_admin_user.id
async def test_reload(hass, hass_admin_user, hass_read_only_user):
    """Test reload service."""
    count_start = len(hass.states.async_entity_ids())
    ent_reg = er.async_get(hass)
    assert await async_setup_component(
        hass,
        DOMAIN,
        {
            DOMAIN: {
                "test_1": {
                    "options": ["first option", "middle option", "last option"],
                    "initial": "middle option",
                },
                "test_2": {
                    "options": ["an option", "not an option"],
                    "initial": "an option",
                },
            }
        },
    )
    assert count_start + 2 == len(hass.states.async_entity_ids())
    state_1 = hass.states.get("input_select.test_1")
    state_2 = hass.states.get("input_select.test_2")
    state_3 = hass.states.get("input_select.test_3")
    assert state_1 is not None
    assert state_2 is not None
    assert state_3 is None
    assert state_1.state == "middle option"
    assert state_2.state == "an option"
    assert ent_reg.async_get_entity_id(DOMAIN, DOMAIN, "test_1") is not None
    assert ent_reg.async_get_entity_id(DOMAIN, DOMAIN, "test_2") is not None
    assert ent_reg.async_get_entity_id(DOMAIN, DOMAIN, "test_3") is None
    # Reload with a config that drops test_1, changes test_2 and adds test_3.
    with patch(
        "homeassistant.config.load_yaml_config_file",
        autospec=True,
        return_value={
            DOMAIN: {
                "test_2": {
                    "options": ["an option", "reloaded option"],
                    "initial": "reloaded option",
                },
                "test_3": {
                    "options": ["new option", "newer option"],
                    "initial": "newer option",
                },
            }
        },
    ):
        # A read-only user must not be allowed to trigger a reload.
        with pytest.raises(Unauthorized):
            await hass.services.async_call(
                DOMAIN,
                SERVICE_RELOAD,
                blocking=True,
                context=Context(user_id=hass_read_only_user.id),
            )
        await hass.services.async_call(
            DOMAIN,
            SERVICE_RELOAD,
            blocking=True,
            context=Context(user_id=hass_admin_user.id),
        )
        await hass.async_block_till_done()
    assert count_start + 2 == len(hass.states.async_entity_ids())
    state_1 = hass.states.get("input_select.test_1")
    state_2 = hass.states.get("input_select.test_2")
    state_3 = hass.states.get("input_select.test_3")
    assert state_1 is None
    assert state_2 is not None
    assert state_3 is not None
    # test_2 keeps its current (still valid) state; test_3 uses its initial.
    assert state_2.state == "an option"
    assert state_3.state == "newer option"
    assert ent_reg.async_get_entity_id(DOMAIN, DOMAIN, "test_1") is None
    assert ent_reg.async_get_entity_id(DOMAIN, DOMAIN, "test_2") is not None
    assert ent_reg.async_get_entity_id(DOMAIN, DOMAIN, "test_3") is not None
async def test_load_from_storage(hass, storage_setup):
    """Test set up from storage."""
    assert await storage_setup()
    state = hass.states.get(f"{DOMAIN}.from_storage")
    # Storage entities start at their first option and are editable.
    assert state.state == "storage option 1"
    assert state.attributes.get(ATTR_FRIENDLY_NAME) == "from storage"
    assert state.attributes.get(ATTR_EDITABLE)
async def test_editable_state_attribute(hass, storage_setup):
    """Test editable attribute."""
    assert await storage_setup(
        config={DOMAIN: {"from_yaml": {"options": ["yaml option", "other option"]}}}
    )
    # Storage-backed entities are editable ...
    state = hass.states.get(f"{DOMAIN}.from_storage")
    assert state.state == "storage option 1"
    assert state.attributes.get(ATTR_FRIENDLY_NAME) == "from storage"
    assert state.attributes.get(ATTR_EDITABLE)
    # ... while YAML-configured entities are not.
    state = hass.states.get(f"{DOMAIN}.from_yaml")
    assert state.state == "yaml option"
    assert not state.attributes.get(ATTR_EDITABLE)
async def test_ws_list(hass, hass_ws_client, storage_setup):
    """Test listing via WS."""
    assert await storage_setup(
        config={DOMAIN: {"from_yaml": {"options": ["yaml option"]}}}
    )
    client = await hass_ws_client(hass)
    await client.send_json({"id": 6, "type": f"{DOMAIN}/list"})
    resp = await client.receive_json()
    assert resp["success"]
    # Only storage-backed items are listed; YAML entities are excluded.
    storage_ent = "from_storage"
    yaml_ent = "from_yaml"
    result = {item["id"]: item for item in resp["result"]}
    assert len(result) == 1
    assert storage_ent in result
    assert yaml_ent not in result
    assert result[storage_ent][ATTR_NAME] == "from storage"
async def test_ws_delete(hass, hass_ws_client, storage_setup):
    """Test WS delete cleans up entity registry."""
    assert await storage_setup()
    input_id = "from_storage"
    input_entity_id = f"{DOMAIN}.{input_id}"
    ent_reg = er.async_get(hass)
    state = hass.states.get(input_entity_id)
    assert state is not None
    assert ent_reg.async_get_entity_id(DOMAIN, DOMAIN, input_id) is not None
    client = await hass_ws_client(hass)
    await client.send_json(
        {"id": 6, "type": f"{DOMAIN}/delete", f"{DOMAIN}_id": f"{input_id}"}
    )
    resp = await client.receive_json()
    assert resp["success"]
    # Both the state machine and the registry entry must be gone.
    state = hass.states.get(input_entity_id)
    assert state is None
    assert ent_reg.async_get_entity_id(DOMAIN, DOMAIN, input_id) is None
async def test_update(hass, hass_ws_client, storage_setup):
    """Test updating the options via WS updates the state."""
    items = [
        {
            "id": "from_storage",
            "name": "from storage",
            "options": ["yaml update 1", "yaml update 2"],
        }
    ]
    assert await storage_setup(items)
    input_id = "from_storage"
    input_entity_id = f"{DOMAIN}.{input_id}"
    ent_reg = er.async_get(hass)
    state = hass.states.get(input_entity_id)
    assert state.attributes[ATTR_OPTIONS] == ["yaml update 1", "yaml update 2"]
    assert ent_reg.async_get_entity_id(DOMAIN, DOMAIN, input_id) is not None
    client = await hass_ws_client(hass)
    await client.send_json(
        {
            "id": 6,
            "type": f"{DOMAIN}/update",
            f"{DOMAIN}_id": f"{input_id}",
            "options": ["new option", "newer option"],
            CONF_INITIAL: "newer option",
        }
    )
    resp = await client.receive_json()
    assert resp["success"]
    state = hass.states.get(input_entity_id)
    assert state.attributes[ATTR_OPTIONS] == ["new option", "newer option"]
    # An update whose (implicit) initial is not in the new options must fail.
    await client.send_json(
        {
            "id": 7,
            "type": f"{DOMAIN}/update",
            f"{DOMAIN}_id": f"{input_id}",
            "options": ["new option", "no newer option"],
        }
    )
    resp = await client.receive_json()
    assert not resp["success"]
async def test_ws_create(hass, hass_ws_client, storage_setup):
    """Test create WS."""
    assert await storage_setup(items=[])
    input_id = "new_input"
    input_entity_id = f"{DOMAIN}.{input_id}"
    ent_reg = er.async_get(hass)
    state = hass.states.get(input_entity_id)
    assert state is None
    assert ent_reg.async_get_entity_id(DOMAIN, DOMAIN, input_id) is None
    client = await hass_ws_client(hass)
    await client.send_json(
        {
            "id": 6,
            "type": f"{DOMAIN}/create",
            "name": "New Input",
            "options": ["new option", "even newer option"],
            "initial": "even newer option",
        }
    )
    resp = await client.receive_json()
    assert resp["success"]
    # The new entity starts at the requested initial option.
    state = hass.states.get(input_entity_id)
    assert state.state == "even newer option"
async def test_setup_no_config(hass, hass_admin_user):
    """Test component setup with no config."""
    count_start = len(hass.states.async_entity_ids())
    assert await async_setup_component(hass, DOMAIN, {})
    with patch(
        "homeassistant.config.load_yaml_config_file", autospec=True, return_value={}
    ):
        await hass.services.async_call(
            DOMAIN,
            SERVICE_RELOAD,
            blocking=True,
            context=Context(user_id=hass_admin_user.id),
        )
        await hass.async_block_till_done()
    # Neither setup nor reload should have created any entity.
    assert count_start == len(hass.states.async_entity_ids())
| 28.657312 | 87 | 0.606979 |
04c52ca28be1fe20d8d392a987289186b26ab507 | 1,397 | py | Python | mapping/hdl_graph_slam/scripts/map2odom_publisher.py | xiaoshitou4/GNSS-INS | 6ea16568d85eb1ed6b5cc49fb192dcba0e0f7491 | [
"Unlicense"
] | 3 | 2019-07-27T05:31:15.000Z | 2021-06-10T02:16:46.000Z | mapping/hdl_graph_slam/scripts/map2odom_publisher.py | yxw027/GNSS-INS | e5c5b7901b270a9c4d3a0ffd5555843d969f4018 | [
"Unlicense"
] | null | null | null | mapping/hdl_graph_slam/scripts/map2odom_publisher.py | yxw027/GNSS-INS | e5c5b7901b270a9c4d3a0ffd5555843d969f4018 | [
"Unlicense"
] | 3 | 2019-12-25T07:47:22.000Z | 2021-02-03T03:24:46.000Z | #!/usr/bin/python
import tf
import rospy
from geometry_msgs.msg import *
class Map2OdomPublisher:
  """Re-broadcasts the map->odom transform published by hdl_graph_slam as TF.

  NOTE: this file is Python 2 (print statements, rospy).
  """

  def __init__(self):
    self.broadcaster = tf.TransformBroadcaster()
    self.subscriber = rospy.Subscriber('/hdl_graph_slam/odom2pub', TransformStamped, self.callback)
    # self.subscriber = rospy.Subscriber('/hdl_graph_slam/odom2pub', TransformStamped, self.callback)

  def callback(self, odom_msg):
    # Only keeps the most recent transform; spin() broadcasts it.
    self.odom_msg = odom_msg

  def spin(self):
    # Until the first message arrives, publish an identity odom->map
    # transform so the TF tree is connected.
    if not hasattr(self, 'odom_msg'):
      self.broadcaster.sendTransform((0, 0, 0), (0, 0, 0, 1), rospy.Time.now(), 'odom', 'map')
      print 'return ----------------'
      return
    pose = self.odom_msg.transform
    pos = (pose.translation.x, pose.translation.y, pose.translation.z)
    quat = (pose.rotation.x, pose.rotation.y, pose.rotation.z, pose.rotation.w)
    map_frame_id = self.odom_msg.header.frame_id
    odom_frame_id = self.odom_msg.child_frame_id
    # Re-stamp with the current time to keep the transform fresh in TF.
    self.broadcaster.sendTransform(pos, quat, rospy.Time.now(), odom_frame_id, map_frame_id)
    # NOTE(review): leftover debug print - consider removing.
    print 'ssssss'
    # self.broadcaster.sendTransform(pos, quat, rospy.Time.now(), odom_frame_id, map_frame_id)
def main():
  """Run the map2odom publisher node at a fixed rate."""
  rospy.init_node('map2odom_publisher')
  node = Map2OdomPublisher()
  rate = rospy.Rate(50.0)  # Hz; initially this was 100
  while not rospy.is_shutdown():
    node.spin()
    rate.sleep()
try:
main()
except:
print 'shutdown'
| 28.510204 | 106 | 0.702219 |
0dba08f62db28c5709c2b056113d3e4f20dff3d7 | 2,342 | py | Python | shortcode/migrations/0021_auto_20200211_1607.py | marcolw/shortcode_backend | 888ffb4832ec8d0ee682780e6121e7d771a1d98b | [
"MIT"
] | null | null | null | shortcode/migrations/0021_auto_20200211_1607.py | marcolw/shortcode_backend | 888ffb4832ec8d0ee682780e6121e7d771a1d98b | [
"MIT"
] | null | null | null | shortcode/migrations/0021_auto_20200211_1607.py | marcolw/shortcode_backend | 888ffb4832ec8d0ee682780e6121e7d771a1d98b | [
"MIT"
] | null | null | null | # Generated by Django 3.0.2 on 2020-02-11 16:07
from django.conf import settings
import django.contrib.postgres.fields
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: adds ProductChangeLog (with FKs to EventLog
    and the user model), adds event_state to EventLog, adjusts field
    choices/defaults, and removes the UserLog model."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('shortcode', '0020_auto_20200207_0414'),
    ]
    operations = [
        migrations.CreateModel(
            name='ProductChangeLog',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sku', models.CharField(db_index=True, max_length=31, verbose_name='SKU')),
                ('prev_data', django.contrib.postgres.fields.jsonb.JSONField(verbose_name='Previous Product Data')),
                ('new_data', django.contrib.postgres.fields.jsonb.JSONField(verbose_name='New Product Data')),
                ('fields', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=63), blank=True, size=None)),
            ],
        ),
        migrations.AddField(
            model_name='eventlog',
            name='event_state',
            field=models.IntegerField(choices=[(0, 'Started'), (1, 'Completed')], default=0),
        ),
        migrations.AlterField(
            model_name='field',
            name='field_type',
            field=models.IntegerField(choices=[(0, 'Data'), (1, 'Shortcode'), (2, 'Result')], default=0),
        ),
        migrations.AlterField(
            model_name='usersetting',
            name='size_shortcode',
            field=models.IntegerField(default=11, verbose_name='Font Size for the ShortCode'),
        ),
        migrations.DeleteModel(
            name='UserLog',
        ),
        # The FK fields are added after CreateModel because the referenced
        # models live in other apps (EventLog / AUTH_USER_MODEL).
        migrations.AddField(
            model_name='productchangelog',
            name='event_log',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='shortcode.EventLog', verbose_name='EventLog'),
        ),
        migrations.AddField(
            model_name='productchangelog',
            name='user',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='User'),
        ),
    ]
| 41.087719 | 143 | 0.626388 |
b7502c6781bfce75b06e665ffd661a8f536452cc | 7,677 | py | Python | models/model.py | mkmysk123456789/Informer2020 | ad4b895169a17db580aab6d2c09fd07e06c9b6fa | [
"Apache-2.0"
] | 1 | 2021-07-05T11:20:07.000Z | 2021-07-05T11:20:07.000Z | models/model.py | mkmysk123456789/Informer2020 | ad4b895169a17db580aab6d2c09fd07e06c9b6fa | [
"Apache-2.0"
] | null | null | null | models/model.py | mkmysk123456789/Informer2020 | ad4b895169a17db580aab6d2c09fd07e06c9b6fa | [
"Apache-2.0"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
from utils.masking import TriangularCausalMask, ProbMask
from models.encoder import Encoder, EncoderLayer, ConvLayer, EncoderStack
from models.decoder import Decoder, DecoderLayer
from models.attn import FullAttention, ProbAttention, AttentionLayer
from models.embed import DataEmbedding
class Informer(nn.Module):
    """Informer encoder-decoder model for long-sequence time-series forecasting."""

    def __init__(self, enc_in, dec_in, c_out, seq_len, label_len, out_len,
                 factor=5, d_model=512, n_heads=8, e_layers=3, d_layers=2, d_ff=512,
                 dropout=0.0, attn='prob', embed='fixed', freq='h', activation='gelu',
                 output_attention=False, distil=True, mix=True,
                 device=torch.device('cuda:0')):
        super(Informer, self).__init__()
        self.pred_len = out_len  # length of the forecast horizon
        self.attn = attn
        self.output_attention = output_attention
        # Encoding: value + temporal embeddings for encoder and decoder inputs.
        self.enc_embedding = DataEmbedding(
            enc_in, d_model, embed, freq, dropout)
        self.dec_embedding = DataEmbedding(
            dec_in, d_model, embed, freq, dropout)
        # Attention: sparse ProbAttention or dense FullAttention.
        Attn = ProbAttention if attn == 'prob' else FullAttention
        # Encoder
        self.encoder = Encoder(
            [
                EncoderLayer(
                    AttentionLayer(Attn(False, factor, attention_dropout=dropout, output_attention=output_attention),
                                   d_model, n_heads, mix=False),
                    d_model,
                    d_ff,
                    dropout=dropout,
                    activation=activation
                ) for l in range(e_layers)
            ],
            [
                ConvLayer(
                    d_model
                ) for l in range(e_layers-1)
            ] if distil else None,  # default false
            norm_layer=torch.nn.LayerNorm(d_model)  # TODO(review): original note suggested changing this
        )
        # Decoder
        self.decoder = Decoder(
            [
                DecoderLayer(
                    AttentionLayer(Attn(True, factor, attention_dropout=dropout, output_attention=False),
                                   d_model, n_heads, mix=mix),
                    AttentionLayer(FullAttention(False, factor, attention_dropout=dropout, output_attention=False),
                                   d_model, n_heads, mix=False),
                    d_model,
                    d_ff,
                    dropout=dropout,
                    activation=activation,
                )
                for l in range(d_layers)
            ],
            norm_layer=torch.nn.LayerNorm(d_model)
        )
        # self.end_conv1 = nn.Conv1d(in_channels=label_len+out_len, out_channels=out_len, kernel_size=1, bias=True)
        # self.end_conv2 = nn.Conv1d(in_channels=d_model, out_channels=c_out, kernel_size=1, bias=True)
        self.projection = nn.Linear(d_model, c_out, bias=True)
        # c_out = 7

    def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec,
                enc_self_mask=None, dec_self_mask=None, dec_enc_mask=None):
        """Encode x_enc, decode x_dec against it, return the last pred_len steps."""
        enc_out = self.enc_embedding(x_enc, x_mark_enc)
        enc_out, attns = self.encoder(enc_out, attn_mask=enc_self_mask)
        dec_out = self.dec_embedding(x_dec, x_mark_dec)
        # Feed the encoder output into the decoder (cross attention).
        dec_out = self.decoder(
            dec_out, enc_out, x_mask=dec_self_mask, cross_mask=dec_enc_mask)
        dec_out = self.projection(dec_out)
        # dec_out = self.end_conv1(dec_out)
        # dec_out = self.end_conv2(dec_out.transpose(2,1)).transpose(1,2)
        if self.output_attention:
            return dec_out[:, -self.pred_len:, :], attns
        else:
            return dec_out[:, -self.pred_len:, :]  # [B, L, D]
# エンコーダがたくさんあるということ??
class InformerStack(nn.Module):
    """Informer variant stacking several encoders with different depths."""

    def __init__(self, enc_in, dec_in, c_out, seq_len, label_len, out_len,
                 factor=5, d_model=512, n_heads=8, e_layers=[3, 2, 1], d_layers=2, d_ff=512,
                 dropout=0.0, attn='prob', embed='fixed', freq='h', activation='gelu',
                 output_attention=False, distil=True, mix=True,
                 device=torch.device('cuda:0')):
        super(InformerStack, self).__init__()
        self.pred_len = out_len
        self.attn = attn
        self.output_attention = output_attention
        # Encoding
        self.enc_embedding = DataEmbedding(
            enc_in, d_model, embed, freq, dropout)
        self.dec_embedding = DataEmbedding(
            dec_in, d_model, embed, freq, dropout)
        # Attention
        Attn = ProbAttention if attn == 'prob' else FullAttention
        # Encoder
        # [0,1,2,...] you can customize here
        inp_lens = list(range(len(e_layers)))
        encoders = [
            Encoder(
                [
                    # attention + conv
                    EncoderLayer(
                        AttentionLayer(Attn(False, factor, attention_dropout=dropout, output_attention=output_attention),
                                       d_model, n_heads, mix=False),
                        d_model,
                        d_ff,
                        dropout=dropout,
                        activation=activation
                    ) for l in range(el)
                ],
                [
                    ConvLayer(
                        d_model
                    ) for l in range(el-1)
                ] if distil else None,
                norm_layer=torch.nn.LayerNorm(d_model)
            ) for el in e_layers]
        self.encoder = EncoderStack(encoders, inp_lens)
        # Decoder
        self.decoder = Decoder(
            [
                DecoderLayer(
                    AttentionLayer(Attn(True, factor, attention_dropout=dropout, output_attention=False),
                                   d_model, n_heads, mix=mix),
                    AttentionLayer(FullAttention(False, factor, attention_dropout=dropout, output_attention=False),
                                   d_model, n_heads, mix=False),
                    d_model,
                    d_ff,
                    dropout=dropout,
                    activation=activation,
                )
                for l in range(d_layers)
            ],
            norm_layer=torch.nn.LayerNorm(d_model)
        )
        # self.end_conv1 = nn.Conv1d(in_channels=label_len+out_len, out_channels=out_len, kernel_size=1, bias=True)
        # self.end_conv2 = nn.Conv1d(in_channels=d_model, out_channels=c_out, kernel_size=1, bias=True)
        self.projection = nn.Linear(d_model, c_out, bias=True)

    def forward(self, x_enc, x_mark_enc, x_dec, x_mark_dec,
                enc_self_mask=None, dec_self_mask=None, dec_enc_mask=None):
        """Encode, decode, and return the last pred_len forecast steps."""
        # (Translated note) Check the dimensionality of batch_x fed into the
        # model; prints inside forward were being ignored.
        # print("Shape of x_enc on top model:{}".format(x_enc.shape))
        enc_out = self.enc_embedding(x_enc, x_mark_enc)  # convert to embedded representation
        enc_out, attns = self.encoder(
            enc_out, attn_mask=enc_self_mask)  # encoder computation
        dec_out = self.dec_embedding(x_dec, x_mark_dec)  # decoder embedding
        dec_out = self.decoder(
            dec_out, enc_out, x_mask=dec_self_mask, cross_mask=dec_enc_mask)  # decoder computation
        dec_out = self.projection(dec_out)  # linear projection to c_out channels
        # dec_out = self.end_conv1(dec_out)
        # dec_out = self.end_conv2(dec_out.transpose(2,1)).transpose(1,2)
        if self.output_attention:
            return dec_out[:, -self.pred_len:, :], attns
        else:
            return dec_out[:, -self.pred_len:, :]  # [B, L, D]
| 43.868571 | 122 | 0.553602 |
dceadc0a0af83decb0cdd64166c58e1bae5ca48e | 5,827 | py | Python | exercises/classifiers_evaluation.py | nirpet/IML.HUJI | 6f8c7719760df3e381115f01cd5c3cfc9951b59c | [
"MIT"
] | null | null | null | exercises/classifiers_evaluation.py | nirpet/IML.HUJI | 6f8c7719760df3e381115f01cd5c3cfc9951b59c | [
"MIT"
] | null | null | null | exercises/classifiers_evaluation.py | nirpet/IML.HUJI | 6f8c7719760df3e381115f01cd5c3cfc9951b59c | [
"MIT"
] | null | null | null | from typing import Tuple
import numpy
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from IMLearn.learners.classifiers import LDA, GaussianNaiveBayes, Perceptron
from utils import *
pio.templates.default = "simple_white"
from math import atan2, pi
def load_dataset(filename: str) -> Tuple[np.ndarray, np.ndarray]:
    """
    Load dataset for comparing the Gaussian Naive Bayes and LDA classifiers.

    The .npy file is assumed to hold an ndarray of shape (n_samples, 3):
    two feature columns followed by one class-label column.

    Parameters
    ----------
    filename: str
        Path to .npy data file

    Returns
    -------
    X: ndarray of shape (n_samples, 2)
        Design matrix to be used

    y: ndarray of shape (n_samples,)
        Class vector specifying for each sample its class
    """
    samples = np.load(filename)
    features = samples[:, :2]
    labels = samples[:, 2].astype(int)
    return features, labels
def run_perceptron():
    """
    Fit and plot fit progression of the Perceptron algorithm over both the linearly separable and inseparable datasets

    Create a line plot that shows the perceptron algorithm's training loss values (y-axis)
    as a function of the training iterations (x-axis).
    """
    # NOTE(review): n (the human-readable dataset name) is unused below — the
    # figure title uses the file name f instead.
    for n, f in [("Linearly Separable", "linearly_separable.npy"),
                 ("Linearly Inseparable", "linearly_inseparable.npy")]:
        # Load dataset (last column = labels, remaining columns = features)
        dataset = np.load(file="../datasets/" + f)
        y = dataset[:, -1]
        X = dataset[:, :-1]
        # Fit Perceptron and record loss in each fit iteration
        losses = []
        # The callback ignores its own arguments and recomputes the loss over
        # the whole training set via the closure (one full pass per iteration).
        def perceptron_callback(per: Perceptron, sample: numpy.ndarray, response: int):
            losses.append(perceptron.loss(X, y))
        perceptron = Perceptron(callback=perceptron_callback)
        perceptron.fit(X, y)
        # Plot figure of loss as function of fitting iteration
        fig = go.Figure([go.Scatter(x=np.arange(1, len(losses) + 1), y=losses, mode="markers+lines",
                                    name="Loss per iteration")])
        fig.update_layout(
            title_text="Loss per iteration of dataset : " + f,
            xaxis={"title": "iteration"},
            yaxis={"title": "loss"})
        fig.show()
def get_ellipse(mu: np.ndarray, cov: np.ndarray):
    """
    Draw an ellipse centered at given location and according to specified covariance matrix

    Parameters
    ----------
    mu : ndarray of shape (2,)
        Center of ellipse

    cov: ndarray of shape (2,2)
        Covariance of Gaussian

    Returns
    -------
    scatter: A plotly trace object of the ellipse
    """
    # Eigenvalues in decreasing order set the two axis scales of the ellipse
    lam1, lam2 = np.linalg.eigvalsh(cov)[::-1]
    # Rotation angle of the principal axis
    if cov[0, 1] != 0:
        angle = atan2(lam1 - cov[0, 0], cov[0, 1])
    elif cov[0, 0] < cov[1, 1]:
        angle = np.pi / 2
    else:
        angle = 0
    ts = np.linspace(0, 2 * pi, 100)
    cos_a, sin_a = np.cos(angle), np.sin(angle)
    xs = lam1 * cos_a * np.cos(ts) - lam2 * sin_a * np.sin(ts)
    ys = lam1 * sin_a * np.cos(ts) + lam2 * cos_a * np.sin(ts)
    return go.Scatter(x=mu[0] + xs, y=mu[1] + ys, mode="lines", marker_color="black")
def compare_gaussian_classifiers():
    """
    Fit both Gaussian Naive Bayes and LDA classifiers on both gaussians1 and gaussians2 datasets
    """
    for f in ["gaussian1.npy", "gaussian2.npy"]:
        # Load dataset (last column = labels, remaining columns = features)
        dataset = np.load(file="../datasets/" + f)
        y = dataset[:, -1]
        X = dataset[:, :-1]
        # Fit both classifiers on the full dataset
        lda = LDA()
        lda.fit(X, y)
        gnb = GaussianNaiveBayes()
        gnb.fit(X, y)
        # Predict over the training set and compute plain accuracy
        lda_y_pred = lda.predict(X)
        gnb_y_pred = gnb.predict(X)
        lda_accuracy = np.mean(lda_y_pred == y)
        gnb_accuracy = np.mean(gnb_y_pred == y)
        # Plot a figure with two suplots, showing the Gaussian Naive Bayes predictions on the left and LDA predictions
        # on the right. Plot title should specify dataset used and subplot titles should specify algorithm and accuracy
        # Create subplots
        fig = make_subplots(rows=1, cols=2,
                            subplot_titles=["Gaussian naive Bayes, Accuracy: " + str(gnb_accuracy)
                                , "Linear discriminant analysis, Accuracy: " + str(lda_accuracy)],
                            horizontal_spacing=0.01, vertical_spacing=.03)
        fig.update_layout(title="Dataset: " + f, showlegend=False)
        # Marker color = predicted class, marker symbol = true class
        gnb_results = go.Scatter(x=X[:, 0], y=X[:, 1], mode="markers", showlegend=False,
                                 marker=dict(color=gnb_y_pred, symbol=y, size=15))
        gnb_data = [gnb_results]
        for i in range(gnb.classes_.shape[0]):
            # GNB assumes independent features: build a diagonal covariance
            # from the fitted per-class variances for the ellipse drawing
            cov_matrix = np.zeros(shape=(2, 2))
            cov_matrix[0][0] = gnb.vars_[i][0]
            cov_matrix[1][1] = gnb.vars_[i][1]
            # Black X marks the fitted class mean
            gnb_X = go.Scatter(x=[gnb.mu_[i][0]], y=[gnb.mu_[i][1]], mode="markers", showlegend=False,
                               marker=dict(color='black', symbol='x', size=30))
            gnb_ellipse = get_ellipse(gnb.mu_[i], cov_matrix)
            gnb_data.append(gnb_ellipse)
            gnb_data.append(gnb_X)
        fig.add_traces(gnb_data, rows=1, cols=1)
        lda_results = go.Scatter(x=X[:, 0], y=X[:, 1], mode="markers", showlegend=False,
                                 marker=dict(color=lda_y_pred, symbol=y, size=15))
        lda_data = [lda_results]
        for i in range(lda.classes_.shape[0]):
            # LDA shares one covariance matrix across all classes
            lda_ellipse = get_ellipse(lda.mu_[i], lda.cov_)
            lda_X = go.Scatter(x=[lda.mu_[i][0]], y=[lda.mu_[i][1]], mode="markers", showlegend=False,
                               marker=dict(color='black', symbol='x', size=30))
            lda_data.append(lda_ellipse)
            lda_data.append(lda_X)
        fig.add_traces(lda_data, rows=1, cols=2)
        fig.show()
if __name__ == '__main__':
    # Fixed seed so both exercises are reproducible
    np.random.seed(0)
    run_perceptron()
    compare_gaussian_classifiers()
| 36.879747 | 119 | 0.597391 |
c3cf537b3a65b2e1fa26932608545ff6d0816568 | 3,363 | py | Python | django_blog/settings.py | DLance96/django-blog | d9d92a877804915a553899833e576f27186d2bc0 | [
"MIT"
] | null | null | null | django_blog/settings.py | DLance96/django-blog | d9d92a877804915a553899833e576f27186d2bc0 | [
"MIT"
] | 3 | 2016-02-13T21:14:41.000Z | 2016-03-02T20:28:59.000Z | django_blog/settings.py | DLance96/django-blog | d9d92a877804915a553899833e576f27186d2bc0 | [
"MIT"
] | null | null | null | """
Django settings for django_blog project.
Generated by 'django-admin startproject' using Django 1.9.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): machine-specific absolute path — this breaks on any other
# host; consider reading the key location from an environment variable.
with open('/home/david/Documents/Projects/blogKey.txt') as f:
    SECRET_KEY = f.read().strip()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'blog'
]
# NOTE(review): MIDDLEWARE_CLASSES is the pre-1.10 setting name; newer Django
# versions use MIDDLEWARE instead.
MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'django_blog.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'django_blog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
MEDIA_URL = "/media/"
# NOTE(review): absolute, user-specific path — portable setups usually derive
# this from BASE_DIR.
MEDIA_ROOT = "/home/david/Documents/Projects/django-blog/blog/media/"
46b4731517588a716b1fa72e172955e1d627a728 | 9,480 | py | Python | bootstrap.py | linupi/silx | 51bc5cbc696880e7cf13feb3ff2476a5c32422d4 | [
"CC0-1.0",
"MIT"
] | 2 | 2020-03-09T15:50:17.000Z | 2020-03-09T15:50:23.000Z | bootstrap.py | linupi/silx | 51bc5cbc696880e7cf13feb3ff2476a5c32422d4 | [
"CC0-1.0",
"MIT"
] | 1 | 2020-03-12T13:11:59.000Z | 2020-03-12T13:53:55.000Z | bootstrap.py | JuliusHarald/silx | 3f9bcda88c074438fdb30cde29fec314d26f471c | [
"CC0-1.0",
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Bootstrap helps you to test scripts without installing them
by patching your PYTHONPATH on the fly
example: ./bootstrap.py ipython
"""
__authors__ = ["Frédéric-Emmanuel Picca", "Jérôme Kieffer"]
__contact__ = "jerome.kieffer@esrf.eu"
__license__ = "MIT"
__date__ = "26/07/2018"
import argparse
import distutils.util
import logging
import os
import subprocess
import sys
import tempfile
logging.basicConfig()
logger = logging.getLogger("bootstrap")
def is_debug_python():
    """Return True when the interpreter is a debug build.

    Debug builds either define the ``Py_DEBUG`` config variable or expose
    ``sys.gettotalrefcount`` (CPython only).
    """
    try:
        import sysconfig
    except ImportError:  # pragma nocover
        # Python < 2.7
        import distutils.sysconfig as sysconfig
    debug_build = bool(sysconfig.get_config_var("Py_DEBUG"))
    return debug_build or hasattr(sys, "gettotalrefcount")
def _distutils_dir_name(dname="lib"):
    """
    Returns the name of a distutils build directory, e.g.
    ``lib.linux-x86_64-3.8`` (with a ``-pydebug`` suffix for debug builds).

    :param str dname: Directory prefix (usually ``"lib"``)
    """
    import sysconfig
    # distutils.util.get_platform() is deprecated (PEP 632) and the whole
    # distutils package is removed in Python 3.12; sysconfig.get_platform()
    # returns the same platform string.
    platform = sysconfig.get_platform()
    architecture = "%s.%s-%i.%i" % (dname, platform,
                                    sys.version_info[0], sys.version_info[1])
    if is_debug_python():
        architecture += "-pydebug"
    return architecture
def _distutils_scripts_name():
"""Return the name of the distrutils scripts sirectory"""
f = "scripts-{version[0]}.{version[1]}"
return f.format(version=sys.version_info)
def _get_available_scripts(path):
res = []
try:
res = " ".join([s.rstrip('.py') for s in os.listdir(path)])
except OSError:
res = ["no script available, did you ran "
"'python setup.py build' before bootstrapping ?"]
return res
if sys.version_info[0] >= 3:  # Python3
    def execfile(fullpath, globals=None, locals=None):
        """Python3 replacement for the removed ``execfile`` builtin."""
        with open(fullpath) as stream:
            try:
                source = stream.read()
            except UnicodeDecodeError:
                # Not decodable as text: report it as "not a script"
                raise SyntaxError("Not a Python script")
        bytecode = compile(source, fullpath, 'exec')
        exec(bytecode, globals, locals)
def run_file(filename, argv):
    """
    Execute a script trying first to use execfile, then a subprocess

    :param str filename: Script to execute
    :param list[str] argv: Arguments passed to the filename
    """
    full_args = [filename]
    full_args.extend(argv)
    try:
        logger.info("Execute target using exec")
        # execfile is considered as a local call.
        # Providing globals() as locals will force to feed the file into
        # globals() (for examples imports).
        # Without this any function call from the executed file loses imports
        try:
            # Temporarily patch sys.argv so the script sees its own arguments
            old_argv = sys.argv
            sys.argv = full_args
            logger.info("Patch the sys.argv: %s", sys.argv)
            logger.info("Executing %s.main()", filename)
            print("########### EXECFILE ###########")
            module_globals = globals().copy()
            module_globals['__file__'] = filename
            execfile(filename, module_globals, module_globals)
        finally:
            sys.argv = old_argv
    except SyntaxError as error:
        # Not executable in-process (e.g. a binary or non-Python script):
        # fall back to running it as a child process with LIBPATH (module
        # global set in __main__) prepended to PYTHONPATH.
        logger.error(error)
        logger.info("Execute target using subprocess")
        env = os.environ.copy()
        env.update({"PYTHONPATH": LIBPATH + os.pathsep + os.environ.get("PYTHONPATH", ""),
                    "PATH": os.environ.get("PATH", "")})
        print("########### SUBPROCESS ###########")
        run = subprocess.Popen(full_args, shell=False, env=env)
        run.wait()
def run_entry_point(entry_point, argv):
    """
    Execute an entry_point using the current python context
    (http://setuptools.readthedocs.io/en/latest/setuptools.html#automatic-script-creation)

    :param str entry_point: A string identifying a function from a module
        (NAME = PACKAGE.MODULE:FUNCTION [EXTRA])
    :param list[str] argv: Arguments passed to the target function
    """
    import importlib
    # Parse "NAME = MODULE:FUNCTION [EXTRA]"
    pieces = entry_point.split("=")
    target_name = pieces[0].strip()
    spec = pieces[1].split(":")
    module_name = spec[0].strip()
    # Take care of entry_point optional "extra" requirements declaration
    function_name = spec[1].split()[0].strip()
    logger.info("Execute target %s (function %s from module %s) using importlib",
                target_name, function_name, module_name)
    patched_argv = [target_name] + list(argv)
    try:
        old_argv = sys.argv
        sys.argv = patched_argv
        print("########### IMPORTLIB ###########")
        module = importlib.import_module(module_name)
        if hasattr(module, function_name):
            getattr(module, function_name)()
        else:
            logger.info("Function %s not found", function_name)
    finally:
        sys.argv = old_argv
def find_executable(target):
    """Find a filename from a script name.

    - Check the script name as file path,
    - Then checks if the name is a target of the setup.py
    - Then search the script from the PATH environment variable.

    :param str target: Name of the script
    :returns: Returns a tuple: kind, name. ``(None, None)`` when not found.
    """
    if os.path.isfile(target):
        return ("path", os.path.abspath(target))
    # search the file from setup.py
    import setup
    config = setup.get_project_configuration(dry_run=True)
    # scripts from project configuration
    if "scripts" in config:
        for script_name in config["scripts"]:
            # BUGFIX: was ``os.path.basename(script)`` — ``script`` is not
            # defined in this scope and raised NameError whenever the project
            # declared plain scripts.
            if os.path.basename(script_name) == target:
                return ("path", os.path.abspath(script_name))
    # entry-points from project configuration
    if "entry_points" in config:
        for kind in config["entry_points"]:
            for entry_point in config["entry_points"][kind]:
                elements = entry_point.split("=")
                name = elements[0].strip()
                if name == target:
                    return ("entry_point", entry_point)
    # search the file from env PATH
    for dirname in os.environ.get("PATH", "").split(os.pathsep):
        path = os.path.join(dirname, target)
        if os.path.isfile(path):
            return ("path", path)
    return None, None
def main(argv):
    """Dispatch to Jupyter notebook, a project script, ``-m module``, or an
    interactive IPython shell, depending on the command line.

    NOTE(review): *argv* is never used — ``parser.parse_args()`` reads
    ``sys.argv`` directly; confirm before relying on the parameter.
    """
    parser = argparse.ArgumentParser(
        prog="bootstrap", usage="./bootstrap.py <script>", description=__doc__)
    parser.add_argument("script", nargs=argparse.REMAINDER)
    group = parser.add_mutually_exclusive_group()
    group.add_argument(
        "-m", nargs=argparse.REMAINDER, dest='module',
        help="run library module as a script (terminates option list)")
    group.add_argument(
        "-j", "--jupyter", action='store_true',
        help="Start jupyter notebook rather than IPython console")
    options = parser.parse_args()
    if options.jupyter:
        if options.script:
            logger.error("-j, --jupyter is mutually exclusive with other options")
            parser.print_help()
            return
        logger.info("Start Jupyter notebook")
        from notebook.notebookapp import main as notebook_main
        # Expose the freshly built library to the notebook kernels
        os.environ["PYTHONPATH"] = LIBPATH + os.pathsep + os.environ.get("PYTHONPATH", "")
        # Startup file removes the implicit cwd entry from sys.path in kernels
        filename = os.path.join(LIBPATH, '.__bootstrap_pythonstartup.py')
        with open(filename, 'w') as fp:
            fp.write('import sys; sys.path.pop(0)')
        os.environ["PYTHONSTARTUP"] = filename
        notebook_main(argv=[])
        try:
            os.remove(filename)
        # NOTE(review): bare except hides every error type; at least
        # ``except OSError`` would be safer here.
        except:
            logger.error("Cannot delete temporary file: %s", filename)
    elif options.script:
        logger.info("Executing %s from source checkout", options.script)
        script = options.script[0]
        argv = options.script[1:]
        kind, target = find_executable(script)
        if kind == "path":
            run_file(target, argv)
        elif kind == "entry_point":
            run_entry_point(target, argv)
        else:
            logger.error("Script %s not found", options.script)
    elif options.module:
        # NOTE(review): uses the root logger (logging.info) while the rest of
        # the function uses the module logger — likely unintentional.
        logging.info("Running module %s", options.module)
        import runpy
        module = options.module[0]
        try:
            old = sys.argv
            sys.argv = [None] + options.module[1:]
            runpy.run_module(module, run_name="__main__", alter_sys=True)
        finally:
            sys.argv = old
    else:
        logging.info("Running IPython by default")
        logger.info("Patch the sys.argv: %s", sys.argv)
        sys.path.insert(2, "")
        try:
            from IPython import start_ipython
        except Exception as err:
            # IPython not installed: degrade to the builtin interactive shell
            logger.error("Unable to execute iPython, using normal Python")
            logger.error(err)
            import code
            code.interact()
        else:
            start_ipython(argv=[])
if __name__ == "__main__":
    # Build the project in-place, then expose the build/lib directory through
    # sys.path (and the LIBPATH module global used by run_file/main).
    home = os.path.dirname(os.path.abspath(__file__))
    LIBPATH = os.path.join(home, 'build', _distutils_dir_name('lib'))
    cwd = os.getcwd()
    os.chdir(home)
    build = subprocess.Popen([sys.executable, "setup.py", "build"], shell=False)
    build_rc = build.wait()
    if not os.path.exists(LIBPATH):
        # Some setups build into a plain "lib" directory without the
        # platform/version suffix
        logger.warning("`lib` directory does not exist, trying common Python3 lib")
        LIBPATH = os.path.join(os.path.split(LIBPATH)[0], "lib")
    os.chdir(cwd)
    if build_rc == 0:
        logger.info("Build process ended.")
    else:
        logger.error("Build process ended with rc=%s", build_rc)
        sys.exit(-1)
    sys.path.insert(0, LIBPATH)
    logger.info("Patched sys.path with %s", LIBPATH)
    main(sys.argv)
| 32.689655 | 122 | 0.617722 |
d3ecc4d81194db16f95d80164759b3fd8ad6321c | 617 | py | Python | aoikconsulwatcher/src/aoikconsulwatcher/config.py | AoiKuiyuyou/AoikConsulWatcherHosts | 3b87bfa17738383828f7828c3ff11ffb882b9a00 | [
"MIT"
] | null | null | null | aoikconsulwatcher/src/aoikconsulwatcher/config.py | AoiKuiyuyou/AoikConsulWatcherHosts | 3b87bfa17738383828f7828c3ff11ffb882b9a00 | [
"MIT"
] | null | null | null | aoikconsulwatcher/src/aoikconsulwatcher/config.py | AoiKuiyuyou/AoikConsulWatcherHosts | 3b87bfa17738383828f7828c3ff11ffb882b9a00 | [
"MIT"
] | null | null | null | # coding: utf-8
from __future__ import absolute_import
import os
CONSUL_HOST = os.environ.get('CONSUL_HOST', '127.0.0.1')
CONSUL_PORT = int(os.environ.get('CONSUL_PORT', '8500'))
def handle_service_infos(service_infos):
    """Print a human-readable summary of Consul service information.

    For each service: a blank-line-prefixed name header, its tags on one
    line (when any), then one ``address:port`` line per registered node.
    """
    for name, info in service_infos.items():
        print('\nService name: {0}'.format(name))
        if info['tags']:
            print('Service tags: {0}'.format(' '.join(info['tags'])))
        for node in info['nodes']:
            print('{0}:{1}'.format(node['ServiceAddress'], node['ServicePort']))
| 24.68 | 61 | 0.619125 |
bed3ea087218a7308e9abedad86d646685777ed3 | 859 | py | Python | django_testing_tutorial_2018/django_testing_tutorial_2018/urls.py | bluebamus/django_pytest | 2a2e6b070e3f23616119f1661eee25722c770eb2 | [
"MIT"
] | null | null | null | django_testing_tutorial_2018/django_testing_tutorial_2018/urls.py | bluebamus/django_pytest | 2a2e6b070e3f23616119f1661eee25722c770eb2 | [
"MIT"
] | null | null | null | django_testing_tutorial_2018/django_testing_tutorial_2018/urls.py | bluebamus/django_pytest | 2a2e6b070e3f23616119f1661eee25722c770eb2 | [
"MIT"
] | null | null | null | """django_testing_tutorial_2018 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from products import views
urlpatterns = [
    # Product detail page, e.g. /3 -> products.views.product_detail(pk=3)
    path('<int:pk>',views.product_detail, name='detail'),
    path('admin/', admin.site.urls),
]
| 35.791667 | 77 | 0.71362 |
b425ab7b3a297c1eb812e7052a43c9f8657a8367 | 2,161 | py | Python | openstack/network/v2/qos_policy.py | nicolasochem/openstacksdk | 34ea72ce5b0b7f16a038ca57b2a9f1ec2f90ce00 | [
"Apache-2.0"
] | null | null | null | openstack/network/v2/qos_policy.py | nicolasochem/openstacksdk | 34ea72ce5b0b7f16a038ca57b2a9f1ec2f90ce00 | [
"Apache-2.0"
] | null | null | null | openstack/network/v2/qos_policy.py | nicolasochem/openstacksdk | 34ea72ce5b0b7f16a038ca57b2a9f1ec2f90ce00 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.network import network_service
from openstack.network.v2 import tag
from openstack import resource
from openstack import utils
class QoSPolicy(resource.Resource, tag.TagMixin):
    """Neutron QoS policy resource exposed under ``/qos/policies``."""
    resource_key = 'policy'
    resources_key = 'policies'
    base_path = '/qos/policies'
    service = network_service.NetworkService()
    # capabilities
    allow_create = True
    allow_get = True
    allow_update = True
    allow_delete = True
    allow_list = True
    _query_mapping = resource.QueryParameters(
        'name', 'description', 'is_default',
        project_id='tenant_id',
        is_shared='shared',
        **tag.TagMixin._tag_query_parameters
    )
    # Properties
    #: QoS policy name.
    name = resource.Body('name')
    #: The ID of the project who owns the network. Only administrative
    #: users can specify a project ID other than their own.
    project_id = resource.Body('tenant_id')
    #: The QoS policy description.
    description = resource.Body('description')
    #: Indicates whether this QoS policy is the default policy for this
    #: project.
    #: *Type: bool*
    is_default = resource.Body('is_default', type=bool)
    #: Indicates whether this QoS policy is shared across all projects.
    #: *Type: bool*
    is_shared = resource.Body('shared', type=bool)
    #: List of QoS rules applied to this QoS policy.
    rules = resource.Body('rules')
    def set_tags(self, session, tags):
        """Replace this policy's tags with *tags* and update the local body.

        NOTE(review): the PUT goes to ``/policies/<id>/tags`` rather than a
        URL derived from ``base_path`` (``/qos/policies``) — verify the
        endpoint is intentional.
        """
        url = utils.urljoin('/policies', self.id, 'tags')
        session.put(url, json={'tags': tags})
        self._body.attributes.update({'tags': tags})
        return self
| 34.854839 | 75 | 0.6969 |
73536aeacee18857cb18322e56e76a4c7ecdce59 | 4,844 | py | Python | Archive/train_no_yaml_efficientnetb0.py | lkm2835/ELimNet | c6eb9c26727967531a570ebabdd9dbf65404ebd6 | [
"MIT"
] | 6 | 2021-12-03T04:09:24.000Z | 2021-12-11T12:18:18.000Z | Archive/train_no_yaml_efficientnetb0.py | lkm2835/ELimNet | c6eb9c26727967531a570ebabdd9dbf65404ebd6 | [
"MIT"
] | 12 | 2021-12-04T06:42:13.000Z | 2021-12-07T04:59:56.000Z | Archive/train_no_yaml_efficientnetb0.py | lkm2835/ELimNet | c6eb9c26727967531a570ebabdd9dbf65404ebd6 | [
"MIT"
] | 2 | 2021-12-03T11:09:42.000Z | 2021-12-08T17:54:50.000Z | """
Eliminating pretrained ResNet18's top layers
- Author: snoop2head, JoonHong-Kim, lkm2835
- Reference: Junghoon Kim, Jongsun Shin's baseline code provided at https://stages.ai/competitions/81/overview/requirements
"""
import argparse
import os
from datetime import datetime
from typing import Any, Dict, Tuple, Union
import torch
import torch.nn as nn
import torch.optim as optim
import yaml
from collections import OrderedDict
from src.dataloader import create_dataloader
from src.loss import CustomCriterion
from src.model import Model
from src.trainer import TorchTrainer
from src.utils.common import get_label_counts, read_yaml
from src.utils.torch_utils import check_runtime, model_info
from src.modules.mbconv import MBConvGenerator
from torch.utils.model_zoo import load_url as load_state_dict_from_url
from adamp import SGDP
from torchvision import models
import glob
class ElimResNet18(nn.Module):
    """Pretrained ResNet-18 with its two deepest stages removed.

    ``layer3`` and ``layer4`` are deleted and the classification head is
    replaced by a fresh ``Linear(128, 6)``; 128 is the channel count that
    ``layer2`` of ResNet-18 produces, 6 the number of target classes.

    NOTE(review): ``models.resnet18(pretrained=True)`` downloads weights on
    first use and the ``pretrained`` flag is deprecated in newer torchvision
    releases (use ``weights=``).
    """
    def __init__(self):
        super(ElimResNet18, self).__init__()
        self.model = models.resnet18(pretrained=True)
        del self.model.layer3
        del self.model.layer4
        # replace fully connected layers
        self.num_in_features = 128
        self.model.fc = nn.Linear(self.num_in_features, 6)
    def _forward_impl(self, x):
        # Standard ResNet stem, then only the first two residual stages
        x = self.model.conv1(x)
        x = self.model.bn1(x)
        x = self.model.relu(x)
        x = self.model.maxpool(x)
        x = self.model.layer1(x)
        x = self.model.layer2(x)
        # x = self.model.layer3(x)
        # x = self.model.layer4(x)
        x = self.model.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.model.fc(x)
        return x
    def forward(self, x):
        return self._forward_impl(x)
def train(
    model_config: Dict[str, Any],
    data_config: Dict[str, Any],
    log_dir: str,
    fp16: bool,
    device: torch.device,
) -> Tuple[float, float, float]:
    """Train an ``ElimResNet18`` and return (test_loss, test_f1, test_acc).

    NOTE(review): *model_config* is accepted but never used in this body —
    the architecture is fixed to ``ElimResNet18``.
    """
    # save model_config, data_config
    with open(os.path.join(log_dir, "data.yml"), "w") as f:
        yaml.dump(data_config, f, default_flow_style=False)
    newmodel = ElimResNet18()
    print("======changed model======")
    print(model_info(newmodel))
    # move model to device
    print(device)
    newmodel.to(device)
    # Create dataloader
    train_dl, val_dl, test_dl = create_dataloader(data_config)
    model_path = os.path.join(log_dir, f"best_teacher.pt")
    # Create optimizer, scheduler, criterion
    optimizer = SGDP(newmodel.parameters(), lr=data_config["INIT_LR"], momentum=0.9)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(
        optimizer=optimizer,
        max_lr=data_config["INIT_LR"],
        steps_per_epoch=len(train_dl),
        epochs=data_config["EPOCHS"],
        pct_start=0.05,
    )
    # Class-balanced loss only for the TACO dataset (needs per-class counts)
    criterion = CustomCriterion(
        samples_per_cls=get_label_counts(data_config["DATA_PATH"])
        if data_config["DATASET"] == "TACO"
        else None,
        device=device,
    )
    # Amp loss scaler (mixed precision only on GPU)
    scaler = torch.cuda.amp.GradScaler() if fp16 and device != torch.device("cpu") else None
    # Create trainer
    trainer = TorchTrainer(
        model=newmodel,
        criterion=criterion,
        optimizer=optimizer,
        scheduler=scheduler,
        scaler=scaler,
        device=device,
        model_path=model_path,
        verbose=1,
    )
    # best_acc/best_f1 are currently unused; the final evaluation below uses
    # the model state at the end of training
    best_acc, best_f1 = trainer.train(
        train_dataloader=train_dl,
        n_epoch=data_config["EPOCHS"],
        val_dataloader=val_dl if val_dl else test_dl,
    )
    # evaluate model with test set
    # model_instance.model.load_state_dict(torch.load(model_path))
    test_loss, test_f1, test_acc = trainer.test(
        model=newmodel, test_dataloader=val_dl if val_dl else test_dl
    )
    return test_loss, test_f1, test_acc
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Train model.")
    parser.add_argument("--data", default="./data/taco.yaml", type=str, help="data config")
    args = parser.parse_args()
    data_config = read_yaml(cfg=args.data)
    # SageMaker-style env vars override the config paths when present
    data_config["DATA_PATH"] = os.environ.get("SM_CHANNEL_TRAIN", data_config["DATA_PATH"])
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    log_dir = os.environ.get("SM_MODEL_DIR", os.path.join("teacher", "latest"))
    if os.path.exists(log_dir):
        # Archive a previous run: rename its dir after the checkpoint's mtime.
        # find *.pt file in log_dir
        # NOTE(review): raises IndexError when log_dir exists but contains no
        # *.pt file — confirm that case cannot occur.
        previous_model_path = glob.glob(os.path.join(log_dir, "*.pt"))[0]
        modified = datetime.fromtimestamp(os.path.getmtime(previous_model_path))
        new_log_dir = os.path.dirname(log_dir) + "/" + modified.strftime("%Y-%m-%d_%H-%M-%S")
        os.rename(log_dir, new_log_dir)
    os.makedirs(log_dir, exist_ok=True)
    test_loss, test_f1, test_acc = train(
        model_config=None,
        data_config=data_config,
        log_dir=log_dir,
        fp16=data_config["FP16"],
        device=device,
    )
| 31.051282 | 123 | 0.672378 |
4f632e8d15228d7e4a3e09f5f2c44f323615056e | 4,533 | py | Python | venv/Lib/site-packages/astropy/utils/tests/test_misc.py | temelkirci/Motion_Editor | a8b8d4c4d2dcc9be28385600f56066cef92a38ad | [
"MIT"
] | 1 | 2022-03-02T17:07:20.000Z | 2022-03-02T17:07:20.000Z | venv/Lib/site-packages/astropy/utils/tests/test_misc.py | temelkirci/RealTime_6DOF_Motion_Editor | a8b8d4c4d2dcc9be28385600f56066cef92a38ad | [
"MIT"
] | null | null | null | venv/Lib/site-packages/astropy/utils/tests/test_misc.py | temelkirci/RealTime_6DOF_Motion_Editor | a8b8d4c4d2dcc9be28385600f56066cef92a38ad | [
"MIT"
] | null | null | null | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import json
import os
from datetime import datetime
import locale
import pytest
import numpy as np
from .. import data, misc
def test_isiterable():
assert misc.isiterable(2) is False
assert misc.isiterable([2]) is True
assert misc.isiterable([1, 2, 3]) is True
assert misc.isiterable(np.array(2)) is False
assert misc.isiterable(np.array([1, 2, 3])) is True
def test_signal_number_to_name_no_failure():
# Regression test for #5340: ensure signal_number_to_name throws no
# AttributeError (it used ".iteritems()" which was removed in Python3).
misc.signal_number_to_name(0)
@pytest.mark.remote_data
def test_api_lookup():
strurl = misc.find_api_page('astropy.utils.misc', 'dev', False, timeout=3)
objurl = misc.find_api_page(misc, 'dev', False, timeout=3)
assert strurl == objurl
assert strurl == 'http://devdocs.astropy.org/utils/index.html#module-astropy.utils.misc'
def test_skip_hidden():
path = data._find_pkg_data_path('data')
for root, dirs, files in os.walk(path):
assert '.hidden_file.txt' in files
assert 'local.dat' in files
# break after the first level since the data dir contains some other
# subdirectories that don't have these files
break
for root, dirs, files in misc.walk_skip_hidden(path):
assert '.hidden_file.txt' not in files
assert 'local.dat' in files
break
def test_JsonCustomEncoder():
from ... import units as u
assert json.dumps(np.arange(3), cls=misc.JsonCustomEncoder) == '[0, 1, 2]'
assert json.dumps(1+2j, cls=misc.JsonCustomEncoder) == '[1.0, 2.0]'
assert json.dumps(set([1, 2, 1]), cls=misc.JsonCustomEncoder) == '[1, 2]'
assert json.dumps(b'hello world \xc3\x85',
cls=misc.JsonCustomEncoder) == '"hello world \\u00c5"'
assert json.dumps({1: 2},
cls=misc.JsonCustomEncoder) == '{"1": 2}' # default
assert json.dumps({1: u.m}, cls=misc.JsonCustomEncoder) == '{"1": "m"}'
# Quantities
tmp = json.dumps({'a': 5*u.cm}, cls=misc.JsonCustomEncoder)
newd = json.loads(tmp)
tmpd = {"a": {"unit": "cm", "value": 5.0}}
assert newd == tmpd
tmp2 = json.dumps({'a': np.arange(2)*u.cm}, cls=misc.JsonCustomEncoder)
newd = json.loads(tmp2)
tmpd = {"a": {"unit": "cm", "value": [0., 1.]}}
assert newd == tmpd
tmp3 = json.dumps({'a': np.arange(2)*u.erg/u.s}, cls=misc.JsonCustomEncoder)
newd = json.loads(tmp3)
tmpd = {"a": {"unit": "erg / s", "value": [0., 1.]}}
assert newd == tmpd
def test_inherit_docstrings():
class Base(metaclass=misc.InheritDocstrings):
def __call__(self, *args):
"FOO"
pass
class Subclass(Base):
def __call__(self, *args):
pass
if Base.__call__.__doc__ is not None:
# TODO: Maybe if __doc__ is None this test should be skipped instead?
assert Subclass.__call__.__doc__ == "FOO"
def test_set_locale():
# First, test if the required locales are available
current = locale.setlocale(locale.LC_ALL)
try:
locale.setlocale(locale.LC_ALL, str('en_US'))
locale.setlocale(locale.LC_ALL, str('de_DE'))
except locale.Error as e:
pytest.skip('Locale error: {}'.format(e))
finally:
locale.setlocale(locale.LC_ALL, current)
date = datetime(2000, 10, 1, 0, 0, 0)
day_mon = date.strftime('%a, %b')
with misc.set_locale('en_US'):
assert date.strftime('%a, %b') == 'Sun, Oct'
with misc.set_locale('de_DE'):
assert date.strftime('%a, %b') == 'So, Okt'
# Back to original
assert date.strftime('%a, %b') == day_mon
with misc.set_locale(current):
assert date.strftime('%a, %b') == day_mon
def test_check_broadcast():
    """misc.check_broadcast resolves compatible shapes and raises otherwise."""
    compatible = [
        (((10, 1), (3,)), (10, 3)),
        (((10, 1), (3,), (4, 1, 1, 3)), (4, 1, 10, 3)),
    ]
    for shapes, expected in compatible:
        assert misc.check_broadcast(*shapes) == expected

    incompatible = [
        ((10, 2), (3,)),
        ((10, 1), (3,), (4, 1, 2, 3)),
    ]
    for shapes in incompatible:
        with pytest.raises(ValueError):
            misc.check_broadcast(*shapes)
def test_dtype_bytes_or_chars():
    """misc.dtype_bytes_or_chars reports the itemsize in bytes or characters."""
    sized_cases = [
        (np.dtype(np.float64), 8),
        (np.dtype(np.int32), 4),
        (np.array(b'12345').dtype, 5),   # bytes dtype: length in bytes
        (np.array(u'12345').dtype, 5),   # unicode dtype: length in characters
    ]
    for dtype, expected in sized_cases:
        assert misc.dtype_bytes_or_chars(dtype) == expected
    # Object dtypes have no fixed width.
    assert misc.dtype_bytes_or_chars(np.dtype(object)) is None
| 33.577778 | 92 | 0.638209 |
fa7679eefe81928ab9637f7bd58d288796fceec3 | 382 | py | Python | nicos_sinq/boa/setups/table2.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | nicos_sinq/boa/setups/table2.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | nicos_sinq/boa/setups/table2.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | description = 'BOA Table 2'
# EPICS PV name prefix for the devices declared below.
pvprefix = 'SQ:BOA:mcu2:'
# NICOS device declarations for BOA sample table 2.
devices = dict(
    t2tx = device('nicos.devices.epics.EpicsReadable',
        description = 'Table 2 x translation',
        readpv = pvprefix + 'T2TX',
    ),
    Table2 = device('nicos_sinq.boa.devices.boatable.BoaTable',
        description = 'Table 2',
        standard_devices = [
            't2tx',
        ]
    ),
)
| 22.470588 | 63 | 0.575916 |
3333d8aa3b3e07a0dc9b7addc0b087fe0a2feb90 | 7,947 | py | Python | selvbetjening/core/members/migrations/0007_remove_user_communication.py | animekita/selvbetjening | fee63d178fbd5ce2976c04d3a4b2dde6d8691892 | [
"MIT"
] | null | null | null | selvbetjening/core/members/migrations/0007_remove_user_communication.py | animekita/selvbetjening | fee63d178fbd5ce2976c04d3a4b2dde6d8691892 | [
"MIT"
] | 3 | 2020-02-11T21:54:59.000Z | 2021-06-10T17:35:21.000Z | selvbetjening/core/members/migrations/0007_remove_user_communication.py | animekita/selvbetjening | fee63d178fbd5ce2976c04d3a4b2dde6d8691892 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: drop the legacy members_usercommunication table."""
    def forwards(self, orm):
        """Remove the UserCommunication table and its (method, user) unique constraint."""
        # Removing unique constraint on 'UserCommunication', fields ['method', 'user']
        try:
            db.delete_unique(u'members_usercommunication', ['method', 'user_id'])
        except ValueError:
            # Constraint may already be gone; deleting the table below still proceeds.
            pass
        # Deleting model 'UserCommunication'
        db.delete_table(u'members_usercommunication')
    def backwards(self, orm):
        """Recreate the UserCommunication table and its unique constraint."""
        # Adding model 'UserCommunication'
        db.create_table(u'members_usercommunication', (
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], db_column='user_id')),
            ('identification', self.gf('django.db.models.fields.CharField')(max_length=255)),
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('method', self.gf('django.db.models.fields.CharField')(max_length=12)),
        ))
        db.send_create_signal(u'members', ['UserCommunication'])
        # Adding unique constraint on 'UserCommunication', fields ['method', 'user']
        db.create_unique(u'members_usercommunication', ['method', 'user_id'])
    # Frozen ORM state South uses to build the ``orm`` argument passed above.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'countries.country': {
            'Meta': {'ordering': "('name',)", 'object_name': 'Country', 'db_table': "'country'"},
            'iso': ('django.db.models.fields.CharField', [], {'max_length': '2', 'primary_key': 'True'}),
            'iso3': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'numcode': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
            'printable_name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
        },
        u'members.userlocation': {
            'Meta': {'object_name': 'UserLocation'},
            'expired': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'lat': ('django.db.models.fields.FloatField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
            'lng': ('django.db.models.fields.FloatField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
            'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'location'", 'unique': 'True', 'to': u"orm['auth.User']"})
        },
        u'members.userprofile': {
            'Meta': {'object_name': 'UserProfile', '_ormbases': [u'auth.User']},
            'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'country': ('django.db.models.fields.related.ForeignKey', [], {'default': "'DK'", 'to': u"orm['countries.Country']", 'null': 'True', 'blank': 'True'}),
            'dateofbirth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'jabber': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'msn': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'phonenumber': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
            'picture': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
            'postalcode': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'send_me_email': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'sex': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '6', 'blank': 'True'}),
            'skype': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'street': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            u'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True', 'primary_key': 'True'})
        },
        u'members.userwebsite': {
            'Meta': {'object_name': 'UserWebsite'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'db_column': "'user_id'"})
        }
    }
    complete_apps = ['members']
459ee3677aec429180413079964ab1100187bc34 | 2,189 | py | Python | python/tests/unit/io/test_dart_loader.py | lakshmipathyarjun6/dart | 0cb60d4c9ff99129b8a0dffb1747f68944b677f4 | [
"BSD-2-Clause"
] | 1 | 2020-04-06T05:24:54.000Z | 2020-04-06T05:24:54.000Z | python/tests/unit/io/test_dart_loader.py | lakshmipathyarjun6/dart | 0cb60d4c9ff99129b8a0dffb1747f68944b677f4 | [
"BSD-2-Clause"
] | null | null | null | python/tests/unit/io/test_dart_loader.py | lakshmipathyarjun6/dart | 0cb60d4c9ff99129b8a0dffb1747f68944b677f4 | [
"BSD-2-Clause"
] | null | null | null | import platform
import pytest
import dartpy
from dartpy.io import DartLoader
import os
from tests.util import get_asset_path
def test_parse_skeleton_non_existing_path_returns_null():
    """A path that does not exist yields no skeleton."""
    # Sanity check that the asset directory itself is reachable.
    assert os.path.isfile(get_asset_path('skel/cubes.skel'))
    missing = get_asset_path('skel/test/does_not_exist.urdf')
    assert DartLoader().parseSkeleton(missing) is None
def test_parse_skeleton_invalid_urdf_returns_null():
    """A malformed URDF file yields no skeleton."""
    path = get_asset_path('urdf/invalid.urdf')
    assert DartLoader().parseSkeleton(path) is None
def test_parse_skeleton_missing_mesh_returns_null():
    """A URDF referencing a missing mesh file yields no skeleton."""
    path = get_asset_path('urdf/missing_mesh.urdf')
    assert DartLoader().parseSkeleton(path) is None
def test_parse_skeleton_invalid_mesh_returns_null():
    """A URDF referencing a corrupt mesh file yields no skeleton."""
    path = get_asset_path('urdf/invalid_mesh.urdf')
    assert DartLoader().parseSkeleton(path) is None
def test_parse_skeleton_missing_package_returns_null():
    """A URDF using an unregistered package:// URI yields no skeleton."""
    path = get_asset_path('urdf/missing_package.urdf')
    assert DartLoader().parseSkeleton(path) is None
def test_parse_skeleton_loads_primitive_geometry():
    """A URDF built from primitive shapes parses into a valid skeleton."""
    path = get_asset_path('urdf/test/primitive_geometry.urdf')
    assert DartLoader().parseSkeleton(path) is not None
# Failing with following errors:
# TypeError: No to_python (by-value) converter found for C++ type: std::shared_ptr<dart::simulation::World>
#
# def test_parse_world():
# loader = DartLoader()
# assert loader.parseWorld(get_asset_path('urdf/testWorld.urdf')) is not None
def test_parse_joint_properties():
    """joint_properties.urdf parses into a non-null skeleton."""
    path = get_asset_path('urdf/test/joint_properties.urdf')
    robot = DartLoader().parseSkeleton(path)
    assert robot is not None
    # NOTE: the detailed per-joint checks (damping, friction, limits,
    # cyclicity) are currently disabled upstream and intentionally omitted.
if __name__ == "__main__":
    # Allow running this test module directly, outside the pytest CLI.
    pytest.main()
| 31.271429 | 107 | 0.751028 |
633516036af3a4c3e8ae99b1bb4a34061f655d7d | 959 | py | Python | Reacher-PyBullet/00_Random_Gym.py | hyunjun529/Learn-OpenAI-GYM | 51e1f3dc4cdfa7582690fc8338918aeb9671f4e3 | [
"MIT"
] | null | null | null | Reacher-PyBullet/00_Random_Gym.py | hyunjun529/Learn-OpenAI-GYM | 51e1f3dc4cdfa7582690fc8338918aeb9671f4e3 | [
"MIT"
] | null | null | null | Reacher-PyBullet/00_Random_Gym.py | hyunjun529/Learn-OpenAI-GYM | 51e1f3dc4cdfa7582690fc8338918aeb9671f4e3 | [
"MIT"
] | null | null | null | import gym
from gym import wrappers

# Run a random policy in Reacher-v1, logging each transition to ./log/.
env = gym.make('Reacher-v1')
env.reset()
env.render()

outdir = './log/'

f_act = open(outdir + 'log_act.txt', 'w')
f_obs = open(outdir + 'log_obs.txt', 'w')
f_rwd = open(outdir + 'log_rwd.txt', 'w')
f_info = open(outdir + 'log_info.txt', 'w')

env = wrappers.Monitor(env, directory=outdir, force=True)

try:
    for i_episode in range(101):
        observation = env.reset()
        for t in range(100):
            env.render()
            # action selection: uniformly random sample from the action space
            action = env.action_space.sample()
            # take the action and observe the reward and next state
            observation, reward, done, info = env.step(action)
            # log the transition, one line per step
            f_act.write(str(action) + "\n")
            f_obs.write(str(observation) + "\n")
            f_rwd.write(str(reward) + "\n")
            f_info.write(str(info) + "\n")
            if done:
                print("Episode finished after {} timesteps".format(t + 1))
                break
finally:
    # BUG FIX: ``env.monitor.close()`` fails on a Monitor-wrapped env; the
    # wrapper is closed through ``env.close()``.  Also close the log files,
    # which were previously leaked.
    env.close()
    for f in (f_act, f_obs, f_rwd, f_info):
        f.close()
| 25.236842 | 68 | 0.606882 |
97ff54cbd69145ec09dce5c4357346c1945bedb7 | 176 | py | Python | backend/apps/question/views/__init__.py | xingxingzaixian/python-django-online-exam | c504e121814061cbf7647d4d916f363230239f17 | [
"MIT"
] | 47 | 2021-09-23T00:52:49.000Z | 2022-03-27T12:57:28.000Z | backend/apps/question/views/__init__.py | xingxingzaixian/django-drf-online-exam | c504e121814061cbf7647d4d916f363230239f17 | [
"MIT"
] | null | null | null | backend/apps/question/views/__init__.py | xingxingzaixian/django-drf-online-exam | c504e121814061cbf7647d4d916f363230239f17 | [
"MIT"
] | 17 | 2021-09-02T12:12:21.000Z | 2022-03-27T10:28:27.000Z | from .category import CategoryViewset
from .level import LevelViewset
from .option import OptionViewset
from .question import QuestionViewset
from .type_ import TypeViewset | 35.2 | 38 | 0.840909 |
53b4ec7ae6b7c5b6c8556a265e0195d981f2259e | 550 | py | Python | ectypes/block_id.py | wenbobuaa/pykit | 43e38fe40297a1e7a9329bcf3db3554c7ca48ead | [
"MIT"
] | null | null | null | ectypes/block_id.py | wenbobuaa/pykit | 43e38fe40297a1e7a9329bcf3db3554c7ca48ead | [
"MIT"
] | null | null | null | ectypes/block_id.py | wenbobuaa/pykit | 43e38fe40297a1e7a9329bcf3db3554c7ca48ead | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
# coding: utf-8
from .block_group_id import BlockGroupID
from .block_index import BlockIndex
from .idbase import IDBase
from .drive_id import DriveID
class BlockID(IDBase):
    """48-character string identifier of a single data block.

    Layout: ``<type:2><block_group_id:16><block_index:4><drive_id:16><block_id_seq:10>``.
    """
    # (attribute name, start offset, end offset, parser) for each ID segment.
    _attrs = (
        ('type', 0, 2, str),
        ('block_group_id', 2, 18, BlockGroupID),
        ('block_index', 18, 22, BlockIndex),
        ('drive_id', 22, 38, DriveID),
        ('block_id_seq', 38, 48, int),
    )
    # Total length of a rendered block id string.
    _str_len = 48
    # Render format; the sequence number is left-zero-padded to 10 digits.
    _tostr_fmt = '{type}{block_group_id}{block_index}{drive_id}{block_id_seq:0>10}'
1bba933f6aa70f96c691d488e0a97e79ac27ec74 | 1,311 | py | Python | notification_django_app/users/tests/test_admin.py | rajat-np/notification-django-app | 37929b654cc8bceadb73b50897297ef49f1af7c4 | [
"MIT"
] | null | null | null | notification_django_app/users/tests/test_admin.py | rajat-np/notification-django-app | 37929b654cc8bceadb73b50897297ef49f1af7c4 | [
"MIT"
] | 1 | 2022-02-28T23:10:02.000Z | 2022-02-28T23:10:02.000Z | notification_django_app/users/tests/test_admin.py | rajat-np/notification-django-app | 37929b654cc8bceadb73b50897297ef49f1af7c4 | [
"MIT"
] | null | null | null | import pytest
from django.urls import reverse
from notification_django_app.users.models import User
pytestmark = pytest.mark.django_db
class TestUserAdmin:
    """Smoke tests for the custom User model's Django admin integration."""

    def test_changelist(self, admin_client):
        response = admin_client.get(reverse("admin:users_user_changelist"))
        assert response.status_code == 200

    def test_search(self, admin_client):
        response = admin_client.get(
            reverse("admin:users_user_changelist"), data={"q": "test"}
        )
        assert response.status_code == 200

    def test_add(self, admin_client):
        url = reverse("admin:users_user_add")
        assert admin_client.get(url).status_code == 200

        payload = {
            "username": "test",
            "password1": "My_R@ndom-P@ssw0rd",
            "password2": "My_R@ndom-P@ssw0rd",
        }
        response = admin_client.post(url, data=payload)
        assert response.status_code == 302
        assert User.objects.filter(username="test").exists()

    def test_view_user(self, admin_client):
        user = User.objects.get(username="admin")
        url = reverse("admin:users_user_change", kwargs={"object_id": user.pk})
        assert admin_client.get(url).status_code == 200
d37d8eefe11b41697c0e628bbf79015e77390bb0 | 2,299 | py | Python | Config.py | meuns/Sandbox | 8d5cde8a079d156b6dc5c3531d036b4ce278bbae | [
"BSD-3-Clause"
] | null | null | null | Config.py | meuns/Sandbox | 8d5cde8a079d156b6dc5c3531d036b4ce278bbae | [
"BSD-3-Clause"
] | null | null | null | Config.py | meuns/Sandbox | 8d5cde8a079d156b6dc5c3531d036b4ce278bbae | [
"BSD-3-Clause"
] | null | null | null | # coding: utf8
from random import random, seed
# Hell yeah !
# Fixed seed keeps the jittered geometry below reproducible between runs;
# restyling the order of random() calls would change the generated data.
seed(42)
# Compute shader settings
RAY_COUNT = 128 * 16
RAY_GROUP_SIZE = 128
RAY_DIR_COUNT = 128
RAY_DIR_GROUP_SIZE = 64
# Ray batch 0: rays start on a horizontal line and share one direction.
RAY_INIT_DX = -1.0
RAY_INIT_DY = -1.0
RAY_INIT_JITTER_OX = 0.0
RAY0_DATA_OX = [-0.9 + (i / RAY_COUNT) * 1.8 + random() * RAY_INIT_JITTER_OX for i in range(RAY_COUNT)]
RAY0_DATA_OY = [+0.3] * RAY_COUNT
RAY0_DATA_DX = [RAY_INIT_DX] * RAY_COUNT
RAY0_DATA_DY = [RAY_INIT_DY] * RAY_COUNT
#RAY0_DATA_OX = RAY0_DATA_OX[100:101]
#RAY0_DATA_OY = RAY0_DATA_OY[100:101]
#RAY0_DATA_DX = RAY0_DATA_DX[100:101]
#RAY0_DATA_DY = RAY0_DATA_DY[100:101]
#RAY_COUNT = len(RAY0_DATA_OX)
#RAY_GROUP_SIZE = 1
# World 0
#WORLD_INT_LINE_COUNT = 256
#WORLD_INT_X_JITTER = 0.0
#WORLD_INT_Y_JITTER = 0.005
#
#INTX_POINT_COUNT = WORLD_INT_LINE_COUNT + 1
#INT_X = [WORLD_INT_X_JITTER * random() + -1.0 + (i / WORLD_INT_LINE_COUNT) * 2.0 for i in range(INTX_POINT_COUNT)]
#INT_Y = [WORLD_INT_Y_JITTER * random() + -0.2 for i in range(INTX_POINT_COUNT)]
#WORLD_LINE_COUNT = WORLD_INT_LINE_COUNT
# World 1
#WORLD_INT_LINE_COUNT = 128
#WORLD_INT_X_JITTER = 0.0
#WORLD_INT_Y_JITTER = -0.5
#
#INTX_POINT_COUNT = WORLD_INT_LINE_COUNT + 1
#INT0_X = [WORLD_INT_X_JITTER * random() + -1.0 + (i / WORLD_INT_LINE_COUNT) * 2.0 for i in range(INTX_POINT_COUNT)]
#INT0_Y = [WORLD_INT_Y_JITTER * random() + -0.2 for i in range(INTX_POINT_COUNT)]
#INT_X = [INT0_X]
#INT_Y = [INT0_Y]
#WORLD_LINE_COUNT = len(INT_X[0]) - 1
# World 2: two polylines, split evenly between the total line budget.
WORLD_LINE_COUNT = 1024
WORLD_INT_X_JITTER = 0.0
WORLD_INT_Y_JITTER = 0.01
INTX_LINE_COUNT = WORLD_LINE_COUNT // 2
INTX_POINT_COUNT = INTX_LINE_COUNT + 1
INT0_X = [WORLD_INT_X_JITTER * random() - 2.0 + (i / INTX_LINE_COUNT) * 4.0 for i in range(INTX_POINT_COUNT)]
INT0_Y = [WORLD_INT_Y_JITTER * random() + 0.2 for i in range(INTX_POINT_COUNT)]
INT0_IOR_I = [1.0] * INTX_LINE_COUNT
INT0_IOR_T = [1.4] * INTX_LINE_COUNT
INT1_X = [WORLD_INT_X_JITTER * random() - 2.0 + (i / INTX_LINE_COUNT) * 4.0 for i in range(INTX_POINT_COUNT)]
INT1_Y = [WORLD_INT_Y_JITTER * random() - 0.2 for i in range(INTX_POINT_COUNT)]
INT1_IOR_I = [1.4] * INTX_LINE_COUNT
INT1_IOR_T = [1.0] * INTX_LINE_COUNT
# Aggregated per-interface arrays consumed by the renderer.
INT_X = [INT0_X, INT1_X]
INT_Y = [INT0_Y, INT1_Y]
INT_IOR_I = [INT0_IOR_I, INT1_IOR_I]
INT_IOR_T = [INT0_IOR_T, INT1_IOR_T]
| 30.653333 | 116 | 0.735102 |
bbd28cae758fedf6d656a8a23dc8d34a55cb3461 | 470 | py | Python | python/15873_WithoutSpaceA+B.py | anothel/BOJ | cfc693322e609d319aaa8705d4375d098c034b76 | [
"MIT"
] | null | null | null | python/15873_WithoutSpaceA+B.py | anothel/BOJ | cfc693322e609d319aaa8705d4375d098c034b76 | [
"MIT"
] | null | null | null | python/15873_WithoutSpaceA+B.py | anothel/BOJ | cfc693322e609d319aaa8705d4375d098c034b76 | [
"MIT"
] | null | null | null | from sys import stdin, stdout
def main():
    """Read A+B written without a separating space and print the sum.

    Both A and B are natural numbers <= 10 (BOJ 15873), so B is 10 exactly
    when the input ends in "10"; otherwise B is the final digit and A is
    everything before it.  The original hand-rolled digit loop merely
    re-implemented ``int()``.
    """
    s = stdin.readline().strip()
    if s.endswith("10"):
        # B == 10; whatever precedes it (possibly nothing) is A.
        a = int(s[:-2]) if s[:-2] else 0
        print(a + 10)
    else:
        # B is the last digit; the remaining prefix is A.
        a = int(s[:-1]) if s[:-1] else 0
        print(a + int(s[-1]))
if __name__ == "__main__":
    # Script entry point: the online judge runs this file directly.
    main()
| 17.407407 | 41 | 0.453191 |
883e2785c331ce6d84d767a0344e427c1aca3900 | 8,745 | py | Python | djangomom/base/mixin.py | emiamar/d | abfd0ca81224a1259fdfac92ed21ad771d901e18 | [
"BSD-3-Clause"
] | null | null | null | djangomom/base/mixin.py | emiamar/d | abfd0ca81224a1259fdfac92ed21ad771d901e18 | [
"BSD-3-Clause"
] | 2 | 2018-02-27T07:56:18.000Z | 2018-03-09T12:45:48.000Z | djangomom/base/mixin.py | emiamar/d | abfd0ca81224a1259fdfac92ed21ad771d901e18 | [
"BSD-3-Clause"
] | 2 | 2018-02-21T07:43:04.000Z | 2018-11-10T18:09:26.000Z | import datetime
from django.utils import timezone
from django.contrib import messages
from django.shortcuts import HttpResponseRedirect
from django.http import Http404
from django.core.exceptions import ImproperlyConfigured
from project.models import Project
from project.forms import ProjectForm
import logging
logger = logging.getLogger(__name__)
class GeneralContextMixin(object):
    """Add the current user's name, their projects and an empty project form
    to every view's template context."""

    def get_context_data(self, **kwargs):
        # BUG FIX: removed leftover ``logger.debug('Hello just testing logs')``
        # placeholder that spammed the log on every request.
        context = super(GeneralContextMixin, self).get_context_data(**kwargs)
        context["username"] = self.request.user.username
        context['projects'] = Project.objects.filter(
            account__user=self.request.user)
        context['project_form'] = ProjectForm()
        return context
class CustomRedirectMixin(object):
    """Build a success URL of the form ``<app_url><pk><page_url>``.

    Subclasses must define both ``app_url`` and ``page_url``.
    """
    app_url = None
    page_url = None

    def get_success_url(self):
        """Return app_url + pk (from URL kwargs) + page_url."""
        # BUG FIX: previously raised only when BOTH attributes were None
        # ('and'), letting a half-configured subclass build a URL
        # containing the literal string "None".  The old message also named
        # an unrelated class ("GenericModalCreateView").
        if self.app_url is None or self.page_url is None:
            raise ImproperlyConfigured(
                "CustomRedirectMixin requires definitions of "
                "'app_url' and 'page_url'")
        return "{0}{1}{2}".format(
            self.app_url,
            self.kwargs.get('pk'),
            self.page_url)
class DeleteMixin(object):
    """Bulk-delete ``model`` rows whose PKs arrive in the POSTed
    ``for_action`` list, then redirect to the success URL."""
    model = None
    app_url = None
    page_url = None
    object_name = None

    def post(self, request, *args, **kwargs):
        """Delete the selected objects (when 'delete' was submitted) and redirect."""
        for_action = request.POST.getlist('for_action')
        objects = self.model.objects.filter(pk__in=for_action)
        if not objects:
            messages.warning(
                request,
                "Select atleast one %s" % self.object_name)
            return HttpResponseRedirect(self.get_success_url())
        if 'delete' in request.POST:
            for obj in objects:
                obj.delete()
            messages.warning(
                request,
                "Following %s are deleted %s" % (self.object_name, objects)
            )
        # BUG FIX: a POST with selections but without 'delete' previously
        # returned None (an invalid view response); always redirect.
        return HttpResponseRedirect(self.get_success_url())

    def get_success_url(self):
        """Return app_url + pk (from URL kwargs) + page_url."""
        # BUG FIX: require BOTH parts ('or', not 'and'); old message named
        # an unrelated class.
        if self.app_url is None or self.page_url is None:
            raise ImproperlyConfigured(
                "DeleteMixin requires definitions of "
                "'app_url' and 'page_url'")
        return "{0}{1}{2}".format(
            self.app_url,
            self.kwargs.get('pk'),
            self.page_url)
class DeleteLastObjectMixin(object):
    """On GET, delete the last object of ``obj.<related_obj>`` for the
    ``model`` instance addressed by the URL's pk, then redirect."""
    related_obj = None
    object_name = None
    app_url = None
    page_url = None
    model = None

    def get(self, request, *args, **kwargs):
        try:
            obj = self.model.objects.get(pk=self.kwargs.get('pk'))
        except Exception:
            # BUG FIX: was a bare ``except:`` which also swallowed
            # KeyboardInterrupt/SystemExit.  Ideally this should catch
            # self.model.DoesNotExist (and pk-conversion errors) only.
            raise Http404
        related = getattr(obj, self.related_obj)
        # Evaluate ``last()`` once instead of twice (avoids a second query
        # and a delete/read race).
        last = related.last()
        if last:
            last.delete()
            messages.warning(
                request,
                "Deleted last %s" % (self.object_name)
            )
        else:
            messages.warning(
                request,
                "No roll"
            )
        return HttpResponseRedirect(self.get_success_url())

    def get_success_url(self):
        """Return app_url + pk (from URL kwargs) + page_url."""
        # BUG FIX: require BOTH parts ('or', not 'and'); old message named
        # an unrelated class.
        if self.app_url is None or self.page_url is None:
            raise ImproperlyConfigured(
                "DeleteLastObjectMixin requires definitions of "
                "'app_url' and 'page_url'")
        return "{0}{1}{2}".format(
            self.app_url,
            self.kwargs.get('pk'),
            self.page_url)
class DateQueryMixin(object):
    """ListView mixin: filter ``self.model`` by a POSTed date range.

    Expects ``to_date``/``from_date`` (format ``%m/%d/%Y``) in
    ``request.POST`` and sets ``self.object_list`` to the matching rows,
    newest first.
    """
    # NOTE(review): DateRangeForm is not imported anywhere in this module
    # and will raise NameError at runtime -- restore the missing import.

    def post(self, request, *args, **kwargs):
        logger.debug("Its Post DateQuerying ")
        form = DateRangeForm(request.POST)
        if form.is_valid():
            to_date = form.cleaned_data['to_date']
            from_date = form.cleaned_data['from_date']
            logger.info("Form is valid with {0}-{1}".format(from_date, to_date))
            # Only today or past dates are accepted.
            if to_date <= timezone.now().date() and from_date <= timezone.now().date():
                if to_date == from_date:
                    # Single-day query: match year/month/day exactly.
                    to_date = datetime.datetime.strptime(request.POST["to_date"], "%m/%d/%Y")
                    self.object_list = self.model.objects.filter(
                        created_at__year=to_date.year,
                        created_at__month=to_date.month,
                        created_at__day=to_date.day).order_by('-created_at')
                else:
                    # Push the upper bound forward a day so the whole
                    # 'to' day is included in the range.
                    date_range = [from_date,
                                  to_date + datetime.timedelta(hours=24)]
                    self.object_list = self.model.objects.filter(
                        created_at__range=date_range).order_by('-created_at')
            else:
                logger.debug("Improper Date Range")
                messages.warning(request,
                                 "Improper dates try other dates")
                # BUG FIX: was ``self.get(self, request, ...)`` which passed
                # ``self`` twice and shifted every argument.
                return self.get(request, *args, **kwargs)
        else:
            logger.debug("Form is invalid {0}".format(form.errors))
            messages.warning(request, "Improper dates try other dates")
            return self.get(request, *args, **kwargs)
        context = self.get_context_data(
            to_date=request.POST["to_date"],
            from_date=request.POST["from_date"]
        )
        return self.render_to_response(context)
class DateQueryTemplateViewMixin(object):
    """TemplateView mixin: query ``date_query_model`` by a POSTed date range.

    The matching rows (optionally narrowed by ``get_more_filters``) are put
    into the context under ``query_context_object_name``.
    """
    # NOTE(review): DateRangeForm is not imported anywhere in this module
    # and will raise NameError at runtime -- restore the missing import.
    date_query_model = None
    query_context_object_name = None

    def post(self, request, *args, **kwargs):
        logger.debug("Its Post DateQuerying ")
        form = DateRangeForm(request.POST)
        if form.is_valid():
            to_date = form.cleaned_data['to_date']
            from_date = form.cleaned_data['from_date']
            logger.info("Form is valid with {0}-{1}".format(from_date, to_date))
            # Only today or past dates are accepted.
            if to_date <= timezone.now().date() and from_date <= timezone.now().date():
                if to_date == from_date:
                    # Single-day query: match year/month/day exactly.
                    to_date = datetime.datetime.strptime(request.POST["to_date"], "%m/%d/%Y")
                    object_list = self.date_query_model.objects.filter(
                        created_at__year=to_date.year,
                        created_at__month=to_date.month,
                        created_at__day=to_date.day).order_by('-created_at')
                else:
                    # Push the upper bound forward a day so the whole
                    # 'to' day is included in the range.
                    date_range = [from_date,
                                  to_date + datetime.timedelta(hours=24)]
                    object_list = self.date_query_model.objects.filter(
                        created_at__range=date_range).order_by('-created_at')
            else:
                logger.debug("Improper Date Range")
                messages.warning(request,
                                 "Improper dates try other dates")
                # BUG FIX: was ``self.get(self, request, ...)`` which passed
                # ``self`` twice and shifted every argument.
                return self.get(request, *args, **kwargs)
        else:
            logger.debug("Form is invalid {0}".format(form.errors))
            messages.warning(request, "Improper dates try other dates")
            return self.get(request, *args, **kwargs)
        filtered_list = self.get_more_filters(object_list)
        query_context_object_name = self.query_context_object_name
        context = self.get_context_data(
            to_date=request.POST["to_date"],
            from_date=request.POST["from_date"]
        )
        context[query_context_object_name] = filtered_list
        return self.render_to_response(context)

    def get_more_filters(self, object_list):
        """Hook for subclasses to further narrow the queryset; default no-op."""
        return object_list
class ForActionMixin(object):
    """Collect ``model`` rows selected via the POSTed ``for_action`` PK list
    and hand them to ``do_action``; warn and redirect when none selected."""
    model = None
    app_url = None
    page_url = None
    object_name = None

    def post(self, request, *args, **kwargs):
        for_action = request.POST.getlist('for_action')
        objects = self.model.objects.filter(pk__in=for_action)
        if not objects:
            messages.warning(
                request,
                "Select atleast one %s" % self.object_name)
            return HttpResponseRedirect(self.get_success_url())
        return self.do_action(objects)

    def do_action(self, objects):
        """Hook for subclasses; the default just redirects."""
        return HttpResponseRedirect(self.get_success_url())

    def get_success_url(self):
        """Return app_url + pk (from URL kwargs) + page_url."""
        # BUG FIX: require BOTH parts ('or', not 'and'); old message named
        # an unrelated class.
        if self.app_url is None or self.page_url is None:
            raise ImproperlyConfigured(
                "ForActionMixin requires definitions of "
                "'app_url' and 'page_url'")
        return "{0}{1}{2}".format(
            self.app_url,
            self.kwargs.get('pk'),
            self.page_url)
| 36.743697 | 124 | 0.58239 |
1361b0965c84614f6e10bd225991a0d089ef1cea | 70 | py | Python | plugins/palo_alto_cortex_xdr/icon_palo_alto_cortex_xdr/actions/block_file/__init__.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 46 | 2019-06-05T20:47:58.000Z | 2022-03-29T10:18:01.000Z | plugins/palo_alto_cortex_xdr/icon_palo_alto_cortex_xdr/actions/block_file/__init__.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 386 | 2019-06-07T20:20:39.000Z | 2022-03-30T17:35:01.000Z | plugins/palo_alto_cortex_xdr/icon_palo_alto_cortex_xdr/actions/block_file/__init__.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 43 | 2019-07-09T14:13:58.000Z | 2022-03-28T12:04:46.000Z | # GENERATED BY KOMAND SDK - DO NOT EDIT
from .action import BlockFile
| 23.333333 | 39 | 0.771429 |
5fbf5e5710e923aa090b74f22046b882402ad815 | 639 | py | Python | dask_ml/preprocessing/__init__.py | souravsingh/dask-ml | 37eca7d335509c2a4aa9332aa454f57092318487 | [
"BSD-3-Clause"
] | null | null | null | dask_ml/preprocessing/__init__.py | souravsingh/dask-ml | 37eca7d335509c2a4aa9332aa454f57092318487 | [
"BSD-3-Clause"
] | null | null | null | dask_ml/preprocessing/__init__.py | souravsingh/dask-ml | 37eca7d335509c2a4aa9332aa454f57092318487 | [
"BSD-3-Clause"
] | null | null | null | """Utilties for Preprocessing data.
"""
from .._compat import SK_VERSION
from packaging.version import parse
from .data import (
StandardScaler,
MinMaxScaler,
RobustScaler,
QuantileTransformer,
Categorizer,
DummyEncoder,
OrdinalEncoder,
)
from .label import LabelEncoder
__all__ = [
"StandardScaler",
"MinMaxScaler",
"RobustScaler",
"QuantileTransformer",
"Categorizer",
"DummyEncoder",
"OrdinalEncoder",
"LabelEncoder",
]
if SK_VERSION >= parse("0.20.0.dev0"):
from ._encoders import OneHotEncoder # noqa
__all__.append("OneHotEncoder")
del SK_VERSION
del parse
| 17.27027 | 48 | 0.691706 |
49981cc604fc5ea32d1e30349cd852873df7e36b | 297 | py | Python | src/ransacflow/util.py | liuyenting/RANSAC-Flow | 2f348a4e31b6087ad2d9c0e5e35d5ad382f2a7b3 | [
"Apache-2.0"
] | 4 | 2021-11-14T09:47:40.000Z | 2021-12-21T21:10:31.000Z | src/ransacflow/util.py | liuyenting/RANSAC-Flow | 2f348a4e31b6087ad2d9c0e5e35d5ad382f2a7b3 | [
"Apache-2.0"
] | 6 | 2021-12-08T20:26:32.000Z | 2021-12-16T02:26:30.000Z | src/ransacflow/util.py | liuyenting/RANSAC-Flow | 2f348a4e31b6087ad2d9c0e5e35d5ad382f2a7b3 | [
"Apache-2.0"
] | null | null | null | from pathlib import Path
def get_project_root() -> Path:
# ransacflow src /
return Path(__file__).parent.parent.parent
def get_model_root() -> Path:
return get_project_root() / "models"
def get_data_root() -> Path:
return get_project_root() / "data"
| 18.5625 | 46 | 0.632997 |
0668e263c6a78f763f6e8c47e1102c09184c85e7 | 597 | py | Python | tests/test_lineset.py | bluetyson/omf | ec40877da5761b1102163346f7c507050e36c1b4 | [
"MIT"
] | null | null | null | tests/test_lineset.py | bluetyson/omf | ec40877da5761b1102163346f7c507050e36c1b4 | [
"MIT"
] | null | null | null | tests/test_lineset.py | bluetyson/omf | ec40877da5761b1102163346f7c507050e36c1b4 | [
"MIT"
] | 1 | 2021-07-08T01:44:08.000Z | 2021-07-08T01:44:08.000Z | """Tests for LineSet validation"""
import numpy as np
import pytest
import omf
def test_lineset():
"""Test lineset geometry validation"""
elem = omf.lineset.LineSetElement()
elem.vertices = np.random.rand(10, 3)
elem.segments = np.random.randint(9, size=[5, 2])
assert elem.validate()
assert elem.location_length('vertices') == 10
assert elem.location_length('segments') == 5
elem.segments.array[0, 0] = -1
with pytest.raises(ValueError):
elem.validate()
elem.segments.array[0, 0] = 10
with pytest.raises(ValueError):
elem.validate()
| 27.136364 | 53 | 0.666667 |
4f592acac203cdc72af25fc4d0c9ba545d8d6212 | 210 | py | Python | tests/test_packaging.py | moi90/torch_testing | 90bfba23f72bb2a8586c19ae255daf245ea47384 | [
"MIT"
] | 5 | 2018-11-26T08:57:26.000Z | 2018-12-05T09:07:45.000Z | tests/test_packaging.py | moi90/torch_testing | 90bfba23f72bb2a8586c19ae255daf245ea47384 | [
"MIT"
] | null | null | null | tests/test_packaging.py | moi90/torch_testing | 90bfba23f72bb2a8586c19ae255daf245ea47384 | [
"MIT"
] | 1 | 2020-12-16T14:29:54.000Z | 2020-12-16T14:29:54.000Z | import unittest
import torch_testing as tt
class TestPackaging(unittest.TestCase):
def test_name(self):
self.assertEqual(tt.name, 'torch_testing')
if __name__ == '__main__':
unittest.main()
| 17.5 | 50 | 0.719048 |
ba3774498bed14511ee0965b653be9722ff6ce03 | 3,333 | py | Python | lnbits/tasks.py | claytantor/lnbits-legend | 129409296a235c9b0cdf9d591ab165d6709fb46f | [
"MIT"
] | null | null | null | lnbits/tasks.py | claytantor/lnbits-legend | 129409296a235c9b0cdf9d591ab165d6709fb46f | [
"MIT"
] | null | null | null | lnbits/tasks.py | claytantor/lnbits-legend | 129409296a235c9b0cdf9d591ab165d6709fb46f | [
"MIT"
] | null | null | null | import time
import trio
import traceback
from http import HTTPStatus
from quart import current_app
from typing import List, Callable
from lnbits.settings import WALLET
from lnbits.core.crud import (
get_payments,
get_standalone_payment,
delete_expired_invoices,
get_balance_checks,
)
from lnbits.core.services import redeem_lnurl_withdraw
# Callables queued here are started later by ``run_deferred_async``.
deferred_async: List[Callable] = []


def record_async(func: Callable) -> Callable:
    """Return a one-argument callback that queues *func* for deferred start.

    The returned callback ignores its ``state`` argument; invoking it simply
    appends *func* to ``deferred_async``.
    """
    def _enqueue(state):
        deferred_async.append(func)

    return _enqueue
def run_deferred_async():
    # Start every queued callable on the app nursery; each one is wrapped in
    # catch_everything_and_restart so a crash restarts it instead of dying.
    for func in deferred_async:
        current_app.nursery.start_soon(catch_everything_and_restart, func)
async def catch_everything_and_restart(func):
    """Run *func*, restarting it 5 seconds after any unexpected exception."""
    try:
        await func()
    except trio.Cancelled:
        raise  # because we must pass this up
    except Exception as exc:
        print("caught exception in background task:", exc)
        print(traceback.format_exc())
        print("will restart the task in 5 seconds.")
        await trio.sleep(5)
        # NOTE(review): each restart awaits a recursive call, so a task that
        # keeps crashing will deepen the stack one frame per restart.
        await catch_everything_and_restart(func)
async def send_push_promise(a, b) -> None:
    # Intentionally a no-op placeholder; both arguments are ignored.
    pass
# Channels registered by extensions; invoice_callback_dispatcher sends every
# settled incoming payment to each of them.
invoice_listeners: List[trio.MemorySendChannel] = []


def register_invoice_listener(send_chan: trio.MemorySendChannel):
    """
    A method intended for extensions to call when they want to be notified about
    new invoice payments incoming.
    """
    invoice_listeners.append(send_chan)
async def webhook_handler():
    """Delegate to the active wallet's ``webhook_listener``, if it defines one.

    Wallet backends without a webhook listener get an empty 204 response.
    """
    listener = getattr(WALLET, "webhook_listener", None)
    if listener is None:
        return "", HTTPStatus.NO_CONTENT
    return await listener()
# Zero-capacity channel pair: senders block until a receiver takes the item.
internal_invoice_paid, internal_invoice_received = trio.open_memory_channel(0)


async def internal_invoice_listener():
    # Dispatch checking ids arriving on the internal memory channel.
    async for checking_id in internal_invoice_received:
        current_app.nursery.start_soon(invoice_callback_dispatcher, checking_id)


async def invoice_listener():
    # Dispatch checking ids streamed from the funding-source backend.
    async for checking_id in WALLET.paid_invoices_stream():
        #print("> got a payment notification", checking_id)
        current_app.nursery.start_soon(invoice_callback_dispatcher, checking_id)
async def check_pending_payments():
    """Periodically re-verify payments still marked pending.

    Purges expired invoices once at startup, then every 30 minutes re-checks
    pending payments from the last 15 days against the funding source.
    """
    await delete_expired_invoices()

    outgoing = True
    incoming = True
    while True:
        for payment in await get_payments(
            since=(int(time.time()) - 60 * 60 * 24 * 15),  # 15 days ago
            complete=False,
            pending=True,
            outgoing=outgoing,
            incoming=incoming,
            exclude_uncheckable=True,
        ):
            print(f"check_pending_payments - checking payment {payment.checking_id}")
            await payment.check_pending()

        # after the first check we will only check outgoing, not incoming
        # that will be handled by the global invoice listeners, hopefully
        incoming = False
        await trio.sleep(60 * 30)  # every 30 minutes
async def perform_balance_checks():
    """Every 6 hours, redeem every configured balance-check LNURL."""
    while True:
        for bc in await get_balance_checks():
            # Bug fix: ``redeem_lnurl_withdraw`` is a coroutine function
            # (imported from lnbits.core.services); without ``await`` the
            # call only created a coroutine object and no withdraw ever ran.
            await redeem_lnurl_withdraw(bc.wallet, bc.url)

        await trio.sleep(60 * 60 * 6)  # every 6 hours
async def invoice_callback_dispatcher(checking_id: str):
    """Mark an incoming payment as settled and broadcast it to listeners."""
    payment = await get_standalone_payment(checking_id)
    # Only incoming (``is_in``) payments are dispatched; outgoing ones are
    # handled by check_pending_payments.
    if payment and payment.is_in:
        await payment.set_pending(False)
        for send_chan in invoice_listeners:
            await send_chan.send(payment)
| 27.545455 | 85 | 0.70297 |
0194571469512822fdac11b5f807242d0bda8e5a | 3,333 | py | Python | kafka/settings.py | raybesiga/kafka-on-the-shore | 3d1d152630b53855a4fc1daf92c9036de36ab2a9 | [
"MIT"
] | null | null | null | kafka/settings.py | raybesiga/kafka-on-the-shore | 3d1d152630b53855a4fc1daf92c9036de36ab2a9 | [
"MIT"
] | null | null | null | kafka/settings.py | raybesiga/kafka-on-the-shore | 3d1d152630b53855a4fc1daf92c9036de36ab2a9 | [
"MIT"
] | null | null | null | """
Django settings for kafka project.
Generated by 'django-admin startproject' using Django 2.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'rfg40uk&p32nm6+m!&x+%l(^n63ru_+jxz(-omxt3b17)l+&bq'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# kafka specific
'webpack_loader'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'kafka.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, "templates"), ],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'kafka.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
WEBPACK_LOADER = {
'DEFAULT': {
'BUNDLE_DIR_NAME': 'bundles/',
'STATS_FILE': os.path.join(BASE_DIR, 'webpack-stats.dev.json'),
}
}
| 25.442748 | 91 | 0.686469 |
8c208b929ae53c605a749563f0cef9369e19376e | 2,630 | py | Python | examples/multi_clients/notif/src/notif/__init__.py | mardiros/pyramid-blacksmith | ff338776fc8802c9f46475fba619aa1f9413aba0 | [
"BSD-3-Clause"
] | null | null | null | examples/multi_clients/notif/src/notif/__init__.py | mardiros/pyramid-blacksmith | ff338776fc8802c9f46475fba619aa1f9413aba0 | [
"BSD-3-Clause"
] | 5 | 2022-01-09T18:54:14.000Z | 2022-01-23T22:22:23.000Z | examples/multi_clients/notif/src/notif/__init__.py | mardiros/pyramid-blacksmith | ff338776fc8802c9f46475fba619aa1f9413aba0 | [
"BSD-3-Clause"
] | null | null | null | import email as emaillib
import smtplib
from textwrap import dedent
from blacksmith.sd._sync.adapters.consul import SyncConsulDiscovery
from notif.resources.user import User
from pyramid.config import Configurator
smtp_sd = SyncConsulDiscovery()
def send_email(user: User, message: str):
    """Send *message* to *user* over SMTP, resolved via Consul service "smtp"."""
    email_content = dedent(
        f"""\
        Subject: notification
        From: notification@localhost
        To: "{user.firstname} {user.lastname}" <{user.email}>
        {message}
        """
    )
    msg = emaillib.message_from_string(email_content)
    srv = smtp_sd.resolve("smtp", None)
    # XXX Synchronous socket here, OK for the example
    # real code should use aiosmtplib
    s = smtplib.SMTP(srv.address, int(srv.port))
    s.send_message(msg)
    s.quit()
def post_notif_using_static(request):
    """Notify a user, resolving ``api_user`` with the static blacksmith client."""
    if request.method == "GET":
        return {"detail": "Use POST to test the static driver"}
    body = request.json
    api_user = request.blacksmith.client_static("api_user")
    user: User = (api_user.users.get({"username": body["username"]})).response
    send_email(user, body["message"])
    return {"detail": f"{user.email} accepted"}
def post_notif_using_consul(request):
    """Notify a user, resolving ``api_user`` through the Consul driver."""
    if request.method == "GET":
        return {"detail": "Use POST to test the consul driver"}

    payload = request.json
    client = request.blacksmith.client_consul("api_user")
    user: User = client.users.get({"username": payload["username"]}).response
    send_email(user, payload["message"])
    return {"detail": f"{user.email} accepted"}
def post_notif_using_router(request):
    """Notify a user, resolving ``api_user`` through the router driver."""
    if request.method == "GET":
        return {"detail": "Use POST to test the router driver"}
    body = request.json
    api_user = request.blacksmith.client_router("api_user")
    user: User = (api_user.users.get({"username": body["username"]})).response
    send_email(user, body["message"])
    return {"detail": f"{user.email} accepted"}
def main(global_config, **settings):
    """Build the pyramid WSGI App.

    One endpoint per blacksmith service-discovery driver:
    v1 -> static, v2 -> consul, v3 -> router.
    """
    with Configurator(settings=settings) as config:
        config.add_route("notify_v1", "/v1/notification")
        # Bug fix: v1 was wired to the consul handler, which left
        # ``post_notif_using_static`` unreferenced; v1 is the static driver.
        config.add_view(
            post_notif_using_static, route_name="notify_v1", renderer="json"
        )
        config.add_route("notify_v2", "/v2/notification")
        config.add_view(
            post_notif_using_consul, route_name="notify_v2", renderer="json"
        )
        config.add_route("notify_v3", "/v3/notification")
        config.add_view(
            post_notif_using_router, route_name="notify_v3", renderer="json"
        )
        app = config.make_wsgi_app()
    return app
| 30.581395 | 78 | 0.66616 |
c33a944c108ac9f7a233a9e57f71284983415696 | 1,397 | py | Python | hipchat/komand_hipchat/actions/post/schema.py | xhennessy-r7/insightconnect-plugins | 59268051313d67735b5dd3a30222eccb92aca8e9 | [
"MIT"
] | null | null | null | hipchat/komand_hipchat/actions/post/schema.py | xhennessy-r7/insightconnect-plugins | 59268051313d67735b5dd3a30222eccb92aca8e9 | [
"MIT"
] | null | null | null | hipchat/komand_hipchat/actions/post/schema.py | xhennessy-r7/insightconnect-plugins | 59268051313d67735b5dd3a30222eccb92aca8e9 | [
"MIT"
] | null | null | null | # GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Input:
MESSAGE = "message"
ROOM_ID_OR_NAME = "room_id_or_name"
class Output:
ID = "id"
TIMESTAMP = "timestamp"
class PostInput(komand.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"message": {
"type": "string",
"title": "Message",
"description": "The message post to room. Valid length range: 1 - 1000",
"order": 2
},
"room_id_or_name": {
"type": "string",
"title": "Room Id or Name",
"description": "The id or url encoded name of the room",
"order": 1
}
},
"required": [
"room_id_or_name",
"message"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class PostOutput(komand.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"id": {
"type": "string",
"title": "Message Id",
"description": "The unique identifier of the sent message",
"order": 1
},
"timestamp": {
"type": "string",
"title": "Timestamp",
"description": "The utc timestamp representing when the message was processed",
"order": 2
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
| 19.957143 | 85 | 0.561918 |
a125f6d19642517c322a092bbd6eda31c3542f50 | 221 | py | Python | napari_nucleaizer/misc.py | etasnadi/napari_nucleaizer | 6bfe4b9a6cfc82963f4a2a45fa9be1a70482b04b | [
"BSD-3-Clause"
] | null | null | null | napari_nucleaizer/misc.py | etasnadi/napari_nucleaizer | 6bfe4b9a6cfc82963f4a2a45fa9be1a70482b04b | [
"BSD-3-Clause"
] | 2 | 2022-02-14T22:37:12.000Z | 2022-03-17T12:33:26.000Z | napari_nucleaizer/misc.py | etasnadi/napari_nucleaizer | 6bfe4b9a6cfc82963f4a2a45fa9be1a70482b04b | [
"BSD-3-Clause"
] | null | null | null | import json
def json_load(file):
    """Parse the JSON document stored at *file* and return the result."""
    with open(file) as fp:
        return json.load(fp)
def json_save(file, data):
    """Serialize *data* into *file* as JSON with 4-space indentation."""
    text = json.dumps(data, indent=4)
    with open(file, 'w') as fp:
        fp.write(text)
| 17 | 37 | 0.597285 |
dd067c35bbcf76e6479e1608f824cec50fbfb08a | 8,048 | py | Python | python/tvm/contrib/xcode.py | jiangzoi/incubator-tvm | 144c6f45f7217b9df2f5605e06f0903e470ac11c | [
"Apache-2.0"
] | 9 | 2019-12-17T08:03:54.000Z | 2022-01-19T02:34:23.000Z | python/tvm/contrib/xcode.py | jiangzoi/incubator-tvm | 144c6f45f7217b9df2f5605e06f0903e470ac11c | [
"Apache-2.0"
] | 2 | 2020-09-14T09:18:25.000Z | 2020-09-24T03:28:18.000Z | python/tvm/contrib/xcode.py | jiangzoi/incubator-tvm | 144c6f45f7217b9df2f5605e06f0903e470ac11c | [
"Apache-2.0"
] | 3 | 2020-10-04T20:30:18.000Z | 2022-01-24T18:03:52.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Utility to invoke Xcode compiler toolchain"""
from __future__ import absolute_import as _abs
import os
import sys
import subprocess
import json
from .._ffi.base import py_str
from . import util
def xcrun(cmd):
    """Invoke ``xcrun`` with *cmd* appended and return its combined output.

    Parameters
    ----------
    cmd : list of str
        The command sequence passed to ``xcrun``.

    Returns
    -------
    out : bytes
        stdout and stderr of the process, stripped of surrounding whitespace.
    """
    full_cmd = ["xcrun"] + cmd
    proc = subprocess.Popen(
        full_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    out, _ = proc.communicate()
    return out.strip()
def codesign(lib):
    """Codesign the shared library.

    This is a required step for the library to be loaded in
    the app.

    Parameters
    ----------
    lib : str
        The path to the library.

    Raises
    ------
    RuntimeError
        If TVM_IOS_CODESIGN is unset or the codesign tool fails.
    """
    if "TVM_IOS_CODESIGN" not in os.environ:
        raise RuntimeError("Require environment variable TVM_IOS_CODESIGN "
                           " to be the signature")
    signature = os.environ["TVM_IOS_CODESIGN"]
    cmd = ["codesign", "--force", "--sign", signature]
    cmd += [lib]
    proc = subprocess.Popen(cmd,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    (out, _) = proc.communicate()
    if proc.returncode != 0:
        msg = "Codesign error:\n"
        msg += py_str(out)
        raise RuntimeError(msg)
def create_dylib(output, objects, arch, sdk="macosx"):
    """Create dynamic library.

    Parameters
    ----------
    output : str
        The target shared library.

    objects : list or str
        List of object files (or a single object file path).

    arch : str
        Target major architectures

    sdk : str
        The sdk to be used.
    """
    # Resolve the SDK-specific clang and sysroot via xcrun.
    clang = xcrun(["-sdk", sdk, "-find", "clang"])
    sdk_path = xcrun(["-sdk", sdk, "--show-sdk-path"])
    cmd = [clang]
    cmd += ["-dynamiclib"]
    cmd += ["-arch", arch]
    cmd += ["-isysroot", sdk_path]
    cmd += ["-o", output]
    if isinstance(objects, str):
        cmd += [objects]
    else:
        cmd += objects

    proc = subprocess.Popen(
        cmd, stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT)
    (out, _) = proc.communicate()

    if proc.returncode != 0:
        msg = "Compilation error:\n"
        msg += py_str(out)
        raise RuntimeError(msg)


# assign so as default output format
create_dylib.output_format = "dylib"
def compile_metal(code, path_target=None, sdk="macosx"):
    """Compile metal with CLI tool from env.

    Parameters
    ----------
    code : str
        The metal source code.

    path_target : str, optional
        Output file.

    sdk : str, optional
        The target platform SDK.

    Return
    ------
    metallib : bytearray
        The bytearray of the metallib, or None on compilation failure.
    """
    temp = util.tempdir()
    temp_code = temp.relpath("my_lib.metal")
    temp_ir = temp.relpath("my_lib.air")
    temp_target = temp.relpath("my_lib.metallib")

    with open(temp_code, "w") as out_file:
        out_file.write(code)
    file_target = path_target if path_target else temp_target

    # See:
    # - https://developer.apple.com/documentation/metal/gpu_functions_libraries/building_a_library_with_metal_s_command-line_tools#overview  # pylint: disable=line-too-long
    #
    #   xcrun -sdk macosx metal -c MyLibrary.metal -o MyLibrary.air
    #   xcrun -sdk macosx metallib MyLibrary.air -o MyLibrary.metallib
    cmd1 = ["xcrun", "-sdk", sdk, "metal", "-O3"]
    cmd1 += ["-c", temp_code, "-o", temp_ir]
    cmd2 = ["xcrun", "-sdk", sdk, "metallib"]
    cmd2 += [temp_ir, "-o", file_target]
    proc = subprocess.Popen(
        ' '.join(cmd1) + ";" + ' '.join(cmd2),
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT)
    (out, _) = proc.communicate()
    if proc.returncode != 0:
        sys.stderr.write("Compilation error:\n")
        sys.stderr.write(py_str(out))
        sys.stderr.flush()
        libbin = None
    else:
        # Fix: the file object was previously opened without ever being
        # closed; use a context manager so the handle is released.
        with open(file_target, "rb") as lib_file:
            libbin = bytearray(lib_file.read())
    return libbin
def compile_coreml(model, model_name="main", out_dir="."):
    """Compile coreml model and return the compiled model path.

    Saves *model* as ``<model_name>.mlmodel`` in *out_dir*, then invokes
    ``xcrun coremlcompiler`` to produce ``<model_name>.mlmodelc``.
    """
    mlmodel_path = os.path.join(out_dir, model_name + ".mlmodel")
    mlmodelc_path = os.path.join(out_dir, model_name + ".mlmodelc")
    metadata = {
        "inputs": list(model.input_description),
        "outputs": list(model.output_description)
    }
    # Use the description field to send info to CoreML runtime
    model.short_description = json.dumps(metadata)
    model.save(mlmodel_path)

    res = xcrun(["coremlcompiler", "compile", mlmodel_path, out_dir])
    # coremlcompiler signals failure only through its output; verify the
    # compiled bundle directory actually exists.
    if not os.path.isdir(mlmodelc_path):
        raise RuntimeError("Compile failed: %s" % res)

    return mlmodelc_path
class XCodeRPCServer(object):
    """Wrapper for RPC server

    Launches *cmd* as a subprocess and holds *lock* until it finishes.

    Parameters
    ----------
    cmd : list of str
        The command to run

    lock: FileLock
        Lock on the path
    """
    def __init__(self, cmd, lock):
        self.proc = subprocess.Popen(cmd)
        self.lock = lock

    def join(self):
        """Wait server to finish and release its resource
        """
        self.proc.wait()
        self.lock.release()
def popen_test_rpc(host,
                   port,
                   key,
                   destination,
                   libs=None,
                   options=None):
    """Launch rpc server via xcodebuild test through another process.

    Parameters
    ----------
    host : str
        The address of RPC proxy host.

    port : int
        The port of RPC proxy host

    key : str
        The key of the RPC server

    destination : str
        Destination device of deployment, as in xcodebuild

    libs : list of str
        List of files to be packed into app/Frameworks/tvm
        These can be dylibs that can be loaed remoted by RPC.

    options : list of str
        Additional options to xcodebuild

    Returns
    -------
    proc : Popen
        The test rpc server process.
        Don't do wait() on proc, since it can terminate normally.
    """
    if "TVM_IOS_RPC_ROOT" in os.environ:
        rpc_root = os.environ["TVM_IOS_RPC_ROOT"]
    else:
        curr_path = os.path.dirname(os.path.realpath(os.path.expanduser(__file__)))
        rpc_root = os.path.join(curr_path, "../../../apps/ios_rpc")
    proj_path = os.path.realpath(os.path.join(rpc_root, "tvmrpc.xcodeproj"))
    if not os.path.exists(proj_path):
        # Bug fix: previously the "%" was applied to the second string
        # fragment, which has no conversion specifier, so this line raised
        # ``TypeError: not all arguments converted during string formatting``
        # instead of the intended RuntimeError.
        raise RuntimeError(
            "Cannot find tvmrpc.xcodeproj in %s, "
            "please set env TVM_IOS_RPC_ROOT correctly" % rpc_root)

    # Lock the path so only one file can run
    lock = util.filelock(os.path.join(rpc_root, "ios_rpc.lock"))
    with open(os.path.join(rpc_root, "rpc_config.txt"), "w") as fo:
        fo.write("%s %d %s\n" % (host, port, key))
        libs = libs if libs else []
        for file_name in libs:
            fo.write("%s\n" % file_name)

    cmd = ["xcrun", "xcodebuild",
           "-scheme", "tvmrpc",
           "-project", proj_path,
           "-destination", destination]
    if options:
        cmd += options
    cmd += ["test"]

    return XCodeRPCServer(cmd, lock)
| 28.845878 | 171 | 0.610462 |
5fa458c6cca79fca9656ad539d5fe42d02dd5e34 | 5,656 | py | Python | detection/models/vae.py | kai-wen-yang/CD-VAE | a33b5070d5d936396d51c8c2e7dedd62351ee5b2 | [
"MIT"
] | 23 | 2021-12-10T02:09:49.000Z | 2022-03-24T11:46:58.000Z | detection/models/vae.py | kai-wen-yang/CD-VAE | a33b5070d5d936396d51c8c2e7dedd62351ee5b2 | [
"MIT"
] | 6 | 2021-12-20T07:27:31.000Z | 2022-03-30T07:22:26.000Z | detection/models/vae.py | kai-wen-yang/CD-VAE | a33b5070d5d936396d51c8c2e7dedd62351ee5b2 | [
"MIT"
] | 3 | 2021-12-20T13:38:50.000Z | 2022-02-20T20:58:45.000Z | from __future__ import print_function
import abc
import os
import math
import numpy as np
import logging
import torch
import torch.utils.data
from torch import nn
from torch.nn import init
from torch.nn import functional as F
from torch.autograd import Variable
class AbstractAutoEncoder(nn.Module):
    """Interface that every auto-encoder variant in this module implements."""
    # NOTE(review): ``__metaclass__`` is the Python 2 spelling and has no
    # effect under Python 3, so these abstract methods are not enforced;
    # subclassing ``abc.ABC`` would enforce them.
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def encode(self, x):
        return

    @abc.abstractmethod
    def decode(self, z):
        return

    @abc.abstractmethod
    def forward(self, x):
        """model return (reconstructed_x, *)"""
        return

    @abc.abstractmethod
    def sample(self, size):
        """sample new images from model"""
        return

    @abc.abstractmethod
    def loss_function(self, **kwargs):
        """accepts (original images, *) where * is the same as returned from forward()"""
        return

    @abc.abstractmethod
    def latest_losses(self):
        """returns the latest losses in a dictionary. Useful for logging."""
        return
class ResBlock(nn.Module):
    """Residual block: ``x + convs(x)`` with a 3x3 then a 1x1 convolution.

    The residual addition requires ``in_channels == out_channels`` for the
    shapes to match in ``forward``.
    """

    def __init__(self, in_channels, out_channels, mid_channels=None, bn=False):
        super(ResBlock, self).__init__()

        if mid_channels is None:
            mid_channels = out_channels

        layers = [
            nn.LeakyReLU(),
            nn.Conv2d(in_channels, mid_channels,
                      kernel_size=3, stride=1, padding=1),
            nn.LeakyReLU(),
            nn.Conv2d(mid_channels, out_channels,
                      kernel_size=1, stride=1, padding=0)]
        if bn:
            # Bug fix: the norm is inserted right after the 3x3 conv, whose
            # output has ``mid_channels`` feature maps. It was previously
            # built as ``BatchNorm2d(out_channels)``, which crashed whenever
            # mid_channels != out_channels (identical when they are equal,
            # which is the default).
            layers.insert(2, nn.BatchNorm2d(mid_channels))
        self.convs = nn.Sequential(*layers)

    def forward(self, x):
        # Skip connection around the two-conv stack.
        return x + self.convs(x)
class CVAE(AbstractAutoEncoder):
    """Convolutional VAE with a ``z``-dimensional latent code.

    Two stride-2 convolutions downsample the input by 4; ``self.f = 8`` fixes
    the encoder output at 8x8 spatial size (i.e. 32x32 inputs — TODO confirm).
    """

    def __init__(self, d, z, **kwargs):
        super(CVAE, self).__init__()

        self.encoder = nn.Sequential(
            nn.Conv2d(3, d // 2, kernel_size=4, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(d // 2),
            nn.ReLU(inplace=True),
            nn.Conv2d(d // 2, d, kernel_size=4, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(d),
            nn.ReLU(inplace=True),
            ResBlock(d, d, bn=True),
            nn.BatchNorm2d(d),
            ResBlock(d, d, bn=True),
        )

        self.decoder = nn.Sequential(
            ResBlock(d, d, bn=True),
            nn.BatchNorm2d(d),
            ResBlock(d, d, bn=True),
            nn.BatchNorm2d(d),

            nn.ConvTranspose2d(d, d // 2, kernel_size=4, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(d // 2),
            nn.LeakyReLU(inplace=True),
            nn.ConvTranspose2d(d // 2, 3, kernel_size=4, stride=2, padding=1, bias=False),
        )
        self.xi_bn = nn.BatchNorm2d(3)

        self.f = 8
        self.d = d
        self.z = z
        # mu and logvar heads over the flattened d*8*8 encoder output.
        self.fc11 = nn.Linear(d * self.f ** 2, self.z)
        self.fc12 = nn.Linear(d * self.f ** 2, self.z)
        # Projects a latent sample back to the decoder's input size.
        self.fc21 = nn.Linear(self.z, d * self.f ** 2)

    def encode(self, x):
        # Returns (feature map, mu, logvar).
        h = self.encoder(x)
        h1 = h.view(-1, self.d * self.f ** 2)
        return h, self.fc11(h1), self.fc12(h1)

    def reparameterize(self, mu, logvar):
        # Sample eps * std + mu during training; use the mean at eval time.
        if self.training:
            std = logvar.mul(0.5).exp_()
            eps = std.new(std.size()).normal_()
            return eps.mul(std).add_(mu)
        else:
            return mu

    def decode(self, z):
        z = z.view(-1, self.d, self.f, self.f)
        h3 = self.decoder(z)
        return torch.tanh(h3)

    def forward(self, x):
        # Note: returns only the (batch-normed) reconstruction, not mu/logvar.
        _, mu, logvar = self.encode(x)
        hi = self.reparameterize(mu, logvar)
        hi_projected = self.fc21(hi)
        xi = self.decode(hi_projected)
        xi = self.xi_bn(xi)
        return xi
class CVAE_imagenet(nn.Module):
    """Convolutional auto-encoder with a nearest-embedding (VQ) bottleneck.

    NOTE(review): ``NearestEmbed`` is not imported in this module's visible
    header — confirm the import exists upstream, otherwise construction
    raises NameError.
    """

    def __init__(self, d, k=10, num_channels=3, **kwargs):
        super(CVAE_imagenet, self).__init__()

        self.encoder = nn.Sequential(
            nn.Conv2d(num_channels, d, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(d),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(d, d, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(d),
            nn.LeakyReLU(inplace=True),
            ResBlock(d, d),
            nn.BatchNorm2d(d),
            ResBlock(d, d),
            nn.BatchNorm2d(d),
        )
        self.decoder = nn.Sequential(
            ResBlock(d, d),
            nn.BatchNorm2d(d),
            ResBlock(d, d),
            nn.ConvTranspose2d(d, d, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(d),
            nn.LeakyReLU(inplace=True),
            nn.ConvTranspose2d(d, num_channels, kernel_size=4, stride=2, padding=1),
        )
        self.d = d
        # Codebook of k embeddings, each of dimension d.
        self.emb = NearestEmbed(k, d)

        # Custom init: small normal weights clipped by fmod, zero biases.
        for l in self.modules():
            if isinstance(l, nn.Linear) or isinstance(l, nn.Conv2d):
                l.weight.detach().normal_(0, 0.02)
                torch.fmod(l.weight, 0.04)
                nn.init.constant_(l.bias, 0)

        # encoder[-1] is the final BatchNorm2d; its scale is set to 1/40.
        self.encoder[-1].weight.detach().fill_(1 / 40)
        self.emb.weight.detach().normal_(0, 0.02)
        torch.fmod(self.emb.weight, 0.04)
        self.L_bn = nn.BatchNorm2d(num_channels)

    def encode(self, x):
        return self.encoder(x)

    def decode(self, x):
        return torch.tanh(self.decoder(x))

    def forward(self, x):
        z_e = self.encode(x)
        # Quantize with straight-through weights; ``emb`` is computed but
        # unused here (presumably for an external loss — TODO confirm).
        z_q, _ = self.emb(z_e, weight_sg=True)
        emb, _ = self.emb(z_e.detach())
        l = self.decode(z_q)
        xi = self.L_bn(l)
        return xi
f45f03b9d8ee6289f616627926fac53e452adbb0 | 2,067 | py | Python | tests/test_tasks_project.py | brilliantorg/elm-doc | 69ddbcd57aee3da6283c2497d735951d95b85426 | [
"BSD-3-Clause"
] | 29 | 2017-02-01T11:58:44.000Z | 2021-05-21T15:18:33.000Z | tests/test_tasks_project.py | brilliantorg/elm-doc | 69ddbcd57aee3da6283c2497d735951d95b85426 | [
"BSD-3-Clause"
] | 143 | 2017-07-26T17:34:44.000Z | 2022-03-01T18:01:43.000Z | tests/test_tasks_project.py | brilliantorg/elm-doc | 69ddbcd57aee3da6283c2497d735951d95b85426 | [
"BSD-3-Clause"
] | 7 | 2018-03-09T10:04:45.000Z | 2021-10-19T19:17:40.000Z | from pathlib import Path
from elm_doc import elm_project
from elm_doc.tasks import project as project_tasks
def test_sync_source_files_create_new_file(
        tmpdir, elm_version, make_elm_project):
    """A project source file is copied into an empty sync target dir."""
    project_dir = make_elm_project(
        elm_version,
        tmpdir,
        sources={
            'src': [
                'Main.elm',
            ],
        },
        copy_elm_stuff=False,
    )
    project = elm_project.from_path(Path(str(project_dir)))
    target_dir = Path(str(project_dir / 'tmp'))
    target_dir.mkdir()
    project_tasks.actions.SyncSources(project, target_dir).execute()
    assert (target_dir / 'Main.elm').exists()
def test_sync_source_files_update_file(
        tmpdir, elm_version, make_elm_project):
    """A source file edited after a sync is overwritten on the next sync."""
    project_dir = make_elm_project(
        elm_version,
        tmpdir,
        sources={
            'src': [
                'Main.elm',
            ],
        },
        copy_elm_stuff=False,
    )
    project = elm_project.from_path(Path(str(project_dir)))
    target_dir = Path(str(project_dir / 'tmp'))
    target_dir.mkdir()
    project_tasks.actions.SyncSources(project, target_dir).execute()
    main_elm = project_dir.join('src', 'Main.elm')
    main_elm.write('updated for testing')
    project_tasks.actions.SyncSources(project, target_dir).execute()
    assert (target_dir / 'Main.elm').read_text() == 'updated for testing'
def test_sync_source_files_delete_file(
        tmpdir, elm_version, make_elm_project):
    """A source file removed from the project is removed from the target."""
    project_dir = make_elm_project(
        elm_version,
        tmpdir,
        sources={
            'src': [
                'Main.elm',
            ],
        },
        copy_elm_stuff=False,
    )
    project = elm_project.from_path(Path(str(project_dir)))
    target_dir = Path(str(project_dir / 'tmp'))
    target_dir.mkdir()
    project_tasks.actions.SyncSources(project, target_dir).execute()
    main_elm = project_dir.join('src', 'Main.elm')
    main_elm.remove()
    project_tasks.actions.SyncSources(project, target_dir).execute()
    assert not (target_dir / 'Main.elm').exists()
| 28.708333 | 73 | 0.636188 |
6153c35a33dee65649d185d49f11dc4463731047 | 1,054 | py | Python | anomalyframework/tests/test_shuffle.py | alliedel/anomalyframework_python | 63c56d9fb2e1dc37dfca494805e7fa179e078623 | [
"MIT"
] | 15 | 2018-12-28T07:32:56.000Z | 2022-02-20T00:17:34.000Z | anomalyframework/tests/test_shuffle.py | alliedel/anomalyframework_python | 63c56d9fb2e1dc37dfca494805e7fa179e078623 | [
"MIT"
] | 1 | 2018-04-06T18:14:12.000Z | 2018-07-11T11:18:34.000Z | anomalyframework/tests/test_shuffle.py | alliedel/anomalyframework_python | 63c56d9fb2e1dc37dfca494805e7fa179e078623 | [
"MIT"
] | 5 | 2018-02-07T19:08:09.000Z | 2019-07-01T23:17:23.000Z | import numpy as np
import unittest
from anomalyframework import shuffle
class TestBlockShuffle(unittest.TestCase):
    """Unit tests for ``shuffle.block_shuffle`` with seeded RNG."""

    def test_block_shuffle(self):
        # Bug fix: renamed from ``block_shuffle_test`` — unittest only
        # discovers methods whose names start with "test", so this case
        # previously never ran.
        y = np.array([1, 1, 1, 2, 2, 2, 3, 3, 4, 5, 6, 1, 1])
        block_size = 2
        np.random.seed(0)
        shuffled_indices, indices_to_blocks = shuffle.block_shuffle(y, block_size, one_based=True)
        self.assertListEqual(shuffled_indices, [9, 10, 6, 7, 8, 0, 1, 2, 11, 12, 3, 4, 5])
        self.assertEqual(indices_to_blocks.tolist(), [[1, 3, 5],
                                                      [2, 4, 6]])

        y = np.array([1, 1, 1, 2, 2, 2, 3, 3, 4, 5, 6, 1, 1])
        block_size = 2
        np.random.seed(1)
        shuffled_indices, indices_to_blocks = shuffle.block_shuffle(y, block_size, one_based=False)
        self.assertListEqual(shuffled_indices, [10, 8, 9, 0, 1, 2, 11, 12, 3, 4, 5, 6, 7])
        np.testing.assert_array_equal(indices_to_blocks, np.array([
            [0, 2, 4, 6], [1, 3, 5, np.nan]]))
if __name__ == '__main__':
unittest.main()
| 34 | 100 | 0.57685 |
d827c94fed0e8c78f647d12d779c502b16e06a27 | 701 | py | Python | LeetCode/python/tree/recover-binary-search-tree.py | Leoyuseu/Code | 34edfbbfb7875b3ed06de393c192c1f13a5074f4 | [
"BSD-Source-Code"
] | null | null | null | LeetCode/python/tree/recover-binary-search-tree.py | Leoyuseu/Code | 34edfbbfb7875b3ed06de393c192c1f13a5074f4 | [
"BSD-Source-Code"
] | null | null | null | LeetCode/python/tree/recover-binary-search-tree.py | Leoyuseu/Code | 34edfbbfb7875b3ed06de393c192c1f13a5074f4 | [
"BSD-Source-Code"
] | null | null | null | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    """Recover a BST in which exactly two node values were swapped."""

    def inOrder(self, root):
        """In-order walk recording the two out-of-order nodes in t1/t2."""
        if root is None:
            return
        self.inOrder(root.left)
        # An inversion (previous value greater than current) marks an
        # offending pair: the first inversion's predecessor is t1, the
        # last inversion's current node is t2.
        if self.prev is not None and self.prev.val > root.val:
            self.t2 = root
            if self.t1 is None:
                self.t1 = self.prev
        self.prev = root
        self.inOrder(root.right)

    def recoverTree(self, root):
        """Fix the tree in place by swapping the two offending values."""
        self.prev, self.t1, self.t2 = None, None, None
        self.inOrder(root)
        self.t1.val, self.t2.val = self.t2.val, self.t1.val
b4149d3ef007e807c3dbc3a00a513d0a0d4cceda | 51,828 | py | Python | ServidorPython/python32_web/Lib/test/test_smtplib.py | mak213k/Servidor_automatizado_python | 4403ef8027a2f814220baacc95856cf5fbf01d21 | [
"MIT"
] | 1,738 | 2017-09-21T10:59:12.000Z | 2022-03-31T21:05:46.000Z | ServidorPython/python32_web/Lib/test/test_smtplib.py | mak213k/Servidor_automatizado_python | 4403ef8027a2f814220baacc95856cf5fbf01d21 | [
"MIT"
] | 427 | 2017-09-29T22:54:36.000Z | 2022-02-15T19:26:50.000Z | ServidorPython/python32_web/Lib/test/test_smtplib.py | mak213k/Servidor_automatizado_python | 4403ef8027a2f814220baacc95856cf5fbf01d21 | [
"MIT"
] | 671 | 2017-09-21T08:04:01.000Z | 2022-03-29T14:30:07.000Z | import asyncore
import base64
import email.mime.text
from email.message import EmailMessage
from email.base64mime import body_encode as encode_base64
import email.utils
import hmac
import socket
import smtpd
import smtplib
import io
import re
import sys
import time
import select
import errno
import textwrap
import threading
import unittest
from test import support, mock_socket
from test.support import HOST, HOSTv4, HOSTv6
from unittest.mock import Mock
if sys.platform == 'darwin':
# select.poll returns a select.POLLHUP at the end of the tests
# on darwin, so just ignore it
def handle_expt(self):
pass
smtpd.SMTPChannel.handle_expt = handle_expt
def server(evt, buf, serv):
    """Accept one connection on *serv* and drip-feed *buf* to it.

    *evt* is set once the server is listening and again after shutdown.
    """
    serv.listen()
    evt.set()
    try:
        conn, addr = serv.accept()
    except socket.timeout:
        pass
    else:
        # Cap the select/send loop so a stuck client cannot hang the test.
        n = 500
        while buf and n > 0:
            r, w, e = select.select([], [conn], [])
            if w:
                sent = conn.send(buf)
                buf = buf[sent:]

            n -= 1

        conn.close()
    finally:
        serv.close()
        evt.set()
class GeneralTests(unittest.TestCase):
    """Constructor/connection behaviour of smtplib.SMTP against mock_socket."""

    def setUp(self):
        # Every test talks to the in-memory mock socket, never the network.
        smtplib.socket = mock_socket
        self.port = 25

    def tearDown(self):
        smtplib.socket = socket

    # This method is no longer used but is retained for backward compatibility,
    # so test to make sure it still works.
    def testQuoteData(self):
        teststr = "abc\n.jkl\rfoo\r\n..blue"
        expected = "abc\r\n..jkl\r\nfoo\r\n...blue"
        self.assertEqual(expected, smtplib.quotedata(teststr))

    def testBasic1(self):
        mock_socket.reply_with(b"220 Hola mundo")
        # connects
        smtp = smtplib.SMTP(HOST, self.port)
        smtp.close()

    def testSourceAddress(self):
        mock_socket.reply_with(b"220 Hola mundo")
        # connects
        smtp = smtplib.SMTP(HOST, self.port,
                            source_address=('127.0.0.1',19876))
        self.assertEqual(smtp.source_address, ('127.0.0.1', 19876))
        smtp.close()

    def testBasic2(self):
        mock_socket.reply_with(b"220 Hola mundo")
        # connects, include port in host name
        smtp = smtplib.SMTP("%s:%s" % (HOST, self.port))
        smtp.close()

    def testLocalHostName(self):
        mock_socket.reply_with(b"220 Hola mundo")
        # check that supplied local_hostname is used
        smtp = smtplib.SMTP(HOST, self.port, local_hostname="testhost")
        self.assertEqual(smtp.local_hostname, "testhost")
        smtp.close()

    def testTimeoutDefault(self):
        mock_socket.reply_with(b"220 Hola mundo")
        self.assertIsNone(mock_socket.getdefaulttimeout())
        # The constructor must pick up the socket module's default timeout.
        mock_socket.setdefaulttimeout(30)
        self.assertEqual(mock_socket.getdefaulttimeout(), 30)
        try:
            smtp = smtplib.SMTP(HOST, self.port)
        finally:
            mock_socket.setdefaulttimeout(None)
        self.assertEqual(smtp.sock.gettimeout(), 30)
        smtp.close()

    def testTimeoutNone(self):
        mock_socket.reply_with(b"220 Hola mundo")
        self.assertIsNone(socket.getdefaulttimeout())
        socket.setdefaulttimeout(30)
        try:
            # An explicit timeout=None must override the global default.
            smtp = smtplib.SMTP(HOST, self.port, timeout=None)
        finally:
            socket.setdefaulttimeout(None)
        self.assertIsNone(smtp.sock.gettimeout())
        smtp.close()

    def testTimeoutValue(self):
        mock_socket.reply_with(b"220 Hola mundo")
        smtp = smtplib.SMTP(HOST, self.port, timeout=30)
        self.assertEqual(smtp.sock.gettimeout(), 30)
        smtp.close()

    def test_debuglevel(self):
        mock_socket.reply_with(b"220 Hello world")
        smtp = smtplib.SMTP()
        smtp.set_debuglevel(1)
        with support.captured_stderr() as stderr:
            smtp.connect(HOST, self.port)
        smtp.close()
        expected = re.compile(r"^connect:", re.MULTILINE)
        self.assertRegex(stderr.getvalue(), expected)

    def test_debuglevel_2(self):
        mock_socket.reply_with(b"220 Hello world")
        smtp = smtplib.SMTP()
        smtp.set_debuglevel(2)
        with support.captured_stderr() as stderr:
            smtp.connect(HOST, self.port)
        smtp.close()
        # Debug level 2 prefixes each line with a timestamp.
        expected = re.compile(r"^\d{2}:\d{2}:\d{2}\.\d{6} connect: ",
                              re.MULTILINE)
        self.assertRegex(stderr.getvalue(), expected)
# Test server thread using the specified SMTP server class
def debugging_server(serv, serv_evt, client_evt):
    """Drive the asyncore polling loop for *serv* in a test thread.

    *serv_evt* is set when the loop starts and again after shutdown.
    The loop exits once *client_evt* is set, the socket map empties, or
    1000 poll iterations have elapsed.
    """
    serv_evt.set()
    try:
        # Prefer the poll()-based dispatcher when the platform has it.
        poll_fun = asyncore.poll2 if hasattr(select, 'poll') else asyncore.poll
        for _ in range(1000):
            if not asyncore.socket_map:
                break
            poll_fun(0.01, asyncore.socket_map)
            # when the client conversation is finished, it will
            # set client_evt, and it's then ok to kill the server
            if client_evt.is_set():
                serv.close()
                break
    except socket.timeout:
        pass
    finally:
        if not client_evt.is_set():
            # allow some time for the client to read the result
            time.sleep(0.5)
        serv.close()
        asyncore.close_all()
        serv_evt.set()
# Banner lines smtpd.DebuggingServer prints around each received message;
# the tests below reassemble expected stdout from these.
MSG_BEGIN = '---------- MESSAGE FOLLOWS ----------\n'
MSG_END = '------------ END MESSAGE ------------\n'
# NOTE: Some SMTP objects in the tests below are created with a non-default
# local_hostname argument to the constructor, since (on some systems) the FQDN
# lookup caused by the default local_hostname sometimes takes so long that the
# test server times out, causing the test to fail.
# Test behavior of smtpd.DebuggingServer
class DebuggingServerTests(unittest.TestCase):
    """End-to-end client tests against a threaded smtpd.DebuggingServer.

    Each test connects a real smtplib.SMTP client to a server instance
    running in a helper thread; server output is captured via sys.stdout
    and smtpd.DEBUGSTREAM.
    """

    maxDiff = None

    def setUp(self):
        self.real_getfqdn = socket.getfqdn
        socket.getfqdn = mock_socket.getfqdn
        # temporarily replace sys.stdout to capture DebuggingServer output
        self.old_stdout = sys.stdout
        self.output = io.StringIO()
        sys.stdout = self.output
        self.serv_evt = threading.Event()
        self.client_evt = threading.Event()
        # Capture SMTPChannel debug output
        self.old_DEBUGSTREAM = smtpd.DEBUGSTREAM
        smtpd.DEBUGSTREAM = io.StringIO()
        # Pick a random unused port by passing 0 for the port number
        self.serv = smtpd.DebuggingServer((HOST, 0), ('nowhere', -1),
                                          decode_data=True)
        # Keep a note of what server host and port were assigned
        self.host, self.port = self.serv.socket.getsockname()[:2]
        serv_args = (self.serv, self.serv_evt, self.client_evt)
        self.thread = threading.Thread(target=debugging_server, args=serv_args)
        self.thread.start()
        # wait until server thread has assigned a port number
        self.serv_evt.wait()
        self.serv_evt.clear()

    def tearDown(self):
        socket.getfqdn = self.real_getfqdn
        # indicate that the client is finished
        self.client_evt.set()
        # wait for the server thread to terminate
        self.serv_evt.wait()
        self.thread.join()
        # restore sys.stdout
        sys.stdout = self.old_stdout
        # restore DEBUGSTREAM
        smtpd.DEBUGSTREAM.close()
        smtpd.DEBUGSTREAM = self.old_DEBUGSTREAM

    def get_output_without_xpeer(self):
        # Strip the X-Peer header DebuggingServer injects, since its value
        # (the client's address) is environment-dependent.
        test_output = self.output.getvalue()
        return re.sub(r'(.*?)^X-Peer:\s*\S+\n(.*)', r'\1\2',
                      test_output, flags=re.MULTILINE|re.DOTALL)

    def testBasic(self):
        # connect
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        smtp.quit()

    def testSourceAddress(self):
        # connect
        src_port = support.find_unused_port()
        try:
            smtp = smtplib.SMTP(self.host, self.port, local_hostname='localhost',
                                timeout=3, source_address=(self.host, src_port))
            self.assertEqual(smtp.source_address, (self.host, src_port))
            self.assertEqual(smtp.local_hostname, 'localhost')
            smtp.quit()
        except OSError as e:
            # The probed port may have been taken between probe and bind.
            if e.errno == errno.EADDRINUSE:
                self.skipTest("couldn't bind to source port %d" % src_port)
            raise

    def testNOOP(self):
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        expected = (250, b'OK')
        self.assertEqual(smtp.noop(), expected)
        smtp.quit()

    def testRSET(self):
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        expected = (250, b'OK')
        self.assertEqual(smtp.rset(), expected)
        smtp.quit()

    # NOTE(review): method name looks like a typo for "testEHLO".
    def testELHO(self):
        # EHLO isn't implemented in DebuggingServer
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        expected = (250, b'\nSIZE 33554432\nHELP')
        self.assertEqual(smtp.ehlo(), expected)
        smtp.quit()

    def testEXPNNotImplemented(self):
        # EXPN isn't implemented in DebuggingServer
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        expected = (502, b'EXPN not implemented')
        smtp.putcmd('EXPN')
        self.assertEqual(smtp.getreply(), expected)
        smtp.quit()

    def testVRFY(self):
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        expected = (252, b'Cannot VRFY user, but will accept message ' + \
                         b'and attempt delivery')
        self.assertEqual(smtp.vrfy('nobody@nowhere.com'), expected)
        self.assertEqual(smtp.verify('nobody@nowhere.com'), expected)
        smtp.quit()

    def testSecondHELO(self):
        # check that a second HELO returns a message that it's a duplicate
        # (this behavior is specific to smtpd.SMTPChannel)
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        smtp.helo()
        expected = (503, b'Duplicate HELO/EHLO')
        self.assertEqual(smtp.helo(), expected)
        smtp.quit()

    def testHELP(self):
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        self.assertEqual(smtp.help(), b'Supported commands: EHLO HELO MAIL ' + \
                                      b'RCPT DATA RSET NOOP QUIT VRFY')
        smtp.quit()

    def testSend(self):
        # connect and send mail
        m = 'A test message'
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        smtp.sendmail('John', 'Sally', m)
        # XXX(nnorwitz): this test is flaky and dies with a bad file descriptor
        # in asyncore.  This sleep might help, but should really be fixed
        # properly by using an Event variable.
        time.sleep(0.01)
        smtp.quit()
        self.client_evt.set()
        self.serv_evt.wait()
        self.output.flush()
        mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
        self.assertEqual(self.output.getvalue(), mexpect)

    def testSendBinary(self):
        m = b'A test message'
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        smtp.sendmail('John', 'Sally', m)
        # XXX (see comment in testSend)
        time.sleep(0.01)
        smtp.quit()
        self.client_evt.set()
        self.serv_evt.wait()
        self.output.flush()
        mexpect = '%s%s\n%s' % (MSG_BEGIN, m.decode('ascii'), MSG_END)
        self.assertEqual(self.output.getvalue(), mexpect)

    def testSendNeedingDotQuote(self):
        # Issue 12283
        m = '.A test\n.mes.sage.'
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        smtp.sendmail('John', 'Sally', m)
        # XXX (see comment in testSend)
        time.sleep(0.01)
        smtp.quit()
        self.client_evt.set()
        self.serv_evt.wait()
        self.output.flush()
        mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
        self.assertEqual(self.output.getvalue(), mexpect)

    def testSendNullSender(self):
        m = 'A test message'
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        smtp.sendmail('<>', 'Sally', m)
        # XXX (see comment in testSend)
        time.sleep(0.01)
        smtp.quit()
        self.client_evt.set()
        self.serv_evt.wait()
        self.output.flush()
        mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
        self.assertEqual(self.output.getvalue(), mexpect)
        debugout = smtpd.DEBUGSTREAM.getvalue()
        sender = re.compile("^sender: <>$", re.MULTILINE)
        self.assertRegex(debugout, sender)

    def testSendMessage(self):
        m = email.mime.text.MIMEText('A test message')
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        smtp.send_message(m, from_addr='John', to_addrs='Sally')
        # XXX (see comment in testSend)
        time.sleep(0.01)
        smtp.quit()
        self.client_evt.set()
        self.serv_evt.wait()
        self.output.flush()
        # Remove the X-Peer header that DebuggingServer adds as figuring out
        # exactly what IP address format is put there is not easy (and
        # irrelevant to our test).  Typically 127.0.0.1 or ::1, but it is
        # not always the same as socket.gethostbyname(HOST). :(
        test_output = self.get_output_without_xpeer()
        del m['X-Peer']
        mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
        self.assertEqual(test_output, mexpect)

    def testSendMessageWithAddresses(self):
        m = email.mime.text.MIMEText('A test message')
        m['From'] = 'foo@bar.com'
        m['To'] = 'John'
        m['CC'] = 'Sally, Fred'
        m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        smtp.send_message(m)
        # XXX (see comment in testSend)
        time.sleep(0.01)
        smtp.quit()
        # make sure the Bcc header is still in the message.
        self.assertEqual(m['Bcc'], 'John Root <root@localhost>, "Dinsdale" '
                                   '<warped@silly.walks.com>')
        self.client_evt.set()
        self.serv_evt.wait()
        self.output.flush()
        # Remove the X-Peer header that DebuggingServer adds.
        test_output = self.get_output_without_xpeer()
        del m['X-Peer']
        # The Bcc header should not be transmitted.
        del m['Bcc']
        mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
        self.assertEqual(test_output, mexpect)
        debugout = smtpd.DEBUGSTREAM.getvalue()
        sender = re.compile("^sender: foo@bar.com$", re.MULTILINE)
        self.assertRegex(debugout, sender)
        for addr in ('John', 'Sally', 'Fred', 'root@localhost',
                     'warped@silly.walks.com'):
            to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
                                 re.MULTILINE)
            self.assertRegex(debugout, to_addr)

    def testSendMessageWithSomeAddresses(self):
        # Make sure nothing breaks if not all of the three 'to' headers exist
        m = email.mime.text.MIMEText('A test message')
        m['From'] = 'foo@bar.com'
        m['To'] = 'John, Dinsdale'
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        smtp.send_message(m)
        # XXX (see comment in testSend)
        time.sleep(0.01)
        smtp.quit()
        self.client_evt.set()
        self.serv_evt.wait()
        self.output.flush()
        # Remove the X-Peer header that DebuggingServer adds.
        test_output = self.get_output_without_xpeer()
        del m['X-Peer']
        mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
        self.assertEqual(test_output, mexpect)
        debugout = smtpd.DEBUGSTREAM.getvalue()
        sender = re.compile("^sender: foo@bar.com$", re.MULTILINE)
        self.assertRegex(debugout, sender)
        for addr in ('John', 'Dinsdale'):
            to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
                                 re.MULTILINE)
            self.assertRegex(debugout, to_addr)

    def testSendMessageWithSpecifiedAddresses(self):
        # Make sure addresses specified in call override those in message.
        m = email.mime.text.MIMEText('A test message')
        m['From'] = 'foo@bar.com'
        m['To'] = 'John, Dinsdale'
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        smtp.send_message(m, from_addr='joe@example.com', to_addrs='foo@example.net')
        # XXX (see comment in testSend)
        time.sleep(0.01)
        smtp.quit()
        self.client_evt.set()
        self.serv_evt.wait()
        self.output.flush()
        # Remove the X-Peer header that DebuggingServer adds.
        test_output = self.get_output_without_xpeer()
        del m['X-Peer']
        mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
        self.assertEqual(test_output, mexpect)
        debugout = smtpd.DEBUGSTREAM.getvalue()
        sender = re.compile("^sender: joe@example.com$", re.MULTILINE)
        self.assertRegex(debugout, sender)
        for addr in ('John', 'Dinsdale'):
            to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
                                 re.MULTILINE)
            self.assertNotRegex(debugout, to_addr)
        recip = re.compile(r"^recips: .*'foo@example.net'.*$", re.MULTILINE)
        self.assertRegex(debugout, recip)

    def testSendMessageWithMultipleFrom(self):
        # Sender overrides To
        m = email.mime.text.MIMEText('A test message')
        m['From'] = 'Bernard, Bianca'
        m['Sender'] = 'the_rescuers@Rescue-Aid-Society.com'
        m['To'] = 'John, Dinsdale'
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        smtp.send_message(m)
        # XXX (see comment in testSend)
        time.sleep(0.01)
        smtp.quit()
        self.client_evt.set()
        self.serv_evt.wait()
        self.output.flush()
        # Remove the X-Peer header that DebuggingServer adds.
        test_output = self.get_output_without_xpeer()
        del m['X-Peer']
        mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
        self.assertEqual(test_output, mexpect)
        debugout = smtpd.DEBUGSTREAM.getvalue()
        sender = re.compile("^sender: the_rescuers@Rescue-Aid-Society.com$", re.MULTILINE)
        self.assertRegex(debugout, sender)
        for addr in ('John', 'Dinsdale'):
            to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
                                 re.MULTILINE)
            self.assertRegex(debugout, to_addr)

    def testSendMessageResent(self):
        m = email.mime.text.MIMEText('A test message')
        m['From'] = 'foo@bar.com'
        m['To'] = 'John'
        m['CC'] = 'Sally, Fred'
        m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
        m['Resent-Date'] = 'Thu, 1 Jan 1970 17:42:00 +0000'
        m['Resent-From'] = 'holy@grail.net'
        m['Resent-To'] = 'Martha <my_mom@great.cooker.com>, Jeff'
        m['Resent-Bcc'] = 'doe@losthope.net'
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        smtp.send_message(m)
        # XXX (see comment in testSend)
        time.sleep(0.01)
        smtp.quit()
        self.client_evt.set()
        self.serv_evt.wait()
        self.output.flush()
        # The Resent-Bcc headers are deleted before serialization.
        del m['Bcc']
        del m['Resent-Bcc']
        # Remove the X-Peer header that DebuggingServer adds.
        test_output = self.get_output_without_xpeer()
        del m['X-Peer']
        mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
        self.assertEqual(test_output, mexpect)
        debugout = smtpd.DEBUGSTREAM.getvalue()
        sender = re.compile("^sender: holy@grail.net$", re.MULTILINE)
        self.assertRegex(debugout, sender)
        for addr in ('my_mom@great.cooker.com', 'Jeff', 'doe@losthope.net'):
            to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
                                 re.MULTILINE)
            self.assertRegex(debugout, to_addr)

    def testSendMessageMultipleResentRaises(self):
        # Two full sets of Resent-* headers are ambiguous; send_message
        # must refuse rather than guess which set to use.
        m = email.mime.text.MIMEText('A test message')
        m['From'] = 'foo@bar.com'
        m['To'] = 'John'
        m['CC'] = 'Sally, Fred'
        m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
        m['Resent-Date'] = 'Thu, 1 Jan 1970 17:42:00 +0000'
        m['Resent-From'] = 'holy@grail.net'
        m['Resent-To'] = 'Martha <my_mom@great.cooker.com>, Jeff'
        m['Resent-Bcc'] = 'doe@losthope.net'
        m['Resent-Date'] = 'Thu, 2 Jan 1970 17:42:00 +0000'
        m['Resent-To'] = 'holy@grail.net'
        m['Resent-From'] = 'Martha <my_mom@great.cooker.com>, Jeff'
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        with self.assertRaises(ValueError):
            smtp.send_message(m)
        smtp.close()
class NonConnectingTests(unittest.TestCase):
    """Checks that need no server at all: misuse of an unconnected client."""

    def testNotConnected(self):
        # Operations on a never-connected SMTP instance must raise
        # SMTPServerDisconnected rather than e.g. an AttributeError on the
        # missing 'sock' attribute.
        client = smtplib.SMTP()
        with self.assertRaises(smtplib.SMTPServerDisconnected):
            client.ehlo()
        with self.assertRaises(smtplib.SMTPServerDisconnected):
            client.send('test msg')

    def testNonnumericPort(self):
        # A port that cannot be parsed as an integer raises OSError, whether
        # given as a separate argument or embedded in the host string.
        for ctor_args in (("localhost", "bogus"), ("localhost:bogus",)):
            with self.assertRaises(OSError):
                smtplib.SMTP(*ctor_args)
class DefaultArgumentsTests(unittest.TestCase):
def setUp(self):
self.msg = EmailMessage()
self.msg['From'] = 'Páolo <főo@bar.com>'
self.smtp = smtplib.SMTP()
self.smtp.ehlo = Mock(return_value=(200, 'OK'))
self.smtp.has_extn, self.smtp.sendmail = Mock(), Mock()
def testSendMessage(self):
expected_mail_options = ('SMTPUTF8', 'BODY=8BITMIME')
self.smtp.send_message(self.msg)
self.smtp.send_message(self.msg)
self.assertEqual(self.smtp.sendmail.call_args_list[0][0][3],
expected_mail_options)
self.assertEqual(self.smtp.sendmail.call_args_list[1][0][3],
expected_mail_options)
def testSendMessageWithMailOptions(self):
mail_options = ['STARTTLS']
expected_mail_options = ('STARTTLS', 'SMTPUTF8', 'BODY=8BITMIME')
self.smtp.send_message(self.msg, None, None, mail_options)
self.assertEqual(mail_options, ['STARTTLS'])
self.assertEqual(self.smtp.sendmail.call_args_list[0][0][3],
expected_mail_options)
# test response of client to a non-successful HELO message
class BadHELOServerTests(unittest.TestCase):
    """Client behaviour when the server's greeting is a failure code."""

    def setUp(self):
        # Swap smtplib's socket module for the mock and queue a greeting
        # whose code (199) lies outside the 2xx success range.
        smtplib.socket = mock_socket
        mock_socket.reply_with(b"199 no hello for you!")
        self.old_stdout = sys.stdout
        self.output = io.StringIO()
        sys.stdout = self.output
        self.port = 25

    def tearDown(self):
        smtplib.socket = socket
        sys.stdout = self.old_stdout

    def testFailingHELO(self):
        # Constructing the client performs the greeting exchange, so the
        # failure surfaces as SMTPConnectError from the constructor itself.
        with self.assertRaises(smtplib.SMTPConnectError):
            smtplib.SMTP(HOST, self.port, 'localhost', 3)
class TooLongLineTests(unittest.TestCase):
    """A reply line longer than smtplib._MAXLINE must abort the session."""

    # One response line roughly twice the permitted maximum length.
    respdata = b'250 OK' + b'.' * (smtplib._MAXLINE * 2) + b'\n'

    def setUp(self):
        self.old_stdout = sys.stdout
        self.output = io.StringIO()
        sys.stdout = self.output
        self.evt = threading.Event()
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.settimeout(15)
        self.port = support.bind_port(self.sock)
        feeder = threading.Thread(
            target=server, args=(self.evt, self.respdata, self.sock))
        feeder.start()
        self.addCleanup(feeder.join)
        # server() signals the event once it is listening.
        self.evt.wait()
        self.evt.clear()

    def tearDown(self):
        # Wait for the helper thread's final signal before restoring stdout.
        self.evt.wait()
        sys.stdout = self.old_stdout

    def testLineTooLong(self):
        with self.assertRaises(smtplib.SMTPResponseException):
            smtplib.SMTP(HOST, self.port, 'localhost', 3)
# Fixture data for the simulated-server tests below: known mailbox
# addresses mapped to display names.
sim_users = {'Mr.A@somewhere.com':'John A',
             'Ms.B@xn--fo-fka.com':'Sally B',
             'Mrs.C@somewhereesle.com':'Ruth C',
            }

# The (username, password) pair the simulated AUTH mechanisms accept.
sim_auth = ('Mr.A@somewhere.com', 'somepassword')
# Fixed base64-encoded challenge issued by the simulated CRAM-MD5 handler.
sim_cram_md5_challenge = ('PENCeUxFREJoU0NnbmhNWitOMjNGNn'
                          'dAZWx3b29kLmlubm9zb2Z0LmNvbT4=')
# Mailing lists the simulated EXPN command can expand.
sim_lists = {'list-1':['Mr.A@somewhere.com','Mrs.C@somewhereesle.com'],
             'list-2':['Ms.B@xn--fo-fka.com',],
            }
# Simulated SMTP channel & server
class ResponseException(Exception): pass
class SimSMTPChannel(smtpd.SMTPChannel):
    """smtpd channel extended with ESMTP features and AUTH for the tests.

    The *_response class attributes, when set to non-None by a test, replace
    the base implementation's reply for the corresponding command.
    """

    # Canned replies; None means "defer to the smtpd base class".
    quit_response = None
    mail_response = None
    rcpt_response = None
    data_response = None
    # Counters/flags the tests inspect after the conversation.
    rcpt_count = 0
    rset_count = 0
    disconnect = 0
    AUTH = 99  # Add protocol state to enable auth testing.
    authenticated_user = None

    def __init__(self, extra_features, *args, **kw):
        self._extrafeatures = ''.join(
            [ "250-{0}\r\n".format(x) for x in extra_features ])
        super(SimSMTPChannel, self).__init__(*args, **kw)

    # AUTH related stuff.  It would be nice if support for this were in smtpd.
    def found_terminator(self):
        if self.smtp_state == self.AUTH:
            line = self._emptystring.join(self.received_lines)
            print('Data:', repr(line), file=smtpd.DEBUGSTREAM)
            self.received_lines = []
            try:
                self.auth_object(line)
            except ResponseException as e:
                self.smtp_state = self.COMMAND
                self.push('%s %s' % (e.smtp_code, e.smtp_error))
                return
        super().found_terminator()

    def smtp_AUTH(self, arg):
        if not self.seen_greeting:
            self.push('503 Error: send EHLO first')
            return
        if not self.extended_smtp or 'AUTH' not in self._extrafeatures:
            self.push('500 Error: command "AUTH" not recognized')
            return
        if self.authenticated_user is not None:
            self.push(
                '503 Bad sequence of commands: already authenticated')
            return
        args = arg.split()
        if len(args) not in [1, 2]:
            self.push('501 Syntax: AUTH <mechanism> [initial-response]')
            return
        # Dispatch to _auth_plain / _auth_login / _auth_cram_md5 by name.
        auth_object_name = '_auth_%s' % args[0].lower().replace('-', '_')
        try:
            self.auth_object = getattr(self, auth_object_name)
        except AttributeError:
            self.push('504 Command parameter not implemented: unsupported '
                      ' authentication mechanism {!r}'.format(auth_object_name))
            return
        self.smtp_state = self.AUTH
        self.auth_object(args[1] if len(args) == 2 else None)

    def _authenticated(self, user, valid):
        if valid:
            self.authenticated_user = user
            self.push('235 Authentication Succeeded')
        else:
            self.push('535 Authentication credentials invalid')
        self.smtp_state = self.COMMAND

    def _decode_base64(self, string):
        return base64.decodebytes(string.encode('ascii')).decode('utf-8')

    def _auth_plain(self, arg=None):
        if arg is None:
            self.push('334 ')
        else:
            logpass = self._decode_base64(arg)
            try:
                *_, user, password = logpass.split('\0')
            except ValueError as e:
                self.push('535 Splitting response {!r} into user and password'
                          ' failed: {}'.format(logpass, e))
                return
            self._authenticated(user, password == sim_auth[1])

    def _auth_login(self, arg=None):
        if arg is None:
            # base64 encoded 'Username:'
            self.push('334 VXNlcm5hbWU6')
        elif not hasattr(self, '_auth_login_user'):
            self._auth_login_user = self._decode_base64(arg)
            # base64 encoded 'Password:'
            self.push('334 UGFzc3dvcmQ6')
        else:
            password = self._decode_base64(arg)
            self._authenticated(self._auth_login_user, password == sim_auth[1])
            del self._auth_login_user

    def _auth_cram_md5(self, arg=None):
        if arg is None:
            self.push('334 {}'.format(sim_cram_md5_challenge))
        else:
            logpass = self._decode_base64(arg)
            try:
                user, hashed_pass = logpass.split()
            except ValueError as e:
                self.push('535 Splitting response {!r} into user and password '
                          'failed: {}'.format(logpass, e))
                return False
            valid_hashed_pass = hmac.HMAC(
                sim_auth[1].encode('ascii'),
                self._decode_base64(sim_cram_md5_challenge).encode('ascii'),
                'md5').hexdigest()
            self._authenticated(user, hashed_pass == valid_hashed_pass)
    # end AUTH related stuff.

    def smtp_EHLO(self, arg):
        resp = ('250-testhost\r\n'
                '250-EXPN\r\n'
                '250-SIZE 20000000\r\n'
                '250-STARTTLS\r\n'
                '250-DELIVERBY\r\n')
        resp = resp + self._extrafeatures + '250 HELP'
        self.push(resp)
        self.seen_greeting = arg
        self.extended_smtp = True

    def smtp_VRFY(self, arg):
        # For max compatibility smtplib should be sending the raw address.
        if arg in sim_users:
            self.push('250 %s %s' % (sim_users[arg], smtplib.quoteaddr(arg)))
        else:
            self.push('550 No such user: %s' % arg)

    def smtp_EXPN(self, arg):
        list_name = arg.lower()
        if list_name in sim_lists:
            user_list = sim_lists[list_name]
            for n, user_email in enumerate(user_list):
                quoted_addr = smtplib.quoteaddr(user_email)
                # "250-" continuation lines for all members but the last.
                if n < len(user_list) - 1:
                    self.push('250-%s %s' % (sim_users[user_email], quoted_addr))
                else:
                    self.push('250 %s %s' % (sim_users[user_email], quoted_addr))
        else:
            self.push('550 No access for you!')

    def smtp_QUIT(self, arg):
        if self.quit_response is None:
            super(SimSMTPChannel, self).smtp_QUIT(arg)
        else:
            self.push(self.quit_response)
            self.close_when_done()

    def smtp_MAIL(self, arg):
        if self.mail_response is None:
            super().smtp_MAIL(arg)
        else:
            self.push(self.mail_response)
            if self.disconnect:
                self.close_when_done()

    def smtp_RCPT(self, arg):
        if self.rcpt_response is None:
            super().smtp_RCPT(arg)
            return
        # Serve the canned replies one per RCPT command.
        self.rcpt_count += 1
        self.push(self.rcpt_response[self.rcpt_count-1])

    def smtp_RSET(self, arg):
        self.rset_count += 1
        super().smtp_RSET(arg)

    def smtp_DATA(self, arg):
        if self.data_response is None:
            super().smtp_DATA(arg)
        else:
            self.push(self.data_response)

    def handle_error(self):
        # Fail loudly instead of letting asyncore swallow exceptions.
        raise
class SimSMTPServer(smtpd.SMTPServer):
    """smtpd server that speaks through SimSMTPChannel and records the
    envelope addresses of the last delivered message."""

    channel_class = SimSMTPChannel

    def __init__(self, *args, **kw):
        # Extra EHLO feature lines (e.g. "AUTH PLAIN") added via add_feature().
        self._extra_features = []
        # Filled by process_message: {'from': ..., 'tos': [...]}.
        self._addresses = {}
        smtpd.SMTPServer.__init__(self, *args, **kw)

    def handle_accepted(self, conn, addr):
        self._SMTPchannel = self.channel_class(
            self._extra_features, self, conn, addr,
            decode_data=self._decode_data)

    def process_message(self, peer, mailfrom, rcpttos, data):
        self._addresses['from'] = mailfrom
        self._addresses['tos'] = rcpttos

    def add_feature(self, feature):
        self._extra_features.append(feature)

    def handle_error(self):
        # Fail loudly instead of letting asyncore swallow exceptions.
        raise
# Test various SMTP & ESMTP commands/behaviors that require a simulated server
# (i.e., something with more features than DebuggingServer)
class SMTPSimTests(unittest.TestCase):
    """SMTP & ESMTP command tests that need the richer simulated server
    (EHLO features, VRFY/EXPN data, AUTH, canned error responses)."""

    def setUp(self):
        self.real_getfqdn = socket.getfqdn
        socket.getfqdn = mock_socket.getfqdn
        self.serv_evt = threading.Event()
        self.client_evt = threading.Event()
        # Pick a random unused port by passing 0 for the port number
        self.serv = SimSMTPServer((HOST, 0), ('nowhere', -1), decode_data=True)
        # Keep a note of what port was assigned
        self.port = self.serv.socket.getsockname()[1]
        serv_args = (self.serv, self.serv_evt, self.client_evt)
        self.thread = threading.Thread(target=debugging_server, args=serv_args)
        self.thread.start()
        # wait until server thread has assigned a port number
        self.serv_evt.wait()
        self.serv_evt.clear()

    def tearDown(self):
        socket.getfqdn = self.real_getfqdn
        # indicate that the client is finished
        self.client_evt.set()
        # wait for the server thread to terminate
        self.serv_evt.wait()
        self.thread.join()

    def testBasic(self):
        # smoke test
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
        smtp.quit()

    def testEHLO(self):
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
        # no features should be present before the EHLO
        self.assertEqual(smtp.esmtp_features, {})
        # features expected from the test server
        expected_features = {'expn':'',
                             'size': '20000000',
                             'starttls': '',
                             'deliverby': '',
                             'help': '',
                             }
        smtp.ehlo()
        self.assertEqual(smtp.esmtp_features, expected_features)
        for k in expected_features:
            self.assertTrue(smtp.has_extn(k))
        self.assertFalse(smtp.has_extn('unsupported-feature'))
        smtp.quit()

    def testVRFY(self):
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
        for addr_spec, name in sim_users.items():
            expected_known = (250, bytes('%s %s' %
                                         (name, smtplib.quoteaddr(addr_spec)),
                                         "ascii"))
            self.assertEqual(smtp.vrfy(addr_spec), expected_known)
        u = 'nobody@nowhere.com'
        expected_unknown = (550, ('No such user: %s' % u).encode('ascii'))
        self.assertEqual(smtp.vrfy(u), expected_unknown)
        smtp.quit()

    def testEXPN(self):
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
        for listname, members in sim_lists.items():
            users = []
            for m in members:
                users.append('%s %s' % (sim_users[m], smtplib.quoteaddr(m)))
            expected_known = (250, bytes('\n'.join(users), "ascii"))
            self.assertEqual(smtp.expn(listname), expected_known)
        u = 'PSU-Members-List'
        expected_unknown = (550, b'No access for you!')
        self.assertEqual(smtp.expn(u), expected_unknown)
        smtp.quit()

    def testAUTH_PLAIN(self):
        self.serv.add_feature("AUTH PLAIN")
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
        resp = smtp.login(sim_auth[0], sim_auth[1])
        self.assertEqual(resp, (235, b'Authentication Succeeded'))
        smtp.close()

    def testAUTH_LOGIN(self):
        self.serv.add_feature("AUTH LOGIN")
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
        resp = smtp.login(sim_auth[0], sim_auth[1])
        self.assertEqual(resp, (235, b'Authentication Succeeded'))
        smtp.close()

    def testAUTH_CRAM_MD5(self):
        self.serv.add_feature("AUTH CRAM-MD5")
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
        resp = smtp.login(sim_auth[0], sim_auth[1])
        self.assertEqual(resp, (235, b'Authentication Succeeded'))
        smtp.close()

    def testAUTH_multiple(self):
        # Test that multiple authentication methods are tried.
        self.serv.add_feature("AUTH BOGUS PLAIN LOGIN CRAM-MD5")
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
        resp = smtp.login(sim_auth[0], sim_auth[1])
        self.assertEqual(resp, (235, b'Authentication Succeeded'))
        smtp.close()

    def test_auth_function(self):
        # Drive SMTP.auth() directly with each of the client's auth_* helpers.
        supported = {'CRAM-MD5', 'PLAIN', 'LOGIN'}
        for mechanism in supported:
            self.serv.add_feature("AUTH {}".format(mechanism))
        for mechanism in supported:
            with self.subTest(mechanism=mechanism):
                smtp = smtplib.SMTP(HOST, self.port,
                                    local_hostname='localhost', timeout=15)
                smtp.ehlo('foo')
                smtp.user, smtp.password = sim_auth[0], sim_auth[1]
                method = 'auth_' + mechanism.lower().replace('-', '_')
                resp = smtp.auth(mechanism, getattr(smtp, method))
                self.assertEqual(resp, (235, b'Authentication Succeeded'))
                smtp.close()

    def test_quit_resets_greeting(self):
        smtp = smtplib.SMTP(HOST, self.port,
                            local_hostname='localhost',
                            timeout=15)
        code, message = smtp.ehlo()
        self.assertEqual(code, 250)
        self.assertIn('size', smtp.esmtp_features)
        smtp.quit()
        self.assertNotIn('size', smtp.esmtp_features)
        smtp.connect(HOST, self.port)
        self.assertNotIn('size', smtp.esmtp_features)
        smtp.ehlo_or_helo_if_needed()
        self.assertIn('size', smtp.esmtp_features)
        smtp.quit()

    def test_with_statement(self):
        with smtplib.SMTP(HOST, self.port) as smtp:
            code, message = smtp.noop()
            self.assertEqual(code, 250)
        # Leaving the with-block quits the session; further sends must fail.
        self.assertRaises(smtplib.SMTPServerDisconnected, smtp.send, b'foo')
        with smtplib.SMTP(HOST, self.port) as smtp:
            smtp.close()
        self.assertRaises(smtplib.SMTPServerDisconnected, smtp.send, b'foo')

    def test_with_statement_QUIT_failure(self):
        with self.assertRaises(smtplib.SMTPResponseException) as error:
            with smtplib.SMTP(HOST, self.port) as smtp:
                smtp.noop()
                self.serv._SMTPchannel.quit_response = '421 QUIT FAILED'
        self.assertEqual(error.exception.smtp_code, 421)
        self.assertEqual(error.exception.smtp_error, b'QUIT FAILED')

    #TODO: add tests for correct AUTH method fallback now that the
    #test infrastructure can support it.

    # Issue 17498: make sure _rset does not raise SMTPServerDisconnected exception
    def test__rest_from_mail_cmd(self):
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
        smtp.noop()
        self.serv._SMTPchannel.mail_response = '451 Requested action aborted'
        self.serv._SMTPchannel.disconnect = True
        with self.assertRaises(smtplib.SMTPSenderRefused):
            smtp.sendmail('John', 'Sally', 'test message')
        self.assertIsNone(smtp.sock)

    # Issue 5713: make sure close, not rset, is called if we get a 421 error
    def test_421_from_mail_cmd(self):
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
        smtp.noop()
        self.serv._SMTPchannel.mail_response = '421 closing connection'
        with self.assertRaises(smtplib.SMTPSenderRefused):
            smtp.sendmail('John', 'Sally', 'test message')
        self.assertIsNone(smtp.sock)
        self.assertEqual(self.serv._SMTPchannel.rset_count, 0)

    def test_421_from_rcpt_cmd(self):
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
        smtp.noop()
        self.serv._SMTPchannel.rcpt_response = ['250 accepted', '421 closing']
        with self.assertRaises(smtplib.SMTPRecipientsRefused) as r:
            smtp.sendmail('John', ['Sally', 'Frank', 'George'], 'test message')
        self.assertIsNone(smtp.sock)
        self.assertEqual(self.serv._SMTPchannel.rset_count, 0)
        self.assertDictEqual(r.exception.args[0], {'Frank': (421, b'closing')})

    def test_421_from_data_cmd(self):
        class MySimSMTPChannel(SimSMTPChannel):
            def found_terminator(self):
                if self.smtp_state == self.DATA:
                    self.push('421 closing')
                else:
                    super().found_terminator()
        self.serv.channel_class = MySimSMTPChannel
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
        smtp.noop()
        with self.assertRaises(smtplib.SMTPDataError):
            smtp.sendmail('John@foo.org', ['Sally@foo.org'], 'test message')
        self.assertIsNone(smtp.sock)
        self.assertEqual(self.serv._SMTPchannel.rcpt_count, 0)

    def test_smtputf8_NotSupportedError_if_no_server_support(self):
        smtp = smtplib.SMTP(
            HOST, self.port, local_hostname='localhost', timeout=3)
        self.addCleanup(smtp.close)
        smtp.ehlo()
        self.assertTrue(smtp.does_esmtp)
        self.assertFalse(smtp.has_extn('smtputf8'))
        self.assertRaises(
            smtplib.SMTPNotSupportedError,
            smtp.sendmail,
            'John', 'Sally', '', mail_options=['BODY=8BITMIME', 'SMTPUTF8'])
        self.assertRaises(
            smtplib.SMTPNotSupportedError,
            smtp.mail, 'John', options=['BODY=8BITMIME', 'SMTPUTF8'])

    def test_send_unicode_without_SMTPUTF8(self):
        smtp = smtplib.SMTP(
            HOST, self.port, local_hostname='localhost', timeout=3)
        self.addCleanup(smtp.close)
        self.assertRaises(UnicodeEncodeError, smtp.sendmail, 'Alice', 'Böb', '')
        self.assertRaises(UnicodeEncodeError, smtp.mail, 'Älice')

    def test_send_message_error_on_non_ascii_addrs_if_no_smtputf8(self):
        # This test is located here and not in the SMTPUTF8SimTests
        # class because it needs a "regular" SMTP server to work
        msg = EmailMessage()
        msg['From'] = "Páolo <főo@bar.com>"
        msg['To'] = 'Dinsdale'
        msg['Subject'] = 'Nudge nudge, wink, wink \u1F609'
        smtp = smtplib.SMTP(
            HOST, self.port, local_hostname='localhost', timeout=3)
        self.addCleanup(smtp.close)
        with self.assertRaises(smtplib.SMTPNotSupportedError):
            smtp.send_message(msg)

    def test_name_field_not_included_in_envelop_addresses(self):
        smtp = smtplib.SMTP(
            HOST, self.port, local_hostname='localhost', timeout=3
        )
        self.addCleanup(smtp.close)
        message = EmailMessage()
        message['From'] = email.utils.formataddr(('Michaël', 'michael@example.com'))
        message['To'] = email.utils.formataddr(('René', 'rene@example.com'))
        self.assertDictEqual(smtp.send_message(message), {})
        # Only the bare addr-specs go into the SMTP envelope.
        self.assertEqual(self.serv._addresses['from'], 'michael@example.com')
        self.assertEqual(self.serv._addresses['tos'], ['rene@example.com'])
class SimSMTPUTF8Server(SimSMTPServer):
    """Simulated SMTP server that advertises the SMTPUTF8 and 8BITMIME
    extensions and records the envelope of the last message it received
    (in the ``last_*`` attributes) for tests to assert on.
    """
    def __init__(self, *args, **kw):
        # The base SMTP server turns these on automatically, but our test
        # server is set up to munge the EHLO response, so we need to provide
        # them as well. And yes, the call is to SMTPServer not SimSMTPServer.
        self._extra_features = ['SMTPUTF8', '8BITMIME']
        smtpd.SMTPServer.__init__(self, *args, **kw)
    def handle_accepted(self, conn, addr):
        # Build the channel with the extra EHLO features, forwarding the
        # decode/SMTPUTF8 settings this server was constructed with.
        self._SMTPchannel = self.channel_class(
            self._extra_features, self, conn, addr,
            decode_data=self._decode_data,
            enable_SMTPUTF8=self.enable_SMTPUTF8,
        )
    def process_message(self, peer, mailfrom, rcpttos, data, mail_options=None,
                        rcpt_options=None):
        # Record everything so tests can inspect what the server saw.
        self.last_peer = peer
        self.last_mailfrom = mailfrom
        self.last_rcpttos = rcpttos
        self.last_message = data
        self.last_mail_options = mail_options
        self.last_rcpt_options = rcpt_options
class SMTPUTF8SimTests(unittest.TestCase):
    """Tests of client-side SMTPUTF8/8BITMIME handling against a simulated
    server (SimSMTPUTF8Server) that advertises both extensions.
    """
    # Show full diffs when multi-line string comparisons fail.
    maxDiff = None
    def setUp(self):
        # Replace getfqdn so no real DNS lookups happen during the tests.
        self.real_getfqdn = socket.getfqdn
        socket.getfqdn = mock_socket.getfqdn
        # Events used to hand-shake with the server thread (see tearDown).
        self.serv_evt = threading.Event()
        self.client_evt = threading.Event()
        # Pick a random unused port by passing 0 for the port number
        self.serv = SimSMTPUTF8Server((HOST, 0), ('nowhere', -1),
                                      decode_data=False,
                                      enable_SMTPUTF8=True)
        # Keep a note of what port was assigned
        self.port = self.serv.socket.getsockname()[1]
        serv_args = (self.serv, self.serv_evt, self.client_evt)
        self.thread = threading.Thread(target=debugging_server, args=serv_args)
        self.thread.start()
        # wait until server thread has assigned a port number
        self.serv_evt.wait()
        self.serv_evt.clear()
    def tearDown(self):
        # Restore the real getfqdn.
        socket.getfqdn = self.real_getfqdn
        # indicate that the client is finished
        self.client_evt.set()
        # wait for the server thread to terminate
        self.serv_evt.wait()
        self.thread.join()
    def test_test_server_supports_extensions(self):
        """Sanity check: the simulated server advertises smtputf8."""
        smtp = smtplib.SMTP(
            HOST, self.port, local_hostname='localhost', timeout=3)
        self.addCleanup(smtp.close)
        smtp.ehlo()
        self.assertTrue(smtp.does_esmtp)
        self.assertTrue(smtp.has_extn('smtputf8'))
    def test_send_unicode_with_SMTPUTF8_via_sendmail(self):
        """sendmail() forwards non-ASCII envelope addresses when the caller
        explicitly requests the SMTPUTF8 mail option."""
        m = '¡a test message containing unicode!'.encode('utf-8')
        smtp = smtplib.SMTP(
            HOST, self.port, local_hostname='localhost', timeout=3)
        self.addCleanup(smtp.close)
        smtp.sendmail('Jőhn', 'Sálly', m,
                      mail_options=['BODY=8BITMIME', 'SMTPUTF8'])
        # The server should have seen the unicode addresses and the options.
        self.assertEqual(self.serv.last_mailfrom, 'Jőhn')
        self.assertEqual(self.serv.last_rcpttos, ['Sálly'])
        self.assertEqual(self.serv.last_message, m)
        self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
        self.assertIn('SMTPUTF8', self.serv.last_mail_options)
        self.assertEqual(self.serv.last_rcpt_options, [])
    def test_send_unicode_with_SMTPUTF8_via_low_level_API(self):
        """mail()/rcpt()/data() accept non-ASCII addresses when the
        SMTPUTF8 option is passed to mail()."""
        m = '¡a test message containing unicode!'.encode('utf-8')
        smtp = smtplib.SMTP(
            HOST, self.port, local_hostname='localhost', timeout=3)
        self.addCleanup(smtp.close)
        smtp.ehlo()
        self.assertEqual(
            smtp.mail('Jő', options=['BODY=8BITMIME', 'SMTPUTF8']),
            (250, b'OK'))
        self.assertEqual(smtp.rcpt('János'), (250, b'OK'))
        self.assertEqual(smtp.data(m), (250, b'OK'))
        self.assertEqual(self.serv.last_mailfrom, 'Jő')
        self.assertEqual(self.serv.last_rcpttos, ['János'])
        self.assertEqual(self.serv.last_message, m)
        self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
        self.assertIn('SMTPUTF8', self.serv.last_mail_options)
        self.assertEqual(self.serv.last_rcpt_options, [])
    def test_send_message_uses_smtputf8_if_addrs_non_ascii(self):
        """send_message() turns on SMTPUTF8/8BITMIME automatically when a
        header address contains non-ASCII characters."""
        msg = EmailMessage()
        msg['From'] = "Páolo <főo@bar.com>"
        msg['To'] = 'Dinsdale'
        msg['Subject'] = 'Nudge nudge, wink, wink \u1F609'
        # XXX I don't know why I need two \n's here, but this is an existing
        # bug (if it is one) and not a problem with the new functionality.
        msg.set_content("oh là là, know what I mean, know what I mean?\n\n")
        # XXX smtpd converts received /r/n to /n, so we can't easily test that
        # we are successfully sending /r/n :(.
        expected = textwrap.dedent("""\
            From: Páolo <főo@bar.com>
            To: Dinsdale
            Subject: Nudge nudge, wink, wink \u1F609
            Content-Type: text/plain; charset="utf-8"
            Content-Transfer-Encoding: 8bit
            MIME-Version: 1.0
            oh là là, know what I mean, know what I mean?
            """)
        smtp = smtplib.SMTP(
            HOST, self.port, local_hostname='localhost', timeout=3)
        self.addCleanup(smtp.close)
        self.assertEqual(smtp.send_message(msg), {})
        # Envelope sender is the bare addr-spec; message arrives 8bit-clean.
        self.assertEqual(self.serv.last_mailfrom, 'főo@bar.com')
        self.assertEqual(self.serv.last_rcpttos, ['Dinsdale'])
        self.assertEqual(self.serv.last_message.decode(), expected)
        self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
        self.assertIn('SMTPUTF8', self.serv.last_mail_options)
        self.assertEqual(self.serv.last_rcpt_options, [])
# Base64-encoded AUTH PLAIN initial-response for the test credentials:
# empty authzid, NUL, authcid 'psu', NUL, password 'doesnotexist'
# (the SASL PLAIN message format).
EXPECTED_RESPONSE = encode_base64(b'\0psu\0doesnotexist', eol='')
class SimSMTPAUTHInitialResponseChannel(SimSMTPChannel):
    """Channel that only accepts AUTH PLAIN carrying an initial-response."""
    def smtp_AUTH(self, arg):
        # RFC 4954's AUTH command allows an optional initial-response on
        # the command line.  Not every mechanism supports that; AUTH PLAIN
        # does, so require it here and match it against the hard-coded
        # base64 blob for the test credentials.  See issue #15014.
        words = arg.split()
        if (words[0].lower() == 'plain' and len(words) == 2
                and words[1] == EXPECTED_RESPONSE):
            self.push('235 Ok')
        else:
            self.push('571 Bad authentication')
class SimSMTPAUTHInitialResponseServer(SimSMTPServer):
    """Simulated server whose channel requires an AUTH PLAIN initial-response."""
    # Swap in the channel that understands the initial-response form.
    channel_class = SimSMTPAUTHInitialResponseChannel
class SMTPAUTHInitialResponseSimTests(unittest.TestCase):
    """Tests that the client sends an AUTH PLAIN initial-response
    (RFC 4954) rather than waiting for a server challenge.
    """
    def setUp(self):
        # Replace getfqdn so no real DNS lookups happen during the tests.
        self.real_getfqdn = socket.getfqdn
        socket.getfqdn = mock_socket.getfqdn
        # Events used to hand-shake with the server thread (see tearDown).
        self.serv_evt = threading.Event()
        self.client_evt = threading.Event()
        # Pick a random unused port by passing 0 for the port number
        self.serv = SimSMTPAUTHInitialResponseServer(
            (HOST, 0), ('nowhere', -1), decode_data=True)
        # Keep a note of what port was assigned
        self.port = self.serv.socket.getsockname()[1]
        serv_args = (self.serv, self.serv_evt, self.client_evt)
        self.thread = threading.Thread(target=debugging_server, args=serv_args)
        self.thread.start()
        # wait until server thread has assigned a port number
        self.serv_evt.wait()
        self.serv_evt.clear()
    def tearDown(self):
        socket.getfqdn = self.real_getfqdn
        # indicate that the client is finished
        self.client_evt.set()
        # wait for the server thread to terminate
        self.serv_evt.wait()
        self.thread.join()
    def testAUTH_PLAIN_initial_response_login(self):
        """login() succeeds via AUTH PLAIN with an initial-response."""
        self.serv.add_feature('AUTH PLAIN')
        smtp = smtplib.SMTP(HOST, self.port,
                            local_hostname='localhost', timeout=15)
        # addCleanup (instead of an inline close()) releases the socket
        # even when login() raises, matching the other tests in this file.
        self.addCleanup(smtp.close)
        smtp.login('psu', 'doesnotexist')
    def testAUTH_PLAIN_initial_response_auth(self):
        """auth() with auth_plain sends the initial-response and gets 235."""
        self.serv.add_feature('AUTH PLAIN')
        smtp = smtplib.SMTP(HOST, self.port,
                            local_hostname='localhost', timeout=15)
        self.addCleanup(smtp.close)
        smtp.user = 'psu'
        smtp.password = 'doesnotexist'
        code, response = smtp.auth('plain', smtp.auth_plain)
        self.assertEqual(code, 235)
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
| 38.939144 | 90 | 0.608397 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.