hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4fb78c149a41dbeab3a570874abce45140a86530 | 959 | py | Python | python/peakAccelTest.py | crotwell/dragrace | 2fdb009e9ca7e868e1435d3a38ac81a0b3698433 | [
"MIT"
] | 12 | 2018-11-27T16:18:16.000Z | 2020-01-10T03:17:26.000Z | python/peakAccelTest.py | crotwell/dragrace | 2fdb009e9ca7e868e1435d3a38ac81a0b3698433 | [
"MIT"
] | null | null | null | python/peakAccelTest.py | crotwell/dragrace | 2fdb009e9ca7e868e1435d3a38ac81a0b3698433 | [
"MIT"
] | 1 | 2019-04-12T18:34:22.000Z | 2019-04-12T18:34:22.000Z | from SeismogramTasks import VectorMagnitude, Rotate_2D_TimeSeries, Coordinate_Rotation_2D
from peakACC import Magnitude_ThreeC_TimeSeries_jake
import math
x = [1.2, 1.5, 0.0, 0.4, -0.3, 1.5]
y = [0.3, 0.2, 0.7, 0.3, 0.0, -0.5]
z = [-0.1, 1.2, 1.4, 1.0, 1.1, 0.2]
theta = [110.0, 45.0, -45.0, 20.0, -20.0, 30.0]
# Rotate xyz array, find vector mag
def maxaccel(x, y, z, theta):
# Rotate
r = Rotate_2D_TimeSeries(x, z, theta)
x_prime = r[0]
z_prime = r[1]
# x_prime = []
# z_prime = []
# for i,j,k in x,z,theta:
# r = Coordinate_Rotation_2D(i, j, k)
# x_prime.append(r[0])
# z_prime.append(r[1])
# find vector mag
vmag = Magnitude_ThreeC_TimeSeries_jake(x_prime, z_prime, y)
ACCjson = {
"x": x_prime,
"y": y,
"z": z_prime,
"theta": theta,
"VMAG": vmag
}
return ACCjson
v = maxaccel(x, y, z, 110.0)
magnitude = v["VMAG"]
print(magnitude)
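# A hedged follow-up sketch (kept as comments): the theta list defined above
# would supply one rotation angle per sample, assuming Rotate_2D_TimeSeries
# also accepts per-sample angles, which this file alone does not confirm.
# v_per_sample = maxaccel(x, y, z, theta)
# print(v_per_sample["VMAG"])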
| 23.975 | 89 | 0.580813 | 170 | 959 | 3.135294 | 0.288235 | 0.056285 | 0.067542 | 0.108818 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.093185 | 0.250261 | 959 | 39 | 90 | 24.589744 | 0.648122 | 0.214807 | 0 | 0 | 0 | 0 | 0.021622 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0.130435 | 0 | 0.217391 | 0.043478 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4fb7c5d2e861fa21e084d759a0950f43e6cd8a52 | 6,898 | py | Python | or_suite/agents/rl/utils/tree_model_based.py | JasmineSamadi/ORSuite | e2b2b0a5b497ea6566e794dcef1f176081fca4ce | [
"MIT"
] | 4 | 2021-12-01T10:56:17.000Z | 2022-02-06T17:07:43.000Z | or_suite/agents/rl/utils/tree_model_based.py | JasmineSamadi/ORSuite | e2b2b0a5b497ea6566e794dcef1f176081fca4ce | [
"MIT"
] | 2 | 2021-08-11T13:25:01.000Z | 2022-03-20T19:23:23.000Z | or_suite/agents/rl/utils/tree_model_based.py | JasmineSamadi/ORSuite | e2b2b0a5b497ea6566e794dcef1f176081fca4ce | [
"MIT"
] | 3 | 2021-04-02T20:24:25.000Z | 2021-04-10T23:53:28.000Z | ''' Implementation of a tree structure used in the Adaptive Discretization Algorithm '''
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib as mpl
from or_suite.agents.rl.utils.bounds_utils import bounds_contains, split_bounds
from or_suite.agents.rl.utils.tree import Node, Tree
class MBNode(Node):
"""
Node representing an l-infinity ball in R^d, that points
to sub-balls (defined via node children).
Stores a value for the q_estimate, a number of visits, and
estimates of the rewards and of the transition probabilities to a list of other nodes.
This class is used to represent (and store data about)
a tuple (state, action, stage) = (x, a, h).
Attributes:
bounds : numpy.ndarray
Bounds of each dimension [ [x0, y0], [x1, y1], ..., [xd, yd] ],
representing the cartesian product in R^d:
[x0, y0] X [x1, y1] X ... X [xd, yd]
depth: int
Node depth, root is at depth 0.
qVal : double, default: 0
Initial node Q value
num_visits : int, default = 0
Number of visits to the node.
rEst : double, default: 0
Initial reward estimate for the node.
pEst : list
Estimates of transition probabilities, indexed parallel to the tree's state leaves.
"""
def __init__(self, bounds, depth, qVal, rEst, pEst, num_visits):
self.dim = len(bounds)
self.radius = (bounds[:, 1] - bounds[:, 0]).max() / 2.0
assert self.radius > 0.0
self.bounds = bounds
self.depth = depth
self.qVal = qVal
self.rEst = rEst
self.pEst = pEst
self.num_visits = num_visits
self.children = []
# Splits a node
def split_node(self, inherit_flag = True, value = 1):
child_bounds = split_bounds(self.bounds)
for bounds in child_bounds:
if inherit_flag: # updates estimates based on whether we are inheriting estimates or not
self.children.append(
MBNode(bounds, self.depth+1, self.qVal, self.rEst, self.pEst.copy(), self.num_visits)
)
else:
self.children.append(
MBNode(bounds, self.depth+1, value, 0, [0 for _ in range(len(self.pEst))], 0)
)
return self.children
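# A minimal usage sketch for MBNode, kept as comments so nothing runs at import
# time; the bounds layout follows the docstring above, and split_bounds is
# assumed to return one sub-ball per orthant (2^dim children):
#   bounds = np.asarray([[0.0, 1.0], [0.0, 1.0]])  # unit square in R^2
#   node = MBNode(bounds, depth=0, qVal=5, rEst=0, pEst=[0.0], num_visits=0)
#   children = node.split_node(inherit_flag=True)  # children inherit estimates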
class MBTree(Tree):
"""
Tree representing a collection of l-infinity balls in R^d, each of which points
to sub-balls (defined via node children).
Stores a hierarchical collection of nodes, each with a value for the q_estimate, a number of visits, and estimates of rewards and transition probabilities.
Attributes:
dim : int
Dimension of the space of R^d.
head: (Node)
Pointer to the first node in the hierarchical partition
epLen: (int)
Number of episodes (used for initializing estimates for Q Values)
"""
# Defines a tree by the number of steps for the initialization
def __init__(self, epLen, state_dim, action_dim):
self.dim = state_dim+action_dim # total dimension of state and action space
self.epLen = epLen
self.state_dim = state_dim # stores state space dimension separately
# initializes head of the tree
bounds = np.asarray([[0.0,1.0] for _ in range(self.dim)])
self.head = MBNode(bounds, 0, epLen, 0, [0.0], 0)
# initializes state leaves of the tree and their value estimates used in the model based algorithm
self.state_leaves = [[0.5 for _ in range(self.state_dim)]]
self.leaves = [self.head]
self.vEst = [self.epLen]
def get_leaves(self):
return self.leaves
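# A hedged construction sketch (comments only): with epLen=5, state_dim=1 and
# action_dim=1, the tree starts as a single leaf covering [0,1]^2,
# state_leaves holds the state-space center, and vEst is initialized to epLen.
#   tree = MBTree(epLen=5, state_dim=1, action_dim=1)
#   tree.get_leaves()   # -> [tree.head]
#   tree.state_leaves   # -> [[0.5]]
#   tree.vEst           # -> [5]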
def tr_split_node(self, node, timestep = 0, inherit_flag = True, value = 1, previous_tree = None):
"""
Splits a node, while simultaneously updating the estimate of the transition kernels for all nodes if needed.
Args:
node: MBNode to split
timestep: (int) current timestep; the previous tree's transition estimates are only updated when timestep >= 1
inherit_flag: (bool) whether or not to inherit estimates from the parent
value: (float) default qVal estimate
previous_tree: (MBTree) tree from the previous timestep whose transition estimates are updated after the split
"""
# Splits a node and updates the list of leaves
self.leaves.remove(node)
children = node.split_node(inherit_flag, value)
self.leaves = self.leaves + children
# Determines if we also need to adjust the state_leaves and carry those
# estimates down as well
# Gets one of their state value
child_1_bounds = children[0].bounds
child_1_radius = (child_1_bounds[:, 1] - child_1_bounds[:, 0]).max() / 2.0
child_1_state = child_1_bounds[:self.state_dim, 0] + child_1_radius
if np.min(np.abs(np.asarray(self.state_leaves) - child_1_state)) >= child_1_radius: # determines if the children are at a finer granularity
# gets state portion of the value of the current node
node_radius = (node.bounds[:, 1] - node.bounds[:, 0]).max() / 2.0
node_state = node.bounds[:self.state_dim, 0] + node_radius
# location of node in the larger state_leaves list
parent_index = np.argmin(np.max(np.abs(np.asarray(self.state_leaves) - node_state), axis=1))
parent_vEst = self.vEst[parent_index]
# pops their estimate
self.state_leaves.pop(parent_index)
self.vEst.pop(parent_index)
# keeps track of the number added for redistributing the transition kernel estimate
num_add = 0
for child in node.children:
child_radius = (child.bounds[:,1] - child.bounds[:,0]).max() / 2.0
child_state = child.bounds[:self.state_dim, 0] + child_radius # gets the state portion of the node
# determines if this child state has been added before
if len(self.state_leaves) == 0 or np.min(np.max(np.abs(np.asarray(self.state_leaves) - child_state), axis=1)) > 0:
num_add += 1
self.state_leaves.append(child_state)
self.vEst.append(parent_vEst) # updates estimates based on the parent
# updates the transition distribution for all leaves in the previous tree
if timestep >= 1:
previous_tree.update_transitions_after_split(parent_index, num_add)
return children
def update_transitions_after_split(self, parent_index, num_add):
"""
Helper function in order to update the transition estimates after a split.
Args:
parent_index: location in the list where the parent node was
num_add: the number of new nodes that were added, used for redistributing the transition kernel estimate
"""
for node in self.leaves:
pEst_parent = node.pEst[parent_index]
node.pEst.pop(parent_index)
for _ in range(num_add):
node.pEst.append(pEst_parent / num_add) | 34.49 | 147 | 0.623079 | 943 | 6,898 | 4.440085 | 0.227996 | 0.025794 | 0.025078 | 0.010509 | 0.197277 | 0.17411 | 0.139718 | 0.139718 | 0.111297 | 0.095056 | 0 | 0.013989 | 0.295303 | 6,898 | 200 | 148 | 34.49 | 0.847357 | 0.385764 | 0 | 0.12987 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012987 | 1 | 0.077922 | false | 0 | 0.12987 | 0.012987 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4fb817161338ef80c25e699b2bc29de57a33bf5a | 1,219 | py | Python | netpyne/tutorials/saving_loading_tut/saving_netParams.py | adamjhn/netpyne | b9e104645f11fe6688496b22cd4183f463e11adc | [
"MIT"
] | 120 | 2015-12-29T08:30:08.000Z | 2021-11-16T11:49:58.000Z | netpyne/tutorials/saving_loading_tut/saving_netParams.py | ericaygriffith/netpyne | d5745015755855a1214e25d6033d3685cccace0d | [
"MIT"
] | 1,178 | 2020-06-21T16:52:57.000Z | 2021-03-11T15:47:54.000Z | netpyne/tutorials/saving_loading_tut/saving_netParams.py | ericaygriffith/netpyne | d5745015755855a1214e25d6033d3685cccace0d | [
"MIT"
] | 143 | 2016-01-09T17:51:43.000Z | 2022-01-02T06:37:12.000Z | from netpyne import specs, sim
from __main__ import cfg
# Network parameters
netParams = specs.NetParams() # object of class NetParams to store the network parameters
## Cell parameters
PYRcell = {'secs': {}}
PYRcell['secs']['soma'] = {'geom': {}, 'mechs': {}}
PYRcell['secs']['soma']['geom'] = {'diam': 18.8, 'L': 18.8, 'Ra': 123.0}
PYRcell['secs']['soma']['mechs']['hh'] = {'gnabar': 0.12, 'gkbar': 0.036, 'gl': 0.003, 'el': -70}
netParams.cellParams['PYR'] = PYRcell
## Population parameters
netParams.popParams['S'] = {'cellType': 'PYR', 'numCells': 20}
netParams.popParams['M'] = {'cellType': 'PYR', 'numCells': 20}
## Synaptic mechanism parameters
netParams.synMechParams['exc'] = {'mod': 'Exp2Syn', 'tau1': 0.1, 'tau2': cfg.synMechTau2, 'e': 0}
# Stimulation parameters
netParams.stimSourceParams['bkg'] = {'type': 'NetStim', 'rate': 10, 'noise': 0.5}
netParams.stimTargetParams['bkg->PYR'] = {'source': 'bkg', 'conds': {'cellType': 'PYR'}, 'weight': 0.01, 'delay': 5, 'synMech': 'exc'}
## Cell connectivity rules
netParams.connParams['S->M'] = {
'preConds': {'pop': 'S'},
'postConds': {'pop': 'M'},
'probability': 0.5,
'weight': cfg.connWeight,
'delay': 5,
'synMech': 'exc'}
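# A hedged next step for this saving tutorial (save method assumed from
# netpyne's saving/loading tutorials): persist the parameters to disk so they
# can be reloaded later.
# netParams.save('netParams.json')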
| 35.852941 | 134 | 0.62018 | 148 | 1,219 | 5.081081 | 0.554054 | 0.101064 | 0.05984 | 0.050532 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.042857 | 0.138638 | 1,219 | 33 | 135 | 36.939394 | 0.673333 | 0.156686 | 0 | 0 | 0 | 0 | 0.268701 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.1 | 0 | 0.1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4fc21df0fc15f305205a0557ed15aa9949db40c0 | 60,989 | py | Python | moral_kombat_backend/lead/tables/views.py | david-fisher/320-S21-Track2 | abd78fa150c07cc375cff4bd1a5595f3267a1884 | [
"BSD-3-Clause"
] | 8 | 2021-02-12T16:13:55.000Z | 2021-03-12T00:24:46.000Z | moral_kombat_backend/lead/tables/views.py | david-fisher/320-S21-Track2 | abd78fa150c07cc375cff4bd1a5595f3267a1884 | [
"BSD-3-Clause"
] | 225 | 2021-02-17T19:24:25.000Z | 2021-10-02T19:10:28.000Z | moral_kombat_backend/lead/tables/views.py | david-fisher/320-S21-Track2 | abd78fa150c07cc375cff4bd1a5595f3267a1884 | [
"BSD-3-Clause"
] | 1 | 2022-03-24T15:38:01.000Z | 2022-03-24T15:38:01.000Z | from django.shortcuts import render
from rest_framework import viewsets, permissions, generics
from rest_framework.views import APIView
from rest_framework.response import Response
from django_filters.rest_framework import DjangoFilterBackend
from .models import *
from .serializer import *
from django.core import serializers
from rest_framework import status
import json
from django.db import connection
from rest_framework.parsers import JSONParser
from rest_framework.viewsets import ModelViewSet
from django.http.response import JsonResponse
from rest_framework.decorators import action
from rest_framework.decorators import api_view
from rest_framework import mixins
# DemographicsSerializer, StudentSerializer, ProfessorSerializer, ScenariosSerializer, allScenariosSerializer, Stakeholder_pageSerializer, StakeholdersSerializer, ConversationsSerializer
def getcredentials(request):
credentials = {
"uid": request.META['uid'],
"name": request.META['displayname'],
"affiliation": request.META['edupersonprimaryaffiliation'],
"email": request.META['mail'],
#"title": request.META['title'],
"intid": request.META['fcidnumber']
}
credentials.update({"intid": credentials.get("intid").split("@")[0]})
return credentials
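# A hedged example of the dict returned above (values illustrative only; the
# Shibboleth-style headers are assumed from the META keys used):
# {"uid": "jdoe", "name": "Jane Doe", "affiliation": "student",
#  "email": "jdoe@example.edu", "intid": "12345678"}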
class ReturnIdentifierView(APIView):
def get(self, request, *args, **kwargs):
# if ('title' in request.META):
# return Response({"id":"professor"})
# else:
# # if(len(scenarios.objects.filter(professors_to_scenario = request.META['displayname']).values()) != 0):
# # return Response({"id":"editor"})
# # else:
# return Response({"id":"student"})
#return(Response({"id": request.META['uid']}))
if(len(professors.objects.filter(professor = request.META['uid']).values()) != 0):
#data = "You are prof " + request.META['uid']
return(Response({"id": "You are prof "}))
else:
#data = "You are student " + request.META['uid']
return(Response({"id": "You are student "}))
# if (credentials.get("title") == "lecturer"):
# return Response({"id":"professor"})
# else:
# return Response({"id":"student"})
#return Response({"id":"student"})
# stakeholders viewset - chirag - 4/14
class StakeholdersViewSet(viewsets.ModelViewSet):
def get_queryset(self):
queryset = stakeholders.objects.all()
return queryset
queryset = stakeholders.objects.all()
permission_classes = [
permissions.AllowAny
]
serializer_class = StakeholdersSerializer
filter_backends = [DjangoFilterBackend]
filterset_fields = ['scenario']
lookup_field = 'stakeholder'
# class stakeholdersviewset(viewsets.ModelViewSet):
# queryset = stakeholders.objects.all()
# permissions_classes = [
# permissions.AllowAny
# ]
# serializer_class = stakeholdersserializer
class QuestionsViewset(viewsets.ModelViewSet):
queryset = questions.objects.all()
permission_classes = [
permissions.AllowAny
]
serializer_class = QuestionsSerializer
# conversations viewset
# checked - chirag - 04/15/2021
class ConversationsViewSet(viewsets.ModelViewSet):
queryset = conversations.objects.all()
permission_classes = [
permissions.AllowAny
]
serializer_class = ConversationsSerializer
filter_backends = [DjangoFilterBackend]
filterset_fields = ['stakeholder', 'question']
class Responses_to_ConversationsViewSet(viewsets.ModelViewSet):
queryset = responses_to_conversations.objects.all()
permission_classes = [
permissions.AllowAny
]
serializer_class = Responses_to_ConversationsSerializer
# checked - chirag - 04/15/2021
class multi_conv(APIView):
def put(self, request, *args, **kwargs):
stakeholder = self.request.query_params.get('stakeholder')
if stakeholder == None:
return Response({'status': 'details'}, status=status.HTTP_404_NOT_FOUND)
for updated_conv in request.data:
extant_conv = conversations.objects.get(stakeholder = stakeholder, conversation = updated_conv['conversation'])
serializer = ConversationsSerializer(extant_conv, data=updated_conv)
if serializer.is_valid():
serializer.save()
conv_query = conversations.objects.filter(stakeholder = stakeholder).values()
return Response(conv_query)
# no change - checked - chirag - 04/15/2021
class multi_stake(APIView):
def put(self, request, *args, **kwargs):
scenario = self.request.query_params.get('scenario')
if scenario == None:
return Response({'status': 'details'}, status=status.HTTP_404_NOT_FOUND)
for updated_stake in request.data:
extant_stake = stakeholders.objects.get(scenario_id = scenario, stakeholder = updated_stake['stakeholder'])
serializer = StakeholdersSerializer(extant_stake, data=updated_stake)
if serializer.is_valid():
serializer.save()
stake_query = stakeholders.objects.filter(scenario = scenario).values()
return Response(stake_query)
# checked - ed - 4/15/2021
class multi_coverage(APIView):
def put(self, request, *args, **kwargs):
stakeholder = self.request.query_params.get('stakeholder')
if stakeholder == None:
return Response({'status': 'details'}, status=status.HTTP_404_NOT_FOUND)
for updated_coverage in request.data:
extant_coverage = coverage.objects.get(stakeholder = stakeholder, issue = updated_coverage['issue'])
serializer = coverageSerializer(extant_coverage, data=updated_coverage)
if serializer.is_valid():
serializer.save()
coverage_query = coverage.objects.filter(stakeholder = stakeholder).values()
return Response(coverage_query)
# done - chirag - 04/15/2021
class CoverageViewSet(viewsets.ModelViewSet):
queryset = coverage.objects.all()
permission_classes = [permissions.AllowAny]
serializer_class = coverageSerializer
filter_backends = [DjangoFilterBackend]
filterset_fields = ['stakeholder']
class DemographicsViewSet(viewsets.ModelViewSet):
# print(demographics.objects.all())
serializer_class = DemographicsSerializer
queryset = demographics.objects.only('student', 'age', 'gender', 'race', 'major')
# print(queryset)
permission_classes = [
permissions.AllowAny
]
class StudentsViewSet(viewsets.ModelViewSet):
queryset = students.objects.all()
permission_classes = [
permissions.AllowAny
]
serializer_class = StudentSerializer
class PagesToScenarioViewSet(viewsets.ModelViewSet):
queryset = pages_to_scenario.objects.all()
permission_classes = [
permissions.AllowAny
]
serializer_class = PagesToScenarioSerializer
class ProfessorsViewSet(viewsets.ModelViewSet):
queryset = professors.objects.all()
permission_classes = [
permissions.AllowAny
]
serializer_class = ProfessorSerializer
class StudentTimesViewSet(viewsets.ModelViewSet):
queryset = student_times.objects.all()
permission_classes = [
permissions.AllowAny
]
serializer_class = StudentTimesSerializer
class ScenariosViewSet(viewsets.ModelViewSet):
queryset = scenarios.objects.all()
permission_classes = [
permissions.AllowAny
]
serializer_class = ScenariosSerializer
# uncommeented cuz main - chirag - 04/15/2021
def delete(self, request, pk, format=None):
snippet = self.get_object()
snippet.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class SingleScenarioViewSet(viewsets.ModelViewSet):
def get(self, request):
scenario = scenarios.objects.all()
serializer = ScenariosSerializer(scenario, many=True)
return Response(serializer.data)
# class professors_to_scenarioviewset(viewsets.ModelViewSet):
# def get(self, request):
# scenario = scenarios.objects.all()
# serializer = scenariosserializer(scenarios)
# return Response(serializer.data)
# def delete(self, request, pk, format=None):
# snippet = self.get_object(pk)
# snippet.delete()
# return Response(status=status.HTTP_204_NO_CONTENT)
class professors_to_scenarioViewSet(viewsets.ModelViewSet):
queryset = professors_to_scenario.objects.all()
permission_classes = [
permissions.AllowAny
]
serializer_class = Professors_to_scenarioSerializer
class PagesViewSet(viewsets.ModelViewSet):
queryset = pages.objects.all()
serializer_class = PagesSerializer
# stakeholder_page viewset
class Stakeholder_pageViewSet(viewsets.ModelViewSet):
queryset = stakeholder_to_page.objects.all()
permission_classes = [
permissions.AllowAny
]
serializer_class = Stakeholder_to_pageSerializer
class Reflection_QuestionsViewSet(viewsets.ModelViewSet):
queryset = reflection_questions.objects.all()
permission_classes = [
permissions.AllowAny
]
serializer_class = Reflection_questionsSerializer
class Reflection_Question_to_pageViewSet(viewsets.ModelViewSet):
queryset = reflection_question_to_page.objects.all()
permission_classes = [
permissions.AllowAny
]
serializer_class = Reflection_questions_to_pageSerializer
class ReflectionsTakenViewSet(viewsets.ModelViewSet):
queryset = reflections_taken.objects.all()
permission_classes = [
permissions.AllowAny
]
serializer_class = ReflectionsTakenSerializer
# class actionstakenviewset(viewsets.ModelViewSet):
# queryset = actions_taken.objects.all()
# permission_class = [
# permissions.AllowAny
# ]
# serializer_class = actions_takenserializer
# class conversationshadviewset(viewsets.ModelViewSet):
# queryset = conversations_had.objects.all()
# permission_class = [
# permissions.AllowAny
# ]
# serializer_class = conversationshadserializer
# class studentsinviewset(viewsets.ModelViewSet):
# queryset = students_in.objects.all()
# permission_class = [permissions.AllowAny]
# serializer_class = studentsinserializer
class CoursesViewSet(viewsets.ModelViewSet):
queryset = courses.objects.all()
permission_classes = [permissions.AllowAny]
serializer_class = CoursesSerializer
class ResponsesViewSet(viewsets.ModelViewSet):
queryset = responses.objects.all()
permission_classes = [permissions.AllowAny]
serializer_class = ResponsesSerializer
#this allows for filtering scenarios by professor_id
class allScenariosViewSet(generics.ListAPIView):
serializer_class = allScenariosSerializer
queryset = scenarios.objects.all()
filter_backends = [DjangoFilterBackend]
filterset_fields = ['professor', 'is_finished']
# scenarios_for viewset
class Scenarios_forViewSet(viewsets.ModelViewSet):
queryset = scenarios_for.objects.all()
permission_classes = [
permissions.AllowAny
]
serializer_class = Scenarios_forSerializer
class courses_to_scenarioViewset(viewsets.ModelViewSet):
queryset = courses_to_scenario.objects.all()
permission_classes = [
permissions.AllowAny
]
serializer_class = Courses_to_ScenarioSerializer
# generic_page viewset
class generic_pageViewSet(viewsets.ModelViewSet):
queryset = generic_page.objects.all()
permission_classes = [
permissions.AllowAny
]
serializer_class = Generic_pageSerializer
# professors_teach viewset
# class professors_teachviewset(viewsets.ModelViewSet):
# queryset = professors_teach.objects.all()
# permissions_class = [
# permissions.AllowAny
# ]
# serializer_class = professors_teachserializer
# changed - chirag - 04/15/2021
class IssuesViewSet(viewsets.ModelViewSet):
queryset = issues.objects.all()
serializer_class = IssuesSerializer
# queryset = issues.objects.all()
# permission_classes = [
# permissions.AllowAny
# ]
# serializer_class = IssuesSerializer
# filter_backends = [DjangoFilterBackend]
# filterset_fields = ['scenario_id', "name"]
# def create(self, request, *args, **kwargs):
# serializer = IssuesSerializer(data=request.data)
# if serializer.is_valid():
# serializer.save()
# scenarioID = serializer.data['scenario_id']
# issueID = serializer.data['issue']
# stakeholders = stakeholders.objects.filter(scenario=scenarioID).values()
# for stakeholder in stakeholders:
# newCoverage = {}
# newCoverage['stakeholder'] = stakeholder['stakeholder']
# newCoverage['issue'] = issueID
# newCoverage['coverage_score'] = 0
# coverageSerial = coverageSerializer(data=newCoverage)
# if coverageSerial.is_valid():
# coverageSerial.save()
# else:
# return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# return Response(serializer.data, status=status.HTTP_201_CREATED)
# return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class Action_pageViewSet(viewsets.ModelViewSet):
queryset = action_page.objects.all()
permission_classes = [
permissions.AllowAny
]
serializer_class = Action_pageSerializer
class response_to_action_pageViewSet(viewsets.ModelViewSet):
queryset = response_to_action_page.objects.all()
permission_classes = [
permissions.AllowAny
]
serializer_class = Response_to_action_pageSerializer
# checked - ed - 4/15/21
#for getting/editing scenarios in dashboard
class logistics_page(APIView):
#http_method_names = [ 'post,' 'put', 'delete']
def get(self, request, *args, **kwargs):
#take scenario as input from url by adding ?scenario=<the id #> to the end of the url.
scenario_id = self.request.query_params.get('scenario')
#todo check that id != None
#get all scenarios belonging to this professor
# scenario_query = professors_to_scenario.objects.filter(professor = professor_id).values()
scenario = scenarios.objects.get(scenario_id = scenario_id)
scenario_dict = ScenariosSerializer(scenario).data
#loop through scenarios and append required information (course, page info)
# print(scenario_dict)
scenarios_for_query = scenarios_for.objects.filter(scenario_id=scenario_dict['scenario_id']).values()
course_id_array = []
for x in scenarios_for_query:
# print(x)
course_id_array.append(x['course_id'])
course_dict_array = []
for x in course_id_array:
course = courses.objects.get(course = x)
course_dict_array.append({"course":course.course, "name": course.name})
pages_query = pages.objects.filter(scenario=scenario_id).values()
# print("pages: ", pages_query)
page_array = []
for page in pages_query:
cropped_page = {}
cropped_page['page'] = page['page']
cropped_page['page_title'] = page['page_title']
cropped_page['page_type'] = page['page_type']
page_array.append(cropped_page)
scenario_dict.update({
"courses": course_dict_array,
"pages": page_array
})
logistics = scenario_dict
# print(logistics)
return Response(logistics)
"""format:
{
"scenario": 1,
"version": 0,
"name": "pizza is good!",
"is_finished": False,
"public": False,
"num_conversation": 5,
"professor": 12345678,
"courses":
[
{
"course": 2,
"name": "590g"
},
{
"course": 1,
"name": "320"
}
]
}
"""
#a put request for editing scenarios. must provide scenario in url thusly: /logistics?scenario=<insert id number here>
def put(self, request, *args, **kwargs):
#save the scenario
extant_scenario = scenarios.objects.get(scenario_id = request.data['scenario_id'])
scenario_serializer = ScenariosSerializer(extant_scenario, data = request.data)
if scenario_serializer.is_valid():
scenario_serializer.save()
#delete currently assocated classes
scenarios_for.objects.filter(scenario_id = request.data['scenario_id']).delete()
#get array of courses from frontend
courses = request.data['courses']
for course in courses:
scenarios_for_dict = {
"course" : course['course'],
"scenario" : request.data['scenario'],
"version" : request.data['version']
}
print(scenarios_for_dict)
#save the classes associated with it in scenarios_for
for_serializer = Scenarios_forSerializer(data=scenarios_for_dict)
if for_serializer.is_valid():
for_serializer.save()
print('saved!')
print(for_serializer.errors)
scenario_dict = ScenariosSerializer(scenarios.objects.get(scenario_id = request.data['scenario_id'])).data
scenario_dict['courses'] = request.data['courses']
return Response(scenario_dict)
# checked - ed - 4/15/2021
#returns list of scenarios for given professor along with list of associated courses
class dashboard_page(APIView):
def get(self, request, *args, **kwargs):
#take professor_id as input from url by adding ?professor=<the id #> to the end of the url.
#--old schema
#professor_id = self.request.query_params.get('professor')
#new, changed the endpoint request
#professor_id = request.META['uid']
#todo check that id != None
#scenario_query = scenarios_for.objects.filter(scenario_id=scenario_dict['scenario_id']).values()
#get all scenarios belonging to this professor
#scenario_query = professors_to_scenario.objects.filter(professor = professor_id).values()
scenario_query = scenarios.objects.values()
# if(len(scenario_query) == 0):
# return Response({"error": "you are not associated with any scenarios"})
#loop through scenarios and append required information (course, page info)
logistics = []
#print(scenario_query)
for scenario in scenario_query:
scenarios_for_query = scenarios_for.objects.filter(scenario_id = scenario['scenario_id']).values()
course_id_array = []
for x in scenarios_for_query:
course_id_array.append(x['course_id'])
course_dict_array = []
for x in course_id_array:
course = courses.objects.get(course= x)
course_dict = {"course":course.course, "name": course.name}
course_dict_array.append(course_dict)
scenario["courses"] = course_dict_array
logistics.append(scenario)
return Response(logistics)
"""format:
{
"name": "best test",
"is_finished": False,
"public": False,
"num_conversation": 5,
"professor": 12345678,
"courses":[
{"course": 1},
{"course": 2},
{"course": 3}
]
}
"""
def post(self, request, *args, **kwargs):
#save the scenario
scenario_serializer = ScenariosSerializer(data = request.data)
if not (scenario_serializer.is_valid()):
print("scenario saved incorrectly")
return Response(scenario_serializer.errors)
scenario_serializer.save()
scenario_dict = scenario_serializer.data
#get array of courses from frontend
courses = request.data['courses']
for course in courses:
scenarios_for_dict = {
"scenario" : scenario_dict['scenario'],
"course" : course['course'],
"version" : scenario_dict['version']
}
print(scenarios_for_dict)
print(scenario_dict)
for_serializer = Scenarios_forSerializer(data=scenarios_for_dict)
if not for_serializer.is_valid():
print("scenarios_for saved incorrectly")
return Response(for_serializer.errors)
for_serializer.save()
#create a new intro page
intro_page = {
"page_type": "i",
"page_title": "introduction",
"page_body": "page body",
"scenario": scenario_dict['scenario'],
"next_page": None,
"x_coordinate": 0,
"y_coordinate": 0,
"next_page_version": None
}
intro_page_serializer = PagesSerializer(data=intro_page)
if intro_page_serializer.is_valid():
intro_page_serializer.save()
print("intro page saved")
else:
print("intro page saved incorrectly")
return Response(intro_page_serializer.errors)
#todo create blank stakeholder page and return it
#page must be called stakeholder_page and serializer must be called stakeholder_page_serializer
stakeholder_page = {
"page_type": "s",
"page_title": "stakeholders",
"page_body": "page of stakeholders",
"scenario": scenario_dict['scenario'],
"next_page": None,
"x_coordinate": 0,
"y_coordinate": 0,
"next_page_version": None
}
stakeholder_page_serializer = PagesSerializer(data=stakeholder_page)
if stakeholder_page_serializer.is_valid():
stakeholder_page_serializer.save()
else:
print("stakeholders page saved incorrectly")
return Response(stakeholder_page_serializer.errors)
scenario_dict = ScenariosSerializer(scenarios.objects.get(scenario = scenario_dict['scenario'])).data
scenario_dict['courses'] = request.data['courses']
scenario_dict['intro_page'] = intro_page_serializer.data
scenario_dict['stakeholder_page'] = stakeholder_page_serializer.data
return Response(scenario_dict)
# checked - ed - 4/15/2021
#change a list of issue objects at url /multi_issue?scenario=<insert id number here>
class multi_issue(APIView):
def put(self, request, *args, **kwargs):
scenario = self.request.query_params.get('scenario')
if scenario == None:
return Response({'status': 'details'}, status=status.HTTP_404_NOT_FOUND)
for updated_issue in request.data:
extant_issue = issues.objects.get(scenario_id = scenario, issue = updated_issue['issue'])
serializer = IssuesSerializer(extant_issue, data=updated_issue)
if not serializer.is_valid():
return Response(serializer.errors)
try:
serializer.save()
except:
print('something went wrong with the put')
issues_query = issues.objects.filter(scenario_id = scenario).values()
return Response(issues_query)
# checked - ed - 4/15/2021
#for use in the pages flowchart, input is an array of page objects
class flowchart(APIView):
#get all page objects given a scenario id
def get(self, request, *args, **kwargs):
scenario_id = self.request.query_params.get('scenario')
print(scenario_id)
pages_query = pages.objects.filter(scenario=scenario_id).values()
print(pages_query)
for page in pages_query:
if page['page_type'] == 'a':
page['action'] = action_page.objects.filter(page=page['page']).values()
return Response(pages_query)
#update the next_page field of all page objects
def put(self, request, *args, **kwargs):
scenario_id = self.request.query_params.get('scenario')
if scenario_id == None:
return Response({'status': 'details'}, status=status.HTTP_404_NOT_FOUND)
for updated_page in request.data:
#save updated choices within action pages
if updated_page['page_type'] == 'a':
print('action page')
print(updated_page)
for updated_choice in updated_page['action']:
print(updated_choice)
extant_choice = action_page.objects.get(id=updated_choice['id'])
action_serializer = Action_pageSerializer(extant_choice, data=updated_choice)
if not action_serializer.is_valid():
print("error with puting choices")
return Response(action_serializer.errors)
action_serializer.save()
#save the page itself
extant_page = pages.objects.get(scenario = scenario_id, page = updated_page['page'])
serializer = PagesSerializer(extant_page, data=updated_page)
if not serializer.is_valid():
print("error with puting pages")
return Response(serializer.errors)
serializer.save()
#return query with newly saved pages
pages_query = pages.objects.filter(scenario=scenario_id).values()
for page in pages_query:
if page['page_type'] == 'a':
page['action'] = action_page.objects.filter(page=page['page']).values()
return Response(pages_query)
#pages viewset
#Cooper 05/05/2021
class Page_reflectionViewSet(generics.CreateAPIView):
model = pages
serializer_class = Pages_reflectionSerializer
#Cooper 05/05/2021
class Page_actionViewSet(generics.CreateAPIView):
model = pages
serializer_class = Pages_actionSerializer
#Cooper 05/05/2021
class Page_genericViewSet(generics.CreateAPIView):
model = pages
serializer_class = Pages_genericSerializer
#Cooper 05/05/2021
class Page_StakeholderViewSet(generics.CreateAPIView):
model = pages
serializer_class = Pages_stakeholderSerializer
class pages_page(APIView):
# define get method for pages
# @api_view(['get'])
def get(self, request, *args, **kwargs):
# takes the page_id from the url if the url has ?page_id=<id> at the end, no parameter passed return error 400
page_id = self.request.query_params.get('page_id')
# get all fields from this page_id; if it doesn't exist return error 404
try:
page = pages.objects.get(page = page_id)
except pages.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
# print(page)
# converts django model object into a dictionary
page_data = PagesSerializer(page).data
# print(page_data)
page_type = page_data['page_type']
# print("page type: ", page_type)
# check page.page_type = 'reflection'
if (page_type == 'r'):
reflection_query = reflection_questions.objects.filter(reflection_questions_to_page1 = page_id).values()
page_data.update(
{
"reflection_questions": reflection_query
}
)
return Response(page_data, status=status.HTTP_200_OK)
# check page.page_type = 'action'
if (page_type == 'a'):
action_query = action_page.objects.filter(page = page_id).values()
page_data.update(
{
"choices": action_query
}
)
return Response(page_data, status=status.HTTP_200_OK)
# check page.page_type = 'generic'
if (page_type == 'g' or page_type == 'i'):
generic_query = generic_page.objects.filter(page = page_id).values()
page_data.update(
{
"bodies":generic_query
}
)
return Response(page_data, status=status.HTTP_200_OK)
# check page.page_type = 'stakeholder'
if (page_type == 's'):
stakeholder_query = stakeholder_to_page.objects.filter(page = page_id).values()
page_data.update(
{
"stakeholders": stakeholder_query
}
)
return Response(page_data, status=status.HTTP_200_OK)
# neither of these pages, something went wrong or missing implementation
else:
return Response(status=status.HTTP_400_BAD_REQUEST)
# # define post function for pages
# # @api_view(['post'])
def post(self, request):
# reads page_type from the request body; unsupported page types return error 400
page_type = request.data["page_type"]
# if the request is a reflection page
if (page_type == 'r'):
pages_serializer = PagesSerializer(data=request.data)
if pages_serializer.is_valid():
pages_serializer.save()
page_id = pages_serializer.data["page"]
for question in request.data['reflection_questions']:
question['page'] = page_id
nested_serializer = Reflection_questionsSerializer(data=question)
if nested_serializer.is_valid():
nested_serializer.save()
# if the nested page is not valid it deletes the wrapper page created above
else:
page = pages.objects.get(page=page_id)
page.delete()
return Response(nested_serializer.data, status=status.HTTP_400_BAD_REQUEST)
#nested_serializer.save()
return Response(pages_serializer.data, status=status.HTTP_201_CREATED)
# if the request was badly made or could not be created
return Response(pages_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# if the request is an action page
if (page_type == 'a'):
pages_serializer = PagesSerializer(data=request.data)
if pages_serializer.is_valid():
pages_serializer.save()
page_id = pages_serializer.data["page"]
for choice in request.data['page_choices']:
choice['page'] = page_id
nested_serializer = Action_pageSerializer(data=choice)
if nested_serializer.is_valid():
nested_serializer.save()
# if the nested page is not valid it deletes the wrapper page created above
else:
page = pages.objects.get(page=page_id)
page.delete()
return Response(nested_serializer.data, status=status.HTTP_400_BAD_REQUEST)
#nested_serializer.save()
return Response(pages_serializer.data, status=status.HTTP_201_CREATED)
# if the request was badly made or could not be created
return Response(pages_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# if the request is a generic page
if (page_type == 'g' or page_type == 'i'):
pages_serializer = PagesSerializer(data=request.data)
if pages_serializer.is_valid():
pages_serializer.save()
page_id = pages_serializer.data["page"]
for body in request.data['body']:
body['page'] = page_id
nested_serializer = Generic_pageSerializer(data=body)
if nested_serializer.is_valid():
nested_serializer.save()
# if the nested page is not valid it deletes the wrapper page created above
else:
page = pages.objects.get(page=page_id)
page.delete()
return Response(nested_serializer.data, status=status.HTTP_400_BAD_REQUEST)
#nested_serializer.save()
return Response(pages_serializer.data, status=status.HTTP_201_CREATED)
# if the request was badly made or could not be created
return Response(pages_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# if the request is a stakeholder page
if (page_type == 's'):
pages_serializer = PagesSerializer(data=request.data)
if pages_serializer.is_valid():
pages_serializer.save()
page_id = pages_serializer.data["page"]
for stakeholder in request.data['stakeholders']:
stakeholder['page'] = page_id
nested_serializer = Stakeholder_pageSerializer(data=stakeholder)
if nested_serializer.is_valid():
nested_serializer.save()
# if the nested page is not valid it deletes the wrapper page created above
else:
page = pages.objects.get(page=page_id)
page.delete()
return Response(nested_serializer.data, status=status.HTTP_400_BAD_REQUEST)
#nested_serializer.save() #delete
return Response(pages_serializer.data, status=status.HTTP_201_CREATED)
# if the request was badly made or could not be created
return Response(pages_serializer.data, status=status.HTTP_400_BAD_REQUEST)
else:
return Response(status=status.HTTP_400_BAD_REQUEST)
# @api_view(['put'])
def put(self, request):
# takes the page_id from the url if the url has ?page_id=<id> at the end, no parameter passed return error 400
page_id = self.request.query_params.get('page_id')
# get all fields from this page_id if it doesn't exist return error 404
try:
page = pages.objects.get(page = page_id)
except pages.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
# please don't modify the scenario
print(request.data)
request.data["scenario_id"] = PagesSerializer(page).data['scenario']
if request.method == "put":
page_type = request.data["page_type"]
# check page.page_type = 'reflection'
if (page_type == 'r'):
pages_serializer = PagesSerializer(page, data=request.data)
if pages_serializer.is_valid():
pages_serializer.save()
# check that each reflection question already exists
for question in request.data['reflection_questions']:
try:
reflection_page = reflection_questions.objects.get(id = question.get('id'))
except:
# if the subpage does not exist, then you create that new page and post it and continue to the next component
question['page'] = page_id
nested_serializer = Reflection_questionsSerializer(data=question)
if nested_serializer.is_valid():
nested_serializer.save()
else:
return Response(nested_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
continue
question['page'] = page_id
nested_serializer = Reflection_questionsSerializer(reflection_page, data=question)
if nested_serializer.is_valid():
nested_serializer.save()
else:
return Response(nested_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
return Response(pages_serializer.data, status=status.HTTP_200_OK)
# else the request was badly made
return Response(pages_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# check page.page_type = 'action'
if (page_type == 'a'):
pages_serializer = PagesSerializer(page, data=request.data)
if pages_serializer.is_valid():
pages_serializer.save()
# check that each action_page already exists
for action in request.data['choices']:
try:
choices_page = action_page.objects.get(id = action.get('id'))
except:
# if the subpage does not exist, then you create that new page and post it and continue to the next component
action['page'] = page_id
nested_serializer = Action_pageSerializer(data=action)
if nested_serializer.is_valid():
nested_serializer.save()
else:
return Response(nested_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
continue
action['page'] = page_id
nested_serializer = Action_pageSerializer(choices_page, data=action)
if nested_serializer.is_valid():
nested_serializer.save()
else:
return Response(nested_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
return Response(pages_serializer.data, status=status.HTTP_200_OK)
# else the request was badly made
return Response(pages_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# check page.page_type = 'generic'
if (page_type == 'g' or page_type == 'i'):
pages_serializer = PagesSerializer(page, data=request.data)
if pages_serializer.is_valid():
pages_serializer.save()
# check that each generic page already exists
for body in request.data['bodies']:
try:
body_page = generic_page.objects.get(id = body.get('id'))
except:
# if the subpage does not exist, then you create that new page and post it and continue to the next component
body['page'] = page_id
nested_serializer = Generic_pageSerializer(data=body)
if nested_serializer.is_valid():
nested_serializer.save()
else:
return Response(nested_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
continue
body['page'] = page_id
nested_serializer = Generic_pageSerializer(body_page, data=body)
if nested_serializer.is_valid():
nested_serializer.save()
else:
return Response(nested_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
return Response(pages_serializer.data, status=status.HTTP_200_OK)
# else the request was badly made
return Response(pages_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# check page.page_type = 'stakeholders'
if (page_type == 's'):
pages_serializer = PagesSerializer(page, data=request.data)
if pages_serializer.is_valid():
pages_serializer.save()
# check that each stakeholder page already exists
for stakeholder in request.data['stakeholders']:
try:
page_stakeholder = stakeholder_to_page.objects.get(stakeholder = stakeholder.get('id'))
except:
# if the subpage does not exist, then you create that new page and post it and continue to the next component
stakeholder['page'] = page_id
nested_serializer = Stakeholder_pageSerializer(data=stakeholder)
if nested_serializer.is_valid():
nested_serializer.save()
else:
return Response(nested_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
continue
stakeholder['page'] = page_id
nested_serializer = Stakeholder_pageSerializer(page_stakeholder, data=stakeholder)
if nested_serializer.is_valid():
nested_serializer.save()
else:
return Response(nested_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
return Response(pages_serializer.data, status=status.HTTP_200_OK)
# else the request was badly made
return Response(pages_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# not a valid type of page
else:
return Response(status=status.HTTP_400_BAD_REQUEST)
# @api_view(['delete'])
def delete(self, request):
# takes the page_id from the url if the url has ?page_id=<id> at the end, no parameter passed return error 400
page_id = self.request.query_params.get('page_id')
# check if the page exists.
try:
page = pages.objects.get(page=page_id)
except pages.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
# delete the page
if (request.method == "delete"):
#set next page field of pages pointing to the deleted page to be None/null
next_pages = pages.objects.filter(next_page = page_id)
for updated_page in next_pages:
extant_page = updated_page
updated_page.next_page = None
updated_page_dict = PagesSerializer(updated_page).data
pages_serializer = PagesSerializer(extant_page, data=updated_page_dict)
if pages_serializer.is_valid():
pages_serializer.save()
else:
print("error in making next_page = null during delete!")
return Response(pages_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
#also set any result_page fields pointing to the deleted page to be null as well.
action_pages = action_page.objects.filter(result_page = page_id)
for updated_page in action_pages:
extant_page = updated_page
updated_page.result_page = None
updated_page_dict = Action_pageSerializer(updated_page).data
action_pages_serializer = Action_pageSerializer(extant_page, data=updated_page_dict)
if action_pages_serializer.is_valid():
action_pages_serializer.save()
else:
print("error in making next_page = null during delete!")
return Response(action_pages_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# finally delete the page
operation = page.delete()
page_data = {}
if (operation):
page_data["success"] = "delete successful"
else:
page_data["failure"] = "delete failed"
return Response(data=page_data)
# checked - ed - 4/15/2021
class student_info(APIView):
def get(self,request,*args,**kwargs):
scenario_id = self.request.query_params.get('scenario')
responses_query = responses.objects.filter(scenario=scenario_id).values()
student_ids = []
data = []
for response in responses_query:
student = response['student']
if student not in student_ids:
date_taken = response['date_taken']
student_ids.append(student)
for student in student_ids:
demographics_query = demographics.objects.filter(student = student).values()
for dem in demographics_query:
student_query = students.objects.filter(student = dem['student']).values()
for x in student_query:
name = x['name']
dem['name'] = name
dem['date_taken'] = date_taken
data.append(dem)
return Response(data)
# seems like no change required - chirag - 4/15
class coverages_page(APIView):
def get(self, request, *args, **kwargs):
stakeholder_id = self.request.query_params.get('stakeholder')
stkholder = {}
# print(stakeholder_id)
try:
coverage_list = coverage.objects.filter(stakeholder=stakeholder_id).values()
# print("coverage list:", coverage_list)
except coverage.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
issue_list = []
# check for every single coverage object that belongs to the stakeholder id 'id'
for coverages in coverage_list:
issues_dict = {}
# issuelist = coverageserializer(coverage.objects.get(issue=issueid)).data
# issuelist.update({"name": issuesserializer(issues.objects.get(issue=issueid)).data['name']})
# getting the issue for the coverage dictionary associated with the stakeholder_id
try:
issue = issues.objects.get(issue=coverages.get('issue_id'))
except:
continue
issues_dict.update(coverages)
# del issues_dict['id']
# issues_dict['issue'] = issues_dict['issue_id']
# del issues_dict['issue_id']
# issues_dict['stakeholder'] = issues_dict['stakeholder_id']
# del issues_dict['stakeholder']
issues_dict.update(
{
"name": issue.name
})
issue_list.append(issues_dict)
stkholder.update(
{
"issues": issue_list
}
)
return Response(stkholder, status=status.HTTP_200_OK)
def put(self, request, *args, **kwargs):
# """
# docstring
# """
data = JSONParser().parse(request)
if type(data) == list:
response = []
for item in data:
stkholderid = item['stakeholder']
issueid = item['issue']
updatingitem = coverage.objects.get(
stakeholder=stkholderid, issue=issueid)
serializer = coverageSerializer(
updatingitem, data=item)
if serializer.is_valid():
serializer.save()
response.append(serializer.data)
else:
return Response(response, status=status.HTTP_400_BAD_REQUEST)
return Response(response, status=status.HTTP_200_OK)
else:
stkholderid = data['stakeholder']
issueid = data['issue']
updatingitem = coverage.objects.get(
stakeholder=stkholderid, issue=issueid)
serializer = coverageSerializer(
updatingitem, data=data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_200_OK)
else:
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class stakeholders_page(APIView):
def add_detail(self, stkholders):
for stkholder in stkholders:
stakeholder_id = stkholder['stakeholder']
queryset = conversations.objects.filter(stakeholder=stakeholder_id)
conlist = ConversationsSerializer(queryset, many=True).data
stkholder['conversations'] = conlist
try:
coverage_list = coverage.objects.filter(stakeholder=stakeholder_id).values()
except coverage.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
issue_list = []
# check for every single coverage object that belongs to the stakeholder id 'id'
for coverages in coverage_list:
issues_dict = {}
# issuelist = coverageserializer(coverage.objects.get(issue=issueid)).data
# issuelist.update({"name": issuesserializer(issues.objects.get(issue=issueid)).data['name']})
# getting the issue for the coverage dictionary associated with the stakeholder_id
try:
issue = issues.objects.get(issue=coverages.get('issue'))
except:
continue
issues_dict.update(coverages)
# del issues_dict['id']
# issues_dict['issue'] = issues_dict['issue_id']
# del issues_dict['issue_id']
# issues_dict['stakeholder'] = issues_dict['stakeholder_id']
# del issues_dict['stakeholder_id']
issues_dict.update(
{
"name": issue.name
})
issue_list.append(issues_dict)
stkholder.update(
{
"issues": issue_list
}
)
return stkholders
# '''
# page_data = pagesserializer(page).datapage_data.update(
# {
# "reflection_questions": reflection_query
# }
# )
# reflection_query = reflection_questions.objects.filter(page = page_id).values()
# page_data.update(
# {
# "reflection_questions": reflection_query
# }
# )
# '''
def get(self, request, *args, **kwargs):
'''
return format
[
{
"stakeholder": 3,
"name": "mon",
"description": "this is mon",
"job": "driver",
"introduction": "mon is a driver",
"scenario": 1,
"version": 1,
"conversations": [
{
"conversation": 4,
"question": "question 1",
"response": "answer 1",
"stakeholder": 3
}
],
"issues": [
{
"issue": 4,
"name": "issue 3",
"importance_score": 10.0,
"scenario": 1,
"version": 1
}
]
},
]
parse scenario_id and stakeholder_id from the request url
example
http://127.0.0.1:8000/stakeholders?scenario_id=3
http://127.0.0.1:8000/stakeholders?stakeholder_id=0
'''
# scenario not id
scenario_id = self.request.query_params.get('scenario_id')
stakeholder_id = self.request.query_params.get('stakeholder_id')
# stakeholder_id = self.request.get.get('stakeholder_id')
# handle request for scenario_id
# get all stakeholder in scenario with id = scenario_id
if scenario_id != None:
# checking valid scenario id
try:
# return empty if scenario doesn't have any stakeholder
# return list of stakeholder belong to that scenario
scenarios.objects.get(scenario_id = scenario_id)
queryset = stakeholders.objects.filter(
scenario=scenario_id)
data = list(StakeholdersSerializer(queryset, many=True).data)
data = self.add_detail(data)
return Response(data, status=status.HTTP_200_OK)
# return an error for non-existed scenario id
except scenarios.DoesNotExist:
message = {'message': 'invalid scenario id'}
return Response(message, status=status.HTTP_404_NOT_FOUND)
# handle request for stakeholder_id
# get the stakeholder id = stakeholder_id
        if stakeholder_id is not None:
try:
queryset = stakeholders.objects.filter(
stakeholder=stakeholder_id)
data = list(StakeholdersSerializer(queryset, many=True).data)
data = self.add_detail(data)
return Response(data, status=status.HTTP_200_OK)
except stakeholders.DoesNotExist:
message = {'message': 'invalid stakeholder id'}
return Response(message, status=status.HTTP_404_NOT_FOUND)
queryset = stakeholders.objects.all()
data = StakeholdersSerializer(queryset, many=True).data
return Response(data, status=status.HTTP_200_OK)
def post(self, request, *args, **kwargs):
serializer = StakeholdersSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
stkholderid = serializer.data['stakeholder']
scenarioid = serializer.data['scenario']
stkholderversion = serializer.data['version']
queryset = issues.objects.filter(scenario_id=scenarioid)
data = issuesserializer(queryset, many=True).data
for item in data:
itemdict = {}
itemdict['stakeholder'] = stkholderid
itemdict['stakeholder_version'] = stkholderversion
itemdict['issue'] = item['issue']
itemdict['name'] = item['name']
itemdict['coverage_score'] = 0
itemserializer = coverageSerializer(data=itemdict)
if itemserializer.is_valid():
itemserializer.save()
else:
                    return Response(itemserializer.errors,
                                    status=status.HTTP_400_BAD_REQUEST)
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, *args, **kwargs):
stakeholder_id = self.request.query_params.get('stakeholder_id')
        if stakeholder_id is not None:
try:
                response = stakeholders.objects.get(
                    stakeholder=stakeholder_id)
response.delete()
return Response({'message': 'deleted'}, status=status.HTTP_202_ACCEPTED)
            except stakeholders.DoesNotExist:
return Response({'message': 'not found'}, status=status.HTTP_404_NOT_FOUND)
else:
return Response({'message': 'missing id'}, status=status.HTTP_400_BAD_REQUEST)
def put(self, request, *args, **kwargs):
'''
put can take one object or a list
for one object put
{
"stakeholder": 1,
"name": "stakeholder 1a",
"description": "description 1",
"job": "job 1",
"introduction": "introduction 1",
"scenario": 1,
"version": 1
}
for list put
[
{
"stakeholder": 1,
"name": "stakeholder 1a",
"description": "description 1",
"job": "job 1",
"introduction": "introduction 1",
"scenario": 1,
"version": 1
},
{
"stakeholder": 2,
"name": "stakeholder 2a",
"description": "description 2",
"job": "job 2",
"introduction": "introduction 2",
"scenario": 1,
"version": 1
}
]
'''
data = JSONParser().parse(request)
        if isinstance(data, list):
response = []
for item in data:
id = item['stakeholder']
updatingitem = stakeholders.objects.get(stakeholder=id)
stkholderserializer = StakeholdersSerializer(
updatingitem, data=item)
if stkholderserializer.is_valid():
stkholderserializer.save()
response.append(stkholderserializer.data)
else:
return Response(response, status=status.HTTP_400_BAD_REQUEST)
return Response(response, status=status.HTTP_200_OK)
else:
id = data['stakeholder']
updatingitem = stakeholders.objects.get(stakeholder=id)
stkholderserializer = StakeholdersSerializer(
updatingitem, data=data)
if stkholderserializer.is_valid():
stkholderserializer.save()
return Response(stkholderserializer.data, status=status.HTTP_200_OK)
else:
return Response(stkholderserializer.errors, status=status.HTTP_400_BAD_REQUEST)
# class coverages_page(APIView):
# checked - ed - 4/15/2021
class student_responses(APIView):
def get(self, request, *args, **kwargs):
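        # Summarizes one student's run of a scenario: the action-page choices
        # made and the reflection answers given (output shape inferred from the
        # code below).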
#filter by scenario and student id
scenario = self.request.query_params.get('scenario')
student = self.request.query_params.get('student')
filterargs = {'scenario':scenario,'student':student}
responses_query = responses.objects.filter(**filterargs).values()
choice_array = []
choices_array = []
choices_dict = {}
#get the different actions
for response in responses_query:
#filter by page number
name_query = pages.objects.filter(page = response["action_page"]).values()
            for page_row in name_query:
                name = page_row['page_title']
                page_type = page_row['page_type']
choices_query = action_page.objects.filter(page = response["action_page"]).values()
for choice in choices_query:
choice_array.append(choice['choice'])
chosen_query = responses.objects.filter(action_page = response["action_page"]).values()
for chose in chosen_query:
chosen = chose['choice']
date_taken = chose['date_taken']
#only if it is an action page
choices_dict = {"name": name, "choices":choice_array, "chosen": chosen, "date_taken": date_taken }
choices_array.append(choices_dict)
choice_array = []
reflections_array = []
reflections_dict = {}
#get the different reflections
reflections_query = reflections_taken.objects.filter(**filterargs).values()
for reflection in reflections_query:
name_query = pages.objects.filter(page = reflection["page"]).values()
            for page_row in name_query:
                name = page_row['page_title']
                page_type = page_row['page_type']
            ref_questions_query = reflection_question_to_page.objects.filter(page_id = reflection["page"]).values()
            for question_row in ref_questions_query:
                question = question_row['reflection_question']
            ref_answers_query = reflections_taken.objects.filter(response_id = reflection["response_id"]).values()
            for answer in ref_answers_query:
                reflection = answer['reflections']
                date_taken = answer['date_taken']
            #only if it is a reflection page
reflections_dict = {"name": name, "question": question, "reflection": reflection, "date_taken": date_taken}
reflections_array.append(reflections_dict)
data_dict = {}
data_dict["choices"] = choices_array
data_dict["reflections"] = reflections_array
return Response(data_dict) | 40.822624 | 186 | 0.598469 | 6,033 | 60,989 | 5.857285 | 0.075253 | 0.041203 | 0.0326 | 0.018281 | 0.611257 | 0.567366 | 0.526672 | 0.486459 | 0.435804 | 0.398478 | 0 | 0.011126 | 0.313253 | 60,989 | 1,494 | 187 | 40.822624 | 0.832561 | 0.210087 | 0 | 0.506024 | 0 | 0 | 0.050205 | 0.000579 | 0 | 0 | 0 | 0.002008 | 0 | 1 | 0.030668 | false | 0 | 0.01862 | 0 | 0.299014 | 0.024096 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4fc3c6b3bac4ffbf5de1286290c334c20512556d | 5,763 | py | Python | src/pipelines/train.py | mnrozhkov/MLOps-demo-project | 42d6fd1546345c9bf03b7882fe8e34639a62f686 | [
"MIT"
] | 1 | 2021-12-24T00:08:32.000Z | 2021-12-24T00:08:32.000Z | src/pipelines/train.py | mnrozhkov/MLOps-demo-project | 42d6fd1546345c9bf03b7882fe8e34639a62f686 | [
"MIT"
] | null | null | null | src/pipelines/train.py | mnrozhkov/MLOps-demo-project | 42d6fd1546345c9bf03b7882fe8e34639a62f686 | [
"MIT"
] | null | null | null | """This script trains the model
It can be run from the command line (argparse reads config.yaml) or imported as a module.
Pipeline stages produce artifacts as outputs and encapsulate the training logic."""
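# Typical invocation (a sketch; assumes the repo keeps its stage parameters in
# a DVC-style params.yaml at the project root):
#   python -m src.pipelines.train --config params.yaml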
import argparse
import os
import catboost as ctb
import joblib
import json
import matplotlib.pyplot as plt
import pandas as pd
from typing import Text
import yaml
from envyaml import EnvYAML
from src.train.train import custom_ts_split, get_split_data
from src.utils.logging import get_logger
from src.evaluate.metrics import precision_at_k_score, recall_at_k_score, lift_score
def train(config_path: Text) -> None:
"""Train model
Params:
config_path {Text}: path to config file
"""
# Import configs:
# -------------------------------------------
config = EnvYAML(config_path)
# base params:
random_state = config['base']['random_state']
log_level = config['base']['log_level']
# features params:
features_path = config['featurize']['features_path']
categories = config['featurize']['categories']
# train params:
estimator_params = config['train']['catboost_params']
top_K_coef = config['train']['top_K_coef']
model_path = config['train']['model_path']
raw_metrics_path = config['train']['raw_metrics_path']
train_metrics_path = config['train']['train_metrics_path']
train_plots_path = config['train']['train_plots_path']
train_metrics_png = config['train']['train_metrics_png']
# -------------------------------------------
logger = get_logger("TRAIN", log_level)
# 1. load and process joint data:
logger.info('Load data')
features = pd.read_feather(features_path)
features['month'] = pd.to_datetime(features['month'])
# 2. instantiate a model:
logger.info('Instantiate model')
clf = ctb.CatBoostClassifier(
**estimator_params,
cat_features=categories,
random_state=random_state
)
# 3. count top k instances for subsequent train and evaluations
metrics_df = pd.DataFrame(columns=['test_period', 'lift', 'precision_at_k', 'recall_at_k'])
top_K = int(features.shape[0] * top_K_coef)
months = features.month.sort_values().unique()
logger.info(f'Top_K {top_K_coef*100}% of the dataset size: {top_K}')
# 4. train model
k = 1
for start_train, end_train, test_period in custom_ts_split(months, train_period=1):
logger.info(f'Fold {k}:')
logger.info(f'Train: {start_train} - {end_train}')
logger.info(f'Test: {test_period} \n')
# Get train / test data for the split
X_train, X_test, y_train, y_test = get_split_data(features, start_train, end_train, test_period)
logger.info(f'Train shapes: X - {X_train.shape}, y - {y_train.shape}')
logger.info(f'Test shapes: X - {X_test.shape}, y - {y_test.shape}')
# Fit estimator
clf.fit(X_train, y_train)
# Predict on test
y_pred = clf.predict(X_test)
probas = clf.predict_proba(X_test)
logger.info(f'Max probas: {probas[:, 1].max()}')
# Calculate raw metrics on test per each fold:
# -------------------------------------------
lift = lift_score(y_test, y_pred, probas[:, 1], top_K)
precision_at_k = precision_at_k_score(y_test, y_pred, probas[:, 1], top_K)
recall_at_k = recall_at_k_score(y_test, y_pred, probas[:, 1], top_K)
        new_row = dict(zip(metrics_df.columns, [test_period, lift, precision_at_k, recall_at_k]))
        metrics_df = pd.concat([metrics_df, pd.DataFrame([new_row])], ignore_index=True)
k += 1
logger.info(f'Precision at {top_K}: {precision_at_k}')
logger.info(f'Recall at {top_K}: {recall_at_k}\n')
logger.info('Save "raw" metrics for plotting')
metrics_df.to_csv(raw_metrics_path, index=False)
    # Create and save aggregated (min, max, std, mean) metrics:
# -------------------------------------------
logger.info('Save aggregated metrics')
metrics_aggs = metrics_df[['lift', 'precision_at_k', 'recall_at_k']].agg(['max', 'min', 'std', 'mean'])
metrics = {
f'{metric}_{agg}': metrics_aggs.loc[agg, metric]
for metric in metrics_aggs.columns
for agg in metrics_aggs.index
}
with open(os.path.join(config['base']['project_dir'], train_metrics_path), 'w') as metrics_f:
json.dump(obj=metrics, fp=metrics_f, indent=4)
# Generate and save data for plots:
# -------------------------------------------
logger.info('Generate & save plots')
plots_df = pd.DataFrame({
'metric': list(metrics.keys()),
'value': list(metrics.values())
})
plots_df.to_csv(train_plots_path, index=False)
# Make a plot using a csv above:
x_labels = list(range(0, len(metrics)))
plt.figure(figsize=(10, 10))
    plt.bar(x_labels, list(metrics.values()))
plt.title('Train metrics', fontsize=14)
plt.xlabel('Metrics')
plt.ylabel('Values')
plt.xticks(x_labels, metrics.keys(), size='small', rotation='45')
plt.grid(color='k', linestyle='-', linewidth=0.5)
# plt.show()
plt.savefig(train_metrics_png)
# Save the trained model:
# -------------------------------------------
logger.info('Save model')
path = os.path.join(config['base']['project_dir'], model_path)
joblib.dump(clf, path)
logger.info(f'Model saved to: {path}')
if __name__ == '__main__':
args_parser = argparse.ArgumentParser()
args_parser.add_argument('--config', dest='config', required=True)
args = args_parser.parse_args()
train(config_path=args.config)
| 35.795031 | 107 | 0.631615 | 779 | 5,763 | 4.444159 | 0.272144 | 0.049105 | 0.034951 | 0.017331 | 0.10543 | 0.089255 | 0.073079 | 0.048527 | 0.048527 | 0.041017 | 0 | 0.005829 | 0.196252 | 5,763 | 160 | 108 | 36.01875 | 0.74158 | 0.188444 | 0 | 0 | 0 | 0 | 0.190435 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.010204 | false | 0 | 0.132653 | 0 | 0.142857 | 0.010204 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4fc639588c22d23a25831ae5bc48b4a8caf6594e | 9,803 | py | Python | sre-recipes/recipe_runner.py | joyoza/cloud-ops-sandbox | bdd550c18b91be8953ba4b57c2e2a786ff9ad59c | [
"Apache-2.0"
] | 70 | 2019-03-13T19:45:43.000Z | 2020-08-15T16:58:19.000Z | sre-recipes/recipe_runner.py | joyoza/cloud-ops-sandbox | bdd550c18b91be8953ba4b57c2e2a786ff9ad59c | [
"Apache-2.0"
] | 187 | 2019-04-02T22:57:13.000Z | 2020-08-20T20:18:10.000Z | sre-recipes/recipe_runner.py | joyoza/cloud-ops-sandbox | bdd550c18b91be8953ba4b57c2e2a786ff9ad59c | [
"Apache-2.0"
] | 29 | 2019-04-02T18:58:38.000Z | 2020-08-20T04:04:00.000Z | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
"""
This file contains utility runtime classes implementing core SRE Recipes
features, such as breaking and restoring microservices, printing hints, and
running interactive multiple choice questions.
Currently, it implements two SRE Recipe Runner:
- ImplBasedRecipeRunner: runs SRE Recipe implemented via python classes.
- ConfigBasedRecipeRunner: runs SRE Recipes defined as YAML configs.
Refer to the class docstring for further explanations.
"""
import abc
import importlib
import requests
import subprocess
import yaml
from inspect import isclass
from os import path
import utils
from recipes.impl_based.base import BaseRecipeImpl
# Default Load Generation Config
DEFAULT_LOADGEN_USER_TYPE = "BasicHomePageViewingUser"
DEFAULT_LOADGEN_USER_COUNT = 20
DEFAULT_LOADGEN_SPAWN_RATE = 1
DEFAULT_LOADGEN_TIMEOUT_SECONDS = 600
class ImplBasedRecipeRunner:
"""A SRE Recipe runner for running recipes implemented as class objects.
Given a `recipe_name`, it tries to run `recipes/impl_based/recipe_name.py`.
    This runner will propagate all exceptions to the caller, and it is the caller's
responsibility to handle any exception and to perform any error logging.
"""
def __init__(self, recipe_name):
self.recipe = None
module = importlib.import_module(f"recipes.impl_based.{recipe_name}")
for attribute_name in dir(module):
attr = getattr(module, attribute_name)
if isclass(attr) and attr is not BaseRecipeImpl and issubclass(attr, BaseRecipeImpl):
self.recipe = attr()
break
if not self.recipe:
raise NotImplementedError(
f"No valid implementation exists for `{recipe_name}` recipe.")
def get_name(self):
return self.recipe.get_name()
def get_description(self):
return self.recipe.get_description()
def run_break(self):
return self.recipe.run_break()
def run_restore(self):
return self.recipe.run_restore()
def run_hint(self):
return self.recipe.run_hint()
def run_verify(self):
return self.recipe.run_verify()
class ConfigBasedRecipeRunner:
"""A SRE Recipe runner for running recipes implemented using configs.
Given a `recipe_name`, it tries to load `recipes/configs_based/recipe_name.yaml`.
    This runner will propagate all exceptions to the caller, and it is the caller's
responsibility to handle any exception and to perform any error logging.
"""
def __init__(self, recipe_name, skip_loadgen=False):
filepath = path.join(path.dirname(
path.abspath(__file__)), f"recipes/configs_based/{recipe_name}.yaml")
with open(filepath, "r") as file:
self.recipe = yaml.safe_load(file.read())
if not self.recipe:
raise ValueError("Cannot parse config as YAML.")
self.action_handler = ActionHandler(skip_loadgen)
def get_name(self):
return self.recipe.get("name", "No name found")
def get_description(self):
return self.recipe.get("description", "No description found")
@property
def config(self):
return self.recipe.get("config", {})
def run_break(self):
print('Deploying broken service...')
for action in self.config.get("break", []):
self.action_handler.handle_action(action)
print('Done. Deployed broken service')
def run_restore(self):
print('Restoring service back to normal...')
for action in self.config.get("restore", []):
self.action_handler.handle_action(action)
print('Done. Restored broken service to working state.')
def run_hint(self):
hint = self.config.get("hint", None)
if hint:
print(f'Here is your hint!\n\n{hint}')
else:
print("This recipe has no hints.")
def run_verify(self):
verify_config = self.config.get("verify", [])
if not verify_config:
raise NotImplementedError("Verify is not configured")
for action in verify_config:
self.action_handler.handle_action(action)
class ActionHandler:
"""A utility helper for executing actions supported by SRE Recipe configs.
Implementation Guide
--------------------
1. Map the action name to the action handler in the `__init__` method.
2. All action handlers should take exactly one argument, which is the full
config specified for the action itself, as it is defined in YAML.
For example: {action: "run-shell-commands", commands: ['echo Hi']}
    This runner will propagate all exceptions to the caller, and it is the caller's
responsibility to handle any exception and to perform any error logging.
"""
def __init__(self, skip_loadgen=False):
# Action types to action handlers
self.action_map = {
"run-shell-commands": self.run_shell_commands,
"multiple-choice-quiz": self.run_multiple_choice_quiz,
"loadgen-spawn": self.loadgen_spawn,
"loadgen-stop": self.loadgen_stop,
}
if skip_loadgen:
# ignore loadgen actions when requested
self.action_map["loadgen-spawn"] = lambda *args: None
self.action_map['loadgen-stop'] = lambda *args: None
# Reusable parameters shared between action handlers
self.loadgen_ip = None
def handle_action(self, config):
if "action" not in config:
raise ValueError("Action config missing `action` type")
action_type = config["action"]
if action_type not in self.action_map:
raise NotImplementedError(
f"Action type not implemented: {action_type}")
return self.action_map[action_type](config)
def init_loadgen_ip(self):
if not self.loadgen_ip:
self.loadgen_ip, err = utils.get_loadgen_ip()
if err:
raise RuntimeError(f"Failed to get loadgen IP: {err}")
############################ Action Handlers ###############################
def run_shell_commands(self, config):
"""Runs the commands one at a time in shell.
        Config Parameters
----------------
commands: string[]
Required. A list of shell command strings.
"""
for cmd in config["commands"]:
output, err = utils.run_shell_command(cmd)
if err:
raise RuntimeError(
f"Failed to run command `{cmd}`: {err}")
def run_multiple_choice_quiz(self, config):
"""Runs an interactive multiple choice quiz.
        Config Parameters
----------------
prompt: string
Required. The question prompt to display to the user.
choices: dict[]
option: string
Required. The answer display text to show to the user.
accept: bool
Optional. If true, the choice is considered correct.
"""
if "prompt" not in config:
raise ValueError("No prompt specified for the multiple choice.")
elif "choices" not in config:
raise ValueError(
"No answer choices available for the multiple choice.")
utils.run_interactive_multiple_choice(
config["prompt"], config["choices"])
def loadgen_spawn(self, config):
"""
        Starts spawning a load shape at the specified spawn rate until a total
        user count is reached, then stops the load after the specified timeout.
        Config Parameters
----------------
user_type: string
Optional. Same as the `sre_recipe_user_identifier` for locust tasks
defined in `sre/loadgenerator/locust_tasks`.
Default: BasicHomePageViewingUser.
user_count: int
Optional. The number of total users to spawn. Default: 20.
spawn_rate: int
Optional. The number of users per second to spawn. Default: 1.
stop_after: int
Optional. The number of seconds to spawn before stopping.
Default: 600 seconds.
"""
self.init_loadgen_ip()
user_type = config.get(
"user_type", DEFAULT_LOADGEN_USER_TYPE)
resp = requests.post(
f"http://{self.loadgen_ip}:81/api/spawn/{user_type}",
{
"user_count": int(config.get("user_count", DEFAULT_LOADGEN_USER_COUNT)),
"spawn_rate": int(config.get("spawn_rate", DEFAULT_LOADGEN_SPAWN_RATE)),
"stop_after": int(config.get("stop_after", DEFAULT_LOADGEN_TIMEOUT_SECONDS))
})
if not resp.ok:
raise RuntimeError(
f"Failed to start load generation: {resp.status_code} {resp.reason}")
def loadgen_stop(self, config):
"""Stops any active load generation produced by SRE Recipes.
        No config parameters are required.
"""
self.init_loadgen_ip()
resp = requests.post(f"http://{self.loadgen_ip}:81/api/stop")
if not resp.ok:
raise RuntimeError(
f"Failed to stop existing load generation: {resp.status_code} {resp.reason}")
| 36.578358 | 97 | 0.645007 | 1,216 | 9,803 | 5.072368 | 0.259046 | 0.02594 | 0.020428 | 0.029183 | 0.257782 | 0.209306 | 0.17607 | 0.14559 | 0.117056 | 0.076524 | 0 | 0.00373 | 0.261553 | 9,803 | 267 | 98 | 36.715356 | 0.848322 | 0.357748 | 0 | 0.208955 | 0 | 0 | 0.195339 | 0.01645 | 0 | 0 | 0 | 0 | 0 | 1 | 0.164179 | false | 0 | 0.074627 | 0.067164 | 0.335821 | 0.044776 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4fc6f58b73db4f14ff029cec250fcca647c6c8f4 | 4,065 | py | Python | backend/ctgov/api/serializers.py | ClinicalTrialsTeam/CTFrontier | d7e2558f314f6bbd9964667e12ee5655bc64215b | [
"Apache-2.0"
] | 4 | 2021-03-07T02:16:22.000Z | 2022-03-13T03:22:42.000Z | backend/ctgov/api/serializers.py | ClinicalTrialsTeam/CTFrontier | d7e2558f314f6bbd9964667e12ee5655bc64215b | [
"Apache-2.0"
] | 8 | 2021-03-14T22:14:07.000Z | 2021-04-26T17:20:56.000Z | backend/ctgov/api/serializers.py | ClinicalTrialsTeam/CTFrontier | d7e2558f314f6bbd9964667e12ee5655bc64215b | [
"Apache-2.0"
] | 1 | 2021-03-07T02:16:39.000Z | 2021-03-07T02:16:39.000Z | from rest_framework import serializers
from ctgov.models import (
BriefSummaries,
SearchStudies,
Facilities,
BrowseConditions,
Countries,
)
from django_elasticsearch_dsl_drf.serializers import DocumentSerializer
from ctgov.documents import ClinicalTrialsSearchStudies
# Serializer to return Brief Summaries dataset
class BriefSummariesSerializer(serializers.ModelSerializer):
class Meta:
model = BriefSummaries
fields = ["nct", "description"]
# Serializer to return Search Studies Results dataset
class SearchStudiesSerializer(serializers.ModelSerializer):
class Meta:
model = SearchStudies
fields = [
"status",
"brief_title",
"nct_id",
"condition_name",
"intervention_name",
"location_name",
"study_phase",
"sponsor_name",
"location_name",
"study_brief_desc",
"primary_outcome_measures",
"secondary_outcome_measures",
"study_start_date",
"primary_completion_date",
]
# Serializer to return Study Countries list
class CountriesSerializer(serializers.ModelSerializer):
class Meta:
model = Countries
fields = ["name"]
# Serializer to return Study States list
class StatesSerializer(serializers.ModelSerializer):
class Meta:
model = Facilities
fields = ["state"]
# Serializer to return Study Cities list
class CitySerializer(serializers.ModelSerializer):
class Meta:
model = Facilities
fields = ["city"]
# Serializer to return conditions list
class ConditionsSerializer(serializers.ModelSerializer):
class Meta:
model = BrowseConditions
fields = ["mesh_term"]
# Serializer to return Trial Timelines dataset
class TrialTimelinesSerializer(serializers.ModelSerializer):
class Meta:
model = SearchStudies
fields = [
"brief_title",
"status",
"sponsor_name",
"nct_id",
"study_start_date",
"primary_completion_date",
"study_phase",
]
# Serializer to return single Study dataset
class StudyDetailSerializer(serializers.ModelSerializer):
class Meta:
model = SearchStudies
fields = [
"nct_id",
"brief_title",
"official_title",
"study_brief_desc",
"study_detailed_desc",
"status",
"study_phase",
"study_start_date",
"primary_completion_date",
"study_first_posted_date",
"results_first_posted_date",
"last_update_posted_date",
"results_submitted_qc_not_done",
"results_submitted_qc_done",
"study_type",
"condition_name",
"intervention_name",
"eligibility_criteria",
"eligibility_gender",
"eligibility_min_age",
"eligibility_max_age",
"sponsor_name",
"funder_type",
"primary_outcome_measures",
"secondary_outcome_measures",
"study_ids",
"document_types",
"is_unapproved_device",
"acronym",
"healthy_volunteers",
"location_name",
"country_name",
"city_name",
"state_name",
]
# Serializer for the Elasticsearch document
class SearchStudiesDocumentSerializer(DocumentSerializer):
class Meta:
document = ClinicalTrialsSearchStudies
fields = [
"status",
"brief_title",
"nct_id",
"condition_name",
"intervention_name",
"location_name",
"study_phase",
"sponsor_name",
"location_name",
"study_brief_desc",
"primary_outcome_measures",
"secondary_outcome_measures",
"study_start_date",
"primary_completion_date",
]
| 27.466216 | 71 | 0.59508 | 332 | 4,065 | 6.981928 | 0.313253 | 0.034944 | 0.062123 | 0.120794 | 0.399482 | 0.347714 | 0.347714 | 0.169111 | 0.169111 | 0.169111 | 0 | 0 | 0.326445 | 4,065 | 147 | 72 | 27.653061 | 0.846603 | 0.092989 | 0 | 0.554622 | 0 | 0 | 0.295349 | 0.09981 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.033613 | 0 | 0.184874 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4fc8f0cb502a004f5f664475459658f0253a384a | 3,792 | py | Python | histcensusgis/text/download_sm_crosswalk.py | graziul/hist-census-gis | 558bf38cd0e444b5a91133dd70c88210da3cbbc9 | [
"MIT"
] | 4 | 2017-05-15T20:54:25.000Z | 2019-01-30T19:04:24.000Z | histcensusgis/text/download_sm_crosswalk.py | graziul/hist-census-gis | 558bf38cd0e444b5a91133dd70c88210da3cbbc9 | [
"MIT"
] | null | null | null | histcensusgis/text/download_sm_crosswalk.py | graziul/hist-census-gis | 558bf38cd0e444b5a91133dd70c88210da3cbbc9 | [
"MIT"
] | 1 | 2017-07-12T18:06:19.000Z | 2017-07-12T18:06:19.000Z | import re
import urllib.request
state_list = ["ri"]
# Dict: add v(alue) to k(ey), create k if it doesn't exist
def Dict_append(Dict, k, v):
    if k not in Dict:
        Dict[k] = [v]
    else:
        Dict[k].append(v)

# Version of Dict_append that only accepts unique v(alues) for each k(ey)
def Dict_append_unique(Dict, k, v):
    if k not in Dict:
        Dict[k] = [v]
    else:
        if v not in Dict[k]:
            Dict[k].append(v)
def download_year(year) : #year = 1920 or year = 1940
for state_abbr in state_list :
url = "http://stevemorse.org/census/%s/%s.txt" % (str(year),state_abbr)
        url_handle = urllib.request.urlopen(url)
        sourcetext = url_handle.read().decode('utf-8', errors='replace').splitlines()
        url_handle.close()
if year == 1940 :
old_year = 1930
if year == 1920 :
year = 1930
old_year = 1920
year, old_year = str(year),str(old_year)
county, old_county = '',''
city = ''
ed, old_ed = '',''
county_name,old_county_name = '',''
for line in sourcetext :
line = line.strip()
if line[:2] == '**' or line == '' :
continue
if line[0] == '+' : #line defining a county number
year_county = re.search("\+([0-9]+)=([0-9%]+),(.*)",line)
line_year,line_county,line_county_name = year_county.group(1),year_county.group(2),year_county.group(3)
if line_year == year :
if line_county == '%' : #"%" sign means number is same as old number
county = old_county
else :
county = line_county
if line_county_name == '%' :
county_name = old_county_name
else :
county_name = line_county_name
if line_year == old_year :
old_county = line_county
old_county_name = line_county_name
county_name_county_dict[county_name] = county
Dict_append_unique(state_county_dict,state_abbr,county)
if old_county != county or old_county_name != county_name:
county_name_county_dict[old_county_name] = old_county
county_change_dict[old_county_name] = county_name
continue
if line[0] == '^' : #cache value(s) for old_ed
cache_ed = re.search("\^\*(.+)",line)
old_ed = cache_ed.group(1)
continue
eds_city = re.search("([^*]+)\*([^*]+)\*?(.*)",line)
ed,line_old_ed,line_city = eds_city.group(1),eds_city.group(2),eds_city.group(3)
if line_old_ed != '^' :
old_ed = line_old_ed
if line_city != "" :
city = line_city
if line_city == "#" : # "#" clears previous value for city
city = ""
Dict_append_unique(ed_old_ed_dict,county+'-'+ed,old_county+'-'+old_ed)
if city!= "" :
Dict_append_unique(city_ed_dict,city,county+'-'+ed)
Dict_append_unique(county_ed_dict,county,ed)
if county != old_county:
Dict_append_unique(county_ed_dict,old_county,old_ed)
ed_old_ed_dict = {} #lookup "[county]-[ed]" -> list of old ed(s) corresponding
city_ed_dict = {} #lookup city+state_abbr -> list of eds in city
county_ed_dict = {} #lookup county number -> list of eds in county
state_county_dict = {} #lookup state_abbr -> list of counties in state
county_name_county_dict = {} #lookup county name+state_abbr -> county number
county_change_dict = {} #lookup old_county_name -> county_name
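# Example lookups after download_year(1940) (illustrative keys):
#   state_county_dict['ri']   -> list of 1940 county numbers in Rhode Island
#   county_ed_dict['5']       -> list of 1940 EDs in county number 5
#   ed_old_ed_dict['5-12']    -> list of old '[county]-[ed]' codes for 1940 ED 12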
download_year(1940)
| 39.915789 | 119 | 0.539821 | 482 | 3,792 | 3.966805 | 0.176349 | 0.115063 | 0.083682 | 0.062762 | 0.172071 | 0.089958 | 0.029289 | 0.029289 | 0.029289 | 0.029289 | 0 | 0.018511 | 0.344673 | 3,792 | 94 | 120 | 40.340426 | 0.750905 | 0.147943 | 0 | 0.1625 | 0 | 0 | 0.033271 | 0.014925 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0375 | false | 0 | 0.025 | 0 | 0.0625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4fcbdc8b235bd4cfffdf1d92aa508c37e0145018 | 6,973 | py | Python | model_lib/src/model.py | modzy/grpc-tensorflow-object-detection | e81b2ae60c46f48cad204934868849e97e0d4a63 | [
"Apache-2.0"
] | null | null | null | model_lib/src/model.py | modzy/grpc-tensorflow-object-detection | e81b2ae60c46f48cad204934868849e97e0d4a63 | [
"Apache-2.0"
] | null | null | null | model_lib/src/model.py | modzy/grpc-tensorflow-object-detection | e81b2ae60c46f48cad204934868849e97e0d4a63 | [
"Apache-2.0"
] | null | null | null | import json
from typing import Dict, List
import os
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
"""
The required output structure for a successful inference run for a model is the following JSON:
{
"data": {
"result": <inference-result>,
"explanation": <explanation-data>,
"drift": <drift-data>,
}
}
The `data` key is required and stores a dictionary which represents the output for a specific input. The only top-level
key within these dictionaries that is required is `result`, however, `explanation` and `drift` are additional keys that
may be included if your particular model supports drift detection or explainability. All three of these keys
(`result`, `explanation`, and `drift`) are required to have a particular format in order to provide platform support.
This format type must be specified in the model.yaml file for the version that you are releasing, and the structure for
this format type must be followed. If no formats are specified, it is possible to define your own custom structure on a
per-model basis.
The required output structure for a failed inference run for a model is the following JSON:
{
"error_message": <error-message>
}
Here, all error information that you can extract can be loaded into a single string and returned. This could be a JSON
string with a structured error log, or a stack trace dumped to a string.
Specifications:
This section details the currently supported specifications for the "result", "explanation", and "drift" fields of each
successful output JSON. These correspond to specifications selected in the `resultsFormat`, `driftFormat`,
`explanationFormat` of the model.yaml file for the particular version of the model.
* `resultsFormat`:
1A) imageClassification
"result": {
"classPredictions": [
{"class": <class-1-label>, "score": <class-1-probability>},
...,
{"class": <class-n-label>, "score": <class-n-probability>}
]
}
* `driftFormat`
2A) imageRLE
explanation: {
"maskRLE": <rle-mask>
}
Here, the <rle-mask> is a fortran ordered run-length encoding.
* `explanationFormat`
3A) ResNet50
drift: {
{
"layer1": <layer-data>
"layer2": <layer-data>
"layer3": <layer-data>
"layer4": <layer-data>
}
}
"""
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
MODEL_DIR = os.path.join(ROOT_DIR, "openimages_v4_ssd_mobilenet_v2_1")
def get_success_json_structure(inference_result, explanation_result, drift_result) -> Dict[str, bytes]:
output_item_json = {
"data": {
"result": inference_result,
"explanation": explanation_result,
"drift": drift_result,
}
}
return {"results.json": json.dumps(output_item_json, separators=(",", ":")).encode()}
def get_failure_json_structure(error_message: str) -> Dict[str, bytes]:
error_json = {"error_message": error_message}
return {"error": json.dumps(error_json).encode()}
class TensorflowObjectDetection:
# Note: Throwing unhandled exceptions that contain lots of information about the issue is expected and encouraged
# for models when they encounter any issues or internal errors.
def __init__(self):
"""
This constructor should perform all initialization for your model. For example, all one-time tasks such as
loading your model weights into memory should be performed here.
This corresponds to the Status remote procedure call.
"""
self.detector = hub.load(MODEL_DIR).signatures['default']
def format_detections(self,result_object):
# parse out what we need from result_object
class_names = result_object["detection_class_entities"]
scores = result_object["detection_scores"]
bboxes = result_object["detection_boxes"]
# store formatted detections in this list
formatted_detections = []
for name, score, bbox in zip(class_names, scores, bboxes):
ymin, xmin, ymax, xmax = tuple(bbox)
detection = {}
detection["class"] = name.decode()
detection["score"] = round(score.item(), 3)
detection["xmin"] = xmin.item()
detection["ymin"] = ymin.item()
detection["xmax"] = xmax.item()
detection["ymax"] = ymax.item()
formatted_detections.append(detection)
formatted_results = {"detections": formatted_detections}
return formatted_results
def handle_single_input(self, model_input: Dict[str, bytes], detect_drift: bool, explain: bool) -> Dict[str, bytes]:
"""
This corresponds to the Run remote procedure call for single inputs.
"""
# `model_input` will have binary contents for each of the input file types specified in your model.yaml file
# You are responsible for processing these files in a manner that is specific to your model, and producing
# inference, drift, and explainability results where appropriate.
        # process image bytes using the tf library
img_bytes = model_input["image"]
img = tf.io.decode_image(img_bytes, channels=3)
converted_img = tf.image.convert_image_dtype(img, tf.float32)[tf.newaxis, ...]
results = self.detector(converted_img)
# format results
        result = {key: value.numpy() for key, value in results.items()}
inference_result = self.format_detections(result)
explanation_result = None
drift_result = None
# structure outputs correctly
output = get_success_json_structure(inference_result, explanation_result, drift_result)
return output
def handle_input_batch(self, model_inputs: List[Dict[str, bytes]], detect_drift, explain) -> List[Dict[str, bytes]]:
"""
This is an optional method that will be attempted to be called when more than one inputs to the model
are ready to be processed. This enables a user to provide a more efficient means of handling inputs in batch
that takes advantage of specific properties of their model.
If you are not implementing custom batch processing, this method should raise a NotImplementedError. If you are
implementing custom batch processing, then any unhandled exception will be interpreted as a fatal error that
will result in the entire batch failing. If you would like to allow individual elements of the batch to fail
without failing the entire batch, then you must handle the exception within this function, and ensure the JSON
structure for messages with an error has a top level "error" key with a detailed description of the error
message.
This corresponds to the Run remote procedure call for batch inputs.
{
"error": "your error message here"
}
"""
raise NotImplementedError
| 38.738889 | 120 | 0.686792 | 897 | 6,973 | 5.245262 | 0.347826 | 0.025292 | 0.015303 | 0.012752 | 0.136451 | 0.10712 | 0.085016 | 0.064187 | 0.064187 | 0.028055 | 0 | 0.003363 | 0.232468 | 6,973 | 179 | 121 | 38.955307 | 0.875747 | 0.265022 | 0 | 0 | 0 | 0 | 0.070335 | 0.020408 | 0 | 0 | 0 | 0 | 0 | 1 | 0.113208 | false | 0 | 0.113208 | 0 | 0.320755 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4fce8b82f36a45eec7ceec13d644693923705d92 | 740 | py | Python | dexbot/qt_queue/queue_dispatcher.py | Learn-code-strategies/DEXBot | ed85b12d8ad8d6ec373fd216a98e55b72f90b860 | [
"MIT"
] | 1 | 2019-11-10T06:53:35.000Z | 2019-11-10T06:53:35.000Z | dexbot/qt_queue/queue_dispatcher.py | g3d/DEXBot | a2b1462d78d7154cb10871a7cec9a44c8d6664de | [
"MIT"
] | null | null | null | dexbot/qt_queue/queue_dispatcher.py | g3d/DEXBot | a2b1462d78d7154cb10871a7cec9a44c8d6664de | [
"MIT"
] | 1 | 2019-11-10T06:53:37.000Z | 2019-11-10T06:53:37.000Z | from PyQt5.Qt import QApplication
from PyQt5.QtCore import QThread, QEvent
from dexbot.qt_queue.idle_queue import idle_loop
class ThreadDispatcher(QThread):
def __init__(self, parent):
QThread.__init__(self)
self.parent = parent
def run(self):
while True:
callback = idle_loop.get()
if callback is None:
break
QApplication.postEvent(self.parent, _Event(callback))
def stop(self):
idle_loop.put(None)
self.wait()
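# Example wiring (a sketch; assumes `window` is a QObject whose event() method
# invokes the callback carried by _Event):
#   dispatcher = ThreadDispatcher(window)
#   dispatcher.start()                       # drains idle_loop on a worker thread
#   idle_loop.put(lambda: window.update())   # hypothetical GUI callback
#   ...
#   dispatcher.stop()                        # unblocks the queue and joins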
class _Event(QEvent):
EVENT_TYPE = QEvent.Type(QEvent.registerEventType())
def __init__(self, callback):
# Thread-safe
QEvent.__init__(self, _Event.EVENT_TYPE)
self.callback = callback | 24.666667 | 65 | 0.644595 | 86 | 740 | 5.244186 | 0.418605 | 0.070953 | 0.04878 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00369 | 0.267568 | 740 | 30 | 66 | 24.666667 | 0.828413 | 0.014865 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.190476 | false | 0 | 0.142857 | 0 | 0.47619 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4fd1f271a1d4fe49bf1991df1e909dc9274b04de | 9,083 | py | Python | google/cloud/operation/__init__.py | yukihira1992/python-cloud-core | 45dcc73722846f671cc1434bebdd99d154a8c892 | [
"Apache-2.0"
] | 18 | 2020-08-06T04:01:03.000Z | 2022-03-28T04:05:57.000Z | google/cloud/operation/__init__.py | yukihira1992/python-cloud-core | 45dcc73722846f671cc1434bebdd99d154a8c892 | [
"Apache-2.0"
] | 75 | 2020-02-07T02:45:27.000Z | 2022-03-07T21:57:52.000Z | google/cloud/operation/__init__.py | yukihira1992/python-cloud-core | 45dcc73722846f671cc1434bebdd99d154a8c892 | [
"Apache-2.0"
] | 18 | 2020-02-08T13:52:05.000Z | 2022-03-31T19:50:51.000Z | # Copyright 2016 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrap long-running operations returned from Google Cloud APIs."""
from typing import Dict
from google.longrunning import operations_pb2
from google.protobuf import json_format
_GOOGLE_APIS_PREFIX = "type.googleapis.com"
_TYPE_URL_MAP: Dict[str, type] = {}
def _compute_type_url(klass, prefix=_GOOGLE_APIS_PREFIX):
"""Compute a type URL for a klass.
:type klass: type
:param klass: class to be used as a factory for the given type
:type prefix: str
:param prefix: URL prefix for the type
:rtype: str
:returns: the URL, prefixed as appropriate
"""
name = klass.DESCRIPTOR.full_name
return "%s/%s" % (prefix, name)
def register_type(klass, type_url=None):
"""Register a klass as the factory for a given type URL.
:type klass: :class:`type`
:param klass: class to be used as a factory for the given type
:type type_url: str
:param type_url: (Optional) URL naming the type. If not provided,
infers the URL from the type descriptor.
:raises ValueError: if a registration already exists for the URL.
"""
if type_url is None:
type_url = _compute_type_url(klass)
if type_url in _TYPE_URL_MAP:
if _TYPE_URL_MAP[type_url] is not klass:
raise ValueError("Conflict: %s" % (_TYPE_URL_MAP[type_url],))
_TYPE_URL_MAP[type_url] = klass
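# Usage sketch (``MyMetadata`` is a hypothetical generated protobuf class):
#   register_type(MyMetadata)   # URL inferred from the descriptor
#   register_type(MyMetadata)   # no-op: same class already registered
#   register_type(OtherClass, _compute_type_url(MyMetadata))  # raises ValueError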
def _from_any(any_pb):
"""Convert an ``Any`` protobuf into the actual class.
Uses the type URL to do the conversion.
.. note::
This assumes that the type URL is already registered.
:type any_pb: :class:`google.protobuf.any_pb2.Any`
:param any_pb: An any object to be converted.
:rtype: object
:returns: The instance (of the correct type) stored in the any
instance.
"""
klass = _TYPE_URL_MAP[any_pb.type_url]
return klass.FromString(any_pb.value)
class Operation(object):
"""Representation of a Google API Long-Running Operation.
.. _protobuf: https://github.com/googleapis/googleapis/blob/\
050400df0fdb16f63b63e9dee53819044bffc857/\
google/longrunning/operations.proto#L80
.. _service: https://github.com/googleapis/googleapis/blob/\
050400df0fdb16f63b63e9dee53819044bffc857/\
google/longrunning/operations.proto#L38
.. _JSON: https://cloud.google.com/speech/reference/rest/\
v1beta1/operations#Operation
This wraps an operation `protobuf`_ object and attempts to
interact with the long-running operations `service`_ (specific
to a given API). (Some services also offer a `JSON`_
API that maps the same underlying data type.)
:type name: str
:param name: The fully-qualified path naming the operation.
:type client: :class:`~google.cloud.client.Client`
:param client: The client used to poll for the status of the operation.
If the operation was created via JSON/HTTP, the client
must own a :class:`~google.cloud._http.Connection`
to send polling requests. If created via protobuf, the
client must have a gRPC stub in the ``_operations_stub``
attribute.
:type caller_metadata: dict
:param caller_metadata: caller-assigned metadata about the operation
"""
target = None
"""Instance assocated with the operations: callers may set."""
response = None
"""Response returned from completed operation.
Only one of this and :attr:`error` can be populated.
"""
error = None
"""Error that resulted from a failed (complete) operation.
Only one of this and :attr:`response` can be populated.
"""
metadata = None
"""Metadata about the current operation (as a protobuf).
Code that uses operations must register the metadata types (via
:func:`register_type`) to ensure that the metadata fields can be
converted into the correct types.
"""
_from_grpc = True
def __init__(self, name, client, **caller_metadata):
self.name = name
self.client = client
self.caller_metadata = caller_metadata.copy()
self._complete = False
@classmethod
def from_pb(cls, operation_pb, client, **caller_metadata):
"""Factory: construct an instance from a protobuf.
:type operation_pb:
:class:`~google.longrunning.operations_pb2.Operation`
:param operation_pb: Protobuf to be parsed.
:type client: object: must provide ``_operations_stub`` accessor.
:param client: The client used to poll for the status of the operation.
:type caller_metadata: dict
:param caller_metadata: caller-assigned metadata about the operation
:rtype: :class:`Operation`
:returns: new instance, with attributes based on the protobuf.
"""
result = cls(operation_pb.name, client, **caller_metadata)
result._update_state(operation_pb)
result._from_grpc = True
return result
@classmethod
def from_dict(cls, operation, client, **caller_metadata):
"""Factory: construct an instance from a dictionary.
:type operation: dict
:param operation: Operation as a JSON object.
:type client: :class:`~google.cloud.client.Client`
:param client: The client used to poll for the status of the operation.
:type caller_metadata: dict
:param caller_metadata: caller-assigned metadata about the operation
:rtype: :class:`Operation`
:returns: new instance, with attributes based on the protobuf.
"""
operation_pb = json_format.ParseDict(operation, operations_pb2.Operation())
result = cls(operation_pb.name, client, **caller_metadata)
result._update_state(operation_pb)
result._from_grpc = False
return result
@property
def complete(self):
"""Has the operation already completed?
:rtype: bool
:returns: True if already completed, else false.
"""
return self._complete
def _get_operation_rpc(self):
"""Polls the status of the current operation.
Uses gRPC request to check.
:rtype: :class:`~google.longrunning.operations_pb2.Operation`
:returns: The latest status of the current operation.
"""
request_pb = operations_pb2.GetOperationRequest(name=self.name)
return self.client._operations_stub.GetOperation(request_pb)
def _get_operation_http(self):
"""Checks the status of the current operation.
Uses HTTP request to check.
:rtype: :class:`~google.longrunning.operations_pb2.Operation`
:returns: The latest status of the current operation.
"""
path = "operations/%s" % (self.name,)
api_response = self.client._connection.api_request(method="GET", path=path)
return json_format.ParseDict(api_response, operations_pb2.Operation())
def _get_operation(self):
"""Checks the status of the current operation.
:rtype: :class:`~google.longrunning.operations_pb2.Operation`
:returns: The latest status of the current operation.
"""
if self._from_grpc:
return self._get_operation_rpc()
else:
return self._get_operation_http()
def _update_state(self, operation_pb):
"""Update the state of the current object based on operation.
:type operation_pb:
:class:`~google.longrunning.operations_pb2.Operation`
:param operation_pb: Protobuf to be parsed.
"""
if operation_pb.done:
self._complete = True
if operation_pb.HasField("metadata"):
self.metadata = _from_any(operation_pb.metadata)
result_type = operation_pb.WhichOneof("result")
if result_type == "error":
self.error = operation_pb.error
elif result_type == "response":
self.response = _from_any(operation_pb.response)
def poll(self):
"""Check if the operation has finished.
:rtype: bool
:returns: A boolean indicating if the current operation has completed.
:raises ValueError: if the operation
has already completed.
"""
if self.complete:
raise ValueError("The operation has completed.")
operation_pb = self._get_operation()
self._update_state(operation_pb)
return self.complete
| 33.765799 | 83 | 0.666079 | 1,139 | 9,083 | 5.169447 | 0.215101 | 0.026155 | 0.016814 | 0.014266 | 0.330333 | 0.321671 | 0.321671 | 0.305367 | 0.29178 | 0.274457 | 0 | 0.010865 | 0.250138 | 9,083 | 268 | 84 | 33.891791 | 0.853619 | 0.531873 | 0 | 0.109589 | 0 | 0 | 0.034449 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.164384 | false | 0 | 0.041096 | 0 | 0.424658 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4fd2696a35b16c6c97eed2b7e1a388183c51d0e4 | 3,007 | py | Python | src/datasets/pamap2.py | niall-twomey/har_datasets | 68f142ba613ce26f67cdd6b871117f4c24ea603f | [
"MIT"
] | 24 | 2018-12-12T08:54:52.000Z | 2021-12-07T08:45:13.000Z | src/datasets/pamap2.py | niall-twomey/har_datasets | 68f142ba613ce26f67cdd6b871117f4c24ea603f | [
"MIT"
] | 3 | 2019-07-18T20:14:41.000Z | 2022-03-12T01:03:28.000Z | src/datasets/pamap2.py | niall-twomey/har_datasets | 68f142ba613ce26f67cdd6b871117f4c24ea603f | [
"MIT"
] | 9 | 2018-12-12T16:18:39.000Z | 2022-03-30T16:25:47.000Z | from collections import defaultdict
from os.path import join
import numpy as np
import pandas as pd
from tqdm import tqdm
from src.datasets.base import Dataset
from src.utils.decorators import fold_decorator
from src.utils.decorators import index_decorator
from src.utils.decorators import label_decorator
__all__ = [
"pamap2",
]
class pamap2(Dataset):
def __init__(self):
super(pamap2, self).__init__(name=self.__class__.__name__, unzip_path=lambda p: join(p, "Protocol"))
@label_decorator
def build_label(self, task, *args, **kwargs):
df = pd.DataFrame(iter_pamap2_subs(path=self.unzip_path, cols=[1], desc=f"{self.identifier} Labels"))
return self.meta.inv_lookup[task], df
@fold_decorator
def build_predefined(self, *args, **kwargs):
def folder(sid, data):
return np.zeros(data.shape[0]) + sid
df = iter_pamap2_subs(
path=self.unzip_path, cols=[1], desc=f"{self.identifier} Folds", callback=folder, columns=["fold"],
).astype(int)
lookup = {
1: "train",
2: "train",
3: "test",
4: "train",
5: "train",
6: "test",
7: "train",
8: "train",
9: "test",
}
return df.assign(fold_0=df["fold"].apply(lookup.__getitem__))[["fold_0"]].astype("category")
@index_decorator
def build_index(self, *args, **kwargs):
def indexer(sid, data):
subject = np.zeros(data.shape[0])[:, None] + sid
trial = np.zeros(data.shape[0])[:, None] + sid
return np.concatenate((subject, trial, data), axis=1)
df = iter_pamap2_subs(
path=self.unzip_path,
cols=[0],
desc=f"{self.identifier} Index",
callback=indexer,
columns=["subject", "trial", "time"],
).astype(dict(subject=int, trial=int, time=float))
return df
def build_data(self, loc, mod, *args, **kwargs):
offset = dict(wrist=3, chest=20, ankle=37)[loc] + dict(accel=1, gyro=7, mag=10)[mod]
df = iter_pamap2_subs(
path=self.unzip_path,
cols=list(range(offset, offset + 3)),
desc=f"Parsing {mod} at {loc}",
columns=["x", "y", "z"],
).astype(float)
scale = dict(accel=9.80665, gyro=np.pi * 2.0, mag=1.0)[mod]
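        # Scaling note (an interpretation of the constants): accelerometer m/s^2
        # is divided by 9.80665 to give g, gyroscope rad/s by 2*pi, and
        # magnetometer values pass through unchanged.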
return df.values / scale
def iter_pamap2_subs(path, cols, desc, columns=None, callback=None, n_subjects=9):
data = []
for sid in tqdm(range(1, n_subjects + 1), desc=desc):
datum = pd.read_csv(join(path, f"subject10{sid}.dat"), delim_whitespace=True, header=None, usecols=cols).fillna(
method="ffill"
)
assert np.isfinite(datum.values).all()
if callback:
data.extend(callback(sid, datum.values))
else:
data.extend(datum.values)
df = pd.DataFrame(data)
if columns:
df.columns = columns
return df
| 30.373737 | 120 | 0.584968 | 389 | 3,007 | 4.377892 | 0.341902 | 0.026424 | 0.041104 | 0.052848 | 0.207281 | 0.180857 | 0.137405 | 0.109219 | 0.109219 | 0.064592 | 0 | 0.02333 | 0.27303 | 3,007 | 98 | 121 | 30.683673 | 0.755718 | 0 | 0 | 0.090909 | 0 | 0 | 0.070502 | 0 | 0 | 0 | 0 | 0 | 0.012987 | 1 | 0.103896 | false | 0 | 0.116883 | 0.012987 | 0.324675 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4fd295385f1b54e68df6d434899e0f6d50c545f6 | 8,970 | py | Python | tumor_migration_analysis/analyze_TrackMate_tracks.py | agclark12/tumor_migration_analysis | c63d43306f9f381ddec04a8301fcd268a5d71c38 | [
"MIT"
] | null | null | null | tumor_migration_analysis/analyze_TrackMate_tracks.py | agclark12/tumor_migration_analysis | c63d43306f9f381ddec04a8301fcd268a5d71c38 | [
"MIT"
] | null | null | null | tumor_migration_analysis/analyze_TrackMate_tracks.py | agclark12/tumor_migration_analysis | c63d43306f9f381ddec04a8301fcd268a5d71c38 | [
"MIT"
] | null | null | null | #!/opt/local/bin/python
"""
This script reads TrackMate data from .xml files and analyzes the trajectories.
The dynamics parameters are written to a new csv file and histograms for
persistence and mean instantaneous speed are generated.
"""
import os
import time
import xml.etree.ElementTree as ET
import numpy as np
import matplotlib.pyplot as plt
from skimage.io._plugins import tifffile_plugin as tifffile
import migration_analysis as migra
import utility_functions as uf
def plot_corr_vs_dist(ax,dist,corr_mean,corr_std,color):
"""Bins correlation vs. distance data (or any 2D data for that matter)
Parameters
----------
ax : matplotlib axis
the axis used for plotting
dist : 1D numpy array
the distance data (binned x coordinate)
corr_mean : 1D numpy array
the mean directional correlation data (binned)
corr_mean : 1D numpy array
the standard deviation of the directional correlation data (binned)
color : string
the matplotlib color used for plotting
Returns
-------
ax : matplotlib axis
the axis used for plotting
"""
#gets rid of any nans
nan_array = ~np.isnan(corr_mean)
dist = dist[nan_array]
corr_std = corr_std[nan_array]
corr_mean = corr_mean[nan_array]
#plot means
ax.plot(dist, corr_mean, 'o', color=color, zorder=2, alpha=0.7)
#plot std
fill_y_top = np.ones(len(dist))*(corr_mean+corr_std)
fill_y_bottom = np.ones(len(dist))*(corr_mean-corr_std)
ax.fill_between(dist,fill_y_top,fill_y_bottom,facecolor=color,color=color,alpha=0.3,linewidth=0,zorder=1)
ax.set_xlabel('Distance ($\mu$m)')
ax.set_ylabel('Directional Correlation')
return ax
def bin_corr_vs_dist(dist_list,corr_list,n_bins=50):
"""Bins correlation vs. distance data (or any 2D data for that matter)
Parameters
----------
dist_list : 1D list (or numpy array)
the distance data
corr_list : 1D list (or numpy array)
the directional correlation data
Returns
-------
x_vals : 1D numpy array
the binned distance data (centered on the bin)
H_means : 1D numpy array
the mean of the directional correlation at each bin
H_stds : 1D numpy array
the standard deviation of the directional correlation at each bin
H_lens : 1D numpy array
the number of data points (n) at each bin
"""
# converts to np arrays
start_dist_list = np.array(dist_list)
mean_corr_list = np.array(corr_list)
# calculates the means/SDs for the binned data
bins = np.linspace(np.min(start_dist_list), np.max(start_dist_list) + .000000001, n_bins)
bin_id = np.digitize(start_dist_list, bins)
H_means = np.array([np.nanmean(mean_corr_list[bin_id == i]) for i in range(1, len(bins))])
H_stds = np.array([np.nanstd(mean_corr_list[bin_id == i]) for i in range(1, len(bins))])
H_lens = np.array([len(mean_corr_list[bin_id == i]) for i in range(1, len(bins))])
# adjust edges
x_vals = np.array([(bins[i] + bins[i + 1]) / 2. for i in range(len(bins) - 1)])
return x_vals, H_means, H_stds, H_lens
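# Example (illustrative, synthetic data): bin 1000 random distance/correlation
# pairs and plot the binned means +/- SD.
#   dist = np.random.uniform(0, 100, 1000)
#   corr = np.random.uniform(-1, 1, 1000)
#   x, m, s, n = bin_corr_vs_dist(dist, corr)
#   fig, ax = plt.subplots()
#   plot_corr_vs_dist(ax, x, m, s, 'b')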
def analyze_trackmate_file(img_file_path, track_file_path, time_int=1, px_size=1):
"""Analyzes the TrackMate data
Parameters
----------
img_file_path : string
path where the image file is located (must be a .tif file)
track_file_path : string
path where the TrackMate tracks file is located (.xml file)
time_int : float
time interval in minutes
px_size: float
pixel size in um/px
"""
#makes a new directory to store the data
save_dir = os.path.splitext(track_file_path)[0]
if not os.path.isdir(save_dir):
os.mkdir(save_dir)
basename = os.path.basename(save_dir)
print(basename)
#opens and parses the TrackMate xml file
tree = ET.parse(track_file_path)
root = tree.getroot()
#makes some lists for collecting the tracking data
print("Calculating Tracking Parameters")
traj_dict_list = []
param_dict_list = []
#loops through each trajectory
for i, particle in enumerate(root.findall('particle')):
#sets up lists for the trajectory
t = np.zeros(int(particle.attrib['nSpots']))
x = np.zeros_like(t)
y = np.zeros_like(t)
#gets the time and position values for the trajectory
for j, detection in enumerate(particle.findall('detection')):
t[j] = float(detection.attrib['t']) * time_int
x[j] = float(detection.attrib['x']) * px_size
y[j] = float(detection.attrib['y']) * px_size
        if len(t) > 3:  # only analyze tracks with more than 3 time points
#appends the trajectory data to the trajectory list
traj_dict_list.append({'track_id' : i+1, 't' : t, 'x' : x, 'y' : y})
#gets some dynamics parameters for the trajectory and appends to the param list
mean_inst_speed = migra.extract_mean_inst_speed(x,y,t)
persistence = migra.extract_persistence(x,y)
time_lag, msd, (slope, intercept) = migra.extract_msd(x,y,t)
param_dict_list.append({'track_id' : i+1, 'mean_inst_speed' : mean_inst_speed,
'persistence' : persistence, 'coeff_persist' : slope})
#extracts the directional correlation
print("Getting Directional Correlation from Trajectories")
start = time.time()
dist_list, corr_list = migra.extract_dir_corr(traj_dict_list)
end = time.time()
print("Time required:", end - start)
#bins the distance and correlation data and saves
print("Plotting and Saving Correlation Data")
dist_means, corr_means, corr_stds, corr_lens = bin_corr_vs_dist(dist_list,corr_list)
data_to_write = list(zip(dist_means,corr_means,corr_stds,corr_lens))
data_to_write.insert(0,['dist_um','corr_mean','corr_std','corr_n'])
uf.write_csv(data_to_write, os.path.join(save_dir, basename + '_corr_vs_dist.csv'))
#plots the binned data and saves
fig, ax = plt.subplots()
plot_corr_vs_dist(ax,dist_means,corr_means,corr_stds,'b')
plt.tight_layout()
plt.savefig(os.path.join(save_dir, basename + '_corr_vs_dist.pdf'))
#writes params to file
print("Plotting and Saving Dynamics Parameters")
key_list = ['track_id','mean_inst_speed','persistence','coeff_persist']
data_to_write = [key_list]
for line in param_dict_list:
data_to_write.append([line[_] for _ in key_list])
uf.write_csv(data_to_write, os.path.join(save_dir, basename + '_params.csv'))
# plots a histogram of the persistence
persistence_list = np.array([_['persistence'] for _ in param_dict_list])
fig,ax = plt.subplots()
ax.hist(persistence_list,bins=np.linspace(0,1,20))
ax.set_ylabel('Count')
ax.set_xlabel('Persistence')
plt.tight_layout()
plt.savefig(os.path.join(save_dir, basename + '_hist_persistence.pdf'))
plt.close()
# plots a histogram of the mean inst. speed
speed_list = np.array([_['mean_inst_speed'] for _ in param_dict_list]) * 60. #converts to um/hour
fig, ax = plt.subplots()
ax.hist(speed_list,bins=np.linspace(0,20,20))
ax.set_ylabel('Count')
    ax.set_xlabel(r'Mean Inst. Speed ($\mu$m/hr)')
plt.tight_layout()
plt.savefig(os.path.join(save_dir, basename + '_hist_mean_inst_speed.pdf'))
plt.close()
#makes a plot of the trajectories
print("Plotting Trajectories (this will take a long time if you have >500 trajectories)")
im_stk = tifffile.imread(img_file_path)
height, width = im_stk[0].shape
fig, ax = plt.subplots()
cm = plt.get_cmap('hot')
# goes through each trajectory
for j, traj in enumerate(traj_dict_list):
#sets the colors for plotting
n = len(traj['x'])
colors = [cm(1. * i / (n - 1)) for i in range(n - 1)]
ax.set_prop_cycle('color', colors)
#plots the trajectory
for i in range(n - 1):
ax.plot(traj['x'][i:i + 2], traj['y'][i:i + 2])
#finishes the plot
ax.set_xlim(0, width * px_size)
ax.set_ylim(0, height * px_size)
ax.set_xlabel("Distance ($\mu$m)")
ax.set_ylabel("Distance ($\mu$m)")
plt.tight_layout()
plt.savefig(os.path.join(save_dir, basename + '_trajectories.pdf'))
plt.close()
def main():
"""Sets up the analysis for the trajectories from TrackMate.
You should update the image path, track file path, time interval and pixel size here.
You should not have to change anything in the rest of the script.
"""
#sets some initial parameters
img_file_path = './sample_data/tumor_nuclei_small/tumor_nuclei_small.tif'
track_file_path = './sample_data/tumor_nuclei_small/tumor_nuclei_small_stardist_Tracks.xml'
time_int = 30 #min
px_size = 0.91 #um/px
analyze_trackmate_file(img_file_path, track_file_path, time_int, px_size)
if __name__ == "__main__":
main() | 35.454545 | 109 | 0.673913 | 1,367 | 8,970 | 4.221653 | 0.212143 | 0.016635 | 0.020274 | 0.018194 | 0.316929 | 0.280541 | 0.247791 | 0.225957 | 0.163057 | 0.148155 | 0 | 0.010115 | 0.217503 | 8,970 | 253 | 110 | 35.454545 | 0.812081 | 0.308473 | 0 | 0.110169 | 0 | 0 | 0.136922 | 0.028932 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033898 | false | 0 | 0.067797 | 0 | 0.118644 | 0.059322 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4fd65dd48fcff0e5a0416234249d0f79a329e39e | 1,575 | py | Python | libfennel/randutil.py | hyraxZK/fennel | 0aeae644455057547a48422fd9a19d4564d1348f | [
"Apache-2.0"
] | 3 | 2018-04-04T16:26:55.000Z | 2021-02-27T03:08:08.000Z | libfennel/randutil.py | hyraxZK/fennel | 0aeae644455057547a48422fd9a19d4564d1348f | [
"Apache-2.0"
] | 1 | 2019-08-02T09:47:43.000Z | 2019-08-02T09:47:43.000Z | libfennel/randutil.py | hyraxZK/fennel | 0aeae644455057547a48422fd9a19d4564d1348f | [
"Apache-2.0"
] | 3 | 2018-10-30T09:40:10.000Z | 2020-01-16T07:48:48.000Z | #!/usr/bin/python
#
# (C) 2017 Riad S. Wahby <rsw@cs.stanford.edu>
#
# rand gen utilities (split from util to break circular dep)
import random
from libfennel.defs import Defs
import libfennel.gateprover as gp
import libfennel.util as util
def rand_ckt(nOutBits, nInBits):
in0v = []
in1v = []
typv = []
choices = ( gp.MulGateProver
, gp.AddGateProver
, gp.SubGateProver
, gp.OrGateProver
, gp.XorGateProver
, gp.NotGateProver
, gp.NandGateProver
, gp.NorGateProver
, gp.NxorGateProver
, gp.NaabGateProver
)
for _ in xrange(0, 2**nOutBits):
in0v.append(random.randint(0, 2**nInBits - 1))
in1v.append(random.randint(0, 2**nInBits - 1))
# XXX test muxes!!!
typv.append(random.choice(choices))
return (in0v, in1v, typv)
def rand_inputs(nInBits, nCopies, inLay=None):
out = []
if inLay is None:
inLay = [None] * (2 ** nInBits)
else:
nInBits = util.clog2(len(inLay))
inLay += [0] * (2 ** nInBits - len(inLay))
for _ in xrange(0, nCopies):
out.append([ Defs.gen_random() if elm is None else elm % Defs.prime for elm in inLay ])
return out
def rand_str(slen):
ostr = ""
for _ in xrange(0, slen):
cval = random.randint(0, 61)
if cval < 26:
ostr += chr(cval + 65)
elif cval < 52:
ostr += chr(cval + 71)
else:
ostr += str(cval - 52)
return ostr
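
# Illustrative usage (an assumption, not in the original module; note the module
# itself targets Python 2, hence xrange above):
#   in0v, in1v, typv = rand_ckt(3, 4)  # 8 random gates wired over 16 input bits
#   nonce = rand_str(8)                # 8 random alphanumeric characters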
| 24.230769 | 95 | 0.549841 | 190 | 1,575 | 4.521053 | 0.436842 | 0.009313 | 0.038417 | 0.041909 | 0.06752 | 0.06752 | 0.06752 | 0 | 0 | 0 | 0 | 0.035272 | 0.333968 | 1,575 | 64 | 96 | 24.609375 | 0.783603 | 0.087619 | 0 | 0.044444 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.088889 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4fd7ccdd03065688e02ce09c4b9b0a91f1bcbd70 | 3,122 | py | Python | app.py | clcert/bg-mixnet | 1f572e639ed30e4ba8ef850ac2d922f53a6c7447 | [
"Apache-2.0"
] | null | null | null | app.py | clcert/bg-mixnet | 1f572e639ed30e4ba8ef850ac2d922f53a6c7447 | [
"Apache-2.0"
] | 1 | 2022-01-21T00:44:07.000Z | 2022-01-21T00:44:07.000Z | app.py | clcert/bg-mixnet | 1f572e639ed30e4ba8ef850ac2d922f53a6c7447 | [
"Apache-2.0"
] | 1 | 2022-01-21T00:40:36.000Z | 2022-01-21T00:40:36.000Z | from flask import (
flash,
Flask,
redirect,
render_template,
request,
send_file,
url_for
)
from os import makedirs
from os.path import (
exists,
join as p_join,
realpath,
split as p_split
)
from werkzeug.utils import secure_filename
from zipfile import ZipFile
from main import (
mix as f_mix,
verify as f_verify
)
UPLOAD_FOLDER = p_join(p_split(realpath(__file__))[0], "data")
ALLOWED_EXTENSIONS = {"json", "txt"}
app = Flask(__name__)
app.config["UPLOAD_FOLDER"] = UPLOAD_FOLDER
app.secret_key = b"example"
def check_upfolder():
if not exists(UPLOAD_FOLDER):
makedirs(UPLOAD_FOLDER)
def allowed_file(filename):
return "." in filename and filename.rsplit(".", 1)[1].lower() in ALLOWED_EXTENSIONS
def validate_files(file_dict):
ret = True
for key in file_dict.keys():
if file_dict[key] and allowed_file(file_dict[key].filename):
continue
ret = False
return ret
@app.route("/")
def index():
return "Try /mix or /verify"
@app.route("/mix", methods=("GET", "POST"))
def mix():
if request.method == "POST":
check_upfolder()
m = int(request.form["m"])
n = int(request.form["n"])
election_file = request.files["election_file"]
if election_file and allowed_file(election_file.filename):
filename = secure_filename(election_file.filename)
path = p_join(app.config["UPLOAD_FOLDER"], filename)
election_file.save(path)
outs = ["ciphers.json", "public_randoms.txt", "proof.txt"]
for out in outs:
out = p_join(app.config["UPLOAD_FOLDER"], out)
f_mix(m, n, outs[0], outs[1], outs[2], path)
res_path = p_join(app.config["UPLOAD_FOLDER"], "response.zip")
zipObj = ZipFile(res_path, "w")
for out in outs:
zipObj.write(out)
zipObj.close()
return send_file(res_path, mimetype="application/zip")
return render_template("mix.html")
#TODO: debug verification
@app.route("/verify", methods=("GET", "POST"))
def verify():
if request.method == "POST":
check_upfolder()
m = int(request.form["m"])
n = int(request.form["n"])
files = {}
files["ciphers"] = request.files["ciphers_file"]
files["publics"] = request.files["publics_file"]
files["proof"] = request.files["proof_file"]
if validate_files(files):
paths = {}
for key in files.keys():
filename = secure_filename(files[key].filename)
paths[key] = p_join(app.config["UPLOAD_FOLDER"], filename)
files[key].save(paths[key])
data = {
"valid": f_verify(m, n, paths["ciphers"], paths["publics"], paths["proof"]),
"show": True
}
return render_template("verify.html", data=data)
data = {"valid": False, "show": False}
return render_template("verify.html", data=data)
if __name__ == "__main__":
app.run(host="127.0.0.1", port=8080, debug=True) | 30.019231 | 92 | 0.600256 | 393 | 3,122 | 4.577608 | 0.272265 | 0.060033 | 0.04169 | 0.058366 | 0.184547 | 0.184547 | 0.170095 | 0.071151 | 0.071151 | 0.071151 | 0 | 0.006935 | 0.261051 | 3,122 | 104 | 93 | 30.019231 | 0.772865 | 0.007687 | 0 | 0.134831 | 0 | 0 | 0.115881 | 0 | 0 | 0 | 0 | 0.009615 | 0 | 1 | 0.067416 | false | 0 | 0.067416 | 0.022472 | 0.213483 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4fd91e584b61e5bb1129513cbabbd75da3205ba8 | 1,846 | py | Python | lambda_function.py | prodipta/S3_read_and_save | 202878bb992b2b2318b29e402415b724c0980cc0 | [
"Apache-2.0"
] | null | null | null | lambda_function.py | prodipta/S3_read_and_save | 202878bb992b2b2318b29e402415b724c0980cc0 | [
"Apache-2.0"
] | null | null | null | lambda_function.py | prodipta/S3_read_and_save | 202878bb992b2b2318b29e402415b724c0980cc0 | [
"Apache-2.0"
] | null | null | null | import email
import boto3
s3 = boto3.client('s3')
s3r = boto3.resource('s3')
temp_dir = "/tmp/"
output_prefix = "output/"
def lambda_handler(event, context):
bucket = event['Records'][0]['s3']['bucket']['name']
key = event['Records'][0]['s3']['object']['key']
# ignore if it is not in mail directory, avoid recursive calls
if "mail/" not in key:
print("not an incoming mail")
return None
try:
waiter = s3.get_waiter('object_exists')
waiter.wait(Bucket=bucket, Key=key)
obj = s3r.Bucket(bucket).Object(key)
msg = email.message_from_bytes(obj.get()["Body"].read())
# quit if there is no attachments
attachments = msg.get_payload()
if len(attachments) < 2:
print("we've got no attachment")
return None
# delete the first item, it will be the mail itself
del attachments[0]
# run over each attachments
for attachment in attachments:
# get the file name
content_type = attachment.get('Content-Disposition')
file_name = content_type.split("=")[1].replace('\"', '')
print("attachment is {}".format(file_name))
# download to temp dir with the same filename
with open(temp_dir + file_name, 'wb') as writefile:
writefile.write(attachment.get_payload(decode=True))
# now upload to the right prefix + mail
            with open(temp_dir + file_name, 'rb') as data:
s3.upload_fileobj(data, bucket, output_prefix+file_name)
except Exception as e:
# something went wrong - probably permissioning
print(e)
# we are done here
return {
'bucket' : bucket,
'key': key
}
| 31.827586 | 72 | 0.569881 | 223 | 1,846 | 4.627803 | 0.497758 | 0.046512 | 0.025194 | 0.02907 | 0.044574 | 0.044574 | 0 | 0 | 0 | 0 | 0 | 0.013492 | 0.317443 | 1,846 | 57 | 73 | 32.385965 | 0.805556 | 0.179307 | 0 | 0.055556 | 0 | 0 | 0.112292 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027778 | false | 0 | 0.055556 | 0 | 0.166667 | 0.111111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4fd948c0ac2fd79b966285fa2f3b307c08cff2b5 | 3,827 | py | Python | contrib/cli/great_expectations_contrib/commands.py | denimalpaca/great_expectations | 0f28f3c2b3cc6fae3bc5d257c6d4d13dbcf37df0 | [
"Apache-2.0"
] | 1 | 2021-04-11T20:54:23.000Z | 2021-04-11T20:54:23.000Z | contrib/cli/great_expectations_contrib/commands.py | denimalpaca/great_expectations | 0f28f3c2b3cc6fae3bc5d257c6d4d13dbcf37df0 | [
"Apache-2.0"
] | 53 | 2021-10-02T02:26:51.000Z | 2021-12-28T20:49:25.000Z | contrib/cli/great_expectations_contrib/commands.py | denimalpaca/great_expectations | 0f28f3c2b3cc6fae3bc5d257c6d4d13dbcf37df0 | [
"Apache-2.0"
] | 1 | 2021-11-29T07:37:28.000Z | 2021-11-29T07:37:28.000Z | import os
import subprocess
import sys
from collections import namedtuple
import click
from cookiecutter.main import cookiecutter
Command = namedtuple("Command", ["name", "full_command", "error_message"])
def init_cmd(url: str) -> None:
"""
Initializes a contributor package by pulling down the Cookiecutter template
and hydrating it.
"""
echo("Configure your template:\n", "blue", bold=True)
cookiecutter(url, overwrite_if_exists=False)
echo("\nSuccessfully set up contrib package!", "green", bold=True)
def check_cmd() -> None:
"""
Performs a series of checks on a contributor package.
These include code style, testing, docstrings, and more.
"""
perform_check(suppress_output=False)
def publish_cmd() -> None:
"""
Performs same checks as `check_cmd`; if they pass, the user is prompted to
supply PyPi credentials. Valid inputs will result in an uploaded package.
"""
success = perform_check(suppress_output=True)
if not success:
echo(
"Please run the `check` command to diagnose before publishing",
"red",
bold=True,
)
return
echo("All checks have succeeded; you are ready to publish!", "green", bold=True)
publish_to_pypi()
def perform_check(suppress_output: bool) -> bool:
commands = [
Command(
"black",
"black --check .",
"Please ensure that your files are linted properly with `black .`",
),
Command(
"isort",
"isort --profile black --check .",
"Please ensure that your imports are sorted properly with `isort --profile black .`",
),
Command(
"pytest",
"pytest .",
"Please ensure that you've written tests and that they all pass",
),
Command(
"mypy",
"mypy --ignore-missing-imports --disallow-untyped-defs --show-error-codes --exclude venv .",
"Please ensure that all functions are type hinted",
),
]
successes = 0
for command in commands:
if run_command(command, suppress_output=suppress_output):
successes += 1
is_successful = successes == len(commands)
color = "green" if is_successful else "red"
echo(
f"Summary: [{successes}/{len(commands)}] checks have passed!", color, bold=True
)
return is_successful
def publish_to_pypi() -> None:
commands = [
Command(
"wheel",
"python setup.py sdist bdist_wheel",
"Something went wrong when creating a wheel",
),
Command(
"twine",
"twine upload --repository testpypi dist/*",
"Something went wrong when uploading with twine",
),
]
for command in commands:
if not run_command(command):
return
echo(
"Successfully uploaded package to PyPi! Congratulations on a job well done :)",
"green",
bold=True,
)
def run_command(command: Command, suppress_output: bool = False) -> bool:
# If suppressed, set STDOUT to dev/null
stdout = sys.stdout
if suppress_output:
sys.stdout = open(os.devnull, "w")
name, full_command, err = command
echo(f"{name}:", "blue", bold=True)
result = subprocess.run(
full_command.split(" "), shell=False, stdout=sys.stdout, stderr=sys.stdout
)
success = result.returncode == 0
if success:
echo("[SUCCEEDED]\n", "green")
else:
echo(f"[FAILED] {err}\n", "red")
# If reassigned before, set STDOUT back to its default value
sys.stdout = stdout
return success
def echo(msg: str, color: str, bold: bool = False) -> None:
click.echo(click.style(msg, fg=color, bold=bold))
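

# Illustrative sketch (an assumption, not part of the original CLI): any ad-hoc
# check can reuse the Command/run_command machinery defined above.
def _example_ad_hoc_check() -> bool:
    probe = Command(
        "python-version",
        "python --version",
        "Could not invoke the Python interpreter",
    )
    return run_command(probe, suppress_output=True)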
| 27.934307 | 104 | 0.605435 | 446 | 3,827 | 5.125561 | 0.408072 | 0.024497 | 0.027997 | 0.034121 | 0.045494 | 0.026247 | 0 | 0 | 0 | 0 | 0 | 0.0011 | 0.28717 | 3,827 | 136 | 105 | 28.139706 | 0.836877 | 0.117847 | 0 | 0.239583 | 0 | 0.010417 | 0.306602 | 0.022912 | 0 | 0 | 0 | 0 | 0 | 1 | 0.072917 | false | 0.020833 | 0.083333 | 0 | 0.197917 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4fdc6a8b68b5812b4517e515328c39c7aeb1166e | 914 | py | Python | lib/eventio/sync.py | bboser/eventio | cdad47772d94e87a8ca8927e8d578fc7aba78266 | [
"MIT"
] | 6 | 2018-12-04T02:53:20.000Z | 2020-03-08T15:42:16.000Z | lib/eventio/sync.py | bboser/eventio | cdad47772d94e87a8ca8927e8d578fc7aba78266 | [
"MIT"
] | null | null | null | lib/eventio/sync.py | bboser/eventio | cdad47772d94e87a8ca8927e8d578fc7aba78266 | [
"MIT"
] | null | null | null | import digitalio
from .kernel import _get_kernel
from .traps import _scheduler_wait, _scheduler_wake
class Event:
def __init__(self):
self._set = False
self._waitq = None
def is_set(self):
return self._set
def clear(self):
self._set = False
async def wait(self):
if self._set: return
if not self._waitq:
self._waitq = []
await _scheduler_wait(self._waitq)
async def set(self):
self._set = True
await _scheduler_wake(self._waitq)
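
# Illustrative usage sketch (an assumption, not in the original module): a
# consumer coroutine parks on the event until some producer awaits set().
async def _example_wait_then_go(event):
    await event.wait()  # suspends via _scheduler_wait until set() wakes it
    event.clear()       # re-arm the event for the next round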
class PinEvent(Event):
def __init__(self, pin, pull=None):
super().__init__()
self.pin = digitalio.DigitalInOut(pin)
self.pin.switch_to_input(pull=pull)
self.pin.irq(handler=self._cb)
def _cb(self, v):
# interrupt callback, knows nothing about kernel
if self._waitq:
_get_kernel()._schedule(self._waitq) | 23.435897 | 56 | 0.623632 | 116 | 914 | 4.551724 | 0.37931 | 0.119318 | 0.0625 | 0.060606 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.282276 | 914 | 39 | 57 | 23.435897 | 0.804878 | 0.050328 | 0 | 0.071429 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.178571 | false | 0 | 0.107143 | 0.035714 | 0.392857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4fe10e233e5bbd8bd614cfb551d5ab1080a6b260 | 1,741 | py | Python | make_download.py | niall-twomey/har_datasets | 68f142ba613ce26f67cdd6b871117f4c24ea603f | [
"MIT"
] | 24 | 2018-12-12T08:54:52.000Z | 2021-12-07T08:45:13.000Z | make_download.py | niall-twomey/har_datasets | 68f142ba613ce26f67cdd6b871117f4c24ea603f | [
"MIT"
] | 3 | 2019-07-18T20:14:41.000Z | 2022-03-12T01:03:28.000Z | make_download.py | niall-twomey/har_datasets | 68f142ba613ce26f67cdd6b871117f4c24ea603f | [
"MIT"
] | 9 | 2018-12-12T16:18:39.000Z | 2022-03-30T16:25:47.000Z | import zipfile
from os import makedirs
from os.path import basename
from os.path import exists
from os.path import join
from os.path import split
from os.path import splitext
import requests
from loguru import logger
from tqdm import tqdm
from src.meta import DatasetMeta
from src.utils.loaders import iter_dataset_paths
def unzip_data(zip_path, in_name, out_name):
if exists(join(zip_path, out_name)):
return
with zipfile.ZipFile(join(zip_path, in_name), "r") as fil:
fil.extractall(zip_path)
def download_and_save(url, path, force=False, chunk_size=2 ** 12):
response = requests.get(url, stream=True)
fname = join(path, split(url)[1])
desc = f"Downloading {fname}..."
if exists(fname):
if not force:
return
chunks = tqdm(response.iter_content(chunk_size=chunk_size), desc=basename(desc))
with open(fname, "wb") as fil:
for chunk in chunks:
fil.write(chunk)
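

# Illustrative call (an assumption; the URL is a placeholder, not a real mirror):
#   download_and_save("https://example.org/data/archive.zip", "/tmp/datasets")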
def download_dataset(dataset_meta_path):
dataset = DatasetMeta(dataset_meta_path)
if not exists(dataset.zip_path):
makedirs(dataset.zip_path)
for ii, url in enumerate(dataset.meta["download_urls"]):
logger.info("\t{}/{} {}".format(ii + 1, len(dataset.meta["download_urls"]), url))
download_and_save(url=url, path=dataset.zip_path)
zip_name = basename(dataset.meta["download_urls"][0])
unzip_path = join(dataset.zip_path, splitext(zip_name)[0])
unzip_data(zip_path=dataset.zip_path, in_name=zip_name, out_name=unzip_path)
def main():
for dataset_meta_path in iter_dataset_paths():
logger.info(f"Downloading {dataset_meta_path}")
download_dataset(dataset_meta_path)
if __name__ == "__main__":
main()
| 30.54386 | 89 | 0.701321 | 256 | 1,741 | 4.542969 | 0.28125 | 0.060189 | 0.042992 | 0.068788 | 0.051591 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004954 | 0.188397 | 1,741 | 56 | 90 | 31.089286 | 0.818117 | 0 | 0 | 0.045455 | 0 | 0 | 0.064905 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.272727 | 0 | 0.409091 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4fe15f1925208e13bb0c4c6b632376f12695329d | 3,533 | py | Python | djangofy/djangofy.py | etpinard/djangofy | 9ad1437255df66b1a8e7d5f684e56c3414f47bf5 | [
"MIT"
] | 1 | 2020-08-24T21:23:06.000Z | 2020-08-24T21:23:06.000Z | djangofy/djangofy.py | etpinard/djangofy | 9ad1437255df66b1a8e7d5f684e56c3414f47bf5 | [
"MIT"
] | null | null | null | djangofy/djangofy.py | etpinard/djangofy | 9ad1437255df66b1a8e7d5f684e56c3414f47bf5 | [
"MIT"
] | null | null | null | """
djangofy
=========
"""
TAB = " " # a tab in spaces
# -------------------------------------------------------------------------------
def _make_urls(group,
path_to_output_file,
group_name='group',
app_name='app',
template_name='template'):
"""
group [list]:
"""
#
out = (
"from django.conf.urls import patterns, url\n\n"
"import {app_name}.views\n\n\n"
"urlpatterns = patterns(\n"
"{TAB}'',\n"
).format(TAB=TAB, app_name=app_name)
#
for page in group:
        item = (
            # named groups need the (?P<name>pattern) form; [^/]+ is an assumed pattern
            r'(?P<{group_name}>[^/]+)/{page}$'
        ).format(group_name=group_name, page=page)
out += (
'{TAB}url("' + item + '",\n'
'{TAB}{TAB}{app_name}'
'.views.{template_name})'
).format(TAB=TAB, app_name=app_name,
template_name=template_name)
if page != group[-1]:
out += ",\n"
out += "\n)\n"
with open(path_to_output_file, 'wb') as f:
f.write(out)
def make_urls(names, relative_urls,
path_to_output_file,
app_name='app',
class_name='Page'):
#
out = (
"from django.conf.urls import patterns, url\n\n"
"from {app_name}.views import {class_name}\n\n\n"
"urlpatterns = patterns(\n"
"{TAB}'',\n"
).format(TAB=TAB, app_name=app_name, class_name=class_name)
#
for name, url in zip(names, relative_urls):
item = r'{url}/$'.format(url=url)
out += (
'{TAB}url("' + item + '",\n'
'{TAB}{TAB}{class_name}.as_view(\n'
'{TAB}{TAB}{TAB}lang=\'ipython-notebooks\',\n'
'{TAB}{TAB}{TAB}notebook=\'{name}\'),\n'
'{TAB}{TAB}name=\'ipython-notebook-{name}\')'
).format(TAB=TAB, class_name=class_name, name=name)
if name != names[-1]:
out += ",\n"
out += "\n)\n"
with open(path_to_output_file, 'wb') as f:
f.write(out)
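

# Illustrative output sketch (an assumption, not in the original module): for
#   make_urls(["intro"], ["getting-started"], "urls.py", app_name="docs")
# the generated file looks roughly like
#   from django.conf.urls import patterns, url
#   from docs.views import Page
#   urlpatterns = patterns(
#       '',
#       url("getting-started/$",
#           Page.as_view(
#               lang='ipython-notebooks',
#               notebook='intro'),
#           name='ipython-notebook-intro')
#   )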
def make_sitemaps(names, relative_urls,
path_to_output_file,
app_name='app',
template_name='template'):
"""
"""
out = (
"import os\n\n"
"from django.conf import settings\n\n\n"
"def items():\n"
"{TAB}items = [\n"
).format(TAB=TAB)
for name, url in zip(names, relative_urls):
location = "'/ipython-notebooks/{url}'".format(url=url)
lmfile = (
"os.path.join(\n{TAB}{TAB}{TAB}{TAB}"
"settings.TOP_DIR,\n{TAB}{TAB}{TAB}{TAB}"
"'shelly',\n{TAB}{TAB}{TAB}{TAB}"
"'templates',\n{TAB}{TAB}{TAB}{TAB}"
"'api_docs',\n{TAB}{TAB}{TAB}{TAB}"
"'includes',\n{TAB}{TAB}{TAB}{TAB}"
"'ipython_notebooks',\n{TAB}{TAB}{TAB}{TAB}"
"'{name}',\n{TAB}{TAB}{TAB}{TAB}"
"'body.html')"
).format(url=url, name=name, TAB=TAB)
out += (
"{TAB}{TAB}dict(\n"
"{TAB}{TAB}{TAB}location={location},\n"
"{TAB}{TAB}{TAB}lmfile={lmfile},\n"
"{TAB}{TAB}{TAB}priority=0.5\n"
"{TAB}{TAB})"
).format(location=location, lmfile=lmfile, TAB=TAB)
if name != names[-1]:
out += ",\n"
out += (
"\n{TAB}]"
"\n{TAB}return items"
"\n"
).format(TAB=TAB)
with open(path_to_output_file, 'wb') as f:
f.write(out)
def make_redirects():
# TODO
return
| 25.977941 | 81 | 0.46844 | 426 | 3,533 | 3.746479 | 0.171362 | 0.176692 | 0.118421 | 0.081454 | 0.513784 | 0.432331 | 0.368421 | 0.327068 | 0.267544 | 0.267544 | 0 | 0.00208 | 0.319558 | 3,533 | 135 | 82 | 26.17037 | 0.661814 | 0.040192 | 0 | 0.434783 | 0 | 0 | 0.316637 | 0.165176 | 0 | 0 | 0 | 0.007407 | 0 | 1 | 0.043478 | false | 0 | 0.065217 | 0.01087 | 0.119565 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4fe1cb63130b1fc64ca46b9fc8d4c160a04bdb49 | 1,394 | py | Python | Guitar Training Remote/practice/tritone_sub.py | keremkoseoglu/Python-Library | f66ab246da4eabea94596494cf2bc9b416b65b1d | [
"MIT"
] | 3 | 2018-06-28T07:09:04.000Z | 2019-03-04T14:43:52.000Z | Guitar Training Remote/practice/tritone_sub.py | keremkoseoglu/Python-Library | f66ab246da4eabea94596494cf2bc9b416b65b1d | [
"MIT"
] | null | null | null | Guitar Training Remote/practice/tritone_sub.py | keremkoseoglu/Python-Library | f66ab246da4eabea94596494cf2bc9b416b65b1d | [
"MIT"
] | 5 | 2018-06-28T07:12:28.000Z | 2021-06-03T18:20:21.000Z | from model import exercise, exercise_step
from music_theory import chord
from practice import abstract_practice
import random
class TritoneSub(abstract_practice.AbstractPractice):
_SUBTITLE = "Do chords, walking bass and improv"
_APPROACHES = [
"Tritone Sub - Mixo",
"Tritone Sub - Jazz min"
]
def get_exercise(self, quantity: int) -> exercise.Exercise:
random_steps = []
for i in range(quantity):
# Get chords
number_of_chords = random.randint(1, 3)
chords = chord.Chord().get_random_chords(number_of_chords)
# Build chord text
chord_txt = ""
sub_txt = ""
for ch in chords:
if chord_txt == "":
chord_txt = ch
else:
if sub_txt == "":
sub_txt = "followed by: "
else:
sub_txt += " | "
sub_txt += ch
# Add to steps
random_step = exercise_step.ExerciseStep(chord_txt, sub_txt)
random_steps.append(random_step)
output = exercise.Exercise(self._get_random_approach(), self._SUBTITLE, random_steps)
return output
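
    # Illustrative usage (an assumption, not part of the original class):
    #   ex = TritoneSub().get_exercise(quantity=3)
    # returns an Exercise titled with a random tritone-substitution approach;
    # each step pairs 1-3 random chords with a "followed by: ..." note.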
def _get_random_approach(self) -> str:
i = random.randint(0, len(self._APPROACHES) - 1)
return self._APPROACHES[i] | 30.304348 | 93 | 0.558106 | 150 | 1,394 | 4.933333 | 0.4 | 0.048649 | 0.048649 | 0.054054 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004484 | 0.360115 | 1,394 | 46 | 94 | 30.304348 | 0.825112 | 0.028694 | 0 | 0.060606 | 0 | 0 | 0.066617 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.060606 | false | 0 | 0.121212 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4fe3e277cb0aaebb11dd380d6970774d9b923543 | 1,477 | py | Python | setup.py | prasannaba/COVID-19_Analysis_Library | d7788f0f6e188a6e766053a1751adbf36b4612fe | [
"MIT"
] | null | null | null | setup.py | prasannaba/COVID-19_Analysis_Library | d7788f0f6e188a6e766053a1751adbf36b4612fe | [
"MIT"
] | null | null | null | setup.py | prasannaba/COVID-19_Analysis_Library | d7788f0f6e188a6e766053a1751adbf36b4612fe | [
"MIT"
] | null | null | null | from setuptools import setup
from COVID19analysis import __version__
with open('Readme.md', 'r') as f:
readme = f.read()
setup(
name='COVID19analysis',
version=__version__,
packages=['COVID19analysis'],
url='https://github.com/prasannaba/COVID-19_Analysis_Library',
license='MIT',
author='Prasanna',
python_requires='>=3.7',
install_requires=['bokeh>=2.3.3', 'panel>=0.11.3', 'pandas<=1.2.5', 'holoviews>=1.14.4',
'hvplot>=0.7.2', 'tqdm>=4.61.2'],
author_email='prasanna.badami@hotmail.com',
description='COVID19Analysis based on CSSEGISandData on GitHub',
long_description=readme,
long_description_content_type='text/markdown',
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Visualization',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Intended Audience :: Information Technology',
'Intended Audience :: Financial and Insurance Industry'
],
)
| 38.868421 | 92 | 0.641165 | 152 | 1,477 | 6.118421 | 0.585526 | 0.068817 | 0.080645 | 0.055914 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.032759 | 0.214624 | 1,477 | 37 | 93 | 39.918919 | 0.768966 | 0 | 0 | 0 | 0 | 0 | 0.579553 | 0.062965 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.057143 | 0 | 0.057143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4fe6ecf0ab0d93bd5e942e8e08086260bfb163e3 | 3,215 | py | Python | tests/beam_benchmark_helper_test.py | bltb/PerfKitBenchmarker | 903eb82d4e7ee5ed2ac2953cf6ce1b80459497ed | [
"Apache-2.0"
] | null | null | null | tests/beam_benchmark_helper_test.py | bltb/PerfKitBenchmarker | 903eb82d4e7ee5ed2ac2953cf6ce1b80459497ed | [
"Apache-2.0"
] | null | null | null | tests/beam_benchmark_helper_test.py | bltb/PerfKitBenchmarker | 903eb82d4e7ee5ed2ac2953cf6ce1b80459497ed | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for beam_benchmark_helper."""
import unittest
from perfkitbenchmarker import beam_benchmark_helper
from perfkitbenchmarker import dpb_service
class BeamBenchmarkHelperTestCase(unittest.TestCase):
def test_runner_option_override_non_dataflow(self):
# This is documenting the current behavior - when we add an EMR
# service_type, this test should change.
actual_options = []
beam_benchmark_helper.AddRunnerOptionMvnArgument(
dpb_service.EMR, actual_options, None)
self.assertListEqual([], actual_options)
def test_runner_option_override_dataflow(self):
actual_options = []
beam_benchmark_helper.AddRunnerOptionMvnArgument(
dpb_service.DATAFLOW, actual_options, None)
self.assertListEqual(['"--runner=TestDataflowRunner"'], actual_options)
def test_runner_option_override_use_override(self):
testOptionVal = "--runner=TestVal"
actual_options = []
beam_benchmark_helper.AddRunnerOptionMvnArgument(
dpb_service.DATAFLOW, actual_options, testOptionVal)
self.assertListEqual([testOptionVal], actual_options)
def test_runner_option_override_empty_override(self):
testOptionVal = ""
actual_options = []
beam_benchmark_helper.AddRunnerOptionMvnArgument(
dpb_service.DATAFLOW, actual_options, testOptionVal)
self.assertListEqual([], actual_options)
def test_runner_profile_override_dataflow(self):
actual_mvn_command = []
beam_benchmark_helper.AddRunnerProfileMvnArgument(
dpb_service.DATAFLOW, actual_mvn_command, None)
self.assertListEqual(['-Pdataflow-runner'], actual_mvn_command)
def test_runner_profile_override_non_dataflow(self):
# This is documenting the current behavior - when we add an EMR
# service_type, this test should change.
actual_mvn_command = []
beam_benchmark_helper.AddRunnerProfileMvnArgument(
dpb_service.EMR, actual_mvn_command, None)
self.assertListEqual([], actual_mvn_command)
def test_runner_profile_override_use_override(self):
testOptionVal = "testval"
actual_mvn_command = []
beam_benchmark_helper.AddRunnerProfileMvnArgument(
dpb_service.DATAFLOW, actual_mvn_command, testOptionVal)
self.assertListEqual(['-P' + testOptionVal], actual_mvn_command)
def test_runner_profile_override_empty_override(self):
testOptionVal = ""
actual_mvn_command = []
beam_benchmark_helper.AddRunnerProfileMvnArgument(
dpb_service.DATAFLOW, actual_mvn_command, testOptionVal)
self.assertListEqual([], actual_mvn_command)
if __name__ == '__main__':
unittest.main()
| 35.722222 | 75 | 0.772006 | 370 | 3,215 | 6.402703 | 0.308108 | 0.065851 | 0.081047 | 0.060785 | 0.669481 | 0.607851 | 0.556353 | 0.486703 | 0.408189 | 0.380329 | 0 | 0.002937 | 0.152722 | 3,215 | 89 | 76 | 36.123596 | 0.86674 | 0.254743 | 0 | 0.52 | 0 | 0 | 0.033277 | 0.012216 | 0 | 0 | 0 | 0 | 0.16 | 1 | 0.16 | false | 0 | 0.06 | 0 | 0.24 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4fe9bd05ea772e2f1770e833100ea91c824dc3bf | 719 | py | Python | kartta.py | UrsaOK/supertassu | c9158f50281000f57fe14aba4115aa867a72e0ca | [
"BSD-2-Clause"
] | null | null | null | kartta.py | UrsaOK/supertassu | c9158f50281000f57fe14aba4115aa867a72e0ca | [
"BSD-2-Clause"
] | 2 | 2015-01-18T14:51:32.000Z | 2016-02-24T20:15:12.000Z | kartta.py | UrsaOK/supertassu | c9158f50281000f57fe14aba4115aa867a72e0ca | [
"BSD-2-Clause"
] | null | null | null | from merkki import Merkki
class Ruutu:
def __init__(self, merkki, tyhja):
print("ruutu init")
self.merkki = Merkki(merkki)
self.tyhja = tyhja
OVI = Ruutu(".", True)
TYHJA = Ruutu(" ", True)
SEINA = Ruutu("#", False)
SUPERSEINA = Ruutu("?", False)
class Kartta(list):
def __init__(self):
print("kartta init")
super(Kartta, self).__init__()
self.leveys = 80
self.korkeus = 50
for x in range(self.leveys):
self.append([TYHJA] * self.korkeus)
def draw(self, mihin):
print("kartta draw")
for x in range(self.leveys):
for y in range(self.korkeus):
self[x][y].merkki.draw(mihin, x, y)
| 23.966667 | 51 | 0.564673 | 90 | 719 | 4.377778 | 0.333333 | 0.081218 | 0.083756 | 0.055838 | 0.106599 | 0.106599 | 0 | 0 | 0 | 0 | 0 | 0.007905 | 0.296245 | 719 | 29 | 52 | 24.793103 | 0.770751 | 0 | 0 | 0.086957 | 0 | 0 | 0.050139 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.130435 | false | 0 | 0.043478 | 0 | 0.26087 | 0.130435 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4feb3f10714033df750e63992a556639520521e8 | 10,753 | py | Python | cloudmesh_web/modules/rack.py | JulienPalard/cloudmesh | 1759b88daef3a13917492d028fdabe08f03ca996 | [
"Apache-2.0"
] | null | null | null | cloudmesh_web/modules/rack.py | JulienPalard/cloudmesh | 1759b88daef3a13917492d028fdabe08f03ca996 | [
"Apache-2.0"
] | 4 | 2021-06-08T20:20:08.000Z | 2022-03-11T23:30:22.000Z | cloudmesh_web/modules/rack.py | JulienPalard/cloudmesh | 1759b88daef3a13917492d028fdabe08f03ca996 | [
"Apache-2.0"
] | null | null | null | from cloudmesh_base.locations import config_file
from cloudmesh.config.cm_config import cm_config, cm_config_server
from cloudmesh.rack.cluster_map_heat import HeatClusterMap
from cloudmesh.rack.cluster_map_service import ServiceClusterMap
from cloudmesh.rack.fetch_cluster_info import FetchClusterInfo
from flask import Blueprint, g, render_template, request, redirect, url_for
from flask.ext.login import login_required # @UnresolvedImport
from flask.ext.wtf import Form # @UnresolvedImport
from pprint import pprint
from sh import pwd # @UnresolvedImport
from wtforms import SelectField
from flask.ext.principal import Permission, RoleNeed
import time
from cloudmesh.rack.rack_progress import get_temperature_progress, get_service_progress
import json
import sys
from cloudmesh_base.logger import LOGGER
log = LOGGER(__file__)
rack_module = Blueprint('rack_module', __name__)
admin_permission = Permission(RoleNeed('admin'))
#
# ROUTE: rack
#
class RackForm(Form):
# MUST create an unique selector for each different service
service_rack = SelectField()
temperature_rack = SelectField()
all_racks_dict = {
"all": ('all', 'All Clusters'),
"india": ('india', 'India Cluster'),
"echo": ('echo', 'Echo Cluster'),
"delta": ('delta', 'Delta Cluster'),
"bravo": ('bravo', 'Bravo Cluster'),
}
# all possible service provided
all_services_list = ["service", "temperature", ]
# content of each service, including label, and range of clusters
# 'clusters' means the specific service can be used on some different clusters
# 'select' means one attribute name of SelectField, typical name is "{service name}_rack"
all_services_dict = {
"service": {
"label": "Service Map",
"clusters": ["all", "india", "echo", "delta", "bravo", ],
"select": "service_rack",
},
"temperature": {
"label": 'Heat Map',
"clusters": ["echo", ],
"select": "temperature_rack",
},
}
# a dict that holds all selector
selector_dict = {}
def initForm(self):
for service in self.all_services_list:
service_dict = {}
service_dict["name"] = service
service_dict["label"] = self.all_services_dict[service]["label"]
service_dict["select"] = getattr(
self, self.all_services_dict[service]["select"])
rack_list = []
for rack in self.all_services_dict[service]["clusters"]:
rack_list.append(self.all_racks_dict[rack])
service_dict["select"].choices = rack_list
self.selector_dict[service] = service_dict
def validate_on_submit(self):
return True
@rack_module.route('/inventory/rack')
@login_required
def display_rack_home():
rack_form = RackForm()
if rack_form.validate_on_submit():
rack_form.initForm()
return render_template("mesh/rack/rack.html", form=rack_form, flag_home=True)
@rack_module.route('/inventory/rack/mapcontainer', methods=['POST'])
@login_required
def display_rack_map_container():
# rack denote the rack that user selected
# service denote the service user selected on the specific rack
rack = request.form['select_rack']
service = request.form['select_service']
# double check to make sure rack can provide the specific service
rack_form = RackForm()
if rack not in rack_form.all_services_dict[service]["clusters"]:
log.error("Someone try to hack the service [service: '{0}' on rack: '{1}'] provided by Rack Diagram. Just ignore it.".format(
service, rack))
return redirect("/inventory/rack")
return render_template(
"mesh/rack/map_container.html",
rack=rack,
service=service,
)
@rack_module.route('/inventory/rack/genmap', methods=['GET', 'POST'])
@login_required
def gen_rack_map():
service = request.args.get("service")
rack = request.args.get("rack")
# double check to make sure rack can provide the specific service
rack_form = RackForm()
if rack not in rack_form.all_services_dict[service]["clusters"]:
log.error("Someone try to hack the service [service: '{0}' on rack: '{1}'] provided by Rack Diagram. Just ignore it.".format(
service, rack))
return redirect("/inventory/rack")
myfetch = FetchClusterInfo(g.user.id)
map_progress = myfetch.get_map_progress(service)
map_progress.set_load_map()
map_progress.set_send_http_request()
result = {"result": "failure", "reason": {
"status": "failure", "text": "Read DB Error"}}
if myfetch.start_gen_map(service, rack):
result["result"] = "success"
return json.dumps(result)
@rack_module.route('/inventory/rack/refreshmap', methods=['GET', 'POST'])
@login_required
def refresh_rack_map():
service = request.args.get("service")
rack = request.args.get("rack")
# double check to make sure rack can provide the specific service
rack_form = RackForm()
if rack not in rack_form.all_services_dict[service]["clusters"]:
log.error("Someone try to hack the service [service: '{0}' on rack: '{1}'] provided by Rack Diagram. Just ignore it.".format(
service, rack))
return redirect("/inventory/rack")
myfetch = FetchClusterInfo(g.user.id)
map_progress = myfetch.get_map_progress(service)
map_progress.set_refresh_map()
map_progress.set_send_http_request()
result = {"result": "failure", "reason": {
"status": "failure", "text": "Read DB Error"}}
result_dict = myfetch.start_refresh_map(service, rack)
if result_dict["result"]:
result["result"] = "success"
elif result_dict["fresh"]:
result["reason"]["status"] = "success"
result["reason"]["text"] = "Data is already newest"
return json.dumps(result)
@rack_module.route('/inventory/rack/mapprogress', methods=['GET', 'POST'])
@login_required
def rack_map_progress_status():
service = request.args.get("service")
result = {"text": "", "value": 0, "next": ""}
myfetch = FetchClusterInfo(g.user.id)
map_progress = myfetch.get_map_progress(service)
if map_progress:
result = map_progress.get_status()
# log.debug("progress status: {0}".format(result))
if result["next"] == "loading map":
result["data"] = map_progress.get_data("map_data")
return json.dumps(result)
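

# Illustrative response shape for the handler above (the concrete values are
# assumptions; the exact keys come from map_progress.get_status()):
#   {"text": "fetching cluster data", "value": 40, "next": "", "data": {...}}
# where "data" is attached only once "next" reaches "loading map".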
@rack_module.route('/inventory/rack/map', methods=['POST'])
@login_required
def display_rack_map():
####
#
# Flag of debug, True means generate fake data with random generator
# False means fetch the real data from server
####
flag_debug = False
# class name means the specific class to generate map for different service type
# method name means the specific method to fetch real data of different service type,
# the methods are defined in class FetchClusterInfo
service_options = {
"temperature": {
"class": HeatClusterMap,
"method": "fetch_temperature_ipmi",
},
"service": {
"class": ServiceClusterMap,
"method": "fetch_service_type",
},
}
# rack denote the rack user selected
# service denote the service user selected on the specific rack
rack = request.form['select_rack']
service = request.form['select_service']
# double check to make sure rack can provide the specific service
rack_form = RackForm()
if rack not in rack_form.all_services_dict[service]["clusters"]:
log.error("Someone try to hack the service [service: '{0}' on rack: '{1}'] provided by Rack Diagram. Just ignore it.".format(
service, rack))
return redirect("/inventory/rack")
# get location of configuration file, input diag, output image
dir_base = config_file("")
server_config = cm_config_server()
relative_dir_diag = server_config.get("cloudmesh.server.rack.input")
relative_dir_image = server_config.get(
"cloudmesh.server.rack.diagrams.{0}".format(service))
# log.debug("relative dir image, {0}".format(relative_dir_image))
flask_dir = "static"
# guess absolute path of cloudmesh_web
rack_py_dir = pwd().strip().split("/")
cloudmesh_web_dir = rack_py_dir # [:-1]
# log.debug("cloudmesh_web dir, {0}".format(cloudmesh_web_dir))
list_image_dir = [flask_dir] + relative_dir_image.strip().split("/")
abs_dir_image = "/".join(cloudmesh_web_dir + list_image_dir)
abs_dir_diag = dir_base + "/" + relative_dir_diag
# dynamic generate image
map_class = service_options[service]["class"](
rack, dir_base, abs_dir_diag, abs_dir_image)
# get cluster server data
dict_data = None
if flag_debug:
dict_data = map_class.genRandomValues()
else:
# fetch the real data ....
# TODO cloudmesh.hpc.proxyserver
# should we add a field in cloudmesh.yaml for the proxy server to run
# pbsnodes ???
config = cm_config()
user = config.get("cloudmesh.hpc.username")
myfetch = FetchClusterInfo(user, "india.futuregrid.org")
flag_filter = None if rack == "all" else rack
# If user want to customize the action, user can set optional param here
# by calling map_class.set_optional_param(value)
# optional param
aparam = map_class.get_optional_param()
dict_data = getattr(myfetch, service_options[service]["method"])(
flag_filter, aparam)
# update data
map_class.update(dict_data)
# plot map
map_class.plot()
# get image names
filename_image = map_class.getImageFilename()
filename_legend = map_class.getLegendFilename()
image_size = map_class.getImageSize()
legend_size = map_class.getImageLegendSize()
# log.debug("legend size is: {0}".format(legend_size))
abs_web_path_image = "/".join([""] + list_image_dir + [filename_image])
abs_web_path_legend = "/".join([""] + list_image_dir + [filename_legend])
img_flag = "?" + str(time.time())
return render_template("mesh/rack/rack.html",
flag_home=False,
rack=rack,
imageWidth=image_size["width"],
imageHeight=image_size["height"],
legendWidth=legend_size["width"],
legendHeight=legend_size["height"],
service=service,
imageFilename=abs_web_path_image + img_flag,
legendFilename=abs_web_path_legend + img_flag
)
| 38.131206 | 133 | 0.655073 | 1,308 | 10,753 | 5.182722 | 0.191896 | 0.02434 | 0.017702 | 0.025963 | 0.395191 | 0.351084 | 0.300339 | 0.289718 | 0.277622 | 0.255937 | 0 | 0.001812 | 0.230168 | 10,753 | 281 | 134 | 38.266904 | 0.817106 | 0.176602 | 0 | 0.306533 | 0 | 0.020101 | 0.180795 | 0.026818 | 0 | 0 | 0 | 0.003559 | 0 | 1 | 0.040201 | false | 0 | 0.085427 | 0.005025 | 0.21608 | 0.015075 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4feefc3dff38a87a277d1dfa1f890a31090eb36f | 738 | py | Python | setup.py | xiaoxq/pyspark-utils | 705d9f047519881bb07f0bee0db863111508b59b | [
"Apache-2.0"
] | 9 | 2019-04-03T21:31:50.000Z | 2021-07-22T06:07:02.000Z | setup.py | xiaoxq/pyspark-utils | 705d9f047519881bb07f0bee0db863111508b59b | [
"Apache-2.0"
] | null | null | null | setup.py | xiaoxq/pyspark-utils | 705d9f047519881bb07f0bee0db863111508b59b | [
"Apache-2.0"
] | 1 | 2019-12-12T12:55:57.000Z | 2019-12-12T12:55:57.000Z | #!/usr/bin/env python
"""The missing PySpark utils steup file."""
import setuptools
with open("README.rst", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="pyspark_utils",
version="1.8.0",
license="Apache License Version 2.0",
author="Xiangquan Xiao",
author_email="xiaoxiangquan@gmail.com",
description="The missing PySpark utils",
long_description=long_description,
url="https://github.com/xiaoxq/pyspark-utils",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
],
install_requires=[
"absl-py",
],
)
| 26.357143 | 61 | 0.657182 | 84 | 738 | 5.690476 | 0.690476 | 0.100418 | 0.07113 | 0.09205 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008489 | 0.201897 | 738 | 27 | 62 | 27.333333 | 0.803056 | 0.078591 | 0 | 0.090909 | 0 | 0 | 0.410979 | 0.034125 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.045455 | 0 | 0.045455 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4ff0c33b625a3b17c41a51d1fc61857295c864cf | 6,467 | py | Python | theia/ide/admin/cli/anubis/assignment/pipeline.py | synoet/Anubis | 051888a88e37c67e5e772245604c79ceb4db8764 | [
"MIT"
] | 2 | 2022-02-24T17:39:27.000Z | 2022-02-25T02:14:06.000Z | theia/ide/admin/cli/anubis/assignment/pipeline.py | synoet/Anubis | 051888a88e37c67e5e772245604c79ceb4db8764 | [
"MIT"
] | null | null | null | theia/ide/admin/cli/anubis/assignment/pipeline.py | synoet/Anubis | 051888a88e37c67e5e772245604c79ceb4db8764 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import json
import logging
import os
import traceback
import git
import requests
import yaml
root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG)
root_logger.addHandler(logging.StreamHandler())
def post(path: str, data: dict, params=None):
if params is None:
params = {}
headers = {'Content-Type': 'application/json'}
params['token'] = TOKEN
if DEBUG:
logging.info("post: {} data: {}".format(path, data))
return None
# Attempt to contact the pipeline API
try:
res = requests.post(
'http://pipeline-api:5000' + path,
headers=headers,
params=params,
json=data,
)
except:
logging.error('UNABLE TO REPORT POST TO PIPELINE API')
exit(0)
# If the call to the api failed we're in trouble,
# and need to abort.
if res.status_code != 200:
logging.error('UNABLE TO REPORT POST TO PIPELINE API')
exit(0)
return res
def report_panic(message: str, traceback: str, ):
"""
Report and error to the API
:param message: error message
:param traceback: optional traceback
:return:
"""
data = {
'token': TOKEN,
'commit': COMMIT,
'message': message,
'traceback': traceback,
}
print(traceback)
logging.info('report_error {}'.format(json.dumps(data, indent=2)))
post('/pipeline/report/panic/{}'.format(SUBMISSION_ID), data)
# read the pipeline identifiers up front so report_panic() can still reach the
# API if the assignment module fails to import below
TOKEN = os.environ.get('TOKEN')
COMMIT = os.environ.get('COMMIT')
GIT_REPO = os.environ.get('GIT_REPO')
SUBMISSION_ID = os.environ.get('SUBMISSION_ID')
del os.environ['TOKEN']

from utils import registered_tests, build_function
from utils import fix_permissions, Panic, DEBUG

try:
    import assignment
except ImportError:
    report_panic('Unable to import assignment', traceback.format_exc())
    exit(0)

git_creds = os.environ.get('GIT_CRED', default=None)
if git_creds is not None:
    del os.environ['GIT_CRED']
    with open(os.environ.get('HOME') + '/.git-credentials', 'w') as f:
        f.write(git_creds)
    with open(os.environ.get('HOME') + '/.gitconfig', 'w') as f:
        f.write('[credential]\n')
        f.write('\thelper = store\n')
def report_state(state: str, params=None):
"""
Report a state update for the current submission
:param params:
:param state: text representation of state
:return:
"""
data = {
'token': TOKEN,
'commit': COMMIT,
'state': state,
}
logging.info('report_state {}'.format(json.dumps(data, indent=2)))
post('/pipeline/report/state/{}'.format(SUBMISSION_ID), data, params=params)
def report_build_results(stdout: str, passed: bool):
"""
Report the results of a given build.
:param stdout:
:param passed:
:return:
"""
data = {
'token': TOKEN,
'commit': COMMIT,
# 'stdout': base64.b16encode(stdout).decode(),
'stdout': stdout,
'passed': passed,
}
logging.info('report_build {}'.format(json.dumps(data, indent=2)))
post('/pipeline/report/build/{}'.format(SUBMISSION_ID), data)
def report_test_results(test_name: str, stdout: str, message: str, passed: bool):
"""
Report a single test result to the pipeline API.
:param test_name:
:param stdout:
:param message:
:param passed:
:return:
"""
data = {
'token': TOKEN,
'commit': COMMIT,
'test_name': test_name,
# 'stdout': base64.b16encode(stdout).decode(),
'stdout': stdout,
'message': message,
'passed': passed,
}
logging.info('report_test_results {}'.format(json.dumps(data, indent=2)))
post('/pipeline/report/test/{}'.format(SUBMISSION_ID), data)
def get_assignment_data() -> dict:
"""
Load the assignment metadata out from the assignment yaml file
:return:
"""
# Figure out filename
assignment_filename = None
for assignment_filename_option in ['meta.yml', 'meta.yaml']:
if os.path.isfile(assignment_filename_option):
assignment_filename = assignment_filename_option
break
# Make sure we figured out the metadata filename
if assignment_filename is None:
report_panic('No meta.yml was found', '')
exit(0)
# Load yaml
    with open(assignment_filename, 'r') as f:
        try:
            assignment_data = yaml.safe_load(f.read())
        except yaml.YAMLError:
            report_panic('Unable to read assignment yaml', traceback.format_exc())
            exit(0)  # assignment_data would be unbound past this point
logging.info(assignment_data)
return assignment_data
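

# Illustrative meta.yml sketch (an assumption; this script only requires that
# the file parses as YAML -- the resulting dict is handed to run_build/run_tests
# but not otherwise interpreted here):
#   name: os-hw1
#   tests:
#     - long-files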
def clone():
"""
Clone the assigment repo into the student folder.
File permissions will need to be updated.
:return:
"""
report_state('Cloning repo')
# Clone
try:
repo = git.Repo.clone_from(GIT_REPO, './student')
if COMMIT.lower() != 'null':
repo.git.checkout(COMMIT)
except git.exc.GitCommandError:
report_panic('Git error', traceback.format_exc())
exit(0)
fix_permissions()
os.system('rm -rf ./student/.git')
os.system('rm -rf /home/anubis/.git-credentials')
os.system('rm -rf /home/anubis/.gitconfig')
def run_build(assignment_data: dict):
"""
Build the student repo.
:param assignment_data: assignment meta
:return:
"""
# build
report_state('Running Build...')
result = build_function()
report_build_results(result.stdout, result.passed)
if not result.passed:
exit(0)
def run_tests(assignment_data: dict):
"""
Run the assignment test scripts. Update submission state as you go.
:param assignment_data:
:return:
"""
# Tests
for test_name in registered_tests:
report_state('Running test: {}'.format(test_name))
result = registered_tests[test_name]()
report_test_results(test_name, result.stdout, result.message, result.passed)
def main():
try:
assignment_data = get_assignment_data()
clone()
os.chdir('./student')
run_build(assignment_data)
run_tests(assignment_data)
report_state('Finished!', params={'processed': '1'})
except Panic as e:
report_panic(repr(e), traceback.format_exc())
except Exception as e:
report_panic(repr(e), traceback.format_exc())
if __name__ == '__main__':
main()
| 25.360784 | 84 | 0.626875 | 790 | 6,467 | 5.003797 | 0.227848 | 0.042499 | 0.02125 | 0.020238 | 0.228181 | 0.171515 | 0.132052 | 0.109284 | 0.087528 | 0.024285 | 0 | 0.005531 | 0.24509 | 6,467 | 254 | 85 | 25.46063 | 0.804179 | 0.167311 | 0 | 0.239726 | 0 | 0 | 0.159992 | 0.029213 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068493 | false | 0.047945 | 0.082192 | 0 | 0.171233 | 0.006849 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4ff20bbd47aa0c170d3217fde2abcab9c8fbd137 | 2,193 | py | Python | eval_image_classifier.py | kurnianggoro/Deep-Mutual-Learning | 34a20583debe4e0dab1d9856db69bed278c5c011 | [
"MIT"
] | 317 | 2018-03-28T02:11:23.000Z | 2022-03-18T08:32:27.000Z | eval_image_classifier.py | SaintLogos1234/Deep-Mutual-Learning | 34a20583debe4e0dab1d9856db69bed278c5c011 | [
"MIT"
] | 19 | 2018-04-11T02:48:29.000Z | 2021-07-09T11:03:19.000Z | eval_image_classifier.py | SaintLogos1234/Deep-Mutual-Learning | 34a20583debe4e0dab1d9856db69bed278c5c011 | [
"MIT"
] | 65 | 2018-04-23T01:52:45.000Z | 2022-03-06T01:49:22.000Z | """
Generic evaluation script that evaluates a model using a given dataset.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import eval_models
from datasets.utils import *
slim = tf.contrib.slim
tf.app.flags.DEFINE_string('dataset_name', 'market1501',
'The name of the dataset to load.')
tf.app.flags.DEFINE_string('split_name', 'test',
'The name of the train/test split.')
tf.app.flags.DEFINE_string('dataset_dir', None,
'The directory where the dataset files are stored.')
tf.app.flags.DEFINE_string('checkpoint_dir', None,
'The directory where the model was written to or an absolute path to a '
'checkpoint file.')
tf.app.flags.DEFINE_string('eval_dir', 'results',
'Directory where the results are saved to.')
tf.app.flags.DEFINE_string('model_name', 'mobilenet_v1',
'The name of the architecture to evaluate.')
tf.app.flags.DEFINE_integer('num_networks', 2,
'Number of Networks')
tf.app.flags.DEFINE_integer('num_classes', 751,
'The number of classes.')
tf.app.flags.DEFINE_integer('batch_size', 1,
'The number of samples in each batch.')
tf.app.flags.DEFINE_string('preprocessing_name', None,
'The name of the preprocessing to use. If left '
'as `None`, then the model_name flag is used.')
tf.app.flags.DEFINE_integer('num_preprocessing_threads', 1,
'The number of threads used to create the batches.')
tf.app.flags.DEFINE_float('moving_average_decay', 0.9999,
'The decay to use for the moving average.'
'If left as None, then moving averages are not used.')
#########################
FLAGS = tf.app.flags.FLAGS
def main(_):
# create folders
mkdir_if_missing(FLAGS.eval_dir)
# test
eval_models.evaluate()
if __name__ == '__main__':
tf.app.run()
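
# Illustrative invocation (an assumption; the paths are placeholders and the
# flags are the ones defined above):
#   python eval_image_classifier.py \
#       --dataset_dir=/data/market1501 \
#       --checkpoint_dir=./checkpoints/dml \
#       --model_name=mobilenet_v1 \
#       --num_networks=2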
| 31.782609 | 99 | 0.603283 | 276 | 2,193 | 4.59058 | 0.355072 | 0.055249 | 0.102605 | 0.151539 | 0.280189 | 0.149961 | 0 | 0 | 0 | 0 | 0 | 0.010329 | 0.293662 | 2,193 | 68 | 100 | 32.25 | 0.807618 | 0.041952 | 0 | 0 | 0 | 0 | 0.383123 | 0.012124 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025 | false | 0 | 0.15 | 0 | 0.175 | 0.025 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4ff34a06c77e4b0809fa1c64b42511f3762f61e0 | 2,776 | py | Python | tests/cli/test_new.py | deeplearninc/auger-ai | b50af35e8ea28b528ec233a2f4a8d4e412059be9 | [
"MIT"
] | null | null | null | tests/cli/test_new.py | deeplearninc/auger-ai | b50af35e8ea28b528ec233a2f4a8d4e412059be9 | [
"MIT"
] | 25 | 2019-07-09T04:26:19.000Z | 2020-07-21T06:43:25.000Z | tests/cli/test_new.py | deeplearninc/auger-ai | b50af35e8ea28b528ec233a2f4a8d4e412059be9 | [
"MIT"
] | 1 | 2019-07-09T15:19:13.000Z | 2019-07-09T15:19:13.000Z | import os
from auger.cli.cli import cli
from auger.api.utils.config import Config
class TestNewCommand():
def test_minimal_arguments_successfull_creation(self, runner, isolated):
# successful status
result = runner.invoke(cli, ['new', 'test_project'])
assert result.exit_code == 0
# directory created
target_dir = os.path.join(os.getcwd(), 'test_project')
assert os.path.exists(target_dir) and os.path.isdir(target_dir)
# config file exists
config_file = os.path.join(target_dir, 'auger.yaml')
assert os.path.exists(config_file)
# config contains proper data
config = Config().load('test_project')
assert config.get('name', '') == 'test_project'
def test_project_with_given_name_already_exists(
self, runner, log, project):
os.chdir('..')
runner.invoke(cli, ['new', 'test_project'])
result = runner.invoke(cli, ['new', 'test_project'])
assert result.exit_code != 0
assert (log.records[-1].message ==
"Can't create 'test_project'. Folder already exists.")
def test_nested_project_forbidden(self, runner, log, project):
result = runner.invoke(cli, ['new', 'test_project'])
assert result.exit_code != 0
assert (log.records[-1].message ==
"Can't create 'test_project' inside a project."
" './auger.yaml' already exists")
def test_full_set_of_arguments(self, log, runner, isolated, project):
os.chdir('..')
result = runner.invoke(
cli, [
'new', 'new_project',
'--model-type', 'regression',
'--target', 'target_column',
'--source', 'test_project/iris.csv'])
assert result.exit_code == 0
config = Config().load('new_project')
assert config.get('model_type', '') == 'regression'
assert config.get('target', '') == 'target_column'
assert config.get('source', '') == os.path.join(
os.getcwd(), 'test_project', 'iris.csv')
def test_bad_source(self, log, runner, isolated):
result = runner.invoke(
cli, ['new', 'test_project', '--source', 'not_existing_file.csv'])
assert result.exit_code != 0
assert log.messages[-1].startswith("Can't find file to import:")
def test_source_wrong_extension(self, log, runner, isolated):
result = runner.invoke(
cli, ['new', 'test_project', '--source', 'file_with_wrong.extension'])
assert result.exit_code != 0
assert log.messages[-1] ==\
'Source file has to be one of the supported fomats: .csv, .arff, .gz, .bz2, .zip, .xz, .json, .xls, .xlsx, .feather, .h5, .hdf5'
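
# Note: the `runner`, `isolated`, `log`, and `project` fixtures used above are
# assumed to come from the suite's conftest.py. A minimal sketch of `runner`,
# using click's testing helper (illustrative only):
#
#   import pytest
#   from click.testing import CliRunner
#
#   @pytest.fixture
#   def runner():
#       return CliRunner()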
| 39.657143 | 141 | 0.599784 | 333 | 2,776 | 4.831832 | 0.297297 | 0.095712 | 0.065258 | 0.07831 | 0.37601 | 0.361094 | 0.33064 | 0.294593 | 0.294593 | 0.246116 | 0 | 0.006302 | 0.256844 | 2,776 | 69 | 142 | 40.231884 | 0.773631 | 0.029539 | 0 | 0.307692 | 0 | 0.019231 | 0.240238 | 0.024916 | 0 | 0 | 0 | 0 | 0.307692 | 1 | 0.115385 | false | 0 | 0.076923 | 0 | 0.211538 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4ff4d9710a1d2817b836253d41e8fd15539f9501 | 13,090 | py | Python | enn_zoo/enn_zoo/microrts/__init__.py | Miffyli/incubator | 35c920b31fd0ed6cabdcb536201b39f31c3d9f03 | [
"Apache-2.0",
"MIT"
] | null | null | null | enn_zoo/enn_zoo/microrts/__init__.py | Miffyli/incubator | 35c920b31fd0ed6cabdcb536201b39f31c3d9f03 | [
"Apache-2.0",
"MIT"
] | null | null | null | enn_zoo/enn_zoo/microrts/__init__.py | Miffyli/incubator | 35c920b31fd0ed6cabdcb536201b39f31c3d9f03 | [
"Apache-2.0",
"MIT"
] | null | null | null | from dataclasses import dataclass
from typing import Any, Dict, List, Mapping, MutableMapping, Optional, Sequence, Tuple
import random
from entity_gym.environment.environment import EntityObs
import numpy as np
import numpy.typing as npt
from copy import deepcopy
import os
import gym_microrts
from gym_microrts import microrts_ai
import xml.etree.ElementTree as ET
import json
from PIL import Image
from entity_gym.environment import (
CategoricalAction,
CategoricalActionMask,
Entity,
Environment,
EpisodeStats,
ObsSpace,
CategoricalActionSpace,
ActionSpace,
Observation,
Action,
VecEnv,
)
import jpype
from jpype.imports import registerDomain
import jpype.imports
from jpype.types import JArray, JInt
class GymMicrorts(Environment):
"""
A real-time strategy environment for microrts.
See https://github.com/santiontanon/microrts
Light grey squares are bases, dark grey squares are barracks,
green squares are resources, colored circles are combat units,
and grey circles are workers that harvest resources.
Args:
map_path: the path to the map, see the list of supported maps [here](https://github.com/vwxyzjn/microrts/tree/52d17e58592722889197aeee03fffafb154cfb8c/maps)
        reward_weight: the weight multiplied with each reward function,
which are in order:
- win/loss reward: + 1 for win, - 1 for loss, 0 for tie
- resource gather reward: + 1 for each resource gathered and +1 for returned
- produce worker reward: + 1 for each worker produced
- produce building reward: + 1 for each building produced
- attack reward: + 1 for each attack action
- produce combat unit reward: + 1 for each combat unit produced
"""
def __init__(
self,
map_path: str = "maps/10x10/basesTwoWorkers10x10.xml",
reward_weight: List[float] = [10.0, 1.0, 1.0, 0.2, 1.0, 4.0],
):
self.map_path = map_path
self.reward_weight = np.array(reward_weight)
self.step = 0
# read map
self.microrts_path = os.path.join(gym_microrts.__path__[0], "microrts")
root = ET.parse(os.path.join(self.microrts_path, self.map_path)).getroot()
self.height = int(root.get("height")) # type: ignore
self.width = int(root.get("width")) # type: ignore
# launch the JVM
if not jpype._jpype.isStarted():
registerDomain("ts", alias="tests")
registerDomain("ai")
jars = [
"microrts.jar",
"lib/bots/Coac.jar",
"lib/bots/Droplet.jar",
"lib/bots/GRojoA3N.jar",
"lib/bots/Izanagi.jar",
"lib/bots/MixedBot.jar",
"lib/bots/TiamatBot.jar",
"lib/bots/UMSBot.jar",
"lib/bots/mayariBot.jar", # "MindSeal.jar"
]
for jar in jars:
jpype.addClassPath(os.path.join(self.microrts_path, jar))
jpype.startJVM(convertStrings=False)
# start microrts client
from rts.units import UnitTypeTable
self.real_utt = UnitTypeTable()
from ai.rewardfunction import (
RewardFunctionInterface,
WinLossRewardFunction,
ResourceGatherRewardFunction,
AttackRewardFunction,
ProduceWorkerRewardFunction,
ProduceBuildingRewardFunction,
ProduceCombatUnitRewardFunction,
)
self.rfs = JArray(RewardFunctionInterface)(
[
WinLossRewardFunction(),
ResourceGatherRewardFunction(),
ProduceWorkerRewardFunction(),
ProduceBuildingRewardFunction(),
AttackRewardFunction(),
ProduceCombatUnitRewardFunction(),
]
)
self.rfs_names = [str(rf) for rf in self.rfs]
self.ai2s = [microrts_ai.coacAI for _ in range(1)]
from ts.entity import JNIEntityClient as Client
from ai.core import AI
self.client = Client(
self.rfs,
os.path.expanduser(self.microrts_path),
self.map_path,
self.ai2s[0](self.real_utt),
self.real_utt,
False,
)
# get the unit type table
self.utt = json.loads(str(self.client.sendUTT()))
@classmethod
def obs_space(cls) -> ObsSpace:
return ObsSpace(
{
"Resource": Entity(["x", "y"]),
"Base": Entity(["x", "y"]),
"Barracks": Entity(["x", "y"]),
"Worker": Entity(["x", "y"]),
"Light": Entity(["x", "y"]),
"Heavy": Entity(["x", "y"]),
"Ranged": Entity(["x", "y"]),
}
)
@classmethod
def action_space(cls) -> Dict[str, ActionSpace]:
return {
"unit_action": CategoricalActionSpace(
choices=[
"move_up",
"move_right",
"move_down",
"move_left",
"harvest_up",
"harvest_right",
"harvest_down",
"harvest_left",
"return_up",
"return_right",
"return_down",
"return_left",
"produce_base_up",
"produce_base_right",
"produce_base_down",
"produce_base_left",
"produce_barrack_up",
"produce_barrack_right",
"produce_barrack_down",
"produce_barrack_left",
]
+ [
f"attack_location_{i}" for i in range(49)
                ],  # the attack range is a 7x7 relative grid
),
"base_action": CategoricalActionSpace(
choices=[
"produce_worker_up",
"produce_worker_right",
"produce_worker_down",
"produce_worker_left",
],
),
"barrack_action": CategoricalActionSpace(
choices=[
"produce_light_up",
"produce_light_right",
"produce_light_down",
"produce_light_left",
"produce_heavy_up",
"produce_heavy_right",
"produce_heavy_down",
"produce_heavy_left",
"produce_ranged_up",
"produce_ranged_right",
"produce_ranged_down",
"produce_ranged_left",
],
),
}
def render(self, **kwargs: Any) -> npt.NDArray[np.uint8]:
if "mode" in kwargs and kwargs["mode"] == "rgb_array":
bytes_array = np.array(self.client.render(True))
image = Image.frombytes("RGB", (640, 640), bytes_array)
return np.array(image)[:, :, ::-1]
else:
return self.client.render(False) # type: ignore
def reset(self) -> Observation:
self.step = 0
self.returns = np.zeros(len(self.rfs))
response = self.client.reset(0)
unit_action_actor_ids = np.array(response.observation[8])
unit_action_actor_masks = np.array(response.observation[9], dtype=np.bool8)
base_action_actor_ids = np.array(response.observation[10])
base_action_actor_masks = np.array(response.observation[11], dtype=np.bool8)
barrack_action_actor_ids = np.array(response.observation[12])
barrack_action_actor_masks = np.array(response.observation[13], dtype=np.bool8)
return Observation.from_entity_obs(
entities=self.generate_entities(response),
actions={
"unit_action": CategoricalActionMask(
actor_ids=unit_action_actor_ids, # type: ignore
mask=unit_action_actor_masks,
),
"base_action": CategoricalActionMask(
actor_ids=base_action_actor_ids, # type: ignore
mask=base_action_actor_masks,
),
"barrack_action": CategoricalActionMask(
actor_ids=barrack_action_actor_ids, # type: ignore
mask=barrack_action_actor_masks,
),
},
reward=response.reward @ self.reward_weight,
done=response.done[0],
end_of_episode_info=EpisodeStats(
length=self.step, total_reward=float(self.reward_weight @ self.returns)
)
if response.done[0]
else None,
)
def act(self, action: Mapping[str, Action]) -> Observation:
game_over = False
self.step += 1
unit_action_actors: Sequence[Any] = []
unit_actions: npt.NDArray[np.int64] = np.empty(0, dtype=np.int64)
base_action_actors: Sequence[Any] = []
base_actions: npt.NDArray[np.int64] = np.empty(0, dtype=np.int64)
barrack_action_actors: Sequence[Any] = []
barrack_actions: npt.NDArray[np.int64] = np.empty(0, dtype=np.int64)
if "unit_action" in action and isinstance(
action["unit_action"], CategoricalAction
):
unit_action_actors = action["unit_action"].actors
unit_actions = action["unit_action"].actions
if "base_action" in action and isinstance(
action["base_action"], CategoricalAction
):
base_action_actors = action["base_action"].actors
base_actions = action["base_action"].actions
if "barrack_action" in action and isinstance(
action["barrack_action"], CategoricalAction
):
barrack_action_actors = action["barrack_action"].actors
barrack_actions = action["barrack_action"].actions
response = self.client.gameStep(
unit_action_actors,
unit_actions,
base_action_actors,
base_actions,
barrack_action_actors,
barrack_actions,
0,
)
unit_action_actor_ids = np.array(response.observation[8])
unit_action_actor_masks = None
if len(unit_action_actor_ids) > 0:
unit_action_actor_masks = np.array(response.observation[9], dtype=np.bool8)
base_action_actor_ids = np.array(response.observation[10])
base_action_actor_masks = None
if len(base_action_actor_ids) > 0:
base_action_actor_masks = np.array(response.observation[11], dtype=np.bool8)
barrack_action_actor_ids = np.array(response.observation[12])
barrack_action_actor_masks = None
if len(barrack_action_actor_ids) > 0:
barrack_action_actor_masks = np.array(
response.observation[13], dtype=np.bool8
)
self.returns += response.reward
return Observation.from_entity_obs(
entities=self.generate_entities(response),
actions={
"unit_action": CategoricalActionMask(
actor_ids=unit_action_actor_ids, # type: ignore
mask=unit_action_actor_masks,
),
"base_action": CategoricalActionMask(
actor_ids=base_action_actor_ids, # type: ignore
mask=base_action_actor_masks,
),
"barrack_action": CategoricalActionMask(
actor_ids=barrack_action_actor_ids, # type: ignore
mask=barrack_action_actor_masks,
),
},
reward=response.reward @ self.reward_weight,
done=response.done[0],
end_of_episode_info=EpisodeStats(
length=self.step,
total_reward=float(self.reward_weight @ self.returns),
metrics=dict(
zip(
[f"charts/episodic_return/{item}" for item in self.rfs_names],
self.returns,
)
),
)
if response.done[0]
else None,
)
def generate_entities(self, response: Any) -> Mapping[str, Optional[EntityObs]]:
entities: MutableMapping[str, Optional[EntityObs]] = {}
for entity_type, observation in zip(
["Resource", "Base", "Barracks", "Worker", "Light", "Heavy", "Ranged"],
response.observation,
):
observation = np.array(observation).astype(np.float32)
if len(observation) > 0:
entities[entity_type] = EntityObs(
features=observation[:, 1:], ids=observation[:, 0].astype(np.int32)
)
return entities
def __del__(self) -> None:
self.client.close()
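
# Minimal usage sketch (illustrative; assumes a working gym_microrts install with
# the bundled maps — stepping is omitted because Action mappings must be built
# from the actor ids returned in the previous observation):
#
#   env = GymMicrorts(map_path="maps/10x10/basesTwoWorkers10x10.xml")
#   obs = env.reset()
#   frame = env.render(mode="rgb_array")  # 640x640 RGB array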
| 37.723343 | 164 | 0.559664 | 1,305 | 13,090 | 5.403831 | 0.206897 | 0.046795 | 0.029779 | 0.044243 | 0.323313 | 0.298639 | 0.268009 | 0.268009 | 0.268009 | 0.259075 | 0 | 0.015419 | 0.345989 | 13,090 | 346 | 165 | 37.83237 | 0.808317 | 0.090298 | 0 | 0.232323 | 0 | 0 | 0.103028 | 0.014465 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026936 | false | 0 | 0.077441 | 0.006734 | 0.131313 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4ff8cae0f2cae43548c5258dd28dcf5612f9a7f7 | 289 | py | Python | Python/07. Collections/05. Word Order/Solution.py | AdityaSingh17/HackerRank-Solutions | 65b7fcd6e82be242fcc7e5b1771941206a8b7940 | [
"MIT"
] | null | null | null | Python/07. Collections/05. Word Order/Solution.py | AdityaSingh17/HackerRank-Solutions | 65b7fcd6e82be242fcc7e5b1771941206a8b7940 | [
"MIT"
] | null | null | null | Python/07. Collections/05. Word Order/Solution.py | AdityaSingh17/HackerRank-Solutions | 65b7fcd6e82be242fcc7e5b1771941206a8b7940 | [
"MIT"
] | null | null | null | # Word Order
# Problem Link: https://www.hackerrank.com/challenges/word-order/problem
from collections import OrderedDict
words = OrderedDict()
for _ in range(int(input())):
word = input()
words.setdefault(word, 0)
words[word] += 1
print(len(words))
print(*words.values())
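# Example: for the input
#   4
#   bcdef
#   abcdefg
#   bcde
#   bcdef
# the program prints 3 (distinct words) followed by "2 1 1" (occurrence counts
# in order of first appearance).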
| 19.266667 | 72 | 0.698962 | 38 | 289 | 5.289474 | 0.657895 | 0.089552 | 0.159204 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008163 | 0.152249 | 289 | 14 | 73 | 20.642857 | 0.812245 | 0.280277 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.125 | 0 | 0.125 | 0.25 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4ffde2f06494b47edff9a4bc601d3565042d59f0 | 1,166 | py | Python | recap_utils/text.py | ReCAP-UTR/Utils | 10f2912a91f8bbfb1ede818e240a3ba7cf767656 | [
"Apache-2.0"
] | null | null | null | recap_utils/text.py | ReCAP-UTR/Utils | 10f2912a91f8bbfb1ede818e240a3ba7cf767656 | [
"Apache-2.0"
] | null | null | null | recap_utils/text.py | ReCAP-UTR/Utils | 10f2912a91f8bbfb1ede818e240a3ba7cf767656 | [
"Apache-2.0"
] | null | null | null | import shutil
from pathlib import Path
import deepl_pro as dl
import typer
from recap_utils import model
cli = typer.Typer()
@cli.command()
def translate(
folder_in: Path,
folder_out: Path,
source_lang: str,
target_lang: str,
auth_key: str,
input_glob: str,
output_suffix: str,
clean: bool = False,
overwrite: bool = False,
start: int = 1,
) -> None:
if clean:
shutil.rmtree(folder_out)
folder_out.mkdir()
paths = model.PathPair.create(folder_in, folder_out, input_glob, output_suffix)
translator = dl.Translator(
auth_key, dl.Language(source_lang), dl.Language(target_lang)
)
with typer.progressbar(
paths[start - 1 :],
item_show_func=model.PathPair.label,
show_pos=True,
) as bar:
for path_pair in bar:
if overwrite or not path_pair.target.exists():
with path_pair.source.open("r") as file:
source_text = file.read()
target_text = translator.translate_text(source_text)
with path_pair.target.open("w") as file:
file.write(target_text)
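
# Entry-point guard (a common convention; the package may instead expose `cli`
# through a console_scripts entry point):
if __name__ == "__main__":
    cli()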
| 24.291667 | 83 | 0.621784 | 151 | 1,166 | 4.602649 | 0.443709 | 0.051799 | 0.040288 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002398 | 0.284734 | 1,166 | 47 | 84 | 24.808511 | 0.830935 | 0 | 0 | 0 | 0 | 0 | 0.001715 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026316 | false | 0 | 0.131579 | 0 | 0.157895 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8b00c152dd7573d50b0538c91b16535244efb283 | 1,613 | py | Python | django_cbv_utils/forms/clean.py | okwrtdsh/django_cbv_utils | 6858f53dda65c79e2201a67ba34d3885b3f11e22 | [
"MIT"
] | null | null | null | django_cbv_utils/forms/clean.py | okwrtdsh/django_cbv_utils | 6858f53dda65c79e2201a67ba34d3885b3f11e22 | [
"MIT"
] | null | null | null | django_cbv_utils/forms/clean.py | okwrtdsh/django_cbv_utils | 6858f53dda65c79e2201a67ba34d3885b3f11e22 | [
"MIT"
] | null | null | null | class RequiredMixin(object):
multiple_required_list = []
chain_required_list = []
def get_multiple_required_list(self):
return self.multiple_required_list
def get_chain_required_list(self):
return self.chain_required_list
def multiple_required(
self, cleaned_data, fields,
func=lambda l: not any(l), msg=None):
"""
複数の入力による必須項目の判定
defaultはいずれか必須
"""
if msg is None:
msg = "{}のいずれかの入力が必須です。".format(
"、".join(self.fields[f].label for f in fields))
cleaned_fields = (cleaned_data.get(f) for f in fields)
if func(cleaned_fields):
for f in fields:
self.add_error(f, msg)
def chain_required(
self, cleaned_data, trigger, fields,
func=bool, msg=None):
"""
triggerが条件を満たす場合に入力を必須にする
"""
if msg is None:
msg = "このフィールドは必須です。"
if isinstance(trigger, (list, tuple)):
cleaned_trigger = (cleaned_data.get(f) for f in trigger)
else:
cleaned_trigger = cleaned_data.get(trigger)
if func(cleaned_trigger):
for f in fields:
if not cleaned_data.get(f):
self.add_error(f, msg)
def clean(self):
cleaned_data = super().clean()
for kwargs in self.get_multiple_required_list():
self.multiple_required(cleaned_data, **kwargs)
for kwargs in self.get_chain_required_list():
self.chain_required(cleaned_data, **kwargs)
return cleaned_data
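
# Usage sketch (hypothetical form, for illustration): require at least one of
# phone/email, and require `reason` whenever `other` is checked.
#
#   class ContactForm(RequiredMixin, forms.Form):
#       phone = forms.CharField(label='Phone', required=False)
#       email = forms.EmailField(label='Email', required=False)
#       other = forms.BooleanField(required=False)
#       reason = forms.CharField(required=False)
#
#       multiple_required_list = [{'fields': ['phone', 'email']}]
#       chain_required_list = [{'trigger': 'other', 'fields': ['reason']}]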
| 32.26 | 68 | 0.584625 | 185 | 1,613 | 4.875676 | 0.237838 | 0.121951 | 0.033259 | 0.053215 | 0.359202 | 0.088692 | 0.046563 | 0 | 0 | 0 | 0 | 0 | 0.323001 | 1,613 | 49 | 69 | 32.918367 | 0.826007 | 0.034718 | 0 | 0.162162 | 0 | 0 | 0.019973 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.135135 | false | 0 | 0 | 0.054054 | 0.297297 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8b04e2c43a95b31379b3c5f312a7366fcd858adf | 9,406 | py | Python | SUNIWARD.py | TracyCuiq/S-UNIWARD-python | d6fa096472af0f5a3ce3d83b72041817629710b8 | [
"MIT"
] | 7 | 2020-04-28T02:37:15.000Z | 2021-10-18T07:43:11.000Z | SUNIWARD.py | TracyCuiq/S-UNIWARD-python | d6fa096472af0f5a3ce3d83b72041817629710b8 | [
"MIT"
] | 1 | 2020-07-06T03:37:21.000Z | 2020-07-06T03:37:21.000Z | SUNIWARD.py | TracyCuiq/S-UNIWARD-python | d6fa096472af0f5a3ce3d83b72041817629710b8 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
import random
from scipy.signal import convolve2d
import math
from scipy import misc
import os
from PIL import Image
from numba import jit
import cv2
import scipy.misc
np.set_printoptions(threshold=np.inf)
def S_UNIWARD(coverPath, payload):
sgm = 1
## Get 2D wavelet filters - Daubechies 8
# 1D high pass decomposition filter
hpdf_list = [-0.0544158422, 0.3128715909, -0.6756307363, 0.5853546837, 0.0158291053,
-0.2840155430, -0.0004724846, 0.1287474266, 0.0173693010, -0.0440882539,
- 0.0139810279, 0.0087460940, 0.0048703530, -0.0003917404, -0.0006754494, -0.0001174768]
# 1D low pass decomposition filter
hpdf_len = range(0, len(hpdf_list))
hpdf_list_reverse = hpdf_list[::-1]
lpdf_list = hpdf_list
for i in range(len(hpdf_list)):
lpdf_list[i] = ((-1) ** hpdf_len[i]) * hpdf_list_reverse[i]
hpdf_array = np.array([hpdf_list])
lpdf_array = np.array([lpdf_list])
lpdf = lpdf_array.reshape(len(lpdf_list), 1)
hpdf = hpdf_array.reshape(len(hpdf_list), 1)
# construction of 2D wavelet filters
F1 = lpdf * hpdf_array
F2 = hpdf * lpdf_array
F3 = hpdf * hpdf_array
W_F = np.zeros((F1.shape[0], F1.shape[0], 3))
W_F[:, :, 0] = F1
W_F[:, :, 1] = F2
W_F[:, :, 2] = F3
## Get embedding costs
# initialization
cover = scipy.misc.imread(coverPath, flatten=False, mode='RGB')
wetCost = 100000000
k, l, _ = cover.shape
# add padding
S1, _1 = F1.shape
S2, _2 = F2.shape
S3, _3 = F3.shape
padSize = max(S1, S2, S3)
coverPadded = np.zeros((k + padSize * 2, l + padSize * 2, 3))
for i in range(3):
coverPadded[:, :, i] = np.lib.pad(cover[:, :, i], padSize, 'symmetric')
xi = np.zeros((k + padSize * 2, l + padSize * 2, 3))
x = np.zeros((k, l, 3))
for i in range(3):
# compute residual
R = convolve2d(coverPadded[:, :, i], W_F[:, :, i], mode='same')
xi[:, :, i] = convolve2d(1. / (np.abs(R) + sgm), np.rot90(abs(W_F[:, :, i]), 2), mode='same')
# correct the suitability shift if filter size is even
if S1 % 2 == 0:
xi[:, :, i] = np.roll(xi[:, :, i], [1, 0])
xi[:, :, i] = np.roll(xi[:, :, i], [0, 1])
# remove padding
S_xi, __xi = xi[:, :, i].shape
        x[:, :, i] = xi[(S_xi - k) // 2: -(S_xi - k) // 2, (__xi - l) // 2: -(__xi - l) // 2, i]  # integer division: slice indices must be ints in Python 3
# compute embedding costs \rho
rho = np.zeros((k, l))
rho = x[:, :, 0] + x[:, :, 1] + x[:, :, 2]
# adjust embedding costs
a, b = np.where(rho > wetCost)
for i in range(len(a)):
rho[a[i], b[i]] = wetCost # threshold on the costs
a, b = np.where(np.isnan(rho))
for i in range(len(a)):
rho[a[i], b[i]] = wetCost # if all xi{} are zero threshold the cost
#k, k_ = rho.shape
rhoP1 = np.zeros((k, l, 3))
rhoM1 = np.zeros((k, l, 3))
for i in range(3):
rhoP1[:,:,i] = rho
rhoM1[:,:,i] = rho
#a, b, c = np.where(cover - 255.0 <= 0.1)
a, b, c = np.where(cover == 255)
for i in range(len(a)):
rhoP1[a[i], b[i], c[i]] = wetCost # do not embed +1 if the pixel has max value
#a, b, c = np.where(cover - 0 <= 0.1)
a, b, c = np.where(cover == 0)
for i in range(len(a)):
rhoM1[a[i], b[i], c[i]] = wetCost # do not embed -1 if the pixel has min value
## Embedding simulator ##
cover_len = len(cover[:, :, 0]) * len(cover[:, :, 0])
stego = cover
print(rhoP1)
for i in range(3):
stego[:, :, i] = EmbeddingSimulator_singel(cover[:, :, i], rhoP1[:, :, i], rhoM1[:, :, i], payload * cover_len,
fixEmbeddingChanges=False)
return stego
# TODO
def EmbeddingSimulator(x, rhoP1, rhoM1, m, fixEmbeddingChanges=False):
cover_len = len(x[:, :, 0]) * len(x[:, :, 0])
l = cal_lambda(rhoP1, rhoM1, m, cover_len)
    randChange = np.random.rand(*x.shape)  # random.random() takes no arguments; use NumPy to draw a uniform array
y = x
def EmbeddingSimulator_singel(x, rhoP1, rhoM1, m, fixEmbeddingChanges=False):
w, h = x.shape
cover_len = (w * h)
l = cal_lambda_(rhoP1, rhoM1, m, cover_len)
shape = rhoP1.shape
pChangeP1 = [(math.exp(-l * rhoP1[i][j])) / (1 + math.exp(-l * rhoP1[i][j]) + math.exp(-l * rhoM1[i][j]))
for j in range(shape[1]) for i in range(shape[0])]
pChangeM1 = [(math.exp(-l * rhoM1[i][j])) / (1 + math.exp(-l * rhoP1[i][j]) + math.exp(-l * rhoM1[i][j]))
for j in range(shape[1]) for i in range(shape[0])]
pChangeP1_array = np.array(pChangeP1).reshape(shape[1], shape[0]).T
pChangeM1_array = np.array(pChangeM1).reshape(shape[1], shape[0]).T
if fixEmbeddingChanges == True:
np.random.seed(139187)
randChange = np.random.rand(w, h)
y = x
arr0, _0 = np.where(randChange < pChangeP1_array)
for i in range(len(arr0)):
y[arr0[i]][_0[i]] += 1
arr1, _1 = np.where((randChange >= pChangeP1_array) & (randChange < pChangeP1_array + pChangeM1_array))
for i in range(len(arr1)):
y[arr1[i]][_1[i]] -= 1
return y
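
# Background for the lambda search below (explanatory note): the simulator flips
# each pixel by +1/-1 with Gibbs probabilities
#   p(+1) = exp(-lambda * rhoP1) / (1 + exp(-lambda * rhoP1) + exp(-lambda * rhoM1))
# (and analogously for -1). cal_lambda_ bisects lambda so that the ternary
# entropy of these probabilities matches the requested message length in bits.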
# TODO
def cal_lambda(rhoP1, rhoM1, message_length, n):
l3 = 1e+3
m3 = math.ceil(message_length)
iterations = 0
while m3 > message_length:
pP1 = rhoP1
pM1 = rhoM1
shape = pP1.shape
l3 = l3 * 2
pP1 = [
(math.exp(-l3 * rhoP1[i][j][k])) / (1 + math.exp(-l3 * rhoP1[i][j][k]) + math.exp(-l3 * rhoM1[i][j][k]))
for k in range(shape[2]) for j in range(shape[1]) for i in range(shape[0])] # list
pM1 = [
(math.exp(-l3 * rhoM1[i][j][k])) / (1 + math.exp(-l3 * rhoP1[i][j][k]) + math.exp(-l3 * rhoM1[i][j][k]))
for k in range(shape[2]) for j in range(shape[1]) for i in range(shape[0])] # list
pP1_array = (np.array(pP1)).reshape(shape[0], shape[1], shape[2])
pM1_array = (np.array(pM1)).reshape(shape[0], shape[1], shape[2])
m3 = ternary_entropyf_4list(pP1, pM1)
iterations = iterations + 1
if iterations > 10:
return l3
return 0
def cal_lambda_(rhoP1, rhoM1, message_length, n):
l3 = 1e+3
m3 = math.ceil(message_length)
iterations = 0
while m3 > message_length:
pP1 = rhoP1
pM1 = rhoM1
# shape = lambda x: pP1.shape if pP1.shape == pM1.shape else 0
shape = pP1.shape
l3 = l3 * 2
pP1 = [(math.exp(-l3 * rhoP1[i][j])) / (1 + math.exp(-l3 * rhoP1[i][j]) + math.exp(-l3 * rhoM1[i][j]))
for j in range(shape[1]) for i in range(shape[0])] # list
pM1 = [(math.exp(-l3 * rhoM1[i][j])) / (1 + math.exp(-l3 * rhoP1[i][j]) + math.exp(-l3 * rhoM1[i][j]))
for j in range(shape[1]) for i in range(shape[0])] # list
pP1_array = (np.array(pP1)).reshape(shape[1], shape[0]).T
pM1_array = (np.array(pM1)).reshape(shape[1], shape[0]).T
m3 = ternary_entropyf_4list(pP1, pM1)
iterations = iterations + 1
if iterations > 10:
return l3
l1 = 0
m1 = n
l = 0
alpha = message_length / n
# limit search to 30 iterations
# and require that relative payload embedded is roughly within 1/1000 of the required relative payload
while (m1 - m3) / n > alpha / 1000.0 and iterations < 30:
l = l1 + (l3 - l1) / 2
pP1 = [(math.exp(-l * rhoP1[i][j])) / (1 + math.exp(-l * rhoP1[i][j]) + math.exp(-l * rhoM1[i][j]))
for j in range(shape[1]) for i in range(shape[0])]
pM1 = [(math.exp(-l * rhoM1[i][j])) / (1 + math.exp(-l * rhoP1[i][j]) + math.exp(-l * rhoM1[i][j]))
for j in range(shape[1]) for i in range(shape[0])]
m2 = ternary_entropyf_4list(pP1, pM1)
if m2 < message_length:
l3 = l
m3 = m2
else:
l1 = l
m1 = m2
iterations = iterations + 1
    return l  # return the lambda found by the bisection search (the original 'return 0' discarded it)
def ternary_entropyf(pP1_, pM1_):
p0 = pP1_
shape = p0.shape
p0 = [1 - pP1_[i][j] - pM1_[i][j] for j in range(shape[1]) for i in range(shape[0])]
ptemp = np.concatenate([[p0], [pP1_], [pM1_]])
_, m, n = ptemp.shape
    p = np.reshape(ptemp, _ * m * n)  # the third positional argument of np.reshape is an order string, not 1
H = (-(p[i] * math.log(p[i])) for i in range(_ * m * n))
Ht = sum(H)
return Ht
def ternary_entropyf_4list(pP1_, pM1_):
p0 = [1 - pP1_[i] - pM1_[i] for i in range(len(pP1_))]
p = p0 + pP1_ + pM1_
Ht = 0
for i in range(len(p)):
if p[i] != 0:
H = -(p[i] * math.log(p[i]))
Ht += H
# Ht = sum(H)
return Ht
coverPath = './sample'
stegoPath = './stego'
for home, dirs, files in os.walk(coverPath):
for file in files:
if not file.startswith('.'):
imgpath = os.path.join(home, file)
print(imgpath)
#img = misc.imread(imgpath)
img = Image.open(imgpath)
#if img.ndim == 3:
if len(img.split())== 3:
stego = S_UNIWARD(imgpath, 0.4)
stegoname = os.path.join(stegoPath, file)
misc.imsave(stegoname, stego)
#misc.imsave(stegoname, stego-img)
plt.subplot(121)
plt.imshow(img)
plt.subplot(122)
plt.imshow(stego)
plt.show()
| 34.07971 | 119 | 0.536785 | 1,417 | 9,406 | 3.486239 | 0.165138 | 0.048178 | 0.027935 | 0.051215 | 0.438462 | 0.39251 | 0.347368 | 0.312348 | 0.300607 | 0.282794 | 0 | 0.081694 | 0.297257 | 9,406 | 275 | 120 | 34.203636 | 0.665658 | 0.095258 | 0 | 0.242574 | 0 | 0 | 0.00425 | 0 | 0 | 0 | 0 | 0.003636 | 0 | 1 | 0.034653 | false | 0 | 0.054455 | 0 | 0.128713 | 0.014851 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8b06db32e78be673f09554f2ce608ac83895e9bf | 1,463 | py | Python | Boatwright_et_al.,2018/assembly_and_qc_scripts/drosophila_scripts/blast/parseBlastResultsForLocation.py | BBarbazukLab/papers | fc77bcae17d475da99d758407be3ff7f9b298c3d | [
"MIT"
] | 3 | 2018-09-18T15:22:25.000Z | 2019-07-10T17:57:42.000Z | Boatwright_et_al.,2018/assembly_and_qc_scripts/drosophila_scripts/blast/parseBlastResultsForLocation.py | BBarbazukLab/papers | fc77bcae17d475da99d758407be3ff7f9b298c3d | [
"MIT"
] | null | null | null | Boatwright_et_al.,2018/assembly_and_qc_scripts/drosophila_scripts/blast/parseBlastResultsForLocation.py | BBarbazukLab/papers | fc77bcae17d475da99d758407be3ff7f9b298c3d | [
"MIT"
] | 4 | 2018-12-01T15:05:15.000Z | 2019-12-17T13:43:55.000Z | #!/usr/bin/env python
# This script parses BLAST output to remove exact, self-hits.
# Output is a BED-formatted file containing the query, query start,
# query end, and e-value (subject, percent identity, and subject
# coordinates are computed but left commented out below).
#AMR 03/28/2013
import csv
import operator
with open('/project/ambiguity/blast_results.tsv', 'rb') as input:
with open('/project/ambiguity/blast_ambig_regions.bed', 'wb') as output:
input_read = csv.reader(input, delimiter='\t')
input_sort = sorted(input_read, key=operator.itemgetter(0)) # Sorts the input file by fusion_id
for row in input_sort:
query=row[0]
subject=row[1]
per_identity=row[2]
q_start=int(row[6])-1 #BED files are 0-based, and BLAST results are 1-based, so 1 must be subtracted.
q_end=row[7]
            s_start=int(row[8])+1 # note: +1 here (unlike q_start); s_start is not written to the BED output below
s_end=row[9]
e_value=row[10]
if query==subject and per_identity=='100.00':
continue
else:
output.write(query+'\t')
#output.write(subject+'\t')
#output.write(per_identity+'\t')
output.write(str(q_start)+'\t')
output.write(q_end+'\t')
#output.write(str(s_start)+'\t')
#output.write(s_end+'\t')
output.write(e_value+'\n')
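
# Each resulting BED line has the form:
#   query_id<TAB>q_start<TAB>q_end<TAB>e_value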
| 38.5 | 113 | 0.588517 | 210 | 1,463 | 4 | 0.428571 | 0.104762 | 0.1 | 0.057143 | 0.214286 | 0.145238 | 0.145238 | 0.145238 | 0.145238 | 0.145238 | 0 | 0.029722 | 0.287081 | 1,463 | 37 | 114 | 39.540541 | 0.775647 | 0.369105 | 0 | 0 | 0 | 0 | 0.10793 | 0.085903 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.090909 | 0 | 0.090909 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8b07634e37eaf0bbc99e3ff4b057bb8cd1ffb567 | 6,138 | py | Python | medium/1801-number-of-orders-in-the-backlog.py | wanglongjiang/leetcode | c61d2e719e81575cfb5bde9d64e15cee7cf01ef3 | [
"MIT"
] | 2 | 2021-03-14T11:38:26.000Z | 2021-03-14T11:38:30.000Z | medium/1801-number-of-orders-in-the-backlog.py | wanglongjiang/leetcode | c61d2e719e81575cfb5bde9d64e15cee7cf01ef3 | [
"MIT"
] | null | null | null | medium/1801-number-of-orders-in-the-backlog.py | wanglongjiang/leetcode | c61d2e719e81575cfb5bde9d64e15cee7cf01ef3 | [
"MIT"
] | 1 | 2022-01-17T19:33:23.000Z | 2022-01-17T19:33:23.000Z | '''
Number of Orders in the Backlog

You are given a 2D integer array orders, where each orders[i] = [price_i, amount_i, orderType_i]
denotes amount_i orders of type orderType_i at price price_i.

orderType_i is one of:
0 - a batch of buy orders
1 - a batch of sell orders

Note that orders[i] represents a batch of amount_i independent orders with the same price
and type. For all valid i, every order represented by orders[i] is placed before every
order represented by orders[i+1].

There is a backlog of unexecuted orders, initially empty. When an order is placed:

If it is a buy order, look at the sell order with the LOWEST price in the backlog. If that
sell order's price is less than or equal to the buy order's price, the two match and
execute, and the sell order is removed from the backlog. Otherwise the buy order joins
the backlog.

Vice versa, if it is a sell order, look at the buy order with the HIGHEST price in the
backlog. If that buy order's price is greater than or equal to the sell order's price,
the two match and execute, and the buy order is removed from the backlog. Otherwise the
sell order joins the backlog.

After all orders are placed, return the total number of orders in the backlog. Since the
answer may be large, return it modulo 10^9 + 7.

1 <= orders.length <= 10^5
orders[i].length == 3
1 <= price_i, amount_i <= 10^9
orderType_i is 0 or 1
'''
from typing import List
'''
Approach 1: max-heap + min-heap. Build a max-heap of buy orders and a min-heap of sell
orders, then iterate over the order list:
1. For a buy order, first search the sell min-heap; if no sell order there satisfies it,
   push the buy order onto the max-heap.
2. For a sell order, first search the buy max-heap; if no buy order there satisfies it,
   push the sell order onto the min-heap.
After all orders are processed, return the total number of orders left in the two heaps.
Time complexity: worst case O(n log n), when every order ends up in a heap (~10^6 ops).
Space complexity: worst case O(n).
'''
class Solution:
    def getNumberOfBacklogOrders(self, orders: List[List[int]]) -> int:
        buyOrders, sellOrders = MaxHeap(), MinHeap()  # buys go in a max-heap, sells in a min-heap
        for order in orders:
            if order[2] == 0:  # buy order: look for the lowest-priced sell order
                # while the cheapest sell order costs no more than the buy price, execute orders
                while sellOrders.notEmpty() and sellOrders.getMin()[0] <= order[0]:
                    sellOrder = sellOrders.getMin()
                    if sellOrder[1] < order[1]:  # buy amount exceeds this sell order: consume it and pop it
                        order[1] -= sellOrder[1]
                        sellOrders.extractMin()
                    elif sellOrder[1] > order[1]:  # sell order exceeds the buy amount: shrink it; buy is done
                        sellOrder[1] -= order[1]
                        order[1] = 0
                        break
                    else:  # equal amounts: pop the sell order; buy is done
                        sellOrders.extractMin()
                        order[1] = 0
                        break
                if order[1] > 0:  # leftover amount joins the buy backlog
                    buyOrders.insert(order)
            else:  # sell order: look for the highest-priced buy order (mirror image of the branch above)
                while buyOrders.notEmpty() and buyOrders.getMax()[0] >= order[0]:
                    buyOrder = buyOrders.getMax()
                    if buyOrder[1] < order[1]:  # sell amount exceeds this buy order: consume it and pop it
                        order[1] -= buyOrder[1]
                        buyOrders.extractMin()  # note: MaxHeap.extractMin() removes the *max* element
                    elif buyOrder[1] > order[1]:  # buy order exceeds the sell amount: shrink it; sell is done
                        buyOrder[1] -= order[1]
                        order[1] = 0
                        break
                    else:  # equal amounts: pop the buy order; sell is done
                        buyOrders.extractMin()
                        order[1] = 0
                        break
                if order[1] > 0:  # leftover amount joins the sell backlog
                    sellOrders.insert(order)
        # after all orders are processed, count the remaining backlog orders
        ans = sum([item[1] for item in buyOrders.heap])
        ans += sum([item[1] for item in sellOrders.heap])
        return ans % (10**9 + 7)
class MaxHeap:
    def __init__(self):
        self.heap = []
        self.size = 0
    # insert a value into the heap
    def insert(self, item):
        self.heap.append(item)
        i = self.size
        self.size += 1
        while i > 0 and self.heap[self.parent(i)][0] < item[0]:  # sift values larger than their parent upward
            self.heap[i], self.heap[self.parent(i)] = self.heap[self.parent(i)], self.heap[i]
            i = self.parent(i)
    # remove the largest element from the heap and return it
    # (the name extractMin is kept for symmetry with MinHeap)
    def extractMin(self):
        i = self.heap[0]
        self.size -= 1
        last = self.heap.pop()
        if self.size:
            self.heap[0] = last
            self.maxHeapify(0)
        return i
    # maintain the max-heap property
    def maxHeapify(self, i):
        left = 2 * i + 1
        right = 2 * i + 2
        minIndex = i
        # if a child is larger than its parent, the max-heap property is violated;
        # swap the parent with the larger child to restore it
        if left < self.size and self.heap[left][0] > self.heap[minIndex][0]:
            minIndex = left
        if right < self.size and self.heap[right][0] > self.heap[minIndex][0]:
            minIndex = right
        if minIndex != i:
            self.heap[minIndex], self.heap[i] = self.heap[i], self.heap[minIndex]
            self.maxHeapify(minIndex)  # the subtree may now violate the heap property; recurse downward
    # index of the parent node
    def parent(self, i):
        return (i - 1) // 2
    def getMax(self):
        return self.heap[0]
    def notEmpty(self):
        return self.size > 0
class MinHeap:
    def __init__(self):
        self.heap = []
        self.size = 0
    # insert a value into the heap
    def insert(self, item):
        self.heap.append(item)
        i = self.size
        self.size += 1
        while i > 0 and self.heap[self.parent(i)][0] > item[0]:  # sift values smaller than their parent upward
            self.heap[i], self.heap[self.parent(i)] = self.heap[self.parent(i)], self.heap[i]
            i = self.parent(i)
    # remove the smallest element from the heap and return it
    def extractMin(self):
        i = self.heap[0]
        self.size -= 1
        last = self.heap.pop()
        if self.size:
            self.heap[0] = last
            self.minHeapify(0)
        return i
    # maintain the min-heap property
    def minHeapify(self, i):
        left = 2 * i + 1
        right = 2 * i + 2
        minIndex = i
        # if a child is smaller than its parent, the min-heap property is violated;
        # swap the parent with the smaller child to restore it
        if left < self.size and self.heap[left][0] < self.heap[minIndex][0]:
            minIndex = left
        if right < self.size and self.heap[right][0] < self.heap[minIndex][0]:
            minIndex = right
        if minIndex != i:
            self.heap[minIndex], self.heap[i] = self.heap[i], self.heap[minIndex]
            self.minHeapify(minIndex)  # the subtree may now violate the heap property; recurse downward
    # index of the parent node
    def parent(self, i):
        return (i - 1) // 2
    def getMin(self):
        return self.heap[0]
    def notEmpty(self):
        return self.size > 0
s = Solution()
print(s.getNumberOfBacklogOrders([[10, 5, 0], [15, 2, 1], [25, 1, 1], [30, 4, 0]]))
print(s.getNumberOfBacklogOrders([[7, 1000000000, 1], [15, 3, 0], [5, 999999995, 0], [5, 1, 1]]))
| 33.358696 | 115 | 0.562398 | 717 | 6,138 | 4.803347 | 0.277545 | 0.088269 | 0.036585 | 0.031359 | 0.382695 | 0.382695 | 0.382695 | 0.37108 | 0.357724 | 0.357724 | 0 | 0.036664 | 0.320137 | 6,138 | 183 | 116 | 33.540984 | 0.788641 | 0.234441 | 0 | 0.634783 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.130435 | false | 0 | 0.008696 | 0.052174 | 0.243478 | 0.017391 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8b09e08df5407786baf6f2c99080da13eca9eb0e | 943 | py | Python | Solutions/Problem_Statement_11_Solution.py | bhaktee01ugale/Hacktoberfest_Moz_Cummins | d8f5cb503c0df48ebae7fd927b812e145f121279 | [
"MIT"
] | 11 | 2021-10-01T09:02:23.000Z | 2022-02-18T17:21:38.000Z | Solutions/Problem_Statement_11_Solution.py | bhaktee01ugale/Hacktoberfest_Moz_Cummins | d8f5cb503c0df48ebae7fd927b812e145f121279 | [
"MIT"
] | 100 | 2021-09-28T11:45:37.000Z | 2021-11-02T05:47:41.000Z | Solutions/Problem_Statement_11_Solution.py | bhaktee01ugale/Hacktoberfest_Moz_Cummins | d8f5cb503c0df48ebae7fd927b812e145f121279 | [
"MIT"
] | 68 | 2021-09-26T11:47:23.000Z | 2022-02-18T17:09:13.000Z | # Write a function howSum (targetSum, numbers) that takes in a targetSum and an array of numbers of arguments.
# The function should return an array containing any combination of elements that add up to exactly the targetSum.
# If there is no combination that adds up to the targetSum, then return null.
# If there are many combinations then return any single one. A Number in numbers can be repeated any number of times to give the targetSum.
def howSum(targetSum, numbers, htable=None):
    # use None instead of a mutable default: a shared dict would leak memoized
    # results between top-level calls made with different `numbers`
    if htable is None:
        htable = {}
    if targetSum in htable:
        return htable[targetSum]
if targetSum==0:
return []
if targetSum<0:
return None
for num in numbers:
rem= targetSum-num
remres= howSum(rem,numbers,htable)
    if remres is not None:
newarr=remres.copy()
newarr.append(num)
htable[targetSum]=newarr
return htable[targetSum]
htable[targetSum]=None
return None
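
# Example usage (illustrative; the exact combination returned may vary):
if __name__ == "__main__":
    print(howSum(7, [5, 3, 4]))  # e.g. [4, 3]
    print(howSum(7, [2, 4]))     # None: no combination of 2s and 4s sums to 7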
| 37.72 | 139 | 0.673383 | 128 | 943 | 4.960938 | 0.421875 | 0.094488 | 0.069291 | 0.056693 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002882 | 0.264051 | 943 | 24 | 140 | 39.291667 | 0.912104 | 0.461294 | 0 | 0.235294 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0 | 0 | 0.352941 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8b12442e6d705f6084e9703508feba4841dc204c | 1,989 | py | Python | src/c_net.py | PlebeianDev/Bookshelf-Format-Parser | 6ead6db3dac5c7568a7f1e5f35ee9ddc62fc01e1 | [
"MIT"
] | null | null | null | src/c_net.py | PlebeianDev/Bookshelf-Format-Parser | 6ead6db3dac5c7568a7f1e5f35ee9ddc62fc01e1 | [
"MIT"
] | null | null | null | src/c_net.py | PlebeianDev/Bookshelf-Format-Parser | 6ead6db3dac5c7568a7f1e5f35ee9ddc62fc01e1 | [
"MIT"
] | null | null | null | import sys
class Net:
"""
Defines a net object according to bookshelf format
"""
counter = -1
def __init__(self):
Net.counter += 1
self.name = None
self.cells = [] # list of cells-in-net names
self.net_degree = 0
self.area = 0.0
self.left_x = 0.0
self.low_y = 0.0
self.right_x = 0.0
self.high_y = 0.0
self.hpwl = 0.0
self.id = Net.counter
def calculate_net_area(self):
return abs((self.high_y - self.low_y) * (self.right_x - self.left_x))
def make_cells_list(self, cells_from_file: set):
self.cells = cells_from_file
    def calculate_net_corners(self, cells: dict):
low_y = sys.float_info.max
left_x = sys.float_info.max
high_y = sys.float_info.min
right_x = sys.float_info.min
for cell_name in self.cells:
if cells[cell_name].low_y <= low_y:
low_y = cells[cell_name].low_y
if cells[cell_name].left_x <= left_x:
left_x = cells[cell_name].left_x
if cells[cell_name].high_y >= high_y:
high_y = cells[cell_name].high_y
if cells[cell_name].right_x >= right_x:
right_x = cells[cell_name].right_x
self.left_x = left_x
self.low_y = low_y
self.high_y = high_y
self.right_x = right_x
def calculate_hpwl(self):
h = self.high_y - self.low_y
w = self.right_x - self.left_x
self.hpwl = h + w
def generate_net(self, nets_dict: dict, cells_list: list, nets_index: dict):
"""
Custom net constructor compatible to info given by file-parsing
:param nets_dict:
:param cells_list:
:param nets_index:
:return:
"""
self.name = nets_index[self.id]
tmp = nets_dict[self.name]
self.make_cells_list(tmp)
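
# Usage sketch (illustrative; assumes `cells` is a dict mapping cell names to
# objects exposing left_x, right_x, low_y and high_y attributes):
#
#   net = Net()
#   net.make_cells_list({'c1', 'c2'})
#   net.calculate_net_corners(cells)
#   net.calculate_hpwl()
#   print(net.hpwl, net.calculate_net_area())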
| 28.414286 | 81 | 0.553042 | 287 | 1,989 | 3.554007 | 0.219512 | 0.04902 | 0.101961 | 0.058824 | 0.22451 | 0.070588 | 0 | 0 | 0 | 0 | 0 | 0.011637 | 0.351936 | 1,989 | 69 | 82 | 28.826087 | 0.779674 | 0.104072 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.022222 | 0.022222 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8b12549806fd848f9a9bc11c2c0d0a7049d040eb | 2,510 | py | Python | appengine_module/gae_ts_mon/handlers.py | mithro/chromium-infra | d27ac0b230bedae4bc968515b02927cf9e17c2b7 | [
"BSD-3-Clause"
] | null | null | null | appengine_module/gae_ts_mon/handlers.py | mithro/chromium-infra | d27ac0b230bedae4bc968515b02927cf9e17c2b7 | [
"BSD-3-Clause"
] | null | null | null | appengine_module/gae_ts_mon/handlers.py | mithro/chromium-infra | d27ac0b230bedae4bc968515b02927cf9e17c2b7 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import datetime
import logging
import webapp2
from google.appengine.ext import ndb
from infra_libs.ts_mon import shared
from infra_libs.ts_mon.common import interface
def find_gaps(num_iter):
"""Generate integers not present in an iterable of integers.
Caution: this is an infinite generator.
"""
next_num = -1
for n in num_iter:
next_num += 1
while next_num < n:
yield next_num
next_num += 1
while True:
next_num += 1
yield next_num
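
# Example: find_gaps([0, 1, 3]) yields 2, 4, 5, 6, ... -- every integer missing
# from the sorted input, then every integer beyond it.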
def _assign_task_num(time_fn=datetime.datetime.utcnow):
expired_keys = []
unassigned = []
used_task_nums = []
time_now = time_fn()
expired_time = time_now - datetime.timedelta(
seconds=shared.INSTANCE_EXPIRE_SEC)
for entity in shared.Instance.query():
# Don't reassign expired task_num right away to avoid races.
if entity.task_num >= 0:
used_task_nums.append(entity.task_num)
# At the same time, don't assign task_num to expired entities.
if entity.last_updated < expired_time:
expired_keys.append(entity.key)
shared.expired_counter.increment()
logging.debug(
'Expiring %s task_num %d, inactive for %s',
entity.key.id(), entity.task_num,
time_now - entity.last_updated)
elif entity.task_num < 0:
shared.started_counter.increment()
unassigned.append(entity)
logging.debug('Found %d expired and %d unassigned instances',
len(expired_keys), len(unassigned))
used_task_nums = sorted(used_task_nums)
for entity, task_num in zip(unassigned, find_gaps(used_task_nums)):
entity.task_num = task_num
logging.debug('Assigned %s task_num %d', entity.key.id(), task_num)
futures_unassigned = ndb.put_multi_async(unassigned)
futures_expired = ndb.delete_multi_async(expired_keys)
ndb.Future.wait_all(futures_unassigned + futures_expired)
logging.debug('Committed all changes')
class SendHandler(webapp2.RequestHandler):
def get(self):
if self.request.headers.get('X-Appengine-Cron') != 'true':
self.abort(403)
with shared.instance_namespace_context():
_assign_task_num()
for name, callback in shared.global_metrics_callbacks.iteritems():
logging.debug('Invoking callback %s', name)
callback()
app = webapp2.WSGIApplication([
(r'/internal/cron/ts_mon/send', SendHandler),
], debug=True)
| 29.880952 | 72 | 0.714741 | 356 | 2,510 | 4.831461 | 0.418539 | 0.056977 | 0.045349 | 0.017442 | 0.02093 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007866 | 0.189641 | 2,510 | 83 | 73 | 30.240964 | 0.837758 | 0.149402 | 0 | 0.086207 | 0 | 0 | 0.091509 | 0.012264 | 0 | 0 | 0 | 0 | 0 | 1 | 0.051724 | false | 0 | 0.103448 | 0 | 0.172414 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8b12e8fd1b8be9f518f4ed422d7d1347bdaadf1c | 4,719 | py | Python | deepvariant/realigner/window_selector_test.py | ruif2009/deepvariant | c7fd07016577c253f81ef253aed65c416e4c0ef7 | [
"BSD-3-Clause"
] | null | null | null | deepvariant/realigner/window_selector_test.py | ruif2009/deepvariant | c7fd07016577c253f81ef253aed65c416e4c0ef7 | [
"BSD-3-Clause"
] | null | null | null | deepvariant/realigner/window_selector_test.py | ruif2009/deepvariant | c7fd07016577c253f81ef253aed65c416e4c0ef7 | [
"BSD-3-Clause"
] | 1 | 2022-02-03T21:54:57.000Z | 2022-02-03T21:54:57.000Z | # Copyright 2017 Google Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Tests for deepvariant.realigner.window_selector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from third_party.nucleus.protos import range_pb2
from third_party.nucleus.testing import test_utils
from deepvariant.protos import realigner_pb2
from deepvariant.realigner.window_selector import WindowSelector
class WindowSelectorTest(absltest.TestCase):
def test_ws_config(self):
return realigner_pb2.RealignerOptions.WindowSelectorOptions(
min_num_supporting_reads=2,
max_num_supporting_reads=10,
min_mapq=20,
min_base_quality=20,
min_windows_distance=4)
def test_process_read(self):
"""Test WindowSelector.process_read()."""
window = WindowSelector(self.test_ws_config())
ref = 'A' * 100
read_1 = test_utils.make_read(
'AAGA', start=10, cigar='4M', quals=[64] * 4, name='read_1')
read_2 = test_utils.make_read(
'AAGTA', start=10, cigar='2M2I1M', quals=[64] * 5, name='read_2')
read_3 = test_utils.make_read(
'AAA', start=10, cigar='2M2D1M', quals=[64] * 3, name='read_3')
read_4 = test_utils.make_read(
'TGATAC', start=10, cigar='2S3M1S', quals=[64] * 6, name='read_4')
read_5 = test_utils.make_read(
'AAGA', start=10, cigar='2M1X1M', quals=[64] * 4, name='read_5')
self.assertEqual(list(window.process_read(ref, read_1)), [12])
self.assertEqual(list(window.process_read(ref, read_2)), [10, 11, 12, 13])
self.assertEqual(list(window.process_read(ref, read_3)), [12, 13])
self.assertEqual(list(window.process_read(ref, read_4)), [8, 9, 11, 13])
self.assertEqual(list(window.process_read(ref, read_5)), [12])
def test_candidate_pos_low_qual(self):
"""Test WindowSelector.process_read() with reads of low quality."""
window = WindowSelector(self.test_ws_config())
ref = 'A' * 100
read_1 = test_utils.make_read(
'AAGA', start=10, cigar='4M', quals=[64, 64, 10, 30], name='read_1')
read_2 = test_utils.make_read(
'AAGTA',
start=10,
cigar='2M2I1M',
quals=[64, 64, 10, 30, 64],
name='read_2')
read_3 = test_utils.make_read(
'TGATAC',
start=10,
cigar='2S3M1S',
quals=[64, 10, 64, 64, 64, 64],
name='read_3')
read_4 = test_utils.make_read(
'AAGA', start=10, cigar='2M1X1M', quals=[64, 64, 30, 10], name='read_4')
self.assertEqual(list(window.process_read(ref, read_1)), [])
self.assertEqual(list(window.process_read(ref, read_2)), [11, 13])
self.assertEqual(list(window.process_read(ref, read_3)), [8, 11, 13])
self.assertEqual(list(window.process_read(ref, read_4)), [12])
def test_windows(self):
"""Test WindowSelector.windows()."""
window = WindowSelector(self.test_ws_config())
candidates = {0: 2, 2: 4, 3: 11, 8: 3}
self.assertEqual(
list(window.windows(candidates, 'ref', 0)), [
range_pb2.Range(reference_name='ref', start=-4, end=6),
range_pb2.Range(reference_name='ref', start=4, end=12)
])
if __name__ == '__main__':
absltest.main()
| 39 | 80 | 0.700996 | 671 | 4,719 | 4.757079 | 0.304024 | 0.041353 | 0.059524 | 0.078321 | 0.426692 | 0.384085 | 0.372807 | 0.372807 | 0.372807 | 0.27099 | 0 | 0.050649 | 0.184149 | 4,719 | 120 | 81 | 39.325 | 0.778442 | 0.348379 | 0 | 0.223881 | 0 | 0 | 0.052858 | 0 | 0 | 0 | 0 | 0 | 0.149254 | 1 | 0.059701 | false | 0 | 0.119403 | 0.014925 | 0.208955 | 0.014925 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8b1354634de1b6853d29a1c3908b67f6aae8fd67 | 1,973 | py | Python | myapi.py | madhav06/FastAPI_for_Beginner | 5c408f450a4b9e08f4cf9db7f2312e4510c2a151 | [
"Apache-2.0"
] | null | null | null | myapi.py | madhav06/FastAPI_for_Beginner | 5c408f450a4b9e08f4cf9db7f2312e4510c2a151 | [
"Apache-2.0"
] | null | null | null | myapi.py | madhav06/FastAPI_for_Beginner | 5c408f450a4b9e08f4cf9db7f2312e4510c2a151 | [
"Apache-2.0"
] | null | null | null | from typing import Optional
from fastapi import FastAPI, Path
from pydantic import BaseModel
app = FastAPI()
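# Run locally with, for example: `uvicorn myapi:app --reload`,
# then open /docs for the interactive OpenAPI UI.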
students = {
1: {
"name": "john",
"age": 17,
"year": "year 2019"
}
}
'''
GET - retrieve information
POST - create something new
PUT - update something
DELETE - delete something
'''
class Student(BaseModel):
name: str
age: int
year: str
class UpdateStudent(BaseModel):
name: Optional[str] = None
age: Optional[int] = None
year: Optional[str] = None
@app.get("/")
def index():
return { "name": "First Data" }
@app.get("/get-student/{student_id}")
def get_student(student_id: int = Path(None, description="The ID of the student you want to view.", gt=0)):
return students[student_id]
@app.get("/get-by-name/{student_id}")
def get_student_by_name(*, student_id: int, name: Optional[str] = None, test: int):
for student_id in students:
if students[student_id]["name"] == name:
return students[student_id]
return {"Data": "Not found"}
@app.post("/create-student/{student_id}")
def create_student(student_id: int, student: Student):
if student_id in students:
return {"Error": "Student Exists"}
students[student_id] = student
return students[student_id]
@app.put("/update-student/{student_id}")
def update_student(student_id: int, student: UpdateStudent):
if student_id not in students:
return {"Error": "Student does not exists"}
if student.name != None:
students[student_id].name = student.name
if student.age != None:
students[student_id].age = student.age
if student.year != None:
students[student_id].year = student.year
return students[student_id]
@app.delete("/delete-student/{student_id}")
def delete_student(student_id: int):
if student_id not in students:
return {"Error": "Student does not exists."}
del students[student_id]
return {"Message": "Student deleted successfully."} | 24.974684 | 104 | 0.659909 | 260 | 1,973 | 4.896154 | 0.234615 | 0.169678 | 0.133543 | 0.074627 | 0.263943 | 0.139827 | 0.139827 | 0.139827 | 0.08641 | 0.08641 | 0 | 0.005115 | 0.207299 | 1,973 | 79 | 105 | 24.974684 | 0.808824 | 0 | 0 | 0.111111 | 0 | 0 | 0.180267 | 0.071467 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.055556 | 0.037037 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8b149868c0ca680e4123557f7080e18cf0b161ce | 1,601 | py | Python | music/forms.py | amin-da71/Benbb96 | 0c9e37425d0665e403ba6fecf0c4b17669c29ada | [
"MIT"
] | null | null | null | music/forms.py | amin-da71/Benbb96 | 0c9e37425d0665e403ba6fecf0c4b17669c29ada | [
"MIT"
] | 13 | 2021-02-13T20:15:18.000Z | 2022-03-11T23:57:07.000Z | music/forms.py | amin-da71/Benbb96 | 0c9e37425d0665e403ba6fecf0c4b17669c29ada | [
"MIT"
] | null | null | null | from django import forms
from django_select2.forms import ModelSelect2Widget, ModelSelect2MultipleWidget
from music.models import Lien, LienPlaylist, Musique, Artiste, Style, Playlist
class MusiqueForm(forms.ModelForm):
class Meta:
model = Musique
fields = ('titre', 'artiste', 'featuring', 'remixed_by', 'styles', 'album', 'label', 'playlists')
widgets = {
'artiste': ModelSelect2Widget(queryset=Artiste.objects.all(), search_fields=['nom_artiste__icontains']),
'featuring': ModelSelect2MultipleWidget(
queryset=Artiste.objects.all(), search_fields=['nom_artiste__icontains']
),
'remixed_by': ModelSelect2Widget(queryset=Artiste.objects.all(), search_fields=['nom_artiste__icontains']),
'styles': ModelSelect2MultipleWidget(
queryset=Style.objects.all(), search_fields=['nom__startswith']
),
'playlists': ModelSelect2MultipleWidget(
queryset=Playlist.objects.all(), search_fields=['nom__icontains'],
attrs={'data-minimum-input-length': 0}
),
}
class BaseLienForm(forms.ModelForm):
class Meta:
fields = ('url', 'plateforme')
widgets = {
'url': forms.TextInput(attrs={'class': 'form-control'}),
'plateforme': forms.Select(attrs={'class': 'form-control'})
}
class LienForm(BaseLienForm):
class Meta(BaseLienForm.Meta):
model = Lien
class LienPlaylistForm(BaseLienForm):
class Meta(BaseLienForm.Meta):
model = LienPlaylist
| 36.386364 | 119 | 0.640225 | 142 | 1,601 | 7.070423 | 0.366197 | 0.049801 | 0.079681 | 0.109562 | 0.336653 | 0.286853 | 0.203187 | 0.203187 | 0.203187 | 0.14741 | 0 | 0.007341 | 0.234229 | 1,601 | 43 | 120 | 37.232558 | 0.811582 | 0 | 0 | 0.264706 | 0 | 0 | 0.173017 | 0.056839 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.088235 | 0 | 0.323529 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8b165bbd672e68d9449efa107574ea5f5b8f291b | 5,231 | py | Python | spin/side_to_side_spin.py | otaviocv/spin | 04ec49b62a81b973c0553a0f808aa021c5c83294 | [
"MIT"
] | null | null | null | spin/side_to_side_spin.py | otaviocv/spin | 04ec49b62a81b973c0553a0f808aa021c5c83294 | [
"MIT"
] | 1 | 2019-10-26T12:42:59.000Z | 2019-10-26T12:42:59.000Z | spin/side_to_side_spin.py | otaviocv/spin | 04ec49b62a81b973c0553a0f808aa021c5c83294 | [
"MIT"
] | null | null | null | """Side to side SPIN Module."""
import numpy as np
from .utils import spin_energy, random_permutation_matrix
class SideToSideSPIN():
"""Side to side SPIN clustering method.
Parameters
----------
random_starts : int, optional (default=5)
The number of different initial random permutations that will
generated.
max_iter : int, optional (default=100)
The maximum number of iterations of each round of sorting.
verbose : boolean, optional (default=False)
Flag indicating to show logs and information during the SPIN process.
Attributes
----------
distances_ : array, shape (n, n)
The original distances matrix provided.
permutation_ : array, shape (n, n)
Permutation matrix that can be applied to the original distances matrix
to get to the ordered distances matrix.
ordered_distances_ : array, shape (n, n)
Distances matrix reordered by the permutation matrix. Before run this
is the original distance matrix.
References
----------
D. Tsafrir, I. Tsafrir, L. Ein-Dor, O. Zuk, D.A. Notterman, E. Domany,
    Sorting points into neighborhoods (SPIN): data analysis and
    visualization by ordering distance matrices, Bioinformatics, Volume 21,
    Issue 10, Pages 2301–2308,
https://doi.org/10.1093/bioinformatics/bti329
"""
def __init__(self, random_starts=5, max_iter=100, verbose=False):
self.random_starts = random_starts
self.max_iter = max_iter
self.verbose = verbose
def run(self, X):
"""Execute the Side To Side sorting.
Parameters
----------
X : array, shape (n, n)
Returns
-------
self : SideToSideSPIN
The object itself containing the ordered distances matrix.
"""
if X.shape[0] != X.shape[1]:
raise ValueError("The SPIN method only works with square matrices."
f"You provided a matrix of shape {X.shape}.")
print("Setup")
self.size_ = X.shape[0]
self.distances_ = X
self.permutation_ = np.identity(self.size_)
self.ordered_distances_ = self.permutation_.dot(X) \
.dot(self.permutation_.T)
assert np.array_equal(self.distances_, self.ordered_distances_)
self.increasing_vector_ = np.array([i-(self.size_+1)/2
for i in range(self.size_)]) \
.reshape(-1, 1)
self.weight_matrix_ = self.increasing_vector_ \
.dot(self.increasing_vector_.T)
print(self.weight_matrix_)
self.energy_ = spin_energy(self.ordered_distances_,
self.weight_matrix_)
print(f"Initial energy: {self.energy_}")
print("Actual spin")
for i in range(self.random_starts):
initial_permutation = random_permutation_matrix(self.size_)
print(initial_permutation[:5, :5])
permutation = side_to_side(self.distances_,
self.increasing_vector_,
initial_permutation,
self.max_iter,
self.verbose)
if np.array_equal(permutation, initial_permutation):
print("They are equal.")
ordered_distances = permutation.dot(self.distances_) \
.dot(permutation.T)
energy = spin_energy(ordered_distances, self.weight_matrix_)
print(f"{i}: {energy}")
if energy < self.energy_:
self.permutation_ = permutation
self.ordered_distances_ = ordered_distances
self.energy_ = energy
def side_to_side(distances, strictly_increasing_vector, initial_permutation,
max_iter=100, verbose=False):
"""Side To Side SPIN algorithm.
Parameters
----------
distances : np.array, shape [n, n]
Distance symmetric square matrix.
strictly_increasing_vector : np.array, shape [n]
A vector with strictly increasing elements with the same dimension as
the distance matrix.
initial_permutation : array, shape [n ,n]
The initial permutation matrix.
max_iter : int, default=100
Maximum number of iterations.
verbose : bool
Verbosity flag, if it is true print useful information about the
process.
Returns
-------
permutation : np.array, shape [n, n]
Permutation matrix with the same dimensions of the distance matrix.
"""
X = strictly_increasing_vector
permutation = initial_permutation.copy()
for i in range(max_iter):
print(".", end="")
S = distances.dot(X).flatten()
        reverse_index_sort = S.argsort()[::-1]
new_permutation = np.identity(distances.shape[0])[reverse_index_sort]
if np.all(new_permutation.dot(S) == permutation.dot(S)):
break
permutation = new_permutation
X = permutation.dot(X)
return permutation
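

# Hedged usage sketch (not part of the original module; the point cloud
# below is illustrative):
#
#   points = np.random.rand(20, 2)
#   dists = np.linalg.norm(points[:, None] - points[None, :], axis=-1)
#   spin = SideToSideSPIN(random_starts=3, max_iter=50)
#   spin.run(dists)
#   spin.ordered_distances_  # distance matrix under the best permutation found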
| 38.463235 | 79 | 0.59243 | 580 | 5,231 | 5.175862 | 0.294828 | 0.053298 | 0.029314 | 0.027981 | 0.101932 | 0.044637 | 0.025316 | 0 | 0 | 0 | 0 | 0.012882 | 0.317339 | 5,231 | 135 | 80 | 38.748148 | 0.827499 | 0.376792 | 0 | 0 | 0 | 0 | 0.054941 | 0 | 0 | 0 | 0 | 0 | 0.016667 | 1 | 0.05 | false | 0 | 0.033333 | 0 | 0.116667 | 0.133333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8b16a8a0c1bd81ea5cb86890226cf0055745db26 | 1,071 | py | Python | oocli/entrypoint.py | enigma0Z/python-object-oriented-cli | 2122ab0b4ab1bec35f36e9ad8d4437dc3056f484 | [
"MIT"
] | null | null | null | oocli/entrypoint.py | enigma0Z/python-object-oriented-cli | 2122ab0b4ab1bec35f36e9ad8d4437dc3056f484 | [
"MIT"
] | null | null | null | oocli/entrypoint.py | enigma0Z/python-object-oriented-cli | 2122ab0b4ab1bec35f36e9ad8d4437dc3056f484 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
oocli.entrypoint
--------------------
"""
import sys
from . import base
class Command(base.Command):
"""
    Entrypoint - defines the entrypoint for your program and passes sys.argv to it
"""
def __init__(self, description=None, command=None):
super().__init__(name=sys.argv[0], description=description)
assert isinstance(command, base.Command)
self.command = command
def do(self):
"""
        Execute the entrypoint command's .do() method, and translate bool
        return values into process exit codes (0 means success, 1 failure).
"""
#pylint: disable=arguments-differ
returnCode = self.command.do(*sys.argv[1:])
        # Translate into zero/nonzero return codes:
        # an exit code of zero means success, nonzero means failure
if isinstance(returnCode, bool):
if returnCode:
sys.exit(0)
else:
sys.exit(1)
else:
# String and int are handled correctly
sys.exit(returnCode)
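

# Hedged usage sketch (not part of the original module); assumes a concrete
# base.Command subclass whose do() returns a bool/int/str exit status, and
# that base.Command's constructor accepts a `name` keyword as used above:
#
#   class Hello(base.Command):
#       def do(self, *args):
#           print('hello', *args)
#           return True
#
#   Command(description='demo entrypoint', command=Hello(name='hello')).do()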
| 26.775 | 100 | 0.59197 | 124 | 1,071 | 5.048387 | 0.5 | 0.033546 | 0.057508 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009235 | 0.29225 | 1,071 | 39 | 101 | 27.461538 | 0.816623 | 0.399627 | 0 | 0.125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.0625 | 1 | 0.125 | false | 0 | 0.125 | 0 | 0.3125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8b1764db3ef7dd93f665da1fea8a6fb564297ced | 1,251 | py | Python | python plotting/full_apd_time_in_AF_plot.py | pm2111/Heart-Defibrillation-Project | 48ea3570c360aac7c3ff46354891998f4f364fab | [
"MIT"
] | null | null | null | python plotting/full_apd_time_in_AF_plot.py | pm2111/Heart-Defibrillation-Project | 48ea3570c360aac7c3ff46354891998f4f364fab | [
"MIT"
] | null | null | null | python plotting/full_apd_time_in_AF_plot.py | pm2111/Heart-Defibrillation-Project | 48ea3570c360aac7c3ff46354891998f4f364fab | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
import os
path = "0.17_restitution_200_healthy_pacemaker__100000.txt"
filenames = []
for f in os.listdir(os.getcwd()):
if not f.startswith('.'):
filenames.append( "/" + f )
horizontal = 100
L = 200 #system size
total_time = 100000.
# = os.listdir(path)
runs = 4
num_nu = np.size(filenames) // runs  # integer division: np.zeros needs int dims
nu_min = 0.1
nu_max = .22
fraction = np.zeros((num_nu,runs))
cells = np.zeros((200,200))
j = 0
fib_time = np.zeros(np.size(filenames))
full = np.zeros((2,40))
data = np.genfromtxt(os.getcwd()+"/average_time_in_af.txt")
data1 = np.genfromtxt(os.getcwd()+"/average_time_in_af_res1.txt")
"""np.insert(data,0,np.zeros(8))
np.insert(data,-1,np.zeros(8))
np.insert(data1,0,np.zeros(8))
np.insert(data1,-1,np.zeros(8))"""
data = np.append(data, np.zeros(3))  # np.append returns a new array
nu = np.linspace(.09,.22,13)
plt.figure()
plt.plot(data,"o",label = "no restitution")
plt.plot(data1,"o", label = "moderate restitution")
plt.legend()
plt.xlabel("nu")
plt.ylabel("average duration of AF")
#plt.title("Fraction of time spent in excited regime for nu = ")
plt.grid()
plt.show()
#LOOK FOR:
#average duration of episode (counter)
#P risk: add column and divide by time (length of array)
| 20.177419 | 65 | 0.672262 | 208 | 1,251 | 3.951923 | 0.442308 | 0.076642 | 0.038929 | 0.048662 | 0.172749 | 0.158151 | 0.085158 | 0.085158 | 0 | 0 | 0 | 0.05566 | 0.152678 | 1,251 | 61 | 66 | 20.508197 | 0.719811 | 0.158273 | 0 | 0 | 0 | 0 | 0.17756 | 0.110022 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.09375 | 0 | 0.09375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8b17beb8458cfb645d1fc05c4a70ff154e3e4a58 | 2,339 | py | Python | SIDRE/sort.py | joshwalawender/SIDRE | 6f6b78414180c433a7fc4ac504373e1d87501c26 | [
"BSD-2-Clause"
] | 1 | 2018-07-08T14:44:24.000Z | 2018-07-08T14:44:24.000Z | SIDRE/sort.py | joshwalawender/SIDRE | 6f6b78414180c433a7fc4ac504373e1d87501c26 | [
"BSD-2-Clause"
] | null | null | null | SIDRE/sort.py | joshwalawender/SIDRE | 6f6b78414180c433a7fc4ac504373e1d87501c26 | [
"BSD-2-Clause"
] | null | null | null | import os
import re
import ccdproc as ccd
import astropy.units as u
from astropy import table
from .config import get_config
def get_ImageFileCollection(filepath):
'''
Given a directory path with FITS files in it, use the header keywords (hard
coded in this function) to categorize each file as one of:
Science: A science exposure
Bias: A bias frame
Dark: A dark frame
Flat: A flat field frame (twilight or dome)
Rejected: A file that has been rejection for any reason.
Uncategorized: A file which was not categorized as one of the above.
A column called "CATEGORY" is added to the `ImageFileCollection.summary`
table and populated with a string of the above category.
This method can be replaced to customize the code to any particular header
or metadata convention.
'''
assert os.path.exists(os.path.abspath(filepath))
temperature_deadband = get_config().get('TemperatureDeadband', 1.0)
keywords = ['EXPTIME', 'SET-TEMP', 'CCD-TEMP', 'XBINNING', 'YBINNING',
'IMAGETYP', 'OBJECT', 'DATE-OBS']
ifc = ccd.ImageFileCollection(filepath, keywords=keywords)
ifc.summary.add_column(table.Column(data=['']*len(ifc.summary),
name='CATEGORY', dtype='a12'))
for i,entry in enumerate(ifc.summary):
tempdiff = float(entry['SET-TEMP']) - float(entry['CCD-TEMP'])
if abs(tempdiff) > temperature_deadband:
ifc.summary[i]['CATEGORY'] = b'Rejected'
elif re.search('Light Frame', entry['IMAGETYP'], flags=re.IGNORECASE):
ifc.summary[i]['CATEGORY'] = b'Science'
elif re.search('Bias Frame', entry['IMAGETYP'], flags=re.IGNORECASE):
ifc.summary[i]['CATEGORY'] = b'Bias'
elif re.search('Dark Frame', entry['IMAGETYP'], flags=re.IGNORECASE):
ifc.summary[i]['CATEGORY'] = b'Dark'
elif re.search('Flat', entry['IMAGETYP'], flags=re.IGNORECASE):
ifc.summary[i]['CATEGORY'] = b'Flat'
else:
ifc.summary[i]['CATEGORY'] = b'Uncategorized'
return ifc
def get_image_table(filepath, category):
    '''Return the summary rows whose CATEGORY matches `category`.

    Note that CATEGORY values are stored as bytes (dtype 'a12'), so pass
    e.g. b'Science' rather than 'Science'.
    '''
    ifc = get_ImageFileCollection(filepath)
    bytype = ifc.summary.group_by('CATEGORY')
    typelist = bytype.groups[bytype.groups.keys['CATEGORY'] == category]
    return typelist
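

# Hedged usage sketch (not part of the original module); the path is
# illustrative. CATEGORY values are stored as bytes, hence b'Science':
#
#   science_frames = get_image_table('/data/2019-01-01', b'Science')
#   print(science_frames['OBJECT', 'EXPTIME'])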
| 38.344262 | 79 | 0.652416 | 302 | 2,339 | 5.019868 | 0.423841 | 0.065963 | 0.043536 | 0.075198 | 0.168206 | 0.141821 | 0.141821 | 0.141821 | 0.141821 | 0.141821 | 0 | 0.002219 | 0.229158 | 2,339 | 60 | 80 | 38.983333 | 0.838602 | 0.273621 | 0 | 0 | 0 | 0 | 0.170552 | 0 | 0 | 0 | 0 | 0 | 0.029412 | 1 | 0.058824 | false | 0 | 0.176471 | 0 | 0.294118 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8b17d0be018adb93b11563e8419966be49d28784 | 910 | py | Python | server/vo/request/ocr_request_vo.py | sesmond/Detectron | d3f7459fdc0fca2182897fb8acba243010914eb5 | [
"Apache-2.0"
] | null | null | null | server/vo/request/ocr_request_vo.py | sesmond/Detectron | d3f7459fdc0fca2182897fb8acba243010914eb5 | [
"Apache-2.0"
] | null | null | null | server/vo/request/ocr_request_vo.py | sesmond/Detectron | d3f7459fdc0fca2182897fb8acba243010914eb5 | [
"Apache-2.0"
] | null | null | null | class OcrRequest:
"""
OCR 请求报文
"""
# 检测model(ctpn/psenet等)
detect_model = ''
# 二值化阈值
threshold = None
# 是否返回debug图片
do_verbose = False
# 是否做文字矫正
do_correct = False
# 是否做版面行分析
do_layout = False
# 要识别的图片(base64格式)
img = ''
def __str__(self):
return "detect_model:%s," \
"threshold:%r," \
"do_verbose:%r," \
"do_correct:%r," \
"do_layout:%r," \
"img:%r," \
"" % \
(self.detect_model,
self.threshold,
self.do_verbose,
self.do_correct,
self.do_layout,
len(self.img))
if __name__ == '__main__':
    req = OcrRequest()
    req.do_layout = False
# req.detect_model="psenet"
# print(req.__str__())
# print(req)
# logger.info("qingca shu:%s",req) | 22.75 | 38 | 0.473626 | 88 | 910 | 4.556818 | 0.454545 | 0.109726 | 0.064838 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003636 | 0.395604 | 910 | 40 | 38 | 22.75 | 0.725455 | 0.191209 | 0 | 0 | 0 | 0 | 0.119382 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0 | 0.041667 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8b1846d27ef55491d1f71eac99ddd48f40a267a9 | 3,819 | py | Python | scraping/scraper.py | vnurhaqiqi/scraping-cnbcindonesia-api | eae9842eafec1a578736a7d20829f996382a8ee2 | [
"MIT"
] | 2 | 2021-09-09T07:03:55.000Z | 2022-02-08T00:44:41.000Z | scraping/scraper.py | vnurhaqiqi/scraping-cnbcindonesia-api | eae9842eafec1a578736a7d20829f996382a8ee2 | [
"MIT"
] | 1 | 2022-01-07T01:33:02.000Z | 2022-01-07T07:55:40.000Z | scraping/scraper.py | vnurhaqiqi/scraping-cnbcindonesia-api | eae9842eafec1a578736a7d20829f996382a8ee2 | [
"MIT"
] | null | null | null | from builtins import Exception
from bs4 import BeautifulSoup
from requests import get
from helpers.helpers import *
class Scraper():
def scraping_data(self, url):
web_data = get(url)
if web_data.status_code == 200:
soup = BeautifulSoup(web_data.text, 'html.parser')
contents = soup.find_all('article')
news_data = {'headline': {}, 'total_news': 0, 'news': []}
# get headline news
headline_content = soup.find('article', id='hl')
try:
news_data['headline']['title'] = headline_content.find('h1').text
news_data['headline']['label'] = headline_content.find('span', class_='label').text
headline_release_updated = headline_content.find('span', class_='date') \
.text.replace(news_data['headline']['label'] + ' ', '')
news_data['headline']['release_updated'] = headline_release_updated
news_data['headline']['url'] = headline_content.find('a', href=True).get('href')
news_data['headline']['img_url'] = headline_content.find('img').get('src')
            except Exception:
                # the headline block may be missing; skip it silently
                pass
# get all news articles
for content in contents:
try:
title = content.find('h2').text
news_label = content.find('span', class_='label').text
time_desc = content.find('span', class_='date').text.replace(news_label, '').split(' ')[4:8]
release_updated = ' '.join(time_desc)
news_url = content.find('a', href=True).get('href')
img_url = content.find('img').get('src')
news_data['news'].append({
'title': title,
'label': news_label,
'release_updated': release_updated,
'url': news_url,
'img_url': img_url
})
                except Exception:
                    # skip article blocks that do not match the expected markup
                    continue
news_data['total_news'] = len(news_data['news'])
return news_data
elif web_data.status_code == 404:
return False
def get_data_from_page(self, path=None):
url_path = SOURCE_URL + path if path else SOURCE_URL
res = self.scraping_data(url_path)
return res
def get_data_by_query(self, query=None):
url_path = SOURCE_URL + 'search?query={}'.format(query) if query else SOURCE_URL
res = self.scraping_data(url_path)
return res
def scraping_data_detail(self, url):
web_data = get(url)
if web_data.status_code == 200:
soup = BeautifulSoup(web_data.text, 'html.parser')
try:
header = soup.find('div', class_='jdl')
title = header.find('h1').text
author_class = header.find('div', class_='author').text.split(' ')
label = author_class[0]
author = ' '.join(author_class[2:])
release_date = header.find('div', class_='date').text
detail_text_class = soup.find('div', class_='detail_text')
texts = detail_text_class.find_all('p')
news_content = ' '.join([text.text for text in texts])
news_content_data = {
'title': title,
'label': label,
'author': author,
'release_date': release_date,
'content': news_content
}
return news_content_data
            except Exception:
                # detail page did not match the expected layout
                return {'status': 400}
elif web_data.status_code == 404:
return False
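

# Hedged usage sketch (not part of the original module). SOURCE_URL comes
# from helpers.helpers; the query below is illustrative:
#
#   scraper = Scraper()
#   listing = scraper.get_data_by_query('ekonomi')
#   if listing:
#       print(listing['total_news'])
#       for item in listing['news'][:3]:
#           print(item['title'], item['url'])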
| 37.07767 | 112 | 0.523959 | 411 | 3,819 | 4.635037 | 0.218978 | 0.046194 | 0.058793 | 0.035696 | 0.324409 | 0.274016 | 0.24357 | 0.215223 | 0.137533 | 0.137533 | 0 | 0.009764 | 0.356376 | 3,819 | 102 | 113 | 37.441176 | 0.765256 | 0.010212 | 0 | 0.285714 | 0 | 0 | 0.095843 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.051948 | false | 0.012987 | 0.051948 | 0 | 0.207792 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8b1a95dd21d4df6514b5079162f09a28b0bbc4f9 | 1,555 | py | Python | vkapi/incoming_message.py | kalinochkind/vkbot | 306a244cb15745057fd838cd7c3163f0b6754d4b | [
"MIT"
] | 39 | 2015-12-18T20:02:35.000Z | 2021-12-01T13:43:08.000Z | vkapi/incoming_message.py | kalinochkind/vkbot | 306a244cb15745057fd838cd7c3163f0b6754d4b | [
"MIT"
] | 5 | 2016-01-31T19:33:10.000Z | 2017-11-27T04:18:32.000Z | vkapi/incoming_message.py | kalinochkind/vkbot | 306a244cb15745057fd838cd7c3163f0b6754d4b | [
"MIT"
] | 16 | 2015-11-21T19:34:36.000Z | 2021-05-09T20:30:24.000Z | from .utils import CONF_START, doc_types, cached_property
class IncomingMessage:
    """A single incoming message parsed from a VK API message object."""
def __init__(self, data, method=''):
self.id = data.get('id')
self.date = data['date']
self.body = data.get('text', '')
self.user_id = data['from_id']
if 'peer_id' in data:
self.chat_id = data['peer_id'] - CONF_START if data['peer_id'] > CONF_START else None
else:
self.chat_id = None
self.action = data.get('action')
self.attachments = data.get('attachments', [])
self._fwd_messages_raw = data.get('fwd_messages', [])
if 'reply_message' in data:
self._fwd_messages_raw.append(data['reply_message'])
self.method = method
self.is_sticker = False
self.is_voice = False
for att in self.attachments:
if att['type'] == 'sticker':
self.is_sticker = True
if att['type'] == 'doc' and att['doc']['type'] == doc_types.AUDIO:
self.is_voice = True
def _construct_forwarded_message(self, data):
return self.__class__(data)
@property
def peer_id(self):
if self.chat_id is not None:
return CONF_START + self.chat_id
return self.user_id
@property
def is_chat(self):
return self.chat_id is not None
@cached_property
def fwd_messages(self):
fwd_messages = [self._construct_forwarded_message(data) for data in self._fwd_messages_raw]
del self._fwd_messages_raw
return fwd_messages
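

# Hedged usage sketch (not part of the original module); the payload below is
# a minimal illustrative subset of a VK message object:
#
#   raw = {'id': 1, 'date': 1600000000, 'text': 'hi', 'from_id': 42,
#          'peer_id': 42, 'attachments': []}
#   msg = IncomingMessage(raw, method='longpoll')
#   msg.is_chat, msg.peer_id  # -> (False, 42)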
| 32.395833 | 99 | 0.604502 | 204 | 1,555 | 4.328431 | 0.25 | 0.09966 | 0.056625 | 0.08154 | 0.08607 | 0.043035 | 0 | 0 | 0 | 0 | 0 | 0 | 0.285531 | 1,555 | 47 | 100 | 33.085106 | 0.794779 | 0 | 0 | 0.051282 | 0 | 0 | 0.075884 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.128205 | false | 0 | 0.025641 | 0.051282 | 0.307692 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8b1aac57c08259b9336e1a7e93c2b22f68d4fe57 | 1,759 | py | Python | show_data.py | wtyhub/University1652-Baseline | 26becc2b73b74bd9e7c7de31f9d7f6baebbd64c8 | [
"MIT"
] | 41 | 2021-02-25T11:21:48.000Z | 2022-03-15T10:41:04.000Z | show_data.py | gold-pipe/University1652-Baseline | bad3c7555decf8e5213bfdda85dc317057ff3cc2 | [
"MIT"
] | 5 | 2021-03-04T12:00:04.000Z | 2021-05-06T06:10:21.000Z | show_data.py | gold-pipe/University1652-Baseline | bad3c7555decf8e5213bfdda85dc317057ff3cc2 | [
"MIT"
] | 1 | 2022-02-21T07:50:26.000Z | 2022-02-21T07:50:26.000Z | import sys
import torch
import os
import numpy as np
from PIL import Image
#target_root = 'data/train/drone'
#target_root = 'data/train/street'
#target_root = 'data/train/satellite'
target_root = 'data/train/google'
def pad(inp, pad=3):
    # PIL's Image.size is (width, height); the images are resized to squares
    # before padding, so the h/w order does not matter here
    h, w = inp.size
    bg = np.zeros((h + 2 * pad, w + 2 * pad, len(inp.mode)))
    bg[pad:pad + h, pad:pad + w, :] = inp
    return bg
count = 0
ncol = 20
nrow = 25
npad = 3
im = {}
white_col = np.ones( (128+2*npad,24,3))*255
for folder_name in os.listdir(target_root):
folder_root = target_root + '/' + folder_name
if not os.path.isdir(folder_root):
continue
for img_name in os.listdir(folder_root):
input1 = Image.open(folder_root + '/' + img_name)
input1 = input1.convert('RGB')
print(folder_root + '/' + img_name)
input1 = input1.resize( (128, 128))
# Start testing
tmp = pad(input1, pad=npad)
if count%ncol == 0:
im[count//ncol] = tmp
else:
im[count//ncol] = np.concatenate((im[count//ncol], white_col, tmp), axis=1)
        count += 1
if 'drone' in target_root:
break
if count > nrow*ncol:
break
first_row = np.ones((128+2*npad,128+2*npad,3))*255
white_row = np.ones( (24,im[0].shape[1],3))*255
for i in range(min(nrow, len(im))):  # guard: im may hold fewer rows than nrow
if i == 0:
pic = im[0]
else:
pic = np.concatenate((pic, im[i]), axis=0)
pic = np.concatenate((pic, white_row), axis=0)
#first_row = np.concatenate((first_row, white_col, im[i][0:256+2*npad, 0:256+2*npad, 0:3]), axis=1)
#pic = np.concatenate((first_row, white_row, pic), axis=0)
pic = Image.fromarray(pic.astype('uint8'))
pic.save('sample_%s.jpg'%os.path.basename(target_root))
#pic.save('sample.jpg')
| 28.836066 | 103 | 0.6083 | 283 | 1,759 | 3.678445 | 0.300353 | 0.076849 | 0.053794 | 0.073007 | 0.150817 | 0.055716 | 0 | 0 | 0 | 0 | 0 | 0.053756 | 0.22797 | 1,759 | 60 | 104 | 29.316667 | 0.712813 | 0.174531 | 0 | 0.086957 | 0 | 0 | 0.031856 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.021739 | false | 0 | 0.108696 | 0 | 0.152174 | 0.021739 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8b1ab4cbf0d4db7f695d403b5c435f7c2fa3dae1 | 4,223 | py | Python | csi/controllerserver.py | Madhu-1/kadalu | 4243d711ece08c3ac06a9a079628a78cb94f3b1c | [
"Apache-2.0"
] | null | null | null | csi/controllerserver.py | Madhu-1/kadalu | 4243d711ece08c3ac06a9a079628a78cb94f3b1c | [
"Apache-2.0"
] | null | null | null | csi/controllerserver.py | Madhu-1/kadalu | 4243d711ece08c3ac06a9a079628a78cb94f3b1c | [
"Apache-2.0"
] | null | null | null | """
controller server implementation
"""
import os
import csi_pb2
import csi_pb2_grpc
from utils import mount_glusterfs, execute, get_pv_hosting_volumes, \
PV_TYPE_SUBVOL, PV_TYPE_VIRTBLOCK, is_space_available
HOSTVOL_MOUNTDIR = "/mnt"
GLUSTERFS_CMD = "/usr/sbin/glusterfs"
MOUNT_CMD = "/usr/bin/mount"
UNMOUNT_CMD = "/usr/bin/umount"
MKFS_XFS_CMD = "/usr/sbin/mkfs.xfs"
class ControllerServer(csi_pb2_grpc.ControllerServicer):
"""
ControllerServer object is responsible for handling host
volume mount and PV creation.
Ref:https://github.com/container-storage-interface/spec/blob/master/spec.md
"""
def CreateVolume(self, request, context):
pvsize = request.capacity_range.required_bytes
# TODO: Check the available space under lock
host_volumes = get_pv_hosting_volumes()
hostvol = ""
for hvol in host_volumes:
mntdir = os.path.join(HOSTVOL_MOUNTDIR, hvol)
# Try to mount the Host Volume, handle failure if already mounted
mount_glusterfs(hvol, mntdir)
if is_space_available(mntdir, pvsize):
hostvol = hvol
break
if hostvol == "":
raise Exception("no Hosting Volumes available, add more storage")
pvtype = PV_TYPE_SUBVOL
for vol_capability in request.volume_capabilities:
# using getattr to avoid Pylint error
single_node_writer = getattr(csi_pb2.VolumeCapability.AccessMode,
"SINGLE_NODE_WRITER")
if vol_capability.access_mode.mode == single_node_writer:
pvtype = PV_TYPE_VIRTBLOCK
volpath = os.path.join(HOSTVOL_MOUNTDIR, hostvol, pvtype, request.name)
if pvtype == PV_TYPE_VIRTBLOCK:
# Create a file with required size
os.makedirs(os.path.dirname(volpath), exist_ok=True)
volpath_fd = os.open(volpath, os.O_CREAT | os.O_RDWR)
os.close(volpath_fd)
os.truncate(volpath, pvsize)
# TODO: Multiple FS support based on volume_capability mount option
execute(MKFS_XFS_CMD, volpath)
else:
# Create a subdir
os.makedirs(volpath)
# TODO: Set BackendQuota using RPC to sidecar
# container of each glusterfsd pod
return csi_pb2.CreateVolumeResponse(
volume={
"volume_id": request.name,
"capacity_bytes": pvsize,
"volume_context": {
"hostvol": hostvol,
"pvtype": pvtype,
"fstype": "xfs"
}
}
)
def DeleteVolume(self, request, context):
hostvol = request.volume_context.get("hostvol", "")
mntdir = os.path.join(HOSTVOL_MOUNTDIR, hostvol)
# Try to mount the Host Volume, handle
# failure if already mounted
mount_glusterfs(hostvol, mntdir)
# TODO: get pvtype from storage class
pvtype = request.volume_context.get("pvtype", "")
volpath = os.path.join(mntdir, pvtype, request.name)
if pvtype == PV_TYPE_VIRTBLOCK:
os.remove(volpath)
        else:
            # note: os.removedirs only removes empty directories (and prunes
            # empty parents); a populated subvolume would need shutil.rmtree
            os.removedirs(volpath)
return csi_pb2.DeleteVolumeResponse()
def ValidateVolumeCapabilities(self, request, context):
# TODO
pass
def ListVolumes(self, request, context):
# TODO
# Mount hostvol
# Listdir and return the list
# Volume capacity need to be stored somewhere
pass
def ControllerGetCapabilities(self, request, context):
# using getattr to avoid Pylint error
capability_type = getattr(
csi_pb2.ControllerServiceCapability.RPC, "Type").Value
return csi_pb2.ControllerGetCapabilitiesResponse(
capabilities=[
{
"rpc": {
"type": capability_type("CREATE_DELETE_VOLUME")
}
},
{
"rpc": {
"type": capability_type("LIST_VOLUMES")
}
}
]
)
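

if __name__ == '__main__':
    # Hedged serving sketch (not part of the original module): wire the
    # servicer into a gRPC server. A real CSI driver listens on the unix
    # socket passed via CSI_ENDPOINT; the address below is illustrative.
    from concurrent import futures
    import grpc
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    csi_pb2_grpc.add_ControllerServicer_to_server(ControllerServer(), server)
    server.add_insecure_port('unix:///tmp/csi.sock')
    server.start()
    server.wait_for_termination()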
| 33.515873 | 79 | 0.590339 | 437 | 4,223 | 5.528604 | 0.370709 | 0.019868 | 0.037252 | 0.021109 | 0.154387 | 0.154387 | 0.087748 | 0.087748 | 0.054636 | 0.054636 | 0 | 0.00283 | 0.330571 | 4,223 | 125 | 80 | 33.784 | 0.851786 | 0.180677 | 0 | 0.098765 | 0 | 0 | 0.075007 | 0 | 0 | 0 | 0 | 0.008 | 0 | 1 | 0.061728 | false | 0.024691 | 0.049383 | 0 | 0.160494 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8b1ce4d69b758884aef660664de6367d1e512475 | 2,164 | py | Python | cpmpy/among_seq.py | hakank/hakank | 313e5c0552569863047f6ce9ae48ea0f6ec0c32b | [
"MIT"
] | 279 | 2015-01-10T09:55:35.000Z | 2022-03-28T02:34:03.000Z | cpmpy/among_seq.py | hakank/hakank | 313e5c0552569863047f6ce9ae48ea0f6ec0c32b | [
"MIT"
] | 10 | 2017-10-05T15:48:50.000Z | 2021-09-20T12:06:52.000Z | cpmpy/among_seq.py | hakank/hakank | 313e5c0552569863047f6ce9ae48ea0f6ec0c32b | [
"MIT"
] | 83 | 2015-01-20T03:44:00.000Z | 2022-03-13T23:53:06.000Z | """
Global constraint among_seq in cpmpy.
From Global constraint catalog:
http://www.emn.fr/x-info/sdemasse/gccat/Camong_seq.html
'''
Constraint
among_seq(LOW,UP,SEQ,VARIABLES,VALUES)
Purpose
Constrains all sequences of SEQ consecutive variables of the collection
VARIABLES to take at least LOW values in VALUES and at most UP values
in VALUES.
Example
(
1,2,4,<9,2,4,5,5,7,2>,
<0,2,4,6,8>
)
The among_seq constraint holds since the different sequences of 4
consecutive variables contains respectively 2, 2, 1 and 1 even numbers.
'''
Model created by Hakan Kjellerstrand, hakank@hakank.com
See also my cpmpy page: http://www.hakank.org/cpmpy/
"""
import sys
import numpy as np
from cpmpy import *
from cpmpy.solvers import *
from cpmpy_hakank import *
def among_seq_test(xval=None):
n = 7
# The set as a list
v = [0,2,4,6,8]
# variables
x = intvar(0,9,shape=n,name="x")
low = intvar(0,n-1,name="low")
high = intvar(0,n-1,name="high")
# Note: seqlen cannot be a decision variable since
# it's used together with range (in this implementation)
# seqlen = intvar(1,n-1,name="seqlen")
# low = 1
# high = 2
seqlen = 4
# constraints
if xval == None:
model = Model([AllDifferent(x),
increasing(x),
among_seq(low,high,seqlen,x,v),
low == 1,
high == 2,
])
else:
model = Model([x == xval,
among_seq(low,high,seqlen,x,v),
])
# ortools_wrapper2(model,[x,[low,high]])
ss = CPM_ortools(model)
num_solutions = 0
while ss.solve() is not False:
num_solutions += 1
print("x:", x.value())
print("low:",low.value(),"high:",high.value(),"seqlen:",seqlen)
get_different_solution(ss,list(x)+[low,high])
print("num_solutions:",num_solutions)
print("No fixed x but fixed low=1, high=2, and seqlen=4:")
xval = None
among_seq_test(xval)
xval=[9,2,4,5,5,7,2]
print(f"\nFixed x = {xval} . No fixed low or hig. seqlen=4")
among_seq_test(xval)
| 22.541667 | 72 | 0.60305 | 325 | 2,164 | 3.950769 | 0.384615 | 0.049844 | 0.025701 | 0.037383 | 0.074766 | 0.046729 | 0.046729 | 0 | 0 | 0 | 0 | 0.033481 | 0.268484 | 2,164 | 95 | 73 | 22.778947 | 0.777637 | 0.417745 | 0 | 0.162162 | 0 | 0 | 0.111826 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027027 | false | 0 | 0.135135 | 0 | 0.162162 | 0.135135 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8b1f86bfb12bf0609bdbdbeb6c764ecbc1838d24 | 1,878 | py | Python | Array/Surrounded_Regions.py | shua2018ti/Google | 3a9847e0c60d887d15eb4b0d4d8ebf51e464df1b | [
"MIT"
] | 87 | 2015-07-15T20:41:09.000Z | 2022-03-08T13:55:38.000Z | Array/Surrounded_Regions.py | shua2018ti/Google | 3a9847e0c60d887d15eb4b0d4d8ebf51e464df1b | [
"MIT"
] | 59 | 2015-03-19T22:26:41.000Z | 2015-07-25T17:58:08.000Z | Array/Surrounded_Regions.py | shua2018ti/Google | 3a9847e0c60d887d15eb4b0d4d8ebf51e464df1b | [
"MIT"
] | 45 | 2015-07-15T20:41:12.000Z | 2022-02-01T20:18:07.000Z | '''
Given a 2D board containing 'X' and 'O', capture all regions surrounded by 'X'.
A region is captured by flipping all 'O's into 'X's in that surrounded region.
For example,
X X X X
X O O X
X X O X
X O X X
After running your function, the board should be:
X X X X
X X X X
X X X X
X O X X
'''
class Solution:
# @param {character[][]} board
# @return {void} Do not return anything, modify board in-place instead.
def solve(self, board):
if not board: return
queue = []
m = len(board); n = len(board[0])
for i in xrange(m):
self.dfs(i, 0, board, queue)
self.dfs(i, n-1, board, queue)
for j in xrange(1, n-1):
self.dfs(m-1, j, board, queue)
self.dfs(0, j, board, queue)
for i in xrange(m):
for j in xrange(n):
if board[i][j] == 'O':
board[i][j] = 'X'
elif board[i][j] == 'J':
board[i][j] = 'O'
def dfs(self, x, y, board, queue):
self.check(x, y, board, queue)
while queue:
i, j = queue.pop()
self.check(i+1, j, board, queue) # 注意这里不是用dfs,用check
self.check(i-1, j, board, queue)
self.check(i, j+1, board, queue)
self.check(i, j-1, board, queue)
def check(self, x, y, board, queue):
if x < 0 or x >= len(board) or y < 0 or y >= len(board[0]) or board[x][y] != 'O': return
queue.append((x,y))
board[x][y] = 'J'
# 解题思路:
# instead of go through every node in the board, we only need to go through the edge
# of the board, if there is 'O' in the edge, then find all the adjecent 'O', make it
# as 'J', then go through the board again, if the 'O', will mark it as 'X', if 'J',
# mark it as 'O'
| 29.34375 | 96 | 0.51065 | 313 | 1,878 | 3.063898 | 0.27476 | 0.043796 | 0.046924 | 0.050052 | 0.201251 | 0.120959 | 0.105318 | 0.069864 | 0.069864 | 0.013556 | 0 | 0.012448 | 0.35836 | 1,878 | 63 | 97 | 29.809524 | 0.783402 | 0.36049 | 0 | 0.068966 | 0 | 0 | 0.005063 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.103448 | false | 0 | 0 | 0 | 0.137931 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8b1fe8e9f49cf4b8c60a865d5fd71e372cf52fa6 | 1,884 | py | Python | appengine/components/components/auth/testing.py | maruel/swarming | 8ab7568635fcbfd85a01884b64704fc2a1ac13c7 | [
"Apache-2.0"
] | 74 | 2015-04-01T02:35:15.000Z | 2021-12-17T22:10:56.000Z | appengine/components/components/auth/testing.py | maruel/swarming | 8ab7568635fcbfd85a01884b64704fc2a1ac13c7 | [
"Apache-2.0"
] | 123 | 2015-04-01T04:02:57.000Z | 2022-03-02T12:49:55.000Z | appengine/components/components/auth/testing.py | maruel/swarming | 8ab7568635fcbfd85a01884b64704fc2a1ac13c7 | [
"Apache-2.0"
] | 32 | 2015-04-03T01:40:47.000Z | 2021-11-13T15:20:13.000Z | # Copyright 2019 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Utilities for internal components.auth tests."""
import collections
import logging
from components.auth import api
from components.auth import config
from components.auth import delegation
from components.auth import model
from test_support import test_case
# Mocked subset of config tuple returned by config.ensure_configured().
_MockedConfig = collections.namedtuple('_MockedConfig', [
'USE_PROJECT_IDENTITIES'
])
class TestCase(test_case.TestCase):
"""Test case with a separate auth context and captured logging."""
# pylint: disable=unused-argument
def setUp(self):
super(TestCase, self).setUp()
api.reset_local_state()
self.logged_errors = []
self.mock(
logging, 'error',
lambda *args, **kwargs: self.logged_errors.append((args, kwargs)))
self.logged_warnings = []
self.mock(
logging, 'warning',
lambda *args, **kwargs: self.logged_warnings.append((args, kwargs)))
self.trusted_signers = {'user:token-server@example.com': self}
self.mock(delegation, 'get_trusted_signers', lambda: self.trusted_signers)
# Implements CertificateBundle interface, as used by get_trusted_signers.
def check_signature(self, blob, key_name, signature):
return True
def mock_config(self, **kwargs):
"""Mocks result of config.ensure_configured() call."""
self.mock(config, 'ensure_configured', lambda: _MockedConfig(**kwargs))
@staticmethod
def mock_group(group, members):
"""Creates new group entity in the datastore."""
members = [
model.Identity.from_bytes(m) if isinstance(m, basestring) else m
for m in members
]
model.AuthGroup(key=model.group_key(group), members=members).put()
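

# Hedged usage sketch (not part of the original module); the group name and
# identity below are illustrative:
#
#   class MyAuthTest(testing.TestCase):
#       def test_membership(self):
#           self.mock_group('admins', ['user:a@example.com'])
#           self.mock_config(USE_PROJECT_IDENTITIES=False)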
| 31.4 | 78 | 0.72293 | 242 | 1,884 | 5.508264 | 0.508264 | 0.052513 | 0.054014 | 0.072018 | 0.066017 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003841 | 0.170913 | 1,884 | 59 | 79 | 31.932203 | 0.849552 | 0.286624 | 0 | 0.057143 | 0 | 0 | 0.084977 | 0.038695 | 0 | 0 | 0 | 0 | 0 | 1 | 0.114286 | false | 0 | 0.2 | 0.028571 | 0.371429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8b23579389559c2d609641d615bb6c487bbe1594 | 19,397 | py | Python | pyNastran/op2/op2_interface/random_results.py | ACea15/pyNastran | 5ffc37d784b52c882ea207f832bceb6b5eb0e6d4 | [
"BSD-3-Clause"
] | 1 | 2021-08-02T09:49:24.000Z | 2021-08-02T09:49:24.000Z | pyNastran/op2/op2_interface/random_results.py | ACea15/pyNastran | 5ffc37d784b52c882ea207f832bceb6b5eb0e6d4 | [
"BSD-3-Clause"
] | 1 | 2021-06-07T16:33:59.000Z | 2021-06-07T16:33:59.000Z | pyNastran/op2/op2_interface/random_results.py | ACea15/pyNastran | 5ffc37d784b52c882ea207f832bceb6b5eb0e6d4 | [
"BSD-3-Clause"
] | 1 | 2021-10-14T03:52:44.000Z | 2021-10-14T03:52:44.000Z | from typing import Dict, Any
import numpy as np
class RandomObjects:
prefix = ''
postfix = ''
def __init__(self):
self.displacements = {}
self.velocities = {}
self.accelerations = {}
self.load_vectors = {}
self.spc_forces = {}
self.mpc_forces = {}
self.crod_force = {}
self.conrod_force = {}
self.ctube_force = {}
self.cbar_force = {}
self.cbeam_force = {}
self.cbush_stress = {}
self.cbush_strain = {}
self.crod_stress = {}
self.conrod_stress = {}
self.ctube_stress = {}
self.cbar_stress = {}
self.cbeam_stress = {}
self.crod_strain = {}
self.conrod_strain = {}
self.ctube_strain = {}
self.cbar_strain = {}
self.cbeam_strain = {}
self.ctetra_strain = {}
self.cpenta_strain = {}
self.chexa_strain = {}
self.ctetra_stress = {}
self.cpenta_stress = {}
self.chexa_stress = {}
self.celas1_stress = {}
self.celas2_stress = {}
self.celas3_stress = {}
self.celas4_stress = {}
self.celas1_strain = {}
self.celas2_strain = {}
self.celas3_strain = {}
self.celas4_strain = {}
self.celas1_force = {}
self.celas2_force = {}
self.celas3_force = {}
self.celas4_force = {}
self.ctria3_force = {}
self.ctria6_force = {}
self.ctriar_force = {}
self.cquad4_force = {}
self.cquad8_force = {}
self.cquadr_force = {}
self.ctria3_stress = {}
self.ctria6_stress = {}
self.cquad4_stress = {}
self.cquad8_stress = {}
self.cquadr_stress = {}
self.ctriar_stress = {}
self.ctria3_strain = {}
self.ctria6_strain = {}
self.cquad4_strain = {}
self.cquad8_strain = {}
self.cquadr_strain = {}
self.ctriar_strain = {}
self.cbend_stress = {}
self.cbend_strain = {}
self.cbend_force = {}
self.cshear_stress = {}
self.cshear_strain = {}
self.cshear_force = {}
self.cbush_force = {}
self.cdamp1_force = {}
self.cdamp2_force = {}
self.cdamp3_force = {}
self.cdamp4_force = {}
self.cvisc_force = {}
self.cquad4_composite_stress = {}
self.cquad8_composite_stress = {}
self.cquadr_composite_stress = {}
self.ctria3_composite_stress = {}
self.ctria6_composite_stress = {}
self.ctriar_composite_stress = {}
self.cquad4_composite_strain = {}
self.cquad8_composite_strain = {}
self.cquadr_composite_strain = {}
self.ctria3_composite_strain = {}
self.ctria6_composite_strain = {}
self.ctriar_composite_strain = {}
def get_table_types(self):
tables = [
'displacements', 'velocities', 'accelerations',
'load_vectors', 'spc_forces', 'mpc_forces',
'celas1_force', 'celas2_force', 'celas3_force', 'celas4_force',
'crod_force', 'conrod_force', 'ctube_force',
'cbar_force', 'cbeam_force',
'cquad4_force', 'cquad8_force', 'cquadr_force',
'ctria3_force', 'ctria6_force', 'ctriar_force',
'celas1_stress', 'celas2_stress', 'celas3_stress', 'celas4_stress',
'crod_stress', 'conrod_stress', 'ctube_stress',
'cbar_stress', 'cbeam_stress',
'ctria3_stress', 'ctriar_stress', 'ctria6_stress',
'cquadr_stress', 'cquad4_stress', 'cquad8_stress',
'ctetra_stress', 'cpenta_stress', 'chexa_stress',
'celas1_strain', 'celas2_strain', 'celas3_strain', 'celas4_strain',
'crod_strain', 'conrod_strain', 'ctube_strain',
'cbar_strain', 'cbeam_strain',
'ctria3_strain', 'ctriar_strain', 'ctria6_strain',
'cquadr_strain', 'cquad4_strain', 'cquad8_strain',
'ctetra_strain', 'cpenta_strain', 'chexa_strain',
'cquad4_composite_stress', 'cquad8_composite_stress', 'cquadr_composite_stress',
'ctria3_composite_stress', 'ctria6_composite_stress', 'ctriar_composite_stress',
'cquad4_composite_strain', 'cquad8_composite_strain', 'cquadr_composite_strain',
'ctria3_composite_strain', 'ctria6_composite_strain', 'ctriar_composite_strain',
'cbend_stress', 'cbend_strain', 'cbend_force',
'cbush_stress', 'cbush_strain',
'cshear_stress', 'cshear_strain', 'cshear_force',
'cbush_force',
'cdamp1_force', 'cdamp2_force', 'cdamp3_force', 'cdamp4_force',
'cvisc_force',
]
return [self.prefix + table + self.postfix for table in tables]
class PSDObjects():
"""storage class for the ATO objects"""
prefix = 'psds.'
postfix = ''
def __init__(self):
self.displacements = {}
self.velocities = {}
self.accelerations = {}
self.spc_forces = {}
self.load_vectors = {}
self.force = {}
self.stress = {}
self.strain = {}
def get_table_types(self):
tables = self._tables()
return [self.prefix + table + self.postfix for table in tables]
def _tables(self):
tables = [
'displacements', 'velocities', 'accelerations',
'spc_forces', 'load_vectors',
'force', 'stress', 'strain',
]
return tables
def get_results(self):
tables = self._tables()
results = {}
for table in tables:
result = getattr(self, table)
if result:
results[table] = result
return results
def get_stats(self, short=True):
msg = ''
psds_dict = self.get_results()
for result_type, slot in psds_dict.items():
npsds = len(slot)
if short:
msg += f'op2_results.psds.{result_type}; n={npsds}\n'
else:
ipsd = 0
msg += f'op2_results.psds.{result_type}:\n'
msg += f' # (subtitle, analysis_code, stress_strain_flag, node, dof)\n'
for key in slot:
msg += f' {key}\n'
if ipsd == 10:
msg += f' ... npsds={npsds}\n'
break
ipsd += 1
msg += '\n'
return msg
def get_psds_by_subtitles(self) -> Dict[Any, Any]:
psd_results = self.get_results()
if not psd_results:
return {}
from collections import defaultdict
psds_subtitle = defaultdict(dict)
for res_type, psds in psd_results.items():
for key, psd in psds.items():
(subtitle, nid, dof) = key
psds_subtitle[subtitle][(res_type, nid, dof)] = psd
return psds_subtitle
def plot(self):
psds_subtitle = self.get_psds_by_subtitles()
if not psds_subtitle:
return
import matplotlib.pyplot as plt
for subtitle, psds in psds_subtitle.items():
fig = plt.figure(1)
for (res_type, nid, dof), psd in psds.items():
freqs, psd = psd[:, 0], psd[:, 1]
                plt.plot(freqs, psd, label=f'(restype,nid,dof)=({res_type}, {nid}, {dof})')
plt.legend()
plt.show()
def write_f06(self, f06):
psds_subtitle = self.get_psds_by_subtitles()
if not psds_subtitle:
return
psd_type_map = {
'displacements' : 'DISP',
'velocities' : 'VELO',
'accelerations' : 'ACCE',
'load_vectors' : 'OLOAD',
'spc_forces' : 'SPCF',
'force' : 'EL FOR',
'stress' : 'EL STR',
'strain' : 'STRAIN',
}
from scipy.integrate import trapz
for subtitle, psds in psds_subtitle.items():
f06.write(subtitle + '\n')
f06.write('0 X Y - O U T P U T S U M M A R Y ( A U T O O R P S D F )\n')
f06.write('0 PLOT CURVE FRAME CURVE ID./ RMS NO. POSITIVE XMIN FOR XMAX FOR YMIN FOR X FOR YMAX FOR X FOR*\n')
f06.write(' TYPE TYPE NO. PANEL : GRID ID VALUE CROSSINGS ALL DATA ALL DATA ALL DATA YMIN ALL DATA YMAX\n')
#fig = plt.figure(1)
for (res_type, nid, dof), psd in psds.items():
try:
psd_type = psd_type_map[res_type]
except KeyError:
raise NotImplementedError(f'res_type = {res_type}')
#psd_type = analysis_code
#rms_value = 2.879461E+00
#no_crossings = 2.879461E+00
#no_crossings = np.nan
freqs, psd = psd[:, 0], psd[:, 1]
#plt.plot(freqs, psd, name=f'(restype,nid,dof)=({res_type}, {nid}, {dof})')
ymin = psd.min()
ymax = psd.max()
imin = np.where(psd == ymin)[0][0]
imax = np.where(psd == ymax)[0][0]
xmin = freqs[imin]
xmax = freqs[imax]
fmin = freqs.min()
fmax = freqs.max()
# If you want the RMS value, this is computed as RMS = SQRT(SUM(PSD*DF)) and,
# where DF is the spectral resolution, where you integarate from Fmin to Fmax,
# i.e. your lowest and highest analysis frequency of interest, respectively.
psd_f = trapz(psd, freqs)
rms = psd_f ** 0.5
if psd_f == 0.0:
# really this is nan, but that's Nastran for you
no_crossings = 0.0
else:
f2_psd_f = trapz(freqs**2 * psd, freqs)
no_crossings = (f2_psd_f / psd_f) ** 0.5 # Hz
#print('ymin=%s ymax=%s xmin=%s xmax=%s fmin=%s fmax=%s' % (ymin, ymax, xmin, xmax, fmin, fmax))
#'0 X Y - O U T P U T S U M M A R Y ( A U T O O R P S D F )'
#'0 PLOT CURVE FRAME CURVE ID./ RMS NO. POSITIVE XMIN FOR XMAX FOR YMIN FOR X FOR YMAX FOR X FOR*'
#' TYPE TYPE NO. PANEL : GRID ID VALUE CROSSINGS ALL DATA ALL DATA ALL DATA YMIN ALL DATA YMAX'
#' PSDF ACCE 0 9400703( 5) 2.879461E+00 8.191217E+02 2.000E+01 2.000E+03 4.476E-06 7.900E+01 1.474E+00 3.980E+01'
f06.write('0 \n')
f06.write(f' PSDF {psd_type:6s} 0 {nid:8d}( {dof:2d}) {rms:8.6E} {no_crossings:9.6E} {fmin:9.3E} {fmax:9.3E} {ymin:9.3E} {xmin:9.3E} {ymax:9.3E} {xmax:9.3E}\n')
#plt.legend()
#plt.show()
class AutoCorrelationObjects(RandomObjects):
"""storage class for the ATO objects"""
prefix = 'ato.'
#postfix = ''
class PowerSpectralDensityObjects(RandomObjects):
"""storage class for the PSD objects"""
prefix = 'psd.'
#postfix = ''
class RootMeansSquareObjects(RandomObjects):
"""storage class for the RMS objects"""
prefix = 'rms.'
#postfix = ''
class CumulativeRootMeansSquareObjects(RandomObjects):
"""storage class for the CRMS objects"""
prefix = 'crm.'
#postfix = ''
class NumberOfCrossingsObjects(RandomObjects):
"""storage class for the NO objects"""
prefix = 'no.'
#postfix = ''
class RAECONS:
"""storage class for the RAECONS objects"""
def __init__(self):
self.ctria3_strain = {}
self.cquad4_strain = {}
self.chexa_strain = {}
def get_table_types(self):
tables = [
'chexa_strain',
'ctria3_strain', 'cquad4_strain',
]
return ['RAECONS.' + table for table in tables]
class RASCONS:
"""storage class for the RASCONS objects"""
def __init__(self):
self.ctetra_stress = {}
self.cpenta_stress = {}
self.chexa_stress = {}
self.ctetra_strain = {}
self.cpenta_strain = {}
self.chexa_strain = {}
self.ctria3_stress = {}
self.ctria6_stress = {}
self.cquad4_stress = {}
self.cquad8_stress = {}
self.cquadr_stress = {}
self.ctriar_stress = {}
self.ctria3_strain = {}
self.ctria6_strain = {}
self.cquad4_strain = {}
self.cquad8_strain = {}
self.cquadr_strain = {}
self.ctriar_strain = {}
def get_table_types(self):
tables = [
# OES - isotropic CTRIA3/CQUAD4 stress
'ctria3_stress', 'ctriar_stress', 'ctria6_stress',
'cquadr_stress', 'cquad4_stress', 'cquad8_stress',
# OES - isotropic CTRIA3/CQUAD4 strain
'ctria3_strain', 'ctriar_strain', 'ctria6_strain',
'cquadr_strain', 'cquad4_strain', 'cquad8_strain',
'ctetra_stress', 'chexa_stress', 'cpenta_stress',
'ctetra_strain', 'chexa_strain', 'cpenta_strain',
]
return ['RASCONS.' + table for table in tables]
class RAPCONS:
"""storage class for the RAPCONS objects"""
def __init__(self):
self.cquad4_composite_stress = {}
self.cquad8_composite_stress = {}
self.cquadr_composite_stress = {}
self.ctria3_composite_stress = {}
self.ctria6_composite_stress = {}
self.ctriar_composite_stress = {}
def get_table_types(self):
tables = [
'cquad4_composite_stress',
'cquad8_composite_stress',
'cquadr_composite_stress',
'ctria3_composite_stress',
'ctria6_composite_stress',
'ctriar_composite_stress',
#'cquad4_composite_strain',
#'cquad8_composite_strain',
#'cquadr_composite_strain',
#'ctria3_composite_strain',
#'ctria6_composite_strain',
#'ctriar_composite_strain',
]
return ['RAPCONS.' + table for table in tables]
class RAPEATC:
"""storage class for the RAPEATC objects"""
def __init__(self):
self.cquad4_composite_stress = {}
self.cquad8_composite_stress = {}
self.cquadr_composite_stress = {}
self.ctria3_composite_stress = {}
self.ctria6_composite_stress = {}
self.ctriar_composite_stress = {}
def get_table_types(self):
tables = [
'cquad4_composite_stress',
'cquad8_composite_stress',
'cquadr_composite_stress',
'ctria3_composite_stress',
'ctria6_composite_stress',
'ctriar_composite_stress',
#'cquad4_composite_strain',
#'cquad8_composite_strain',
#'cquadr_composite_strain',
#'ctria3_composite_strain',
#'ctria6_composite_strain',
#'ctriar_composite_strain',
]
return ['RAPEATC.' + table for table in tables]
class RAFCONS:
"""storage class for the RAFCONS objects"""
def __init__(self):
self.cbar_force = {}
self.cquad4_force = {}
self.cbush_force = {}
def get_table_types(self):
tables = [
'cbar_force',
'cquad4_force',
'cbush_force',
]
return ['RAFCONS.' + table for table in tables]
class RAGCONS:
"""storage class for the RAGCONS objects"""
def __init__(self):
self.grid_point_forces = {}
def get_table_types(self):
tables = [
'grid_point_forces',
]
return ['RAGCONS.' + table for table in tables]
class RAGEATC:
"""storage class for the RAGEATC objects"""
def __init__(self):
self.grid_point_forces = {}
def get_table_types(self):
tables = [
'grid_point_forces',
]
return ['RAGEATC.' + table for table in tables]
class RANCONS:
"""storage class for the RANCONS objects"""
def __init__(self):
self.cbar_strain_energy = {}
self.cbush_strain_energy = {}
self.chexa_strain_energy = {}
self.ctria3_strain_energy = {}
self.cquad4_strain_energy = {}
def get_table_types(self):
tables = [
'cbar_strain_energy', 'cbush_strain_energy',
'chexa_strain_energy',
'ctria3_strain_energy', 'cquad4_strain_energy',
]
return ['RANCONS.' + table for table in tables]
class RADEFFM:
"""storage class for the RADEFFM objects"""
def __init__(self):
self.eigenvectors = {}
def get_table_types(self):
tables = [
'eigenvectors',
]
return ['RADEFFM.' + table for table in tables]
class RADCONS:
    """storage class for the RADCONS objects"""
def __init__(self):
self.eigenvectors = {}
def get_table_types(self):
tables = [
'eigenvectors',
]
return ['RADCONS.' + table for table in tables]
class RADEATC:
"""storage class for the RADEATC objects"""
def __init__(self):
self.eigenvectors = {}
def get_table_types(self):
tables = [
'eigenvectors',
]
return ['RADEATC.' + table for table in tables]
class RANEATC:
"""storage class for the RANEATC objects"""
def __init__(self):
self.cbar_strain_energy = {}
self.cbush_strain_energy = {}
self.chexa_strain_energy = {}
self.ctria3_strain_energy = {}
self.cquad4_strain_energy = {}
def get_table_types(self):
tables = [
'cbar_strain_energy', 'cbush_strain_energy',
'chexa_strain_energy',
'ctria3_strain_energy', 'cquad4_strain_energy',
]
return ['RANEATC.' + table for table in tables]
class ROUGV1:
"""storage class for the ROUGV1 objects"""
def __init__(self):
self.displacements = {}
self.velocities = {}
self.accelerations = {}
self.eigenvectors = {}
def get_table_types(self):
tables = [
'displacements', 'velocities', 'accelerations', 'eigenvectors',
]
return ['ROUGV1.' + table for table in tables]
class RAFEATC:
"""storage class for the RAFEATC objects"""
def __init__(self):
self.cbar_force = {}
self.cquad4_force = {}
self.cbush_force = {}
def get_table_types(self):
tables = [
'cbar_force',
'cquad4_force',
'cbush_force',
]
return ['RAFEATC.' + table for table in tables]
class RASEATC:
"""storage class for the RASEATC objects"""
def __init__(self):
self.chexa_stress = {}
self.cquad4_stress = {}
def get_table_types(self):
tables = [
'chexa_stress',
'cquad4_stress',
]
return ['RASEATC.' + table for table in tables]
class RAEEATC:
"""storage class for the RAEEATC objects"""
def __init__(self):
self.chexa_strain = {}
self.ctria3_strain = {}
self.cquad4_strain = {}
def get_table_types(self):
tables = [
'chexa_strain',
'ctria3_strain', 'cquad4_strain',
]
return ['RAEEATC.' + table for table in tables]
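

# Hedged usage sketch (not part of the original module): each storage class
# prefixes its table names with its own key, e.g.
#
#   AutoCorrelationObjects().get_table_types()[:2]
#   -> ['ato.displacements', 'ato.velocities']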
| 32.436455 | 191 | 0.546837 | 2,062 | 19,397 | 4.876334 | 0.121242 | 0.047737 | 0.031328 | 0.037593 | 0.567976 | 0.533764 | 0.483839 | 0.460269 | 0.453307 | 0.440975 | 0 | 0.022587 | 0.338094 | 19,397 | 597 | 192 | 32.490787 | 0.760573 | 0.117905 | 0 | 0.486607 | 0 | 0.008929 | 0.191655 | 0.038072 | 0 | 0 | 0 | 0 | 0 | 1 | 0.09375 | false | 0 | 0.011161 | 0 | 0.232143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8b24c9cba2b46c498766a19c58c260ffa0ca6a3b | 6,656 | py | Python | src/data/prepare_language_modelling.py | abrinkmann/productCategorization | 75732e4b1c9da941a793db80b5fe2245bae45e87 | [
"MIT"
] | 5 | 2021-06-24T12:12:17.000Z | 2022-01-22T08:19:30.000Z | src/data/prepare_language_modelling.py | abrinkmann/productCategorization | 75732e4b1c9da941a793db80b5fe2245bae45e87 | [
"MIT"
] | null | null | null | src/data/prepare_language_modelling.py | abrinkmann/productCategorization | 75732e4b1c9da941a793db80b5fe2245bae45e87 | [
"MIT"
] | 1 | 2022-03-11T16:00:13.000Z | 2022-03-11T16:00:13.000Z | import logging
import os
from pathlib import Path
import click
import pandas as pd
from src.data.preprocessing import preprocess
@click.command()
@click.option('--dataset_name', help='Dataset which you like to prepare for language modelling')
@click.option('--additional_ds_path', help='Additional dataset for language modelling')
@click.option('--additional_ds_suffix', help='Suffix to identify the additional ds')
def main(dataset_name, additional_ds_path, additional_ds_suffix):
""" Runs data processing scripts to turn raw data from (../raw) into
cleaned data ready to be analyzed (saved in ../processed).
"""
dataset = load_dataset(dataset_name)
#Check if additional dataset information is provided
if not (additional_ds_path is None) and not (additional_ds_suffix is None):
df_additional_ds = pd.read_csv(additional_ds_path, sep=';')
else:
df_additional_ds = None
generate_datasets_for_language_modelling(dataset, dataset_name, df_additional_ds, additional_ds_suffix)
def load_dataset(dataset_name):
"""Load dataset for the given experiments"""
logger = logging.getLogger(__name__)
data_dir = os.environ['DATA_DIR']
data_dir = Path(data_dir)
splits = ['train', 'validate']
dataset = {}
for split in splits:
relative_path = 'data/processed/{}/split/raw/{}_data_{}.pkl'.format(dataset_name, split, dataset_name)
file_path = data_dir.joinpath(relative_path)
dataset[split] = pd.read_pickle(file_path)
logger.info('Loaded dataset {}!'.format(dataset_name))
return dataset
def generate_datasets_for_language_modelling(dataset, dataset_name, df_additional_ds, additional_ds_suffix):
logger = logging.getLogger(__name__)
data_dir = os.environ['DATA_DIR']
data_dir = Path(data_dir)
configurations = []
config_1 = {'category': True, 'category_reverse': False, 'description': True,
'multiple_rows': True, 'additional_ds': True}
configurations.append(config_1)
for config in configurations:
# Make sure that an additional dataset is properly provided if requested
if df_additional_ds is None and additional_ds_suffix:
config['additional_ds'] = False
generate_and_store_single_dataset_for_language_modelling(dataset, dataset_name, data_dir, config, df_additional_ds, additional_ds_suffix)
def generate_and_store_single_dataset_for_language_modelling(dataset, dataset_name, data_dir, config, df_additional_ds, additional_ds_suffix):
logger = logging.getLogger(__name__)
suffix = 'title'
for key in config:
if config[key]:
suffix = '{}_{}'.format(suffix, key)
if not (additional_ds_suffix is None) and config['additional_ds']:
suffix = '{}_{}'.format(suffix, additional_ds_suffix)
for split in dataset:
relative_path = 'data/processed/{}/language-modelling/{}_language_modelling_{}_with_{}.txt'.format(dataset_name, split, dataset_name, suffix)
file_path = data_dir.joinpath(relative_path)
with open(file_path, 'w') as file:
for index, row in dataset[split].iterrows():
#preprocess values
prep_title = preprocess(row['title'])
line = '{}'.format(prep_title)
if config['category']:
categories = row['path_list'].split('>')
categories = [value.split('_')[1] for value in categories]
categories = [preprocess(value) for value in categories]
new_line = prepare_category(config,categories,line)
if config['multiple_rows']:
write_dataset_to_file(file,new_line)
else:
line = new_line
if config['description']:
new_line = prepare_description(row['description'], line)
if config['multiple_rows']:
write_dataset_to_file(file, new_line)
else:
line = new_line
if not config['multiple_rows']:
write_dataset_to_file(file, line)
if split == 'train' and config['additional_ds']:
for index, row in df_additional_ds.iterrows():
line = preprocess(row['Title'])
categories = []
if row['Category'] is not None and type(row['Category']) is str:
categories.append(row['Category'])
if row['Breadcrumb'] is not None and type(row['Breadcrumb']) is str:
categories.append(row['Breadcrumb'])
if row['BreadcrumbList'] is not None and type(row['BreadcrumbList']) is str:
categories.append(row['BreadcrumbList'])
if len(categories) > 0 and config['category']:
new_line = prepare_category(config, categories, line)
if config['multiple_rows']:
write_dataset_to_file(file, new_line)
else:
line = new_line
if type(row['Description']) is str and config['description']:
new_line = prepare_description(row['Description'], line)
if config['multiple_rows']:
write_dataset_to_file(file, new_line)
else:
line = new_line
file.write('{}\n'.format(line))
logger.info('File {} created for Language Modelling!'.format(relative_path))
def prepare_category(config, categories, line):
if config['category_reverse']:
categories.reverse()
prep_catgories = ' '.join(categories)
new_line = '{} - {}'.format(line, prep_catgories)
return new_line
def prepare_description(description, line):
description_values = description.split('.')
preprocessed_description_values = []
for value in description_values:
if len(value) > 4:
preprocessed_description_values.append(preprocess(value))
new_line = '{} - {}'.format(line, '. '.join(preprocessed_description_values))
return new_line
def write_dataset_to_file(file, line):
line = '{}\n'.format(line)
file.write(line)
if __name__ == '__main__':
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
main() | 40.585366 | 149 | 0.614633 | 745 | 6,656 | 5.218792 | 0.185235 | 0.083333 | 0.050926 | 0.027778 | 0.397634 | 0.379115 | 0.325874 | 0.273148 | 0.26286 | 0.26286 | 0 | 0.001048 | 0.283053 | 6,656 | 164 | 150 | 40.585366 | 0.813705 | 0.045373 | 0 | 0.254237 | 0 | 0 | 0.137326 | 0.02165 | 0 | 0 | 0 | 0 | 0 | 1 | 0.059322 | false | 0 | 0.050847 | 0 | 0.135593 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8b25f4fb270018c7e67c4a8b1dfad4619b36e0e5 | 1,591 | py | Python | demo_01/01_Sensors.py | vcubells/iot_supermercado | 9b850fcd971fd5053515cc16c0834bf836af6155 | [
"MIT"
] | 3 | 2019-10-29T14:27:35.000Z | 2022-01-20T23:29:16.000Z | demo_01/01_Sensors.py | vcubells/iot_supermercado | 9b850fcd971fd5053515cc16c0834bf836af6155 | [
"MIT"
] | 2 | 2019-10-28T03:30:12.000Z | 2021-06-02T00:31:56.000Z | demo_02/01_Sensors.py | vcubells/iot_supermercado | 9b850fcd971fd5053515cc16c0834bf836af6155 | [
"MIT"
] | 1 | 2019-10-31T17:24:49.000Z | 2019-10-31T17:24:49.000Z | import RPi.GPIO as GPIO
import time
import pyrebase
import subprocess
from datetime import datetime
from pprint import pprint
import sys
import time
import Adafruit_DHT
# Configuracion del tipo de sensor DHT
sensor = Adafruit_DHT.DHT11
#humedad
pin = 23
#button camera
chanel = 10
#led_pin
led_pin=12
#button presencia alimento
chanelFood = 8
flagFood = False
GPIO.setwarnings(False) # Ignore warning for now
GPIO.setmode(GPIO.BOARD) # Use physical pin numbering
GPIO.setup(chanel, GPIO.IN, pull_up_down=GPIO.PUD_DOWN) # Set pin 10 to be an input pin and set initial value to be pulled low (off)
GPIO.setup(led_pin, GPIO.OUT)
def callbackCamera(chanel):
if GPIO.input(chanel) == GPIO.HIGH:
subprocess.call(['fswebcam -r 640x480 --no-banner /home/pi/Desktop/image.jpg', '-1'], shell=True)
#delete photo.
#subprocess.call(['rm /home/pi/Desktop/image.jpg', '-1'], shell=True)
def humCallback(pin):
humedad, temperatura = Adafruit_DHT.read_retry(sensor, pin)
if temperatura >21:
GPIO.output(led_pin, GPIO.HIGH)
else:
GPIO.output(led_pin, GPIO.LOW)
def button_callback():
print(flagFood)
if GPIO.input(8) == 0 and flagFood == False:
print("Slot 1: Vacio")
self.flagFood = True
time.sleep(1)
if GPIO.input(8) == 1 and flagFood == False:
flagFood = True
print("Slot 1: Coca-Cola")
time.sleep(1)
while True:
humCallback(pin)
button_callback()
| 27.912281 | 145 | 0.638592 | 215 | 1,591 | 4.660465 | 0.465116 | 0.02994 | 0.02994 | 0.035928 | 0.101796 | 0.061876 | 0.061876 | 0.061876 | 0 | 0 | 0 | 0.024681 | 0.261471 | 1,591 | 56 | 146 | 28.410714 | 0.828085 | 0.184789 | 0 | 0.097561 | 0 | 0 | 0.069984 | 0.020218 | 0 | 0 | 0 | 0 | 0 | 1 | 0.073171 | false | 0 | 0.219512 | 0 | 0.292683 | 0.097561 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8b27a2fec3b8fe8db08bac8d780a90de627046a6 | 1,152 | py | Python | app.py | w-lindvall/weather_check | 78881a0901da8f363b4e53378c4d70cdba02263f | [
"MIT"
] | null | null | null | app.py | w-lindvall/weather_check | 78881a0901da8f363b4e53378c4d70cdba02263f | [
"MIT"
] | null | null | null | app.py | w-lindvall/weather_check | 78881a0901da8f363b4e53378c4d70cdba02263f | [
"MIT"
] | null | null | null | import datetime
from time import sleep
import dht11
from picamera import PiCamera
import RPi.GPIO as GPIO
# initialize GPIO
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.cleanup()
camera = PiCamera()
check = dht11.DHT11(pin=17)
# set output pin number to match setup
while True:
result = check.read()
if result.is_valid():
camera.start_preview()
sleep(5)
# wait 5 seconds to allow for camera to correct exposure
camera.annotate_text = (datetime.datetime.now().strftime('%d-%m-%y %H:%M')
+ '\n'
+ '=' * 20
+ '\n'
+ '-{} C'.format(result.temperature)
+ '\n'
+ '-{}%'.format(result.humidity))
camera.capture(('/home/pi/Desktop/{}.jpg'
.format('weather_check-' +
datetime.datetime.now().strftime(
'%d_%m_%y-%H_%M'))))
camera.stop_preview()
sleep(255)
# wait about 5 minutes until next loop
| 31.135135 | 82 | 0.490451 | 119 | 1,152 | 4.680672 | 0.579832 | 0.050269 | 0.068223 | 0.096948 | 0.114901 | 0.114901 | 0.114901 | 0.114901 | 0.114901 | 0 | 0 | 0.022792 | 0.390625 | 1,152 | 36 | 83 | 32 | 0.770655 | 0.125 | 0 | 0.107143 | 0 | 0 | 0.080758 | 0.022931 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.178571 | 0 | 0.178571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8b2a4d307a290f21bfe4f3c97478d6f00ddefec6 | 4,621 | py | Python | test-backend.py | Drazcmd/Comp431BackendFinal | 767c79b1e00172ce1be895ac01af832a4684d516 | [
"MIT"
] | null | null | null | test-backend.py | Drazcmd/Comp431BackendFinal | 767c79b1e00172ce1be895ac01af832a4684d516 | [
"MIT"
] | null | null | null | test-backend.py | Drazcmd/Comp431BackendFinal | 767c79b1e00172ce1be895ac01af832a4684d516 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import requests, json, sys, pprint

pp = pprint.PrettyPrinter(indent=4)


class cc:
    HEADER = '\033[95m'
    BLUE = '\033[94m'
    GREEN = '\033[92m'
    YELLOW = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'


def get(endpoint):
    url = config["backend"] + endpoint
    r = requests.get(url)
    if r.status_code != 200:
        print(cc.FAIL + ("ERROR: For GET %s received %d response code " % (endpoint, r.status_code)) + str(r.text) + cc.ENDC)
        sys.exit(1)
    return json.loads(r.text)


def put(endpoint):
    url = config["backend"] + endpoint
    r = requests.put(url)
    if r.status_code != 200:
        print(cc.FAIL + ("ERROR: For PUT %s received %d response code " % (endpoint, r.status_code)) + str(r.text) + cc.ENDC)
        sys.exit(1)
    return json.loads(r.text)


def getArticles(articleId=None):
    endpoint = '/articles'
    if articleId is not None:
        endpoint = (endpoint + "/%d") % articleId
    return checkArticles(get(endpoint))


def checkArticles(result):
    if "articles" not in result:
        print(cc.FAIL + "ERROR: GET /articles did not have \"articles\" entry" + cc.ENDC)
        print(result)
        return []
    else:
        return result["articles"]


def addArticle(body):
    r = requests.post(config["backend"] + "/article", json={'text': body})
    return checkArticles(json.loads(r.text))


def msg(message):
    print(cc.BLUE + message + cc.ENDC)


################################################

if len(sys.argv) < 2:
    print("usage: %s README.json" % sys.argv[0])
    sys.exit(1)

with open(sys.argv[1], 'r') as f:
    config = json.loads(f.read())
for key in config.keys():
    if config[key].endswith('/'):
        config[key] = (config[key])[:-1]

print(cc.YELLOW + ("Checking for %s site %s" % (config['netid'], config['backend'])) + cc.ENDC)

######################################
# initial GET
r = get("/")
msg("GET /")
pp.pprint(r)

# GET /articles
articles = getArticles()
msg("GET /articles")
pp.pprint(articles)
if len(articles) < 3:
    print(cc.FAIL + ("FAIL: Expected at least 3 articles from GET /articles but found %d " % len(articles)) + cc.ENDC)
else:
    print(cc.GREEN + ("OK: GET /articles returned %d articles, expecting at least 3" % len(articles)) + cc.ENDC)

######################################
# add a new article
body = "Hello World!"
newArticles = addArticle(body)
msg("POST /article -d " + body)
pp.pprint(newArticles)
if len(newArticles) != 1:  # the original used `is not 1`, which relies on int identity; use `!=`
    print(cc.FAIL + ("FAIL: Expected 1 new article added but found %d articles" % len(newArticles)) + cc.ENDC)
else:
    newArticleId = newArticles[0]['id']
    print(cc.GREEN + ("OK: POST /article returned one new article with id=%d" % newArticleId) + cc.ENDC)
    if newArticles[0]['text'] != body:
        print(cc.FAIL + ("FAIL: Article did not have the correct body message: %s vs %s" % (newArticles[0]['text'], body)) + cc.ENDC)
    else:
        print(cc.GREEN + ("OK: article body was correct") + cc.ENDC)

######################################
# get that new article by itself
getNewArticle = getArticles(newArticleId)
msg("GET /articles/%d" % newArticleId)
pp.pprint(getNewArticle)
if len(getNewArticle) != 1:
    print(cc.FAIL + ("FAIL: Expected to get the one article that was added but found %d articles" % len(getNewArticle)) + cc.ENDC)
else:
    print(cc.GREEN + ("OK: GET /articles/%d got the new article" % newArticleId) + cc.ENDC)
    if getNewArticle[0]['text'] != newArticles[0]['text'] or newArticles[0]['text'] != body:
        print(cc.FAIL + ("FAIL: Article did not have the correct text message: %s" % getNewArticle[0]['text']) + cc.ENDC)
    else:
        print(cc.GREEN + ("OK: article text was correct") + cc.ENDC)

######################################
# confirm that we only added one article
articles2 = getArticles()
msg("GET /articles")
pp.pprint(articles2)
if len(articles2) != len(articles) + 1:
    print(cc.FAIL + ("FAIL: Expected one new article added but found %d + 1 = %d" % (len(articles), len(articles2))) + cc.ENDC)
else:
    print(cc.GREEN + ("OK: GET /articles returned one additional article") + cc.ENDC)

######################################
print(cc.YELLOW + ('Testing stubs...') + cc.ENDC)
# Stubs
for e in ["/headlines", "/headlines/" + config['netid'], "/email", "/email/" + config['netid'], "/zipcode", "/zipcode/" + config['netid'], "/avatars", "/avatars/" + config['netid']]:
    msg("GET " + e)
    pp.pprint(get(e))

## done
print(cc.YELLOW + ('COMPLETE!') + cc.ENDC)
| 35.007576 | 178 | 0.591863 | 615 | 4,621 | 4.44065 | 0.226016 | 0.0487 | 0.03625 | 0.032955 | 0.332845 | 0.318198 | 0.254486 | 0.22446 | 0.18052 | 0.167704 | 0 | 0.018853 | 0.196494 | 4,621 | 131 | 179 | 35.274809 | 0.716671 | 0.031162 | 0 | 0.185567 | 0 | 0 | 0.273759 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.061856 | false | 0 | 0.010309 | 0 | 0.226804 | 0.298969 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8b2b29b944bd51adeeab734c224ec38b5ca49934 | 1,553 | py | Python | robustbench/eval.py | dedeswim/robustbench | afdaaab9ddd89bc689420b6a9ee7a48d98defc4d | [
"MIT"
] | 1 | 2020-11-14T10:18:38.000Z | 2020-11-14T10:18:38.000Z | robustbench/eval.py | GeoffNN/robustbench | 34e5f426266bf78d72e149efdade7f32622aff19 | [
"MIT"
] | null | null | null | robustbench/eval.py | GeoffNN/robustbench | 34e5f426266bf78d72e149efdade7f32622aff19 | [
"MIT"
] | null | null | null | import argparse
import torch
from autoattack import AutoAttack  # missing in the original; AutoAttack is used below

from robustbench.utils import load_model, clean_accuracy
from robustbench.data import load_cifar10


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name', type=str, default='Carmon2019Unlabeled')
    parser.add_argument('--norm', type=str, default='Linf')
    parser.add_argument('--eps', type=float, default=8 / 255)
    parser.add_argument('--n_ex', type=int, default=100, help='number of examples to evaluate on')
    parser.add_argument('--batch_size', type=int, default=500, help='batch size for evaluation')
    parser.add_argument('--data_dir', type=str, default='./data', help='where to store downloaded datasets')
    parser.add_argument('--model_dir', type=str, default='./models', help='where to store downloaded models')
    parser.add_argument('--device', type=str, default='cuda:0', help='device to use for computations')
    args = parser.parse_args()
    return args


if __name__ == '__main__':
    args = parse_args()
    device = torch.device(args.device)

    x_test, y_test = load_cifar10(args.n_ex, args.data_dir)
    x_test, y_test = x_test.to(device), y_test.to(device)

    model = load_model(args.model_name, args.model_dir, args.norm).to(device).eval()

    acc = clean_accuracy(model, x_test, y_test, batch_size=args.batch_size, device=device)
    print('Clean accuracy: {:.2%}'.format(acc))

    adversary = AutoAttack(model, norm=args.norm, eps=args.eps, version='standard', device=device)
    x_adv = adversary.run_standard_evaluation(x_test, y_test)
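
# Example invocation (illustrative; the script file name is hypothetical and the
# flag values simply restate the argparse defaults above, with eps = 8/255):
#
#   python eval.py --model_name Carmon2019Unlabeled --norm Linf --eps 0.0314 --n_ex 100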
| 43.138889 | 109 | 0.722473 | 224 | 1,553 | 4.790179 | 0.330357 | 0.067102 | 0.126747 | 0.037279 | 0.048462 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014859 | 0.13329 | 1,553 | 35 | 110 | 44.371429 | 0.782318 | 0 | 0 | 0 | 0 | 0 | 0.196394 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0 | 0.153846 | 0 | 0.230769 | 0.038462 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8b2bd46ff544bd973934ac2848e266641de5778e | 12,943 | py | Python | neutron_lbaas/tests/unit/drivers/octavia/test_octavia_driver.py | bdrich/neutron-lbaas | b4711abfe0207c4fdd5d7fb7ecbf017e753abbfd | [
"Apache-2.0"
] | null | null | null | neutron_lbaas/tests/unit/drivers/octavia/test_octavia_driver.py | bdrich/neutron-lbaas | b4711abfe0207c4fdd5d7fb7ecbf017e753abbfd | [
"Apache-2.0"
] | null | null | null | neutron_lbaas/tests/unit/drivers/octavia/test_octavia_driver.py | bdrich/neutron-lbaas | b4711abfe0207c4fdd5d7fb7ecbf017e753abbfd | [
"Apache-2.0"
] | null | null | null | # Copyright 2015, Banashankar Veerad, Copyright IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg

from neutron import context

from neutron_lbaas.drivers.octavia import driver
from neutron_lbaas.services.loadbalancer import data_models
from neutron_lbaas.tests.unit.db.loadbalancer import test_db_loadbalancerv2


class ManagerTest(object):
    def __init__(self, parent, manager, mocked_req):
        self.parent = parent
        self.context = parent.context
        self.driver = parent.driver
        self.manager = manager
        self.mocked_req = mocked_req

    def create(self, model, url, args):
        self.manager.create(self.context, model)
        self.mocked_req.post.assert_called_with(url, args)

    def update(self, old_model, model, url, args):
        self.manager.update(self.context, old_model, model)
        self.mocked_req.put.assert_called_with(url, args)

    def delete(self, model, url):
        self.manager.delete(self.context, model)
        self.mocked_req.delete.assert_called_with(url)

    # TODO(Banashankar): Complete refresh function. Need more info.
    def refresh(self):
        pass

    # TODO(Banashankar): Complete stats function. Need more info.
    def stats(self):
        pass


class BaseOctaviaDriverTest(test_db_loadbalancerv2.LbaasPluginDbTestCase):

    # Copied from Brocade's test code.
    def _create_fake_models(self):
        # This id is used for all the entities.
        id = 'test_id'
        lb = data_models.LoadBalancer(id=id)
        sni_container = data_models.SNI(listener_id=id)
        listener = data_models.Listener(id=id, loadbalancer=lb,
                                        sni_containers=[sni_container])
        pool = data_models.Pool(id=id, listener=listener)
        member = data_models.Member(id=id, pool=pool)
        hm = data_models.HealthMonitor(id=id, pool=pool)
        lb.listeners = [listener]
        listener.default_pool = pool
        pool.members = [member]
        pool.healthmonitor = hm
        return lb

    def setUp(self):
        super(BaseOctaviaDriverTest, self).setUp()
        self.context = context.get_admin_context()
        self.plugin = mock.Mock()
        self.driver = driver.OctaviaDriver(self.plugin)
        # mock of the rest call.
        self.driver.req = mock.Mock()
        self.lb = self._create_fake_models()


class TestOctaviaDriver(BaseOctaviaDriverTest):
    def test_allocates_vip(self):
        self.addCleanup(cfg.CONF.clear_override,
                        'allocates_vip', group='octavia')
        cfg.CONF.set_override('allocates_vip', True, group='octavia')
        test_driver = driver.OctaviaDriver(self.plugin)
        self.assertTrue(test_driver.load_balancer.allocates_vip)

    def test_load_balancer_ops(self):
        m = ManagerTest(self, self.driver.load_balancer,
                        self.driver.req)

        lb = self.lb

        # urls for assert test.
        lb_url = '/v1/loadbalancers'
        lb_url_id = '/v1/loadbalancers/' + lb.id

        # Create LB test.
        # args for create assert.
        args = {
            'id': lb.id,
            'name': lb.name,
            'description': lb.description,
            'enabled': lb.admin_state_up,
            'project_id': lb.tenant_id,
            'vip': {
                'subnet_id': lb.vip_subnet_id,
                'ip_address': lb.vip_address,
                'port_id': lb.vip_port_id,
            }
        }
        m.create(lb, lb_url, args)

        # Update LB test.
        # args for update assert.
        args = {
            'name': lb.name,
            'description': lb.description,
            'enabled': lb.admin_state_up,
        }
        m.update(lb, lb, lb_url_id, args)

        # Delete LB test.
        m.delete(lb, lb_url_id)

        # TODO(Banashankar): refresh and stats functions are not yet done.
        # m.refresh()
        # m.stats()

    def test_listener_ops(self):
        m = ManagerTest(self, self.driver.listener,
                        self.driver.req)

        listener = self.lb.listeners[0]

        # urls for assert test.
        list_url = '/v1/loadbalancers/%s/listeners' % listener.loadbalancer.id
        list_url_id = list_url + '/%s' % (listener.id)

        # Create listener test.
        # args for create and update assert.
        sni_containers = [sni.tls_container_id
                          for sni in listener.sni_containers]
        args = {
            'id': listener.id,
            'name': listener.name,
            'description': listener.description,
            'enabled': listener.admin_state_up,
            'protocol': listener.protocol,
            'protocol_port': listener.protocol_port,
            'connection_limit': listener.connection_limit,
            'tls_certificate_id': listener.default_tls_container_id,
            'sni_containers': sni_containers,
            'project_id': listener.tenant_id
        }
        m.create(listener, list_url, args)

        # Update listener test.
        del args['id']
        del args['project_id']
        m.update(listener, listener, list_url_id, args)

        # Delete listener.
        m.delete(listener, list_url_id)

    def test_pool_ops(self):
        m = ManagerTest(self, self.driver.pool,
                        self.driver.req)

        pool = self.lb.listeners[0].default_pool

        # urls for assert test.
        pool_url = '/v1/loadbalancers/%s/listeners/%s/pools' % (
            pool.listener.loadbalancer.id,
            pool.listener.id)
        pool_url_id = pool_url + "/%s" % pool.id

        # Test create pool.
        # args for create and update assert.
        args = {
            'id': pool.id,
            'name': pool.name,
            'description': pool.description,
            'enabled': pool.admin_state_up,
            'protocol': pool.protocol,
            'lb_algorithm': pool.lb_algorithm,
            'project_id': pool.tenant_id
        }
        if pool.session_persistence:
            args['session_persistence'] = {
                'type': pool.session_persistence.type,
                'cookie_name': pool.session_persistence.cookie_name,
            }
        m.create(pool, pool_url, args)

        # Test update pool.
        del args['id']
        del args['project_id']
        m.update(pool, pool, pool_url_id, args)

        # Test pool delete.
        m.delete(pool, pool_url_id)

    def test_member_ops(self):
        m = ManagerTest(self, self.driver.member,
                        self.driver.req)

        member = self.lb.listeners[0].default_pool.members[0]

        # urls for assert.
        mem_url = '/v1/loadbalancers/%s/listeners/%s/pools/%s/members' % (
            member.pool.listener.loadbalancer.id,
            member.pool.listener.id,
            member.pool.id)
        mem_url_id = mem_url + "/%s" % member.id

        # Test create member.
        # args for create assert.
        args = {
            'id': member.id,
            'enabled': member.admin_state_up,
            'ip_address': member.address,
            'protocol_port': member.protocol_port,
            'weight': member.weight,
            'subnet_id': member.subnet_id,
            'project_id': member.tenant_id
        }
        m.create(member, mem_url, args)

        # Test member update.
        # args for update assert.
        args = {
            'enabled': member.admin_state_up,
            'protocol_port': member.protocol_port,
            'weight': member.weight,
        }
        m.update(member, member, mem_url_id, args)

        # Test member delete.
        m.delete(member, mem_url_id)

    def test_health_monitor_ops(self):
        m = ManagerTest(self, self.driver.health_monitor,
                        self.driver.req)

        hm = self.lb.listeners[0].default_pool.healthmonitor

        # urls for assert.
        hm_url = '/v1/loadbalancers/%s/listeners/%s/pools/%s/healthmonitor' % (
            hm.pool.listener.loadbalancer.id,
            hm.pool.listener.id,
            hm.pool.id)

        # Test HM create.
        # args for create and update assert.
        args = {
            'type': hm.type,
            'delay': hm.delay,
            'timeout': hm.timeout,
            'rise_threshold': hm.max_retries,
            'fall_threshold': hm.max_retries,
            'http_method': hm.http_method,
            'url_path': hm.url_path,
            'expected_codes': hm.expected_codes,
            'enabled': hm.admin_state_up,
            'project_id': hm.tenant_id
        }
        m.create(hm, hm_url, args)

        # Test HM update.
        del args['project_id']
        m.update(hm, hm, hm_url, args)

        # Test HM delete.
        m.delete(hm, hm_url)


class TestThreadedDriver(BaseOctaviaDriverTest):

    def setUp(self):
        super(TestThreadedDriver, self).setUp()
        cfg.CONF.set_override('request_poll_interval', 1, group='octavia')
        cfg.CONF.set_override('request_poll_timeout', 5, group='octavia')
        self.driver.req.get = mock.MagicMock()
        self.succ_completion = mock.MagicMock()
        self.fail_completion = mock.MagicMock()
        self.context = mock.MagicMock()
        ctx_patcher = mock.patch('neutron.context.get_admin_context',
                                 return_value=self.context)
        ctx_patcher.start()
        self.addCleanup(ctx_patcher.stop)
        self.driver.load_balancer.successful_completion = (
            self.succ_completion)
        self.driver.load_balancer.failed_completion = self.fail_completion

    def test_thread_op_goes_active(self):
        self.driver.req.get.side_effect = [
            {'provisioning_status': 'PENDING_CREATE'},
            {'provisioning_status': 'ACTIVE'}
        ]
        driver.thread_op(self.driver.load_balancer, self.lb)
        self.succ_completion.assert_called_once_with(self.context, self.lb,
                                                     delete=False)
        self.assertEqual(0, self.fail_completion.call_count)

    def test_thread_op_goes_deleted(self):
        self.driver.req.get.side_effect = [
            {'provisioning_status': 'PENDING_DELETE'},
            {'provisioning_status': 'DELETED'}
        ]
        driver.thread_op(self.driver.load_balancer, self.lb, delete=True)
        self.succ_completion.assert_called_once_with(self.context, self.lb,
                                                     delete=True)
        self.assertEqual(0, self.fail_completion.call_count)

    def test_thread_op_goes_error(self):
        self.driver.req.get.side_effect = [
            {'provisioning_status': 'PENDING_CREATE'},
            {'provisioning_status': 'ERROR'}
        ]
        driver.thread_op(self.driver.load_balancer, self.lb)
        self.fail_completion.assert_called_once_with(self.context, self.lb)
        self.assertEqual(0, self.succ_completion.call_count)

    def test_thread_op_a_times_out(self):
        cfg.CONF.set_override('request_poll_timeout', 1, group='octavia')
        self.driver.req.get.side_effect = [
            {'provisioning_status': 'PENDING_CREATE'}
        ]
        driver.thread_op(self.driver.load_balancer, self.lb)
        self.fail_completion.assert_called_once_with(self.context, self.lb)
        self.assertEqual(0, self.succ_completion.call_count)

    def test_thread_op_updates_vip_when_vip_delegated(self):
        cfg.CONF.set_override('allocates_vip', True, group='octavia')
        expected_vip = '10.1.1.1'
        self.driver.req.get.side_effect = [
            {'provisioning_status': 'PENDING_CREATE',
             'vip': {'ip_address': ''}},
            {'provisioning_status': 'ACTIVE',
             'vip': {'ip_address': expected_vip}}
        ]
        driver.thread_op(self.driver.load_balancer,
                         self.lb,
                         lb_create=True)
        self.succ_completion.assert_called_once_with(self.context, self.lb,
                                                     delete=False,
                                                     lb_create=True)
        self.assertEqual(expected_vip, self.lb.vip_address)
| 36.665722 | 79 | 0.588117 | 1,478 | 12,943 | 4.945873 | 0.169824 | 0.035568 | 0.021341 | 0.024077 | 0.36156 | 0.311081 | 0.253762 | 0.207934 | 0.185226 | 0.152394 | 0 | 0.003699 | 0.31067 | 12,943 | 352 | 80 | 36.769886 | 0.815624 | 0.114348 | 0 | 0.220884 | 0 | 0 | 0.103995 | 0.020063 | 0 | 0 | 0 | 0.002841 | 0.056225 | 1 | 0.080321 | false | 0.008032 | 0.024096 | 0 | 0.124498 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8b2bea0330bb4f6054006045716aef8aee59015e | 1,863 | py | Python | semiring.py | dodgejesse/rational-recurrences | 4d126903399cc4a86734733d037a9bb7c5dda93d | [
"MIT"
] | 7 | 2019-09-09T06:25:20.000Z | 2020-03-21T13:53:43.000Z | semiring.py | dodgejesse/rational-recurrences | 4d126903399cc4a86734733d037a9bb7c5dda93d | [
"MIT"
] | 1 | 2020-12-13T14:26:03.000Z | 2020-12-13T14:26:03.000Z | semiring.py | dodgejesse/rational-recurrences | 4d126903399cc4a86734733d037a9bb7c5dda93d | [
"MIT"
] | 1 | 2019-11-24T12:47:21.000Z | 2019-11-24T12:47:21.000Z | import torch
def identity(x):
    return x


def zero(data, *size):
    return data.new(*size).zero_()


def one(data, *size):
    return data.new(*size).zero_() + 1.


def neg_infinity(data, *size):
    return -100 * one(data, *size)


class Semiring:
    def __init__(self,
                 type,
                 zero,
                 one,
                 plus,
                 times,
                 conditional_times,
                 from_float,
                 to_float,
                 activation):
        self.type = type
        self.zero = zero
        self.one = one
        self.plus = plus
        self.times = times
        self.conditional_times = conditional_times
        self.from_float = from_float
        self.to_float = to_float
        self.activation = activation


# element-wise plus, times
PlusTimesSemiring = \
    Semiring(
        0,
        zero,
        one,
        torch.add,
        torch.mul,
        torch.mul,
        identity,
        identity,
        identity
    )

# element-wise max, plus
MaxPlusSemiring = \
    Semiring(
        1,
        neg_infinity,
        zero,
        torch.max,
        torch.add,
        lambda x, y: x,
        identity,
        identity,
        torch.sigmoid
    )

# element-wise max, times. in log-space
MaxTimesSemiring = \
    Semiring(
        2,
        neg_infinity,
        one,
        torch.max,
        torch.mul,
        lambda x, y: x,
        identity,
        identity,
        torch.sigmoid
    )


def LogSum(x, y):
    return torch.log(torch.exp(x) + torch.exp(y))


# element-wise log-sum, plus. in log-space
LogSemiring = \
    Semiring(
        3,
        neg_infinity,
        zero,
        # lambda x, y: torch.log(torch.exp(x) + torch.exp(y)),
        LogSum,
        torch.add,
        lambda x, y: x,
        identity,
        identity,
        torch.sigmoid
    )
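
# A minimal usage sketch (not part of the original module): applying two of the
# semirings above to a pair of score tensors. Values are illustrative only.
if __name__ == "__main__":
    a = torch.tensor([0.5, 1.0])
    b = torch.tensor([2.0, 0.25])
    s = PlusTimesSemiring
    print(s.plus(a, b), s.times(a, b))  # element-wise sum and product
    s = MaxPlusSemiring
    print(s.plus(a, b), s.times(a, b))  # element-wise max and sum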
| 19.40625 | 62 | 0.500805 | 199 | 1,863 | 4.592965 | 0.236181 | 0.087527 | 0.035011 | 0.02954 | 0.322757 | 0.322757 | 0.322757 | 0.195842 | 0.098468 | 0.098468 | 0 | 0.007188 | 0.402576 | 1,863 | 95 | 63 | 19.610526 | 0.814016 | 0.094471 | 0 | 0.455696 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.075949 | false | 0 | 0.012658 | 0.063291 | 0.164557 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8b2e122bf3ed314d93abd5a384da45184c0aff9c | 1,470 | py | Python | examples/6_capacitive_touch/code.py | fasteddy516/CircuitPython_GamepadXL | 63626ff04cbe205510620801fe492d73c63e703d | [
"MIT"
] | 1 | 2021-12-23T18:40:18.000Z | 2021-12-23T18:40:18.000Z | examples/6_capacitive_touch/code.py | fasteddy516/CircuitPython_GamepadXL | 63626ff04cbe205510620801fe492d73c63e703d | [
"MIT"
] | 7 | 2021-08-18T16:34:12.000Z | 2021-12-23T08:35:28.000Z | examples/6_capacitive_touch/code.py | fasteddy516/CircuitPython_JoystickXL | 63626ff04cbe205510620801fe492d73c63e703d | [
"MIT"
] | null | null | null | """
JoystickXL Example #6 - Capacitive Touch (8 buttons and 1 hat switch).
This example uses an MPR121 12-Key Capacitive Touch Sensor Breakout
(https://www.adafruit.com/product/1982), and requires the `adafruit_mpr121` and
`adafruit_bus_device` libraries from the CircuitPython Library Bundle.
Tested on an Adafruit Metro M4 Express, but should work on other CircuitPython
boards with a sufficient quantity/type of pins.
* 3V, G, SCL, SDA from CircuitPython board to MPR121 board
* Buttons are on MPR121 inputs 0-7
* Hat Switch is on MPR121 inputs 8-11 (8=UP, 9=DOWN, 10=LEFT, 11=RIGHT)
Don't forget to copy boot.py from the example folder to your CIRCUITPY drive.
"""
import adafruit_mpr121
import board  # type: ignore (this is a CircuitPython built-in)
import busio  # type: ignore (this is a CircuitPython built-in)

from joystick_xl.inputs import Button, Hat
from joystick_xl.joystick import Joystick

# Set up I2C MPR121 capacitive touch sensor
i2c = busio.I2C(board.SCL, board.SDA)
mpr121 = adafruit_mpr121.MPR121(i2c)

# Set up JoystickXL!
js = Joystick()

# The MPR121 library returns True when a capacitive touch channel is activated.
# This makes it "active high", so we set `active_low` to False.
for i in range(8):
    js.add_input(Button(mpr121[i], active_low=False))

js.add_input(
    Hat(
        up=mpr121[8],
        down=mpr121[9],
        left=mpr121[10],
        right=mpr121[11],
        active_low=False,
    )
)

while True:
    js.update()
| 30.625 | 85 | 0.733333 | 233 | 1,470 | 4.575107 | 0.472103 | 0.056285 | 0.0394 | 0.030019 | 0.069418 | 0.069418 | 0.069418 | 0.069418 | 0 | 0 | 0 | 0.06722 | 0.180272 | 1,470 | 47 | 86 | 31.276596 | 0.817427 | 0.654422 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.238095 | 0 | 0.238095 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8b343ec3681fb404a7c7b9240fc57669b03d1fb5 | 2,216 | py | Python | urbanairship/experiments/variant.py | rodsenra/python-library | bd3fb129ee0eb72265f6d0f2f03fd9e8184dcac0 | [
"Apache-2.0"
] | null | null | null | urbanairship/experiments/variant.py | rodsenra/python-library | bd3fb129ee0eb72265f6d0f2f03fd9e8184dcac0 | [
"Apache-2.0"
] | null | null | null | urbanairship/experiments/variant.py | rodsenra/python-library | bd3fb129ee0eb72265f6d0f2f03fd9e8184dcac0 | [
"Apache-2.0"
] | null | null | null | class Variant(object):
"""The variants for the experiment. An experiment must have at least 1 variant
and no more than 26.
"""
def __init__(self,
push,
description=None,
name=None,
schedule=None,
weight=None
):
"""
:keyword push: [required] A push object without audience and device_types
fields. These two fields are not allowed because they are already defined
in the experiment object
:keyword description: [optional] A description of the variant.
:keyword name: [optional] A name for the variant
unless either message or in_app is present. You can provide an alert and any
platform overrides that apply to the device_type platforms you specify.
:keyword schedule: [optional] The time when the push notification should be sent
:keyword weight: [optional] The proportion of the audience that will receive
this variant. Defaults to 1.
"""
self.push = push
self.description = description
self.name = name
self.schedule = schedule
self.weight = weight
@property
def description(self):
if not self._description:
return None
return self._description
@description.setter
def description(self, value):
if not isinstance(value, str):
TypeError(
'the description must be type string'
)
self._description = value
@property
def name(self):
if not self._name:
return None
return self._name
@name.setter
def name(self, value):
if not isinstance(value, str):
TypeError(
'the name must be a string type'
)
self._name = value
@property
def weight(self):
if not self._weight:
return None
return self._weight
@weight.setter
def weight(self, value):
if not isinstance(value, int):
TypeError(
'the value must be a integer type'
)
self._weight = value
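
# A minimal usage sketch (not part of the original module). The push payload
# shape below is illustrative, not the library's exact API:
#
#     variant = Variant(
#         push={"notification": {"alert": "Hello!"}},
#         name="greeting-a",
#         description="Control copy",
#         weight=2,
#     )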
| 29.945946 | 88 | 0.573105 | 249 | 2,216 | 5.036145 | 0.35743 | 0.023923 | 0.021531 | 0.0311 | 0.093301 | 0.093301 | 0.070175 | 0.070175 | 0.070175 | 0 | 0 | 0.002849 | 0.366426 | 2,216 | 73 | 89 | 30.356164 | 0.890313 | 0.332581 | 0 | 0.22449 | 0 | 0 | 0.070855 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8b35915e56f7da506994b2940d5b12de39f029a2 | 11,278 | py | Python | creator/utils.py | MeTeoRise/chatbot_automation | 6dfbbdbf8b71219b35052c6549ff32347a6248be | [
"MIT"
] | 3 | 2022-03-04T10:18:29.000Z | 2022-03-23T20:16:01.000Z | creator/utils.py | MeTeoRise/chatbot_automation | 6dfbbdbf8b71219b35052c6549ff32347a6248be | [
"MIT"
] | null | null | null | creator/utils.py | MeTeoRise/chatbot_automation | 6dfbbdbf8b71219b35052c6549ff32347a6248be | [
"MIT"
] | null | null | null | import os
import io
import socket

import yaml
from rasa.shared.nlu.training_data.loading import load_data


class MyDumper(yaml.SafeDumper):
    def write_line_break(self, data=None):
        super().write_line_break(data)
        if len(self.indents) == 1:
            super().write_line_break()


def chatbot_create(name):
    cmd = "cd chatbots && mkdir \"{0}\" && cd \"{0}\" && rasa init --no-prompt&".format(name)
    os.system(cmd)


def chatbot_delete(name):
    cmd = "cd chatbots && rm -r \"{0}\"".format(name)
    os.system(cmd)


def chatbot_train(chatbot, intents, examples, responses, utterances, stories, steps, rules, actions, forms, slots):
    write_intents(chatbot, intents, examples)
    write_responses(chatbot, responses, utterances)
    write_stories(chatbot, stories, steps)
    write_rules(chatbot, rules, forms, slots)
    write_actions(chatbot, actions)
    write_policies(chatbot)
    write_domain(chatbot, intents, responses, utterances, actions, forms, slots)
    clear_models(chatbot)
    cmd = "cd chatbots && cd \"{0}\" && rasa train&".format(chatbot.name)
    os.system(cmd)


def chatbot_start(chatbot, actions):
    cmd = "cd chatbots && cd \"{0}\" && rasa run -m models --enable-api --cors \"*\" --debug&".format(chatbot.name)
    os.system(cmd)
    if len(list(actions)) != 0:
        cmd = "cd chatbots && cd \"{0}\" && rasa run actions --cors \"*\" --debug&".format(chatbot.name)
        os.system(cmd)


def chatbot_stop():
    if check_chatbot() == 0:
        cmd = "kill $(lsof -t -i:5005)"
        os.system(cmd)
    if check_chatbot_actions() == 0:
        cmd = "kill $(lsof -t -i:5055)"
        os.system(cmd)


def check_chatbot():
    a_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    location = ("127.0.0.1", 5005)
    result_of_check = a_socket.connect_ex(location)
    a_socket.close()
    return result_of_check


def check_chatbot_actions():
    a_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    location = ("127.0.0.1", 5055)
    result_of_check = a_socket.connect_ex(location)
    a_socket.close()
    return result_of_check


def write_intents(chatbot, intents, examples):
    path = "chatbots/" + chatbot.name + "/data/nlu.yml"
    intents_list = []
    examples_list = []
    intents = list(intents)
    examples = list(examples)
    for i in intents:
        intents_list.append(list(i))
    for i in examples:
        examples_list.append(list(i))
    intents = intents_list[:]
    examples = examples_list[:]
    merged = {}
    for i in intents:
        merged[i[1]] = []
        for j in examples:
            if i[0] == j[0]:
                merged[i[1]].append(j[1])
    diction = dict()
    diction['version'] = '2.0'
    diction['nlu'] = []
    for i in range(len(intents)):
        diction['nlu'].append({"intent": intents[i][1]})
        diction['nlu'][i]['examples'] = yaml.dump(merged[intents[i][1]])
    f = open(path, 'w+')
    yaml.dump(diction, f, Dumper=MyDumper, sort_keys=False)


def write_stories(chatbot, stories, steps):
    path = "chatbots/" + chatbot.name + "/data/stories.yml"
    stories_list = []
    steps_list = []
    stories = list(stories)
    steps = list(steps)
    for i in stories:
        stories_list.append(list(i))
    for i in steps:
        steps_list.append(list(i))
    stories = stories_list[:]
    steps = steps_list[:]
    merged = {}
    for i in stories:
        merged[i[1]] = []
        for j in steps:
            if i[0] == j[5]:
                if j[0] is not None:
                    merged[i[1]].append({'intent': j[0]})
                if j[1] is not None:
                    merged[i[1]].append({'action': j[1]})
                if j[2] is not None:
                    merged[i[1]].append({'action': j[2]})
                if j[3] is not None:
                    merged[i[1]].append({'action': j[3]})
                    merged[i[1]].append({'active_loop': j[3]})
    diction = dict()
    diction['version'] = '2.0'
    diction['stories'] = []
    for i in range(len(stories)):
        diction['stories'].append({"story": stories[i][1]})
        diction['stories'][i]['steps'] = merged[stories[i][1]]
    f = open(path, 'w+')
    yaml.dump(diction, f, Dumper=MyDumper, sort_keys=False)


def write_responses(chatbot, responses, utterances):
    path = "chatbots/" + chatbot.name + "/data/responses.yml"
    responses_list = []
    utterances_list = []
    responses = list(responses)
    utterances = list(utterances)
    for i in responses:
        responses_list.append(list(i))
    for i in utterances:
        utterances_list.append(list(i))
    responses = responses_list[:]
    utterances = utterances_list[:]
    merged = {}
    for i in responses:
        merged[i[1]] = []
        for idx, j in enumerate(utterances):
            if i[0] == j[0]:
                merged[i[1]].append({'text': j[1]})
                if j[2] is not None:
                    # attach the image to the utterance just appended; the
                    # original indexed with `idx - 1`, which is only correct
                    # by accident when no utterances are filtered out
                    merged[i[1]][-1]['image'] = j[2]
    diction = dict()
    diction['responses'] = dict()
    for i in range(len(responses)):
        diction['responses'][responses[i][1]] = merged[responses[i][1]]
    f = open(path, 'w+')
    yaml.dump(diction, f, Dumper=MyDumper, sort_keys=False)


def write_rules(chatbot, rules, forms, slots):
    path = "chatbots/" + chatbot.name + "/data/rules.yml"
    rules_list = []
    rules = list(rules)
    for i in rules:
        rules_list.append(list(i))
    rules = rules_list[:]
    diction = dict()
    diction['version'] = '2.0'
    diction['rules'] = []
    for i in range(len(rules)):
        diction['rules'].append({"rule": rules[i][0]})
        diction['rules'][i]['steps'] = []
        diction['rules'][i]['steps'].append({"intent": rules[i][1]})
        diction['rules'][i]['steps'].append({"action": rules[i][2]})
    for i in range(len(forms)):
        diction['rules'].append({"rule": "Activate form"})
        diction['rules'][2 * i + len(rules)]['steps'] = []
        diction['rules'][2 * i + len(rules)]['steps'].append({"intent": forms[i][2]})
        diction['rules'][2 * i + len(rules)]['steps'].append({"action": forms[i][1]})
        diction['rules'][2 * i + len(rules)]['steps'].append({"active_loop": forms[i][1]})
        diction['rules'].append({"rule": "Submit form"})
        diction['rules'][2 * i + 1 + len(rules)]['condition'] = []
        diction['rules'][2 * i + 1 + len(rules)]['condition'].append({"active_loop": forms[i][1]})
        diction['rules'][2 * i + 1 + len(rules)]['steps'] = []
        diction['rules'][2 * i + 1 + len(rules)]['steps'].append({"action": forms[i][1]})
        diction['rules'][2 * i + 1 + len(rules)]['steps'].append({"active_loop": None})
        diction['rules'][2 * i + 1 + len(rules)]['steps'].append({"action": "utter_submit"})
    f = open(path, 'w+')
    yaml.dump(diction, f, Dumper=MyDumper, sort_keys=False)


def write_actions(chatbot, actions):
    path = "chatbots/" + chatbot.name + "/actions/actions.py"
    actions_file = open(path, "w")
    actions = list(actions)
    actions_file.write("from typing import Any, Text, Dict, List\n\n")
    actions_file.write("from rasa_sdk import Action, Tracker\nfrom rasa_sdk.executor import CollectingDispatcher\n\n")
    for action in actions:
        # Indentation inside the generated strings is required for the emitted
        # actions.py to be valid Python.
        actions_file.write("\nclass Action{0}(Action):\n\n".format(action[0]))
        actions_file.write("    def name(self) -> Text:\n")
        actions_file.write('        return "action_{0}"\n\n'.format(action[0].lower()))
        actions_file.write("    def run(self, dispatcher: CollectingDispatcher,\n")
        actions_file.write("            tracker: Tracker,\n")
        actions_file.write("            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:\n\n")
        for line in action[1].split("\n"):
            actions_file.write("        {0}\n\n".format(line))
        actions_file.write("        return []\n\n")


def write_policies(obj):
    path = "chatbots/" + obj.name + "/config.yml"
    source = "chatbots/default/config.yml"
    cmd = "cp -fr {0} \"{1}\"&".format(source, path)
    os.system(cmd)


def write_domain(chatbot, intents, responses, utterances, actions, forms, slots):
    path = "chatbots/" + chatbot.name + "/domain.yml"
    nlu_path = "chatbots/" + chatbot.name + "/data/nlu.yml"
    data = load_data(nlu_path)
    entities = list(data.entities)
    intents = list(intents)
    responses = list(responses)
    actions = list(actions)
    forms = list(forms)
    slots = list(slots)
    intents_list = []
    responses_list = []
    utterances_list = []
    forms_list = []
    slots_list = []
    for i in intents:
        intents_list.append(list(i))
    for i in responses:
        responses_list.append(list(i))
    for i in utterances:
        utterances_list.append(list(i))
    for i in forms:
        forms_list.append(list(i))
    for i in slots:
        slots_list.append(list(i))
    utterances = utterances_list[:]
    intents = intents_list[:]
    responses = responses_list[:]
    forms = forms_list[:]
    slots = slots_list[:]
    merged = {}
    for i in responses:
        merged[i[1]] = []
        for idx, j in enumerate(utterances):
            if i[0] == j[0]:
                merged[i[1]].append({'text': j[1]})
                if j[2] is not None:
                    merged[i[1]][-1]['image'] = j[2]  # same index fix as in write_responses
    merged_forms = {}
    for i in forms:
        merged_forms[i[1]] = dict()
        for idx, j in enumerate(slots):
            if i[0] == j[4]:
                merged_forms[i[1]][j[0]] = []
                merged_forms[i[1]][j[0]].append({'type': 'from_text'})
    diction = dict()
    diction['version'] = '2.0'
    diction['intents'] = []
    for i in range(len(intents)):
        diction['intents'].append(intents[i][1])
    diction['entities'] = []
    for i in range(len(entities)):
        diction['entities'].append(entities[i])
    diction['slots'] = dict()
    for i in range(len(slots)):
        slot_dict = dict()
        slot_dict['type'] = slots[i][1]
        slot_dict['influence_conversation'] = slots[i][2]
        diction['slots'][slots[i][0]] = slot_dict
    diction['responses'] = dict()
    for i in range(len(responses)):
        diction['responses'][responses[i][1]] = merged[responses[i][1]]
    diction['actions'] = []
    for i in range(len(actions)):
        diction['actions'].append("{0}".format(actions[i][0].lower()))
    diction['forms'] = dict()
    for i in range(len(forms)):
        diction['forms'][forms[i][1]] = merged_forms[forms[i][1]]
    diction['session_config'] = dict()
    diction['session_config']['session_expiration_time'] = 60
    diction['session_config']['carry_over_slots_to_new_session'] = True
    f = open(path, 'w+')
    yaml.dump(diction, f, Dumper=MyDumper, sort_keys=False)


def clear_models(chatbot):
    cmd = "cd chatbots && cd \"{0}\" && cd models && rm *".format(chatbot.name)
    os.system(cmd)


# TODO: Pass intents to signals.
def read_default_intents(chatbot):
    default_intents = "chatbots/default/data/nlu.yml"
    data = load_data(default_intents)
    intents = data.intents
    return intents


# TODO: Read default stories and pass them to signals.
def read_default_stories():
    default_intents = "chatbots/default/data/stories.yml"
    data = load_data(default_intents)
    intents = data.intents
    return intents
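
# Illustrative call sequence (the `chatbot` argument is assumed to be a Django
# model instance with a `name` field, as the functions above expect):
#
#     chatbot_create("demo")        # scaffolds chatbots/demo via `rasa init`
#     chatbot_start(chatbot, [])    # serves the trained model on port 5005
#     chatbot_stop()                # kills whatever listens on ports 5005/5055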
| 30.646739 | 118 | 0.589378 | 1,489 | 11,278 | 4.364003 | 0.112156 | 0.012311 | 0.025854 | 0.027701 | 0.538012 | 0.452139 | 0.409972 | 0.330563 | 0.274392 | 0.240536 | 0 | 0.016733 | 0.236921 | 11,278 | 367 | 119 | 30.730245 | 0.738322 | 0.007005 | 0 | 0.396364 | 0 | 0 | 0.153805 | 0.0209 | 0 | 0 | 0 | 0.002725 | 0 | 1 | 0.065455 | false | 0 | 0.025455 | 0 | 0.109091 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8b38978d010f03a17b04862154c277cb73cd65d9 | 2,332 | py | Python | formlib/templates/__dmpcache__/form.htm.py | brandenclark/413-Final | d606f825bd1a9cf703e4907fc7b704f7df8d205b | [
"MIT"
] | null | null | null | formlib/templates/__dmpcache__/form.htm.py | brandenclark/413-Final | d606f825bd1a9cf703e4907fc7b704f7df8d205b | [
"MIT"
] | null | null | null | formlib/templates/__dmpcache__/form.htm.py | brandenclark/413-Final | d606f825bd1a9cf703e4907fc7b704f7df8d205b | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
STOP_RENDERING = runtime.STOP_RENDERING
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 10
_modified_time = 1524253334.9406211
_enable_loop = True
_template_filename = '/Users/brand/Desktop/finalexam/formlib/templates/form.htm'
_template_uri = 'form.htm'
_source_encoding = 'utf-8'
import django_mako_plus
_exports = []


def render_body(context,**pageargs):
    __M_caller = context.caller_stack._push_frame()
    try:
        __M_locals = __M_dict_builtin(pageargs=pageargs)
        self = context.get('self', UNDEFINED)
        csrf_input = context.get('csrf_input', UNDEFINED)
        extra = context.get('extra', UNDEFINED)
        form = context.get('form', UNDEFINED)
        __M_writer = context.writer()
        __M_writer(str( django_mako_plus.links(self) ))
        __M_writer('\n\n<div class="form-container">\n <form id="')
        __M_writer(str( form.form_id ))
        __M_writer('" action="')
        __M_writer(str( form.form_action or '' ))
        __M_writer('" method="')
        __M_writer(str( form.form_method ))
        __M_writer('">\n\n')
        __M_writer(' ')
        __M_writer(str( csrf_input ))
        __M_writer('\n\n')
        __M_writer(' ')
        __M_writer(str( form.as_p() ))
        __M_writer('\n\n')
        if extra:
            __M_writer(' ')
            __M_writer(str( extra ))
            __M_writer('\n')
        __M_writer('\n')
        if form.submit_text is not None:
            __M_writer(' <p class="text-center"><button type="submit" class="btn btn-primary">')
            __M_writer(filters.html_escape(str( form.submit_text )))
            __M_writer('</button></p>\n')
        __M_writer('\n </form>\n</div>\n')
        return ''
    finally:
        context.caller_stack._pop_frame()
"""
__M_BEGIN_METADATA
{"filename": "/Users/brand/Desktop/finalexam/formlib/templates/form.htm", "uri": "form.htm", "source_encoding": "utf-8", "line_map": {"17": 0, "26": 3, "27": 3, "28": 6, "29": 6, "30": 6, "31": 6, "32": 6, "33": 6, "34": 9, "35": 9, "36": 9, "37": 12, "38": 12, "39": 12, "40": 15, "41": 16, "42": 16, "43": 16, "44": 18, "45": 20, "46": 21, "47": 21, "48": 21, "49": 23, "55": 49}}
__M_END_METADATA
"""
| 38.866667 | 382 | 0.59434 | 313 | 2,332 | 4.003195 | 0.408946 | 0.128492 | 0.055866 | 0.051077 | 0.233839 | 0.177175 | 0.177175 | 0.132482 | 0.132482 | 0 | 0 | 0.063581 | 0.231132 | 2,332 | 59 | 383 | 39.525424 | 0.635248 | 0.008576 | 0 | 0.142857 | 0 | 0 | 0.173036 | 0.057856 | 0 | 0 | 0 | 0 | 0 | 1 | 0.020408 | false | 0 | 0.040816 | 0 | 0.081633 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8b38a1b4b8772554293e9955f47a3488fe3192be | 580 | py | Python | app.py | nurhasanhilmi/GOA-SVM-for-GPT-Classification | ccc2356957654477930edf33b384a199ac8b707c | [
"MIT"
] | null | null | null | app.py | nurhasanhilmi/GOA-SVM-for-GPT-Classification | ccc2356957654477930edf33b384a199ac8b707c | [
"MIT"
] | 1 | 2021-08-19T07:42:40.000Z | 2021-08-22T08:14:55.000Z | app.py | nurhasanhilmi/GOA-SVM-for-GPT-Classification | ccc2356957654477930edf33b384a199ac8b707c | [
"MIT"
] | null | null | null | import streamlit as st
from multiapp import MultiApp
from apps import app_unoptimized_svm, app_goa_svm, app_grid_search_svm, app_dataset, app_saved_model, app_gpt_classification  # import app modules here
app = MultiApp()
# Add all application here
app.add_app("GOA-SVM", app_goa_svm.app)
app.add_app("Grid Search-SVM", app_grid_search_svm.app)
app.add_app("Unoptimized SVM", app_unoptimized_svm.app)
app.add_app("Saved Models", app_saved_model.app)
app.add_app("Dataset", app_dataset.app)
app.add_app("GPT Classification", app_gpt_classification.app)
# The main app
app.run()
| 32.222222 | 150 | 0.805172 | 98 | 580 | 4.459184 | 0.27551 | 0.12357 | 0.12357 | 0.1373 | 0.254005 | 0.100687 | 0 | 0 | 0 | 0 | 0 | 0 | 0.094828 | 580 | 17 | 151 | 34.117647 | 0.832381 | 0.105172 | 0 | 0 | 0 | 0 | 0.143689 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.272727 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8b39056fdec027e94bea7ccb868554665f22f2e9 | 16,695 | py | Python | apps/invoicing/forms.py | karpiq24/django-klima-kar | e62e79c66053749e249f55e1ab47f810f449f0aa | [
"MIT"
] | 2 | 2018-01-23T22:38:57.000Z | 2019-07-14T08:59:19.000Z | apps/invoicing/forms.py | karpiq24/django-klima-kar | e62e79c66053749e249f55e1ab47f810f449f0aa | [
"MIT"
] | 237 | 2018-08-15T23:13:52.000Z | 2022-01-13T13:08:50.000Z | apps/invoicing/forms.py | karpiq24/django-klima-kar | e62e79c66053749e249f55e1ab47f810f449f0aa | [
"MIT"
] | null | null | null | from dal import autocomplete
from extra_views import InlineFormSetFactory

from django import forms
from django.urls import reverse
from django.forms.models import model_to_dict
from django.db.models import Q

from KlimaKar.widgets import PrettySelect
from KlimaKar.forms import ToggleInput
from apps.invoicing.models import (
    Contractor,
    SaleInvoice,
    SaleInvoiceItem,
    ServiceTemplate,
    RefrigerantWeights,
    CorrectiveSaleInvoice,
)
from apps.warehouse.models import Ware
from apps.commission.models import CommissionItem, CommissionFile


class EnableDisableDateInput(forms.DateInput):
    template_name = "invoicing/sale_invoice/date_field.html"


class SaleInvoiceModelForm(forms.ModelForm):
    contractor = forms.ModelChoiceField(
        label="Kontrahent",
        queryset=Contractor.objects.all(),
        widget=autocomplete.ModelSelect2(
            url="invoicing:contractor_autocomplete_create"
        ),
    )
    generate_pdf = forms.BooleanField(
        label="Wydruk po zapisie", widget=forms.HiddenInput(), required=False
    )
    contractor_modified = forms.BooleanField(widget=forms.HiddenInput(), required=False)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.fields["number"].widget.attrs.update(
            {"placeholder": "Podaj numer faktury"}
        )
        self.fields["contractor"].widget.attrs.update(
            {"data-placeholder": "Podaj nazwę, NIP albo numer telefonu"}
        )
        self.fields["issue_date"].widget.attrs.update({"placeholder": "Wybierz datę"})
        self.fields["issue_date"].widget.attrs.update({"class": "date-input"})
        self.fields["completion_date"].widget.attrs.update(
            {"placeholder": "Wybierz datę"}
        )
        self.fields["completion_date"].widget.attrs.update({"class": "date-input"})
        self.fields["payment_date"].widget.attrs.update({"placeholder": "Wybierz datę"})
        self.fields["payment_date"].widget.attrs.update({"class": "date-input"})
        self.fields["payment_type_other"].widget.attrs.update(
            {"placeholder": "Podaj formę płatności"}
        )
        self.fields["calculation"].disabled = True  # TODO: Brutto
        contractor = self.initial.get("contractor")
        if contractor:
            self.fields["contractor"].initial = contractor

    def clean(self):
        cleaned_data = super().clean()
        number = cleaned_data["number"]
        if self.instance and self.instance.number == number:
            return cleaned_data
        else:
            invoice_type = cleaned_data["invoice_type"]
            invoices = SaleInvoice.objects.filter(invoice_type=invoice_type)
            if invoice_type == SaleInvoice.TYPE_VAT:
                invoices = (
                    invoices
                    | SaleInvoice.objects.filter(invoice_type=SaleInvoice.TYPE_WDT)
                ).distinct()
            elif invoice_type == SaleInvoice.TYPE_WDT:
                invoices = (
                    invoices
                    | SaleInvoice.objects.filter(invoice_type=SaleInvoice.TYPE_VAT)
                ).distinct()
            elif invoice_type == SaleInvoice.TYPE_PRO_FORMA:
                invoices = (
                    invoices
                    | SaleInvoice.objects.filter(
                        invoice_type=SaleInvoice.TYPE_WDT_PRO_FORMA
                    )
                ).distinct()
            elif invoice_type == SaleInvoice.TYPE_WDT_PRO_FORMA:
                invoices = (
                    invoices
                    | SaleInvoice.objects.filter(
                        invoice_type=SaleInvoice.TYPE_PRO_FORMA
                    )
                ).distinct()
            if invoices.filter(number=number).exists():
                self.add_error("number", "Faktura o tym numerze już istnieje.")
        return cleaned_data

    class Meta:
        model = SaleInvoice
        fields = [
            "issue_date",
            "completion_date",
            "invoice_type",
            "number",
            "contractor",
            "payment_type",
            "payment_date",
            "payment_type_other",
            "comment",
            "tax_percent",
            "calculation",
        ]
        widgets = {
            "comment": forms.Textarea(attrs={"rows": 2}),
            "payment_date": EnableDisableDateInput(),
            "invoice_type": forms.HiddenInput(),
            "tax_percent": forms.HiddenInput(),
            "payment_type": PrettySelect(),
            "calculation": PrettySelect(),
        }


class CorrectiveSaleInvoiceModelForm(SaleInvoiceModelForm):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.fields["contractor"].disabled = True
        self.fields["completion_date"].disabled = True
        self.fields["calculation"].disabled = True

    class Meta:
        model = CorrectiveSaleInvoice
        fields = [
            "issue_date",
            "completion_date",
            "invoice_type",
            "number",
            "contractor",
            "payment_type",
            "payment_date",
            "payment_type_other",
            "comment",
            "tax_percent",
            "original_invoice",
            "reason",
            "calculation",
        ]
        widgets = {
            "comment": forms.Textarea(attrs={"rows": 2}),
            "reason": forms.Textarea(attrs={"rows": 2}),
            "payment_date": EnableDisableDateInput(),
            "invoice_type": forms.HiddenInput(),
            "original_invoice": forms.HiddenInput(),
            "tax_percent": forms.HiddenInput(),
            "payment_type": PrettySelect(),
            "calculation": PrettySelect(),
        }


class NipInput(forms.TextInput):
    template_name = "invoicing/contractor/nip_field.html"

    class Media:
        js = ("js/invoicing/contractor-gus.js",)

    def __init__(self, *args, **kwargs):
        self.prefix = kwargs.pop("prefix")
        super().__init__(*args, **kwargs)

    def get_context(self, name, value, attrs, *args, **kwargs):
        context = super().get_context(name, value, attrs)
        context["url"] = reverse("invoicing:contractor_gus")
        context["prefix"] = self.prefix
        return context


class ContractorModelForm(forms.ModelForm):
    ignore_duplicated_phone = forms.CharField(
        required=False, widget=forms.HiddenInput()
    )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.instance and self.instance.is_locked:
            self.fields["nip"].disabled = True
            self.fields[
                "nip"
            ].help_text = "Ten kontrahent ma przypisaną fakturę, zamiast edytować numer NIP, dodaj nowego kontrahenta."
        self.fields["name"].widget.attrs.update({"placeholder": "Podaj nazwę"})
        if self.instance and self.instance.nip_prefix:
            nip_prefix = self.instance.nip_prefix
        else:
            nip_prefix = ""
        self.fields["nip"].widget = NipInput(prefix=nip_prefix)
        self.fields["nip"].widget.attrs.update({"placeholder": "Podaj NIP"})
        self.fields["address_1"].widget.attrs.update({"placeholder": "Podaj adres"})
        self.fields["address_2"].widget.attrs.update({"placeholder": "Podaj adres"})
        self.fields["city"].widget.attrs.update({"placeholder": "Podaj miasto"})
        self.fields["postal_code"].widget.attrs.update(
            {"placeholder": "Podaj kod pocztowy"}
        )
        self.fields["email"].widget.attrs.update({"placeholder": "Podaj adres e-mail"})
        self.fields["phone_1"].widget.attrs.update(
            {"placeholder": "Podaj numer telefonu"}
        )
        self.fields["phone_2"].widget.attrs.update(
            {"placeholder": "Podaj numer telefonu"}
        )
        self.fields["bdo_number"].widget.attrs.update(
            {"placeholder": "Podaj numer BDO"}
        )

    class Meta:
        model = Contractor
        fields = [
            "nip_prefix",
            "nip",
            "name",
            "city",
            "postal_code",
            "address_1",
            "address_2",
            "email",
            "phone_1",
            "phone_2",
            "bdo_number",
            "ignore_duplicated_phone",
        ]
        widgets = {"nip_prefix": forms.HiddenInput()}

    class Media:
        js = ("js/invoicing/contractor-form.js",)

    def clean_nip(self):
        nip = self.cleaned_data["nip"]
        if self.instance and self.instance.is_locked:
            return self.instance.nip
        return nip

    def clean_nip_prefix(self):
        nip_prefix = self.cleaned_data["nip_prefix"]
        if self.instance and self.instance.is_locked:
            return self.instance.nip_prefix
        return nip_prefix

    def clean_phone_1(self):
        data = self.cleaned_data["phone_1"]
        if data:
            data = data.replace(" ", "")
            if not bool(self.data.get("ignore_duplicated_phone", "False")):
                self._check_duplicate_phones(data)
        return data

    def clean_phone_2(self):
        data = self.cleaned_data["phone_2"]
        if data:
            data = data.replace(" ", "")
            if not bool(self.data.get("ignore_duplicated_phone", "False")):
                self._check_duplicate_phones(data)
        return data

    def _check_duplicate_phones(self, number):
        queryset = Contractor.objects.filter(Q(phone_1=number) | Q(phone_2=number))
        if self.instance and self.instance.pk:
            queryset = queryset.exclude(pk=self.instance.pk)
        if queryset.exists():
            raise forms.ValidationError(
                "Podany numer jest już przypisany do innego kontrahenta.",
                code="duplicated_phone",
                params=[queryset.first().as_json()],
            )


class SaleInvoiceItemModelForm(forms.ModelForm):
    ware = forms.ModelChoiceField(
        label="Towar",
        queryset=Ware.objects.all(),
        required=False,
        widget=autocomplete.ModelSelect2(url="warehouse:ware_autocomplete"),
    )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.fields["name"].widget.attrs.update({"placeholder": "Podaj nazwę"})
        self.fields["name"].widget.attrs.update({"class": "item-name"})
        self.fields["description"].widget.attrs.update({"placeholder": "Podaj opis"})
        self.fields["description"].widget.attrs.update({"class": "item-description"})
        self.fields["quantity"].widget.attrs.update({"placeholder": "Ilość"})
        self.fields["quantity"].widget.attrs.update({"class": "item-quantity"})
        self.fields["price_netto"].widget.attrs.update({"placeholder": "Netto"})
        self.fields["price_netto"].widget.attrs.update({"class": "item-netto"})
        self.fields["price_brutto"].widget.attrs.update({"placeholder": "Brutto"})
        self.fields["price_brutto"].widget.attrs.update({"class": "item-brutto"})
        self.fields["ware"].widget.attrs.update({"data-placeholder": "Wybierz towar"})
        self.fields["ware"].widget.attrs.update({"class": "item-ware"})

    class Meta:
        model = SaleInvoiceItem
        fields = [
            "name",
            "description",
            "quantity",
            "price_netto",
            "price_brutto",
            "ware",
        ]
        localized_fields = ["price_netto", "price_brutto", "quantity"]


class SaleInvoiceItemsInline(InlineFormSetFactory):
    model = SaleInvoiceItem
    form_class = SaleInvoiceItemModelForm
    factory_kwargs = {"extra": 20}

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.commission = self.kwargs.get("commission", None)
        self.value_type = self.kwargs.get("value_type", None)
        self.original_invoice = self.kwargs.get("original_invoice", None)

    def get_initial(self):
        if not self.object and self.original_invoice:
            items = SaleInvoiceItem.objects.filter(sale_invoice=self.original_invoice)
            initial = [model_to_dict(item) for item in items]
            return initial
        if not self.object and self.commission:
            items = CommissionItem.objects.filter(commission=self.commission)
            initial = [self._commission_item_to_dict(item) for item in items]
            return initial
        return self.initial[:]

    def _commission_item_to_dict(self, item):
        d = model_to_dict(item, exclude=["id", "commission"])
        if self.value_type == "NETTO":
            d["price_netto"] = d.pop("price")
        else:
            d["price_brutto"] = d.pop("price")
        return d


class AlwaysChangedModelForm(forms.ModelForm):
    """
    Force saving RefrigerantWeightsInline formset with default values.
    """

    def has_changed(self):
        return True


class RefrigerantWeightsInline(InlineFormSetFactory):
    model = RefrigerantWeights
    factory_kwargs = {"max_num": 1, "min_num": 1, "extra": 0, "can_delete": False}
    form_class = AlwaysChangedModelForm
    fields = "__all__"


class ServiceTemplateModelForm(forms.ModelForm):
    ware = forms.ModelChoiceField(
        label="Towar",
        queryset=Ware.objects.all(),
        required=False,
        widget=autocomplete.ModelSelect2(url="warehouse:ware_autocomplete"),
    )
    services = forms.ModelMultipleChoiceField(
        label="Usługi",
        queryset=ServiceTemplate.objects.all(),
        required=False,
        widget=autocomplete.ModelSelect2Multiple(
            url="invoicing:service_template_autocomplete"
        ),
    )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.fields["name"].widget.attrs.update({"placeholder": "Podaj nazwę"})
        self.fields["ware_filter"].widget.attrs.update(
            {"placeholder": "Podaj nazwę towaru"}
        )
        self.fields["button_name"].widget.attrs.update(
            {"placeholder": "Podaj nazwę przycisku"}
        )
        self.fields["name"].widget.attrs.update({"class": "item-name"})
        self.fields["description"].widget.attrs.update({"placeholder": "Podaj opis"})
        self.fields["description"].widget.attrs.update({"class": "item-description"})
        self.fields["quantity"].widget.attrs.update({"placeholder": "Podaj ilość"})
        self.fields["quantity"].widget.attrs.update({"class": "item-quantity"})
        self.fields["price_netto"].widget.attrs.update(
            {"placeholder": "Podaj cenę netto"}
        )
        self.fields["price_netto"].widget.attrs.update({"class": "item-netto"})
        self.fields["price_brutto"].widget.attrs.update(
            {"placeholder": "Podaj cenę brutto"}
        )
        self.fields["price_brutto"].widget.attrs.update({"class": "item-brutto"})
        self.fields["ware"].widget.attrs.update({"data-placeholder": "Wybierz towar"})
        self.fields["ware"].widget.attrs.update({"class": "item-ware"})
        self.fields["services"].widget.attrs.update(
            {"data-placeholder": "Wybierz usługi"}
        )

    class Meta:
        model = ServiceTemplate
        fields = [
            "name",
            "description",
            "quantity",
            "price_netto",
            "price_brutto",
            "ware",
            "button_color",
            "display_as_button",
            "button_name",
            "is_ware_service",
            "ware_filter",
            "is_group",
            "services",
        ]
        widgets = {
            "display_as_button": ToggleInput,
            "is_ware_service": ToggleInput,
            "is_group": ToggleInput,
        }
        localized_fields = ["price_netto", "price_brutto", "quantity"]


class EmailForm(forms.Form):
    recipient = forms.EmailField(label="Do")
    subject = forms.CharField(label="Temat")
    message = forms.CharField(widget=forms.Textarea, label="Treść")
    sale_invoice = forms.ModelChoiceField(
        queryset=SaleInvoice.objects.none(), widget=forms.HiddenInput()
    )
    files = forms.ModelMultipleChoiceField(
        queryset=CommissionFile.objects.none(), label="Pliki", required=False
    )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.fields["sale_invoice"].queryset = SaleInvoice.objects.filter(
            pk=self.initial["sale_invoice"].pk
        )
        self.fields["files"].queryset = CommissionFile.objects.filter(
            commission__sale_invoices=self.initial["sale_invoice"]
        )
        if not self.fields["files"].queryset.exists():
            self.fields["files"].widget = forms.HiddenInput()
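
# A minimal usage sketch (not part of the original module; the field values are
# hypothetical and assume the app's Polish-locale dataset exists):
#
#     form = ContractorModelForm(data={"nip": "1234567890", "name": "ACME Sp. z o.o."})
#     if form.is_valid():
#         contractor = form.save()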
| 36.935841 | 119 | 0.600719 | 1,643 | 16,695 | 5.930006 | 0.154595 | 0.05953 | 0.080263 | 0.077594 | 0.537001 | 0.502207 | 0.44904 | 0.414451 | 0.38325 | 0.315919 | 0 | 0.002131 | 0.269063 | 16,695 | 451 | 120 | 37.017738 | 0.79628 | 0.004732 | 0 | 0.383838 | 0 | 0 | 0.192952 | 0.021687 | 0 | 0 | 0 | 0.002217 | 0 | 1 | 0.045455 | false | 0 | 0.027778 | 0.002525 | 0.207071 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8b397e52975595e4604862b4354dceb45600a27d | 1,731 | bzl | Python | tools/javacc.bzl | sgammon/closure-stylesheets | 7a107fda2336060a6bb02227ff7b9ef525f74ece | [
"Apache-2.0"
] | 1 | 2019-06-15T04:55:55.000Z | 2019-06-15T04:55:55.000Z | tools/javacc.bzl | Bloombox/closure-stylesheets | 716aed6cde8772d8f119e813c1b48fb3a13d974c | [
"Apache-2.0"
] | null | null | null | tools/javacc.bzl | Bloombox/closure-stylesheets | 716aed6cde8772d8f119e813c1b48fb3a13d974c | [
"Apache-2.0"
] | null | null | null | ## Copyright 2018 The Closure Stylesheets Authors.
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
def _declare_output_files(ctx, files):
if len(files) < 1:
fail("files must not be empty.")
sources = []
for file in files:
sources.append(ctx.actions.declare_file(file))
return struct(
files = sources,
path = '/'.join(sources[0].path.split('/')[:-1]),
)
def _javacc_impl(ctx):
outputs = _declare_output_files(ctx, ctx.attr.outs)
args = [
'-OUTPUT_DIRECTORY=%s' % outputs.path,
ctx.file.src.path,
]
ctx.actions.run(
inputs = [ctx.file.src],
outputs = outputs.files,
arguments = args,
executable = ctx.executable._compiler,
)
return struct(
files = depset(outputs.files),
)
javacc = rule(
implementation = _javacc_impl,
output_to_genfiles = True,
attrs={
"src": attr.label(
mandatory = True,
allow_files = [".jj"],
single_file = True,
),
"outs": attr.string_list(
mandatory = True,
),
"_compiler": attr.label(
default = Label("@javacc//:javacc"),
executable = True,
cfg = "host",
),
},
)
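
## Example usage in a BUILD file (target and file names below are
## illustrative assumptions, not taken from this repository):
##
## javacc(
##     name = "css_parser",
##     src = "CssParser.jj",
##     outs = [
##         "CssParser.java",
##         "CssParserTokenManager.java",
##     ],
## )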
| 27.046875 | 75 | 0.622761 | 213 | 1,731 | 4.971831 | 0.535211 | 0.056657 | 0.024551 | 0.030217 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008534 | 0.255344 | 1,731 | 63 | 76 | 27.47619 | 0.813033 | 0.328712 | 0 | 0.159091 | 0 | 0 | 0.07489 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0 | 0 | 0.090909 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8b39e6d8d610f45267b9579e7529ef1e138da10a | 8,924 | py | Python | tests/test_transitions_anyio.py | MicahLyle/transitions-anyio | b581dfebdfd8641adec285db6b4f7e34287eb309 | [
"MIT"
] | 4 | 2021-01-07T16:33:53.000Z | 2021-09-19T20:14:26.000Z | tests/test_transitions_anyio.py | MicahLyle/transitions-anyio | b581dfebdfd8641adec285db6b4f7e34287eb309 | [
"MIT"
] | 4 | 2020-10-12T17:01:53.000Z | 2021-06-24T10:22:16.000Z | tests/test_transitions_anyio.py | thedrow/transitions-anyio | ff97c7e7a0298bb3ed8cd05ec56f8b13a5ddfdac | [
"MIT"
] | 1 | 2021-06-20T21:10:10.000Z | 2021-06-20T21:10:10.000Z | from unittest.mock import MagicMock
import anyio
import pytest
from transitions import MachineError
from transitions_anyio import HierarchicalAnyIOMachine
pytestmark = pytest.mark.anyio
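
# The `m` and `machine_cls` fixtures come from the suite's conftest, which is
# not part of this file. A minimal sketch of what the tests below assume they
# supply (machine classes and the state/transition set are inferred from the
# tests themselves):
#
#   @pytest.fixture(params=[AnyIOMachine, HierarchicalAnyIOMachine])
#   def machine_cls(request):
#       return request.param
#
#   @pytest.fixture
#   def m(machine_cls):
#       return machine_cls(states=['A', 'B', 'C'], initial='A',
#                          transitions=[['go', 'A', 'B']])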
async def await_true():
await anyio.sleep(0.1)
return True
async def await_false():
await anyio.sleep(0.1)
return False
def synced_true():
return True
async def cancel_soon():
await anyio.sleep(1)
raise TimeoutError("Callback was not cancelled!")
async def call_delayed(func, time):
await anyio.sleep(time)
await func()
class DummyModel(object):
pass
async def test_async_machine_cb(m):
mock = MagicMock()
async def async_process():
await anyio.sleep(0.1)
mock()
m.after_state_change = async_process
await m.go()
assert m.state == 'B'
mock.assert_called_once_with()
async def test_async_condition(m):
m.add_transition('proceed', 'A', 'C', conditions=await_true, unless=await_false)
await m.proceed()
assert m.state == 'C'
async def test_async_enter_exit(m):
enter_mock = MagicMock()
exit_mock = MagicMock()
async def async_enter():
await anyio.sleep(0.1)
enter_mock()
async def async_exit():
await anyio.sleep(0.1)
exit_mock()
m.on_exit_A(async_exit)
m.on_enter_B(async_enter)
await m.go()
enter_mock.assert_called_once_with()
exit_mock.assert_called_once_with()
async def test_async_conditions(m):
mock = MagicMock()
m.add_transition('proceed', 'A', 'C', conditions=synced_true, after=mock)
await m.proceed()
assert m.state == 'C'
mock.assert_called_once_with()
async def test_multiple_models(machine_cls):
m1 = machine_cls(states=['A', 'B', 'C'], initial='A', name="m1")
m2 = machine_cls(states=['A'], initial='A', name='m2')
m1.add_transition(trigger='go', source='A', dest='B', before=cancel_soon)
m1.add_transition(trigger='fix', source='A', dest='C', after=cancel_soon)
m1.add_transition(trigger='check', source='C', dest='B', conditions=await_false)
m1.add_transition(trigger='reset', source='C', dest='A')
m2.add_transition(trigger='go', source='A', dest=None, conditions=m1.is_C, after=m1.reset)
async with anyio.create_task_group() as tg:
tg.start_soon(m1.go)
tg.start_soon(call_delayed, m1.fix, 0.05)
tg.start_soon(call_delayed, m1.check, 0.07)
tg.start_soon(call_delayed, m2.go, 0.1)
assert m1.is_A()
async def test_async_callback_arguments(m):
async def process(should_fail=True):
if should_fail is not False:
raise ValueError("should_fail has been set")
m.on_enter_B(process)
with pytest.raises(ValueError):
await m.go()
await m.to_A()
await m.go(should_fail=False)
async def test_async_callback_event_data(machine_cls):
state_a = machine_cls.state_cls('A')
state_b = machine_cls.state_cls('B')
def sync_condition(event_data):
return event_data.state == state_a
async def async_conditions(event_data):
return event_data.state == state_a
async def async_callback(event_data):
assert event_data.state == state_b
def sync_callback(event_data):
assert event_data.state == state_b
m = machine_cls(states=[state_a, state_b], initial='A', send_event=True)
m.add_transition('go', 'A', 'B', conditions=[sync_condition, async_conditions],
after=[sync_callback, async_callback])
m.add_transition('go', 'B', 'A', conditions=sync_condition)
await m.go()
assert m.is_B() is True
await m.go()
assert m.is_B() is True
async def test_async_callback_trigger(machine_cls):
mock_processed = MagicMock()
async def on_event(event_data):
await event_data.model.to_C()
mock_processed()
m = machine_cls(states=['A', 'B', 'C'],
transitions=[dict(trigger='go', source='A', dest='B', after=on_event)],
initial='A', send_event=True)
await m.go()
assert m.is_C()
assert mock_processed.called
async def test_async_invalid_triggers(m):
await m.to_B()
with pytest.raises(MachineError):
await m.go()
m.ignore_invalid_triggers = True
await m.go()
assert m.is_B() is True
async def test_async_dispatch(machine_cls):
model1 = DummyModel()
model2 = DummyModel()
model3 = DummyModel()
machine = machine_cls(model=None, states=['A', 'B', 'C'], transitions=[['go', 'A', 'B'],
['go', 'B', 'C'],
['go', 'C', 'A']], initial='A')
machine.add_model(model1)
machine.add_model(model2, initial='B')
machine.add_model(model3, initial='C')
await machine.dispatch('go')
assert model1.is_B() is True
assert 'C' == model2.state
assert machine.initial == model3.state
# @pytest.mark.xfail(reason="we should investigate")
async def test_queued(machine_cls):
states = ['A', 'B', 'C', 'D']
# Define with list of dictionaries
async def change_state(machine):
assert machine.state == 'A'
if machine.has_queue:
await machine.run(machine=machine)
assert machine.state == 'A'
else:
with pytest.raises(MachineError):
await machine.run(machine=machine)
async def raise_machine_error(event_data):
assert event_data.machine.has_queue is True
await event_data.model.to_A()
event_data.machine._queued = False
await event_data.model.to_C()
async def raise_exception(event_data):
await event_data.model.to_C()
raise ValueError("Clears queue")
transitions = [
{'trigger': 'walk', 'source': 'A', 'dest': 'B', 'before': change_state},
{'trigger': 'run', 'source': 'B', 'dest': 'C'},
{'trigger': 'sprint', 'source': 'C', 'dest': 'D'}
]
m = machine_cls(states=states, transitions=transitions, initial='A')
await m.walk(machine=m)
assert 'B' == m.state
m = machine_cls(states=states, transitions=transitions, initial='A', queued=True)
await m.walk(machine=m)
assert 'C' == m.state
m = machine_cls(states=states, initial='A', queued=True, send_event=True,
before_state_change=raise_machine_error)
with pytest.raises(MachineError):
await m.to_C()
m = machine_cls(states=states, initial='A', queued=True, send_event=True)
m.add_transition('go', 'A', 'B', after='go')
m.add_transition('go', 'B', 'C', before=raise_exception)
with pytest.raises(ValueError):
await m.go()
assert 'B' == m.state
async def test_callback_order(machine_cls):
finished = []
class Model:
async def before(self):
await anyio.sleep(0.1)
finished.append(2)
async def after(self):
await anyio.sleep(0.1)
finished.append(3)
async def after_state_change():
finished.append(4)
async def before_state_change():
finished.append(1)
model = Model()
m = machine_cls(
model=model,
states=['start', 'end'],
after_state_change=after_state_change,
before_state_change=before_state_change,
initial='start',
)
m.add_transition('transit', 'start', 'end', after='after', before='before')
await model.transit()
assert finished == [1, 2, 3, 4]
async def test_nested_async():
mock = MagicMock()
async def sleep_mock():
await anyio.sleep(0.1)
mock()
states = ['A', 'B', {'name': 'C', 'children': ['1', {'name': '2', 'children': ['a', 'b'], 'initial': 'a'},
'3'], 'initial': '2'}]
transitions = [{'trigger': 'go', 'source': 'A', 'dest': 'C',
'after': [sleep_mock] * 100}]
machine = HierarchicalAnyIOMachine(states=states, transitions=transitions, initial='A')
await machine.go()
assert 'C{0}2{0}a'.format(machine.state_cls.separator) == machine.state
assert 100 == mock.call_count
async def test_parallel_async():
states = ['A', 'B', {'name': 'P',
'parallel': [
{'name': '1', 'children': ['a'], 'initial': 'a'},
{'name': '2', 'children': ['b', 'c'], 'initial': 'b'},
{'name': '3', 'children': ['x', 'y', 'z'], 'initial': 'y'}]}]
machine = HierarchicalAnyIOMachine(states=states, initial='A')
await machine.to_P()
assert [
'P{0}1{0}a'.format(machine.state_cls.separator),
'P{0}2{0}b'.format(machine.state_cls.separator),
'P{0}3{0}y'.format(machine.state_cls.separator)
] == machine.state
await machine.to_B()
assert machine.is_B() is True
| 29.549669 | 110 | 0.610713 | 1,180 | 8,924 | 4.432203 | 0.127119 | 0.050478 | 0.032122 | 0.029254 | 0.437667 | 0.326195 | 0.247419 | 0.15392 | 0.121797 | 0.056214 | 0 | 0.012337 | 0.246078 | 8,924 | 301 | 111 | 29.647841 | 0.765012 | 0.009301 | 0 | 0.240909 | 0 | 0 | 0.055895 | 0 | 0 | 0 | 0 | 0 | 0.131818 | 1 | 0.013636 | false | 0.004545 | 0.022727 | 0.009091 | 0.068182 | 0.004545 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8b3a120d3d802f1bacae4d23b235c03a8bccc3c2 | 6,263 | py | Python | ditto/scripts/chat_bot.py | Kvoti/ditto | eb4efb241e54bf679222d14afeb71d9d5441c122 | [
"BSD-3-Clause"
] | null | null | null | ditto/scripts/chat_bot.py | Kvoti/ditto | eb4efb241e54bf679222d14afeb71d9d5441c122 | [
"BSD-3-Clause"
] | 9 | 2015-11-10T15:17:22.000Z | 2015-11-12T11:07:02.000Z | ditto/scripts/chat_bot.py | Kvoti/ditto | eb4efb241e54bf679222d14afeb71d9d5441c122 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Chat admin bot to open and close the chatroom.
Run via cron.
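An illustrative crontab entry (interval and paths are assumptions):
    */5 * * * * /usr/bin/python /path/to/chat_bot.py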
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2010 Nathanael C. Fritz
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
import sys
import logging
import sleekxmpp
import chat.models
import chat.utils
# Python versions before 3.0 do not use UTF-8 encoding
# by default. To ensure that Unicode is handled properly
# throughout SleekXMPP, we will set the default encoding
# ourselves to UTF-8.
if sys.version_info < (3, 0):
from sleekxmpp.util.misc_ops import setdefaultencoding
setdefaultencoding('utf8')
else:
raw_input = input
# Need to query the domain here, as doing it inside SendMsgBot doesn't work
DOMAIN = chat.utils.domain()
class SendMsgBot(sleekxmpp.ClientXMPP):
def __init__(self, jid, password, actions):
self.me = jid
self.actions = actions
sleekxmpp.ClientXMPP.__init__(self, jid, password)
# The session_start event will be triggered when
# the bot establishes its connection with the server
# and the XML streams are ready for use. We want to
# listen for this event so that we we can initialize
# our roster.
self.add_event_handler("session_start", self.start, threaded=True)
def start(self, event):
"""
Process the session_start event.
Typical actions for the session_start event are
requesting the roster and broadcasting an initial
presence stanza.
Arguments:
event -- An empty dictionary. The session_start
event does not provide any additional
data.
"""
self.send_presence()
self.get_roster()
for action in self.actions:
room = "%s@muc.%s" % (action['room'].slug, DOMAIN)
self.plugin['xep_0045'].joinMUC(room,
"chatadmin",
# If a room password is needed, use:
# password=the_room_password,
pfrom=self.me,
wait=True)
if action['action'] == 'open':
config = self.plugin['xep_0045'].getRoomConfig(room)
if action['members']:
                    logging.warning('setting member only %s' % room)
config.field['muc#roomconfig_membersonly']['value'] = 1
else:
config.field['muc#roomconfig_membersonly']['value'] = 0
self.plugin['xep_0045'].configureRoom(room, ifrom=self.me, form=config)
for member in action['members']:
self.plugin['xep_0045'].setAffiliation(
room,
ifrom=self.me,
jid=jid(member)
)
else:
# TODO maybe destroy is too strong here, should just set
# unusable password or set to private room with no
# participants?
self.plugin['xep_0045'].destroy(room, ifrom=self.me)
if action['action'] == 'open':
action['room'].is_opened = True
action['room'].is_closed = False
else:
action['room'].is_closed = True
action['room'].is_opened = False
action['room'].save()
self.disconnect(wait=True)
def jid(username):
return "%s@%s" % (username, DOMAIN)
def run():
# Setup logging.
logging.basicConfig(level=logging.DEBUG,
format='%(levelname)-8s %(message)s')
# Iterate over the chatrooms in the django db and see which need
# opened or closed
actions = []
for room in chat.models.Room.objects.all():
if room.is_open() and not room.is_opened:
# we set the members of the room once when we open it, we
# don't keep checking if role changes mean changes to the
# member list.
# TODO maybe we should?
# TODO can we use the chatserver idea of role?
# TODO this could get very big, probably *need* to do something
# smarter than using explicit member list
members = list(room.members().values_list('username', flat=True))
actions.append({'room': room, 'action': 'open', 'members': members})
elif not room.is_open() and not room.is_closed:
actions.append({'room': room, 'action': 'close'})
# TESTING
import os
if 'OPEN' in os.environ:
room = chat.models.Room.objects.get(slug='main')
actions = [{'room': room, 'action': 'open', 'members': []}]
elif 'CLOSE' in os.environ:
room = chat.models.Room.objects.get(slug='main')
actions = [{'room': room, 'action': 'close'}]
####################
# Setup the EchoBot and register plugins. Note that while plugins may
# have interdependencies, the order in which you register them does
# not matter.
xmpp = SendMsgBot(jid("mark"), chat.utils.password("mark"), actions)
xmpp.register_plugin('xep_0030') # Service Discovery
xmpp.register_plugin('xep_0199') # XMPP Ping
xmpp.register_plugin('xep_0045')
# If you are working with an OpenFire server, you may need
# to adjust the SSL version used:
# xmpp.ssl_version = ssl.PROTOCOL_SSLv3
# If you want to verify the SSL certificates offered by a server:
# xmpp.ca_certs = "path/to/ca/cert"
# Connect to the XMPP server and start processing XMPP stanzas.
if xmpp.connect((chat.utils.server(), 5222)):
# If you do not have the dnspython library installed, you will need
# to manually specify the name of the server if it does not match
# the one in the JID. For example, to use Google Talk you would
# need to use:
#
# if xmpp.connect(('talk.google.com', 5222)):
# ...
xmpp.process(block=True)
print("Done")
else:
print("Unable to connect.")
| 36.841176 | 87 | 0.582628 | 763 | 6,263 | 4.720839 | 0.380079 | 0.019989 | 0.021655 | 0.023598 | 0.093282 | 0.072182 | 0.049972 | 0.037757 | 0.037757 | 0.037757 | 0 | 0.013142 | 0.319655 | 6,263 | 169 | 88 | 37.059172 | 0.832199 | 0.369951 | 0 | 0.1125 | 0 | 0 | 0.10543 | 0.013706 | 0 | 0 | 0 | 0.005917 | 0 | 1 | 0.05 | false | 0.05 | 0.1125 | 0.0125 | 0.1875 | 0.025 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8b3a1e66bfa48a8241c8827793551ccbe10ec13f | 25,104 | py | Python | peripheral/aic_11051/config/aic.py | Unitek-KL/csp | 2ac7ba59465f23959e51d2f16a5712b57b79ef5f | [
"0BSD"
] | null | null | null | peripheral/aic_11051/config/aic.py | Unitek-KL/csp | 2ac7ba59465f23959e51d2f16a5712b57b79ef5f | [
"0BSD"
] | null | null | null | peripheral/aic_11051/config/aic.py | Unitek-KL/csp | 2ac7ba59465f23959e51d2f16a5712b57b79ef5f | [
"0BSD"
] | null | null | null | """*****************************************************************************
* Copyright (C) 2019 Microchip Technology Inc. and its subsidiaries.
*
* Subject to your compliance with these terms, you may use Microchip software
* and any derivatives exclusively with Microchip products. It is your
* responsibility to comply with third party license terms applicable to your
* use of third party software (including open source software) that may
* accompany Microchip software.
*
* THIS SOFTWARE IS SUPPLIED BY MICROCHIP "AS IS". NO WARRANTIES, WHETHER
* EXPRESS, IMPLIED OR STATUTORY, APPLY TO THIS SOFTWARE, INCLUDING ANY IMPLIED
* WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY, AND FITNESS FOR A
* PARTICULAR PURPOSE.
*
* IN NO EVENT WILL MICROCHIP BE LIABLE FOR ANY INDIRECT, SPECIAL, PUNITIVE,
* INCIDENTAL OR CONSEQUENTIAL LOSS, DAMAGE, COST OR EXPENSE OF ANY KIND
* WHATSOEVER RELATED TO THE SOFTWARE, HOWEVER CAUSED, EVEN IF MICROCHIP HAS
* BEEN ADVISED OF THE POSSIBILITY OR THE DAMAGES ARE FORESEEABLE. TO THE
* FULLEST EXTENT ALLOWED BY LAW, MICROCHIP'S TOTAL LIABILITY ON ALL CLAIMS IN
* ANY WAY RELATED TO THIS SOFTWARE WILL NOT EXCEED THE AMOUNT OF FEES, IF ANY,
* THAT YOU HAVE PAID DIRECTLY TO MICROCHIP FOR THIS SOFTWARE.
*****************************************************************************"""
from os.path import join
Log.writeInfoMessage( "Loading Interrupt Manager for " + Variables.get( "__PROCESSOR" ) )
################################################################################
#### Public Globals -- variables used in this module and accessible from other files
################################################################################
global getInterruptName
global interruptNamespace
global interruptSymbolEnable
global interruptSymbolHandler
global interruptSymbolHandlerLock
global interruptLastNameEnable
global interruptLastNameHandler
global interruptLastNameLock
interruptNamespace = "core"
interruptLastNameEnable = "_INTERRUPT_ENABLE"
interruptLastNameHandler = "_INTERRUPT_HANDLER"
interruptLastNameLock = "_INTERRUPT_HANDLER_LOCK"
################################################################################
global showSharedVectorsInMenu
global numSharedVectors
global sharedVectors
global subVectorToSharedVector
showSharedVectorsInMenu = False
numSharedVectors = 0
sharedVectors = {}
subVectorToSharedVector = {}
################################################################################
#### Static Globals -- variables intended to be used inside this file only
################################################################################
# not currently public
global interruptsChildren
global interruptLastNameMapType
global interruptLastNameVector
global interruptLastNameSrcType
global interruptLastNamePriority
global aicMenuTitle
global aicRedirectionVisibility
global aicMapTypeVisibility
global aicPriorityOutputMode
global aicPriorityChoices
global aicSrcTypes
global aicMinPriorityName
global aicMaxPriorityName
interruptLastNameMapType = "_INTERRUPT_MAP_TYPE"
interruptLastNameVector = "_INTERRUPT_VECTOR"
interruptLastNameSrcType = "_INTERRUPT_SRC_TYPE"
interruptLastNamePriority = "_INTERRUPT_PRIORITY"
interruptsChildren = ATDF.getNode( "/avr-tools-device-file/devices/device/interrupts" ).getChildren()
aicMenuTitle = ""
aicRedirectionVisibility = False
aicMapTypeVisibility = False
aicPriorityOutputMode = ""
aicPriorityChoices = []
aicSrcTypes = []
aicMinPriorityName = ""
aicMaxPriorityName = ""
aicCodeGenerationDependencies = []
neverSecureList = []
alwaysSecureList = []
programmedSecureList = []
externalList = []
################################################################################
#### Global Methods
################################################################################
def getInterruptName( interruptNode ):
if "header:alternate-name" in interruptNode.getAttributeList():
retval = interruptNode.getAttribute( "header:alternate-name" )
else:
retval = interruptNode.getAttribute( "name" )
return( str( retval ) )
################################################################################
#### Local Methods
################################################################################
def getInterruptDescription( interruptNode ):
if "header:alternate-caption" in interruptNode.getAttributeList():
retval = interruptNode.getAttribute( "header:alternate-caption" )
else:
retval = interruptNode.getAttribute( "caption" )
return( str( retval ) )
global getNameValueCaptionTuple
def getNameValueCaptionTuple( aGroupName, aTupleArray ):
choiceNode = ATDF.getNode("/avr-tools-device-file/modules/module@[name=\"AIC\"]/value-group@[name=\"" + aGroupName + "\"]")
if choiceNode:
choiceValues = choiceNode.getChildren()
del aTupleArray[:]
for ii in range( 0, len( choiceValues ) ):
aTupleArray.append( ( choiceValues[ ii ].getAttribute("name"),
choiceValues[ ii ].getAttribute("value"),
choiceValues[ ii ].getAttribute("caption")
) )
def getTupleNameContaining( aTupleArray, aString ):
tupleName = ""
if len( aTupleArray ):
tupleName = aTupleArray[ 0 ][ 0 ]
aString = aString.upper()
for tuple in aTupleArray:
if( aString in tuple[ 0 ].upper() ):
tupleName = tuple[ 0 ]
break
return tupleName
def aicMapTypeRedirectionCallback( aicMapType, eventDictionary ):
if( True == eventDictionary[ "value" ] ):
# Mapping Secure to NonSecure
if( ("AlwaysSecure" == aicMapType.getDefaultValue())
or ("Secure" == aicMapType.getDefaultValue())
):
aicMapType.setValue( "RedirectedToNonSecure", 1 ) # make change evident for user
else:
if( ("AlwaysSecure" == aicMapType.getDefaultValue())
or ("Secure" == aicMapType.getDefaultValue())
):
aicMapType.clearValue() # restore the default value
def priorityMapTypeCallback( aicVectorPriority, eventDictionary ):
global aicMaxPriorityName
if( ("AlwaysSecure" == eventDictionary[ "value" ])
or ("Secure" == eventDictionary[ "value" ])
):
aicVectorPriority.setSelectedKey( aicMaxPriorityName, 0 )
aicVectorPriority.setVisible( False )
else:
aicVectorPriority.setVisible( True )
def aicCodeGenerationCallback( aicCodeGeneration, eventDictionary ):
global interruptLastNameEnable
# Interrupt enables and map type determine the code generation to be done later
secureCount = 0
nonSecureCount = 0
for interrupt in interruptsChildren:
interruptName = getInterruptName( interrupt )
component = aicCodeGeneration.getComponent()
enableSymbol = component.getSymbolByID( interruptName + interruptLastNameEnable )
if( enableSymbol.getValue() ):
mapTypeSymbol = component.getSymbolByID( interruptName + interruptLastNameMapType )
if( ("NeverSecure" == mapTypeSymbol.value)
or ("NonSecure" == mapTypeSymbol.value)
or ("RedirectedToNonSecure" == mapTypeSymbol.value)
):
nonSecureCount = nonSecureCount + 1
else:
secureCount = secureCount + 1
if secureCount and nonSecureCount:
aicCodeGeneration.setValue( "AICandSAIC", 0xFF )
elif nonSecureCount:
aicCodeGeneration.setValue( "AIC", 0xFF )
elif secureCount:
aicCodeGeneration.setValue( "SAIC", 0xFF )
else:
aicCodeGeneration.setValue( "NONE", 0xFF )
global aicVectorEnableCallback
def aicVectorEnableCallback( aicVectorEnable, eventDictionary ):
global sharedVectors
desiredValue = eventDictionary[ "value" ]
interrupt = eventDictionary[ "id" ].replace( interruptLastNameLock, "" ).replace( interruptLastNameEnable, "" )
aicVectorEnable.setReadOnly( True )
if aicVectorEnable.getDefaultValue() == desiredValue:
aicVectorEnable.clearValue()
else:
aicVectorEnable.setValue( desiredValue, 1 )
aicVectorEnable.setReadOnly( False )
sharedInterrupt = subVectorToSharedVector.get( interrupt )
if( sharedInterrupt ):
# check if any sibling is enabled
component = aicVectorEnable.getComponent()
desiredValue = False
for elem in sharedVectors[ sharedInterrupt ]:
vectorEnable = component.getSymbolByID( elem + interruptLastNameEnable )
if vectorEnable and vectorEnable.getValue():
desiredValue = True
aicVectorEnable = component.getSymbolByID( sharedInterrupt + interruptLastNameEnable )
aicVectorEnable.setValue( desiredValue, 1 )
def setupEnableAndHandler( component, anInterrupt, aicVectorEnable, aicVectorHandler ):
global sharedVectors
enableDependencies = []
interruptName = getInterruptName( anInterrupt )
moduleInstance = anInterrupt.getAttribute( "module-instance" ).split()
sharedVectorMaxShares = len( moduleInstance )
if 1 < sharedVectorMaxShares:
aicVectorHandler.setReadOnly( True )
aicVectorHandler.setValue( interruptName + "_SharedHandler", 0 )
aicVectorHandler.setReadOnly( False )
sharedVectors[ interruptName ] = moduleInstance
aicVectorHandler.setVisible( False )
for elem in moduleInstance:
subVectorToSharedVector[ elem ] = interruptName
subVectorEnable = component.createBooleanSymbol( elem + interruptLastNameEnable, aicVectorEnable )
subVectorEnable.setLabel( "Enable " + elem )
subVectorEnable.setDefaultValue( False )
subVectorEnable.setDependencies( aicVectorEnableCallback, [elem + interruptLastNameLock] )
enableDependencies.append( elem + interruptLastNameEnable ) # Parent enable depends on children
subVectorHandlerLock = component.createBooleanSymbol( elem + interruptLastNameLock, subVectorEnable )
subVectorHandlerLock.setDefaultValue( False )
subVectorHandlerLock.setVisible( False )
subVectorHandler = component.createStringSymbol( elem + interruptLastNameHandler, subVectorEnable )
subVectorHandler.setLabel( elem + " Handler" )
subVectorHandler.setDefaultValue( elem + "_Handler" )
enableDependencies.append( interruptName + interruptLastNameLock )
aicVectorEnable.setDependencies( aicVectorEnableCallback, enableDependencies )
def setupSharedVectorFtlSymbols( component, anInterrupt, aicVectorEnable ):
global showSharedVectorsInMenu
global numSharedVectors
interruptName = getInterruptName( anInterrupt )
moduleInstance = anInterrupt.getAttribute( "module-instance" ).split()
numShares = len( moduleInstance )
if 1 < numShares:
numSharedVectors = numSharedVectors + 1
# SHARED_VECTOR_N = "name", e.g. SHARED_VECTOR_1 = "SYSC"
# Create a generic shared handler symbol with a value indicating the HANDLER
sharedVector = component.createStringSymbol( "SHARED_VECTOR_" + str( numSharedVectors - 1 ), aicVectorEnable )
Database.clearSymbolValue( "core", interruptName + "SHARED_VECTOR_" + str( numSharedVectors - 1 ) )
sharedVector.setDefaultValue( interruptName )
sharedVector.setVisible( False )
sharedVectorNumShares = component.createIntegerSymbol( interruptName + "_NUM_SHARES", sharedVector )
sharedVectorNumShares.setMin( numShares )
sharedVectorNumShares.setMax( numShares )
Database.clearSymbolValue( "core", interruptName + "_NUM_SHARES" )
sharedVectorNumShares.setValue( numShares, 0 )
sharedVectorNumShares.setVisible( showSharedVectorsInMenu )
# Create symbols for the shared handler names
# {SHARED_VECTOR_#}_HANDLER_#, e.g.
# SYSC_HANDLER_0 = "PMC" ==> PMC_InterruptHandler
# SYSC_HANDLER_1 = "RSTC" ==> RSTC_InterruptHandler
# SYSC_HANDLER_2 = "RTC" ==> RTC_InterruptHandler
ii = 0
for elem in moduleInstance:
shareName = component.createStringSymbol( interruptName + "_SHARE_" + str( ii ), aicVectorEnable )
shareName.setDefaultValue( elem )
shareName.setVisible( showSharedVectorsInMenu )
ii = ii + 1
def formAicPyGlobalData( theProcessor, theCoreComponent ):
global getNameValueCaptionTuple
global aicMenuTitle
global aicRedirectionVisibility
global aicMapTypeVisibility
global aicPriorityOutputMode
global aicPriorityChoices
    global aicSrcTypes
    # These four lists are assigned below for the detected processor; declare
    # them global here, otherwise the assignments would only create locals and
    # the module-level lists consulted later would stay empty.
    global neverSecureList
    global alwaysSecureList
    global programmedSecureList
    global externalList
aicPriorityOutputMode = "Value"
aicPrioritySymbolStem = "PRIORITY"
getNameValueCaptionTuple( "AIC_SMR__" + aicPrioritySymbolStem, aicPriorityChoices )
if not len( aicPriorityChoices ):
aicPrioritySymbolStem = "PRIOR"
getNameValueCaptionTuple( "AIC_SMR__" + aicPrioritySymbolStem, aicPriorityChoices )
if not len( aicPriorityChoices ):
# still not found in the atdf; so set some defaults
aicPriorityChoices.append( ( "MINIMUM", "0x0", "Minimum priority" ) )
aicPriorityChoices.append( ( "VERY_LOW", "0x1", "Very low priority" ) )
aicPriorityChoices.append( ( "LOW", "0x2", "Low priority" ) )
aicPriorityChoices.append( ( "MEDIUM_LOW", "0x3", "Medium priority" ) )
aicPriorityChoices.append( ( "MEDIUM_HIGH","0x4", "Medium high priority" ) )
aicPriorityChoices.append( ( "HIGH", "0x5", "High priority" ) )
aicPriorityChoices.append( ( "VERY_HIGH", "0x6", "Very high priority" ) )
aicPriorityChoices.append( ( "MAXIMUM", "0x7", "Maximum priority" ) )
aicSmrPrioritySymbol = theCoreComponent.createStringSymbol( "AIC_SMR_PRIORITY_SYMBOL", None )
aicSmrPrioritySymbol.setDefaultValue( "AIC_SMR_" + aicPrioritySymbolStem )
aicSmrPrioritySymbol.setVisible( False )
#
aicSrcTypeSymbolStem = "SRCTYPE"
getNameValueCaptionTuple( "AIC_SMR__" + aicSrcTypeSymbolStem, aicSrcTypes )
aicSmrSrcTypeSymbol = theCoreComponent.createStringSymbol( "AIC_SMR_SRCTYPE_SYMBOL", None )
aicSmrSrcTypeSymbol.setDefaultValue( "AIC_SMR_" + aicSrcTypeSymbolStem )
aicSmrSrcTypeSymbol.setVisible( False )
#
if "SAMA5" in theProcessor:
aicMenuTitle = "Interrupts (AIC/SAIC)"
aicRedirectionVisibility = True
aicMapTypeVisibility = True
neverSecureList = [ '49', '62' ]
alwaysSecureList = [ '0', '14', '15', '16', '18', '51', '61', '68', '69', '70' ]
        programmedSecureList = []      # TODO: create a map interface to populate this list
        externalList = [ '0', '49' ]   # '2', '56', '57', '64', '65', '66', '67', '71', '72' have been subsumed; the data sheet peripheral table is misleading here
elif "SAM9X60" in theProcessor:
aicMenuTitle = "Interrupts"
aicRedirectionVisibility = False
aicMapTypeVisibility = False
neverSecureList = [ str( ii ) for ii in list( range( 0, 50 ) ) ] # '0', '1',...'49'
alwaysSecureList = []
programmedSecureList = []
externalList = [ '0', '31' ]
################################################################################
#### Component
################################################################################
theProcessor = Variables.get("__PROCESSOR")
formAicPyGlobalData( theProcessor, coreComponent )
aicMinPriorityName = getTupleNameContaining( aicPriorityChoices, "min" )
aicMaxPriorityName = getTupleNameContaining( aicPriorityChoices, "max" )
aicMenu = coreComponent.createMenuSymbol( "AIC_MENU", cortexMenu )
aicMenu.setLabel( aicMenuTitle )
aicMenu.setDescription( "Configuration for AIC Initialization" )
### Symbol for interrupt redirection decision
aicRedirection = coreComponent.createBooleanSymbol( "SECURE_TO_NONSECURE_REDIRECTION", aicMenu )
aicRedirection.setLabel( "Secure to NonSecure Redirection" )
aicRedirection.setDefaultValue( True )
aicRedirection.setVisible( aicRedirectionVisibility )
aicVectorMax = coreComponent.createIntegerSymbol( "AIC_VECTOR_MAX", aicMenu )
aicVectorMax.setDefaultValue( Interrupt.getMaxInterruptID() )
aicVectorMax.setVisible( False )
aicVectorMax = coreComponent.createIntegerSymbol( "AIC_VECTOR_MIN", aicMenu )
aicVectorMax.setDefaultValue( Interrupt.getMinInterruptID() )
aicVectorMax.setVisible( False )
for interrupt in interruptsChildren:
interruptName = getInterruptName( interrupt )
aicNumber = str( interrupt.getAttribute( "index" ) )
if aicNumber in neverSecureList: # secure to nonSecure redirection will have no effect
mapTypeDefault = "NeverSecure"
elif aicNumber in alwaysSecureList: # secure to nonSecure redirection will disable and hide these
mapTypeDefault = "AlwaysSecure"
elif aicNumber in programmedSecureList: # secure to nonSecure redirection will change mapType to 'RedirectedToNonSecure' and set highest priority
mapTypeDefault = "Secure"
else: # programmed nonSecure # secure to nonSecure redirection will have no effect
mapTypeDefault = "NonSecure"
# only for use by the aic ftl code
aicInterruptFirstName = coreComponent.createStringSymbol( "AIC_FIRST_NAME_KEY" + aicNumber, None )
aicInterruptFirstName.setDefaultValue( interruptName )
aicInterruptFirstName.setVisible( False )
###
aicVectorEnable = coreComponent.createBooleanSymbol( interruptName + interruptLastNameEnable, aicMenu )
aicVectorEnable.setLabel( "Enable " + aicNumber + " -- " + getInterruptDescription( interrupt ) )
aicVectorEnable.setDefaultValue( False )
###
if (aicNumber in externalList):
vectorPreCursor = "External Vector: "
else:
vectorPreCursor = "Internal Vector: "
aicVectorSourceGUILabel = coreComponent.createCommentSymbol( interruptName + "_INTERRUPT_VECTOR_LABEL", aicVectorEnable )
aicVectorSourceGUILabel.setLabel( vectorPreCursor + interruptName + "_IRQn" )
# This is the same as aicVectorSourceGUILabel but creates a .var assignment accessible in plib_aic.c.ftl
aicVectorSource = coreComponent.createStringSymbol( interruptName + interruptLastNameVector, aicVectorEnable )
aicVectorSource.setDefaultValue( interruptName + "_IRQn" )
aicVectorSource.setVisible( False )
###
aicVectorLock = coreComponent.createBooleanSymbol( interruptName + interruptLastNameLock, aicVectorEnable )
aicVectorLock.setDefaultValue( False )
aicVectorLock.setVisible( False )
aicVectorHandler = coreComponent.createStringSymbol( interruptName + interruptLastNameHandler, aicVectorEnable )
aicVectorHandler.setLabel( "Handler" )
aicVectorHandler.setDefaultValue( interruptName + "_Handler" )
###
setupEnableAndHandler( coreComponent, interrupt, aicVectorEnable, aicVectorHandler )
setupSharedVectorFtlSymbols( coreComponent, interrupt, aicVectorEnable )
#
aicMapType = coreComponent.createStringSymbol( interruptName + interruptLastNameMapType, aicVectorEnable )
aicMapType.setLabel( "Map Type" )
aicMapType.setDefaultValue( mapTypeDefault )
aicMapType.setVisible( aicMapTypeVisibility )
aicMapType.clearValue()
aicMapType.setReadOnly( True )
aicMapType.setDependencies( aicMapTypeRedirectionCallback, [ "SECURE_TO_NONSECURE_REDIRECTION" ] )
aicVectorSourceType = coreComponent.createKeyValueSetSymbol( interruptName + interruptLastNameSrcType, aicVectorEnable )
aicVectorSourceType.setLabel( "Source Type" )
for tupleElem in aicSrcTypes:
if (aicNumber not in externalList) and ("internal" not in tupleElem[ 2 ]):
continue
aicVectorSourceType.addKey( tupleElem[ 0 ], tupleElem[ 1 ], tupleElem[ 2 ] )
aicVectorSourceType.setOutputMode( "Key" )
aicVectorSourceType.setDisplayMode( "Description" )
aicVectorSourceType.setDefaultValue( 0 )
aicVectorSourceType.setSelectedKey( str( aicSrcTypes[ 0 ][ 0 ] ), 0 )
aicVectorPriority = coreComponent.createKeyValueSetSymbol( interruptName + interruptLastNamePriority, aicVectorEnable )
aicVectorPriority.setLabel( "Priority" )
for tupleElem in aicPriorityChoices:
aicVectorPriority.addKey( tupleElem[ 0 ], tupleElem[ 1 ], tupleElem[ 2 ] )
aicVectorPriority.setOutputMode( aicPriorityOutputMode )
aicVectorPriority.setDisplayMode( "Description" )
aicVectorPriority.setDefaultValue( 0 )
if( ("AlwaysSecure" == aicMapType.value) or ("Secure" == aicMapType.value) ):
aicVectorPriority.setSelectedKey( aicMaxPriorityName, 0 )
        aicVectorPriority.setVisible( False ) # FIQ interrupts do not have a priority, but if they get forced nonSecure we want a reasonable value
else:
aicVectorPriority.setSelectedKey( aicMinPriorityName, 0 )
aicVectorPriority.setDependencies( priorityMapTypeCallback, [ interruptName + interruptLastNameMapType ] )
aicCodeGenerationDependencies.append( interruptName + interruptLastNameEnable ) # add to dependency list for code generation symbol
aicCodeGenerationDependencies.append( interruptName + interruptLastNameMapType ) # add to dependency list for code generation symbol
###
aicNumSharedVectors = coreComponent.createIntegerSymbol( "NUM_SHARED_VECTORS", aicMenu )
aicNumSharedVectors.setMin( numSharedVectors )
aicNumSharedVectors.setMax( numSharedVectors )
Database.clearSymbolValue( "core", "NUM_SHARED_VECTORS" )
aicNumSharedVectors.setValue( numSharedVectors, 1 )
aicNumSharedVectors.setVisible( showSharedVectorsInMenu )
### Symbol for code generation decisions
aicCodeGeneration = coreComponent.createComboSymbol( "AIC_CODE_GENERATION", aicMenu, [ "NONE", "AIC", "SAIC", "AICandSAIC" ] )
aicCodeGeneration.setDefaultValue( "NONE" )
aicCodeGeneration.setDependencies( aicCodeGenerationCallback, aicCodeGenerationDependencies )
aicCodeGeneration.setVisible( False )
###
aicRedirection.setValue( True, 0 ) # stimulate a aicMapTypeRedirectionCallback() by setting the aicRedirection value
aicRedirection.setReadOnly( True )
############################################################################
#### Code Generation
############################################################################
configName = Variables.get( "__CONFIGURATION_NAME" )
aicSystemDefFile = coreComponent.createFileSymbol( "SYSTEM_AIC_DEFINITIONS", None )
aicSystemDefFile.setType( "STRING" )
aicSystemDefFile.setSourcePath( "../peripheral/aic_11051/templates/system/definitions.h.ftl" )
aicSystemDefFile.setOutputName( "core.LIST_SYSTEM_DEFINITIONS_H_INCLUDES" )
aicSystemDefFile.setMarkup( True )
aicSystemInitFile = coreComponent.createFileSymbol( "SYS_AIC_INITIALIZE", None )
aicSystemInitFile.setType( "STRING" )
aicSystemInitFile.setSourcePath( "../peripheral/aic_11051/templates/system/initialization.c.ftl" )
aicSystemInitFile.setOutputName( "core.LIST_SYSTEM_INIT_C_SYS_INITIALIZE_PERIPHERALS" )
aicSystemInitFile.setMarkup( True )
aicSystemIntWeakHandleFile = coreComponent.createFileSymbol( "AIC_WEAK_HANDLERS", None )
aicSystemIntWeakHandleFile.setType( "STRING" )
aicSystemIntWeakHandleFile.setSourcePath( "../peripheral/aic_11051/templates/system/interrupt_weak_handlers.h.ftl" )
aicSystemIntWeakHandleFile.setOutputName( "core.LIST_SYSTEM_INTERRUPT_WEAK_HANDLERS" )
aicSystemIntWeakHandleFile.setMarkup( True )
aicSharedHandlerFile = coreComponent.createFileSymbol( "AIC_SHARED_HANDLERS", None )
aicSharedHandlerFile.setType( "STRING" )
aicSharedHandlerFile.setSourcePath( "../peripheral/aic_11051/templates/system/interrupt_shared_handlers.h.ftl" )
aicSharedHandlerFile.setOutputName( "core.LIST_SYSTEM_INTERRUPT_SHARED_HANDLERS" )
aicSharedHandlerFile.setMarkup( True )
aicSourceFile = coreComponent.createFileSymbol( "AIC_SOURCE", None )
aicSourceFile.setType( "SOURCE" )
aicSourceFile.setProjectPath( "config/" + configName + "/peripheral/aic/" )
aicSourceFile.setSourcePath( "../peripheral/aic_11051/templates/plib_aic.c.ftl" )
aicSourceFile.setDestPath( "/peripheral/aic/" )
aicSourceFile.setOutputName( "plib_aic.c" )
aicSourceFile.setMarkup( True )
aicSourceFile.setOverwrite( True )
aicSourceFile.setEnabled( True )
aicHeaderFile = coreComponent.createFileSymbol( "AIC_HEADER", None )
aicHeaderFile.setType( "HEADER" )
aicHeaderFile.setProjectPath( "config/" + configName + "/peripheral/aic/" )
aicHeaderFile.setSourcePath( "../peripheral/aic_11051/templates/plib_aic.h.ftl" )
aicHeaderFile.setDestPath( "/peripheral/aic/" )
aicHeaderFile.setOutputName( "plib_aic.h" )
aicHeaderFile.setMarkup( True )
aicHeaderFile.setOverwrite( True )
aicHeaderFile.setEnabled( True )
| 48.276923 | 165 | 0.691284 | 1,929 | 25,104 | 8.921203 | 0.24987 | 0.011331 | 0.007903 | 0.011389 | 0.134581 | 0.111802 | 0.103086 | 0.078912 | 0.054971 | 0.016503 | 0 | 0.007505 | 0.1826 | 25,104 | 519 | 166 | 48.369942 | 0.83114 | 0.126275 | 0 | 0.203125 | 0 | 0 | 0.114677 | 0.045592 | 0 | 0 | 0.001922 | 0.001927 | 0 | 1 | 0.028646 | false | 0 | 0.002604 | 0 | 0.033854 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8b3af0e86d5663788d4b0dfe5890096086d9ac78 | 765 | py | Python | etl/utils/__init__.py | cfh294/ElectionModeling | 714da9ea004f042f9f775804168e3761e34f64f0 | [
"MIT"
] | null | null | null | etl/utils/__init__.py | cfh294/ElectionModeling | 714da9ea004f042f9f775804168e3761e34f64f0 | [
"MIT"
] | null | null | null | etl/utils/__init__.py | cfh294/ElectionModeling | 714da9ea004f042f9f775804168e3761e34f64f0 | [
"MIT"
] | null | null | null | """
Some utility functions
"""
from sqlalchemy.orm import sessionmaker
from sqlalchemy import create_engine
from models import Election, ElectionType, PoliticalParty, Campaign
database_string = "sqlite:///election.db"
def get_session(cnxn_string):
return sessionmaker(
create_engine(cnxn_string)
)()
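

# Illustrative usage (the party code "DEM" is an assumption about the seed
# data, not something defined in this module):
#   session = get_session(database_string)
#   campaign = get_campaign(session, 2020, "DEM")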
def get_campaign(session, year, party_id, election_type="PRESIDENTIAL", cand_name=None):
election = session.query(Election).filter_by(
election_type=session.query(ElectionType).filter_by(code=election_type).first(),
year=year
).first()
pp = session.query(PoliticalParty).filter_by(code=party_id).first()
return session.query(Campaign).filter_by(
political_party=pp,
election=election
).first() | 30.6 | 88 | 0.733333 | 91 | 765 | 5.967033 | 0.428571 | 0.088398 | 0.044199 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.156863 | 765 | 25 | 89 | 30.6 | 0.84186 | 0.028758 | 0 | 0.111111 | 0 | 0 | 0.044837 | 0.028533 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.166667 | 0.055556 | 0.388889 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8b3bf65902f5df3d7d2afeef5fe91c29fecdf077 | 711 | py | Python | tests/test_validators.py | ridone6/AVWX-API | 615f27df5c3c0e8ecdbfd0bba67fe54f65e63f6d | [
"MIT"
] | null | null | null | tests/test_validators.py | ridone6/AVWX-API | 615f27df5c3c0e8ecdbfd0bba67fe54f65e63f6d | [
"MIT"
] | null | null | null | tests/test_validators.py | ridone6/AVWX-API | 615f27df5c3c0e8ecdbfd0bba67fe54f65e63f6d | [
"MIT"
] | 1 | 2020-09-23T10:33:56.000Z | 2020-09-23T10:33:56.000Z | """
Michael duPont - michael@mdupont.com
tests/test_validators.py - Test parameter validators
"""
# library
import pytest
from voluptuous import Invalid
# module
import avwx_api.validators as validators
def test_splitin():
"""
Tests that SplitIn returns a split string only containing certain values
"""
validator = validators.SplitIn(("test", "values", "here"))
good_strings = ("test,values,here", "here", "values,test")
for string in good_strings:
assert string.split(",") == validator(string)
    bad_strings = ("testvalues", "test,stuff", "crazy,nulls", "what?", "really,")
for string in bad_strings:
with pytest.raises(Invalid):
validator(string)
| 27.346154 | 80 | 0.682138 | 84 | 711 | 5.690476 | 0.547619 | 0.041841 | 0.058577 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.19128 | 711 | 25 | 81 | 28.44 | 0.831304 | 0.250352 | 0 | 0 | 0 | 0 | 0.174853 | 0 | 0 | 0 | 0 | 0 | 0.083333 | 1 | 0.083333 | false | 0 | 0.25 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8b3cb1567dcbf6e80a939bd6354346e43a2a18ad | 314 | py | Python | yacht/utils/misc.py | IusztinPaul/portfolio-management | 42cb9d046201fedcc3e3b522af04c32cfcc571ed | [
"MIT"
] | 1 | 2021-07-22T13:44:20.000Z | 2021-07-22T13:44:20.000Z | yacht/utils/misc.py | IusztinPaul/portfolio-management | 42cb9d046201fedcc3e3b522af04c32cfcc571ed | [
"MIT"
] | null | null | null | yacht/utils/misc.py | IusztinPaul/portfolio-management | 42cb9d046201fedcc3e3b522af04c32cfcc571ed | [
"MIT"
] | null | null | null | def calc_chunksize(n_workers, len_iterable, factor=4):
"""Calculate chunksize argument for Pool-methods.
Resembles source-code within `multiprocessing.pool.Pool._map_async`.
"""
chunksize, extra = divmod(len_iterable, n_workers * factor)
if extra:
chunksize += 1
return chunksize
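

# Illustrative usage (pool size and workload are arbitrary):
#   import multiprocessing
#   chunksize = calc_chunksize(n_workers=8, len_iterable=10_000)
#   with multiprocessing.Pool(8) as pool:
#       results = pool.map(work, tasks, chunksize=chunksize)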
| 28.545455 | 72 | 0.707006 | 38 | 314 | 5.657895 | 0.684211 | 0.074419 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007937 | 0.197452 | 314 | 10 | 73 | 31.4 | 0.845238 | 0.369427 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8b3de86420a3a5739ac46c06ed28a78c9136f086 | 3,514 | py | Python | old_version/PITF_old.py | JasonLC506/CollaborativeFiltering | 055b9c2494c89357a269a8e0a1b5b2ed91aa0eae | [
"MIT"
] | 1 | 2020-07-28T09:49:59.000Z | 2020-07-28T09:49:59.000Z | old_version/PITF_old.py | JasonLC506/CollaborativeFiltering | 055b9c2494c89357a269a8e0a1b5b2ed91aa0eae | [
"MIT"
] | null | null | null | old_version/PITF_old.py | JasonLC506/CollaborativeFiltering | 055b9c2494c89357a269a8e0a1b5b2ed91aa0eae | [
"MIT"
] | null | null | null | """
pairwise interaction tensor factorization (PITF) model,
pairwise comparison among tags and pairwise additive decomposition of the tensor into user-tag and item-tag interactions, based on
Rendle, S. and Schmidt-Thieme, L., 2010, February. Pairwise interaction tensor factorization for personalized tag recommendation. In Proceedings of the third ACM international conference on Web search and data mining (pp. 81-90). ACM. [1]
"""
import numpy as np
from CDMultiClass import CD
SCALE = 0.01
class PITF(CD):
def __init__(self):
CD.__init__(self)
self.r_u = None
self.r_v = None
self.lamda = 0.000
self.SCALE = SCALE
def basicInitialize(self):
self.r_u = np.random.normal(0.0, self.SCALE, size = (self.L, self.k))
self.r_v = np.random.normal(0.0, self.SCALE, size = (self.L, self.k))
def update(self, instance, isamp):
uid, iid, lid = instance
lid_neg_list = [i for i in range(self.L)]
del lid_neg_list[lid]
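        # m[l] is the score of class l for this (user, item) pair: the
        # additive user-class + item-class factor interaction of PITF [1].
        # The loop below takes a BPR-style gradient step that pushes the
        # observed class `lid` above every other class.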
m = np.tensordot(self.r_u, self.u[uid], axes = (1,0)) + np.tensordot(self.r_v, self.v[iid], axes = (1,0))
delt_u = np.zeros(self.k, dtype=np.float64)
delt_v = np.zeros(self.k, dtype=np.float64)
delt_r_u = np.zeros([self.L, self.k], dtype=np.float64)
delt_r_v = np.zeros([self.L, self.k], dtype=np.float64)
for lid_neg in lid_neg_list:
delt = (1.0 - sigmoid(m[lid] - m[lid_neg]))
delt_u += (delt * (self.r_u[lid] - self.r_u[lid_neg]) - self.lamda * self.u[uid])
delt_v += (delt * (self.r_v[lid] - self.r_v[lid_neg]) - self.lamda * self.v[iid])
delt_r_u[lid] += (delt * self.u[uid] - self.lamda * self.r_u[lid])
delt_r_u[lid_neg] += (- delt * self.u[uid] - self.lamda * self.r_u[lid_neg])
delt_r_v[lid] += (delt * self.v[iid] - self.lamda * self.r_v[lid])
delt_r_v[lid_neg] += (- delt * self.v[iid] - self.lamda * self.r_v[lid_neg])
# update #
self.u[uid] += (self.SGDstep * delt_u)
self.v[iid] += (self.SGDstep * delt_v)
self.r_u += (self.SGDstep * delt_r_u)
self.r_v += (self.SGDstep * delt_r_v)
return self
def loss(self, test):
losssum = 0.0
Nsamp = 0
for samp in test.sample(random=False):
losssum += self.lossSingle(samp)
Nsamp += 1
for uid in self.u.keys():
losssum = losssum + self.lamda * np.power(np.linalg.norm(self.u[uid]),2)
for iid in self.v.keys():
losssum = losssum + self.lamda * np.power(np.linalg.norm(self.v[iid]),2)
losssum = losssum + self.lamda * (np.power(np.linalg.norm(self.r_u), 2) + np.power(np.linalg.norm(self.r_v), 2))
return losssum / Nsamp
def lossSingle(self, instance):
uid, iid, lid = instance
self.initialize(uid,iid,predict=True)
m = np.tensordot(self.r_u, self.u[uid], axes = (1,0)) + np.tensordot(self.r_v, self.v[iid], axes = (1,0))
lid_neg_list = [i for i in range(self.L)]
del lid_neg_list[lid]
loss = 0.0
for lid_neg in lid_neg_list:
loss += np.log(sigmoid(m[lid] - m[lid_neg]))
loss = - loss
return loss
def predict(self, uid, iid, distribution = False):
self.initialize(uid, iid, predict=True)
m = np.tensordot(self.r_u, self.u[uid], axes = (1,0)) + np.tensordot(self.r_v, self.v[iid], axes = (1,0))
return np.argmax(m)
def sigmoid(a):
return 1.0 / (1.0 + np.exp(-a)) | 42.853659 | 238 | 0.594195 | 570 | 3,514 | 3.533333 | 0.194737 | 0.054618 | 0.032771 | 0.047666 | 0.486594 | 0.45283 | 0.434955 | 0.398709 | 0.368918 | 0.338133 | 0 | 0.021399 | 0.255265 | 3,514 | 82 | 239 | 42.853659 | 0.748185 | 0.118099 | 0 | 0.203125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.109375 | false | 0 | 0.03125 | 0.015625 | 0.234375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8b3f8a7ed1ad3b93c99803e80ebfd47461faecd4 | 4,572 | py | Python | hand_eye_calibration/python/hand_eye_calibration/hand_eye_calibration_plotting_tools.py | Chatoyant19/handeye_calibration | 590c93eba0fef835d0be6da0d750f71e4891a8fb | [
"BSD-3-Clause"
] | 333 | 2017-09-25T03:24:05.000Z | 2022-03-31T12:09:13.000Z | hand_eye_calibration/python/hand_eye_calibration/hand_eye_calibration_plotting_tools.py | bygreencn/hand_eye_calibration | 5d5077572d650a5491040a4a90d98850df4cf068 | [
"BSD-3-Clause"
] | 40 | 2017-09-15T13:39:20.000Z | 2021-11-24T15:44:03.000Z | hand_eye_calibration/python/hand_eye_calibration/hand_eye_calibration_plotting_tools.py | bygreencn/hand_eye_calibration | 5d5077572d650a5491040a4a90d98850df4cf068 | [
"BSD-3-Clause"
] | 108 | 2017-09-19T02:34:35.000Z | 2022-03-18T10:08:34.000Z |
import matplotlib.pyplot as plt
from matplotlib.patches import FancyArrowPatch
from mpl_toolkits.mplot3d import (proj3d, Axes3D)
import numpy as np
from hand_eye_calibration.quaternion import Quaternion
class Arrow3D(FancyArrowPatch):
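  # FancyArrowPatch is 2D-only; this subclass keeps the 3D endpoints and
  # projects them onto the 2D canvas at draw time via proj3d.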
def __init__(self, xs, ys, zs, *args, **kwargs):
FancyArrowPatch.__init__(self, (0, 0), (0, 0), *args, **kwargs)
self._verts3d = xs, ys, zs
def draw(self, renderer):
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.set_positions((xs[0], ys[0]), (xs[1], ys[1]))
FancyArrowPatch.draw(self, renderer)
def compute_bbox_3D(poses_list):
bbox_min = np.zeros((len(poses_list), 3))
bbox_max = np.zeros((len(poses_list), 3))
for i in range(0, len(poses_list)):
poses = poses_list[i]
bbox_min[i, :] = np.amin(poses[:, 0:3], axis=0)
bbox_max[i, :] = np.amax(poses[:, 0:3], axis=0)
return (np.amax(bbox_max, axis=0), np.amin(bbox_min, axis=0))
def plot_poses(poses_list, plot_arrows=True, title="", blocking=True):
title_position = 1.05
fig = plt.figure()
plt.clf()
ax = Axes3D(fig)
if title:
fig.suptitle(title, fontsize='24')
colors = ['r', 'g', 'b', 'c', 'm', 'k']
num_colors = len(colors)
assert len(poses_list) < num_colors, (
"Need to define more colors to plot more trajectories!")
(bbox_max, bbox_min) = compute_bbox_3D(poses_list)
arrow_size = np.linalg.norm(bbox_max - bbox_min) * 0.05
arrow_width = 2
axis_min = np.amin(bbox_min)
axis_max = np.amax(bbox_max)
ax.set_xlim3d(axis_min, axis_max)
ax.set_ylim3d(axis_min, axis_max)
ax.set_zlim3d(axis_min, axis_max)
for i in range(0, len(poses_list)):
poses = poses_list[i].copy()
# Plot line.
positions = ax.plot(xs=poses[:, 0], ys=poses[:, 1],
zs=poses[:, 2], color=colors[i])
for pose in poses:
# Position point
ax.plot([pose[0]], [pose[1]], [pose[2]], 'o',
markersize=5, color=colors[i], alpha=0.5)
if not plot_arrows:
continue
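      # Draw the rotated body axes of each pose: x in red, y in green,
      # z in blue, scaled to ~5% of the trajectory bounding-box diagonal.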
rotation_quaternion = Quaternion(q=pose[3:7])
x_rotated = rotation_quaternion.rotate_vector([1, 0, 0, 0])
x_rotated *= arrow_size
a = Arrow3D(
[pose[0], pose[0] + x_rotated[0]
], [pose[1], pose[1] + x_rotated[1]],
[pose[2], pose[2] + x_rotated[2]],
mutation_scale=20,
lw=arrow_width,
arrowstyle="-|>",
color="r")
ax.add_artist(a)
y_rotated = rotation_quaternion.rotate_vector([0, 1, 0, 0])
y_rotated *= arrow_size
a = Arrow3D(
[pose[0], pose[0] + y_rotated[0]
], [pose[1], pose[1] + y_rotated[1]],
[pose[2], pose[2] + y_rotated[2]],
mutation_scale=20,
lw=arrow_width,
arrowstyle="-|>",
color="g")
ax.add_artist(a)
z_rotated = rotation_quaternion.rotate_vector([0, 0, 1, 0])
z_rotated *= arrow_size
a = Arrow3D(
[pose[0], pose[0] + z_rotated[0]
], [pose[1], pose[1] + z_rotated[1]],
[pose[2], pose[2] + z_rotated[2]],
mutation_scale=20,
lw=arrow_width,
arrowstyle="-|>",
color="b")
ax.add_artist(a)
ax.auto_scale_xyz([axis_min, axis_max], [
axis_min, axis_max], [axis_min, axis_max])
plt.show(block=blocking)
def plot_alignment_errors(errors_position, rmse_pose, errors_orientation,
rmse_orientation, blocking=True):
assert np.array_equal(errors_position.shape, errors_orientation.shape)
num_error_values = errors_position.shape[0]
title_position = 1.05
fig = plt.figure()
a1 = fig.add_subplot(2, 1, 1)
fig.suptitle("Alignment Evaluation", fontsize='24')
a1.set_title(
"Red = Position Error Norm [m] - Black = RMSE", y=title_position)
plt.plot(errors_position, c='r')
plt.plot(rmse_pose * np.ones((num_error_values, 1)), c='k')
a2 = fig.add_subplot(2, 1, 2)
a2.set_title(
"Red = Absolute Orientation Error [Degrees] - Black = RMSE", y=title_position)
plt.plot(errors_orientation, c='r')
plt.plot(rmse_orientation * np.ones((num_error_values, 1)), c='k')
if plt.get_backend() == 'TkAgg':
mng = plt.get_current_fig_manager()
max_size = mng.window.maxsize()
max_size = (max_size[0], max_size[1] * 0.45)
mng.resize(*max_size)
fig.tight_layout()
plt.subplots_adjust(left=0.025, right=0.975, top=0.8, bottom=0.05)
plt.show(block=blocking)
| 31.102041 | 84 | 0.627734 | 686 | 4,572 | 3.975219 | 0.249271 | 0.033003 | 0.025669 | 0.030803 | 0.380271 | 0.296296 | 0.20022 | 0.179685 | 0.121012 | 0.083608 | 0 | 0.040011 | 0.218285 | 4,572 | 146 | 85 | 31.315068 | 0.722999 | 0.005468 | 0 | 0.2 | 0 | 0 | 0.045344 | 0 | 0 | 0 | 0 | 0 | 0.017391 | 1 | 0.043478 | false | 0 | 0.069565 | 0 | 0.130435 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8b403f2a06db8daf6d77e02536351a2ffa3d1999 | 2,049 | py | Python | utils/DataFormatter.py | kaantecik/covid-prediction | 8d2a788cc93d07b4bfcffb9480b747e41e13848f | [
"MIT"
] | 1 | 2021-05-05T06:47:57.000Z | 2021-05-05T06:47:57.000Z | utils/DataFormatter.py | kaantecik/covid-prediction | 8d2a788cc93d07b4bfcffb9480b747e41e13848f | [
"MIT"
] | null | null | null | utils/DataFormatter.py | kaantecik/covid-prediction | 8d2a788cc93d07b4bfcffb9480b747e41e13848f | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import mean_absolute_error
class DataFormatter(object):
@staticmethod
def get_data(country=None, path=None):
df = pd.read_csv(path)
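        # Reshape the wide JHU-CSSE-style layout (one column per date) into a
        # long (Country, Date, Cases) frame, dropping per-province detail.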
df = df.drop(columns=["Province/State", "Lat", "Long"])
df = (df.melt(id_vars=['Country/Region'],
var_name='Date', value_name='Cases').assign(Date=lambda x: pd.to_datetime(x['Date'])))
index = pd.date_range(start='2020-1-22', end='2021-4-27', freq="D")
if country:
df = df.loc[df['Country/Region'] == country]
df = df.reset_index()
df = df.drop(columns=["index"])
df = pd.Series([value['Cases']
for key, value in df.iterrows()], index=index)
return df
else:
data = []
for date in index:
case = int(df.loc[df['Date'] == date].sum(axis=0).values[1])
data.append(case)
total_cases = pd.Series(data, index=index)
return total_cases
@staticmethod
def mse(actual, predicted):
difference_array = np.subtract(actual, predicted)
squared_array = np.square(difference_array)
mse = squared_array.mean()
return mse
@staticmethod
def mae(actual, predicted):
return mean_absolute_error(actual, predicted)
@staticmethod
def r_square(actual, predicted):
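        # R^2 computed as the squared Pearson correlation between the actual
        # and predicted series.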
correlation_matrix = np.corrcoef(actual, predicted)
correlation_xy = correlation_matrix[0, 1]
r_squared = correlation_xy ** 2
return round(r_squared, 5)
@staticmethod
def draw(data, forecast):
plt.figure(figsize=(12, 8))
        # Plot each series once and keep the Line2D handles for the legend.
        line1, = plt.plot(data, marker='o', color='black')
        line2, = plt.plot(forecast, marker='o', color='blue')
plt.legend([line1, line2], ['Test', 'Forecast'])
plt.show()
| 34.15 | 108 | 0.584675 | 254 | 2,049 | 4.61811 | 0.440945 | 0.076726 | 0.040921 | 0.025575 | 0.100597 | 0.100597 | 0.100597 | 0 | 0 | 0 | 0 | 0.018268 | 0.278673 | 2,049 | 59 | 109 | 34.728814 | 0.775372 | 0 | 0 | 0.1 | 0 | 0 | 0.062958 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.08 | 0.02 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8b43f5ce83fe89d5b91dcaf42b2b3a83264d1771 | 566 | py | Python | tools/token_types.py | RiatTahiri/gabbs | b0a9acc2fb868a8037ee3e2eee9134d31cca411d | [
"MIT"
] | null | null | null | tools/token_types.py | RiatTahiri/gabbs | b0a9acc2fb868a8037ee3e2eee9134d31cca411d | [
"MIT"
] | null | null | null | tools/token_types.py | RiatTahiri/gabbs | b0a9acc2fb868a8037ee3e2eee9134d31cca411d | [
"MIT"
] | null | null | null | from enum import Enum
class Types(Enum):
LEFT_PAREN = 101
RIGHT_PAREN = 102
LEFT_BRACE = 103
RIGHT_BRACE = 104
COMMA = 105
DOT = 106
MINUS = 107
PLUS = 108
SEMICOLON = 109
SLASH = 110
STAR = 111
    # NOTE: the original listed the members below as bare comma-separated
    # names, which is a NameError inside an Enum body; the numeric values
    # continuing the 1xx scheme are assumptions added to make the enum valid.
    BANG = 112
    BANG_EQUAL = 113
    EQUAL = 114
    EQUAL_EQUAL = 115
    GREATER = 116
    GREATER_EQUAL = 117
    LESS = 118
    LESS_EQUAL = 119
    IDENTIFIER = 120
    STRING = 121
    NUMBER = 122
    AND = 123
    CLASS = 124
    ELSE = 125
    FALSE = 126
    FUN = 127
    FOR = 128
    IF = 129
    NIL = 130
    OR = 131
    PRINT = 132
    RETURN = 133
    SUPER = 134
    THIS = 135
    TRUE = 136
    VAR = 137
    WHILE = 138
EOF = -0 | 12.304348 | 21 | 0.519435 | 66 | 566 | 4.333333 | 0.772727 | 0.104895 | 0.104895 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.10089 | 0.404594 | 566 | 46 | 22 | 12.304348 | 0.747774 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.02439 | 0 | 0.341463 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8b479e6fd1fb470817da900e89adc53b74973c6b | 224 | py | Python | example.py | leitao-bcc/studious-engine | a38b04e9f70b82c1451ba5bfc20c26a0a190d9b1 | [
"Unlicense"
] | null | null | null | example.py | leitao-bcc/studious-engine | a38b04e9f70b82c1451ba5bfc20c26a0a190d9b1 | [
"Unlicense"
] | null | null | null | example.py | leitao-bcc/studious-engine | a38b04e9f70b82c1451ba5bfc20c26a0a190d9b1 | [
"Unlicense"
] | null | null | null | from os import getcwd
from race.race import Racing
def main():
racing_obj = Racing()
racing_obj.parser_logfile('{}/data/log_example'.format(getcwd()))
print(racing_obj)
if __name__ == "__main__":
main() | 16 | 69 | 0.683036 | 30 | 224 | 4.666667 | 0.6 | 0.192857 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.183036 | 224 | 14 | 70 | 16 | 0.765027 | 0 | 0 | 0 | 0 | 0 | 0.12 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.25 | 0 | 0.375 | 0.125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8b4b27460ec741891e3e3e6b670a42d8a780ce8b | 909 | py | Python | backend/run.py | codestrange/calendario-matcom | fa5a742d3ae3e8f6c7635022a6984409b731ea92 | [
"MIT"
] | 8 | 2019-02-12T20:03:03.000Z | 2020-09-08T03:51:25.000Z | backend/run.py | codestrange/calendario-matcom | fa5a742d3ae3e8f6c7635022a6984409b731ea92 | [
"MIT"
] | 11 | 2021-02-01T05:17:42.000Z | 2021-04-27T05:13:46.000Z | backend/run.py | codestrange/calendario-matcom | fa5a742d3ae3e8f6c7635022a6984409b731ea92 | [
"MIT"
] | 4 | 2019-03-06T22:13:56.000Z | 2021-02-03T05:37:43.000Z | from os import getenv
from app import create_app
from app.database import db, Course, Event, Group, Interval, Local, Notification, Option, \
Permission, Resource, Role, Student, Tag, Teacher, User, Vote, UserGroupNotification
app = create_app(getenv('FLASK_CONFIG') or 'default')
@app.shell_context_processor
def make_shell_context():
return dict(app=app, db=db, Course=Course, Event=Event, Group=Group, Interval=Interval,
Local=Local, Notification=Notification, Option=Option, Permission=Permission,
Resource=Resource, Role=Role, Student=Student, Tag=Tag, Teacher=Teacher,
User=User, Vote=Vote, UserGroupNotification=UserGroupNotification)
@app.cli.command()
def init():
insert(Role, 'roles')
insert(Interval, 'intervals')
def insert(model, name):
print(f'Inserting {name} ...')
model.insert()
print(f'Inserted {name} - OK')
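# Typical invocation (an assumption about the deployment, not part of this
# file): with FLASK_APP pointing at this module, "flask init" runs the init()
# command above and "flask shell" picks up make_shell_context().
#
#     $ export FLASK_APP=run.py
#     $ flask init
#     $ flask shell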
| 33.666667 | 93 | 0.706271 | 110 | 909 | 5.772727 | 0.436364 | 0.022047 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.172717 | 909 | 26 | 94 | 34.961538 | 0.844415 | 0 | 0 | 0 | 0 | 0 | 0.080308 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.157895 | false | 0 | 0.157895 | 0.052632 | 0.368421 | 0.105263 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8b4b47722179eef3ae76ed74451ae1ccc107cfc2 | 3,955 | py | Python | core/modules/config.py | tangb/cleep-desktop | 7e333b0ce8445fad86216c4b51b1ade8c21695fd | [
"MIT"
] | 2 | 2020-07-31T13:24:05.000Z | 2022-03-10T08:44:06.000Z | core/modules/config.py | tangb/cleep-desktop | 7e333b0ce8445fad86216c4b51b1ade8c21695fd | [
"MIT"
] | 6 | 2020-04-09T16:44:28.000Z | 2022-02-22T11:26:24.000Z | core/modules/config.py | tangb/cleep-desktop | 7e333b0ce8445fad86216c4b51b1ade8c21695fd | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*
import logging

from functools import reduce
from core.utils import CleepDesktopModule
class Config(CleepDesktopModule):
"""
Config module. Handles application configuration
"""
def __init__(self, context, app_config, debug_enabled):
"""
Constructor
Args:
context (AppContext): application context
app_config (AppConfig): application config instance
debug_enabled (bool): True if debug is enabled
"""
CleepDesktopModule.__init__(self, context, debug_enabled)
# members
self.app_config = app_config
def set_config_value(self, key, value):
"""
Save specified value on specified key
Args:
            key (string): key to update. Can be a deep key like xxx.yyy.zzz
            value (any): value to store at the key
Returns:
bool: True if value updated
"""
def walk(node, keys, value):
key = keys.pop(0)
if len(keys)==0:
# leaf, update value
if key in node.keys():
node[key] = value
return True
else:
# self.context.main_logger.debug('+++++++++ Key "%s" not found' % key)
return False
elif key in node.keys():
return walk(node[key], keys, value)
else:
# self.context.main_logger.debug('----------Key "%s" not found' % key)
return False
config = self.app_config.load_config()
if walk(config, key.split('.'), value):
return self.set_config(config)
return False
def set_config(self, config):
"""
Save config file.
Args:
config (dict): config to save.
Returns:
bool: True if file successfully saved, False otherwise
"""
old = self.app_config.load_config()
# process debug flag
        if old['cleep']['debug'] != config['cleep']['debug']:
            # setLevel() takes a logging level rather than a boolean; mapping
            # the debug flag to DEBUG/INFO is an assumption about the intent
            debug = config['cleep']['debug']
            self.context.main_logger.setLevel(logging.DEBUG if debug else logging.INFO)
            for _, module in self.context.modules.items():
                module.set_debug(debug)
# process crashreport flag
if old['cleep']['crashreport']!=config['cleep']['crashreport']:
if config['cleep']['crashreport']:
self.crash_report.enable()
else:
self.crash_report.disable()
return self.app_config.save_config(config)
def get_config_value(self, key):
"""
Return config value for specified key
Args:
key (string): config key. Can be deep key like xxx.yyy.zzz
Returns:
any: config key value
"""
config = self.app_config.load_config()
return self.__deep_get(config, key)
def get_config(self):
"""
Returns config
Returns:
dict: config file content
"""
return {
'config': self.app_config.load_config(),
'logs': self.context.log_filepath,
'cachedir': self.context.paths.cache,
}
def __deep_get(self, dictionary, keys, default=None):
"""
Deep dict value get with complex key "part1.part2.part3"
Note:
https://stackoverflow.com/a/46890853
Args:
            dictionary (dict): dict to search onto
keys (string): key (x.x.x)
default (any): default value when nothing found
Returns:
any: value or default if not found
"""
return reduce(lambda d, key: d.get(key, default) if isinstance(d, dict) else default, keys.split("."), dictionary)
| 29.736842 | 122 | 0.538306 | 427 | 3,955 | 4.871194 | 0.29274 | 0.052885 | 0.0375 | 0.040385 | 0.236538 | 0.183654 | 0.125962 | 0.1 | 0.1 | 0.1 | 0 | 0.005501 | 0.356511 | 3,955 | 132 | 123 | 29.962121 | 0.811788 | 0.312263 | 0 | 0.215686 | 0 | 0 | 0.041684 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.137255 | false | 0 | 0.039216 | 0 | 0.392157 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8b4ba1e1247e6d819ae250ab5b176460d8aa3e9b | 4,285 | py | Python | example.py | brego81/Legrand-BTicino-Python-API | e1c2eb1677fd356306c0755e677a957c108f6b6e | [
"Apache-2.0"
] | null | null | null | example.py | brego81/Legrand-BTicino-Python-API | e1c2eb1677fd356306c0755e677a957c108f6b6e | [
"Apache-2.0"
] | 1 | 2022-01-06T17:18:19.000Z | 2022-01-09T10:26:00.000Z | example.py | brego81/Legrand-BTicino-Python-API | e1c2eb1677fd356306c0755e677a957c108f6b6e | [
"Apache-2.0"
] | null | null | null | from LegrandBiticinoAPI import LegrandBiticinoAPI
import pprint as pp
import datetime
API = LegrandBiticinoAPI()
# Test the API using an echo endpoint as per documentation
# https://portal.developer.legrand.com/docs/services/echo-api/operations/create-resource
out = API.echo()
if out['status_code'] == 200:
print("It works fine!")
else:
raise SystemExit("ERROR: " + str(out['status_code']))
# Plants - Operation used to retrieve all the plants associated to a user.
# https://portal.developer.legrand.com/docs/services/smartherV2/operations/Plants
plants = API.get_plants()
plantId = plants['text']['plants'][0]['id']
print("plantId = " + str(plantId))
# Topology - Operation used to retrieve the complete topology of a plant.
# https://portal.developer.legrand.com/docs/services/smartherV2/operations/Topology
modules = API.get_topology(plantId)
moduleId = modules['text']['plant']['modules'][0]['id']
print("moduleId = " + str(moduleId))
# Chronothermostat Measures - Operation used to retrieve the measured temperature and humidity detected by a chronothermostat.
# https://portal.developer.legrand.com/docs/services/smartherV2/operations/Chronothermostat-Measures
out = API.get_chronothermostat_measures(plantId, moduleId)
if out['status_code'] == 200:
print('Chronothermostat measures = ')
pp.pprint(out['text'])
else:
raise SystemExit("ERROR -> " + str(out))
# Chronothermostat ProgramList - Operation used to retrieve the list of programs managed by a chronothermostat.
# https://portal.developer.legrand.com/docs/services/smartherV2/operations/Chronothermostat-ProgramList
out = API.get_chronothermostat_programlist(plantId, moduleId)
if out['status_code'] == 200:
print('Chronothermostat programlist = ')
pp.pprint(out['text'])
else:
raise SystemExit("ERROR -> " + str(out))
# Get Chronothermostat Status - Operation used to retrieve the complete status of a chronothermostat.
# https://portal.developer.legrand.com/docs/services/smartherV2/operations/Get-Chronothermostat-Status
status = API.get_chronothermostat_status(plantId, moduleId)
if out['status_code'] == 200:
print('Chronothermostat status = ')
pp.pprint(status['text'])
else:
raise SystemExit("ERROR -> " + str(status))
# As an example, we want to set the mode to AUTOMATIC if a temperature is manually defined
if status['text']['chronothermostats'][0]['mode'] != 'AUTOMATIC':
    print("setPoint = " + status['text']['chronothermostats'][0]['setPoint']['value'] + "\n")
data = {
"function": "heating", "mode": "AUTOMATIC",
"setPoint": { "value": "18.20000", "unit": "C" },
"programs": [ { "number": 1 }],
"activationTime": datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S")
}
# Set Chronothermostat Status - Operation used to set the status of a chronothermostat.
# https://portal.developer.legrand.com/docs/services/smartherV2/operations/Set-Chronothermostat-Status
out = API.set_chronothermostat_status(plantId, moduleId, data)
print(str(out) + "\n")
# Get subscriptions to C2C notifications - Operation used to get subscriptions of a user to get Cloud2Cloud notifications of a plant.
# https://portal.developer.legrand.com/docs/services/smartherV2/operations/Get-subscriptions-to-C2C-notifications
subscriptions = API.get_subscriptions_C2C_notifications()
if subscriptions['status_code'] == 204:
print("No subscription associated with this user")
elif subscriptions['status_code'] == 200:
pp.pprint(subscriptions['text'])
# Subscribe to C2C notifications - Operation used to subscribe a user to get Cloud2Cloud notifications of a plant.
# https://portal.developer.legrand.com/docs/services/smartherV2/operations/Subscribe-to-C2C-notifications
data = {"EndPointUrl": "http://www.example.com"}
out = API.set_subscribe_C2C_notifications(plantId, data)
print(str(out['status_code']) + " " + out['text'])
# Delete subscription to C2C notifications - Operation used to delete the subscription of a user to get Cloud2Cloud notifications of a plant.
# https://portal.developer.legrand.com/docs/services/smartherV2/operations/Delete-subscription-to-C2C-notifications
subscriptionId = '123'
out = API.delete_subscribe_C2C_notifications(plantId, subscriptionId)
print(str(out['status_code']) + " " + out['text']) | 49.825581 | 141 | 0.744457 | 532 | 4,285 | 5.943609 | 0.225564 | 0.034788 | 0.063251 | 0.085389 | 0.517078 | 0.450032 | 0.370651 | 0.339658 | 0.339658 | 0.268817 | 0 | 0.014358 | 0.122287 | 4,285 | 86 | 142 | 49.825581 | 0.826376 | 0.482614 | 0 | 0.259259 | 0 | 0 | 0.257741 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.055556 | 0 | 0.055556 | 0.296296 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8c68d7cb9c3f3209998f1769aded9bdf8c7b676c | 3,417 | py | Python | tst/test_fbmatrix.py | sharky5102/fbmatrix | 6558e1b249d94908d92a6475b07ebf9beae776a1 | [
"BSD-2-Clause"
] | 4 | 2021-01-31T07:06:33.000Z | 2022-01-22T09:28:21.000Z | tst/test_fbmatrix.py | sharky5102/fbmatrix | 6558e1b249d94908d92a6475b07ebf9beae776a1 | [
"BSD-2-Clause"
] | 3 | 2021-03-02T20:31:41.000Z | 2021-12-18T12:52:59.000Z | tst/test_fbmatrix.py | sharky5102/fbmatrix | 6558e1b249d94908d92a6475b07ebf9beae776a1 | [
"BSD-2-Clause"
] | 3 | 2021-08-02T17:10:24.000Z | 2022-02-14T11:24:49.000Z | import sys
import numpy as np
import OpenGL.GL as gl
import OpenGL.GLUT as glut
import time
import fbo
import signal
import displays.ws2811
import displays.hub75e
import geometry.simple
import assembly.tree
import fbmatrix
import unittest
from OpenGL.GL.EXT.framebuffer_object import *
def hub75_decompose(data):
pixels = np.frombuffer(data, dtype=[('r', 'B'), ('g', 'B'), ('b', 'B'), ('a', 'B')])
channels = {
'D': ('r', 0),
'LAT': ('r', 1),
'A': ('r', 2),
'B2': ('r', 3),
'E': ('r', 4),
'B': ('r', 6),
'C': ('r', 7),
'R2': ('g', 0),
'G1': ('g', 1),
'G2': ('g', 4),
'CLK': ('g', 5),
'OE': ('b', 0),
'R1': ('b', 1),
'B1': ('b', 2)
}
output = {}
for name, source in channels.items():
channel = np.bitwise_and(pixels[source[0]], 1 << source[1])
channel = np.where(channel > 0, np.ubyte(ord('1')), np.ubyte(ord('_')))
output[name] = channel.tobytes().decode('utf-8')
return output
def scanlines(data, stride):
if len(data) % stride != 0:
raise RuntimeError('Data len %d not divisible by stride %d' % (len(data), stride))
end = len(data)
for i in range(0, int(len(data)/stride)):
yield data[end-(i+1)*stride:end-i*stride]
def parseFrameData(data, width):
for scanline in scanlines(data, width * 4):
yield hub75_decompose(scanline)
def hub75ToText(data, width):
    n = 0
    for decomposed in parseFrameData(data, width):
        yield 'Scanline %d' % n
        for chan in ['A', 'B', 'C', 'D', 'E', 'OE', 'LAT', 'CLK', 'R1', 'G1', 'B1', 'R2', 'G2', 'B2']:
            yield '%05s %s' % (chan, decomposed[chan])
        n += 1
class TestHub75(unittest.TestCase):
height = 194
width = 4096
maxDiff = None
    def setUp(self):
        # unittest runs this hook before each test only under the name setUp
        pass
def testPatternWhite(self):
gl.glClearColor(1,1,1,1)
gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
def writeFrameData(self, filename, data):
with open(filename, 'wt') as f:
for line in hub75ToText(data, self.width):
f.write(line + '\n')
def assertFrameData(self, filename, data):
self.writeFrameData(filename + '.new', data)
with open(filename, 'rt') as f:
for expected, actual in zip(f.readlines(), hub75ToText(data, self.width)):
                self.assertEqual(expected.rstrip(), actual)
def testSimple16Scan(self):
self.renderer = fbmatrix.renderer()
screen = fbo.FBO(self.width, self.height)
with screen:
self.renderer.render = lambda: self.testPatternWhite()
self.renderer.display()
        data = gl.glReadPixels(0, 0, self.width, self.height, gl.GL_RGBA, gl.GL_UNSIGNED_BYTE, None)
self.assertFrameData('tst/data/hub75_32x32_white.txt', data)
def testFieldFirstOrder(self):
self.renderer = fbmatrix.renderer(order='field-first')
screen = fbo.FBO(self.width, self.height)
with screen:
self.renderer.render = lambda: self.testPatternWhite()
self.renderer.display()
        data = gl.glReadPixels(0, 0, self.width, self.height, gl.GL_RGBA, gl.GL_UNSIGNED_BYTE, None)
self.assertFrameData('tst/data/hub75_fieldfirst_32x32_white.txt', data)
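# These cases are ordinary unittest.TestCase tests, so (assuming a working
# OpenGL context is available for the fbo framebuffers) they can be collected
# the usual way, e.g. "python -m unittest tst.test_fbmatrix" from the repo root.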
| 29.973684 | 104 | 0.560433 | 433 | 3,417 | 4.371824 | 0.34642 | 0.012678 | 0.020602 | 0.02113 | 0.234548 | 0.20074 | 0.20074 | 0.20074 | 0.20074 | 0.20074 | 0 | 0.04099 | 0.2789 | 3,417 | 113 | 105 | 30.238938 | 0.727273 | 0 | 0 | 0.113636 | 0 | 0 | 0.066452 | 0.020785 | 0 | 0 | 0 | 0 | 0.045455 | 1 | 0.113636 | false | 0.011364 | 0.159091 | 0 | 0.329545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8c6e96f01c0a7304a221f7c9aae7d4b2f501a42a | 8,181 | py | Python | keystone-moon/keystone/catalog/routers.py | hashnfv/hashnfv-moon | daaba34fa2ed4426bc0fde359e54a5e1b872208c | [
"Apache-2.0"
] | null | null | null | keystone-moon/keystone/catalog/routers.py | hashnfv/hashnfv-moon | daaba34fa2ed4426bc0fde359e54a5e1b872208c | [
"Apache-2.0"
] | 1 | 2019-08-18T09:25:49.000Z | 2019-08-18T09:25:49.000Z | keystone-moon/keystone/catalog/routers.py | hashnfv/hashnfv-moon | daaba34fa2ed4426bc0fde359e54a5e1b872208c | [
"Apache-2.0"
] | 1 | 2021-03-21T11:38:30.000Z | 2021-03-21T11:38:30.000Z | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from keystone.catalog import controllers
from keystone.common import json_home
from keystone.common import router
from keystone.common import wsgi
build_resource_relation = functools.partial(
json_home.build_v3_extension_resource_relation,
extension_name='OS-EP-FILTER', extension_version='1.0')
build_parameter_relation = functools.partial(
json_home.build_v3_extension_parameter_relation,
extension_name='OS-EP-FILTER', extension_version='1.0')
ENDPOINT_GROUP_PARAMETER_RELATION = build_parameter_relation(
parameter_name='endpoint_group_id')
class Routers(wsgi.RoutersBase):
"""API for the keystone catalog.
The API Endpoint Filter looks like::
PUT /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
GET /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
HEAD /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
DELETE /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}
GET /OS-EP-FILTER/endpoints/{endpoint_id}/projects
GET /OS-EP-FILTER/projects/{project_id}/endpoints
GET /OS-EP-FILTER/projects/{project_id}/endpoint_groups
GET /OS-EP-FILTER/endpoint_groups
POST /OS-EP-FILTER/endpoint_groups
GET /OS-EP-FILTER/endpoint_groups/{endpoint_group_id}
HEAD /OS-EP-FILTER/endpoint_groups/{endpoint_group_id}
PATCH /OS-EP-FILTER/endpoint_groups/{endpoint_group_id}
DELETE /OS-EP-FILTER/endpoint_groups/{endpoint_group_id}
GET /OS-EP-FILTER/endpoint_groups/{endpoint_group_id}/projects
GET /OS-EP-FILTER/endpoint_groups/{endpoint_group_id}/endpoints
PUT /OS-EP-FILTER/endpoint_groups/{endpoint_group}/projects/
{project_id}
GET /OS-EP-FILTER/endpoint_groups/{endpoint_group}/projects/
{project_id}
HEAD /OS-EP-FILTER/endpoint_groups/{endpoint_group}/projects/
{project_id}
DELETE /OS-EP-FILTER/endpoint_groups/{endpoint_group}/projects/
{project_id}
"""
PATH_PREFIX = '/OS-EP-FILTER'
PATH_PROJECT_ENDPOINT = '/projects/{project_id}/endpoints/{endpoint_id}'
PATH_ENDPOINT_GROUPS = '/endpoint_groups/{endpoint_group_id}'
PATH_ENDPOINT_GROUP_PROJECTS = PATH_ENDPOINT_GROUPS + (
'/projects/{project_id}')
def append_v3_routers(self, mapper, routers):
regions_controller = controllers.RegionV3()
endpoint_filter_controller = controllers.EndpointFilterV3Controller()
endpoint_group_controller = controllers.EndpointGroupV3Controller()
project_endpoint_group_controller = (
controllers.ProjectEndpointGroupV3Controller())
routers.append(router.Router(regions_controller,
'regions', 'region',
resource_descriptions=self.v3_resources))
# Need to add an additional route to support PUT /regions/{region_id}
mapper.connect(
'/regions/{region_id}',
controller=regions_controller,
action='create_region_with_id',
conditions=dict(method=['PUT']))
routers.append(router.Router(controllers.ServiceV3(),
'services', 'service',
resource_descriptions=self.v3_resources))
routers.append(router.Router(controllers.EndpointV3(),
'endpoints', 'endpoint',
resource_descriptions=self.v3_resources))
self._add_resource(
mapper, endpoint_filter_controller,
path=self.PATH_PREFIX + '/endpoints/{endpoint_id}/projects',
get_action='list_projects_for_endpoint',
rel=build_resource_relation(resource_name='endpoint_projects'),
path_vars={
'endpoint_id': json_home.Parameters.ENDPOINT_ID,
})
self._add_resource(
mapper, endpoint_filter_controller,
path=self.PATH_PREFIX + self.PATH_PROJECT_ENDPOINT,
get_head_action='check_endpoint_in_project',
put_action='add_endpoint_to_project',
delete_action='remove_endpoint_from_project',
rel=build_resource_relation(resource_name='project_endpoint'),
path_vars={
'endpoint_id': json_home.Parameters.ENDPOINT_ID,
'project_id': json_home.Parameters.PROJECT_ID,
})
self._add_resource(
mapper, endpoint_filter_controller,
path=self.PATH_PREFIX + '/projects/{project_id}/endpoints',
get_action='list_endpoints_for_project',
rel=build_resource_relation(resource_name='project_endpoints'),
path_vars={
'project_id': json_home.Parameters.PROJECT_ID,
})
self._add_resource(
mapper, endpoint_group_controller,
path=self.PATH_PREFIX + '/projects/{project_id}/endpoint_groups',
get_action='list_endpoint_groups_for_project',
rel=build_resource_relation(
resource_name='project_endpoint_groups'),
path_vars={
'project_id': json_home.Parameters.PROJECT_ID,
})
self._add_resource(
mapper, endpoint_group_controller,
path=self.PATH_PREFIX + '/endpoint_groups',
get_action='list_endpoint_groups',
post_action='create_endpoint_group',
rel=build_resource_relation(resource_name='endpoint_groups'))
self._add_resource(
mapper, endpoint_group_controller,
path=self.PATH_PREFIX + self.PATH_ENDPOINT_GROUPS,
get_head_action='get_endpoint_group',
patch_action='update_endpoint_group',
delete_action='delete_endpoint_group',
rel=build_resource_relation(resource_name='endpoint_group'),
path_vars={
'endpoint_group_id': ENDPOINT_GROUP_PARAMETER_RELATION
})
self._add_resource(
mapper, project_endpoint_group_controller,
path=self.PATH_PREFIX + self.PATH_ENDPOINT_GROUP_PROJECTS,
get_head_action='get_endpoint_group_in_project',
put_action='add_endpoint_group_to_project',
delete_action='remove_endpoint_group_from_project',
rel=build_resource_relation(
resource_name='endpoint_group_to_project_association'),
path_vars={
'project_id': json_home.Parameters.PROJECT_ID,
'endpoint_group_id': ENDPOINT_GROUP_PARAMETER_RELATION
})
self._add_resource(
mapper, endpoint_group_controller,
path=self.PATH_PREFIX + self.PATH_ENDPOINT_GROUPS + (
'/projects'),
get_action='list_projects_associated_with_endpoint_group',
rel=build_resource_relation(
resource_name='projects_associated_with_endpoint_group'),
path_vars={
'endpoint_group_id': ENDPOINT_GROUP_PARAMETER_RELATION
})
self._add_resource(
mapper, endpoint_group_controller,
path=self.PATH_PREFIX + self.PATH_ENDPOINT_GROUPS + (
'/endpoints'),
get_action='list_endpoints_associated_with_endpoint_group',
rel=build_resource_relation(
resource_name='endpoints_in_endpoint_group'),
path_vars={
'endpoint_group_id': ENDPOINT_GROUP_PARAMETER_RELATION
})
| 44.704918 | 78 | 0.663 | 893 | 8,181 | 5.701008 | 0.160134 | 0.112355 | 0.043214 | 0.042428 | 0.672952 | 0.593597 | 0.546455 | 0.514241 | 0.45001 | 0.308387 | 0 | 0.003911 | 0.249847 | 8,181 | 182 | 79 | 44.950549 | 0.825648 | 0.237379 | 0 | 0.439024 | 0 | 0 | 0.190912 | 0.123897 | 0 | 0 | 0 | 0 | 0 | 1 | 0.00813 | false | 0 | 0.04065 | 0 | 0.089431 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8c7093c6406e83e10ac8bb087f77ed632e0fff46 | 1,520 | py | Python | Notebooks/Brain-Tumor-Detection/displayTumor.py | Abhijit2505/Grokking-Machine-Learning- | e088eeecacaa93d0bc87478b20d3401b5699224e | [
"MIT"
] | 43 | 2020-12-18T17:18:22.000Z | 2022-03-10T08:09:45.000Z | Notebooks/Brain-Tumor-Detection/displayTumor.py | Abhijit2505/Grokking-Machine-Learning- | e088eeecacaa93d0bc87478b20d3401b5699224e | [
"MIT"
] | 232 | 2020-12-24T20:33:30.000Z | 2021-05-28T16:03:13.000Z | Notebooks/Brain-Tumor-Detection/displayTumor.py | Abhijit2505/Grokking-Machine-Learning- | e088eeecacaa93d0bc87478b20d3401b5699224e | [
"MIT"
] | 94 | 2020-12-21T18:17:36.000Z | 2021-12-14T17:37:56.000Z | import numpy as np
import cv2 as cv
class DisplayTumor:
curImg = 0
Img = 0
def readImage(self, img):
self.Img = np.array(img)
self.curImg = np.array(img)
gray = cv.cvtColor(np.array(img), cv.COLOR_BGR2GRAY)
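        # Otsu chooses the threshold automatically; THRESH_BINARY_INV makes
        # the darker regions the foreground mask used by the steps below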
self.ret, self.thresh = cv.threshold(gray, 0, 255, cv.THRESH_BINARY_INV + cv.THRESH_OTSU)
def getImage(self):
return self.curImg
# noise removal
def removeNoise(self):
self.kernel = np.ones((3, 3), np.uint8)
opening = cv.morphologyEx(self.thresh, cv.MORPH_OPEN, self.kernel, iterations=2)
self.curImg = opening
def displayTumor(self):
# sure background area
sure_bg = cv.dilate(self.curImg, self.kernel, iterations=3)
# Finding sure foreground area
dist_transform = cv.distanceTransform(self.curImg, cv.DIST_L2, 5)
ret, sure_fg = cv.threshold(dist_transform, 0.7 * dist_transform.max(), 255, 0)
# Find unknown region
sure_fg = np.uint8(sure_fg)
unknown = cv.subtract(sure_bg, sure_fg)
# Marker labelling
ret, markers = cv.connectedComponents(sure_fg)
# Add one to all labels so that sure background is not 0, but 1
markers = markers + 1
# Now mark the region of unknown with zero
markers[unknown == 255] = 0
markers = cv.watershed(self.Img, markers)
self.Img[markers == -1] = [255, 0, 0]
tumorImage = cv.cvtColor(self.Img, cv.COLOR_HSV2BGR)
self.curImg = tumorImage | 31.666667 | 97 | 0.628289 | 208 | 1,520 | 4.509615 | 0.403846 | 0.063966 | 0.031983 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.032374 | 0.268421 | 1,520 | 48 | 98 | 31.666667 | 0.811151 | 0.133553 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.137931 | false | 0 | 0.068966 | 0.034483 | 0.344828 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8c7265999435df509fcf7bb2ce25c3a081ad847b | 3,196 | py | Python | macaw/core/mrc/drqa_mrc.py | SouvickG/macaw | 7d55d25c364118d45e3d62f05a29d912be2ebe9c | [
"MIT"
] | 146 | 2019-12-19T23:15:58.000Z | 2022-03-18T02:48:48.000Z | macaw/core/mrc/drqa_mrc.py | SouvickG/macaw | 7d55d25c364118d45e3d62f05a29d912be2ebe9c | [
"MIT"
] | 6 | 2020-02-11T11:43:32.000Z | 2022-02-14T16:11:38.000Z | macaw/core/mrc/drqa_mrc.py | SouvickG/macaw | 7d55d25c364118d45e3d62f05a29d912be2ebe9c | [
"MIT"
] | 43 | 2019-12-21T08:40:25.000Z | 2022-01-10T08:14:35.000Z | import os
import sys
from abc import ABC, abstractmethod
import drqa
from drqa.reader import Predictor
"""
A wrapper to the DrQA model from FAIR: https://github.com/facebookresearch/DrQA
Authors: Hamed Zamani (hazamani@microsoft.com)
"""
from macaw.core.retrieval.doc import Document
class MRC(ABC):
@abstractmethod
def __init__(self, params):
"""
An abstract class for machine reading comprehension models implemented in Macaw.
Args:
params(dict): A dict containing some mandatory and optional parameters.
"""
self.params = params
@abstractmethod
def get_results(self, conv_list, doc):
"""
This method is called to get the answer(s) to a question.
Args:
conv_list(list): List of util.msg.Message, each corresponding to a conversational message from / to the
user. This list is in reverse order, meaning that the first elements is the last interaction made by user.
doc(Document): A document (core.retrieval.doc.Document) that potentially contains the answer.
Returns:
            The inheriting class should implement this method and return a list of Documents, each containing a candidate
answer and its confidence score.
"""
pass
class DrQA(MRC):
def __init__(self, params):
"""
A machine reading comprehension model based on DrQA (https://github.com/facebookresearch/DrQA).
Args:
params(dict): A dict of parameters. Required parameters are:
'mrc_path': The path to the DrQA repository.
'corenlp_path': The path to the Stanford's corenlp toolkit. DrQA requires corenlp.
'mrc_model_path': The path to the learned DrQA parameters.
'qa_results_requested': The maximum number of candidate answers that should be found by DrQA.
"""
super().__init__(params)
sys.path.insert(0, self.params['mrc_path'])
drqa.tokenizers.set_default('corenlp_classpath', os.path.join(self.params['corenlp_path'], '*'))
self.predictor = Predictor(self.params['mrc_model_path'], tokenizer='simple', num_workers=0, normalize=False)
def get_results(self, conv_list, doc):
"""
This method returns the answers to the question.
Args:
conv_list(list): List of util.msg.Message, each corresponding to a conversational message from / to the
user. This list is in reverse order, meaning that the first elements is the last interaction made by user.
doc(Document): A document (core.retrieval.doc.Document) that potentially contains the answer.
Returns:
Returns a list of Documents each containing a candidate answer and its confidence score. The length of this
list is less than or equal to the parameter 'qa_results_requested'.
"""
q = conv_list[0].text
predictions = self.predictor.predict(doc, q, None, self.params['qa_results_requested'])
results = []
for i, p in enumerate(predictions, 1):
results.append(Document(None, None, p[0], p[1]))
return results
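# A minimal wiring sketch (the paths are placeholders/assumptions, not
# shipped defaults; the keys are the ones documented in the docstring above):
#
#     reader = DrQA({'mrc_path': '/opt/DrQA',
#                    'corenlp_path': '/opt/corenlp',
#                    'mrc_model_path': '/opt/DrQA/data/reader/model.mdl',
#                    'qa_results_requested': 3})
#     answers = reader.get_results(conv_list, doc)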
| 37.6 | 120 | 0.663329 | 416 | 3,196 | 5.009615 | 0.341346 | 0.019194 | 0.023033 | 0.018714 | 0.426104 | 0.352207 | 0.352207 | 0.352207 | 0.352207 | 0.315739 | 0 | 0.002527 | 0.257197 | 3,196 | 84 | 121 | 38.047619 | 0.875316 | 0.544743 | 0 | 0.230769 | 0 | 0 | 0.07457 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.153846 | false | 0.038462 | 0.230769 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8c72dc73b844c661f77097244741e4573ea0a98e | 463 | py | Python | LeetCode/Easy/implement_strstr.py | CajetanP/programming-exercises | aee01ff3208ab14e7d0e0a7077798342123bc3e6 | [
"MIT"
] | 1 | 2017-06-23T16:39:17.000Z | 2017-06-23T16:39:17.000Z | LeetCode/Easy/implement_strstr.py | CajetanP/coding-exercises | aee01ff3208ab14e7d0e0a7077798342123bc3e6 | [
"MIT"
] | 10 | 2021-05-09T00:06:22.000Z | 2021-09-02T12:07:41.000Z | LeetCode/Easy/implement_strstr.py | mrkajetanp/programming-exercises | aee01ff3208ab14e7d0e0a7077798342123bc3e6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
def strStr(haystack: str, needle: str) -> int:
if len(needle) == 0:
return 0
if len(haystack) == 0:
return -1
for i in range(len(haystack)):
if i+len(needle) > len(haystack):
return -1
for j in range(len(needle)):
if needle[j] == haystack[i+j]:
if j == len(needle)-1:
return i
else:
break
return -1
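# Quick self-checks (behavior verified against the implementation above):
assert strStr("hello", "ll") == 2
assert strStr("aaaaa", "bba") == -1
assert strStr("abc", "") == 0
assert strStr("", "a") == -1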
| 21.045455 | 46 | 0.464363 | 60 | 463 | 3.583333 | 0.366667 | 0.167442 | 0.093023 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.029304 | 0.410367 | 463 | 21 | 47 | 22.047619 | 0.758242 | 0.045356 | 0 | 0.2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8c7381c4fc0f9e79f1152b55fbb18bb5f202027f | 1,174 | py | Python | automation/create_db.py | kartikeyas00/cowin-automate | e32c1d5187349d9e68e8a0dc4325adedf49d7a1e | [
"MIT"
] | 15 | 2021-05-26T13:51:33.000Z | 2022-03-28T15:48:49.000Z | automation/create_db.py | krish-ag/cowin-automate | fa824ccdb7004893f12e4f15c3a22959f3916a3a | [
"MIT"
] | 1 | 2021-05-24T05:28:35.000Z | 2021-05-24T08:55:53.000Z | automation/create_db.py | kartikeyas00/cowin-automate | e32c1d5187349d9e68e8a0dc4325adedf49d7a1e | [
"MIT"
] | 3 | 2021-05-24T03:52:09.000Z | 2021-05-27T04:49:00.000Z | from automation.read_config import DATBASE_URL
from sqlite3 import Error
from automation.utils import create_connection
SQL_CREATE_DAILY_STATISTICS = """
CREATE TABLE IF NOT EXISTS daily_statistics (
id integer PRIMARY KEY,
district_name text NOT NULL,
min_age_limit integer NOT NULL,
vaccine text NOT NULL,
available_capacity integer NOT NULL,
timestamp datetime
);
"""
def create_table(conn, create_table_sql):
"""
Create table from the create_table_sql statement.
Parameters
----------
conn : sqlite3.Connection
Sqlite3 connection object.
create_table_sql : str
Create table sql statement.
Returns
-------
None.
"""
try:
c = conn.cursor()
c.execute(create_table_sql)
conn.commit()
except Error as e:
conn.rollback()
print(e)
def run():
conn = create_connection(DATBASE_URL)
# create table
if conn is not None:
# create daily_statistics table
create_table(conn, SQL_CREATE_DAILY_STATISTICS)
else:
print("Error! cannot create the database connection.")
| 21.740741 | 62 | 0.642249 | 138 | 1,174 | 5.268116 | 0.427536 | 0.151307 | 0.096286 | 0.066025 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003567 | 0.283646 | 1,174 | 53 | 63 | 22.150943 | 0.86088 | 0.213799 | 0 | 0 | 0 | 0 | 0.371692 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074074 | false | 0 | 0.111111 | 0 | 0.185185 | 0.074074 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8c7486ea7b4fa9b8b9465470a75b89af68eb25e2 | 411 | py | Python | users/urls.py | amjadcp/bookingLine-grpA-miniProject | f57fc06f85edfb08f9c170757fddbf7b6de6f35a | [
"PostgreSQL",
"Unlicense",
"MIT"
] | null | null | null | users/urls.py | amjadcp/bookingLine-grpA-miniProject | f57fc06f85edfb08f9c170757fddbf7b6de6f35a | [
"PostgreSQL",
"Unlicense",
"MIT"
] | null | null | null | users/urls.py | amjadcp/bookingLine-grpA-miniProject | f57fc06f85edfb08f9c170757fddbf7b6de6f35a | [
"PostgreSQL",
"Unlicense",
"MIT"
] | null | null | null | from django.urls import path
from .views import *
app_name='users'
urlpatterns = [
path('signup-client', signup_client, name='signup-client'),
path('signup-serviceprovider', signup_serviceprovider, name='signup-serviceprovider'),
path('profile', profile, name='profile'),
path('dashboard', dashboard, name='dashboard'),
path('dashboard-client', dashboard_client, name='dashboard-client'),
] | 37.363636 | 90 | 0.725061 | 46 | 411 | 6.391304 | 0.326087 | 0.122449 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.116788 | 411 | 11 | 91 | 37.363636 | 0.809917 | 0 | 0 | 0 | 0 | 0 | 0.337379 | 0.106796 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8c7753814f7329c609a7c87d2124c9854cf924ef | 905 | py | Python | setup.py | AlbertoMira/Project | 38289707475252235098499174bb59b794a480d8 | [
"MIT"
] | null | null | null | setup.py | AlbertoMira/Project | 38289707475252235098499174bb59b794a480d8 | [
"MIT"
] | null | null | null | setup.py | AlbertoMira/Project | 38289707475252235098499174bb59b794a480d8 | [
"MIT"
] | null | null | null | import re
import ast
from setuptools import setup
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('preprocessing/__init__.py', 'rb') as f:
version = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
setup(
name='Project2',
version=version,
description='Import, preprocess and interpret EMG data for hand prosthesis',
url='https://github.com/AlbertoMira/Project.git',
license='MIT',
author='Alberto Mira Criado, Johannes Payr',
    author_email='ma8237@mci4me.at, j.payr@mci4me.at',
platforms='any',
packages=[
'preprocessing'
],
install_requires=[
'numpy',
'scipy',
'click'
],
entry_points='''
[console_scripts]
preprocess=preprocessing.main:main
'''
)
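# After "pip install .", the console_scripts entry point above exposes a
# "preprocess" command that dispatches to preprocessing.main:main.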
| 23.205128 | 84 | 0.566851 | 95 | 905 | 5.221053 | 0.757895 | 0.03629 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014151 | 0.297238 | 905 | 38 | 85 | 23.815789 | 0.765723 | 0 | 0 | 0.068966 | 0 | 0 | 0.391593 | 0.089602 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.137931 | 0 | 0.137931 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8c7a40598f725b8f3ad34f2dba276b032d2670d2 | 2,283 | py | Python | Today.py | U-Sharma/Today | 185b117b9c34fc2a0f94a7909d17da3b3ce3e0e3 | [
"MIT"
] | 1 | 2019-06-04T11:46:14.000Z | 2019-06-04T11:46:14.000Z | Today.py | U-Sharma/Today | 185b117b9c34fc2a0f94a7909d17da3b3ce3e0e3 | [
"MIT"
] | null | null | null | Today.py | U-Sharma/Today | 185b117b9c34fc2a0f94a7909d17da3b3ce3e0e3 | [
"MIT"
] | null | null | null | # Datetime 1
from datetime import date
def is_leap(ayear): # returns True if ayear is leap and False otherwise
ayear = int(ayear)
if ayear%400 == 0:
return True
elif ayear%100 == 0:
return False
elif ayear%4 == 0:
return True
else:
return False
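# Spot checks of the Gregorian rules above (illustrative):
assert is_leap(2000) is True     # divisible by 400
assert is_leap(1900) is False    # century year not divisible by 400
assert is_leap(2024) is True     # ordinary multiple of 4
assert is_leap(2023) is False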
def what_day_today(): # returns the day today
date_today = date.today() # today's date
year_today = int(date_today.year) # this year
month_today = int(date_today.month) # this month
day_today = int(date_today.day) # this day in this month
days_total = day_today # Initialize days_total
    month_ = month_today - 1 # Initialize month_, the number of full months past this year
    year_ = year_today - 1 # Initialize year_, the number of full years past
while month_ > 0: # Looping through month_ to calculate days past this year
if month_ == 2: # For a february
if is_leap(year_today): # In a leap year
days_total += 29 # Add 29 days to days_total
else: # Not in a leap year
days_total += 28 # Add 28 days to days_total
        elif month_ in (1, 3, 5, 7, 8, 10, 12): # For the months with 31 days
            days_total += 31 # Add 31 to days_total
else: # For the months with 30 days
days_total += 30 # Add 30 to days_total
month_ -= 1 # Move to the previous month
while year_ >= 1970: # Looping through year_ to calculate number of days past from 1970
if is_leap(year_): # For a leap year
days_total += 366 # Add 366 to days_total
else: # For a year which is not leap
days_total += 365 # Add 365 days to days_total
year_ -= 1 # Move to the previous year
    # 1 Jan 1970 was a Thursday, and days_total counts it as 1, so
    # days_total % 7 == 0 corresponds to the Wednesday just before the epoch
if days_total%7 == 0:
print("Wednesday")
elif days_total%7 == 1:
print("Thursday")
elif days_total%7 == 2:
print("Friday")
elif days_total%7 == 3:
print("Saturday")
elif days_total%7 == 4:
print("Sunday")
elif days_total%7 == 5:
print("Monday")
else:
print("Tuesday")
what_day_today() | 39.362069 | 148 | 0.593079 | 335 | 2,283 | 3.871642 | 0.247761 | 0.138782 | 0.050887 | 0.053971 | 0.100231 | 0.03084 | 0 | 0 | 0 | 0 | 0 | 0.054284 | 0.330267 | 2,283 | 58 | 149 | 39.362069 | 0.793983 | 0.339028 | 0 | 0.176471 | 0 | 0 | 0.035112 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.039216 | false | 0 | 0.019608 | 0 | 0.137255 | 0.137255 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8c7c0ef0d22188d4f2f1d359d75764702b46655e | 1,804 | py | Python | msbdev/views.py | mattburlage/msb.dev | b2c574646d5ada35fbe5a236cbb1dec3793f4995 | [
"MIT"
] | null | null | null | msbdev/views.py | mattburlage/msb.dev | b2c574646d5ada35fbe5a236cbb1dec3793f4995 | [
"MIT"
] | null | null | null | msbdev/views.py | mattburlage/msb.dev | b2c574646d5ada35fbe5a236cbb1dec3793f4995 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from msbdev.models import AppSetting, ContactForm, TextCopy, WorkItem
from msbdev.serializers import ContactFormSerializer
def index(request):
""" Handle standard page view"""
# Render template
context = {
'show_numbers': AppSetting.get_setting('show_numbers'),
'show_cur_numbers': AppSetting.get_setting('show_cur_numbers'),
'about_me_text': TextCopy.get_html('about_me_text'),
'work_items': WorkItem.objects.filter(active=True).order_by('order'),
}
return render(request, 'msbdev/msbdev2.html', context)
def index2(request):
""" Handle standard page view"""
# Render template
return render(request, 'msbdev/msbdev3.html')
@api_view(['POST'])
def submit_form(request):
serializer = ContactFormSerializer(data=request.data)
if serializer.is_valid():
email = serializer.validated_data['email']
note = serializer.validated_data['note']
try:
if email in AppSetting.objects.get(name="EMAIL_BLACKLIST").content:
return Response(status=status.HTTP_401_UNAUTHORIZED)
except AppSetting.DoesNotExist:
pass
existing_form = ContactForm.objects.filter(email=email, note=note)
if existing_form:
existing_form = existing_form[0]
existing_form.copies += 1
existing_form.save()
return Response(data=serializer.data, status=status.HTTP_200_OK)
serializer.save()
return Response(data=serializer.data, status=status.HTTP_200_OK)
else:
return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)
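# Illustrative request against submit_form (the URL path is an assumption;
# the actual route lives in the project's urls.py, which is not shown here):
#
#     curl -X POST https://example.invalid/api/contact/ \
#          -H 'Content-Type: application/json' \
#          -d '{"email": "a@b.com", "note": "hello"}'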
| 32.214286 | 83 | 0.700111 | 209 | 1,804 | 5.851675 | 0.392345 | 0.058872 | 0.05233 | 0.068684 | 0.214227 | 0.163532 | 0.163532 | 0.093213 | 0.093213 | 0.093213 | 0 | 0.011806 | 0.201774 | 1,804 | 55 | 84 | 32.8 | 0.8375 | 0.046563 | 0 | 0.054054 | 0 | 0 | 0.095545 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081081 | false | 0.027027 | 0.162162 | 0 | 0.405405 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8c7c4372702da805ee0f1b31fd8aadb800975492 | 2,599 | py | Python | take_snapshot.py | LightningAA/YouTube-Playlist-Snapshot | 46a90659fb099ee1e17780d10c9ac9b3b3e68240 | [
"MIT"
] | 1 | 2021-09-16T17:52:19.000Z | 2021-09-16T17:52:19.000Z | take_snapshot.py | LightningAA/YouTube-Playlist-Snapshot | 46a90659fb099ee1e17780d10c9ac9b3b3e68240 | [
"MIT"
] | null | null | null | take_snapshot.py | LightningAA/YouTube-Playlist-Snapshot | 46a90659fb099ee1e17780d10c9ac9b3b3e68240 | [
"MIT"
] | null | null | null | # https://pypi.org/project/requests/
import requests
import json
from datetime import datetime
import os
api_key = input('Enter your api key: (you can get one at https://console.developers.google.com/)\n')
#api_key = 'hard_coded_api_key'
playlist_id = input('Enter your playlist id:\n')
#playlist_id = 'hard_coded_playlist_id'
payload = {'part': 'snippet', 'playlistId': playlist_id,
'maxResults': 50, 'pageToken': None, 'key': api_key}
response = requests.get('https://youtube.googleapis.com/youtube/v3/playlistItems', params=payload)
response_json = response.json()
videos_in_playlist = []
videos_processed = 0
while True:
video_ids = []
for item in response_json['items']:
video_ids.append(item['snippet']['resourceId']['videoId'])
# make HTTP GET request
video_payload = {'part': 'snippet', 'maxResults': 50, 'id': ','.join(video_ids), 'key': api_key}
video_response = requests.get('https://youtube.googleapis.com/youtube/v3/videos', params=video_payload)
video_response.raise_for_status()
video_json = video_response.json()
for video_resource in video_json['items']:
video_snippet = video_resource['snippet']
videos_in_playlist.append({'title': video_snippet['title'], 'channelTitle': video_snippet['channelTitle']})
    # the last page has no nextPageToken, so stop before indexing it; the
    # original condition required both tokens to be absent, which raised a
    # KeyError on the final page (previousPageToken present, nextPageToken not)
    if 'nextPageToken' not in response_json:
        break
    payload['pageToken'] = response_json['nextPageToken']
response = requests.get('https://youtube.googleapis.com/youtube/v3/playlistItems', params=payload)
response.raise_for_status()
response_json = response.json()
videos_processed += response_json['pageInfo']['resultsPerPage']
print(str(videos_processed) + ' of ' + str(response_json['pageInfo']['totalResults']) + ' videos processed.', end='\r', flush=True)
# get playlist name
playlist_payload = {'part': 'snippet', 'id': playlist_id, 'key': api_key}
playlist_response = requests.get("https://youtube.googleapis.com/youtube/v3/playlists", playlist_payload)
playlist_response.raise_for_status()
playlist_json = playlist_response.json()
playlist_name = playlist_json['items'][0]['snippet']['title']
playlist_snapshot_count = len([f for f in os.listdir('.') if os.path.isfile(f) and f.startswith(playlist_name)])
snapshot_file_name = f"{playlist_name} Snapshot #{playlist_snapshot_count + 1}.json"
with open(snapshot_file_name, 'w') as file:
file.write(json.dumps({'timeTaken': datetime.now().isoformat(), 'playlistId': playlist_id, 'videos': videos_in_playlist}))
print('Finished taking snapshot successfully.') | 43.316667 | 135 | 0.727588 | 334 | 2,599 | 5.446108 | 0.314371 | 0.079164 | 0.041781 | 0.052776 | 0.182518 | 0.153931 | 0.153931 | 0.153931 | 0.153931 | 0.095657 | 0 | 0.004833 | 0.124279 | 2,599 | 60 | 136 | 43.316667 | 0.794376 | 0.054636 | 0 | 0.097561 | 0 | 0.02439 | 0.299755 | 0.010196 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.097561 | 0 | 0.097561 | 0.04878 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8c7da5a8ffd720e5cba6875c433374448d66b500 | 1,408 | py | Python | libc/AOR_v20.02/math/tools/plot.py | mkinsner/llvm | 589d48844edb12cd357b3024248b93d64b6760bf | [
"Apache-2.0"
] | 2,338 | 2018-06-19T17:34:51.000Z | 2022-03-31T11:00:37.000Z | libc/AOR_v20.02/math/tools/plot.py | mkinsner/llvm | 589d48844edb12cd357b3024248b93d64b6760bf | [
"Apache-2.0"
] | 3,740 | 2019-01-23T15:36:48.000Z | 2022-03-31T22:01:13.000Z | libc/AOR_v20.02/math/tools/plot.py | mkinsner/llvm | 589d48844edb12cd357b3024248b93d64b6760bf | [
"Apache-2.0"
] | 500 | 2019-01-23T07:49:22.000Z | 2022-03-30T02:59:37.000Z | #!/usr/bin/env python
# ULP error plot tool.
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
import numpy as np
import matplotlib.pyplot as plt
import sys
import re
# example usage:
# build/bin/ulp -e .0001 log 0.5 2.0 2345678 | math/tools/plot.py
def fhex(s):
return float.fromhex(s)
def parse(f):
xs = []
gs = []
ys = []
es = []
# Has to match the format used in ulp.c
r = re.compile(r'[^ (]+\(([^ )]*)\) got ([^ ]+) want ([^ ]+) [^ ]+ ulp err ([^ ]+)')
for line in f:
m = r.match(line)
if m:
x = fhex(m.group(1))
g = fhex(m.group(2))
y = fhex(m.group(3))
e = float(m.group(4))
xs.append(x)
gs.append(g)
ys.append(y)
es.append(e)
elif line.startswith('PASS') or line.startswith('FAIL'):
# Print the summary line
print(line)
return xs, gs, ys, es
def plot(xs, gs, ys, es):
if len(xs) < 2:
print('not enough samples')
return
a = min(xs)
b = max(xs)
fig, (ax0,ax1) = plt.subplots(nrows=2)
es = np.abs(es) # ignore the sign
emax = max(es)
ax0.text(a+(b-a)*0.7, emax*0.8, '%s\n%g'%(emax.hex(),emax))
ax0.plot(xs,es,'r.')
ax0.grid()
ax1.plot(xs,ys,'r.',label='want')
ax1.plot(xs,gs,'b.',label='got')
ax1.grid()
ax1.legend()
plt.show()
xs, gs, ys, es = parse(sys.stdin)
plot(xs, gs, ys, es)
| 22.349206 | 85 | 0.609375 | 255 | 1,408 | 3.364706 | 0.466667 | 0.027972 | 0.034965 | 0.04662 | 0.027972 | 0 | 0 | 0 | 0 | 0 | 0 | 0.033246 | 0.18821 | 1,408 | 62 | 86 | 22.709677 | 0.71741 | 0.276278 | 0 | 0 | 0 | 0 | 0.109127 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0.022222 | 0.088889 | 0.022222 | 0.222222 | 0.044444 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8c7dcb29d1059858eefa9223cd4270409a444b6c | 9,667 | py | Python | espa_validation/validate_data/qa_images.py | jakebrinkmann/lagoon-vampire-bat | 799050568741f5aa22f36d3b5be8dbee935e5926 | [
"Unlicense"
] | null | null | null | espa_validation/validate_data/qa_images.py | jakebrinkmann/lagoon-vampire-bat | 799050568741f5aa22f36d3b5be8dbee935e5926 | [
"Unlicense"
] | null | null | null | espa_validation/validate_data/qa_images.py | jakebrinkmann/lagoon-vampire-bat | 799050568741f5aa22f36d3b5be8dbee935e5926 | [
"Unlicense"
] | null | null | null | # qa_images.py
import os
import logging
import numpy as np
def do_diff(test, mast, nodata=False):
"""Do image diff, break if the grids are not the same size.
Args:
test <numpy.ndarray>: array of test raster
        mast <numpy.ndarray>: array of master raster
        nodata: nodata value to mask out of both arrays before differencing (default=False)
    """
if nodata:
test = np.ma.masked_where(test == nodata, test)
mast = np.ma.masked_where(mast == nodata, mast)
logging.info("Making nodata value {0} from diff calc.".format(nodata))
    try:
        ## TODO: Figure out why some bands cannot be compared correctly.
        # np.float was removed from NumPy, so cast with np.float64 instead
        diff = test.astype(np.float64) - mast.astype(np.float64)
        return diff
    except (ValueError, AttributeError, TypeError) as e:
        logging.warning("Error: {0}".format(e))
        return False
def call_stats(test, mast, rast_arr, fn_out, dir_out, rast_num=0):
"""Call stats function(s) if data are valid
Args:
test <str>: name of test file
mast <str>: name of master file
rast_arr <numpy.ndarray>: array of target raster
fn_out <str>: file path of image
dir_out <str>: path to output directory
rast_num <int>: individual number of image (default=0)
"""
    # bind the stats module directly so the bare "stats" name used below
    # resolves ("import espa_validation.validate_data.stats" only binds the
    # top-level package); os is already imported at module level
    from espa_validation.validate_data import stats
    from espa_validation.validate_data.file_io import ImWrite
if isinstance(rast_arr, (np.ndarray, np.ma.core.MaskedArray)):
if np.any(rast_arr != 0):
logging.warning("Image difference found!")
logging.warning("Test: {0} | Master: {1}".format(test, mast))
# find file name (for saving plot)
fout = fn_out.split(os.sep)[-1]
# do stats of difference
stats.img_stats(test, mast, rast_arr, os.path.dirname(fn_out),
fout, dir_out, rast_num)
# plot diff image
ImWrite.plot_diff_image(test, mast, rast_arr, fout, "diff_" +
str(rast_num), dir_out)
# plot abs diff image
ImWrite.plot_diff_image(test, mast, rast_arr, fout, "abs_diff_" +
str(rast_num), dir_out, do_abs=True)
# plot diff histograms
ImWrite.plot_hist(test, mast, rast_arr, fout, "diff_" +
str(rast_num), dir_out)
else:
logging.info("Binary data match.")
else:
logging.warning("Target raster is not a valid numpy array or numpy "
"masked array. Cannot run statistics!")
class ArrayImage:
@staticmethod
def check_images(test, mast):
"""Read in a generic (non-geographic) image, like JPEG, and do a diff
Return diff raster if actually different
Args:
test <str>: path to test image
mast <str>: path to master image
"""
try:
from scipy.misc import imread
except ImportError:
from scipy.ndimage import imread
# read images
try:
test_im = imread(test)
mast_im = imread(mast)
except ImportError:
logging.warning("Likely missing Python Image Library (PIL).")
# try Scikit Image
from skimage.io import imread
try:
mast_im = imread(mast)
test_im = imread(test)
except (ValueError, TypeError, ImportError):
logging.warning("Not able to open image with skimag.io. Likely"
" missing image library.")
return None
# check diff
try:
diff_im = do_diff(test_im, mast_im)
            # np.nonzero returns a tuple of index arrays (one per axis), so
            # count the nonzero elements to test whether any pixels differ
            if np.count_nonzero(diff_im) > 0:
logging.error("Values differ between {0} and {1}.".
format(test, mast))
return diff_im
else:
logging.info("Values equivalent between {0} and {1}.".
format(test, mast))
return None
except ValueError:
logging.error("Image {0} and {1} are not the same dimensions.".
format(test, mast))
def sha256_checksum(filename, block_size=65536):
import hashlib
sha256 = hashlib.sha256()
with open(filename, 'rb') as f:
for block in iter(lambda: f.read(block_size), b''):
sha256.update(block)
return sha256.hexdigest()
class GeoImage:
@staticmethod
def check_images(test, mast, dir_out, ext, include_nd=False):
"""Compare the test and master images, both for their raw contents and
geographic parameters. If differences exist, produce diff plot + CSV
stats file.
Args:
test <str>: path to test image
mast <str>: path to master image
dir_out <str>: path to output directory
ext <str>: file extension
include_nd <bool>: incl. nodata values in file cmp (default=False)
"""
from espa_validation.validate_data.image_io import RasterIO, RasterCmp
from espa_validation.validate_data.file_io import Cleanup, Find
from itertools import zip_longest
print("Checking {0} files...".format(ext))
# clean up non-matching files
test, mast = Cleanup.remove_nonmatching_files(test, mast)
# make sure there are actually files to check
if mast is None or test is None:
logging.error("No {0} files to check in test and/or mast "
"directories.".format(ext))
return False
print('+++++ %100s +++++ %100s' % ('TESTING', 'MASTER'))
for n, (i, j) in enumerate(zip_longest(test, mast)):
logging.debug('%2d: [%100s] %2d: [%100s]' % (n, os.path.basename(str(i)),
n, os.path.basename(str(j))))
order = zip_longest(range(len(test)), range(len(mast)))
# if raw_input('Need to re-order the comparisons? (Y/[n]): ') == 'Y':
# order = input('Enter new indexing ([0,9], [1,2], [2,1]...)\n\n: ')
# do other comparison checks, return stats + plots if diffs exist
for (ix, jx) in order:
i, j = test[ix], mast[jx]
logging.info("Checking Test {0} against Master {1}".format(i, j))
if os.path.getsize(i) == os.path.getsize(j):
hash1, hash2 = sha256_checksum(i), sha256_checksum(j)
if hash1 == hash2:
logging.info("Geo files {0} and {1} are the same size and hash ({2})".format(i, j, hash1))
continue
# Open each raster
ds_test = RasterIO.open_raster(i)
ds_mast = RasterIO.open_raster(j)
# Compare various raster parameters
status = []
status.append(RasterCmp.compare_proj_ref(ds_test, ds_mast))
status.append(RasterCmp.compare_geo_trans(ds_test, ds_mast))
status.append(RasterCmp.extent_diff_cols(ds_test, ds_mast))
status.append(RasterCmp.extent_diff_rows(ds_test, ds_mast))
# If any above tests fail, go to next iteration
if any(stat == False for stat in status):
continue
# Count number of sub-bands in the files
d_range = Find.count(i, ds_test, j, ds_mast, ext)
if d_range is None:
logging.critical("Number of files different; data cannot be "
"tested successfully.")
continue
# if sub-bands exist, read them one-by-one and do diffs + stats
if d_range > 1:
for ii in range(0, d_range):
# Get the first band from each raster
if ext == ".img":
logging.info("Reading sub-band {0} from .img {1}...".format(ii, i))
                        ds_tband = RasterIO.read_band_as_array(ds_test, ii)
                        ds_mband = RasterIO.read_band_as_array(ds_mast, ii)
                        # this reader variant returns only the array; define
                        # t_nd so the nodata check further down has a value
                        t_nd = None
else:
logging.info("Reading .hdf/.nc SDS {0} from file {1}...".format(ii, i))
sds_tband = RasterIO.open_raster(RasterIO.get_sds(ds_test)[ii][0])
sds_mband = RasterIO.open_raster(RasterIO.get_sds(ds_mast)[ii][0])
ds_tband, t_nd = RasterIO.read_band_as_array(sds_tband)
ds_mband, m_nd = RasterIO.read_band_as_array(sds_mband)
# do diff
if type(t_nd) is type(None) or include_nd:
diff = do_diff(ds_tband, ds_mband)
else:
diff = do_diff(ds_tband, ds_mband, nodata=int(t_nd))
# call stats functions to write out results/plots/etc.
call_stats(i, j, diff, i, dir_out, rast_num=ii)
else: # else it's a singleband raster
logging.info("Reading {0}...".format(i))
# read in band as array
ds_tband, t_nd = RasterIO.read_band_as_array(ds_test)
ds_mband, m_nd = RasterIO.read_band_as_array(ds_mast)
# do diff
if type(t_nd) is type(None) or include_nd:
diff = do_diff(ds_tband, ds_mband)
else:
diff = do_diff(ds_tband, ds_mband, nodata=int(t_nd))
# call stats functions to write out results/plots/etc.
call_stats(i, j, diff, i, dir_out)
| 38.361111 | 110 | 0.552912 | 1,224 | 9,667 | 4.227941 | 0.239379 | 0.02628 | 0.014879 | 0.02087 | 0.268019 | 0.247536 | 0.230531 | 0.196329 | 0.155169 | 0.113043 | 0 | 0.01367 | 0.349229 | 9,667 | 251 | 111 | 38.513944 | 0.808933 | 0.205855 | 0 | 0.28169 | 0 | 0 | 0.111513 | 0 | 0 | 0 | 0 | 0.003984 | 0 | 1 | 0.035211 | false | 0 | 0.119718 | 0 | 0.21831 | 0.014085 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8c7f25e891fd7d2f2b9e55adb1773dc050888db1 | 1,818 | py | Python | tests/test_ifcollector.py | jgrugru/ifcollector | bb9153f6a06ff81a86c3ea2a0b4db2c7e8451dd4 | [
"MIT"
] | null | null | null | tests/test_ifcollector.py | jgrugru/ifcollector | bb9153f6a06ff81a86c3ea2a0b4db2c7e8451dd4 | [
"MIT"
] | null | null | null | tests/test_ifcollector.py | jgrugru/ifcollector | bb9153f6a06ff81a86c3ea2a0b4db2c7e8451dd4 | [
"MIT"
] | null | null | null | from pytest import mark, raises
from re import search

from ifcollector import ifandstatement, iforstatement, CannotEvaluateExpression


def matches_email_regex(value):
    match_object = search(r"^(\w|\.|\_|\-)+[@](\w|\_|\-|\.)+[.]\w{2,3}$", value)
    return bool(match_object)


is_valid_test_str = [
    str.isalnum,
    "len(value) > 5",
    "value == 'Testing'",
    lambda value: value == "Testing",
]

is_valid_gmail = [
    "len(value) > 5",
    "'@' in value",
    matches_email_regex,
    "'gmail.com' in value",
    lambda value: bool(search(r"^(\w|\.|\_|\-)+[@](\w|\_|\-|\.)+[.]\w{2,3}$", value)),
]


@mark.parametrize(
    "value, ifstatement, expression_list, expected_result",
    [
        ("Test String", ifandstatement, is_valid_test_str, False),
        ("Test ", ifandstatement, is_valid_test_str, False),
        ("Testing", ifandstatement, is_valid_test_str, True),
        ("Testing1", ifandstatement, is_valid_test_str, False),
        ("Test String", iforstatement, is_valid_test_str, True),
        ("Test ", iforstatement, is_valid_test_str, False),
        ("Testing", iforstatement, is_valid_test_str, True),
        ("Testing1", iforstatement, is_valid_test_str, True),
        ("jeff.gruenbaum@gmail.com", ifandstatement, is_valid_gmail, True),
        ("jeff.gruenbaum@yahoo.com", ifandstatement, is_valid_gmail, False),
        ("@gmail.com", ifandstatement, is_valid_gmail, False),
        (" @gmail.com", ifandstatement, is_valid_gmail, False),
    ],
)
def test_ifstatements(value, ifstatement, expression_list, expected_result):
    assert ifstatement(value, *expression_list, debug=True) == expected_result


def test_CannotEvaluateExpression():
    with raises(CannotEvaluateExpression):
        ifandstatement("Test String", lambda x, y: print("I am the lambda"), debug=True)
| 34.961538 | 88 | 0.662266 | 210 | 1,818 | 5.471429 | 0.280952 | 0.085292 | 0.086162 | 0.109661 | 0.489121 | 0.465622 | 0.191471 | 0.127067 | 0.097476 | 0.097476 | 0 | 0.005344 | 0.176568 | 1,818 | 51 | 89 | 35.647059 | 0.762191 | 0 | 0 | 0.097561 | 0 | 0 | 0.209021 | 0.073707 | 0 | 0 | 0 | 0 | 0.02439 | 1 | 0.073171 | false | 0 | 0.073171 | 0 | 0.170732 | 0.02439 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8c7f835cd12e745e1bc1ba6d7ed4716788846c28 | 698 | py | Python | keras_dgl/layers/graph_ops.py | michael-cowan/keras-deep-graph-learning | 36854d374df931d063ada1c7ea3a5a2d67d3a8e4 | [
"MIT"
] | 100 | 2018-05-23T17:31:31.000Z | 2022-03-28T14:17:19.000Z | keras_dgl/layers/graph_ops.py | michael-cowan/keras-deep-graph-learning | 36854d374df931d063ada1c7ea3a5a2d67d3a8e4 | [
"MIT"
] | 9 | 2018-12-03T06:50:20.000Z | 2021-07-15T10:15:48.000Z | keras_dgl/layers/graph_ops.py | michael-cowan/keras-deep-graph-learning | 36854d374df931d063ada1c7ea3a5a2d67d3a8e4 | [
"MIT"
] | 55 | 2018-11-20T12:54:07.000Z | 2022-03-29T09:54:25.000Z | import keras.backend as K
import tensorflow as tf
def graph_conv_op(x, num_filters, graph_conv_filters, kernel):
if len(x.get_shape()) == 2:
conv_op = K.dot(graph_conv_filters, x)
conv_op = tf.split(conv_op, num_filters, axis=0)
conv_op = K.concatenate(conv_op, axis=1)
elif len(x.get_shape()) == 3:
conv_op = K.batch_dot(graph_conv_filters, x)
conv_op = tf.split(conv_op, num_filters, axis=1)
conv_op = K.concatenate(conv_op, axis=2)
else:
raise ValueError('x must be either 2 or 3 dimension tensor'
'Got input shape: ' + str(x.get_shape()))
conv_out = K.dot(conv_op, kernel)
return conv_out
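
# Hedged usage sketch (not in the original module; all sizes below are
# assumptions). For a single graph with N nodes and F features, the shapes
# flow as: (num_filters*N, N) x (N, F) -> split/concat -> (N, num_filters*F)
# -> x kernel (num_filters*F, out_dim) -> (N, out_dim).
if __name__ == "__main__":
    import numpy as np

    N, F, num_filters, out_dim = 4, 3, 2, 5
    x = K.constant(np.random.rand(N, F))
    graph_filters = K.constant(np.random.rand(num_filters * N, N))
    kernel = K.constant(np.random.rand(num_filters * F, out_dim))
    out = graph_conv_op(x, num_filters, graph_filters, kernel)
    print(K.int_shape(out))  # expected: (4, 5)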
| 33.238095 | 67 | 0.637536 | 114 | 698 | 3.657895 | 0.377193 | 0.172662 | 0.067146 | 0.057554 | 0.388489 | 0.388489 | 0.388489 | 0.254197 | 0.254197 | 0.254197 | 0 | 0.015296 | 0.250716 | 698 | 20 | 68 | 34.9 | 0.782027 | 0 | 0 | 0 | 0 | 0 | 0.081662 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.125 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8c8177220839df80a0eb470e5d230d3192332127 | 4,661 | py | Python | parsl/app/app_factory.py | Xarthisius/parsl | fdaef6e5c97365f32591fba987e7653df5ef3e2b | [
"Apache-2.0"
] | null | null | null | parsl/app/app_factory.py | Xarthisius/parsl | fdaef6e5c97365f32591fba987e7653df5ef3e2b | [
"Apache-2.0"
] | null | null | null | parsl/app/app_factory.py | Xarthisius/parsl | fdaef6e5c97365f32591fba987e7653df5ef3e2b | [
"Apache-2.0"
] | null | null | null | """Centralize app object creation."""
import logging
from inspect import getsource
from hashlib import md5
from inspect import signature
from parsl.app.bash_app import BashApp
from parsl.app.python_app import PythonApp
from parsl.app.errors import InvalidAppTypeError
logger = logging.getLogger(__name__)
class AppFactory(object):
"""AppFactory streamlines creation of apps."""
def __init__(self, app_class, func, data_flow_kernel=None, cache=False, executors='all', walltime=60):
"""Construct an AppFactory for a particular app_class.
Args:
- app_class(Class) : An app class
- func(Function) : The function to execute
Kwargs:
- data_flow_kernel(DataFlowKernel) : The DataFlowKernel which will manage app execution.
- walltime(int) : Walltime in seconds, default=60
- executors (str|list) : Labels of the executors that this app can execute over. Default is 'all'.
- cache (Bool) : Enable caching of app.
Returns:
An AppFactory Object
"""
self.__name__ = func.__name__
self.app_class = app_class
self.data_flow_kernel = data_flow_kernel
self.func = func
self.status = 'created'
self.walltime = walltime
self.executors = executors
self.sig = signature(func)
self.cache = cache
# Function source hashing is done here to avoid redoing this every time
# the app is called.
if cache is True:
try:
fn_source = getsource(func)
except OSError:
logger.debug("Unable to get source code for AppCaching. Recommend creating module")
fn_source = func.__name__
self.func_hash = md5(fn_source.encode('utf-8')).hexdigest()
else:
self.func_hash = func.__name__
def __call__(self, *args, **kwargs):
"""Create a new object of app_class with the args, execute the app_object and return the futures.
Args:
Arbitrary args to the decorated function
Kwargs:
Arbitrary kwargs to the decorated function
Returns:
(App_Future, [Data_Futures...])
The call is mostly pass through
"""
# Create and call the new App object
app_obj = self.app_class(self.func,
data_flow_kernel=self.data_flow_kernel,
executors=self.executors,
walltime=self.walltime,
cache=self.cache,
fn_hash=self.func_hash)
return app_obj(*args, **kwargs)
def __repr__(self):
return self.__str__()
def __str__(self):
return '<class %s"%s for %s>' % (self.app_class.__name__,
self.__class__.__name__,
self.__name__)
class AppFactoryFactory(object):
"""An instance AppFactoryFactory will be factory that creates object of a particular kind.
AppFactoryFactory has the various apps registered with it, and it will return an AppFactory
that constructs objects of a specific kind.
"""
def __init__(self, name):
"""Constructor.
Args:
name(string) : Name for the appfactory
Returns:
object(AppFactoryFactory)
"""
self.name = name
self.apps = {'bash': BashApp,
'python': PythonApp}
def make(self, kind, func, data_flow_kernel=None, **kwargs):
"""Creates a new App of the kind specified.
Args:
kind(string) : For now only(bash|python)
data_flow_kernel(DataFlowKernel) : The DataFlowKernel which will manage app execution.
func(Function) : The function to execute
Kwargs:
Walltime(int) : Walltime in seconds
Arbritrary kwargs passed onto the AppFactory
Raises:
InvalidAppTypeError
Returns:
An AppFactory object bound to the specific app_class kind
"""
if kind in self.apps:
return AppFactory(self.apps[kind],
func,
data_flow_kernel=data_flow_kernel,
**kwargs)
else:
logger.error("AppFactory:%s Invalid app kind requested : %s ",
self.name, kind)
raise InvalidAppTypeError(
"AppFactory:%s Invalid app kind requested : %s ",
self.name, kind)
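
# Hedged usage sketch (not part of the original module). `demo` is a
# hypothetical function; no DataFlowKernel is attached, so the app factory is
# constructed and inspected but the app is never executed.
if __name__ == "__main__":
    def demo(x):
        return 2 * x

    factory = AppFactoryFactory('example').make('python', demo)
    print(factory)  # e.g. <class PythonApp.AppFactory for demo>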
| 33.292857 | 110 | 0.579489 | 510 | 4,661 | 5.094118 | 0.292157 | 0.030793 | 0.053888 | 0.027714 | 0.185527 | 0.142417 | 0.120862 | 0.091609 | 0.091609 | 0.091609 | 0 | 0.002302 | 0.347565 | 4,661 | 139 | 111 | 33.532374 | 0.852022 | 0.370092 | 0 | 0.067797 | 0 | 0 | 0.078764 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.101695 | false | 0 | 0.118644 | 0.033898 | 0.322034 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8c83ca186957fc9af62d9f2f84e4116c77bc9153 | 3,790 | py | Python | src/model/layers.py | luyiyun/MCluster-VAEs | 15e33e79a7e03859370b21a7cfdd843739a78992 | [
"MIT"
] | null | null | null | src/model/layers.py | luyiyun/MCluster-VAEs | 15e33e79a7e03859370b21a7cfdd843739a78992 | [
"MIT"
] | null | null | null | src/model/layers.py | luyiyun/MCluster-VAEs | 15e33e79a7e03859370b21a7cfdd843739a78992 | [
"MIT"
] | null | null | null | from math import sqrt

import torch
import torch.nn as nn

ACT = nn.GELU()
# ACT = nn.LeakyReLU()


class ParallelModule(nn.Module):
    def __init__(self, *submodels):
        super().__init__()
        self._submodels = nn.ModuleList(submodels)

    def forward(self, xs):
        feats = [self._submodels[i](x) for i, x in enumerate(xs)]
        return feats


class ConcatModule(nn.Module):
    def __init__(self, *submodels):
        super().__init__()
        self._submodels = nn.ModuleList(submodels)

    def forward(self, xs):
        feats = [self._submodels[i](x) for i, x in enumerate(xs)]
        return torch.cat(feats, dim=1)


class SplitModule(nn.Module):
    def __init__(self, *dims):
        super().__init__()
        self._dims = dims
        self._cumsum = []
        s = 0
        for d in dims:
            s += d
            self._cumsum.append(s)

    def forward(self, x):
        if len(self._dims) == 0:
            return x
        return [
            x[:, d1:d2]
            for d1, d2 in zip([0] + self._cumsum[:-1], self._cumsum)
        ]
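
# Hedged usage sketch (not in the original file; sizes are assumptions):
# split a (batch, 5) tensor into 2- and 3-dim chunks, encode each chunk with
# its own linear layer in parallel, then concatenate the results.
def _demo_split_parallel_concat():
    split = SplitModule(2, 3)
    encode = ConcatModule(nn.Linear(2, 4), nn.Linear(3, 4))
    x = torch.randn(8, 5)
    return encode(split(x)).shape  # expected: torch.Size([8, 8])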
class SwapAxeModule(nn.Module):
    def __init__(self, dim1, dim2):
        super().__init__()
        self._dim1, self._dim2 = dim1, dim2

    def forward(self, x):
        return x.transpose(self._dim1, self._dim2)


class DotAttentionModule(nn.Module):
    def __init__(self, inp, out, qk_dim=None, concat=True):
        super().__init__()
        self._concat = concat
        qk_dim = out if qk_dim is None else qk_dim
        self.v_fc = nn.Linear(inp, out)
        self.k_fc = nn.Linear(inp, qk_dim)
        self.q_fc = nn.Linear(inp, qk_dim)

    def forward(self, xs):
        xs = torch.stack(xs, dim=1)  # (batch, n, inp)
        q = self.q_fc(xs)
        k = self.k_fc(xs)
        v = self.v_fc(xs)
        score = torch.bmm(q, k.transpose(1, 2))
        score = score / sqrt(xs.size(1))
        score = torch.softmax(score, dim=-1)
        res = torch.bmm(score, v)  # (batch, n, out)
        if self._concat:
            return res.view(res.size(0), -1)
        else:
            # range() was missing in the original, which would raise
            # "TypeError: 'int' object is not iterable" on this branch.
            return [res[:, i, :] for i in range(res.size(1))]
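
# Hedged usage sketch (assumed sizes, not from the original): fuse three
# view embeddings of dim 16 with dot-product attention; with concat=True the
# n per-view outputs are flattened into one vector per sample.
def _demo_dot_attention():
    attn = DotAttentionModule(inp=16, out=8, concat=True)
    views = [torch.randn(4, 16) for _ in range(3)]  # 3 views, batch of 4
    return attn(views).shape  # expected: torch.Size([4, 24])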
class GatedAttetionModule(nn.Module):
    def __init__(self, inp, hidden=None, use_sigmoid=True, use_tanh=False):
        super().__init__()
        self._use_sigmoid = use_sigmoid
        self._use_tanh = use_tanh
        if hidden is not None:
            self.embed1 = nn.Sequential(
                nn.Linear(inp, hidden),
                ACT,
                nn.Linear(hidden, 1)
            )
            if use_tanh:
                self.embed2 = nn.Sequential(
                    nn.Linear(inp, hidden),
                    ACT,
                    nn.Linear(hidden, inp)
                )
        else:
            self.embed1 = nn.Linear(inp, 1)
            if use_tanh:
                self.embed2 = nn.Linear(inp, inp)

    def forward(self, xs, return_score=False):
        xs = torch.stack(xs, dim=1)  # (batch, n, inp)
        score = self.embed1(xs)  # (batch, n, 1)
        if self._use_sigmoid:
            score = torch.sigmoid(score)
        else:
            score = torch.softmax(score, dim=1)
        if self._use_tanh:
            xs = self.embed2(xs)  # (batch, n, inp)
            xs = torch.tanh(xs)
        res = (xs * score).sum(dim=1)
        if return_score:
            return res, score
        return res
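
# Hedged usage sketch (assumed sizes, not from the original): gated attention
# pooling over three views; return_score=True also exposes the per-view gates.
def _demo_gated_attention():
    pool = GatedAttetionModule(inp=16, hidden=32, use_tanh=True)
    views = [torch.randn(4, 16) for _ in range(3)]
    res, score = pool(views, return_score=True)
    return res.shape, score.shape  # expected: (4, 16) and (4, 3, 1)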
# This errors out, because GRU can only be run in train mode
# class GRUModule(nn.Module):
#     def __init__(self, inp, hidden=50, dropout=0.5):
#         super().__init__()
#         self.embed = nn.GRU(
#             inp, hidden_size=hidden, num_layers=1,
#             batch_first=True, dropout=dropout
#         )
#
#     def forward(self, xs):
#         xs = torch.stack(xs, dim=1)  # (batch, n, inp)
#         return self.embed(xs)[0][:, -1]
| 27.071429 | 75 | 0.534828 | 493 | 3,790 | 3.902637 | 0.192698 | 0.058212 | 0.040021 | 0.054574 | 0.370582 | 0.350832 | 0.293659 | 0.242723 | 0.242723 | 0.22869 | 0 | 0.017864 | 0.335356 | 3,790 | 139 | 76 | 27.266187 | 0.745931 | 0.129024 | 0 | 0.291667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.03125 | 0.010417 | 0.3125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |