| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
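As a rough sketch of how rows with this schema might be consumed downstream, the snippet below filters files on a few of the quality signals with pandas. Everything here is illustrative: the parquet path is a placeholder and the thresholds are invented, not values taken from any particular pipeline.

```python
import pandas as pd

# Hypothetical shard with the schema above; "sample.parquet" is a placeholder.
df = pd.read_parquet("sample.parquet")

# Keep files that look like hand-written code: mostly alphabetic characters,
# little long-n-gram duplication, and not flagged as autogenerated.
mask = (
    (df["qsc_code_frac_chars_alphabet_quality_signal"] > 0.5)
    & (df["qsc_code_frac_chars_dupe_10grams_quality_signal"] < 0.2)
    & (df["qsc_code_cate_autogen_quality_signal"] == 0)
)
print(df.loc[mask, ["max_stars_repo_name", "size", "avg_line_length"]])
```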

hexsha: 06274af203a120ff736b3555a30e9a1003120ec1 | size: 4,324 | ext: py | lang: Python
max_stars: path=BlenderAddon/game_gamekit/config.py, repo=slagusev/gamekit, head=a6e97fcf2a9c3b9b9799bc12c3643818503ffc7d, licenses=["MIT"], count=1, stars_event: 2017-01-16T11:53:44.000Z → 2017-01-16T11:53:44.000Z
max_issues: path=BlenderAddon/game_gamekit/config.py, repo=slagusev/gamekit, head=a6e97fcf2a9c3b9b9799bc12c3643818503ffc7d, licenses=["MIT"], count=null, issues_event: null → null
max_forks: path=BlenderAddon/game_gamekit/config.py, repo=slagusev/gamekit, head=a6e97fcf2a9c3b9b9799bc12c3643818503ffc7d, licenses=["MIT"], count=null, forks_event: null → null
content:
#Copyright (c) 2010 harkon.kr
#
# ***** BEGIN MIT LICENSE BLOCK *****
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
#
# ***** END MIT LICENCE BLOCK *****
import bpy
from bpy.types import Operator, AddonPreferences
from bpy.props import StringProperty, IntProperty, BoolProperty
import os, os.path
class GamekitAddonPreferences(AddonPreferences):
# this must match the addon name, use '__package__'
# when defining this in a submodule of a python package.
bl_idname = __package__
runtime_path = StringProperty(
name="Runtime File Path",
subtype='FILE_PATH',
)
working_dir = StringProperty(
name="Working Directory",
subtype='FILE_PATH',
)
def draw(self, context):
layout = self.layout
layout.label(text="Gamekit Runtime options")
layout.prop(self, "runtime_path")
layout.prop(self, "working_dir")
class GamekitConfig:
cfg = dict()
defaultswin= {
'runtime':'./OgreKit/OgreKit-NoDX.exe',
'workingdir':'//'
}
defaultsmac= {
'runtime':'./OgreKit/AppOgreKit',
'workingdir':'//'
}
defaultslinux= {
'runtime':'./OgreKit/AppOgreKit',
'workingdir':'//'
}
def load_defaults(self):
if os.name == "nt":
self.cfg.update(self.defaultswin)
        elif os.name == "mac":  # os.name reports "posix" on modern macOS, so this branch is effectively dead and macOS falls through to the Linux defaults
self.cfg.update(self.defaultsmac)
else:
self.cfg.update(self.defaultslinux)
return True
def read_config(self, fn, clear_cfg = True):
if clear_cfg: self.cfg = {}
try:
f = open(fn)
lines = f.readlines()
for s in lines:
s = s.strip()
if len(s) > 0 and s[0] != '#':
kv = s.split('=', 1)
self.cfg[kv[0].strip()] = kv[1].strip()
except:
return False
return True
def write_config(self, fn):
try:
file = open(fn, 'w')
except IOError as er:
print(str(er))
return False
for k,v in self.cfg.items():
file.write(k + " = " + v + "\n")
file.close()
return True
def get(self, key, defvalue = ""):
try:
v = self.cfg[str(key)]
if not v: return defvalue
return v
except:
return defvalue
def set(self, key, value):
self.cfg[str(key)] = str(value)
def get_bool(self, key, defvalue = "False"):
v = self.get(key, defvalue)
if v == "" or v.lower() == "false" or v == "0": return False
return bool(v)
def get_int(self, key, defvalue = "0"):
try:
return int(self.get(key, defvalue))
except:
return 0
def get_float(self, key, defvalue = "0.0"):
try:
return float(self.get(key, defvalue))
except:
return 0.0
def get_color(self, key, defvalue = "(0.0, 0.0, 0.0)"):
try:
return eval(self.get(key, defvalue))
except:
            return (0.0, 0.0, 0.0)  # three components, matching the default color
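A small usage sketch of GamekitConfig outside Blender (the path /tmp/gamekit.cfg is arbitrary): load_defaults() seeds the per-OS defaults, write_config() dumps "key = value" lines, and read_config() parses them back, skipping #-comment lines.

```python
cfg = GamekitConfig()
cfg.load_defaults()                    # per-OS runtime/workingdir defaults
cfg.set("fullscreen", True)
cfg.write_config("/tmp/gamekit.cfg")   # writes "key = value" lines

cfg2 = GamekitConfig()
cfg2.read_config("/tmp/gamekit.cfg")
print(cfg2.get("runtime"))             # ./OgreKit/AppOgreKit on Linux/macOS
print(cfg2.get_bool("fullscreen"))     # True
```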
avg_line_length: 30.885714 | max_line_length: 78 | alphanum_fraction: 0.563136
quality signals (schema order, qsc_code_num_words_quality_signal through qsc_codepython_frac_lines_print_quality_signal): 519 | 4,324 | 4.645472 | 0.38921 | 0.009954 | 0.009954 | 0.009954 | 0.056823 | 0.04355 | 0.039403 | 0.026545 | 0 | 0 | 0 | 0.009685 | 0.331406 | 4,324 | 139 | 79 | 31.107914 | 0.824282 | 0.279602 | 0 | 0.25 | 0 | 0 | 0.085437 | 0.008414 | 0 | 0 | 0 | 0 | 0 | 1 | 0.108696 | false | 0 | 0.043478 | 0 | 0.402174 | 0.01087
hit counters (qsc_code_num_words through qsc_codepython_frac_lines_print): all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null
effective: 1 | hits: 0

hexsha: 06277cec3797e9fe624139a04c8d08693f4a94d7 | size: 5,063 | ext: py | lang: Python
max_stars: path=app/models/__init__.py, repo=LIhDi/python-atendimento-agendamento-back-end, head=affb722440678415d1d6293e84be3f1743c915b7, licenses=["MIT"], count=null, stars_event: null → null
max_issues: path=app/models/__init__.py, repo=LIhDi/python-atendimento-agendamento-back-end, head=affb722440678415d1d6293e84be3f1743c915b7, licenses=["MIT"], count=null, issues_event: null → null
max_forks: path=app/models/__init__.py, repo=LIhDi/python-atendimento-agendamento-back-end, head=affb722440678415d1d6293e84be3f1743c915b7, licenses=["MIT"], count=null, forks_event: null → null
content:
from enum import Enum
class StatusType(Enum):
DEFAULT = "dflag updated_at created_at".split()
class UnidadesType(Enum):
DEFAULT = "dflag updated_at created_at".split()
class AssuntoType(Enum):
DEFAULT = "dflag updated_at created_at".split()
class PersonsType(Enum):
DEFAULT = "dflag updated_at created_at".split()
class AppointmentsType(Enum):
DEFAULT = "dflag updated_at created_at id_person id_subject".split()
class Status:
def __new__(cls, status_json, remove_keys=StatusType.DEFAULT.value):
instance = super(Status, cls).__new__(cls)
instance.__init__(status_json, remove_keys)
return vars(instance)
def __init__(self, status_json, remove_keys: list):
status = dict(status_json)
self.name = status.get("name")
self.code = status.get("code")
self.description = status.get("description")
self.__remove_unwanted_keys(remove_keys)
def __remove_unwanted_keys(self, keys):
[delattr(self, key) for key in keys if hasattr(self, key)]
class Assunto:
def __new__(cls, assunto_json, remove_keys=AssuntoType.DEFAULT.value):
instance = super(Assunto, cls).__new__(cls)
instance.__init__(assunto_json, remove_keys)
return vars(instance)
def __init__(self, assunto_json, remove_keys: list):
assunto = dict(assunto_json)
self.name = assunto.get("name")
self.code = assunto.get("code")
self.description = assunto.get("description")
self.active = assunto.get("active")
self.__remove_unwanted_keys(remove_keys)
def __remove_unwanted_keys(self, keys):
[delattr(self, key) for key in keys if hasattr(self, key)]
class Unidade:
def __new__(cls, unidade_json, remove_keys=UnidadesType.DEFAULT.value):
instance = super(Unidade, cls).__new__(cls)
instance.__init__(unidade_json, remove_keys)
return vars(instance)
def __init__(self, unidade_json, remove_keys: list):
unidade = dict(unidade_json)
self.name = unidade.get("name")
self.code = unidade.get("code")
self.attendants_number = unidade.get("attendants_number")
self.description = unidade.get("description")
self.phone = unidade.get("phone")
self.email = unidade.get("email")
self.active = unidade.get("active")
self.__remove_unwanted_keys(remove_keys)
def __remove_unwanted_keys(self, keys):
[delattr(self, key) for key in keys if hasattr(self, key)]
class Person:
def __new__(cls, person_json, remove_keys=PersonsType.DEFAULT.value):
instance = super(Person, cls).__new__(cls)
instance.__init__(person_json, remove_keys)
return vars(instance)
def __init__(self, person_json, remove_keys: list):
person = dict(person_json)
self.email = person.get("email")
self.national_registration = person.get("national_registration")
self.__remove_unwanted_keys(remove_keys)
def __remove_unwanted_keys(self, keys):
[delattr(self, key) for key in keys if hasattr(self, key)]
class Appointment:
def __new__(cls, appointments_json, remove_keys=AppointmentsType.DEFAULT.value):
instance = super(Appointment, cls).__new__(cls)
instance.__init__(appointments_json, remove_keys)
return vars(instance)
def __init__(self, appointments_json, remove_keys: list):
appointment = dict(appointments_json)
self.unit = appointment.get("unit")
self.formatted_date = appointment.get("formatted_date")
self.formatted_day = appointment.get("formatted_day")
self.formatted_time = appointment.get("formatted_time")
self.attendance_number = appointment.get("attendance_number")
self.__remove_unwanted_keys(remove_keys)
def __remove_unwanted_keys(self, keys):
[delattr(self, key) for key in keys if hasattr(self, key)]
class Message:
def __init__(self, message, marketplace_id, type="notification.sms", event="send.sms", resource="teste"):
self.topic_message = {
"type": type,
"resource": resource,
"description": "",
"object": message
}
self.message_attributes = {
"event": {
"DataType": "String",
"StringValue": event
},
"marketplace_id": {
"DataType": "String",
"StringValue": marketplace_id
},
"resource": {
"DataType": "String",
"StringValue": resource
},
"source": {
"DataType": "String",
"StringValue": "api"
},
"type": {
"DataType": "String",
"StringValue": type
}
}
def __eq__(self, obj):
return isinstance(obj, Message) and obj.topic_message == self.topic_message and \
obj.message_attributes == self.message_attributes
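Note the pattern shared by Status, Assunto, Unidade, Person, and Appointment: __new__ calls __init__ by hand and returns vars(instance), so constructing one of these classes yields a plain dict rather than an instance (and Python does not invoke __init__ again, because __new__ did not return an instance of the class). A tiny illustration with an invented payload:

```python
status = Status({"name": "open", "code": 1, "description": "aberto"})
print(type(status))  # <class 'dict'>, not Status
print(status)        # {'name': 'open', 'code': 1, 'description': 'aberto'}
```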
avg_line_length: 35.405594 | max_line_length: 109 | alphanum_fraction: 0.635197
quality signals (schema order, qsc_code_num_words_quality_signal through qsc_codepython_frac_lines_print_quality_signal): 563 | 5,063 | 5.35524 | 0.147425 | 0.066335 | 0.069652 | 0.038143 | 0.360862 | 0.326036 | 0.326036 | 0.31476 | 0.31476 | 0.185075 | 0 | 0 | 0.254395 | 5,063 | 142 | 110 | 35.65493 | 0.798676 | 0 | 0 | 0.252174 | 0 | 0 | 0.111199 | 0.004148 | 0 | 0 | 0 | 0 | 0 | 1 | 0.147826 | false | 0 | 0.008696 | 0.008696 | 0.347826 | 0
hit counters (qsc_code_num_words through qsc_codepython_frac_lines_print): all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null
effective: 1 | hits: 0

hexsha: 06282a67e7f0b15f67df455cc6ff5d4a87a91f5b | size: 550 | ext: py | lang: Python
max_stars: path=2018/day01.py, repo=leandrocoding/aoc, head=8e7d072d2302fcdec3bd441970ccf81d1479f1ef, licenses=["MIT"], count=1, stars_event: 2020-12-31T13:32:52.000Z → 2020-12-31T13:32:52.000Z
max_issues: path=2018/day01.py, repo=leandrocoding/aoc, head=8e7d072d2302fcdec3bd441970ccf81d1479f1ef, licenses=["MIT"], count=null, issues_event: null → null
max_forks: path=2018/day01.py, repo=leandrocoding/aoc, head=8e7d072d2302fcdec3bd441970ccf81d1479f1ef, licenses=["MIT"], count=null, forks_event: null → null
content:
import os
path = os.path.join(os.path.dirname(__file__), 'day01.txt')
with open(path) as f:
inputdata = f.readlines()
def part1():
total = 0
freqlist = {}
for line in inputdata:
total += int(line)
return total
def part2():
total = 0
freqlist = set()
while True:
for line in inputdata:
total += int(line)
if total in freqlist:
return total
freqlist.add(total)
print(f"\nAOC 2018 Day 01: \n")
print(f"Part 1: {part1()}")
print(f"Part 2: {part2()}")
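With the classic AoC 2018 day 1 sample input of +1, -2, +3, +1, part 1 sums to 3 and part 2 first revisits the running total 2 on the second pass. A hand-checked sketch (rebinding the module-level inputdata instead of reading day01.txt):

```python
inputdata = ["+1", "-2", "+3", "+1"]
# part1: 1 - 2 + 3 + 1 == 3
# part2: running totals 1, -1, 2, 3, then 4, 2 -> 2 is the first repeat
print(part1(), part2())  # 3 2
```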
avg_line_length: 21.153846 | max_line_length: 59 | alphanum_fraction: 0.56
quality signals (schema order, qsc_code_num_words_quality_signal through qsc_codepython_frac_lines_print_quality_signal): 75 | 550 | 4.053333 | 0.52 | 0.059211 | 0.092105 | 0.118421 | 0.197368 | 0.197368 | 0.197368 | 0 | 0 | 0 | 0 | 0.041885 | 0.305455 | 550 | 26 | 60 | 21.153846 | 0.753927 | 0 | 0 | 0.363636 | 0 | 0 | 0.116152 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.045455 | 0 | 0.227273 | 0.136364
hit counters (qsc_code_num_words through qsc_codepython_frac_lines_print): all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null
effective: 1 | hits: 0

hexsha: 062887e67ee371cc8403b61489299bbce2f354a5 | size: 2,259 | ext: py | lang: Python
max_stars: path=setup.py, repo=abhijithneilabraham/signs, head=1ce8f6fe5468ec9a69d2d29646be3b3e400879d2, licenses=["MIT"], count=13, stars_event: 2018-06-22T21:30:28.000Z → 2022-01-26T20:58:24.000Z
max_issues: path=setup.py, repo=abhijithneilabraham/signs, head=1ce8f6fe5468ec9a69d2d29646be3b3e400879d2, licenses=["MIT"], count=13, issues_event: 2018-07-29T14:41:52.000Z → 2022-02-09T08:22:27.000Z
max_forks: path=setup.py, repo=abhijithneilabraham/signs, head=1ce8f6fe5468ec9a69d2d29646be3b3e400879d2, licenses=["MIT"], count=3, forks_event: 2018-08-06T06:42:39.000Z → 2022-02-10T14:53:02.000Z
content:
#! /usr/bin/env python
#
# Copyright (C) 2018 Mikko Kotila
import os
DESCRIPTION = "Signs Text Processing for Deep Learning"
LONG_DESCRIPTION = """\
Signs is a utility for text preprocessing, vectorizing, and analysis
such as semantic similarity, mainly for the purpose of using unstructured
data in deep learning models.
"""
DISTNAME = 'signs'
MAINTAINER = 'Mikko Kotila'
MAINTAINER_EMAIL = 'mailme@mikkokotila.com'
URL = 'http://autonom.io'
LICENSE = 'MIT'
DOWNLOAD_URL = 'https://github.com/autonomio/signs/'
VERSION = '0.3.2'
try:
from setuptools import setup
_has_setuptools = True
except ImportError:
from distutils.core import setup
install_requires = ['kerasplotlib',
'wrangle',
'pandas',
'numpy',
'cython',
'spacy',
'gensim',
'keras',
'ipython']
if __name__ == "__main__":
setup(name=DISTNAME,
author=MAINTAINER,
author_email=MAINTAINER_EMAIL,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
install_requires=install_requires,
packages=['signs',
'signs.commands',
'signs.preprocess',
'signs.vectorize',
'signs.grams',
'signs.utils',
'signs.models',
'signs.similarity'],
classifiers=[
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.6',
'License :: OSI Approved :: MIT License',
'Topic :: Scientific/Engineering :: Human Machine Interfaces',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Scientific/Engineering :: Mathematics',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS'])
os.system("python -m spacy download en")
avg_line_length: 30.945205 | max_line_length: 78 | alphanum_fraction: 0.557769
quality signals (schema order, qsc_code_num_words_quality_signal through qsc_codepython_frac_lines_print_quality_signal): 199 | 2,259 | 6.211055 | 0.582915 | 0.048544 | 0.063107 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006057 | 0.342187 | 2,259 | 72 | 79 | 31.375 | 0.825707 | 0.023462 | 0 | 0 | 0 | 0 | 0.388561 | 0.039946 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.066667 | 0 | 0.066667 | 0
hit counters (qsc_code_num_words through qsc_codepython_frac_lines_print): all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null
effective: 1 | hits: 0

hexsha: 062a23a5f74a1d166ea135fc95ab88b6338d9bd7 | size: 856 | ext: py | lang: Python
max_stars: path=burstInfer/get_adjusted.py, repo=ManchesterBioinference/burstInfer, head=933bc76ae8e7fadc36bab1b6bf07ed18e5978a01, licenses=["Apache-2.0"], count=1, stars_event: 2021-05-05T05:09:53.000Z → 2021-05-05T05:09:53.000Z
max_issues: path=burstInfer/get_adjusted.py, repo=ManchesterBioinference/burstInfer, head=933bc76ae8e7fadc36bab1b6bf07ed18e5978a01, licenses=["Apache-2.0"], count=2, issues_event: 2022-02-08T20:42:30.000Z → 2022-02-11T17:57:22.000Z
max_forks: path=burstInfer/get_adjusted.py, repo=ManchesterBioinference/burstInfer, head=933bc76ae8e7fadc36bab1b6bf07ed18e5978a01, licenses=["Apache-2.0"], count=null, forks_event: null → null
content:
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 9 08:46:08 2020
@author: Jon
"""
from numba import jit
import numpy as np
@jit(nopython=True)
def get_adjusted(state, K, W, ms2_coeff):
#ms2_coeff_flipped = np.flip(ms2_coeff_flipped, 1)
ms2_coeff_flipped = ms2_coeff
one_accumulator = 0
zero_accumulator = 0
for count in np.arange(0,W):
##print(count)
##print(state&1)
if state & 1 == 1:
##print('one')
one_accumulator = one_accumulator + ms2_coeff_flipped[0,count]
else:
##print('zero')
zero_accumulator = zero_accumulator + ms2_coeff_flipped[0,count]
state = state >> 1
##print(state)
return_list = []
return_list.append(one_accumulator)
return_list.append(zero_accumulator)
return return_list
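get_adjusted walks the W low bits of state, least significant first, adding each position's MS2 coefficient to the "one" or "zero" accumulator according to the bit. A hand-checked call, assuming the same (1, W) coefficient layout (K is accepted but unused by the body):

```python
import numpy as np

ms2 = np.array([[0.25, 0.5, 1.0]])   # W = 3 coefficients
# state 0b101 -> bits 1, 0, 1 -> 0.25 and 1.0 are "on", 0.5 is "off"
on_sum, off_sum = get_adjusted(0b101, K=2, W=3, ms2_coeff=ms2)
print(on_sum, off_sum)               # 1.25 0.5
```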
avg_line_length: 24.457143 | max_line_length: 80 | alphanum_fraction: 0.603972
quality signals (schema order, qsc_code_num_words_quality_signal through qsc_codepython_frac_lines_print_quality_signal): 112 | 856 | 4.392857 | 0.419643 | 0.113821 | 0.152439 | 0.105691 | 0.130081 | 0.130081 | 0 | 0 | 0 | 0 | 0 | 0.047386 | 0.285047 | 856 | 34 | 81 | 25.176471 | 0.756536 | 0.214953 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.117647 | 0 | 0.235294 | 0
hit counters (qsc_code_num_words through qsc_codepython_frac_lines_print): all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null
effective: 1 | hits: 0

hexsha: 062db264f7e87f340fddb2744772935245986efa | size: 5,542 | ext: py | lang: Python
max_stars: path=Snow-Cooling/Libraries/HT_thermal_resistance.py, repo=CarlGriffinsteed/UVM-ME144-Heat-Transfer, head=9c477449d6ba5d6a9ee7c57f1c0ed4aab0ce4cca, licenses=["CC-BY-3.0"], count=7, stars_event: 2017-06-02T20:31:22.000Z → 2021-04-05T13:52:33.000Z
max_issues: path=Snow-Cooling/Libraries/HT_thermal_resistance.py, repo=CarlGriffinsteed/UVM-ME144-Heat-Transfer, head=9c477449d6ba5d6a9ee7c57f1c0ed4aab0ce4cca, licenses=["CC-BY-3.0"], count=null, issues_event: null → null
max_forks: path=Snow-Cooling/Libraries/HT_thermal_resistance.py, repo=CarlGriffinsteed/UVM-ME144-Heat-Transfer, head=9c477449d6ba5d6a9ee7c57f1c0ed4aab0ce4cca, licenses=["CC-BY-3.0"], count=9, forks_event: 2019-01-24T17:43:41.000Z → 2021-07-25T18:08:34.000Z
content:
"""Object name: Resistance
Function name: serial_sum(R,nori,nend), performs serial sum of a resistance object list from nori to nend
Function name: parallel_sum(R,nori,nend), performs parallel sum of a resistance object list from nori to nend
"""
### definition of thermal resistance ###
from sympy.interactive import printing
printing.init_printing(use_latex='mathjax')
from IPython.display import display,Image, Latex
import numpy as np
import math
import scipy.constants as sc
import sympy as sym
#from sympy import *
class Resistance(object):
""" Defines thermal resistances for conduction, convection and radiation heat transfer.
First define the object attached with class with the name used in the thermal circuit
and the units, which can only be 'W', 'W/m' or 'W/m^2'
Second use self.conduction, self.convection or self.radiation to calculate your
resistance. Each mode requires different arguments:
from Libraries import HT_thermal_resistance as res
R = []
R.append(res.Resistance("$label$", "units")) where units = 'W', 'W/m' or 'W/m^2'
then
For conduction, there are 3 options:
- R.cond_plane(k, L, A = 1.0) for planar conduction: k is the thermal conductivity,
L is the thickness of the wall, and A is the optional surface area (=1 by default)
- R.cond_cylinder(k , ra, rb, L = 1.0, angle = 2.*math.pi) for conduction in a
cylindrical shell between the radii ra (internal) and rb (external). L is the length
of the shell (optional, default = 1) and angle is angular dimension of shell, also
optional and set to a full revolution by default (2 pi)
- R.cond_sphere(k, ra, rb, scale = 1.0) for conductuion within a spherical shell bounded by radii ra and rb
ra < rb. The optional parameter scale allows to calculate the thermal resistance for a fraction
of a spherical shell. For instance a cornea is about 1/3 of spherical shell, so scale = 1./3.
Convection:
- R.convection(h, A = 1.0), where h is the convection coefficient (W/m^2K) and A is
the surface area (optional, default is unit surface aera 1 m^2)
Radiation:
- R.radiation(eps, T_s, T_sur, A = 1.0), where eps is the permissivity of the material, T_s
the surface temperature, T_sur the far away surface temperature, A the surface area (optional,
by default A is the unit surface area 1 m^2).
Contact:
- R.contact(R,A,R_name= "R_{t}",A_name = "A",T_a_name = "T_a",Tb_name = "T_b"), where R is the contact resistance, typically obtained from a table
A is the surface area
The minimum number of arguments are:
R.contact(R,A)
R.display_equation(index) displays the heat flux/rate equations for a given resistance. index is the number of
your resistance (you specify)
Outputs:
- R[i].R the resistance of element i, R[i].h the convection or radiation coefficient.
Functions include
R_tot = res.serial_sum(R,first_resistance,last_resistance) sums serial resistance
R_tot = res.parallel_sum(R,first_resistance,last_resistance) sums parallel resistance
"""
def __init__(self,name,units):
self.name = name
self.units = units
def cond_plane(self, k, L, A = 1.0):
self.mode = "conduction"
self.geometry = "planar"
self.k = k
if k <= 0.:
print("problem with the definition of thermal conductivity")
self.L = L
self.A = A
self.R = self.L / (self.k * self.A)
def cond_cylinder(self, k , ra, rb, L = 1.0, angle = 2.*math.pi):
self.mode = "conduction"
self.geometry = "cylindrical"
self.k = k
if k <= 0.:
print("problem with the definition of thermal conductivity")
self.ra = ra
self.rb = rb
if ra*rb <= 0.:
print("problem with the definition of radii")
self.L = L
self.angle = angle
self.R = np.log(rb/ra)/(angle*L*k)
def cond_sphere(self, k, ra, rb, scale = 1.0):
self.mode = "conduction"
self.geometry = "spherical"
self.k = k
if k <= 0.:
print("problem with the definition of thermal conductivity")
self.ra = ra
self.rb = rb
if ra*rb <= 0.:
print("problem with the definition of radii")
        self.R = (1./ra - 1./rb)/(scale*4.*math.pi*k)
def convection(self, h, A = 1.0):
self.mode = 'convection'
self.geometry = "whatever"
self.R = 1./(h*A)
self.A = A
self.h = h
def radiation(self,eps,T_s,T_sur, A = 1.0):
self.R = 1./(eps*sc.sigma*(T_s+T_sur)*(T_s**2+T_sur**2)*A)
self.mode = 'radiation'
self.geometry = "whatever"
self.A = A
self.h = eps*sc.sigma*(T_s+T_sur)*(T_s**2+T_sur**2)
def contact(self, R, A=1.0):
self.R = R/A
self.geometry = 'whatever'
self.mode = 'contact'
### summation of thermal resistance (R is a vector) ###
def serial_sum(R,nori,nend):
sum = 0.
for i in range(nori,nend+1):
sum += R[i].R
return sum
def parallel_sum(R,nori,nend):
sum = 0.
for i in range(nori,nend+1):
sum += 1./R[i].R
return 1./sum
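A usage sketch with invented numbers: a 0.10 m planar wall (k = 0.7 W/mK, A = 2 m^2) in series with convection (h = 10 W/m^2K) on one face:

```python
R = []
R.append(Resistance("$R_{cond}$", "W"))
R[0].cond_plane(k=0.7, L=0.10, A=2.0)   # L/(k*A) = 0.0714 K/W
R.append(Resistance("$R_{conv}$", "W"))
R[1].convection(h=10.0, A=2.0)          # 1/(h*A) = 0.05 K/W
print(serial_sum(R, 0, 1))              # ~0.1214 K/W
```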
avg_line_length: 39.304965 | max_line_length: 154 | alphanum_fraction: 0.601227
quality signals (schema order, qsc_code_num_words_quality_signal through qsc_codepython_frac_lines_print_quality_signal): 834 | 5,542 | 3.934053 | 0.219424 | 0.006705 | 0.0064 | 0.025907 | 0.293813 | 0.230418 | 0.223712 | 0.176775 | 0.169461 | 0.169461 | 0 | 0.015159 | 0.297726 | 5,542 | 140 | 155 | 39.585714 | 0.827852 | 0.498196 | 0 | 0.442857 | 0 | 0 | 0.145439 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.128571 | false | 0 | 0.085714 | 0 | 0.257143 | 0.1
hit counters (qsc_code_num_words through qsc_codepython_frac_lines_print): all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null
effective: 1 | hits: 0

hexsha: 063bfcdb61c52f48cebfa7c465fedd55623d891f | size: 621 | ext: py | lang: Python
max_stars: path=setup.py, repo=OseiasBeu/speaker_ass, head=b7ec38c131b17c502348873f5c90450752e41b9e, licenses=["MIT"], count=null, stars_event: null → null
max_issues: path=setup.py, repo=OseiasBeu/speaker_ass, head=b7ec38c131b17c502348873f5c90450752e41b9e, licenses=["MIT"], count=null, issues_event: null → null
max_forks: path=setup.py, repo=OseiasBeu/speaker_ass, head=b7ec38c131b17c502348873f5c90450752e41b9e, licenses=["MIT"], count=null, forks_event: null → null
content:
#-*- coding: utf-8 -*-
from setuptools import setup
with open("README.md", "r") as fh:
readme = fh.read()
setup(name='fala_assis',
version='0.0.1',
url='https://github.com/OseiasBeu/AssistenteDeFala',
license='MIT License',
author='Oseias Beu',
long_description=readme,
long_description_content_type="text/markdown",
author_email='oseiasbeu@outlook.com',
keywords='Assistente de Fala',
description=u'Assistente de fala que avisa um portador de deficiência visual quando o programa executou',
packages=['fala_assis'],
install_requires=['gtts','IPython'],)
avg_line_length: 34.5 | max_line_length: 110 | alphanum_fraction: 0.679549
quality signals (schema order, qsc_code_num_words_quality_signal through qsc_codepython_frac_lines_print_quality_signal): 78 | 621 | 5.307692 | 0.75641 | 0.043478 | 0.077295 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007843 | 0.178744 | 621 | 18 | 111 | 34.5 | 0.803922 | 0.033816 | 0 | 0 | 0 | 0 | 0.433962 | 0.036021 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.066667 | 0 | 0.066667 | 0
hit counters (qsc_code_num_words through qsc_codepython_frac_lines_print): all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null
effective: 1 | hits: 0

hexsha: 063ed7c5bac55d0b3c1f5775f5ccbe6840c1974c | size: 3,603 | ext: py | lang: Python
max_stars: path=download_data/download_data.py, repo=russelljjarvis/readabilityinscience, head=353d79f11f2380fd4872242397a255a4b1da675c, licenses=["MIT"], count=14, stars_event: 2017-03-24T16:01:52.000Z → 2021-01-22T17:57:48.000Z
max_issues: path=download_data/download_data.py, repo=russelljjarvis/readabilityinscience, head=353d79f11f2380fd4872242397a255a4b1da675c, licenses=["MIT"], count=3, issues_event: 2021-03-05T07:49:21.000Z → 2022-01-09T00:54:51.000Z
max_forks: path=download_data/download_data.py, repo=russelljjarvis/readabilityinscience, head=353d79f11f2380fd4872242397a255a4b1da675c, licenses=["MIT"], count=7, forks_event: 2017-08-08T09:46:36.000Z → 2021-08-23T16:18:12.000Z
content:
#%%
#md
"""
This script downloads the dataset used in the analysis.
__It requires 2 inputs to be specified__
repo_directory and email (see first cell block).
"""
#%%
# Where is the main directory of the repo
repo_directory = './'
# Pubmed requires you to identify with an email address
email = ''
#%%
import os
os.chdir(repo_directory)
import numpy as np
import pandas as pd
import functions.dataminingfunctions as dmf
import functions.readabilityFunctions as rf
#%%
#Load journal info
journalInfo=pd.read_csv('./JournalSelection/JournalSelection.csv')
#%%
#md
"""
Specify the search data that you want to get from pubmeddata
"""
#%%
#What to get. "all" saves a txt. Otherwise the XML tags wanted (see https://www.nlm.nih.gov/bsd/licensee/elements_alphabetical.html), separated by a comma.
#"Trees" make it possible to specify the column you want (e.g. <year> occurs in several
#places, so pubdate_year takes the <year> tag in <pubdate>).
dataOfInterest = 'abstracttext,pubdate_year,pmid,articletitle,journal_title,keyword,doi'
#If dataframe, what is the index column (usually article or author)
dfId = 'article'
#%%
#md
"""
Download the data
"""
#%%
for n in range(0, len(journalInfo)):
#Parameters needed (if left blank, get_pubmeddata asks for response)
#What to search pubmed with
searchString = journalInfo.search[n]
print(' ---Running search: ' + searchString + ' (' + str(n) + ')' + ' ---')
#Run get data
dmf.get_pubmeddata(searchString.lower(), dataOfInterest, dfId, email, 'ignore')
#%%
#md
"""
Sometimes the pubdate year tags were missing in articles. The next cell finds those instances and fills in the missing years.
"""
#%%
# Sometimes the pubdate_year tag is missing; find and fill those below
for n in range(0, len(journalInfo)):
searchString = journalInfo.search[n].lower()
#make path to data (always this, if dataframe)
mDir = os.getcwd() + '/data/abstracts/' + searchString + '/' + 'id_' + dfId + '/' + dataOfInterest + '/'
mDir = mDir.replace(' ','_')
mDir = mDir.replace(',','_')
mDir = mDir.replace('\"','')
dat=pd.read_json(mDir + 'searchresults')
dat.sort_index(inplace=True)
idMissing = [i for i,x in enumerate(dat.pubdate_year) if x == '']
if len(idMissing)>0:
#Make a list of strings
pmidMissing=list(map(str,list(dat.pmid[idMissing])))
print(' ---Finding missing years (' + str(len(pmidMissing)) + ' found): ' + searchString + '. term: ' + str(n) + ' ---')
missingYears = dmf.get_medlineyear(list(pmidMissing))
dat['pubdate_year'].loc[idMissing]=missingYears
dat.to_json(mDir + 'searchresults')
#%%
#md
"""
For the "nr authors" the author info also has to be downloaded.
"""
#%%
#What to get. "all" saves a txt. Otherwise the XML tags wanted (see https://www.nlm.nih.gov/bsd/licensee/elements_alphabetical.html), separated by a comma.
#"Trees" make it possible to specify the column you want (e.g. <year> occurs in several
#places, so pubdate_year takes the <year> tag in <pubdate>).
dataOfInterest = 'forename,lastname,affiliation'
#If dataframe, what is the index column (usually article or author)
dfId = 'author'
for n in range(0, len(journalInfo)):
#Parameters needed (if left blank, get_pubmeddata asks for response)
#What to search pubmed with
searchString = journalInfo.search[n]
print(' ---Running search: ' + searchString + ' (' + str(n) + ')' + ' ---')
#Run get data
dmf.get_pubmeddata(searchString.lower(), dataOfInterest, dfId, email, 'ignore')
#dataOfInterest = 'forename,lastname,affiliation'
#dfId = 'author'
#dmf.get_pubmeddata(searchString.lower(),dataOfInterest,dfId,email,'ignore')
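The missing-year pass above boils down to two list comprehensions over the search-results frame; here is that step in isolation on a synthetic DataFrame (no Pubmed access, invented pmids):

```python
import pandas as pd

dat = pd.DataFrame({"pubdate_year": ["1999", "", "2005"],
                    "pmid": [111, 222, 333]})
idMissing = [i for i, x in enumerate(dat.pubdate_year) if x == '']
pmidMissing = list(map(str, list(dat.pmid[idMissing])))
print(idMissing, pmidMissing)  # [1] ['222']
```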
avg_line_length: 28.595238 | max_line_length: 154 | alphanum_fraction: 0.686095
quality signals (schema order, qsc_code_num_words_quality_signal through qsc_codepython_frac_lines_print_quality_signal): 479 | 3,603 | 5.102296 | 0.36952 | 0.026596 | 0.007365 | 0.013502 | 0.486498 | 0.486498 | 0.468085 | 0.457447 | 0.457447 | 0.432079 | 0 | 0.001684 | 0.175687 | 3,603 | 125 | 155 | 28.824 | 0.821212 | 0.396614 | 0 | 0.25 | 0 | 0 | 0.178591 | 0.073696 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.138889 | 0 | 0.138889 | 0.083333
hit counters (qsc_code_num_words through qsc_codepython_frac_lines_print): all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null
effective: 1 | hits: 0

hexsha: 063f03923348104f14d70cb5ad60e17ea2bae4f7 | size: 1,586 | ext: py | lang: Python
max_stars: path=scripts/parser_example.py, repo=sync-or-swim/sos-journaler, head=f98897b47a8025e74fae4b427af95e07363a64c8, licenses=["MIT"], count=null, stars_event: null → null
max_issues: path=scripts/parser_example.py, repo=sync-or-swim/sos-journaler, head=f98897b47a8025e74fae4b427af95e07363a64c8, licenses=["MIT"], count=27, issues_event: 2020-01-29T05:50:52.000Z → 2020-12-20T04:53:01.000Z
max_forks: path=scripts/parser_example.py, repo=BryceBeagle/sync-or-swim, head=f98897b47a8025e74fae4b427af95e07363a64c8, licenses=["MIT"], count=null, forks_event: null → null
content:
import xml.etree.ElementTree as ET
from pathlib import Path
from argparse import ArgumentParser
import dateutil.parser
def main():
parser = ArgumentParser(
description="An example script demonstrating how to parse a few "
"values out of a FIXM XML file.")
parser.add_argument("xml_file",
type=Path,
help="The XML file to parse")
args = parser.parse_args()
tree = ET.parse(args.xml_file)
message_collection = tree.getroot()
for message in message_collection:
for flight in message:
center = flight.attrib["centre"]
flight_identification = flight.find("flightIdentification")
flight_number = flight_identification.attrib[
"aircraftIdentification"]
timestamp_str = flight.attrib["timestamp"]
timestamp = dateutil.parser.parse(timestamp_str)
print(f"Center: {center}\n"
f"Flight Number: {flight_number}\n"
f"Timestamp: {timestamp}")
en_route = flight.find("enRoute")
if en_route is None:
print("Data does not have en-route information")
else:
pos = (en_route
.find("position")
.find("position")
.find("location")
.find("pos"))
latitude, longitude = pos.text.split(" ")
print(f" Lat: {latitude}, Long: {longitude}")
if __name__ == "__main__":
main()
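The same traversal logic can be exercised without a real FIXM file by handing ElementTree a synthetic document; the XML below is invented for illustration and is far simpler than actual FIXM:

```python
import xml.etree.ElementTree as ET

xml = """<messageCollection>
  <message>
    <flight centre="ZBW" timestamp="2020-01-01T12:00:00Z">
      <flightIdentification aircraftIdentification="DAL123"/>
    </flight>
  </message>
</messageCollection>"""

root = ET.fromstring(xml)
for message in root:
    for flight in message:
        ident = flight.find("flightIdentification")
        print(flight.attrib["centre"], ident.attrib["aircraftIdentification"])
# ZBW DAL123
```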
avg_line_length: 32.367347 | max_line_length: 73 | alphanum_fraction: 0.551702
quality signals (schema order, qsc_code_num_words_quality_signal through qsc_codepython_frac_lines_print_quality_signal): 159 | 1,586 | 5.358491 | 0.490566 | 0.032864 | 0.042254 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.35309 | 1,586 | 48 | 74 | 33.041667 | 0.830409 | 0 | 0 | 0.052632 | 0 | 0 | 0.226356 | 0.013871 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026316 | false | 0 | 0.105263 | 0 | 0.131579 | 0.078947
hit counters (qsc_code_num_words through qsc_codepython_frac_lines_print): all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null
effective: 1 | hits: 0

hexsha: 0643039f86602184a503cb24840a75bcaf50a6c2 | size: 10,371 | ext: py | lang: Python
max_stars: path=handlers.py, repo=martinslabber/tape-library-robot-control, head=ce4ca180c6d5a6be81702c252a1a8b4cde848b9b, licenses=["MIT"], count=null, stars_event: null → null
max_issues: path=handlers.py, repo=martinslabber/tape-library-robot-control, head=ce4ca180c6d5a6be81702c252a1a8b4cde848b9b, licenses=["MIT"], count=1, issues_event: 2020-05-05T09:08:20.000Z → 2020-06-19T10:15:01.000Z
max_forks: path=handlers.py, repo=martinslabber/tape-library-robot-control, head=ce4ca180c6d5a6be81702c252a1a8b4cde848b9b, licenses=["MIT"], count=1, forks_event: 2020-06-15T09:02:01.000Z → 2020-06-15T09:02:01.000Z
content:
# Handlers
import json
import logging
from aiohttp import web
def tape_library_handler_wrapper(
request,
action_name,
required_params=None,
optional_params=None,
skip_lock_check=False,
):
"""This wrapper performs error handling for the API calls.
Raises
------
Multiple exceptions
see: https://docs.aiohttp.org/en/latest/web_exceptions.html
"""
# Check parameters
if required_params is not None:
for param in required_params:
if param in request.query:
if not request.query[param]:
error = {
"error": {
"description": "empty parameter",
"parameter": param,
"reason": "empty",
"type": "parameter",
}
}
raise web.HTTPUnprocessableEntity(text=json.dumps(error))
else:
error = {
"error": {
"description": "missing parameter",
"parameter": param,
"reason": "undefined",
"type": "parameter",
}
}
raise web.HTTPUnprocessableEntity(text=json.dumps(error))
library = request.app["tape_library"]
# Check that library is not locked
if not library.running and not skip_lock_check:
error = {
"error": {
"description": "Library is locked",
"reason": "locked",
"type": "lock",
}
}
raise web.HTTPForbidden(text=json.dumps(error))
# Check library queue
if library.check_queue_max_depth_reached():
        error = {
            "error": {
                "description": "too many requests in progress",
                "reason": "full",
                "type": "taskqueue",
            }
        }
raise web.HTTPTooManyRequests(text=json.dumps(error))
# Check if action is available, run it, catch errors if any
if hasattr(library, "action_" + action_name):
try:
data = getattr(library, "action_" + action_name)(**request.query)
except web.HTTPException:
raise
except Exception as excpt:
logging.exception(action_name)
error = {
"error": {
"description": str(excpt),
"reason": "internal",
"type": "server",
}
}
raise web.HTTPInternalServerError(text=json.dumps(error))
else:
error = {
"error": {
"description": "no such method",
"reason": "nosuch",
"type": "method",
}
}
raise web.HTTPNotImplemented(text=json.dumps(error))
return web.json_response(data)
# Handlers that represent the system we simulate.
async def load_handle(request):
"""
---
description: Load media from slot to drive.
tags:
- mtx
parameters:
- in: query
name: drive
schema:
type: string
required: true
description: The ID of the drive.
- in: query
name: slot
schema:
type: string
required: true
description: The ID of the slot.
responses:
"200":
$ref: '#/components/responses/Reply200Ack'
"405":
$ref: '#/components/responses/HTTPMethodNotAllowed'
"421":
$ref: '#/components/responses/HTTPMisdirectedRequest'
"422":
$ref: '#/components/responses/HTTPUnprocessableEntity'
"""
return tape_library_handler_wrapper(
request, "load", required_params=["slot", "drive"]
)
async def unload_handle(request):
"""
---
description: Unload media from drive to slot.
tags:
- mtx
parameters:
- in: query
name: drive
schema:
type: string
required: true
description: The ID of the drive.
- in: query
name: slot
schema:
type: string
required: true
description: The ID of the slot.
responses:
"200":
$ref: '#/components/responses/Reply200Ack'
"405":
$ref: '#/components/responses/HTTPMethodNotAllowed'
"421":
$ref: '#/components/responses/HTTPMisdirectedRequest'
"422":
$ref: '#/components/responses/HTTPUnprocessableEntity'
"""
return tape_library_handler_wrapper(
request, "unload", required_params=["drive", "slot"]
)
async def transfer_handle(request):
"""
---
description: Move media from source-slot to target-slot.
tags:
- mtx
parameters:
- in: query
name: source
schema:
type: string
required: true
description: The ID of the source slot.
- in: query
name: target
schema:
type: string
required: true
description: The ID of the target slot.
responses:
"200":
$ref: '#/components/responses/Reply200Ack'
"405":
$ref: '#/components/responses/HTTPMethodNotAllowed'
"421":
$ref: '#/components/responses/HTTPMisdirectedRequest'
"422":
$ref: '#/components/responses/HTTPUnprocessableEntity'
"""
return tape_library_handler_wrapper(
request, "transfer", required_params=["source", "target"]
)
async def park_handle(request):
"""
---
description: Move the picker head to a safe position and lock the unit.
tags:
- mtx
responses:
"200":
$ref: '#/components/responses/Reply200Ack'
"405":
$ref: '#/components/responses/HTTPMethodNotAllowed'
"421":
$ref: '#/components/responses/HTTPMisdirectedRequest'
"422":
$ref: '#/components/responses/HTTPUnprocessableEntity'
"""
return tape_library_handler_wrapper(request, "park")
async def scan_handle(request):
"""
---
description: Perform inventory scan on a slot. Move the picker to the slot
and barcode scan the tape.
tags:
- mtx
parameters:
- in: query
name: slot
schema:
type: string
required: true
description: The ID of the slot to scan.
responses:
"200":
$ref: '#/components/responses/Reply200Ack'
"405":
$ref: '#/components/responses/HTTPMethodNotAllowed'
"421":
$ref: '#/components/responses/HTTPMisdirectedRequest'
"422":
$ref: '#/components/responses/HTTPUnprocessableEntity'
"""
return tape_library_handler_wrapper(request, "scan", required_params=["slot"])
async def inventory_handle(request):
"""
---
description: Return the known inventory. Use scan command to scan a slot.
For each slot either the tapeid, true, false, or null is returned. null
    indicates that the slot has not been scanned. false indicates that the
    slot has no tape and true that the slot has a tape but we don't know the ID.
    A real tape library might remember a tapeid as it moves from slot to drive, but the
    simulator is kept dumb to simulate the bare minimum required.
tags:
- info
responses:
"200":
$ref: '#/components/responses/Reply200Ack'
"405":
$ref: '#/components/responses/HTTPMethodNotAllowed'
"""
return tape_library_handler_wrapper(request, "inventory", skip_lock_check=True)
async def sensors_handle(request):
"""
---
summary: sensor values
description: Return sensor values.
tags:
- info
responses:
"200":
$ref: '#/components/responses/Reply200Ack'
"405":
$ref: '#/components/responses/HTTPMethodNotAllowed'
"421":
$ref: '#/components/responses/HTTPMisdirectedRequest'
"422":
$ref: '#/components/responses/HTTPUnprocessableEntity'
"""
# TODO(MS): Maybe allow some filter. It could be quite a bit of info.
return tape_library_handler_wrapper(request, "sensors", skip_lock_check=True)
async def config_handle(request):
"""
---
summary: get/set config
description: Return configuration, configuration can also be set.
tags:
- info
responses:
"200":
$ref: '#/components/responses/Reply200Ack'
"405":
$ref: '#/components/responses/HTTPMethodNotAllowed'
"421":
$ref: '#/components/responses/HTTPMisdirectedRequest'
"422":
$ref: '#/components/responses/HTTPUnprocessableEntity'
"""
return tape_library_handler_wrapper(request, "config", skip_lock_check=True)
async def state_handle(request):
"""
---
summary: state
description: Return the library state.
tags:
- info
responses:
"200":
$ref: '#/components/responses/Reply200Ack'
"405":
$ref: '#/components/responses/HTTPMethodNotAllowed'
"""
return tape_library_handler_wrapper(request, "state", skip_lock_check=True)
async def lock_handle(request):
"""
---
summary: lock tape library
description: Lock the tape library. No actions will be allowed until unlocked.
This action clears the internal work queue.
tags:
- mtx
responses:
"200":
$ref: '#/components/responses/Reply200Ack'
"405":
$ref: '#/components/responses/HTTPMethodNotAllowed'
"""
return tape_library_handler_wrapper(request, "lock", skip_lock_check=True)
async def unlock_handle(request):
"""
---
summary: Unlock tape library
description: Unlock the tape library. Has no side effect if already unlocked.
tags:
- mtx
responses:
"200":
$ref: '#/components/responses/Reply200Ack'
"405":
$ref: '#/components/responses/HTTPMethodNotAllowed'
"""
# TODO: Should unlock have a clear_queue argument?
return tape_library_handler_wrapper(request, "unlock", skip_lock_check=True)
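These coroutines are ordinary aiohttp handlers; what the wrapper needs at runtime is the library object stored under app["tape_library"]. A minimal wiring sketch (the TapeLibrary class itself lives elsewhere in this repo and is assumed here):

```python
from aiohttp import web

def make_app(tape_library):
    app = web.Application()
    app["tape_library"] = tape_library        # read by the wrapper above
    app.router.add_get("/load", load_handle)
    app.router.add_get("/unload", unload_handle)
    app.router.add_get("/inventory", inventory_handle)
    return app

# web.run_app(make_app(TapeLibrary()), port=8080)  # TapeLibrary assumed
```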
avg_line_length: 29.05042 | max_line_length: 89 | alphanum_fraction: 0.563687
quality signals (schema order, qsc_code_num_words_quality_signal through qsc_codepython_frac_lines_print_quality_signal): 979 | 10,371 | 5.884576 | 0.214505 | 0.081236 | 0.137476 | 0.052074 | 0.530116 | 0.516577 | 0.478736 | 0.472487 | 0.460684 | 0.43916 | 0 | 0.020376 | 0.332755 | 10,371 | 356 | 90 | 29.132022 | 0.812139 | 0.043776 | 0 | 0.219048 | 0 | 0 | 0.115375 | 0 | 0 | 0 | 0 | 0.005618 | 0 | 1 | 0.009524 | false | 0 | 0.028571 | 0 | 0.152381 | 0
hit counters (qsc_code_num_words through qsc_codepython_frac_lines_print): all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null
effective: 1 | hits: 0

hexsha: 0643deae65bf97584696f33e80afdf35b197abcf | size: 1,677 | ext: py | lang: Python
max_stars: path=robit/core/alert.py, repo=stratusadv/robit, head=7e0414d0ed3d98bb2c9a8785bf36961ac08f1d27, licenses=["MIT"], count=null, stars_event: null → null
max_issues: path=robit/core/alert.py, repo=stratusadv/robit, head=7e0414d0ed3d98bb2c9a8785bf36961ac08f1d27, licenses=["MIT"], count=1, issues_event: 2021-11-01T18:51:04.000Z → 2021-11-01T18:51:04.000Z
max_forks: path=robit/core/alert.py, repo=stratusadv/robit, head=7e0414d0ed3d98bb2c9a8785bf36961ac08f1d27, licenses=["MIT"], count=null, forks_event: null → null
content:
import logging
from datetime import datetime, timedelta
from robit.core.health import Health
class Alert:
def __init__(
self,
**kwargs,
):
if 'alert_method' in kwargs:
self.method = kwargs['alert_method']
if 'alert_method_kwargs' in kwargs:
self.method_kwargs = kwargs['alert_method_kwargs']
else:
self.method_kwargs = dict()
if 'alert_health_threshold' in kwargs:
self.health_threshold = kwargs['alert_health_threshold']
else:
self.health_threshold = 95.0
if 'alert_hours_between_messages' in kwargs:
self.hours_between_messages = kwargs['alert_hours_between_messages']
else:
self.hours_between_messages = 24
self.last_message_datetime = datetime.now() - timedelta(hours=self.hours_between_messages)
def check_health_threshold(self, name, health: Health):
if datetime.now() >= self.last_message_datetime + timedelta(hours=self.hours_between_messages):
if health.percentage_hundreds <= self.health_threshold:
alert_message = f'ALERT: {name} dropped below the {self.health_threshold} percentage health threshold.'
self.method_kwargs['alert_message'] = alert_message
try:
self.method(**self.method_kwargs)
self.last_message_datetime = datetime.now()
logging.warning(alert_message)
except Exception as e:
failed_message = f'ERROR: Alert method failed on exception "{e}"'
logging.warning(failed_message)
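A minimal exercise of the threshold logic, using a print-based alert_method and a stub in place of robit.core.health.Health (only the percentage_hundreds attribute is read by the check):

```python
class FakeHealth:
    percentage_hundreds = 80.0  # below the default 95.0 threshold

alert = Alert(alert_method=lambda **kw: print(kw["alert_message"]),
              alert_hours_between_messages=0)
alert.check_health_threshold("worker-1", FakeHealth())
# ALERT: worker-1 dropped below the 95.0 percentage health threshold.
```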
avg_line_length: 37.266667 | max_line_length: 119 | alphanum_fraction: 0.627907
quality signals (schema order, qsc_code_num_words_quality_signal through qsc_codepython_frac_lines_print_quality_signal): 183 | 1,677 | 5.47541 | 0.251366 | 0.11976 | 0.11976 | 0.095808 | 0.191617 | 0.143713 | 0 | 0 | 0 | 0 | 0 | 0.004209 | 0.291592 | 1,677 | 44 | 120 | 38.113636 | 0.839226 | 0 | 0 | 0.085714 | 0 | 0 | 0.181276 | 0.073345 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057143 | false | 0 | 0.085714 | 0 | 0.171429 | 0
hit counters (qsc_code_num_words through qsc_codepython_frac_lines_print): all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null
effective: 1 | hits: 0

hexsha: 064427ba3481c1d9ed4c628c04dbaf55a12eda29 | size: 365 | ext: py | lang: Python
max_stars: path=202-happy-number/202-happy-number.py, repo=hyeseonko/LeetCode, head=48dfc93f1638e13041d8ce1420517a886abbdc77, licenses=["MIT"], count=2, stars_event: 2021-12-05T14:29:06.000Z → 2022-01-01T05:46:13.000Z
max_issues: path=202-happy-number/202-happy-number.py, repo=hyeseonko/LeetCode, head=48dfc93f1638e13041d8ce1420517a886abbdc77, licenses=["MIT"], count=null, issues_event: null → null
max_forks: path=202-happy-number/202-happy-number.py, repo=hyeseonko/LeetCode, head=48dfc93f1638e13041d8ce1420517a886abbdc77, licenses=["MIT"], count=null, forks_event: null → null
content:
class Solution:
def isHappy(self, n: int) -> bool:
pool = set()
pool.add(n)
result=n
while(result>1):
strn = str(result)
result = 0
for c in strn:
result+=int(c)*int(c)
if result in pool:
return False
pool.add(result)
return True
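A quick trace: for n = 19 the digit-square sums run 19 -> 82 -> 68 -> 100 -> 1, so the loop exits and returns True; for n = 2 the sequence enters the cycle 4 -> 16 -> 37 -> 58 -> 89 -> 145 -> 42 -> 20 -> 4, trips the seen-set check, and returns False.

```python
s = Solution()
print(s.isHappy(19))  # True
print(s.isHappy(2))   # False
```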
avg_line_length: 26.071429 | max_line_length: 38 | alphanum_fraction: 0.441096
quality signals (schema order, qsc_code_num_words_quality_signal through qsc_codepython_frac_lines_print_quality_signal): 43 | 365 | 3.744186 | 0.55814 | 0.086957 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010204 | 0.463014 | 365 | 14 | 39 | 26.071429 | 0.811224 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0 | 0 | 0.285714 | 0
hit counters (qsc_code_num_words through qsc_codepython_frac_lines_print): all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null
effective: 1 | hits: 0

hexsha: 0648e18f81ac883f3b49a5656d1320a8eddbf0ed | size: 5,014 | ext: py | lang: Python
max_stars: path=unitorch/score/voc_map.py, repo=fuliucansheng/UniTorch, head=47038321593ce4e7eabda555bd58c0cf89482146, licenses=["MIT"], count=2, stars_event: 2022-02-05T08:52:00.000Z → 2022-03-27T07:01:34.000Z
max_issues: path=unitorch/score/voc_map.py, repo=Lixin-Qian/unitorch, head=47038321593ce4e7eabda555bd58c0cf89482146, licenses=["MIT"], count=null, issues_event: null → null
max_forks: path=unitorch/score/voc_map.py, repo=Lixin-Qian/unitorch, head=47038321593ce4e7eabda555bd58c0cf89482146, licenses=["MIT"], count=1, forks_event: 2022-03-27T07:01:13.000Z → 2022-03-27T07:01:13.000Z
content:
import numpy as np
from collections import defaultdict
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union
def _voc_ap(
rec,
prec,
use_07_metric=False,
):
"""Compute VOC AP given precision and recall. If use_07_metric is true, uses
the VOC 07 11-point method (default:False).
"""
if use_07_metric:
# 11 point metric
ap = 0.0
for t in np.arange(0.0, 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap = ap + p / 11.0
else:
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.0], rec, [1.0]))
mpre = np.concatenate(([0.0], prec, [0.0]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
def voc_ap_score(
p_bboxes: List[np.ndarray],
p_scores: List[np.ndarray],
p_classes: List[np.ndarray],
gt_bboxes: List[np.ndarray],
gt_classes: List[np.ndarray],
class_id: int = None,
threshold: float = 0.5,
):
"""
Args:
p_bboxes: a list of predict bboxes
p_scores: a list of predict score for bbox
p_classes: a list of predict class id for bbox
gt_bboxes: a list of ground truth bboxes
gt_classes: a list of true class id for each true bbox
class_id: the class id to compute ap score
threshold: the threshold to ap score
"""
if class_id is not None:
gt_bboxes = [gt_bbox[gt_class == class_id] for gt_class, gt_bbox in zip(gt_classes, gt_bboxes)]
p_bboxes = [p_bbox[p_class == class_id] for p_class, p_bbox in zip(p_classes, p_bboxes)]
p_scores = [p_score[p_class == class_id] for p_class, p_score in zip(p_classes, p_scores)]
p_indexes = [np.array([i] * len(p_bboxes[i])) for i in range(len(p_bboxes))]
p_bboxes, p_scores, p_indexes = (
np.concatenate(p_bboxes),
np.concatenate(p_scores),
np.concatenate(p_indexes),
)
p_sort_indexes = np.argsort(-p_scores)
tp = np.zeros(p_scores.shape[0])
fp = np.zeros(p_scores.shape[0])
gt_bbox_status = defaultdict(set)
for idx, p_sort_index in enumerate(p_sort_indexes):
p_index = int(p_indexes[p_sort_index])
gt_bbox = gt_bboxes[p_index]
p_bbox = p_bboxes[p_sort_index]
vmax = -float("inf")
jmax = -1
if gt_bbox.size > 0:
ixmin = np.maximum(gt_bbox[:, 0], p_bbox[0])
iymin = np.maximum(gt_bbox[:, 1], p_bbox[1])
ixmax = np.minimum(gt_bbox[:, 2], p_bbox[2])
iymax = np.minimum(gt_bbox[:, 3], p_bbox[3])
iw = np.maximum(ixmax - ixmin + 1.0, 0.0)
ih = np.maximum(iymax - iymin + 1.0, 0.0)
inters = iw * ih
uni = (
(p_bbox[2] - p_bbox[0] + 1.0) * (p_bbox[3] - p_bbox[1] + 1.0)
+ (gt_bbox[:, 2] - gt_bbox[:, 0] + 1.0) * (gt_bbox[:, 3] - gt_bbox[:, 1] + 1.0)
- inters
)
overlaps = inters / uni
vmax = np.max(overlaps)
jmax = np.argmax(overlaps)
if vmax > threshold:
if jmax not in gt_bbox_status[p_index]:
tp[idx] = 1
gt_bbox_status[p_index].add(jmax)
else:
fp[idx] = 1
else:
fp[idx] = 1
fp = np.cumsum(fp, axis=0)
tp = np.cumsum(tp, axis=0)
rec = tp / float(sum([len(gt) for gt in gt_bboxes]))
    prec = tp / np.maximum(tp + fp, np.finfo(float).eps)
ap = _voc_ap(rec, prec)
return ap
def voc_map_score(
p_bboxes: List[np.ndarray],
p_scores: List[np.ndarray],
p_classes: List[np.ndarray],
gt_bboxes: List[np.ndarray],
gt_classes: List[np.ndarray],
):
"""
Args:
p_bboxes: a list of predict bboxes
p_scores: a list of predict score for bbox
p_classes: a list of predict class id for bbox
gt_bboxes: a list of ground truth bboxes
gt_classes: a list of true class id for each true bbox
Returns:
        the average AP score over all classes in the ground truth
"""
classes = set(list(np.concatenate(gt_classes)))
ap_scores = dict()
for thres in range(50, 100, 5):
ap_scores[thres] = [
voc_ap_score(
p_bboxes,
p_scores,
p_classes,
gt_bboxes,
gt_classes,
c,
thres / 100,
)
for c in classes
]
mAP = {iou: np.mean(x) for iou, x in ap_scores.items()}
return np.mean(list(mAP.values()))
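As a degenerate sanity check, a single image whose one prediction exactly matches the one ground-truth box should give AP 1.0 at every IoU threshold (boxes are [x1, y1, x2, y2]; all numbers invented):

```python
import numpy as np

p_bboxes   = [np.array([[10., 10., 50., 50.]])]
p_scores   = [np.array([0.9])]
p_classes  = [np.array([1])]
gt_bboxes  = [np.array([[10., 10., 50., 50.]])]
gt_classes = [np.array([1])]

print(voc_ap_score(p_bboxes, p_scores, p_classes,
                   gt_bboxes, gt_classes, class_id=1))  # 1.0
print(voc_map_score(p_bboxes, p_scores, p_classes,
                    gt_bboxes, gt_classes))             # 1.0
```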
avg_line_length: 33.426667 | max_line_length: 103 | alphanum_fraction: 0.553849
quality signals (schema order, qsc_code_num_words_quality_signal through qsc_codepython_frac_lines_print_quality_signal): 763 | 5,014 | 3.473132 | 0.192661 | 0.033962 | 0.049057 | 0.031698 | 0.301887 | 0.246038 | 0.230943 | 0.230943 | 0.213585 | 0.213585 | 0 | 0.028165 | 0.327284 | 5,014 | 149 | 104 | 33.651007 | 0.757486 | 0.196649 | 0 | 0.196262 | 0 | 0 | 0.000765 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028037 | false | 0 | 0.028037 | 0 | 0.084112 | 0
hit counters (qsc_code_num_words through qsc_codepython_frac_lines_print): all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null
effective: 1 | hits: 0

hexsha: 064ca7c37993e4810c14d5f7e1d0f4a40a067487 | size: 8,098 | ext: py | lang: Python
max_stars: path=video_utils.py, repo=Domhnall-Liopa/Lip2Wav, head=236ae24cd7945da8a75ddea1cfdc3da271c3c59f, licenses=["MIT"], count=null, stars_event: null → null
max_issues: path=video_utils.py, repo=Domhnall-Liopa/Lip2Wav, head=236ae24cd7945da8a75ddea1cfdc3da271c3c59f, licenses=["MIT"], count=null, issues_event: null → null
max_forks: path=video_utils.py, repo=Domhnall-Liopa/Lip2Wav, head=236ae24cd7945da8a75ddea1cfdc3da271c3c59f, licenses=["MIT"], count=null, forks_event: null → null
content:
import json
import random
import re
import subprocess
import tempfile
from datetime import timedelta
import cv2
import numpy as np
import requests
from vidaug import augmentors as va
# this is a static build from https://www.johnvansickle.com/ffmpeg/old-releases/ffmpeg-4.4.1-i686-static.tar.xz
# requires new ffmpeg version for:
# - duration of extracted audio == video
# - contains x264 codec in build required for clean video frames
FFMPEG_PATH = '/opt/lip2wav/ffmpeg-4.4.1-i686-static/ffmpeg'
FFPROBE_PATH = '/opt/lip2wav/ffmpeg-4.4.1-i686-static/ffprobe'
OLD_FFMPEG_PATH = 'ffmpeg-2.8.15'
FFMPEG_OPTIONS = '-hide_banner -loglevel panic'
VIDEO_CROP_COMMAND = f'{FFMPEG_PATH} {FFMPEG_OPTIONS} -y -i {{input_video_path}} -ss {{start_time}} -to {{end_time}} -async 1 {{output_video_path}}'
VIDEO_INFO_COMMAND = f'{FFMPEG_PATH} -i {{input_video_path}}'
VIDEO_DURATION_COMMAND = f'{FFPROBE_PATH} {FFMPEG_OPTIONS} -show_entries format=duration -of default=noprint_wrappers=1:nokey=1 {{video_path}}'
VIDEO_TO_AUDIO_COMMAND = f'{{ffmpeg_path}} {FFMPEG_OPTIONS} -threads 1 -y -i {{input_video_path}} -async 1 -ac 1 -vn -acodec pcm_s16le -ar 16000 {{output_audio_path}}'
VIDEO_CONVERT_FPS_COMMAND = f'{FFMPEG_PATH} {FFMPEG_OPTIONS} -y -i {{input_video_path}} -strict -2 -filter:v fps=fps={{fps}} {{output_video_path}}' # copies original codecs and metadata (rotation)
VIDEO_SPEED_ALTER_COMMAND = f'{FFMPEG_PATH} {FFMPEG_OPTIONS} -y -i {{input_video_path}} -filter_complex "[0:v]setpts={{video_speed}}*PTS[v];[0:a]atempo={{audio_speed}}[a]" -map "[v]" -map "[a]" {{output_video_path}}'
VIDEO_REMOVE_AUDIO_COMMAND = f'{FFMPEG_PATH} {FFMPEG_OPTIONS} -y -i {{input_video_path}} -c copy -an {{output_video_path}}'
VIDEO_ADD_AUDIO_COMMAND = f'{FFMPEG_PATH} {FFMPEG_OPTIONS} -y -i {{input_video_path}} -i {{input_audio_path}} -strict -2 -c:v copy -c:a aac {{output_video_path}}'
def get_num_frames(video_path):
video_capture = cv2.VideoCapture(video_path)
num_frames = int(video_capture.get(cv2.CAP_PROP_FRAME_COUNT))
video_capture.release()
return num_frames
def get_video_frame(video_path, index):
video_capture = cv2.VideoCapture(video_path)
i = 0
selected_frame = None
while True:
success, frame = video_capture.read()
if not success:
break
if i == index:
selected_frame = frame
break
i += 1
video_capture.release()
return selected_frame
def get_video_duration(video_path):
result = subprocess.check_output(VIDEO_DURATION_COMMAND.format(video_path=video_path).split(' '),
stderr=subprocess.STDOUT).decode()
return float(result)
def get_video_rotation(video_path):
cmd = VIDEO_INFO_COMMAND.format(input_video_path=video_path)
p = subprocess.Popen(
cmd.split(' '),
stderr=subprocess.PIPE,
close_fds=True
)
stdout, stderr = p.communicate()
try:
        reo_rotation = re.compile(r'rotate\s+:\s(\d+)')
match_rotation = reo_rotation.search(str(stderr))
rotation = match_rotation.groups()[0]
except AttributeError:
# print(f'Rotation not found: {video_path}')
return 0
return int(rotation)
def fix_frame_rotation(image, rotation):
if rotation == 90:
image = cv2.rotate(image, cv2.ROTATE_90_CLOCKWISE)
elif rotation == 180:
image = cv2.rotate(image, cv2.ROTATE_180)
elif rotation == 270:
image = cv2.rotate(image, cv2.ROTATE_90_COUNTERCLOCKWISE)
return image
def get_fps(video_path):
video_capture = cv2.VideoCapture(video_path)
fps = int(video_capture.get(cv2.CAP_PROP_FPS))
video_capture.release()
return fps
def get_video_frames(video_path, rotation):
video_reader = cv2.VideoCapture(video_path)
frames = []
while True:
success, frame = video_reader.read()
if not success:
break
frame = fix_frame_rotation(frame, rotation)
frames.append(frame)
video_reader.release()
return frames
def show_frames(video_frames, delay, title):
for frame in video_frames:
cv2.imshow(title, frame)
cv2.waitKey(delay)
def run_video_augmentation(video_path, new_video_path, random_prob=0.5):
if random.random() < random_prob:
# https://trac.ffmpeg.org/wiki/How%20to%20speed%20up%20/%20slow%20down%20a%20video
# speed required between 0 and 2
# < 1 = slow down
# > 1 = speed up
speed = round(random.uniform(0.5, 1.5), 2)
subprocess.call(VIDEO_SPEED_ALTER_COMMAND.format(
input_video_path=video_path,
output_video_path=new_video_path,
video_speed=round(1. / speed, 2),
audio_speed=float(speed)
), shell=True)
return new_video_path
return video_path
class RandomRotate:
def __init__(self, degrees):
self.degrees = degrees
def __call__(self, clip):
image_center = tuple(np.array(clip[0].shape[1::-1]) / 2)
rot_mat = cv2.getRotationMatrix2D(image_center, self.degrees, 1.0)
return [cv2.warpAffine(frame, rot_mat, frame.shape[1::-1], flags=cv2.INTER_LINEAR)
for frame in clip]
def run_frame_augmentation(frames, method, random_prob=0.5, rotation_range=10, intensity_range=30):
sometimes = lambda aug: va.Sometimes(random_prob, aug)
random_int = lambda max_value: np.random.randint(-max_value, max_value) # upper bound is exclusive
# TODO: Zoom in/out
if method == 'full':
seq = va.Sequential([
RandomRotate(degrees=random_int(rotation_range)), # random rotate of angle between (-degrees, degrees)
])
elif method == 'mouth':
seq = va.Sequential([
sometimes(va.HorizontalFlip()), # flip video horizontally
sometimes(va.Add(random_int(intensity_range))), # add random value to pixels between (-max, max)
])
else:
print(f'{method} does not exist')
return
# normalize frames to 0-255 uint8 dtype
return [cv2.normalize(src=frame, dst=None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
for frame in seq(frames)]
def extract_audio(video_path, use_old_ffmpeg=False):
audio_file = tempfile.NamedTemporaryFile(suffix='.wav')
if use_old_ffmpeg:
ffmpeg_path = OLD_FFMPEG_PATH
else:
ffmpeg_path = FFMPEG_PATH
subprocess.call(VIDEO_TO_AUDIO_COMMAND.format(
ffmpeg_path=ffmpeg_path,
input_video_path=video_path,
output_audio_path=audio_file.name
), shell=True)
return audio_file
def convert_fps(video_path, new_video_path, fps):
subprocess.call(VIDEO_CONVERT_FPS_COMMAND.format(
input_video_path=video_path,
output_video_path=new_video_path,
fps=fps
), shell=True)
return new_video_path
def replace_audio(video_path, audio_path, output_video_path):
with tempfile.NamedTemporaryFile(suffix='.mp4') as f:
subprocess.call(VIDEO_REMOVE_AUDIO_COMMAND.format(
input_video_path=video_path,
output_video_path=f.name
), shell=True)
subprocess.call(VIDEO_ADD_AUDIO_COMMAND.format(
input_video_path=f.name,
input_audio_path=audio_path,
output_video_path=output_video_path
), shell=True)
def get_lip_embeddings(video_path):
with open(video_path, 'rb') as f:
response = requests.post('http://127.0.0.1:6002/lip_embeddings', files={'video': f.read()})
if response.status_code != 200:
print(response.content)
return
return json.loads(response.content)
def crop(video_path, start, end):
suffix = video_path.split('/')[-1].split('.')[-1] # last dot-separated part, so dots in the filename are safe
output_video_path = f'/tmp/cropped_video.{suffix}'
subprocess.call(VIDEO_CROP_COMMAND.format(
input_video_path=video_path,
# str(timedelta) includes a fractional part only for non-integer seconds;
# [:-3] trims microseconds to milliseconds under that assumption
start_time='0' + str(timedelta(seconds=start))[:-3],
end_time='0' + str(timedelta(seconds=end))[:-3],
output_video_path=output_video_path
), shell=True)
return output_video_path
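# Minimal usage sketch for the helpers above (illustrative; 'sample.mp4' is a
# hypothetical path and the configured ffmpeg/ffprobe binaries must exist):
#     rotation = get_video_rotation('sample.mp4')
#     frames = get_video_frames('sample.mp4', rotation)
#     print(len(frames), get_fps('sample.mp4'), get_video_duration('sample.mp4'))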
| 32.785425
| 216
| 0.67918
| 1,111
| 8,098
| 4.691269
| 0.261926
| 0.110514
| 0.04317
| 0.024175
| 0.25518
| 0.220837
| 0.168841
| 0.125863
| 0.09363
| 0.080967
| 0
| 0.024603
| 0.206965
| 8,098
| 246
| 217
| 32.918699
| 0.786982
| 0.082119
| 0
| 0.222222
| 0
| 0.040936
| 0.162217
| 0.046251
| 0
| 0
| 0
| 0.004065
| 0
| 1
| 0.099415
| false
| 0
| 0.05848
| 0
| 0.25731
| 0.017544
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
064f53cd615575e4bc66f6d26d74337b90be2852
| 621
|
py
|
Python
|
aflcov/vis.py
|
axt/afl-cov-vis
|
7806fa430113732790563b0f15884a087ebd21ea
|
[
"BSD-2-Clause"
] | 29
|
2017-11-12T09:35:01.000Z
|
2022-02-17T09:29:54.000Z
|
aflcov/vis.py
|
usc-isi-bass/afl-cov
|
18e305d101443d8a06c46f9ac080dd45ca13d8bb
|
[
"BSD-2-Clause"
] | 2
|
2017-11-12T09:40:43.000Z
|
2018-01-19T10:37:17.000Z
|
aflcov/vis.py
|
usc-isi-bass/afl-cov
|
18e305d101443d8a06c46f9ac080dd45ca13d8bb
|
[
"BSD-2-Clause"
] | 6
|
2017-11-12T09:50:20.000Z
|
2022-02-22T06:01:17.000Z
|
from bingraphvis.base import Content
class AflCovInfo(Content):
def __init__(self, project):
super(AflCovInfo, self).__init__('aflcovinfo', ['text'])
self.project = project
def gen_render(self, n):
node = n.obj
n.content[self.name] = {
'data': [{
'text': {
'content': "Hit: %d / %d " % (self.project.kb.cov.node_hit_count(node.addr), self.project.kb.cov.nr_of_paths),
'style':'B',
'align':'LEFT'
}
}],
'columns': self.get_columns()
}
| 31.05
| 130
| 0.481481
| 63
| 621
| 4.52381
| 0.555556
| 0.154386
| 0.091228
| 0.112281
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.371981
| 621
| 19
| 131
| 32.684211
| 0.730769
| 0
| 0
| 0
| 0
| 0
| 0.10306
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.117647
| false
| 0
| 0.058824
| 0
| 0.235294
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
064faa0fae768ef7598b80938b851b966512e6ab
| 3,418
|
py
|
Python
|
corehq/couchapps/tests/test_all_docs.py
|
kkrampa/commcare-hq
|
d64d7cad98b240325ad669ccc7effb07721b4d44
|
[
"BSD-3-Clause"
] | 1
|
2020-05-05T13:10:01.000Z
|
2020-05-05T13:10:01.000Z
|
corehq/couchapps/tests/test_all_docs.py
|
kkrampa/commcare-hq
|
d64d7cad98b240325ad669ccc7effb07721b4d44
|
[
"BSD-3-Clause"
] | 1
|
2019-12-09T14:00:14.000Z
|
2019-12-09T14:00:14.000Z
|
corehq/couchapps/tests/test_all_docs.py
|
MaciejChoromanski/commcare-hq
|
fd7f65362d56d73b75a2c20d2afeabbc70876867
|
[
"BSD-3-Clause"
] | 5
|
2015-11-30T13:12:45.000Z
|
2019-07-01T19:27:07.000Z
|
from __future__ import absolute_import
from __future__ import unicode_literals
from corehq.dbaccessors.couchapps.all_docs import \
get_all_doc_ids_for_domain_grouped_by_db, get_doc_count_by_type, \
delete_all_docs_by_doc_type, get_doc_count_by_domain_type
from dimagi.utils.couch.database import get_db
from django.test import TestCase
class AllDocsTest(TestCase):
maxDiff = None
@classmethod
def setUpClass(cls):
super(AllDocsTest, cls).setUpClass()
cls.main_db = get_db(None)
cls.users_db = get_db('users')
cls.doc_types = ('Application', 'CommCareUser')
delete_all_docs_by_doc_type(cls.main_db, cls.doc_types)
delete_all_docs_by_doc_type(cls.users_db, cls.doc_types)
cls.domain1 = 'all-docs-domain1'
cls.domain2 = 'all-docs-domain2'
cls.main_db_doc = {'_id': 'main_db_doc', 'doc_type': 'Application'}
cls.users_db_doc = {'_id': 'users_db_doc', 'doc_type': 'CommCareUser'}
for doc_type in cls.doc_types:
for domain in (cls.domain1, cls.domain2):
db_alias = 'main' if doc_type == 'Application' else 'users'
doc_id = '{}_db_doc_{}'.format(db_alias, domain)
doc = {'_id': doc_id, 'doc_type': doc_type, 'domain': domain}
if doc_type == 'Application':
cls.main_db.save_doc(doc)
else:
cls.users_db.save_doc(doc)
@classmethod
def tearDownClass(cls):
delete_all_docs_by_doc_type(cls.main_db, cls.doc_types)
delete_all_docs_by_doc_type(cls.users_db, cls.doc_types)
super(AllDocsTest, cls).tearDownClass()
def test_get_all_doc_ids_for_domain_grouped_by_db(self):
self.assertEqual(
{key.uri: list(value) for key, value in
get_all_doc_ids_for_domain_grouped_by_db(self.domain1)},
{get_db(None).uri: ['main_db_doc_all-docs-domain1'],
get_db('users').uri: ['users_db_doc_all-docs-domain1'],
get_db('meta').uri: [],
get_db('fixtures').uri: [],
get_db('domains').uri: [],
get_db('apps').uri: []}
)
def test_get_doc_count_by_type(self):
self.assertEqual(get_doc_count_by_type(get_db(None), 'Application'), 2)
self.assertEqual(get_doc_count_by_type(get_db('users'), 'CommCareUser'), 2)
self.assertEqual(get_doc_count_by_type(get_db(None), 'CommCareUser'), 0)
self.assertEqual(get_doc_count_by_type(get_db('users'), 'Application'), 0)
def test_get_doc_count_by_domain_type(self):
self.assertEqual(get_doc_count_by_domain_type(get_db(None), self.domain1, 'Application'), 1)
self.assertEqual(get_doc_count_by_domain_type(get_db(None), self.domain2, 'Application'), 1)
self.assertEqual(get_doc_count_by_domain_type(get_db(None), 'other', 'Application'), 0)
self.assertEqual(get_doc_count_by_domain_type(get_db('users'), self.domain1, 'CommCareUser'), 1)
self.assertEqual(get_doc_count_by_domain_type(get_db('users'), self.domain2, 'CommCareUser'), 1)
self.assertEqual(get_doc_count_by_domain_type(get_db('users'), 'other', 'CommCareUser'), 0)
self.assertEqual(get_doc_count_by_domain_type(get_db(None), self.domain1, 'CommCareUser'), 0)
self.assertEqual(get_doc_count_by_domain_type(get_db('users'), self.domain1, 'Application'), 0)
| 50.264706
| 104
| 0.675834
| 484
| 3,418
| 4.349174
| 0.13843
| 0.049881
| 0.08361
| 0.098812
| 0.539667
| 0.531591
| 0.488361
| 0.465558
| 0.451781
| 0.43658
| 0
| 0.009894
| 0.20158
| 3,418
| 67
| 105
| 51.014925
| 0.761451
| 0
| 0
| 0.1
| 0
| 0
| 0.131949
| 0.016676
| 0
| 0
| 0
| 0
| 0.216667
| 1
| 0.083333
| false
| 0
| 0.083333
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0652b6080d711fc812aa3a6054f91161bc0d0a8b
| 16,913
|
py
|
Python
|
lattpy/spatial.py
|
dylanljones/lattpy
|
6779ae7755aaf9e844d63a6f63b5036fb64d9f89
|
[
"MIT"
] | 11
|
2020-10-29T17:23:02.000Z
|
2022-02-28T12:25:41.000Z
|
lattpy/spatial.py
|
dylanljones/lattpy
|
6779ae7755aaf9e844d63a6f63b5036fb64d9f89
|
[
"MIT"
] | 7
|
2021-01-12T13:53:42.000Z
|
2022-03-29T11:21:58.000Z
|
lattpy/spatial.py
|
dylanljones/lattpy
|
6779ae7755aaf9e844d63a6f63b5036fb64d9f89
|
[
"MIT"
] | 1
|
2021-10-31T11:15:20.000Z
|
2021-10-31T11:15:20.000Z
|
# coding: utf-8
#
# This code is part of lattpy.
#
# Copyright (c) 2021, Dylan Jones
#
# This code is licensed under the MIT License. The copyright notice in the
# LICENSE file in the root directory and this permission notice shall
# be included in all copies or substantial portions of the Software.
"""Spatial algorithms and data structures."""
import math
import numpy as np
import itertools
import matplotlib.pyplot as plt
from scipy.spatial import cKDTree, Voronoi
from typing import Iterable, Sequence, Optional, Union
from .utils import ArrayLike, min_dtype, chain
from .plotting import draw_points, draw_vectors, draw_lines, draw_surfaces
__all__ = [
"distance", "interweave", "vindices", "vrange", "cell_size", "cell_volume",
"compute_vectors", "compute_neighbors", "KDTree", "VoronoiTree", "WignerSeitzCell",
"rx", "ry", "rz", "rotate2d", "rotate3d", "build_periodic_translation_vector"
]
def distance(r1: ArrayLike, r2: ArrayLike, decimals: Optional[int] = None) -> float:
""" Calculates the euclidian distance bewteen two points.
Parameters
----------
r1: array_like
First input point.
r2: array_like
Second input point of matching size.
decimals: int, optional
Optional decimals to round distance to.
Returns
-------
distance: float
"""
dist = math.sqrt(np.sum(np.square(r1 - r2)))
if decimals is not None:
dist = round(dist, decimals)
return dist
def interweave(arrays: Sequence[np.ndarray]) -> np.ndarray:
""" Interweaves multiple arrays along the first axis
Example
-------
>>> arr1 = np.array([[1, 1], [3, 3], [5, 5]])
>>> arr2 = np.array([[2, 2], [4, 4], [6, 6]])
>>> interweave([arr1, arr2])
array([[1, 1], [2, 2], [3, 3], [4, 4], [5, 5], [6, 6]])
Parameters
----------
arrays: (M) Sequence of (N, ...) array_like
The input arrays to interweave. The shape of all arrays must match.
Returns
-------
interweaved: (M*N, ...) np.ndarray
"""
shape = list(arrays[0].shape)
shape[0] = sum(x.shape[0] for x in arrays)
result = np.empty(shape, dtype=arrays[0].dtype)
n = len(arrays)
for i, arr in enumerate(arrays):
result[i::n] = arr
return result
def vindices(limits: Iterable[Sequence[int]], sort_axis: Optional[int] = 0,
dtype: Optional[Union[int, str, np.dtype]] = None) -> np.ndarray:
""" Return an array representing the indices of a d-dimensional grid.
Parameters
----------
limits: (D, 2) array_like
The limits of the indices for each axis.
sort_axis: int, optional
Optional axis that is used to sort indices.
dtype: int or str or np.dtype, optional
Optional data-type for storing the lattice indices. By default the given limits
are checked to determine the smallest possible data-type.
Returns
-------
vectors: (N, D) np.ndarray
"""
if dtype is None:
dtype = min_dtype(limits, signed=True)
limits = np.asarray(limits)
dim = limits.shape[0]
# Create meshgrid reshape grid to array of indices
# version 1:
# axis = np.meshgrid(*(np.arange(*lim, dtype=dtype) for lim in limits))
# nvecs = np.asarray([np.asarray(a).flatten("F") for a in axis]).T
# version 2:
# slices = [slice(lim[0], lim[1], 1) for lim in limits]
# nvecs = np.mgrid[slices].astype(dtype).reshape(dim, -1).T
# version 3:
size = limits[:, 1] - limits[:, 0]
nvecs = np.indices(size, dtype=dtype).reshape(dim, -1).T + limits[:, 0]
# Optionally sort indices along given axis
if sort_axis is not None:
nvecs = nvecs[np.lexsort(nvecs.T[[sort_axis]])]
return nvecs
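# Illustrative example (not from the library docs): index vectors of a 2x2 grid.
#     vindices([(0, 2), (0, 2)])  # -> [[0, 0], [0, 1], [1, 0], [1, 1]]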
def vrange(start=None, *args,
dtype: Optional[Union[int, str, np.dtype]] = None,
sort_axis: Optional[int] = 0, **kwargs) -> np.ndarray:
""" Return evenly spaced vectors within a given interval.
Parameters
----------
start: array_like, optional
The starting value of the interval. The interval includes this value.
The default start value is 0.
stop: array_like
The end value of the interval.
step: array_like, optional
Spacing between values. If `start` and `stop` are sequences and the `step`
is a scalar the given step size is used for all dimensions of the vectors.
The default step size is 1.
sort_axis: int, optional
Optional axis that is used to sort indices.
dtype: dtype, optional
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
Returns
-------
vectors: (N, D) np.ndarray
"""
# parse arguments
if len(args) == 0:
stop = start
start = np.zeros_like(stop)
step = kwargs.get("step", 1.0)
elif len(args) == 1:
stop = args[0]
step = kwargs.get("step", 1.0)
else:
stop, step = args
start = np.atleast_1d(start)
stop = np.atleast_1d(stop)
if step is None:
step = np.ones_like(start)
elif not hasattr(step, "__len__"):
step = np.ones_like(start) * step
# Create grid and reshape to array of vectors
slices = [slice(i, f, s) for i, f, s in zip(start, stop, step)]
array = np.mgrid[slices].reshape(len(slices), -1).T
# Optionally sort array along given axis
if sort_axis is not None:
array = array[np.lexsort(array.T[[sort_axis]])]
return array if dtype is None else array.astype(dtype)
def cell_size(vectors: ArrayLike) -> np.ndarray:
""" Computes the shape of the box spawned by the given vectors.
Parameters
----------
vectors: array_like
The basis vectors defining the cell.
Returns
-------
size: np.ndarray
"""
max_values = np.max(vectors, axis=0)
min_values = np.min(vectors, axis=0)
min_values[min_values > 0] = 0
return max_values - min_values
def cell_volume(vectors: ArrayLike) -> float:
r""" Computes the volume of the unit cell defined by the primitive vectors.
The volume of the unit-cell in two and three dimensions is defined by
.. math::
V_{2d} = \abs{a_1 \cross a_2}, \quad V_{3d} = a_1 \cdot \abs{a_2 \cross a_3}
For higher dimensions the volume is computed using the determinant:
.. math::
V_{d} = \sqrt{\det{A A^T}}
where .math:`A` is the array of vectors.
Parameters
----------
vectors: array_like
The basis vectors defining the cell.
Returns
-------
vol: float
"""
dim = len(vectors)
if dim == 1:
v = float(vectors)
elif dim == 2:
v = np.cross(vectors[0], vectors[1])
elif dim == 3:
cross = np.cross(vectors[1], vectors[2])
v = np.dot(vectors[0], cross)
else:
v = np.sqrt(np.linalg.det(np.dot(vectors.T, vectors)))
return abs(v)
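# Quick sanity check (illustrative): an orthonormal 3D cell has unit volume,
# since |a_1 . (a_2 x a_3)| = 1 for the standard basis.
#     cell_volume(np.eye(3))  # -> 1.0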
def build_periodic_translation_vector(indices, axs):
limits = np.array([np.min(indices, axis=0), np.max(indices, axis=0)])
nvec = np.zeros(indices.shape[1] - 1, dtype=int) # np.int is deprecated and removed in NumPy >= 1.24
for ax in np.atleast_1d(axs):
nvec[ax] = np.floor(limits[1][ax]) + 1
return nvec
def compute_vectors(a: float, b: Optional[float] = None, c: Optional[float] = None,
alpha: Optional[float] = None, beta: Optional[float] = None,
gamma: Optional[float] = None,
decimals: Optional[int] = 0) -> np.ndarray:
""" Computes lattice vectors by the lengths and angles. """
if b is None and c is None:
vectors = [a]
elif c is None:
alpha = np.deg2rad(alpha)
ax = a
bx = b * np.cos(alpha)
by = b * np.sin(alpha)
vectors = np.array([
[ax, 0],
[bx, by]
])
else:
alpha = np.deg2rad(alpha)
beta = np.deg2rad(beta)
gamma = np.deg2rad(gamma)
ax = a
bx = b * np.cos(gamma)
by = b * np.sin(gamma)
cx = c * np.cos(beta)
cy = (abs(c) * abs(b) * np.cos(alpha) - bx * cx) / by
cz = np.sqrt(c ** 2 - cx ** 2 - cy ** 2)
vectors = np.array([
[ax, 0, 0],
[bx, by, 0],
[cx, cy, cz]
])
if decimals:
vectors = np.round(vectors, decimals=decimals)
return vectors
# noinspection PyUnresolvedReferences
class KDTree(cKDTree):
"""Simple wrapper of scipy's cKTree with global query settings."""
def __init__(self, points, k=1, max_dist=np.inf, eps=0., p=2):
super().__init__(points)
self.max_dist = max_dist
self.k = k
self.p = p
self.eps = eps
def query_ball_point(self, x, r):
return super().query_ball_point(x, r, self.p, self.eps)
def query_ball_tree(self, other, r):
return super().query_ball_tree(other, r, self.p, self.eps)
def query_pairs(self, r):
return super().query_pairs(r, self.p, self.eps)
def query(self, x=None, num_jobs=1, decimals=None, include_zero=False, compact=True):
x = self.data if x is None else x
distances, neighbors = super().query(x, self.k, self.eps, self.p, self.max_dist, num_jobs)
# Remove zero-distance neighbors and convert dtype
if not include_zero and np.all(distances[:, 0] == 0):
distances = distances[:, 1:]
neighbors = neighbors[:, 1:]
neighbors = neighbors.astype(min_dtype(self.n, signed=False))
# Remove neighbors with distance larger than max_dist
if self.max_dist < np.inf:
invalid = distances > self.max_dist
neighbors[invalid] = self.n
distances[invalid] = np.inf
# Remove all invalid columns
if compact:
mask = np.any(distances != np.inf, axis=0)
neighbors = neighbors[:, mask]
distances = distances[:, mask]
# Round distances
if decimals is not None:
distances = np.round(distances, decimals=decimals)
return neighbors, distances
def compute_neighbors(positions, k=20, max_dist=np.inf, num_jobs=1, decimals=None, eps=0.,
include_zero=False, compact=True, x=None):
# Build tree and query neighbors
x = positions if x is None else x
tree = KDTree(positions, k=k, max_dist=max_dist, eps=eps)
distances, neighbors = tree.query(x, num_jobs, decimals, include_zero, compact)
return neighbors, distances
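# Illustrative neighbor query (my own example, not from the library docs):
#     pts = np.array([[0., 0.], [1., 0.], [0., 1.]])
#     neighbors, distances = compute_neighbors(pts, k=2, max_dist=1.5)
# Each row lists the indices of the nearest other points; the sentinel value
# len(pts) marks slots whose distance exceeded max_dist.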
class VoronoiTree:
def __init__(self, points):
points = np.asarray(points)
dim = points.shape[1]
edges = list()
if dim == 1:
vertices = points / 2
idx = np.where((vertices == np.zeros(vertices.shape[1])).all(axis=1))[0]
vertices = np.delete(vertices, idx)
vertices = np.atleast_2d(vertices).T
else:
vor = Voronoi(points)
# Save only finite vertices
vertices = vor.vertices # noqa
for pointidx, simplex in zip(vor.ridge_points, vor.ridge_vertices): # noqa
simplex = np.asarray(simplex)
if np.all(simplex >= 0):
edges.append(simplex)
self.dim = dim
self.points = points
self.edges = edges
self.vertices = vertices
self.tree = cKDTree(points) # noqa
self.origin = self.query(np.zeros(dim))
def query(self, x, k=1, eps=0):
return self.tree.query(x, k, eps) # noqa
def draw(self, ax=None, color="C0", size=3, lw=1, alpha=0.15, point_color="k", point_size=3,
draw_data=True, points=True, draw=True, fill=True):
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d" if self.dim == 3 else None)
if draw_data:
draw_points(ax, self.points, size=point_size, color=point_color)
if self.dim > 1:
draw_vectors(ax, self.points, lw=0.5, color=point_color)
if points:
draw_points(ax, self.vertices, size=size, color=color)
if self.dim == 2 and draw:
segments = np.array([self.vertices[i] for i in self.edges])
draw_lines(ax, segments, color=color, lw=lw)
elif self.dim == 3:
if draw:
segments = np.array([self.vertices[np.append(i, i[0])] for i in self.edges])
draw_lines(ax, segments, color=color, lw=lw)
if fill:
surfaces = np.array([self.vertices[i] for i in self.edges])
draw_surfaces(ax, surfaces, color=color, alpha=alpha)
if self.dim == 3:
ax.set_aspect("equal")
else:
ax.set_aspect("equal", "box")
return ax
def __repr__(self):
return f"{self.__class__.__name__}(vertices: {len(self.vertices)})"
def __str__(self):
return f"vertices:\n{self.vertices}\n" \
f"egdes:\n{self.edges}"
class WignerSeitzCell(VoronoiTree):
def __init__(self, points):
super().__init__(points)
self._root = self.query(np.zeros(self.dim))[1]
@property
def limits(self):
return np.array([np.min(self.vertices, axis=0), np.max(self.vertices, axis=0)]).T
@property
def size(self):
return self.limits[1] - self.limits[0]
def check(self, points):
cells = np.asarray(self.query(points)[1])
return cells == self._root
def arange(self, steps, offset=0.):
limits = self.limits * (1 + offset)
steps = [steps] * self.dim if not hasattr(steps, "__len__") else steps
return [np.arange(*lims, step=step) for lims, step in zip(limits, steps)]
def linspace(self, nums, offset=0.):
limits = self.limits * (1 + offset)
nums = [nums] * self.dim if not hasattr(nums, "__len__") else nums
return [np.linspace(*lims, num=num) for lims, num in zip(limits, nums)]
def meshgrid(self, nums=None, steps=None, offset=0., check=True):
if nums is not None:
grid = np.array(np.meshgrid(*self.linspace(nums, offset)))
elif steps is not None:
grid = np.array(np.meshgrid(*self.arange(steps, offset)))
else:
raise ValueError("Either the number of points or the step size muste be specified")
if check:
lengths = grid.shape[1:]
dims = range(len(lengths))
for item in itertools.product(*[range(n) for n in lengths]):
point = np.array([grid[d][item] for d in dims])
if not self.check(point):
for d in dims:
grid[d][item] = np.nan
return grid
def symmetry_points(self):
origin = np.zeros((1,))
corners = self.vertices.copy()
face_centers = None
if self.dim == 1:
return origin, corners, None, None
elif self.dim == 2:
edge_centers = np.zeros((len(self.edges), 2))
for i, simplex in enumerate(self.edges):
p1, p2 = self.vertices[simplex]
edge_centers[i] = p1 + (p2 - p1) / 2
elif self.dim == 3:
edge_centers = list()
face_centers = list()
for i, simplex in enumerate(self.edges):
edges = self.vertices[simplex]
# compute face centers
face_centers.append(np.mean(edges, axis=0))
# compute edge centers
for p1, p2 in chain(edges, cycle=True):
edge_centers.append(p1 + (p2 - p1) / 2)
edge_centers = np.asarray(edge_centers)
face_centers = np.asarray(face_centers)
else:
raise NotImplementedError()
return origin, corners, edge_centers, face_centers
def rx(theta: float) -> np.ndarray:
"""X-Rotation matrix."""
sin, cos = np.sin(theta), np.cos(theta)
return np.array([[1, 0, 0], [0, cos, -sin], [0, sin, cos]])
def ry(theta: float) -> np.ndarray:
"""Y-Rotation matrix."""
sin, cos = np.sin(theta), np.cos(theta)
return np.array([[cos, 0, sin], [0, 1, 0], [-sin, 0, +cos]])
def rz(theta: float) -> np.ndarray:
"""Z-Rotation matrix."""
sin, cos = np.sin(theta), np.cos(theta)
return np.array([[cos, -sin, 0], [sin, cos, 0], [0, 0, 1]])
def rot(thetax: float = 0., thetay: float = 0., thetaz: float = 0.) -> np.ndarray:
"""General rotation matrix"""
r = np.eye(3)
if thetaz:
r = np.dot(r, rz(thetaz))
if thetay:
r = np.dot(r, ry(thetay))
if thetax:
r = np.dot(r, rx(thetax)) # fixed: the x-axis rotation uses rx, not rz
return r
def rotate2d(a, theta):
"""Applies the z-rotation matrix to a 2D point"""
return np.dot(a, rz(theta)[:2, :2])
def rotate3d(a, thetax=0., thetay=0., thetaz=0.):
"""Applies the general rotation matrix to a 3D point"""
return np.dot(a, rot(thetax, thetay, thetaz))
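# Rotation sanity check (illustrative): rotating the unit x-vector by 90
# degrees about the z-axis yields the unit y-vector.
#     rz(np.pi / 2) @ np.array([1., 0., 0.])  # -> approx. [0., 1., 0.]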
| 32.840777
| 98
| 0.586945
| 2,339
| 16,913
| 4.170158
| 0.163745
| 0.010765
| 0.005536
| 0.005229
| 0.185565
| 0.132253
| 0.099139
| 0.080172
| 0.072996
| 0.059258
| 0
| 0.017308
| 0.282623
| 16,913
| 514
| 99
| 32.904669
| 0.786615
| 0.234199
| 0
| 0.143345
| 0
| 0
| 0.030875
| 0.009359
| 0
| 0
| 0
| 0
| 0
| 1
| 0.112628
| false
| 0
| 0.027304
| 0.027304
| 0.255973
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
06540074f347487eb56749881c52a1e16c5be40a
| 6,615
|
py
|
Python
|
Foobar 3.3.py
|
SambhavG/Google-foobar
|
f64f1a4a367c0eab5265e4ed6e22f94b7a297cad
|
[
"MIT"
] | null | null | null |
Foobar 3.3.py
|
SambhavG/Google-foobar
|
f64f1a4a367c0eab5265e4ed6e22f94b7a297cad
|
[
"MIT"
] | null | null | null |
Foobar 3.3.py
|
SambhavG/Google-foobar
|
f64f1a4a367c0eab5265e4ed6e22f94b7a297cad
|
[
"MIT"
] | null | null | null |
def printMatrix(m):
for i in range(0, len(m)):
print(m[i])
print("\n")
def convertInputToReq(data):
matrix1 = data
width = len(data)
terminalStates = []
for i in range(0, width):
# is every entry in this row 0 (i.e. is it a terminal state)?
rowSum = sum(data[i])
if (rowSum==0):
terminalStates.append(i)
else:
for j in range(0, width):
if (data[i][j] != 0):
matrix1[i][j] = [data[i][j], rowSum]
#Move each terminal state row to the beginning
matrix2 = []
for i in terminalStates:
matrix2.append(matrix1[i])
for i in range(0, width):
if not i in terminalStates:
matrix2.append(matrix1[i])
#Move each terminal state column to the beginning
matrix3 = []
for i in range(0, width):
matrix3.append([])
for j in terminalStates:
matrix3[i].append(matrix2[i][j])
for j in range(0, width):
if not j in terminalStates:
matrix3[i].append(matrix2[i][j])
#Add identity elements to the first len(terminalStates) elements
for i in range(len(terminalStates)):
matrix3[i][i] = [1, 1]
return matrix3, len(terminalStates)
def identityMatrix(x):
identity = []
for i in range(0, x):
identity.append([])
for j in range(0, x):
if (i == j):
identity[i].append([1,1])
else:
identity[i].append(0)
return identity
def gcd(a, b):
while b:
a, b = b, a % b
return a
def simplify(c):
if (c != 0):
gcdVal = gcd(c[0],c[1])
return [int(c[0]/gcdVal), int(c[1]/gcdVal)]
else:
return 0
def commonDenomAdd(a, b):
if (a==0):
return b
elif (b==0):
return a
else:
raw = [a[0]*b[1]+a[1]*b[0], a[1]*b[1]]
return simplify(raw)
def simplifyMultiply(a, b):
if (a==0 or b == 0):
return 0
else:
raw = [a[0]*b[0], a[1]*b[1]]
return simplify(raw)
def simplifyDivide(a, b):
#if a is 0, return 0
#if b is 0, print error
#otherwise, raw=[a[0]*b[1], a[1]*b[0]]
if (a == 0):
return 0
elif (b == 0):
print("ERROR")
else:
raw=[a[0]*b[1], a[1]*b[0]]
return simplify(raw)
def matrixSubtract(a, b):
returnMat = []
for i in range(len(a)):
returnMat.append([])
for j in range(len(a)):
bNegated = b[i][j]
if (not bNegated == 0):
bNegated = [(-1)*b[i][j][0], b[i][j][1]] # copy rather than mutating b in place
returnMat[i].append(commonDenomAdd(a[i][j], bNegated))
return returnMat
def matrixMinor(a, m, n):
#remove row m and column n
subMatrix = []
for i in range(len(a)):
subMatrix.append([])
for j in range(len(a)):
subMatrix[i].append(a[i][j])
subMatrix.pop(m)
for j in range(0, len(subMatrix)):
subMatrix[j].pop(n)
return subMatrix
def matrixDeterminant(a):
if (len(a) == 1):
return a[0][0]
else:
determinant = 0
for i in range(len(a)):
#Add contribution to determinant from top row of matrix a
cofactorMultiplier = (-1)**(i)
minorMat = matrixMinor(a, 0, i)
minorDet = matrixDeterminant(minorMat)
minorDet = simplifyMultiply(minorDet, a[0][i])
if (minorDet != 0):
minorDet[0]*=cofactorMultiplier
determinant = commonDenomAdd(determinant, minorDet)
return determinant
def matrixTranspose(a):
transpose = []
for i in range(len(a)):
transpose.append([])
for j in range(len(a)):
transpose[i].append(a[j][i])
return transpose
def matrixInverse(a):
#Find cofactor matrix of a
cofactors = []
for i in range(0, len(a)):
cofactors.append([])
for j in range(0, len(a)):
#Create submatrix without row i or column j
subMatrix = matrixMinor(a, i, j)
#Find determinant of subMatrix
determinant = matrixDeterminant(subMatrix)
#Append
if (determinant != 0):
determinant[0]*=((-1)**(i+j))
cofactors[i].append(determinant)
cofactorTranspose = matrixTranspose(cofactors)
aDeterminant = matrixDeterminant(a)
for i in range(0, len(a)):
for j in range(0, len(a)):
cofactorTranspose[i][j] = simplifyDivide(cofactorTranspose[i][j], aDeterminant)
return cofactorTranspose
def matrixProduct(a, b):
product = []
for i in range(len(a)):
product.append([])
for j in range(len(b[0])):
ijEntry = 0
for k in range(len(b)):
ijEntry = commonDenomAdd(ijEntry, simplifyMultiply(a[i][k],b[k][j]))
product[i].append(ijEntry)
return product
def getFirstNonzeroElement(a):
for i in range(len(a)):
if (a[i] != 0):
return a[i][1]
return 0
def scrapeTopRow(a):
if (len(a)==0):
return [1,1]
returnVals = []
smallestLCM = 1
for i in range(len(a[0])):
if (a[0][i] != 0):
smallestLCM = smallestLCM*a[0][i][1]//gcd(smallestLCM, a[0][i][1])
for i in range(len(a[0])):
if (a[0][i] != 0):
returnVals.append(int(a[0][i][0]*smallestLCM/a[0][i][1]))
else:
returnVals.append(0)
returnVals.append(sum(returnVals))
return returnVals
def findR(data, numTerminal):
R = []
for i in range(numTerminal, len(data)):
R.append([])
for j in range(0, numTerminal):
R[i-numTerminal].append(data[i][j])
return R
def findQ(data, numTerminal):
Q = []
for i in range(numTerminal, len(data)):
Q.append([])
for j in range(numTerminal, len(data)):
Q[i-numTerminal].append(data[i][j])
return Q
def solution(m):
reqInput = convertInputToReq(m)
reqMatrix = reqInput[0]
numTerminal = reqInput[1]
qMatrix = findQ(reqMatrix, numTerminal)
rMatrix = findR(reqMatrix, numTerminal)
iminusq = matrixSubtract(identityMatrix(len(reqMatrix)-numTerminal),qMatrix)
fMatrix = matrixInverse(iminusq)
frMatrix = matrixProduct(fMatrix, rMatrix)
topRow = scrapeTopRow(frMatrix)
return topRow
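# Commonly cited sample case for this puzzle (stated here as an assumption,
# not re-verified): three terminal states reached with probabilities
# 7/21, 6/21 and 8/21, returned as numerators plus the common denominator.
#     solution([[0, 2, 1, 0, 0],
#               [0, 0, 0, 3, 4],
#               [0, 0, 0, 0, 0],
#               [0, 0, 0, 0, 0],
#               [0, 0, 0, 0, 0]])  # -> [7, 6, 8, 21]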
| 28.636364
| 92
| 0.521088
| 839
| 6,615
| 4.108462
| 0.134684
| 0.062953
| 0.033072
| 0.057441
| 0.289527
| 0.244851
| 0.167102
| 0.062953
| 0.062953
| 0.036554
| 0
| 0.026461
| 0.343008
| 6,615
| 230
| 93
| 28.76087
| 0.766682
| 0.066213
| 0
| 0.260638
| 0
| 0
| 0.00118
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.101064
| false
| 0
| 0
| 0
| 0.239362
| 0.021277
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
06585c3b0c0000d446eb614d1e5895fa37089822
| 1,105
|
py
|
Python
|
backend/project_requests/admin.py
|
mnieber/taskboard
|
7925342751e2782bd0a0258eb2d43d9ec90ce9d8
|
[
"MIT"
] | null | null | null |
backend/project_requests/admin.py
|
mnieber/taskboard
|
7925342751e2782bd0a0258eb2d43d9ec90ce9d8
|
[
"MIT"
] | null | null | null |
backend/project_requests/admin.py
|
mnieber/taskboard
|
7925342751e2782bd0a0258eb2d43d9ec90ce9d8
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from django.http import HttpResponseRedirect
from django.urls import path
from faker import Faker
from .models import ProjectRequest
from .utils import create_project_request
@admin.register(ProjectRequest)
class ProjectRequestAdmin(admin.ModelAdmin):
change_list_template = "project_requests/admin/project_requests_changelist.html"
def get_urls(self):
urls = super().get_urls()
my_urls = [
path("create-fake/", self.create_fake),
]
return my_urls + urls
def create_fake(self, request):
f = Faker()
project_request = create_project_request(
**dict(
location=f.country(),
description=f.text(),
changemaker_name=f.name(),
date_of_birth=f.date(),
project_name=f.word(),
email=f.email(),
google_doc_url=f.url(),
description_url=f.url(),
)
)
project_request.task.transition("receive", {})
return HttpResponseRedirect("../")
| 29.864865
| 84
| 0.608145
| 117
| 1,105
| 5.538462
| 0.444444
| 0.08642
| 0.061728
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.291403
| 1,105
| 36
| 85
| 30.694444
| 0.827586
| 0
| 0
| 0
| 0
| 0
| 0.069683
| 0.049774
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064516
| false
| 0
| 0.193548
| 0
| 0.387097
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0660694db2ddc7b0023f6b169f47cbe6fc31c8a7
| 916
|
py
|
Python
|
topo.py
|
rahil-g/gpf
|
234c22f500283f75454ccba4a12b765be9ddad05
|
[
"MIT"
] | null | null | null |
topo.py
|
rahil-g/gpf
|
234c22f500283f75454ccba4a12b765be9ddad05
|
[
"MIT"
] | null | null | null |
topo.py
|
rahil-g/gpf
|
234c22f500283f75454ccba4a12b765be9ddad05
|
[
"MIT"
] | null | null | null |
#Author: Rahil Gandotra
#This file consists of the custom Mininet topology used for GPF.
from mininet.topo import Topo
class MyTopo(Topo):
def __init__(self):
Topo.__init__(self)
h1 = self.addHost('h1')
h2 = self.addHost('h2')
s1 = self.addSwitch('s1', listenPort=6675, dpid='0000000000000100')
s5 = self.addSwitch('s5', listenPort=6676, dpid='0000000000000200')
s2 = self.addSwitch('s2', listenPort=6677, dpid='0000000000000300')
s3 = self.addSwitch('s3', listenPort=6678, dpid='0000000000000400')
s4 = self.addSwitch('s4', listenPort=6679, dpid='0000000000000500')
self.addLink(h1, s1)
self.addLink(h2, s5)
self.addLink(s1, s2)
self.addLink(s1, s3)
self.addLink(s1, s4)
self.addLink(s5, s2)
self.addLink(s5, s3)
self.addLink(s5, s4)
topos = { 'mytopo': ( lambda: MyTopo() ) }
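# Typical launch (assuming this file is saved as topo.py; run from a shell):
#     sudo mn --custom topo.py --topo mytopo --controller remote
# The explicit listenPort/dpid values suggest an external OpenFlow controller.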
| 31.586207
| 75
| 0.622271
| 112
| 916
| 5.017857
| 0.410714
| 0.156584
| 0.069395
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.18598
| 0.2369
| 916
| 28
| 76
| 32.714286
| 0.618026
| 0.092795
| 0
| 0
| 0
| 0
| 0.120627
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.05
| 0
| 0.15
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0661b5f4de7b9d1818fd8ebe0cb07e2e58e19d2a
| 10,819
|
py
|
Python
|
Contents/Libraries/Shared/subliminal_patch/providers/legendastv.py
|
jippo015/Sub-Zero.bundle
|
734e0f7128c05c0f639e11e7dfc77daa1014064b
|
[
"MIT"
] | 1,553
|
2015-11-09T02:17:06.000Z
|
2022-03-31T20:24:52.000Z
|
Contents/Libraries/Shared/subliminal_patch/providers/legendastv.py
|
saiterlz/Sub-Zero.bundle
|
1a0bb9c3e4be84be35d46672907783363fe5a87b
|
[
"MIT"
] | 691
|
2015-11-05T21:32:26.000Z
|
2022-03-17T10:52:45.000Z
|
Contents/Libraries/Shared/subliminal_patch/providers/legendastv.py
|
saiterlz/Sub-Zero.bundle
|
1a0bb9c3e4be84be35d46672907783363fe5a87b
|
[
"MIT"
] | 162
|
2015-11-06T19:38:55.000Z
|
2022-03-16T02:42:41.000Z
|
# coding=utf-8
import logging
import rarfile
import os
from subliminal.exceptions import ConfigurationError
from subliminal.providers.legendastv import LegendasTVSubtitle as _LegendasTVSubtitle, \
LegendasTVProvider as _LegendasTVProvider, Episode, Movie, guess_matches, guessit, sanitize, region, type_map, \
raise_for_status, json, SHOW_EXPIRATION_TIME, title_re, season_re, datetime, pytz, NO_VALUE, releases_key, \
SUBTITLE_EXTENSIONS, language_converters
from subzero.language import Language
logger = logging.getLogger(__name__)
class LegendasTVSubtitle(_LegendasTVSubtitle):
def __init__(self, language, type, title, year, imdb_id, season, archive, name):
super(LegendasTVSubtitle, self).__init__(language, type, title, year, imdb_id, season, archive, name)
self.archive.content = None
self.release_info = archive.name
self.page_link = archive.link
def make_picklable(self):
self.archive.content = None
return self
def get_matches(self, video, hearing_impaired=False):
matches = set()
# episode
if isinstance(video, Episode) and self.type == 'episode':
# series
if video.series and (sanitize(self.title) in (
sanitize(name) for name in [video.series] + video.alternative_series)):
matches.add('series')
# year
if (video.original_series and self.year is None) or (video.year and video.year == self.year):
matches.add('year')
# imdb_id
if video.series_imdb_id and self.imdb_id == video.series_imdb_id:
matches.add('series_imdb_id')
# movie
elif isinstance(video, Movie) and self.type == 'movie':
# title
if video.title and (sanitize(self.title) in (
sanitize(name) for name in [video.title] + video.alternative_titles)):
matches.add('title')
# year
if video.year and self.year == video.year:
matches.add('year')
# imdb_id
if video.imdb_id and self.imdb_id == video.imdb_id:
matches.add('imdb_id')
# name
matches |= guess_matches(video, guessit(self.name, {'type': self.type, 'single_value': True}))
return matches
class LegendasTVProvider(_LegendasTVProvider):
languages = {Language(*l) for l in language_converters['legendastv'].to_legendastv.keys()}
subtitle_class = LegendasTVSubtitle
def __init__(self, username=None, password=None):
# Provider needs UNRAR installed. If not available raise ConfigurationError
try:
rarfile.custom_check([rarfile.UNRAR_TOOL], True)
except rarfile.RarExecError:
raise ConfigurationError('UNRAR tool not available')
if any((username, password)) and not all((username, password)):
raise ConfigurationError('Username and password must be specified')
self.username = username
self.password = password
self.logged_in = False
self.session = None
@staticmethod
def is_valid_title(title, title_id, sanitized_title, season, year, imdb_id):
"""Check if is a valid title."""
if title["imdb_id"] and title["imdb_id"] == imdb_id:
logger.debug(u'Matched title "%s" as IMDB ID %s', sanitized_title, title["imdb_id"])
return True
if title["title2"] and sanitize(title['title2']) == sanitized_title:
logger.debug(u'Matched title "%s" as "%s"', sanitized_title, title["title2"])
return True
return _LegendasTVProvider.is_valid_title(title, title_id, sanitized_title, season, year)
@region.cache_on_arguments(expiration_time=SHOW_EXPIRATION_TIME, should_cache_fn=lambda value: value)
def search_titles(self, title, season, title_year, imdb_id):
"""Search for titles matching the `title`.
For episodes, each season has its own title
:param str title: the title to search for.
:param int season: season of the title
:param int title_year: year of the title
:return: found titles.
:rtype: dict
"""
titles = {}
sanitized_titles = [sanitize(title)]
ignore_characters = {'\'', '.'}
if any(c in title for c in ignore_characters):
sanitized_titles.append(sanitize(title, ignore_characters=ignore_characters))
for sanitized_title in sanitized_titles:
# make the query
if season:
logger.info('Searching episode title %r for season %r', sanitized_title, season)
else:
logger.info('Searching movie title %r', sanitized_title)
r = self.session.get(self.server_url + 'legenda/sugestao/{}'.format(sanitized_title), timeout=10)
raise_for_status(r)
results = json.loads(r.text)
# loop over results
for result in results:
source = result['_source']
# extract id
title_id = int(source['id_filme'])
# extract type
title = {'type': type_map[source['tipo']], 'title2': None, 'imdb_id': None}
# extract title, year and country
name, year, country = title_re.match(source['dsc_nome']).groups()
title['title'] = name
if "dsc_nome_br" in source:
name2, _, _ = title_re.match(source['dsc_nome_br']).groups()
title['title2'] = name2
# extract imdb_id
if source['id_imdb'] != '0':
if not source['id_imdb'].startswith('tt'):
title['imdb_id'] = 'tt' + source['id_imdb'].zfill(7)
else:
title['imdb_id'] = source['id_imdb']
# extract season
if title['type'] == 'episode':
if source['temporada'] and source['temporada'].isdigit():
title['season'] = int(source['temporada'])
else:
match = season_re.search(source['dsc_nome_br'])
if match:
title['season'] = int(match.group('season'))
else:
logger.debug('No season detected for title %d (%s)', title_id, name)
# extract year
if year:
title['year'] = int(year)
elif source['dsc_data_lancamento'] and source['dsc_data_lancamento'].isdigit():
# year is based on season air date hence the adjustment
title['year'] = int(source['dsc_data_lancamento']) - title.get('season', 1) + 1
# add title only if is valid
# Check against title without ignored chars
if self.is_valid_title(title, title_id, sanitized_titles[0], season, title_year, imdb_id):
logger.debug(u'Found title: %s', title)
titles[title_id] = title
logger.debug('Found %d titles', len(titles))
return titles
def query(self, language, title, season=None, episode=None, year=None, imdb_id=None):
# search for titles
titles = self.search_titles(title, season, year, imdb_id)
subtitles = []
# iterate over titles
for title_id, t in titles.items():
logger.info('Getting archives for title %d and language %d', title_id, language.legendastv)
archives = self.get_archives(title_id, language.legendastv, t['type'], season, episode)
if not archives:
logger.info('No archives found for title %d and language %d', title_id, language.legendastv)
# iterate over title's archives
for a in archives:
# compute an expiration time based on the archive timestamp
expiration_time = (datetime.utcnow().replace(tzinfo=pytz.utc) - a.timestamp).total_seconds()
# attempt to get the releases from the cache
cache_key = releases_key.format(archive_id=a.id, archive_name=a.name)
releases = region.get(cache_key, expiration_time=expiration_time)
# the releases are not in cache or cache is expired
if releases == NO_VALUE:
logger.info('Releases not found in cache')
# download archive
self.download_archive(a)
# extract the releases
releases = []
for name in a.content.namelist():
# discard the legendastv file
if name.startswith('Legendas.tv'):
continue
# discard hidden files
if os.path.split(name)[-1].startswith('.'):
continue
# discard non-subtitle files
if not name.lower().endswith(SUBTITLE_EXTENSIONS):
continue
releases.append(name)
# cache the releases
region.set(cache_key, releases)
# iterate over releases
for r in releases:
subtitle = self.subtitle_class(language, t['type'], t['title'], t.get('year'), t.get('imdb_id'),
t.get('season'), a, r)
logger.debug('Found subtitle %r', subtitle)
subtitles.append(subtitle)
return subtitles
def list_subtitles(self, video, languages):
season = episode = None
if isinstance(video, Episode):
titles = [video.series] + video.alternative_series
season = video.season
episode = video.episode
else:
titles = [video.title] + video.alternative_titles
for title in titles:
subtitles = [s for l in languages for s in
self.query(l, title, season=season, episode=episode, year=video.year, imdb_id=video.imdb_id)]
if subtitles:
return subtitles
return []
def download_subtitle(self, subtitle):
super(LegendasTVProvider, self).download_subtitle(subtitle)
subtitle.archive.content = None
def get_archives(self, title_id, language_code, title_type, season, episode):
return super(LegendasTVProvider, self).get_archives.original(self, title_id, language_code, title_type,
season, episode)
| 41.136882
| 118
| 0.574175
| 1,192
| 10,819
| 5.061242
| 0.196309
| 0.028841
| 0.014918
| 0.009945
| 0.155644
| 0.11752
| 0.109233
| 0.086856
| 0.076579
| 0.047074
| 0
| 0.002487
| 0.331084
| 10,819
| 262
| 119
| 41.293893
| 0.831146
| 0.096589
| 0
| 0.1
| 0
| 0.0125
| 0.085711
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0.025
| 0.0375
| 0.00625
| 0.1875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
06631addf22bfb69f24be36f23cfcd2fff2aa5f2
| 1,587
|
py
|
Python
|
Position.py
|
bubakazouba/Robinhood-for-Google-Finance
|
4e0aa8955e4bc786a8528ea500459f5937f15a96
|
[
"MIT"
] | 5
|
2017-11-24T08:13:47.000Z
|
2021-05-05T04:48:30.000Z
|
Position.py
|
bubakazouba/Robinhood-for-Google-Finance
|
4e0aa8955e4bc786a8528ea500459f5937f15a96
|
[
"MIT"
] | null | null | null |
Position.py
|
bubakazouba/Robinhood-for-Google-Finance
|
4e0aa8955e4bc786a8528ea500459f5937f15a96
|
[
"MIT"
] | null | null | null |
import re
class Position(object):
def __init__(self):
self.total_in = None
self.total_out = None
self.ticker_symbol = None
self.total_number_of_shares = None
self.remaining_number_of_shares = None
self.open_date = None
self.close_date = None
def format_date(self, date):
match = re.match("(\d{4})-(\d{2})-(\d{2})",date)
yyyy = match.group(1)
mm = match.group(2)
dd = match.group(3)
return "%s/%s/%s" % (mm, dd, yyyy)
def to_string(self):
cost_open = self.total_in / self.total_number_of_shares
if self.close_date is not None:
cost_close = self.total_out / self.total_number_of_shares
profit = (self.total_out - self.total_in)
profit_percentage = ("%+.2f" % (100 * profit / self.total_in)) + "%"
return "\t".join([self.ticker_symbol , self.format_date(self.open_date) , "B" , self.format_money(cost_open) , str(self.total_number_of_shares) , self.format_money(self.total_in) , self.format_money(cost_close) , self.format_money(self.total_out) , self.format_money_with_sign(profit), profit_percentage, self.format_date(self.close_date)])
else:
return "\t".join([self.ticker_symbol , self.format_date(self.open_date) , "B" , self.format_money(cost_open) , str(self.total_number_of_shares) , self.format_money(self.total_in) , "" , "" , "", "", ""])
def format_money(self, money):
return "$%.2f" % money
def format_money_with_sign(self, money):
return "$%+.2f" % money
| 44.083333
| 352
| 0.63264
| 224
| 1,587
| 4.1875
| 0.232143
| 0.143923
| 0.11194
| 0.090618
| 0.488273
| 0.275053
| 0.275053
| 0.275053
| 0.275053
| 0.275053
| 0
| 0.009764
| 0.225583
| 1,587
| 36
| 353
| 44.083333
| 0.753458
| 0
| 0
| 0
| 0
| 0
| 0.034005
| 0.014484
| 0
| 0
| 0
| 0
| 0
| 1
| 0.172414
| false
| 0
| 0.034483
| 0.068966
| 0.413793
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
066587c08345eadec5ce3298131ac1c2190623fb
| 15,789
|
py
|
Python
|
app_framework/main_window.py
|
planktontoolbox/plankton-toolbox
|
626930120329983fb9419a9aed94712148bac219
|
[
"MIT"
] | 5
|
2016-12-02T08:24:35.000Z
|
2021-02-24T08:41:41.000Z
|
app_framework/main_window.py
|
planktontoolbox/plankton-toolbox
|
626930120329983fb9419a9aed94712148bac219
|
[
"MIT"
] | 53
|
2016-11-14T13:11:41.000Z
|
2022-01-13T09:28:11.000Z
|
app_framework/main_window.py
|
planktontoolbox/plankton-toolbox
|
626930120329983fb9419a9aed94712148bac219
|
[
"MIT"
] | 1
|
2020-11-27T01:20:10.000Z
|
2020-11-27T01:20:10.000Z
|
#!/usr/bin/python3
# -*- coding:utf-8 -*-
# Project: http://plankton-toolbox.org
# Copyright (c) 2010-2018 SMHI, Swedish Meteorological and Hydrological Institute
# License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit).
import time
import codecs
from PyQt5 import QtWidgets
from PyQt5 import QtCore
import plankton_core
import app_framework
import app_activities
import app_tools
import toolbox_utils
class MainWindow(QtWidgets.QMainWindow):
"""
Main window for the Desktop application.
The layout is an activity area in the middle, an activity-and-tool-selector to the
left, and movable tools to the right and bottom. Activities are handled as stacked widgets
and tools are dockable widgets. The activity-and-tool-selector could also be dockable but
is currently locked.
Note: Camel case method names are used since the class is inherited from a Qt class.
"""
def __init__(self):
""" """
# Initialize parent.
super(MainWindow, self).__init__()
self.setWindowTitle(self.tr('Plankton Toolbox - Desktop application'))
# Version.
self._version = ''
# Note: Tools menu is public.
self.toolsmenu = None
def initialise(self):
# Load app settings.
self._ui_settings = QtCore.QSettings()
# Logging. Always log to plankton_toolbox_log.txt. Use the Log tool when
# it is available.
self._logfile = codecs.open('plankton_toolbox_log.txt', mode = 'w', encoding = 'cp1252')
self._logfile.write('Plankton Toolbox. ' +
time.strftime('%Y-%m-%d %H:%M:%S') )
self._logfile.write('')
self._logtool = None # Should be initiated later.
toolbox_utils.Logging().set_log_target(self)
# Setup main window.
self._createActions()
self._createMenu()
self._createStatusBar()
self._activity = None
self._createCentralWidget()
# Set up activities and tools.
self._toolmanager = app_tools.ToolManager()
self._toolmanager.set_parent(self)
self._toolmanager.init_tools()
#
toolbox_utils.Logging().log('Plankton Toolbox. Version: ' + self._version + '.')
# Log if user _settings.txt is used.
data_path = app_framework.ToolboxUserSettings().get_path_to_plankton_toolbox_data()
counter_path = app_framework.ToolboxUserSettings().get_path_to_plankton_toolbox_counter()
if (data_path != 'plankton_toolbox_data') or (counter_path != 'plankton_toolbox_counter'):
toolbox_utils.Logging().log('')
toolbox_utils.Logging().log('User settings in "plankton_toolbox_data/user_settings.txt": ')
toolbox_utils.Logging().log('- Path to data dictionary: ' + data_path)
toolbox_utils.Logging().log('- Path to counter dictionary: ' + counter_path)
#
self._activitymanager = app_activities.ActivityManager()
self._activitymanager.set_parent(self)
self._activitymanager.init_activities()
# Add tools to selector.
self._create_contentSelectors()
# Load last used window positions.
size = self._ui_settings.value('MainWindow/Size', QtCore.QSize(900, 600)) #.toSize()
position = self._ui_settings.value('MainWindow/Position', QtCore.QPoint(100, 80)) #.toPoint()
# Check if outside windows. New, including Windows 10.
# print("DEBUG position x: ", position.x())
# print("DEBUG position y: ", position.y())
# print("DEBUG size w: ", size.width())
# print("DEBUG size h: ", size.height())
fit_in_screen = False
screen_x = 0
screen_y = 0
screen_width = 1920
screen_height = 1020
for screen in QtWidgets.QApplication.screens():
# print("DEBUG: ", screen.name())
# print("DEBUG x: ", screen.availableGeometry().x())
# print("DEBUG y: ", screen.availableGeometry().y())
# print("DEBUG w: ", screen.availableGeometry().width())
# print("DEBUG h: ", screen.availableGeometry().height())
screen_x = screen.availableGeometry().x()
screen_y = screen.availableGeometry().y()
screen_width = screen.availableGeometry().width()
screen_height = screen.availableGeometry().height()
screen_x_max = screen_x + screen_width
screen_y_max = screen_y + screen_height
if ((position.x() + size.width()) <= (screen_x_max + 20)) and \
((position.y() + size.height()) <= (screen_y_max + 20)):
if (position.x() >= (screen_x - 20)) and (position.y() >= (screen_y - 20)):
fit_in_screen = True
break
if fit_in_screen == False:
size.setWidth(900)
size.setHeight(600)
position.setX(100)
position.setY(80)
try:
self.setGeometry(self._ui_settings.value('MainWindow/Geometry'))
self.restoreState(self._ui_settings.value('MainWindow/State'))
except Exception:
pass # May contain None at first start on new computer.
self.resize(size)
self.move(position)
# Tell the user.
app_tools.ToolManager().show_tool_by_name('Toolbox logging') # Show the log tool if hidden.
# Load resources when the main event loop has started.
# if app_framework.ToolboxSettings().get_value('Resources:Load at startup'):
# QtCore.QTimer.singleShot(10, app_framework.ToolboxResources().loadAllResources)
QtCore.QTimer.singleShot(1000, self._loadResources)
# self._loadResources()
def closeEvent(self, event):
""" Called on application shutdown. """
# Stores current window positions.
self._ui_settings.setValue('MainWindow/Size', QtCore.QVariant(self.size()))
self._ui_settings.setValue('MainWindow/Position', QtCore.QVariant(self.pos()))
self._ui_settings.setValue('MainWindow/State', self.saveState())
self._ui_settings.setValue('MainWindow/Geometry', self.geometry())
self._logfile.close() # parentheses added: bare .close referenced the method without calling it
def _createMenu(self):
"""
The main menu of the application.
Note: The Tools menu will be populated by the tool base class. Search
for 'toggleViewAction' to see the implementation.
"""
self._filemenu = self.menuBar().addMenu(self.tr('&File'))
self._filemenu.addSeparator()
self._filemenu.addAction(self._quitaction)
# self._viewmenu = self.menuBar().addMenu(self.tr('&View'))
self.toolsmenu = self.menuBar().addMenu(self.tr('&Extra tools')) # Note: Public.
self._helpmenu = self.menuBar().addMenu(self.tr('&Help'))
self._helpmenu.addAction(self._aboutaction)
# Add sub-menu in the tools menu to hide all tools.
self._hidealltools = QtWidgets.QAction(self.tr('Hide all'), self)
self._hidealltools.setStatusTip(self.tr('Makes all extra tools invisible.'))
self._hidealltools.triggered.connect(self._hideAllTools)
self.toolsmenu.addAction(self._hidealltools)
#
self.toolsmenu.addSeparator()
def _hideAllTools(self):
""" """
tools = self._toolmanager.get_tool_list()
for tool in tools:
tool.close()
def _createStatusBar(self):
"""
The status bar is located at the bottom of the main window. Tools can
write messages here by calling <i>_writeToStatusBar</i> located in the
tool base class.
"""
self.statusBar().showMessage(self.tr('Plankton Toolbox.'))
def _create_contentSelectors(self):
"""
The user should be able to choose one activity and a number of tools.
"""
# Dock widgets can be tabbed with vertical tabs.
self.setDockOptions(QtWidgets.QMainWindow.AnimatedDocks |
QtWidgets.QMainWindow.AllowTabbedDocks |
QtWidgets.QMainWindow.VerticalTabs)
# Create left dock widget and dock to main window.
# dock = QtWidgets.QDockWidget(self.tr(' Tool selector '), self)
dock = QtWidgets.QDockWidget(self.tr(' Activities: '), self)
dock.setObjectName('Activities and tools selector')
dock.setAllowedAreas(QtCore.Qt.LeftDockWidgetArea)
dock.setFeatures(QtWidgets.QDockWidget.NoDockWidgetFeatures)
# dock.setFeatures(QtWidgets.QDockWidget.DockWidgetFloatable |
# QtWidgets.QDockWidget.DockWidgetMovable)
self.addDockWidget(QtCore.Qt.LeftDockWidgetArea, dock)
# Widget to create space and layout for two groupboxes.
content = QtWidgets.QWidget()
widget = QtWidgets.QWidget()
widget.setStyleSheet("""
QDockWidget .QWidget { background-color: white; }
""")
dock.setWidget(widget)
# Add scroll.
mainscroll = QtWidgets.QScrollArea()
### mainscroll.setFrameShape(QtWidgets.QFrame.NoFrame)
mainscroll.setWidget(content)
mainscroll.setWidgetResizable(True)
mainlayout = QtWidgets.QVBoxLayout()
mainlayout.setContentsMargins(0, 0, 0, 0)
mainlayout.setSpacing(0)
mainlayout.addWidget(mainscroll)
self.test_mainscroll = mainscroll
widget.setLayout(mainlayout)
grid1 = QtWidgets.QVBoxLayout()
content.setLayout(grid1)
# Frame for activities.
activitiesgroup = QtWidgets.QFrame()
grid1.addWidget(activitiesgroup)
activitiesvbox = QtWidgets.QVBoxLayout()
activitiesgroup.setLayout(activitiesvbox)
# Groupbox for tools.
toolsgroup = QtWidgets.QGroupBox('Extra tools:')
grid1.addWidget(toolsgroup)
toolsvbox = QtWidgets.QVBoxLayout()
toolsgroup.setLayout(toolsvbox)
grid1.addStretch(5)
# Add one button for each activity. Create stacked widgets.
for activity in self._activitymanager.get_activity_list():
button = app_framework.ActivityMenuQLabel(' ' + activity.objectName())
activity.set_main_menu_button(button)
activitiesvbox.addWidget(button) # Adds to stack.
# The activity is called to select stack item by object, not index.
button.activity_menu_label_clicked.connect(button.markAsSelected)
button.activity_menu_label_clicked.connect(activity.show_in_main_window)
# Create one layer in the stacked activity widget.
self._activitystack.addWidget(activity)
#
activitiesvbox.addStretch(5)
# Add one button for each tool.
for tool in self._toolmanager.get_tool_list():
button = app_framework.ClickableQLabel(' ' + tool.objectName())
button_hide = app_framework.ClickableQLabel(' (hide)')
showhidehbox = QtWidgets.QHBoxLayout()
showhidehbox.addWidget(button)
showhidehbox.addWidget(button_hide)
showhidehbox.addStretch(10)
toolsvbox.addLayout(showhidehbox)
button.label_clicked.connect(tool.show_tool)
button_hide.label_clicked.connect(tool.hide_tool)
#
# Button to hide all tools.
button = app_framework.ClickableQLabel(' (Hide all)')
toolsvbox.addWidget(button)
button.label_clicked.connect(self._hideAllTools)
#
toolsvbox.addStretch(10)
# Activate startup activity. Select the first one in list.
activities = self._activitymanager.get_activity_list()
if len(activities) > 0:
activities[0].show_in_main_window()
# DEBUG: During development...
### activities[1].show_in_main_window()
def showActivity(self, activity):
""" """
### self._activityheader.setText('<b>' + activity.objectName() + '</b>')
self._activitystack.setCurrentWidget(activity)
# Mark left menu item as active.
if activity.get_main_menu_button():
activity.get_main_menu_button().markAsSelected()
def show_activity_by_name(self, activity_name):
""" """
for activity in self._activitymanager.get_activity_list():
if activity.objectName() == activity_name:
self.showActivity(activity)
return
def _createCentralWidget(self):
"""
The central widget contains the selected activity. It is implemented as
stacked layout, QStackedLayout, where the pages are selected from
the activities group box.
"""
### self._activityheader = QtWidgets.QLabel('<b>Activity not selected...</b>', self)
### self._activityheader.setAlignment(QtCore.Qt.AlignHCenter)
self._activitystack = QtWidgets.QStackedLayout()
# Layout widgets.
widget = QtWidgets.QWidget(self)
layout = QtWidgets.QVBoxLayout()
widget.setLayout(layout)
self.setCentralWidget(widget)
### layout.addWidget(self._activityheader)
layout.addLayout(self._activitystack)
# Dummy stack content.
dummy = QtWidgets.QWidget(self)
self._activitystack.addWidget(dummy)
def _createActions(self):
""" Common application related actions. """
self._quitaction = QtWidgets.QAction(self.tr('&Quit'), self)
self._quitaction.setShortcut(self.tr('Ctrl+Q'))
self._quitaction.setStatusTip(self.tr('Quit the application'))
self._quitaction.triggered.connect(self.close)
#
self._aboutaction = QtWidgets.QAction(self.tr('&About'), self)
self._aboutaction.setStatusTip(self.tr('Show the application\'s About box'))
self._aboutaction.triggered.connect(self._about)
def write_to_log(self, message):
""" Log to file and to the log tool when available. """
# self.console.addItem(message)
try:
self._logfile.write(message + '\r\n')
self._logfile.flush()
# Search for the console tool. Note: Not available during startup.
if not self._logtool:
for tool in self._toolmanager.get_tool_list():
if type(tool) == app_tools.LogTool:
self._logtool = tool
# Log message.
if self._logtool: self._logtool.write_to_log(message)
#
except Exception as e:
print('Exception (write_to_log):', str(e))
def _loadResources(self):
""" """
try:
# Load resources here.
self.statusBar().showMessage(self.tr('Loading species lists...'))
plankton_core.Species()
finally:
self.statusBar().showMessage(self.tr(''))
def setVersion(self, version):
""" """
self._version = version
def _about(self):
""" """
about_text = app_framework.HelpTexts().get_text('about')
about_text = about_text.replace('###version###',
' Version: ' + self._version)
QtWidgets.QMessageBox.about(self, self.tr('About'), self.tr(about_text))
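# A minimal sketch (assuming PyQt5; not part of the original app_framework) of
# what the ClickableQLabel used above could look like: a QLabel that emits a
# label_clicked signal, which the tool rows connect to show_tool/hide_tool.
from PyQt5 import QtCore, QtWidgets

class ClickableQLabel(QtWidgets.QLabel):
    label_clicked = QtCore.pyqtSignal()

    def mouseReleaseEvent(self, event):
        # emit the custom signal, then let QLabel finish its own handling
        self.label_clicked.emit()
        super().mouseReleaseEvent(event)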
| 44.601695
| 104
| 0.611565
| 1,615
| 15,789
| 5.821053
| 0.251393
| 0.012126
| 0.013403
| 0.011701
| 0.122115
| 0.05425
| 0.036379
| 0.029997
| 0.012552
| 0
| 0
| 0.007356
| 0.285389
| 15,789
| 353
| 105
| 44.728045
| 0.825844
| 0.251821
| 0
| 0.033333
| 0
| 0
| 0.074397
| 0.009986
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0.004762
| 0.042857
| 0
| 0.12381
| 0.004762
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
06669e5cbe5823ce5ec6dea9345b3539ee4591b9
| 1,443
|
py
|
Python
|
two_buckets_and_a_lambda/terraform/lambdas/credentials-lambda.py
|
chariotsolutions/aws-examples
|
0c0945966f3e1b118ba5db948d5db3e304bc2ac3
|
[
"MIT"
] | 6
|
2020-05-20T13:58:35.000Z
|
2022-02-04T13:25:05.000Z
|
two_buckets_and_a_lambda/terraform/lambdas/credentials-lambda.py
|
chariotsolutions/aws-examples
|
0c0945966f3e1b118ba5db948d5db3e304bc2ac3
|
[
"MIT"
] | 1
|
2021-09-02T21:19:10.000Z
|
2021-09-02T21:19:10.000Z
|
two_buckets_and_a_lambda/terraform/lambdas/credentials-lambda.py
|
chariotsolutions/aws-examples
|
0c0945966f3e1b118ba5db948d5db3e304bc2ac3
|
[
"MIT"
] | 3
|
2019-11-14T21:03:15.000Z
|
2022-01-17T19:12:02.000Z
|
import boto3
import json
import logging
import os
bucket = os.environ['UPLOAD_BUCKET']
role_arn = os.environ['ASSUMED_ROLE_ARN']
sts_client = boto3.client('sts')
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def lambda_handler(event, context):
body = json.loads(event['body'])
key = body['key']
session_name = f"{context.aws_request_id}"
# The inline session policy intersects with the assumed role's permissions,
# so the returned credentials can only PutObject to this exact bucket/key.
session_policy = {
'Version': '2012-10-17',
'Statement': [
{
'Effect': 'Allow',
'Action': 's3:PutObject',
'Resource': f"arn:aws:s3:::{bucket}/{key}"
}
]
}
logger.info(f"generating restricted credentials for: s3://{bucket}/{key} for session {session_name}")
logger.info(f"role_arn is {role_arn}")
response = sts_client.assume_role(
RoleArn=role_arn,
RoleSessionName=session_name,
Policy=json.dumps(session_policy)
)
creds = response['Credentials']
return {
'statusCode': 200,
'headers': {
'Content-Type': 'application/json'
},
'body': json.dumps({
'access_key': creds['AccessKeyId'],
'secret_key': creds['SecretAccessKey'],
'session_token': creds['SessionToken'],
'region': os.environ['AWS_REGION'],
'bucket': bucket
})
}
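# A local smoke test (sketch, not part of the original handler) showing the
# event shape this function expects: an API Gateway proxy event whose JSON
# body carries the target object key. Actually running it still requires AWS
# credentials that are allowed to assume ASSUMED_ROLE_ARN.
if __name__ == '__main__':
    class FakeContext:
        # minimal stand-in for the Lambda context object
        aws_request_id = 'local-smoke-test'
    fake_event = {'body': json.dumps({'key': 'uploads/example.csv'})}
    print(lambda_handler(fake_event, FakeContext()))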
| 27.226415
| 105
| 0.546778
| 143
| 1,443
| 5.335664
| 0.482517
| 0.045872
| 0.028834
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016211
| 0.316008
| 1,443
| 52
| 106
| 27.75
| 0.756839
| 0
| 0
| 0
| 0
| 0
| 0.286209
| 0.035343
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022727
| false
| 0
| 0.090909
| 0
| 0.136364
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0672220769ef18bb8f7d78e648bf612a87c0cd49
| 253
|
py
|
Python
|
setup.py
|
SodakDoubleD/dbprime
|
76d2824adbe0f10d6ad04a5607a07f36874389c7
|
[
"MIT"
] | null | null | null |
setup.py
|
SodakDoubleD/dbprime
|
76d2824adbe0f10d6ad04a5607a07f36874389c7
|
[
"MIT"
] | null | null | null |
setup.py
|
SodakDoubleD/dbprime
|
76d2824adbe0f10d6ad04a5607a07f36874389c7
|
[
"MIT"
] | null | null | null |
from distutils.core import setup
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="dbprime",
version="0.1dev",
author="Dalton Dirkson",
author_email="sodakdoubled@gmail.com",
packages=["dbprime",],
)
| 19.461538
| 42
| 0.652174
| 32
| 253
| 5.09375
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009662
| 0.181818
| 253
| 12
| 43
| 21.083333
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0.26087
| 0.086957
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.1
| 0
| 0.1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
067270cf798fc12d58fd8f1dd276c3807b8272a4
| 4,102
|
py
|
Python
|
tfsnippet/utils/misc.py
|
Feng37/tfsnippet
|
70c7dc5c8c8f6314f9d9e44697f90068417db5cd
|
[
"MIT"
] | null | null | null |
tfsnippet/utils/misc.py
|
Feng37/tfsnippet
|
70c7dc5c8c8f6314f9d9e44697f90068417db5cd
|
[
"MIT"
] | null | null | null |
tfsnippet/utils/misc.py
|
Feng37/tfsnippet
|
70c7dc5c8c8f6314f9d9e44697f90068417db5cd
|
[
"MIT"
] | null | null | null |
import os
import re
from contextlib import contextmanager
import numpy as np
import six
__all__ = ['humanize_duration', 'camel_to_underscore', 'NOT_SET',
'cached_property', 'clear_cached_property', 'maybe_close',
'iter_files']
def humanize_duration(seconds):
"""
Format specified time duration as human readable text.
Args:
seconds: Number of seconds of the time duration.
Returns:
str: The formatted time duration.
"""
if seconds < 0:
seconds = -seconds
suffix = ' ago'
else:
suffix = ''
pieces = []
for uvalue, uname in [(86400, 'day'),
(3600, 'hr'),
(60, 'min')]:
if seconds >= uvalue:
val = int(seconds // uvalue)
if val > 0:
if val > 1:
uname += 's'
pieces.append('{:d} {}'.format(val, uname))
seconds %= uvalue
if seconds > np.finfo(np.float64).eps:
pieces.append('{:.4g} sec{}'.format(
seconds, 's' if seconds > 1 else ''))
elif not pieces:
pieces.append('0 sec')
return ' '.join(pieces) + suffix
def camel_to_underscore(name):
"""Convert a camel-case name to underscore."""
s1 = re.sub(CAMEL_TO_UNDERSCORE_S1, r'\1_\2', name)
return re.sub(CAMEL_TO_UNDERSCORE_S2, r'\1_\2', s1).lower()
CAMEL_TO_UNDERSCORE_S1 = re.compile('([^_])([A-Z][a-z]+)')
CAMEL_TO_UNDERSCORE_S2 = re.compile('([a-z0-9])([A-Z])')
class NotSet(object):
"""Object for denoting ``not set`` value."""
def __repr__(self):
return 'NOT_SET'
NOT_SET = NotSet()
def cached_property(cache_key):
"""
Decorator to cache the return value of an instance property.
.. code-block:: python
class MyClass(object):
@cached_property('_cached_property')
def cached_property(self):
return ...
# usage
o = MyClass()
print(o.cached_property) # fetch the cached value
Args:
cache_key (str): Attribute name to store the cached value.
"""
def wrapper(method):
@property
@six.wraps(method)
def inner(self, *args, **kwargs):
if not hasattr(self, cache_key):
setattr(self, cache_key, method(self, *args, **kwargs))
return getattr(self, cache_key)
return inner
return wrapper
def clear_cached_property(instance, cache_key):
"""
Clear the cached values of specified property.
Args:
instance: The owner instance of the cached property.
cache_key (str): Attribute name to store the cached value.
"""
if hasattr(instance, cache_key):
delattr(instance, cache_key)
@contextmanager
def maybe_close(obj):
"""
Enter a context, and if `obj` has ``.close()`` method, close it
when exiting the context.
Args:
obj: The object maybe to close.
Yields:
The specified `obj`.
"""
try:
yield obj
finally:
if hasattr(obj, 'close'):
obj.close()
def iter_files(root_dir, sep='/'):
"""
Iterate through all files in `root_dir`, returning the relative paths
of each file. The sub-directories will not be yielded.
Args:
root_dir (str): The root directory, from which to iterate.
sep (str): The separator for the relative paths.
Yields:
str: The relative paths of each file.
"""
def f(parent_path, parent_name):
for f_name in os.listdir(parent_path):
f_child_path = parent_path + os.sep + f_name
f_child_name = parent_name + sep + f_name
if os.path.isdir(f_child_path):
for s in f(f_child_path, f_child_name):
yield s
else:
yield f_child_name
for name in os.listdir(root_dir):
child_path = root_dir + os.sep + name
if os.path.isdir(child_path):
for x in f(child_path, name):
yield x
else:
yield name
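# A short usage sketch of the helpers above (the class and attribute names
# here are illustrative only, not part of this module):
if __name__ == '__main__':
    class Example(object):
        @cached_property('_answer_cache')
        def answer(self):
            print('computing...')
            return 42

    e = Example()
    print(e.answer)                             # prints 'computing...' then 42
    print(e.answer)                             # 42, served from the cache
    clear_cached_property(e, '_answer_cache')   # next access would recompute
    print(humanize_duration(3725))              # '1 hr 2 min 5 secs'
    print(camel_to_underscore('MyHTTPServer'))  # 'my_http_server'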
| 25.962025
| 73
| 0.573623
| 515
| 4,102
| 4.405825
| 0.297087
| 0.055531
| 0.044954
| 0.014103
| 0.096959
| 0.062583
| 0.039665
| 0.039665
| 0.039665
| 0.039665
| 0
| 0.01104
| 0.315456
| 4,102
| 157
| 74
| 26.127389
| 0.797009
| 0.305948
| 0
| 0.038961
| 0
| 0
| 0.074745
| 0.007928
| 0
| 0
| 0
| 0
| 0
| 1
| 0.12987
| false
| 0
| 0.064935
| 0.012987
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0673b5944cf3b730042b94eae2844b3646f79c99
| 54,598
|
py
|
Python
|
spaic/Backend/Backend.py
|
ZhejianglabNCRC/SPAIC
|
5a08328adcc5a197316cf353746bae7ab6865337
|
[
"Apache-2.0"
] | 3
|
2022-03-01T03:04:25.000Z
|
2022-03-01T03:07:15.000Z
|
spaic/Backend/Backend.py
|
ZhejianglabNCRC/SPAIC
|
5a08328adcc5a197316cf353746bae7ab6865337
|
[
"Apache-2.0"
] | null | null | null |
spaic/Backend/Backend.py
|
ZhejianglabNCRC/SPAIC
|
5a08328adcc5a197316cf353746bae7ab6865337
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on 2020/8/6
@project: SPAIC
@filename: Backend
@author: Hong Chaofei
@contact: hongchf@gmail.com
@description:
Defines the backends used for network simulation, e.g. PyTorch, TensorFlow, CUDA, the Darwin chip, etc., together with the corresponding ODE solving methods such as Euler and 2nd-order Runge-Kutta.
"""
from abc import abstractmethod, ABC
from collections import OrderedDict
from ..Network.BaseModule import BaseModule, VariableAgent
from ..Network.DelayQueue import DelayQueue
import numpy as np
import torch
backends = dict()
class Backend(BaseModule, ABC):
'''
Basic backend class. All concrete backends should subclass it.
The backend is a parameter of the build function and becomes an attribute of all objects defined
in the frontend network during the building process. These objects build their initial data
and specified operations into the attributes of the backend, via _variables
and _operations respectively. The data is updated at each step according to the computation graph.
Args:
dt (float, optional): the length of a backend timestep, in millisecond.
Attributes:
device (str): the desired device of returned tensor. Its value can be 'cpu' or 'cuda'. If None, uses
the current device for the default tensor type.
builded (bool): whether the objects defined in the frontend network have been built.
time (float): current backend time, in millisecond.
n_time_step (int): the num of current time step.
_variables (OrderedDict): records all variables from the build function of frontend objects.
_parameters_dict (OrderedDict): records the variables to be trained.
_InitVariables_dict (OrderedDict): reserves a copy of the initialization variables for initialization.
_graph_var_dicts (dict): has following format: {'variables_dict': self._variables, 'temp_dict': dict(), 'update_dict': dict(), 'reduce_dict': dict()},
recording the intermediate value of variables in computation progress.
basic_operate (dict): dictionary of basic operators, mapping from operator names using in frontend to
the function objects implemented in the backend.
_operations (list): records all basic operations from the build function of frontend objects, each of
which has following format: [ret_var_name: str, operation_name, input_var_name1: str, input_var_name2 :str, ...].
_graph_operations (list): redefine each basic operation, that is, add the corresponding keyword in the _graph_var_dicts to each variable,
which has following format: [(dict_type, ret_var_name), operation_name, [(dict_type1, input_var_name1),(dict_type2, input_var_name2),...]].
_standalone_operations (list): records all standalone operations from the build function of frontend objects,
each of which has following format: (ret_var_name: str, function, input_var_names: list).
_initial_operations (list): records all initial operations from the build function of frontend objects, each of
which has following format: (ret_var_name: str, function, input_var_names: list).
_monitors (list): records all monitors defined in fronted network through build function of Monitor object.
Methods:
build_graph: build a computation graph before performing the calculation.
graph_update_step: update value of _graph_var_dicts.
initial_step: initialize network variables.
update_step: update the return variables of standalone operations and basic operations and current backend time.
r_update_step: update the return variables of basic operations without using graph_update_step().
add_variable: add variables from front objects to _variables of Backend.
add_backend_variable: add variables according to the specified backend.
add_operation: add basic operations from front objects to _operations of Backend.
register_standalone: add standalone operations from front objects to _standalone_operations of Backend.
register_initial: add initial operations from front objects to _initial_operations of Backend.
'''
basic_operate = dict()
param_init_operate = dict() # -> param_init_operate
backend_name = 'None'
def __init__(self, dt=0.1):
super(Backend, self).__init__()
self.device = None
self.runtime = None
self.builded = False
self.dt = dt # the length of a backend timestep
self.time = 0.0 # current backend time
self.n_time_step = 0 # the num of current time step
self._batch_size = 1
self._variables = dict() # build from orderedDict to Tuple
self._parameters_dict = dict()
self._clamp_parameter_dict = dict()
self._delay_dict = dict() # store conduction delays
self._SparseVariables_dict = dict()
self._InitVariables_dict = dict()
self._operations = list()
self._standalone_operations = list()
self._initial_operations = list()
self._monitors = list() # TODO: need to add to update
self._stored_states = dict() # TODO: store network self._variables in the dict
self.basic_operate['threshold'] = self.threshold
self.basic_operate['var_linear'] = self.var_linear
self.basic_operate['mat_linear'] = self.mat_linear
self.basic_operate['mat_mult_weight'] = self.mat_mult_weight
self.basic_operate['mat_mult_pre'] = self.mat_mult_pre
self.basic_operate['mat_mult'] = self.mat_mult
self.basic_operate['bmm'] = self.bmm
self.basic_operate['ger'] = self.ger
self.basic_operate['sparse_mat_mult_weight'] = self.sparse_mat_mult_weight
self.basic_operate['var_mult'] = self.var_mult
self.basic_operate['add'] = self.add
self.basic_operate['minus'] = self.minus
self.basic_operate['div'] = self.div
self.basic_operate['cat'] = self.cat
self.basic_operate['stack'] = self.stack
self.basic_operate['permute'] = self.permute
self.basic_operate['view'] = self.view
self.basic_operate['equal'] = self.equal
self.basic_operate['reduce_sum'] = self.reduce_sum
self.basic_operate['conv_2d'] = self.conv_2d
self.basic_operate['relu'] = self.relu
self.basic_operate['sin'] = self.sin
self.basic_operate['cos'] = self.cos
self.basic_operate['tan'] = self.tan
self.basic_operate['log'] = self.log
self.basic_operate['log2'] = self.log2
self.basic_operate['log10'] = self.log10
self.basic_operate['conv_max_pool2d'] = self.conv_max_pool2d
self.basic_operate['reshape_mat_mult'] = self.reshape_mat_mult
self.basic_operate['exp'] = self.exp
self.basic_operate['mult_sum_weight'] = self.mult_sum_weight
self.basic_operate['im2col_indices'] = self.im2col_indices
self.basic_operate['conv2d_flatten'] = self.conv2d_flatten
self.basic_operate['feature_map_flatten'] = self.feature_map_flatten
self.param_init_operate['uniform'] = self.uniform
self.param_init_operate['normal'] = self.normal
self.param_init_operate['xavier_uniform'] = self.xavier_uniform
self.param_init_operate['xavier_normal'] = self.xavier_normal
self.param_init_operate['kaiming_uniform'] = self.kaiming_uniform
self.param_init_operate['kaiming_normal'] = self.kaiming_normal
self.param_init_operate['zero_init'] = self.zero_init
# self._graph_var_dicts = {'variables_dict': self._variables, 'temp_dict': dict(), 'update_dict': dict(),
# 'reduce_dict': dict()}
self._graph_operations = list()
self._push_operations = list()
self._fetch_operations = list()
def set_batch_size(self, batch_size):
self._batch_size = batch_size
def get_batch_size(self):
return self._batch_size
def set_runtime(self, runtime):
self.runtime = runtime
def build_graph(self):
'''
Build a computation graph before performing the calculation.
Note that only the basic operations are redefined into the _graph_operations list. The format of _graph_operations is as follows:
[(dict_type, ret_var_name), operation_name, [(dict_type1, input_var_name1),(dict_type2, input_var_name2),...]].
Traverse all basic operations and add the corresponding keyword in the _graph_var_dicts as dict_type to each variable in basic operation.
'''
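# Illustrative note: after building, an entry such as
#   [[('update_dict', 'V')], self.var_linear,
#    [('variables_dict', 'tau'), ('variables_dict', 'V'), ('variables_dict', 'I')]]
# means "compute var_linear(variables['tau'], variables['V'], variables['I'])
# and store the result under update_dict['V']". The variable names here are
# hypothetical placeholders, not names defined by SPAIC itself.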
variables_index = {k: i for i, k in enumerate(self._variables.keys())}
self.initial_step()
operation_type = 'update_dict or temp_dict or reduce_dict'
# traverse basic operations
fetch_operations = []
push_operations = []
graph_operations = []
for op in self._operations:
if len(op[0]) == 0 and len(op[2]) == 0:
# functions with no input and output will not push into the computation graph
raise ValueError(" Operation lacks both input and output can't be build")
elif len(op[0]) == 0:
fetch_operations.append(op)
elif len(op[2]) == 0:
push_operations.append(op)
else:
graph_operations.append(op)
################################
## for push_operation build ##
################################
update_dict = dict()
reduce_dict = dict()
for ind, op in enumerate(push_operations):
outputs = []
label_outputs = []
# if the operation returns one variable, it is wrapped in a list to be consistent with multi-variable returns
if len(op[0]) == 1:
outputs.append(op[1]())
else:
outputs = op[1]()
for ind, var_name in enumerate(op[0]):
if var_name in self._variables:
# when the same ret_var_name occurs more than once, op[0] is added to the reduce_dict of _graph_var_dicts
if var_name in update_dict:
reduce_dict[var_name] = [update_dict[var_name], outputs[ind]]
label_outputs.append(('reduce_dict', var_name))
# # add op[0] into graph: reduce_dict
self._graph_var_dicts['reduce_dict'][op[0]] = []
# revise the first reduce operation
for gop in self._push_operations:
tmp_label_outputs = gop[0]
for tmp_ind, tmp_label in enumerate(tmp_label_outputs):
if tmp_label[1] == var_name:
tmp_label_outputs[tmp_ind] = ('reduce_dict', var_name)
break
del update_dict[var_name]
elif var_name in reduce_dict:
reduce_dict[var_name].append(outputs[ind])
label_outputs.append(('reduce_dict', var_name))
else:
# In the push_operation, new data is pushed directly to update_dict, as
# there is no need to retain the last step's variable value
update_dict[var_name] = outputs[ind]
label_outputs.append(('update_dict', var_name))
else:
raise ValueError("No state variable to get the input ")
# add the operation to built graph
self._push_operations.append([label_outputs, op[1], []])
# for var_name in reduce_dict:
# # add the reduce_sum operation into the graph
# self._graph_operations.append(
# [[('update_dict', var_name)], self.reduce_sum_update, [('reduce_dict', var_name)]])
#################################
## for graph_operation build ##
#################################
temp_dict = dict()
# update_dict = dict()
# reduce_dict = dict()
temp_reduce_sum_ops = []
for ind, op in enumerate(graph_operations):
inputs = []
label_inputs = []
for var_name in op[2]:
# try:
# var_name in self._variables
# except:
# a = 1
if '[updated]' in var_name:
var_name = var_name.replace("[updated]", "")
if var_name in update_dict:
inputs.append(update_dict[var_name])
label_inputs.append(('update_dict', var_name))
# elif var_name in reduce_dict:
# # if the reduce_dict[var_name] is frozen: do reduce_sum operation before this op, and put the value to update_dict
# value = self.reduce_sum(self.stack(reduce_dict[var_name]))
# inputs.append(value)
# label_inputs.append(('update_dict', var_name))
# temp_reduce_sum_ops.append((var_name, len(reduce_dict[var_name])))
# # add the reduce_sum operation into the graph
# self._graph_operations.append(
# [[('update_dict', var_name)], self.reduce_sum_update, [('reduce_dict', var_name)]])
elif var_name in self._variables:
inputs.append(self._variables[var_name])
label_inputs.append(('variables_dict', var_name))
else:
raise ValueError(" No State Variable [%s] in the update_dict" % var_name)
elif var_name in self._variables:
inputs.append(self._variables[var_name])
label_inputs.append(('variables_dict', var_name))
elif var_name in temp_dict:
inputs.append(temp_dict[var_name])
label_inputs.append(('temp_dict', var_name))
else:
raise ValueError(" No State Variable [%s] in the variable dict" % var_name)
outputs = []
label_outputs = []
if len(op[0]) == 0:
self.var_check(op[1], inputs)
op[1](*inputs)
else:
self.var_check(op[1], inputs)
if len(op[0]) == 1:
outputs.append(op[1](*inputs))
else:
outputs = op[1](*inputs)
for ind, var_name in enumerate(op[0]):
if var_name in self._variables:
# when the same ret_var_name occurs more than once, op[0] is added to the reduce_dict of _graph_var_dicts
if var_name in update_dict:
reduce_dict[var_name] = [update_dict[var_name], outputs[ind]]
label_outputs.append(('reduce_dict', var_name))
# # add op[0] into graph: reduce_dict
# self._graph_var_dicts['reduce_dict'][op[0]] = []
# revise the first reduce operation
InGop = True
for pop in self._push_operations:
tmp_label_outputs = pop[0]
for tmp_ind, tmp_label in enumerate(tmp_label_outputs):
if tmp_label[1] == var_name:
tmp_label_outputs[tmp_ind] = ('reduce_dict', var_name)
InGop = False
break
if InGop:
for gop in self._graph_operations:
tmp_label_outputs = gop[0]
for tmp_ind, tmp_label in enumerate(tmp_label_outputs):
if tmp_label[1] == var_name:
tmp_label_outputs[tmp_ind] = ('reduce_dict', var_name)
break
del update_dict[var_name]
elif var_name in reduce_dict:
reduce_dict[var_name].append(outputs[ind])
label_outputs.append(('reduce_dict', var_name))
else:
update_dict[var_name] = outputs[ind]
label_outputs.append(('update_dict', var_name))
else:
temp_dict[var_name] = outputs[ind]
label_outputs.append(('temp_dict', var_name))
# add the operation to built graph
self._graph_operations.append([label_outputs, op[1], label_inputs])
for reduce_op in temp_reduce_sum_ops:
reduce_len = len(reduce_dict[reduce_op[0]])
if reduce_len != reduce_op[1]:
raise ValueError(
"Can't use [updated] tag for variable: %s, as it is a reduce_dict variable which is have updating conflict" %
reduce_op[0])
else:
del reduce_dict[reduce_op[0]]
# for reduced variables that are not used with the [updated] tag
for var_name in reduce_dict:
# add the reduce_sum operation into the graph
self._graph_operations.append(
[[('update_dict', var_name)], self.reduce_sum_update, [('reduce_dict', var_name)]])
#################################
## for fetch_operation build ##
#################################
for ind, op in enumerate(fetch_operations):
inputs = []
label_inputs = []
for var_name in op[2]:
if '[updated]' in var_name:
# no need for the [updated] tag here, as all variables computed in graph_operations have been updated
var_name = var_name.replace("[updated]", "")
if var_name in self._variables:
inputs.append(self._variables[var_name])
label_inputs.append(('variables_dict', var_name))
# elif var_name in temp_dict:
# inputs.append(temp_dict[var_name])
# label_inputs.append(('temp_dict', var_name))
else:
raise ValueError(" No State Variable [%s] in the update_dict" % var_name)
self.var_check(op[1], inputs)
op[1](*inputs)
# add the operation to built graph
self._fetch_operations.append([[], op[1], label_inputs])
# self._variables.update(update_dict)
for ii in range(len(self._graph_operations)):
self._graph_operations[ii] = tuple(self._graph_operations[ii])
self._graph_operations = tuple(self._graph_operations)
def var_check(self, op, *args):
'''
For specified operation, check the type or the shape of input variables.
'''
if op == 'mat_mult':
if args[0][0].shape[1] != args[0][1].shape[0]:
raise ValueError("%s and %s do not match" % (args[0].shape, args[1].shape))
pass
def graph_update_step_r(self):
for op in self._graph_operations:
inputs = []
for var in op[2]:
inputs.append(self._graph_var_dicts[var[0]][var[1]])
if op[0][0] is None:
op[1](*inputs)
elif op[0][0] == 'reduce_dict':
self._graph_var_dicts['reduce_dict'][op[0][1]].append(op[1](*inputs))
else:
self._graph_var_dicts[op[0][0]][op[0][1]] = op[1](*inputs)
# if '[updated]' in op[0][1]:
# op_name = op[0][1].strip('[updated]')
# if op_name in self._graph_var_dicts['update_dict'] and op_name in self._graph_var_dicts['variables_dict']:
# self._graph_var_dicts['update_dict'][op_name] = self._graph_var_dicts['temp_dict'][op[0][1]] # update the value of variables whose return name carries [updated]
return # tuple(self._graph_var_dicts['variables_dict'].values())
def graph_update_step(self, variables, update_dict, reduce_dict):
temp_dict = dict()
# update_dict = dict()
# reduce_dict = dict()
for op in self._graph_operations:
# for inputs
inputs = []
for var in op[2]:
if var[0] == 'variables_dict':
inputs.append(variables[var[1]])
elif var[0] == 'temp_dict':
inputs.append(temp_dict[var[1]])
elif var[0] == 'update_dict':
inputs.append(update_dict[var[1]])
elif var[0] == 'reduce_dict':
inputs.append(reduce_dict[var[1]])
# compute the operation
result = op[1](*inputs)
if len(op[0]) == 1: result = [result]
# assign the result variables
for ind, var in enumerate(op[0]):
if var[0] == 'temp_dict':
temp_dict[var[1]] = result[ind]
elif var[0] == 'update_dict':
update_dict[var[1]] = result[ind]
elif var[0] == 'reduce_dict':
if var[1] in reduce_dict:
reduce_dict[var[1]].append(result[ind])
else:
reduce_dict[var[1]] = [result[ind]]
return update_dict
def push_update_step(self):
reduce_dict = dict()
update_dict = dict()
for op in self._push_operations:
result = op[1]()
if len(op[0]) == 1: result = [result]
for ind, var in enumerate(op[0]):
if var[0] == 'update_dict':
update_dict[var[1]] = result[ind]
elif var[1] in reduce_dict:
reduce_dict[var[1]].append(result[ind])
else:
reduce_dict[var[1]] = [result[ind]]
return update_dict, reduce_dict
def fetch_update_step(self):
for op in self._fetch_operations:
# for inputs
inputs = []
for var in op[2]:
inputs.append(self._variables[var[1]])
op[1](*inputs)
def initial_step(self):
'''
Initialize network variables.
'''
# Initialize the current backend time and the num of time step
self.last_time = 0.0
self.time = 0.0 # current backend time
self.n_time_step = 0
for key, value in self._variables.items():
if '[stay]' in key:
self._InitVariables_dict[key] = self._variables[key]
# Initialize untrainable variables
self._variables.clear()
for key, value in self._InitVariables_dict.items():
self._variables[key] = value
# Initialize the trainable parameters
for key, clamp_code in self._clamp_parameter_dict.items():
clamp_code[0](*clamp_code[1])
for key, value in self._parameters_dict.items():
self._variables[key] = value
for key, value in self._SparseVariables_dict.items():
index_name = key + '_sparse_index'
value_name = key + '_sparse_value'
shape_name = key + '_sparse_shape'
if index_name in self._variables.keys() and value_name in self._variables.keys():
if self.backend_name == 'pytorch':
self._variables[key] = torch.sparse.FloatTensor(self._variables[index_name],
self._variables[value_name],
self._variables[shape_name])
# Initialize the record of Monitor
for monitor in self._monitors:
monitor.init_record()
# Traverse initial operations
for op in self._initial_operations:
inputs = []
for var_name in op[2]:
if var_name in self._variables:
inputs.append(self._variables[var_name])
else:
raise ValueError(" No State Variable [%s] in the variable dict" % var_name)
if op[0] is None:
op[1](*inputs)
else:
self._variables[op[0]] = op[1](*inputs)
# Change initial variables' batch_size
for key in self._variables.keys():
if hasattr(self._variables[key], 'shape'):
shape = self._variables[key].shape
if self._variables[key].ndim > 1 and shape[0] == 1 and (key not in self._parameters_dict):
expand_shape = -np.ones_like(shape, dtype=int)
expand_shape[0] = self._batch_size
self._variables[key] = self._variables[key].expand(tuple(expand_shape))
# if '{O}' in key:
# o_shape = self._variables[key].shape
#
# shape = []
# for s in o_shape:
# if s != 1:
# shape.append(s)
# else:
# shape.append(self._batch_size)
# self._variables[key] = torch.zeros(shape, dtype=torch.float32, device=self.device)
def initial_continue_step(self):
'''
Initialize network for continuous run.
'''
self.last_time = self.time
def update_step(self):
'''
Update the return variables of standalone operations and basic operations and current backend time.
Returns:
tuple(self._variables.values())
'''
# push input data
update_dict, reduce_dict = self.push_update_step()
# static graph computation
update_dict = self.graph_update_step(self._variables, update_dict, reduce_dict)
# Update time and state variables
self.n_time_step += 1
self.time = round(self.n_time_step * self.dt, 2)
self._variables.update(update_dict)
# fetch output data
self.fetch_update_step()
# Record Variables
for monitor in self._monitors:
monitor.update_step(self._variables)
return tuple(self._variables.values())
def update_time_steps(self):
while (self.runtime > self.time - self.last_time):
self.update_step()
def r_update_step(self):
'''
Update the return variables of basic operations without using graph_update_step().
Returns:
tuple(self._variables.values())
'''
reduce_dict = dict()
self._graph_var_dicts['update_dict'].clear()
self._graph_var_dicts['temp_dict'].clear()
self._graph_var_dicts['reduce_dict'].clear()
# Traverse standalone operations
for op in self._standalone_operations:
inputs = []
for var_name in op[2]:
if 'pytorch' in backends:
inputs.append(self._variables[var_name])
else:
inputs.append(self.to_numpy(self._variables[var_name]))
if op[0] is None:
op[1](*inputs)
else:
if 'pytorch' in backends:
self._variables[op[0]] = op[1](*inputs)
else:
self._variables[op[0]] = self.to_tensor(op[1](*inputs))
# update one time_step
for op in self._operations:
if op[0] in self._graph_var_dicts['variables_dict']:
inputs = []
for var_name in op[2:]:
if '[updated]' in var_name:
var_name = var_name.replace("[updated]", "")
if var_name in self._graph_var_dicts['update_dict']:
inputs.append(self._graph_var_dicts['update_dict'][var_name])
else:
raise ValueError(" No State Variable [%s] in the update_dict" % var_name)
elif var_name in self._graph_var_dicts['variables_dict']:
inputs.append(self._graph_var_dicts['variables_dict'][var_name])
elif var_name in self._graph_var_dicts['temp_dict']:
inputs.append(self._graph_var_dicts['temp_dict'][var_name])
else:
raise ValueError(" No State Variable [%s] in the variable dict" % var_name)
if op[0] in self._graph_var_dicts['update_dict']:
if op[0] in self._graph_var_dicts['reduce_dict']:
self._graph_var_dicts['reduce_dict'][op[0]].append(op[1](*inputs))
else:
self._graph_var_dicts['reduce_dict'][op[0]] = [self._graph_var_dicts['update_dict'][op[0]],
op[1](*inputs)]
else:
self._graph_var_dicts['update_dict'][op[0]] = op[1](*inputs)
pass
else:
inputs = []
for var_name in op[2:]:
if '[updated]' in var_name:
var_name = var_name.replace("[updated]", "")
if var_name in self._graph_var_dicts['update_dict']:
inputs.append(self._graph_var_dicts['update_dict'][var_name])
else:
raise ValueError(" No State Variable [%s] in the update_dict" % var_name)
elif var_name in self._graph_var_dicts['variables_dict']:
inputs.append(self._graph_var_dicts['variables_dict'][var_name])
elif var_name in self._graph_var_dicts['temp_dict']:
inputs.append(self._graph_var_dicts['temp_dict'][var_name])
else:
raise ValueError(" No State Variable [%s] in the variable dict" % var_name)
self._graph_var_dicts['temp_dict'][op[0]] = op[1](*inputs)
if '[updated]' in op[0]:
op_name = op[0].replace("[updated]", "")
if op_name in self._graph_var_dicts['update_dict']:
self._graph_var_dicts['update_dict'][op_name] = self._graph_var_dicts['temp_dict'][
op[0]] # update the variable in update_dict
else:
raise ValueError(" No State Variable [%s] in the update_dict" % var_name)
# Update reduce_dict into update_dict
for key, value in reduce_dict.items():
value = self.stack(value)
self._graph_var_dicts['update_dict'][key] = self.reduce_sum(value)
self._graph_var_dicts['update_dict'][key] = []
# update time
self.n_time_step += 1
self.time = round(self.n_time_step * self.dt, 2)
self._graph_var_dicts['variables_dict'].update(self._graph_var_dicts['update_dict'])
# Record Variables
for monitor in self._monitors:
monitor.update_step(self._graph_var_dicts)
return tuple(self._variables.values())
def reduce_sum_update(self, value):
reduced = self.reduce_sum(self.stack(value))
return reduced
def get_varialble(self, name):
if name in self._variables:
return self._variables[name]
elif name in self._parameters_dict:
return self._parameters_dict[name]
elif name in self._InitVariables_dict:
return self._InitVariables_dict[name]
else:
raise ValueError("not found variable:%s in the backend"%name)
def add_variable(self, name, shape, value=None, is_parameter=False, is_sparse=False, init=None, init_param=None,
min=None, max=None, is_constant=False):
'''
Add variables from front objects to _variables of Backend and get copies to assign to _parameters_dict and _InitVariables_dict.
Args:
name (str): the name of the added variable
shape (list, int): the shape of the variable
value (optional): the value of the variable
is_parameter (bool, optional): whether the variable is trainable
init (optional): name of the parameter initializer
'''
if is_parameter:
self._parameters_dict[name] = self.add_backend_variable(name, shape, value, grad=True, is_sparse=is_sparse,
init=init, init_param=init_param)
if min is not None and max is not None:
self._clamp_parameter_dict[name] = (self.clamp_, [self._parameters_dict[name], min, max])
elif min is not None:
self._clamp_parameter_dict[name] = (self.clamp_min_, [self._parameters_dict[name], min])
elif max is not None:
self._clamp_parameter_dict[name] = (self.clamp_max_, [self._parameters_dict[name], max])
# The sparse weight matrix is not a leaf node: backprop updates the values inside weight while the forward pass uses weight itself, so sparse matrices are tracked in a separate dict for initialization.
elif is_sparse:
self._SparseVariables_dict[name] = self.add_backend_variable(name, shape, value, grad=True,
is_sparse=is_sparse, init=init,init_param=init_param)
elif is_constant:
self._InitVariables_dict[name] = value
self._variables[name] = value
else:
self._InitVariables_dict[name] = self.add_backend_variable(name, shape, value, grad=False,
is_sparse=is_sparse, init=init,
init_param=init_param)
var_agent = VariableAgent(self, name)
return var_agent
def add_delay(self, var_name, max_delay):
max_len = int(max_delay / self.dt)
if var_name in self._delay_dict:
if self._delay_dict[var_name].max_len < max_len:
self._delay_dict[var_name].max_len = max_len
else:
self._delay_dict[var_name] = DelayQueue(var_name, max_len, self)
self.register_initial(None, self._delay_dict[var_name].initial, [var_name, ])
self.register_standalone(var_name, self._delay_dict[var_name].push, [var_name, ])
return self._delay_dict[var_name]
@abstractmethod
def add_backend_variable(self, name, shape, value=None, grad=False, is_sparse=False, init=None, init_param=None):
'''
This method will be overwritten by different subclasses to add variables to _variables of specified backend.
Args:
name (str): the name of the added variable
shape (list, int): the shape of the variable
value (optional): the value of the variable
is_parameter (bool, optional): whether the variable is trainable
init (optional): name of the parameter initializer
grad (bool, optional): whether to use grad
'''
raise NotImplementedError()
def add_operation(self, op):
'''
Add basic operations from front objects to _operations of Backend.
Args:
op (list): the operation includes [ret_var_name: str, operation_name, input_var_name1: str, input_var_name2 :str, ...]
transformed to : [[return_var_names], operation_name, [input_var_names]]
'''
if not isinstance(op[0], list):
op[0] = [op[0]]
if len(op)==2:
op.append([])
elif not isinstance(op[2], list):
op[2] = op[2:] # when op[2] is already a list, the list-style multi-input structure is in use; any values at op[3] and beyond are simply ignored
if op[1] in self.basic_operate:
op[1] = self.basic_operate[op[1]]
# if isinstance(op[0], str):
# op[0] = [op[0]]
# elif op[0] is None:
# op[0] = []
# op[2] = op[2:]
self._operations.append(op)
elif callable(op[1]):
self.register_standalone(op[0], op[1], op[2])
else:
raise ValueError("No operation %s in basic_operate" % op[1])
# if isinstance(op[0], str):
# op[0] = [op[0]]
# elif op[0] is None:
# op[0] = []
# op[2] = op[2:]
# if op[1] in self.basic_operate:
# op[1] = self.basic_operate[op[1]]
# elif not callable(op[1]):
# raise ValueError("No operation %s in basic_operate or not exist operation %s" % (op[1], op[1]))
#
# self._operations.append(op)
def register_standalone(self, output_names: list, function, input_names: list):
'''
Add standalone operations from front objects to the Backend (appended to _operations as op triples).
Args:
output_names (list or str): the name(s) of the return variable(s) of the method
function (callable): the standalone method
input_names (list): the names of the arguments of the method
'''
# TODO:
if isinstance(output_names, str):
output_names = [output_names]
elif output_names is None:
output_names = []
op = [output_names, function, input_names]
self._operations.append(op)
# self._standalone_operations.append((output_name, function, input_names))
def register_initial(self, output_name: str, function, input_names: list):
'''
Add initial operations from front objects to _initial_operations of Backend.
Args:
output_name (str): the name of the return variable of the method
function (callable): the initial method
input_names (list): the name of the arguments of the method
'''
self._initial_operations.append((output_name, function, input_names))
def store(self, name='default'):
'''
Store backend_name and _variables into _stored_states dictionary.
Args:
name (str, optional): the name of network state.
'''
self._stored_states[name] = (self.backend_name, self._variables)
def restore(self, name='default'):
'''
Restore network state from _stored_states dictionary.
Args:
name (str): the name of network state.
'''
if name not in self._stored_states:
raise ValueError("No network state named: %s is stored" % name)
else:
stored_backend = self._stored_states[name][0]
if stored_backend != self.backend_name:
raise ValueError(
"The stored network is run by %s not %s" % (stored_backend, self.backend_name))
else:
self._variables = self._stored_states[name][1] # stored as (backend_name, variables)
def check_key(self, ckey, target_dict):
cnetname = ckey[:ckey.find('<net>')]
for key, value in target_dict.items():
netname = key[:key.find('<net>')]
break
ckey = ckey.replace(cnetname, netname)
if ckey in target_dict.keys():
return ckey
import warnings
warnings.warn('A key error occurred, please check the keys.')
# result = [key for key in target_dict.keys() if key.endswith(variables[variables.find('<net>'):])]
# if result:
# if len(result) > 1:
# import warnings
# warnings.warn('Given key matchs two variables in the backend dict, choose the first one as default')
# result = result[0]
# return result
# -------- basic backends operations -----
@abstractmethod
def threshold(self, v, v_th):
'''
Args:
v: membrane voltage
v_th: threshold
Returns:
v > v_th
'''
@abstractmethod
def cat(self, x, dim=1):
'''
Joining data together along a dimension.
Note that the total dimension of the data remains the same after cat.
Args:
x (list):
dim (int): the dimension to cat.
Returns:
concat(x, dim)
'''
@abstractmethod
def stack(self, x, dim=1):
'''
Add new dimension when stack data.
Args:
x (list):
dim (int): the dimension to stack.
Returns:
stack(x, dim)
'''
@abstractmethod
def permute(self, x, permute_dim):
'''
Parameters
----------
x---> input
permute_dim---> the dimension index of permute operation
Returns
-------
'''
@abstractmethod
def view(self, x, view_dim):
'''
Parameters
----------
x---> input
view_dim---> the shape of view operation
Returns
-------
'''
def equal(self, x):
'''
Parameters
----------
y---> target
x---> input
Returns
-------
'''
y = x
return y
@abstractmethod
def reduce_sum(self, x, *dim):
'''
Reduce the dimensions of the data
Args:
x (list):
dim (tuple(int)): the dimension to reduce.
Returns:
sum(x, dim)
'''
@abstractmethod
def index_select(self, x, indices, dim=1):
'''
Parameters
----------
x
indices
Returns
-------
'''
@abstractmethod
def scatter(self, x, indices):
'''
Parameters
----------
x
indices
Returns
-------
'''
@abstractmethod
def conv1d(self, x, kernel):
'''
Parameters
----------
x
kernel
Returns
-------
'''
@abstractmethod
def conv_trans1d(self, x, kernel):
'''
Parameters
----------
x
kernel
Returns
-------
'''
@abstractmethod
def im2col_indices(self, x, kh, kw, padding, stride):
'''
Parameters
----------
x: 4D array N, FH, FW, C_{in}
kh: kernel_height
kw: kernel_width
stride:
padding:
Returns
----------
'''
@abstractmethod
def conv2d_flatten(self, x):
'''
Parameters
----------
x: 4D array (batch_size, out_channels, height, width)
Returns
3D array (batch_size, out_channels, height * width)
----------
'''
@abstractmethod
def feature_map_flatten(self, x):
'''
For RSTDP and STDP learning rules that follow a conv pre-layer
Parameters
----------
x: 4D array (batch_size, out_channels, height, width)
Returns
2D array (batch_size, out_channels * height * width)
----------
'''
@abstractmethod
def add(self, x, y):
'''
Add the tensor y to the input x and returns a new result.
Args:
x (Tensor): input
y (Tensor or Number): the second input
Returns:
x + y
'''
NotImplementedError()
@abstractmethod
def minus(self, x, y):
'''
The first input minus the second input
Args:
x (Tensor): input
y (Tensor or Number): the second input
Returns:
x - y
'''
NotImplementedError()
@abstractmethod
def div(self, x, y):
'''
The first input div the second input
Args:
x (Tensor): input
y (Tensor or Number): the second input
Returns:
x/y
'''
NotImplementedError()
@abstractmethod
def relu(self, x):
'''
Rectified Linear
Args:
x:
Returns:
x = x if x>0. else x = 0
'''
@abstractmethod
def mat_mult_weight(self, A, X):
'''
Matrix product.
Args:
A (Tensor): the first input to be multiplied
X (Tensor): the second input to be multiplied
Returns:
mat_mult_weight(A,X)
'''
NotImplementedError()
@abstractmethod
def mat_mult_pre(self, A, X):
'''
Matrix product.
Args:
A (Tensor): the first input to be multiplied
X (Tensor): the second input to be multiplied
Returns:
mat_mult_pre(A,X)
'''
NotImplementedError()
@abstractmethod
def sigmoid(self, x):
'''
Args:
x:
Returns:
'''
@abstractmethod
def mat_mult(self, A, X):
'''
Matrix product.
Args:
A (Tensor): the first input to be multiplied
X (Tensor): the second input to be multiplied
Returns:
mat_mult(A,X)
'''
NotImplementedError()
@abstractmethod
def reshape_mat_mult(self, A, X):
'''
Matrix product.
Args:
A (Tensor): the first input to be multiplied
X (Tensor): the second input to be multiplied
Returns:
'''
NotImplementedError()
@abstractmethod
def bmm(self, A, X):
'''
Performs a batch matrix-matrix product.
Args:
A (Tensor): the first input to be multiplied [batch_size, n, m]
X (Tensor): the second input to be multiplied [batch_size, m, p]
Returns:
bmm(A,X) [batch_size, n, p]
'''
NotImplementedError()
@abstractmethod
def sparse_mat_mult_weight(self, A, X):
'''
Sparse matrix product.
Args:
A (Tensor): the first input to be multiplied
X (Tensor): the second input to be multiplied
Returns:
sparse_mat_mult_weight(A,X)
'''
NotImplementedError()
@abstractmethod
def var_mult(self, A, X):
'''
Args:
A, X
Returns:
A * X
'''
NotImplementedError()
@abstractmethod
def mult_sum_weight(self, A, X):
'''
sum(A*X, dim=-2)
Args:
A:
X:
Returns:
'''
NotImplementedError()
@abstractmethod
def mat_linear(self, A, X, b):
'''
Args:
A
X
b
Returns:
mat_mul(A,X)+b
'''
NotImplementedError()
@abstractmethod
def ger(self, A, X):
'''
Args:
A
X
Returns:
ger(A,X)
'''
NotImplementedError()
@abstractmethod
def var_linear(self, A, X, b):
'''
If A is a matrix, then A and X should have the same shape and A*X is element-wise multiplication;
else A should be a scalar value.
Returns:
A*X +b
'''
NotImplementedError()
@abstractmethod
def to_numpy(self, data):
'''
Args:
data
Returns:
data.numpy()
'''
NotImplementedError()
@abstractmethod
def to_tensor(self, data):
'''
Args:
data
Returns:
torch.tensor(data)
'''
NotImplementedError()
@abstractmethod
def clamp_(self, data, min, max):
'''
in-place clamp the data
'''
NotImplementedError()
@abstractmethod
def clamp_max_(self, data, max):
'''
in-place clamp the max of the data
'''
NotImplementedError()
@abstractmethod
def clamp_min_(self, data, min):
'''
in-place clamp the min of the data
'''
NotImplementedError()
@abstractmethod
def uniform(self, data, a=0.0, b=1.0):
'''
Args:
data(tensor): an n-dimensional torch.Tensor
a(float): the lower bound of the uniform distribution
b(float): the upper bound of the uniform distribution
Returns:
torch.nn.init.uniform_(data, a=0.0, b=1.0)
'''
NotImplementedError()
@abstractmethod
def normal(self, data, mean=0.0, std=1.0):
'''
Args:
data(tensor): an n-dimensional torch.Tensor
mean(float): the mean of the normal distribution
std(float): the standard deviation of the normal distribution
Returns:
torch.nn.init.normal_(data, mean=0.0, std=1.0)
'''
NotImplementedError()
@abstractmethod
def xavier_normal(self, data, gain=1.0):
'''
Args:
data(tensor): an n-dimensional torch.Tensor
gain: an optional scaling factor
Returns:
torch.nn.init.xavier_normal_(data, gain=1.0)
'''
NotImplementedError()
@abstractmethod
def xavier_uniform(self, data, gain=1.0):
'''
Args:
data(tensor): an n-dimensional torch.Tensor
gain: an optional scaling factor
Returns:
torch.nn.init.xavier_uniform_(data, gain=1.0)
'''
NotImplementedError()
@abstractmethod
def kaiming_normal(self, data, a=0, mode='fan_in', nonlinearity='leaky_relu'):
'''
Args:
data(tensor): an n-dimensional torch.Tensor
a: the negative slope of the rectifier used after this layer (only used with 'leaky_relu')
mode: either 'fan_in' (default) or 'fan_out'. Choosing 'fan_in' preserves the magnitude of the variance of the weights in the forward pass. Choosing 'fan_out' preserves the magnitudes in the backwards pass.
nonlinearity: the non-linear function (nn.functional name), recommended to use only with 'relu' or 'leaky_relu' (default).
Returns:
torch.nn.init.kaiming_normal_(data, a=0, mode='fan_in', nonlinearity='leaky_relu')
'''
NotImplementedError()
@abstractmethod
def kaiming_uniform(self, data, a=0, mode='fan_in', nonlinearity='leaky_relu'):
'''
Args:
data(tensor): an n-dimensional torch.Tensor
a: the negative slope of the rectifier used after this layer (only used with 'leaky_relu')
mode: either 'fan_in' (default) or 'fan_out'. Choosing 'fan_in' preserves the magnitude of the variance of the weights in the forward pass. Choosing 'fan_out' preserves the magnitudes in the backwards pass.
nonlinearity: the non-linear function (nn.functional name), recommended to use only with 'relu' or 'leaky_relu' (default).
Returns:
torch.nn.init.kaiming_uniform_(data, a=0, mode='fan_in', nonlinearity='leaky_relu')
'''
NotImplementedError()
@abstractmethod
def zero_init(self, data, constant_value=0.0):
'''
Args:
data(tensor): an n-dimensional torch.Tensor
constant_value(float): the value to fill the tensor with
Returns:
torch.nn.init.constant_(data, constant_value)
'''
NotImplementedError()
# @abstractmethod
# def euler_update(self):
# pass
#
# @abstractmethod
# def rk2_update(self):
# pass
#
# @abstractmethod
# def reset(self, v, v_reset, u_reset, spike):
# '''
# voltage reset
#
# Parameters
# ----------
# v
# v_reset
# u_reset
# spike
#
# Returns
# -------
# v[spike] = v_reset
# v[spike] += u_reset
# '''
#
# @abstractmethod
# def reset_u(self, u, u_reset, spike):
# '''
# recovery reset
#
# Parameters
# ----------
# u
# _reset
# spike
#
# Returns
# -------
# u[spike] = u+u_reset
# '''
# NotImplementedError()
#
# @abstractmethod
# def next_stage(self, x):
# '''
#
# Parameters
# ----------
# x: list
#
# Returns
# -------
# x[index]
# '''
#
# @abstractmethod
# def izh_v(self, v, u, psp):
# '''
#
# Parameters
# ----------
# v: list
# u: list
# psp: list
#
# Returns
# -------
# V=V+dt*(0.04*V^2+5*V+140-U+PSP)
# '''
# NotImplementedError()
#
# @abstractmethod
# def izh_u(self, a, b, v, u):
# '''
#
# Parameters
# ----------
# a: list
# b: list
# u: list
# v: list
#
# Returns
# -------
# U=U+a*(b*V-U)
# '''
# NotImplementedError()
def exp(self, x):
'''
Args:
x(tensor): an n-dimensional torch.Tensor
Returns:
return exp(x)
'''
raise NotImplementedError()
def sin(self, x):
'''
Args:
x(tensor): an n-dimensional torch.Tensor
Returns:
return sin(x)
'''
raise NotImplementedError()
def cos(self, x):
'''
Args:
x(tensor): an n-dimensional torch.Tensor
Returns:
return cos(x)
'''
raise NotImplementedError()
def tan(self, x):
'''
Args:
x(tensor): an n-dimensional torch.Tensor
Returns:
return tan(x)
'''
raise NotImplementedError()
def log(self, x):
'''
Args:
x(tensor): an n-dimensional torch.Tensor
Returns:
return log(x)
'''
raise NotImplementedError()
def log2(self, x):
'''
Args:
x(tensor): an n-dimensional torch.Tensor
Returns:
return log2(x)
'''
raise NotImplementedError()
def log10(self, x):
'''
Args:
x(tensor): an n-dimensional torch.Tensor
Returns:
return log10(x)
'''
raise NotImplementedError()
# class Darwin_Backend(Backend):
#
# def __init__(self):
# super(Darwin_Backend, self).__init__()
# pass
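# A minimal, self-contained sketch (illustrative only, not SPAIC API) of the
# op-triple format [[return_var_names], function, [input_var_names]] that
# _operations uses, showing how one update step could be evaluated:
def run_ops(variables, ops):
    update = {}
    for outs, fn, ins in ops:
        # inputs prefer freshly computed values, falling back to the state dict
        args = [update.get(n, variables[n]) for n in ins]
        res = fn(*args)
        if len(outs) == 1:
            res = [res]
        for name, val in zip(outs, res):
            update[name] = val
    variables.update(update)

state = {'V': 0.0, 'I': 1.0, 'tau': 0.9}
step = [(['V'], lambda tau, v, i: tau * v + i, ['tau', 'V', 'I'])]
run_ops(state, step)
print(state['V'])  # 1.0 after one leaky-integration step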
| 36.447263
| 218
| 0.54502
| 6,183
| 54,598
| 4.607472
| 0.08394
| 0.031452
| 0.023554
| 0.025063
| 0.573083
| 0.503054
| 0.451488
| 0.42653
| 0.396272
| 0.355974
| 0
| 0.008308
| 0.351826
| 54,598
| 1,497
| 219
| 36.47161
| 0.796688
| 0.336826
| 0
| 0.432602
| 0
| 0.001567
| 0.06338
| 0.000682
| 0
| 0
| 0
| 0.001336
| 0
| 1
| 0.117555
| false
| 0.003135
| 0.010972
| 0.001567
| 0.15674
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0673b6dfdd8c195674ae3591ed3bb93d152c2801
| 1,257
|
py
|
Python
|
yuz_egitimi.py
|
mehdikosaca/yuz_tanima
|
d2d7828a1f5562d21acde3af8df60ec96a88e7c3
|
[
"Apache-2.0"
] | 2
|
2021-12-30T06:38:21.000Z
|
2021-12-30T06:39:24.000Z
|
yuz_egitimi.py
|
mehdikosaca/yuz_tanima
|
d2d7828a1f5562d21acde3af8df60ec96a88e7c3
|
[
"Apache-2.0"
] | null | null | null |
yuz_egitimi.py
|
mehdikosaca/yuz_tanima
|
d2d7828a1f5562d21acde3af8df60ec96a88e7c3
|
[
"Apache-2.0"
] | null | null | null |
import cv2
import numpy as np
from PIL import Image
import os
# Path to the training data
path = "veriseti"
recognizer = cv2.face.LBPHFaceRecognizer_create()
detector = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
# Function that loads the images and extracts their labels
def getImageAndLabels(path):
imagePaths = [os.path.join(path,f) for f in os.listdir(path)]
ornekler = []
ids = []
for imagePath in imagePaths:
PIL_img = Image.open(imagePath).convert("L") #GRİ
img_numpy = np.array(PIL_img,"uint8")
id = int(os.path.split(imagePath)[-1].split(".")[0])
print("id = ",id)
yuzler = detector.detectMultiScale(img_numpy)
for (x,y,w,h) in yuzler:
ornekler.append(img_numpy[y:y+h,x:x+w])
ids.append(id)
return ornekler,ids
print("\n [INFO] yüzler eğitiliyor. Birkaç saniye bekleyin...")
yuzler, ids = getImageAndLabels(path)
recognizer.train(yuzler,np.array(ids))
# Save the model to the egitim/egitim.yml training file
recognizer.write("egitim/egitim.yml") #Dikkat! recognizer.save() Raspberry Pi üzerinde çalışmıyor
# Show the number of trained faces and end the script
print(f"\n [INFO] {len(np.unique(ids))} yüz eğitildi. Betik sonlandırılıyor...")
print(yuzler)
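# A follow-up sketch (assumption, not in the original script): load the saved
# model back and predict on one of the training crops.
recognizer2 = cv2.face.LBPHFaceRecognizer_create()
recognizer2.read("egitim/egitim.yml")
if len(yuzler) > 0:
    label, confidence = recognizer2.predict(yuzler[0])
    # for LBPH, a lower confidence value means a closer match
    print(f"predicted id={label}, confidence={confidence:.1f}")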
| 36.970588
| 97
| 0.706444
| 170
| 1,257
| 5.182353
| 0.564706
| 0.027242
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00666
| 0.163882
| 1,257
| 33
| 98
| 38.090909
| 0.830637
| 0.165473
| 0
| 0
| 0
| 0
| 0.187919
| 0.053691
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037037
| false
| 0
| 0.148148
| 0
| 0.222222
| 0.148148
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
06743547989129e1af7ae30ff01eaf04b4056ad2
| 1,846
|
py
|
Python
|
hello.py
|
jferroaq/Tarea7z
|
013f1f1e8dc3b631be102d6e5731d2ffdffd3657
|
[
"Apache-2.0"
] | null | null | null |
hello.py
|
jferroaq/Tarea7z
|
013f1f1e8dc3b631be102d6e5731d2ffdffd3657
|
[
"Apache-2.0"
] | null | null | null |
hello.py
|
jferroaq/Tarea7z
|
013f1f1e8dc3b631be102d6e5731d2ffdffd3657
|
[
"Apache-2.0"
] | null | null | null |
import kivy
from kivy.app import App
from kivy.uix.label import Label
from kivy.uix.colorpicker import ColorPicker
from kivy.graphics import Color, Ellipse, Triangle
from kivy.properties import StringProperty, ObjectProperty
class Titulo(Label):
cadena=StringProperty("Jesus te ama...")
triangle=ObjectProperty(None)
def __init__(self, **kwargs):
super(Titulo, self).__init__(**kwargs)
with self.canvas:
self.triangle=Triangle(points= [40, 40, 200, 200, 160, 40])
def on_touch_down(self, touch):
if self.collide_point(*touch.pos):
self.cadena="Collide: "+str(touch.pos)
print("on_touch_down-->Collide")
return True
return super(Titulo, self).on_touch_down(touch)
def on_cadena(self, obj, pos):
print("Se ha actualizado 'Cadena'")
def on_triangle(self, obj, pos):
print("Se ha actualizado 'triangle'")
class SaludoApp(App):
def build(self):
self.paleta=ColorPicker()
self.pintor=Titulo()
self.pintor.bind(on_touch_down=self.dentro)
return self.pintor
def dentro(self, obj, st):
lista=self.pintor.triangle.points
tu=st.x, st.y
rpta = True
py=lista[-1]
px=lista[-2]
for i in range(0, len(lista), 2):
px0=px
py0=py
px=lista[i]
py=lista[i+1]
a=px - px0
b=py - py0
c=tu[0] - px0
d=tu[1] - py0
if (b*c - a*d) < 0: # edge cross product: negative means the touch point lies outside this edge
rpta = False
print(rpta)
break
if rpta:
self.pintor.add_widget(self.paleta)
return rpta
def eleccion(self, obj, st):
print("Pos X: %g, Pos Y: %g" %(st.x, st.y))
ca,cb,cc = .5, .5, .6
a,b = 150,45
radio = 50
with self.pintor.canvas:
Color(ca, cb, cc, mode = 'hsv' )
Triangle(
points = [0, 0, 100, 100, 80, 20])
if __name__ in ["__main__", "__android__"]:
SaludoApp().run()
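# A standalone sketch (no Kivy required) of the same cross-product containment
# test that dentro() performs, for a clockwise-wound convex polygon given as a
# flat coordinate list [x0, y0, x1, y1, ...]:
def point_in_convex_polygon(points, x, y):
    px, py = points[-2], points[-1]
    for i in range(0, len(points), 2):
        qx, qy = points[i], points[i + 1]
        # cross product of edge (p -> q) with (p -> point); negative = outside
        if (qy - py) * (x - px) - (qx - px) * (y - py) < 0:
            return False
        px, py = qx, qy
    return True

print(point_in_convex_polygon([40, 40, 200, 200, 160, 40], 120, 80))  # True
print(point_in_convex_polygon([40, 40, 200, 200, 160, 40], 10, 10))   # False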
| 25.287671
| 65
| 0.62026
| 274
| 1,846
| 4.062044
| 0.364964
| 0.053908
| 0.039533
| 0.026954
| 0.053908
| 0.053908
| 0.053908
| 0
| 0
| 0
| 0
| 0.036273
| 0.238353
| 1,846
| 72
| 66
| 25.638889
| 0.755334
| 0
| 0
| 0
| 0
| 0
| 0.077465
| 0.012459
| 0
| 0
| 0
| 0
| 0
| 1
| 0.112903
| false
| 0
| 0.096774
| 0
| 0.33871
| 0.080645
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0674d6e58cd606f3c44fa44647eb41365904b800
| 356
|
py
|
Python
|
mundo-02/aula13-ex054.py
|
fabiocoutoaraujo/CursoVideoPython
|
7e3b6ab89cbbba79f640d12e40f3d1e8c22295cf
|
[
"MIT"
] | 1
|
2020-04-18T16:39:23.000Z
|
2020-04-18T16:39:23.000Z
|
mundo-02/aula13-ex054.py
|
fabiocoutoaraujo/CursoVideoPython
|
7e3b6ab89cbbba79f640d12e40f3d1e8c22295cf
|
[
"MIT"
] | null | null | null |
mundo-02/aula13-ex054.py
|
fabiocoutoaraujo/CursoVideoPython
|
7e3b6ab89cbbba79f640d12e40f3d1e8c22295cf
|
[
"MIT"
] | null | null | null |
from datetime import date
maior = menor = 0
atual = date.today().year
for c in range(1, 8):
nascimento = int(input(f'Em que ano a {c}ª pessoa nasceu? '))
if atual - nascimento > 20: # strictly more than 20 years, i.e. turning 21 or older this year
maior += 1
else:
menor += 1
print(f'Ao todo, temos {maior} pessoas maiores de idade!')
print(f'Ao todo, temos {menor} pessoas menores de idade!')
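# A stricter variant (sketch, assuming a full birthdate is available) that
# accounts for month and day instead of comparing years only:
def eh_maior(birthdate, majority=21, today=None):
    today = today or date.today()
    # subtract 1 if the birthday has not happened yet this year
    age = today.year - birthdate.year - (
        (today.month, today.day) < (birthdate.month, birthdate.day))
    return age >= majority

print(eh_maior(date(2000, 1, 1)))  # True once the person has turned 21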
| 29.666667
| 65
| 0.63764
| 58
| 356
| 3.913793
| 0.655172
| 0.052863
| 0.070485
| 0.105727
| 0.14978
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025735
| 0.235955
| 356
| 11
| 66
| 32.363636
| 0.808824
| 0
| 0
| 0
| 0
| 0
| 0.36236
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.090909
| 0
| 0.090909
| 0.181818
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0675b9a64430a3b476aa0125ccfd22711ba0b255
| 6,356
|
py
|
Python
|
Contents/Code/zdfneo.py
|
typekitrel/abctestard
|
1df43561327694ba155f513ad152aab51c56ef42
|
[
"MIT"
] | null | null | null |
Contents/Code/zdfneo.py
|
typekitrel/abctestard
|
1df43561327694ba155f513ad152aab51c56ef42
|
[
"MIT"
] | null | null | null |
Contents/Code/zdfneo.py
|
typekitrel/abctestard
|
1df43561327694ba155f513ad152aab51c56ef42
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# zdfneo.py - called from __init__.py/ZDF_get_content
#
# These functions parse the ZDF Neo pages
#
Neo_Base = 'https://www.neo-magazin-royale.de'
PREFIX = '/video/ardmediathek2016/zdfneo'
####################################################################################################
@route(PREFIX + '/neo_content')
def neo_content(path, ID, offset=0):
Log('neo_content')
# JUMPPATH = 'https://www.neo-magazin-royale.de/zdi/?start=%s&count=8' # also includes editorial contributions
# JUMPPATH: start=0: page 1, 8 = page 2
JUMPPATH = 'https://www.neo-magazin-royale.de/zdi/themen/134270/thema-ganze-folge.html?start=%s&count=8'
title_main = 'NEO MAGAZIN ROYALE'
if offset == 0: # discard the initial path (from ZDF_get_content); JUMPPATH contains full episodes
path = JUMPPATH % str(0)
page = HTTP.Request(path).content
    pagination = blockextract('class="pagination', page)  # "pagination active" = current page
    page_cnt = len(pagination)
    last_page = stringextract('count=8">', '</a>', pagination[-1])  # last page
act_page = stringextract('pagination active">', 'a>', page)
act_page = stringextract('count=8">', '<', act_page)
if offset == 0:
act_page = '1'
cnt_per_page = 8
oc = ObjectContainer(title2='Seite ' + act_page, view_group="List")
oc = home(cont=oc, ID='ZDF') # Home-Button
content = blockextract('class="modules', page)
    if len(oc) == 0:
        msg_notfound = title_main + ': Auswertung fehlgeschlagen'
        title = msg_notfound.decode(encoding="utf-8", errors="ignore")
        name = "ZDF Mediathek"
        summary = 'zurück zur ' + name.decode(encoding="utf-8", errors="ignore")
oc.add(DirectoryObject(key=Callback(Main_ZDF, name=name), title=title,
summary=summary, tagline='TV', thumb=R(ICON_MAIN_ZDF)))
return oc
for rec in content:
url = Neo_Base + stringextract('href="', '"', rec)
        img = stringextract('sophoraimage="', '"', rec)  # ZDF path
        if img == '':
            img = Neo_Base + stringextract('src="', '"', rec)  # NEO path without base
        img = img.decode(encoding="utf-8", errors="ignore")  # umlauts in the path (hurensöhne_mannheims)
img_alt = 'Bild: ' + stringextract('alt="', '"', rec)
img_alt = unescape_neo(img_alt)
img_alt = img_alt.decode(encoding="utf-8", errors="ignore")
title = stringextract('name">', '</h3', rec)
if title == '':
title = stringextract('content="', '"', rec)
dataplayer = stringextract('data-player="', '"', rec)
sid = stringextract('data-sophoraid="', '"', rec)
datetime = ''
if 'datetime=""' in rec:
datetime = stringextract('datetime="">', '</time>', rec)# datetime="">07.09.2016</time>
else:
datetime = stringextract('datetime="', '</time>', rec) # ="2017-05-18 18:10">18.05.2017</time>
            datetime = datetime[11:]  # strip the leading date
datetime = datetime.replace('">', ', ')
Log('neuer Satz:')
Log(url);Log(img);Log(title);Log(dataplayer);Log(sid);Log(datetime);
title = title.decode(encoding="utf-8", errors="ignore")
oc.add(DirectoryObject(key=Callback(GetNeoVideoSources, url=url, sid=sid, title=title, summary=datetime,
tagline=img_alt, thumb=img), title=title, summary=datetime, tagline=img_alt, thumb=img))
    # check for "more" pages
Log('offset: ' + str(offset));Log(act_page); Log(last_page)
if int(act_page) < int(last_page):
offset = int(offset) + 8
JUMPPATH = JUMPPATH % offset
Log(JUMPPATH);
oc.add(DirectoryObject(key=Callback(neo_content, path=JUMPPATH, ID=ID, offset=offset),
title=title_main, thumb=R(ICON_MEHR), summary=''))
return oc
#-------------------------
@route(PREFIX + '/GetNeoVideoSources')
# loading chain similar to ZDF (get_formitaeten), but identical only for videodat_url
def GetNeoVideoSources(url, sid, title, summary, tagline, thumb):
Log('GetNeoVideoSources url: ' + url)
oc = ObjectContainer(title2='Videoformate', view_group="List")
oc = home(cont=oc, ID='ZDF') # Home-Button
    formitaeten = get_formitaeten(sid=sid, ID='NEO')  # determine the video URLs
    if formitaeten == '':  # double-check for videos
msg = 'Videoquellen zur Zeit nicht erreichbar' + ' Seite:\r' + url
return ObjectContainer(header='Error', message=msg)
only_list = ["h264_aac_ts_http_m3u8_http"]
oc, download_list = show_formitaeten(oc=oc, title_call=title, formitaeten=formitaeten, tagline=tagline,
thumb=thumb, only_list=only_list)
    title_oc = 'weitere Video-Formate'
    if Prefs['pref_use_downloads']:
        title = title + ' und Download'
    # oc = Parseplaylist(oc, videoURL, thumb)  # not needed here - ZDF already offers 3 resolution ranges
oc.add(DirectoryObject(key=Callback(NEOotherSources, title=title, tagline=tagline, thumb=thumb, sid=sid),
title=title_oc, summary='', thumb=R(ICON_MEHR)))
return oc
#-------------------------
@route(PREFIX + '/NEOotherSources')
def NEOotherSources(title, tagline, thumb, sid):
Log('NEOotherSources')
    title_org = title  # backup for the text file accompanying the video
    summary_org = tagline  # swap summary with tagline (summary takes precedence during playback)
oc = ObjectContainer(title2='Videoformate', view_group="List")
oc = home(cont=oc, ID='ZDF') # Home-Button
    formitaeten = get_formitaeten(sid=sid, ID='NEO')  # determine the video URLs
    if formitaeten == '':  # double-check for videos
        msg = 'Video leider nicht mehr vorhanden'
        return ObjectContainer(header='Error', message=msg)
    only_list = ["h264_aac_mp4_http_na_na", "vp8_vorbis_webm_http_na_na"]
oc, download_list = show_formitaeten(oc=oc, title_call=title, formitaeten=formitaeten, tagline=tagline,
thumb=thumb, only_list=only_list)
    # high=0: first video is the highest quality so far: [progressive] veryhigh
    oc = test_downloads(oc, download_list, title_org, summary_org, tagline, thumb, high=0)  # download button(s)
return oc
####################################################################################################
# htmlentities in neo; for the characters see http://aurelio.net/bin/python/fix-htmldoc-utf8.py
# HTMLParser() fails here
def unescape_neo(line):
    line_ret = (line.replace("Ã¶", "ö").replace("Ã¤", "ä").replace("Ã¼", "ü")
                .replace("Ã\x96", "Ö").replace("Ã\x84", "Ä").replace("Ã\x9c", "Ü")
                .replace("Ã\x9f", "ß"))
return line_ret
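# Hedged aside (not part of the original plugin): the replace() chain above
# assumes UTF-8 text that was mis-decoded as Latin-1. Under that assumption a
# single encode/decode round trip repairs every such sequence at once
# (sketched in Python 3 terms; the plugin itself targets Plex's Python 2):
def unescape_neo_roundtrip(line):
    try:
        return line.encode('latin-1').decode('utf-8')
    except (UnicodeEncodeError, UnicodeDecodeError):
        return line  # not mojibake; leave the text unchanged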
| 43.834483
| 112
| 0.660321
| 814
| 6,356
| 5.035627
| 0.299754
| 0.021957
| 0.020737
| 0.021957
| 0.319102
| 0.286411
| 0.258112
| 0.258112
| 0.240059
| 0.205416
| 0
| 0.017089
| 0.143801
| 6,356
| 144
| 113
| 44.138889
| 0.736126
| 0.18927
| 0
| 0.205882
| 0
| 0.009804
| 0.207893
| 0.026648
| 0.019608
| 0
| 0
| 0
| 0
| 1
| 0.039216
| false
| 0
| 0
| 0
| 0.107843
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
067d4e2d3158aba74160b531385178fe32b82215
| 1,379
|
py
|
Python
|
src/cogs/example_cog.py
|
Abaan404/MagmaBot
|
2149f6ad8a6a1158112ab9efb4dc77c04c3a5f8e
|
[
"MIT"
] | 1
|
2021-10-03T21:05:45.000Z
|
2021-10-03T21:05:45.000Z
|
src/cogs/example_cog.py
|
Abaan404/MagmaBot
|
2149f6ad8a6a1158112ab9efb4dc77c04c3a5f8e
|
[
"MIT"
] | null | null | null |
src/cogs/example_cog.py
|
Abaan404/MagmaBot
|
2149f6ad8a6a1158112ab9efb4dc77c04c3a5f8e
|
[
"MIT"
] | null | null | null |
import discord, itertools
from discord.ext import commands, tasks
# Lava is not allowed to change the first text
PRESENCE_TEXT = itertools.cycle(["lava is cute", "*pushes you against wall* wanna play fortnite amongus?", "with ur mum", "owo.exe", "dangit jelly", "gewrhgkhewghkhfuckoiyo5uo", "MiEWcWAFT?? OWOWO"])
class ExampleCog(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.presence_text_loop.start()
# A command example
@commands.command(name = "sus", aliases = ["sussy", "amongus", "AAAA"])
async def _sus(self, ctx, user: discord.Member):
"""
`+sus [user]`: Sends a sus link
### Parameters
---------------
`[user]`: discord.Member
The member being mentioned
"""
await ctx.send(f"Heres your link {user.mention} you sussy little baka ***pushes you against wall*** owo?\n https://youtu.be/rlkSMp7iz6c")
# A task example
@tasks.loop(seconds = 30)
async def presence_text_loop(self):
"""
Cycle through `Now playing` statuses
"""
await self.bot.change_presence(activity = discord.Activity(type = discord.enums.ActivityType.playing, name = next(PRESENCE_TEXT)))
@presence_text_loop.before_loop
async def _wait(self):
await self.bot.wait_until_ready()
def setup(bot):
bot.add_cog(ExampleCog(bot))
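# Hedged usage note (module path is illustrative): with the synchronous
# discord.py 1.x-style setup() above, the cog is loaded from the bot's entry
# point via something like:
#   bot.load_extension("cogs.example_cog")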
| 35.358974
| 199
| 0.645395
| 173
| 1,379
| 5.034682
| 0.543353
| 0.068886
| 0.055109
| 0.045924
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004655
| 0.221175
| 1,379
| 38
| 200
| 36.289474
| 0.806331
| 0.055838
| 0
| 0
| 0
| 0.055556
| 0.258702
| 0.023518
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.111111
| 0
| 0.277778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
068506b54ed89a62c865b814f0418d72003474e6
| 856
|
py
|
Python
|
packit_dashboard/api/routes.py
|
lbarcziova/dashboard
|
6ad1141a475d68b081a4fa2ceec5363678ae4e38
|
[
"MIT"
] | null | null | null |
packit_dashboard/api/routes.py
|
lbarcziova/dashboard
|
6ad1141a475d68b081a4fa2ceec5363678ae4e38
|
[
"MIT"
] | null | null | null |
packit_dashboard/api/routes.py
|
lbarcziova/dashboard
|
6ad1141a475d68b081a4fa2ceec5363678ae4e38
|
[
"MIT"
] | null | null | null |
from flask import Blueprint, jsonify, request
from packit_dashboard.utils import return_json
from packit_dashboard.config import API_URL
api = Blueprint("api", __name__)
# The react frontend will request information here instead of fetching directly
# from the main API.
# This is because it will be easier to implement caching API requests here.
# (Flask-Caching etc)
@api.route("/api/copr-builds/")
def copr_builds():
page = request.args.get("page")
per_page = request.args.get("per_page")
url = f"{API_URL}/copr-builds?page={page}&per_page={per_page}"
return jsonify(return_json(url))
@api.route("/api/testing-farm/")
def testing_farm():
page = request.args.get("page")
per_page = request.args.get("per_page")
url = f"{API_URL}/testing-farm/results?page={page}&per_page={per_page}"
return jsonify(return_json(url))
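# Hedged aside (not part of the original file): the comment at the top names
# Flask-Caching as the reason for proxying the API. One way that could look,
# assuming a Cache object initialized against `app` elsewhere:
#
#   from flask_caching import Cache
#   cache = Cache(app, config={"CACHE_TYPE": "SimpleCache"})
#
#   @api.route("/api/copr-builds/")
#   @cache.cached(timeout=60, query_string=True)  # keyed on page/per_page
#   def copr_builds():
#       ...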
| 31.703704
| 79
| 0.731308
| 130
| 856
| 4.646154
| 0.376923
| 0.092715
| 0.109272
| 0.119205
| 0.357616
| 0.357616
| 0.357616
| 0.357616
| 0.357616
| 0.357616
| 0
| 0
| 0.13785
| 856
| 26
| 80
| 32.923077
| 0.818428
| 0.221963
| 0
| 0.375
| 0
| 0
| 0.267776
| 0.173979
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.1875
| 0
| 0.4375
| 0.125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0687810d3ca357eb81c8f40b9ee9e277ec90842e
| 3,668
|
py
|
Python
|
examples/mag_wmm2015.py
|
CHEN-Zhaohui/geoist
|
06a00db3e0ed3d92abf3e45b7b3bfbef6a858a5b
|
[
"MIT"
] | 53
|
2018-11-17T03:29:55.000Z
|
2022-03-18T02:36:25.000Z
|
examples/mag_wmm2015.py
|
CHEN-Zhaohui/geoist
|
06a00db3e0ed3d92abf3e45b7b3bfbef6a858a5b
|
[
"MIT"
] | 3
|
2018-11-28T11:37:51.000Z
|
2019-01-30T01:52:45.000Z
|
examples/mag_wmm2015.py
|
CHEN-Zhaohui/geoist
|
06a00db3e0ed3d92abf3e45b7b3bfbef6a858a5b
|
[
"MIT"
] | 35
|
2018-11-17T03:29:57.000Z
|
2022-03-23T17:57:06.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 10 18:34:07 2019
Compute the WMM2015 model; the WMM.cof file must be placed in the same directory as the .py file
@author: chens
"""
import numpy as np
from pathlib import Path
import xarray
import ctypes as ct
import sys
import datetime
from matplotlib.pyplot import figure
#libwmm = ct.cdll.LoadLibrary(str('D:\\MyWorks\\WMM2015-master\\wmm15.dll'))
libwmm = ct.cdll.LoadLibrary(str('D:\\MyWorks\\WMM2015-master\\noaa.dll'))
def noaa(glats: np.ndarray, glons: np.ndarray, alt_km: float, yeardec: float, mod = 'wmm') -> xarray.Dataset:
glats = np.atleast_2d(glats).astype(float) # to coerce all else to float64
glons = np.atleast_2d(glons)
assert glats.shape == glons.shape
mag = xarray.Dataset(coords={'glat': glats[:, 0], 'glon': glons[0, :]})
north = np.empty(glats.size)
east = np.empty(glats.size)
down = np.empty(glats.size)
total = np.empty(glats.size)
decl = np.empty(glats.size)
incl = np.empty(glats.size)
for i, (glat, glon) in enumerate(zip(glats.ravel(), glons.ravel())):
x = ct.c_double()
y = ct.c_double()
z = ct.c_double()
T = ct.c_double()
D = ct.c_double()
mI = ct.c_double()
if mod == 'wmm':
ret = libwmm.wmmsub(ct.c_double(glat),
ct.c_double(glon),
ct.c_double(alt_km),
ct.c_double(yeardec),
ct.byref(x), ct.byref(y), ct.byref(z),
ct.byref(T), ct.byref(D), ct.byref(mI))
else:
ret = libwmm.emmsub(ct.c_double(glat),
ct.c_double(glon),
ct.c_double(alt_km),
ct.c_double(yeardec),
ct.byref(x), ct.byref(y), ct.byref(z),
ct.byref(T), ct.byref(D), ct.byref(mI))
#print(ret)
assert ret == 0
north[i] = x.value
east[i] = y.value
down[i] = z.value
total[i] = T.value
decl[i] = D.value
incl[i] = mI.value
mag['north'] = (('glat', 'glon'), north.reshape(glats.shape))
mag['east'] = (('glat', 'glon'), east.reshape(glats.shape))
mag['down'] = (('glat', 'glon'), down.reshape(glats.shape))
mag['total'] = (('glat', 'glon'), total.reshape(glats.shape))
mag['incl'] = (('glat', 'glon'), incl.reshape(glats.shape))
mag['decl'] = (('glat', 'glon'), decl.reshape(glats.shape))
mag.attrs['time'] = yeardec
return mag
def plotwmm(mag: xarray.Dataset):
fg = figure()
ax = fg.subplots(1, 2, sharey=True)
fg.suptitle('WMM2015 {}'.format(mag.time))
h = ax[0].contour(mag.glon, mag.glat, mag.decl, range(-90, 90+20, 20))
ax[0].clabel(h, inline=True, fmt='%0.1f')
ax[0].set_title('Magnetic Declination [degrees]')
h = ax[1].contour(mag.glon, mag.glat, mag.incl, range(-90, 90+20, 20))
ax[1].clabel(h, inline=True, fmt='%0.1f')
ax[1].set_title('Magnetic Inclination [degrees]')
ax[0].set_ylabel('Geographic latitude (deg)')
for a in ax:
a.set_xlabel('Geographic longitude (deg)')
from geoist.others.scidates import datetime2yeardec
dt = datetime.datetime(2012, 7, 12, 12)
print(datetime2yeardec(dt))
mag = noaa(45.5, 105.6, 0.2, datetime2yeardec(dt), mod='emm')
#print(mag.north.item())
#print(mag.east.item())
#print(mag.down.item())
print("F:",mag.total.item()) #F
print("D:",mag.decl.item()) #D
print("I:",mag.incl.item()) #I
from matplotlib.pyplot import show
lon, lat = np.meshgrid(np.arange(-180, 180+10, 10), np.arange(-90, 90+10, 10))
mag = noaa(lat, lon, 0, 2015)
plotwmm(mag)
show()
| 33.045045
| 109
| 0.571156
| 527
| 3,668
| 3.931689
| 0.29222
| 0.02027
| 0.060811
| 0.046332
| 0.208494
| 0.208494
| 0.170849
| 0.170849
| 0.146718
| 0.101351
| 0
| 0.040247
| 0.248092
| 3,668
| 111
| 110
| 33.045045
| 0.711022
| 0.079335
| 0
| 0.125
| 0
| 0
| 0.080333
| 0.011009
| 0
| 0
| 0
| 0
| 0.025
| 1
| 0.025
| false
| 0
| 0.1125
| 0
| 0.15
| 0.05
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0688619f7ef43b02605de1e45f9fd553d9142b12
| 3,089
|
py
|
Python
|
test/e2e/tests/test_transit_gateway.py
|
timbyr/ec2-controller
|
d96d056fdc6ec7d31981f4c14cad8d740f6cf6ec
|
[
"Apache-2.0"
] | 14
|
2021-08-04T00:21:49.000Z
|
2022-03-21T01:06:09.000Z
|
test/e2e/tests/test_transit_gateway.py
|
timbyr/ec2-controller
|
d96d056fdc6ec7d31981f4c14cad8d740f6cf6ec
|
[
"Apache-2.0"
] | 48
|
2021-08-03T19:00:42.000Z
|
2022-03-31T22:18:42.000Z
|
test/e2e/tests/test_transit_gateway.py
|
timbyr/ec2-controller
|
d96d056fdc6ec7d31981f4c14cad8d740f6cf6ec
|
[
"Apache-2.0"
] | 9
|
2021-07-22T15:49:43.000Z
|
2022-03-06T22:24:14.000Z
|
# Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may
# not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Integration tests for the Transit Gateway API.
"""
import boto3
import pytest
import time
import logging
from acktest.resources import random_suffix_name
from acktest.k8s import resource as k8s
from e2e import service_marker, CRD_GROUP, CRD_VERSION, load_ec2_resource
from e2e.replacement_values import REPLACEMENT_VALUES
RESOURCE_PLURAL = "transitgateways"
## The long delete wait is required to make sure the TGW can transition out of its "pending" status.
## TGWs are unable to be deleted while in "pending"
CREATE_WAIT_AFTER_SECONDS = 90
DELETE_WAIT_AFTER_SECONDS = 10
@pytest.fixture(scope="module")
def ec2_client():
return boto3.client("ec2")
def get_tgw(ec2_client, tgw_id: str) -> dict:
try:
resp = ec2_client.describe_transit_gateways(
TransitGatewayIds=[tgw_id]
)
except Exception as e:
logging.debug(e)
return None
if len(resp["TransitGateways"]) == 0:
return None
return resp["TransitGateways"][0]
def tgw_exists(ec2_client, tgw_id: str) -> bool:
tgw = get_tgw(ec2_client, tgw_id)
return tgw is not None and tgw['State'] != "deleting" and tgw['State'] != "deleted"
@service_marker
@pytest.mark.canary
class TestTGW:
def test_create_delete(self, ec2_client):
resource_name = random_suffix_name("tgw-ack-test", 24)
replacements = REPLACEMENT_VALUES.copy()
replacements["TGW_NAME"] = resource_name
# Load TGW CR
resource_data = load_ec2_resource(
"transitgateway",
additional_replacements=replacements,
)
logging.debug(resource_data)
# Create k8s resource
ref = k8s.CustomResourceReference(
CRD_GROUP, CRD_VERSION, RESOURCE_PLURAL,
resource_name, namespace="default",
)
k8s.create_custom_resource(ref, resource_data)
cr = k8s.wait_resource_consumed_by_controller(ref)
assert cr is not None
assert k8s.get_resource_exists(ref)
resource = k8s.get_resource(ref)
resource_id = resource["status"]["transitGatewayID"]
time.sleep(CREATE_WAIT_AFTER_SECONDS)
# Check TGW exists
exists = tgw_exists(ec2_client, resource_id)
assert exists
# Delete k8s resource
_, deleted = k8s.delete_custom_resource(ref, 2, 5)
assert deleted is True
time.sleep(DELETE_WAIT_AFTER_SECONDS)
# Check TGW doesn't exist
exists = tgw_exists(ec2_client, resource_id)
assert not exists
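# Hedged aside (not part of the original test): the fixed sleeps above are
# simple, but a small poll loop is more robust when TGW state transitions run
# long. A sketch reusing tgw_exists() from this module:
def wait_for_tgw_deleted(ec2_client, tgw_id: str, timeout: int = 300, interval: int = 10) -> bool:
    deadline = time.time() + timeout
    while time.time() < deadline:
        if not tgw_exists(ec2_client, tgw_id):
            return True
        time.sleep(interval)
    return False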
| 30.89
| 100
| 0.697637
| 409
| 3,089
| 5.085575
| 0.408313
| 0.034615
| 0.030769
| 0.020192
| 0.090385
| 0.057692
| 0.038462
| 0.038462
| 0
| 0
| 0
| 0.016257
| 0.223373
| 3,089
| 100
| 101
| 30.89
| 0.850771
| 0.265458
| 0
| 0.068966
| 0
| 0
| 0.063308
| 0
| 0
| 0
| 0
| 0
| 0.086207
| 1
| 0.068966
| false
| 0
| 0.137931
| 0.017241
| 0.310345
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
06897ca4a2ea127df4c4fbdc8e71310f23dfe61f
| 2,862
|
py
|
Python
|
Phase 4/src/search.py
|
ishaanshah/GameDhaBha
|
5ab4f13ec7554ba74739d9a149da1154bb09041a
|
[
"MIT"
] | null | null | null |
Phase 4/src/search.py
|
ishaanshah/GameDhaBha
|
5ab4f13ec7554ba74739d9a149da1154bb09041a
|
[
"MIT"
] | null | null | null |
Phase 4/src/search.py
|
ishaanshah/GameDhaBha
|
5ab4f13ec7554ba74739d9a149da1154bb09041a
|
[
"MIT"
] | null | null | null |
""" Contains all the functions related to the search of enitities in the Database """
from tabulate import tabulate
def SearchPlayerByName(cur, con):
""" Searches for the provided name's similar occurences in the Player's first and last name """
# Take in the input for the search query
search = {}
search["pattern"] = input("Enter the player name that you are looking for: ")
search["pattern"] = "%" + search["pattern"] + "%"
query = """
SELECT *
FROM Players
WHERE FirstName LIKE %(pattern)s
OR LastName LIKE %(pattern)s
"""
print("\nExecuting")
print(query)
# Execute query
cur.execute(query, search)
# Print the output
headers = ["Username", "PlayerID", "FirstName", "LastName", "Winnings",
"Nationality", "DateOfBirth"]
rows = []
while True:
res = cur.fetchone()
if res is None:
break
rows.append([
res["Username"], res["PlayerID"], res["FirstName"], res["LastName"],
res["Winnings"], res["Nationality"], res["DateOfBirth"]
])
print(tabulate(rows, headers = headers, tablefmt = "orgtbl"))
print("")
def SearchOrganisationByName(cur, con):
""" Searches for an Organisation by the name given. """
# Take in the input for the search query
search = {}
search["pattern"] = input("Enter the organisation's name that you are looking for: ")
search["pattern"] = "%" + search["pattern"] + "%"
query = """
SELECT *
FROM Organisations
WHERE Name LIKE %(pattern)s
"""
print("\nExecuting")
print(query)
# Execute query
cur.execute(query, search)
# Print the output
headers = ["OrganisationID", "Name", "Headquarters", "Founded", "Earnings"]
rows = []
while True:
res = cur.fetchone()
if res is None:
break
rows.append([
res["OrganisationID"], res["Name"], res["Headquarters"], res["Founded"],
res["Earnings"]
])
print(tabulate(rows, headers = headers, tablefmt = "orgtbl"))
print("")
def SearchHandler(cur, con):
# Define Handlers
handlers = [
SearchPlayerByName,
SearchOrganisationByName
]
# Get operation to Perform
print("1. Search Player by Name.")
print("2. Search Organisation by Name.")
print("3. Go Back.")
ch = int(input("Enter choice: "))
if ch == 3:
return
try:
handlers[ch - 1](cur, con)
con.commit()
print("Search Successful.")
except (IndexError, TypeError):
print(f"Error: Invalid Option {ch}")
except Exception as error:
con.rollback()
print("Failed to update the Database.")
print(f"Error: {error}")
| 25.327434
| 99
| 0.568484
| 303
| 2,862
| 5.369637
| 0.353135
| 0.047941
| 0.022127
| 0.020897
| 0.403196
| 0.403196
| 0.403196
| 0.403196
| 0.403196
| 0.338045
| 0
| 0.002494
| 0.299441
| 2,862
| 112
| 100
| 25.553571
| 0.808978
| 0.139064
| 0
| 0.472222
| 0
| 0
| 0.335931
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.013889
| 0
| 0.069444
| 0.208333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
068a35a559d65ea89371c4e0284f743170c94d8d
| 15,413
|
py
|
Python
|
machine/qemu/sources/u-boot/test/py/tests/test_efi_fit.py
|
muddessir/framework
|
5b802b2dd7ec9778794b078e748dd1f989547265
|
[
"MIT"
] | 1
|
2021-11-21T19:56:29.000Z
|
2021-11-21T19:56:29.000Z
|
machine/qemu/sources/u-boot/test/py/tests/test_efi_fit.py
|
muddessir/framework
|
5b802b2dd7ec9778794b078e748dd1f989547265
|
[
"MIT"
] | null | null | null |
machine/qemu/sources/u-boot/test/py/tests/test_efi_fit.py
|
muddessir/framework
|
5b802b2dd7ec9778794b078e748dd1f989547265
|
[
"MIT"
] | null | null | null |
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2019, Cristian Ciocaltea <cristian.ciocaltea@gmail.com>
#
# Work based on:
# - test_net.py
# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
# - test_fit.py
# Copyright (c) 2013, Google Inc.
#
# Test launching UEFI binaries from FIT images.
"""
Note: This test relies on boardenv_* containing configuration values to define
which network environment is available for testing. Without this, the parts
that rely on network will be automatically skipped.
For example:
# Boolean indicating whether the Ethernet device is attached to USB, and hence
# USB enumeration needs to be performed prior to network tests.
# This variable may be omitted if its value is False.
env__net_uses_usb = False
# Boolean indicating whether the Ethernet device is attached to PCI, and hence
# PCI enumeration needs to be performed prior to network tests.
# This variable may be omitted if its value is False.
env__net_uses_pci = True
# True if a DHCP server is attached to the network, and should be tested.
# If DHCP testing is not possible or desired, this variable may be omitted or
# set to False.
env__net_dhcp_server = True
# A list of environment variables that should be set in order to configure a
# static IP. If solely relying on DHCP, this variable may be omitted or set to
# an empty list.
env__net_static_env_vars = [
('ipaddr', '10.0.0.100'),
('netmask', '255.255.255.0'),
('serverip', '10.0.0.1'),
]
# Details regarding a file that may be read from a TFTP server. This variable
# may be omitted or set to None if TFTP testing is not possible or desired.
# Additionally, when the 'size' is not available, the file will be generated
# automatically in the TFTP root directory, as specified by the 'dn' field.
env__efi_fit_tftp_file = {
'fn': 'test-efi-fit.img', # File path relative to TFTP root
'size': 3831, # File size
'crc32': '9fa3f79c', # Checksum using CRC-32 algorithm, optional
'addr': 0x40400000, # Loading address, integer, optional
'dn': 'tftp/root/dir', # TFTP root directory path, optional
}
"""
import os.path
import pytest
import u_boot_utils as util
# Define the parametrized ITS data to be used for FIT images generation.
ITS_DATA = '''
/dts-v1/;
/ {
description = "EFI image with FDT blob";
#address-cells = <1>;
images {
efi {
description = "Test EFI";
data = /incbin/("%(efi-bin)s");
type = "%(kernel-type)s";
arch = "%(sys-arch)s";
os = "efi";
compression = "%(efi-comp)s";
load = <0x0>;
entry = <0x0>;
};
fdt {
description = "Test FDT";
data = /incbin/("%(fdt-bin)s");
type = "flat_dt";
arch = "%(sys-arch)s";
compression = "%(fdt-comp)s";
};
};
configurations {
default = "config-efi-fdt";
config-efi-fdt {
description = "EFI FIT w/ FDT";
kernel = "efi";
fdt = "fdt";
};
config-efi-nofdt {
description = "EFI FIT w/o FDT";
kernel = "efi";
};
};
};
'''
# Define the parametrized FDT data to be used for DTB images generation.
FDT_DATA = '''
/dts-v1/;
/ {
#address-cells = <1>;
#size-cells = <1>;
model = "%(sys-arch)s %(fdt_type)s EFI FIT Boot Test";
compatible = "%(sys-arch)s";
reset@0 {
compatible = "%(sys-arch)s,reset";
reg = <0 4>;
};
};
'''
@pytest.mark.buildconfigspec('bootm_efi')
@pytest.mark.buildconfigspec('cmd_bootefi_hello_compile')
@pytest.mark.buildconfigspec('fit')
@pytest.mark.notbuildconfigspec('generate_acpi_table')
@pytest.mark.requiredtool('dtc')
def test_efi_fit_launch(u_boot_console):
"""Test handling of UEFI binaries inside FIT images.
The tests are trying to launch U-Boot's helloworld.efi embedded into
FIT images, in uncompressed or gzip compressed format.
Additionally, a sample FDT blob is created and embedded into the above
mentioned FIT images, in uncompressed or gzip compressed format.
For more details, see launch_efi().
The following test cases are currently defined and enabled:
- Launch uncompressed FIT EFI & internal FDT
- Launch uncompressed FIT EFI & FIT FDT
- Launch compressed FIT EFI & internal FDT
- Launch compressed FIT EFI & FIT FDT
"""
def net_pre_commands():
"""Execute any commands required to enable network hardware.
These commands are provided by the boardenv_* file; see the comment
at the beginning of this file.
"""
init_usb = cons.config.env.get('env__net_uses_usb', False)
if init_usb:
cons.run_command('usb start')
init_pci = cons.config.env.get('env__net_uses_pci', False)
if init_pci:
cons.run_command('pci enum')
def net_dhcp():
"""Execute the dhcp command.
The boardenv_* file may be used to enable/disable DHCP; see the
comment at the beginning of this file.
"""
has_dhcp = cons.config.buildconfig.get('config_cmd_dhcp', 'n') == 'y'
if not has_dhcp:
cons.log.warning('CONFIG_CMD_DHCP != y: Skipping DHCP network setup')
return False
test_dhcp = cons.config.env.get('env__net_dhcp_server', False)
if not test_dhcp:
cons.log.info('No DHCP server available')
return False
cons.run_command('setenv autoload no')
output = cons.run_command('dhcp')
assert 'DHCP client bound to address ' in output
return True
def net_setup_static():
"""Set up a static IP configuration.
The configuration is provided by the boardenv_* file; see the comment at
the beginning of this file.
"""
has_dhcp = cons.config.buildconfig.get('config_cmd_dhcp', 'n') == 'y'
if not has_dhcp:
cons.log.warning('CONFIG_NET != y: Skipping static network setup')
return False
env_vars = cons.config.env.get('env__net_static_env_vars', None)
if not env_vars:
cons.log.info('No static network configuration is defined')
return False
for (var, val) in env_vars:
cons.run_command('setenv %s %s' % (var, val))
return True
def make_fpath(file_name):
"""Compute the path of a given (temporary) file.
Args:
file_name: The name of a file within U-Boot build dir.
Return:
The computed file path.
"""
return os.path.join(cons.config.build_dir, file_name)
def make_efi(fname, comp):
"""Create an UEFI binary.
This simply copies lib/efi_loader/helloworld.efi into U-Boot
build dir and, optionally, compresses the file using gzip.
Args:
fname: The target file name within U-Boot build dir.
comp: Flag to enable gzip compression.
Return:
The path of the created file.
"""
bin_path = make_fpath(fname)
util.run_and_log(cons,
['cp', make_fpath('lib/efi_loader/helloworld.efi'),
bin_path])
if comp:
util.run_and_log(cons, ['gzip', '-f', bin_path])
bin_path += '.gz'
return bin_path
def make_dtb(fdt_type, comp):
"""Create a sample DTB file.
Creates a DTS file and compiles it to a DTB.
Args:
fdt_type: The type of the FDT, i.e. internal, user.
comp: Flag to enable gzip compression.
Return:
The path of the created file.
"""
# Generate resources referenced by FDT.
fdt_params = {
'sys-arch': sys_arch,
'fdt_type': fdt_type,
}
# Generate a test FDT file.
dts = make_fpath('test-efi-fit-%s.dts' % fdt_type)
with open(dts, 'w') as file:
file.write(FDT_DATA % fdt_params)
# Build the test FDT.
dtb = make_fpath('test-efi-fit-%s.dtb' % fdt_type)
util.run_and_log(cons, ['dtc', '-I', 'dts', '-O', 'dtb', '-o', dtb, dts])
if comp:
util.run_and_log(cons, ['gzip', '-f', dtb])
dtb += '.gz'
return dtb
def make_fit(comp):
"""Create a sample FIT image.
Runs 'mkimage' to create a FIT image within U-Boot build dir.
Args:
comp: Enable gzip compression for the EFI binary and FDT blob.
Return:
The path of the created file.
"""
# Generate resources referenced by ITS.
its_params = {
'sys-arch': sys_arch,
'efi-bin': os.path.basename(make_efi('test-efi-fit-helloworld.efi', comp)),
'kernel-type': 'kernel' if comp else 'kernel_noload',
'efi-comp': 'gzip' if comp else 'none',
'fdt-bin': os.path.basename(make_dtb('user', comp)),
'fdt-comp': 'gzip' if comp else 'none',
}
# Generate a test ITS file.
its_path = make_fpath('test-efi-fit-helloworld.its')
with open(its_path, 'w') as file:
file.write(ITS_DATA % its_params)
# Build the test ITS.
fit_path = make_fpath('test-efi-fit-helloworld.fit')
util.run_and_log(
cons, [make_fpath('tools/mkimage'), '-f', its_path, fit_path])
return fit_path
def load_fit_from_host(fit):
"""Load the FIT image using the 'host load' command and return its address.
Args:
fit: Dictionary describing the FIT image to load, see env__efi_fit_test_file
in the comment at the beginning of this file.
Return:
The address where the file has been loaded.
"""
addr = fit.get('addr', None)
if not addr:
addr = util.find_ram_base(cons)
output = cons.run_command(
'host load hostfs - %x %s/%s' % (addr, fit['dn'], fit['fn']))
expected_text = ' bytes read'
size = fit.get('size', None)
if size:
expected_text = '%d' % size + expected_text
assert expected_text in output
return addr
def load_fit_from_tftp(fit):
"""Load the FIT image using the tftpboot command and return its address.
The file is downloaded from the TFTP server, its size and optionally its
CRC32 are validated.
Args:
fit: Dictionary describing the FIT image to load, see env__efi_fit_tftp_file
in the comment at the beginning of this file.
Return:
The address where the file has been loaded.
"""
addr = fit.get('addr', None)
if not addr:
addr = util.find_ram_base(cons)
file_name = fit['fn']
output = cons.run_command('tftpboot %x %s' % (addr, file_name))
expected_text = 'Bytes transferred = '
size = fit.get('size', None)
if size:
expected_text += '%d' % size
assert expected_text in output
expected_crc = fit.get('crc32', None)
if not expected_crc:
return addr
if cons.config.buildconfig.get('config_cmd_crc32', 'n') != 'y':
return addr
output = cons.run_command('crc32 $fileaddr $filesize')
assert expected_crc in output
return addr
def launch_efi(enable_fdt, enable_comp):
"""Launch U-Boot's helloworld.efi binary from a FIT image.
An external image file can be downloaded from TFTP, when related
details are provided by the boardenv_* file; see the comment at the
beginning of this file.
If the size of the TFTP file is not provided within env__efi_fit_tftp_file,
the test image is generated automatically and placed in the TFTP root
directory specified via the 'dn' field.
When running the tests on Sandbox, the image file is loaded directly
from the host filesystem.
Once the load address is available on U-Boot console, the 'bootm'
command is executed for either 'config-efi-fdt' or 'config-efi-nofdt'
FIT configuration, depending on the value of the 'enable_fdt' function
argument.
Eventually the 'Hello, world' message is expected in the U-Boot console.
Args:
enable_fdt: Flag to enable using the FDT blob inside FIT image.
enable_comp: Flag to enable GZIP compression on EFI and FDT
generated content.
"""
with cons.log.section('FDT=%s;COMP=%s' % (enable_fdt, enable_comp)):
if is_sandbox:
fit = {
'dn': cons.config.build_dir,
}
else:
# Init networking.
net_pre_commands()
net_set_up = net_dhcp()
net_set_up = net_setup_static() or net_set_up
if not net_set_up:
pytest.skip('Network not initialized')
fit = cons.config.env.get('env__efi_fit_tftp_file', None)
if not fit:
pytest.skip('No env__efi_fit_tftp_file binary specified in environment')
size = fit.get('size', None)
if not size:
if not fit.get('dn', None):
pytest.skip('Neither "size", nor "dn" info provided in env__efi_fit_tftp_file')
# Create test FIT image.
fit_path = make_fit(enable_comp)
fit['fn'] = os.path.basename(fit_path)
fit['size'] = os.path.getsize(fit_path)
# Copy image to TFTP root directory.
if fit['dn'] != cons.config.build_dir:
util.run_and_log(cons, ['mv', '-f', fit_path, '%s/' % fit['dn']])
# Load FIT image.
addr = load_fit_from_host(fit) if is_sandbox else load_fit_from_tftp(fit)
# Select boot configuration.
fit_config = 'config-efi-fdt' if enable_fdt else 'config-efi-nofdt'
# Try booting.
output = cons.run_command('bootm %x#%s' % (addr, fit_config))
if enable_fdt:
assert 'Booting using the fdt blob' in output
assert 'Hello, world' in output
assert '## Application failed' not in output
cons.restart_uboot()
cons = u_boot_console
# Array slice removes leading/trailing quotes.
sys_arch = cons.config.buildconfig.get('config_sys_arch', '"sandbox"')[1:-1]
is_sandbox = sys_arch == 'sandbox'
try:
if is_sandbox:
# Use our own device tree file, will be restored afterwards.
control_dtb = make_dtb('internal', False)
old_dtb = cons.config.dtb
cons.config.dtb = control_dtb
# Run tests
# - fdt OFF, gzip OFF
launch_efi(False, False)
# - fdt ON, gzip OFF
launch_efi(True, False)
if is_sandbox:
# - fdt OFF, gzip ON
launch_efi(False, True)
# - fdt ON, gzip ON
launch_efi(True, True)
finally:
if is_sandbox:
# Go back to the original U-Boot with the correct dtb.
cons.config.dtb = old_dtb
cons.restart_uboot()
| 33.579521
| 99
| 0.594693
| 2,060
| 15,413
| 4.317961
| 0.191262
| 0.012816
| 0.014165
| 0.008769
| 0.337718
| 0.239123
| 0.197976
| 0.178865
| 0.158291
| 0.140079
| 0
| 0.007421
| 0.309349
| 15,413
| 458
| 100
| 33.652838
| 0.828182
| 0.405826
| 0
| 0.2723
| 0
| 0
| 0.2816
| 0.034698
| 0
| 0
| 0.000708
| 0
| 0.032864
| 1
| 0.051643
| false
| 0
| 0.014085
| 0
| 0.131455
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
068bed0bd09441343b0ab11a87d3f70ca8cbcf66
| 2,234
|
py
|
Python
|
data_dictionary/data_dictionary.py
|
georgetown-analytics/DC-Bikeshare
|
9f5a6a3256cff15a29f0dca6e9a9d8098ab2df28
|
[
"MIT"
] | 11
|
2018-07-01T16:43:05.000Z
|
2020-07-17T19:08:16.000Z
|
data_dictionary/data_dictionary.py
|
noahnewberger/Bikeshare-DC
|
42676654d103cdaddfb76db76d1eece533251261
|
[
"MIT"
] | 5
|
2021-02-08T20:21:12.000Z
|
2021-12-13T19:47:04.000Z
|
data_dictionary/data_dictionary.py
|
noahnewberger/Bikeshare-DC
|
42676654d103cdaddfb76db76d1eece533251261
|
[
"MIT"
] | 5
|
2018-10-05T19:54:20.000Z
|
2020-10-27T11:54:09.000Z
|
#!/usr/bin/env python
import report, sys
import psycopg2.extras
parser = report.get_parser(sys.argv[0])
parser.add_argument('--title', '-t', required=False, dest='title', default="Data Dictionary", help='Report Title')
args = parser.parse_args()
conn = report.get_connection(args)
curs = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
def get_dictionary():
q = """
select t1.nspname as schema, t3.description, count(*) as count
from pg_namespace t1
join information_schema.tables t2 on t1.nspname = t2.table_schema
left outer join pg_description t3 on t1.oid = t3.objoid
where t1.nspname in ('public')
group by schema, description
order by schema
"""
curs.execute(q)
schemas = curs.fetchall()
for schema in schemas:
schema_name = schema['schema']
q = """
select table_name as table, t3.description
from information_schema.tables t1
join pg_class t2 on (table_name = relname)
left outer join pg_description t3 on (t2.oid = objoid and objsubid = 0)
where table_schema = '{schema_name}'
and table_name not like 'raster%'
and table_name not like 'spatial%'
and table_name not like '%2018%'
and table_name not like '%columns%'
order by table_name """.format(**vars())
curs.execute(q)
tables = curs.fetchall()
for table in tables:
table_name = table['table']
q = """
select column_name as column, data_type, is_nullable, t3.description
from information_schema.columns t1
join pg_class t2 on (t1.table_name = t2.relname)
left outer join pg_description t3 on (t2.oid = t3.objoid and t3.objsubid = t1.ordinal_position)
where table_schema = '{schema_name}'
and table_name = '{table_name}'
order by ordinal_position
""".format(**vars())
curs.execute(q)
table['columns'] = curs.fetchall()
schema['tables'] = tables
return schemas
tmpl_vars = {
'dictionary': get_dictionary(),
'title': args.title
}
report.generate_report(tmpl_vars, args)
report.generate_csv(tmpl_vars, args)
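# Hedged aside (not part of the original script): the queries above splice
# schema/table names in with str.format, which is acceptable only because the
# names come out of the catalog itself. psycopg2 also offers an
# identifier-safe composition API for the same job:
#
#   from psycopg2 import sql
#   q = sql.SQL("select * from {}.{}").format(
#       sql.Identifier(schema_name), sql.Identifier(table_name))
#   curs.execute(q)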
| 29.394737
| 114
| 0.637422
| 290
| 2,234
| 4.762069
| 0.303448
| 0.071687
| 0.043447
| 0.043447
| 0.289645
| 0.162201
| 0.137581
| 0.115858
| 0.060825
| 0.060825
| 0
| 0.019916
| 0.258281
| 2,234
| 75
| 115
| 29.786667
| 0.813518
| 0.008953
| 0
| 0.145455
| 0
| 0.018182
| 0.59286
| 0.034343
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018182
| false
| 0
| 0.036364
| 0
| 0.072727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
068d0a9c6eb823b33105c8883388612ae4b08f65
| 1,112
|
py
|
Python
|
LeetCode/InsertionLL.py
|
Jaidev810/Competitive-Questions
|
5d5b28be69e8572e9b4353e9790ee39b56769fc3
|
[
"MIT"
] | 1
|
2021-02-27T06:12:55.000Z
|
2021-02-27T06:12:55.000Z
|
LeetCode/InsertionLL.py
|
Jaidev810/Competitive-Questions
|
5d5b28be69e8572e9b4353e9790ee39b56769fc3
|
[
"MIT"
] | 1
|
2021-02-02T08:52:17.000Z
|
2021-02-03T08:19:12.000Z
|
LeetCode/InsertionLL.py
|
Jaidev810/Competitive-Questions
|
5d5b28be69e8572e9b4353e9790ee39b56769fc3
|
[
"MIT"
] | null | null | null |
class LinkedList:
    def __init__(self, data, next=None):
self.data = data
self.next = next
def takeinputLL():
inputlist = [int(x) for x in input().split()]
head = None
temp = None
for cur in inputlist:
if cur == -1:
break
Newnode = LinkedList(cur)
if head is None:
head = Newnode
temp = head
else:
temp.next = Newnode
temp = temp.next
return head
def printLL(head):
while head is not None:
print(head.data, end='->')
head = head.next
print('None')
def insertionLL(head):
test = LinkedList(0, head)
curr = head
while curr.next is not None:
if curr.next.data >= curr.data:
curr = curr.next
else:
temp = curr.next
temp1 = test
curr.next = curr.next.next
while temp1.next.data <= temp.data:
temp1 = temp1.next
temp1.next, temp.next = temp, temp1.next
return test.next
head = takeinputLL()
printLL(insertionLL(head))
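# Hedged usage example (input is illustrative): entering "4 2 3 1 -1" builds
# 4->2->3->1->None, and insertionLL() returns the sorted list, so the script
# prints 1->2->3->4->None.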
| 19.508772
| 52
| 0.522482
| 132
| 1,112
| 4.371212
| 0.272727
| 0.083189
| 0.031196
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011561
| 0.377698
| 1,112
| 57
| 53
| 19.508772
| 0.822254
| 0
| 0
| 0.05
| 0
| 0
| 0.008985
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0
| 0
| 0.175
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
068db78fb9e1cc510a957bc841fd463a0fc7de6a
| 2,581
|
py
|
Python
|
migrations/versions/458a7da0c9da_.py
|
dmiklic/psiholeks-web
|
68dda07228a53790ab1e797336bb236031a544de
|
[
"MIT"
] | null | null | null |
migrations/versions/458a7da0c9da_.py
|
dmiklic/psiholeks-web
|
68dda07228a53790ab1e797336bb236031a544de
|
[
"MIT"
] | 1
|
2018-05-01T09:15:12.000Z
|
2018-05-01T09:25:03.000Z
|
migrations/versions/458a7da0c9da_.py
|
dmiklic/psiholeks-web
|
68dda07228a53790ab1e797336bb236031a544de
|
[
"MIT"
] | null | null | null |
"""empty message
Revision ID: 458a7da0c9da
Revises:
Create Date: 2018-05-01 21:15:27.029811
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '458a7da0c9da'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('rijeci',
sa.Column('rijec', sa.Unicode(length=60), nullable=False),
sa.Column('konkretnost_m', sa.Float(), nullable=True),
sa.Column('konkretnost_std', sa.Float(), nullable=True),
sa.Column('predocivost_m', sa.Float(), nullable=True),
sa.Column('predocivost_std', sa.Float(), nullable=True),
sa.Column('dob_usvajanja_m', sa.Float(), nullable=True),
sa.Column('dob_usvajanja_std', sa.Float(), nullable=True),
sa.Column('subj_frekvencija_m', sa.Float(), nullable=True),
sa.Column('subj_frekvencija_std', sa.Float(), nullable=True),
sa.PrimaryKeyConstraint('rijec')
)
op.create_index(op.f('ix_rijeci_dob_usvajanja_m'), 'rijeci', ['dob_usvajanja_m'], unique=False)
op.create_index(op.f('ix_rijeci_dob_usvajanja_std'), 'rijeci', ['dob_usvajanja_std'], unique=False)
op.create_index(op.f('ix_rijeci_konkretnost_m'), 'rijeci', ['konkretnost_m'], unique=False)
op.create_index(op.f('ix_rijeci_konkretnost_std'), 'rijeci', ['konkretnost_std'], unique=False)
op.create_index(op.f('ix_rijeci_predocivost_m'), 'rijeci', ['predocivost_m'], unique=False)
op.create_index(op.f('ix_rijeci_predocivost_std'), 'rijeci', ['predocivost_std'], unique=False)
op.create_index(op.f('ix_rijeci_subj_frekvencija_m'), 'rijeci', ['subj_frekvencija_m'], unique=False)
op.create_index(op.f('ix_rijeci_subj_frekvencija_std'), 'rijeci', ['subj_frekvencija_std'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_rijeci_subj_frekvencija_std'), table_name='rijeci')
op.drop_index(op.f('ix_rijeci_subj_frekvencija_m'), table_name='rijeci')
op.drop_index(op.f('ix_rijeci_predocivost_std'), table_name='rijeci')
op.drop_index(op.f('ix_rijeci_predocivost_m'), table_name='rijeci')
op.drop_index(op.f('ix_rijeci_konkretnost_std'), table_name='rijeci')
op.drop_index(op.f('ix_rijeci_konkretnost_m'), table_name='rijeci')
op.drop_index(op.f('ix_rijeci_dob_usvajanja_std'), table_name='rijeci')
op.drop_index(op.f('ix_rijeci_dob_usvajanja_m'), table_name='rijeci')
op.drop_table('rijeci')
# ### end Alembic commands ###
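# Hedged usage note (not part of the generated file): a revision like this is
# normally applied and rolled back through Alembic's CLI, e.g.
#   alembic upgrade head
#   alembic downgrade -1
# or, when driven through Flask-Migrate as the migrations/ layout suggests:
#   flask db upgrade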
| 46.089286
| 109
| 0.720651
| 371
| 2,581
| 4.719677
| 0.177898
| 0.063963
| 0.073101
| 0.091376
| 0.685323
| 0.685323
| 0.659623
| 0.588806
| 0.442033
| 0.384923
| 0
| 0.014873
| 0.114297
| 2,581
| 55
| 110
| 46.927273
| 0.751094
| 0.109647
| 0
| 0
| 0
| 0
| 0.351172
| 0.18222
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054054
| false
| 0
| 0.054054
| 0
| 0.108108
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
068fc5e74266b5c9c2303aed1e80240bd5fd0b7c
| 573
|
py
|
Python
|
mimic/modalities/MimicLateral.py
|
Jimmy2027/MoPoE-MIMIC
|
d167719b0dc7ba002b7421eb82a83e47d2437795
|
[
"MIT"
] | 1
|
2021-09-30T07:56:46.000Z
|
2021-09-30T07:56:46.000Z
|
mimic/modalities/MimicLateral.py
|
Jimmy2027/MoPoE-MIMIC
|
d167719b0dc7ba002b7421eb82a83e47d2437795
|
[
"MIT"
] | null | null | null |
mimic/modalities/MimicLateral.py
|
Jimmy2027/MoPoE-MIMIC
|
d167719b0dc7ba002b7421eb82a83e47d2437795
|
[
"MIT"
] | null | null | null |
import torch
import mimic.modalities.utils
from mimic.modalities.Modality import ModalityIMG
class MimicLateral(ModalityIMG):
def __init__(self, enc, dec, args):
self.name = 'Lateral'
self.likelihood_name = 'laplace'
self.data_size = torch.Size((1, args.img_size, args.img_size))
super().__init__(data_size=self.data_size)
self.gen_quality_eval = True
self.file_suffix = '.png'
self.encoder = enc
self.decoder = dec
self.likelihood = mimic.modalities.utils.get_likelihood(self.likelihood_name)
| 31.833333
| 85
| 0.687609
| 72
| 573
| 5.208333
| 0.486111
| 0.12
| 0.106667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002212
| 0.211169
| 573
| 17
| 86
| 33.705882
| 0.827434
| 0
| 0
| 0
| 0
| 0
| 0.031414
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.214286
| 0
| 0.357143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0693b9613a135ff67d5413df7255909db8145fcb
| 1,131
|
py
|
Python
|
setup.py
|
Sipondo/ulix-dexflow
|
de46482fe08e3d600dd5da581f0524b55e5df961
|
[
"MIT"
] | 5
|
2021-06-25T16:44:38.000Z
|
2021-12-31T01:29:00.000Z
|
setup.py
|
Sipondo/ulix-dexflow
|
de46482fe08e3d600dd5da581f0524b55e5df961
|
[
"MIT"
] | null | null | null |
setup.py
|
Sipondo/ulix-dexflow
|
de46482fe08e3d600dd5da581f0524b55e5df961
|
[
"MIT"
] | 1
|
2021-06-25T20:33:47.000Z
|
2021-06-25T20:33:47.000Z
|
import os, sys, shutil
from cx_Freeze import setup, Executable
from pathlib import Path
def copytree(src, dst, symlinks=False, ignore=None):
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
shutil.copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
# Dependencies are automatically detected, but it might need fine tuning.
additional_modules = []
build_exe_options = {
"includes": additional_modules,
"packages": [
"moderngl",
"moderngl_window",
"pyglet",
"moderngl_window.context.pyglet",
"glcontext",
"moderngl_window.loaders.texture",
"moderngl_window.loaders.program",
],
}
base = None
if sys.platform == "win32":
base = "Win32GUI"
setup(
name="Catchbase",
version="1.0",
description="Play your fangame",
options={"build_exe": build_exe_options},
executables=[Executable(script="game.py", base=base)],
)
for x in Path("build").glob("*"):
p = x
break
copytree("resources", str(p / "resources"))
| 22.62
| 73
| 0.622458
| 137
| 1,131
| 5.051095
| 0.569343
| 0.080925
| 0.028902
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008149
| 0.240495
| 1,131
| 49
| 74
| 23.081633
| 0.797439
| 0.062776
| 0
| 0
| 0
| 0
| 0.215705
| 0.087039
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026316
| false
| 0
| 0.078947
| 0
| 0.105263
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
069788761b0d146c14baf5d90bdb0884306cd8a1
| 472
|
py
|
Python
|
python/readGeoJsonFIle.py
|
toddstavish/BEE-CSharp
|
223e8ef64d582e625d36a3a2db4e0b53deddf057
|
[
"Apache-2.0"
] | null | null | null |
python/readGeoJsonFIle.py
|
toddstavish/BEE-CSharp
|
223e8ef64d582e625d36a3a2db4e0b53deddf057
|
[
"Apache-2.0"
] | null | null | null |
python/readGeoJsonFIle.py
|
toddstavish/BEE-CSharp
|
223e8ef64d582e625d36a3a2db4e0b53deddf057
|
[
"Apache-2.0"
] | null | null | null |
def importFromGeoJson(geoJsonName):
    from osgeo import ogr  # GDAL/OGR bindings used throughout
    #driver = ogr.GetDriverByName('geojson')
    dataSource = ogr.Open(geoJsonName, 0)
layer = dataSource.GetLayer()
print(layer.GetFeatureCount())
polys = []
image_id = 1
building_id = 0
for feature in layer:
building_id = building_id + 1
polys.append({'ImageId': feature.GetField('ImageId'), 'BuildingId': feature.GetField('BuildingId'), 'poly': feature.GetGeometryRef()})
return polys
| 29.5
| 142
| 0.669492
| 49
| 472
| 6.367347
| 0.591837
| 0.096154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01061
| 0.201271
| 472
| 16
| 143
| 29.5
| 0.816976
| 0.082627
| 0
| 0
| 0
| 0
| 0.08776
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.090909
| 0
| 0.272727
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
069859b4e100fade3b9371a57b0661bbf0c77719
| 1,518
|
py
|
Python
|
DailyCodingProblem/52_Google_LRU.py
|
RafayAK/CodingPrep
|
718eccb439db0f6e727806964766a40e8234c8a9
|
[
"MIT"
] | 5
|
2019-09-07T17:31:17.000Z
|
2022-03-05T09:59:46.000Z
|
DailyCodingProblem/52_Google_LRU.py
|
RafayAK/CodingPrep
|
718eccb439db0f6e727806964766a40e8234c8a9
|
[
"MIT"
] | null | null | null |
DailyCodingProblem/52_Google_LRU.py
|
RafayAK/CodingPrep
|
718eccb439db0f6e727806964766a40e8234c8a9
|
[
"MIT"
] | 2
|
2019-09-07T17:31:24.000Z
|
2019-10-28T16:10:52.000Z
|
"""
Good morning! Here's your coding interview problem for today.
This problem was asked by Google.
Implement an LRU (Least Recently Used) cache.
It should be able to be initialized with a cache size n, and contain the following methods:
set(key, value): sets key to value. If there are already n items in
the cache and we are adding a new item,
then it should also remove the least recently used item.
get(key): gets the value at key. If no such key exists, return null.
Each operation should run in O(1) time.
"""
class lru:
def __init__(self, n):
self._cache = dict()
self._cache_size = n
def set(self, key, value):
        if len(self._cache) == 0 or len(self._cache) < self._cache_size:
            # add value to dict
            self._cache[key] = value
        else:
            # evict the oldest entry (first key in insertion order)
            del self._cache[next(iter(self._cache))]
            # now add new data
            self._cache[key] = value
            assert len(self._cache) == self._cache_size
def get(self, key):
if key in self._cache:
return self._cache[key]
else:
return None
if __name__ == '__main__':
lru_cache = lru(5)
assert not lru_cache.get(key='a')
lru_cache.set('a', 1)
assert lru_cache.get(key='a') == 1
lru_cache.set('b', 2)
lru_cache.set('c', 3)
lru_cache.set('d', 4)
lru_cache.set('f', 6)
lru_cache.set('e', 5)
assert not lru_cache.get(key='a')
assert lru_cache.get('e') == 5
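# Hedged aside (not part of the original solution): the class above evicts in
# insertion order and get() never refreshes recency, so it behaves like a FIFO
# cache rather than a true LRU. A true O(1) LRU sketch using OrderedDict:
from collections import OrderedDict

class lru_ordered:
    def __init__(self, n):
        self._cache = OrderedDict()
        self._cache_size = n

    def set(self, key, value):
        if key in self._cache:
            self._cache.move_to_end(key)
        elif len(self._cache) >= self._cache_size:
            self._cache.popitem(last=False)  # drop the least recently used
        self._cache[key] = value

    def get(self, key):
        if key not in self._cache:
            return None
        self._cache.move_to_end(key)  # a read also counts as a use
        return self._cache[key]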
| 24.885246
| 91
| 0.601449
| 237
| 1,518
| 3.687764
| 0.417722
| 0.133867
| 0.075515
| 0.048055
| 0.131579
| 0.114416
| 0.057208
| 0.057208
| 0
| 0
| 0
| 0.011091
| 0.28722
| 1,518
| 60
| 92
| 25.3
| 0.796673
| 0.38274
| 0
| 0.214286
| 0
| 0
| 0.019481
| 0
| 0
| 0
| 0
| 0
| 0.178571
| 1
| 0.107143
| false
| 0
| 0
| 0
| 0.214286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
069b851f5bdd3f1be09d224c228765a0b963eeeb
| 624
|
py
|
Python
|
news_buddy/tasks/post_to_solr.py
|
izacus/newsbuddy
|
f26e94f54bb8eeeb46fc48e697f6dd062607a7ea
|
[
"MIT"
] | null | null | null |
news_buddy/tasks/post_to_solr.py
|
izacus/newsbuddy
|
f26e94f54bb8eeeb46fc48e697f6dd062607a7ea
|
[
"MIT"
] | null | null | null |
news_buddy/tasks/post_to_solr.py
|
izacus/newsbuddy
|
f26e94f54bb8eeeb46fc48e697f6dd062607a7ea
|
[
"MIT"
] | null | null | null |
def post_to_solr(article):
import settings
from pysolarized import solr, to_solr_date
solr_int = solr.Solr(settings.SOLR_ENDPOINT_URLS, settings.SOLR_DEFAULT_ENDPOINT)
# Build documents for solr dispatch
doc = {"id": article["id"], "title": article["title"],
"source": article["source"], "language": article["language"],
"source_url": article["source_url"], "content": article["text"],
"published": to_solr_date(article["published"])}
if article["author"] is not None:
doc["author"] = article["author"]
solr_int.add(doc)
solr_int._addFlushBatch()
| 34.666667
| 85
| 0.657051
| 75
| 624
| 5.253333
| 0.453333
| 0.045685
| 0.050761
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.195513
| 624
| 18
| 86
| 34.666667
| 0.784861
| 0.052885
| 0
| 0
| 0
| 0
| 0.185374
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.166667
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
069dac451eea987083fb0222c0d932e8a5b6741b
| 2,462
|
py
|
Python
|
services/web/project/routes/api.py
|
sthe0/test-bot-fullstack
|
602c876177eb16958748a9e46274533759ff5792
|
[
"MIT"
] | null | null | null |
services/web/project/routes/api.py
|
sthe0/test-bot-fullstack
|
602c876177eb16958748a9e46274533759ff5792
|
[
"MIT"
] | null | null | null |
services/web/project/routes/api.py
|
sthe0/test-bot-fullstack
|
602c876177eb16958748a9e46274533759ff5792
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from flask import Blueprint, jsonify, request
from functools import wraps
from sqlalchemy import desc
from project.common import app, db, fb_api
from project.config import ApiConfig
from project.models import Client, Message
api = Blueprint('api', __name__)
def make_error(message):
return jsonify(error=message), 500
def verify_token(f):
@wraps(f)
def wrapper(*args, **kwargs):
if request.args.get('auth_token') != ApiConfig.AUTH_TOKEN:
return make_error('Unauthorized')
return f(*args, **kwargs)
return wrapper
@api.route('/bot/api/check')
@verify_token
def check():
return 'ok'
@api.route('/bot/api/clients')
@verify_token
def clients():
offset = int(request.args.get('start') or '0')
limit = int(request.args.get('count') or '10')
clients = []
for user in db.session.query(Client).order_by(Client.id).offset(offset).limit(limit):
clients.append(user.to_json())
return jsonify(clients)
@api.route('/bot/api/messages/<client_id>')
@verify_token
def messages(client_id):
if not client_id:
return make_error('No client_id provided')
offset = int(request.args.get('start') or '0')
limit = int(request.args.get('count') or '10')
messages = []
for message in (
db.session.query(Message)
.filter(Message.client_id == client_id)
.order_by(desc(Message.date))
.offset(offset)
.limit(limit)
):
messages.append(message.to_json())
return jsonify(messages)
@api.route('/bot/api/send/tag/<client_id>')
@verify_token
def send_tag(client_id):
text = request.args.get('text', '')
tag = request.args.get('tag', 'ACCOUNT_UPDATE')
if not client_id:
return make_error('No recipient_id provided')
if not text:
return make_error('No text provided')
db.session.add(Message(client_id=client_id, text=text, from_client=False))
db.session.commit()
return jsonify(fb_api.send_tag_message(client_id, text, tag))
@api.route('/bot/api/send/message/<client_id>')
@verify_token
def send_message(client_id):
text = request.args.get('text', '')
if not client_id:
return make_error('No recipient_id provided')
if not text:
return make_error('No text provided')
db.session.add(Message(client_id=client_id, text=text, from_client=False))
db.session.commit()
return jsonify(fb_api.send_message(client_id, text))
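# Hedged usage sketch (host and token are hypothetical): every route above
# expects the shared token as a query parameter, e.g.
#   curl 'https://example.org/bot/api/clients?auth_token=<AUTH_TOKEN>&start=0&count=10'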
| 27.662921
| 89
| 0.675467
| 348
| 2,462
| 4.623563
| 0.227011
| 0.094469
| 0.069608
| 0.043505
| 0.462399
| 0.391548
| 0.359229
| 0.321939
| 0.303294
| 0.303294
| 0
| 0.00498
| 0.184403
| 2,462
| 88
| 90
| 27.977273
| 0.796315
| 0.00853
| 0
| 0.347826
| 0
| 0
| 0.123001
| 0.03731
| 0
| 0
| 0
| 0
| 0
| 1
| 0.115942
| false
| 0
| 0.086957
| 0.028986
| 0.405797
| 0.028986
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
069f9b47635b756c567cad2b645af0001f7d8f95
| 4,045
|
py
|
Python
|
multi_view_ctrl/grid_element_div.py
|
imldresden/mcv-displaywall
|
d08cf6fab869ee03d8b3af203dd0e55b42ab4605
|
[
"MIT"
] | 2
|
2019-12-12T20:57:37.000Z
|
2021-09-29T02:59:19.000Z
|
multi_view_ctrl/grid_element_div.py
|
imldresden/mcv-displaywall
|
d08cf6fab869ee03d8b3af203dd0e55b42ab4605
|
[
"MIT"
] | null | null | null |
multi_view_ctrl/grid_element_div.py
|
imldresden/mcv-displaywall
|
d08cf6fab869ee03d8b3af203dd0e55b42ab4605
|
[
"MIT"
] | null | null | null |
from libavg import avg
from events.event_dispatcher import EventDispatcher
from multi_view_ctrl.grid_element import GridElement
from multi_view_ctrl.configurations.grid_element_div_configuration import GridElementDivConfigurations
class GridElementDiv(avg.DivNode, EventDispatcher):
def __init__(self, grid_element, grid_element_div_config=None, parent=None, **kwargs):
"""
:param grid_element: The grid element that is the base for this div.
:type grid_element: GridElement
:param grid_element_div_config: The configuration that is used to create this grid element div.
:type grid_element_div_config: GridElementDivConfigurations
:param parent: The parent of this div.
:type parent: DivNode
:param kwargs: All other parameters that are possible for the DivNode.
"""
super(GridElementDiv, self).__init__(**kwargs)
self.registerInstance(self, parent)
EventDispatcher.__init__(self)
self._grid_element = grid_element
self._grid_element_div_config = grid_element_div_config if grid_element_div_config else GridElementDivConfigurations()
avg.RectNode(
parent=self,
            pos=(self._grid_element_div_config.margin, self._grid_element_div_config.margin),
size=(self.size[0] - 2 * self._grid_element_div_config.margin,
self.size[1] - 2 * self._grid_element_div_config.margin),
strokewidth=self._grid_element_div_config.border_width,
color=self._grid_element_div_config.border_color,
fillcolor=self._grid_element_div_config.background_color,
fillopacity=1
)
self._internal_div = avg.DivNode(
parent=self,
pos=(self._grid_element_div_config.margin, self._grid_element_div_config.margin),
size=(self.size[0] - 2 * self._grid_element_div_config.margin,
self.size[1] - 2 * self._grid_element_div_config.margin),
crop=True
)
self._child_nodes = []
@property
def grid_id(self):
"""
:rtype: int
"""
return self._grid_element.id
@property
def child_nodes(self):
"""
:rtype: list[Node]
"""
return self._child_nodes
def get_rel_pos(self, pos):
"""
Calculates a relative pos to this grid element div.
:param pos: The source pos.
:type pos: tuple[float, float]
:return: The relative pos.
:rtype: tuple[float, float]
"""
return pos[0] - self.pos[0] - self._grid_element_div_config.margin, pos[1] - self.pos[1] - self._grid_element_div_config.margin
def is_pos_in(self, pos):
"""
Checks if a given pos lies inside in this grid element div.
:param pos: The pos to check for.
:type pos: tuple[float, float]
:return: Is the given pos in this element?
:rtype: bool
"""
return self.pos[0] <= pos[0] <= self.pos[0] + self.size[0] and self.pos[1] <= pos[1] <= self.pos[1] + self.size[1]
def append_child_for_grid(self, node):
"""
Appends the given node. It also sets the size of the node to the size of this grid element div.
:param node: The node to add to this grid element.
:type node: Node
"""
node.size = self._internal_div.size
node.view_id = self.grid_id
self._internal_div.appendChild(node)
self._child_nodes.append(node)
def start_listening(self):
"""
        Registers a callback to listen to changes to this grid element div. Listeners can register to any number of the
        provided events. For the required structure of the callbacks see below.
"""
pass
def stop_listening(self):
"""
        Stops listening to an event the listener has registered to previously. The provided callback needs to be the
        same one that was used to listen to the event in the first place.
"""
pass
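# Usage sketch (hypothetical values; assumes a running libavg scene): for a
# GridElementDiv `div` created with pos=(100, 100) and size=(400, 300),
#
#   if div.is_pos_in((250, 180)):             # inside the 100..500 x 100..400 box
#       local = div.get_rel_pos((250, 180))   # -> (150 - margin, 80 - margin)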
| 38.52381
| 135
| 0.648949
| 528
| 4,045
| 4.729167
| 0.246212
| 0.145374
| 0.134561
| 0.152183
| 0.291149
| 0.24189
| 0.148979
| 0.125751
| 0.125751
| 0.125751
| 0
| 0.00709
| 0.267738
| 4,045
| 104
| 136
| 38.894231
| 0.835922
| 0.317923
| 0
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0.041667
| 0.083333
| 0
| 0.354167
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
06a3f43967e178259c2fded854053a178b218002
| 208
|
py
|
Python
|
src/utils/const.py
|
yizhongw/TagNN-PDTB
|
9b944210bcc3851c65cb479ef705acbb1b45b08f
|
[
"MIT"
] | 14
|
2018-11-19T02:49:34.000Z
|
2022-02-18T04:00:31.000Z
|
src/utils/const.py
|
lidejian/TreeLSTM-PDTB
|
3f048d2a3daf3fb5e803037f9344f515d0e71450
|
[
"MIT"
] | null | null | null |
src/utils/const.py
|
lidejian/TreeLSTM-PDTB
|
3f048d2a3daf3fb5e803037f9344f515d0e71450
|
[
"MIT"
] | 5
|
2017-12-04T13:29:29.000Z
|
2018-05-07T08:45:04.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# author: yizhong
# created_at: 17-5-2 5:00 PM
PAD_WORD = '<blank>'
UNK_WORD = '<unk>'
BOS_WORD = '<s>'
EOS_WORD = '</s>'
NUM_WORD = '<num>'
PUNC_TAG = '<punc>'
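# Sketch of how these special tokens are commonly used when building a vocabulary
# (the `vocab` dict below is hypothetical, not part of this module):
#
#   specials = [PAD_WORD, UNK_WORD, BOS_WORD, EOS_WORD, NUM_WORD, PUNC_TAG]
#   vocab = {tok: idx for idx, tok in enumerate(specials)}
#   # e.g. vocab[PAD_WORD] == 0, so padded positions all map to index 0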
| 16
| 27
| 0.586538
| 34
| 208
| 3.382353
| 0.764706
| 0.086957
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045977
| 0.163462
| 208
| 12
| 28
| 17.333333
| 0.614943
| 0.403846
| 0
| 0
| 0
| 0
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
06a83d0998f9996abe66240e832c87433d984bc2
| 626
|
py
|
Python
|
src/learning_language/views.py
|
gsi-luis/djangolearning
|
4cf1e016cfe2910c907a669e518f5233ae04fb12
|
[
"MIT"
] | 1
|
2020-07-05T18:33:33.000Z
|
2020-07-05T18:33:33.000Z
|
src/learning_language/views.py
|
gsi-luis/djangolearning
|
4cf1e016cfe2910c907a669e518f5233ae04fb12
|
[
"MIT"
] | 2
|
2021-03-30T13:49:58.000Z
|
2021-06-10T19:43:27.000Z
|
src/learning_language/views.py
|
gsi-luis/djangolearning
|
4cf1e016cfe2910c907a669e518f5233ae04fb12
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from .forms import LanguageForm
from learning_django import settings
from django.utils import translation
def index(request):
language_default = settings.LANGUAGE_CODE
if request.method == "POST":
form = LanguageForm(request.POST)
if form.is_valid():
language_default = request.POST['language_field']
else:
form = LanguageForm()
context = {
'form': form,
'language_default': language_default
}
translation.activate(language_default)
return render(request, 'learning_language/language_index.html', context)
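# For reference, a minimal LanguageForm compatible with this view might look like
# the sketch below (hypothetical; the real form lives in .forms):
#
#   from django import forms
#   from django.conf import settings
#
#   class LanguageForm(forms.Form):
#       language_field = forms.ChoiceField(choices=settings.LANGUAGES)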
| 27.217391
| 76
| 0.701278
| 68
| 626
| 6.294118
| 0.426471
| 0.175234
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.214058
| 626
| 22
| 77
| 28.454545
| 0.869919
| 0
| 0
| 0
| 0
| 0
| 0.119808
| 0.059105
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.222222
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
06ada35b71f676f14ae2a8fbfcb628afacd0c4d8
| 512
|
py
|
Python
|
oj2.py
|
YanshuHu/combinatoricsoj2
|
51fa8cf06042e63642b8407d12de99d22f0e7a3b
|
[
"Apache-2.0"
] | null | null | null |
oj2.py
|
YanshuHu/combinatoricsoj2
|
51fa8cf06042e63642b8407d12de99d22f0e7a3b
|
[
"Apache-2.0"
] | null | null | null |
oj2.py
|
YanshuHu/combinatoricsoj2
|
51fa8cf06042e63642b8407d12de99d22f0e7a3b
|
[
"Apache-2.0"
] | null | null | null |
def main():
variable1 = input()
variable2 = input()
a = variable1.split()
b = variable2.split()
first_line = []
second_line = []
for i in a:
first_line.append(int(i))
for i in b:
second_line.append(int(i))
code(first_line[0], second_line)
def code(target, number):
ways = [1]+[0]*target
for value in number:
for i in range(value, target+1):
ways[i] += ways[i-value]
print(ways[target])
if __name__ == '__main__':
main()
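# The `code` function is a standard coin-change count: ways[i] accumulates the number
# of multisets drawn from `number` that sum to i. For example:
#
#   code(4, [1, 2])   # prints 3  (1+1+1+1, 1+1+2, 2+2)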
| 20.48
| 40
| 0.564453
| 71
| 512
| 3.873239
| 0.380282
| 0.098182
| 0.065455
| 0.101818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021978
| 0.289063
| 512
| 24
| 41
| 21.333333
| 0.733516
| 0
| 0
| 0
| 0
| 0
| 0.015625
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0
| 0
| 0.1
| 0.05
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
06af865f1a3973785536a7d3858ef8ea324bb911
| 1,437
|
py
|
Python
|
tests/bugs/core_4158_test.py
|
FirebirdSQL/firebird-qa
|
96af2def7f905a06f178e2a80a2c8be4a4b44782
|
[
"MIT"
] | 1
|
2022-02-05T11:37:13.000Z
|
2022-02-05T11:37:13.000Z
|
tests/bugs/core_4158_test.py
|
FirebirdSQL/firebird-qa
|
96af2def7f905a06f178e2a80a2c8be4a4b44782
|
[
"MIT"
] | 1
|
2021-09-03T11:47:00.000Z
|
2021-09-03T12:42:10.000Z
|
tests/bugs/core_4158_test.py
|
FirebirdSQL/firebird-qa
|
96af2def7f905a06f178e2a80a2c8be4a4b44782
|
[
"MIT"
] | 1
|
2021-06-30T14:14:16.000Z
|
2021-06-30T14:14:16.000Z
|
#coding:utf-8
#
# id: bugs.core_4158
# title: Regression: LIKE with escape does not work
# description:
# tracker_id: CORE-4158
# min_versions: ['2.0.7']
# versions: 2.0.7
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 2.0.7
# resources: None
substitutions_1 = []
init_script_1 = """
recreate table tab1 (
id int constraint pk_tab1 primary key,
val varchar(30)
);
insert into tab1 (id, val) values (1, 'abcdef');
insert into tab1 (id, val) values (2, 'abc_ef');
insert into tab1 (id, val) values (3, 'abc%ef');
insert into tab1 (id, val) values (4, 'abc&%ef');
insert into tab1 (id, val) values (5, 'abc&_ef');
"""
db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)
test_script_1 = """
set list on;
select id, val from tab1 where val like 'abc&%ef' escape '&';
select id, val from tab1 where val like 'abc&_ef' escape '&';
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
ID 3
VAL abc%ef
ID 2
VAL abc_ef
"""
@pytest.mark.version('>=2.0.7')
def test_1(act_1: Action):
act_1.expected_stdout = expected_stdout_1
act_1.execute()
assert act_1.clean_stdout == act_1.clean_expected_stdout
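# Note on the expected output: with ESCAPE '&', the escape character strips the
# wildcard meaning of the character that follows it, so 'abc&%ef' matches only the
# literal string 'abc%ef' (ID 3) and 'abc&_ef' only the literal 'abc_ef' (ID 2).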
| 26.127273
| 70
| 0.592902
| 209
| 1,437
| 3.880383
| 0.373206
| 0.049322
| 0.086313
| 0.098644
| 0.276202
| 0.276202
| 0.21455
| 0.21455
| 0.103576
| 0.103576
| 0
| 0.059339
| 0.284621
| 1,437
| 54
| 71
| 26.611111
| 0.729572
| 0.160752
| 0
| 0.09375
| 0
| 0
| 0.58459
| 0
| 0
| 0
| 0
| 0
| 0.03125
| 1
| 0.03125
| false
| 0
| 0.0625
| 0
| 0.09375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
06afc4b209dc7b6ac90802b9ff2ce19d8ee2b910
| 18,430
|
py
|
Python
|
trustyroles/arpd_update/arpd_update.py
|
hmcguire1/trustyroles
|
5dbe3d65353538f84f12f3ecef6de2a8cc3f731f
|
[
"MIT"
] | 2
|
2019-12-16T15:10:13.000Z
|
2020-02-24T20:13:40.000Z
|
trustyroles/arpd_update/arpd_update.py
|
hmcguire1/trustyroles
|
5dbe3d65353538f84f12f3ecef6de2a8cc3f731f
|
[
"MIT"
] | null | null | null |
trustyroles/arpd_update/arpd_update.py
|
hmcguire1/trustyroles
|
5dbe3d65353538f84f12f3ecef6de2a8cc3f731f
|
[
"MIT"
] | 1
|
2019-12-05T01:12:33.000Z
|
2019-12-05T01:12:33.000Z
|
"""
arpd_update focuses on easily editing the assume role policy document of a role.
"""
import os
import json
import logging
import argparse
from datetime import datetime
from typing import List, Dict, Optional
import boto3 # type: ignore
from botocore.exceptions import ClientError # type: ignore
LOGGER = logging.getLogger("IAM-ROLE-TRUST-POLICY")
logging.basicConfig(level=logging.WARNING)
PARSER = argparse.ArgumentParser()
def _main():
"""The _main method can take in a list of ARNs, role to update,
and method [get, update, remove, restore]."""
PARSER.add_argument(
"-a",
"--arn",
nargs="+",
required=False,
help="Add new ARNs to trust policy. Takes a comma-seperated list of ARNS.",
)
PARSER.add_argument(
"-u",
"--update_role",
type=str,
required=True,
help="Role for updating trust policy. Takes an role friendly name as string.",
)
PARSER.add_argument(
"-m",
"--method",
type=str,
required=False,
choices=["get", "update", "remove", "restore"],
help="Takes choice of method to get, update, or remove.",
)
PARSER.add_argument(
"-e",
"--add_external_id",
type=str,
required=False,
help="Takes an externalId as a string.",
)
PARSER.add_argument(
"--remove_external_id",
action="store_true",
required=False,
help="Method for removing externalId condition. Takes no arguments",
)
PARSER.add_argument(
"--json",
action="store_true",
required=False,
help="Add to print json in get method.",
)
PARSER.add_argument(
"--add_sid",
type=str,
required=False,
help="Add a Sid to trust policy. Takes a string.",
)
PARSER.add_argument(
"--remove_sid",
action="store_true",
required=False,
help="Remove a Sid from a trust policy. Takes no arguments.",
)
PARSER.add_argument(
"--backup_policy",
type=str,
required=False,
help="""Creates a backup of previous policy
in current directory as <ISO-time>.policy.bk""",
)
PARSER.add_argument(
"--dir_path",
type=str,
required=False,
help="Path to directory for backup policy. Takes a string",
)
PARSER.add_argument(
"--file_path",
type=str,
required=False,
help="File for backup policy. Takes a string",
)
PARSER.add_argument(
"--bucket",
type=str,
required=False,
help="S3 bucket name for backup policy. Takes a string",
)
PARSER.add_argument(
"--key",
type=str,
required=False,
help="S3 key name for restoring S3 policy. Takes a string",
)
args = vars(PARSER.parse_args())
if args["backup_policy"]:
if args["backup_policy"] == "local":
if args["dir_path"]:
dir_path = args["dir_path"]
else:
dir_path = os.getcwd()
bucket = None
elif args["backup_policy"] == "s3":
bucket = args["bucket"]
dir_path = None
else:
dir_path = os.getcwd()
bucket = ""
if args["method"] == "update":
        arpd = update_arn(
            args["update_role"],
            args["arn"],
            dir_path=dir_path,
            bucket=bucket,
            backup_policy=args["backup_policy"],
        )
print(json.dumps(arpd["Statement"][0], indent=4))
elif args["method"] == "remove":
        arpd = remove_arn(
            args["update_role"],
            args["arn"],
            dir_path=dir_path,
            bucket=bucket,
            backup_policy=args["backup_policy"],
        )
print(json.dumps(arpd["Statement"][0], indent=4))
elif args["method"] == "get":
arpd = get_arpd(args["update_role"])
if args["json"]:
print(json.dumps(arpd["Statement"][0], indent=4))
else:
print(f"\nARNS:")
if isinstance(arpd["Statement"][0]["Principal"]["AWS"], list):
for arn in arpd["Statement"][0]["Principal"]["AWS"]:
print(f" {arn}")
else:
print(f" {arpd['Statement'][0]['Principal']['AWS']}")
print(f"Conditions:")
if arpd["Statement"][0]["Condition"]:
print(f" {arpd['Statement'][0]['Condition']}")
elif args["method"] == "restore" and args["backup_policy"]:
if args["backup_policy"].lower() == "local" and args["file_path"]:
arpd = restore_from_backup(
role_name=args["update_role"],
location_type="local",
file_path=args["file_path"],
)
elif args["backup_policy"].lower() == "s3":
arpd = restore_from_backup(
role_name=args["update_role"],
location_type="s3",
file_path="",
key=args["key"],
bucket=bucket,
backup_policy=args["backup_policy"],
)
print(json.dumps(arpd["Statement"][0], indent=4))
if args["add_external_id"]:
arpd = add_external_id(
external_id=args["add_external_id"],
role_name=args["update_role"],
dir_path=dir_path,
bucket=bucket,
backup_policy=args["backup_policy"],
)
print(json.dumps(arpd["Statement"][0], indent=4))
if args["remove_external_id"]:
arpd = remove_external_id(
role_name=args["update_role"],
dir_path=dir_path,
bucket=bucket,
backup_policy=args["backup_policy"],
)
print(json.dumps(arpd["Statement"][0], indent=4))
if args["add_sid"]:
arpd = add_sid(
role_name=args["update_role"],
sid=args["add_sid"],
dir_path=dir_path,
bucket=bucket,
backup_policy=args["backup_policy"],
)
print(json.dumps(arpd["Statement"][0], indent=4))
if args["remove_sid"]:
arpd = remove_sid(
role_name=args["update_role"],
dir_path=dir_path,
bucket=bucket,
backup_policy=args["backup_policy"],
)
print(json.dumps(arpd["Statement"][0], indent=4))
def get_arpd(role_name: str, session=None, client=None) -> Dict:
"""The get_arpd method takes in a role_name as a string
and provides trusted ARNS and Conditions.
"""
if session:
iam_client = session.client("iam")
elif client:
iam_client = client
else:
iam_client = boto3.client("iam")
role = iam_client.get_role(RoleName=role_name)
return role["Role"]["AssumeRolePolicyDocument"]
def update_arn(
role_name: str,
arn_list: List,
dir_path: Optional[str],
client=None,
session=None,
backup_policy: Optional[str] = "",
bucket: Optional[str] = None,
) -> Dict:
"""The update_arn method takes a multiple ARNS(arn_list) and a role_name
to add to trust policy of suppplied role.
"""
if session:
iam_client = session.client("iam")
elif client:
iam_client = client
else:
iam_client = boto3.client("iam")
role = iam_client.get_role(RoleName=role_name)
arpd = role["Role"]["AssumeRolePolicyDocument"]
old_principal_list = arpd["Statement"][0]["Principal"]["AWS"]
if backup_policy:
if backup_policy.lower() == "local":
if dir_path:
retain_policy(
policy=arpd,
role_name=role_name,
location_type="local",
dir_path=dir_path,
)
else:
retain_policy(policy=arpd, role_name=role_name, location_type="local")
elif backup_policy.lower() == "s3":
retain_policy(
policy=arpd, role_name=role_name, location_type="s3", bucket=bucket
)
if isinstance(old_principal_list, list):
for arn in arn_list:
arpd["Statement"][0]["Principal"]["AWS"].append(arn)
    else:
        arpd["Statement"][0]["Principal"]["AWS"] = [old_principal_list]
        for arn in arn_list:
            arpd["Statement"][0]["Principal"]["AWS"].append(arn)
try:
iam_client.update_assume_role_policy(
RoleName=role_name, PolicyDocument=json.dumps(arpd)
)
return arpd
except ClientError as error:
raise error
def remove_arn(
role_name: str,
arn_list: List,
dir_path: Optional[str],
session=None,
client=None,
backup_policy: Optional[str] = "",
bucket: Optional[str] = None,
) -> Dict:
"""The remove_arn method takes in a string or multiple of ARNs and a role_name
to remove ARNS from trust policy of supplied role.
"""
if session:
iam_client = session.client("iam")
elif client:
iam_client = client
else:
iam_client = boto3.client("iam")
role = iam_client.get_role(RoleName=role_name)
arpd = role["Role"]["AssumeRolePolicyDocument"]
old_principal_list = arpd["Statement"][0]["Principal"]["AWS"]
if backup_policy:
if backup_policy.lower() == "local":
if dir_path:
retain_policy(
policy=arpd,
role_name=role_name,
location_type="local",
dir_path=dir_path,
)
else:
retain_policy(policy=arpd, role_name=role_name, location_type="local")
elif backup_policy.lower() == "s3":
retain_policy(
policy=arpd, role_name=role_name, location_type="s3", bucket=bucket
)
for arn in arn_list:
if arn in old_principal_list:
arpd["Statement"][0]["Principal"]["AWS"].remove(arn)
try:
iam_client.update_assume_role_policy(
RoleName=role_name, PolicyDocument=json.dumps(arpd)
)
return arpd
except ClientError as error:
raise error
def add_external_id(
role_name: str,
external_id: str,
dir_path: Optional[str],
client=None,
session=None,
backup_policy: Optional[str] = "",
bucket: Optional[str] = None,
) -> Dict:
"""
The add_external_id method takes an external_id and role_name as strings
to allow the addition of an externalId condition.
"""
if session:
iam_client = session.client("iam")
elif client:
iam_client = client
else:
iam_client = boto3.client("iam")
role = iam_client.get_role(RoleName=role_name)
arpd = role["Role"]["AssumeRolePolicyDocument"]
if backup_policy:
if backup_policy.lower() == "local":
if dir_path:
retain_policy(
policy=arpd,
role_name=role_name,
location_type="local",
dir_path=dir_path,
)
else:
retain_policy(policy=arpd, role_name=role_name, location_type="local")
elif backup_policy.lower() == "s3":
retain_policy(
policy=arpd, role_name=role_name, location_type="s3", bucket=bucket
)
arpd["Statement"][0]["Condition"] = {
"StringEquals": {"sts:ExternalId": external_id}
}
try:
iam_client.update_assume_role_policy(
RoleName=role_name, PolicyDocument=json.dumps(arpd)
)
return arpd
except ClientError as error:
raise error
def remove_external_id(
role_name: str,
dir_path: Optional[str],
session=None,
client=None,
backup_policy: Optional[str] = "",
bucket: Optional[str] = None,
) -> Dict:
"""The remove_external_id method takes a role_name as a string
to allow the removal of an externalId condition.
"""
if session:
iam_client = session.client("iam")
elif client:
iam_client = client
else:
iam_client = boto3.client("iam")
role = iam_client.get_role(RoleName=role_name)
arpd = role["Role"]["AssumeRolePolicyDocument"]
if backup_policy:
if backup_policy.lower() == "local":
if dir_path:
retain_policy(
policy=arpd,
role_name=role_name,
location_type="local",
dir_path=dir_path,
)
else:
retain_policy(policy=arpd, role_name=role_name, location_type="local")
elif backup_policy.lower() == "s3":
retain_policy(
policy=arpd, role_name=role_name, location_type="s3", bucket=bucket
)
arpd["Statement"][0]["Condition"] = {}
try:
iam_client.update_assume_role_policy(
RoleName=role_name, PolicyDocument=json.dumps(arpd)
)
return arpd
except ClientError as error:
raise error
def add_sid(
role_name: str,
sid: str,
dir_path: Optional[str],
session=None,
client=None,
backup_policy: str = "",
bucket: Optional[str] = None,
) -> Dict:
"""
The add_sid method adds a statement ID to
the assume role policy document
"""
if session:
iam_client = session.client("iam")
elif client:
iam_client = client
else:
iam_client = boto3.client("iam")
role = iam_client.get_role(RoleName=role_name)
arpd = role["Role"]["AssumeRolePolicyDocument"]
if backup_policy.lower() == "local":
if dir_path:
retain_policy(
policy=arpd,
role_name=role_name,
location_type="local",
dir_path=dir_path,
)
else:
retain_policy(policy=arpd, role_name=role_name, location_type="local")
elif backup_policy.lower() == "s3":
retain_policy(
policy=arpd, role_name=role_name, location_type="s3", bucket=bucket
)
arpd["Statement"][0]["Sid"] = sid
try:
iam_client.update_assume_role_policy(
RoleName=role_name, PolicyDocument=json.dumps(arpd)
)
return arpd
except ClientError as ex:
raise ex
def remove_sid(
role_name: str,
dir_path: Optional[str],
session=None,
client=None,
backup_policy: str = "",
bucket: Optional[str] = None,
) -> Dict:
"""
The remove_sid method removes the statement ID
from the assume role policy document
"""
if session:
iam_client = session.client("iam")
elif client:
iam_client = client
else:
iam_client = boto3.client("iam")
role = iam_client.get_role(RoleName=role_name)
arpd = role["Role"]["AssumeRolePolicyDocument"]
if backup_policy.lower() == "local":
if dir_path:
retain_policy(
policy=arpd,
role_name=role_name,
location_type="local",
dir_path=dir_path,
)
else:
retain_policy(policy=arpd, role_name=role_name, location_type="local")
elif backup_policy.lower() == "s3":
retain_policy(
policy=arpd, role_name=role_name, location_type="s3", bucket=bucket
)
if arpd["Statement"][0]["Sid"]:
arpd["Statement"][0].pop("Sid")
try:
iam_client.update_assume_role_policy(
RoleName=role_name, PolicyDocument=json.dumps(arpd)
)
except ClientError as error:
raise error
return arpd
def retain_policy(
role_name: str,
policy: Dict,
session=None,
client=None,
location_type: Optional[str] = None,
dir_path=os.getcwd(),
bucket: Optional[str] = None,
) -> None:
"""
The retain_policy method creates a backup of previous
policy in current directory by default as <ISO-time>.<RoleName>.bk or specified directory
for local file or with s3 to specified bucket and key name.
"""
assert location_type
if location_type.lower() == "local":
with open(
dir_path
+ "/"
+ datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
+ f".{role_name}.bk",
"w",
) as file:
json.dump(policy, file, ensure_ascii=False, indent=4)
elif location_type.lower() == "s3":
if session:
s3_client = session.client("s3")
elif client:
s3_client = client
else:
s3_client = boto3.client("s3")
try:
s3_client.put_object(
Bucket=bucket,
Key=datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
+ f".{role_name}.bk",
Body=json.dumps(policy).encode(),
)
except ClientError as error:
raise error
def restore_from_backup(
role_name: str,
location_type: str,
session=None,
client=None,
bucket: Optional[str] = None,
key: Optional[str] = None,
file_path: Optional[str] = None,
) -> Dict:
if session:
iam_client = session.client("iam")
elif client:
iam_client = client
else:
iam_client = boto3.client("iam")
if location_type.lower() == "local":
assert file_path
with open(file_path, "r") as file:
policy = file.read()
iam_client.update_assume_role_policy(RoleName=role_name, PolicyDocument=policy)
elif location_type.lower() == "s3":
if session:
s3_client = session.client("s3")
else:
s3_client = boto3.client("s3")
filename = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ") + f".{role_name}.dl"
s3_client.download_file(Bucket=bucket, Key=key, Filename=filename)
        # The downloaded object body is binary, so read bytes and decode to text.
        with open(filename, "rb") as file:
            policy = file.read().decode()
os.remove(filename)
iam_client.update_assume_role_policy(RoleName=role_name, PolicyDocument=policy)
return json.loads(policy)
if __name__ == "__main__":
_main()
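# CLI usage sketch (role name and ARN below are placeholders):
#
#   python arpd_update.py -u my-role -m get --json
#   python arpd_update.py -u my-role -m update \
#       -a arn:aws:iam::123456789012:user/alice --backup_policy local
#
# The same operations are available programmatically, e.g.:
#
#   arpd = get_arpd("my-role")   # assume-role policy document as a dict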
| 27.507463
| 93
| 0.566522
| 2,115
| 18,430
| 4.743735
| 0.089362
| 0.059005
| 0.033489
| 0.03947
| 0.711751
| 0.671384
| 0.624539
| 0.600817
| 0.586265
| 0.56354
| 0
| 0.006025
| 0.315572
| 18,430
| 669
| 94
| 27.54858
| 0.789361
| 0.066196
| 0
| 0.646833
| 0
| 0
| 0.135697
| 0.015612
| 0
| 0
| 0
| 0
| 0.003839
| 1
| 0.019194
| false
| 0
| 0.015355
| 0
| 0.049904
| 0.026871
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
06aff71efc0dec027a46c0058c117887035af9c9
| 7,471
|
py
|
Python
|
kartingpros/timetrial.py
|
Vishvak365/Karting-Pros
|
1c482cff78e7402c8da8870ff519eea760be4a34
|
[
"MIT"
] | 1
|
2021-06-28T21:55:18.000Z
|
2021-06-28T21:55:18.000Z
|
kartingpros/timetrial.py
|
wboyd600/Karting-Pros
|
4db4b9f075b152dfea79c89640c0bac1becce89b
|
[
"MIT"
] | 17
|
2020-11-27T14:33:39.000Z
|
2020-12-08T00:45:18.000Z
|
kartingpros/timetrial.py
|
wboyd600/Karting-Pros
|
4db4b9f075b152dfea79c89640c0bac1becce89b
|
[
"MIT"
] | 1
|
2021-06-27T20:27:38.000Z
|
2021-06-27T20:27:38.000Z
|
import pygame
import time
import math
import sys
from kartingpros import track, mainmenu, car, settings, loadimage
from kartingpros.loadimage import _load_image, _load_sound, _load_font
import numpy as np
from numpy import save
from kartingpros.car import Car
from pygame.locals import *
from pygame import mixer
import os
def completeLap(car, finish_line):
if (car.hitbox[1] < (finish_line[1] + 100)) and (car.hitbox[1] > (finish_line[1] - 100)):
if (car.hitbox[0] < (finish_line[0] + 15)) and (car.hitbox[0] > (finish_line[0] - 15)):
return True
def checkOutOfBounds(car):
x, y = 1920, 1080
if (car.position[0] > x or car.position[0] < 0 or car.position[1] > y or car.position[1] < 0):
return True
else:
return False
def checkpoint1(car, checkpoint, checkpoint_check):
if (car.hitbox[1] < (checkpoint[1] + 110)) and (car.hitbox[1] > (checkpoint[1] - 110)):
if (car.hitbox[0] < (checkpoint[0] + 15)) and (car.hitbox[0] > (checkpoint[0] - 15)):
checkpoint_check = checkpoint_check + 1
else:
checkpoint_check = checkpoint_check
return checkpoint_check
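# Quick sketch of the bounds check (the probe object below is a hypothetical
# stand-in for Car):
#
#   class _Probe:
#       position = (2000, 500)    # x is beyond the 1920-wide screen
#   checkOutOfBounds(_Probe())    # -> True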
def timeTrial(display_surface):
best_lap_time = 30000
trackImg = _load_image('./images/track1-min.png')
track1 = track.Track()
white = (0, 128, 0)
clock = pygame.time.Clock()
t0 = time.time()
# Car Setup
start_position = (1010, 144)
car = Car('./images/f1sprite.png', start_position)
car_group = pygame.sprite.Group(car)
# Lap logic
checkpoint_check = 0
pad_group = track1.getPads()
finish_line = (960, 50, 20, 125)
checkpoint = (960, 845, 10, 125)
# Countdown timer logic
countdownTimerStart = time.time()
countdownFinished = False
# Music for countdown sound
current_path = os.path.abspath(os.path.dirname(__file__))
absolute_path = os.path.join(
current_path, './sounds/race_coundown.mp3')
print(absolute_path)
mixer.init()
mixer.music.load(absolute_path)
mixer.music.set_volume(0.7)
mixer.music.play()
crowd = mixer.Sound(os.path.join(current_path, './sounds/crowd.wav'))
rev = mixer.Sound(os.path.join(current_path, './sounds/rev.wav'))
data_collection = settings.getSetting('collect_data_for_AI')
draw_hitbox = settings.getSetting('draw_hitbox')
i = 0
if data_collection:
# Data collection for machine learning
features = []
labels = []
right_press, left_press, up_press, down_press = 0, 0, 0, 0
while True:
pygame.display.flip()
if data_collection:
# Machine Learning Features
# Direction (%360), Position.X, Position.Y
feature = []
# Label(right,left,up,down)(1 or 0 for all)
label = []
# Draw the Track
# display_surface.fill(white)
display_surface.blit(trackImg, (0, 0))
# pad_group.draw(display_surface)
font = _load_font('./fonts/American Captain.ttf', 32)
if data_collection:
feature.append(car.direction % 360)
feature.append(int(car.position[0]))
feature.append(int(car.position[1]))
feature = np.array(feature)
feature = feature / feature.max(axis=0)
features.append(feature)
track.checkpoint(display_surface)
deltat = clock.tick(30)
# Update Car and draw
car_group.update(deltat)
car_group.draw(display_surface)
t1 = time.time()
dt = t1-t0
for event in pygame.event.get():
if event.type == QUIT:
sys.exit(0)
if not hasattr(event, 'key'):
continue
if event.key == K_RIGHT:
right_press = 1
elif event.key == K_SPACE:
car.speed = 0
elif event.key == K_LEFT:
left_press = 1
elif event.key == K_UP:
mixer.music.load(os.path.join(current_path, './sounds/rev.mp3'))
mixer.music.play(-1)
up_press = 1
elif event.key == K_DOWN:
down_press = 1
elif event.key == K_ESCAPE:
mixer.music.stop()
mixer.Sound.stop(crowd)
if data_collection:
np.save('features.npy', np.array(features))
np.save('labels.npy', np.array(labels))
mixer.music.stop()
mainmenu.main_menu(display_surface)
if event.type == KEYUP:
if event.key == pygame.K_RIGHT:
right_press = 0
elif event.key == pygame.K_LEFT:
left_press = 0
elif event.key == pygame.K_UP:
mixer.music.stop()
up_press = 0
elif event.key == pygame.K_DOWN:
down_press = 0
car.k_right = right_press * -5
car.k_left = left_press * 5
car.k_up = up_press * 2
car.k_down = down_press * -2
if up_press == 0 and down_press == 0 and int(car.speed) != 0:
car.k_down = -.2
car.k_up = 0
if data_collection:
labels.append([right_press, left_press, up_press, down_press])
# Check if car is on track
on_track = pygame.sprite.groupcollide(
car_group, pad_group, False, False)
# Slow down car if not on track
if not on_track:
car.setOffTrackSpeed()
else:
car.setRegularSpeed()
if draw_hitbox:
pygame.draw.rect(display_surface, (255, 0, 0), car.hitbox, 2)
checkpoint_check = checkpoint1(car, checkpoint, checkpoint_check)
# Countdown Timer Logic (program does not move forward until this is finished)
        while time.time() - countdownTimerStart < 4:
image = _load_image('./images/starting_lights/lights' +
str(int(time.time()-countdownTimerStart)+1)+'.png')
display_surface.blit(image, ((1920/2)-(768/2), 50))
fontBig = _load_font('./fonts/American Captain.ttf', 64)
t0 = time.time()
t1 = time.time()
dt = t1-t0
countdownFinished = True
pygame.display.update()
        if countdownFinished:
# Timer
timer_text = font.render(
"Time: " + str(round(dt, 3)), True, (255, 255, 255))
display_surface.blit(timer_text, (0, 0))
# Time to Beat
if best_lap_time != 30000:
best_lap_text = font.render(
"Time to Beat: "+str(best_lap_time), True, (255, 255, 255))
display_surface.blit(best_lap_text, (0, 30))
if checkpoint_check >= 1:
if completeLap(car, finish_line):
mixer.Sound.play(crowd)
if dt < best_lap_time:
best_lap_time = round(dt, 3)
t0, t1 = time.time(), time.time()
checkpoint_check = 0
# If car is out of screen
if checkOutOfBounds(car):
car.reset(start_position)
pygame.display.update()
| 33.959091
| 99
| 0.549458
| 893
| 7,471
| 4.443449
| 0.226204
| 0.041583
| 0.024194
| 0.016381
| 0.210938
| 0.177671
| 0.102823
| 0.03629
| 0
| 0
| 0
| 0.04176
| 0.342926
| 7,471
| 219
| 100
| 34.114155
| 0.766551
| 0.06465
| 0
| 0.141104
| 0
| 0
| 0.04237
| 0.014963
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02454
| false
| 0
| 0.07362
| 0
| 0.122699
| 0.006135
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
06b195aef83b65c429bf30fd2c08ed267c6351f6
| 2,204
|
py
|
Python
|
test/create_cert.py
|
finsberg/pytest-tornado
|
52ba5119310be5385ceed74ef94f4538660e3725
|
[
"Apache-2.0"
] | 123
|
2015-03-31T17:25:34.000Z
|
2021-12-16T12:14:38.000Z
|
test/create_cert.py
|
finsberg/pytest-tornado
|
52ba5119310be5385ceed74ef94f4538660e3725
|
[
"Apache-2.0"
] | 53
|
2015-02-04T06:02:21.000Z
|
2020-11-25T20:04:52.000Z
|
test/create_cert.py
|
finsberg/pytest-tornado
|
52ba5119310be5385ceed74ef94f4538660e3725
|
[
"Apache-2.0"
] | 43
|
2015-02-26T05:02:44.000Z
|
2021-12-17T10:08:44.000Z
|
# -*- coding: utf-8 -*-
"""
Create a cert with pyOpenSSL for tests.
Heavily based on python-opsi's OPSI.Util.Task.Certificate.
Source: https://github.com/opsi-org/python-opsi/blob/stable/OPSI/Util/Task/Certificate.py
"""
import argparse
import os
import random
import socket
from tempfile import NamedTemporaryFile
from OpenSSL import crypto
try:
import secrets
except ImportError:
secrets = None
def createCertificate(path):
"""
Creates a certificate.
"""
cert = crypto.X509()
cert.get_subject().C = "DE" # Country
cert.get_subject().ST = "HE" # State
cert.get_subject().L = "Wiesbaden" # Locality
cert.get_subject().O = "pytest-tornado" # Organisation
cert.get_subject().OU = "Testing Department" # organisational unit
cert.get_subject().CN = socket.getfqdn() # common name
# As described in RFC5280 this value is required and must be a
# positive and unique integer.
# Source: http://tools.ietf.org/html/rfc5280#page-19
cert.set_serial_number(random.randint(0, pow(2, 16)))
cert.gmtime_adj_notBefore(0)
cert.gmtime_adj_notAfter(60 * 60) # Valid 1 hour
k = crypto.PKey()
k.generate_key(crypto.TYPE_RSA, 2048)
cert.set_issuer(cert.get_subject())
cert.set_pubkey(k)
cert.set_version(2)
cert.sign(k, 'sha512')
certcontext = b"".join(
(
crypto.dump_certificate(crypto.FILETYPE_PEM, cert),
crypto.dump_privatekey(crypto.FILETYPE_PEM, k)
)
)
with open(path, "wt") as certfile:
certfile.write(certcontext.decode())
try:
with NamedTemporaryFile(mode="wb", delete=False) as randfile:
randfile.write(randomBytes(512))
command = u"openssl dhparam -rand {tempfile} 512 >> {target}".format(
tempfile=randfile.name, target=path
)
os.system(command)
finally:
os.remove(randfile.name)
def randomBytes(length):
"""
Return _length_ random bytes.
:rtype: bytes
"""
    if secrets:
        return secrets.token_bytes(length)
    else:
        return os.urandom(length)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Create certificate for testing')
parser.add_argument('--cert', dest='cert', default="testcert.pem",
help='Name of the certificate')
args = parser.parse_args()
createCertificate(args.cert)
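# Usage sketch:
#
#   python create_cert.py --cert testcert.pem
#
# This writes the self-signed certificate plus its private key to testcert.pem and
# appends openssl-generated DH parameters; the certificate is valid for one hour.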
| 23.956522
| 89
| 0.71824
| 300
| 2,204
| 5.16
| 0.563333
| 0.031654
| 0.063307
| 0.029716
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023454
| 0.14882
| 2,204
| 91
| 90
| 24.21978
| 0.801706
| 0.227314
| 0
| 0.036364
| 0
| 0
| 0.111712
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.036364
| false
| 0
| 0.145455
| 0
| 0.218182
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
06b1a7bf9e162d2f1a93b478504af2c68a143b23
| 680
|
py
|
Python
|
positional_args.py
|
nickaigi/effective_python_tips
|
1a68b6eaed2e946b003c0cd0bdea03e79b8e8990
|
[
"Unlicense"
] | null | null | null |
positional_args.py
|
nickaigi/effective_python_tips
|
1a68b6eaed2e946b003c0cd0bdea03e79b8e8990
|
[
"Unlicense"
] | null | null | null |
positional_args.py
|
nickaigi/effective_python_tips
|
1a68b6eaed2e946b003c0cd0bdea03e79b8e8990
|
[
"Unlicense"
] | null | null | null |
def log(message, *values):
""" * operator instructs python to pass items from the sequence as
positional arguments
Remember:
- using the * operator with a generator may cause your program
to run out of memory and crash.
- adding new positional parameters to functions that accept
*args can introduce hard-to-find bugs
"""
if not values:
print(message)
else:
values_str = ', '.join(str(x) for x in values)
print('%s: %s' % (message, values_str))
if __name__ == '__main__':
log('My numbers are', 1, 2)
log('Hi there')
favorites = [7, 33, 99]
log('Favorites colors', *favorites)
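# Sketch of the generator caveat from the docstring: unpacking with * forces full
# materialization, so a huge generator can exhaust memory before log() even runs.
#
#   def gen():
#       for i in range(10):
#           yield i
#   log('generator values', *gen())   # fine for 10 items; risky for billions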
| 28.333333
| 70
| 0.613235
| 90
| 680
| 4.522222
| 0.744444
| 0.063882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014374
| 0.283824
| 680
| 23
| 71
| 29.565217
| 0.821355
| 0.451471
| 0
| 0
| 0
| 0
| 0.160237
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0
| 0
| 0.090909
| 0.181818
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
06b2849360054f2d534889fecd3a7de975d603e4
| 4,342
|
py
|
Python
|
utilities/misc.py
|
lebionick/stereo-transformer
|
6e7df042d917c5ed00d10bd6ddb6f76e90429148
|
[
"Apache-2.0"
] | 410
|
2020-11-06T02:10:17.000Z
|
2022-03-25T17:12:24.000Z
|
utilities/misc.py
|
lppllppl920/stereo-transformer
|
f07b1ee8ced1c36e10630401688a06e355056e56
|
[
"Apache-2.0"
] | 55
|
2020-11-06T10:29:16.000Z
|
2022-03-30T02:10:10.000Z
|
utilities/misc.py
|
lppllppl920/stereo-transformer
|
f07b1ee8ced1c36e10630401688a06e355056e56
|
[
"Apache-2.0"
] | 72
|
2020-11-06T07:22:39.000Z
|
2022-03-19T14:20:38.000Z
|
# Authors: Zhaoshuo Li, Xingtong Liu, Francis X. Creighton, Russell H. Taylor, and Mathias Unberath
#
# Copyright (c) 2020. Johns Hopkins University - All rights reserved.
import copy
import numpy as np
import torch
import torch.nn as nn
class NestedTensor(object):
def __init__(self, left, right, disp=None, sampled_cols=None, sampled_rows=None, occ_mask=None,
occ_mask_right=None):
self.left = left
self.right = right
self.disp = disp
self.occ_mask = occ_mask
self.occ_mask_right = occ_mask_right
self.sampled_cols = sampled_cols
self.sampled_rows = sampled_rows
def center_crop(layer, max_height, max_width):
_, _, h, w = layer.size()
xy1 = (w - max_width) // 2
xy2 = (h - max_height) // 2
return layer[:, :, xy2:(xy2 + max_height), xy1:(xy1 + max_width)]
def batched_index_select(source, dim, index):
views = [source.shape[0]] + [1 if i != dim else -1 for i in range(1, len(source.shape))]
expanse = list(source.shape)
expanse[0] = -1
expanse[dim] = -1
index = index.view(views).expand(expanse)
return torch.gather(source, dim, index)
def torch_1d_sample(source, sample_points, mode='linear'):
"""
linearly sample source tensor along the last dimension
input:
source [N,D1,D2,D3...,Dn]
sample_points [N,D1,D2,....,Dn-1,1]
output:
[N,D1,D2...,Dn-1]
"""
idx_l = torch.floor(sample_points).long().clamp(0, source.size(-1) - 1)
idx_r = torch.ceil(sample_points).long().clamp(0, source.size(-1) - 1)
if mode == 'linear':
weight_r = sample_points - idx_l
weight_l = 1 - weight_r
elif mode == 'sum':
weight_r = (idx_r != idx_l).int() # we only sum places of non-integer locations
weight_l = 1
else:
raise Exception('mode not recognized')
out = torch.gather(source, -1, idx_l) * weight_l + torch.gather(source, -1, idx_r) * weight_r
return out.squeeze(-1)
def get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
def find_occ_mask(disp_left, disp_right):
"""
find occlusion map
1 indicates occlusion
disp range [0,w]
"""
w = disp_left.shape[-1]
# # left occlusion
# find corresponding pixels in target image
coord = np.linspace(0, w - 1, w)[None,] # 1xW
right_shifted = coord - disp_left
# 1. negative locations will be occlusion
occ_mask_l = right_shifted <= 0
# 2. wrong matches will be occlusion
right_shifted[occ_mask_l] = 0 # set negative locations to 0
    right_shifted = right_shifted.astype(int)
disp_right_selected = np.take_along_axis(disp_right, right_shifted,
axis=1) # find tgt disparity at src-shifted locations
wrong_matches = np.abs(disp_right_selected - disp_left) > 1 # theoretically, these two should match perfectly
wrong_matches[disp_right_selected <= 0.0] = False
wrong_matches[disp_left <= 0.0] = False
# produce final occ
wrong_matches[occ_mask_l] = True # apply case 1 occlusion to case 2
occ_mask_l = wrong_matches
# # right occlusion
# find corresponding pixels in target image
coord = np.linspace(0, w - 1, w)[None,] # 1xW
left_shifted = coord + disp_right
# 1. negative locations will be occlusion
occ_mask_r = left_shifted >= w
# 2. wrong matches will be occlusion
left_shifted[occ_mask_r] = 0 # set negative locations to 0
    left_shifted = left_shifted.astype(int)
disp_left_selected = np.take_along_axis(disp_left, left_shifted,
axis=1) # find tgt disparity at src-shifted locations
wrong_matches = np.abs(disp_left_selected - disp_right) > 1 # theoretically, these two should match perfectly
wrong_matches[disp_left_selected <= 0.0] = False
wrong_matches[disp_right <= 0.0] = False
# produce final occ
wrong_matches[occ_mask_r] = True # apply case 1 occlusion to case 2
occ_mask_r = wrong_matches
return occ_mask_l, occ_mask_r
def save_and_clear(idx, output_file):
with open('output-' + str(idx) + '.dat', 'wb') as f:
torch.save(output_file, f)
idx += 1
# clear
for key in output_file:
output_file[key].clear()
return idx
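# torch_1d_sample sketch: linear interpolation along the last dimension.
#
#   src = torch.tensor([[0., 10., 20., 30.]])   # [N=1, D=4]
#   pts = torch.tensor([[1.5]])                 # [N=1, 1]
#   torch_1d_sample(src, pts)                   # -> tensor([15.])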
| 32.893939
| 114
| 0.649931
| 636
| 4,342
| 4.234277
| 0.275157
| 0.044189
| 0.014853
| 0.008169
| 0.380245
| 0.342369
| 0.283698
| 0.260676
| 0.230969
| 0.205719
| 0
| 0.023143
| 0.243667
| 4,342
| 131
| 115
| 33.145038
| 0.796894
| 0.242976
| 0
| 0.053333
| 0
| 0
| 0.014678
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.093333
| false
| 0
| 0.053333
| 0.013333
| 0.24
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
06b306a89a539a3cbfca1d1c817821e2aac7c4eb
| 28,278
|
py
|
Python
|
BASS-train.py
|
shlpu/Statlie-Image-Processor
|
e40355f43f344fd02041bdc8ce57b0ee101c6cdb
|
[
"Apache-2.0"
] | 1
|
2019-11-23T12:58:09.000Z
|
2019-11-23T12:58:09.000Z
|
BASS-train.py
|
shlpu/Statlie-Image-Processor
|
e40355f43f344fd02041bdc8ce57b0ee101c6cdb
|
[
"Apache-2.0"
] | null | null | null |
BASS-train.py
|
shlpu/Statlie-Image-Processor
|
e40355f43f344fd02041bdc8ce57b0ee101c6cdb
|
[
"Apache-2.0"
] | 3
|
2019-03-27T00:47:08.000Z
|
2022-02-05T04:52:48.000Z
|
import numpy as np
import scipy.io
from sklearn.metrics import confusion_matrix
from random import randint, shuffle
from argparse import ArgumentParser
from helper import getValidDataset
import tensorflow as tf
parser = ArgumentParser()
parser.add_argument('--data', type=str, default='Indian_pines')
parser.add_argument('--patch_size', type=int, default=3)
parser.add_argument('--library', type=str, default='tensorflow')
opt = parser.parse_args()
import os
model_directory = os.path.join(os.getcwd(), 'BASSNET_Trained_model/')
# Load MATLAB pre-processed image data
try:
TRAIN = scipy.io.loadmat("./data/" + opt.data + "_Train_patch_" + str(opt.patch_size) + ".mat")
VALIDATION = scipy.io.loadmat("./data/" + opt.data + "_Val_patch_" + str(opt.patch_size) + ".mat")
TEST = scipy.io.loadmat("./data/" + opt.data + "_Test_patch_" + str(opt.patch_size) + ".mat")
except FileNotFoundError:
    raise ValueError('--data options are: Indian_pines, Salinas, KSC, Botswana')
# Extract data and label from MATLAB file
training_data, training_label = TRAIN['train_patch'], TRAIN['train_labels']
validation_data, validation_label = VALIDATION['val_patch'], VALIDATION['val_labels']
test_data, test_label = TEST['test_patch'], TEST['test_labels']
getValidDataset(test_data, test_label)
print('\nData input shape')
print('training_data shape' + str(training_data.shape))
print('training_label shape' + str(training_label.shape) + '\n')
print('testing_data shape' + str(test_data.shape))
print('testing_label shape' + str(test_label.shape) + '\n')
SIZE = training_data.shape[0]
HEIGHT = training_data.shape[1]
WIDTH = training_data.shape[2]
BANDS = training_data.shape[3]
NUM_PARALLEL_BAND = 10
BAND_SIZE = BANDS / 10
NUM_CLASS = training_label.shape[1]
# Helper Functions
def create_conv_2dlayer(input,
num_input_channels,
filter_size,
num_output_channel,
relu=True,
pooling=True): # Number of filters.
shape = [filter_size, filter_size, num_input_channels, num_output_channel]
weights = tf.get_variable('weights', shape=shape, initializer=tf.truncated_normal_initializer(stddev=0.05))
biases = tf.get_variable('biases', shape=[num_output_channel], initializer=tf.constant_initializer(0.05))
layer = tf.nn.conv2d(input=input, filter=weights, strides=[1, 1, 1, 1], padding='SAME')
layer += biases
if pooling:
layer = tf.nn.max_pool(value=layer,
ksize=[1, 3, 3, 1],
strides=[1, 1, 1, 1],
padding='VALID')
if relu:
layer = tf.nn.relu(layer)
return layer, weights
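# Shape sketch (TF1 graph mode; the placeholder below is hypothetical):
#
#   x = tf.placeholder(tf.float32, [None, 3, 3, 220])
#   y, _ = create_conv_2dlayer(x, num_input_channels=220, filter_size=1,
#                              num_output_channel=220, relu=True, pooling=False)
#   # y.shape -> (?, 3, 3, 220); with pooling=True the 3x3 VALID max-pool
#   # would collapse the spatial dims to 1x1.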
def fully_connected_layer(input,
num_inputs,
num_outputs,
activation=None):
weights = tf.get_variable('weights', shape=[num_inputs, num_outputs])
biases = tf.get_variable('biases', shape=num_outputs)
layer = tf.matmul(input, weights) + biases
if activation is not None:
if activation == 'relu':
layer = tf.nn.relu(layer)
elif activation == 'softmax':
layer = tf.nn.softmax(layer)
return layer
def flatten_layer(layer):
layer_shape = layer.get_shape() # layer = [num_images, img_height, img_width, num_channels]
num_features = layer_shape[1:4].num_elements() # Total number of elements in the network
layer_flat = tf.reshape(layer, [-1, num_features]) # -1 means total size of dimension is unchanged
return layer_flat, num_features
def specialized_conv1d(input,
filter_width,
filter_height,
num_output_channels,
num_input_channels = 1,
relu=True):
shape = [filter_height, filter_width, num_input_channels, num_output_channels]
weights = tf.get_variable(name='weights-1D', shape=shape, initializer=tf.truncated_normal_initializer(stddev=0.05))
biases = tf.get_variable(name='biases-1D', shape=[num_output_channels], initializer=tf.constant_initializer(0.05))
layer = tf.nn.conv2d(input=input, filter=weights, strides=[1,1,1,1], padding='VALID')
out_height = input.shape[1] - filter_height + 1
layer += biases
layer = tf.reshape(layer, [-1, out_height, num_output_channels, 1])
if relu:
layer = tf.nn.relu(layer)
return layer
def block2_parallel(model):
    layer = model['block2_preprocess']
    # All ten band groups share the same sub-network, so build them in a loop
    # (behaviour-preserving rewrite of the ten hand-unrolled copies; the
    # variable scopes 'band1'..'band10' are unchanged).
    stack = None
    for i in range(NUM_PARALLEL_BAND):
        with tf.variable_scope('band' + str(i + 1)):
            block2_prep = layer[i]
            block2_prep = tf.reshape(block2_prep, (-1, 9, 22, 1))
            block2_prep = tf.transpose(block2_prep, perm=[0, 2, 1, 3])
            with tf.variable_scope('block2_part1'):
                block2_part1 = specialized_conv1d(input=block2_prep, filter_width=9, filter_height=3,
                                                  num_output_channels=20, relu=True)
            with tf.variable_scope('block2_part2'):
                block2_part2 = specialized_conv1d(input=block2_part1, filter_width=20, filter_height=3,
                                                  num_output_channels=10, relu=True)
            with tf.variable_scope('block2_part3'):
                block2_part3 = specialized_conv1d(input=block2_part2, filter_width=10, filter_height=3,
                                                  num_output_channels=10, relu=True)
            with tf.variable_scope('block2_part4'):
                block2_part4 = specialized_conv1d(input=block2_part3, filter_width=10, filter_height=5,
                                                  num_output_channels=5, relu=True)
            with tf.variable_scope('block2_part5'):
                block2_part5, _ = flatten_layer(block2_part4)
        stack = block2_part5 if stack is None else tf.concat([stack, block2_part5], axis=1)
        print(stack)
    return stack
# Define BASSNET architecture
def bassnet(statlieImg, prob):
# Image_entry are images in format 3 x 3 x 220, Prob = Drop out probability ~ 0.5
# return a dictionary contains all layer
sequence = {}
sequence['inputLayer'] = tf.reshape(statlieImg, [-1,3,3,220])
with tf.variable_scope('block1_conv1'):
layer = sequence['inputLayer']
layer, weight = create_conv_2dlayer(input=layer,
num_input_channels=BANDS,
filter_size=1,
num_output_channel=220,
relu=True, pooling=False)
sequence['block1_conv1'] = layer
with tf.variable_scope('block1_conv2'):
layer = sequence['block1_conv1']
layer, weight = create_conv_2dlayer(input=layer,
num_input_channels=BANDS,
filter_size=1,
num_output_channel=220,
relu=True, pooling=False)
sequence['block1_conv2'] = layer
# Block 2 Implementation
with tf.variable_scope('block2_preprocess_GPU'):
layer = sequence['block1_conv2']
layer = tf.reshape(layer, [-1, 9, 220])
container = tf.split(layer, num_or_size_splits=10, axis=2)
sequence['block2_preprocess_GPU'] = container
for i in range(10):
scope = "BAND_"+str(i)
with tf.variable_scope(scope):
print(tf.get_variable_scope())
with tf.variable_scope('block2_preprocess'):
layer = sequence['block1_conv2']
layer = tf.reshape(layer, [-1, 9, 220])
layer = tf.split(layer, num_or_size_splits=10, axis=2)
sequence['block2_preprocess'] = layer
with tf.variable_scope('block2_parallel'):
parallel_model = block2_parallel(sequence)
sequence['block2_end'] = parallel_model
'''
with tf.variable_scope('block2'):
layer = sequence['block2_preprocess']
def condition(time, output_ta_l):
return time < 10
def body(time, output_ta_l):
block2_prep = layer[:, :, :, time]
block2_prep = tf.reshape(block2_prep, (-1, 9, 22, 1))
block2_prep = tf.transpose(block2_prep, perm=[0, 2, 1, 3])
with tf.variable_scope('block2_part1'):
block2_part1 = specialized_conv1d(input=block2_prep, filter_width=9, filter_height=3,
num_output_channels=20, relu=True)
with tf.variable_scope('block2_part2'):
block2_part2 = specialized_conv1d(input=block2_part1, filter_width=20, filter_height=3,
num_output_channels=10, relu=True)
with tf.variable_scope('block2_part3'):
block2_part3 = specialized_conv1d(input=block2_part2, filter_width=10, filter_height=3,
num_output_channels=10, relu=True)
with tf.variable_scope('block2_part4'):
block2_part4 = specialized_conv1d(input=block2_part3, filter_width=10, filter_height=5,
num_output_channels=5, relu=True)
with tf.variable_scope('block2_part5'):
block2_part5, _ = flatten_layer(block2_part4)
output_ta_l = output_ta_l.write(time, block2_part5)
return time+1, output_ta_l
time = 0
block3_entry = tf.TensorArray(tf.float32, size=10)
_, block3_entry = tf.while_loop(condition, body, loop_vars=[time, block3_entry])
block3_entry = block3_entry.concat()
block3_entry3 = tf.reshape(block3_entry, (-1, 600))
sequence['block3_entry_point'] = block3_entry3
# End of geniue block 2
'''
# Begin of fake block 2
with tf.variable_scope('block2_conv1_fake'):
layer = sequence['block1_conv2']
layer, weight = create_conv_2dlayer(input=layer,
num_input_channels=220,
filter_size=3,
num_output_channel=600,
relu=True, pooling=True)
sequence['block2_conv1_fake'] = layer
with tf.variable_scope('block2_exit_flatten'):
layer = sequence['block2_conv1_fake']
layer, number_features = flatten_layer(layer)
sequence['block2_exit_flatten'] = layer
# End of fake block 2
# Final block 3 layer
    with tf.variable_scope('block3_dense1'):
        layer = sequence['block2_end']
        # layer = sequence['block3_entry_point']
        # num_inputs is sized from the fake block-2 flatten; it must match the
        # width of whichever block-2 output is actually fed in here.
        layer = fully_connected_layer(input=layer,
                                      num_inputs=number_features,
                                      num_outputs=100,
                                      activation='relu')
layer = tf.nn.dropout(x=layer, keep_prob=prob)
sequence['block3_dense1'] = layer
with tf.variable_scope('block3_dense2'):
layer = sequence['block3_dense1']
layer = fully_connected_layer(input=layer,
num_inputs=100,
num_outputs=54)
layer = tf.nn.dropout(x=layer, keep_prob=prob)
sequence['block3_dense2'] = layer
with tf.variable_scope('block3_dense3'):
layer = sequence['block3_dense2']
layer = fully_connected_layer(input=layer,
num_inputs=54,
num_outputs=9)
layer = tf.nn.dropout(x=layer, keep_prob=prob)
sequence['block3_dense3'] = layer
y_predict = tf.nn.softmax(sequence['block3_dense3'])
sequence['class_prediction'] = y_predict
sequence['predict_class_number'] = tf.argmax(y_predict, axis=1)
return sequence
graph = tf.Graph()
with graph.as_default():
img_entry = tf.placeholder(tf.float32, shape=[None, HEIGHT, WIDTH, BANDS], name='img_entry')
img_label = tf.placeholder(tf.uint8, shape=[None, NUM_CLASS], name='img_label')
image_true_class = tf.argmax(img_label, axis=1, name="img_true_label")
prob = tf.placeholder(tf.float32)
model = bassnet(statlieImg=img_entry, prob=prob)
final_layer = model['block3_dense3']
cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=final_layer,
labels=img_label)
cost = tf.reduce_mean(cross_entropy)
# Optimisation function
optimizer = tf.train.AdamOptimizer(learning_rate=0.0005).minimize(cost)
predict_class = model['predict_class_number']
    correction = tf.equal(predict_class, image_true_class)
accuracy = tf.reduce_mean(tf.cast(correction, tf.float32))
saver = tf.train.Saver()
with tf.Session(graph=graph) as session:
writer = tf.summary.FileWriter("BASSNETlogs/", session.graph)
    session.run(tf.global_variables_initializer())
    if os.path.isdir(model_directory):
        # Restore after initialisation so the restored weights are not overwritten.
        saver.restore(session, 'BASSNET_Trained_model/')
total_iterations = 0
    def train(num_iterations, train_batch_size=200, s=250, training_data=training_data,
              training_label=training_label, test_data=test_data, test_label=test_label):
global total_iterations
for i in range(total_iterations, total_iterations + num_iterations):
idx = randint(1, 2550)
for x in range(10):
train_batch = training_data[idx*x: idx*x + train_batch_size]
train_batch_label = training_label[idx*x:idx*x + train_batch_size]
feed_dict_train = {img_entry: train_batch, img_label: train_batch_label, prob: 0.2}
session.run(optimizer, feed_dict=feed_dict_train)
print('Finished training an epoch...')
if i % 10 == 0:
training_data, training_label, test_data, test_label = trainTestSwap(training_data, training_label, test_data, test_label, idx, size=s)
# val_x, val_y = getValidDataset(test_data, test_label)
val_x, val_y = test_data[:s], test_label[:s]
feed_dict_validate = {img_entry: val_x, img_label: val_y, prob: 1.0}
acc = session.run(accuracy, feed_dict=feed_dict_validate)
msg = "Optimization Iteration: {0:>6}, Training Accuracy: {1:>6.1%}"
print(msg.format(i + 1, acc))
total_iterations += num_iterations
def test(test_batch_size=validation_data.shape[0]):
print('\n -----Test----')
y_predict_class = model['predict_class_number']
        # Evaluate on the full validation set.
        feed_dict_test = {img_entry: validation_data, img_label: validation_label, prob: 1.0}
class_pred = np.zeros(shape=test_batch_size, dtype=np.int)
class_pred[:test_batch_size] = session.run(y_predict_class, feed_dict=feed_dict_test)
class_true = np.argmax(validation_label, axis=1)
correct = (class_true == class_pred).sum()
accuracy_test = float(correct) / test_batch_size
print('Accuracy at test: \t' + str(accuracy_test * 100) + '%')
# print_confusion_matrix(true_class =class_true, predicted_class=class_pred )
print('Confusion matrix')
con_mat = confusion_matrix(class_true, class_pred)
print(con_mat)
def trainTestSwap(training_data, training_label, test_data, test_label, idx, size=250):
a, b = test_data[:size], test_label[:size]
c, d = training_data[idx: idx+size], training_label[idx: idx+size]
test_data, test_label = test_data[size:], test_label[size:]
test_data, test_label = np.concatenate((test_data, c), axis=0), np.concatenate((test_label, d), axis=0)
training_data[idx: idx + size], training_label[idx: idx + size] = a, b
return training_data, training_label, test_data, test_label
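    # Worked example of the swap above (illustrative values, not from an actual run):
    # with idx=0 and size=2, the first two test samples replace training samples 0-1,
    # and those training samples are appended to the test set, so both sets keep
    # their original sizes.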
    def cross_validate(training_data, training_label, test_):
        print("This is not necessary as we have a large dataset and it's expensive to do!")
train(num_iterations=12000, train_batch_size=200)
saver.save(session, model_directory)
test()
# trainTestSwap(training_data, training_label, test_data, test_label, 1, size=250)
print('End session')
[record boundary: per-file quality-signal columns omitted] next file: mlmodels/model_tf/misc/tfcode2/CNN/alex-net/alexnet.py | repos: gitter-badger/mlmodels, whitetiger1002/mlmodels | lang: Python | size: 4,588 bytes | license: MIT | sha: ebef4935fe5542a7f33a3a5e4cd173560258a38e
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import time
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import tensorflow as tf
from scipy.misc import imresize
from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed in modern scikit-learn
import _pickle as cPickle
from train import train
class Alexnet:
def __init__(self, input_size, output_dimension, learning_rate):
self.X = tf.placeholder(tf.float32, (None, input_size, input_size, 3))
self.Y = tf.placeholder(tf.float32, (None, output_dimension))
kernel = tf.Variable(tf.truncated_normal([11, 11, 3, 64], stddev=1e-1))
bias = tf.Variable(tf.constant(0.0, shape=[64]), trainable=True)
conv1 = tf.nn.relu(tf.nn.conv2d(self.X, kernel, [1, 4, 4, 1], padding="SAME") + bias)
lrn1 = tf.nn.local_response_normalization(
conv1, alpha=1e-4, beta=0.75, depth_radius=2, bias=2.0
)
pool1 = tf.nn.max_pool(lrn1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding="VALID")
kernel = tf.Variable(tf.truncated_normal([5, 5, 64, 192], stddev=1e-1))
bias = tf.Variable(tf.constant(0.0, shape=[192]), trainable=True)
conv2 = tf.nn.relu(tf.nn.conv2d(pool1, kernel, [1, 1, 1, 1], padding="SAME") + bias)
lrn2 = tf.nn.local_response_normalization(
conv2, alpha=1e-4, beta=0.75, depth_radius=2, bias=2.0
)
pool2 = tf.nn.max_pool(lrn2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding="VALID")
kernel = tf.Variable(tf.truncated_normal([3, 3, 192, 384], stddev=1e-1))
bias = tf.Variable(tf.constant(0.0, shape=[384]), trainable=True)
conv3 = tf.nn.relu(tf.nn.conv2d(pool2, kernel, [1, 1, 1, 1], padding="SAME") + bias)
kernel = tf.Variable(tf.truncated_normal([3, 3, 384, 256], stddev=1e-1))
bias = tf.Variable(tf.constant(0.0, shape=[256]), trainable=True)
conv4 = tf.nn.relu(tf.nn.conv2d(conv3, kernel, [1, 1, 1, 1], padding="SAME") + bias)
kernel = tf.Variable(tf.truncated_normal([3, 3, 256, 256], stddev=1e-1))
bias = tf.Variable(tf.constant(0.0, shape=[256]), trainable=True)
conv5 = tf.nn.relu(tf.nn.conv2d(conv4, kernel, [1, 1, 1, 1], padding="SAME") + bias)
pool5 = tf.nn.max_pool(conv5, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding="VALID")
pulled_shape = int(pool5.shape[1]) * int(pool5.shape[2]) * int(pool5.shape[3])
pulled_pool = tf.reshape(pool5, (-1, pulled_shape))
w = tf.Variable(tf.truncated_normal([pulled_shape, 4096], stddev=1e-1))
b = tf.Variable(tf.constant(0.0, shape=[4096]), trainable=True)
fully1 = tf.nn.relu(tf.matmul(pulled_pool, w) + b)
w = tf.Variable(tf.truncated_normal([4096, 4096], stddev=1e-1))
b = tf.Variable(tf.constant(0.0, shape=[4096]), trainable=True)
fully2 = tf.nn.relu(tf.matmul(fully1, w) + b)
w = tf.Variable(tf.truncated_normal([4096, output_dimension], stddev=1e-1))
b = tf.Variable(tf.constant(0.0, shape=[output_dimension]), trainable=True)
self.logits = tf.matmul(fully2, w) + b
self.cost = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.Y)
)
self.optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate).minimize(self.cost)
self.correct_prediction = tf.equal(tf.argmax(self.logits, 1), tf.argmax(self.Y, 1))
self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, "float"))
# In[2]:
def unpickle(file):
with open(file, "rb") as fo:
dict = cPickle.load(fo, encoding="latin1")
return dict
unique_name = unpickle("cifar-10-batches-py/batches.meta")["label_names"]
batches = unpickle("cifar-10-batches-py/data_batch_1")
train_X, test_X, train_Y, test_Y = train_test_split(
batches["data"], batches["labels"], test_size=0.2
)
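# The rows returned by unpickle are flat 3072-vectors (3x32x32, channel-first),
# while the model expects (IMG_SIZE, IMG_SIZE, 3) images; the external `train`
# helper is assumed to handle this. A minimal per-image sketch (hypothetical
# `prepare` helper, not part of this file) could look like:
#
#   def prepare(row, img_size):
#       img = row.reshape(3, 32, 32).transpose(1, 2, 0)  # CHW -> HWC
#       return imresize(img, (img_size, img_size))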
# In[3]:
BATCH_SIZE = 5
# alexnet original
IMG_SIZE = 224
LEARNING_RATE = 0.0001
# In[4]:
sess = tf.InteractiveSession()
model = Alexnet(IMG_SIZE, len(unique_name), LEARNING_RATE)
sess.run(tf.global_variables_initializer())
# In[5]:
RESULTS = train(
sess, model, 20, BATCH_SIZE, len(unique_name), IMG_SIZE, train_X, test_X, train_Y, test_Y
)
# In[13]:
sns.set()
plt.figure(figsize=(15, 5))
plt.subplot(1, 2, 1)
plt.plot(np.arange(len(RESULTS[0])), RESULTS[0], label="entropy cost")
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(np.arange(len(RESULTS[0])), RESULTS[1], label="accuracy training")
plt.plot(np.arange(len(RESULTS[0])), RESULTS[2], label="accuracy testing")
plt.legend()
plt.show()
# In[ ]:
[record boundary: per-file quality-signal columns omitted] next file: p8_test/test_local/__init__.py | repo: crazynayan/tpf1 | lang: Python | size: 4,184 bytes | license: MIT | sha: ebefcab7987e2949070f887144afd954129e8c65
import random
import string
import unittest
from typing import List, Union, Dict
from config import config
from p2_assembly.mac2_data_macro import DataMacro
from p3_db.test_data import TestData
from p3_db.test_data_elements import Pnr
from p4_execution.debug import get_debug_loc, add_debug_loc, get_missed_loc
from p4_execution.ex5_execute import TpfServer
class TestDataUTS(TestData):
def add_all_regs(self) -> None:
for reg in config.REG:
if reg in ['R8', 'R9']:
continue
self.output.regs[reg] = 0
return
def add_all_reg_pointers(self, length: int) -> None:
for reg in config.REG:
self.output.reg_pointers[reg] = length
def add_fields(self, fields: List[Union[str, tuple]], macro_name: str, base_reg: str = None) -> None:
field_dict = dict()
for field in fields:
field, length = field if isinstance(field, tuple) else (field, 0)
field_dict['field'] = field
field_dict['base_reg'] = base_reg if base_reg else str()
field_dict['length'] = length
self.output.create_field_byte(macro_name, field_dict, persistence=False)
return
def add_pnr_element(self, data_list: List[str], key: str, locator: str = None, variation: int = 0) -> Pnr:
pnr_dict = {'key': key, 'data': ','.join(data_list), 'variation': variation, 'variation_name': str(),
'locator': str()}
if locator:
pnr_dict['locator'] = locator
pnr = self.create_pnr_element(pnr_dict, persistence=False)
pnr.set_id(''.join(random.choices(string.ascii_uppercase + string.ascii_lowercase + string.digits, k=20)))
return pnr
def add_pnr_field_data(self, field_data_list: List[Dict[str, str]], key, locator: str = None,
variation: int = 0) -> None:
core_dict = {'macro_name': DataMacro.get_label_reference(next(iter(field_data_list[0].keys()))).name}
for field_data in field_data_list:
core_dict['field_data'] = field_data
pnr = self.add_pnr_element(list(), key, locator, variation)
self.create_pnr_field_data(pnr.id, core_dict, persistence=False)
return
def add_tpfdf(self, field_data_list: List[Dict[str, str]], key: str, macro_name: str, variation: int = 0):
df_dict = {'key': key, 'macro_name': macro_name, 'variation': variation, 'variation_name': str()}
for field_data in field_data_list:
df_dict['field_data'] = field_data
self.create_tpfdf_lrec(df_dict, persistence=False)
return
class TestDebug(unittest.TestCase):
SEGMENTS = ["ETA1", "ETAX", "ETAF", "ETAZ", "ETK1", "ETKF", "ETA4", "ETA5", "ETAW", "ETA6", "ETK2", "ETK6", "ETAA",
"ETA9", "ETG1", "INS0", "ETG2", "ETGG", "ETG3", "ETGE", "EWA1", "EXA1", "EXAA", "EXAK", "EXA2", "EXA3",
"EXA8", "EXA9", "EXA4", "EXA5", "EXE1", "EXE2", "EXER", "EXE3", "EXE6", "EXE4", "EXEN"]
SUCCESS_END = "EXEN0000"
ETG1_TJR_END = "ETG10750.2"
EXAA_NPTY_END = "EXAA0525.6"
FMSG_END = "FMSG0100"
IGR1_END = "IGR1E000"
def setUp(self) -> None:
self.tpf_server = TpfServer()
self.test_data = TestDataUTS()
self.test_data.output.debug = self.SEGMENTS if config.TEST_DEBUG else list()
self.output = None
def tearDown(self) -> None:
if not config.TEST_DEBUG:
return
if not self.output or not self.output.debug:
return
add_debug_loc(config.ET_DEBUG_DATA, self.output.debug)
add_debug_loc(config.ET_DEBUG_DATA_MISSED, self.output.debug_missed)
@classmethod
def tearDownClass(cls) -> None:
if not config.TEST_DEBUG:
return
config.ET_CLASS_COUNTER += 1
if config.ET_CLASS_COUNTER < config.ET_TEST_CLASS_COUNT:
return
for segment in cls.SEGMENTS:
loc = get_debug_loc(config.ET_DEBUG_DATA, segment)
loc_missed = get_missed_loc(config.ET_DEBUG_DATA_MISSED, config.ET_DEBUG_DATA, segment)
print(f"{segment} LOC Done = {loc}, LOC Missed = {loc_missed}")
[record boundary: per-file quality-signal columns omitted] next file: src/GameController.py | repo: salemalex11/Gomoku | lang: Python | size: 3,199 bytes | license: MIT | sha: ebf1ffe3b522e31d9f44e5d373462af230e2e497
# Define imports
import pygame
import sys
import time
class Controller:
"""Class responsible for interacting with the Model and View."""
def __init__(self, view):
"""Initialize a controller taking input from the View."""
self.model = view.get_model()
self.board = self.model.get_board()
self.num_players = self.model.get_num_players()
self.player_list = self.model.get_player_list()
self.view = view
self.tile_size = self.view.get_tile_size()
self.tile_margin = self.view.get_tile_margin()
def play(self):
"""Play the game until a player wins or quits."""
# Initialize pygame
pygame.init()
# Start with Player 1
current_player = 1
pygame.display.set_caption("Player {}'s turn".format(current_player))
# Play until a player wins
is_won = False
while not is_won:
# Loop through mouse clicks
for event in pygame.event.get():
if event.type == pygame.MOUSEBUTTONDOWN:
# Find board tile from click coordinates
click = pygame.mouse.get_pos()
row = (click[1] // (self.tile_size + self.tile_margin))
column = (click[0] // (self.tile_size + self.tile_margin))
# If tile is unclaimed
if self.board[row][column] == 0:
# Claim tile
self.board[row][column] = current_player
# Update board
self.view.update()
# Check if winning move
if self.model.is_won(current_player, row, column):
# Display win message
pygame.display.set_caption("Player {} won the game!".format(current_player))
# Display win animation
self.view.win_animation(current_player)
# Stop playing
is_won = True
# Continue game if no winning move
else:
# Switch players
current_player += 1
if current_player > self.num_players:
current_player = 1
# Display next player's turn message
pygame.display.set_caption("Player {}'s turn".format(current_player))
# If player quits
elif event.type == pygame.QUIT:
# Terminate program
sys.exit()
        # Keep the final board visible briefly before shutting pygame down
        time.sleep(5)
        # Terminate pygame
        pygame.quit()
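# Minimal usage sketch: Controller expects a view object exposing get_model(),
# get_tile_size(), get_tile_margin(), update() and win_animation() (the project's
# View class, not defined in this file):
#
#   controller = Controller(view)
#   controller.play()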
[record boundary: per-file quality-signal columns omitted] next file: silverberg/test/test_client.py | repo: TimothyZhang/silverberg | lang: Python | size: 17,101 bytes | license: Apache-2.0 | sha: ebf22c5792152fe6b5cb3d25a3473aad20996bcf
# Copyright 2012 Rackspace Hosting, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the client."""
import mock
from uuid import UUID
from twisted.internet import defer
from silverberg.client import CQLClient, ConsistencyLevel, TestingCQLClient
from silverberg.cassandra import ttypes, Cassandra
from silverberg.test.util import BaseTestCase
class MockClientTests(BaseTestCase):
"""Test the client."""
def setUp(self):
"""Setup the mock objects for the tests."""
self.endpoint = mock.Mock()
self.client_proto = mock.Mock(Cassandra.Client)
self.twisted_transport = mock.Mock()
self.mock_results = ttypes.CqlResult(type=ttypes.CqlResultType.INT, num=1)
self.client_proto.set_keyspace.return_value = defer.succeed(None)
self.client_proto.login.return_value = defer.succeed(None)
self.client_proto.describe_version.return_value = defer.succeed('1.2.3')
def _execute_cql3_query(*args, **kwargs):
return defer.succeed(self.mock_results)
self.client_proto.execute_cql3_query.side_effect = _execute_cql3_query
def _connect(factory):
wrapper = mock.Mock()
wrapper.transport = self.twisted_transport
wrapper.wrapped.client = self.client_proto
return defer.succeed(wrapper)
self.endpoint.connect.side_effect = _connect
def test_disconnect_on_cancel(self):
"""
If allowed, cancellation of running query will also try to disconnect
the TCP connection
"""
self.client_proto.execute_cql3_query.side_effect = lambda *_: defer.Deferred()
client = CQLClient(self.endpoint, 'abc', disconnect_on_cancel=True)
client.disconnect = mock.Mock()
d = client.execute('query', {}, ConsistencyLevel.ONE)
self.assertNoResult(d)
self.assertFalse(client.disconnect.called)
d.cancel()
self.failureResultOf(d, defer.CancelledError)
        client.disconnect.assert_called_once_with()
def test_disconnect_on_cancel_returns_correct_value(self):
"""
with disconnect_on_cancel=True, the value from execute_cql3_query is
returned before cancellation
"""
exec_d = defer.Deferred()
self.client_proto.execute_cql3_query.side_effect = lambda *_: exec_d
client = CQLClient(self.endpoint, 'abc', disconnect_on_cancel=True)
client.disconnect = mock.Mock()
d = client.execute('query', {}, ConsistencyLevel.ONE)
self.assertNoResult(d)
self.assertFalse(client.disconnect.called)
exec_d.callback(self.mock_results)
self.assertEqual(self.successResultOf(d), 1)
self.assertFalse(client.disconnect.called)
def test_no_disconnect_on_cancel(self):
"""
If not given, cancellation of running query should not try to disconnect
the TCP connection
"""
self.client_proto.execute_cql3_query.side_effect = lambda *_: defer.Deferred()
client = CQLClient(self.endpoint, 'abc', disconnect_on_cancel=False)
client.disconnect = mock.Mock()
d = client.execute('query', {}, ConsistencyLevel.ONE)
self.assertNoResult(d)
self.assertFalse(client.disconnect.called)
d.cancel()
self.failureResultOf(d, defer.CancelledError)
self.assertFalse(client.disconnect.called)
def test_disconnect(self):
"""
When disconnect is called, the on demand thrift client is disconnected
"""
client = CQLClient(self.endpoint, 'blah')
self.assertFired(client.describe_version())
client.disconnect()
self.twisted_transport.loseConnection.assert_called_once_with()
def test_login(self):
"""Test that login works as expected."""
client = CQLClient(self.endpoint, 'blah', 'groucho', 'swordfish')
d = client.describe_version()
self.assertEqual(self.assertFired(d), '1.2.3')
self.client_proto.describe_version.assert_called_once_with()
self.client_proto.set_keyspace.assert_called_once_with('blah')
creds = {'username': 'groucho', 'password': 'swordfish'}
authreq = ttypes.AuthenticationRequest(creds)
self.client_proto.login.assert_called_once_with(authreq)
def test_bad_keyspace(self):
"""Ensure that a bad keyspace results in an errback."""
self.client_proto.set_keyspace.return_value = defer.fail(ttypes.NotFoundException())
client = CQLClient(self.endpoint, 'blah')
d = client.describe_version()
self.assertFailed(d, ttypes.NotFoundException)
self.client_proto.set_keyspace.assert_called_once_with('blah')
def test_describe_version(self):
"""Connect and check the version."""
client = CQLClient(self.endpoint, 'blah')
d = client.describe_version()
self.assertEqual(self.assertFired(d), '1.2.3')
self.assertEqual(self.client_proto.describe_version.call_count, 1)
self.client_proto.set_keyspace.assert_called_once_with('blah')
def test_unsupported_types_are_returned_as_bytes(self):
"""
When a table includes a column of a type that is not explicitly
supported we should return the raw bytes instead of attempting to
unmarshal the data.
"""
mock_rows = [ttypes.CqlRow(
key='',
columns=[
ttypes.Column(
name='an_unknown_type',
value="\x00\x01")])]
self.mock_results = ttypes.CqlResult(
type=ttypes.CqlResultType.ROWS,
rows=mock_rows,
schema=ttypes.CqlMetadata(value_types={'an_unknown_type': 'an.unknown.type'}))
client = CQLClient(self.endpoint, 'blah')
d = client.execute("SELECT * FROM blah", {}, ConsistencyLevel.ONE)
results = self.assertFired(d)
self.assertEqual(results, [{'an_unknown_type': '\x00\x01'}])
def test_cql_value(self):
"""
Test that a CQL response that is an integer value is
processed correctly (e.g. SELECT COUNT).
"""
self.mock_results = ttypes.CqlResult(type=ttypes.CqlResultType.INT, num=1)
client = CQLClient(self.endpoint, 'blah')
d = client.execute("SELECT :sel FROM test_blah", {"sel": "blah"}, ConsistencyLevel.ONE)
self.assertEqual(self.assertFired(d), 1)
self.client_proto.execute_cql3_query.assert_called_once_with("SELECT 'blah' FROM test_blah", 2,
ConsistencyLevel.ONE)
self.client_proto.set_keyspace.assert_called_once_with('blah')
def test_cql_array(self):
"""Test that a full CQL response (e.g. SELECT) works."""
expected = [{"foo": "{P}"}]
mockrow = [ttypes.CqlRow(key='blah', columns=[ttypes.Column(name='foo', value='{P}')])]
self.mock_results = ttypes.CqlResult(
type=ttypes.CqlResultType.ROWS,
rows=mockrow,
schema=ttypes.CqlMetadata(value_types={'foo': 'org.apache.cassandra.db.marshal.UTF8Type'}))
client = CQLClient(self.endpoint, 'blah')
d = client.execute("SELECT :sel FROM test_blah", {"sel": "blah"}, ConsistencyLevel.ONE)
self.assertEqual(self.assertFired(d), expected)
self.client_proto.execute_cql3_query.assert_called_once_with("SELECT 'blah' FROM test_blah", 2,
ConsistencyLevel.ONE)
self.client_proto.set_keyspace.assert_called_once_with('blah')
def test_cql_array_deserial(self):
"""Make sure that values that need to be deserialized correctly are."""
expected = [{"fff": 1222}]
mockrow = [ttypes.CqlRow(key='blah', columns=[ttypes.Column(name='fff', value='\x04\xc6')])]
self.mock_results = ttypes.CqlResult(type=ttypes.CqlResultType.ROWS,
rows=mockrow,
schema=ttypes.CqlMetadata(value_types={
'fff': 'org.apache.cassandra.db.marshal.IntegerType'
}))
client = CQLClient(self.endpoint, 'blah')
d = client.execute("SELECT * FROM :tablename;", {"tablename": "blah"}, ConsistencyLevel.ONE)
self.assertEqual(self.assertFired(d), expected)
self.client_proto.execute_cql3_query.assert_called_once_with("SELECT * FROM 'blah';", 2,
ConsistencyLevel.ONE)
self.client_proto.set_keyspace.assert_called_once_with('blah')
def test_cql_list_deserial(self):
expected = [{'fff': ['ggg', 'hhh']}]
mockrow = [ttypes.CqlRow(key='blah',
columns=[ttypes.Column(name='fff',
value='\x00\x02\x00\x03ggg\x00\x03hhh')])]
list_type = 'org.apache.cassandra.db.marshal.ListType'
text_type = 'org.apache.cassandra.db.marshal.UTF8Type'
text_list_type = list_type + '(' + text_type + ')'
self.mock_results = ttypes.CqlResult(
type=ttypes.CqlResultType.ROWS,
rows=mockrow,
schema=ttypes.CqlMetadata(value_types={'fff': text_list_type}))
client = CQLClient(self.endpoint, 'blah')
d = client.execute("SELECT * FROM :tablename;", {"tablename": "blah"}, ConsistencyLevel.ONE)
self.assertEqual(self.assertFired(d), expected)
self.client_proto.execute_cql3_query.assert_called_once_with("SELECT * FROM 'blah';", 2,
ConsistencyLevel.ONE)
self.client_proto.set_keyspace.assert_called_once_with('blah')
def test_cql_None_not_deserialized(self):
"""
If the value is None, it is not deserialized at all.
"""
raw_rows = [ttypes.CqlRow(
key='blah', columns=[ttypes.Column(name='fff', value=None)])]
schema = ttypes.CqlMetadata(value_types={
'fff': 'org.apache.cassandra.db.marshal.AlwaysFailType'})
client = CQLClient(self.endpoint, 'blah')
always_blow_up = mock.Mock(spec=[], side_effect=Exception)
rows = client._unmarshal_result(schema, raw_rows, {
'org.apache.cassandra.db.marshal.AlwaysFailType': always_blow_up
})
self.assertEqual(rows, [{'fff': None}])
self.assertEqual(always_blow_up.call_count, 0)
def test_cql_insert(self):
"""Test a mock CQL insert with a VOID response works."""
expected = None
self.mock_results = ttypes.CqlResult(type=ttypes.CqlResultType.VOID)
client = CQLClient(self.endpoint, 'blah')
d = client.execute("UPDATE blah SET 'key'='frr', 'fff'=1222 WHERE KEY='frr'", {},
ConsistencyLevel.ONE)
self.assertEqual(self.assertFired(d), expected)
self.client_proto.execute_cql3_query.assert_called_once_with(
"UPDATE blah SET 'key'='frr', 'fff'=1222 WHERE KEY='frr'",
2, ConsistencyLevel.ONE)
self.client_proto.set_keyspace.assert_called_once_with('blah')
def test_cql_insert_vars(self):
"""Test that a CQL insert that has variables works."""
expected = None
self.mock_results = ttypes.CqlResult(type=ttypes.CqlResultType.VOID)
client = CQLClient(self.endpoint, 'blah')
d = client.execute("UPDATE blah SET 'key'='frr', 'fff'=:val WHERE KEY='frr'", {"val": 1234},
ConsistencyLevel.ONE)
self.assertEqual(self.assertFired(d), expected)
self.client_proto.execute_cql3_query.assert_called_once_with(
"UPDATE blah SET 'key'='frr', 'fff'=1234 WHERE KEY='frr'",
2, ConsistencyLevel.ONE)
self.client_proto.set_keyspace.assert_called_once_with('blah')
def test_cql_sequence(self):
"""
Test a sequence of operations results in only one handshake
but two requests.
"""
expected = [{"foo": "{P}"}]
mockrow = [ttypes.CqlRow(key='blah', columns=[ttypes.Column(name='foo', value='{P}')])]
self.mock_results = ttypes.CqlResult(
type=ttypes.CqlResultType.ROWS, rows=mockrow,
schema=ttypes.CqlMetadata(
value_types={'foo': 'org.apache.cassandra.db.marshal.UTF8Type'}))
client = CQLClient(self.endpoint, 'blah')
def _cqlProc(r):
return client.execute("SELECT :sel FROM test_blah", {"sel": "blah"},
ConsistencyLevel.ONE)
d = client.execute("SELECT :sel FROM test_blah", {"sel": "ffh"},
ConsistencyLevel.ONE)
d.addCallback(_cqlProc)
self.assertEqual(self.assertFired(d), expected)
self.client_proto.execute_cql3_query.assert_any_call("SELECT 'blah' FROM test_blah", 2,
ConsistencyLevel.ONE)
self.client_proto.execute_cql3_query.assert_any_call("SELECT 'ffh' FROM test_blah", 2,
ConsistencyLevel.ONE)
self.client_proto.set_keyspace.assert_called_once_with('blah')
def test_cql_result_metadata(self):
"""
execute should use the metadata included with the CqlResult for
deserializing values.
"""
expected = [{"foo": UUID('114b8328-d1f1-11e2-8683-000c29bc9473')}]
mockrow = [
ttypes.CqlRow(
key='blah',
columns=[
ttypes.Column(
name='foo',
value='\x11K\x83(\xd1\xf1\x11\xe2\x86\x83\x00\x0c)\xbc\x94s')])]
self.mock_results = ttypes.CqlResult(
type=ttypes.CqlResultType.ROWS,
rows=mockrow,
schema=ttypes.CqlMetadata(value_types={
'foo': 'org.apache.cassandra.db.marshal.TimeUUIDType'}))
client = CQLClient(self.endpoint, 'blah')
d = client.execute("SELECT * FROM blah;", {}, ConsistencyLevel.ONE)
self.assertEqual(self.assertFired(d), expected)
class MockTestingClientTests(MockClientTests):
"""
Test the conveniences provided by the testing client
"""
def test_transport_exposed(self):
"""
The transport exposed is the underlying twisted transport, if it exists
"""
client = TestingCQLClient(self.endpoint, 'meh')
self.assertEqual(client.transport, None) # has not connected yet
self.assertFired(client.describe_version())
self.assertIs(client.transport, self.twisted_transport)
def test_pause(self):
"""
When pausing, stop reading and stop writing on the transport are called
if the transport exists.
"""
client = TestingCQLClient(self.endpoint, 'meh')
client.pause()
self.assertEqual(len(self.twisted_transport.stopReading.mock_calls), 0)
self.assertEqual(len(self.twisted_transport.stopWriting.mock_calls), 0)
self.assertFired(client.describe_version())
client.pause()
        self.twisted_transport.stopReading.assert_called_once_with()
        self.twisted_transport.stopWriting.assert_called_once_with()
def test_resume(self):
"""
When resuming, start reading and start writing on the transport are
called if the transport exists.
"""
        client = TestingCQLClient(self.endpoint, 'meh')
        client.resume()
        self.assertEqual(len(self.twisted_transport.startReading.mock_calls),
                         0)
        self.assertEqual(len(self.twisted_transport.startWriting.mock_calls),
                         0)
        self.assertFired(client.describe_version())
        client.resume()
        self.twisted_transport.startReading.assert_called_once_with()
        self.twisted_transport.startWriting.assert_called_once_with()
# class FaultTestCase(BaseTestCase):
# def setUp(self):
# self.client = CqlClient(TCP4ClientEndpoint(reactor, '127.0.0.1', 9160), 'blah')
# def test_vers(self):
# d = self.client.describe_version()
# def printR(r):
# print r
# d.addCallback(printR)
# return d
# def test_cql(self):
# d = self.client.execute("SELECT * FROM blah;", {})
# def printQ(r):
# print r
# d.addCallback(printQ)
# return d
[record boundary: per-file quality-signal columns omitted] next file: drawer.py | repo: jarekwg/crossword-packer | lang: Python | size: 3,386 bytes | license: MIT | sha: ebf2bc1d88e8d3404f1439f8fb4400bf3874e4c0
import re
from exceptions import WordPlacementConflict
from constants import ACROSS, DOWN
def score_placements(placements, display=False):
dimensions = [
min([x for x, y, dir in placements.values()]),
min([y for x, y, dir in placements.values()]),
max([placement[0] + len(word) for word, placement in placements.items() if placement[2] == ACROSS] + [x + 1 for x, y, dir in placements.values()]),
max([placement[1] + len(word) for word, placement in placements.items() if placement[2] == DOWN] + [y + 1 for x, y, dir in placements.values()]),
]
width = dimensions[2] - dimensions[0]
height = dimensions[3] - dimensions[1]
x_offset = dimensions[0]
y_offset = dimensions[1]
lines = []
for _ in range(height):
lines.append('.' * width)
numintersections = 0
for word, placement in placements.items():
x = placement[0] - x_offset
y = placement[1] - y_offset
if placement[2] == ACROSS:
# If letters before or after aren't empty, bail out.
if (placement[0] - 1 >= dimensions[0] and lines[y][x - 1] != '.') or (placement[0] + len(word) < dimensions[2] and lines[y][x + len(word)] != '.'):
raise WordPlacementConflict
# If incoming letters don't match existing letters, bail out.
if re.match(lines[y][x:x + len(word)], word) is None:
raise WordPlacementConflict
# Check neighbouring rows. Bail out if there's something in them for words that aren't intersecting.
for row_offset in [-1, 1]:
if dimensions[1] <= placement[1] + row_offset < dimensions[3]:
for i, c in enumerate(lines[y + row_offset][x:x + len(word)]):
if c != '.' and lines[y][x + i] == '.':
raise WordPlacementConflict
# Increment numintersections for every matching existing letter (ie. intersection)
numintersections += len(set(lines[y][x:x + len(word)].replace('.', '')))
lines[y] = lines[y][:x] + word + lines[y][x + len(word):]
else:
# If letters before or after aren't empty, bail out.
if (placement[1] - 1 >= dimensions[1] and lines[y - 1][x] != '.') or (placement[1] + len(word) < dimensions[3] and lines[y + len(word)][x] != '.'):
raise WordPlacementConflict
for i in range(len(word)):
# If incoming letter doesn't match existing letter, bail out.
if re.match(lines[y + i][x], word[i]) is None:
raise WordPlacementConflict
# Check neighbouring columns. Bail out if there's something in them for words that aren't intersecting.
for col_offset in [-1, 1]:
if dimensions[0] <= placement[0] + col_offset < dimensions[2]:
if lines[y + i][x + col_offset] != '.' and lines[y + i][x] == '.':
raise WordPlacementConflict
# Increment numintersections if we're matching existing letter (ie. intersection)
numintersections += lines[y + i][x] != '.'
lines[y + i] = lines[y + i][:x] + word[i] + lines[y + i][x + 1:]
if display:
print('\n'.join(lines) + '\n')
return (lines, numintersections, width * height)
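# Minimal usage sketch: two words sharing their first letter at the origin.
# (Values are illustrative; a WordPlacementConflict is raised for invalid layouts.)
#
#   placements = {'cat': (0, 0, ACROSS), 'car': (0, 0, DOWN)}
#   lines, intersections, area = score_placements(placements, display=True)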
[record boundary: per-file quality-signal columns omitted] next file: src/analyse/bubble_map.py | repo: timtroendle/geographic-scale | lang: Python | size: 4,200 bytes | license: MIT | sha: ebf45563a2d56576081e640ac1564e55a2546dba
import numpy as np
import shapely
import geopandas as gpd
import xarray as xr
import matplotlib.pyplot as plt
import seaborn as sns
EPSG_3035_PROJ4 = "+proj=laea +lat_0=52 +lon_0=10 +x_0=4321000 +y_0=3210000 +ellps=GRS80 +units=m +no_defs "
GREY = "#C0C0C0"
BLUE = "#4F6DB8"
YELLOW = "#FABC3C"
SUPPLY_TECHS = [
"hydro_reservoir", "hydro_run_of_river", "open_field_pv",
"roof_mounted_pv", "wind_offshore", "wind_onshore_competing",
"wind_onshore_monopoly"
]
DEMAND_TECH = "demand_elec"
MAP_MIN_X = 2200000
MAP_MIN_Y = 1400000
MAP_MAX_X = 6300000
MAP_MAX_Y = 5500000
def bubble_map(path_to_shapes, path_to_continent_shape, scenario, resolution_km, colour, markersize,
path_to_results, path_to_output):
colour = {"yellow": YELLOW, "blue": BLUE}[colour]
continent = (
gpd
.read_file(path_to_continent_shape)
.to_crs(EPSG_3035_PROJ4)
.rename(columns={"id": "locs"})
.set_index("locs")
.rename(index=lambda idx: idx.replace(".", "-"))
)
shapes = read_shapes(path_to_shapes, path_to_results, scenario)
points = points_on_shape(continent.geometry.iloc[0], resolution_km2=resolution_km)
points = generation_per_point(points, shapes)
fig = plt.figure(figsize=(8, 8))
ax = fig.subplots(1, 1)
continent.plot(ax=ax, color=GREY, alpha=0.2)
points.plot(ax=ax, color=colour, markersize=points["generation"] if markersize == "gen" else int(markersize))
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim(MAP_MIN_X, MAP_MAX_X)
ax.set_ylim(MAP_MIN_Y, MAP_MAX_Y)
sns.despine(fig=fig, top=True, bottom=True, left=True, right=True)
fig.savefig(path_to_output)
def read_shapes(path_to_shapes, path_to_results, scenario):
shapes = (
gpd
.read_file(path_to_shapes)
.to_crs(EPSG_3035_PROJ4)
.rename(columns={"id": "locs"})
.set_index("locs")
.rename(index=lambda idx: idx.replace(".", "-"))
)
ds = xr.open_dataset(path_to_results)
demand_twh = (
ds
.carrier_con
.sel(techs=DEMAND_TECH, scenario=scenario)
.to_series()
.reindex(shapes.index)
.div(1e6)
.mul(-1)
)
generation_twh = (
ds
.carrier_prod
.sel(techs=SUPPLY_TECHS, scenario=scenario)
.sum("techs")
.to_series()
.reindex(shapes.index)
.div(1e6)
)
shapes["generation"] = generation_twh / demand_twh
return shapes
def generation_per_point(points, shapes):
points = gpd.sjoin(
gpd.GeoDataFrame(geometry=points),
shapes,
how="left",
op="within"
)
points.generation.fillna(value=0, inplace=True)
points.index_right.fillna(value=0, inplace=True)
points["generation"] = points.groupby("index_right").generation.transform(lambda x: x / x.count())
max_value = 100
points["generation"] = points["generation"] * 10
points["generation"].where(points["generation"] < max_value, max_value, inplace=True)
return points
def points_on_shape(shape_3035, resolution_km2):
x_min, y_min, x_max, y_max = shape_3035.bounds
all_points = [
shapely.geometry.Point(x, y)
for x in np.arange(start=x_min, stop=x_max, step=resolution_km2 * 1000)
for y in np.arange(start=y_min, stop=y_max, step=resolution_km2 * 1000)
]
simplification_strength = resolution_km2 * 1000 / 20
surface_area = (
shape_3035
.simplify(simplification_strength)
)
prepared_shape = shapely.prepared.prep(surface_area)
return gpd.GeoSeries(
list(filter(
lambda point: prepared_shape.intersects(point),
all_points
)),
crs=EPSG_3035_PROJ4
)
if __name__ == "__main__":
bubble_map(
path_to_shapes=snakemake.input.shapes,
path_to_continent_shape=snakemake.input.continent_shape,
scenario=snakemake.wildcards.scenario,
colour=snakemake.wildcards.colour,
markersize=snakemake.wildcards.markersize,
resolution_km=snakemake.params.resolution_km,
path_to_results=snakemake.input.results,
path_to_output=snakemake.output[0]
)
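# Note: Snakemake injects the global `snakemake` object when this script runs as a
# rule; outside Snakemake, bubble_map(...) can be called directly with explicit
# paths and parameters.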
[record boundary: per-file quality-signal columns omitted] next file: GUI/my_lib/factory.py | repo: EnviableYapper0/FMachineSchedulerPL | lang: Python | size: 4,875 bytes | license: MIT | sha: ebf5ca4f90a237385342b586d5c1e142847a2572
from . import machine as m
from . import machine_calculator as mc
from . import my_time as mt
class Factory:
def __init__(self, open_time=0.00, close_time=24.00):
self.open_time = open_time
self.close_time = close_time
self.machine_id_map = {}
self.machines = []
    def get_operation_time(self):
        print("Operation time")
        minutes = mt.distance_between_time_in_minute(self.close_time, self.open_time)
        print(minutes)
        return minutes
    def get_total_machine_work_time(self):
        print("Total machine time")
        total = 0  # avoid shadowing the built-in sum()
        for machine_id in self.machines:
            machine = self.machine_id_map[machine_id]
            total += machine.get_duration_minutes()
        print(total)
        return total
def set_time(self,open_time,close_time):
self.open_time = open_time
self.close_time = close_time
def add_machine(self, machine):
self.machines.append(machine.id)
self.machine_id_map[machine.id] = machine
def remove_machine(self, index):
id = self.machines[index]
del self.machines[index]
del self.machine_id_map[id]
def get_machine_by_id(self, id):
return self.machine_id_map[id]
def get_peak_minutes(self):
peak_time_list = [[0.00, 9.00], [9.00, 13.30], [13.30, 15.30], [15.30, 22.00], [22.00, 24.00]]
found_open = False
found_close = False
for i in range(0, len(peak_time_list)):
start_time = peak_time_list[i][0]
end_time = peak_time_list[i][1]
if self.open_time >= start_time and self.open_time <= end_time:
peak_time_list[i][0] = self.open_time
found_open = True
if self.close_time >= start_time and self.close_time <= end_time:
peak_time_list[i][1] = self.close_time
found_close = True
continue
if not found_open:
peak_time_list[i][0] = -1
peak_time_list[i][1] = -1
if found_close:
peak_time_list[i][0] = -1
peak_time_list[i][1] = -1
print(peak_time_list)
no_peak_time_1 = 0
no_peak_time_2 = 0
if peak_time_list[0][0] != -1:
no_peak_time_1 = mt.distance_between_time_in_minute(peak_time_list[0][1],peak_time_list[0][0])
if peak_time_list[4][0] != -1:
no_peak_time_2 = mt.distance_between_time_in_minute(peak_time_list[4][1],peak_time_list[4][0])
total_no_peak_time = no_peak_time_1 + no_peak_time_2
peak_time_1 = 0
peak_time_2 = 0
if peak_time_list[1][0] != -1:
peak_time_1 = mt.distance_between_time_in_minute(peak_time_list[1][1],peak_time_list[1][0])
if peak_time_list[3][0] != -1:
peak_time_2 = mt.distance_between_time_in_minute(peak_time_list[3][1],peak_time_list[3][0])
total_peak_time = peak_time_1 + peak_time_2
return total_no_peak_time, total_peak_time
def get_machine_list(self):
machine_list = [self.get_machine_by_id(id) for id in self.machines]
return machine_list
def get_sorted_machines_by_kwh(self):
m_calc = mc.MachineCalculator()
sorted_machines = m_calc.get_sorted_machines_by_kwh(self.get_machine_list())
return sorted_machines
def get_sorted_machines_by_peak(self):
# Get sorted machines first then split it
sorted_machine_dicts = self.get_sorted_machines_by_kwh()
sorted_machine = []
for m_dict in sorted_machine_dicts:
machine = self.machine_id_map[m_dict["id"]]
sorted_machine.append(machine)
no_peak_min, peak_min = self.get_peak_minutes()
print(peak_min,no_peak_min)
m_calc = mc.MachineCalculator()
no_peak,peak,crit_peak = m_calc.get_sorted_machines_by_peak(sorted_machine, peak_min, no_peak_min)
return (no_peak, peak, crit_peak)
def get_time_table_list(self):
no_peak, peak, crit_peak = self.get_sorted_machines_by_peak()
m_calc = mc.MachineCalculator()
time_table_list = m_calc.get_time_table(no_peak,peak,crit_peak,self.open_time)
for machine_data in time_table_list:
# name
machine_data[0] = self.machine_id_map[int(machine_data[0])].name
# duration
machine_data[1] = int(machine_data[1])
# kw
machine_data[2] = float(machine_data[2])
# start
machine_data[3] = mt.float_to_datetime(mt.minutes_to_float(int(machine_data[3])))
# end
machine_data[4] = mt.float_to_datetime(mt.minutes_to_float(int(machine_data[4])))
print(time_table_list)
return time_table_list
def generate_nodes(self):
pass
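# Minimal usage sketch (machine objects come from the sibling `machine` module and
# are assumed to expose at least .id and .get_duration_minutes()):
#
#   factory = Factory(open_time=8.00, close_time=20.00)
#   factory.add_machine(some_machine)
#   print(factory.get_time_table_list())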
[record boundary: per-file quality-signal columns omitted] next file: places/models.py | repo: amureki/lunchtime-with-channels | lang: Python | size: 1,898 bytes | license: MIT | sha: ebfa57fc6af077b8e484bb5107bce4b51e06f9f3
from django.conf import settings
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django_extensions.db.models import TimeStampedModel
from stdimage import StdImageField
from stdimage.utils import UploadToUUID
class Place(TimeStampedModel):
name = models.CharField(_('Name'), max_length=255)
image = StdImageField(
_('Image'),
upload_to=UploadToUUID(path='places'),
variations=settings.IMAGE_THUMBNAIL_VARIATIONS,
blank=True, null=True)
address = models.CharField(_('Address'), max_length=255)
class Meta:
ordering = ('-created',)
def __str__(self):
return self.name
@property
def today_rating(self):
now = timezone.now()
return self.vote_set.filter(created__date__gte=now).count()
@property
def voters(self):
now = timezone.now()
voters = self.vote_set \
.filter(created__date__gte=now) \
.values_list('username', flat=True)
return sorted(list(voters)) or ['Nobody']
def voted_by(self, username):
now = timezone.now()
return self.vote_set.filter(created__date__gte=now,
username=username).exists()
@classmethod
def most_wanted(cls):
now = timezone.now()
wanted = cls.objects \
.filter(vote__created__date__gte=now) \
.distinct() \
.annotate(models.Count('vote')) \
.filter(vote__count__gt=0) \
.order_by('-vote__count')
if wanted.first():
top_score = wanted.first().vote__count
most_wanted = wanted \
.filter(vote__count=top_score) \
.values_list('name', flat=True)
else:
most_wanted = ['Nothing', ]
return ', '.join(most_wanted)
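# Behaviour derived from the queries above: Place.most_wanted() returns a
# comma-separated string of the current day's top-voted place names, or 'Nothing'
# when no votes have been cast.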
[record boundary: per-file quality-signal columns omitted] next file: models/process_dataset.py | repo: Aremaki/MscProjectNMR | lang: Python | size: 2,363 bytes | license: MIT | sha: ebfc9f2828a65b31b16c43b42091b7e322b73651
import tensorflow as tf
def shuffle_and_batch_dataset(dataset, batch_size, shuffle_buffer=None):
"""
This function is used to shuffle and batch the dataset, using shuffle_buffer
and batch_size.
"""
if shuffle_buffer is not None:
dataset = dataset.shuffle(shuffle_buffer)
dataset = dataset.batch(batch_size)
return dataset
def split_dataset(dataset, train_prop=0.8, val_prop=0.2):
"""
This function takes in the loaded TFRecordDataset, and builds training, validation
and test TFRecordDataset objects. The test_prop is automatically set up to be equal to
1 - (train_prop + val_prop).
"""
dataset_size = sum(1 for _ in dataset)
train_size = int(train_prop * dataset_size)
val_size = int(val_prop * dataset_size)
train_dataset = dataset.take(train_size)
remaining_dataset = dataset.skip(train_size)
val_dataset = remaining_dataset.take(val_size)
test_dataset = remaining_dataset.skip(val_size)
return train_dataset, val_dataset, test_dataset
def process_dataset(dataset, batch_sizes=None, shuffle_buffers=None, train_prop=0.8, val_prop=0.2):
"""
:param dataset: TFRecordDataset object
:param batch_sizes: list of batch_size for train set, validation set and test set
:param shuffle_buffers: an integer shuffle_buffer for the train set only
:param train_prop: the ratio between the full dataset size and the train set size
:param val_prop: the ratio between the full dataset size and the validation set size
:return: fully processed train, validation and test TFRecordDataset
"""
    if batch_sizes is None:
        batch_sizes = [64, 64, 64]
    # shuffle_buffers may legitimately be None (no shuffling), so only reject
    # non-None, non-integer values; the old check rejected the default.
    if shuffle_buffers is not None and not isinstance(shuffle_buffers, int):
        return "Error: shuffle_buffers should be an integer"
    if len(batch_sizes) != 3:
        return "Error: batch_sizes should have a length of 3."
train_dataset, val_dataset, test_dataset = split_dataset(dataset, train_prop, val_prop)
train_dataset = shuffle_and_batch_dataset(train_dataset, batch_sizes[0], shuffle_buffers)
train_dataset = train_dataset.prefetch(tf.data.experimental.AUTOTUNE)
val_dataset = val_dataset.batch(batch_sizes[1]).prefetch(tf.data.experimental.AUTOTUNE)
test_dataset = test_dataset.batch(batch_sizes[2]).prefetch(tf.data.experimental.AUTOTUNE)
return train_dataset, val_dataset, test_dataset
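# Minimal usage sketch (`raw_dataset` is an assumed, already-parsed TFRecordDataset):
#
#   train_ds, val_ds, test_ds = process_dataset(
#       raw_dataset, batch_sizes=[64, 64, 64], shuffle_buffers=1000,
#       train_prop=0.7, val_prop=0.2)  # the remaining 10% becomes the test set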
[record boundary: per-file quality-signal columns omitted] next file: scratch/eyy/debug/bad_pair_analysis.py | repo: sasgc6/pysmurf | lang: Python | size: 1,648 bytes | license: BSD-3-Clause-LBNL | sha: 2300582ed8688ca839e05903662437f7a910f9a9
import numpy as np
import matplotlib.pyplot as plt
import os
f_cutoff = .25
df_cutoff = .05
data_dir = '/data/smurf_data/20181214/1544843999/outputs'
f2, df2 = np.load(os.path.join(data_dir, 'band3_badres.npy'))
f2p, df2p = np.load(os.path.join(data_dir, 'band3_badpair.npy'))
m = np.ravel(np.where(np.logical_or(f2 > f_cutoff, df2 > df_cutoff)))
f2[m] = np.nan
df2[m] = np.nan
f2p[m,0] = np.nan
f2p[m-1,1] = np.nan
df2p[m,0] = np.nan
df2p[m-1,1] = np.nan
n, _ = np.shape(df2p)
xp = np.arange(1,n)
fig, ax = plt.subplots(2, 2, sharex=True, figsize=(8,7))
ax[0,0].plot(f2, color='k')
ax[0,0].plot(f2p[:-1,0])
ax[0,0].plot(xp, f2p[:-1, 1])
ax[0,0].set_title('f')
ax[0,1].plot(df2, color='k', label='Solo')
ax[0,1].plot(df2p[:-1,0], label='R on')
ax[0,1].plot(xp, df2p[:-1,1], label='L on')
ax[0,1].set_title('df')
ax[0,1].legend()
delta_ron_f2 = f2[:-1] - f2p[:-1,0] # right on
delta_lon_f2 = f2[1:] - f2p[:-1,1]  # left on
ax[1,0].plot(delta_ron_f2)
ax[1,0].plot(xp, delta_lon_f2)
delta_ron_df2 = df2[:-1] - df2p[:-1,0] # right on
delta_lon_df2 = df2[1:] - df2p[:-1,1]  # left on
ax[1,1].plot(delta_ron_df2)
ax[1,1].plot(xp, delta_lon_df2)
ax[1,0].set_xlabel('Res #')
ax[1,1].set_xlabel('Res #')
fig, ax = plt.subplots(1,2, figsize=(8, 4))
bins = np.arange(-.1, 0.06, .01)
hist_mask_r = np.where(~np.isnan(delta_ron_df2))
hist_mask_l = np.where(~np.isnan(delta_lon_df2))
ax[1].hist(delta_ron_df2[hist_mask_r], bins=bins,
histtype='step', label='R on')
ax[1].hist(delta_lon_df2[hist_mask_l], bins=bins,
histtype='step', label='L on')
ax[1].axvline(0, color='k', linestyle=':')
ax[1].legend()
# ax[2,1].hist(delta_lon_df2[])
[record boundary: per-file quality-signal columns omitted] next file: examples/pytorch/eager/blendcnn/utils.py | repo: intelkevinputnam/lpot-docs | lang: Python | size: 2,794 bytes | license: Apache-2.0 | sha: 230125cca40653427f41d2b5c28c03de5e593aca
# Copyright 2018 Dong-Hyun Lee, Kakao Brain.
#
# Copyright (c) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Utils Functions """
import os
import random
import logging
import json
import numpy as np
import torch
def set_seeds(seed):
"set random seeds"
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
def get_device():
"get device (CPU or GPU)"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count()
print("%s (%d GPUs)" % (device, n_gpu))
return device
def split_last(x, shape):
"split the last dimension to given shape"
shape = list(shape)
assert shape.count(-1) <= 1
if -1 in shape:
shape[shape.index(-1)] = int(x.size(-1) / -np.prod(shape))
return x.view(*x.size()[:-1], *shape)
def merge_last(x, n_dims):
"merge the last n_dims to a dimension"
s = x.size()
assert n_dims > 1 and n_dims < len(s)
return x.view(*s[:-n_dims], -1)
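# Illustrative shapes (not part of the original file): split_last on a
# tensor of shape (2, 12) with shape=(3, -1) yields (2, 3, 4); applying
# merge_last with n_dims=2 to that result restores (2, 12).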
def find_sublist(haystack, needle):
"""Return the index at which the sequence needle appears in the
sequence haystack, or -1 if it is not found, using the Boyer-
Moore-Horspool algorithm. The elements of needle and haystack must
be hashable.
https://codereview.stackexchange.com/questions/19627/finding-sub-list
"""
h = len(haystack)
n = len(needle)
skip = {needle[i]: n - i - 1 for i in range(n - 1)}
i = n - 1
while i < h:
for j in range(n):
if haystack[i - j] != needle[-j - 1]:
i += skip.get(haystack[i], n)
break
else:
return i - n + 1
return -1
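# Illustrative values (not part of the original file):
# find_sublist([1, 2, 3, 4, 2, 3], [2, 3]) returns 1 (index of first match),
# find_sublist([1, 2, 3], [9]) returns -1 (no match).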
def get_logger(name, log_path):
"get logger"
logger = logging.getLogger(name)
    formatter = logging.Formatter(
        '[ %(levelname)s|%(filename)s:%(lineno)s] %(asctime)s > %(message)s')
    if not os.path.isfile(log_path):
        # create the log file up front and close the handle instead of leaking it
        open(log_path, "w+").close()
    fileHandler = logging.FileHandler(log_path)
    fileHandler.setFormatter(formatter)
logger.addHandler(fileHandler)
#streamHandler = logging.StreamHandler()
#streamHandler.setFormatter(fomatter)
#logger.addHandler(streamHandler)
logger.setLevel(logging.DEBUG)
return logger
| 28.222222
| 77
| 0.65927
| 409
| 2,794
| 4.449878
| 0.422983
| 0.032967
| 0.014286
| 0.017582
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014767
| 0.224409
| 2,794
| 98
| 78
| 28.510204
| 0.825104
| 0.403006
| 0
| 0
| 0
| 0.018182
| 0.120848
| 0.021764
| 0
| 0
| 0
| 0
| 0.036364
| 1
| 0.109091
| false
| 0
| 0.109091
| 0
| 0.327273
| 0.018182
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
23012fe006d829b36579833bc95d73785791bbf3
| 1,983
|
py
|
Python
|
models/Nets.py
|
lorflea/FederatedLearningMLDL2021
|
453d273c14a06eb6d2522c1b9fe877b43212ab76
|
[
"MIT"
] | 1
|
2021-11-22T01:20:29.000Z
|
2021-11-22T01:20:29.000Z
|
models/Nets.py
|
lorflea/FederatedLearningMLDL2021
|
453d273c14a06eb6d2522c1b9fe877b43212ab76
|
[
"MIT"
] | null | null | null |
models/Nets.py
|
lorflea/FederatedLearningMLDL2021
|
453d273c14a06eb6d2522c1b9fe877b43212ab76
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python version: 3.6
import torch
from torch import nn
import torch.nn.functional as F
class AlexNet(nn.Module):
def __init__(self, num_classes=10):
super(AlexNet, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2),
nn.Conv2d(64, 192, kernel_size=3, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2),
nn.Conv2d(192, 384, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(384, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2)
)
self.fc_layers = nn.Sequential(
nn.Dropout(0.6),
nn.Linear(4096, 2048),
nn.ReLU(inplace=True),
nn.Dropout(0.6),
nn.Linear(2048, 2048),
nn.ReLU(inplace=True),
nn.Linear(2048, num_classes),
)
def forward(self, x):
conv_features = self.features(x)
flatten = conv_features.view(conv_features.size(0), -1)
fc = self.fc_layers(flatten)
return fc
class LeNet(nn.Module):
def __init__(self):
super(LeNet, self).__init__()
self.conv1 = nn.Conv2d(3, 6, kernel_size=5)
self.conv2 = nn.Conv2d(6, 16, kernel_size=5)
self.fc1 = nn.Linear(16*5*5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2)
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2)
x = x.view(x.size(0), -1)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
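# Minimal smoke test, added for illustration (assumes only torch from the
# imports above): LeNet expects 3x32x32 inputs, since two conv(k=5)/pool(2)
# stages reduce 32 -> 5 and 16*5*5 == 400 matches fc1's input size.
if __name__ == "__main__":
    logits = LeNet()(torch.rand(1, 3, 32, 32))
    print(logits.shape)  # torch.Size([1, 10])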
| 30.984375
| 65
| 0.547151
| 286
| 1,983
| 3.671329
| 0.241259
| 0.095238
| 0.086667
| 0.113333
| 0.412381
| 0.355238
| 0.277143
| 0.277143
| 0.230476
| 0.230476
| 0
| 0.087872
| 0.305598
| 1,983
| 64
| 66
| 30.984375
| 0.674655
| 0.031266
| 0
| 0.283019
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.075472
| false
| 0
| 0.056604
| 0
| 0.207547
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
23045d3d5a94dd7bbdb73152afab227894299c52
| 3,137
|
py
|
Python
|
app.py
|
jjchshayan/heroku
|
7181631b52057a92d751e1756b7b422dfd8825c0
|
[
"MIT"
] | null | null | null |
app.py
|
jjchshayan/heroku
|
7181631b52057a92d751e1756b7b422dfd8825c0
|
[
"MIT"
] | null | null | null |
app.py
|
jjchshayan/heroku
|
7181631b52057a92d751e1756b7b422dfd8825c0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from telegram.ext import Updater
from telegram import bot
updater = Updater(token='660812730:AAEGP-xXkMKoplHR6YsUECqXB8diNgvlfbs')
dispatcher = updater.dispatcher
import logging
import requests
state = 1
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
def start(bot, update):
    global state  # without this, the assignment below only binds a local variable
    # Persian: "Hello, welcome! Please attach the photo you have taken."
    bot.send_message(chat_id=update.message.chat_id, text="سلام خوش آمدید لطفا عکس گرفته شده را اضافه نمایید")
    state = 2
from telegram.ext import CommandHandler
start_handler = CommandHandler('start', start)
dispatcher.add_handler(start_handler)
def echo(bot, update):
#my_id = 504335145
    try:
        # print(update)
        user_id = update['message']['chat']['id']
        user_name = update['message']['chat']['first_name']
        file_id = bot.get_file(update['message']['photo'][2]['file_id'])
        url = file_id["file_path"]
        r = requests.post("http://shayan2020.ir/Api/Telegram/UploadData.php",
                          data={'url': url, 'filename': str(user_id) + '_' + str(user_name)})
        if r.text == "ok":
            # Persian: "Thank you! To add another photo, type /start again."
            bot.send_message(chat_id=update.message.chat_id, text="با تشکر از شما برای اضافه کردن عکسی دیگر دگمه /start را مجددا تایپ نمایید")
        else:
            print(r.text)
            # Persian: "Error, please try again."
            bot.send_message(chat_id=update.message.chat_id, text="خطا لطفا مجددا تلاش نمایید")
    except Exception:  # a bare except would also swallow SystemExit/KeyboardInterrupt
        print(update)
        # Persian: "Please attach photos only."
        bot.send_message(chat_id=update.message.chat_id, text="لطفا فقط عکس اضافه کنید")
from telegram.ext import MessageHandler, Filters
echo_handler = MessageHandler(Filters.all, echo)
dispatcher.add_handler(echo_handler)
# def caps(bot, update, args=''):
# text_caps = ' '.join(args).upper()
# bot.send_message(chat_id=update.message.chat_id, text=text_caps)
#
#
# caps_handler = CommandHandler('caps', caps, pass_args=True)
# dispatcher.add_handler(caps_handler)
# from telegram import InlineQueryResultArticle, InputTextMessageContent
#
#
# def inline_caps(bot, update):
# query = update.inline_query.query
# if not query:
# return
# results = list()
# results.append(
# InlineQueryResultArticle(
# id=query.upper(),
# title='Caps',
# input_message_content=InputTextMessageContent(query.upper())
# )
# )
# bot.answer_inline_query(update.inline_query.id, results)
# from telegram.ext import InlineQueryHandler
#
# inline_caps_handler = InlineQueryHandler(inline_caps)
# dispatcher.add_handler(inline_caps_handler)
def unknown(bot, update):
bot.send_message(chat_id=update.message.chat_id, text="Sorry, I didn't understand that command.")
unknown_handler = MessageHandler(Filters.command, unknown)
dispatcher.add_handler(unknown_handler)
#
# TOKEN = '545193892:AAF-i-kxjJBeEiVXL1PokHCCEGNnQ1sOXFo'
# HOST = 'shayantt.herokuapp.com' # Same FQDN used when generating SSL Cert
# PORT = 8443
# updater.start_webhook(listen="0.0.0.0",
# port=PORT,
# # url_path=TOKEN)
# updater.bot.set_webhook("https://shayantt.herokuapp.com/" + TOKEN)
# updater.idle()
updater.start_polling()
| 29.87619
| 140
| 0.6927
| 401
| 3,137
| 5.264339
| 0.369077
| 0.072951
| 0.080057
| 0.063003
| 0.143534
| 0.133586
| 0.133586
| 0.133586
| 0.133586
| 0.133586
| 0
| 0.018189
| 0.176283
| 3,137
| 104
| 141
| 30.163462
| 0.798762
| 0.399426
| 0
| 0
| 0
| 0
| 0.237371
| 0.024443
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081081
| false
| 0
| 0.162162
| 0
| 0.243243
| 0.054054
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
23088bb0c48cd2efc5f4f5582dd8f9fb037c941d
| 3,682
|
py
|
Python
|
src/sequel/hierarchical_search/functional.py
|
simone-campagna/sequel
|
a96e0f8b5000f8d0174f97f772cca5ac8a140acd
|
[
"Apache-2.0"
] | null | null | null |
src/sequel/hierarchical_search/functional.py
|
simone-campagna/sequel
|
a96e0f8b5000f8d0174f97f772cca5ac8a140acd
|
[
"Apache-2.0"
] | null | null | null |
src/sequel/hierarchical_search/functional.py
|
simone-campagna/sequel
|
a96e0f8b5000f8d0174f97f772cca5ac8a140acd
|
[
"Apache-2.0"
] | null | null | null |
"""
Search integral/derivative algorithm class
"""
from ..items import Items
from ..sequence import integral, derivative, summation, product
from ..utils import sequence_matches
from .base import RecursiveSearchAlgorithm
__all__ = [
    "SearchSum",
    "SearchProd",
    "SearchIntegral",
    "SearchDerivative",
]
class SearchSum(RecursiveSearchAlgorithm):
"""Search for sums"""
__min_items__ = 3
__accepts_undefined__ = False
def __init__(self, sub_algorithm, name=None):
super().__init__(sub_algorithm=sub_algorithm, name=name)
def _impl_call(self, catalog, items, info, options):
s_items = []
last = 0
for item in items:
value = item - last
s_items.append(value)
last = item
sub_items = Items(s_items)
# print("sum:", [int(x) for x in sub_items])
info = info.sub(rank=1)
for sequence, sub_info in self.sub_search(catalog, sub_items, info, options):
seq = summation(sequence)
if sequence_matches(seq, items):
yield seq, sub_info
class SearchProd(RecursiveSearchAlgorithm):
"""Search for prods"""
__min_items__ = 3
__accepts_undefined__ = False
def __init__(self, sub_algorithm, name=None):
super().__init__(sub_algorithm=sub_algorithm, name=name)
def _impl_call(self, catalog, items, info, options):
s_items = []
last = 1
for item in items:
if last == 0:
value = 0
else:
value, mod = divmod(item, last)
if mod != 0:
return
s_items.append(value)
last = item
sub_items = Items(s_items)
# print("prod:", [int(x) for x in items], "->", [int(x) for x in sub_items])
info = info.sub(rank=1)
for sequence, sub_info in self.sub_search(catalog, sub_items, info, options):
seq = product(sequence)
if sequence_matches(seq, items):
yield seq, sub_info
class SearchIntegral(RecursiveSearchAlgorithm):
"""Search for integrals"""
__min_items__ = 3
__accepts_undefined__ = False
def __init__(self, sub_algorithm, name=None):
super().__init__(sub_algorithm=sub_algorithm, name=name)
def _impl_call(self, catalog, items, info, options):
if items.derivative:
sub_items = Items(items.derivative)
info = info.sub(rank=1)
for sequence, sub_info in self.sub_search(catalog, sub_items, info, options):
seq = integral(sequence, start=items[0]).simplify()
#print("dd..", derivative, sequence, [x for x, _ in zip(sequence, derivative)])
#print("dd->", items, seq, [x for x, _ in zip(seq, items)])
if sequence_matches(seq, items):
yield seq, sub_info
class SearchDerivative(RecursiveSearchAlgorithm):
"""Search for derivatives"""
__min_items__ = 3
__accepts_undefined__ = False
def __init__(self, sub_algorithm, name=None):
super().__init__(sub_algorithm=sub_algorithm, name=name)
def _impl_call(self, catalog, items, info, options):
sub_items = Items(items.make_integral())
info = info.sub(rank=1)
for sequence, sub_info in self.sub_search(catalog, sub_items, info, options):
#print("ii..", integral, sequence, [x for x, _ in zip(sequence, integral)])
#print("ii->", items, seq, [x for x, _ in zip(seq, items)])
seq = derivative(sequence).simplify()
if sequence_matches(seq, items):
yield seq, sub_info
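# Hypothetical usage sketch: each class here wraps another search algorithm
# and is constructed as, e.g., SearchDerivative(sub_algorithm=base_search),
# where `base_search` stands for any algorithm object this package provides.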
| 33.171171
| 95
| 0.608637
| 433
| 3,682
| 4.866051
| 0.163972
| 0.068344
| 0.06075
| 0.023256
| 0.63028
| 0.625534
| 0.625534
| 0.600854
| 0.600854
| 0.55719
| 0
| 0.005299
| 0.282455
| 3,682
| 110
| 96
| 33.472727
| 0.792203
| 0.137425
| 0
| 0.605263
| 0
| 0
| 0.018454
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105263
| false
| 0
| 0.052632
| 0
| 0.328947
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
230ca0bc145d70340fa1510e5f32fb9e40355ade
| 1,662
|
py
|
Python
|
tests/image/segmentation/test_backbones.py
|
lillekemiker/lightning-flash
|
a047330ba75486355378f22cbebfd053c3d63c08
|
[
"Apache-2.0"
] | null | null | null |
tests/image/segmentation/test_backbones.py
|
lillekemiker/lightning-flash
|
a047330ba75486355378f22cbebfd053c3d63c08
|
[
"Apache-2.0"
] | null | null | null |
tests/image/segmentation/test_backbones.py
|
lillekemiker/lightning-flash
|
a047330ba75486355378f22cbebfd053c3d63c08
|
[
"Apache-2.0"
] | null | null | null |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from pytorch_lightning.utilities import _BOLTS_AVAILABLE, _TORCHVISION_AVAILABLE
from flash.image.segmentation.backbones import SEMANTIC_SEGMENTATION_BACKBONES
@pytest.mark.parametrize(["backbone"], [
pytest.param("fcn_resnet50", marks=pytest.mark.skipif(not _TORCHVISION_AVAILABLE, reason="No torchvision")),
pytest.param("deeplabv3_resnet50", marks=pytest.mark.skipif(not _TORCHVISION_AVAILABLE, reason="No torchvision")),
pytest.param(
"lraspp_mobilenet_v3_large", marks=pytest.mark.skipif(not _TORCHVISION_AVAILABLE, reason="No torchvision")
),
pytest.param("unet", marks=pytest.mark.skipif(not _BOLTS_AVAILABLE, reason="No bolts")),
])
def test_image_classifier_backbones_registry(backbone):
img = torch.rand(1, 3, 32, 32)
backbone_fn = SEMANTIC_SEGMENTATION_BACKBONES.get(backbone)
backbone_model = backbone_fn(10, pretrained=False)
assert backbone_model
backbone_model.eval()
res = backbone_model(img)
if isinstance(res, dict):
res = res["out"]
assert res.shape[1] == 10
| 42.615385
| 118
| 0.760529
| 224
| 1,662
| 5.5
| 0.513393
| 0.048701
| 0.048701
| 0.068182
| 0.212662
| 0.193182
| 0.193182
| 0.193182
| 0.193182
| 0.193182
| 0
| 0.014768
| 0.144404
| 1,662
| 38
| 119
| 43.736842
| 0.851617
| 0.336342
| 0
| 0
| 0
| 0
| 0.110193
| 0.022957
| 0
| 0
| 0
| 0
| 0.090909
| 1
| 0.045455
| false
| 0
| 0.181818
| 0
| 0.227273
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
230cbf98d0fce9a1f8d3eb7ee8c52b62685cd185
| 6,972
|
py
|
Python
|
src/ExtractData.py
|
AntoineMeresse/Terminal-chart
|
eff66c32d78c394849176c7777bf7c203dbac5b3
|
[
"MIT"
] | null | null | null |
src/ExtractData.py
|
AntoineMeresse/Terminal-chart
|
eff66c32d78c394849176c7777bf7c203dbac5b3
|
[
"MIT"
] | null | null | null |
src/ExtractData.py
|
AntoineMeresse/Terminal-chart
|
eff66c32d78c394849176c7777bf7c203dbac5b3
|
[
"MIT"
] | null | null | null |
import sys
import re
from src.GenGraph import *
class ExtractData:
    def __init__(self, genGraph):
        # print("Init extractData.")
        self.datas = list()
        self.datasDefine = False
        self.file = "pipe"  # base case: data is read from stdin
        self.genGraph = genGraph
        self.separator = " "  # default separator
def setSeparator(self, sep):
"""
Method to change the separator, default is whitespace (" ")
Example(s):
>>> obj = ExtractData(GenGraph())
>>> obj.separator
' '
>>> obj.setSeparator(1)
Traceback (most recent call last):
...
AssertionError
>>> obj.setSeparator(",")
>>> obj.separator
','
"""
assert(type(sep)==str)
self.separator = sep
def data_from_pipe(self):
"""
return : list of lines. Line are string.
"""
return sys.stdin.readlines()
def data_from_file(self, filename):
"""
return : list of lines. Line are string.
"""
with open(filename,'r') as fl:
return fl.readlines()
def setFile(self, filename):
"""
Method to change file, default value of file is pipe.
Example(s):
>>> obj = ExtractData(GenGraph())
>>> obj.file
'pipe'
>>> obj.setFile(["datas/simpleDatas.txt"])
>>> obj.file
['datas/simpleDatas.txt']
"""
self.file = filename
def getData(self):
r"""
Method to ...
return : list of lines
Example(s):
>>> obj = ExtractData(GenGraph())
>>> obj.file = ["datas/simpleDatas.txt"] # Fichier d'exemple avec 13 lignes
>>> obj.getData()
[['Mois Temperature Moyenne\n', 'Janvier 2\n', 'Fevrier 3\n', 'Mars 4\n', 'Avril 12\n', 'Mai 14\n', 'Juin 21\n', 'Juillet 24\n', 'Aout 26 \n', 'Septembre 14\n', 'Octobre 15\n', 'Novembre 10\n', 'Decembre 0']]
"""
if(not self.datasDefine):
if(self.file == "pipe"):
print("PIPE")
self.datas.append(self.data_from_pipe())
else:
for elem in self.file:
if elem != '':
self.datas.append(self.data_from_file(elem))
self.genGraph.graphDatas.files.append(elem)
self.datasDefine = True
return self.datas
def skipFirstLine(self):
r"""
Method to skip first line of your file data
Example(s):
>>> obj = ExtractData(GenGraph())
>>> obj.file = ["datas/simpleDatas.txt"] # Fichier d'exemple avec 13 lignes
>>> obj.getData()
[['Mois Temperature Moyenne\n', 'Janvier 2\n', 'Fevrier 3\n', 'Mars 4\n', 'Avril 12\n', 'Mai 14\n', 'Juin 21\n', 'Juillet 24\n', 'Aout 26 \n', 'Septembre 14\n', 'Octobre 15\n', 'Novembre 10\n', 'Decembre 0']]
>>> firstelem = obj.datas[0]
>>> len(firstelem)
13
>>> obj.skipFirstLine()
>>> firstelem = obj.datas[0]
>>> len(firstelem)
12
"""
self.datas = self.getData()
for i in range(len(self.datas)):
            self.datas[i] = self.datas[i][1:]
def getCleanData(self,lign):
"""
Method to extract and create a clean data list.
        param lign : a string of data
return : a list of clean elements split by a separator
Example(s):
>>> obj = ExtractData(GenGraph())
>>> lign = "udev 4052132 0 4052132 0% /dev\\n"
>>> obj.getCleanData(lign)
['udev', '4052132', '0', '4052132', '0%', '/dev']
>>> obj.setSeparator(",")
>>> lign = "udev , 4052132 , 0 , 4052132 , 0% ,/dev\\n"
>>> obj.getCleanData(lign)
['udev', '4052132', '0', '4052132', '0%', '/dev']
"""
tmp = re.sub("\n+", "", lign)
splt = tmp.split(self.separator)
res = list()
for elem in splt:
            e = elem.strip()
            if e != "":  # skip empty and whitespace-only fields
                res.append(e)
return res
def extract_column(self, columnNumber):
"""
        param columnNumber : column number
return : a list
Example(s):
>>> obj = ExtractData(GenGraph())
>>> obj.file = ["datas/simpleDatas.txt", "datas/simpleDatas2.txt"]
        >>> obj.extract_column(4)  # error: column out of range
Traceback (most recent call last):
...
AssertionError
>>> obj.extract_column(0)
[['Mois', 'Janvier', 'Fevrier', 'Mars', 'Avril', 'Mai', 'Juin', 'Juillet', 'Aout', 'Septembre', 'Octobre', 'Novembre', 'Decembre'], ['Mois', 'Janvier', 'Fevrier', 'Mars', 'Avril', 'Mai', 'Juin', 'Juillet', 'Aout', 'Septembre', 'Octobre', 'Novembre', 'Decembre']]
>>> obj.extract_column(1)
[['Temperature', '2', '3', '4', '12', '14', '21', '24', '26', '14', '15', '10', '0'], ['Temperature', '4', '5', '6', '14', '16', '23', '26', '28', '16', '17', '12', '2']]
"""
datas = self.getData()
res = list()
for elem in datas:
tmp = list()
for lign in elem:
infos = self.getCleanData(lign)
assert(columnNumber <= len(infos))
e = (infos[columnNumber])
tmp += [e]
res.append(tmp)
return res
def extract_column_x(self,columnNumber):
"""
Method to extract datas for x axis in matplotlib
        param columnNumber : column number
Example(s):
>>> graph = GenGraph()
>>> obj = ExtractData(graph)
>>> obj.file = ["datas/simpleDatas.txt"]
>>> graph.graphDatas.getNames()
[]
>>> obj.extract_column_x([0])
>>> len(graph.graphDatas.getNames()[0])
13
"""
assert (type(columnNumber) == list)
for elem in columnNumber:
res = self.extract_column(elem)
for e in res:
self.genGraph.graphDatas.addNames(e)
def extract_column_y(self,columnNumber):
"""
Method to extract datas for y axis in matplotlib
        param columnNumber : column number
Example(s):
>>> graph = GenGraph()
>>> obj = ExtractData(graph)
>>> obj.file = ["datas/simpleDatas.txt"]
>>> graph.graphDatas.getValues()
[]
>>> obj.extract_column_y([0, 1])
>>> len(graph.graphDatas.getValues())
2
"""
assert(type(columnNumber) == list)
for elem in columnNumber:
res = self.extract_column(elem)
for e in res:
self.genGraph.graphDatas.addValues(e)
| 30.578947
| 270
| 0.49455
| 738
| 6,972
| 4.636856
| 0.227642
| 0.037989
| 0.038866
| 0.038574
| 0.524255
| 0.483928
| 0.44097
| 0.364115
| 0.364115
| 0.364115
| 0
| 0.03707
| 0.353844
| 6,972
| 227
| 271
| 30.713656
| 0.722531
| 0.510901
| 0
| 0.169014
| 0
| 0
| 0.006754
| 0
| 0
| 0
| 0
| 0
| 0.056338
| 1
| 0.15493
| false
| 0
| 0.042254
| 0
| 0.28169
| 0.014085
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
230de14d7e6fc08a01de2fd55c6b8f3b77dd5b56
| 4,456
|
py
|
Python
|
chemistry/compressibilities/optimize_compressibility_factor_sigmoid_minimum.py
|
davidson16807/tectonics-approximations
|
f69570fd0a9693fad8e8ec27ccc34e0d6b3fd50b
|
[
"CC0-1.0"
] | null | null | null |
chemistry/compressibilities/optimize_compressibility_factor_sigmoid_minimum.py
|
davidson16807/tectonics-approximations
|
f69570fd0a9693fad8e8ec27ccc34e0d6b3fd50b
|
[
"CC0-1.0"
] | null | null | null |
chemistry/compressibilities/optimize_compressibility_factor_sigmoid_minimum.py
|
davidson16807/tectonics-approximations
|
f69570fd0a9693fad8e8ec27ccc34e0d6b3fd50b
|
[
"CC0-1.0"
] | null | null | null |
from math import *
import csv
import random
import numpy as np
from optimize import genetic_algorithm
with open('pTZ.csv', newline='') as csvfile:
csvreader = csv.reader(csvfile, delimiter=',', quotechar='"')
next(csvreader, None) # skip header
observations = [( np.array([float(p),float(T)]), float(Z))
for (i,p,Z,T) in csvreader ]
Lout = np.array([Z for (p,T), Z in observations if p >= 5 or (p>=1.2 and T<1.05) ])
Lin = np.array([(p,T) for (p,T), Z in observations if p >= 5 or (p>=1.2 and T<1.05) ])
Zout = np.array([Z for (p,T), Z in observations])
Zin = np.array([(p, T) for (p,T), Z in observations])
def max_absolute_error(estimated, observed):
return np.max(np.abs(observed-estimated))
def max_percent_absolute_error(estimated, observed):
    # parenthesize the difference; otherwise this computes observed - estimated/observed
    return np.max(np.abs((observed - estimated) / observed))
def mean_percent_absolute_error(estimated, observed):
    return np.mean(np.abs((observed - estimated) / observed))
def mean_absolute_error(estimated, observed):
return np.mean(np.abs(observed-estimated))
def L(Lparams, Lin):
p = Lin[:,0]
T = Lin[:,1]
V = p/T
T1 = 1/T
a0 = (Lparams[0])
a1 = (Lparams[1])
a2 = (Lparams[2])
a3 = (Lparams[3])
a4 = (Lparams[4])
return a0 + a1*V**a4 + a2*T1**a3
# return a0 + a1*p/T + a2/T + a3*(p/T)*(1/T) + a4*(p/T)**2 + a5*(1/T)**2
def Lcost1(Lparams):
return max_absolute_error(L(Lparams, Lin), Lout)
def Lcost2(Lparams):
return mean_absolute_error(L(Lparams, Lin), Lout)
def Lcode(Lparams):
a0 = (Lparams[0])
a1 = (Lparams[1])
a2 = (Lparams[2])
a3 = (Lparams[3])
a4 = (Lparams[4])
return f'{a0:.3f} {a1:+.3f}*(p/T)**{a4:+.3f} {a2:+.3f}/T**{a3:+.3f}'
# return f'{a0:.3f} {a1:+.3f}*p/T {a2:+.3f}/T {a3:+.3f}*(p/T)*(1/T) {a4:+.3f}*(p/T)**2 {a5:+.3f}*(1/T)**2'
def Ltext(Lparams):
arraytext = ','.join(f'{Lparams[i]:.3f}' for i in range(len(Lparams)))
return( f'''#
# Lguess = np.array([{arraytext}])
# max error: {Lcost1(Lparams)}
# {Lcode(Lparams)}
# mean error: {Lcost2(Lparams)} ''')
# Lguess = np.array([1.098,0.118,-0.946,0.981,0.954])
Lguess = np.array([1.104, 0.101, -0.924, 1,1]) # best found where exponents are 1
Lsolutions = [Lguess + np.array([random.gauss(0,0.1) for j in range(len(Lguess))]) for i in range(1000000)]
Lsolutions = sorted(Lsolutions, key=Lcost1)[0:50000]
Lsolutions = genetic_algorithm([Lcost1], Ltext, Lsolutions, survival_rate=0.8, mutant_deviation=0.3)
def S(Sparams, Sin):
p = Sin[:,0]
T = Sin[:,1]
V = p/T
T1 = 1/T
a0 = (Sparams[0])
a1 = (Sparams[1])
return 1/(1+np.exp(a0*(T1-a1)))
def Scode(Sparams):
a0 = (Sparams[0])
a1 = (Sparams[1])
return f' 1/(1+exp({a0:.3f}*(T1-{a1:.3f})))'
def I(Iparams, Iin):
p = Iin[:,0]
T = Iin[:,1]
V = p/T
T1 = 1/T
a0 = (Iparams[0])
a1 = (Iparams[1])
Lvalue = L(Iparams[2:2+5], Iin)
Svalue = S(Iparams[2+5:2+5+2], Iin)
return 1/(1+V*a0*np.exp((Lvalue-Svalue)*a1))
def Icode(Iparams):
a0 = (Iparams[0])
a1 = (Iparams[1])
Lcodetext = Lcode(Iparams[2:2+5])
Scodetext = Scode(Iparams[2+5:2+5+2])
return f'1/(1+V*{a0:.3f}*np.exp(({Lcodetext}-{Scodetext})*{a1:.3f}))'
def Z(Zparams, Zin):
Ivalue = I(Zparams, Zin)
Lvalue = L(Zparams[2:2+5], Zin)
return Ivalue + (1-Ivalue)*Lvalue
def Zcost1(Zparams):
return max_absolute_error(Z(Zparams,Zin), Zout)
def Zcost2(Zparams):
return mean_absolute_error(Z(Zparams,Zin), Zout)
def Zcode(Zparams):
Icodetext = Icode(Zparams)
Lcodetext = Lcode(Zparams)
return f'({Icodetext}) + (1-{Icodetext})*({Lcodetext})'
def Ztext(Zparams):
arraytext = ','.join(f'{Zparams[i]:.3f}' for i in range(len(Zparams)))
return( f'''#
# Zguess = np.array([{arraytext}])
# {Zcode(Zparams)}
# max error: {Zcost1(Zparams)}
# mean error: {Zcost2(Zparams)} ''')
Zguess = np.array([3,3, 1.12, 0.101, -0.928, 1,1, 7.7, -0.84])
# Zguess = np.array([1.098,0.118,-0.946,0.981,0.954, 18.033,-7.974,-24.599,3.465,0.116,9.261])
# Zguess = np.array([0.103,1.245,2.083,1.030,0.994]) # best found for the other model
Zsolutions = [Zguess]+[Zguess + np.random.normal(0, 0.3, len(Zguess)) for i in range(100000)]
Zsolutions = [x for x in Zsolutions if not isnan(Zcost1(x))]
Zsolutions = sorted(Zsolutions, key=Zcost1)[0:50000]
Zsolutions = genetic_algorithm([Zcost1], Ztext, Zsolutions, survival_rate=0.8, mutant_deviation=1)
| 31.380282
| 110
| 0.60772
| 742
| 4,456
| 3.615903
| 0.202156
| 0.011927
| 0.007454
| 0.008945
| 0.383899
| 0.370481
| 0.325755
| 0.23183
| 0.210958
| 0.19754
| 0
| 0.087661
| 0.183348
| 4,456
| 141
| 111
| 31.602837
| 0.649629
| 0.101661
| 0
| 0.242991
| 0
| 0.018692
| 0.120461
| 0.053844
| 0
| 0
| 0
| 0
| 0
| 1
| 0.168224
| false
| 0
| 0.046729
| 0.074766
| 0.364486
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
230f8a70cf89cd6ca954075bdfb7904ee2fe3de0
| 1,364
|
py
|
Python
|
backend/apps/permissions/constants.py
|
hovedstyret/indok-web
|
598e9ca0b5f3a5e776a85dec0a8694b9bcd5a159
|
[
"MIT"
] | 3
|
2021-11-18T09:29:14.000Z
|
2022-01-13T20:12:11.000Z
|
backend/apps/permissions/constants.py
|
rubberdok/indok-web
|
598e9ca0b5f3a5e776a85dec0a8694b9bcd5a159
|
[
"MIT"
] | 277
|
2022-01-17T18:16:44.000Z
|
2022-03-31T19:44:04.000Z
|
backend/apps/permissions/constants.py
|
hovedstyret/indok-web
|
598e9ca0b5f3a5e776a85dec0a8694b9bcd5a159
|
[
"MIT"
] | null | null | null |
from typing import Final, Literal
DefaultPermissionsType = Final[list[tuple[str, str]]]
# Default ResponsibleGroup types
PRIMARY_TYPE: Literal["PRIMARY"] = "PRIMARY"
HR_TYPE: Literal["HR"] = "HR"
ORGANIZATION: Final = "Organization member"
INDOK: Final = "Indøk"
REGISTERED_USER: Final = "Registered user"
PRIMARY_GROUP_NAME: Final = "Medlem"
HR_GROUP_NAME: Final = "HR"
DEFAULT_ORGANIZATION_PERMISSIONS: DefaultPermissionsType = [
("events", "add_event"),
("events", "change_event"),
("events", "delete_event"),
("listings", "add_listing"),
("listings", "change_listing"),
("listings", "delete_listing"),
("organizations", "add_membership"),
]
DEFAULT_INDOK_PERMISSIONS: DefaultPermissionsType = [
("listings", "view_listing"),
("events", "add_signup"),
("events", "view_signup"),
("events", "change_signup"),
("organizations", "view_organization"),
("forms", "add_answer"),
("forms", "change_answer"),
("forms", "view_answer"),
("forms", "view_form"),
("forms", "add_response"),
("archive", "view_archivedocument"),
]
DEFAULT_REGISTERED_USER_PERMISSIONS: DefaultPermissionsType = [
("events", "view_event"),
]
DEFAULT_GROUPS = {
ORGANIZATION: DEFAULT_ORGANIZATION_PERMISSIONS,
INDOK: DEFAULT_INDOK_PERMISSIONS,
REGISTERED_USER: DEFAULT_REGISTERED_USER_PERMISSIONS,
}
| 28.416667
| 63
| 0.692082
| 132
| 1,364
| 6.833333
| 0.325758
| 0.077605
| 0.031042
| 0.070953
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.147361
| 1,364
| 47
| 64
| 29.021277
| 0.77558
| 0.021994
| 0
| 0
| 0
| 0
| 0.323574
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.025641
| 0
| 0.025641
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
230ffd138e6c0b442e53f396664bbe99fe6ff440
| 1,037
|
py
|
Python
|
magda/utils/logger/printers/message.py
|
p-mielniczuk/magda
|
6359fa5721b4e27bd98f2c6af0e858b476645618
|
[
"Apache-2.0"
] | 8
|
2021-02-25T14:00:25.000Z
|
2022-03-10T00:32:43.000Z
|
magda/utils/logger/printers/message.py
|
p-mielniczuk/magda
|
6359fa5721b4e27bd98f2c6af0e858b476645618
|
[
"Apache-2.0"
] | 22
|
2021-03-24T11:56:47.000Z
|
2021-11-02T15:09:50.000Z
|
magda/utils/logger/printers/message.py
|
p-mielniczuk/magda
|
6359fa5721b4e27bd98f2c6af0e858b476645618
|
[
"Apache-2.0"
] | 6
|
2021-04-06T07:26:47.000Z
|
2021-12-07T18:55:52.000Z
|
from __future__ import annotations
from typing import Optional
from colorama import Fore, Style
from magda.utils.logger.parts import LoggerParts
from magda.utils.logger.printers.base import BasePrinter
from magda.utils.logger.printers.shared import with_log_level_colors
class MessagePrinter(BasePrinter):
EVENT_START_MARKER = '['
EVENT_END_MARKER = ']'
def _with_event_colors(self, text: str) -> str:
return (
Style.BRIGHT + Fore.GREEN
+ text
+ Fore.RESET + Style.NORMAL
)
def flush(
self,
colors: bool,
msg: str = None,
is_event: bool = False,
level: Optional[LoggerParts.Level] = None,
**kwargs,
) -> Optional[str]:
if is_event:
text = f'{self.EVENT_START_MARKER}{msg}{self.EVENT_END_MARKER}'
return self._with_event_colors(text) if colors else text
level_value = level.value if level else None
return with_log_level_colors(msg, level_value) if colors else msg
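# Illustrative behaviour (assuming BasePrinter requires no constructor
# arguments): MessagePrinter().flush(colors=False, msg='started',
# is_event=True) returns '[started]', while colors=True wraps the marker
# text in bright green ANSI codes via colorama.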
| 30.5
| 75
| 0.657666
| 130
| 1,037
| 5.030769
| 0.369231
| 0.041284
| 0.06422
| 0.091743
| 0.085627
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.260366
| 1,037
| 33
| 76
| 31.424242
| 0.852673
| 0
| 0
| 0
| 0
| 0
| 0.053038
| 0.051109
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.214286
| 0.035714
| 0.5
| 0.071429
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
23165b9f50977d462d02641d8468df5aa19bed3f
| 10,872
|
py
|
Python
|
priceprop/propagator.py
|
felixpatzelt/priceprop
|
038832b5e89b8559c6162e39f1b446f4446fe7f2
|
[
"MIT"
] | 17
|
2018-01-17T13:19:42.000Z
|
2022-01-25T14:02:10.000Z
|
priceprop/propagator.py
|
felixpatzelt/priceprop
|
038832b5e89b8559c6162e39f1b446f4446fe7f2
|
[
"MIT"
] | null | null | null |
priceprop/propagator.py
|
felixpatzelt/priceprop
|
038832b5e89b8559c6162e39f1b446f4446fe7f2
|
[
"MIT"
] | 7
|
2018-07-14T06:17:05.000Z
|
2021-05-16T13:59:47.000Z
|
import numpy as np
from scipy.linalg import solve_toeplitz, solve
from scipy.signal import fftconvolve
from scipy.interpolate import Rbf
from scorr import xcorr, xcorr_grouped_df, xcorrshift, fftcrop, corr_mat
# Helpers
# =====================================================================
def integrate(x):
"Return lag 1 sum, i.e. price from return, or an integrated kernel."
return np.concatenate([[0], np.cumsum(x[:-1])])
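# Illustrative value (not in the original file):
# integrate(np.array([1., 2., 3.])) -> array([0., 1., 3.])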
def smooth_tail_rbf(k, l0=3, tau=5, smooth=1, epsilon=1):
"""Smooth tail of array k with radial basis functions"""
# interpolate in log-lags
l = np.log(np.arange(l0,len(k)))
# estimate functions
krbf = Rbf(
l, k[l0:], function='multiquadric', smooth=smooth, epsilon=epsilon
)
# weights to blend with original for short lags
w = np.exp(-np.arange(1,len(k)-l0+1)/ float(tau))
# interpolate
knew = np.empty_like(k)
knew[:l0] = k[:l0]
knew[l0:] = krbf(l) * (1-w) + k[l0:] * w
#done
return knew
def propagate(s, G, sfunc=np.sign):
"""Simulate propagator model from signs and one kernel.
Equivalent to tim1, one of the kernels in tim2 or hdim2.
"""
steps = len(s)
s = sfunc(s[:len(s)])
p = fftconvolve(s, G)[:steps]
return p
# Responses
# =====================================================================
def _return_response(ret, x, maxlag):
"""Helper for response and response_grouped_df."""
# return what?
ret = ret.lower()
res = []
for i in ret:
if i == 'l':
# lags
res.append(np.arange(-maxlag,maxlag+1))
elif i == 's':
res.append(
# differential response
np.concatenate([x[-maxlag:], x[:maxlag+1]])
)
elif i == 'r':
res.append(
# bare response === cumulated differential response
np.concatenate([
-np.cumsum(x[:-maxlag-1:-1])[::-1],
[0],
np.cumsum(x[:maxlag])
])
)
if len(res) > 1:
return tuple(res)
else:
return res[0]
def response(r, s, maxlag=10**4, ret='lsr', subtract_mean=False):
"""Return lag, differential response S, response R.
Note that this commonly used price response is a simple cross correlation
and NOT equivalent to the linear response in systems analysis.
Parameters:
===========
r: array-like
Returns
s: array-like
Order signs
maxlag: int
Longest lag to calculate
ret: str
can include 'l' to return lags, 'r' to return response, and
's' to return differential response (in specified order).
subtract_mean: bool
Subtract means first. Default: False (signal means already zero)
"""
maxlag = min(maxlag, len(r) - 2)
s = s[:len(r)]
# diff. resp.
# xcorr == S(0, 1, ..., maxlag, -maxlag, ... -1)
x = xcorr(r, s, norm='cov', subtract_mean=subtract_mean)
return _return_response(ret, x, maxlag)
def response_grouped_df(
df, cols, nfft='pad', ret='lsr', subtract_mean=False, **kwargs
):
"""Return lag, differential response S, response R calculated daily.
Note that this commonly used price response is a simple cross correlation
and NOT equivalent to the linear response in systems analysis.
Parameters
==========
df: pandas.DataFrame
Dataframe containing order signs and returns
cols: tuple
The columns of interest
nfft:
Length of the fft segments
ret: str
What to return ('l': lags, 'r': response, 's': incremental response).
subtract_mean: bool
Subtract means first. Default: False (signal means already zero)
See also response, spectral.xcorr_grouped_df for more explanations
"""
# diff. resp.
x = xcorr_grouped_df(
df,
cols,
by = 'date',
nfft = nfft,
funcs = (lambda x: x, lambda x: x),
subtract_mean = subtract_mean,
norm = 'cov',
return_df = False,
**kwargs
)[0]
# lag 1 -> element 0, lag 0 -> element -1, ...
#x = x['xcorr'].values[x.index.values-1]
    maxlag = len(x) // 2  # integer division: maxlag is used as a slice index
return _return_response(ret, x, maxlag)
# Analytical power-laws
# =====================================================================
def beta_from_gamma(gamma):
"""Return exponent beta for the (integrated) propagator decay
G(lag) = lag**-beta
that compensates a sign-autocorrelation
C(lag) = lag**-gamma.
"""
return (1-gamma)/2.
def G_pow(steps, beta):
"""Return power-law Propagator kernel G(l). l=0...steps"""
    G = np.arange(1, steps)**-beta
G = np.r_[0, G]
return G
def k_pow(steps, beta):
"""Return increment of power-law propagator kernel g. l=0...steps"""
return np.diff(G_pow(steps, beta))
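# Illustrative values (not in the original file): beta_from_gamma(0.5)
# gives 0.25, and G_pow(4, 1.0) -> array([0., 1., 0.5, 0.33333333]),
# i.e. a zero-lag entry followed by the power-law tail.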
# TIM1 specific
# =====================================================================
def calibrate_tim1(c, Sl, maxlag=10**4):
"""Return empirical estimate TIM1 kernel
Parameters:
===========
c: array-like
Cross-correlation (covariance).
Sl: array-like
Price-response. If the response is differential, so is the returned
kernel.
maxlag: int
length of the kernel.
See also: integrate, g2_empirical, tim1
"""
lS = int(len(Sl) / 2)
g = solve_toeplitz(c[:maxlag], Sl[lS:lS+maxlag])
return g
def tim1(s, G, sfunc=np.sign):
"""Simulate Transient Impact Model 1, return price or return.
Result is the price p when the bare responses G is passed
and the 1 step ahead return p(t+1)-p(t) for the differential kernel
g, where G == numpy.cumsum(g).
Parameters:
===========
s: array-like
Order signs
G: array-like
Kernel
See also: calibrate_tim1, integrate, tim2, hdim2.
"""
return propagate(s, G, sfunc=sfunc)
# TIM2 specific
# =====================================================================
def calibrate_tim2(
nncorr, cccorr, cncorr, nccorr, Sln, Slc, maxlag=2**10
):
"""
Return empirical estimate for both kernels of the TIM2.
(Transient Impact Model with two propagators)
Parameters:
===========
nncorr: array-like
Cross-covariance between non-price-changing (n-) orders.
cccorr: array-like
Cross-covariance between price-changing (c-) orders.
cncorr: array-like
Cross-covariance between c- and n-orders
nccorr: array-like
Cross-covariance between n- and c-orders.
Sln: array-like
(incremental) price response for n-orders
Slc: array-like
(incremental) price response for c-orders
maxlag: int
Length of the kernels.
See also: calibrate_tim1, calibrate_hdim2
"""
# incremental response
lSn = int(len(Sln) / 2)
lSc = int(len(Slc) / 2)
S = np.concatenate([Sln[lSn:lSn+maxlag], Slc[lSc:lSc+maxlag]])
# covariance matrix
mat_fn = lambda x: corr_mat(x, maxlag=maxlag)
C = np.bmat([
[mat_fn(nncorr), mat_fn(cncorr)],
[mat_fn(nccorr), mat_fn(cccorr)]
])
# solve
g = solve(C, S)
gn = g[:maxlag]
gc = g[maxlag:]
return gn, gc
def tim2(s, c, G_n, G_c, sfunc=np.sign):
"""Simulate Transient Impact Model 2
Returns prices when integrated kernels are passed as arguments
or returns for differential kernels.
Parameters:
===========
s: array
Trade signs
c: array
Trade labels (1 = change; 0 = no change)
G_n: array
Kernel for non-price-changing trades
G_c: array
Kernel for price-changing trades
sfunc: function [optional]
Function to apply to signs. Default: np.sign.
See also: calibrate_tim2, tim1, hdim2.
"""
assert c.dtype == bool, "c must be a boolean indicator!"
return propagate(s * c, G_c) + propagate(s * (~c), G_n)
# HDIM2 specific
# =====================================================================
def calibrate_hdim2(
Cnnc, Cccc, Ccnc, Sln, Slc,
maxlag=None, force_lag_zero=True
):
"""Return empirical estimate for both kernels of the HDIM2.
(History Dependent Impact Model with two propagators).
Requres three-point correlation matrices between the signs of one
non-lagged and two differently lagged orders.
We distinguish between price-changing (p-) and non-price-changing (n-)
orders. The argument names corresponds to the argument order in
spectral.x3corr.
Parameters:
===========
Cnnc: 2d-array-like
Cross-covariance matrix for n-, n-, c- orders.
Cccc: 2d-array-like
Cross-covariance matrix for c-, c-, c- orders.
Ccnc: 2d-array-like
Cross-covariance matrix for c-, n-, c- orders.
Sln: array-like
(incremental) lagged price response for n-orders
Slc: array-like
(incremental) lagged price response for c-orders
maxlag: int
Length of the kernels.
See also: hdim2,
"""
    maxlag = maxlag or min(len(Cccc), len(Sln)) // 2  # keep maxlag an int for slicing
# incremental response
lSn = int(len(Sln) / 2)
lSc = int(len(Slc) / 2)
S = np.concatenate([
Sln[lSn:lSn+maxlag],
Slc[lSc:lSc+maxlag]
])
# covariance matrix
Cncc = Ccnc.T
C = np.bmat([
[Cnnc[:maxlag,:maxlag], Ccnc[:maxlag,:maxlag]],
[Cncc[:maxlag,:maxlag], Cccc[:maxlag,:maxlag]]
])
if force_lag_zero:
C[0,0] = 1
C[0,1:] = 0
# solve
g = solve(C, S)
gn = g[:maxlag]
gc = g[maxlag:]
return gn, gc
def hdim2(s, c, k_n, k_c, sfunc=np.sign):
"""Simulate History Dependent Impact Model 2, return return.
Parameters:
===========
s: array
Trade signs
c: array
Trade labels (1 = change; 0 = no change)
k_n: array
Differential kernel for non-price-changing trades
k_c: array
Differential kernel for price-changing trades
sfunc: function [optional]
Function to apply to signs. Default: np.sign.
See also: calibrate_hdim2, tim2, tim1.
"""
assert c.dtype == bool, "c must be a boolean indicator!"
return c * (propagate(s * c, k_c) + propagate(s * (~c), k_n))
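# Hypothetical demo, added for illustration (uses only numpy and the
# functions above): simulate a TIM1 price path from random signs with a
# power-law kernel chosen to compensate a sign autocorrelation ~ lag**-0.5.
if __name__ == "__main__":
    signs = np.sign(np.random.randn(10000))
    G = G_pow(512, beta_from_gamma(0.5))
    price = tim1(signs, G)
    print(price[:5])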
| 30.2
| 79
| 0.545438
| 1,345
| 10,872
| 4.358364
| 0.201487
| 0.0261
| 0.019106
| 0.028659
| 0.395599
| 0.32651
| 0.28608
| 0.248038
| 0.221426
| 0.205732
| 0
| 0.014199
| 0.306843
| 10,872
| 359
| 80
| 30.284123
| 0.763668
| 0.513521
| 0
| 0.217742
| 0
| 0
| 0.037585
| 0
| 0
| 0
| 0
| 0
| 0.016129
| 1
| 0.120968
| false
| 0
| 0.040323
| 0
| 0.290323
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2317503e6a916f16a70dd2104fe9aa18b505c980
| 3,035
|
py
|
Python
|
2020/day16/day16.py
|
Zojka/advent
|
0f967bf308ae0502db3656d2e9e8a0d310b00594
|
[
"Apache-2.0"
] | 1
|
2020-12-16T20:34:30.000Z
|
2020-12-16T20:34:30.000Z
|
2020/day16/day16.py
|
Zojka/adventofcode
|
0f967bf308ae0502db3656d2e9e8a0d310b00594
|
[
"Apache-2.0"
] | null | null | null |
2020/day16/day16.py
|
Zojka/adventofcode
|
0f967bf308ae0502db3656d2e9e8a0d310b00594
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: zparteka
"""
def read(infile):
with open(infile, 'r') as f:
line = f.readline()
rules = {}
while line != "\n":
rule = line.strip().split(':')
key = rule[0]
r1 = rule[1].split()[0].split("-")
r2 = rule[1].split()[2].split("-")
rules[key] = ((int(r1[0]), int(r1[1])), (int(r2[0]), int(r2[1])))
line = f.readline()
line = f.readline()
ticket = [int(i) for i in f.readline().strip().split(",")]
nearby = []
f.readline()
f.readline()
while line:
line = f.readline()
if line != "":
nearby.append([int(i) for i in line.strip().split(",")])
return rules, ticket, nearby
def check_nearby(rules, nearby):
rules = rules.values()
rules = [i for sub in rules for i in sub]
print(rules)
wrong = 0
for ticket in nearby:
for number in ticket:
flag = False
for r in rules:
if number in range(r[0], r[1] + 1):
flag = True
if flag:
continue
else:
wrong += number
break
return wrong
def remove_invalid(rules, nearby):
rules = rules.values()
rules = [i for sub in rules for i in sub]
valid = []
for ticket in nearby:
tick = True
for number in ticket:
flag = False
for r in rules:
if number in range(r[0], r[1] + 1):
flag = True
if flag:
continue
else:
tick = False
break
if tick:
valid.append(ticket)
return valid
def find_positions(nearby, rules):
transposed = list(map(list, zip(*nearby)))
result = [0] * len(transposed)
for row in range(len(transposed)):
possible_rules = list(rules.keys())
for number in transposed[row]:
for name in rules.keys():
rule = rules[name]
if number not in range(rule[0][0], rule[0][1] + 1) and number not in range(rule[1][0], rule[1][1] + 1):
possible_rules.remove(name)
result[row] = (possible_rules, row)
result.sort(key=lambda t: len(t[0]))
    occurred = [0] * len(result)
    for i in range(len(result)):
        for j in result[i][0]:
            if j not in occurred:
                occurred[result[i][1]] = j
    indexes = []
    for i in range(len(occurred)):
        if occurred[i].startswith("departure"):
            indexes.append(i)
return indexes
def main():
example = "input"
rules, ticket, nearby = read(example)
valid_nearby = remove_invalid(rules, nearby)
indexes = find_positions(valid_nearby, rules)
answer = 1
for i in indexes:
answer *= ticket[i]
print(ticket[i])
print(answer)
print(ticket)
if __name__ == '__main__':
main()
| 27.342342
| 119
| 0.497858
| 380
| 3,035
| 3.928947
| 0.218421
| 0.042197
| 0.028131
| 0.010717
| 0.243804
| 0.184863
| 0.184863
| 0.184863
| 0.184863
| 0.184863
| 0
| 0.020313
| 0.367381
| 3,035
| 110
| 120
| 27.590909
| 0.757292
| 0.019769
| 0
| 0.326087
| 0
| 0
| 0.010115
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054348
| false
| 0
| 0
| 0
| 0.097826
| 0.043478
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
231aa17295db10591d7e97d44c06178132b509d0
| 2,481
|
py
|
Python
|
core/characters.py
|
gnbuck/rpg_game
|
a0e7a0d2002230d5628f7a811e831a36b0904d2c
|
[
"Apache-2.0"
] | null | null | null |
core/characters.py
|
gnbuck/rpg_game
|
a0e7a0d2002230d5628f7a811e831a36b0904d2c
|
[
"Apache-2.0"
] | null | null | null |
core/characters.py
|
gnbuck/rpg_game
|
a0e7a0d2002230d5628f7a811e831a36b0904d2c
|
[
"Apache-2.0"
] | null | null | null |
from random import randint
from core.players import Players
class Human(Players):
def __init__(self, name, classe):
super().__init__(name, classe)
self.hp = 100
self.strengh = 15
self.defense = 15
self.speed = 50
    def __str__(self, super_desc=None, super_stats=None):
        # French: "My name is {name} and I am a ..."
        desc = f"Je m'appelle {self.name} et je suis un "
        if super_desc:
            desc += super_desc
        else:
            # French: "a simple {classe}."
            desc += f"simple {self.classe}.\n"
        # French: "My stats are: hp / strength / defense / speed"
        stats = f"Mes stats sont : \nhp = {self.hp}\nstrengh = {self.strengh}\ndefense = {self.defense}\nspeed = {self.speed}\n"
if super_stats:
stats += super_stats
desc = desc + stats
return desc
    def do_damage(self, damage=None):
        # French: "{name} is winding up a blow of {damage}"
        print(f"{self.name} prepare un coup a {damage}")
        return self.strengh
    def take_damage(self, input_damage):
        evade = randint(0, 100)
        if evade <= self.defense:
            # French: "{name} dodged the blow"
            print(f"{self.name} a esquive le coup")
            return
        self.hp -= input_damage
        if self.hp <= 0:
            # French: "{name} is dead, he was not so strong after all..."
            print(f"{self.name} est DCD, il n'etait pas si fort que ca...")
            return "ENDGAME"
        print(f"{self.name} takes {input_damage} damage and now has {self.hp} HP.")
class War(Human):
def __init__(self, name, classe):
super().__init__(name, classe)
self.hp = randint(90, 120)
self.armor = 20
self.speed = 40
def __str__(self):
desc = f"un furieux {self.classe}.\n"
stats = f"armor = {self.armor}\n"
return super().__str__(desc, stats)
def do_damage(self):
return super().do_damage(self.strengh)
def take_damage(self, input_damage):
reduced_damage = input_damage * (1 - self.armor / 100)
return super().take_damage(reduced_damage)
class Mage(Human):
def __init__(self, name, classe):
super().__init__(name, classe)
self.hp = randint(60, 85)
self.magic = 30
def __str__(self):
desc = f"un puissant {self.classe}.\n"
stats = f"magic = {self.magic}\n"
return super().__str__(desc, stats)
def do_damage(self):
critic = randint(0, 100)
if critic <= self.magic:
print("Critical hit!")
return super().do_damage(self.strengh * 1.5)
return super().do_damage(self.strengh)
def take_damage(self, input_damage):
return super().take_damage(input_damage)
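# Illustrative round of combat (hypothetical; assumes the Players base
# class only needs (name, classe), as the constructors above suggest):
# conan = War("Conan", "guerrier")
# merlin = Mage("Merlin", "mage")
# merlin.take_damage(conan.do_damage())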
| 29.188235
| 128
| 0.583636
| 330
| 2,481
| 4.175758
| 0.269697
| 0.065312
| 0.05225
| 0.040639
| 0.370102
| 0.333091
| 0.286647
| 0.286647
| 0.258345
| 0.258345
| 0
| 0.022235
| 0.293027
| 2,481
| 84
| 129
| 29.535714
| 0.763398
| 0
| 0
| 0.261538
| 0
| 0.015385
| 0.192261
| 0.017735
| 0
| 0
| 0
| 0
| 0
| 1
| 0.184615
| false
| 0
| 0.030769
| 0.030769
| 0.430769
| 0.076923
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
231b5c3a6ff047a112893a6a6f2da0e0da9bf4d4
| 1,893
|
py
|
Python
|
raytracerchallenge_python/material.py
|
toku345/RayTracerChallenge_Python
|
40ced097f92cc61b116d24c6d6c4f27d6b13029d
|
[
"MIT"
] | 1
|
2020-05-13T20:54:01.000Z
|
2020-05-13T20:54:01.000Z
|
raytracerchallenge_python/material.py
|
toku345/RayTracerChallenge_Python
|
40ced097f92cc61b116d24c6d6c4f27d6b13029d
|
[
"MIT"
] | null | null | null |
raytracerchallenge_python/material.py
|
toku345/RayTracerChallenge_Python
|
40ced097f92cc61b116d24c6d6c4f27d6b13029d
|
[
"MIT"
] | null | null | null |
from raytracerchallenge_python.tuple import Color
from math import pow
class Material:
def __init__(self):
self.color = Color(1, 1, 1)
self.ambient = 0.1
self.diffuse = 0.9
self.specular = 0.9
self.shininess = 200.0
self.pattern = None
self.reflective = 0.0
self.transparency = 0.0
self.refractive_index = 1.0
    def __eq__(self, other):
        return all([self.color == other.color,
                    self.ambient == other.ambient,
                    self.diffuse == other.diffuse,
                    self.specular == other.specular,
                    self.shininess == other.shininess,
                    self.pattern == other.pattern,
                    self.reflective == other.reflective,
                    self.transparency == other.transparency,
                    self.refractive_index == other.refractive_index])
def lighting(self, object, light, point, eyev, normalv, in_shadow=False):
if self.pattern:
color = self.pattern.pattern_at_shape(object, point)
else:
color = self.color
effective_color = color * light.intensity
ambient = effective_color * self.ambient
if in_shadow:
return ambient
lightv = (light.position - point).normalize()
light_dot_normal = lightv.dot(normalv)
black = Color(0, 0, 0)
if light_dot_normal < 0:
diffuse = black
specular = black
else:
diffuse = effective_color * self.diffuse * light_dot_normal
reflectv = (-lightv).reflect(normalv)
reflect_dot_eye = reflectv.dot(eyev)
if reflect_dot_eye <= 0:
specular = black
else:
factor = pow(reflect_dot_eye, self.shininess)
specular = light.intensity * self.specular * factor
return ambient + diffuse + specular
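# Illustrative check (not in the original file, and assuming Color
# implements value equality as elsewhere in the ray tracer): two
# default-constructed materials compare equal, since __eq__ compares the
# attributes field by field:
# Material() == Material() -> True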
| 33.210526
| 77
| 0.56524
| 203
| 1,893
| 5.118227
| 0.275862
| 0.043311
| 0.040423
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019465
| 0.348653
| 1,893
| 56
| 78
| 33.803571
| 0.823195
| 0
| 0
| 0.106383
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.06383
| false
| 0
| 0.042553
| 0.021277
| 0.191489
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
231c19be88b4ad2d044eaa6cc1261367a03e271b
| 673
|
py
|
Python
|
dawgmon/local.py
|
anvilventures/dawgmon
|
59c28f430d896aa5e7afd9c2f40584113e8d52dc
|
[
"BSD-3-Clause"
] | 54
|
2017-09-18T21:24:25.000Z
|
2021-03-11T00:11:43.000Z
|
dawgmon/local.py
|
anvilventures/dawgmon
|
59c28f430d896aa5e7afd9c2f40584113e8d52dc
|
[
"BSD-3-Clause"
] | null | null | null |
dawgmon/local.py
|
anvilventures/dawgmon
|
59c28f430d896aa5e7afd9c2f40584113e8d52dc
|
[
"BSD-3-Clause"
] | 8
|
2017-09-19T09:48:45.000Z
|
2020-03-22T01:18:44.000Z
|
import subprocess, shlex
from dawgmon import commands
def local_run(dirname, commandlist):
for cmdname in commandlist:
cmd = commands.COMMAND_CACHE[cmdname]
# shell escape such that we can pass command properly onwards
# to the Popen call
cmd_to_execute = shlex.split(cmd.command)
p = subprocess.Popen(cmd_to_execute, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
# XXX we should probably try and get the system encoding for
# this instead of defaulting to UTF-8.
stdout = stdout.decode("utf-8")
stderr = stderr.decode("utf-8")
yield (cmd.name, "$ %s" % " ".join(cmd_to_execute), p.returncode, stdout, stderr)
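# Hypothetical driver (the command name below is illustrative; real names
# come from commands.COMMAND_CACHE):
# for name, cmdline, returncode, stdout, stderr in local_run(".", ["uname"]):
#     print(name, returncode)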
| 32.047619
| 86
| 0.738484
| 98
| 673
| 4.989796
| 0.581633
| 0.030675
| 0.07362
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00531
| 0.160475
| 673
| 20
| 87
| 33.65
| 0.860177
| 0.257058
| 0
| 0
| 0
| 0
| 0.030303
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.181818
| 0
| 0.272727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
231f6aa566919c06850651c755c3b8c14c876a0c
| 38,747
|
py
|
Python
|
py_knots/clasper.py
|
Chinmaya-Kausik/py_knots
|
3c9930ea0e95f6c62da9e13eb5ffcfc0e0737f9f
|
[
"MIT"
] | null | null | null |
py_knots/clasper.py
|
Chinmaya-Kausik/py_knots
|
3c9930ea0e95f6c62da9e13eb5ffcfc0e0737f9f
|
[
"MIT"
] | null | null | null |
py_knots/clasper.py
|
Chinmaya-Kausik/py_knots
|
3c9930ea0e95f6c62da9e13eb5ffcfc0e0737f9f
|
[
"MIT"
] | null | null | null |
import tkinter as tk
from tkinter import ttk
from matplotlib.pyplot import close
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import (FigureCanvasTkAgg,
NavigationToolbar2Tk)
from matplotlib.mathtext import math_to_image
from io import BytesIO
from PIL import ImageTk, Image
from sympy import latex
from math import pi, cos, sin
from sgraph import *
from braid import *
from col_perm import *
from pres_mat import *
from visualization import *
from casson_gordon import *
from typing import List, Tuple, Callable, Dict
from math import log10, floor
font_style = "Calibri"
font_size = 25
# Function for rounding eigenvalues
def round_to_2(x: float):
if(x==0):
return 0
else:
return round(x, -int(floor(log10(abs(x))))+1)
# Class for main window
class Clasper(tk.Frame):
def __init__(self, parent):
tk.Frame.__init__(self, parent)
self.parent = parent
# Configure the grid
self.grid_columnconfigure(0, weight=1)
self.grid_columnconfigure(1, weight=1)
self.grid_columnconfigure(2, weight=1)
self.grid_columnconfigure(3, weight=1)
# Configure counter/control variables
self.braid_inv_control = ""
self.braid_seif_control = ""
self.computed_invariants = False
self.computed_seif = False
# Configure input variables
self.braid_str = tk.StringVar()
self.complete_graph = tk.IntVar(value=0)
# Configure invariant variables
self.cpf = 0
self.alexander = 0
self.signature_value = 0
self.seif = ""
self.pm = 0
# Configure frames for checking the braid
self.braid_check = tk.Frame(self)
self.cycle_decomp_frame = tk.Frame(self)
self.euler_char_frame = tk.Frame(self)
self.euler_char_frame.grid(column=2, row=3, pady=10, sticky='W')
        self.euler_char_frame.grid_columnconfigure(0, weight=3)
        self.euler_char_frame.grid_columnconfigure(1, weight=1)
self.euler_char_frame.euler_char_val = tk.Frame(self.euler_char_frame)
# Configure frames for everything
self.strands = Strands(self)
self.strands.grid(
column=0, row=4, pady=10, rowspan=6, sticky='N')
self.color = Color(self)
self.color.grid(
column=1, row=4, pady=10, rowspan=6, sticky='N')
self.signature = Signature(self)
self.signature.grid(
column=2, row=4, pady=10, rowspan=6, sticky='N')
self.braid_visual = tk.Frame(self)
self.braid_visual.grid(
column=0, row=14, pady=10, columnspan=4, sticky='N')
self.ccomplex_visual = tk.Frame(self)
self.ccomplex_visual.grid(
column=0, row=15, pady=10, columnspan=4, sticky='N')
self.invariant_frame = tk.Frame(self)
self.invariant_frame.grid(column=0, row=11,
columnspan=4, rowspan=3)
"""
----- Implementing the GUI ----
"""
# (0, 0) Instructions for entering braids
ttk.Label(
self, text='''Braids - LinkInfo format or comma/space '''+
'''separated. Colors and signature inputs - space separated.\n'''+
'''Press enter to compute invariants with defaults.'''
''' See paper for details about the C-Complex.\n'''+
'''Written by Chinmaya Kausik.''',
font=(font_style, font_size), background='cyan').grid(
column=0, row=0, columnspan=4)
# (0, 0->1) Setting up the entry for the braid
ttk.Label(
self, text='Braid:', font=(font_style, font_size)).grid(
column=0, row=1, pady=10)
ttk.Entry(self, textvariable=self.braid_str,
font=(font_style, font_size), width=40).grid(column=1, row=1,
padx=0, pady=10, sticky='W', columnspan=2)
# (1, 2) Examples for braid entries
ttk.Label(
self, text="""Example: '-2 -3 2 -3 -1 -2 -3'"""+
""" or '-2, -3, 2, -3, -1, -2, -3' or """+
"""'{4, {-2, -3, 2, -3, -1, -2, -3}}'""",
font=(font_style, font_size), background='cyan').grid(
column=1, row=2, pady=10, sticky='W', columnspan=3)
# Creating a style object
style = ttk.Style()
# Adding style for buttons
style.configure('C.TButton', font=('calibri', font_size),
background='blue')
# Adding style for radiobuttons
style.configure('C.TRadiobutton', font=('calibri', font_size))
# Adding style for checkbuttons
style.configure('C.TCheckbutton', font=('calibri', font_size))
ttk.Checkbutton(self, text="All Seifert surfaces intersecting",
style='C.TCheckbutton',
variable=self.complete_graph).grid(column=2, row=1,
padx=30, pady=10, sticky='W')
# Setup for printing the cycle decomposition
ttk.Button(self, text="Cycle Decomposition", command=self.compute_cyc,
style='C.TButton').grid(column=0, row=3, pady=10)
# Setup for printing the Euler Characteristic of the C-Complex
ttk.Button(self.euler_char_frame, text="Euler Characteristic of C-Complex",
command=self.get_sgraph_euler_char,
style='C.TButton').grid(column=0, row=0, pady=10, sticky='W')
# Button to compute invariants
ttk.Button(self, text="Compute link invariants",
command=self.get_invariants, style='C.TButton').grid(
column=0, row=10, pady=10)
ttk.Button(self, text="Invariants in LaTeX",
command=self.get_latex, style='C.TButton').grid(
column=1, row=10, pady=10)
ttk.Button(self, text="Export Seifert matrices",
command=self.get_seifert_matrices, style='C.TButton').grid(
column=2, row=10, pady=10)
# Compute invariants with defaults
    def compute_with_defaults(self, event):  # Tk callback; the event argument is unused
self.strands.strand_choice.set(1)
self.color.color_choice.set(2)
self.signature.signature_choice.set(1)
self.get_invariants()
# Processing Link Info style inputs
def link_info(self, braid: str) -> Braid:
start = braid.index('{')+1
strands = int(braid[start])
new_braid = braid[start:]
braid1 = new_braid[
new_braid.index('{')+1: new_braid.index('}')].split(',')
braid1 = list(filter(lambda x: x.strip()!="", braid1))
braid1 = list(map(lambda x: int(x), braid1))
return Braid(braid1, strands)
# Processing comma separated inputs
def csv_input(self, braid: str) -> List[int]:
braid1 = braid.strip().split(",")
braid1 = list(filter(lambda x: x.strip()!="", braid1))
braid1 = [int(x) for x in braid1]
return braid1
# Processing space separated inputs
def space_input(self, braid: str) -> List[int]:
braid1 = braid.strip().split(" ")
braid1 = list(filter(lambda x: x.strip()!="", braid1))
braid1 = [int(x) for x in braid1]
return braid1
# Command for computing and displaying the cycle decomposition of the braid
def compute_cyc(self) -> None:
self.cycle_decomp_frame.destroy()
self.cycle_decomp_frame = tk.Frame(self)
self.cycle_decomp_frame.grid(
column=1, row=3, pady=10, sticky='W')
p_braid = self.strands.make_braid()
ttk.Label(self.cycle_decomp_frame, text=str(p_braid.cycle_decomp),
font=(font_style, font_size)).pack()
# Command for computing and displaying the Euler characteristic of the C-complex
def get_sgraph_euler_char(self) -> None:
self.euler_char_frame.euler_char_val.destroy()
self.euler_char_frame.euler_char_val = tk.Frame(self.euler_char_frame)
self.euler_char_frame.euler_char_val.grid(
column=1, row=0, padx=20, pady=10, sticky='E')
try:
graph = self.color.get_graph()
ttk.Label(self.euler_char_frame.euler_char_val,
text="= "+str(graph.sgraph_euler_char()),
font=(font_style, font_size)).pack()
except Exception:
pass
# Print latex
def get_latex(self):
new_window = tk.Toplevel(self)
try:
if((self.braid_inv_control.strip() == self.braid_str.get().strip())
and self.computed_invariants):
pass
else:
graph = self.color.get_graph()
# Print the Euler characteristic of the SGraph
self.get_sgraph_euler_char()
if(self.braid_seif_control.strip() !=
self.braid_str.get().strip()):
(self.seif, self.pm) = presentation_matrix(graph)
self.cpf = self.pm.conway_potential_function(graph)
self.alexander = self.pm.multivar_alexander_poly(graph)
self.computed_invariants = True
self.computed_seif = True
self.braid_inv_control = self.braid_str.get()
self.braid_seif_control = self.braid_str.get()
cpf_text = tk.Text(new_window, font=(font_style, font_size))
cpf_text.insert(1.0, "Conway Potential Function:\n"+
latex(self.cpf))
cpf_text.pack()
cpf_text.configure(state="disabled")
multi_var_alexander = tk.Text(
new_window, font=(font_style, font_size))
multi_var_alexander.insert(1.0,
"Mutivariable Alexander Polynomial:\n"+
latex(self.alexander))
multi_var_alexander.pack()
multi_var_alexander.configure(state="disabled")
# if tkinter is 8.5 or above you'll want the selection background
# to appear like it does when the widget is activated;
# comment this out for older versions of Tkinter
cpf_text.configure(inactiveselectbackground=cpf_text.cget(
"selectbackground"))
multi_var_alexander.configure(
inactiveselectbackground=multi_var_alexander.cget("selectbackground"))
except ValueError:
pass
# Save the seifert matrices to a file
def get_seifert_matrices(self):
if((self.braid_seif_control.strip() == self.braid_str.get().strip())
and self.computed_invariants):
pass
else:
graph = self.color.get_graph()
# Print the Euler characteristic of the SGraph
self.get_sgraph_euler_char()
(self.seif, self.pm) = presentation_matrix(graph)
file_name = tk.filedialog.asksaveasfilename()
self.invariant_frame.destroy()
self.invariant_frame = Inv(self)
self.invariant_frame.grid(column=0, row=11,
columnspan=4, rowspan=3)
p = self.strands.make_braid()
graph = self.invariant_frame.graph
if(file_name):
if("." not in file_name):
file_name += ".txt"
f = open(file_name, 'w+')
f.write("Braid: "+str(p.braid_wrong))
f.write("\nStrands: "+str(p.strands)+"\n\n")
f.write(self.seif)
f.close()
# Command for computing and displaying invariants
def get_invariants(self):
self.invariant_frame.destroy()
self.view_braid()
self.view_c_complex()
self.invariant_frame = Inv(self)
self.invariant_frame.grid(column=0, row=11,
columnspan=4, rowspan=3)
# Command to view the braid
def view_braid(self):
try:
close(self.braid_fig)
except Exception:
pass
self.braid_visual.destroy()
self.braid_visual = tk.Frame(self)
self.braid_visual.grid(
column=0, row=14, pady=10, columnspan=4)
self.braid_fig = visualize_braid(self.color.get_col_braid())
# creating the Tkinter canvas
# containing the Matplotlib figure
canvas = FigureCanvasTkAgg(self.braid_fig, master=self.braid_visual)
canvas.draw()
# placing the canvas on the Tkinter window
canvas.get_tk_widget().pack()
# Command to view the C-Complex
def view_c_complex(self):
try:
close(self.ccomplex_fig)
except Exception:
pass
self.ccomplex_visual.destroy()
self.ccomplex_visual = tk.Frame(self)
self.ccomplex_visual.grid(
column=0, row=15, pady=10, columnspan=4)
self.ccomplex_fig = visualize_clasp_complex(self.color.get_graph())
# creating the Tkinter canvas
# containing the Matplotlib figure
canvas = FigureCanvasTkAgg(self.ccomplex_fig,
master=self.ccomplex_visual)
canvas.draw()
# placing the canvas on the Tkinter window
canvas.get_tk_widget().pack()
# Class for invariants
class Inv(tk.Frame):
def __init__(self, parent):
tk.Frame.__init__(self, parent)
self.parent = parent
# Configure the grid
self.grid_columnconfigure(0, weight=1)
self.grid_columnconfigure(1, weight=1)
self.grid_columnconfigure(2, weight=1)
self.grid_columnconfigure(3, weight=1)
try:
graph = parent.color.get_graph()
self.graph = graph
except ValueError:
pass
omega = parent.signature.get_omega()
# Print the Euler characteristic of the SGraph
self.parent.get_sgraph_euler_char()
if((self.parent.braid_inv_control.strip() ==
self.parent.braid_str.get().strip())
and self.parent.computed_invariants):
pass
else:
graph = self.parent.color.get_graph()
# Print the Euler characteristic of the SGraph
self.parent.get_sgraph_euler_char()
if(self.parent.braid_seif_control.strip() !=
self.parent.braid_str.get().strip()):
(self.parent.seif, self.parent.pm) = presentation_matrix(graph)
self.parent.cpf = self.parent.pm.conway_potential_function(graph)
self.parent.alexander = \
self.parent.pm.multivar_alexander_poly(graph)
self.parent.computed_invariants = True
self.parent.computed_seif = True
self.parent.braid_inv_control = self.parent.braid_str.get()
self.parent.braid_seif_control = self.parent.braid_str.get()
ttk.Label(self, text='Conway Potential Function:',
font=(font_style, font_size)).grid(
column=0, row=0, pady=10)
self.make_latex_label(latex(self.parent.cpf),
column=1, row=0, y_pad=10, sticky='W',
columnspan=3, rowspan=1, size=(2000, 100))
ttk.Label(self, text='Multivariable Alexander Polynomial:',
font=(font_style, font_size)).grid(
column=0, row=1, pady=10)
self.make_latex_label(latex(self.parent.alexander),
column=1, row=1, y_pad=10, sticky='W',
columnspan=3, rowspan=1, size=(2000, 50))
ttk.Label(self, text='Cimasoni-Florens Signature:',
font=(font_style, font_size)).grid(
column=0, row=2, pady=15)
signat = self.parent.pm.signature(omega)
ttk.Label(self, text=str(signat[0]), font=(font_style, 30)).grid(
column=1, row=2, pady=15, sticky='W')
eig_val_str = str([round_to_2(x) for x in signat[1]])[1:-1]
eig_val = "(Eigenvalues: "+eig_val_str+")"
ttk.Label(self, text=str(eig_val), font=(font_style, 25)).grid(
column=2, row=2, columnspan=2, padx=10, pady=15, sticky='W')
# Renders latex as a label and places it on the grid
def make_latex_label(self, latex_string: str, column: int,
row: int, y_pad: int, sticky: str, columnspan: int, rowspan: int,
size: Tuple[int, int]):
# Creating buffer for storing image in memory
buffer = BytesIO()
# Writing png image with our rendered latex text to buffer
math_to_image("$" + latex_string + "$",
buffer, dpi=1000, format='png')
# Rewinding buffer to 0, so that we can read from it
buffer.seek(0)
# Creating Pillow image object from it
pimage = Image.open(buffer)
pimage.thumbnail(size)
# Creating PhotoImage object from Pillow image object
image = ImageTk.PhotoImage(pimage)
# Creating label with our image
label = ttk.Label(self, image=image)
# Storing reference to our image object so it's not garbage collected,
# since TkInter doesn't store references by itself
label.img = image
label.grid(column=column, row=row, pady=y_pad, sticky=sticky,
columnspan=columnspan, rowspan=rowspan)
buffer.flush()
# Class for strand inputs
class Strands(tk.Frame):
def __init__(self, parent):
tk.Frame.__init__(self, parent)
self.parent = parent
braid = self.parent.braid_str.get()
# Configure the two columns
self.grid_columnconfigure(0, weight=1)
self.grid_columnconfigure(1, weight=2)
# Add title
ttk.Label(
self, text='''Number of strands''',
font=(font_style, font_size), background='yellow').grid(
column=0, row=0, columnspan=2)
# Configure frame for printing defaults
self.strand_default = tk.Frame(self)
self.strand_check = tk.Frame(self)
# Configure variables to hold inputs
self.strand_choice = tk.IntVar(value=0)
self.strand_str = tk.StringVar()
# Configure and place radio buttons and entries
# Default
self.use_defaults = ttk.Radiobutton(self, text="Default",
variable=self.strand_choice,
style='C.TRadiobutton', value=1, command=self.make_braid)
self.use_defaults.grid(column=0, row=1, pady=10, sticky='W')
# Custom
self.use_custom = ttk.Radiobutton(self, text="Custom: ",
variable=self.strand_choice,
style='C.TRadiobutton', value=2, command=self.make_braid)
self.use_custom.grid(column=0, row=2, pady=10, sticky='W')
ttk.Entry(self, textvariable=self.strand_str,
font=(font_style, font_size)).grid(
column=1, row=2, padx=0, pady=10, sticky='W')
# Example of a custom entry
ttk.Label(self, text="Example: '3'",
font=(font_style, font_size), background='cyan').grid(
column=1, row=3, pady=10, sticky='W')
# Make a braid and return error messages
def make_braid(self) -> Braid:
# Destroy and reinitialize message frames
self.parent.braid_check.destroy()
self.strand_default.destroy()
self.strand_check.destroy()
self.strand_check = tk.Frame(self)
self.strand_default = tk.Frame(self)
self.parent.braid_check = tk.Frame(self.parent)
self.parent.braid_check.grid(column=0, row=2, pady=10)
self.strand_default.grid(column=1, row=1, pady=10, sticky='W')
self.strand_check.grid(column=0, row=5, pady=10, columnspan=2)
strand_check_message = ""
braid = self.parent.braid_str.get()
try:
strand_option = self.strand_choice.get()
assert strand_option != 0
if('{' in braid):
p = self.parent.link_info(braid)
elif(',' in braid):
braid1 = self.parent.csv_input(braid)
else:
braid1 = self.parent.space_input(braid)
except AssertionError:
strand_check_message += "Specify strands."
except ValueError:
ttk.Label(self.parent.braid_check, text="Bad braid input",
font=(font_style, font_size), background="pink").pack()
try:
if(strand_option == 2):
strands = self.strand_str.get()
strands = int(strands)
p = Braid(braid1, strands)
else:
if('{' not in braid):
strands = max(abs(x) for x in braid1) + 1
p = Braid(braid1, strands)
ttk.Label(self.strand_default, text="= "+ str(p.strands),
font=(font_style, font_size)).pack(anchor='w')
except ValueError:
strand_check_message += "Bad strand input."
except UnboundLocalError:
pass
if(strand_check_message!=""):
ttk.Label(self.strand_check, text=strand_check_message,
font=(font_style, font_size), background="pink").pack()
try:
return p
except Exception:
pass
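# Illustration (sketch): with braid input '-2 -3 2' and the "Default" option,
# the strand count is max(|x| for x in [-2, -3, 2]) + 1 = 3 + 1 = 4.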
# Class for color inputs
class Color(tk.Frame):
def __init__(self, parent):
tk.Frame.__init__(self, parent)
self.parent = parent
braid = self.parent.braid_str.get()
# Configure the two columns
self.grid_columnconfigure(0, weight=1)
self.grid_columnconfigure(1, weight=2)
# Add title
ttk.Label(
self, text='''Colors''',
font=(font_style, font_size), background='yellow').grid(
column=0, row=0, columnspan=2)
# Configure frame for printing defaults
self.one_color_default = tk.Frame(self)
self.multi_color_default = tk.Frame(self)
self.color_check = tk.Frame(self)
# Configure variables to hold inputs
self.color_choice = tk.IntVar(value=0)
self.color_str = tk.StringVar()
# Configure and place radio buttons and entries
# One color
self.use_one_color = ttk.Radiobutton(self, text="One color",
variable=self.color_choice,
style='C.TRadiobutton', value=1, command=self.get_col_braid)
self.use_one_color.grid(column=0, row=1, pady=10, sticky='W')
# One per knot
self.use_one_per_knot = ttk.Radiobutton(self, text="One per knot",
variable=self.color_choice,
style='C.TRadiobutton', value=2, command=self.get_col_braid)
self.use_one_per_knot.grid(column=0, row=2, pady=10, sticky='W')
# Custom
self.use_custom = ttk.Radiobutton(self, text="Custom: ",
variable=self.color_choice,
style='C.TRadiobutton', value=3, command=self.get_col_braid)
self.use_custom.grid(column=0, row=3, pady=10, sticky='W')
ttk.Entry(self, textvariable=self.color_str,
font=(font_style, font_size)).grid(
column=1, row=3, padx=0, pady=10, sticky='W')
# Example of a custom entry
ttk.Label(self, text="Example: '0 0 1' for 3 knots",
font=(font_style, font_size), background='cyan').grid(
column=1, row=4, pady=10, sticky='W')
# Make a colored braid and return error messages
# Command for getting the coloured braid
def get_col_braid(self) -> ColBraid:
self.color_check.destroy()
self.multi_color_default.destroy()
self.one_color_default.destroy()
self.color_check = tk.Frame(self)
self.multi_color_default = tk.Frame(self)
self.one_color_default = tk.Frame(self)
# Place frames for various defaults and error messages
self.color_check.grid(column=0, row=5, pady=10)
self.one_color_default.grid(column=1, row=1, pady=10, sticky='W')
self.multi_color_default.grid(column=1, row=2, pady=10, sticky='W')
self.parent.compute_cyc()
p = self.parent.strands.make_braid()
def print_col_list(lst: List[int]):
a = ""
for i in lst:
a += str(i) + " "
return a
try:
color_option = self.color_choice.get()
assert color_option != 0
if(color_option == 1):
col_list = [0]*p.ct_knots
ttk.Label(self.one_color_default,
text="= "+print_col_list(col_list),
font=(font_style, font_size)).pack(anchor='w')
elif(color_option == 2):
col_list = list(range(p.ct_knots))
ttk.Label(self.multi_color_default,
text="= "+print_col_list(col_list),
font=(font_style, font_size)).pack(anchor='w')
else:
col_list = self.color_str.get()
col_list = [int(x) for x in col_list.split()]
col_signs = [1]*(max(col_list)+1)
p = ColBraid(p.braid, p.strands, col_list)
complete_choice = self.parent.complete_graph.get()
if(complete_choice==0):
p, col_signs = find_min_perm(p, col_signs, 50)
else:
p, col_signs = find_min_perm_complete(p, col_signs, 50)
return p
except ValueError:
ttk.Label(self.color_check, text="Bad color input",
font=(font_style, font_size), background="pink").pack()
except AssertionError:
ttk.Label(self.color_check, text="Specify colors",
font=(font_style, font_size), background="pink").pack()
# Makes the graph for the colored braid derived from the color inputs
def get_graph(self):
self.color_check.destroy()
self.multi_color_default.destroy()
self.one_color_default.destroy()
self.color_check = tk.Frame(self)
self.multi_color_default = tk.Frame(self)
self.one_color_default = tk.Frame(self)
# Place frames for various defaults and error messages
self.color_check.grid(column=0, row=5, pady=10)
self.one_color_default.grid(column=1, row=1, pady=10, sticky='W')
self.multi_color_default.grid(column=1, row=2, pady=10, sticky='W')
self.parent.compute_cyc()
p = self.parent.strands.make_braid()
def print_col_list(lst: List[int]):
a = ""
for i in lst:
a += str(i) + " "
return a
try:
color_option = self.color_choice.get()
assert color_option != 0
if(color_option == 1):
col_list = [0]*p.ct_knots
ttk.Label(self.one_color_default,
text="= "+print_col_list(col_list),
font=(font_style, font_size)).pack(anchor='w')
elif(color_option == 2):
col_list = list(range(p.ct_knots))
ttk.Label(self.multi_color_default,
text="= "+print_col_list(col_list),
font=(font_style, font_size)).pack(anchor='w')
else:
col_list = self.color_str.get()
col_list = [int(x) for x in col_list.split()]
col_signs = [1]*(max(col_list)+1)
p = ColBraid(p.braid, p.strands, col_list)
complete_choice = self.parent.complete_graph.get()
if(complete_choice==0):
p, col_signs = find_min_perm(p, col_signs, 50)
graph = p.make_graph(col_signs)
else:
p, col_signs = find_min_perm_complete(p, col_signs, 50)
graph = p.make_graph_complete(col_signs)
return graph
except ValueError:
ttk.Label(self.color_check, text="Bad color input",
font=(font_style, font_size), background="pink").pack()
except AssertionError:
ttk.Label(self.color_check, text="Specify colors",
font=(font_style, font_size), background="pink").pack()
# Class for signature inputs
class Signature(tk.Frame):
def __init__(self, parent):
tk.Frame.__init__(self, parent)
self.parent = parent
braid = self.parent.braid_str.get()
# Configure the two columns
self.grid_columnconfigure(0, weight=1)
self.grid_columnconfigure(1, weight=2)
# Add title
ttk.Label(
self, text='''Signature inputs''',
font=(font_style, font_size), background='yellow').grid(
column=0, row=0, columnspan=2)
# Configure frame for printing defaults
self.signature_default = tk.Frame(self)
self.signature_check = tk.Frame(self)
# Configure variables to hold inputs
self.signature_choice = tk.IntVar(value=0)
self.signature_str = tk.StringVar()
# Configure and place radio buttons and entries
# Default
self.use_defaults = ttk.Radiobutton(self, text="Default",
variable=self.signature_choice,
style='C.TRadiobutton', value=1, command=self.get_omega)
self.use_defaults.grid(column=0, row=1, pady=10, sticky='W')
# Custom
self.use_custom = ttk.Radiobutton(self, text="Custom: ",
variable=self.signature_choice,
style='C.TRadiobutton', value=2, command=self.get_omega)
self.use_custom.grid(column=0, row=2, pady=10, sticky='W')
ttk.Entry(self, textvariable=self.signature_str,
font=(font_style, font_size)).grid(
column=1, row=2, padx=0, pady=10, sticky='W')
# Example of a custom entry
ttk.Label(self, text="Example: '1/2 1/3' means '(pi, 2*pi/3)'",
font=(font_style, font_size), background='cyan').grid(
column=1, row=3, pady=10, sticky='W')
# Get the signature input and return error messages
def get_omega(self) -> List[complex]:
# Destroy and reinitialize message frames
self.signature_default.destroy()
self.signature_check.destroy()
self.signature_check = tk.Frame(self)
self.signature_default = tk.Frame(self)
self.signature_default.grid(column=1, row=1, pady=10, sticky='W')
self.signature_check.grid(column=0, row=5, pady=10, columnspan=2)
signature_inputs = self.signature_str.get()
graph = self.parent.color.get_graph()
try:
signature_option = self.signature_choice.get()
assert signature_option != 0
if(signature_option == 1):
omega = [complex(-1, 0)]*graph.colors
ttk.Label(self.signature_default, text="= "+ "1/2 "*graph.colors,
font=(font_style, font_size)).pack(anchor='w')
else:
complex_tuple = [eval(x) for x in
signature_inputs.strip().split(" ")]
for c in complex_tuple:
if(c==1.0):
ttk.Label(self.signature_check,
text="2*pi is not allowed.",
font=(font_style, font_size),
background='pink').pack(anchor='w')
omega = [complex(cos(2*pi*x), sin(2*pi*x))
for x in complex_tuple]
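# Each input x is mapped to omega = e^(2*pi*i*x) on the unit circle, so
# '1/2' -> -1 (the default) and '1/3' -> e^(2*pi*i/3), matching the GUI
# example. Note that eval() is used above so fractions like '1/2' evaluate
# to floats; it will run arbitrary expressions, so inputs are trusted here.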
except AssertionError:
ttk.Label(self.signature_check, text="Specify signature inputs",
font=(font_style, font_size),
background='pink').pack(anchor='w')
except ValueError:
ttk.Label(self.signature_check, text="Bad signature inputs",
font=(font_style, font_size),
background='pink').pack(anchor='w')
try:
return omega
except Exception:
pass
# Class for Casson Gordon inputs
class Casson_Gordon(tk.Frame):
def __init__(self, parent):
tk.Frame.__init__(self, parent)
self.parent = parent
# Configure the two columns
self.grid_columnconfigure(0, weight=1)
self.grid_columnconfigure(1, weight=2)
# Add title
ttk.Label(
self, text='''Casson-Gordon invariants''',
font=(font_style, font_size), background='yellow').grid(
column=0, row=0, columnspan=2)
# Configure variables to hold inputs
self.framing = tk.StringVar()
self.q_ni_cg = tk.StringVar()
# Configure and place labels for inputs and and examples
ttk.Label(self, text="Framing:",
font=(font_style, font_size)).grid(
column=0, row=1, padx=0, pady=10)
ttk.Label(self, text="Example: '1 0 -2'."+
" Framing = self-linking numbers of knots.",
font=(font_style, font_size), background='cyan').grid(
column=0, row=2, columnspan=2, padx=0, pady=10)
ttk.Label(self, text="q, n_i tuple:",
font=(font_style, font_size)).grid(
column=0, row=3, padx=0, pady=10)
ttk.Label(self, text="Example: '5, 2 3 2' means q = 5, n_1 = 3."+
" See paper.",
font=(font_style, font_size), background='cyan').grid(
column=0, row=4, columnspan=2, padx=0, pady=10)
# Configure and place entry boxes
ttk.Entry(self, textvariable=self.framing,
font=(font_style, font_size)).grid(
column=1, row=1, padx=0, pady=10, sticky='W')
ttk.Entry(self, textvariable=self.q_ni_cg,
font=(font_style, font_size)).grid(
column=1, row=3, padx=0, pady=10, sticky='W')
self.casson_gordon_frame = tk.Frame(self)
def compute_casson_gordon(self):
self.casson_gordon_frame.destroy()
self.casson_gordon_frame = tk.Frame(self)
self.casson_gordon_frame.grid(
column=0, row=5, columnspan=2, padx=0, pady=10)
self.casson_gordon_frame.grid_columnconfigure(0)
self.casson_gordon_frame.grid_columnconfigure(1)
ttk.Label(self.casson_gordon_frame, text="Casson-Gordon invariant:",
font=(font_style, font_size)).grid(
column=0, row=0, padx=0, pady=10)
framing_str = self.framing.get()
q_ni_cg_str = self.q_ni_cg.get()
framing_val = [int(x) for x in framing_str.split(" ")]
# Parse everything before the comma so q >= 10 also works
q = int(q_ni_cg_str[:q_ni_cg_str.find(",")].strip())
ni_tuple_str = q_ni_cg_str[q_ni_cg_str.find(",")+1:].strip().split(" ")
ni_tuple = [int(x) for x in ni_tuple_str]
p = self.parent.strands.make_braid()
ttk.Label(self.casson_gordon_frame,
text=str(casson_gordon(framing_val, q, ni_tuple, p)),
font=(font_style, font_size)).grid(
column=1, row=0, padx=0, pady=10)
def get_casson_gordon(self):
try:
self.compute_casson_gordon()
except (ValueError, AttributeError):
self.casson_gordon_frame.destroy()
self.casson_gordon_frame = tk.Frame(self)
ttk.Label(self, text="Check inputs",
font=(font_style, font_size), background='pink').grid(
column=0, row=5, columnspan=2, padx=0, pady=10)
# Executing everything
if __name__ == "__main__":
root = tk.Tk()
root.title("Clasper")
# Get the screen dimension
screen_width = root.winfo_screenwidth()
screen_height = root.winfo_screenheight()
# Find the center point
center_x = int(screen_width/2)
center_y = int(screen_height/2)
window_width = screen_width
window_height = screen_height
# Set an initial full-screen geometry; the window is maximized below,
# so the horizontal offset only serves as a fallback position
root.geometry(f'{window_width}x{window_height}+{center_x}+{0}')
root.state('zoomed')
clasper_canvas = tk.Canvas(root)
hbar = tk.Scrollbar(root, orient='horizontal',
command=clasper_canvas.xview)
scrollbar = tk.Scrollbar(root, orient='vertical',
command=clasper_canvas.yview)
hbar.pack(side="bottom", fill="both")
clasper_canvas.pack(side="left", fill="both", expand=True, padx=10, pady=10)
scrollbar.pack(side="right", fill="both")
clasper_canvas['yscrollcommand'] = scrollbar.set
clasper_canvas['xscrollcommand'] = hbar.set
clasper = Clasper(clasper_canvas)
def onCanvasConfigure(e):
clasper_canvas.configure(scrollregion=clasper_canvas.bbox("all"))
clasper_canvas.itemconfig('frame',
height=2800,
width=3000)
clasper_canvas.create_window(0, 0,
height=2800,
width=3000,
window=clasper, anchor="nw", tags="frame")
clasper_canvas.bind("<Configure>", onCanvasConfigure)
clasper_canvas.configure(scrollregion=clasper_canvas.bbox("all"))
clasper_canvas.itemconfig('frame',
height=2800,
width=3000)
def on_mousewheel(event):
clasper_canvas.yview_scroll(int(-1*(event.delta/120)), "units")
def on_shift_mousewheel(event):
clasper_canvas.xview_scroll(int(-1*(event.delta/120)), "units")
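# event.delta is a multiple of 120 on Windows (one wheel "notch"), so
# dividing by 120 scrolls one unit per notch; the -1 flips the direction
# to match the usual scrolling convention.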
root.bind_all("<MouseWheel>", on_mousewheel)
root.bind_all("<Shift-MouseWheel>", on_shift_mousewheel)
root.bind('<Return>', clasper.compute_with_defaults)
try:
# DPI awareness is only available on Windows (ctypes.windll)
from ctypes import windll
windll.shcore.SetProcessDpiAwareness(1)
except (ImportError, AttributeError, OSError):
pass
root.mainloop()
# Setting up the entry for strands
"""ttk.Label(
self, text='Number of Strands:',
font=(font_style, font_size)).grid(column=0, row=2, pady=10)
self.strand_str = tk.StringVar()
ttk.Entry(self, textvariable=self.strand_str,
font=(font_style, font_size)).grid(
column=1, row=2, padx=0, pady=10, sticky='W', columnspan=3)"""
# Set up entry for the colour list
"""ttk.Label(self, text='Colours (start from 0, BFD):',
font=(font_style, font_size)).grid(
column=0, row=5, pady=10)
self.colour_list = tk.StringVar()
ttk.Entry(self, textvariable=self.colour_list,
font=(font_style, font_size)).grid(
column=1, row=5, padx=0, pady=10, sticky='W', columnspan=3)"""
# Set up entry for orientations of colours
"""ttk.Label(self, text='Orientations (+1/-1, BFD):',
font=(font_style, font_size)).grid(
column=0, row=6, pady=10)
self.colour_signs = tk.StringVar()
ttk.Entry(self, textvariable=self.colour_signs,
font=(font_style, font_size)).grid(
column=1, row=6, padx=0, pady=10, sticky='W', columnspan=3)
"""
# Set up entry for complex tuple
"""ttk.Label(self, text='Signature input,'+
'space sep\n (1/3 means 2*pi/3, BFD):',
font=(font_style, font_size)).grid(
column=0, row=7, pady=10)
self.cplx_tuple = tk.StringVar()
ttk.Entry(self, textvariable=self.cplx_tuple,
font=(font_style, font_size)).grid(
column=1, row=7, padx=0, pady=10, sticky='W', columnspan=2)"""
| 36.901905
| 83
| 0.596666
| 4,910
| 38,747
| 4.547658
| 0.092057
| 0.033589
| 0.032021
| 0.040351
| 0.615388
| 0.562811
| 0.517936
| 0.489811
| 0.440817
| 0.396525
| 0
| 0.022448
| 0.284874
| 38,747
| 1,049
| 84
| 36.937083
| 0.783392
| 0.093298
| 0
| 0.453125
| 0
| 0
| 0.049223
| 0.001356
| 0.00142
| 0
| 0
| 0
| 0.011364
| 1
| 0.042614
| false
| 0.015625
| 0.026989
| 0
| 0.09375
| 0.008523
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
23203ffa2e49d090e30c618e5403e0af89df7c09
| 17,259
|
py
|
Python
|
state_graph.py
|
Lukx19/KR-QR
|
be90434de57759e077bce208398ee12e8f1ec85a
|
[
"MIT"
] | null | null | null |
state_graph.py
|
Lukx19/KR-QR
|
be90434de57759e077bce208398ee12e8f1ec85a
|
[
"MIT"
] | null | null | null |
state_graph.py
|
Lukx19/KR-QR
|
be90434de57759e077bce208398ee12e8f1ec85a
|
[
"MIT"
] | null | null | null |
import copy
import queue
import pydot
class NZP:
def __init__(self):
self.names = ['-', '0', '+']
self.vals = [-1, 0, 1]
self.stationary = [False, True, False]
class ZP:
def __init__(self):
self.names = ['0', '+']
self.vals = [0, 1]
self.stationary = [True, False]
class ZPM:
def __init__(self):
self.names = ['0', '+', 'm']
self.vals = [0, 1, 2]
self.stationary = [True, False, True]
class QSpace(object):
def __init__(self, name, Qmodel, state):
self.name = name
self.q_model = Qmodel
self.current_state = state
self.maximum = len(self.q_model.vals)
def increase(self):
if self.current_state < self.maximum - 1:
self.current_state += 1
def decrease(self):
if self.current_state > 0:
self.current_state -= 1
def setStateAs(self, q_state):
# TODO add check if two states are the same
self.current_state = q_state.current_state
def getVal(self):
return self.q_model.vals[self.current_state]
def getName(self):
return self.q_model.names[self.current_state]
def isStationary(self):
return self.q_model.stationary[self.current_state]
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.getVal() == other.getVal()
return False
def __ne__(self, other):
return not self.__eq__(other)
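# Illustration (sketch): a volume magnitude over the space ZPM = {0, +, m}:
# vol = QSpace('volume_mag', ZPM(), 0)
# vol.increase()       # moves from '0' to '+'
# vol.getName()        # -> '+'
# vol.isStationary()   # -> False ('+' is an interval value)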
class State:
def __init__(self, quantities):
self.state = {
'inflow': {'mag': quantities[0],
'der': quantities[1]},
'volume': {'mag': quantities[2],
'der': quantities[3]},
'outflow': {'mag': quantities[4],
'der': quantities[5]}
}
self.next_states = []
self.quantities = quantities
self.name = "0"
self.desc = ""
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
for idx in range(len(self.quantities)):
if self.quantities[idx] != other.quantities[idx]:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
class StateChange:
def __init__(self, desc):
self.description = desc
def stationaryToIntervalChange(state_obj):
for qt in state_obj.quantities:
if qt.isStationary():
return True
return False
def genFlipedInflow(state_obj):
states = []
if state_obj.state['inflow']['der'].getVal() == 0:
states.append(newState(state_obj,[('inflow','der',+1)],
desc="Id+", transition="increase"))
if state_obj.state['inflow']['mag'].getVal() != 0:
states.append(newState(state_obj,[('inflow','der',-1)],
desc="Id-", transition="decrease"))
return states
if (state_obj.state['inflow']['mag'].getVal() == 0
and state_obj.state['inflow']['der'].getVal() == 1):
return states
if (state_obj.state['inflow']['mag'].getVal() == 1
and state_obj.state['outflow']['der'].getVal() == 0
and state_obj.state['outflow']['mag'].getVal() != 2):
return states
if (state_obj.state['inflow']['der'].getVal() == -1
and state_obj.state['outflow']['mag'].getVal() == 2):
return states
if state_obj.state['inflow']['der'].getVal() == -1:
states.append(newState(state_obj,[('inflow','der',+1)],
desc="Id+", transition="increase"))
return states
if state_obj.state['inflow']['der'].getVal() == 1:
states.append(newState(state_obj,[('inflow','der',-1)],
desc="Id-", transition="decrease"))
return states
return states
def newState(state_obj, change=[('inflow','der',0)], desc="", transition=""):
new_state = copy.deepcopy(state_obj)
for ch in change:
if ch[2] == -1:
new_state.state[ch[0]][ch[1]].decrease()
elif ch[2] == 1:
new_state.state[ch[0]][ch[1]].increase()
return {'state': new_state, 'desc':desc, 'transition': transition}
def generateNextStates(state_obj):
state = state_obj.state
new_states = []
# immediate changes
if state['outflow']['mag'].getVal() == 0 and state['outflow']['der'].getVal() == 1:
new_states.append(newState(state_obj,[('volume','mag',1),('outflow','mag',1)],
desc="Im+->Vd+,Od+", transition="time"))
#new_states[-1]['state'].desc="Positive change in volume/outflow causes increase in magnitude of these quantities."
if state['inflow']['mag'].getVal() == 0 and state['inflow']['der'].getVal() == 1:
changes = [('inflow','mag',1)]
desc = "Id+->Im+. "
state_desc = "Positive change in inflow increases magnitude of inflow."
if state['outflow']['der'].isStationary():
changes.append(('outflow','der',1))
changes.append(('volume','der',1))
state_desc+=" Positive change in inflow magnitude causes to positively increase change of volume and outflow."
new_states.append(newState(state_obj,changes, desc=desc+"Im+->Vd+,Od+", transition="time"))
new_states[-1]['state'].desc=state_desc
if len(new_states) == 0:
new_states = new_states + genFlipedInflow(state_obj)
# Changes which take long time:
# increasing inflow volume
if (state['inflow']['mag'].getVal() == 1 and state['inflow']['der'].getVal() == 1):
# apply positive Infuence
if state['outflow']['mag'].getVal() != 2:
new_states.append(newState(state_obj,[('volume','der',+1),('outflow','der',+1)],
desc="E+->Vd+,Od+", transition="time"))
new_states[-1]['state'].desc="Increasing inflow. Increasing derivation of Volume and Outflow."
if state['outflow']['mag'].getVal() == 1 and state['outflow']['der'].getVal() == 1:
# go to maximal state
new_states.append(newState(state_obj,[('volume','mag',1),
('volume','der',-1),('outflow','mag',1),('outflow','der',-1)],
desc="E+->Om+", transition="time"))
new_states[-1]['state'].desc="Increasing inflow. Maximal capacity of container reached."
# Rates of change of inflow and outflow differ - outflow is faster -> go back to steady
if (state['outflow']['mag'].getVal() == 1
and state['outflow']['der'].getVal() == state['inflow']['der'].getVal()):
new_states.append(newState(state_obj,[('volume','der',-1),('outflow','der',-1)],
desc="Im<Om->Vd-,Od-", transition="time"))
new_states[-1]['state'].desc="Increasing inflow. Inflow is increasing slower than Outflow. The volume is in positive steady state."
# steady inflow volume
if (state['inflow']['mag'].getVal() == 1 and state['inflow']['der'].getVal() == 0):
change = -1* state['outflow']['der'].getVal()
s = '+' if change >0 else '-' if change < 0 else '~'
new_states.append(newState(state_obj,
[('volume','der',change),('outflow','der',change)],
desc="E~->Vd"+s+',Od'+s))
new_states[-1]['state'].desc="Positive steady inflow."
if state['outflow']['der'].getVal() == 1:
new_states.append(newState(state_obj,[('volume','mag',1),
('volume','der',-1),('outflow','mag',1),('outflow','der',-1)],
desc="E~->Vm+,Om+", transition="time"))
new_states[-1]['state'].desc="Positive steady inflow. Maximal capacity of container reached."
# decreasing inflow volume
if (state['inflow']['mag'].getVal() == 1 and state['inflow']['der'].getVal() == -1):
# apply negative influence
new_states.append(newState(state_obj,[('volume','der',-1),('outflow','der',-1)],
desc="E-->Vd-,Od-", transition="time"))
# extreme no inflow volume left
if state['outflow']['der'].getVal() == -1 and state['outflow']['mag'].getVal() < 2:
new_states.append(newState(state_obj,[('inflow','der',+1),('inflow','mag',-1)],
desc="E-->Id0,Im0", transition="time"))
new_states[-1]['state'].desc="Inflow is empty."
# colapsing from maximum to plus
if state['outflow']['mag'].getVal() == 2 and state['outflow']['der'].getVal() == -1:
new_states.append(newState(state_obj,[('volume','mag',-1),('outflow','mag',-1)],
desc="E-->Vm-,Om-", transition="time"))
new_states[-1]['state'].desc="Inflow is is slowing down what causes increase in outflow rate."
# speed of decrease can be different in inflow and outflow -> go to steady outflow
if (state['outflow']['der'].getVal() == state['inflow']['der'].getVal()
and not state['outflow']['mag'].isStationary()):
new_states.append(newState(state_obj,[('volume','der',+1),('outflow','der',+1)],
desc="E-->Vd-,Od-", transition="time"))
new_states[-1]['state'].desc="Positive steady state"
# no inflow volume
if (state['inflow']['mag'].getVal() == 0 and state['inflow']['der'].getVal() == 0):
if state['outflow']['mag'].getVal() > 0:
new_states.append(newState(state_obj,
[('volume','der',-1),('outflow','der',-1)],
desc="E0->Vd-,Od-", transition="time"))
if (state['outflow']['mag'].getVal() == 1 and state['outflow']['der'].getVal() == -1):
new_states.append(newState(state_obj,[('volume','der',1),('outflow','der',1),
('volume','mag',-1),('outflow','mag',-1)],
desc="E0->Vd+,Od+", transition="time"))
# print('new states generated: ',len(new_states))
return new_states
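# Note on generateNextStates: immediate (point) changes are collected first;
# the inflow derivative is only flipped when no immediate change fired, and
# the slower "time" transitions are appended afterwards in every case.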
def printState(state_obj):
state = state_obj.state
print("State",state_obj.name)
print(state['inflow']['mag'].getName(), state['inflow']['der'].getName())
print(state['volume']['mag'].getName(), state['volume']['der'].getName())
print(state['outflow']['mag'].getName(), state['outflow']['der'].getName())
print('----------------------')
def createEdge(source, target, desc, transition):
return {"explanation": desc,"source": source, "target": target, "transition": transition}
def addNewState(edges, states, source, target, desc, transition):
source.next_states.append(target)
edges.append(createEdge(source,target,desc,transition))
states.append(target)
return edges, states
def existingState(states, state):
for s in states:
if s == state:
return s
return None
#------------------------------------ VISUALIZATION -------------------------------
# returns the values for all variables in text format
def getStateText(state):
in_mag = state.state['inflow']['mag'].getName()
in_der = state.state['inflow']['der'].getName()
vol_mag = state.state['volume']['mag'].getName()
vol_der = state.state['volume']['der'].getName()
out_mag = state.state['outflow']['mag'].getName()
out_der = state.state['outflow']['der'].getName()
return str(state.name)+'\n'+in_mag+" "+in_der+"\n"+vol_mag+" "+vol_der+"\n"+out_mag+" "+out_der
# generates a visual (directed) graph of all states
def generateGraph(edgeList):
graph = pydot.Dot(graph_type='digraph', center=True, size=15)
for edgeObj in edgeList:
transitionText = edgeObj['explanation'] # explanation for transition
transitionType = edgeObj['transition'] # type of transition (+, -, or time)
sourceState = edgeObj['source'] # source state (obj)
targetState = edgeObj['target'] # target state (obj)
if transitionType == "increase":
edgeFillColor = '#00FF00'
elif transitionType == "decrease":
edgeFillColor = '#FF0000'
else:
edgeFillColor = 'black'
sourceStateText = getStateText(sourceState) # all values of source state in text format
targetStateText = getStateText(targetState) # all values of target state in text format
if len(targetState.next_states) == 0:
nodeFillColor = '#81B2E0'
nodeBorder = 2.8
else:
nodeFillColor = '#92E0DF'
nodeBorder = 1.5
sourceNode = pydot.Node(sourceStateText, shape='rectangle',
style="filled", fillcolor='#92E0DF', penwidth=1.5)
graph.add_node(sourceNode)
targetNode = pydot.Node(targetStateText, shape='rectangle',
style="filled", fillcolor=nodeFillColor, penwidth=nodeBorder)
graph.add_node(targetNode)
edge = pydot.Edge(sourceNode, targetNode, label=transitionText,
color=edgeFillColor, penwidth=2.25)
graph.add_edge(edge)
return graph
def decodeDesc(desc):
out = desc.replace('d',"derivative] ")
out = out.replace('m',"magnitude] ")
out = out.replace('I',"[Inflow ")
out = out.replace('E+',"Inflow is increasing ")
out = out.replace('E-',"Inflow is decreasing ")
out = out.replace('E~',"Inflow is positive ")
out = out.replace('E0',"Inflow is closed ")
out = out.replace(',',"and ")
out = out.replace('->',"implies that ")
out = out.replace('O',"[Outflow ")
out = out.replace('V',"[Volume ")
out = out.replace('+',"increases ")
out = out.replace('-',"decreases ")
# out = out.replace('~',"is steady ")
out = out.replace('<',"is less than ")
out = out.replace('.',"\n ")
return out
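# Illustration (sketch): decodeDesc("Id+") expands, replacement by
# replacement, to "[Inflow derivative] increases ".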
def printIntraState(state_obj):
state = state_obj.state
printState(state_obj)
print(state_obj.desc)
for var in ['inflow', 'outflow', 'volume']:
if state[var]['der'].getVal() == 1 and state[var]['mag'].getVal() == 1:
print(var+ ' quantity increasing')
if state[var]['der'].getVal() == 0 and state[var]['mag'].getVal() == 1:
print(var+ ' quantity is steady')
if state[var]['der'].getVal() == -1 and state[var]['mag'].getVal() == 1:
print(var+ ' quantity decreasing')
'''
if state_obj.desc == None or state_obj.desc == '':
if state['inflow']['der'].getVal() == 0:
print("Initial state. Inflow is empty.")
if state['inflow']['der'].getVal() == 1:
print("Increasing inflow.")
if state['volume']['der'].getVal() == -1:
print('Decreasing volume / outflow.')
if state['volume']['der'].getVal() == 1:
print('Increasing volume / outflow.')
if state['volume']['der'].getVal() == 0:
print('Steady volume / outflow.')
# if state['inflow']['der'].getVal() == 1:
# print('Inflow is increasing')
# if state['inflow']['der'].getVal() == -1:
# print('Inflow is decreasing')
# if state['inflow']['der'].getVal() == 0 and state['inflow']['mag'].getVal() == 0:
# print('Inflow is positive without change')
# if state['outflow']['mag'].getVal() == 2:
# print('Container is full.')
# if state['outflow']['der'].getVal() == 1:
# print('')
'''
print('----------------------')
def printInterstate(name_a,name_b,desc):
print("{:<3}->{:<3}:{:<30}{:<100}".format(name_a,name_b,desc,decodeDesc(desc)))
# --------------------------------------- MAIN --------------------------------------
inflow_mag = QSpace('inflow_mag', ZP(), 0)
inflow_der = QSpace('inflow_der', NZP(), 1)
volume_mag = QSpace('volume_mag', ZPM(), 0)
volume_der = QSpace('volume_der', NZP(), 1)
outflow_mag = QSpace('outflow_mag', ZPM(), 0)
outflow_der = QSpace('outflow_der', NZP(), 1)
initial_state = State(
[inflow_mag, inflow_der,
volume_mag, volume_der,
outflow_mag, outflow_der])
states = [initial_state]
edges = []
fringe = queue.Queue()
fringe.put(initial_state)
iteration = 0
print("INTER-STATE TRACE")
dot_graph = None
while not fringe.empty():
curr_state = fringe.get(block=False)
new_states = generateNextStates(curr_state)
for state_dict in new_states:
same_state = existingState(states, state_dict['state'])
if same_state is None:
state_dict['state'].name = str(len(states))
edges, states = addNewState(edges, states,
source=curr_state, target=state_dict['state'],
desc=state_dict['desc'],transition=state_dict['transition'])
fringe.put(state_dict['state'])
printInterstate(curr_state.name,state_dict['state'].name,state_dict['desc'])
elif curr_state != same_state:
curr_state.next_states.append(same_state)
edges.append(createEdge(source=curr_state, target=same_state,
desc=state_dict['desc'], transition=state_dict['transition']))
printInterstate(curr_state.name,same_state.name,state_dict['desc'])
dot_graph = generateGraph(edges)
iteration+=1
# print('************'+str(iteration)+'*****************')
# input("Press Enter to continue...")
dot_graph.write('graph.dot')
dot_graph.write_png('TEST_graph.png')
print("\n")
print("INTRA-STATE TRACE")
for st in states:
printIntraState(st)
print("\n")
| 39.767281
| 143
| 0.576453
| 2,054
| 17,259
| 4.730282
| 0.121714
| 0.038699
| 0.021614
| 0.034994
| 0.43557
| 0.389152
| 0.333882
| 0.306608
| 0.278716
| 0.22921
| 0
| 0.013738
| 0.2324
| 17,259
| 433
| 144
| 39.859122
| 0.719656
| 0.070282
| 0
| 0.166134
| 0
| 0
| 0.167144
| 0.004663
| 0
| 0
| 0
| 0.002309
| 0
| 1
| 0.092652
| false
| 0
| 0.009585
| 0.019169
| 0.207668
| 0.070288
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2324184f8448361dc8a0618b5d05232be22a8ed2
| 6,040
|
py
|
Python
|
service/logging.py
|
IIEG/employment-forecast-jalisco
|
83de3bef5ad91706822ffa1e1d5b8b1c29e2f6c0
|
[
"Apache-2.0"
] | null | null | null |
service/logging.py
|
IIEG/employment-forecast-jalisco
|
83de3bef5ad91706822ffa1e1d5b8b1c29e2f6c0
|
[
"Apache-2.0"
] | 1
|
2021-06-01T22:29:58.000Z
|
2021-06-01T22:29:58.000Z
|
service/logging.py
|
IIEG/employment-forecast-jalisco
|
83de3bef5ad91706822ffa1e1d5b8b1c29e2f6c0
|
[
"Apache-2.0"
] | null | null | null |
from conf import settings
import pandas as pd
import numpy as np
import datetime
import os
def stringify_results(res, reg_conf, regression_key):
res_string = """
-------------------------------
{datetime}
SELECTED MODEL: {model}
Link Function (y-transform): {link}
Other Transformations (x-transform):
{transf}
PARAMETERS:
{params}
TRAIN DATA
> SME : {sme_train} ({sme_train_before})
> RSME: {rsme_train} ({rsme_train_before})
> AME : {ame_train} ({ame_train_before})
TEST DATA
> SME : {sme_test} ({sme_test_before})
> RSME: {rsme_test} ({rsme_test_before})
> AME : {ame_test} ({ame_test_before})
TEMPORAL VALIDATION (2017)
> SME : {sme_valid} ({sme_valid_before})
> RSME: {rsme_valid} ({rsme_valid_before})
> AME : {ame_valid} ({ame_valid_before})
Response Variable Stats (insured employment) -- train data
Stats:
{stats}
Temp. Validation RMSE / response_mean = {mean}
Temp. Validation RMSE / response_median = {median}
"""
# Response Variable Stats
stats = pd.DataFrame(res.datasets.get_train(True, True), columns=["response-variable"]).describe()
# Stringify Parameters
params = ""
for param in reg_conf:
params += "\t> " + param + ": " + str(reg_conf[param]) + "\n"
# Stringify x-transforms
other_transf = ""
tranf_functions = res.datasets.transformations
for transf in tranf_functions:
other_transf += "\t> " + transf + ": " + str(tranf_functions[transf]) + "\n"
# Format Content
now = datetime.datetime.now()
content = res_string.format(
datetime=now.strftime("%Y/%m/%d %H:%M:%S"),
model=regression_key,
link=res.datasets.link,
transf=other_transf,
params=params,
sme_train=res.sme(settings.ModelConf.labels.train, apply_inverse=True),
sme_train_before=res.sme(settings.ModelConf.labels.train, apply_inverse=False),
rsme_train=res.rsme(settings.ModelConf.labels.train, apply_inverse=True),
rsme_train_before=res.rsme(settings.ModelConf.labels.train, apply_inverse=False),
ame_train=res.ame(settings.ModelConf.labels.train, apply_inverse=True),
ame_train_before=res.ame(settings.ModelConf.labels.train, apply_inverse=False),
sme_test=res.sme(settings.ModelConf.labels.test, apply_inverse=True),
sme_test_before=res.sme(settings.ModelConf.labels.test, apply_inverse=False),
rsme_test=res.rsme(settings.ModelConf.labels.test, apply_inverse=True),
rsme_test_before=res.rsme(settings.ModelConf.labels.test, apply_inverse=False),
ame_test=res.ame(settings.ModelConf.labels.test, apply_inverse=True),
ame_test_before=res.ame(settings.ModelConf.labels.test, apply_inverse=False),
sme_valid=res.sme(settings.ModelConf.labels.validate, apply_inverse=True),
sme_valid_before=res.sme(settings.ModelConf.labels.validate, apply_inverse=False),
rsme_valid=res.rsme(settings.ModelConf.labels.validate, apply_inverse=True),
rsme_valid_before=res.rsme(settings.ModelConf.labels.validate, apply_inverse=False),
ame_valid=res.ame(settings.ModelConf.labels.validate, apply_inverse=True),
ame_valid_before=res.ame(settings.ModelConf.labels.validate, apply_inverse=False),
stats=str(stats).replace("\n", "\n\t"),
mean=res.rsme(settings.ModelConf.labels.validate, apply_inverse=True) / stats.loc["mean"].values[0],
median=res.rsme(settings.ModelConf.labels.validate, apply_inverse=True) / stats.loc["50%"].values[0]
)
filename = now.strftime("%Y-%m-%d-%H-%M-%S") + "-" + regression_key + ".txt"
return filename, content
def logg_result(res, reg_conf, regression_key):
filename, content = stringify_results(res, reg_conf, regression_key)
print(content)
with open(os.path.join(settings.PROJECT_DIR, "logs", filename), "w") as file:
file.write(content)
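# Example usage (a sketch; assumes `res` is a fitted result object exposing
# the sme/rsme/ame/datasets interface used above, and that "alpha"/"ridge"
# are placeholder parameter and model names):
# logg_result(res, {"alpha": 0.1}, "ridge")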
def results_as_dict(res):
train_label = settings.ModelConf.labels.train
test_label = settings.ModelConf.labels.test
validate_label = settings.ModelConf.labels.validate
def reverse_dict(d):
return {v: k for k, v in d.items()}
def percentage_error(label, res):
original = sum(res.original_output(label, True))
pred = sum(res.prediction(label, True))
return 100 * np.abs(original - pred) / original
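# Note: this is an aggregate percentage error - the totals are compared
# before taking the absolute difference, so over- and under-predictions
# on individual rows can cancel out.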
vdf = res.data(validate_label).copy()
vdf["prediction"] = res.prediction(validate_label, True)
vdf["value"] = res.original_output(validate_label, True)
vdf["abs_error"] = np.abs(vdf["prediction"] - vdf["value"])
reference_index = ((vdf.year + vdf.month / 12) == (vdf.year + vdf.month / 12).max()).values
vdf[reference_index].head()
categ = {}
for sc in res.datasets.string_cols:
vdf[sc] = vdf[sc].replace(reverse_dict(res.datasets.category_encoder[sc]))
temp = vdf.groupby(sc)[["prediction", "value", "abs_error"]].sum()
temp["percentage_error"] = 100 * temp["abs_error"] / temp["value"]
categ[sc] = temp.T.to_dict()
return {
"model-desc": {
"lags": [c for c in res.datasets.get_train().columns if "t-" in c]
},
"model-performance": {
train_label: {
"rsme": res.rsme(train_label, apply_inverse=True),
"ame": res.ame(train_label, apply_inverse=True),
"percentage-error": percentage_error(train_label, res)
},
test_label: {
"rsme": res.rsme(test_label, apply_inverse=True),
"ame": res.ame(test_label, apply_inverse=True),
"percentage-error": percentage_error(test_label, res)
},
validate_label: {
"rsme": res.rsme(validate_label, apply_inverse=True),
"ame": res.ame(validate_label, apply_inverse=True),
"percentage-error": percentage_error(validate_label, res)
}
},
"validation-data-2017": categ
}
| 41.088435
| 108
| 0.654636
| 756
| 6,040
| 5.041005
| 0.183862
| 0.081868
| 0.138809
| 0.073209
| 0.40698
| 0.383102
| 0.368932
| 0.304644
| 0.046707
| 0.032537
| 0
| 0.004576
| 0.203974
| 6,040
| 146
| 109
| 41.369863
| 0.788062
| 0.013576
| 0
| 0
| 0
| 0
| 0.19654
| 0.012263
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040984
| false
| 0
| 0.040984
| 0.008197
| 0.114754
| 0.008197
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
23271db66f8bb4de60b78338e614df097d3bd2ec
| 665
|
py
|
Python
|
systemtools/test/clearterminaltest.py
|
hayj/SystemTools
|
89c32c2cac843dfa2719f0ce37a0a52cda0b0c0b
|
[
"MIT"
] | 11
|
2018-08-10T00:55:20.000Z
|
2022-02-11T13:34:06.000Z
|
systemtools/test/clearterminaltest.py
|
hayj/SystemTools
|
89c32c2cac843dfa2719f0ce37a0a52cda0b0c0b
|
[
"MIT"
] | 5
|
2018-05-01T14:30:37.000Z
|
2021-11-18T11:48:28.000Z
|
systemtools/test/clearterminaltest.py
|
hayj/SystemTools
|
89c32c2cac843dfa2719f0ce37a0a52cda0b0c0b
|
[
"MIT"
] | 7
|
2019-08-16T13:32:19.000Z
|
2022-01-27T10:51:19.000Z
|
# print("aaaaaaaaaa bbbbbbbbbb")
# # print(chr(27) + "[2J")
import os
import sys
from enum import Enum
import signal
# getOutputType() is neither defined nor imported in this snippet, and the
# exit() call would stop the script before the checks below ever run, so
# both lines are disabled here:
# print(getOutputType())
# exit()
# import os
# os.system('cls' if os.name == 'nt' else 'clear')
size = os.get_terminal_size()
print(size[0])
if signal.getsignal(signal.SIGHUP) == signal.SIG_DFL: # default action
print("No SIGHUP handler")
else:
print("In nohup mode")
import time
for x in range(0, 5):
b = "Loading" + "." * x
print (b, end="\r")
time.sleep(1)
import sys
print("FAILED...")
sys.stdout.write("\033[F") #back to previous line
time.sleep(1)
sys.stdout.write("\033[K") #clear line
print("SUCCESS!")
| 14.777778
| 71
| 0.645113
| 101
| 665
| 4.217822
| 0.574257
| 0.037559
| 0.046948
| 0.079812
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025688
| 0.180451
| 665
| 45
| 72
| 14.777778
| 0.755963
| 0.239098
| 0
| 0.173913
| 0
| 0
| 0.139113
| 0
| 0.043478
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.26087
| 0
| 0.26087
| 0.304348
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
23273537cf14476c6fb5136eab49c7351f22035d
| 7,674
|
py
|
Python
|
polytrack/deep_learning.py
|
malikaratnayake/Polytrack2.0
|
4ce45f26823c6ac63469112954fa23ed5ffd04bc
|
[
"MIT"
] | 1
|
2022-03-24T07:06:37.000Z
|
2022-03-24T07:06:37.000Z
|
polytrack/deep_learning.py
|
malikaratnayake/Polytrack2.0
|
4ce45f26823c6ac63469112954fa23ed5ffd04bc
|
[
"MIT"
] | null | null | null |
polytrack/deep_learning.py
|
malikaratnayake/Polytrack2.0
|
4ce45f26823c6ac63469112954fa23ed5ffd04bc
|
[
"MIT"
] | null | null | null |
import os
import time
import cv2
import random
import colorsys
import numpy as np
import tensorflow as tf
import pytesseract
import core.utils as utils
from core.config import cfg
import re
from PIL import Image
from polytrack.general import cal_dist
import itertools as it
import math
# import tensorflow as tf
physical_devices = tf.config.experimental.list_physical_devices('GPU')
if len(physical_devices) > 0:
tf.config.experimental.set_memory_growth(physical_devices[0], True)
tf.config.set_visible_devices(physical_devices[0:1], 'GPU')
from absl import app, flags, logging
from absl.flags import FLAGS
import core.utils as utils
from core.yolov4 import filter_boxes
from tensorflow.python.saved_model import tag_constants
from PIL import Image
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
from polytrack.config import pt_cfg
model_weights = './checkpoints/custom-416'
config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
saved_model_loaded = tf.saved_model.load(model_weights, tags=[tag_constants.SERVING])
infer = saved_model_loaded.signatures['serving_default']
def dl_detections_process(bboxes):
classes = utils.read_class_names(cfg.YOLO.CLASSES)
allowed_classes = pt_cfg.POLYTRACK.TRACKING_INSECTS
num_classes = len(classes)
_dl_detections = np.zeros(shape=(0,6))
out_boxes, out_scores, out_classes, num_boxes = bboxes
for i in range(num_boxes):
if int(out_classes[i]) < 0 or int(out_classes[i]) > num_classes: continue
coor = out_boxes[i]
score = out_scores[i]
class_ind = int(out_classes[i])
# print(class_ind, classes[class_ind])
class_name = classes[class_ind]
if class_name not in allowed_classes:
continue
else:
_dl_detections = np.vstack([_dl_detections,(coor[0], coor[1], coor[2], coor[3], class_name, score)])
return _dl_detections
def map_darkspots(__frame, _dark_spots):
for spot in _dark_spots:
__frame = cv2.circle(__frame, (int(spot[0]), int(spot[1])), int(pt_cfg.POLYTRACK.DL_DARK_SPOTS_RADIUS), (100,100,100), -1)
return __frame
def run_DL(_frame):
#if pt_cfg.POLYTRACK.DL_DARK_SPOTS:
#dark_spots = pt_cfg.POLYTRACK.RECORDED_DARK_SPOTS
#if len(dark_spots):
# _frame = map_darkspots(_frame, dark_spots)
#else:
# pass
# else:
# pass
_frame = cv2.cvtColor(_frame, cv2.COLOR_BGR2RGB)
image = Image.fromarray(_frame)
frame_size = _frame.shape[:2]
image_data = cv2.resize(_frame, (cfg.YOLO.INPUT_SIZE, cfg.YOLO.INPUT_SIZE))
image_data = image_data / 255.
image_data = image_data[np.newaxis, ...].astype(np.float32)
batch_data = tf.constant(image_data)
pred_bbox = infer(batch_data)
for key, value in pred_bbox.items():
boxes = value[:, :, 0:4]
pred_conf = value[:, :, 4:]
boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(
boxes=tf.reshape(boxes, (tf.shape(boxes)[0], -1, 1, 4)),
scores=tf.reshape(
pred_conf, (tf.shape(pred_conf)[0], -1, tf.shape(pred_conf)[-1])),
max_output_size_per_class=pt_cfg.POLYTRACK.MAX_OUTPUT_SIZE_PER_CLASS,
max_total_size=pt_cfg.POLYTRACK.MAX_TOTAL_SIZE,
iou_threshold=pt_cfg.POLYTRACK.DL_IOU_THRESHOLD,
score_threshold=pt_cfg.POLYTRACK.DL_SCORE_THRESHOLD
)
# format bounding boxes from normalized ymin, xmin, ymax, xmax ---> xmin, ymin, xmax, ymax
original_h, original_w, _ = _frame.shape
bboxes = utils.format_boxes(boxes.numpy()[0], original_h, original_w)
pred_bbox = [bboxes, scores.numpy()[0], classes.numpy()[0], valid_detections.numpy()[0]]
# read in all class names from config
class_names = utils.read_class_names(cfg.YOLO.CLASSES)
_detections = dl_detections_process(pred_bbox)
return _detections
#Calculate the area covered by the insect
def cal_bodyArea_DL(_x_TL,_y_TL,_x_BR,_y_BR):
_body_area = abs((_x_BR-_x_TL)*(_y_BR-_y_TL))
return _body_area
#Extract the data from result and calculate the center of gravity of the insect
def cal_CoG_DL(result):
_x_DL, _y_DL, _body_area, _radius = 0, 0, 0, 0
_x_TL = int(float(result[0]))
_y_TL = int(float(result[1]))
_x_BR = int(float(result[2]))
_y_BR = int(float(result[3]))
_x_DL = int(round((_x_TL+_x_BR)/2))
_y_DL = int(round((_y_TL+_y_BR)/2))
_radius = round(cal_dist(_x_TL, _y_TL,_x_DL,_y_DL)*math.cos(math.radians(45)))
_body_area = cal_bodyArea_DL(_x_TL,_y_TL,_x_BR,_y_BR)
return _x_DL,_y_DL, _body_area, _radius
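# Worked example (sketch): for a box (0, 0, 100, 100) the CoG is (50, 50),
# the body area is 100*100 = 10000, and the radius is
# round(dist((0,0),(50,50)) * cos(45 deg)) = round(70.71 * 0.7071) = 50.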
#Detect insects in frame using Deep Learning
def detect_deep_learning(_frame, flowers = False):
_results = run_DL(_frame)
#print(flowers)
_deep_learning_detections = process_DL_results(_results, flowers)
if (len(_deep_learning_detections) > 1):
_deep_learning_detections = verify_insects_DL(_deep_learning_detections)
else:
pass
return _deep_learning_detections
def process_DL_results(_results, flowers):
_logDL = np.zeros(shape=(0,5)) # array to store x, y, area (or radius for flowers), species, confidence
for result in _results: # Go through the detected results
confidence = result[5]
_species = result[4]
if not flowers:
if ((_species != 'flower')): # Filter out detections which do not meet the threshold
_x_DL, _y_DL, _body_area, _ = cal_CoG_DL(result) #Calculate the center of gravity
_logDL = np.vstack([_logDL,(float(_x_DL), float(_y_DL), float(_body_area),_species,confidence)])
else:
pass
else:
if ((_species == 'flower')): # Filter out detections which do not meet the threshold
_x_DL, _y_DL, _ , _radius = cal_CoG_DL(result) #Calculate the center of gravity
_logDL = np.vstack([_logDL,(float(_x_DL), float(_y_DL), float(_radius),_species,confidence)])
else:
pass
return _logDL
# Calculate the distance between two coordinates
def cal_euclidean_DL(_insects_inFrame,_pair):
_dx = float(_insects_inFrame[_pair[0]][0]) - float(_insects_inFrame[_pair[1]][0])
_dy = float(_insects_inFrame[_pair[0]][1]) - float(_insects_inFrame[_pair[1]][1])
_dist = np.sqrt(_dx**2+_dy**2)
return _dist
#Verify that there are no duplicate detections (two CoGs closer than 15 pixels are treated as the same insect)
def verify_insects_DL(_insects_inFrame):
_conflict_pairs = []
_combinations = it.combinations(np.arange(len(_insects_inFrame)), 2)
for pair in _combinations:
_distance = cal_euclidean_DL(_insects_inFrame,pair)
if (_distance<15):
_conflict_pairs.append(pair)
if (_conflict_pairs): _insects_inFrame = evaluvate_conflict(_conflict_pairs, _insects_inFrame)
return _insects_inFrame
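# Illustration (sketch): if two detections land 10 px apart, the pair is
# flagged as a conflict and evaluvate_conflict keeps only the one with the
# higher confidence score.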
#Evaluate the DL confidence levels and remove the lowest-confidence detections
def evaluvate_conflict(_conflict_pairs, _insects_inFrame):
to_be_removed = []
for pairs in _conflict_pairs:
conf_0 = _insects_inFrame[pairs[0]][4]
conf_1 = _insects_inFrame[pairs[1]][4]
if (conf_0>=conf_1):to_be_removed.append(pairs[1])
else: to_be_removed.append(pairs[0])
to_be_removed = list(dict.fromkeys(to_be_removed)) #Remove duplicates
_insects_inFrame = np.delete(_insects_inFrame, to_be_removed, 0)
return _insects_inFrame
| 33.365217
| 130
| 0.690253
| 1,086
| 7,674
| 4.516575
| 0.22744
| 0.048522
| 0.022834
| 0.006116
| 0.250357
| 0.146381
| 0.103568
| 0.069725
| 0.069725
| 0.069725
| 0
| 0.016124
| 0.207975
| 7,674
| 229
| 131
| 33.510917
| 0.790885
| 0.133307
| 0
| 0.09589
| 0
| 0
| 0.008605
| 0.003623
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068493
| false
| 0.020548
| 0.164384
| 0
| 0.30137
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2327a93cda5f2e2914fc9a547155549bead73408
| 765
|
py
|
Python
|
pypi_uploader/setup.py
|
p-geon/DockerBonsai
|
1b1deafe228438e5ce3b4a41026aef4748f98573
|
[
"MIT"
] | 1
|
2021-11-28T13:27:41.000Z
|
2021-11-28T13:27:41.000Z
|
docker-pypi_uploader/setup.py
|
p-geon/DockerBonsai
|
1b1deafe228438e5ce3b4a41026aef4748f98573
|
[
"MIT"
] | 8
|
2021-02-19T12:54:22.000Z
|
2021-02-25T02:32:23.000Z
|
pypi_uploader/setup.py
|
p-geon/DockerBonsai
|
1b1deafe228438e5ce3b4a41026aef4748f98573
|
[
"MIT"
] | null | null | null |
from setuptools import setup
from codecs import open
from os import path
NAME_REPO="imagechain"
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name=NAME_REPO,
packages=[NAME_REPO],
version='0.1',
license='MIT',
install_requires=[],
author='p-geon',
author_email='alchemic4s@gmail.com',
url='https://github.com/p-geon/' + NAME_REPO,
description='Image plotting & Image conversion',
long_description=long_description,
long_description_content_type='text/markdown',
keywords='image plot',
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.7',
],
)
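# Hedged packaging sketch (not from the original file): a typical
# build-and-publish flow for this setup.py, assuming the `build` and
# `twine` packages are installed:
#   python -m build
#   twine upload dist/*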
| 25.5
| 63
| 0.673203
| 96
| 765
| 5.197917
| 0.645833
| 0.064128
| 0.076152
| 0.12024
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009631
| 0.185621
| 765
| 30
| 64
| 25.5
| 0.791332
| 0
| 0
| 0
| 0
| 0
| 0.278068
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.12
| 0
| 0.12
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
232aa5dcc39387e06484add60fa99039e0f84ed2
| 563
|
py
|
Python
|
uaa_bot/config.py
|
cloud-gov/uaa-bot
|
d2191621d364ce0fe4804283243a5195cfe84c7a
|
[
"CC0-1.0"
] | 1
|
2021-03-27T21:34:28.000Z
|
2021-03-27T21:34:28.000Z
|
uaa_bot/config.py
|
cloud-gov/uaa-bot
|
d2191621d364ce0fe4804283243a5195cfe84c7a
|
[
"CC0-1.0"
] | 4
|
2021-02-11T18:02:16.000Z
|
2022-02-23T18:55:11.000Z
|
uaa_bot/config.py
|
cloud-gov/uaa-bot
|
d2191621d364ce0fe4804283243a5195cfe84c7a
|
[
"CC0-1.0"
] | null | null | null |
import os
def parse_config_env(default_dict):
config_dict = {}
for key, value in default_dict.items():
config_dict[key] = os.environ.get(key, value)
return config_dict
SMTP_KEYS = {
"SMTP_HOST": "localhost",
"SMTP_PORT": 25,
"SMTP_FROM": "no-reply@example.com",
"SMTP_USER": None,
"SMTP_PASS": None,
"SMTP_CERT": None,
}
UAA_KEYS = {
"UAA_BASE_URL": "https://uaa.bosh-lite.com",
"UAA_CLIENT_ID": None,
"UAA_CLIENT_SECRET": None,
}
smtp = parse_config_env(SMTP_KEYS)
uaa = parse_config_env(UAA_KEYS)
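# Caveat: os.environ.get returns strings, so a value supplied through the
# environment (e.g. SMTP_PORT=587) arrives as str, while an unset key keeps
# its typed default (int 25 here). Hedged usage sketch:
#   from uaa_bot import config
#   print(config.smtp["SMTP_PORT"], config.uaa["UAA_BASE_URL"])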
| 18.766667
| 53
| 0.651865
| 81
| 563
| 4.197531
| 0.493827
| 0.097059
| 0.123529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004454
| 0.202487
| 563
| 29
| 54
| 19.413793
| 0.752784
| 0
| 0
| 0
| 0
| 0
| 0.26643
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0.047619
| 0.047619
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
232ab34c654fc84b1b9af2251151c7a436bd3f09
| 1,346
|
py
|
Python
|
TcpServer.py
|
WinHtut/BootCampPython-1
|
c784a23d73304f328b8d6a1e29a1c43e6b6c44c7
|
[
"MIT"
] | null | null | null |
TcpServer.py
|
WinHtut/BootCampPython-1
|
c784a23d73304f328b8d6a1e29a1c43e6b6c44c7
|
[
"MIT"
] | null | null | null |
TcpServer.py
|
WinHtut/BootCampPython-1
|
c784a23d73304f328b8d6a1e29a1c43e6b6c44c7
|
[
"MIT"
] | 1
|
2021-12-04T16:08:17.000Z
|
2021-12-04T16:08:17.000Z
|
import socket
import threading
import FetchData
class TCPserver():
def __init__(self):
self.server_ip="localhost"
self.server_port=9998
def main(self):
server = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
server.bind((self.server_ip,self.server_port))
server.listen(6)
print(f'Server listen on {self.server_ip} : Port:{self.server_port}')
while True:
            client, address = server.accept()
            print(f'[+] Accepted connection from {address[0]}:{address[1]}')
            client_handler = threading.Thread(target=self.handle_client, args=(client,))
            client_handler.start()
def handle_client(self,client_socket):
with client_socket as sock:
request = sock.recv(1024)
toFindInDatabase = request.decode()
            print('[*] Received data from client:', toFindInDatabase)
receivedFromDatabase = self.toFind(toFindInDatabase)
toSend=bytes(receivedFromDatabase,'utf-8')
sock.send(toSend)
def toFind(self,toFindInDatabase):
db =FetchData.DatabaseClass(toFindInDatabase)
DBdata=db.databaseMethod()
return DBdata
if __name__ == "__main__":
    server = TCPserver()
    server.main()  # main() loops forever, accepting and dispatching connections
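# Hedged client-side sketch for exercising the server above (host and port
# mirror the defaults in __init__; the query bytes are illustrative):
#   import socket
#   with socket.create_connection(("localhost", 9998)) as s:
#       s.send(b"some_lookup_key")
#       print(s.recv(4096).decode())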
| 32.829268
| 90
| 0.616642
| 140
| 1,346
| 5.742857
| 0.464286
| 0.087065
| 0.044776
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012358
| 0.278603
| 1,346
| 41
| 91
| 32.829268
| 0.815654
| 0
| 0
| 0.060606
| 0
| 0
| 0.127774
| 0.017598
| 0
| 0
| 0
| 0
| 0
| 1
| 0.121212
| false
| 0
| 0.090909
| 0
| 0.272727
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
232aee5e5c70b6ac013e320c3a04f48e6af0f6b1
| 11,122
|
py
|
Python
|
Jump_Trend_labeling/Trend/jump.py
|
anakinanakin/neural-network-on-finance-data
|
1842606294ca3d5dafa7387d6db95a1c21d323eb
|
[
"MIT"
] | 1
|
2021-05-11T09:11:53.000Z
|
2021-05-11T09:11:53.000Z
|
Jump_Trend_labeling/Trend/jump.py
|
anakinanakin/neural-network-on-finance-data
|
1842606294ca3d5dafa7387d6db95a1c21d323eb
|
[
"MIT"
] | null | null | null |
Jump_Trend_labeling/Trend/jump.py
|
anakinanakin/neural-network-on-finance-data
|
1842606294ca3d5dafa7387d6db95a1c21d323eb
|
[
"MIT"
] | 1
|
2020-07-28T03:59:31.000Z
|
2020-07-28T03:59:31.000Z
|
#source code: https://github.com/alvarobartt/trendet
import psycopg2, psycopg2.extras
import os
import glob
import csv
import time
import datetime
import string
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib import patches
from matplotlib.pyplot import figure
from datetime import timedelta, date
from math import ceil, sqrt
from statistics import mean
from unidecode import unidecode
# run-length encode a label array into (trend, start_day, width) tuples (rectangles for plotting)
def trans2rect(arr):
tarr = []
trend = arr[0]
width = 1
day = 0
for elm in arr[1:]:
if elm == trend:
width += 1
else:
tarr.append((trend, day, width))
trend = elm
day += width
width = 1
tarr.append((trend, day, width))
return tarr
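# Worked example of the run-length encoding above:
#   trans2rect([1, 1, 0, 0, 0, 1]) -> [(1, 0, 2), (0, 2, 3), (1, 5, 1)]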
def date_range(start_date, end_date):
for n in range(int ((end_date - start_date).days)):
yield start_date + timedelta(n)
def identify_df_trends(df, column, window_size=5, identify='both'):
"""
    This function receives as input a pandas.DataFrame whose data is analysed in order to
    detect/identify trends over a certain date range. A trend is defined by the window_size, which
    specifies the number of consecutive days that lead the algorithm to identify the market behaviour as a trend.
    Accordingly, this function identifies both up and down trends and removes the ones that overlap, keeping the
    longer trend and discarding the nested trend.
Args:
df (:obj:`pandas.DataFrame`): dataframe containing the data to be analysed.
column (:obj:`str`): name of the column from where trends are going to be identified.
window_size (:obj:`window`, optional): number of days from where market behaviour is considered a trend.
identify (:obj:`str`, optional):
            which trends the user wants identified; it can be 'both', 'up' or 'down'.
Returns:
        :obj:`pandas.DataFrame`:
            The function returns a :obj:`pandas.DataFrame` containing the input data with a new column which
            identifies every trend found on the market between two dates,
            marking when the trend started and when it ended. The additional column contains labeled date
            ranges, representing both bullish (up) and bearish (down) trends.
Raises:
        ValueError: raised if any of the provided arguments is invalid.
"""
if df is None:
raise ValueError("df argument is mandatory and needs to be a `pandas.DataFrame`.")
if not isinstance(df, pd.DataFrame):
raise ValueError("df argument is mandatory and needs to be a `pandas.DataFrame`.")
if column is None:
raise ValueError("column parameter is mandatory and must be a valid column name.")
if column and not isinstance(column, str):
raise ValueError("column argument needs to be a `str`.")
if isinstance(df, pd.DataFrame):
if column not in df.columns:
raise ValueError("introduced column does not match any column from the specified `pandas.DataFrame`.")
else:
if df[column].dtype not in ['int64', 'float64']:
raise ValueError("supported values are just `int` or `float`, and the specified column of the "
"introduced `pandas.DataFrame` is " + str(df[column].dtype))
if not isinstance(window_size, int):
raise ValueError('window_size must be an `int`')
if isinstance(window_size, int) and window_size < 3:
raise ValueError('window_size must be an `int` equal or higher than 3!')
if not isinstance(identify, str):
raise ValueError('identify should be a `str` contained in [both, up, down]!')
if isinstance(identify, str) and identify not in ['both', 'up', 'down']:
raise ValueError('identify should be a `str` contained in [both, up, down]!')
objs = list()
up_trend = {
'name': 'Up Trend',
'element': np.negative(df['close'])
}
down_trend = {
'name': 'Down Trend',
'element': df['close']
}
if identify == 'both':
objs.append(up_trend)
objs.append(down_trend)
elif identify == 'up':
objs.append(up_trend)
elif identify == 'down':
objs.append(down_trend)
results = dict()
for obj in objs:
mov_avg = None
values = list()
trends = list()
        for index, value in enumerate(obj['element'], 0):
if mov_avg and mov_avg > value:
values.append(value)
mov_avg = mean(values)
elif mov_avg and mov_avg < value:
if len(values) > window_size:
min_value = min(values)
for counter, item in enumerate(values, 0):
if item == min_value:
break
to_trend = from_trend + counter
trend = {
'from': df.index.tolist()[from_trend],
'to': df.index.tolist()[to_trend],
}
trends.append(trend)
mov_avg = None
values = list()
else:
from_trend = index
values.append(value)
mov_avg = mean(values)
        results[obj['name']] = trends
    # deal with overlapping labels, keeping the longer trends
if identify == 'both':
up_trends = list()
for up in results['Up Trend']:
flag = True
for down in results['Down Trend']:
                if (down['from'] <= up['from'] <= down['to']) or (down['from'] <= up['to'] <= down['to']):
                    if (up['to'] - up['from']) <= (down['to'] - down['from']):
                        flag = False
            for other_up in results['Up Trend']:
                if (other_up['from'] < up['from'] < other_up['to']) or (other_up['from'] < up['to'] < other_up['to']):
                    if (up['to'] - up['from']) < (other_up['to'] - other_up['from']):
                        flag = False
            if flag is True:
                up_trends.append(up)
labels = [letter for letter in string.printable[:len(up_trends)]]
for up_trend, label in zip(up_trends, labels):
for index, row in df[up_trend['from']:up_trend['to']].iterrows():
df.loc[index, 'Up Trend'] = label
down_trends = list()
for down in results['Down Trend']:
flag = True
for up in results['Up Trend']:
                if (up['from'] <= down['from'] <= up['to']) or (up['from'] <= down['to'] <= up['to']):
                    if (up['to'] - up['from']) >= (down['to'] - down['from']):
                        flag = False
            for other_down in results['Down Trend']:
                if (other_down['from'] < down['from'] < other_down['to']) or (other_down['from'] < down['to'] < other_down['to']):
                    if (other_down['to'] - other_down['from']) > (down['to'] - down['from']):
                        flag = False
            if flag is True:
                down_trends.append(down)
labels = [letter for letter in string.printable[:len(down_trends)]]
for down_trend, label in zip(down_trends, labels):
for index, row in df[down_trend['from']:down_trend['to']].iterrows():
df.loc[index, 'Down Trend'] = label
return df
elif identify == 'up':
up_trends = results['Up Trend']
up_labels = [letter for letter in string.printable[:len(up_trends)]]
for up_trend, up_label in zip(up_trends, up_labels):
for index, row in df[up_trend['from']:up_trend['to']].iterrows():
df.loc[index, 'Up Trend'] = up_label
return df
elif identify == 'down':
down_trends = results['Down Trend']
down_labels = [letter for letter in string.printable[:len(down_trends)]]
for down_trend, down_label in zip(down_trends, down_labels):
for index, row in df[down_trend['from']:down_trend['to']].iterrows():
df.loc[index, 'Down Trend'] = down_label
return df
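# Hedged usage sketch for identify_df_trends (synthetic data; the column name
# 'close' matches what the script below uses):
#   demo = pd.DataFrame({'close': [1, 2, 3, 4, 5, 4, 3, 2, 1, 2, 3, 4, 5]})
#   labelled = identify_df_trends(demo, 'close', window_size=3)
#   # 'Up Trend' / 'Down Trend' columns are added only where such trends are found
# auth.txt below is expected to contain a dict literal of psycopg2 connection
# kwargs, e.g. {'host': ..., 'dbname': ..., 'user': ..., 'password': ...}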
conn = psycopg2.connect(**eval(open('auth.txt').read()))
cmd = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
start_date = date(2010, 3, 25)
end_date = date(2010, 3, 26)
# sampling window
window_size = 5
for single_date in date_range(start_date, end_date):
#smp no volume
#cmd.execute('select * from market_index where mid = 3 and dt=%(dt)s',dict(dt=single_date.strftime("%Y-%m-%d")))
#smp with volume
cmd.execute('select * from market_index where mid = 1 and dt=%(dt)s',dict(dt=single_date.strftime("%Y-%m-%d")))
recs = cmd.fetchall()
    if not recs:
        continue
df = pd.DataFrame(recs, columns = recs[0].keys())
df.sort_values(by='dt')
# with pd.option_context('display.max_rows', None, 'display.max_columns', None):
# print(df)
close_price = df['close'].values
maxprice = max(close_price)
minprice = min(close_price)
    # shift by 0.01 so the normalised price never equals 0
df['close'] = (df['close']-minprice)/(maxprice - minprice)+0.01
close_price = df['close'].values
# close_price = close_price.tolist()
# df_trend = df.copy()
# df_trend['Up Trend'] = np.nan
# df_trend['Down Trend'] = np.nan
df_trend = identify_df_trends(df, 'close', window_size=window_size, identify='both')
# with pd.option_context('display.max_rows', None, 'display.max_columns', None):
# print(df_trend)
df.reset_index(inplace=True)
figure(num=None, figsize=(48, 10), dpi=180, facecolor='w', edgecolor='k')
ax = sns.lineplot(x=df.index, y=df['close'])
ax.set(xlabel='minute')
a=0
b=0
    try:
        labels = df_trend['Up Trend'].dropna().unique().tolist()
    except KeyError:
        df_trend['Up Trend'] = np.nan
        a = 1
if a == 0:
for label in labels:
ax.axvspan(df[df['Up Trend'] == label].index[0], df[df['Up Trend'] == label].index[-1], alpha=0.2, color='red')
    try:
        labels = df_trend['Down Trend'].dropna().unique().tolist()
    except KeyError:
        df_trend['Down Trend'] = np.nan
        b = 1
if b == 0:
for label in labels:
ax.axvspan(df[df['Down Trend'] == label].index[0], df[df['Down Trend'] == label].index[-1], alpha=0.2, color='green')
plt.savefig('date='+single_date.strftime("%m-%d-%Y")+'_window={}.png'.format(window_size))
| 31.68661
| 130
| 0.573368
| 1,453
| 11,122
| 4.300757
| 0.207846
| 0.023524
| 0.009602
| 0.007681
| 0.360538
| 0.308209
| 0.24724
| 0.223236
| 0.197952
| 0.166107
| 0
| 0.008094
| 0.300126
| 11,122
| 350
| 131
| 31.777143
| 0.794707
| 0.199514
| 0
| 0.28866
| 0
| 0
| 0.135815
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.015464
| false
| 0
| 0.087629
| 0
| 0.123711
| 0.020619
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
232d44b9e301f131b81fce59b6e44322f7b61b53
| 978
|
py
|
Python
|
dmatrix.py
|
sanchitcop19/redHackProject
|
16f8d2e2a675dc5bd370e28ab5880a6b1f113a2d
|
[
"Apache-2.0"
] | null | null | null |
dmatrix.py
|
sanchitcop19/redHackProject
|
16f8d2e2a675dc5bd370e28ab5880a6b1f113a2d
|
[
"Apache-2.0"
] | 1
|
2021-06-02T00:26:30.000Z
|
2021-06-02T00:26:30.000Z
|
dmatrix.py
|
sanchitcop19/redHackProject
|
16f8d2e2a675dc5bd370e28ab5880a6b1f113a2d
|
[
"Apache-2.0"
] | 1
|
2019-09-22T08:46:11.000Z
|
2019-09-22T08:46:11.000Z
|
import requests
import json
content = None
with open("scored_output.json") as file:
content = json.load(file)
matrix = [[0 for i in range(len(content))] for j in range(len(content))]
mapping = {}
for i, origin in enumerate(content):
mapping[i] = origin
for j, destination in enumerate(content):
print(i, j)
if origin[0] == ',' or destination[0] == ',' or origin[-2:] != destination[-2:] or origin[-2:] != 'CA':
continue
response = requests.get("https://maps.googleapis.com/maps/api/distancematrix/json?units=imperial&origins=" + origin + "&destinations=" + destination + "&key=" + "AIzaSyA3kdX2kwoRQpkmui8GtloGvGQB-rn1tMU")
        try:
            matrix[i][j] = json.loads(response.content)["rows"][0]["elements"][0]["distance"]["value"]
        except (KeyError, IndexError, ValueError):
            # skip pairs for which the API returned no distance
            continue
data = {
'mapping': mapping,
'matrix': matrix
}
with open("dmatrix.json", "w") as file:
json.dump(data, file)
| 30.5625
| 211
| 0.603272
| 118
| 978
| 4.991525
| 0.474576
| 0.027165
| 0.033956
| 0.057725
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015936
| 0.230061
| 978
| 31
| 212
| 31.548387
| 0.766268
| 0
| 0
| 0.083333
| 0
| 0.041667
| 0.216632
| 0.040041
| 0.041667
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.083333
| 0
| 0.083333
| 0.041667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
232d65d107c7ac95d64e3240caf376ce0bbcff3f
| 2,416
|
py
|
Python
|
src/SetExpan/util.py
|
jmshen1994/SetExpan
|
d725bb9896c45478217294d188fafaea56660858
|
[
"Apache-2.0"
] | 36
|
2017-11-08T01:54:43.000Z
|
2021-08-04T08:26:54.000Z
|
src/SetExpan/util.py
|
mickeystroller/SetExpan
|
d725bb9896c45478217294d188fafaea56660858
|
[
"Apache-2.0"
] | 4
|
2017-10-30T19:47:14.000Z
|
2018-11-22T02:51:55.000Z
|
src/SetExpan/util.py
|
mickeystroller/SetExpan
|
d725bb9896c45478217294d188fafaea56660858
|
[
"Apache-2.0"
] | 10
|
2017-11-10T03:50:54.000Z
|
2020-12-16T19:52:29.000Z
|
'''
__author__: Ellen Wu (modified by Jiaming Shen)
__description__: A bunch of utility functions
__latest_update__: 08/31/2017
'''
from collections import defaultdict
import set_expan
import eid_pair_TFIDF_selection
import extract_seed_edges
import extract_entity_pair_skipgrams
def loadEidToEntityMap(filename):
eid2ename = {}
ename2eid = {}
with open(filename, 'r') as fin:
for line in fin:
seg = line.strip('\r\n').split('\t')
eid2ename[int(seg[1])] = seg[0]
ename2eid[seg[0].lower()] = int(seg[1])
return eid2ename, ename2eid
def loadFeaturesAndEidMap(filename):
featuresetByEid = defaultdict(set)
eidsByFeature = defaultdict(set)
with open(filename, 'r') as fin:
for line in fin:
seg = line.strip('\r\n').split('\t')
eid = int(seg[0])
feature = seg[1]
featuresetByEid[eid].add(feature)
eidsByFeature[feature].add(eid)
return featuresetByEid, eidsByFeature
def loadFeaturesAndEidPairMap(filename):
featuresetByEidPair = defaultdict(set)
eidPairsByFeature = defaultdict(set)
with open(filename, 'r') as fin:
for line in fin:
seg = line.strip('\r\n').split('\t')
eidPair = (int(seg[0]), int(seg[1]))
feature = seg[2]
featuresetByEidPair[eidPair].add(feature)
eidPairsByFeature[feature].add(eidPair)
return featuresetByEidPair, eidPairsByFeature
def loadWeightByEidAndFeatureMap(filename, idx = -1):
''' Load the (eid, feature) -> strength
:param filename:
:param idx: The index column of weight, default is the last column
:return:
'''
weightByEidAndFeatureMap = {}
with open(filename, 'r') as fin:
for line in fin:
seg = line.strip('\r\n').split('\t')
eid = int(seg[0])
feature = seg[1]
weight = float(seg[idx])
weightByEidAndFeatureMap[(eid, feature)] = weight
return weightByEidAndFeatureMap
def loadWeightByEidPairAndFeatureMap(filename, idx = -1):
''' Load the ((eid1, eid2), feature) -> strength
:param filename:
:param idx: The index column of weight, default is the last column
:return:
'''
weightByEidPairAndFeatureMap = {}
with open(filename, 'r') as fin:
for line in fin:
seg = line.strip('\r\n').split('\t')
eidPair = (int(seg[0]), int(seg[1]))
feature = seg[2]
weight = float(seg[idx])
weightByEidPairAndFeatureMap[(eidPair, feature)] = weight
return weightByEidPairAndFeatureMap
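# Hedged usage sketch (file names are illustrative; the TSV layouts match the
# parsers above: entity\teid, eid\tfeature, and eid1\teid2\tfeature):
#   eid2ename, ename2eid = loadEidToEntityMap('entity2id.txt')
#   featuresetByEid, eidsByFeature = loadFeaturesAndEidMap('eid2feature.txt')
#   weights = loadWeightByEidAndFeatureMap('eid2feature2strength.txt', idx=-1)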
| 30.974359
| 68
| 0.68005
| 292
| 2,416
| 5.55137
| 0.287671
| 0.029611
| 0.049352
| 0.052437
| 0.378779
| 0.355336
| 0.355336
| 0.355336
| 0.355336
| 0.355336
| 0
| 0.016352
| 0.189983
| 2,416
| 78
| 69
| 30.974359
| 0.811957
| 0.162252
| 0
| 0.446429
| 0
| 0
| 0.017614
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.089286
| false
| 0
| 0.089286
| 0
| 0.267857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
232e28fbfd431f5f262b4d4fadc8f82e257b7c68
| 534
|
py
|
Python
|
solutions/container-generator.py
|
hydrargyrum/python-exercises
|
f99889d18179dce45956ce68382e37a987c8f460
|
[
"Unlicense"
] | null | null | null |
solutions/container-generator.py
|
hydrargyrum/python-exercises
|
f99889d18179dce45956ce68382e37a987c8f460
|
[
"Unlicense"
] | null | null | null |
solutions/container-generator.py
|
hydrargyrum/python-exercises
|
f99889d18179dce45956ce68382e37a987c8f460
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env pytest-3
import pytest
# Exercice: iter
def multiples_of(n):
i = 0
while True:
yield i
i += n
# test
def test_iter():
gen = multiples_of(3)
for n, mult in enumerate(gen):
assert n * 3 == mult
if n >= 100:
break
for n, mult in enumerate(gen):
assert (n + 101) * 3 == mult
if n >= 100:
break
gen = multiples_of(4)
for n, mult in enumerate(gen):
assert n * 4 == mult
if n >= 100:
break
| 16.181818
| 36
| 0.488764
| 76
| 534
| 3.381579
| 0.381579
| 0.128405
| 0.093385
| 0.116732
| 0.521401
| 0.463035
| 0.338521
| 0.338521
| 0
| 0
| 0
| 0.059561
| 0.402622
| 534
| 32
| 37
| 16.6875
| 0.746082
| 0.078652
| 0
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 1
| 0.095238
| false
| 0
| 0.047619
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2330a75a4af76c6269b983247c9bbf1f53e9a024
| 8,468
|
py
|
Python
|
pds_github_util/plan/plan.py
|
NASA-PDS/pds-github-util
|
155f60532a02bcbc7a9664b8a170a2e7ab0463d1
|
[
"Apache-2.0"
] | null | null | null |
pds_github_util/plan/plan.py
|
NASA-PDS/pds-github-util
|
155f60532a02bcbc7a9664b8a170a2e7ab0463d1
|
[
"Apache-2.0"
] | 42
|
2020-09-17T17:30:40.000Z
|
2022-03-31T21:09:19.000Z
|
pds_github_util/plan/plan.py
|
NASA-PDS/pds-github-util
|
155f60532a02bcbc7a9664b8a170a2e7ab0463d1
|
[
"Apache-2.0"
] | 3
|
2020-08-12T23:02:40.000Z
|
2021-09-30T11:57:59.000Z
|
"""Release Planning."""
import argparse
import github3
import logging
import os
import sys
import traceback
from pds_github_util.issues.utils import get_labels, is_theme
from pds_github_util.zenhub.zenhub import Zenhub
from pds_github_util.utils import GithubConnection, addStandardArguments
from pkg_resources import resource_string
from jinja2 import Template
from yaml import FullLoader, load
# PDS Github Org
GITHUB_ORG = 'NASA-PDS'
REPO_INFO = ('\n--------\n\n'
'{}\n'
'{}\n\n'
'*{}*\n\n'
'.. list-table:: \n'
' :widths: 15 15 15 15 15 15\n\n'
' * - `User Guide <{}>`_\n'
' - `Github Repo <{}>`_\n'
' - `Issue Tracking <{}/issues>`_ \n'
' - `Backlog <{}/issues?q=is%3Aopen+is%3Aissue+label%3Abacklog>`_ \n'
' - `Stable Release <{}/releases/latest>`_ \n'
' - `Dev Release <{}/releases>`_ \n\n')
# Quiet github3 logging
logger = logging.getLogger('github3')
logger.setLevel(level=logging.WARNING)
# Enable logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def append_to_project(proj, output):
if 'output' in proj.keys():
proj['output'] += output
else:
proj['output'] = output
def get_project(projects, gh_issue, labels):
intersection = list(set(projects.keys()) & set(labels))
if intersection:
return projects[intersection[0]]
else:
raise Exception(f"Unknown project for theme '{gh_issue.title}': {labels}")
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
description=__doc__)
addStandardArguments(parser)
parser.add_argument('--github_token',
help='github API token')
parser.add_argument('--zenhub_token',
help='zenhub API token')
parser.add_argument('--build_number',
help='build number',
required=True)
parser.add_argument('--delivery_date',
help='EN delivery to I&T date',
required=True)
parser.add_argument('--trr_date',
help='EN TRR date',
required=True)
parser.add_argument('--ddr_date',
help='EN DDR date',
required=True)
    parser.add_argument('--release_date',
                        help='EN release date',
                        required=True)
parser.add_argument('--projects_config',
help='Path to config file with project information',
required=True)
args = parser.parse_args()
# set output filename
    output_fname = 'plan.rst'
# get github token or throw error
github_token = args.github_token or os.environ.get('GITHUB_TOKEN')
    if not github_token:
        logger.error('github API token must be provided or set as an environment '
                     'variable (GITHUB_TOKEN).')
        sys.exit(1)
# get zenhub token or throw error
    zenhub_token = args.zenhub_token or os.environ.get('ZENHUB_TOKEN')
    if not zenhub_token:
        logger.error('zenhub API token must be provided or set as an environment '
                     'variable (ZENHUB_TOKEN).')
        sys.exit(1)
try:
gh = GithubConnection.getConnection(token=github_token)
org = gh.organization(GITHUB_ORG)
repos = org.repositories()
issues = []
repo_dict = {}
zen = Zenhub(zenhub_token)
for repo in repos:
if not issues:
issues = zen.get_issues_by_release(repo.id, f'B{args.build_number}')
repo_dict[repo.id] = {'repo': repo,
'issues': []}
# Build up dictionary of repos + issues in release
issue_dict = {}
for issue in issues:
repo_dict[issue['repo_id']]['issues'].append(issue['issue_number'])
# Create project-based dictionary
with open(args.projects_config) as _file:
_conf = load(_file, Loader=FullLoader)
# get project info
projects = _conf['projects']
# get key dates info
key_dates = _conf['key_dates']
# Loop through repos
plan_output = ''
maintenance_output = ''
ddwg_plans = ''
for repo_id in repo_dict:
r = repo_dict[repo_id]['repo']
issues = repo_dict[repo_id]['issues']
repo_output = ''
if issues:
for issue_num in issues:
gh_issue = gh.issue(org.login, repo_dict[repo_id]['repo'].name, issue_num)
zen_issue = zen.issue(repo_id, issue_num)
# we only want release themes in the plan (is_epic + label:theme)
labels = get_labels(gh_issue)
# Custom handling for pds4-information-model SCRs
if 'CCB-' in gh_issue.title:
ddwg_plans += f'* `{r.name}#{issue_num} <{gh_issue.html_url}>`_ **{gh_issue.title}**\n'
elif is_theme(labels, zen_issue):
repo_output += f'* `{r.name}#{issue_num} <{gh_issue.html_url}>`_ **{gh_issue.title}**\n'
# proj_id = get_project(projects, gh_issue, labels)
# append_to_project(projects[proj_id], f'* `{r.name}#{issue_num} <{gh_issue.html_url}>`_ **{gh_issue.title}**\n')
for child in zen.get_epic_children(gh, org, repo_id, issue_num):
child_repo = child['repo']
child_issue = child['issue']
repo_output += f' * `{child_repo.name}#{child_issue.number} <{child_issue.html_url}>`_ {child_issue.title}\n'
# append_to_project(projects[proj_id], f' * `{child_repo.name}#{child_issue.number} <{child_issue.html_url}>`_ {child_issue.title}\n')
# print(repo_output)
repo_info = REPO_INFO.format(r.name,
'#' * len(r.name),
r.description,
r.homepage or r.html_url + '#readme',
r.html_url,
r.html_url,
r.html_url,
r.html_url,
r.html_url)
# only output the header
if repo_output:
plan_output += repo_info
plan_output += repo_output
with open(output_fname, 'w') as f_out:
template_kargs = {
'output': output_fname,
'build_number': args.build_number,
'scr_date': key_dates['scr_date'],
'doc_update_date': key_dates['doc_update_date'],
'delivery_date': key_dates['delivery_date'],
'trr_date': key_dates['trr_date'],
'beta_test_date': key_dates['beta_test_date'],
'dldd_int_date': key_dates['dldd_int_date'],
'doc_review_date': key_dates['doc_review_date'],
'ddr_date': key_dates['ddr_date'],
'release_date': key_dates['release_date'],
'pds4_changes': ddwg_plans,
'planned_changes': plan_output
}
template = Template(resource_string(__name__, 'plan.template.rst').decode("utf-8"))
rst_str = template.render(template_kargs)
f_out.write(rst_str)
    except Exception:
        traceback.print_exc()
        sys.exit(1)
    logger.info('SUCCESS: Release Plan generated successfully.')
if __name__ == '__main__':
main()
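# Hedged invocation sketch (module path follows the repo layout; argument
# values are illustrative):
#   python -m pds_github_util.plan.plan --build_number 13.0 \
#       --delivery_date 2022-10-01 --trr_date 2022-10-15 --ddr_date 2022-11-01 \
#       --release_date 2022-11-15 --projects_config projects_config.yaml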
| 36.978166
| 164
| 0.535782
| 936
| 8,468
| 4.595085
| 0.223291
| 0.021158
| 0.02511
| 0.01953
| 0.223204
| 0.182051
| 0.14206
| 0.126715
| 0.110904
| 0.110904
| 0
| 0.004722
| 0.349787
| 8,468
| 228
| 165
| 37.140351
| 0.776426
| 0.130255
| 0
| 0.11039
| 0
| 0.012987
| 0.210182
| 0.039853
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019481
| false
| 0
| 0.077922
| 0
| 0.103896
| 0.006494
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2338e51f497f2917867ef18cfad79cfe5635f3ea
| 717
|
py
|
Python
|
setup.py
|
DigiKlausur/ilias2nbgrader
|
ef6b14969ce73f8203aa125175915f76f07c8e43
|
[
"MIT"
] | 4
|
2020-01-17T08:39:00.000Z
|
2021-12-13T13:54:14.000Z
|
setup.py
|
DigiKlausur/ilias2nbgrader
|
ef6b14969ce73f8203aa125175915f76f07c8e43
|
[
"MIT"
] | 12
|
2020-01-24T14:52:35.000Z
|
2020-05-26T15:34:20.000Z
|
setup.py
|
DigiKlausur/ilias2nbgrader
|
ef6b14969ce73f8203aa125175915f76f07c8e43
|
[
"MIT"
] | 1
|
2020-03-23T17:16:06.000Z
|
2020-03-23T17:16:06.000Z
|
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
with open('README.md') as f:
readme = f.read()
setup(
name='ilias2nbgrader',
version='0.4.3',
license='MIT',
url='https://github.com/DigiKlausur/ilias2nbgrader',
description='Exchange submissions and feedbacks between ILIAS and nbgrader',
long_description=readme,
long_description_content_type="text/markdown",
author='Tim Metzler',
author_email='tim.metzler@h-brs.de',
packages=find_packages(exclude=('tests', 'docs')),
install_requires=[
"rapidfuzz",
"nbformat"
],
include_package_data = True,
zip_safe=False,
test_suite='tests',
tests_require=['pytest-cov']
)
| 26.555556
| 80
| 0.668061
| 85
| 717
| 5.482353
| 0.8
| 0.051502
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010292
| 0.18689
| 717
| 26
| 81
| 27.576923
| 0.789022
| 0.029289
| 0
| 0
| 0
| 0
| 0.319885
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.043478
| 0
| 0.043478
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
23395cc50637ff5b0993e2601b07c4a0ab09d8ac
| 2,343
|
py
|
Python
|
citrees/utils.py
|
m0hashi/citrees
|
e7d4866109ce357d5d67cffa450604567f7b469e
|
[
"MIT"
] | null | null | null |
citrees/utils.py
|
m0hashi/citrees
|
e7d4866109ce357d5d67cffa450604567f7b469e
|
[
"MIT"
] | null | null | null |
citrees/utils.py
|
m0hashi/citrees
|
e7d4866109ce357d5d67cffa450604567f7b469e
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import, print_function
from numba import jit
import numpy as np
# from externals.six.moves import range
def bayes_boot_probs(n):
"""Bayesian bootstrap sampling for case weights
Parameters
----------
n : int
Number of Bayesian bootstrap samples
Returns
-------
p : 1d array-like
Array of sampling probabilities
"""
p = np.random.exponential(scale=1.0, size=n)
return p/p.sum()
@jit(nopython=True, cache=True, nogil=True)
def auc_score(y_true, y_prob):
"""ADD
Parameters
----------
Returns
-------
"""
y_true, n = y_true[np.argsort(y_prob)], len(y_true)
nfalse, auc = 0, 0.0
for i in range(n):
nfalse += 1 - y_true[i]
auc += y_true[i] * nfalse
auc /= (nfalse * (n - nfalse))
return auc
def logger(name, message):
"""Prints messages with style "[NAME] message"
Parameters
----------
name : str
Short title of message, for example, train or test
message : str
Main description to be displayed in terminal
Returns
-------
None
"""
print('[{name}] {message}'.format(name=name.upper(), message=message))
def estimate_margin(y_probs, y_true):
"""Estimates margin function of forest ensemble
Note : This function is similar to margin in R's randomForest package
Parameters
----------
y_probs : 2d array-like
Predicted probabilities where each row represents predicted
class distribution for sample and each column corresponds to
estimated class probability
y_true : 1d array-like
Array of true class labels
Returns
-------
margin : float
Estimated margin of forest ensemble
"""
# Calculate probability of correct class
n, p = y_probs.shape
true_probs = y_probs[np.arange(n, dtype=int), y_true]
# Calculate maximum probability for incorrect class
other_probs = np.zeros(n)
for i in range(n):
mask = np.zeros(p, dtype=bool)
mask[y_true[i]] = True
other_idx = np.ma.array(y_probs[i,:], mask=mask).argmax()
other_probs[i] = y_probs[i, other_idx]
# Margin is P(y == j) - max(P(y != j))
return true_probs - other_probs
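# Worked example: the margin is P(correct class) minus the largest probability
# assigned to any other class, so for the single sample below it is
# 0.7 - 0.2 = 0.5:
#   y_probs = np.array([[0.7, 0.2, 0.1]])
#   y_true = np.array([0])
#   estimate_margin(y_probs, y_true)  # -> array([0.5])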
| 24.154639
| 74
| 0.599659
| 308
| 2,343
| 4.448052
| 0.412338
| 0.036496
| 0.013139
| 0.023358
| 0.043796
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005376
| 0.285531
| 2,343
| 96
| 75
| 24.40625
| 0.813023
| 0.455399
| 0
| 0.074074
| 0
| 0
| 0.016917
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.148148
| false
| 0
| 0.111111
| 0
| 0.37037
| 0.074074
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
233b1c9f4e244ac8cb55094347c4c0772dd724da
| 4,820
|
py
|
Python
|
blog/views.py
|
arascch/Django_blog
|
091a5a4974534fbe37560bd8e451716a3b1bdcbf
|
[
"Apache-2.0"
] | 1
|
2019-03-04T15:02:03.000Z
|
2019-03-04T15:02:03.000Z
|
blog/views.py
|
arascch/Django_blog
|
091a5a4974534fbe37560bd8e451716a3b1bdcbf
|
[
"Apache-2.0"
] | null | null | null |
blog/views.py
|
arascch/Django_blog
|
091a5a4974534fbe37560bd8e451716a3b1bdcbf
|
[
"Apache-2.0"
] | null | null | null |
from django.shortcuts import render, get_object_or_404
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.views.generic import ListView
from .models import Post , Comment
from .forms import EmailPostForm , CommentForm , SearchForm
from django.core.mail import send_mail
from taggit.models import Tag
from django.db.models import Count
from django.contrib.postgres.search import SearchVector , SearchQuery , SearchRank , TrigramSimilarity
def post_list(request , tag_slug = None):
object_list = Post.published.all()
tag = None
if tag_slug:
tag = get_object_or_404(Tag , slug = tag_slug)
object_list = object_list.filter(tags__in = [tag])
    paginator = Paginator(object_list, 1)  # 1 post per page
page = request.GET.get('page')
try:
posts = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer deliver the first page
posts = paginator.page(1)
except EmptyPage:
# If page is out of range deliver last page of results
posts = paginator.page(paginator.num_pages)
return render(request,
'blog/post/list.html',
{'page': page,
'posts': posts,
'tag' : tag})
def post_detail(request, year, month, day, post):
post = get_object_or_404(Post, slug=post,
status='published',
publish__year=year,
publish__month=month,
publish__day=day)
#list of active comments for this post
comments = post.comments.filter(active = True)
new_comment = None
if request.method == 'POST':
#A comment was posted
comment_form = CommentForm(data = request.POST)
if comment_form.is_valid():
            # create the comment object but don't save it to the database yet
new_comment = comment_form.save(commit=False)
#assign the current post to the comment
new_comment.post = post
#save the comment to the database
new_comment.save()
else:
comment_form = CommentForm()
post_tags_ids = post.tags.values_list('id' , flat = True)
similar_posts = Post.published.filter(tags__in = post_tags_ids)\
.exclude(id = post.id)
similar_posts = similar_posts.annotate(same_tags = Count('tags'))\
.order_by('-same_tags' , '-publish')[:4]
return render(request,
'blog/post/detail.html',
{'post': post ,
'comments' : comments,
'new_comment':new_comment,
'comment_form':comment_form,
'similar_posts' : similar_posts})
class PostListView(ListView):
queryset = Post.published.all()
context_object_name = 'posts'
paginate_by = 3
template_name = 'blog/post/list.html'
def post_share(request , post_id):
post = get_object_or_404(Post , id = post_id , status = 'published')
sent = False
if request.method == 'POST':
form = EmailPostForm(request.POST)
if form.is_valid():
cd = form.cleaned_data
post_url = request.build_absolute_uri(
post.get_absolute_url())
subject = '{} ({}) recommends you reading" {}" '.format(cd['name'] , cd['email'], post.title)
message = 'Read "{}" at {}\n\n{}\'s comments: {}'.format(post.title , post_url , cd['name'] , cd ['comments'])
send_mail(subject , message , 'admin@arasch.ir' , [cd['to']])
sent = True
    else:
        form = EmailPostForm()
return render(request , 'blog/post/share.html' , {'post' : post ,
'form' : form ,
'sent' : sent})
def post_search(request):
form = SearchForm()
query = None
results = []
if 'query' in request.GET:
form = SearchForm(request.GET)
if form.is_valid():
query = form.cleaned_data['query']
search_vector = SearchVector('title' , weight = 'A') + SearchVector('body' , weight = 'B')
search_query = SearchQuery(query)
results = Post.objects.annotate(
similarity = TrigramSimilarity('title' , query),
search = search_vector,
rank = SearchRank(search_vector , search_query)
).filter(similarity__gt = 0.3).order_by('-similarity')
return render(request ,
'blog/post/search.html',
{'form' : form ,
'query': query,
'results' : results})
| 40.504202
| 122
| 0.567427
| 526
| 4,820
| 5.036122
| 0.290875
| 0.02265
| 0.01661
| 0.02114
| 0.05738
| 0.01661
| 0
| 0
| 0
| 0
| 0
| 0.005884
| 0.330083
| 4,820
| 119
| 123
| 40.504202
| 0.814494
| 0.062448
| 0
| 0.090909
| 0
| 0
| 0.091111
| 0.009311
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040404
| false
| 0
| 0.090909
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
233d6f3fd59520be733341519e2ee7dc3d18d10a
| 2,424
|
py
|
Python
|
StudentAssociation/tasks.py
|
codertimeless/StudentAssociation
|
3f6caf2b362623d4f8cf82bab9529951a375fe6a
|
[
"Apache-2.0"
] | null | null | null |
StudentAssociation/tasks.py
|
codertimeless/StudentAssociation
|
3f6caf2b362623d4f8cf82bab9529951a375fe6a
|
[
"Apache-2.0"
] | 15
|
2020-03-09T11:56:13.000Z
|
2022-02-10T15:03:01.000Z
|
StudentAssociation/tasks.py
|
codertimeless/StudentAssociation
|
3f6caf2b362623d4f8cf82bab9529951a375fe6a
|
[
"Apache-2.0"
] | null | null | null |
from django.utils import timezone
from django.db.models import Q
from celery.decorators import task, periodic_task
from celery.utils.log import get_task_logger
from celery.task.schedules import crontab
from accounts.models.user_profile import ClubUserProfile
from management.models.activity_apply import ActivityApplication
from accounts.models.messages import Messages
from StudentAssociation.utils import message_service
from .utils import send_email
logger = get_task_logger(__name__)
@task(name='celery_send_email')
def celery_send_email(subject, to_email, msg):
logger.info("Send Email")
return send_email(subject, to_email, msg)
@task(name="send_inner_message")
def send_inner_message(content, next_url, to_user, msg_type):
pass
@periodic_task(run_every=crontab(minute=2, hour='8-10'))
def send_msg_to_notice_check():
aps = ActivityApplication.objects.filter(Q(approved_teacher=False) | Q(approved_association=False)
| Q(approved_xuegong=False))
    for ap in aps:
        apply_time = ap.apply_time
        current_time = timezone.now()
        elapsed = current_time - apply_time
        if elapsed.days >= 1:
            if not ap.approved_association and not ap.send_ass:
                # job "活动管理" = activity management
                phone_number = ClubUserProfile.objects.filter(job="活动管理")[0].phone_number
                # "You have an activity application from <club> awaiting your review;
                # please log in to the club management system to view it."
                content = "您有一个来自 " + ap.main_club.name + " 活动申请,等待你进行审核哦,请登录社团管理系统进行查看。"
                flag, status = message_service(phone_number=phone_number, message=content)
                if flag:
                    ap.send_ass = True
            if not ap.approved_teacher and not ap.send_tea:
                # job "指导老师" = supervising teacher
                phone_number = ClubUserProfile.objects.filter(job="指导老师", club=ap.main_club)[0].phone_number
                # "A club you supervise, <club>, has an activity application awaiting
                # your review; please log in to the club management system to view it."
                content = "您所管理的社团: " + ap.main_club.name + " ,有一个活动申请等待您的审核,请登录社团管理系统进行查看。"
                flag, status = message_service(phone_number=phone_number, message=content)
                if flag:
                    ap.send_tea = True
            if not ap.approved_xuegong and not ap.send_xue:
                # job "学工处老师" = student affairs office teacher
                phone_number = ClubUserProfile.objects.filter(job="学工处老师")[0].phone_number
                # same notification text as the association branch above
                content = "您有一个来自 " + ap.main_club.name + " 活动申请,等待你进行审核哦,请登录社团管理系统进行查看。"
                flag, status = message_service(phone_number=phone_number, message=content)
                if flag:
                    ap.send_xue = True
ap.save()
return True
| 39.737705
| 108
| 0.667904
| 305
| 2,424
| 5.081967
| 0.311475
| 0.085161
| 0.023226
| 0.023226
| 0.345161
| 0.345161
| 0.230323
| 0.230323
| 0.230323
| 0.230323
| 0
| 0.004372
| 0.24505
| 2,424
| 60
| 109
| 40.4
| 0.842623
| 0
| 0
| 0.170213
| 0
| 0
| 0.07137
| 0.035066
| 0
| 0
| 0
| 0
| 0
| 1
| 0.06383
| false
| 0.021277
| 0.212766
| 0
| 0.319149
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
233dd3a1892a3e39ce7f0e1314827e36c01fc57e
| 433
|
py
|
Python
|
streaming/take_picture.py
|
jsse-2017-ph23/rpi-streaming
|
a701e6bc818b24b880a409db65b43a43e78259f8
|
[
"MIT"
] | 1
|
2017-08-25T08:31:01.000Z
|
2017-08-25T08:31:01.000Z
|
streaming/take_picture.py
|
jsse-2017-ph23/rpi-streaming
|
a701e6bc818b24b880a409db65b43a43e78259f8
|
[
"MIT"
] | null | null | null |
streaming/take_picture.py
|
jsse-2017-ph23/rpi-streaming
|
a701e6bc818b24b880a409db65b43a43e78259f8
|
[
"MIT"
] | null | null | null |
import threading
from datetime import datetime
from io import BytesIO
capture_lock = threading.Lock()
def take_picture(camera):
# Create an in-memory stream
stream = BytesIO()
camera.rotation = 180
camera.annotate_text = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
with capture_lock:
camera.capture(stream, 'jpeg', resize=(720, 480))
value = stream.getvalue()
stream.close()
return value
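# Hedged usage sketch (assumes the picamera package, whose camera object
# provides the rotation/annotate_text/capture API used above):
#   from picamera import PiCamera
#   camera = PiCamera()
#   jpeg_bytes = take_picture(camera)
#   with open('frame.jpg', 'wb') as f:
#       f.write(jpeg_bytes)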
| 22.789474
| 71
| 0.678984
| 57
| 433
| 5.087719
| 0.649123
| 0.075862
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025937
| 0.198614
| 433
| 18
| 72
| 24.055556
| 0.809798
| 0.060046
| 0
| 0
| 0
| 0
| 0.051852
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.230769
| 0
| 0.384615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2340ff27f70c0f25fa92baa0c7cf6b801391d2c6
| 8,061
|
py
|
Python
|
src/bin/shipyard_airflow/shipyard_airflow/plugins/deployment_status_operator.py
|
rb560u/airship-shipyard
|
01b6960c1f80b44d1db31c081139649c40b82308
|
[
"Apache-2.0"
] | 12
|
2018-05-18T18:59:23.000Z
|
2019-05-10T12:31:44.000Z
|
src/bin/shipyard_airflow/shipyard_airflow/plugins/deployment_status_operator.py
|
rb560u/airship-shipyard
|
01b6960c1f80b44d1db31c081139649c40b82308
|
[
"Apache-2.0"
] | 4
|
2021-07-28T14:36:57.000Z
|
2022-03-22T16:39:23.000Z
|
src/bin/shipyard_airflow/shipyard_airflow/plugins/deployment_status_operator.py
|
rb560u/airship-shipyard
|
01b6960c1f80b44d1db31c081139649c40b82308
|
[
"Apache-2.0"
] | 9
|
2018-05-18T16:42:41.000Z
|
2019-04-18T20:12:14.000Z
|
# Copyright 2019 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import configparser
import logging
import yaml
from airflow import AirflowException
from airflow.plugins_manager import AirflowPlugin
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
import kubernetes
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_config_map import V1ConfigMap
from kubernetes.client.models.v1_object_meta import V1ObjectMeta
from shipyard_airflow.conf import config
from shipyard_airflow.control.helpers.action_helper import \
get_deployment_status
from shipyard_airflow.plugins.xcom_puller import XcomPuller
from shipyard_airflow.common.document_validators.document_validation_utils \
import DocumentValidationUtils
from shipyard_airflow.plugins.deckhand_client_factory import \
DeckhandClientFactory
from shipyard_airflow.common.document_validators.errors import \
DocumentNotFoundError
LOG = logging.getLogger(__name__)
# Variable to hold details about how the Kubernetes ConfigMap is stored
CONFIG_MAP_DETAILS = {
'api_version': 'v1',
'kind': 'ConfigMap',
'pretty': 'true'
}
class DeploymentStatusOperator(BaseOperator):
"""Deployment status operator
Update Kubernetes with the deployment status of this dag's action
"""
@apply_defaults
def __init__(self, shipyard_conf, main_dag_name, force_completed=False,
*args, **kwargs):
super(DeploymentStatusOperator, self).__init__(*args, **kwargs)
self.shipyard_conf = shipyard_conf
self.main_dag_name = main_dag_name
self.force_completed = force_completed
self.xcom_puller = None
def execute(self, context):
"""Execute the main code for this operator.
Create a ConfigMap with the deployment status of this dag's action
"""
LOG.info("Running deployment status operator")
self.xcom_puller = XcomPuller(self.main_dag_name, context['ti'])
# Required for the get_deployment_status helper to function properly
config.parse_args(args=[], default_config_files=[self.shipyard_conf])
# First we need to check if the concurrency check was successful as
# this operator is expected to run even if upstream steps fail
if not self.xcom_puller.get_concurrency_status():
msg = "Concurrency check did not pass, so the deployment status " \
"will not be updated"
LOG.error(msg)
raise AirflowException(msg)
deployment_status_doc, revision_id = self._get_status_and_revision()
deployment_version_doc = self._get_version_doc(revision_id)
full_data = {
'deployment': deployment_status_doc,
**deployment_version_doc
}
config_map_data = {'release': yaml.safe_dump(full_data)}
self._store_as_config_map(config_map_data)
def _get_status_and_revision(self):
"""Retrieve the deployment status information from the appropriate
helper function
:return: dict with the status of the deployment
:return: revision_id of the action
"""
action_info = self.xcom_puller.get_action_info()
deployment_status = get_deployment_status(
action_info,
force_completed=self.force_completed)
revision_id = action_info['committed_rev_id']
return deployment_status, revision_id
def _get_version_doc(self, revision_id):
"""Retrieve the deployment-version document from Deckhand
:param revision_id: the revision_id of the docs to grab the
deployment-version document from
:return: deployment-version document returned from Deckhand
"""
# Read and parse shipyard.conf
config = configparser.ConfigParser()
config.read(self.shipyard_conf)
doc_name = config.get('document_info', 'deployment_version_name')
doc_schema = config.get('document_info', 'deployment_version_schema')
dh_client = DeckhandClientFactory(self.shipyard_conf).get_client()
dh_tool = DocumentValidationUtils(dh_client)
try:
deployment_version_doc = dh_tool.get_unique_doc(
revision_id=revision_id,
schema=doc_schema,
name=doc_name)
return deployment_version_doc
except DocumentNotFoundError:
LOG.info("There is no deployment-version document in Deckhand "
"under the revision '{}' with the name '{}' and schema "
"'{}'".format(revision_id, doc_name, doc_schema))
return {}
def _store_as_config_map(self, data):
"""Store given data in a Kubernetes ConfigMap
:param dict data: The data to store in the ConfigMap
"""
LOG.info("Storing deployment status as Kubernetes ConfigMap")
# Read and parse shipyard.conf
config = configparser.ConfigParser()
config.read(self.shipyard_conf)
name = config.get('deployment_status_configmap', 'name')
namespace = config.get('deployment_status_configmap', 'namespace')
k8s_client = self._get_k8s_client()
cfg_map_obj = self._create_config_map_object(name, namespace, data)
cfg_map_naming = "(name: {}, namespace: {})".format(name, namespace)
try:
LOG.info("Updating deployment status config map {}, "
.format(cfg_map_naming))
k8s_client.patch_namespaced_config_map(
name,
namespace,
cfg_map_obj,
pretty=CONFIG_MAP_DETAILS['pretty'])
except ApiException as err:
if err.status != 404:
raise
# ConfigMap still needs to be created
LOG.info("Deployment status config map does not exist yet")
LOG.info("Creating deployment status config map {}".format(
cfg_map_naming))
k8s_client.create_namespaced_config_map(
namespace,
cfg_map_obj,
pretty=CONFIG_MAP_DETAILS['pretty'])
@staticmethod
def _get_k8s_client():
"""Create and return a Kubernetes client
:returns: A Kubernetes client object
:rtype: kubernetes.client
"""
# Note that we are using 'in_cluster_config'
LOG.debug("Loading Kubernetes config")
kubernetes.config.load_incluster_config()
LOG.debug("Creating Kubernetes client")
return kubernetes.client.CoreV1Api()
@staticmethod
def _create_config_map_object(name, namespace, data):
"""Create/return a Kubernetes ConfigMap object out of the given data
:param dict data: The data to put into the config map
:returns: A config map object made from the given data
:rtype: V1ConfigMap
"""
LOG.debug("Creating Kubernetes config map object")
metadata = V1ObjectMeta(
name=name,
namespace=namespace
)
return V1ConfigMap(
api_version=CONFIG_MAP_DETAILS['api_version'],
kind=CONFIG_MAP_DETAILS['kind'],
data=data,
metadata=metadata
)
class DeploymentStatusOperatorPlugin(AirflowPlugin):
"""Creates DeploymentStatusOperatorPlugin in Airflow."""
name = "deployment_status_operator"
operators = [DeploymentStatusOperator]
| 36.977064
| 79
| 0.677832
| 933
| 8,061
| 5.635584
| 0.269025
| 0.034234
| 0.021681
| 0.014264
| 0.181818
| 0.136173
| 0.096995
| 0.082541
| 0.082541
| 0.050209
| 0
| 0.004141
| 0.251085
| 8,061
| 217
| 80
| 37.147465
| 0.866821
| 0.252574
| 0
| 0.110236
| 0
| 0
| 0.133862
| 0.02208
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055118
| false
| 0.007874
| 0.133858
| 0
| 0.259843
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|