hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | 
qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | 
qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
526610e16ee728557c0a3bfa58f2986c17e49598 | 7,416 | py | Python | server/python/django_w2ui/django_w2ui/views.py | EruditePig/w2ui | 81e0ee27692956325d4729d36d23e93c1094a397 | [
"MIT"
] | 1,415 | 2015-01-01T06:37:10.000Z | 2022-03-30T01:40:31.000Z | server/python/django_w2ui/django_w2ui/views.py | EruditePig/w2ui | 81e0ee27692956325d4729d36d23e93c1094a397 | [
"MIT"
] | 1,237 | 2015-01-05T16:24:34.000Z | 2022-03-28T14:21:51.000Z | server/python/django_w2ui/django_w2ui/views.py | EruditePig/w2ui | 81e0ee27692956325d4729d36d23e93c1094a397 | [
"MIT"
] | 640 | 2015-01-09T12:56:26.000Z | 2022-03-30T05:37:37.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
import re
from operator import or_ , and_
from django.core.paginator import Paginator
from django.core.serializers.json import DjangoJSONEncoder
from django.db import models
from django.db.models import Q
from django.http import HttpResponse, HttpResponseBadRequest
from django.utils.six import text_type
from django.utils.six.moves import reduce, xrange
from django.views.generic import View
from django.views.generic.list import MultipleObjectMixin
from django.forms.models import modelform_factory
from django.views.generic.detail import SingleObjectMixin
from django.utils.timezone import is_aware
import decimal
import datetime
import settings
JSON_MIMETYPE = 'application/json'
RE_FORMATTED = re.compile(r'\{(\w+)\}')
#: SQLite unsupported field types for regex lookups
UNSUPPORTED_REGEX_FIELDS = (
models.IntegerField,
models.BooleanField,
models.NullBooleanField,
models.FloatField,
models.DecimalField,
)
class DjangoJSONEncoderMod(json.JSONEncoder):
"""
JSONEncoder subclass that knows how to encode date/time and decimal types.
"""
def default(self, o):
# See "Date Time String Format" in the ECMA-262 specification.
if isinstance(o, datetime.datetime):
# r = o.isoformat()
USER_SHORT_DATETIME_FORMAT = "%d-%m-%Y %H:%M"
r = o.strftime(USER_SHORT_DATETIME_FORMAT)
if o.microsecond:
r = r[:23] + r[26:]
if r.endswith('+00:00'):
r = r[:-6] + 'Z'
return r
elif isinstance(o, datetime.date):
USER_SHORT_DATETIME_FORMAT = "%d-%m-%Y"
return o.strftime(USER_SHORT_DATETIME_FORMAT)
# return o.isoformat()
elif isinstance(o, datetime.time):
if is_aware(o):
raise ValueError("JSON can't represent timezone-aware times.")
# r = o.isoformat()
# if o.microsecond:
# r = r[:12]
USER_SHORT_TIME_FORMAT = "%H:%M"
r = o.strftime(USER_SHORT_TIME_FORMAT)
return r
elif isinstance(o, decimal.Decimal):
return str(o)
else:
return super(DjangoJSONEncoderMod, self).default(o)
def get_real_field(model, field_name):
'''
Get the real field from a model given its name.
Handle nested models recursively (aka. ``__`` lookups)
'''
parts = field_name.split('__')
field = model._meta.get_field(parts[0])
if len(parts) == 1:
return model._meta.get_field(field_name)
elif isinstance(field, models.ForeignKey):
return get_real_field(field.rel.to, '__'.join(parts[1:]))
else:
raise Exception('Unhandled field: %s' % field_name)
class W2uiBaseView(View):
fields = []
commands = {}
data = {}
def post(self, request, *args, **kwargs):
self.data = json.loads(request.body)
cmd = self.commands.get(self.data.get('cmd',''),'')
if cmd:
response = getattr(self,cmd)(self.data)
else:
response = self.error('unknown command "%s"' % self.data.get('cmd',''))
return HttpResponse(json.dumps(response, cls=DjangoJSONEncoderMod),mimetype=JSON_MIMETYPE)
def response(self,status, message, data):
resp = {
"status" : status,
"message": message,
}
if data:
resp.update(data)
return resp
def success(self,message="",data=None):
return self.response('success',message,data)
def error(self,message="",data=None):
return self.response('error',message,data)
class W2uiGridView(MultipleObjectMixin, W2uiBaseView):
commands = {
'get': 'get_records',
'save': 'save_records',
'delete': 'delete_records',
}
def get_records(self,data):
# TODO: convalida data
qs = self.get_queryset()
# search
search = data.get('search',[])
filters = []
for param in search:
term = param['value']
field = param['field']
typ = param['type']
operator = param['operator']
if field == 'recid':
field = 'pk'
type_search = ""
if operator == "contains":
type_search = '__i'+operator
elif operator == "in":
type_search = '__'+operator
elif operator == "between":
type_search = '__range'
elif operator == "begins":
type_search = '__istartswith'
elif operator == "ends":
type_search = '__iendswith'
elif operator == "is":
type_search = "__exact"
filters.append((Q(**{field+type_search: term})))
if filters:
searchLogic = data.get('searchLogic','AND')
if searchLogic == "AND":
searchLogic = and_
else:
searchLogic = or_
qs = qs.filter(reduce(searchLogic, filters))
# sort
sort = data.get('sort',[])
order = []
for param in sort:
field = param['field']
if field == "recid":
field = self.model._meta.pk.get_attname()
direction = param['direction']
if direction == 'desc':
field = '-' + field
order.append(field)
if order:
qs = qs.order_by(*order)
# fields
qs = qs.values('pk',*self.fields)
# pagination
page_size = data.get('limit',1)
start_index = data.get('offset',0)
paginator = Paginator(qs, page_size)
num_page = (start_index / page_size) + 1
page = paginator.page(num_page)
return self.success(data={
"total" : page.paginator.count,
"records" : list(page.object_list),
})
def save_records(self,data):
return self.error('method not implemented') # TODO:
def delete_records(self,data):
try:
for obj in self.get_queryset().in_bulk(data['selected']).itervalues():
obj.delete()
response = self.success()
except Exception as e:
response = self.error('error deleting records',{ 'exception': e })
return response
def get_data(self):
return self.data
class W2uiFormView (SingleObjectMixin, W2uiBaseView):
commands = {
'get-record': 'get_record',
'save-record': 'save_record',
}
def get_record(self,data):
pk = data.get('recid',None)
record = self.get_queryset().filter(pk=pk).values(*self.fields)
if len(record) == 1:
response = self.success(data={ 'record': record[0] })
else:
response = self.error('record ID "%s" not found' % pk)
return response
def save_record(self,data):
Form = modelform_factory(self.model, fields=self.fields)
form = Form(data['record'])
if form.is_valid():
obj = form.save(commit=False)
if data['recid']:
obj.pk = data['recid']
obj.save()
response = self.success()
else:
response = self.error('errori nella form',form.errors)
return response
| 33.556561 | 98 | 0.574299 | 820 | 7,416 | 5.069512 | 0.284146 | 0.028867 | 0.016358 | 0.022131 | 0.069762 | 0.051479 | 0.040414 | 0 | 0 | 0 | 0 | 0.005453 | 0.307578 | 7,416 | 220 | 99 | 33.709091 | 0.80409 | 0.069984 | 0 | 0.093923 | 0 | 0 | 0.083662 | 0 | 0 | 0 | 0 | 0.004545 | 0 | 1 | 0.066298 | false | 0 | 0.104972 | 0.022099 | 0.314917 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
526932d0d569e4bf6eed17930efef0fc16252d39 | 2,197 | py | Python | test/no_bench/magma_examples/down_over_nested_to_down_over_flattened/down_over_nested_to_down_over_flattened_1 % 1thr.py | David-Durst/embeddedHaskellAetherling | 34c5403e07433e572170699f3bd69c5b5c3eff2d | [
"BSD-3-Clause"
] | 20 | 2019-03-12T20:12:31.000Z | 2022-02-07T04:23:22.000Z | test/no_bench/magma_examples/down_over_nested_to_down_over_flattened/down_over_nested_to_down_over_flattened_1 % 1thr.py | David-Durst/embeddedHaskellAetherling | 34c5403e07433e572170699f3bd69c5b5c3eff2d | [
"BSD-3-Clause"
] | 30 | 2019-07-22T19:25:42.000Z | 2020-06-18T17:58:43.000Z | test/no_bench/magma_examples/down_over_nested_to_down_over_flattened/down_over_nested_to_down_over_flattened_1 % 1thr.py | David-Durst/embeddedHaskellAetherling | 34c5403e07433e572170699f3bd69c5b5c3eff2d | [
"BSD-3-Clause"
] | 3 | 2019-10-14T18:07:26.000Z | 2022-01-20T14:36:17.000Z | import fault
import aetherling.helpers.fault_helpers as fault_helpers
from aetherling.space_time import *
from aetherling.space_time.reshape_st import DefineReshape_ST
import magma as m
import json
@cache_definition
def Module_0() -> DefineCircuitKind:
class _Module_0(Circuit):
name = "top"
IO = ['I', In(ST_SSeq(16, ST_Int(8, False)).magma_repr()),'O', Out(ST_SSeq(1, ST_Int(8, False)).magma_repr())] + ClockInterface(has_ce=False,has_reset=False) + valid_ports
st_in_t = [ST_SSeq(16, ST_Int(8, False))]
st_out_t = ST_SSeq(1, ST_Int(8, False))
binary_op = False
@classmethod
def definition(cls):
n1 = DefineFIFO(ST_SSeq(16, ST_Int(8, False)), 1, has_valid=True)()
wire(cls.I, n1.I)
wire(cls.valid_up, n1.valid_up)
n2 = DefinePartition_S(4, 4, ST_Int(8, False), has_valid=True)()
wire(n1.O, n2.I)
wire(n1.valid_down, n2.valid_up)
n3 = DefineDown_S(4, 0, ST_SSeq(4, ST_Int(8, False)), has_valid=True)()
wire(n2.O, n3.I)
wire(n2.valid_down, n3.valid_up)
n4 = DefineUnpartition_S(1, 4, ST_Int(8, False), has_valid=True)()
wire(n3.O, n4.I)
wire(n3.valid_down, n4.valid_up)
n5 = DefineDown_S(4, 0, ST_Int(8, False), has_valid=True)()
wire(n4.O, n5.I)
wire(n4.valid_down, n5.valid_up)
n6 = DefineFIFO(ST_SSeq(1, ST_Int(8, False)), 1, has_valid=True)()
wire(n5.O, n6.I)
wire(n5.valid_down, n6.valid_up)
n7 = DefineFIFO(ST_SSeq(1, ST_Int(8, False)), 1, has_valid=True)()
wire(n6.O, n7.I)
wire(n6.valid_down, n7.valid_up)
n8 = DefineFIFO(ST_SSeq(1, ST_Int(8, False)), 1, has_valid=True)()
wire(n7.O, n8.I)
wire(n7.valid_down, n8.valid_up)
wire(n8.O, cls.O)
wire(n8.valid_down, cls.valid_down)
return _Module_0
Main = Module_0
fault_helpers.compile(Main(), "v./home/durst/dev/embeddedHaskellAetherling//test/no_bench/magma_examples/down_over_nested_to_down_over_flattened/down_over_nested_to_down_over_flattened_1 % 1thr.py") | 44.836735 | 198 | 0.618571 | 349 | 2,197 | 3.636103 | 0.237822 | 0.047281 | 0.056738 | 0.104019 | 0.368794 | 0.34673 | 0.332545 | 0.215918 | 0.194641 | 0.106383 | 0 | 0.049517 | 0.246245 | 2,197 | 49 | 198 | 44.836735 | 0.716787 | 0 | 0 | 0 | 0 | 0.022222 | 0.077343 | 0.070519 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044444 | false | 0 | 0.133333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
526af9efed594956fbfe9f864284d57c10b4f1b7 | 1,546 | py | Python | real_robots/__init__.py | skbly7/real_robots | 55863c9ee98bdefa2af2ec4fe298b59156084773 | [
"MIT"
] | null | null | null | real_robots/__init__.py | skbly7/real_robots | 55863c9ee98bdefa2af2ec4fe298b59156084773 | [
"MIT"
] | null | null | null | real_robots/__init__.py | skbly7/real_robots | 55863c9ee98bdefa2af2ec4fe298b59156084773 | [
"MIT"
] | 1 | 2021-05-23T18:19:17.000Z | 2021-05-23T18:19:17.000Z | # -*- coding: utf-8 -*-
"""Top-level package for real-robots."""
__author__ = """S.P. Mohanty"""
__email__ = 'mohanty@aicrowd.com'
__version__ = '0.1.13'
import os
from gym.envs.registration import register
from .evaluate import evaluate # noqa F401
register(
id='REALRobot-v0',
entry_point='real_robots.envs:REALRobotEnv',
)
register(
id='REALRobotSingleObj-v0',
entry_point='real_robots.envs:REALRobotEnvSingleObj',
)
def getPackageDataPath():
import real_robots
return os.path.join(
real_robots.__path__[0],
"data"
)
def copy_over_data_into_pybullet(force_copy=False):
"""
If the package specific data has not already
been copied over into pybullet_data, then
copy them over.
"""
import pybullet_data
pybullet_data_path = pybullet_data.getDataPath()
is_data_absent = \
"kuka_gripper_description" not in os.listdir(pybullet_data_path)
if force_copy or is_data_absent:
import shutil
source_data_path = os.path.join(
getPackageDataPath(),
"kuka_gripper_description")
target_data_path = os.path.join(
pybullet_data_path,
"kuka_gripper_description")
print(
"[REALRobot] Copying over data into pybullet_data_path."
"This is a one time operation.")
shutil.copytree(source_data_path, target_data_path)
copy_over_data_into_pybullet()
| 26.20339 | 72 | 0.631307 | 177 | 1,546 | 5.169492 | 0.451977 | 0.069945 | 0.069945 | 0.065574 | 0.148634 | 0.056831 | 0 | 0 | 0 | 0 | 0 | 0.009874 | 0.279431 | 1,546 | 58 | 73 | 26.655172 | 0.81149 | 0.109961 | 0 | 0.105263 | 0 | 0 | 0.219911 | 0.118871 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.157895 | 0 | 0.236842 | 0.026316 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
526b0a727835a092fa5f20736d85d0e72e54520d | 762 | py | Python | teuthology/test/test_parallel.py | zhsj/teuthology | 7f11a09f2b7d7406d65f21a85fc2e3db395a95a0 | [
"MIT"
] | 1 | 2018-05-17T13:02:42.000Z | 2018-05-17T13:02:42.000Z | teuthology/test/test_parallel.py | zhsj/teuthology | 7f11a09f2b7d7406d65f21a85fc2e3db395a95a0 | [
"MIT"
] | 1 | 2021-02-23T19:06:55.000Z | 2021-02-23T19:06:55.000Z | teuthology/test/test_parallel.py | zhsj/teuthology | 7f11a09f2b7d7406d65f21a85fc2e3db395a95a0 | [
"MIT"
] | 2 | 2019-09-26T09:31:37.000Z | 2019-09-26T09:36:30.000Z | from ..parallel import parallel
def identity(item, input_set=None, remove=False):
if input_set is not None:
assert item in input_set
if remove:
input_set.remove(item)
return item
class TestParallel(object):
def test_basic(self):
in_set = set(range(10))
with parallel() as para:
for i in in_set:
para.spawn(identity, i, in_set, remove=True)
assert para.any_spawned is True
assert para.count == len(in_set)
def test_result(self):
in_set = set(range(10))
with parallel() as para:
for i in in_set:
para.spawn(identity, i, in_set)
for result in para:
in_set.remove(result)
| 26.275862 | 60 | 0.570866 | 104 | 762 | 4.038462 | 0.355769 | 0.095238 | 0.042857 | 0.057143 | 0.338095 | 0.338095 | 0.338095 | 0.338095 | 0.338095 | 0.338095 | 0 | 0.008032 | 0.346457 | 762 | 28 | 61 | 27.214286 | 0.835341 | 0 | 0 | 0.272727 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.136364 | 1 | 0.136364 | false | 0 | 0.045455 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
526c950d7cd35017e5b92b3e425c26d2940f243b | 3,311 | py | Python | ew.py | dunky11/exponential-weighting-watermarking | 717bd04ac05daf8eb7e902ec84b04fc02126bf92 | [
"MIT"
] | 7 | 2020-11-22T19:14:17.000Z | 2022-03-01T05:59:58.000Z | ew.py | dunky11/exponential-weighting-watermarking | 717bd04ac05daf8eb7e902ec84b04fc02126bf92 | [
"MIT"
] | 1 | 2021-10-05T21:17:02.000Z | 2021-10-05T21:17:02.000Z | ew.py | dunky11/exponential-weighting-watermarking | 717bd04ac05daf8eb7e902ec84b04fc02126bf92 | [
"MIT"
] | null | null | null | import tensorflow as tf
from tensorflow import keras
from tensorflow.python.keras.layers.ops import core as core_ops
from tensorflow.python.ops import nn
class EWBase(keras.layers.Layer):
"""
t is called the temperature in the paper. The higher t is, the more the weights are squeezed
when exponential weighting is enabled. A value of 2.0 was used in the paper.
"""
def __init__(self, t):
super().__init__()
self.t = t
self.is_ew_enabled = False
def enable(self):
self.is_ew_enabled = True
def disable(self):
self.is_ew_enabled = False
def ew(self, theta):
exp = tf.exp(tf.math.abs(theta) * self.t)
numerator = exp
denominator = tf.math.reduce_max(exp)
return tf.math.multiply(numerator / denominator, theta)
class EWDense(EWBase):
def __init__(self, units, t, activation=None):
super().__init__(t)
self.units = units
self.activation = activation
def build(self, input_shape):
# ToDo change to glorot_normal since it's the default, but currently doesn't work with relu
self.w = self.add_weight(
shape=(input_shape[-1], self.units),
initializer="random_normal",
trainable=True,
)
self.b = self.add_weight(
shape=(self.units,), initializer="zeros", trainable=True
)
def call(self, inputs):
if self.is_ew_enabled:
out = tf.matmul(inputs, self.ew(self.w)) + self.b
else:
out = tf.matmul(inputs, self.w) + self.b
if self.activation:
return self.activation(out)
return out
class EWConv2D(EWBase):
def __init__(self, filters, kernel_size, t, strides=1, activation=None, padding="valid"):
super().__init__(t)
self.filters = filters
if isinstance(kernel_size, int):
self.kernel_size = [kernel_size, kernel_size]
else:
self.kernel_size = kernel_size,
if isinstance(strides, int):
self.strides = [strides, strides]
elif isinstance(strides, tuple):
self.strides = list(strides)
else:
self.strides = strides
self.activation = activation
if not padding.upper() in ["VALID", "SAME"]:
raise Exception(
f"padding must be either 'valid' or 'same', but '{padding}' was passed.")
self.padding = padding.upper()
self.t = t
def build(self, input_shape):
self.w = self.add_weight(
shape=(self.kernel_size[0], self.kernel_size[1],
input_shape[-1], self.filters),
initializer="random_normal",
trainable=True,
)
self.b = self.add_weight(
shape=(self.filters,), initializer="zeros", trainable=True
)
def call(self, inputs):
if self.is_ew_enabled:
out = tf.nn.conv2d(inputs, self.ew(self.w),
strides=self.strides, padding=self.padding)
else:
out = tf.nn.conv2d(
inputs, self.w, strides=self.strides, padding=self.padding)
out = tf.nn.bias_add(out, self.b)
if self.activation:
return self.activation(out)
return out
| 31.533333 | 99 | 0.58955 | 414 | 3,311 | 4.577295 | 0.285024 | 0.047493 | 0.021108 | 0.039578 | 0.392612 | 0.305541 | 0.237467 | 0.237467 | 0.194195 | 0.194195 | 0 | 0.00434 | 0.304138 | 3,311 | 104 | 100 | 31.836538 | 0.818142 | 0.078526 | 0 | 0.395062 | 0 | 0 | 0.039261 | 0 | 0 | 0 | 0 | 0.009615 | 0 | 1 | 0.123457 | false | 0.012346 | 0.049383 | 0 | 0.271605 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
526d9e99d4a6a35862a69a9e2ea972d41bfbf621 | 2,027 | py | Python | shader.py | jt667/Hydralab-Pallet-Comparison | 4148242dcf6b3da20c4ac87b39d4c979f6f35c16 | [
"MIT"
] | null | null | null | shader.py | jt667/Hydralab-Pallet-Comparison | 4148242dcf6b3da20c4ac87b39d4c979f6f35c16 | [
"MIT"
] | null | null | null | shader.py | jt667/Hydralab-Pallet-Comparison | 4148242dcf6b3da20c4ac87b39d4c979f6f35c16 | [
"MIT"
] | null | null | null | import subprocess
import os
def Diff(li1, li2):
#Returns the files that are not contained in both lists (the symmetric difference of the lists)
li_dif = [i for i in li1 + li2 if i not in li1 or i not in li2]
return li_dif
def pcv(overwrite,src,dest):
print("Shading files")
print("")
#Path to the CloudCompare exe file
cc_path = r"C:\Program Files\CloudCompare\CloudCompare.exe"
#List of all the files in the src directory
all_files = os.listdir(src)
#Create the destination folder if it does not already exist
os.makedirs(dest,exist_ok=True)
for filename in all_files:
#Destination file name
shaded_path = dest + "\\" + filename.replace(".bin","_Shaded.bin")
#Check if the output file already exists
if not os.path.exists(shaded_path) or overwrite:
#Path to current file
unshaded_path = src + "\\" + filename
#Shades the cloud using light rays from above (useful for visualisation)
# -SILENT stops a cloud compare console popping up (useful for debug as it will stop the program after completing its task)
# -O current_file_path opens the file with path given by current_file_path
# -PCV runs the PCV plugin on the loaded clouds
# -180 rays only come from the northern hemisphere (+Z)
subprocess.run([cc_path, "-SILENT", "-O", unshaded_path, "-PCV", "-180"], shell = True)
#The time stamped name of the shaded file
time_stamped_file = Diff(all_files,os.listdir(src))[0]
#Deletes the old output file if it exists
if os.path.exists(shaded_path) and overwrite:
os.remove(shaded_path)
#Moves the file to a new folder and renames it to the original filename + "_Shaded"
os.rename(src + "\\" + time_stamped_file,shaded_path)
| 40.54 | 136 | 0.608288 | 276 | 2,027 | 4.376812 | 0.434783 | 0.041391 | 0.009934 | 0.028146 | 0.069536 | 0 | 0 | 0 | 0 | 0 | 0 | 0.0094 | 0.317711 | 2,027 | 49 | 137 | 41.367347 | 0.864064 | 0.411939 | 0 | 0 | 0 | 0 | 0.086146 | 0.031083 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.1 | 0 | 0.25 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
526f217844c174b7e06e02f7a96389ffb22def23 | 7,914 | py | Python | athena/.ipynb_checkpoints/sampling-checkpoint.py | markowetzlab/Athena | 55de866303fd6b82d05b294ccab4e85c4b965f81 | [
"MIT"
] | 1 | 2022-03-23T12:45:08.000Z | 2022-03-23T12:45:08.000Z | athena/sampling.py | markowetzlab/Athena | 55de866303fd6b82d05b294ccab4e85c4b965f81 | [
"MIT"
] | null | null | null | athena/sampling.py | markowetzlab/Athena | 55de866303fd6b82d05b294ccab4e85c4b965f81 | [
"MIT"
] | null | null | null | import os
import random
import numpy as np
import pandas as pd
import scanpy as sc
from tqdm import tqdm
from multiprocessing import Pool, RLock
class Sampling:
def sample(self, ncells=10000, pop_fp=None, sim_fp=None, cache=True, return_data=False):
print (f"Simulation: {self.network_name} Sampling Cells...", flush=True)
cells_meta, gene_expr = self.sampling_cells(ncells, sim_fp)
print (f"Simulation: {self.network_name} Sampling Molecules...", flush=True)
lib_sizes = self.sampling_molecules(gene_expr, pop_fp)
cells_meta = self.clean_cells_metadata(cells_meta, lib_sizes)
cells_meta = cells_meta.reset_index(drop=True)
if cache:
print (f"Simulation: {self.network_name} Caching....", flush=True)
cells_meta.to_csv(os.path.join(self.metadata_dir,
'cells_metadata.csv.gz'),
compression='gzip', index=False)
if return_data:
fp = os.path.join(self.metadata_dir, 'gene_expression.csv.gz')
gene_expr = pd.read_csv(fp, dtype=np.int16)
return cells_meta, gene_expr
else:
return None, None
def sampling_cells(self, ncells, sim_fp):
if sim_fp is None:
sim_fp = os.path.join(self.results_dir, 'simulated_counts.csv.gz')
self.cell_sim_meta = pd.read_csv(f'{self.results_dir}/cell_metadata.csv.gz')
self.cell_sim_meta = self.cell_sim_meta.reset_index().rename(columns={'index': 'cell_i'})
if ncells > self.cell_sim_meta.shape[0]:
raise Exception(f"Simulation: {self.network_name} Number of cells requested is greater than the number of cells simulated. Sample fewer cells...")
cells_meta = []
cells = np.array([i for i in range(self.cell_sim_meta.shape[0])])
cells_meta = self.get_cells_meta()
cells = self.sample_cells_per_grna(cells_meta, ncells)
cells_meta = cells_meta.iloc[cells]
gene_expr = self.load_cells(cells, sim_fp)
return cells_meta, gene_expr
def sampling_molecules(self, gene_expr, pop_fp=None):
if pop_fp is None:
pop = sc.read_loom(self.pop_fp)
else:
pop = sc.read_loom(pop_fp)
realcounts = pop.X.toarray()
cell_umi = pop.obs.total_counts.values
lib_size = self.calc_library_size(cell_umi, gene_expr)
self.downsampling(realcounts, gene_expr, lib_size)
return lib_size
def clean_cells_metadata(self, meta, lib_sizes):
meta['lib_size'] = lib_sizes
meta['grna'] = meta['sim_label'].apply(lambda x: "_".join(x.split('_')[0:2]))
meta['target_gene'] = meta['sim_label'].apply(lambda x: x.split('-grna')[0])
if self.crispr_type == 'knockout':
meta['is_cell_perturbed'] = meta['sim_label'].apply(lambda x: x.split('_')[-1])
meta.loc[meta.target_gene == self.ctrl_label, 'is_cell_perturbed'] = self.ctrl_label
else:
meta['is_cell_perturbed'] = 'PRT'
meta.loc[meta.target_gene == self.ctrl_label, 'is_cell_perturbed'] = self.ctrl_label
meta = meta.reset_index(drop=True)
return meta
def load_cells(self, sampled_cells, sim_fp):
df = pd.read_csv(sim_fp, dtype=np.int16)
df = df.iloc[sampled_cells]
return df
def calc_library_size(self, cell_umis, sim_counts):
sim_counts_ls = sim_counts.sum(axis=1).values
if self.map_reference_ls:
# sampling library
sim_probs = np.random.uniform(size=len(sim_counts_ls))
lib_size = np.around(np.quantile(cell_umis, sim_probs))
else:
lib_size = sim_counts_ls
return lib_size
def downsampling(self, realcount, sim_counts, lib_sizes, cache_size=100000):
gene_expr = []
sim_cols = list(sim_counts.columns)
real_cpm = self.get_real_cpm(realcount)
gene_expr_fp = os.path.join(self.metadata_dir, 'gene_expression.csv.gz')
if len(lib_sizes) < cache_size:
cache_size = round(len(lib_sizes) / 2)
sizes = [lib_sizes[i:i+cache_size-1] for i in range(0, len(lib_sizes), cache_size)]
counts = [sim_counts.iloc[i:i+cache_size-1, :] for i in range(0, len(sim_counts), cache_size)]
for i in tqdm(range(len(sizes))):
df = counts[i]
lib_size = np.array(sizes[i])
cpm = df / lib_size.reshape(-1, 1)
cpm = self.calc_cpm(cpm, real_cpm)
# sample molecules
for index, size in enumerate(lib_size):
gene_val = cpm[index, ]
gene_expr = np.random.multinomial(size, gene_val)
cpm[index, ] = gene_expr
gene_expr = pd.DataFrame(cpm, columns=sim_cols, dtype=np.int16)
self.cache_dataframe(gene_expr, gene_expr_fp)
def get_real_cpm(self, realcount):
# calculating realcount datasets cpm
real_ls = np.sum(realcount, axis=1).reshape(-1, 1)
real_cpm = realcount / real_ls
real_cpm = real_cpm.flatten()
real_cpm = real_cpm[real_cpm != 0]
return real_cpm
def calc_cpm(self, scpm, rcpm):
if self.map_reference_cpm:
# sort sim counts data via least to greatest
rcpm = rcpm.flatten()
sim_shape = scpm.shape
scpm_size = sim_shape[0] * sim_shape[1]
probs = np.random.uniform(size=scpm_size)
scpm = np.quantile(rcpm, probs).reshape(sim_shape)
scpm = scpm / np.sum(scpm, axis=1).reshape(-1, 1)
return scpm
def sample_cells_per_grna(self, cells_meta, ncells):
sampled_cells = []
ngrnas = len(self.sim_meta.grna.unique())
self.ncells_per_grna = round(ncells / ngrnas)
for row_i in range(len(self.sim_meta)):
row = self.sim_meta.iloc[row_i]
sim_cells = cells_meta.loc[cells_meta.sim_label == row.sim_name, 'cell_i'].values
sim_cells = list(sim_cells)
if len(sim_cells) < self.ncells_per_grna:
print ("changing ncells_per_grna...")
self.ncells_per_grna = len(sim_cells)
if row.sample_percent != 0:
n = int(self.ncells_per_grna * row.sample_percent)
sampled = random.sample(sim_cells, k=n)
sampled_cells = sampled_cells + sampled
return sampled_cells
def get_cells_meta(self):
cells_meta = []
ncells_per_sim = int(self.perturb_time / self.update_interval)
for row_i, row in self.sim_meta.iterrows():
nsims_adjust = 1 + self.sim_meta.nsims.iloc[:row_i].sum()
for row_sim_i in range(row.nsims):
row_sim_i = row_sim_i + nsims_adjust
cell_sim_meta = self.cell_sim_meta.loc[self.cell_sim_meta.sim_i == row_sim_i]
for cell_i in range(cell_sim_meta.shape[0]):
cells_meta.append({"cell_i": cell_sim_meta.iloc[cell_i].cell_i,
"sim_i": cell_sim_meta.iloc[cell_i].sim_i,
"sim_label": row.sim_name,
"grna_label": row.grna})
return pd.DataFrame(cells_meta)
def cache_dataframe(self, df, fp):
if os.path.exists(fp):
df.to_csv(fp, mode='a', index=False, header=False, compression='gzip')
else:
df.to_csv(fp, index=False, compression='gzip') | 39.969697 | 158 | 0.584155 | 1,054 | 7,914 | 4.114801 | 0.163188 | 0.043579 | 0.027899 | 0.02421 | 0.24095 | 0.171086 | 0.140189 | 0.076089 | 0.062255 | 0.062255 | 0 | 0.007893 | 0.3116 | 7,914 | 198 | 159 | 39.969697 | 0.788179 | 0.014026 | 0 | 0.090278 | 0 | 0.006944 | 0.079754 | 0.016284 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.048611 | 0 | 0.215278 | 0.027778 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
526fc9dff25cdd0000681e96fb40774aa2123c51 | 1,481 | py | Python | commands/cs.py | zivoy/flowaboat | c809821fe3c04b9d24351589443326b1842f2e3e | [
"MIT"
] | null | null | null | commands/cs.py | zivoy/flowaboat | c809821fe3c04b9d24351589443326b1842f2e3e | [
"MIT"
] | null | null | null | commands/cs.py | zivoy/flowaboat | c809821fe3c04b9d24351589443326b1842f2e3e | [
"MIT"
] | null | null | null | from utils.discord import help_me, DiscordInteractive
from utils.osu.utils import CalculateMods
from utils.utils import Log
interact = DiscordInteractive.interact
class Command:
    """Discord command that applies osu! mods to a Circle Size (CS) value."""

    command = "cs"
    description = "Calculate Circle Size value with mods applied."
    argsRequired = 1
    usage = "<cs> [+mods]"
    # BUGFIX: the example results previously said "AR8"/"AR8.3" (copied from
    # the AR command); this command reports CS for the given value.
    examples = [{
        'run': "cs 6 +HR",
        'result': "Returns CS of CS6 with HR applied."
    },
        {
            'run': "cs 8.3 +EZ",
            'result': "Returns CS of CS8.3 with EZ applied."
        }]
    synonyms = []

    async def call(self, package):
        """Parse ``<cs> [+mods]`` from the message and reply with the modded CS.

        On a missing or non-numeric cs value, logs the problem and shows
        the command help instead of replying.
        """
        message, args = package["message_obj"], package['args']

        try:
            cs = float(args[1])
        except ValueError:
            msg = f"{args[1]} is not a valid cs"
            Log.error(msg)
            await help_me(message, self.command)
            return
        except IndexError:
            Log.error("No cs provided")
            await help_me(message, self.command)
            return

        mods = args[2].upper() if len(args) > 2 else ""
        new_cs, mod_list = CalculateMods(mods).cs(cs)

        output = ""
        if len(mod_list) > 0:
            # show the original value and applied mods, e.g. "CS6+HR -> "
            if cs.is_integer():
                cs = int(cs)
            output += f"CS{cs}+{''.join(mod_list).upper()} -> "

        # round to 2 decimals and drop a trailing ".0" for whole numbers
        new_cs = float(f"{new_cs:.2f}")
        if new_cs.is_integer():
            new_cs = int(new_cs)
        output += f"CS{new_cs}"

        interact(message.channel.send, output)
| 27.425926 | 66 | 0.541526 | 183 | 1,481 | 4.295082 | 0.420765 | 0.044529 | 0.038168 | 0.043257 | 0.139949 | 0.089059 | 0.089059 | 0 | 0 | 0 | 0 | 0.013145 | 0.332208 | 1,481 | 53 | 67 | 27.943396 | 0.781598 | 0 | 0 | 0.093023 | 0 | 0 | 0.190412 | 0.022957 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.069767 | 0 | 0.27907 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
526ffd095c6b94b1fb871e852c7e2532e34db8a2 | 8,523 | py | Python | custom_components/maxhomeautomation/sensor.py | koleo9am/hass_max_home_automation | f6b282d272c1d0cf724ce8c6d2aab5c3813acd02 | [
"Unlicense"
] | 3 | 2020-01-05T20:19:26.000Z | 2020-03-23T09:19:18.000Z | custom_components/maxhomeautomation/sensor.py | koleo9am/hass_max_home_automation | f6b282d272c1d0cf724ce8c6d2aab5c3813acd02 | [
"Unlicense"
] | 13 | 2019-03-22T15:01:57.000Z | 2022-03-22T18:31:05.000Z | custom_components/maxhomeautomation/sensor.py | koleo9am/hass_max_home_automation | f6b282d272c1d0cf724ce8c6d2aab5c3813acd02 | [
"Unlicense"
] | 6 | 2019-10-02T19:09:29.000Z | 2021-03-04T18:01:12.000Z | """Support for MAX! Home Automation Thermostats Sensors."""
import logging
from homeassistant.helpers.entity import Entity
from homeassistant.const import TEMP_CELSIUS
from .consts import *
from .__init__ import MaxHomeAutomationDeviceHandler
from .__init__ import MaxHomeAutomationCubeHandler
_LOGGER = logging.getLogger(__name__)
# allowed sensors types
MHA_ALLOWED_SENSOR_TYPES = [
MHA_SENSOR_TYPE_TEMPERATURE,
MHA_SENSOR_TYPE_SET_TEMPERATURE,
MHA_SENSOR_TYPE_VALVE,
MHA_SENSOR_TYPE_OFFSET,
MHA_SENSOR_TYPE_ECO_BUTTON,
]
# map sensor type to unit
MHA_UNIT_HA_CAST = {
MHA_SENSOR_TYPE_TEMPERATURE: TEMP_CELSIUS,
MHA_SENSOR_TYPE_SET_TEMPERATURE: TEMP_CELSIUS,
MHA_SENSOR_TYPE_VALVE: '%',
MHA_SENSOR_TYPE_OFFSET: TEMP_CELSIUS,
MHA_SENSOR_TYPE_ECO_BUTTON: '',
MHA_SENSOR_TYPE_DUTY: '%',
}
# map sensor type to icon
MHA_ICON_HA_CAST = {
MHA_SENSOR_TYPE_TEMPERATURE: 'mdi:thermometer',
MHA_SENSOR_TYPE_SET_TEMPERATURE: 'mdi:thermometer',
MHA_SENSOR_TYPE_VALVE: 'mdi:radiator',
MHA_SENSOR_TYPE_OFFSET: 'mdi:delta',
MHA_SENSOR_TYPE_ECO_BUTTON: 'mdi:home-automation',
MHA_SENSOR_TYPE_DUTY: 'mdi:radio-tower',
}
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Iterate through all MAX! Devices.

    Reads the gateway/cube configuration stored under hass.data, creates the
    appropriate sensor entities for every radiator thermostat, wall
    thermostat and eco button, plus one duty sensor per cube, and registers
    them with Home Assistant.
    """
    devices = []
    # read configuration and setup platform
    gateways = hass.data[DATA_KEY][DOMAIN][CONF_GATEWAYS]
    for gateway in gateways:
        host = gateway[CONF_HOST]
        port = gateway[CONF_PORT]
        scan_interval = gateway[CONF_SCAN_INTERVAL].total_seconds()
        cubes = gateway[CONF_CUBES]
        gateway_url_base= "http://{}:{}/".format(host, port)
        # walk trough cubes
        for cube in cubes:
            # read config
            cube_address = cube[CONF_HEX_ADDRESS]
            cube_name = cube[CONF_NAME]
            radiator_thermostats = cube[CONF_RADIATOR_THERMOSTATS]
            wall_thermostats = cube[CONF_WALL_THERMOSTATS]
            window_shutters = cube[CONF_WINDOWS_SHUTTERS]
            eco_buttons = cube[CONF_ECO_BUTTONS]
            # walk trough radiator thermostats: four sensors per device
            # (temperature, target temperature, valve position, offset)
            for radiator_thermostat in radiator_thermostats:
                device_address = radiator_thermostat[CONF_HEX_ADDRESS]
                device_name = radiator_thermostat[CONF_NAME]
                handler = MaxHomeAutomationDeviceHandler(
                    gateway_url_base, cube_address, device_address, scan_interval)
                devices.append(
                    MaxHomeAutomationSensor (handler, device_name + " - Temperature", MHA_SENSOR_TYPE_TEMPERATURE))
                devices.append(
                    MaxHomeAutomationSensor (handler, device_name + " - Target Temperature", MHA_SENSOR_TYPE_SET_TEMPERATURE))
                devices.append(
                    MaxHomeAutomationSensor (handler, device_name + " - Valve", MHA_SENSOR_TYPE_VALVE))
                devices.append(
                    MaxHomeAutomationSensor (handler, device_name + " - Offset", MHA_SENSOR_TYPE_OFFSET))
            # walk trough wall thermostats: temperature + target temperature
            for wall_thermostat in wall_thermostats:
                device_address = wall_thermostat[CONF_HEX_ADDRESS]
                device_name = wall_thermostat[CONF_NAME]
                handler = MaxHomeAutomationDeviceHandler(
                    gateway_url_base, cube_address, device_address, scan_interval)
                devices.append(
                    MaxHomeAutomationSensor (handler, device_name + " - Temperature", MHA_SENSOR_TYPE_TEMPERATURE))
                devices.append(
                    MaxHomeAutomationSensor (handler, device_name + " - Target Temperature", MHA_SENSOR_TYPE_SET_TEMPERATURE))
            # walk trough eco buttons: a single mode sensor each
            for eco_button in eco_buttons:
                device_address = eco_button[CONF_HEX_ADDRESS]
                device_name = eco_button[CONF_NAME]
                handler = MaxHomeAutomationDeviceHandler(
                    gateway_url_base, cube_address, device_address, scan_interval)
                devices.append(
                    MaxHomeAutomationSensor (handler, device_name + " - Mode", MHA_SENSOR_TYPE_ECO_BUTTON))
            # duty sensor (cube-level radio duty cycle)
            handler = MaxHomeAutomationCubeHandler(
                gateway_url_base, cube_address, scan_interval)
            devices.append(
                MaxHomeAutomationDutySensor (handler, cube_name + " - Duty"))
    if devices:
        add_entities(devices)
    # platform initialization was successful
    return True
class MaxHomeAutomationSensor(Entity):
    """Representation of a Max! Home Automation sensor."""

    def __init__(self, device_handler, name, sensor_type):
        """Initialize the sensor.

        Parameters
        ----------
        device_handler
            Handler used to fetch this device's data from the gateway.
        name : str
            Friendly entity name shown in the frontend.
        sensor_type : str
            One of ``MHA_ALLOWED_SENSOR_TYPES``; raises ValueError otherwise.
        """
        # check sensor_type
        if sensor_type not in MHA_ALLOWED_SENSOR_TYPES:
            raise ValueError("Unknown Max! Home Automation sensor type: {}".format(sensor_type))
        # store values
        self._device_handler = device_handler
        self._name = name
        self._sensor_type = sensor_type
        self._state = None
        # read current value
        self.update()

    @property
    def should_poll(self):
        """Return the polling state."""
        return True

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def name(self):
        """Return the name of the climate device."""
        return self._name

    @property
    def sensor_type (self):
        # sensor-type key used to look up units, icons and device values
        return self._sensor_type;

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this entity, if any."""
        return MHA_UNIT_HA_CAST.get(self.sensor_type, None)

    @property
    def icon(self):
        """Return the icon to use in the frontend, if any."""
        return MHA_ICON_HA_CAST.get(self.sensor_type, None)

    def update(self):
        """Get latest data from MAX! Home Automation"""
        self._device_handler.update()
        # find the device
        device = self._device_handler.data
        # device not found
        if device is None:
            self._state = None
            return False
        # update internal values
        value = device.get(self.sensor_type, None)
        self._state = (
            value
            if self.sensor_type != MHA_SENSOR_TYPE_ECO_BUTTON
            else
            # translate operation mode of ECO button
            MAP_MHA_OPERATION_MODE_HASS.get(value, None)
        )
class MaxHomeAutomationDutySensor(Entity):
    """Representation of a Max! Home Automation Cube duty sensor."""

    def __init__(self, cubehandle, name):
        """Initialize the sensor.

        Parameters
        ----------
        cubehandle
            Handler used to fetch cube-level data (duty value).
        name : str
            Friendly entity name shown in the frontend.
        """
        # store values
        self._cubehandle = cubehandle
        self._name = name
        self._state = None
        # read current value
        self.update()

    @property
    def should_poll(self):
        """Return the polling state."""
        return True

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def name(self):
        """Return the name of the climate device."""
        return self._name

    @property
    def sensor_type (self):
        # duty sensors always report the fixed duty sensor type
        return MHA_SENSOR_TYPE_DUTY;

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this entity, if any."""
        return MHA_UNIT_HA_CAST.get(self.sensor_type, None)

    @property
    def icon(self):
        """Return the icon to use in the frontend, if any."""
        return MHA_ICON_HA_CAST.get(self.sensor_type, None)

    def update(self):
        """Get latest data from MAX! Home Automation"""
        self._cubehandle.update()
        value = self._cubehandle.cube_duty
        # no value
        if value is None:
            self._state = None
            return False
        # remove '%'
        value = value.replace('%', '')
        # update internal values
        self._state = value
5270fde94d095534094e0e22ff1e0b045b9601ae | 2,348 | py | Python | hexa/ui/datacard/actions.py | qgerome/openhexa-app | 8c9377b2ad972121d8e9575f5d52420212b52ed4 | [
"MIT"
] | 4 | 2021-07-19T12:53:21.000Z | 2022-01-26T17:45:02.000Z | hexa/ui/datacard/actions.py | qgerome/openhexa-app | 8c9377b2ad972121d8e9575f5d52420212b52ed4 | [
"MIT"
] | 20 | 2021-05-17T12:27:06.000Z | 2022-03-30T11:35:26.000Z | hexa/ui/datacard/actions.py | qgerome/openhexa-app | 8c9377b2ad972121d8e9575f5d52420212b52ed4 | [
"MIT"
] | 2 | 2021-09-07T04:19:59.000Z | 2022-02-08T15:33:29.000Z | from __future__ import annotations
import typing
from django.http import HttpRequest
from django.template import loader
from django.utils.translation import gettext_lazy as _
import hexa.ui.datacard
from hexa.ui.utils import get_item_value
from .base import DatacardComponent
class Action(DatacardComponent):
    """A clickable action (button/link) rendered on a datacard."""

    def __init__(
        self,
        *,
        label: str,
        url: str,
        icon: typing.Optional[str] = None,
        method: str = "post",
        open_in_new_tab: bool = False,
        primary: bool = True,
        enabled_when: typing.Optional[typing.Callable] = None
    ):
        # Opening in a new tab is only allowed for GET requests.
        if open_in_new_tab and method.lower() != "get":
            raise ValueError(
                '"open_in_new_tab" can only be set to true if "method" is "get"'
            )

        self.label = label
        self.icon = icon
        self.url = url
        self.method = method
        self.open_in_new_tab = open_in_new_tab
        self.primary = primary
        # Optional predicate (request -> bool) controlling availability.
        self.enabled_when = enabled_when

    def bind(self, datacard: hexa.ui.datacard.Datacard):
        """Attach this action to a concrete datacard instance."""
        return BoundAction(self, datacard=datacard)

    def get_value(self, model, accessor, container=None):
        """Resolve *accessor* against *model* via get_item_value,
        excluding nested datacard components."""
        return get_item_value(
            model, accessor, container=container, exclude=DatacardComponent
        )

    @property
    def template(self):
        # Template used to render the action.
        return "ui/datacard/action.html"

    def context(self, model, card: hexa.ui.datacard.Datacard):
        """Build the template context for rendering this action."""
        return {
            "url": self.get_value(model, self.url, container=card),
            "label": _(self.label),
            "icon": self.icon,
            "method": self.method,
            "open_in_new_tab": self.open_in_new_tab,
            "primary": self.primary,
        }
class BoundAction:
    """An Action paired with the datacard instance it will render on."""

    def __init__(self, unbound_action: Action, *, datacard: hexa.ui.datacard.Datacard):
        self.unbound_action = unbound_action
        self.datacard = datacard

    def is_enabled(self, request: HttpRequest):
        """Evaluate the action's availability predicate for *request*.

        Defaults to enabled when no predicate was configured.
        """
        predicate = self.unbound_action.enabled_when
        if predicate:
            return predicate(request)
        return True

    def __str__(self):
        action = self.unbound_action
        template = loader.get_template(action.template)
        context = action.context(self.datacard.model, self.datacard)
        return template.render(context, request=self.datacard.request)
| 28.987654 | 87 | 0.635434 | 277 | 2,348 | 5.169675 | 0.259928 | 0.02933 | 0.043994 | 0.058659 | 0.146648 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.272998 | 2,348 | 80 | 88 | 29.35 | 0.838899 | 0 | 0 | 0 | 0 | 0 | 0.056218 | 0.009796 | 0 | 0 | 0 | 0 | 0 | 1 | 0.126984 | false | 0 | 0.126984 | 0.063492 | 0.396825 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5277bbe38d9e203cfe8b7158687231c7465a9e3a | 1,871 | py | Python | src/api/models/user/user_manager.py | Avik32223/gala-iam-api | 2e9f852d016be651e90e21cd5693a10048e487e0 | [
"MIT"
] | null | null | null | src/api/models/user/user_manager.py | Avik32223/gala-iam-api | 2e9f852d016be651e90e21cd5693a10048e487e0 | [
"MIT"
] | null | null | null | src/api/models/user/user_manager.py | Avik32223/gala-iam-api | 2e9f852d016be651e90e21cd5693a10048e487e0 | [
"MIT"
] | null | null | null | from pydantic.error_wrappers import ValidationError
from db.database import Database
from models.base_record_manager import BaseRecordManager
from models.user.user_model import (USER_MODEL_NAME, User, UserCreate,
UserPartial)
class UserManager(BaseRecordManager):
    """UserManager to handle CRUD functionality"""
    model = User
    model_name = USER_MODEL_NAME

    @classmethod
    def create(cls, db: Database, record: UserCreate) -> User:
        """Creates a new User after validating subjects.

        Arguments:
            db {Database} -- Database connection
            record {UserCreate} -- New User data

        Returns:
            User -- newly created user
        """
        existing_user = UserManager.find_by_name(db, record.metadata.name)
        if existing_user:
            # NOTE(review): pydantic's ValidationError is normally built from
            # an error list + model, not a bare string -- confirm this raises
            # as intended.
            raise ValidationError(
                "User with name [%s] already exists" % record.metadata.name)
        return super(UserManager, cls).create(db, record)

    @classmethod
    def update(cls, db: Database, record_uuid: str, record: UserPartial) -> User:
        """Updates the existing User after validating data

        Arguments:
            db {Database} -- Database connection
            record_uuid {str} -- unique record uuid
            record {BaseModel} -- updating record

        Returns:
            BaseRecord -- Updated record
        """
        existing_user = cls.find_by_uuid(db, record_uuid)
        updated_record = cls.model(**record.dict(), uuid=record_uuid)
        # only enforce name uniqueness when the update renames the user
        if updated_record.metadata.name != existing_user.metadata.name:
            if UserManager.find_by_name(db, updated_record.metadata.name):
                raise ValidationError(
                    "User with name [%s] already exists" % record.metadata.name)
        return super(UserManager, cls).update(db, record_uuid, record)
| 34.648148 | 81 | 0.640834 | 202 | 1,871 | 5.80198 | 0.311881 | 0.061433 | 0.076792 | 0.02901 | 0.264505 | 0.225256 | 0.151877 | 0.151877 | 0.151877 | 0.151877 | 0 | 0 | 0.275254 | 1,871 | 53 | 82 | 35.301887 | 0.864307 | 0.241582 | 0 | 0.25 | 0 | 0 | 0.052429 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.166667 | 0 | 0.458333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
527ab490909e85acdfa005425c8c8c8a4131ce29 | 1,239 | py | Python | helpers/logger_utils.py | alyswidan/HeroUDP | db8bd2799d9cb10d7899884c0709ecd718dd6e5c | [
"MIT"
] | 4 | 2019-04-12T11:40:42.000Z | 2022-01-02T07:42:32.000Z | helpers/logger_utils.py | alyswidan/HeroUDP | db8bd2799d9cb10d7899884c0709ecd718dd6e5c | [
"MIT"
] | null | null | null | helpers/logger_utils.py | alyswidan/HeroUDP | db8bd2799d9cb10d7899884c0709ecd718dd6e5c | [
"MIT"
] | null | null | null | import logging
import sys
def get_stdout_logger(name='root', level='INFO'):
    """Return a logger with the given name that writes to stdout.

    Parameters
    ----------
    name : str
        Logger name (passed to ``logging.getLogger``).
    level : str
        Level name such as 'DEBUG' or 'INFO'; raises ValueError if unknown.

    Returns
    -------
    logging.Logger
        The configured logger.
    """
    logging_level = getattr(logging, level.upper(), None)
    if not isinstance(logging_level, int):
        raise ValueError(f'invalid log level {level}')
    stdout_logger = logging.getLogger(name)
    stdout_logger.setLevel(logging_level)
    # BUGFIX: previously every call attached a fresh StreamHandler to the
    # (cached) logger, producing duplicated log lines; only add one if this
    # logger has no stdout handler yet.
    has_stdout_handler = any(
        isinstance(h, logging.StreamHandler) and getattr(h, 'stream', None) is sys.stdout
        for h in stdout_logger.handlers
    )
    if not has_stdout_handler:
        handler = logging.StreamHandler(sys.stdout)
        handler.setLevel(logging_level)
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        handler.setFormatter(formatter)
        stdout_logger.addHandler(handler)
    get_stdout_logger.is_initialized = True
    return stdout_logger


get_stdout_logger.is_initialized = False
# def delegate_calls(delegate_to):
# def wrapper(cls):
# def _get_attr(self, attr):
# try:
# found_attr = super(cls, self).__getattribute__(attr)
# except AttributeError:
# pass
# else:
# return found_attr
#
# found_attr = .__getattribute__(attr)
#
# return found_attr
# setattr(cls, '__getattribute__', _get_attr)
# return cls
# return wrapper
| 29.5 | 89 | 0.647296 | 136 | 1,239 | 5.602941 | 0.404412 | 0.125984 | 0.07874 | 0.066929 | 0.110236 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.250202 | 1,239 | 41 | 90 | 30.219512 | 0.820237 | 0.419693 | 0 | 0 | 0 | 0 | 0.121429 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.125 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
527bb85e43b55afd9d6e09000cc926ba92e6f886 | 3,930 | py | Python | preprocess.py | TonyMTH/fashion-classifier | 2fd426599aca03026ea44538b6c3d3a8bc042a48 | [
"MIT"
] | null | null | null | preprocess.py | TonyMTH/fashion-classifier | 2fd426599aca03026ea44538b6c3d3a8bc042a48 | [
"MIT"
] | null | null | null | preprocess.py | TonyMTH/fashion-classifier | 2fd426599aca03026ea44538b6c3d3a8bc042a48 | [
"MIT"
] | 1 | 2021-11-19T11:52:18.000Z | 2021-11-19T11:52:18.000Z | from torchvision import datasets, transforms
import torch
import copy
import numpy as np
def download_data(path, transformer, datatype, batch_size):
    """Download FashionMNIST into *path* and return a shuffling DataLoader.

    *datatype* selects the train (True) or test (False) split; *transformer*
    is applied to each sample.
    """
    fashion_ds = datasets.FashionMNIST(
        path, download=True, train=datatype, transform=transformer)
    return torch.utils.data.DataLoader(
        fashion_ds, batch_size=batch_size, shuffle=True)
def train_transform():
    """Return the training transform: tensor conversion then normalization
    with mean 0.5 and std 0.5."""
    normalize = transforms.Normalize((0.5), (0.5))
    return transforms.Compose([transforms.ToTensor(), normalize])
def test_transform():
    """Return the evaluation transform: tensor conversion then normalization
    with mean 0.5 and std 0.5."""
    normalize = transforms.Normalize((0.5), (0.5))
    return transforms.Compose([transforms.ToTensor(), normalize])
def train_loop(model, epochs, trainloader, testloader, optimizer, criterion, model_path, saved_model_device, device):
    """Train *model* for *epochs*, evaluating each epoch and checkpointing the best model.

    Parameters
    ----------
    model : torch.nn.Module
        Network applied to flattened image batches.
    epochs : int
        Number of training epochs.
    trainloader, testloader : iterable
        Yield ``(images, labels)`` batches.
    optimizer : torch.optim.Optimizer
        Optimizer stepping the model parameters.
    criterion : callable
        Loss function (e.g. ``nn.NLLLoss``).
    model_path : str
        Where the best model (lowest running training loss) is saved.
    saved_model_device : str or torch.device
        Device the checkpointed copy is moved to before saving.
    device : str or torch.device
        Device used for training and evaluation.

    Returns
    -------
    tuple of list
        ``(train_losses, test_losses, train_accuracies, test_accuracies)``
        with one entry per epoch (previously these were computed but discarded).
    """
    train_losses, test_losses = [], []
    train_accuracies, test_accuracies = [], []
    least_running_loss = np.inf

    for e in range(epochs):
        running_loss = 0
        for images, labels in trainloader:
            # Move data to device
            images, labels = images.to(device), labels.to(device)
            # Flatten each image batch into (batch, features)
            images = images.view(images.shape[0], -1)
            # Training pass
            optimizer.zero_grad()
            output = model.forward(images)
            loss = criterion(output, labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()

        test_loss = 0
        train_accuracy = 0
        test_accuracy = 0
        # Turn off gradients for validation, saves memory and computation
        with torch.no_grad():
            # Set the model to evaluation mode
            model.eval()
            # Validation pass
            for images, labels in testloader:
                # Move data to device
                images, labels = images.to(device), labels.to(device)
                images = images.view(images.shape[0], -1)
                log_ps = model(images)
                test_loss += criterion(log_ps, labels)
                ps = torch.exp(log_ps)
                top_p, top_class = ps.topk(1, dim=1)
                equals = top_class == labels.view(*top_class.shape)
                test_accuracy += torch.mean(equals.type(torch.FloatTensor))

            # Training-set accuracy pass.
            # BUGFIX: this loop previously also accumulated its loss into
            # ``test_loss``, inflating the reported/stored test loss.
            for images, labels in trainloader:
                # Move data to device
                images, labels = images.to(device), labels.to(device)
                images = images.view(images.shape[0], -1)
                log_ps = model(images)
                ps = torch.exp(log_ps)
                top_p, top_class = ps.topk(1, dim=1)
                equals = top_class == labels.view(*top_class.shape)
                train_accuracy += torch.mean(equals.type(torch.FloatTensor))

        model.train()

        train_losses.append(running_loss / len(trainloader))
        test_losses.append(test_loss / len(testloader))
        # Store per-batch averages (consistent with the printed values).
        train_accuracies.append(train_accuracy / len(trainloader))
        test_accuracies.append(test_accuracy / len(testloader))

        # Save best model (lowest running training loss so far)
        if running_loss < least_running_loss:
            least_running_loss = running_loss
            best_model_state = copy.deepcopy(model)
            best_model_state.to(saved_model_device)
            torch.save(best_model_state, model_path)

        print("Epoch: {}/{}..".format(e + 1, epochs),
              "Training loss: {:.3f}..".format(running_loss / len(trainloader)),
              "Test loss: {:.3f}..".format(test_loss / len(testloader)),
              "Train Accuracy: {:.3f}".format(train_accuracy / len(trainloader)),
              "Test Accuracy: {:.3f}".format(test_accuracy / len(testloader))
              )

    return train_losses, test_losses, train_accuracies, test_accuracies
| 36.388889 | 117 | 0.58855 | 441 | 3,930 | 5.090703 | 0.265306 | 0.044098 | 0.03118 | 0.022717 | 0.448998 | 0.380846 | 0.380846 | 0.329621 | 0.329621 | 0.329621 | 0 | 0.011091 | 0.311705 | 3,930 | 107 | 118 | 36.728972 | 0.818854 | 0.094656 | 0 | 0.342857 | 0 | 0 | 0.027927 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057143 | false | 0 | 0.057143 | 0.028571 | 0.157143 | 0.014286 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
527bfbb700cf6aa7c22512f1e8c3ab4c7a6708b0 | 4,845 | py | Python | pinspect/utils.py | dizcza/pinspect | c72909f9e996b84b3ebaab7a81241251ae9d1891 | [
"MIT"
] | 1 | 2019-12-11T09:57:01.000Z | 2019-12-11T09:57:01.000Z | pinspect/utils.py | dizcza/pinspect | c72909f9e996b84b3ebaab7a81241251ae9d1891 | [
"MIT"
] | null | null | null | pinspect/utils.py | dizcza/pinspect | c72909f9e996b84b3ebaab7a81241251ae9d1891 | [
"MIT"
] | null | null | null | import inspect
import logging
import re
import networkx as nx
from pyvis.network import Network
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
# does not match to any symbol
REGEX_NEVER_MATCH = '(?!x)x'
NON_EXECUTABLE = "save|write|remove|delete|duplicate"
def getmembers(obj_class):
    """Collect the names of all members of a class.

    Parameters
    ----------
    obj_class : type
        An object class.

    Returns
    -------
    set
        Method and attribute names discovered by :func:`inspect.getmembers`.
    """
    return {name for name, _member in inspect.getmembers(obj_class)}
def get_module_root(obj):
    """Return the top-level package name of the module defining *obj*'s class."""
    module_name = obj.__class__.__module__
    return module_name.partition('.')[0]
class IgnoreFunc:
    """Callable that decides whether an object's attribute should be skipped."""

    def __init__(self, key, obj_class=()):
        """
        Parameters
        ----------
        key : str or list, optional
            A string or a list of strings to ignore `obj` attributes and methods from being accessed and executed.
            Apart from user-provided strings, all methods that contain one of the following key-words will be ignored:
              'save', 'write', 'remove', 'delete', 'duplicate'
            For the total list of ignored key-words, see `NON_EXECUTABLE` in `utils.py`.
        obj_class : list, optional
            A list of class types to ignore.
            Apart from user-provided class types, all numpy functions will not be executed.
        """
        # case-insensitive pattern of attribute names to skip
        self.ignore = re.compile(key, flags=re.IGNORECASE)
        self.ignored_functions = dict()
        try:
            # numpy is optional: skip all ndarray/np members when available
            import numpy as np
            self.ignored_functions[np.ndarray] = getmembers(np.ndarray)
            self.ignored_functions[np.ndarray].update(getmembers(np))
        except ImportError:
            pass
        if not isinstance(obj_class, (list, tuple, set)):
            obj_class = [obj_class]
        for class_type in obj_class:
            self.ignored_functions[class_type] = getmembers(class_type)

    def __call__(self, obj, attribute_name):
        """
        Check the `obj` for the attribute name `func_name`.

        Parameters
        ----------
        obj : object
            Object to take the attribute from.
        attribute_name : str
            `obj` attribute name.

        Returns
        -------
        bool
            Whether this attribute should be ignored or not.
            (Note: the fallback returns a truthy ``re.Match`` or ``None``,
            which callers use in boolean context.)
        """
        for ignored_class, ignored_functions in self.ignored_functions.items():
            if isinstance(obj, ignored_class) and attribute_name in ignored_functions:
                return True
        return self.ignore.search(attribute_name)
def to_pyvis(graph, layout=True):
    """
    This method takes an exisitng Networkx graph and translates
    it to a PyVis graph format that can be accepted by the VisJs
    API in the Jinja2 template.

    Parameters
    ----------
    graph : nx.DiGraph
        NetworkX directed graph.
    layout : bool
        Use hierarchical layout if this is set.

    Returns
    -------
    net : Network
        PyVis Network
    """
    def add_node(node_id):
        # copy label/level/color/title attributes from the nx node
        attr = nodes[node_id]
        net.add_node(node_id, label=attr['label'], level=attr['level'], color=attr.get('color', None),
                     title=attr['title'])

    edges = graph.edges.data()
    nodes = graph.nodes
    net = Network(height="960px", width="1280px", directed=True, layout=layout)
    for v, u, edge_attr in edges:
        # nodes are added lazily as their edges are encountered
        add_node(v)
        add_node(u)
        net.add_edge(v, u, title=edge_attr['label'], color=edge_attr['color'])
    return net
def to_string(graph, source, prefix=''):
    """
    Yield a string representation of every root-to-leaf call path in *graph*.

    Parameters
    ----------
    graph : nx.DiGraph
        Graph, obtained by `GraphBuilder`.
    source : int
        Source node id.
    prefix : str
        Call history accumulated so far; each recursion appends the edge label.

    Returns
    -------
    generator
        One string per leaf reachable from *source*.
    """
    successors = graph.adj[source]
    if not successors:
        # leaf node: emit the accumulated path with the node's label
        yield f"{prefix} -> '{graph.nodes[source]['label']}'"
        return
    for child, edge_attr in successors.items():
        yield from to_string(graph, source=child, prefix=f"{prefix}.{edge_attr['label']}")
def check_edge(graph, edge_label):
    """
    Count (and log) edges whose ``label`` attribute matches a pattern.

    Parameters
    ----------
    graph : nx.DiGraph
        A graph.
    edge_label : str
        Edge label (regular expression).

    Returns
    -------
    int
        Counts how many edges have the property `label` that matches `edge_label`.
    """
    pattern = re.compile(edge_label)
    matching = [
        (v, u, label)
        for v, u, label in graph.edges.data('label')
        if pattern.search(label)
    ]
    for v, u, label in matching:
        logging.info(f"{graph.nodes[v]['label']}.{label} -> {graph.nodes[u]['label']}")
    return len(matching)
| 29.186747 | 118 | 0.61259 | 610 | 4,845 | 4.740984 | 0.303279 | 0.030429 | 0.034578 | 0.024896 | 0.040802 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00314 | 0.276987 | 4,845 | 165 | 119 | 29.363636 | 0.822438 | 0.384107 | 0 | 0.066667 | 0 | 0.016667 | 0.084666 | 0.057745 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0.016667 | 0.166667 | 0.016667 | 0.416667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
527d096b6239531d6033a6d10e5f212e3a876e4d | 23,773 | py | Python | tools/static/usr/lib/russng/tools/rubb.py | johnm-dev/russng | 0265ccba20bf00d00cff7d448099faf65be17947 | [
"Apache-2.0"
] | 1 | 2018-02-08T00:30:24.000Z | 2018-02-08T00:30:24.000Z | tools/static/usr/lib/russng/tools/rubb.py | johnm-dev/russng | 0265ccba20bf00d00cff7d448099faf65be17947 | [
"Apache-2.0"
] | null | null | null | tools/static/usr/lib/russng/tools/rubb.py | johnm-dev/russng | 0265ccba20bf00d00cff7d448099faf65be17947 | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/python3
#
# rubb.py
import os
import os.path
import pwd
import shutil
import signal
import stat
import subprocess
import sys
from sys import stderr
import traceback
# system
ETC_DIR = "/etc/russ"
RUN_DIR = "/var/run/russ"
CONF_DIR = "%s/conf" % RUN_DIR
PIDS_DIR = "%s/pids" % RUN_DIR
SERVICES_DIR = "%s/services" % RUN_DIR
SYSTEM_SOURCESFILE = "%s/bb.sources" % ETC_DIR
SYSTEM_BBBASEDIR = "%s/bb" % RUN_DIR
SYSTEM_SAFEPATHS = ["/run/russ/bb", "/var/run/russ/bb"]
DEVNULL = open("/dev/null", "w")
class BB:
"""Manage bulletin board (BB) for RUSS services.
Organized as:
.../bb/
<bbname>/
conf/
pids/
services/
The pids dir is only used for "system" BBs.
"""
def __init__(self, bbdir):
self.bbdir = bbdir
self.name = os.path.basename(bbdir)
self.confdir = os.path.join(self.bbdir, "conf")
self.pidsdir = os.path.join(self.bbdir, "pids")
self.servicesdir = os.path.join(self.bbdir, "services")
def prep(self):
"""Ensure working areas exist.
"""
print("prepping bb (%s) ..." % (self.name,))
for dirpath in [self.confdir, self.pidsdir, self.servicesdir]:
if not os.path.isdir(dirpath):
if verbose:
print("makedir (%s)" % (dirpath,))
os.makedirs(dirpath)
def clean(self, safepaths):
"""Clean areas associated with srcname.
"""
print("cleaning bb (%s) ..." % (self.name,))
for dirpath in [self.confdir, self.pidsdir, self.servicesdir]:
if os.path.exists(dirpath):
for safepath in safepaths:
if dirpath.startswith(safepath):
for name in os.listdir(dirpath):
path = os.path.join(dirpath, name)
if verbose:
print("removing (%s)" % (path,))
os.remove(path)
if not os.listdir(dirpath):
os.rmdir(dirpath)
if os.path.exists(self.bbdir):
if not os.listdir(self.bbdir):
if verbose:
print("rmdir (%s)" % (self.bbdir,))
os.rmdir(self.bbdir)
def get_confnames(self):
"""Return configuration names without the .conf.
"""
_, _, names = next(os.walk(self.confdir))
names = [name[:-5] for name in names]
return names
def get_names(self):
"""Return all names found under conf/ and services/.
"""
if os.path.isdir(self.confdir):
_, _, confnames = next(os.walk(self.confdir))
else:
confnames = []
if os.path.isdir(self.servicesdir):
_, _, servicenames = next(os.walk(self.servicesdir))
else:
servicenames = []
names = [name[:-5] for name in confnames if name.endswith(".conf")]
names.extend(servicenames)
return set(names)
def get_servernames(self):
"""List server names.
"""
names = os.listdir(self.servicesdir)
return names
def get_server(self, name):
return BBServer(self, name)
def install(self, filename, newname=None):
"""Install file contents to configuration file.
"""
self.prep()
if newname:
name = newname
else:
name = os.path.basename(filename)
if name.endswith(".conf"):
name = name[:-5]
print("installing (%s) from file (%s)" % (name, filename))
txt = open(filename).read()
bs = self.get_server(name)
bs.install(txt)
def remove(self, name):
"""Remove configuration.
"""
bs = self.get_server(name)
if bs:
bs.removeconf()
def show(self, name):
"""Show configuration.
"""
bs = self.get_server(name)
if bs:
txt = bs.get_conf()
if txt:
print(txt)
def start_servers(self, names):
"""Start select named or all servers of a BB.
"""
print("starting servers for bb (%s) ..." % (self.name,))
for name in names:
bs = self.get_server(name)
if bs.isrunning():
stderr.write("warning: server (%s) already running\n" % (name,))
else:
bs.start()
st = bs.get_status()
if st:
print("bb=%(bbname)s:name=%(name)s:running=%(isrunning)s" % st)
def status_servers(self, names, detail=False):
"""Output status of select named or all servers of a BB.
"""
for name in names:
bs = self.get_server(name)
st = bs.get_status()
if st:
if detail:
print("bb=%(bbname)s:name=%(name)s:running=%(isrunning)s:type=%(type)s:pid=%(pid)s:conffile=%(conffile)s:servicefile=%(servicefile)s" % st)
else:
print("bb=%(bbname)s:name=%(name)s:running=%(isrunning)s" % st)
def stop_servers(self, names):
"""Stop select named or all servers of a BB.
"""
print("stopping servers for bb (%s) ..." % (self.name,))
for name in names:
bs = self.get_server(name)
if bs.isrunning():
bs.stop()
st = bs.get_status()
if st:
print("bb=%(bbname)s:name=%(name)s:running=%(isrunning)s" % st)
def sync(self, sources, tags=None, preclean=False):
    """Sync configuration from sources to BB.
    Configurations that are not found in the sources are cleaned.

    :param sources: list of dicts with "name", "type" ("dir" or "file")
        and "source" (path) keys, as produced by SourcesFile.
    :param tags: optional list of source names to restrict the sync to.
    :param preclean: stop and clean all existing entries first.
    """
    print("syncing bb (%s) ..." % (self.name,))
    if tags:
        sources = [d for d in sources if d["name"] in tags]
    self.prep()
    foundfilenames = set([name for name in os.listdir(self.confdir) if name.endswith(".conf")])
    if preclean:
        for filename in foundfilenames:
            name = filename[:-5]
            s = self.get_server(name)
            s.stop()
            s.clean()
    syncfilenames = []
    for d in sources:
        srctype = d["type"]
        srcpath = d["source"]
        if srctype in ["dir", "file"]:
            if srctype == "dir":
                filenames = os.listdir(srcpath)
            else:
                # Single file: treat its directory as the source root.
                filenames = [os.path.basename(srcpath)]
                srcpath = os.path.dirname(srcpath)
            filenames = [name for name in filenames if name.endswith(".conf")]
            for filename in filenames:
                name = filename[:-5]
                # First source wins when two sources provide the same name.
                if filename in syncfilenames:
                    stderr.write("skipping. will not sync duplicate name (%s) from source (%s)\n" % (name, d["name"]))
                    continue
                txt = open(os.path.join(srcpath, filename)).read()
                s = BBServer(self, name)
                print("installing (%s) from source (%s)" % (name, d["name"]))
                s.install(txt)
                syncfilenames.append(filename)
    # clean entries no source provided -- skipped when syncing a tag
    # subset, so entries owned by other sources are not wiped
    if not tags:
        for filename in foundfilenames.difference(syncfilenames):
            name = filename[:-5]
            s = BBServer(self, name)
            print("cleaning (%s)" % (name,))
            s.clean()
class BBServer:
    """Manage server under BB location.

    A server entry is represented by up to three per-name files:
    ``<confdir>/<name>.conf`` (configuration), ``<pidsdir>/<name>``
    (recorded pid) and ``<servicesdir>/<name>`` (either a listening
    socket or a copy of the conf file for run-on-demand services).
    """
    def __init__(self, bb, name):
        # The owning BB object supplies the confdir/pidsdir/servicesdir roots.
        self.bb = bb
        self.name = name
        self.confname = "%s.conf" % (name,)
        self.conffile = os.path.join(self.bb.confdir, self.confname)
        self.pidfile = os.path.join(self.bb.pidsdir, self.name)
        self.servicefile = os.path.join(self.bb.servicesdir, self.name)

    def _getpid(self):
        """Return the recorded pid as an int, or None when absent/unreadable."""
        try:
            return int(open(self.pidfile).read())
        except:
            return None

    def _hasserviceconffile(self):
        """Return True if the conf file's first line carries " service=conffile"."""
        try:
            line = open(self.conffile).readline()
            return " service=conffile" in line
        except:
            return False

    def _killpid(self):
        """Send SIGHUP to the server's process group and drop the pid file."""
        if self.isrunning():
            pid = self._getpid()
            # Negative pid signals the whole process group (the server is
            # spawned with main:pgid=0, i.e. as its own group leader).
            os.kill(-pid, signal.SIGHUP)
        self._removepid()

    def _removepid(self):
        """Delete the pid file, ignoring errors (it may not exist)."""
        try:
            os.remove(self.pidfile)
        except:
            pass

    def _removeservice(self):
        """Delete the service file if present."""
        if os.path.exists(self.servicefile):
            os.remove(self.servicefile)

    def _ruspawn(self):
        """Spawn the server via ruspawn and record its pid.

        Returns True on (apparent) success. NOTE(review): Popen.pid is
        never None in practice, so the failure branch looks unreachable.
        """
        pargs = [
            "ruspawn",
            "-f", self.conffile,
            # Own process group so _killpid() can signal the whole group.
            "-c", "main:pgid=0",
            "-c", "main:addr=%s" % (self.servicefile,)
        ]
        p = subprocess.Popen(pargs,
            stdin=DEVNULL,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            close_fds=True)
        out, err = p.communicate()
        if debug:
            print("pargs (%s)" % (pargs,))
            print("pid (%s) out (%s) err (%s)" % (p.pid, out, err))
        if p.pid == None:
            return False
        self._setpid(p.pid)
        return True

    def _setpid(self, pid):
        """Record *pid* in the pid file."""
        open(self.pidfile, "w+").write("%s" % (pid,))

    def clean(self):
        """Clean server items
        (configuration, pid and service files).
        """
        self.removeconf()
        self._removepid()
        self._removeservice()

    def get_conf(self):
        """Return the configuration text, or None if unreadable."""
        try:
            return open(self.conffile).read()
        except:
            pass

    def get_status(self):
        """Return status information.

        Keys: bbname, conffile, isrunning, name, pid, pidfile,
        servicefile, type ("conffile" or "socket"). Path values are
        None when the corresponding file does not exist.
        """
        d = {
            "bbname": self.bb.name,
            "conffile": os.path.exists(self.conffile) and self.conffile or None,
            "isrunning": self.isrunning(),
            "name": self.name,
            "pid": self._getpid(),
            "pidfile": os.path.exists(self.pidfile) and self.pidfile or None,
            "servicefile": os.path.exists(self.servicefile) and self.servicefile or None,
            "type": self.isconffile() and "conffile" or "socket",
        }
        return d

    def install(self, txt):
        """Install configuration file.

        :param txt: full configuration text to write.
        """
        open(self.conffile, "w+").write(txt)

    def isconffile(self):
        """Check if servicefile is conffile rather than a socket file.
        """
        try:
            st = os.stat(self.servicefile)
            if stat.S_ISSOCK(st.st_mode):
                return False
            return self._hasserviceconffile()
        except:
            if debug:
                traceback.print_exc()
            return False

    def isrunning(self):
        """Check if server is running.
        A running server has a servicefile and a pidfile.
        """
        try:
            if os.path.exists(self.pidfile):
                pid = open(self.pidfile).read()
                # Signal 0 only probes that the process group exists.
                os.kill(-int(pid), 0)
                return True
            else:
                # Conffile-type services are run on demand, so the mere
                # presence of their servicefile counts as available.
                return self.isconffile()
        except:
            if debug:
                traceback.print_exc()
            return False

    def removeconf(self):
        """Delete the configuration file if present."""
        if os.path.exists(self.conffile):
            os.remove(self.conffile)

    def restart(self):
        """Stop then start the server."""
        self.stop()
        self.start()

    def start(self):
        """Make the server available: copy the conf file into place for
        run-on-demand services, otherwise spawn it via ruspawn."""
        if self._hasserviceconffile():
            shutil.copy(self.conffile, self.servicefile)
        else:
            self._ruspawn()

    def stop(self):
        """Make the server unavailable; socket-type servers are signalled."""
        if not self.isconffile():
            self._killpid()
        self._removeservice()
class SourcesFile:
    """Interface to working with the bb.sources file.

    Each non-blank, non-comment line has the form::

        bbname:name:type:source

    where *source* may itself contain ":" characters.
    """
    def __init__(self, path=None):
        self.path = path
        # bbname -> list of {"name", "type", "source"} dicts; None until load().
        self.d = None

    def get_sources(self, bbname):
        """Get sources associated with name from sources file.

        :return: list of source dicts, or None for an unknown bbname.
        """
        self.load()
        return self.d.get(bbname)

    def get_bbnames(self):
        """Get BB names from sources file."""
        self.load()
        return list(self.d.keys())

    def load(self, force=False):
        """Load sources file.
        Use force to reload.
        """
        if not force and self.d is not None:
            return
        d = {}
        # Close the file promptly instead of leaking the handle.
        with open(self.path) as f:
            lines = f.readlines()
        for line in lines:
            line = line.strip()
            if line == "" or line.startswith("#"):
                continue
            # maxsplit=3 keeps any ":" inside the source field intact
            # (the old unbounded split silently truncated such paths).
            bbname, name, srctype, source = line.split(":", 3)
            d.setdefault(bbname, []).append({
                "name": name,
                "type": srctype,
                "source": source,
            })
        self.d = d
def get_bbdir(bbbasedir, bbname=None):
    """Return bbdir based on user and optional bb name.
    If name starts with "/", then return it as the bbdir. Otherwise,
    name cannot contain a "/". When *bbname* is omitted, the base
    directory itself is returned.

    :return: the resolved directory, or None for an invalid name.
    """
    if bbname and bbname.startswith("/"):
        return bbname
    if bbname and "/" in bbname:
        return None
    if bbname is None:
        # BUG FIX: os.path.join(bbbasedir, None) raised TypeError even
        # though the signature declares bbname optional.
        return bbbasedir
    return os.path.join(bbbasedir, bbname)
def get_bbnames(bbbasedir, bbnames=None):
    """Return list of BB names.
    Filter bbnames if provided.

    :param bbbasedir: directory whose immediate subdirectories are BBs.
    :param bbnames: optional candidate names; only existing ones are kept.
    """
    try:
        # The first os.walk() tuple lists the immediate subdirectories.
        _, realbbnames, _ = next(os.walk(bbbasedir))
    except (OSError, StopIteration):
        # Narrowed from a bare except: a missing/unreadable basedir
        # simply yields no names.
        realbbnames = []
    if bbnames is None:
        bbnames = realbbnames
    else:
        realbbnames = set(realbbnames)
        bbnames = [bbname for bbname in bbnames if bbname in realbbnames]
    return bbnames
def print_usage():
    """Print the command-line usage/help text to stdout."""
    # The usage text is parameterized only by the invoked program name.
    d = {
        "progname": os.path.basename(sys.argv[0]),
    }
    print("""\
usage: %(progname)s [<options>] <cmd> [...]
%(progname)s -h|--help|help
Manage system or user RUSS bulletin boards (BB). A BB hosts RUSS
services. Although the services can be accessed directly using a
path, the standard way is to use the ("+") plus service. By default,
the plus server searches for services at some system ("system") and
user ("override", "fallback") BBs.
System BBs can host services by either a socket (running) or
configuration file (run on demand). The user BBs host services by
configuration file only.
System BBs are configured using the "sync" command which uses the
/etc/russ/bb.sources file which specifies configuration sources used
to set up. Alternatively, the "install" and "remove" commands can
also be used. However, for BBs that are managed using the sources
file, the "sync" operation will overwrite/remove anything that was
installed with "install".
User BBs are configured using the "install" and "remove" commands.
Common options:
--bb <bbname>[,...]
Select named BBs. System default is "system". User
default is "override".
--bb-all Select all BBs.
--debug Print debugging information.
-l Print detailed information when applicable.
--sources <path>
(system) Alternate path of the bb.sources file.
--verbose Print additional information.
Commands:
clean Clean BB.
install <filename> [<newname>]
Install configuration (filename ends with .conf). Use
<newname> to override name derived from <filename>.
list List BB entries. Use -l for details.
list-bb List BBs.
list-sources (system) List sources from sources file.
remove <name> Remove configuration.
restart [<name>,...]
Restart server(s).
resync (system) Clean and sync.
show <name> Show configuration.
start [<name>,...]
Start server(s). Make available for use.
status [<name>,...]
Report status of server(s). Use -l for details.
stop [<name>,...]
Stop server(s). Make unavailable for use.
sync [<tag>,...]
(system) Syncronize local configuration using sources
specified in a bb.sources file. Use <tag> to limit
sources to use.""" % d)
def main(args):
    """Command-line entry point: parse options from *args* (sys.argv[1:]),
    resolve the target BB(s) and dispatch the command. Exits via sys.exit."""
    global debug, verbose
    try:
        # Defaults; RUBB_DEBUG/RUBB_VERBOSE env vars pre-seed the flags.
        bball = False
        bbbasedir = None
        bbnames = None
        cmd = None
        debug = os.environ.get("RUBB_DEBUG") == "1"
        detail = False
        sf = None
        sourcesfile = None
        username = None
        usertype = None
        verbose = os.environ.get("RUBB_VERBOSE") == "1"
        # root manages the system BBs; everyone else their own user BBs.
        if os.getuid() == 0:
            usertype = "system"
        else:
            usertype = "user"
        # Consume leading options; the first non-option token is the command.
        while args:
            arg = args.pop(0)
            if arg == "--bb" and args:
                bbnames = args.pop(0).split(",")
                bball = False
            elif arg == "--bb-all":
                bball = True
                bbnames = None
            elif arg == "--bbbasedir" and args:
                bbbasedir = args.pop(0)
            elif arg == "--debug":
                debug = True
            elif arg in ["-h", "--help", "help"]:
                print_usage()
                sys.exit(0)
            elif arg == "-l":
                detail = True
            elif arg == "--sources" and args:
                # BUG: assigns "sourcespath", which is never read again, so
                # the --sources option is a no-op; it most likely should set
                # "sourcesfile" (used below) -- confirm intent before fixing.
                sourcespath = args.pop(0)
            elif arg == "--system":
                usertype = "system"
            elif arg == "--user" and args:
                usertype = "user"
                username = args.pop(0)
            elif arg == "--verbose":
                verbose = True
            else:
                cmd = arg
                break
        # Validate the --user username against the password database.
        if username:
            try:
                pwd.getpwnam(username)
            except:
                stderr.write("error: bad username (%s)\n" % (username))
                sys.exit(1)
        # Resolve per-usertype defaults.
        if usertype == "system":
            bbbasedir = bbbasedir or SYSTEM_BBBASEDIR
            bbnames = bbnames or ["system"]
            safepaths = SYSTEM_SAFEPATHS
            sourcesfile = SYSTEM_SOURCESFILE
        else:
            if username:
                bbbasedir = bbbasedir or os.path.expanduser("~%s/.russ/bb" % (username,))
            else:
                bbbasedir = bbbasedir or os.path.expanduser("~/.russ/bb")
            bbnames = bbnames or ["override"]
            safepaths = [bbbasedir]
            sourcesfile = None
        # validate
        if not os.path.exists(bbbasedir):
            pass
        if sourcesfile and os.path.exists(sourcesfile):
            sf = SourcesFile(sourcesfile)
        if not cmd:
            # Reported as "bad/missing arguments" by the handler below.
            raise Exception()
    except SystemExit:
        raise
    except:
        if debug:
            traceback.print_exc()
        stderr.write("error: bad/missing arguments\n")
        sys.exit(1)
    try:
        if verbose:
            print("bb basedir (%s)" % (bbbasedir,))
            print("bb names (%s)" % (bbnames,))
            print("sources file (%s)" % (sourcesfile,))
            print("cmd (%s)" % (cmd,))
        if cmd in ["clean", "list", "list-sources", "restart", "resync", "start", "status", "stop", "sync"]:
            # multi bbname commands
            if cmd in ["list", "restart", "start", "status", "stop"]:
                if not bbbasedir or not os.path.exists(bbbasedir):
                    stderr.write("error: bb basedir (%s) not found\n" % (bbbasedir,))
                    sys.exit(1)
                if bball:
                    bbnames = get_bbnames(bbbasedir)
            elif cmd in ["list-sources", "resync", "sync"]:
                if bball:
                    bbnames = sf.get_bbnames()
            # Each BB gets a fresh copy of the remaining arguments.
            _args = args[:]
            for bbname in bbnames:
                args = _args[:]
                bbdir = get_bbdir(bbbasedir, bbname)
                bb = BB(bbdir)
                if cmd == "clean" and not args:
                    names = sorted(bb.get_names())
                    bb.stop_servers(names)
                    bb.clean(safepaths)
                elif cmd == "list" and not args:
                    names = sorted(bb.get_names())
                    if names:
                        print("%s: %s" % (bbname, " ".join(names)))
                elif cmd == "list-sources" and not args:
                    sources = sf.get_sources(bbname)
                    if sources:
                        if detail:
                            for d in sources:
                                print("%s:%s" % (bbname, "%(name)s:%(type)s:%(source)s" % d))
                        else:
                            print("%s: %s" % (bbname, " ".join([d["name"] for d in sources])))
                elif cmd == "restart" and len(args) < 2:
                    names = args and [args.pop(0)] or sorted(bb.get_names())
                    bb.stop_servers(names)
                    bb.start_servers(names)
                elif cmd == "resync":
                    names = sorted(bb.get_names())
                    bb.stop_servers(names)
                    sources = sf.get_sources(bb.name)
                    if sources:
                        bb.clean(safepaths)
                        bb.sync(sources)
                    else:
                        print("skipping. no source for bb (%s)" % (bb.name,))
                elif cmd == "status" and len(args) < 2:
                    names = args and args.pop(0).split(",") or sorted(bb.get_names())
                    bb.status_servers(names, detail)
                elif cmd == "stop" and len(args) < 2:
                    names = args and [args.pop(0)] or sorted(bb.get_names())
                    bb.stop_servers(names)
                elif cmd == "sync" and len(args) < 2:
                    # BUG: "tags" is read before any assignment, so this
                    # raises NameError (swallowed by the broad except below,
                    # reporting "fail to run command" for every sync).
                    # Likely intended: tags = args and args.pop(0).split(",")
                    tags = tags and args.pop(0).split(",")
                    sources = sf.get_sources(bb.name)
                    if sources:
                        bb.sync(sources, tags)
                    else:
                        print("skipping. no source for bb (%s)" % (bb.name,))
                elif cmd == "start" and len(args) < 2:
                    names = args and [args.pop(0)] or sorted(bb.get_names())
                    bb.start_servers(names)
                else:
                    stderr.write("error: bad/missing command or arguments\n")
                    sys.exit(1)
        elif cmd in ["install", "remove", "show"]:
            # single bbname commands
            if cmd in ["show"]:
                if not bbbasedir or not os.path.exists(bbbasedir):
                    stderr.write("error: bb basedir (%s) not found\n" % (bbbasedir,))
                    sys.exit(1)
                if bball:
                    bbnames = get_bbnames(bbbasedir)
            bbname = bbnames[0]
            bbdir = get_bbdir(bbbasedir, bbname)
            bb = BB(bbdir)
            if cmd == "install" and args:
                filename = None
                newname = None
                filename = args.pop(0)
                if args:
                    newname = args.pop(0)
                bb.install(filename, newname)
            elif cmd == "remove" and len(args) == 1:
                name = args.pop(0)
                bb.remove(name)
            elif cmd == "show" and len(args) == 1:
                name = args.pop(0)
                bb.show(name)
            else:
                stderr.write("error: bad/missing command or arguments\n")
                sys.exit(1)
        elif cmd in ["list-bb"]:
            if cmd == "list-bb":
                bbnames = get_bbnames(bbbasedir)
                if bbnames:
                    print(" ".join(bbnames))
            else:
                stderr.write("error: bad/missing command or arguments\n")
                sys.exit(1)
        else:
            stderr.write("error: bad/missing command or arguments\n")
            sys.exit(1)
    except SystemExit:
        raise
    except:
        if debug:
            traceback.print_exc()
        stderr.write("error: fail to run command\n")
        sys.exit(1)
    sys.exit(0)
# Script entry point: pass through the CLI arguments (minus program name).
if __name__ == "__main__":
    main(sys.argv[1:])
| 32.082321 | 159 | 0.503512 | 2,613 | 23,773 | 4.534252 | 0.132798 | 0.016205 | 0.009453 | 0.009453 | 0.253967 | 0.191256 | 0.17024 | 0.17024 | 0.158339 | 0.125338 | 0 | 0.003576 | 0.376646 | 23,773 | 740 | 160 | 32.125676 | 0.795938 | 0.066798 | 0 | 0.318739 | 0 | 0.001751 | 0.188616 | 0.013671 | 0 | 0 | 0 | 0 | 0 | 1 | 0.070053 | false | 0.005254 | 0.017513 | 0.001751 | 0.136602 | 0.061296 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
52806429c051fc51f5db96e8df27987cfa7afda6 | 6,100 | py | Python | ramps_controller/ramps_controller.py | Ladvien/ramps_controller | 5fa1410c57bcc9112df16e1781341a9d1315f189 | [
"MIT"
] | 2 | 2021-02-04T12:07:56.000Z | 2022-02-10T14:00:52.000Z | ramps_controller/ramps_controller.py | Ladvien/ramps_controller | 5fa1410c57bcc9112df16e1781341a9d1315f189 | [
"MIT"
] | null | null | null | ramps_controller/ramps_controller.py | Ladvien/ramps_controller | 5fa1410c57bcc9112df16e1781341a9d1315f189 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 28 05:39:18 2019
@author: ladvien
"""
from time import sleep, time
"""
MOTOR_NUM:
X = 0
Y = 1
Z = 2
E0 = 3
E1 = 4
PACKET_TYPES
0x01 = motor_write
0x02 = motor_halt
DIRECTION
0x00 = CW
0x01 = CCW
MOTOR MOVE PROTOCOL:
0 1 2 3 4
MOTOR_PACKET = PACKET_TYPE DIR STEPS_1 STEPS_2 MICROSECONDS_BETWEEN
MOTOR_PACKET = 01 00 03 E8 05
MOTOR_PACKET = 0x 01010003E8050A
HALT = 0x0F
PACKAGE = PACKET1 PACKET2 PACKET3 PACKET4 PACKET5
PACKAGE_EXAMPLE = 01 00 03 E8 05 01 00 03 E8 05 01 00 03 E8 05 01 00 03 E8 05 01 00 03 E8 05
0 1 2 0
COMPLETED_PACKET = PACKET_TYPE SUCCESS MOTOR_NUM \n
0x01
PACKET_TYPES:
MOTOR_FINISHED = 0x01
SUCCESS_TYPES:
SUCCESS = 0x06
FAIL = 0x15
Types not motor related, MOTOR_NUM = 0.
01 01 00 FF E8 01 01 02 00 FF E8 01 01 02 00 FF E8 01 01 02 00 FF E8 01 01 02 00 FF E8 01
"""
class RAMPS:
    """Driver for a RAMPS stepper-motor controller spoken to over serial.

    Implements the byte protocol documented at the top of this module:
    a move is a 7-byte frame ``[DRIVE_CMD, motor, direction, steps_hi,
    steps_lo, delay, END_TX]``; the controller answers with ACK/NACK and
    later sends a COMPLETED frame.
    """

    # Packet types / framing bytes.
    DRIVE_CMD = 0x01
    HALT_CMD = 0x0F
    COMPLETED_CMD = 0x07
    END_TX = 0x0A

    # Directions.
    DIR_CC = 0x00
    DIR_CCW = 0x01

    # Handshake / result codes.
    ACKNOWLEDGE = 0x06
    NEG_ACKNOWLEDGE = 0x15
    SUCCESS = 0x06
    FAIL = 0x15

    # Motor selectors.
    MOTOR_X = 0x01
    MOTOR_Y = 0x02
    MOTOR_Z = 0x03
    MOTOR_E1 = 0x04
    MOTOR_E2 = 0x05

    def __init__(self, ser, debug=False):
        """
        :param ser: open pyserial-like handle used to talk to the board.
        :param bool debug: when True, print diagnostic messages.
        """
        self.ser = ser
        # BUG FIX: this was stored as ``self.toggle_debug``, which shadowed
        # the toggle_debug() method on every instance and left ``self.debug``
        # (flipped by that method) permanently undefined.
        self.debug = debug
        self.rx_buffer_size = 256
        self.serial_delay = 0.1  # seconds to wait for the board to answer

    def toggle_debug(self):
        """Flip debug printing on/off."""
        self.debug = not self.debug

    def print_debug(self, message):
        """Print *message* when debug mode is enabled."""
        if self.debug:
            print(message)

    # ------------------------------------------------------------------
    # COMMUNICATION
    # ------------------------------------------------------------------

    def encode_packet(self, values):
        """Encode a list of byte values for a serial send."""
        return bytearray(values)

    def prepare_motor_packet(self, motor_num, direction, steps, milli_between):
        """Build a motor-move frame the slave understands.

        :param motor_num: MOTOR_* selector.
        :param direction: DIR_CC or DIR_CCW.
        :param steps: step count, 0-65535, sent big-endian as two bytes.
        :param milli_between: delay between steps.
        :return: list of 7 ints terminated by END_TX.
        """
        steps_hi = (steps >> 8) & 0xFF
        steps_lo = steps & 0xFF
        return [self.DRIVE_CMD, motor_num, direction,
                steps_hi, steps_lo, milli_between, self.END_TX]

    def read_available(self, as_ascii=False):
        """Drain and return whatever the board has sent.

        :param as_ascii: decode each line as UTF-8 text instead of raw bytes.
        :return: list of byte values (or decoded lines); when more than
            ``rx_buffer_size`` reads accumulate, a character list of
            'RX buffer overflow.' is returned instead.
        """
        self.print_debug('Reading available.')
        incoming_data = []
        incoming_data_size = 0
        while self.ser.in_waiting > 0:
            incoming_data_size += 1
            if incoming_data_size > self.rx_buffer_size:
                self.print_debug('Buffer overflow.')
                return list('RX buffer overflow.')
            if as_ascii:
                incoming_data.append(self.ser.readline().decode('utf-8'))
            else:
                # Iterating bytes yields ints, so this extends with byte values.
                incoming_data += self.ser.readline()
        self.print_debug('Completed reading available.')
        return incoming_data

    def check_for_confirm(self, command_expected):
        """Return True if the next received byte equals *command_expected*,
        False on a mismatch, None when nothing was received."""
        confirmation = self.read_available()
        if confirmation:
            return confirmation[0] == command_expected
        return None

    # ------------------------------------------------------------------
    # RAMPS UTILITY
    # ------------------------------------------------------------------

    def reset_ramps(self, print_welcome=False):
        """Hard-reset the Arduino Mega and collect its boot banner.

        :param print_welcome: when True, echo the banner to stdout.
        """
        self.print_debug('Reseting Arduino.')
        # Pulsing DTR resets the board; give it time to reboot.
        self.ser.setDTR(False)
        sleep(0.4)
        self.ser.setDTR(True)
        sleep(2)
        # Collect the welcome message the firmware prints on boot.
        welcome_message = []
        while self.ser.in_waiting > 0:
            welcome_message.append(self.ser.readline().decode('utf-8'))
        self.print_debug('Completed reset.')
        if print_welcome:
            print(''.join(welcome_message))

    # ------------------------------------------------------------------
    # MOTOR COMMANDS
    # ------------------------------------------------------------------

    def move(self, motor, direction, steps, milli_secs_between_steps):
        """Command a motor move and wait (up to 120 s) for completion.

        :return: True when the move was acknowledged and completed,
            False otherwise.
        """
        packet = self.encode_packet(
            self.prepare_motor_packet(motor, direction, steps,
                                      milli_secs_between_steps))
        self.print_debug(f'Created move packet: {packet}')
        self.write_move(packet)
        # Don't miss the ACK by polling too soon.
        sleep(self.serial_delay)
        confirmation = self.read_available()
        print(confirmation)
        # BUG FIX: guard against an empty read, which previously raised
        # IndexError instead of reporting failure.
        if confirmation and confirmation[0] == self.ACKNOWLEDGE:
            self.print_debug('Move command acknowledged.')
            if self.wait_for_complete(120):
                return True
        return False

    def wait_for_complete(self, timeout):
        """Poll until the board reports completion or *timeout* seconds pass.

        :return: True on completion, False on timeout.
        """
        start_time = time()
        while True:
            duration = time() - start_time
            self.print_debug(duration)
            if duration > timeout:
                return False
            if self.check_for_confirm(self.COMPLETED_CMD):
                self.print_debug('Move command completed.')
                return True
            sleep(self.serial_delay)

    def write_move(self, packet):
        """Write an encoded packet to the serial port."""
        self.ser.write(packet)
        self.print_debug(f'Executed move packet: {packet}')
5280afe9a79ea1e7a3cb2b978ccc6b2696e14d60 | 2,890 | py | Python | src/attrbench/lib/attribution_writer.py | zoeparman/benchmark | 96331b7fa0db84f5f422b52cae2211b41bbd15ce | [
"MIT"
] | null | null | null | src/attrbench/lib/attribution_writer.py | zoeparman/benchmark | 96331b7fa0db84f5f422b52cae2211b41bbd15ce | [
"MIT"
] | 7 | 2020-03-02T13:03:50.000Z | 2022-03-12T00:16:20.000Z | src/attrbench/lib/attribution_writer.py | zoeparman/benchmark | 96331b7fa0db84f5f422b52cae2211b41bbd15ce | [
"MIT"
] | null | null | null | import numpy as np
import torch
from torch.utils.tensorboard import SummaryWriter
from matplotlib import colors
import matplotlib.pyplot as plt
def _scale_images(img_tensor):
return torch.true_divide((img_tensor - img_tensor.min()), (img_tensor.max() - img_tensor.min()))
def _clip_image(img_tensor):
res = img_tensor
res[res < 0.] = 0.
res[res > 1.] = 1.
return res
def _attrshow(attrs):
    """Render a batch of single-channel attribution maps as one figure.

    Maps are concatenated horizontally and drawn with a diverging
    colormap centered at zero (blue = negative, red = positive),
    with a horizontal colorbar underneath. Returns the figure.
    """
    npattrs = attrs.squeeze()  # [batch_size, rows, cols]
    if len(npattrs.shape) == 2:
        # If the batch had only 1 sample, we need to add back the original batch dim
        npattrs = npattrs[np.newaxis, ...]
    # Lay the batch out side by side as a single wide image.
    npattrs = np.concatenate(npattrs, axis=-1)
    # Keep the range strictly spanning zero so TwoSlopeNorm stays valid
    # even for all-positive or all-negative attributions.
    min_value = min(np.min(npattrs), -.01)
    max_value = max(np.max(npattrs), .01)
    divnorm = colors.TwoSlopeNorm(vmin=min_value, vcenter=0., vmax=max_value)
    fig, ax = plt.subplots()
    cs = ax.imshow(npattrs, cmap="bwr", norm=divnorm)
    fig.colorbar(cs, orientation="horizontal")
    plt.tight_layout()
    return fig
class AttributionWriter(SummaryWriter):
    """Tensorboard ``SummaryWriter`` specialized for attribution maps.

    When *mean* and *std* are given, images are de-normalized with them
    before logging; otherwise they are min-max scaled into [0, 1].
    """
    def __init__(self, log_dir: str, method_name: str = None, comment='', mean=None, std=None, purge_step=None, max_queue=10,
                 flush_secs=120, filename_suffix=''):
        """
        :param log_dir: directory for the event files.
        :param method_name: attribution method name, appended to tags.
        :param mean: per-channel normalization mean (optional).
        :param std: per-channel normalization std (optional).
        The remaining parameters are forwarded to ``SummaryWriter``.
        """
        super().__init__(log_dir=log_dir, comment=comment, purge_step=purge_step, max_queue=max_queue,
                         flush_secs=flush_secs, filename_suffix=filename_suffix)
        self.mean = mean
        self.std = std
        self.batch_nr = 0  # running batch counter, advanced by increment_batch()
        self.method_name = method_name

    def increment_batch(self):
        """Advance the internal batch counter."""
        self.batch_nr += 1

    def _normalize_images(self, img_tensor):
        """Undo dataset normalization: x * std + mean, broadcast per channel."""
        dtype = img_tensor.dtype
        mean = torch.as_tensor(self.mean, dtype=dtype, device=img_tensor.device)
        std = torch.as_tensor(self.std, dtype=dtype, device=img_tensor.device)
        # assumes img_tensor is (batch, channels, H, W) -- TODO confirm
        img_tensor = (img_tensor * std.view(1, -1, 1, 1)) + mean.view(1, -1, 1, 1)
        return img_tensor

    def add_images(self, tag, img_tensor, global_step=None, **kwargs):
        """Log an image batch, de-normalizing or rescaling it first."""
        if self.mean and self.std:
            img_tensor = self._normalize_images(img_tensor)
        # scale values to [0,1] for plotting if no std or mean were given
        else:
            img_tensor = _scale_images(img_tensor)
        super().add_images(tag, img_tensor, global_step=global_step, **kwargs)

    def add_attribution(self, tag, img_tensor, global_step=None):
        """Log attribution maps under ``tag`` (suffixed with the method name)."""
        if self.method_name is not None:
            tag = f"{tag}/{self.method_name}"
        # Multi-channel attributions are logged as rescaled images
        if img_tensor.shape[-3] > 1:
            img_tensor = _scale_images(img_tensor)
            super().add_images(tag, img_tensor, global_step=global_step)
        # if attributions have one channel, use the figure method
        else:
            fig = _attrshow(img_tensor)
            super().add_figure(tag, fig, global_step=global_step)
| 37.051282 | 125 | 0.658478 | 409 | 2,890 | 4.425428 | 0.308068 | 0.129282 | 0.033149 | 0.039779 | 0.159116 | 0.150276 | 0.116022 | 0.082873 | 0.082873 | 0.082873 | 0 | 0.014401 | 0.231142 | 2,890 | 77 | 126 | 37.532468 | 0.80018 | 0.094118 | 0 | 0.070175 | 0 | 0 | 0.014165 | 0.009188 | 0 | 0 | 0 | 0 | 0 | 1 | 0.140351 | false | 0 | 0.087719 | 0.017544 | 0.315789 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
52818b991911437a0cb363eb9962f52757412448 | 2,605 | py | Python | tests/unit/h_api/model/json_api_test.py | hypothesis/h-api | 9e8b6a46abdae796241c61e41ad02b695446dc00 | [
"BSD-2-Clause"
] | null | null | null | tests/unit/h_api/model/json_api_test.py | hypothesis/h-api | 9e8b6a46abdae796241c61e41ad02b695446dc00 | [
"BSD-2-Clause"
] | 7 | 2020-04-16T12:58:42.000Z | 2021-05-11T08:13:30.000Z | tests/unit/h_api/model/json_api_test.py | hypothesis/h-api | 9e8b6a46abdae796241c61e41ad02b695446dc00 | [
"BSD-2-Clause"
] | 1 | 2020-05-28T16:31:09.000Z | 2020-05-28T16:31:09.000Z | from h_matchers import Any
from h_api.enums import DataType
from h_api.model.json_api import JSONAPIData, JSONAPIError, JSONAPIErrorBody
class TestJSONAPIErrorBody:
    def test_create(self):
        """A fully specified error body serializes all fields."""
        extra_meta = {"metadata": 1}
        body = JSONAPIErrorBody.create(
            KeyError("message"),
            title="title",
            detail="detail",
            pointer="_pointer",
            status=200,
            meta=extra_meta,
        )
        assert isinstance(body, JSONAPIErrorBody)
        expected = {
            "code": "KeyError",
            "title": "title",
            "detail": "detail",
            "meta": extra_meta,
            "source": {"pointer": "_pointer"},
            "status": "200",
        }
        assert body.raw == expected
        assert body.detail == "detail"

    def test_degenerate_create(self):
        """With only an exception, code/title are derived from it."""
        body = JSONAPIErrorBody.create(KeyError("message"))
        assert body.raw == {"code": "KeyError", "title": "message"}
        assert body.detail is None
class TestJSONAPIError:
    def test_create(self):
        """An error document wraps its bodies under an "errors" list."""
        body = JSONAPIErrorBody.create(KeyError("message"))
        error = JSONAPIError.create([body, body])
        assert isinstance(error, JSONAPIError)
        assert error.raw == {"errors": [body.raw] * 2}
class TestJSONAPIData:
    def test_create(self):
        """A fully specified data object exposes every field it was built with."""
        attrs = {"attrs": 1}
        meta_in = {"some_meta": "value"}
        rels = {"rel_type": {"data": {"type": "foo", "id": "1"}}}
        data = JSONAPIData.create(
            DataType.GROUP,
            "my_id",
            attributes=attrs,
            meta=meta_in,
            id_reference="my_ref",
            relationships=rels,
        )
        assert isinstance(data, JSONAPIData)
        assert data.raw == {
            "data": {
                "type": "group",
                "id": "my_id",
                "attributes": attrs,
                "meta": meta_in,
                "relationships": rels,
            }
        }
        assert data.id == "my_id"
        assert data.type == DataType.GROUP
        assert data.meta == {"some_meta": "value", "$anchor": "my_ref"}
        assert data.attributes == attrs
        assert data.relationships == rels
        assert data.id_reference == "my_ref"

    def test_degenerate_create(self):
        """A bare type yields the minimal data envelope."""
        data = JSONAPIData.create("group")
        assert data.raw == {"data": {"type": "group"}}

    def test_create_with_id_reference_but_no_meta(self):
        """id_reference creates the meta dict when none was given."""
        data = JSONAPIData.create("group", id_reference="ref")
        assert data.raw == {"data": Any.dict.containing({"meta": {"$anchor": "ref"}})}
| 28.626374 | 86 | 0.564299 | 255 | 2,605 | 5.631373 | 0.247059 | 0.062674 | 0.036212 | 0.035515 | 0.255571 | 0.157382 | 0.071031 | 0 | 0 | 0 | 0 | 0.004937 | 0.300192 | 2,605 | 90 | 87 | 28.944444 | 0.782776 | 0 | 0 | 0.161765 | 0 | 0 | 0.127831 | 0 | 0 | 0 | 0 | 0 | 0.25 | 1 | 0.088235 | false | 0 | 0.044118 | 0 | 0.176471 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5282d91fc480e47f39c5a644636c726b415df9ff | 2,152 | py | Python | ChemicalReactions/Solver/PreparationUser.py | temmy222/ReactionModeling | d397b4cdc77c1415369298cc75a49be3798048c1 | [
"Unlicense"
] | null | null | null | ChemicalReactions/Solver/PreparationUser.py | temmy222/ReactionModeling | d397b4cdc77c1415369298cc75a49be3798048c1 | [
"Unlicense"
] | null | null | null | ChemicalReactions/Solver/PreparationUser.py | temmy222/ReactionModeling | d397b4cdc77c1415369298cc75a49be3798048c1 | [
"Unlicense"
] | null | null | null | import os
from Parameters.aqueous import Aqueous
from Preparation.WaterInputUser import WaterInputUser
class PreparationUser(object):
    """
    This class provides methods that prepare the solver for its calculations
    based on a user-supplied list of water species.
    """
    def __init__(self, dest, database, water_species=None, gas_species=None, minerals=None):
        """
        :param dest: working directory holding the input files.
        :param database: name of the thermodynamic database file.
        :param water_species: list of basis species present in the water.
        :param gas_species: stored but not used by this class's methods.
        :param minerals: stored but not used by this class's methods.
        """
        self.dest = dest
        # NOTE(review): changes the process-wide working directory as a
        # side effect -- every later relative path depends on it.
        os.chdir(dest)
        self.database = database
        self.water_species = water_species
        self.gas_species = gas_species
        self.minerals = minerals
        self.aqueous_species = Aqueous(self.dest, self.database)
        self.water_input = WaterInputUser(self.dest, self.database, self.water_species)
    def getAllAqueousComplexesInWater(self):
        """Return the aqueous complexes whose reactants all occur in
        ``water_species``.

        NOTE(review): the trailing ``output[:-1]`` drops the final
        matching complex -- presumably a sentinel database entry; confirm.
        """
        all_aqueous = self.aqueous_species.getAllAqueousComplexes()
        all_reactants = self.aqueous_species.getAllReactants()
        water_species_inside = list(map(lambda x: x.lower(), self.water_species))
        if self.water_input.compareWaterIsABasisSpecie() is True:
            output = []
            # all_reactants[i] lists the reactants of all_aqueous[i];
            # [1:-1] apparently strips surrounding quotes -- TODO confirm
            for i in range(0, len(all_reactants)):
                temp = list(map(lambda x: x.lower()[1:-1], all_reactants[i]))
                if all(x in water_species_inside for x in temp):
                    output.append(all_aqueous[i])
        else:
            raise ValueError("Provided specie list are not all present in database. Please recheck")
        return output[:-1]
    def getUnknowns(self):
        """Return the unique unknown species: every reactant of each
        matching complex plus the complexes themselves, minus H2O."""
        complexes = self.getAllAqueousComplexesInWater()
        reactants = []
        # NOTE(review): "complex" shadows the builtin of the same name.
        for complex in complexes:
            reactants.append(self.aqueous_species.getReactants(complex))
        # Flatten, strip the quote characters, add the complexes, dedupe.
        reactants = [item for sublist in reactants for item in sublist]
        reactants = [i[1:-1] for i in reactants]
        reactants.extend(complexes)
        reactants = list(set(reactants))
        reactants.remove('H2O')
        return reactants
    def massBalance(self, comp):
        """Return the left-hand side of the mass-balance expression for *comp*."""
        lhs = self.aqueous_species.getLeftSide(comp)
        return lhs
| 37.103448 | 100 | 0.652416 | 249 | 2,152 | 5.526104 | 0.361446 | 0.061047 | 0.065407 | 0.034884 | 0.071221 | 0.071221 | 0 | 0 | 0 | 0 | 0 | 0.0044 | 0.260688 | 2,152 | 57 | 101 | 37.754386 | 0.860465 | 0.088755 | 0 | 0 | 0 | 0 | 0.03727 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.075 | 0 | 0.275 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
52833f3ae75ec5d573eb6aadd218de8f1fd55c77 | 2,583 | py | Python | statuspageio/configuration.py | alinbalutoiu/python-statuspageio | c7e0a043417700facd64960c43ee0e903720f57b | [
"MIT"
] | null | null | null | statuspageio/configuration.py | alinbalutoiu/python-statuspageio | c7e0a043417700facd64960c43ee0e903720f57b | [
"MIT"
] | null | null | null | statuspageio/configuration.py | alinbalutoiu/python-statuspageio | c7e0a043417700facd64960c43ee0e903720f57b | [
"MIT"
] | null | null | null | from statuspageio.version import VERSION
from statuspageio.errors import ConfigurationError
import warnings
class Configuration(object):
def __init__(self, **options):
"""
:param str api_key: Personal access token.
:param str page_id: The page_id you wish to manage
:param str organization_id: (optional) The organization id, used for managing user accounts
:param bool verbose: (optional) Verbose/debug mode. Default: ``False``.
:param int timeout: (optional) Connection and response timeout. Default: **30** seconds.
:param bool verify_ssl: (optional) Whether to verify ssl or not. Default: ``True``.
"""
self.api_key = options.get('api_key')
self.page_id = options['page_id']
self.organization_id = options['organization_id'] if 'organization_id' in options else False
self.base_url = 'https://api.statuspage.io'
self.user_agent = 'StatusPage/v1 Python/{0}'.format(VERSION)
self.verbose = options['verbose'] if 'verbose' in options else False
self.timeout = options['timeout'] if 'timeout' in options else 30
self.verify_ssl = options['verify_ssl'] if 'verify_ssl' in options else True
if self.verbose:
print("StatusPage client configuration: " + str(self.__dict__))
def validate(self):
"""Validates whether a configuration is valid.
:rtype: bool
:raises ConfigurationError: if no ``api_key`` provided.
:raises ConfigurationError: if no ``page_id`` provided.
:warns 'No organization_id provided.' if no ``organization_id`` provided
"""
if self.api_key is None:
raise ConfigurationError('No api_key provided. '
'Set your access token during client initialization using: '
'"statuspageio.Client(api_key= <YOUR_PERSONAL_api_key>)"')
if not self.page_id:
raise ConfigurationError('No page_id provided.'
'Set your page id during client initialization using: '
'"statuspageiocrm.Client(page_id= <YOUR_PERSONAL_page_id>)"')
if not self.organization_id:
warnings.warn('No organization_id provided.'
'You will be unable to manage users. Set your organization_id during client initialization using: '
'"statuspageiocrm.Client(organization_id= <YOUR_PERSONAL_page_id>)"')
return True | 44.534483 | 125 | 0.626016 | 294 | 2,583 | 5.336735 | 0.323129 | 0.042065 | 0.033142 | 0.045889 | 0.155513 | 0.068834 | 0.068834 | 0 | 0 | 0 | 0 | 0.003243 | 0.283779 | 2,583 | 58 | 126 | 44.534483 | 0.844865 | 0.260937 | 0 | 0 | 0 | 0 | 0.35 | 0.097778 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068966 | false | 0 | 0.103448 | 0 | 0.241379 | 0.034483 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
52867c6f31589aef796fca1adcef07cf20ac6646 | 1,981 | py | Python | tests/operators/callable_test.py | sgissinger/grappa | 51157a828d5cfdc731cada9b16255eaaf1cabbe6 | [
"MIT"
] | 137 | 2017-03-28T10:19:07.000Z | 2022-01-30T19:21:32.000Z | tests/operators/callable_test.py | sgissinger/grappa | 51157a828d5cfdc731cada9b16255eaaf1cabbe6 | [
"MIT"
] | 47 | 2017-03-19T23:08:48.000Z | 2021-01-25T15:18:10.000Z | tests/operators/callable_test.py | grappa-project/grappa | f1861e1572e68f031977e86a5d9eba1957bd164e | [
"MIT"
] | 17 | 2017-03-28T10:39:13.000Z | 2021-07-23T20:50:15.000Z | import pytest
from grappa.operators.callable import CallableOperator
def test_should_callable(should):
    """`should.be.callable` passes for callables and raises for non-callables."""
    # Functions, lambdas, classes and (bound/unbound) methods are callable.
    test_should_callable | should.be.callable
    (lambda x: x) | should.be.callable
    CallableOperator | should.be.callable
    CallableOperator.match | should.be.callable

    # Non-callable values must fail the assertion.
    with pytest.raises(AssertionError):
        tuple() | should.be.callable

    with pytest.raises(AssertionError):
        0 | should.be.callable
def test_expect_callable(expect):
    """Same checks as test_should_callable, but through the `expect` style."""
    # Functions, lambdas, classes and methods are callable.
    test_expect_callable | expect.to.be.callable
    (lambda x: x) | expect.to.be.callable
    CallableOperator | expect.to.be.callable
    CallableOperator.match | expect.to.be.callable

    # Non-callable values must fail the assertion.
    with pytest.raises(AssertionError):
        tuple() | expect.to.be.callable

    with pytest.raises(AssertionError):
        0 | expect.to.be.callable
def test_callable_operator(ctx):
    """CallableOperator.match returns a (matched, reasons) tuple."""
    # Callables match with no failure reasons.
    assert CallableOperator(ctx).match(lambda x: x) == (True, [])
    assert CallableOperator(ctx).match(CallableOperator) == (True, [])
    assert CallableOperator(ctx).match(CallableOperator.match) == (True, [])

    # Non-callables fail without an extra reason...
    assert CallableOperator(ctx).match(0) == (False, [])
    assert CallableOperator(ctx).match('foo') == (False, [])
    assert CallableOperator(ctx).match(iter([1, 2, 3])) == (False, [])
    # ...except None, which gets a dedicated explanatory message.
    assert CallableOperator(ctx).match(None) == (
        False, ['a callable value cannot be "None"'])
def test_callable_operator_properties(should):
    """The operator declares the metadata attributes grappa relies on."""
    # Accessor operators take no expected value operand.
    (CallableOperator
        | should.have.property('kind')
        > should.be.equal.to('accessor'))

    # Registered under exactly one operator name and no aliases.
    (CallableOperator
        | should.have.property('operators')
        > should.have.length.of(1)
        > should.be.equal.to(('callable',)))

    CallableOperator | should.have.property('aliases') > should.be.empty
    # Message templates used when rendering assertion failures.
    CallableOperator | should.have.property('expected_message')
    CallableOperator | should.have.property('subject_message')

    (CallableOperator | should.have.property('information')
        > should.be.a('tuple')
        > should.have.length.of(2))
| 32.47541 | 76 | 0.686522 | 222 | 1,981 | 6.058559 | 0.220721 | 0.089219 | 0.130112 | 0.156134 | 0.464684 | 0.223048 | 0.148699 | 0.071375 | 0 | 0 | 0 | 0.004905 | 0.176678 | 1,981 | 60 | 77 | 33.016667 | 0.819742 | 0 | 0 | 0.139535 | 0 | 0 | 0.060071 | 0 | 0 | 0 | 0 | 0 | 0.255814 | 1 | 0.093023 | false | 0 | 0.046512 | 0 | 0.139535 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5286b04633fffbe3ecb4fc6a0f8a1d3e895353f9 | 1,471 | py | Python | master/scripts/planner/solvers/hyperparameter_optimization/lambda_test.py | OPU-Surveillance-System/monitoring | 2c2c657c74fce9a5938d986372f9077708617d9c | [
"MIT"
] | 4 | 2020-12-24T11:51:28.000Z | 2022-02-08T09:02:38.000Z | master/scripts/planner/solvers/hyperparameter_optimization/lambda_test.py | OPU-Surveillance-System/monitoring | 2c2c657c74fce9a5938d986372f9077708617d9c | [
"MIT"
] | 1 | 2021-11-16T02:54:35.000Z | 2021-11-16T02:54:35.000Z | master/scripts/planner/solvers/hyperparameter_optimization/lambda_test.py | OPU-Surveillance-System/monitoring | 2c2c657c74fce9a5938d986372f9077708617d9c | [
"MIT"
] | null | null | null | from tqdm import tqdm
import pickle
from sys import path
path.append("..")
import numpy as np
import math
from uncertainty_solver import UncertaintySimulatedAnnealingSolver, UncertaintyRandomSolver
import map_converter as m
# Sweep the exponential-decay rate lambda over a range of time budgets and
# record the mean simulated-annealing plan cost for each budget.
solutions = {}
# Fixed set of (x, y) surveillance points used for every run.
state = [(113, 128), (4, 112), (132, 105), (108, 64), (62, 42), (4, 140), (22, 150), (45, 4), (83, 150), (86, 15), (37, 152), (49, 140), (97, 128), (93, 79), (133, 10), (85, 39), (63, 151), (180, 79), (120, 86), (94, 102), (14, 81), (201, 123), (60, 112), (185, 144), (33, 133), (117, 40), (26, 124), (196, 70)]
# Time budgets (seconds): 1 to 180 minutes in 30 steps.
time = np.linspace(1 * 60, 180 * 60, num=30)
nb_drone = 1
# Context managers guarantee the handles are closed even if an exception
# interrupts the (long-running) sweep below.
with open("../../webserver/data/serialization/mapper.pickle", "rb") as fs:
    mapper = pickle.load(fs)
for t in tqdm(time):
    mean = []
    # Decay rate such that the exponential term reaches 99% at time t.
    lam = math.log(1 - 0.99) / t
    # Average over 20 independent random restarts.
    for i in range(20):
        rplan = UncertaintyRandomSolver(state, mapper, nb_drone, lam)
        rplan.solve()
        saplan = UncertaintySimulatedAnnealingSolver(rplan.state, mapper, nb_drone, lam)
        saplan.copy_strategy = "slice"
        saplan.steps = 200000
        saplan.Tmax = 1197
        saplan.Tmin = 0.01
        saplan.updates = 0
        itinerary, energy = saplan.solve()
        mean.append(energy / 10000)
    mean = sum(mean) / len(mean)
    # Append progress to a memo file so partial results survive a crash.
    with open("memo", "a") as f:
        f.write(str(t) + " " + str(mean) + "\n")
    solutions[t] = mean
with open("../../webserver/data/serialization/lambda_test.pickle", "wb") as fs:
    pickle.dump(solutions, fs)
| 35.878049 | 311 | 0.60775 | 209 | 1,471 | 4.244019 | 0.583732 | 0.023675 | 0.033822 | 0.042841 | 0.119504 | 0 | 0 | 0 | 0 | 0 | 0 | 0.146404 | 0.205982 | 1,471 | 40 | 312 | 36.775 | 0.613014 | 0 | 0 | 0.054054 | 0 | 0 | 0.081577 | 0.068661 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.189189 | 0 | 0.189189 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
528958b2ecbd0bfe1de858e2a122190bfb1a9eef | 1,211 | py | Python | web3engine/dtfactory.py | marc4gov/tokenspice2 | 1993383674f35b20e11e54606b3dac8e4c05c0f9 | [
"Apache-2.0"
] | 1 | 2021-01-12T08:06:21.000Z | 2021-01-12T08:06:21.000Z | web3engine/dtfactory.py | marc4gov/tokenspice2 | 1993383674f35b20e11e54606b3dac8e4c05c0f9 | [
"Apache-2.0"
] | null | null | null | web3engine/dtfactory.py | marc4gov/tokenspice2 | 1993383674f35b20e11e54606b3dac8e4c05c0f9 | [
"Apache-2.0"
] | null | null | null | from enforce_typing import enforce_types # type: ignore[import]
import warnings
from web3tools import web3util, web3wallet
@enforce_types
class DTFactory:
    """Thin Python wrapper around the deployed DTFactory contract.

    The contract address and ABI are resolved by class name via web3util,
    so the wrapper needs no constructor arguments.
    """
    def __init__(self):
        name = self.__class__.__name__
        abi = web3util.abi(name)
        web3 = web3util.get_web3()
        contract_address = web3util.contractAddress(name)
        self.contract = web3.eth.contract(contract_address, abi=abi)

    @property
    def address(self):
        # On-chain address of the wrapped factory contract.
        return self.contract.address

    #============================================================
    #reflect DTFactory Solidity methods
    def createToken(self, blob:str, name:str, symbol:str, cap_base:int,
                    from_wallet: web3wallet.Web3Wallet) -> str:
        """Deploy a new datatoken and return its contract address.

        Sends a createToken() transaction signed by `from_wallet`, then
        reads the new token's address from the TokenCreated event log of
        the transaction receipt.
        """
        f = self.contract.functions.createToken(blob, name, symbol, cap_base)
        (tx_hash, tx_receipt) = web3wallet.buildAndSendTx(f, from_wallet)

        # NOTE(review): this silences warnings process-wide until the
        # resetwarnings() below -- intended only for processReceipt's
        # event-decoding warning.
        warnings.filterwarnings("ignore") #ignore unwarranted warning up next
        rich_logs = getattr(self.contract.events, 'TokenCreated')().processReceipt(tx_receipt)
        token_address = rich_logs[0]['args']['newTokenAddress']
        warnings.resetwarnings()

        return token_address
| 37.84375 | 94 | 0.646573 | 127 | 1,211 | 5.937008 | 0.464567 | 0.06366 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013684 | 0.215524 | 1,211 | 31 | 95 | 39.064516 | 0.78 | 0.122213 | 0 | 0 | 0 | 0 | 0.034972 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.130435 | false | 0 | 0.130435 | 0.043478 | 0.391304 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5289888b6dd630121c268a32e16bb4d524f85a6e | 797 | py | Python | management/doc.py | jonathanverner/brython-jinja2 | cec6e16de1750203a858d0acf590f230fc3bf848 | [
"BSD-3-Clause"
] | 2 | 2020-09-13T17:51:55.000Z | 2020-11-25T18:47:12.000Z | management/doc.py | jonathanverner/brython-jinja2 | cec6e16de1750203a858d0acf590f230fc3bf848 | [
"BSD-3-Clause"
] | 2 | 2020-11-25T19:18:15.000Z | 2021-06-01T21:48:12.000Z | management/doc.py | jonathanverner/brython-jinja2 | cec6e16de1750203a858d0acf590f230fc3bf848 | [
"BSD-3-Clause"
] | null | null | null | from http import server
import os
from plumbum import local, ProcessExecutionError
import sys
from webbrowser import open_new_tab
from .utils import M
# plumbum wrappers for the Sphinx executables found on PATH.
sphinx = local['sphinx-build']
# Base sphinx-build arguments; -d points at the doctree cache directory.
sphinx_args = ["-d", "_build/doctrees"]
apidoc = local['sphinx-apidoc']
@M.command()
def build(format="html"):
    """Regenerate the API stubs and build the docs with Sphinx.

    :param format: Sphinx builder to use (e.g. "html" or "latex").
    """
    # Work on a copy: extending the module-level sphinx_args in place meant
    # repeated latex builds kept accumulating duplicate "-D" flags.
    args = list(sphinx_args)
    if format == "latex":
        args.extend(["-D", "latex_paper_size=a4"])
    apidoc("-o", './doc/en/api/', './src/')
    with local.cwd('./doc/en'):
        sphinx(".", "_build", "-b", format, *args, stdout=sys.stdout, stderr=sys.stderr)
@M.command()
def view(port=7364):
    """Serve the built HTML docs on localhost and open them in a browser."""
    with local.cwd('./doc/en/_build/'):
        open_new_tab("http://localhost:{}/".format(port))
        # Blocks here, serving the _build directory until interrupted.
        server.test(HandlerClass=server.SimpleHTTPRequestHandler, ServerClass=server.HTTPServer, port=port)
| 26.566667 | 107 | 0.673777 | 105 | 797 | 5 | 0.47619 | 0.057143 | 0.038095 | 0.057143 | 0.064762 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007353 | 0.146801 | 797 | 29 | 108 | 27.482759 | 0.764706 | 0 | 0 | 0.095238 | 0 | 0 | 0.183417 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095238 | false | 0 | 0.285714 | 0 | 0.380952 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
528a7b613c435f3e7a339f9843dad77a07147873 | 533 | py | Python | scripts/build_matrix.py | TuftsBCB/python-rwr | 557f02dcd3dc1bbeb4859df5b88fe3542094dfd7 | [
"MIT"
] | 30 | 2017-02-13T02:02:27.000Z | 2021-06-28T07:51:59.000Z | scripts/build_matrix.py | TuftsBCB/python-rwr | 557f02dcd3dc1bbeb4859df5b88fe3542094dfd7 | [
"MIT"
] | 3 | 2017-06-19T09:25:39.000Z | 2018-09-22T17:54:42.000Z | scripts/build_matrix.py | TuftsBCB/python-rwr | 557f02dcd3dc1bbeb4859df5b88fe3542094dfd7 | [
"MIT"
] | 16 | 2017-04-07T14:27:16.000Z | 2020-04-02T08:22:25.000Z | import sys
import numpy as np
def main(argv):
    """Stack per-node RWR vectors into one matrix and save it as text.

    argv layout (sys.argv style): argv[1] file prefix, argv[2] number of
    '<prefix>.<idx>.rwr' files to read, argv[3] output filename.
    Exits with an error message if any input file cannot be read.
    """
    file_prefix = argv[1]
    num_files = int(argv[2])
    output_filename = argv[3]
    matrix = []
    for idx in range(num_files):
        filename = '{}.{}.rwr'.format(file_prefix, idx)
        # Let loadtxt open the file itself; the previous extra open() was
        # never closed (leaked handle) and only duplicated this check.
        try:
            matrix.append(np.loadtxt(filename))
        except OSError:  # OSError == IOError on Python 3
            sys.exit('Could not open file: {}'.format(filename))
    np.savetxt(output_filename, np.array(matrix), fmt='%.10f')

if __name__ == '__main__':
    main(sys.argv)
| 23.173913 | 64 | 0.592871 | 70 | 533 | 4.314286 | 0.6 | 0.066225 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012658 | 0.258912 | 533 | 22 | 65 | 24.227273 | 0.751899 | 0 | 0 | 0 | 0 | 0 | 0.086304 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.117647 | 0 | 0.176471 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
528a815d4af0dd59025526397d7dc223c4e303da | 1,097 | py | Python | home_work/lesson7/WorldSpot.py | JayIvhen/StudyRepoPython2014 | 96affa9b3d5cb342d9c9ef8582610c9d7a0e7f5d | [
"Unlicense"
] | 1 | 2018-10-11T09:48:30.000Z | 2018-10-11T09:48:30.000Z | home_work/lesson7/WorldSpot.py | JayIvhen/StudyRepoPython2014 | 96affa9b3d5cb342d9c9ef8582610c9d7a0e7f5d | [
"Unlicense"
] | null | null | null | home_work/lesson7/WorldSpot.py | JayIvhen/StudyRepoPython2014 | 96affa9b3d5cb342d9c9ef8582610c9d7a0e7f5d | [
"Unlicense"
] | null | null | null | from __future__ import print_function
# vim: set fileencoding= UTF-8
#!usr/bin/python
"""Word Spot
ENG: Enter string. Output. All world in order they came in, if any word have appired more then once, print index in (paranthesise)
input: qwe sdf tyu qwe sdf try sdf qwe sdf rty sdf wer sdf wer
output:qwe(7) sdf(12) tyu try rty wer(13)
lecture 7 task 3.
http://uneex.ru/LecturesCMC/PythonIntro2014/07_LanguageExtensions
"""
__author__ = "JayIvhen"
from collections import OrderedDict
words_dict = OrderedDict()
words = raw_input().strip(" ").split(" ")
# add words in dict as keys and if it already added add its index. Otherwise add 0
for index in xrange(len(words)):
if words_dict.has_key(words[index]):
words_dict[words[index]] = index
else:
words_dict[words[index]] = 0
# Print words in order they came in. if value = 0 print just word, otherwise print word(index, when last seen in text)
for index in words_dict:
if words_dict[index]:
print("{}({})".format(index, words_dict[index]), end=' ')
else:
print(index, end=' ')
print()
| 25.511628 | 130 | 0.69918 | 172 | 1,097 | 4.348837 | 0.505814 | 0.084225 | 0.029412 | 0.040107 | 0.050802 | 0.050802 | 0 | 0 | 0 | 0 | 0 | 0.019166 | 0.191431 | 1,097 | 42 | 131 | 26.119048 | 0.824126 | 0.21969 | 0 | 0.125 | 0 | 0 | 0.035225 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.125 | 0 | 0.125 | 0.25 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
528feda6f44e18e7b731c9fd4e6d5025c5865a14 | 3,346 | py | Python | simfection/simulation_run.py | gvermillion/simfection | 94882690b3ee6f99df692d0ae77bfb9bf207c14b | [
"MIT"
] | 1 | 2020-06-14T06:32:53.000Z | 2020-06-14T06:32:53.000Z | simfection/simulation_run.py | simfection/simfection | 94882690b3ee6f99df692d0ae77bfb9bf207c14b | [
"MIT"
] | 17 | 2020-06-14T06:46:48.000Z | 2020-07-05T18:41:15.000Z | simfection/simulation_run.py | gvermillion/simfection | 94882690b3ee6f99df692d0ae77bfb9bf207c14b | [
"MIT"
] | null | null | null | from .population_engine import PopulationEngine
from .simulation_day import SimulationDay
from .settings import SimfectionSettings
from .logger import SimfectionLogger
from .path import SimfectionPath
from .arguments import _get_parser, simfection_args
import pickle
import time
import os
# Module-level logger shared by SimulationRun and main().
simfection_logger = SimfectionLogger(name=__name__)
logger = simfection_logger.get_logger()
class SimulationRun():
    """Owns one simulation: its settings, population, and per-day history.

    A run can start fresh (synthesizing a new population) or resume from a
    pickled previous run, in which case the population of the last saved
    day is reused and day numbering continues from it.
    """

    def __init__(self, settings: dict = None) -> None:
        logger.info('+ Initializing Simfection Run.')
        # Set settings
        self.settings = SimfectionSettings(settings)
        self.path = SimfectionPath(base_path=self.settings.get_setting('base_path'))
        logger.info('+ Building directory structure at {}.'.format(self.path.base()))
        self.path.build_directory_structure()

        # Needed to avoid numexpr.utils from writing to log
        if 'NUMEXPR_NUM_THREADS' not in os.environ.keys():
            logger.debug('+ Setting NUMEXP_NUM_THREADS to 4.')
            os.environ["NUMEXPR_NUM_THREADS"] = "4"

        if self.settings.get_setting('previous_run') is None:
            # Fresh run: synthesize a population and a time-stamped run id.
            self.population = PopulationEngine(self.settings)
            self.population.synthesize_population()
            self.days = None
            self.run_id = 'simfection_{}'.format(int(time.time()))
        else:  # Restarting
            logger.info('+ Restarting from previous run.')
            # NOTE(review): pickle.load executes arbitrary code if the file
            # is untrusted -- only resume from runs you produced yourself.
            with open(self.settings.get_setting('previous_run'), 'rb') as _file:
                previous_run = pickle.load(_file)
            self.days = previous_run.days
            self.run_id = previous_run.run_id
            # Drop the loaded object early; only days/run_id are kept.
            del previous_run
            logger.info('- Loading population.')
            self.population = self.days[-1].population

    def run(self):
        """Simulate `num_days` consecutive days, saving each day and the run."""
        num_days = self.settings.get_setting('num_days')
        population = self.population
        if self.days is None:
            self.days = []
        logger.info('+ Running {} days.'.format(num_days))
        for today in range(num_days):
            # Get day number: continue from the last recorded day, if any.
            if len(self.days) == 0:
                day_number = today
            else:
                day_number = self.days[-1].day_number + 1

            if today == 0:
                # First day of this invocation uses the starting population.
                day = SimulationDay(
                    self.run_id,
                    population=population,
                    day_number=day_number,
                    settings=self.settings
                )
            else:
                # Subsequent days chain off yesterday's resulting population.
                yesterday = self.days[-1]
                day = SimulationDay(
                    self.run_id,
                    population=yesterday.population,
                    day_number=day_number,
                    settings=self.settings
                )
            day.run()
            self.days.append(day)
            self.path.save_day(day)
        logger.info('- All days ran successfully.')

        logger.info('+ Saving run.')
        self.path.save_run(self)
        self.path.move_log()
def main():
    """CLI entry point: collect the supplied options and run a simulation."""
    cli_args = _get_parser(simfection_args).parse_args()
    # Keep only the options the user actually provided on the command line.
    settings = {}
    for name in vars(cli_args):
        value = getattr(cli_args, name)
        if value is not None:
            settings[name] = value
    simulation = SimulationRun(settings) if settings else SimulationRun()
    simulation.run()

if __name__ == '__main__':
    main()
| 33.46 | 85 | 0.588165 | 364 | 3,346 | 5.211538 | 0.266484 | 0.056932 | 0.031629 | 0.046389 | 0.122298 | 0.122298 | 0.050606 | 0.050606 | 0 | 0 | 0 | 0.003484 | 0.313808 | 3,346 | 99 | 86 | 33.79798 | 0.822735 | 0.0263 | 0 | 0.144578 | 0 | 0 | 0.096834 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.036145 | false | 0 | 0.108434 | 0 | 0.156627 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
52906eb370787de84f31e6bc46ae36bf4a924390 | 272 | py | Python | conf/config.py | ElevenPaths/FARO | 4d5585a1f08ce74baff3acf92668646dc9919439 | [
"MIT"
] | 8 | 2020-04-17T11:35:14.000Z | 2022-01-13T05:07:37.000Z | conf/config.py | ElevenPaths/FARO | 4d5585a1f08ce74baff3acf92668646dc9919439 | [
"MIT"
] | 1 | 2020-08-03T15:38:14.000Z | 2020-08-03T15:38:14.000Z | conf/config.py | ElevenPaths/FARO | 4d5585a1f08ce74baff3acf92668646dc9919439 | [
"MIT"
] | 1 | 2020-09-28T02:50:34.000Z | 2020-09-28T02:50:34.000Z | # Logger
import logging
import os
LOG_FILE_NAME = 'faro-community.log'
LOG_LEVEL = os.getenv('FARO_LOG_LEVEL', "INFO")
logging.basicConfig(
level=LOG_LEVEL,
format="%(levelname)s: %(name)20s: %(message)s",
handlers=[logging.StreamHandler()]
) | 22.666667 | 56 | 0.669118 | 34 | 272 | 5.176471 | 0.588235 | 0.136364 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008969 | 0.180147 | 272 | 12 | 57 | 22.666667 | 0.780269 | 0.022059 | 0 | 0 | 0 | 0 | 0.279245 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.222222 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5295ed2ef205147d8a45e718ca500ece9f69523d | 1,694 | py | Python | src/tidypy/tools/manifest.py | douardda/tidypy | 9d4c6470af8e0ca85209333a99787290f36498d4 | [
"MIT"
] | null | null | null | src/tidypy/tools/manifest.py | douardda/tidypy | 9d4c6470af8e0ca85209333a99787290f36498d4 | [
"MIT"
] | null | null | null | src/tidypy/tools/manifest.py | douardda/tidypy | 9d4c6470af8e0ca85209333a99787290f36498d4 | [
"MIT"
] | null | null | null |
import os
from functools import partial
import check_manifest
from .base import Tool, Issue
# check-manifest messages that indicate success and therefore should not
# be reported back as TidyPy issues.
IGNORE_MSGS = (
    'lists of files in version control and sdist match',
)
class CheckManifestIssue(Issue):
    """An issue reported by check-manifest, attributed to the 'manifest' tool."""
    tool = 'manifest'
    # Reported as a pylint-style "warning" when exported in that format.
    pylint_type = 'W'
class CheckManifestTool(Tool):
    """
    Uses the check-manifest script to detect discrepancies or problems with
    your project's MANIFEST.in file.
    """

    @classmethod
    def get_default_config(cls):
        # By default, only run on directories that contain a setup.py.
        config = Tool.get_default_config()
        config['filters'] = [
            r'setup\.py$',
        ]
        return config

    @classmethod
    def get_all_codes(cls):
        # check-manifest has no structured codes; its severities double as codes.
        return [
            ('info', 'info'),
            ('warning', 'warning'),
            ('error', 'error'),
        ]

    def execute(self, finder):
        issues = []

        def capture(code, message):
            # Collect check-manifest output as issues, skipping "all good"
            # messages. `dirname` is read late-bound from the loop below,
            # so it always refers to the project currently being checked.
            if message in IGNORE_MSGS:
                return
            issues.append(CheckManifestIssue(
                code,
                message,
                os.path.join(dirname, 'MANIFEST.in'),
            ))

        # NOTE(review): check-manifest exposes no reporting hook, so its
        # module-level output functions are monkeypatched (and not restored
        # afterwards) to route messages into `capture`.
        check_manifest.info = partial(capture, 'info')
        check_manifest.warning = partial(capture, 'warning')
        check_manifest.error = partial(capture, 'error')

        for filepath in finder.files(self.config['filters']):
            dirname, _ = os.path.split(filepath)
            try:
                check_manifest.check_manifest(dirname)
            except check_manifest.Failure as exc:
                issues.append(CheckManifestIssue(
                    'error',
                    'Unexpected error: %s' % (exc,),
                    filepath,
                ))

        return issues
| 23.527778 | 75 | 0.548406 | 164 | 1,694 | 5.560976 | 0.469512 | 0.114035 | 0.037281 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.35183 | 1,694 | 71 | 76 | 23.859155 | 0.830601 | 0.061393 | 0 | 0.122449 | 0 | 0 | 0.105867 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081633 | false | 0 | 0.081633 | 0.020408 | 0.326531 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5295fca6e06ce3dbf6409491d1dc7f9ee987cea2 | 619 | py | Python | Easy/decrypt_string_from_alphabet_to_integer_mapping.py | BrynjarGeir/LeetCode | dbd57e645c5398dec538b6466215b61491c8d1d9 | [
"MIT"
] | null | null | null | Easy/decrypt_string_from_alphabet_to_integer_mapping.py | BrynjarGeir/LeetCode | dbd57e645c5398dec538b6466215b61491c8d1d9 | [
"MIT"
] | null | null | null | Easy/decrypt_string_from_alphabet_to_integer_mapping.py | BrynjarGeir/LeetCode | dbd57e645c5398dec538b6466215b61491c8d1d9 | [
"MIT"
] | null | null | null | from string import ascii_lowercase
class Solution:
    def freqAlphabets(self, s: str) -> str:
        """Decode a digit string where '1'-'9' map to a-i and '10#'-'26#' map to j-z."""
        decoded = []
        i = 0
        n = len(s)
        while i < n:
            # A '#' two positions ahead marks a two-digit code.
            if i + 2 < n and s[i + 2] == '#':
                decoded.append(chr(ord('a') + int(s[i:i + 2]) - 1))
                i += 3
            else:
                decoded.append(chr(ord('a') + int(s[i]) - 1))
                i += 1
        return ''.join(decoded)
return ''.join(ans) | 36.411765 | 81 | 0.481422 | 88 | 619 | 3.363636 | 0.409091 | 0.135135 | 0.060811 | 0.054054 | 0.155405 | 0.101351 | 0 | 0 | 0 | 0 | 0 | 0.043147 | 0.36349 | 619 | 17 | 82 | 36.411765 | 0.708122 | 0 | 0 | 0 | 0 | 0 | 0.001613 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.058824 | 0 | 0.235294 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
529993ca855fa5daf954ef152e365a2748d1400f | 1,229 | py | Python | interactive/interactive_autoscroll.py | daliasen/LED-Cube | 3959ee5caf86c1497ac22231d87a8009bed5b3e8 | [
"BSD-3-Clause"
] | 4 | 2018-08-19T09:16:40.000Z | 2020-01-27T13:18:19.000Z | interactive/interactive_autoscroll.py | daliasen/LED-Cube | 3959ee5caf86c1497ac22231d87a8009bed5b3e8 | [
"BSD-3-Clause"
] | null | null | null | interactive/interactive_autoscroll.py | daliasen/LED-Cube | 3959ee5caf86c1497ac22231d87a8009bed5b3e8 | [
"BSD-3-Clause"
] | 3 | 2018-08-09T13:30:29.000Z | 2020-01-26T16:19:23.000Z | from .interactive import *
from visuals.cube import *
from display import *
import random
def parse_colour(input):
    """Parse an 'r,g,b' string into a Colour, or None if malformed.

    None is the failure signal parse_input() relies on; previously a
    non-numeric component raised ValueError instead of returning None.
    """
    parts = input.split(',')
    if len(parts) != 3:
        return None
    try:
        return Colour((int(parts[0]), int(parts[1]), int(parts[2])))
    except ValueError:
        # Non-numeric component: treat like any other parse failure.
        return None
def parse_input(input):
    """Parse a 'px;px|px;px' encoded SIZE x SIZE colour grid, or None on any error."""
    rows = input.split('|')
    if len(rows) != SIZE:
        return None
    grid = []
    for row in rows:
        cells = row.split(';')
        if len(cells) != SIZE:
            return None
        parsed_row = [parse_colour(cell) for cell in cells]
        # Any unparseable pixel invalidates the whole frame.
        if any(cell is None for cell in parsed_row):
            return None
        grid.append(parsed_row)
    return grid
class Autoscroll(Interactive):
    """Interactive mode that pushes each received frame onto the back of
    the cube, scrolling the previous frames one layer forward."""

    def run(self):
        # Generator protocol: yield a snapshot of the cube and suspend
        # until the framework delivers the next chunk of input.
        self.clear_input()
        c = Cube()
        while True:
            yield wait_for_input(value = c.copy())
            input = self.get_input()
            if input is not None:
                grid = parse_input(input)
                # Malformed frames (parse_input -> None) are ignored.
                if grid is not None:
                    scroll_back(c, Direction.BACK, new_layer = grid)
def scroll_back(cube, direction, new_layer = Colour.BLACK):
    """Shift every layer of `cube` one step along `direction`, filling the
    vacated face with `new_layer` (a single colour or a full pixel grid)."""
    # Copy each layer from its neighbour, then insert the new data at the
    # opposite face.
    for i in range(cube.size - 1):
        cube.fill_layer(direction, i, cube.get_layer(direction, i + 1))
    cube.fill_layer(opposite_direction(direction), 0, new_layer)
| 27.311111 | 67 | 0.646867 | 181 | 1,229 | 4.298343 | 0.325967 | 0.051414 | 0.03856 | 0.03856 | 0.056555 | 0.056555 | 0 | 0 | 0 | 0 | 0 | 0.007368 | 0.227014 | 1,229 | 44 | 68 | 27.931818 | 0.811579 | 0 | 0 | 0.102564 | 0 | 0 | 0.002441 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.102564 | false | 0 | 0.102564 | 0 | 0.384615 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bfdb4e7226187fdddac3ab5cddc12e80efe262e1 | 13,585 | py | Python | Reform/def_ply_parser.py | kornai/4lang | a22f2e4b525f83145165f16da0f2012373d1593a | [
"MIT"
] | 20 | 2016-03-01T07:34:17.000Z | 2021-09-06T11:08:11.000Z | Reform/def_ply_parser.py | kornai/4lang | a22f2e4b525f83145165f16da0f2012373d1593a | [
"MIT"
] | 103 | 2015-02-03T13:34:55.000Z | 2020-07-13T11:21:22.000Z | Reform/def_ply_parser.py | kornai/4lang | a22f2e4b525f83145165f16da0f2012373d1593a | [
"MIT"
] | 14 | 2015-02-03T09:00:17.000Z | 2021-12-15T11:26:30.000Z | # Copyright © 2021 Adam Kovacs <adaam.ko@gmail.com>
# Distributed under terms of the MIT license.
from ply import lex
from ply.lex import TOKEN
import ply.yacc as yacc
import sys
import getopt
import argparse
import os
import re
# Inventory of binary relation names, loaded from the "binaries" file
# shipped next to this module.
BINARIES = []
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), "binaries"), 'r', encoding="utf-8") as f:
    for line in f:
        BINARIES.append(line.strip())
# Longest names first, so the lexer's RELATION regex alternation (built
# from this list) prefers the longest possible match.
BINARIES.sort(key=lambda x: len(x), reverse=True)
class FourlangLexer():
    """PLY lexer for 4lang definition expressions.

    PLY tries function-based token rules in definition order, so
    t_RELATION (known binary relations) is declared before t_CLAUSE
    (generic words) to keep relation names from being lexed as clauses.
    """

    def __init__(self):
        self.lexer = lex.lex(module=self)

    tokens = (
        'CLAUSE',
        'RELATION',
        'PUNCT',
        'SQUAREBR',
        'SQUAREBL',
        'ROUNDBR',
        'ROUNDBL',
        'EQUAL',
        'CURLYBR',
        'CURLYBL',
        'ANGLEBR',
        'ANGLEBL',
    )

    # Spaces and tabs are skipped between tokens.
    t_ignore = ' \t'

    # r'(\b(?!FOLLOW|AT|INTO|HAS|ABOUT)\b[a-zA-Z]+)|(^[a-zA-Z]+\/[0-9]+)|(^@[a-zA-Z]+)|(^"[a-zA-Z]+"$)|(^/=[A-Z]+)'
    t_PUNCT = r','
    t_SQUAREBR = r'\]'
    t_SQUAREBL = r'\['
    t_ROUNDBR = r'\)'
    t_ROUNDBL = r'\('
    t_CURLYBR = r'\}'
    t_CURLYBL = r'\{'
    t_ANGLEBR = r'\>'
    t_ANGLEBL = r'\<'

    # Binary relation name, optionally carrying a /N sense suffix.
    @TOKEN(fr'(({"|".join(BINARIES)})\/[0-9]+)|({"|".join(BINARIES)})')
    def t_RELATION(self, t):
        return t

    # Generic word: 'word/N', '@word' or plain 'word' (trailing underscores allowed).
    @TOKEN(r'([a-zA-Z-]+[_]*\/[0-9]+)|(@[a-zA-Z-]+[_]*)|([a-zA-Z-]+[_]*)')
    def t_CLAUSE(self, t):
        return t

    @TOKEN(r'(=pat|=agt)')
    def t_EQUAL(self, t):
        return t

    def t_newline(self, t):
        r'\n+'
        t.lexer.lineno += len(t.value)

    def t_error(self, t):
        print("Invalid Token:", t.value[0])
        # Abort lexing: process() catches this TypeError and records the
        # definition as a syntax error. (An unreachable t.lexer.skip(1)
        # that followed this raise has been removed.)
        raise TypeError("Invalid token %r" % (t.value[0],))
class FourlangParser():
    """PLY (yacc) parser for 4lang definition expressions.

    CAUTION: the docstring of every p_* method *is* the grammar production
    PLY reads via introspection -- editing those strings changes the
    grammar. The semantic actions currently only emit trace prints.
    """

    def __init__(self, lexer):
        # Builds the LALR tables; debug=True writes parser.out and
        # write_tables=True caches the generated tables on disk.
        self.parser = yacc.yacc(module=self, debug=True, write_tables=True)
        self.lexer = lexer

    def parse(self, elements):
        # Parse one definition string; raises TypeError on syntax errors
        # (from p_error below or from the lexer's t_error).
        return self.parser.parse(elements)

    # Token names are shared with the lexer.
    tokens = FourlangLexer.tokens

    # All tokens left-associative; tuple order sets relative precedence.
    precedence = (
        ('left', 'ANGLEBL'),
        ('left', 'ANGLEBR'),
        ('left', 'CURLYBL'),
        ('left', 'CURLYBR'),
        ('left', 'EQUAL'),
        ('left', 'ROUNDBL'),
        ('left', 'ROUNDBR'),
        ('left', 'SQUAREBL'),
        ('left', 'SQUAREBR'),
        ('left', 'PUNCT'),
        ('left', 'CLAUSE'),
        ('left', 'RELATION'),
    )

    # --- grammar productions (the docstrings ARE the rules) ---

    # A definition is one expression followed by zero or more ", expr".
    def p_start(self, p):
        '''start : expr rec'''
        print("p_start")

    def p_rec(self, p):
        '''rec : PUNCT expr rec
        |'''
        print("p_rec")

    def p_clause(self, p):
        '''expr : CLAUSE'''
        print(p[0])
        print(p[1])
        print("p_clause")

    def p_relation(self, p):
        '''expr : RELATION'''
        print(p[0])
        print(p[1])
        print("p_relation")

    # <...> bracketed sub-expression.
    def p_clause_angle(self, p):
        '''expr : ANGLEBL start ANGLEBR'''
        print(p[0])
        print(p[1])
        print("p_clause_angle")

    # {...} bracketed sub-expression.
    def p_expr_curly(self, p):
        '''expr : CURLYBL start CURLYBR'''
        print(p[0])
        print(p[1])
        print("p_expr_curly")

    def p_equal(self, p):
        'expr : EQUAL'
        print(p[0])
        print(p[1])
        print("p_equal")

    # Relation tokens used with one argument on either side, or two.
    def p_relation_clause(self, p):
        'expr : RELATION start'
        print(p[0])
        print(p[1])
        print("p_relation_clause")

    def p_relation_clause_binary(self, p):
        'expr : start RELATION start'
        print(p[0])
        print(p[1])
        print("p_relation_clause_binary")

    def p_clause_relation(self, p):
        'expr : start RELATION'
        print(p[0])
        print(p[1])
        print("p_clause_relation")

    # [...] attached to an expression head.
    def p_square(self, p):
        '''expr : expr SQUAREBL start SQUAREBR
        | EQUAL SQUAREBL start SQUAREBR
        | RELATION SQUAREBL start SQUAREBR
        | CLAUSE SQUAREBL start SQUAREBR'''
        print(p[0])
        print(p[1])
        print("p_square")

    # (...) attached to an expression head.
    def p_round(self, p):
        '''expr : expr ROUNDBL start ROUNDBR
        | EQUAL ROUNDBL start ROUNDBR
        | RELATION ROUNDBL start ROUNDBR
        | CLAUSE ROUNDBL start ROUNDBR'''
        print(p[0])
        print(p[1])
        print("p_round")

    def p_error(self, p):
        # Surface syntax errors as TypeError so process() can record them.
        raise TypeError("unknown text at %r" % (p,))
# Global registries shared by readfile()/process()/main():
# key -> pending (headword, definition) pair awaiting parsing
defs_to_parse = {}
# key -> None while OK, or an 'err ...' diagnostic string
def_states = {}
# key -> raw input line
defs = {}
def get_tokens(line, mode="4lang"):
    """Strip 4lang markup from a definition and return 'headword<TAB>tokens\n'.

    The definition lives in column 7 for 4lang-format rows, column 1
    otherwise. Words containing '<' or '>' are dropped entirely.
    """
    columns = line.strip().split("\t")
    definition = columns[7] if mode == "4lang" else columns[1]
    # Delete decoration characters, turn separators into whitespace, then
    # erase digits and slashes (the /N sense suffixes).
    definition = re.sub('[@"]', '', definition)
    definition = re.sub(r'[,{}()\[\]]', ' ', definition)
    definition = re.sub('[0-9/]', '', definition)
    kept = [word for word in definition.split()
            if '>' not in word and '<' not in word]
    return columns[0] + "\t" + " ".join(kept) + "\n"
def get_top_level_clauses(line, mode="4lang"):
    """Yield the top-level (comma-separated) clauses of a definition.

    Commas nested inside [...], {...}, <...> or (...) do not split.
    The definition lives in column 7 for 4lang-format rows, column 1
    otherwise. (The two original branches were identical apart from a
    dead `filtered_definition` variable; they are unified here.)
    """
    columns = line.strip().split("\t")
    definition = columns[7] if mode == "4lang" else columns[1]
    # Split on a comma only when the rest of the string is a sequence of
    # plain characters and balanced bracket groups (i.e. top level).
    def_phrases = re.split(
        ''',(?=(?:[^\[\]{}<>]|\[[^\]]*\]|{[^}]*}|<[^>]*>|\([^\)]*\))*$)''', definition)
    for phrase in def_phrases:
        yield phrase.strip()
def substitute_root(line, mode="4lang"):
    """Make the headword explicit inside its own definition clauses.

    Single-word clauses become 'headword ISA word'; two-word clauses with
    a known binary relation get the headword inserted on the open side
    ('head REL other' or 'other REL head'); <...> defaults are handled the
    same way inside their brackets. Returns the rebuilt line.
    """
    # `global` declaration is redundant here (BINARIES is only read),
    # but kept for documentation value.
    global BINARIES
    l = line.strip().split("\t")
    if mode == "4lang":
        definition = l[7]
    else:
        definition = l[1]
    # Split into top-level clauses; commas inside brackets do not split.
    def_phrases = re.split(
        ''',(?=(?:[^\[\]{}<>]|\[[^\]]*\]|{[^}]*}|<[^>]*>|\([^\)]*\))*$)''', definition)
    for i, phrase in enumerate(def_phrases):
        # Split the clause into top-level whitespace-separated tokens.
        tokens = re.split(
            '''\s(?=(?:[^\[\]{}<>"]|\[[^\]]*\]|{[^}]*}|<[^>]*>|\([^\)]*\))*$)''', phrase.strip())
        new_tokens = None
        if len(tokens) == 1:
            if tokens[0].startswith("<"):
                # A bracketed default: inspect the tokens inside <...>.
                default_tokens = tokens[0].strip("<>")
                default_tokens_split = re.split(
                    '''\s(?=(?:[^\[\]{}<>"]|\[[^\]]*\]|{[^}]*}|<[^>]*>|\([^\)]*\))*$)''', default_tokens.strip())
                if len(default_tokens_split) == 2:
                    if default_tokens_split[0] in BINARIES:
                        # '<REL y>' -> '<head REL y>'
                        new_tokens = "<%s %s %s>" % (
                            l[0], default_tokens_split[0], default_tokens_split[1])
                    elif default_tokens_split[1] in BINARIES:
                        # '<x REL>' -> '<x REL head>'
                        new_tokens = "<%s %s %s>" % (
                            default_tokens_split[0], default_tokens_split[1], l[0])
                    else:
                        new_tokens = "%s ISA %s" % (l[0], tokens[0])
                else:
                    new_tokens = "%s ISA %s" % (l[0], tokens[0])
            else:
                # Bare single word: the headword is a kind of it.
                new_tokens = "%s ISA %s" % (l[0], tokens[0])
        elif len(tokens) == 2:
            if tokens[0] in BINARIES:
                # 'REL y' -> 'head REL y'
                new_tokens = "%s %s %s" % (l[0], tokens[0], tokens[1])
            elif tokens[1] in BINARIES:
                # 'x REL' -> 'x REL head'
                new_tokens = "%s %s %s" % (tokens[0], tokens[1], l[0])
            else:
                new_tokens = " ".join(tokens)
        if new_tokens:
            def_phrases[i] = new_tokens
    defin = ", ".join(def_phrases)
    if mode == "4lang":
        substituted_line = "\t".join(l[:7]) + "\t" + defin + "\n"
    else:
        substituted_line = l[0] + "\t" + defin + "\n"
    return substituted_line
def filter_line(line, clause, mode="4lang"):
    """Keep only the definition clauses that contain `clause`.

    Returns a rebuilt line ('prefix<TAB>kept clauses' + newline) when at
    least one clause matches; otherwise returns `line` unchanged. (The
    original duplicated the whole body for the two formats; only the
    definition column and the rebuilt prefix actually differ.)
    """
    columns = line.strip().split("\t")
    if mode == "4lang":
        definition = columns[7]
        prefix = "\t".join(columns[:7])
    else:
        definition = columns[1]
        prefix = columns[0]
    # Split on top-level commas only (commas inside brackets are kept).
    def_phrases = re.split(
        ''',(?=(?:[^\[\]{}<>]|\[[^\]]*\]|{[^}]*}|<[^>]*>|\([^\)]*\))*$)''', definition)
    filtered_definition = [phrase.strip()
                           for phrase in def_phrases if clause in phrase]
    if not filtered_definition:
        return line
    return prefix + "\t" + ", ".join(filtered_definition) + "\n"
def readfile(filename, mode="4lang"):
    """Load definitions from `filename` into the module-level registries.

    Populates the globals `defs` (raw line), `defs_to_parse` ((headword,
    definition) pairs) and `def_states` (None or an error string), keyed
    by the headword column (4lang format) or the line index (other).
    """
    with open(filename, encoding='utf-8') as f:
        for i, line in enumerate(f):
            # Quoted spans are discarded before any column handling.
            line = re.sub('"[^"]+"', "", line)
            if mode == "4lang":
                l = line.strip().split("\t")
                if l[4] in defs:
                    # Duplicate headword: report it; the later row wins.
                    print(l[4])
                defs[l[4]] = line
                if len(l) >= 8:
                    if "%" in l[7]:
                        # '%' starts an inline comment in the definition.
                        l[7] = l[7].split("%")[0].strip()
                    defs_to_parse[l[4]] = (l[0], l[7])
                    def_states[l[4]] = None
                else:
                    def_states[l[4]] = 'err bad columns (maybe spaces instead of TABS?)'
            else:
                l = line.strip().split("\t")
                defs[i] = line
                if len(l) >= 2:
                    if "%" in l[1]:
                        # '%' starts an inline comment in the definition.
                        l[1] = l[1].split("%")[0].strip()
                    defs_to_parse[i] = (l[0], l[1])
                    def_states[i] = None
                else:
                    def_states[i] = "err bad columns (maybe spaces instead of TABS?)"
def process(outputdir, parser):
    """Run the PLY parser over every pending definition.

    A syntax error surfaces as TypeError (raised by the lexer/parser) and
    is recorded in the global `def_states` map instead of aborting.
    `outputdir` is currently unused here; output is written by main().
    """
    for element in defs_to_parse:
        d = defs_to_parse[element][1]
        if d is not None:
            try:
                print(f"Parsing: {d}")
                res = parser.parse(d)
            except TypeError as e:
                def_states[element] = "err syntax error " + str(e)
def get_args():
    """Build and parse the command-line options for this script."""
    arg_parser = argparse.ArgumentParser(
        description="def_ply_parser.py -i <inputfile> -o <outputdir> -f <format> -c <clause>")
    # All options are plain strings; only input/output are mandatory.
    option_specs = (
        ("-i", "--input-file", {"required": True}),
        ("-o", "--output-dir", {"required": True}),
        ("-f", "--format", {"default": "4lang"}),
        ("-c", "--clause", {"default": None}),
    )
    for short_flag, long_flag, extra in option_specs:
        arg_parser.add_argument(short_flag, long_flag, type=str, **extra)
    # A "-b/--binaries" option existed once; the binaries file is now a
    # hard-coded module-level path instead.
    return arg_parser.parse_args()
def main(argv):
    """Parse every definition in the input file and write result files.

    Outputs (under the chosen output dir): '4lang_def_errors' for rows
    that failed column checks or parsing, plus the correct rows verbatim,
    root-substituted, clause-filtered (when -c is given), split into
    top-level clauses, and tokenized.
    """
    args = get_args()
    inputf = args.input_file
    outputdir = args.output_dir
    mode = args.format
    clause = args.clause
    #bins = args.binaries
    # get_binaries(bins)
    lexer = FourlangLexer()
    parser = FourlangParser(lexer)
    readfile(inputf, mode)
    process(outputdir, parser)
    # Partition rows by the state recorded in def_states during
    # readfile()/process().
    errors = []
    correct = []
    for state in def_states:
        if def_states[state] and 'err' in def_states[state]:
            errors.append(defs[state].strip() + "\t" +
                          def_states[state] + "\n")
        else:
            correct.append(defs[state])
    errors.sort()
    correct.sort()
    if not os.path.exists(outputdir):
        os.makedirs(outputdir)
    with open(os.path.join(outputdir, "4lang_def_errors"), 'w', encoding="utf-8") as f:
        for item in errors:
            # Rows beginning with '%' are comments in the source file.
            if not item.startswith("%"):
                f.write("%s" % item)
    # All derived views of the correct rows are written in one pass.
    with open(os.path.join(outputdir, "4lang_def_correct"), 'w', encoding="utf-8") as f:
        with open(os.path.join(outputdir, "4lang_def_correct_filtered"), "w", encoding="utf-8") as filtered:
            with open(os.path.join(outputdir, "4lang_def_correct_substituted"), "w", encoding="utf-8") as substituted:
                with open(os.path.join(outputdir, "top_level_clauses"), "w", encoding="utf-8") as top_level:
                    with open(os.path.join(outputdir, "tokens"), "w", encoding="utf-8") as tokens:
                        for item in correct:
                            if not item.startswith("%"):
                                substituted.write(
                                    "%s" % substitute_root(item, mode))
                                for top in get_top_level_clauses(item, mode):
                                    top_level.write("%s\n" % top)
                                if clause:
                                    filtered.write(
                                        "%s" % filter_line(item, clause, mode))
                                tokens.write("%s" % get_tokens(item, mode))
                                f.write("%s" % item)

if __name__ == "__main__":
    main(sys.argv[1:])
| 32.5 | 119 | 0.470372 | 1,510 | 13,585 | 4.10596 | 0.140397 | 0.030968 | 0.026613 | 0.019355 | 0.402419 | 0.375968 | 0.336452 | 0.323548 | 0.269032 | 0.213387 | 0 | 0.013445 | 0.348473 | 13,585 | 417 | 120 | 32.577938 | 0.686928 | 0.058152 | 0 | 0.303571 | 0 | 0.005952 | 0.106816 | 0.01622 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08631 | false | 0 | 0.02381 | 0.011905 | 0.1875 | 0.104167 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bfdc2c778df8946033f164e4812cefbea3b77257 | 8,929 | py | Python | noxfile.py | dhermes/bossylobster-blog | 16cf1be002d86e3d26a8bd7e9cc74ba93ef50c41 | [
"Apache-2.0"
] | 1 | 2021-04-09T17:30:52.000Z | 2021-04-09T17:30:52.000Z | noxfile.py | dhermes/bossylobster-blog | 16cf1be002d86e3d26a8bd7e9cc74ba93ef50c41 | [
"Apache-2.0"
] | 40 | 2015-01-07T00:49:33.000Z | 2022-02-07T19:31:32.000Z | noxfile.py | dhermes/bossylobster-blog | 16cf1be002d86e3d26a8bd7e9cc74ba93ef50c41 | [
"Apache-2.0"
] | 2 | 2019-05-10T03:53:39.000Z | 2020-12-03T20:24:33.000Z | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import errno
import os
import shutil
import subprocess
import nox
import psutil
import py.path
# Fail any session that shells out without ``external=True``.
nox.options.error_on_external_run = True

# Python version every session runs under.
DEFAULT_INTERPRETER = "3.7"
# Visual divider used between build phases in console output.
PRINT_SEP = "=" * 60

# All paths are anchored at the directory containing this noxfile.
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
INPUT_DIR = os.path.join(BASE_DIR, "content")
OUTPUT_DIR = os.path.join(BASE_DIR, "output")
CONF_FILE = os.path.join(BASE_DIR, "pelicanconf.py")
ALT_CONF_FILE = os.path.join(BASE_DIR, "pelicanconf_with_pagination.py")

# ``DEBUG`` is on if the variable is set at all (even empty).
DEBUG = "DEBUG" in os.environ
PORT = os.environ.get("PORT")


def get_path(*names):
    """Return an absolute path built from components under ``BASE_DIR``."""
    return os.path.join(BASE_DIR, *names)
def _render(session, env=None):
    """Run the Jinja2 template renderer script.

    Skips the session unless both ``node`` and ``npm`` are on ``PATH``.
    I will typically run this via
    ``PATH="${PATH}:${HOME}/.nodenv/versions/${VERSION}/bin" nox -s render``
    because I don't have a ``node`` executable on my default ``${PATH}``.
    """
    for tool in ("node", "npm"):
        if py.path.local.sysfind(tool) is None:
            session.skip("`{}` must be installed".format(tool))
    session.run("npm", "install", external=True)
    session.run("python", get_path("render_jinja2_templates.py"), env=env)
@nox.session(py=DEFAULT_INTERPRETER)
def render(session):
    """Render blog posts from templates.

    If the post has already been rendered, this will check the file hash
    against a stored mapping of hashes and do nothing if confirmed.
    """
    session.install("--requirement", "render-requirements.txt")
    _render(session)
@nox.session(py=DEFAULT_INTERPRETER)
def rerender(session):
    """Re-render blog posts from templates.

    ``FORCE_RENDER`` is set so the renderer rebuilds every post instead of
    skipping those whose stored hash matches.
    """
    session.install("--requirement", "render-requirements.txt")
    _render(session, env={"FORCE_RENDER": "true"})
def _generate(
    session, pelican_opts, regenerate=False, conf_file=CONF_FILE, env=None
):
    """Invoke ``pelican`` from the session virtualenv to build the site.

    ``regenerate=True`` adds ``-r`` so pelican watches for file changes.
    """
    cmd = [os.path.join(session.bin, "pelican")]
    if regenerate:
        cmd.append("-r")
    cmd += [INPUT_DIR, "-o", OUTPUT_DIR, "-s", conf_file]
    cmd += pelican_opts
    session.run(*cmd, env=env)
def get_pelican_opts():
    """Return extra pelican flags: ``-D`` (debug) when DEBUG is set."""
    return ["-D"] if DEBUG else []
@nox.session(py=DEFAULT_INTERPRETER)
def html(session):
    """(Re)-generate the web site.

    Two-pass build: the first pass (paginated config) produces the
    ``index*.html`` pages, which are stashed aside; the second pass builds
    the unpaginated site, and the stashed index pages are then restored and
    rewritten. Unused author/category/tag pages are removed afterwards.
    """
    pelican_opts = get_pelican_opts()
    session.install("--requirement", "html-requirements.txt")
    # 1. Render blog posts from their Jinja2 templates.
    print("Rendering templates...")
    print(PRINT_SEP)
    _render(session)
    print(PRINT_SEP)
    # 2. Build HTML with paging.
    print("Making first pass with paging")
    print(PRINT_SEP)
    env = {"PYTHONPATH": get_path()}
    _generate(session, pelican_opts, conf_file=ALT_CONF_FILE, env=env)
    print(PRINT_SEP)
    # 3. Keep around the paged index files and nothing else.
    print("Storing paging index*.html files for re-use")
    print(" and removing paged output.")
    print(PRINT_SEP)
    index_files = glob.glob(os.path.join(OUTPUT_DIR, "index*.html"))
    for filename in index_files:
        session.run(shutil.move, filename, BASE_DIR)
    session.run(shutil.rmtree, OUTPUT_DIR, ignore_errors=True)
    print(PRINT_SEP)
    # 4. Build HTML without paging.
    print("Making second pass without paging")
    print(PRINT_SEP)
    _generate(session, pelican_opts, env=env)
    print(PRINT_SEP)
    # 5. Add back paging information (the second pass wrote its own
    # index.html, which the stashed paginated one replaces).
    print("Putting back paging index*.html files")
    print(PRINT_SEP)
    session.run(os.remove, os.path.join(OUTPUT_DIR, "index.html"))
    index_files = glob.glob(os.path.join(BASE_DIR, "index*.html"))
    for filename in index_files:
        session.run(shutil.move, filename, OUTPUT_DIR)
    print(PRINT_SEP)
    # 6. Delete generated pages that are unused
    print("Removing unwanted pages")
    print(PRINT_SEP)
    session.run(remove_file, os.path.join(OUTPUT_DIR, "authors.html"))
    session.run(
        shutil.rmtree, os.path.join(OUTPUT_DIR, "author"), ignore_errors=True
    )
    session.run(remove_file, os.path.join(OUTPUT_DIR, "categories.html"))
    session.run(
        shutil.rmtree, os.path.join(OUTPUT_DIR, "category"), ignore_errors=True
    )
    session.run(remove_file, os.path.join(OUTPUT_DIR, "tags.html"))
    print(PRINT_SEP)
    # 7. Rewrite URL paths for the pagination feature.
    print("Rewriting paths for paging index*.html files.")
    print(PRINT_SEP)
    script = get_path("rewrite_custom_pagination.py")
    session.run("python", script)
    print(PRINT_SEP)
def remove_file(filename):
    """Delete ``filename``, ignoring the case where it does not exist.

    Any other ``OSError`` (e.g. permission denied) propagates unchanged.
    """
    try:
        os.remove(filename)
    except FileNotFoundError:
        # ``FileNotFoundError`` is exactly OSError-with-ENOENT, so this is
        # equivalent to the old errno check but idiomatic Python 3.
        pass
@nox.session(py=DEFAULT_INTERPRETER)
def regenerate(session):
    """Regenerate files upon modification.

    This runs a daemon that waits on file changes and updates generated
    content when files are updated.
    """
    pelican_opts = get_pelican_opts()
    session.install("--requirement", "html-requirements.txt")
    # PYTHONPATH points at the repo root -- presumably so pelican can import
    # repo-local config/plugin modules; confirm against pelicanconf.py.
    env = {"PYTHONPATH": get_path()}
    _generate(session, pelican_opts, regenerate=True, env=env)
@nox.session(py=DEFAULT_INTERPRETER)
def serve(session):
    """Serve the generated site at http://localhost:${PORT}."""
    script = get_path("pelican_server.py")
    session.cd(OUTPUT_DIR)
    if PORT is None:
        # Without PORT the server script chooses its own default.
        session.run("python", script)
    else:
        session.run("python", script, PORT)
@nox.session(py=DEFAULT_INTERPRETER)
def serve_local(session):
    """Serve at http://192.168.XX.YY:8001."""
    ip_script = get_path("get_local_ip.py")
    local_ip = session.run("python", ip_script, silent=True)
    server_script = get_path("pelican_server.py")
    session.cd(OUTPUT_DIR)
    # ``root`` doesn't know about our virtualenv, so run its python directly.
    venv_python = os.path.join(session.bin, "python")
    session.run(venv_python, server_script, "8001", local_ip.strip())
@nox.session(py=DEFAULT_INTERPRETER)
def dev_server(session):
    """Start / restart ``develop_server.sh``.

    Uses ``${PORT}`` environment variable.
    """
    cmd = [get_path("develop_server.sh"), "restart"]
    if PORT is not None:
        cmd.append(PORT)
    session.run(*cmd)
def _read_pid(name):
    """Return the integer PID stored in file ``name`` (under BASE_DIR).

    Returns ``None`` when the file is missing or does not hold an int --
    both simply mean "no known process".
    """
    try:
        with open(get_path(name), "r") as fh:
            return int(fh.read())
    except (OSError, ValueError):
        return None


def get_pelican_pid():
    """Return the PID recorded in ``pelican.pid``, or ``None``."""
    return _read_pid("pelican.pid")


def get_srv_pid():
    """Return the PID recorded in ``srv.pid``, or ``None``."""
    return _read_pid("srv.pid")
@nox.session(py=False)
def stop_server(session):
    """Stop the local dev server.

    Reads the PIDs recorded by ``develop_server.sh`` and kills both
    processes; errors out if either PID file is missing or unreadable.
    """
    pelican_pid = session.run(get_pelican_pid)
    srv_pid = session.run(get_srv_pid)
    if pelican_pid is None:
        if srv_pid is None:
            session.error("`pelican.pid` and `srv.pid` files invalid")
        else:
            session.error("`pelican.pid` file invalid")
    if srv_pid is None:
        # Fixed message: the opening backtick was previously missing.
        session.error("`srv.pid` file invalid")
    pelican_proc = psutil.Process(pelican_pid)
    srv_proc = psutil.Process(srv_pid)
    session.run(pelican_proc.kill)
    session.run(srv_proc.kill)
@nox.session(py=DEFAULT_INTERPRETER)
def update_requirements(session):
    """Re-pin every requirements file with pip-compile and stage it in git."""
    if py.path.local.sysfind("git") is None:
        session.skip("`git` must be installed")
    # Install all dependencies.
    session.install("pip-tools")
    # Update all of the requirements file(s).
    for name in ("render", "html"):
        in_name = "{}-requirements.in".format(name)
        txt_name = "{}-requirements.txt".format(name)
        session.run("rm", "-f", txt_name, external=True)
        session.run(
            "pip-compile", "--generate-hashes", "--output-file", txt_name, in_name
        )
        session.run("git", "add", txt_name, external=True)
@nox.session(python=DEFAULT_INTERPRETER)
def blacken(session):
    """Format every git-tracked Python file with black."""
    session.install("black")
    raw_listing = subprocess.check_output(["git", "ls-files", "*.py"])
    tracked = raw_listing.decode("ascii").strip().split("\n")
    session.run("black", "--line-length=79", *tracked)
@nox.session(py=False)
def clean(session):
    """Remove the generated files."""
    for dir_path in (
        OUTPUT_DIR,
        get_path("__pycache__"),
        get_path("node_modules"),
        get_path("pelican-plugins", "__pycache__"),
    ):
        session.run(shutil.rmtree, dir_path, ignore_errors=True)
| 30.896194 | 80 | 0.671184 | 1,223 | 8,929 | 4.752249 | 0.246934 | 0.048176 | 0.025809 | 0.026153 | 0.340847 | 0.276841 | 0.226772 | 0.183586 | 0.121645 | 0.114935 | 0 | 0.004457 | 0.195991 | 8,929 | 288 | 81 | 31.003472 | 0.805126 | 0.186359 | 0 | 0.312821 | 0 | 0 | 0.164944 | 0.024022 | 0 | 0 | 0 | 0 | 0 | 1 | 0.092308 | false | 0.010256 | 0.041026 | 0.005128 | 0.164103 | 0.112821 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bfdc46f47b5b04f85e478f88351e0d8e0d1e6e37 | 3,928 | py | Python | license/webhook_handler.py | KieranSweeden/fol.io | a6f231e3f9fb96841387b04d72131470c5fc3239 | [
"OLDAP-2.5",
"OLDAP-2.4",
"OLDAP-2.3"
] | null | null | null | license/webhook_handler.py | KieranSweeden/fol.io | a6f231e3f9fb96841387b04d72131470c5fc3239 | [
"OLDAP-2.5",
"OLDAP-2.4",
"OLDAP-2.3"
] | null | null | null | license/webhook_handler.py | KieranSweeden/fol.io | a6f231e3f9fb96841387b04d72131470c5fc3239 | [
"OLDAP-2.5",
"OLDAP-2.4",
"OLDAP-2.3"
] | null | null | null | """
Contains the webhook handler class
containing the handlers required
to process payments from stripe
"""
from django.conf import settings
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.contrib.auth.models import User
from django.core.mail import send_mail
from django.template.loader import render_to_string
from account.models import UserAccount
from .models import LicensePurchase
import stripe
stripe.api_key = settings.STRIPE_PRIVATE_KEY
class StripeWebHookHandlers:
    """Handlers for incoming Stripe webhook events.

    Each handler receives the deserialized Stripe ``event`` payload and
    returns a 200 ``HttpResponse`` acknowledging receipt.
    """

    def handle_event(self, event):
        """Acknowledge any webhook event with no dedicated handler."""
        return HttpResponse(
            content=f'Webhook received: {event["type"]}',
            status=200)

    def handle_checkout_session_completed(self, event):
        """Handle a successful Stripe checkout session.

        Records the purchase, emails a confirmation to the purchaser and
        credits the purchased licenses to the user's account.
        """
        session = event['data']['object']
        # Quantity is taken from the session's first line item.
        no_of_licenses_purchased = (
            stripe.checkout.Session.list_line_items(
                session['id'])['data'][0]['quantity']
        )
        customer_details = session['metadata']
        # NOTE(review): attribute access (``.user_id``) here vs. subscript
        # access (``['...']``) below -- Stripe's objects support both, but a
        # plain dict would not; confirm and make the access style consistent.
        user = get_object_or_404(
            User,
            pk=customer_details.user_id
        )
        new_license_purchase = LicensePurchase(
            user=user,
            purchaser_full_name=customer_details['purchaser_full_name'],
            purchaser_email=session['customer_email'],
            purchaser_phone_number=customer_details['purchaser_phone_number'],
            purchaser_street_address1=customer_details[
                'purchaser_street_address1'],
            purchaser_street_address2=customer_details[
                'purchaser_street_address2'],
            purchaser_town_or_city=customer_details['purchaser_town_or_city'],
            purchaser_postcode=customer_details['purchaser_postcode'],
            purchaser_county=customer_details['purchaser_county'],
            purchaser_country=customer_details['purchaser_country'],
            no_of_licenses_purchased=no_of_licenses_purchased,
            purchase_total=session.amount_total,
            stripe_pid=session.payment_intent
        )
        # Divides the stored total by 100 -- presumably converting from the
        # smallest currency unit (pence/cents) to whole units; confirm.
        new_license_purchase.purchase_total /= 100
        new_license_purchase.save()
        email_subject = render_to_string(
            'license/includes/email/purchase_confirmation_subject.txt',
            {"order_number": new_license_purchase.order_number}
        )
        email_body = render_to_string(
            'license/includes/email/purchase_confirmation_body.txt',
            {
                "purchase": new_license_purchase,
                "contact_email": settings.DEFAULT_FROM_EMAIL
            }
        )
        send_mail(
            email_subject,
            email_body,
            settings.DEFAULT_FROM_EMAIL,
            [session['customer_email']]
        )
        user_account = get_object_or_404(
            UserAccount,
            pk=user.id
        )
        user_account.add_licences_to_user_account(
            no_of_licenses_purchased
        )
        # Optionally persist the billing details as the account default.
        if 'save_billing_as_default' in customer_details:
            user_account.save_purchase_info_as_default(
                customer_details=customer_details
            )
        response_message = (
            f"Webhook received: {event['type']} | "
            f"Purchase was successfully made"
        )
        return HttpResponse(
            content=response_message,
            status=200
        )

    def handle_payment_intent_failed(self, event):
        """Acknowledge a failed Stripe checkout session."""
        response_message = (
            f"Webhook received: {event['type']} | "
            f"Purchase was unsuccessfully made"
        )
        return HttpResponse(
            content=response_message,
            status=200
        )
| 30.215385 | 78 | 0.633147 | 392 | 3,928 | 6.002551 | 0.326531 | 0.082873 | 0.081598 | 0.035699 | 0.145771 | 0.135147 | 0.135147 | 0.135147 | 0.044199 | 0.044199 | 0 | 0.009359 | 0.29277 | 3,928 | 129 | 79 | 30.449612 | 0.837653 | 0.073574 | 0 | 0.122222 | 0 | 0 | 0.156487 | 0.063608 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033333 | false | 0 | 0.1 | 0 | 0.177778 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bfdcf9dc5f7878296c06ad1958748bbe7307eba4 | 3,432 | py | Python | src/fhir_types/FHIR_ExampleScenario_Operation.py | anthem-ai/fhir-types | 42348655fb3a9b3f131b911d6bc0782da8c14ce4 | [
"Apache-2.0"
] | 2 | 2022-02-03T00:51:30.000Z | 2022-02-03T18:42:43.000Z | src/fhir_types/FHIR_ExampleScenario_Operation.py | anthem-ai/fhir-types | 42348655fb3a9b3f131b911d6bc0782da8c14ce4 | [
"Apache-2.0"
] | null | null | null | src/fhir_types/FHIR_ExampleScenario_Operation.py | anthem-ai/fhir-types | 42348655fb3a9b3f131b911d6bc0782da8c14ce4 | [
"Apache-2.0"
] | null | null | null | from typing import Any, List, Literal, TypedDict
from .FHIR_boolean import FHIR_boolean
from .FHIR_Element import FHIR_Element
from .FHIR_ExampleScenario_ContainedInstance import (
FHIR_ExampleScenario_ContainedInstance,
)
from .FHIR_markdown import FHIR_markdown
from .FHIR_string import FHIR_string
# Example of workflow instance.
# ``total=False`` makes every key optional, matching FHIR's rule that any
# individual element may be absent from a resource instance.
FHIR_ExampleScenario_Operation = TypedDict(
    "FHIR_ExampleScenario_Operation",
    {
        # Unique id for the element within a resource (for internal references). This may be any string value that does not contain spaces.
        "id": FHIR_string,
        # May be used to represent additional information that is not part of the basic definition of the element. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer can define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension.
        "extension": List[Any],
        # May be used to represent additional information that is not part of the basic definition of the element and that modifies the understanding of the element in which it is contained and/or the understanding of the containing element's descendants. Usually modifier elements provide negation or qualification. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer can define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. Applications processing a resource are required to check for modifier extensions.Modifier extensions SHALL NOT change the meaning of any elements on Resource or DomainResource (including cannot change the meaning of modifierExtension itself).
        "modifierExtension": List[Any],
        # The sequential number of the interaction, e.g. 1.2.5.
        "number": FHIR_string,
        # Extensions for number
        "_number": FHIR_Element,
        # The type of operation - CRUD.
        "type": FHIR_string,
        # Extensions for type
        "_type": FHIR_Element,
        # The human-friendly name of the interaction.
        "name": FHIR_string,
        # Extensions for name
        "_name": FHIR_Element,
        # Who starts the transaction.
        "initiator": FHIR_string,
        # Extensions for initiator
        "_initiator": FHIR_Element,
        # Who receives the transaction.
        "receiver": FHIR_string,
        # Extensions for receiver
        "_receiver": FHIR_Element,
        # A comment to be inserted in the diagram.
        "description": FHIR_markdown,
        # Extensions for description
        "_description": FHIR_Element,
        # Whether the initiator is deactivated right after the transaction.
        "initiatorActive": FHIR_boolean,
        # Extensions for initiatorActive
        "_initiatorActive": FHIR_Element,
        # Whether the receiver is deactivated right after the transaction.
        "receiverActive": FHIR_boolean,
        # Extensions for receiverActive
        "_receiverActive": FHIR_Element,
        # Each resource instance used by the initiator.
        "request": FHIR_ExampleScenario_ContainedInstance,
        # Each resource instance used by the responder.
        "response": FHIR_ExampleScenario_ContainedInstance,
    },
    total=False,
)
| 57.2 | 836 | 0.725233 | 431 | 3,432 | 5.670534 | 0.306265 | 0.02455 | 0.040917 | 0.047054 | 0.307692 | 0.307692 | 0.253682 | 0.253682 | 0.253682 | 0.253682 | 0 | 0.001128 | 0.224942 | 3,432 | 59 | 837 | 58.169492 | 0.917669 | 0.587995 | 0 | 0 | 0 | 0 | 0.160316 | 0.021567 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.171429 | 0 | 0.171429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bfdf1ab9db4b33d736aea7297fe7ed880fc1be28 | 34,694 | py | Python | att_app/views.py | tunir27/django-Attendance | 4075c93bce56f02b06de126349bcc63294e07f0b | [
"MIT"
] | 3 | 2019-07-05T16:03:39.000Z | 2019-11-06T07:20:29.000Z | att_app/views.py | tunir27/django-Attendance | 4075c93bce56f02b06de126349bcc63294e07f0b | [
"MIT"
] | 6 | 2020-06-05T17:53:31.000Z | 2021-09-07T23:50:09.000Z | att_app/views.py | tunir27/django-Attendance | 4075c93bce56f02b06de126349bcc63294e07f0b | [
"MIT"
] | 3 | 2018-04-30T15:09:04.000Z | 2018-12-15T12:45:14.000Z | from django.shortcuts import render, render_to_response, redirect
from django.contrib.auth.decorators import login_required
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.contrib.auth import authenticate, login, logout
from django.http import HttpResponseRedirect, HttpResponse,JsonResponse
from .forms import RegistrationForm, FilterAttendance, VerifyForm
from django.contrib import messages
from rest_framework.views import APIView
from rest_framework.response import Response
from .models import Student_Details, Student_Attendance, Token,Teacher_Details
from .serializers import StudentDetailsSerializer, TokenSerializer, StudentAttendanceSerializer,TeacherDetailsSerializer
from rest_framework import status
from django.contrib.auth import get_user_model
import datetime
import requests
from django.utils.html import escape
from datetime import date
from io import BytesIO
from .pdf_utils import PdfPrint
from django.db.models import Q
import itertools
import functools
from django.views.decorators.csrf import csrf_exempt
from django.utils.decorators import method_decorator
from django.contrib.auth.tokens import default_token_generator
from django.utils.encoding import force_bytes
from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode
from django.template import loader
from django.core.validators import validate_email
from django.core.exceptions import ValidationError
from django.core.mail import send_mail
from Project.settings import DEFAULT_FROM_EMAIL,FCM_SERVER_API
from django.views.generic import *
from .forms import PasswordResetRequestForm,SetPasswordForm,ContactUsForm
from pyfcm import FCMNotification
@login_required(login_url='/login/')
def successful_login(request):
    """Dashboard view: auto-mark attendance, then handle verify/filter POSTs.

    On every hit except Sundays:
      * after 07:00, students with no attendance row for today are marked
        absent by POSTing to the remote attendance API;
      * after 15:00, "present" rows missing an in/out swipe time are reset
        to absent via the same API.
    An optional manual-verification POST ("data" field) and an
    attendance-filter POST are then processed before rendering.
    """
    now = datetime.datetime.now()
    day = now.weekday()  # Monday == 0 ... Sunday == 6
    ntime = now.strftime("%H")
    ndate = now.strftime("%d/%m/%y")
    if not day == 6:  # skip Sundays
        if int(ntime) >= 7:
            # Students with no attendance record yet today -> mark absent.
            d = Student_Attendance.objects.filter(date=ndate)
            f = Student_Details.objects.filter(~Q(st_id__in=d.values_list('st_id', flat=True))).order_by('st_id')
            for data in f:
                r = requests.post('https://attendanceproject.herokuapp.com/home/apia/', data={'st_id': data.st_id, 'date': ndate, 'status': '0'})
                # r=requests.post('http://127.0.0.1:8000/home/apia/',data={'st_id':data.st_id,'date':ndate,'status':'0'})
                print(r.content)
        if int(ntime) >= 15:
            # End of day: present rows missing a swipe time are reset.
            d = Student_Attendance.objects.filter(date=ndate, status="1").order_by('st_id')
            for data in d:
                if not data.out_time:
                    r = requests.post('https://attendanceproject.herokuapp.com/home/apia/', data={'st_id': data.st_id, 'date': ndate, 'status': '0', 'out_time': '--'})
                    # r=requests.post('http://127.0.0.1:8000/home/apia/',data={'st_id':data.st_id,'date':ndate,'status':'0'})
                    print(r.content)
                if not data.in_time:
                    r = requests.post('https://attendanceproject.herokuapp.com/home/apia/', data={'st_id': data.st_id, 'date': ndate, 'status': '0', 'in_time': '--'})
                    # r=requests.post('http://127.0.0.1:8000/home/apia/',data={'st_id':data.st_id,'date':ndate,'status':'0'})
                    print(r.content)
    form = VerifyForm(request.POST or None)
    # Manual verification payload, formatted "status,student_id,date".
    http_data = request.POST.get('data')
    print(http_data)
    if http_data:
        http_status, http_sid, http_vdate = http_data.split(",")
    else:
        http_sid = None
        http_status = None
        http_vdate = None
    if http_sid and http_status and http_vdate:
        now = datetime.datetime.now()
        ntime = now.strftime("%H:%M:%S")
        if http_status == "1":
            r = requests.post('https://attendanceproject.herokuapp.com/home/apia/',
                              data={'st_id': http_sid, 'date': http_vdate, 'in_time': ntime, 'status': http_status, 'notif_s': "2"})
        if http_status == "0":
            r = requests.post('https://attendanceproject.herokuapp.com/home/apia/', data={'st_id': http_sid, 'date': http_vdate, 'in_time': "", 'status': http_status, 'notif_s': "2"})
        # NOTE(review): if http_status is neither "1" nor "0", ``r`` is
        # unbound here and this print raises NameError -- confirm inputs.
        print(r.content)
    form = FilterAttendance(request.POST or None)
    http_uid = request.session['username']
    user = get_user_model()
    uid = user.objects.get(sid=http_uid)
    staff_value = uid.is_staff
    request.session['staff_value'] = staff_value
    http_date = request.POST.get('date_id')
    http_class = request.POST.get('class_id')
    http_sec = request.POST.get('sec_id')
    if staff_value:
        # Staff may filter any class/section.
        date_item = Student_Attendance.objects.values('date').distinct()
        class_item = Student_Details.objects.values('s_class').distinct()
        section_item = Student_Details.objects.values('sec').distinct()
    else:
        # Students only see their own class/section.
        date_item = Student_Attendance.objects.values('date').distinct()
        class_item = Student_Details.objects.filter(st_id=user.objects.get(sid=http_uid)).values('s_class')
        section_item = Student_Details.objects.filter(st_id=user.objects.get(sid=http_uid)).values('sec')
    if http_date and http_class and http_sec:
        stu_det = Student_Details.objects.filter(s_class=http_class, sec=http_sec).order_by('st_id')
        stu_att = Student_Attendance.objects.filter(date=http_date, st_id__in=stu_det.values_list('st_id', flat=True)).order_by('st_id')
        att_count_a = Student_Attendance.objects.filter(date=http_date, st_id__in=stu_det.values_list('st_id', flat=True), status="0").count()
        att_count_p = Student_Attendance.objects.filter(date=http_date, st_id__in=stu_det.values_list('st_id', flat=True), status="1").count()
    if http_date and http_class and http_sec and staff_value:
        stu_count = Student_Details.objects.filter(s_class=http_class, sec=http_sec).count()
        stu_det = Student_Details.objects.filter(s_class=http_class, sec=http_sec).order_by('st_id')
        stu_att = Student_Attendance.objects.filter(date=http_date, st_id__in=stu_det.values_list('st_id', flat=True)).order_by('st_id')
    elif http_date and not http_class and not http_sec and not staff_value:
        # Student path: derive class/section from the logged-in student.
        stu_det = Student_Details.objects.filter(st_id=uid.sid).order_by('st_id')
        stu_det_all = Student_Details.objects.filter(s_class=stu_det[0].s_class, sec=stu_det[0].sec).order_by('st_id')
        stu_count = Student_Details.objects.filter(s_class=stu_det[0].s_class, sec=stu_det[0].sec).count()
        stu_att = Student_Attendance.objects.filter(date=http_date, st_id__in=stu_det.values_list('st_id', flat=True)).order_by('st_id')
        att_count_a = Student_Attendance.objects.filter(date=http_date, st_id__in=stu_det_all.values_list('st_id', flat=True), status="0").count()
        att_count_p = Student_Attendance.objects.filter(date=http_date, st_id__in=stu_det_all.values_list('st_id', flat=True), status="1").count()
    else:
        # No usable filter: render an empty dashboard.
        # NOTE(review): a non-staff POST with all three filters set falls
        # through to here and resets the values computed in the first
        # ``if`` above -- confirm this is intended.
        stu_count = 0
        stu_att = ''
        stu_det = ''
        att_count_a = 0
        att_count_p = 0
    return render(request, 'dashboard.html',
                  {"counter": functools.partial(next, itertools.count()), 'stu_count': stu_count, 'stu_att': stu_att,
                   'stu_det': stu_det, 'date_item': date_item, 'class_item': class_item, 'section_item': section_item,
                   'att_count_a': att_count_a, 'att_count_p': att_count_p, 'staff_value': staff_value, 'combined': zip(stu_att, stu_det)})
def site_history(request):
    """Render the attendance-history page with class/section filter choices."""
    context = {
        'class_item': Student_Details.objects.values('s_class').distinct(),
        'section_item': Student_Details.objects.values('sec').distinct(),
        'staff_value': request.session['staff_value'],
    }
    return render(request, 'history.html', context)
@csrf_exempt
def pdf_test(request):
    """Build and return a PDF attendance report.

    Two report modes: a whole class/section for a month (``pdf_type`` 0) or
    a single student (``pdf_type`` 1), resolved from the POSTed ``uid`` /
    ``stu_id`` or the session username.
    """
    http_stid = ""
    print(request.POST)
    http_date = request.POST.get('date')
    http_class = request.POST.get('class_id')
    http_sec = request.POST.get('sec_id')
    http_uid = request.POST.get('uid')
    print(http_class)
    print(http_sec)
    print(http_date)
    pdf_type = 0
    if http_uid:
        http_stid = http_uid
        pdf_type = 1
    if request.POST.get('stu_id'):
        http_stid = request.POST.get('stu_id')
        pdf_type = 1
    else:
        # Fall back to the logged-in user's id from the session, if any.
        try:
            request.session['username']
            if not http_stid:
                pdf_type = 1
                http_stid = request.session['username']
        except:
            # NOTE(review): bare except silently swallows all errors here;
            # a KeyError on the missing session key is the expected case.
            pass
    # Only month/year are used below; the day component is discarded.
    d, m, y = http_date.split("/")
    user = get_user_model()
    if http_stid:
        uid = user.objects.filter(sid=http_stid)
    if http_class and http_sec:
        # Class-wide report for the requested month.
        pdf_type = 0
        details = Student_Details.objects.filter(s_class=http_class, sec=http_sec)
        attendance = Student_Attendance.objects.filter(date__contains=('/' + m + '/'), st_id__in=details.values_list('st_id', flat=True))
        pie = 0
        filename = 'pdf_attendance' + " " + http_class + "-" + http_sec + " " + m + "," + y
    else:
        # Per-student report (includes a pie chart -- ``pie`` flag).
        # NOTE(review): if no student id was resolved, ``uid`` is unbound
        # here; likewise ``filename`` stays unset when ``details`` is empty.
        attendance = Student_Attendance.objects.filter(st_id=uid[0], date__contains=('/' + m + '/'))
        details = Student_Details.objects.filter(st_id=uid[0])
        pie = 1
        for i in details:
            filename = 'pdf_attendance ' + " " + i.first_name + " " + i.last_name + " " + m + "," + y
    print(attendance)
    print(details)
    response = HttpResponse(content_type='application/pdf')
    today = date.today()
    # NOTE(review): header value says "attachement" (typo for "attachment");
    # browsers tolerate it, but it is a runtime string -- fix deliberately.
    response['Content-Disposition'] = \
        'attachement; filename={0}.pdf'.format(filename)
    buffer = BytesIO()
    report = PdfPrint(buffer, 'A4')
    pdf = report.report(attendance, details, http_date, pie, 'Student Attendance data', pdf_type)
    response.write(pdf)
    return response
class ContactUsView(FormView):
    """Contact form view: emails the submitted message to the admin inbox.

    Accepts either a rendered Django form POST (``name``/``subject``) or an
    API-style POST (``c_name``/``c_subject``/``uid``) from the mobile app.
    """

    template_name = "contact_form/contact_form.html"  # code for template is given below the view's code
    success_url = '/home/contact/'
    form_class = ContactUsForm

    @method_decorator(csrf_exempt)
    def dispatch(self, request, *args, **kwargs):
        # CSRF exempt so the mobile/API client can POST without a token.
        return super(ContactUsView, self).dispatch(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        '''
        A normal post request which takes input from field "name" and "subject" (in ContactUsForm).
        '''
        print(request.POST)
        http_name = request.POST.get('c_name')
        http_subject = request.POST.get('c_subject')
        http_sid = request.POST.get('uid')
        if http_name == None and http_subject == None:
            # Browser form path: validate through the Django form.
            form = self.form_class(request.POST)
            if form.is_valid():
                name = form.cleaned_data["name"]
                subject = form.cleaned_data["subject"]
        else:
            # API path: message text comes from http_name/http_subject.
            name = ""
            subject = ""
        user = get_user_model()
        if http_sid:
            http_stid = http_sid
        else:
            http_stid = request.session['username']
        uid = user.objects.filter(sid=http_stid)
        stu_det = ""
        t_det = ""
        if uid[0].is_staff:
            t_det = Teacher_Details.objects.filter(t_id=uid[0].sid)
        else:
            stu_det = Student_Details.objects.filter(st_id=uid[0].sid)
        # Default context when no email address can be resolved.
        c = {
            'email': 'attendrteam@gmail.com',
            'name': name,
            'content': subject,
            'user': 'Not Specified',
            'u_mail': 'Not Specified'
        }
        if stu_det:
            if stu_det[0].email:
                if name and subject:
                    c = {
                        'email': 'attendrteam@gmail.com',
                        'name': name,
                        'content': subject,
                        'user': uid[0],
                        'u_mail': stu_det[0].email
                    }
                else:
                    c = {
                        'email': 'attendrteam@gmail.com',
                        'name': http_name,
                        'content': http_subject,
                        'user': uid[0],
                        'u_mail': stu_det[0].email
                    }
        elif t_det:
            if t_det[0].email:
                if name and subject:
                    c = {
                        'email': 'attendrteam@gmail.com',
                        'name': name,
                        'content': subject,
                        'user': uid[0],
                        'u_mail': t_det[0].email
                    }
                else:
                    c = {
                        'email': 'attendrteam@gmail.com',
                        'name': http_name,
                        'content': http_subject,
                        'user': uid[0],
                        'u_mail': t_det[0].email
                    }
        subject_template_name = 'contact_form/contact_form_subject.txt'
        # copied from django/contrib/admin/templates/registration/password_reset_subject.txt to templates directory
        email_template_name = 'contact_form/contact_form_email.html'
        # copied from django/contrib/admin/templates/registration/password_reset_email.html to templates directory
        subject = loader.render_to_string(subject_template_name, c)
        # Email subject *must not* contain newlines
        subject = ''.join(subject.splitlines())
        email = loader.render_to_string(email_template_name, c)
        if stu_det:
            send_mail(subject, email, stu_det[0].email, ['attendrteam@gmail.com'], fail_silently=False)
        elif t_det:
            send_mail(subject, email, t_det[0].email, ['attendrteam@gmail.com'], fail_silently=False)
        if http_name and http_subject:
            # API client: JSON acknowledgement instead of a redirect.
            return JsonResponse({"msg": "An email has been sent to the administration. We will get back to you soon."}, status=status.HTTP_201_CREATED)
        else:
            result = self.form_valid(form)
            messages.success(request, 'An email has been sent to the administration. We will get back to you soon.')
            return result
        # NOTE(review): both branches above return, so this error path looks
        # unreachable as written -- confirm the intended nesting.
        result = self.form_invalid(form)
        messages.error(request, 'No email id associated with this user')
        return result
class ResetPasswordRequestView(FormView):
    """Handle "forgot password" requests.

    The form field "email_or_username" may hold either an email address or a
    username (sid).  The matching student/teacher record is looked up, a
    password-reset link (uid + token) is built from it and emailed to the
    address on record.
    """
    template_name = "account/test_template.html"
    success_url = '/login/'
    form_class = PasswordResetRequestForm

    @staticmethod
    def validate_email_address(email):
        """Return True if `email` is a syntactically valid email address, else False."""
        try:
            validate_email(email)
            return True
        except ValidationError:
            return False

    def post(self, request, *args, **kwargs):
        """Process a reset request from the "email_or_username" form field."""
        form = self.form_class(request.POST)
        if form.is_valid():
            data = form.cleaned_data["email_or_username"]
            if self.validate_email_address(data) is True:
                # The input is an email address: find the student (or, failing
                # that, teacher) with this address and email them a reset link.
                User = get_user_model()
                stu_det = ""
                t_det = ""
                try:
                    stu_det = Student_Details.objects.filter(email=data)
                    if not stu_det.exists():
                        t_det = Teacher_Details.objects.filter(email=data)
                except Student_Details.DoesNotExist:
                    # NOTE(review): .filter() never raises DoesNotExist, so this
                    # handler should be unreachable; kept for safety.
                    t_det = Teacher_Details.objects.filter(email=data)
                print("t_det", t_det)
                if stu_det:
                    associated_users = User.objects.filter(sid=stu_det[0])
                elif t_det:
                    associated_users = User.objects.filter(sid=t_det[0])
                else:
                    associated_users = None
                if associated_users:
                    # Build the template context for the password-reset email.
                    if stu_det.exists():
                        c = {
                            'email': stu_det[0].email,
                            'domain': request.META['HTTP_HOST'],
                            'site_name': 'Attendr',
                            'uid': urlsafe_base64_encode(force_bytes(associated_users[0].pk)).decode(),
                            'user': associated_users[0],
                            'token': default_token_generator.make_token(associated_users[0]),
                            'protocol': 'http',
                        }
                    elif t_det.exists():
                        c = {
                            'email': t_det[0].email,
                            'domain': request.META['HTTP_HOST'],
                            'site_name': 'Attendr',
                            'uid': urlsafe_base64_encode(force_bytes(associated_users[0].pk)).decode(),
                            'user': associated_users[0],
                            'token': default_token_generator.make_token(associated_users[0]),
                            'protocol': 'http',
                        }
                    subject_template_name = 'registration/password_reset_subject.txt'
                    # copied from django/contrib/admin/templates/registration/password_reset_subject.txt to templates directory
                    email_template_name = 'registration/password_reset_email.html'
                    # copied from django/contrib/admin/templates/registration/password_reset_email.html to templates directory
                    subject = loader.render_to_string(subject_template_name, c)
                    # Email subject *must not* contain newlines
                    subject = ''.join(subject.splitlines())
                    email = loader.render_to_string(email_template_name, c)
                    if stu_det:
                        send_mail(subject, email, DEFAULT_FROM_EMAIL, [stu_det[0].email], fail_silently=False)
                    elif t_det:
                        send_mail(subject, email, DEFAULT_FROM_EMAIL, [t_det[0].email], fail_silently=False)
                    result = self.form_valid(form)
                    # Typo fix: "reseting" -> "resetting".
                    messages.success(request, 'An email has been sent to ' + data + ". Please check its inbox to continue resetting password.")
                    return result
                result = self.form_invalid(form)
                messages.error(request, 'No user is associated with this email address')
                return result
            else:
                # The input is a username: look up the user first, then their
                # student/teacher record to obtain the email address on file.
                User = get_user_model()
                associated_users = User.objects.filter(sid=data)
                if associated_users:
                    if not associated_users[0].is_staff:
                        stu_det = Student_Details.objects.filter(st_id=associated_users[0].sid)
                        t_det = ''
                    else:
                        t_det = Teacher_Details.objects.filter(t_id=associated_users[0].sid)
                        stu_det = ''
                    if stu_det:
                        if not stu_det[0].email:
                            result = self.form_invalid(form)
                            # Typo fix: removed duplicated "does does"; added missing space.
                            messages.error(request, 'This username does not have an email id. Please contact administrator.')
                            return result
                        c = {
                            'email': stu_det[0].email,
                            'domain': request.META['HTTP_HOST'],  # or your domain
                            'site_name': 'Attendr',
                            'uid': urlsafe_base64_encode(force_bytes(associated_users[0].sid)).decode(),
                            'user': associated_users[0],
                            'token': default_token_generator.make_token(associated_users[0]),
                            'protocol': 'http',
                        }
                        subject_template_name = 'registration/password_reset_subject.txt'
                        email_template_name = 'registration/password_reset_email.html'
                        subject = loader.render_to_string(subject_template_name, c)
                        # Email subject *must not* contain newlines
                        subject = ''.join(subject.splitlines())
                        email = loader.render_to_string(email_template_name, c)
                        send_mail(subject, email, DEFAULT_FROM_EMAIL, [stu_det[0].email], fail_silently=False)
                        result = self.form_valid(form)
                        messages.success(request, 'Email has been sent to ' + data + "'s email address. Please check its inbox to continue resetting password.")
                        return result
                    elif t_det:
                        if not t_det[0].email:
                            result = self.form_invalid(form)
                            messages.error(request, 'This username does not have an email id. Please contact administrator.')
                            return result
                        c = {
                            'email': t_det[0].email,
                            'domain': request.META['HTTP_HOST'],  # or your domain
                            'site_name': 'Attendr',
                            'uid': urlsafe_base64_encode(force_bytes(associated_users[0].sid)).decode(),
                            'user': associated_users[0],
                            'token': default_token_generator.make_token(associated_users[0]),
                            'protocol': 'http',
                        }
                        subject_template_name = 'registration/password_reset_subject.txt'
                        email_template_name = 'registration/password_reset_email.html'
                        subject = loader.render_to_string(subject_template_name, c)
                        # Email subject *must not* contain newlines
                        subject = ''.join(subject.splitlines())
                        email = loader.render_to_string(email_template_name, c)
                        send_mail(subject, email, DEFAULT_FROM_EMAIL, [t_det[0].email], fail_silently=False)
                        result = self.form_valid(form)
                        messages.success(request, 'Email has been sent to ' + data + "'s email address. Please check its inbox to continue resetting password.")
                        return result
                result = self.form_invalid(form)
                messages.error(request, 'This username does not exist in the system.')
                return result
        messages.error(request, 'Invalid Input')
        return self.form_invalid(form)
class PasswordResetConfirmView(FormView):
    """Validate a password-reset link (uid + token) and set the new password."""
    template_name = "account/test_template.html"
    success_url = '/login/'
    form_class = SetPasswordForm

    def post(self, request, uidb64=None, token=None, *arg, **kwargs):
        """
        View that checks the hash in a password reset link and presents a
        form for entering a new password.
        """
        UserModel = get_user_model()
        form = self.form_class(request.POST)
        assert uidb64 is not None and token is not None  # checked by URLconf
        try:
            # Decode the base64-encoded user id and fetch the user record.
            uid = urlsafe_base64_decode(uidb64).decode()
            user = UserModel._default_manager.get(sid=uid)
        except (TypeError, ValueError, OverflowError, UserModel.DoesNotExist):
            user = None
        if user is not None and default_token_generator.check_token(user, token):
            if form.is_valid():
                new_password = form.cleaned_data['new_password2']
                user.set_password(new_password)
                user.save()
                messages.success(request, 'Password has been reset.')
                return self.form_valid(form)
            else:
                # Bug fix: the message used to read "has not been unsuccessful"
                # (a double negative).
                messages.error(
                    request, 'Password reset has not been successful.')
                return self.form_invalid(form)
        else:
            messages.error(
                request, 'The reset password link is no longer valid.')
            return self.form_invalid(form)
class ApiDetails(APIView):
    """REST endpoint reading/updating a student's or teacher's detail record."""

    def get(self, request, std_id, format=None):
        """Return the detail record for `std_id` (teacher if the user is staff)."""
        User = get_user_model()
        user_value = User.objects.filter(sid=std_id)
        if not user_value:
            return JsonResponse({"msg":"Student/Teacher not found"}, status=status.HTTP_404_NOT_FOUND)
        if user_value[0].is_staff:
            http_stdid = Teacher_Details.objects.filter(t_id=std_id)
            serializer = TeacherDetailsSerializer(http_stdid, many=True)
        else:
            http_stdid = Student_Details.objects.filter(st_id=std_id)
            serializer = StudentDetailsSerializer(http_stdid, many=True)
        return Response(serializer.data)

    def post(self, request, format=None):
        """Update the detail record identified by POST field `st_id` or `t_id`."""
        pstd_id = request.POST.get('st_id')
        pt_id = request.POST.get('t_id')
        User = get_user_model()
        # Bug fix: `user_value` used to be undefined (NameError) when neither
        # `st_id` nor `t_id` was supplied; now that case returns 404 cleanly.
        user_value = None
        if pstd_id:
            user_value = User.objects.filter(sid=pstd_id)
        if pt_id:
            user_value = User.objects.filter(sid=pt_id)
        if user_value is not None and user_value.exists():
            if user_value[0].is_staff:
                temp_tdet = Teacher_Details.objects.get(t_id=user_value[0])
                serializer = TeacherDetailsSerializer(temp_tdet, data=request.data)
            else:
                temp_sdet = Student_Details.objects.get(st_id=user_value[0])
                serializer = StudentDetailsSerializer(temp_sdet, data=request.data)
            if serializer.is_valid():
                serializer.save()
                return Response(serializer.data, status=status.HTTP_201_CREATED)
            else:
                return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        else:
            return JsonResponse({"msg":"Student/Teacher not found"}, status=status.HTTP_404_NOT_FOUND)
class ApiTeachAttendance(APIView):
    """REST endpoint for teachers to query attendance of a class/section."""
    def post(self, request,format=None):
        """Return attendance info for class `h_class`, section `h_sec`.

        Without `h_date`: the distinct dates that have attendance records.
        With `h_date`: that day's attendance records plus student details,
        both ordered by student id so the rows line up client-side.
        """
        h_class=request.POST.get('h_class')
        h_sec=request.POST.get('h_sec')
        h_date=request.POST.get('h_date')
        if not h_date:
            # No date given: list every date with at least one attendance record.
            stu_det = Student_Details.objects.filter(s_class=h_class, sec=h_sec)
            date = Student_Attendance.objects.filter(st_id__in=stu_det.values_list('st_id', flat=True)).values('date').distinct()
            return JsonResponse({"date":list(date)})
        else:
            try:
                stu_det = Student_Details.objects.filter(s_class=h_class, sec=h_sec)
                stu_att = Student_Attendance.objects.filter(date=h_date, st_id__in=stu_det.values_list('st_id', flat=True))
            except Student_Attendance.DoesNotExist:
                # NOTE(review): .filter() does not raise DoesNotExist, so this
                # handler is effectively dead code; kept unchanged.
                return JsonResponse({"msg":"Value error"}, status=status.HTTP_404_NOT_FOUND)
            serializer = StudentAttendanceSerializer(stu_att.order_by('st_id'), many=True)
            serializer_name=StudentDetailsSerializer(stu_det.order_by('st_id'), many=True)
            return JsonResponse({"data":serializer.data,"name":serializer_name.data})
class ApiAttendance(APIView):
    """REST endpoint reading/recording per-student attendance, with optional
    FCM push notification to the student's registered device."""
    def get(self, request, std_id,format=None):
        """Return all attendance records for student `std_id`."""
        try:
            http_stdid = Student_Attendance.objects.filter(st_id=std_id)
        except Student_Attendance.DoesNotExist:
            # NOTE(review): .filter() does not raise DoesNotExist; dead handler.
            return JsonResponse({"msg":"Student ID error"}, status=status.HTTP_404_NOT_FOUND)
        serializer = StudentAttendanceSerializer(http_stdid, many=True)
        return JsonResponse({"data":serializer.data})
    def post(self, request, format=None):
        """Query (no `status` field) or create/update (with `status`) the
        attendance record of student `st_id` on date `date`.

        `notif_s` == "1" sends an entered/left-school notification,
        `notif_s` == "2" a marked-present/absent notification.
        """
        pstd_id = request.POST.get('st_id')
        p_date = request.POST.get('date')
        http_status=request.POST.get('status')
        print(request.POST)
        if not http_status:
            # Query mode: no status supplied, just return the record(s).
            try:
                http_stdid = Student_Attendance.objects.filter(st_id=pstd_id,date=p_date)
            except Student_Attendance.DoesNotExist:
                return Response("Student ID error", status=status.HTTP_404_NOT_FOUND)
            serializer = StudentAttendanceSerializer(http_stdid, many=True)
            return JsonResponse({"data":serializer.data})
        else:
            User = get_user_model()
            if User.objects.filter(sid=pstd_id).exists():
                uid = User.objects.filter(sid=pstd_id)
                try:
                    stu_a = Student_Attendance.objects.get(st_id=uid[0], date=p_date)
                    # Re-marking a student present must not clobber the stored
                    # in_time, so carry it over into the update payload.
                    if stu_a.in_time and http_status=="1":
                        new_data=request.data.copy()
                        new_data['in_time']=stu_a.in_time
                    else:
                        new_data=""
                except Student_Attendance.DoesNotExist:
                    stu_a = None
                if stu_a:
                    # Update the existing record (with preserved in_time if any).
                    print("new_data",new_data)
                    if new_data:
                        serializer = StudentAttendanceSerializer(stu_a, data=new_data)
                    else:
                        serializer = StudentAttendanceSerializer(stu_a, data=request.data)
                else:
                    # No record for that date yet: create one.
                    serializer = StudentAttendanceSerializer(data=request.data)
                if serializer.is_valid():
                    serializer.save()
                    notif_s= request.POST.get('notif_s')
                    message_title = "Student Attendance"
                    registration_id=None
                    if notif_s == "1":
                        # Gate-scan style notification: entered/left school.
                        push_service = FCMNotification(api_key=FCM_SERVER_API)
                        try:
                            tokenq=Token.objects.get(uid=uid[0])
                            registration_id = tokenq.token
                        except:
                            # NOTE(review): bare except; should be narrowed to
                            # Token.DoesNotExist.
                            registration_id=None
                        stu_det=Student_Details.objects.get(st_id=uid[0])
                        stu_a=Student_Attendance.objects.get(st_id=uid[0],date=p_date)
                        now = datetime.datetime.now()
                        ntime = now.strftime("%H")
                        # Before 13:00 a "present" scan means entering school.
                        if stu_a.status=="1" and not int(ntime)>=13:
                            message_body = stu_det.first_name + " has entered the school at " + stu_a.in_time
                        elif stu_a.status=="0":
                            message_body = stu_det.first_name + " has left the school at " + stu_a.out_time
                        else:
                            # NOTE(review): identical to the branch above
                            # (status "1" after 13:00 also reports "left").
                            message_body = stu_det.first_name + " has left the school at " + stu_a.out_time
                    elif notif_s == "2":
                        # Manual marking by the authorities.
                        push_service = FCMNotification(api_key=FCM_SERVER_API)
                        try:
                            tokenq=Token.objects.get(uid=uid[0])
                            registration_id = tokenq.token
                        except:
                            # NOTE(review): bare except; should be narrowed to
                            # Token.DoesNotExist.
                            registration_id=None
                        stu_det=Student_Details.objects.get(st_id=uid[0])
                        stu_a=Student_Attendance.objects.get(st_id=uid[0],date=p_date)
                        now = datetime.datetime.now()
                        ntime = now.strftime("%H:%M:%S")
                        if stu_a.status=="1":
                            message_body = stu_det.first_name + " has been marked present at " + ntime + " by the authorities."
                        elif stu_a.status=="0":
                            message_body = stu_det.first_name + " has been marked absent at " + ntime + " by the authorities."
                    if registration_id:
                        # Only push if the student has a registered device token.
                        result = push_service.notify_single_device(registration_id=registration_id, message_title=message_title, message_body=message_body,sound="Default")
                        print(result)
                    return Response(serializer.data, status=status.HTTP_201_CREATED)
                else:
                    return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
            else:
                return JsonResponse({"msg":"Student ID error"}, status=status.HTTP_404_NOT_FOUND)
class ApiLogin(APIView):
    """REST login endpoint; optionally registers/updates an FCM device token."""

    def post(self, request, format=None):
        """Authenticate POST fields `uid`/`password`.

        Without `token`: plain session login.  With `token`: additionally
        create or update the user's Token row and return its serialized data
        together with the user's staff flag.
        """
        posted_sid = request.POST.get('uid')
        posted_password = request.POST.get('password')
        device_token = request.POST.get('token')
        if not device_token:
            # Session login only, no device token supplied.
            user = authenticate(sid=posted_sid, password=posted_password)
            if user is None:
                return JsonResponse({"msg":"Login error"}, status=status.HTTP_404_NOT_FOUND)
            request.session['username'] = user.sid
            return JsonResponse({"msg":"Login successfull"}, status=status.HTTP_201_CREATED)
        # Device token supplied: fetch any existing Token row to update in place.
        try:
            existing_token = Token.objects.get(uid=posted_sid)
        except Token.DoesNotExist:
            existing_token = ''
        user = authenticate(sid=posted_sid, password=posted_password)
        if user is None:
            return JsonResponse({"msg":"Login error"}, status=status.HTTP_404_NOT_FOUND)
        if existing_token:
            serializer = TokenSerializer(existing_token, data=request.data)
        else:
            serializer = TokenSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        cus_data = serializer.data
        cus_data['staff_value'] = user.is_staff
        return Response(cus_data, status=status.HTTP_201_CREATED)
| 50.063492 | 242 | 0.579034 | 4,012 | 34,694 | 4.779163 | 0.092971 | 0.013977 | 0.018254 | 0.023939 | 0.654376 | 0.601909 | 0.562898 | 0.529936 | 0.5121 | 0.477626 | 0 | 0.009318 | 0.319508 | 34,694 | 692 | 243 | 50.135838 | 0.802829 | 0.052401 | 0 | 0.493311 | 0 | 0 | 0.102605 | 0.016547 | 0.003344 | 0 | 0 | 0 | 0.001672 | 1 | 0.023411 | false | 0.038462 | 0.060201 | 0.001672 | 0.177258 | 0.026756 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bfdf6117fed7579fa4507e741970110a2813606c | 1,042 | py | Python | {{cookiecutter.project_dir}}/{{cookiecutter.project_source_code_dir}}/logging_config.py | CapedHero/big-bang-py | 74aa5588584f61ad83b6a1078e00911c3cb974f5 | [
"MIT"
] | 2 | 2020-04-19T13:08:49.000Z | 2022-02-20T12:55:00.000Z | {{cookiecutter.project_dir}}/{{cookiecutter.project_source_code_dir}}/logging_config.py | CapedHero/big-bang-py | 74aa5588584f61ad83b6a1078e00911c3cb974f5 | [
"MIT"
] | null | null | null | {{cookiecutter.project_dir}}/{{cookiecutter.project_source_code_dir}}/logging_config.py | CapedHero/big-bang-py | 74aa5588584f61ad83b6a1078e00911c3cb974f5 | [
"MIT"
] | 1 | 2020-04-19T13:09:05.000Z | 2020-04-19T13:09:05.000Z | import os
from dirs import PROJECT_ROOT
# Make sure the log directory exists before the logging config is applied.
logs_folder = PROJECT_ROOT / "logs"
os.makedirs(logs_folder, exist_ok=True)
# Configuration in `logging.config.dictConfig` format: DEBUG-level records go
# both to stderr and to a size-rotated log file under `logs/`.
DICT_CONFIG = {
    "version": 1,
    "formatters": {"standard": {"format": "%(asctime)s %(name)-8s %(levelname)-8s %(message)s"}},
    "handlers": {
        "stderr": {"class": "logging.StreamHandler", "level": "DEBUG", "formatter": "standard"},
        "rotating_file_handler": {
            "class": "logging.handlers.RotatingFileHandler",
            "level": "DEBUG",
            "formatter": "standard",
            "filename": logs_folder / "logs.log",
            # Set max log file size to 1MB.
            "maxBytes": 1024 * 1024,
            # At most `backupCount` of backup log files will be kept. If more
            # would be created when rollover occurs, the oldest one is deleted.
            "backupCount": 2,
        },
    },
    "loggers": {
        "main": {
            "handlers": ["stderr", "rotating_file_handler"],
            "level": "DEBUG",
            "propagate": True,
        }
    },
}
| 31.575758 | 97 | 0.547025 | 105 | 1,042 | 5.32381 | 0.67619 | 0.053667 | 0.053667 | 0.096601 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017808 | 0.299424 | 1,042 | 32 | 98 | 32.5625 | 0.747945 | 0.152591 | 0 | 0.076923 | 0 | 0 | 0.387941 | 0.112628 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.076923 | 0 | 0.076923 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bfe107f23ca865470e9f406d7de74546794125de | 22,186 | py | Python | 2pt_data_to_fits/save_and_check_Phase1_iterative_covariance.py | KiDS-WL/Cat_to_Obs_K1000_P1 | 0de7f79cab150416859ffe58ac2d0f5659aedb5d | [
"MIT"
] | 7 | 2020-11-18T12:58:03.000Z | 2021-07-01T08:54:29.000Z | 2pt_data_to_fits/save_and_check_Phase1_iterative_covariance.py | KiDS-WL/Cat_to_Obs_K1000_P1 | 0de7f79cab150416859ffe58ac2d0f5659aedb5d | [
"MIT"
] | null | null | null | 2pt_data_to_fits/save_and_check_Phase1_iterative_covariance.py | KiDS-WL/Cat_to_Obs_K1000_P1 | 0de7f79cab150416859ffe58ac2d0f5659aedb5d | [
"MIT"
] | 3 | 2020-12-09T13:30:22.000Z | 2022-03-02T01:40:13.000Z |
######################################
## save_and_check_Phase1.py ##
## Marika Asgari ##
## Version 2020.04.21 ##
######################################
# This is based on Linc's save_and_check_twopoint.
# It has been adapted to make .fits files for the Phase1 real data
import sys
import numpy as np
import scipy.interpolate as itp
import astropy.io.fits as fits
import os
# set the path to scale_cuts here
sys.path.append("../../kcap/modules/scale_cuts/")
sys.path.append("../Calc_2pt_Stats/")
#import twopoint
import wrapper_twopoint as wtp
import wrapper_twopoint2 as wtp2
###############################################################################
## Main function
def saveFitsTwoPoint(
        nbTomoN=2, nbTomoG=5,
        N_theta=9, theta_min=0.5, theta_max=300,
        N_ell=8, ell_min=100, ell_max=1500,
        nbModes=5,
        prefix_Flinc='/disk05/calin/91_Data/mockFootprint/',
        prefix_CosmoSIS='/disk05/calin/91_Data/KiDS/kcap/Flinc/test_buceros/',
        scDict=None,
        meanTag=None, meanName=None,
        covTag=None, covName=None,
        nOfZNameList=None, nGalList=None, sigmaEpsList=None,
        saveName=None
    ):
    """
    This is a general function to save twopoint file.

    Parameters
    ----------
    nbTomoN : int, optional
        Number of lens bins
    nbTomoG : int, optional
        Number of source bins
    N_theta : int, optional
        Number of theta bins
    theta_min : float, optional
        Lower limit of theta bins
    theta_max : float, optional
        Upper limit of theta bins
    N_ell : int, optional
        Number of ell bins
    ell_min : float, optional
        Lower limit of ell bins
    ell_max : float, optional
        Upper limit of ell bins
    nbModes : int, optional
        Number of COSEBIs modes
    prefix_Flinc : string, optional
        Prefix of Flinc input directory; only concerned if meanTag = 'Flinc'
    prefix_CosmoSIS : string, optional
        Prefix of CosmoSIS theory input directory
        Only concerned if meanTag = 'CosmoSIS'
    scDict : dict, optional
        Dictionary containing scale-cut arguments
        Same format as in kcap ini files
        All dictionary keys & values have to be in lower case
        Defaults to an empty dict (no scale cuts)
    meanTag : {None, 'Flinc', 'CosmoSIS', 'variable', 'file'}, optional
        Method of mean input. One of
        None: no mean vector
        'Flinc': calculate Flinc means specified by `prefix_Flinc`;
            `meanName` is then the bird tag to be used
        'CosmoSIS': read theory outputs specified by `prefix_CosmoSIS`;
            `meanName` is then ignored
        'variable': read `meanName` directly as a python list/array,
            already ordered
        'file': read `meanName` as the path to a single column file
            ('.npy', '.fits', or ASCII)
    meanName : object, optional
        See `meanTag`
    covTag : {None, 'Flinc', 'list', 'variable', 'file'}, optional
        Method of covariance input. One of
        None: no covariance
        'Flinc': calculate Flinc covariance specified by `prefix_Flinc`;
            `covName` is then the bird tag to be used
        'list': read theory covariance specified by `covName` in list
            format; multiple terms (G, NG, SSC, ...) are summed up, nan
            replaced with 0; scale cuts must not be pre-applied
        'variable': read `covName` directly as a squared 2D-array,
            already ordered
        'file': read `covName` as the path to a squared-matrix file
            ('.npy', '.fits', or ASCII)
    nOfZNameList : None or string list
        List of n(z) ASCII file names, one per tomographic bin, sharing
        the same z bins; None to skip saving n(z)
    nGalList : float list
        List of n_gal
    sigmaEpsList : float list
        List of sigma_eps; length can be nbTomoG or nbTomoN+nbTomoG
    saveName : string
        Path of the twopoint file to be saved

    Returns
    -------
    Nothing, but output a file
    """
    # Bug fix: `scDict` used to default to a shared mutable `{}` (classic
    # mutable-default-argument pitfall); create a fresh dict per call instead.
    if scDict is None:
        scDict = {}
    wtp2.saveFitsTwoPoint(
        nbTomoN=nbTomoN, nbTomoG=nbTomoG,
        N_theta=N_theta, theta_min=theta_min, theta_max=theta_max,
        N_ell=N_ell, ell_min=ell_min, ell_max=ell_max,
        nbModes=nbModes,
        prefix_Flinc=prefix_Flinc,
        prefix_CosmoSIS=prefix_CosmoSIS,
        scDict=scDict,
        meanTag=meanTag, meanName=meanName,
        covTag=covTag, covName=covName,
        nOfZNameList=nOfZNameList, nGalList=nGalList, sigmaEpsList=sigmaEpsList,
        saveName=saveName
    )
    return
# copied from cosmosis
def load_histogram_form(ext):
    """Read an n(z) FITS extension stored in histogram form.

    Parameters
    ----------
    ext : FITS table extension (astropy BinTableHDU-like)
        Must provide columns Z_LOW, Z_HIGH, Z_MID, BIN1, BIN2, ... and
        header keywords NGAL_1, NGAL_2, ...

    Returns
    -------
    (z, nz, n_bar) : mid-point redshift grid, array with one unit-normalised
    n(z) row per tomographic bin, and the per-bin galaxy number densities.
    """
    # The cosmosis code expects something it can spline, so the mid-point
    # z values are returned; the edge columns are read but unused here.
    zlow = ext.data['Z_LOW']
    zhigh = ext.data['Z_HIGH']
    z = ext.data['Z_MID']

    # Collect the n(z) columns BIN1, BIN2, ... until one is missing.
    nz = []
    i = 1
    while 'BIN{0}'.format(i) in ext.data.names:
        nz.append(ext.data['BIN{0}'.format(i)])
        i += 1

    # Collect the per-bin number densities NGAL_1, NGAL_2, ... likewise.
    n_bar = []
    i = 1
    while 'NGAL_' + str(i) in ext.header.keys():
        n_bar.append(ext.header['NGAL_' + str(i)])
        i += 1

    print(" Found {0} bins".format(len(nz)))
    nz = np.array(nz)
    # Compatibility fix: np.trapz was removed in NumPy 2.0 (renamed to
    # np.trapezoid); use whichever this NumPy provides.
    _trapz = getattr(np, 'trapezoid', None) or np.trapz
    # Normalise each bin's n(z) to unit area (in place).
    for col in nz:
        col /= _trapz(col, z)
    return z, nz, n_bar
def mkdir_mine(dirName):
    """Create directory `dirName`, reporting whether it already existed."""
    try:
        os.mkdir(dirName)
    except FileExistsError:
        print("Directory " , dirName , " already exists")
    else:
        print("Directory " , dirName , " Created ")
##################################################################################
### Making fits files for Phase-1 real data
# Folder and file names for nofZ, for the sources it will depend on the blind
# Which catalogue blind and systematic variant this run builds files for.
blind = 'C'
cat_version = 'V1.0.0A_ugriZYJHKs_photoz_SG_mask_LF_svn_309c_2Dbins_v2_goldclasses_Flag_SOM_Fid'
name_tag = 'with_m_bias' # with_m_bias # no_m_bias # bmodes
FolderNameInputs = '../data/'
FolderNameCov = '../data/covariance/'
# Measured data vectors (band powers, COSEBIs, xi_pm) for the chosen blind/tag.
bp_filename = FolderNameInputs+'/kids/bp_K1000_ALL_BLIND_'+blind+'_'+name_tag+'_'+cat_version+'_nbins_8_Ell_100.0_1500.0.asc'
cosebis_filename = FolderNameInputs+'/kids/cosebis_K1000_ALL_BLIND_'+blind+'_'+name_tag+'_'+cat_version+'_nbins_theta_0.5_300.asc'
xipm_filename = FolderNameInputs+'/kids/xipm_K1000_ALL_BLIND_'+blind+'_'+name_tag+'_'+cat_version+'_nbins_9_theta_0.5_300.asc'
xipm_sys_corrected_filename = FolderNameInputs+'/kids/psf_systematic_corrected/xipm_K1000_ALL_BLIND_'+blind+'_'+name_tag+'_'+cat_version+'_nbins_9_theta_0.5_300.asc'
# Lens sample: 2 tomographic bins from BOSS + 2dFLenS.
nBins_lens = 2
lens1 = FolderNameInputs+'/boss/nofz/BOSS_and_2dFLenS_n_of_z1_res_0.01_extended.txt'
lens2 = FolderNameInputs+'/boss/nofz/BOSS_and_2dFLenS_n_of_z2_res_0.01_extended.txt'
# Source sample: 5 tomographic bins with SOM-calibrated n(z).
nBins_source = 5
source1 = FolderNameInputs+'/kids/nofz/SOM_N_of_Z/K1000_NS_V1.0.0A_ugriZYJHKs_photoz_SG_mask_LF_svn_309c_2Dbins_v2_DIRcols_Fid_blind'+blind+'_TOMO1_Nz.asc'
source2 = FolderNameInputs+'/kids/nofz/SOM_N_of_Z/K1000_NS_V1.0.0A_ugriZYJHKs_photoz_SG_mask_LF_svn_309c_2Dbins_v2_DIRcols_Fid_blind'+blind+'_TOMO2_Nz.asc'
source3 = FolderNameInputs+'/kids/nofz/SOM_N_of_Z/K1000_NS_V1.0.0A_ugriZYJHKs_photoz_SG_mask_LF_svn_309c_2Dbins_v2_DIRcols_Fid_blind'+blind+'_TOMO3_Nz.asc'
source4 = FolderNameInputs+'/kids/nofz/SOM_N_of_Z/K1000_NS_V1.0.0A_ugriZYJHKs_photoz_SG_mask_LF_svn_309c_2Dbins_v2_DIRcols_Fid_blind'+blind+'_TOMO4_Nz.asc'
source5 = FolderNameInputs+'/kids/nofz/SOM_N_of_Z/K1000_NS_V1.0.0A_ugriZYJHKs_photoz_SG_mask_LF_svn_309c_2Dbins_v2_DIRcols_Fid_blind'+blind+'_TOMO5_Nz.asc'
# number density of galaxies per arcmin^2
# Lenses: area-weighted average of the BOSS and 2dFLenS number densities.
n_2dflens = np.asarray([0.002890,0.003674])
n_boss = np.asarray([0.014478,0.016597])
area_2dflens = 342.879925
area_boss = 322.255634
area_total = 852.845901656631
nGal_lens_average = n_2dflens*area_2dflens/area_total+n_boss*area_boss/area_total
nGal_lens = [ nGal_lens_average[0],nGal_lens_average[1] ]
# Sources:
# read from file
filename = FolderNameInputs+'/kids/number_density/ngal_blind'+blind+'.ascii'
nGal_source = np.loadtxt(filename)
# Combined lens+source number densities (lenses first, as the fits writer expects).
nGal_all = nGal_lens + (nGal_source).tolist()
# read from file
filename = FolderNameInputs+'/kids/ellipticity_dispersion/sigma_e_blind'+blind+'.ascii'
sigma_e = np.loadtxt(filename)
# Make the data and Cov and redshift file for BP for KiDS1000 Phase-1
def saveFitsBP_list_KIDS1000():
    """Write the KiDS-1000 band-power twopoint FITS file (PneE + PeeE).

    Picks the data vector and matching list-format covariance from the
    module-level `blind`/`name_tag` configuration and delegates to
    saveFitsTwoPoint.
    """
    scDict = {
        'use_stats': 'PneE PeeE'.lower()
    }
    nOfZNameList = [lens1,
                    lens2,
                    source1,
                    source2,
                    source3,
                    source4,
                    source5]
    nGalList = nGal_all
    sigmaEpsList = sigma_e.tolist()
    # Covariance list file for each recognised analysis variant.
    cov_by_tag = {
        'no_m_bias': FolderNameCov+'/inputs/iterative_covariance/blind'+blind+'/thps_cov_kids1000_bandpower_E_apod_0_list.dat',
        'with_m_bias': FolderNameCov+'/inputs/iterative_covariance/blind'+blind+'/thps_cov_kids1000_bandpower_E_apod_0_list_with_sigma_m.dat',
        'bmodes': FolderNameCov+'/inputs/iterative_covariance/blind'+blind+'/thps_cov_kids1000_bandpower_B_apod_0_list.dat',
    }
    if name_tag not in cov_by_tag:
        print('not a recognised name_tag, will not produce anything')
        return
    covName = cov_by_tag[name_tag]
    saveName = FolderNameInputs+'/kids/fits_iterative_covariance/bp_KIDS1000_Blind'+blind+'_'+name_tag+'_'+cat_version+'.fits'
    saveFitsTwoPoint(
        nbTomoN=nBins_lens, nbTomoG=nBins_source,
        N_ell=8, ell_min=100, ell_max=1500,
        prefix_Flinc=None,
        prefix_CosmoSIS=None,
        scDict=scDict,
        meanTag='file', meanName=bp_filename,
        covTag='list', covName=covName,
        nOfZNameList=nOfZNameList, nGalList=nGalList, sigmaEpsList=sigmaEpsList,
        saveName=saveName
    )
    return
def saveFitsCOSEBIs_KIDS1000():
    """Write the KiDS-1000 COSEBIs twopoint FITS file (sources only, 20 modes)."""
    scDict = {
        'use_stats': 'En'.lower()
    }
    nOfZNameList = [source1,
                    source2,
                    source3,
                    source4,
                    source5]
    nGalList = nGal_source.tolist()
    sigmaEpsList = sigma_e.tolist()
    # Covariance matrix file for each recognised analysis variant.
    cov_by_tag = {
        'no_m_bias': FolderNameCov+'/outputs/Covariance_bestfit_3x2pt_no_m_bias_blind'+blind+'_nMaximum_20_0.50_300.00_nBins5.ascii',
        'with_m_bias': FolderNameCov+'/outputs/Covariance_bestfit_3x2pt_blind'+blind+'_nMaximum_20_0.50_300.00_nBins5.ascii',
        'bmodes': FolderNameCov+'/outputs/Covariance_bestfit_3x2pt_blind'+blind+'_nMaximum_20_0.50_300.00_nBins5_NoiseOnly.ascii',
    }
    if name_tag not in cov_by_tag:
        print('not a recognised name_tag, will not produce anything')
        return
    covName = cov_by_tag[name_tag]
    saveName = FolderNameInputs+'/kids/fits_iterative_covariance/cosebis_KIDS1000_Blind'+blind+'_'+name_tag+'_'+cat_version+'.fits'
    saveFitsTwoPoint(
        nbTomoN=0, nbTomoG=nBins_source,
        nbModes=20,
        prefix_Flinc=None,
        prefix_CosmoSIS=None,
        scDict=scDict,
        meanTag='file', meanName=cosebis_filename,
        covTag='file', covName=covName,
        nOfZNameList=nOfZNameList, nGalList=nGalList, sigmaEpsList=sigmaEpsList,
        saveName=saveName
    )
    return
def saveFitsXIPM_list_KIDS1000():
    """Write the KiDS-1000 xi_pm twopoint FITS file.

    The covariance input format differs per variant: 'no_m_bias' reads a
    list-format file (and also stores the lens n(z)), while 'with_m_bias'
    reads a pre-assembled matrix and keeps only the source sample.
    """
    scDict = {
        'use_stats': 'xiP xiM'.lower()
    }
    sigmaEpsList = sigma_e.tolist()
    if name_tag == 'no_m_bias':
        covTag = 'list'
        covName = FolderNameCov+'/inputs/iterative_covariance/blind'+blind+'/thps_cov_kids1000_xipm_list.dat'
        nGalList = nGal_all
        nTomoLens = 2
        nOfZNameList = [lens1,
                        lens2,
                        source1,
                        source2,
                        source3,
                        source4,
                        source5]
    elif name_tag == 'with_m_bias':
        covTag = 'file'
        covName = FolderNameCov+'/inputs/iterative_covariance/blind'+blind+'/thps_cov_kids1000_xipm_matrix_with_sigma_m.dat'
        nTomoLens = 0
        nGalList = nGal_source
        nOfZNameList = [source1,
                        source2,
                        source3,
                        source4,
                        source5]
    else:
        print('not a recognised name_tag, will not produce anything')
        return
    saveName = FolderNameInputs+'/kids/fits_iterative_covariance/xipm_KIDS1000_Blind'+blind+'_'+name_tag+'_'+cat_version+'.fits'
    saveFitsTwoPoint(
        nbTomoN=nTomoLens, nbTomoG=nBins_source,
        N_theta=9, theta_min=0.5, theta_max=300,
        prefix_Flinc=None,
        prefix_CosmoSIS=None,
        scDict=scDict,
        meanTag='file', meanName=xipm_filename,
        covTag=covTag, covName=covName,
        nOfZNameList=nOfZNameList, nGalList=nGalList, sigmaEpsList=sigmaEpsList,
        saveName=saveName
    )
    return
###############################################################################
## Checks and plots
def plot_redshift(filename, title, savename):
    """Plot the n(z) kernels stored in a twopoint FITS file.

    If the file also has an "nz_lens" extension, lens and source
    distributions are drawn in two stacked panels; otherwise only the
    source panel is drawn.  The figure is written to `savename`.
    """
    import matplotlib.pyplot as plt
    F = fits.open(filename)
    ext = F["nz_source"]
    z_source, nz_source, n_bar_source = load_histogram_form(ext)
    try:
        ext = F["nz_lens"]
        z_lens, nz_lens, n_bar_lens = load_histogram_form(ext)
        plot_lenses = True
    except KeyError:
        # Bug fix: was a bare `except:`.  A missing extension name raises
        # KeyError; any other failure now propagates instead of being
        # silently treated as "no lenses".
        print("no lenses given")
        plot_lenses = False
    F.close()
    if plot_lenses:
        # Two stacked panels sharing the x axis: lenses on top, sources below.
        plt.clf()
        ax = plt.subplot(2, 1, 1)
        plt.ylabel("P(z)")
        plt.title(title)
        plt.setp(ax.get_xticklabels(), visible=False)
        plt.subplots_adjust(wspace=0, hspace=0)
        for bin1 in range(len(nz_lens)):
            plt.xlim(0, 2.0)
            plt.plot(z_lens, nz_lens[bin1], label='lens ' + str(bin1 + 1))
        plt.legend(loc='best')
        ax = plt.subplot(2, 1, 2)
        plt.setp(ax.get_xticklabels(), visible=True)
        for bin1 in range(len(nz_source)):
            plt.xlim(0, 2.0)
            plt.plot(z_source, nz_source[bin1], label='source ' + str(bin1 + 1))
        plt.legend(loc='best')
        plt.xlabel("z")
        plt.ylabel("P(z)")
        plt.savefig(savename, bbox_inches='tight')
    else:
        # Single panel with the source distributions only.
        plt.clf()
        ax = plt.subplot(1, 1, 1)
        plt.setp(ax.get_xticklabels(), visible=True)
        for bin1 in range(len(nz_source)):
            plt.xlim(0, 2.0)
            plt.plot(z_source, nz_source[bin1], label='source ' + str(bin1 + 1))
        plt.legend(loc='best')
        plt.xlabel("z")
        plt.ylabel("P(z)")
        plt.savefig(savename, bbox_inches='tight')
def plot_covariance(filename, title, savename):
    """Render the COVMAT extension of a twopoint FITS file as an image."""
    import matplotlib.pyplot as plt
    hdu_list = fits.open(filename)
    covariance = hdu_list["COVMAT"].data
    fig, axis = plt.subplots()
    image = axis.imshow(covariance)
    cbar = axis.figure.colorbar(image, ax=axis)
    plt.title(title)
    plt.savefig(savename)
def plot_correlation_mat(filename, title, savename):
    """Render the correlation matrix derived from the COVMAT extension.

    corr_ij = cov_ij / sqrt(cov_ii * cov_jj)
    """
    import matplotlib.pyplot as plt
    F = fits.open(filename)
    ext = F["COVMAT"]
    cov = ext.data
    # Perf fix: replaced the O(n^2) Python double loop with a vectorised
    # outer-product normalisation.
    diag_sqrt = np.sqrt(np.diag(cov))
    corr = cov / np.outer(diag_sqrt, diag_sqrt)
    fig, ax = plt.subplots()
    im = ax.imshow(corr)
    cbar = ax.figure.colorbar(im, ax=ax)
    plt.title(title)
    plt.savefig(savename)
def plot_data(filename, title, extname, savename):
    """Plot the VALUE column of extension `extname` against data-vector index."""
    import matplotlib.pyplot as plt
    hdu_list = fits.open(filename)
    table = hdu_list[extname]
    values = table.data['VALUE']
    # Angle columns are read (as before) but not used in the plot.
    angle_bin = table.data['ANGBIN']
    angle_value = table.data['ANG']
    plt.clf()
    plt.title(title)
    plt.plot(values, 'x')
    plt.savefig(savename)
def printTwoPointHDU(name, ind=1):
    """
    Print the header and data of HDU number `ind` of a twopoint file.
    """
    header = fits.getheader(name, ind)
    payload = fits.getdata(name, ind)
    print()
    print(header.tostring(sep='\n'))
    print(payload)
def printTwoPoint_fromFile(name):
    """
    Print the summary info of a twopoint file: extensions, covariance,
    spectra, kernels and galaxy number densities.
    """
    hdu_list = fits.open(name)
    ## A standard twopoint file starts with an empty primary HDU; skip it.
    print()
    print('Check default HDU:')
    if 'SIMPLE' in hdu_list[0].header:
        hdu_list = hdu_list[1:]
        print('  Passed.')
    else:
        print('  No default HDU.')
        print('  Means that this file was not generated in the standard way.')
        print('  Will continue.')
    headers = [HDU.header for HDU in hdu_list]
    tables = [HDU.data for HDU in hdu_list]
    # Run each wrapper check in turn, blank-line separated.
    print()
    wtp2._checkExtensions_fromFile(headers)
    print()
    wtp2._checkCovariance_fromFile(headers)
    print()
    wtp2._checkSpectra_fromFile(headers)
    print()
    wtp2._checkKernels_fromFile(headers, tables)
    print()
    wtp2._checkNGal_fromFile(headers)
    return
def printTwoPoint(TP, mean=True, cov=True, nOfZ=True):
    """Print a human-readable summary of a twopoint object.

    Useful when you want to see exactly what is stored in the Python object.
    Each section can be toggled: `mean` for spectra, `cov` for the covariance,
    `nOfZ` for the kernels.
    """
    if mean:
        print()
        print('Spectra:')
        for spec in TP.spectra:
            print()
            wtp2._printSpectrum(spec)
    if cov:
        print()
        print('Covariance:')
        # getattr with a None default covers both a missing attribute and an
        # attribute explicitly set to None.
        covmat_info = getattr(TP, 'covmat_info', None)
        if covmat_info is not None:
            print()
            wtp2._printCovMatInfo(covmat_info)
            print('Direct cov.shape = %s' % str(TP.covmat.shape))
        else:
            print()
            print('Did not find `covmat_info` attribute')
    if nOfZ:
        print()
        print('Kernels:')
        for ker in TP.kernels:
            print()
            wtp2._printKernel(ker)
    return
def printTwoPoint_fromObj(name, mean=True, cov=True, nOfZ=True):
    """
    Print the summary info of a twopoint file by reading it first as an object

    :param name: path to the twopoint FITS file
    :param mean: print the spectra section
    :param cov: print the covariance section
    :param nOfZ: print the kernels (n(z)) section
    """
    try:
        TP = wtp.TwoPointWrapper.from_fits(name, covmat_name='COVMAT')
    except Exception:
        # Fall back to reading without a covariance block. The original used a
        # bare `except:`, which would also swallow KeyboardInterrupt/SystemExit.
        TP = wtp.TwoPointWrapper.from_fits(name, covmat_name=None)
    printTwoPoint(TP, mean=mean, cov=cov, nOfZ=nOfZ)
    return
def unitaryTest(name1, name2):
    """Check that the two files `name1` and `name2` are strictly identical.

    Thin wrapper delegating to wtp2.unitaryTest.
    """
    wtp2.unitaryTest(name1, name2)
############################################################
# plot things here
# Build the FITS data vectors, then for each statistic (COSEBIs, band
# powers, xipm) plot the redshift distributions, the covariance and
# correlation matrices, and the data vectors.
# exit()
saveFitsCOSEBIs_KIDS1000()
saveFitsBP_list_KIDS1000()
saveFitsXIPM_list_KIDS1000()
# exit()
FolderPlots=FolderNameInputs+'/plots_iterative_cov'
mkdir_mine(FolderPlots)
# COSEBIs
filename=FolderNameInputs+"/kids/fits_iterative_covariance/cosebis_KIDS1000_Blind"+blind+"_"+name_tag+"_V1.0.0A_ugriZYJHKs_photoz_SG_mask_LF_svn_309c_2Dbins_v2_goldclasses_Flag_SOM_Fid.fits"
title='KiDS'
savename=FolderPlots+'/only_source'+'_blind'+blind+'.pdf'
plot_redshift(filename,title,savename)
title='COSEBIs'
savename=FolderPlots+'/COSEBIs_covariance_'+name_tag+'_blind'+blind+'.pdf'
plot_covariance(filename,title,savename)
savename=FolderPlots+'/COSEBIs_correlation_matrix_'+name_tag+'_blind'+blind+'.pdf'
plot_correlation_mat(filename,title,savename)
extname='En'
savename=FolderPlots+'/COSEBIs_data_'+extname+'_'+name_tag+'_blind'+blind+'.pdf'
plot_data(filename,title,extname,savename)
# exit()
# BP
filename=FolderNameInputs+"/kids/fits_iterative_covariance/bp_KIDS1000_Blind"+blind+"_"+name_tag+"_V1.0.0A_ugriZYJHKs_photoz_SG_mask_LF_svn_309c_2Dbins_v2_goldclasses_Flag_SOM_Fid.fits"
title= 'KiDS1000-BOSS'
savename=FolderPlots+'/KiDS1000_nofz_'+name_tag+'_blind'+blind+'.pdf'
plot_redshift(filename,title,savename)
savename=FolderPlots+'/BP_covariance_'+name_tag+'_blind'+blind+'.pdf'
plot_covariance(filename,title,savename)
savename=FolderPlots+'/BP_correlation_matrix_'+name_tag+'_blind'+blind+'.pdf'
plot_correlation_mat(filename,title,savename)
# Read the band-power data vector. A context manager closes the handle;
# the original left the file open and shadowed the `file` builtin.
# NOTE(review): `bp` does not appear to be used below -- confirm it is
# needed further down the file.
with open(bp_filename) as bp_file:
    bp=np.loadtxt(bp_file)
extname='PeeE'
savename=FolderPlots+'/BP_data_'+extname+'_'+name_tag+'_blind'+blind+'.pdf'
plot_data(filename,title,extname,savename)
extname='PneE'
savename=FolderPlots+'/BP_data_'+extname+'_'+name_tag+'_blind'+blind+'.pdf'
plot_data(filename,title,extname,savename)
# xipm
filename=FolderNameInputs+"/kids/fits_iterative_covariance/xipm_KIDS1000_Blind"+blind+"_"+name_tag+"_V1.0.0A_ugriZYJHKs_photoz_SG_mask_LF_svn_309c_2Dbins_v2_goldclasses_Flag_SOM_Fid.fits"
title= 'Xipm'
savename=FolderPlots+'/xipm_nofz_'+name_tag+'_blind'+blind+'.pdf'
plot_redshift(filename,title,savename)
savename=FolderPlots+'/xipm_covariance_'+name_tag+'_blind'+blind+'.pdf'
plot_covariance(filename,title,savename)
savename=FolderPlots+'/xipm_correlation_matrix_'+name_tag+'_blind'+blind+'.pdf'
plot_correlation_mat(filename,title,savename)
extname='xip'
savename=FolderPlots+'/xip_data_'+extname+'_'+name_tag+'_blind'+blind+'.pdf'
plot_data(filename,title,extname,savename)
extname='xim'
savename=FolderPlots+'/xim_data_'+extname+'_'+name_tag+'_blind'+blind+'.pdf'
plot_data(filename,title,extname,savename)
| 33.262369 | 190 | 0.6441 | 2,855 | 22,186 | 4.769527 | 0.178984 | 0.029375 | 0.013366 | 0.017478 | 0.528016 | 0.505545 | 0.468091 | 0.450393 | 0.41551 | 0.404348 | 0 | 0.030672 | 0.232895 | 22,186 | 666 | 191 | 33.312312 | 0.769434 | 0.210899 | 0 | 0.44389 | 0 | 0 | 0.22308 | 0.14677 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037406 | false | 0.002494 | 0.027431 | 0 | 0.097257 | 0.104738 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bfe5b0250969d4362be0975cee2662278fb9feec | 4,077 | py | Python | akanda/horizon/tests/alias/views_tests.py | dreamhost/akanda-horizon | c2a3771f620245d31e7c84ba38bbf440f5161fb6 | [
"Apache-2.0"
] | 1 | 2015-02-23T16:59:55.000Z | 2015-02-23T16:59:55.000Z | akanda/horizon/tests/alias/views_tests.py | dreamhost/akanda-horizon | c2a3771f620245d31e7c84ba38bbf440f5161fb6 | [
"Apache-2.0"
] | null | null | null | akanda/horizon/tests/alias/views_tests.py | dreamhost/akanda-horizon | c2a3771f620245d31e7c84ba38bbf440f5161fb6 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.contrib.messages.storage import default_storage
from django.core.urlresolvers import reverse
from mock import patch
from openstack_dashboard.test import helpers
from akanda.horizon.tabs import alias_tab_redirect
from akanda.horizon import alias # noqa
class TestNetworkAliasView(helpers.TestCase):
    """Tests for the network-alias create and edit views."""
    def setUp(self):
        super(TestNetworkAliasView, self).setUp()
        # mock this method because both the network forms use it to fill
        # the drop-down menu for the group field in the html template
        self.form_data = {'name': 'net1', 'cidr': '192.168.1.1', 'group': 1}
        self.get_address_groups = patch(
            'akanda.horizon.alias.forms.networks.get_address_groups',
            lambda x: [(1, 'group')])
        self.get_address_groups.start()
        # Patch the neutron client so no network calls are made during tests.
        self.neutron_extensions_client = patch(
            'akanda.horizon.alias.forms.networks.neutron_extensions_client')
        self.neutron_extensions_client.start()
    def tearDown(self):
        # Stop the patches started in setUp so they do not leak across tests.
        self.get_address_groups.stop()
        self.neutron_extensions_client.stop()
    def test_create_network_alias(self):
        # Posting valid form data should succeed with no form errors.
        url = reverse('horizon:project:networking:alias:networks:create')
        response = self.client.post(url, self.form_data)
        self.assertNoFormErrors(response)
    def test_create_network_alias_redirect(self):
        # After a successful create, the view redirects back to the alias tab.
        url = reverse('horizon:project:networking:alias:networks:create')
        response = self.client.post(url, self.form_data)
        redirect_url = "%s?tab=%s" % (
            reverse('horizon:project:networking:index'), alias_tab_redirect())
        self.assertRedirectsNoFollow(response, redirect_url)
    def test_create_network_alias_assert_template(self):
        # An empty POST re-renders the create template (form with errors).
        url = reverse('horizon:project:networking:alias:networks:create')
        response = self.client.post(url)
        self.assertTemplateUsed(response, 'akanda/alias/networks/create.html')
    def test_create_network_alias_message(self):
        # A success message naming the new alias should be queued for the user.
        url = reverse('horizon:project:networking:alias:networks:create')
        response = self.client.post(url, self.form_data)
        storage = default_storage(response.request)
        message_cookie = response.cookies['messages'].value
        messages = [m.message for m in storage._decode(message_cookie)]
        msg = "Successfully created network alias: %s"
        self.assertIn(msg % self.form_data['name'], messages)
        self.assertMessageCount(success=1)
    # NOTE(review): these patch targets use 'alias.views.networks...' while
    # setUp patches 'akanda.horizon.alias.forms.networks...' -- confirm the
    # shorter module path is importable in the test environment.
    @patch('alias.views.networks.neutron_extensions_client.networkalias_get')
    def test_update_network_alias(self, get_obj):
        # The edit view should expose the fetched alias in its context.
        url = reverse(
            'horizon:project:networking:alias:networks:edit', args=['1'])
        network_ref = {'name': 'net1', 'cidr': '192.168.1.1',
                       'groups': 1, 'id': 1}
        get_obj.return_value = network_ref
        response = self.client.post(url)
        self.assertItemsEqual(
            response.context['network_alias'], network_ref)
    @patch('alias.views.networks.neutron_extensions_client.networkalias_get')
    def test_update_network_alias_assert_template(self, get_obj):
        # The edit view should render the edit_rules template.
        url = reverse(
            'horizon:project:networking:alias:networks:edit', args=['1'])
        network_ref = {'name': 'net1', 'cidr': '192.168.1.1',
                       'groups': 1, 'id': 1}
        get_obj.return_value = network_ref
        response = self.client.post(url)
        self.assertTemplateUsed(
            response, 'akanda/alias/networks/edit_rules.html')
| 42.46875 | 78 | 0.693157 | 509 | 4,077 | 5.40275 | 0.324165 | 0.034909 | 0.053455 | 0.078909 | 0.426909 | 0.380364 | 0.354182 | 0.346909 | 0.346909 | 0.346909 | 0 | 0.013501 | 0.200638 | 4,077 | 95 | 79 | 42.915789 | 0.830316 | 0.172431 | 0 | 0.349206 | 0 | 0 | 0.237258 | 0.186885 | 0 | 0 | 0 | 0 | 0.142857 | 1 | 0.126984 | false | 0 | 0.095238 | 0 | 0.238095 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bfe743cac0bce1d1645ca6a54b9da5a5bed36495 | 10,112 | py | Python | railrl/memory_states/qfunctions.py | Asap7772/rail-rl-franka-eval | 4bf99072376828193d05b53cf83c7e8f4efbd3ba | [
"MIT"
] | null | null | null | railrl/memory_states/qfunctions.py | Asap7772/rail-rl-franka-eval | 4bf99072376828193d05b53cf83c7e8f4efbd3ba | [
"MIT"
] | null | null | null | railrl/memory_states/qfunctions.py | Asap7772/rail-rl-franka-eval | 4bf99072376828193d05b53cf83c7e8f4efbd3ba | [
"MIT"
] | null | null | null | import torch
from torch import nn as nn
from torch.autograd import Variable
from torch.nn import functional as F
from railrl.pythonplusplus import identity
from railrl.torch import pytorch_util as ptu
from railrl.torch.core import PyTorchModule
from railrl.torch.rnn import BNLSTMCell, LSTM
class FeedForwardDuelingQFunction(PyTorchModule):
    """Dueling Q-function: Q(s, a) = V(s) + A(s, a).

    A shared observation layer feeds two MLP heads: the value head sees only
    the observation embedding, while the advantage head sees the observation
    embedding concatenated with the action.
    """
    def __init__(
            self,
            obs_dim,
            action_dim,
            observation_hidden_size,
            embedded_hidden_size,
            init_w=3e-3,
            output_activation=identity,
            hidden_init=ptu.fanin_init,
            batchnorm_obs=False,
    ):
        self.save_init_params(locals())
        super().__init__()
        self.obs_dim = obs_dim
        self.action_dim = action_dim
        self.observation_hidden_size = observation_hidden_size
        self.embedded_hidden_size = embedded_hidden_size
        self.hidden_init = hidden_init
        # Shared observation embedding feeding both heads.
        self.obs_fc = nn.Linear(obs_dim, observation_hidden_size)
        self.value_embedded_fc = nn.Linear(observation_hidden_size, embedded_hidden_size)
        # Only the advantage head receives the action.
        self.advantage_embedded_fc = nn.Linear(observation_hidden_size + action_dim, embedded_hidden_size)
        # self.advantage_avg = np.zeros(embedded_hidden_size)
        self.advantage_last_fc = nn.Linear(embedded_hidden_size, 1)
        self.value_last_fc = nn.Linear(embedded_hidden_size, 1)
        self.output_activation = output_activation
        self.init_weights(init_w)
        self.batchnorm_obs = batchnorm_obs
        if self.batchnorm_obs:
            self.bn_obs = nn.BatchNorm1d(obs_dim)
    def init_weights(self, init_w):
        # Hidden layers: fan-in init; output layers: small uniform values so
        # the initial Q estimates are close to zero.
        self.hidden_init(self.obs_fc.weight)
        self.obs_fc.bias.data.fill_(0)
        self.hidden_init(self.value_embedded_fc.weight)
        self.hidden_init(self.advantage_embedded_fc.weight)
        self.value_embedded_fc.bias.data.fill_(0)
        self.advantage_embedded_fc.bias.data.fill_(0)
        self.advantage_last_fc.weight.data.uniform_(-init_w, init_w)
        self.value_last_fc.weight.data.uniform_(-init_w, init_w)
        self.advantage_last_fc.bias.data.uniform_(-init_w, init_w)
        self.value_last_fc.bias.data.uniform_(-init_w, init_w)
    def forward(self, obs, action):
        # Returns Q(obs, action) = V(obs) + A(obs, action).
        if self.batchnorm_obs:
            obs = self.bn_obs(obs)
        h = obs
        h = F.relu(self.obs_fc(h))
        val_input = h
        # The action is appended only for the advantage branch.
        advantage_input = torch.cat((h, action), dim=1)
        value = F.relu(self.value_embedded_fc(val_input))
        value = self.output_activation(self.value_last_fc(value))
        advantage = F.relu(self.advantage_embedded_fc(advantage_input))
        advantage = self.output_activation(self.advantage_last_fc(advantage))
        # a_average = self._compute_running_average(advantage)
        q = value + advantage
        return q
    def _compute_running_average(self, update):
        # NOTE(review): dead code -- `self.advantage_avg` is never initialized
        # (its assignment in __init__ is commented out), so calling this would
        # raise AttributeError. The only call site in forward() is also
        # commented out.
        avg = self.advantage_avg
        if self.training:
            self.advantage_avg = .9 * self.advantage_avg + .1 * update
        return avg
class MemoryQFunction(PyTorchModule):
    """Feed-forward Q-function over an augmented (obs, memory) state and an
    augmented (action, write) action, for use with external-memory policies.

    When `ignore_memory` is True the memory and write inputs are dropped and
    the network reduces to a plain obs/action Q-function.
    """
    def __init__(
            self,
            obs_dim,
            action_dim,
            memory_dim,
            fc1_size,
            fc2_size,
            init_w=3e-3,
            output_activation=identity,
            hidden_init=ptu.fanin_init,
            ignore_memory=False,
    ):
        self.save_init_params(locals())
        super().__init__()
        self.obs_dim = obs_dim
        self.action_dim = action_dim
        self.memory_dim = memory_dim
        self.observation_hidden_size = fc1_size
        self.embedded_hidden_size = fc2_size
        self.init_w = init_w
        self.hidden_init = hidden_init
        self.ignore_memory = ignore_memory
        if self.ignore_memory:
            self.obs_fc = nn.Linear(self.obs_dim, self.observation_hidden_size)
            self.embedded_fc = nn.Linear(
                self.observation_hidden_size + self.action_dim,
                fc2_size,
            )
        else:
            # Memory is concatenated with the observation at the first layer;
            # the write action joins the environment action at the second.
            self.obs_fc = nn.Linear(obs_dim + memory_dim, fc1_size)
            self.embedded_fc = nn.Linear(
                fc1_size + action_dim + memory_dim,
                fc2_size,
            )
        self.last_fc = nn.Linear(fc2_size, 1)
        self.output_activation = output_activation
        self.init_weights(init_w)
    def init_weights(self, init_w):
        # Hidden layers: fan-in init; output layer: small uniform values so
        # initial Q estimates start near zero.
        self.hidden_init(self.obs_fc.weight)
        self.obs_fc.bias.data.fill_(0)
        self.hidden_init(self.embedded_fc.weight)
        self.embedded_fc.bias.data.fill_(0)
        self.last_fc.weight.data.uniform_(-init_w, init_w)
        self.last_fc.bias.data.uniform_(-init_w, init_w)
    def forward(self, obs, memory, action, write):
        # `memory` and `write` are ignored entirely when self.ignore_memory.
        if self.ignore_memory:
            obs_embedded = F.relu(self.obs_fc(obs))
            x = torch.cat((obs_embedded, action), dim=1)
            x = F.relu(self.embedded_fc(x))
        else:
            obs_embedded = torch.cat((obs, memory), dim=1)
            obs_embedded = F.relu(self.obs_fc(obs_embedded))
            x = torch.cat((obs_embedded, action, write), dim=1)
            x = F.relu(self.embedded_fc(x))
        return self.output_activation(self.last_fc(x))
class RecurrentQFunction(PyTorchModule):
    """Recurrent Q-function: a batch-norm LSTM over (obs, action) sequences
    followed by two fully connected layers with obs/action skip connections.
    """
    def __init__(
            self,
            obs_dim,
            action_dim,
            hidden_size,
            fc1_size,
            fc2_size,
            init_w=3e-3,
            hidden_init=ptu.fanin_init,
    ):
        self.save_init_params(locals())
        super().__init__()
        self.obs_dim = obs_dim
        self.action_dim = action_dim
        self.hidden_size = hidden_size
        self.fc1_size = fc1_size
        self.fc2_size = fc2_size
        self.hidden_init = hidden_init
        self.lstm = LSTM(
            BNLSTMCell,
            self.obs_dim + self.action_dim,
            self.hidden_size,
            batch_first=True,
        )
        # Skip connections: fc1 also sees the raw obs, fc2 the raw action.
        self.fc1 = nn.Linear(self.hidden_size + self.obs_dim, fc1_size)
        self.fc2 = nn.Linear(self.fc1_size + self.action_dim, fc2_size)
        self.last_fc = nn.Linear(self.fc2_size, 1)
        self.init_weights(init_w)
    def init_weights(self, init_w):
        # Hidden layers: fan-in init; output layer: small uniform values.
        self.hidden_init(self.fc1.weight)
        self.fc1.bias.data.fill_(0)
        self.hidden_init(self.fc2.weight)
        self.fc2.bias.data.fill_(0)
        self.last_fc.weight.data.uniform_(-init_w, init_w)
        self.last_fc.bias.data.uniform_(-init_w, init_w)
    def forward(self, obs, action):
        """
        :param obs: torch Variable, [batch_size, sequence length, obs dim]
        :param action: torch Variable, [batch_size, sequence length, action dim]
        :return: torch Variable, [batch_size, sequence length, 1]
        """
        assert len(obs.size()) == 3
        inputs = torch.cat((obs, action), dim=2)
        batch_size, subsequence_length = obs.size()[:2]
        # Zero initial hidden/cell state, shape [num_layers=1, batch, hidden].
        cx = Variable(
            ptu.FloatTensor(1, batch_size, self.hidden_size)
        )
        cx.data.fill_(0)
        hx = Variable(
            ptu.FloatTensor(1, batch_size, self.hidden_size)
        )
        hx.data.fill_(0)
        rnn_outputs, _ = self.lstm(inputs, (hx, cx))
        # NOTE(review): contiguous() is not in-place and its return value is
        # discarded, so this line has no effect.
        rnn_outputs.contiguous()
        # Flatten time into the batch dimension for the fc layers.
        rnn_outputs_flat = rnn_outputs.view(-1, self.hidden_size)
        obs_flat = obs.view(-1, self.obs_dim)
        action_flat = action.view(-1, self.action_dim)
        h = torch.cat((rnn_outputs_flat, obs_flat), dim=1)
        h = F.relu(self.fc1(h))
        h = torch.cat((h, action_flat), dim=1)
        h = F.relu(self.fc2(h))
        outputs_flat = self.last_fc(h)
        return outputs_flat.view(batch_size, subsequence_length, 1)
    @property
    def is_recurrent(self):
        # Signals to callers that this Q-function expects full sequences.
        return True
class RecurrentMemoryQFunction(PyTorchModule):
    """Recurrent Q-function over (obs, memory, action, write) sequences.

    A single-layer LSTM consumes the concatenated inputs; a linear head maps
    each hidden state to a scalar Q-value per time step.
    """
    def __init__(
            self,
            obs_dim,
            action_dim,
            memory_dim,
            hidden_size,
            fc1_size,
            fc2_size,
            init_w=3e-3,
            output_activation=identity,
            hidden_init=ptu.fanin_init,
    ):
        self.save_init_params(locals())
        super().__init__()
        self.obs_dim = obs_dim
        self.action_dim = action_dim
        self.memory_dim = memory_dim
        self.hidden_size = hidden_size
        # fc1_size/fc2_size and hidden_init are stored for interface
        # compatibility but unused: no fc hidden layers are created; the
        # LSTM output feeds last_fc directly.
        self.fc1_size = fc1_size
        self.fc2_size = fc2_size
        self.output_activation = output_activation
        self.hidden_init = hidden_init
        # Input per step: obs + action + memory state + memory write.
        self.rnn = nn.LSTM(
            self.obs_dim + self.action_dim + 2 * self.memory_dim,
            self.hidden_size,
            1,
            batch_first=True,
        )
        self.last_fc = nn.Linear(self.hidden_size, 1)
        self.init_weights(init_w)
    def init_weights(self, init_w):
        # Small uniform init on the output layer so Q starts near zero.
        self.last_fc.weight.data.uniform_(-init_w, init_w)
        self.last_fc.bias.data.uniform_(-init_w, init_w)
    def forward(self, obs, memory, action, write):
        """
        :param obs: torch Variable, [batch_size, sequence length, obs dim]
        :param memory: torch Variable, [batch_size, sequence length, memory dim]
        :param action: torch Variable, [batch_size, sequence length, action dim]
        :param write: torch Variable, [batch_size, sequence length, memory dim]
        :return: torch Variable, [batch_size, sequence length, 1]
        """
        rnn_inputs = torch.cat((obs, memory, action, write), dim=2)
        batch_size, subsequence_length, _ = obs.size()
        # Zero initial hidden/cell state, shape [num_layers=1, batch, hidden].
        cx = Variable(
            ptu.FloatTensor(1, batch_size, self.hidden_size)
        )
        cx.data.fill_(0)
        hx = Variable(
            ptu.FloatTensor(1, batch_size, self.hidden_size)
        )
        hx.data.fill_(0)
        state = (hx, cx)
        rnn_outputs, _ = self.rnn(rnn_inputs, state)
        rnn_outputs.contiguous()
        # BUG FIX: the original used `self.fc1.in_features`, but no `fc1`
        # module exists on this class, so forward() raised AttributeError.
        # The LSTM output feature size is hidden_size.
        rnn_outputs_flat = rnn_outputs.view(
            batch_size * subsequence_length,
            self.hidden_size,
        )
        outputs_flat = self.output_activation(self.last_fc(rnn_outputs_flat))
        return outputs_flat.view(batch_size, subsequence_length, 1)
    @property
    def is_recurrent(self):
        # Signals to callers that this Q-function expects full sequences.
        return True
| 34.511945 | 106 | 0.620748 | 1,313 | 10,112 | 4.467631 | 0.085301 | 0.028981 | 0.022162 | 0.018752 | 0.708319 | 0.646778 | 0.571258 | 0.533413 | 0.457041 | 0.404194 | 0 | 0.011597 | 0.283722 | 10,112 | 292 | 107 | 34.630137 | 0.798288 | 0.063884 | 0 | 0.547718 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004149 | 1 | 0.062241 | false | 0 | 0.033195 | 0.008299 | 0.141079 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bfeae6f9193d44b6c772f30e0ccc9eb683862d93 | 629 | py | Python | main.py | garyrouch/Hackaton2021 | a9ccd96531d3e255af179209dba99ee84ac269cf | [
"Unlicense"
] | 2 | 2021-09-16T22:26:51.000Z | 2021-09-16T23:09:54.000Z | main.py | garyrouch/Hackaton2021 | a9ccd96531d3e255af179209dba99ee84ac269cf | [
"Unlicense"
] | null | null | null | main.py | garyrouch/Hackaton2021 | a9ccd96531d3e255af179209dba99ee84ac269cf | [
"Unlicense"
] | null | null | null | import pandas as pd
from database_construction import insert_information
from Database_creator import create_database
from Database_creator import create_table
from config import CSV_PATH
def add_from_database(csv_file_path):
    """Read rows from the CSV at `csv_file_path` and insert each into the DB.

    Expects three columns per row, in order: email, label, level. The label
    and level values are cast to int before insertion.
    """
    df = pd.read_csv(csv_file_path)
    for i in range(len(df)):
        row = df.iloc[i]
        print(row)
        # Positional access (iloc) is robust even if the CSV index is not the
        # default RangeIndex; the original `df.loc[i][0]` chained indexing was
        # fragile in that case.
        insert_information(email=row.iloc[0], label=int(row.iloc[1]), level=int(row.iloc[2]))
def main():
    """Create the database and its table, then seed it from CSV_PATH."""
    create_database()
    create_table()
    add_from_database(CSV_PATH)


if __name__ == '__main__':
    main()
| 26.208333 | 81 | 0.694754 | 94 | 629 | 4.329787 | 0.393617 | 0.14742 | 0.058968 | 0.12285 | 0.280098 | 0.127764 | 0 | 0 | 0 | 0 | 0 | 0.006 | 0.205087 | 629 | 23 | 82 | 27.347826 | 0.808 | 0 | 0 | 0 | 0 | 0 | 0.012719 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.263158 | 0 | 0.368421 | 0.052632 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bfefd01b40ec0ad9a121716b4e79989544fc2b7e | 1,112 | py | Python | web/plot.py | RightMesh/payment-channel-performance | 3abde2d4d32353e212a49c946cb8222f297379e7 | [
"MIT"
] | null | null | null | web/plot.py | RightMesh/payment-channel-performance | 3abde2d4d32353e212a49c946cb8222f297379e7 | [
"MIT"
] | 43 | 2019-01-25T23:54:27.000Z | 2019-04-09T02:36:52.000Z | web/plot.py | RightMesh/payment-channel-performance | 3abde2d4d32353e212a49c946cb8222f297379e7 | [
"MIT"
] | 1 | 2019-04-27T00:17:44.000Z | 2019-04-27T00:17:44.000Z | import requests
import json
import sys
import matplotlib.pyplot as plt
# get count
url = 'http://localhost:5000/count'
headers = {'content-type': 'application/json'}
response = requests.get(url, headers=headers)
print('number of items in summary: ',int(response.content))

# get summary
url = 'http://localhost:5000/summary'
headers = {'content-type': 'application/json'}
response = requests.get(url, headers=headers)
results = json.loads(response.content)
print('an example of result:', results[0])

# Per-transaction series pulled out of the summary rows.
# actual_cost of a transaction
points_x = []
# gas_price of a transaction
points_x2 = []
# waiting_time of a transaction
points_y = []
for row in results:
    points_x.append(row['actual_cost'])
    # NOTE(review): a one-element list is appended here (not the scalar);
    # matplotlib accepts it, but it looks unintended -- confirm.
    points_x2.append([row['gas_price']])
    points_y.append(row['waiting_time'])


def _scatter(xs, ys):
    """Show a scatter plot of ys vs. xs with the shared title/labels.

    NOTE(review): the title and axis labels look like copy-paste placeholders
    ('number of transaction' / 'number of token types'); kept verbatim to
    preserve behavior -- confirm the intended labels.
    """
    plt.scatter(xs, ys)
    plt.title('Scatter plot pythonspot.com')
    plt.xlabel('number of transaction')
    plt.ylabel('number of token types')
    plt.show()


# Plot cost vs. waiting time, then gas price vs. waiting time.
# (The original duplicated this plotting code verbatim.)
_scatter(points_x, points_y)
_scatter(points_x2, points_y)
bff217f65ff9c8d34b875f514c1bc92e0dd3255b | 1,881 | py | Python | tests/datasets/test_cnn_dailymail.py | awesome-archive/lineflow | 1b753a1c2d5d3c7b369c6dd7f20e836c90d43407 | [
"MIT"
] | 1 | 2020-01-07T05:26:56.000Z | 2020-01-07T05:26:56.000Z | tests/datasets/test_cnn_dailymail.py | arita37/lineflow | 1b753a1c2d5d3c7b369c6dd7f20e836c90d43407 | [
"MIT"
] | null | null | null | tests/datasets/test_cnn_dailymail.py | arita37/lineflow | 1b753a1c2d5d3c7b369c6dd7f20e836c90d43407 | [
"MIT"
] | null | null | null | import tempfile
import shutil
from unittest import TestCase
from unittest import mock
from lineflow import download
from lineflow.datasets.cnn_dailymail import CnnDailymail, get_cnn_dailymail
class CnnDailymailTestCase(TestCase):
    """Tests for the CNN/DailyMail dataset loaders.

    The download cache root is redirected to a temporary directory so the
    user's real cache is never touched.
    """
    def setUp(self):
        self.default_cache_root = download.get_cache_root()
        self.temp_dir = tempfile.mkdtemp()
        download.set_cache_root(self.temp_dir)
    def tearDown(self):
        # Restore the original cache root and delete the temp directory.
        download.set_cache_root(self.default_cache_root)
        shutil.rmtree(self.temp_dir)
    def test_get_cnn_dailymail(self):
        # Each split maps to a (source, target) pair of equal-length lists.
        raw = get_cnn_dailymail()
        # train
        self.assertIn('train', raw)
        self.assertEqual(len(raw['train']), 2)
        for x in raw['train']:
            self.assertEqual(len(x), 287_227)
        # dev
        self.assertIn('dev', raw)
        self.assertEqual(len(raw['dev']), 2)
        for x in raw['dev']:
            self.assertEqual(len(x), 13_368)
        # test
        self.assertIn('test', raw)
        self.assertEqual(len(raw['test']), 2)
        for x in raw['test']:
            self.assertEqual(len(x), 11_490)
    def test_get_cnn_dailymail_twice(self):
        # The second call should hit the pickle cache: no dump, exactly one load.
        get_cnn_dailymail()
        with mock.patch('lineflow.datasets.cnn_dailymail.pickle', autospec=True) as \
                mock_pickle:
            get_cnn_dailymail()
            mock_pickle.dump.assert_not_called()
            self.assertEqual(mock_pickle.load.call_count, 1)
    def test_loads_each_split(self):
        # Each split loads independently with the expected number of examples.
        train = CnnDailymail(split='train')
        self.assertEqual(len(train), 287_227)
        dev = CnnDailymail(split='dev')
        self.assertEqual(len(dev), 13_368)
        test = CnnDailymail(split='test')
        self.assertEqual(len(test), 11_490)
    def test_raises_value_error_with_invalid_split(self):
        # An unknown split name must be rejected with ValueError.
        with self.assertRaises(ValueError):
            CnnDailymail(split='invalid_split')
| 32.431034 | 85 | 0.650718 | 237 | 1,881 | 4.945148 | 0.291139 | 0.127986 | 0.138225 | 0.053754 | 0.188567 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02521 | 0.240829 | 1,881 | 57 | 86 | 33 | 0.795518 | 0.007443 | 0 | 0.044444 | 0 | 0 | 0.05314 | 0.020397 | 0 | 0 | 0 | 0 | 0.333333 | 1 | 0.133333 | false | 0 | 0.133333 | 0 | 0.288889 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bff2ba6781c7c773ddcd469f3278eaf3773eb45a | 2,187 | py | Python | tests/test_actions/test_config.py | logse34/survivor | 44dd7f8e157bc12768bd535b3a28627029703741 | [
"MIT"
] | null | null | null | tests/test_actions/test_config.py | logse34/survivor | 44dd7f8e157bc12768bd535b3a28627029703741 | [
"MIT"
] | null | null | null | tests/test_actions/test_config.py | logse34/survivor | 44dd7f8e157bc12768bd535b3a28627029703741 | [
"MIT"
] | null | null | null | from tests.test_actions import *
from ltk import actions
from io import StringIO
import unittest
class TestConfig(unittest.TestCase):
    """Tests for the `config` action: printing the configuration and setting
    locale, workflow, download/watch folders and watch locales."""
    def setUp(self):
        # Each test gets a fresh config in the current working directory.
        create_config()
        self.action = actions.Action(os.getcwd())
    def tearDown(self):
        cleanup()
        self.action.close()
    def test_config(self):
        # Capture stdout and check the printed summary mentions the key fields.
        # NOTE(review): the BytesIO import below appears unused.
        from io import BytesIO
        import sys
        try:
            out = StringIO()
            sys.stdout = out
            self.action.config_action(None, None, None, None, [])
            info = out.getvalue()
            assert 'Access_token' in info
            key_words = ['Host: https://cms.lingotek.com', 'Project id', 'Community id', 'Locale', 'Workflow id']
            assert all(word in info for word in key_words)
        finally:
            # Always restore stdout, even if an assertion fails.
            sys.stdout = sys.__stdout__
    def test_change_locale(self):
        self.action.config_action('de_DE', None, None, None, [])
        assert self.action.locale == 'de_DE'
    def test_change_workflow(self):
        new_workflow = '6ff1b470-33fd-11e2-81c1-0800200c9a66'
        self.action.config_action(None, new_workflow, None, None, [])
        assert self.action.workflow_id == new_workflow
    def test_add_download_folder(self):
        # Folder must exist while config_action runs; removed again afterwards.
        dirName = 'download'
        os.mkdir(dirName)
        download_folder = dirName
        self.action.config_action(None, None, download_folder, None, [])
        os.rmdir(dirName)
        assert self.action.download_dir == download_folder
    def test_add_upload_folder(self):
        watch_folder = 'watching'
        os.mkdir(watch_folder)
        self.action.config_action(None, None, None, watch_folder, [])
        os.rmdir(watch_folder)
        assert self.action.watch_dir == watch_folder
    def test_watch_locales_1(self):
        locale = {'ja_JP'}
        self.action.config_action(None, None, None, None, locale)
        assert self.action.watch_locales == locale
    def test_watch_locales_mult(self):
        # Multiple locales are stored as a set (order-independent).
        locales = ['ja_JP', 'zh_CN', 'fr_FR',]
        self.action.config_action(None, None, None, None, locales)
        print (self.action.watch_locales, set(locales))
        assert self.action.watch_locales == set(locales)
| 33.646154 | 113 | 0.636031 | 272 | 2,187 | 4.908088 | 0.290441 | 0.11985 | 0.07191 | 0.115356 | 0.262172 | 0.181273 | 0.110861 | 0.085393 | 0 | 0 | 0 | 0.014751 | 0.256059 | 2,187 | 64 | 114 | 34.171875 | 0.805778 | 0 | 0 | 0 | 0 | 0 | 0.074531 | 0.016461 | 0 | 0 | 0 | 0 | 0.150943 | 1 | 0.169811 | false | 0 | 0.113208 | 0 | 0.301887 | 0.018868 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bff32f80ff6ca100cbb496fe547d602b34d0df80 | 1,842 | py | Python | biplane_tasks/performance/cs_transform_mats_mats.py | klevis-a/process-vicon-biplane | f140589b4705f0d6411b80b8e2699add68d08662 | [
"MIT"
] | 1 | 2021-11-10T21:09:59.000Z | 2021-11-10T21:09:59.000Z | biplane_tasks/performance/cs_transform_mats_mats.py | klevis-a/process-vicon-biplane | f140589b4705f0d6411b80b8e2699add68d08662 | [
"MIT"
] | null | null | null | biplane_tasks/performance/cs_transform_mats_mats.py | klevis-a/process-vicon-biplane | f140589b4705f0d6411b80b8e2699add68d08662 | [
"MIT"
] | null | null | null | """This script tests whether matmul or einsum is faster at multipliying a collection of matrices with a another
collection (same length) of matrices of the same dimension. In biomechanics this operation is used when expressing
a segment in the coordinate system of its proximal segment.
"""
import numpy as np
import time
# transform all frames in mats2 using frames in mats1
# assume that mats1 is nx4x4 and mats2 is nx4x4, and the output should be nx4x4
# implemented using matrix multiplication
def cs_transform_mats_mats_matmul(mats1, mats2):
    """Batched frame transform via matrix multiplication.

    For each index i, composes the 4x4 homogeneous transform mats2[i] with
    mats1[i]; inputs are nx4x4 and the result is nx4x4.
    """
    return np.matmul(mats1, mats2)
# implemented using einsum
def cs_transform_mats_mats_einsum(mats1, mats2):
    """Batched frame transform via einsum.

    Computes out[i] = mats1[i] @ mats2[i] for nx4x4 inputs, expressed with an
    explicit einsum subscript specification.
    """
    subscripts = 'ijk,ikl->ijl'
    return np.einsum(subscripts, mats1, mats2)
# simple test that can be easily verified by eye that the expected transformation is actually taking place
ms1 = np.stack([np.eye(4) * i for i in range(1, 11)], axis=0)
ms2 = np.stack([np.eye(4) * i for i in range(1, 11)], axis=0)
matmul_res = cs_transform_mats_mats_matmul(ms1, ms2)
einsum_res = cs_transform_mats_mats_einsum(ms1, ms2)
assert(np.array_equal(matmul_res, einsum_res))
# performance test
num_el = 1000
n = 1000
ms1_r = [np.random.rand(num_el, 4, 4) for i in range(n)]
ms2_r = [np.random.rand(num_el, 4, 4) for i in range(n)]
t0 = time.time()
for i in range(n):
cs_transform_mats_mats_matmul(ms1_r[i], ms2_r[i])
t1 = time.time()
matmul_total_time = (t1-t0)*1000
matmul_avg_time = matmul_total_time / n
t0 = time.time()
for i in range(n):
cs_transform_mats_mats_einsum(ms1_r[i], ms2_r[i])
t1 = time.time()
einsum_total_time = (t1-t0)*1000
einsum_avg_time = einsum_total_time / n
print('Matrix multiplication total time {:0.2f} and average time {:0.5f} ms'.format(matmul_total_time, matmul_avg_time))
print('Einsum total time {:0.2f} and average time {:0.5f} ms'.format(einsum_total_time, einsum_avg_time))
| 34.111111 | 120 | 0.7519 | 326 | 1,842 | 4.070552 | 0.322086 | 0.054258 | 0.067822 | 0.085908 | 0.370008 | 0.297664 | 0.246421 | 0.246421 | 0.246421 | 0.216277 | 0 | 0.048377 | 0.147123 | 1,842 | 53 | 121 | 34.754717 | 0.796308 | 0.326276 | 0 | 0.206897 | 0 | 0 | 0.108394 | 0 | 0 | 0 | 0 | 0 | 0.034483 | 1 | 0.068966 | false | 0 | 0.068966 | 0.068966 | 0.206897 | 0.068966 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bff3be5410418a43b54559e9e94272529bc2f10f | 819 | py | Python | automation_class/lecture3.py | omar115/code_for_Kids | 3f50ffb1d492c6ea5aa09688944aa01a0cadf1fd | [
"MIT"
] | null | null | null | automation_class/lecture3.py | omar115/code_for_Kids | 3f50ffb1d492c6ea5aa09688944aa01a0cadf1fd | [
"MIT"
] | null | null | null | automation_class/lecture3.py | omar115/code_for_Kids | 3f50ffb1d492c6ea5aa09688944aa01a0cadf1fd | [
"MIT"
] | 2 | 2021-01-08T03:52:46.000Z | 2021-04-01T19:16:12.000Z | '''
note: pypdf2 is a pdf manipulation library,
we will read, extract text, count pages etc. different
works can be done using pypdf2. this is a simple project
where we will do the followings:
1. extract text from a pdf
2. pass the text into pyttsx3
3. read it
that is how we will make our own audiobook.
'''
import PyPDF2
import pyttsx3
from pyttsx3 import engine  # NOTE(review): unused — shadowed by `engine = pyttsx3.init()` below

# Path of the PDF to read aloud.
PDF_PATH = r'/Users/omar/Desktop/omar_workspace/code_for_Kids/automation_class/oldmansea.pdf'

# step 1: open the pdf — the context manager guarantees the file handle is
# closed (the original left it open for the whole program lifetime)
with open(PDF_PATH, 'rb') as pdf_file:
    pdfreader = PyPDF2.PdfFileReader(pdf_file)
    # step 2: select a page and extract string from that page
    page = pdfreader.getPage(1)
    text = page.extractText()
print(text)

# step 3: use python text to speech library
# and read the text, convert to audio speech
engine = pyttsx3.init()
engine.say(text)
engine.runAndWait()
bff41c2735d6131e009e493a99a142b564dc94c8 | 2,257 | py | Python | bomberman/coalesced_counter.py | timothytrippel/bomberman | 517114a062e3b3160858af86b06334891a7e4554 | [
"BSD-2-Clause"
] | 1 | 2022-01-18T06:50:54.000Z | 2022-01-18T06:50:54.000Z | bomberman/coalesced_counter.py | timothytrippel/bomberman | 517114a062e3b3160858af86b06334891a7e4554 | [
"BSD-2-Clause"
] | 3 | 2021-03-26T20:45:07.000Z | 2022-01-13T03:30:40.000Z | bomberman/coalesced_counter.py | timothytrippel/bomberman | 517114a062e3b3160858af86b06334891a7e4554 | [
"BSD-2-Clause"
] | null | null | null | # Copyright © 2019, Massachusetts Institute of Technology
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from malicious_counter import add_malicious_coal_counters
def generate_coalesced_counters(signals, vcd, num_mal_cntrs, dut_top_module,
                                d_sig_basename, n_sig_basename):
    """Collect coalesced counters: flip-flop signals inside the DUT, plus
    optional artificially generated (malicious) counters."""
    # Existing coalesced counters are signals that are flip-flops AND whose
    # hierarchical name places them under the DUT's top module.
    coal_counters = {
        signal_name: signal
        for signal_name, signal in signals.items()
        if signal.isff and signal_name.startswith(dut_top_module)
    }
    # Optionally synthesize additional malicious counters on top of the
    # counters found in the design.
    if num_mal_cntrs > 0:
        print("Generating Malicious Coalesced Counters...")
        coal_counters = add_malicious_coal_counters(signals, vcd, coal_counters,
                                                    num_mal_cntrs, dut_top_module,
                                                    d_sig_basename, n_sig_basename)
    return coal_counters
| 49.065217 | 79 | 0.743908 | 307 | 2,257 | 5.345277 | 0.488599 | 0.051188 | 0.02011 | 0.028032 | 0.169409 | 0.140158 | 0.140158 | 0.140158 | 0.140158 | 0.140158 | 0 | 0.003898 | 0.204253 | 2,257 | 45 | 80 | 50.155556 | 0.909243 | 0.647319 | 0 | 0 | 0 | 0 | 0.054688 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.076923 | 0 | 0.230769 | 0.076923 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bff8bbf5d0b9c287c010eec938856baa6144fab3 | 2,095 | py | Python | secret_santa/util/email_sender.py | jacobboesch/secret_santa_program | f5b75614e716302930e5980beb1c79171e9b5451 | [
"MIT"
] | null | null | null | secret_santa/util/email_sender.py | jacobboesch/secret_santa_program | f5b75614e716302930e5980beb1c79171e9b5451 | [
"MIT"
] | null | null | null | secret_santa/util/email_sender.py | jacobboesch/secret_santa_program | f5b75614e716302930e5980beb1c79171e9b5451 | [
"MIT"
] | null | null | null | import smtplib
from email import encoders
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import os
import secret_santa.config as config
import traceback
from secret_santa.response import ErrorException
class EmailSender():
    """Builds an email (optionally with a file attachment) and sends it
    over SMTP using the credentials/server from ``config``.

    Raises ErrorException (HTTP-style code 500) on attachment or send failure.
    """

    def __init__(self, to, subject, body, attachment=None, bc=None):
        self.to = to
        self.attachment = attachment
        self.message = MIMEMultipart()
        self.message["From"] = config.SENDER_EMAIL
        self.message["To"] = self.to
        self.message["Subject"] = subject
        self.bc = bc  # NOTE(review): stored but never used by send() — confirm intent
        if body is not None:
            self.message.attach(MIMEText(body, "html"))
        if attachment is not None:
            self._attach_file()

    def set_to(self, recipient):
        """Replace the recipient address.

        BUG FIX: ``message["To"] = recipient`` *appends* a second To header
        (email.message.Message.__setitem__ never replaces); delete the old
        header first so the message carries exactly one To header.
        """
        self.to = recipient
        del self.message["To"]  # removes all To headers; no error if absent
        self.message["To"] = recipient

    def set_body(self, body):
        """Attach an HTML body part to the message."""
        self.message.attach(MIMEText(body, "html"))

    def _attach_file(self):
        """Attach ``self.attachment`` as a base64-encoded octet-stream part."""
        try:
            with open(self.attachment, "rb") as attachment:
                part = MIMEBase("application", "octet-stream")
                part.set_payload(attachment.read())
                encoders.encode_base64(part)
                file_name = os.path.basename(self.attachment)
                part.add_header(
                    "Content-Disposition",
                    "attachment; filename=" + file_name)
                self.message.attach(part)
        except Exception:
            traceback.print_exc()
            raise ErrorException("Unable to attach file to email", 500)

    def send(self):
        """Deliver the message via SMTP; the ``with`` block closes the connection."""
        try:
            text = self.message.as_string()
            with smtplib.SMTP(
                    config.SMTP_SERVER,
                    config.PORT) as server:
                server.login(config.SENDER_EMAIL, config.SENDER_PASSWORD)
                server.sendmail(config.SENDER_EMAIL, self.to, text)
        except Exception:
            traceback.print_exc()
            raise ErrorException(
                "Unable to send email to {to}".format(to=self.to), 500)
| 34.344262 | 75 | 0.597613 | 234 | 2,095 | 5.24359 | 0.32906 | 0.080685 | 0.031785 | 0.03423 | 0.149959 | 0.149959 | 0.09617 | 0.09617 | 0.09617 | 0 | 0 | 0.005513 | 0.307399 | 2,095 | 60 | 76 | 34.916667 | 0.84011 | 0 | 0 | 0.150943 | 0 | 0 | 0.06969 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.09434 | false | 0.018868 | 0.169811 | 0 | 0.283019 | 0.037736 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bffe14491bc4d25ac929d786742d2c975a111cd1 | 4,764 | py | Python | electrum/gui/qt/delegation_list.py | L-47/qtum-electrum | dd1b0a8b6ef6e96015a6210de36b23949eaad359 | [
"MIT"
] | 1 | 2020-07-21T18:37:59.000Z | 2020-07-21T18:37:59.000Z | electrum/gui/qt/delegation_list.py | L-47/qtum-electrum | dd1b0a8b6ef6e96015a6210de36b23949eaad359 | [
"MIT"
] | null | null | null | electrum/gui/qt/delegation_list.py | L-47/qtum-electrum | dd1b0a8b6ef6e96015a6210de36b23949eaad359 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
__author__ = 'CodeFace'
"""
from enum import IntEnum
from PyQt5.QtCore import Qt, QPersistentModelIndex, QPoint
from PyQt5.QtGui import QStandardItemModel, QStandardItem, QFont
from PyQt5.QtWidgets import QAbstractItemView, QMenu
from electrum.i18n import _
from electrum.util import profiler
from electrum.bitcoin import is_address
from electrum.wallet import InternalAddressCorruption
from .util import MyTreeView, MONOSPACE_FONT
class DelegationList(MyTreeView):
    """Tree view listing the wallet's offline-staking delegations.

    One row per delegated address, showing the address, the staker it is
    delegated to, the fee percentage, and the address balance.
    """

    class Columns(IntEnum):
        # Column indices of the underlying QStandardItemModel.
        ADDRESS = 0
        STAKER = 1
        FEE = 2
        BALANCE = 3

    # Columns searched by the view's text filter.
    filter_columns = [Columns.ADDRESS, Columns.STAKER, Columns.BALANCE, Columns.FEE]

    def __init__(self, parent):
        super().__init__(parent, self.create_menu, stretch_column=self.Columns.ADDRESS)
        self.wallet = self.parent.wallet
        self.setSelectionMode(QAbstractItemView.ExtendedSelection)
        self.setSortingEnabled(True)
        self.setModel(QStandardItemModel(self))

    def get_toolbar_buttons(self):
        # No toolbar buttons for this view.
        return []

    def refresh_headers(self):
        """(Re)apply the translated column headers."""
        headers = {
            self.Columns.ADDRESS: _('Address'),
            self.Columns.STAKER: _('Staker'),
            self.Columns.FEE: _('Fee'),
            self.Columns.BALANCE: _('Balance'),
        }
        self.update_headers(headers)

    @profiler
    def update(self):
        """Rebuild the model from the wallet's delegation DB, preserving the
        currently selected address where possible."""
        if self.maybe_defer_update():
            return
        current_address = self.current_item_user_role(col=self.Columns.ADDRESS)
        self.model().clear()
        self.refresh_headers()
        set_address = None
        for addr in sorted(self.parent.wallet.db.list_delegations()):
            dele = self.parent.wallet.db.get_delegation(addr)
            # confirmed + unconfirmed + unmatured balance components
            c, u, x = self.wallet.get_addr_balance(addr)
            balance = c + u + x
            balance_text = self.parent.format_amount(balance, whitespaces=True)
            fee_text = f'{dele.fee}%' if dele.fee > 0 else ''
            labels = [dele.addr, dele.staker, fee_text, balance_text]
            item = [QStandardItem(e) for e in labels]
            # Stash the raw address in UserRole so selection survives re-sorts.
            item[self.Columns.ADDRESS].setData(dele.addr, Qt.UserRole)
            item[self.Columns.ADDRESS].setTextAlignment(Qt.AlignLeft | Qt.AlignVCenter)
            item[self.Columns.BALANCE].setTextAlignment(Qt.AlignRight | Qt.AlignVCenter)
            item[self.Columns.BALANCE].setFont(QFont(MONOSPACE_FONT))
            row_count = self.model().rowCount()
            self.model().insertRow(row_count, item)
            idx = self.model().index(row_count, self.Columns.ADDRESS)
            if addr == current_address:
                set_address = QPersistentModelIndex(idx)
        self.set_current_idx(set_address)

    def mouseDoubleClickEvent(self, item):
        """Open the edit dialog for the double-clicked delegation."""
        idx = self.indexAt(item.pos())
        if not idx.isValid():
            return
        try:
            addr = self.model().itemFromIndex(self.selected_in_column(self.Columns.ADDRESS)[0]).text()
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. Nothing selected -> nothing to edit.
            return
        dele = self.parent.wallet.db.get_delegation(addr)
        self.parent.delegation_dialog(dele, mode='edit')

    def create_menu(self, position: QPoint):
        """Context menu: add a delegation when nothing is selected; copy/edit/
        undelegate actions for a single selected row."""
        menu = QMenu()
        selected = self.selected_in_column(self.Columns.ADDRESS)
        multi_select = len(selected) > 1
        if not selected:
            menu.addAction(_("Add Delegation"), lambda: self.parent.delegation_dialog())
        elif not multi_select:
            addr = self.model().itemFromIndex(self.selected_in_column(self.Columns.ADDRESS)[0]).text()
            dele = self.parent.wallet.db.get_delegation(addr)
            idx = self.indexAt(position)
            if not idx.isValid():
                return
            col = idx.column()
            column_title = self.model().horizontalHeaderItem(col).text()
            copy_text = self.model().itemFromIndex(idx).text()
            if col == self.Columns.BALANCE:
                # Balance text is padded with whitespace for alignment.
                copy_text = copy_text.strip()
            menu.addAction(_("Copy {}").format(column_title), lambda: self.place_text_on_clipboard(copy_text))
            menu.addAction(_("Edit"), lambda: self.parent.delegation_dialog(dele, mode='edit'))
            if dele and dele.staker:
                menu.addAction(_("Undelegate"), lambda: self.parent.delegation_dialog(dele, mode='undelegate'))
        menu.exec_(self.viewport().mapToGlobal(position))

    def place_text_on_clipboard(self, text: str, *, title: str = None) -> None:
        """Copy text to the clipboard, warning about corrupted wallet addresses."""
        if is_address(text):
            try:
                self.parent.wallet.check_address_for_corruption(text)
            except InternalAddressCorruption as e:
                self.parent.show_error(str(e))
                raise
        super().place_text_on_clipboard(text, title=title)
bfffbeabe843284199d6974dacace8acb741af22 | 6,726 | py | Python | src/pytiger/logging/test_syslog.py | tigercomputing/pytiger | 3002c732d90a258dc9aacdb9588532ffc29d58ea | [
"BSD-3-Clause"
] | 1 | 2016-06-22T13:51:07.000Z | 2016-06-22T13:51:07.000Z | src/pytiger/logging/test_syslog.py | tigercomputing/pytiger | 3002c732d90a258dc9aacdb9588532ffc29d58ea | [
"BSD-3-Clause"
] | 6 | 2017-07-05T16:34:00.000Z | 2018-07-30T11:04:07.000Z | src/pytiger/logging/test_syslog.py | tigercomputing/pytiger | 3002c732d90a258dc9aacdb9588532ffc29d58ea | [
"BSD-3-Clause"
] | 2 | 2016-06-22T10:36:02.000Z | 2016-06-22T13:51:16.000Z | # -*- coding: utf-8 -*-
# Copyright © 2015 Tiger Computing Ltd
# This file is part of pytiger and distributed under the terms
# of a BSD-like license
# See the file COPYING for details
from __future__ import absolute_import
import logging
import logging.config
import os.path
import sys
import syslog
import textwrap
import unittest
from six import StringIO
try:
from unittest.mock import patch
except ImportError:
from mock import patch
from .syslog import (
PRIORITY_NAMES, FACILITY_NAMES, priority, facility, encode_priority,
_map_priority, SyslogHandler)
class TestSyslogSupport(unittest.TestCase):
def test_priority_int(self):
for name, value in PRIORITY_NAMES.items():
self.assertEqual(value, priority(value))
def test_priority_str(self):
for name, value in PRIORITY_NAMES.items():
self.assertEqual(value, priority(name))
def test_priority_str_invalid(self):
self.assertRaises(ValueError, priority, 'foobar')
def test_priority_invalid(self):
self.assertRaises(TypeError, priority, object())
####################
def test_facility_int(self):
for name, value in FACILITY_NAMES.items():
self.assertEqual(value, facility(value))
def test_facility_str(self):
for name, value in FACILITY_NAMES.items():
self.assertEqual(value, facility(name))
def test_facility_str_invalid(self):
self.assertRaises(ValueError, facility, 'foobar')
def test_facility_invalid(self):
self.assertRaises(TypeError, facility, object())
####################
def test_encode_priority(self):
SAMPLE_ENCODINGS = [
('user', 'crit', (1 << 3) | 2),
('auth', 'emerg', (4 << 3) | 0),
('mail', 'debug', (2 << 3) | 7),
('cron', 'warning', (9 << 3) | 4),
('kern', 'emerg', (0 << 3) | 0),
('local7', 'debug', (23 << 3) | 7),
]
for fac, pri, expect in SAMPLE_ENCODINGS:
value = encode_priority(fac, pri)
self.assertEqual(value, expect,
"%d != %s:%s (%d)" % (value, fac, pri, expect))
####################
def test_map_priority(self):
SAMPLE_MAPPINGS = [
(logging.CRITICAL, syslog.LOG_CRIT),
(logging.ERROR, syslog.LOG_ERR),
(logging.WARNING, syslog.LOG_WARNING),
(logging.INFO, syslog.LOG_INFO),
(logging.DEBUG, syslog.LOG_DEBUG),
(100, syslog.LOG_CRIT), # x > CRITICAL
(45, syslog.LOG_CRIT), # CRITICAL > x > ERROR
(35, syslog.LOG_ERR), # ERROR > x > WARNING
(25, syslog.LOG_WARNING), # WARNING > x > INFO
(15, syslog.LOG_INFO), # INFO > x > DEBUG
(5, syslog.LOG_DEBUG), # DEBUG > x
]
for lvl, prio in SAMPLE_MAPPINGS:
value = _map_priority(lvl)
self.assertEqual(prio, value,
"%d => %d; expected %d" % (lvl, value, prio))
class TestSyslogFormatter(unittest.TestCase):
config = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=NOTSET
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
class=pytiger.logging.syslog.SyslogFormatter
format=%(levelname)s:%(name)s:%(message)s
datefmt=
"""
def test_formatter_exception(self):
with patch('sys.stdout', new=StringIO()) as out:
logging.config.fileConfig(StringIO(textwrap.dedent(self.config)))
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(out.getvalue(),
"ERROR:root:just testing\n")
class TestSyslogHandler(unittest.TestCase):
config1 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=NOTSET
handlers=hand1
[handler_hand1]
class=pytiger.logging.syslog.SyslogHandler
level=NOTSET
formatter=form1
args=()
[formatter_form1]
class=pytiger.logging.syslog.SyslogFormatter
format=%(levelname)s:%(name)s:%(message)s
datefmt=
"""
config2 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=
[logger_root]
level=NOTSET
handlers=hand1
[handler_hand1]
class=pytiger.logging.syslog.SyslogHandler
level=NOTSET
args=("config2",)
"""
def apply_config(self, conf):
logging.config.fileConfig(StringIO(textwrap.dedent(conf)))
def test_openlog_default(self):
# In Python 2.6 ident must be a string so we need to do what
# SyslogHandler.__init__() does to get an ident string.
ident = os.path.basename(sys.argv[0])
with patch('syslog.openlog') as openlog:
self.apply_config(self.config1)
openlog.assert_called_once_with(
ident, syslog.LOG_PID, syslog.LOG_USER)
def test_openlog_ident(self):
with patch('syslog.openlog') as openlog:
self.apply_config(self.config2)
openlog.assert_called_once_with(
'config2', syslog.LOG_PID, syslog.LOG_USER)
def test_logging(self):
self.apply_config(self.config1)
logger = logging.getLogger()
handler = logger.handlers[0]
with patch('syslog.syslog') as _syslog:
logger.critical('something critical')
_syslog.assert_called_once_with(
syslog.LOG_USER | syslog.LOG_CRIT,
"CRITICAL:root:something critical")
with patch('syslog.syslog') as _syslog:
logger.warning('something you should know')
_syslog.assert_called_once_with(
syslog.LOG_USER | syslog.LOG_WARNING,
"WARNING:root:something you should know")
with patch('syslog.syslog') as _syslog:
logger.debug('the authors may want to know...')
_syslog.assert_called_once_with(
syslog.LOG_USER | syslog.LOG_DEBUG,
"DEBUG:root:the authors may want to know...")
self.assertTrue(isinstance(handler, SyslogHandler))
handler.facility = 'uucp'
with patch('syslog.syslog') as _syslog:
logger.info('something happened')
_syslog.assert_called_once_with(
syslog.LOG_UUCP | syslog.LOG_INFO,
"INFO:root:something happened")
| 29.116883 | 77 | 0.597532 | 746 | 6,726 | 5.234584 | 0.253351 | 0.053009 | 0.023047 | 0.03073 | 0.447119 | 0.394366 | 0.337004 | 0.29219 | 0.26402 | 0.26402 | 0 | 0.013308 | 0.285013 | 6,726 | 230 | 78 | 29.243478 | 0.798503 | 0.057538 | 0 | 0.385057 | 0 | 0 | 0.242298 | 0.047725 | 0 | 0 | 0 | 0 | 0.103448 | 1 | 0.086207 | false | 0 | 0.074713 | 0 | 0.195402 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8702bbe17fb7f3cc4799ef21147a44f8e6dc7279 | 539 | py | Python | icons/create_dark_icons.py | zerorock1312/lt-maker-master | 82f733683f9dba763a5de8567c41fd7cbcfb0173 | [
"MIT"
] | null | null | null | icons/create_dark_icons.py | zerorock1312/lt-maker-master | 82f733683f9dba763a5de8567c41fd7cbcfb0173 | [
"MIT"
] | null | null | null | icons/create_dark_icons.py | zerorock1312/lt-maker-master | 82f733683f9dba763a5de8567c41fd7cbcfb0173 | [
"MIT"
] | null | null | null | import os, glob
from PIL import Image
# Creats dark icons
icon_folder = 'icons'
new_folder = 'dark_icons'
if not os.path.exists(new_folder):
os.mkdir(new_folder)
for icon in glob.glob(icon_folder + '/*.png'):
im = Image.open(icon)
icon_name = os.path.split(icon)[-1]
print(icon_name)
r, g, b, a = im.split()
def invert(image):
return image.point(lambda p: 255 - p)
r, g, b = map(invert, (r, g, b))
im_invert = Image.merge(im.mode, (r, g, b, a))
im_invert.save(new_folder + '/' + icon_name)
| 23.434783 | 50 | 0.627087 | 91 | 539 | 3.582418 | 0.450549 | 0.110429 | 0.03681 | 0.02454 | 0.03681 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009479 | 0.217069 | 539 | 22 | 51 | 24.5 | 0.763033 | 0.03154 | 0 | 0 | 0 | 0 | 0.042308 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.125 | 0.0625 | 0.25 | 0.0625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8704b3f35fdc2c2c654da64eb433c750b5fce562 | 1,244 | py | Python | analysis/test.py | shiyxg/tremor | 18c4efa8104fe2aba9789488aeca200b6fa143e5 | [
"Apache-2.0"
] | null | null | null | analysis/test.py | shiyxg/tremor | 18c4efa8104fe2aba9789488aeca200b6fa143e5 | [
"Apache-2.0"
] | null | null | null | analysis/test.py | shiyxg/tremor | 18c4efa8104fe2aba9789488aeca200b6fa143e5 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
from analysis.getdata import Wave
from analysis.tremor import Tremor
from analysis.earthquake import Event
a = Tremor(chn=4)
b = Wave()
index = 102
a_tremor = a.tremor[index]
data = []
a_tremor['station'].sort(key=lambda x: x[1], reverse=True)
print(a_tremor)
duration = 3600
shift = -duration/2
for i in a_tremor['station']:
sac = b.get_waveform(a_tremor, duration=duration, shift=shift, station=i[0])[0]
sac = sac-sac.mean()
wave_data = sac*1e7
plt.plot(np.linspace(0, duration, len(wave_data)), wave_data+i[1], label=i[0])
plt.axvline(x=-shift)
plt.legend(bbox_to_anchor=(1, 1), loc=2, borderaxespad=0.)
plt.figure()
event = Event(4)
print(len(event.event))
index = 102
a_event = event.event[index]
data = []
a_event['station'].sort(key=lambda x: x[1], reverse=True)
print(a_event)
duration = 3600
shift = -duration/2
for i in a_event['station']:
sac = b.get_waveform(a_event, duration=duration, shift=shift, station=i[0])[0]
sac = sac-sac.mean()
wave_data = sac/sac.max()
plt.plot(np.linspace(0, duration, len(wave_data)), wave_data+i[1], label=i[0])
plt.axvline(x=-shift)
plt.legend(bbox_to_anchor=(1, 1), loc=2, borderaxespad=0.)
plt.show() | 23.037037 | 83 | 0.699357 | 214 | 1,244 | 3.962617 | 0.266355 | 0.057783 | 0.021226 | 0.04717 | 0.636792 | 0.636792 | 0.582547 | 0.582547 | 0.582547 | 0.504717 | 0 | 0.037244 | 0.136656 | 1,244 | 54 | 84 | 23.037037 | 0.752328 | 0 | 0 | 0.410256 | 0 | 0 | 0.02249 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.128205 | 0 | 0.128205 | 0.076923 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
87074f9d5174826c16ba536e838df32c5a51116a | 7,036 | py | Python | prcoords.py | gumblex/naivegis | df94dcbf4648217a5a53c7fe265571b40459486f | [
"MIT"
] | null | null | null | prcoords.py | gumblex/naivegis | df94dcbf4648217a5a53c7fe265571b40459486f | [
"MIT"
] | null | null | null | prcoords.py | gumblex/naivegis | df94dcbf4648217a5a53c7fe265571b40459486f | [
"MIT"
] | null | null | null | '''
People's Rectified [[T:Coord|Coordinates]]
@file Utils for inserting valid WGS-84 coords from GCJ-02/BD-09 input
@author User:Artoria2e5
@url https://github.com/Artoria2e5/PRCoords
@see [[:en:GCJ-02]]
@see https://github.com/caijun/geoChina (GPLv3)
@see https://github.com/googollee/eviltransform (MIT)
@see https://on4wp7.codeplex.com/SourceControl/changeset/view/21483#353936 (Anonymous)
@see https://github.com/zxteloiv/pycoordtrans (BSD-3)
@license CC0
To the greatest extent possible, this implementation of obfuscations designed
in hope that they will screw y'all up is dedicated into the public domain
under CC0 1.0 <https://creativecommons.org/publicdomain/zero/1.0/>.
Happy geotagging/ingressing/whatever.
To make my FSF membership shine brighter, this conversion implementation is
additionally licensed under GPLv3+:
@license GPLv3+
@copyright 2016 Mingye Wang (User:Artoria2e5)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import math
import warnings
import collections
# Krasovsky 1940 ellipsoid parameters (the ellipsoid GCJ-02 mixes in)
# @const
GCJ_A = 6378245  # semi-major axis, in meters
GCJ_EE = 0.00669342162296594323  # f = 1/298.3; e^2 = 2*f - f**2

# Epsilon to use for "exact" iterations (degrees).
# Wanna troll? Use Number.EPSILON. 1e-13 in 15 calls for gcj.
# @const
PRC_EPS = 1e-5

# Baidu's artificial deviations (degrees), added on top of GCJ-02
# @const
BD_DLAT = 0.0060
BD_DLON = 0.0065

# Mean Earth Radius, in meters (used by haversine distance)
# @const
EARTH_R = 6371000
class Coords(collections.namedtuple('Coords', 'lat lon')):
    """An immutable (lat, lon) pair with vector-style helpers."""

    def __add__(self, other):
        """Element-wise sum (overrides namedtuple's tuple concatenation)."""
        return Coords(self.lat + other.lat, self.lon + other.lon)

    def __sub__(self, other):
        """Element-wise difference of two coordinate pairs."""
        return Coords(self.lat - other.lat, self.lon - other.lon)

    def __abs__(self):
        # BUG FIX: __abs__ is a unary protocol method; the original
        # `def __abs__(self, other)` signature made abs(coords) raise
        # TypeError (missing argument). The spurious parameter was never used.
        return math.hypot(*self)

    def error(self, other):
        """Chebyshev (max per-axis absolute) difference between two points."""
        return max(abs(self.lat - other.lat), abs(self.lon - other.lon))

    def distance(self, other):
        '''
        Distance for haversine method; suitable over short distances like
        conversion deviation checking
        '''
        hav = lambda theta: math.sin(theta / 2) ** 2
        delta = self - other
        return 2 * EARTH_R * math.asin(math.sqrt(
            hav(math.radians(delta.lat)) +
            math.cos(math.radians(self.lat)) *
            math.cos(math.radians(other.lat)) *
            hav(math.radians(delta.lon))
        ))
def sanity_in_china_p(coords):
    """Rough bounding-box check: is the point plausibly inside China?"""
    if not (0.8293 <= coords.lat <= 55.8271):
        return False
    return 72.004 <= coords.lon <= 137.8347
def wgs_gcj(wgs, check_china=True):
    """WGS-84 -> GCJ-02 forward obfuscation.

    Accepts any (lat, lon) 2-sequence; returns a Coords. When check_china is
    true and the point falls outside the rough China bounding box, the input
    is returned unchanged (with a warning), since GCJ-02 only applies inside
    China.
    """
    wgs = Coords(*wgs)
    if check_china and not sanity_in_china_p(wgs):
        warnings.warn('Non-Chinese coords found, returning as-is: %r' % (wgs,))
        return wgs
    x, y = wgs.lon - 105, wgs.lat - 35
    # These distortion functions accept (x = lon - 105, y = lat - 35).
    # They return distortions in terms of arc lengths, in meters.
    # In other words, you can pretty much figure out how much you will be off
    # from WGS-84 just through evaulating them...
    #
    # For example, at the (mapped) center of China (105E, 35N), you get a
    # default deviation of <300, -100> meters.
    dLat_m = (-100 + 2 * x + 3 * y + 0.2 * y * y + 0.1 * x * y +
              0.2 * math.sqrt(abs(x)) + (
                  2 * math.sin(x * 6 * math.pi) + 2 * math.sin(x * 2 * math.pi) +
                  2 * math.sin(y * math.pi) + 4 * math.sin(y / 3 * math.pi) +
                  16 * math.sin(y / 12 * math.pi) + 32 * math.sin(y / 30 * math.pi)
              ) * 20 / 3)
    dLon_m = (300 + x + 2 * y + 0.1 * x * x + 0.1 * x * y +
              0.1 * math.sqrt(abs(x)) + (
                  2 * math.sin(x * 6 * math.pi) + 2 * math.sin(x * 2 * math.pi) +
                  2 * math.sin(x * math.pi) + 4 * math.sin(x / 3 * math.pi) +
                  15 * math.sin(x / 12 * math.pi) + 30 * math.sin(x / 30 * math.pi)
              ) * 20 / 3)
    radLat = math.radians(wgs.lat)
    # Convert the meter-valued distortions to degrees using the per-degree
    # arc lengths of the Krasovsky 1940 ellipsoid at this latitude.
    magic = 1 - GCJ_EE * math.pow(math.sin(radLat), 2)  # just a common expr
    # [[:en:Latitude#Length_of_a_degree_of_latitude]]
    lat_deg_arclen = math.radians((GCJ_A * (1 - GCJ_EE)) * math.pow(magic, 1.5))
    # [[:en:Longitude#Length_of_a_degree_of_longitude]]
    lon_deg_arclen = math.radians(GCJ_A * math.cos(radLat) / math.sqrt(magic))
    # The screwers pack their deviations into degrees and disappear.
    # Note how they are mixing WGS-84 and Krasovsky 1940 ellipsoids here...
    return Coords(wgs.lat + (dLat_m / lat_deg_arclen),
                  wgs.lon + (dLon_m / lon_deg_arclen))
def gcj_wgs(gcj, check_china=True):
    '''rev_transform_rough; accuracy ~2e-6 deg (meter-level)'''
    gcj = Coords(*gcj)
    # One-shot inverse: estimate the distortion at the GCJ point itself and
    # subtract it. The distortion field varies slowly, so this is close.
    distortion = wgs_gcj(gcj, check_china) - gcj
    return gcj - distortion
def gcj_bd(gcj, _dummy=False):
    '''GCJ-02 -> BD-09 forward transform (Baidu's extra obfuscation layer).'''
    lat, lon = gcj
    # trivia: pycoordtrans actually describes how these values are calculated
    radius = math.sqrt(lon * lon + lat * lat) + 0.00002 * math.sin(math.radians(lat) * 3000)
    angle = math.atan2(lat, lon) + 0.000003 * math.cos(math.radians(lon) * 3000)
    # Hard-coded default deviations again!
    return Coords(radius * math.sin(angle) + BD_DLAT,
                  radius * math.cos(angle) + BD_DLON)
# Yes, we can implement a "precise" one too.
def bd_gcj(bd, _dummy=False):
    '''accuracy ~1e-7 deg (decimeter-level; exceeds usual data accuracy)'''
    bd = Coords(*bd)
    # Undo Baidu's constant offsets first...
    lon = bd.lon - BD_DLON
    lat = bd.lat - BD_DLAT
    # ...then invert the polar-coordinate perturbation: same magic constants
    # as gcj_bd, with the signs flipped.
    radius = math.sqrt(lon * lon + lat * lat) - 0.00002 * math.sin(math.radians(lat) * 3000)
    angle = math.atan2(lat, lon) - 0.000003 * math.cos(math.radians(lon) * 3000)
    return Coords(radius * math.sin(angle), radius * math.cos(angle))
def bd_wgs(bd, check_china=True):
    """BD-09 -> WGS-84 (rough), via the GCJ-02 intermediate."""
    gcj = bd_gcj(bd)
    return gcj_wgs(gcj, check_china)
def wgs_bd(bd, check_china=True):
    """WGS-84 -> BD-09, via the GCJ-02 intermediate."""
    gcj = wgs_gcj(bd, check_china)
    return gcj_bd(gcj)
def _bored(fwd, rev):
    '''
    generic "bored function" factory, Caijun 2014
    gcj: 4 calls to wgs_gcj; ~0.1mm acc
    '''
    def rev_bored(bad, check_china=True):
        """Iteratively refine rev()'s rough inverse until the estimate stops
        moving (within PRC_EPS) or 10 refinements have been made."""
        guess = rev(bad)
        bad = Coords(*bad)
        previous = bad
        for _ in range(10):
            if guess.error(previous) <= PRC_EPS:
                break  # fixed point reached
            previous = guess
            guess = guess - (fwd(guess, False) - bad)
        return guess
    return rev_bored
# Precise functions using caijun 2014 method
#
# Why "bored"? Because they usually exceed source data accuracy -- the
# original GCJ implementation contains noise from a linear-modulo PRNG,
# and Baidu seems to do similar things with their API too.
gcj_wgs_bored = _bored(wgs_gcj, gcj_wgs)  # GCJ-02 -> WGS-84, iterative
bd_gcj_bored = _bored(gcj_bd, bd_gcj)  # BD-09 -> GCJ-02, iterative
bd_wgs_bored = _bored(wgs_bd, bd_wgs)  # BD-09 -> WGS-84, iterative
| 35.356784 | 86 | 0.650796 | 1,105 | 7,036 | 4.054299 | 0.332127 | 0.028125 | 0.014286 | 0.010045 | 0.214063 | 0.160045 | 0.114286 | 0.114286 | 0.114286 | 0.114286 | 0 | 0.052583 | 0.226976 | 7,036 | 198 | 87 | 35.535354 | 0.771098 | 0.445992 | 0 | 0.068966 | 0 | 0 | 0.015275 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.16092 | false | 0 | 0.034483 | 0.08046 | 0.37931 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
87076abf6388523059ba8f7e4a5450f284673279 | 2,527 | py | Python | mosaic-generator.py | gilbertohasnofb/mosaic-generator | 7e84ef3c606a93c74c0770feaff7a9aaf326f536 | [
"MIT"
] | 1 | 2021-04-23T05:31:03.000Z | 2021-04-23T05:31:03.000Z | mosaic-generator.py | gilbertohasnofb/mosaic-generator | 7e84ef3c606a93c74c0770feaff7a9aaf326f536 | [
"MIT"
] | null | null | null | mosaic-generator.py | gilbertohasnofb/mosaic-generator | 7e84ef3c606a93c74c0770feaff7a9aaf326f536 | [
"MIT"
] | 1 | 2021-04-23T05:31:20.000Z | 2021-04-23T05:31:20.000Z | #!/usr/bin/env python3
# Author: Gilberto Agostinho <gilbertohasnofb@gmail.com>
# https://github.com/gilbertohasnofb/mosaic-generator
from PIL import Image, ImageDraw, ImageFont
import os
import random
def get_images(input_folder):
    """Return the paths of all .jpg files found directly in *input_folder*."""
    if not input_folder.endswith('/'):
        input_folder += '/'
    return [input_folder + filename
            for filename in os.listdir(input_folder)
            if filename.endswith('.jpg')]
def generate_mosaic(source_images,
                    n_columns,
                    n_rows,
                    border=0,
                    border_colour=(255, 255, 255),
                    randomise=False,
                    output_folder='./'):
    """Creates a random mosaic from input images
    """
    # All tiles are assumed to share the first image's dimensions.
    tile_w, tile_h = Image.open(source_images[0]).size
    canvas_w = tile_w * n_columns + border * (n_columns + 1)
    canvas_h = tile_h * n_rows + border * (n_rows + 1)
    canvas = Image.new('RGB',
                       (canvas_w, canvas_h),
                       color=border_colour,  # border colour fills the gaps
                       )
    if randomise:
        random.shuffle(source_images)
    # Lay tiles out row-major: column = index % n_columns, row = index // n_columns.
    for index, image_filename in enumerate(source_images):
        col = index % n_columns
        row = index // n_columns
        pos_x = col * tile_w + border * (col + 1)
        pos_y = row * tile_h + border * (row + 1)
        canvas.paste(Image.open(image_filename), (pos_x, pos_y))
    if not output_folder.endswith('/'):
        output_folder += '/'
    canvas.save(output_folder + 'mosaic.jpg',
                format='JPEG',
                subsampling=0,
                quality=100,
                )
def main():
    """Build a mosaic from the images in ./input using hard-coded settings."""
    input_folder = './input'
    source_images = get_images(input_folder)
    generate_mosaic(
        source_images,
        n_columns=6,  # number of columns of images
        n_rows=7,  # number of rows of images
        border=50,  # in pixels; use 0 for no border
        border_colour=(255, 255, 255),  # rgb value
        randomise=True,  # randomises the order of the input images
        output_folder='./',
    )


if __name__ == '__main__':
    main()
| 30.445783 | 64 | 0.548081 | 266 | 2,527 | 4.973684 | 0.334586 | 0.081633 | 0.042328 | 0.030234 | 0.131519 | 0.068027 | 0.068027 | 0.068027 | 0.068027 | 0 | 0 | 0.020885 | 0.355758 | 2,527 | 82 | 65 | 30.817073 | 0.791769 | 0.139691 | 0 | 0.064516 | 0 | 0 | 0.020418 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.048387 | false | 0 | 0.048387 | 0 | 0.112903 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
870a014839ae5f25613da0770f86f1dc0952face | 3,586 | py | Python | src/arti/types/pydantic.py | artigraph/artigraph | 8904dbd708dcf961ab64a5b5e523828accb1a5fa | [
"Apache-2.0"
] | 13 | 2021-12-29T05:25:01.000Z | 2022-02-28T04:50:47.000Z | src/arti/types/pydantic.py | artigraph/artigraph | 8904dbd708dcf961ab64a5b5e523828accb1a5fa | [
"Apache-2.0"
] | 41 | 2021-12-29T05:27:16.000Z | 2022-03-28T00:38:45.000Z | src/arti/types/pydantic.py | artigraph/artigraph | 8904dbd708dcf961ab64a5b5e523828accb1a5fa | [
"Apache-2.0"
] | null | null | null | from typing import Any, Protocol
from pydantic import BaseModel
from pydantic.fields import ModelField
from pydantic.fields import UndefinedType as _PydanticUndefinedType
from arti.internal.type_hints import lenient_issubclass
from arti.types import Struct, Type, TypeAdapter, TypeSystem, _ScalarClassTypeAdapter
from arti.types.python import python_type_system
pydantic_type_system = TypeSystem(key="pydantic")
class _PostFieldConversionHook(Protocol):
    """Structural type for hooks applied to a Type after field conversion.

    Implementations receive the converted field type plus the originating
    field's name and required flag, and return a (possibly adjusted) Type.
    """

    def __call__(self, type_: Type, *, name: str, required: bool) -> Type:
        raise NotImplementedError()
def get_post_field_conversion_hook(type_: Any) -> _PostFieldConversionHook:
    """Return the post-field-conversion hook attached to *type_*, if any.

    Falls back to an identity hook (returns the Type unchanged) when the
    object does not carry ``_pydantic_type_system_post_field_conversion_hook_``.
    """
    sentinel = object()
    hook = getattr(type_, "_pydantic_type_system_post_field_conversion_hook_", sentinel)
    if hook is not sentinel:
        return hook  # type: ignore
    return lambda type_, *, name, required: type_
@pydantic_type_system.register_adapter
class BaseModelAdapter(TypeAdapter):
    """Converts between pydantic ``BaseModel`` subclasses and artigraph ``Struct``s."""

    artigraph = Struct
    system = BaseModel

    @staticmethod
    def _field_to_artigraph(field: ModelField, *, hints: dict[str, Any]) -> Type:
        # Convert the field's (outer) annotation first, then let the converted
        # type adjust itself via its post-field-conversion hook.
        converted = python_type_system.to_artigraph(field.outer_type_, hints=hints)
        hook = get_post_field_conversion_hook(converted)
        # pydantic may leave ``required`` as an Undefined sentinel; treat that
        # the same as "required".
        is_required = (
            True
            if isinstance(field.required, _PydanticUndefinedType)
            else field.required
        )
        return hook(converted, name=field.name, required=is_required)

    @classmethod
    def to_artigraph(cls, type_: type[BaseModel], *, hints: dict[str, Any]) -> Type:
        field_types = {
            f.name: cls._field_to_artigraph(f, hints=hints)
            for f in type_.__fields__.values()
        }
        return Struct(name=type_.__name__, fields=field_types)

    @classmethod
    def matches_system(cls, type_: Any, *, hints: dict[str, Any]) -> bool:
        return lenient_issubclass(type_, cls.system)

    @classmethod
    def to_system(cls, type_: Type, *, hints: dict[str, Any]) -> type[BaseModel]:
        assert isinstance(type_, Struct)
        # Nested Structs become nested pydantic models; everything else is
        # materialized through the plain python type system.
        annotations = {
            field_name: (
                pydantic_type_system.to_system(field_type, hints=hints)
                if isinstance(field_type, Struct)
                else python_type_system.to_system(field_type, hints=hints)
            )
            for field_name, field_type in type_.fields.items()
        }
        return type(f"{type_.name}", (BaseModel,), {"__annotations__": annotations})
# Extend the python_type_system to handle BaseModel. This simplifies conversion of nested models
@python_type_system.register_adapter
class _PythonBaseModelAdapter(_ScalarClassTypeAdapter):
    """python_type_system adapter that defers BaseModel handling to BaseModelAdapter."""

    artigraph = Struct
    system = BaseModel
    priority = int(1e8)  # Beneath the Optional Adapter

    @classmethod
    def matches_artigraph(cls, type_: Type, *, hints: dict[str, Any]) -> bool:
        # Avoid converting a python type to a BaseModel unless explicitly
        # requested via the "<key>.is_model" hint.
        hint_key = f"{pydantic_type_system.key}.is_model"
        return super().matches_artigraph(type_, hints=hints) and hints.get(hint_key, False)

    @classmethod
    def to_artigraph(cls, type_: Any, *, hints: dict[str, Any]) -> Type:
        return BaseModelAdapter.to_artigraph(type_, hints=hints)

    @classmethod
    def matches_system(cls, type_: Any, *, hints: dict[str, Any]) -> bool:
        return BaseModelAdapter.matches_system(type_, hints=hints)

    @classmethod
    def to_system(cls, type_: Type, *, hints: dict[str, Any]) -> Any:
        return BaseModelAdapter.to_system(type_, hints=hints)
| 35.50495 | 96 | 0.658115 | 395 | 3,586 | 5.681013 | 0.243038 | 0.04902 | 0.042781 | 0.053476 | 0.305704 | 0.227718 | 0.18672 | 0.138146 | 0.098039 | 0.098039 | 0 | 0.000745 | 0.251255 | 3,586 | 100 | 97 | 35.86 | 0.835009 | 0.058282 | 0 | 0.168831 | 0 | 0 | 0.035291 | 0.024911 | 0 | 0 | 0 | 0 | 0.012987 | 1 | 0.12987 | false | 0 | 0.090909 | 0.077922 | 0.454545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
870a4445ee9194cb869001089f19294f1768075b | 4,355 | py | Python | cfg.py | ceciliaromaro/PDCM_NetPyNE | aa9f5208a17904d1fba2211b234f22f45739c041 | [
"MIT"
] | 5 | 2018-07-16T23:31:15.000Z | 2020-08-27T14:00:04.000Z | cfg.py | ceciliaromaro/PDCM_NetPyNE | aa9f5208a17904d1fba2211b234f22f45739c041 | [
"MIT"
] | 1 | 2018-09-23T22:46:55.000Z | 2018-09-23T22:46:55.000Z | cfg.py | ceciliaromaro/PDCM_NetPyNE | aa9f5208a17904d1fba2211b234f22f45739c041 | [
"MIT"
] | null | null | null | '''
NetPyNE version of Potjans and Diesmann thalamocortical network
cfg.py -- contains the simulation configuration (cfg object)
'''
from netpyne import specs

############################################################
#
# SIMULATION CONFIGURATION
#
############################################################

# NOTE: this module only *configures* the simulation; the network itself is
# defined in netParams.py (referenced by several comments below).
cfg = specs.SimConfig() # object of class SimConfig to store simulation configuration

############################################################
# Run options
############################################################

cfg.seeds['stim']=3
cfg.duration = 1*1e3 #6*1e2 # Duration of the simulation, in ms
cfg.dt = 0.025 # Internal integration timestep to use
cfg.verbose = 0 # Show detailed messages
cfg.seeds['m'] = 123
cfg.printPopAvgRates = True
cfg.printRunTime = 1

### Options to save memory in large-scale simulations
cfg.gatherOnlySimData = False #Original
# set the following 3 options to False when running large-scale versions of the model (>50% scale) to save memory
cfg.saveCellSecs = True
cfg.saveCellConns = True
cfg.createPyStruct = True

###########################################################
# Network Options
###########################################################

# Mapping of option combinations to figures/tables of the original article:
# DC=True ; TH=False; Balanced=True => Reproduce Figure 7 A1 and A2
# DC=False; TH=False; Balanced=False => Reproduce Figure 7 B1 and B2
# DC=False ; TH=False; Balanced=True => Reproduce Figure 8 A, B, C and D
# DC=False ; TH=False; Balanced=True and run to 60 s to => Table 6
# DC=False ; TH=True; Balanced=True => Figure 10A. But I want a partial reproduce so I guess Figure 10C is not necessary

# Size of Network. Adjust this constants, please!
cfg.ScaleFactor = 0.10 # 1.0 = 80.000
# External input DC or Poisson
cfg.DC = False #True = DC // False = Poisson
# Thalamic input in 4th and 6th layer on or off
cfg.TH = False #True = on // False = off
# Balanced and Unbalanced external input as PD article
cfg.Balanced = False #True=Balanced // False=Unbalanced

# Label encodes the main options so output filenames are self-describing.
cfg.simLabel = 'pd_scale-%s_DC-%d_TH-%d_Balanced-%d_dur-%d'%(str(cfg.ScaleFactor), int(cfg.DC), int(cfg.TH), int(cfg.Balanced), int(cfg.duration/1e3))

###########################################################
# Recording and plotting options
###########################################################

cfg.recordStep = 0.1 # Step size in ms to save data (e.g. V traces, LFP, etc)
cfg.filename = cfg.simLabel # Set file output name
cfg.saveFolder = 'data/'
cfg.savePickle = True # Save params, network and sim output to pickle file
cfg.saveJson = False
cfg.recordStim = False
cfg.printSynsAfterRule = False
cfg.recordCellsSpikes = ['L2e', 'L2i', 'L4e', 'L4i', 'L5e', 'L5i','L6e', 'L6i'] # record only spikes of cells (not ext stims)

# raster plot (include update in netParams.py)
cfg.analysis['plotRaster']={'include': [], 'timeRange': [100,600], 'popRates' : False, 'figSize' : (6,7),
    'labels':'overlay', 'orderInverse': True, 'fontSize':16, 'showFig':False, 'saveFig': True}

# statistics plot (include update in netParams.py)
cfg.analysis['plotSpikeStats'] = {'include' : [], 'stats' : ['rate'], 'legendLabels':cfg.recordCellsSpikes,
    'timeRange' : [100,600], 'fontSize': 16, 'figSize': (6,9),'showFig':False, 'saveFig': True}

## Additional NetPyNE analysis (kept for reference; uncomment to enable)
# plot traces
#cfg.recordTraces = {'m': {'var': 'm', 'conds':{'pop': ['L2e', 'L2i']}}}
#cfg.analysis['plotTraces'] = {'include':[('L2e', [0, 1, 2, 3]),('L2i', [0, 1])], 'timeRange': [0,100],'overlay': True,'oneFigPer': 'trace', 'showFig':False, 'saveFig': 'traceEscala3'+str(ScaleFactor)+'.png'}

# plot 2D net structure
# cfg.analysis['plot2Dnet'] = {'include': cfg.recordCellsSpikes, 'saveFig': True, 'figSize': (10,15)}

# plot convergence connectivity as 2D
# cfg.analysis['plotConn'] = {'includePre': cfg.recordCellsSpikes, 'includePost': cfg.recordCellsSpikes, 'feature': 'convergence', \
# 'synOrConn': 'conn', 'graphType': 'bar', 'saveFig': True, 'figSize': (15, 9)}

# plot firing rate spectrogram (run for 4 sec)
# cfg.analysis['plotRateSpectrogram'] = {'include': ['allCells'], 'saveFig': True, 'figSize': (15, 7)}

# plot granger causality (run for 4 sec)
# cfg.analysis.granger = {'cells1': ['L2i'], 'cells2': ['L4e'], 'label1': 'L2i', 'label2': 'L4e', 'timeRange': [500,4000], 'saveFig': True, 'binSize': 4}
| 42.281553 | 208 | 0.60597 | 532 | 4,355 | 4.951128 | 0.430451 | 0.029233 | 0.022779 | 0.02164 | 0.093774 | 0.085421 | 0.031131 | 0.031131 | 0 | 0 | 0 | 0.032976 | 0.143513 | 4,355 | 102 | 209 | 42.696078 | 0.67319 | 0.580941 | 0 | 0 | 0 | 0 | 0.189001 | 0.032533 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.033333 | 0 | 0.033333 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
870af336a7da1b721781dfe570080a32228f88dc | 2,523 | py | Python | deepobs/pytorch/testproblems/svhn_wrn164.py | abahde/DeepOBS | 7ba549fe2ed77d6458a20ae9e8971df95830d821 | [
"MIT"
] | 7 | 2019-09-06T04:51:14.000Z | 2020-05-12T09:05:47.000Z | deepobs/pytorch/testproblems/svhn_wrn164.py | abahde/DeepOBS | 7ba549fe2ed77d6458a20ae9e8971df95830d821 | [
"MIT"
] | 16 | 2019-09-06T10:58:31.000Z | 2020-07-08T09:22:06.000Z | deepobs/pytorch/testproblems/svhn_wrn164.py | abahde/DeepOBS | 7ba549fe2ed77d6458a20ae9e8971df95830d821 | [
"MIT"
] | 5 | 2019-07-24T14:20:15.000Z | 2020-10-14T13:14:08.000Z | import torch
from torch import nn
from .testproblems_modules import net_wrn
from ..datasets.svhn import svhn
from .testproblem import TestProblem
class svhn_wrn164(TestProblem):
    """DeepOBS test problem: Wide Residual Network 16-4 on SVHN.

    Architecture details are given in the original WRN paper
    (https://arxiv.org/abs/1605.07146). L2 weight decay is applied to the
    weights (but not the biases) and defaults to ``5e-4``.

    Training settings recommended in the original paper: ``batch size = 128``,
    ``num_epochs = 160`` with Momentum (:math:`\\mu = 0.9`), initial learning
    rate ``0.01``, decayed by ``0.1`` after ``80`` and ``120`` epochs.

    Args:
        batch_size (int): Batch size to use.
        weight_decay (float): Weight decay (L2-regularization) factor applied
            to the weights but not the biases. Defaults to ``5e-4``.
    """

    def __init__(self, batch_size, weight_decay=0.0005):
        """Create a new WRN 16-4 test problem instance on SVHN.

        Args:
            batch_size (int): Batch size to use.
            weight_decay (float): Weight decay (L2-regularization) factor
                applied to the weights but not the biases. Defaults to ``5e-4``.
        """
        super().__init__(batch_size, weight_decay)

    def set_up(self):
        """Instantiate data loader, loss, network and regularization groups."""
        self.data = svhn(self._batch_size, data_augmentation=True)
        self.loss_function = nn.CrossEntropyLoss
        self.net = net_wrn(num_outputs=10, num_residual_blocks=2, widening_factor=4)
        self.net.to(self._device)
        self.regularization_groups = self.get_regularization_groups()

    def get_regularization_groups(self):
        """Group the network parameters by their L2 regularization factor.

        Returns:
            dict: Maps regularization factor to a list of parameters.

        NOTE(review): if ``weight_decay`` is exactly 0.0 the two dict keys
        collide and all parameters end up in one group — confirm intended.
        """
        no, l2 = 0.0, self._weight_decay
        group_dict = {no: [], l2: []}
        for name, param in self.net.named_parameters():
            # Only weights of dense/conv layers are penalized; biases and
            # batch-norm parameters stay unregularized.
            is_penalized = 'weight' in name and ('dense' in name or 'conv' in name)
            group_dict[l2 if is_penalized else no].append(param)
        return group_dict
| 38.815385 | 112 | 0.66191 | 348 | 2,523 | 4.652299 | 0.399425 | 0.067943 | 0.014824 | 0.020383 | 0.188388 | 0.188388 | 0.188388 | 0.188388 | 0.188388 | 0.168005 | 0 | 0.033508 | 0.242965 | 2,523 | 64 | 113 | 39.421875 | 0.814136 | 0.50218 | 0 | 0 | 0 | 0 | 0.013286 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.130435 | false | 0 | 0.217391 | 0 | 0.434783 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
870ba76491af5b76385f767f2ce33f509662f230 | 976 | py | Python | inference.py | juanitorduz/ml_prod_tutorial | ef60714392d4ec76f43f25bb157009a1cfb35300 | [
"MIT"
] | 10 | 2020-01-30T00:35:14.000Z | 2021-04-28T04:00:08.000Z | inference.py | juanitorduz/ml_prod_tutorial | ef60714392d4ec76f43f25bb157009a1cfb35300 | [
"MIT"
] | 1 | 2020-01-24T21:33:05.000Z | 2020-01-24T21:33:05.000Z | inference.py | juanitorduz/ml_prod_tutorial | ef60714392d4ec76f43f25bb157009a1cfb35300 | [
"MIT"
] | 3 | 2020-04-09T15:45:16.000Z | 2021-03-27T00:51:35.000Z | import os
from joblib import load
from envs import envs
from utils import load_data
# Model artifact locations, resolved from the environment configuration.
MODEL_DIR = envs['MODEL_DIR']
MODEL_FILE = envs['MODEL_FILE']
METADATA_FILE = envs['METADATA_FILE']
S3_BUCKET = envs['S3BUCKET']

# Local filesystem paths for the serialized model and its metadata.
MODEL_PATH = os.path.join(MODEL_DIR, MODEL_FILE)
METADATA_PATH = os.path.join(MODEL_DIR, METADATA_FILE)

# S3 location of the training data used by this tutorial.
S3_DATA_PATH = os.path.join(
    's3://', S3_BUCKET, 'ml_prod_tutorial/data/train_data.csv'
)
def predict(data_path):
    """Generate and print model predictions for new data stored in `data_path`.

    :param data_path: Path of the new data as csv.
    :return: None
    """
    # Load the features; labels (if any) are not needed for scoring.
    X, _ = load_data(data_path)

    print('Loading model from: {}'.format(MODEL_PATH))
    ml_model = load(MODEL_PATH)

    print('Scoring observations...')
    print(ml_model.predict(X))
    return None


if __name__ == '__main__':
    predict('/data/train_data.csv')
| 25.684211 | 76 | 0.69877 | 148 | 976 | 4.337838 | 0.344595 | 0.062305 | 0.046729 | 0.065421 | 0.068536 | 0.068536 | 0 | 0 | 0 | 0 | 0 | 0.006289 | 0.185451 | 976 | 37 | 77 | 26.378378 | 0.801258 | 0.207992 | 0 | 0 | 0 | 0 | 0.206158 | 0.048193 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0.173913 | 0 | 0.26087 | 0.130435 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
870c3c914bedd0e40f7a4c08d7f0aa8f3ac75b79 | 3,045 | py | Python | localBGSviewer.py | EfilOne/LocalBGSviewer | a1f18b84f6125983ad2746e5dda632c7d8eb05a9 | [
"MIT"
] | null | null | null | localBGSviewer.py | EfilOne/LocalBGSviewer | a1f18b84f6125983ad2746e5dda632c7d8eb05a9 | [
"MIT"
] | null | null | null | localBGSviewer.py | EfilOne/LocalBGSviewer | a1f18b84f6125983ad2746e5dda632c7d8eb05a9 | [
"MIT"
] | null | null | null | from visual import *
from visual.graph import *
import wx
import os
import json
import request as r
version = '0.1a'  # app version string shown in the window title
cor_ID = 26400    # faction id matched against f['minor_faction_id'] below
def translate(X, Y, Z, offset=(114.78125, -80.71875, 4.875)):
    """Translate source coordinates into the local viewer frame.

    The default offset re-centres the map on the region of interest so the
    displayed systems cluster around the origin of the 3D scene; pass a
    different ``offset`` to re-centre elsewhere.

    :param X: source x coordinate
    :param Y: source y coordinate
    :param Z: source z coordinate
    :param offset: per-axis translation, applied as (X+dx, Y+dy, Z+dz)
    :return: tuple (t_x, t_y, t_z) of translated coordinates
    """
    dx, dy, dz = offset
    return (X + dx, Y + dy, Z + dz)
# ---------- Request latest JSON digest from EliteBGS API ----------
folder = './data/'
if not os.path.exists(folder):
    os.makedirs(folder)
# Cache the two API responses to disk before parsing them below.
with open('./data/cor_systems.json','w') as f:
    f.write(r.cor_request())
with open('./data/losp_systems.json',"w") as f:
    f.write(r.losp_request())

# --------------------- Window management area ---------------------
w = window(menus=True,title='Local BGS Viewer '+version,x=0,y=0,width=800,
    height=800,style=wx.SYSTEM_MENU | wx.CAPTION | wx.CLOSE_BOX) # Create window
starmap = display(window=w,x=0,y=0,range=2,width=800,height=800) # Define 3D display
# Draw a ground grid: minor lines every unit, brighter major lines every 10.
for x,y in zip(range(-30,30,1), range(-30,30,1)): # Generate grid
    if x % 10 or y % 10:
        g1_color = (0,0.2,0.2)
    else:
        g1_color = (0,0.4,0.4)
    curve(pos=[(x,0,-30), (x,0,30)],color=g1_color)
    curve(pos=[(-30,0,y),(30,0,y)],color=g1_color)
gdisplay(window=w) # Invoke window

# --------------- Systems + Labels generation area ----------------
# Note : VPython uses a (x,z,y) coordinates system
# NOTE(review): json.load(open(...)) never closes the file handles, and
# losp_data is loaded but not used further in this script.
cor_data = json.load(open('./data/cor_systems.json')) # load CoR data from JSON
losp_data = json.load(open('./data/losp_systems.json')) # load LOSP data from JSON
cor_systems = cor_data['docs']

# ------------------- Generate coordinates list -------------------
c = []
for coords in enumerate((sys['x'],sys['y'],sys['z']) for sys in cor_systems):
    c.append(coords[1])
cor_c = []
for sys_c in c:
    new_c = translate(sys_c[0],sys_c[1],sys_c[2]) # Run coordinates translation
    cor_c.append(new_c)

# --------------------- Generate label lists ----------------------
cor_sysn = [] # COR system names
for names in enumerate(sys['name'] for sys in cor_systems):
    cor_sysn.append(names[1])
cor_sysst = [] # COR system states
for states in enumerate(sys['state'] for sys in cor_systems):
    cor_sysst.append(states[1])
cor_sysinf =[] # COR system influences
# Keep only the influence entries belonging to the faction with cor_ID.
for factionlist in enumerate(sys['minor_faction_presences'] for sys in cor_systems):
    for f in factionlist[1]:
        if f['minor_faction_id'] == cor_ID:
            cor_sysinf.append(f['influence'])

# ----------------- Generate systems point cloud ------------------
points(pos=cor_c, size=10, color=color.red) # Define points
# Each system gets three stacked labels: name, state, influence value.
for position, name, state, inf in zip(cor_c, cor_sysn, cor_sysst, cor_sysinf):
    label(pos=position, text=name.upper(),xoffset=25,yoffset=20,
        height=12,font='sans',box=False, opacity=0.5) # Generate name label
    label(pos=position, text=state,xoffset=25,yoffset=0,
        height=12,font='sans',box=False, line=False, opacity=0.5) # Generate state label
    label(pos=position, text=str(inf),xoffset=25,yoffset=-20,
        height=12,font='sans',box=False, line=False, opacity=0.5) # Generate influence label

# Keep display active for future development
while True:
    rate(1)
| 34.602273 | 88 | 0.639737 | 492 | 3,045 | 3.855691 | 0.300813 | 0.0369 | 0.02952 | 0.023195 | 0.232472 | 0.129678 | 0.107538 | 0.107538 | 0.084344 | 0.084344 | 0 | 0.045174 | 0.149425 | 3,045 | 87 | 89 | 35 | 0.687259 | 0.250575 | 0 | 0.032258 | 0 | 0 | 0.088535 | 0.051793 | 0 | 0 | 0 | 0 | 0 | 1 | 0.016129 | false | 0 | 0.096774 | 0 | 0.129032 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
870f9369609cfc6a2831ee2c8ca77eb994d803e5 | 4,363 | py | Python | LDA_Two_Classifier.py | Xuyang-Huang/DLA-Two-Classifier | 902fb3a70189e9ea6639972cb024e2171cb3c6c3 | [
"MIT"
] | 1 | 2021-03-21T10:46:15.000Z | 2021-03-21T10:46:15.000Z | LDA_Two_Classifier.py | Xuyang-Huang/LDA-Two-Classifier | 902fb3a70189e9ea6639972cb024e2171cb3c6c3 | [
"MIT"
] | null | null | null | LDA_Two_Classifier.py | Xuyang-Huang/LDA-Two-Classifier | 902fb3a70189e9ea6639972cb024e2171cb3c6c3 | [
"MIT"
] | null | null | null | #-- coding: utf-8 --
#@Time : 2021/3/21 15:33
#@Author : HUANG XUYANG
#@Email : xhuang032@e.ntu.edu.sg
#@File : LDA_Two_Classifier.py
#@Software: PyCharm
import numpy as np
import matplotlib.pyplot as plt
import sklearn.datasets as sk_dataset
class LDATwoClassifier:
    """Fisher linear discriminant analysis (LDA) for two-class problems.

    Samples are projected onto a single discriminant direction ``w`` and
    classified by thresholding the projection. Only datasets with exactly
    two classes, labelled 0 and 1, are supported.
    """

    def __init__(self):
        self.w = None    # projection (column) vector, shape [dim, 1]
        self.thr = None  # decision threshold on the projected values

    def train(self, data, label):
        """Fit the projection vector and decision threshold.

        :param data: A Numpy array float, [n, dim].
        :param label: A Numpy array of 0/1 labels, [n].
        :return: No return.
        """
        assert len(data.shape) > 1, 'data shape should be [n, dim].'
        assert len(data) == len(label), 'label number does not match data number.'
        dim = data.shape[1]

        # Per-class sample means and pooled within-class scatter matrix.
        sample_mean = []
        scatters = np.zeros([dim, dim])
        for label_index in range(2):
            class_data = data[np.where(label == label_index)]
            mean_tmp = (np.sum(class_data, axis=0) / len(class_data))[:, np.newaxis]
            sample_mean.append(mean_tmp)
            # Sum of outer products of the centered samples, vectorized.
            centered = class_data.T - mean_tmp  # [dim, n_class]
            scatters += centered @ centered.T

        # Fisher direction: w = Sw^-1 (m0 - m1). np.linalg.solve replaces the
        # deprecated np.mat(...).I and is numerically preferable to an
        # explicit inverse.
        self.w = np.linalg.solve(scatters, sample_mean[0] - sample_mean[1])

        result = np.ravel(self.w.T @ data.T)
        order = np.argsort(result)
        result = result[order]
        label = label[order]

        # Scan all split points and keep the threshold maximizing balanced
        # accuracy (mean of sensitivity and specificity). Split index i means
        # "predict the i lowest projections as class 1".
        tp_fn = np.sum(label)
        np_tn = len(label) - tp_fn
        max_score = 0
        for i in range(len(result)):
            tp = np.sum(label[:i])
            tn = len(label[i:]) - np.sum(label[i:])
            sensitivity = tp / tp_fn
            specificity = tn / np_tn
            score = (sensitivity + specificity) / 2
            if score > max_score:
                self.thr = result[i]
                max_score = score

    def predict(self, data):
        """Predict class membership (True -> class 1).

        :param data: A Numpy array float, [n, dim].
        :return: Boolean prediction array, [n].
        """
        assert len(data.shape) > 1, 'data shape should be [n, dim].'
        result = np.ravel(self.w.T @ data.T)
        # Strict comparison: the stored threshold is the smallest projection
        # that should be predicted as class 0, so `<` (not the former `<=`)
        # matches the split scored during training.
        return result < self.thr

    def eval(self, data, label):
        """Evaluate labelled data, plot the projections and return accuracy.

        :param data: A Numpy array float, [n, dim].
        :param label: A Numpy array of 0/1 labels, [n].
        :return: Classification accuracy, float in [0, 1].
        """
        assert len(data.shape) > 1, 'data shape should be [n, dim].'
        assert len(data) == len(label), 'label number does not match data number.'
        data_0 = data[np.where(label == 0)]
        data_1 = data[np.where(label == 1)]
        result_0 = np.ravel(self.w.T @ data_0.T)
        result_1 = np.ravel(self.w.T @ data_1.T)

        # Scatter the projected values of each class side by side.
        plt.figure()
        plt.scatter(np.arange(len(result_0)), result_0, cmap='y')
        plt.scatter(np.arange(len(result_0), len(label)), result_1, cmap='g')
        plt.show()

        result = np.ravel(self.w.T @ data.T)
        pred = result < self.thr  # strict, consistent with predict()
        acc = np.sum(np.equal(pred, label)) / len(label)
        return acc
def prepare_data(proportion):
    """Load the breast-cancer dataset and split it into train/validation sets.

    :param proportion: Fraction of samples assigned to the training split.
    :return: ((train_data, train_label), (val_data, val_label), n_class).
    """
    dataset = sk_dataset.load_breast_cancer()
    data, label = dataset['data'], dataset['target']
    n_class = len(dataset['target_names'])

    # Shuffle indices so the split is random on every call.
    indices = np.arange(len(label))
    np.random.shuffle(indices)

    split_at = int(proportion * len(label))
    train_idx, val_idx = indices[:split_at], indices[split_at:]
    train_set = (data[train_idx], label[train_idx])
    val_set = (data[val_idx], label[val_idx])
    return train_set, val_set, n_class
if __name__ == '__main__':
    # Demo: 90/10 train/validation split on the breast-cancer dataset.
    (train_x, train_y), (val_x, val_y), _ = prepare_data(0.9)
    classifier = LDATwoClassifier()
    classifier.train(train_x, train_y)
    classifier.eval(val_x, val_y)  # also shows the projection scatter plot
    print(classifier.predict(val_x))
| 30.51049 | 100 | 0.584002 | 608 | 4,363 | 4.046053 | 0.243421 | 0.02561 | 0.022358 | 0.020325 | 0.270325 | 0.247561 | 0.247561 | 0.224797 | 0.213008 | 0.213008 | 0 | 0.020154 | 0.283521 | 4,363 | 142 | 101 | 30.725352 | 0.766795 | 0.14898 | 0 | 0.121951 | 0 | 0 | 0.056535 | 0 | 0 | 0 | 0 | 0 | 0.060976 | 1 | 0.060976 | false | 0 | 0.036585 | 0 | 0.146341 | 0.012195 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
871012b51302d7e987a22cc5d5ae68032648a341 | 1,505 | py | Python | database.py | MainSilent/instaAds | 3a6f03768b68eea7eb172b6d2e10f58da433916c | [
"MIT"
] | null | null | null | database.py | MainSilent/instaAds | 3a6f03768b68eea7eb172b6d2e10f58da433916c | [
"MIT"
] | null | null | null | database.py | MainSilent/instaAds | 3a6f03768b68eea7eb172b6d2e10f58da433916c | [
"MIT"
] | null | null | null | import sqlite3
# Module-wide SQLite connection and cursor shared by all DataBase methods.
conn = sqlite3.connect('Data.db')
c = conn.cursor()
class DataBase:
    """Thin data-access layer over the ``Users`` table of Data.db.

    All statements run inside ``with conn`` blocks so each call is committed
    (or rolled back on error) automatically. Values are bound with ``?``
    placeholders instead of f-string interpolation to prevent SQL injection.
    """

    def __init__(self, username, uID, send):
        self.username = username
        self.uID = uID
        self.send = send

    @classmethod
    def GetFromDB(cls):
        """Return every row of the Users table."""
        with conn:
            c.execute("SELECT * FROM Users")
            return c.fetchall()

    def GoToDB(self):
        """Insert this user into the Users table."""
        with conn:
            c.execute(
                "INSERT INTO 'main'.'Users'('ID','username','uID','send') "
                "VALUES (NULL,?,?,?)",
                (self.username, self.uID, self.send),
            )

    @classmethod
    def SendUpdate(cls, uID, value):
        """Set the ``send`` flag of the user identified by uID."""
        with conn:
            c.execute("UPDATE Users SET send = ? WHERE uID = ?", (value, uID))

    @classmethod
    def Status(cls, uID):
        """Return True if a user with this uID exists, else False."""
        with conn:
            c.execute("SELECT uID FROM Users WHERE uID = ?", (uID,))
            return c.fetchone() is not None

    @classmethod
    def Count(cls):
        """Return the total number of users."""
        with conn:
            # COUNT(*) avoids transferring every row just to measure them.
            c.execute("SELECT COUNT(*) FROM Users")
            return c.fetchone()[0]

    @classmethod
    def nCount(cls):
        """Return the number of users whose ``send`` flag is 0."""
        with conn:
            c.execute("SELECT COUNT(*) FROM Users WHERE send = 0")
            return c.fetchone()[0]

    @classmethod
    def Reset(cls):
        """Clear the ``send`` flag for every user."""
        with conn:
            c.execute("UPDATE Users SET send = 0")
        print("Reset is Done!")

    @classmethod
    def truncate(cls):
        """Delete every row from the Users table."""
        with conn:
            c.execute("DELETE FROM users")
871164f1ced4f7e6392abcdf31dadb7997607036 | 2,134 | py | Python | src/model.py | dstein64/vrapi | c1beba50a76a731d72aa575d51f446f70e05981b | [
"MIT"
] | null | null | null | src/model.py | dstein64/vrapi | c1beba50a76a731d72aa575d51f446f70e05981b | [
"MIT"
] | null | null | null | src/model.py | dstein64/vrapi | c1beba50a76a731d72aa575d51f446f70e05981b | [
"MIT"
] | null | null | null | import torch.nn as nn
import torch.nn.functional as F
class Block(nn.Module):
    """Basic two-convolution residual block (ResNet style).

    The skip connection is the identity when the shape is preserved;
    otherwise a strided 1x1 convolution plus batch norm projects the input
    to match the main path.
    """

    def __init__(self, in_channels, out_channels, stride=1):
        super(Block, self).__init__()
        self.conv1 = nn.Conv2d(
            in_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.conv2 = nn.Conv2d(
            out_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channels)
        # Projection shortcut only when spatial size or channel count changes.
        if stride != 1 or in_channels != out_channels:
            projection = nn.Conv2d(
                in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
            self.shortcut = nn.Sequential(projection, nn.BatchNorm2d(out_channels))
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        residual = self.shortcut(x)
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        return F.relu(out + residual)
class Net(nn.Module):
    """ResNet-style classifier for 32x32 RGB inputs with 10 output classes."""

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        # Four stages of two residual blocks; the first block of each later
        # stage downsamples (stride 2) and doubles the channel count.
        self.block1a = Block(64, 64, 1)
        self.block1b = Block(64, 64, 1)
        self.block2a = Block(64, 128, 2)
        self.block2b = Block(128, 128, 1)
        self.block3a = Block(128, 256, 2)
        self.block3b = Block(256, 256, 1)
        self.block4a = Block(256, 512, 2)
        self.block4b = Block(512, 512, 1)
        self.linear = nn.Linear(512, 10)

    def forward(self, x, include_penultimate=False):
        """Run the network; optionally also return the pre-logit features."""
        out = F.relu(self.bn1(self.conv1(x)))
        for block in (self.block1a, self.block1b, self.block2a, self.block2b,
                      self.block3a, self.block3b, self.block4a, self.block4b):
            out = block(out)
        out = F.avg_pool2d(out, 4)
        penultimate = out.view(out.size(0), -1)
        logits = self.linear(penultimate)
        if include_penultimate:
            return logits, penultimate
        return logits
| 36.793103 | 110 | 0.599813 | 298 | 2,134 | 4.174497 | 0.211409 | 0.057878 | 0.072347 | 0.067524 | 0.416399 | 0.351286 | 0.317524 | 0.296624 | 0.114148 | 0.114148 | 0 | 0.074502 | 0.270384 | 2,134 | 57 | 111 | 37.438596 | 0.72447 | 0 | 0 | 0.078431 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.078431 | false | 0 | 0.039216 | 0 | 0.196078 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
871242cc5ca654370937e5af550ecd15245789dd | 2,165 | py | Python | scripts/upload_training_data.py | kimito/jetbox | c765d51e6376f597aeea0cf878bced92d734d022 | [
"Apache-2.0"
] | 2 | 2021-06-20T09:41:38.000Z | 2021-07-16T06:11:53.000Z | scripts/upload_training_data.py | kimito/lunchjet | c765d51e6376f597aeea0cf878bced92d734d022 | [
"Apache-2.0"
] | null | null | null | scripts/upload_training_data.py | kimito/lunchjet | c765d51e6376f597aeea0cf878bced92d734d022 | [
"Apache-2.0"
] | null | null | null | if '__file__' in globals():
import os, sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from lunchjet import GoogleDrive
import time
import tarfile
from argparse import ArgumentParser
import re
import pathlib
def unlink_files_in(dir_path):
    """Recursively delete every regular file under *dir_path*.

    Directories (and the tree structure itself) are left in place; only
    files are removed.
    """
    for entry in dir_path.rglob('*'):
        if entry.is_file():
            entry.unlink()
def main():
    """Bundle the given files into a timestamped tarball, upload it to
    Google Drive and optionally delete the originals.
    """
    parser = ArgumentParser(description="create tarball and upload it to Google Drive")
    parser.add_argument('files', metavar='FILE', type=str, nargs='+', help='files to send')
    parser.add_argument('--root', metavar='ROOT_DIR', type=str, nargs='?', default=None,
                        help='root directory of the archive. files in the archive is put in relative path from this directory. this is usable if FILE(s) is absolute path')
    parser.add_argument('--rm', action='store_true', help='delete sent files. if FILE is a directory, only files in the directory are to be deleted.')
    args = parser.parse_args()

    if args.root is None:
        args.root = str()

    # create tarball named with the current timestamp
    tarball_name = "train_{}.tar.gz".format(time.strftime('%Y_%m_%d_%H_%M_%S'))
    with tarfile.open(tarball_name, "w|gz") as archive:
        for file in args.files:
            # if --root is given, store the file under its path relative to it.
            # re.escape keeps regex metacharacters in the root path (e.g. '.')
            # from being interpreted as a pattern (bug fix).
            relative_path = re.sub('^{}'.format(re.escape(args.root)), '', file)
            relative_path = re.sub('^/', '', relative_path)
            archive.add(file, arcname=relative_path)
    print('create {}'.format(tarball_name))

    # upload the tarball to Google Drive, then remove the local copy
    gdrive = GoogleDrive(client_secret_file='/etc/lunchjet/credentials.json',
                         token_file='/etc/lunchjet/token.json')
    file = gdrive.create_file('lunchjet/' + tarball_name, tarball_name)
    print('upload a file to gdrive : {}'.format(str(file)))
    pathlib.Path(tarball_name).unlink()

    # delete sent files if requested
    if args.rm:
        for file in args.files:
            file_path = pathlib.Path(file)
            if file_path.is_dir():
                unlink_files_in(file_path)
            else:
                file_path.unlink()


if __name__ == '__main__':
    main()
| 36.083333 | 152 | 0.652194 | 301 | 2,165 | 4.511628 | 0.335548 | 0.048601 | 0.019882 | 0.019146 | 0.02651 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.218938 | 2,165 | 59 | 153 | 36.694915 | 0.803075 | 0.065127 | 0 | 0.047619 | 0 | 0.02381 | 0.241328 | 0.026759 | 0.02381 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0.166667 | 0 | 0.214286 | 0.047619 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
87124bd92d5f796bc849e3067142a66827c46020 | 1,688 | py | Python | hrl_common_code_darpa_m3/src/hrl_common_code_darpa_m3/robot_config/three_link_with_hand.py | gt-ros-pkg/hrl-haptic-manip | 6458187075033ecd3a22fbcdc1a632df39b0cba1 | [
"Apache-2.0"
] | 1 | 2017-07-13T14:58:35.000Z | 2017-07-13T14:58:35.000Z | hrl_common_code_darpa_m3/src/hrl_common_code_darpa_m3/robot_config/three_link_with_hand.py | gt-ros-pkg/hrl-haptic-manip | 6458187075033ecd3a22fbcdc1a632df39b0cba1 | [
"Apache-2.0"
] | null | null | null | hrl_common_code_darpa_m3/src/hrl_common_code_darpa_m3/robot_config/three_link_with_hand.py | gt-ros-pkg/hrl-haptic-manip | 6458187075033ecd3a22fbcdc1a632df39b0cba1 | [
"Apache-2.0"
] | 2 | 2017-03-08T14:44:22.000Z | 2019-07-15T23:46:35.000Z |
import numpy as np
import math
# --- Geometry of the planar 3-link arm (lengths in the file's base units) ---
height = 0.0
upper_arm_length = 0.334
forearm_length = 0.288
hand_length = 0.12

# End-effector location: straight down the fully extended arm.
ee_location = np.matrix([0., -upper_arm_length-forearm_length-hand_length, height]).T

# --- Body (link) properties ---
bod_color = [[0.4, 0.4, 0.4, 1], [0.8, 0.8, 0.8, 1], [0.33, 0.33, 0.33, 1]]
bod_num_links = 3
bod_mass = [2.3, 1.32, 0.7]
bod_names = ['link1', 'link2', 'link3']

# --- Joint properties: all three joints rotate about the z axis ---
b_jt_axis = [[0.,0.,1.],[0.,0.,1.], [0.,0.,1.]]
# Anchors placed at shoulder, elbow and wrist along the -y axis.
b_jt_anchor = [[0., 0., height], [0., -upper_arm_length, height],
               [0., -upper_arm_length-forearm_length, height]]
b_jt_kp = [20., 15., 10.]  # joint stiffness gains
b_jt_kd = [3., 2., 1.]     # joint damping gains
b_jt_limits_max = np.radians([162, 159, 90]).tolist()
b_jt_limits_min = np.radians([-63, 0, -45]).tolist()
# NOTE(review): duplicate of the b_jt_axis assignment above — redundant.
b_jt_axis = [[0.,0.,1.],[0.,0.,1.], [0.,0.,1.]]
# Attachment pairs [link, parent]; -1 denotes the fixed base.
b_jt_attach = [[0, -1], [1, 0], [2,1]]
b_jt_start = np.radians([-30.0, 150, 15]).tolist()
#b_jt_start = np.radians([0.0, 10, 0]).tolist()
b_jts = {'anchor':b_jt_anchor, 'axis':b_jt_axis, 'jt_lim_max':b_jt_limits_max,
         'jt_lim_min':b_jt_limits_min, 'jt_init':b_jt_start, 'jt_attach':b_jt_attach,
         'jt_stiffness':b_jt_kp, 'jt_damping':b_jt_kd}

# --- Body shapes: capsules whose long axis follows each link ---
bod_shapes = ['capsule', 'capsule', 'capsule']
dia = 0.03  # capsule diameter
bod_dimensions = [[dia, dia, upper_arm_length], [dia, dia, forearm_length],
                  [dia, dia, hand_length-dia/2]]
# Centers of mass at the midpoint of each link.
bod_com_position = [[0., -upper_arm_length/2., height],
                    [0., -upper_arm_length-forearm_length/2., height],
                    [0., -upper_arm_length-forearm_length-hand_length/2.+dia/4, height]]
bodies ={'shapes':bod_shapes, 'dim':bod_dimensions, 'num_links':bod_num_links,
         'com_pos':bod_com_position, 'mass':bod_mass, 'name':bod_names, 'color':bod_color}
| 33.098039 | 90 | 0.625592 | 303 | 1,688 | 3.168317 | 0.247525 | 0.05625 | 0.116667 | 0.109375 | 0.261458 | 0.204167 | 0.204167 | 0.16875 | 0.11875 | 0.039583 | 0 | 0.087049 | 0.162915 | 1,688 | 50 | 91 | 33.76 | 0.592357 | 0.027251 | 0 | 0.060606 | 0 | 0 | 0.086638 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.060606 | 0 | 0.060606 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
87129e7b79e24ecf531c7c06bfb457d2abc1da76 | 842 | py | Python | strategy/foraging.py | vinkels/jiteyareta | b057e1abfafa20d0516677e4450ce0bb3fe8fa12 | [
"CC-BY-3.0"
] | null | null | null | strategy/foraging.py | vinkels/jiteyareta | b057e1abfafa20d0516677e4450ce0bb3fe8fa12 | [
"CC-BY-3.0"
] | null | null | null | strategy/foraging.py | vinkels/jiteyareta | b057e1abfafa20d0516677e4450ce0bb3fe8fa12 | [
"CC-BY-3.0"
] | null | null | null | from food import Food
def foraging_step(bee):
    """
    Advance a foraging bee by one step: a loaded bee returns to the hive,
    otherwise it moves toward its known food location and harvests on arrival.
    """
    if bee.is_carrying_food:
        bee.move_to_hive()
        return

    bee.move(bee.food_location)
    if bee.food_location != bee.pos:
        # Still in transit; nothing more to do this step.
        return

    # Arrived at the food location: drop the planned route and try to harvest.
    bee.planned_route = []
    harvestable = [
        item
        for item in bee.model.grid.get_food_neighbors(bee.pos, 0)
        if item.can_be_harvested
    ]
    if harvestable:
        harvestable[0].harvest()
        bee.is_carrying_food = True
    else:
        # Nothing left here, so switch roles and scout next step.
        bee.type_bee = "scout"
8714350f5bc78fe0fd2afdb213dc3cc875214f63 | 1,447 | py | Python | gaplan/common/error.py | yugr/gaplan | 932993d997fba278818b6489c9688d50bd5bccd1 | [
"MIT"
] | 5 | 2020-05-16T20:57:56.000Z | 2022-03-03T19:39:26.000Z | gaplan/common/error.py | yugr/gaplan | 932993d997fba278818b6489c9688d50bd5bccd1 | [
"MIT"
] | null | null | null | gaplan/common/error.py | yugr/gaplan | 932993d997fba278818b6489c9688d50bd5bccd1 | [
"MIT"
] | null | null | null | # The MIT License (MIT)
#
# Copyright (c) 2018-2022 Yury Gribov
#
# Use of this source code is governed by The MIT License (MIT)
# that can be found in the LICENSE.txt file.
"""Error handling APIs."""
import sys
import os.path
from typing import NoReturn
from gaplan.common.location import Location
# When enabled (via set_options(print_stack=True)), error() raises RuntimeError
# instead of exiting, so the interpreter prints a full traceback.
_print_stack = False
# Program name used as the prefix of error/warning messages; defaults to the
# basename of the running script and can be overridden with set_basename().
_me = os.path.basename(sys.argv[0])
def error(*args) -> NoReturn:
    """Report an error to stderr and terminate the program.

    Accepts either (msg,) or (location, msg).  When stack printing has been
    enabled via set_options(print_stack=True), raises RuntimeError instead of
    exiting so a traceback is shown.
    """
    if isinstance(args[0], Location):
        where, text = args
        line = f"{_me}: error: {where}: {text}\n"
    else:
        (text,) = args
        line = f"{_me}: error: {text}\n"
    sys.stderr.write(line)
    if _print_stack:
        raise RuntimeError
    sys.exit(1)
def error_if(cond, *args):
    """Forward *args to error() (which terminates) when cond is truthy."""
    if not cond:
        return
    error(*args)
def warn(*args):
    """Write a formatted warning to stderr (never terminates).

    Accepts either (msg,) or (location, msg), mirroring error().
    """
    if isinstance(args[0], Location):
        where, text = args
        body = f"{where}: {text}"
    else:
        (text,) = args
        body = text
    sys.stderr.write(f"{_me}: warning: {body}\n")
def warn_if(cond, *args):
    """Emit a warning via warn() only when cond is truthy."""
    if not cond:
        return
    warn(*args)
def set_basename(name):
    """Override the program name used to prefix error/warning messages."""
    global _me
    _me = name
def set_options(**kwargs):
    """Set other error-reporting options.

    Supported options:
      print_stack (bool): when true, error() raises RuntimeError instead of
        exiting, so the interpreter prints a traceback.

    Unknown option names are reported via error() and terminate the program.
    """
    for k, v in kwargs.items():
        if k == 'print_stack':
            global _print_stack
            _print_stack = v
        else:
            # error() already prefixes the message with "error:", so the old
            # 'error("error: unknown option: ...")' produced a doubled tag.
            error("unknown option: " + k)
| 22.968254 | 62 | 0.647547 | 215 | 1,447 | 4.265116 | 0.395349 | 0.054526 | 0.043621 | 0.069793 | 0.266085 | 0.266085 | 0.215921 | 0.215921 | 0.189749 | 0.189749 | 0 | 0.010399 | 0.202488 | 1,447 | 62 | 63 | 23.33871 | 0.784229 | 0.278507 | 0 | 0.282051 | 0 | 0 | 0.135593 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.153846 | false | 0 | 0.102564 | 0 | 0.25641 | 0.128205 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8714535dbd86b94e3bf6718d469655885f16e32c | 403 | py | Python | phns/utils/tests/test_transcribe.py | DeepLenin/phns | 80fb48d032cd159782a5d96724e91540a55271ef | [
"MIT"
] | 5 | 2020-04-03T20:59:46.000Z | 2020-07-08T17:40:40.000Z | phns/utils/tests/test_transcribe.py | DeepLenin/phns | 80fb48d032cd159782a5d96724e91540a55271ef | [
"MIT"
] | null | null | null | phns/utils/tests/test_transcribe.py | DeepLenin/phns | 80fb48d032cd159782a5d96724e91540a55271ef | [
"MIT"
] | null | null | null | from phns import Phn
from phns.utils import transcribe
def test_transcribe_simple_word():
    """'that^is' should yield every expected phoneme-sequence variant."""
    def phn_seq(*codes):
        # Helper: build a hashable tuple of Phn objects from phoneme codes.
        return tuple(Phn(code) for code in codes)

    expected = {
        phn_seq("dh", "ae1", "t", "ih1", "z"): ["that", "is"],
        phn_seq("dh", "ae1", "t", "s"): ["that's"],
        phn_seq("dh", "ah", "t", "ih1", "z"): ["that", "is"],
    }
    assert transcribe.word("that^is") == expected
8716d2976b9ac99e79325aad69ce2ddac6ec0a0b | 5,041 | py | Python | tests/app/routes/members/test_rest.py | kentsanggds/api | 651cdf7d496690722d6a4f5b51f04f4be97899d4 | [
"MIT"
] | null | null | null | tests/app/routes/members/test_rest.py | kentsanggds/api | 651cdf7d496690722d6a4f5b51f04f4be97899d4 | [
"MIT"
] | null | null | null | tests/app/routes/members/test_rest.py | kentsanggds/api | 651cdf7d496690722d6a4f5b51f04f4be97899d4 | [
"MIT"
] | null | null | null | from flask import json, jsonify, url_for
from tests.conftest import create_authorization_header
from app.comms.encryption import encrypt
from app.models import Member
from tests.db import create_member
class WhenGettingMembers:
    def it_returns_all_members(self, client, db_session, sample_member):
        """GET /members returns the fixture member plus a new inactive one."""
        inactive = create_member(name='Sid Green', email='sid@example.com', active=False)

        headers = [('Content-Type', 'application/json'), create_authorization_header()]
        response = client.get(url_for('members.get_members'), headers=headers)

        expected = [jsonify(m.serialize()).json for m in (sample_member, inactive)]
        assert len(response.json) == 2
        assert response.json[0] == expected[0]
        assert response.json[1] == expected[1]
class WhenPostingMembers:
    """Tests for the member unsubscribe and bulk-import endpoints."""

    @staticmethod
    def _member_payload(member_id, name, email, created, updated, marketing="1"):
        """Build one legacy member record in the shape the import endpoint expects."""
        return {
            "id": member_id,
            "Name": name,
            "EmailAdd": email,
            "Active": "y",
            "CreationDate": created,
            "Marketing": marketing,
            "IsMember": "n",
            "LastUpdated": updated,
        }

    @staticmethod
    def _post_import(client, data):
        """POST the given records to the import endpoint and return the response."""
        return client.post(
            url_for('members.import_members'),
            data=json.dumps(data),
            headers=[('Content-Type', 'application/json'), create_authorization_header()]
        )

    def it_unsubscribes_member(self, app, client, db_session, sample_member):
        # Build the encrypted unsubscribe token that email links carry.
        unsubcode = encrypt(
            "{}={}".format(app.config['EMAIL_TOKENS']['member_id'], str(sample_member.id)),
            app.config['EMAIL_UNSUB_SALT']
        )

        response = client.post(
            url_for('members.unsubscribe_member', unsubcode=unsubcode),
            headers=[('Content-Type', 'application/json'), create_authorization_header()]
        )

        assert not sample_member.active
        assert response.json == {'message': '{} unsubscribed'.format(sample_member.name)}

    def it_imports_members(self, client, db, db_session, sample_marketing):
        data = [
            self._member_payload("1", "Test member", "test@example.com",
                                 "2019-08-01", "2019-08-10 10:00:00"),
            self._member_payload("2", "Test member 2", "test2@example.com",
                                 "2019-08-02", "2019-08-11 10:00:00"),
        ]

        response = self._post_import(client, data)

        assert response.status_code == 201
        members = Member.query.all()
        assert len(members) == 2
        assert members[0].old_id == int(data[0]['id'])
        assert members[0].name == data[0]['Name']

    def it_doesnt_import_exising_members(self, client, db_session, sample_marketing, sample_member):
        # sample_member already exists with old id 1, so that record is rejected.
        data = [
            self._member_payload("1", "Test member", "test@example.com",
                                 "2019-08-01", "2019-08-10 10:00:00"),
            self._member_payload("2", "Test member 2", "test2@example.com",
                                 "2019-08-02", "2019-08-11 10:00:00"),
        ]

        response = self._post_import(client, data)

        assert response.status_code == 201
        assert response.json.get('errors') == ['member already exists: 1']
        members = Member.query.all()
        assert len(members) == 2

    def it_doesnt_import_members_with_invalid_marketing(self, client, db_session, sample_marketing, sample_member):
        # Marketing id "2" has no matching record, so that entry is rejected.
        data = [
            self._member_payload("2", "Test member 2", "test2@example.com",
                                 "2019-08-02", "2019-08-11 10:00:00", marketing="2"),
            self._member_payload("3", "Test member 3", "test3@example.com",
                                 "2019-08-02", "2019-08-11 10:00:00"),
        ]

        response = self._post_import(client, data)

        assert response.status_code == 201
        assert response.json.get('errors') == ['Cannot find marketing: 2']
        members = Member.query.all()
        assert len(members) == 2
| 34.527397 | 115 | 0.509819 | 494 | 5,041 | 5.068826 | 0.204453 | 0.028754 | 0.044728 | 0.040735 | 0.629792 | 0.616613 | 0.588658 | 0.588658 | 0.543131 | 0.478435 | 0 | 0.052616 | 0.347748 | 5,041 | 145 | 116 | 34.765517 | 0.708942 | 0 | 0 | 0.572581 | 0 | 0 | 0.22555 | 0.01825 | 0 | 0 | 0 | 0 | 0.120968 | 1 | 0.040323 | false | 0 | 0.08871 | 0 | 0.145161 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
87192a28aca8f78c8355f87ba88d4a61c81e47b6 | 722 | py | Python | hamming.py | aakp10/Malice-and-Bytes | f6e1b6e72466b64476e6c4cc6dfd1021c9657644 | [
"MIT"
] | null | null | null | hamming.py | aakp10/Malice-and-Bytes | f6e1b6e72466b64476e6c4cc6dfd1021c9657644 | [
"MIT"
] | null | null | null | hamming.py | aakp10/Malice-and-Bytes | f6e1b6e72466b64476e6c4cc6dfd1021c9657644 | [
"MIT"
] | null | null | null | '''
calculate the hamming or the bit diff
useful in deciding the key length for repeating XOR
'''
def str_to_bytes(str, base):
    """Convert a text string into a bytearray of per-character code points.

    NOTE(review): the `base` parameter is never used; kept for caller
    compatibility.  The parameter `str` also shadows the builtin.
    """
    return bytearray(ord(ch) for ch in str)
def calc_hamming(str1, str2):
    """Return (and print) the Hamming bit-distance between two strings.

    The distance is the total number of differing bits across corresponding
    characters; if the strings differ in length, only the common prefix is
    compared (zip truncates), matching the original behaviour.

    Bug fix: the old loop over-counted once one byte reached zero before the
    other -- it added 1 per remaining halving step instead of 1 per set bit
    (e.g. 4 vs 0 counted 3 instead of 1).  XOR-ing the code points and
    counting set bits gives the correct distance.
    """
    ham = 0
    for a, b in zip(str1, str2):
        ham += bin(ord(a) ^ ord(b)).count('1')
    print(ham)
    return ham
# Demo on the classic Cryptopals pair; the documented Hamming distance is 37.
calc_hamming("this is a test","wokka wokka!!!")
| 24.896552 | 51 | 0.531856 | 117 | 722 | 3.188034 | 0.418803 | 0.040214 | 0.080429 | 0.032172 | 0.037534 | 0 | 0 | 0 | 0 | 0 | 0 | 0.052301 | 0.33795 | 722 | 29 | 52 | 24.896552 | 0.728033 | 0.123269 | 0 | 0.095238 | 0 | 0 | 0.044728 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095238 | false | 0 | 0 | 0 | 0.142857 | 0.190476 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
871a0f66beb18391f97445d37118a2ad38b7fddc | 2,540 | py | Python | network/MySourceFiles/talkback/bot.py | dstack4273/new-coder_tutorials | 658344fdeca6ab3957dfe6646e5ac9b75ce07c94 | [
"Zlib"
] | null | null | null | network/MySourceFiles/talkback/bot.py | dstack4273/new-coder_tutorials | 658344fdeca6ab3957dfe6646e5ac9b75ce07c94 | [
"Zlib"
] | null | null | null | network/MySourceFiles/talkback/bot.py | dstack4273/new-coder_tutorials | 658344fdeca6ab3957dfe6646e5ac9b75ce07c94 | [
"Zlib"
] | null | null | null | from twisted.internet import protocol
from twisted.python import log
from twisted.words.protocols import irc
class TalkBackBot(irc.IRCClient):
    """IRC client that replies with a quote from the factory's quote source
    when it is messaged directly, addressed by nick on the channel, or a
    configured trigger matches channel chatter.  All settings (nick, channel,
    quotes, triggers) come from the attached factory."""

    def connectionMade(self):
        """ called when a connection is made to a channel """
        # Copy identity from the factory before the base class registers us.
        self.nickname = self.factory.nickname
        self.realname = self.factory.realname
        irc.IRCClient.connectionMade(self)
        log.msg("connectionMade")

    def connectionLost(self, reason):
        """ Called when the connection is lost """
        irc.IRCClient.connectionLost(self, reason)
        log.msg("connectionLost {!r}".format(reason))

    # callbacks for events

    def signedOn(self):
        """ Called when the bot has successfully signed on to the server """
        log.msg("Signed On")
        # The server may have assigned a different nick if ours was taken.
        if self.nickname != self.factory.nickname:
            log.msg('Your nickname was already occupied, actual nickname is '
                    '"{}".'.format(self.nickname))
        self.join(self.factory.channel)

    def joined(self, channel):
        """ Called when the bot joins the channel """
        log.msg("[{nick} has joined {channel}]"
                .format(nick = self.nickname, channel = self.factory.channel,))

    def privmsg(self, user, channel, msg):
        """ Called when the bot receives a message """
        sendTo = None
        prefix = ''
        # `user` has the form "nick!user@host"; keep just the nick.
        senderNick = user.split('!', 1)[0]
        if channel == self.nickname:
            # /MSG back
            sendTo = senderNick
        elif msg.startswith(self.nickname):
            # Reply back on the channel
            sendTo = channel
            prefix = senderNick + ': '
        else:
            msg = msg.lower()
            for trigger in self.factory.triggers:
                # NOTE(review): this tests whether the whole message is
                # contained *in* the trigger, not the trigger in the message.
                # Confirm the matching direction is intended.
                if msg in trigger:
                    sendTo = channel
                    prefix = senderNick + ': '
                    break

        if sendTo:
            quote = self.factory.quotes.pick()
            self.msg(sendTo, prefix + quote)
            log.msg(
                "sent message to {reciever}, triggered by {sender}:\n\t{quote}"
                .format(reciever=sendTo, sender=senderNick, quote=quote)
            )
class TalkBackBotFactory(protocol.ClientFactory):
    """Builds TalkBackBot clients and carries their shared session settings."""

    protocol = TalkBackBot

    def __init__(self, channel, nickname, realname, quotes, triggers):
        """Record the IRC settings every spawned bot instance reads back."""
        (self.channel, self.nickname, self.realname,
         self.quotes, self.triggers) = (channel, nickname, realname,
                                        quotes, triggers)
871a590c93d4835bfac72b7e64433be0acb694e7 | 3,273 | py | Python | clue/tests/test_idea.py | limor-gs/clue | 9fe2939fbb84edc6039048d618175f279f9ee3cb | [
"Apache-2.0"
] | 2 | 2016-11-02T10:25:06.000Z | 2017-02-27T11:23:07.000Z | clue/tests/test_idea.py | limor-gs/clue | 9fe2939fbb84edc6039048d618175f279f9ee3cb | [
"Apache-2.0"
] | 4 | 2016-02-28T13:10:59.000Z | 2016-10-13T10:04:08.000Z | clue/tests/test_idea.py | limor-gs/clue | 9fe2939fbb84edc6039048d618175f279f9ee3cb | [
"Apache-2.0"
] | 7 | 2016-02-04T19:34:06.000Z | 2017-07-18T08:45:26.000Z | ########
# Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
############
from clue import tests
class TestIdea(tests.BaseTest):

    def test_no_project_dir(self):
        """Without the project_dir property no .iml file is generated."""
        repo = 'cloudify-rest-client'
        self.clue_install(repos={repo: {}})
        self.assertFalse((self.repos_dir / repo / '{}.iml'.format(repo)).exists())

    def test_project_dir(self):
        """IDE metadata files are generated according to each repo's config."""
        configs = {
            'cloudify-rest-client': {},
            'cloudify-dsl-parser': {'properties': {'project_dir': True}},
            'cloudify-manager-blueprints': {'python': False},
            'claw-scripts': {'python': False,
                             'properties': {'resources': True,
                                            'organization': 'dankilman'}},
            'cloudify-plugins-common': {'python': False,
                                        'properties': {'resources': ['cloudify']}},
        }
        self.clue_install(repos=configs)

        # Which repos are expected to get a top-level <repo>.iml module file.
        iml_expected = {
            'cloudify-rest-client': True,
            'cloudify-dsl-parser': True,
            'cloudify-manager-blueprints': False,
            'claw-scripts': True,
            'cloudify-plugins-common': False,
        }
        for repo, expected in iml_expected.items():
            iml_path = self.repos_dir / repo / '{}.iml'.format(repo)
            self.assertEqual(expected, iml_path.exists())
        # The resources-only repo gets its module file under the resource dir.
        common_dir = self.repos_dir / 'cloudify-plugins-common'
        self.assertTrue((common_dir / 'cloudify/cloudify.iml').exists())

        idea_dir = self.repos_dir / 'cloudify-dsl-parser' / '.idea'
        self.assertTrue(idea_dir.exists())
        modules_xml = idea_dir / 'modules.xml'
        vcs_xml = idea_dir / 'vcs.xml'
        misc_xml = idea_dir / 'misc.xml'
        for meta_file in [modules_xml, vcs_xml, misc_xml]:
            self.assertTrue(meta_file.exists())

        self.assertIn('project-jdk-name="cloudify"', misc_xml.text())

        vcs = vcs_xml.text()
        for name in ('cloudify-rest-client', 'cloudify-dsl-parser',
                     'cloudify-plugins-common', 'claw-scripts',
                     'cloudify-manager-blueprints'):
            self.assertIn(name, vcs)

        modules = modules_xml.text()
        for name in ('cloudify-rest-client', 'cloudify-dsl-parser',
                     'cloudify-plugins-common/cloudify', 'claw-scripts'):
            self.assertIn(name, modules)
        self.assertNotIn('cloudify-manager-blueprints', modules)
| 41.961538 | 74 | 0.618698 | 384 | 3,273 | 5.164063 | 0.320313 | 0.060514 | 0.0706 | 0.045386 | 0.129097 | 0.066566 | 0.037317 | 0 | 0 | 0 | 0 | 0.016539 | 0.242591 | 3,273 | 77 | 75 | 42.506494 | 0.78338 | 0.180874 | 0 | 0.035088 | 0 | 0 | 0.208916 | 0.078202 | 0 | 0 | 0 | 0 | 0.350877 | 1 | 0.035088 | false | 0 | 0.017544 | 0 | 0.070175 | 0.052632 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
871ea6889558ac7fd27492a315a3703dfd534985 | 435 | py | Python | examples/docs_snippets/docs_snippets_tests/deploying_tests/test_dask.py | abkfenris/dagster | 7f35164535200cf904a4fdb18af207ccad09ad68 | [
"Apache-2.0"
] | null | null | null | examples/docs_snippets/docs_snippets_tests/deploying_tests/test_dask.py | abkfenris/dagster | 7f35164535200cf904a4fdb18af207ccad09ad68 | [
"Apache-2.0"
] | null | null | null | examples/docs_snippets/docs_snippets_tests/deploying_tests/test_dask.py | abkfenris/dagster | 7f35164535200cf904a4fdb18af207ccad09ad68 | [
"Apache-2.0"
] | null | null | null | from dagster.core.test_utils import instance_for_test
from docs_snippets.deploying.dask_hello_world import ( # pylint: disable=import-error
local_dask_job,
)
def test_local_dask_pipeline():
    """The local Dask job runs to success and emits the greeting output."""
    with instance_for_test() as instance:
        outcome = local_dask_job.execute_in_process(instance=instance)
        assert outcome.success
        assert outcome.output_for_node("hello_world") == "Hello, World!"
871f9d0c4f1b41f70e72511261f3201ecdd1fd25 | 3,138 | py | Python | 2021/2021_22.py | pbomta/Advent-of-Code | 55d94bce6b1052bc9757534b76a5287f7167c7b2 | [
"Unlicense"
] | null | null | null | 2021/2021_22.py | pbomta/Advent-of-Code | 55d94bce6b1052bc9757534b76a5287f7167c7b2 | [
"Unlicense"
] | null | null | null | 2021/2021_22.py | pbomta/Advent-of-Code | 55d94bce6b1052bc9757534b76a5287f7167c7b2 | [
"Unlicense"
] | null | null | null | from math import inf
# A range is an inclusive (min, max) pair.
def rangeIntersects(a, b):
    """True when inclusive ranges a and b share at least one point."""
    # Equivalent (De Morgan) to: neither range lies entirely past the other.
    return not (a[1] < b[0] or b[1] < a[0])
assert rangeIntersects((1, 2), (3, 4)) == False
assert rangeIntersects((0, 10), (2, 8)) == True
assert rangeIntersects((2, 8), (0, 10)) == True
assert rangeIntersects((0, 10), (5, 15)) == True
assert rangeIntersects((0, 10), (-5, 5)) == True
# Etc.
def rangeContains(a, b):
    """True when inclusive range a lies entirely within range b."""
    return b[0] <= a[0] and a[1] <= b[1]
assert rangeContains((1, 2), (0, 10)) == True
assert rangeContains((0, 10), (1, 2)) == False
# Etc.
def rangeLength(a):
    """Number of integer points in the inclusive range a."""
    lo, hi = a
    return hi - lo + 1
assert rangeLength((10, 10)) == 1
# Precondition: the ranges intersect.
def rangeIntersection(a, b):
    """Inclusive overlap of two intersecting ranges."""
    lo = a[0] if a[0] > b[0] else b[0]
    hi = a[1] if a[1] < b[1] else b[1]
    return (lo, hi)
assert rangeIntersection((0, 10), (5, 15)) == (5, 10)
assert rangeIntersection((5, 15), (0, 10)) == (5, 10)
assert rangeIntersection((2, 4), (0, 10)) == (2, 4)
def cubeSize(a):
    """Integer-point volume of a cuboid given as three inclusive ranges."""
    size = 1
    for axis in a:
        size *= rangeLength(axis)
    return size
def cubeIntersects(a, b):
    """True when cuboids a and b overlap on every axis."""
    return all(rangeIntersects(a[i], b[i]) for i in range(3))
def cubeIntersection(a, b):
    """Overlapping cuboid of a and b, or None when they are disjoint."""
    if not cubeIntersects(a, b):
        return None
    return tuple(rangeIntersection(a[i], b[i]) for i in range(3))
def parseRangeText(S):
    """Parse an axis spec like 'x=10..12' into the inclusive pair (10, 12)."""
    assert(S[1] == '=')
    parts = S[2:].split("..")
    return (int(parts[0]), int(parts[1]))
def readInput(filename):
    """Parse reboot steps: each line 'on x=a..b,y=c..d,z=e..f' becomes
    (command, ((a, b), (c, d), (e, f)))."""
    steps = []
    with open(filename) as file:
        for line in file:
            command, range_text = line.strip().split(' ')
            x_part, y_part, z_part = range_text.split(',')
            cube = (parseRangeText(x_part),
                    parseRangeText(y_part),
                    parseRangeText(z_part))
            steps.append((command, cube))
    return steps
instructions = readInput("2021_22_input")
# print(instructions)

# Restrict steps to the initialization region for part 1; unbounded for part 2.
# initregion = ((-50, 50), (-50, 50), (-50, 50)) # Part 1
initregion = ((-inf, inf), (-inf, inf), (-inf, inf))  # Part 2

# Inclusion-exclusion bookkeeping: the total "on" volume is the sum of
# cubeSize over on_switches minus the sum over off_switches.
on_switches = []
off_switches = []
for command, step_cube in instructions:
    new_cube = cubeIntersection(initregion, step_cube)
    if new_cube is None:
        continue
    new_on = []
    new_off = []
    # An "on" step contributes its own volume; every step (on or off) must
    # cancel overlap with all previously recorded positive/negative cubes.
    # (These two loops were previously duplicated verbatim in both the "on"
    # and "off" branches; they are hoisted out here.)
    if command == "on":
        new_on.append(new_cube)
    for prev in on_switches:
        overlap = cubeIntersection(new_cube, prev)
        if overlap is not None:
            new_off.append(overlap)
    for prev in off_switches:
        overlap = cubeIntersection(new_cube, prev)
        if overlap is not None:
            new_on.append(overlap)
    on_switches += new_on
    off_switches += new_off

total_size = 0
for positive in on_switches:
    total_size += cubeSize(positive)
for negative in off_switches:
    total_size -= cubeSize(negative)
print(total_size)
87243cc999f0a739773ea300e557dba657fc768c | 13,699 | py | Python | tetration-ansible/library/tetration_inventory_filter.py | chrivand/tetration-ansible-playbooks | ef0fc5e3e257f3e664a8b2e00b8590e179172d66 | [
"MIT"
] | 2 | 2019-09-06T23:16:44.000Z | 2021-02-17T21:52:38.000Z | tetration-ansible/library/tetration_inventory_filter.py | chrivand/tetration-ansible-playbooks | ef0fc5e3e257f3e664a8b2e00b8590e179172d66 | [
"MIT"
] | null | null | null | tetration-ansible/library/tetration_inventory_filter.py | chrivand/tetration-ansible-playbooks | ef0fc5e3e257f3e664a8b2e00b8590e179172d66 | [
"MIT"
] | 3 | 2019-08-23T19:50:21.000Z | 2021-04-25T01:47:48.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2018, Doron Chosnek
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Ansible module metadata: marks this community-supported module as a preview.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = r'''
---
author: Doron Chosnek (@dchosnek)
description:
- Enables management of Cisco Teetration inventory filters.
- Enables creation, modification, and deletion of filters.
- Enables management of complex filters with boolean operators on many different objects.
extends_documentation_fragment: tetration
module: tetration_inventory_filter
options:
app_scope_id:
description: Scope ID and scope name are mutually exclusive.
type: string
app_scope_name:
description: Scope ID and scope name are mutually exclusive.
type: string
name:
description: Name of the inventory filter
required: true
type: string
primary:
default: 'false'
description: When true it means inventory filter is restricted to ownership scope.
type: bool
public:
default: 'false'
description: When true the filter represents a service to be matched by other
applications during application discovery runs (ADM).
type: bool
query:
description: Filter (or match criteria) associated with the scope
type: dict
state:
choices: '[present, absent, query]'
description: Add, change, or remove the inventory filter
required: true
type: string
version_added: '2.8'
'''
EXAMPLES = r'''
# Create a filter based on hostname
- tetration_inventory_filter:
provider: "{{ my_tetration }}"
name: hostname contains dns
app_scope_name: Default
state: present
query:
field: host_name
type: contains
value: dns
# Create a filter for a specific IP subnet
- tetration_inventory_filter:
provider: "{{ my_tetration }}"
name: vpn users subnet
app_scope_name: Default
state: present
query:
field: ip
type: subnet
value: 192.168.100.0/24
# Create filter for a user annotation field named Owner. When using a user
# annotation, the field value must always start with user_ and end with the
# name of the user annotation. user_Owner represents the user annotation
# named Owner.
- tetration_inventory_filter:
provider: "{{ my_tetration }}"
name: owned by engineering
app_scope_name: Default
state: present
query:
field: user_Owner
type: eq
value: engineering
# Create filter for a user annotation field named Location
- tetration_inventory_filter:
provider: "{{ my_tetration }}"
name: location of Texas
app_scope_name: Default
state: present
query:
field: user_Location
type: contains
value: Texas
# Create a filter based on interface name
- tetration_inventory_filter:
provider: "{{ my_tetration }}"
name: interface eth0
app_scope_name: Default
state: present
query:
field: iface_name
type: eq
value: eth0
# Create a filter based on interface MAC address
- tetration_inventory_filter:
provider: "{{ my_tetration }}"
name: mac a9
app_scope_name: Default
state: present
query:
field: iface_mac
type: contains
value: a9
# Build a complex filter with both 'and' and 'or' statements
- tetration_inventory_filter:
provider: "{{ my_tetration }}"
name: vulnerable linux hosts
app_scope_name: Default
state: present
public: true
primary: true
query:
type: and
filters:
- field: os
type: contains
value: linux
- type: or
filters:
- field: host_tags_cvss3
type: gt
value: 8
- field: host_tags_cvss2
type: gt
value: 8
# Delete some inventory filters
- tetration_inventory_filter:
provider: "{{ my_tetration }}"
name: "{{ item }}"
app_scope_name: Default
state: absent
loop:
- my first filter
- my second filter
'''
RETURN = r'''
---
object:
contains:
app_scope_id:
description: ID of the scope associated with the filter
returned: when C(state) is present or query
sample: 5bdf9776497d4f397d38fdcb
type: dict
id:
description: Unique identifier for the inventory filter
returned: when C(state) is present or query
sample: 5be671e9497d4f08f028b1bb
type: dict
name:
description: User specified name of the inventory filter
returned: when C(state) is present or query
type: string
primary:
description: When true it means inventory filter is restricted to ownership
scope
returned: when C(state) is present or query
sample: 'false'
type: bool
public:
description: When true the filter represents a service to be matched by other
applications during application discovery runs (ADM).
returned: when C(state) is present or query
sample: 'false'
type: bool
query:
description: Filter (or match criteria) associated with the filter in conjunction
with the filters of the parent scopes.
returned: when C(state) is present or query
type: dict
updated_at:
description: Unix timestamp for the last update of the filter
returned: when C(state) is present or query
sample: 1541829226
type: int
description: the changed or modified object
returned: always
type: complex
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.tetration.api import TetrationApiModule
from ansible.module_utils.tetration.api import TETRATION_API_INVENTORY_FILTER
from ansible.module_utils.tetration.api import TETRATION_API_SCOPES
from ansible.utils.display import Display
display = Display()
def main():
    """Ansible module entry point: manage a Tetration inventory filter.

    Supports three states:
      * present  -- create the filter, or update it when any tracked field differs
      * absent   -- delete the filter if it exists
      * query    -- return one filter, or many (query_type 'sub-scope' / 'all')

    Exits via module.exit_json() with `changed` and the resulting `object`.
    """
    # Module-specific arguments; merged below with the shared provider/state spec.
    tetration_spec=dict(
        name=dict(type='str', required=False),
        query=dict(type='dict', required=False),
        app_scope_id=dict(type='str', required=False),
        app_scope_name=dict(type='str', required=False),
        primary=dict(type='bool', required=False, default=False),
        public=dict(type='bool', required=False, default=False),
        query_type=dict(type='str', required=False, choices=['single', 'sub-scope', 'all'], default='single'),
    )
    argument_spec = dict(
        provider=dict(required=True),
        state=dict(required=True, choices=['present', 'absent', 'query'])
    )
    argument_spec.update(tetration_spec)
    argument_spec.update(TetrationApiModule.provider_spec)
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        # The target scope may be identified either by id or by name.
        required_one_of=[ ['app_scope_id', 'app_scope_name'] ],
        required_if=[
            ['state', 'present', ['name']],
            ['state', 'absent', ['name']],
            ['query_type', 'sub-scope', ['app_scope_name']]
        ]
    )
    tet_module = TetrationApiModule(module)
    # These are all elements we put in our return JSON object for clarity
    result = dict(
        failed=False,
        object=None,
    )
    state = module.params['state']
    filter_name = module.params['name']
    app_scope_id = module.params['app_scope_id']
    app_scope_name = module.params['app_scope_name']
    primary = module.params['primary']
    query_type = module.params['query_type']
    # A filter can only be public when it is primary; force public off otherwise.
    if not primary:
        public = False
        module.params['public'] = False
    else:
        public = module.params['public']
    # =========================================================================
    # Get current state of the object
    # find the ID of the scope if specified by name
    if app_scope_name is not None:
        scope = tet_module.get_object(
            target = TETRATION_API_SCOPES,
            filter = dict(name=app_scope_name),
        )
        app_scope_id = scope['id'] if scope else None
    # The first thing we have to do is get the object.
    existing_object = tet_module.get_object(
        target = TETRATION_API_INVENTORY_FILTER,
        filter = dict(name=filter_name, app_scope_id=app_scope_id)
    )
    # NOTE(review): `id` shadows the builtin; kept as-is to avoid a behavior-affecting rename here.
    id = None if existing_object is None else existing_object['id']
    # =========================================================================
    # Now enforce the desired state (present, absent, query)
    # at this point in the code, there will be one object stored in the
    # variable named existing_object
    changed = False
    # ---------------------------------
    # STATE == 'present'
    # ---------------------------------
    if state == 'present':
        # Desired payload; `query` is only sent when the caller supplied one.
        new_object = dict(
            name=filter_name,
            app_scope_id=app_scope_id,
            primary=primary,
            public=public
        )
        if module.params['query'] is not None:
            new_object['query'] = module.params['query']
        # if the object does not exist at all, create it
        if not existing_object:
            changed = True
            if not module.check_mode:
                query_result = tet_module.run_method(
                    method_name='post',
                    target=TETRATION_API_INVENTORY_FILTER,
                    req_payload=new_object,
                )
                id = query_result['id']
        # if the object does exist, check to see if any part of it should be
        # changed
        else:
            # if app_scope_id, primary or public don't match, UPDATE!
            update_needed = False
            for k in ['app_scope_id', 'primary', 'public']:
                if module.params[k] is not None and existing_object[k] != module.params[k]:
                    update_needed = True
            # if query doesn't match, UPDATE!
            # NOTE(review): the API returns the query under `short_query` -- comparison
            # assumes the caller supplies it in the same shape; confirm against API docs.
            if module.params['query'] is not None and module.params['query'] != existing_object['short_query']:
                update_needed = True
            if update_needed:
                changed = True
                if not module.check_mode:
                    tet_module.run_method(
                        method_name='put',
                        target='%s/%s' % (TETRATION_API_INVENTORY_FILTER, id),
                        req_payload=new_object,
                    )
        # decide what value to return
        if not changed:
            result['object'] = existing_object
        elif module.check_mode:
            # In check mode nothing was written; echo the intended payload.
            result['object'] = new_object
        else:
            # retrieve the current state of the object
            query_result = tet_module.run_method(
                method_name='get',
                target='%s/%s' % (TETRATION_API_INVENTORY_FILTER, id)
            )
            result['object'] = query_result
    # ---------------------------------
    # STATE == 'absent'
    # ---------------------------------
    elif state == 'absent':
        # if existing_object is a non-empty dictionary, that means there is
        # something to delete; if it's empty then there is nothing to do
        if bool(existing_object):
            changed = True
            if not module.check_mode:
                tet_module.run_method(
                    method_name='delete',
                    target='%s/%s' % (TETRATION_API_INVENTORY_FILTER, id)
                )
            result['object'] = existing_object
    # ---------------------------------
    # STATE == 'query'
    # ---------------------------------
    elif state == 'query':
        # we already retrieved the current state of the object, so there is no
        # need to do it again
        if query_type == 'all':
            # 'all' is only valid on a root scope; return every filter in the tree
            # except the implicit 'Everything' filters.
            existing_app_scope = tet_module.run_method(
                method_name = 'get',
                target = '%s/%s' % (TETRATION_API_SCOPES, app_scope_id)
            )
            if not existing_app_scope:
                module.fail_json(msg='No app_scope was found matching id: %s' % app_scope_id)
            if existing_app_scope['id'] != existing_app_scope['root_app_scope_id']:
                module.fail_json(msg='query_type `all` option is only allowed on root scopes')
            app_scopes = tet_module.get_object(
                target = TETRATION_API_SCOPES,
                filter = dict(root_app_scope_id = existing_app_scope['root_app_scope_id']),
                allow_multiple = True
            )
            scope_ids = [ scope['id'] for scope in app_scopes ]
            inventory_filters = tet_module.run_method(
                method_name = 'get',
                target = TETRATION_API_INVENTORY_FILTER,
            )
            if inventory_filters:
                inventory_filters = [ valid_filter for valid_filter in inventory_filters if valid_filter['app_scope_id'] in scope_ids and valid_filter['name'] != 'Everything' ]
            result['object'] = inventory_filters
        elif query_type == 'sub-scope':
            # Match scopes whose name starts with app_scope_name (sub-scope names
            # are prefixed by their parent's name) and collect their filters.
            app_scopes = tet_module.run_method(
                method_name = 'get',
                target = TETRATION_API_SCOPES
            )
            scope_ids = [ scope['id'] for scope in app_scopes if scope['name'].startswith(app_scope_name) ]
            inventory_filters = tet_module.run_method(
                method_name = 'get',
                target = TETRATION_API_INVENTORY_FILTER,
            )
            if inventory_filters:
                inventory_filters = [ valid_filter for valid_filter in inventory_filters if valid_filter['app_scope_id'] in scope_ids and valid_filter['name'] != 'Everything' ]
            result['object'] = inventory_filters
        else:
            # query_type == 'single': the object fetched above (may be None).
            result['object'] = existing_object
    module.exit_json(changed=changed, **result)

if __name__ == '__main__':
    main()
| 33.25 | 176 | 0.614205 | 1,617 | 13,699 | 5.015461 | 0.188621 | 0.042417 | 0.025894 | 0.031566 | 0.474846 | 0.445376 | 0.41566 | 0.334032 | 0.303329 | 0.247226 | 0 | 0.00729 | 0.278998 | 13,699 | 411 | 177 | 33.3309 | 0.81381 | 0.101832 | 0 | 0.412463 | 0 | 0 | 0.468014 | 0.026159 | 0 | 0 | 0 | 0 | 0 | 1 | 0.002967 | false | 0 | 0.017804 | 0 | 0.020772 | 0.002967 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8725b33522f602845ab11d98f5e3419251253adf | 2,034 | py | Python | tests/util.py | jin-qin/cs655-image-recognition | c8353dd2a5b2937678b097dddd2cf7c9910e82dd | [
"MIT"
] | 1 | 2020-12-09T12:57:51.000Z | 2020-12-09T12:57:51.000Z | tests/util.py | jin-qin/cs655-image-recognition | c8353dd2a5b2937678b097dddd2cf7c9910e82dd | [
"MIT"
] | null | null | null | tests/util.py | jin-qin/cs655-image-recognition | c8353dd2a5b2937678b097dddd2cf7c9910e82dd | [
"MIT"
] | null | null | null |
def load_ground_truth(gt_file: str):
    """Read one integer label per line from *gt_file*.

    Args:
        gt_file: path to a text file with one integer class label per line.

    Returns:
        list[int]: the labels in file order.
    """
    # The original looped with enumerate() but never used the index;
    # a comprehension expresses the intent directly.
    with open(gt_file, 'r') as f:
        return [int(line) for line in f]
def load_imagenet_meta(meta_file: str):
    """Load the ImageNet devkit metadata file and return its 'synsets' array."""
    import scipy.io

    contents = scipy.io.loadmat(meta_file)
    return contents['synsets']
def get_sysnset_map(meta_file: str, synset_words_mapping_file: str):
    '''
    Map model output labels to ImageNet synset ids.

    The model's predicted label index is not the same as the ImageNet synset
    id, so this builds the mapping <model label -> imagenet id> by combining
    the devkit metadata (.mat) with a synset-words file whose first
    space-separated column is the WNID.
    '''
    metadata = load_imagenet_meta(meta_file)
    # Column 0 of the synsets struct array; each entry r is a nested record.
    d = metadata[:, 0]
    wnid_map = {}
    for r in d:
        # r[0][0][0] is the ILSVRC id, r[1][0] the WNID string -- layout of the
        # devkit .mat as loaded by scipy; ids above 1000 are non-leaf synsets
        # and are skipped.  TODO(review): confirm against the devkit version used.
        if r[0][0][0] > 1000: continue
        wnid_map[r[1][0]] = r[0][0][0]
    # -1 is a sentinel for "no prediction".
    synset_map = {-1: -1}
    import csv
    with open(synset_words_mapping_file, newline='') as csvfile:
        csvreader = csv.reader(csvfile, delimiter=' ', quotechar='|')
        for id, line in enumerate(csvreader):
            # line[0] is the WNID; the row index is the model's label.
            id_imgnet = wnid_map[line[0]]
            synset_map[id] = id_imgnet
    return synset_map
def get_synset_details_map(meta_file: str, synset_words_mapping_file: str):
    """Like get_sysnset_map, but each model label maps to a details dict
    with keys 'code' (imagenet id), 'cat' (category/WNID word) and 'desc'.
    """
    metadata = load_imagenet_meta(meta_file)
    d = metadata[:, 0]
    wnid_map = {}
    category = {}
    desc = {}
    for r in d:
        # Nested record layout: r[0]=id, r[1]=WNID, r[2]=words, r[3]=gloss --
        # presumably matching the devkit synsets struct; verify if the devkit changes.
        if r[0][0][0] > 1000: continue
        wnid_map[r[1][0]] = r[0][0][0]
        category[r[1][0]] = r[2][0]
        desc[r[1][0]] = r[3][0]
    # -1 is a sentinel entry for "no prediction".
    synset_map = {-1: {'code': -1, 'desc': ''}}
    import csv
    with open(synset_words_mapping_file, newline='') as csvfile:
        csvreader = csv.reader(csvfile, delimiter=' ', quotechar='|')
        for id, line in enumerate(csvreader):
            id_imgnet = wnid_map[line[0]]
            synset_map[id] = { 'code': int(id_imgnet), 'cat':category[line[0]], 'desc': desc[line[0]]}
    return synset_map
def is_predict_correct(ground_truth: list, img_idx: int, imgnet_label: int):
    """Return True iff the predicted ImageNet label matches the ground truth
    for the image at index *img_idx*."""
    return ground_truth[img_idx] == imgnet_label
872b61646177f42b5acb2f9b81247456f994ecd4 | 1,108 | py | Python | src/python/benchmarking_utils/Shremote_cfgs/kv/make_cmd_lists.py | isaac-ped/demikernel | 6f372569e3599d8bf9083df6c25490c42af74c0d | [
"MIT"
] | null | null | null | src/python/benchmarking_utils/Shremote_cfgs/kv/make_cmd_lists.py | isaac-ped/demikernel | 6f372569e3599d8bf9083df6c25490c42af74c0d | [
"MIT"
] | null | null | null | src/python/benchmarking_utils/Shremote_cfgs/kv/make_cmd_lists.py | isaac-ped/demikernel | 6f372569e3599d8bf9083df6c25490c42af74c0d | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import sys
import os
import random
import string
from argparse import ArgumentParser
# Command line: srv_file client_file value_size n_keys n_reqs reqtype
parser = ArgumentParser()
parser.add_argument('srv_file')
parser.add_argument('client_file')
parser.add_argument('value_size', type=int)
parser.add_argument('n_keys', type=int)
parser.add_argument('n_reqs', type=int)
parser.add_argument('reqtype', type=str)
args = parser.parse_args()
# Only these request verbs are understood by the benchmark clients.
if args.reqtype not in ("SZOF", "GET", "NNZ"):
    raise Exception("args.reqtype must be one of SZOF GET NNZ")
# Keys are simply the decimal strings "0" .. "n_keys-1".
keys = [('%d' % i) for i in range(args.n_keys)]
def write_server_files(filename):
    """Write one 'PUT <key> <random-value>' line per key to *filename*."""
    lines = []
    for key in keys:
        value = ''.join(random.choices(string.ascii_letters, k=args.value_size))
        lines.append('PUT %s %s' % (key, value))
    with open(filename, 'w') as out:
        out.write('\n'.join(lines))
def write_client_files(filename, n):
    """Write *n* request lines ('<reqtype> <key>') with keys sampled uniformly
    (with replacement) from the global key list."""
    chosen = random.choices(keys, k=n)
    requests = ['%s %s' % (args.reqtype, key) for key in chosen]
    with open(filename, 'w') as out:
        out.write('\n'.join(requests))
# Generate the server preload file and the client request file.
write_server_files(args.srv_file)
write_client_files(args.client_file, args.n_reqs)
| 29.945946 | 93 | 0.697653 | 179 | 1,108 | 4.167598 | 0.379888 | 0.072386 | 0.136729 | 0.064343 | 0.182306 | 0.150134 | 0.08311 | 0.08311 | 0.08311 | 0.08311 | 0 | 0.001058 | 0.147112 | 1,108 | 36 | 94 | 30.777778 | 0.78836 | 0.018953 | 0 | 0.071429 | 0 | 0 | 0.110497 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.178571 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
872ba5b5d271945c338ed8551c7ef45840b7921f | 3,638 | py | Python | scripts/ddd/algo.py | jlstevens/awesome-panel | c67b0f4529a3ce6a8517648f49fef8358e2e2c8b | [
"Apache-2.0"
] | null | null | null | scripts/ddd/algo.py | jlstevens/awesome-panel | c67b0f4529a3ce6a8517648f49fef8358e2e2c8b | [
"Apache-2.0"
] | null | null | null | scripts/ddd/algo.py | jlstevens/awesome-panel | c67b0f4529a3ce6a8517648f49fef8358e2e2c8b | [
"Apache-2.0"
] | null | null | null | # (Name, Drunkfactor, Subs=[])
from typing import Dict, List, Optional
class Person:
    """Node in a leadership tree read from stdin.

    Each person has a `drunk_factor`; two tree metrics are computed lazily
    and memoised:
      * max_drunk_factor_tree: largest root-to-leaf drunk_factor sum in the
        subtree rooted here.
      * max_communication_time_tree: largest path sum through the subtree
        (a path that may pass through a node and descend into two children).
    """

    def __init__(self, name: str, drunk_factor: int, leader_name: Optional[str]):
        self.name = name
        self.drunk_factor = drunk_factor
        # None for the root leader; otherwise the name of the direct leader.
        self.leader_name = leader_name
        self.subs: List["Person"] = []
        # Memoisation slots for the two properties below.
        self._max_drunk_factor_tree = None
        self._max_communication_time_tree = None

    @staticmethod
    def create_from_line(line: str) -> "Person":
        # Line format: "name factor" (root) or "name factor leader_name".
        # NOTE(review): any other shape silently returns None.
        split = line.split(" ")
        if len(split) == 2:
            return Person(split[0], int(split[1]), None)
        if len(split) == 3:
            return Person(split[0], int(split[1]), split[2])

    @staticmethod
    def create_from_lines(lines: str) -> Dict[str, "Person"]:
        """Parse one person per line, then wire up the subs lists.

        Assumes every leader_name appears as a person's name in the input.
        """
        persons = {}
        for person in (Person.create_from_line(line) for line in lines.splitlines()):
            persons[person.name] = person
        for person in persons.values():
            if person.leader_name:
                persons[person.leader_name].subs.append(person)
        return persons

    @classmethod
    def create_from_input(cls) -> Dict[str, "Person"]:
        """Read a count n, then n person lines, from stdin."""
        n = int(input())
        lines = []
        for _ in range(n):
            lines.append(input())
        return cls.create_from_lines("\n".join(lines))

    @property
    def max_drunk_factor_tree(self) -> int:
        # NOTE(review): a cached value of 0 is falsy and would be recomputed;
        # also the leaf case below is returned without being cached.
        if self._max_drunk_factor_tree:
            return self._max_drunk_factor_tree
        if not self.subs:
            return self.drunk_factor
        self._max_drunk_factor_tree = self.drunk_factor + max(
            (person.max_drunk_factor_tree) for person in self.subs
        )
        return self._max_drunk_factor_tree

    @property
    def max_communication_time_tree(self) -> int:
        if self._max_communication_time_tree:
            return self._max_communication_time_tree
        # A leaf has no path through it.
        if not self.subs:
            return 0
        if len(self.subs) == 1:
            # Single child: either the best path entirely inside the child's
            # subtree, or the chain from this node down.
            if self.subs[0].max_communication_time_tree > 0:
                return self.subs[0].max_communication_time_tree
            return self.max_drunk_factor_tree
        # Two or more children: the best path either stays inside one child's
        # subtree, or passes through this node joining the two best chains.
        drunk_factor_highest = 0
        drunk_factor_second_highest = 0
        max_communication_time_sub_tree = 0
        for person in self.subs:
            if person.max_drunk_factor_tree > drunk_factor_highest:
                drunk_factor_second_highest = drunk_factor_highest
                drunk_factor_highest = person.max_drunk_factor_tree
            elif person.max_drunk_factor_tree > drunk_factor_second_highest:
                drunk_factor_second_highest = person.max_drunk_factor_tree
            if person.max_communication_time_tree > max_communication_time_sub_tree:
                max_communication_time_sub_tree = person.max_communication_time_tree
        max_communication_time_two_subs = (
            self.drunk_factor + drunk_factor_highest + drunk_factor_second_highest
        )
        self._max_communication_time_tree = max(
            max_communication_time_sub_tree, max_communication_time_two_subs
        )
        return self._max_communication_time_tree

    @property
    def is_leader(self) -> bool:
        # The root is the only person without a leader.
        return self.leader_name is None

    def __str__(self):
        # Mirrors the input line format.
        if self.is_leader:
            return self.name + " " + str(self.drunk_factor)
        return self.name + " " + str(self.drunk_factor) + " " + self.leader_name

    def __repr__(self):
        return self.__str__()
if __name__ == "__main__":
    persons = Person.create_from_input()
    # Relies on dict insertion order: the first input line is the root leader.
    leader_name = list(persons)[0]
    leader = persons[leader_name]
    print(leader.max_communication_time_tree)
| 33.376147 | 85 | 0.64431 | 455 | 3,638 | 4.747253 | 0.131868 | 0.152778 | 0.157407 | 0.1 | 0.559259 | 0.412963 | 0.285185 | 0.080556 | 0 | 0 | 0 | 0.006033 | 0.271028 | 3,638 | 108 | 86 | 33.685185 | 0.808446 | 0.007697 | 0 | 0.130952 | 0 | 0 | 0.010532 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.107143 | false | 0 | 0.011905 | 0.02381 | 0.321429 | 0.011905 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
872c5f87d460c4803f7e6f206cc087776389f03f | 471 | py | Python | 100doc/009-student_grades.py | ralexrivero/python_fundation | 34a855db7380d3d91db6a8f02d97f287d038ef5f | [
"Apache-2.0"
] | 1 | 2021-09-19T04:09:48.000Z | 2021-09-19T04:09:48.000Z | 100doc/009-student_grades.py | ralexrivero/python_fundation | 34a855db7380d3d91db6a8f02d97f287d038ef5f | [
"Apache-2.0"
] | null | null | null | 100doc/009-student_grades.py | ralexrivero/python_fundation | 34a855db7380d3d91db6a8f02d97f287d038ef5f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# Raw scores per student.
scores = {
    "John": 75,
    "Ronald": 99,
    "Clarck": 78,
    "Mark": 69,
    "Newton": 82,
}

# Bucket each score into a letter-style grade.
grades = {}
for name, score in scores.items():
    if score > 90:
        grade = "Outstanding"
    elif score > 80:
        grade = "Exceeds Expectations"
    elif score > 70:
        grade = "Acceptable"
    else:
        grade = "Fail"
    grades[name] = grade

# Report one "name: grade" line per student.
for key in grades:
    print("{:s}: {:s}".format(key, grades[key]))
| 18.115385 | 48 | 0.535032 | 57 | 471 | 4.421053 | 0.614035 | 0.15873 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.051205 | 0.295117 | 471 | 25 | 49 | 18.84 | 0.707831 | 0.044586 | 0 | 0 | 0 | 0 | 0.180401 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.05 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
872f7a2fc8fbe9d50e0947ba871518017246583e | 712 | py | Python | sqjobs/brokers/eager.py | Thinkful/sqjobs | b8c99a72426f374519c495bf956f787647fa4d56 | [
"BSD-3-Clause"
] | 27 | 2015-02-13T13:47:53.000Z | 2021-04-21T14:28:20.000Z | sqjobs/brokers/eager.py | Thinkful/sqjobs | b8c99a72426f374519c495bf956f787647fa4d56 | [
"BSD-3-Clause"
] | 14 | 2015-02-25T16:47:53.000Z | 2021-06-10T20:36:53.000Z | sqjobs/brokers/eager.py | Thinkful/sqjobs | b8c99a72426f374519c495bf956f787647fa4d56 | [
"BSD-3-Clause"
] | 19 | 2015-02-18T12:41:24.000Z | 2020-02-18T09:45:43.000Z | import sys
import six
from .base import Broker
from ..job import JobResult
class Eager(Broker):
    """
    Broker that runs jobs synchronously, in-process, as soon as they
    are added.
    """

    def __repr__(self):
        return 'Broker(Eager)'

    def add_job(self, job_class, *args, **kwargs):
        """Instantiate job_class, execute it inline and return a JobResult.

        On failure the job's on_failure() hook runs and the original
        exception is re-raised with its traceback preserved.
        """
        new_id = self.gen_job_id()

        job = job_class()
        job.id = new_id

        try:
            outcome = job.execute(*args, **kwargs)
            job.on_success()
        except Exception:
            job.on_failure()
            # Re-raise the original exception with its traceback intact.
            six.reraise(*sys.exc_info())

        job_result = JobResult()
        job_result.job_id = new_id
        job_result.result = outcome
        return job_result
| 20.941176 | 55 | 0.594101 | 92 | 712 | 4.326087 | 0.413043 | 0.075377 | 0.060302 | 0.050251 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.311798 | 712 | 33 | 56 | 21.575758 | 0.812245 | 0.060393 | 0 | 0 | 0 | 0 | 0.019908 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095238 | false | 0 | 0.190476 | 0.047619 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
873021a845521edc67649b98ab32b6aacab1aaa3 | 1,589 | py | Python | core_modules/get_window.py | picass02005/PyMacro-async | 5945de7be39793c42d2e1d53a6050809d962338d | [
"MIT"
] | 1 | 2021-08-10T19:50:57.000Z | 2021-08-10T19:50:57.000Z | core_modules/get_window.py | picass02005/PyMacro-async | 5945de7be39793c42d2e1d53a6050809d962338d | [
"MIT"
] | null | null | null | core_modules/get_window.py | picass02005/PyMacro-async | 5945de7be39793c42d2e1d53a6050809d962338d | [
"MIT"
] | null | null | null | import sys
import psutil
from global_modules import logs
# Platform-specific setup: Win32 uses ctypes/user32, Linux shells out to
# xdotool (whose path is resolved once here and reused by get_window()).
if sys.platform == "win32":
    import ctypes
    from ctypes import wintypes
elif sys.platform == "linux" or sys.platform == "linux2":
    import subprocess

    # Resolve the xdotool binary with `which`; abort early if it is missing.
    proc = subprocess.Popen(["/bin/bash", "-c", "which xdotool"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout_, stderr_ = proc.communicate()
    if stderr_:
        logs.error("get_window", f"Cannot find xdotool. Please install it with apt / pacman / dnf / ... "
                                 f"(bash error: {stderr_.decode()[:-1]})")
        exit(1)
    else:
        # Strip the trailing newline from `which` output.
        path = stdout_.decode()[:-1]
        logs.info("get_window", f"xdotool found under {path}")
def get_window():
    """Return the name of the process owning the currently focused window.

    Windows: process image name without '.exe'.
    Linux: full executable path via xdotool; returns None when xdotool
    cannot resolve the active window's pid.
    """
    if sys.platform == "win32":
        user32 = ctypes.windll.user32
        h_wnd = user32.GetForegroundWindow()
        pid = wintypes.DWORD()
        # Fills `pid` with the owning process id of the foreground window.
        user32.GetWindowThreadProcessId(h_wnd, ctypes.byref(pid))
        return psutil.Process(pid=pid.value).name().replace(".exe", "")
    else:
        # `path` is the xdotool binary resolved at import time.
        process = subprocess.Popen([path, "getactivewindow", "getwindowpid"],
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
        stdout, stderr = process.communicate()
        if stderr:
            logs.error("get_window", f"Active window pid not found: (${path} getactivewindow getwindowpid) >& "
                                     f"{stderr.decode()[:-1]}")
            return None
        else:
            # Strip the trailing newline before parsing the pid.
            pid = int(stdout.decode()[:-1])
            return psutil.Process(pid=pid).exe()
| 31.156863 | 113 | 0.573946 | 170 | 1,589 | 5.294118 | 0.417647 | 0.048889 | 0.033333 | 0.04 | 0.255556 | 0.2 | 0.2 | 0.2 | 0 | 0 | 0 | 0.016043 | 0.293896 | 1,589 | 50 | 114 | 31.78 | 0.786096 | 0 | 0 | 0.138889 | 0 | 0 | 0.208307 | 0.028949 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027778 | false | 0 | 0.166667 | 0 | 0.277778 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
873546bba21758a6bdeb7202c2fa9d5e03ead94d | 6,031 | py | Python | crawler/anidb_spider.py | Alter-0/MiraiHyoka | b1a21492a0f67a2db488aff9086e440ec65269ce | [
"Apache-2.0"
] | null | null | null | crawler/anidb_spider.py | Alter-0/MiraiHyoka | b1a21492a0f67a2db488aff9086e440ec65269ce | [
"Apache-2.0"
] | null | null | null | crawler/anidb_spider.py | Alter-0/MiraiHyoka | b1a21492a0f67a2db488aff9086e440ec65269ce | [
"Apache-2.0"
] | null | null | null | import time
import requests
import re
import pymysql
import urllib3
from lxml import etree
from fake_useragent import UserAgent
# from scrapy import Selector
from requests.adapters import HTTPAdapter
# Silence the InsecureRequestWarning spam caused by verify=False requests below.
urllib3.disable_warnings()
# Module-level MySQL connection/cursor shared by spider(); closed in __main__.
# NOTE(review): positional connect args are (host, user, password, db) --
# pymysql prefers keyword arguments; confirm before upgrading pymysql.
db = pymysql.connect("localhost", "root", "", "miraihyoka")
cursor = db.cursor()
def spider():
    """Crawl anidb.net anime list pages and store (name_en, name_jp, rate)
    rows into the `anidb` MySQL table.

    Starts at page 59 and loops forever, advancing one list page per
    iteration; each title's detail page is fetched and scraped with XPath.
    """
    domain = "https://anidb.net/anime/?h=1&noalias=1&orderby.name=1.1&orderby.rating=0.2"
    i = 59
    # NOTE(review): these get_header() calls are dead -- get_web/get_index
    # build their own headers internally.
    headers = get_header()
    url = domain + '&page=' + str(i) + '&view=list'
    result = get_index(url)
    while 1:
        tree = etree.HTML(result)
        # Relative links to each title's detail page on the list page.
        results = tree.xpath("//td[@data-label='Title']/a/@href")
        print(results)
        headers = get_header()
        for url in results:
            headers = get_header()
            url = 'https://anidb.net' + url
            details_page = get_web(url)
            # Rating; empty string when the page has none (or parsing fails).
            try:
                tree = etree.HTML(details_page)
                rate = tree.xpath("//span[@itemprop='ratingValue']/text()")[0]
                print(rate)
            except:
                rate=''
            # English title: prefer the row flagged with the EN audio icon,
            # otherwise fall back to the main title cell.
            try:
                judge = tree.xpath('//div[@id="tab_1_pane"]//span[@class="i_icon i_flag i_audio_en"][1]')
                if len(judge):
                    name_en = tree.xpath('//span[@class="i_icon i_flag i_audio_en"][1]/../../label/text()')[0]
                else:
                    name_en = tree.xpath('//*[@id="tab_1_pane"]/div/table/tbody/tr[1]/td/span/text()')[0]
                # Drop a trailing " (...)" qualifier if present.
                pat = '(.*)\s\\('
                try:
                    name_en = re.compile(pat).findall(name_en)[0]
                except:
                    pass
                # Backticks would break naive SQL quoting elsewhere.
                name_en = name_en.replace('`', '\'')
                print(name_en)
            except:
                name_en = ''
            # Japanese title: layout differs when the page has a "Title Card" row.
            try:
                is_card = re.search('Title Card', details_page)
                if is_card is None:
                    # no Title Card row
                    name_jp = tree.xpath('//span[@class="i_icon i_flag i_audio_ja"][1]/../../label/text()')[0]
                else:
                    # has a Title Card row
                    name_jp = tree.xpath("//th[text()='Title Card'][1]/following-sibling::td[1]/label/text()")[0]
                pat = '(.*)\s\\('
                try:
                    name_jp = re.compile(pat).findall(name_jp)[0]
                except:
                    pass
                name_jp = name_jp.replace('`', '\'')
                print(name_jp)
            except:
                name_jp = ''
            # Persist the row; ping() reconnects if the connection timed out
            # during the slow crawl.
            sql = "insert into anidb(name_en,name_jp,rate) value (%s,%s,%s)"
            args = (name_en, name_jp, rate)
            db.ping(reconnect=True)
            cursor.execute(sql, args)
            db.commit()
            # Be polite to the server between detail-page fetches.
            time.sleep(5)
        i += 1
        url = domain + '&page=' + str(i) + '&view=list'
        print(url)
        result = get_index(url)
def get_header():
    """Return a fixed set of Chrome-like HTTP request headers."""
    header_pairs = [
        ('sec-ch-ua', '"Google Chrome";v="87", " Not;A Brand";v="99", "Chromium";v="87"'),
        ('accept', 'application/json, text/plain, */*'),
        ('user-agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                       'Chrome/87.0.4280.88 Safari/537.36'),
        ('sec-fetch-site', 'same-site'),
        ('sec-fetch-mode', 'cors'),
        ('sec-fetch-dest', 'empty'),
        ('accept-language', 'zh-CN,zh;q=0.9,en;q=0.8,zh-TW;q=0.7'),
        ('keep-live', 'false'),
    ]
    return dict(header_pairs)
def get_web(url):
    """GET *url* through a fresh session with retries.

    Returns the response body text, or None after 3 failed attempts.
    """
    for attempt in range(3):
        print(time.strftime('%Y-%m-%d %H:%M:%S'))
        try:
            s = requests.session()
            s.keep_alive = False
            s.DEFAULT_RETRIES = 5
            # Transport-level retries on top of the 3 application-level attempts.
            s.mount('http://', HTTPAdapter(max_retries=3))
            s.mount('https://', HTTPAdapter(max_retries=3))
            headers = get_header()
            result = s.get(url, headers=headers, verify=False, timeout=60).text
            return result
        except requests.exceptions.RequestException:
            # Narrowed from a bare `except:` so KeyboardInterrupt and
            # programming errors are no longer silently swallowed.
            print('正尝试重新连接...')
    return None
def get_index(url):
    """GET *url* through a fresh session, retrying forever until it succeeds.

    Returns the response body text.
    """
    while True:
        print(time.strftime('%Y-%m-%d %H:%M:%S'))
        try:
            s = requests.session()
            s.keep_alive = False
            s.DEFAULT_RETRIES = 5
            # Transport-level retries inside each attempt.
            s.mount('http://', HTTPAdapter(max_retries=3))
            s.mount('https://', HTTPAdapter(max_retries=3))
            headers = get_header()
            result = s.get(url, headers=headers, verify=False, timeout=60).text
            return result
        except requests.exceptions.RequestException:
            # Narrowed from a bare `except:` so KeyboardInterrupt and
            # programming errors are no longer silently swallowed.
            print('正尝试重新连接...')
if __name__ == '__main__':
    # Run the crawler; close the shared module-level DB connection on exit.
    spider()
    db.close()
| 35.476471 | 122 | 0.503565 | 734 | 6,031 | 4.010899 | 0.26703 | 0.024457 | 0.02038 | 0.028533 | 0.461957 | 0.381793 | 0.362092 | 0.316576 | 0.316576 | 0.316576 | 0 | 0.023506 | 0.322832 | 6,031 | 169 | 123 | 35.686391 | 0.697356 | 0.196816 | 0 | 0.411765 | 0 | 0.042017 | 0.22296 | 0.081378 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033613 | false | 0.033613 | 0.067227 | 0 | 0.134454 | 0.07563 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8736c28143447fda9d01e4dd913ba05e06747b0d | 1,637 | py | Python | main.py | okada39/pinn_projectile | 0c00a1ba5a1324abf0fc4de868fff106c53ee89e | [
"MIT"
] | null | null | null | main.py | okada39/pinn_projectile | 0c00a1ba5a1324abf0fc4de868fff106c53ee89e | [
"MIT"
] | null | null | null | main.py | okada39/pinn_projectile | 0c00a1ba5a1324abf0fc4de868fff106c53ee89e | [
"MIT"
] | 1 | 2022-03-05T05:35:59.000Z | 2022-03-05T05:35:59.000Z | import lib.tf_silent
import numpy as np
import matplotlib.pyplot as plt
from lib.pinn import PINN
from lib.network import Network
from lib.optimizer import L_BFGS_B
def theoretical_motion(input, g):
    """
    Compute the theoretical projectile motion.

    Args:
        input: ndarray with shape (num_samples, 3) for t, v0_x, v0_z
        g: gravity acceleration

    Returns:
        theoretical motion of x, z (each with shape (num_samples, 1)).
    """
    # Slice the three columns, keeping the trailing axis so shapes stay (n, 1).
    t = input[..., 0:1]
    v0_x = input[..., 1:2]
    v0_z = input[..., 2:3]
    horizontal = t * v0_x
    vertical = t * v0_z - 0.5 * g * t * t
    return horizontal, vertical
if __name__ == '__main__':
    """
    Test the physics informed neural network (PINN) model for a projectile motion.
    """
    # number of training samples
    num_train_samples = 10000
    # number of test samples
    num_test_samples = 100
    # gravity acceleration
    g = 1.0
    # build a core network model
    network = Network.build()
    network.summary()
    # build a PINN model wrapping the core network with the physics loss
    pinn = PINN(network, g).build()
    # train the model using the L-BFGS-B algorithm on random (t, v0_x, v0_z) samples
    samples = np.random.rand(num_train_samples, 3)
    lbfgs = L_BFGS_B(model=pinn, samples=samples)
    lbfgs.fit()
    # test: evaluate the trained network on t in [0, 1] with fixed v0 = (0.5, 0.5)
    t = np.linspace(0, 1, num_test_samples).reshape((num_test_samples, 1))
    v0 = 0.5 * np.ones((num_test_samples, 2))
    x = np.concatenate([t, v0], axis=-1)
    r_pred = network.predict(x, batch_size=num_test_samples)
    # plot analytic theory vs network prediction of the (x, z) trajectory
    plt.plot(*theoretical_motion(x, g), label='theory', color='crimson')
    plt.scatter(r_pred[..., 0], r_pred[..., 1], label='pinn', s=5, color='royalblue')
    plt.xlabel('x')
    plt.ylabel('z')
    plt.legend()
    plt.show()
87385931bf180f383987d8f5225b1fd084f846ea | 1,889 | py | Python | utils/image_transformation.py | groverkds/hand_sign_recognition | 4ae05c43af869e990acc636b0a031f903709dfbc | [
"Apache-2.0"
] | 3 | 2020-02-18T07:12:42.000Z | 2022-01-07T14:35:42.000Z | utils/image_transformation.py | polawarrushi/hand_sign_recognition | 493b3568c462280533a33cab592008c91da1f999 | [
"Apache-2.0"
] | null | null | null | utils/image_transformation.py | polawarrushi/hand_sign_recognition | 493b3568c462280533a33cab592008c91da1f999 | [
"Apache-2.0"
] | 8 | 2019-05-22T15:45:17.000Z | 2021-09-15T17:28:50.000Z | import cv2
import numpy as np
import math
def transform(frame):
    """Preprocess a frame: grayscale -> Otsu threshold -> centred contour crop
    -> resize to 100x100.

    Accepts either a BGR or an already-grayscale image.
    """
    try:
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    except cv2.error:
        # cvtColor raises cv2.error when the input is already single-channel;
        # keep the frame as-is.  (Narrowed from a bare `except:`.)
        pass
    frame = threshold(frame)
    frame = get_contours(frame)
    frame = resize(frame, 100)
    return frame
def resize(frame, size):
    """Resize *frame* to a square of side *size* pixels."""
    square = (size, size)
    return cv2.resize(frame, square)
def threshold(frame):
    """Binarize *frame* using Otsu's method and return the thresholded image."""
    retval, binarized = cv2.threshold(frame, 120, 255, cv2.THRESH_OTSU)
    return binarized
def get_contours(frame):
    """Clean up a binary frame, find its largest contour, and return that
    region centred on a square black canvas (side = max(w, h) of the
    bounding box).
    """
    # Morphological open-ish cleanup to drop speckle noise.
    se = np.ones((10,10),np.uint8)
    frame = cv2.erode(frame,se,iterations = 2)
    frame = cv2.dilate(frame,se,iterations = 2)
    frame = cv2.erode(frame,se,iterations = 1)
    # NOTE(review): 3-value findContours return matches the OpenCV 3.x API;
    # OpenCV 4.x returns only (contours, hierarchy).
    (_,contours,_) = cv2.findContours(frame,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    c = max(contours, key = cv2.contourArea)
    # `hull` is computed but unused below (kept for parity with the original).
    hull = [cv2.convexHull(c) for c in contours]
    x,y,w,h = cv2.boundingRect(c)
    final = frame.copy()
    # Canvas side is the dominant bounding-box dimension.
    if w>h:
        dom = w
    else:
        dom = h
    bg = np.zeros((dom,dom),np.uint8)
    roi = final[y:y+h,x:x+w]
    image = bg
    # Centre the ROI along the shorter axis of the square canvas.
    if w>h:
        image[int((w-h)/2):int((w+h)/2),0:w]=roi
    else:
        image[0:h,int((h-w)/2):int((w+h)/2)]=roi
    return image
def auto_canny(image, sigma=0.33):
    """Canny edge detection with thresholds derived automatically from the
    median pixel intensity (lower/upper = median * (1 -/+ sigma), clamped
    to [0, 255])."""
    median_intensity = np.median(image)
    lower = int(max(0, (1.0 - sigma) * median_intensity))
    upper = int(min(255, (1.0 + sigma) * median_intensity))
    return cv2.Canny(image, lower, upper)
def write_text(frame, text):
    """Draw *text* on *frame* near the top-left corner (red, large) and
    return the annotated frame."""
    origin = (50, 50)
    cv2.putText(
        frame,
        text,
        origin,
        cv2.FONT_HERSHEY_SIMPLEX,
        2,             # font scale
        (0, 0, 255),   # red in BGR
        3,             # line thickness
    )
    return frame
873aac2e5f787d5a3ca6b31d3f26e38999e76248 | 2,357 | py | Python | models/xdog.py | jialuogao/ECE539FinalProject | 8feed1a1a606225dc9c55fccc09f41bf6767c3e0 | [
"BSD-3-Clause"
] | null | null | null | models/xdog.py | jialuogao/ECE539FinalProject | 8feed1a1a606225dc9c55fccc09f41bf6767c3e0 | [
"BSD-3-Clause"
] | null | null | null | models/xdog.py | jialuogao/ECE539FinalProject | 8feed1a1a606225dc9c55fccc09f41bf6767c3e0 | [
"BSD-3-Clause"
] | 1 | 2020-12-04T01:36:21.000Z | 2020-12-04T01:36:21.000Z | import cv2
import numpy as np
def dog(img, size=(0, 0), k=1.6, sigma=0.5, gamma=1):
    """Difference of Gaussians: blur(img, sigma) - gamma * blur(img, sigma*k).

    With size=(0, 0) the Gaussian kernel size is derived from sigma by OpenCV.
    (Leftover debug prints of the first blur's max were removed.)
    """
    img1 = cv2.GaussianBlur(img, size, sigma)
    img2 = cv2.GaussianBlur(img, size, sigma * k)
    return (img1 - gamma * img2)
def xdog(img, sigma=0.5, k=1.6, gamma=1, epsilon=1, phi=1):
    """eXtended Difference of Gaussians: threshold the DoG response at
    `epsilon`, soft-ramping values above it with tanh.

    NOTE(review): values are assigned back into the DoG array in place, so
    the result keeps the DoG array's dtype -- with an integer input the
    `1 + tanh(...)` values are truncated on assignment.  The pixel loop is
    kept (not vectorised) to preserve that behavior exactly.
    """
    img = dog(img, sigma=sigma, k=k, gamma=gamma)
    for i in range(0, img.shape[0]):
        for j in range(0, img.shape[1]):
            if(img[i,j] < epsilon):
                img[i,j] = 1
            else:
                img[i,j] = (1 + np.tanh(phi*(img[i,j])))
    return img
def xdog_thresh(img, sigma=0.5, k=1.6, gamma=1, epsilon=1, phi=1, alpha=1):
    """XDoG followed by blur-and-threshold: pixels above the (pre-blur) mean
    are clamped to the pre-blur maximum, and the result is normalised by
    that maximum.

    NOTE(review): `alpha` is accepted but unused; `max` shadows the builtin.
    """
    img = xdog(img, sigma=sigma, k=k, gamma=gamma, epsilon=epsilon, phi=phi)
    # Statistics are taken BEFORE the extra blur below.
    mean = np.mean(img)
    max = np.max(img)
    img = cv2.GaussianBlur(src=img, ksize=(0,0), sigmaX=sigma*3)
    # Clamp bright pixels to the pre-blur maximum.
    for i in range(0, img.shape[0]):
        for j in range(0, img.shape[1]):
            if(img[i,j] > mean):
                img[i,j] = max
    # Normalise to [0, 1] relative to the pre-blur maximum.
    return img/max
if __name__ == '__main__':
    # Demo: open an image in grayscale, run the XDoG threshold pipeline and
    # display original vs result.
    img = cv2.imread('./imgs/horse.png', cv2.IMREAD_GRAYSCALE)
    print(img.shape)
    img = cv2.resize(img, (400, 400))
    print(img.shape)
    # k = 1.6 as proposed in the paper (unused here; kept for reference)
    k = 1.6
    cv2.imshow("orig", img)
    # Scale the [0, 1] result back to 8-bit for display.
    cv2.imshow("thres", np.uint8(255*xdog_thresh(img, sigma=0.5, k=1.6, gamma=0.98, epsilon=-0.1, phi=200)))
    print(img)
    print(255*xdog_thresh(img, sigma=0.5, k=1.6, gamma=0.98, epsilon=-0.1, phi=200))
    cv2.waitKey(0)
873c67c1eac26d9929c452d0c7419948dcea5913 | 803 | py | Python | src/data/75.py | NULLCT/LOMC | 79a16474a8f21310e0fb47e536d527dd5dc6d655 | [
"MIT"
] | null | null | null | src/data/75.py | NULLCT/LOMC | 79a16474a8f21310e0fb47e536d527dd5dc6d655 | [
"MIT"
] | null | null | null | src/data/75.py | NULLCT/LOMC | 79a16474a8f21310e0fb47e536d527dd5dc6d655 | [
"MIT"
] | null | null | null | def main():
N, Q = map(int, input().split())
import collections
tree = collections.defaultdict(list)
for _ in range(N - 1):
a, b = map(int, input().split())
a -= 1
b -= 1
tree[a].append(b)
tree[b].append(a)
RED = 1
BLACK = 2
colors = [None] * N
# DFS for color
stack = [(0, RED)]
while stack:
u, c = stack.pop()
colors[u] = c
next_c = BLACK if c == RED else RED
for v in tree[u]:
if not colors[v]:
stack.append((v, next_c))
for _ in range(Q):
c, d = map(int, input().split())
c -= 1
d -= 1
if colors[c] == colors[d]:
print('Town')
else:
print('Road')
if __name__ == "__main__":
main()
| 20.589744 | 43 | 0.447073 | 109 | 803 | 3.183486 | 0.394495 | 0.051873 | 0.095101 | 0.138329 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016632 | 0.400996 | 803 | 38 | 44 | 21.131579 | 0.704782 | 0.016189 | 0 | 0 | 0 | 0 | 0.020305 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032258 | false | 0 | 0.032258 | 0 | 0.064516 | 0.064516 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
873db11f74a1a1143dde3d98487cd6051dbe23cc | 4,483 | py | Python | simplipy/device/lock.py | gderrick/simplisafe-python | 7cdd9f92661e2e26bc1e5c6cb5689d5916807234 | [
"MIT"
] | 3 | 2017-05-21T16:49:38.000Z | 2018-07-05T16:16:45.000Z | simplipy/device/lock.py | gderrick/simplisafe-python | 7cdd9f92661e2e26bc1e5c6cb5689d5916807234 | [
"MIT"
] | 2 | 2017-07-20T11:57:23.000Z | 2018-09-24T03:03:19.000Z | simplipy/device/lock.py | gderrick/simplisafe-python | 7cdd9f92661e2e26bc1e5c6cb5689d5916807234 | [
"MIT"
] | 7 | 2017-04-15T05:52:09.000Z | 2018-08-19T01:49:54.000Z | """Define a SimpliSafe lock."""
from __future__ import annotations
from enum import Enum
from typing import TYPE_CHECKING, Any, Awaitable, Callable, cast
from simplipy.const import LOGGER
from simplipy.device import DeviceTypes, DeviceV3
if TYPE_CHECKING:
from simplipy.system import System
class LockStates(Enum):
    """States that a lock can be in."""
    # Integer values 0-2 are the raw states this module maps to/from;
    # UNKNOWN is a local sentinel returned when the raw state from the
    # API cannot be interpreted (see Lock.state).
    UNLOCKED = 0
    LOCKED = 1
    JAMMED = 2
    UNKNOWN = 99
class Lock(DeviceV3):
    """A lock that works with V3 systems.

    Note that this class shouldn't be instantiated directly; it will be
    instantiated as appropriate via :meth:`simplipy.API.async_get_systems`.

    :param api: A :meth:`simplipy.API` object
    :type api: :meth:`simplipy.API`
    :param system: A :meth:`simplipy.system.System` object (or one of its subclasses)
    :type system: :meth:`simplipy.system.System`
    :param device_type: The type of device represented
    :type device_type: :meth:`simplipy.device.DeviceTypes`
    :param serial: The serial number of the device
    :type serial: ``str``
    """

    class _InternalStates(Enum):
        """Define an enum to map internal lock states to values we understand."""

        LOCKED = 1
        UNLOCKED = 2

    def __init__(
        self,
        request: Callable[..., Awaitable],
        system: System,
        device_type: DeviceTypes,
        serial: str,
    ) -> None:
        """Initialize."""
        super().__init__(system, device_type, serial)
        self._request = request

    def _status_flag(self, key: str) -> bool:
        """Return a boolean flag from this lock's status payload.

        Consolidates the sensor-data lookup previously duplicated by the
        disabled/battery/offline properties.
        """
        return cast(bool, self._system.sensor_data[self._serial]["status"][key])

    @property
    def disabled(self) -> bool:
        """Return whether the lock is disabled.

        :rtype: ``bool``
        """
        return self._status_flag("lockDisabled")

    @property
    def lock_low_battery(self) -> bool:
        """Return whether the lock's battery is low.

        :rtype: ``bool``
        """
        return self._status_flag("lockLowBattery")

    @property
    def pin_pad_low_battery(self) -> bool:
        """Return whether the pin pad's battery is low.

        :rtype: ``bool``
        """
        return self._status_flag("pinPadLowBattery")

    @property
    def pin_pad_offline(self) -> bool:
        """Return whether the pin pad is offline.

        :rtype: ``bool``
        """
        return self._status_flag("pinPadOffline")

    @property
    def state(self) -> LockStates:
        """Return the current state of the lock.

        :rtype: :meth:`simplipy.lock.LockStates`
        """
        # A jam takes precedence over the raw locked/unlocked state:
        if bool(self._system.sensor_data[self._serial]["status"]["lockJamState"]):
            return LockStates.JAMMED

        raw_state = self._system.sensor_data[self._serial]["status"]["lockState"]
        try:
            internal_state = self._InternalStates(raw_state)
        except ValueError:
            LOGGER.error("Unknown raw lock state: %s", raw_state)
            return LockStates.UNKNOWN

        if internal_state == self._InternalStates.LOCKED:
            return LockStates.LOCKED
        return LockStates.UNLOCKED

    def as_dict(self) -> dict[str, Any]:
        """Return dictionary version of this device."""
        return {
            **super().as_dict(),
            "disabled": self.disabled,
            "lock_low_battery": self.lock_low_battery,
            "pin_pad_low_battery": self.pin_pad_low_battery,
            "pin_pad_offline": self.pin_pad_offline,
            "state": self.state.value,
        }

    async def _async_set_state(self, action: str, internal_state: _InternalStates) -> None:
        """Send a state-change request and update the cached lock state.

        Consolidates the request + cache-update sequence previously
        duplicated by async_lock/async_unlock.

        :param action: "lock" or "unlock", as expected by the doorlock endpoint
        :param internal_state: the internal state to record locally on success
        """
        await self._request(
            "post",
            f"doorlock/{self._system.system_id}/{self.serial}/state",
            json={"state": action},
        )
        # Update the internal state representation:
        self._system.sensor_data[self._serial]["status"][
            "lockState"
        ] = internal_state.value

    async def async_lock(self) -> None:
        """Lock the lock."""
        await self._async_set_state("lock", self._InternalStates.LOCKED)

    async def async_unlock(self) -> None:
        """Unlock the lock."""
        await self._async_set_state("unlock", self._InternalStates.UNLOCKED)
| 29.11039 | 86 | 0.602275 | 503 | 4,483 | 5.196819 | 0.252485 | 0.038256 | 0.048967 | 0.061209 | 0.330145 | 0.320199 | 0.307957 | 0.268171 | 0.235654 | 0.235654 | 0 | 0.003094 | 0.279054 | 4,483 | 153 | 87 | 29.300654 | 0.805693 | 0.252063 | 0 | 0.247059 | 0 | 0 | 0.116837 | 0.033931 | 0 | 0 | 0 | 0 | 0 | 1 | 0.082353 | false | 0 | 0.070588 | 0 | 0.341176 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
873e2e534ceea6ba2ca685b8e535b0bf75b98bfc | 4,291 | py | Python | mxnet/alexnet.py | s0nghuiming/convnet-benchmarks | 5b91b6966714e8358292ae440d807c6d9d2cf7fe | [
"MIT"
] | 2,829 | 2015-01-02T19:34:27.000Z | 2022-02-22T03:42:06.000Z | mxnet/alexnet.py | Reddmist/convnet-benchmarks | b458aab61c0ac2257c0990119b5de15c1e886f02 | [
"MIT"
] | 90 | 2015-02-18T21:56:21.000Z | 2021-02-06T22:20:30.000Z | mxnet/alexnet.py | Reddmist/convnet-benchmarks | b458aab61c0ac2257c0990119b5de15c1e886f02 | [
"MIT"
] | 644 | 2015-01-02T19:31:23.000Z | 2022-01-07T23:53:45.000Z | # In[1]:
import mxnet as mx
import numpy as np
import time
# In[2]:
# Basic Info
dev = mx.gpu()
batch_size = 128
dshape = (batch_size, 3, 224, 224)
lshape = (batch_size)
num_epoch = 100
# Mock data iterator
tmp_data = np.random.uniform(-1, 1, dshape).astype("float32")
train_iter = mx.io.NDArrayIter(data=tmp_data, batch_size=batch_size, shuffle=False, last_batch_handle='pad')
# In[5]:
def get_alexnet_symbol():
    """Build the AlexNet graph as an MXNet Symbol.

    Returns the final fully-connected layer (1000-way logits); no softmax
    or loss is attached, so the caller binds the symbol and drives
    forward/backward directly (see the benchmark code below).
    """
    ## define alexnet
    input_data = mx.symbol.Variable(name="data")
    # stage 1: 11x11 conv, stride 4 -> ReLU -> 3x3 max-pool, stride 2
    conv1 = mx.symbol.Convolution(
        data=input_data, kernel=(11, 11), stride=(4, 4), num_filter=64)
    relu1 = mx.symbol.Activation(data=conv1, act_type="relu")
    pool1 = mx.symbol.Pooling(
        data=relu1, pool_type="max", kernel=(3, 3), stride=(2,2))
    # LRN from the original paper is disabled for benchmarking:
    # lrn1 = mx.symbol.LRN(data=pool1, alpha=0.0001, beta=0.75, knorm=1, nsize=5)
    # stage 2: 5x5 conv (pad 2) -> ReLU -> 3x3 max-pool, stride 2
    conv2 = mx.symbol.Convolution(
        data=pool1, kernel=(5, 5), pad=(2, 2), num_filter=192)
    relu2 = mx.symbol.Activation(data=conv2, act_type="relu")
    pool2 = mx.symbol.Pooling(data=relu2, kernel=(3, 3), stride=(2, 2), pool_type="max")
    # lrn2 = mx.symbol.LRN(data=pool2, alpha=0.0001, beta=0.75, knorm=1, nsize=5)
    # stage 3: three 3x3 convs (pad 1) -> final 3x3 max-pool, stride 2
    conv3 = mx.symbol.Convolution(
        data=pool2, kernel=(3, 3), pad=(1, 1), num_filter=384)
    relu3 = mx.symbol.Activation(data=conv3, act_type="relu")
    conv4 = mx.symbol.Convolution(
        data=relu3, kernel=(3, 3), pad=(1, 1), num_filter=256)
    relu4 = mx.symbol.Activation(data=conv4, act_type="relu")
    conv5 = mx.symbol.Convolution(
        data=relu4, kernel=(3, 3), pad=(1, 1), num_filter=256)
    relu5 = mx.symbol.Activation(data=conv5, act_type="relu")
    pool3 = mx.symbol.Pooling(data=relu5, kernel=(3, 3), stride=(2, 2), pool_type="max")
    # stage 4: flatten -> FC 4096 -> ReLU
    flatten = mx.symbol.Flatten(data=pool3)
    fc1 = mx.symbol.FullyConnected(data=flatten, num_hidden=4096)
    relu6 = mx.symbol.Activation(data=fc1, act_type="relu")
    # stage 5: FC 4096 -> ReLU
    fc2 = mx.symbol.FullyConnected(data=relu6, num_hidden=4096)
    relu7 = mx.symbol.Activation(data=fc2, act_type="relu")
    # stage 6: FC 1000 (class logits)
    fc3 = mx.symbol.FullyConnected(data=relu7, num_hidden=1000)
    return fc3
# In[6]:
# bind to get executor
# This is what happened behind mx.model.Feedforward
fc3 = get_alexnet_symbol()
alex_exec = fc3.simple_bind(ctx=dev, grad_req="write", data=dshape)
print("Temp space: ", alex_exec.debug_str().split('\n')[-3])
# Find where to set data
# In[7]:
# some useful structure
# data structues
arg_names = fc3.list_arguments()
arg_map = dict(zip(arg_names, alex_exec.arg_arrays))
grad_map = dict(zip(arg_names, alex_exec.grad_arrays))
param_blocks = [(i, arg_map[arg_names[i]], grad_map[arg_names[i]]) for i in range(len(arg_names)) if grad_map[arg_names[i]] != None]
input_ndarray = arg_map["data"]
grad = mx.nd.zeros((batch_size, 1000), ctx=mx.gpu())
param_len = len(param_blocks)
# In[8]:
#init
for i in range(param_len):
param_blocks[i][1][:] = mx.rnd.uniform(-0.01, 0.01, param_blocks[i][1].shape)
param_blocks[i][2][:] = 0.
# Set data
train_iter.reset()
dbatch = train_iter.next()
dbatch.data[0].copyto(input_ndarray)
# block all async all
mx.nd.waitall()
# In[12]:
# Test forward
def test_forward(model, epoch):
tic = time.time()
for i in range(epoch):
model.forward(is_train=True)
# Note: This command will force thread engine block, which hurts performance a lot
# Remove it will bring parallelism bias
# model.outputs[0].wait_to_read()
model.outputs[0].wait_to_read()
toc = time.time()
return (toc - tic) / epoch
print("Avg forward per batch: ", test_forward(alex_exec, num_epoch))
# In[13]:
# Test full path
def test_full(model, epoch):
    """Time `epoch` forward+backward passes (plus a mock parameter update)
    and return mean seconds per batch."""
    start = time.time()
    for _ in range(epoch):
        model.forward(is_train=True)
        model.backward([grad])
        #model.outputs[0].wait_to_read()
        # mx.nd.waitall()
        # Mock SGD update (zero learning rate) to include update cost.
        for idx in range(param_len):
            param_blocks[idx][1][:] -= 0.0 * param_blocks[idx][2][:]
    # Forcing the thread engine to block per-iteration hurts performance a
    # lot, so synchronize only once at the end of the run.
    mx.nd.waitall()
    elapsed = time.time() - start
    return elapsed / epoch
print("Avg fullpath per batch: ", test_full(alex_exec, num_epoch))
# In[ ]:
| 30.006993 | 132 | 0.66255 | 678 | 4,291 | 4.061947 | 0.294985 | 0.063907 | 0.045752 | 0.055919 | 0.285766 | 0.261075 | 0.230211 | 0.211329 | 0.203341 | 0.139434 | 0 | 0.050968 | 0.181543 | 4,291 | 142 | 133 | 30.21831 | 0.7332 | 0.186204 | 0 | 0.189189 | 0 | 0 | 0.035052 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.040541 | false | 0 | 0.040541 | 0 | 0.121622 | 0.040541 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
873fd671c46e323d79a2f0109ba1aa6756c796ee | 23,213 | py | Python | nfv/nfv-vim/nfv_vim/api/controllers/v1/virtualised_resources/_computes_api.py | SidneyAn/nfv | 5f0262a5b6ea4be59f977b9c587c483cbe0e373d | [
"Apache-2.0"
] | 2 | 2020-02-07T19:01:36.000Z | 2022-02-23T01:41:46.000Z | nfv/nfv-vim/nfv_vim/api/controllers/v1/virtualised_resources/_computes_api.py | SidneyAn/nfv | 5f0262a5b6ea4be59f977b9c587c483cbe0e373d | [
"Apache-2.0"
] | 1 | 2021-01-14T12:02:25.000Z | 2021-01-14T12:02:25.000Z | nfv/nfv-vim/nfv_vim/api/controllers/v1/virtualised_resources/_computes_api.py | SidneyAn/nfv | 5f0262a5b6ea4be59f977b9c587c483cbe0e373d | [
"Apache-2.0"
] | 2 | 2021-01-13T08:39:21.000Z | 2022-02-09T00:21:55.000Z | # Copyright (c) 2015-2016 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import json
import pecan
import six
from six.moves import http_client as httplib
from wsme import types as wsme_types
import wsmeext.pecan as wsme_pecan
from nfv_common import debug
from nfv_common import validate
from nfv_vim import rpc
DLOG = debug.debug_get_logger('nfv_vim.api.virtualised_compute')
ComputeOperationType = wsme_types.Enum(str, 'start', 'stop', 'pause',
'unpause', 'suspend', 'resume',
'reboot')
class ComputeOperateRequestData(wsme_types.Base):
    """
    Virtualised Resources - Compute Operate Request Data
    """
    # Required operation name; one of the ComputeOperationType enum values
    # (start/stop/pause/unpause/suspend/resume/reboot).
    compute_operation = wsme_types.wsattr(ComputeOperationType, mandatory=True)
    # Optional free-form payload; not consumed by the operate handler below.
    compute_operation_data = wsme_types.wsattr(six.text_type, mandatory=False,
                                               default=None)
class ComputeOperateAPI(pecan.rest.RestController):
    """
    Virtualised Resources - Computes Operate API
    """
    # Maps an operation name from the request body to the RPC request class
    # that carries it to the VIM (replaces a 7-way if/elif chain that
    # repeated the same uuid-assignment and dispatch in every branch).
    _OPERATION_RPC = {
        'start': rpc.APIRequestStartInstance,
        'stop': rpc.APIRequestStopInstance,
        'pause': rpc.APIRequestPauseInstance,
        'unpause': rpc.APIRequestUnpauseInstance,
        'suspend': rpc.APIRequestSuspendInstance,
        'resume': rpc.APIRequestResumeInstance,
        'reboot': rpc.APIRequestRebootInstance,
    }

    @staticmethod
    def _do_operation(rpc_request):
        """Send the RPC request to the VIM and map the result to an HTTP code.

        :return: ACCEPTED on success, NOT_FOUND if the resource is unknown,
            INTERNAL_SERVER_ERROR on no/unexpected response
        """
        vim_connection = pecan.request.vim.open_connection()
        vim_connection.send(rpc_request.serialize())
        msg = vim_connection.receive()
        if msg is None:
            DLOG.error("No response received for %s." % rpc_request)
            return httplib.INTERNAL_SERVER_ERROR

        response = rpc.RPCMessage.deserialize(msg)
        if rpc.RPC_MSG_RESULT.NOT_FOUND == response.result:
            DLOG.debug("Resource was not found for %s." % rpc_request)
            return httplib.NOT_FOUND

        elif rpc.RPC_MSG_RESULT.SUCCESS == response.result:
            return httplib.ACCEPTED

        DLOG.error("Unexpected result received for %s, result=%s."
                   % (rpc_request, response.result))
        return httplib.INTERNAL_SERVER_ERROR

    @wsme_pecan.wsexpose(None, six.text_type, body=ComputeOperateRequestData,
                         status_code=httplib.ACCEPTED)
    def post(self, compute_id, request_data):
        """
        Perform an operation against a virtual compute resource
        """
        DLOG.verbose("Compute-API operate called for compute %s, "
                     "operation=%s." % (compute_id,
                                        request_data.compute_operation))
        if not validate.valid_uuid_str(compute_id):
            DLOG.error("Invalid uuid received, uuid=%s." % compute_id)
            return pecan.abort(httplib.BAD_REQUEST)

        rpc_request_cls = self._OPERATION_RPC.get(request_data.compute_operation)
        if rpc_request_cls is None:
            # Unknown operation: same BAD_REQUEST outcome as before.
            http_response = httplib.BAD_REQUEST
        else:
            rpc_request = rpc_request_cls()
            rpc_request.uuid = compute_id
            http_response = self._do_operation(rpc_request)

        if httplib.ACCEPTED != http_response:
            DLOG.error("Compute operation %s failed for %s, http_response=%s."
                       % (request_data.compute_operation, compute_id,
                          http_response))
            return pecan.abort(http_response)
ComputeMigrateType = wsme_types.Enum(str, 'live', 'cold', 'evacuate')
class ComputeMigrateRequestData(wsme_types.Base):
    """
    Virtualised Resources - Compute Migrate Request Data
    """
    # Required migrate kind; one of the ComputeMigrateType enum values
    # (live/cold/evacuate).
    migrate_type = wsme_types.wsattr(ComputeMigrateType, mandatory=True)
class ComputeMigrateAPI(pecan.rest.RestController):
    """
    Virtualised Resources - Computes Migrate API
    """
    # Maps a migrate type from the request body to the RPC request class
    # that carries it to the VIM (replaces the repetitive if/elif chain).
    _MIGRATE_RPC = {
        'live': rpc.APIRequestLiveMigrateInstance,
        'cold': rpc.APIRequestColdMigrateInstance,
        'evacuate': rpc.APIRequestEvacuateInstance,
    }

    @staticmethod
    def _do_migrate(rpc_request):
        """Send the migrate RPC to the VIM and map the result to an HTTP code.

        :return: ACCEPTED on success, NOT_FOUND if the resource is unknown,
            INTERNAL_SERVER_ERROR on no/unexpected response
        """
        vim_connection = pecan.request.vim.open_connection()
        vim_connection.send(rpc_request.serialize())
        msg = vim_connection.receive()
        if msg is None:
            DLOG.error("No response received for %s." % rpc_request)
            return httplib.INTERNAL_SERVER_ERROR

        response = rpc.RPCMessage.deserialize(msg)
        if rpc.RPC_MSG_RESULT.NOT_FOUND == response.result:
            DLOG.debug("Resource was not found for %s." % rpc_request)
            return httplib.NOT_FOUND

        elif rpc.RPC_MSG_RESULT.SUCCESS == response.result:
            return httplib.ACCEPTED

        DLOG.error("Unexpected result received for %s, result=%s."
                   % (rpc_request, response.result))
        return httplib.INTERNAL_SERVER_ERROR

    @wsme_pecan.wsexpose(None, six.text_type, body=ComputeMigrateRequestData,
                         status_code=httplib.ACCEPTED)
    def post(self, compute_id, request_data):
        """
        Perform a migrate against a virtual compute resource
        """
        DLOG.verbose("Compute-API migrate called for compute %s, "
                     "migrate_type=%s." % (compute_id,
                                           request_data.migrate_type))
        if not validate.valid_uuid_str(compute_id):
            DLOG.error("Invalid uuid received, uuid=%s." % compute_id)
            return pecan.abort(httplib.BAD_REQUEST)

        rpc_request_cls = self._MIGRATE_RPC.get(request_data.migrate_type)
        if rpc_request_cls is None:
            # Unknown migrate type: same BAD_REQUEST outcome as before.
            http_response = httplib.BAD_REQUEST
        else:
            rpc_request = rpc_request_cls()
            rpc_request.uuid = compute_id
            http_response = self._do_migrate(rpc_request)

        if httplib.ACCEPTED != http_response:
            DLOG.error("Compute migrate %s failed for %s, http_response=%s."
                       % (request_data.migrate_type, compute_id,
                          http_response))
            return pecan.abort(http_response)
CpuPinningPolicy = wsme_types.Enum(str, 'any', 'static', 'dynamic')
StorageType = wsme_types.Enum(str, 'volume')
class ComputeCreateVirtualCpuPinningType(wsme_types.Base):
"""
Virtualised Resources - Compute Create Virtual CPU Pinning Type
"""
cpu_pinning_policy = wsme_types.wsattr(CpuPinningPolicy, mandatory=False)
cpu_pinning_map = wsme_types.wsattr(six.text_type, mandatory=False)
class ComputeCreateVirtualCpuType(wsme_types.Base):
"""
Virtualised Resources - Compute Create Virtual CPU Type
"""
cpu_architecture = wsme_types.wsattr(six.text_type, mandatory=False)
num_virtual_cpu = wsme_types.wsattr(int, mandatory=True)
virtual_cpu_clock = wsme_types.wsattr(int, mandatory=False)
virtual_cpu_oversubscription_policy = wsme_types.wsattr(six.text_type,
mandatory=False)
virtual_cpu_pinning = wsme_types.wsattr(ComputeCreateVirtualCpuPinningType,
mandatory=False)
class ComputeCreateVirtualMemoryType(wsme_types.Base):
"""
Virtualised Resources - Compute Create Virtual Memory Type
"""
virtual_mem_size = wsme_types.wsattr(int, mandatory=True)
virtual_mem_oversubscription_policy = wsme_types.wsattr(six.text_type,
mandatory=False)
numa_enabled = wsme_types.wsattr(bool, mandatory=False)
class ComputeCreateVirtualStorageType(wsme_types.Base):
"""
Virtualised Resources - Compute Create Virtual Storage Type
"""
type_of_storage = wsme_types.wsattr(StorageType, mandatory=True)
size_of_storage = wsme_types.wsattr(int, mandatory=True)
class ComputeCreateFlavourType(wsme_types.Base):
"""
Virtualised Resources - Compute Create Flavour Type
"""
flavour_id = wsme_types.wsattr(six.text_type, mandatory=True)
virtual_cpu = wsme_types.wsattr(ComputeCreateVirtualCpuType,
mandatory=True)
virtual_memory = wsme_types.wsattr(ComputeCreateVirtualMemoryType,
mandatory=True)
virtual_storage = wsme_types.wsattr(ComputeCreateVirtualStorageType,
mandatory=True)
class ComputeCreateData(wsme_types.Base):
    """
    Virtualised Resources - Compute Create Data
    """
    # Name/identifier the caller assigns to the new instance.
    compute_id = wsme_types.wsattr(six.text_type, mandatory=True)
    # Optional reservation handle; not consumed by the create handler below.
    reservation_id = wsme_types.wsattr(six.text_type, mandatory=False)
    # Flavour (cpu/memory/storage) definition for the new instance.
    compute_data = wsme_types.wsattr(ComputeCreateFlavourType, mandatory=True)
    # Image to boot from.
    image_id = wsme_types.wsattr(six.text_type, mandatory=True)
    # Optional JSON-encoded dict; create handler reads network_uuid and the
    # sw:wrs:/hw:wrs: keys from it.
    meta_data = wsme_types.wsattr(six.text_type, mandatory=False, default=None)
class ComputeQueryVirtualCpuPinningType(wsme_types.Base):
"""
Virtualised Resources - Compute Query Virtual CPU Pinning Type
"""
cpu_pinning_policy = CpuPinningPolicy
cpu_pinning_map = [six.text_type]
class ComputeQueryVirtualCpuType(wsme_types.Base):
"""
Virtualised Resources - Compute Query Virtual CPU Type
"""
cpu_architecture = six.text_type
num_virtual_cpu = int
virtual_cpu_clock = int
virtual_cpu_oversubscription_policy = six.text_type
virtual_cpu_pinning = ComputeQueryVirtualCpuPinningType
class ComputeQueryVirtualMemoryType(wsme_types.Base):
"""
Virtualised Resources - Compute Query Virtual Memory Type
"""
virtual_mem_size = int
virtual_mem_oversubscription_policy = six.text_type
numa_enabled = bool
class ComputeQueryVirtualStorageType(wsme_types.Base):
"""
Virtualised Resources - Compute Query Virtual Storage Type
"""
type_of_storage = StorageType
size_of_storage = int
class ComputeQueryStorageResourceType(wsme_types.Base):
"""
Virtualised Resources - Compute Query Storage Resource Type
"""
resource_id = six.text_type
storage_attributes = ComputeQueryVirtualStorageType
owner_id = six.text_type
host_id = six.text_type
status = six.text_type
meta_data = six.text_type
class ComputeQueryAttributesResourceType(wsme_types.Base):
"""
Virtualised Resources - Compute Query Attributes Resource Type
"""
flavour_id = six.text_type
acceleration_capabilities = six.text_type
virtual_memory = ComputeQueryVirtualMemoryType
virtual_cpu = ComputeQueryVirtualCpuType
flavour_original_name = six.text_type
class ComputeQueryResourceType(wsme_types.Base):
"""
Virtualised Resources - Compute Query Resource Type
"""
compute_id = six.text_type
compute_attributes = ComputeQueryAttributesResourceType
vc_image_id = six.text_type
virtual_disks = [ComputeQueryStorageResourceType]
host_id = six.text_type
status = six.text_type
meta_data = six.text_type
class ComputeQueryData(wsme_types.Base):
    """
    Virtualised Resources - Compute Query Data
    """
    # Single query result wrapper returned by GET/POST handlers.
    query_result = ComputeQueryResourceType
class ComputesAPI(pecan.rest.RestController):
    """
    Virtualised Resources - Computes API
    """
    operate = ComputeOperateAPI()
    migrate = ComputeMigrateAPI()

    @staticmethod
    def _query_result_from_response(response):
        """Build a ComputeQueryResourceType from a GET/CREATE instance response.

        Consolidates the construction block previously copy-pasted in
        _get_compute_details, get_all and post.
        """
        virtual_memory = ComputeQueryVirtualMemoryType()
        virtual_memory.virtual_mem_size = response.memory_mb

        virtual_cpu = ComputeQueryVirtualCpuType()
        virtual_cpu.num_virtual_cpu = response.vcpus

        compute_attributes = ComputeQueryAttributesResourceType()
        compute_attributes.flavour_id = ''
        compute_attributes.virtual_memory = virtual_memory
        compute_attributes.virtual_cpu = virtual_cpu
        compute_attributes.flavour_original_name = \
            response.instance_type_original_name

        query_result = ComputeQueryResourceType()
        query_result.compute_id = response.uuid
        query_result.compute_attributes = compute_attributes
        query_result.host_id = response.host_uuid
        query_result.vc_image_id = response.image_uuid

        meta_data = dict()
        meta_data['sw:wrs:auto_recovery'] = response.auto_recovery
        meta_data['hw:wrs:live_migration_timeout'] \
            = response.live_migration_timeout
        meta_data['hw:wrs:live_migration_max_downtime'] \
            = response.live_migration_max_downtime
        query_result.meta_data = json.dumps(meta_data)
        return query_result

    @staticmethod
    def _get_compute_details(compute_id, compute):
        """
        Return compute details
        """
        vim_connection = pecan.request.vim.open_connection()
        rpc_request = rpc.APIRequestGetInstance()
        rpc_request.filter_by_uuid = compute_id
        vim_connection.send(rpc_request.serialize())
        msg = vim_connection.receive()
        if msg is None:
            DLOG.error("No response received for compute %s." % compute_id)
            return httplib.INTERNAL_SERVER_ERROR

        response = rpc.RPCMessage.deserialize(msg)
        if rpc.RPC_MSG_TYPE.GET_INSTANCE_RESPONSE != response.type:
            DLOG.error("Unexpected message type received, msg_type=%s."
                       % response.type)
            return httplib.INTERNAL_SERVER_ERROR

        if rpc.RPC_MSG_RESULT.NOT_FOUND == response.result:
            DLOG.debug("Compute %s was not found." % compute_id)
            return httplib.NOT_FOUND

        elif rpc.RPC_MSG_RESULT.SUCCESS == response.result:
            compute.query_result = \
                ComputesAPI._query_result_from_response(response)
            return httplib.OK

        DLOG.error("Unexpected result received for compute %s, result=%s."
                   % (compute_id, response.result))
        return httplib.INTERNAL_SERVER_ERROR

    @wsme_pecan.wsexpose(ComputeQueryData, six.text_type, status_code=httplib.OK)
    def get_one(self, compute_id):
        """Return a single virtual compute resource by uuid."""
        if not validate.valid_uuid_str(compute_id):
            DLOG.error("Invalid uuid received, uuid=%s." % compute_id)
            return pecan.abort(httplib.BAD_REQUEST)

        compute = ComputeQueryData()
        http_response = self._get_compute_details(compute_id, compute)
        if httplib.OK == http_response:
            return compute
        else:
            return pecan.abort(http_response)

    @wsme_pecan.wsexpose([ComputeQueryData], status_code=httplib.OK)
    def get_all(self):
        """Return all virtual compute resources."""
        DLOG.verbose("Compute-API get-all called.")
        vim_connection = pecan.request.vim.open_connection()
        rpc_request = rpc.APIRequestGetInstance()
        rpc_request.get_all = True
        vim_connection.send(rpc_request.serialize())
        computes = list()
        # The VIM streams one response per instance; a None receive marks
        # the end of the stream.
        while True:
            msg = vim_connection.receive()
            if msg is None:
                DLOG.verbose("Done receiving.")
                break

            response = rpc.RPCMessage.deserialize(msg)
            if rpc.RPC_MSG_TYPE.GET_INSTANCE_RESPONSE != response.type:
                DLOG.error("Unexpected message type received, msg_type=%s."
                           % response.type)
                return pecan.abort(httplib.INTERNAL_SERVER_ERROR)

            if rpc.RPC_MSG_RESULT.SUCCESS != response.result:
                DLOG.error("Unexpected result received, result=%s."
                           % response.result)
                return pecan.abort(httplib.INTERNAL_SERVER_ERROR)

            DLOG.verbose("Received response=%s." % response)
            compute = ComputeQueryData()
            compute.query_result = self._query_result_from_response(response)
            computes.append(compute)
        return computes

    @wsme_pecan.wsexpose(ComputeQueryData, body=ComputeCreateData,
                         status_code=httplib.CREATED)
    def post(self, compute_create_data):
        """Create a virtual compute resource."""
        DLOG.verbose("Compute-API create called for compute %s."
                     % compute_create_data.compute_id)
        compute_data = compute_create_data.compute_data
        cpu_info = compute_data.virtual_cpu
        memory_info = compute_data.virtual_memory
        storage_info = compute_data.virtual_storage

        if compute_create_data.meta_data is None:
            meta_data = dict()
        else:
            meta_data = json.loads(compute_create_data.meta_data)

        vim_connection = pecan.request.vim.open_connection()
        rpc_request = rpc.APIRequestCreateInstance()
        rpc_request.name = compute_create_data.compute_id
        rpc_request.instance_type_uuid = compute_data.flavour_id
        rpc_request.image_uuid = compute_create_data.image_id
        rpc_request.vcpus = cpu_info.num_virtual_cpu
        rpc_request.memory_mb = memory_info.virtual_mem_size
        rpc_request.disk_gb = storage_info.size_of_storage
        rpc_request.ephemeral_gb = 0
        rpc_request.swap_gb = 0
        rpc_request.network_uuid = meta_data.get("network_uuid", None)
        rpc_request.auto_recovery = meta_data.get("sw:wrs:auto_recovery", None)
        rpc_request.live_migration_timeout \
            = meta_data.get("hw:wrs:live_migration_timeout", None)
        rpc_request.live_migration_max_downtime \
            = meta_data.get("hw:wrs:live_migration_max_downtime", None)
        vim_connection.send(rpc_request.serialize())
        msg = vim_connection.receive()
        if msg is None:
            DLOG.error("No response received for compute %s."
                       % compute_create_data.compute_id)
            return pecan.abort(httplib.INTERNAL_SERVER_ERROR)

        response = rpc.RPCMessage.deserialize(msg)
        if rpc.RPC_MSG_TYPE.CREATE_INSTANCE_RESPONSE != response.type:
            DLOG.error("Unexpected message type received, msg_type=%s."
                       % response.type)
            return pecan.abort(httplib.INTERNAL_SERVER_ERROR)

        if rpc.RPC_MSG_RESULT.SUCCESS == response.result:
            compute = ComputeQueryData()
            compute.query_result = self._query_result_from_response(response)
            return compute

        DLOG.error("Unexpected result received for compute %s, result=%s."
                   % (compute_create_data.compute_id, response.result))
        return pecan.abort(httplib.INTERNAL_SERVER_ERROR)

    @wsme_pecan.wsexpose(None, six.text_type, status_code=httplib.NO_CONTENT)
    def delete(self, compute_id):
        """Delete a virtual compute resource."""
        DLOG.verbose("Compute-API delete called for compute %s." % compute_id)
        vim_connection = pecan.request.vim.open_connection()
        rpc_request = rpc.APIRequestDeleteInstance()
        rpc_request.uuid = compute_id
        vim_connection.send(rpc_request.serialize())
        msg = vim_connection.receive()
        if msg is None:
            DLOG.error("No response received for instance %s." % compute_id)
            return pecan.abort(httplib.INTERNAL_SERVER_ERROR)

        response = rpc.RPCMessage.deserialize(msg)
        if rpc.RPC_MSG_TYPE.DELETE_INSTANCE_RESPONSE != response.type:
            DLOG.error("Unexpected message type received, msg_type=%s."
                       % response.type)
            return pecan.abort(httplib.INTERNAL_SERVER_ERROR)

        if rpc.RPC_MSG_RESULT.NOT_FOUND == response.result:
            DLOG.debug("Instance %s was not found." % compute_id)
            return pecan.abort(httplib.NOT_FOUND)

        elif rpc.RPC_MSG_RESULT.SUCCESS == response.result:
            return None

        DLOG.error("Unexpected result received for instance %s, result=%s."
                   % (compute_id, response.result))
        return pecan.abort(httplib.INTERNAL_SERVER_ERROR)
| 39.477891 | 81 | 0.664283 | 2,503 | 23,213 | 5.870555 | 0.092289 | 0.042875 | 0.023207 | 0.026133 | 0.708316 | 0.674833 | 0.63121 | 0.570369 | 0.543419 | 0.495168 | 0 | 0.000696 | 0.257657 | 23,213 | 587 | 82 | 39.545145 | 0.85202 | 0.0554 | 0 | 0.544776 | 0 | 0 | 0.079636 | 0.013149 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022388 | false | 0 | 0.022388 | 0 | 0.313433 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
87408a167f26c51ac79b4357fde6e86578607e21 | 5,219 | py | Python | ncm2-plugin/ncm2_neosnippet.py | oabt/ncm2-neosnippet | b7e516d6f50eda53147569e18068f1622d2720c9 | [
"MIT"
] | 6 | 2019-01-18T18:16:05.000Z | 2021-10-30T12:39:37.000Z | ncm2-plugin/ncm2_neosnippet.py | oabt/ncm2-neosnippet | b7e516d6f50eda53147569e18068f1622d2720c9 | [
"MIT"
] | 2 | 2019-01-24T18:22:01.000Z | 2019-03-01T16:33:32.000Z | ncm2-plugin/ncm2_neosnippet.py | oabt/ncm2-neosnippet | b7e516d6f50eda53147569e18068f1622d2720c9 | [
"MIT"
] | 2 | 2021-09-16T03:43:23.000Z | 2021-09-21T21:26:55.000Z | # -*- coding: utf-8 -*-
import sys
if __name__ == '__main__':
sys.path.append('./pythonx')
def wrap():
from ncm2_core import ncm2_core
from ncm2 import getLogger
import vim
import ncm2_lsp_snippet.utils as lsp_utils
from ncm2_lsp_snippet.parser import Parser
import re
import json
# # escape $ inside place holder $1
# # $ -> \\$
# # } -> \}
# # escape $ outside placeholder
# # $ -> \$
# # } -> }
# snippet test
# options head
# \${1:kjkj} ${1:escape \\${ \} value} foobar{}${0}
#
# # place holder ${3:foo3} in nested placeholder $2
# # - for $2 ${3:foo3\}
# # - for $1 ${3:foo3\\\} or \${3:foo3\\\}
# snippet test2
# options head
# hi ${1:escape ${2:foo2 ${3:foo3\\\} \} foobar} ha
#
# # leteral "${3:foo3}" in nested placeholder $2
# # - for $2 \\${ ... \}
# # - for $1 \\\\\${ ... \\\}
# snippet test3
# options head
# hi \`mode()\` ${1:escape \${2:foo2 \\\\\${3:\`mode()\`foo3\\\} \} foobar} ha
def flatten_ast(ast, level=0):
txt = ''
for t, ele in ast:
if t == 'text':
yield (t, level, ele)
elif t == 'tabstop':
# txt += '${%s}' % ele
yield ('${', level, '${%s' % ele)
yield ('}', level, '}')
elif t == 'placeholder':
tab, subast = ele
yield ('${', level, '${%s:' % tab)
yield from flatten_ast(subast, level + 1)
yield ('}', level, '}')
elif t == 'choice':
# neosnippet doesn't support choices, replace it with placeholder
tab, opts = ele
yield ('${', level, '${%s:' % tab)
yield ('text', level + 1, opts[0])
yield ('}', level, '}')
def to_neosnippet(ast):
eles = []
for t, level, s in flatten_ast(ast):
if t == '${':
eles.append(s)
elif t == '}':
eles.append('\\' * (2 ** level - 1) + r'}')
elif t == 'text':
s = s.replace('\\', '\\' * (2 ** level))
if level == 0:
s = s.replace('$', r'\$')
# s = s.replace('}', r'}')
else:
if level == 1:
s = s.replace('$', r'\\$')
s = s.replace('}', r'\}')
else:
s = s.replace('$', '\\' * (2 ** (level-1) * 3 - 1) + '$')
s = s.replace('}', '\\' * (2 ** level - 1) + r'\}')
s = s.replace('`', r'\`')
eles.append(s)
return ''.join(eles)
logger = getLogger(__name__)
vim.command('call ncm2_neosnippet#init()')
old_formalize = ncm2_core.match_formalize
old_decorate = ncm2_core.matches_decorate
parser = Parser()
# convert lsp snippet into neosnippet snippet
def formalize(ctx, item):
item = old_formalize(ctx, item)
item = lsp_utils.match_formalize(ctx, item)
ud = item['user_data']
if not ud['is_snippet']:
return item
if ud['snippet'] == '':
return item
try:
ast = parser.get_ast(ud['snippet'])
neosnippet = to_neosnippet(ast)
if neosnippet:
if len(ast) == 1 and ast[0][0] == 'text':
neosnippet += '${0}'
ud['neosnippet_snippet'] = neosnippet
ud['is_snippet'] = 1
else:
ud['is_snippet'] = 0
except:
ud['is_snippet'] = 0
logger.exception("ncm2_lsp_snippet failed parsing item %s", item)
return item
# add [+] mark for snippets
def decorate(data, matches):
matches = old_decorate(data, matches)
has_snippet = False
for m in matches:
ud = m['user_data']
if not ud.get('is_snippet', False):
continue
has_snippet = True
if not has_snippet:
return matches
for m in matches:
ud = m['user_data']
if ud.get('is_snippet', False):
# [+] sign indicates that this completion item is
# expandable
if ud.get('ncm2_neosnippet_auto', False):
m['menu'] = '(+) ' + m['menu']
else:
m['menu'] = '[+] ' + m['menu']
else:
m['menu'] = '[ ] ' + m['menu']
return matches
ncm2_core.matches_decorate = decorate
ncm2_core.match_formalize = formalize
wrap()
# parser = Parser()
#
# snippets = ["""
# hello ${1:world}.
# ""","""
# hello ${1:world \${\}}.
# ""","""
# hello ${1:world ${2:\${foobar\}}}}.
# ""","""
# hello ${1:world ${2:\${`mode()`foobar\}}}}.
# """,
# ]
#
# # results:
# # hello ${1:world}.
# # hello ${1:world \\${\}}.
# # hello ${1:world ${2:\\\\\${foobar\\\\}\}}}.
# # hello ${1:world ${2:\\\\\${\`mode()\`foobar\\\\}\}}}.
#
# for snippet in snippets:
# ast = parser.get_ast(snippet)
# # print(snippet)
# print(to_neosnippet(ast))
| 29.822857 | 84 | 0.439356 | 547 | 5,219 | 4.084095 | 0.232176 | 0.007162 | 0.032229 | 0.022381 | 0.225604 | 0.177261 | 0.143241 | 0.143241 | 0.117278 | 0.055506 | 0 | 0.024293 | 0.376892 | 5,219 | 174 | 85 | 29.994253 | 0.662669 | 0.247365 | 0 | 0.23 | 0 | 0 | 0.093385 | 0.005707 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.08 | 0 | 0.19 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8742cf27f615b8219c28a68120212e7fc935f9c3 | 4,385 | py | Python | get_students.py | pcbouman-eur/CanvasToTeams | 06fb78a794fd1353336ca05881f8018310388ffe | [
"MIT"
] | null | null | null | get_students.py | pcbouman-eur/CanvasToTeams | 06fb78a794fd1353336ca05881f8018310388ffe | [
"MIT"
] | null | null | null | get_students.py | pcbouman-eur/CanvasToTeams | 06fb78a794fd1353336ca05881f8018310388ffe | [
"MIT"
] | 1 | 2020-10-12T08:32:40.000Z | 2020-10-12T08:32:40.000Z | from canvasapi import Canvas
import json
import os
# Do `pip install canvasapi` first before running this script!
DEFAULT_CANVAS_URL = 'https://eur.instructure.com'
DEFAULT_REMOVE_PATTERN = 'groep'
DEFAULT_ADD_PREFIX = 'Tutorial '
DEFAULT_IGNORE = 'Default section'
print('Please enter the Canvas base URL you want to use (default: '+DEFAULT_CANVAS_URL+')')
CANVAS_URL = input()
if len(CANVAS_URL.strip()) == 0:
CANVAS_URL = DEFAULT_CANVAS_URL
print('Please enter your API token for Canvas')
print('You can create a Canvas acces token token for you account at '+CANVAS_URL+'/profile/settings')
API_KEY = input()
print('Please enter the Canvas course number. This is a number that appears in the URL when you go to your course')
COURSE_NUM = input()
canvas = Canvas(CANVAS_URL, API_KEY)
course = canvas.get_course(COURSE_NUM)
fname = 'course-'+str(course.id)+'-registrations.json'
print('Is there a text pattern you want to remove from the section information? (default: \''+DEFAULT_REMOVE_PATTERN+'\')')
REMOVE_PATTERN = input()
if len(REMOVE_PATTERN.strip()) == 0:
REMOVE_PATTERN = DEFAULT_REMOVE_PATTERN
print('Is there a prefix you want to add to the section information? (default \''+DEFAULT_ADD_PREFIX+'\')')
ADD_PREFIX = input()
if len(ADD_PREFIX.strip()) == 0:
ADD_PREFIX = DEFAULT_ADD_PREFIX
print('Are there channels that you do not want to map to channels? Separate them by commas (default: \''+DEFAULT_IGNORE+'\')')
SKIP_LIST = input()
if len(SKIP_LIST.strip()) == 0:
SKIP_LIST = DEFAULT_IGNORE
SKIP_SET = {s.strip() for s in SKIP_LIST.split(',')}
print('Give a name for a channel to which all students should be added (leave empty to skip this step)')
CHANNEL_ALL = input()
data = {}
all_students = set()
print('Succesfully obtained the course. Please be patient while student enrollments are collected...')
for section in course.get_sections():
enrollments = [enrollment for enrollment in section.get_enrollments()]
secname = section.name.replace(REMOVE_PATTERN, '').strip()
secname = ADD_PREFIX + secname
students = list(sorted({e.sis_user_id for e in enrollments if e.role == 'StudentEnrollment' and not e.sis_user_id is None}))
if not section.name in SKIP_SET:
data[secname] = students
all_students.update(students)
none_students = [e for e in enrollments if e.sis_user_id is None]
if len(none_students) > 0:
print('The following students were not added to '+secname+' because the sis_user_id is missing')
for ns in none_students:
try:
print(ns.user.name+' (id: '+ns.user.id+')')
except:
print(vars(ns))
if len(CHANNEL_ALL.strip()) != 0:
data[CHANNEL_ALL] = list(sorted(all_students))
processed = False
if os.path.isfile(fname):
# TODO: how to deal with multiple mutations??
print('Old .json file found. Comparing the old file and the new data')
with open(fname) as infile:
old_data = json.load(infile)
mutation = 1
while True:
mut_file = 'course-'+str(course.id)+'-mutation-'+str(mutation)+'.json'
if not os.path.isfile(mut_file):
break
mutation += 1
with open(mut_file) as infile:
mut_data = json.load(infile)
for channel, std_list in mut_data.items():
if not channel in old_data:
old_data[channel] = std_list
else:
old_data[channel] = list({std for std in old_data[channel]}.union(set(std_list)))
removed = 0
for channel, std_list in old_data.items():
if channel in data:
for std in std_list:
if std in data[channel]:
data[channel].remove(std)
removed += 1
print('Removed '+str(removed)+' entries that were already added earlier')
mutation = 1
while True:
fname = 'course-'+str(course.id)+'-mutation-'+str(mutation)+'.json'
if os.path.isfile(fname):
mutation += 1
else:
break
count = sum([len(std_list) for std_list in data.values()])
if count > 0:
with open(fname, 'w') as out:
json.dump(data, out)
print('Course registrations for '+str(count)+' students written to file: '+fname)
else:
print('No new course registrations found. Not writing anything')
| 37.478632 | 128 | 0.661574 | 625 | 4,385 | 4.5104 | 0.2688 | 0.025541 | 0.014189 | 0.018092 | 0.1348 | 0.056403 | 0.042568 | 0.029798 | 0.029798 | 0 | 0 | 0.003836 | 0.227138 | 4,385 | 116 | 129 | 37.801724 | 0.827973 | 0.023717 | 0 | 0.136842 | 0 | 0.031579 | 0.28705 | 0.00561 | 0 | 0 | 0 | 0.008621 | 0 | 1 | 0 | false | 0 | 0.031579 | 0 | 0.031579 | 0.168421 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8745cd5aecce86323314f1a025b4ce6f3f4d62bc | 2,771 | py | Python | schedule_lib/schedexport.py | allankellynet/mimas | 10025d43bba9e84f502a266760786842e7158a05 | [
"MIT"
] | null | null | null | schedule_lib/schedexport.py | allankellynet/mimas | 10025d43bba9e84f502a266760786842e7158a05 | [
"MIT"
] | 1 | 2020-02-05T13:00:29.000Z | 2020-02-05T13:00:29.000Z | schedule_lib/schedexport.py | allankellynet/mimas | 10025d43bba9e84f502a266760786842e7158a05 | [
"MIT"
] | null | null | null | #-----------------------------------------------------
# Mimas: conference submission and review system
# (c) Allan Kelly 2016-2020 http://www.allankelly.net
# Licensed under MIT License, see LICENSE file
# -----------------------------------------------------
# schedule_lib/schedexport.py
#
# system imports
import datetime
# framework imports
from google.appengine.ext import ndb
import cloudstorage
import xlsxwriter
# app imports
import schedule
from reports import exportexcel
def worksheet_write_wrapper(wksheet, row, col, text):
    """Write *text* into cell (row, col); thin indirection over xlsxwriter's Worksheet.write."""
    wksheet.write(row, col, text)
def worksheet_merge_wrapper(wksheet, row_start, col_start, row_end, col_end, text):
    """Merge the cell range and write *text* into it; thin indirection over Worksheet.merge_range."""
    wksheet.merge_range(row_start, col_start, row_end, col_end, text)
def write_title_row(sched, day, worksheet):
    """Write the track names for *day* across row 0, one per column.

    Track columns start at column 2; columns 0 and 1 hold the slot
    start/end times (see write_slots_and_content).
    """
    for col, track in enumerate(sched.tracks(day), start=2):
        worksheet_write_wrapper(worksheet, 0, col, track)
def write_tracks(row, col, day, slot, sched, worksheet):
    """Write one cell per track for *slot*, left to right starting at *col*."""
    for offset, track in enumerate(sched.tracks(day)):
        title = schedule.talkTitle(sched.get_assignment(day, track, slot))
        worksheet_write_wrapper(worksheet, row, col + offset, title)
def write_plenary(row, col, description, track_count, worksheet):
    """Merge the *track_count* track columns of *row* into one cell holding *description*."""
    end_col = col + track_count - 1
    worksheet_merge_wrapper(worksheet, row, col, row, end_col, description)
def write_slots_and_content(sched, day, worksheet):
    """Write one spreadsheet row per time slot of *day*.

    Columns 0 and 1 carry the slot's start and end time (HH:MM); the
    remaining columns carry either one cell per track ("Tracks" slots)
    or a single merged cell spanning all tracks (any other slot type,
    treated as a plenary).
    """
    for row, slot_key in enumerate(sched.orderd_slot_keys(day), start=1):
        slot = sched.slots(day)[slot_key]
        worksheet_write_wrapper(worksheet, row, 0, slot.start_time.strftime("%H:%M"))
        worksheet_write_wrapper(worksheet, row, 1, slot.end_time.strftime("%H:%M"))
        if slot.slot_type == "Tracks":
            write_tracks(row, 2, day, slot_key, sched, worksheet)
        else:
            write_plenary(
                row,
                2,
                schedule.talkTitle(sched.get_assignment(day, "Plenary", slot_key)),
                len(sched.tracks(day)),
                worksheet,
            )
def write_days(sched, workbook):
    """Create one worksheet per schedule day and fill in its title row and slots."""
    for day_name in sched.day_names():
        sheet = workbook.add_worksheet(name=day_name)
        write_title_row(sched, day_name, sheet)
        write_slots_and_content(sched, day_name, sheet)
def schedule_to_excel(sched):
    """Export *sched* as an .xlsx file to public cloud storage.

    Returns the public URL of the written file.
    """
    fullname, url = exportexcel.mk_filename("Schedule", datetime.datetime.now())
    with cloudstorage.open(fullname, "w",
                           content_type="text/plain; charset=utf-8",
                           options={'x-goog-acl': 'public-read'}) as output:
        # Build the workbook in memory; its bytes are written to ``output``
        # when close() is called.
        workbook = xlsxwriter.Workbook(output, {'in_memory': True})
        write_days(sched, workbook)
        workbook.close()
        # NOTE(review): the ``with`` block already closes ``output`` on exit,
        # so this explicit close looks redundant — confirm before removing.
        output.close()
    return url
| 35.075949 | 108 | 0.638759 | 348 | 2,771 | 4.91954 | 0.321839 | 0.049065 | 0.061332 | 0.064252 | 0.382009 | 0.382009 | 0.300234 | 0.257009 | 0.16472 | 0.127336 | 0 | 0.008736 | 0.215085 | 2,771 | 78 | 109 | 35.525641 | 0.778391 | 0.140743 | 0 | 0.113208 | 0 | 0 | 0.036709 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.150943 | false | 0 | 0.113208 | 0 | 0.283019 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8746d96b9c229eb941dea75daa70eed79983ec48 | 1,102 | py | Python | unchaind/util/esi.py | xnlfgh/unchaind | af59c380c02401a7ac58139d7f82507b9fb59f75 | [
"MIT"
] | 2 | 2019-01-02T20:43:50.000Z | 2019-01-28T10:15:13.000Z | unchaind/util/esi.py | xnlfgh/unchaind | af59c380c02401a7ac58139d7f82507b9fb59f75 | [
"MIT"
] | 40 | 2018-12-26T16:20:57.000Z | 2019-03-31T13:47:32.000Z | unchaind/util/esi.py | xnlfgh/unchaind | af59c380c02401a7ac58139d7f82507b9fb59f75 | [
"MIT"
] | 4 | 2018-12-25T22:53:51.000Z | 2021-02-20T19:54:51.000Z | """Functions to query ESI with simple caching."""
import logging
import json
from typing import Dict, Any
from async_lru import alru_cache
from unchaind.http import HTTPSession
log = logging.getLogger(__name__)
_ESI = "https://esi.evetech.net/latest/"
@alru_cache(maxsize=8192)
async def character_details(character: int) -> Dict[str, Any]:
    """Fetch a character's details from ESI, memoised via the async LRU cache."""
    return await _esi_request(f"{_ESI}characters/{character}/")
@alru_cache(maxsize=4096)
async def corporation_details(corp: int) -> Dict[str, Any]:
    """Fetch a corporation's details from ESI, memoised via the async LRU cache."""
    return await _esi_request(f"{_ESI}corporations/{corp}/")
@alru_cache(maxsize=4096)
async def alliance_details(alliance: int) -> Dict[str, Any]:
    """Fetch an alliance's details from ESI, memoised via the async LRU cache."""
    return await _esi_request(f"{_ESI}alliances/{alliance}/")
@alru_cache(maxsize=4096)
async def type_details(type: int) -> Dict[str, Any]:
    """Fetch a universe type's details from ESI, memoised via the async LRU cache."""
    return await _esi_request(f"{_ESI}universe/types/{type}/")
async def _esi_request(url: str) -> Dict[str, Any]:
    """Perform a GET against *url* and decode the JSON body into a plain dict.

    Propagates transport errors from HTTPSession.request; raises
    json.JSONDecodeError when the body is not valid JSON, and TypeError
    when the decoded document is not a JSON object.
    """
    http = HTTPSession()
    response = await http.request(url=url, method="GET")
    # json.loads accepts bytes/str directly; its ``encoding`` keyword was
    # deprecated and removed in Python 3.9, so it must not be passed here.
    return dict(json.loads(response.body))
| 24.488889 | 62 | 0.713249 | 158 | 1,102 | 4.791139 | 0.373418 | 0.059445 | 0.06605 | 0.068692 | 0.322325 | 0.322325 | 0.322325 | 0.322325 | 0.179657 | 0.179657 | 0 | 0.018124 | 0.14882 | 1,102 | 44 | 63 | 25.045455 | 0.788913 | 0.03902 | 0 | 0.259259 | 0 | 0 | 0.1415 | 0.104463 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.185185 | 0 | 0.37037 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8746ebe9ac343bc60b4bcbb4756f7bfd2a842179 | 1,460 | py | Python | uttut/toolkits/tests/test_get_kth_combination.py | Yoctol/uttut | 31ed12449d38fac58f50178c4ade8b011f1fcfbd | [
"MIT"
] | 2 | 2018-03-27T03:03:37.000Z | 2018-05-23T05:49:34.000Z | uttut/toolkits/tests/test_get_kth_combination.py | Yoctol/uttut | 31ed12449d38fac58f50178c4ade8b011f1fcfbd | [
"MIT"
] | 125 | 2018-04-06T14:07:36.000Z | 2019-12-19T03:54:19.000Z | uttut/toolkits/tests/test_get_kth_combination.py | Yoctol/uttut | 31ed12449d38fac58f50178c4ade8b011f1fcfbd | [
"MIT"
] | null | null | null | from unittest import TestCase
from functools import reduce
from operator import mul
from ..get_kth_combination import get_kth_combination
class GetKthCombinationTestCase(TestCase):
    """Tests for get_kth_combination.

    ``get_kth_combination(iterables, k)`` is expected to return the k-th
    element of the cartesian product of ``iterables``, with the FIRST
    iterable varying fastest (as laid out in the table below).
    """

    def test_get_kth_combination(self):
        iterables = [[1, 2, 3], ['A', 'B', 'C'], ['I', 'II']]
        # All 3 * 3 * 2 = 18 combinations, in "first iterable varies
        # fastest" order; index in this list == the expected k.
        expected_outputs = [
            [1, 'A', 'I'],
            [2, 'A', 'I'],
            [3, 'A', 'I'],
            [1, 'B', 'I'],
            [2, 'B', 'I'],
            [3, 'B', 'I'],
            [1, 'C', 'I'],
            [2, 'C', 'I'],
            [3, 'C', 'I'],
            [1, 'A', 'II'],
            [2, 'A', 'II'],
            [3, 'A', 'II'],
            [1, 'B', 'II'],
            [2, 'B', 'II'],
            [3, 'B', 'II'],
            [1, 'C', 'II'],
            [2, 'C', 'II'],
            [3, 'C', 'II'],
        ]
        for i, expected_output in enumerate(expected_outputs):
            with self.subTest(k=i):
                output = get_kth_combination(iterables, i)
                self.assertEqual(expected_output, output)

        # no duplicated output when k <= n_combinations
        n_combinations = reduce(mul, [len(e) for e in iterables])
        self.assertEqual(
            len(
                set(
                    [
                        str(get_kth_combination(iterables, k))
                        for k in range(n_combinations)
                    ],
                ),
            ),
            n_combinations,
        )
| 28.627451 | 65 | 0.404795 | 153 | 1,460 | 3.738562 | 0.294118 | 0.052448 | 0.148601 | 0.090909 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02506 | 0.426027 | 1,460 | 50 | 66 | 29.2 | 0.657518 | 0.030822 | 0 | 0.046512 | 0 | 0 | 0.036093 | 0 | 0 | 0 | 0 | 0 | 0.046512 | 1 | 0.023256 | false | 0 | 0.093023 | 0 | 0.139535 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
874715eb3f14435bebfd2dc5d5105c4dbfec947d | 15,605 | py | Python | src/nasty/_retriever/retriever.py | evhart/nasty | 1b14977d1ba61bdb78d0906c76dd57242a8c8923 | [
"Apache-2.0"
] | null | null | null | src/nasty/_retriever/retriever.py | evhart/nasty | 1b14977d1ba61bdb78d0906c76dd57242a8c8923 | [
"Apache-2.0"
] | null | null | null | src/nasty/_retriever/retriever.py | evhart/nasty | 1b14977d1ba61bdb78d0906c76dd57242a8c8923 | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2019-2020 Lukas Schmelzeisen
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
from abc import ABC, abstractmethod
from http import HTTPStatus
from logging import getLogger
from os import getenv
from time import sleep
from typing import (
Any,
Callable,
Generic,
Iterable,
Mapping,
Optional,
Sequence,
Type,
TypeVar,
cast,
)
import requests
from overrides import overrides
from requests.adapters import HTTPAdapter
from requests.exceptions import RetryError
from typing_extensions import Final, final
from urllib3 import Retry
from .._util.errors import UnexpectedStatusCodeException
from .._util.typing_ import checked_cast
from ..request.request import Request
from ..tweet.tweet import Tweet, TweetId, UserId
from ..tweet.tweet_stream import TweetStream
logger = getLogger(__name__)
crawl_delay: Optional[float] = None
class RetrieverTweetStream(TweetStream):
    """Tweet stream whose contents are refilled lazily in batches.

    Whenever iteration exhausts the current batch, ``update_callback`` is
    invoked to load the next one; a falsy return value ends the stream.
    """

    def __init__(self, update_callback: Callable[[], bool]):
        self._update_callback: Final = update_callback
        self._tweets: Sequence[Tweet] = []
        self._tweets_position = 0

    def update_tweets(self, tweets: Sequence[Tweet]) -> None:
        """Replace the current batch and restart iteration from its head."""
        self._tweets = tweets
        self._tweets_position = 0

    @overrides
    def __next__(self) -> Tweet:
        # Out of buffered Tweets: ask the callback for more; if it cannot
        # deliver, the stream is over.
        if self._tweets_position == len(self._tweets) and not self._update_callback():
            raise StopIteration()
        tweet = self._tweets[self._tweets_position]
        self._tweets_position += 1
        return tweet
class RetrieverBatch(ABC):
    """One page of results returned by Twitter's JSON API.

    Parses the raw JSON mapping into ``tweets`` (Tweet objects) and
    ``next_cursor`` (pagination token for the following request).
    """

    def __init__(self, json: Mapping[str, Mapping[str, object]]):
        self._json: Final = json

        self.tweets: Final = self._tweets()
        self.next_cursor: Final = self._next_cursor()

    @final
    def _tweets(self) -> Sequence[Tweet]:
        # Tweet and user payloads arrive in two separate id-keyed maps under
        # "globalObjects"; join them here.
        id_to_tweet_json: Final = cast(
            Mapping[TweetId, Mapping[str, object]],
            self._json["globalObjects"]["tweets"],
        )
        id_to_user_json: Final = cast(
            Mapping[UserId, object], self._json["globalObjects"]["users"]
        )

        result = []
        for tweet_id in self._tweet_ids():
            if tweet_id not in id_to_tweet_json:
                # For conversation it can sometimes happen that a Tweet-ID is returned
                # without accompanying meta information. I have no idea why this happens
                # or how to fix it.
                logger.warning(
                    "Found Tweet-ID {} in timeline, but did not receive "
                    "Tweet meta information.".format(tweet_id)
                )
                # TODO: move this to a ConversationRetrieverBatch
                # TODO: add way to expose this over api
                continue

            # Inline the full user object under "user" so the result matches
            # the Twitter developer API format.
            tweet_json = dict(id_to_tweet_json[tweet_id])
            tweet_json["user"] = id_to_user_json[
                checked_cast(UserId, tweet_json["user_id_str"])
            ]

            # Delete remaining user fields in order to be similar to the Twitter
            # developer API and because the information is stored in the user object
            # anyways.
            tweet_json.pop("user_id", None)  # present on Search, not on Conversation
            tweet_json.pop("user_id_str")

            result.append(Tweet(tweet_json))
        return result

    @abstractmethod
    def _tweet_ids(self) -> Iterable[TweetId]:
        """Yield the Tweet-IDs contained in this batch."""
        raise NotImplementedError()

    @abstractmethod
    def _next_cursor(self) -> Optional[str]:
        """Return the cursor for the next batch, or None when there is none."""
        raise NotImplementedError()
_T_Request = TypeVar("_T_Request", bound=Request)
class Retriever(Generic[_T_Request], ABC):
"""Retrieves Tweets belonging to a specific Twitter timeline view.
Implemented via Twitter's mobile web interface. For this we emulate what a normal
browser would do:
1) Load the HTML stub belonging to a timeline page. We say stub here, because the
HTML doesn't contained any contents, i.e., there are no Tweets in it.
2) Load batches of displayed Tweets via AJAX requests on page load and whenever the
user scrolls to the bottom of the page.
The upside of this approach is that the JSON results have the exact same format as
the results from the Twitter developer API (and even contain more information).
"""
def __init__(self, request: _T_Request):
self._tweet_stream: Final = self._tweet_stream_type()(self._update_tweet_stream)
self._request: Final = request
self._session: Final = requests.Session()
self._request_finished = False
self._retrieved_tweets = 0
self._cursor: Optional[str] = None
# Configure on which status codes we should perform automated retries.
self._session.mount(
"https://",
HTTPAdapter(
max_retries=Retry(
total=5,
connect=5,
redirect=10,
backoff_factor=0.1,
raise_on_redirect=True,
raise_on_status=True,
status_forcelist=[
HTTPStatus.REQUEST_TIMEOUT, # HTTP 408
HTTPStatus.CONFLICT, # HTTP 409
HTTPStatus.INTERNAL_SERVER_ERROR, # HTTP 500
HTTPStatus.NOT_IMPLEMENTED, # HTTP 501
HTTPStatus.BAD_GATEWAY, # HTTP 502
HTTPStatus.SERVICE_UNAVAILABLE, # HTTP 503
HTTPStatus.GATEWAY_TIMEOUT, # HTTP 504
],
)
),
)
self._fetch_new_twitter_session()
@classmethod
def _tweet_stream_type(cls) -> Type[RetrieverTweetStream]:
return RetrieverTweetStream
@classmethod
@abstractmethod
def _retriever_batch_type(cls) -> Type[RetrieverBatch]:
raise NotImplementedError()
@property
def tweet_stream(self) -> RetrieverTweetStream:
return self._tweet_stream
@abstractmethod
def _timeline_url(self) -> Mapping[str, object]:
raise NotImplementedError()
@abstractmethod
def _batch_url(self) -> Mapping[str, object]:
raise NotImplementedError()
def _update_tweet_stream(self) -> bool: # noqa: C901
# TODO: try to reduce complexity and get red of noqa
if self._request_finished:
return False
consecutive_retry_error = 0
consecutive_rate_limits = 0
consecutive_forbidden = 0
consecutive_empty_batches = 0
batch = None
while True:
try:
batch = self._fetch_batch()
except RetryError:
consecutive_retry_error += 1
if consecutive_retry_error != 3:
self._fetch_new_twitter_session()
continue
logger.warning("Received 3 consecutive RetryErrors.")
return False
except UnexpectedStatusCodeException as e:
if e.status_code == HTTPStatus.TOO_MANY_REQUESTS: # HTTP 429
consecutive_rate_limits += 1
if consecutive_rate_limits != 3:
self._fetch_new_twitter_session()
continue
logger.warning(
"Received 3 consecutive TOO MANY REQUESTS responses."
)
elif e.status_code == HTTPStatus.FORBIDDEN: # HTTP 403
consecutive_forbidden += 1
if consecutive_forbidden != 3:
self._fetch_new_twitter_session()
continue
logger.warning("Received 3 consecutive FORBIDDEN responses.")
raise
consecutive_rate_limits = 0
# Stop the iteration once the returned batch no longer contains any Tweets.
# Ideally, we would like to omit this last request but there seems to be no
# way to detect this prior to having the last batch loaded. Additionally,
# Twitter will sometimes stop sending results early, which we also can not
# detect. Because of this, we only stop loading once we receive empty
# batches multiple times in a row.
if not batch.tweets:
consecutive_empty_batches += 1
if consecutive_empty_batches != 3:
continue
logger.info("Received 3 consecutive empty batches.")
return False
break
tweets = batch.tweets
if self._request.max_tweets:
tweets = tweets[: self._request.max_tweets - self._retrieved_tweets]
self._retrieved_tweets += len(tweets)
logger.debug(
" Received new batch of {} Tweets ({}/{})".format(
len(tweets), self._retrieved_tweets, self._request.max_tweets
)
)
if (
self._request.max_tweets
and self._request.max_tweets == self._retrieved_tweets
):
self._request_finished = True
self.tweet_stream.update_tweets(tweets)
self._cursor = batch.next_cursor
if self._cursor is None:
self._request_finished = True
return True
@final
def _fetch_new_twitter_session(self) -> None:
"""Establishes a session with Twitter, so that they answer our requests.
If we try to directly access request the first batch of a query, Twitter will
respond with a rate limit error, i.e. HTTP 429. To receive actual responses we
need to include a bearer token and a guest token in our headers. A normal web
browser gets these be first loading the displaying HTML stub. This function
emulates this process and prepares the given session object to contain the
necessary headers.
For more information on this process, see:
- https://tech.b48.club/2019/05/13/how-to-fake-a-source-of-a-tweet.html
- https://steemit.com/technology/@singhpratyush/
fetching-url-for-complete-twitter-videos-using-guest-user-access-pattern
- https://github.com/ytdl-org/youtube-dl/issues/12726#issuecomment-304779835
Each established session is only good for a given number of requests.
Information on this can be obtained by checking the X-Rate-Limit-* headers in
the responses from api.twitter.com. Currently, we do not pay attention to these,
and just establish a new session once we run into the first rate limit error.
Cursor parameters, i.e. those that specify the current position in the result
list seem to persist across sessions.
Technically, a normal web browser would also receive a few cookies from Twitter
in this process. Currently, api.twitter.com doesn't seem to check for these. In
any case, we still set those in case Twitter changes their behavior. Note,
however, that our requests will still be trivially distinguishable from a
normal web browsers requests, as they typically sent many more headers and
cookies, i.e. those from Google Analytics. Further we include the string
"NASTYbot" in our User-Agent header to make it trivial for Twitter to
rate-limit us, should they decide to.
"""
logger.debug(" Establishing new Twitter session.")
self._session.headers.clear()
self._session.cookies.clear()
# We use the current Chrome User-Agent string to get the most recent version of
# the Twitter mobile website.
self._session.headers["User-Agent"] = (
"Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N)"
" AppleWebKit/537.36 (KHTML, like Gecko)"
" Chrome/68.0.3440.84 Mobile Safari/537.36"
" NASTYbot"
)
# The following header should not matter for the actual returned Tweets. Still,
# since api.twitter.com also returns some localized strings for the UI (e.g.
# headings), we set this to English, so these strings are always the same. If
# not set, Twitter will guesstimate the language from the IP.
self._session.headers["Accept-Language"] = "en_US,en"
# Query HTML stub page. Also automatically adds any returned cookies by Twitter
# via response headers to the session.
response = self._session_get(**self._timeline_url())
main_js_url = re.findall(
"(https://abs.twimg.com/responsive-web/"
"(?:client[-_])?web(?:[-_]legacy)?/main.[a-z0-9]+.js)",
response.text,
)[0]
guest_token = re.findall(
'document\\.cookie = decodeURIComponent\\(\\"gt=([0-9]+);', response.text
)[0]
# Queries the JS-script that carries the bearer token. Currently, this does not
# seem to constant for all users, but we still check in case this changes in the
# future.
response = self._session_get(main_js_url)
bearer_token = re.findall('.="Web-12",.="([^"]+)"', response.text)[0]
# Emulate cookie setting that would be performed via Javascript.
self._session.cookies.set_cookie( # type: ignore
requests.cookies.create_cookie(
"gt", guest_token, domain=".twitter.com", path="/"
)
)
# Set the two headers that we need to access api.twitter.com.
self._session.headers["Authorization"] = "Bearer {}".format(bearer_token)
self._session.headers["X-Guest-Token"] = guest_token
logger.debug(
" Guest token: {}. Bearer token: {}.".format(guest_token, bearer_token)
)
@final
def _fetch_batch(self) -> RetrieverBatch:
return self._retriever_batch_type()(
self._session_get(**self._batch_url()).json()
)
@final
def _session_get(self, url: str, **kwargs: Any) -> requests.Response:
if not getenv("NASTY_DISRESPECT_ROBOTSTXT"):
global crawl_delay
if crawl_delay is None:
response = self._session.get("https://mobile.twitter.com/robots.txt")
for line in response.text.splitlines():
if line.lower().startswith("crawl-delay:"):
crawl_delay = float(line[len("crawl-delay:") :])
break
else:
raise RuntimeError("Could not determine crawl-delay.")
logger.debug(
" Determined crawl-delay of {:.2f}s.".format(crawl_delay)
)
sleep(crawl_delay)
response = self._session.get(url, **kwargs)
status = HTTPStatus(response.status_code)
logger.debug(
" Received {} {} for {}".format(status.value, status.name, response.url)
)
if response.status_code != HTTPStatus.OK.value:
raise UnexpectedStatusCodeException(
response.url, HTTPStatus(response.status_code)
)
return response
| 39.0125 | 88 | 0.620058 | 1,834 | 15,605 | 5.124864 | 0.28735 | 0.016385 | 0.010852 | 0.011703 | 0.069369 | 0.051388 | 0.040855 | 0.030854 | 0.021704 | 0.021704 | 0 | 0.012499 | 0.302723 | 15,605 | 399 | 89 | 39.110276 | 0.8513 | 0.307658 | 0 | 0.172549 | 0 | 0.003922 | 0.097026 | 0.012962 | 0 | 0 | 0 | 0.005013 | 0 | 1 | 0.066667 | false | 0 | 0.070588 | 0.011765 | 0.188235 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
874dd0ef2fc879a426a0de09a6582dbf87216983 | 7,242 | py | Python | cmmcdb/management/commands/importpdf.py | linville/cmmcdb | acb60fa7278711a2f685ccbadd3d6767689a8236 | [
"0BSD"
] | 1 | 2020-10-29T22:27:55.000Z | 2020-10-29T22:27:55.000Z | cmmcdb/management/commands/importpdf.py | linville/cmmcdb | acb60fa7278711a2f685ccbadd3d6767689a8236 | [
"0BSD"
] | 1 | 2020-02-12T02:34:56.000Z | 2020-02-12T02:34:56.000Z | cmmcdb/management/commands/importpdf.py | linville/cmmcdb | acb60fa7278711a2f685ccbadd3d6767689a8236 | [
"0BSD"
] | null | null | null | # This imports the CMMC pdf as distributed by OUSD(A&S).
from django.core.management.base import BaseCommand, CommandError
from django.db.models import IntegerField, Case, Sum, Value, When
from cmmcdb.models import *
import argparse
import camelot
import re
import pprint
def remove_prefix(text, prefix):
    """Return *text* with *prefix* stripped from the front, if present.

    Leaves *text* unchanged when it does not start with *prefix*.
    """
    return text[len(prefix):] if text.startswith(prefix) else text
class Command(BaseCommand):
help = "Imports the CMMC pdf."
    def add_arguments(self, parser):
        """Register the positional FILE argument, opened in binary read mode."""
        parser.add_argument(
            "FILE", type=argparse.FileType("rb", 0), help="Path to the CMMC pdf."
        )
def check_db(self):
expected = 5
found = MaturityLevel.objects.all().count()
if expected != found:
raise CommandError(f"Expected {expected} Maturity Levels. Found {found}.")
expected = 17
found = Domain.objects.all().count()
if expected != found:
raise CommandError(f"Expected {expected} Domains. Found {found}.")
def handle(self, *args, **options):
self.check_db()
# Domains and Capabilities may be spread across multiple pages. It would be
# nice if they repeated the string on each page so that page could be
# independently scraped, but they didn't, so we need to remember it ourself.
last_domain = None
last_capability = None
for page_index in range(7, 40):
print(f"Reading CMMC pdf page {page_index}...")
# Extract the Table Header
header = camelot.read_pdf(
options["FILE"].name,
pages=str(page_index),
flavor="stream",
table_areas=["60,550,400,520"],
suppress_stdout=True,
)
header = header[0].data[0][0]
if header is None and last_domain is not None:
pass
elif header == "CAPABILITY":
pass
else:
last_domain = self.extract_domain(header)
# Extract the Table
try:
tables = camelot.read_pdf(options["FILE"].name, pages=str(page_index))
except NotImplementedError:
print("PyPDF2 does not support read-only PDFs. Use pikepdf to fix.")
exit(0)
if tables.n != 1:
raise CommandError(f"Expected 1 table per page. Found {tables.n}.")
last_capability = self.extract_table(
last_domain, last_capability, tables[0]
)
def extract_table(self, domain, last_capability, table):
if table.shape[1] != 6:
raise CommandError(
f"Expected table to have 6 columns. Found {table.shape[1]} columns on page {table.page}."
)
if table.shape[0] < 3:
raise CommandError(
f"Expected table to have at least 3 rows. Found {table.shape[0]} rows on page {table.page}."
)
# print(table.data)
# Row 0: Capability, Practices
# Row 1: Empty Cell, Level 1 ... Level 5
# Row 2+: Practices
# Column 0, Capability or Empty Cell
# Column 1-5: Practice
is_process = self.extract_is_process(table.data[0])
for i in range(2, table.shape[0]):
print(f" Row {i}")
# Check for a new capability in first column
if table.data[i][0]:
last_capability = self.extract_capability(
table.data[i][0], domain, is_process
)
# Look through all the columns for practices assuming the column index
# corresponds to the maturity level.
for j in range(1, 6):
try:
ml = MaturityLevel.objects.get(level=int(j))
self.extract_practice(last_capability, ml, table.data[i][j])
except MaturityLevel.ObjectDoesNotExist:
raise CommandError(f"Couldn't find Maturity Level {matches[0][1]}")
except:
pass
return last_capability
def extract_domain(self, domain_text):
regex = r"^(?P<name>.+)\s*\((?P<short>.+)\)"
try:
matches = re.search(regex, domain_text, re.IGNORECASE).groupdict()
except:
raise CommandError(f"Domain regex failed on: {domain_text}")
try:
domain = Domain.objects.filter(short=matches["short"]).get()
return domain
except:
raise CommandError(f"Error extracting domain: {domain_text}")
def extract_is_process(self, row):
if row[1].lower().startswith("practices"):
return False
elif row[1].lower().startswith("processes"):
return True
else:
raise CommandError(f"Unknown practice or process: {row[1]}")
def extract_capability(self, cell, domain, is_process):
simple_text = " ".join(cell.split())
if is_process:
index = 1
name = simple_text
else:
regex = r"^C(?P<index>\d+)\s(?P<name>.*?)(?:\(continued\W?\))?$"
matches = re.search(regex, simple_text, re.IGNORECASE).groupdict()
try:
index = int(matches["index"])
name = " ".join(matches["name"].split())
except:
raise CommandError(f"Error extracting capability: {simple_text}")
if not name.endswith("."):
name += "."
try:
obj, created = Capability.objects.get_or_create(
index=index, name=name, process=is_process, domain=domain
)
return obj
except:
raise CommandError(
f"Capability get or create failed on {index}, {name}, {is_process}"
)
def extract_practice(self, capability, ml, cell):
if not cell:
return
simple_text = " ".join(cell.split())
# Quirks
# simple_text = remove_prefix(simple_text, "L ")
sections = simple_text.split("•")
regex = r"^(?P<text_id>(?P<domain>[A-Z]{2,3})\.(?P<level>\d+)\.(?P<practicenumber>\d+))\s(?P<name>.*?)$"
matches = re.search(regex, sections[0], re.IGNORECASE)
# text_id: Full Id: AC.1.001
# domain: AC
# level: 1
# practicenumber: 0001
try:
name = matches["name"].strip()
except:
raise CommandError(
f"Practice didn't extract:\nCell: {cell}\nSections:{sections}\nMatches:{matches}"
)
if ml.level != int(matches["level"]):
raise CommandError(
f"Maturity levels didn't match: {ml.level} != {matches['level']}"
)
if not name.endswith("."):
name += "."
# print(f"i,n,m,c: {matches[0][0]}, {name}, {ml}, {capability}")
practice, created = Practice.objects.get_or_create(
practice_number=matches["practicenumber"],
name=name,
maturity_level=ml,
capability=capability,
)
for reference in sections[1:]:
# print(f" {reference}")
pass
| 32.475336 | 112 | 0.548743 | 828 | 7,242 | 4.724638 | 0.275362 | 0.056493 | 0.059816 | 0.033231 | 0.117587 | 0.095092 | 0.075153 | 0.056237 | 0.056237 | 0.056237 | 0 | 0.014565 | 0.336371 | 7,242 | 222 | 113 | 32.621622 | 0.799209 | 0.114747 | 0 | 0.22 | 0 | 0.02 | 0.179584 | 0.035228 | 0 | 0 | 0 | 0 | 0 | 1 | 0.06 | false | 0.026667 | 0.053333 | 0 | 0.18 | 0.026667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
874e4aee29d15243a99028c3d561c5293417a2a1 | 1,834 | py | Python | ModuleSolvers/Keypad.py | rjslater2000/KeepTalkingBot | 4d5fac633542f967384779b8049765a97f6a1cf8 | [
"MIT"
] | null | null | null | ModuleSolvers/Keypad.py | rjslater2000/KeepTalkingBot | 4d5fac633542f967384779b8049765a97f6a1cf8 | [
"MIT"
] | null | null | null | ModuleSolvers/Keypad.py | rjslater2000/KeepTalkingBot | 4d5fac633542f967384779b8049765a97f6a1cf8 | [
"MIT"
] | null | null | null | # ============================================================
# Title: Keep Talking and Nobody Explodes Solver: Keypad
# Author: Ryan J. Slater
# Date: 4/3/2019
# ============================================================
def solveKeypad(keypadSymbols):  # Symbols on the player's keypad
    """Solve the Keypad module.

    Given the (unordered) symbols shown on the player's keypad, find the
    first manual column containing all of them and return the symbols
    joined by ", " in the order they must be pressed (the column's
    top-to-bottom order).

    Raises:
        ValueError: if no manual column contains every keypad symbol.
            (BUG FIX: the original fell through with index -1 and silently
            answered from the last column.)
    """
    # List of the 6 columns of symbols from the manual.
    sourceSymbols = [['spoon', 'at', 'lambda', 'resistor', 'cat', 'h', 'backwardsc'],
                     ['backwardse', 'spoon', 'backwardsc', 'cl', 'hollowstar', 'h', 'upsidedownquestionmark'],
                     ['copyright', 'ballsack', 'cl', 'doublek', 'trailer', 'lambda', 'hollowstar'],
                     ['six', 'paragraph', 'b', 'cat', 'doublek', 'upsidedownquestionmark', 'smileyface'],
                     ['trident', 'smileyface', 'b', 'forwardsc', 'paragraph', 'snake', 'filledstar'],
                     ['six', 'backwardse', 'railroad', 'ae', 'trident', 'backwardsn', 'omega']]

    # The correct column is the first one containing every keypad symbol.
    wanted = set(keypadSymbols)
    for column in sourceSymbols:
        if wanted.issubset(column):
            # Press order follows the column, restricted to the shown symbols.
            return ', '.join(symbol for symbol in column if symbol in wanted)

    raise ValueError(f"No manual column contains all of: {keypadSymbols}")
# Manual smoke test: run this module directly to solve a sample keypad.
if __name__ == '__main__':
    sample_keypad = ['backwardsc', 'lambda', 'resistor', 'spoon']
    print(solveKeypad(sample_keypad))
| 40.755556 | 110 | 0.553435 | 163 | 1,834 | 6.177914 | 0.595092 | 0.029791 | 0.021847 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006618 | 0.258451 | 1,834 | 44 | 111 | 41.681818 | 0.733824 | 0.251908 | 0 | 0.076923 | 0 | 0 | 0.24595 | 0.032401 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0 | 0 | 0 | 0.076923 | 0.038462 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
874fe6723780a293d95e2506de662b55ab1b2deb | 2,385 | py | Python | lib/cicdctl/utils/packer/driver.py | cicdenv/cicdenv | 5b72fd9ef000bf07c2052471b59edaa91af18778 | [
"MIT"
] | 8 | 2020-08-10T20:57:24.000Z | 2021-08-08T10:46:20.000Z | lib/cicdctl/utils/packer/driver.py | cicdenv/cicdenv | 5b72fd9ef000bf07c2052471b59edaa91af18778 | [
"MIT"
] | null | null | null | lib/cicdctl/utils/packer/driver.py | cicdenv/cicdenv | 5b72fd9ef000bf07c2052471b59edaa91af18778 | [
"MIT"
] | 1 | 2020-08-10T20:42:09.000Z | 2020-08-10T20:42:09.000Z | import json
from . import env, packer_dir, packer_templates, workspace
from ...commands.types.target import Target
from ..terraform.driver import TerraformDriver
from ..terraform.routing import routing_targets
# Supported packer sub-commands; PackerDriver exposes each of these as a
# zero-argument method via __getattr__ (e.g. driver.build()).
packer_commands = [
    'validate',
    'build',
    'console',
]


class PackerDriver(object):
    """Runs packer templates, wiring in Terraform-provided network/account data.

    The sub-commands listed in ``packer_commands`` are available as
    dynamically-generated no-argument methods.
    """

    def __init__(self, settings, root_fs, ephemeral_fs, builder, flags=None):
        self.settings = settings
        self.root_fs = root_fs
        self.ephemeral_fs = ephemeral_fs
        self.builder = builder
        # BUG FIX: the default was a shared mutable ``flags=[]``; default to
        # a fresh list per instance instead.
        self.flags = [] if flags is None else flags
        self._run = self.settings.runner(cwd=packer_dir, env_ctx=env()).run

    def _ensure_routing(self):
        """Apply the shared network stacks if they don't exist yet."""
        network_targets = routing_targets('main')
        for network_target in network_targets:
            if not TerraformDriver(self.settings, network_target).has_resources():
                TerraformDriver(self.settings, network_target, ['-auto-approve']).apply()

    def _tf_outputs(self, component, keys):
        """Return the Terraform output values of *component*/main for *keys*."""
        target = Target(component, 'main')
        outputs = TerraformDriver(self.settings, target).outputs()
        return [outputs[key]['value'] for key in keys]

    def _get_variables(self):
        """Assemble the packer ``-var`` CLI arguments from Terraform outputs."""
        (vpc, subnets) = self._tf_outputs('network/shared', ('vpc', 'subnets'))
        (key, account_ids, main_account) = self._tf_outputs('packer', ('key', 'allowed_account_ids', 'main_account'))
        _variables = [
            '-var', f'vpc_id={vpc["id"]}',
            '-var', f'subnet_id={list(subnets["public"].values())[0]["id"]}',
            '-var', f'key_id={key["key_id"]}',
            '-var', f'account_ids={json.dumps(account_ids)}',
            '-var', f'root_fs={self.root_fs}',
        ]
        if self.builder == 'ebs':
            _variables += ['-var', f'ephemeral_fs={self.ephemeral_fs}']
        if self.root_fs == 'zfs':
            _variables += ['-var', f'source_owner={main_account["id"]}']
        return _variables

    def _run_packer(self, command):
        """Run one packer sub-command against the builder's template."""
        # Renamed from ``vars``, which shadowed the builtin.
        variables = self._get_variables()
        self._ensure_routing()
        self._run(['packer', command] + variables + [packer_templates[self.builder]])

    def __getattr__(self, name):
        # Expose each supported packer sub-command as a bound no-arg callable.
        if name in packer_commands:
            def _func():
                self._run_packer(name)
            return _func
        # BUG FIX: previously fell through and implicitly returned None for
        # unknown attributes, which breaks hasattr()/getattr() semantics.
        raise AttributeError(
            f"{type(self).__name__!r} object has no attribute {name!r}"
        )
| 34.565217 | 117 | 0.615933 | 274 | 2,385 | 5.087591 | 0.30292 | 0.05165 | 0.021521 | 0.02439 | 0.106169 | 0.04878 | 0 | 0 | 0 | 0 | 0 | 0.000557 | 0.24696 | 2,385 | 68 | 118 | 35.073529 | 0.775612 | 0.012159 | 0 | 0.037736 | 0 | 0 | 0.155905 | 0.084537 | 0 | 0 | 0 | 0 | 0 | 1 | 0.132075 | false | 0 | 0.09434 | 0 | 0.301887 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8750ee733ecb801c032c59be486b95a731ca328e | 3,776 | py | Python | src/tfi/cli/__init__.py | ajbouh/tfi | 6e89e8c8f1ca3b285c788cc6b802fc44f9001290 | [
"MIT"
] | 160 | 2017-09-13T00:32:05.000Z | 2018-05-21T18:17:32.000Z | src/tfi/cli/__init__.py | tesserai/tfi | 6e89e8c8f1ca3b285c788cc6b802fc44f9001290 | [
"MIT"
] | 6 | 2017-09-14T17:54:21.000Z | 2018-01-27T19:31:18.000Z | src/tfi/cli/__init__.py | ajbouh/tfi | 6e89e8c8f1ca3b285c788cc6b802fc44f9001290 | [
"MIT"
] | 11 | 2017-09-13T00:37:08.000Z | 2018-03-05T08:03:34.000Z | import ast
import argparse
import inspect
from collections import OrderedDict
from functools import partial
from tfi.base import _GetAttrAccumulator as _GetAttrAccumulator
from tfi.data import file as _tfi_data_file
from tfi.resolve.model import resolve_auto as _resolve_auto
def _split_list(l, delim):
for ix in range(0, len(l)):
if l[ix] == '--':
return l[:ix], l[ix+1:]
return l, []
def _resolve_needed_params(method, have_kwargs=None):
sig = inspect.signature(method)
needed = OrderedDict(sig.parameters.items())
if inspect.isfunction(method):
del needed[list(needed.keys())[0]]
# Only allow unspecified values to be given.
if have_kwargs:
for k in have_kwargs.keys():
del needed[k]
return needed
def _parse_arg_fn(annotation):
dtype_fn = None
if isinstance(annotation, dict):
dtype_fn = annotation.get('dtype', None)
elif hasattr(annotation, 'dtype'):
dtype_fn = annotation.dtype
def default_dtype_fn(s):
print("default_dtype_fn", s)
if s:
ch = s[0]
if ch == '[' or ch == '{' or ch.isdecimal():
return ast.literal_eval(s)
return s
default_dtype_fn.__name__ = 'literal'
if dtype_fn is None:
dtype_fn = default_dtype_fn
return lambda o: dtype_fn(_tfi_data_file(o[1:]) if o.startswith("@") else o)
def resolve(leading_value, rest):
    """Resolve *leading_value* into a model and drive it from CLI args.

    Builds an argparse CLI on the fly: one ``--option`` per model
    constructor parameter, plus one sub-command per model member with its
    own options. Parses *rest*, instantiates the model, and stores the
    instance under ``resolution['model']`` and a zero-arg callable for the
    chosen sub-command (or None) under ``resolution['model_method_fn']``.
    """
    resolution = _resolve_auto(leading_value)

    # No constructor parameters were resolved: nothing to instantiate.
    if 'model_fn_needed_params' not in resolution:
        resolution['model_method_fn'] = None
        resolution['model'] = None
        return resolution

    empty = inspect.Parameter.empty
    p = argparse.ArgumentParser(prog=leading_value)
    # One --option per constructor parameter; required iff it has no default.
    for name, param in resolution['model_fn_needed_params'].items():
        p.add_argument(
            '--%s' % name,
            required=param.default is empty,
            default=None if param.default is empty else param.default,
            type=_parse_arg_fn({} if param.annotation is empty else param.annotation),
        )
    p.set_defaults(_method=None)

    def apply_fn(ns_keys_to_kw, fn, ns):
        # Translate parsed-namespace attributes into keyword args for fn.
        kw = {}
        for ns_k, kw_k in ns_keys_to_kw.items():
            if hasattr(ns, ns_k):
                kw[kw_k] = getattr(ns, ns_k)
        return fn(**kw)

    def apply_model_method(method_name, ns_keys_to_kw, model, ns):
        # Like apply_fn, but bound to a method of the instantiated model.
        return apply_fn(ns_keys_to_kw, getattr(model, method_name), ns)

    subparsers = p.add_subparsers(help='sub-command help')
    # One sub-command per model member, each with its own options.
    for membername, member in resolution['model_members']:
        sp = subparsers.add_parser(membername)
        needed_params = _resolve_needed_params(member)
        ns_keys_to_kw = {}
        for name, param in needed_params.items():
            # HACK(adamb) Should actually properly process theses?!?
            if isinstance(param.annotation, _GetAttrAccumulator):
                continue
            # Namespace the dest to avoid collisions between sub-commands
            # that share option names.
            dest = "_%s.%s" % (membername, name)
            ns_keys_to_kw[dest] = name
            sp.add_argument(
                '--%s' % name,
                required=param.default is empty,
                dest=dest,
                metavar=name.upper(),
                default=None if param.default is empty else param.default,
                type=_parse_arg_fn({} if param.annotation is empty else param.annotation ),
            )
        sp.set_defaults(_method=partial(apply_model_method, membername, ns_keys_to_kw))

    ns = p.parse_args(rest)
    model_fn = resolution['model_fn']
    # Instantiate the model from the parsed constructor arguments.
    model = apply_fn(
        {k: k for k in resolution['model_fn_needed_params'].keys()},
        model_fn,
        ns)
    # Bind the selected sub-command (if any) so the caller can invoke it later.
    resolution['model_method_fn'] = partial(ns._method, model, ns) if ns._method else None
    resolution['model'] = model
    return resolution
| 34.018018 | 91 | 0.632945 | 498 | 3,776 | 4.554217 | 0.238956 | 0.030864 | 0.024691 | 0.030864 | 0.185185 | 0.17284 | 0.130511 | 0.130511 | 0.130511 | 0.092593 | 0 | 0.001803 | 0.265625 | 3,776 | 110 | 92 | 34.327273 | 0.816084 | 0.025689 | 0 | 0.10989 | 0 | 0 | 0.053047 | 0.017954 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.087912 | 0.010989 | 0.274725 | 0.010989 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
87524cfb18180e13d8c4e3dcda35f9acda0079e3 | 954 | py | Python | cg_pyrosetta/tests/test_sequence_mover_fac.py | shirtsgroup/cg_pyrosetta | bf69737e9bd88735e17c48629b9bc420e5ca2024 | [
"MIT"
] | null | null | null | cg_pyrosetta/tests/test_sequence_mover_fac.py | shirtsgroup/cg_pyrosetta | bf69737e9bd88735e17c48629b9bc420e5ca2024 | [
"MIT"
] | 17 | 2020-01-22T18:48:04.000Z | 2021-07-22T20:20:41.000Z | cg_pyrosetta/tests/test_sequence_mover_fac.py | shirtsgroup/cg_pyrosetta | bf69737e9bd88735e17c48629b9bc420e5ca2024 | [
"MIT"
] | null | null | null | import numpy as np
import pyrosetta
import pytest
from cg_pyrosetta.CG_monte_carlo import SequenceMoverFactory
import os
import sys
import warnings
current_path = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.abspath(current_path + '/../PyRosetta4.modified'))
@pytest.fixture
def pose():
return(pyrosetta.pose_from_sequence('X[CG11]X[CG11]X[CG11]X[CG11]X[CG11]'))
def test_sequence_factory(pose):
mover_list = ['small_dihe', 'shear_dihe']
freq_list = [10, 10]
mover_seq_builder = SequenceMoverFactory()
mover_seq = mover_seq_builder.build_seq_mover(pose, mover_list, freq_list)
assert(mover_seq.size() == 2)
def test_sequence_factory_warn(pose):
mover_list = ['unimplemented_mover', 'small_dihe', 'shear_dihe']
freq_list = [10, 10, 10]
mover_seq_builder = SequenceMoverFactory()
with pytest.warns(UserWarning):
mover_seq_builder.build_seq_mover(pose, mover_list, freq_list)
| 28.909091 | 79 | 0.751572 | 136 | 954 | 4.955882 | 0.367647 | 0.071217 | 0.035608 | 0.059347 | 0.378338 | 0.378338 | 0.271513 | 0.271513 | 0.145401 | 0.145401 | 0 | 0.027778 | 0.132075 | 954 | 32 | 80 | 29.8125 | 0.786232 | 0 | 0 | 0.083333 | 0 | 0 | 0.122642 | 0.060797 | 0 | 0 | 0 | 0 | 0.041667 | 1 | 0.125 | false | 0 | 0.291667 | 0.041667 | 0.416667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8754c125fa1cff09a4ed9e9d885b2be469315ee3 | 6,337 | py | Python | app.py | daylinepp/building_energy_modeling | 9af8753a3b8c8f809c89335051caaec2e80353de | [
"Apache-2.0"
] | null | null | null | app.py | daylinepp/building_energy_modeling | 9af8753a3b8c8f809c89335051caaec2e80353de | [
"Apache-2.0"
] | null | null | null | app.py | daylinepp/building_energy_modeling | 9af8753a3b8c8f809c89335051caaec2e80353de | [
"Apache-2.0"
] | null | null | null | import streamlit as st
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import plotly.express as px
import os
from datetime import datetime
from fbprophet import Prophet
from fbprophet.plot import plot_plotly, plot_components_plotly
import pickle
# Streamlit page chrome and intro copy.
st.set_page_config(layout="wide")  # or "centered"

st.title("Building Energy Modeling")
st.header("Consumption Forecasting & System Analysis")
st.markdown("Web App by [Daylin Epp](https://www.linkedin.com/in/daylin-epp-62989760/)")
st.write("---")
st.markdown("")
st.markdown("Please visit the project repository for more information ")

# Prophet Forecasting
##################################################################################

# load preprocessed dataframes for forecasting scenarios
df_uni = pd.read_csv('raw_data/univariate_forecast.csv')
df_multi = pd.read_csv('raw_data/multivariate_forecast.csv')

# make sure dates are correct dtype ('ds' is Prophet's expected datestamp column)
df_uni['ds'] = pd.to_datetime(df_uni['ds'])
df_multi['ds'] = pd.to_datetime(df_multi['ds'])

# train/test split at 2021-05-25: the final week (168 hours) is held out.
# done in univariate case for consistency
test_uni = df_uni[(df_uni['ds'] >= '2021-05-25 00:00:00')]  # last week of data (168 hours)
train_uni = df_uni[(df_uni['ds'] < '2021-05-25 00:00:00')]

# must be done in multivariate case so that added regressor data is available for forecast
test_multi = df_multi[(df_multi['ds'] >= '2021-05-25 00:00:00')]  # last week of data (168 hours)
train_multi = df_multi[(df_multi['ds'] < '2021-05-25 00:00:00')]

# Display labels for the two scenarios (also the selectbox option values).
# NOTE(review): CONS_MULTI carries a leading space -- looks accidental, but
# it is a runtime label and is preserved here; confirm before changing.
CONS_UNI = 'Univariate Forecast'
CONS_MULTI = ' Multivariate Forecast'
# Prophet Forecasting Model
# will need to add a cache to improve load times once each option has been run
# @st.cache(allow_output_mutation=True)
def make_forecast(selection):
    """Fit a Prophet model for the selected scenario and plot it.

    selection: CONS_UNI or CONS_MULTI (the selectbox labels).
    Returns a (forecast_figure, components_figure) pair of Plotly figures.
    Raises ValueError for an unrecognized selection (previously this fell
    through and crashed with UnboundLocalError).
    """
    if selection == CONS_UNI:
        title = "Univariate Consumption Forecast"
        axis_label = "Energy Use (kWh)"
        df_prophet = train_uni
        extra_regressor = False
    elif selection == CONS_MULTI:  # BUG FIX: was a second independent `if`
        title = "Multivariate Consumption Forecast including Outside Air Temperature & Demand"
        axis_label = "Energy Use (kWh)"
        df_prophet = train_multi
        extra_regressor = True
    else:
        raise ValueError(f"Unknown forecast selection: {selection!r}")

    m = Prophet(interval_width=0.9)  # 90% confidence interval
    if extra_regressor:
        m.add_regressor('demand')
        m.add_regressor('temp thresh')
    m.fit(df_prophet)

    # Forecast over the held-out final week at hourly frequency.
    future = m.make_future_dataframe(periods=len(test_uni), freq='H')
    if extra_regressor:
        # Regressor values must also be supplied for the forecast window;
        # df_multi covers the full date range including the held-out week.
        future['demand'] = df_multi['demand']
        future['temp thresh'] = df_multi['temp thresh']

    forecast = m.predict(future)

    fig_forecast = plot_plotly(m, forecast)
    fig_forecast.update_layout(title=title, yaxis_title=axis_label, xaxis_title="Date")
    fig_components = plot_components_plotly(m, forecast)
    return fig_forecast, fig_components
st.header("Energy Consumption Forecast")
st.markdown("This tool is capable of forecasting hourly building energy consumption. It has been trained on data from June 1, 2020 to May 25, 2021."
            " The forecasting window provides data for the final week of May.")
st.markdown("""
Interpretting the graphs below:
* Use the time period selection boxes at the top left of the graph to change the field of view
* Hover your cursor over the graph to read specific observation values
* Black points are actual values
* Dark blue line is forecast predictions
* Light blue area is 90% confidence interval
""")
st.markdown("Notice there is *significant improvement* in how the forecast fits the actual data in the multivariate case.")
st.markdown("")

# Let the user pick a scenario, fit the model, and render the forecast.
selected_case = st.selectbox("Select Forecast Type:", (CONS_UNI, CONS_MULTI))
plotly_fig, plotly_components = make_forecast(selected_case)
st.plotly_chart(plotly_fig)

st.markdown("The graphs below break down the components of the forecasting curve."
            " They highlight the overall trend, weekly seasonality, and daily seasonality of energy consumption.")
# Trend / weekly / daily component plots from the fitted Prophet model.
st.plotly_chart(plotly_components)
# Predictive Modeling
##################################################################################

# Load the pre-trained model once at startup. BUG FIX: the original
# ``pickle.load(open(...))`` leaked the file handle; a context manager
# guarantees it is closed.
with open("lgb_model_v2.sav", "rb") as _model_file:
    model = pickle.load(_model_file)
def prediction(Point_34, Point_17, Point_37, Point_21,
               Point_194, Point_26, Point_22, Point_9,
               Point_13, Point_23, Point_90, Point_198, Point_10):
    """Run the module-level ``model`` on one row of feature values.

    Each argument is a single sensor/set-point feature; the column names
    must match those used at training time. Returns the model's prediction
    (an array with one hourly-consumption value).
    """
    feature_row = {
        'Point_34': Point_34,
        'Point_17': Point_17,
        'Point_37': Point_37,
        'Point_21': Point_21,
        'Point_194': Point_194,
        'Point_26': Point_26,
        'Point_22': Point_22,
        'Point_9': Point_9,
        'Point_13': Point_13,
        'Point_23': Point_23,
        'Point_90': Point_90,
        'Point_198': Point_198,
        'Point_10': Point_10,
    }
    single_row = pd.DataFrame([feature_row])
    return model.predict(single_row)
st.header("Energy Consumption Prediction")
st.markdown("Now you can try out the model by setting values for each feature.")

# Feature input widgets: st.slider(label, min_value, max_value, default, step).
Point_90 = st.slider('Average Outside Air Temp', -2.0, 33.0, 10.0, 0.5)
Point_21 = st.slider('Mixed Air Temp', 10.0, 40.0, 20.0, 0.5)
Point_22 = st.slider('Min Room Error', -10.0, 10.0, 0.0, 0.5)
Point_37 = st.slider('Supply Air Temp', 9.0, 33.0, 18.0, 0.5)
Point_26 = st.slider('Max Room Temp', 19.0, 35.0, 25.0, 0.5)
Point_17 = st.slider('Total Flow', 0.0, 7475.0, 3000.0, 25.0)
Point_194 = st.slider('Make Up Air Unit 1 Supply Temp', 12.0, 34.0, 22.0, 0.5)
Point_198 = st.slider('Make Up Air Unit 2 Supply Temp', 12.0, 34.0, 22.0, 0.5)
Point_13 = st.slider('Duct Pressure Point', 0.0, 480.0, 200.0, 10.0)
Point_23 = st.slider('Min Room Temp', 14.0, 27.0, 21.0, 0.5)
Point_34 = st.slider('Set Point', 10.0, 33.0, 20.0, 0.5)
Point_9 = st.slider('Return Carbon Dioxide', 390.0, 730.0, 450.0, 10.0)
# NOTE(review): the model receives this as the raw "ON"/"OFF" string --
# confirm the training pipeline encoded Point_10 the same way.
Point_10 = st.selectbox('Air Handling Unit Supply Fan', ("ON", "OFF"))

if st.button('Make Hourly Energy Consumption Prediction'):
    result = prediction(Point_34, Point_17, Point_37, Point_21,
                        Point_194, Point_26, Point_22, Point_9,
                        Point_13, Point_23, Point_90, Point_198, Point_10)
    consumption = round(result[0], 1)
    try:
        st.write(consumption, 'kWh')
    except Exception:
        # BUG FIX: was a bare ``except:`` (which also swallowed SystemExit
        # and KeyboardInterrupt). Kept best-effort: a display failure should
        # not crash the app.
        pass