hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
79a565e7d7928d619d4922162412c3aac164285d | 3,016 | py | Python | nonebot_plugin_bam/database/helper.py | 7sDream/nonebot_plugin_bam | 9d19856661a75484440efff8d77094390230f4c9 | [
"MIT"
] | 4 | 2021-02-08T16:18:12.000Z | 2021-12-28T07:13:51.000Z | nonebot_plugin_bam/database/helper.py | 7sDream/nonebot_plugin_bam | 9d19856661a75484440efff8d77094390230f4c9 | [
"MIT"
] | null | null | null | nonebot_plugin_bam/database/helper.py | 7sDream/nonebot_plugin_bam | 9d19856661a75484440efff8d77094390230f4c9 | [
"MIT"
] | null | null | null | from collections import defaultdict
from typing import Dict
from nonebot.log import logger
from peewee import JOIN
from .db import DB
from .tables import BilibiliUser, BilibiliUserStatus, FollowLink, Group
def log_sql(s):
# logger.debug(f"[DB:SQL] {s.sql()}")
return s
def get_all_groups():
yield from log_sql(Group.select())
def get_group(gid: int) -> Group:
for group in log_sql(Group.select().where(Group.gid == gid)):
return group
return None
def add_group(gid: int, group_suid: int):
return log_sql(
Group.insert(gid=gid, super_user=group_suid).on_conflict_replace()
).execute()
def remove_group(group: Group):
group.delete_instance(recursive=True, delete_nullable=True)
def get_users_with_linked_groups_and_status() -> Dict[int, BilibiliUser]:
users = {}
for user in log_sql(
BilibiliUser.select(BilibiliUser, FollowLink, BilibiliUserStatus)
.join(FollowLink, JOIN.LEFT_OUTER)
.switch(BilibiliUser)
.join(BilibiliUserStatus, JOIN.LEFT_OUTER, attr="status")
):
users[user.uid] = user
return users
def clean_users_live_status():
    log_sql(BilibiliUserStatus.update(live_status=False)).execute()
def clean_user_live_status_in(users):
if len(users) > 0:
log_sql(
BilibiliUserStatus.update(live_status=False).where(
BilibiliUserStatus.bilibili_user.in_(users)
)
).execute()
def set_user_live_status_in(users):
if len(users) > 0:
log_sql(
BilibiliUserStatus.update(live_status=True).where(
BilibiliUserStatus.bilibili_user.in_(users)
)
).execute()
def get_group_with_following_users(gid):
for group in log_sql(
Group.select()
.where(Group.gid == gid)
.join(FollowLink, JOIN.LEFT_OUTER)
.join(BilibiliUser, JOIN.LEFT_OUTER)
):
return group
return None
def get_user(uid):
for user in log_sql(BilibiliUser.select().where(BilibiliUser.uid == uid)):
return user
return None
def add_user(uid, nickname, rid):
user, created = BilibiliUser.get_or_create(
uid=uid, defaults={"nickname": nickname, "rid": rid}
)
if created:
BilibiliUserStatus.create(
bilibili_user=user, newest_activity_id=0, live_status=False
)
else:
user.nickname = nickname
user.rid = rid
user.save()
return user
def add_link(group, user):
FollowLink.create(group=group, bilibili_user=user)
def remove_link(gid, uid):
log_sql(
FollowLink.delete().where(
(FollowLink.group == gid) & (FollowLink.bilibili_user == uid)
)
).execute()
def update_user_newest_activity_id(data: Dict[int, int]):
with DB.atomic():
for user, act_id in data.items():
BilibiliUserStatus.update(newest_activity_id=act_id).where(
BilibiliUserStatus.bilibili_user == user
).execute()
| 24.92562 | 78 | 0.655172 | 372 | 3,016 | 5.107527 | 0.228495 | 0.034737 | 0.023158 | 0.026842 | 0.290526 | 0.236842 | 0.236842 | 0.175789 | 0.121053 | 0.121053 | 0 | 0.001306 | 0.238395 | 3,016 | 120 | 79 | 25.133333 | 0.82586 | 0.011605 | 0 | 0.267442 | 0 | 0 | 0.005707 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.174419 | false | 0 | 0.069767 | 0.023256 | 0.360465 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
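The `add_group` helper above relies on peewee's `insert(...).on_conflict_replace()` upsert. A minimal, self-contained sketch of that pattern against an in-memory SQLite database — the `Group` model here is a local stand-in, not the plugin's real table:

```python
from peewee import SqliteDatabase, Model, IntegerField

db = SqliteDatabase(":memory:")

class Group(Model):
    gid = IntegerField(primary_key=True)
    super_user = IntegerField()

    class Meta:
        database = db

db.create_tables([Group])

# The first insert creates the row; the second replaces it on the gid conflict.
Group.insert(gid=1, super_user=100).on_conflict_replace().execute()
Group.insert(gid=1, super_user=200).on_conflict_replace().execute()
print(Group.get(Group.gid == 1).super_user)  # -> 200
```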
79a69b808745f05349b2ede483ce4782883293d0 | 1,536 | py | Python | monitor.py | hletrd/Facebook-Autopoker | 18735eebd4a34992a43a0987d390bbcfc0050d96 | [
"MIT"
] | 5 | 2015-07-14T17:11:24.000Z | 2016-07-28T11:52:03.000Z | monitor.py | hletrd/Facebook-Autopoker | 18735eebd4a34992a43a0987d390bbcfc0050d96 | [
"MIT"
] | null | null | null | monitor.py | hletrd/Facebook-Autopoker | 18735eebd4a34992a43a0987d390bbcfc0050d96 | [
"MIT"
] | null | null | null | db = 'log.db'
import sqlite3
import time
dbc = sqlite3.connect(db, check_same_thread=False)
dbc.text_factory = str
c = dbc.cursor()
def key(obj):
return obj[2]
while True:
c.execute('SELECT userid, name, COUNT(`date`) FROM log WHERE `date` > \'' + time.strftime('%Y-%m-%d 00:00:00') + '\' AND result=1 GROUP BY userid;')
result = c.fetchall()
result.sort(key=key, reverse=True)
total = 0
for i in result:
total += i[2]
print('Poked ' + str(i[1]) + '(' + str(i[0]) + ') ' + str(i[2]) + ' time' + ('s' if (i[2] > 1) else '') + ' today')
print('Total: ' + str(total) + ' poke' + ('s' if (total > 1) else ''))
c.execute('SELECT COUNT(`date`) FROM log WHERE `date` > datetime(\'' + time.strftime('%Y-%m-%d %H:%M:%S') + '\', \'-24 hours\') AND result=1;')
	print(str(round(c.fetchone()[0] / 1440.0, 2)) + ' ppm for last 24 hours')
c.execute('SELECT COUNT(`date`) FROM log WHERE `date` > datetime(\'' + time.strftime('%Y-%m-%d %H:%M:%S') + '\', \'-6 hours\') AND result=1;')
	print(str(round(c.fetchone()[0] / 360.0, 2)) + ' ppm for last 6 hours')
c.execute('SELECT COUNT(`date`) FROM log WHERE `date` > datetime(\'' + time.strftime('%Y-%m-%d %H:%M:%S') + '\', \'-1 hours\') AND result=1;')
	print(str(round(c.fetchone()[0] / 60.0, 2)) + ' ppm for last 1 hour')
c.execute('SELECT COUNT(`date`) FROM log WHERE `date` > datetime(\'' + time.strftime('%Y-%m-%d %H:%M:%S') + '\', \'-5 minutes\') AND result=1;')
	print(str(round(c.fetchone()[0] / 5.0, 2)) + ' ppm for last 5 minutes')
print('')
time.sleep(5) | 45.176471 | 149 | 0.575521 | 254 | 1,536 | 3.468504 | 0.283465 | 0.022701 | 0.079455 | 0.090806 | 0.573212 | 0.492622 | 0.464245 | 0.464245 | 0.464245 | 0.429058 | 0 | 0.058457 | 0.164714 | 1,536 | 34 | 150 | 45.176471 | 0.628215 | 0 | 0 | 0 | 0 | 0 | 0.398829 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037037 | false | 0 | 0.074074 | 0.037037 | 0.148148 | 0.259259 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
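The queries above splice `time.strftime()` output directly into SQL strings. A hedged sketch of the same rate computation with bound parameters instead (the `log` schema below is inferred from the columns the script selects):

```python
import sqlite3

conn = sqlite3.connect(":memory:")
c = conn.cursor()
c.execute("CREATE TABLE log (userid INTEGER, name TEXT, date TEXT, result INTEGER)")
c.execute("INSERT INTO log VALUES (1, 'alice', datetime('now'), 1)")

# Bind the interval modifier as a parameter rather than concatenating it in.
# Note: datetime('now') is UTC, while the original compares local strftime().
c.execute(
    "SELECT COUNT(date) FROM log WHERE date > datetime('now', ?) AND result=1",
    ("-24 hours",),
)
print(round(c.fetchone()[0] / 1440.0, 2), "ppm for last 24 hours")
```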
79a7ef7c11468985df93521d767d16d3db7e7f54 | 1,949 | py | Python | annoTree/subs/parsSyms.py | jvfNontools/jvfNontools | 60b3c2643f6cabbcad342b5f6b3e5490e89f31f5 | [
"Apache-2.0"
] | null | null | null | annoTree/subs/parsSyms.py | jvfNontools/jvfNontools | 60b3c2643f6cabbcad342b5f6b3e5490e89f31f5 | [
"Apache-2.0"
] | null | null | null | annoTree/subs/parsSyms.py | jvfNontools/jvfNontools | 60b3c2643f6cabbcad342b5f6b3e5490e89f31f5 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
#Copyright 2018 Jim Van Fleet
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
class SearchFileForSyms:
def doSearch(self, openFile, searchItem):
symb = "sym="
startb = "start-address=0x"
commb = ","
spacb = " "
parab = "("
brackb = "["
allSyms = []
with open(openFile) as symFile:
for line in symFile:
if (line.find(searchItem) == -1):
continue
# want exception if search items not found
si = line.index(symb)
ei = line.index(commb, (si+1))
li = line.rfind(parab, (si+1), ei)
if (li == -1):
li = line.rfind(brackb, (si+1), ei)
if li == -1:
sy0 = line[(si+4): (ei)]
else:
sy0 = line[(si+4): li]
else:
sy0 = line[(si+4): li]
line = symFile.readline()
line = symFile.readline()
si = line.index(startb)
ei = line.index(spacb, si)
star10 = line[(si+16): ei]
star1 = star10.lstrip("0")
allSyms.append(sy0)
allSyms.append(star1)
return allSyms
def __init__(self, openFile, searchItem):
self.symData = self.doSearch(openFile, searchItem)
| 34.192982 | 73 | 0.531042 | 226 | 1,949 | 4.561947 | 0.50885 | 0.058196 | 0.026188 | 0.029098 | 0.050436 | 0.050436 | 0 | 0 | 0 | 0 | 0 | 0.026742 | 0.366855 | 1,949 | 56 | 74 | 34.803571 | 0.808752 | 0.307337 | 0 | 0.166667 | 0 | 0 | 0.018685 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0 | 0 | 0.111111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
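A usage sketch for `SearchFileForSyms`, assuming `parsSyms` is on the import path; the temp file mimics the map-file layout the parser expects — a `sym=` entry, one line it skips, then a `start-address=0x...` line consumed with `readline()`:

```python
import os
import tempfile

from parsSyms import SearchFileForSyms  # assumed import path

content = (
    "section foo sym=my_func, size=12\n"
    "  attributes: none\n"
    "  start-address=0x0000000000401000 flags=r-x\n"
)
with tempfile.NamedTemporaryFile("w", suffix=".map", delete=False) as f:
    f.write(content)

search = SearchFileForSyms(f.name, "foo")
print(search.symData)  # -> ['my_func', '401000']
os.unlink(f.name)
```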
79aa3668bb043f0729ae0d753b69ad0de26cb30d | 2,270 | py | Python | models/networkgcn.py | Byomyyt/GnTCN | b4cc9e97fc0b0438deb0a7e118817a7ab73ae93c | [
"MIT"
] | 91 | 2021-04-06T15:33:11.000Z | 2022-03-31T05:16:27.000Z | models/networkgcn.py | ddddwee1/GnTCN | e1abb8c526b2a9904d6f964b0084b54f123b82c9 | [
"MIT"
] | 17 | 2021-01-04T09:08:20.000Z | 2022-03-17T11:45:27.000Z | models/networkgcn.py | ddddwee1/GnTCN | e1abb8c526b2a9904d6f964b0084b54f123b82c9 | [
"MIT"
] | 15 | 2021-01-18T01:54:23.000Z | 2021-09-24T01:29:32.000Z | import numpy as np
import torch
import torch.nn.functional as F
from TorchSUL import Model as M
from torch.nn.parameter import Parameter
import torch.nn.init as init
class PropLayer(M.Model):
def initialize(self, outdim, usebias=True):
self.outdim = outdim
self.act = torch.nn.ReLU()
self.act2 = torch.nn.ReLU()
self.usebias = usebias
def build(self, *inp):
# inp: [Bsize, num_pts, 2]
num_pts = inp[0].shape[1]
indim = inp[0].shape[2]
self.weight = Parameter(torch.Tensor(num_pts, indim, self.outdim))
self.weight2 = Parameter(torch.Tensor(num_pts, self.outdim, self.outdim))
init.kaiming_uniform_(self.weight, a=np.sqrt(5))
init.kaiming_uniform_(self.weight2, a=np.sqrt(5))
if self.usebias:
print('initialize bias')
self.bias = Parameter(torch.Tensor(num_pts, self.outdim))
self.bias2 = Parameter(torch.Tensor(num_pts, self.outdim))
init.uniform_(self.bias, -0.1, 0.1)
init.uniform_(self.bias2, -0.1, 0.1)
def forward(self, inp, aff=None, act=True):
if aff is not None:
# propagate the keypoints
x = torch.einsum('ikl,ijk->ijl', inp, aff)
else:
x = inp
x = torch.einsum('ijk,jkl->ijl', x, self.weight)
if self.usebias:
x = x + self.bias
if act:
x = self.act(x)
# x = F.dropout(x, 0.25, self.training, False)
x = torch.einsum('ijk,jkl->ijl', x, self.weight2)
if self.usebias:
x = x + self.bias2
if act:
x = self.act2(x)
#x = F.dropout(x, 0.25, self.training, False)
if aff is not None:
x = torch.cat([inp, x], dim=-1)
return x
class TransNet(M.Model):
def initialize(self, outdim, num_pts):
self.num_pts = num_pts
self.c1 = PropLayer(outdim)
self.c2 = PropLayer(outdim)
self.c3 = PropLayer(outdim)
self.b2 = PropLayer(outdim)
self.b3 = PropLayer(outdim)
self.c8 = PropLayer(outdim)
self.c9 = PropLayer(3)
def forward(self, x, aff, aff_bone, inc, inc_inv):
x = feat = self.c1(x)
x = self.c2(x, aff)
x = self.c3(x, aff)
feat = torch.einsum('ijk,lj->ilk', feat, inc)
feat = self.b2(feat, aff_bone)
feat = self.b3(feat, aff_bone)
feat = torch.einsum('ijk,lj->ilk', feat, inc_inv)
x = torch.cat([x, feat], dim=-1)
x = self.c8(x)
x = self.c9(x, act=False)
# print(x.shape)
x = x.reshape(-1, self.num_pts, 3)
return x
| 27.02381 | 75 | 0.657269 | 385 | 2,270 | 3.820779 | 0.220779 | 0.067981 | 0.077498 | 0.062542 | 0.299116 | 0.262407 | 0.197145 | 0.172672 | 0.042148 | 0.042148 | 0 | 0.025918 | 0.184141 | 2,270 | 83 | 76 | 27.349398 | 0.768359 | 0.067401 | 0 | 0.136364 | 0 | 0 | 0.034581 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.075758 | false | 0 | 0.090909 | 0 | 0.227273 | 0.015152 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
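The heart of `PropLayer.forward` is the first `einsum`, which mixes per-joint features under the affinity matrix `aff`. A standalone sketch of just that contraction with plain PyTorch (shapes picked arbitrarily):

```python
import torch

B, J, C = 2, 17, 64                                # batch, joints, channels
inp = torch.randn(B, J, C)                         # per-joint features
aff = torch.softmax(torch.randn(B, J, J), dim=-1)  # row-normalized affinity

# 'ikl,ijk->ijl': x[b, j] = sum_k aff[b, j, k] * inp[b, k]
x = torch.einsum("ikl,ijk->ijl", inp, aff)
print(x.shape)  # torch.Size([2, 17, 64])

# The same contraction as a batched matrix multiply:
assert torch.allclose(x, torch.bmm(aff, inp), atol=1e-6)
```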
79ac7f2c83fe009a0c2f95dafc3599ebde6411df | 307 | py | Python | tests/backends/test_init.py | benkrikler/fast-carpenter-github-test | b6f7e1b218d3a1f39fcbe739c8bab19af63aabb8 | [
"Apache-2.0"
] | 12 | 2019-05-17T13:02:20.000Z | 2020-08-31T08:16:47.000Z | tests/backends/test_init.py | FAST-HEP/fast-carpenter | b6f7e1b218d3a1f39fcbe739c8bab19af63aabb8 | [
"Apache-2.0"
] | 104 | 2019-05-17T16:25:35.000Z | 2022-03-28T16:11:10.000Z | tests/backends/test_init.py | benkrikler/fast-carpenter-github-test | b6f7e1b218d3a1f39fcbe739c8bab19af63aabb8 | [
"Apache-2.0"
] | 16 | 2019-05-20T16:57:48.000Z | 2020-09-28T16:36:21.000Z | import pytest
import fast_carpenter.backends as backends
def test_get_backend():
coffea_back = backends.get_backend("coffea:dask")
assert hasattr(coffea_back, "execute")
with pytest.raises(ValueError) as e:
backends.get_backend("doesn't exist")
assert "Unknown backend" in str(e)
| 25.583333 | 53 | 0.732899 | 42 | 307 | 5.190476 | 0.619048 | 0.137615 | 0.146789 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.169381 | 307 | 11 | 54 | 27.909091 | 0.854902 | 0 | 0 | 0 | 0 | 0 | 0.149837 | 0 | 0 | 0 | 0 | 0 | 0.25 | 1 | 0.125 | false | 0 | 0.25 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79ae5d267641860a50ba60429c85299cdeeef14d | 1,534 | py | Python | serving_patterns/src/api_composition_proxy/helpers.py | shibuiwilliam/ml-system-in-action | 0aa9d6bc4a4346236b9c971ec90afad04bcf5cca | [
"MIT"
] | 10 | 2020-08-30T03:19:10.000Z | 2021-08-08T17:38:06.000Z | serving_patterns/src/api_composition_proxy/helpers.py | shibuiwilliam/ml-system-in-action | 0aa9d6bc4a4346236b9c971ec90afad04bcf5cca | [
"MIT"
] | null | null | null | serving_patterns/src/api_composition_proxy/helpers.py | shibuiwilliam/ml-system-in-action | 0aa9d6bc4a4346236b9c971ec90afad04bcf5cca | [
"MIT"
] | 6 | 2020-08-30T03:19:13.000Z | 2021-11-26T23:32:42.000Z | from typing import Dict
import logging
logger = logging.getLogger(__name__)
def path_builder(url: str, path: str) -> str:
if path == "" or path is None:
return url
if path.startswith("/"):
path = path[1:]
if url.endswith("/"):
url = f"{url}{path}"
else:
url = f"{url}/{path}"
return url
def url_builder(hostname: str, https: bool = False) -> str:
if not (hostname.startswith("http://") or hostname.startswith("https://")):
hostname = f"https://{hostname}" if https else f"http://{hostname}"
return hostname
def url_path_builder(hostname: str, path: str, https: bool = False) -> str:
hostname = url_builder(hostname, https)
url = path_builder(hostname, path)
return url
def customized_redirect_builder(alias: str, url: str, redirect_path: str, customized_redirect_map: Dict[str, Dict[str, str]] = None) -> str:
"""
customized_redirect_map
{
ALIAS_0:
{
REDIRECT_PATH_0: redirect_path_0,
REDIRECT_PATH_1: redirect_path_1,
},
ALIAS_1:
{
REDIRECT_PATH_0: redirect_path_0,
REDIRECT_PATH_2: redirect_path_2,
}
}
"""
path = path_builder(url, redirect_path)
if customized_redirect_map is None:
return path
if alias in customized_redirect_map.keys():
if redirect_path in customized_redirect_map[alias].keys():
path = path_builder(url, customized_redirect_map[alias][redirect_path])
return path
| 27.890909 | 140 | 0.627119 | 194 | 1,534 | 4.71134 | 0.190722 | 0.157549 | 0.137856 | 0.091904 | 0.128009 | 0.084245 | 0.083151 | 0.083151 | 0 | 0 | 0 | 0.009649 | 0.256845 | 1,534 | 54 | 141 | 28.407407 | 0.792105 | 0.160365 | 0 | 0.172414 | 0 | 0 | 0.061275 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.137931 | false | 0 | 0.068966 | 0 | 0.413793 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
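A short usage sketch of the helpers above, assuming the module is importable under the repo path `src.api_composition_proxy.helpers`; expected outputs are shown in comments:

```python
from src.api_composition_proxy.helpers import (
    customized_redirect_builder,
    url_path_builder,
)

print(url_path_builder("service-a", "predict"))
# -> http://service-a/predict
print(url_path_builder("service-a", "/predict", https=True))
# -> https://service-a/predict

redirect_map = {"alias0": {"predict": "v1/predict"}}
print(customized_redirect_builder("alias0", "http://service-a", "predict", redirect_map))
# -> http://service-a/v1/predict
print(customized_redirect_builder("other", "http://service-a", "predict", redirect_map))
# -> http://service-a/predict
```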
79ae9cb10f166f65649baf95240f5d262fca4fa9 | 1,856 | py | Python | rsa/rsa/common.py | andrew-kulikov/crypto | c81cf7965d58da23ce234435676c8516daf3c649 | [
"MIT"
] | null | null | null | rsa/rsa/common.py | andrew-kulikov/crypto | c81cf7965d58da23ce234435676c8516daf3c649 | [
"MIT"
] | null | null | null | rsa/rsa/common.py | andrew-kulikov/crypto | c81cf7965d58da23ce234435676c8516daf3c649 | [
"MIT"
] | null | null | null | import typing
class NotRelativePrimeError(ValueError):
def __init__(self, a, b, d, msg=''):
super().__init__(msg or "%d and %d are not relatively prime, divider=%i" % (a, b, d))
self.a = a
self.b = b
self.d = d
def bit_size(num: int) -> int:
try:
return num.bit_length()
except AttributeError:
raise TypeError('bit_size(num) only supports integers, not %r' % type(num))
def byte_size(number: int) -> int:
if number == 0: return 1
return ceil_div(bit_size(number), 8)
def ceil_div(num: int, div: int) -> int:
quanta, mod = divmod(num, div)
if mod:
quanta += 1
return quanta
def extended_gcd(a: int, b: int) -> typing.Tuple[int, int, int]:
"""Returns a tuple (r, i, j) such that r = gcd(a, b) = ia + jb
"""
    # r = gcd(a, b); i = multiplicative inverse of a mod b,
    # or j = multiplicative inverse of b mod a.
    # Negative return values for i or j are made positive mod b or a respectively.
    # The iterative version is faster and uses much less stack space.
x = 0
y = 1
lx = 1
ly = 0
oa = a # Remember original a/b to remove
ob = b # negative values from return results
while b != 0:
q = a // b
(a, b) = (b, a % b)
(x, lx) = ((lx - (q * x)), x)
(y, ly) = ((ly - (q * y)), y)
        if lx < 0:
            lx += ob  # If negative, wrap modulo original b
        if ly < 0:
            ly += oa  # If negative, wrap modulo original a
return a, lx, ly # Return only positive values
def inverse(x: int, n: int) -> int:
"""Returns the inverse of x % n under multiplication, a.k.a x^-1 (mod n)
>>> inverse(7, 4)
3
>>> (inverse(143, 4) * 143) % 4
1
"""
(divider, inv, _) = extended_gcd(x, n)
if divider != 1:
raise NotRelativePrimeError(x, n, divider)
return inv
| 25.424658 | 93 | 0.553341 | 293 | 1,856 | 3.443686 | 0.351536 | 0.015857 | 0.005946 | 0.011893 | 0.043608 | 0 | 0 | 0 | 0 | 0 | 0 | 0.019701 | 0.316272 | 1,856 | 72 | 94 | 25.777778 | 0.775414 | 0.310884 | 0 | 0 | 0 | 0 | 0.072698 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.02381 | 0 | 0.309524 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
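A toy worked example of what `inverse` is for in RSA — deriving the private exponent `d` from `e` and φ(n) — assuming the module is importable as `rsa.common`; the primes are tiny, for illustration only:

```python
from rsa.common import inverse

p, q = 61, 53
n = p * q                # 3233
phi = (p - 1) * (q - 1)  # 3120
e = 17                   # public exponent, coprime with phi

d = inverse(e, phi)      # private exponent: (e * d) % phi == 1
print(d)                 # -> 2753
assert (e * d) % phi == 1

# Round-trip a message m < n: decrypt(encrypt(m)) == m
m = 65
assert pow(pow(m, e, n), d, n) == m
```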
79af2c5e0250f4d13af181fe19d4ed482ecdc804 | 12,217 | py | Python | tests/unit/core/test_datasetprofile.py | bernease/whylogs-python | cfd2a2f71280537aae584cbd40a752fbe7da647b | [
"Apache-2.0"
] | null | null | null | tests/unit/core/test_datasetprofile.py | bernease/whylogs-python | cfd2a2f71280537aae584cbd40a752fbe7da647b | [
"Apache-2.0"
] | null | null | null | tests/unit/core/test_datasetprofile.py | bernease/whylogs-python | cfd2a2f71280537aae584cbd40a752fbe7da647b | [
"Apache-2.0"
] | null | null | null | import datetime
import json
import os
from uuid import uuid4
import pytest
import numpy as np
from pandas import util
from whylogs.core.datasetprofile import DatasetProfile, array_profile, dataframe_profile
from whylogs.core.model_profile import ModelProfile
from whylogs.util import time
from whylogs.util.protobuf import message_to_dict, message_to_json
from whylogs.util.time import to_utc_ms
def test_all_zeros_returns_summary_with_stats():
stats = ("min", "max", "stddev", "mean")
array = np.zeros([100, 1])
prof = array_profile(array)
msg = prof.to_summary()
d = message_to_dict(msg)
d1 = json.loads(message_to_json(msg))
number_summary = d["columns"]["0"]["numberSummary"]
missing_stats = [k for k in stats if k not in number_summary]
if len(missing_stats) > 0:
raise RuntimeError(f"Stats missing from number summary: {missing_stats}")
assert d == d1
def test_empty_valid_datasetprofiles_empty():
now = datetime.datetime.utcnow()
shared_session_id = uuid4().hex
x1 = DatasetProfile(name="test", session_id=shared_session_id, session_timestamp=now, tags={
"key": "value"}, metadata={"key": "value"}, )
x2 = DatasetProfile(name="test", session_id=shared_session_id, session_timestamp=now, tags={
"key": "value"}, metadata={"key": "value"}, )
merged = x1.merge(x2)
assert merged.name == "test"
assert merged.session_id == shared_session_id
assert merged.session_timestamp == now
assert merged.columns == {}
def test_merge_different_columns():
now = datetime.datetime.utcnow()
shared_session_id = uuid4().hex
x1 = DatasetProfile(name="test", session_id=shared_session_id, session_timestamp=now, tags={
"key": "value"}, metadata={"key": "x1"}, )
x1.track("col1", "value")
x2 = DatasetProfile(name="test", session_id=shared_session_id, session_timestamp=now, tags={
"key": "value"}, metadata={"key": "x2"}, )
x2.track("col2", "value")
merged = x1.merge(x2)
assert merged.name == "test"
assert merged.session_id == shared_session_id
assert merged.session_timestamp == now
assert set(list(merged.columns.keys())) == {"col1", "col2"}
assert merged.columns["col1"].counters.count == 1
assert merged.columns["col2"].counters.count == 1
assert merged.tags == dict({"name": "test", "key": "value"})
assert merged.metadata == dict({"key": "x1"})
def test_merge_lhs_no_profile():
now = datetime.datetime.utcnow()
shared_session_id = uuid4().hex
x1 = DatasetProfile(name="test", session_id=shared_session_id, session_timestamp=now, tags={
"key": "value"}, metadata={"key": "value"}, )
x2 = DatasetProfile(name="test", session_id=shared_session_id, session_timestamp=now, tags={
"key": "value"}, metadata={"key": "value"}, model_profile=ModelProfile())
merged = x1.merge(x2)
assert merged.name == "test"
assert merged.session_id == shared_session_id
assert merged.session_timestamp == now
assert merged.columns == {}
assert merged.model_profile is not None
def test_merge_rhs_no_profile():
now = datetime.datetime.utcnow()
shared_session_id = uuid4().hex
x1 = DatasetProfile(name="test", session_id=shared_session_id, session_timestamp=now, tags={
"key": "value"}, metadata={"key": "value"}, model_profile=ModelProfile())
x2 = DatasetProfile(name="test", session_id=shared_session_id, session_timestamp=now, tags={
"key": "value"}, metadata={"key": "value"}, )
merged = x1.merge(x2)
assert merged.name == "test"
assert merged.session_id == shared_session_id
assert merged.session_timestamp == now
assert merged.columns == {}
assert merged.model_profile is not None
def test_merge_same_columns():
now = datetime.datetime.utcnow()
shared_session_id = uuid4().hex
x1 = DatasetProfile(name="test", session_id=shared_session_id, session_timestamp=now, tags={
"key": "value"}, metadata={"key": "value"}, )
x1.track("col1", "value1")
x2 = DatasetProfile(name="test", session_id=shared_session_id, session_timestamp=now, tags={
"key": "value"}, metadata={"key": "value"}, )
x2.track("col1", "value1")
x2.track("col2", "value")
merged = x1.merge(x2)
assert merged.name == "test"
assert merged.session_id == shared_session_id
assert merged.session_timestamp == now
assert set(list(merged.columns.keys())) == {"col1", "col2"}
assert merged.columns["col1"].counters.count == 2
assert merged.columns["col2"].counters.count == 1
def test_protobuf_round_trip():
now = datetime.datetime.utcnow()
tags = {"k1": "rock", "k2": "scissors", "k3": "paper"}
original = DatasetProfile(name="test", dataset_timestamp=now, tags=tags, )
original.track("col1", "value")
original.track("col2", "value")
msg = original.to_protobuf()
roundtrip = DatasetProfile.from_protobuf(msg)
assert roundtrip.name == "test"
assert roundtrip.session_id == original.session_id
assert to_utc_ms(roundtrip.session_timestamp) == to_utc_ms(
original.session_timestamp)
assert set(list(roundtrip.columns.keys())) == {"col1", "col2"}
assert roundtrip.columns["col1"].counters.count == 1
assert roundtrip.columns["col2"].counters.count == 1
tags["name"] = "test"
assert set(roundtrip.tags) == set(tags)
assert roundtrip.metadata == original.metadata
def test_non_string_tag_raises_assert_error():
now = datetime.datetime.utcnow()
tags = {"key": "value"}
x = DatasetProfile("test", now, tags=tags)
x.validate()
# Include a non-string tag
x._tags["number"] = 1
try:
x.validate()
raise RuntimeError("validate should raise an AssertionError")
except AssertionError:
pass
def test_mismatched_tags_raises_assertion_error():
now = datetime.datetime.utcnow()
x1 = DatasetProfile("test", now, tags={"key": "foo"})
x2 = DatasetProfile("test", now, tags={"key": "bar"})
try:
x1.merge_strict(x2)
raise RuntimeError("Assertion error not raised")
except AssertionError:
pass
def test_mismatched_tags_merge_succeeds():
now = datetime.datetime.utcnow()
x1 = DatasetProfile("test", now, tags={"key": "foo"})
x2 = DatasetProfile("test2", now, tags={"key": "bar"})
result = x1.merge(x2)
assert result.tags.get("key") == "foo"
def test_name_always_appear_in_tags():
x1 = DatasetProfile(name="test")
assert x1.tags["name"] == "test"
def test_parse_delimited_from_java_single():
dir_path = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(dir_path, "output_from_java_08242020.bin"), "rb") as f:
data = f.read()
assert DatasetProfile.parse_delimited_single(data) is not None
with open(os.path.join(dir_path, "output_from_java_01212021.bin"), "rb") as f:
data = f.read()
assert DatasetProfile.parse_delimited_single(data) is not None
def test_parse_from_protobuf():
dir_path = os.path.dirname(os.path.realpath(__file__))
DatasetProfile.read_protobuf(os.path.join(
dir_path, "output_from_java_08242020.bin"))
def test_parse_delimited_from_java_multiple():
dir_path = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(dir_path, "output_from_java_08242020.bin"), "rb") as f:
data = f.read()
multiple = data + data
result = DatasetProfile.parse_delimited(multiple)
assert len(result) == 2
def test_write_delimited_single():
now = datetime.datetime.utcnow()
original = DatasetProfile(name="test", session_id="test.session.id", session_timestamp=now, tags={
"key": "value"}, metadata={"key": "value"}, )
original.track("col1", "value")
output_bytes = original.serialize_delimited()
pos, roundtrip = DatasetProfile.parse_delimited_single(output_bytes)
assert roundtrip.session_id == original.session_id
# Python time precision includes nanoseconds
assert time.to_utc_ms(roundtrip.session_timestamp) == time.to_utc_ms(
original.session_timestamp)
assert roundtrip.tags == original.tags
assert roundtrip.metadata == original.metadata
def test_write_delimited_multiple():
now = datetime.datetime.utcnow()
original = DatasetProfile(name="test", session_id="test.session.id", session_timestamp=now, tags={
"key": "value"}, metadata={"key": "value"}, )
original.track("col1", "value")
output_bytes = original.serialize_delimited()
multiple_entries = output_bytes
for i in range(1, 5):
multiple_entries += output_bytes
entries = DatasetProfile.parse_delimited(multiple_entries)
assert len(entries) == 5
for entry in entries:
assert entry.session_id == original.session_id
# Python time precisions are different
assert time.to_utc_ms(entry.session_timestamp) == time.to_utc_ms(
original.session_timestamp)
assert entry.tags == original.tags
assert entry.metadata == original.metadata
def test_verify_schema_version():
dp = DatasetProfile(name="test", session_id="test.session.id", session_timestamp=datetime.datetime.now(
), tags={"key": "value"}, metadata={"key": "value"}, )
props = dp.to_properties()
assert props.schema_major_version == 1
assert props.schema_minor_version == 1
def test_timestamp():
    time = datetime.datetime.now()
    dp = DatasetProfile(name="test", session_id="test.session.id", session_timestamp=time,
                        tags={"key": "value"}, metadata={"key": "value"}, )
    time_2 = dp.session_timestamp_ms
    assert time_2 == int(time.replace(
        tzinfo=datetime.timezone.utc).timestamp() * 1000.0)
def test_dataframe_profile():
time = datetime.datetime.now()
df = util.testing.makeDataFrame()
profile = DatasetProfile("test", time)
profile.track_dataframe(df)
profile_factory = dataframe_profile(df, name="test", timestamp=time)
assert profile_factory.columns["A"].number_tracker.variance.mean == profile.columns[
"A"].number_tracker.variance.mean
profile_factory_2 = dataframe_profile(df)
assert profile_factory_2.columns["A"].number_tracker.variance.mean == profile.columns[
"A"].number_tracker.variance.mean
profile_factory_3 = dataframe_profile(df, timestamp=103433)
assert profile_factory_3.columns["A"].number_tracker.variance.mean == profile.columns[
"A"].number_tracker.variance.mean
def test_track():
now = datetime.datetime.utcnow()
original = DatasetProfile(name="test", session_id="test.session.id", session_timestamp=now, tags={
"key": "value"}, metadata={"key": "value"}, )
data = {
"rows": 1,
"names": "roger roger",
}
original.track(columns=data)
def test_errors():
now = datetime.datetime.utcnow()
original = DatasetProfile(name="test", session_id="test.session.id", session_timestamp=now, tags={
"key": "value"}, metadata={"key": "value"}, )
with pytest.raises(TypeError):
original.track(columns=1, data=34)
def test_flat_summary():
now = datetime.datetime.utcnow()
original = DatasetProfile(name="test", session_id="test.session.id", session_timestamp=now, tags={
"key": "value"}, metadata={"key": "value"}, )
flat_summary = original.flat_summary()
assert flat_summary is not None
assert len(original.flat_summary()) == 4
def test_chunk_iterator():
now = datetime.datetime.utcnow()
original = DatasetProfile(name="test", session_id="test.session.id", session_timestamp=now, tags={
"key": "value"}, metadata={"key": "value"}, )
data = {
"rows": 1,
"names": "roger roger",
}
original.track(columns=data)
for each_chuck in original.chunk_iterator():
assert each_chuck is not None
def test_array():
now = datetime.datetime.utcnow()
original = DatasetProfile(name="test", session_id="test.session.id", session_timestamp=now, tags={
"key": "value"}, metadata={"key": "value"}, )
with pytest.raises(ValueError):
original.track_array(np.random.rand(3))
| 35.106322 | 107 | 0.681919 | 1,535 | 12,217 | 5.227362 | 0.134853 | 0.066176 | 0.045364 | 0.068669 | 0.645937 | 0.617896 | 0.600698 | 0.559696 | 0.547233 | 0.542498 | 0 | 0.014706 | 0.17623 | 12,217 | 347 | 108 | 35.207493 | 0.782591 | 0.008513 | 0 | 0.515504 | 0 | 0 | 0.092906 | 0.00958 | 0 | 0 | 0 | 0 | 0.255814 | 1 | 0.093023 | false | 0.007752 | 0.046512 | 0 | 0.139535 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
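The tests above exercise the legacy whylogs `DatasetProfile` API end to end; a condensed sketch of the same flow — profile a dataframe, then round-trip it through the delimited protobuf format — assuming the same whylogs version the tests target:

```python
import datetime

import pandas as pd
from whylogs.core.datasetprofile import DatasetProfile

df = pd.DataFrame({"a": [1, 2, 3], "b": ["x", "y", "z"]})

profile = DatasetProfile(name="example", session_timestamp=datetime.datetime.utcnow())
profile.track_dataframe(df)

# Serialize and parse back, as in the tests above.
payload = profile.serialize_delimited()
_, restored = DatasetProfile.parse_delimited_single(payload)
assert set(restored.columns.keys()) == {"a", "b"}
```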
79af4b15407f4473ba60b0d4c07074b41824263f | 2,645 | py | Python | canvas/cli/api.py | robinsax/canvas | 6e8b9b260fdda868d687b562926a2038736ec56c | [
"Apache-2.0"
] | 4 | 2018-01-24T01:34:39.000Z | 2021-01-14T21:29:47.000Z | canvas/cli/api.py | robinsax/canvas | 6e8b9b260fdda868d687b562926a2038736ec56c | [
"Apache-2.0"
] | 2 | 2018-06-09T22:28:56.000Z | 2018-06-12T01:40:10.000Z | canvas/cli/api.py | robinsax/canvas | 6e8b9b260fdda868d687b562926a2038736ec56c | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
'''
The CLI API definition, available to both the core and plugins.
'''
import sys
from ..exceptions import NotInstalled
from .. import __installed__
# Define the global name to launcher function map.
_launchers = dict()
# Define a single character to launcher function map.
_shortforms = dict()
def launcher(name, **info):
'''
Register a launcher function to be referenced from the CLI as `name`. An
abbreviation will be automatically assigned if one is available. The `info`
keyword arguments can contain one or more of:
* `description` - A textual description of the launch mode.
* `argspec` - A CLI argument specification.
* `init` - A flag indicating a full initialization is required before the
handler is invoked.
'''
def launcher_wrap(func):
ref_name, char = name, name[0]
func.__info__ = info
if char not in _shortforms:
# Assign a short form alias.
ref_name = ''.join(('(', char, ')', name[1:]))
_shortforms[char] = func
info['ref_name'] = ref_name
_launchers[name] = func
return func
return launcher_wrap
def launch_cli(args):
'''Launch the CLI given the commandline arguments `args`.'''
# Define the incorrect usage handler.
def print_usage():
		# Define the argument representation generator.
def write_one(name, launcher):
ref_name = launcher.__info__['ref_name']
string = ' '.join(
(''.join(('--', ref_name)), launcher.__info__.get('argspec', ''))
)
string = ''.join((
string, ' '*(35 - len(string)),
launcher.__info__.get('description', '')
))
return string
# Sort launch options alphabetically.
alpha_order = sorted(_launchers.keys())
print(' '.join((
'Usage:',
'python3 canvas [',
'\n\t' + '\n\t'.join(
write_one(name, _launchers[name]) for name in alpha_order
),
'\n]'
)))
# Exit.
sys.exit(1)
# Define an asserted initializer.
def safe_initialize():
if not __installed__:
raise NotInstalled('Run python3 canvas --init')
from ..core import initialize
initialize()
if args and args[0] == '-!':
		# The -! switch causes eager initialization.
safe_initialize()
args = args[1:]
# Nothing supplied, show usage.
if not args:
print_usage()
# Look up the launcher.
launcher = None
if args[0].startswith('--'):
launcher = _launchers.get(args[0][2:])
elif args[0].startswith('-'):
launcher = _shortforms.get(args[0][1:])
if not launcher:
print_usage()
if launcher.__info__.get('init', False):
# This launcher requires initialization.
safe_initialize()
if launcher(args[1:]) is False:
# The launch function reported incorrect usage.
print_usage()
| 25.432692 | 76 | 0.677127 | 345 | 2,645 | 5.023188 | 0.37971 | 0.028275 | 0.025967 | 0.024235 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007955 | 0.19206 | 2,645 | 103 | 77 | 25.679612 | 0.802995 | 0.377694 | 0 | 0.084746 | 0 | 0 | 0.066998 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.101695 | false | 0 | 0.067797 | 0 | 0.220339 | 0.084746 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
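A sketch of registering a launch mode with the `launcher` decorator defined above; the mode name and handler body are invented for illustration:

```python
from canvas.cli.api import launcher

@launcher(
	'serve',
	description='Start the development server.',
	argspec='[port]',
	init=True,  # perform full initialization before the handler runs
)
def serve_launcher(args):
	port = int(args[0]) if args else 8080
	print('Serving on port %d' % port)
	# Returning False here would make launch_cli print the usage text.
```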
79b1464dc8a1a2223cbbc525bfa7851ed4e2bee9 | 1,408 | py | Python | lego/apps/gallery/migrations/0006_auto_20171210_1610.py | ollfkaih/lego | b15aacaf09efe90e7f984d25b0e7bddbe12647e8 | [
"MIT"
] | 45 | 2017-10-24T12:09:06.000Z | 2021-11-03T21:21:03.000Z | lego/apps/gallery/migrations/0006_auto_20171210_1610.py | ollfkaih/lego | b15aacaf09efe90e7f984d25b0e7bddbe12647e8 | [
"MIT"
] | 980 | 2017-10-24T12:29:07.000Z | 2022-03-31T04:04:31.000Z | lego/apps/gallery/migrations/0006_auto_20171210_1610.py | wahello/lego | a0b02f3abc997fe96326e9c9c05b49847170041b | [
"MIT"
] | 23 | 2018-04-11T16:34:22.000Z | 2021-11-23T12:28:30.000Z | # Generated by Django 2.0 on 2017-12-10 16:10
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("gallery", "0005_auto_20170912_1708")]
operations = [
migrations.AlterField(
model_name="gallery",
name="created_by",
field=models.ForeignKey(
default=None,
editable=False,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="gallery_created",
to=settings.AUTH_USER_MODEL,
),
),
migrations.AlterField(
model_name="gallery",
name="event",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="galleries",
to="events.Event",
),
),
migrations.AlterField(
model_name="gallery",
name="updated_by",
field=models.ForeignKey(
default=None,
editable=False,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="gallery_updated",
to=settings.AUTH_USER_MODEL,
),
),
]
| 29.333333 | 61 | 0.52983 | 132 | 1,408 | 5.477273 | 0.371212 | 0.055325 | 0.077455 | 0.121715 | 0.611342 | 0.547718 | 0.381743 | 0.381743 | 0.381743 | 0.381743 | 0 | 0.034091 | 0.375 | 1,408 | 47 | 62 | 29.957447 | 0.7875 | 0.03054 | 0 | 0.658537 | 1 | 0 | 0.093177 | 0.016875 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.073171 | 0 | 0.146341 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
79b154f9526abf942504a9812110e0bdc124d139 | 1,898 | py | Python | tests/run/test_config_file.py | vincent99/rio | 018dac19be47ee20ae47bcd8eea71c8c4f07a1af | [
"Apache-2.0"
] | 1 | 2019-05-28T11:32:11.000Z | 2019-05-28T11:32:11.000Z | tests/run/test_config_file.py | vincent99/rio | 018dac19be47ee20ae47bcd8eea71c8c4f07a1af | [
"Apache-2.0"
] | null | null | null | tests/run/test_config_file.py | vincent99/rio | 018dac19be47ee20ae47bcd8eea71c8c4f07a1af | [
"Apache-2.0"
] | null | null | null | from os import unlink
from random import randint
import util
import tempfile
def config_setup(stack, *configs):
config_name = "tconfig" + str(randint(1000, 5000))
fp = tempfile.NamedTemporaryFile(delete=False)
for c in configs:
fp.write(bytes(c+"\n", 'utf8'))
fp.close()
util.run(f"rio config create {stack}/{config_name} {fp.name}")
unlink(fp.name)
return config_name
def run_config(stack, config_names):
name = "tsrv" + str(randint(1000, 5000))
fullName = "%s/%s" % (stack, name)
cmd = (f'rio run -n {fullName}')
for c in config_names:
tempdir = ":/temp" + str(randint(100, 999))
cmd += " --config " + c + tempdir
cmd += " nginx"
print(cmd)
util.run(cmd)
util.run(f"rio wait {fullName}")
print(name)
return name
def rio_chk(stack, sname):
fullName = "%s/%s" % (stack, sname)
inspect = util.rioInspect(fullName)
out = []
for item in inspect["configs"]:
out.append(item["source"])
out.sort()
return out
def kube_chk(stack, service_name):
fullName = "%s/%s" % (stack, service_name)
    service_id = util.rioInspect(fullName, "id")
    namespace = service_id.split(":")[0]
obj = util.kubectl(namespace, "deployment", service_name)
out = []
for item in obj['spec']['template']['spec']['volumes']:
if 'configMap' in item:
out.append(str(item['configMap']['name']).split("-")[0])
out.sort()
print(out)
return out
def test_content(stack):
config_name1 = config_setup(stack, "1foo=1bar", "1foo2=1bar2")
config_setup(stack, "2foo=2bar", "2foo1=2bar2")
expect = [config_name1]
expect.sort()
servicename = run_config(stack, expect)
print(stack, servicename)
gotrio = rio_chk(stack, servicename)
assert expect == gotrio
gotk8s = kube_chk(stack, servicename)
assert expect == gotk8s
| 21.325843 | 68 | 0.615385 | 245 | 1,898 | 4.685714 | 0.35102 | 0.027875 | 0.041812 | 0.039199 | 0.054007 | 0 | 0 | 0 | 0 | 0 | 0 | 0.028082 | 0.230769 | 1,898 | 88 | 69 | 21.568182 | 0.758219 | 0 | 0 | 0.105263 | 0 | 0 | 0.134352 | 0.011064 | 0 | 0 | 0 | 0 | 0.035088 | 1 | 0.087719 | false | 0 | 0.070175 | 0 | 0.22807 | 0.070175 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79b1746a37fe8892a2af50ecacc257f9f91b14cd | 1,090 | py | Python | Semana5/lab05/tienda/models.py | SPFelipe/TECSUP-DAE-2021-2 | ec218d0a7fa66a73e3e0a8889e325cf2ce2a74d3 | [
"MIT"
] | null | null | null | Semana5/lab05/tienda/models.py | SPFelipe/TECSUP-DAE-2021-2 | ec218d0a7fa66a73e3e0a8889e325cf2ce2a74d3 | [
"MIT"
] | null | null | null | Semana5/lab05/tienda/models.py | SPFelipe/TECSUP-DAE-2021-2 | ec218d0a7fa66a73e3e0a8889e325cf2ce2a74d3 | [
"MIT"
] | null | null | null | from django.db import models
# Create your models here.
class Categoria(models.Model):
nombre = models.CharField(max_length=200)
pub_date = models.DateTimeField('Fecha Creación')
def __str__(self):
return self.nombre
class Product(models.Model):
categoria = models.ForeignKey(Categoria, on_delete=models.CASCADE)
nombre = models.CharField(max_length=200)
precio = models.DecimalField(max_digits=6, decimal_places=2)
stock = models.IntegerField(default=0)
pub_date = models.DateTimeField('date published')
def __str__(self):
return self.nombre
class Cliente(models.Model):
nombre = models.CharField(max_length=30)
apellido = models.CharField(max_length=30)
dni = models.CharField(max_length=8)
telefono = models.CharField(max_length=9)
direccion = models.CharField(max_length=50)
email = models.EmailField(max_length=100)
fecha_nacimiento = models.DateField("Fecha de Nacimiento")
pub_date = models.DateTimeField('Fecha Creación')
def __str__(self):
return self.nombre+ " " + self.apellido | 34.0625 | 70 | 0.725688 | 136 | 1,090 | 5.617647 | 0.419118 | 0.094241 | 0.164921 | 0.219895 | 0.408377 | 0.371728 | 0.324607 | 0.170157 | 0.170157 | 0.170157 | 0 | 0.022075 | 0.168807 | 1,090 | 32 | 71 | 34.0625 | 0.821192 | 0.022018 | 0 | 0.36 | 0 | 0 | 0.058216 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12 | false | 0 | 0.04 | 0.12 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 2 |
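A usage sketch for these models inside a configured Django project (e.g. `python manage.py shell`), assuming migrations have been applied:

```python
from decimal import Decimal

from django.utils import timezone
from tienda.models import Categoria, Product

cat = Categoria.objects.create(nombre="Bebidas", pub_date=timezone.now())
Product.objects.create(categoria=cat, nombre="Café", precio=Decimal("9.50"),
                       stock=25, pub_date=timezone.now())
print(Product.objects.filter(categoria__nombre="Bebidas").count())  # -> 1
```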
79b4abdae3efe0c4ff65adf1b7ab722b6fbb2d46 | 71 | py | Python | PyUdemy/Day1/variables.py | JoseArtur/phyton-exercices | f3da4447044e445222233960f991fb2e36311131 | [
"MIT"
] | null | null | null | PyUdemy/Day1/variables.py | JoseArtur/phyton-exercices | f3da4447044e445222233960f991fb2e36311131 | [
"MIT"
] | null | null | null | PyUdemy/Day1/variables.py | JoseArtur/phyton-exercices | f3da4447044e445222233960f991fb2e36311131 | [
"MIT"
] | null | null | null | a=input("a= ")
b=input("b= ")
aa=a
a=b
b=aa
print("a=",a)
print("b=",b) | 10.142857 | 14 | 0.507042 | 18 | 71 | 2 | 0.277778 | 0.111111 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.112676 | 71 | 7 | 15 | 10.142857 | 0.571429 | 0 | 0 | 0 | 0 | 0 | 0.138889 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.285714 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
79b4dd93658058c4c08b578922c3ee4d84c4d4e5 | 5,548 | py | Python | vivareal.py | erlancassiano/portal_crawler | bcbda7871d74080b926b0f59c05d813385286173 | [
"MIT"
] | null | null | null | vivareal.py | erlancassiano/portal_crawler | bcbda7871d74080b926b0f59c05d813385286173 | [
"MIT"
] | null | null | null | vivareal.py | erlancassiano/portal_crawler | bcbda7871d74080b926b0f59c05d813385286173 | [
"MIT"
] | null | null | null | import os
import datetime
import csv
import time
import random
from time import sleep
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import StaleElementReferenceException
import undetected_chromedriver as uc
class Vivareal:
timestamp = str(datetime.datetime.now()).replace(".","").replace("-","").replace(":","")
filename = "results_{}".format(timestamp)+".csv"
chromeOptions = uc.ChromeOptions()
#chromeOptions.add_argument('--headless')
driver = uc.Chrome(options=chromeOptions)
def __init__(self):
self.csvCreater()
url = "https://www.vivareal.com.br/aluguel"
self.driver.get(url)
while True:
# driver.implicitly_wait(10)
WebDriverWait(self.driver, 60).until(EC.presence_of_element_located((By.CSS_SELECTOR, 'span.property-card__title.js-cardLink.js-card-title')))
self.ScrollPage()
result_div = self.driver.find_element_by_css_selector(".results-list.js-results-list")
result_cards_list = result_div.find_elements_by_css_selector("article.property-card__container.js-property-card")
for item in result_cards_list:
try:
title = item.find_element_by_css_selector("span.property-card__title.js-cardLink.js-card-title").text
except NoSuchElementException:
title = "-"
try:
address = item.find_element_by_css_selector("span.property-card__address").text
except NoSuchElementException:
address = "-"
try:
price = item.find_element_by_css_selector(".property-card__price.js-property-card-prices.js-property-card__price-small").text
                except NoSuchElementException:
price = "-"
try:
price_details = item.find_element_by_css_selector(".property-card__price-details--condo")
price_details = str(price_details.text).replace("Condomínio:","").strip()
except NoSuchElementException:
price_details = "-"
try:
area = item.find_element_by_css_selector("li.property-card__detail-item.property-card__detail-area")
area = str(area.text).replace(" ","").strip()
except NoSuchElementException:
area = "-"
try:
rooms = item.find_element_by_css_selector("li.property-card__detail-item.property-card__detail-room.js-property-detail-rooms")
rooms = str(rooms.text).replace(" Quarto","").replace("s","").strip()
except NoSuchElementException:
rooms = "-"
try:
garages = item.find_element_by_css_selector("li.property-card__detail-item.property-card__detail-garage.js-property-detail-garages")
garages = str(garages.text).replace("Vaga","").replace("s","").strip()
except NoSuchElementException:
garages = "-"
try:
bathrooms = item.find_element_by_css_selector("li.property-card__detail-item.property-card__detail-bathroom.js-property-detail-bathroom")
bathrooms = str(bathrooms.text).replace(" Banheiro","").replace("s","").strip()
except NoSuchElementException:
bathrooms = "-"
self.csvupdate(title,address,price,price_details,area,rooms,bathrooms,garages)
print(title,"\n",address,"\n",price,"\n",price_details,"\n",area,"\n",rooms,"\n",bathrooms,"\n",garages,"\n\n")
self.driver.find_element_by_xpath("//a[@class='js-change-page' and contains(text(), 'Próxima página')]").click()
time.sleep(5)
def csvCreater(self):
with open(self.filename,'w' ,newline='') as file:
fieldNames = ['Title','Address','Rent','Admin Fee','Area','Rooms','Bathrooms','Parking']
thewriter = csv.DictWriter(file, fieldnames=fieldNames)
thewriter.writeheader()
def csvupdate(self,title,address,price,price_details,area,rooms,bathrooms,garages):
with open(self.filename,'a' ,newline='') as file:
fieldNames = ['Title','Address','Rent','Admin Fee','Area','Rooms','Bathrooms','Parking']
thewriter = csv.DictWriter(file, fieldnames=fieldNames)
thewriter.writerow({'Title': str(title),'Address': str(address),'Rent': price,'Admin Fee': price_details,'Area': area,'Rooms':rooms,'Bathrooms': bathrooms,'Parking': garages})
def ScrollPage(self):
lenOfPage = self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight);var lenOfPage=document.body.scrollHeight;return lenOfPage;")
match=False
while(match==False):
lastCount = lenOfPage
sleep(3)
lenOfPage = self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight);var lenOfPage=document.body.scrollHeight;return lenOfPage;")
if lastCount==lenOfPage:
match=True
bot = Vivareal() | 53.864078 | 188 | 0.605443 | 568 | 5,548 | 5.734155 | 0.265845 | 0.062634 | 0.043905 | 0.044212 | 0.427694 | 0.351551 | 0.351551 | 0.351551 | 0.351551 | 0.272644 | 0 | 0.001975 | 0.270007 | 5,548 | 103 | 189 | 53.864078 | 0.802222 | 0.012076 | 0 | 0.238636 | 0 | 0.056818 | 0.217739 | 0.149312 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.136364 | 0 | 0.238636 | 0.011364 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
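`ScrollPage` above implements the usual scroll-until-height-stops-growing loop; a standalone sketch of the same pattern as a reusable helper, with a `max_rounds` guard the original loop lacks (any Selenium driver instance works):

```python
from time import sleep

def scroll_to_bottom(driver, pause=3.0, max_rounds=50):
    """Scroll until document height stops growing (or max_rounds is hit)."""
    script = ("window.scrollTo(0, document.body.scrollHeight);"
              "return document.body.scrollHeight;")
    last = driver.execute_script(script)
    for _ in range(max_rounds):
        sleep(pause)
        height = driver.execute_script(script)
        if height == last:  # no new content loaded
            break
        last = height
```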
79b52191dc7ea7de4c6237513c7af4e22ce1b28f | 3,339 | py | Python | seahub/base/profile.py | gzy403999903/seahub | 992e5852579a6d9e0cfdaf18c77ce0191cb64449 | [
"Apache-2.0"
] | null | null | null | seahub/base/profile.py | gzy403999903/seahub | 992e5852579a6d9e0cfdaf18c77ce0191cb64449 | [
"Apache-2.0"
] | 6 | 2019-12-13T09:55:45.000Z | 2022-03-11T23:47:29.000Z | seahub/base/profile.py | gzy403999903/seahub | 992e5852579a6d9e0cfdaf18c77ce0191cb64449 | [
"Apache-2.0"
] | 1 | 2019-05-16T06:58:16.000Z | 2019-05-16T06:58:16.000Z | # Copyright (c) 2012-2016 Seafile Ltd.
"""
The MIT License (MIT)
Copyright (c) 2013 Omar Bohsali
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
try:
import cProfile as profile
except ImportError:
import profile
import pstats
from cStringIO import StringIO
from django.conf import settings
class ProfilerMiddleware(object):
"""
    Simple profile middleware to profile django views. To run it, add ?__prof__=true to
the URL like this:
http://localhost:8000/view/?__prof__=true
Optionally pass the following to modify the output:
?sort => Sort the output by a given metric. Default is time.
See http://docs.python.org/2/library/profile.html#pstats.Stats.sort_stats
for all sort options.
quick reference:
- time: sort by function execution time
- cum: the cumulative time spent in this and all subfunctions (from invocation till exit). This figure is accurate even for recursive functions.
?count => The number of rows to display. Default is 100.
?fullpath=<true|false> default false. True to show full path of the source file of each function
?callee=<true|false> default false. True to show the time of a function spent on its sub function.
This is adapted from an example found here:
http://www.slideshare.net/zeeg/django-con-high-performance-django-presentation.
"""
def can(self, request):
return settings.DEBUG and request.GET.get('__prof__', False) == 'true'
def process_view(self, request, callback, callback_args, callback_kwargs):
if self.can(request):
self.profiler = profile.Profile()
args = (request,) + callback_args
return self.profiler.runcall(callback, *args, **callback_kwargs)
def process_response(self, request, response):
if self.can(request):
self.profiler.create_stats()
io = StringIO()
stats = pstats.Stats(self.profiler, stream=io)
if not request.GET.get('fullpath', False):
stats.strip_dirs()
stats.sort_stats(request.GET.get('sort', 'time'))
if request.GET.get('callee', False):
stats.print_callees()
stats.print_stats(int(request.GET.get('count', 100)))
response.content = '<pre>%s</pre>' % io.getvalue()
return response
| 38.825581 | 152 | 0.7059 | 466 | 3,339 | 5.015021 | 0.472103 | 0.037655 | 0.027813 | 0.017972 | 0.050492 | 0.050492 | 0.02653 | 0 | 0 | 0 | 0 | 0.008785 | 0.215933 | 3,339 | 85 | 153 | 39.282353 | 0.883881 | 0.620545 | 0 | 0.071429 | 0 | 0 | 0.043771 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.107143 | false | 0 | 0.214286 | 0.035714 | 0.464286 | 0.071429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
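A sketch of wiring this middleware up; it uses the legacy (pre-Django 1.10) `process_view`/`process_response` hooks, so the settings entry below assumes the old `MIDDLEWARE_CLASSES` style, with the dotted path taken from this file's location:

```python
# settings.py
DEBUG = True  # the middleware is a no-op unless settings.DEBUG is true

MIDDLEWARE_CLASSES = (
    # ... existing middleware ...
    'seahub.base.profile.ProfilerMiddleware',
)

# Then profile any view with, e.g.:
#   GET /view/?__prof__=true&sort=cum&count=50&callee=true
```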
79b68a6d1a405ace81c0d0f659613828d57db047 | 3,203 | py | Python | data/make_stterror_data/tts.py | gcunhase/StackedDeBERT | 82777114fd99cafc6e2a3d760e774f007c563245 | [
"MIT"
] | 32 | 2020-01-03T09:53:03.000Z | 2021-09-07T07:23:26.000Z | data/make_stterror_data/tts.py | gcunhase/StackedDeBERT | 82777114fd99cafc6e2a3d760e774f007c563245 | [
"MIT"
] | null | null | null | data/make_stterror_data/tts.py | gcunhase/StackedDeBERT | 82777114fd99cafc6e2a3d760e774f007c563245 | [
"MIT"
] | 6 | 2020-01-21T06:50:21.000Z | 2021-01-22T08:04:00.000Z | import data.make_stterror_data.utils as utils
import os
import sys
import subprocess
# TTS imports
from gtts import gTTS
import pyttsx3
# sys.path.append("~/PycharmProjects/pyfestival") # https://github.com/techiaith/pyfestival/pull/4
# import festival
__author__ = 'Gwena Cunha'
"""
Text-To-Speech Module
"""
class TTS:
def __init__(self, data_dir="", result_dir="", audio_type=".wav"):
print("Initializing TTS Module")
self.audio_type = audio_type
self.project_dir = utils.project_dir_name()
self.data_dir = data_dir
self.result_dir = result_dir
utils.ensure_dir(self.project_dir + self.result_dir)
def set_data_dir(self, data_dir):
self.data_dir = data_dir
def set_result_dir(self, result_dir):
self.result_dir = result_dir
def read_text(self, text, sentence_id, tts_type="gtts"):
if "macsay" in tts_type: # Mac
self.audio_from_mac_say(text, sentence_id)
else: # google gtts
self.audio_from_google(text, sentence_id)
def audio_from_google(self, text, sentence_id):
gtts = gTTS(text=text, lang='en') # , slow=False)
partial_saved_audio_filename = self.project_dir + self.result_dir + "gtts_" + sentence_id
tmp_saved_audio_filename = partial_saved_audio_filename + "_tmp" + self.audio_type
final_saved_audio_filename = partial_saved_audio_filename + self.audio_type
gtts.save(tmp_saved_audio_filename)
        # fix_missing_riff_header = "ffmpeg -i " + tmp_saved_audio_filename + " -y " + final_saved_audio_filename
# Making ffmpeg quieter (less verbose): ffmpeg -nostats -loglevel 0 -i 2.mp3 ~/PycharmProjects/STTError/assets/2.mp3
subprocess.call(
["ffmpeg", "-nostats", "-loglevel", "0", "-i", tmp_saved_audio_filename, "-y", final_saved_audio_filename])
# os.system("ffmpeg -nostats -loglevel 0 -i {} -y {}".format(tmp_saved_audio_filename, final_saved_audio_filename))
# Remove tmp_saved_audio_filename
subprocess.call(["rm", tmp_saved_audio_filename])
return final_saved_audio_filename
def audio_from_mac_say(self, text, sentence_id):
""" Mac's say command: Mac Systems
"""
for voice in ['Fred']: # ['Alex', 'Fred', 'Victoria']
partial_saved_audio_filename = self.project_dir + self.result_dir + "macsay_" + sentence_id
tmp_saved_audio_filename = partial_saved_audio_filename + "_tmp.aiff"
final_saved_audio_filename = partial_saved_audio_filename + self.audio_type
subprocess.call(["say", "-o", tmp_saved_audio_filename, "-v", voice, text])
subprocess.call(["ffmpeg", "-nostats", "-loglevel", "0", "-i", tmp_saved_audio_filename, "-y",
final_saved_audio_filename])
# os.system("say -o {} -v {} {}".format(tmp_saved_audio_filename, voice, text))
# os.system("ffmpeg -nostats -loglevel 0 -i {} -y {}".format(tmp_saved_audio_filename, final_saved_audio_filename))
# Remove tmp_saved_audio_filename
subprocess.call(["rm", tmp_saved_audio_filename])
return final_saved_audio_filename
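# Minimal usage sketch (directory names are assumptions; requires ffmpeg on the
# PATH, and a macOS system for tts_type="macsay"):
# tts = TTS(data_dir="data/", result_dir="results/")
# tts.read_text("hello world", sentence_id="0", tts_type="gtts")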
| 42.706667 | 127 | 0.675617 | 417 | 3,203 | 4.815348 | 0.242206 | 0.144422 | 0.25996 | 0.146414 | 0.552291 | 0.49253 | 0.454183 | 0.454183 | 0.454183 | 0.454183 | 0 | 0.004356 | 0.211677 | 3,203 | 74 | 128 | 43.283784 | 0.790891 | 0.252264 | 0 | 0.227273 | 0 | 0 | 0.062367 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.136364 | false | 0 | 0.136364 | 0 | 0.340909 | 0.022727 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
79b7a93216b116c4fe2b33e6f3183397b498a763 | 1,047 | py | Python | year2019/day21/code.py | romainvigneres/advent_of_code | 2ae38617706cb1041ab3950cdec3713176dc3633 | [
"MIT"
] | null | null | null | year2019/day21/code.py | romainvigneres/advent_of_code | 2ae38617706cb1041ab3950cdec3713176dc3633 | [
"MIT"
] | null | null | null | year2019/day21/code.py | romainvigneres/advent_of_code | 2ae38617706cb1041ab3950cdec3713176dc3633 | [
"MIT"
] | null | null | null | from year2019.intcode_v2 import Intcode
from common import input_integer_sep
def part_one(inp_list):
program1 = (
"NOT A J\n"
"NOT B T\n"
"OR T J\n"
"NOT C T\n"
"OR T J\n"
"AND D J\n"
"WALK\n"
)
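    # Jump (J) when any of the next three tiles (A, B, C) is a hole and the
    # landing tile four ahead (D) is ground. Any output above the ASCII range
    # is the final hull-damage value rather than a printable character.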
p = Intcode(
inp_list,
[ord(char) for char in program1]
)
while not p.done:
out = p.run_until_output()
if out > 999:
return out
def part_two(inp_list):
program2 = (
"NOT C J\n"
"NOT B T\n"
"OR T J\n"
"NOT A T\n"
"OR T J\n"
"AND D J\n"
"NOT E T\n"
"NOT T T\n"
"OR H T\n"
"AND T J\n"
"RUN\n"
)
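    # Extended (RUN-mode) sensors: same jump condition, but only when the droid
    # can continue after landing on D -- either step to E or jump again to H.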
p = Intcode(
inp_list,
[ord(char) for char in program2]
)
while not p.done:
out = p.run_until_output()
if out > 257:
return out
def get_result():
inp = input_integer_sep("2019", "21")
print("Part one", part_one(inp.copy()))
print("Part two", part_two(inp.copy()))
| 20.529412 | 43 | 0.472779 | 163 | 1,047 | 2.92638 | 0.312883 | 0.037736 | 0.052411 | 0.041929 | 0.406709 | 0.406709 | 0.406709 | 0.406709 | 0.406709 | 0.406709 | 0 | 0.0336 | 0.403056 | 1,047 | 50 | 44 | 20.94 | 0.7296 | 0 | 0 | 0.391304 | 0 | 0 | 0.164279 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.065217 | false | 0 | 0.043478 | 0 | 0.152174 | 0.043478 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79b7cbd53300df46238acfe16835276ec2f45c5e | 2,252 | py | Python | july/management/commands/fix_locations.py | kanika-art/julython.org | 557b29e5d69a772b684fb6073a616f06b97d0a48 | [
"MIT"
] | 7 | 2015-07-01T18:01:40.000Z | 2019-12-27T02:04:07.000Z | july/management/commands/fix_locations.py | kanika-art/julython.org | 557b29e5d69a772b684fb6073a616f06b97d0a48 | [
"MIT"
] | 6 | 2015-07-01T11:32:34.000Z | 2021-06-10T20:35:32.000Z | july/management/commands/fix_locations.py | kanika-art/julython.org | 557b29e5d69a772b684fb6073a616f06b97d0a48 | [
"MIT"
] | 10 | 2015-07-01T11:20:35.000Z | 2020-10-02T18:58:07.000Z |
import logging
from django.core.management.base import BaseCommand
from django.template.defaultfilters import slugify
from july.models import User
from july.people.models import Location
from july.utils import check_location
from optparse import make_option
class Command(BaseCommand):
help = 'fix locations'
option_list = BaseCommand.option_list + (
make_option(
'--commit',
action='store_true',
dest='commit',
default=False,
help='Actually move the items.'),
)
def handle(self, *args, **options):
commit = options['commit']
empty = 0
fine = 0
fixable = 0
bad = []
for location in Location.objects.all():
user_count = User.objects.filter(location=location).count()
if not user_count:
logging.info("Empty location: %s", location)
if commit:
location.delete()
logging.info('Deleted')
empty += 1
continue
l = check_location(location.name)
if l == location.name:
logging.info('Location fine: %s', location)
fine += 1
continue
if not commit:
if l:
fixable += 1
else:
bad.append((location, user_count))
continue
elif l is not None:
new_loc = Location.create(l)
User.objects.filter(location=location).update(location=new_loc)
user_count = User.objects.filter(location=location).count()
                if user_count:
                    logging.error("missed users!")
                else:
                    location.delete()
elif l is None:
logging.info('Bad location: %s', location)
location.approved = False
location.save()
if not commit:
            for loc, count in bad:
                logging.error('Bad Loc: %s, count: %s', loc, count)
logging.info('Empty: %s, Fine: %s, fixable: %s',
empty, fine, fixable)
logging.info('Add --commit to fix locations')
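# Dry-run sketch: `python manage.py fix_locations` only reports empty, fine,
# fixable and bad locations; add `--commit` to actually delete, merge and
# flag them.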
| 32.637681 | 79 | 0.517318 | 232 | 2,252 | 4.961207 | 0.349138 | 0.057341 | 0.044309 | 0.065161 | 0.146829 | 0.118158 | 0.118158 | 0.118158 | 0.118158 | 0.118158 | 0 | 0.00436 | 0.388988 | 2,252 | 68 | 80 | 33.117647 | 0.832122 | 0 | 0 | 0.216667 | 0 | 0 | 0.098179 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.016667 | false | 0 | 0.116667 | 0 | 0.183333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79b9401f776d3b7758cf615a45ec455370cc2331 | 42 | py | Python | tests/identifiers/__init__.py | jparsai/cvejob | 8f9462a1ecdf1d4de877ac5f44e772239ffcb379 | [
"Apache-2.0"
] | 8 | 2019-09-25T14:45:28.000Z | 2021-11-08T10:30:03.000Z | tests/identifiers/__init__.py | jparsai/cvejob | 8f9462a1ecdf1d4de877ac5f44e772239ffcb379 | [
"Apache-2.0"
] | 113 | 2018-07-10T12:58:16.000Z | 2020-12-09T22:33:15.000Z | tests/identifiers/__init__.py | jparsai/cvejob | 8f9462a1ecdf1d4de877ac5f44e772239ffcb379 | [
"Apache-2.0"
] | 12 | 2018-07-10T11:00:02.000Z | 2021-01-27T12:19:56.000Z | """Tests for package name identifiers."""
| 21 | 41 | 0.714286 | 5 | 42 | 6 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.119048 | 42 | 1 | 42 | 42 | 0.810811 | 0.833333 | 0 | null | 0 | null | 0 | 0 | null | 0 | 0 | 0 | null | 1 | null | true | 0 | 0 | null | null | null | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 5 |
79b9db085b3980a703000d7ded2a0b497ec1fcdd | 6,338 | py | Python | core/feature/gps_location_daywise/gps_location_daywise.py | MD2Korg/CerebralCortex-DataAnalysis | 73f5ea2430bc7c23de422dccb7b65ef9f8917595 | [
"BSD-2-Clause"
] | 1 | 2018-04-24T18:11:24.000Z | 2018-04-24T18:11:24.000Z | core/feature/gps_location_daywise/gps_location_daywise.py | Boris69bg/CerebralCortex-DataAnalysis | 49565bdff348d69153bd5d3a37e73f1645f82b32 | [
"BSD-2-Clause"
] | 10 | 2018-03-13T19:04:09.000Z | 2018-05-12T01:40:03.000Z | core/feature/gps_location_daywise/gps_location_daywise.py | Boris69bg/CerebralCortex-DataAnalysis | 49565bdff348d69153bd5d3a37e73f1645f82b32 | [
"BSD-2-Clause"
] | 42 | 2017-12-07T17:08:14.000Z | 2019-06-02T08:25:12.000Z | # Copyright (c) 2018, MD2K Center of Excellence
# - Alina Zaman <azaman@memphis.edu>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from cerebralcortex.core.datatypes.datastream import DataPoint
from datetime import datetime, timedelta, time
from core.computefeature import ComputeFeatureBase
from typing import List
import traceback
feature_class_name = 'GpsLocationDaywise'
GPS_EPISODES_AND_SEMANTIC_LOCATION_STREAM = "org.md2k.data_analysis.gps_episodes_and_semantic_location_from_model"
class GpsLocationDaywise(ComputeFeatureBase):
"""
Produce feature from gps location from
"org.md2k.data_analysis.gps_episodes_and_semantic_location" data stream. One data
point is split into two when it starts from one day and ends in other day. In that way,
we are getting semantic location of daily data
"""
def listing_all_gps_location_daywise(self, user_id: str, all_days: List[str]):
"""
Produce and save the gps location of participant's in day basis
:param str user_id: UUID of the stream owner
:param List(str) all_days: All days of the user in the format 'YYYYMMDD'
"""
self.CC.logging.log('%s started processing for user_id %s' %
(self.__class__.__name__, str(user_id)))
gps_data = []
        stream_ids = self.get_latest_stream_id(
            user_id, GPS_EPISODES_AND_SEMANTIC_LOCATION_STREAM)
for stream_id in stream_ids:
for day in all_days:
location_data_stream = \
self.CC.get_stream(stream_id["identifier"], user_id, day, localtime=False)
for data in set(location_data_stream.data):
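                    # Split any episode that crosses midnight into one DataPoint
                    # per calendar day, so downstream features see daily segments.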
if data.start_time.date() != data.end_time.date():
temp = DataPoint(data.start_time, data.end_time, data.offset, data.sample)
start_day = data.start_time.date()
end_time = datetime.combine(start_day, time.max)
end_time = end_time.replace(tzinfo=data.start_time.tzinfo)
temp.end_time = end_time
gps_data.append(temp)
end_day = data.end_time.date()
start_day += timedelta(days=1)
while start_day != end_day:
temp = DataPoint(data.start_time, data.end_time, data.offset, data.sample)
start_time = datetime.combine(start_day, time.min)
start_time = start_time.replace(tzinfo=data.start_time.tzinfo)
temp.start_time = start_time
end_time = datetime.combine(start_day, time.max)
end_time = end_time.replace(tzinfo=data.start_time.tzinfo)
temp.end_time = end_time
gps_data.append(temp)
start_day += timedelta(days=1)
temp = DataPoint(data.start_time, data.end_time, data.offset, data.sample)
start_time = datetime.combine(start_day, time.min)
start_time = start_time.replace(tzinfo=data.start_time.tzinfo)
temp.start_time = start_time
gps_data.append(temp)
else:
gps_data.append(data)
try:
if len(gps_data):
streams = self.CC.get_user_streams(user_id)
for stream_name, stream_metadata in streams.items():
                    if stream_name == GPS_EPISODES_AND_SEMANTIC_LOCATION_STREAM:
self.store_stream(filepath="gps_location_daywise.json",
input_streams=[stream_metadata],
user_id=user_id,
data=gps_data)
break
except Exception as e:
self.CC.logging.log("Exception:", str(e))
self.CC.logging.log(traceback.format_exc())
self.CC.logging.log('%s finished processing for user_id %s saved %d '
'data points' %
(self.__class__.__name__, str(user_id),
len(gps_data)))
def process(self, user_id: str, all_days: List[str]):
"""
        Main processing function inherited from ComputeFeatureBase
:param str user_id: UUID of the user
:param List(str) all_days: List of days with format 'YYYYMMDD'
"""
if self.CC is not None:
self.CC.logging.log("Processing Working Days")
self.listing_all_gps_location_daywise(user_id, all_days)
| 48.015152 | 114 | 0.631745 | 779 | 6,338 | 4.947368 | 0.297818 | 0.044369 | 0.030358 | 0.028542 | 0.395693 | 0.314997 | 0.251168 | 0.239232 | 0.225221 | 0.199792 | 0 | 0.002032 | 0.301041 | 6,338 | 131 | 115 | 48.381679 | 0.867946 | 0.304986 | 0 | 0.266667 | 0 | 0 | 0.057998 | 0.021749 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026667 | false | 0 | 0.186667 | 0 | 0.226667 | 0.013333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79bc0b508670cb51847c1e5dbe41b345258f4db3 | 263 | py | Python | Python Advanced/Advanced/Multidimensional Lists/Lab/Task04.py | IvanTodorovBG/SoftUni | 7b667f6905d9f695ab1484efbb02b6715f6d569e | [
"MIT"
] | 1 | 2022-03-16T10:23:04.000Z | 2022-03-16T10:23:04.000Z | Python Advanced/Advanced/Multidimensional Lists/Lab/Task04.py | IvanTodorovBG/SoftUni | 7b667f6905d9f695ab1484efbb02b6715f6d569e | [
"MIT"
] | null | null | null | Python Advanced/Advanced/Multidimensional Lists/Lab/Task04.py | IvanTodorovBG/SoftUni | 7b667f6905d9f695ab1484efbb02b6715f6d569e | [
"MIT"
] | null | null | null | rows, columns = [int(x) for x in input().split(", ")]
matrix = [[int(i) for i in input().split()] for _ in range(rows)]
for column in range(columns):
sum_column = 0
for row in range(rows):
sum_column += matrix[row][column]
print(sum_column)
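# Example: for input "2, 3" followed by rows "1 2 3" and "4 5 6", this prints
# 5, 7 and 9 -- one per-column sum per line.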
| 26.3 | 65 | 0.619772 | 42 | 263 | 3.785714 | 0.380952 | 0.132075 | 0.150943 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004854 | 0.21673 | 263 | 9 | 66 | 29.222222 | 0.76699 | 0 | 0 | 0 | 0 | 0 | 0.007605 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
79bc48970d40d17cad24372399c2613c8f57a896 | 2,939 | py | Python | benchmark/invocation.py | zanderhavgaard/thesis-code | d9f193e622b8b98ec88c33006f8e0e1dbb3d17fc | [
"MIT"
] | null | null | null | benchmark/invocation.py | zanderhavgaard/thesis-code | d9f193e622b8b98ec88c33006f8e0e1dbb3d17fc | [
"MIT"
] | 2 | 2020-04-28T07:59:30.000Z | 2020-05-17T15:36:04.000Z | benchmark/invocation.py | zanderhavgaard/thesis-code | d9f193e622b8b98ec88c33006f8e0e1dbb3d17fc | [
"MIT"
] | null | null | null | import sys
import uuid
import psutil
import time
from datetime import datetime
# remove for production
from pprint import pprint
from functools import reduce
import function_lib as lib
class Invocation:
def __init__(self, exp_uuid: str, root: str, data: dict):
self.exp_id = exp_uuid
self.root_identifier = root
# parse data to self
self.parse_data(data)
def get_data(self):
return self.__dict__
def dev_print(self):
pprint(self.get_data())
def parse_data(self, data: dict):
        for key in data:
            setattr(self, key, data[key])
# invocation can be either a success or an error, this will be marked
        if 'error' in data:
self.is_error = True
self.type = lib.str_replace(self.error['type'],[('\'',''),('\"','')])
self.trace = lib.str_replace(self.error['trace'],[('\'',''),('\"','')])
self.message = lib.str_replace( self.error['message'], [('\'',''),('\"','')])
delattr(self, 'error')
else:
self.is_error = False
self.execution_total = self.execution_end - self.execution_start
self.invocation_total = self.invocation_end - self.invocation_start
def create_monolith_query(self, invo_dict:dict):
keys = 'exp_id,invo_id,seed,function_argument,function_called,monolith_result'
values = """'{0}','{1}',{2},{3},'{4}','{5}'""".format(
self.exp_id,
invo_dict['identifier'],
invo_dict.pop('seed'),
invo_dict.pop('function_argument'),
invo_dict.pop('function_called'),
invo_dict.pop('monolith_result'))
if 'process_time_matrix' in invo_dict:
keys += ',process_time_matrix,running_time_matrix'
values += """,{0},{1}""".format(invo_dict.pop('process_time_matrix'),invo_dict.pop('running_time_matrix'))
return [f'INSERT INTO Monolith ({keys}) VALUES ({values});']
def get_query_string(self):
key_values = self.__dict__.copy()
monolith = [] if 'monolith_result' not in key_values else self.create_monolith_query(key_values)
is_error = key_values.pop('is_error')
        # Drop attributes whose value is None.
        for attr, value in key_values.copy().items():
            if value is None:
                key_values.pop(attr)
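        # Fold the remaining attributes into comma-joined column and value
        # strings; string values are single-quoted, everything else is
        # interpolated verbatim. Note that values go straight into the SQL
        # text, so inputs are assumed to be trusted.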
(keys,vals) = reduce(lambda x,y: ( f'{x[0]}{y[0]},', f'{x[1]}{y[1]},') if not isinstance(y[1],str)
else ( f'{x[0]}{y[0]},', f"""{x[1]}'{y[1]}',""") ,[('','')] + list(key_values.items()))
return ['INSERT INTO {0} ({1}) VALUES ({2});'.format('Error' if is_error else 'Invocation', keys[:-1], vals[:-1])]+monolith
| 40.819444 | 131 | 0.543042 | 361 | 2,939 | 4.204986 | 0.277008 | 0.047431 | 0.043478 | 0.033597 | 0.056653 | 0.013175 | 0.013175 | 0.013175 | 0.013175 | 0.013175 | 0 | 0.011667 | 0.300102 | 2,939 | 71 | 132 | 41.394366 | 0.7263 | 0.036747 | 0 | 0 | 0 | 0 | 0.171621 | 0.04954 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0.019608 | 0.156863 | 0.019608 | 0.352941 | 0.058824 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79bc840d240556a6ff5b245bea2de77b90bf2da0 | 470 | py | Python | mysite/news/forms.py | rsg33/testsite | 939e5c25f2e128c30d4a8593337059971587dd3c | [
"MIT"
] | null | null | null | mysite/news/forms.py | rsg33/testsite | 939e5c25f2e128c30d4a8593337059971587dd3c | [
"MIT"
] | null | null | null | mysite/news/forms.py | rsg33/testsite | 939e5c25f2e128c30d4a8593337059971587dd3c | [
"MIT"
] | null | null | null | from django import forms
from .models import News
class NewsForm(forms.ModelForm):
class Meta:
model = News
# fields = '__all__'
fields = ['title', 'content', 'is_published', 'category']
widgets = {
'title': forms.TextInput(attrs={'class': 'form-control'}),
'content': forms.Textarea(attrs={'class': 'form-control', 'rows': 5}),
'category': forms.Select(attrs={'class': 'form-control'})
} | 33.571429 | 82 | 0.574468 | 48 | 470 | 5.520833 | 0.5625 | 0.113208 | 0.158491 | 0.237736 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002865 | 0.257447 | 470 | 14 | 83 | 33.571429 | 0.756447 | 0.038298 | 0 | 0 | 0 | 0 | 0.237251 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.181818 | 0 | 0.363636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
79bcabef33a714ea5bd9e55eb07ea14a99365d51 | 4,123 | py | Python | src/pymor/tools/io/vtk.py | kinnala/pymor | 9d2a8ee5f7a71482e62952257332d269d50678e9 | [
"Unlicense"
] | 2 | 2022-03-22T11:47:12.000Z | 2022-03-22T11:48:23.000Z | src/pymor/tools/io/vtk.py | kinnala/pymor | 9d2a8ee5f7a71482e62952257332d269d50678e9 | [
"Unlicense"
] | 14 | 2022-01-05T09:25:11.000Z | 2022-03-31T17:07:10.000Z | src/pymor/tools/io/vtk.py | moro1111/pymor | aa03f2521ee3c7b8a9e7da4cb109caea4c788b29 | [
"Unlicense"
] | 1 | 2022-03-28T10:58:18.000Z | 2022-03-28T10:58:18.000Z | # This file is part of the pyMOR project (https://www.pymor.org).
# Copyright pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (https://opensource.org/licenses/BSD-2-Clause)
from pymor.core.config import config
config.require('VTKIO')
from pathlib import Path
import meshio
from xml.etree.ElementTree import fromstring
from collections import OrderedDict
from xmljson import BadgerFish
from lxml import etree
from pymor.core.exceptions import IOLibsMissing
def _read_collection(xml, metadata_key):
collection = xml['VTKFile']['Collection']
files = collection['DataSet']
data = [(fl[f'@{metadata_key}'], _read_single(fl['@file'])) for fl in files]
data.sort(key=lambda t: t[0])
return data
def _read_single(filename):
mesh = meshio.read(filename)
assert len(mesh.points)
return mesh
def _get_collection_data(filename):
path = Path(filename)
assert path.is_file()
bf = BadgerFish(dict_type=OrderedDict)
return path, bf.data(fromstring(open(path, 'rb').read()))
def _get_vtk_type(path):
"""Parse given file until a VTKFile element is found.
We use the incremental event emitting parser here since we can expect to encounter appended
binary data in the xml which lxml cannot parse.
Parameters
----------
path
vtk file to peek into
Returns
-------
None if no VTKFile element found, else the type attribute of the VTKFile element
"""
parser = etree.XMLPullParser(events=('start',))
with open(path, 'rb') as xml:
        for line in xml.readlines():
            parser.feed(line)
for action, element in parser.read_events():
if element.tag == 'VTKFile':
return element.get('type')
return None
def read_vtkfile(filename, metadata_key='timestep'):
"""Try to read a given file into a Sequence of meshio.Mesh instances
Parameters
----------
metadata_key
Which metadata to extract and return alongside the meshio.Mesh instances.
Returns
-------
A list of (metadata_value, meshio.Mesh) tuples. The length of the list is either 1 for
a singular vtk/vtu/vtp input file (None is returned as metadata),
or however many members are in the collection file (pvd).
"""
from pymor.tools.io import change_to_directory
vtk_type = _get_vtk_type(filename)
if vtk_type == 'Collection':
path, xml = _get_collection_data(filename)
with change_to_directory(path.parent):
return _read_collection(xml, metadata_key=metadata_key)
    return [(None, _read_single(filename))]
def write_vtk_collection(filename_base, meshes, metadata=None):
"""Output grid-associated data in vtk format
    Parameters
    ----------
    filename_base
common component for output files in collection
meshes
Sequence of meshio.Mesh objects
metadata
dict of {key1: sequence1, key2: sequence2} where sequence must be of len(meshes) or len == 1
currently supported keys are "timestep", "name", "group" and "part"
used to describe datapoints in Vtk collection file
defaults to { 'timestep': list(range(len(meshes))) }
Returns
-------
full filename of saved file
"""
if not config.HAVE_VTKIO:
raise IOLibsMissing()
from pyevtk.vtk import VtkGroup
fn_tpl = '{}_{:08d}.vtu'
metadata = metadata or {'timestep': list(range(len(meshes)))}
def _meta(key, i):
if key in metadata.keys():
return metadata[key][0] if len(metadata[key]) == 1 else metadata[key][i]
# carry over defaults from pyevtk to not break backwards compat
return {'timestep': 0, 'group': '', 'name': '', 'part': '0'}[key]
group = VtkGroup(filename_base)
for i, mesh in enumerate(meshes):
fn = fn_tpl.format(filename_base, i)
mesh.write(fn)
group.addFile(filepath=fn, sim_time=_meta('timestep', i), group=_meta('group', i), name=_meta('name', i),
part=_meta('part', i))
group.save()
return f'{filename_base}.pvd'
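# Round-trip sketch (hypothetical file name; `meshes` is a sequence of
# meshio.Mesh objects):
# pvd = write_vtk_collection('solution', meshes, metadata={'timestep': [0.0, 0.1]})
# read_vtkfile(pvd)  # -> [(timestep, mesh), ...] sorted by timestep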
| 32.464567 | 113 | 0.66408 | 551 | 4,123 | 4.865699 | 0.352087 | 0.036927 | 0.00746 | 0.01865 | 0.040283 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004717 | 0.228717 | 4,123 | 126 | 114 | 32.722222 | 0.838365 | 0.369634 | 0 | 0 | 0 | 0 | 0.06953 | 0 | 0 | 0 | 0 | 0 | 0.033898 | 1 | 0.118644 | false | 0 | 0.169492 | 0 | 0.457627 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79be6504986834ffb4fc631ea1dcce3c139c7fb4 | 161 | py | Python | wsmode/exceptions.py | 29527/OKExPyWebsocket | d084373e0bf18ca533bcc8f4fc1ba051d6be0209 | [
"MIT"
] | 2 | 2021-08-20T10:01:22.000Z | 2021-11-07T21:41:35.000Z | wsmode/exceptions.py | 29527/OKExPyWebsocket | d084373e0bf18ca533bcc8f4fc1ba051d6be0209 | [
"MIT"
] | null | null | null | wsmode/exceptions.py | 29527/OKExPyWebsocket | d084373e0bf18ca533bcc8f4fc1ba051d6be0209 | [
"MIT"
] | 3 | 2021-08-18T09:07:15.000Z | 2022-03-11T08:09:06.000Z | class MessageTypeNotExist(Exception):
"""
    Message type does not exist.
"""
pass
class TopicMessageTypeNotExist(Exception):
"""
    Topic message type does not exist.
"""
pass
| 10.733333 | 42 | 0.590062 | 10 | 161 | 9.5 | 0.7 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.298137 | 161 | 14 | 43 | 11.5 | 0.840708 | 0.10559 | 0 | 0.5 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.5 | 0 | 0 | 0.5 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 3 |
79bf7da39552fc06aca988e7aec5e90f151e4f97 | 159 | py | Python | cart/admin.py | shaongitt/boihut | d93c6b503dd7ad4f37dc572a6dec21f593bf7b35 | [
"BSD-2-Clause"
] | null | null | null | cart/admin.py | shaongitt/boihut | d93c6b503dd7ad4f37dc572a6dec21f593bf7b35 | [
"BSD-2-Clause"
] | null | null | null | cart/admin.py | shaongitt/boihut | d93c6b503dd7ad4f37dc572a6dec21f593bf7b35 | [
"BSD-2-Clause"
] | null | null | null | from django.contrib import admin
from .models import Cart, CartItems
# Register your models here.
admin.site.register(CartItems)
admin.site.register(Cart)
| 26.5 | 35 | 0.792453 | 22 | 159 | 5.727273 | 0.545455 | 0.142857 | 0.269841 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.125786 | 159 | 5 | 36 | 31.8 | 0.906475 | 0.163522 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.5 | 0 | 0.5 | 0 | 1 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 5 |
79c3715dd99e77bde511c274c350ef404bb7cf2e | 1,887 | py | Python | models/vanilla_cnn.py | mamaheux/pytorch-exemple-calcul-canada | 41bd1769aaf30bd3786589bd3e3252bb115fdd69 | [
"MIT"
] | null | null | null | models/vanilla_cnn.py | mamaheux/pytorch-exemple-calcul-canada | 41bd1769aaf30bd3786589bd3e3252bb115fdd69 | [
"MIT"
] | null | null | null | models/vanilla_cnn.py | mamaheux/pytorch-exemple-calcul-canada | 41bd1769aaf30bd3786589bd3e3252bb115fdd69 | [
"MIT"
] | null | null | null | import torch.nn as nn
from models.blocks import GlobalAvgPool2d
class _VanillaConvBlock(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size):
super(_VanillaConvBlock, self).__init__()
self._block = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size, stride=1, padding=kernel_size // 2, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True)
)
def forward(self, x):
return self._block(x)
class VanillaCnn(nn.Module):
def __init__(self, class_count=10, use_softmax=True):
super(VanillaCnn, self).__init__()
self._features = nn.Sequential(_VanillaConvBlock(in_channels=3, out_channels=8, kernel_size=3),
_VanillaConvBlock(in_channels=8, out_channels=16, kernel_size=3),
nn.MaxPool2d(kernel_size=2, stride=2),
_VanillaConvBlock(in_channels=16, out_channels=32, kernel_size=3),
_VanillaConvBlock(in_channels=32, out_channels=64, kernel_size=3),
nn.MaxPool2d(kernel_size=2, stride=2),
_VanillaConvBlock(in_channels=64, out_channels=128, kernel_size=3),
_VanillaConvBlock(in_channels=128, out_channels=256, kernel_size=3),
nn.MaxPool2d(kernel_size=2, stride=2))
classifier_layers = [
GlobalAvgPool2d(),
nn.Conv2d(256, class_count, kernel_size=1)
]
if use_softmax:
classifier_layers.append(nn.Softmax(dim=1))
self._classifier = nn.Sequential(*classifier_layers)
def forward(self, x):
y = self._features(x)
return self._classifier(y)[:, :, 0, 0]
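# Smoke-test sketch (assumes 32x32 RGB inputs and `import torch`):
# model = VanillaCnn(class_count=10)
# out = model(torch.randn(4, 3, 32, 32))  # shape (4, 10); rows sum to 1 (softmax)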
| 39.3125 | 110 | 0.581346 | 206 | 1,887 | 5 | 0.276699 | 0.126214 | 0.151456 | 0.078641 | 0.371845 | 0.334951 | 0.16699 | 0.16699 | 0.16699 | 0.16699 | 0 | 0.042868 | 0.320085 | 1,887 | 47 | 111 | 40.148936 | 0.759938 | 0 | 0 | 0.117647 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0.058824 | 0.029412 | 0.294118 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79c4397925c5e818a6ed0d6ef4f084f5009de3b4 | 1,079 | py | Python | routemaster_sdk/exceptions.py | thread/routemaster-sdk | 1300508525a3e1495c640f9c7ff689bb6f621d7e | [
"MIT"
] | null | null | null | routemaster_sdk/exceptions.py | thread/routemaster-sdk | 1300508525a3e1495c640f9c7ff689bb6f621d7e | [
"MIT"
] | null | null | null | routemaster_sdk/exceptions.py | thread/routemaster-sdk | 1300508525a3e1495c640f9c7ff689bb6f621d7e | [
"MIT"
] | null | null | null | """Well known exceptions."""
from routemaster_sdk.types import LabelRef, StateMachine
class UnknownLabel(ValueError):
"""Represents a label unknown in the given state machine."""
deleted = False
def __init__(self, label: LabelRef) -> None:
self.label = label
def __str__(self):
return "{0}: {1}".format(self.__class__.__name__, self.label)
class DeletedLabel(UnknownLabel):
"""Represents a label deleted in the given state machine."""
deleted = True
class UnknownStateMachine(ValueError):
"""Represents a state machine not in the system."""
def __init__(self, state_machine: StateMachine) -> None:
self.state_machine = state_machine
def __str__(self):
return "{0}: {1}".format(self.__class__.__name__, self.state_machine)
class LabelAlreadyExists(ValueError):
"""Thrown when a label already exists in the state machine."""
def __init__(self, label: LabelRef) -> None:
self.label = label
def __str__(self):
return "{0}: {1}".format(self.__class__.__name__, self.label)
| 26.975 | 77 | 0.682113 | 129 | 1,079 | 5.294574 | 0.341085 | 0.140556 | 0.048316 | 0.070278 | 0.402635 | 0.402635 | 0.317716 | 0.317716 | 0.317716 | 0.317716 | 0 | 0.006936 | 0.198332 | 1,079 | 39 | 78 | 27.666667 | 0.782659 | 0.217794 | 0 | 0.473684 | 0 | 0 | 0.02934 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.315789 | false | 0 | 0.052632 | 0.157895 | 0.842105 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 2 |
79c473b0364a6ca1e495e68f26cc755757c8686b | 604 | py | Python | servicebox/platforms/api/views.py | FlxPeters/servicebox | 2fc39fa5ec6e629a0794fda003a7a0e4adf05202 | [
"Apache-2.0"
] | null | null | null | servicebox/platforms/api/views.py | FlxPeters/servicebox | 2fc39fa5ec6e629a0794fda003a7a0e4adf05202 | [
"Apache-2.0"
] | null | null | null | servicebox/platforms/api/views.py | FlxPeters/servicebox | 2fc39fa5ec6e629a0794fda003a7a0e4adf05202 | [
"Apache-2.0"
] | null | null | null | from platforms.models import PlatformGroup, Platform
from rest_framework import viewsets
from platforms.api.serializers import PlatformGroupSerializer, PlatformSerializer
class PlatformViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows tenants to be viewed or edited.
"""
queryset = Platform.objects.all()
serializer_class = PlatformSerializer
class PlatformGroupViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows tenant groups to be viewed or edited.
"""
queryset = PlatformGroup.objects.all()
serializer_class = PlatformGroupSerializer
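# Router wiring sketch (a hypothetical urls.py; route prefixes are assumptions):
# from rest_framework import routers
# router = routers.DefaultRouter()
# router.register(r'platforms', PlatformViewSet)
# router.register(r'platform-groups', PlatformGroupViewSet)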
| 27.454545 | 81 | 0.768212 | 61 | 604 | 7.557377 | 0.52459 | 0.056399 | 0.099783 | 0.13449 | 0.290672 | 0.290672 | 0 | 0 | 0 | 0 | 0 | 0 | 0.167219 | 604 | 21 | 82 | 28.761905 | 0.916501 | 0.19702 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.333333 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
79c4bc7411a8ceae834135ef0832c81c48a8f427 | 5,528 | py | Python | transforms.py | amitkumarj441/TGS_Kaggle | a4f613046cc36f3f6dbec28adb35f97a63c2a994 | [
"MIT"
] | 1 | 2019-03-20T07:10:08.000Z | 2019-03-20T07:10:08.000Z | transforms.py | amitkumarj441/TGS_Kaggle | a4f613046cc36f3f6dbec28adb35f97a63c2a994 | [
"MIT"
] | null | null | null | transforms.py | amitkumarj441/TGS_Kaggle | a4f613046cc36f3f6dbec28adb35f97a63c2a994 | [
"MIT"
] | null | null | null | import cv2
import numpy as np
from scipy.ndimage.filters import gaussian_filter
from scipy.ndimage.interpolation import map_coordinates
def upsample(image, image_size_target):
padding0 = (image_size_target - image.shape[0]) / 2
padding1 = (image_size_target - image.shape[1]) / 2
padding_start0 = int(np.ceil(padding0))
padding_end0 = int(np.floor(padding0))
padding_start1 = int(np.ceil(padding1))
padding_end1 = int(np.floor(padding1))
return cv2.copyMakeBorder(image, padding_start0, padding_end0, padding_start1, padding_end1, cv2.BORDER_REFLECT_101)
def downsample(image, image_size_original):
padding = (image.shape[0] - image_size_original) / 2
padding_start = int(np.ceil(padding))
return image[padding_start:padding_start + image_size_original, padding_start:padding_start + image_size_original]
def augment(image, mask):
if np.random.rand() < 0.5:
image = np.fliplr(image)
mask = np.fliplr(mask)
if np.random.rand() < 0.5:
c = np.random.choice(2)
if c == 0:
image = multiply_brightness(image, np.random.uniform(1 - 0.1, 1 + 0.1))
elif c == 1:
image = adjust_gamma(image, np.random.uniform(1 - 0.1, 1 + 0.1))
if np.random.rand() < 0.5:
c = np.random.choice(3)
if c == 0:
image, mask = apply_elastic_transform(image, mask, alpha=150, sigma=8, alpha_affine=0)
elif c == 1:
image, mask = apply_elastic_transform(image, mask, alpha=0, sigma=0, alpha_affine=8)
elif c == 2:
image, mask = apply_elastic_transform(image, mask, alpha=150, sigma=10, alpha_affine=5)
if np.random.rand() < 0.5:
image, mask = random_crop_and_pad(image, mask)
return image, mask
def multiply_brightness(image, coefficient):
image_HLS = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
image_HLS = np.array(image_HLS, dtype=np.float64)
image_HLS[:, :, 1] = image_HLS[:, :, 1] * coefficient
image_HLS[:, :, 1][image_HLS[:, :, 1] > 255] = 255
image_HLS = np.array(image_HLS, dtype=np.uint8)
return cv2.cvtColor(image_HLS, cv2.COLOR_HLS2RGB)
def adjust_gamma(image, gamma):
# build a lookup table mapping the pixel values [0, 255] to
# their adjusted gamma values
invGamma = 1.0 / gamma
table = np.array([((i / 255.0) ** invGamma) * 255 for i in np.arange(0, 256)]).astype("uint8")
# apply gamma correction using the lookup table
return cv2.LUT(image, table)
# Function to distort image
def elastic_transform(image, alpha, sigma, alpha_affine, random_state=None):
"""Elastic deformation of images as described in [Simard2003]_ (with modifications).
.. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
Convolutional Neural Networks applied to Visual Document Analysis", in
Proc. of the International Conference on Document Analysis and
Recognition, 2003.
Based on https://gist.github.com/erniejunior/601cdf56d2b424757de5
"""
if random_state is None:
random_state = np.random.RandomState(None)
shape = image.shape
shape_size = shape[:2]
# Random affine
center_square = np.float32(shape_size) // 2
square_size = min(shape_size) // 3
pts1 = np.float32([center_square + square_size, [center_square[0] + square_size, center_square[1] - square_size],
center_square - square_size])
pts2 = pts1 + random_state.uniform(-alpha_affine, alpha_affine, size=pts1.shape).astype(np.float32)
M = cv2.getAffineTransform(pts1, pts2)
image = cv2.warpAffine(image, M, shape_size[::-1], borderMode=cv2.BORDER_REFLECT_101)
dx = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma) * alpha
dy = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma) * alpha
dz = np.zeros_like(dx)
x, y, z = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]), np.arange(shape[2]))
indices = np.reshape(y + dy, (-1, 1)), np.reshape(x + dx, (-1, 1)), np.reshape(z, (-1, 1))
return map_coordinates(image, indices, order=1, mode='reflect').reshape(shape)
def apply_elastic_transform(image, mask, alpha, sigma, alpha_affine):
channels = np.concatenate((image, mask[..., None]), axis=2)
result = elastic_transform(channels, alpha, sigma, alpha_affine, random_state=np.random.RandomState(None))
image_result = result[..., 0:3]
mask_result = result[..., 3]
mask_result = (mask_result > 0.5).astype(mask.dtype)
return image_result, mask_result
def random_crop_and_pad(image, mask):
max_crop = 40
crop_x_total = np.random.randint(max_crop)
crop_x0 = np.random.randint(crop_x_total + 1)
crop_x1 = crop_x_total - crop_x0
crop_y_total = np.random.randint(max_crop)
crop_y0 = np.random.randint(crop_y_total + 1)
crop_y1 = crop_y_total - crop_y0
cropped_image = image[crop_x0:image.shape[0] - crop_x1, crop_y0:image.shape[1] - crop_y1, :]
cropped_mask = mask[crop_x0:mask.shape[0] - crop_x1, crop_y0:mask.shape[1] - crop_y1]
cropped_padded_image = upsample(cropped_image, image.shape[0])
cropped_padded_mask = upsample(cropped_mask, mask.shape[0])
return cropped_padded_image, cropped_padded_mask
def random_crop_to_size(image, mask, size):
dmax = image.shape[0] - size
dx = np.random.randint(dmax + 1)
dy = np.random.randint(dmax + 1)
cropped_image = image[dx:dx + size, dy:dy + size, :]
cropped_mask = mask[dx:dx + size, dy:dy + size]
return cropped_image, cropped_mask
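# Usage sketch (hypothetical shapes: `image` is an HxWx3 uint8 array, `mask` is HxW):
# img = np.random.randint(0, 256, (101, 101, 3), dtype=np.uint8)
# msk = np.random.randint(0, 2, (101, 101), dtype=np.uint8)
# aug_img, aug_msk = augment(img, msk)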
| 38.929577 | 120 | 0.680174 | 806 | 5,528 | 4.475186 | 0.219603 | 0.035487 | 0.024951 | 0.015525 | 0.289992 | 0.254505 | 0.168561 | 0.114777 | 0.085944 | 0.085944 | 0 | 0.045098 | 0.193741 | 5,528 | 141 | 121 | 39.205674 | 0.764191 | 0.100398 | 0 | 0.086022 | 0 | 0 | 0.00243 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.096774 | false | 0 | 0.043011 | 0 | 0.236559 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79c67d96a3b58ae9b3f7d1e5efc7a2527f181276 | 1,663 | py | Python | taskobra/monitor/system_info.py | Vipyr/taskobra | d9884f006ef9c735852075912d5a945543de52f5 | [
"MIT"
] | null | null | null | taskobra/monitor/system_info.py | Vipyr/taskobra | d9884f006ef9c735852075912d5a945543de52f5 | [
"MIT"
] | 43 | 2020-02-06T22:23:42.000Z | 2020-04-29T23:56:43.000Z | taskobra/monitor/system_info.py | Vipyr/taskobra | d9884f006ef9c735852075912d5a945543de52f5 | [
"MIT"
] | 2 | 2020-02-06T21:01:42.000Z | 2020-02-06T23:43:11.000Z | from taskobra.orm import *
import platform
import cpuinfo
import subprocess
def create_system(args, database_engine):
system = System(name=platform.node())
cpu_info = cpuinfo.get_cpu_info()
system.add_component(OperatingSystem(
name=platform.system(),
version=platform.platform(),
))
system.add_component(CPU(
manufacturer=cpu_info.get('vendor_id', ''),
model=cpu_info.get('brand', ''),
isa=cpu_info.get('arch', ''),
core_count=cpu_info.get('count', 1),
threads_per_core=1,
        nominal_frequency=(cpu_info.get('hz_actual_raw', (0,))[0] / 1000000000),  # GHz; 0 when unreported
))
with get_session(bind=database_engine) as session:
current_system = session.query(System).filter(
System.name == platform.node(),
).first()
if current_system is None:
session.add(system)
session.commit()
#gpu = GPU(
# manufacturer="NVIDIA",
# model="1070",
# architecture="CUDA",
# tdp=105,
# core_count=1920,
# memory=8.0,
#)
#memory = Memory(
# manufacturer="G-Skill",
# model="Trident",
# standard="DDR4",
# capacity=16.0,
# frequency=3600,
# cas_latency=16,
# t_rcd=19,
# t_rp=19,
# t_ras=39,
#)
#storage = Storage(
# manufacturer="Sabrent",
# model="Rocket",
# standard="NVMe PCIe 4.0",
# capacity=500.0,
# max_read=5000,
# max_write=2500,
#)
#system.add_component(gpu)
#system.add_component(memory)
#system.add_component(memory)
#system.add_component(storage)
| 25.984375 | 74 | 0.578473 | 185 | 1,663 | 5.016216 | 0.491892 | 0.052802 | 0.116379 | 0.047414 | 0.071121 | 0.071121 | 0.071121 | 0 | 0 | 0 | 0 | 0.046823 | 0.280818 | 1,663 | 63 | 75 | 26.396825 | 0.729097 | 0.338545 | 0 | 0.076923 | 0 | 0 | 0.033676 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0 | 0.153846 | 0 | 0.192308 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79ca87eadda13d9fdb7282bf224ad560fc96b076 | 2,392 | py | Python | expression_builder/tests/test_string_replace.py | django-advance-utils/expression-builder | 08dab5780ae3a8be90c4daa6b9950ad1af4d87a4 | [
"MIT"
] | null | null | null | expression_builder/tests/test_string_replace.py | django-advance-utils/expression-builder | 08dab5780ae3a8be90c4daa6b9950ad1af4d87a4 | [
"MIT"
] | null | null | null | expression_builder/tests/test_string_replace.py | django-advance-utils/expression-builder | 08dab5780ae3a8be90c4daa6b9950ad1af4d87a4 | [
"MIT"
] | null | null | null | import unittest
from expression_builder.exceptions import ExpressionError
from expression_builder.expression_builder import ExpressionBuilder
class StringReplaceTests(unittest.TestCase):
# noinspection PyPep8Naming
def setUp(self):
self.exp = ExpressionBuilder()
self.exp.add_to_global("fred", 1234)
self.exp.add_to_global_string_statement("path", ";[tom];[tom]#")
self.exp.add_to_global_string_statement("path2", ";[tom];[tom+$]#")
def test_simple(self):
result = self.exp.string_replace("fred")
self.assertEqual('fred', result)
def test_simple_math(self):
result = self.exp.string_replace(";[1+5];#")
self.assertEqual(';6;#', result)
def test_simple_variable(self):
result = self.exp.string_replace(";[tom];#", variables={'tom': 5})
self.assertEqual(';5;#', result)
def test_bool_variable_true(self):
result = self.exp.string_replace(";[tom];#", variables={'tom': True})
self.assertEqual(';1;#', result)
def test_bool_variable_false(self):
result = self.exp.string_replace(";[tom];#", variables={'tom': False})
self.assertEqual(';0;#', result)
def test_simple_variable2(self):
result = self.exp.string_replace(";[tom];[tom]#", variables={'tom': 5})
self.assertEqual(';5;5#', result)
def test_simple_global_variable(self):
result = self.exp.string_replace(";[fred];[fred]#")
self.assertEqual(';1234;1234#', result)
def test_string_variable(self):
result = self.exp.string_replace(";[bill];#", {'bill': 'hello'})
self.assertEqual(';hello;#', result)
def test_unknown_variable(self):
with self.assertRaises(ExpressionError) as cm:
self.exp.string_replace(";[tom];[tom]#")
the_exception = cm.exception
self.assertEqual(the_exception.value, 'No variable named tom')
def test_global(self):
result = self.exp.run_statement("path", {'tom': 10})
self.assertEqual(";10;10#", result)
def test_global_with_replace(self):
result = self.exp.run_statement("path2", {'tom': 10}, replace_values={'$': 5})
self.assertEqual(";10;15#", result)
def test_inline_replace(self):
result = self.exp.run_statement('result = ^"ten = [ten]"', {'ten': 10})
self.assertEqual("ten = 10", result['result'])
| 36.8 | 86 | 0.640886 | 285 | 2,392 | 5.189474 | 0.214035 | 0.075727 | 0.104124 | 0.126437 | 0.425963 | 0.379986 | 0.322515 | 0.091278 | 0.091278 | 0 | 0 | 0.022234 | 0.191472 | 2,392 | 64 | 87 | 37.375 | 0.742503 | 0.010452 | 0 | 0 | 0 | 0 | 0.119662 | 0 | 0 | 0 | 0 | 0 | 0.276596 | 1 | 0.276596 | false | 0 | 0.06383 | 0 | 0.361702 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
79ca9ccc9aed4b417288aeae0d80662e45b6689d | 309 | py | Python | example/simple.py | yoophi/str_pic | e4ac65e819b50d7c8fb4bf94dd26aa5a97e4331b | [
"MIT"
] | null | null | null | example/simple.py | yoophi/str_pic | e4ac65e819b50d7c8fb4bf94dd26aa5a97e4331b | [
"MIT"
] | 3 | 2021-06-08T19:34:37.000Z | 2022-03-11T23:18:13.000Z | example/simple.py | yoophi/str_pic | e4ac65e819b50d7c8fb4bf94dd26aa5a97e4331b | [
"MIT"
] | null | null | null | from flask import Flask, render_template
from flask_dummyimage import DummyImage
app = Flask(__name__)
dummyimage = DummyImage(app, url_prefix="/dm", endpoint="images", route="img")
@app.route("/")
def index():
return render_template("index.html")
if __name__ == "__main__":
app.run(debug=True)
| 19.3125 | 78 | 0.721683 | 40 | 309 | 5.175 | 0.6 | 0.086957 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.135922 | 309 | 15 | 79 | 20.6 | 0.775281 | 0 | 0 | 0 | 0 | 0 | 0.100324 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.222222 | 0.111111 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 |
79cc5960935ea7fbba4fb0eb6555e1ecb03c2fbf | 1,562 | py | Python | mints/args/typed.py | candy-kingdom/mints | e68a2351cf3ff6823e978bc6a4b740bd2a974ca3 | [
"MIT"
] | 4 | 2020-05-09T11:01:32.000Z | 2020-06-03T14:44:06.000Z | mints/args/typed.py | candy-kingdom/cli | e68a2351cf3ff6823e978bc6a4b740bd2a974ca3 | [
"MIT"
] | 43 | 2020-01-27T21:14:16.000Z | 2020-06-18T17:57:20.000Z | mints/args/typed.py | candy-kingdom/mints | e68a2351cf3ff6823e978bc6a4b740bd2a974ca3 | [
"MIT"
] | null | null | null | from typing import Type, Any
class Typed:
"""A typed command line argument.
A typed command line argument is used for specifying the type
to convert the value to. For example, consider the following code:
@cli
def double(number: Opt[int]('A number to double.')):
print(number * 2)
When this CLI is called as
$ example.py double --number 5
the value '5' of the argument '--number' is converted to `int`
and passed to the function.
Note:
The default type of arguments is `str`. Thus, if an argument
is annotated as `Opt('A number to double.')`, a string value
will be passed to the function.
Attributes:
kind: A kind of an argument
(for example, `Arg`, `Opt` or `Flag`).
type: A type of an argument
(for example, `int`, `List[double]`, etc.).
"""
def __init__(self, kind: Type, type: Type):
self.kind = kind
self.type = type
def __call__(self, *args: Any, **kwargs: Any) -> 'Typed':
# Instantiate the parameter being wrapped. For example,
# `Arg[int]` will return `Typed(Arg, int)`, and
# `Typed(Arg, int)('Description.')` will instantiate
# `self.kind = Arg('Description.')`.
if isinstance(self.kind, type):
self.kind = self.kind(*args, **kwargs)
else:
raise ValueError(f"Cannot instantiate {type(self.kind)} twice: it"
f"is already instantiated as {repr(self.kind)}.")
return self
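# Usage sketch (assumes an `Opt` kind defined elsewhere in mints):
# Opt[int] yields Typed(Opt, int); calling Typed(Opt, int)('A number to double.')
# then instantiates the wrapped Opt with that description.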
| 31.877551 | 78 | 0.587068 | 206 | 1,562 | 4.412621 | 0.398058 | 0.070407 | 0.039604 | 0.037404 | 0.10341 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002747 | 0.300896 | 1,562 | 48 | 79 | 32.541667 | 0.82967 | 0.604994 | 0 | 0 | 0 | 0 | 0.180791 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.083333 | 0 | 0.416667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79cd97ed3020f125684d084c92be22793583d226 | 5,994 | py | Python | lab3/es2/webservice.py | haraldmeister/Programming_for_IoT_applications | 04ec13689caee1fca28bf4fb6a261c318ebd374d | [
"Apache-2.0"
] | null | null | null | lab3/es2/webservice.py | haraldmeister/Programming_for_IoT_applications | 04ec13689caee1fca28bf4fb6a261c318ebd374d | [
"Apache-2.0"
] | null | null | null | lab3/es2/webservice.py | haraldmeister/Programming_for_IoT_applications | 04ec13689caee1fca28bf4fb6a261c318ebd374d | [
"Apache-2.0"
] | null | null | null | import json
import time
import cherrypy
class albums:
def __init__(self,artist,year,title,num):
self.artist=artist
self.year=year
self.title=title
self.N=num
class owner(albums):
def __init__(self,nome,date):
self.album_list=[]
self.nome=nome
self.last_upd=date
self.result={"Artist":[],"Year":[],"Title":[],"Total songs":[]}
self.discography={"Owner":self.nome,"Last update":self.last_upd,"Album List":self.album_list}
def search_artist(self,key_artist):
for i in range(len(self.album_list)):
if(str(self.album_list[i].artist)==key_artist):
self.result["Artist"]=self.album_list[i].artist
self.result["Year"]=self.album_list[i].year
self.result["Title"]=self.album_list[i].title
self.result["Total songs"]=self.album_list[i].N
return json.loads(json.dumps(self.result,default=lambda x: x.__dict__))
return json.loads(json.dumps(self.result,default=lambda x: x.__dict__))
def search_title(self,key_title):
for i in range(len(self.album_list)):
if(str(self.album_list[i].title)==key_title):
self.result["Artist"]=self.album_list[i].artist
self.result["Year"]=self.album_list[i].year
self.result["Title"]=self.album_list[i].title
self.result["Total songs"]=self.album_list[i].N
return json.loads(json.dumps(self.result,default=lambda x: x.__dict__))
return json.loads(json.dumps(self.result,default=lambda x: x.__dict__))
def search_year(self,key_year):
for i in range(len(self.album_list)):
if(str(self.album_list[i].year)==key_year):
self.result["Artist"]=self.album_list[i].artist
self.result["Year"]=self.album_list[i].year
self.result["Title"]=self.album_list[i].title
self.result["Total songs"]=self.album_list[i].N
return json.loads(json.dumps(self.result,default=lambda x: x.__dict__))
return json.loads(json.dumps(self.result,default=lambda x: x.__dict__))
def search_totalsong(self,key_nsong):
for i in range(len(self.album_list)):
if(str(self.album_list[i].N)==key_nsong):
self.result["Artist"]=self.album_list[i].artist
self.result["Year"]=self.album_list[i].year
self.result["Title"]=self.album_list[i].title
self.result["Total songs"]=self.album_list[i].N
                return json.loads(json.dumps(self.result, default=lambda x: x.__dict__))
return json.loads(json.dumps(self.result,default=lambda x: x.__dict__))
def insert_album(self,artist,year,title,num):
for i in range(len(self.album_list)):
if(str(self.album_list[i].artist)==artist and str(self.album_list[i].title)==title ):
self.album_list[i].N=num
self.album_list[i].year=year
self.last_upd=time.strftime('%d/%m/%Y')+' '+time.strftime('%H:%M:%S')
return
self.album_list.append(albums(artist,year,title,num))
self.last_upd=time.strftime('%d/%m/%Y')+' '+time.strftime('%H:%M:%S')
def delete_album(self,artist,year,title,num):
for i in range(len(self.album_list)):
if(str(self.album_list[i].artist)==artist and str(self.album_list[i].title)==title ):
self.album_list.remove(self.album_list[i])
self.last_upd=time.strftime('%d/%m/%Y')+' '+time.strftime('%H:%M:%S')
def print_all(self):
return json.loads(json.dumps(self.discography,default=lambda x: x.__dict__))
class Discography(owner):
exposed=True
def __init__(self):
json_data=open("discography.txt")
data = json.load(json_data)
self.discogr=owner(data['discography_owner'],data['last_update'])
for j in range(len(data['album_list'])):
self.discogr.album_list.append(albums(data['album_list'][j]['artist'],
data['album_list'][j]['publication_year'],
data['album_list'][j]['title'],
data['album_list'][j]['total_tracks']))
@cherrypy.tools.json_out()
def GET(self,*uri,**params):
if (len(uri)==0):
return self.discogr.print_all()
else:
if uri[0]=="search_artist":
return self.discogr.search_artist(uri[1])
elif uri[0]=="search_title":
return self.discogr.search_title(uri[1])
elif uri[0]=="search_year":
return self.discogr.search_year(uri[1])
elif uri[0]=="search_totalsong":
return self.discogr.search_totalsong(uri[1])
elif uri[0]=="print":
return self.discogr.print_all()
@cherrypy.tools.json_in()
def POST(self,*uri,**params):
if uri[0]=="insert_album":
input_json = cherrypy.request.json
artist=input_json["artist"]
year=int(input_json["year"])
title=input_json["title"]
N=int(input_json["N"])
self.discogr.insert_album(artist,year,title,N)
return
if uri[0]=="delete_album":
input_json = cherrypy.request.json
artist=input_json["artist"]
year=int(input_json["year"])
title=input_json["title"]
N=int(input_json["N"])
self.discogr.delete_album(artist,year,title,N)
return
if __name__ == '__main__':
conf = {
'/': {
'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
'tools.sessions.on': True
}
}
cherrypy.tree.mount(Discography(), '/', conf)
cherrypy.config.update({'server.socket_host': '0.0.0.0'})
cherrypy.config.update({'server.socket_port': 9090})
cherrypy.engine.start()
cherrypy.engine.block()
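# Example requests against http://localhost:9090 (a sketch):
#   GET  /                        -> full discography
#   GET  /search_artist/<artist>  (also search_title, search_year, search_totalsong)
#   POST /insert_album or /delete_album with a JSON body such as
#        {"artist": "...", "year": 2000, "title": "...", "N": 10}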
| 45.067669 | 101 | 0.584918 | 790 | 5,994 | 4.249367 | 0.124051 | 0.117962 | 0.143283 | 0.112601 | 0.633006 | 0.569258 | 0.543342 | 0.526065 | 0.526065 | 0.526065 | 0 | 0.004514 | 0.260761 | 5,994 | 132 | 102 | 45.409091 | 0.753103 | 0 | 0 | 0.395161 | 0 | 0 | 0.089756 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.096774 | false | 0 | 0.024194 | 0.008065 | 0.298387 | 0.032258 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79ce0327616efa9691f6ae8fe41ab9246d6bf9e6 | 143 | py | Python | tests/redirects_tests/urls.py | Yoann-Vie/esgi-hearthstone | 115d03426c7e8e80d89883b78ac72114c29bed12 | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | tests/redirects_tests/urls.py | Yoann-Vie/esgi-hearthstone | 115d03426c7e8e80d89883b78ac72114c29bed12 | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | tests/redirects_tests/urls.py | Yoann-Vie/esgi-hearthstone | 115d03426c7e8e80d89883b78ac72114c29bed12 | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | from django.conf.urls import url
from django.http import HttpResponse
urlpatterns = [
url(r'^$', lambda req: HttpResponse('OK')),
]
| 20.428571 | 48 | 0.678322 | 18 | 143 | 5.388889 | 0.722222 | 0.206186 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.188811 | 143 | 6 | 49 | 23.833333 | 0.836207 | 0 | 0 | 0 | 0 | 0 | 0.029197 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.4 | 0 | 0.4 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 4 |
79d29ea8f56cec3596c251c94d5aca0bfd3a1026 | 1,653 | pyde | Python | examples/01_game_of_life/sketch_gameoflife.pyde | underwit/pyprocessing-examples | c6e84fded23dcdd5bf32d499aa91900d68ec213d | [
"MIT"
] | null | null | null | examples/01_game_of_life/sketch_gameoflife.pyde | underwit/pyprocessing-examples | c6e84fded23dcdd5bf32d499aa91900d68ec213d | [
"MIT"
] | null | null | null | examples/01_game_of_life/sketch_gameoflife.pyde | underwit/pyprocessing-examples | c6e84fded23dcdd5bf32d499aa91900d68ec213d | [
"MIT"
] | null | null | null | import random
from itertools import product
CS = 10 # cell size
W = 600 # width
H = 600 # height
COLS = W // CS
ROWS = H // CS
DENSITY = 0.35
dirs = list(product((-1, 0, 1), repeat=2))
dirs.remove((0, 0))
points = []
new_points = []
run = False
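# Controls: mouse click toggles a cell, SPACE pauses/resumes the simulation,
# 'r' refills the board randomly, 'c' clears it.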
def xy2flat(x, y):
x = (x + COLS) % COLS
y = (y + ROWS) % ROWS
return x + COLS * y
def flat2xy(index):
return index % COLS, index // COLS
def setup():
    size(W, H)
    frameRate(20)
    for i in range(COLS * ROWS):  # one cell per CS x CS block
points.append(random.random() < DENSITY)
new_points.append(False)
def mouseClicked():
x = mouseX // CS
y = mouseY // CS
index = xy2flat(x, y)
points[index] = not points[index]
def keyPressed():
global run
if key == ' ':
run = not run
elif key == 'r': # randomly fill the board
for i, _ in enumerate(points):
points[i] = random.random() < DENSITY
elif key == 'c': # clear the board
for i, _ in enumerate(points):
points[i] = False
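# Conway's rules: a live cell with fewer than 2 or more than 3 live neighbours
# dies; a dead cell with exactly 3 live neighbours becomes alive.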
def calc_cell(index):
x, y = flat2xy(index)
nb = sum([points[xy2flat(x + _x, y + _y)] for _x, _y in dirs])
new_points[index] = points[index]
if points[index] and (nb < 2 or nb > 3):
new_points[index] = False
elif nb == 3:
new_points[index] = True
def draw():
global points, new_points
background(52, 63, 62)
fill(220, 237, 255)
for index, is_alive in enumerate(points):
if is_alive:
x, y = flat2xy(index)
rect(x * CS, y * CS, CS, CS)
if run:
calc_cell(index)
if run:
points, new_points = new_points, points
| 21.467532 | 66 | 0.557774 | 241 | 1,653 | 3.751037 | 0.327801 | 0.079646 | 0.066372 | 0.026549 | 0.117257 | 0.079646 | 0.079646 | 0.079646 | 0.079646 | 0 | 0 | 0.043821 | 0.30974 | 1,653 | 76 | 67 | 21.75 | 0.748466 | 0.037508 | 0 | 0.1 | 0 | 0 | 0.001893 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.116667 | false | 0 | 0.033333 | 0.016667 | 0.183333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79d585582066d6853246fd14d5eec7b556e67b85 | 5,064 | py | Python | conanfile.py | madebr/conan-repo-actions-conan-libwebp | 2f2eaad6e8de2cbec611f19de5205fc0b3267492 | [
"MIT"
] | null | null | null | conanfile.py | madebr/conan-repo-actions-conan-libwebp | 2f2eaad6e8de2cbec611f19de5205fc0b3267492 | [
"MIT"
] | null | null | null | conanfile.py | madebr/conan-repo-actions-conan-libwebp | 2f2eaad6e8de2cbec611f19de5205fc0b3267492 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import shutil
from conans import ConanFile, CMake, tools
class LibwebpConan(ConanFile):
name = "libwebp"
version = "1.0.0"
description = "library to encode and decode images in WebP format"
url = "http://github.com/bincrafters/conan-libwebp"
homepage = "https://github.com/webmproject/libwebp"
author = "Bincrafters <bincrafters@gmail.com>"
license = "BSD 3-Clause"
exports = ["LICENSE.md"]
exports_sources = ['CMakeLists.txt',
'0001-install-pkg-config-files-during-the-CMake-build.patch']
generators = 'cmake'
_source_subfolder = "source_subfolder"
settings = "os", "compiler", "build_type", "arch"
options = {"shared": [True, False], "fPIC": [True, False],
"with_simd": [True, False], "near_lossless": [True, False],
"swap_16bit_csp": [True, False]}
default_options = {'shared': False, 'fPIC': True, 'with_simd': True, 'near_lossless': True, 'swap_16bit_csp': False}
def source(self):
source_url = "https://github.com/webmproject/libwebp"
tools.get("{0}/archive/v{1}.tar.gz".format(source_url, self.version))
extracted_dir = self.name + "-" + self.version
os.rename(extracted_dir, self._source_subfolder)
tools.patch(base_path=self._source_subfolder,
patch_file='0001-install-pkg-config-files-during-the-CMake-build.patch')
os.rename(os.path.join(self._source_subfolder, "CMakeLists.txt"),
os.path.join(self._source_subfolder, "CMakeListsOriginal.txt"))
shutil.copy("CMakeLists.txt",
os.path.join(self._source_subfolder, "CMakeLists.txt"))
def configure(self):
del self.settings.compiler.libcxx
def config_options(self):
if self.settings.os == 'Windows':
del self.options.fPIC
@property
def _version_components(self):
return [int(x) for x in self.version.split('.')]
def _configure_cmake(self):
cmake = CMake(self)
# should be an option but it doesn't work yet
cmake.definitions["WEBP_ENABLE_SIMD"] = self.options.with_simd
if self._version_components[0] >= 1:
cmake.definitions["WEBP_NEAR_LOSSLESS"] = self.options.near_lossless
else:
cmake.definitions["WEBP_ENABLE_NEAR_LOSSLESS"] = self.options.near_lossless
cmake.definitions['WEBP_ENABLE_SWAP_16BIT_CSP'] = self.options.swap_16bit_csp
# avoid finding system libs
cmake.definitions['CMAKE_DISABLE_FIND_PACKAGE_GIF'] = True
cmake.definitions['CMAKE_DISABLE_FIND_PACKAGE_PNG'] = True
cmake.definitions['CMAKE_DISABLE_FIND_PACKAGE_TIFF'] = True
cmake.definitions['CMAKE_DISABLE_FIND_PACKAGE_JPEG'] = True
if self.settings.os == "Android":
if 'CMAKE_ANDROID_ARCH_ABI' in cmake.definitions:
cmake.definitions['ANDROID_ABI'] = cmake.definitions['CMAKE_ANDROID_ARCH_ABI']
if 'ANDROID_NDK_HOME' in os.environ:
cmake.definitions['ANDROID_NDK'] = os.environ.get('ANDROID_NDK_HOME')
cmake.configure(source_folder=self._source_subfolder)
return cmake
def build(self):
# WEBP_EXTERN is not specified on Windows
# Set it to dllexport for building (see CMakeLists.txt) and to dllimport otherwise
if self.options.shared and self.settings.compiler == "Visual Studio":
tools.replace_in_file(os.path.join(self._source_subfolder, 'src', 'webp', 'types.h'),
'#ifndef WEBP_EXTERN',
"""#ifndef WEBP_EXTERN
#ifdef _MSC_VER
#ifdef WEBP_DLL
#define WEBP_EXTERN __declspec(dllexport)
#else
#define WEBP_EXTERN __declspec(dllimport)
#endif
#endif /* _MSC_VER */
#endif
#ifndef WEBP_EXTERN""")
# cmake misses dll (RUNTIME) copy
tools.replace_in_file(os.path.join(self._source_subfolder, "CMakeListsOriginal.txt"),
"LIBRARY DESTINATION lib",
"RUNTIME DESTINATION bin\nLIBRARY DESTINATION lib")
if self._version_components[0] >= 1:
# allow to build webpmux
tools.replace_in_file(os.path.join(self._source_subfolder, "CMakeListsOriginal.txt"),
"if(WEBP_BUILD_GIF2WEBP OR WEBP_BUILD_IMG2WEBP)",
"if(TRUE)")
cmake = self._configure_cmake()
cmake.build()
def package(self):
cmake = self._configure_cmake()
cmake.install()
self.copy("COPYING", dst="licenses", src=self._source_subfolder)
self.copy("FindWEBP.cmake", dst=".", src=".")
def package_info(self):
self.cpp_info.libs = ['webpmux', 'webpdemux', 'webpdecoder', 'webp']
if self.options.shared and self.settings.os == "Windows" and self.settings.compiler != 'Visual Studio':
self.cpp_info.libs = [lib + '.dll' for lib in self.cpp_info.libs]
| 42.2 | 120 | 0.634874 | 598 | 5,064 | 5.16388 | 0.304348 | 0.05829 | 0.061529 | 0.027202 | 0.310557 | 0.271697 | 0.202396 | 0.138601 | 0.090997 | 0.090997 | 0 | 0.007576 | 0.244076 | 5,064 | 119 | 121 | 42.554622 | 0.799112 | 0.056872 | 0 | 0.073171 | 0 | 0 | 0.267637 | 0.102954 | 0 | 0 | 0 | 0 | 0 | 1 | 0.097561 | false | 0 | 0.036585 | 0.012195 | 0.341463 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79d5867444343ac92dd71c753e06968277e1c875 | 5,184 | py | Python | sjtwo-c/site_scons/site_tools/codegen/site_packages/can/broadcastmanager.py | seanlinc/Playmate | 077877d172dd6b7beab910c52ec95ee300bc6480 | [
"Apache-2.0"
] | 2 | 2020-04-04T21:09:56.000Z | 2020-04-08T17:00:58.000Z | sjtwo-c/site_scons/site_tools/codegen/site_packages/can/broadcastmanager.py | seanlinc/Playmate | 077877d172dd6b7beab910c52ec95ee300bc6480 | [
"Apache-2.0"
] | 13 | 2020-04-11T21:50:57.000Z | 2020-04-19T03:19:48.000Z | sjtwo-c/site_scons/site_tools/codegen/site_packages/can/broadcastmanager.py | seanlinc/Playmate | 077877d172dd6b7beab910c52ec95ee300bc6480 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Exposes several methods for transmitting cyclic messages.
The main entry point to these classes should be through
:meth:`can.BusABC.send_periodic`.
"""
import abc
import logging
import threading
import time
import warnings
log = logging.getLogger('can.bcm')
class CyclicTask(object):
"""
Abstract Base for all cyclic tasks.
"""
@abc.abstractmethod
def stop(self):
"""Cancel this periodic task.
:raises can.CanError:
If stop is called on an already stopped task.
"""
class CyclicSendTaskABC(CyclicTask):
"""
Message send task with defined period
"""
def __init__(self, message, period):
"""
:param can.Message message: The message to be sent periodically.
:param float period: The rate in seconds at which to send the message.
"""
self.message = message
self.can_id = message.arbitration_id
self.arbitration_id = message.arbitration_id
self.period = period
super(CyclicSendTaskABC, self).__init__()
class LimitedDurationCyclicSendTaskABC(CyclicSendTaskABC):
def __init__(self, message, period, duration):
"""Message send task with a defined duration and period.
:param can.Message message: The message to be sent periodically.
:param float period: The rate in seconds at which to send the message.
:param float duration:
The duration to keep sending this message at given rate.
"""
super(LimitedDurationCyclicSendTaskABC, self).__init__(message, period)
self.duration = duration
class RestartableCyclicTaskABC(CyclicSendTaskABC):
"""Adds support for restarting a stopped cyclic task"""
@abc.abstractmethod
def start(self):
"""Restart a stopped periodic task.
"""
class ModifiableCyclicTaskABC(CyclicSendTaskABC):
"""Adds support for modifying a periodic message"""
def modify_data(self, message):
"""Update the contents of this periodically sent message without altering
the timing.
:param can.Message message:
The message with the new :attr:`can.Message.data`.
Note: The arbitration ID cannot be changed.
"""
self.message = message
class MultiRateCyclicSendTaskABC(CyclicSendTaskABC):
"""A Cyclic send task that supports switches send frequency after a set time.
"""
def __init__(self, channel, message, count, initial_period, subsequent_period):
"""
Transmits a message `count` times at `initial_period` then continues to
transmit message at `subsequent_period`.
:param channel: See interface specific documentation.
:param can.Message message:
:param int count:
:param float initial_period:
:param float subsequent_period:
"""
super(MultiRateCyclicSendTaskABC, self).__init__(channel, message, subsequent_period)
class ThreadBasedCyclicSendTask(ModifiableCyclicTaskABC,
LimitedDurationCyclicSendTaskABC,
RestartableCyclicTaskABC):
"""Fallback cyclic send task using thread."""
def __init__(self, bus, lock, message, period, duration=None):
super(ThreadBasedCyclicSendTask, self).__init__(message, period, duration)
self.bus = bus
self.lock = lock
self.stopped = True
self.thread = None
self.end_time = time.time() + duration if duration else None
self.start()
def stop(self):
self.stopped = True
def start(self):
self.stopped = False
if self.thread is None or not self.thread.is_alive():
name = "Cyclic send task for 0x%X" % (self.message.arbitration_id)
self.thread = threading.Thread(target=self._run, name=name)
self.thread.daemon = True
self.thread.start()
def _run(self):
while not self.stopped:
# Prevent calling bus.send from multiple threads
with self.lock:
started = time.time()
try:
self.bus.send(self.message)
except Exception as exc:
log.exception(exc)
break
if self.end_time is not None and time.time() >= self.end_time:
break
# Compensate for the time it takes to send the message
delay = self.period - (time.time() - started)
time.sleep(max(0.0, delay))
def send_periodic(bus, message, period, *args, **kwargs):
"""
Send a :class:`~can.Message` every `period` seconds on the given bus.
:param can.BusABC bus: A CAN bus which supports sending.
:param can.Message message: Message to send periodically.
:param float period: The minimum time between sending messages.
:return: A started task instance
"""
warnings.warn("The function `can.send_periodic` is deprecated and will " +
"be removed in an upcoming version. Please use `can.Bus.send_periodic` instead.", DeprecationWarning)
return bus.send_periodic(message, period, *args, **kwargs)
| 32.603774 | 119 | 0.643711 | 590 | 5,184 | 5.561017 | 0.308475 | 0.034136 | 0.022859 | 0.033526 | 0.116733 | 0.078635 | 0.068881 | 0.068881 | 0.068881 | 0.068881 | 0 | 0.001061 | 0.272569 | 5,184 | 158 | 120 | 32.810127 | 0.869 | 0.36169 | 0 | 0.181818 | 0 | 0 | 0.055537 | 0.007695 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.075758 | 0 | 0.363636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79d996f9c7b739201903f7162ae39e85d80aae38 | 513 | py | Python | accessible_output/braille/outputs/virgo.py | Timtam/cards-against-humanity | 89ea61b5c9915198b845bbf8a93c3f7827323ceb | [
"MIT"
] | 5 | 2017-04-11T00:18:42.000Z | 2021-08-01T04:27:20.000Z | accessible_output/braille/outputs/virgo.py | Timtam/cards-against-humanity | 89ea61b5c9915198b845bbf8a93c3f7827323ceb | [
"MIT"
] | 47 | 2017-04-27T18:57:27.000Z | 2017-07-16T21:18:28.000Z | accessible_output/braille/outputs/virgo.py | Timtam/cards-against-humanity | 89ea61b5c9915198b845bbf8a93c3f7827323ceb | [
"MIT"
] | 4 | 2018-05-17T12:33:59.000Z | 2022-02-20T16:08:51.000Z | from pywintypes import com_error
import win32com.client
from main import OutputError, BrailleOutput
class Virgo (BrailleOutput):
"""Braille output supporting the Virgo screen reader."""
name = 'Virgo'
def __init__(self, *args, **kwargs):
super (Virgo, self).__init__(*args, **kwargs)
try:
self.object = win32com.client.Dispatch("phoenix.BrailleSysClass")
except com_error:
raise OutputError
def braille(self, text):
self.object.sayonbraille(True,text)
def canBraille(self):
return True | 23.318182 | 68 | 0.744639 | 63 | 513 | 5.904762 | 0.587302 | 0.043011 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009132 | 0.146199 | 513 | 22 | 69 | 23.318182 | 0.840183 | 0.097466 | 0 | 0 | 0 | 0 | 0.061135 | 0.050218 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.2 | 0.066667 | 0.6 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79da26b04e69fcee30b862a1e5dd200b98e09556 | 3,050 | py | Python | data_ai/comp3006/src/test.py | lonelyhentai/workspace | 2a996af58d6b9be5d608ed040267398bcf72403b | [
"MIT"
] | 2 | 2021-04-26T16:37:38.000Z | 2022-03-15T01:26:19.000Z | data_ai/comp3006/src/test.py | lonelyhentai/workspace | 2a996af58d6b9be5d608ed040267398bcf72403b | [
"MIT"
] | null | null | null | data_ai/comp3006/src/test.py | lonelyhentai/workspace | 2a996af58d6b9be5d608ed040267398bcf72403b | [
"MIT"
] | 1 | 2022-03-15T01:26:23.000Z | 2022-03-15T01:26:23.000Z | import pandas as pd
import numpy as np
from os import path
from path_service import LOG_DIR, DATA_DIR
from sklearn.metrics import log_loss
import re
prob_columns = list(map(lambda x: f"prob{x}", range(8)))
prob_columns_without_end = list(map(lambda x: f"prob{x}", range(7)))
def row_check(df: pd.DataFrame):
df.loc[:,prob_columns]=df.loc[:,prob_columns].apply(lambda x: x/np.sum(x),axis=1,result_type='expand')
df = df.round(5)
sum7 = np.sum(df.loc[:,prob_columns_without_end],axis=1)
df.loc[:,'prob7'] = 1.0 - sum7
return df
def get_prob_res(file_name: str):
df: pd.DataFrame = pd.DataFrame([])
with open(path.join(LOG_DIR, file_name), 'r') as prob_file:
prob_lines = prob_file.readlines()
probs = {}
for i in range(8):
probs[i] = []
for line in prob_lines:
words = re.split(r"\s", line)
for i in range(8):
pos = i * 2
prob_index = int(words[pos][-1])
probs[prob_index].append(float(words[pos + 1]))
df.loc[:, "file_id"] = pd.Series(list(range(1, len(probs[0]) + 1)), dtype=np.int)
for i in range(8):
df.loc[:, f"prob{i}"] = pd.Series(probs[i], dtype=np.float)
return row_check(df)
def get_single_res(file_name: str, true_mode: bool = True):
df: pd.DataFrame = pd.DataFrame([])
with open(path.join(LOG_DIR if not true_mode else DATA_DIR, file_name), 'r') as prob_file:
prob_lines = prob_file.readlines()
probs = {}
for i in range(8):
probs[i] = []
j = 0
for line in prob_lines:
label = int(str.strip(re.split(r"\s", line)[0])[-1])
for i in range(8):
if i == label:
probs[i].append(1.0)
else:
probs[i].append(0.0)
df.loc[:, "file_id"] = pd.Series(list(range(1, len(probs[0]) + 1)), dtype=np.int)
for i in range(8):
df.loc[:, f"prob{i}"] = pd.Series(probs[i], dtype=np.float)
return df
def get_probs(df: pd.DataFrame) -> pd.DataFrame:
return df.loc[:, list(map(lambda x: f"prob{x}", range(8)))]
def check_valid_log_loss():
valid_prob_df = get_prob_res('valid_prob.log')
labels = get_single_res('security.valid', True)
print("prob mode: ", log_loss(get_probs(labels), get_probs(valid_prob_df)))
def check_train_log_loss():
valid_prob_df = get_prob_res('train_prob.log')
labels = get_single_res('new_train', True)
print("prob mode: ", log_loss(get_probs(labels), get_probs(valid_prob_df)))
def save_train_res(df: pd.DataFrame):
df.to_csv(path.join(DATA_DIR, "test_submit.csv"), sep=",", index=False, float_format='%.5f')
if __name__ == "__main__":
check_valid_log_loss()
check_train_log_loss()
test_prob_df = get_prob_res("test_prob.log")
save_train_res(test_prob_df)
df = pd.read_csv(path.join(DATA_DIR, "test_submit.csv"), sep=",")
for index, row in df.iterrows():
if np.abs(np.sum(row[list(map(lambda x: f"prob{x}", range(8)))]) - 1.0) > 1e-6:
raise Exception(f"sum prob not equal 1.0 in {index}")
| 34.659091 | 106 | 0.627213 | 513 | 3,050 | 3.524366 | 0.216374 | 0.029867 | 0.019912 | 0.036504 | 0.53208 | 0.462389 | 0.434735 | 0.434735 | 0.389934 | 0.309181 | 0 | 0.01697 | 0.207869 | 3,050 | 87 | 107 | 35.057471 | 0.731374 | 0 | 0 | 0.338028 | 0 | 0 | 0.077377 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.098592 | false | 0 | 0.084507 | 0.014085 | 0.239437 | 0.028169 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79daca46089122299df7193b935021df51332239 | 1,255 | py | Python | tests/test_data.py | tappitikarrass/flask-ap | 88b97bb522474ca3dc056c209640050af74cb5dc | [
"BSD-3-Clause"
] | null | null | null | tests/test_data.py | tappitikarrass/flask-ap | 88b97bb522474ca3dc056c209640050af74cb5dc | [
"BSD-3-Clause"
] | null | null | null | tests/test_data.py | tappitikarrass/flask-ap | 88b97bb522474ca3dc056c209640050af74cb5dc | [
"BSD-3-Clause"
] | null | null | null | import base64
# USER
post_user_data_200 = {
"username": "sbandera1",
"firstname": "Stepan",
"lastname": "Bandera",
"email": "stepanko@liamg.com",
"phone": "123",
"password": "supersecret"
}
post_user_alt_data_200 = {
"username": "ivanbahryanyi",
"firstname": "Ivan",
"lastname": "Bahryanyi",
"email": "bahryanyi@liamg.com",
"phone": "30",
"password": "fcksovietunion"
}
post_user_data_400 = {
"usernameeee": "sbandera1",
"firstname": "Stepan",
"lastname": "Bandera",
"email": "stepanko@liamg.com",
"phone": "123",
"password": "supersecret"
}
update_user_data_200 = {
"username": "sbandera1",
"firstname": "Ivan",
"lastname": "Franko",
"email": "ivanko@liamg.com",
"phone": "111",
"password": "supersecret"
}
login_creds_200 = base64.b64encode(b'sbandera1:supersecret').decode('utf-8')
login_creds_alt_200 = base64.b64encode(b'ivanbahryanyi:fcksovietunion').decode('utf-8')
login_creds_403 = base64.b64encode(b'sbandera1:supersecreta').decode('utf-8')
login_creds_404 = base64.b64encode(b'sbandera2:supersecret').decode('utf-8')
# LIST
post_list_data_200 = {
"name": "watchlist"
}
update_list_data_200 = {
"name": "newname"
}
post_list_anime_data_200 = {
"mal_id": 47
}
| 22.818182 | 87 | 0.663745 | 142 | 1,255 | 5.633803 | 0.359155 | 0.0525 | 0.065 | 0.05625 | 0.3625 | 0.2875 | 0.2175 | 0.2175 | 0.2175 | 0.2175 | 0 | 0.069224 | 0.148207 | 1,255 | 54 | 88 | 23.240741 | 0.679139 | 0.007171 | 0 | 0.326087 | 0 | 0 | 0.43041 | 0.074014 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.086957 | 0.021739 | 0 | 0.021739 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
79dc47eea0d2277e80b8015449551a4eef9526a7 | 235 | py | Python | terrafirma/calendar/apps.py | AlexandraAlter/django-terrafirma | afce5946f173aded2b4bfea78cf1b1034ec32272 | [
"MIT"
] | null | null | null | terrafirma/calendar/apps.py | AlexandraAlter/django-terrafirma | afce5946f173aded2b4bfea78cf1b1034ec32272 | [
"MIT"
] | null | null | null | terrafirma/calendar/apps.py | AlexandraAlter/django-terrafirma | afce5946f173aded2b4bfea78cf1b1034ec32272 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class CalendarConfig(AppConfig):
name = 'terrafirma.calendar'
label = 'terrafirma_calendar'
verbose_name = _('Terrafirma Calendar')
| 26.111111 | 54 | 0.770213 | 26 | 235 | 6.769231 | 0.653846 | 0.306818 | 0.25 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.153191 | 235 | 8 | 55 | 29.375 | 0.884422 | 0 | 0 | 0 | 0 | 0 | 0.242553 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.333333 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
79dd7101e6c2dbca64177e87d238cd48079dd45d | 6,466 | py | Python | resources/lib/auth_routes.py | t43pasdf/plugin.video.espn_3 | f111edf14f0344d248f0a62de3da4f15afc7d354 | [
"MIT"
] | 4 | 2019-10-18T01:27:48.000Z | 2020-02-14T05:45:29.000Z | resources/lib/auth_routes.py | t43pasdf/plugin.video.espn_3 | f111edf14f0344d248f0a62de3da4f15afc7d354 | [
"MIT"
] | 3 | 2020-02-10T05:58:30.000Z | 2020-09-28T22:42:04.000Z | resources/lib/auth_routes.py | t43pasdf/plugin.video.espn_3 | f111edf14f0344d248f0a62de3da4f15afc7d354 | [
"MIT"
] | null | null | null | # Copyright 2019 https://github.com/kodi-addons
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is furnished
# to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
try:
from urllib2 import HTTPError
except ImportError:
from urllib.error import HTTPError
try:
from Queue import Queue, Empty
except ImportError:
from queue import Queue, Empty
import logging
import threading
import time
import xbmcgui
from resources.lib import adobe_activate_api, espnplus, player_config, util
from resources.lib.plugin_routing import plugin
from resources.lib.kodiutils import get_string, set_setting
@plugin.route('/login-tv-provider')
def login_tv_provider():
logging.debug('Authenticate Device')
if adobe_activate_api.is_authenticated():
logging.debug('Device already authenticated, skipping authentication')
dialog = xbmcgui.Dialog()
dialog.ok(get_string(30037), get_string(30301))
set_setting('LoggedInToTvProvider', True)
return True
else:
regcode = adobe_activate_api.get_regcode()
dialog = xbmcgui.Dialog()
ok = dialog.yesno(get_string(30310),
get_string(30320),
get_string(30330) % regcode,
get_string(30340),
get_string(30360),
get_string(30350))
if ok:
try:
adobe_activate_api.authenticate(regcode)
dialog.ok(get_string(30310), get_string(30370))
set_setting('LoggedInToTvProvider', True)
return True
except HTTPError as e:
dialog.ok(get_string(30037), get_string(30420) % e)
set_setting('LoggedInToTvProvider', False)
return False
@plugin.route('/view-tv-provider-details')
def view_tv_provider_details():
dialog = xbmcgui.Dialog()
dialog.ok(get_string(30380),
get_string(30390) % adobe_activate_api.get_authentication_expires(),
get_string(30700) % (player_config.get_dma(), player_config.get_timezone()))
@plugin.route('/logout-tv-provider')
def logout_tv_provider():
dialog = xbmcgui.Dialog()
ok = dialog.yesno(get_string(30381),
get_string(30382))
if ok:
adobe_activate_api.deauthorize()
set_setting('LoggedInToTvProvider', False)
@plugin.route('/login-espn-plus')
def login_espn_plus():
if not espnplus.have_valid_login_id_token():
logging.debug('Requesting login id token')
semaphore = threading.Semaphore(0)
result_queue = Queue()
license_plate, ws = espnplus.perform_license_plate_auth_flow(semaphore, result_queue)
progress_dialog = xbmcgui.DialogProgress()
progress_dialog.create(get_string(40100), get_string(40110), license_plate)
espnplus.start_websocket_thread(ws)
times = 0
sleep_time = 1
max_time = 180
max_times = max_time / sleep_time
# wait a maximum of 3 minutes
while times < max_times:
time.sleep(sleep_time)
canceled = progress_dialog.iscanceled()
acquired = semaphore.acquire(blocking=False)
logging.debug('Canceled: %s Acquired: %s' % (canceled, acquired))
seconds_left = max_time - times * sleep_time
minutes, seconds = divmod(seconds_left, 60)
percent = int(100 * times / max_times)
progress_dialog.update(percent, get_string(40110), license_plate,
get_string(40120) % (minutes, seconds))
if canceled or acquired:
break
times = times + 1
ws.close()
progress_dialog.close()
token = None
try:
token = result_queue.get(block=True, timeout=1)
except Empty as e:
logging.error('No result from websocket %s', e)
if token is not None and 'id_token' in token:
espnplus.handle_license_plate_token(token)
else:
dialog = xbmcgui.Dialog()
dialog.ok(get_string(30037), get_string(40130))
set_setting('LoggedInToEspnPlus', False)
return False
if not espnplus.has_valid_bam_account_access_token():
espnplus.request_bam_account_access_token()
logging.debug('Bam token %s' % espnplus.get_bam_account_access_token())
dialog = xbmcgui.Dialog()
dialog.ok(get_string(40000), get_string(40101))
set_setting('LoggedInToEspnPlus', True)
return True
@plugin.route('/view-espn-plus-details')
def view_espn_plus_details():
account_details = espnplus.get_bam_account_details()
email = util.get_nested_value(account_details, ['attributes', 'email'], 'Unknown Email')
profile_name = util.get_nested_value(account_details, ['activeProfile', 'profileName'], 'Unknown Profile Name')
product_details = email + ' - ' + profile_name + '\n'
sub_details = espnplus.get_bam_sub_details()
for sub in sub_details:
if sub['isActive']:
product_name = ''
for product in sub['products']:
product_name = product_name + ' ' + product['name']
product_details = product_details + product_name + ' ' + sub['expirationDate'] + '\n'
dialog = xbmcgui.Dialog()
dialog.ok(get_string(40260), product_details)
@plugin.route('/logout-espn-plus')
def logout_espn_plus():
set_setting('LoggedInToEspnPlus', False)
espnplus.config.reset_settings()
| 38.951807 | 115 | 0.665636 | 780 | 6,466 | 5.329487 | 0.324359 | 0.058456 | 0.031994 | 0.028626 | 0.156844 | 0.113784 | 0.077219 | 0.043782 | 0.024056 | 0.024056 | 0 | 0.0299 | 0.244819 | 6,466 | 165 | 116 | 39.187879 | 0.821421 | 0.169193 | 0 | 0.225806 | 0 | 0 | 0.100149 | 0.008969 | 0 | 0 | 0 | 0 | 0 | 1 | 0.048387 | false | 0 | 0.104839 | 0 | 0.193548 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79df34a92c6aa109d6fb09a1fbe24d44b829d071 | 3,440 | py | Python | app.py | rbSparky/umit-hack-backend | a9402d35d07693b78498a2ba2d4ff08fcb6cab44 | [
"MIT"
] | null | null | null | app.py | rbSparky/umit-hack-backend | a9402d35d07693b78498a2ba2d4ff08fcb6cab44 | [
"MIT"
] | null | null | null | app.py | rbSparky/umit-hack-backend | a9402d35d07693b78498a2ba2d4ff08fcb6cab44 | [
"MIT"
] | null | null | null | import pickle
from flask import Flask, request, jsonify, session
from flask_cors import CORS, cross_origin
import sklearn
from sklearn.decomposition import TruncatedSVD
import pandas as pd
import numpy as np
ranks = []
app = Flask(__name__)
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
class Model:
i = '0'
lrank = 1
hrank = 2
r_names = None
r_ID = None
corr_ID = None
recc = None
cmat = []
fir = []
sec = []
final = []
def predict(self):
self.sec = []
self.fir = []
flf = []
SVD = TruncatedSVD(n_components = 10)
decompm = SVD.fit_transform(self.cmat)
df = pd.DataFrame(decompm)
corrm = np.corrcoef(decompm)
p_names = list(self.cmat.index)
p_ID = p_names.index(str(self.i))
c_ID = corrm[p_ID]
Recommend = list(self.cmat.index[c_ID > 0.95])
fl = []
for i in range(len(c_ID)):
if(c_ID[i] > 0.95):
fl.append([c_ID[i], self.cmat.index[i]])
fl.sort(reverse=True)
flf = []
for i in range(len(fl)):
if (fl[i][0] > 0.95):
flf.append(fl[i])
clgdf, clgds = {}, {}
for i in flf:
for j in self.cmat.loc[i[1]].items():
if ((j[1] == 5) and (j[0] not in self.fir) and (ranks[j[0]][1] >= self.lrank)):# and (self.hrank >= ranks[j[0]][0])):
if(j[0] in clgdf):
clgdf[j[0]] += 1
else:
clgdf[j[0]] = 1
elif ((j[1] == 2) and (j[0] not in self.sec) and (ranks[j[0]][1] >= self.lrank)):# and (self.hrank >= ranks[j[0]][0])):
if(j[0] in clgds):
clgds[j[0]] += 1
else:
clgds[j[0]] = 1
tf, ts = [], []
for k in clgdf:
tf.append([clgdf[k], k])
for k in clgds:
ts.append([clgds[k], k])
tf.sort(reverse=True)
ts.sort(reverse=True)
for i in tf:
j = i[1]
self.fir.append([(j.split())[0], j[len((j.split())[0]):], ranks[j][0], ranks[j][1]])
for i in ts:
j = i[1]
self.sec.append([(j.split())[0], j[len((j.split())[0]):], ranks[j][0], ranks[j][1]])
#print(self.fir, self.sec, sep = '\n\n\n')
self.final = []
for i in self.fir:
self.final.append(i)
for i in self.sec:
self.final.append(i)
rfinal = []
[rfinal.append(x) for x in self.final if x not in rfinal]
return jsonify(rfinal)
@app.route('/')
def hello():
return 'hi main'
@app.route('/predict', methods=['POST','GET']) #or POST u see that
@cross_origin()
def predict():
#take all these as input from args
global ranks
#session.clear()
req_dat = request.get_json()
lrank = req_dat['lrank']#5000
hrank = req_dat['hrank']#7000
stream1 = req_dat['stream1']#'Computer Science'
stream2 = req_dat['stream2']#'Electronics'
'''
lrank = int(request.args.get("lrank"))
hrank = int(request.args.get("hrank"))
stream1 = request.args.get("p1")
stream2 = request.args.get("p2")
'''
f = open('essentials.pckl', 'rb')
f1 = pickle.load(f)
f.close()
#print(f1)
f = open('Model2.pckl', 'rb')
f2 = pickle.load(f)
f.close()
wsc = f1[0]
ranks = f1[1]
f2.cmat = f1[2]
f2.lrank = lrank
f2.hrank = hrank
f2.stream1 = stream1
f2.stream2 = stream2
f2.final = []
f2.i = wsc[(stream1, stream2)]
return f2.predict()
if __name__ == '__main__':
#app.secret_key = 'super secret key'
#app.config['SESSION_TYPE'] = 'filesystem'
#session.init_app(app)
app.run(debug=True)
| 23.888889 | 127 | 0.563081 | 540 | 3,440 | 3.514815 | 0.264815 | 0.014752 | 0.022129 | 0.017914 | 0.135933 | 0.103267 | 0.088514 | 0.088514 | 0.088514 | 0.088514 | 0 | 0.034924 | 0.250872 | 3,440 | 143 | 128 | 24.055944 | 0.701591 | 0.094477 | 0 | 0.092593 | 0 | 0 | 0.037428 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027778 | false | 0 | 0.064815 | 0.009259 | 0.231481 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8dab135854cbf1898ed8a1808f3a10f5e2425b1b | 235 | py | Python | ccr/urls.py | nikhil96sher/coding_companion | bb5d9596dff74e342ca07b6d95c37fb491877224 | [
"MIT"
] | 12 | 2015-12-30T06:31:57.000Z | 2017-12-26T01:42:18.000Z | ccr/urls.py | nikhilsheoran96/coding_companion | bb5d9596dff74e342ca07b6d95c37fb491877224 | [
"MIT"
] | null | null | null | ccr/urls.py | nikhilsheoran96/coding_companion | bb5d9596dff74e342ca07b6d95c37fb491877224 | [
"MIT"
] | 5 | 2015-12-30T07:06:22.000Z | 2019-04-24T05:46:01.000Z | from django.conf.urls import patterns,url
from ccr import views
urlpatterns=patterns(
'',
url(r'^$',views.main),
url(r'^save/',views.save),
url(r'^template/',views.template),
url(r'^compile/',views.compile),
url(r'^run/',views.run),
) | 21.363636 | 41 | 0.702128 | 37 | 235 | 4.459459 | 0.432432 | 0.121212 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.068085 | 235 | 11 | 42 | 21.363636 | 0.753425 | 0 | 0 | 0 | 0 | 0 | 0.135593 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8daf3e4c8966ce1a6bddee1ab26929f7ffa13135 | 9,354 | py | Python | tools/Project/UI_ModuleMgrDlg.py | wzhengsen/engine-x | f398b94a9a5bb9645c16d12d82d6366589db4e21 | [
"MIT"
] | null | null | null | tools/Project/UI_ModuleMgrDlg.py | wzhengsen/engine-x | f398b94a9a5bb9645c16d12d82d6366589db4e21 | [
"MIT"
] | null | null | null | tools/Project/UI_ModuleMgrDlg.py | wzhengsen/engine-x | f398b94a9a5bb9645c16d12d82d6366589db4e21 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'UI/ModuleMgrDlg.ui'
#
# Created by: PyQt5 UI code generator 5.15.2
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_ModuleMgrDlg(object):
def setupUi(self, ModuleMgrDlg):
ModuleMgrDlg.setObjectName("ModuleMgrDlg")
ModuleMgrDlg.setWindowModality(QtCore.Qt.WindowModal)
ModuleMgrDlg.resize(600, 300)
ModuleMgrDlg.setMinimumSize(QtCore.QSize(300, 150))
ModuleMgrDlg.setMaximumSize(QtCore.QSize(600, 300))
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/res/IcoModule.ico"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
ModuleMgrDlg.setWindowIcon(icon)
ModuleMgrDlg.setModal(True)
self.horizontalLayout = QtWidgets.QHBoxLayout(ModuleMgrDlg)
self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout.setSpacing(2)
self.horizontalLayout.setObjectName("horizontalLayout")
self.gridLayout = QtWidgets.QGridLayout()
self.gridLayout.setSpacing(2)
self.gridLayout.setObjectName("gridLayout")
self.NewModuleButton = QtWidgets.QPushButton(ModuleMgrDlg)
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(":/res/ImgPlus.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.NewModuleButton.setIcon(icon1)
self.NewModuleButton.setAutoDefault(False)
self.NewModuleButton.setObjectName("NewModuleButton")
self.gridLayout.addWidget(self.NewModuleButton, 1, 0, 1, 1)
self.DelModuleButton = QtWidgets.QPushButton(ModuleMgrDlg)
self.DelModuleButton.setEnabled(False)
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap(":/res/ImgDel.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.DelModuleButton.setIcon(icon2)
self.DelModuleButton.setAutoDefault(False)
self.DelModuleButton.setObjectName("DelModuleButton")
self.gridLayout.addWidget(self.DelModuleButton, 1, 1, 1, 1)
self.ModuleListWidget = QtWidgets.QListWidget(ModuleMgrDlg)
self.ModuleListWidget.setObjectName("ModuleListWidget")
self.gridLayout.addWidget(self.ModuleListWidget, 0, 0, 1, 2)
self.horizontalLayout.addLayout(self.gridLayout)
self.ModuleInfoTabWidget = QtWidgets.QTabWidget(ModuleMgrDlg)
self.ModuleInfoTabWidget.setEnabled(False)
self.ModuleInfoTabWidget.setObjectName("ModuleInfoTabWidget")
self.tab = QtWidgets.QWidget()
self.tab.setObjectName("tab")
self.gridLayout_2 = QtWidgets.QGridLayout(self.tab)
self.gridLayout_2.setContentsMargins(0, 0, 0, 0)
self.gridLayout_2.setSpacing(0)
self.gridLayout_2.setObjectName("gridLayout_2")
self.InfoTableWidget = QtWidgets.QTableWidget(self.tab)
self.InfoTableWidget.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)
self.InfoTableWidget.setGridStyle(QtCore.Qt.SolidLine)
self.InfoTableWidget.setObjectName("InfoTableWidget")
self.InfoTableWidget.setColumnCount(1)
self.InfoTableWidget.setRowCount(4)
item = QtWidgets.QTableWidgetItem()
self.InfoTableWidget.setVerticalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.InfoTableWidget.setVerticalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
self.InfoTableWidget.setVerticalHeaderItem(2, item)
item = QtWidgets.QTableWidgetItem()
self.InfoTableWidget.setVerticalHeaderItem(3, item)
item = QtWidgets.QTableWidgetItem()
self.InfoTableWidget.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.InfoTableWidget.setItem(0, 0, item)
item = QtWidgets.QTableWidgetItem()
self.InfoTableWidget.setItem(1, 0, item)
item = QtWidgets.QTableWidgetItem()
self.InfoTableWidget.setItem(2, 0, item)
item = QtWidgets.QTableWidgetItem()
self.InfoTableWidget.setItem(3, 0, item)
self.InfoTableWidget.horizontalHeader().setVisible(False)
self.InfoTableWidget.horizontalHeader().setStretchLastSection(True)
self.InfoTableWidget.verticalHeader().setDefaultSectionSize(20)
self.InfoTableWidget.verticalHeader().setHighlightSections(False)
self.gridLayout_2.addWidget(self.InfoTableWidget, 0, 0, 1, 1)
self.ModuleInfoTabWidget.addTab(self.tab, "")
self.tab_2 = QtWidgets.QWidget()
self.tab_2.setObjectName("tab_2")
self.gridLayout_3 = QtWidgets.QGridLayout(self.tab_2)
self.gridLayout_3.setContentsMargins(0, 0, 0, 0)
self.gridLayout_3.setSpacing(0)
self.gridLayout_3.setObjectName("gridLayout_3")
self.DelDirButton = QtWidgets.QPushButton(self.tab_2)
self.DelDirButton.setEnabled(False)
self.DelDirButton.setIcon(icon2)
self.DelDirButton.setObjectName("DelDirButton")
self.gridLayout_3.addWidget(self.DelDirButton, 1, 2, 1, 1)
self.NewDirButton = QtWidgets.QPushButton(self.tab_2)
self.NewDirButton.setIcon(icon1)
self.NewDirButton.setObjectName("NewDirButton")
self.gridLayout_3.addWidget(self.NewDirButton, 1, 1, 1, 1)
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_3.addItem(spacerItem, 1, 0, 1, 1)
self.DirsListWidget = QtWidgets.QListWidget(self.tab_2)
self.DirsListWidget.setObjectName("DirsListWidget")
self.gridLayout_3.addWidget(self.DirsListWidget, 0, 0, 1, 3)
self.ModuleInfoTabWidget.addTab(self.tab_2, "")
self.tab_3 = QtWidgets.QWidget()
self.tab_3.setObjectName("tab_3")
self.gridLayout_4 = QtWidgets.QGridLayout(self.tab_3)
self.gridLayout_4.setContentsMargins(0, 0, 0, 0)
self.gridLayout_4.setSpacing(0)
self.gridLayout_4.setObjectName("gridLayout_4")
self.DelFileButton = QtWidgets.QPushButton(self.tab_3)
self.DelFileButton.setEnabled(False)
self.DelFileButton.setIcon(icon2)
self.DelFileButton.setObjectName("DelFileButton")
self.gridLayout_4.addWidget(self.DelFileButton, 1, 2, 1, 1)
self.NewFileButton = QtWidgets.QPushButton(self.tab_3)
self.NewFileButton.setIcon(icon1)
self.NewFileButton.setObjectName("NewFileButton")
self.gridLayout_4.addWidget(self.NewFileButton, 1, 1, 1, 1)
spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_4.addItem(spacerItem1, 1, 0, 1, 1)
self.FilesListWidget = QtWidgets.QListWidget(self.tab_3)
self.FilesListWidget.setObjectName("FilesListWidget")
self.gridLayout_4.addWidget(self.FilesListWidget, 0, 0, 1, 3)
self.ModuleInfoTabWidget.addTab(self.tab_3, "")
self.horizontalLayout.addWidget(self.ModuleInfoTabWidget)
self.horizontalLayout.setStretch(0, 4)
self.horizontalLayout.setStretch(1, 12)
self.retranslateUi(ModuleMgrDlg)
self.ModuleInfoTabWidget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(ModuleMgrDlg)
def retranslateUi(self, ModuleMgrDlg):
_translate = QtCore.QCoreApplication.translate
ModuleMgrDlg.setWindowTitle(_translate("ModuleMgrDlg", "模块管理"))
self.NewModuleButton.setText(_translate("ModuleMgrDlg", "新增模块"))
self.DelModuleButton.setText(_translate("ModuleMgrDlg", "删除模块"))
item = self.InfoTableWidget.verticalHeaderItem(0)
item.setText(_translate("ModuleMgrDlg", "downloadUrl"))
item.setToolTip(_translate("ModuleMgrDlg", "远程文件根目录"))
item = self.InfoTableWidget.verticalHeaderItem(1)
item.setText(_translate("ModuleMgrDlg", "uploadUrl"))
item.setToolTip(_translate("ModuleMgrDlg", "上传根路径"))
item = self.InfoTableWidget.verticalHeaderItem(2)
item.setText(_translate("ModuleMgrDlg", "remoteVersionUrl"))
item.setToolTip(_translate("ModuleMgrDlg", "远程版本文件"))
item = self.InfoTableWidget.verticalHeaderItem(3)
item.setText(_translate("ModuleMgrDlg", "remoteManifestUrl"))
item.setToolTip(_translate("ModuleMgrDlg", "远程清单文件"))
item = self.InfoTableWidget.horizontalHeaderItem(0)
item.setText(_translate("ModuleMgrDlg", "值"))
__sortingEnabled = self.InfoTableWidget.isSortingEnabled()
self.InfoTableWidget.setSortingEnabled(False)
self.InfoTableWidget.setSortingEnabled(__sortingEnabled)
self.ModuleInfoTabWidget.setTabText(self.ModuleInfoTabWidget.indexOf(self.tab), _translate("ModuleMgrDlg", "常规"))
self.DelDirButton.setText(_translate("ModuleMgrDlg", "删除目录"))
self.NewDirButton.setText(_translate("ModuleMgrDlg", "新增目录"))
self.ModuleInfoTabWidget.setTabText(self.ModuleInfoTabWidget.indexOf(self.tab_2), _translate("ModuleMgrDlg", "目录"))
self.DelFileButton.setText(_translate("ModuleMgrDlg", "删除文件"))
self.NewFileButton.setText(_translate("ModuleMgrDlg", "新增文件"))
self.ModuleInfoTabWidget.setTabText(self.ModuleInfoTabWidget.indexOf(self.tab_3), _translate("ModuleMgrDlg", "文件"))
import UI_rc
| 55.023529 | 123 | 0.715843 | 914 | 9,354 | 7.251641 | 0.195842 | 0.059143 | 0.04647 | 0.04481 | 0.272782 | 0.214544 | 0.168678 | 0.119191 | 0.040435 | 0.028365 | 0 | 0.023954 | 0.174364 | 9,354 | 169 | 124 | 55.349112 | 0.834261 | 0.029934 | 0 | 0.058065 | 1 | 0 | 0.070373 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.012903 | false | 0 | 0.012903 | 0 | 0.032258 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
8db06f6b303411c51b7e7ee7f461a4a3b7ef48b6 | 3,941 | py | Python | spowtd/transmissivity.py | alex-cobb/python-spowtd | b841ce63a4ed168a6e1b4e17b689d8be9dc11318 | [
"BSD-2-Clause"
] | null | null | null | spowtd/transmissivity.py | alex-cobb/python-spowtd | b841ce63a4ed168a6e1b4e17b689d8be9dc11318 | [
"BSD-2-Clause"
] | null | null | null | spowtd/transmissivity.py | alex-cobb/python-spowtd | b841ce63a4ed168a6e1b4e17b689d8be9dc11318 | [
"BSD-2-Clause"
] | 2 | 2021-10-14T14:38:43.000Z | 2022-03-21T16:21:06.000Z | """Transmissivity classes
"""
import numpy as np
import scipy.integrate as integrate_mod
import spowtd.spline as spline_mod
def create_transmissivity_function(parameters):
"""Create a transmissivity function
Returns a callable object that returns transmissivity at a given
water level. The class of the object depends on the "type" field
in the parameters provided, and must be either "peatclsm" or
"spline".
"""
if 'type' not in parameters:
raise ValueError(
'"type" field is required in parameters; got {}'
.format(parameters))
sy_type = parameters.pop('type', None)
return {
'peatclsm': PeatclsmTransmissivity,
'spline': SplineTransmissivity
}[sy_type](**parameters)
class SplineTransmissivity:
"""Transmissivity parameterized as a spline of log conductivity
zeta_knots_mm: Sequence of water levels in mm
K_knots: Condutivity values at those water levels
Stores a set of knots representing hydraulic conductivity at water
table heights (relative to surface) zeta. When called, takes a
water table height and returns a transmissivity obtained by linear
interpolation of log-conductivity.
This is an extended value function that returns
minimum_transmissivity below min(zeta) and extrapolates
exponentially or linearly above max(zeta), according to whether
the last two knots have the same or different conductivity.
"""
__slots__ = ['zeta_knots_mm', 'K_knots_km_d',
'minimum_transmissivity_m2_d', '_spline']
def __init__(self, zeta_knots_mm, K_knots_km_d,
minimum_transmissivity_m2_d):
self.zeta_knots_mm = np.asarray(zeta_knots_mm, dtype='float64')
self.K_knots_km_d = np.asarray(K_knots_km_d, dtype='float64')
self.minimum_transmissivity_m2_d = minimum_transmissivity_m2_d
log_K_knots = np.log(K_knots_km_d)
self._spline = spline_mod.Spline.from_points(
zip(zeta_knots_mm, log_K_knots),
order=1)
def conductivity(self, water_level_mm):
assert water_level_mm >= self.zeta_knots_mm.min()
if water_level_mm >= self.zeta_knots_mm.max():
raise NotImplementedError('Extrapolation above highest knot')
return np.exp(self._spline(water_level_mm))
def __call__(self, water_level_mm):
if np.isscalar(water_level_mm):
return self.call_scalar(water_level_mm)
return np.array(
[self.call_scalar(value) for value in water_level_mm],
dtype='float64')
def call_scalar(self, water_level_mm):
"""Compute transmissivity for a scalar argument
"""
if water_level_mm <= self.zeta_knots_mm.min():
return self.minimum_transmissivity_m2_d
return (
self.minimum_transmissivity_m2_d +
integrate_mod.quad(
self.conductivity,
self.zeta_knots_mm.min(),
water_level_mm)[0])
class PeatclsmTransmissivity:
"""Transmissivity function used in PEATCLSM
Computes transmissivity in m^2 / s from water level in mm.
See equation 3 in Apers et al. 2022, JAMES.
"""
__slots__ = ['Ksmacz0', 'alpha', 'zeta_max_cm']
def __init__(self, Ksmacz0, alpha, zeta_max_cm):
self.Ksmacz0 = Ksmacz0
self.alpha = alpha
self.zeta_max_cm = zeta_max_cm
def __call__(self, water_level_mm):
Ksmacz0 = self.Ksmacz0
alpha = self.alpha
zeta_max_cm = self.zeta_max_cm
water_level_mm = np.asarray(water_level_mm)
if (water_level_mm / 10 > zeta_max_cm).any():
raise ValueError('T undefined at water level > {} cm in {}'
.format(zeta_max_cm, water_level_mm / 10))
return (
Ksmacz0 * (zeta_max_cm - water_level_mm / 10) ** (1 - alpha)
) / (100 * (alpha - 1))
| 34.269565 | 73 | 0.664298 | 511 | 3,941 | 4.829746 | 0.295499 | 0.081037 | 0.082658 | 0.058347 | 0.200162 | 0.145867 | 0.091167 | 0.072528 | 0.035656 | 0.035656 | 0 | 0.01296 | 0.256026 | 3,941 | 114 | 74 | 34.570175 | 0.828786 | 0.274042 | 0 | 0.064516 | 0 | 0 | 0.088332 | 0.009815 | 0 | 0 | 0 | 0 | 0.016129 | 1 | 0.112903 | false | 0 | 0.048387 | 0 | 0.33871 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8db1c50adc29a6ffdf16eef7eb3f4db1957e630c | 1,737 | py | Python | python/hillEquations.py | dhlee4/Tinkercell_new | c4d1848bbb905f0e1f9e011837268ac80aff8711 | [
"BSD-3-Clause"
] | 1 | 2021-01-07T13:12:51.000Z | 2021-01-07T13:12:51.000Z | python/hillEquations.py | dhlee4/Tinkercell_new | c4d1848bbb905f0e1f9e011837268ac80aff8711 | [
"BSD-3-Clause"
] | 7 | 2020-04-12T22:25:46.000Z | 2020-04-13T07:50:40.000Z | python/hillEquations.py | daniel-anavaino/tinkercell | 7896a7f809a0373ab3c848d25e3691d10a648437 | [
"BSD-3-Clause"
] | 2 | 2020-04-12T21:57:01.000Z | 2020-04-12T21:59:29.000Z | """
category: Generate kinetics
name: Hill equations
description: automatically generate the equilibrium rate equation for transcription
icon: hillequation.png
menu: yes
specific for: Coding
tool: yes
"""
from tinkercell import *
from tc2py import *
items = tc_selectedItems();
genes = [];
for i in range(0,items.length):
if tc_isA( tc_getItem(items,i),"Coding"):
genes.append( tc_getItem(items,i) );
tc_deleteItemsArray(items);
if (len(genes) > 0):
strList = toStrings(("AND","OR","XOR"));
t = tc_getStringFromList("Select the logical function to approximate:",strList,"Auto");
tc_deleteStringsArray(strList);
if t > -1:
for i in genes:
opnames = [];
opname = "";
promoter = "";
upstream = tc_partsUpstream(i);
for j in range(0,upstream.length):
p = tc_getItem(upstream,j);
if tc_isA(p,"Promoter"):
promoter = tc_getUniqueName(p);
if tc_isA(p,"Operator"):
opname = tc_getUniqueName(p);
if tc_getConnections(p).length > 0:
opnames.append(opname);
rate = "0.0";
if len(promoter) > 0:
if len(opnames) < 1:
rate = promoter + ".strength";
elif len(opnames) == 1:
rate = promoter + ".strength * " + opnames[0];
else:
if t == 0: #AND
rate = " * ".join(opnames);
elif t == 1: #OR
rate = " + ".join(opnames) + " - " + " * ".join(opnames);
elif t == 2: #XOR
rate = " + ".join(opnames) + " - 2 * " + " * ".join(opnames);
rate = promoter + ".strength * (" + rate + ")";
name = tc_getUniqueName(i)
tc_print(name + " has rate : " + rate + "\n");
tc_addForcingFunction(i, name , rate);
else:
tc_print("no promoter found for this coding region\n");
else:
tc_errorReport("please select a coding region");
| 28.47541 | 88 | 0.62061 | 224 | 1,737 | 4.727679 | 0.366071 | 0.051936 | 0.01983 | 0.028329 | 0.096317 | 0.058546 | 0 | 0 | 0 | 0 | 0 | 0.011773 | 0.217617 | 1,737 | 60 | 89 | 28.95 | 0.767476 | 0.11802 | 0 | 0.06383 | 1 | 0 | 0.147929 | 0 | 0.042553 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.042553 | 0 | 0.042553 | 0.042553 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
8db29e40510fc64c7655a39c604ac0d49c03c44b | 795 | py | Python | setup.py | Flowerowl/ici | 7c3209ee0ddfae27bda76f586ac02545364a0c73 | [
"MIT"
] | 204 | 2015-01-03T14:29:43.000Z | 2021-12-15T16:21:28.000Z | setup.py | QQ83076130/ici | 7c3209ee0ddfae27bda76f586ac02545364a0c73 | [
"MIT"
] | 5 | 2015-05-14T10:34:24.000Z | 2017-10-09T15:53:47.000Z | setup.py | QQ83076130/ici | 7c3209ee0ddfae27bda76f586ac02545364a0c73 | [
"MIT"
] | 77 | 2015-01-13T01:44:16.000Z | 2021-12-15T16:21:39.000Z | #encoding:utf-8
from setuptools import setup, find_packages
import sys, os
version = '0.4.3'
setup(name='ici',
version=version,
description="方便程序员在terminal查询生词的小工具",
long_description="""方便程序员在terminal查询生词的小工具""",
classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='python iciba dictionary terminal',
author='yuzhe',
author_email='lazynightz@gmail.com',
url='https://github.com/Flowerowl/ici',
license='',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=False,
install_requires=[
'termcolor',
],
entry_points={
'console_scripts':[
'ici = ici.ici:main'
]
},
)
| 27.413793 | 95 | 0.632704 | 83 | 795 | 5.915663 | 0.759036 | 0.04888 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00813 | 0.226415 | 795 | 28 | 96 | 28.392857 | 0.790244 | 0.108176 | 0 | 0 | 0 | 0 | 0.288543 | 0.062235 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.08 | 0 | 0.08 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8db3a905d27b52ca5f7ab31fe8496b3bc345779b | 24,074 | py | Python | src/son/monitor/son_sp.py | dang03/son-cli | 3e29322d4556f3e02f7b15c43c5e66a1e7e07bd3 | [
"Apache-2.0"
] | 4 | 2017-02-08T22:50:28.000Z | 2018-05-29T07:29:47.000Z | src/son/monitor/son_sp.py | dang03/son-cli | 3e29322d4556f3e02f7b15c43c5e66a1e7e07bd3 | [
"Apache-2.0"
] | 81 | 2016-07-19T13:55:12.000Z | 2021-05-07T15:03:05.000Z | src/son/monitor/son_sp.py | dang03/son-cli | 3e29322d4556f3e02f7b15c43c5e66a1e7e07bd3 | [
"Apache-2.0"
] | 13 | 2016-07-19T13:33:19.000Z | 2019-04-25T08:04:15.000Z | """
Copyright (c) 2015 SONATA-NFV
ALL RIGHTS RESERVED.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.
This work has been performed in the framework of the SONATA project,
funded by the European Commission under Grant number 671517 through
the Horizon 2020 and 5G-PPP programmes. The authors would like to
acknowledge the contributions of their colleagues of the SONATA
partner consortium (www.sonata-nfv.eu).
"""
import logging
from requests import Session, post, get
import websocket
import threading
from subprocess import call, check_output
import json
from son.profile.helper import read_yaml, write_yaml
from prometheus_client import start_http_server, Gauge
import os
import docker
from time import gmtime, strftime
import datetime
"""
This class implements the son-sp commands.
These commands translate to the API's of the SONATA SP
"""
LOG = logging.getLogger('SP_monitor')
LOG.setLevel(level=logging.INFO)
prometheus_stream_port = 8082
prometheus_server_api = 'http://127.0.0.1:9090'
prometheus_config_path = '/tmp/son-monitor/prometheus/prometheus_sdk.yml'
GK_api = 'http://sp.int3.sonata-nfv.eu:32001/api/v2/'
monitor_api = 'http://sp.int3.sonata-nfv.eu:8000/api/v1/'
son_access_config_path = "/home/steven/.son-workspace"
platform_id = 'sp1'
class Service_Platform():
def __init__(self, export_port=8082, GK_api=None, **kwargs):
self.monitor_api = kwargs.get('monitor_api', monitor_api)
self.GK_api = kwargs.get('GK_api', GK_api)
self.son_access_config_path = kwargs.get('son_access_config_path', son_access_config_path)
self.platform_id = kwargs.get('platform_id', platform_id)
# Build up our session
self.session = Session()
self.session.headers = {
"Accept": "application/json; charset=UTF-8"
}
# global parameters needed for the SP_websocket Class
global prometheus_stream_port
prometheus_stream_port = export_port
global prometheus_server_api
prometheus_server_api = kwargs.get('prometheus_server_api', prometheus_server_api)
global prometheus_config_path
prometheus_config_path = kwargs.get('prometheus_config_path', prometheus_config_path)
self.ws_thread = None
# websocket in the SP
self.ws = None
# access token to auth the SDK user
self.access_token = None
def list(self, **kwargs):
# if metric is specified, show the list of VNFs that export this metric
metric = kwargs.get('metric')
if metric :
url = self.monitor_api + 'prometheus/metrics/name/' + metric
ret = self.session.get(url).json().get("metrics").get("result")
else:
url = self.monitor_api + 'prometheus/metrics/list'
resp = self.session.get(url)
ret = resp.json().get('metrics')
return ret
def query(self, **kwargs):
verbose = kwargs.get("verbose", False)
LOG.setLevel(level=logging.INFO)
if verbose:
LOG.setLevel(level=logging.DEBUG)
# periodically refresh token
self._get_token()
service_name = kwargs.get("service")
vnf_name = kwargs.get("vnf_name")
vdu_id = kwargs.get("vdu_id")
vnfc_id = kwargs.get("vnfc_id")
metric = kwargs.get("metric")
since = kwargs.get("since")
until = kwargs.get("until")
metric_list = []
service_desc_uuid = self._get_service_descriptor_uuid(service_name)
vnf_instances = self._get_vnf_instances(service_desc_uuid)
if len(vnf_instances) <= 0:
LOG.warning("found no VNF instances for this service descriptor uuid: {0}".format(service_desc_uuid))
else:
vnf_descriptor_uuid = self._get_VNF_descriptor_uuid(vnf_name)
for vnf_instance_uuid in vnf_instances:
vdu_id, vc_id = self._check_VNF_instance(vnf_instance_uuid, vnf_descriptor_uuid, vdu_id, vnfc_id)
if vc_id:
LOG.info("found VNF: {0} with instance uuid: {2}, vdu_id: {3} vnfc_id: {4} in service: {1} ".format(
vnf_name, service_name, vnf_instance_uuid, vdu_id, vc_id))
metric_list = self._get_async_metric(vnf_instance_uuid, vdu_id, vc_id, metric, since, until)
break
return metric_list
def stream_test(self, **kwargs):
metric = kwargs.get('metric')
vnf_name = kwargs.get('vnf_name')
action = kwargs.get('action', 'start')
if action == 'stop':
SP_websocket._config_prometheus(remove=True)
if self.ws:
self.ws.close()
# kill all running websocket streams
call(['pkill', '-f', 'son-monitor stream'])
return 'websocket closed'
# create the websocket with a filter eg: {"metric":"vm_cpu_perc","filters":["exported_instance":"vtc-vnf"]}
url = self.monitor_api + 'ws/new'
data = {'metric':str(metric), 'filters':str(["exported_instance={}".format(vnf_name)])}
response = self.session.post(url, json=data)
code = response.status_code
if code == 200:
ws_url = response.json().get('ws_url')
LOG.info('ws_url: {}'.format(ws_url))
self.ws = SP_websocket(ws_url, vnf_name=vnf_name, metric=metric)
self.ws_thread = threading.Thread(target=self.ws.run_forever)
self.ws_thread.daemon = True
self.ws_thread.start()
self.ws_thread.join()
return 'websocket thread started'
def stream_auth(self, **kwargs):
"""
call the SONATA Gatekeeper API to request monitoring metrics
:param kwargs:
:return:
"""
verbose = kwargs.get("verbose", False)
LOG.setLevel(level=logging.INFO)
if verbose:
LOG.setLevel(level=logging.DEBUG)
action = kwargs.get('action', 'start')
if action == 'stop':
SP_websocket._config_prometheus(remove=True)
if self.ws:
self.ws.close()
# kill all running websocket streams
LOG.info('closing websocket')
call(['pkill', '-f', 'son-monitor stream'])
LOG.info('websocket closed')
return 'websocket closed'
# periodically refresh token
self._get_token()
service_name = kwargs.get("service","sonata-demo-12")
vnf_name = kwargs.get("vnf_name","vtc-vnf2")
vdu_id = kwargs.get("vdu_id")
vnfc_id = kwargs.get("vnfc_id")
metric = kwargs.get("metric")
ws_url = None
# first lookup if the service name is instantiated
service_desc_uuid = self._get_service_descriptor_uuid(service_name)
# then check if the service has an instance of this VNF
vnf_instances = self._get_vnf_instances(service_desc_uuid)
if len(vnf_instances) <= 0:
LOG.warning("found no VNF instances for this service descriptor uuid: {0}".format(service_desc_uuid))
else:
# get the descriptor uuid of this vnf
vnf_descriptor_uuid = self._get_VNF_descriptor_uuid(vnf_name)
for vnf_instance_uuid in vnf_instances:
# check if this VNF instance has the correct vdu and vnfc
vdu_id, vnfc_id = self._check_VNF_instance(vnf_instance_uuid, vnf_descriptor_uuid, vdu_id, vnfc_id)
if vnfc_id:
LOG.info("found VNF: {0} with instance uuid: {2}, vdu_id: {3} vnfc_id: {4} in service: {1} ".format(
vnf_name, service_name, vnf_instance_uuid, vdu_id, vnfc_id))
ws_url = self._get_ws_url(vnf_instance_uuid, vdu_id, vnfc_id, metric)
break
if not vnfc_id:
return 'No vnfc_id found in the record'
if not ws_url:
return 'No websocket url received'
#ws_url = 'ws://10.30.0.112:8002/ws/98adab175fd64cc4bbe50ae9505fecf6'
self.ws = SP_websocket(ws_url, vnf_name=vnf_name, metric=metric, vm_id=vnfc_id)
self.ws_thread = threading.Thread(target=self.ws.run_forever)
self.ws_thread.daemon = True
self.ws_thread.start()
self.ws_thread.join()
return 'websocket thread started'
# TODO: start background thread to refresh token
def _get_token(self):
# the credentials and token is fetched via son-access, the son-access config path must be given
token_path = os.path.join(self.son_access_config_path, 'platforms', 'token.txt')
output = check_output(['son-access', '-w', self.son_access_config_path, '-p', self.platform_id, 'auth'])
#token_path = workspace_dir + '/' + token_file
with open(token_path, 'r') as token:
self.access_token = token.read()
def _get_VNF_descriptor_uuid(self, vnf_name):
headers = {'Authorization': "Bearer %s" % self.access_token}
url = self.GK_api + "functions"
resp = get(url, headers=headers)
if resp.status_code >= 400:
return 'error: {}'.format(resp.status_code)
functions_list = resp.json()
found_functions = [function.get("uuid") for function in functions_list if function["vnfd"]["name"] == vnf_name]
if len(found_functions) > 1 or len(found_functions) == 0:
LOG.warning("found {0} functions with name: {1}".format(len(found_functions), vnf_name))
return None
else:
uuid = found_functions[0]
LOG.info("found function descriptor of {0} with uuid: {1}".format(vnf_name, uuid))
return uuid
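    # Added illustration (not from the original code): the Gatekeeper
    # "functions" endpoint queried above is assumed to return a list shaped
    # roughly like the following, which is what the vnfd-name filter expects:
    #   [{"uuid": "9f33c8d2-...", "vnfd": {"name": "vtc-vnf", ...}}, ...]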
def _check_VNF_instance(self, vnf_instance_uuid, vnf_descriptor_uuid, vdu_id=None, vnfc_id=None):
headers = {'Authorization': "Bearer %s" % self.access_token}
url = self.GK_api + "records/functions"
resp = get(url, headers=headers)
        if resp.status_code >= 400:
            LOG.warning('error requesting VNF records: {}'.format(resp.status_code))
            return None, None
LOG.debug('request VNF record, url:{0} json:{1}'.format(url, json.dumps(resp.json(), indent=2)))
vnf_list = resp.json()
vnf_list = [vnf for vnf in vnf_list if vnf.get("descriptor_reference") == vnf_descriptor_uuid and vnf.get("uuid") == vnf_instance_uuid]
        if len(vnf_list) > 1:
            LOG.info("found multiple VNF instances with matching uuid: {0}".format(vnf_list))
            return None, None
        elif len(vnf_list) == 0:
            LOG.info("found no VNF instance with matching uuid: {0}".format(vnf_instance_uuid))
            return None, None
# we found 1 matching vnf instance, now check if it has a vdu
LOG.info("found VNF instance with matching uuid: {0}".format(vnf_instance_uuid))
vnf_record = vnf_list[0]
vdu_list = vnf_record["virtual_deployment_units"]
        if vdu_id:
            vdu_list = [vdu for vdu in vdu_list if vdu.get("id") == vdu_id]
        else:
            # pick the first vdu by default
            vdu_list = vdu_list[:1]
        if len(vdu_list) > 1:
            LOG.info("found multiple vdu_ids with matching id: {0} list: {1}".format(vdu_id, vdu_list))
            return None, None
        elif len(vdu_list) == 0:
            LOG.info("found no VDUs with matching id: {0}".format(vdu_id))
            return None, None
        vdu = vdu_list[0]
        vdu_id = vdu["id"]
# we found 1 matching vdu id, now check if it has a vdu instance(vnfc)
LOG.info("found VDU with matching id: {0}".format(vdu_id))
vdu = vdu_list[0]
vnfc_list = vdu["vnfc_instance"]
        if vnfc_id:
            vnfc_list = [vnfc for vnfc in vnfc_list if vnfc.get("id") == vnfc_id]
        else:
            # pick the first vnfc by default
            vnfc_list = vnfc_list[:1]
        if len(vnfc_list) > 1:
            LOG.info("found multiple vnfc_ids with matching id: {0} list: {1}".format(vnfc_id, vnfc_list))
            return None, None
        elif len(vnfc_list) == 0:
            LOG.info("found no VNFCs with matching id: {0}".format(vnfc_id))
            return None, None
        vnfc = vnfc_list[0]
        vnfc_id = vnfc["id"]
vnfc = vnfc_list[0]
vc_id = vnfc["vc_id"]
LOG.info("found VNFC with matching id: {0} and vc_id: {1}".format(vnfc_id, vc_id))
return vdu_id, vc_id
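    # Added illustration (not from the original code): the VNF record walked
    # by _check_VNF_instance above is assumed to look roughly like this
    # (field values are made up):
    #   {"uuid": "<vnf_instance_uuid>",
    #    "descriptor_reference": "<vnf_descriptor_uuid>",
    #    "virtual_deployment_units": [
    #        {"id": "vdu01",
    #         "vnfc_instance": [{"id": "0", "vc_id": "<openstack vm uuid>"}]}]}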
# Get the list of all the service instances registered
def _get_service_instance_list(self):
headers = {'Authorization': "Bearer %s" % self.access_token}
url = self.GK_api + "records/services"
resp = get(url, headers=headers)
LOG.info('request service instance uuid list, url:{0} json:{1}'.format(url, json.dumps(resp.json(), indent=2)))
return resp.text
# Gets a registered service instance
def _get_vnf_instances(self, service_descriptor_uuid):
headers = {'Authorization': "Bearer %s" % self.access_token}
url = self.GK_api + "records/services"
resp = get(url, headers=headers)
if resp.status_code >= 400:
return 'error: {}'.format(resp.status_code)
LOG.debug('request service instances, url:{0} json:{1}'.format(url, json.dumps(resp.json(), indent=2)))
services_list = resp.json()
found_services = [service for service in services_list if service["descriptor_reference"] == service_descriptor_uuid]
if len(found_services) > 1 or len(found_services) == 0 :
LOG.warning("found {0} service instances with descriptor uuid: {1}". format(len(found_services), service_descriptor_uuid))
return []
else:
service = found_services[0]
service_instance_uuid = service["uuid"]
vnfr_list = [vnf.get("vnfr_id") for vnf in service["network_functions"]]
LOG.info("found VNF descriptors: {}".format(json.dumps(vnfr_list,indent=2)))
return vnfr_list
# Obtain the list of services that can be instantiated
def _get_service_descriptor_uuid(self, service_name):
headers = {'Authorization': "Bearer %s" % self.access_token}
url = self.GK_api + "services"
resp = get(url, headers=headers)
if resp.status_code >= 400:
return 'error: {}'.format(resp.status_code)
LOG.debug('request service descriptor uuid, url:{0} json:{1}'.format(url, json.dumps(resp.json(), indent=2)))
services_list = resp.json()
found_services = [service.get("uuid") for service in services_list if service.get("nsd",{}).get("name") == service_name]
if len(found_services) > 1 or len(found_services) == 0 :
LOG.warning("found {0} services with name: {1}". format(len(found_services), service_name))
return None
else:
uuid = found_services[0]
LOG.info("found service descriptor of service: {0} with uuid: {1}".format(service_name, uuid))
return uuid
    # get the websocket url where the metrics will be streamed
def _get_ws_url(self, vnf_instance_uuid, vdu_id, vc_id, metric):
"""
call Gatekeeper API …/functions/metrics/:inst_id/:vdu_id/:vnfc_id/synch-mon-data
A metric is uniquely identified by vnf_instance + vdu_id + vnfc_id.
        A VNF can consist of multiple VDUs; a VNFC is an instance of a VDU.
        The vnfc_id is only unique within the scope of the VNFR/VDU.
:param vnf_instance_uuid: vnf instance uuid of the VNF
:param vdu_id: vdu id in the VNFD of the metric we want to monitor
:param vc_id: vc id in the VNFR of the metric we want to monitor
:param metric:
:return:
"""
headers = {'Authorization': "Bearer %s" % self.access_token}
#url = self.GK_api + "functions/" + function_uuid + "/instances/" + instance_uuid + "/synch-mon-data?metrics=" + \
# metric + "&for=10"
url = self.GK_api + "functions/metrics/" + vnf_instance_uuid + "/" + vdu_id + "/" + vc_id +"/synch-mon-data"
params = {"metrics": metric}
response = get(url, headers=headers, params=params)
code = response.status_code
LOG.debug("url: {}".format(response.url))
LOG.debug("websocket request response: {}".format(response.json()))
if code == 200:
ws_url = response.json().get('ws_url')
LOG.info('ws_url: {}'.format(ws_url))
return ws_url
# Do a query to the SP Prometheus DB
def _get_async_metric(self, vnf_instance_uuid, vdu_id, vc_id, metric, since=None, until=None, step='10s'):
"""
call Gatekeeper API …/functions/metrics/:inst_id/:vdu_id/:vnfc_id/asynch-mon-data
:param vnf_instance_uuid: vnf instance uuid of the VNF
:param vdu_id: vdu id in the VNFD of the metric we want to monitor
        :param vc_id: vc id in the VNFR of the metric we want to monitor
:param metric:
:param since:
:param until:
:return:
"""
# pick some default time values (since 1 min ago until now) (notation eg. 2017-05-05T17:10:22Z)
# The SONATA integration env is UTC time
if not until:
#now = datetime.datetime.now()
now = datetime.datetime.utcnow()
until = now.strftime("%Y-%m-%dT%H:%M:%SZ")
#until = '2017-06-19T10:06:00Z'
if not since:
#now = datetime.datetime.now()
now = datetime.datetime.utcnow()
now_minus_1 = now - datetime.timedelta(minutes=1)
since = now_minus_1.strftime("%Y-%m-%dT%H:%M:%SZ")
#since = '2017-06-19T10:05:00Z'
LOG.info("since: {}".format(since))
LOG.info("until: {}".format(until))
LOG.info("step: {}".format(step))
headers = {'Authorization': "Bearer %s" % self.access_token}
url = self.GK_api + "functions/metrics/" + vnf_instance_uuid + "/" + vdu_id + "/" + vc_id + "/asynch-mon-data"
params = {"metrics":metric,
"since":since,
"until":until,
"step":step}
response = get(url, headers=headers, params=params)
code = response.status_code
LOG.debug("url: {}".format(response.url))
LOG.debug("metric request response: {}".format(response.text))
return response.json()
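    # Added illustration (not from the original code): a typical call to the
    # helper above, with made-up identifiers and an explicit UTC time window:
    #   self._get_async_metric("a1b2c3d4-...", "vdu01", "vc-uuid",
    #                          "vm_cpu_perc",
    #                          since="2017-06-19T10:05:00Z",
    #                          until="2017-06-19T10:06:00Z", step="10s")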
class SP_websocket(websocket.WebSocketApp):
def __init__(self, url, vnf_name=None, metric=None, vm_id=None,
desc='exported metric from SP', print=True):
self.vnf_name = vnf_name
self.metric = metric
self.vc_id = vm_id #the unique identifier of the vm, used by OpenStack
self.desc = desc
self.print = print
self.metric_received = False
self.prometheus_metric = None
websocket.WebSocketApp.__init__(self, url,
on_message=self._on_message,
on_error=self._on_error,
on_close=self._on_close,
on_open=self._on_open
)
def _on_message(self, ws, message):
LOG.info('ws message: {}'.format(message))
metric_list = self.find_metric(message)
# set the metric with the correct labels once, when first value is received
if not self.metric_received:
self.set_exported_metric(metric_list)
if self.metric_received:
for metric in metric_list:
self.prometheus_metric.labels(**metric['labels']).set(metric["value"])
# some info printing
if self.metric_received and self.print \
and self.vnf_name is not None and self.metric is not None:
message = self.filter_output(message)
def _on_error(self, ws, error):
self._config_prometheus(remove=True)
pass
def _on_close(self, ws):
self._config_prometheus(remove=True)
pass
def _on_open(self, ws):
global prometheus_stream_port
# start local http export server
start_http_server(prometheus_stream_port)
# make Prometheus scrape this server
self._config_prometheus()
LOG.info('websocket opened: {}'.format(self.url))
@staticmethod
def _config_prometheus(remove=False):
global prometheus_server_api
global prometheus_config_path
docker_cli = docker.from_env()
# check if containers are already running
c1 = docker_cli.containers.list(filters={'status': 'running', 'name': 'prometheus'})
if len(c1) < 1:
LOG.info('Prometheus is not running')
return "Prometheus DB is not running"
# make Prometheus scrape this server
config_file = read_yaml(prometheus_config_path)
targets = config_file.get('scrape_configs', [])
SP_stream_config = next((target for target in targets if target.get('job_name') == 'SP_stream'), None)
# the SP http server is not yet added to the config file
config_dict = {'job_name': 'SP_stream', 'scrape_interval': '1s',
'static_configs': [{'targets': ['172.17.0.1:{}'.format(prometheus_stream_port)]}]}
if not SP_stream_config and not remove:
config_file['scrape_configs'].append(config_dict)
LOG.info('added SP stream to Prometheus')
elif remove and SP_stream_config:
config_file['scrape_configs'].remove(config_dict)
LOG.info('removed SP stream from Prometheus')
write_yaml(prometheus_config_path, config_file)
post(prometheus_server_api + '/-/reload')
def set_exported_metric(self, metric_list):
for metric in metric_list:
# metric is found and labels are set
metric_name = self.metric
labels = list(metric['labels'])
self.prometheus_metric = Gauge(metric_name, self.desc, labels)
self.metric_received = True
LOG.info('exporting metric with labels: {}'.format(labels))
break
def filter_output(self, message):
data = json.loads(message)
metric_list = data.get(self.metric, [])
metric = {}
for metric in metric_list:
for label in metric.get('labels', []):
if self.vc_id in label:
LOG.info('label: {}'.format(label))
LOG.info('value: {}'.format(metric.get('value')))
LOG.info('time: {}'.format(metric.get('time')))
break
return metric
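    # Added illustration (not from the original code): find_metric below is
    # assumed to receive websocket payloads shaped roughly like this:
    #   {"vm_cpu_perc": [{"labels": ['exported_instance="vtc-vnf"',
    #                                'id="<vc_id>"'],
    #                     "value": 12.3, "time": "2017-06-19T10:05:00Z"}]}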
def find_metric(self, message):
data = json.loads(message)
metric_list = data.get(self.metric, [])
metric_list_out = []
for metric in metric_list:
metric_found = False
labels = {}
LOG.debug('metric found:{}'.format(metric))
for label in metric.get('labels', []):
key, value = label.split('=')
labels[key] = str(value).replace('"','')
if self.vc_id in value:
metric_found = True
if metric_found:
# metric is found and labels are set
value = metric.get('value')
metric = {'labels': labels, "value": value}
metric_list_out.append(metric)
return metric_list_out | 43.533454 | 143 | 0.620005 | 3,160 | 24,074 | 4.537658 | 0.138608 | 0.012204 | 0.020922 | 0.007671 | 0.432039 | 0.40219 | 0.34577 | 0.316898 | 0.29758 | 0.290048 | 0 | 0.013361 | 0.272493 | 24,074 | 553 | 144 | 43.533454 | 0.805013 | 0.169436 | 0 | 0.378238 | 0 | 0.005181 | 0.157024 | 0.010645 | 0 | 0 | 0 | 0.001808 | 0 | 1 | 0.056995 | false | 0.005181 | 0.031088 | 0 | 0.173575 | 0.007772 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8db3db553de8307aa88c5fde47c1bd6250050be2 | 2,246 | py | Python | cogs/miscellaneous/avatar.py | AkshuAgarwal/Aperture-1.7 | c55ffa68d3a4de0daaaad2c918173e5ebca9f006 | [
"MIT"
] | 2 | 2021-09-05T16:42:13.000Z | 2021-09-09T18:41:14.000Z | cogs/miscellaneous/avatar.py | AkshuAgarwal/Aperture-1.7 | c55ffa68d3a4de0daaaad2c918173e5ebca9f006 | [
"MIT"
] | null | null | null | cogs/miscellaneous/avatar.py | AkshuAgarwal/Aperture-1.7 | c55ffa68d3a4de0daaaad2c918173e5ebca9f006 | [
"MIT"
] | null | null | null | from datetime import datetime
from typing import Union
from discord import Member, User, Embed
from discord.ext import commands
from bot.main import NewCommand
class Avatar(commands.Cog):
def __init__(self, client):
self.client = client
@commands.command(
name='avatar',
cls=NewCommand,
aliases=['av'],
brief='That Avatar looks cool!',
description='Get the Avatar of a User',
help="""This command is used to get the Avatar of a User/Member.
The Member should be visible to Me. That means I need to share atleast 1 common Server with the user of whom I need to get the Avatar.""",
usage='[user:name/id/@mention, default:command_invoker]',
explained_usage=["**User:** User whose Avatar you need to get. Can be Name, ID or Mention."],
examples=[
'avatar',
'avatar 764462046032560128',
'avatar @Akshu'
]
)
@commands.cooldown(1, 5, commands.BucketType.member)
async def _avatar(self, ctx, user:Union[User, Member]=None):
if not user:
user = ctx.author
if not user.avatar:
desc = f"> **Download Avatar:**\n> [png]({user.avatar_url})"
elif user.is_avatar_animated() is False:
desc = f"> **Download Avatar:**\n> [webp]({user.avatar_url_as(format='webp')}) | [jpeg]({user.avatar_url_as(format='jpeg')}) | [jpg]({user.avatar_url_as(format='jpg')}) | [png]({user.avatar_url_as(format='png')})"
elif user.is_avatar_animated() is True:
desc = f"> **Download Avatar:**\n> [gif]({user.avatar_url_as(format='gif')}) | [webp]({user.avatar_url_as(format='webp')}) | [jpeg]({user.avatar_url_as(format='jpeg')}) | [jpg]({user.avatar_url_as(format='jpg')}) | [png]({user.avatar_url_as(format='png')})"
embed = Embed(title=f"{user}'s Avatar", description=desc, color=0x00eeff, timestamp=datetime.utcnow())
embed.set_author(name=user, icon_url=user.avatar_url)
embed.set_footer(text=f'Thanks for using {ctx.guild.me.name}', icon_url=ctx.guild.me.avatar_url)
embed.set_image(url=user.avatar_url)
await ctx.reply(embed=embed)
def setup(client):
client.add_cog(Avatar(client)) | 45.836735 | 269 | 0.638023 | 312 | 2,246 | 4.467949 | 0.355769 | 0.093257 | 0.111908 | 0.096844 | 0.283357 | 0.225251 | 0.160689 | 0.160689 | 0.160689 | 0.160689 | 0 | 0.01359 | 0.213713 | 2,246 | 49 | 270 | 45.836735 | 0.775764 | 0 | 0 | 0 | 0 | 0.073171 | 0.427681 | 0.199377 | 0 | 0 | 0.00356 | 0 | 0 | 1 | 0.04878 | false | 0 | 0.121951 | 0 | 0.195122 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8db417da99710aab489ce1f6e872bf13a1887670 | 323 | py | Python | pylinkedin/exceptions.py | johnjoo1/scrape-linkedin | 4860e90e65aa776ce84afa3041a5bd826790ec0a | [
"MIT"
] | 160 | 2016-06-27T12:55:20.000Z | 2021-10-02T12:38:55.000Z | pylinkedin/exceptions.py | johnjoo1/scrape-linkedin | 4860e90e65aa776ce84afa3041a5bd826790ec0a | [
"MIT"
] | 16 | 2017-03-23T08:38:32.000Z | 2020-02-24T22:39:19.000Z | pylinkedin/exceptions.py | johnjoo1/scrape-linkedin | 4860e90e65aa776ce84afa3041a5bd826790ec0a | [
"MIT"
] | 55 | 2017-02-23T14:29:45.000Z | 2021-05-03T09:28:19.000Z | class ProfileNotFound(Exception):
""" Exception if the linkedin url points to the linkedin not found page """
pass
class NotAProfile(Exception):
""" Exception raised if you pass a non linkedin profile as url """
pass
class ServerIpBlacklisted(Exception):
pass
class BadStatusCode(Exception):
pass | 24.846154 | 79 | 0.724458 | 39 | 323 | 6 | 0.564103 | 0.115385 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.201238 | 323 | 13 | 80 | 24.846154 | 0.906977 | 0.393189 | 0 | 0.5 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.5 | 0 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 2 |
8db5993b3ba09fcfb72c92ea6f0805e8ba07d24f | 810 | py | Python | Basics II/Lists2.py | marinaoliveira96/python-exercises | 13fc0ec30dec9bb6531cdeb41c80726971975835 | [
"MIT"
] | null | null | null | Basics II/Lists2.py | marinaoliveira96/python-exercises | 13fc0ec30dec9bb6531cdeb41c80726971975835 | [
"MIT"
] | null | null | null | Basics II/Lists2.py | marinaoliveira96/python-exercises | 13fc0ec30dec9bb6531cdeb41c80726971975835 | [
"MIT"
] | null | null | null | print(isinstance(3, int))
lista = ['marina', 2, 'jujuba']
lista2 = []
for i in lista:
if isinstance(i, str):
lista2.append(i)
print(lista2)
myList = ['marina', 123, 9.5]
print(isinstance(9.5, int))
#strings
items = ['marina', 123, 9.5]
print(isinstance(9.5, float))
str_items = ['abc', 'Abc','def', 'BBBB','ghi', 'AAAA']
str_items.sort(key=str.lower, reverse=True)
print(str_items)
new_items = sorted(str_items)
print(new_items)
#numbers
int_numbers = [123, 13.44, 5436, 324.54, 9034]
int_numbers.sort()
print(f'sort.() = {int_numbers}')
int_numbers.sort(reverse=True)
print(f'sort.(reverse=True) = {int_numbers}')
#this sorted call relates to the list, not to the numbers (it returns a new list)
new_numbers = sorted(int_numbers, reverse=False)
print(f'new numbers = {new_numbers}')
total = sum(int_numbers)
print(total)
| 18.837209 | 54 | 0.691358 | 128 | 810 | 4.257813 | 0.40625 | 0.12844 | 0.036697 | 0.040367 | 0.102752 | 0.102752 | 0.102752 | 0.102752 | 0 | 0 | 0 | 0.055241 | 0.128395 | 810 | 42 | 55 | 19.285714 | 0.716714 | 0.076543 | 0 | 0 | 0 | 0 | 0.173154 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.4 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8db72819dbae785cf03bf81e31c9e2232cea71f2 | 1,283 | py | Python | webapp/config.py | rustprooflabs/psycopg3-connpool | 5576fd89ed986afb24fa2f229d52925e7a6d845c | [
"MIT"
] | 3 | 2021-03-13T14:07:25.000Z | 2022-03-12T01:51:49.000Z | webapp/config.py | rustprooflabs/psycopg3-connpool | 5576fd89ed986afb24fa2f229d52925e7a6d845c | [
"MIT"
] | 1 | 2021-09-12T15:03:12.000Z | 2021-09-12T15:03:12.000Z | webapp/config.py | rustprooflabs/psycopg3-connpool | 5576fd89ed986afb24fa2f229d52925e7a6d845c | [
"MIT"
] | null | null | null | import os
import logging
import sys
APP_NAME = 'psycopg3-connpool'
# Set to False to force reporting queries to share pool with non-reporting queries
REPORTING_POOL = True
POOL_MIN_SIZE = 1
POOL_MAX_SIZE = 10
POOL_MAX_IDLE = 60
POOL_STAT_SLEEP = 300
if not REPORTING_POOL:
    POOL_MAX_SIZE += 5
CURR_PATH = os.path.abspath(os.path.dirname(__file__))
PROJECT_BASE_PATH = os.path.abspath(os.path.join(CURR_PATH, os.pardir))
try:
LOG_PATH = os.environ['LOG_PATH']
except KeyError:
LOG_PATH = PROJECT_BASE_PATH + '/webapp.log'
# Required for CSRF protection in Flask, please change to something secret!
try:
APP_SECRET_KEY = os.environ['APP_SECRET_KEY']
except KeyError:
ERR_MSG = '\nSECURITY WARNING: To ensure security please set the APP_SECRET_KEY'
ERR_MSG += ' environment variable.\n'
#LOGGER.warning(ERR_MSG)
print(ERR_MSG)
APP_SECRET_KEY = 'S$332sgajg9GHKL14jklsjfkjasglmssajfsdgGADAAJj77j@neHMld'
try:
DATABASE_STRING = os.environ['PG_CONN']
except KeyError:
key_msg = 'Database environment variable not set. Need PG_CONN string'
sys.exit(key_msg)
try:
APP_DEBUG_RAW = os.environ['APP_DEBUG']
if APP_DEBUG_RAW == 'False':
APP_DEBUG = False
else:
APP_DEBUG = True
except KeyError:
APP_DEBUG = False
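
# Added usage sketch (not part of the original config module): another module
# could build a psycopg3 pool from these settings; the psycopg_pool wiring
# below is an assumption, not something this project necessarily does.
#
#   from psycopg_pool import ConnectionPool
#   from webapp import config
#
#   pool = ConnectionPool(config.DATABASE_STRING,
#                         min_size=config.POOL_MIN_SIZE,
#                         max_size=config.POOL_MAX_SIZE,
#                         max_idle=config.POOL_MAX_IDLE)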
| 23.759259 | 84 | 0.731878 | 189 | 1,283 | 4.698413 | 0.433862 | 0.054054 | 0.054054 | 0.038288 | 0.051802 | 0.051802 | 0 | 0 | 0 | 0 | 0 | 0.01711 | 0.180047 | 1,283 | 53 | 85 | 24.207547 | 0.826996 | 0.137958 | 0 | 0.277778 | 0 | 0 | 0.251589 | 0.049955 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.055556 | 0 | 0.055556 | 0.027778 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8db775d1dd9bd4e0cb2fd047201ecfb22122ee56 | 247 | py | Python | toughradius/common/__init__.py | capitek-wangsj/toughradius | ee0e6c20d32262ff7a6ace653af5a78340db62a2 | [
"Apache-2.0"
] | null | null | null | toughradius/common/__init__.py | capitek-wangsj/toughradius | ee0e6c20d32262ff7a6ace653af5a78340db62a2 | [
"Apache-2.0"
] | null | null | null | toughradius/common/__init__.py | capitek-wangsj/toughradius | ee0e6c20d32262ff7a6ace653af5a78340db62a2 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
class ObjectDict(dict):
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
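    # Added note (illustrative, with made-up keys): given
    #   d = ObjectDict(username='alice')
    # d.username returns 'alice' through __getattr__, and a missing key such
    # as d.secret raises AttributeError rather than KeyError.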
def __setattr__(self, name, value):
self[name] = value | 24.7 | 39 | 0.587045 | 27 | 247 | 5.074074 | 0.666667 | 0.233577 | 0.189781 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005917 | 0.315789 | 247 | 10 | 40 | 24.7 | 0.804734 | 0.052632 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | false | 0 | 0 | 0 | 0.5 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 |
8db868f6631d93b30648549794d251ef271627af | 3,695 | py | Python | mnist.py | xiaoxinyi/tfrecord | 6f39e3dbd5b1ffb3df8636b3163dbe2161469075 | [
"Apache-2.0"
] | null | null | null | mnist.py | xiaoxinyi/tfrecord | 6f39e3dbd5b1ffb3df8636b3163dbe2161469075 | [
"Apache-2.0"
] | null | null | null | mnist.py | xiaoxinyi/tfrecord | 6f39e3dbd5b1ffb3df8636b3163dbe2161469075 | [
"Apache-2.0"
] | null | null | null | import os
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.examples.tutorials.mnist import mnist
TRAIN_FILE = 'train.tfrecords'
VALIDATION_FILE = 'train.tfrecords'
def lenet(images):
net = slim.layers.conv2d(images, 20, [5,5], scope='conv1')
net = slim.layers.max_pool2d(net, [2,2], scope='pool1')
net = slim.layers.conv2d(net, 50, [5,5], scope='conv2')
net = slim.layers.max_pool2d(net, [2,2], scope='pool2')
net = slim.layers.flatten(net, scope='flatten3')
net = slim.layers.fully_connected(net, 500, scope='fully_connected4')
net = slim.layers.fully_connected(net, 10, activation_fn=None, scope='fully_connected5')
return net
def read_and_decode(filename_queue):
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
features = tf.parse_single_example(
serialized_example,
# Defaults are not specified since both keys are required.
features={
'image_raw': tf.FixedLenFeature([], tf.string),
'label': tf.FixedLenFeature([], tf.int64),
})
# Convert from a scalar string tensor (whose single string has
# length mnist.IMAGE_PIXELS) to a uint8 tensor with shape
# [mnist.IMAGE_PIXELS].
image = tf.decode_raw(features['image_raw'], tf.uint8)
image.set_shape([mnist.IMAGE_PIXELS])
image = tf.cast(image, tf.float32) * (1. / 255) - 0.5
image = tf.reshape(image, [mnist.IMAGE_SIZE, mnist.IMAGE_SIZE, 1])
# OPTIONAL: Could reshape into a 28x28 image and apply distortions
# here. Since we are not applying any distortions in this
# example, and the next step expects the image to be flattened
# into a vector, we don't bother.
# Convert label from a scalar uint8 tensor to an int32 scalar.
label = tf.cast(features['label'], tf.int32)
return image, label
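
# Added wiring sketch (not part of the original file; paths and
# hyper-parameters are made up, TF 1.x / slim APIs assumed from the imports):
#   images, labels = inputs('data', train=True, batch_size=32, num_epochs=2)
#   logits = lenet(images)
#   loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
# inputs() is defined directly below; lenet() is defined above.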
def inputs(train_dir, train, batch_size, num_epochs, one_hot_labels=False):
"""Reads input data num_epochs times.
Args:
train: Selects between the training (True) and validation (False) data.
batch_size: Number of examples per returned batch.
num_epochs: Number of times to read the input data, or 0/None to
train forever.
Returns:
A tuple (images, labels), where:
* images is a float tensor with shape [batch_size, mnist.IMAGE_PIXELS]
in the range [-0.5, 0.5].
* labels is an int32 tensor with shape [batch_size] with the true label,
a number in the range [0, mnist.NUM_CLASSES).
Note that an tf.train.QueueRunner is added to the graph, which
must be run using e.g. tf.train.start_queue_runners().
"""
if not num_epochs: num_epochs = None
filename = os.path.join(train_dir,
TRAIN_FILE if train else VALIDATION_FILE)
with tf.name_scope('input'):
filename_queue = tf.train.string_input_producer(
[filename], num_epochs=num_epochs)
# Even when reading in multiple threads, share the filename
# queue.
image, label = read_and_decode(filename_queue)
if one_hot_labels:
label = tf.one_hot(label, mnist.NUM_CLASSES, dtype=tf.int32)
# Shuffle the examples and collect them into batch_size batches.
# (Internally uses a RandomShuffleQueue.)
# We run this in two threads to avoid being a bottleneck.
images, sparse_labels = tf.train.shuffle_batch(
[image, label], batch_size=batch_size, num_threads=2,
capacity=1000 + 3 * batch_size,
# Ensures a minimum amount of shuffling of examples.
min_after_dequeue=1000)
return images, sparse_labels
| 39.731183 | 91 | 0.673884 | 518 | 3,695 | 4.673745 | 0.372587 | 0.02974 | 0.037588 | 0.015696 | 0.115655 | 0.074349 | 0.026435 | 0.026435 | 0.026435 | 0 | 0 | 0.024613 | 0.230311 | 3,695 | 92 | 92 | 40.163043 | 0.826653 | 0.381867 | 0 | 0 | 0 | 0 | 0.055732 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.088889 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8db9b3ff0897bce11d0fb7fc945e79ab18d1a305 | 2,635 | py | Python | setup.py | BenFrankel/hgf | 78ec6a1e4eaa62005cc3914e8a554d2f1401ac37 | [
"Apache-2.0"
] | null | null | null | setup.py | BenFrankel/hgf | 78ec6a1e4eaa62005cc3914e8a554d2f1401ac37 | [
"Apache-2.0"
] | 2 | 2017-12-27T17:38:18.000Z | 2017-12-27T17:42:10.000Z | setup.py | BenFrankel/hgf | 78ec6a1e4eaa62005cc3914e8a554d2f1401ac37 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
###############################################################################
# #
# Copyright 2017 - Ben Frankel #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
###############################################################################
from setuptools import setup, find_packages
version = '0.2.2'
with open('README.md') as f:
long_description = f.read()
setup(
name='hgf',
version=version,
description='A framework for building hierarchical GUIs',
long_description=long_description,
author='Ben Frankel',
author_email='ben.frankel7@gmail.com',
license='Apache 2.0',
url='https://www.github.com/BenFrankel/hgf',
download_url='https://www.github.com/BenFrankel/hgf/tarball/' + version,
keywords='hgf hierarchical gui framework',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: pygame',
],
packages=find_packages(),
install_requires=[
'pygame (>=1.9.1)',
'pyperclip (>=1.6.0)',
],
provides=['hgf']
)
| 43.196721 | 83 | 0.447059 | 218 | 2,635 | 5.366972 | 0.59633 | 0.051282 | 0.022222 | 0.02735 | 0.05641 | 0.05641 | 0.05641 | 0 | 0 | 0 | 0 | 0.014085 | 0.407211 | 2,635 | 60 | 84 | 43.916667 | 0.734955 | 0.4 | 0 | 0.0625 | 0 | 0 | 0.466562 | 0.017309 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.03125 | 0 | 0.03125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8db9c18f5e2082b747c4a03ec17f797125c796c9 | 605 | py | Python | tools/ml/get_email.py | Xowap/Maiznet | bd564d4c93eb28dc87135e9d31dad9a921ea8cf6 | [
"WTFPL"
] | 1 | 2015-05-04T09:28:14.000Z | 2015-05-04T09:28:14.000Z | tools/ml/get_email.py | Xowap/Maiznet | bd564d4c93eb28dc87135e9d31dad9a921ea8cf6 | [
"WTFPL"
] | null | null | null | tools/ml/get_email.py | Xowap/Maiznet | bd564d4c93eb28dc87135e9d31dad9a921ea8cf6 | [
"WTFPL"
] | null | null | null | #!/usr/bin/python
from django.core.management import setup_environ
import sys
sys.path.append('/var/wsgi/maiznet')
sys.path.append('/var/wsgi')
from maiznet import settings
setup_environ(settings)
from maiznet.register.models import Presence
wfile_announces = open("/var/wsgi/maiznet/tools/ml/emails_announces","w")
wfile_talkings = open("/var/wsgi/maiznet/tools/ml/emails_talkings","w")
presence = Presence.objects.all()
for p in presence :
if p.talkings==1 :
wfile_talkings.write(p.user.email + "\n")
wfile_announces.write(p.user.email + "\n")
wfile_announces.close()
wfile_talkings.close()
| 23.269231 | 73 | 0.760331 | 90 | 605 | 5 | 0.444444 | 0.062222 | 0.093333 | 0.071111 | 0.36 | 0.271111 | 0.271111 | 0 | 0 | 0 | 0 | 0.001818 | 0.090909 | 605 | 25 | 74 | 24.2 | 0.816364 | 0.026446 | 0 | 0 | 0 | 0 | 0.19898 | 0.144558 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8dbb58ecd3a0dc98c548334dbc3b8ca871d442dd | 1,368 | py | Python | tests/functional/test_actions.py | AKhodus/adcm | 98dbf22af3f1c6afa94505e9acaff0ac4088a602 | [
"Apache-2.0"
] | null | null | null | tests/functional/test_actions.py | AKhodus/adcm | 98dbf22af3f1c6afa94505e9acaff0ac4088a602 | [
"Apache-2.0"
] | null | null | null | tests/functional/test_actions.py | AKhodus/adcm | 98dbf22af3f1c6afa94505e9acaff0ac4088a602 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import allure
import pytest
from adcm_client.objects import ADCMClient
from adcm_pytest_plugin import utils
from tests.ui_tests.test_actions_page import check_verbosity
@pytest.mark.parametrize("verbose_state", [True, False], ids=["verbose_state_true", "verbose_state_false"])
def test_check_verbose_option_of_action_run(sdk_client_fs: ADCMClient, verbose_state):
"""Test action run with verbose switch"""
bundle_dir = utils.get_data_dir(__file__, "verbose_state")
bundle = sdk_client_fs.upload_from_fs(bundle_dir)
cluster = bundle.cluster_create(utils.random_string())
task = cluster.action(name="dummy_action").run(verbose=verbose_state)
with allure.step(f"Check if verbosity is {verbose_state}"):
task.wait()
log = task.job().log()
check_verbosity(log, verbose_state)
| 42.75 | 107 | 0.765351 | 202 | 1,368 | 4.980198 | 0.534653 | 0.095427 | 0.025845 | 0.031809 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003431 | 0.147661 | 1,368 | 31 | 108 | 44.129032 | 0.859348 | 0.408626 | 0 | 0 | 0 | 0 | 0.141236 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.333333 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
8dbbf348283d5d908174cdee1b01595e478b5b7d | 11,798 | py | Python | src/network/graph_module.py | andrewliao11/env-aware-program-gen | bc50b788c35e8e8545b8af9127c279a7387146d6 | [
"MIT"
] | 5 | 2019-08-17T07:53:02.000Z | 2022-02-26T07:17:37.000Z | src/network/graph_module.py | andrewliao11/env-aware-program-gen | bc50b788c35e8e8545b8af9127c279a7387146d6 | [
"MIT"
] | 9 | 2019-06-28T07:36:10.000Z | 2022-03-11T23:48:39.000Z | src/network/graph_module.py | andrewliao11/env-aware-program-gen | bc50b788c35e8e8545b8af9127c279a7387146d6 | [
"MIT"
] | 1 | 2020-04-14T12:48:40.000Z | 2020-04-14T12:48:40.000Z | import torch
import torch.nn as nn
from program.graph_utils import *
from helper import fc_block, LayerNormGRUCell
# helper class for GraphEncoder
class AttrProxy(object):
"""
Translates index lookups into attribute lookups.
To implement some trick which able to use list of nn.Module in a nn.Module
see https://discuss.pytorch.org/t/list-of-nn-module-in-a-nn-module/219/2
"""
def __init__(self, module, prefix):
self.module = module
self.prefix = prefix
def __getitem__(self, i):
return getattr(self.module, self.prefix + str(i))
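
# Added illustration (not from the original file): after an encoder registers
# sub-layers with self.add_module("hidden2message_in_0", layer), the proxy
#   proxy = AttrProxy(encoder, "hidden2message_in_")
#   layer0 = proxy[0]   # same as getattr(encoder, "hidden2message_in_0")
# lets indexed access stand in for a list of sub-modules.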
class VanillaGraphEncoder(nn.Module):
def __init__(
self,
n_timesteps,
n_edge_types,
graph_hidden,
embedding_dim,
hidden):
super(VanillaGraphEncoder, self).__init__()
layernorm = True
self.n_timesteps = n_timesteps
self.n_edge_types = n_edge_types
self.embedding_dim = embedding_dim
self.input_dim = n_edge_types + embedding_dim
self.graph_hidden = graph_hidden
node_init2hidden = nn.Sequential()
node_init2hidden.add_module(
'fc1',
fc_block(
3 * embedding_dim,
graph_hidden,
False,
nn.Tanh))
node_init2hidden.add_module(
'fc2',
fc_block(
graph_hidden,
graph_hidden,
False,
nn.Tanh))
for i in range(n_edge_types):
hidden2message_in = fc_block(
graph_hidden, graph_hidden, False, nn.Tanh)
self.add_module(
"hidden2message_in_{}".format(i),
hidden2message_in)
hidden2message_out = fc_block(
graph_hidden, graph_hidden, False, nn.Tanh)
self.add_module(
"hidden2message_out_{}".format(i),
hidden2message_out)
if layernorm:
self.gru_cell = LayerNormGRUCell
else:
self.gru_cell = nn.GRUCell
propagator = self.gru_cell(
input_size=2 * n_edge_types * graph_hidden,
hidden_size=graph_hidden)
self.node_init2hidden = node_init2hidden
self.hidden2message_in = AttrProxy(self, "hidden2message_in_")
self.hidden2message_out = AttrProxy(self, "hidden2message_out_")
self.propagator = propagator
def forward(
self,
edge_adjacency_matrix,
node_state_prev,
related_mask=None):
"""edge_adjacency_matrix: e, b, v, v
object_state_arry: b, v, p
state: b, v, h
"""
B, V, H = node_state_prev.size()
node_state_prev = node_state_prev.view(B * V, -1)
node_state = node_state_prev
edge_adjacency_matrix = edge_adjacency_matrix.float()
edge_adjacency_matrix_out = edge_adjacency_matrix
# convert the outgoing edges to incoming edges
edge_adjacency_matrix_in = edge_adjacency_matrix.permute(0, 1, 3, 2)
for i in range(self.n_timesteps):
message_out = []
for j in range(self.n_edge_types):
node_state_hidden = self.hidden2message_out[j](
node_state) # b*v, h
node_state_hidden = node_state_hidden.view(B, V, -1)
message_out.append(
torch.bmm(
edge_adjacency_matrix_out[j],
node_state_hidden)) # b, v, h
# concatenate the message from each edges
message_out = torch.stack(message_out, 2) # b, v, e, h
message_out = message_out.view(B * V, -1) # b, v, e*h
message_in = []
for j in range(self.n_edge_types):
node_state_hidden = self.hidden2message_in[j](
node_state) # b*v, h
node_state_hidden = node_state_hidden.view(B, V, -1)
message_in.append(
torch.bmm(
edge_adjacency_matrix_in[j],
node_state_hidden))
# concatenate the message from each edges
message_in = torch.stack(message_in, 2) # b, v, e, h
message_in = message_in.view(B * V, -1) # b, v, e*h
message = torch.cat([message_out, message_in], 1)
node_state = self.propagator(message, node_state)
if related_mask is not None:
# mask out un-related changes
related_mask_expand = related_mask.unsqueeze(
2).repeat(1, 1, self.graph_hidden).float()
related_mask_expand = related_mask_expand.view(B * V, -1)
node_state = node_state * related_mask_expand + \
node_state_prev * (-related_mask_expand + 1)
node_state = node_state.view(B, V, -1)
return node_state
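    # Added shape illustration (not from the original file; sizes made up):
    #   enc = VanillaGraphEncoder(n_timesteps=2, n_edge_types=3,
    #                             graph_hidden=64, embedding_dim=32, hidden=128)
    #   adj = torch.zeros(3, 4, 20, 20)   # e, b, v, v
    #   h0 = torch.zeros(4, 20, 64)       # b, v, graph_hidden
    #   h1 = enc(adj, h0)                 # -> (4, 20, 64)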
class ResidualActionGraphEncoder(VanillaGraphEncoder):
def __init__(
self,
n_edge_types,
n_touch,
graph_hidden,
embedding_dim,
hidden):
super(
ResidualActionGraphEncoder,
self).__init__(
0,
n_edge_types,
graph_hidden,
embedding_dim,
hidden)
self.n_touch = n_touch
action2hidden = nn.Sequential()
action2hidden.add_module(
'fc1',
fc_block(
embedding_dim + n_touch,
graph_hidden,
False,
nn.Tanh))
action2hidden.add_module(
'fc2',
fc_block(
graph_hidden,
graph_hidden,
False,
nn.Tanh))
compute_residual = nn.Sequential()
compute_residual.add_module(
'fc1',
fc_block(
2 * graph_hidden,
graph_hidden,
False,
nn.Tanh))
compute_residual.add_module(
'fc2',
fc_block(
graph_hidden,
graph_hidden,
False,
nn.Tanh))
self.compute_residual = compute_residual
self.action2hidden = action2hidden
def action_applier(
self,
action_embedding,
batch_touch_idx,
batch_node_state_prev,
batch_touch_mask):
"""
action_embedding: b, emb
batch_touch_idx: b, n, touch_type,
batch_node_state_prev: b, n, h
batch_touch_mask: b, n
"""
B, N, _ = batch_touch_idx.size()
action_embedding = action_embedding.unsqueeze(1).repeat(1, N, 1)
graph_input = torch.cat([action_embedding, batch_touch_idx], 2)
graph_input = self.action2hidden(graph_input)
graph_input = graph_input.view(B * N, -1)
batch_node_state_prev = batch_node_state_prev.view(B * N, -1)
residual = self.compute_residual(
torch.cat([graph_input, batch_node_state_prev], 1))
batch_touch_mask = batch_touch_mask.unsqueeze(
2).repeat(1, 1, self.graph_hidden)
batch_touch_mask = batch_touch_mask.view(B * N, -1)
batch_node_state = batch_node_state_prev + residual * batch_touch_mask
batch_node_state = batch_node_state.view(B, N, -1)
return batch_node_state
class FCActionGraphEncoder(VanillaGraphEncoder):
def __init__(
self,
n_edge_types,
n_touch,
graph_hidden,
embedding_dim,
hidden):
super(FCActionGraphEncoder,
self).__init__(
0,
n_edge_types,
graph_hidden,
embedding_dim,
hidden)
self.n_touch = n_touch
action2hidden = nn.Sequential()
action2hidden.add_module('fc1', fc_block(embedding_dim + n_touch, graph_hidden, False, nn.Tanh))
action2hidden.add_module('fc2', fc_block(graph_hidden, graph_hidden, False, nn.Tanh))
compute_residual = nn.Sequential()
compute_residual.add_module('fc1', fc_block(2*graph_hidden, graph_hidden, False, nn.Tanh))
compute_residual.add_module('fc2', fc_block(graph_hidden, graph_hidden, False, nn.Tanh))
self.compute_residual = compute_residual
self.action2hidden = action2hidden
def action_applier(
self,
action_embedding,
batch_touch_idx,
batch_node_state_prev,
batch_touch_mask):
"""
action_embedding: b, emb
batch_touch_idx: b, n, touch_type,
batch_node_state_prev: b, n, h
batch_touch_mask: b, n
"""
B, N, _ = batch_touch_idx.size()
action_embedding = action_embedding.unsqueeze(1).repeat(1, N, 1)
graph_input = torch.cat([action_embedding, batch_touch_idx], 2)
graph_input = self.action2hidden(graph_input)
graph_input = graph_input.view(B * N, -1)
batch_node_state_prev = batch_node_state_prev.view(B * N, -1)
batch_node_state = self.compute_residual(torch.cat([graph_input, batch_node_state_prev], 1))
batch_touch_mask = batch_touch_mask.unsqueeze(2).repeat(1, 1, self.graph_hidden)
batch_touch_mask = batch_touch_mask.view(B * N, -1)
batch_node_state = batch_node_state * batch_touch_mask + batch_node_state_prev * (-batch_touch_mask + 1)
batch_node_state = batch_node_state.view(B, N, -1)
return batch_node_state
class GRUActionGraphEncoder(VanillaGraphEncoder):
def __init__(
self,
n_edge_types,
n_touch,
graph_hidden,
embedding_dim,
hidden):
super(
GRUActionGraphEncoder,
self).__init__(
0,
n_edge_types,
graph_hidden,
embedding_dim,
hidden)
self.n_touch = n_touch
action2hidden = nn.Sequential()
action2hidden.add_module('fc1', fc_block(embedding_dim + n_touch, graph_hidden, False, nn.Tanh))
action2hidden.add_module('fc2', fc_block(graph_hidden, graph_hidden, False, nn.Tanh))
temporal_propagator = self.gru_cell(input_size=graph_hidden, hidden_size=graph_hidden)
self.temporal_propagator = temporal_propagator
self.action2hidden = action2hidden
def action_applier(
self,
action_embedding,
batch_touch_idx,
batch_node_state_prev,
batch_touch_mask):
"""
action_embedding: b, emb
batch_touch_idx: b, n, touch_type,
batch_node_state_prev: b, n, h
batch_touch_mask: b, n
"""
B, N, _ = batch_touch_idx.size()
action_embedding = action_embedding.unsqueeze(1).repeat(1, N, 1)
graph_input = torch.cat([action_embedding, batch_touch_idx], 2)
graph_input = self.action2hidden(graph_input)
graph_input = graph_input.view(B * N, -1)
batch_node_state_prev = batch_node_state_prev.view(B * N, -1)
batch_node_state = self.temporal_propagator(graph_input, batch_node_state_prev)
batch_touch_mask = batch_touch_mask.unsqueeze(2).repeat(1, 1, self.graph_hidden)
batch_touch_mask = batch_touch_mask.view(B * N, -1)
batch_node_state = batch_node_state * batch_touch_mask + batch_node_state_prev * (-batch_touch_mask + 1)
batch_node_state = batch_node_state.view(B, N, -1)
return batch_node_state
| 32.323288 | 112 | 0.577386 | 1,356 | 11,798 | 4.651917 | 0.100295 | 0.082752 | 0.07546 | 0.051363 | 0.717185 | 0.683893 | 0.654883 | 0.627616 | 0.60558 | 0.594008 | 0 | 0.014564 | 0.342346 | 11,798 | 364 | 113 | 32.412088 | 0.798428 | 0.073233 | 0 | 0.647287 | 0 | 0 | 0.010682 | 0.001968 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03876 | false | 0 | 0.015504 | 0.003876 | 0.093023 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8dbd46f30d6dfa6a1507fd5a331fbdfbe41f94f6 | 744 | py | Python | adam_io/utils.py | barcesat/adam_io | c9b36a696dca2338de02925cc9af78bc59b55fce | [
"MIT"
] | null | null | null | adam_io/utils.py | barcesat/adam_io | c9b36a696dca2338de02925cc9af78bc59b55fce | [
"MIT"
] | null | null | null | adam_io/utils.py | barcesat/adam_io | c9b36a696dca2338de02925cc9af78bc59b55fce | [
"MIT"
] | null | null | null | # check with status codes
# exceptions
# check 'error' and 'status'
from socket import inet_pton, inet_aton, AF_INET, error
def valid_ipv4(address: str):
"""
:param address: ip address string
:return: if the address is a valid dotted ipv4 address
"""
try:
inet_pton(AF_INET, address)
except AttributeError:
try:
inet_aton(address)
except error:
return False
return address.count('.') == 3
except error:
return False
return True
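
# Added doctest-style illustration (not part of the original module):
#   >>> valid_ipv4("192.168.1.10")
#   True
#   >>> valid_ipv4("not-an-address")
#   False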
class URI:
DIGITAL_INPUT = "/digitalinput"
DIGITAL_OUTPUT = "/digitaloutput"
ANALOG_INPUT = "/analoginput"
ANALOG_OUTPUT = "/analogoutput"
ALL = "/all"
VALUE = "/value"
RANGE = "/range"
| 21.257143 | 58 | 0.61828 | 85 | 744 | 5.282353 | 0.564706 | 0.035635 | 0.075724 | 0.097996 | 0.124722 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00565 | 0.28629 | 744 | 34 | 59 | 21.882353 | 0.839925 | 0.202957 | 0 | 0.285714 | 0 | 0 | 0.120841 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0.047619 | 0 | 0.666667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
8dbf72711729ecd397489eebb67c03ca4962e12f | 182 | py | Python | core/__init__.py | xmings/IdeaNote | a538d4bf012255a19583f9acc57576c44105283e | [
"Apache-2.0"
] | 25 | 2019-11-13T03:35:34.000Z | 2022-03-26T04:13:50.000Z | core/__init__.py | xmings/IdeaNote | a538d4bf012255a19583f9acc57576c44105283e | [
"Apache-2.0"
] | 3 | 2019-11-04T06:23:35.000Z | 2021-02-08T07:21:04.000Z | core/__init__.py | xmings/IdeaNote | a538d4bf012255a19583f9acc57576c44105283e | [
"Apache-2.0"
] | null | null | null | #!/bin/python
# -*- coding: utf-8 -*-
# @File : __init__.py
# @Author: wangms
# @Date : 2018/8/7
from flask import Blueprint
core = Blueprint('core', __name__)
from . import view | 18.2 | 34 | 0.648352 | 25 | 182 | 4.4 | 0.8 | 0.236364 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.046667 | 0.175824 | 182 | 10 | 35 | 18.2 | 0.686667 | 0.489011 | 0 | 0 | 0 | 0 | 0.044944 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.666667 | 0 | 0.666667 | 0.666667 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 0 | 5 |
8dc0d85e7869f55ab1de18e5ff3544c306008e2b | 3,241 | py | Python | doc/examples/doc_code/raysgd_torch_signatures.py | thedrow/ray | 584645cc7da2bfd7d341d52b59c9c8561dbd119b | [
"Apache-2.0"
] | 1 | 2020-02-25T08:43:46.000Z | 2020-02-25T08:43:46.000Z | doc/examples/doc_code/raysgd_torch_signatures.py | mwbrulhardt/ray | b97b8c2be1c7d01e2c93ca97a0b87120bfa2bd1a | [
"Apache-2.0"
] | 1 | 2019-03-16T07:08:57.000Z | 2019-03-16T07:08:57.000Z | doc/examples/doc_code/raysgd_torch_signatures.py | gehring/ray | d8eeb9641314740572e81f9836cbce3e5b8f2b73 | [
"Apache-2.0"
] | null | null | null | # flake8: noqa
"""
This file holds code for the Pytorch Trainer creator signatures.
It ignores yapf because yapf doesn't allow comments right after code blocks,
but we put comments right after code blocks to prevent large white spaces
in the documentation.
"""
# yapf: disable
# __torch_model_start__
import torch.nn as nn
def model_creator(config):
"""Constructor function for the model(s) to be optimized.
You will also need to provide a custom training
function to specify the optimization procedure for multiple models.
Args:
config (dict): Configuration dictionary passed into ``PyTorchTrainer``.
Returns:
One or more torch.nn.Module objects.
"""
return nn.Linear(1, 1)
# __torch_model_end__
# __torch_optimizer_start__
import torch
def optimizer_creator(model, config):
"""Constructor of one or more Torch optimizers.
Args:
models: The return values from ``model_creator``. This can be one
or more torch nn modules.
config (dict): Configuration dictionary passed into ``PyTorchTrainer``.
Returns:
One or more Torch optimizer objects.
"""
return torch.optim.SGD(model.parameters(), lr=config.get("lr", 1e-4))
# __torch_optimizer_end__
# __torch_data_start__
from ray.util.sgd.pytorch.examples.train_example import LinearDataset
def data_creator(config):
"""Constructs torch.utils.data.Dataset objects.
Note that even though two Dataset objects can be returned,
only one dataset will be used for training.
Args:
config: Configuration dictionary passed into ``PyTorchTrainer``
Returns:
One or Two Dataset objects. If only one Dataset object is provided,
``trainer.validate()`` will throw a ValueError.
"""
return LinearDataset(2, 5), LinearDataset(2, 5, size=400)
# __torch_data_end__
# __torch_loss_start__
import torch
def loss_creator(config):
"""Constructs the Torch Loss object.
Note that optionally, you can pass in a Torch Loss constructor directly
into the PyTorchTrainer (i.e., ``PyTorchTrainer(loss_creator=nn.BCELoss, ...)``).
Args:
config: Configuration dictionary passed into ``PyTorchTrainer``
Returns:
Torch Loss object.
"""
return torch.nn.BCELoss()
# __torch_loss_end__
# __torch_scheduler_start__
import torch
def scheduler_creator(optimizer, config):
"""Constructor of one or more Torch optimizer schedulers.
Args:
optimizers: The return values from ``optimizer_creator``.
This can be one or more torch optimizer objects.
config: Configuration dictionary passed into ``PyTorchTrainer``
Returns:
One or more Torch scheduler objects.
"""
return torch.optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.9)
# __torch_scheduler_end__
# __torch_ray_start__
import ray
ray.init()
# or ray.init(address="auto") to connect to a running cluster.
# __torch_ray_end__
# __torch_trainer_start__
from ray.util.sgd import PyTorchTrainer
trainer = PyTorchTrainer(
model_creator,
data_creator,
optimizer_creator,
loss_creator=nn.MSELoss,
scheduler_creator=scheduler_creator,
config={"lr": 0.001})
# __torch_trainer_end__
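
# Added usage sketch (not part of the original snippet set): a training loop
# would typically call the trainer for a few epochs and then release the
# Ray actors, e.g.
#   for _ in range(5):
#       stats = trainer.train()
#   trainer.shutdown()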
| 26.565574 | 85 | 0.719222 | 419 | 3,241 | 5.322196 | 0.338902 | 0.017937 | 0.028251 | 0.043946 | 0.281614 | 0.233632 | 0.219283 | 0.189686 | 0.132287 | 0.100448 | 0 | 0.007339 | 0.201172 | 3,241 | 121 | 86 | 26.785124 | 0.853998 | 0.671706 | 0 | 0.12 | 0 | 0 | 0.004619 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.28 | 0 | 0.68 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
8dc3232c7e5f402c1d0748b307d531d49a9423ea | 144 | py | Python | lib/abridger/extraction_model/table.py | willangenent/abridger | 6daa80f7360339376b38544ce60694c5addaa30f | [
"MIT"
] | 8 | 2016-10-19T14:15:34.000Z | 2020-06-23T09:37:02.000Z | lib/abridger/extraction_model/table.py | freewilll/abridger | 6daa80f7360339376b38544ce60694c5addaa30f | [
"MIT"
] | null | null | null | lib/abridger/extraction_model/table.py | freewilll/abridger | 6daa80f7360339376b38544ce60694c5addaa30f | [
"MIT"
] | null | null | null | class Table(object):
def __init__(self, table, col, values):
self.table = table
self.col = col
self.values = values
| 24 | 43 | 0.590278 | 18 | 144 | 4.5 | 0.444444 | 0.222222 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.305556 | 144 | 5 | 44 | 28.8 | 0.81 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0 | 0 | 0.4 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 |
8dc3361defa92720863211b21b935ff651f2bf8d | 2,185 | py | Python | tests/test_view.py | takos22/baguette | 36c6cafa793ff4be057ca2f8a5c7129baf8a5ab8 | [
"MIT"
] | 20 | 2021-04-13T06:23:33.000Z | 2021-12-12T13:52:50.000Z | tests/test_view.py | takos22/baguette | 36c6cafa793ff4be057ca2f8a5c7129baf8a5ab8 | [
"MIT"
] | 4 | 2021-04-17T23:17:36.000Z | 2021-05-23T14:20:08.000Z | tests/test_view.py | takos22/baguette | 36c6cafa793ff4be057ca2f8a5c7129baf8a5ab8 | [
"MIT"
] | 3 | 2021-04-23T00:01:45.000Z | 2021-04-29T22:48:33.000Z | import pytest
from baguette.app import Baguette
from baguette.httpexceptions import MethodNotAllowed
from baguette.responses import make_response
from baguette.view import View
@pytest.mark.asyncio
async def test_view_create():
class TestView(View):
async def get(self, request):
return "GET"
async def post(self, request):
return "POST"
async def put(self, request):
return "PUT"
async def delete(self, request):
return "DELETE"
async def nonexistent_method(self, request):
return "NONEXISTENT"
view = TestView(Baguette())
assert view.methods == ["GET", "POST", "PUT", "DELETE"]
assert await view.get(None) == "GET"
assert await view.post(None) == "POST"
assert await view.put(None) == "PUT"
assert await view.delete(None) == "DELETE"
assert await view.nonexistent_method(None) == "NONEXISTENT"
@pytest.fixture(name="view")
def create_view():
class TestView(View):
async def get(self, request):
return "GET"
async def post(self, request):
return "POST"
async def put(self, request):
return "PUT"
async def delete(self, request):
return "DELETE"
return TestView(Baguette())
@pytest.mark.asyncio
async def test_view_call(view, test_request):
result = await view(test_request)
response = make_response(result)
assert response.status_code == 200
assert response.body == "GET"
@pytest.mark.asyncio
@pytest.mark.parametrize(
["method", "method_allowed"],
[
["GET", True],
["POST", True],
["PUT", True],
["DELETE", True],
["PATCH", False],
["NONEXISTENT", False],
],
)
async def test_view_dispatch(view, test_request, method, method_allowed):
test_request.method = method
if method_allowed:
result = await view.dispatch(test_request)
response = make_response(result)
assert response.status_code == 200
assert response.body == method
else:
with pytest.raises(MethodNotAllowed):
await view.dispatch(test_request)
| 25.406977 | 73 | 0.626545 | 249 | 2,185 | 5.405622 | 0.200803 | 0.071322 | 0.11367 | 0.035661 | 0.425706 | 0.392273 | 0.392273 | 0.343239 | 0.343239 | 0.343239 | 0 | 0.003715 | 0.26087 | 2,185 | 85 | 74 | 25.705882 | 0.829721 | 0 | 0 | 0.384615 | 0 | 0 | 0.066362 | 0 | 0 | 0 | 0 | 0 | 0.153846 | 1 | 0.015385 | false | 0 | 0.076923 | 0 | 0.276923 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8dc386d6b5d927e8b934f386d034dfa885867aad | 5,167 | py | Python | tests/conftest.py | adrien-berchet/luigi-tools | 5de731714db38656db06e39acdb0b9e53ed612bf | [
"Apache-2.0"
] | 2 | 2021-07-20T13:08:44.000Z | 2021-07-23T13:08:05.000Z | tests/conftest.py | adrien-berchet/luigi-tools | 5de731714db38656db06e39acdb0b9e53ed612bf | [
"Apache-2.0"
] | 3 | 2021-10-04T11:48:34.000Z | 2022-03-18T15:48:00.000Z | tests/conftest.py | adrien-berchet/luigi-tools | 5de731714db38656db06e39acdb0b9e53ed612bf | [
"Apache-2.0"
] | 1 | 2022-03-21T15:15:23.000Z | 2022-03-21T15:15:23.000Z | # Copyright 2021 Blue Brain Project / EPFL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fixtures for luigi-tools test suite."""
import os
import luigi
import pytest
import luigi_tools.task
from luigi_tools.util import set_luigi_config
from .tools import create_not_empty_file
@pytest.fixture(scope="function")
def tmp_working_dir(tmp_path):
"""Change working directory before a test and change it back when the test is finished"""
cwd = os.getcwd()
os.chdir(tmp_path)
yield tmp_path
os.chdir(cwd)
@pytest.fixture
def luigi_tools_params():
return {"TaskA": {"a_cfg": "default_value_in_cfg"}}
@pytest.fixture
def luigi_tools_working_directory(tmp_working_dir, luigi_tools_params):
# Set config
with set_luigi_config(luigi_tools_params):
yield tmp_working_dir
@pytest.fixture
def task_collection(tmpdir):
class TaskClasses:
"""Class with some luigi tasks to test"""
def __init__(self):
self.tmpdir = tmpdir
self.reset_classes()
self.classes = self._classes()
self.targets = self._targets()
self.reset_classes() # Reset again to return classes that are not registered by luigi
def reset_classes(self):
class TaskA(luigi_tools.task.WorkflowTask):
""""""
counter = luigi.IntParameter(default=0)
def run(self):
for i in luigi.task.flatten(self.output()):
create_not_empty_file(i.path)
def output(self):
return luigi.LocalTarget(tmpdir / "TaskA.target")
class TaskB(luigi_tools.task.WorkflowTask):
""""""
def requires(self):
return TaskA()
def run(self):
for i in luigi.task.flatten(self.output()):
create_not_empty_file(i.path)
def output(self):
return [
luigi.LocalTarget(tmpdir / "TaskB.target"),
[
luigi.LocalTarget(tmpdir / "TaskB2.target"),
luigi.LocalTarget(tmpdir / "TaskB3.target"),
],
]
class TaskC(luigi_tools.task.WorkflowTask):
""""""
def requires(self):
return TaskA()
def run(self):
for i in luigi.task.flatten(self.output()):
create_not_empty_file(i.path)
def output(self):
return {
"first_target": luigi.LocalTarget(tmpdir / "TaskC.target"),
"second_target": luigi.LocalTarget(tmpdir / "TaskC2.target"),
}
class TaskD(luigi_tools.task.WorkflowTask):
""""""
def requires(self):
return [TaskB(), TaskC()]
def run(self):
for i in luigi.task.flatten(self.output()):
create_not_empty_file(i.path)
def output(self):
return [
luigi.LocalTarget(tmpdir / "TaskD.target"),
luigi.LocalTarget(tmpdir / "TaskD2.target"),
]
class TaskE(luigi_tools.task.WorkflowTask):
""""""
def requires(self):
return TaskD()
def run(self):
for i in luigi.task.flatten(self.output()):
create_not_empty_file(i.path)
def output(self):
return {
"first_target": luigi.LocalTarget(tmpdir / "TaskE.target"),
"other_targets": {
"second_target": luigi.LocalTarget(tmpdir / "TaskE2.target"),
"third_target": luigi.LocalTarget(tmpdir / "TaskE3.target"),
},
}
self.TaskA = TaskA
self.TaskB = TaskB
self.TaskC = TaskC
self.TaskD = TaskD
self.TaskE = TaskE
def _classes(self):
return [
self.TaskA,
self.TaskB,
self.TaskC,
self.TaskD,
self.TaskE,
]
def _targets(self):
targets = {}
for task in self.classes:
targets[task.__name__] = task().output()
return targets
return TaskClasses()
| 31.315152 | 98 | 0.522547 | 526 | 5,167 | 4.994297 | 0.275665 | 0.045679 | 0.09212 | 0.085268 | 0.331176 | 0.285497 | 0.285497 | 0.285497 | 0.249715 | 0.249715 | 0 | 0.004729 | 0.386104 | 5,167 | 164 | 99 | 31.506098 | 0.823455 | 0.153474 | 0 | 0.342857 | 0 | 0 | 0.058183 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.209524 | false | 0 | 0.057143 | 0.104762 | 0.447619 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 2 |
8dc3acc4aaba2f96a5e6b63f6e6498fe13dc2b2d | 2,633 | py | Python | HubMovrmentChallenge.py | lhsalud/hub-movement | 351fb430bb5e8540d4215415154c1d9f8d0730fe | [
"Apache-2.0"
] | null | null | null | HubMovrmentChallenge.py | lhsalud/hub-movement | 351fb430bb5e8540d4215415154c1d9f8d0730fe | [
"Apache-2.0"
] | null | null | null | HubMovrmentChallenge.py | lhsalud/hub-movement | 351fb430bb5e8540d4215415154c1d9f8d0730fe | [
"Apache-2.0"
] | null | null | null | #
# Author: L. Salud, April 26.2018
#
import pandas as pd
import os
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
os.getcwd() # Get and place .py in same directory as .xls initially
os.chdir('./') # Path to .xls file
from pandas import read_excel
df = read_excel('rssi_data_challenge2.xls')
df.dropna(how="all", inplace=True) # drops the empty line at file-end
df.head(n=5)
df.tail()
df.describe(include = 'all')
pca = PCA(n_components=3)
X = df[['attributesfirstnodemeanrssi','attributessecondnodemeanrssi', 'attributesthirdnodemeanrssi','attributesfourthnodemeanrssi','attributesfifthnodemeanrssi','attributessixthnodemeanrssi']]
X.loc[1:10]
list(df)
S = df[['attributesfirstnodestddevrssi','attributessecondnodestddevrssi', 'attributesthirdnodestddevrssi','attributesfourthnodestddevrssi','attributesfifthnodestddevrssi','attributessixthnodestddevrssi']]
S.loc[1:10]
S.columns = X.columns
S1 = S.replace(0.00, 0.01)
CD = (X*X) + 300*S1 # TODO: Refine
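# CD combines the squared mean RSSI per node with a scaled standard-deviation term;
# the 300 weight is an ad-hoc balance between the two terms (see TODO above).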
# TODO: See if any of these and other publications may be applicable
# https://www.hindawi.com/journals/jcnc/2013/185138/abs/
# https://www.ncbi.nlm.nih.gov/pubmed/28895879
# https://dl.acm.org/citation.cfm?id=2790093
# https://en.wikipedia.org/wiki/Short-time_Fourier_transform
# Standardizing the features
CD = StandardScaler().fit_transform(CD)
CD[1:10]
principalComponents = pca.fit_transform(CD)
principalDf = pd.DataFrame(data = principalComponents
, columns = ['principal_component_1', 'principal_component_2', 'principal_component_3'])
principalDf = principalDf[principalDf['principal_component_1'] < 18 ]
max(principalDf['principal_component_1'])
finalDf = pd.concat([principalDf, df[['movestate']]], axis = 1)
# Visualize
fig = plt.figure(figsize = (26, 26))
#ax = fig.add_subplot(111, projection='3d')
ax = fig.add_subplot(1,1,1)
ax.set_xlabel('Principal Component 2', fontsize = 15)
ax.set_ylabel('Principal Component 1', fontsize = 15)
#ax.set_zlabel('Principal Component 3', fontsize = 15)
ax.set_title('3 component PCA', fontsize = 20)
targets = ['nonmove', 'move']
colors = ['r', 'b', 'g']
for target, color in zip(targets,colors):
indicesToKeep = finalDf['movestate'] == target
ax.scatter(finalDf.loc[indicesToKeep, 'principal_component_2']
, finalDf.loc[indicesToKeep, 'principal_component_1']
# , finalDf.loc[indicesToKeep, 'principal_component_3']
, c = color
, alpha=0.5,
label="Point")
ax.legend(targets)
ax.grid()
# TODO: Run SVM for cluster separation | 31.722892 | 204 | 0.725788 | 335 | 2,633 | 5.61194 | 0.543284 | 0.105319 | 0.050532 | 0.023936 | 0.065426 | 0 | 0 | 0 | 0 | 0 | 0 | 0.039665 | 0.138245 | 2,633 | 83 | 205 | 31.722892 | 0.788894 | 0.250285 | 0 | 0 | 0 | 0 | 0.313395 | 0.261247 | 0 | 0 | 0 | 0.012048 | 0 | 1 | 0 | false | 0 | 0.130435 | 0 | 0.130435 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
8dc61223149b3158489a5e9ccf76ac85256384c3 | 1,278 | py | Python | utils/filter_empty_lines.py | LeCongThuong/deep-text-recognition-benchmark | b9f4e5dab9a991435d9ba9e71a89dd6fce20f468 | [
"Apache-2.0"
] | null | null | null | utils/filter_empty_lines.py | LeCongThuong/deep-text-recognition-benchmark | b9f4e5dab9a991435d9ba9e71a89dd6fce20f468 | [
"Apache-2.0"
] | null | null | null | utils/filter_empty_lines.py | LeCongThuong/deep-text-recognition-benchmark | b9f4e5dab9a991435d9ba9e71a89dd6fce20f468 | [
"Apache-2.0"
] | null | null | null | import re
def read_from_file(file_path):
with open(file_path, 'r', encoding='utf-8') as f:
content = f.read().splitlines()
return content
def write_to_file(corpus, dest_path):
with open(dest_path, 'w', encoding='utf-8') as f:
for item in corpus:
f.write("%s\n" % item)
def filter_emtpy_lines(content, character_vocab):
out_of_vocab = f'[^{character_vocab}]'
count = 0
filtered_content = []
for line in content:
print(f'\r{line}', end='')
filtered_line = re.sub(out_of_vocab, '', line)
if len(filtered_line) == 0:
count = count + 1
else:
            filtered_content.append(line)
print("Done")
print("Num of invalid lines: ", count)
return filtered_content
def main():
file_path = '/home/love_you/ocr-gen/vi.txt'
character_vocab = 'hjbóẺoÝLvÚẼÁÂẩởĨỈtgKứẾmŨÒWsăỷịơIÔỀửãùaXP9ẰẳỉẹỶzầẪâỸỎảệyOựỬẵỘxCỐlỲD6ộỦỒĂƠÌồ1áTFnỆpHẽờếỏẢYẨUắƯẦíÃẤJèýẲ2i4ẬỊÊÓớR7ÙÕàGỨềỳecêSéừqQạòấỮ0ốẫ5õfỗđỡúNũỤợỖỠMằẸôỚặuỌỞụÀEkĐÉBưẮ3ỂễAìỜủỔỢổọwậdZĩẻ8ỄỰểrÈẴÍỪẶẠữỹV '
dest_path = '/home/love_you/ocr-gen/filtered_vi.txt'
content = read_from_file(file_path)
filtered_content = filter_emtpy_lines(content, character_vocab)
write_to_file(filtered_content, dest_path)
main()
| 29.72093 | 219 | 0.692488 | 154 | 1,278 | 5.5 | 0.383117 | 0.088548 | 0.028335 | 0.03778 | 0.219599 | 0.136954 | 0 | 0 | 0 | 0 | 0 | 0.014663 | 0.199531 | 1,278 | 42 | 220 | 30.428571 | 0.813294 | 0 | 0 | 0 | 0 | 0 | 0.259781 | 0.204225 | 0 | 0 | 0 | 0 | 0 | 1 | 0.129032 | false | 0 | 0.032258 | 0 | 0.225806 | 0.096774 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8dc6bc8eb5fc6294a76cfdda9f4e036dfe3da0de | 3,680 | py | Python | cltk/data.py | fractaledmind/cltk | 78c7259c1845a4ae8bbd33935ffbae34da23234b | [
"MIT"
] | 1 | 2020-08-02T19:35:06.000Z | 2020-08-02T19:35:06.000Z | cltk/data.py | fractaledmind/cltk | 78c7259c1845a4ae8bbd33935ffbae34da23234b | [
"MIT"
] | null | null | null | cltk/data.py | fractaledmind/cltk | 78c7259c1845a4ae8bbd33935ffbae34da23234b | [
"MIT"
] | null | null | null | """Classes to access the `cltk_data/` directory tree"""
__author__ = 'Stephen Margheim <stephen.margheim@gmail.com>'
__license__ = 'MIT License. See LICENSE.'
import os
import site
from cltk.cltk import CLTK_DATA_DIR
from cltk.cltk.corpus.wrappers.logger import logger
class CorpusError(Exception):
pass
class CLTKData(object):
"""This class provides access to the full directory tree of `cltk_data/`.
The basic structure of the `cltk_data/` directory is:
```
cltk_data/
{language}/
text_corpora/
originals/
{corpus}/
structured/
{corpus}/
plain/
{corpus}/
readable/
{corpus}/
treebank/
{corpus}/
training_set/
{corpus}/
```
Users can set the path to `cltk_data/` via the ``data_path`` property.
When dealing with a particular corpus, users will also need to set the
``language_dir`` property properly in order to access the corpus.
"""
def __init__(self):
self._data_path = None
self._language_dir = None
## Base `cltk_data/` directory --------------------------------------------
@property
def data_path(self):
if self._data_path:
return self.resolve_path(self._data_path)
else:
return self.resolve_path(CLTK_DATA_DIR)
@data_path.setter
def data_path(self, value):
self._data_path = value
## 2nd level language directories -----------------------------------------
@property
def language_dir(self):
if self._language_dir:
return self.resolve_path(os.path.join(self.data_path,
self._language_dir))
else:
            raise CorpusError('`language_dir` is not set; assign a language '
                              'directory before accessing corpus directories.')
@language_dir.setter
def language_dir(self, value):
self._language_dir = value
## 3rd level corpus type directories --------------------------------------
@property
def corpora_dir(self):
return self.resolve_path(os.path.join(self.language_dir,
'text_corpora'))
@property
def treebank_dir(self):
return self.resolve_path(os.path.join(self.language_dir,
'treebank'))
@property
def training_dir(self):
return self.resolve_path(os.path.join(self.language_dir,
'training_set'))
## Misc. ------------------------------------------------------------------
    # Returns the path to the installed cltk package inside site-packages.
@property
def bin_path(self):
return os.path.join(site.getsitepackages()[0], 'cltk')
def resolve_path(self, path):
# Resolve absolute path
if os.path.isabs(path):
full_path = path
elif path.startswith('~'):
full_path = os.path.expanduser(path)
        else:
            # Plain relative paths (including those starting with '.') resolve
            # against the current working directory
            full_path = os.path.abspath(path)
# Ensure absolute path exists
if not os.path.exists(full_path):
# If directory
if os.path.splitext(full_path)[1] == '':
os.makedirs(full_path)
logger.info('Directory created at : {}'.format(full_path))
# If file
else:
                open(full_path, 'w').close()
logger.info('File created at : {}'.format(full_path))
return full_path
# Alias
cltk_data = CLTKData()
| 30.92437 | 79 | 0.521467 | 381 | 3,680 | 4.834646 | 0.296588 | 0.071661 | 0.057003 | 0.068404 | 0.169381 | 0.144408 | 0.144408 | 0.144408 | 0.086319 | 0.086319 | 0 | 0.001645 | 0.339402 | 3,680 | 118 | 80 | 31.186441 | 0.756067 | 0.325 | 0 | 0.193548 | 0 | 0 | 0.073871 | 0.011819 | 0 | 0 | 0 | 0.008475 | 0 | 1 | 0.16129 | false | 0.016129 | 0.064516 | 0.064516 | 0.387097 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8dc7d59fdd262802a1ded4d3d7416e6bb94d267d | 118 | py | Python | Executor/Tasks/PostRun/__init__.py | EVOLVED-5G/ELCM | 07d07a114b667e8c6915ee3ef125dd4864dd2247 | [
"Apache-2.0"
] | 1 | 2020-04-16T17:07:46.000Z | 2020-04-16T17:07:46.000Z | Executor/Tasks/PostRun/__init__.py | EVOLVED-5G/ELCM | 07d07a114b667e8c6915ee3ef125dd4864dd2247 | [
"Apache-2.0"
] | 3 | 2020-03-06T11:22:09.000Z | 2020-03-06T11:22:10.000Z | Executor/Tasks/PostRun/__init__.py | EVOLVED-5G/ELCM | 07d07a114b667e8c6915ee3ef125dd4864dd2247 | [
"Apache-2.0"
] | 1 | 2022-02-01T07:56:44.000Z | 2022-02-01T07:56:44.000Z | from .decommission import Decommission
from .release_resources import ReleaseResources
from .farewell import Farewell
| 29.5 | 47 | 0.872881 | 13 | 118 | 7.846154 | 0.538462 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.101695 | 118 | 3 | 48 | 39.333333 | 0.962264 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 5 |
8dc7dd2aecae51adb10cd582c54a3498d17a6890 | 3,482 | py | Python | qa/L0_backend_python/model_control/model_control_test.py | galv/server | 071eb2c6c9a8f1bba380c0e69592f50a857c5c42 | [
"BSD-3-Clause"
] | 2,159 | 2020-08-26T06:21:38.000Z | 2022-03-31T16:13:46.000Z | qa/L0_backend_python/model_control/model_control_test.py | galv/server | 071eb2c6c9a8f1bba380c0e69592f50a857c5c42 | [
"BSD-3-Clause"
] | 1,482 | 2020-08-26T08:26:36.000Z | 2022-03-31T23:11:19.000Z | qa/L0_backend_python/model_control/model_control_test.py | galv/server | 071eb2c6c9a8f1bba380c0e69592f50a857c5c42 | [
"BSD-3-Clause"
] | 592 | 2020-08-26T06:09:25.000Z | 2022-03-31T00:37:41.000Z | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
sys.path.append("../../common")
import test_util as tu
import tritonclient.http as httpclient
from tritonclient.utils import *
import numpy as np
import unittest
class ExplicitModelTest(tu.TestResultCollector):
def send_identity_request(self, client, model_name):
inputs = []
inputs.append(httpclient.InferInput('INPUT0', [1, 16], "FP32"))
input0_data = np.arange(start=0, stop=16, dtype=np.float32)
input0_data = np.expand_dims(input0_data, axis=0)
inputs[0].set_data_from_numpy(input0_data)
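        # Send a single 1x16 FP32 tensor; the identity model should echo it back unchanged.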
result = client.infer(
model_name=model_name,
inputs=inputs,
outputs=[httpclient.InferRequestedOutput('OUTPUT0')])
output_numpy = result.as_numpy('OUTPUT0')
self.assertTrue(np.all(input0_data == output_numpy))
def test_model_reload(self):
model_name = "identity_fp32"
ensemble_model_name = 'simple_' + "identity_fp32"
with httpclient.InferenceServerClient("localhost:8000") as client:
for _ in range(5):
self.assertFalse(client.is_model_ready(model_name))
# Load the model before the ensemble model to make sure reloading the
# model works properly in Python backend.
client.load_model(model_name)
client.load_model(ensemble_model_name)
self.assertTrue(client.is_model_ready(model_name))
self.assertTrue(client.is_model_ready(ensemble_model_name))
self.send_identity_request(client, model_name)
self.send_identity_request(client, ensemble_model_name)
client.unload_model(ensemble_model_name)
client.unload_model(model_name)
self.assertFalse(client.is_model_ready(model_name))
self.assertFalse(client.is_model_ready(ensemble_model_name))
if __name__ == '__main__':
unittest.main()
| 46.426667 | 85 | 0.716255 | 449 | 3,482 | 5.400891 | 0.427617 | 0.059381 | 0.042062 | 0.037113 | 0.23134 | 0.211546 | 0.183505 | 0.141856 | 0.056082 | 0.056082 | 0 | 0.012044 | 0.213096 | 3,482 | 74 | 86 | 47.054054 | 0.872993 | 0.456634 | 0 | 0.052632 | 0 | 0 | 0.04882 | 0 | 0.026316 | 0 | 0 | 0 | 0.157895 | 1 | 0.052632 | false | 0 | 0.157895 | 0 | 0.236842 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8dc7f5a5c998df601fc1435d5e14c66275786aee | 19,093 | py | Python | learnable_primitives/primitives.py | ianhuang0630/CSQ | 5f1fe99a8d9da73692643b3911d675dce269a03d | [
"MIT"
] | null | null | null | learnable_primitives/primitives.py | ianhuang0630/CSQ | 5f1fe99a8d9da73692643b3911d675dce269a03d | [
"MIT"
] | null | null | null | learnable_primitives/primitives.py | ianhuang0630/CSQ | 5f1fe99a8d9da73692643b3911d675dce269a03d | [
"MIT"
] | null | null | null |
import numpy as np
import torch
def fexp(x, p):
return torch.sign(x)*(torch.abs(x)**p)
def cuboid_inside_outside_function(X, shape_params, epsilon=0.25):
"""
Arguments:
----------
X: Tensor with size BxNxMx3, containing the 3D points, where B is the
batch size and N is the number of points
shape_params: Tensor with size BxMx3, containing the shape along each
axis for the M primitives
epsilon: int, the shape of the SQ along the latitude and longitude
Returns:
---------
F: Tensor with size BxNxM, containing the values of the
inside-outside function
"""
# Make sure that both tensors have the right shape
assert X.shape[0] == shape_params.shape[0] # batch size
assert X.shape[2] == shape_params.shape[1] # number of primitives
assert X.shape[-1] == 3 # 3D points
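    # Inside-outside function of a cuboid-like SQ:
    # F(x) = (sum_i (x_i / a_i)^(2/epsilon))^epsilon,
    # with F < 1 inside the primitive, F = 1 on its surface and F > 1 outside.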
# Tensor that holds the values of the inside-outside function
F = shape_params.new_zeros(X.shape[:-1])
shape_params = shape_params.unsqueeze(1)
for i in range(3):
F += (X[:, :, :, i] / shape_params[:, :, :, i])**(2.0/epsilon)
return F**(epsilon)
def inside_outside_function(X, shape_params, epsilons):
"""
Arguments:
----------
X: Tensor with size BxNxMx3, containing the 3D points, where B is the
batch size and N is the number of points
shape_params: Tensor with size BxMx3, containing the shape along each
axis for the M primitives
epsilons: Tensor with size BxMx2, containing the shape along the
longitude and the latitude for the M primitives
Returns:
---------
F: Tensor with size BxNxM, containing the values of the
inside-outside function
"""
B = X.shape[0] # batch_size
N = X.shape[1] # number of points on target object
M = X.shape[2] # number of primitives
# Make sure that both tensors have the right shape
assert shape_params.shape[0] == B # batch size
assert epsilons.shape[0] == B # batch size
assert shape_params.shape[1] == M # number of primitives
assert shape_params.shape[1] == epsilons.shape[1]
assert shape_params.shape[-1] == 3 # number of shape parameters
assert epsilons.shape[-1] == 2 # number of shape parameters
assert X.shape[-1] == 3 # 3D points
# Declare some variables
a1 = shape_params[:, :, 0].unsqueeze(1) # size Bx1xM
a2 = shape_params[:, :, 1].unsqueeze(1) # size Bx1xM
a3 = shape_params[:, :, 2].unsqueeze(1) # size Bx1xM
e1 = epsilons[:, :, 0].unsqueeze(1) # size Bx1xM
e2 = epsilons[:, :, 1].unsqueeze(1) # size Bx1xM
# Add a small constant to points that are completely dead center to avoid
# numerical issues in computing the gradient
# zeros = X == 0
# X[zeros] = X[zeros] + 1e-6
X = ((X > 0).float() * 2 - 1) * torch.max(torch.abs(X), X.new_tensor(1e-6))
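    # Standard superquadric implicit function:
    # F(x) = (((x/a1)^(2/e2) + (y/a2)^(2/e2))^(e2/e1) + (z/a3)^(2/e1))^e1,
    # with F < 1 inside the primitive, F = 1 on its surface and F > 1 outside.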
F = ((X[:, :, :, 0] / a1)**2)**(1./e2)
# F += ((X[:, :, :, 1] / a2)**2)**(1./e2)
F = F+((X[:, :, :, 1] / a2)**2)**(1./e2)
F = F**(e2 / e1)
# F += ((X[:, :, :, 2] / a3)**2)**(1./e1)
F = F+((X[:, :, :, 2] / a3)**2)**(1./e1)
# Sanity check to make sure that we have the expected size
assert F.shape == (B, N, M)
return F**e1
# return F
def points_to_cuboid_distances(X, shape_params):
"""
Arguments:
----------
X: Tensor with size BxNxMx3, containing the 3D points, where B is the
batch size and N is the number of points
shape_params: Tensor with size BxMx3, containing the shape along each
axis for the M primitives
Returns:
---------
F: Tensor with size BxNxM, containing the distances of each point to
every primitive
"""
# Make sure that everything has the right size
assert X.shape[0] == shape_params.shape[0] # batch size
assert X.shape[2] == shape_params.shape[1] # number of primitives
assert X.shape[-1] == 3 # 3D points
# The distance between a point (x, y, z) to a cuboid with dimensions
# (a1, a2, a3) is sqrt(max(0, abs(x) - a1)^2 + max(0, abs(y) - a2)^2 +
# max(0, abs(z) - a3)^2). Technically, F=0 for all points either inside or
# on the surface of the primitive, while we only want F=0 for the points on
# the surface of the cuboid.
F = (torch.max(
X.abs() - shape_params.unsqueeze(1),
torch.zeros_like(X)
)**2).sum(-1)
return F
def euler_angles_to_rotation_matrices(angles):
"""
Arguments:
---------
angles: Tensor with size Kx3, where K is the number of Euler angles we
want to transform to rotation matrices
Returns:
-------
rotation_matrices: Tensor with size Kx3x3, that contains the computed
rotation matrices
"""
K = angles.shape[0]
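    # Build the elementary rotations about the x, y and z axes and compose them
    # as R = R_z * R_y * R_x.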
# Allocate memory for a Tensor of size Kx3x3 that will hold the rotation
# matrix along the x-axis
r_x = angles.new_zeros((K, 3, 3))
r_x[:, 0, 0] = 1.0
c = torch.cos(angles[:, 0])
s = torch.sin(angles[:, 0])
r_x[torch.arange(K), 1, 1] = c
r_x[torch.arange(K), 2, 2] = c
r_x[torch.arange(K), 1, 2] = -s
r_x[torch.arange(K), 2, 1] = s
# Similar for the rotation matrices along the y-axis and z-axis
r_y = angles.new_zeros((K, 3, 3))
r_y[:, 1, 1] = 1.0
c = torch.cos(angles[:, 1])
s = torch.sin(angles[:, 1])
r_y[torch.arange(K), 0, 0] = c
r_y[torch.arange(K), 2, 2] = c
r_y[torch.arange(K), 2, 0] = -s
r_y[torch.arange(K), 0, 2] = s
r_z = angles.new_zeros((K, 3, 3))
r_z[:, 2, 2] = 1.0
c = torch.cos(angles[:, 2])
s = torch.sin(angles[:, 2])
r_z[torch.arange(K), 0, 0] = c
r_z[torch.arange(K), 1, 1] = c
r_z[torch.arange(K), 0, 1] = -s
r_z[torch.arange(K), 1, 0] = s
return r_z.bmm(r_y.bmm(r_x))
def quaternions_to_rotation_matrices(quaternions):
"""
Arguments:
---------
quaternions: Tensor with size Kx4, where K is the number of quaternions
we want to transform to rotation matrices
Returns:
-------
rotation_matrices: Tensor with size Kx3x3, that contains the computed
rotation matrices
"""
K = quaternions.shape[0]
# Allocate memory for a Tensor of size Kx3x3 that will hold the rotation
# matrix along the x-axis
R = quaternions.new_zeros((K, 3, 3))
# A unit quaternion is q = w + xi + yj + zk
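    # The entries filled in below follow the standard quaternion-to-rotation-matrix
    # formula; the factor s = 2 / |q|^2 keeps the conversion valid for
    # non-normalised quaternions.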
xx = quaternions[:, 1]**2
yy = quaternions[:, 2]**2
zz = quaternions[:, 3]**2
ww = quaternions[:, 0]**2
n = (ww + xx + yy + zz).unsqueeze(-1)
s = quaternions.new_zeros((K, 1))
s[n != 0] = 2 / n[n != 0]
xy = s[:, 0] * quaternions[:, 1] * quaternions[:, 2]
xz = s[:, 0] * quaternions[:, 1] * quaternions[:, 3]
yz = s[:, 0] * quaternions[:, 2] * quaternions[:, 3]
xw = s[:, 0] * quaternions[:, 1] * quaternions[:, 0]
yw = s[:, 0] * quaternions[:, 2] * quaternions[:, 0]
zw = s[:, 0] * quaternions[:, 3] * quaternions[:, 0]
xx = s[:, 0] * xx
yy = s[:, 0] * yy
zz = s[:, 0] * zz
idxs = torch.arange(K).to(quaternions.device)
R[idxs, 0, 0] = 1 - yy - zz
R[idxs, 0, 1] = xy - zw
R[idxs, 0, 2] = xz + yw
R[idxs, 1, 0] = xy + zw
R[idxs, 1, 1] = 1 - xx - zz
R[idxs, 1, 2] = yz - xw
R[idxs, 2, 0] = xz - yw
R[idxs, 2, 1] = yz + xw
R[idxs, 2, 2] = 1 - xx - yy
return R
def transform_to_primitives_centric_system(X, translations, rotation_angles):
"""
Arguments:
----------
X: Tensor with size BxNx3, containing the 3D points, where B is the
batch size and N is the number of points
translations: Tensor with size BxMx3, containing the translation
vectors for the M primitives
rotation_angles: Tensor with size BxMx4 containing the 4 quaternion
values for the M primitives
Returns:
--------
X_transformed: Tensor with size BxNxMx3 containing the N points
transformed in the M primitive centric coordinate
systems.
"""
# Make sure that all tensors have the right shape
assert X.shape[0] == translations.shape[0]
assert translations.shape[0] == rotation_angles.shape[0]
assert translations.shape[1] == rotation_angles.shape[1]
assert X.shape[-1] == 3
assert translations.shape[-1] == 3
assert rotation_angles.shape[-1] == 4
# Subtract the translation and get X_transformed with size BxNxMx3
X_transformed = X.unsqueeze(2) - translations.unsqueeze(1)
# R = euler_angles_to_rotation_matrices(rotation_angles.view(-1, 3)).view(
R = quaternions_to_rotation_matrices(rotation_angles.view(-1, 4)).view(
rotation_angles.shape[0], rotation_angles.shape[1], 3, 3
)
# Let as denote a point x_p in the primitive-centric coordinate system and
# its corresponding point in the world coordinate system x_w. We denote the
# transformation from the point in the world coordinate system to a point
# in the primitive-centric coordinate system as x_p = R(x_w - t)
X_transformed = R.unsqueeze(1).matmul(X_transformed.unsqueeze(-1))
X_signs = (X_transformed > 0).float() * 2 - 1
X_abs = X_transformed.abs()
X_transformed = X_signs * torch.max(X_abs, X_abs.new_tensor(1e-5))
return X_transformed.squeeze(-1)
def transform_to_world_coordinates_system(X_SQ, translations, rotation_angles):
"""
Arguments:
----------
X_SQ: Tensor with size BxMxSx3, containing the 3D points, where B is
the batch size, M is the number of primitives and S is the number
of points on each primitive-centric system
translations: Tensor with size BxMx3, containing the translation
vectors for the M primitives
rotation_angles: Tensor with size BxMx3 containing the 3 Euler angles
for the M primitives
Returns:
--------
X_SQ_w: Tensor with size BxMxSx3 containing the N points
transformed in the M primitive centric coordinate
systems.
"""
# Make sure that all tensors have the right shape
assert X_SQ.shape[0] == translations.shape[0]
assert translations.shape[0] == rotation_angles.shape[0]
assert translations.shape[1] == rotation_angles.shape[1]
assert X_SQ.shape[1] == translations.shape[1]
assert X_SQ.shape[-1] == 3
assert translations.shape[-1] == 3
assert rotation_angles.shape[-1] == 4
# Compute the rotation matrices to every primitive centric coordinate
# system (R has size BxMx3x3)
R = quaternions_to_rotation_matrices(rotation_angles.view(-1, 4)).view(
rotation_angles.shape[0], rotation_angles.shape[1], 3, 3
)
# We need the R.T to get the rotation matrix from the primitive-centric
# coordinate system to the world coordinate system.
R_t = torch.einsum("...ij->...ji", (R,))
assert R.shape == R_t.shape
    X_SQ_w = R_t.unsqueeze(2).matmul(X_SQ.unsqueeze(-1))  # x_w = R^T x_p (+ t below)
X_SQ_w = X_SQ_w.squeeze(-1) + translations.unsqueeze(2)
return X_SQ_w
def deform(X, shape_params, tapering_params, bending_params=None):
"""
Arguments:
----------
X: Tensor with size BxMxSx3 containing the S points
sampled on the surfaces of each SQ
shape_params: Tensor with size BxMx3, containing the shape along each
axis for the M primitives
tapering_params: Tensor with size BxMx2, containing the tapering_params
for every primitive
bending_params: Tensor with size BxMx2, containing the bending_params
for every primitive
Returns:
--------
X_deformed: Tensor with size BxMxSx3 containing the N points
transformed in the M primitive centric coordinate
systems after the deformations.
"""
B, M, S, _ = X.shape
# Make sure that all tensors have the right shape
assert X.shape[0] == shape_params.shape[0] # batch size
assert X.shape[0] == tapering_params.shape[0] # batch size
assert shape_params.shape[-1] == 3
assert tapering_params.shape[-1] == 2
assert X.shape[1] == shape_params.shape[1]
assert X.shape[1] == tapering_params.shape[1]
# Compute the two linear tapering functions
K = tapering_params / shape_params[:, :, -1].unsqueeze(-1)
assert tapering_params.shape == K.shape
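    # Linear tapering along z: f_i(z) = (t_i / a3) * z + 1 scales the x and y
    # coordinates, while the third factor is fixed to 1 so z itself is unchanged.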
f = K.unsqueeze(2) * X[:, :, :, -1].unsqueeze(-1) + 1.0
assert f.shape == (B, M, S, 2)
f = torch.cat([
f,
f.new_ones(B, M, S, 1)
], -1)
assert f.shape == X.shape
X_d = X * f
X_d = apply_bending(X_d, bending_params)
return X_d
def apply_bending(X, bending_params):
"""
Arguments:
----------
X: Tensor with size BxMxSx3 containing the S points
sampled on the surfaces of each SQ
bending_params: Tensor with size BxMx2, containing the bending_params
for every primitive
Returns:
--------
X_d: Tensor with size BxMxSx3 containing the N points
transformed in the M primitive centric coordinate
systems after the deformations.
"""
# If there no bending params specified return the input as is
if bending_params is None:
return X
B, M, S, _ = X.shape
# Make sure that all tensors have the right shape
assert X.shape[0] == bending_params.shape[0] # batch size
assert bending_params.shape[-1] == 2
# Apply the bending operation
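    # a is the bending direction (an angle in the xy-plane) and k controls the
    # bend magnitude, with 1/k acting as the bending radius below.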
bending_params = bending_params.unsqueeze(2) # BXMX2 -> BxMx1x2
k = bending_params[:, :, :, 0].unsqueeze(-1) # BxMx1x1
a = bending_params[:, :, :, 1].unsqueeze(-1) # BxMx1x1
b = torch.atan2(X[:, :, :, 1].unsqueeze(-1), X[:, :, :, 0].unsqueeze(-1))
assert b.shape == (B, M, S, 1)
    r = torch.sqrt(
        X[:, :, :, 0].unsqueeze(-1)**2 + X[:, :, :, 1].unsqueeze(-1)**2
    ) * torch.cos(a - b)
    assert r.shape == (B, M, S, 1)
    k_inv = 1 / k  # BxMx1x1
    gamma = X[:, :, :, -1].unsqueeze(-1) / k
    R = k_inv - (k_inv - r) * torch.cos(gamma)
    assert R.shape == (B, M, S, 1)
    X_d = X.new_zeros(X.shape)
    X_d[:, :, :, 0] = X[:, :, :, 0] + (R - r)*torch.cos(a)
    X_d[:, :, :, 1] = X[:, :, :, 1] + (R - r)*torch.sin(a)
    X_d[:, :, :, 2] = (k_inv - r)*(R - r)*torch.sin(gamma)
return X_d
def distance(F, shape_params=None, use_chamfer=False):
"""
Arguments:
----------
F: Tensor of size BxNxM, with the values of the inside-outside function
for the N points w.r.t. the M primitives
shape_params: Tensor with size BxMx3, containing the shape along each
axis for the M primitives
Returns:
--------
C: Tensor of size BxNxM, with the distance between points and
primitives
primitive_idxs: Tensor of size BxNxM, with the indices of the
primitives in the original tensor F
"""
# Minimization of the distances between points and primitives
if use_chamfer:
C = (F-1.0)**2.0
else:
a1a2a3 = torch.sqrt(shape_params.prod(-1)).unsqueeze(1)
# C = torch.max(a1a2a3*(F - 1.0), torch.zeros_like(F))
# C = torch.max(torch.sqrt(F) - 1.0, torch.zeros_like(F))
C = torch.max((F - 1.0), torch.zeros_like(F))
return torch.sort(C, dim=-1)
def ray_plane_intersections(P, V, normals, exp1, exp2):
"""
Find the interesection between a set of rays and a set of planes. Rays are
defined as two points and normals as points and planes.
We we want to compute
rs = n (Vo - Po)
-----------
n (P1 - Po)
n and Vo define the plane and Po and P1 the ray
Arguments:
----------
P: Tensor of size BxMx?x3 containing the start of each ray (P1 - Po)
V: Tensor of size BxMxSxNx3 with the differences between the ray_starts
and the points of the planes (Vo - Po)
normals: Tensor of size BxMx?x3 N normals transformed in the M
primitive-centric coordinate systems
Returns:
--------
r: Tensor of size BxMxSxN with the squared_distances
"""
B, M, S, N, _ = V.shape
t1 = torch.einsum(exp1, [normals, V])
t2 = torch.einsum(exp2, [normals, P])
rs = torch.div(t1, t2)
assert rs.shape == (B, M, S, N)
return torch.pow(rs, 2)
def beta_stirling(x, y):
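    # Stirling-type approximation of the Beta function:
    # B(x, y) ~= sqrt(2*pi) * x^(x - 1/2) * y^(y - 1/2) / (x + y)^(x + y - 1/2)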
sqrt2pi = float(np.sqrt(2*np.pi))
return sqrt2pi * (x**(x-0.5) * y**(y-0.5)) / (x+y)**(x+y-0.5)
def sq_volumes(parameters):
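    # Analytic superquadric volume:
    # V = 2 * a1*a2*a3 * e1*e2 * B(e1/2 + 1, e1) * B(e2/2, e2/2),
    # with the Beta function replaced by the Stirling approximation above.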
a1a2a3 = parameters[3].view(-1, 3).prod(-1)
e = parameters[4].view(-1, 2)
e1 = e[:, 0]
e2 = e[:, 1]
e1e2 = e.prod(-1)
b1 = beta_stirling(e1/2 + 1, e1)
b2 = beta_stirling(e2/2, e2/2)
volumes = 2 * a1a2a3 * e1e2 * b1 * b2
return volumes
def sq_areas(shapes, epsilons):
"""Approximate area of the superquadric.
We use Knud Thomsen's formula for ellipsoids.
"""
p = 1.6075
a = shapes[:, :, 0]
b = shapes[:, :, 1]
c = shapes[:, :, 2]
return 4 * np.pi * (((a*b)**p + (a*c)**p + (b*c)**p)/3)**(1/p)
def sample_points_inside_primitives(X_SQ, N, rotations, translations):
"""Sample points inside the primitives, given S points on their surface
Arguments:
----------
X_SQ: Tensor of size BxMxSx3 containing S points sampled on the surface
of each primitive
rotations: Tensor of size BxMx4 containing the quaternions of the SQs
translations: Tensor of size BxMx4 containing the translation vectors
of the SQs
N: number of points to be generated internally in each primitive
Returns:
--------
X_world: Tensor of size BxMxNx3 containing N points sampled uniformly
inside and on the surface of the SQs
"""
B, M, S, _ = X_SQ.shape
assert rotations.shape == (B, M, 4)
assert translations.shape == (B, M, 3)
# Create points inside the primitives
device = X_SQ.device
batch = (torch.arange(B*M*N) / (M*N)).view(B, M, N).to(device)
prim = ((torch.arange(B*M*N) / N) % M).view(B, M, N).to(device)
pointsA = torch.randint(0, S, (B, M, N), dtype=torch.long).to(device)
pointsB = torch.randint(0, S, (B, M, N), dtype=torch.long).to(device)
t = torch.rand(B, M, N, 1).to(device)
X_a = X_SQ[batch, prim, pointsA]
X_b = X_SQ[batch, prim, pointsB]
X = X_a + t * (X_b-X_a)
# Transform the points to world coordinates
# R = quaternions_to_rotation_matrices(rotations.view(-1, 4))
# R = R.view(B, M, 3, 3)
# X_world = X.view(B, M, N, 1, 3).matmul(R.view(B, M, 1, 3, 3))
# X_world = X_world.view(B, M, N, 3)
# X_world = X_world + translations.view(B, M, 1, 3)
X_world = transform_to_world_coordinates_system(
X,
translations,
rotations
)
assert X_world.shape == (B, M, N, 3)
return X_world
| 35.423006 | 79 | 0.59514 | 2,851 | 19,093 | 3.899684 | 0.109786 | 0.017809 | 0.039036 | 0.015291 | 0.520777 | 0.450711 | 0.369221 | 0.325418 | 0.320921 | 0.319032 | 0 | 0.035194 | 0.273765 | 19,093 | 538 | 80 | 35.488848 | 0.766623 | 0.467292 | 0 | 0.116883 | 0 | 0 | 0.00128 | 0 | 0 | 0 | 0 | 0 | 0.199134 | 1 | 0.069264 | false | 0 | 0.008658 | 0.004329 | 0.151515 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8dc85eed480d432c4899171384c4da5e6df7a236 | 6,206 | py | Python | spring semester 2 course/computer_graphics_labs/3D OpenGL/lab.py | andrwnv/study-progs | 902c4ede0b273d91fd87c93e861b40439847c1a9 | [
"MIT"
] | 4 | 2020-01-02T08:38:55.000Z | 2020-11-12T19:46:22.000Z | spring semester 2 course/computer_graphics_labs/3D OpenGL/lab.py | andrwnv/StudyProgs | 902c4ede0b273d91fd87c93e861b40439847c1a9 | [
"MIT"
] | null | null | null | spring semester 2 course/computer_graphics_labs/3D OpenGL/lab.py | andrwnv/StudyProgs | 902c4ede0b273d91fd87c93e861b40439847c1a9 | [
"MIT"
] | null | null | null | from PyQt5.QtOpenGL import *
from OpenGL.GL import *
from PyQt5 import QtWidgets, QtCore
class FigureWidget(QGLWidget):
""" Main OpenGL widget. """
def __init__(self, parent):
super(FigureWidget, self).__init__()
self.setMinimumSize(1280, 720)
self.__rotate_angle_y = 70
self.__rotate_angle_x = 15
self.__rotate_angle_z = 0
self.__zoom_coefficient = -5
self.setFocusPolicy(QtCore.Qt.StrongFocus)
self.__timer = QtCore.QTimer()
self.__timer.setInterval(30)
self.__timer.timeout.connect(lambda: self.idle())
self.__timer.start()
def idle(self):
self.__rotate_angle_y += 0.5
self.update()
def paintGL(self) -> None:
""" Draw scene. """
glClear(GL_COLOR_BUFFER_BIT)
glClearColor(0, 0, 0, 1.0)
glColor3f(1.0, 1.0, 1.0)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glFrustum(-3, 3, -2, 2, 1.2, 40)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
glTranslatef(0, 0, self.__zoom_coefficient)
glRotatef(self.__rotate_angle_x, 1, 0, 0)
glRotatef(self.__rotate_angle_y, 0, 1, 0)
glRotatef(self.__rotate_angle_z, 0, 0, 1)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glColor3f(1.0, 1.0, 1.0)
glBegin(GL_LINES)
i: float = -2.5
# Draw coordinate grid.
while i <= 2.5:
glVertex3f(i, -4, 2.5)
glVertex3f(i, -4, -2.5)
glVertex3f(2.5, -4, i)
glVertex3f(-2.5, -4, i)
i += 0.25
glEnd()
# Draw up pyramid.
glBegin(GL_TRIANGLE_STRIP)
# 1st face.
glColor3f(1, 0, 1)
glVertex3f(0, 3, 0)
glColor3f(0.5, 0, 0.5)
glVertex3f(-1, 1, -1)
glColor3f(0.5, 1, 1)
glVertex3f(-1, 1, 1)
# 2nd face.
glColor3f(0, 0, 1)
glVertex3f(0, 3, 0)
glColor3f(0.5, 0.5, 1)
glVertex3f(-1, 1, 1)
glColor3f(0.5, 0.3, 0.2)
glVertex3f(1, 1, 1)
        # 3rd face.
glColor3f(0, 1, 1)
glVertex3f(0, 3, 0)
glColor3f(0.5, 0, 0.5)
glVertex3f(1, 1, 1)
glColor3f(0, 1, 1)
glVertex3f(1, 1, -1)
        # 4th face.
glColor3f(0, 1, 0)
glVertex3f(0, 3, 0)
glColor3f(0.5, 0.7, 0.3)
glVertex3f(1, 1, -1)
glColor3f(0.1, 0.4, 0.3)
glVertex3f(-1, 1, -1)
glEnd()
# Draw cube.
glBegin(GL_QUAD_STRIP)
glColor3f(1, 1, 0)
glVertex3f(-1, 1, -1)
glColor3f(0.5, 1, 0)
glVertex3f(-1, 1, 1)
glColor3f(1, 0, 1)
glVertex3f(-1, -1, -1)
glVertex3f(-1, -1, 1)
glColor3f(1, 0, 0)
glVertex3f(-1, 1, 1)
glColor3f(1, 1, 0)
glVertex3f(1, 1, 1)
glVertex3f(-1, -1, 1)
glColor3f(0.5, 1, 0)
glVertex3f(1, -1, 1)
glColor3f(0, 0.5, 1)
glVertex3f(1, 1, 1)
glVertex3f(1, 1, -1)
glColor3f(1, 0, 1)
glVertex3f(1, -1, 1)
glColor3f(0, 1, 0)
glVertex3f(1, -1, -1)
glColor3f(1, 0, 1)
glVertex3f(1, 1, -1)
glColor3f(0.5, 1, 0)
glVertex3f(-1, 1, -1)
glColor3f(1, 1, 0)
glVertex3f(1, -1, -1)
glVertex3f(-1, -1, -1)
glEnd()
# Draw down pyramid.
glBegin(GL_TRIANGLE_STRIP)
# 1st face.
glColor3f(1, 0, 1)
glVertex3f(0, -3, 0)
glColor3f(0.2, 0.7, 1)
glVertex3f(-1, -1, -1)
glColor3f(0.1, 0.7, 0.8)
glVertex3f(-1, -1, 1)
# 2nd face.
glColor3f(0, 0, 1)
glVertex3f(0, -3, 0)
glColor3f(0.1, 0, 0.8)
glVertex3f(-1, -1, 1)
glColor3f(0.8, 0, 0.8)
glVertex3f(1, -1, 1)
        # 3rd face.
glColor3f(0, 1, 1)
glVertex3f(0, -3, 0)
glColor3f(0.1, 0.7, 0.8)
glVertex3f(1, -1, 1)
glColor3f(0.2, 0.7, 1)
glVertex3f(1, -1, -1)
        # 4th face.
glColor3f(0, 1, 0)
glVertex3f(0, -3, 0)
glColor3f(0, 0, 1)
glVertex3f(1, -1, -1)
glColor3f(0.8, 0, 0.8)
glVertex3f(-1, -1, -1)
glEnd()
glFlush()
def resizeGL(self, w, h) -> None:
""" Resize event. """
glViewport(50, 50, w - 100, h - 100)
def initializeGL(self) -> None:
""" Init OpenGL. """
# glEnable(GL_CULL_FACE)
# glCullFace(GL_FRONT)
# Enable depth.
glEnable(GL_DEPTH_TEST)
glClearColor(0.1, 0.39, 0.88, 1.0)
glColor3f(1.0, 1.0, 1.0)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glFrustum(-2, 2, -1.5, 1.5, 1, 40)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
glTranslatef(0, 0, -3)
glRotatef(70, 0, 1, 0)
glDisable(GL_BLEND)
def keyPressEvent(self, event):
if event.key() == QtCore.Qt.Key_A:
self.__rotate_angle_y -= 0.5
self.update()
elif event.key() == QtCore.Qt.Key_D:
self.__rotate_angle_y += 0.5
self.update()
elif event.key() == QtCore.Qt.Key_W:
self.__rotate_angle_x += 0.5
self.update()
elif event.key() == QtCore.Qt.Key_S:
self.__rotate_angle_x -= 0.5
self.update()
elif event.key() == QtCore.Qt.Key_Q:
self.__rotate_angle_z += 0.5
self.update()
elif event.key() == QtCore.Qt.Key_E:
self.__rotate_angle_z -= 0.5
self.update()
elif event.key() == QtCore.Qt.Key_Plus:
self.__zoom_coefficient += 0.5
self.update()
elif event.key() == QtCore.Qt.Key_Minus:
self.__zoom_coefficient -= 0.5
self.update()
class App(QtWidgets.QMainWindow):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.__glWidget = FigureWidget(self)
self.setCentralWidget(self.__glWidget)
if __name__ == '__main__':
app = QtWidgets.QApplication(['3D OpenGL'])
window = App()
window.show()
app.exec_()
| 24.626984 | 58 | 0.508701 | 824 | 6,206 | 3.667476 | 0.163835 | 0.046989 | 0.127068 | 0.137657 | 0.656188 | 0.603574 | 0.591661 | 0.568829 | 0.487756 | 0.487095 | 0 | 0.12086 | 0.34805 | 6,206 | 251 | 59 | 24.7251 | 0.62605 | 0.045923 | 0 | 0.548571 | 0 | 0 | 0.00289 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0.017143 | 0 | 0.068571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8dc89f352fa436e39e052c93cd7afb303171ceba | 2,485 | py | Python | src/passari/scripts/submit_sip.py | finnish-heritage-agency/passari | 17af68b07435a40eaabaacf8c34e54517edc9153 | [
"MIT"
] | 1 | 2020-06-17T11:05:00.000Z | 2020-06-17T11:05:00.000Z | src/passari/scripts/submit_sip.py | finnish-heritage-agency/passari | 17af68b07435a40eaabaacf8c34e54517edc9153 | [
"MIT"
] | 2 | 2021-03-31T19:50:58.000Z | 2021-05-17T20:52:03.000Z | src/passari/scripts/submit_sip.py | finnish-heritage-agency/passari | 17af68b07435a40eaabaacf8c34e54517edc9153 | [
"MIT"
] | null | null | null | """
Submit a SIP archive to the digital preservation service
"""
from pathlib import Path
import click
from passari.config import CONFIG
from passari.dpres.package import MuseumObjectPackage
from passari.dpres.ssh import connect_dpres_sftp
from passari.utils import debugger_enabled
def submit_sip(package_dir, object_id: int, sip_id: str = None):
"""
Submit SIP to the DPRES service
:param package_dir: Path to directory containing objects under processing
:param object_id: Object ID of the object to process
:param sip_id: Optional SIP ID used to generate multiple SIPs
from the same object version
"""
with connect_dpres_sftp() as sftp:
museum_package = MuseumObjectPackage.from_path_sync(
path=package_dir / str(object_id), sip_id=sip_id
)
# DPRES service won't process files with the suffix '.incomplete'
temp_filename = f"{museum_package.sip_filename}.incomplete"
dest_path = Path(CONFIG["ssh"]["home_path"]) / "transfer"
print(f"Uploading to {dest_path / temp_filename}")
sftp.put(
museum_package.sip_archive_path,
str(dest_path / temp_filename)
)
print("Renaming uploaded file")
sftp.rename(
str(dest_path / temp_filename),
str(dest_path / museum_package.sip_filename)
)
return museum_package
@click.command()
@click.option(
"--package-dir",
help="Directory used to process and store the objects",
type=click.Path(exists=True, file_okay=False, dir_okay=True),
default=Path.home() / "MuseumObjects"
)
@click.option(
"--debug/--no-debug", default=False, envvar="MUSEUMPLUS_DEBUG",
help=(
"Enable debug mode. Any unhandled exception will launch a debugger."
)
)
@click.option(
"--sip-id",
help=(
"Optional SIP ID allowing multiple SIPs to be generated for the "
"same package."
),
type=str, default=None
)
@click.argument("object_id", nargs=1)
def cli(package_dir, object_id, debug, sip_id):
main(package_dir, object_id, debug, sip_id)
def main(package_dir, object_id, debug=False, sip_id=None):
package_dir = Path(package_dir)
with debugger_enabled(debug):
return submit_sip(
package_dir=package_dir, object_id=object_id, sip_id=sip_id
)
if __name__ == "__main__":
cli()
| 29.583333 | 77 | 0.677666 | 329 | 2,485 | 4.908815 | 0.322188 | 0.037152 | 0.049536 | 0.055728 | 0.139319 | 0.076161 | 0.034675 | 0 | 0 | 0 | 0 | 0.000522 | 0.228571 | 2,485 | 83 | 78 | 29.939759 | 0.841941 | 0.185111 | 0 | 0.089286 | 0 | 0 | 0.199798 | 0.020182 | 0 | 0 | 0 | 0 | 0 | 1 | 0.053571 | false | 0.071429 | 0.107143 | 0 | 0.196429 | 0.035714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
8dc9eea63f0db427ca8d82b08f280d3e1c15f968 | 2,931 | py | Python | alipay/aop/api/domain/AlipayOfflineProviderIndirectisvActivityCreateModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/AlipayOfflineProviderIndirectisvActivityCreateModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/AlipayOfflineProviderIndirectisvActivityCreateModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.IndirectIsvTerminalInfo import IndirectIsvTerminalInfo
class AlipayOfflineProviderIndirectisvActivityCreateModel(object):
def __init__(self):
self._ext_info = None
self._indirect_isv_terminal_info = None
self._merchant_id = None
@property
def ext_info(self):
return self._ext_info
@ext_info.setter
def ext_info(self, value):
self._ext_info = value
@property
def indirect_isv_terminal_info(self):
return self._indirect_isv_terminal_info
@indirect_isv_terminal_info.setter
def indirect_isv_terminal_info(self, value):
if isinstance(value, list):
self._indirect_isv_terminal_info = list()
for i in value:
if isinstance(i, IndirectIsvTerminalInfo):
self._indirect_isv_terminal_info.append(i)
else:
self._indirect_isv_terminal_info.append(IndirectIsvTerminalInfo.from_alipay_dict(i))
@property
def merchant_id(self):
return self._merchant_id
@merchant_id.setter
def merchant_id(self, value):
self._merchant_id = value
def to_alipay_dict(self):
params = dict()
if self.ext_info:
if hasattr(self.ext_info, 'to_alipay_dict'):
params['ext_info'] = self.ext_info.to_alipay_dict()
else:
params['ext_info'] = self.ext_info
if self.indirect_isv_terminal_info:
if isinstance(self.indirect_isv_terminal_info, list):
for i in range(0, len(self.indirect_isv_terminal_info)):
element = self.indirect_isv_terminal_info[i]
if hasattr(element, 'to_alipay_dict'):
self.indirect_isv_terminal_info[i] = element.to_alipay_dict()
if hasattr(self.indirect_isv_terminal_info, 'to_alipay_dict'):
params['indirect_isv_terminal_info'] = self.indirect_isv_terminal_info.to_alipay_dict()
else:
params['indirect_isv_terminal_info'] = self.indirect_isv_terminal_info
if self.merchant_id:
if hasattr(self.merchant_id, 'to_alipay_dict'):
params['merchant_id'] = self.merchant_id.to_alipay_dict()
else:
params['merchant_id'] = self.merchant_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayOfflineProviderIndirectisvActivityCreateModel()
if 'ext_info' in d:
o.ext_info = d['ext_info']
if 'indirect_isv_terminal_info' in d:
o.indirect_isv_terminal_info = d['indirect_isv_terminal_info']
if 'merchant_id' in d:
o.merchant_id = d['merchant_id']
return o
| 35.313253 | 104 | 0.639713 | 349 | 2,931 | 5 | 0.157593 | 0.132378 | 0.228653 | 0.276791 | 0.429226 | 0.366189 | 0.135817 | 0.135817 | 0.10659 | 0.064183 | 0 | 0.000945 | 0.277721 | 2,931 | 82 | 105 | 35.743902 | 0.823335 | 0.01433 | 0 | 0.104478 | 0 | 0 | 0.081802 | 0.036049 | 0 | 0 | 0 | 0 | 0 | 1 | 0.134328 | false | 0 | 0.044776 | 0.044776 | 0.283582 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
8dca2da6c8645bf9fdd19dabbdf05a6549cdbc55 | 4,220 | py | Python | mate/environments/environment.py | thomyphan/emergent-cooperation | 2406b8679ddbebba745f1026ca3689f1ba181e28 | [
"MIT"
] | null | null | null | mate/environments/environment.py | thomyphan/emergent-cooperation | 2406b8679ddbebba745f1026ca3689f1ba181e28 | [
"MIT"
] | null | null | null | mate/environments/environment.py | thomyphan/emergent-cooperation | 2406b8679ddbebba745f1026ca3689f1ba181e28 | [
"MIT"
] | null | null | null | import numpy
class Environment:
def __init__(self, params) -> None:
self.domain_value_labels = params["domain_value_labels"]
self.observation_dim = params["observation_dim"]
self.nr_agents = params["nr_agents"]
self.nr_actions = params["nr_actions"]
self.time_limit = params["time_limit"]
self.gamma = params["gamma"]
self.time_step = 0
self.sent_gifts = numpy.zeros(self.nr_agents)
self.discounted_returns = numpy.zeros(self.nr_agents)
self.undiscounted_returns = numpy.zeros(self.nr_agents)
self.domain_counts = numpy.zeros(len(self.domain_value_labels))
        self.last_joint_action = -numpy.ones(self.nr_agents, dtype=int)
"""
Performs the joint action in order to change the environment.
Returns the reward for each agent in a list sorted by agent ID.
"""
def perform_step(self, joint_action):
assert not self.is_done(), "Episode terminated at time step {}. Please, reset before calling 'step'."\
.format(self.time_step)
return numpy.zeros(self.nr_agents), {}
"""
Indicates if an episode is done and the environments needs
to be reset.
"""
def is_done(self):
return self.time_step >= self.time_limit
def action_as_vector(self, action):
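        # Actions in [0, nr_actions) are one-hot encoded; negative (undefined) actions
        # give a zero vector and gift actions (>= nr_actions) give an all-ones vector.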
if action < self.nr_actions:
vector = numpy.zeros(self.nr_actions)
if action >= 0:
vector[action] = 1
else:
vector = numpy.ones(self.nr_actions)
return vector
"""
Performs a joint action to change the state of the environment.
Returns the joint observation, the joint reward, a done flag,
and other optional information (e.g., logged data).
Note: The joint action must be a list ordered according to the agent ID!.
"""
def step(self, joint_action):
assert len(joint_action) == self.nr_agents, "Length of 'joint_action' is {}, expected {}"\
.format(len(joint_action), self.nr_agents)
assert not self.is_done(), "Episode terminated at time step {}. Please, reset before calling 'step'."\
.format(self.time_step)
rewards, infos = self.perform_step(joint_action)
for i, a in enumerate(joint_action):
self.last_joint_action[i] = a
if a >= self.nr_actions:
self.sent_gifts[i] += 1
assert len(rewards) == self.nr_agents, "Length of 'rewards' is {}, expected {}"\
.format(len(rewards), self.nr_agents)
observations = self.joint_observation()
assert len(observations) == self.nr_agents, "Length of 'observations' is {}, expected {}"\
.format(len(observations), self.nr_agents)
self.time_step += 1
self.domain_counts[0] += 1.0
self.undiscounted_returns += rewards
self.discounted_returns += (self.gamma**self.time_step)*rewards
if "neighbor_agents" not in infos:
infos["neighbor_agents"] = [[j for j in range(self.nr_agents) if j != i] for i in range(self.nr_agents)]
return observations, rewards, self.is_done(), infos
def get_index(self, label):
return self.domain_value_labels.index(label)
"""
The local observation for a specific agent. Only visible for
the corresponding agent and private to others.
"""
def local_observation(self, agent_id):
pass
"""
Returns the observations of all agents in a listed sorted by agent ids.
"""
def joint_observation(self):
return [numpy.array(self.local_observation(i)).reshape(self.observation_dim) for i in range(self.nr_agents)]
"""
Returns a high-level value which is domain-specific.
"""
def domain_values(self):
return self.domain_counts
def domain_value_debugging_indices(self):
return 0,1
"""
Re-Setup of the environment for a new episode.
"""
def reset(self):
self.time_step = 0
self.discounted_returns[:] = 0
self.undiscounted_returns[:] = 0
self.last_joint_action[:] = -1
self.domain_counts[:] = 0
self.sent_gifts[:] = 0
return self.joint_observation()
| 38.363636 | 116 | 0.638389 | 555 | 4,220 | 4.684685 | 0.225225 | 0.046154 | 0.069231 | 0.030769 | 0.250385 | 0.143846 | 0.113846 | 0.070769 | 0.070769 | 0.070769 | 0 | 0.005099 | 0.256398 | 4,220 | 109 | 117 | 38.715596 | 0.823454 | 0 | 0 | 0.086957 | 0 | 0 | 0.109352 | 0 | 0 | 0 | 0 | 0 | 0.072464 | 1 | 0.15942 | false | 0.014493 | 0.014493 | 0.072464 | 0.318841 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8dca97c9199a78c34139496f0e9ce4927d4d5e8f | 1,651 | py | Python | boa3/model/attribute.py | hal0x2328/neo3-boa | 6825a3533384cb01660773050719402a9703065b | [
"Apache-2.0"
] | 25 | 2020-07-22T19:37:43.000Z | 2022-03-08T03:23:55.000Z | boa3/model/attribute.py | hal0x2328/neo3-boa | 6825a3533384cb01660773050719402a9703065b | [
"Apache-2.0"
] | 419 | 2020-04-23T17:48:14.000Z | 2022-03-31T13:17:45.000Z | boa3/model/attribute.py | hal0x2328/neo3-boa | 6825a3533384cb01660773050719402a9703065b | [
"Apache-2.0"
] | 15 | 2020-05-21T21:54:24.000Z | 2021-11-18T06:17:24.000Z | import ast
from typing import Optional, Tuple, Union
from boa3.model.expression import IExpression
from boa3.model.imports.package import Package
from boa3.model.symbol import ISymbol
from boa3.model.type.classes.classtype import ClassType
from boa3.model.type.itype import IType
class Attribute(IExpression):
"""
A class used to represent an attribute
:ivar value: the origin expression that has the attribute
:ivar attr_name: the name of the attribute
:ivar attr_symbol: the found symbol for the attribute
"""
def __init__(self, value: Union[ast.AST, IExpression, Package], attr_name: str,
attr_symbol: Optional[ISymbol] = None, origin: Optional[ast.AST] = None):
super().__init__(origin)
self.value: Union[ast.AST, IExpression, Package] = value
self.attr_name: str = attr_name
obj_with_symbols = value.type if isinstance(value, IExpression) else value
if (isinstance(value, (IExpression, ClassType, Package))
and hasattr(obj_with_symbols, 'symbols') and attr_name in obj_with_symbols.symbols):
attr_symbol = obj_with_symbols.symbols[attr_name]
self.attr_symbol: Optional[ISymbol] = attr_symbol
@property
def shadowing_name(self) -> str:
return 'attribute'
@property
def type(self) -> IType:
from boa3.model.type.type import Type
return self.attr_symbol.type if isinstance(self.attr_symbol, IExpression) else Type.none
@property
def values(self) -> Tuple[Union[ast.AST, IExpression], Optional[ISymbol], str]:
return self.value, self.attr_symbol, self.attr_name
| 35.891304 | 100 | 0.706239 | 218 | 1,651 | 5.201835 | 0.256881 | 0.070547 | 0.068783 | 0.044974 | 0.111111 | 0.067019 | 0.067019 | 0 | 0 | 0 | 0 | 0.004577 | 0.205936 | 1,651 | 45 | 101 | 36.688889 | 0.860412 | 0.117505 | 0 | 0.107143 | 0 | 0 | 0.011197 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.285714 | 0.071429 | 0.571429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
8dcb21a58f1185a80b68bc21fae747a3384bef5d | 2,409 | py | Python | craynn/viz/imgs.py | maxim-borisyak/craynn | fceabd33f5969033fb3605f894778c42c42f3e08 | [
"MIT"
] | null | null | null | craynn/viz/imgs.py | maxim-borisyak/craynn | fceabd33f5969033fb3605f894778c42c42f3e08 | [
"MIT"
] | null | null | null | craynn/viz/imgs.py | maxim-borisyak/craynn | fceabd33f5969033fb3605f894778c42c42f3e08 | [
"MIT"
] | null | null | null | import os
import os.path as osp
__all__ = [
'pack_images',
'plot_and_pack',
'save_images'
]
def pack_images(output, imgs, vmax=1024.0, archive=None, name="image_{index}.png", **data):
from scipy.misc import toimage
try:
os.makedirs(output)
except:
pass
for i in range(imgs.shape[0]):
args = dict([ (k, v[i]) for k, v in data.items()])
args['index'] = i
path = osp.join(output, name.format(**args))
toimage(imgs[i], cmin=0.0, cmax=vmax, channel_axis=0).save(path)
if archive is not None:
import subprocess as sb
if sb.check_call(['tar', '-czvf', archive, output]):
os.removedirs(output)
def save_images(cycle, version, original, transformed, outdir='output', pack=True):
import matplotlib.pyplot as plt
import os
import os.path as osp
path = osp.join(outdir, 'images_%012d_%s' % (cycle, str(version)))
os.system('rm -rf %s' % path)
os.system('mkdir -p %s' % path)
plt.ioff()
for i in range(original.shape[0]):
fig = plt.figure(figsize=(10, 4))
ax = fig.add_subplot(1, 2, 1)
ax.grid('off')
im = ax.imshow(original[i, 0], interpolation='None', cmap=plt.cm.gray)
cb = fig.colorbar(im)
ax = fig.add_subplot(1, 2, 2)
ax.grid('off')
im = ax.imshow(transformed[i, 0], interpolation='None', cmap=plt.cm.gray)
cb = fig.colorbar(im)
plt.savefig(osp.join(path, 'test_%06d.png' % i), dpi=80)
plt.close(fig)
plt.ion()
if pack:
tar_path = osp.join(outdir, 'test_images_%s.tar.gz' % version)
os.system('tar -czf %s %s ' % (tar_path, path))
def plot_and_pack(imgs, outdir='output', pack=True, name="image_{index}.png",
figsize=(5, 4), cmap='Grey', **data):
import matplotlib.pyplot as plt
import os
import os.path as osp
os.system('rm -rf %s' % outdir)
os.system('mkdir -p %s' % outdir)
plt.ioff()
for i in range(imgs.shape[0]):
fig = plt.figure(figsize=figsize)
plt.grid('off')
plt.imshow(imgs[i, 0], interpolation='None', cmap=cmap)
plt.colorbar()
args = dict([(k, v[i]) for k, v in data.items()])
args['index'] = i
filename = name.format(**args)
plt.savefig(osp.join(outdir, filename), dpi=80)
plt.close(fig)
plt.ion()
if pack:
basedir, cwd = osp.split(outdir)
tar_path = osp.join(basedir, '%s.tar.gz' % cwd)
print('Archive', tar_path)
return os.system('tar -czf %s %s ' % (tar_path, outdir))
| 26.184783 | 86 | 0.62308 | 384 | 2,409 | 3.835938 | 0.296875 | 0.032587 | 0.029871 | 0.032587 | 0.448744 | 0.395112 | 0.299389 | 0.253904 | 0.222675 | 0.184657 | 0 | 0.017672 | 0.201328 | 2,409 | 91 | 87 | 26.472527 | 0.747921 | 0 | 0 | 0.371429 | 0 | 0 | 0.105438 | 0.008717 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042857 | false | 0.014286 | 0.142857 | 0 | 0.2 | 0.014286 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8dcbe319893f08c9fec316e62b3f5815719b8547 | 34,085 | py | Python | shield.py | sidsen/VRL_CodeReview | 6ee6114215ee49570c47ae41789f5d9bd11ce820 | [
"MIT"
] | 10 | 2019-10-22T16:58:43.000Z | 2021-12-05T06:12:53.000Z | shield.py | sidsen/VRL_CodeReview | 6ee6114215ee49570c47ae41789f5d9bd11ce820 | [
"MIT"
] | 1 | 2019-02-23T05:44:06.000Z | 2019-02-23T05:44:06.000Z | shield.py | sidsen/VRL_CodeReview | 6ee6114215ee49570c47ae41789f5d9bd11ce820 | [
"MIT"
] | 5 | 2019-02-23T04:10:06.000Z | 2020-09-06T22:59:26.000Z | import metrics
from metrics import timeit
from main import *
import os
class Shield(object):
def __init__(self, env, actor, model_path=None, force_learning=False, debug=False):
"""init
Args:
env (Environment): environment
actor (ActorNetwork): actor
            force_learning (bool, optional): if True, retrain even when a trained model is already stored at model_path.
"""
self.env = env
self.actor = actor
self.model_path = model_path
self.K = None
self.K_list = []
self.initial_range_list = []
if not force_learning and os.path.isfile(str(self.model_path)):
self.K_list = [_K for _K in loadK(self.model_path)]
self.continuous = env.continuous
self.shield_count = 0
self.debug = debug
self.step_count = 0
self.last_B_value = 0
self.keep_increasing = False
@timeit
def train_shield(self, learning_method, number_of_rollouts, simulation_steps, eq_err=1e-2, rewardf=None, testf=None, explore_mag = .04, step_size = .05, names=None,
coffset=None, bias=False, discretization=False, lqr_start=False, degree=4, without_nn_guide=False, enable_jit=False):
"""train shield
Args:
learning_method (string): learning method string
number_of_rollouts (int): number of rollouts
simulation_steps (int): simulation steps
timestep (float, optional): timestep for continuous control
eq_err (float, optional): amount of Gaussian error
rewardf (None, optional): reward function
testf (None, optional): reward function for draw controller
explore_mag (float, optional): explore mag
step_size (float, optional): step size
names (None, optional): names of state
"""
# continuous
if self.env.continuous:
self.B_str_list = []
self.B_list = []
self.last_B_result = []
self.B = None
if self.K_list == []:
#assert names is not None
x0 = self.env.reset()
def default_testf_continous(x, u):
if self.env.unsafe:
if ((np.array(x) < self.env.x_max)*(np.array(x) > self.env.x_min)).all(axis=1).any():
return -1
else:
return 0
else:
if ((x < self.env.x_max).all() and (x > self.env.x_min).all()):
return 0
else:
return -1
def learning_oracle_continuous(x):
self.K = learn_shield(self.env.A, self.env.B, self.env.Q, self.env.R, x, eq_err,\
learning_method, number_of_rollouts, simulation_steps, self.actor, self.env.x_min, self.env.x_max, rewardf=rewardf, \
continuous=True, timestep=self.env.timestep, explore_mag = explore_mag, step_size = step_size, coffset=coffset, bias=bias, \
unsafe_flag=self.env.unsafe, lqr_start=lqr_start, without_nn_guide=without_nn_guide)
return self.K
def draw_oracle_continuous(x, K):
# draw_controller (self.env.A, self.env.B, self.K, x, simulation_steps*shield_testing_on_x_ep_len, names, True, 0.01)
test_reward = testf if testf is not None else default_testf_continous
result = test_controller (self.env.A, self.env.B, self.K, x, simulation_steps*shield_testing_on_x_ep_len, rewardf=test_reward, \
continuous=True, timestep=self.env.timestep, coffset=coffset, bias=bias)
return result
#Iteratively search policies that can cover all initial states
'''
Fixme: the verification approach does not consider the case under which x_min and x_max
'''
def verification_oracle_continuous(x, initial_size, Theta, K):
#Theta and K are unused here but required by the API
#Generate the closed loop system for verification
Acl = self.env.A + self.env.B.dot(self.K)
print "Learned Closed Loop System: {}".format(Acl)
if (discretization):
S0 = Polyhedron.from_bounds(self.env.s_min, self.env.s_max)
self.O_inf = verify_controller_via_discretization(Acl, self.env.timestep, self.env.x_min, self.env.x_max)
min = np.array([[x[i,0] - initial_size[i]] for i in range(self.env.state_dim)])
max = np.array([[x[i,0] + initial_size[i]] for i in range(self.env.state_dim)])
S = Polyhedron.from_bounds(min, max)
S = S.intersection(S0)
ce = S.is_included_in_with_ce(self.O_inf)
return (ce is None)
else:
#Specs for initial conditions
init = []
initSOSPoly = []
init_cnstr = []
for i in range(self.env.state_dim):
init.append("init" + str(i+1) + " = (x[" + str(i+1) + "] - " + str(self.env.s_min[i,0]) + ")*(" + str(self.env.s_max[i,0]) + "-x[" + str(i+1) + "])")
for i in range(self.env.state_dim):
initSOSPoly.append("@variable m Zinit" + str(i+1) + " SOSPoly(Z)")
for i in range(self.env.state_dim):
init_cnstr.append(" - Zinit" + str(i+1) + "*init" + str(i+1))
#Specs for initial conditions subject to initial_size
for i in range(self.env.state_dim):
l = x[i,0] - initial_size[i]
h = x[i,0] + initial_size[i]
init.append("init" + str(self.env.state_dim+i+1) + " = (x[" + str(i+1) + "] - (" + str(l) + "))*((" + str(h) + ")-x[" + str(i+1) + "])")
for i in range(self.env.state_dim):
initSOSPoly.append("@variable m Zinit" + str(self.env.state_dim+i+1) + " SOSPoly(Z)")
for i in range(self.env.state_dim):
init_cnstr.append(" - Zinit" + str(self.env.state_dim+i+1) + "*init" + str(self.env.state_dim+i+1))
#Specs for unsafe conditions, depending on env.unsafe
unsafe = []
unsafeSOSPoly = []
unsafe_cnstr = []
if (self.env.unsafe):
#unsafe is given either via unsafe regions or unsafe properties in the env
if (self.env.unsafe_property is not None):
unsafes = self.env.unsafe_property ()
unsafe = []
unsafeSOSPoly = []
unsafe_cnstr = []
for i in range(len(unsafes)):
unsafe.append("unsafe" + str(i+1) + " = " + unsafes[i])
unsafeSOSPoly.append("@variable m Zunsafe" + str(i+1) + " SOSPoly(Z)")
unsafe_cnstr.append(" - Zunsafe" + str(i+1) + "*unsafe" + str(i+1))
if (self.env.x_min is not None):
for j in range(len(self.env.x_min)):
unsafe_query = ""
unsafe_x_min = self.env.x_min[j]
unsafe_x_max = self.env.x_max[j]
for i in range(self.env.state_dim):
if unsafe_x_min[i, 0] != np.NINF and unsafe_x_max[i, 0] != np.inf:
unsafe.append("unsafe" + str(i+1) + " = (x[" + str(i+1) + "] - " + str(unsafe_x_min[i,0]) + ")*(" + str(unsafe_x_max[i,0]) + "-x[" + str(i+1) + "])")
unsafeSOSPoly.append("@variable m Zunsafe" + str(i+1) + " SOSPoly(Z)")
unsafe_query += " - Zunsafe" + str(i+1) + "*unsafe" + str(i+1)
elif unsafe_x_min[i, 0] != np.NINF:
unsafe.append("unsafe" + str(i+1) + " = (x[" + str(i+1) + "] - " + str(unsafe_x_min[i,0]) + ")*(" + str(unsafe_x_max[i,0]) + "-x[" + str(i+1) + "])")
unsafeSOSPoly.append("@variable m Zunsafe" + str(i+1) + " SOSPoly(Z)")
unsafe_query += " - Zunsafe" + str(i+1) + "*unsafe" + str(i+1)
elif unsafe_x_max[i, 0] != np.inf:
unsafe.append("unsafe" + str(i+1) + " = (x[" + str(i+1) + "] - " + str(unsafe_x_min[i,0]) + ")*(" + str(unsafe_x_max[i,0]) + "-x[" + str(i+1) + "])")
unsafeSOSPoly.append("@variable m Zunsafe" + str(i+1) + " SOSPoly(Z)")
unsafe_query += " - Zunsafe" + str(i+1) + "*unsafe" + str(i+1)
if unsafe_query != "":
unsafe_cnstr.append(unsafe_query)
else:
for i in range(self.env.state_dim):
mid = (self.env.x_min[i, 0] + self.env.x_max[i, 0]) / 2
radium = self.env.x_max[i, 0] - mid
unsafe.append("unsafe" + str(i+1) + " = (x[" + str(i+1) + "] - " + str(mid) + ")^2 - " + str(pow(radium, 2)))
unsafeSOSPoly.append("@variable m Zunsafe" + str(i+1) + " SOSPoly(Z)")
unsafe_cnstr.append(" - Zunsafe" + str(i+1) + "*unsafe" + str(i+1))
# Now we have init, unsafe and sysdynamics for verification
sos = genSOSContinuousAsDiscreteMultipleUnsafes(
self.env.timestep, self.env.state_dim, ",".join(dxdt(Acl)), "\n".join(init), "\n".join(unsafe),
"\n".join(initSOSPoly), "\n".join(unsafeSOSPoly), "".join(init_cnstr), unsafe_cnstr, degree=degree)
verified = verifySOS(writeSOS("SOS.jl", sos), False, 900)
print verified
if verified.split("#")[0].find("Optimal") >= 0:
# returns Verified and the inductive invariant
return True, verified.split("#")[1]
else:
return False, None
#return (verified.find("Optimal") >= 0)
Theta = (self.env.s_min, self.env.s_max)
result, resultList = verify_controller_z3(x0, Theta, verification_oracle_continuous, learning_oracle_continuous, draw_oracle_continuous, continuous=True)
print ("Shield synthesis result: {}".format(result))
if result:
for (x, initial_size, inv, K) in resultList:
self.B_str_list.append(inv+"\n")
self.B_list.append(barrier_certificate_str2func(inv, self.env.state_dim, enable_jit))
self.K_list.append(K)
initial_range = np.array([x-initial_size.reshape(len(initial_size), 1), x+initial_size.reshape(len(initial_size), 1)])
self.initial_range_list.append(initial_range)
self.save_shield(os.path.split(self.model_path)[0])
else:
self.load_shield(os.path.split(self.model_path)[0], enable_jit)
# discrete
else:
self.O_inf_list = []
self.last_O_inf_result = []
self.O_inf = None
if self.K_list == []:
x0 = self.env.reset()
S0 = Polyhedron.from_bounds(self.env.s_min, self.env.s_max)
def default_testf_discrete(x, u):
if self.env.unsafe:
if ((np.array(x) < self.env.x_max)*(np.array(x) > self.env.x_min)).all(axis=1).any():
return -1
else:
return 0
else:
if ((x < self.env.x_max).all() and (x > self.env.x_min).all()) and ((u < self.env.u_max).all() and (u > self.env.u_min).all()):
return 0
else:
return -1
def learning_oracle_discrete(x):
self.K = learn_shield(self.env.A, self.env.B, self.env.Q, self.env.R, x, eq_err,\
learning_method, number_of_rollouts, simulation_steps, self.actor, self.env.x_min, self.env.x_max, rewardf=rewardf,\
continuous=False, timestep=self.env.timestep, explore_mag = explore_mag, step_size = step_size, coffset=coffset, bias=bias, \
unsafe_flag=self.env.unsafe, lqr_start=lqr_start, without_nn_guide=without_nn_guide)
return self.K
def draw_oracle_discrete(x, K):
# draw_controller (self.env.A, self.env.B, self.K, x, simulation_steps*shield_testing_on_x_ep_len, names, True, 0.01)
test_reward = testf if testf is not None else default_testf_discrete
result = test_controller (self.env.A, self.env.B, self.K, x, simulation_steps*shield_testing_on_x_ep_len, rewardf=test_reward, \
coffset=coffset, bias=bias)
return result
#Iteratively search policies that can cover all initial states
def verification_oracle_discrete(x, initial_size, Theta, K):
self.O_inf = verify_controller(np.asarray(self.env.A), np.asarray(self.env.B), np.asarray(self.K), self.env.x_min, self.env.x_max, self.env.u_min, self.env.u_max)
min = np.array([[x[i,0] - initial_size[i]] for i in range(self.env.state_dim)])
max = np.array([[x[i,0] + initial_size[i]] for i in range(self.env.state_dim)])
S = Polyhedron.from_bounds(min, max)
S = S.intersection(S0)
ce = S.is_included_in_with_ce(self.O_inf)
if ce is None:
self.K_list.append(K)
self.O_inf_list.append(self.O_inf)
initial_range = np.array([x-initial_size.reshape(len(initial_size), 1), x+initial_size.reshape(len(initial_size), 1)])
self.initial_range_list.append(initial_range)
return (ce is None)
Theta = (self.env.s_min, self.env.s_max)
result = verify_controller_z3(x0, Theta, verification_oracle_discrete, learning_oracle_discrete, draw_oracle_discrete, continuous=False)
print ("Shield synthesis result: {}".format(result))
if result:
self.save_shield(os.path.split(self.model_path)[0])
else:
self.load_shield(os.path.split(self.model_path)[0], enable_jit)
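# As the oracle names above suggest, train_shield wires three callbacks into
# verify_controller_z3, which acts as a counterexample-guided loop: learn a
# candidate linear controller K for a region of initial states, test it by
# simulation, then verify it (an SOS barrier certificate in the continuous case,
# invariant-set inclusion in the discrete case); initial states not yet covered
# trigger another round.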
@timeit
def train_polysys_shield(self, learning_method, number_of_rollouts, simulation_steps, eq_err=1e-2,
explore_mag = .04, step_size = .05, names=None, coffset=None, bias=False, degree=4, aggressive=False, without_nn_guide=False, enable_jit=False):
"""train shield
Args:
learning_method (string): learning method string
number_of_rollouts (int): number of rollouts
simulation_steps (int): simulation steps
timestep (float, optional): timestep for continuous control
eq_err (float, optional): amount of Gaussian error
rewardf (None, optional): reward function
testf (None, optional): reward function for draw controller
explore_mag (float, optional): explore mag
step_size (float, optional): step size
names (None, optional): names of state
"""
"""
Additional arguments in line 2 of the function signature:
polyf: describes the polynomial system dynamics in Python
polyf_to_str(K): describes the polynomial system dynamics as a string
rewardf: describes the polynomial system reward function
testf: describes the polynomial system test function
unsafe_string(): describes the polynomial unsafe conditions as a string
"""
self.B_str_list = []
self.B_list = []
self.last_B_result = []
self.B = None
self.initial_range_list = []
if self.K_list == []:
#assert names is not None
x0 = self.env.reset()
def learning_oracle_continuous(x):
self.K = learn_polysys_shield(self.env.polyf, self.env.state_dim, self.env.action_dim, self.env.Q, self.env.R, x, eq_err,\
learning_method, number_of_rollouts, simulation_steps, self.actor, rewardf=self.env.rewardf, \
continuous=True, timestep=self.env.timestep, explore_mag = explore_mag, step_size = step_size, coffset=coffset, bias=bias, without_nn_guide=without_nn_guide)
return self.K
def draw_oracle_continuous(x, K):
result = test_controller_helper(self.env.polyf, self.K, x, simulation_steps*shield_testing_on_x_ep_len, rewardf=self.env.testf, continuous=True, timestep=self.env.timestep,\
coffset=coffset, bias=bias)
if (result >= 0):
# Find *a new piece of* controller
saveK(self.model_path, self.K)
return result
#Iteratively search policies that can cover all initial states
def verification_oracle_continuous(x, initial_size, Theta, K):
#Theta and K are unused here but required by the API
#Specs for initial conditions
init = []
initSOSPoly = []
init_cnstr = []
for i in range(self.env.state_dim):
init.append("init" + str(i+1) + " = (x[" + str(i+1) + "] - " + str(self.env.s_min[i,0]) + ")*(" + str(self.env.s_max[i,0]) + "-x[" + str(i+1) + "])")
for i in range(self.env.state_dim):
initSOSPoly.append("@variable m Zinit" + str(i+1) + " SOSPoly(Z)")
for i in range(self.env.state_dim):
init_cnstr.append(" - Zinit" + str(i+1) + "*init" + str(i+1))
#Specs for initial conditions subject to initial_size
for i in range(self.env.state_dim):
l = x[i,0] - initial_size[i]
h = x[i,0] + initial_size[i]
init.append("init" + str(self.env.state_dim+i+1) + " = (x[" + str(i+1) + "] - (" + str(l) + "))*((" + str(h) + ")-x[" + str(i+1) + "])")
for i in range(self.env.state_dim):
initSOSPoly.append("@variable m Zinit" + str(self.env.state_dim+i+1) + " SOSPoly(Z)")
for i in range(self.env.state_dim):
init_cnstr.append(" - Zinit" + str(self.env.state_dim+i+1) + "*init" + str(self.env.state_dim+i+1))
#Specs for unsafe conditions
unsafes = self.env.unsafe_property()
unsafe = []
unsafeSOSPoly = []
unsafe_cnstr = []
for i in range(len(unsafes)):
unsafe.append("unsafe" + str(i+1) + " = " + unsafes[i])
for i in range(len(unsafes)):
unsafeSOSPoly.append("@variable m Zunsafe" + str(i+1) + " SOSPoly(Z)")
for i in range(len(unsafes)):
unsafe_cnstr.append(" - Zunsafe" + str(i+1) + "*unsafe" + str(i+1))
#Specs for bounded state space
bound = []
boundSOSPoly = []
bound_cnstr = []
if (self.env.bound_x_min is not None and self.env.bound_x_max is not None):
for i in range(self.env.state_dim):
if (self.env.bound_x_min[i,0] is not None and self.env.bound_x_max[i,0] is not None):
bound.append("bound" + str(i+1) + " = (x[" + str(i+1) + "] - " + str(self.env.bound_x_min[i,0]) + ")*(" + str(self.env.bound_x_max[i,0]) + "-x[" + str(i+1) + "])")
for i in range(self.env.state_dim):
if (self.env.bound_x_min[i,0] is not None and self.env.bound_x_max[i,0] is not None):
boundSOSPoly.append("@variable m Zbound" + str(i+1) + " SOSPoly(Z)")
for i in range(self.env.state_dim):
if (self.env.bound_x_min[i,0] is not None and self.env.bound_x_max[i,0] is not None):
bound_cnstr.append(" - Zbound" + str(i+1) + "*bound" + str(i+1))
#Specs for bounded environment disturbance
disturbance = []
disturbanceSOSPoly = []
disturbance_cnstr = []
if (self.env.disturbance_x_min is not None and self.env.disturbance_x_max is not None):
for i in range(self.env.state_dim):
if (self.env.disturbance_x_min[i,0] is not None and self.env.disturbance_x_max[i,0] is not None):
disturbance.append("disturbance" + str(i+1) + " = (d[" + str(i+1) + "] - " + str(self.env.disturbance_x_min[i,0]) + ")*(" + str(self.env.disturbance_x_max[i,0]) + "-d[" + str(i+1) + "])")
for i in range(self.env.state_dim):
if (self.env.disturbance_x_min[i,0] is not None and self.env.disturbance_x_max[i,0] is not None):
disturbanceSOSPoly.append("@variable m Zdisturbance" + str(i+1) + " SOSPoly(D)")
for i in range(self.env.state_dim):
if (self.env.disturbance_x_min[i,0] is not None and self.env.disturbance_x_max[i,0] is not None):
disturbance_cnstr.append(" - Zdisturbance" + str(i+1) + "*disturbance" + str(i+1))
# Now we have init, unsafe and sysdynamics for verification
sos = None
if (self.env.bound_x_min is not None and self.env.bound_x_max is not None):
sos = genSOSwithBound(self.env.state_dim, ",".join(self.env.polyf_to_str(K)), "\n".join(init), "\n".join(unsafe), "\n".join(bound),
"\n".join(initSOSPoly), "\n".join(unsafeSOSPoly), "\n".join(boundSOSPoly),
"".join(init_cnstr), "".join(unsafe_cnstr), "".join(bound_cnstr), degree=degree)
elif (self.env.disturbance_x_min is not None and self.env.disturbance_x_max is not None):
sos = genSOSwithDisturbance(self.env.state_dim, ",".join(self.env.polyf_to_str(K)), "\n".join(init), "\n".join(unsafe), "\n".join(disturbance),
"\n".join(initSOSPoly), "\n".join(unsafeSOSPoly), "\n".join(disturbanceSOSPoly),
"".join(init_cnstr), "".join(unsafe_cnstr), "".join(disturbance_cnstr), degree=degree)
else:
sos = genSOS(self.env.state_dim, ",".join(self.env.polyf_to_str(K)), "\n".join(init), "\n".join(unsafe),
"\n".join(initSOSPoly), "\n".join(unsafeSOSPoly),
"".join(init_cnstr), "".join(unsafe_cnstr), degree=degree)
verified = verifySOS(writeSOS("SOS.jl", sos), False, 900, aggressive=aggressive)
print verified
if verified.split("#")[0].find("Optimal") >= 0:
return True, verified.split("#")[1]
else:
return False, None
Theta = (self.env.s_min, self.env.s_max)
result, resultList = verify_controller_z3(x0, Theta, verification_oracle_continuous, learning_oracle_continuous, draw_oracle_continuous, continuous=True)
print ("Shield synthesis result: {}".format(result))
if result:
for (x, initial_size, inv, K) in resultList:
self.B_str_list.append(inv+"\n")
self.B_list.append(barrier_certificate_str2func(inv, self.env.state_dim, enable_jit))
self.K_list.append(K)
initial_range = np.array([x-initial_size.reshape(len(initial_size), 1), x+initial_size.reshape(len(initial_size), 1)])
self.initial_range_list.append(initial_range)
self.save_shield(os.path.split(self.model_path)[0])
else:
self.load_shield(os.path.split(self.model_path)[0], enable_jit)
def save_shield(self, model_path):
if self.env.continuous:
with open(model_path+"/shield.model", "w") as f:
for B_str in self.B_str_list:
f.write(B_str)
# print B_str
print "store shield to "+model_path+"/shield.model"
saveK(model_path+"/K.model", np.array(self.K_list))
print "store K to "+model_path+"/K.model.npy"
saveK(model_path+"/initial_range.model", np.array(self.initial_range_list))
print "store initial_range to "+model_path+"/initial_range.model.npy"
else:
saveK(model_path+"/K.model", np.array(self.K_list))
print "store K to "+model_path+"/K.model.npy"
saveK(model_path+"/initial_range.model", np.array(self.initial_range_list))
print "store initial_range to "+model_path+"/initial_range.model.npy"
def load_shield(self, model_path, enable_jit):
if self.env.continuous:
with open(model_path+"/shield.model", "r") as f:
for B_str in f:
self.B_list.append(barrier_certificate_str2func(B_str, self.env.state_dim, enable_jit))
print "load barrier from " + model_path + "/shield.model"
self.K_list = [K for K in loadK(model_path+"/K.model.npy")]
print "load K from "+model_path+"/K.model.npy"
self.initial_range_list = [initr for initr in loadK(model_path+"/initial_range.model.npy")]
print "load initial range to "+model_path+"/initial_range.model.npy"
else:
self.K_list = [K for K in loadK(model_path+"/K.model.npy")]
print "load K from "+model_path+"/K.model.npy"
self.initial_range_list = [initr for initr in loadK(model_path+"/initial_range.model.npy")]
print "load initial range to "+model_path+"/initial_range.model.npy"
for K in self.K_list:
O_inf = verify_controller(np.asarray(self.env.A), np.asarray(self.env.B), np.asarray(K), self.env.x_min, self.env.x_max, self.env.u_min, self.env.u_max)
self.O_inf_list.append(O_inf)
def select_shield(self):
i = -1
if (len(self.initial_range_list) > 1):
lowboundaries = np.array([item[0] for item in self.initial_range_list])
upboundaries = np.array([item[1] for item in self.initial_range_list])
if self.debug:
print "x0: \n", self.env.x0
print "low boundary: \n", lowboundaries
print "up boundary: \n", upboundaries
select_list = [(self.env.x0>low).all()*(self.env.x0<high).all() for low, high in zip(lowboundaries, upboundaries)]
i = select_list.index(True)
if self.debug:
print "select list", select_list
elif (len(self.initial_range_list) == 1):
i = 0  # only one shield available
else:
print "Error: No shield available!"
assert (False)
self.K = self.K_list[i]
if self.continuous:
self.B = self.B_list[i]
return self.B
else:
self.O_inf = self.O_inf_list[i]
return self.O_inf
def detactor(self, x, u, mode="single", loss_compensation=0.0, increase_step=-1):
"""detact if there are dangerous state in furture
Args:
x: current state
u: current action
mode (str, optional): single (faster, more calls) -> choose one shield according to the initial state.
all (slower, fewer calls) -> use all shields at run time; if all the B > 0, call the shield.
loss_compensation (float, optional): the compensation for loss in calculating the barrier
increase_step (int, optional): if B's value keeps increasing for this many steps, call the shield until the value stops increasing;
currently only the single mode is supported.
Returns:
Bool: True -> call shield
False -> call neural network
"""
mode_tuple = ("single", "all")
assert mode in mode_tuple
xk = self.env.simulation(u)
# single shield model
if mode == mode_tuple[0]:
# continuous
if self.env.continuous:
if self.B is None:
self.select_shield()
B_value = self.B(*state2list(xk))
if self.debug:
print B_value
if increase_step >= 0:
if B_value > self.last_B_value:
self.step_count += 1
else:
self.keep_increasing = False
self.last_B_value = B_value
if self.step_count >= increase_step:
self.step_count = 0
self.keep_increasing = True
if self.keep_increasing:
return True
if B_value > -loss_compensation:
return True
return False
# discrete
else:
self.select_shield()
if self.O_inf.contains(xk):
return False
return True
# all shield model
elif mode == mode_tuple[1]:
# continuous
if self.env.continuous:
current_B_result = []
if self.last_B_result == []:
lowboundaries = np.array([i[0] for i in self.initial_range_list])
upboundaries = np.array([i[1] for i in self.initial_range_list])
self.last_B_result = [np.logical_not((self.env.x0>low).all()*(self.env.x0<high).all()) for low, high in zip(lowboundaries, upboundaries)]
debug_list = []
for B in self.B_list:
B_value = B(*state2list(xk))
if self.debug:
debug_list.append(B_value)
res = B_value > -loss_compensation
current_B_result.append(res)
if self.debug:
print debug_list
if np.array(current_B_result).all():
# The K will be called later
self.K = self.K_list[self.last_B_result.index(False)]
return True
self.last_B_result = current_B_result
return False
# discrete
else:
current_O_inf_result = []
if self.last_O_inf_result == []:
lowboundaries = np.array([i[0] for i in self.initial_range_list])
upboundaries = np.array([i[1] for i in self.initial_range_list])
self.last_O_inf_result = [np.logical_not((self.env.x0>low).all()*(self.env.x0<high).all()) for low, high in zip(lowboundaries, upboundaries)]
for O_inf in self.O_inf_list:
res = not O_inf.contains(xk)
current_O_inf_result.append(res)
if self.debug:
print xk
print current_O_inf_result
if np.array(current_O_inf_result).all():
# The K will be called later
self.K = self.K_list[self.last_O_inf_result.index(False)]
return True
self.last_O_inf_result = current_O_inf_result
return False
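# Summary of the decision rule above: "single" evaluates one barrier/invariant
# chosen from the initial state and shields when the predicted next state xk
# violates it (B(xk) > -loss_compensation, or xk outside O_inf); "all" evaluates
# every stored barrier/invariant and shields only when all of them flag xk,
# using the previous step's results to pick which K to apply.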
def call_shield(self, x, mute=False):
"""call shield
Args:
x : current state
mute (bool, optional): print !shield or not
Returns:
shield action
"""
u = self.K.dot(x)
if not mute:
print 'Shield! in state: \n', x
self.shield_count += 1
return u
@timeit
def test_shield(self, test_ep=1, test_step=5000, x0=None, mode="single", loss_compensation=0, shield_combo=1, mute=False):
"""test if shield works
Args:
test_ep (int, optional): test episodes
test_step (int, optional): test step in each episode
"""
assert shield_combo > 0
assert loss_compensation >= 0
fail_time = 0
success_time = 0
fail_list = []
self.shield_count = 0
combo_remain = 0
for ep in xrange(test_ep):
if x0 is not None:
x = self.env.reset(x0)
else:
x = self.env.reset()
init_x = x
for i in xrange(test_step):
u = np.reshape(self.actor.predict(np.reshape(np.array(x), \
(1, self.actor.s_dim))), (self.actor.a_dim, 1))
# safe or not
if self.detactor(x, u, mode=mode, loss_compensation=loss_compensation) or (combo_remain > 0):
if combo_remain == 0:
combo_remain = shield_combo
u = self.call_shield(x, mute=mute)
if not mute:
print "!shield at step {}".format(i)
combo_remain -= 1
# step
x, _, terminal = self.env.step(u)
# success or fail
if terminal:
if np.sum(np.power(self.env.xk, 2)) < self.env.terminal_err:
success_time += 1
else:
fail_time += 1
fail_list.append((init_x, x))
break
if i == test_step-1:
success_time += 1
print "----epoch: {} ----".format(ep)
print 'initial state:\n', init_x, '\nterminal state:\n', x, '\nlast action:\n', self.env.last_u
print "----step: {} ----".format(i)
print 'Success: {}, Fail: {}'.format(success_time, fail_time)
print '#############Fail List:###############'
for (i, e) in fail_list:
print 'initial state:\n{}\nend state: \n{}\n----'.format(i, e)
print 'shield times: {}, shield ratio: {}'.format(self.shield_count, float(self.shield_count)/(test_ep*test_step))
@timeit
def shield_boundary(self, sample_ep=500, sample_step=100):
"""sample to find the state bound of shield
Args:
sample_ep (int, optional): epsoides
sample_step (int, optional): step in each epsoide
"""
max_boundary = np.zeros([self.env.state_dim, 1])
min_boundary = np.zeros([self.env.state_dim, 1])
for ep in xrange(sample_ep):
x = self.env.reset()
for i in xrange(sample_step):
u = self.call_shield(x, mute=True)
max_boundary, min_boundary = metrics.find_boundary(x, max_boundary, min_boundary)
# step
x, _, terminal = self.env.step(u)
print 'max_boundary:\n{}\nmin_boundary:\n{}'.format(max_boundary, min_boundary)
def learn_shield_gd(self, lr=0.00001, epsoides=100, steps=1000):
K = np.random.random(self.env.state_dim)
grad = np.zeros(self.env.state_dim)
for ep in xrange(epsoides):
self.env.reset()
loss = 0
for step in xrange(steps):
u = self.actor.predict(np.reshape(np.array(self.env.xk), (1, self.actor.s_dim)))
grad += np.array(((K.dot(self.env.xk)-u).dot(self.env.xk.T)))[0]
loss += np.sum(np.power((K.dot(self.env.xk)-u), 2))
self.env.step(u)
K -= lr*grad
print loss
return K
import re
def barrier_certificate_str2func(bc_str, vars_num, enable_jit=False):
"""transform julia barrier string to function
Args:
bc_str (str): string
vars_num (int): the dimension number of state
enable_jit: enable jit, the performance of B will increase, but it takes time to preprocess B
"""
eval_str = re.sub("\^", r"**", bc_str)
variables = ["x"+str(i+1) for i in xrange(vars_num)]
var_pattern = re.compile(r"(?P<var>x\d*)")
eval_str = var_pattern.sub(r'*\g<var>', eval_str)
# This way is much much slower
# def B(state):
# values_name=get_values_name(len(state))
# assert len(variables) == len(values_name)
# eval_str1 = eval_str
# for var, val in zip(variables, values_name):
# eval_str1 = re.sub(var, val, eval_str1)
# return eval(eval_str1)
args_str = ""
for arg in variables:
args_str += (arg+",")
args_str = args_str[:-1]
if enable_jit:
from numba import jit, float64
exec(("@jit"+"(float64 ({}))"+"\ndef B({}): return {}").format(("float64,"*vars_num)[:-1], args_str, eval_str))
else:
exec("""def B({}): return {}""".format(args_str, eval_str))
return B
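# Illustrative example (not part of the original file): a Julia SOS string in
# which every variable carries an explicit coefficient, e.g. "2.0x1^2 - 0.5x2 + 1.0",
# is rewritten to "2.0*x1**2 - 0.5*x2 + 1.0" and compiled into B(x1, x2), so
# B(1.0, 2.0) == 2.0. A bare variable with no leading coefficient (e.g. "+ x2")
# would not survive the '*'-insertion step above.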
def barrier_certificate_str2z3(bc_str, vars_num):
"""transform julia barrier string to what z3 and python can understand
Args:
bc_str (str): string
"""
eval_str = re.sub("\^", r"**", bc_str)
var_pattern = re.compile(r"(?P<var>x\d*)")
eval_str = var_pattern.sub(r'*\g<var>', eval_str)
# substitute x1 to x[0], ..., x[n] to x[n-1]
for i in range(vars_num):
eval_str = eval_str.replace("x"+str(i+1), "x[" + str(i) + "]")
# polynomial function's value should be less than 0.
eval_str = eval_str + " <= 0"
return eval_str
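# Illustrative example (not part of the original file): with vars_num=2 the string
# "1.5x1^2 + 2.0x2" becomes the z3/Python-ready constraint "1.5*x[0]**2 + 2.0*x[1] <= 0".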
def get_values_name(vars_num):
return ["state["+str(i)+"][0]" for i in xrange(vars_num)]
def state2list(state):
return [x[0] for x in state.tolist()]
| 44.323797 | 205 | 0.599267 | 4,852 | 34,085 | 4.039571 | 0.084295 | 0.068571 | 0.015561 | 0.033673 | 0.666224 | 0.628469 | 0.604133 | 0.573929 | 0.556633 | 0.546071 | 0 | 0.011726 | 0.264427 | 34,085 | 768 | 206 | 44.38151 | 0.770022 | 0.052193 | 0 | 0.524621 | 0 | 0 | 0.070462 | 0.00626 | 0 | 0 | 0 | 0.001302 | 0.007576 | 0 | null | null | 0 | 0.011364 | null | null | 0.068182 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
8dcf634f294734dd35e17dc999018f8d298536e2 | 171 | py | Python | palindrome_partitioning.py | spencercjh/sync-leetcode-today-problem-python3-example | 4957e5eadb697334741df0fc297bec2edaa9e2ab | [
"Apache-2.0"
] | null | null | null | palindrome_partitioning.py | spencercjh/sync-leetcode-today-problem-python3-example | 4957e5eadb697334741df0fc297bec2edaa9e2ab | [
"Apache-2.0"
] | null | null | null | palindrome_partitioning.py | spencercjh/sync-leetcode-today-problem-python3-example | 4957e5eadb697334741df0fc297bec2edaa9e2ab | [
"Apache-2.0"
] | null | null | null |
from typing import List


class PalindromePartitioning:
    """
    https://leetcode-cn.com/problems/palindrome-partitioning/
    """
    def partition(self, s: str) -> List[List[str]]:
        # Standard backtracking: split off each palindromic prefix, recurse on the rest.
        if not s:
            return [[]]
        return [[s[:i]] + rest for i in range(1, len(s) + 1)
                if s[:i] == s[:i][::-1] for rest in self.partition(s[i:])]
| 19 | 61 | 0.619883 | 17 | 171 | 6.235294 | 0.882353 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.222222 | 171 | 8 | 62 | 21.375 | 0.796992 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 |
8dd01c14c4996542c62dcb370ac32dc38f1f238a | 3,922 | py | Python | Chapter 09/exercise9_10/exercise9_10.py | nescience8/starting-out-with-python-global-4th-edition | c16f93b7cbb4c7ae7b57653a7190bf192fe6b472 | [
"MIT"
] | 35 | 2019-05-03T00:30:31.000Z | 2022-01-20T06:57:25.000Z | Chapter 09/exercise9_10/exercise9_10.py | nescience8/starting-out-with-python-global-4th-edition | c16f93b7cbb4c7ae7b57653a7190bf192fe6b472 | [
"MIT"
] | null | null | null | Chapter 09/exercise9_10/exercise9_10.py | nescience8/starting-out-with-python-global-4th-edition | c16f93b7cbb4c7ae7b57653a7190bf192fe6b472 | [
"MIT"
] | 22 | 2020-05-13T21:20:02.000Z | 2021-12-21T08:35:59.000Z | ##Write a program that reads the contents of a text file. The program should create a dictionary
##in which the key-value pairs are described as follows:
##• Key. The keys are the individual words found in the file.
##• Values. Each value is a list that contains the line numbers in the file where the word
##(the key) is found.
##For example, suppose the word “robot” is found in lines 7, 18, 94, and 138. The dictionary
##would contain an element in which the key was the string “robot”, and the value was a list
##containing the numbers 7, 18, 94, and 138.
##Once the dictionary is built, the program should create another text file, known as a word
##index, listing the contents of the dictionary. The word index file should contain an alphabetical
##listing of the words that are stored as keys in the dictionary, along with the line
##numbers where the words appear in the original file. Figure 9-1 shows an example of an
##original text file ( Kennedy.txt ) and its index file ( index.txt ).
# Open a text file.
# Read the contents
# Every time a new word appears, create a new key whose value is a list with a single
# element, which will be the line where the word appeared.
# Every time a word that already exists appears, add to the list of that key,
# the line that appeared.
# Create another text file
# Sort the keys alphabetically
# Write each word and the times it appeared with a colon (:) in between.
def get_textname():
# Ask the user for the name of the text file to create an index for.
name = input('For which file would you like me to create a word index? ')
return name
def create_dictionary(filename):
infile = open(filename, 'r', encoding='utf8') # Open the file
word_index = dict() # Create an empty dictionary to store the words and line numbers
counter = 0 # Set a counter to count the line we found the word
for line in infile:
wordlinelist = line.rstrip('\n').split() # remove \n and split the line into words.
counter += 1 # advance the counter to reflect the line we are in
for word in wordlinelist: # for every word in the line
if word not in word_index: # If we haven't yet encountered it,
word_index[word] = [str(counter)] # Start a key/value pair with value being a list with the line number.
elif word in word_index: # If the word was found,
word_index[word].append(str(counter)) # Append the line number to the list in the value
infile.close() # Close the file since we are done reading.
return word_index
def create_index_file(dict):
outfile = open('index.txt', 'w', encoding='utf8') # create a file to store the word index.
a_list = [] # Create a list to store the words of the index.
index = 0 # Create a counter to control the index we are checking.
for key in dict.keys(): # Every word found in the dictionary
a_list.append(key) # Append it to the a_list
for value in dict[key]: # Every value found for that word/key
a_list[index] = a_list[index] + ' ' + value # Add it to the a_list, at the same index, separated by a space
index += 1 # Advance the index by one to continue to the next word.
a_list.sort() # Sort the finished list.
for element in a_list:
outfile.write(element + '\n') # Extract the list, element by element to the file index.txt.
outfile.close() # Close the file
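# Example of the resulting format (illustrative): if "robot" appears on lines
# 7, 18, 94 and 138, the line written to index.txt for it is "robot 7 18 94 138".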
def main():
print('This program creates a word index of the file you request.')
print('----------------------------------------------------------')
print()
file = get_textname() # Ask the user for the file to create an index for.
dictionary = create_dictionary(file) # Create the dictionary for the file.
create_index_file(dictionary) # Write the word index to a file.
print('Word index is created. File name is: index.txt')
# Call the main function.
main() | 56.028571 | 120 | 0.685365 | 647 | 3,922 | 4.123648 | 0.261206 | 0.024363 | 0.011244 | 0.016492 | 0.033733 | 0.016492 | 0 | 0 | 0 | 0 | 0 | 0.007923 | 0.22769 | 3,922 | 70 | 121 | 56.028571 | 0.872235 | 0.631311 | 0 | 0 | 0 | 0 | 0.175451 | 0.041877 | 0 | 0 | 0 | 0 | 0 | 1 | 0.102564 | false | 0 | 0 | 0 | 0.153846 | 0.102564 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8dd2ec97b90cb2d15b0a054348250dd7d5121065 | 982 | py | Python | problems4/4A.py | Lopa10ko/ITMO-algo-2021-2022 | fa1ae8571e9cccd54faf1680fad21ffc6dbcef49 | [
"MIT"
] | 1 | 2021-11-11T12:08:14.000Z | 2021-11-11T12:08:14.000Z | problems4/4A.py | Lopa10ko/ITMO-algo-2021-2022 | fa1ae8571e9cccd54faf1680fad21ffc6dbcef49 | [
"MIT"
] | null | null | null | problems4/4A.py | Lopa10ko/ITMO-algo-2021-2022 | fa1ae8571e9cccd54faf1680fad21ffc6dbcef49 | [
"MIT"
] | null | null | null | file_input = open('stack.in', 'r')
file_output = open('stack.out', 'w')
def validate_stack(top_index):
return top_index == -1
class ImplementedStack(object):
def __init__(self):
self.stack = []
self.top = -1
def push_value(self, value):
self.top += 1
self.stack += ['']
self.stack[self.top] = value
def pop_value(self):
try:
if validate_stack(self.top):
return
else:
self.top -= 1
return self.stack[self.top + 1]
except IndexError:
return
if __name__ == '__main__':
arr = ImplementedStack()
for i in range(int(file_input.readline())):
current = file_input.readline().split()
if current[0] == '+':
arr.push_value(int(current[1]))
else:
print(arr.pop_value(), file=file_output)
file_output.close()
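# Illustrative run (an assumption about the judge's input format): for a stack.in of
#   4
#   + 1
#   + 10
#   -
#   -
# the program writes 10 and then 1 to stack.out, one value per line.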
| 25.842105 | 60 | 0.521385 | 113 | 982 | 4.300885 | 0.380531 | 0.08642 | 0.106996 | 0.098765 | 0.069959 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010989 | 0.351324 | 982 | 37 | 61 | 26.540541 | 0.751962 | 0 | 0 | 0.133333 | 0 | 0 | 0.028513 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0.033333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
8dd45ea0a29eda5d4c2a5b4693e02c4c67831b90 | 4,151 | py | Python | pynextion/hardware.py | cowo78/pynextion | 40215761bc8abbd7cc53fefa68e8b78a67b73aed | [
"Apache-2.0"
] | null | null | null | pynextion/hardware.py | cowo78/pynextion | 40215761bc8abbd7cc53fefa68e8b78a67b73aed | [
"Apache-2.0"
] | null | null | null | pynextion/hardware.py | cowo78/pynextion | 40215761bc8abbd7cc53fefa68e8b78a67b73aed | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import collections
import queue
import threading
import typing
from .constants import S_END_OF_CMD
import serial
class AbstractSerialNex(object):
INCOMING_BUFFER_SIZE = 1024 # Seems the Nextion buffer size, mentioned in official docs
MIN_SIZE_READ = len(S_END_OF_CMD) + 1 # Minimum return data size
TERMINATOR_SIZE = len(S_END_OF_CMD)
def __init__(self):
super().__init__()
self._port_mutex = threading.Lock()
# Queue of event objects
self._events = queue.Queue()
# Incoming serial buffer
self._buffer = bytearray(self.INCOMING_BUFFER_SIZE)
self._events_queue = collections.deque() # type: typing.Sequence[bytearray]
def write(self, data: bytes) -> int:
""" Raw write access to underlying transport. Threadsafe.
:returns: Number of bytes written
"""
with self._port_mutex:
nbytes = self.sp.write(data)
return nbytes
send = write
def read_all(self) -> bytes:
""" Read all buffered data. Threadsafe. """
with self._port_mutex:
data = self.sp.read_all()
return data
def read_next(self) -> bytes:
""" Read next message. Threadsafe. May return an empty array is no event is available. """
# At some point (along with editor 0.58) the Nextion firmware changed and now it returns
# an "instruction successful" everytime, even after a string or numeric data event
if self._events_queue:
return self._events_queue.pop()
buffer_size = 0
with self._port_mutex:
# Reading one byte at a time is of course inefficient, so serial.read_until is not the best option
# We know the minimal read should be 4 chars (i.e. Invalid Instruction) and must be prepared to
# partial command reads since we have no guarantee that we will always have complete commands in the buffer
if self.sp.in_waiting < self.MIN_SIZE_READ:
# Partial event, unlikely at this point
return b''
# Read bulk of data
chunk = self.sp.read(self.sp.in_waiting)
self._buffer[buffer_size:buffer_size+len(chunk)] = chunk
buffer_size = len(chunk)
while self._buffer[buffer_size - self.TERMINATOR_SIZE:buffer_size] != S_END_OF_CMD:
# Trickle until end of event
chunk = self.sp.read(1)
self._buffer[buffer_size:buffer_size+1] = chunk
buffer_size += 1
# Finished reading and we are sure we have complete event(s), now split into single events
start = 0
while True:
pos = self._buffer.find(S_END_OF_CMD, start, buffer_size)
if pos == -1:
break
self._events_queue.appendleft(self._buffer[start:pos+self.TERMINATOR_SIZE])
start = pos + self.TERMINATOR_SIZE
return self._events_queue.pop()
def clear_events_queue(self):
self._events_queue.clear()
def close(self):
return self.sp.close()
class PySerialNex(AbstractSerialNex):
def __init__(self, port_or_url: str, *args, **kwargs):
super().__init__()
self.sp = serial.serial_for_url(port_or_url, *args, **kwargs)
self.sp.reset_input_buffer()
self.sp.reset_output_buffer()
@property
def baudrate(self):
return self.sp.baudrate
@baudrate.setter
def baudrate(self, val):
self.sp.baudrate = val
# TODO: rotten
class NexSerialMock(AbstractSerialNex):
def __init__(self, *args, **kwargs):
super().__init__()
def write(self, cmd):
pass
def read(self):
return None
def close(self):
print("close")
"""
# PyBoard 1.1
# https://docs.micropython.org/en/latest/pyboard/pyboard/quickref.html
# RED: VIN
# BLACK: GND
# YELLOW: X9 (Board TX)
# BLUE: X10 (Board RX)
import machine
import time
class uPyNexSerial(AbstractSerialNex):
def __init__(self, *args, **kwargs):
self.sp = machine.UART(*args, **kwargs)
"""
| 30.07971 | 119 | 0.63286 | 539 | 4,151 | 4.669759 | 0.374768 | 0.051649 | 0.041716 | 0.017878 | 0.116806 | 0.054033 | 0 | 0 | 0 | 0 | 0 | 0.006977 | 0.274874 | 4,151 | 137 | 120 | 30.29927 | 0.829236 | 0.256083 | 0 | 0.138889 | 0 | 0 | 0.001842 | 0 | 0 | 0 | 0 | 0.007299 | 0 | 1 | 0.180556 | false | 0.013889 | 0.083333 | 0.041667 | 0.472222 | 0.013889 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8dd4c7ac514b07c8936d2d8818ce12d988742cca | 1,762 | py | Python | scripts/compute_stats.py | mikeshuser/TopicWordMap | 7ed9df73d1b7dd8ded03361a662444c31fad70bc | [
"MIT"
] | null | null | null | scripts/compute_stats.py | mikeshuser/TopicWordMap | 7ed9df73d1b7dd8ded03361a662444c31fad70bc | [
"MIT"
] | null | null | null | scripts/compute_stats.py | mikeshuser/TopicWordMap | 7ed9df73d1b7dd8ded03361a662444c31fad70bc | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Compute essential stats(freq/tf-idf) on a corpus
Dependencies:
pandas == 0.23
gensim == 3.8
"""
__author__ = "Mike Shuser"
import pickle
import numpy as np
import pandas as pd
from gensim.models import TfidfModel
from gensim.corpora import Dictionary
DATA_SRC = "../processed_corpus"
MODEL_SRC = "../modelling"
if __name__ == '__main__':
files = ["positive_text","negative_text"]
vecs = pd.read_csv(f"{MODEL_SRC}/imdb_wordvectors.csv",
index_col=[0],
na_filter=False)
for filetype in files:
with open(f"{DATA_SRC}/{filetype}.csv.bigrams.pkl", "rb") as handle:
docs = pickle.load(handle)
vocab = pd.DataFrame(index=vecs.index)
dct = Dictionary(docs)
corpus = [dct.doc2bow(line) for line in docs]
tfidf = TfidfModel(corpus)
#corpus statistics
def lookup_mentions(x):
try:
return dct.cfs[dct.token2id[x]]
except KeyError:
return 0
vocab['mentions'] = vocab.index.map(lookup_mentions)
vocab['log2_mentions'] = np.log2(vocab.mentions)
#get tf-idfs for every word in each doc, then get average per word
vocab_tfidf = {k : [] for k in vocab.index}
for i, row in enumerate(docs):
tmp = dict(tfidf[corpus[i]])
for word in row:
if word in vocab_tfidf:
vocab_tfidf[word].extend([tmp[dct.token2id[word]]])
for k, v in vocab_tfidf.items():
vocab_tfidf[k] = np.mean(v)
vocab['avg_tfidf'] = vocab.index.map(lambda x: vocab_tfidf[x])
vocab.to_csv(f"{MODEL_SRC}/{filetype}_vocab_stats.csv")
| 28.419355 | 76 | 0.586833 | 228 | 1,762 | 4.372807 | 0.469298 | 0.060181 | 0.018054 | 0.024072 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01045 | 0.293984 | 1,762 | 61 | 77 | 28.885246 | 0.790997 | 0.116345 | 0 | 0 | 0 | 0 | 0.138979 | 0.069166 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027027 | false | 0 | 0.135135 | 0 | 0.216216 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8dd5699562650669f491fdd18d9c1edc25f11acb | 2,937 | py | Python | app/utils/images/linalg/utils.py | vinaykakkad/audio_and_image_compression | b5f7c767429f36805262ae87e8239434569fc372 | [
"MIT"
] | 1 | 2021-11-13T11:08:24.000Z | 2021-11-13T11:08:24.000Z | app/utils/images/linalg/utils.py | neelpopat242/audio_and_image_compression | b5f7c767429f36805262ae87e8239434569fc372 | [
"MIT"
] | null | null | null | app/utils/images/linalg/utils.py | neelpopat242/audio_and_image_compression | b5f7c767429f36805262ae87e8239434569fc372 | [
"MIT"
] | 1 | 2021-11-13T11:07:54.000Z | 2021-11-13T11:07:54.000Z | import math
def print_matrix(matrix):
"""
Function to print a matrix
"""
for row in matrix:
for col in row:
print("%.3f" % col, end=" ")
print()
def rows(matrix):
"""
Returns the no. of rows of a matrix
"""
if type(matrix) != list:
return 1
return len(matrix)
def cols(matrix):
"""
Returns the no. of columns of a matrix
"""
if type(matrix[0]) != list:
return 1
return len(matrix[0])
def eye(size):
"""
Returns an identity matrix
"""
mat = list()
for r in range(size):
row = list()
for c in range(size):
if r == c:
row.append(1)
else:
row.append(0)
mat.append(row)
return mat
def pivot_index(row):
"""
Returns the index of pivot in a row
"""
counter = 0
for element in row:
if element != float(0):
return counter
counter += 1
return counter
def pivot_value(row):
"""
Returns the value of pivot in a row
"""
for element in row:
if element > math.exp(-8):
return element
return 0
def swap(matrix, index_1, index_2):
"""
Function to swap two rows
"""
x = matrix[index_1]
matrix[index_1] = matrix[index_2]
matrix[index_2] = x
def transpose(matrix):
"""
Returns the transpose of a matrix
"""
transpose_matrix = list()
for i in range(cols(matrix)):
row = list()
for j in range(rows(matrix)):
row.append(matrix[j][i])
transpose_matrix.append(row)
return transpose_matrix
def mat_multiply(a, b):
"""
Function to multiply two matrices
"""
c = [[0 for i in range(cols(b))] for j in range(rows(a))]
for i in range(rows(a)):
for j in range(cols(b)):
for k in range(rows(b)):
c[i][j] += a[i][k] * b[k][j]
return c
def mat_splice(matrix, r, c):
"""
Function which returns a matrix with the first r rows and first c
columns of the original matrix
"""
result = list()
for i in range(r):
row = matrix[i]
result.append(row[:c])
return result
def to_int(matrix):
"""
Funciton to convert the eact element of the matrix to int
"""
for row in range(rows(matrix)):
for col in range(cols(matrix)):
for j in range(3):
matrix[row][col][j] = int(matrix[row][col][j])
return matrix
def clip(matrix):
"""
Function to clip each element to the range float[0, 1]
"""
for row in range(rows(matrix)):
for col in range(cols(matrix)):
for j in range(3):
if matrix[row][col][j] > 1:
matrix[row][col][j] = 1
if matrix[row][col][j] < 0:
matrix[row][col][j] = 0
return matrix
| 17.908537 | 69 | 0.520259 | 410 | 2,937 | 3.690244 | 0.173171 | 0.074025 | 0.043622 | 0.051553 | 0.35228 | 0.170522 | 0.076669 | 0.076669 | 0.076669 | 0.076669 | 0 | 0.014316 | 0.357848 | 2,937 | 163 | 70 | 18.018405 | 0.787911 | 0.171604 | 0 | 0.207792 | 0 | 0 | 0.002224 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.155844 | false | 0 | 0.012987 | 0 | 0.350649 | 0.038961 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8dd581b173988521e90d63f2bb0417df587f5647 | 1,102 | py | Python | testproject/manage.py | innovationinit/django-kazoo-locks | 91ceb37ab92dad97659f24a9a7ace3bb9ae3ba10 | [
"BSD-2-Clause"
] | null | null | null | testproject/manage.py | innovationinit/django-kazoo-locks | 91ceb37ab92dad97659f24a9a7ace3bb9ae3ba10 | [
"BSD-2-Clause"
] | null | null | null | testproject/manage.py | innovationinit/django-kazoo-locks | 91ceb37ab92dad97659f24a9a7ace3bb9ae3ba10 | [
"BSD-2-Clause"
] | 1 | 2022-03-15T07:30:07.000Z | 2022-03-15T07:30:07.000Z | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
try:
import settings as settings_mod # Assumed to be in the same directory.
except ImportError:
sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r" % __file__)
sys.exit(1)
sys.path.insert(0, settings_mod.BASE_DIR)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "testproject.settings")
execute_from_command_line(sys.argv)
| 34.4375 | 110 | 0.650635 | 140 | 1,102 | 4.957143 | 0.635714 | 0.073487 | 0.051873 | 0.063401 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003797 | 0.283122 | 1,102 | 31 | 111 | 35.548387 | 0.874684 | 0.188748 | 0 | 0.26087 | 0 | 0 | 0.311586 | 0.024747 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.434783 | 0 | 0.434783 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 2 |
8dd7751b946690a5f79059a3575a07f2c9cb06b8 | 1,443 | py | Python | mos_ru_service/file_crypt.py | onlycska/depersonalization-of-data | d11497d0f0708496975d682ae447e97bfd9177d9 | [
"MIT"
] | null | null | null | mos_ru_service/file_crypt.py | onlycska/depersonalization-of-data | d11497d0f0708496975d682ae447e97bfd9177d9 | [
"MIT"
] | null | null | null | mos_ru_service/file_crypt.py | onlycska/depersonalization-of-data | d11497d0f0708496975d682ae447e97bfd9177d9 | [
"MIT"
] | null | null | null | import hashlib
from datetime import datetime
def salsa_20_xor_bytes(data: bytes, key: bytes, iv: bytes) -> bytes:
    # One possible implementation (assumption: the PyCryptodome package is installed);
    # Salsa20 is a stream cipher, so the same call both encrypts and decrypts.
    from Crypto.Cipher import Salsa20
    return Salsa20.new(key=key, nonce=iv).encrypt(data)
def n_string(string, n):
hash = hashlib.sha512()
hash.update(string.encode('utf-8'))
return hash.digest()[:n]
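# n_string derives a deterministic n-byte value from a passphrase by truncating its
# SHA-512 digest; below it is used to build a 32-byte Salsa20 key and an 8-byte nonce/IV.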
def encryption(iv: str, key: str, filename: str) -> bool:
try:
iv = n_string(iv, 8)
key = n_string(key, 32)
header_bytes = 50
with open(filename, "rb") as picture:
picture.seek(header_bytes)
picture_content = picture.read()
cipher = salsa_20_xor_bytes(picture_content, key, iv)
with open(filename + ".encr", "wb") as encryption:
picture.seek(0)
encryption.write(picture.read(header_bytes))
encryption.write(cipher)
return True
except Exception as e:
return False
def decryption(iv: str, key: str, filename: str) -> bool:
try:
iv = n_string(iv, 8)
key = n_string(key, 32)
header_bytes = 50
with open(filename + ".encr", "rb") as picture:
picture.seek(header_bytes)
encryption = picture.read()
original = salsa_20_xor_bytes(encryption, key, iv)
with open(filename, "wb") as decrypted:
picture.seek(0)
decrypted.write(picture.read(header_bytes))
decrypted.write(original)
return True
except Exception as e:
return False
print()
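# Minimal usage sketch (assumption: a file whose first 50 bytes are an unencrypted header,
# e.g. an image; the names are illustrative):
# encryption("some iv", "some secret", "photo.bmp")   # writes photo.bmp.encr
# decryption("some iv", "some secret", "photo.bmp")   # restores photo.bmp from photo.bmp.encr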
| 26.722222 | 65 | 0.579349 | 175 | 1,443 | 4.651429 | 0.297143 | 0.081081 | 0.078624 | 0.055283 | 0.503686 | 0.385749 | 0.385749 | 0.304668 | 0.208845 | 0.208845 | 0 | 0.022312 | 0.316701 | 1,443 | 53 | 66 | 27.226415 | 0.803245 | 0 | 0 | 0.439024 | 0 | 0 | 0.015939 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.097561 | false | 0.02439 | 0.04878 | 0 | 0.268293 | 0.02439 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8dd86a18a4119d3adc2f367d73fae1d910601d27 | 3,747 | py | Python | fitbenchmarking/utils/tests/test_create_dirs.py | fitbenchmarking/fitbenchmarking | ea398efa61f071dc64fe7c3b484d5bb4e1897856 | [
"BSD-3-Clause"
] | 6 | 2019-07-22T01:56:10.000Z | 2021-12-10T05:29:30.000Z | fitbenchmarking/utils/tests/test_create_dirs.py | fitbenchmarking/fitbenchmarking | ea398efa61f071dc64fe7c3b484d5bb4e1897856 | [
"BSD-3-Clause"
] | 677 | 2019-04-29T10:23:49.000Z | 2022-03-22T12:01:30.000Z | fitbenchmarking/utils/tests/test_create_dirs.py | fitbenchmarking/fitbenchmarking | ea398efa61f071dc64fe7c3b484d5bb4e1897856 | [
"BSD-3-Clause"
] | 8 | 2019-06-13T10:32:17.000Z | 2020-12-09T15:08:40.000Z | """
This file contains tests on the creation of directories
"""
from __future__ import absolute_import, division, print_function
import time
import os
import shutil
import unittest
from fitbenchmarking.utils.create_dirs import (figures, group_results, results,
support_pages, css)
class CreateDirsTests(unittest.TestCase):
"""
Tests for the creation of directories
"""
def setUp(self):
"""
Sets a temporary directory in which results are stored
"""
path = 'r{}'.format(int(time.time()))
self.results_dir = os.path.join(os.getcwd(), path)
def tearDown(self):
"""
Deletes the temporary folder
"""
if os.path.exists(self.results_dir):
shutil.rmtree(self.results_dir)
def test_results_throw_correct_error(self):
"""
Check that the correct error is raised
"""
self.assertRaises(TypeError, results, 123)
self.assertRaises(TypeError, results, None)
def test_results_create_correct_dir(self):
"""
Check that the correct directory is created
"""
results_dir = results(self.results_dir)
results_dir_expected = self.results_dir
self.assertEqual(results_dir_expected, results_dir)
self.assertTrue(os.path.exists(results_dir_expected))
shutil.rmtree(results_dir_expected)
def test_groupResults_create_correct_group_results(self):
"""
Check that the Group results directory is as expected
"""
results_dir = results(self.results_dir)
group_results_dir = group_results(results_dir, "test_group")
group_results_dir_expected = os.path.join(results_dir, "test_group")
self.assertEqual(group_results_dir_expected, group_results_dir)
self.assertTrue(os.path.exists(group_results_dir_expected))
shutil.rmtree(results_dir)
def test_support_pages_create_correct_dir(self):
"""
Check that the support pages directory is as expected
"""
results_dir = results(self.results_dir)
group_results_dir = group_results(results_dir, "test_group")
support_pages_dir = support_pages(group_results_dir)
support_pages_dir_expected = os.path.join(group_results_dir,
'support_pages')
self.assertEqual(support_pages_dir_expected, support_pages_dir)
self.assertTrue(os.path.exists(support_pages_dir_expected))
shutil.rmtree(results_dir)
def test_figures_create_correct_dir(self):
"""
Check that the figures directory is as expected
"""
results_dir = results(self.results_dir)
group_results_dir = group_results(results_dir, "test_group")
support_pages_dir = support_pages(group_results_dir)
figures_dir = figures(support_pages_dir)
figures_dir_expected = os.path.join(support_pages_dir, 'figures')
self.assertEqual(figures_dir_expected, figures_dir)
self.assertTrue(os.path.exists(figures_dir_expected))
shutil.rmtree(results_dir)
def test_css_create_correct_dir(self):
"""
Check that the css directory is as expected
"""
results_dir = results(self.results_dir)
group_results_dir = group_results(results_dir, "test_group")
css_dir = css(group_results_dir)
css_dir_expected = os.path.join(group_results_dir,
'css')
self.assertEqual(css_dir_expected, css_dir)
self.assertTrue(os.path.exists(css_dir_expected))
shutil.rmtree(css_dir)
if __name__ == "__main__":
unittest.main()
| 32.868421 | 79 | 0.662397 | 446 | 3,747 | 5.226457 | 0.174888 | 0.1716 | 0.083655 | 0.075504 | 0.484341 | 0.443157 | 0.392535 | 0.286572 | 0.204204 | 0.204204 | 0 | 0.001075 | 0.255137 | 3,747 | 113 | 80 | 33.159292 | 0.83411 | 0.122765 | 0 | 0.237288 | 0 | 0 | 0.027273 | 0 | 0 | 0 | 0 | 0 | 0.20339 | 1 | 0.135593 | false | 0 | 0.101695 | 0 | 0.254237 | 0.016949 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8dd963b267b7e2d742f71f63075d5795850791fb | 1,173 | py | Python | router.py | laddge/cardAPI | 770c5f8936f7b699ccaf386c82f7172e84292ecc | [
"MIT"
] | null | null | null | router.py | laddge/cardAPI | 770c5f8936f7b699ccaf386c82f7172e84292ecc | [
"MIT"
] | null | null | null | router.py | laddge/cardAPI | 770c5f8936f7b699ccaf386c82f7172e84292ecc | [
"MIT"
] | null | null | null | import os
from urllib.parse import urlparse
from fastapi import FastAPI, Request
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import RedirectResponse, Response
from fastapi.staticfiles import StaticFiles
from typing import Optional
import api
app = FastAPI()
app.mount("/files", StaticFiles(directory="files"), name="files")
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
@app.middleware("http")
async def middleware(request: Request, call_next):
if request.method == "HEAD":
response = Response()
elif "herokuapp" in urlparse(str(request.url)).netloc:
domain = os.getenv("DOMAIN")
if domain:
url = urlparse(str(request.url))._replace(netloc=domain).geturl()
response = RedirectResponse(url)
else:
response = await call_next(request)
else:
response = await call_next(request)
return response
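# The middleware above answers HEAD probes with an empty response and, when a request
# arrives via the default herokuapp host, redirects it to the custom domain taken from
# the DOMAIN environment variable (if set); all other requests pass straight through.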
@app.get('/')
async def getAPI(url: Optional[str] = None):
if url:
return api.main(url)
else:
return {'message': 'hello, world'}
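# Usage sketch (an assumption about deployment, not part of the original file):
# serve with `uvicorn router:app` and query GET /?url=<target-url>, e.g.
# curl "http://localhost:8000/?url=https://example.com"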
| 24.957447 | 77 | 0.672634 | 134 | 1,173 | 5.820896 | 0.432836 | 0.05641 | 0.046154 | 0.053846 | 0.082051 | 0.082051 | 0 | 0 | 0 | 0 | 0 | 0 | 0.208014 | 1,173 | 46 | 78 | 25.5 | 0.839612 | 0 | 0 | 0.135135 | 0 | 0 | 0.052856 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.216216 | 0 | 0.297297 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8ddada2583d4d1572a9c3857e2ac95af6c1ca1dd | 1,976 | py | Python | examples/list_alert_notifications.py | kenlavoie/python-sdc-client | cafb8f2279956c572bd2c01c8645895d0895716f | [
"MIT"
] | null | null | null | examples/list_alert_notifications.py | kenlavoie/python-sdc-client | cafb8f2279956c572bd2c01c8645895d0895716f | [
"MIT"
] | null | null | null | examples/list_alert_notifications.py | kenlavoie/python-sdc-client | cafb8f2279956c572bd2c01c8645895d0895716f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# Get alert notifications from Sysdig Cloud
#
import os
import sys
import time
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), '..'))
from sdcclient import SdcClient
def print_notifications(notifications):
for notification in notifications:
values = []
for entity in notification['entities']:
for value in entity['metricValues']:
values.append(str(value['value']))
print "#%s, State: %s, Severity: %s, Scope: %s, Condition: %s, Value: %s, Resolved: %s" % \
(notification['id'], notification['state'], notification['severity'], notification['filter'], notification['condition'], ','.join(values), notification['resolved'])
#
# Parse arguments
#
if len(sys.argv) != 2:
    print('usage: %s <sysdig-token>' % sys.argv[0])
    print('You can find your token at https://app.sysdigcloud.com/#/settings/user')
sys.exit(1)
sdc_token = sys.argv[1]
#
# Instantiate the SDC client
#
sdclient = SdcClient(sdc_token)
#
# Get the notifications in the last day
#
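# Each sdcclient call below returns a (success, payload) pair: res[0] is the
# status flag and res[1] holds the decoded response body, as the usage shows.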
res = sdclient.get_notifications(from_ts=int(time.time()-86400), to_ts=int(time.time()))
print_notifications(res[1]['notifications'])
if not res[0]:
sys.exit(1)
#
# Get the notifications in the last day and active state
#
res = sdclient.get_notifications(from_ts=int(time.time()-86400), to_ts=int(time.time()), state='ACTIVE')
print_notifications(res[1]['notifications'])
if not res[0]:
sys.exit(1)
#
# Get the notifications in the last day and OK state
#
res = sdclient.get_notifications(from_ts=int(time.time()-86400), to_ts=int(time.time()), state='OK')
print_notifications(res[1]['notifications'])
if not res[0]:
sys.exit(1)
#
# Get the notifications in the last day and resolved state
#
res = sdclient.get_notifications(from_ts=int(time.time()-86400), to_ts=int(time.time()), resolved=True)
print_notifications(res[1]['notifications'])
if not res[0]:
sys.exit(1)
| 27.444444 | 176 | 0.688765 | 283 | 1,976 | 4.742049 | 0.279152 | 0.029806 | 0.053651 | 0.077496 | 0.472429 | 0.472429 | 0.472429 | 0.449329 | 0.449329 | 0.449329 | 0 | 0.022673 | 0.151822 | 1,976 | 71 | 177 | 27.830986 | 0.778043 | 0.156883 | 0 | 0.371429 | 0 | 0.028571 | 0.181873 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.114286 | null | null | 0.228571 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
8ddb9586e5b11a1deeec483163fc0ec9a71544d9 | 791 | py | Python | Table/groupingDeviceMappingTable.py | tuanldchainos/HcPullData | 65f89cfdcae135781aad4b3edf210c0ecd2d6a1c | [
"Apache-2.0"
] | null | null | null | Table/groupingDeviceMappingTable.py | tuanldchainos/HcPullData | 65f89cfdcae135781aad4b3edf210c0ecd2d6a1c | [
"Apache-2.0"
] | null | null | null | Table/groupingDeviceMappingTable.py | tuanldchainos/HcPullData | 65f89cfdcae135781aad4b3edf210c0ecd2d6a1c | [
"Apache-2.0"
] | null | null | null | from sqlalchemy import Column, Integer, String
from sqlalchemy import DateTime
from sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey
class groupingDeviceMappingTable():
def __init__(self, metadata: MetaData):
self.groupingDeviceMappingTable = Table('GroupingDeviceMapping', metadata,
Column('GroupingId', String, primary_key=True, nullable=False),
Column('GroupUnicastId', Integer, nullable=False),
Column('DeviceId', String, primary_key=True, nullable=False),
Column('DeviceUnicastId', Integer, nullable=False),
)
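

# Usage sketch (the connection URL and engine setup are illustrative assumptions):
#   from sqlalchemy import create_engine
#   metadata = MetaData()
#   mapping = groupingDeviceMappingTable(metadata)
#   engine = create_engine('sqlite:///example.db')
#   metadata.create_all(engine)  # emits CREATE TABLE "GroupingDeviceMapping"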
| 56.5 | 111 | 0.548673 | 57 | 791 | 7.508772 | 0.438596 | 0.121495 | 0.140187 | 0.093458 | 0.182243 | 0.182243 | 0.182243 | 0 | 0 | 0 | 0 | 0 | 0.376738 | 791 | 13 | 112 | 60.846154 | 0.868154 | 0 | 0 | 0 | 0 | 0 | 0.085967 | 0.026549 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.272727 | 0 | 0.454545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8ddb98d57d427980a09a0ebd40ba75c59e9df8f6 | 802 | py | Python | nutils/__init__.py | wijnandhoitinga/nutils | 7ad6793ca5e3a43f45dcc0a4a795b381d2a0b9d4 | [
"MIT"
] | 25 | 2015-04-29T13:10:22.000Z | 2019-03-18T09:45:29.000Z | nutils/__init__.py | wijnandhoitinga/nutils | 7ad6793ca5e3a43f45dcc0a4a795b381d2a0b9d4 | [
"MIT"
] | 330 | 2015-03-04T09:06:38.000Z | 2019-06-11T10:31:54.000Z | nutils/__init__.py | wijnandhoitinga/nutils | 7ad6793ca5e3a43f45dcc0a4a795b381d2a0b9d4 | [
"MIT"
] | 16 | 2015-03-23T08:00:46.000Z | 2019-02-21T11:14:47.000Z | import sys
import numpy
from distutils.version import LooseVersion
assert sys.version_info >= (3, 5)
assert LooseVersion(numpy.version.version) >= LooseVersion('1.16'), 'nutils requires numpy 1.16 or higher, got {}'.format(numpy.version.version)
version = '8.0a0'
version_name = None
long_version = ('{} "{}"' if version_name else '{}').format(version, version_name)
__all__ = [
'cache',
'cli',
'element',
'elementseq',
'evaluable',
'export',
'expression_v1',
'expression_v2',
'function',
'matrix',
'mesh',
'numeric',
'parallel',
'points',
'pointsseq',
'sample',
'solver',
'sparse',
'testing',
'topology',
'transform',
'transformseq',
'types',
'unit',
'util',
'warnings',
]
# vim:sw=2:sts=2:et
| 19.095238 | 144 | 0.599751 | 86 | 802 | 5.465116 | 0.686047 | 0.119149 | 0.080851 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.024116 | 0.224439 | 802 | 41 | 145 | 19.560976 | 0.731511 | 0.021197 | 0 | 0 | 0 | 0 | 0.320562 | 0 | 0 | 0 | 0 | 0 | 0.055556 | 1 | 0 | false | 0 | 0.083333 | 0 | 0.083333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8ddbd28f219a98dd5d2cc0254cc240672cc2d246 | 4,871 | py | Python | addutils/toc.py | addfor/AddUtils | 2e2eb7ecd0718c7ec88a72b59313ab9084c0eaac | [
"MIT"
] | null | null | null | addutils/toc.py | addfor/AddUtils | 2e2eb7ecd0718c7ec88a72b59313ab9084c0eaac | [
"MIT"
] | null | null | null | addutils/toc.py | addfor/AddUtils | 2e2eb7ecd0718c7ec88a72b59313ab9084c0eaac | [
"MIT"
] | null | null | null | # The MIT License (MIT)
#
# Copyright (c) 2015 addfor s.r.l.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""IPython utility: automatic table of contents generation
Functions:
js - get the JavaScript script that generates the TOC of the document
"""
JS_SCRIPT = """
$(function() {
function regenTOC(){
element = $("#toc-container");
var toc = document.createElement("div");
$(toc).attr("class", "table-of-contents");
var curLevel = 0;
var containerStack = [toc];
var levelOfTag = {"h2": 1, "h3": 2, "h4": 3, "h5": 4};
function pushLevel() {
var list = document.createElement("ul");
containerStack.push(list);
curLevel++;
}
function popLevel() {
var lastContainer = containerStack.pop();
$(lastContainer).appendTo(containerStack[containerStack.length - 1]);
curLevel--;
}
$(".text_cell_render :header").each(function (i, elem) {
var level = levelOfTag[ elem.tagName.toLowerCase() ];
if (level === undefined)
return;
while (curLevel < level)
pushLevel();
while (curLevel > level)
popLevel();
var listItem = document.createElement("li");
var link = document.createElement("a");
$(link)
.text($(elem).contents().first().text()) // Remove the pilcrow sign
.attr("href", "#" + $(elem).attr("id"))
.appendTo(listItem);
$(listItem).appendTo(containerStack[containerStack.length - 1]);
});
while (curLevel > 0)
popLevel();
$("<a class='btn-update' href='#'>Update</a>")
.click(regenTOC).prependTo(toc);
$(toc).prepend("<div class='title'>Contents</div>")
.wrap("<div class='toc-headings'/>");
$(element).empty();
$(element).append(toc);
}
if (typeof(IPython) !== 'undefined')
$([IPython.events]).on('notebook_loaded.Notebook', regenTOC);
regenTOC();
});
"""
def js(ipy_notebook=False):
"""Get the JavaScript script that generates the TOC of the document.
The returned script uses JQuery to access the DOM, and looks at
the heading tags (i.e. <h1>, <h2>, ...) to create a table of
contents. The resulting table of contents is appended to the
element #toc-container (which, in the case of an IPython notebook,
is created in the output area of the cell).
Parameters:
ipy_notebook (bool) :
When true, the script is returned wrapped in a
IPython.display.HTML object. This makes it work
automatically in any IPython notebook.
Returns:
(str or IPython.display.HTML) - The JS script
The structure of the output is (if you want to style it with CSS, for example):
div#table-of-contents div.title ("Contents")
.toc-container
ul First
li First.1
li First.2
li First.3
...
ul Second
li Second.1
li Second.2
ul Second.2.1
li Second.2.1.1
li Second.2.1.2
...
...
...
...
"""
if ipy_notebook:
from IPython.display import HTML
return HTML(data=("<div id='toc-container'>"
+ "<script type='text/javascript'>"
+ JS_SCRIPT
+ "</script>"
+"</div>"))
else:
return JS_SCRIPT
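

# Usage sketch (assumption: evaluated in a notebook cell so the returned HTML
# object is rendered and the script can walk the notebook's heading elements):
#   from addutils import toc
#   toc.js(ipy_notebook=True)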
| 35.043165 | 83 | 0.559228 | 546 | 4,871 | 4.972527 | 0.419414 | 0.032413 | 0.027624 | 0.01105 | 0.079558 | 0.039779 | 0.039779 | 0.039779 | 0.039779 | 0.039779 | 0 | 0.00955 | 0.333607 | 4,871 | 138 | 84 | 35.297101 | 0.826864 | 0.537467 | 0 | 0.067797 | 0 | 0.016949 | 0.840095 | 0.284487 | 0 | 0 | 0 | 0 | 0 | 1 | 0.016949 | false | 0 | 0.016949 | 0 | 0.067797 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
8ddbf31bfb108f86a7acf1ea3203d6b23129570c | 720 | py | Python | portfolio/blog/models.py | SkullTech/portfolio-devclub | a3db4ab72464c82e9da6ee89ad51cecc082d9ff5 | [
"MIT"
] | 1 | 2017-03-02T22:38:26.000Z | 2017-03-02T22:38:26.000Z | portfolio/blog/models.py | SkullTech/portfolio-devclub | a3db4ab72464c82e9da6ee89ad51cecc082d9ff5 | [
"MIT"
] | null | null | null | portfolio/blog/models.py | SkullTech/portfolio-devclub | a3db4ab72464c82e9da6ee89ad51cecc082d9ff5 | [
"MIT"
] | null | null | null | from django.db import models
from django.utils import timezone
class Tag(models.Model):
name = models.CharField(max_length=50)
description = models.CharField(max_length=200, null=True, blank=True, default='')
def __str__(self):
return self.name
class Post(models.Model):
    author = models.ForeignKey('auth.User', on_delete=models.CASCADE)  # on_delete is required since Django 2.0
title = models.CharField(max_length=200)
text = models.TextField()
created_date = models.DateTimeField(default=timezone.now)
published_date = models.DateTimeField(blank=True, null=True)
tags = models.ManyToManyField(Tag)
def publish(self):
self.published_date = timezone.now()
self.save()
def __str__(self):
return self.title
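

# Usage sketch (run in a Django shell; `user` is an assumed existing auth user):
#   post = Post.objects.create(author=user, title='Hello', text='...')
#   post.publish()  # stamps published_date with the current time and saves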
| 26.666667 | 85 | 0.698611 | 90 | 720 | 5.433333 | 0.466667 | 0.092025 | 0.110429 | 0.147239 | 0.192229 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013722 | 0.190278 | 720 | 26 | 86 | 27.692308 | 0.825043 | 0 | 0 | 0.105263 | 0 | 0 | 0.0125 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.157895 | false | 0 | 0.105263 | 0.105263 | 0.894737 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 |
8ddd126c99d8feae03f19cc66e85504d9442c512 | 1,710 | py | Python | ReadCifar.py | timestocome/ReadCifar10 | ea6e70a982bdc923386327db648e038a63b1d55d | [
"MIT"
] | null | null | null | ReadCifar.py | timestocome/ReadCifar10 | ea6e70a982bdc923386327db648e038a63b1d55d | [
"MIT"
] | null | null | null | ReadCifar.py | timestocome/ReadCifar10 | ea6e70a982bdc923386327db648e038a63b1d55d | [
"MIT"
] | null | null | null |
# http://github.com/timestocome
# data
# https://www.cs.toronto.edu/~kriz/cifar.html
import numpy as np
import pickle
import matplotlib.pyplot as plt
###################################################################################
# read in data
##################################################################################
n_classes = 10
image_height = 32
image_width = 32
image_depth = 3
label_bytes = 1
def unpickle(file):
    # encoding='latin1' lets Python 3 read the Python 2-pickled CIFAR batches
    with open(file, 'rb') as fo:
        return pickle.load(fo, encoding='latin1')
def load_data():
xs = []
ys = []
# read in training files
for i in range(5):
# this is the directory you put the cifar batch files into
filename = 'cifar-10/data_batch_%d' % (i+1)
with open(filename, 'rb') as f:
d = pickle.load(f, encoding='latin1') # needed for python2-python3 pickle
x = d['data']
y = d['labels']
xs.append(x)
ys.append(y)
# read in test files
filename = 'cifar-10/test_batch'
with open(filename, 'rb') as f:
d = pickle.load(f, encoding='latin1')
xs.append(d['data'])
ys.append(d['labels'])
x = np.concatenate(xs) # images
y = np.concatenate(ys) # labels
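    # Each CIFAR-10 row is 3072 bytes laid out as 1024 red, 1024 green, then
    # 1024 blue values; reshape to (N, 3, 32, 32) and transpose to the
    # channel-last (N, 32, 32, 3) layout that matplotlib expects.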
x = x.reshape((x.shape[0], 3, 32, 32)).transpose(0,2,3,1)
# Visualizing CIFAR 10
fig, axes1 = plt.subplots(5,5,figsize=(10,10))
for j in range(5):
for k in range(5):
i = np.random.choice(range(len(x)))
axes1[j][k].set_axis_off()
axes1[j][k].imshow(x[i:i+1][0])
plt.show()
    # scale images to [0, 1]
    x = x / 255.
    return x, y


x, y = load_data()
8ddd3e9119c32c0e18dc330a4330c6e053f331a5 | 427 | py | Python | app/blueprints/seo_arizona/errors.py | Anioko/TestApp | 95fa8d27ca8e7a074e62f92609427a378844e621 | [
"MIT"
] | null | null | null | app/blueprints/seo_arizona/errors.py | Anioko/TestApp | 95fa8d27ca8e7a074e62f92609427a378844e621 | [
"MIT"
] | 1 | 2021-06-02T01:53:47.000Z | 2021-06-02T01:53:47.000Z | app/blueprints/seo_arizona/errors.py | Anioko/TestApp | 95fa8d27ca8e7a074e62f92609427a378844e621 | [
"MIT"
] | null | null | null | from flask import render_template
from app.blueprints.seo_arizona.views import seo_arizona
@seo_arizona.app_errorhandler(403)
def forbidden(_):
return render_template('errors/403.html'), 403
@seo_arizona.app_errorhandler(404)
def page_not_found(_):
return render_template('errors/404.html'), 404
@seo_arizona.app_errorhandler(500)
def internal_server_error(_):
return render_template('errors/500.html'), 500
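

# Note: app_errorhandler (unlike errorhandler) registers these handlers for the
# whole application, not only for requests routed through this blueprint.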
| 22.473684 | 56 | 0.789227 | 60 | 427 | 5.3 | 0.416667 | 0.157233 | 0.122642 | 0.235849 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.070681 | 0.105386 | 427 | 18 | 57 | 23.722222 | 0.76178 | 0 | 0 | 0 | 0 | 0 | 0.105386 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.272727 | false | 0 | 0.181818 | 0.272727 | 0.727273 | 0.090909 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 4 |
8dde19fedd80853c00291936adb295b97fcd76bc | 1,315 | py | Python | tests.py | rudradatta/Flames | 3692564ef1c3493eb2e1586be47ca997ede97cb4 | [
"BSD-3-Clause"
] | null | null | null | tests.py | rudradatta/Flames | 3692564ef1c3493eb2e1586be47ca997ede97cb4 | [
"BSD-3-Clause"
] | null | null | null | tests.py | rudradatta/Flames | 3692564ef1c3493eb2e1586be47ca997ede97cb4 | [
"BSD-3-Clause"
] | null | null | null | import flames
import unittest
class TestFlamesMethods(unittest.TestCase):
def test_flames_count(self):
self.assertEqual(flames.flames_count('abhi','abhi'),0)
self.assertEqual(flames.flames_count('abhi','a'),3)
self.assertEqual(flames.flames_count('abhi','asd'),5)
def test_flames_result(self):
self.assertEqual(flames.flames_result(1),'S')
self.assertEqual(flames.flames_result(2),'E')
self.assertEqual(flames.flames_result(3),'F')
self.assertEqual(flames.flames_result(7),'E')
self.assertEqual(flames.flames_result(10),'L')
self.assertEqual(flames.flames_result(15),'M')
self.assertEqual(flames.flames_result(21),'F')
self.assertEqual(flames.flames_result(28),'A')
self.assertEqual(flames.flames_result(30),'A')
def test_calculate(self):
self.assertEqual(flames.calculate('abhi','abhil'),'S')
self.assertEqual(flames.calculate('abhi','abhila'),'E')
self.assertEqual(flames.calculate('abhi','abhilas'),'F')
self.assertEqual(flames.calculate('abhi','abhilashdsm'),'E')
self.assertEqual(flames.calculate('Abhi',' abHil '),'S')
self.assertEqual(flames.calculate('abhi','abhi.l'),'S')
if __name__ == '__main__':
unittest.main()
| 35.540541 | 68 | 0.657034 | 157 | 1,315 | 5.343949 | 0.248408 | 0.321812 | 0.450536 | 0.386174 | 0.756853 | 0.510131 | 0.1764 | 0.1764 | 0.1764 | 0.1764 | 0 | 0.015639 | 0.173384 | 1,315 | 36 | 69 | 36.527778 | 0.75621 | 0 | 0 | 0 | 0 | 0 | 0.082953 | 0 | 0 | 0 | 0 | 0 | 0.692308 | 1 | 0.115385 | false | 0 | 0.076923 | 0 | 0.230769 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 |
8ddf6ed4392a1c97342e5972d833a191e47b53d0 | 826 | py | Python | app/api/errors/server.py | maxzhenzhera/my_vocab_backend | 2e9f968374e0bc2fcc0ae40830ca40f3cf5754d1 | [
"MIT"
] | null | null | null | app/api/errors/server.py | maxzhenzhera/my_vocab_backend | 2e9f968374e0bc2fcc0ae40830ca40f3cf5754d1 | [
"MIT"
] | null | null | null | app/api/errors/server.py | maxzhenzhera/my_vocab_backend | 2e9f968374e0bc2fcc0ae40830ca40f3cf5754d1 | [
"MIT"
] | null | null | null | import traceback
from fastapi import Request
from fastapi.responses import PlainTextResponse
from starlette.status import HTTP_500_INTERNAL_SERVER_ERROR
def internal_server_exception_handler(
_: Request,
exception: Exception
) -> PlainTextResponse:
""" Return the traceback of the internal server error. """
exception_traceback = ''.join(
traceback.format_exception(
type(exception),
value=exception,
tb=exception.__traceback__
)
)
message = (
f'{"Internal server error has occurred.":<50}|\n'
f'{"Please, check the traceback.":<50}|\n'
f'{"-" * 50}x\n\n'
)
message += exception_traceback
return PlainTextResponse(
status_code=HTTP_500_INTERNAL_SERVER_ERROR,
content=message
)
| 27.533333 | 62 | 0.6477 | 85 | 826 | 6.058824 | 0.423529 | 0.135922 | 0.147573 | 0.081553 | 0.100971 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01964 | 0.260291 | 826 | 29 | 63 | 28.482759 | 0.823241 | 0.060533 | 0 | 0 | 0 | 0 | 0.130208 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0.16 | 0 | 0.24 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8de0588b8d216183bd189807faae11d1037b92e8 | 11,025 | py | Python | dynamic_rcnn/utils/misc.py | yyzq1/bigwork | c2247abd2355b0f64ddfcc6e489e77b1eec55147 | [
"MIT"
] | 177 | 2020-04-14T01:16:26.000Z | 2022-03-28T03:29:28.000Z | dynamic_rcnn/utils/misc.py | yyzq1/bigwork | c2247abd2355b0f64ddfcc6e489e77b1eec55147 | [
"MIT"
] | 10 | 2020-05-06T13:42:47.000Z | 2021-02-06T13:35:27.000Z | dynamic_rcnn/utils/misc.py | yyzq1/bigwork | c2247abd2355b0f64ddfcc6e489e77b1eec55147 | [
"MIT"
] | 23 | 2020-04-14T05:41:25.000Z | 2021-12-21T02:43:01.000Z | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
helper class that supports empty tensors on some nn functions.
Ideally, add support directly in PyTorch to empty tensors in
those functions.
This can be removed once https://github.com/pytorch/pytorch/issues/12013
is implemented
"""
import math
import torch
from torch import nn
from torch.nn.modules.utils import _ntuple
class _NewEmptyTensorOp(torch.autograd.Function):
@staticmethod
def forward(ctx, x, new_shape):
ctx.shape = x.shape
return x.new_empty(new_shape)
@staticmethod
def backward(ctx, grad):
shape = ctx.shape
return _NewEmptyTensorOp.apply(grad, shape), None
class Conv2d(torch.nn.Conv2d):
def forward(self, x):
if x.numel() > 0:
return super(Conv2d, self).forward(x)
# get output shape
output_shape = [
(i + 2 * p - (di * (k - 1) + 1)) // d + 1
for i, p, di, k, d in zip(
x.shape[-2:], self.padding, self.dilation, self.kernel_size, self.stride
)
]
output_shape = [x.shape[0], self.weight.shape[0]] + output_shape
return _NewEmptyTensorOp.apply(x, output_shape)
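

# Minimal sketch (illustrative only) of the empty-tensor support: a zero-sized
# batch flows through and yields a correctly shaped empty output instead of
# raising:
#   conv = Conv2d(3, 8, kernel_size=3, padding=1)
#   out = conv(torch.empty(0, 3, 32, 32))  # out.shape == (0, 8, 32, 32)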
class ConvTranspose2d(torch.nn.ConvTranspose2d):
def forward(self, x):
if x.numel() > 0:
return super(ConvTranspose2d, self).forward(x)
# get output shape
output_shape = [
(i - 1) * d - 2 * p + (di * (k - 1) + 1) + op
for i, p, di, k, d, op in zip(
x.shape[-2:],
self.padding,
self.dilation,
self.kernel_size,
self.stride,
self.output_padding,
)
]
output_shape = [x.shape[0], self.bias.shape[0]] + output_shape
return _NewEmptyTensorOp.apply(x, output_shape)
class BatchNorm2d(torch.nn.BatchNorm2d):
def forward(self, x):
if x.numel() > 0:
return super(BatchNorm2d, self).forward(x)
# get output shape
output_shape = x.shape
return _NewEmptyTensorOp.apply(x, output_shape)
def interpolate(
input, size=None, scale_factor=None, mode="nearest", align_corners=None
):
if input.numel() > 0:
return torch.nn.functional.interpolate(
input, size, scale_factor, mode, align_corners
)
def _check_size_scale_factor(dim):
if size is None and scale_factor is None:
raise ValueError("either size or scale_factor should be defined")
if size is not None and scale_factor is not None:
raise ValueError("only one of size or scale_factor should be defined")
if (
scale_factor is not None
and isinstance(scale_factor, tuple)
and len(scale_factor) != dim
):
raise ValueError(
"scale_factor shape must match input shape. "
"Input is {}D, scale_factor size is {}".format(dim, len(scale_factor))
)
def _output_size(dim):
_check_size_scale_factor(dim)
if size is not None:
return size
scale_factors = _ntuple(dim)(scale_factor)
# math.floor might return float in py2.7
return [
int(math.floor(input.size(i + 2) * scale_factors[i])) for i in range(dim)
]
output_shape = tuple(_output_size(2))
output_shape = input.shape[:-2] + output_shape
return _NewEmptyTensorOp.apply(input, output_shape)
class DFConv2d(nn.Module):
"""Deformable convolutional layer"""
def __init__(
self,
in_channels,
out_channels,
with_modulated_dcn=True,
kernel_size=3,
stride=1,
groups=1,
dilation=1,
deformable_groups=1,
bias=False
):
super(DFConv2d, self).__init__()
if isinstance(kernel_size, (list, tuple)):
assert isinstance(stride, (list, tuple))
assert isinstance(dilation, (list, tuple))
assert len(kernel_size) == 2
assert len(stride) == 2
assert len(dilation) == 2
padding = (
dilation[0] * (kernel_size[0] - 1) // 2,
dilation[1] * (kernel_size[1] - 1) // 2
)
offset_base_channels = kernel_size[0] * kernel_size[1]
else:
padding = dilation * (kernel_size - 1) // 2
offset_base_channels = kernel_size * kernel_size
if with_modulated_dcn:
from dynamic_rcnn.kernels.ops.dcn import ModulatedDeformConv
offset_channels = offset_base_channels * 3 #default: 27
conv_block = ModulatedDeformConv
else:
from dynamic_rcnn.kernels.ops.dcn import DeformConv
offset_channels = offset_base_channels * 2 #default: 18
conv_block = DeformConv
self.offset = Conv2d(
in_channels,
deformable_groups * offset_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
groups=1,
dilation=dilation
)
for l in [self.offset,]:
nn.init.kaiming_uniform_(l.weight, a=1)
torch.nn.init.constant_(l.bias, 0.)
self.conv = conv_block(
in_channels,
out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
deformable_groups=deformable_groups,
bias=bias
)
self.with_modulated_dcn = with_modulated_dcn
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
def forward(self, x):
if x.numel() > 0:
if not self.with_modulated_dcn:
offset = self.offset(x)
x = self.conv(x, offset)
else:
offset_mask = self.offset(x)
offset = offset_mask[:, :18, :, :]
mask = offset_mask[:, -9:, :, :].sigmoid()
x = self.conv(x, offset, mask)
return x
# get output shape
output_shape = [
(i + 2 * p - (di * (k - 1) + 1)) // d + 1
for i, p, di, k, d in zip(
x.shape[-2:],
self.padding,
self.dilation,
self.kernel_size,
self.stride
)
]
output_shape = [x.shape[0], self.conv.weight.shape[0]] + output_shape
return _NewEmptyTensorOp.apply(x, output_shape)
class FrozenBatchNorm2d(nn.Module):
"""
BatchNorm2d where the batch statistics and the affine parameters
are fixed
"""
def __init__(self, n):
super(FrozenBatchNorm2d, self).__init__()
self.register_buffer("weight", torch.ones(n))
self.register_buffer("bias", torch.zeros(n))
self.register_buffer("running_mean", torch.zeros(n))
self.register_buffer("running_var", torch.ones(n))
def forward(self, x):
# Cast all fixed parameters to half() if necessary
if x.dtype == torch.float16:
self.weight = self.weight.half()
self.bias = self.bias.half()
self.running_mean = self.running_mean.half()
self.running_var = self.running_var.half()
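        # Fold the frozen statistics into a single affine transform:
        #   y = weight * (x - running_mean) / sqrt(running_var) + bias
        # computed below as y = x * scale + bias with the folded scale/bias.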
scale = self.weight * self.running_var.rsqrt()
bias = self.bias - self.running_mean * scale
scale = scale.reshape(1, -1, 1, 1)
bias = bias.reshape(1, -1, 1, 1)
return x * scale + bias
def get_group_gn(dim, dim_per_gp, num_groups):
"""get number of groups used by GroupNorm, based on number of channels."""
assert dim_per_gp == -1 or num_groups == -1, \
"GroupNorm: can only specify G or C/G."
if dim_per_gp > 0:
assert dim % dim_per_gp == 0, \
"dim: {}, dim_per_gp: {}".format(dim, dim_per_gp)
group_gn = dim // dim_per_gp
else:
assert dim % num_groups == 0, \
"dim: {}, num_groups: {}".format(dim, num_groups)
group_gn = num_groups
return group_gn
# TODO, fix the cfg setting
def group_norm(out_channels, affine=True, divisor=1, cfg=None):
out_channels = out_channels // divisor
if cfg:
dim_per_gp = cfg.MODEL.GROUP_NORM.DIM_PER_GP // divisor
num_groups = cfg.MODEL.GROUP_NORM.NUM_GROUPS // divisor
eps = cfg.MODEL.GROUP_NORM.EPSILON # default: 1e-5
else:
dim_per_gp = -1
num_groups = 32
eps = 1e-5
return torch.nn.GroupNorm(
get_group_gn(out_channels, dim_per_gp, num_groups),
out_channels,
eps,
affine
)
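

# Usage sketch (assumption: the channel count is divisible by the default 32
# groups used when no cfg is supplied):
#   gn = group_norm(256)  # equivalent to torch.nn.GroupNorm(32, 256, eps=1e-5)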
def make_conv3x3(
in_channels,
out_channels,
dilation=1,
stride=1,
use_gn=False,
use_relu=False,
kaiming_init=True
):
conv = Conv2d(
in_channels,
out_channels,
kernel_size=3,
stride=stride,
padding=dilation,
dilation=dilation,
bias=False if use_gn else True
)
if kaiming_init:
nn.init.kaiming_normal_(
conv.weight, mode="fan_out", nonlinearity="relu"
)
else:
torch.nn.init.normal_(conv.weight, std=0.01)
if not use_gn:
nn.init.constant_(conv.bias, 0)
module = [conv,]
if use_gn:
module.append(group_norm(out_channels))
if use_relu:
module.append(nn.ReLU(inplace=True))
if len(module) > 1:
return nn.Sequential(*module)
return conv
def make_fc(dim_in, hidden_dim, use_gn=False):
'''
Caffe2 implementation uses XavierFill, which in fact
corresponds to kaiming_uniform_ in PyTorch
'''
if use_gn:
fc = nn.Linear(dim_in, hidden_dim, bias=False)
nn.init.kaiming_uniform_(fc.weight, a=1)
return nn.Sequential(fc, group_norm(hidden_dim))
fc = nn.Linear(dim_in, hidden_dim)
nn.init.kaiming_uniform_(fc.weight, a=1)
nn.init.constant_(fc.bias, 0)
return fc
def conv_with_kaiming_uniform(use_gn=False, use_relu=False):
def make_conv(
in_channels, out_channels, kernel_size, stride=1, dilation=1
):
conv = Conv2d(
in_channels,
out_channels,
kernel_size=kernel_size,
stride=stride,
padding=dilation * (kernel_size - 1) // 2,
dilation=dilation,
bias=False if use_gn else True
)
# Caffe2 implementation uses XavierFill, which in fact
# corresponds to kaiming_uniform_ in PyTorch
nn.init.kaiming_uniform_(conv.weight, a=1)
if not use_gn:
nn.init.constant_(conv.bias, 0)
module = [conv,]
if use_gn:
module.append(group_norm(out_channels))
if use_relu:
module.append(nn.ReLU(inplace=True))
if len(module) > 1:
return nn.Sequential(*module)
return conv
return make_conv
| 31.590258 | 88 | 0.581043 | 1,372 | 11,025 | 4.478863 | 0.169825 | 0.039056 | 0.014321 | 0.031245 | 0.403255 | 0.358503 | 0.326932 | 0.279414 | 0.230269 | 0.199349 | 0 | 0.017308 | 0.31873 | 11,025 | 348 | 89 | 31.681034 | 0.800825 | 0.080363 | 0 | 0.355872 | 0 | 0 | 0.030707 | 0 | 0 | 0 | 0 | 0.002874 | 0.02847 | 1 | 0.064057 | false | 0 | 0.021352 | 0 | 0.192171 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |