# File: storm_kit/mpc/model/isaac_model.py (repo: jsacks3/storm, license: MIT)

from .model_base import DynamicsModelBase
import numpy as np
import torch
try:
from isaacgym import gymapi
from isaacgym import gymutil
from isaacgym import gymtorch
except Exception:
print("ERROR: gym not loaded, this is okay when generating doc")
class IsaacModel(DynamicsModelBase):
def __init__(self, gym_instance, env, d_obs, d_action, tensor_args={'device':'cpu','dtype':torch.float32}):
super(IsaacModel, self).__init__()
self.gym_instance = gym_instance
self.gym = gym_instance.gym
self.sim = gym_instance.sim
self.env = env
self.tensor_args = tensor_args
self.d_obs = d_obs
self.d_action = d_action
self.num_envs = env.num_envs
self.num_dof = env.num_dof
def step(self, actions):
self.pre_physics_step(actions)
self.gym.simulate(self.sim)
#self.gym.fetch_results(self.sim, True)
#self.gym.step_graphics(self.sim)
#self.gym.draw_viewer(self.gym_instance.viewer, self.sim, True)
self.post_physics_step()
def rollout_open_loop(self, start_state, act_seq):
num_particles, horizon, _ = act_seq.shape
inp_device = start_state.device
start_state = start_state.to(**self.tensor_args)
act_seq = act_seq.to(**self.tensor_args)
start_state = start_state.unsqueeze(0).repeat((self.env.num_envs, 1))
self.env.set_state(start_state, env_ids=None)
state_seq = torch.zeros((num_particles, horizon, self.d_obs), **self.tensor_args)
for t in range(horizon):
self.step(act_seq[:, t])
state_t = self.env.get_state(env_ids=None)
state_seq[:, t] = state_t.view(num_particles, -1)
trajectories = dict(
actions = act_seq,
state_seq = state_seq.to(device=inp_device)
)
return trajectories
def get_next_state(self, curr_state, act, dt):
pass
def pre_physics_step(self, actions):
self.env.set_control(actions, env_ids=None)
def post_physics_step(self):
pass
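
# A hedged usage sketch for the rollout interface above. The constructor and
# rollout call are left as comments because they require a live IsaacGym
# gym_instance/env; only the expected tensor shapes are shown concretely.
num_envs, horizon, d_obs, d_action = 4, 30, 14, 7
start_state = torch.zeros(d_obs)                    # one start state, broadcast to all envs
act_seq = torch.randn(num_envs, horizon, d_action)  # one action sequence per env/particle
# model = IsaacModel(gym_instance, env, d_obs, d_action)
# traj = model.rollout_open_loop(start_state, act_seq)
# traj['state_seq'] would then have shape (num_envs, horizon, d_obs)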
# File: containers/test-apps/kitchen/tests/kitchen/conftest.py (repo: twosigmajab/waiter, license: Apache-2.0)

import logging
import os
import pytest
import requests
import socket
import subprocess
import tenacity
def _find_free_port(hostname, start_port=8000, attempts=1000):
for p in range(start_port, start_port+attempts):
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
with sock:
sock.bind((hostname, p))
return sock.getsockname()[1]
        except OSError:
            pass  # port in use; try the next one
else:
raise Exception('Could not find a free port for the Kitchen server.')
class KitchenServer():
def __init__(self, ssl=False):
self.scheme = 'https' if ssl else 'http'
self.kitchen_path = os.getenv('KITCHEN_PATH', './bin/kitchen')
self.hostname = os.getenv('KITCHEN_HOSTNAME', 'localhost')
        port_string = os.getenv('KITCHEN_SSL_PORT' if ssl else 'KITCHEN_PORT')
self.port = int(port_string) if port_string else _find_free_port(self.hostname)
if os.getenv('KITCHEN_AUTOSTART', 'true').lower() == 'true':
            logging.info('Automatically starting new Kitchen server')
args = [self.kitchen_path, '--hostname', self.hostname, '--port', str(self.port)]
if ssl:
args.append('--ssl')
self.__server_process = subprocess.Popen(args)
else:
self.__server_process = None
self.await_server()
def await_server(self, max_wait_seconds=60):
@tenacity.retry(stop=tenacity.stop_after_delay(max_wait_seconds), wait=tenacity.wait_fixed(1))
def await_helper():
assert requests.get(self.url(), verify='.')
await_helper()
logging.info(f'Kitchen server is running on {self.hostname}:{self.port}')
def url(self, path='/'):
assert path.startswith('/')
return f'{self.scheme}://{self.hostname}:{self.port}{path}'
def kill(self):
if self.__server_process:
self.__server_process.terminate()
            logging.info('Kitchen server has been killed')
@pytest.fixture(scope="session")
def kitchen_server(request):
"""Manages an instance of the Kitchen test app server."""
server = KitchenServer()
request.addfinalizer(server.kill)
return server
@pytest.fixture(scope="session")
def kitchen_ssl_server(request):
"""Manages an instance of the Kitchen test app server with SSL."""
server = KitchenServer(ssl=True)
request.addfinalizer(server.kill)
return server
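
# A hedged example of how the session-scoped fixtures above are consumed from a
# test module (the test name is hypothetical; verify='.' mirrors await_server):
def test_kitchen_responds(kitchen_server):
    import requests
    response = requests.get(kitchen_server.url('/'), verify='.')
    assert response.status_code == 200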
# File: backend/post/tests.py (repo: dzhfrv/dzhfrv-starn, license: MIT)

from rest_framework import status
from rest_framework.test import APIClient
from backend.auth_jwt.tests import BaseTestClass, create_user
from .models import Post
from .serializers import PostSerializer
class TestPostResource(BaseTestClass):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.endpoint = '/api/v1/posts/'
def setUp(self):
super().setUp()
self.post = Post.objects.create(
author=self.base_user,
title='title1',
text='text1'
)
Post.objects.create(
author=create_user('new@email.com'),
title='title 2',
text='text2'
)
self.new_post = {
'title': 'title 3',
'text': 'text3'
}
self.third_post = {
'title': 'title 4',
'text': 'text 4'
}
def test_get_all_posts(self):
response = self.client.get(self.endpoint)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_get_all_posts_no_token(self):
unknown_user = APIClient()
response = unknown_user.get(self.endpoint)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_create_post_success(self):
response = self.client.post(self.endpoint, self.new_post)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.json(), {'id': Post.objects.last().id})
self.assertEqual(Post.objects.count(), 3)
def test_update_post(self):
response = self.client.post(f'{self.endpoint}{self.post.id}/')
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_create_posts_no_token(self):
unknown_user = APIClient()
response = unknown_user.post(self.endpoint)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_create_post_duplicate_title(self):
self.new_post['title'] = 'title 2'
response = self.client.post(self.endpoint, self.new_post)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
response.json(),
{'title': ['post with this title already exists.']}
)
def test_create_post_without_text(self):
self.new_post['text'] = ''
response = self.client.post(self.endpoint, self.new_post)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
response.json(),
{'text': ['This field may not be blank.']}
)
def test_create_post_without_title(self):
self.new_post['title'] = ''
response = self.client.post(self.endpoint, self.new_post)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
response.json(),
{'title': ['This field may not be blank.']}
)
def test_get_post_no_token(self):
unknown_user = APIClient()
response = unknown_user.get(f'{self.endpoint}{self.post.id}/')
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
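
# Hedged sketch of the Post model these tests exercise, inferred from the
# assertions above (a unique title gives "already exists", blank title/text
# give "may not be blank"); the real definition lives in backend/post/models.py:
#
#     class Post(models.Model):
#         author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
#         title = models.CharField(max_length=..., unique=True)
#         text = models.TextField()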
# File: docs/macros.py (repo: vishalbelsare/neworder, license: MIT)

# macros for mkdocs-macros-plugin
import os
import requests
_inline_code_styles = {
".py": "python",
".sh": "bash",
".h": "cpp",
".cpp": "cpp",
".c": "c",
".rs": "rs",
".js": "js",
".md": None
}
def define_env(env):
# @env.macro
# def test(s):
# return "```some python code here: %s```\n" % s
@env.macro
def insert_doi():
response = requests.get('https://zenodo.org/api/records', params={'q': '4031821'})
if response.status_code == 200:
result = response.json()
if "hits" in result and \
"hits" in result["hits"] and \
len(result["hits"]["hits"]) > 0 and \
"doi" in result["hits"]["hits"][0]:
return result["hits"]["hits"][0]["doi"]
else:
return "[json error retrieving doi]"
return "[http error %d retrieving doi]" % response.status_code
@env.macro
def insert_version():
""" This is the *released* version not the dev one """
response = requests.get('https://zenodo.org/api/records', params={'q': '4031821'})
if response.status_code == 200:
result = response.json()
if "hits" in result and \
"hits" in result["hits"] and \
len(result["hits"]["hits"]) > 0 and \
"metadata" in result["hits"]["hits"][0] and \
"version" in result["hits"]["hits"][0]["metadata"]:
return result["hits"]["hits"][0]["metadata"]["version"]
else:
return "[json error retrieving doi]"
return "[http error %d retrieving doi]" % response.status_code
@env.macro
def include_snippet(filename, tag=None, show_filename=True):
""" looks for code in <filename> between lines containing "!<tag>!" """
full_filename = os.path.join(env.project_dir, filename)
_, file_type = os.path.splitext(filename)
# default to literal "text" for inline code style
code_style = _inline_code_styles.get(file_type, "text")
with open(full_filename, 'r') as f:
lines = f.readlines()
if tag:
tag = "!%s!" % tag
span = []
for i, l in enumerate(lines):
if tag in l:
span.append(i)
if len(span) != 2:
return "```ERROR %s (%s) too few/many tags (%s) for '%s'```" % (filename, code_style, len(span), tag)
lines = lines[span[0] + 1: span[1]]
if show_filename:
footer = "\n[file: **%s**]\n" % filename
else:
footer = ""
# line_range = lines[start_line+1:end_line]
if code_style is not None:
return "```%s\n" % code_style + "".join(lines) + "```" + footer
else:
return "".join(lines) + footer
# if __name__ == "__main__":
#   print(_include_snippet("examples/chapter1/model.py", "tag"))
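
# A hedged usage sketch: with mkdocs-macros-plugin enabled, a docs page calls
# the macros above via Jinja2 syntax. The page content is shown as a Python
# string purely for illustration, and the tag name "setup" is hypothetical.
page_md = """
Latest release: {{ insert_version() }} (DOI: {{ insert_doi() }})

{{ include_snippet("examples/chapter1/model.py", "setup") }}
"""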
# File: train.py (repo: UoA-eResearch/kitti-detect, license: MIT)

import time
import torch
import torch.utils.data
import torchvision
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from engine import train_one_epoch, evaluate
import utils
import transforms as T
from common import KITTIDataset
import json
def get_transforms(train):
transforms = []
# converts the image, a PIL image, into a PyTorch Tensor
transforms.append(T.ToTensor())
if train:
# during training, randomly flip the training images
# and ground-truth for data augmentation
transforms.append(T.RandomHorizontalFlip(0.5))
transforms.append(T.RandomVerticalFlip(0.5))
return T.Compose(transforms)
def get_model(num_classes, pretrained=False):
# load an object detection model pre-trained on COCO
model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=pretrained)
# get the number of input features for the classifier
in_features = model.roi_heads.box_predictor.cls_score.in_features
# replace the pre-trained head with a new on
model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
return model
def main():
# experiment name corresponds to the foldername and file prefix for data partitions and label maps
exp_name = "t1-final"
batch_size = 16
epochs = 30
use_label_overrides = True
train_path = f"{exp_name}/{exp_name}-part-0.json"
test_path = f"{exp_name}/{exp_name}-test.json"
# load data
with open(train_path, 'r') as f:
data_train = json.load(f)
with open(test_path, 'r') as f:
data_test = json.load(f)
print(f"train samples: {len(data_train)}, test samples: {len(data_test)}")
# load the label map
label_map_path = f"{exp_name}/{exp_name}-label-map.json"
with open(label_map_path, 'r') as f:
label_map = json.load(f)
print(f"label map: {label_map}")
reverse_label_map = {v: k for k, v in label_map.items()}
    # define datasets and data loaders
ds = KITTIDataset(data_train, label_map, use_label_overrides=use_label_overrides, transforms=get_transforms(train=True))
ds_test = KITTIDataset(data_test, label_map, use_label_overrides=use_label_overrides, transforms=get_transforms(train=False))
dl = torch.utils.data.DataLoader(ds, batch_size=batch_size, shuffle=True, num_workers=1, collate_fn=utils.collate_fn)
dl_test = torch.utils.data.DataLoader(ds_test, batch_size=1, shuffle=False, num_workers=1, collate_fn=utils.collate_fn)
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
#device = torch.device('cpu')
print('device:', device)
model = get_model(len(label_map)).to(device)
params = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.Adam(params, lr=0.001)
t_start = time.time()
logs = []
for e in range(epochs):
logger = train_one_epoch(model, optimizer, dl, device, e, print_freq=100, grad_clip=1)
log_values = {k:{'median':v.median, 'mean':v.avg} for k,v in logger.meters.items()}
logs.append(log_values)
# update the learning rate
#lr_scheduler.step()
# evaluate on the test dataset
#evaluate(model, dl_test, device=device)
print(f'total time: {(time.time() - t_start)/3600} hrs')
# save logs
log_name = f"{exp_name}/{exp_name}-model-logs.json"
with open(log_name, 'w') as f:
json.dump(logs, f)
# save the model
model_path = f"{exp_name}/{exp_name}-model.pt"
torch.save(model.state_dict(), model_path)
if __name__ == "__main__":
main()
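
# A hedged sketch of reloading the checkpoint saved by main() for inference.
# Wrapped in a function so nothing runs on import; the KITTI-like image size
# below is illustrative.
def load_for_inference(exp_name="t1-final"):
    with open(f"{exp_name}/{exp_name}-label-map.json") as f:
        label_map = json.load(f)
    model = get_model(len(label_map))
    model.load_state_dict(torch.load(f"{exp_name}/{exp_name}-model.pt", map_location="cpu"))
    model.eval()
    with torch.no_grad():
        dummy = [torch.rand(3, 375, 1242)]  # one RGB image tensor in [0, 1]
        return model(dummy)                 # list of dicts: boxes, labels, scores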
# File: api/app/models/statsQueries.py (repo: HotMaps/Toolbox, license: Apache-2.0)

import datetime
from .. import helper
from app import dbGIS as db
from app import constants
from decimal import *
from app.models.indicators import layersData, ELECRICITY_MIX
from app import celery
from . import generalData
from app import model
import logging
log = logging.getLogger(__name__)
class LayersStats:
@staticmethod
def run_stat(payload):
year = payload['year']
layersPayload = payload['layers']
scale_level = payload['scale_level']
        # TODO: these values come straight from the request payload and must be
        # sanitized before they are interpolated into SQL below
selection_areas = ''
is_hectare = False
        noDataLayers = []
        layers = []
        output = []
if scale_level in constants.NUTS_LAU_VALUES:
selection_areas = payload['nuts']
elif scale_level == constants.hectare_name:
selection_areas = payload['areas']
geom = helper.areas_to_geom(selection_areas)
is_hectare=True
for c, layer in enumerate(layersPayload):
if layersPayload[c] in layersData:
layers.append(layersPayload[c])
else:
noDataLayers.append(layersPayload[c])
if is_hectare:
output = LayersStats.get_stats(selection_areas=geom, year=year, layers=layers,scale_level=scale_level, is_hectare=is_hectare)
else:
nuts = ''.join("'"+str(nu)+"'," for nu in selection_areas)[:-1]
output = LayersStats.get_stats(selection_areas=nuts, year=year, layers=layers, scale_level=scale_level, is_hectare=False)
return output, noDataLayers
@staticmethod
def get_stats(year, layers, selection_areas, is_hectare, scale_level):
# Get the number of layers
result = []
# Check if there is at least one layer
if layers:
# Construction of the query
sql_query = ''
sql_with = ' WITH '
sql_select = ' SELECT '
sql_from = ' FROM '
for layer in layers:
if len(layersData[layer]['indicators']) != 0 and scale_level in layersData[layer]['data_lvl']:
if is_hectare:
sql_with += generalData.constructWithPartEachLayerHectare(geometry=selection_areas, year=year, layer=layer, scale_level=scale_level) + ','
else:
sql_with += generalData.constructWithPartEachLayerNutsLau(layer=layer, nuts=selection_areas, year=year, scale_level=scale_level) + ','
for indicator in layersData[layer]['indicators']:
if 'table_column' in indicator:
sql_select += layer+indicator['indicator_id']+','
elif indicator['reference_tablename_indicator_id_1'] in layers and indicator['reference_tablename_indicator_id_2'] in layers:
sql_select+= indicator['reference_tablename_indicator_id_1']+indicator['reference_indicator_id_1']+' '+indicator['operator']+' '+indicator['reference_tablename_indicator_id_2']+indicator['reference_indicator_id_2']+','
sql_from += layersData[layer]['from_indicator_name']+','
# Combine string to a single query
sql_with = sql_with[:-1]
sql_select = sql_select[:-1]
sql_from = sql_from[:-1]
sql_query = sql_with + sql_select + sql_from + ';'
# Run the query
query_geographic_database_first = model.query_geographic_database_first(sql_query)
# Storing the results only if there is data
count_indic = 0
areas = selection_areas.split(",")
for layer in layers:
values = []
for indicator in layersData[layer]['indicators']:
if ('table_column' not in indicator and (indicator['reference_tablename_indicator_id_1'] not in layers or indicator['reference_tablename_indicator_id_2'] not in layers)) or scale_level not in layersData[layer]['data_lvl']:
continue
currentValue = query_geographic_database_first[count_indic] or 0
count_indic += 1
if "agg_method" in indicator and indicator["agg_method"] == "mean":
currentValue /= len(areas)
if 'factor' in indicator: # Decimal * float => rise error
currentValue = float(currentValue) * float(indicator['factor'])
try:
values.append({
'name': layer + '_' + indicator['indicator_id'],
'value': currentValue,
'unit': indicator['unit']
})
except KeyError: # Special case we retrieve only one value for an hectare
pass
result.append({
'name': layer,
'values': values
})
return result
class ElectricityMix:
@staticmethod
def getEnergyMixNutsLau(nuts):
sql_query = "WITH energy_total as (SELECT sum(electricity_generation) as value FROM " + ELECRICITY_MIX + " WHERE nuts0_code IN ("+nuts+") )" + \
"SELECT DISTINCT energy_carrier, SUM(electricity_generation * 100 /energy_total.value) FROM " + ELECRICITY_MIX + " ,energy_total WHERE nuts0_code IN ("+nuts+") GROUP BY energy_carrier ORDER BY energy_carrier ASC" ;
query = model.query_geographic_database(sql_query)
labels = []
data = []
backgroundColor = []
for c, l in enumerate(query):
labels.append(l[0])
data.append(helper.roundValue(l[1]))
backgroundColor.append(helper.getGenerationMixColor(l[0]))
datasets = {
'data' : data,
'label': '%',
'backgroundColor': backgroundColor
}
result = {
'labels':labels,
'datasets':datasets
}
return result
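
# A hedged example of the payload shape LayersStats.run_stat() expects,
# inferred from the key accesses above (the layer name and NUTS codes are
# illustrative and not guaranteed to exist in layersData):
example_payload = {
    'year': '2012',
    'layers': ['heat_tot_curr_density'],
    'scale_level': 'NUTS2',    # one of constants.NUTS_LAU_VALUES, or hectare
    'nuts': ['AT12', 'AT13'],  # 'areas' would be used at hectare level instead
}
# output, no_data_layers = LayersStats.run_stat(example_payload)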
# File: src/ralph/lib/table/table.py (repo: DoNnMyTh/ralph, license: Apache-2.0)

try:
from dj.choices import Choices
use_choices = True
except ImportError:
Choices = None
use_choices = False
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import FieldDoesNotExist
from django.core.urlresolvers import reverse
from django.forms.utils import flatatt
from django.template.loader import render_to_string
from ralph.admin.helpers import (
get_field_by_relation_path,
get_field_title_by_relation_path,
getattr_dunder
)
class Table(object):
"""
Generating contents for table based on predefined columns and queryset.
Example:
>>> table = Table(queryset, ['id', ('name', 'My field name')])
>>> table.get_table_content()
[
        [{'value': 'id'}, {'value': 'My field name'}],
[
{'value': '1', 'html_attributes': ''},
{'value': 'Test', 'html_attributes': ''}
],
]
See __init__'s docstring for additional info about Table params.
"""
template_name = 'table.html'
def __init__(
self, queryset, list_display, additional_row_method=None, request=None,
transpose=False,
):
"""
Initialize table class
Args:
queryset: django queryset
list_display: field list to display; a value on the list could be
plain string (name of model's field - verbose name of field
will be used here) or tuple (field_name, verbose_name)
additional_row_method: list of additional method for each row
transpose: set to True if table should be transposed (rows swapped
with columns)
"""
self.queryset = queryset
self.list_display_raw = list_display
self.list_display = [
(f[0] if isinstance(f, (tuple, list)) else f) for f in list_display
]
self.additional_row_method = additional_row_method
self.request = request
self.transpose = transpose
@property
def headers_count(self):
return len(self.get_headers())
@property
def rows_count(self):
return self.queryset.count()
def get_headers(self):
"""
Return headers for table.
"""
headers = []
for field in self.list_display_raw:
if isinstance(field, (list, tuple)):
headers.append({'value': field[1]})
else:
try:
name = getattr(self, field).title
except AttributeError:
name = get_field_title_by_relation_path(
self.queryset.model, field
)
headers.append({'value': name})
return headers
def get_field_value(self, item, field):
"""
Returns the value for the given field name.
Looking in:
If the field is type Choices returns choice name
else returns the value of row
:param item: row from dict
:param field: field name
"""
value = None
if hasattr(self, field):
value = getattr(self, field)(item)
else:
value = getattr_dunder(item, field)
try:
choice_class = get_field_by_relation_path(
item._meta.model, field
).choices
except FieldDoesNotExist:
choice_class = None
if (
use_choices and choice_class and
isinstance(choice_class, Choices)
):
value = choice_class.name_from_id(value)
return value
def get_table_content(self):
"""
Return content of table.
"""
result = [self.get_headers()]
# Remove fields which are not in model
list_display = [
field for field in self.list_display if not hasattr(self, field)
]
if 'id' not in list_display:
list_display.append('id')
if self.additional_row_method:
colspan = len(self.list_display)
for item in self.queryset:
result.append([
{
'value': self.get_field_value(item, field),
'html_attributes': ''
} for field in self.list_display
])
if self.additional_row_method:
for method in self.additional_row_method:
additional_data = [
{'value': i, 'html_attributes': flatatt(
{'colspan': colspan}
)} for i in getattr(self, method)(item)
]
if additional_data:
result.append(additional_data)
if self.transpose:
result = list(zip(*result))
return result
def render(self, request=None):
content = self.get_table_content()
context = {
'show_header': not self.transpose,
'headers_count': self.headers_count,
'rows_count': self.rows_count,
'LIMIT': 5
}
if self.transpose:
context.update({'rows': content})
else:
context.update({'headers': content[0], 'rows': content[1:]})
return render_to_string(
self.template_name,
context=context,
request=request,
)
class TableWithUrl(Table):
"""
Table with built-in url column.
"""
def get_field_value(self, item, field):
value = super().get_field_value(item, field)
if field == self.url_field:
return '<a href="{}">{}</a>'.format(
reverse(
'admin:view_on_site',
args=(ContentType.objects.get_for_model(item).id, item.id,)
),
value
)
return value
def __init__(self, queryset, list_display, *args, **kwargs):
self.url_field = kwargs.pop('url_field', None)
super().__init__(
queryset=queryset, list_display=list_display, *args, **kwargs
)
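
# A hedged usage sketch mirroring the docstring example at the top of Table
# (the model, queryset, and field names are illustrative):
#
#     table = TableWithUrl(
#         MyModel.objects.all(),
#         list_display=['id', ('name', 'My field name')],
#         url_field='name',
#     )
#     html = table.render(request)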
# File: caravel/views.py (repo: dolfly/caravel, license: Apache-2.0)

"""Flask web views for Caravel"""
from datetime import datetime
import json
import logging
import re
import time
import traceback
from flask import (
g, request, redirect, flash, Response, render_template, Markup)
from flask.ext.appbuilder import ModelView, CompactCRUDMixin, BaseView, expose
from flask.ext.appbuilder.actions import action
from flask.ext.appbuilder.models.sqla.interface import SQLAInterface
from flask.ext.appbuilder.security.decorators import has_access
from pydruid.client import doublesum
from sqlalchemy import create_engine
import sqlalchemy as sqla
from wtforms.validators import ValidationError
import pandas as pd
from sqlalchemy import select, text
from sqlalchemy.sql.expression import TextAsFrom
from werkzeug.routing import BaseConverter
from caravel import appbuilder, db, models, viz, utils, app, sm, ascii_art
config = app.config
log_this = models.Log.log_this
def validate_json(form, field): # noqa
try:
json.loads(field.data)
except Exception as e:
logging.exception(e)
raise ValidationError("json isn't valid")
class DeleteMixin(object):
@action(
"muldelete", "Delete", "Delete all Really?", "fa-trash", single=False)
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class CaravelModelView(ModelView):
page_size = 500
class TableColumnInlineView(CompactCRUDMixin, CaravelModelView): # noqa
datamodel = SQLAInterface(models.TableColumn)
can_delete = False
edit_columns = [
'column_name', 'description', 'groupby', 'filterable', 'table',
'count_distinct', 'sum', 'min', 'max', 'expression', 'is_dttm']
add_columns = edit_columns
list_columns = [
'column_name', 'type', 'groupby', 'filterable', 'count_distinct',
'sum', 'min', 'max', 'is_dttm']
page_size = 500
description_columns = {
'is_dttm': (
"Whether to make this column available as a "
"[Time Granularity] option, column has to be DATETIME or "
"DATETIME-like"),
}
appbuilder.add_view_no_menu(TableColumnInlineView)
class DruidColumnInlineView(CompactCRUDMixin, CaravelModelView): # noqa
datamodel = SQLAInterface(models.DruidColumn)
edit_columns = [
'column_name', 'description', 'datasource', 'groupby',
'count_distinct', 'sum', 'min', 'max']
list_columns = [
'column_name', 'type', 'groupby', 'filterable', 'count_distinct',
'sum', 'min', 'max']
can_delete = False
page_size = 500
def post_update(self, col):
col.generate_metrics()
appbuilder.add_view_no_menu(DruidColumnInlineView)
class SqlMetricInlineView(CompactCRUDMixin, CaravelModelView): # noqa
datamodel = SQLAInterface(models.SqlMetric)
list_columns = ['metric_name', 'verbose_name', 'metric_type']
edit_columns = [
'metric_name', 'description', 'verbose_name', 'metric_type',
'expression', 'table']
add_columns = edit_columns
page_size = 500
appbuilder.add_view_no_menu(SqlMetricInlineView)
class DruidMetricInlineView(CompactCRUDMixin, CaravelModelView): # noqa
datamodel = SQLAInterface(models.DruidMetric)
list_columns = ['metric_name', 'verbose_name', 'metric_type']
edit_columns = [
'metric_name', 'description', 'verbose_name', 'metric_type',
'datasource', 'json']
add_columns = [
'metric_name', 'verbose_name', 'metric_type', 'datasource', 'json']
page_size = 500
validators_columns = {
'json': [validate_json],
}
appbuilder.add_view_no_menu(DruidMetricInlineView)
class DatabaseView(CaravelModelView, DeleteMixin): # noqa
datamodel = SQLAInterface(models.Database)
list_columns = ['database_name', 'sql_link', 'created_by_', 'changed_on']
order_columns = utils.list_minus(list_columns, ['created_by_'])
add_columns = ['database_name', 'sqlalchemy_uri', 'cache_timeout']
search_exclude_columns = ('password',)
edit_columns = add_columns
add_template = "caravel/models/database/add.html"
edit_template = "caravel/models/database/edit.html"
base_order = ('changed_on', 'desc')
description_columns = {
'sqlalchemy_uri': (
"Refer to the SqlAlchemy docs for more information on how "
"to structure your URI here: "
"http://docs.sqlalchemy.org/en/rel_1_0/core/engines.html")
}
def pre_add(self, db):
conn = sqla.engine.url.make_url(db.sqlalchemy_uri)
db.password = conn.password
conn.password = "X" * 10 if conn.password else None
db.sqlalchemy_uri = str(conn) # hides the password
def pre_update(self, db):
self.pre_add(db)
appbuilder.add_view(
DatabaseView,
"Databases",
icon="fa-database",
category="Sources",
category_icon='fa-database',)
class TableModelView(CaravelModelView, DeleteMixin): # noqa
datamodel = SQLAInterface(models.SqlaTable)
list_columns = [
'table_link', 'database', 'sql_link', 'is_featured',
'changed_by_', 'changed_on']
add_columns = [
'table_name', 'database', 'default_endpoint', 'offset', 'cache_timeout']
edit_columns = [
'table_name', 'is_featured', 'database', 'description', 'owner',
'main_dttm_col', 'default_endpoint', 'offset', 'cache_timeout']
related_views = [TableColumnInlineView, SqlMetricInlineView]
base_order = ('changed_on', 'desc')
description_columns = {
'offset': "Timezone offset (in hours) for this datasource",
'description': Markup(
"Supports <a href='https://daringfireball.net/projects/markdown/'>"
"markdown</a>"),
}
def post_add(self, table):
try:
table.fetch_metadata()
except Exception as e:
logging.exception(e)
flash(
"Table [{}] doesn't seem to exist, "
"couldn't fetch metadata".format(table.table_name),
"danger")
utils.merge_perm(sm, 'datasource_access', table.perm)
def post_update(self, table):
self.post_add(table)
appbuilder.add_view(
TableModelView,
"Tables",
category="Sources",
icon='fa-table',)
appbuilder.add_separator("Sources")
class DruidClusterModelView(CaravelModelView, DeleteMixin): # noqa
datamodel = SQLAInterface(models.DruidCluster)
add_columns = [
'cluster_name',
'coordinator_host', 'coordinator_port', 'coordinator_endpoint',
'broker_host', 'broker_port', 'broker_endpoint',
]
edit_columns = add_columns
list_columns = ['cluster_name', 'metadata_last_refreshed']
if config['DRUID_IS_ACTIVE']:
appbuilder.add_view(
DruidClusterModelView,
"Druid Clusters",
icon="fa-cubes",
category="Sources",
category_icon='fa-database',)
class SliceModelView(CaravelModelView, DeleteMixin): # noqa
datamodel = SQLAInterface(models.Slice)
can_add = False
label_columns = {
'created_by_': 'Creator',
'datasource_link': 'Datasource',
}
list_columns = [
'slice_link', 'viz_type',
'datasource_link', 'created_by_', 'changed_on']
order_columns = utils.list_minus(list_columns, ['created_by_'])
edit_columns = [
'slice_name', 'description', 'viz_type', 'druid_datasource',
'table', 'dashboards', 'params', 'cache_timeout']
base_order = ('changed_on', 'desc')
description_columns = {
'description': Markup(
"The content here can be displayed as widget headers in the "
"dashboard view. Supports "
"<a href='https://daringfireball.net/projects/markdown/'>"
"markdown</a>"),
}
appbuilder.add_view(
SliceModelView,
"Slices",
icon="fa-bar-chart",
category="",
category_icon='',)
class SliceAsync(SliceModelView): # noqa
list_columns = [
'slice_link', 'viz_type',
'created_by_', 'modified', 'icons']
label_columns = {
'icons': ' ',
}
appbuilder.add_view_no_menu(SliceAsync)
class DashboardModelView(CaravelModelView, DeleteMixin): # noqa
datamodel = SQLAInterface(models.Dashboard)
label_columns = {
'created_by_': 'Creator',
}
list_columns = ['dashboard_link', 'created_by_', 'changed_on']
order_columns = utils.list_minus(list_columns, ['created_by_'])
edit_columns = [
'dashboard_title', 'slug', 'slices', 'position_json', 'css',
'json_metadata']
add_columns = edit_columns
base_order = ('changed_on', 'desc')
description_columns = {
'position_json': (
"This json object describes the positioning of the widgets in "
"the dashboard. It is dynamically generated when adjusting "
"the widgets size and positions by using drag & drop in "
"the dashboard view"),
'css': (
"The css for individual dashboards can be altered here, or "
"in the dashboard view where changes are immediately "
"visible"),
'slug': "To get a readable URL for your dashboard",
}
def pre_add(self, obj):
obj.slug = obj.slug.strip() or None
if obj.slug:
obj.slug = obj.slug.replace(" ", "-")
obj.slug = re.sub(r'\W+', '', obj.slug)
def pre_update(self, obj):
self.pre_add(obj)
appbuilder.add_view(
DashboardModelView,
"Dashboards",
icon="fa-dashboard",
category="",
category_icon='',)
class DashboardModelViewAsync(DashboardModelView): # noqa
list_columns = ['dashboard_link', 'created_by_', 'modified']
appbuilder.add_view_no_menu(DashboardModelViewAsync)
class LogModelView(CaravelModelView):
datamodel = SQLAInterface(models.Log)
list_columns = ('user', 'action', 'dttm')
edit_columns = ('user', 'action', 'dttm', 'json')
base_order = ('dttm', 'desc')
appbuilder.add_view(
LogModelView,
"Action Log",
category="Security",
icon="fa-list-ol")
class DruidDatasourceModelView(CaravelModelView, DeleteMixin): # noqa
datamodel = SQLAInterface(models.DruidDatasource)
list_columns = [
'datasource_link', 'cluster', 'owner',
'created_by_', 'created_on',
'changed_by_', 'changed_on',
'offset']
order_columns = utils.list_minus(
list_columns, ['created_by_', 'changed_by_'])
related_views = [DruidColumnInlineView, DruidMetricInlineView]
edit_columns = [
'datasource_name', 'cluster', 'description', 'owner',
'is_featured', 'is_hidden', 'default_endpoint', 'offset',
'cache_timeout']
page_size = 500
base_order = ('datasource_name', 'asc')
description_columns = {
'offset': "Timezone offset (in hours) for this datasource",
'description': Markup(
"Supports <a href='"
"https://daringfireball.net/projects/markdown/'>markdown</a>"),
}
def post_add(self, datasource):
datasource.generate_metrics()
utils.merge_perm(sm, 'datasource_access', datasource.perm)
def post_update(self, datasource):
self.post_add(datasource)
if config['DRUID_IS_ACTIVE']:
appbuilder.add_view(
DruidDatasourceModelView,
"Druid Datasources",
category="Sources",
icon="fa-cube")
@app.route('/health')
def health():
return "OK"
@app.route('/ping')
def ping():
return "OK"
class R(BaseView):
"""used for short urls"""
@log_this
@expose("/<url_id>")
def index(self, url_id):
url = db.session.query(models.Url).filter_by(id=url_id).first()
if url:
print(url.url)
return redirect('/' + url.url)
else:
flash("URL to nowhere...", "danger")
return redirect('/')
@log_this
@expose("/shortner/", methods=['POST', 'GET'])
def shortner(self):
url = request.form.get('data')
obj = models.Url(url=url)
db.session.add(obj)
db.session.commit()
return("{request.headers[Host]}/r/{obj.id}".format(
request=request, obj=obj))
appbuilder.add_view_no_menu(R)
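
# A hedged sketch of exercising the short-URL endpoints above with Flask's test
# client; the '/r/...' paths are assumptions based on the expose decorators, and
# the calls are wrapped in a function so nothing runs on import.
def _shortner_demo():
    client = app.test_client()
    resp = client.post('/r/shortner/', data={'data': 'caravel/dashboard/1/'})
    short_url = resp.get_data(as_text=True)  # e.g. "localhost/r/42"
    return client.get('/r/42')               # redirects to /caravel/dashboard/1/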
class Caravel(BaseView):
"""The base views for Caravel!"""
@has_access
@expose("/explore/<datasource_type>/<datasource_id>/")
@expose("/datasource/<datasource_type>/<datasource_id>/") # Legacy url
@log_this
def explore(self, datasource_type, datasource_id):
datasource_class = models.SqlaTable \
if datasource_type == "table" else models.DruidDatasource
datasource = (
db.session
.query(datasource_class)
.filter_by(id=datasource_id)
.first()
)
slice_id = request.args.get("slice_id")
slc = None
if slice_id:
slc = (
db.session.query(models.Slice)
.filter_by(id=slice_id)
.first()
)
if not datasource:
flash("The datasource seem to have been deleted", "alert")
all_datasource_access = self.appbuilder.sm.has_access(
'all_datasource_access', 'all_datasource_access')
datasource_access = self.appbuilder.sm.has_access(
'datasource_access', datasource.perm)
if not (all_datasource_access or datasource_access):
flash("You don't seem to have access to this datasource", "danger")
return redirect('/slicemodelview/list/')
action = request.args.get('action')
if action in ('save', 'overwrite'):
return self.save(request.args, slc)
viz_type = request.args.get("viz_type")
if not viz_type and datasource.default_endpoint:
return redirect(datasource.default_endpoint)
if not viz_type:
viz_type = "table"
obj = viz.viz_types[viz_type](
datasource,
form_data=request.args,
slice=slc)
if request.args.get("json") == "true":
status = 200
try:
payload = obj.get_json()
except Exception as e:
logging.exception(e)
if config.get("DEBUG"):
raise e
payload = str(e)
status = 500
resp = Response(
payload,
status=status,
mimetype="application/json")
return resp
elif request.args.get("csv") == "true":
status = 200
payload = obj.get_csv()
return Response(
payload,
status=status,
mimetype="application/csv")
else:
if request.args.get("standalone") == "true":
template = "caravel/standalone.html"
else:
template = "caravel/explore.html"
resp = self.render_template(template, viz=obj, slice=slc)
try:
pass
except Exception as e:
if config.get("DEBUG"):
raise(e)
return Response(
str(e),
status=500,
mimetype="application/json")
return resp
def save(self, args, slc):
"""Saves (inserts or overwrite a slice) """
session = db.session()
slice_name = args.get('slice_name')
action = args.get('action')
# TODO use form processing form wtforms
d = args.to_dict(flat=False)
del d['action']
del d['previous_viz_type']
as_list = ('metrics', 'groupby', 'columns')
for k in d:
v = d.get(k)
if k in as_list and not isinstance(v, list):
d[k] = [v] if v else []
if k not in as_list and isinstance(v, list):
d[k] = v[0]
table_id = druid_datasource_id = None
datasource_type = args.get('datasource_type')
if datasource_type in ('datasource', 'druid'):
druid_datasource_id = args.get('datasource_id')
elif datasource_type == 'table':
table_id = args.get('datasource_id')
if action == "save":
slc = models.Slice()
msg = "Slice [{}] has been saved".format(slice_name)
elif action == "overwrite":
msg = "Slice [{}] has been overwritten".format(slice_name)
slc.params = json.dumps(d, indent=4, sort_keys=True)
slc.datasource_name = args.get('datasource_name')
slc.viz_type = args.get('viz_type')
slc.druid_datasource_id = druid_datasource_id
slc.table_id = table_id
slc.datasource_type = datasource_type
slc.slice_name = slice_name
if action == "save":
session.add(slc)
elif action == "overwrite":
session.merge(slc)
session.commit()
flash(msg, "info")
return redirect(slc.slice_url)
@has_access
@expose("/checkbox/<model_view>/<id_>/<attr>/<value>", methods=['GET'])
def checkbox(self, model_view, id_, attr, value):
"""endpoint for checking/unchecking any boolean in a sqla model"""
model = None
if model_view == 'TableColumnInlineView':
model = models.TableColumn
elif model_view == 'DruidColumnInlineView':
model = models.DruidColumn
obj = db.session.query(model).filter_by(id=id_).first()
if obj:
setattr(obj, attr, value == 'true')
db.session.commit()
return Response("OK", mimetype="application/json")
@has_access
@expose("/activity_per_day")
def activity_per_day(self):
"""endpoint to power the calendar heatmap on the welcome page"""
Log = models.Log # noqa
qry = (
db.session
.query(
Log.dt,
sqla.func.count())
.group_by(Log.dt)
.all()
)
payload = {str(time.mktime(dt.timetuple())): ccount for dt, ccount in qry if dt}
return Response(json.dumps(payload), mimetype="application/json")
@has_access
@expose("/save_dash/<dashboard_id>/", methods=['GET', 'POST'])
def save_dash(self, dashboard_id):
"""Save a dashboard's metadata"""
data = json.loads(request.form.get('data'))
positions = data['positions']
slice_ids = [int(d['slice_id']) for d in positions]
session = db.session()
Dash = models.Dashboard # noqa
dash = session.query(Dash).filter_by(id=dashboard_id).first()
dash.slices = [o for o in dash.slices if o.id in slice_ids]
dash.position_json = json.dumps(data['positions'], indent=4)
md = dash.metadata_dejson
if 'filter_immune_slices' not in md:
md['filter_immune_slices'] = []
md['expanded_slices'] = data['expanded_slices']
dash.json_metadata = json.dumps(md, indent=4)
dash.css = data['css']
session.merge(dash)
session.commit()
session.close()
return "SUCCESS"
@has_access
@expose("/testconn", methods=["POST", "GET"])
def testconn(self):
"""Tests a sqla connection"""
try:
uri = request.form.get('uri')
engine = create_engine(uri)
engine.connect()
return json.dumps(engine.table_names(), indent=4)
except Exception:
return Response(
traceback.format_exc(),
status=500,
mimetype="application/json")
@expose("/favstar/<class_name>/<obj_id>/<action>/")
def favstar(self, class_name, obj_id, action):
session = db.session()
FavStar = models.FavStar
count = 0
favs = session.query(FavStar).filter_by(
class_name=class_name, obj_id=obj_id, user_id=g.user.id).all()
if action == 'select':
if not favs:
session.add(
FavStar(
class_name=class_name, obj_id=obj_id, user_id=g.user.id,
dttm=datetime.now()))
count = 1
elif action == 'unselect':
for fav in favs:
session.delete(fav)
else:
count = len(favs)
session.commit()
return Response(
json.dumps({'count': count}),
mimetype="application/json")
@has_access
@expose("/dashboard/<dashboard_id>/")
def dashboard(self, dashboard_id):
"""Server side rendering for a dashboard"""
session = db.session()
qry = session.query(models.Dashboard)
if dashboard_id.isdigit():
qry = qry.filter_by(id=int(dashboard_id))
else:
qry = qry.filter_by(slug=dashboard_id)
templates = session.query(models.CssTemplate).all()
dash = qry.first()
# Hack to log the dashboard_id properly, even when getting a slug
@log_this
def dashboard(**kwargs): # noqa
pass
dashboard(dashboard_id=dash.id)
pos_dict = {}
if dash.position_json:
pos_dict = {
int(o['slice_id']): o
for o in json.loads(dash.position_json)}
return self.render_template(
"caravel/dashboard.html", dashboard=dash,
templates=templates,
pos_dict=pos_dict)
@has_access
@expose("/sql/<database_id>/")
@log_this
def sql(self, database_id):
mydb = db.session.query(
models.Database).filter_by(id=database_id).first()
engine = mydb.get_sqla_engine()
tables = engine.table_names()
table_name = request.args.get('table_name')
return self.render_template(
"caravel/sql.html",
tables=tables,
table_name=table_name,
db=mydb)
@has_access
@expose("/table/<database_id>/<table_name>/")
@log_this
def table(self, database_id, table_name):
mydb = db.session.query(
models.Database).filter_by(id=database_id).first()
cols = mydb.get_columns(table_name)
df = pd.DataFrame([(c['name'], c['type']) for c in cols])
df.columns = ['col', 'type']
return self.render_template(
"caravel/ajah.html",
content=df.to_html(
index=False,
na_rep='',
classes=(
"dataframe table table-striped table-bordered "
"table-condensed sql_results")))
@has_access
@expose("/select_star/<database_id>/<table_name>/")
@log_this
def select_star(self, database_id, table_name):
mydb = db.session.query(
models.Database).filter_by(id=database_id).first()
t = mydb.get_table(table_name)
fields = ", ".join(
[c.name for c in t.columns] or "*")
s = "SELECT\n{}\nFROM {}".format(fields, table_name)
return self.render_template(
"caravel/ajah.html",
content=s
)
@has_access
@expose("/runsql/", methods=['POST', 'GET'])
@log_this
def runsql(self):
"""Runs arbitrary sql and returns and html table"""
session = db.session()
limit = 1000
data = json.loads(request.form.get('data'))
sql = data.get('sql')
database_id = data.get('database_id')
mydb = session.query(models.Database).filter_by(id=database_id).first()
content = ""
if mydb:
eng = mydb.get_sqla_engine()
if limit:
sql = sql.strip().strip(';')
qry = (
select('*')
.select_from(TextAsFrom(text(sql), ['*']).alias('inner_qry'))
.limit(limit)
)
sql = str(qry.compile(eng, compile_kwargs={"literal_binds": True}))
try:
df = pd.read_sql_query(sql=sql, con=eng)
content = df.to_html(
index=False,
na_rep='',
classes=(
"dataframe table table-striped table-bordered "
"table-condensed sql_results"))
except Exception as e:
content = (
'<div class="alert alert-danger">'
"{}</div>"
).format(e.message)
session.commit()
return content
@has_access
@expose("/refresh_datasources/")
def refresh_datasources(self):
"""endpoint that refreshes druid datasources metadata"""
session = db.session()
for cluster in session.query(models.DruidCluster).all():
try:
cluster.refresh_datasources()
except Exception as e:
flash(
"Error while processing cluster '{}'\n{}".format(
cluster, str(e)),
"danger")
logging.exception(e)
return redirect('/druidclustermodelview/list/')
cluster.metadata_last_refreshed = datetime.now()
flash(
"Refreshed metadata from cluster "
"[" + cluster.cluster_name + "]",
'info')
session.commit()
return redirect("/datasourcemodelview/list/")
@expose("/autocomplete/<datasource>/<column>/")
def autocomplete(self, datasource, column):
"""used for filter autocomplete"""
client = utils.get_pydruid_client()
top = client.topn(
datasource=datasource,
granularity='all',
intervals='2013-10-04/2020-10-10',
aggregations={"count": doublesum("count")},
dimension=column,
metric='count',
threshold=1000,
)
values = sorted([d[column] for d in top[0]['result']])
return json.dumps(values)
@app.errorhandler(500)
def show_traceback(self):
if config.get("SHOW_STACKTRACE"):
error_msg = traceback.format_exc()
else:
error_msg = "FATAL ERROR\n"
error_msg = (
"Stacktrace is hidden. Change the SHOW_STACKTRACE "
"configuration setting to enable it")
return render_template(
'caravel/traceback.html',
error_msg=error_msg,
title=ascii_art.stacktrace,
art=ascii_art.error), 500
@has_access
@expose("/welcome")
def welcome(self):
"""Personalized welcome page"""
return self.render_template('caravel/welcome.html', utils=utils)
appbuilder.add_view_no_menu(Caravel)
if config['DRUID_IS_ACTIVE']:
appbuilder.add_link(
"Refresh Druid Metadata",
href='/caravel/refresh_datasources/',
category='Sources',
category_icon='fa-database',
icon="fa-cog")
class CssTemplateModelView(CaravelModelView, DeleteMixin):
datamodel = SQLAInterface(models.CssTemplate)
list_columns = ['template_name']
edit_columns = ['template_name', 'css']
add_columns = edit_columns
appbuilder.add_separator("Sources")
appbuilder.add_view(
CssTemplateModelView,
"CSS Templates",
icon="fa-css3",
category="Sources",
category_icon='')
# ---------------------------------------------------------------------
# Redirecting URL from previous names
class RegexConverter(BaseConverter):
def __init__(self, url_map, *items):
super(RegexConverter, self).__init__(url_map)
self.regex = items[0]
app.url_map.converters['regex'] = RegexConverter
@app.route('/<regex("panoramix\/.*"):url>')
def panoramix(url): # noqa
return redirect(request.full_path.replace('panoramix', 'caravel'))
@app.route('/<regex("dashed\/.*"):url>')
def dashed(url): # noqa
return redirect(request.full_path.replace('dashed', 'caravel'))
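
# For example (illustrative): a request to /panoramix/dashboard/1/ is
# redirected to /caravel/dashboard/1/, and /dashed/sql/2/ to /caravel/sql/2/.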
# ---------------------------------------------------------------------
8c97fb5fbb5d2e6ce88af7c8744cf8535a20046c | 669 | py | Python | mundo3/ex072.py | Igor3550/Exercicios-de-python | e0f6e043df4f0770ac15968485fbb19698b4ac6b | [
"MIT"
] | null | null | null | mundo3/ex072.py | Igor3550/Exercicios-de-python | e0f6e043df4f0770ac15968485fbb19698b4ac6b | [
"MIT"
] | null | null | null | mundo3/ex072.py | Igor3550/Exercicios-de-python | e0f6e043df4f0770ac15968485fbb19698b4ac6b | [
"MIT"
] | null | null | null | # write a program that has a tuple fully populated with the numbers from zero to twenty spelled out
# the program should read a number from the keyboard (between 0 and 20) and display it spelled out
extenso = ('zero', 'um', 'dois', 'tres', 'quatro', 'cinco', 'seis', 'sete', 'oito', 'nove', 'dez', 'onze', 'doze',
'treze', 'quatorze', 'quinze', 'dezeseis', 'dezessete', 'dezoito', 'dezenove', 'vinte',)
n = int(input('Digite um numero entre 0 e 20: '))
while True:
if (n >= 0) and (n <= 20):
print(f'{n} por extenso é: {extenso[n]}')
break
else:
n = int(input('Tente novamente, Digite um numero entre 0 e 20: '))
print('FIM!')
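# sample run (illustrative): entering 15 prints "15 por extenso é: quinze"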
| 41.8125 | 114 | 0.61136 | 98 | 669 | 4.173469 | 0.653061 | 0.07335 | 0.04401 | 0.09291 | 0.112469 | 0.112469 | 0.112469 | 0 | 0 | 0 | 0 | 0.020992 | 0.216741 | 669 | 15 | 115 | 44.6 | 0.759542 | 0.284006 | 0 | 0 | 0 | 0 | 0.466387 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.2 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8c9804afbad1deabf84940b1a24e9adc4e13b2e5 | 4,658 | py | Python | conanfile.py | biovault/nptsne | 9f70726d26ae49a0c5f8c6fe3f081234dbfa69ab | [
"Apache-2.0"
] | 30 | 2019-10-25T06:09:38.000Z | 2022-03-12T08:29:17.000Z | conanfile.py | biovault/nptsne | 9f70726d26ae49a0c5f8c6fe3f081234dbfa69ab | [
"Apache-2.0"
] | 13 | 2019-08-07T16:57:19.000Z | 2022-03-13T10:26:18.000Z | conanfile.py | biovault/nptsne | 9f70726d26ae49a0c5f8c6fe3f081234dbfa69ab | [
"Apache-2.0"
] | 2 | 2019-11-11T13:07:30.000Z | 2020-03-11T06:00:16.000Z | # -*- coding: utf-8 -*-
from conans import ConanFile, CMake, tools
import os
import sys
import json
# Python version for wheel building
with open(os.path.join(os.path.dirname(__file__), "version.txt")) as fp:
__version__ = fp.read().strip()
__py_version__ = "{}.{}".format(sys.version_info.major, sys.version_info.minor)
__py_tag__ = "cp{}{}".format(sys.version_info.major, sys.version_info.minor)
class NptsneConan(ConanFile):
name = "nptsne"
# branch = "release/1.0.0"
version = __version__
description = "nptsne is a numpy compatible python binary package that offers a number of APIs for fast tSNE calculation."
topics = ("python", "analysis", "n-dimensional", "tSNE")
url = "https://github.com/biovault/nptsne"
author = "B. van Lew <b.van_lew@lumc.nl>" #conanfile author
license = "MIT" # License for packaged library; please use SPDX Identifiers https://spdx.org/licenses/
exports = ["LICENSE.md", "version.txt"] # Packages the license for the conanfile.py
generators = "cmake"
default_user = "lkeb"
default_channel = "stable"
# Options may need to change depending on the packaged library
settings = {"os": None, "build_type": None, "compiler": None, "arch": None}
options = {"shared": [True, False], "fPIC": [True, False], "python_version": "ANY"}
default_options = {"shared": True, "fPIC": True, "python_version": __py_version__}
exports_sources = "*"
_source_subfolder = name
requires = (
"HDILib/1.2.1@biovault/stable"
)
def system_requirements(self):
if tools.os_info.is_linux:
if tools.os_info.with_apt:
installer = tools.SystemPackageTool()
installer.install('liblz4-dev')
# Centos like: -See prepare_build_linux.sh
if tools.os_info.is_macos:
installer = tools.SystemPackageTool()
installer.install('libomp')
installer.install('lz4')
def configure(self):
self.options["HDILib"].shared = False
def config_options(self):
if self.settings.os == 'Windows':
del self.options.fPIC
def package_id(self):
self.info.options.python_version = "{}.{}".format(
sys.version_info.major, sys.version_info.minor)
def _configure_cmake(self):
if self.settings.os == "Macos":
cmake = CMake(self, generator='Xcode')
else:
cmake = CMake(self)
if self.settings.os == "Windows" and self.options.shared:
cmake.definitions["CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS"] = True
cmake.definitions["BUILD_PYTHON_VERSION"] = __py_version__
cmake.definitions["PYBIND11_PYTHON_VERSION"] = __py_version__
cmake.definitions["CMAKE_INSTALL_PREFIX"] = os.path.join(self.package_folder)
cmake.configure()
cmake.verbose = True
return cmake
def build(self):
# 1.) build the python extension
cmake = self._configure_cmake()
cmake.build()
# 2.) install the python binary extension and dependencies
# into a dist directory under _package
cmake.install()
# 3.) set the platform name
plat_names = {'Windows': 'win_amd64', 'Linux': 'linux_x86_64', "Macos": 'macosx-10.6-intel'}
if self.settings.os == "Macos" or self.settings.os == "Linux":
self.run('ls -l', cwd=os.path.join(self.package_folder, "_package"))
# 4.) Make the python wheel from the _package using python setup.py
self.run('python setup.py bdist_wheel --plat-name={0} --dist-dir={1} --python-tag={2}'.format(
plat_names[str(self.settings.os)],
os.path.join(self.package_folder, 'dist'),
__py_tag__
), cwd=os.path.join(self.package_folder, "_package"))
def package(self):
self.copy(pattern="LICENSE", dst="licenses", src=self._source_subfolder)
# If the CMakeLists.txt has a proper install method, the steps below may be redundant
# If so, you can just remove the lines below
self.copy("*.h", dst="include", keep_path=True)
self.copy("*.hpp", dst="include", keep_path=True)
self.copy("*.dll", dst="bin", keep_path=False)
self.copy("*.so", dst="lib", keep_path=False)
self.copy("*.dylib", dst="lib", keep_path=False)
self.copy("*.a", dst="lib", keep_path=False)
self.copy("*.lib", dst="lib", keep_path=False)
self.copy("*.whl", dst="dist", keep_path=False)
def package_info(self):
self.cpp_info.libs = tools.collect_libs(self)
| 41.221239 | 126 | 0.627737 | 587 | 4,658 | 4.792164 | 0.367973 | 0.025595 | 0.029861 | 0.030217 | 0.274796 | 0.214006 | 0.137931 | 0.078208 | 0.051902 | 0.03626 | 0 | 0.007591 | 0.236368 | 4,658 | 112 | 127 | 41.589286 | 0.783244 | 0.145556 | 0 | 0.02439 | 0 | 0.012195 | 0.194501 | 0.020938 | 0 | 0 | 0 | 0 | 0 | 1 | 0.097561 | false | 0 | 0.04878 | 0 | 0.378049 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8c992cc8c9268f039bdfc880b611ca9a28b18864 | 1,503 | py | Python | steiner/preselection/preselection_pack.py | ASchidler/pace17 | 755e9d652c7d4d9dd1f71fb508ebf773efee8488 | [
"MIT"
] | 1 | 2019-01-15T16:58:03.000Z | 2019-01-15T16:58:03.000Z | steiner/preselection/preselection_pack.py | ASchidler/pace17 | 755e9d652c7d4d9dd1f71fb508ebf773efee8488 | [
"MIT"
] | null | null | null | steiner/preselection/preselection_pack.py | ASchidler/pace17 | 755e9d652c7d4d9dd1f71fb508ebf773efee8488 | [
"MIT"
] | null | null | null | from sys import maxint
import nearest_vertex as nv
import reduction.degree as dg
import short_links as sl
class NvSlPack:
"""Combines terminal contractions, nearest vertex and short links reduction and reiterates reduction until
failure"""
def __init__(self, threshold=0.01):
self._sl = sl.ShortLinkPreselection()
self._nv = nv.NearestVertex()
self._dg = dg.DegreeReduction()
self._threshold = threshold
self._counter = maxint / 2
def reduce(self, steiner, prev_cnt, curr_cnt):
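        # The reductions below are only rerun once enough eliminations have
        # accumulated: the changes since the last run must reach
        # threshold * |E|. Starting the counter at maxint / 2 forces the
        # first call to run.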
self._counter += prev_cnt
if self._counter < self._threshold * len(steiner.graph.edges):
return 0
else:
self._counter = 0
total = 0
this_run = -1
while this_run != 0:
this_run = 0
this_run += self._nv.reduce(steiner, prev_cnt, curr_cnt)
this_run += self._sl.reduce(steiner, prev_cnt, curr_cnt)
if this_run > 0:
this_run += self._dg.reduce(steiner, prev_cnt, curr_cnt)
total += this_run
if total > 0:
steiner._voronoi_areas = None
steiner._lengths = {}
steiner._closest_terminals = None
return total
def post_process(self, solution):
result1 = self._nv.post_process(solution)
result2 = self._sl.post_process(result1[0])
result3 = self._dg.post_process(result2[0])
return result3[0], result1[1] or result2[1] or result3[1]
| 29.470588 | 110 | 0.615436 | 187 | 1,503 | 4.695187 | 0.336898 | 0.063781 | 0.063781 | 0.082005 | 0.168565 | 0.135535 | 0 | 0 | 0 | 0 | 0 | 0.02552 | 0.296075 | 1,503 | 50 | 111 | 30.06 | 0.804348 | 0.073852 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.111111 | 0 | 0.305556 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8c9a7ee6fd52e7b0f0b65a45e2872167b74f87ae | 260 | py | Python | interactivedashplot.py | asen932/dash-tutorials | 28611dabe35a11efbf67bfda167226ceea01aa5a | [
"MIT"
] | null | null | null | interactivedashplot.py | asen932/dash-tutorials | 28611dabe35a11efbf67bfda167226ceea01aa5a | [
"MIT"
] | null | null | null | interactivedashplot.py | asen932/dash-tutorials | 28611dabe35a11efbf67bfda167226ceea01aa5a | [
"MIT"
] | null | null | null | import numpy, pandas, plotly
# generating random data for the graph
data = pandas.DataFrame(numpy.random.randn(100,4),columns='A B C D'.split())
plotly.offline.plot([{
    'x': data.index,
    'y': data[col],
    'name': col
} for col in data.columns])
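# plotly.offline.plot writes a temp-plot.html file and opens it in the browser
# by default; the comprehension builds one line trace per DataFrame column.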
| 21.666667 | 76 | 0.653846 | 40 | 260 | 4.25 | 0.7 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018779 | 0.180769 | 260 | 11 | 77 | 23.636364 | 0.779343 | 0.138462 | 0 | 0 | 0 | 0 | 0.058559 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.142857 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8c9c3d6d513f20555733ff5412cfa9b1ffc91e4a | 475 | py | Python | 0201-0300/0213-House Robber II/0213-House Robber II.py | jiadaizhao/LeetCode | 4ddea0a532fe7c5d053ffbd6870174ec99fc2d60 | [
"MIT"
] | 49 | 2018-05-05T02:53:10.000Z | 2022-03-30T12:08:09.000Z | 0201-0300/0213-House Robber II/0213-House Robber II.py | jolly-fellow/LeetCode | ab20b3ec137ed05fad1edda1c30db04ab355486f | [
"MIT"
] | 11 | 2017-12-15T22:31:44.000Z | 2020-10-02T12:42:49.000Z | 0201-0300/0213-House Robber II/0213-House Robber II.py | jolly-fellow/LeetCode | ab20b3ec137ed05fad1edda1c30db04ab355486f | [
"MIT"
] | 28 | 2017-12-05T10:56:51.000Z | 2022-01-26T18:18:27.000Z | from typing import List

class Solution:
def rob(self, nums: List[int]) -> int:
def rob(start, end):
dp0 = dp1 = 0
for i in range(start, end + 1):
dp2 = max(dp1, dp0 + nums[i])
dp0 = dp1
dp1 = dp2
return dp1
if len(nums) == 0:
return 0
elif len(nums) == 1:
return nums[0]
else:
return max(rob(0, len(nums) - 2), rob(1, len(nums) - 1))
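
# Quick checks (illustrative): the houses form a circle, so the first and
# last house can never both be robbed.
# Solution().rob([2, 3, 2]) == 3     (rob only the middle house)
# Solution().rob([1, 2, 3, 1]) == 4  (rob the first and third houses)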
| 27.941176 | 68 | 0.406316 | 61 | 475 | 3.163934 | 0.42623 | 0.145078 | 0.082902 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.079681 | 0.471579 | 475 | 16 | 69 | 29.6875 | 0.689243 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0 | 0 | 0.466667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8ca24806898a75ed633e54bd29a7c6011d3d66ee | 6,745 | py | Python | django_pypayzen/tests/data.py | SamambaMan/django-payzen | 88b2df368bb7afe32a33ae398a8c858531647068 | [
"MIT"
] | null | null | null | django_pypayzen/tests/data.py | SamambaMan/django-payzen | 88b2df368bb7afe32a33ae398a8c858531647068 | [
"MIT"
] | null | null | null | django_pypayzen/tests/data.py | SamambaMan/django-payzen | 88b2df368bb7afe32a33ae398a8c858531647068 | [
"MIT"
] | null | null | null | import collections
url_exemple = "http://www.google.com/"
cards = [
{
'type': 'CB',
'card_number': '4970100000000000',
'behaviour': '3D-Secure',
'result': 'accepted'
},
{
'type': 'MasterCard',
'card_number': '5970100300000000',
'behaviour': '3D-Secure',
'result': 'accepted'
},
{
'type': 'Maestro',
'card_number': '5000550000000000',
'behaviour': '3D-Secure',
'result': 'accepted'
},
{
'type': 'CB',
'card_number': '4970100000000009',
'behaviour': '3D-Secure interactive',
'result': 'accepted'
},
{
'type': 'MasterCard',
'card_number': '5970100300000009',
'behaviour': '3D-Secure interactive',
'result': 'accepted'
},
{
'type': 'Maestro',
'card_number': '5000550000000009',
'behaviour': '3D-Secure interactive',
'result': 'accepted'
},
{
'type': 'CB',
'card_number': '4970100000000003',
'behaviour': 'Merchant without 3D-secure',
'result': 'accepted'
},
{
'type': 'MasterCard',
'card_number': '5970100300000003',
'behaviour': 'Merchant without 3D-secure',
'result': 'accepted'
},
{
'type': 'Maestro',
'card_number': '5000550000000003',
'behaviour': 'Merchant without 3D-secure',
'result': 'accepted'
},
{
'type': 'CB',
'card_number': '4970100000000001',
'behaviour': 'Buyer without 3D-secure',
'result': 'accepted'
},
{
'type': 'MasterCard',
'card_number': '5970100300000001',
'behaviour': 'Buyer without 3D-secure',
'result': 'accepted'
},
{
'type': 'Maestro',
'card_number': '5000550000000001',
'behaviour': 'Buyer without 3D-secure',
'result': 'accepted'
},
{
'type': 'CB',
'card_number': '4970100000000002',
'behaviour': 'Transaction to force',
'result': 'rejected'
},
{
'type': 'MasterCard',
'card_number': '5970100300000002',
'behaviour': 'Transaction to force',
'result': 'rejected'
},
{
'type': 'Maestro',
'card_number': '5000550000000002',
'behaviour': 'Transaction to force',
'result': 'rejected'
},
{
'type': 'CB',
'card_number': '4970100000000007',
'behaviour': 'Warranty = NO',
'result': 'accepted'
},
{
'type': 'MasterCard',
'card_number': '5970100300023006',
'behaviour': 'Warranty = NO',
'result': 'accepted'
},
{
'type': 'Maestro',
'card_number': '5000550000023006',
'behaviour': 'Warranty = NO',
'result': 'accepted'
},
{
'type': 'CB',
'card_number': '4970100000000097',
'behaviour': '3-D Secure authentication failed',
'result': 'rejected'
},
{
'type': 'MasterCard',
'card_number': '5970100300000097',
'behaviour': '3-D Secure authentication failed',
'result': 'rejected'
},
{
'type': 'Maestro',
'card_number': '5000550000000097',
'behaviour': '3-D Secure authentication failed',
'result': 'rejected'
},
{
'type': 'CB',
'card_number': '4970100000000098',
'behaviour': 'Card payment limit exceeded',
'result': 'rejected'
},
{
'type': 'MasterCard',
'card_number': '5970100300000098',
'behaviour': 'Card payment limit exceeded',
'result': 'rejected'
},
{
'type': 'Maestro',
'card_number': '5000550000000098',
'behaviour': 'Card payment limit exceeded',
'result': 'rejected'
},
{
'type': 'CB',
'card_number': '4970100000000099',
'behaviour': 'Wrong cryptogram',
'result': 'rejected'
},
{
'type': 'MasterCard',
'card_number': '5970100300000099',
'behaviour': 'Wrong cryptogram',
'result': 'rejected'
},
{
'type': 'Maestro',
'card_number': '5000550000000099',
'behaviour': 'Wrong cryptogram',
'result': 'rejected'
},
]
theme_args = collections.OrderedDict([
("success_footer_msg_return", "Success footer msg test"),
("cancel_footer_msg_return", "Cancel footer msg test"),
("secure_message", "Secure message test"),
("secure_message_register", "Secure message register test"),
("site_id_label", "Site ID label test"),
("css_for_payment", url_exemple+"payment.css"),
("css_for_payment_mobile", url_exemple+"mobile_payment.css"),
("header_for_mail", url_exemple+"mail_header.html"),
("footer_for_mail", url_exemple+"footer_mail.html"),
("shop_logo", url_exemple+"logo.png"),
])
payment_config_args = {
"first": 5000,
"count": 2,
"period": 5
}
payment_args = {
# Base fields
"vads_amount": "10000",
"vads_capture_delay": "2",
"vads_payment_cards": "CB;Visa",
"vads_return_mode": "NONE",
"vads_validation_mode": "1",
"vads_url_success": url_exemple,
"vads_url_referral": url_exemple,
"vads_url_refused": url_exemple,
"vads_url_cancel": url_exemple,
"vads_url_error": url_exemple,
"vads_url_return": url_exemple,
"vads_user_info": "Abbath Doom Occulta",
"vads_shop_name": "Immortal",
"vads_redirect_success_timeout": "1",
"vads_redirect_success_message": "Tragedies Blows At Horizon",
"vads_redirect_error_timeout": "1",
"vads_redirect_error_message": "At The Heart Of Winter",
# customer fields
"vads_cust_address": "Oeschstr.",
"vads_cust_address_number": "9",
"vads_cust_country": "GE",
"vads_cust_email": "test@nuclearblast.de",
"vads_cust_id": "1",
"vads_cust_name": "NUCLEAR BLAST",
"vads_cust_cell_phone": "+49 7162 9280-0",
"vads_cust_phone": "+49 7162 9280 26",
"vads_cust_title": "Guitarist",
"vads_cust_city": "Donzdorf",
"vads_cust_state": "Donzdorf",
"vads_cust_zip": "73072",
"vads_language": "fr",
# order fields
"vads_order_id": "1234567890",
"vads_order_info": "Order test info 1",
"vads_order_info2": "Order test info 2",
"vads_order_info3": "Order test info 3",
# shipping fields
"vads_ship_to_name": "NUCLEAR BLAST",
"vads_ship_to_street_number": "9",
"vads_ship_to_street": "Oeschstr. 9",
"vads_ship_to_street2": "...",
"vads_ship_to_zip": "73072",
"vads_ship_to_city": "Donzdorf",
"vads_ship_to_country": "GE",
"vads_ship_to_phone_num": "+49 7162 9280-0",
"vads_ship_to_state": "Donzdorf"
}
| 28.340336 | 66 | 0.564566 | 633 | 6,745 | 5.750395 | 0.260664 | 0.074176 | 0.074176 | 0.03956 | 0.461264 | 0.442582 | 0.304121 | 0.229121 | 0.126648 | 0 | 0 | 0.107019 | 0.271312 | 6,745 | 237 | 67 | 28.459916 | 0.633571 | 0.008302 | 0 | 0.355263 | 0 | 0 | 0.548773 | 0.041592 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.004386 | 0 | 0.004386 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8ca4057e1d2a3bba02a3817e5b84172765723a67 | 23,067 | py | Python | _delphi_utils_python/data_proc/geomap/geo_data_proc.py | jingjtang/covidcast-indicators | 34cb8786f78fbea2710b810a9500ee02c2379241 | [
"MIT"
] | null | null | null | _delphi_utils_python/data_proc/geomap/geo_data_proc.py | jingjtang/covidcast-indicators | 34cb8786f78fbea2710b810a9500ee02c2379241 | [
"MIT"
] | null | null | null | _delphi_utils_python/data_proc/geomap/geo_data_proc.py | jingjtang/covidcast-indicators | 34cb8786f78fbea2710b810a9500ee02c2379241 | [
"MIT"
] | null | null | null | """
Authors: Dmitry Shemetov @dshemetov, James Sharpnack @jsharpna
Intended execution:
cd _delphi_utils/data_proc/geomap
chmod u+x geo_data_proc.py
python geo_data_proc.py
"""
from io import BytesIO
from os import remove, listdir
from os.path import join, isfile
from zipfile import ZipFile
from pandas.core.frame import DataFrame
import requests
import pandas as pd
import numpy as np
# Source files
YEAR = 2019
INPUT_DIR = "./old_source_files"
OUTPUT_DIR = f"../../delphi_utils/data/{YEAR}"
FIPS_BY_ZIP_POP_URL = "https://www2.census.gov/geo/docs/maps-data/data/rel/zcta_county_rel_10.txt?#"
ZIP_HSA_HRR_URL = "https://atlasdata.dartmouth.edu/downloads/geography/ZipHsaHrr18.csv.zip"
ZIP_HSA_HRR_FILENAME = "ZipHsaHrr18.csv"
FIPS_MSA_URL = "https://www2.census.gov/programs-surveys/metro-micro/geographies/reference-files/2018/delineation-files/list1_Sep_2018.xls"
JHU_FIPS_URL = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/UID_ISO_FIPS_LookUp_Table.csv"
STATE_CODES_URL = "http://www2.census.gov/geo/docs/reference/state.txt?#"
FIPS_POPULATION_URL = f"https://www2.census.gov/programs-surveys/popest/datasets/2010-{YEAR}/counties/totals/co-est{YEAR}-alldata.csv"
FIPS_PUERTO_RICO_POPULATION_URL = "https://www2.census.gov/geo/docs/maps-data/data/rel/zcta_county_rel_10.txt?"
STATE_HHS_FILE = "hhs.txt"
# Out files
FIPS_STATE_OUT_FILENAME = "fips_state_table.csv"
FIPS_MSA_OUT_FILENAME = "fips_msa_table.csv"
FIPS_HRR_OUT_FILENAME = "fips_hrr_table.csv"
FIPS_ZIP_OUT_FILENAME = "fips_zip_table.csv"
FIPS_HHS_FILENAME = "fips_hhs_table.csv"
FIPS_POPULATION_OUT_FILENAME = "fips_pop.csv"
ZIP_HSA_OUT_FILENAME = "zip_hsa_table.csv"
ZIP_HRR_OUT_FILENAME = "zip_hrr_table.csv"
ZIP_FIPS_OUT_FILENAME = "zip_fips_table.csv"
ZIP_MSA_OUT_FILENAME = "zip_msa_table.csv"
ZIP_POPULATION_OUT_FILENAME = "zip_pop.csv"
ZIP_STATE_CODE_OUT_FILENAME = "zip_state_code_table.csv"
ZIP_HHS_FILENAME = "zip_hhs_table.csv"
STATE_OUT_FILENAME = "state_codes_table.csv"
STATE_HHS_OUT_FILENAME = "state_code_hhs_table.csv"
STATE_POPULATION_OUT_FILENAME = "state_pop.csv"
HHS_POPULATION_OUT_FILENAME = "hhs_pop.csv"
NATION_POPULATION_OUT_FILENAME = "nation_pop.csv"
JHU_FIPS_OUT_FILENAME = "jhu_uid_fips_table.csv"
def create_fips_zip_crosswalk():
"""Build (weighted) crosswalk tables for FIPS to ZIP and ZIP to FIPS."""
pop_df = pd.read_csv(FIPS_BY_ZIP_POP_URL).rename(columns={"POPPT": "pop"})
# Create the FIPS column by combining the state and county codes
pop_df["fips"] = pop_df["STATE"].astype(str).str.zfill(2) + pop_df["COUNTY"].astype(str).str.zfill(3)
# Create the ZIP column by adding leading zeros to the ZIP
pop_df["zip"] = pop_df["ZCTA5"].astype(str).str.zfill(5)
pop_df = pop_df[["zip", "fips", "pop"]]
# Find the population fractions (the heaviest computation, takes about a minute)
# Note that the denominator in the fractions is the source population
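    # e.g. (toy numbers, not from the census file): a county whose population
    # splits 800/200 across two ZIPs gets fips->zip weights 0.8 and 0.2,
    # while each zip->fips weight is normalized by that ZIP's own total.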
pop_df.set_index(["fips", "zip"], inplace=True)
fips_zip: DataFrame = pop_df.groupby("fips", as_index=False).apply(lambda g: g["pop"] / g["pop"].sum())
zip_fips: DataFrame = pop_df.groupby("zip", as_index=False).apply(lambda g: g["pop"] / g["pop"].sum())
# Rename and write to file
fips_zip = fips_zip.reset_index(level=["fips", "zip"]).rename(columns={"pop": "weight"}).query("weight > 0.0")
fips_zip.sort_values(["fips", "zip"]).to_csv(join(OUTPUT_DIR, FIPS_ZIP_OUT_FILENAME), index=False)
zip_fips = zip_fips.reset_index(level=["fips", "zip"]).rename(columns={"pop": "weight"}).query("weight > 0.0")
zip_fips.sort_values(["zip", "fips"]).to_csv(join(OUTPUT_DIR, ZIP_FIPS_OUT_FILENAME), index=False)
def create_zip_hsa_hrr_crosswalk():
"""Build a crosswalk table for ZIP to HSA and for ZIP to HRR."""
with ZipFile(BytesIO(requests.get(ZIP_HSA_HRR_URL).content)) as zipped_csv:
zip_df = pd.read_csv(zipped_csv.open(ZIP_HSA_HRR_FILENAME))
hsa_df = zip_df[["zipcode18", "hsanum"]].rename(columns={"zipcode18": "zip", "hsanum": "hsa"})
hsa_df["zip"] = hsa_df["zip"].astype(str).str.zfill(5)
hsa_df["hsa"] = hsa_df["hsa"].astype(str)
hsa_df.sort_values(["zip", "hsa"]).to_csv(join(OUTPUT_DIR, ZIP_HSA_OUT_FILENAME), index=False)
hrr_df = zip_df[["zipcode18", "hrrnum"]].rename(columns={"zipcode18": "zip", "hrrnum": "hrr"})
hrr_df["zip"] = hrr_df["zip"].astype(str).str.zfill(5)
hrr_df["hrr"] = hrr_df["hrr"].astype(str)
hrr_df.sort_values(["zip", "hrr"]).to_csv(join(OUTPUT_DIR, ZIP_HRR_OUT_FILENAME), index=False)
def create_fips_msa_crosswalk():
"""Build a crosswalk table for FIPS to MSA."""
# Requires xlrd.
msa_df = pd.read_excel(FIPS_MSA_URL, skiprows=2, skipfooter=4, dtype={"CBSA Code": int, "Metropolitan/Micropolitan Statistical Area": str, "FIPS State Code": str, "FIPS County Code": str}).rename(columns={"CBSA Code": "msa"})
msa_df = msa_df[msa_df["Metropolitan/Micropolitan Statistical Area"] == "Metropolitan Statistical Area"]
# Combine state and county codes into a single FIPS code
msa_df["fips"] = msa_df["FIPS State Code"].str.cat(msa_df["FIPS County Code"])
msa_df.sort_values(["fips", "msa"]).to_csv(join(OUTPUT_DIR, FIPS_MSA_OUT_FILENAME), columns=["fips", "msa"], index=False)
def create_jhu_uid_fips_crosswalk():
"""Build a crosswalk table from JHU UID to FIPS."""
# These are hand modifications that need to be made to the translation
# between JHU UID and FIPS. See below for the special cases information
# https://cmu-delphi.github.io/delphi-epidata/api/covidcast-signals/jhu-csse.html#geographical-exceptions
hand_additions = pd.DataFrame(
[
{
"jhu_uid": "84070002",
"fips": "25007", # Split aggregation of Dukes and Nantucket, Massachusetts
"weight": 16535 / (16535 + 10172), # Population: 16535
},
{
"jhu_uid": "84070002",
"fips": "25019",
"weight": 10172 / (16535 + 10172), # Population: 10172
},
{
"jhu_uid": "84070003",
"fips": "29095", # Kansas City, Missouri
"weight": 674158 / 1084897, # Population: 674158
},
{
"jhu_uid": "84070003",
"fips": "29165",
"weight": 89322 / 1084897, # Population: 89322
},
{
"jhu_uid": "84070003",
"fips": "29037",
"weight": 99478 / 1084897, # Population: 99478
},
{
"jhu_uid": "84070003",
"fips": "29047",
"weight": 221939 / 1084897, # Population: 221939
},
# Kusilvak, Alaska
{"jhu_uid": "84002158", "fips": "02270", "weight": 1.0},
# Oglala Lakota
{"jhu_uid": "84046102", "fips": "46113", "weight": 1.0},
# Aggregate Utah territories into a "State FIPS"
{"jhu_uid": "84070015", "fips": "49000", "weight": 1.0},
{"jhu_uid": "84070016", "fips": "49000", "weight": 1.0},
{"jhu_uid": "84070017", "fips": "49000", "weight": 1.0},
{"jhu_uid": "84070018", "fips": "49000", "weight": 1.0},
{"jhu_uid": "84070019", "fips": "49000", "weight": 1.0},
{"jhu_uid": "84070020", "fips": "49000", "weight": 1.0},
]
)
# Map the Unassigned category to a custom megaFIPS XX000
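    # (the last two digits of each UID are the state FIPS code, e.g.
    # JHU UID 84090001 maps to the megaFIPS "01000")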
unassigned_states = pd.DataFrame(
{"jhu_uid": str(x), "fips": str(x)[-2:].ljust(5, "0"), "weight": 1.0}
for x in range(84090001, 84090057)
)
# Map the Out of State category to a custom megaFIPS XX000
out_of_state = pd.DataFrame(
{"jhu_uid": str(x), "fips": str(x)[-2:].ljust(5, "0"), "weight": 1.0}
for x in range(84080001, 84080057)
)
    # Map the Unassigned and Out of State categories to the custom megaFIPS 72000
puerto_rico_unassigned = pd.DataFrame(
[
{"jhu_uid": "63072888", "fips": "72000", "weight": 1.0},
{"jhu_uid": "63072999", "fips": "72000", "weight": 1.0},
]
)
cruise_ships = pd.DataFrame(
[
{"jhu_uid": "84088888", "fips": "88888", "weight": 1.0},
{"jhu_uid": "84099999", "fips": "99999", "weight": 1.0},
]
)
jhu_df = pd.read_csv(JHU_FIPS_URL, dtype={"UID": str, "FIPS": str}).query("Country_Region == 'US'")
jhu_df = jhu_df.rename(columns={"UID": "jhu_uid", "FIPS": "fips"}).dropna(subset=["fips"])
# FIPS Codes that are just two digits long should be zero filled on the right.
# These are US state codes (XX) and the territories Guam (66), Northern Mariana Islands (69),
# Virgin Islands (78), and Puerto Rico (72).
fips_territories = jhu_df["fips"].str.len() <= 2
jhu_df.loc[fips_territories, "fips"] = jhu_df.loc[fips_territories, "fips"].str.ljust(5, "0")
# Drop the JHU UIDs that were hand-modified
manual_correction_ids = pd.concat([hand_additions, unassigned_states, out_of_state, puerto_rico_unassigned, cruise_ships])["jhu_uid"]
jhu_df.drop(jhu_df.index[jhu_df["jhu_uid"].isin(manual_correction_ids)], inplace=True)
# Add weights of 1.0 to everything not in hand additions, then merge in hand-additions
# Finally, zero fill FIPS
jhu_df["weight"] = 1.0
jhu_df = pd.concat([jhu_df, hand_additions, unassigned_states, out_of_state, puerto_rico_unassigned])
jhu_df["fips"] = jhu_df["fips"].astype(int).astype(str).str.zfill(5)
jhu_df.sort_values(["jhu_uid", "fips"]).to_csv(join(OUTPUT_DIR, JHU_FIPS_OUT_FILENAME), columns=["jhu_uid", "fips", "weight"], index=False)
def create_state_codes_crosswalk():
"""Build a State ID -> State Name -> State code crosswalk file."""
df = pd.read_csv(STATE_CODES_URL, delimiter="|").drop(columns="STATENS").rename(columns={"STATE": "state_code", "STUSAB": "state_id", "STATE_NAME": "state_name"})
df["state_code"] = df["state_code"].astype(str).str.zfill(2)
df["state_id"] = df["state_id"].astype(str).str.lower()
# Add a few extra US state territories manually
territories = pd.DataFrame(
[
{
"state_code": "70",
"state_name": "Republic of Palau",
"state_id": "pw",
},
{
"state_code": "68",
"state_name": "Marshall Islands",
"state_id": "mh",
},
{
"state_code": "64",
"state_name": "Federated States of Micronesia",
"state_id": "fm",
},
]
)
df = pd.concat((df, territories))
df.sort_values("state_code").to_csv(join(OUTPUT_DIR, STATE_OUT_FILENAME), index=False)
def create_state_hhs_crosswalk():
"""Build a state to HHS crosswalk."""
if not isfile(join(OUTPUT_DIR, STATE_OUT_FILENAME)):
create_state_codes_crosswalk()
ss_df = pd.read_csv(join(OUTPUT_DIR, STATE_OUT_FILENAME), dtype={"state_code": str, "state_name": str, "state_id": str})
with open(STATE_HHS_FILE) as temp_file:
temp = temp_file.readlines()
# Process text from https://www.hhs.gov/about/agencies/iea/regional-offices/index.html
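    # The text alternates "Region N ..." header lines with comma-separated
    # state lists; the steps below pair each region number with the states
    # that follow it and flatten the result into (hhs, state_name) tuples.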
temp = [int(s[7:9]) if "Region" in s else s for s in temp]
temp = [s.strip().split(", ") if isinstance(s, str) else s for s in temp]
temp = {temp[i]: temp[i + 1] for i in range(0, len(temp), 2)}
temp = {key: [x.lstrip(" and") for x in temp[key]] for key in temp}
temp = [[(key, x) for x in temp[key]] for key in temp]
hhs_state_pairs = [x for y in temp for x in y]
# Make naming adjustments
hhs_state_pairs.remove((2, "the Virgin Islands"))
hhs_state_pairs.append((2, "U.S. Virgin Islands"))
hhs_state_pairs.remove((9, "Commonwealth of the Northern Mariana Islands"))
hhs_state_pairs.append((9, "Northern Mariana Islands"))
# Make dataframe
hhs_df = pd.DataFrame(hhs_state_pairs, columns=["hhs", "state_name"], dtype=str)
ss_df = ss_df.merge(hhs_df, on="state_name", how="left").dropna()
ss_df.sort_values("state_code").to_csv(join(OUTPUT_DIR, STATE_HHS_OUT_FILENAME), columns=["state_code", "hhs"], index=False)
def create_fips_population_table():
"""Build a table of populations by FIPS county codes.
Uses US Census Bureau population data as determined by the YEAR variable, with 2010 population data for Puerto Rico and a few exceptions.
"""
census_pop = pd.read_csv(FIPS_POPULATION_URL, encoding="ISO-8859-1")
census_pop["fips"] = census_pop.apply(lambda x: f"{x['STATE']:02d}{x['COUNTY']:03d}", axis=1)
census_pop = census_pop.rename(columns={f"POPESTIMATE{YEAR}": "pop"})[["fips", "pop"]]
# Set population for Dukes and Nantucket combo county
dukes_pop = int(census_pop.loc[census_pop["fips"] == "25007", "pop"])
nantu_pop = int(census_pop.loc[census_pop["fips"] == "25019", "pop"])
hand_modified_pop = pd.DataFrame(
[
# Dukes and Nantucket combo county
{"fips": "70002", "pop": dukes_pop + nantu_pop},
# Kansas City
{"fips": "70003", "pop": 491918},
]
)
census_pop = pd.concat([census_pop, hand_modified_pop])
census_pop = census_pop.reset_index(drop=True)
# Get the file with Puerto Rico populations
df_pr = pd.read_csv(FIPS_PUERTO_RICO_POPULATION_URL).rename(columns={"POPPT": "pop"})
df_pr["fips"] = df_pr["STATE"].astype(str).str.zfill(2) + df_pr["COUNTY"].astype(str).str.zfill(3)
df_pr = df_pr[["fips", "pop"]]
# Create the Puerto Rico megaFIPS
df_pr = df_pr[df_pr["fips"].isin([str(x) for x in range(72000, 72999)])]
df_pr = pd.concat([df_pr, pd.DataFrame([{"fips": "72000", "pop": df_pr["pop"].sum()}])])
# Fill the missing Puerto Rico data with 2010 information
df_pr = df_pr.groupby("fips").sum().reset_index()
df_pr = df_pr[~df_pr["fips"].isin(census_pop["fips"])]
census_pop_pr = pd.concat([census_pop, df_pr])
# Filled from https://www.census.gov/data/tables/2010/dec/2010-island-areas.html
territories_pop = pd.DataFrame(
{
"fips": ["60010", "60020", "60030", "60040", "60050", "66010", "78010", "78020", "78030", "69085", "69100", "69110", "69120"],
"pop": [23030, 1143, 0, 17, 31329, 159358, 50601, 4170, 51634, 0, 2527, 48220, 3136],
}
)
census_pop_territories = pd.concat([census_pop_pr, territories_pop])
non_megafips_mask = ~census_pop_territories.fips.str.endswith("000")
census_pop_territories = census_pop_territories.loc[non_megafips_mask]
census_pop_territories.sort_values("fips").to_csv(join(OUTPUT_DIR, FIPS_POPULATION_OUT_FILENAME), index=False)
def create_state_population_table():
"""Build a state population table."""
if not isfile(join(OUTPUT_DIR, FIPS_POPULATION_OUT_FILENAME)):
create_fips_population_table()
if not isfile(join(OUTPUT_DIR, FIPS_STATE_OUT_FILENAME)):
derive_fips_state_crosswalk()
census_pop = pd.read_csv(join(OUTPUT_DIR, FIPS_POPULATION_OUT_FILENAME), dtype={"fips": str, "pop": int})
state: DataFrame = pd.read_csv(join(OUTPUT_DIR, FIPS_STATE_OUT_FILENAME), dtype=str)
state_pop = state.merge(census_pop, on="fips").groupby(["state_code", "state_id", "state_name"], as_index=False).sum()
state_pop.sort_values("state_code").to_csv(join(OUTPUT_DIR, STATE_POPULATION_OUT_FILENAME), index=False)
def create_hhs_population_table():
"""Build an HHS population table."""
if not isfile(join(OUTPUT_DIR, STATE_POPULATION_OUT_FILENAME)):
create_state_population_table()
if not isfile(join(OUTPUT_DIR, STATE_HHS_OUT_FILENAME)):
create_state_hhs_crosswalk()
state_pop = pd.read_csv(join(OUTPUT_DIR, STATE_POPULATION_OUT_FILENAME), dtype={"state_code": str, "hhs": int}, usecols=["state_code", "pop"])
state_hhs = pd.read_csv(join(OUTPUT_DIR, STATE_HHS_OUT_FILENAME), dtype=str)
hhs_pop = state_pop.merge(state_hhs, on="state_code").groupby("hhs", as_index=False).sum()
hhs_pop.sort_values("hhs").to_csv(join(OUTPUT_DIR, HHS_POPULATION_OUT_FILENAME), index=False)
def create_nation_population_table():
"""Build a nation population table."""
if not isfile(join(OUTPUT_DIR, FIPS_POPULATION_OUT_FILENAME)):
create_fips_population_table()
census_pop = pd.read_csv(join(OUTPUT_DIR, FIPS_POPULATION_OUT_FILENAME), dtype={"fips": str, "pop": int})
nation_pop = pd.DataFrame({"nation": ["us"], "pop": [census_pop["pop"].sum()]})
nation_pop.to_csv(join(OUTPUT_DIR, NATION_POPULATION_OUT_FILENAME), index=False)
def derive_zip_population_table():
"""Build a table of populations by ZIP code by translating from FIPS populations."""
if not isfile(join(OUTPUT_DIR, FIPS_POPULATION_OUT_FILENAME)):
create_fips_population_table()
if not isfile(join(OUTPUT_DIR, FIPS_ZIP_OUT_FILENAME)):
create_fips_zip_crosswalk()
census_pop = pd.read_csv(join(OUTPUT_DIR, FIPS_POPULATION_OUT_FILENAME), dtype={"fips": str, "pop": int})
fz_df = pd.read_csv(join(OUTPUT_DIR, FIPS_ZIP_OUT_FILENAME), dtype={"fips": str, "zip": str, "weight": float})
df = census_pop.merge(fz_df, on="fips", how="left")
df["pop"] = df["pop"].multiply(df["weight"], axis=0)
df = df.drop(columns=["fips", "weight"]).groupby("zip").sum().dropna().reset_index()
df["pop"] = df["pop"].astype(int)
df.sort_values("zip").to_csv(join(OUTPUT_DIR, ZIP_POPULATION_OUT_FILENAME), index=False)
def derive_fips_hrr_crosswalk():
"""Derive a crosswalk file from FIPS to HRR through FIPS -> ZIP -> HRR."""
if not isfile(join(OUTPUT_DIR, FIPS_ZIP_OUT_FILENAME)):
create_fips_zip_crosswalk()
if not isfile(join(OUTPUT_DIR, ZIP_HRR_OUT_FILENAME)):
create_zip_hsa_hrr_crosswalk()
fz_df = pd.read_csv(join(OUTPUT_DIR, FIPS_ZIP_OUT_FILENAME), dtype={"fips": str, "zip": str, "weight": float})
zh_df = pd.read_csv(join(OUTPUT_DIR, ZIP_HRR_OUT_FILENAME), dtype={"zip": str, "hrr": str})
fz_df = fz_df.merge(zh_df, on="zip", how="left").drop(columns="zip").groupby(["fips", "hrr"]).sum().reset_index()
fz_df.sort_values(["fips", "hrr"]).to_csv(join(OUTPUT_DIR, FIPS_HRR_OUT_FILENAME), index=False)
def derive_fips_state_crosswalk():
"""Derive a crosswalk between FIPS county codes and state information (number, abbreviation, name)."""
fips_pop = pd.read_csv(join(OUTPUT_DIR, FIPS_POPULATION_OUT_FILENAME), dtype={"fips": str, "pop": int})
megafips = pd.DataFrame({"fips": [fips + "000" for fips in fips_pop.fips.str[:2].unique()], "pop": np.nan})
fips_pop = pd.concat([fips_pop, megafips])
state_codes = pd.read_csv(join(OUTPUT_DIR, STATE_OUT_FILENAME), dtype={"state_code": str, "state_id": str, "state_name": str})
fips_pop["state_code"] = fips_pop["fips"].str[:2]
fips_pop = fips_pop.merge(state_codes, on="state_code", how="left").drop(columns="pop")
fips_pop.sort_values(["fips", "state_code"]).to_csv(join(OUTPUT_DIR, FIPS_STATE_OUT_FILENAME), index=False)
def derive_zip_msa_crosswalk():
"""Derive a crosswalk file from ZIP to MSA through ZIP -> FIPS -> HRR."""
if not isfile(join(OUTPUT_DIR, ZIP_FIPS_OUT_FILENAME)):
create_fips_zip_crosswalk()
if not isfile(join(OUTPUT_DIR, FIPS_MSA_OUT_FILENAME)):
create_fips_msa_crosswalk()
zf_df = pd.read_csv(join(OUTPUT_DIR, ZIP_FIPS_OUT_FILENAME), dtype={"zip": str, "fips": str, "weight": float})
fm_df = pd.read_csv(join(OUTPUT_DIR, FIPS_MSA_OUT_FILENAME), dtype={"fips": str, "msa": str})
zf_df = zf_df.merge(fm_df, on="fips").drop(columns="fips").groupby(["msa", "zip"]).sum().reset_index()
zf_df.sort_values(["zip", "msa"]).to_csv(join(OUTPUT_DIR, ZIP_MSA_OUT_FILENAME), index=False)
def derive_zip_to_state_code():
"""Derive a crosswalk between ZIP codes and state information (number, abbreviation, name)."""
if not isfile(join(OUTPUT_DIR, STATE_OUT_FILENAME)):
create_state_codes_crosswalk()
if not isfile(join(OUTPUT_DIR, ZIP_FIPS_OUT_FILENAME)):
create_fips_zip_crosswalk()
sdf = pd.read_csv(join(OUTPUT_DIR, STATE_OUT_FILENAME), dtype={"state_code": str, "state_id": str, "state_name": str})
zf_cf = pd.read_csv(join(OUTPUT_DIR, ZIP_FIPS_OUT_FILENAME), dtype={"zip": str, "fips": str})
zf_cf["state_code"] = zf_cf["fips"].str[:2]
zf_cf = zf_cf.merge(sdf, left_on="state_code", right_on="state_code", how="left").drop(columns=["fips"])
zf_cf.sort_values(["zip", "state_code"]).to_csv(join(OUTPUT_DIR, ZIP_STATE_CODE_OUT_FILENAME), index=False)
def derive_fips_hhs_crosswalk():
"""Derive a crosswalk between FIPS county codes and HHS regions."""
if not isfile(join(OUTPUT_DIR, STATE_HHS_OUT_FILENAME)):
create_state_hhs_crosswalk()
if not isfile(join(OUTPUT_DIR, FIPS_POPULATION_OUT_FILENAME)):
create_fips_population_table()
fips_pop = pd.read_csv(join(OUTPUT_DIR, FIPS_POPULATION_OUT_FILENAME), dtype={"fips": str, "pop": int})
megafips = pd.DataFrame({"fips": [fips + "000" for fips in fips_pop.fips.str[:2].unique()], "pop": np.nan})
fips_pop = pd.concat([fips_pop, megafips])
state_hhs = pd.read_csv(join(OUTPUT_DIR, STATE_HHS_OUT_FILENAME), dtype={"state_code": str, "hhs": str})
fips_pop["state_code"] = fips_pop["fips"].str[:2]
fips_pop = fips_pop.merge(state_hhs, on="state_code", how="left").drop(columns=["state_code", "pop"])
fips_pop.sort_values(["fips", "hhs"]).to_csv(join(OUTPUT_DIR, FIPS_HHS_FILENAME), index=False)
def derive_zip_hhs_crosswalk():
"""Derive a crosswalk between zip code and HHS regions."""
if not isfile(join(OUTPUT_DIR, STATE_HHS_OUT_FILENAME)):
create_state_hhs_crosswalk()
if not isfile(join(OUTPUT_DIR, ZIP_STATE_CODE_OUT_FILENAME)):
derive_zip_to_state_code()
zip_state = pd.read_csv(join(OUTPUT_DIR, ZIP_STATE_CODE_OUT_FILENAME), dtype={"zip": str, "pop": int, "state_code": str})
state_hhs = pd.read_csv(join(OUTPUT_DIR, STATE_HHS_OUT_FILENAME), dtype={"state_code": str, "hhs": str})
zip_state = zip_state.merge(state_hhs, on="state_code", how="left").drop(columns=["state_code", "state_id", "state_name"])
zip_state.sort_values(["zip", "hhs"]).to_csv(join(OUTPUT_DIR, ZIP_HHS_FILENAME), index=False)
def clear_dir(dir_path: str):
for fname in listdir(dir_path):
remove(join(dir_path, fname))
if __name__ == "__main__":
clear_dir(OUTPUT_DIR)
create_fips_zip_crosswalk()
create_zip_hsa_hrr_crosswalk()
create_fips_msa_crosswalk()
create_jhu_uid_fips_crosswalk()
create_state_codes_crosswalk()
create_state_hhs_crosswalk()
create_fips_population_table()
create_nation_population_table()
create_state_population_table()
create_hhs_population_table()
derive_fips_hrr_crosswalk()
derive_zip_msa_crosswalk()
derive_zip_to_state_code()
derive_fips_state_crosswalk()
derive_zip_population_table()
derive_fips_hhs_crosswalk()
derive_zip_hhs_crosswalk()
| 47.171779 | 229 | 0.67512 | 3,363 | 23,067 | 4.34612 | 0.135593 | 0.054187 | 0.050698 | 0.042693 | 0.467022 | 0.406541 | 0.335591 | 0.27942 | 0.235906 | 0.215791 | 0 | 0.0372 | 0.173755 | 23,067 | 488 | 230 | 47.268443 | 0.729682 | 0.13864 | 0 | 0.190332 | 0 | 0.015106 | 0.177094 | 0.010343 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054381 | false | 0 | 0.024169 | 0 | 0.07855 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8ca4822b6ac6957525984fded9a7828d286dadf6 | 7,083 | py | Python | nebula_bench/stress.py | TangYuFe/nebula-bench | 0295d1bbc8c70e571f0759b132aa3cffc1030b2c | [
"Apache-2.0"
] | null | null | null | nebula_bench/stress.py | TangYuFe/nebula-bench | 0295d1bbc8c70e571f0759b132aa3cffc1030b2c | [
"Apache-2.0"
] | null | null | null | nebula_bench/stress.py | TangYuFe/nebula-bench | 0295d1bbc8c70e571f0759b132aa3cffc1030b2c | [
"Apache-2.0"
] | null | null | null | # -*- encoding: utf-8 -*-
import sys
import inspect
import copy
from pathlib import Path
import click
from nebula_bench.utils import load_class, jinja_dump, run_process
from nebula_bench.common.base import BaseScenario
from nebula_bench.utils import logger
from nebula_bench import setting
def load_scenarios(scenarios):
if scenarios.strip().upper() == "ALL":
r = load_class("nebula_bench.scenarios", True, BaseScenario)
else:
r = load_class("nebula_bench.scenarios", False, BaseScenario, scenarios)
r = [x for x in r if x.abstract == False]
return r
class Stress(object):
DEFAULT_VU = 100
DEFAULT_DURATION = "60s"
def __init__(
self, folder, address, user, password, space, vid_type, scenarios, args, dry_run, **kwargs
):
self.folder = folder or setting.DATA_FOLDER
self.address = address or setting.NEBULA_ADDRESS
self.user = user or setting.NEBULA_USER
self.password = password or setting.NEBULA_PASSWORD
self.space = space or setting.NEBULA_SPACE
self.vid_type = vid_type
self.scenarios = []
self.output_folder = "output"
self.dry_run = dry_run
self.args = args
self.scenarios = load_scenarios(scenarios)
logger.info("total stress test scenarios is {}".format(len(self.scenarios)))
# dump config file
def dump_config(self, scenario):
pass
def run(self):
pass
class StressFactory(object):
type_list = ["K6"]
@classmethod
def gen_stress(
cls,
_type,
folder,
address,
user,
password,
space,
vid_type,
scenarios,
args,
dry_run=None,
**kwargs
):
if _type.upper() not in cls.type_list:
raise Exception("not impletment this test tool, tool is {}".format(_type))
clazz = cls.get_all_stress_class().get("{}Stress".format(_type.upper()), None)
if args is not None:
args = args.strip()
return clazz(
folder, address, user, password, space, vid_type, scenarios, args, dry_run, **kwargs
)
@classmethod
def get_all_stress_class(cls):
r = {}
current_module = sys.modules[__name__]
for name, clazz in inspect.getmembers(current_module):
if inspect.isclass(clazz) and issubclass(clazz, Stress):
r[name] = clazz
return r
class K6Stress(Stress):
def _update_read_config(self, scenario, kwargs):
kwargs["param"] = ",".join(["d[" + str(x) + "]" for x in scenario.csv_index])
return kwargs
def _update_insert_config(self, scenario, kwargs):
kwargs["csv_index"] = ",".join([str(x) for x in scenario.csv_index])
return kwargs
def dump_config(self, scenario):
assert issubclass(scenario, BaseScenario)
name = scenario.name
kwargs = {
"address": self.address,
"user": self.user,
"password": self.password,
"space": self.space,
"csv_path": "{}/{}".format(self.folder, scenario.csv_path),
"output_path": "{}/output_{}.csv".format(self.output_folder, name),
"nGQL": scenario.nGQL,
}
if scenario.is_insert_scenario:
kwargs = self._update_insert_config(scenario, kwargs)
template_file = "k6_config_insert.js.j2"
else:
kwargs = self._update_read_config(scenario, kwargs)
template_file = "k6_config.js.j2"
logger.info(
"begin dump stress config, config file is {}".format(
"{}/{}.js".format(self.output_folder, name)
)
)
jinja_dump(template_file, "{}/{}.js".format(self.output_folder, name), kwargs)
def _get_params(self):
"""
e.g.
args:
"-s 60s:0 -s 40s:30 -v"
return:
{
"-s": ["60s:0", "40s:30"],
"-v": None
}
"""
r = {}
if self.args is None:
return r
key, value = None, None
for item in self.args.split(" "):
if item.startswith("-"):
if key is not None and key not in r:
r[key] = None
key = item
elif item.strip() != "":
value = item
if key not in r:
r[key] = [value]
else:
r[key].append(value)
if key is not None and key not in r:
r[key] = None
return r
def run(self):
logger.info("run stress test in k6")
params = self._get_params()
# cannot use both stage and vu
run_with_stage = "-s" in params or "--stage" in params
vu = self.DEFAULT_VU
duration = self.DEFAULT_DURATION
if "-u" in params:
vu = params.pop("-u")[0]
if "--vus" in params:
vu = params.pop("--vus")[0]
if "-vu" in params:
vu = params.pop("-vu")[0]
if "-d" in params:
duration = params.pop("-d")[0]
if "--duration" in params:
duration = params.pop("--duration")[0]
logger.info("every scenario would run by {} vus and last {}".format(vu, duration))
Path(self.output_folder).mkdir(exist_ok=True)
if "--summary-trend-stats" not in params:
params["--summary-trend-stats"] = ["min,avg,med,max,p(90),p(95),p(99)"]
if setting.INFLUXDB_URL is not None and "--out" not in params and "-o" not in params:
params["--out"] = ["influxdb={}".format(setting.INFLUXDB_URL)]
for scenario in self.scenarios:
_params = copy.copy(params)
self.dump_config(scenario)
if run_with_stage:
command = [
"scripts/k6",
"run",
"{}/{}.js".format(self.output_folder, scenario.name),
]
else:
command = [
"scripts/k6",
"run",
"{}/{}.js".format(self.output_folder, scenario.name),
"-u",
str(vu),
"-d",
str(duration),
]
if "--summary-export" not in _params:
_params["--summary-export"] = [
"{}/result_{}.json".format(self.output_folder, scenario.name)
]
for param, values in _params.items():
if values is None:
command.append(param)
else:
for v in values:
command.append(param)
command.append(v)
click.echo("run command as below:")
click.echo(" ".join([x if "(" not in x else '"{}"'.format(x) for x in command]))
if self.dry_run is not None and self.dry_run:
continue
run_process(command)
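
# Illustrative use (argument values are placeholders, not project defaults):
# stress = StressFactory.gen_stress(
#     "k6", "target/data", "127.0.0.1:9669", "root", "nebula",
#     "stress_test", "int", "Go1Step", "-d 60s -u 50")
# stress.run()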
| 31.762332 | 98 | 0.529013 | 815 | 7,083 | 4.458896 | 0.204908 | 0.026417 | 0.035223 | 0.036324 | 0.281508 | 0.187947 | 0.143093 | 0.121079 | 0.121079 | 0.121079 | 0 | 0.00872 | 0.352393 | 7,083 | 222 | 99 | 31.905405 | 0.783519 | 0.025837 | 0 | 0.207865 | 0 | 0 | 0.097536 | 0.020681 | 0 | 0 | 0 | 0 | 0.005618 | 1 | 0.061798 | false | 0.039326 | 0.050562 | 0 | 0.185393 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8ca4cc5cb8eec669db5a292b3196020cf1b16e82 | 24,595 | py | Python | doduo/train_multi.py | doduo-anonymous/doduo-submission | 34d397c14174d64e6a3026d51cc25560a4f1e29f | [
"Apache-2.0"
] | null | null | null | doduo/train_multi.py | doduo-anonymous/doduo-submission | 34d397c14174d64e6a3026d51cc25560a4f1e29f | [
"Apache-2.0"
] | null | null | null | doduo/train_multi.py | doduo-anonymous/doduo-submission | 34d397c14174d64e6a3026d51cc25560a4f1e29f | [
"Apache-2.0"
] | null | null | null | import argparse
import json
import math
import os
import random
from time import time
import numpy as np
import pandas as pd
from sklearn.metrics import f1_score
import torch
from torch.nn import CrossEntropyLoss, BCEWithLogitsLoss
from torch.utils.data import DataLoader, RandomSampler
from transformers import BertTokenizer, BertForSequenceClassification, BertConfig
from transformers import AdamW, get_linear_schedule_with_warmup
from dataset import (
collate_fn,
TURLColTypeColwiseDataset,
TURLColTypeTablewiseDataset,
TURLRelExtColwiseDataset,
TURLRelExtTablewiseDataset,
SatoCVColwiseDataset,
SatoCVTablewiseDataset,
)
from model import BertForMultiOutputClassification, BertMultiPairPooler
from util import f1_score_multilabel
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if __name__ == "__main__":
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
parser = argparse.ArgumentParser()
parser.add_argument(
"--shortcut_name",
default="bert-base-uncased",
type=str,
help="Huggingface model shortcut name ",
)
parser.add_argument(
"--max_length",
default=128,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument(
"--batch_size",
default=32,
type=int,
help="Batch size",
)
parser.add_argument(
"--epoch",
default=30,
type=int,
help="Number of epochs for training",
)
parser.add_argument(
"--random_seed",
default=4649,
type=int,
help="Random seed",
)
parser.add_argument(
"--num_classes",
default=78,
type=int,
help="Number of classes",
)
#TOCHECK
parser.add_argument(
"--multi_gpu",
action="store_true",
default=False,
help="Use multiple GPU"
)
#TODO
parser.add_argument(
"--fp16",
action="store_true",
default=False,
help="Use FP16"
)
parser.add_argument(
"--warmup",
type=float,
default=0.,
help="Warmup ratio")
parser.add_argument(
"--lr",
type=float,
default=5e-5,
help="Learning rate")
parser.add_argument(
"--tasks",
type=str,
nargs="+",
default=["sato0"],
choices=["sato0", "sato1", "sato2", "sato3", "sato4",
"msato0", "msato1", "msato2", "msato3", "msato4",
"turl", "turl-re"],
help="Task names}")
parser.add_argument(
"--colpair",
action="store_true",
help="Use column pair embedding"
)
parser.add_argument(
"--train_ratios",
type=str,
nargs="+",
default=[],
help="e.g., --train_ratios turl=0.8 turl-re=0.1"
)
parser.add_argument(
"--from_scratch",
action="store_true",
help="Training from scratch"
)
parser.add_argument(
"--single_col",
default=False,
action="store_true",
help="Training with single column model"
)
# TODO: Logger handling
args = parser.parse_args()
args.tasks = sorted(args.tasks)
task_num_class_dict = {"sato0": 78,
"sato1": 78,
"sato2": 78,
"sato3": 78,
"sato4": 78,
"msato0": 78,
"msato1": 78,
"msato2": 78,
"msato3": 78,
"msato4": 78,
"turl": 255,
"turl-re": 121}
train_ratio_dict = {}
num_classes_list = []
for task in args.tasks:
num_classes_list.append(task_num_class_dict[task])
# Default training ratio is ALL
train_ratio_dict[task] = 1.0
# Training ratio
for train_ratio in args.train_ratios:
task, ratio_str = train_ratio.split("=")
ratio = float(ratio_str)
assert task in train_ratio_dict, "Invalid task name: {}".format(task)
assert 0 < ratio <= 1
train_ratio_dict[task] = ratio
# For tagname
train_ratio_str_list = []
for task in sorted(train_ratio_dict.keys()):
ratio = train_ratio_dict[task]
train_ratio_str_list.append("{}-{:.2f}".format(task, ratio))
if args.colpair:
assert "turl-re" in args.tasks, "colpair can be only used for Relation Extraction"
print("args={}".format(json.dumps(vars(args))))
max_length = args.max_length
batch_size = args.batch_size
num_train_epochs = args.epoch
shortcut_name = args.shortcut_name
if args.single_col:
# Single column
tag_name_col = "single"
else:
tag_name_col = "mosato"
if args.colpair:
taskname = "{}-colpair".format("".join(args.tasks))
else:
taskname = "".join(args.tasks)
if args.from_scratch:
tag_name = "model/{}_{}_bert_{}-bs{}-ml-{}".format(taskname,
tag_name_col,
"{}-fromscratch".format(shortcut_name),
batch_size,
max_length)
else:
tag_name = "model/{}_{}_bert_{}-bs{}-ml-{}".format(taskname,
tag_name_col,
shortcut_name,
batch_size,
max_length)
# TODO: Check
tag_name += "__{}".format("_".join(train_ratio_str_list))
print(tag_name)
dirpath = os.path.dirname(tag_name)
if not os.path.exists(dirpath):
print("{} not exists. Created".format(dirpath))
os.makedirs(dirpath)
tokenizer = BertTokenizer.from_pretrained(shortcut_name)
# model = BertForSequenceClassification.from_pretrained(
models = []
for i, num_classes in enumerate(num_classes_list):
if args.single_col:
model_config = BertConfig.from_pretrained(shortcut_name,
num_labels=num_classes)
model = BertForSequenceClassification(model_config)
else:
if args.from_scratch:
# No pre-trained checkpoint
model_config = BertConfig.from_pretrained(shortcut_name, num_labels=num_classes)
model = BertForMultiOutputClassification(model_config)
else:
# Pre-trained checkpoint
model = BertForMultiOutputClassification.from_pretrained(
shortcut_name,
num_labels=num_classes,
output_attentions=False,
output_hidden_states=False,
)
if args.tasks[i] == "turl-re" and args.colpair:
print("Use column-pair pooling")
# Use column pair embeddings
config = BertConfig.from_pretrained(shortcut_name)
model.bert.pooler = BertMultiPairPooler(config).to(device)
# For multi-task learning
if i > 0:
assert not args.single_col, "TODO: Single-column model for multi-task learning"
# The multi-task model shares embeddings & encoder part, not sharing the pooling layer
model.bert.embeddings = models[0].bert.embeddings
model.bert.encoder = models[0].bert.encoder
# [Option] The following also shares the pooling layer
# model.bert = models[0].bert
models.append(model.to(device))
# Check if the parameters are shared
assert 1 == len(set([model.bert.embeddings.word_embeddings.weight.data_ptr() for model in models]))
assert 1 == len(set([model.bert.encoder.layer[0].attention.output.dense.weight.data_ptr() for model in models]))
assert len(models) == len(set([model.bert.pooler.dense.weight.data_ptr() for model in models]))
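    # data_ptr() equality means the embedding/encoder tensors are literally
    # shared across the task models, while each pooler stays task-specific.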
train_datasets = []
train_dataloaders = []
valid_datasets = []
valid_dataloaders = []
for task in args.tasks:
train_ratio = train_ratio_dict[task]
if task in ["sato0", "sato1", "sato2", "sato3", "sato4",
"msato0", "msato1", "msato2", "msato3", "msato4"]:
cv = int(task[-1])
if task[0] == "m":
multicol_only = True
else:
multicol_only = False
if args.single_col:
dataset_cls = SatoCVColwiseDataset
else:
dataset_cls = SatoCVTablewiseDataset
train_dataset = dataset_cls(cv=cv,
split="train",
tokenizer=tokenizer,
max_length=max_length,
multicol_only=multicol_only,
train_ratio=train_ratio,
device=device)
valid_dataset = dataset_cls(cv=cv,
split="train",
tokenizer=tokenizer,
max_length=max_length,
multicol_only=multicol_only,
train_ratio=train_ratio,
device=device)
# TODO: Can be unified
train_sampler = RandomSampler(train_dataset)
train_dataloader = DataLoader(train_dataset,
sampler=train_sampler,
batch_size=batch_size,
collate_fn=collate_fn)
valid_dataloader = DataLoader(valid_dataset,
batch_size=batch_size,
collate_fn=collate_fn)
elif "turl" in task:
if task in ["turl"]:
# TODO: Double-check if it is compatible with single/multi-column data
filepath = "data/table_col_type_serialized.pkl"
if args.single_col:
assert task == "turl" # Single-column model cannot be used for turl-sch
# ColumnWise
dataset_cls = TURLColTypeColwiseDataset
else:
# Tablewise
dataset_cls = TURLColTypeTablewiseDataset
elif task in ["turl-re"]:
# TODO: Double-check if it is compatible with single/multi-column data
filepath = "data/table_rel_extraction_serialized.pkl"
if args.single_col:
assert task == "turl-re" # Single-column model cannot be used for turl-sch
dataset_cls = TURLRelExtColwiseDataset
else:
dataset_cls = TURLRelExtTablewiseDataset
else:
raise ValueError("turl tasks must be turl or turl-re.")
train_dataset = dataset_cls(filepath=filepath,
split="train",
tokenizer=tokenizer,
max_length=max_length,
multicol_only=False,
train_ratio=train_ratio,
device=device)
valid_dataset = dataset_cls(filepath=filepath,
split="dev",
tokenizer=tokenizer,
max_length=max_length,
multicol_only=False,
device=device)
# Can be the same
train_sampler = RandomSampler(train_dataset)
train_dataloader = DataLoader(train_dataset,
sampler=train_sampler,
batch_size=batch_size,
collate_fn=collate_fn)
valid_dataloader = DataLoader(valid_dataset,
batch_size=batch_size,
collate_fn=collate_fn)
else:
raise ValueError("task name must be either sato or turl.")
# Store dataloaders
train_datasets.append(train_dataset)
train_dataloaders.append(train_dataloader)
valid_datasets.append(valid_dataset)
valid_dataloaders.append(valid_dataloader)
optimizers = []
schedulers = []
loss_fns = []
for i, train_dataloader in enumerate(train_dataloaders):
t_total = len(train_dataloader) * num_train_epochs
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in models[i].named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": 0.0
},
{
"params": [p for n, p in models[i].named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0},
]
optimizer = AdamW(optimizer_grouped_parameters,
lr=5e-5,
eps=1e-8)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=0, num_training_steps=t_total
)
optimizers.append(optimizer)
schedulers.append(scheduler)
if "sato" in args.tasks[i]:
loss_fns.append(CrossEntropyLoss())
elif "turl" in args.tasks[i]:
loss_fns.append(BCEWithLogitsLoss())
else:
raise ValueError("task name must be either sato or turl.")
set_seed(args.random_seed)
# Best validation score could be zero
best_vl_micro_f1s = [-1 for _ in range(len(args.tasks))]
best_vl_macro_f1s = [-1 for _ in range(len(args.tasks))]
loss_info_lists = [[] for _ in range(len(args.tasks))]
for epoch in range(num_train_epochs):
for k, (task, model,
train_dataset, valid_dataset,
train_dataloader, valid_dataloader,
optimizer, scheduler, loss_fn,
loss_info_list) in enumerate(zip(args.tasks, models,
train_datasets, valid_datasets,
train_dataloaders, valid_dataloaders,
optimizers, schedulers, loss_fns,
loss_info_lists)):
t1 = time()
model.train()
tr_loss = 0.
tr_pred_list = []
tr_true_list = []
vl_loss = 0.
vl_pred_list = []
vl_true_list = []
for batch_idx, batch in enumerate(train_dataloader):
if args.single_col:
logits = model(batch["data"].T).logits
if "sato" in task:
tr_pred_list += logits.argmax(1).cpu().detach().numpy().tolist()
tr_true_list += batch["label"].cpu().detach().numpy().tolist()
loss = loss_fn(logits, batch["label"])
elif "turl" in task:
# TURL & TURL-REL for the single-col case
tr_pred_list += (logits >= math.log(0.5)).int().detach().cpu().tolist()
tr_true_list += batch["label"].cpu().detach().numpy().tolist()
loss = loss_fn(logits, batch["label"].float())
else:
raise ValueError("Invalid task for single-col: {}".format(task))
else:
# Multi-column model
logits, = model(batch["data"].T) # (row, col) is opposite?
# Align the tensor shape when the size is 1
if len(logits.shape) == 2:
logits = logits.unsqueeze(0)
# DEBUG===
# print("batch['data'].shape={} data['label'].shape={} batch['idx'].shape={}".format(
# batch["data"].shape, batch["label"].shape, batch["idx"].shape))
# ===
cls_indexes = torch.nonzero(batch["data"].T == tokenizer.cls_token_id)
filtered_logits = torch.zeros(cls_indexes.shape[0],
logits.shape[2]).to(device)
for n in range(cls_indexes.shape[0]):
i, j = cls_indexes[n]
logit_n = logits[i, j, :]
filtered_logits[n] = logit_n
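# Sketch (not part of the original loop): the CLS gather above can be
# vectorized with advanced indexing, assuming each cls_indexes row is a
# (batch, position) pair:
#   filtered_logits = logits[cls_indexes[:, 0], cls_indexes[:, 1], :]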
if "sato" in task:
tr_pred_list += filtered_logits.argmax(1).cpu().detach().numpy().tolist()
tr_true_list += batch["label"].cpu().detach().numpy().tolist()
elif "turl" in task:
if task == "turl-re":
all_preds = (filtered_logits >= math.log(0.5)).int().detach().cpu().numpy()
all_labels = batch["label"].cpu().detach().numpy()
# Ignore the very first CLS token
idxes = np.where(all_labels > 0)[0]
tr_pred_list += all_preds[idxes, :].tolist()
tr_true_list += all_labels[idxes, :].tolist()
elif task == "turl":
# Threshold value = 0.5
tr_pred_list += (filtered_logits >= math.log(0.5)).int().detach().cpu().tolist()
tr_true_list += batch["label"].cpu().detach().numpy().tolist()
if "sato" in task:
loss = loss_fn(filtered_logits, batch["label"])
elif "turl" in task:
loss = loss_fn(filtered_logits, batch["label"].float())
loss.backward()
tr_loss += loss.item()
optimizer.step()
scheduler.step()
model.zero_grad()
tr_loss /= (len(train_dataset) / batch_size)
if "sato" in task:
tr_micro_f1 = f1_score(tr_true_list, tr_pred_list, average="micro")
tr_macro_f1 = f1_score(tr_true_list, tr_pred_list, average="macro")
tr_class_f1 = f1_score(tr_true_list, tr_pred_list, average=None, labels=np.arange(args.num_classes))
elif "turl" in task:
tr_micro_f1, tr_macro_f1, tr_class_f1, _ = f1_score_multilabel(tr_true_list, tr_pred_list)
# Validation
model.eval()
for batch_idx, batch in enumerate(valid_dataloader):
if args.single_col:
# Single-column
logits = model(batch["data"].T).logits
if "sato" in task:
vl_pred_list += logits.argmax(1).cpu().detach().numpy().tolist()
vl_true_list += batch["label"].cpu().detach().numpy().tolist()
loss = loss_fn(logits, batch["label"])
elif "turl" in task:
tr_pred_list += (logits >= math.log(0.5)).int().detach().cpu().tolist()
tr_true_list += batch["label"].cpu().detach().numpy().tolist()
loss = loss_fn(logits, batch["label"].float())
else:
raise ValueError("Invalid task for single-col: {}".format(task))
else:
# Multi-column
logits, = model(batch["data"].T)
if len(logits.shape) == 2:
logits = logits.unsqueeze(0)
cls_indexes = torch.nonzero(batch["data"].T == tokenizer.cls_token_id)
filtered_logits = torch.zeros(cls_indexes.shape[0],
logits.shape[2]).to(device)
for n in range(cls_indexes.shape[0]):
i, j = cls_indexes[n]
logit_n = logits[i, j, :]
filtered_logits[n] = logit_n
if "sato" in task:
vl_pred_list += filtered_logits.argmax(1).cpu().detach().numpy().tolist()
vl_true_list += batch["label"].cpu().detach().numpy().tolist()
elif "turl" in task:
if task == "turl-re":
all_preds = (filtered_logits >= math.log(0.5)).int().detach().cpu().numpy()
all_labels = batch["label"].cpu().detach().numpy()
idxes = np.where(all_labels > 0)[0]
vl_pred_list += all_preds[idxes, :].tolist()
vl_true_list += all_labels[idxes, :].tolist()
elif task == "turl":
# Threshold value = 0.5
vl_pred_list += (filtered_logits >= math.log(0.5)).int().detach().cpu().tolist()
vl_true_list += batch["label"].cpu().detach().numpy().tolist()
if "sato" in task:
loss = loss_fn(filtered_logits, batch["label"])
elif "turl" in task:
loss = loss_fn(filtered_logits, batch["label"].float())
vl_loss += loss.item()
vl_loss /= (len(valid_dataset) / batch_size)
if "sato" in task:
vl_micro_f1 = f1_score(vl_true_list, vl_pred_list, average="micro")
vl_macro_f1 = f1_score(vl_true_list, vl_pred_list, average="macro")
vl_class_f1 = f1_score(vl_true_list, vl_pred_list, average=None,
labels=np.arange(args.num_classes))
elif "turl" in task:
vl_micro_f1, vl_macro_f1, vl_class_f1, _ = f1_score_multilabel(vl_true_list, vl_pred_list)
if vl_micro_f1 > best_vl_micro_f1s[k]:
best_vl_micro_f1s[k] = vl_micro_f1
if len(args.tasks) >= 2:
model_savepath = "{}={}_best_micro_f1.pt".format(tag_name, task)
else:
model_savepath = "{}_best_micro_f1.pt".format(tag_name)
torch.save(model.state_dict(),
model_savepath)
if vl_macro_f1 > best_vl_macro_f1s[k]:
best_vl_macro_f1s[k] = vl_macro_f1
if len(args.tasks) >= 2:
model_savepath = "{}={}_best_macro_f1.pt".format(tag_name, task)
else:
model_savepath = "{}_best_macro_f1.pt".format(tag_name)
torch.save(model.state_dict(),
model_savepath)
loss_info_list.append([tr_loss, tr_macro_f1, tr_micro_f1,
vl_loss, vl_macro_f1, vl_micro_f1])
t2 = time()
print("Epoch {} ({}): tr_loss={:.7f} tr_macro_f1={:.4f} tr_micro_f1={:.4f} ".format(
epoch, task, tr_loss, tr_macro_f1, tr_micro_f1),
"vl_loss={:.7f} vl_macro_f1={:.4f} vl_micro_f1={:.4f} ({:.2f} sec.)".format(
vl_loss, vl_macro_f1, vl_micro_f1, (t2 - t1)))
for task, loss_info_list in zip(args.tasks, loss_info_lists):
loss_info_df = pd.DataFrame(loss_info_list,
columns=["tr_loss", "tr_f1_macro_f1", "tr_f1_micro_f1",
"vl_loss", "vl_f1_macro_f1", "vl_f1_micro_f1"])
if len(args.tasks) >= 2:
loss_info_df.to_csv("{}={}_loss_info.csv".format(tag_name, task))
else:
loss_info_df.to_csv("{}_loss_info.csv".format(tag_name))
| 42.405172 | 116 | 0.502582 | 2,510 | 24,595 | 4.686056 | 0.142231 | 0.013603 | 0.02168 | 0.020405 | 0.516664 | 0.462761 | 0.423737 | 0.402057 | 0.384543 | 0.346455 | 0 | 0.014335 | 0.395853 | 24,595 | 579 | 117 | 42.478411 | 0.777239 | 0.049644 | 0 | 0.39876 | 0 | 0 | 0.087558 | 0.007629 | 0 | 0 | 0 | 0.001727 | 0.018595 | 1 | 0.002066 | false | 0 | 0.035124 | 0 | 0.03719 | 0.010331 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8ca738b5c8b2ec02b392772396a2487050f3fca1 | 20,016 | py | Python | dfirtrack_main/importer/file/csv.py | 0xflotus/dfirtrack | 632ebe582c2b40a4ac4b9fb12b7a118c2c49ede5 | [
"MIT"
] | 4 | 2020-03-06T17:37:09.000Z | 2020-03-17T07:50:55.000Z | dfirtrack_main/importer/file/csv.py | 0xflotus/dfirtrack | 632ebe582c2b40a4ac4b9fb12b7a118c2c49ede5 | [
"MIT"
] | null | null | null | dfirtrack_main/importer/file/csv.py | 0xflotus/dfirtrack | 632ebe582c2b40a4ac4b9fb12b7a118c2c49ede5 | [
"MIT"
] | 1 | 2020-03-06T20:54:52.000Z | 2020-03-06T20:54:52.000Z | import csv
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect, render
from django.utils import timezone
from dfirtrack.config import SYSTEMTAG_HEADLINE as systemtag_headline
from dfirtrack.config import SYSTEMTAG_SUBHEADLINE as systemtag_subheadline
from dfirtrack.config import TAGLIST
from dfirtrack.config import TAGPREFIX
from dfirtrack_main.forms import SystemIpFileImport, SystemTagFileImport
from dfirtrack_main.logger.default_logger import critical_logger, debug_logger, error_logger, warning_logger
from dfirtrack_main.models import Domain, Headline, Ip, Reportitem, System, Systemstatus, Tag, Tagcolor
import ipaddress
from io import TextIOWrapper
@login_required(login_url="/login")
def systems_ips(request):
""" this function parses a csv file and tries to import systems and corresponding ips """
# form was valid to post
if request.method == "POST":
# call logger
debug_logger(str(request.user), " SYSTEM_IP_IMPORTER_BEGIN")
# get text out of file (variable results from request object via file upload field)
systemipcsv = TextIOWrapper(request.FILES['systemipcsv'].file, encoding=request.encoding)
# read rows out of csv
rows = csv.reader(systemipcsv, quotechar="'")
# set row counter (needed for logger)
i = 0
# check for wrong file type
try:
# iterate over rows
for row in rows:
# autoincrement row counter
i += 1
# check for empty rows
try:
# check system column for empty value
if row[0] == '':
warning_logger(str(request.user), " SYSTEM_IP_IMPORTER_SYSTEM_COLUMN " + "row_" + str(i) + ":empty_column")
continue
except IndexError:
warning_logger(str(request.user), " SYSTEM_IP_IMPORTER_ROW row_" + str(i) + ":empty_row")
continue
# check system column for string
if not isinstance(row[0], str):
warning_logger(str(request.user), " SYSTEM_IP_IMPORTER_SYSTEM_COLUMN " + "row_" + str(i) + ":no_string")
continue
# check system column for length of string
if len(row[0]) > 50:
warning_logger(str(request.user), " SYSTEM_IP_IMPORTER_SYSTEM_COLUMN " + "row_" + str(i) + ":long_string")
continue
# check ip column for ip
try:
ipaddress.ip_address(row[1])
except ValueError:
warning_logger(str(request.user), " SYSTEM_IP_IMPORTER_IP_COLUMN " + "row_" + str(i) + ":invalid_ip")
continue
# create ip
ip, created = Ip.objects.get_or_create(ip_ip=row[1])
if created:
ip.logger(str(request.user), " SYSTEMS_IP_IMPORTER_IP_CREATED")
# check for existence of system
system = System.objects.filter(system_name = row[0], ip = ip)
if system.count() > 0:
error_logger(str(request.user), " SYSTEM_IP_IMPORTER_SYSTEM_EXISTS " + "row_" + str(i) + ":system_exists|system_name:" + row[0] + "|ip:" + str(row[1]))
continue
# create form with request data
form = SystemIpFileImport(request.POST, request.FILES)
# create system
if form.is_valid():
# don't save form yet
system = form.save(commit=False)
# set system_name
system.system_name = row[0]
# set auto values
system.system_created_by_user_id = request.user
system.system_modified_by_user_id = request.user
system.system_modify_time = timezone.now()
# save object
system.save()
# save manytomany
form.save_m2m()
# save ip for system
system.ip.add(ip)
# call logger
system.logger(str(request.user), ' SYSTEM_IP_IMPORTER_EXECUTED')
# wrong file type
except UnicodeDecodeError:
critical_logger(str(request.user), " SYSTEM_IP_IMPORTER_WRONG_FILE_TYPE")
# call logger
debug_logger(str(request.user), " SYSTEM_IP_IMPORTER_END")
return redirect('/systems')
else:
# show empty form
form = SystemIpFileImport(initial={
'systemstatus': 2,
'analysisstatus': 1,
})
# call logger
debug_logger(str(request.user), " SYSTEM_IP_IMPORTER_ENTERED")
return render(request, 'dfirtrack_main/system/systems_ip_importer.html', {'form': form})
@login_required(login_url="/login")
def systems_tags(request):
""" this function imports a csv file with multiple systems and relevant tags """
"""
the following high-level workflow is done by this function
- remove all tags for systems beginning with 'TAGPREFIX' (if there are any)
- evaluate given CSV line by line (without first row)
- check whether this line has relevant tags (leave loop if not)
- get hostname and convert to lowercase
- get domain and change to empty string if incorrect (either 'NT AUTHORITY' or hostname itself)
- create domain if necessary
- check for existing systems (with this hostname)
- if == 1:
- check for existing domain (for this system)
if domain_of_system == NULL: domain is set to domain from CSV (if there is one)
- if > 1: leave loop because not distinct
- if == 0: create system
- add relevant tags to this system
- check for reportitem headline = SYSTEMTAG_HEADLINE, reportitem_subheadline = SYSTEMTAG_SUBHEADLINE and create if necessary
- fill reportitem_note with markdown table containing with information of report(s)
- logs and messages are written if applicable
- counters are incremented where necessary
"""
# form was valid to post
if request.method == "POST":
# call logger
debug_logger(str(request.user), " SYSTEM_TAG_IMPORTER_BEGIN")
# check TAGLIST (from settings.config) for empty list
if not TAGLIST:
messages.error(request, "No relevant tags defined. Check `TAGLIST` in `dfirtrack.config`!")
# call logger
error_logger(str(request.user), " SYSTEM_TAG_IMPORTER_NO_TAGS_DEFINED.")
return redirect('/systems/')
else:
taglist = TAGLIST
# check TAGPREFIX (from settings.config) for empty string
if TAGPREFIX is "":
messages.error(request, "No prefix string defined. Check `TAGPREFIX` in `dfirtrack.config`!")
# call logger
error_logger(str(request.user), " SYSTEM_TAG_IMPORTER_NO_TAGPREFIX_DEFINED.")
return redirect('/systems/')
# expand the string by an underscore
else:
tagprefix = TAGPREFIX + "_"
# check whether SYSTEMTAG_HEADLINE is defined in `dfirtrack.config`
if systemtag_headline == '':
# call logger
error_logger(str(request.user), " SYSTEMTAG_HEADLINE_VARIABLE_UNDEFINED")
messages.error(request, "The variable SYSTEMTAG_HEADLINE seems to be undefined. Check `dfirtrack.config`!")
# leave importer
return redirect('/systems/')
# check whether SYSTEMTAG_SUBHEADLINE is defined in `dfirtrack.config`
if systemtag_subheadline == '':
# call logger
error_logger(str(request.user), " SYSTEMTAG_SUBHEADLINE_VARIABLE_UNDEFINED")
messages.error(request, "The variable SYSTEMTAG_SUBHEADLINE seems to be undefined. Check `dfirtrack.config`!")
# leave importer
return redirect('/systems/')
# get text out of file (variable results from request object via file upload field)
systemtagcsv = TextIOWrapper(request.FILES['systemtagcsv'].file, encoding=request.encoding)
# read rows out of csv
rows = csv.reader(systemtagcsv)
# create empty list (this list is used to store every line as single dict: {system_name: row}), because if there are multiple rows with the same system they are added to the same reportitem
rowlist = []
""" remove all tags for systems beginning with 'TAGPREFIX' (if there are any) """
# get all systems that have tags beginning with 'TAGPREFIX' | prefixtagsystems -> queryset
prefixtagsystems=System.objects.filter(tag__tag_name__startswith=tagprefix)
# iterate over systems in queryset | prefixtagsystem -> system object
for prefixtagsystem in prefixtagsystems:
# get all tags beginning with 'TAGPREFIX' that belong to the actual system | systemprefixtags -> queryset
systemprefixtags=prefixtagsystem.tag.filter(tag_name__startswith=tagprefix)
# iterate over queryset | systemprefixtag -> tag object
for systemprefixtag in systemprefixtags:
# delete all existing tags (the m2m relationship) beginning with 'TAGPREFIX' for this system (so that removed tags from csv will be removed as well)
systemprefixtag.system_set.remove(prefixtagsystem)
# create headline if it does not exist
headline, created = Headline.objects.get_or_create(headline_name=systemtag_headline)
if created:
headline.logger(str(request.user), " SYSTEMS_TAG_IMPORTER_HEADLINE_CREATED")
""" remove all reportitems """
# delete reportitems (so no reportitems with legacy information / tags will be left)
Reportitem.objects.filter(headline = headline, reportitem_subheadline = systemtag_subheadline).delete()
""" prepare and start loop """
# set row_counter (needed for logger)
row_counter = 1
# set systems_created_counter (needed for messages)
systems_created_counter = 0
# set systems_skipped_counter (needed for messages)
systems_skipped_counter = 0
# iterate over rows
for row in rows:
# skip first row (headlines)
if row_counter == 1:
# autoincrement row counter
row_counter += 1
continue
# get system_name and change to lowercase
system_name = row[8].lower()
# get tags from csv
tagcsvstring = row[9]
if tagcsvstring == '':
# autoincrement systems_skipped_counter
systems_skipped_counter += 1
# autoincrement row_counter
row_counter += 1
# leave because systems without tags are not relevant
continue
else:
# convert string (at whitespaces) to list
tagcsvlist = tagcsvstring.split()
# create empty list for mapping
tagaddlist = []
# check for relevant tags and add to list
for tag in taglist:
if tag in tagcsvlist:
tagaddlist.append(tagprefix + tag)
# check if tagaddlist is empty
if not tagaddlist:
# autoincrement systems_skipped_counter
systems_skipped_counter += 1
# autoincrement row_counter
row_counter += 1
# leave because there are no relevant tags
continue
# get domain from csv
domain_name = row[7]
# change domain_name to empty string if incorrect domain_name ('NT AUTHORITY') was provided
if domain_name == 'NT AUTHORITY':
domain_name = ''
# clear domain if domain_name equals system_name
elif domain_name.lower() == system_name:
domain_name = ''
# get or create domain object if some valid name was provided
if domain_name != '':
# create domain
domain, created = Domain.objects.get_or_create(domain_name=domain_name)
# call logger if created
if created:
domain.logger(str(request.user), " SYSTEMS_TAG_IMPORTER_DOMAIN_CREATED")
messages.success(request, 'Domain "' + domain.domain_name + '" created.')
else:
# set domain to None to avoid further errors (domain is needed later)
domain = None
# create empty dict
rowdict = {}
# put the actual row to the dict (dict with only ONE key-value-pair)
rowdict[system_name] = row
# append dict to the global list (because if there are multiple rows with the same system, needed for reportitem SYSTEMTAG_SUBHEADLINE)
rowlist.append(rowdict)
# get all systems with this system_name
systemquery = System.objects.filter(system_name=system_name)
""" check how many systems were returned """
# if there is only one system
if len(systemquery) == 1:
# get system object
system = System.objects.get(system_name=system_name)
""" add domain from CSV only if system does not already has a domain """
# check whether system has existing domain and CSV submitted a domain
if system.domain is None and domain is not None:
# if system has no existing domain set domain of system to domain submitted by tag csv
system.domain = domain
system.system_modify_time = timezone.now()
system.system_modified_by_user_id = request.user
system.save()
# call logger
system.logger(str(request.user), " SYSTEMS_TAG_IMPORTER_SYSTEM_DOMAIN_ADDED")
# if there is more than one system
elif len(systemquery) > 1:
# call logger
error_logger(str(request.user), " SYSTEM_TAG_IMPORTER_SYSTEM_EXISTS_MULTIPLE_TIMES " + "row_" + str(row_counter) + ":system_exists_multiple_times|system_name:" + system_name)
messages.error(request, 'System "' + system_name + '" was found multiple times. Nothing was changed for this system.')
# autoincrement row_counter
row_counter += 1
# leave because of no distinct mapping
continue
else:
# create entire new system object
system = System()
system.system_name = system_name
system.systemstatus = Systemstatus.objects.get(systemstatus_name = "Unknown")
#system.analysisstatus = Analysisstatus.objects.get(analysisstatus_name = "Needs analysis")
# add domain if submitted
if domain is not None:
system.domain = domain
system.system_modify_time = timezone.now()
system.system_created_by_user_id = request.user
system.system_modified_by_user_id = request.user
system.save()
# autoincrement systems_created_counter
systems_created_counter += 1
# call logger
system.logger(str(request.user), " SYSTEMS_TAG_IMPORTER_SYSTEM_CREATED")
# iterate over tags in tagaddlist
for tag_name in tagaddlist:
# get tagcolor object
tagcolor = Tagcolor.objects.get(tagcolor_name='primary')
# create tag if needed
tag, created = Tag.objects.get_or_create(tag_name=tag_name, tagcolor=tagcolor)
# call logger if created
if created:
tag.logger(str(request.user), " SYSTEMS_TAG_IMPORTER_TAG_CREATED")
messages.success(request, 'Tag "' + tag.tag_name + '" created.')
# add tag to system
tag.system_set.add(system)
# call logger
system.logger(str(request.user), " SYSTEMS_TAG_IMPORTER_SYSTEM_MODIFIED")
# create reportitem if it does not exist (get_or_create won't work in this context because of needed user objects for saving)
try:
reportitem = Reportitem.objects.get(system = system, headline = headline, reportitem_subheadline = systemtag_subheadline)
except Reportitem.DoesNotExist:
reportitem = Reportitem()
reportitem.system = system
reportitem.headline = headline
reportitem.reportitem_subheadline = systemtag_subheadline
reportitem.reportitem_created_by_user_id = request.user
# create empty list (used to store elements of markdown table)
notelist = []
# put head of markdown table into list
notelist.append("|File|Type|Version|Started|Duration|Lines|Checked|Domain|Host|Tags|Errors|FirstTrace|LastToolUsage|UsageTime|MalwareInstall")
notelist.append("|:---|:---|:---|:---|:---|:---|:---|:---|:---|:---|:---|:---|:---|:---|:---|")
# iterate over entries in list (dictionaries)
for item in rowlist:
# if this single key-value-pair dict contains the system
if system_name in item:
# get row
entry = item[system_name]
# convert row
entry = "|" + "|".join(entry) + "|"
# fill empty fields with '---' (otherwise mkdocs skips these)
entry = entry.replace("||", "| --- |")
# repeat last step to catch empty fields lying next to each other
entry = entry.replace("||", "| --- |")
# put entry to markdown table
notelist.append(entry)
# join list to string with linebreaks
notestring = "\n".join(notelist)
# add changing values (existing reportitem_note will be overwritten)
reportitem.reportitem_note = notestring
reportitem.reportitem_modified_by_user_id = request.user
reportitem.save()
# call logger
reportitem.logger(str(request.user), " SYSTEMS_TAG_IMPORTER_REPORTITEM_CREATED_OR_MODIFIED")
# autoincrement row_counter
row_counter += 1
# call final messages
if systems_created_counter > 0:
if systems_created_counter == 1:
messages.success(request, str(systems_created_counter) + ' system was created.')
else:
messages.success(request, str(systems_created_counter) + ' systems were created.')
if systems_skipped_counter > 0:
if systems_skipped_counter == 1:
messages.warning(request, str(systems_skipped_counter) + ' system was skipped or cleaned (no relevant tags).')
else:
messages.warning(request, str(systems_skipped_counter) + ' systems were skipped or cleaned (no relevant tags).')
# call logger
debug_logger(str(request.user), " SYSTEM_TAG_IMPORTER_END")
return redirect('/systems/')
else:
# show empty form
form = SystemTagFileImport()
# call logger
debug_logger(str(request.user), " SYSTEM_TAG_IMPORTER_ENTERED")
return render(request, 'dfirtrack_main/system/systems_tag_importer.html', {'form': form})
| 43.798687 | 197 | 0.59942 | 2,168 | 20,016 | 5.37869 | 0.163745 | 0.032073 | 0.037047 | 0.046308 | 0.369351 | 0.324758 | 0.288998 | 0.229483 | 0.193637 | 0.184718 | 0 | 0.003109 | 0.32504 | 20,016 | 456 | 198 | 43.894737 | 0.86003 | 0.230216 | 0 | 0.283105 | 0 | 0.004566 | 0.150924 | 0.091421 | 0.004566 | 0 | 0 | 0 | 0 | 1 | 0.009132 | false | 0 | 0.200913 | 0 | 0.246575 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8ca7bf3eb29c14e30560d4819d4da4042900f145 | 25,652 | py | Python | cognigraph/gui/source_obj.py | ossadtchi/cognigraph | e616e9fa021720cc62dded649f508500af01853b | [
"MIT"
] | null | null | null | cognigraph/gui/source_obj.py | ossadtchi/cognigraph | e616e9fa021720cc62dded649f508500af01853b | [
"MIT"
] | null | null | null | cognigraph/gui/source_obj.py | ossadtchi/cognigraph | e616e9fa021720cc62dded649f508500af01853b | [
"MIT"
] | null | null | null | """Base class for objects of type source."""
from warnings import warn
import logging
import numpy as np
from scipy.spatial.distance import cdist
from vispy import scene
from vispy.scene import visuals
import vispy.visuals.transforms as vist
# from ._projection import _project_sources_data
# from .roi_obj import RoiObj
from ..utils.vispy_utils import (color2vb, normalize, vispy_array,
wrap_properties, array2colormap)
logger = logging.getLogger(__name__)
PROJ_STR = "%i sources visibles and not masked used for the %s"
class SourceObj():
"""Create a source object.
Parameters
----------
name : string
Name of the source object.
xyz : array_like
Array of positions of shape (n_sources, 2) or (n_sources, 3).
data : array_like | None
Array of weights of shape (n_sources,).
color : array_like/string/tuple | 'red'
Marker's color. Use a string (e.g. 'green') to use the same color across
markers or a list of colors of length n_sources to use different colors
for markers.
alpha : float | 1.
Transparency level.
symbol : string | 'disc'
Symbol to use for sources. Allowed style strings are: disc, arrow,
ring, clobber, square, diamond, vbar, hbar, cross, tailed_arrow, x,
triangle_up, triangle_down, and star.
radius_min / radius_max : float | 5.0/10.0
Define the minimum and maximum source's possible radius. By default
if all sources have the same value, the radius will be radius_min.
edge_color : string/list/array_like | 'black'
Edge color of source's markers.
edge_width : float | 0.
Edge width source's markers.
system : {'mni', 'tal'}
Specify if the coordinates are in the MNI space ('mni') or Talairach
('tal').
mask : array_like | None
Array of boolean values to specify masked sources. For example, if data
are p-values, mask could be non-significant sources.
mask_color : array_like/tuple/string | 'gray'
Color to use for masked sources.
text : list | None
Text to attach to each source. For example, text could be the name of
each source.
text_size : float | 2.
Text size attached to sources.
text_color : array_like/string/tuple | 'white'
Text color attached to sources.
text_bold : bool | False
Specify if the text attached to sources should be bold.
text_translate : tuple | (0., 2., 0.)
Translate the text along the (x, y, z) axis.
visible : bool/array_like | True
Specify which sources have to be displayed. If visible is True, all
sources are displayed; if False, all sources are hidden. Alternatively, use
an array of shape (n_sources,) to select which sources to display.
transform : VisPy.visuals.transforms | None
VisPy transformation to set to the parent node.
parent : VisPy.parent | None
Markers object parent.
verbose : string
Verbosity level.
_z : float | 10.
In case of (n_sources, 2) use _z to specify the elevation.
kw : dict | {}
Optional arguments are used to control the colorbar
(See :class:`ColorbarObj`).
Notes
-----
List of supported shortcuts :
* **s** : save the figure
* **<delete>** : reset camera
Examples
--------
>>> import numpy as np
>>> from visbrain.objects import SourceObj
>>> n_sources = 100
>>> pos = np.random.uniform(-10, 10, (n_sources, 3))
>>> color = ['orange'] * 50 + ['red'] * 50
>>> data = np.random.rand(n_sources)
>>> text = ['s' + str(k) for k in range(n_sources)]
>>> s = SourceObj('test', pos, color=color, data=data, radius_min=10.,
>>> radius_max=20., edge_color='black', edge_width=1.,
>>> text=text, text_size=10.)
>>> s.preview(axis=True)
"""
###########################################################################
###########################################################################
# BUILT IN
###########################################################################
###########################################################################
def __init__(self, name, xyz, data=None, color='red', alpha=1.,
symbol='disc', radius_min=5., radius_max=10., edge_width=0.,
edge_color='black', system='mni', mask=None,
mask_color='gray', text=None, text_size=2.,
text_color='white', text_bold=False,
text_translate=(0., 2., 0.), visible=True, transform=None,
parent=None, verbose=None, _z=-10., **kw):
"""Init."""
# VisbrainObject.__init__(self, name, parent, transform, verbose, **kw)
# _______________________ CHECKING _______________________
# XYZ :
sh = xyz.shape
assert sh[1] in [2, 3]
self._n_sources = sh[0]
pos = xyz if sh[1] == 3 else np.c_[xyz, np.full((len(self),), _z)]
# Radius min and max :
assert all([isinstance(k, (int, float)) for k in (
radius_min, radius_max)])
radius_max = max(radius_min, radius_max)
self._radius_min, self._radius_max = radius_min, radius_max
# Data :
if data is None:
data = np.ones((len(self),))
else:
data = np.asarray(data).ravel()
assert len(data) == len(self)
self._data = vispy_array(data)
# System :
self._xyz = vispy_array(pos)
# Color :
self._color = color
# Set alpha early: _update_color() may read self._alpha before the
# alpha property setter below runs.
self._alpha = alpha
# Edges :
self._edge_color, self._edge_width = edge_color, edge_width
# Mask :
if mask is None:
mask = [False] * len(self)
self._mask = np.asarray(mask).ravel().astype(bool)
assert len(self._mask) == len(self)
self._mask_color = color2vb(mask_color)
# Text :
self._text_size = text_size
self._text_color = text_color
self._text_translate = text_translate
# _______________________ MARKERS _______________________
self._sources = visuals.Markers(pos=self._xyz, name='Markers',
edge_color=edge_color,
edge_width=edge_width,
symbol=symbol, parent=None)
# _______________________ TEXT _______________________
has_text = text is not None
self._text = text if has_text else [''] * len(self)
self._text = np.array(self._text)
assert len(self._text) == len(self)
self._sources_text = visuals.Text(self._text, pos=self._xyz,
bold=text_bold, name='Text',
color=color2vb(text_color),
font_size=text_size,
parent=None)
self._sources_text.visible = has_text
tr = vist.STTransform(translate=text_translate)
self._sources_text.transform = tr
# _______________________ UPDATE _______________________
# Radius / color :
self.visible = visible
self._update_radius()
self._update_color()
self.alpha = alpha
def __len__(self):
"""Get the number of sources."""
return self._n_sources
def __bool__(self):
"""Return if all source are visible."""
return np.all(self._visible)
def __iter__(self):
"""Loop over visible xyz coordinates.
At each step, the coordinates are (1, 3) and not (3,).
"""
xyz = self.xyz # get only visible coordinates
for k in range(xyz.shape[0]):
yield xyz[[k], :]
def __add__(self, value):
"""Add two SourceObj instances.
This method returns a SourceObj whose xyz coordinates, data, text and
visibility flags are the concatenation of both operands'.
"""
assert isinstance(value, SourceObj)
name = self._name + ' + ' + value._name
xyz = np.r_[self._xyz, value._xyz]
data = np.r_[self._data, value._data]
text = np.r_[self._text, value._text]
visible = np.r_[self._visible, value.visible]
return SourceObj(name, xyz, data=data, text=text, visible=visible)
###########################################################################
###########################################################################
# UPDATE
###########################################################################
###########################################################################
def update(self):
"""Update the source object."""
self._sources._vbo.set_data(self._sources._data)
self._sources.update()
self._sources_text.update()
def _update_radius(self):
"""Update marker's radius."""
if np.unique(self._data).size == 1:
radius = self._radius_min * np.ones((len(self),))
else:
radius = normalize(self._data.copy(), tomin=self._radius_min,
tomax=self._radius_max)
self._sources._data['a_size'] = radius
to_hide = self.hide
# Marker size + egde width = 0 and text='' for hidden sources :
self._sources._data['a_size'][to_hide] = 0.
self._sources._data['a_edgewidth'][to_hide] = 0.
text = np.array(self._text.copy())
text[to_hide] = ''
self._sources_text.text = text
self.update()
def _update_color(self):
"""Update marker's color."""
# Get marker's background color :
if isinstance(self._color, str): # color='white'
bg_color = color2vb(self._color, length=len(self))
elif isinstance(self._color, list): # color=['white', 'green']
assert len(self._color) == len(self)
bg_color = np.squeeze(np.array([color2vb(k) for k in self._color]))
elif isinstance(self._color, np.ndarray): # color = [[0, 0, 0], ...]
csh = self._color.shape
assert (csh[0] == len(self)) and (csh[1] >= 3)
if self._color.shape[1] == 3: # RGB
self._color = np.c_[self._color, np.full(len(self),
self._alpha)]
bg_color = self._color.copy()
# Update masked marker's color :
bg_color[self._mask, :] = self._mask_color
self._sources._data['a_bg_color'] = bg_color
self.update()
def _get_camera(self):
"""Get the most adapted camera."""
d_mean = self._xyz.mean(0)
dist = 1.1 * np.linalg.norm(self._xyz, axis=1).max()
return scene.cameras.TurntableCamera(center=d_mean, scale_factor=dist)
###########################################################################
###########################################################################
# PHYSIO
###########################################################################
###########################################################################
def color_sources(self, analysis=None, color_by=None, data=None,
roi_to_color=None, color_others='black',
hide_others=False, cmap='viridis', clim=None, vmin=None,
vmax=None, under='gray', over='red'):
"""Custom color sources methods.
This method can be used to color sources :
* According to a data vector. In that case, source's colors are
inferred using colormap inputs (i.e cmap, vmin, vmax, clim, under
and over)
* According to ROI analysis (using the `analysis` and `color_by`
input parameters)
Parameters
----------
data : array_like | None
A vector of data with the same length as the number os sources.
The color is inferred from this data vector and can be controlled
using the cmap, clim, vmin, vmax, under and over parameters.
analysis : pandas.DataFrames | None
ROI analysis runned using the analyse_sources method.
color_by : string | None
A column name of the analysis DataFrames. This columns is then used
to identify the color to set to each source inside ROI.
roi_to_color : dict | None
Define custom colors to ROI. For example use {'BA4': 'red',
'BA32': 'blue'} to define custom colors. If roi_to_color is None,
random colors will be used instead.
color_others : array_like/tuple/string | 'black'
Specify how to color sources that are not found using the
roi_to_color dictionary.
hide_others : bool | False
Show or hide sources that are not found using the
roi_to_color dictionary.
"""
if isinstance(data, np.ndarray):
assert len(data) == len(self) and (data.ndim == 1)
logger.info("Color %s using a data vector" % self.name)
kw = self._update_cbar_args(cmap, clim, vmin, vmax, under, over)
colors = array2colormap(data, **kw)
elif (analysis is not None) and (color_by is not None):
# Group analysis :
assert color_by in list(analysis.columns)
logger.info("Color %s according to the %s" % (self.name, color_by))
gp = analysis.groupby(color_by).groups
# Compute color :
if roi_to_color is None: # random color
# Predefined colors and define unique color for each ROI :
colors = np.zeros((len(self), 3), dtype=np.float32)
u_col = np.random.uniform(.1, .8, (len(gp), 3))
u_col = u_col.astype(np.float32)
# Assign color to the ROI :
for k, index in enumerate(gp.values()):
colors[list(index), :] = u_col[k, :]
elif isinstance(roi_to_color, dict): # user defined colors
colors = color2vb(color_others, length=len(self))
keep_visible = np.zeros(len(self), dtype=bool)
for roi_name, roi_col in roi_to_color.items():
if roi_name in list(gp.keys()):
colors[list(gp[roi_name]), :] = color2vb(roi_col)
keep_visible[list(gp[roi_name])] = True
else:
warn("%s not found in the %s column of analysis"
"." % (roi_name, color_by))
if hide_others:
self.visible = keep_visible
else:
raise TypeError("roi_to_color must either be None or a "
"dictionary like {'roi_name': 'red'}.")
self.color = colors
def set_visible_sources(self, select='all', v=None, distance=5.):
"""Select sources that are either inside or outside the mesh.
Parameters
----------
select : {'inside', 'outside', 'close', 'all', 'none', 'left', 'right'}
Custom source selection. Use 'inside' or 'outside' to select
sources respectively inside or outside the volume. Use 'close' to
select sources that are closed to the surface (see the distance
parameter below). Finally, use 'all' (or True), 'none' (or None,
False) to show or hide all of the sources.
v : array_like | None
The vertices of shape (nv, 3) or (nv, 3, 3) if index faced.
distance : float | 5.
Distance between the source and the surface.
"""
select = select.lower() if isinstance(select, str) else select
assert select in ['all', 'inside', 'outside', 'none', 'close', None,
True, False, 'left', 'right']
assert isinstance(distance, (int, float))
xyz = self._xyz
if select in ['inside', 'outside', 'close']:
logger.info("Select sources %s vertices" % select)
if v.ndim == 2: # index faced vertices
v = v[:, np.newaxis, :]
# Predefined inside :
nv, index_faced = v.shape[0], v.shape[1]
v = v.reshape(nv * index_faced, 3)
inside = np.ones((xyz.shape[0],), dtype=bool)
# Loop over ALL of the sources :
for i in range(len(self)):
# Get the euclidian distance :
eucl = cdist(v, xyz[[i], :])
# Get the closest vertex :
eucl_argmin = eucl.argmin()
# Get distance to zero :
xyz_t0 = np.sqrt((xyz[[i], :] ** 2).sum())
v_t0 = np.sqrt((v[eucl_argmin, :] ** 2).sum())
if select in ['inside', 'outside']:
inside[i] = xyz_t0 <= v_t0
elif select == 'close':
inside[i] = np.abs(xyz_t0 - v_t0) > distance
self.visible = inside if select == 'inside' else np.invert(inside)
elif select in ['all', 'none', None, True, False]:
cond = select in ['all', True]
self.visible = cond
self.visible_obj = cond
msg = 'Display' if cond else 'Hide'
logger.info("%s all sources" % msg)
elif select in ['left', 'right']:
logger.info('Select sources in the %s hemisphere' % select)
vec = xyz[:, 0]
self.visible = vec <= 0 if select == 'left' else vec >= 0
def fit_to_vertices(self, v):
"""Move sources to the closest vertex.
Parameters
----------
v : array_like
The vertices of shape (nv, 3) or (nv, 3, 3) if index faced.
"""
if v.ndim == 2: # index faced vertices
v = v[:, np.newaxis, :]
# Predefined inside :
nv, index_faced = v.shape[0], v.shape[1]
v = v.reshape(nv * index_faced, 3)
new_pos = np.zeros_like(self._xyz)
# Loop over visible and not-masked sources :
for i, k in enumerate(self):
# Get the euclidian distance :
eucl = cdist(v, k)
# Set new coordinate using the closest vertex :
new_pos[i, :] = v[eucl.argmin(), :]
# Finally update data sources and text :
self._sources._data['a_position'] = new_pos
self._sources_text.pos = new_pos
self.update()
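# Vectorized alternative (a sketch; a single cdist call over all
# visible sources instead of one call per source):
# eucl = cdist(v, self.xyz) # shape (n_vertices, n_visible)
# closest = v[eucl.argmin(axis=0), :] # rows align with self.xyz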
###########################################################################
###########################################################################
# PROPERTIES
###########################################################################
###########################################################################
# ----------- XYZ -----------
@property
def xyz(self):
"""Get the visible xyz value."""
return self._xyz[self.visible_and_not_masked]
# ----------- DATA -----------
@property
def data(self):
"""Get the data value."""
return self._data[self.visible_and_not_masked]
@data.setter
@wrap_properties
def data(self, value):
"""Set data value."""
assert isinstance(value, np.ndarray) and len(value) == len(self)
self._data = value
# ----------- TEXT -----------
@property
def text(self):
"""Get the text value."""
return np.array(self._text)[self.visible_and_not_masked]
@text.setter
@wrap_properties
def text(self, value):
"""Set text value."""
assert len(value) == len(self._text)
self._text = value
self._sources_text.visible = True
self._update_radius()
# ----------- VISIBLE_AND_NOT_MASKED -----------
@property
def visible_and_not_masked(self):
"""Get the visible_and_not_masked value."""
return np.logical_and(self._visible, ~self.mask)
# ----------- RADIUSMIN -----------
@property
def radius_min(self):
"""Get the radius_min value."""
return self._radius_min
@radius_min.setter
@wrap_properties
def radius_min(self, value):
"""Set radius_min value."""
assert isinstance(value, (int, float))
self._radius_min = min(self._radius_max, value)
self._update_radius()
# ----------- RADIUSMAX -----------
@property
def radius_max(self):
"""Get the radius_max value."""
return self._radius_max
@radius_max.setter
@wrap_properties
def radius_max(self, value):
"""Set radius_max value."""
assert isinstance(value, (int, float))
self._radius_max = max(self._radius_min, value)
self._update_radius()
# ----------- SYMBOL -----------
@property
def symbol(self):
"""Get the symbol value."""
return self._sources.symbol
@symbol.setter
@wrap_properties
def symbol(self, value):
"""Set symbol value."""
assert isinstance(value, str)
self._sources.symbol = value
self._sources.update()
# ----------- EDGE_WIDTH -----------
@property
def edge_width(self):
"""Get the edge_width value."""
return self._edge_width
@edge_width.setter
@wrap_properties
def edge_width(self, value):
"""Set edge_width value."""
assert isinstance(value, (int, float))
self._edge_width = value
self._sources._data['a_edgewidth'] = value
self.update()
# ----------- EDGE_COLOR -----------
@property
def edge_color(self):
"""Get the edge_color value."""
return self._edge_color
@edge_color.setter
@wrap_properties
def edge_color(self, value):
"""Set edge_color value."""
color = color2vb(value, alpha=self.alpha)
self._sources._data['a_fg_color'] = color
self._edge_color = color
self.update()
# ----------- ALPHA -----------
@property
def alpha(self):
"""Get the alpha value."""
return self._alpha
@alpha.setter
@wrap_properties
def alpha(self, value):
"""Set alpha value."""
assert isinstance(value, (int, float))
assert 0 <= value <= 1
self._alpha = value
self._sources._data['a_fg_color'][:, -1] = value
self._sources._data['a_bg_color'][:, -1] = value
self.update()
# ----------- COLOR -----------
@property
def color(self):
"""Get the color value."""
return self._color
@color.setter
@wrap_properties
def color(self, value):
"""Set color value."""
self._color = value
self._update_color()
# ----------- MASK -----------
@property
def mask(self):
"""Get the mask value."""
return self._mask
@mask.setter
@wrap_properties
def mask(self, value):
"""Set mask value."""
assert len(value) == len(self)
self._mask = value
self._update_color()
# ----------- IS_MASKED -----------
@property
def is_masked(self):
"""Get the is_masked value."""
return any(self._mask)
# ----------- MASKCOLOR -----------
@property
def mask_color(self):
"""Get the mask_color value."""
return self._mask_color
@mask_color.setter
@wrap_properties
def mask_color(self, value):
"""Set mask_color value."""
self._mask_color = color2vb(value)
self._update_color()
# ----------- VISIBLE -----------
@property
def visible(self):
"""Get the visible value."""
return self._visible
@visible.setter
@wrap_properties
def visible(self, value):
"""Set visible value."""
if isinstance(value, bool):
self._visible = np.full((len(self),), value)
else:
self._visible = np.asarray(value).ravel().astype(bool)
assert len(self._visible) == len(self)
self._update_radius()
# ----------- HIDE -----------
@property
def hide(self):
"""Get the hide value."""
return np.invert(self._visible)
# ----------- TEXT_SIZE -----------
@property
def text_size(self):
"""Get the text_size value."""
return self._text_size
@text_size.setter
@wrap_properties
def text_size(self, value):
"""Set text_size value."""
assert isinstance(value, (int, float))
self._text_size = value
self._sources_text.font_size = value
self._sources_text.update()
# ----------- TEXT_COLOR -----------
@property
def text_color(self):
"""Get the text_color value."""
return self._text_color
@text_color.setter
@wrap_properties
def text_color(self, value):
"""Set text_color value."""
color = color2vb(value)
self._sources_text.color = color
self._text_color = color
self._sources_text.update()
# ----------- TEXT_TRANSLATE -----------
@property
def text_translate(self):
"""Get the text_translate value."""
return self._text_translate
@text_translate.setter
@wrap_properties
def text_translate(self, value):
"""Set text_translate value."""
assert len(value) == 3
self._sources_text.transform.translate = value
self._text_translate = value
self._sources_text.update()
| 37.946746 | 79 | 0.532356 | 2,934 | 25,652 | 4.419564 | 0.135651 | 0.024601 | 0.017737 | 0.026606 | 0.161641 | 0.073571 | 0.051361 | 0.041798 | 0.029305 | 0.029305 | 0 | 0.007207 | 0.302199 | 25,652 | 675 | 80 | 38.002963 | 0.717207 | 0.321145 | 0 | 0.206215 | 0 | 0 | 0.036291 | 0 | 0 | 0 | 0 | 0 | 0.067797 | 1 | 0.129944 | false | 0 | 0.022599 | 0 | 0.220339 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8ca942b31274aee57b018573913f386aeab68a16 | 14,714 | py | Python | webapp/element43/apps/manufacturing/functions.py | Ososope/eve_online | b368f77aaff403e5f1523a1a0e01d105fed0ada9 | [
"BSD-3-Clause"
] | null | null | null | webapp/element43/apps/manufacturing/functions.py | Ososope/eve_online | b368f77aaff403e5f1523a1a0e01d105fed0ada9 | [
"BSD-3-Clause"
] | null | null | null | webapp/element43/apps/manufacturing/functions.py | Ososope/eve_online | b368f77aaff403e5f1523a1a0e01d105fed0ada9 | [
"BSD-3-Clause"
] | null | null | null | import math
import itertools
from decimal import Decimal
from django.db import connection
# App settings
from apps.manufacturing.settings import MANUFACTURING_MAX_BLUEPRINT_HISTORY, MANUFACTURING_BLUEPRINT_HISTORY_SESSION
# Models
# These models are used throughout this module, so the import must be live.
from eve_db.models import InvBlueprintType, InvTypeMaterial, RamTypeRequirement
from apps.market_data.models import ItemRegionStat
def is_producible(type_id):
"""
Returns 'True' if the given type_id can be built with an Blueprint and 'False' otherwise.
"""
return InvBlueprintType.objects.filter(product_type__id=type_id).exists()
def is_tech1(type_id):
"""
Returns 'True' if the given type_id belongs to a Tech I item and 'False' otherwise.
"""
return InvBlueprintType.objects.filter(product_type__id=type_id, tech_level=1).exists()
def calculate_quantities(form_data, blueprint, materials):
"""
Returns the given materials dictionary with the calculated quantities for all items.
The quantity of a material depends on:
- Blueprint base waste
- Blueprint material level (ME)
- Skill waste (Production efficiency)
- Manufacturing installation material multiplier
"""
for material in materials:
material = calculate_quantity(form_data, blueprint, material)
return materials
def calculate_quantity(form_data, blueprint, material):
"""
Returns the given material with its calculated quantity. The quantity
of this material depends on:
- Blueprint base waste
- Blueprint material level (ME)
- Skill waste (Production efficiency)
- Manufacturing installation material multiplier
"""
blueprint_me = int(form_data['blueprint_material_efficiency'])
blueprint_runs = int(form_data['blueprint_runs'])
skill_production_efficiency = int(form_data['skill_production_efficiency'])
base_waste_multiplier = float(blueprint.waste_factor) / 100
if blueprint_me >= 0:
base_waste_multiplier *= (float(1) / float((blueprint_me + 1)))
else:
base_waste_multiplier *= float(1 - blueprint_me)
base_quantity = material['quantity']
base_waste = base_quantity * base_waste_multiplier
skill_waste = float(((25 - (5 * skill_production_efficiency)) * base_quantity)) / 100
quantity_unit = (base_quantity * form_data['slot_material_modifier']) + base_waste + skill_waste
quantity_total = round(quantity_unit) * blueprint_runs
material['quantity'] = int(quantity_total)
material['volume'] = material['quantity'] * material['volume']
return material
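# Worked example (a sketch; assumes waste_factor=10, ME=0, production
# efficiency skill 5, slot_material_modifier=1.0, base quantity 100,
# one run):
# base_waste = 100 * 0.10 = 10
# skill_waste = (25 - 5*5) * 100 / 100 = 0
# quantity_total = round(100*1.0 + 10 + 0) * 1 = 110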
def calculate_material_prices(materials):
"""
Returns the given materials dictionary with calculated prices.
Beware: the prices are 95th-percentile sell prices from 'The Forge'
region (region id 10000002).
"""
try:
# Build the list of material ids for which the price has to be fetched
material_ids = [material['id'] for material in materials]
materials_prices = ItemRegionStat.objects.values(
'invtype__id',
'sell_95_percentile'
).filter(invtype_id__in=material_ids, mapregion_id__exact=10000002)
for material_price in materials_prices:
for material in materials:
if material['id'] == material_price['invtype__id']:
material['price'] = material_price['sell_95_percentile']
material['price_total'] = material_price['sell_95_percentile'] * material['quantity']
except Exception:
connection._rollback()
return materials
def get_ramtyperequirements_materials(blueprint):
"""
Returns all the RamTypeRequirements for the given blueprint that are not a
skill and required for manufacturing (activity).
"""
materials = RamTypeRequirement.objects.values(
'required_type__id',
'required_type__name',
'required_type__volume',
'quantity',
'recycle'
).filter(
type__id=blueprint.blueprint_type.id,
activity_type__id=1 # manufacturing = 1
).exclude(required_type__group__category__id=16) # skill books = 16
return materials
def get_invtypematerials(blueprint):
"""
Returns the InvTypeMaterials for the given blueprint.
"""
materials = InvTypeMaterial.objects.values(
'material_type__id',
'material_type__name',
'material_type__volume',
'quantity'
).filter(type=blueprint.product_type)
return materials
def is_required_tech1_item(build_requirement):
"""
Determines if the given build requirement is a tech 1 item that is
recyclable.
"""
is_recycle = build_requirement['recycle'] == True
is_tech1_item = is_tech1(build_requirement['required_type__id'])
return is_recycle and is_tech1_item
def get_tech1_item_materials(build_requirements):
"""
Returns the materials needed for the Tech I item that is required for a
Tech II product. If the given build requirement does not belong to a Tech II
item an empty list will be returned.
"""
materials = []
for build_requirement in build_requirements:
if is_required_tech1_item(build_requirement):
materials = InvTypeMaterial.objects.values(
'material_type__id',
'material_type__name',
'material_type__volume',
'quantity'
).filter(type=build_requirement['required_type__id'])
break
return materials
def merge_bill_of_materials(materials1, materials2):
"""
Returns a merged bill of materials from the two given bills of materials.
"""
# Merge by item id: sum the quantities of duplicates and keep the
# other fields of the first occurrence.
materials = []
for item in itertools.chain(materials1, materials2):
for material in materials:
if material['id'] == item['id']:
material['quantity'] += item['quantity']
break
else:
materials.append(item)
return materials
def get_materials(form_data, blueprint):
"""
Returns the bill of material for the given blueprint.
"""
materials1 = []
materials2 = []
blueprint_runs = int(form_data['blueprint_runs'])
build_requirements = get_ramtyperequirements_materials(blueprint)
tech1_item_materials = get_tech1_item_materials(build_requirements)
for build_requirement in build_requirements:
type_volume = build_requirement['required_type__volume']
quantity = build_requirement['quantity']
materials1.append(dict({
'id': build_requirement['required_type__id'],
'name': build_requirement['required_type__name'],
'quantity': build_requirement['quantity'] * blueprint_runs,
'volume': type_volume * quantity * blueprint_runs,
'price': 0,
'price_total': 0,
'producible':is_producible(build_requirement['required_type__id'])
}))
# Get the bill of materials for the Tech II item
extra_materials = get_invtypematerials(blueprint)
for extra_material in extra_materials:
# If one of the materials from the bill of materials of the Tech II
# item is found in the bill of materials for the Tech I item, subtract it.
for tech1_item_material in tech1_item_materials:
if tech1_item_material['material_type__id'] == extra_material['material_type__id']:
extra_material['quantity'] -= tech1_item_material['quantity']
# Only add the material to the bill of materials if its quantity is
# still greater than 0 after the subtraction.
if extra_material['quantity'] > 0:
mat = {
'id': extra_material['material_type__id'],
'name': extra_material['material_type__name'],
'quantity': extra_material['quantity'],
'volume': extra_material['material_type__volume'],
'price': 0,
'price_total': 0,
'producible':is_producible(extra_material['material_type__id'])
}
mat = calculate_quantity(form_data, blueprint, mat)
materials2.append(mat)
materials = merge_bill_of_materials(materials1, materials2)
return materials
def calculate_production_time(form_data, blueprint):
""" Returns the production time for the given blueprint. """
"""
The following data is taken into account while calculation:
1. Players industry skill level
2. Players hardwirings
3. Installation slot production time modifier
4. Blueprint Production efficiency
"""
# implant modifiers. (type_id, modifier)
IMPLANT_MODIFIER = {
0: 0.00, # no hardwiring
27170: 0.01, # Zainou 'Beancounter' Industry BX-801
27167: 0.02, # Zainou 'Beancounter' Industry BX-802
27171: 0.04 # Zainou 'Beancounter' Industry BX-804
}
# calculate the production time modifier
implant_modifier = IMPLANT_MODIFIER[int(form_data['hardwiring'])]
slot_productivity_modifier = form_data['slot_production_time_modifier']
production_time_modifier = (1 - (0.04 * float(form_data['skill_industry']))) * (1 - implant_modifier) * slot_productivity_modifier
base_production_time = blueprint.production_time
production_time = base_production_time * production_time_modifier
blueprint_pe = form_data['blueprint_production_efficiency']
if blueprint_pe >= 0:
production_time *= (1 - (float(blueprint.productivity_modifier) / base_production_time) * (blueprint_pe / (1.00 + blueprint_pe)))
else:
production_time *= (1 - (float(blueprint.productivity_modifier) / base_production_time) * (blueprint_pe - 1))
return production_time
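# Worked example (a sketch; assumes industry skill 5, no hardwiring,
# slot modifier 1.0, production_time=3600, productivity_modifier=360,
# blueprint PE=10):
# production_time_modifier = (1 - 0.04*5) * (1 - 0) * 1.0 = 0.80
# production_time = 3600 * 0.80 = 2880
# PE >= 0: 2880 * (1 - (360/3600) * (10/11)) ~= 2618.2 seconds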
def calculate_manufacturing_job(form_data):
"""
Calculates the manufacturing costs and profits.
"""
#
# This method is basically divided in two sections:
#
# 1. Calculate bill of materials
# 2. Calculate production time
#
result = {} # result dictionary which will be returned
blueprint_type_id = int(form_data['blueprint_type_id'])
blueprint_runs = int(form_data['blueprint_runs'])
blueprint = InvBlueprintType.objects.select_related().get(blueprint_type__id=blueprint_type_id)
result['produced_units'] = blueprint.product_type.portion_size * blueprint_runs
# --------------------------------------------------------------------------
# Calculate bill of materials
# --------------------------------------------------------------------------
materials = get_materials(form_data, blueprint)
materials = calculate_material_prices(materials)
materials_cost_total = math.fsum([material['price_total'] for material in materials])
materials_volume_total = math.fsum([material['volume'] for material in materials])
# sort materials by name:
materials.sort(key=lambda material: material['name'])
result['materials'] = materials
result['materials_cost_unit'] = materials_cost_total / result['produced_units']
result['materials_cost_total'] = materials_cost_total
result['materials_volume_total'] = materials_volume_total
# --------------------------------------------------------------------------
# Calculate production time
# --------------------------------------------------------------------------
production_time = calculate_production_time(form_data, blueprint)
result['production_time_run'] = round(production_time)
result['production_time_total'] = round(production_time * blueprint_runs)
# add all the other values to the result dictionary
result['blueprint_cost_unit'] = form_data['blueprint_price'] / result['produced_units']
result['blueprint_cost_total'] = form_data['blueprint_price']
result['revenue_unit'] = form_data['target_sell_price']
result['revenue_total'] = form_data['target_sell_price'] * result['produced_units']
result['blueprint_type_id'] = blueprint_type_id
result['blueprint_name'] = blueprint.blueprint_type.name
result['blueprint_runs'] = blueprint_runs
brokers_fee = form_data.get('brokers_fee', 0)
sales_tax = form_data.get('sales_tax', 0)
if not brokers_fee:
brokers_fee = 0
if not sales_tax:
sales_tax = 0
result['brokers_fee_unit'] = result['revenue_unit'] * (brokers_fee / 100)
result['brokers_fee_total'] = result['brokers_fee_unit'] * result['produced_units']
result['sales_tax_unit'] = result['revenue_unit'] * (sales_tax / 100)
result['sales_tax_total'] = result['sales_tax_unit'] * result['produced_units']
result['total_cost_unit'] = result['brokers_fee_unit'] + result['sales_tax_unit'] + result['blueprint_cost_unit'] + Decimal((materials_cost_total / result['produced_units']))
result['total_cost_total'] = result['total_cost_unit'] * result['produced_units']
result['profit_unit'] = form_data['target_sell_price'] - result['total_cost_unit']
result['profit_total'] = result['profit_unit'] * result['produced_units']
result['profit_total_hour'] = result['profit_total'] / Decimal(result['production_time_total'] / 3600)
result['profit_total_day'] = result['profit_total_hour'] * 24
if result['profit_total'] != 0 and result['total_cost_total'] != 0:
result['profit_total_percent'] = (result['profit_total'] / result['total_cost_total']) * 100
else:
result['profit_total_percent'] = 0
return result
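# Minimal form_data sketch (keys taken from the lookups above; the
# values are illustrative only -- in the real Django form the numeric
# fields arrive as Decimals/ints):
# form_data = {'blueprint_type_id': 12345, 'blueprint_runs': 10,
# 'blueprint_material_efficiency': 0, 'blueprint_production_efficiency': 10,
# 'skill_production_efficiency': 5, 'skill_industry': 5, 'hardwiring': 0,
# 'slot_material_modifier': 1.0, 'slot_production_time_modifier': 1.0,
# 'blueprint_price': 0, 'target_sell_price': 100, 'brokers_fee': 1.0,
# 'sales_tax': 1.5}
# result = calculate_manufacturing_job(form_data)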
def update_blueprint_history(request, blueprint):
"""
Adds the given blueprint to the blueprint history (which is part of the session in the request).
"""
history = request.session.get(MANUFACTURING_BLUEPRINT_HISTORY_SESSION, [])
add_entry = True
# Don't add the blueprint if it is already in there.
for entry in history:
if entry['id'] == blueprint.blueprint_type.id:
add_entry = False
break
if add_entry:
        if len(history) >= MANUFACTURING_MAX_BLUEPRINT_HISTORY:
# delete the last element of the history which is the oldest
del history[-1]
# insert the latest blueprint at the beginning of the list
history.insert(0, {'id': blueprint.blueprint_type.id, 'name': blueprint.blueprint_type.name})
request.session[MANUFACTURING_BLUEPRINT_HISTORY_SESSION] = history
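# Shape of the session entry maintained above (illustrative values; newest
# entry first, capped at MANUFACTURING_MAX_BLUEPRINT_HISTORY entries):
#
#     request.session[MANUFACTURING_BLUEPRINT_HISTORY_SESSION] == [
#         {'id': 587, 'name': 'Rifter Blueprint'},   # hypothetical blueprints
#         {'id': 602, 'name': 'Kestrel Blueprint'},
#     ]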
| 37.631714 | 178 | 0.675751 | 1,708 | 14,714 | 5.539227 | 0.159251 | 0.01966 | 0.02875 | 0.021139 | 0.344361 | 0.245746 | 0.152944 | 0.108022 | 0.099778 | 0.084346 | 0 | 0.012372 | 0.21449 | 14,714 | 390 | 179 | 37.728205 | 0.806195 | 0.218296 | 0 | 0.199052 | 0 | 0 | 0.169841 | 0.027973 | 0 | 0 | 0 | 0.002564 | 0 | 1 | 0.066351 | false | 0 | 0.028436 | 0 | 0.156398 | 0.21327 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8cacf07651743a1199af5884f1acd8c7460735fd | 880 | py | Python | microsoft_teams/unit_test/test_strip_html.py | killstrelok/insightconnect-plugins | 911358925f4233ab273dbd8172e8b7b9188ebc01 | [
"MIT"
] | null | null | null | microsoft_teams/unit_test/test_strip_html.py | killstrelok/insightconnect-plugins | 911358925f4233ab273dbd8172e8b7b9188ebc01 | [
"MIT"
] | 1 | 2021-02-23T23:57:37.000Z | 2021-02-23T23:57:37.000Z | microsoft_teams/unit_test/test_strip_html.py | killstrelok/insightconnect-plugins | 911358925f4233ab273dbd8172e8b7b9188ebc01 | [
"MIT"
] | null | null | null | from unittest import TestCase
from icon_microsoft_teams.util.strip_html import strip_html
class TestStripHTML(TestCase):
def test_strip_html(self):
test_string = "<h1><b>test</b></h1>"
result = strip_html(test_string)
self.assertEqual("test", result)
def test_strip_teams_html(self):
test_string = "<div>\n<div itemprop=\"copy-paste-block\">\n\n<div style=\"font-size:14px\">!purge-mail subject=\"A very specific\" delete=True</div>\n</div>\n</div>"
result = strip_html(test_string)
expected = "!purge-mail subject=\"A very specific\" delete=True"
self.assertEqual(expected, result)
def test_strip_doesnt_get_inner_newlines(self):
test_string = "\n\n\n\n\n<b>some\nstuff</b>\n\n\n\n"
result = strip_html(test_string)
expected = "some\nstuff"
self.assertEqual(expected, result)
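# For illustration only: a minimal strip_html that satisfies the three tests
# above (strip tags, trim outer whitespace, keep inner newlines). The real
# implementation lives in icon_microsoft_teams.util.strip_html and may differ.
#
#     import re
#     def strip_html(value):
#         return re.sub(r"<[^>]+>", "", value).strip()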
| 41.904762 | 173 | 0.673864 | 126 | 880 | 4.52381 | 0.357143 | 0.02807 | 0.026316 | 0.1 | 0.296491 | 0.252632 | 0.136842 | 0.136842 | 0 | 0 | 0 | 0.005548 | 0.180682 | 880 | 20 | 174 | 44 | 0.785021 | 0 | 0 | 0.294118 | 0 | 0.058824 | 0.226136 | 0.078409 | 0 | 0 | 0 | 0 | 0.176471 | 1 | 0.176471 | false | 0 | 0.117647 | 0 | 0.352941 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8cadc838003730015b93eb6bdcd5fba2d9d01615 | 2,317 | py | Python | fmriprep/utils/bids.py | PennBBL/fmriprep-phases | eb0fa598be13fa56b564e9704d5c1bad22dbea06 | [
"BSD-3-Clause"
] | 1 | 2018-10-23T07:33:52.000Z | 2018-10-23T07:33:52.000Z | fmriprep/utils/bids.py | StevenM1/fmriprep | eb83092c1c048ba756ccf53b688799c87283214f | [
"BSD-3-Clause"
] | null | null | null | fmriprep/utils/bids.py | StevenM1/fmriprep | eb83092c1c048ba756ccf53b688799c87283214f | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Utilities to handle BIDS inputs
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Fetch some test data
>>> import os
>>> from niworkflows import data
>>> data_root = data.get_bids_examples(variant='BIDS-examples-1-enh-ds054')
>>> os.chdir(data_root)
"""
import os
import json
from pathlib import Path
def write_derivative_description(bids_dir, deriv_dir):
from ..__about__ import __version__, __url__, DOWNLOAD_URL
bids_dir = Path(bids_dir)
deriv_dir = Path(deriv_dir)
desc = {
'Name': 'fMRIPrep - fMRI PREProcessing workflow',
'BIDSVersion': '1.1.1',
'PipelineDescription': {
'Name': 'fMRIPrep',
'Version': __version__,
'CodeURL': DOWNLOAD_URL,
},
'CodeURL': __url__,
'HowToAcknowledge':
'Please cite our paper (https://doi.org/10.1038/s41592-018-0235-4), '
'and include the generated citation boilerplate within the Methods '
'section of the text.',
}
# Keys that can only be set by environment
if 'FMRIPREP_DOCKER_TAG' in os.environ:
desc['DockerHubContainerTag'] = os.environ['FMRIPREP_DOCKER_TAG']
if 'FMRIPREP_SINGULARITY_URL' in os.environ:
singularity_url = os.environ['FMRIPREP_SINGULARITY_URL']
desc['SingularityContainerURL'] = singularity_url
singularity_md5 = _get_shub_version(singularity_url)
if singularity_md5 and singularity_md5 is not NotImplemented:
desc['SingularityContainerMD5'] = _get_shub_version(singularity_url)
# Keys deriving from source dataset
orig_desc = {}
fname = bids_dir / 'dataset_description.json'
if fname.exists():
with fname.open() as fobj:
orig_desc = json.load(fobj)
if 'DatasetDOI' in orig_desc:
desc['SourceDatasetsURLs'] = ['https://doi.org/{}'.format(
orig_desc['DatasetDOI'])]
if 'License' in orig_desc:
desc['License'] = orig_desc['License']
with (deriv_dir / 'dataset_description.json').open('w') as fobj:
json.dump(desc, fobj, indent=4)
def _get_shub_version(singularity_url):
return NotImplemented
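# Illustrative shape of the dataset_description.json written above (the
# version string and URLs are filled in from fmriprep's __about__ at runtime;
# the values shown here are placeholders):
#
# {
#     "Name": "fMRIPrep - fMRI PREProcessing workflow",
#     "BIDSVersion": "1.1.1",
#     "PipelineDescription": {"Name": "fMRIPrep", "Version": "<version>",
#                             "CodeURL": "<download-url>"},
#     "CodeURL": "<url>",
#     "HowToAcknowledge": "Please cite our paper (...)",
#     "License": "<copied from the source dataset, when present>"
# }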
| 32.180556 | 81 | 0.641347 | 277 | 2,317 | 5.115523 | 0.462094 | 0.06916 | 0.02964 | 0.052929 | 0.05928 | 0 | 0 | 0 | 0 | 0 | 0 | 0.020134 | 0.228312 | 2,317 | 71 | 82 | 32.633803 | 0.772371 | 0.20587 | 0 | 0 | 0 | 0.023256 | 0.305586 | 0.089266 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046512 | false | 0 | 0.093023 | 0.023256 | 0.162791 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8cb1d4f3bb2a0f7a5c3207005329c993ea088f85 | 678 | py | Python | components/blinker/wrapper.py | tarsqi/ttk | 085007047ab591426d5c08b123906c070deb6627 | [
"Apache-2.0"
] | 25 | 2016-02-28T16:42:57.000Z | 2022-01-03T13:29:48.000Z | components/blinker/wrapper.py | tarsqi/ttk | 085007047ab591426d5c08b123906c070deb6627 | [
"Apache-2.0"
] | 84 | 2016-02-13T01:07:55.000Z | 2021-04-06T18:57:36.000Z | components/blinker/wrapper.py | tarsqi/ttk | 085007047ab591426d5c08b123906c070deb6627 | [
"Apache-2.0"
] | 10 | 2016-05-30T14:35:59.000Z | 2022-03-16T12:24:09.000Z | """
Contains the Blinker wrapper.
"""
from __future__ import absolute_import
from library.tarsqi_constants import BLINKER
from components.blinker.main import Blinker
class BlinkerWrapper(object):
"""Wrapper for Blinker."""
def __init__(self, document):
self.component_name = BLINKER
self.document = document
def process(self):
"""Hand in all document elements to Blinker for processing. Document
elements are instances of Tag with name=docelement."""
blinker = Blinker(self.document)
blinker.run_timex_linking()
for element in self.document.elements():
blinker.process_element(element)
| 25.111111 | 76 | 0.69469 | 77 | 678 | 5.935065 | 0.519481 | 0.105033 | 0.083151 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.224189 | 678 | 26 | 77 | 26.076923 | 0.868821 | 0.247788 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.25 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8cb2537f5f35eb237fad7e81fa978378ed518c4a | 6,990 | py | Python | orator_validator.py | alfonsocv12/orator_validator | 8c4c2ec1b69029c8c048f11d0cba5043942a8938 | [
"MIT"
] | null | null | null | orator_validator.py | alfonsocv12/orator_validator | 8c4c2ec1b69029c8c048f11d0cba5043942a8938 | [
"MIT"
] | null | null | null | orator_validator.py | alfonsocv12/orator_validator | 8c4c2ec1b69029c8c048f11d0cba5043942a8938 | [
"MIT"
] | null | null | null | import re
import time
import json
class Validator(object):
_validation_init = True
__errors__ = {'code':200, 'errors':[]}
def process(self, key, exist=False, not_exist=False, **args):
        '''
        Run a callback depending on whether a (possibly nullable) model
        field is set.

        param: str key: name of the attribute to look up
        param: exist: callback executed when the key exists (is truthy)
        param: not_exist: callback executed when the key does not exist
        param: args: arguments passed on to the callback
        '''
value = getattr(self, key, None)
if exist and value:
exist(self, **args)
if not_exist and not value:
not_exist(self, **args)
def validate(self, key, require=False, data_type=False,
regex=False, custom_error=False, date_str=False, **args):
        '''
        Validate that the given attribute satisfies the given constraints.

        param: str key: name of the value to check; used to build the error message
        param: bool require: record an error if the input is undefined
        param: data_type: expected data type of the input, verified when given
        param: str regex: optional regular expression the value must match
        param: function custom_error: optional function to build a custom error
        param: date_str: format string used to validate a datetime string
        param: args: arguments passed on to custom_error
        return: self, so validations can be chained
        '''
if self._validation_init:
self.errors(validation_init = False)
value = getattr(self, key, None)
if require and not value:
self._handle_error('require', key, custom_error=custom_error, **args)
return self
if data_type and not isinstance(value, data_type):
self._handle_error('data type', key, custom_msg='Bad data type on {}'.format(key),
custom_error=custom_error, **args)
return self
if regex:
if not value: self._handle_error('require', key, custom_error=custom_error, **args)
elif not re.match(regex, value):
self._handle_error('regex', key, custom_error=custom_error, **args)
if date_str:
try:
time.strptime(value, date_str)
except Exception as e:
self._handle_error('invalid', key, custom_msg='Invalid time value')
return self
def validate_update(self, key, guarded=False, data_type=False,
regex=False, custom_error=False, date_str=False,
function_callback=False, **args):
        '''
        Validate updated values on a model. The update has to happen on the
        model side (not on the builder) so that get_dirty() sees the changes.

        param: str key: name of the value to check; used to build the error message
        param: bool guarded: record an error if the field appears in the update
        param: data_type: expected data type of the input, verified when given
        param: str regex: optional regular expression the value must match
        param: function custom_error: optional function to build a custom error
        param: date_str: format string used to validate a datetime string
        param: function_callback: optional callback run with **args
        param: args: arguments passed on to the callbacks
        return: self, so validations can be chained
        '''
if self._validation_init:
self.errors(validation_init = False)
value = self.get_dirty().get(key, None)
if guarded and value:
self._handle_error('Cant update', key, custom_error=custom_error, **args)
return self
elif value:
if data_type and not isinstance(value, data_type):
self._handle_error('data type', key, custom_msg='Bad data type on {}'.format(key),
custom_error=custom_error, **args)
return self
            if regex and not re.match(regex, value):
                # `value` is known to be set in this branch, so only the
                # pattern mismatch needs to be reported
                self._handle_error('regex', key, custom_error=custom_error, **args)
if date_str:
try:
time.strptime(value, date_str)
except Exception as e:
self._handle_error(
'invalid', key, custom_msg='Invalid time value')
if function_callback:
try:
function_callback(**args)
except Exception as e:
self._handle_error(
'Callback error', key, custom_msg=str(e))
return self
@classmethod
def _handle_error(cls, type_error, value_name,
custom_msg=False, custom_error=False, **args):
        '''
        Handle errors raised during validation.

        param: str type_error: used in the default message
        param: str value_name: used in the default message
        param: str custom_msg: optional custom message
        param: function custom_error: optional function to build a custom error
        param: args: values passed to the custom error
        return: None
        '''
if custom_error:
custom_error(**args)
cls.add_error(code=400, msg=custom_msg
if custom_msg else
'Error of {} on {}'.format(type_error, value_name)
)
@classmethod
def errors(cls, validation_init=True):
        '''
        Reset the collected errors and raise if any were recorded.

        param: bool validation_init: flag marking a fresh validation cycle
        raises: ValidatorError when any error was recorded
        '''
cls._validation_init = validation_init
errors = cls.__errors__
cls.__errors__ = {'code':200, 'errors':[]}
if errors['code'] != 200:
raise ValidatorError(errors['code'], json.dumps(errors['errors']))
@classmethod
def add_error(cls, code=None, msg=None):
        '''
        Record an error.

        param: int code: the HTTP-style error code to return
        param: str msg: the message to append to the error list
        '''
if code:
cls.__errors__['code'] = code
if msg:
cls.__errors__['errors'].append({
'msg': msg
})
class Error(Exception):
"""Base class for other exceptions"""
pass
class ValidatorError(Error):
"""Raised when the validator find and error"""
def __init__(self, status_code=None, body=None):
self.status_code = status_code
self.body = body
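# Illustrative usage sketch (assumed integration, not part of this module):
# an Orator model mixing in Validator might chain validations like this --
# the model, field names, and regex are examples only.
#
#     class User(Model, Validator):
#         def save(self, *args, **kwargs):
#             self.validate('email', require=True,
#                           regex=r'[^@]+@[^@]+\.[^@]+')
#             self.validate('age', data_type=int)
#             self.errors()  # raises ValidatorError if anything was recorded
#             return super(User, self).save(*args, **kwargs)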
| 42.363636 | 120 | 0.61402 | 941 | 6,990 | 4.421892 | 0.164718 | 0.068733 | 0.036049 | 0.042298 | 0.572699 | 0.54386 | 0.533045 | 0.525114 | 0.515741 | 0.497477 | 0 | 0.002495 | 0.311874 | 6,990 | 164 | 121 | 42.621951 | 0.862578 | 0.365951 | 0 | 0.37234 | 0 | 0 | 0.0546 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074468 | false | 0.010638 | 0.031915 | 0 | 0.223404 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8cb458001ae0b245e57008d1fb274e96a051a3f9 | 5,508 | py | Python | BinaryThreshold.py | kaleem94/CarND-Advanced-Lane-Lines | eaed810c947d534613e919b688377c87735dab5b | [
"MIT"
] | null | null | null | BinaryThreshold.py | kaleem94/CarND-Advanced-Lane-Lines | eaed810c947d534613e919b688377c87735dab5b | [
"MIT"
] | null | null | null | BinaryThreshold.py | kaleem94/CarND-Advanced-Lane-Lines | eaed810c947d534613e919b688377c87735dab5b | [
"MIT"
] | null | null | null | import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
#import pickle
import glob
def abs_sobel_thresh(gray, orient='x', sobel_kernel=3, thresh=(0, 255)):
# Calculate directional gradient
# Apply threshold
# gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# Apply x or y gradient with the OpenCV Sobel() function
# and take the absolute value
if orient == 'x':
abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0))
if orient == 'y':
abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1))
scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))
binary_output = np.zeros_like(scaled_sobel)
binary_output[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1
return binary_output
def mag_thresh(gray, sobel_kernel=3, mag_thresh=(0, 255)):
# Calculate gradient magnitude
# Apply threshold
# gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
gradmag = np.sqrt(sobelx**2 + sobely**2)
scale_factor = np.max(gradmag)/255
gradmag = (gradmag/scale_factor).astype(np.uint8)
binary_output = np.zeros_like(gradmag)
binary_output[(gradmag >= mag_thresh[0]) & (gradmag <= mag_thresh[1])] = 1
return binary_output
def dir_threshold(gray, sobel_kernel=3, thresh=(0, np.pi/2)):
# Calculate gradient direction
# Apply threshold
# gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
gradmag = np.arctan2(np.absolute(sobely),np.absolute(sobelx))
binary_output = np.zeros_like(gradmag)
binary_output[(gradmag >= thresh[0]) & (gradmag <= thresh[1])] = 1
return binary_output
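# Worked example of the direction window (illustrative): convert_binary below
# passes thresh=(1.1, 1.3) radians, i.e. gradient angles between roughly 63
# and 74.5 degrees -- about the slope of lane lines seen by a forward-facing
# camera.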
def convert_binary(image):
# image = mpimg.imread('test_images/straight_lines1.jpg')
#image = mpimg.imread('test_images/test6.jpg')
# Choose a Sobel kernel size
ksize = 5 # Choose a larger odd number to smooth gradient measurements
# Apply each of the thresholding functions
#gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
#gradx = abs_sobel_thresh(gray, orient='x', sobel_kernel=ksize, thresh=(30, 255))
#grady = abs_sobel_thresh(gray, orient='y', sobel_kernel=ksize, thresh=(30, 255))
#mag_binary = mag_thresh(gray, sobel_kernel=ksize, mag_thresh=(50, 255))
#dir_binary = dir_threshold(gray, sobel_kernel=ksize, thresh=(0.95, 1))
hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
# H = hls[:,:,0]
# L = hls[:,:,1]
S = hls[:,:,2]
#plt.figure(2)
#plt.imshow(S)
gradx1 = abs_sobel_thresh(S, orient='x', sobel_kernel=ksize, thresh=(20, 255))
grady1 = abs_sobel_thresh(S, orient='y', sobel_kernel=ksize, thresh=(20, 255))
mag_binary1 = mag_thresh(S, sobel_kernel=ksize, mag_thresh=(20, 255))
dir_binary1 = dir_threshold(S, sobel_kernel=ksize, thresh=(1.1, 1.3))
combined = np.zeros_like(dir_binary1)
combined[((gradx1 == 1) & (grady1 == 1)) | ((mag_binary1 == 1) & (dir_binary1 == 1))] = 1
# plt.figure(2)
# plt.imshow(combined)
return combined
images = glob.glob('test_images/*.jpg')
# Step through the list and search for chessboard corners
nCount = 0
for idx, fname in enumerate(images):
img = mpimg.imread(fname)
retImg = convert_binary(img)
imgBin = np.zeros_like(img)
imgGray = np.zeros_like(img)
imgBin[retImg == 1] = [255,255,255]
imgGray[retImg == 1] = 255
kernel_size = 5
blur_masked_edges = np.zeros_like(retImg)
blur_masked_edges = cv2.GaussianBlur(imgGray,(kernel_size, kernel_size), 0)
# plt.figure(2)
# plt.imshow(blur_masked_edges)
rho = 1
theta = np.pi/180
threshold = 1
min_line_length = 30
max_line_gap = 5
line_image = np.copy(img)*0 #creating a blank to draw lines on
img_grey = cv2.cvtColor(blur_masked_edges, cv2.COLOR_BGR2GRAY)
    # Pass the length/gap limits as keyword arguments: positionally, the
    # fifth parameter of HoughLinesP is `lines`, not minLineLength.
    lines = cv2.HoughLinesP(img_grey, rho, theta, threshold,
                            minLineLength=min_line_length, maxLineGap=max_line_gap)
    line_image = np.copy(img)*0
    if lines is not None:  # HoughLinesP returns None when no line is found
        for line in lines:
            for x1,y1,x2,y2 in line:
                cv2.line(line_image,(x1,y1),(x2,y2),(255,255,0),5)
# combo = cv2.addWeighted(img, 0.8, line_image, 1, 0)
kernel_size = 5
low_threshold = 30
high_threshold = 150
masked_edges = cv2.Canny(line_image, low_threshold, high_threshold)
kernel_size = 15
blur_masked_edges = np.zeros_like(retImg)
blur_masked_edges = cv2.GaussianBlur(masked_edges,(kernel_size, kernel_size), 0)
# img_grey = cv2.cvtColor(blur_masked_edges, cv2.COLOR_BGR2GRAY)
rho = 1
theta = np.pi/180
threshold = 1
min_line_length = 30
max_line_gap = 30
    # Same keyword-argument fix as above for the second Hough pass.
    lines = cv2.HoughLinesP(blur_masked_edges, rho, theta, threshold,
                            minLineLength=min_line_length, maxLineGap=max_line_gap)
    line_image = np.copy(img)*0
    if lines is not None:
        for line in lines:
            for x1,y1,x2,y2 in line:
                cv2.line(line_image,(x1,y1),(x2,y2),(255,0,0),2)
combo = cv2.addWeighted(img, 0.8, line_image, 1, 0)
plt.figure(nCount)
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,10))
ax1.imshow(combo)
ax1.set_title('Original Image', fontsize=30)
ax2.imshow(imgBin)
ax2.set_title('Binary Image', fontsize=30)
nCount+=1 | 41.104478 | 114 | 0.669935 | 826 | 5,508 | 4.282082 | 0.200969 | 0.04976 | 0.038168 | 0.025445 | 0.558383 | 0.437659 | 0.398926 | 0.374329 | 0.352841 | 0.271416 | 0 | 0.05961 | 0.198983 | 5,508 | 134 | 115 | 41.104478 | 0.742067 | 0.259985 | 0 | 0.311111 | 0 | 0 | 0.011875 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044444 | false | 0 | 0.055556 | 0 | 0.144444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8cb665cae33eec6ab5795da7641a13694f1d806f | 565 | py | Python | leetcode/lengthOfLastWord.py | montukv/Coding-problem-solutions | 973009c00038cc57500d965871376a60f8c4e0d1 | [
"MIT"
] | null | null | null | leetcode/lengthOfLastWord.py | montukv/Coding-problem-solutions | 973009c00038cc57500d965871376a60f8c4e0d1 | [
"MIT"
] | null | null | null | leetcode/lengthOfLastWord.py | montukv/Coding-problem-solutions | 973009c00038cc57500d965871376a60f8c4e0d1 | [
"MIT"
] | null | null | null | '''58. Length of Last Word
Easy
Given a string s consists of upper/lower-case alphabets and empty space characters ' ', return the length of last word (last word means the last appearing word if we loop from left to right) in the string.
If the last word does not exist, return 0.
Note: A word is defined as a maximal substring consisting of non-space characters only.
Example:
Input: "Hello World"
Output: 5'''
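# Equivalent solution wrapped as a function, shown for illustration:
#
#     def length_of_last_word(s):
#         words = s.split()
#         return len(words[-1]) if words else 0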
s = 'to test this code '
try:
print(len(s.split()[-1]))
except IndexError:
print(0) | 21.730769 | 206 | 0.692035 | 94 | 565 | 4.159574 | 0.691489 | 0.081841 | 0.061381 | 0.081841 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.030023 | 0.233628 | 565 | 26 | 207 | 21.730769 | 0.872979 | 0.778761 | 0 | 0 | 0 | 0 | 0.2 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.4 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8cb73739649a485ab3ff7ced9dfc0eb6872fc6e3 | 5,366 | py | Python | pecos/decoders/mwpm2d/mwpm2d.py | quantum-pecos/PECOS | 44bc614a9152f3b316bacef6ca034f6a8a611293 | [
"Apache-2.0"
] | 15 | 2019-04-11T16:02:38.000Z | 2022-03-15T16:56:36.000Z | pecos/decoders/mwpm2d/mwpm2d.py | quantum-pecos/PECOS | 44bc614a9152f3b316bacef6ca034f6a8a611293 | [
"Apache-2.0"
] | 4 | 2018-10-04T19:30:09.000Z | 2019-03-12T19:00:34.000Z | pecos/decoders/mwpm2d/mwpm2d.py | quantum-pecos/PECOS | 44bc614a9152f3b316bacef6ca034f6a8a611293 | [
"Apache-2.0"
] | 3 | 2020-10-07T16:47:16.000Z | 2022-02-01T05:34:54.000Z | # -*- coding: utf-8 -*-
# Copyright 2018 National Technology & Engineering Solutions of Sandia,
# LLC (NTESS). Under the terms of Contract DE-NA0003525 with NTESS,
# the U.S. Government retains certain rights in this software.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import networkx as nx
from ...circuits import QuantumCircuit
from . import precomputing
class MWPM2D:
"""
    2D minimum-weight perfect matching for surface codes, assuming code capacity (data errors only).
A simple Minimum Weight Perfect Matching decoder. It is for 2D decoding either for code capacity modeling or ideal
decoding.
For code capacity, data errors are sprinkled before each logical gate. Then the decoder takes in syndrome
measurements to come up with a recovery operation.
"""
# Basic subpackage required attributes
output = None
input = None
def __init__(self, qecc):
instr = qecc.instruction('instr_syn_extract')
self.instr = instr
self.recorded_recovery = {} # previous: syndrome => recovery
precomputed_data = precomputing.precompute(instr)
self.precomputed_data = precomputed_data
def decode(self, measurements, error_params=None):
"""
Takes measurement results and outputs a result.
logic_range identifies over what part of self.logic we are decoding over.
"""
syndromes = measurements.simplified(True)
tuple_key = frozenset(syndromes)
if tuple_key in self.recorded_recovery:
return self.recorded_recovery[tuple_key]
else:
recovery = QuantumCircuit(1)
decode_data = self.precomputed_data
correction_x = []
correction_z = []
# Decode 'X' and Z separately.
for check_type in ['X', 'Z']:
if check_type == 'X':
correction = correction_z
else:
correction = correction_x
check_type_decode = decode_data[check_type]
distance_graph = check_type_decode['dist_graph']
# closest = check_type_decode['closest_virt']
virtual_edge_data = check_type_decode['virtual_edge_data']
active_syn = set(syndromes)
# Get the real graph
real_graph = nx.Graph(distance_graph.subgraph(active_syn))
active_syn = set(real_graph.nodes())
# Add virtual nodes
new_name = self.itr_v_name()
# print 'act', active_syn
active_virt = set([])
for s in active_syn:
edge_data = virtual_edge_data[s]
v_name = next(new_name)
active_virt.add(v_name)
real_graph.add_edge(s, v_name, **edge_data)
# print 'closest:: s:%s - v:%s, data path %s' % (s, v, edge_data['data_path'])
# Add edges between virtual nodes to allow pairing of un-needed virtual nodes
for vi in active_virt:
for vj in active_virt:
if vi != vj:
real_graph.add_edge(vi, vj, weight=0)
# Find a matching
matching_edges = nx.max_weight_matching(real_graph, maxcardinality=True)
matching = {n1: n2 for n2, n1 in matching_edges}
matching.update({n2: n1 for n2, n1 in matching_edges})
nodes_paired = set([])
## for n1 in real_graph.nodes():
real_syn = set(real_graph.nodes())
for n1 in syndromes & real_syn:
n2 = matching[n1]
# Don't continue if node has already been covered or path starts and ends with virtuals.
if n1 in nodes_paired or (str(n1).startswith('v') and str(n2).startswith('v')):
continue
nodes_paired.add(n2)
path_attr = real_graph.get_edge_data(n1, n2)
correction.extend(path_attr['data_path'])
correction_x = set(correction_x)
correction_z = set(correction_z)
correction_y = correction_x & correction_z
correction_x -= correction_y
correction_z -= correction_y
if correction_z:
recovery.update({'Z': correction_z})
if correction_x:
recovery.update({'X': correction_x})
if correction_y:
recovery.update({'Y': correction_y})
self.recorded_recovery[tuple_key] = recovery
return recovery
@staticmethod
def itr_v_name():
i = 0
while True:
i += 1
yield 'vu' + str(i)
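# Illustrative toy run of the matching step used in decode() (assumed data,
# not part of the decoder). Edge weights are negated so that max-weight
# matching picks the shortest pairings -- one common convention for the
# precomputed distance graph:
#
#     import networkx as nx
#     g = nx.Graph()
#     g.add_edge('s1', 's2', weight=-1)  # hypothetical syndrome distances
#     g.add_edge('s1', 's3', weight=-3)
#     g.add_edge('s1', 's4', weight=-2)
#     g.add_edge('s2', 's3', weight=-2)
#     g.add_edge('s2', 's4', weight=-3)
#     g.add_edge('s3', 's4', weight=-1)
#     nx.max_weight_matching(g, maxcardinality=True)
#     # -> {('s1', 's2'), ('s3', 's4')} (up to tuple ordering)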
| 33.5375 | 118 | 0.58852 | 635 | 5,366 | 4.798425 | 0.359055 | 0.026584 | 0.026255 | 0.021661 | 0.045947 | 0.01444 | 0 | 0 | 0 | 0 | 0 | 0.011527 | 0.337123 | 5,366 | 159 | 119 | 33.748428 | 0.845094 | 0.327805 | 0 | 0.026667 | 0 | 0 | 0.017923 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0.04 | 0 | 0.146667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8cb7514575baa43724b13e8839a8fda37d9103d1 | 5,258 | py | Python | filters/peak_filters.py | Joeltronics/audioexperiments | 9f677ce72b0a50c7240ace880603d8e955f270a8 | [
"MIT"
] | 1 | 2021-12-13T03:05:03.000Z | 2021-12-13T03:05:03.000Z | filters/peak_filters.py | Joeltronics/audioexperiments | 9f677ce72b0a50c7240ace880603d8e955f270a8 | [
"MIT"
] | null | null | null | filters/peak_filters.py | Joeltronics/audioexperiments | 9f677ce72b0a50c7240ace880603d8e955f270a8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import numpy as np
from math import exp, pi
from utils import utils
from processor import ProcessorBase
_unit_tests = []
class BidirectionalOnePoleFilter(ProcessorBase):
def __init__(self, rise_time, fall_time, gain=1.0, verbose=False):
"""
:param rise_time: 63% (1-1/e) rise time, in samples. Equivalent cutoff frequency 1 / (2*pi*time)
:param fall_time: 63% (1-1/e) fall time, in samples. Equivalent cutoff frequency 1 / (2*pi*time)
:param gain: optional gain to add to the system
:param verbose:
"""
if rise_time == 0.0 or fall_time == 0.0:
raise ValueError('Rise & fall times must not be zero')
elif rise_time < 0.0 or fall_time < 0.0:
raise ValueError('Rise & fall times must be positive')
self.z1 = 0.0
self.na1_up = 0.0
self.b0_up = 0.0
self.na1_down = 0.0
self.b0_down = 0.0
self.gain = gain
self._set_freqs(rise_time, fall_time, gain=gain)
if verbose:
print('Bidirectional one pole filter: gain %s, rise(%.1f samples, b0=%f, a1=%f), fall(%.1f samples, b0=%f, a1=%f)' % (
utils.to_pretty_str(gain), rise_time, self.b0_up, -self.na1_up, fall_time, self.b0_down, -self.na1_down))
def reset(self):
self.z1 = 0.0
def get_state(self):
return self.z1
def set_state(self, s):
self.z1 = s
def _set_freqs(self, rise_time, fall_time, gain=None):
if rise_time == 0.0 or fall_time == 0.0:
raise ValueError('Rise & fall times must not be zero')
elif rise_time < 0.0 or fall_time < 0.0:
raise ValueError('Rise & fall times must be positive')
if gain is not None:
self.gain = gain
self.na1_up = exp(-1.0 / rise_time)
self.na1_down = exp(-1.0 / fall_time)
self.b0_up = (1.0 - self.na1_up) * self.gain
self.b0_down = (1.0 - self.na1_down) * self.gain
assert self.na1_down > 0.0
assert self.b0_down > 0.0
assert self.na1_up > 0.0
assert self.b0_up > 0.0
def process_sample(self, x):
if x > self.z1:
b0 = self.b0_up
na1 = self.na1_up
else:
b0 = self.b0_down
na1 = self.na1_down
y = self.z1 = (b0 * x) + (na1 * self.z1)
return y
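# Worked example of the coefficient math above (illustrative): at 44.1 kHz a
# 1 ms rise time is 44.1 samples, so a1 = exp(-1/44.1) ~= 0.97758 and
# b0 = (1 - a1) * gain ~= 0.02242 for unity gain. The equivalent -3 dB cutoff
# is 1 / (2*pi*0.001 s) ~= 159.2 Hz.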
def test(verbose=False):
from unit_test import unit_test
from unit_test.processor_unit_test import ProcessorUnitTest
# If rise time = fall time, BidirectionalFilter is identical to one-pole filter
twopi = 2.0 * np.pi
tests = [
ProcessorUnitTest(
"BidirectionalOnePoleFilter(both 1 ms @ 44.1 kHz)",
lambda: BidirectionalOnePoleFilter(44.1 / twopi, 44.1 / twopi),
freqs_to_test=np.array([10., 100., 1000., 10000.]) / 44100.,
expected_freq_response_range_dB=[(-0.1, 0.0), (-3.0, 0.0), (-4.0, -2.0), (-24.0, -18.0)],
expected_phase_response_range_degrees=None, # [(), (), (), None],
deterministic=True,
linear=True
),
ProcessorUnitTest(
"BidirectionalOnePoleFilter(both 1 ms @ 44.1 kHz, 3 dB gain)",
lambda: BidirectionalOnePoleFilter(44.1 / twopi, 44.1 / twopi, gain=utils.from_dB(3.0)),
freqs_to_test=np.array([10., 100., 1000., 10000.]) / 44100.,
expected_freq_response_range_dB=[(2.9, 3.0), (0.0, 3.0), (-1.0, 1.0), (-21.0, -15.0)],
expected_phase_response_range_degrees=None, # [(), (), (), None],
deterministic=True,
linear=True
),
ProcessorUnitTest(
"BidirectionalOnePoleFilter(both 10 ms @ 44.1 kHz)",
lambda: BidirectionalOnePoleFilter(44100. / (100. * twopi), 44100. / (100. * twopi)),
freqs_to_test=np.array([10., 100., 1000., 10000.]) / 44100.,
expected_freq_response_range_dB=[(-3.0, 0.0), (-4.0, -2.0), (-21.0, -20.0), (-48.0, -38.0)],
expected_phase_response_range_degrees=None, # [(), (), (), None],
deterministic=True,
linear=True
),
]
return unit_test.run_unit_tests(tests, verbose=verbose)
def plot(args):
from matplotlib import pyplot as plt
from generation import signal_generation
sample_rate = 48000.
freq = 100.
n_samp = int(round(sample_rate))
n_samp_sin = int(round(sample_rate / freq * 5))
rise_time_ms = 1.
fall_time_ms = 200.
rise_time = rise_time_ms / 1000.0 * sample_rate
fall_time = fall_time_ms / 1000.0 * sample_rate
print('Rise time: %.1f ms = %.1f samples' % (rise_time_ms, rise_time))
print('Fall time: %.1f ms = %.1f samples' % (fall_time_ms, fall_time))
filter = BidirectionalOnePoleFilter(rise_time=rise_time, fall_time=fall_time, verbose=True)
x = np.concatenate((signal_generation.gen_sine(freq / sample_rate, n_samp_sin), np.zeros(n_samp - n_samp_sin)))
x = np.abs(x)
y = filter.process_vector(x)
plt.figure()
t = signal_generation.sample_time_index(n_samp, sample_rate) * 1000.
plt.subplot(2, 1, 1)
plt.plot(t, x, label='Input')
plt.plot(t, y, label='Output')
plt.grid()
plt.xlim([0, (n_samp_sin * 1.25) / sample_rate * 1000])
plt.title('Bidirectional filter, rise %s ms, fall %s ms' % (
utils.to_pretty_str(rise_time_ms), utils.to_pretty_str(fall_time_ms)))
plt.xlabel('Time (ms)')
plt.subplot(2, 1, 2)
plt.plot(t, x, label='Input')
plt.plot(t, y, label='Output')
plt.axhline(1.0 / exp(1.0), color='red', label='fall time measurement value')
plt.legend()
plt.grid()
plt.xlabel('Time (ms)')
plt.show()
def main(args):
plot(args)
| 29.875 | 122 | 0.65329 | 845 | 5,258 | 3.887574 | 0.194083 | 0.015221 | 0.014612 | 0.024353 | 0.480061 | 0.391781 | 0.343075 | 0.343075 | 0.290715 | 0.290715 | 0 | 0.073003 | 0.197604 | 5,258 | 175 | 123 | 30.045714 | 0.705617 | 0.079308 | 0 | 0.311475 | 0 | 0.008197 | 0.124462 | 0.020026 | 0 | 0 | 0 | 0 | 0.032787 | 1 | 0.07377 | false | 0 | 0.065574 | 0.008197 | 0.172131 | 0.02459 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8cb81479759f11eaac016b1e7b2537c9a5ef24da | 2,568 | py | Python | scripts/make_http.py | raymondpoling/rerun-tv | e10122ecad12f8ec427c28317db018be57548f60 | [
"MIT"
] | null | null | null | scripts/make_http.py | raymondpoling/rerun-tv | e10122ecad12f8ec427c28317db018be57548f60 | [
"MIT"
] | 26 | 2020-03-21T18:26:18.000Z | 2020-06-23T13:33:01.000Z | scripts/make_http.py | raymondpoling/rerun-tv | e10122ecad12f8ec427c28317db018be57548f60 | [
"MIT"
] | null | null | null | #!/usr/bin/python
'''This script takes files from locator as identified by meta,
and ensures every file reference has an http reference. Mostly only
useful locally.'''
from urllib.parse import quote
from re import sub, match
import requests
META_SERVER = "http://meta:4004"
LOCATOR_SERVER = "http://locator:4005"
MATCH_PAT = r"file://CrystalBall/home/ruguer/Videos(.*)"
REPLACE_PAT = r"/video\1"
HTTP_EXISTS = r"http://archive/video.*"
def get_series():
'''Get a list of all series meta knows'''
result = requests.get(url=META_SERVER + "/series")
return result.json()['results']
def test_results(locations):
'''Check if a set of locations has an HTTP_EXISTS'''
for location in locations:
if match(HTTP_EXISTS, location):
return True
return False
def create_http(locations):
'''Create the http resource based on existing file resource'''
for location in locations:
if match(MATCH_PAT, location):
return sub(MATCH_PAT, REPLACE_PAT, location)
return None
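# Example of the rewrite create_http performs (illustrative path):
#
#     create_http(["file://CrystalBall/home/ruguer/Videos/show/ep1.mkv"])
#     # -> "/video/show/ep1.mkv"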
def walk_series(series):
'''For every series, get the catalog_ids of episodes and use
it to check for existing locations.'''
for serie in series:
print("Working on serie " + serie)
result = requests.get(url=META_SERVER +
"/series/" +
quote(serie, safe=''))
print("For " + serie + " processing: " + str(result.json()['catalog_ids']))
catalog_ids = result.json()['catalog_ids']
for item in catalog_ids:
print("Processing item: " + item)
result = requests.get(url=LOCATOR_SERVER +
"/catalog-id/" +
item)
print("Got result: " + str(result.json()))
locations = result.json()['files']
print("Existing locations for " + item + ": " + str(locations))
if not test_results(locations):
http = create_http(locations)
print("New location is: " + str(http))
if http is not None:
result = requests.post(url=LOCATOR_SERVER +
"/http/archive/" + item,
json={"path": http})
print("Result is " + str(result))
else:
print("No file location for " + item)
def main():
'''Run the program'''
series = get_series()
print("All series: " + str(series))
walk_series(series)
if __name__ == "__main__":
    main()
| 35.178082 | 83 | 0.567368 | 298 | 2,568 | 4.798658 | 0.345638 | 0.034965 | 0.035664 | 0.041958 | 0.090909 | 0.090909 | 0.05035 | 0 | 0 | 0 | 0 | 0.005134 | 0.317368 | 2,568 | 72 | 84 | 35.666667 | 0.81061 | 0.159657 | 0 | 0.038462 | 0 | 0 | 0.156854 | 0.019312 | 0 | 0 | 0 | 0 | 0 | 1 | 0.096154 | false | 0 | 0.057692 | 0 | 0.25 | 0.173077 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8cb89583881ab9de53c2424ee39af1aba95fca7b | 783 | py | Python | RNNLM_simple_examples/3-combination/timitChmap.py | We-can-apply-GPU/RNNLM | c4bfd7f5e833f0a60a25c4405fd3c7136e5600ad | [
"MIT"
] | null | null | null | RNNLM_simple_examples/3-combination/timitChmap.py | We-can-apply-GPU/RNNLM | c4bfd7f5e833f0a60a25c4405fd3c7136e5600ad | [
"MIT"
] | null | null | null | RNNLM_simple_examples/3-combination/timitChmap.py | We-can-apply-GPU/RNNLM | c4bfd7f5e833f0a60a25c4405fd3c7136e5600ad | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
File: timit_chmap.py
Description: map
"""
import sys
out = open('timit' + sys.argv[1],'w')
def infile():
out.write('id,sequence\n')
    with open(sys.argv[1], 'r') as sen, open("nameList", 'r') as nameList:
for name,line in zip(nameList,sen):
out.write(name.rstrip()+ ',')
l = line.rstrip().split(',')
            for i in range(0, len(l)):
                _str = l[i].split()
                for j in range(0, len(_str)):  # separate index avoids shadowing the outer i
                    chmap(_str[j].lower())
out.write('\n')
def chmap(_str):
    # use a context manager so the mapping file is closed after each lookup
    with open('timit.chmap', 'r') as tim:
        for line in tim:
            l = line.split()
            if l[0] == _str:
                out.write(l[1])
                break
if __name__ == "__main__":
    infile()
    out.close()  # flush and release the output file
| 24.46875 | 72 | 0.492976 | 109 | 783 | 3.412844 | 0.440367 | 0.086022 | 0.043011 | 0.05914 | 0.107527 | 0.107527 | 0.107527 | 0 | 0 | 0 | 0 | 0.013011 | 0.312899 | 783 | 31 | 73 | 25.258065 | 0.678439 | 0.097063 | 0 | 0 | 0 | 0 | 0.075931 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.045455 | 0 | 0.136364 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8cba0f0d59c021f8b11874424c9b8d10860ea56c | 2,526 | py | Python | food/app/contourus.py | JUNNETWORKS/akatsuki_dormitory_food_bot | 27e5b7f3d4d8c304b8db844640a841bfd0946614 | [
"MIT"
] | null | null | null | food/app/contourus.py | JUNNETWORKS/akatsuki_dormitory_food_bot | 27e5b7f3d4d8c304b8db844640a841bfd0946614 | [
"MIT"
] | null | null | null | food/app/contourus.py | JUNNETWORKS/akatsuki_dormitory_food_bot | 27e5b7f3d4d8c304b8db844640a841bfd0946614 | [
"MIT"
] | null | null | null | # Program that crops just the menu table out of the original image
import numpy as np
import cv2
def transform_by4(img, points):
""" 4点を指定してトリミングする。 """
points = sorted(points, key=lambda x: x[1]) # yが小さいもの順に並び替え。
top = sorted(points[:2], key=lambda x: x[0]) # 前半二つは四角形の上。xで並び替えると左右も分かる。
bottom = sorted(points[2:], key=lambda x: x[0], reverse=True) # 後半二つは四角形の下。同じくxで並び替え。
points = np.array(top + bottom, dtype='float32') # 分離した二つを再結合。
width = max(np.sqrt(((points[0][0] - points[2][0]) ** 2) * 2),
np.sqrt(((points[1][0] - points[3][0]) ** 2) * 2))
height = max(np.sqrt(((points[0][1] - points[2][1]) ** 2) * 2),
np.sqrt(((points[1][1] - points[3][1]) ** 2) * 2))
dst = np.array([
np.array([0, 0]),
np.array([width - 1, 0]),
np.array([width - 1, height - 1]),
np.array([0, height - 1]),
], np.float32)
    trans = cv2.getPerspectiveTransform(points, dst)  # build the perspective matrix from the point correspondences
    return cv2.warpPerspective(img, trans, (int(width), int(height)))  # crop using the perspective transform
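# Illustrative call (assumed shapes): given a BGR image `im` and four corner
# points in any order, transform_by4 warps that quadrilateral upright:
#
#     pts = np.array([[10, 12], [200, 8], [205, 150], [8, 148]])  # example corners
#     cropped = transform_by4(im, pts)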
# function used by the __main__ entry point below
def trimming(img_path, save_path):
    im = cv2.imread(img_path)  # load the image
lines = im.copy()
    # extract contours
canny = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
canny = cv2.GaussianBlur(canny, (5, 5), 0)
canny = cv2.Canny(canny, 50, 100)
    cnts = cv2.findContours(canny, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[1]  # contour list (index [1] matches OpenCV 3's return tuple)
    cnts.sort(key=cv2.contourArea, reverse=True)  # sort by area, largest first
warp = None
for i, c in enumerate(cnts):
arclen = cv2.arcLength(c, True)
approx = cv2.approxPolyDP(c, 0.02 * arclen, True)
        level = 1 - float(i) / len(cnts)  # used only to color the contours by area rank
if len(approx) == 4:
cv2.drawContours(lines, [approx], -1, (0, 0, 255 * level), 2)
if warp is None:
                warp = approx.copy()  # keep the largest quadrilateral as the crop target
else:
cv2.drawContours(lines, [approx], -1, (0, 255 * level, 0), 2)
for pos in approx:
cv2.circle(lines, tuple(pos[0]), 4, (255 * level, 0, 0))
# cv2.imshow('edge', lines)
if warp is not None:
        warped = transform_by4(im, warp[:, 0, :])  # crop just the detected quadrilateral
# cv2.imshow('warp', warped)
cv2.imwrite(save_path, warped)
if __name__ == '__main__':
img_path = "image/akatsuki2018_02.jpg"
save_path = "image/akatsuki2018_02_trimming.jpg"
trimming(img_path,save_path) | 36.085714 | 102 | 0.578385 | 322 | 2,526 | 4.459627 | 0.350932 | 0.029248 | 0.033426 | 0.022981 | 0.168524 | 0.094708 | 0.034819 | 0.034819 | 0 | 0 | 0 | 0.060557 | 0.261283 | 2,526 | 70 | 103 | 36.085714 | 0.709003 | 0.142518 | 0 | 0 | 0 | 0 | 0.035663 | 0.028434 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042553 | false | 0 | 0.042553 | 0 | 0.106383 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8cbb513ab8485bb7320c20d3fabcccd70b167f37 | 5,586 | py | Python | kochira/services/social/remind.py | gnowxilef/kochira | 817b82ad0f0893a58e8d44f8db79ddd6fc0eae77 | [
"MS-PL"
] | null | null | null | kochira/services/social/remind.py | gnowxilef/kochira | 817b82ad0f0893a58e8d44f8db79ddd6fc0eae77 | [
"MS-PL"
] | 1 | 2019-05-13T22:02:18.000Z | 2019-05-13T22:02:18.000Z | kochira/services/social/remind.py | gnowxilef/kochira | 817b82ad0f0893a58e8d44f8db79ddd6fc0eae77 | [
"MS-PL"
] | 1 | 2019-05-13T21:22:02.000Z | 2019-05-13T21:22:02.000Z | """
Timed and join reminders.
Enables the bot to record and play reminders after timed intervals or on user
join.
"""
import humanize
import parsedatetime
from datetime import datetime, timedelta
from peewee import TextField, CharField, DateTimeField, IntegerField
import math
from kochira.db import Model
from kochira.service import Service
service = Service(__name__, __doc__)
cal = parsedatetime.Calendar()
def parse_time(s):
result, what = cal.parse(s)
dt = None
if what in (1, 2):
dt = datetime(*result[:6])
elif what == 3:
dt = result
return dt
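# Illustrative behaviour of parse_time (results depend on parsedatetime's
# locale and the current clock):
#
#     parse_time("in 2 hours")  # -> a datetime roughly two hours from now
#     parse_time("tomorrow")    # -> a datetime for tomorrow
#     parse_time("gibberish")   # -> None (cal.parse reports what == 0)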
@service.model
class Reminder(Model):
message = TextField()
origin = CharField(255)
who = CharField(255)
who_n = CharField(255)
channel = CharField(255)
client_name = CharField(255)
ts = DateTimeField()
duration = IntegerField(null=True)
@service.setup
def reschedule_reminders(ctx):
for reminder in Reminder.select() \
.where(~(Reminder.duration >> None)):
dt = (reminder.ts + timedelta(seconds=reminder.duration)) - datetime.utcnow()
if dt < timedelta(0):
reminder.delete_instance()
continue
ctx.bot.scheduler.schedule_after(dt, play_timed_reminder, reminder)
@service.task
def play_timed_reminder(ctx, reminder):
needs_archive = False
if reminder.client_name in ctx.bot.clients:
client = ctx.bot.clients[reminder.client_name]
if reminder.channel in client.channels:
if reminder.who in client.channels[reminder.channel]["users"]:
client.message(reminder.channel, ctx._("{who}: {origin} wanted you to know: {message}").format(
who=reminder.who,
origin=reminder.origin,
message=reminder.message
))
else:
needs_archive = True
reminder.duration = None
reminder.save()
if not needs_archive:
reminder.delete_instance()
@service.command(r"(?:remind|tell) (?P<who>\S+) (?:about|to|that) (?P<message>.+) (?P<duration>(?:in|on|after) .+|at .+|tomorrow)$", mention=True, priority=1)
@service.command(r"(?:remind|tell) (?P<who>\S+) (?P<duration>(?:in|on|after) .+|at .+|tomorrow) (?:about|to|that) (?P<message>.+)$", mention=True, priority=1)
def add_timed_reminder(ctx, who, duration, message):
"""
Add timed reminder.
    Add a reminder that will play after `duration` has elapsed. If the user has left
the channel, the reminder will play as soon as they return.
"""
now = datetime.now()
t = parse_time(duration)
if who.lower() == "me" and who not in ctx.client.channels[ctx.target]["users"]:
who = ctx.origin
if t is None:
ctx.respond(ctx._("Sorry, I don't understand that time."))
return
    dt = timedelta(seconds=int(math.ceil((t - now).total_seconds())))
if dt < timedelta(0):
ctx.respond(ctx._("Uh, that's in the past."))
return
# persist reminder to the DB
reminder = Reminder.create(who=who, who_n=ctx.client.normalize(who),
channel=ctx.target, origin=ctx.origin,
message=message, client_name=ctx.client.name,
ts=datetime.utcnow(),
duration=dt.total_seconds())
reminder.save()
ctx.respond(ctx._("Okay, I'll let {who} know in around {dt}.").format(
who=who,
dt=humanize.naturaltime(-dt)
))
# ... but also schedule it
ctx.bot.scheduler.schedule_after(dt, play_timed_reminder, reminder)
@service.command(r"(?:remind|tell) (?P<who>\S+)(?: about| to| that)? (?P<message>.+)$", mention=True)
def add_reminder(ctx, who, message):
"""
Add reminder.
Add a reminder that will play when the user joins the channel or next speaks on
the channel.
"""
if who.lower() == "me" and who not in ctx.client.channels[ctx.target]["users"]:
who = ctx.origin
Reminder.create(who=who, who_n=ctx.client.normalize(who),
channel=ctx.target, origin=ctx.origin, message=message,
client_name=ctx.client.name, ts=datetime.utcnow(),
duration=None).save()
ctx.respond(ctx._("Okay, I'll let {who} know.").format(
who=who
))
@service.hook("channel_message")
def play_reminder_on_message(ctx, target, origin, message):
play_reminder(ctx, ctx.target, ctx.origin)
@service.hook("join")
def play_reminder_on_join(ctx, channel, user):
play_reminder(ctx, channel, user)
def play_reminder(ctx, target, origin):
now = datetime.utcnow()
origin = ctx.client.normalize(origin)
for reminder in Reminder.select().where(Reminder.who_n == origin,
Reminder.channel == target,
Reminder.client_name == ctx.client.name,
Reminder.duration >> None) \
.order_by(Reminder.ts.asc()):
# TODO: display time
dt = now - reminder.ts
ctx.message(ctx._("{who}, {origin} wanted you to know: {message}").format(
who=reminder.who,
origin=reminder.origin,
message=reminder.message
))
Reminder.delete().where(Reminder.who_n == origin,
Reminder.channel == target,
Reminder.client_name == ctx.client.name,
Reminder.duration >> None).execute()
| 30.358696 | 158 | 0.60222 | 676 | 5,586 | 4.889053 | 0.221893 | 0.033283 | 0.024206 | 0.022995 | 0.402118 | 0.402118 | 0.402118 | 0.333132 | 0.324054 | 0.324054 | 0 | 0.005643 | 0.270319 | 5,586 | 183 | 159 | 30.52459 | 0.805201 | 0.080201 | 0 | 0.245614 | 0 | 0.026316 | 0.106651 | 0.011019 | 0 | 0 | 0 | 0.005464 | 0 | 1 | 0.070175 | false | 0 | 0.061404 | 0 | 0.236842 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8cbb7cffa6bc98bcaaf9b70f5b56f78421f6c158 | 2,340 | py | Python | preprocessing/czxc.py | GPrathap/OpenBCIPython | 0f5be167fb09d31c15885003eeafec8cdc08dbfa | [
"MIT"
] | 1 | 2021-11-07T12:01:08.000Z | 2021-11-07T12:01:08.000Z | preprocessing/czxc.py | GPrathap/OpenBCIPython | 0f5be167fb09d31c15885003eeafec8cdc08dbfa | [
"MIT"
] | null | null | null | preprocessing/czxc.py | GPrathap/OpenBCIPython | 0f5be167fb09d31c15885003eeafec8cdc08dbfa | [
"MIT"
] | 1 | 2020-10-15T08:35:01.000Z | 2020-10-15T08:35:01.000Z | # import time, random
# import math
# from collections import deque
#
# import librosa
# import matplotlib.animation as animation
# from matplotlib import pyplot as plt
# import numpy as np
# start = time.time()
#
#
# class RealtimePlot:
# def __init__(self, axes, max_entries=100):
# self.axis_x = deque(maxlen=max_entries)
# self.axis_y = deque(maxlen=max_entries)
# self.axes = axes
# self.max_entries = max_entries
#
# self.lineplot, = axes.plot([], [], "ro-")
# self.axes.set_autoscaley_on(True)
#
# def add(self, x, y):
# self.axis_x.extend(x)
# self.axis_y.extend(y)
# self.lineplot.set_data(self.axis_x, self.axis_y)
# self.axes.set_xlim(self.axis_x[0], self.axis_x[-1] + 1e-15)
# self.axes.relim()
# self.axes.autoscale_view() # rescale the y-axis
#
# def animate(self, figure, callback, interval=50):
# def wrapper(frame_index):
# self.add(*callback(frame_index))
# self.axes.relim()
# self.axes.autoscale_view() # rescale the y-axis
# return self.lineplot
# animation.FuncAnimation(figure, wrapper, interval=interval)
#
#
# def main():
# fig, axes = plt.subplots()
# display = RealtimePlot(axes)
# display.animate(fig, lambda frame_index: (time.time() - start, random.random() * 100))
# while True:
# ydata = [random.randint(0, i) * i for i in range(0, 20)]
# # ydata = librosa.amplitude_to_db(librosa.stft(ydata), ref=np.max)
# xdata = [i for i in range(0, len(ydata))]
# display.add(xdata, ydata)
# plt.pause(0.001)
#
#
# if __name__ == "__main__": main()
import random
import time
from matplotlib import pyplot as plt
from matplotlib import animation
class RegrMagic(object):
"""Mock for function Regr_magic()
"""
def __init__(self):
self.x = 0
def __call__(self):
time.sleep(random.random())
self.x += 1
return self.x, random.random()
regr_magic = RegrMagic()
def frames():
while True:
yield regr_magic()
fig = plt.figure()
x = []
y = []
def animate(args):
x.append(args[0])
y.append(args[1])
return plt.plot(x, y, color='g')
anim = animation.FuncAnimation(fig, animate, frames=frames, interval=1000)
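# Note (version-dependent): with a generator supplying `frames`, newer
# Matplotlib releases cache every frame by default; passing
# cache_frame_data=False (or a save_count) to FuncAnimation avoids the
# unbounded cache and its warning.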
plt.show()
| 26.292135 | 92 | 0.613248 | 311 | 2,340 | 4.463023 | 0.321543 | 0.04611 | 0.032421 | 0.037464 | 0.170029 | 0.134006 | 0.070605 | 0.070605 | 0.070605 | 0.070605 | 0 | 0.016978 | 0.244872 | 2,340 | 88 | 93 | 26.590909 | 0.768534 | 0.696581 | 0 | 0 | 0 | 0 | 0.001536 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.166667 | 0 | 0.458333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8cbcac92e3f469b2736b528b5d747e23882c5486 | 1,071 | py | Python | authors/apps/core/utils/article_management.py | andela/ah-backend-thanos | baf7f20a023cc3c3ecae0fcf91bb7d9165e79fc8 | [
"BSD-3-Clause"
] | null | null | null | authors/apps/core/utils/article_management.py | andela/ah-backend-thanos | baf7f20a023cc3c3ecae0fcf91bb7d9165e79fc8 | [
"BSD-3-Clause"
] | 42 | 2018-10-24T08:21:07.000Z | 2021-06-10T20:54:39.000Z | authors/apps/core/utils/article_management.py | andela/ah-backend-thanos | baf7f20a023cc3c3ecae0fcf91bb7d9165e79fc8 | [
"BSD-3-Clause"
] | 2 | 2018-11-05T08:56:42.000Z | 2019-05-03T12:40:43.000Z | from rest_framework.exceptions import NotFound
from authors.apps.articles import serializers
from authors.apps.articles.models import Article
from authors.apps.core.utils.user_management import get_id_from_token
def article_not_found(article_id):
if not Article.objects.filter(pk=article_id).exists():
raise NotFound(detail="Article Not found",)
def validate_article_get_user(request, article_id):
article_not_found(article_id)
user, author_username = get_id_from_token(request)
return user
def add_views_to_article(current_user_id, article):
if current_user_id != article.author.id:
views_count = article.views_count
views_count += 1
fresh_article = {
'views_count': views_count
}
serializer = serializers.ArticlesUpdatesSerializer(
data=fresh_article)
serializer.is_valid(raise_exception=True)
serializer.update(article, fresh_article)
def get_article_from_db(article_id):
article_not_found(article_id)
return Article.objects.get(pk=article_id)
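# Illustrative flow (assumed view-layer usage of the helpers above):
#
#     user_id = validate_article_get_user(request, article_id)
#     article = get_article_from_db(article_id)
#     add_views_to_article(user_id, article)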
| 31.5 | 69 | 0.747899 | 141 | 1,071 | 5.361702 | 0.35461 | 0.083333 | 0.079365 | 0.087302 | 0.190476 | 0.087302 | 0.087302 | 0 | 0 | 0 | 0 | 0.001135 | 0.177404 | 1,071 | 33 | 70 | 32.454545 | 0.856981 | 0 | 0 | 0.08 | 0 | 0 | 0.026144 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.16 | false | 0 | 0.16 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8cbd44c645e24937e3ed85151fcb260d0f6a7e0a | 16,907 | py | Python | saleor/plugins/openid_connect/plugin.py | nestfiy/saleor | 6fce3bc5c0ca72ac28db99553e6d2b49249c6dac | [
"CC-BY-4.0"
] | 1,392 | 2021-10-06T15:54:28.000Z | 2022-03-31T20:50:55.000Z | saleor/plugins/openid_connect/plugin.py | nestfiy/saleor | 6fce3bc5c0ca72ac28db99553e6d2b49249c6dac | [
"CC-BY-4.0"
] | 888 | 2021-10-06T10:48:54.000Z | 2022-03-31T11:00:30.000Z | saleor/plugins/openid_connect/plugin.py | nestfiy/saleor | 6fce3bc5c0ca72ac28db99553e6d2b49249c6dac | [
"CC-BY-4.0"
] | 538 | 2021-10-07T16:21:27.000Z | 2022-03-31T22:58:57.000Z | import logging
from typing import Optional, Tuple
from authlib.common.errors import AuthlibBaseError
from authlib.integrations.requests_client import OAuth2Session
from django.core import signing
from django.core.exceptions import ValidationError
from django.core.handlers.wsgi import WSGIRequest
from jwt import ExpiredSignatureError, InvalidTokenError
from requests import HTTPError, PreparedRequest
from ...account.models import User
from ...core.auth import get_token_from_request
from ...core.jwt import (
JWT_REFRESH_TOKEN_COOKIE_NAME,
PERMISSIONS_FIELD,
get_user_from_access_payload,
get_user_from_payload,
jwt_decode,
)
from ...core.permissions import get_permissions_codename, get_permissions_from_names
from ..base_plugin import BasePlugin, ConfigurationTypeField, ExternalAccessTokens
from ..error_codes import PluginErrorCode
from ..models import PluginConfiguration
from . import PLUGIN_ID
from .const import SALEOR_STAFF_PERMISSION
from .dataclasses import OpenIDConnectConfig
from .exceptions import AuthenticationError
from .utils import (
OAUTH_TOKEN_REFRESH_FIELD,
create_tokens_from_oauth_payload,
get_incorrect_fields,
get_or_create_user_from_payload,
get_parsed_id_token,
get_saleor_permission_names,
get_saleor_permissions_qs_from_scope,
get_user_from_oauth_access_token,
get_user_from_token,
is_owner_of_token_valid,
validate_refresh_token,
)
logger = logging.getLogger(__name__)
class OpenIDConnectPlugin(BasePlugin):
PLUGIN_ID = PLUGIN_ID
DEFAULT_CONFIGURATION = [
{"name": "client_id", "value": None},
{"name": "client_secret", "value": None},
{"name": "enable_refresh_token", "value": True},
{"name": "oauth_authorization_url", "value": None},
{"name": "oauth_token_url", "value": None},
{"name": "json_web_key_set_url", "value": None},
{"name": "oauth_logout_url", "value": None},
{"name": "user_info_url", "value": None},
{"name": "audience", "value": None},
{"name": "use_oauth_scope_permissions", "value": False},
]
PLUGIN_NAME = "OpenID Connect"
CONFIGURATION_PER_CHANNEL = False
CONFIG_STRUCTURE = {
"client_id": {
"type": ConfigurationTypeField.STRING,
"help_text": (
"Your Client ID required to authenticate on the provider side."
),
"label": "Client ID",
},
"client_secret": {
"type": ConfigurationTypeField.SECRET,
"help_text": (
"Your client secret required to authenticate on provider side."
),
"label": "Client Secret",
},
"enable_refresh_token": {
"type": ConfigurationTypeField.BOOLEAN,
"help_text": (
"Determine if the refresh token should be also fetched from provider. "
"By disabling it, users will need to re-login after the access token "
"expired. By enabling it, frontend apps will be able to refresh the "
"access token. OAuth provider needs to have included scope "
"`offline_access`."
),
"label": "Enable refreshing token",
},
"oauth_authorization_url": {
"type": ConfigurationTypeField.STRING,
"help_text": "The endpoint used to redirect user to authorization page.",
"label": "OAuth Authorization URL",
},
"oauth_token_url": {
"type": ConfigurationTypeField.STRING,
"help_text": (
"The endpoint to exchange an Authorization Code for a Token."
),
"label": "OAuth Token URL",
},
"json_web_key_set_url": {
"type": ConfigurationTypeField.STRING,
"help_text": (
"The JSON Web Key Set (JWKS) is a set of keys containing the public "
"keys used to verify any JSON Web Token (JWT) issued by the "
"authorization server and signed using the RS256 signing algorithm."
),
"label": "JSON Web Key Set URL",
},
"oauth_logout_url": {
"type": ConfigurationTypeField.STRING,
"help_text": (
"The URL for logging out the user from the OAuth provider side."
),
"label": "OAuth logout URL",
},
"user_info_url": {
"type": ConfigurationTypeField.STRING,
"help_text": (
"The URL which can be used to fetch user details by using an access "
"token."
),
"label": "User info URL",
},
"audience": {
"type": ConfigurationTypeField.STRING,
"help_text": (
"The OAuth resource identifier. If provided, Saleor will define "
"audience for each authorization request."
),
"label": "Audience",
},
"use_oauth_scope_permissions": {
"type": ConfigurationTypeField.BOOLEAN,
"help_text": (
"Use OAuth scope permissions to grant a logged-in user access to "
"protected resources. Your OAuth provider needs to have defined "
"Saleor's permission scopes in format saleor:<saleor-perm>. Check"
" Saleor docs for more details."
),
"label": "Use OAuth scope permissions",
},
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
        # Convert the configuration to a dict for easier access to its elements
configuration = {item["name"]: item["value"] for item in self.configuration}
self.config = OpenIDConnectConfig(
client_id=configuration["client_id"],
client_secret=configuration["client_secret"],
enable_refresh_token=configuration["enable_refresh_token"],
json_web_key_set_url=configuration["json_web_key_set_url"],
authorization_url=configuration["oauth_authorization_url"],
token_url=configuration["oauth_token_url"],
logout_url=configuration["oauth_logout_url"],
audience=configuration["audience"],
use_scope_permissions=configuration["use_oauth_scope_permissions"],
user_info_url=configuration["user_info_url"],
)
# Determine if all fields required to use the OAuth access token
# as Saleor's authorization token are defined.
self.use_oauth_access_token = bool(
self.config.user_info_url and self.config.json_web_key_set_url
)
# Determine if all fields required to process the
# authorization flow are defined.
self.use_authorization_flow = bool(
self.config.json_web_key_set_url
and self.config.authorization_url
and self.config.token_url
)
self.oauth = self._get_oauth_session()
@classmethod
def validate_plugin_configuration(
cls, plugin_configuration: "PluginConfiguration", **kwargs
):
"""Validate if provided configuration is correct."""
incorrect_fields = get_incorrect_fields(plugin_configuration)
if incorrect_fields:
error_msg = "To enable a plugin, you need to provide values for this field."
raise ValidationError(
{
field: ValidationError(
error_msg, code=PluginErrorCode.PLUGIN_MISCONFIGURED.value
)
for field in incorrect_fields
}
)
def _get_oauth_session(self):
scope = "openid profile email"
if self.config.use_scope_permissions:
permissions = [f"saleor:{perm}" for perm in get_permissions_codename()]
permissions.append(SALEOR_STAFF_PERMISSION)
scope_permissions = " ".join(permissions)
scope += f" {scope_permissions}"
if self.config.enable_refresh_token:
scope += " offline_access"
return OAuth2Session(
client_id=self.config.client_id,
client_secret=self.config.client_secret,
scope=scope,
)
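# Note: with scope permissions and refresh tokens both enabled, the scope built
# above looks roughly like (permission codenames are hypothetical):
#   "openid profile email saleor:manage_orders saleor:staff offline_access"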
def _use_scope_permissions(self, user, scope):
user_permissions = []
if scope:
permissions = get_saleor_permissions_qs_from_scope(scope)
user_permissions = get_saleor_permission_names(permissions)
user.effective_permissions = permissions
return user_permissions
def external_obtain_access_tokens(
self, data: dict, request: WSGIRequest, previous_value
) -> ExternalAccessTokens:
if not self.active:
return previous_value
if not self.use_authorization_flow:
return previous_value
code = data.get("code")
if not code:
msg = "Missing required field - code"
raise ValidationError(
{"code": ValidationError(msg, code=PluginErrorCode.NOT_FOUND.value)}
)
state = data.get("state")
if not state:
msg = "Missing required field - state"
raise ValidationError(
{"state": ValidationError(msg, code=PluginErrorCode.NOT_FOUND.value)}
)
try:
state_data = signing.loads(state)
except signing.BadSignature:
msg = "Bad signature"
raise ValidationError(
{"state": ValidationError(msg, code=PluginErrorCode.INVALID.value)}
)
redirect_uri = state_data.get("redirectUri")
if not redirect_uri:
msg = "The state value is incorrect"
raise ValidationError(
{"code": ValidationError(msg, code=PluginErrorCode.INVALID.value)}
)
token_data = self.oauth.fetch_token(
self.config.token_url, code=code, redirect_uri=redirect_uri
)
parsed_id_token = get_parsed_id_token(
token_data, self.config.json_web_key_set_url
)
user = get_or_create_user_from_payload(
parsed_id_token, self.config.authorization_url
)
user_permissions = []
if self.config.use_scope_permissions:
scope = token_data.get("scope")
user_permissions = self._use_scope_permissions(user, scope)
if not user.is_staff and bool(
SALEOR_STAFF_PERMISSION in scope or user_permissions
):
user.is_staff = True
user.save(update_fields=["is_staff"])
elif user.is_staff and not bool(
SALEOR_STAFF_PERMISSION in scope or user_permissions
):
user.is_staff = False
user.save(update_fields=["is_staff"])
tokens = create_tokens_from_oauth_payload(
token_data, user, parsed_id_token, user_permissions, owner=self.PLUGIN_ID
)
return ExternalAccessTokens(user=user, **tokens)
def external_authentication_url(
self, data: dict, request: WSGIRequest, previous_value
) -> dict:
if not self.active:
return previous_value
if not self.use_authorization_flow:
return previous_value
redirect_uri = data.get("redirectUri")
if not redirect_uri:
msg = "Missing required field - redirectUri"
raise ValidationError(
{
"redirectUri": ValidationError(
msg, code=PluginErrorCode.NOT_FOUND.value
)
}
)
kwargs = {
"redirect_uri": redirect_uri,
"state": signing.dumps({"redirectUri": redirect_uri}),
}
if self.config.audience:
kwargs["audience"] = self.config.audience
uri, state = self.oauth.create_authorization_url(
self.config.authorization_url, **kwargs
)
return {"authorizationUrl": uri}
def external_refresh(
self, data: dict, request: WSGIRequest, previous_value
) -> ExternalAccessTokens:
if not self.active:
return previous_value
if not self.use_authorization_flow:
return previous_value
error_code = PluginErrorCode.INVALID.value
if not self.config.enable_refresh_token:
msg = (
"Unable to refresh the token. Support for refreshing tokens is disabled"
)
raise ValidationError(
{"refresh_token": ValidationError(msg, code=error_code)}
)
refresh_token = request.COOKIES.get(JWT_REFRESH_TOKEN_COOKIE_NAME, None)
refresh_token = data.get("refreshToken") or refresh_token
validate_refresh_token(refresh_token, data)
saleor_refresh_token = jwt_decode(refresh_token) # type: ignore
token_endpoint = self.config.token_url
try:
token_data = self.oauth.refresh_token(
token_endpoint,
refresh_token=saleor_refresh_token[OAUTH_TOKEN_REFRESH_FIELD],
)
except (AuthlibBaseError, HTTPError):
logger.warning("Unable to refresh the token.", exc_info=True)
raise ValidationError(
{
"refresh_token": ValidationError(
"Unable to refresh the token.",
code=error_code,
)
}
)
try:
parsed_id_token = get_parsed_id_token(
token_data, self.config.json_web_key_set_url
)
user = get_user_from_token(parsed_id_token)
user_permissions = []
if self.config.use_scope_permissions:
user_permissions = self._use_scope_permissions(
user, token_data.get("scope")
)
tokens = create_tokens_from_oauth_payload(
token_data,
user,
parsed_id_token,
user_permissions,
owner=self.PLUGIN_ID,
)
return ExternalAccessTokens(user=user, **tokens)
except AuthenticationError as e:
raise ValidationError(
{"refreshToken": ValidationError(str(e), code=error_code)}
)
def external_logout(self, data: dict, request: WSGIRequest, previous_value):
if not self.active:
return previous_value
if not self.use_authorization_flow:
return previous_value
if not self.config.logout_url:
# Logout url doesn't exist
return {}
req = PreparedRequest()
req.prepare_url(self.config.logout_url, data)
return {"logoutUrl": req.url}
def external_verify(
self, data: dict, request: WSGIRequest, previous_value
) -> Tuple[Optional[User], dict]:
if not self.active:
return previous_value
if not self.use_authorization_flow:
return previous_value
token = data.get("token")
if not token:
return previous_value
valid = is_owner_of_token_valid(token, owner=self.PLUGIN_ID)
if not valid:
return previous_value
try:
payload = jwt_decode(token)
user = get_user_from_payload(payload)
if not user:
return previous_value
user.is_staff = False
except (ExpiredSignatureError, InvalidTokenError) as e:
raise ValidationError({"token": e})
permissions = payload.get(PERMISSIONS_FIELD)
if permissions is not None:
user.effective_permissions = get_permissions_from_names( # type: ignore
permissions
)
user.is_staff = True
return user, payload
def authenticate_user(self, request: WSGIRequest, previous_value) -> Optional[User]:
if not self.active:
return previous_value
token = get_token_from_request(request)
if not token:
return previous_value
user = previous_value
if self.use_authorization_flow and is_owner_of_token_valid(
token, owner=self.PLUGIN_ID
):
# Check if the token is created by this plugin
payload = jwt_decode(token)
user = get_user_from_access_payload(payload)
return user
if self.use_oauth_access_token:
user = get_user_from_oauth_access_token(
token,
self.config.json_web_key_set_url,
self.config.user_info_url,
self.config.use_scope_permissions,
self.config.audience,
)
return user or previous_value
| 37.571111 | 88 | 0.603478 | 1,747 | 16,907 | 5.576989 | 0.151689 | 0.027712 | 0.029252 | 0.014677 | 0.373704 | 0.283486 | 0.244073 | 0.173253 | 0.127681 | 0.127681 | 0 | 0.000434 | 0.318625 | 16,907 | 449 | 89 | 37.654788 | 0.845313 | 0.022712 | 0 | 0.26 | 0 | 0 | 0.16591 | 0.010358 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025 | false | 0 | 0.0525 | 0 | 0.155 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8cbfa22defd6f8a7192655725dd3cc7e7c6c3a48 | 2,045 | py | Python | rn50_dtr.py | shihan-ma/oneflow | 1705ec0859aefb4ff07a29ae4255fab70c67464b | [
"Apache-2.0"
] | null | null | null | rn50_dtr.py | shihan-ma/oneflow | 1705ec0859aefb4ff07a29ae4255fab70c67464b | [
"Apache-2.0"
] | null | null | null | rn50_dtr.py | shihan-ma/oneflow | 1705ec0859aefb4ff07a29ae4255fab70c67464b | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
import numpy as np
from numpy import random
import oneflow as flow
import oneflow.nn as nn
import resnet50_model
# resnet50 bs 32, use_disjoint_set=False: threshold ~800MB
# memory policy:
# 1: only reuse the memory block with exactly the same size
# 2: reuse the memory block with the same size or larger
dtr_enabled = True
threshold = "800MB"
debug_level = 1
memory_policy = 1
use_disjoint_set = True
print(f'dtr_enabled: {dtr_enabled}, threshold: {threshold}, debug_level: {debug_level}, memory_policy: {memory_policy}, use_disjoint_set: {use_disjoint_set}')
flow.enable_dtr(dtr_enabled, threshold, debug_level, memory_policy, use_disjoint_set)
seed = 20
flow.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
def sync():
flow._oneflow_internal.eager.multi_client.Sync()
def display():
flow._oneflow_internal.dtr.display()
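# sync() blocks until all queued eager ops have finished, so the DTR statistics
# printed by display() describe a settled memory state rather than in-flight ops.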
# init model
model = resnet50_model.resnet50(norm_layer=nn.Identity)
# model.load_state_dict(flow.load('/tmp/abcde'))
flow.save(model.state_dict(), '/tmp/abcde')
criterion = nn.CrossEntropyLoss()
cuda0 = flow.device('cuda:0')
# enable module to use cuda
model.to(cuda0)
criterion.to(cuda0)
learning_rate = 1e-3
# optimizer = flow.optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9)
optimizer = flow.optim.SGD(model.parameters(), lr=learning_rate, momentum=0)
batch_size = 32
# generate random data and label
train_data = flow.tensor(
np.random.uniform(size=(batch_size, 3, 224, 224)).astype(np.float32), device=cuda0
)
train_label = flow.tensor(
(np.random.uniform(size=(batch_size,)) * 1000).astype(np.int32), dtype=flow.int32, device=cuda0
)
# run forward, backward and update parameters
for epoch in range(300):
logits = model(train_data)
loss = criterion(logits, train_label)
print('forward over')
# loss.print_ptr()
loss.backward()
print('backward over')
optimizer.step()
print('step over')
optimizer.zero_grad(True)
if debug_level > 0:
sync()
display()
print('loss: ', loss.numpy())
| 24.638554 | 158 | 0.731051 | 301 | 2,045 | 4.807309 | 0.378738 | 0.03801 | 0.048376 | 0.026261 | 0.201797 | 0.13407 | 0.13407 | 0.13407 | 0.081548 | 0.081548 | 0 | 0.032609 | 0.145232 | 2,045 | 82 | 159 | 24.939024 | 0.795195 | 0.221027 | 0 | 0 | 0 | 0.020833 | 0.132362 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0.104167 | 0 | 0.145833 | 0.104167 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8cc0874cd127f72b8a2914899118cb45a1b605e9 | 3,854 | py | Python | cities/utils.py | olgoncharov/thingsfree | 754e3f6fe28bcb390617d2b964e8eb2f5116d61c | [
"MIT"
] | null | null | null | cities/utils.py | olgoncharov/thingsfree | 754e3f6fe28bcb390617d2b964e8eb2f5116d61c | [
"MIT"
] | 21 | 2020-05-17T10:00:29.000Z | 2022-03-12T00:39:12.000Z | cities/utils.py | olgoncharov/thingsfree | 754e3f6fe28bcb390617d2b964e8eb2f5116d61c | [
"MIT"
] | 5 | 2020-05-09T22:34:38.000Z | 2020-05-17T16:51:03.000Z | import logging
import os
import uuid
from time import sleep
import requests
from django.db import connection, transaction
from dotenv import load_dotenv
from .models import Region
load_dotenv()
logger = logging.getLogger(__name__)
class VkLoader:
def __init__(self, url, api_version='5.92'):
self.params = {
'access_token': os.getenv('VK_TOKEN'),
'v': api_version,
'count': 1000
}
self.url = url
def get_objects(self, **kwargs):
self.params.update(kwargs)
try:
items = []
total_count = 1
offset = 0
while offset < total_count:
self.params['offset'] = offset
response = requests.post(self.url, params=self.params)
# The VK (VKontakte) API does not allow more than 3 requests per second
sleep(0.35)
response.raise_for_status()
response_body = response.json()['response']
total_count = response_body['count']
items.extend(response_body['items'])
offset += self.params['count']
return items
except Exception as err:
logger.error(
f'Error while calling method {self.url}: '
f'{err}'
)
return []
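# Example usage of VkLoader (ids are hypothetical):
#   loader = VkLoader('https://api.vk.com/method/database.getCities')
#   cities = loader.get_objects(country_id=1, region_id=1053480)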
@transaction.atomic
def load_geo_objects():
"""
Loads regions and settlements using the VK (VKontakte) API.
"""
country_loader = VkLoader('https://api.vk.com/method/database.getCountries')
region_loader = VkLoader('https://api.vk.com/method/database.getRegions')
city_loader = VkLoader('https://api.vk.com/method/database.getCities')
new_cities = []
vk_countries = country_loader.get_objects(code='RU')
for vk_country in vk_countries:
vk_regions = region_loader.get_objects(country_id=vk_country['id'])
for vk_region in vk_regions:
region, _ = Region.objects.get_or_create(name=vk_region['title'])
vk_cities = city_loader.get_objects(
country_id=vk_country['id'],
region_id=vk_region['id']
)
new_cities.extend([
(uuid.uuid4().hex, region.id.hex, vk_city['title'])
for vk_city in vk_cities
])
# To speed up loading a large number of cities, raw SQL queries are used.
# Loading happens in several steps (inside a single transaction):
# 1) A tmp_city table is created for temporary data storage
# 2) All settlement data received via the API is loaded
# into tmp_city
# 3) All settlements that are missing from the City model table
# (cities_city) are selected from tmp_city and moved
# into cities_city.
# 4) The tmp_city table is dropped
with connection.cursor() as cursor:
cursor.execute((
'CREATE TABLE IF NOT EXISTS tmp_city('
'id char(32), '
'region_id char(32), '
'name varchar(50))'
))
cursor.executemany((
'INSERT INTO tmp_city(id, region_id, name) '
'VALUES(%s, %s, %s)'),
new_cities
)
# The following query groups the tmp_city table, because a single
# region can contain settlements with
# identical names
cursor.execute((
'INSERT INTO cities_city(id, region_id, name) '
'SELECT max(tc.id), tc.region_id, tc.name '
'FROM tmp_city tc '
'LEFT JOIN cities_city c '
'ON (tc.name = c.name AND tc.region_id = c.region_id) '
'WHERE c.id IS NULL '
'GROUP BY tc.region_id, tc.name'
))
cursor.execute('DROP TABLE tmp_city')
| 34.720721 | 80 | 0.596004 | 453 | 3,854 | 4.900662 | 0.432671 | 0.036036 | 0.025676 | 0.02973 | 0.118468 | 0.087838 | 0.087838 | 0.087838 | 0 | 0 | 0 | 0.009019 | 0.309549 | 3,854 | 110 | 81 | 35.036364 | 0.825254 | 0.200311 | 0 | 0.049383 | 0 | 0 | 0.212787 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037037 | false | 0 | 0.098765 | 0 | 0.17284 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8cc0942fda4a0ae5404aafe96f0de481fdd39eb3 | 3,513 | py | Python | openhgnn/tasks/node_classification.py | jingmouren/OpenHGNN | 3fade32da156de130dbd5e056ff359e5b86b4324 | [
"Apache-2.0"
] | 235 | 2021-05-31T09:25:31.000Z | 2022-03-30T23:20:10.000Z | openhgnn/tasks/node_classification.py | jingmouren/OpenHGNN | 3fade32da156de130dbd5e056ff359e5b86b4324 | [
"Apache-2.0"
] | 17 | 2021-05-30T15:12:26.000Z | 2022-03-09T08:32:12.000Z | openhgnn/tasks/node_classification.py | jingmouren/OpenHGNN | 3fade32da156de130dbd5e056ff359e5b86b4324 | [
"Apache-2.0"
] | 65 | 2021-05-27T14:17:42.000Z | 2022-03-29T12:28:32.000Z | import torch.nn.functional as F
import torch.nn as nn
from . import BaseTask, register_task
from ..dataset import build_dataset
from ..utils import Evaluator
@register_task("node_classification")
class NodeClassification(BaseTask):
r"""
Node classification tasks.
Attributes
-----------
dataset : NodeClassificationDataset
Task-related dataset
evaluator : Evaluator
offer evaluation metric
Methods
---------
get_graph :
return a graph
get_loss_fn :
return a loss function
"""
def __init__(self, args):
super(NodeClassification, self).__init__()
self.dataset = build_dataset(args.dataset, 'node_classification')
# self.evaluator = Evaluator()
self.logger = args.logger
if hasattr(args, 'validation'):
self.train_idx, self.val_idx, self.test_idx = self.dataset.get_idx(args.validation)
else:
self.train_idx, self.val_idx, self.test_idx = self.dataset.get_idx()
self.evaluator = Evaluator(args.seed)
self.labels = self.dataset.get_labels()
self.multi_label = self.dataset.multi_label
if hasattr(args, 'evaluation_metric'):
self.evaluation_metric = args.evaluation_metric
else:
if args.dataset in ['aifb', 'mutag', 'bgs', 'am']:
self.evaluation_metric = 'acc'
else:
self.evaluation_metric = 'f1'
def get_graph(self):
return self.dataset.g
def get_loss_fn(self):
if self.multi_label:
return nn.BCEWithLogitsLoss()
return F.cross_entropy
def get_evaluator(self, name):
if name == 'acc':
return self.evaluator.cal_acc
elif name == 'f1_lr':
return self.evaluator.nc_with_LR
elif name == 'f1':
return self.evaluator.f1_node_classification
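# Note: get_evaluator falls through and returns None for any other metric name.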
def evaluate(self, logits, mode='test', info=True):
if mode == 'test':
mask = self.test_idx
elif mode == 'valid':
mask = self.val_idx
elif mode == 'train':
mask = self.train_idx
if self.multi_label:
pred = (logits[mask].cpu().numpy() > 0).astype(int)
else:
pred = logits[mask].argmax(dim=1).to('cpu')
if self.evaluation_metric == 'acc':
acc = self.evaluator.cal_acc(self.labels[mask], pred)
return dict(Accuracy=acc)
elif self.evaluation_metric == 'acc-ogbn-mag':
from ogb.nodeproppred import Evaluator
evaluator = Evaluator(name='ogbn-mag')
logits = logits.unsqueeze(dim=1)
input_dict = {"y_true": logits, "y_pred": self.labels[self.test_idx]}
result_dict = evaluator.eval(input_dict)
return result_dict
elif self.evaluation_metric == 'f1':
f1_dict = self.evaluator.f1_node_classification(self.labels[mask], pred)
return f1_dict
else:
raise ValueError('The evaluation metric is not supported!')
def downstream_evaluate(self, logits, evaluation_metric):
if evaluation_metric == 'f1_lr':
micro_f1, macro_f1 = self.evaluator.nc_with_LR(logits, self.labels, self.train_idx, self.test_idx)
return dict(Macro_f1=macro_f1, Micro_f1=micro_f1)
def get_idx(self):
return self.train_idx, self.val_idx, self.test_idx
def get_labels(self):
return self.labels
| 33.457143 | 110 | 0.609735 | 419 | 3,513 | 4.914081 | 0.25537 | 0.093249 | 0.032054 | 0.031083 | 0.140359 | 0.064594 | 0.064594 | 0.064594 | 0.064594 | 0.048567 | 0 | 0.007168 | 0.285226 | 3,513 | 104 | 111 | 33.778846 | 0.812824 | 0.080843 | 0 | 0.09589 | 0 | 0 | 0.061927 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.109589 | false | 0 | 0.082192 | 0.041096 | 0.369863 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8cc3bbaf6c0957d729096de76fb3a1b4a429702f | 1,987 | py | Python | instock_notifier/helpers.py | fpatseas/in-stock-notifier | ac098ddf5131d9e9ff00262868cc35f30cbe6b5b | [
"MIT"
] | 1 | 2021-05-15T11:31:54.000Z | 2021-05-15T11:31:54.000Z | instock_notifier/helpers.py | fpatseas/in-stock-notifier | ac098ddf5131d9e9ff00262868cc35f30cbe6b5b | [
"MIT"
] | 1 | 2021-10-04T23:29:26.000Z | 2021-10-05T07:22:07.000Z | instock_notifier/helpers.py | fpatseas/in-stock-notifier | ac098ddf5131d9e9ff00262868cc35f30cbe6b5b | [
"MIT"
] | null | null | null | import os
import json
import config
import utils
from datetime import date, datetime
def notify(url):
try:
utils.send_email(
config.EMAIL_SENDER,
config.EMAIL_RECIPIENTS,
"In-Stock Product",
"<a href='"+ url +"' target='_blank'>"+ url +"</a>")
print('Email sent!')
except Exception as e:
print(e)
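# notifications.json layout assumed by must_notify below:
#   {"notifications": [{"url": "...", "lastsent": "2021-01-01T00:00:00.000000"}]}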
def must_notify(url):
send = True
try:
file = "notifications.json"
jsonFile = open(file, "r")
if os.path.getsize(file) > 0:
data = json.load(jsonFile)
else:
data = { "notifications": [] }
jsonFile.close()
notifications = data["notifications"]
if len(notifications) > 0:
send = True
found = False
for notification in notifications:
if notification["url"] == url:
found = True
lastsent = datetime.strptime(notification["lastsent"], r"%Y-%m-%dT%H:%M:%S.%f")
diff_in_seconds = (datetime.now() - lastsent).total_seconds()
diff_in_minutes = divmod(diff_in_seconds, 60)[0]
diff_in_hours = divmod(diff_in_seconds, 3600)[0]
if diff_in_hours > config.RESEND_AFTER_INHOURS:
notification["lastsent"] = datetime.now()
else:
send = False
if found == False:
notifications.append({ "url": url, "lastsent": datetime.now() })
else:
notifications.append({ "url": url, "lastsent": datetime.now() })
jsonFile = open("notifications.json", "w+")
jsonFile.write(json.dumps(data, indent=4, default=utils.json_serial))
jsonFile.close()
except Exception as e:
print(e)
if send == False:
print('Email already sent')
return send | 29.656716 | 100 | 0.506794 | 198 | 1,987 | 4.979798 | 0.39899 | 0.036511 | 0.039554 | 0.036511 | 0.137931 | 0.137931 | 0.089249 | 0 | 0 | 0 | 0 | 0.008914 | 0.378963 | 1,987 | 67 | 101 | 29.656716 | 0.790113 | 0 | 0 | 0.254902 | 0 | 0 | 0.105099 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.039216 | false | 0 | 0.098039 | 0 | 0.156863 | 0.078431 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8cc52f1b66e72ed6add6267a7e06a2544e5e03f1 | 465 | py | Python | Python3/0243-Shortest-Word-Distance/soln-1.py | wyaadarsh/LeetCode-Solutions | 3719f5cb059eefd66b83eb8ae990652f4b7fd124 | [
"MIT"
] | 5 | 2020-07-24T17:48:59.000Z | 2020-12-21T05:56:00.000Z | Python3/0243-Shortest-Word-Distance/soln-1.py | zhangyaqi1989/LeetCode-Solutions | 2655a1ffc8678ad1de6c24295071308a18c5dc6e | [
"MIT"
] | null | null | null | Python3/0243-Shortest-Word-Distance/soln-1.py | zhangyaqi1989/LeetCode-Solutions | 2655a1ffc8678ad1de6c24295071308a18c5dc6e | [
"MIT"
] | 2 | 2020-07-24T17:49:01.000Z | 2020-08-31T19:57:35.000Z | import itertools
class Solution:
def shortestDistance(self, words, word1, word2):
"""
:type words: List[str]
:type word1: str
:type word2: str
:rtype: int
"""
idxs1, idxs2 = [], []
for i, word in enumerate(words):
if word == word1:
idxs1.append(i)
elif word == word2:
idxs2.append(i)
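# Brute force over all index pairs; a single merge-style pass over the two
# (already sorted) index lists would bring this down to O(len(words)).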
return min(abs(a - b) for a, b in itertools.product(idxs1, idxs2)) | 31 | 74 | 0.490323 | 53 | 465 | 4.301887 | 0.566038 | 0.061404 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.042254 | 0.389247 | 465 | 15 | 74 | 31 | 0.760563 | 0.146237 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8cc6993d3105afbe975cd32ccd39f9f479e03924 | 769 | py | Python | Python/contains-duplicate-iii.py | se77enn/LeetCode-Solution | d29ef5358cae592b63952c3d293897a176fb75e1 | [
"MIT"
] | 1 | 2020-10-27T03:22:31.000Z | 2020-10-27T03:22:31.000Z | Python/contains-duplicate-iii.py | se77enn/LeetCode-Solution | d29ef5358cae592b63952c3d293897a176fb75e1 | [
"MIT"
] | null | null | null | Python/contains-duplicate-iii.py | se77enn/LeetCode-Solution | d29ef5358cae592b63952c3d293897a176fb75e1 | [
"MIT"
] | 1 | 2021-03-22T18:58:23.000Z | 2021-03-22T18:58:23.000Z | # Time: O(n * t)
# Space: O(max(k, t))
import collections
class Solution:
# @param {integer[]} nums
# @param {integer} k
# @param {integer} t
# @return {boolean}
def containsNearbyAlmostDuplicate(self, nums, k, t):
if k < 0 or t < 0:
return False
window = collections.OrderedDict()
for n in nums:
# Keep the sliding window to the k most recent elements
if len(window) > k:
window.popitem(False)
bucket = n if not t else n // t
# Only the same bucket and its two neighbours can hold a value within t.
for m in (window.get(bucket - 1), window.get(bucket), window.get(bucket + 1)):
if m is not None and abs(n - m) <= t:
return True
window[bucket] = n
return False
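# Example: Solution().containsNearbyAlmostDuplicate([1, 2, 3, 1], k=3, t=0) -> True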
| 27.464286 | 90 | 0.507152 | 99 | 769 | 3.939394 | 0.474747 | 0.092308 | 0.115385 | 0.082051 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010571 | 0.384915 | 769 | 27 | 91 | 28.481481 | 0.813953 | 0.20156 | 0 | 0.133333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.066667 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8cc7f7c2132ff07c96f897cf1fa46ce4119097c5 | 474 | py | Python | openstack_interpreter/common/profile.py | iokiwi/openstack-interpreter | 3356bbc567fe2d99aaa49fa8ca222d1e85e98b96 | [
"Apache-2.0"
] | 4 | 2017-05-01T10:22:29.000Z | 2020-09-14T00:33:05.000Z | openstack_interpreter/common/profile.py | iokiwi/openstack-interpreter | 3356bbc567fe2d99aaa49fa8ca222d1e85e98b96 | [
"Apache-2.0"
] | 3 | 2020-09-11T03:17:10.000Z | 2020-12-29T08:47:30.000Z | openstack_interpreter/common/profile.py | iokiwi/openstack-interpreter | 3356bbc567fe2d99aaa49fa8ca222d1e85e98b96 | [
"Apache-2.0"
] | 1 | 2020-10-15T00:02:33.000Z | 2020-10-15T00:02:33.000Z | from contextlib import contextmanager
from datetime import datetime
@contextmanager
def timed(desc):
"""
A useful context manager for timing how long something took
Example use:
In [1]: with timed("getting server list:"):
...: oi.sdk.connection.compute.servers()
...:
getting server list: took: 0:00:00.001366
"""
start = datetime.utcnow()
yield
end = datetime.utcnow()
print("%s took: %s" % (desc, end - start))
| 23.7 | 63 | 0.632911 | 58 | 474 | 5.172414 | 0.689655 | 0.086667 | 0.113333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.033426 | 0.242616 | 474 | 19 | 64 | 24.947368 | 0.802228 | 0.453587 | 0 | 0 | 0 | 0 | 0.049327 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.25 | 0 | 0.375 | 0.125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8cc8517af231199933b870acd3c0d7b4bd57072a | 1,803 | py | Python | cubic/audio_io.py | cobaltspeech/examples-python | 74a8ad1c48c46cb029ede389baedbd44fccf9dd0 | [
"Apache-2.0"
] | null | null | null | cubic/audio_io.py | cobaltspeech/examples-python | 74a8ad1c48c46cb029ede389baedbd44fccf9dd0 | [
"Apache-2.0"
] | 2 | 2021-07-22T15:57:53.000Z | 2021-08-03T22:29:36.000Z | cubic/audio_io.py | cobaltspeech/examples-python | 74a8ad1c48c46cb029ede389baedbd44fccf9dd0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright(2021) Cobalt Speech and Language Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
class Recorder(object):
"""Recorder launches an external application to handle recording audio."""
def __init__(self, cmd):
self.args = cmd.split()
self.process = None
def start(self):
"""Start the external recording application."""
# Ignore if we already started it
if self.process is not None:
return
# Start the subprocess
self.process = subprocess.Popen(args=self.args,
stdout=subprocess.PIPE)
def stop(self):
"""Stop the external recording application."""
# Ignore if it is not running
if self.process is None:
return
# Stop the subprocess
self.process.stdout.close()
self.process.terminate()
self.process = None
def read(self, bufsize):
"""Read audio data from the external recording application."""
# Raise an error if we haven't started the app
if self.process is None:
raise RuntimeError("Recording application is not running")
# Get the data from stdout
return self.process.stdout.read(bufsize)
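# Example usage (assumes the ALSA `arecord` tool is installed):
#   rec = Recorder("arecord -q -f S16_LE -r 16000 -t raw")
#   rec.start(); chunk = rec.read(4096); rec.stop()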
| 30.559322 | 78 | 0.652246 | 233 | 1,803 | 5.030043 | 0.480687 | 0.084471 | 0.051195 | 0.079352 | 0.098976 | 0.066553 | 0 | 0 | 0 | 0 | 0 | 0.006818 | 0.267887 | 1,803 | 58 | 79 | 31.086207 | 0.881061 | 0.537438 | 0 | 0.3 | 0 | 0 | 0.045397 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.05 | 0 | 0.45 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8cc89e1a1c169d9db2ceb80bb90bc5445283842a | 1,299 | py | Python | examples/micromagnetic/pbc_2d/main.py | computationalmodelling/fidimag | 07a275c897a44ad1e0d7e8ef563f10345fdc2a6e | [
"BSD-2-Clause"
] | 53 | 2016-02-27T09:40:21.000Z | 2022-01-19T21:37:44.000Z | examples/micromagnetic/pbc_2d/main.py | computationalmodelling/fidimag | 07a275c897a44ad1e0d7e8ef563f10345fdc2a6e | [
"BSD-2-Clause"
] | 132 | 2016-02-26T13:18:58.000Z | 2021-12-01T21:52:42.000Z | examples/micromagnetic/skyrmion/main.py | computationalmodelling/fidimag | 07a275c897a44ad1e0d7e8ef563f10345fdc2a6e | [
"BSD-2-Clause"
] | 32 | 2016-02-26T13:21:40.000Z | 2022-03-08T08:54:51.000Z | import matplotlib as mpl
mpl.use("Agg")
import matplotlib.pyplot as plt
import numpy as np
from fidimag.micro import Sim
from fidimag.common import CuboidMesh
from fidimag.micro import UniformExchange, Demag, DMI
from fidimag.micro import Zeeman, TimeZeeman
from fidimag.common.fileio import DataReader
mu0 = 4 * np.pi * 1e-7
def init_m(pos):
x, y = pos[0] - 500, pos[1] - 500
if x**2 + y**2 < 50**2:
return (0, 0, -1)
else:
return (0, 0, 1)
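# init_m seeds a 50 nm disk of reversed magnetisation at the mesh centre; under
# the DMI and Zeeman terms set up below it is expected to relax into a skyrmion.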
def relax_system(mesh):
sim = Sim(mesh, name='relax')
sim.driver.set_tols(rtol=1e-10, atol=1e-14)
sim.driver.alpha = 0.5
sim.driver.gamma = 2.211e5
sim.Ms = 8.6e5
sim.do_precession = False
sim.set_m(init_m)
# sim.set_m(np.load('m0.npy'))
A = 1.3e-11
exch = UniformExchange(A=A)
sim.add(exch)
dmi = DMI(D=1e-3)
sim.add(dmi)
zeeman = Zeeman((0, 0, 2e4))
sim.add(zeeman, save_field=True)
sim.relax(dt=1e-13, stopping_dmdt=0.01, max_steps=5000,
save_m_steps=None, save_vtk_steps=50)
np.save('m0.npy', sim.spin)
if __name__ == '__main__':
mesh = CuboidMesh(
nx=1001, ny=1001, nz=1, dx=1, dy=1, dz=2.0, unit_length=1e-9, periodicity=(True, True, False))
relax_system(mesh)
# apply_field1(mesh)
# deal_plot()
| 20.619048 | 102 | 0.631255 | 218 | 1,299 | 3.642202 | 0.477064 | 0.06927 | 0.060453 | 0.083123 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.078607 | 0.226328 | 1,299 | 62 | 103 | 20.951613 | 0.711443 | 0.04542 | 0 | 0 | 0 | 0 | 0.017799 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.210526 | 0 | 0.315789 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8cc9a4f194a08f8eb92f6794928bfc40f100cec7 | 771 | py | Python | server/tournament/urls.py | Xelia/mahjong-portal | 1baa2eab57875a64a7f09537d1f43872b577f205 | [
"MIT"
] | null | null | null | server/tournament/urls.py | Xelia/mahjong-portal | 1baa2eab57875a64a7f09537d1f43872b577f205 | [
"MIT"
] | null | null | null | server/tournament/urls.py | Xelia/mahjong-portal | 1baa2eab57875a64a7f09537d1f43872b577f205 | [
"MIT"
] | null | null | null | from django.conf.urls import url
from tournament.views import tournament_list, tournament_details, tournament_announcement, tournament_registration, \
tournament_application
urlpatterns = [
url(r'^new/$', tournament_application, name='tournament_application'),
url(r'^riichi/(?P<year>\d+)/$', tournament_list, name='tournament_list'),
url(r'^riichi/(?P<tournament_type>[\w\-]+)/(?P<year>\d+)/$', tournament_list, name='tournament_ema_list'),
url(r'^registration/(?P<tournament_id>\d+)/$', tournament_registration, name='tournament_registration'),
url(r'^riichi/(?P<slug>[\w\-]+)/$', tournament_details, name='tournament_details'),
url(r'^riichi/(?P<slug>[\w\-]+)/announcement/$', tournament_announcement, name='tournament_announcement'),
]
| 51.4 | 117 | 0.719844 | 91 | 771 | 5.879121 | 0.285714 | 0.04486 | 0.074766 | 0.082243 | 0.186916 | 0.186916 | 0.127103 | 0 | 0 | 0 | 0 | 0 | 0.085603 | 771 | 14 | 118 | 55.071429 | 0.758865 | 0 | 0 | 0 | 0 | 0 | 0.396887 | 0.32166 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.181818 | 0 | 0.181818 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8cc9fda494549870a71da11349ff0a1020a32beb | 3,506 | py | Python | EveCommon/EveCentral.py | Marclass/EveCommon | b554f1741eae1ed871d31bcac8fdf47b9b76eefa | [
"MIT"
] | 1 | 2021-04-17T11:16:16.000Z | 2021-04-17T11:16:16.000Z | EveCommon/EveCentral.py | Marclass/EveCommon | b554f1741eae1ed871d31bcac8fdf47b9b76eefa | [
"MIT"
] | null | null | null | EveCommon/EveCentral.py | Marclass/EveCommon | b554f1741eae1ed871d31bcac8fdf47b9b76eefa | [
"MIT"
] | null | null | null | from BaseAPIConnector import BaseAPIConnector
class EveCentral(BaseAPIConnector):
def __init__(self, user_agent='', type_id=0, type_id_list=None, system_id=0, region_id=0, hours=0, min_quantity=0):
BaseAPIConnector.__init__(self, user_agent)
self.type_id = type_id
self.type_id_list = type_id_list
self.internal_type_id_list = None
if self.type_id_list is not None:
self.internal_type_id_list = list(self.type_id_list)
self.system_id = system_id
self.region_id = region_id
self.hours = hours
self.min_quantity = min_quantity
self.items = []
def construct_url(self):
base_url = 'http://api.eve-central.com/api/marketstat?'
url_parts = []
if self.type_id != 0:
url_parts.append('typeid=%s' % self.type_id)
if self.internal_type_id_list is not None:
for i in range(0, len(self.internal_type_id_list)):
type_id = self.internal_type_id_list[0]
url_parts.append('typeid=%s' % type_id)
self.internal_type_id_list.pop(0)
if i >= 100:
break
if self.system_id != 0:
url_parts.append('usesystem=%s' % self.system_id)
if self.region_id != 0:
url_parts.append('regionlimit=%s' % self.region_id)
if self.hours != 0:
url_parts.append('hours=%s' % self.hours)
if self.min_quantity != 0:
url_parts.append('minQ=%s' % self.min_quantity)
return base_url + '&'.join(url_parts)
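# Example output of construct_url (type/system ids are hypothetical):
#   http://api.eve-central.com/api/marketstat?typeid=34&usesystem=30000142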
def get_prices_list(self):
def _get_item_prices(item_node):
return EveItemPrices(volume=item_node.find('volume').text,
average=item_node.find('avg').text,
maximum=item_node.find('max').text,
minimum=item_node.find('min').text,
standard_deviation=item_node.find('stddev').text,
median=item_node.find('median').text,
percentile=item_node.find('percentile').text)
while True:
tree = self.get_xml_from_request()
if tree is None:
continue
if self.internal_type_id_list and self.verbose:
print('Getting Items starting at %s' % str(self.internal_type_id_list[0]))
for item_type in tree.iter('type'):
item = EveItem(item_id=item_type.attrib.get('id'))
item.sell = _get_item_prices(item_type.find('sell'))
item.buy = _get_item_prices(item_type.find('buy'))
item.all = _get_item_prices(item_type.find('all'))
self.items.append(item)
if (self.internal_type_id_list is None) or (self.internal_type_id_list == []):
break
return self.items
class EveItem(object):
def __init__(self, item_id=0):
self.item_id = item_id
self.buy = None
self.sell = None
self.all = None
class EveItemPrices(object):
def __init__(self, volume=0, average=0, maximum=0, minimum=0, standard_deviation=0, median=0, percentile=0):
self.volume = volume
self.average = average
self.maximum = maximum
self.minimum = minimum
self.standard_deviation = standard_deviation
self.median = median
self.percentile = percentile | 35.414141 | 119 | 0.582715 | 444 | 3,506 | 4.310811 | 0.202703 | 0.068966 | 0.07837 | 0.094044 | 0.229885 | 0.15674 | 0.056426 | 0 | 0 | 0 | 0 | 0.010399 | 0.314318 | 3,506 | 99 | 120 | 35.414141 | 0.785774 | 0 | 0 | 0.027027 | 0 | 0 | 0.052181 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081081 | false | 0 | 0.013514 | 0.013514 | 0.175676 | 0.013514 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8cca0320c821120bae30297c36b3614b34793a91 | 2,078 | py | Python | tensorflow_model_optimization/python/core/quantization/keras/layers/dense_batchnorm_test_utils.py | Pandinosaurus/model-optimization | 12dc84dd34ee3c6eb08b381c0abcd65b31a42366 | [
"Apache-2.0"
] | null | null | null | tensorflow_model_optimization/python/core/quantization/keras/layers/dense_batchnorm_test_utils.py | Pandinosaurus/model-optimization | 12dc84dd34ee3c6eb08b381c0abcd65b31a42366 | [
"Apache-2.0"
] | null | null | null | tensorflow_model_optimization/python/core/quantization/keras/layers/dense_batchnorm_test_utils.py | Pandinosaurus/model-optimization | 12dc84dd34ee3c6eb08b381c0abcd65b31a42366 | [
"Apache-2.0"
] | 1 | 2020-12-13T22:13:22.000Z | 2020-12-13T22:13:22.000Z | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test utils for dense batchnorm folding."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
keras = tf.keras
class DenseModel(object):
"""Construct and access Dense + BatchNorm + activation models."""
params = {
'units': 32,
'input_shape': (32,),
'batch_size': 1,
}
@classmethod
def get_batched_input_shape(cls):
"""Return input shape with batch size."""
shape = [cls.params['batch_size']]
shape.extend(cls.params['input_shape'])
return shape
@classmethod
def get_nonfolded_batchnorm_model(cls,
post_bn_activation=None,
normalization_type='BatchNormalization'):
"""Return nonfolded Dense + BN + optional activation model."""
if normalization_type == 'BatchNormalization':
normalization = keras.layers.BatchNormalization
elif normalization_type == 'SyncBatchNormalization':
normalization = keras.layers.experimental.SyncBatchNormalization
inp = keras.layers.Input(cls.params['input_shape'],
cls.params['batch_size'])
x = keras.layers.Dense(cls.params['units'])(inp)
out = normalization(axis=-1)(x)
if post_bn_activation is not None:
out = post_bn_activation(out)
return tf.keras.Model(inp, out)
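# Example (a sketch): DenseModel.get_nonfolded_batchnorm_model(keras.layers.ReLU())
# builds a (1, 32) input -> Dense(32) -> BatchNormalization -> ReLU Keras model.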
| 35.220339 | 80 | 0.669394 | 247 | 2,078 | 5.489879 | 0.48583 | 0.044248 | 0.035398 | 0.023599 | 0.033923 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008459 | 0.203561 | 2,078 | 58 | 81 | 35.827586 | 0.810876 | 0.410972 | 0 | 0.064516 | 0 | 0 | 0.110084 | 0.018487 | 0 | 0 | 0 | 0 | 0 | 1 | 0.064516 | false | 0 | 0.129032 | 0 | 0.322581 | 0.032258 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8cca9d1595e110284a8607006857f10b50c49b6f | 9,551 | py | Python | app/views.py | npmenon/Go-Search | adaa5f38e52768bb96252fe0a978d4b14d2d296b | [
"MIT"
] | null | null | null | app/views.py | npmenon/Go-Search | adaa5f38e52768bb96252fe0a978d4b14d2d296b | [
"MIT"
] | null | null | null | app/views.py | npmenon/Go-Search | adaa5f38e52768bb96252fe0a978d4b14d2d296b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from app import app
from flask import render_template
from flask import request
from flask import jsonify
# from settings import APP_STATIC
import json
import pysolr
from urllib.request import urlopen
from watson_developer_cloud import AlchemyLanguageV1
import ast
import re
import string
import os
import datetime
from collections import Counter
alchemy_language = AlchemyLanguageV1(api_key='b5abca00bba18cdda854cff13f3773df925a908b')
HOST = 'http://35.165.140.166:8983/solr/prj4/'
# HOST = 'http://localhost:8983/solr/prj4/'
LANGUAGES = ['en','es','pt','fr','ru']
def lang_map(language):
language_map = {'en':'English','fr':'French','ru':'Russian','es':'Spanish','pt':'Portuguese'}
return language_map[language]
@app.route('/')
@app.route('/query',methods=['GET'])
def query():
# Retrieve the parameter values from the url
selected_language = request.args.get('lang-select')
search_string = request.args.get('usrquery', '')
tweet_language = request.args.get('lang','')
# DATE ARGS
from_date = request.args.get('datefrom','')
to_date = request.args.get('dateto','')
# Query Boosting
boost_language = 'tweet_lang:%s^3' % selected_language
# base case params
params = {'facet':'on', 'facet.field':['{!ex=dt}tweet_lang','tweet_date'], 'rows':100,'defType':'edismax','bq':boost_language}
# if not query, display everything
if search_string == '' or search_string == 'undefined':
search_string = '*:*'
fq_list = []
# Language filter exists
if tweet_language != '':
languages = tweet_language.split(' ')
# tweet_lang:en tweet_lang:es
fq_content = ''
for lang in languages:
fq_content += "tweet_lang:"+lang+' '
params['fq'] = '{!tag=dt}'+fq_content
# Date filter exists
if from_date and to_date:
fq_list.append('{!tag=dt}'+fq_content)
fq_list.append('tweet_date:['+from_date+' TO ' + to_date+ ']')
params['fq'] = fq_list
else:
# Date filter exists
if from_date and to_date:
date_range = 'tweet_date:['+from_date+' TO ' + to_date+ ']'
params['fq'] = date_range
solr = pysolr.Solr(HOST, timeout=10)
results = solr.search(search_string, **params)
results_count = results.hits
# extracting the tweet language
lang_info = results.facets['facet_fields']['tweet_lang']
# extracting the tweet date
date_results = results.facets['facet_fields']['tweet_date']
"""
---------- LANGUAGE FACETING STARTS HERE ------
"""
filtered_lang_info = dict()
for i in range(0,len(lang_info),2):
item = []
item.append(lang_info[i+1])
if search_string is None:
item.append("all")
else:
item.append(search_string)
item.append(lang_map(lang_info[i]))
filtered_lang_info[lang_info[i]] = item
"""
---------- LANGUAGE FACETING ENDS HERE ------
"""
"""
---------- DATE FACETING STARTS HERE ------
"""
dates = list()
date_info = list()
#print(date_results)
for i in range(0,len(date_results),2):
d = date_results[i][0:10]
date_object = datetime.datetime.strptime(d,'%Y-%m-%d')
dates.append(date_object)
lower_date = min(dates)
upper_date = max(dates)
date_info.append({'y':lower_date.year,'m':lower_date.month,'d':lower_date.day})
date_info.append({'y':upper_date.year,'m':upper_date.month,'d':upper_date.day})
"""
---------- DATE FACETING ENDS HERE ------
"""
tweet_text = search_string
count = 0
image_list = []
image_count = 0
for tweet in results:
if count <= 3:
text = str(tweet['tweet_text'][0]).replace('[','').replace(']','')
tweet_text = '%s %s' % (tweet_text,text)
count += 1
if tweet.get('media') and search_string != '*:*':
image_list.append(tweet['media'][0])
image_count += 1
if (image_count > 4 and count > 3) or (count > 100):
break
# if count > 100:
# break;
if search_string == '*:*':
image_list = []
escaped_text = re.escape(string.punctuation)
tweet_string = ''
for tweet in results:
text = str(tweet['tweet_text'][0]).replace('[','').replace(']','')
tweet_string = '%s %s' % (tweet_string,text)
tweet_string = re.sub(r'http\S+', '', tweet_string)
tweet_string = re.sub(r'['+escaped_text+']', '',tweet_string)
tweet_text = re.sub(r'http\S+', '', tweet_text)
tweet_text = re.sub(r'['+escaped_text+']', '',tweet_text)
alchemy_text=tweet_text
if 'trump' in alchemy_text:
alchemy_text = 'donald trump'
alchemy_response = {}
if search_string != '*:*':
try:
alchemy_response = json.dumps(
alchemy_language.combined(
text=alchemy_text,
extract='entities,keywords',
sentiment=1,
max_items=1),
indent=2)
except Exception as err:
print("Failed", err)
tags = []
dbpedia_link = ''
if alchemy_response:
alchemy_response = ast.literal_eval(alchemy_response)
for alchemy_result in alchemy_response.get('entities'):
if alchemy_result.get('disambiguated'):
if alchemy_result['disambiguated'].get('subType'):
for tag in alchemy_result['disambiguated']['subType']:
tags.append(tag)
if alchemy_result['disambiguated'].get('dbpedia'):
dbpedia_link = alchemy_result['disambiguated'].get('dbpedia')
# Get Summary text
summary_data = ''
if dbpedia_link != '':
subject = dbpedia_link.replace('http://dbpedia.org/resource/','')
if selected_language and selected_language != 'en':
summary_link = 'https://%s.wikipedia.org/w/api.php?format=json&action=query&prop=extracts&exintro=&explaintext=&titles=%s' % (selected_language,subject)
response = urlopen(summary_link)
summary_data = json.loads(response.read().decode('utf8'))['query']['pages']
summary_data = summary_data[list(summary_data.keys())[0]].get('extract')
elif selected_language == 'en' or not summary_data or not selected_language:
summary_link = 'https://en.wikipedia.org/w/api.php?format=json&action=query&prop=extracts&exintro=&explaintext=&titles=%s' % subject
response = urlopen(summary_link)
summary_data = json.loads(response.read().decode('utf8'))['query']['pages']
summary_data = summary_data[list(summary_data.keys())[0]].get('extract')
if summary_data:
summary_data = (summary_data[:250] + '..') if len(summary_data) > 75 else summary_data
# Return the results and render it on the html page
return render_template('index.html',date_info=json.dumps(date_info),lang_info=filtered_lang_info,tweets=results,tags=tags,summary=summary_data,image_list=image_list,results_count=results_count)
# Serve hashtag facet counts to the HTML page
@app.route('/tags',methods=['POST'])
def tags():
solr = pysolr.Solr(HOST, timeout=10)
# results = solr.search("*:*")
params = {'rows': '0', "facet":"on", "facet.field":"hashtags"}
results = solr.search("*:*", **params)
return jsonify(results.facets['facet_fields']['hashtags'])
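# Solr returns facet counts as a flat [value, count, value, count, ...] list,
# e.g. ['election', 120, 'vote', 87] (hypothetical values).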
# Retrieve Similar pages
@app.route('/morelikethis')
def morelikethis():
solr = pysolr.Solr(HOST, timeout=10)
tweet_id = request.args.get('similar')
params = {'mlt':'true','mlt.mintf':'7','mlt.fl':'_text_','mlt.mindf':'1','rows':100}
similar = solr.more_like_this('id:'+str(tweet_id), mltfl='_text_', **params)
if len(similar)==0:
similar = solr.search('id:'+tweet_id)
print(similar.hits)
return render_template('index.html',tweets=similar)
# Language detector
@app.route('/getLang',methods=['GET'])
def get_lang():
# data = json.dumps(
# alchemy_language.combined(
# text=request.args.get('query'),
# extract='entities,keywords',
# sentiment=1,
# max_items=1),
# indent=2)
query = request.args.get('query')
query = query.replace(' ','+').encode('utf-8')
url = urlopen('http://ws.detectlanguage.com/0.2/detect?q=%s&key=5936a491b5b768c58f9c5eda80873365' % (query))
url_reponse = json.loads(url.read().decode('utf8'))
# data_dict = ast.literal_eval(url_reponse)
data_dict = url_reponse
response_language = 'en'
print(data_dict)
if not (data_dict.get('data') and data_dict['data'].get('detections')):
response_language = 'en'
elif not data_dict['data']['detections'][0].get('language'):
response_language = 'en'
else:
language = data_dict['data']['detections'][0].get('language')
if language == 'ro':
response_language = 'es'
elif language == 'da':
response_language = 'ru'
elif language not in LANGUAGES:
response_language = 'en'
else:
response_language = language
lang_dict = {'language':response_language}
return jsonify(lang_dict)
# Maps
@app.route('/maps')
def maps():
with open(os.getcwd()+'/app/map_data/locs.json') as locs:
loc_data = json.load(locs)
return render_template('maps.html',results=loc_data)
| 33.630282 | 197 | 0.60936 | 1,164 | 9,551 | 4.819588 | 0.218213 | 0.031373 | 0.019964 | 0.015686 | 0.252763 | 0.19697 | 0.160428 | 0.148307 | 0.148307 | 0.093761 | 0 | 0.01763 | 0.233902 | 9,551 | 283 | 198 | 33.749117 | 0.749077 | 0.085017 | 0 | 0.134409 | 0 | 0.016129 | 0.152819 | 0.007446 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032258 | false | 0.005376 | 0.075269 | 0 | 0.139785 | 0.016129 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8ccadeddd15f0bf7707cd71438d79bcdd2f174bc | 1,841 | py | Python | crawl_sturm_edition.py | NEISSproject/TEITools | 02340cd51f3c871f99f8b48170fe9682ad961ff1 | [
"Apache-2.0"
] | null | null | null | crawl_sturm_edition.py | NEISSproject/TEITools | 02340cd51f3c871f99f8b48170fe9682ad961ff1 | [
"Apache-2.0"
] | null | null | null | crawl_sturm_edition.py | NEISSproject/TEITools | 02340cd51f3c871f99f8b48170fe9682ad961ff1 | [
"Apache-2.0"
] | null | null | null | import requests
from bs4 import BeautifulSoup
import json
urlbrieffma = 'https://sturm-edition.de/quellen/briefe/fma.html'
urlbriefjvh='https://sturm-edition.de/quellen/briefe/jvh.html'
# returns the HTML code of the requested URL
def get_url_content(url):
return requests.get(url).text
def crawl_sturm(url):
content = get_url_content(url)
#print(content)
# hand the HTML over to the BeautifulSoup parser
soup = BeautifulSoup(content, "html.parser")
for main in soup.findAll('main',{'class': 'row content hyphenate'}):
#print(main.contents)
for section in main.findAll('section'):
for element in section.contents:
#print(element.name)
if element.name is not None:
# bs4 returns multi-valued attributes such as class as a list
if element.name=='h4' and 'year' in element.attrs.get('class', []):
print(element)
cur_year=int(element.contents[0])
print(cur_year)
if element.name=='ol':
for article in element.contents:
if article.name is not None:
if article.name=='li':
li=article.contents[0]
if li.name is not None and li.name=='a':
cur_html_url=li.attrs['href']
cur_xml_url='https://sturm-edition.de/api/files/' + cur_html_url[4:-4] + 'xml'
tei=get_url_content(cur_xml_url)
with open('../data_sturm/briefe/'+cur_html_url[4:-4] + 'xml', 'w') as file:
file.write(tei)
if __name__ == "__main__":
#print(get_url_content(urlbrieffma))
crawl_sturm(urlbriefjvh)
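# Note: letters are written to ../data_sturm/briefe/<id>.xml; that directory
# must already exist, otherwise open(..., 'w') raises FileNotFoundError.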
| 42.813953 | 118 | 0.523628 | 206 | 1,841 | 4.529126 | 0.354369 | 0.032154 | 0.055734 | 0.061093 | 0.132905 | 0.10075 | 0 | 0 | 0 | 0 | 0 | 0.006891 | 0.369364 | 1,841 | 42 | 119 | 43.833333 | 0.796727 | 0.090712 | 0 | 0 | 0 | 0 | 0.140972 | 0.012597 | 0 | 0 | 0 | 0 | 0 | 1 | 0.064516 | false | 0 | 0.096774 | 0.032258 | 0.193548 | 0.064516 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8ccb6645de8c78ccf77d3e049fa1b1e6257e5c91 | 1,552 | py | Python | sdk/cwl/gittaggers.py | thehyve/arvados | 7b72f9d1b628698277617e9f1b8a9eac1f8dd562 | [
"Apache-2.0"
] | null | null | null | sdk/cwl/gittaggers.py | thehyve/arvados | 7b72f9d1b628698277617e9f1b8a9eac1f8dd562 | [
"Apache-2.0"
] | null | null | null | sdk/cwl/gittaggers.py | thehyve/arvados | 7b72f9d1b628698277617e9f1b8a9eac1f8dd562 | [
"Apache-2.0"
] | null | null | null | # Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
from setuptools.command.egg_info import egg_info
import subprocess
import time
import os
SETUP_DIR = os.path.dirname(__file__) or '.'
def choose_version_from():
sdk_ts = subprocess.check_output(
['git', 'log', '--first-parent', '--max-count=1',
'--format=format:%ct', os.path.join(SETUP_DIR, "../python")]).strip()
cwl_ts = subprocess.check_output(
['git', 'log', '--first-parent', '--max-count=1',
'--format=format:%ct', SETUP_DIR]).strip()
if int(sdk_ts) > int(cwl_ts):
getver = os.path.join(SETUP_DIR, "../python")
else:
getver = SETUP_DIR
return getver
class EggInfoFromGit(egg_info):
"""Tag the build with git commit timestamp.
If a build tag has already been set (e.g., "egg_info -b", building
from source package), leave it alone.
"""
def git_latest_tag(self):
gitinfo = subprocess.check_output(
['git', 'describe', '--abbrev=0']).strip()
return str(gitinfo.decode('utf-8'))
def git_timestamp_tag(self):
gitinfo = subprocess.check_output(
['git', 'log', '--first-parent', '--max-count=1',
'--format=format:%ct', choose_version_from()]).strip()
return time.strftime('.%Y%m%d%H%M%S', time.gmtime(int(gitinfo)))
def tags(self):
if self.tag_build is None:
self.tag_build = self.git_latest_tag() + self.git_timestamp_tag()
return egg_info.tags(self)
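# A version tagged this way looks like e.g. "2.3.1.20210512103045": the latest
# git tag plus the commit timestamp of whichever SDK directory changed last.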
| 33.73913 | 78 | 0.623711 | 211 | 1,552 | 4.417062 | 0.445498 | 0.037554 | 0.090129 | 0.103004 | 0.30794 | 0.30794 | 0.256438 | 0.200644 | 0.200644 | 0.200644 | 0 | 0.005728 | 0.212629 | 1,552 | 45 | 79 | 34.488889 | 0.756956 | 0.153995 | 0 | 0.16129 | 0 | 0 | 0.165635 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.129032 | false | 0 | 0.129032 | 0 | 0.419355 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8ccc0b1583d96f8082108acef00d3108998ddaf0 | 132,037 | py | Python | bandwidth/voice/backup_init.py | Sendhub/python-bandwidth | 097762e8aa64eb041a5706890ecde03bfcecf8c5 | [
"MIT"
] | 18 | 2017-03-07T18:49:19.000Z | 2020-11-04T14:27:37.000Z | bandwidth/voice/backup_init.py | Sendhub/python-bandwidth | 097762e8aa64eb041a5706890ecde03bfcecf8c5 | [
"MIT"
] | 12 | 2016-11-14T21:43:46.000Z | 2019-10-11T20:18:51.000Z | bandwidth/voice/backup_init.py | Sendhub/python-bandwidth | 097762e8aa64eb041a5706890ecde03bfcecf8c5 | [
"MIT"
] | 13 | 2017-10-10T17:50:00.000Z | 2020-08-13T13:14:22.000Z | import requests
import six
import urllib
import json
import itertools
from bandwidth.voice.lazy_enumerable import get_lazy_enumerator
from bandwidth.convert_camel import convert_object_to_snake_case
from bandwidth.voice.decorators import play_audio
from bandwidth.version import __version__ as version
quote = urllib.parse.quote if six.PY3 else urllib.quote
lazy_map = map if six.PY3 else itertools.imap
def _set_media_name(recording):
recording['mediaName'] = recording.get('media', '').split('/')[-1]
return recording
@play_audio('call')
@play_audio('bridge')
@play_audio('conference')
class Client:
"""
Catapult client
"""
def __init__(self, user_id=None, api_token=None, api_secret=None, **other_options):
"""
Initialize the catapult client.
:type user_id: str
:param user_id: catapult user id
:type api_token: str
:param api_token: catapult api token
:type api_secret: str
:param api_secret: catapult api secret
:type api_endpoint: str
:param api_endpoint: catapult api endpoint (optional, default value is https://api.catapult.inetwork.com)
:type api_version: str
:param api_version: catapult api version (optional, default value is v1)
:rtype: bandwidth.catapult.Client
:returns: bandwidth client
Init the catapult client::
api = bandwidth.catapult.Client('YOUR_USER_ID', 'YOUR_API_TOKEN', 'YOUR_API_SECRET')
# or
api = bandwidth.client('catapult', 'YOUR_USER_ID', 'YOUR_API_TOKEN', 'YOUR_API_SECRET')
"""
if not all((user_id, api_token, api_secret)):
raise ValueError('Arguments user_id, api_token and api_secret are required. '
'Use bandwidth.client("catapult", "YOUR-USER-ID", "YOUR-API-TOKEN", "YOUR-API-SECRET")')
self.user_id = user_id
self.api_endpoint = other_options.get(
'api_endpoint', 'https://api.catapult.inetwork.com')
self.api_version = other_options.get('api_version', 'v1')
self.auth = (api_token, api_secret)
def _request(self, method, url, *args, **kwargs):
user_agent = 'PythonSDK_' + version
headers = kwargs.pop('headers', None)
if headers:
headers['User-Agent'] = user_agent
else:
headers = {
'User-Agent': user_agent
}
if url.startswith('/'):
# relative url
url = '%s/%s%s' % (self.api_endpoint, self.api_version, url)
return requests.request(method, url, auth=self.auth, headers=headers, *args, **kwargs)
def _check_response(self, response):
if response.status_code >= 400:
if response.headers.get('content-type') == 'application/json':
data = response.json()
raise CatapultException(
response.status_code, data['message'], code=data.get('code'))
else:
raise CatapultException(
response.status_code, response.content.decode('utf-8')[:79])
def _make_request(self, method, url, *args, **kwargs):
response = self._request(method, url, *args, **kwargs)
self._check_response(response)
data = None
id = None
if response.headers.get('content-type') == 'application/json':
data = convert_object_to_snake_case(response.json())
location = response.headers.get('location')
if location is not None:
id = location.split('/')[-1]
return (data, response, id)
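# The API helpers below all funnel through _make_request and receive this
# (json_body, raw_response, created_id) triple; created_id is parsed from the
# Location header that Catapult sets when it creates a resource.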
def build_sentence(self, sentence, gender=None, locale=None, voice=None, loop_enabled=None, **kwargs):
"""
Create a dictionary to speak sentence to live call, bridge, or conference
:param str sentence: The sentence to speak.
:param str gender: The gender of the voice used to synthesize the sentence.
:param str locale: The locale used to get the accent of the voice used to synthesize the sentence.
:param str voice: The voice to speak the sentence.<br>
- English US: Susan (Default), Kate, Julie, Dave, Paul
- English UK: Bridget
- Spanish: Esperanza, Violeta, Jorge
- French: Jolie, Bernard
- German: Katrin, Stefan
- Italian: Paola, Luca
:param bool loop_enabled: When value is true, the sentence will keep repeating until stopped.
:rtype: dict
:returns: dictionary to be passed to audio playback methods
:Example:
my_sentence = api.build_sentence(sentence = "Hello from Bandwidth",
gender="Female",
locale="en_UK",
voice="Bridget",
loop_enabled=True
)
api.play_audio_to_call(call_id, my_sentence)
"""
kwargs["sentence"] = sentence
kwargs["gender"] = gender
kwargs["locale"] = locale
kwargs["voice"] = voice
kwargs["loopEnabled"] = loop_enabled
return kwargs
def build_audio_playback(self, file_url, loop_enabled=None, **kwargs):
"""
Create a dictionary to play back an audio file
:param str file_url: The location of an audio file to play (WAV and MP3 supported).
:param bool loop_enabled: When value is true, the audio will keep playing in a loop.
:rtype: dict
:returns: dictionary to be passed to audio playback methods
:Example:
my_audio = api.build_audio_playback('http://my_site.com/file.mp3', loop_enabled=True)
"""
kwargs["fileUrl"] = file_url
kwargs["loopEnabled"] = loop_enabled
return kwargs
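# Hedged usage sketch (mirrors the build_sentence example above; call_id is
# assumed to refer to an existing call): the returned dict is meant to be
# passed to the audio playback methods, e.g.
#   my_audio = api.build_audio_playback('http://my_site.com/file.mp3', loop_enabled=True)
#   api.play_audio_to_call(call_id, my_audio)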
"""
Account API
"""
def get_account(self):
"""
Get an Account object
:rtype: dict
:returns: account data
Example::
data = api.get_account()
"""
return self._make_request('get', '/users/%s/account' % self.user_id)[0]
def list_account_transactions(self,
max_items=None,
to_date=None,
from_date=None,
trans_type=None,
size=None,
number=None,
**kwargs):
"""
Get the transactions from the user's account
:param str max_items: Limit the number of transactions that will be returned.
:param str to_date: Return only transactions that are newer than the parameter. \
Format: "yyyy-MM-dd'T'HH:mm:ssZ"
:param str from_date: Return only transactions that are older than the parameter. \
Format: "yyyy-MM-dd'T'HH:mm:ssZ"
:param str trans_type: Return only transactions that are this type.
:param int size: Used for pagination to indicate the size of each page requested for querying a list of items. \
If no value is specified the default value is 25. (Maximum value 1000)
:param str number: Search transactions by phone number
:rtype: types.GeneratorType
:returns: list of transactions
Example: Get transactions::
transactions = api.list_account_transactions()
Example: Get transactions by date::
transactions = api.list_account_transactions(from_date='2016-01-01T00:00:00Z')
Example: Get transactions filtering by date::
transactions = api.list_account_transactions(from_date='2016-01-01T00:00:00Z', to_date='2016-02-01T00:00:00Z')
Example: Get transactions limiting result::
transactions = api.list_account_transactions(max_items=100)
Example: Get transactions of payment type::
transactions = api.list_account_transactions(trans_type='charge')
"""
kwargs["maxItems"] = max_items
kwargs["toDate"] = to_date
kwargs["fromDate"] = from_date
kwargs["type"] = trans_type
kwargs["size"] = size
kwargs["number"] = number
path = '/users/%s/account/transactions' % self.user_id
return get_lazy_enumerator(self, lambda: self._make_request('get', path, params=kwargs))
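# Illustrative note (an assumption about get_lazy_enumerator's behavior, as
# used throughout this client): the generator returned by the list_* methods
# fetches additional pages lazily as you iterate, so you only pay for the
# pages you actually consume:
#   for tx in api.list_account_transactions(size=25):
#       print(tx)  # the next page is requested only when the current one is exhausted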
def list_calls(self, bridge_id=None, conference_id=None, from_=None, to=None, size=None, sort_order=None, **kwargs):
"""
Get a list of calls
:param str bridge_id: The id of the bridge for querying a list of calls history
:param str conference_id: The id of the conference for querying a list of calls history
:param str ``from_``: The number to filter calls that came from
:param str to: The number to filter calls that was called to
:param str sort_order: How to sort the calls. \
Values are asc or desc If no value is specified the default value is desc
:param int size: Used for pagination to indicate the size of each page requested for querying a list of items. \
If no value is specified the default value is 25. (Maximum value 1000)
:rtype: types.GeneratorType
:returns: list of calls
Example: Fetch calls from specific telephone number::
call_list = api.list_calls(from_ = '+19192223333', size = 1000)
total_chargeable_duration = 0
for call in call_list:
total_chargeable_duration += call['chargeableDuration']
print(total_chargeable_duration)
## 240
Example: List Calls::
call_list = api.list_calls(to = '+19192223333', size = 2)
print(list(call_list))
## [
## {
## 'activeTime' : '2017-01-26T16:10:23Z',
## 'callbackUrl' : 'http://yoursite.com/calls',
## 'chargeableDuration' : 60,
## 'direction' : 'out',
## 'endTime' : '2017-01-26T16:10:33Z',
## 'events' : 'https://api.catapult.inetwork.com/v1/users/u-abc123/calls/c-abc123/events',
## 'from' : '+17079311113',
## 'id' : 'c-abc123',
## 'recordingEnabled' : False,
## 'recordingFileFormat' : 'wav',
## 'recordings' : 'https://api.../v1/users/u-abc123/calls/c-abc123/recordings',
## 'startTime' : '2017-01-26T16:10:11Z',
## 'state' : 'completed',
## 'to' : '+19192223333',
## 'transcriptionEnabled': False,
## 'transcriptions' : 'https://api.../v1/users/u-abc123/calls/c-abc123/transcriptions'
## },
## {
## 'activeTime' : '2016-12-29T23:50:35Z',
## 'chargeableDuration' : 60,
## 'direction' : 'out',
## 'endTime' : '2016-12-29T23:50:41Z',
## 'events' : 'https://api.catapult.inetwork.com/v1/users/u-abc123/calls/c-xyz987/events',
## 'from' : '+19194443333',
## 'id' : 'c-xyz987',
## 'recordingEnabled' : False,
## 'recordingFileFormat' : 'wav',
## 'recordings' : 'https://api.../v1/users/u-abc123/calls/c-xyz987/recordings',
## 'startTime' : '2016-12-29T23:50:15Z',
## 'state' : 'completed',
## 'to' : '+19192223333',
## 'transcriptionEnabled': False,
## 'transcriptions' : 'https://api.../v1/users/u-abc123/calls/c-xyz987/transcriptions'
## }
## ]
"""
kwargs["bridgeId"] = bridge_id
kwargs["conferenceId"] = conference_id
kwargs["from"] = from_
kwargs["to"] = to
kwargs["size"] = size
kwargs["sortOrder"] = sort_order
path = '/users/%s/calls' % self.user_id
return get_lazy_enumerator(self, lambda: self._make_request('get', path, params=kwargs))
def create_call(self,
from_,
to,
call_timeout=None,
callback_url=None,
callback_timeout=None,
callback_http_method=None,
fallback_url=None,
bridge_id=None,
conference_id=None,
recording_enabled=False,
recording_file_format=None,
recording_max_duration=None,
transcription_enabled=False,
tag=None,
sip_headers=None,
**kwargs):
"""
Create a call
:param str ``from_``: A Bandwidth phone number on your account the call should come from (required)
:param str to: The number to call (required)
:param str call_timeout: Determines how long the platform should wait for the call to be answered
before timing out, in seconds.
:param str callback_url: The full server URL where the call events related to the Call will be sent to.
:param str callback_timeout: Determines how long the platform should wait for callbackUrl's response before
timing out, in milliseconds.
:param str callback_http_method: Determines whether the callback event should be sent via HTTP GET or HTTP POST.
Values are "GET" or "POST" (if not set the default is POST).
:param str fallback_url: The full server URL used to send the callback event if the request to
callbackUrl fails.
:param str bridge_id: The id of the bridge where the call will be added.
:param str conference_id: Id of the conference where the call will be added. This property is required
if you want to add this call to a conference.
:param bool recording_enabled: Indicates if the call should be recorded after being created. Set to "true"
to enable. Default is "false".
:param str recording_file_format: The file format of the recorded call. \
Supported values are wav (default) and mp3.
:param str recording_max_duration: Indicates the maximum duration of call recording in seconds. \
Default value is 1 hour.
:param bool transcription_enabled: Whether recordings of this call will be automatically transcribed.
:param str tag: A string that will be included in the callback events of the call.
:param str sip_headers: Map of Sip headers prefixed by "X-". Up to 5 headers can be sent per call.
:rtype: str
:returns: id of created call
Example: Create an outbound phone call::
call_id = api.create_call(from_='+1234567890',
to='+1234567891',
callback_url='http://yoursite.com/calls')
print(call_id)
## c-abc123
my_call = api.get_call(call_id)
print(my_call)
## { 'callbackUrl' : 'http://yoursite.com/calls',
## 'direction' : 'out',
## 'events' : 'https://api.catapult.inetwork.com/v1/users/u-abc/calls/c-abc123/events',
## 'from' : '+1234567890',
## 'id' : 'c-abc123',
## 'recordingEnabled' : False,
## 'recordingFileFormat' : 'wav',
## 'recordings' : 'https://api.catapult.inetwork.com/v1/users/u-abc/calls/c-abc123/recordings',
## 'startTime' : '2017-01-26T16:10:11Z',
## 'state' : 'started',
## 'to' : '+1234567891',
## 'transcriptionEnabled': False,
## 'transcriptions' : 'https://api.../v1/users/u-abc/calls/c-abc123/transcriptions'}
"""
kwargs["from"] = from_
kwargs["to"] = to
kwargs["callTimeout"] = call_timeout
kwargs["callbackUrl"] = callback_url
kwargs["callbackTimeout"] = callback_timeout
kwargs["callbackHttpMethod"] = callback_http_method
kwargs["fallbackUrl"] = fallback_url
kwargs["bridgeId"] = bridge_id
kwargs["conferenceId"] = conference_id
kwargs["recordingEnabled"] = recording_enabled
kwargs["recordingFileFormat"] = recording_file_format
kwargs["recordingMaxDuration"] = recording_max_duration
kwargs["transcriptionEnabled"] = transcription_enabled
kwargs["tag"] = tag
kwargs["sipHeaders"] = sip_headers
return self._make_request('post', '/users/%s/calls' % self.user_id, json=kwargs)[2]
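# Note (sketch restating the code above, not from the original docs):
# create_call returns element [2] of the _make_request tuple, i.e. the id
# parsed from the response's Location header rather than from a JSON body:
#   call_id = api.create_call(from_='+1234567890', to='+1234567891')
#   # Location: .../v1/users/u-abc/calls/c-abc123  ->  call_id == 'c-abc123'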
def get_call(self, call_id):
"""
Get information about a call
:type call_id: str
:param call_id: id of a call
:rtype: dict
:returns: call information
Fetch and Print Call::
my_call = api.get_call(call_id)
print(my_call)
## { 'callbackUrl' : 'http://yoursite.com/calls',
## 'direction' : 'out',
## 'events' : 'https://api.catapult.inetwork.com/v1/users/u-abc/calls/c-abc123/events',
## 'from' : '+1234567890',
## 'id' : 'c-abc123',
## 'recordingEnabled' : False,
## 'recordingFileFormat' : 'wav',
## 'recordings' : 'https://api.catapult.inetwork.com/v1/users/u-abc/calls/c-abc123/recordings',
## 'startTime' : '2017-01-26T16:10:11Z',
## 'state' : 'started',
## 'to' : '+1234567891',
## 'transcriptionEnabled': False,
## 'transcriptions' : 'https://api..../v1/users/u-abc/calls/c-abc123/transcriptions'}
"""
return self._make_request('get', '/users/%s/calls/%s' % (self.user_id, call_id))[0]
def update_call(self,
call_id,
state=None,
recording_enabled=None,
recording_file_format=None,
transfer_to=None,
transfer_caller_id=None,
whisper_audio=None,
callback_url=None,
**kwargs):
"""
Update a call
:type call_id: str
:param call_id: id of a call
:param str state: The call state. Possible values: rejected to reject not answer, active to answer the call,
completed to hangup the call, transferring to start and connect call to a new outbound call.
:param bool recording_enabled: Indicates if the call should be recorded. \
Values true or false. You can turn recording on/off and have multiple recordings on a single call.
:param str recording_file_format: The file format of the recorded call. \
Supported values are wav (default) and mp3.
:param str transfer_to: Phone number or SIP address that the call is going to be transferred to.
:param str transfer_caller_id: This is the caller id that will be used when the call is transferred.
:param dict whisper_audio: Audio to be played to the caller that the call will be transferred to.
:param str callback_url: The server URL where the call events for the new call will be sent upon transferring.
Update call with state = completed. (Hang up the call)::
my_call = api.get_call(call_id)
my_call_state = my_call['state']
print(my_call_state)
## started
api.update_call(my_call['id'], state='completed')
my_call = api.get_call(my_call['id'])
print(my_call['state'])
## completed
"""
kwargs["state"] = state
kwargs["recordingEnabled"] = recording_enabled
kwargs["recordingFileFormat"] = recording_file_format
kwargs["transferTo"] = transfer_to
kwargs["transferCallerId"] = transfer_caller_id
kwargs["whisperAudio"] = whisper_audio
kwargs["callbackUrl"] = callback_url
return self._make_request('post', '/users/%s/calls/%s' % (self.user_id, call_id), json=kwargs)[2]
def play_audio_to_call(self,
call_id,
file_url=None,
sentence=None,
gender=None,
locale=None,
voice=None,
loop_enabled=None,
**kwargs):
"""
Play audio to a call
:param str call_id: id of a call
:param str file_url: The location of an audio file to play (WAV and MP3 supported).
:param str sentence: The sentence to speak.
:param str gender: The gender of the voice used to synthesize the sentence.
:param str locale: The locale used to get the accent of the voice used to synthesize the sentence.
:param str voice: The voice to speak the sentence.
:param bool loop_enabled: When value is true, the audio will keep playing in a loop.
Example: Play an audio file or speak a sentence::
api.play_audio_to_call('callId', file_url='http://host/path/file.mp3')
api.play_audio_to_call('callId', sentence='Press 0 to complete call', gender='female')
# or with extension methods
api.play_audio_file_to_call('callId', 'http://host/path/file.mp3')
api.speak_sentence_to_call('callId', 'Hello')
"""
kwargs["fileUrl"] = file_url
kwargs["sentence"] = sentence
kwargs["gender"] = gender
kwargs["locale"] = locale
kwargs["voice"] = voice
kwargs["loopEnabled"] = loop_enabled
self._make_request(
'post', '/users/%s/calls/%s/audio' % (self.user_id, call_id), json=kwargs)
def send_dtmf_to_call(self, call_id, dtmf_out, **kwargs):
"""
Send DTMF (phone keypad digit presses).
:param str call_id: id of a call
:param str dtmf_out: String containing the DTMF characters to be sent in a call.
Example: Send Digits to call::
api.send_dtmf_to_call('c-callId', '1234')
"""
kwargs["dtmfOut"] = dtmf_out
self._make_request('post', '/users/%s/calls/%s/dtmf' %
(self.user_id, call_id), json=kwargs)
def list_call_recordings(self, call_id):
"""
Get a list of recordings of a call
:type call_id: str
:param call_id: id of a call
:rtype: types.GeneratorType
:returns: list of recordings
Fetch all call recordings for a call::
recordings = api.list_call_recordings('callId')
"""
path = '/users/%s/calls/%s/recordings' % (self.user_id, call_id)
return get_lazy_enumerator(self, lambda: self._make_request('get', path))
def list_call_transcriptions(self, call_id):
"""
Get a list of transcriptions of a call
:type call_id: str
:param call_id: id of a call
:rtype: types.GeneratorType
:returns: list of transcriptions
Get all transcriptions for calls::
transcriptions = api.list_call_transcriptions('callId')
"""
path = '/users/%s/calls/%s/transcriptions' % (self.user_id, call_id)
return get_lazy_enumerator(self, lambda: self._make_request('get', path))
def list_call_events(self, call_id):
"""
Get a list of events of a call
:param str call_id: id of a call
:rtype: types.GeneratorType
:returns: list of events
Fetch all events for calls::
events = api.list_call_events('callId')
"""
path = '/users/%s/calls/%s/events' % (self.user_id, call_id)
return get_lazy_enumerator(self, lambda: self._make_request('get', path))
def get_call_event(self, call_id, event_id):
"""
Get an event of a call
:type call_id: str
:param call_id: id of a call
:type event_id: str
:param event_id: id of an event
:rtype: dict
:returns: data of event
Fetch information on a specific event::
data = api.get_call_event('callId', 'eventId')
"""
return self._make_request('get', '/users/%s/calls/%s/events/%s' % (self.user_id, call_id, event_id))[0]
def create_call_gather(self, call_id,
max_digits=None,
inter_digit_timeout=None,
terminating_digits=None,
tag=None,
**kwargs):
"""
Create a gather for a call
:type call_id: str
:param call_id: id of a call
:param int max_digits: The maximum number of digits to collect, not including terminating digits (maximum 30).
:param int inter_digit_timeout: Stop gathering if a DTMF digit is not detected in this many seconds
(default 5.0; maximum 30.0).
:param str terminating_digits: A string of DTMF digits that end the gather operation immediately
if any one of them is detected
:param str tag: A string you choose that will be included with the response and events for
this gather operation.
:rtype: str
:returns: id of created gather
Create gather for only single digit::
gather_id = api.create_call_gather('callId', max_digits = 1)
"""
kwargs['maxDigits'] = max_digits
kwargs['interDigitTimeout'] = inter_digit_timeout
kwargs['terminatingDigits'] = terminating_digits
kwargs['tag'] = tag
return self._make_request('post', '/users/%s/calls/%s/gather' % (self.user_id, call_id), json=kwargs)[2]
def get_call_gather(self, call_id, gather_id):
"""
Get a gather of a call
:type call_id: str
:param call_id: id of a call
:type gather_id: str
:param gather_id: id of a gather
:rtype: dict
:returns: data of gather
Get Gather information for a previously created gather::
data = api.get_call_gather('callId', 'gatherId')
"""
return self._make_request('get', '/users/%s/calls/%s/gather/%s' % (self.user_id, call_id, gather_id))[0]
def update_call_gather(self, call_id, gather_id, state=None, **kwargs):
"""
Update a gather of a call
:type call_id: str
:param call_id: id of a call
:type gather_id: str
:param gather_id: id of a gather
:param str state: The only update allowed is state:completed to stop the gather.
End gather::
api.update_call_gather('callId', 'gatherId', state = 'completed')
"""
kwargs['state'] = state
return self._make_request('post', '/users/%s/calls/%s/gather/%s' % (self.user_id, call_id, gather_id),
json=kwargs)
# extensions
def answer_call(self, call_id):
"""
Answer incoming call
:type call_id: str
:param call_id: id of a call
Example: Answer incoming call::
api.answer_call('callId')
"""
return self.update_call(call_id, state='active')
def reject_call(self, call_id):
"""
Reject incoming call
:type call_id: str
:param call_id: id of a call
Example: Reject incoming call::
api.reject_call('callId')
"""
return self.update_call(call_id, state='rejected')
def hangup_call(self, call_id):
"""
Complete active call
:type call_id: str
:param call_id: id of a call
Example: Hangup call::
api.hangup_call('callId')
"""
return self.update_call(call_id, state='completed')
def enable_call_recording(self, call_id):
"""
Turn on call recording
:type call_id: str
:param call_id: id of a call
Example: Enable Call Recording::
api.enable_call_recording('c-callId')
"""
return self.update_call(call_id, recording_enabled=True)
def disable_call_recording(self, call_id):
"""
Turn off call recording
:type call_id: str
:param call_id: id of a call
Example: Disable Call Recording::
api.disable_call_recording('c-callId')
"""
return self.update_call(call_id, recording_enabled=False)
def toggle_call_recording(self, call_id):
"""
Fetches the current call state and either toggles recording on or off
:param str call_id: id of the call to toggle
Example: Toggle the call recording::
my_call_id = api.create_call(to='+19192223333', from_='+18281114444')
my_call = api.get_call(my_call_id)
print(my_call['recordingEnabled'])
## False
api.toggle_call_recording(my_call_id)
my_call = api.get_call(my_call_id)
print(my_call['recordingEnabled'])
## True
api.toggle_call_recording(my_call_id)
my_call = api.get_call(my_call_id)
print(my_call['recordingEnabled'])
## False
"""
call_status = self.get_call(call_id)
recording_enabled = call_status['recordingEnabled']
if recording_enabled is True:
return self.disable_call_recording(call_id)
elif recording_enabled is False:
return self.enable_call_recording(call_id)
else:
return call_status
def transfer_call(self, call_id, to, caller_id=None, whisper_audio=None, callback_url=None, **kwargs):
"""
Transfer a call
:type call_id: str
:param call_id: id of a call
:type to: str
:param to: number that the call is going to be transferred to.
:type caller_id: str
:param caller_id: caller id that will be used when the call is transferred
:type whisper_audio: dict
:param whisper_audio: audio to be played to the caller that the call will be transferred to
:type callback_url: str
:param callback_url: URL where the call events for the new call will be sent upon transferring
:rtype: str
:returns: id of created call
Example: Transfer with whisper::
my_sentence = api.build_sentence(sentence = "Hello from Bandwidth",
gender="Female",
locale="en_UK",
voice="Bridget",
loop_enabled=True
)
my_call = api.get_call('c-callId')
api.transfer_call('c-callId', to = '+1234564890', caller_id = my_call['from'], whisper_audio = my_sentence )
Example: Transfer with whisper audio playback::
my_audio = api.build_audio_playback('http://my_site.com/file.mp3', loop_enabled=True)
my_call = api.get_call('c-callId')
api.transfer_call('c-callId', to = '+1234564890', whisper_audio = my_audio )
"""
return self.update_call(call_id,
state='transferring',
transfer_caller_id=caller_id,
transfer_to=to,
callback_url=callback_url,
whisper_audio=whisper_audio,
**kwargs)
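# Sketch (restating the code above): transfer_call is a thin wrapper over
# update_call with state='transferring'; the two calls below are equivalent:
#   api.transfer_call('c-callId', to='+1234564890')
#   api.update_call('c-callId', state='transferring', transfer_to='+1234564890')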
def list_applications(self, size=None, **kwargs):
"""
Get a list of user's applications
:param int size: Used for pagination to indicate the size of each page requested for querying a list
of items. If no value is specified the default value is 25. (Maximum value 1000)
:rtype: types.GeneratorType
:returns: list of applications
Example: Fetch and print all applications::
apps = api.list_applications()
print(list(apps))
Example: Iterate over all applications to find specific name::
apps = api.list_applications(size=20)
app_name = ""
while app_name != "MyAppName":
my_app = next(apps)
app_name = my_app["name"]
print(my_app)
## { 'autoAnswer': True,
## 'callbackHttpMethod': 'get',
## 'id': 'a-asdf',
## 'incomingCallUrl': 'https://test.com/callcallback/',
## 'incomingMessageUrl': 'https://test.com/msgcallback/',
## 'name': 'MyAppName'
## }
"""
kwargs["size"] = size
path = '/users/%s/applications' % self.user_id
return get_lazy_enumerator(self, lambda: self._make_request('get', path, params=kwargs))
def create_application(self,
name,
incoming_call_url=None,
incoming_call_url_callback_timeout=None,
incoming_call_fallback_url=None,
incoming_message_url=None,
incoming_message_url_callback_timeout=None,
incoming_message_fallback_url=None,
callback_http_method=None,
auto_answer=None,
**kwargs):
"""
Creates an application that can handle calls and messages for one of your phone numbers.
:param str name: A name you choose for this application (required).
:param str incoming_call_url: A URL where call events will be sent for an inbound call.
:param str incoming_call_url_callback_timeout: Determines how long the platform should wait for
incomingCallUrl's response before timing out, in milliseconds.
:param str incoming_call_fallback_url: The URL used to send the callback event
if the request to incomingCallUrl fails.
:param str incoming_message_url: A URL where message events will be sent for an inbound SMS message
:param str incoming_message_url_callback_timeout: Determines how long the platform should wait for
incomingMessageUrl's response before timing out, in milliseconds.
:param str incoming_message_fallback_url: The URL used to send the callback event if the request to
incomingMessageUrl fails.
:param str callback_http_method: Determines whether the callback event should be sent via HTTP GET or HTTP POST.\
(If not set the default is HTTP POST)
:param str auto_answer: Determines whether or not an incoming call should be automatically answered. \
Default value is 'true'.
:rtype: str
:returns: id of created application
Example: Create Application::
my_app_id = api.create_application( name = "MyFirstApp",
incoming_call_url = "http://callback.com/calls",
incoming_message_url = "http://callback.com/messages",
callback_http_method = "GET")
print(my_app_id)
## a-1232asf123
my_app = api.get_application(my_app_id)
print(my_app)
## { 'autoAnswer' : True,
## 'callbackHttpMethod': 'get',
## 'id' : 'a-1232asf123',
## 'incomingCallUrl' : 'http://callback.com/calls',
## 'incomingMessageUrl': 'http://callback.com/messages',
## 'incomingSmsUrl' : 'http://callback.com/messages',
## 'name'              : 'MyFirstApp'
## }
print(my_app["id"])
## a-1232asf123
"""
kwargs["name"] = name
kwargs["incomingCallUrl"] = incoming_call_url
kwargs[
"incomingCallUrlCallbackTimeout"] = incoming_call_url_callback_timeout
kwargs["incomingCallFallbackUrl"] = incoming_call_fallback_url
kwargs["incomingMessageUrl"] = incoming_message_url
kwargs[
"incomingMessageUrlCallbackTimeout"] = incoming_message_url_callback_timeout
kwargs["incomingMessageFallbackUrl"] = incoming_message_fallback_url
kwargs["callbackHttpMethod"] = callback_http_method
kwargs["autoAnswer"] = auto_answer
return self._make_request('post', '/users/%s/applications' % self.user_id, json=kwargs)[2]
def get_application(self, app_id):
"""
Gets information about an application
:type app_id: str
:param app_id: id of an application
:rtype: dict
:returns: application information
Example: Fetch single application::
my_app = api.get_application(my_app_id)
print(my_app)
## { 'autoAnswer': True,
## 'callbackHttpMethod': 'get',
## 'id': 'a-1232asf123',
## 'incomingCallUrl': 'http://callback.com/calls',
## 'incomingMessageUrl': 'http://callback.com/messages',
## 'incomingSmsUrl': 'http://callback.com/messages',
## 'name': 'MyFirstApp2'
## }
print(my_app["id"])
## a-1232asf123
"""
return self._make_request('get', '/users/%s/applications/%s' % (self.user_id, app_id))[0]
def update_application(self, app_id,
name=None,
incoming_call_url=None,
incoming_call_url_callback_timeout=None,
incoming_call_fallback_url=None,
incoming_message_url=None,
incoming_message_url_callback_timeout=None,
incoming_message_fallback_url=None,
callback_http_method=None,
auto_answer=None,
**kwargs):
"""
Updates an application that can handle calls and messages for one of your phone numbers.
:param str app_id: The Id of the application to update (a-123)
:param str name: A name you choose for this application.
:param str incoming_call_url: A URL where call events will be sent for an inbound call.
:param str incoming_call_url_callback_timeout: Determines how long the platform should wait for
incomingCallUrl's response before timing out, in milliseconds.
:param str incoming_call_fallback_url: The URL used to send the callback event
if the request to incomingCallUrl fails.
:param str incoming_message_url: A URL where message events will be sent for an inbound SMS message
:param str incoming_message_url_callback_timeout: Determines how long the platform should wait for
incomingMessageUrl's response before timing out, in milliseconds.
:param str incoming_message_fallback_url: The URL used to send the callback event if the request to
incomingMessageUrl fails.
:param str callback_http_method: Determines whether the callback event should be sent via HTTP GET or HTTP POST.\
(If not set the default is HTTP POST)
:param str auto_answer: Determines whether or not an incoming call should be automatically answered. \
Default value is 'true'.
Example: Update Existing Application::
my_app_id = api.create_application( name = "MyFirstApp",
incoming_call_url = "http://callback.com/calls",
incoming_message_url = "http://callback.com/messages",
callback_http_method = "GET")
print(my_app_id)
## a-1232asf123
my_app = api.get_application(my_app_id)
print(my_app)
## { 'autoAnswer'        : True,
##   'callbackHttpMethod': 'get',
##   'id'                : 'a-1232asf123',
##   'incomingCallUrl'   : 'http://callback.com/calls',
##   'incomingMessageUrl': 'http://callback.com/messages',
##   'incomingSmsUrl'    : 'http://callback.com/messages',
##   'name'              : 'MyFirstApp'
## }
api.update_application(my_app_id, name = "My Updated App")
my_app = api.get_application(my_app_id)
print(my_app)
## { 'autoAnswer'        : True,
##   'callbackHttpMethod': 'get',
##   'id'                : 'a-1232asf123',
##   'incomingCallUrl'   : 'http://callback.com/calls',
##   'incomingMessageUrl': 'http://callback.com/messages',
##   'incomingSmsUrl'    : 'http://callback.com/messages',
##   'name'              : 'My Updated App'
## }
"""
kwargs["name"] = name
kwargs["incomingCallUrl"] = incoming_call_url
kwargs[
"incomingCallUrlCallbackTimeout"] = incoming_call_url_callback_timeout
kwargs["incomingCallFallbackUrl"] = incoming_call_fallback_url
kwargs["incomingMessageUrl"] = incoming_message_url
kwargs[
"incomingMessageUrlCallbackTimeout"] = incoming_message_url_callback_timeout
kwargs["incomingMessageFallbackUrl"] = incoming_message_fallback_url
kwargs["callbackHttpMethod"] = callback_http_method
kwargs["autoAnswer"] = auto_answer
self._make_request('post', '/users/%s/applications/%s' % (self.user_id, app_id), json=kwargs)
def delete_application(self, app_id):
"""
Remove an application
:type app_id: str
:param app_id: id of an application
Example: Delete single application::
api.delete_application('a-appId')
try :
api.get_application('a-appId')
except CatapultException as err:
print(err.message)
## The application a-appId could not be found
"""
self._make_request(
'delete', '/users/%s/applications/%s' % (self.user_id, app_id))
def search_available_local_numbers(self,
city=None,
state=None,
zip_code=None,
area_code=None,
local_number=None,
in_local_calling_area=None,
quantity=None,
pattern=None,
**kwargs):
"""
Searches for available local or toll free numbers.
:param str city: A city name
:param str state: A two-letter US state abbreviation
:param str zip_code: A 5-digit US ZIP code
:param str area_code: A 3-digit telephone area code
:param str local_number: It is defined as the first digits of a telephone number inside an area code for
filtering the results. It must have at least 3 digits and the areaCode field must be filled.
:param str in_local_calling_area: Boolean value to indicate that the search for available numbers
must consider overlaid areas.
:param int quantity: The maximum number of numbers to return (default 10, maximum 5000)
:param str pattern: A number pattern that may include letters, digits, and the wildcard characters
:rtype: list
:returns: list of numbers
Example: Search for 3 910 numbers::
numbers = api.search_available_local_numbers(area_code = '910', quantity = 3)
print(numbers)
## [ { 'city' : 'WILMINGTON',
## 'nationalNumber': '(910) 444-0230',
## 'number' : '+19104440230',
## 'price' : '0.35',
## 'rateCenter' : 'WILMINGTON',
## 'state' : 'NC'},
## { 'city' : 'WILMINGTON',
## 'nationalNumber': '(910) 444-0263',
## 'number' : '+19104440263',
## 'price' : '0.35',
## 'rateCenter' : 'WILMINGTON',
## 'state' : 'NC'},
## { 'city' : 'WILMINGTON',
## 'nationalNumber': '(910) 444-0268',
## 'number' : '+19104440268',
## 'price' : '0.35',
## 'rateCenter' : 'WILMINGTON',
## 'state' : 'NC'}
## ]
print(numbers[0]["number"])
## +19104440230
"""
kwargs["city"] = city
kwargs["state"] = state
kwargs["zip"] = zip_code
kwargs["areaCode"] = area_code
kwargs["localNumber"] = local_number
kwargs["inLocalCallingArea"] = in_local_calling_area
kwargs["quantity"] = quantity
kwargs["pattern"] = pattern
return self._make_request('get', '/availableNumbers/local', params=kwargs)[0]
def search_available_toll_free_numbers(self, quantity=None, pattern=None, **kwargs):
"""
Searches for available toll free numbers.
:param int quantity: The maximum number of numbers to return (default 10, maximum 5000)
:param str pattern: A number pattern that may include letters, digits, and the wildcard characters
:rtype: list
:returns: list of numbers
Example: Search for 3 toll free numbers with pattern 456::
numbers = api.search_available_toll_free_numbers(pattern = '*456', quantity = 3)
print(numbers)
## [ { 'nationalNumber': '(844) 489-0456',
## 'number' : '+18444890456',
## 'patternMatch' : ' 456',
## 'price' : '0.75'},
## { 'nationalNumber': '(844) 498-2456',
## 'number' : '+18444982456',
## 'patternMatch' : ' 456',
## 'price' : '0.75'},
## { 'nationalNumber': '(844) 509-4566',
## 'number' : '+18445094566',
## 'patternMatch' : ' 456 ',
## 'price' : '0.75'}]
print(numbers[0]["number"])
## +18444890456
"""
kwargs["quantity"] = quantity
kwargs["pattern"] = pattern
return self._make_request('get', '/availableNumbers/tollFree', params=kwargs)[0]
def search_and_order_local_numbers(self,
city=None,
state=None,
zip_code=None,
area_code=None,
local_number=None,
in_local_calling_area=None,
quantity=None,
**kwargs):
"""
Searches and orders for available local numbers.
:param str city: A city name
:param str state: A two-letter US state abbreviation
:param str zip_code: A 5-digit US ZIP code
:param str area_code: A 3-digit telephone area code
:param str local_number: It is defined as the first digits of a telephone number inside an area code for
filtering the results. It must have at least 3 digits and the areaCode field must be filled.
:param str in_local_calling_area: Boolean value to indicate that the search for available numbers
must consider overlaid areas.
:param int quantity: The maximum number of numbers to return (default 10, maximum 5000)
:rtype: list
:returns: list of ordered numbers
Example: Search _and_ order a single number::
ordered_numbers = api.search_and_order_local_numbers(zip_code = '27606', quantity = 1)
print(ordered_numbers)
## [ { 'city' : 'RALEIGH',
## 'id' : 'n-abc',
## 'location' : 'https://api.catapult.inetwork.com/v1/users/u-12/phoneNumbers/n-abc',
## 'nationalNumber': '(919) 222-4444',
## 'number' : '+19192224444',
## 'price' : '0.35',
## 'state' : 'NC'}]
"""
kwargs["city"] = city
kwargs["state"] = state
kwargs["zip"] = zip_code
kwargs["areaCode"] = area_code
kwargs["localNumber"] = local_number
kwargs["inLocalCallingArea"] = in_local_calling_area
kwargs["quantity"] = quantity
number_list = self._make_request(
'post', '/availableNumbers/local', params=kwargs)[0]
for item in number_list:
item['id'] = item.get('location', '').split('/')[-1]
return number_list
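# Illustrative note (not in the original source): ordered numbers come back
# without an explicit id, so the loop above derives one from the last path
# segment of each item's 'location' URL, e.g.
#   'https://api.catapult.inetwork.com/v1/users/u-12/phoneNumbers/n-abc'.split('/')[-1]
#   # -> 'n-abc'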
def search_and_order_toll_free_numbers(self, quantity, **kwargs):
"""
Searches for and orders available toll free numbers.
:param int quantity: The maximum number of numbers to return (default 10, maximum 5000)
:rtype: list
:returns: list of numbers
Example: Search then order a single toll-free number::
numbers = api.search_and_order_toll_free_numbers(quantity = 1)
print(numbers)
## [ { 'location' : 'https://api.catapult.inetwork.com/v1/users/u-123/phoneNumbers/n-abc',
## 'nationalNumber': '(844) 484-1048',
## 'number' : '+18444841048',
## 'price' : '0.75'}]
print(numbers[0]["number"])
## +18444841048
"""
kwargs["quantity"] = quantity
number_list = self._make_request(
'post', '/availableNumbers/tollFree', params=kwargs)[0]
for item in number_list:
item['id'] = item.get('location', '').split('/')[-1]
return number_list
def list_bridges(self, size=None, **kwargs):
"""
Get a list of bridges
:param int size: Used for pagination to indicate the size of each page requested for querying a list of items.
If no value is specified the default value is 25. (Maximum value 1000)
:rtype: types.GeneratorType
:returns: list of bridges
Example: List bridges 1000 at a time::
bridges = api.list_bridges(size=1000)
for bridge in bridges:
print(bridge["id"])
## brg-6mv7pi22i
## brg-3emq7olua
## brg-bbufdc7yq
## brg-dvpvd7cuy
## brg-5ws2buzmq
"""
kwargs["size"] = size
path = '/users/%s/bridges' % self.user_id
return get_lazy_enumerator(self, lambda: self._make_request('get', path, params=kwargs))
def create_bridge(self, call_ids=None, bridge_audio=None, **kwargs):
"""
Create a bridge
:param bool bridge_audio: Enable/Disable two way audio path (default = true)
:param list call_ids: The list of call ids in the bridge. If either of the call ids is not provided the
bridge is logically created and it can be used to place calls later.
:rtype: str
:returns: id of created bridge
Example: Create bridge with 2 calls and audio::
bridge_id = api.create_bridge(call_ids = ['callId1', 'callId2'], bridge_audio = True)
print(bridge_id)
# brg-123
"""
kwargs["callIds"] = call_ids
kwargs["bridgeAudio"] = bridge_audio
return self._make_request('post', '/users/%s/bridges' % self.user_id, json=kwargs)[2]
def get_bridge(self, bridge_id):
"""
Gets information about a bridge
:type bridge_id: str
:param bridge_id: id of a bridge
:rtype: dict
:returns: bridge information
Example: Fetch single bridge by ID::
my_bridge = api.get_bridge('brg-bridgeId')
print(my_bridge)
## { 'bridgeAudio': True,
## 'calls' : 'https://api.catapult.inetwork.com/v1/users/u-123/bridges/brg-bridgeId/calls',
## 'createdTime': '2017-01-26T01:15:09Z',
## 'id' : 'brg-bridgeId',
## 'state' : 'created'}
print(my_bridge["state"])
## created
"""
return self._make_request('get', '/users/%s/bridges/%s' % (self.user_id, bridge_id))[0]
def update_bridge(self, bridge_id, call_ids=None, bridge_audio=None, **kwargs):
"""
Update a bridge
:type bridge_id: str
:param bridge_id: id of a bridge
:param bool bridge_audio: Enable/Disable two way audio path (default = true)
:param list call_ids: The list of call ids in the bridge. If either of the call ids
is not provided the bridge is logically created and it can be
used to place calls later.
Example: stop bridging audio::
my_bridge = api.get_bridge('brg-bridgeId')
print(my_bridge["bridgeAudio"])
## True
api.update_bridge(my_bridge['id'], call_ids = ['callId1', 'callId2'], bridge_audio = False)
my_bridge = api.get_bridge(my_bridge['id'])
print(my_bridge["bridgeAudio"])
## False
"""
kwargs["callIds"] = call_ids
kwargs["bridgeAudio"] = bridge_audio
self._make_request('post', '/users/%s/bridges/%s' %
(self.user_id, bridge_id), json=kwargs)
def list_bridge_calls(self, bridge_id):
"""
Get a list of calls of a bridge
:type bridge_id: str
:param bridge_id: id of a bridge
:rtype: types.GeneratorType
:returns: list of calls
Example: Fetch all calls that were in a bridge::
call_list = api.list_bridge_calls('bridgeId')
print(list(call_list))
## [
## {
## "activeTime" : "2013-05-22T19:49:39Z",
## "direction" : "out",
## "from" : "{fromNumber}",
## "id" : "{callId1}",
## "bridgeId" : "{bridgeId}",
## "startTime" : "2013-05-22T19:49:35Z",
## "state" : "active",
## "to" : "{toNumber1}",
## "recordingEnabled": false,
## "events" : "https://api.catapult.inetwork.com/v1/users/{userId}/calls/{callId1}/events",
## "bridge" : "https://api.catapult.inetwork.com/v1/users/{userId}/bridges/{bridgeId}"
## },
## {
## "activeTime" : "2013-05-22T19:50:16Z",
## "direction" : "out",
## "from" : "{fromNumber}",
## "id" : "{callId2}",
## "bridgeId" : "{bridgeId}",
## "startTime" : "2013-05-22T19:50:16Z",
## "state" : "active",
## "to" : "{toNumber2}",
## "recordingEnabled": false,
## "events" : "https://api.catapult.inetwork.com/v1/users/{userId}/calls/{callId2}/events",
## "bridge" : "https://api.catapult.inetwork.com/v1/users/{userId}/bridges/{bridgeId}"
## }
## ]
"""
path = '/users/%s/bridges/%s/calls' % (self.user_id, bridge_id)
return get_lazy_enumerator(self, lambda: self._make_request('get', path))
def play_audio_to_bridge(self, bridge_id,
file_url=None,
sentence=None,
gender=None,
locale=None,
voice=None,
loop_enabled=None,
**kwargs):
"""
Play audio to a bridge
:param str bridge_id: id of a bridge
:param str file_url: The location of an audio file to play (WAV and MP3 supported).
:param str sentence: The sentence to speak.
:param str gender: The gender of the voice used to synthesize the sentence.
:param str locale: The locale used to get the accent of the voice used to synthesize the sentence.
:param str voice: The voice to speak the sentence.
:param bool loop_enabled: When value is true, the audio will keep playing in a loop.
Examples: Play an audio file or speak a sentence::
api.play_audio_to_bridge('bridgeId', file_url='http://host/path/file.mp3')
api.play_audio_to_bridge('bridgeId', sentence='Press 0 to complete call', gender='female')
# or with extension methods
api.play_audio_file_to_bridge('bridgeId', 'http://host/path/file.mp3')
api.speak_sentence_to_bridge('bridgeId', 'Hello')
"""
kwargs["fileUrl"] = file_url
kwargs["sentence"] = sentence
kwargs["gender"] = gender
kwargs["locale"] = locale
kwargs["voice"] = voice
kwargs["loopEnabled"] = loop_enabled
self._make_request(
'post', '/users/%s/bridges/%s/audio' % (self.user_id, bridge_id), json=kwargs)
def create_conference(self,
from_,
callback_url=None,
callback_timeout=None,
callback_http_method=None,
fallback_url=None,
tag=None,
**kwargs):
"""
Create a conference
:param str ``from_``: The phone number that will host the conference (required)
:param str callback_url: The full server URL where the conference events related to the Conference will be sent
:param str callback_timeout: Determines how long the platform should wait for
callbackUrl's response before timing out, in milliseconds.
:param str callback_http_method: Determines whether the callback event should be sent via HTTP GET or HTTP POST. \
Values are "GET" or "POST" (if not set the default is POST).
:param str fallback_url: The full server URL used to send the callback event if the request to callbackUrl
fails or times out
:param str tag: A string that will be included in the callback events of the conference.
:rtype: str
:returns: id of created conference
Example: create simple conference::
conference_id = api.create_conference('+12018994444')
print(conference_id)
## conf-ixaagbn5wcyskisiy
Example: create conference with extra parameters::
conference_id = api.create_conference(from_ = "+12018994444", callback_url = "http://google.com",
callback_timeout= 2000, fallback_url = "http://yahoo.com")
print(conference_id)
## conf-ixaagbn5wcyskisiy
my_conf = api.get_conference(conference_id)
print(my_conf)
## { 'activeMembers' : 0,
## 'callbackHttpMethod': 'post',
## 'callbackTimeout' : 2000,
## 'callbackUrl' : 'http://google.com',
## 'createdTime' : '2017-01-26T01:58:59Z',
## 'fallbackUrl' : 'http://yahoo.com',
## 'from' : '+12018994444',
## 'hold' : False,
## 'id' : 'conf-ixaagbn5wcyskisiy',
## 'mute' : False,
## 'state' : 'created'}
"""
kwargs["from"] = from_
kwargs["callbackUrl"] = callback_url
kwargs["callbackTimeout"] = callback_timeout
kwargs["callbackHttpMethod"] = callback_http_method
kwargs["fallbackUrl"] = fallback_url
kwargs["tag"] = tag
return self._make_request('post', '/users/%s/conferences' % self.user_id, json=kwargs)[2]
def get_conference(self, conference_id):
"""
Get information about a conference
:type conference_id: str
:param conference_id: id of a conference
:rtype: dict
:returns: conference information
Example: Create then fetch conference::
conference_id = api.create_conference(from_ = "+12018994444", callback_url = "http://google.com",
callback_timeout= 2000, fallback_url = "http://yahoo.com")
print(conference_id)
## conf-ixaagbn5wcyskisiy
my_conf = api.get_conference(conference_id)
print(my_conf)
## { 'activeMembers' : 0,
## 'callbackHttpMethod': 'post',
## 'callbackTimeout' : 2000,
## 'callbackUrl' : 'http://google.com',
## 'createdTime' : '2017-01-26T01:58:59Z',
## 'fallbackUrl' : 'http://yahoo.com',
## 'from' : '+12018994444',
## 'hold' : False,
## 'id' : 'conf-ixaagbn5wcyskisiy',
## 'mute' : False,
## 'state' : 'created'}
"""
return self._make_request('get', '/users/%s/conferences/%s' % (self.user_id, conference_id))[0]
def update_conference(self,
conference_id,
state=None,
mute=None,
hold=None,
callback_url=None,
callback_timeout=None,
callback_http_method=None,
fallback_url=None,
tag=None,
**kwargs):
"""
Update a conference
:param str conference_id: id of a conference
:param str state: Conference state. Possible state values are: "completed" to terminate the conference.
:param str mute: If "true", all members can't speak in the conference.\
If "false", all members can speak in the conference
:param str hold: If "true", all members can't hear or speak in the conference. \
If "false", all members can hear and speak in the conference
:param str callback_url: The full server URL where the conference events related to the conference will be sent
:param str callback_timeout: Determines how long the platform should wait for callbackUrl's response before
timing out, in milliseconds.
:param str callback_http_method: Determines whether the callback event should be sent via HTTP GET or HTTP POST. \
Values are "GET" or "POST" (if not set the default is POST).
:param str fallback_url: The full server URL used to send the callback event
if the request to callbackUrl fails.
:param str tag: A string that will be included in the callback events of the conference.
Example: End conference::
api.update_conference('conferenceId', state='completed')
"""
kwargs["state"] = state
kwargs["mute"] = mute
kwargs["hold"] = hold
kwargs["callbackUrl"] = callback_url
kwargs["callbackTimeout"] = callback_timeout
kwargs["callbackHttpMethod"] = callback_http_method
kwargs["fallbackUrl"] = fallback_url
kwargs["tag"] = tag
self._make_request('post', '/users/%s/conferences/%s' %
(self.user_id, conference_id), json=kwargs)
def play_audio_to_conference(self,
conference_id,
file_url=None,
sentence=None,
gender=None,
locale=None,
voice=None,
loop_enabled=None,
**kwargs):
"""
Play audio to a conference
:type conference_id: str
:param conference_id: id of a conference
:param str file_url: The location of an audio file to play (WAV and MP3 supported).
:param str sentence: The sentence to speak.
:param str gender: The gender of the voice used to synthesize the sentence.
:param str locale: The locale used to get the accent of the voice used to synthesize the sentence.
:param str voice: The voice to speak the sentence.
:param bool loop_enabled: When value is true, the audio will keep playing in a loop.
Example: Play audio file to conference::
api.play_audio_to_conference('conferenceId', file_url='http://host/path/file.mp3')
Example: Speak Sentence to conference::
api.play_audio_to_conference('conferenceId', sentence='Press 0 to complete call', gender='female')
Example: Use Extensions methods::
# or with extension methods
api.play_audio_file_to_conference('conferenceId', 'http://host/path/file.mp3')
api.speak_sentence_to_conference('conferenceId', 'Hello')
"""
kwargs['fileUrl'] = file_url
kwargs['sentence'] = sentence
kwargs['gender'] = gender
kwargs['locale'] = locale
kwargs['voice'] = voice
kwargs['loopEnabled'] = loop_enabled
self._make_request('post', '/users/%s/conferences/%s/audio' %
(self.user_id, conference_id), json=kwargs)
def list_conference_members(self, conference_id):
"""
Get a list of members of a conference
:type conference_id: str
:param conference_id: id of a conference
:rtype: types.GeneratorType
:returns: list of conference members
Example: Fetch and list conference members::
my_conf_id = api.create_conference(from_='+19192223333')
print(my_conf_id)
# conf-confId
my_call_id = api.create_call(from_='+19192223333', to='+19192223334', conference_id= 'conf-confId')
print(my_call_id)
# c-callId
my_conf_member_id = api.create_conference_member(my_conf_id, call_id=my_call_id)
print(my_conf_member_id)
# member-memberId
my_conference_members = api.list_conference_members(my_conf_id)
print(list(my_conference_members))
## [
## {
## 'addedTime' :'2017-01-30T22:01:11Z',
## 'call' :'https://api.catapult.inetwork.com/v1/users/u-abc123/calls/c-callId',
## 'hold' :False,
## 'id' :'member-memberId',
## 'joinTone' :False,
## 'leavingTone':False,
## 'mute' :False,
## 'removedTime':'2017-01-30T22:01:21Z',
## 'state' :'completed'
## }
## ]
"""
path = '/users/%s/conferences/%s/members' % (
self.user_id, conference_id)
return get_lazy_enumerator(self, lambda: self._make_request('get', path))
def create_conference_member(self,
conference_id,
call_id=None,
join_tone=None,
leaving_tone=None,
mute=None,
hold=None,
**kwargs):
"""
Create a conference member for a conference
:type conference_id: str
:param conference_id: id of a conference
:param str call_id: The callId must refer to an active call that was created using this conferenceId (required)
:param bool join_tone: If "true", will play a tone when the member joins the conference. \
If "false", no tone is played when the member joins the conference.
:param bool leaving_tone: If "true", will play a tone when the member leaves the conference.\
If "false", no tone is played when the member leaves the conference.
:param bool mute: If "true", member can't speak in the conference.\
If "false", this members can speak in the conference (unless set at the conference level).
:param bool hold: If "true", member can't hear or speak in the conference.\
If "false", member can hear and speak in the conference (unless set at the conference level).
:rtype: str
:returns: id of created conference member
Example: Create Conference and add member::
my_conf_id = api.create_conference(from_='+19192223333')
print(my_conf_id)
# conf-confId
my_call_id = api.create_call(from_='+19192223333', to='+19192223334', conference_id= 'conf-confId')
print(my_call_id)
# c-callId
my_conf_member_id = api.create_conference_member(my_conf_id, call_id=my_call_id, join_tone=True)
print(my_conf_member_id)
# member-memberId
my_conf_member = api.get_conference_member(my_conf_id, my_conf_member_id)
print(my_conf_member)
## {
## 'addedTime': '2017-01-30T22:01:11Z',
## 'call' : 'https://api.catapult.inetwork.com/v1/users/u-abc123/calls/c-callId',
## 'hold' : False,
## 'id' : 'member-memberId',
## 'joinTone'    : True,
## 'leavingTone' : False,
## 'mute'        : False,
## 'removedTime' : '2017-01-30T22:01:21Z',
## 'state'       : 'active'
## }
"""
kwargs['callId'] = call_id
kwargs['joinTone'] = join_tone
kwargs['leavingTone'] = leaving_tone
kwargs['mute'] = mute
kwargs['hold'] = hold
path = '/users/%s/conferences/%s/members' % (
self.user_id, conference_id)
return self._make_request('post', path, json=kwargs)[2]
def get_conference_member(self, conference_id, member_id):
"""
Get a conference member
:type conference_id: str
:param conference_id: id of a conference
:type member_id: str
:param member_id: id of a member
:rtype: dict
:returns: data of conference member
Example: Create Conference and add member::
my_conf_id = api.create_conference(from_='+19192223333')
print(my_conf_id)
# conf-confId
my_call_id = api.create_call(from_='+19192223333', to='+19192223334', conference_id= 'conf-confId')
print(my_call_id)
# c-callId
my_conf_member_id = api.create_conference_member(my_conf_id, call_id=my_call_id, join_tone=True)
print(my_conf_member_id)
# member-memberId
my_conf_member = api.get_conference_member(my_conf_id, my_conf_member_id)
print(my_conf_member)
## {
## 'addedTime': '2017-01-30T22:01:11Z',
## 'call' : 'https://api.catapult.inetwork.com/v1/users/u-abc123/calls/c-callId',
## 'hold' : False,
## 'id' : 'member-memberId',
## 'joinTone' : True,
## 'leavingTone' : False,
## 'mute' : False,
## 'removedTime' : '2017-01-30T22:01:21Z',
## 'state' : 'active'
## }
"""
path = '/users/%s/conferences/%s/members/%s' % (
self.user_id, conference_id, member_id)
return self._make_request('get', path)[0]
def update_conference_member(self,
conference_id,
member_id,
join_tone=None,
leaving_tone=None,
mute=None,
hold=None,
**kwargs):
"""
Update a conference member
:param str conference_id: id of a conference
:param str member_id: id of a conference member
:param bool join_tone: If "true", will play a tone when the member joins the conference. \
If "false", no tone is played when the member joins the conference.
:param bool leaving_tone: If "true", will play a tone when the member leaves the conference. \
If "false", no tone is played when the member leaves the conference.
:param bool mute: If "true", member can't speak in the conference. \
If "false", this members can speak in the conference (unless set at the conference level).
:param bool hold: If "true", member can't hear or speak in the conference. \
If "false", member can hear and speak in the conference (unless set at the conference level).
Example: update conference member::
my_conf_id = api.create_conference(from_='+19192223333')
print(my_conf_id)
# conf-confId
my_call_id = api.create_call(from_='+19192223333', to='+19192223334', conference_id= 'conf-confId')
print(my_call_id)
# c-callId
my_conf_member_id = api.create_conference_member(my_conf_id, call_id=my_call_id, join_tone=True)
print(my_conf_member_id)
# member-memberId
my_conf_member = api.get_conference_member(my_conf_id, my_conf_member_id)
print(my_conf_member)
## {
## 'addedTime': '2017-01-30T22:01:11Z',
## 'call' : 'https://api.catapult.inetwork.com/v1/users/u-abc123/calls/c-callId',
## 'hold' : False,
## 'id' : 'member-memberId',
## 'joinTone' : True,
## 'leavingTone' : False,
## 'mute' : False,
## 'removedTime' : '2017-01-30T22:01:21Z',
## 'state' : 'active'
## }
api.update_conference_member(my_conf_id, my_conf_member_id, mute=True, hold=True)
my_conf_member = api.get_conference_member(my_conf_id, my_conf_member_id)
## {
## 'addedTime': '2017-01-30T22:01:11Z',
## 'call' : 'https://api.catapult.inetwork.com/v1/users/u-abc123/calls/c-callId',
## 'hold' : True,
## 'id' : 'member-memberId',
## 'joinTone' : True,
## 'leavingTone' : False,
## 'mute' : True,
## 'removedTime' : '2017-01-30T22:01:21Z',
## 'state' : 'active'
## }
"""
kwargs['joinTone'] = join_tone
kwargs['leavingTone'] = leaving_tone
kwargs['mute'] = mute
kwargs['hold'] = hold
path = '/users/%s/conferences/%s/members/%s' % (
self.user_id, conference_id, member_id)
self._make_request('post', path, json=kwargs)
def play_audio_to_conference_member(self,
conference_id,
member_id,
file_url=None,
sentence=None,
gender=None,
locale=None,
voice=None,
loop_enabled=None,
**kwargs):
"""
Play audio to a conference member
:param str conference_id: id of a conference
:param str member_id: id of a conference member
:param str file_url: The location of an audio file to play (WAV and MP3 supported).
:param str sentence: The sentence to speak.
:param str gender: The gender of the voice used to synthesize the sentence.
:param str locale: The locale used to get the accent of the voice used to synthesize the sentence.
:param str voice: The voice to speak the sentence.
:param bool loop_enabled: When value is true, the audio will keep playing in a loop.
Example: Play audio to specific conference member::
api.play_audio_to_conference_member('conferenceId', 'memberId', file_url='http://host/path/file.mp3')
api.play_audio_to_conference_member('conferenceId', 'memberId',
sentence='Press 0 to complete call', gender='female')
# or with extension methods
api.play_audio_file_to_conference_member('conferenceId', 'memberId', 'http://host/path/file.mp3')
api.speak_sentence_to_conference_member('conferenceId', 'memberId', 'Hello')
"""
kwargs['fileUrl'] = file_url
kwargs['sentence'] = sentence
kwargs['gender'] = gender
kwargs['locale'] = locale
kwargs['voice'] = voice
kwargs['loopEnabled'] = loop_enabled
path = '/users/%s/conferences/%s/members/%s/audio' % (
self.user_id, conference_id, member_id)
self._make_request('post', path, json=kwargs)
# extensions
def speak_sentence_to_conference_member(self,
conference_id,
member_id,
sentence,
gender='female',
voice='susan',
locale='en_US',
tag=None):
"""
Speak sentence to a conference member
:type conference_id: str
:param conference_id: id of a conference
:type member_id: str
:param member_id: id of a conference member
:type sentence: str
:param sentence: sentence to say
:type gender: str
:param gender: gender of voice
:type voice: str
:param voice: voice name
:type locale: str
:param locale: locale name
:type tag: str
:param tag: A string that will be included in the callback events of the call.
Example: Speak sentence to specific conference member::
api.speak_sentence_to_conference_member('conferenceId', 'memberId', 'Hello')
"""
self.play_audio_to_conference_member(conference_id, member_id,
sentence=sentence,
gender=gender,
voice=voice,
locale=locale,
tag=tag
)
def play_audio_file_to_conference_member(self, conference_id, member_id, file_url, tag=None):
"""
Play audio file to a conference member
:type conference_id: str
:param conference_id: id of a conference
:type member_id: str
:param member_id: id of a conference member
:type file_url: str
:param file_url: URL to remote file to play
:type tag: str
:param tag: A string that will be included in the callback events of the call.
Example: Play an audio file to specific member::
api.play_audio_file_to_conference_member('conferenceId', 'memberId', 'http://host/path/file.mp3')
"""
self.play_audio_to_conference_member(conference_id, member_id,
file_url=file_url,
tag=tag
)
def remove_conference_member(self, conference_id, member_id):
"""
Remove a conference member
:type conference_id: str
:param conference_id: id of a conference
:type member_id: str
:param member_id: id of a conference member
Example: Remove Member from Conference::
my_conf = api.get_conference('conferenceId')
my_conf_members = list(api.list_conference_members(my_conf['id']))
print(my_conf_members)
## [{ 'addedTime' : '2017-01-30T23:17:11Z',
## 'call' : 'https://api.catapult.inetwork.com/v1/users/u-abc123/calls/c-callId',
## 'hold' : False,
## 'id' : 'member-memberId',
## 'joinTone' : False,
## 'leavingTone': False,
## 'mute' : False,
## 'state' : 'active'},
## { 'addedTime' : '2017-01-30T23:17:14Z',
## 'call' : 'https://api.catapult.inetwork.com/v1/users/u-abc123/calls/c-callId2',
## 'hold' : False,
## 'id' : 'member-memberId2',
## 'joinTone' : False,
## 'leavingTone': False,
## 'mute' : False,
## 'state' : 'active'}]
api.remove_conference_member(my_conf['id'], my_conf_members[1]['id'])
my_conf_members = list(api.list_conference_members(my_conf['id']))
print(my_conf_members)
## [{ 'addedTime' : '2017-01-30T23:17:11Z',
## 'call' : 'https://api.catapult.inetwork.com/v1/users/u-abc123/calls/c-callId',
## 'hold' : False,
## 'id' : 'member-memberId',
## 'joinTone' : False,
## 'leavingTone': False,
## 'mute' : False,
## 'state' : 'active'},
## { 'addedTime' : '2017-01-30T23:17:14Z',
## 'call' : 'https://api.catapult.inetwork.com/v1/users/u-abc123/calls/c-callId2',
## 'hold' : False,
## 'id' : 'member-memberId2',
## 'joinTone' : False,
## 'leavingTone': False,
## 'mute' : False,
## 'state' : 'completed'}]
"""
self.update_conference_member(
conference_id, member_id, state='completed')
def hold_conference_member(self, conference_id, member_id, hold):
"""
Hold or unhold a conference member
:type conference_id: str
:param conference_id: id of a conference
:type member_id: str
:param member_id: id of a conference member
:type hold: bool
:param hold: hold (if true) or unhold (if false) a member
Example: Put specific conference member on hold::
api.hold_conference_member('conferenceId', 'memberId', True)
"""
self.update_conference_member(conference_id, member_id, hold=hold)
def mute_conference_member(self, conference_id, member_id, mute):
"""
Mute or unmute a conference member
:type conference_id: str
:param conference_id: id of a conference
:type member_id: str
:param member_id: id of a conference member
:type mute: bool
:param mute: mute (if true) or unmute (if false) a member
Example: Mute specific conference member::
api.mute_conference_member('conferenceId', 'memberId', True)
"""
self.update_conference_member(conference_id, member_id, mute=mute)
def terminate_conference(self, conference_id):
"""
        Terminate the current conference
:type conference_id: str
:param conference_id: id of a conference
Example: End the Conference::
api.terminate_conference('conferenceId')
"""
self.update_conference(conference_id, state='completed')
def hold_conference(self, conference_id, hold):
"""
Hold or unhold a conference
:type conference_id: str
:param conference_id: id of a conference
:type hold: bool
:param hold: hold (if true) or unhold (if false) a conference
        Example: Put the entire conference on hold, so that no one can hear::
api.hold_conference('conferenceId', True)
"""
self.update_conference(conference_id, hold=hold)
def mute_conference(self, conference_id, mute):
"""
Mute or unmute a conference
:type conference_id: str
:param conference_id: id of a conference
:type mute: bool
:param mute: mute (if true) or unmute (if false) a conference
        Example: Mute the entire conference, so that no one can speak::
api.mute_conference('conferenceId', True)
"""
self.update_conference(conference_id, mute=mute)
def list_domains(self, size=None, **kwargs):
"""
Get a list of domains
:param int size: Used for pagination to indicate the size of each page requested for querying a list of items. \
If no value is specified the default value is 25. (Maximum value 100)
:rtype: types.GeneratorType
:returns: list of domains
Example: Fetch domains and print::
domain_list = api.list_domains(size=10)
print(list(domain_list))
## [{ 'endpointsUrl': 'https://api.catapult.inetwork.com/v1/users/u-abc123/domains/endpoints',
## 'id' : 'rd-domainId',
## 'name' : 'siplearn1'},
## { 'endpointsUrl' : 'https://api.catapult.inetwork.com/v1/users/u-abc123/domains/endpoints',
## 'id' : 'rd-domainId2',
## 'name' : 'siplearn2'}]
Example: Search for domain based on name::
domain_list = api.list_domains(size=100)
domain_name = ''
while domain_name != 'My Prod Site':
my_domain = next(domain_list)
domain_name = my_domain['name']
print(my_domain)
## { 'description' : 'Python Docs Example',
## 'endpointsUrl': 'https://api.catapult.inetwork.com/v1/users/u-abc123/domains/rd-domainId/endpoints',
## 'id' : 'rd-domainId',
## 'name' : 'My Prod Site'}
"""
kwargs['size'] = size
path = '/users/%s/domains' % self.user_id
return get_lazy_enumerator(self, lambda: self._make_request('get', path, params=kwargs))
def create_domain(self, name, description=None, **kwargs):
"""
Create a domain
:param str name: The name is a unique URI to be used in DNS lookups
:param str description: String to describe the domain
:rtype: str
:returns: id of created domain
Example: Create Domain::
domain_id = api.create_domain(name='qwerty', description='Python Docs Example')
print(domain_id)
# rd-domainId
"""
kwargs['name'] = name
kwargs['description'] = description
return self._make_request('post', '/users/%s/domains' % self.user_id, json=kwargs)[2]
def get_domain(self, domain_id):
"""
Get information about a domain
:type domain_id: str
:param domain_id: id of the domain
:rtype: dict
:returns: domain information
Example: Create then fetch domain::
domain_id = api.create_domain(name='qwerty', description='Python Docs Example')
print(domain_id)
# rd-domainId
my_domain = api.get_domain(domain_id)
print(my_domain)
## { 'description' : 'Python Docs Example',
## 'endpointsUrl': 'https://api.catapult.inetwork.com/v1/users/u-abc123/domains/rd-domainId/endpoints',
## 'id' : 'rd-domainId',
## 'name' : 'qwerty'}
"""
return self._make_request('get', '/users/%s/domains/%s' % (self.user_id, domain_id))[0]
def delete_domain(self, domain_id):
"""
Delete a domain
:type domain_id: str
:param domain_id: id of a domain
Example: Delete domain 'domainId'::
api.delete_domain('domainId')
"""
self._make_request('delete', '/users/%s/domains/%s' %
(self.user_id, domain_id))
def list_domain_endpoints(self, domain_id, size=None, **kwargs):
"""
Get a list of domain's endpoints
:type domain_id: str
:param domain_id: id of a domain
:param int size: Used for pagination to indicate the size of each page requested for querying a list of items.\
If no value is specified the default value is 25. (Maximum value 1000)
:rtype: types.GeneratorType
:returns: list of endpoints
Example: List and iterate over::
endpoint_list = api.list_domain_endpoints('rd-domainId', size=1000)
for endpoint in endpoint_list:
print(endpoint['id'])
##re-endpointId1
##re-endpointId2
Example: List and print all::
endpoint_list = api.list_domain_endpoints('rd-domainId', size=1000)
print(list(endpoint_list))
## [
## {
## 'applicationId':'a-appId',
## 'credentials' :{
## 'realm' :'creds.bwapp.bwsip.io',
## 'username' :'user1'
## },
## 'description' :"Your SIP Account",
## 'domainId' :'rd-domainId',
## 'enabled' :True,
## 'id' :'re-endpointId1',
## 'name' :'User1_endpoint',
## 'sipUri' :'sip:user1@creds.bwapp.bwsip.io'
## },
## {
## 'applicationId':'a-appId',
## 'credentials' :{
## 'realm' :'creds1.bwapp.bwsip.io',
## 'username' :'user2'
## },
## 'description' :"Your SIP Account",
## 'domainId' :'rd-domainId',
## 'enabled' :True,
## 'id' :'re-endpointId2',
## 'name' :'User2_endpoint',
## 'sipUri' :'sip:user2@creds.bwapp.bwsip.io'
## }
## ]
"""
kwargs['size'] = size
path = '/users/%s/domains/%s/endpoints' % (self.user_id, domain_id)
return get_lazy_enumerator(self, lambda: self._make_request('get', path, params=kwargs))
def create_domain_endpoint(
self,
domain_id,
name,
password,
description=None,
application_id=None,
enabled=True,
**kwargs):
"""
Create a domain endpoint
:param str domain_id: id of a domain
        :param str name: The name of the endpoint
:param str description: String to describe the endpoint
:param str application_id: Id of application which will handle calls and messages of this endpoint
:param bool enabled: When set to true, SIP clients can register as this device to receive and make calls. \
When set to false, registration, inbound, and outbound calling will not succeed.
:param str password: Password of created SIP account
:rtype: str
:returns: id of endpoint
Example: Create Endpoint on Domain 'rd-domainId'::
            endpoint_id = api.create_domain_endpoint('rd-domainId',
                                                     name='User3_endpoint',
                                                     password='AtLeast6Chars')
print(endpoint_id)
# re-endpointId3
            my_endpoint = api.get_domain_endpoint('rd-domainId', endpoint_id)
print(my_endpoint)
## {
## 'credentials' :{
## 'realm' :'qwerty.bwapp.bwsip.io',
## 'username':'User3_endpoint'
## },
## 'domainId' :'rd-domainId',
## 'enabled' :True,
## 'id' :'re-endpointId3',
## 'name' :'User3_endpoint',
## 'sipUri' :'sip:user5@qwerty.bwapp.bwsip.io'
## }
"""
kwargs['name'] = name
kwargs['description'] = description
kwargs['applicationId'] = application_id
kwargs['enabled'] = enabled
kwargs['credentials'] = dict(password=password)
return self._make_request('post', '/users/%s/domains/%s/endpoints' % (self.user_id, domain_id), json=kwargs)[2]
def get_domain_endpoint(self, domain_id, endpoint_id):
"""
Get information about an endpoint
:type domain_id: str
:param domain_id: id of a domain
:type endpoint_id: str
        :param endpoint_id: id of an endpoint
        :rtype: dict
        :returns: endpoint information
Example: Create Endpoint on Domain 'rd-domainId' then fetch the endpoint::
            endpoint_id = api.create_domain_endpoint('rd-domainId',
                                                     name='User3_endpoint',
                                                     password='AtLeast6Chars')
print(endpoint_id)
# re-endpointId3
            my_endpoint = api.get_domain_endpoint('rd-domainId', endpoint_id)
print(my_endpoint)
## {
## 'credentials' :{
## 'realm' :'qwerty.bwapp.bwsip.io',
## 'username':'User3_endpoint'
## },
## 'domainId' :'rd-domainId',
## 'enabled' :True,
## 'id' :'re-endpointId3',
## 'name' :'User3_endpoint',
## 'sipUri' :'sip:user5@qwerty.bwapp.bwsip.io'
## }
"""
return self._make_request('get', '/users/%s/domains/%s/endpoints/%s' % (self.user_id,
domain_id, endpoint_id))[0]
def update_domain_endpoint(self,
domain_id,
endpoint_id,
password=None,
description=None,
application_id=None,
enabled=None,
**kwargs):
"""
Update information about an endpoint
:param str domain_id: id of a domain
        :param str endpoint_id: id of an endpoint
:param str description: String to describe the endpoint
:param str application_id: Id of application which will handle calls and messages of this endpoint
:param bool enabled: When set to true, SIP clients can register as this device to receive and make calls. \
When set to false, registration, inbound, and outbound calling will not succeed.
:param str password: Password of created SIP account
Example: Update password and disable the endpoint::
my_endpoint = api.get_domain_endpoint('rd-domainId', 're-endpointId')
print(my_endpoint)
## {
## 'credentials' :{
## 'realm' :'qwerty.bwapp.bwsip.io',
## 'username':'user5'
## },
## 'domainId' :'rd-domainId',
## 'enabled' :True,
## 'id' :'re-endpointId',
## 'name' :'user3',
## 'sipUri' :'sip:user5@qwerty.bwapp.bwsip.io'
## }
api.update_domain_endpoint('rd-domainId', 're-endpointId', enabled=False, password='abc123')
my_endpoint = api.get_domain_endpoint('rd-domainId', 're-endpointId')
print(my_endpoint)
## {
## 'credentials' :{
## 'realm' :'qwerty.bwapp.bwsip.io',
## 'username':'user5'
## },
## 'domainId' :'rd-domainId',
## 'enabled' :False,
## 'id' :'re-endpointId',
## 'name' :'user3',
## 'sipUri' :'sip:user5@qwerty.bwapp.bwsip.io'
## }
"""
kwargs['description'] = description
kwargs['applicationId'] = application_id
kwargs['enabled'] = enabled
kwargs['credentials'] = dict(password=password)
self._make_request('post', '/users/%s/domains/%s/endpoints/%s' %
(self.user_id, domain_id, endpoint_id), json=kwargs)
def delete_domain_endpoint(self, domain_id, endpoint_id):
"""
Remove an endpoint
:param str domain_id: id of a domain
        :param str endpoint_id: id of an endpoint
Example: Delete and try to fetch endpoint::
my_endpoint = api.get_domain_endpoint('rd-domainId', 're-endpointId')
print(my_endpoint)
## {
## 'credentials' :{
## 'realm' :'qwerty.bwapp.bwsip.io',
## 'username':'user5'
## },
## 'domainId' :'rd-domainId',
## 'enabled' :False,
            ##     'id'          :'re-endpointId',
## 'name' :'user3',
## 'sipUri' :'sip:user5@qwerty.bwapp.bwsip.io'
## }
            api.delete_domain_endpoint('rd-domainId', 're-endpointId')
            try:
                my_endpoint = api.get_domain_endpoint('rd-domainId', 're-endpointId')
            except Exception as e:
print(e)
## CatapultException(404, "The endpoint 're-endpointId' could not be found")
"""
self._make_request(
'delete', '/users/%s/domains/%s/endpoints/%s' % (self.user_id, domain_id, endpoint_id))
def create_domain_endpoint_auth_token(self, domain_id, endpoint_id, expires=3600, **kwargs):
"""
Create auth token for an endpoint
:param str domain_id: id of a domain
        :param str endpoint_id: id of an endpoint
        :param int expires: Duration for which the token is valid.
Example: Create token::
token = api.create_domain_endpoint_auth_token('domainId', 'endpointId', 5000)
"""
kwargs['expires'] = expires
path = '/users/%s/domains/%s/endpoints/%s/tokens' % (
self.user_id, domain_id, endpoint_id)
return self._make_request('post', path, json=kwargs)[0]
def list_errors(self, size=None, **kwargs):
"""
Get a list of errors
:param int size: Used for pagination to indicate the size of each page requested for querying a list
of items. If no value is specified the default value is 25. (Maximum value 1000)
:rtype: types.GeneratorType
        :returns: list of errors
Example: List all errors::
error_list = api.list_errors()
print(list(error_list))
# [{
# 'category':'unavailable',
# 'code' :'number-allocator-unavailable',
# 'details':[
# {
# 'id' :'ued-eh3zn3dxgiin4y',
# 'name' :'requestPath',
# 'value':'availableNumbers/local'
# },
# {
# 'id' :'ued-3fsdqiq',
# 'name' :'remoteAddress',
# 'value':'216.82.234.65'
# },
# {
# 'id' :'ued-2r4t47bwi',
# 'name' :'requestMethod',
# 'value':'GET'
# }
# ],
# 'id' :'ue-upvfv53xzca',
# 'message':'Cannot connect to the number allocator',
# 'time' :'2016-03-28T18:31:33Z'
# },
# {
# 'category':'unavailable',
# 'code':'number-allocator-unavailable',
# 'details':[
# {
# 'id':'ued-kntwx7vyotalci',
# 'name':'requestPath',
# 'value':'availableNumbers/local'
# },
# {
# 'id':'ued-b24vxpfskldq',
# 'name':'remoteAddress',
# 'value':'216.82.234.65'
# },
# {
# 'id':'ued-ww5rcgl7zm2ydi',
# 'name':'requestMethod',
# 'value':'GET'
# }
# ],
# 'id':'ue-pok2vg7kyuzaqq',
# 'message':'Cannot connect to the number allocator',
# 'time':'2016-03-28T18:31:33Z'
# }]
"""
kwargs['size'] = size
path = '/users/%s/errors' % self.user_id
return get_lazy_enumerator(self, lambda: self._make_request('get', path, params=kwargs))
def get_error(self, error_id):
"""
Get information about an error
:type error_id: str
        :param error_id: id of an error
:rtype: dict
:returns: error information
Example: Get information of specific error::
error = api.get_error('ue-errorId')
print(error)
## {
## 'category':'unavailable',
## 'code' :'number-allocator-unavailable',
## 'details' :[
## {
## 'id' :'ued-kntvyotalci',
## 'name' :'requestPath',
## 'value' :'availableNumbers/local'
## },
## {
## 'id' :'ued-b2dq',
## 'name' :'remoteAddress',
## 'value' :'216.82.234.65'
## },
## {
## 'id' :'ued-wzm2ydi',
## 'name' :'requestMethod',
## 'value' :'GET'
## }
## ],
## 'id' :'ue-errorId',
## 'message' :'Cannot connect to the number allocator',
## 'time' :'2016-03-28T18:31:33Z'
## }
"""
return self._make_request('get', '/users/%s/errors/%s' % (self.user_id, error_id))[0]
def list_media_files(self):
"""
Gets a list of user's media files.
:rtype: types.GeneratorType
:returns: list of media files
        Example: List media files and save any with `dog` in the file name::
media_list = api.list_media_files()
for media in media_list:
if 'dog' in media['mediaName'].lower():
stream, content_type = api.download_media_file(media['mediaName'])
with io.open(media['mediaName'], 'wb') as file:
file.write(stream.read())
"""
path = '/users/%s/media' % self.user_id
return get_lazy_enumerator(self, lambda: self._make_request('get', path))
def upload_media_file(self, media_name, content=None, content_type='application/octet-stream', file_path=None):
"""
Upload a file
:type media_name: str
:param media_name: name of file on bandwidth server
:type content: str|buffer|bytearray|stream|file
:param content: content of file to upload (file object, string or buffer).
Don't use together with file_path
:type content_type: str
:param content_type: mime type of file
:type file_path: str
:param file_path: path to file to upload. Don't use together with content
Example: Upload text file::
api.upload_media_file('file1.txt', 'content of file', 'text/plain')
# with file path
api.upload_media_file('file1.txt', file_path='/path/to/file1.txt')
"""
is_file_path = False
if file_path is not None and content is None:
content = open(file_path, 'rb')
is_file_path = True
path = '/users/%s/media/%s' % (self.user_id, quote(media_name))
try:
return self._make_request('put', path, data=content, headers={'content-type': content_type})
finally:
if is_file_path:
content.close()
def download_media_file(self, media_name):
"""
Download a file
:type media_name: str
:param media_name: name of file on bandwidth server
        :rtype: (stream, str)
        :returns: stream to the file to download and its mime type
        Example: List media files and save any with `dog` in the file name::
            media_list = api.list_media_files()
for media in media_list:
if 'dog' in media['mediaName'].lower():
stream, content_type = api.download_media_file(media['mediaName'])
with io.open(media['mediaName'], 'wb') as file:
file.write(stream.read())
"""
path = '/users/%s/media/%s' % (self.user_id, quote(media_name))
response = self._request('get', path, stream=True)
response.raise_for_status()
return response.raw, response.headers['content-type']
def delete_media_file(self, media_name):
"""
Remove a file from the server
:type media_name: str
:param media_name: name of file on bandwidth server
Example: Delete a file from server::
api.delete_media_file('file1.txt')
"""
path = '/users/%s/media/%s' % (self.user_id, quote(media_name))
self._make_request('delete', path)
def list_messages(self,
from_=None,
to=None,
from_date_time=None,
to_date_time=None,
direction=None,
state=None,
delivery_state=None,
sort_order=None,
size=None,
**kwargs):
"""
Get a list of user's messages
        :param str ``from_``: Filter messages by the phone number they came from
        :param str to: Filter messages by the phone number they were sent to
:param str from_date_time: The starting date time to filter the messages
(must be in yyyy-MM-dd hh:mm:ss format, like 2014-05-25 12:00:00.)
:param str to_date_time: The ending date time to filter the messages (must be in
yyyy-MM-dd hh:mm:ss format, like 2014-05-25 12:00:00.)
:param str direction: Filter by direction of message, in - a message that came from the telephone
network to one of your numbers (an "inbound" message) or out - a message
that was sent from one of your numbers to the telephone network (an "outbound"
message)
:param str state: The message state to filter. Values are 'received', 'queued', 'sending',
'sent', 'error'
:param str delivery_state: The message delivery state to filter. Values are 'waiting', 'delivered',
'not-delivered'
:param str sort_order: How to sort the messages. Values are 'asc' or 'desc'
        :param int size: Used for pagination to indicate the size of each page requested for querying a list
of items. If no value is specified the default value is 25. (Maximum value 1000)
:rtype: types.GeneratorType
:returns: list of messages
        Example: Search for all messages that are in error::
message_list = api.list_messages()
for message in message_list:
if message['state'] == 'error':
print(message['id'])
## m-it6ewpyiyadfe
## m-pjnqofcjyadfe
## m-t2gspvs6iadfe
## m-shuh6d6pyadfe
"""
kwargs['from'] = from_
kwargs['to'] = to
kwargs['fromDateTime'] = from_date_time
kwargs['toDateTime'] = to_date_time
kwargs['direction'] = direction
kwargs['state'] = state
kwargs['deliveryState'] = delivery_state
kwargs['sortOrder'] = sort_order
kwargs['size'] = size
path = '/users/%s/messages' % self.user_id
return get_lazy_enumerator(self, lambda: self._make_request('get', path, params=kwargs))
def send_message(self, from_, to,
text=None,
media=None,
receipt_requested=None,
callback_url=None,
callback_http_method=None,
callback_timeout=None,
fallback_url=None,
tag=None,
**kwargs):
"""
Send a message (SMS or MMS)
:param str ``from_``: One of your telephone numbers the message should come from
:param str to: The phone number the message should be sent to
:param str text: The contents of the text message
        :param list media: For MMS messages, a media URL to the location of the media, or a list of
            media URLs to be sent with the message.
        :param str receipt_requested: Requested receipt option for outbound messages: 'none', 'all', 'error'
        :param str callback_url: The server URL where the events related to the outgoing message will be sent to
        :param str callback_http_method: Determines whether the callback event is sent via HTTP GET or HTTP POST.
            Values are 'get' or 'post'. Default is 'post'.
        :param str callback_timeout: Determines how long the platform waits for the
            callbackUrl's response before timing out (milliseconds).
        :param str fallback_url: The server URL used to send the message events if the request to callbackUrl fails.
        :param str tag: Any string; it will be included in the callback events of the message.
:rtype: str
:returns: id of created message
Example: Send Text Message::
id = api.send_message(
from_ = '+1234567980',
to = '+1234567981',
text = 'SMS message'
)
Example: Send Picture Message::
id = api.send_message(
from_ = '+1234567980',
to = '+1234567981',
media = ['http://host/path/to/file']
)
"""
kwargs['from'] = from_
kwargs['to'] = to
kwargs['text'] = text
kwargs['media'] = media
kwargs['receiptRequested'] = receipt_requested
kwargs['callbackUrl'] = callback_url
kwargs['callbackHttpMethod'] = callback_http_method
kwargs['callbackTimeout'] = callback_timeout
kwargs['fallbackUrl'] = fallback_url
kwargs['tag'] = tag
return self._make_request('post', '/users/%s/messages' % self.user_id, json=kwargs)[2]
def send_messages(self, messages_data):
"""
        Send several messages in one request
:type messages_data: list
:param messages_data: List of messages to send
Parameters of each message
from
One of your telephone numbers the message should come from
to
The phone number the message should be sent to
text
The contents of the text message
media
                For MMS messages, a media URL to the location of the media, or a list of media URLs
                to be sent with the message.
receiptRequested
Requested receipt option for outbound messages: 'none', 'all', 'error'
callbackUrl
The server URL where the events related to the outgoing message will
be sent to
callbackHttpMethod
                Determines whether the callback event is sent via HTTP GET or HTTP POST.
                Values are 'get' or 'post'. Default is 'post'.
callbackTimeout
                Determines how long the platform waits for the callbackUrl's response
                before timing out (milliseconds).
fallbackUrl
The server URL used to send the message events if the request to callbackUrl fails.
tag
                Any string; it will be included in the callback events of the message.
:rtype: list
:returns: results of sent messages
Example: Bulk Send Picture or Text messages (or both)::
results = api.send_messages([
{'from': '+1234567980', 'to': '+1234567981', 'text': 'SMS message'},
{'from': '+1234567980', 'to': '+1234567982', 'text': 'SMS message2'}
])
"""
results = self._make_request(
'post', '/users/%s/messages' % self.user_id, json=messages_data)[0]
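        # Pair each result with a synthesized message id (parsed from the returned
        # 'location' URL) and with the original request payload.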
for i in range(0, len(messages_data)):
item = results[i]
item['id'] = item.get('location', '').split('/')[-1]
item['message'] = messages_data[i]
return results
def get_message(self, id):
"""
Get information about a message
:type id: str
:param id: id of a message
:rtype: dict
:returns: message information
Example: Fetch information about single message::
my_message = api.get_message('m-na6cpyjf2qcpz6l3drhcx7y')
print(my_message)
## {
## 'callbackUrl' :'https://yoursite.com/message',
## 'direction' :'in',
## 'from' :'+19193047864',
## 'id' :'m-messageId',
## 'media' :[],
## 'messageId' :'m-messageId',
## 'skipMMSCarrierValidation':True,
## 'state' :'received',
## 'text' :'Hey there',
## 'time' :'2017-02-01T21:10:32Z',
## 'to' :'+19191234567'
## }
"""
return self._make_request('get', '/users/%s/messages/%s' % (self.user_id, id))[0]
def get_number_info(self, number):
"""
Gets CNAM information about phone number
:type number: str
:param number: phone number to get information
:rtype: dict
:returns: CNAM information
Example: Get Number information::
data = api.get_number_info('+1234567890')
print(data)
## { 'created': '2017-02-10T09:11:50Z',
## 'name' : 'RALEIGH, NC',
## 'number' : '+1234567890',
## 'updated' : '2017-02-10T09:11:50Z'}
"""
path = '/phoneNumbers/numberInfo/%s' % quote(number)
return self._make_request('get', path)[0]
def list_phone_numbers(
self,
application_id=None,
state=None,
name=None,
city=None,
number_state=None,
size=None,
**kwargs):
"""
Get a list of user's phone numbers
:param str application_id: Used to filter the retrieved list of numbers by an associated application ID.
:param str state: Used to filter the retrieved list of numbers allocated for the authenticated
user by a US state.
        :param str name: Used to filter the retrieved list of numbers allocated for the authenticated
            user by its name.
        :param str city: Used to filter the retrieved list of numbers allocated for the authenticated user
            by its city.
:param str number_state: Used to filter the retrieved list of numbers allocated for the authenticated user
by the number state.
        :param int size: Used for pagination to indicate the size of each page requested for querying a list
of items. If no value is specified the default value is 25. (Maximum value 1000)
:rtype: types.GeneratorType
:returns: list of phone numbers
Example: List all phone numbers::
number_list = api.list_phone_numbers(size=1000)
print(list(number_list))
## [
## {
## 'city' :'RALEIGH',
## 'createdTime' :'2017-02-06T18:41:37Z',
## 'id' :'n-n123',
## 'name' :'demo name',
## 'nationalNumber':'(919) 555-5346',
## 'number' :'+19195555346',
## 'numberState' :'enabled',
## 'price' :'0.35',
## 'state' :'NC'
## },
## {
## 'city' :'RALEIGH',
## 'createdTime' :'2017-02-06T18:41:56Z',
## 'id' :'n-n1234',
## 'name' :'demo name',
## 'nationalNumber':'(919) 555-5378',
## 'number' :'+19195555378',
## 'numberState' :'enabled',
## 'price' :'0.35',
## 'state' :'NC'
## }
## ]
"""
kwargs['applicationId'] = application_id
kwargs['state'] = state
kwargs['name'] = name
kwargs['city'] = city
kwargs['numberState'] = number_state
kwargs['size'] = size
path = '/users/%s/phoneNumbers' % self.user_id
return get_lazy_enumerator(self, lambda: self._make_request('get', path, params=kwargs))
def order_phone_number(self,
number=None,
name=None,
application_id=None,
fallback_number=None,
**kwargs):
"""
        Allocates a number so the user can use it to make and receive calls, and send
        and receive messages.
:param str number: An available telephone number you want to use
:param str name: A name you choose for this number.
:param str application_id: The unique id of an Application you want to associate with this number.
:param str fallback_number: Number to transfer an incoming call when the callback/fallback events can't
be delivered.
:rtype: str
:returns: id of created phone number
Example: Order Number::
            number_id = api.order_phone_number(number='+1234567890')
print(number_id)
# n-asdf123
"""
kwargs['number'] = number
kwargs['name'] = name
kwargs['applicationId'] = application_id
kwargs['fallbackNumber'] = fallback_number
return self._make_request('post', '/users/%s/phoneNumbers' % self.user_id, json=kwargs)[2]
def get_phone_number(self, number_id):
"""
Get information about a phone number
:type number_id: str
:param number_id: id of a phone number
:rtype: dict
:returns: number information
Example: Search, order, and fetch Number information::
available_numbers = api.search_available_local_numbers(city='Raleigh', state='NC')
number_id = api.order_phone_number(available_numbers[0]['number'])
print(number_id)
# n-123
my_number = api.get_phone_number(number_id)
print(my_number)
## {
## 'city' :'RALEIGH',
## 'createdTime' :'2017-02-06T18:27:14Z',
## 'id' :'n-123',
## 'nationalNumber':'(919) 561-5039',
## 'number' :'+19195615039',
## 'numberState' :'enabled',
## 'price' :'0.35',
## 'state' :'NC'
## }
"""
return self._make_request('get', '/users/%s/phoneNumbers/%s' % (self.user_id, number_id))[0]
def update_phone_number(self, number_id,
name=None,
application_id=None,
fallback_number=None,
**kwargs):
"""
Update information about a phone number
:param str number_id: id of a phone number
:param str name: A name you choose for this number.
:param str application_id: The unique id of an Application you want to associate with this number.
:param str fallback_number: Number to transfer an incoming call when the callback/fallback events can't
be delivered.
Example: Update number information::
my_number = api.get_phone_number(number_id)
print(my_number)
## {
## 'city' :'RALEIGH',
## 'createdTime' :'2017-02-06T18:27:14Z',
## 'id' :'n-123',
## 'nationalNumber':'(919) 561-5039',
## 'number' :'+19195615039',
## 'numberState' :'enabled',
## 'price' :'0.35',
## 'state' :'NC'
## }
api.update_phone_number(number_id, name='demo name')
my_number = api.get_phone_number(number_id)
print(my_number)
## {
## 'id' :'n-123',
## 'number' :'+19195615039',
## 'nationalNumber':'(919) 561-5039',
## 'name' :'demo name',
## 'createdTime' :'2017-02-06T18:41:56Z',
## 'city' :'RALEIGH',
## 'state' :'NC',
## 'price' :'0.35',
## 'numberState' :'enabled'
## }
"""
kwargs['name'] = name
kwargs['applicationId'] = application_id
kwargs['fallbackNumber'] = fallback_number
self._make_request(
'post', '/users/%s/phoneNumbers/%s' % (self.user_id, number_id), json=kwargs)
def delete_phone_number(self, number_id):
"""
Remove a phone number
:type number_id: str
:param number_id: id of a phone number
Example: Delete phone number (release) from account::
api.delete_phone_number('numberId')
"""
self._make_request(
'delete', '/users/%s/phoneNumbers/%s' % (self.user_id, number_id))
def list_recordings(self, size=None, **kwargs):
"""
Get a list of call recordings
:param int size: Used for pagination to indicate the size of each page requested for querying a list
of items. If no value is specified the default value is 25. (Maximum value 1000)
:rtype: types.GeneratorType
:returns: list of recordings
Example: List all recordings::
recording_list = api.list_recordings(size=1000)
            print(list(recording_list))
## [
## {
## 'call' :'https://api.catapult.inetwork.com/v1/users/u-abc123/calls/c-callId',
## 'endTime' :'2017-01-30T17:58:45Z',
## 'id' :'rec-recordingId',
## 'media' :'https://api.catapult.inetwork.com/v1/users/u-abc123/media/c-callId-1.wav',
## 'mediaName':'c-callId-1.wav',
## 'startTime':'2017-01-30T17:58:34Z',
## 'state' :'complete'
## },
## {
## 'call' :'https://api.catapult.inetwork.com/v1/users/u-abc123/calls/c-callId2',
## 'endTime' :'2017-01-30T17:53:30Z',
## 'id' :'rec-recordingId2',
## 'media' :'https://api.catapult.inetwork.com/v1/users/u-abc123/media/c-callId2-1.wav',
## 'mediaName':'c-callId2-1.wav',
## 'startTime':'2017-01-30T17:53:20Z',
## 'state' :'complete'
## }
## ]
"""
kwargs['size'] = size
path = '/users/%s/recordings' % self.user_id
return lazy_map(_set_media_name,
get_lazy_enumerator(self, lambda: self._make_request('get', path, params=kwargs)))
def get_recording(self, recording_id):
"""
Gets information about a recording
:type recording_id: str
:param recording_id: id of a recording
:rtype: dict
:returns: recording information
Example: Fetch recording information::
my_recording = api.get_recording('recordingId2')
print(my_recording)
## {
## 'call' :'https://api.catapult.inetwork.com/v1/users/u-abc123/calls/c-callId2',
## 'endTime' :'2017-01-30T17:53:30Z',
## 'id' :'rec-recordingId2',
## 'media' :'https://api.catapult.inetwork.com/v1/users/u-abc123/media/c-callId2-1.wav',
## 'mediaName':'c-callId2-1.wav',
## 'startTime':'2017-01-30T17:53:20Z',
## 'state' :'complete'
## }
"""
path = '/users/%s/recordings/%s' % (self.user_id, recording_id)
return _set_media_name(self._make_request('get', path)[0])
def list_transcriptions(self, recording_id, size=None, **kwargs):
"""
Get a list of transcriptions
:type recording_id: str
:param recording_id: id of a recording
:param int size: Used for pagination to indicate the size of each page requested for querying a list
of items. If no value is specified the default value is 25. (Maximum value 1000)
:rtype: types.GeneratorType
:returns: list of transcriptions
Example: Print off all transcriptions for a recording::
transcriptions_list = api.list_transcriptions('recordingId')
print(list(transcriptions_list))
## [
## {
## 'chargeableDuration': 60,
## 'id': '{transcription-id}',
## 'state': 'completed',
## 'time': '2014-10-09T12:09:16Z',
## 'text': '{transcription-text}',
## 'textSize': 3627,
## 'textUrl': '{url-to-full-text}'
## },
## {
## 'chargeableDuration': 60,
## 'id': '{transcription-id}',
## 'state': 'completed',
## 'text': '{transcription-text}',
## 'time': '2014-10-09T14:04:44Z',
## 'textSize': 72,
## 'textUrl': '{url-to-full-text}'
## }
## ]
"""
kwargs['size'] = size
path = '/users/%s/recordings/%s/transcriptions' % (
self.user_id, recording_id)
return get_lazy_enumerator(self, lambda: self._make_request('get', path, params=kwargs))
def create_transcription(self, recording_id):
"""
        Create a transcription for a given recording
:type recording_id: str
:param recording_id: id of a recording
:rtype: str
:returns: id of created transcription
Example: Create new transcription from existing recording::
            transcription_id = api.create_transcription('recordingId')
"""
path = '/users/%s/recordings/%s/transcriptions' % (
self.user_id, recording_id)
return self._make_request('post', path, json={})[2]
def get_transcription(self, recording_id, transcription_id):
"""
Get information about a transcription
:type recording_id: str
:param recording_id: id of a recording
        :type transcription_id: str
        :param transcription_id: id of a transcription
        :rtype: dict
        :returns: transcription information
Example: Fetch a single transcription on a recording::
my_transcription = api.get_transcription('recordingId', 'transcriptionId')
print(my_transcription)
## {
## 'chargeableDuration': 11,
## 'id' : '{transcriptionId}',
## 'state' : 'completed',
## 'text' : 'Hey there, I was calling to talk about plans for this saturday. ',
## 'textSize' : 63,
## 'textUrl' : 'https://api.catapult.inetwork.com/.../media/{transcriptionId}',
## 'time' : '2014-12-23T23:08:59Z'
## }
"""
path = '/users/%s/recordings/%s/transcriptions/%s' % (
self.user_id, recording_id, transcription_id)
return self._make_request('get', path)[0]
class CatapultException(Exception):
"""
Catapult API request exception
"""
def __init__(self, status_code, message, **kwargs):
"""
Initialize the catapult exception.
        :type status_code: int
        :param status_code: http status code
:type message: str
:param message: error message
:type code: str
:param code: optional error code
:rtype: bandwidth.catapult.CatapultException
:returns: instance of exception
"""
self.status_code = status_code
self.message = message
self.code = kwargs.get('code')
if self.code is None:
self.code = str(status_code)
def __str__(self):
return 'Error %s: %s' % (self.code, self.message)
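# Illustrative use (hypothetical values; mirrors the error shown in the
# delete_domain_endpoint example above):
#   try:
#       api.get_domain_endpoint('rd-domainId', 're-endpointId')
#   except CatapultException as e:
#       print(e)  # Error 404: The endpoint 're-endpointId' could not be found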
| 39.890332 | 120 | 0.532684 | 14,018 | 132,037 | 4.87566 | 0.062848 | 0.019547 | 0.006672 | 0.006862 | 0.731181 | 0.68894 | 0.648309 | 0.610663 | 0.582732 | 0.541765 | 0 | 0.026938 | 0.364595 | 132,037 | 3,309 | 121 | 39.902387 | 0.787716 | 0.604929 | 0 | 0.50551 | 0 | 0.001377 | 0.116442 | 0.043313 | 0 | 0 | 0 | 0 | 0 | 1 | 0.129477 | false | 0.00551 | 0.012397 | 0.001377 | 0.239669 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8ccc55051757a37c5ce9889cc0072a9107f7d3f7 | 1,843 | py | Python | transfer_function/compare_cl.py | planck-npipe/toast-npipe | ca3e92ea3a81a6146e246ec1d0c5bdcaea3b49f2 | [
"BSD-2-Clause"
] | 1 | 2021-04-20T08:09:35.000Z | 2021-04-20T08:09:35.000Z | transfer_function/compare_cl.py | planck-npipe/toast-npipe | ca3e92ea3a81a6146e246ec1d0c5bdcaea3b49f2 | [
"BSD-2-Clause"
] | null | null | null | transfer_function/compare_cl.py | planck-npipe/toast-npipe | ca3e92ea3a81a6146e246ec1d0c5bdcaea3b49f2 | [
"BSD-2-Clause"
] | null | null | null | import matplotlib.pyplot as plt
import numpy as np
import healpy as hp
import os
import sys
from planck_util import log_bin
npipegain = 1.002
npipefwhm = np.radians(.5 / 60)
nbin = 300
fsky = 52 # 90, 52, 25
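# Assumed conventions (not documented in this script): npipegain is a per-map
# recalibration factor applied to frequencies above 70 GHz below, and npipefwhm is
# an extra 0.5 arcmin Gaussian beam whose squared window is drawn on the ratio panels.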
fig = plt.figure(figsize=[18, 12])
plt.suptitle('fsky = {}%'.format(fsky))
axes = [fig.add_subplot(2, 2, 1+i) for i in range(4)]
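# For each frequency pair, compare the Legacy (dx12) and NPIPE cross-spectra:
# log-binned TT and EE spectra on the top row, NPIPE/Legacy ratios underneath.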
for freq1, freq2 in [(70, 100), (100, 143), (100, 217), (143, 217)]:
name0 = '{:03}x{:03} Legacy'.format(freq1, freq2)
cl0 = hp.read_cl(
'cl_{}dx12x{}dx12_{:02}fsky.fits'.format(freq1, freq2, fsky))
name1 = '{:03}x{:03} NPIPE'.format(freq1, freq2)
cl1 = hp.read_cl(
'cl_{}x{}_{:02}fsky.fits'.format(freq1, freq2, fsky))
for freq in [freq1, freq2]:
if freq > 70:
cl1 *= npipegain
lmax = cl0[0].size - 1
ell = np.arange(lmax + 1)
ellbin, hits = log_bin(ell, nbin=nbin)
norm = ell * (ell + 1) / 2 / np.pi * 1e12
npipebeam = hp.gauss_beam(npipefwhm, lmax=lmax)
for i in range(2):
cl0bin, hits = log_bin(norm * cl0[i], nbin=nbin)
cl1bin, hits = log_bin(norm * cl1[i], nbin=nbin)
ax = axes[i]
comp = ['TT', 'EE', 'BB'][i]
ax.set_title(comp)
ax.plot(ellbin[2:], cl0bin[2:], label=name0)
ax.plot(ellbin[2:], cl1bin[2:], label=name1)
if i == 0:
ax.set_ylim([-100, 6100])
elif i == 1:
ax.set_ylim([-10, 50])
ax = axes[2 + i]
ax.set_title(comp + ' ratio')
ax.plot(ellbin[2:], cl1bin[2:] / cl0bin[2:],
label='{} / {}'.format(name1, name0))
ax.set_ylim([.99, 1.01])
ax.axhline(1, color='k')
ax.plot(ell, npipebeam ** 2, color='k', lw=2)
axes[1].legend(loc='best')
axes[3].legend(loc='best')
plt.savefig('clross_comparison_fsky{:02}.png'.format(fsky))
plt.show()
| 32.333333 | 69 | 0.559957 | 288 | 1,843 | 3.513889 | 0.388889 | 0.059289 | 0.063241 | 0.038538 | 0.128459 | 0.098814 | 0.059289 | 0 | 0 | 0 | 0 | 0.100871 | 0.252306 | 1,843 | 56 | 70 | 32.910714 | 0.633527 | 0.005426 | 0 | 0 | 0 | 0 | 0.086838 | 0.046423 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.117647 | 0 | 0.117647 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8cd17fe0d15ea567fd31f1f211864c363fedc45d | 973 | py | Python | software/temcagt/temcagt/ui/nodes/camera.py | htem/GridTapeStage | 0b4764bc4ea8d64970ea481a32d6c7383d301989 | [
"RSA-MD"
] | 2 | 2020-02-07T10:34:23.000Z | 2021-09-24T02:28:10.000Z | software/temcagt/temcagt/ui/nodes/camera.py | htem/GridTapeStage | 0b4764bc4ea8d64970ea481a32d6c7383d301989 | [
"RSA-MD"
] | null | null | null | software/temcagt/temcagt/ui/nodes/camera.py | htem/GridTapeStage | 0b4764bc4ea8d64970ea481a32d6c7383d301989 | [
"RSA-MD"
] | null | null | null | #!/usr/bin/env python
import inspect
import os
import time
from . import base
module_folder = os.path.abspath(
os.path.dirname(inspect.getfile(inspect.currentframe())))
def timethis(f):
def wrapped(*args, **kwargs):
t = time.time()
r = f(*args, **kwargs)
t = time.time() - t
print("%s took %s" % (f.__name__, t))
return r
wrapped.__name__ = f.__name__
wrapped.__doc__ = f.__doc__
return wrapped
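# Example use of the timethis decorator (illustrative only; `grab_frame` is a
# hypothetical function, not part of this module):
#
#   @timethis
#   def grab_frame():
#       time.sleep(0.1)
#
#   grab_frame()  # prints something like "grab_frame took 0.100..."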
def build_spec(obj, name):
spec = base.build_spec(obj, name)
spec['template'] = open(
os.path.join(module_folder, 'templates', 'camera.html'), 'r').read()
return spec
def test(addr='tcp://127.0.0.1:11020'):
import wsrpc
import pizco
from tornado.ioloop import IOLoop
p = pizco.Proxy(addr)
base.add_wsrpc_to_proxy(p, 'camera')
if hasattr(IOLoop, '_instance'):
del IOLoop._instance
wsrpc.serve.register(build_spec(p, 'camera'))
wsrpc.serve.serve()
| 21.622222 | 76 | 0.628983 | 134 | 973 | 4.343284 | 0.462687 | 0.030928 | 0.037801 | 0.051546 | 0.134021 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014608 | 0.226105 | 973 | 44 | 77 | 22.113636 | 0.7583 | 0.020555 | 0 | 0 | 0 | 0 | 0.085084 | 0.022059 | 0 | 0 | 0 | 0 | 0 | 1 | 0.129032 | false | 0 | 0.225806 | 0 | 0.451613 | 0.032258 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8cd51376834e16bcd1f149b9e039d5f850cbd527 | 12,746 | py | Python | src/build_scripts/utils/js__map.py | 1024sparrow/traliva | db8e9579747d5d89aed09a488561f9498fc723e7 | [
"MIT"
] | 1 | 2017-12-01T09:43:27.000Z | 2017-12-01T09:43:27.000Z | src/build_scripts/utils/js__map.py | 1024sparrow/traliva | db8e9579747d5d89aed09a488561f9498fc723e7 | [
"MIT"
] | 1 | 2021-09-19T13:22:32.000Z | 2021-09-19T13:22:32.000Z | src/build_scripts/utils/js__map.py | 1024sparrow/traliva | db8e9579747d5d89aed09a488561f9498fc723e7 | [
"MIT"
] | 1 | 2021-12-03T00:20:41.000Z | 2021-12-03T00:20:41.000Z | #!/usr/bin/env python3
import sys, re
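# get_map() splits each input JS/CSS file into typed fragments (code, comments,
# string contents) and collects them into the pout_* lists; apply_map() writes the
# (possibly transformed) fragments back to the original files.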
def get_map(pin_js_paths, pin_css_paths, pout_js, pout_css, pout_js_css):
for i_src in [(pin_js_paths, pout_js), (pin_css_paths, pout_css)]:
for i in i_src[0]:
with open(i) as f:
cand = {
'filepath': i,
'text': _get_text_as_array(f.readlines(), True, True)
}
i_src[1].append(cand)
pout_js_css.append(cand)
print('get_map()')
#print('pout_js_css: ', pout_js_css)##
def apply_map(p_js, p_css, p_js_css):
print('apply_map()')
for i in p_js_css:
#print('#%s:' % i['filepath'])
if i['filepath'] is None:
continue
cand = ''
for i_text in i['text']:
cand += i_text['text']
#print(cand)
f = open(i['filepath'], 'w')
f.write(cand)
f.close()
#def process_code_fragment(p_code):
# retval = '>>>>' + p_code + '<<<<'
# #retval = 'XXXX'
# return retval
# p_text - an array of individual lines
# Must return an array of fragments with their types marked (0 - comment, 1 - code, 2 - string contents)
# [
# {
# type: 1,
# text: 'do_some();\nconsole.log(\''
# },
# {
# type: 2,
# text: 'hello world'
# },
# {
# type: 1,
# text: '\');'
# },
# {
# type: 0,
#     text: '//some comment'
# },
# ]
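# Illustrative call (hypothetical input), producing typed fragments as described above:
#   fragments = _get_text_as_array(["var a = 1; // note\n"], True, True)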
re_one_line_comment = re.compile(r'//.*', re.DOTALL)
def _get_text_as_array(p_text, pp_comment, pp_newlines):
global __type
global __buffer
    __type = None
__buffer = ''
retval = []
if not pp_newlines:
pp_comment = False
use_strict_used = False
a = ''
usestrict_pos = None
for line in p_text:
stripline = line.strip()
if not use_strict_used:
if stripline.startswith("'use strict'") or stripline.startswith('"use strict"'):
usestrict_pos = len(a)
                a += '#' # any character - it will not end up in the result.
use_strict_used = True
continue
if pp_comment:
a += line
else:
if not pp_newlines:
line_cand = line.strip()
a += re.sub(re_one_line_comment, '', line_cand)
#b = ''
in_comment_1 = False # // ...
in_comment_2 = False # /* ... */
in_comment = False
in_string_1 = False # '
in_string_2 = False # "
in_string_3 = False # ``
string_type = 0 # for in_string_3
#string_content = [] # for in_string_3
#string_state = 0 # for in_string_3
#string_indent = 0 # for in_string_3
"""
`` - тупое экранирование. Сохраняются переносы строки и все символы между '`'
`
asd
` --> '\n\t\tasd\n\t'
1`` - как ``, но дополнительно обрезаются первая и последняя строки
1`
asd
` --> '\t\tasd'
2`` - как 1``, но дополнительно убираются отступы. Вычисляется наибольший общий отступ, и он отрезается. Отступы работают только с пробелами - символ табуляции не считается за отступ.
var a = 2`
var a =
5;
`; --> var a ='var a =\n\t5;';
3`` - убираются крайние пробельные символы и все переносы строки. Если последний символ в строке отличен от '>' и первый символ следующей строки отличен от '<', то в результат вставляется пробел. Первая и последняя строки не обрезаются (так, если что..).
var a = 3`
<table>
<tr>
</tr>
<tr>
</tr>
</table>
` --> var a = '<table><tr></tr><tr></tr></table>'
"""
in_string = False
    prev_char = 's' # neither '\\' nor '/' nor '*'
code_cand = ''
counter = 0
for i in a:
if not (counter is None):
if counter == usestrict_pos:
t = __buffer + code_cand
if __buffer:
retval.append({
'type': __type,
'text': __buffer
})
__buffer = ''
if code_cand:
retval.append({
'type': 1,
'text': code_cand
})
code_cand = ''
retval.append({
'type': 1,
'text': "\n'use strict';\n"
})
__type = 1
counter += 1
continue
counter += 1
skip_current = False
if (not in_comment) and (not in_string) and prev_char == '/' and i == '/':
if len(code_cand) > 0:
code_cand = code_cand[:-1]
#b += process_code_fragment(code_cand) + '/'
_accumulate_array_by_symbols(1, code_cand, retval)
_accumulate_array_by_symbols(0, '/', retval)
code_cand = ''
in_comment_1 = True
in_comment = True
elif in_comment_1 and i == '\n':
if not in_comment_2:
in_comment_1 = False
in_comment = False
elif prev_char == '/' and i == '*':
if not in_comment_1:
if len(code_cand) > 0:
code_cand = code_cand[:-1]
#b += process_code_fragment(code_cand) + '/'
_accumulate_array_by_symbols(1, code_cand, retval)
code_cand = ''
in_comment_2 = True
in_comment = True
if pp_comment:
_accumulate_array_by_symbols(0, '/', retval)
#if not pp_comment:
            #    b = b[:-1] # remove the previous character ('/')
elif prev_char == '*' and i == '/':
if not in_comment_1:
in_comment_2 = False
in_comment = False
skip_current = True
elif prev_char == '\\' and i == '\\':
prev_char = 's'
#b += i
_accumulate_array_by_symbols(__type, i, retval)
continue
elif prev_char != '\\' and i == '"':
if not in_comment and not in_string_1 and not in_string_3:
if in_string:
if in_string_2:
in_string_2 = False
else:
in_string_1 = False
in_string_3 = False
in_string = False
else:
#b += process_code_fragment(code_cand + '"')
skip_current = True
_accumulate_array_by_symbols(1, code_cand + '"', retval)
skip_current = True
code_cand = ''
in_string_2 = True
in_string = True
elif prev_char != '\\' and i == "'":
if not in_comment and not in_string_2 and not in_string_3:
if in_string:
if in_string_1:
in_string_1 = False
else:
in_string_2 = False
in_string_3 = False
in_string = False
else:
#b += process_code_fragment(code_cand + "'")
skip_current = True
_accumulate_array_by_symbols(1, code_cand + "'", retval)
skip_current = True
code_cand = ''
in_string_1 = True
in_string = True
elif prev_char != '\\' and i == "`":
if not in_comment and not in_string_1 and not in_string_2:
if in_string:
#skip_current = True
if in_string_3:
#in_string_3 = False
if string_type == 0 or string_type == 3:
tmp = string_content
else:
                            tmp = string_content[1:-1] # trim off the first and last lines
if string_type == 2:
indent = 10000
for ca in tmp:
cand = 0
for ca_i in ca:
if ca_i == ' ':
cand += 1
else:
break
if cand < indent:
indent = cand
if string_type == 3:
prev = 'q' # any letter symbol
tmp_between_parath = False
for ca in [tmp2.strip() for tmp2 in tmp]:
if len(ca) and len(prev) and prev[-1] != '>' and ca[0] != '<':
_accumulate_array_by_symbols(2, ' ', retval)
tmp_between_parath = False
else:
tmp_between_parath = True
cand = ca
if tmp_between_parath:
while len(cand) and cand[0] == ' ':
cand = cand[1:]
_accumulate_array_by_symbols(2, ca, retval)
prev = ca
else:
for ca in tmp:
if string_type == 2:
cand = ca[indent:]
else:
cand = ca
_accumulate_array_by_symbols(2, cand, retval)
else:
in_string_1 = False
in_string_2 = False
_accumulate_array_by_symbols(1, code_cand + "'", retval)
in_string = False
else:
skip_current = True
#print('::',prev_char,'::::::::::', code_cand)
in_string_3 = True
in_string = True
string_type = 0
string_content = ['']
string_state = 0
string_indent = 0
if prev_char == '1':
string_type = 1
code_cand = code_cand[:-1]
elif prev_char == '2':
string_type = 2
code_cand = code_cand[:-1]
elif prev_char == '3':
string_type = 3
code_cand = code_cand[:-1]
_accumulate_array_by_symbols(1, code_cand + "'", retval)
code_cand = ''
if (not in_comment) and (not skip_current):
if in_string:
if in_string_3:
if i == '\n':
string_content.append('')
else:
ca = i
if i == "'":
ca = '\\\''
string_content[-1] += ca
else:
#b += i
_accumulate_array_by_symbols(2, i, retval)
else:
if in_string_3:
#_accumulate_array_by_symbols(1, "'", retval)
#code_cand += "'"
in_string_3 = False
else:
code_cand += i
        else: # comments /* ... */
if not in_string:
if pp_comment:
#b += i
_accumulate_array_by_symbols(0, i, retval)
prev_char = i
prev_instring = in_string
#b += process_code_fragment(code_cand)
_accumulate_array_by_symbols(1, code_cand, retval)
_stop_accumulating_array_by_symbols(retval)
return retval
__buffer = ''
__type = None
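# _accumulate_array_by_symbols buffers consecutive fragments of the same type into a
# single entry; _stop_accumulating_array_by_symbols flushes whatever remains buffered.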
def _accumulate_array_by_symbols(pin_type, pin_fragment, pout_target):
global __buffer
global __type
if len(pin_fragment) > 0:
if pin_type == __type:
__buffer += pin_fragment
else:
if __buffer:
pout_target.append({
'type': __type,
'text': __buffer
})
__type = pin_type
__buffer = pin_fragment
def _stop_accumulating_array_by_symbols(pout_target):
global __buffer
global __type
if __buffer:
pout_target.append({
'type': __type,
'text': __buffer
})
__buffer = ''
__type = None
| 36.626437 | 258 | 0.435274 | 1,297 | 12,746 | 3.944487 | 0.148805 | 0.070367 | 0.051994 | 0.07975 | 0.40129 | 0.351446 | 0.276583 | 0.237686 | 0.197811 | 0.181392 | 0 | 0.018266 | 0.471677 | 12,746 | 347 | 259 | 36.731988 | 0.741461 | 0.103091 | 0 | 0.524528 | 0 | 0 | 0.01795 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.018868 | false | 0 | 0.003774 | 0 | 0.026415 | 0.007547 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8cd64ced10e49c5131f5f0002e8da18a3a3434eb | 3,895 | py | Python | otree_redwood/views.py | etherealmachine/otree-redwood-extensions | 98cdba94487da8db7eaee41f68e2b03d0a1887ad | [
"MIT"
] | null | null | null | otree_redwood/views.py | etherealmachine/otree-redwood-extensions | 98cdba94487da8db7eaee41f68e2b03d0a1887ad | [
"MIT"
] | null | null | null | otree_redwood/views.py | etherealmachine/otree-redwood-extensions | 98cdba94487da8db7eaee41f68e2b03d0a1887ad | [
"MIT"
] | null | null | null | from collections import defaultdict
import csv
import datetime
from importlib import import_module
import vanilla
import channels
from django.http import HttpResponse, JsonResponse
from django.contrib.contenttypes.models import ContentType
from otree.models import Session
from otree.session import SESSION_CONFIGS_DICT
from otree_redwood import stats
from otree_redwood.models import Event, Connection
def AppSpecificExportCSV(app_name, display_name, get_output_table, get_output_table_header):
class ExportCSV(vanilla.View):
url_name = 'redwood_export_{}'.format(app_name)
url_pattern = '^{}/$'.format(url_name)
app_name = app_name
display_name = display_name
def get(request, *args, **kwargs):
models_module = import_module('{}.models'.format(app_name))
groups = models_module.Group.objects.all()
tables = []
for group in groups:
events = Event.objects.filter(
content_type=ContentType.objects.get_for_model(group),
group_pk=group.pk)
tables.append(get_output_table(list(events)))
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="{}"'.format(
'{} Events (accessed {}).csv'.format(
display_name,
datetime.date.today().isoformat()
)
)
w = csv.writer(response)
w.writerow(get_output_table_header(list(groups)))
for rows in tables:
w.writerows(rows)
return response
return ExportCSV
class EventsJsonAPI(vanilla.ListView):
url_name = 'redwood_events_json_api'
url_pattern = r'^redwood/api/events/session/(?P<session_code>[a-zA-Z0-9_-]+)/$'
model = Event
def render_to_response(self, context):
session = Session.objects.get(code=self.kwargs['session_code'])
events_by_app_name_then_group = defaultdict(lambda: {})
for session_config in SESSION_CONFIGS_DICT.values():
app_name = session_config['name']
try:
groups_query = getattr(session, app_name + '_group')
except AttributeError:
continue
groups = list(groups_query.all())
if groups:
for group in groups:
events = Event.objects.filter(group_pk=group.pk)
events_by_app_name_then_group[app_name][group.pk] = [e.message for e in events]
return JsonResponse(events_by_app_name_then_group, safe=False)
class DebugView(vanilla.TemplateView):
url_name = 'redwood_debug'
url_pattern = r'^redwood/debug/session/(?P<session_code>[a-zA-Z0-9_-]+)/$'
template_name = 'otree_redwood/Debug.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['stats'] = stats.items()
channel_layer = channels.asgi.get_channel_layer()
if 'statistics' in channel_layer.extensions:
context['global_channel_stats'] = channel_layer.global_statistics()
context['connected_participants'] = Connection.objects.all()
context['session_code'] = self.kwargs['session_code']
return context
app_specific_exports = []
for session_config in SESSION_CONFIGS_DICT.values():
app_name = session_config['name']
dotted_path = app_name + '.views'
display_name = session_config['display_name']
try:
module = import_module(dotted_path)
except ImportError:
continue
table_fn = getattr(module, 'get_output_table', None)
header_fn = getattr(module, 'get_output_table_header', None)
if table_fn and header_fn:
app_specific_exports.append(AppSpecificExportCSV(app_name, display_name, table_fn, header_fn))
| 35.733945 | 102 | 0.656226 | 452 | 3,895 | 5.380531 | 0.289823 | 0.040296 | 0.034539 | 0.022204 | 0.212993 | 0.161184 | 0.10773 | 0.10773 | 0.054276 | 0.054276 | 0 | 0.00136 | 0.244673 | 3,895 | 108 | 103 | 36.064815 | 0.825289 | 0 | 0 | 0.117647 | 0 | 0.011765 | 0.116303 | 0.054172 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047059 | false | 0 | 0.176471 | 0 | 0.423529 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8cd8aaad2ee6f2d1577fb2c9d0826b0d64d9588e | 4,471 | py | Python | arch/task_manager/task_manager_client.py | ZZIQIN/FATE | cc6783927564cbb15c067d5010f1cdf82a5de20a | [
"Apache-2.0"
] | 1 | 2019-07-29T13:22:36.000Z | 2019-07-29T13:22:36.000Z | arch/task_manager/task_manager_client.py | ZZIQIN/FATE | cc6783927564cbb15c067d5010f1cdf82a5de20a | [
"Apache-2.0"
] | null | null | null | arch/task_manager/task_manager_client.py | ZZIQIN/FATE | cc6783927564cbb15c067d5010f1cdf82a5de20a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import json
import os
import requests
import time
import traceback
from arch.api.utils import file_utils
import sys
SERVERS = "servers"
ROLE = "manager"
server_conf = file_utils.load_json_conf("arch/conf/server_conf.json")
WORKFLOW_FUNC = ["workflow"]
WORKFLOW_JOB_FUNC = ["workflowRuntimeConf"]
DATA_FUNC = ["download", "upload"]
DTABLE_FUNC = ["tableInfo"]
OTHER_FUNC = ["delete"]
JOB_FUNC = ["jobStatus"]
JOB_QUEUE_FUNC = ["queueStatus"]
MODEL_FUNC = ["load", "online", "version"]
def get_err_result(msg, body):
return {"status": -1,
"msg": msg,
"created_at": time.strftime('%Y-%m-%d %H:%M:%S'),
"data": body}
def prettify(response, verbose=True):
response['created_at'] = time.strftime('%Y-%m-%d %H:%M:%S')
if verbose:
print(json.dumps(response))
return response
def call_fun(func, config_data):
IP = server_conf.get(SERVERS).get(ROLE).get('host')
HTTP_PORT = server_conf.get(SERVERS).get(ROLE).get('http.port')
LOCAL_URL = "http://{}:{}".format(IP, HTTP_PORT)
if func in WORKFLOW_FUNC:
response = requests.post("/".join([LOCAL_URL, "workflow", func]), json=config_data)
elif func in WORKFLOW_JOB_FUNC:
response = requests.post("/".join([LOCAL_URL, "workflow", func, config_data.get("job_id")]), json=config_data)
elif func in OTHER_FUNC:
response = requests.delete("/".join([LOCAL_URL, "job", config_data.get("job_id")]))
elif func in JOB_FUNC:
response = requests.post("/".join([LOCAL_URL, "job", func, config_data.get("job_id")]))
elif func in JOB_QUEUE_FUNC:
response = requests.post("/".join([LOCAL_URL, "job", func]))
elif func in DATA_FUNC:
response = requests.post("/".join([LOCAL_URL, "data", func]), json=config_data)
elif func in DTABLE_FUNC:
response = requests.post("/".join([LOCAL_URL, "dtable", func]), json=config_data)
elif func in MODEL_FUNC:
response = requests.post("/".join([LOCAL_URL, "model", func]), json=config_data)
return json.loads(response.text)
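# Illustrative call (assumes a task manager is reachable on the host/port from
# server_conf.json):
#   result = call_fun('jobStatus', {'job_id': 'some_job_id'})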
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', required=False, type=str, help="config json path")
parser.add_argument('-f', '--function', type=str,
                        choices=(WORKFLOW_FUNC + WORKFLOW_JOB_FUNC + DATA_FUNC + DTABLE_FUNC +
                                 OTHER_FUNC + JOB_FUNC + JOB_QUEUE_FUNC + MODEL_FUNC),
required=True,
help="function to call")
parser.add_argument('-j', '--job_id', required=False, type=str, help="job id")
parser.add_argument('-p', '--party_id', required=False, type=str, help="party id")
parser.add_argument('-r', '--role', required=False, type=str, help="role")
parser.add_argument('-s', '--scene_id', required=False, type=str, help="scene id")
parser.add_argument('-n', '--namespace', required=False, type=str, help="namespace")
parser.add_argument('-t', '--table_name', required=False, type=str, help="table name")
parser.add_argument('-i', '--file', required=False, type=str, help="file")
parser.add_argument('-o', '--output_path', required=False, type=str, help="output_path")
try:
args = parser.parse_args()
config_data = {}
try:
if args.config:
args.config = os.path.abspath(args.config)
with open(args.config, 'r') as f:
config_data = json.load(f)
config_data.update(dict((k, v) for k, v in vars(args).items() if v is not None))
if args.party_id or args.role:
config_data['local'] = config_data.get('local', {})
if args.party_id:
config_data['local']['party_id'] = args.party_id
if args.role:
config_data['local']['role'] = args.role
except ValueError:
print('json parse error')
exit(-102)
except IOError:
print("reading config jsonfile error")
exit(-103)
response = call_fun(args.function, config_data)
response_dict = prettify(response)
        if response.get("status") < 0:
            result = get_err_result(response_dict.get("msg"), response_dict.get('data'))
            # get_err_result() returns no "code" key; exit with the error status instead
            sys.exit(result.get("status"))
    except Exception:  # avoid a bare except so SystemExit from sys.exit() propagates
        traceback.print_exc()
| 40.279279 | 118 | 0.61597 | 581 | 4,471 | 4.549053 | 0.246127 | 0.064321 | 0.064321 | 0.068104 | 0.325009 | 0.253121 | 0.207719 | 0.113886 | 0.112751 | 0.020431 | 0 | 0.002887 | 0.225229 | 4,471 | 110 | 119 | 40.645455 | 0.760104 | 0.009618 | 0 | 0.021277 | 0 | 0 | 0.135563 | 0.005874 | 0 | 0 | 0 | 0 | 0 | 1 | 0.031915 | false | 0 | 0.085106 | 0.010638 | 0.148936 | 0.042553 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8cd8b9a6d27d9f7881e2926528600c651c1ec361 | 1,311 | py | Python | solutions/0932-monotonic-array/monotonic-array.py | iFun/Project-G | d33b3b3c7bcee64f93dc2539fd9955a27f321d96 | [
"MIT"
] | null | null | null | solutions/0932-monotonic-array/monotonic-array.py | iFun/Project-G | d33b3b3c7bcee64f93dc2539fd9955a27f321d96 | [
"MIT"
] | null | null | null | solutions/0932-monotonic-array/monotonic-array.py | iFun/Project-G | d33b3b3c7bcee64f93dc2539fd9955a27f321d96 | [
"MIT"
] | null | null | null | # An array is monotonic if it is either monotone increasing or monotone decreasing.
#
# An array A is monotone increasing if for all i <= j, A[i] <= A[j]. An array A is
# monotone decreasing if for all i <= j, A[i] >= A[j].
#
# Return true if and only if the given array A is monotonic.
#
# Example 1:
#   Input: [1,2,2,3]
#   Output: true
#
# Example 2:
#   Input: [6,5,4,4]
#   Output: true
#
# Example 3:
#   Input: [1,3,2]
#   Output: false
#
# Example 4:
#   Input: [1,2,4,5]
#   Output: true
#
# Example 5:
#   Input: [1,1,1]
#   Output: true
#
# Note:
#   1 <= A.length <= 50000
#   -100000 <= A[i] <= 100000
from typing import List  # required for the List[int] annotation below

class Solution:
def isMonotonic(self, A: List[int]) -> bool:
if not A:
return True
diff = A[-1] - A[0]
if diff == 0:
prev = A[0]
for num in A:
if prev != num:
return False
elif diff > 0:
prev = A[0]
for num in A:
if prev > num:
return False
prev = num
else:
prev = A[0]
for num in A:
if prev < num:
return False
prev = num
return True
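# Quick sanity checks (mirror the examples above):
#   assert Solution().isMonotonic([1, 2, 2, 3]) is True
#   assert Solution().isMonotonic([6, 5, 4, 4]) is True
#   assert Solution().isMonotonic([1, 3, 2]) is False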
| 13.515464 | 137 | 0.441648 | 175 | 1,311 | 3.308571 | 0.297143 | 0.060449 | 0.08981 | 0.046632 | 0.333333 | 0.271157 | 0.271157 | 0.271157 | 0.271157 | 0.222798 | 0 | 0.062992 | 0.418764 | 1,311 | 96 | 138 | 13.65625 | 0.69685 | 0.410374 | 0 | 0.565217 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0 | 0 | 0.304348 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8cd8df16a0e582d037b19de313eb56ba6ed39366 | 3,881 | py | Python | main.py | bravo583771/Variational-inference-and-Missing-not-at-random-imputation | 8688e0fe00e98e8546ebd45d1dfd421d494403be | [
"Apache-2.0"
] | null | null | null | main.py | bravo583771/Variational-inference-and-Missing-not-at-random-imputation | 8688e0fe00e98e8546ebd45d1dfd421d494403be | [
"Apache-2.0"
] | null | null | null | main.py | bravo583771/Variational-inference-and-Missing-not-at-random-imputation | 8688e0fe00e98e8546ebd45d1dfd421d494403be | [
"Apache-2.0"
] | null | null | null | import random
import torch
import numpy as np
import argparse
import torch.nn as nn
import torch.optim as optim
import os
from torchvision import transforms
from torchvision.utils import save_image
#import sys
from model.MIWAE import MIWAE
from model.imputer import imputer
from utils.dataframe import dataframe, UCIDatasets
from utils.trainer import VAE_trainer, GAN_trainer
from utils.experiment import *  # provides exp_imputation among others
"""
Use the MIWAE and not-MIWAE on UCI data
Find a data from here
https://archive.ics.uci.edu/ml/datasets.php
"""
parser = argparse.ArgumentParser(description='VAE Example')
parser.add_argument('--batch-size', type=int, default=16, metavar='N',
                    help='input batch size for training (default: 16)')
parser.add_argument('--epochs', type=int, default=100000, metavar='N',
                    help='number of epochs to train (default: 100000)')
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--data', type=str, default='whitewine', metavar='N',
help='which dataset from UCI would you like to use?')
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
args.cuda = not args.no_cuda and torch.cuda.is_available()
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
device = torch.device("cuda" if args.cuda else "cpu")
kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
name = args.data
n_hidden = 128
n_samples = 20
max_iter = args.epochs
batch_size = args.batch_size
impute_sample = 10000
### the missing model ###
# mprocess = 'linear'
# mprocess = 'selfmasking'
mprocess = 'selfmasking_known'
# ---- number of runs
runs = 1
RMSE_result = dict()
methods = ['miwae','notmiwae','mean','mice','RF']
for method in methods:
RMSE_result[method] = []
"""
load data: white wine
"""
data = UCIDatasets(name=name)
N, D = data.N, data.D
dl = D - 1
optim_kwargs = {'lr': 0.0001, 'betas': (0.9, 0.999), 'eps': 1e-08 }
MIWAE_kwargs = {
'data_dim': D, 'z_dim': dl, 'h_dim': n_hidden, 'n_samples': n_samples
}
notMIWAE_kwargs = {
'data_dim': D, 'z_dim': dl, 'h_dim': n_hidden, 'n_samples': n_samples, 'missing_process': mprocess
}
data_kwargs = {
'batch_size': batch_size
}
imputer_par = {
'missing_values': np.nan, 'max_iter': 10, 'random_state': 0, 'n_estimators': 100, 'n_neighbors': 3, 'metric': 'nan_euclidean'
}
exp_kwargs = {
'dataset':name, 'runs':runs, 'seed': args.seed,
}
config = {
'exp_kwargs': exp_kwargs, 'optim_kwargs': optim_kwargs,
'MIWAE_kwargs': MIWAE_kwargs, 'notMIWAE_kwargs': notMIWAE_kwargs,
'data_kwargs': data_kwargs, 'imputer_par': imputer_par,
}
def main():
RMSE_result = exp_imputation( 'exp_imputation', model_list = ['miwae', 'notmiwae'], config = config, num_of_epoch = max_iter)
print("RMSE_miwae = {0:.5f} +- {1:.5f}".format(np.mean(RMSE_result['miwae']), np.std(RMSE_result['miwae'])))
print("RMSE_notmiwae = {0:.5f} +- {1:.5f}".format(np.mean(RMSE_result['notmiwae']), np.std(RMSE_result['notmiwae'])))
print("RMSE_mean = {0:.5f} +- {1:.5f}".format(np.mean(RMSE_result['mean']), np.std(RMSE_result['mean'])))
print("RMSE_mice = {0:.5f} +- {1:.5f}".format(np.mean(RMSE_result['mice']), np.std(RMSE_result['mice'])))
print("RMSE_missForest = {0:.5f} +- {1:.5f}".format(np.mean(RMSE_result['RF']), np.std(RMSE_result['RF'])))
if __name__ == "__main__":
main() | 35.605505 | 129 | 0.681268 | 565 | 3,881 | 4.506195 | 0.313274 | 0.05106 | 0.040063 | 0.011783 | 0.108405 | 0.091909 | 0.091909 | 0.091909 | 0.091909 | 0.036921 | 0 | 0.022283 | 0.155888 | 3,881 | 109 | 130 | 35.605505 | 0.754884 | 0.025251 | 0 | 0.02381 | 0 | 0 | 0.248416 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.011905 | false | 0 | 0.202381 | 0 | 0.214286 | 0.059524 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8cdaa54436b6f5d796b026fa8e2d74183b41fb7c | 432 | py | Python | qap/qap/conf.py | brentian/artoopt | a83ad7db285c6483a45066df98c33f4d34e46cb6 | [
"MIT"
] | 2 | 2020-10-08T16:22:32.000Z | 2020-10-12T09:58:07.000Z | qap/qap/conf.py | brentian/artoopt | a83ad7db285c6483a45066df98c33f4d34e46cb6 | [
"MIT"
] | null | null | null | qap/qap/conf.py | brentian/artoopt | a83ad7db285c6483a45066df98c33f4d34e46cb6 | [
"MIT"
] | null | null | null | # @license: %MIT License%:~ http://www.opensource.org/licenses/MIT
# @project: qap_lp
# @file: /conf.py
# @created: Sunday, 27th September 2020
# @author: brentian (chuwzhang@gmail.com)
# @modified: brentian (chuwzhang@gmail.com)
# Sunday, 27th September 2020 9:25:45 pm
# @description:
QAP_INSTANCE = 'qapdata'
QAP_SOL = 'qapsoln'
QAP_DEFAULT = {'scaling': 'L1', 'mu': 1}
MSK_DEFAULT = {'mioMaxTime': 60}
RESULT_DIR = 'result' | 30.857143 | 66 | 0.696759 | 58 | 432 | 5.086207 | 0.741379 | 0.067797 | 0.128814 | 0.155932 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.055851 | 0.12963 | 432 | 14 | 67 | 30.857143 | 0.728723 | 0.634259 | 0 | 0 | 0 | 0 | 0.273333 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8cdb21fb171eeee46d82b6ac55b304268b3a85cf | 5,073 | py | Python | lab1/mygame.py | TommyGong08/Gobang_AI | c6f4b2096471157ee4c42e8be0fa7cad99e41611 | [
"MIT"
] | 3 | 2021-05-27T05:52:52.000Z | 2021-12-09T05:21:41.000Z | lab1/mygame.py | TommyGong08/Gobang_AI | c6f4b2096471157ee4c42e8be0fa7cad99e41611 | [
"MIT"
] | null | null | null | lab1/mygame.py | TommyGong08/Gobang_AI | c6f4b2096471157ee4c42e8be0fa7cad99e41611 | [
"MIT"
] | 2 | 2021-05-27T05:52:56.000Z | 2022-02-15T16:57:06.000Z | """AI Gomoku (five-in-a-row)"""
import sys
from pygame.locals import *
import pygame.gfxdraw
from AI_alpha_beta import *
from Checkboard import *
from DrawUI import *
from PIL import Image
import argparse
import matplotlib.image as mpimg
import cv2
from tiny_yolo import *
import matplotlib.pyplot as plt
FLAGS = None
from collections import namedtuple  # explicit import (may otherwise arrive via a wildcard import above)
Chessman = namedtuple('Chessman', 'Name Value Color')
Point = namedtuple('Point', 'X Y')
BLACK_CHESSMAN = Chessman('Black', 1, (45, 45, 45))
WHITE_CHESSMAN = Chessman('White', 2, (255, 255, 255))
offset = [(1, 0), (0, 1), (1, 1), (1, -1)]
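# The four scan directions used when checking for five in a row:
# horizontal (1, 0), vertical (0, 1), and the two diagonals (1, 1), (1, -1).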
def detect_img(yolo,image):
    r_image = yolo.detect_image(image)
    plt.imshow(r_image)
    plt.pause(1)  # briefly show the detection result
    plt.close()   # then close the current figure
    #yolo.close_session()
def print_text(screen, font, x, y, text, fcolor=(255, 255, 255)):
imgText = font.render(text, True, fcolor)
screen.blit(imgText, (x, y))
def main():
pygame.init()
screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
    pygame.display.set_caption('Gomoku')
font1 = pygame.font.SysFont('SimHei', 32)
font2 = pygame.font.SysFont('SimHei', 72)
    fwidth, fheight = font2.size('Black wins')  # used to centre the winner banner
checkerboard = Checkerboard(Line_Points)
cur_runner = BLACK_CHESSMAN
winner = None
computer = ChessAI(Line_Points, WHITE_CHESSMAN)
black_win_count = 0
white_win_count = 0
parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS)
parser.add_argument(
'--image', default=False, action="store_true",
help='Image detection mode, will ignore all positional arguments'
)
FLAGS = parser.parse_args()
while True:
for event in pygame.event.get():
if event.type == QUIT:
print("玩家退出游戏")
sys.exit()
elif event.type == KEYDOWN:
if event.key == K_RETURN:
                    # After a game ends, pressing Enter re-initialises the state below
                    if winner is not None:
                        winner = None
                        cur_runner = BLACK_CHESSMAN
                        checkerboard = Checkerboard(Line_Points)
                        # The computer opponent is a ChessAI; the search algorithm is encapsulated in the AI class
                        computer = ChessAI(Line_Points, WHITE_CHESSMAN)
elif event.type == MOUSEBUTTONDOWN:
# print("-------------")
if winner is None:
pressed_array = pygame.mouse.get_pressed()
if pressed_array[0]:
mouse_pos = pygame.mouse.get_pos()
                        # Map the mouse click to board coordinates
                        click_point = DrawUI._get_clickpoint(mouse_pos)
                        # The click landed inside the playable area
                        if click_point is not None:
                            # Proceed only if a stone can be placed here
                            if checkerboard.can_drop(click_point):
winner = checkerboard.drop(cur_runner, click_point)
if winner is None:
cur_runner = _get_next(cur_runner)
computer.get_opponent_drop(click_point)
                                    # The AI plays white; findBestChess returns a board coordinate
                                    AI_point, score = computer.findBestChess(WHITE_CHESSMAN.Value)  # value 2 = white
                                    # Check whether this move wins the game
                                    winner = checkerboard.drop(cur_runner, AI_point)
                                    # Recognise the board state from a screenshot
pygame.image.save(screen, '1.jpg')
image = Image.open("1.jpg")
cropped = image.crop((13, 10, 580, 580))
detect_img(YOLO(**vars(FLAGS)), cropped)
if winner is not None:
white_win_count += 1
cur_runner = _get_next(cur_runner)
else:
black_win_count += 1
                            else:
                                print('That position is already occupied')
                        else:
                            print('Click is outside the board area')
        # Draw the board
        DrawUI._draw_checkerboard(screen)
        # Draw the stones already on the board
for i, row in enumerate(checkerboard.checkerboard):
for j, cell in enumerate(row):
if cell == BLACK_CHESSMAN.Value:
DrawUI._draw_chessman(screen, Point(j, i), BLACK_CHESSMAN.Color)
elif cell == WHITE_CHESSMAN.Value:
DrawUI._draw_chessman(screen, Point(j, i), WHITE_CHESSMAN.Color)
        # Print the running score on the side panel
        DrawUI._draw_left_info(screen, font1, cur_runner, black_win_count, white_win_count, DrawUI)
        # Announce the final winner
        if winner:
            print_text(screen, font2, (SCREEN_WIDTH - fwidth)//2, (SCREEN_HEIGHT - fheight)//2, winner.Name + ' wins', RED_COLOR)
pygame.display.flip()
def _get_next(cur_runner):
if cur_runner == BLACK_CHESSMAN:
return WHITE_CHESSMAN
else:
return BLACK_CHESSMAN
if __name__ == '__main__':
main() | 36.235714 | 126 | 0.523162 | 518 | 5,073 | 4.920849 | 0.34556 | 0.038839 | 0.021969 | 0.025893 | 0.121616 | 0.083954 | 0.034523 | 0.034523 | 0.034523 | 0 | 0 | 0.021174 | 0.385571 | 5,073 | 140 | 127 | 36.235714 | 0.796599 | 0.041987 | 0 | 0.176471 | 0 | 0 | 0.034925 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.039216 | false | 0 | 0.117647 | 0 | 0.176471 | 0.04902 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8cdb7cba16f6faf51206063a2a60ea4ba7c13bab | 2,945 | py | Python | pygame_movement/__init__.py | ZayedMalick/pygame_movement_extension | d08045a7eae9cec1caf6abd7f42dc8fe458ada0e | [
"MIT"
] | null | null | null | pygame_movement/__init__.py | ZayedMalick/pygame_movement_extension | d08045a7eae9cec1caf6abd7f42dc8fe458ada0e | [
"MIT"
] | null | null | null | pygame_movement/__init__.py | ZayedMalick/pygame_movement_extension | d08045a7eae9cec1caf6abd7f42dc8fe458ada0e | [
"MIT"
] | null | null | null | # pygame
import pygame
import pygame.key
from pygame.locals import *
# rich
from rich.console import Console
# Console
console = Console()
# Extension Details
console.print("\nUsing Pygame Movement Extension", style="bold green")
# Movement Class
class Movement():
def __init__(self, character, speed, window_width, window_height):
# Character for movement
self.character = character
# Speed for character
self.speed = speed
# width and height of window
self.width = window_width
self.height = window_height
        # Initial pressed-key snapshot (refreshed inside wasd()/arrows())
        self.keys_pressed = pygame.key.get_pressed()
    # Movement with WASD keys
    def wasd(self):
        # Refresh the pressed-key state on every call; it changes each frame
        self.keys_pressed = pygame.key.get_pressed()
        # Looking for errors
        try:
# W - Upper side movement
if self.keys_pressed[K_w] and self.character.y - self.speed > 0:
self.character.y -= self.speed
# S - Bottom side movement
if self.keys_pressed[K_s] and self.character.y + self.speed + self.character.get_height() < self.height:
self.character.y += self.speed
# A - Left side movement
if self.keys_pressed[K_a] and self.character.x + self.speed > self.speed + 5:
self.character.x -= self.speed
# D - Right side movement
if self.keys_pressed[K_d] and self.character.x + self.speed + self.character.get_width() < self.width + 5:
self.character.x += self.speed
# Handling errors
        except Exception:
            # Printing Error
            console.print(
                "\nCan't exhibit movement, please check the arguments!\n", style="bold red")
# Exiting
exit()
    # Movement with ARROW keys
    def arrows(self):
        # Refresh the pressed-key state on every call; it changes each frame
        self.keys_pressed = pygame.key.get_pressed()
        # Looking for errors
        try:
# UP - Upper side movement
if self.keys_pressed[K_UP] and self.character.y - self.speed > 0:
self.character.y -= self.speed
# DOWN - Bottom side movement
if self.keys_pressed[K_DOWN] and self.character.y + self.speed + self.character.get_height() < self.height:
self.character.y += self.speed
# LEFT - Left side movement
if self.keys_pressed[K_LEFT] and self.character.x + self.speed > self.speed:
self.character.x -= self.speed
# RIGHT - Right side movement
if self.keys_pressed[K_RIGHT] and self.character.x + self.speed + self.character.get_width() < self.width + 5:
self.character.x += self.speed
# Handling errors
        except Exception:
            # Printing Error
            console.print(
                "\nCan't exhibit movement, please check the arguments!\n", style="bold red")
# Exiting
exit()
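# Minimal usage sketch (illustrative; names are placeholders). The `character`
# object is assumed to expose mutable `x`/`y` attributes plus
# `get_width()`/`get_height()` methods, e.g. a thin wrapper around a sprite:
#
#   movement = Movement(character=player, speed=5,
#                       window_width=800, window_height=600)
#   while running:
#       for event in pygame.event.get():
#           ...
#       movement.wasd()  # or movement.arrows()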
# Print a banner when the module is run directly
if __name__ == "__main__":
console.print("\nPygame Movement Extension\n", style="bold green")
| 32.01087 | 122 | 0.590492 | 355 | 2,945 | 4.791549 | 0.205634 | 0.168136 | 0.079365 | 0.084656 | 0.640212 | 0.599647 | 0.585538 | 0.585538 | 0.379777 | 0.379777 | 0 | 0.002484 | 0.316469 | 2,945 | 91 | 123 | 32.362637 | 0.842524 | 0.178947 | 0 | 0.418605 | 0 | 0 | 0.091252 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.069767 | false | 0 | 0.093023 | 0 | 0.186047 | 0.093023 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8cdd27eb1dce92ebeb43123c647b8f03243ba74a | 13,200 | py | Python | src/extract_features.py | skycckk/Malware-Image-Analysis | c958ff91c386dbfd58b5661b5e1a4f80d5c7960d | [
"MIT"
] | 4 | 2017-12-04T07:22:06.000Z | 2019-12-03T09:19:44.000Z | src/extract_features.py | skycckk/Malware-Image-Analysis | c958ff91c386dbfd58b5661b5e1a4f80d5c7960d | [
"MIT"
] | 3 | 2018-08-01T23:09:45.000Z | 2019-12-25T02:24:03.000Z | src/extract_features.py | skycckk/Malware-Image-Analysis | c958ff91c386dbfd58b5661b5e1a4f80d5c7960d | [
"MIT"
] | 4 | 2017-11-08T21:28:18.000Z | 2020-10-22T14:53:02.000Z | from skimage import filters
from skimage import feature
from skimage import img_as_float
from skimage import exposure
from skimage import transform
from skimage.morphology import disk
import matplotlib.pyplot as plt
import numpy as np
import pywt
from skimage import io
from jpg_quantizer import *
__author__ = "Wei-Chung Huang"
__copyright__ = "Copyright 2018, The SJSU MSCS Master project"
__license__ = "MIT"
__version__ = "1.0.0"
def project_h_edge(img, gauss_sigma=1, print_img=False, total_blocks=-1):
"""
    Project horizontal edges vertically by applying a Gaussian blur followed by a horizontal Sobel operator
:param img: input gray image (0~255)
:param gauss_sigma: sigma in gaussian distribution w.r.t. blurness
:param print_img: print debug images
:param total_blocks: number of blocks or intervals in image spreading vertically
    :return: histogram normalized to 0~1; its length is min(total_blocks, image height)
"""
img_height, img_width = img.shape[0], img.shape[1]
img = img_as_float(img)
blurred = filters.gaussian(img, gauss_sigma)
edge_map = filters.sobel_h(blurred)
    if total_blocks == -1:
total_blocks = img_height
if print_img is True:
dump_images(img, blurred, edge_map)
hist_len = min(total_blocks, img_height)
ratio = (hist_len - 1) / (img_height - 1)
hist = [0] * hist_len
peak_val = 0
for i in range(img_height):
hist_index = round(i * ratio)
for j in range(img_width):
hist[hist_index] += abs(edge_map[i][j])
if hist[hist_index] > peak_val:
peak_val = hist[hist_index]
for i in range(hist_len):
hist[i] /= peak_val
return hist
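# Usage sketch (illustrative): project the horizontal edges of a byte image
# into a 64-bin normalized histogram.
#   hist = project_h_edge(img, gauss_sigma=3, total_blocks=64)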
def extract_HOG(img, blocks=(12, 12), buckets=64, print_img=False):
"""
    Extract Histogram of Oriented Gradients (HOG) features
:param img: input 1-channel image
:param blocks: block size of HOG in tuple(height, width). This controls the size of output vector
:param buckets: number of buckets of HOG in the result feature vector
:param print_img: visualize the HOG result
:return: feature vector
"""
img_height, img_width = img.shape[0], img.shape[1]
# cut the image to fit the block
block_len = blocks[0]
# make k times of block
new_height = block_len * int(img_height / block_len)
new_width = block_len * int(img_width / block_len)
pixels_per_cell = (int(new_width / block_len), int(new_height / block_len))
if not print_img:
arr = feature.hog(img[:new_height, :new_width],
pixels_per_cell=pixels_per_cell, block_norm='L2-Hys', visualise=False)
mmax, mmin = 0, 0
for k in range(len(arr)):
mmax = max(mmax, arr[k])
mmin = min(mmin, arr[k])
for k in range(len(arr)):
arr[k] = (arr[k] - mmin) / (mmax - mmin)
bucket_len = len(arr) / buckets
res = [0] * buckets
res_count = [0] * buckets
for i in range(len(arr)):
bucket_id = int(i / bucket_len)
res[bucket_id] += arr[i]
res_count[bucket_id] += 1
res = np.asarray(res) / np.asarray(res_count)
return res
else:
arr, hog_img = feature.hog(img[:new_height, :new_width],
pixels_per_cell=pixels_per_cell, block_norm='L2-Hys', visualise=True)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.imshow(hog_img, cmap="gray")
plt.show()
io.use_plugin('pil')
dump = np.reshape(arr, (270, 270))
mmax = 0
mmin = 0
for i in range(dump.shape[0]):
for j in range(dump.shape[1]):
mmax = max(mmax, dump[i][j])
                mmin = min(mmin, dump[i][j])
print(mmax, mmin)
for i in range(dump.shape[0]):
for j in range(dump.shape[1]):
dump[i][j] = (dump[i][j] - mmin) * 255 / (mmax - mmin)
return arr
def extract_grid_blocks(img, blocks_per_image=(3, 3)):
"""
    Divide the image into grids (blocks)
:param img: input 1-channel image
:param blocks_per_image: number of grids in tuple(height, width)
:return: 2D list containing pixel value for each slot
"""
img_height, img_width = img.shape[0], img.shape[1]
bx = int(img_width / blocks_per_image[1])
by = int(img_height / blocks_per_image[0])
grids = []
for i in range(blocks_per_image[0]):
start_i = i * by
end_i = start_i + by
for j in range(blocks_per_image[1]):
start_j = j * bx
end_j = start_j + bx
grid = img[start_i:end_i, start_j:end_j]
grids.append(grid)
return grids
def means_feature(grids):
"""
Compute the mean value of pixel intensity in each grid of image
:param grids: 2D list containing pixel value for each slot
:return: mean vector
"""
feature_vec = [0] * len(grids)
for i in range(len(grids)):
block = np.asarray(grids[i])
feature_vec[i] = block.mean() / 255
return feature_vec
def histogram_feature(img):
"""
Compute the histogram in each grid
:param img: ndarray
1-channel image
:return: concatenated histogram with value range from 0 to 1
"""
hist, bin_centers = exposure.histogram(img)
hist = hist / np.max(hist)
feature_vec = hist.flatten().tolist()
return feature_vec
def median_feature(img, resize_shape=(64, 64)):
"""
Compute median feature applying a median filter
:param img: ndarray
1-channel image
:param resize_shape: tuple(, )
The output image size after applying median filter.
:return: flatten feature vector of 2D median filtered image
"""
img_med = filters.median(img, disk(7))
img_med = transform.resize(img_med, resize_shape, mode='reflect')
return img_med.flatten().tolist()
def extract_lbp_feature(img, radius=2):
"""
Extract Local Binary Pattern(LBP) features
:param img: ndarray
1-channel image
:param radius: int
Number of circularly symmetric neighbour set points (quantization of the angular space).
:return: list
Histogram of LBP result
"""
n_points = 8 * radius
lbp = feature.local_binary_pattern(img, n_points, radius, method="uniform")
n_bins = int(lbp.max() + 1)
    hist, _ = np.histogram(lbp, density=True, bins=n_bins, range=(0, n_bins))
return hist.tolist()
def frequency_feature(img, line_bk=1):
"""
Extract a frequency distribution
:param img: ndarray
1-channel image
:param line_bk: int
Number of horizontal segments. (default is 1, an whole image)
:return: list
        The frequency distribution over the number of horizontal segments
        (e.g. an 8x8 DCT with line_bk=4 produces a vector of length 64 * 4).
"""
# Align the input image to the block size of DCT (i.e. align to 8) to unify DCT coefficient matrix
height = int(img.shape[0] / 8) * 8
width = int(img.shape[1] / 8) * 8
if height < 1 or width < 1:
        raise ValueError('Input image size should be greater than the DCT block size')
img_aligned = img[0:height, 0:width]
codec = JPGEncode()
codeword = codec.encode(img_aligned, bk_size=8, thresh=0.1, use_qtm=True, debug=False)
# codeword = codec.encode_with_qtm_dithering(img_aligned, bk_size=8) # turn on if dithering is needed
# but our experiment showed without dithering, the accuracy does not decrease but performance increases.
n = len(codeword)
height_in_bk = int(height / 8)
width_in_bk = int(width / 8)
feature_vec = list()
line_seg_size = height_in_bk / line_bk
if line_seg_size < 1:
        raise ValueError('line_bk should be less than the height / DCT_bk_size')
for i in np.r_[:height_in_bk:line_seg_size]:
start = int(i) * width_in_bk
end = int(i + line_seg_size) * width_in_bk
freq_size = 8 * 8
freq_distribution = [0] * freq_size
for j in range(start, end):
for k in range(freq_size):
freq_distribution[k] += abs(codeword[j][k])
for k in range(freq_size):
freq_distribution[k] = round(freq_distribution[k] / n)
feature_vec += freq_distribution
return feature_vec
def wavelet_features(img, wt_levels=5, bk=8, quantized=True):
"""
Extract statistical features based on images subband by using wavelet transform.
    For each band, tile the subband and compute a statistic per tile:
    variances for the first two (finest) levels and means for the
    remaining (coarser) levels.
:param img: ndarray
1-channel image
:param wt_levels: Level of wavelet transform
    :param bk: Grid size per side, giving bk * bk blocks in total.
:param quantized: Indicate whether to quantize the feature vector by a uniform distribution.
:return: A list.
"""
subbands = [0] * wt_levels
for i in range(wt_levels):
coeffs = pywt.dwt2(img, 'haar')
cA, (cH, cV, cD) = coeffs
img = cA
subbands[i] = img
feature_vec_size = 0
bk_list = [0] * wt_levels
    for i in range(wt_levels):  # This workaround prevents small image sizes across multiple levels
bk_list[i] = bk - i
if i >= 3:
bk_list[i] = max(bk_list[i] - 2, 1)
feature_vec_size += (bk_list[i] * bk_list[i])
feature_vec = [0] * feature_vec_size
cnt = 0
for i in range(wt_levels):
img = subbands[i]
bk = bk_list[i]
height, width = img.shape[0], img.shape[1]
for j in np.r_[0:height-1:height / bk]:
start_i = int(round(j))
end_i = int(round(j + height / bk))
for k in np.r_[0:width-1:width / bk]:
start_j = int(round(k))
end_j = max(int(round(k + width / bk)), start_j + 1)
block = img[start_i:end_i, start_j:end_j]
if 0 <= i <= 1:
feature_vec[cnt] = np.var(block)
else:
feature_vec[cnt] = np.mean(block)
cnt += 1
if quantized:
max_r = np.max(feature_vec)
min_r = np.min(feature_vec)
interval = max((max_r - min_r) / 128.0, 1.0)
for i in range(len(feature_vec)):
feature_vec[i] = int((feature_vec[i] - min_r - 1) / interval)
return feature_vec
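# Worked example (follows the bk_list logic above): with wt_levels=5 and bk=8
# the per-level grids are 8x8, 7x7, 6x6, 3x3 and 2x2, so the returned feature
# vector has 64 + 49 + 36 + 9 + 4 = 162 entries.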
def convolution(img):
pass
# NOT IMPLEMENTED YET
# kernel = np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]])
# # you can use 'valid' instead of 'same', then it will not add zero padding
# image_sharpen = scipy.signal.convolve2d(img, kernel, 'same')
def get_features(img, name):
"""
Get features with specific one
:param img: ndarray
1-channel input byte image
:param name: string
type of feature using
:return: list
1D feature vector
"""
if name == 'h-edge': # horizontal edge: 1 x 256
smooth_radius = 3
hist_size = 256
# original setting: 9 and 256
# tuned setting: 3 and 64
hist = project_h_edge(img, gauss_sigma=smooth_radius, print_img=False, total_blocks=hist_size)
# resize to desired length. e.g. 256
hist_arr = np.array(hist).reshape(1, len(hist))
feature_vec = transform.resize(hist_arr, (1, hist_size), mode='reflect').tolist()[0]
    elif name == 'hog':  # Histogram of Oriented Gradients
hog_block_size = (4, 4)
hog_buckets = 128
# original setting: (12, 12) and 1
# tuned setting: (4, 4) and 128
feature_vec = extract_HOG(img, blocks=hog_block_size, buckets=hog_buckets).tolist()
elif name == 'mean': # Mean intensity of each grid
# ---------------------------
# if not using mean value, the dimension would be 8100
# ---------------------------
grid_block_size = (16, 16)
# original setting: (3, 3)
# tuned setting: (16, 16)
grids = extract_grid_blocks(img, blocks_per_image=grid_block_size)
feature_vec = means_feature(grids)
elif name == 'lbp': # Local Binary Pattern
lbp_radius = 2
# original setting: 2
# tuned setting: 2
feature_vec = extract_lbp_feature(img, radius=lbp_radius)
elif name == 'contrast': # Histogram(Contrast)
feature_vec = histogram_feature(img)
elif name == 'median': # median images (flatten)
median_shape = (64, 64)
# original setting: (64, 64)
# tuned setting: (64, 64)
feature_vec = median_feature(img, resize_shape=median_shape)
return feature_vec
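# Usage sketch (illustrative; 'sample.png' is a placeholder path):
#   from skimage import io
#   img = (io.imread('sample.png', as_gray=True) * 255).astype('uint8')
#   vec = get_features(img, 'hog')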
def dump_images(img, blur_img, edge_map):
abs_edge_map = np.abs(edge_map)
max_edge = np.max(abs_edge_map)
new_edge_map = (abs_edge_map * 255) / max_edge
fig = plt.figure()
ax1 = fig.add_subplot(131)
ax2 = fig.add_subplot(132)
ax3 = fig.add_subplot(133)
ax1.set_title("source")
ax2.set_title("blurred")
ax3.set_title("edge map")
ax1.imshow(img, cmap="gray")
ax2.imshow(blur_img, cmap="gray")
ax3.imshow(new_edge_map, cmap="gray")
plt.tight_layout()
plt.show() | 34.464752 | 116 | 0.615606 | 1,910 | 13,200 | 4.08377 | 0.18534 | 0.034615 | 0.009231 | 0.015513 | 0.182179 | 0.131154 | 0.114359 | 0.085769 | 0.066795 | 0.044487 | 0 | 0.025587 | 0.277576 | 13,200 | 383 | 117 | 34.464752 | 0.792366 | 0.290227 | 0 | 0.111607 | 0 | 0 | 0.030944 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.053571 | false | 0.004464 | 0.049107 | 0 | 0.151786 | 0.026786 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8cde62599f7d1989263bd95b80c275d54b8be1e3 | 2,411 | py | Python | tests/test_dataclass.py | brentyi/jax_dataclasses | 3c09ead85ecb7d544b79c6378d219584cc6ce5b9 | [
"MIT"
] | 22 | 2021-06-04T05:01:47.000Z | 2022-02-22T06:12:11.000Z | tests/test_dataclass.py | brentyi/jax_dataclasses | 3c09ead85ecb7d544b79c6378d219584cc6ce5b9 | [
"MIT"
] | 1 | 2022-02-23T23:04:33.000Z | 2022-02-28T00:11:57.000Z | tests/test_dataclass.py | brentyi/jax_dataclasses | 3c09ead85ecb7d544b79c6378d219584cc6ce5b9 | [
"MIT"
] | 1 | 2021-10-30T05:05:33.000Z | 2021-10-30T05:05:33.000Z | """Tests for standard jax_dataclasses.dataclass features. Initialization, flattening, unflattening,
static fields, etc.
"""
import jax
import numpy as onp
import pytest
import jax_dataclasses
def _assert_pytree_allclose(x, y):
jax.tree_multimap(
lambda *arrays: onp.testing.assert_allclose(arrays[0], arrays[1]), x, y
)
def test_init():
@jax_dataclasses.pytree_dataclass
class A:
field1: int
field2: int
assert A(field1=5, field2=3) == A(5, 3)
with pytest.raises(TypeError):
# Not enough arguments
A(field1=5)
def test_default_arg():
@jax_dataclasses.pytree_dataclass
class A:
field1: int
field2: int = 3
assert A(field1=5, field2=3) == A(5, 3) == A(field1=5) == A(5)
def test_flatten():
@jax_dataclasses.pytree_dataclass
class A:
field1: float
field2: float
@jax.jit
def jitted_sum(obj: A) -> float:
return obj.field1 + obj.field2
_assert_pytree_allclose(jitted_sum(A(5.0, 3.0)), 8.0)
def test_unflatten():
@jax_dataclasses.pytree_dataclass
class A:
field1: float
field2: float
@jax.jit
def construct_A(a: float) -> A:
return A(field1=a, field2=a * 2.0)
_assert_pytree_allclose(A(1.0, 2.0), construct_A(1.0))
def test_static_field():
@jax_dataclasses.pytree_dataclass
class A:
field1: float
field2: float = jax_dataclasses.field()
field3: bool = jax_dataclasses.static_field()
@jax.jit
def jitted_op(obj: A) -> float:
if obj.field3:
return obj.field1 + obj.field2
else:
return obj.field1 - obj.field2
with pytest.raises(ValueError):
# Cannot map over pytrees with different treedefs
_assert_pytree_allclose(A(1.0, 2.0, False), A(1.0, 2.0, True))
_assert_pytree_allclose(jitted_op(A(5.0, 3.0, True)), 8.0)
_assert_pytree_allclose(jitted_op(A(5.0, 3.0, False)), 2.0)
def test_no_init():
@jax_dataclasses.pytree_dataclass
class A:
field1: float
field2: float = jax_dataclasses.field()
field3: bool = jax_dataclasses.static_field(init=False)
def __post_init__(self):
object.__setattr__(self, "field3", False)
@jax.jit
def construct_A(a: float) -> A:
return A(field1=a, field2=a * 2.0)
assert construct_A(5.0).field3 is False
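# Additional sketch (not from the original test-suite; assumes the same
# jax_dataclasses API exercised above): these pytree dataclasses also
# compose with other JAX transforms, e.g.
#
#   @jax.jit
#   def norm_sq(obj: A) -> float:
#       return obj.field1 ** 2 + obj.field2 ** 2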
| 23.407767 | 99 | 0.636251 | 340 | 2,411 | 4.314706 | 0.235294 | 0.114519 | 0.0818 | 0.118609 | 0.551466 | 0.495569 | 0.495569 | 0.495569 | 0.460123 | 0.427403 | 0 | 0.046961 | 0.249274 | 2,411 | 102 | 100 | 23.637255 | 0.763536 | 0.077146 | 0 | 0.470588 | 0 | 0 | 0.002706 | 0 | 0 | 0 | 0 | 0 | 0.147059 | 1 | 0.176471 | false | 0 | 0.058824 | 0.044118 | 0.602941 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8ce0be6439171ebae5f616a1f14411fc03492384 | 49,207 | py | Python | xpsi/PostProcessing/_corner.py | yveskini/xpsi | 946efaecf5110ac37652dcd617198c396854f2af | [
"MIT"
] | 14 | 2019-09-26T12:08:06.000Z | 2021-05-11T15:26:10.000Z | xpsi/PostProcessing/_corner.py | yveskini/xpsi | 946efaecf5110ac37652dcd617198c396854f2af | [
"MIT"
] | 13 | 2020-01-10T11:03:28.000Z | 2021-10-04T14:44:01.000Z | xpsi/PostProcessing/_corner.py | yveskini/xpsi | 946efaecf5110ac37652dcd617198c396854f2af | [
"MIT"
] | 9 | 2020-03-04T13:28:05.000Z | 2021-09-28T09:00:50.000Z | from __future__ import division, print_function
from scipy.special import logsumexp
from ._global_imports import *
from . import _precision
from getdist.plots import getSubplotPlotter
from getdist.mcsamples import MCSamples
try:
from nestcheck.ns_run_utils import get_logw, get_w_rel
from nestcheck.plots import bs_param_dists
from nestcheck.error_analysis import run_ci_bootstrap
from nestcheck.estimators import param_cred, logz
except ImportError:
_warning('CornerPlotter instances cannot use nestcheck functionality.')
else:
try:
from nestcheck.plots import getdist_kde
except ImportError:
try:
from nestcheck.plots import weighted_1d_gaussian_kde
except ImportError:
_warning('CornerPlotter instances cannot use nestcheck '
'functionality.')
else:
_warning('Using native nestcheck KDE instead of GetDist KDE.')
from ._backends import NestedBackend
from ._postprocessor import PostProcessor
class CornerPlotter(PostProcessor):
""" Plot marginal posterior densities and estimators.
"""
@fix_random_seed
@make_verbose('Executing posterior density estimation',
'Posterior density estimation complete')
def plot(self,
params,
IDs=None,
combine=False,
combine_all=False,
only_combined=False,
force_combine=True,
overwrite_combined=False,
bootstrap_estimators=True,
bootstrap_density=False,
separate_plots=False,
write=False,
root_filename='',
directory='./',
ext='.pdf',
dpi=300,
maxdots=2000,
**kwargs):
""" Generate posterior density plots.
Invokes :mod:`getdist` and :mod:`nestcheck` for nested sampling runs.
Up to five runs can be plotted natively via :mod:`nestcheck`; beyond
such a number the plots generally display too much information and
clarity is lost.
:param list params:
List of parameter strings for plotting. Must be shared by all
posteriors selected with the ``IDs`` argument.
:param OrderedDict IDs:
Keys must be string identifiers of :class:`Runs` instances.
Each dictionary element must be a list of string identifiers,
each matching objects collected in :class:`Runs` instance
corresponding to the key. Defaults to ``None``, meaning attempt to
use as many runs as possible subject to plotting restrictions.
.. note::
The order of IDs is used to control the layering of posteriors.
If there are multiple underlying posteriors (i.e., multiple
dictionary keys), only one (combined) run per posterior is
rendered, and the first posterior is rendered on the topmost layer.
If there is only one underlying posterior (i.e., one dictionary
keys), then the combined-sample posterior, if available, is plotted
on the topmost layer, whilst the runs on that posterior are
rendered on layers underneath in the order specified in the list.
In either case, estimators are calculated and reported for the
(combined) run on the topmost layer.
:param bool combine:
Additionally combine the runs into a single run for overplotting?
The overriding setting if there is more than one underlying
posterior to be plotted, is to attempt to combine runs on each
posterior if multiple such runs are available, in order to
reduce information density. If there is a single underlying
posterior, the user-specified value is respected.
:param bool combine_all:
Combine all runs in each :class:`Runs` instance or only those
for which IDs are provided? Ignored if ``combine`` is ``False``.
:param bool force_combine:
Force recombination of elligible run sets, even if a
combined run is already cached?
:param bool only_combined:
Only plot the combined run? Only heeded if a single posterior
is selected for plotting, and in that case is ignored if
``combine`` is ``False``.
:param bool overwrite_combined:
Overwrite combined-sample files on disk with the same filename?
:param bool bootstrap:
Use :mod:`nestcheck` and :mod:`fgivenx` to bootstrap the runs for
posterior density error estimation?
:param bool separate_plots:
Generate a lower-triangle plot with :mod:`getdist`, and a separate
error plot with :mod:`nestcheck` (with :mod:`fgivenx` and
:mod:`getdist`). If ``False`` (default), the diagonal panels of the
lower-triangle plot are modified by adding the nestcheck output.
Ignored if ``bootstrap`` is ``False``.
:param bool write:
Export the figure?
:param str root_filename:
Root filename to prepend to automatically generated name. Can be,
e.g., a model and/or data set identifier.
:param str directory:
If ``None`` defaults to current directory.
:param str ext:
File extension for writing. E.g., ``'.png'``.
:param int dpi:
Dots-per-square-inch settings for exporting plots.
:param kwargs:
Keyword arguments for the :meth:`_plot_density_with_error` and
:meth:`_plot_triangle` methods. Keyword arguments for line
properties (width and alpha) for :mod:`getdist` contours and density
distributions. If ``bootstrap and not separate_plots`` then
the density distribution linewidth is set to zero if not
explicitly specified with kwarg ``lw_1d``.
"""
self.set_subset(IDs, combine, combine_all,
force_combine, only_combined,
overwrite_combined)
self.set_params(params)
if bootstrap_density and not separate_plots:
if 'lw_1d' not in kwargs: kwargs['lw_1d'] = 0.0
self._set_line_and_contour_args(**kwargs)
self._plotter = self._plot_triangle(bootstrap_estimators, **kwargs)
if bootstrap_density and separate_plots:
figs = self._plot_density_with_error(**kwargs)
elif bootstrap_density:
figs = self._plot_density_with_error(plotter=self._plotter,
**kwargs)
if write:
root_filename = (root_filename + '__' if root_filename else '') + \
'posteriorDensity__runs_' + \
'_'.join(str(ID).replace(' ', '') for
ID in self.get_attr('ID')) + '__'
_dpi = dpi
if maxdots > 0:
ndots = dpi * len(self.params)
ndots *= self._plotter.settings.subplot_size_inch
if ndots > maxdots:
dpi = int(maxdots * _dpi / ndots)
self._plotter.export(fname=root_filename+'triangle'+ext,
adir=directory, dpi=dpi)
try:
figs[1].savefig(_os.path.join(directory,
root_filename+'fthetas_1d.pdf'),
dpi=_dpi,
bbox_inches='tight')
except IndexError:
if separate_plots:
fname = root_filename + 'params_1d.pdf'
else:
fname = root_filename + 'fthetas_1d.pdf'
figs[0].savefig(_os.path.join(directory, fname),
dpi=_dpi,
bbox_inches='tight')
except (TypeError, NameError):
pass
return self._plotter
@make_verbose('Simulating nested sampling realisations for '
'posterior density error visualisation',
'Simulated nested sampling realisations and '
'plotted posterior density error estimates')
def _plot_density_with_error(self,
plotter = None,
fthetas = None,
kde_func = None,
kde_kwargs = None,
**kwargs):
"""
:param plotter:
A :attr:`getdist.GetDistPlotter` instance if the :mod:`nestcheck`
output is to be displayed on a lower-triangle plot.
:param list fthetas:
Iterable containing functions of parameter vector for which
density is to be plotted with :mod:`nestcheck` via :mod:`fgivenx`.
The parameters themselves are handled automatically with ``lambda``
functions. Additional functions are always plotted using the
native :mod:`nestcheck` matplotlib figure; the parameter densities
are be added to a :mod:`getdist` lower-triangle plot is supplied.
:param func kde_func:
Function for KDE compatible with :mod:`nestcheck.plots`. Must
be *weighted* KDE (Higson et al. 2018). If ``None``, uses
:mod:`getdist` if available, or the native KDE function otherwise.
If using :mod:`getdist`, the KDE settings are automatically
retrieved from the first run and applied to :mod:`nestcheck` and
:mod:`fgivenx` *for all runs*.
:param kwargs:
Keyword arguments for :func:`nestcheck.plots.bs_param_dists`.
TODO:
----
* lims based on credible interval estimate for efficiency?
"""
nestcheck_bcknds, runs = self._filter_nestcheck_compatible()
nx = kwargs.pop('nx', 200); ny = kwargs.pop('ny', nx)
scale_ymax = kwargs.pop('scale_ymax', 1.1)
n_simulate = kwargs.pop('n_simulate', 200)
params = self.params.names
labels = self.params.labels
# declare how to access parameter samples for each run
_fthetas = []
for run in runs:
func = lambda y: (lambda theta: theta[:,y])
_fthetas.append([func(run.get_index(param)) for param in params])
# declare limits for parameters
bounds = []
for run in runs:
bounds.append([list(run.bounds[param]) for param in params])
_lims = [list(plotter.subplots[i,i].get_xlim()) for i in range(len(params))]
lims = [_lims for run in runs]
for _l, _b in zip(lims, bounds): # loop over runs
for l, b in zip(_l,_b): # loop over parameters
l[0] = (l[0] if l[0] > b[0] else b[0])
l[1] = (l[1] if l[1] < b[1] else b[1])
if kde_func is None:
try:
kde_func = getdist_kde
except NameError:
kde_func = weighted_1d_gaussian_kde
kde_kwargs = [None] * len(runs)
else:
normalize = kwargs.pop('normalize', False)
kde_kwargs = []
for run in runs:
kde_kwargs.append(
{'settings': run.kde_settings,
'ranges': [run.bounds[param] for param in params],
'normalize': normalize}
)
lines = kwargs.pop('lines', False)
parallel = kwargs.pop('parallel', True)
rasterize_contours = kwargs.pop('rasterize_contours', True)
tqdm_kwargs = kwargs.pop('tqdm_kwargs', None)
figsize = kwargs.pop('figsize', _np.array([6.0 * len(params),
3.0 * len(params)]))
figs = []
with verbose(plotter is not None,
'Adding density error information to triangle plot',
'Added density error information'):
fig = bs_param_dists(nestcheck_bcknds,
fthetas=_fthetas,
kde_func=kde_func,
kde_kwargs=kde_kwargs,
ftheta_lims=lims,
nx=nx,
ny=ny,
scale_ymax=scale_ymax,
n_simulate=n_simulate,
simulate_weights=True,
getdist_plotter=plotter,
figsize=figsize,
lines=lines,
parallel=parallel,
rasterize_contours=rasterize_contours,
labels=labels,
no_means=True,
tqdm_kwargs=tqdm_kwargs)
if fig: figs.append(fig)
if fthetas:
            if isinstance(fthetas[0], list):
                # nested form: one list of functions per run
                num_funcs = len(fthetas[0])
            else:
                num_funcs = len(fthetas)
kde_kwargs['ranges'] = ftheta_lims
figsize *= float(num_funcs)/len(params)
if 'ftheta_labels' in kwargs:
kwargs = {'labels': kwargs['ftheta_labels']}
else:
kwargs = {}
fig = bs_param_dists(nestcheck_bcknds,
fthetas=fthetas,
kde_func=kde_func,
kde_kwargs=kde_kwargs,
ftheta_lims=ftheta_lims,
nx=nx,
ny=ny,
scale_ymax=scale_ymax,
n_simulate=n_simulate,
simulate_weights=True,
figsize=figsize,
lines=lines,
parallel=parallel,
rasterize_contours=rasterize_contours,
**kwargs)
figs.append(fig)
return figs if figs else None
@make_verbose('Constructing lower-triangle posterior density '
'plot via Gaussian KDE:',
'Constructed lower-triangle posterior density plot')
def _plot_triangle(self,
bootstrap,
prior_density = True,
KL_divergence = True,
KL_base = 'bits',
ndraws = int(1e6),
param_plot_lims = None,
crosshairs = False,
filled = False,
legend_loc = 'lower left',
legend_corner_coords = (0.75,0.75),
legend_frameon = False,
scale_attrs = None,
normalize = True,
veneer = False,
no_zero = True,
no_ylabel = False,
label_right = True,
no_ytick = False,
credible_interval_1d = True,
annotate_credible_interval = True,
annotate_xy=(0.025,0.915),
sixtyeight = True,
ninety = False,
compute_all_intervals=True,
**kwargs):
""" Call :meth:`getdist.plots.GetDistPlotter.triangle_plot`.
:param bool prior_density:
If ``True`` tries to draw samples from the joint prior and plot
            marginal 1D prior density functions. Silently fails if the attempt
fails due to missing likelihood and prior callables.
:param bool KL_divergence:
If `True` and `prior_density` is `True`, estimate and annotate
credible interval for Kullback-Leibler divergence for each
parameter in triangle plot.
:param str KL_base:
Base for Kullback-Leibler divergence. Options are {'bits', 'nats'}.
:param int ndraws:
            Number of samples drawn from the joint prior. Ignored if
            ``prior_density`` is ``False`` or if the attempt to plot the
            density fails.
:param dict param_plot_lims:
Dictionary of viewing ranges for plotting. Keys must be parameter
names.
:param bool crosshairs:
Display parameter truth crosshairs?
:param bool filled:
Specify whether the contours are to be filled.
:param str legend_loc:
            Specify the legend location. Defaults to ``lower left``, which
            together with the default ``legend_corner_coords`` places the
            legend in the empty upper region of the lower-triangle plot.
:param tuple legend_corner_coords:
Modifies meaning of ``legend_loc`` to be the coordinates of the
point on the legend box specified by ``legend_loc``. Pass ``None``
if not applicable. Defaults to place legend in empty upper region
of lower-triangle plot.
:param dict scale_attrs:
Scale :class:`getdist.plots.GetDistPlotterSettings` attributes
from the automatic values. E.g., ``{'legend_fontsize': 2.0}``.
Use string values to set the key attribute to the value attribute.
Caution: do not rely on ordering of pairs in a dictionary, but
use an :class:`collections.OrderedDict` instead to heed order.
:param bool normalize:
Normalize density distribution in on-diagonal 1D panels?
:param bool no_zero:
Hide axes zeros within on-diagonal 1D panels?
:param bool no_ylabel:
Hide *probability density* label on diagonal 1D marginal panels?
:param bool label_right:
Display *probability density* label on diagonal 1D marginal plots?
:param bool no_ytick:
Hide y-axis ticks on diagonal 1D marginal plots?
:param bool credible_interval_1d:
Estimate and plot 1D marginal credible intervals? The upper
and lower quantiles of the interval are estimated via bootstrapping
with :mod:`nestcheck`, each bounding quantile plotted as a (narrow)
band bounded by the same quantiles with respect to the bootstrap
realisations. The interval between the two bands is generally much
wider and is shaded lighter.
:param bool annotate_credible_interval:
Annotate each on-diagonal panel with numeric credible interval
as median +/- distance to symmetric quantiles. Each quantile,
including the median, is estimated via bootstrapping with
:mod:`nestcheck`, and the median of each quantile from the bootstrap
realisations is used for the reported numeric credible interval.
:param tuple annotate_xy:
Coordinates as axis fractions for annotation of credible intervals.
:param bool sixtyeight:
Should the credible interval, which is symmetric in quantiles about
the mean, be approximately the 1-\sigma credible interval thus
containing ~68% of the posterior mass? If ``False`` the interval
computed is the approximate 2-\sigma interval containing ~95% of
the posterior mass.
:param kwargs:
* additional keyword arguments passed to
:meth:`getdist.GetDistPlotter.triangle_plot`
* settings for :mod:`getdist` posterior lower-triangle plotting, applied
to a :class:`getdist.plots.GetDistPlotSettings` instance
.. note::
Using ``subplot_size`` keyword argument (specify in inches) invokes
automated label fontsizes and tick sizes. If ``width_inch`` is
used instead, this automation does not occur.
"""
try:
for run in self.subset_to_plot:
if not isinstance(run, NestedBackend):
raise TypeError('Nested sampling backends are required.')
except AttributeError:
print('Nested sampling runs are required.')
raise
else:
getdist_bcknds = self.get_attr('getdist_backend')
getdist_bcknds.reverse()
line_args = self.get_attr('lines')
line_args.reverse()
contour_args = self.get_attr('contours')
contour_args.reverse()
if len(getdist_bcknds) == 1:
legend_labels = None
elif len(self._subset) > 1:
legend_labels = self.get_attr('parent_ID')
else:
legend_labels = self.get_attr('ID')
if legend_labels is not None:
legend_labels.reverse()
if param_plot_lims is None:
param_plot_lims = {}
if param_plot_lims:
prune = kwargs.get('tick_prune', None)
# try to set matching :class:`getdist.plots.GetDistPlotSettings` attrs
plotter = getSubplotPlotter(kwargs.pop('subplot_size', 2),
kwargs.pop('width_inch', None))
setattr(plotter.settings, 'progress', True)
setattr(plotter.settings, 'norm_prob_label', 'Probability density')
setattr(plotter.settings, 'prob_y_ticks', True)
setattr(plotter.settings, 'thin_long_subplot_ticks', False)
setattr(plotter.settings, 'tick_prune', None)
for key in kwargs.copy():
if hasattr(plotter.settings, key):
setattr(plotter.settings, key, kwargs[key])
del kwargs[key]
if scale_attrs is None:
scale_attrs = {}
        for key, value in scale_attrs.items():
if hasattr(plotter.settings, key):
if isinstance(value, float) or isinstance(value, int):
setattr(plotter.settings, key,
getattr(plotter.settings, key) * value)
elif isinstance(value, _six.string_types):
if hasattr(plotter.settings, value):
setattr(plotter.settings, key,
getattr(plotter.settings, value))
if isinstance(normalize, bool):
diag1d_kwargs = {'normalized': normalize}
if isinstance(no_zero, bool):
diag1d_kwargs['no_zero'] = no_zero
if isinstance(no_ylabel, bool):
diag1d_kwargs['no_ylabel'] = no_ylabel
if isinstance(label_right, bool):
diag1d_kwargs['label_right'] = label_right
if isinstance(no_ytick, bool):
diag1d_kwargs['no_ytick'] = no_ytick
plotter.triangle_plot(getdist_bcknds,
legend_labels = legend_labels,
params = self.params.names,
filled = filled,
legend_loc = legend_loc,
line_args = line_args,
contour_args = contour_args,
diag1d_kwargs = diag1d_kwargs,
**kwargs)
try:
if legend_corner_coords:
plotter.legend.set_bbox_to_anchor(legend_corner_coords)
except AttributeError:
pass
else:
plotter.legend.set_frame_on(legend_frameon)
# add custom parameter plotting limits and updated autolocation
with fragile(verbose(param_plot_lims,
'Applying bespoke parameter viewing intervals',
'Viewing intervals applied')) as condition:
if not condition: fragile.Break
params = self.params
for param, l in param_plot_lims.items():
j = params.names.index(param)
for i in range(j,len(params.names)):
ax = plotter.subplots[i,j]
ax.set_xlim(l)
ax.xaxis.set_major_locator(_get_default_locator(prune))
ax.xaxis.set_minor_locator(AutoMinorLocator())
for i in range(j):
ax = plotter.subplots[j,i]
ax.set_ylim(l)
ax.yaxis.set_major_locator(_get_default_locator(prune))
ax.yaxis.set_minor_locator(AutoMinorLocator())
plotter.fig.canvas.draw() # ensure the new locators take effect
for param in param_plot_lims.keys():
j = params.names.index(param)
# deal with x-axes
axis = plotter.subplots[-1,j].xaxis
xmin, xmax = axis.get_view_interval()
width = xmax - xmin
gap_wanted = width * plotter.settings.tight_gap_fraction
tick = [x for x in axis.get_major_ticks() if xmin <= x.get_loc() <= xmax]
if tick[0].get_loc() - xmin < gap_wanted:
tick[0].label1.set_visible(False)
if xmax - tick[-1].get_loc() < gap_wanted:
tick[-1].label1.set_visible(False)
# deal with y-axes
if j > 0:
axis = plotter.subplots[j,0].yaxis
xmin, xmax = axis.get_view_interval()
width = xmax - xmin
gap_wanted = width * plotter.settings.tight_gap_fraction
tick = [x for x in axis.get_major_ticks() if xmin <= x.get_loc() <= xmax]
if tick[0].get_loc() - xmin < gap_wanted:
tick[0].label1.set_visible(False)
if xmax - tick[-1].get_loc() < gap_wanted:
tick[-1].label1.set_visible(False)
if prior_density:
# only report KL divergence for topmost posterior,
# but plot the priors if available for the other posteriors
for i, posterior in enumerate(self.subset):
self._add_prior_density(plotter, posterior,
ndraws, normalize,
KL_divergence = KL_divergence if i == 0 else False,
KL_base = KL_base,
bootstrap = bootstrap,
n_simulate = kwargs.get('n_simulate'))
if veneer:
self._veneer_spines_ticks(plotter, **kwargs)
if crosshairs:
# only for topmost posterior
self._add_crosshairs(plotter, self.subset_to_plot[0].truths)
if credible_interval_1d: # include nestcheck estimator bootstrap error
self._add_credible_interval(plotter,
self.subset[0],
bootstrap=bootstrap,
n_simulate=kwargs.get('n_simulate'),
annotate=annotate_credible_interval,
annotate_xy=annotate_xy,
sixtyeight=sixtyeight,
ninety=ninety,
compute_all_intervals=compute_all_intervals)
self._plotter = plotter
return plotter
@make_verbose('Adding 1D marginal prior density functions',
'Added 1D marginal prior density functions')
def _add_prior_density(self, plotter, posterior,
ndraws, normalize,
KL_divergence, KL_base,
bootstrap, n_simulate):
""" Crudely estimate the prior density.
Kullback-Leibler divergence estimated in bits for a combined run or
the same run for which the credible intervals are calculated.
"""
run = posterior.subset_to_plot[0]
yield 'Plotting prior for posterior %s...' % posterior.ID
l = posterior.likelihood
if l is None:
return # quietly do nothing
elif not hasattr(l, 'prior'):
return
elif not hasattr(l.prior, 'draw'):
return
elif not callable(l.prior.draw):
return
samples, _ = l.prior.draw(ndraws, transform=True)
color, lw = (run.contours[key] for key in ('color', 'lw'))
quantiles = [None] * 3
with verbose(KL_divergence,
'Estimating 1D marginal KL-divergences in %s' % KL_base,
'Estimated 1D marginal KL-divergences') as condition:
for i, ax in enumerate([plotter.subplots[i,i] \
for i in range(plotter.subplots.shape[0])]):
name = self.params.names[i]
bounds = {name: posterior.bounds[name]}
settings = {'fine_bins': 1024,
'smooth_scale_1D': 0.3,
'boundary_correction_order': 1,
'mult_bias_correction_order': 1} # adopt from posterior settings or take custom input?
idx = l.index(name)
if idx is None: idx = l.prior.index(name)
bcknd = MCSamples(sampler='uncorrelated',
samples=samples[:,idx],
weights=None,
names=[name],
ranges=bounds,
settings=settings)
if normalize:
bcknd.get1DDensity(name).normalize(by='integral',
in_place=True)
x = _np.linspace(ax.xaxis.get_view_interval()[0],
ax.xaxis.get_view_interval()[1],
1000)
ax.plot(x, bcknd.get1DDensity(name).Prob(x),
ls='-.', color=color, lw=lw)
if not condition: continue # go to next iteration if no KL
# a prototype Kullback-Leibler divergence callback
# information in bits
def KL(ns_run, logw):
x = ns_run['theta'][:,posterior.get_index(name)]
w_rel = _np.exp(logw - logw.max())
where = w_rel > run.kde_settings.get('min_weight_ratio',
1.0e-30)
prior = bcknd.get1DDensity(name).Prob(x[where])
p = getdist_kde(x[where], x, w_rel,
ranges=[posterior.bounds[name]],
idx=0,
normalize=normalize,
settings=run.kde_settings)
# Due to spline interpolation, very small densities can be
                    # negative, so manually give a small positive value which
# does not affect KL integral approximation
p[p<=0.0] = p[p>0.0].min()
KL = _np.sum(w_rel[where] \
* (_np.log(p) - _np.log(prior))) \
/_np.sum(w_rel[where])
if KL_base == 'bits':
return KL / _np.log(2.0)
elif KL_base == 'nats':
return KL
else:
raise ValueError('Invalid base for KL-divergence.')
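                # The KL callback above approximates the 1D marginal divergence
                #   D_KL = int p(x) log[ p(x) / prior(x) ] dx
                # with an importance-weighted sum over the posterior samples,
                # where p(x) is the weighted-KDE marginal posterior density.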
if bootstrap:
for j, cred_int in enumerate([0.025, 0.5, 0.975]):
quantiles[j] = run_ci_bootstrap(run.nestcheck_backend,
estimator_list=[KL],
cred_int=cred_int,
n_simulate=n_simulate,
simulate_weights=True,
flip_skew=True)
# KL in bits
interval = r'$D_{\mathrm{KL}}=%.2f_{-%.2f}^{+%.2f}$' \
% (quantiles[1],
quantiles[1] - quantiles[0],
quantiles[2] - quantiles[1])
yield ('%s KL-divergence = %.4f/-%.4f/+%.4f'
% (name,
quantiles[1],
quantiles[1] - quantiles[0],
quantiles[2] - quantiles[1]))
if not rcParams['text.usetex']:
fontsize = plotter.settings.lab_fontsize - 1
else:
fontsize = plotter.settings.lab_fontsize
ax.set_title(interval, color=color,
fontsize=fontsize)
else:
where = run.samples[:,0] > 0.0
ns_run = {'theta': run.samples[where,2:]}
divergence = KL(ns_run, _np.log(run.samples[where,0]))
yield ('%s KL-divergence = %.4f' % (name, divergence))
divergence = (r'$D_{\mathrm{KL}}=%.2f$' % divergence)
if not rcParams['text.usetex']:
fontsize = plotter.settings.lab_fontsize - 1
else:
fontsize = plotter.settings.lab_fontsize
ax.set_title(divergence, color=color,
fontsize=fontsize)
yield None
@make_verbose('Adding 1D marginal credible intervals',
'Added 1D marginal credible intervals')
def _add_credible_interval(self, plotter, posterior, bootstrap, n_simulate,
annotate, annotate_xy, sixtyeight,
ninety, compute_all_intervals):
"""
Estimate 1-:math:`\sigma` credible interval in one-dimension on a
combined run, or if such a run does not exist, on the run with
the specified ID.
Calls :func:`nestcheck.estimators.param_cred` for one-tailed weighted
estimate; two such estimates give a credible interval which is
symmetric in quantiles with respect to the median. Also calls
:func:`nestcheck.error_analysis.run_ci_bootstrap` for
credible interval on quantiles.
:param bool sixtyeight:
Plot the 68% credible interval about the median in 1D plots? If
``False`` plots 95% credible interval about the median -- i.e.,
symmetric quantiles about the median.
"""
diag = [plotter.subplots[i,i] for i in range(plotter.subplots.shape[0])]
run = posterior.subset_to_plot[0]
yield 'Plotting credible regions for posterior %s...' % posterior.ID
color = run.contours['color']
        # the estimator needs a closure so the quantile and parameter index
        # can be fixed per call
def get_estimator(quantile, param_ind):
def estimator(*args, **kwargs):
return param_cred(*args,
probability=quantile,
param_ind=param_ind,
**kwargs)
return estimator
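        # Illustrative sketch (added comment): the closure freezes ``quantile``
        # and ``param_ind`` so nestcheck can invoke the estimator with only
        # (ns_run, logw), e.g.:
        #     median_of_param0 = get_estimator(0.5, 0)(ns_run, logw)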
        if sixtyeight:
            quantiles = [0.159, 0.5, 0.841]
        elif ninety:
            quantiles = [0.05, 0.5, 0.95]
        else:
            quantiles = [0.025, 0.5, 0.975]
def format_CI(name, cred, summary, additional=2, sscript=False):
if len(cred.shape) > 1:
_qs = (cred[1,1],
cred[1,1] - cred[0,1],
cred[2,1] - cred[1,1])
else:
_qs = (cred[1],
cred[1] - cred[0],
cred[2] - cred[1])
_p = max(_precision(_qs[0]), _precision(_qs[1]), _precision(_qs[2]))
_f = '%.' + str(_p + additional) + 'f'
if name: name += ' '
stats = ('%s' % name) + ('CI$_{%i\%%} = ' % summary)
if sscript:
stats += (('%s_{-%s}^{+%s}$' % (_f, _f, _f)) % (_qs[0], _qs[1], _qs[2]))
else:
stats += (('%s/-%s/+%s$' % (_f, _f, _f)) % (_qs[0], _qs[1], _qs[2]))
return stats
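        # Illustrative sketch (added comment): with cred = [q_low, median, q_high],
        # format_CI('mass', cred, 68, sscript=True) renders something like
        # 'mass CI$_{68\%} = 1.00_{-0.19}^{+0.32}$', while sscript=False gives
        # the plain 'median/-lower/+upper' form; decimal places adapt via _precision().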
if bootstrap:
for i, ax in enumerate(diag):
ind = posterior.get_index(self.params.names[i])
def calculate_intervals(quantiles):
cred = _np.zeros((len(quantiles), len(quantiles)), dtype=_np.double)
for j, p in enumerate(quantiles):
for k, q in enumerate(quantiles):
cred[j,k] = run_ci_bootstrap(run.nestcheck_backend,
estimator_list=[get_estimator(p, ind)],
cred_int=q,
n_simulate=n_simulate,
simulate_weights=True,
flip_skew=True)[0]
return cred
cred = calculate_intervals(quantiles)
zorder = max([_.zorder for _ in ax.get_children()]) + 1
ax.axvspan(cred[0,0], cred[0,2], alpha=0.5,
facecolor=color,
edgecolor=color,
linewidth=0,
rasterized=True,
zorder=zorder)
ax.axvspan(cred[2,0], cred[2,2], alpha=0.5,
facecolor=color,
edgecolor=color,
linewidth=0,
rasterized=True,
zorder=zorder)
ax.axvspan(cred[0,2], cred[2,0], alpha=0.25,
facecolor=color,
edgecolor=color,
linewidth=0,
rasterized=True,
zorder=zorder)
if annotate:
stats = format_CI('', # parameter name not needed on plot
cred,
68 if sixtyeight else (90 if ninety else 95),
additional=1,
sscript=True)
title = ax.get_title()
if title:
title = stats.center(30) + '\n' + title.center(30)
else:
title = stats
if not rcParams['text.usetex']:
fontsize = plotter.settings.lab_fontsize - 1
else:
fontsize = plotter.settings.lab_fontsize
ax.set_title(title, color=color,
fontsize=fontsize)
if compute_all_intervals:
yield format_CI(self.params.names[i],
cred,
68 if sixtyeight else (90 if ninety else 95))
if sixtyeight:
yield format_CI(self.params.names[i],
calculate_intervals([0.05, 0.5, 0.95]),
90)
yield format_CI(self.params.names[i],
calculate_intervals([0.025, 0.5, 0.975]),
95)
elif ninety:
yield format_CI(self.params.names[i],
calculate_intervals([0.159, 0.5, 0.841]),
68)
yield format_CI(self.params.names[i],
calculate_intervals([0.025, 0.5, 0.975]),
95)
else:
yield format_CI(self.params.names[i],
calculate_intervals([0.159, 0.5, 0.841]),
68)
yield format_CI(self.params.names[i],
calculate_intervals([0.05, 0.5, 0.95]),
90)
else:
for i, ax in enumerate(diag):
ind = posterior.get_index(self.params.names[i])
def calculate_intervals(quantiles):
cred = _np.zeros(len(quantiles), dtype=_np.double)
for j, p in enumerate(quantiles):
where = run.samples[:,0] > 0.0
_t1 = run.samples[where,2:]
_t2 = _np.log(run.samples[where,0])
cred[j] = get_estimator(p, ind)({'theta': _t1}, _t2)
return cred
cred = calculate_intervals(quantiles)
zorder = max([_.zorder for _ in ax.get_children()]) + 1
ax.axvspan(cred[0], cred[2], alpha=0.25,
facecolor=color,
edgecolor=color,
linewidth=0,
rasterized=True,
zorder=zorder)
if annotate:
stats = format_CI('', # parameter name not needed on plot
cred,
68 if sixtyeight else (90 if ninety else 95),
additional=1,
sscript=True)
title = ax.get_title()
if title:
title = stats.center(30) + '\n' + title.center(30)
else:
title = stats
if not rcParams['text.usetex']:
fontsize = plotter.settings.lab_fontsize - 1
else:
fontsize = plotter.settings.lab_fontsize
ax.set_title(title, color=color,
fontsize=fontsize)
if compute_all_intervals:
yield format_CI(self.params.names[i],
cred,
68 if sixtyeight else (90 if ninety else 95))
if sixtyeight:
yield format_CI(self.params.names[i],
calculate_intervals([0.05, 0.5, 0.95]),
90)
yield format_CI(self.params.names[i],
calculate_intervals([0.025, 0.5, 0.975]),
95)
elif ninety:
yield format_CI(self.params.names[i],
calculate_intervals([0.159, 0.5, 0.841]),
68)
yield format_CI(self.params.names[i],
calculate_intervals([0.025, 0.5, 0.975]),
95)
else:
yield format_CI(self.params.names[i],
calculate_intervals([0.159, 0.5, 0.841]),
68)
yield format_CI(self.params.names[i],
calculate_intervals([0.05, 0.5, 0.95]),
90)
yield None
@staticmethod
@make_verbose('Adding parameter truth crosshairs',
'Added crosshairs')
def _add_crosshairs(plotter, truths):
""" Add parameter crosshairs to triangle plot. """
        spine = next(iter(plotter.subplots[0,0].spines.values()))
lw = spine.get_linewidth()
for i, truth in enumerate(truths):
if truth is not None:
for ax in plotter.subplots[:,i]:
if ax is not None:
ax.axvline(truth, color='black', ls='-', lw=lw)
if i > 0:
for ax in plotter.subplots[i,:i]:
if ax is not None:
ax.axhline(truth, color='black', ls='-', lw=lw)
@staticmethod
@make_verbose('Veneering spines and axis ticks',
'Veneered')
def _veneer_spines_ticks(plotter, lengthen=2.0, embolden=2.0,
**kwargs):
""" Embolden spines, and embolden and lengthen ticks. """
ax = plotter.subplots[0,0]
major_length = ax.xaxis.majorTicks[0].tick1line.get_markersize()
major_length *= lengthen
minor_length = ax.xaxis.minorTicks[0].tick1line.get_markersize()
minor_length *= lengthen
lw = ax.xaxis.majorTicks[0].tick1line.get_markeredgewidth()
lw *= embolden
for i in range(plotter.subplots.shape[0]):
for j in range(i+1):
ax = plotter.subplots[i,j]
ax.tick_params(which='major', colors='black', length=major_length)
ax.tick_params(which='minor', colors='black', length=minor_length)
ax.xaxis.set_tick_params(which='both', width=lw)
ax.yaxis.set_tick_params(which='both', width=lw)
                for spine in ax.spines.values():
spine.set_linewidth(lw)
def _set_line_and_contour_args(self, lw=1.0, alpha=1.0, **kwargs):
""" Match the :mod:`nestcheck` color scheme.
Always assigns reds to a combined run if it is found to exist.
"""
nestcheck_colors = ['darkred', 'darkblue', 'darkgrey', 'darkgreen',
'darkorange']
for run, color in zip(self.subset_to_plot,
nestcheck_colors[:len(self.subset_to_plot)]):
run.lines = {'lw': kwargs.get('lw_1d', lw),
'color': color,
'alpha': alpha}
run.contours = {'lw': lw, 'color': color, 'alpha': alpha}
def KL_divergence(self, base='bits', bootstrap=False,
quantiles=[0.025, 0.5, 0.975],
n_simulate=200, **kwargs):
""" Kullback-Leibler divergence integral jointly for all parameters.
E.g., if you want the interval about the median containing divergence
of 90% of realisations, declare ``quantiles=[0.05,0.5,0.95]``.
"""
if kwargs:
self.set_subset(**kwargs)
nestcheck_bcknds, runs = self._filter_nestcheck_compatible()
def estimator(ns_run, logw):
w_rel = _np.exp(logw - logw.max())
KL = _np.sum(w_rel * ns_run['logl']) / _np.sum(w_rel)
KL -= logsumexp(logw)
if base == 'bits':
return KL / _np.log(2.0)
elif base == 'nats':
return KL
else:
raise ValueError('Invalid base for KL-divergence.')
if bootstrap:
_quantiles = {}
for bcknd, run in zip(nestcheck_bcknds, runs):
_quantiles[run.prepend_ID] = [run_ci_bootstrap(bcknd,
estimator_list=[estimator],
cred_int=q,
n_simulate=n_simulate,
simulate_weights=True,
flip_skew=True)[0] for q in quantiles]
return _quantiles
else:
divergence = {}
            for bcknd, run in zip(nestcheck_bcknds, runs):
                divergence[run.prepend_ID] = estimator(bcknd, get_logw(bcknd))
return divergence
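    # Hedged usage sketch (added comment; ``pp`` is a hypothetical instance of
    # this class with nestcheck-compatible runs attached):
    #
    #     >>> pp.KL_divergence(base='bits', bootstrap=True,
    #     ...                  quantiles=[0.05, 0.5, 0.95])
    #     {'run1': [lower, median, upper]}  # divergence in bits per run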
def evidence_error(self, quantiles=[0.025,0.5,0.975], n_simulate=200,
simulate_weights=True, flip_skew=True, **kwargs):
""" Estimate evidence error for nestcheck-compatible runs.
E.g., if you want the interval about the median containing the evidence
of 90% of realisations, declare ``quantiles=[0.05,0.5,0.95]``.
"""
if kwargs:
self.set_subset(**kwargs)
nestcheck_bcknds, runs = self._filter_nestcheck_compatible()
_quantiles = {}
for bcknd, run in zip(nestcheck_bcknds, runs):
_quantiles[run.prepend_ID] = [run_ci_bootstrap(bcknd,
estimator_list=[logz],
cred_int=q,
n_simulate=n_simulate,
simulate_weights=simulate_weights,
flip_skew=flip_skew)[0] for q in quantiles]
return _quantiles
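    # Hedged usage sketch (added comment): analogous to KL_divergence above,
    # this returns {run.prepend_ID: [log-evidence at each requested quantile]};
    # e.g. quantiles=[0.05, 0.5, 0.95] yields a median-centred 90% interval.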
| 43.239895 | 114 | 0.50253 | 5,007 | 49,207 | 4.798882 | 0.145197 | 0.014983 | 0.002497 | 0.01132 | 0.32279 | 0.276178 | 0.253371 | 0.21862 | 0.203304 | 0.199351 | 0 | 0.018441 | 0.417034 | 49,207 | 1,137 | 115 | 43.277924 | 0.81918 | 0.22318 | 0 | 0.382069 | 0 | 0 | 0.059932 | 0.004293 | 0 | 0 | 0 | 0.00088 | 0 | 1 | 0.023448 | false | 0.002759 | 0.023448 | 0.001379 | 0.074483 | 0.002759 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8ce0da7b64017af4b904ac0b82cfb763c7efb62b | 90,481 | py | Python | venv/Lib/site-packages/music21/braille/segment.py | alimirzazadeh/wolfGANg | 5bf56f7d8e6c1c283edb98bdaecfd5a606b4462c | ["MIT"] | 1 | 2022-01-28T00:03:19.000Z | 2022-01-28T00:03:19.000Z | venv/Lib/site-packages/music21/braille/segment.py | alimirzazadeh/wolfGANg | 5bf56f7d8e6c1c283edb98bdaecfd5a606b4462c | ["MIT"] | null | null | null | venv/Lib/site-packages/music21/braille/segment.py | alimirzazadeh/wolfGANg | 5bf56f7d8e6c1c283edb98bdaecfd5a606b4462c | ["MIT"] | 1 | 2021-11-23T00:49:26.000Z | 2021-11-23T00:49:26.000Z |
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
# Name: segment.py
# Purpose: Division of stream.Part into segments for individual handling
# Authors: Jose Cabal-Ugaz
#
# Copyright: Copyright © 2012 Michael Scott Cuthbert and the music21 Project
# License: BSD, see license.txt
# ------------------------------------------------------------------------------
'''
Inner classes and methods for transcribing musical segments into braille.
This module was made in consultation with the manual "Introduction to Braille
Music Transcription, Second Edition" by Mary Turner De Garmo, 2005. It is
available from the Library of Congress `here <http://www.loc.gov/nls/music/>`_,
and will henceforth be referred to as BMTM.
'''
import collections
import copy
import enum
import unittest
from music21 import bar
from music21 import chord
from music21 import clef
from music21 import dynamics
from music21 import exceptions21
from music21 import environment
from music21 import expressions
from music21 import key
from music21 import layout
from music21 import meter
from music21 import note
from music21 import spanner
from music21 import stream
from music21 import tempo
from music21.prebase import ProtoM21Object
from music21.braille import basic
from music21.braille import lookup
from music21.braille import noteGrouping as ngMod
from music21.braille import text
from music21.braille.objects import BrailleTranscriptionHelper
from music21.common.numberTools import opFrac
symbols = lookup.symbols
environRules = environment.Environment('segment.py')
# ------------------------------------------------------------------------------
class BrailleSegmentException(exceptions21.Music21Exception):
pass
class Affinity(enum.IntEnum):
_LOWEST = -1
SIGNATURE = 3
TTEXT = 4
MMARK = 5
LONG_TEXTEXPR = 6
INACCORD = 7
SPLIT1_NOTEGROUP = 8
NOTEGROUP = 9
SPLIT2_NOTEGROUP = 10
# Class Sort Order -- differs for Braille than for general music21
CSO_NOTE = 10
CSO_REST = 10
CSO_CHORD = 10
CSO_DYNAMIC = 9
CSO_CLEF = 7
CSO_BARLINE = 0
CSO_KEYSIG = 1
CSO_TIMESIG = 2
CSO_TTEXT = 3
CSO_MMARK = 4
CSO_VOICE = 10
# (music21Object, affinity code, class sort order)
affinityCodes = [(note.Note, Affinity.NOTEGROUP, CSO_NOTE),
(note.Rest, Affinity.NOTEGROUP, CSO_REST),
(chord.Chord, Affinity.NOTEGROUP, CSO_CHORD),
(dynamics.Dynamic, Affinity.NOTEGROUP, CSO_DYNAMIC),
(clef.Clef, Affinity.NOTEGROUP, CSO_CLEF),
(bar.Barline, Affinity.SPLIT2_NOTEGROUP, CSO_BARLINE),
(key.KeySignature, Affinity.SIGNATURE, CSO_KEYSIG),
(meter.TimeSignature, Affinity.SIGNATURE, CSO_TIMESIG),
(tempo.TempoText, Affinity.TTEXT, CSO_TTEXT),
(tempo.MetronomeMark, Affinity.MMARK, CSO_MMARK),
(stream.Voice, Affinity.INACCORD, CSO_VOICE)]
affinityNames = {Affinity.SIGNATURE: 'Signature Grouping',
Affinity.TTEXT: 'Tempo Text Grouping',
Affinity.MMARK: 'Metronome Mark Grouping',
Affinity.LONG_TEXTEXPR: 'Long Text Expression Grouping',
Affinity.INACCORD: 'Inaccord Grouping',
Affinity.NOTEGROUP: 'Note Grouping',
Affinity.SPLIT1_NOTEGROUP: 'Split Note Grouping A',
Affinity.SPLIT2_NOTEGROUP: 'Split Note Grouping B',
}
excludeFromBrailleElements = [spanner.Slur,
layout.SystemLayout,
layout.PageLayout,
layout.StaffLayout]
# Uncomment when Python 3.8 is the minimum version
# from typing import TypedDict, Optional
# class GroupingGlobals(TypedDict):
# keySignature: Optional[key.KeySignature]
# timeSignature: Optional[meter.TimeSignature]
# GROUPING_GLOBALS: GroupingGlobals = {...}
GROUPING_GLOBALS = {
'keySignature': None, # will be key.KeySignature(0) on first call
'timeSignature': None, # will be meter.TimeSignature('4/4') on first call
}
GROUPING_DESC_CHORDS = True
GROUPING_SHOW_CLEFS = False
GROUPING_UPPERFIRST_NOTEFINGERING = True
GROUPING_WITHHYPHEN = False
GROUPING_NUMREPEATS = 0
def setGroupingGlobals():
'''
sets defaults for grouping globals. Called first time anything
in Braille is run, but saves creating two expensive objects if never run
'''
if GROUPING_GLOBALS['keySignature'] is None:
# remove noinspection when Python 3.8 is the minimum
# noinspection PyTypeChecker
GROUPING_GLOBALS['keySignature'] = key.KeySignature(0)
if GROUPING_GLOBALS['timeSignature'] is None:
# remove noinspection when Python 3.8 is the minimum
# noinspection PyTypeChecker
GROUPING_GLOBALS['timeSignature'] = meter.TimeSignature('4/4')
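# Hedged usage sketch (added comment, not in the upstream source): the globals
# are created lazily, so repeated calls are cheap and share the same objects.
#
#     >>> setGroupingGlobals()
#     >>> GROUPING_GLOBALS['timeSignature']
#     <music21.meter.TimeSignature 4/4>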
# defaults for BrailleSegments
SEGMENT_CANCEL_OUTGOINGKEYSIG = True
SEGMENT_DUMMYRESTLENGTH = None
SEGMENT_LINELENGTH = 40
SEGMENT_SHOWFIRSTMEASURENUMBER = True
SEGMENT_SHOWHAND = None # override with None, 'left', or 'right'
SEGMENT_SHOWHEADING = True
SEGMENT_SUPPRESSOCTAVEMARKS = False
SEGMENT_ENDHYPHEN = False
SEGMENT_SLURLONGPHRASEWITHBRACKETS = True
SEGMENT_SHOWSHORTSLURSANDTIESTOGETHER = False
SEGMENT_SHOWLONGSLURSANDTIESTOGETHER = False
SEGMENT_MAXNOTESFORSHORTSLUR = 4
MAX_ELEMENTS_IN_SEGMENT = 48 # 8 measures of 6 notes, etc. each
_ThreeDigitNumber = collections.namedtuple('_ThreeDigitNumber', 'hundreds tens ones')
SegmentKey = collections.namedtuple('SegmentKey', 'measure ordinal affinity hand')
SegmentKey.__new__.__defaults__ = (0, 0, None, None)
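# A minimal sketch (added comment, not in the upstream source) of the
# SegmentKey defaults: omitted fields fall back to (0, 0, None, None).
#
#     >>> sk = SegmentKey(measure=2)
#     >>> (sk.measure, sk.ordinal, sk.affinity, sk.hand)
#     (2, 0, None, None)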
# ------------------------------------------------------------------------------
class BrailleElementGrouping(ProtoM21Object):
_DOC_ATTR = {
'keySignature': 'The last :class:`~music21.key.KeySignature` preceding the grouping.',
'timeSignature': 'The last :class:`~music21.meter.TimeSignature` preceding the grouping.',
'descendingChords': '''True if a :class:`~music21.chord.Chord` should be spelled
from highest to lowest pitch
in braille, False if the opposite is the case.''',
'showClefSigns': '''If True, clef signs are shown in braille.
Representation of music in braille is not
dependent upon clefs and staves, so the clef signs would be displayed
for referential or historical purposes.''',
# 'upperFirstInNoteFingering' : 'No documentation.',
'withHyphen': 'If True, this grouping will end with a music hyphen.',
'numRepeats': 'The number of times this grouping is repeated.'
}
def __init__(self, *args):
'''
A BrailleElementGrouping is a superclass of list of objects which should be displayed
without a space in braille.
>>> from music21.braille import segment
>>> bg = segment.BrailleElementGrouping()
>>> bg.append(note.Note('C4'))
>>> bg.append(note.Note('D4'))
>>> bg.append(note.Rest())
>>> bg.append(note.Note('F4'))
>>> bg
<music21.braille.segment.BrailleElementGrouping [<music21.note.Note C>,
<music21.note.Note D>, <music21.note.Rest rest>, <music21.note.Note F>]>
>>> print(bg)
<music21.note.Note C>
<music21.note.Note D>
<music21.note.Rest rest>
<music21.note.Note F>
These are the defaults and they are shared across all objects...
>>> bg.keySignature
<music21.key.KeySignature of no sharps or flats>
>>> bg.timeSignature
<music21.meter.TimeSignature 4/4>
>>> bg.descendingChords
True
>>> bg.showClefSigns
False
>>> bg.upperFirstInNoteFingering
True
>>> bg.withHyphen
False
>>> bg.numRepeats
0
'''
super().__init__()
self.internalList = list(*args)
setGroupingGlobals()
self.keySignature = GROUPING_GLOBALS['keySignature']
self.timeSignature = GROUPING_GLOBALS['timeSignature']
self.descendingChords = GROUPING_DESC_CHORDS
self.showClefSigns = GROUPING_SHOW_CLEFS
self.upperFirstInNoteFingering = GROUPING_UPPERFIRST_NOTEFINGERING
self.withHyphen = GROUPING_WITHHYPHEN # False
self.numRepeats = GROUPING_NUMREPEATS
def __getitem__(self, item):
return self.internalList[item]
def __setitem__(self, pos, item):
self.internalList[pos] = item
def __len__(self):
return len(self.internalList)
def __getattr__(self, attr):
if attr == 'internalList':
raise AttributeError('internalList not defined yet')
return getattr(self.internalList, attr)
def __str__(self):
'''
        Return a Unicode braille representation
of each object in the BrailleElementGrouping.
'''
allObjects = []
for obj in self:
if isinstance(obj, stream.Voice):
for obj2 in obj:
try:
allObjects.append('\n'.join(obj2.editorial.brailleEnglish))
except (AttributeError, TypeError):
allObjects.append(str(obj2))
else:
try:
allObjects.append('\n'.join(obj.editorial.brailleEnglish))
except (AttributeError, TypeError):
allObjects.append(str(obj))
if self.numRepeats > 0:
allObjects.append(f'** Grouping x {self.numRepeats + 1} **')
if self.withHyphen is True:
allObjects.append(f'music hyphen {lookup.symbols["music_hyphen"]}')
out = '\n'.join(allObjects)
return out
def _reprInternal(self):
return repr(self.internalList)
class BrailleSegment(text.BrailleText):
_DOC_ATTR = {
'cancelOutgoingKeySig': '''If True, the previous key signature should be
cancelled immediately before a new key signature is encountered.''',
'dummyRestLength': '''For a given positive integer n, adds n "dummy rests"
near the beginning of a segment. Designed for test purposes, as they
are used to demonstrate measure division at the end of braille lines.''',
'lineLength': '''The maximum amount of braille characters that should be
present in a line. The standard is 40 characters.''',
'showFirstMeasureNumber': '''If True, then a measure number is shown
following the heading (if applicable) and preceding the music.''',
'showHand': '''If set to "right" or "left", shows the corresponding
hand sign at the beginning of the first line.''',
'showHeading': '''If True, then a braille heading is displayed.
See :meth:`~music21.braille.basic.transcribeHeading`
for more details on headings.''',
'suppressOctaveMarks': '''If True, then all octave marks are suppressed.
Designed for test purposes, as octave marks were not presented
until Chapter 7 of BMTM.''',
'endHyphen': '''If True, then the last
:class:`~music21.braille.segment.BrailleElementGrouping` of this
segment will be followed by a music hyphen.
The last grouping is incomplete, because a segment
break occurred in the middle of a measure.''',
'beginsMidMeasure': '''If True, then the initial measure number of this
segment should be followed by a dot. This segment
is starting in the middle of a measure.'''
}
def __init__(self):
'''
A segment is "a group of measures occupying more than one braille line."
Music is divided into segments so as to "present the music to the reader
in a meaningful manner and to give him convenient reference points to
use in memorization" (BMTM, 71).
>>> brailleSeg = braille.segment.BrailleSegment()
>>> brailleSeg.cancelOutgoingKeySig
True
>>> brailleSeg.dummyRestLength
>>> brailleSeg.lineLength
40
>>> brailleSeg.showFirstMeasureNumber
True
Possible showHand values are None, 'right', 'left':
>>> brailleSeg.showHand is None
True
>>> brailleSeg.showHeading
True
>>> brailleSeg.suppressOctaveMarks
False
>>> brailleSeg.endHyphen
False
>>> brailleSeg.beginsMidMeasure
False
A BrailleSegment is a type of defaultdict that returns a BrailleElementGrouping
when a key is missing.
>>> len(brailleSeg.keys())
0
>>> beg = brailleSeg[braille.segment.SegmentKey(4, 1, 9)]
>>> type(beg) is braille.segment.BrailleElementGrouping
True
Of course, creating random keys like this will have consequences:
>>> print(str(brailleSeg))
---begin segment---
<music21.braille.segment BrailleSegment>
Measure 4, Note Grouping 2:
<BLANKLINE>
===
---end segment---
'''
super().__init__(lineLength=SEGMENT_LINELENGTH)
self._groupingDict = {}
self.groupingKeysToProcess = None
self.currentGroupingKey = None
self.lastNote = None
self.previousGroupingKey = None
self.cancelOutgoingKeySig = SEGMENT_CANCEL_OUTGOINGKEYSIG
self.dummyRestLength = SEGMENT_DUMMYRESTLENGTH
self.showFirstMeasureNumber = SEGMENT_SHOWFIRSTMEASURENUMBER
self.showHand = SEGMENT_SHOWHAND
self.showHeading = SEGMENT_SHOWHEADING
self.suppressOctaveMarks = SEGMENT_SUPPRESSOCTAVEMARKS
self.endHyphen = SEGMENT_ENDHYPHEN
self.beginsMidMeasure = False
def __setitem__(self, item, value):
self._groupingDict[item] = value
def __getitem__(self, item):
if item not in self._groupingDict:
self._groupingDict[item] = BrailleElementGrouping()
return self._groupingDict[item]
    def __delitem__(self, item):
        if item not in self._groupingDict:
            raise KeyError(f'No item {item!r} in Segment')
        del self._groupingDict[item]
def __getattr__(self, item):
return getattr(self._groupingDict, item)
def __contains__(self, item):
return item in self._groupingDict
def __iter__(self):
return iter(self._groupingDict)
def __len__(self):
return len(self._groupingDict)
@property
def brailleText(self):
'''
Returns the string from the BrailleText object
'''
return text.BrailleText.__str__(self)
def __str__(self):
name = '<music21.braille.segment BrailleSegment>'
allItems = sorted(self.items())
allKeys = []
allGroupings = []
# noinspection PyArgumentList
prevKey = SegmentKey() # defaults are defined.
for (itemKey, grouping) in allItems:
try:
if prevKey.affinity == Affinity.SPLIT1_NOTEGROUP:
prevKey = itemKey
continue
except TypeError:
pass
allKeys.append('Measure {0}, {1} {2}:\n'.format(itemKey.measure,
affinityNames[itemKey.affinity],
itemKey.ordinal + 1))
gStr = str(grouping)
allGroupings.append(gStr)
prevKey = itemKey
allElementGroupings = '\n'.join([''.join([k, g, '\n==='])
for (k, g) in list(zip(allKeys, allGroupings))])
out = '\n'.join(['---begin segment---',
name,
allElementGroupings,
'---end segment---'])
return out
def transcribe(self):
'''
transcribes all of the noteGroupings in this dict by:
first transcribing the Heading (if applicable)
then the Measure Number
then adds appropriate numbers of dummyRests
then adds the Rest of the Note Groupings
returns brailleText
'''
# noinspection PyAttributeOutsideInit
self.groupingKeysToProcess = list(sorted(self.keys()))
if self.showHeading:
self.extractHeading() # Heading
if self.showFirstMeasureNumber:
self.extractMeasureNumber() # Measure Number
if self.dummyRestLength is not None:
self.addDummyRests() # Dummy Rests
self.previousGroupingKey = None
while self.groupingKeysToProcess:
# noinspection PyAttributeOutsideInit
self.currentGroupingKey = self.groupingKeysToProcess.pop(0)
cgkAffinityGroup = self.currentGroupingKey.affinity
if cgkAffinityGroup == Affinity.NOTEGROUP:
self.extractNoteGrouping() # Note Grouping
elif cgkAffinityGroup == Affinity.SIGNATURE:
self.extractSignatureGrouping() # Signature(s) Grouping
elif cgkAffinityGroup == Affinity.LONG_TEXTEXPR:
self.extractLongExpressionGrouping() # Long Expression(s) Grouping
# elif cgkAffinityGroup == Affinity.INACCORD:
# self.extractInaccordGrouping() # In Accord Grouping
elif cgkAffinityGroup == Affinity.TTEXT:
self.extractTempoTextGrouping() # Tempo Text Grouping
# noinspection PyAttributeOutsideInit
self.previousGroupingKey = self.currentGroupingKey
return self.brailleText
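    # Hedged usage sketch (added comment, not in the upstream source): a
    # segment is normally populated by getRawSegments() and then rendered:
    #
    #     >>> seg = BrailleSegment()
    #     >>> seg[SegmentKey(1, 0, Affinity.NOTEGROUP)].append(note.Note('C4'))
    #     >>> brailleUnicode = seg.transcribe()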
def addDummyRests(self):
'''
Adds as many dummy rests as self.dummyRestLength to the signatures of
brailleText
>>> seg = braille.segment.BrailleSegment()
>>> seg.dummyRestLength = 4
>>> print(braille.lookup.rests['dummy'])
⠄
>>> seg.addDummyRests()
>>> print(seg.brailleText)
⠄⠄⠄⠄
'''
dummyRests = [self.dummyRestLength * lookup.rests['dummy']]
self.addSignatures(''.join(dummyRests))
def extractMeasureNumber(self):
'''
Adds a measure number from the segmentKey needing processing
>>> segKey = braille.segment.SegmentKey(measure=4, ordinal=1, affinity=9)
>>> seg = braille.segment.BrailleSegment()
Initialize a new Key
>>> type(seg[segKey])
<class 'music21.braille.segment.BrailleElementGrouping'>
>>> seg.extractMeasureNumber()
>>> print(seg.brailleText)
⠼⠙
Add a dot to the measure number if the segment begins mid-measure
>>> seg = braille.segment.BrailleSegment()
>>> seg[segKey]
<music21.braille.segment.BrailleElementGrouping []>
>>> seg.beginsMidMeasure = True
>>> seg.extractMeasureNumber()
>>> print(seg.brailleText)
⠼⠙⠄
'''
gkp = self.groupingKeysToProcess or sorted(self.keys())
firstSegmentKey = gkp[0]
initMeasureNumber = firstSegmentKey.measure
brailleNumber = basic.numberToBraille(initMeasureNumber)
if self.beginsMidMeasure:
brailleNumber += symbols['dot']
self.addMeasureNumber(brailleNumber)
def extractHeading(self):
'''
        Extract a :class:`~music21.key.KeySignature`, :class:`~music21.meter.TimeSignature`,
        :class:`~music21.tempo.TempoText`, and :class:`~music21.tempo.MetronomeMark` and
        add an appropriate braille heading to this object's brailleText.
'''
keySignature = None
timeSignature = None
tempoText = None
metronomeMark = None
# find the first keySignature and timeSignature...
groupingKeysToProcess = self.groupingKeysToProcess or sorted(self.keys())
while groupingKeysToProcess:
if groupingKeysToProcess[0].affinity > Affinity.MMARK:
break
cgk = groupingKeysToProcess.pop(0) # cgk = currentGroupingKey
cgkAffinityGroup = cgk.affinity
currentBrailleGrouping = self._groupingDict.get(cgk) # currentGrouping...
if cgkAffinityGroup == Affinity.SIGNATURE:
if len(currentBrailleGrouping) >= 2:
keySignature, timeSignature = (currentBrailleGrouping[0],
currentBrailleGrouping[1])
elif len(currentBrailleGrouping) == 1:
keyOrTimeSig = currentBrailleGrouping[0]
if isinstance(keyOrTimeSig, key.KeySignature):
keySignature = keyOrTimeSig
else:
timeSignature = keyOrTimeSig
elif cgkAffinityGroup == Affinity.TTEXT:
tempoText = currentBrailleGrouping[0]
elif cgkAffinityGroup == Affinity.MMARK:
metronomeMark = currentBrailleGrouping[0]
if any([keySignature, timeSignature, tempoText, metronomeMark]):
brailleHeading = basic.transcribeHeading(
keySignature,
timeSignature,
tempoText,
metronomeMark,
maxLineLength=self.lineLength
)
self.addHeading(brailleHeading)
# def extractInaccordGrouping(self):
# inaccords = self._groupingDict.get(self.currentGroupingKey)
# voice_trans = []
# for music21Voice in inaccords:
# noteGrouping = extractBrailleElements(music21Voice)
# noteGrouping.descendingChords = inaccords.descendingChords
# noteGrouping.showClefSigns = inaccords.showClefSigns
# noteGrouping.upperFirstInNoteFingering = inaccords.upperFirstInNoteFingering
# voice_trans.append(ngMod.transcribeNoteGrouping(noteGrouping))
# brailleInaccord = symbols['full_inaccord'].join(voice_trans)
# self.addInaccord(brailleInaccord)
def extractLongExpressionGrouping(self):
'''
Extract the Long Expression that is in the ElementGrouping in cgk
and add it to brailleText.
'''
cgk = self.currentGroupingKey
currentElementGrouping = self._groupingDict.get(cgk)
longTextExpression = currentElementGrouping[0]
longExprInBraille = basic.textExpressionToBraille(longTextExpression)
self.addLongExpression(longExprInBraille)
def showLeadingOctaveFromNoteGrouping(self, noteGrouping):
'''
Given a noteGrouping, should we show the octave symbol?
>>> n1 = note.Note('C1')
>>> n2 = note.Note('D1')
>>> n3 = note.Note('E1')
>>> beg1 = braille.segment.BrailleElementGrouping([n1, n2, n3])
>>> bs1 = braille.segment.BrailleSegment()
This is True because last note is None
>>> bs1.lastNote is None
True
>>> bs1.showLeadingOctaveFromNoteGrouping(beg1)
True
But if we run it again, now we have a note within a fourth, so we do not
need to show the octave:
>>> bs1.lastNote
<music21.note.Note E>
>>> bs1.showLeadingOctaveFromNoteGrouping(beg1)
False
        And that is true no matter how many times we call it on the same
        BrailleElementGrouping:
>>> bs1.showLeadingOctaveFromNoteGrouping(beg1)
False
But if we give a new, much higher BrailleElementGrouping, we
will see octave marks again.
>>> nHigh1 = note.Note('C6')
>>> nHigh2 = note.Note('D6')
>>> beg2 = braille.segment.BrailleElementGrouping([nHigh1, nHigh2])
>>> bs1.showLeadingOctaveFromNoteGrouping(beg2)
True
But if we set `self.suppressOctaveMarks` to True, we won't see any
when we switch back to beg1:
>>> bs1.suppressOctaveMarks = True
>>> bs1.showLeadingOctaveFromNoteGrouping(beg2)
False
We also show octaves if for some reason two noteGroups in the same measure have
different BrailleElementGroupings keyed to consecutive ordinals. The code simulates
that situation.
>>> bs1.suppressOctaveMarks = False
>>> bs1.previousGroupingKey = braille.segment.SegmentKey(measure=3, ordinal=1,
... affinity=braille.segment.Affinity.NOTEGROUP)
>>> bs1.currentGroupingKey = braille.segment.SegmentKey(measure=3, ordinal=2,
... affinity=braille.segment.Affinity.NOTEGROUP)
>>> bs1.showLeadingOctaveFromNoteGrouping(beg2)
True
>>> bs1.showLeadingOctaveFromNoteGrouping(beg1)
True
>>> bs1.showLeadingOctaveFromNoteGrouping(beg1)
True
'''
currentKey = self.currentGroupingKey
previousKey = self.previousGroupingKey
# if the previousKey did not exist
# or if the previousKey was not a collection of notes,
# or if the currentKey is split from the previous key for some reason
# while remaining in the same measure, then the lastNote is irrelevant
if (previousKey is not None
and currentKey is not None):
if (previousKey.affinity != Affinity.NOTEGROUP
or currentKey.affinity != Affinity.NOTEGROUP
or (currentKey.measure == previousKey.measure
and currentKey.ordinal == previousKey.ordinal + 1
and currentKey.hand == previousKey.hand)):
self.lastNote = None
if self.suppressOctaveMarks:
return False
# can't use Filter because noteGrouping is list-like not Stream-like
allNotes = [n for n in noteGrouping if 'Note' in n.classes]
showLeadingOctave = True
if allNotes:
if self.lastNote is not None:
firstNote = allNotes[0]
showLeadingOctave = basic.showOctaveWithNote(self.lastNote, firstNote)
# noinspection PyAttributeOutsideInit
self.lastNote = allNotes[-1] # probably should not be here...
return showLeadingOctave
def needsSplitToFit(self, brailleNoteGrouping) -> bool:
'''
Returns boolean on whether a note grouping needs to be split in order to fit.
Generally a noteGrouping will need to be split if the amount of space left
is more than 1/4 of the line length and the brailleNoteGrouping cannot fit.
>>> n1 = note.Note('C1')
>>> n2 = note.Note('D1')
>>> n3 = note.Note('E1')
>>> beg1 = braille.segment.BrailleElementGrouping([n1, n2, n3])
>>> seg = braille.segment.BrailleSegment()
>>> seg.needsSplitToFit(beg1)
False
>>> seg.lineLength = 10
>>> seg.needsSplitToFit(beg1)
True
'''
quarterLineLength = self.lineLength // 4
spaceLeft = self.lineLength - self.currentLine.textLocation
if (spaceLeft > quarterLineLength
and len(brailleNoteGrouping) > quarterLineLength):
return True
else:
return False
def splitNoteGroupingAndTranscribe(self,
noteGrouping,
showLeadingOctaveOnFirst=False,
addSpaceToFirst=False):
'''
Take a noteGrouping and split it at a logical place,
returning braille transcriptions of each section.
'''
transcriber = ngMod.NoteGroupingTranscriber()
beatDivisionOffset = 0
REASONABLE_LIMIT = 10
(splitNoteGroupA, splitNoteGroupB) = (None, None)
brailleNoteGroupingA = None
while beatDivisionOffset < REASONABLE_LIMIT:
(splitNoteGroupA, splitNoteGroupB) = splitNoteGrouping(
noteGrouping,
beatDivisionOffset=beatDivisionOffset
)
transcriber.showLeadingOctave = showLeadingOctaveOnFirst
splitNoteGroupA.withHyphen = True
brailleNoteGroupingA = transcriber.transcribeGroup(splitNoteGroupA)
if self.currentLine.canAppend(brailleNoteGroupingA, addSpace=addSpaceToFirst):
break
beatDivisionOffset += 1
continue
showLeadingOctave = not self.suppressOctaveMarks
transcriber.showLeadingOctave = showLeadingOctave
brailleNoteGroupingB = transcriber.transcribeGroup(splitNoteGroupB)
currentKey = self.currentGroupingKey
# noinspection PyProtectedMember
aKey = currentKey._replace(affinity=Affinity.SPLIT1_NOTEGROUP)
# noinspection PyProtectedMember
bKey = currentKey._replace(affinity=Affinity.SPLIT2_NOTEGROUP)
self[aKey] = splitNoteGroupA
self[bKey] = splitNoteGroupB
return (brailleNoteGroupingA, brailleNoteGroupingB)
def extractNoteGrouping(self):
'''
Fundamentally important method that adds a noteGrouping to the braille line.
'''
transcriber = ngMod.NoteGroupingTranscriber()
noteGrouping = self._groupingDict.get(self.currentGroupingKey)
showLeadingOctave = self.showLeadingOctaveFromNoteGrouping(noteGrouping)
transcriber.showLeadingOctave = showLeadingOctave
brailleNoteGrouping = transcriber.transcribeGroup(noteGrouping)
addSpace = self.optionalAddKeyboardSymbolsAndDots(brailleNoteGrouping)
if self.currentLine.canAppend(brailleNoteGrouping, addSpace=addSpace):
self.currentLine.append(brailleNoteGrouping, addSpace=addSpace)
else:
if self.needsSplitToFit(brailleNoteGrouping):
# there is too much space left in the current line to leave it blank
# but not enough space left to insert the current brailleNoteGrouping
# hence -- let us split this noteGrouping into two noteGroupings.
bngA, bngB = self.splitNoteGroupingAndTranscribe(noteGrouping,
showLeadingOctave,
addSpace)
self.currentLine.append(bngA, addSpace=addSpace)
self.addToNewLine(bngB)
else:
# not enough space left on this line to use, so
# move the whole group to another line
if showLeadingOctave is False and self.suppressOctaveMarks is False:
# if we didn't show the octave before, retranscribe with the octave
# displayed
transcriber.showLeadingOctave = True
brailleNoteGrouping = transcriber.transcribeGroup(noteGrouping)
# if not forceHyphen:
self.currentLine.lastHyphenToSpace()
self.addToNewLine(brailleNoteGrouping)
self.addRepeatSymbols(noteGrouping.numRepeats)
def addRepeatSymbols(self, repeatTimes):
'''
Adds the appropriate number of repeat symbols, following DeGarmo chapter 17.
>>> seg = braille.segment.BrailleSegment()
>>> seg.addRepeatSymbols(0)
>>> print(seg.brailleText)
>>> seg.addRepeatSymbols(1)
>>> print(seg.brailleText)
⠶
>>> seg = braille.segment.BrailleSegment()
>>> seg.addRepeatSymbols(2)
>>> print(seg.brailleText)
⠶⠀⠶
>>> seg = braille.segment.BrailleSegment()
>>> seg.addRepeatSymbols(3)
>>> print(seg.brailleText)
⠶⠼⠉
Does not yet handle situations beginning with Example 17-6 (repeats at
different octaves), and further
'''
if 0 < repeatTimes < 3:
for unused_repeatCounter in range(repeatTimes):
self.addSignatures(symbols['repeat'])
elif repeatTimes >= 3: # 17.3 -- repeat plus number.
self.addSignatures(symbols['repeat'] + basic.numberToBraille(repeatTimes))
# noinspection PyAttributeOutsideInit
self.lastNote = None # this is set up to force an octave symbol on next note
def extractSignatureGrouping(self):
'''
Extracts a key signature, time signature, and possibly an outgoing key signature
from the currentGroupingKey and adds it to the BrailleText object.
'''
keySignature = None
timeSignature = None
cgk = self.currentGroupingKey
noteGrouping = self._groupingDict.get(cgk)
if len(noteGrouping) >= 2:
keySignature, timeSignature = noteGrouping[0], noteGrouping[1]
elif len(noteGrouping) == 1:
keyOrTimeSig = self._groupingDict.get(self.currentGroupingKey)[0]
if isinstance(keyOrTimeSig, key.KeySignature):
keySignature = keyOrTimeSig
else:
timeSignature = keyOrTimeSig
outgoingKeySig = None
if self.cancelOutgoingKeySig and keySignature is not None:
try:
outgoingKeySig = keySignature.outgoingKeySig
except AttributeError:
pass
brailleSig = basic.transcribeSignatures(keySignature, timeSignature, outgoingKeySig)
if brailleSig != '':
self.addSignatures(brailleSig)
def extractTempoTextGrouping(self):
'''
extracts a tempo text and processes it...
'''
self.groupingKeysToProcess.insert(0, self.currentGroupingKey)
if self.previousGroupingKey.affinity == Affinity.SIGNATURE:
self.groupingKeysToProcess.insert(0, self.previousGroupingKey)
self.extractHeading()
self.extractMeasureNumber()
def consolidate(self):
'''
        Puts together certain types of elements according to the affinity of
        their key (that is, whether or not it equals Affinity.NOTEGROUP).
>>> SK = braille.segment.SegmentKey
>>> BS1 = braille.segment.BrailleSegment()
>>> BS1[SK(ordinal=0, affinity=2)] = ['hi', 'hello', 'there']
>>> BS1[SK(ordinal=1, affinity=9)] = ['these', 'get']
>>> BS1[SK(ordinal=2, affinity=9)] = ['put', 'together']
>>> BS1[SK(ordinal=3, affinity=4)] = ['in', 'new', 'group']
>>> BS1[SK(ordinal=4, affinity=9)] = ['with', 'the', 'previous']
>>> BS2 = BS1.consolidate()
>>> for (groupingKey, groupingList) in sorted(BS2.items()):
... print(groupingKey, groupingList)
SegmentKey(measure=0, ordinal=0, affinity=2, hand=None) ['hi', 'hello', 'there']
SegmentKey(measure=0, ordinal=1, affinity=9, hand=None) these
get
put
together
SegmentKey(measure=0, ordinal=3, affinity=4, hand=None) ['in', 'new', 'group']
SegmentKey(measure=0, ordinal=4, affinity=9, hand=None) with
the
previous
'''
newSegment = BrailleSegment()
pngKey = None
for (groupingKey, groupingList) in sorted(self.items()):
if groupingKey.affinity != Affinity.NOTEGROUP:
newSegment[groupingKey] = groupingList
pngKey = None
else:
if pngKey is None:
pngKey = groupingKey
for item in groupingList:
newSegment[pngKey].append(item)
return newSegment
def addGroupingAttributes(self, **partKeywords):
'''
Modifies the attributes of all :class:`~music21.braille.segment.BrailleElementGrouping`
instances in a list of :class:`~music21.braille.segment.BrailleSegment` instances. The
necessary information is retrieved both by passing in partKeywords as an argument and
by taking into account the linear progression of the groupings and segments.
'''
currentKeySig = key.KeySignature(0)
currentTimeSig = meter.TimeSignature('4/4')
descendingChords = GROUPING_DESC_CHORDS
showClefSigns = GROUPING_SHOW_CLEFS
upperFirstInNoteFingering = GROUPING_UPPERFIRST_NOTEFINGERING
if 'showClefSigns' in partKeywords:
showClefSigns = partKeywords['showClefSigns']
if 'upperFirstInNoteFingering' in partKeywords:
upperFirstInNoteFingering = partKeywords['upperFirstInNoteFingering']
if 'descendingChords' in partKeywords:
descendingChords = partKeywords['descendingChords']
allGroupings = sorted(self.items())
(previousKey, previousList) = (None, None)
for (groupingKey, groupingList) in allGroupings:
if previousKey is not None:
if groupingKey.ordinal >= 1:
previousList.withHyphen = True
if (previousKey.ordinal == 0
and previousKey.affinity == Affinity.NOTEGROUP
and groupingKey.ordinal == 0
and groupingKey.affinity == Affinity.NOTEGROUP):
if isinstance(previousList[0], clef.Clef):
isRepetition = areGroupingsIdentical(previousList[1:], groupingList)
else:
isRepetition = areGroupingsIdentical(previousList, groupingList)
if isRepetition:
previousList.numRepeats += 1
del self[groupingKey]
continue
if groupingKey.affinity == Affinity.SIGNATURE:
for brailleElement in groupingList:
if isinstance(brailleElement, meter.TimeSignature):
currentTimeSig = brailleElement
elif isinstance(brailleElement, key.KeySignature):
brailleElement.outgoingKeySig = currentKeySig
currentKeySig = brailleElement
elif groupingKey.affinity == Affinity.NOTEGROUP:
if isinstance(groupingList[0], clef.Clef):
if isinstance(groupingList[0], (clef.TrebleClef, clef.AltoClef)):
descendingChords = True
elif isinstance(groupingList[0], (clef.BassClef, clef.TenorClef)):
descendingChords = False
                # if the grouping's only GeneralNote is a rest, treat it as a
                # whole-measure rest regardless of its notated duration
allGeneralNotes = [n for n in groupingList if isinstance(n, note.GeneralNote)]
if len(allGeneralNotes) == 1 and isinstance(allGeneralNotes[0], note.Rest):
allGeneralNotes[0].fullMeasure = True
groupingList.keySignature = currentKeySig
groupingList.timeSignature = currentTimeSig
groupingList.descendingChords = descendingChords
groupingList.showClefSigns = showClefSigns
groupingList.upperFirstInNoteFingering = upperFirstInNoteFingering
(previousKey, previousList) = (groupingKey, groupingList)
if self.endHyphen:
previousList.withHyphen = True
def addSegmentAttributes(self, **partKeywords):
'''
Modifies the attributes of a :class:`~music21.braille.segment.BrailleSegment`
by passing partKeywords as an argument.
'''
if 'cancelOutgoingKeySig' in partKeywords:
self.cancelOutgoingKeySig = partKeywords['cancelOutgoingKeySig']
if 'dummyRestLength' in partKeywords:
self.dummyRestLength = partKeywords['dummyRestLength']
if 'lineLength' in partKeywords:
self.lineLength = partKeywords['lineLength']
if 'showFirstMeasureNumber' in partKeywords:
self.showFirstMeasureNumber = partKeywords['showFirstMeasureNumber']
if 'showHand' in partKeywords:
self.showHand = partKeywords['showHand']
if 'showHeading' in partKeywords:
self.showHeading = partKeywords['showHeading']
if 'suppressOctaveMarks' in partKeywords:
self.suppressOctaveMarks = partKeywords['suppressOctaveMarks']
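    # Hedged usage sketch (added comment): the keywords mirror the segment
    # attributes one-to-one, e.g.
    #
    #     >>> seg.addSegmentAttributes(lineLength=34, showHeading=False)
    #     >>> (seg.lineLength, seg.showHeading)
    #     (34, False)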
def fixArticulations(self):
'''
Goes through each :class:`~music21.braille.segment.BrailleSegment` and modifies the
list of :attr:`~music21.note.GeneralNote.articulations` of a :class:`~music21.note.Note`
if appropriate. In particular, two rules are applied:
* Doubling rule => If four or more of the same :class:`~music21.articulations.Articulation`
are found in a row, the first instance of the articulation is doubled and the rest are
omitted.
* Staccato, Tenuto rule => "If two repeated notes appear to be tied, but either is marked
staccato or tenuto, they are treated as slurred instead of tied." (BMTM, 112)
'''
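        # Worked example of the doubling rule (added comment, not from the
        # original source): given five consecutive notes all marked staccato,
        # the code below doubles the staccato sign on the first note and strips
        # it from the middle notes, so only notes 1 (doubled) and 5 keep the mark.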
from music21 import articulations
def fixOneArticulation(artic, music21NoteStart, allNotes, noteIndexStart):
articName = artic.name
if articName == 'fingering': # fingerings are not considered articulations...
return
if (isinstance(artic, (articulations.Staccato, articulations.Tenuto))
and music21NoteStart.tie is not None):
if music21NoteStart.tie.type == 'stop':
allNotes[noteIndexStart - 1].tie = None
allNotes[noteIndexStart - 1].shortSlur = True
else:
allNotes[noteIndexStart + 1].tie = None
music21NoteStart.shortSlur = True
music21NoteStart.tie = None
numSequential = 0
for noteIndexContinue in range(noteIndexStart + 1, len(allNotes)):
music21NoteContinue = allNotes[noteIndexContinue]
if articName in [a.name for a in music21NoteContinue.articulations]:
numSequential += 1
continue
break
if numSequential < 3:
return
# else:
# double the articulation on the first note and remove from the next...
music21NoteStart.articulations.append(artic)
for noteIndexContinue in range(noteIndexStart + 1,
noteIndexStart + numSequential):
music21NoteContinue = allNotes[noteIndexContinue]
for artOther in music21NoteContinue.articulations:
if artOther.name == articName:
music21NoteContinue.articulations.remove(artOther)
newSegment = self.consolidate()
noteGroupings = [newSegment[gpKey]
for gpKey in newSegment.keys()
if gpKey.affinity == Affinity.NOTEGROUP]
for noteGrouping in noteGroupings:
allNotes_outer = [n for n in noteGrouping if isinstance(n, note.Note)]
for noteIndexStart_outer in range(len(allNotes_outer)):
music21NoteStart_outer = allNotes_outer[noteIndexStart_outer]
for artic_outer in music21NoteStart_outer.articulations:
fixOneArticulation(
artic_outer,
music21NoteStart_outer,
allNotes_outer,
noteIndexStart_outer
)
class BrailleGrandSegment(BrailleSegment, text.BrailleKeyboard):
'''
A BrailleGrandSegment represents a pair of segments (rightSegment, leftSegment)
representing the right and left hands of a piano staff (or other two-staff object)
'''
def __init__(self):
BrailleSegment.__init__(self)
text.BrailleKeyboard.__init__(self, lineLength=SEGMENT_LINELENGTH)
self.allKeyPairs = []
self.previousGroupingPair = None
self.currentGroupingPair = None
@property
def brailleText(self):
return text.BrailleKeyboard.__str__(self)
def __str__(self):
name = '<music21.braille.segment BrailleGrandSegment>\n==='
allPairs = []
for (rightKey, leftKey) in self.yieldCombinedGroupingKeys():
if rightKey is not None:
rightHeading = 'Measure {0} Right, {1} {2}:\n'.format(
rightKey.measure, affinityNames[rightKey.affinity], rightKey.ordinal + 1)
rightContents = str(self._groupingDict.get(rightKey))
rightFull = ''.join([rightHeading, rightContents])
else:
rightFull = ''
if leftKey is not None:
leftHeading = '\nMeasure {0} Left, {1} {2}:\n'.format(
leftKey.measure, affinityNames[leftKey.affinity], leftKey.ordinal + 1)
leftContents = str(self._groupingDict.get(leftKey))
leftFull = ''.join([leftHeading, leftContents])
else:
leftFull = ''
allPairs.append('\n'.join([rightFull, leftFull, '====\n']))
out = '\n'.join(['---begin grand segment---', name, ''.join(allPairs),
'---end grand segment---'])
return out
def yieldCombinedGroupingKeys(self):
'''
yields all the keys in order as a tuple of (rightKey, leftKey) where
two keys are grouped if they have the same segmentKey except for the hand.
>>> bgs = braille.segment.BrailleGrandSegment()
>>> SegmentKey = braille.segment.SegmentKey # namedtuple
>>> bgs[SegmentKey(1, 1, 1, 'right')] = '1r'
>>> bgs[SegmentKey(1, 1, 1, 'left')] = '1l'
>>> bgs[SegmentKey(1, 2, 3, 'right')] = '2r'
>>> bgs[SegmentKey(1, 2, 4, 'left')] = '3l'
>>> bgs[SegmentKey(2, 1, 9, 'left')] = '4l'
>>> bgs[SegmentKey(2, 1, 9, 'right')] = '4r'
>>> bgs[SegmentKey(3, 1, 9, 'right')] = '5r'
>>> for l, r in bgs.yieldCombinedGroupingKeys():
... (bgs[l], bgs[r])
('1r', '1l')
('2r', <music21.braille.segment.BrailleElementGrouping []>)
(<music21.braille.segment.BrailleElementGrouping []>, '3l')
('4r', '4l')
('5r', <music21.braille.segment.BrailleElementGrouping []>)
'''
def segmentKeySortKey(segmentKey):
'''
sort by measure, then ordinal, then affinity, then hand (r then l)
'''
if segmentKey.hand == 'right':
skH = -1
else:
skH = 1
return (segmentKey.measure, segmentKey.ordinal, segmentKey.affinity, skH)
def matchOther(thisKey_inner, otherKey):
if (thisKey_inner.measure == otherKey.measure
and thisKey_inner.ordinal == otherKey.ordinal
and thisKey_inner.affinity == otherKey.affinity):
return True
else:
return False
storedRight = None
storedLeft = None
for thisKey in sorted(self.keys(), key=segmentKeySortKey):
if thisKey.hand == 'right':
if storedLeft is not None:
if matchOther(thisKey, storedLeft):
yield(thisKey, storedLeft)
elif (thisKey.affinity == Affinity.NOTEGROUP
and matchOther(thisKey._replace(affinity=Affinity.INACCORD), storedLeft)):
# r.h. notegroup goes before an lh inaccord, despite this being out of order
yield(thisKey, storedLeft)
else:
yield(None, storedLeft)
storedRight = thisKey
storedLeft = None
else:
storedRight = thisKey
elif thisKey.hand == 'left':
if storedRight is not None:
if matchOther(thisKey, storedRight):
yield(storedRight, thisKey)
elif storedRight.affinity < Affinity.INACCORD:
yield(storedRight, None)
yield(None, thisKey)
else:
yield(storedRight, None)
storedLeft = thisKey
storedRight = None
else:
storedLeft = thisKey
if storedRight:
yield (storedRight, None)
if storedLeft:
yield (None, storedLeft)
# def combineGroupingKeys(self, rightSegment, leftSegment):
# # return list(self.yieldCombinedGroupingKeys())
#
# groupingKeysRight = sorted(rightSegment.keys())
# groupingKeysLeft = sorted(leftSegment.keys())
# combinedGroupingKeys = []
#
# while groupingKeysRight:
# gkRight = groupingKeysRight.pop(0)
# try:
# groupingKeysLeft.remove(gkRight)
# combinedGroupingKeys.append((gkRight, gkRight))
# except ValueError:
# if gkRight.affinity < Affinity.INACCORD:
# combinedGroupingKeys.append((gkRight, None))
# else:
# if gkRight.affinity == Affinity.INACCORD:
# gkLeft = gkRight._replace(affinity=gkRight.affinity + 1)
# else:
# gkLeft = gkRight._replace(affinity=gkRight.affinity - 1)
# try:
# groupingKeysLeft.remove(gkLeft)
# except ValueError:
# raise BrailleSegmentException(
# 'Misaligned braille groupings: ' +
# 'groupingKeyLeft was %s' % gkLeft +
# 'groupingKeyRight was %s' % gkRight +
# 'rightSegment was %s, leftSegment was %s' %
# (rightSegment, leftSegment))
#
# try:
# combinedGroupingTuple = (gkRight, gkLeft)
# combinedGroupingKeys.append(combinedGroupingTuple)
# except ValueError:
# raise BrailleSegmentException(
# 'Misaligned braille groupings could not append combinedGroupingKeys')
#
#
# while groupingKeysLeft:
# gkLeft = groupingKeysLeft.pop(0)
# combinedGroupingTuple = (None, gkLeft)
# combinedGroupingKeys.append(combinedGroupingTuple)
#
# return combinedGroupingKeys
def transcribe(self):
'''
Returns the BrailleText from the combined grouping keys
'''
self.allKeyPairs = list(self.yieldCombinedGroupingKeys())
lastPair = self.allKeyPairs[-1]
highestMeasure = lastPair[0].measure if lastPair[0] else lastPair[1].measure
self.highestMeasureNumberLength = len(str(highestMeasure))
self.extractHeading() # Heading
self.currentGroupingPair = None
while self.allKeyPairs:
self.previousGroupingPair = self.currentGroupingPair
self.currentGroupingPair = self.allKeyPairs.pop(0)
(rightKey, leftKey) = self.currentGroupingPair
if ((rightKey is not None and rightKey.affinity >= Affinity.INACCORD)
or (leftKey is not None and leftKey.affinity >= Affinity.INACCORD)):
self.extractNoteGrouping() # Note or Inaccord Grouping
# elif (rightKey.affinity == Affinity.SIGNATURE
# or leftKey.affinity == Affinity.SIGNATURE):
# self.extractSignatureGrouping() # Signature Grouping
# elif (rightKey.affinity == Affinity.LONG_TEXTEXPR
# or leftKey.affinity == Affinity.LONG_TEXTEXPR):
# self.extractLongExpressionGrouping() # Long Expression Grouping
# elif rightKey.affinity == Affinity.TTEXT or leftKey.affinity == Affinity.TTEXT:
# self.extractTempoTextGrouping() # Tempo Text Grouping
return self.brailleText
def extractHeading(self):
'''
Finds KeySignatures, TimeSignatures, TempoText, and Metronome Marks
within the keyPairs, and removes some from allKeyPairs.
'''
keySignature = None
timeSignature = None
tempoText = None
metronomeMark = None
while True:
(rightKey, leftKey) = self.allKeyPairs[0]
useKey = rightKey
            if rightKey is not None:
                useElement = self._groupingDict.get(rightKey)
            else:
                # fall back to the left hand when there is no right-hand key
                useElement = self._groupingDict.get(leftKey)
                useKey = leftKey
if useKey.affinity > Affinity.MMARK:
break
self.allKeyPairs.pop(0)
if useKey.affinity == Affinity.SIGNATURE:
try:
keySignature, timeSignature = useElement[0], useElement[1]
except IndexError:
                    if isinstance(useElement[0], key.KeySignature):
                        keySignature = useElement[0]
                    else:
                        timeSignature = useElement[0]
elif useKey.affinity == Affinity.TTEXT:
tempoText = useElement[0]
elif useKey.affinity == Affinity.MMARK:
metronomeMark = useElement[0]
try:
brailleHeading = basic.transcribeHeading(
keySignature,
timeSignature,
tempoText,
metronomeMark,
maxLineLength=self.lineLength
)
self.addHeading(brailleHeading)
except basic.BrailleBasicException as bbe:
if bbe.args[0] != 'No heading can be made.':
raise bbe
def extractNoteGrouping(self):
(rightKey, leftKey) = self.currentGroupingPair
if rightKey:
mNum = rightKey.measure
elif leftKey:
mNum = leftKey.measure
else:
raise ValueError('Measure must be defined for leftKey or rightKey')
currentMeasureNumber = basic.numberToBraille(mNum, withNumberSign=False)
def brailleFromKey(rightOrLeftKey):
if rightOrLeftKey is not None and rightOrLeftKey.affinity == Affinity.INACCORD:
inaccords = self._groupingDict.get(rightOrLeftKey)
voice_trans = []
for music21Voice in inaccords:
noteGrouping = extractBrailleElements(music21Voice)
noteGrouping.descendingChords = inaccords.descendingChords
noteGrouping.showClefSigns = inaccords.showClefSigns
noteGrouping.upperFirstInNoteFingering = inaccords.upperFirstInNoteFingering
voice_trans.append(ngMod.transcribeNoteGrouping(noteGrouping))
brailleStr = symbols['full_inaccord'].join(voice_trans)
elif rightOrLeftKey is not None:
brailleStr = ngMod.transcribeNoteGrouping(self._groupingDict.get(rightOrLeftKey))
else:
brailleStr = ''
return brailleStr
rhBraille = brailleFromKey(rightKey)
lhBraille = brailleFromKey(leftKey)
self.addNoteGroupings(currentMeasureNumber, rhBraille, lhBraille)
# # noinspection PyUnusedLocal
# def extractSignatureGrouping(self, brailleKeyboard):
# pass
#
# # noinspection PyUnusedLocal
# def extractLongExpressionGrouping(self, brailleKeyboard):
# pass
#
# # noinspection PyUnusedLocal
# def extractTempoTextGrouping(self, brailleKeyboard):
# pass
# ------------------------------------------------------------------------------
# Grouping + Segment creation from music21.stream Part
def findSegments(music21Part, **partKeywords):
'''
    Takes in a :class:`~music21.stream.Part`
    and optional keyword arguments (``partKeywords``).
    Returns a list of :class:`~music21.braille.segment.BrailleSegment` instances.
Five methods get called in the generation of segments:
* :meth:`~music21.braille.segment.prepareSlurredNotes`
* :meth:`~music21.braille.segment.getRawSegments`
* :meth:`~music21.braille.segment.BrailleSegment.addGroupingAttributes`
* :meth:`~music21.braille.segment.BrailleSegment.addSegmentAttributes`
* :meth:`~music21.braille.segment.BrailleSegment.fixArticulations`
>>> from music21.braille import test
>>> example = test.example11_2()
>>> allSegments = braille.segment.findSegments(example)
>>> print(str(allSegments[0]))
---begin segment---
<music21.braille.segment BrailleSegment>
Measure 0, Signature Grouping 1:
<music21.key.KeySignature of 3 flats>
<music21.meter.TimeSignature 4/4>
===
Measure 0, Note Grouping 1:
<music21.clef.TrebleClef>
<music21.note.Note B->
===
Measure 1, Note Grouping 1:
<music21.note.Note G>
<music21.note.Note E->
<music21.note.Note D>
<music21.note.Note E->
===
Measure 2, Note Grouping 1:
<music21.note.Note G>
<music21.note.Note F>
<music21.note.Note E->
===
Measure 3, Note Grouping 1:
<music21.note.Note A->
<music21.note.Note G>
<music21.note.Note C>
<music21.note.Note C>
===
Measure 4, Note Grouping 1:
<music21.note.Note B->
<music21.note.Note B->
===
Measure 5, Note Grouping 1:
<music21.note.Note E->
<music21.note.Note B->
<music21.note.Note A->
<music21.note.Note G>
===
Measure 6, Note Grouping 1:
<music21.note.Note G>
<music21.note.Note F>
<music21.note.Note C>
===
Measure 7, Note Grouping 1:
<music21.note.Note C>
<music21.note.Note F>
<music21.note.Note A->
<music21.note.Note D>
===
Measure 8, Note Grouping 1:
<music21.note.Note E->
music hyphen ⠐
===
---end segment---
Second segment
>>> print(str(allSegments[1]))
---begin segment---
<music21.braille.segment BrailleSegment>
Measure 8, Note Grouping 1:
<music21.note.Note G>
===
Measure 9, Note Grouping 1:
<music21.note.Note G>
<music21.note.Note F>
<music21.note.Note F>
<music21.note.Note F>
===
Measure 10, Note Grouping 1:
<music21.note.Note A->
<music21.note.Note G>
<music21.note.Note B->
===
Measure 11, Note Grouping 1:
<music21.note.Note B->
<music21.note.Note A>
<music21.note.Note A>
<music21.note.Note C>
===
Measure 12, Note Grouping 1:
<music21.note.Note B->
<music21.note.Note B->
===
Measure 13, Note Grouping 1:
<music21.note.Note E->
<music21.note.Note B->
<music21.note.Note A->
<music21.note.Note G>
===
Measure 14, Note Grouping 1:
<music21.note.Note G>
<music21.note.Note F>
<music21.note.Note C>
===
Measure 15, Note Grouping 1:
<music21.note.Note C>
<music21.note.Rest rest>
<music21.note.Note F>
<music21.note.Rest rest>
===
Measure 16, Note Grouping 1:
<music21.note.Note A->
<music21.note.Note D>
===
Measure 17, Note Grouping 1:
<music21.note.Note E->
<music21.bar.Barline type=final>
===
---end segment---
'''
# Slurring
# --------
prepareSlurredNotes(music21Part, **partKeywords)
# Raw Segments
# ------------
    setHand = partKeywords.get('setHand', None)
allSegments = getRawSegments(music21Part, setHand=setHand)
# Grouping Attributes
# -------------------
for seg in allSegments:
seg.addGroupingAttributes(**partKeywords)
# Segment Attributes
# ------------------
seg.addSegmentAttributes(**partKeywords)
# Articulations
# -------------
seg.fixArticulations()
return allSegments
def prepareSlurredNotes(music21Part, **keywords):
'''
Takes in a :class:`~music21.stream.Part` and three keywords:
* slurLongPhraseWithBrackets
* showShortSlursAndTiesTogether
* showLongSlursAndTiesTogether
For any slurs present in the Part, the appropriate notes are labeled
with attributes indicating where to put the symbols that represent
slurring in braille. For purposes of slurring in braille, there is
a distinction between short and long phrases. In a short phrase, a
slur covers up to four notes. A short slur symbol should follow each
note except the last.
>>> import copy
>>> from music21.braille import segment
>>> short = converter.parse('tinynotation: 3/4 c4 d e')
>>> s1 = spanner.Slur(short.flat.notes[0], short.flat.notes[-1])
>>> short.append(s1)
>>> short.show('text')
{0.0} <music21.stream.Measure 1 offset=0.0>
{0.0} <music21.clef.TrebleClef>
{0.0} <music21.meter.TimeSignature 3/4>
{0.0} <music21.note.Note C>
{1.0} <music21.note.Note D>
{2.0} <music21.note.Note E>
{3.0} <music21.bar.Barline type=final>
{3.0} <music21.spanner.Slur <music21.note.Note C><music21.note.Note E>>
>>> shortA = copy.deepcopy(short)
>>> segment.prepareSlurredNotes(shortA)
>>> shortA.flat.notes[0].shortSlur
True
>>> shortA.flat.notes[1].shortSlur
True
In a long phrase, a slur covers more than four notes. There are two
options for slurring long phrases. The first is by using the bracket
slur. By default, slurLongPhraseWithBrackets is True. The opening
bracket sign is put before the first note, and the closing bracket
sign is put before the last note.
>>> long = converter.parse('tinynotation: 3/4 c8 d e f g a')
>>> s2 = spanner.Slur(long.flat.notes[0], long.flat.notes[-1])
>>> long.append(s2)
>>> long.show('text')
{0.0} <music21.stream.Measure 1 offset=0.0>
{0.0} <music21.clef.TrebleClef>
{0.0} <music21.meter.TimeSignature 3/4>
{0.0} <music21.note.Note C>
{0.5} <music21.note.Note D>
{1.0} <music21.note.Note E>
{1.5} <music21.note.Note F>
{2.0} <music21.note.Note G>
{2.5} <music21.note.Note A>
{3.0} <music21.bar.Barline type=final>
{3.0} <music21.spanner.Slur <music21.note.Note C><music21.note.Note A>>
>>> longA = copy.deepcopy(long)
>>> segment.prepareSlurredNotes(longA)
>>> longA.flat.notes[0].beginLongBracketSlur
True
>>> longA.flat.notes[-1].endLongBracketSlur
True
The other way is by using the double slur, setting slurLongPhraseWithBrackets
to False. The opening sign of the double slur is put after the first note
(i.e. before the second note) and the closing sign is put before the last
    note (i.e. after the second to last note).
>>> longB = copy.deepcopy(long)
>>> segment.prepareSlurredNotes(longB, slurLongPhraseWithBrackets=False)
>>> longB.flat.notes[1].beginLongDoubleSlur
True
>>> longB.flat.notes[-2].endLongDoubleSlur
True
In the event that slurs and ties are shown together in print, the slur is
redundant. Examples are shown for slurring a short phrase; the process is
identical for slurring a long phrase.
Below, a tie has been added between the first two notes of the short phrase
    defined above. If showShortSlursAndTiesTogether is set to its default value of
    False, then the slur span is shortened on either end by any notes that are
    tied, as shown below.
>>> short.flat.notes[0].tie = tie.Tie('start')
>>> shortB = copy.deepcopy(short)
>>> segment.prepareSlurredNotes(shortB)
>>> shortB.flat.notes[0].shortSlur
Traceback (most recent call last):
AttributeError: 'Note' object has no attribute 'shortSlur'
>>> shortB.flat.notes[0].tie
<music21.tie.Tie start>
>>> shortB.flat.notes[1].shortSlur
True
If showShortSlursAndTiesTogether is set to True, then the slurs and ties are
shown together (i.e. the note has both a shortSlur and a tie).
>>> shortC = copy.deepcopy(short)
>>> segment.prepareSlurredNotes(shortC, showShortSlursAndTiesTogether=True)
>>> shortC.flat.notes[0].shortSlur
True
>>> shortC.flat.notes[0].tie
<music21.tie.Tie start>
TODO: This should not add attributes to Note objects but instead return a collection
of sets of notes that have each element applied to it.
'''
if not music21Part.spannerBundle:
return
slurLongPhraseWithBrackets = keywords.get('slurLongPhraseWithBrackets',
SEGMENT_SLURLONGPHRASEWITHBRACKETS)
showShortSlursAndTiesTogether = keywords.get('showShortSlursAndTiesTogether',
SEGMENT_SHOWSHORTSLURSANDTIESTOGETHER)
if 'showLongSlursAndTiesTogether' in keywords:
showLongSlursAndTiesTogether = keywords['showLongSlursAndTiesTogether']
elif slurLongPhraseWithBrackets:
showLongSlursAndTiesTogether = True
else:
showLongSlursAndTiesTogether = SEGMENT_SHOWLONGSLURSANDTIESTOGETHER
allNotes = music21Part.flat.notes.stream()
for slur in music21Part.spannerBundle.getByClass(spanner.Slur):
firstNote = slur[0]
lastNote = slur[1]
try:
beginIndex = allNotes.index(firstNote)
endIndex = allNotes.index(lastNote)
except exceptions21.StreamException:
continue
delta = abs(endIndex - beginIndex) + 1
if not showShortSlursAndTiesTogether and delta <= SEGMENT_MAXNOTESFORSHORTSLUR:
            # normally slurs are not shown on tied notes (unless
            # showShortSlursAndTiesTogether is True, for facsimile transcriptions).
if (allNotes[beginIndex].tie is not None
and allNotes[beginIndex].tie.type == 'start'):
beginIndex += 1
if allNotes[endIndex].tie is not None and allNotes[endIndex].tie.type == 'stop':
endIndex -= 1
if not showLongSlursAndTiesTogether and delta > SEGMENT_MAXNOTESFORSHORTSLUR:
if (allNotes[beginIndex].tie is not None
and allNotes[beginIndex].tie.type == 'start'):
beginIndex += 1
if allNotes[endIndex].tie is not None and allNotes[endIndex].tie.type == 'stop':
endIndex -= 1
if delta <= SEGMENT_MAXNOTESFORSHORTSLUR:
for noteIndex in range(beginIndex, endIndex):
allNotes[noteIndex].shortSlur = True
else:
if slurLongPhraseWithBrackets:
allNotes[beginIndex].beginLongBracketSlur = True
allNotes[endIndex].endLongBracketSlur = True
else:
allNotes[beginIndex + 1].beginLongDoubleSlur = True
allNotes[endIndex - 1].endLongDoubleSlur = True
def getRawSegments(music21Part, setHand=None):
'''
    Takes in a :class:`~music21.stream.Part` and divides it up into segments (i.e. instances of
:class:`~music21.braille.segment.BrailleSegment`). This method assumes
that the Part is already divided up into measures
(see :class:`~music21.stream.Measure`). An acceptable input is shown below.
    This will automatically find appropriate segment breaks at
    braille.objects.BrailleSegmentDivision or braille.objects.BrailleOptionalSegmentDivision,
    or after 48 elements if a double bar or repeat sign is encountered.
Two methods are called on each measure during the creation of segments:
* :meth:`~music21.braille.segment.prepareBeamedNotes`
* :meth:`~music21.braille.segment.extractBrailleElements`
>>> tn = converter.parse("tinynotation: 3/4 c4 c c e e e g g g c'2.")
>>> tn = tn.makeNotation(cautionaryNotImmediateRepeat=False)
>>> tn.show('text')
{0.0} <music21.stream.Measure 1 offset=0.0>
{0.0} <music21.clef.TrebleClef>
{0.0} <music21.meter.TimeSignature 3/4>
{0.0} <music21.note.Note C>
{1.0} <music21.note.Note C>
{2.0} <music21.note.Note C>
{3.0} <music21.stream.Measure 2 offset=3.0>
{0.0} <music21.note.Note E>
{1.0} <music21.note.Note E>
{2.0} <music21.note.Note E>
{6.0} <music21.stream.Measure 3 offset=6.0>
{0.0} <music21.note.Note G>
{1.0} <music21.note.Note G>
{2.0} <music21.note.Note G>
{9.0} <music21.stream.Measure 4 offset=9.0>
{0.0} <music21.note.Note C>
{3.0} <music21.bar.Barline type=final>
By default, there is no break anywhere within the Part,
and a segmentList of size 1 is returned.
>>> import copy
>>> from music21.braille import segment
>>> tnA = copy.deepcopy(tn)
>>> rawSegments = segment.getRawSegments(tnA)
>>> len(rawSegments)
1
>>> rawSegments[0]
<music21.braille.segment.BrailleSegment 1 line, 0 headings, 40 cols>
>>> print(rawSegments[0])
---begin segment---
<music21.braille.segment BrailleSegment>
Measure 1, Signature Grouping 1:
<music21.meter.TimeSignature 3/4>
===
Measure 1, Note Grouping 1:
<music21.clef.TrebleClef>
<music21.note.Note C>
<music21.note.Note C>
<music21.note.Note C>
===
Measure 2, Note Grouping 1:
<music21.note.Note E>
<music21.note.Note E>
<music21.note.Note E>
===
Measure 3, Note Grouping 1:
<music21.note.Note G>
<music21.note.Note G>
<music21.note.Note G>
===
Measure 4, Note Grouping 1:
<music21.note.Note C>
<music21.bar.Barline type=final>
===
---end segment---
Now, a segment break occurs at measure 2, offset 1.0 within that measure.
The two segments are shown below.
>>> tnB = copy.deepcopy(tn)
>>> tnB.measure(2).insert(1.0, braille.objects.BrailleSegmentDivision())
>>> allSegments = segment.getRawSegments(tnB)
>>> len(allSegments)
2
>>> allSegments[0]
<music21.braille.segment.BrailleSegment 1 line, 0 headings, 40 cols>
>>> print(allSegments[0])
---begin segment---
<music21.braille.segment BrailleSegment>
Measure 1, Signature Grouping 1:
<music21.meter.TimeSignature 3/4>
===
Measure 1, Note Grouping 1:
<music21.clef.TrebleClef>
<music21.note.Note C>
<music21.note.Note C>
<music21.note.Note C>
===
Measure 2, Note Grouping 1:
<music21.note.Note E>
===
---end segment---
>>> allSegments[1]
<music21.braille.segment.BrailleSegment 1 line, 0 headings, 40 cols>
>>> print(allSegments[1])
---begin segment---
<music21.braille.segment BrailleSegment>
Measure 2, Note Grouping 1:
<music21.note.Note E>
<music21.note.Note E>
===
Measure 3, Note Grouping 1:
<music21.note.Note G>
<music21.note.Note G>
<music21.note.Note G>
===
Measure 4, Note Grouping 1:
<music21.note.Note C>
<music21.bar.Barline type=final>
===
---end segment---
'''
allSegments = []
currentSegment = BrailleSegment()
for music21Measure in music21Part.getElementsByClass([stream.Measure, stream.Voice]):
prepareBeamedNotes(music21Measure)
brailleElements = extractBrailleElements(music21Measure)
elementsInCurrentSegment = 0
offsetFactor = 0
previousAffinityCode = Affinity._LOWEST # -1
for brailleElement in brailleElements:
# TODO: use objects.BrailleSegmentDivision() here...
startANewSegment = False
if 'BrailleOptionalSegmentDivision' in brailleElement.classes:
                # deliberately two nested ifs rather than one compound condition:
                # an optional division that does not trigger must not fall through
                # to the 'BrailleSegmentDivision' branch below, of which this
                # class is a subclass (that would always start a new segment).
if elementsInCurrentSegment > MAX_ELEMENTS_IN_SEGMENT:
startANewSegment = True
elif 'BrailleSegmentDivision' in brailleElement.classes:
startANewSegment = True
elif 'Barline' in brailleElement.classes:
if (elementsInCurrentSegment > MAX_ELEMENTS_IN_SEGMENT
and brailleElement.type in ('double', 'final')):
startANewSegment = True
if startANewSegment:
# end of segment, get new one...
if brailleElement.offset != 0.0:
currentSegment.endHyphen = True
allSegments.append(currentSegment)
currentSegment = BrailleSegment()
elementsInCurrentSegment = 0
if brailleElement.offset != 0.0:
currentSegment.beginsMidMeasure = True
startANewSegment = False
if 'BrailleSegmentDivision' in brailleElement.classes:
continue
if brailleElement.affinityCode < previousAffinityCode:
offsetFactor += 1
affinityCode = brailleElement.affinityCode
if affinityCode == Affinity.SPLIT1_NOTEGROUP:
affinityCode = Affinity.INACCORD
elif affinityCode == Affinity.SPLIT2_NOTEGROUP:
affinityCode = Affinity.NOTEGROUP
segmentKey = SegmentKey(music21Measure.number,
offsetFactor,
affinityCode,
setHand
)
if segmentKey not in currentSegment:
currentSegment[segmentKey] = BrailleElementGrouping()
brailleElementGrouping = currentSegment[segmentKey]
brailleElementGrouping.append(brailleElement)
elementsInCurrentSegment += 1
previousAffinityCode = brailleElement.affinityCode
allSegments.append(currentSegment)
return allSegments
def extractBrailleElements(music21Measure):
'''
Takes in a :class:`~music21.stream.Measure` and returns a
:class:`~music21.braille.segment.BrailleElementGrouping` of correctly ordered
:class:`~music21.base.Music21Object` instances which can be directly transcribed to
braille.
>>> from music21.braille import segment
>>> tn = converter.parse('tinynotation: 2/4 c16 c c c d d d d', makeNotation=False)
>>> tn = tn.makeNotation(cautionaryNotImmediateRepeat=False)
>>> measure = tn[0]
>>> measure.append(spanner.Slur(measure.notes[0],measure.notes[-1]))
>>> measure.show('text')
{0.0} <music21.clef.TrebleClef>
{0.0} <music21.meter.TimeSignature 2/4>
{0.0} <music21.note.Note C>
{0.25} <music21.note.Note C>
{0.5} <music21.note.Note C>
{0.75} <music21.note.Note C>
{1.0} <music21.note.Note D>
{1.25} <music21.note.Note D>
{1.5} <music21.note.Note D>
{1.75} <music21.note.Note D>
{2.0} <music21.spanner.Slur <music21.note.Note C><music21.note.Note D>>
{2.0} <music21.bar.Barline type=final>
Spanners are dealt with in :meth:`~music21.braille.segment.prepareSlurredNotes`,
so they are not returned by this method, as seen below.
>>> print(segment.extractBrailleElements(measure))
<music21.meter.TimeSignature 2/4>
<music21.clef.TrebleClef>
<music21.note.Note C>
<music21.note.Note C>
<music21.note.Note C>
<music21.note.Note C>
<music21.note.Note D>
<music21.note.Note D>
<music21.note.Note D>
<music21.note.Note D>
<music21.bar.Barline type=final>
'''
allElements = BrailleElementGrouping()
for music21Object in music21Measure:
try:
if isinstance(music21Object, bar.Barline):
if music21Object.type == 'regular':
continue
setAffinityCode(music21Object)
music21Object.editorial.brailleEnglish = [str(music21Object)]
allElements.append(music21Object)
except BrailleSegmentException as notSupportedException: # pragma: no cover
isExempt = [isinstance(music21Object, music21Class)
for music21Class in excludeFromBrailleElements]
if isExempt.count(True) == 0:
environRules.warn(f'{notSupportedException}')
allElements.sort(key=lambda x: (x.offset, x.classSortOrder))
if len(allElements) >= 2 and isinstance(allElements[-1], dynamics.Dynamic):
if isinstance(allElements[-2], bar.Barline):
allElements[-1].classSortOrder = -1
allElements.sort(key=lambda x: (x.offset, x.classSortOrder))
return allElements
def prepareBeamedNotes(music21Measure):
'''
Takes in a :class:`~music21.stream.Measure` and labels beamed notes
of smaller value than an 8th with beamStart and beamContinue keywords
in accordance with beaming rules in braille music.
A more in-depth explanation of beaming in braille can be found in
Chapter 15 of Introduction to Braille Music Transcription, Second
Edition, by Mary Turner De Garmo.
>>> from music21.braille import segment
>>> tn = converter.parse('tinynotation: 2/4 c16 c c c d d d d')
>>> tn = tn.makeNotation(cautionaryNotImmediateRepeat=False)
>>> tn.show('text')
{0.0} <music21.stream.Measure 1 offset=0.0>
{0.0} <music21.clef.TrebleClef>
{0.0} <music21.meter.TimeSignature 2/4>
{0.0} <music21.note.Note C>
{0.25} <music21.note.Note C>
{0.5} <music21.note.Note C>
{0.75} <music21.note.Note C>
{1.0} <music21.note.Note D>
{1.25} <music21.note.Note D>
{1.5} <music21.note.Note D>
{1.75} <music21.note.Note D>
{2.0} <music21.bar.Barline type=final>
>>> measure = tn[0]
>>> segment.prepareBeamedNotes(measure)
>>> measure.notes[0].beamStart
True
>>> measure.notes[1].beamContinue
True
>>> measure.notes[2].beamContinue
True
>>> measure.notes[3].beamContinue
True
'''
allNotes = music21Measure.notes.stream()
for sampleNote in allNotes:
sampleNote.beamStart = False
sampleNote.beamContinue = False
allNotesAndRests = music21Measure.notesAndRests.stream()
def withBeamFilter(el, unused):
return (el.beams is not None) and len(el.beams) > 0
def beamStartFilter(el, unused):
return el.beams.getByNumber(1).type == 'start'
def beamStopFilter(el, unused):
return el.beams.getByNumber(1).type == 'stop'
allStartIter = allNotes.iter.addFilter(withBeamFilter).addFilter(beamStartFilter)
allStopIter = allNotes.iter.addFilter(withBeamFilter).addFilter(beamStopFilter)
if len(allStartIter) != len(allStopIter):
        environRules.warn('Incorrect beaming: number of start notes does not equal number of stop notes.')
return
for beamIndex, startNote in enumerate(allStartIter):
# Eighth notes cannot be beamed in braille (redundant, because beamed
# notes look like eighth notes, but nevertheless useful).
if startNote.quarterLength == 0.5:
continue
stopNote = allStopIter[beamIndex]
startIndex = allNotesAndRests.index(startNote)
stopIndex = allNotesAndRests.index(stopNote)
delta = stopIndex - startIndex + 1
if delta < 3: # 2. The group must be composed of at least three notes.
continue
# 1. All notes in the group must have precisely the same value.
# 3. A rest of the same value may take the place of the first note in a group,
# but if the rest is located anywhere else, grouping may not be used.
allNotesOfSameValue = True
for noteIndex in range(startIndex + 1, stopIndex + 1):
if (allNotesAndRests[noteIndex].quarterLength != startNote.quarterLength
or isinstance(allNotesAndRests[noteIndex], note.Rest)):
allNotesOfSameValue = False
break
try:
afterStopNote = allNotesAndRests[stopIndex + 1]
if (isinstance(afterStopNote, note.Rest)
and (int(afterStopNote.beat) == int(stopNote.beat))):
allNotesOfSameValue = False
except exceptions21.StreamException: # stopNote is last note of measure.
pass
if not allNotesOfSameValue:
continue
try:
# 4. If the notes in the group are followed immediately by a
# true eighth note or by an eighth rest,
# grouping may not be used, unless the eighth is located in a new measure.
if allNotesAndRests[stopIndex + 1].quarterLength == 0.5:
continue
except exceptions21.StreamException: # stopNote is last note of measure.
pass
startNote.beamStart = True
try:
beforeStartNote = allNotesAndRests[startIndex - 1]
if (isinstance(beforeStartNote, note.Rest)
and int(beforeStartNote.beat) == int(startNote.beat)
and beforeStartNote.quarterLength == startNote.quarterLength):
startNote.beamContinue = True
except IndexError: # startNote is first note of measure.
pass
for noteIndex in range(startIndex + 1, stopIndex + 1):
allNotesAndRests[noteIndex].beamContinue = True
def setAffinityCode(music21Object):
'''
Takes in a :class:`~music21.base.Music21Object`, and does two things:
* Modifies the :attr:`~music21.base.Music21Object.classSortOrder` attribute of the
object to fit the slightly modified ordering of objects in braille music.
* Adds an affinity code to the object. This code indicates which surrounding
objects the object should be grouped with.
A BrailleSegmentException is raised if an affinity code cannot be assigned to
the object.
As seen in the following example, the affinity code of a :class:`~music21.note.Note`
and a :class:`~music21.clef.TrebleClef` are the same, because they should be grouped
together. However, the classSortOrder indicates that the TrebleClef should come first
in the braille.
>>> n1 = note.Note('D5')
>>> braille.segment.setAffinityCode(n1)
>>> n1.affinityCode
<Affinity.NOTEGROUP: 9>
>>> n1.classSortOrder
10
>>> c1 = clef.TrebleClef()
>>> braille.segment.setAffinityCode(c1)
>>> c1.affinityCode
<Affinity.NOTEGROUP: 9>
>>> c1.classSortOrder
7
'''
for (music21Class, code, sortOrder) in affinityCodes:
if isinstance(music21Object, music21Class):
music21Object.affinityCode = code
music21Object.classSortOrder = sortOrder
return
if isinstance(music21Object, expressions.TextExpression):
music21Object.affinityCode = Affinity.NOTEGROUP
if len(music21Object.content.split()) > 1:
music21Object.affinityCode = Affinity.LONG_TEXTEXPR
music21Object.classSortOrder = 8
return
if isinstance(music21Object, BrailleTranscriptionHelper):
return
raise BrailleSegmentException(f'{music21Object} cannot be transcribed to braille.')
def areGroupingsIdentical(noteGroupingA, noteGroupingB):
'''
Takes in two note groupings, noteGroupingA and noteGroupingB. Returns True
if both groupings have identical contents. False otherwise.
    Helper for numRepeats.
    Groupings of different lengths are never considered identical.
>>> a = [note.Note('C4'), note.Note('D4')]
>>> b = [note.Note('C4'), note.Note('D4')]
>>> braille.segment.areGroupingsIdentical(a, b)
True
>>> d = b.pop()
>>> braille.segment.areGroupingsIdentical(a, b)
False
>>> c = [note.Rest(), note.Note('D4')]
>>> braille.segment.areGroupingsIdentical(a, c)
False
'''
if len(noteGroupingA) == len(noteGroupingB):
for (elementA, elementB) in zip(noteGroupingA, noteGroupingB):
if elementA != elementB:
return False
return True
return False
# ------------------------------------------------------------------------------
# Helper Methods
def splitNoteGrouping(noteGrouping, beatDivisionOffset=0):
'''
Almost identical to :meth:`~music21.braille.segment.splitMeasure`, but
functions on a :class:`~music21.braille.segment.BrailleElementGrouping`
instead.
>>> from music21.braille import segment
>>> bg = segment.BrailleElementGrouping()
>>> bg.timeSignature = meter.TimeSignature('2/2')
>>> s = converter.parse('tinyNotation: 2/2 c4 d r e')
>>> for n in s.recurse().notesAndRests:
... bg.append(n)
>>> left, right = segment.splitNoteGrouping(bg)
>>> left
<music21.braille.segment.BrailleElementGrouping
[<music21.note.Note C>, <music21.note.Note D>]>
>>> print(left)
<music21.note.Note C>
<music21.note.Note D>
>>> right
<music21.braille.segment.BrailleElementGrouping
[<music21.note.Rest rest>, <music21.note.Note E>]>
Now split one beat division earlier than it should be. For 2/2 that means
one half of a beat, or one quarter note earlier:
>>> left, right = segment.splitNoteGrouping(bg, beatDivisionOffset=1)
>>> left
<music21.braille.segment.BrailleElementGrouping
[<music21.note.Note C>]>
>>> right
<music21.braille.segment.BrailleElementGrouping
[<music21.note.Note D>, <music21.note.Rest rest>, <music21.note.Note E>]>
'''
music21Measure = stream.Measure()
for brailleElement in noteGrouping:
music21Measure.insert(brailleElement.offset, brailleElement)
(leftMeasure, rightMeasure) = splitMeasure(music21Measure,
beatDivisionOffset,
noteGrouping.timeSignature)
leftBrailleElements = copy.copy(noteGrouping)
leftBrailleElements.internalList = []
for brailleElement in leftMeasure:
leftBrailleElements.append(brailleElement)
rightBrailleElements = copy.copy(noteGrouping)
rightBrailleElements.internalList = []
for brailleElement in rightMeasure:
rightBrailleElements.append(brailleElement)
return leftBrailleElements, rightBrailleElements
def splitMeasure(music21Measure, beatDivisionOffset=0, useTimeSignature=None):
'''
Takes a :class:`~music21.stream.Measure`, divides it in two parts, and returns a
two-tuple of (leftMeasure, rightMeasure). The parameters are as
follows:
* beatDivisionOffset => Adjusts the end offset of the first partition by a certain amount
of beats to the left.
    * useTimeSignature => Used in the event that the Measure comes from the middle of a Part
and thus does not define an explicit :class:`~music21.meter.TimeSignature`. If not
provided, a TimeSignature is retrieved by
using :meth:`~music21.stream.Measure.bestTimeSignature`.
>>> m = stream.Measure()
>>> m.append(note.Note('C4'))
>>> m.append(note.Note('D4'))
>>> left, right = braille.segment.splitMeasure(m)
>>> left.show('text')
{0.0} <music21.note.Note C>
>>> right.show('text')
{1.0} <music21.note.Note D>
'''
if useTimeSignature is not None:
ts = useTimeSignature
else:
ts = music21Measure.bestTimeSignature()
offset = 0.0
if beatDivisionOffset != 0:
if abs(beatDivisionOffset) > len(ts.beatDivisionDurations):
raise BrailleSegmentException(
f'beatDivisionOffset {beatDivisionOffset} is outside '
+ f'of ts.beatDivisionDurations {ts.beatDivisionDurations}'
)
duration_index = len(ts.beatDivisionDurations) - abs(beatDivisionOffset)
try:
offset += opFrac(ts.beatDivisionDurations[duration_index].quarterLength)
offset = opFrac(offset)
except IndexError:
environRules.warn('Problem in converting a time signature in measure '
+ f'{music21Measure.number}, offset may be wrong')
bs = copy.deepcopy(ts.beatSequence)
numberOfPartitions = 2
try:
bs.partitionByCount(numberOfPartitions, loadDefault=False)
(startOffsetZero, endOffsetZero) = bs.getLevelSpan()[0]
except meter.MeterException:
numberOfPartitions += 1
bs.partitionByCount(numberOfPartitions, loadDefault=False)
startOffsetZero = bs.getLevelSpan()[0][0]
endOffsetZero = bs.getLevelSpan()[-2][-1]
endOffsetZero -= offset
leftMeasure = stream.Measure()
rightMeasure = stream.Measure()
for x in music21Measure:
if (x.offset >= startOffsetZero
and (x.offset < endOffsetZero
or (x.offset == endOffsetZero
and isinstance(x, bar.Barline)))):
leftMeasure.insert(x.offset, x)
else:
rightMeasure.insert(x.offset, x)
for n in rightMeasure.notes:
if n.tie is not None:
leftMeasure.append(n)
rightMeasure.remove(n)
endOffsetZero += n.duration.quarterLength
continue
break
rest0Length = music21Measure.duration.quarterLength - endOffsetZero
r0 = note.Rest(quarterLength=rest0Length)
leftMeasure.insert(endOffsetZero, r0)
r1 = note.Rest(quarterLength=endOffsetZero)
rightMeasure.insert(0.0, r1)
ts0_delete = False
if leftMeasure.timeSignature is None:
ts0_delete = True
leftMeasure.timeSignature = ts
rightMeasure.timeSignature = ts
leftMeasure.mergeAttributes(music21Measure)
rightMeasure.mergeAttributes(music21Measure)
leftMeasure.makeBeams(inPlace=True)
rightMeasure.makeBeams(inPlace=True)
prepareBeamedNotes(leftMeasure)
prepareBeamedNotes(rightMeasure)
leftMeasure.remove(r0)
rightMeasure.remove(r1)
if ts0_delete:
leftMeasure.remove(ts)
rightMeasure.remove(ts)
return (leftMeasure, rightMeasure)
# ------------------------------------------------------------------------------
class Test(unittest.TestCase):
def testGetRawSegments(self):
from music21 import converter
tn = converter.parse("tinynotation: 3/4 c4 c c e e e g g g c'2.")
tn = tn.makeNotation(cautionaryNotImmediateRepeat=False)
rawSegList = getRawSegments(tn)
unused = str(rawSegList[0])
if __name__ == '__main__':
import music21
music21.mainTest(Test) # , runTest='testGetRawSegments')
| 38.667094 | 100 | 0.618141 | 8,822 | 90,481 | 6.309227 | 0.128542 | 0.023284 | 0.037729 | 0.012073 | 0.245814 | 0.194484 | 0.154222 | 0.132986 | 0.118361 | 0.102731 | 0 | 0.022961 | 0.285198 | 90,481 | 2,339 | 101 | 38.683625 | 0.837343 | 0.385893 | 0 | 0.222543 | 0 | 0 | 0.082596 | 0.013871 | 0 | 0 | 0 | 0.000855 | 0 | 1 | 0.055877 | false | 0.006744 | 0.026975 | 0.010597 | 0.142582 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8ce427090a2e0b0af55c78dd3dcd745139d5044a | 4,484 | py | Python | ext/paginator.py | Starbors/RemixBot | 74517e90d88cf102e7a0787a098d1de6ea4a6e08 | ["MIT"] | 66 | 2018-01-04T17:09:21.000Z | 2022-03-28T08:37:10.000Z | ext/paginator.py | Tominous/RemixBot | 74517e90d88cf102e7a0787a098d1de6ea4a6e08 | ["MIT"] | 10 | 2018-01-04T17:55:01.000Z | 2021-07-14T15:01:47.000Z | ext/paginator.py | Tominous/RemixBot | 74517e90d88cf102e7a0787a098d1de6ea4a6e08 | ["MIT"] | 63 | 2018-01-04T16:11:52.000Z | 2022-03-13T17:21:32.000Z | import discord
from collections import OrderedDict
import asyncio
class PaginatorSession:
'''Class that interactively paginates
    a set of embeds using reactions'''
def __init__(self, ctx, timeout=60, pages=[], color=discord.Color.green(), footer=''):
self.footer = footer # footer message
self.ctx = ctx # ctx
self.timeout = timeout # when the reactions get cleared, int[seconds]
self.pages = pages # the list of embeds list[discord.Embed, discord.Embed]
self.running = False # currently running, bool
self.message = None # current message being paginated, discord.Message
self.current = 0 # current page index, int
self.color = color # embed color
# can't be awaited here, must be done in PaginatorSession.run()
self.reactions = OrderedDict({
'⏮': self.first_page,
'◀': self.previous_page,
'⏹': self.close,
'▶': self.next_page,
'⏭': self.last_page
})
# this wasn't used but i'll just leave it here i guess
def add_page(self, page):
if isinstance(page, discord.Embed):
self.pages.append(page)
else:
raise TypeError('Page must be a discord.Embed.')
def valid_page(self, index):
        return 0 <= index < len(self.pages)  # both bounds must hold; the old 'or' accepted every index
async def show_page(self, index: int):
if not self.valid_page(index):
return # checks for a valid page
self.current = index
page = self.pages[index] # gets the page
page.set_footer(text=self.footer) # sets footer
if self.running:
# if the first embed was sent, it edits it
await self.message.edit(embed=page)
else:
self.running = True
# sends the message
self.message = await self.ctx.send(embed=page)
# adds reactions
for reaction in self.reactions.keys():
if len(self.pages) == 2 and reaction in '⏮⏭':
continue # ignores 2 page embed first and last emojis
await self.message.add_reaction(reaction)
def react_check(self, reaction, user):
'''Check to make sure it only responds to reactions from the sender and on the same message'''
if reaction.message.id != self.message.id:
return False # not the same message
if user.id != self.ctx.author.id:
return False # not the same user
if reaction.emoji in self.reactions.keys():
return True # reaction was one of the pagination emojis
async def run(self):
'''Actually runs the paginator session'''
if not self.running:
# defaults to first page
await self.show_page(0)
while self.running:
try:
# waits for reaction using react_check
reaction, user = await self.ctx.bot.wait_for('reaction_add', check=self.react_check, timeout=self.timeout)
except asyncio.TimeoutError:
self.running = False
try:
await self.message.clear_reactions() # tries to remove reactions
                except discord.HTTPException:
pass # no perms
finally:
break # stops no matter what
else:
# same as above
try:
await self.message.remove_reaction(reaction, user)
                except discord.HTTPException:
pass
action = self.reactions[reaction.emoji] # gets the function from the reaction map OrderedDict
await action() # awaits here with () because __init__ can't be async
# all functions with await must be async
async def first_page(self):
'''Go to the first page'''
return await self.show_page(0)
async def last_page(self):
'''Go to the last page'''
return await self.show_page(len(self.pages) - 1)
async def next_page(self):
'''Go to the next page'''
return await self.show_page(self.current + 1)
async def previous_page(self):
'''Go to the previous page.'''
return await self.show_page(self.current - 1)
async def close(self):
'''Stop the paginator session.'''
self.running = False
try:
await self.message.clear_reactions()
        except discord.HTTPException:
pass
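
# Usage sketch (illustrative, not part of the original module). Inside a
# discord.py command, build the embeds and hand them to the session; the
# 'pages' and 'footer' keyword names match __init__ above.
#
#     @bot.command()
#     async def info(ctx):
#         first = discord.Embed(title='Page 1', description='...')
#         second = discord.Embed(title='Page 2', description='...')
#         await PaginatorSession(ctx, pages=[first, second], footer='demo').run()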
| 37.366667 | 122 | 0.578947 | 556 | 4,484 | 4.618705 | 0.302158 | 0.042056 | 0.031153 | 0.0331 | 0.14447 | 0.113707 | 0.074766 | 0.074766 | 0.074766 | 0.036604 | 0 | 0.004024 | 0.334969 | 4,484 | 119 | 123 | 37.680672 | 0.854795 | 0.234835 | 0 | 0.238095 | 0 | 0 | 0.014967 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0.035714 | 0.035714 | 0.011905 | 0.202381 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8ce5aa779bf604c1872eb78b35ef49af06cb65aa | 1,927 | py | Python | scripts/practice/Amazon/reorderDataInLogFiles.py | bhimeshchauhan/competitive_programming | e0777bb0c425ffa03d8173a83e50ca55c4a3fcf5 | ["MIT"] | null | null | null | scripts/practice/Amazon/reorderDataInLogFiles.py | bhimeshchauhan/competitive_programming | e0777bb0c425ffa03d8173a83e50ca55c4a3fcf5 | ["MIT"] | 8 | 2020-09-05T16:04:31.000Z | 2022-02-27T09:57:51.000Z | scripts/practice/Amazon/reorderDataInLogFiles.py | bhimeshchauhan/competitive_programming | e0777bb0c425ffa03d8173a83e50ca55c4a3fcf5 | ["MIT"] | null | null | null | """
Reorder Data in Log Files
You are given an array of logs. Each log is a space-delimited
string of words, where the first word is the identifier.
There are two types of logs:
Letter-logs: All words (except the identifier) consist of lowercase English letters.
Digit-logs: All words (except the identifier) consist of digits.
Reorder these logs so that:
The letter-logs come before all digit-logs.
The letter-logs are sorted lexicographically by their contents. If their contents are the same, then sort them lexicographically by their identifiers.
The digit-logs maintain their relative ordering.
Return the final order of the logs.
Example 1:
Input: logs = ["dig1 8 1 5 1","let1 art can","dig2 3 6","let2 own kit dig","let3 art zero"]
Output: ["let1 art can","let3 art zero","let2 own kit dig","dig1 8 1 5 1","dig2 3 6"]
Explanation:
The letter-log contents are all different, so their ordering is "art can", "art zero", "own kit dig".
The digit-logs have a relative order of "dig1 8 1 5 1", "dig2 3 6".
Example 2:
Input: logs = ["a1 9 2 3 1","g1 act car","zo4 4 7","ab1 off key dog","a8 act zoo"]
Output: ["g1 act car","a8 act zoo","ab1 off key dog","a1 9 2 3 1","zo4 4 7"]
Constraints:
1 <= logs.length <= 100
3 <= logs[i].length <= 100
All the tokens of logs[i] are separated by a single space.
logs[i] is guaranteed to have an identifier and at least one word after the identifier.
"""
from typing import List  # needed for the List[str] annotations below


class Solution:
def reorderLogFiles(self, logs: List[str]) -> List[str]:
digit_logs = []
letter_logs = []
for log in logs:
            parts = log.split(" ")  # 'parts' instead of the ambiguous single-letter name
            identifier, words, wd = parts[0], " ".join(parts[1:]), "".join(parts[1:])
if wd.isdigit():
digit_logs.append(log)
else:
letter_logs.append([log, words, identifier])
letter_logs.sort(key=lambda t: (t[1], t[2]))
return [log[0] for log in letter_logs] + digit_logs
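
# Illustrative quick check against Example 1 above (hypothetical driver,
# not part of the original solution file):
if __name__ == "__main__":
    sample = ["dig1 8 1 5 1", "let1 art can", "dig2 3 6", "let2 own kit dig", "let3 art zero"]
    print(Solution().reorderLogFiles(sample))
    # -> ['let1 art can', 'let3 art zero', 'let2 own kit dig', 'dig1 8 1 5 1', 'dig2 3 6']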
| 33.807018 | 150 | 0.662688 | 326 | 1,927 | 3.895706 | 0.389571 | 0.055118 | 0.014173 | 0.016535 | 0.100787 | 0.085039 | 0.085039 | 0.085039 | 0 | 0 | 0 | 0.045364 | 0.222107 | 1,927 | 56 | 151 | 34.410714 | 0.801868 | 0.724961 | 0 | 0 | 0 | 0 | 0.003846 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0 | 0 | 0.230769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8ce61887b0a5e6e7c6962291d00bf76f45e1015c | 692 | py | Python | problems/boj/14226/bbumjun.py | Lee-Park-Bae-Project/Algorithm | f7cc13a6319a73c3fbe771a92a70599c416844e1 | ["MIT"] | null | null | null | problems/boj/14226/bbumjun.py | Lee-Park-Bae-Project/Algorithm | f7cc13a6319a73c3fbe771a92a70599c416844e1 | ["MIT"] | 19 | 2020-06-15T12:53:58.000Z | 2020-08-10T04:26:45.000Z | problems/boj/14226/bbumjun.py | Lee-Park-Bae-Project/Algorithm | f7cc13a6319a73c3fbe771a92a70599c416844e1 | ["MIT"] | null | null | null | from collections import deque
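# BOJ 14226 "Emoticon" (context added for readability): starting with one
# emoticon on screen and an empty clipboard, reach exactly s emoticons in the
# fewest operations, where each operation either copies the whole screen to
# the clipboard, pastes the clipboard, or deletes one emoticon. A BFS over
# (display, clipboard) states finds the minimum number of operations.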
s = int(input())
q = deque([(1, 0, 0)])
ans = 0
isVisit = {(1, 0)}  # visited (display, clipboard) states; set([1, 0]) stored ints, not the start state
while q:
display, clipboard, cost = q.popleft()
if display == s:
ans = cost
break
if (display, display) not in isVisit:
isVisit.add((display, display))
q.append((display, display, cost+1))
if clipboard != 0 and (display+clipboard, clipboard) not in isVisit:
isVisit.add((display+clipboard, clipboard))
q.append((display+clipboard, clipboard, cost+1))
if display > 0 and (display-1, clipboard) not in isVisit:
isVisit.add((display-1, clipboard))
q.append((display-1, clipboard, cost+1))
print(ans)
| 31.454545 | 72 | 0.618497 | 95 | 692 | 4.505263 | 0.284211 | 0.149533 | 0.084112 | 0.133178 | 0.245327 | 0.245327 | 0.17757 | 0 | 0 | 0 | 0 | 0.028355 | 0.235549 | 692 | 21 | 73 | 32.952381 | 0.780718 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.05 | 0 | 0.05 | 0.05 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8cece3741df2c90d8ed24fb26ef7d0acc3f39d3d | 2,039 | py | Python | concourse_common/jsonutil.py | cosee-concourse/concourse-common | a0fa1c7a2889dac3caecead27bbeb39091fbb2c8 | ["MIT"] | null | null | null | concourse_common/jsonutil.py | cosee-concourse/concourse-common | a0fa1c7a2889dac3caecead27bbeb39091fbb2c8 | ["MIT"] | null | null | null | concourse_common/jsonutil.py | cosee-concourse/concourse-common | a0fa1c7a2889dac3caecead27bbeb39091fbb2c8 | ["MIT"] | null | null | null | import json
import os  # needed to wrap the descriptor returned by tempfile.mkstemp()
import sys
import tempfile
from concourse_common import common
from concourse_common.request import Request
from jsonschema import Draft4Validator
def load_payload():
payload = json.load(sys.stdin)
    fd, file_name = tempfile.mkstemp()  # mkstemp creates a temporary file, not a folder
    common.log_info("Logging payload to {}".format(file_name))
    with os.fdopen(fd, 'w') as fp:  # reuse the open descriptor so it is not leaked
fp.write(json.dumps(payload))
return payload
def load_and_validate_payload(schemas, request):
payload = load_payload()
if request == Request.CHECK:
schema = schemas.check_schema
elif request == Request.IN:
schema = schemas.in_schema
else:
schema = schemas.out_schema
valid = validate_json(payload, schema)
if valid:
return valid, payload
else:
return valid, None
def validate_json(instance, schema):  # 'instance' avoids shadowing the builtin input()
    v = Draft4Validator(schema)
    valid = True
    for error in sorted(v.iter_errors(instance), key=str):
valid = False
common.log_error("JSON Validation ERROR: " + error.message)
return valid
def get_version(payload, version_key_name):
try:
version = payload["version"][version_key_name]
except KeyError:
version = None
return version
def versions_as_list(versions, version_key_name):
if versions is None:
return json.dumps([])
else:
version_dictionary = []
for version in versions:
version_dictionary.append({version_key_name: version})
return json.dumps(version_dictionary)
def get_version_output(version, version_key_name):
if version is None:
return [{}]
else:
return json.dumps({"version": {version_key_name: version}})
def contains_params_key(payload, key_name):
return key_name in payload['params']
def get_source_value(payload, key):
try:
return payload['source'][key]
except KeyError:
return None
def get_params_value(payload, key):
try:
return payload['params'][key]
except KeyError:
return None
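
# Usage sketch (illustrative; assumes a 'schemas' object exposing the
# check_schema / in_schema / out_schema attributes this module reads):
#
#     valid, payload = load_and_validate_payload(schemas, Request.CHECK)
#     if valid:
#         print(versions_as_list(['61cebf', 'd74e01'], 'ref'))
#         # -> [{"ref": "61cebf"}, {"ref": "d74e01"}]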
| 23.436782 | 67 | 0.67386 | 252 | 2,039 | 5.265873 | 0.277778 | 0.0422 | 0.063301 | 0.047476 | 0.087415 | 0.046722 | 0 | 0 | 0 | 0 | 0 | 0.001285 | 0.236881 | 2,039 | 86 | 68 | 23.709302 | 0.851542 | 0 | 0 | 0.1875 | 0 | 0 | 0.037764 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.140625 | false | 0 | 0.09375 | 0.015625 | 0.453125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8cee34daa32b36187f44adb5bfba1bc5b0aea5ea | 3,493 | py | Python | particlezoo/parsers/transform.py | npapapietro/particlezoo | f4a42fedd9f3a4ed10ae5926d217e3966a733a32 | ["MIT"] | null | null | null | particlezoo/parsers/transform.py | npapapietro/particlezoo | f4a42fedd9f3a4ed10ae5926d217e3966a733a32 | ["MIT"] | null | null | null | particlezoo/parsers/transform.py | npapapietro/particlezoo | f4a42fedd9f3a4ed10ae5926d217e3966a733a32 | ["MIT"] | null | null | null | from __future__ import annotations
from particlezoo.builders.lagrangian import Lagrangian
from sympy import sympify, Matrix
from liesym import SU, SO, U1, Sp, Z, E, Group, LieGroup
from typing import Dict, Union
from ..builders import (SymmetryGroup, Symmetry,
Representation, GenericField,
Field, Configuration)
from ..exceptions import ConfigError, ModelError
def group_lookup(name: list[str]) -> Group:
"""Parses string and returns instance of
group with proper dimension.
Args:
name (list[str]): List of [group type, dim]
Raises:
ConfigError: If unsupported group (eg [U, 5]) is passed.
Returns:
Group: Instance of group
"""
[grp, dim] = name
dim_ = int(dim)
grp = grp.lower()
if grp == "su":
return SU(dim_)
if grp == "so":
return SO(dim_)
if grp == "sp":
return Sp(dim_)
if grp == "e":
return E(dim_)
if grp == "z":
return Z(dim_)
if grp == "u" and dim_ == 1:
return U1()
raise ConfigError(
"Unsupported group. Please log an issue to request support.")
def transform_symmetry(model: SymmetryGroup) -> Symmetry:
"""Parses and transforms from raw input model to class model.
If `gauged` flag isn't passed in the config, will auto set based
on group type of `LieGroup`, `U1` or `E`.
"""
description = model.description or ""
group = group_lookup(model.group)
gauged = model.gauged
return Symmetry(
name=model.name,
group=group,
description=description,
coupling=model.coupling,
tag=model.tag,
gauged=isinstance(group, (LieGroup, U1, E)
) if gauged is None else gauged
)
def _lg_lookup(group: LieGroup, v: Union[str, list]) -> Matrix:
if isinstance(v, str):
try:
return group.algebra.irrep_lookup(v)
except KeyError:
raise ModelError(f"No representation, {v}, in {group.group}")
    elif isinstance(v, list):  # must check the value's type; 'group' is never a list here
        representation = Matrix([v])
        if representation.shape[0] == 1:  # Matrix([v]) builds a 1 x n row; store it as a column vector
            representation = representation.transpose()
return representation
else: # should not be hit unless called directly
raise ConfigError("Representation must be a string or list")
def transform_field(model: GenericField, lookups: Dict[str, Symmetry]) -> Field:
field_reps = {}
for k, v in model.representations.items():
group = lookups.get(k)
if group is None:
raise ConfigError(f"The symmetry {k} is undefined.")
# reps can either be matrix or str
if isinstance(group, LieGroup):
representation = Representation(_lg_lookup(group, v), group)
else:
representation = Representation(sympify(v), group)
field_reps[k] = representation
return Field(
name=model.name,
spin=model.spin,
representations=field_reps,
description=model.description,
no_mass=False
)
def transform_model(cfg: Configuration) -> Lagrangian:
version = cfg.version
name = cfg.name
description = cfg.description
symmetries = [transform_symmetry(x) for x in cfg.symmetries]
lookups = {x.name: x for x in symmetries}
fields = [transform_field(x, lookups) for x in cfg.fields]
return Lagrangian(
fields,
symmetries,
name, version, description
)
| 28.169355 | 80 | 0.62067 | 414 | 3,493 | 5.171498 | 0.321256 | 0.014012 | 0.018683 | 0.006539 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002797 | 0.283424 | 3,493 | 123 | 81 | 28.398374 | 0.852577 | 0.134841 | 0 | 0.048193 | 0 | 0 | 0.059379 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.060241 | false | 0 | 0.084337 | 0 | 0.277108 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8cf0dc521689c5fdbe3f6afa86a1cabcbd9f9e03 | 33,081 | py | Python | mys/cli/__init__.py | Dogeek/mys | 193259a634c3ab1d9058b9ff79a0462ae86274b7 | ["MIT"] | null | null | null | mys/cli/__init__.py | Dogeek/mys | 193259a634c3ab1d9058b9ff79a0462ae86274b7 | ["MIT"] | null | null | null | mys/cli/__init__.py | Dogeek/mys | 193259a634c3ab1d9058b9ff79a0462ae86274b7 | ["MIT"] | null | null | null | import argparse
import getpass
import glob
import json
import multiprocessing
import os
import re
import shutil
import subprocess
import sys
import tarfile
import time
from tempfile import TemporaryDirectory
from traceback import print_exc
import toml
import yaspin
from colors import cyan
from colors import green
from colors import red
from colors import strip_color
from colors import yellow
from humanfriendly import format_timespan
from pygments import highlight
from pygments.formatters import Terminal256Formatter
from pygments.lexers import PythonLexer
from ..transpiler import Source
from ..transpiler import transpile
from ..version import __version__
MYS_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
DOWNLOAD_DIRECTORY = 'build/dependencies'
BULB = '💡'
INFO = 'ℹ️'
ERROR = '❌️'
OPTIMIZE = {
'speed': '3',
'size': 's',
'debug': '0'
}
TRANSPILE_OPTIONS_FMT = '-n {package_name} -p {package_path} {flags}'
COPY_HPP_AND_CPP_FMT = '''\
{dst}: {src}
\tmkdir -p $(dir $@)
\tcp $< $@
'''
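
# Rendered with illustrative paths, e.g. src='build/dependencies/mys-bar-latest/src/util.hpp'
# and dst='build/transpiled/src/bar/util.hpp', the template above expands to an
# ordinary make copy rule:
#
#   build/transpiled/src/bar/util.hpp: build/dependencies/mys-bar-latest/src/util.hpp
#   	mkdir -p $(dir $@)
#   	cp $< $@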
class BadPackageNameError(Exception):
pass
def create_file(path, data):
with open(path, 'w') as fout:
fout.write(data)
def read_template_file(path):
with open(os.path.join(MYS_DIR, 'cli/templates', path)) as fin:
return fin.read()
def find_config_file():
path = os.getenv('MYS_CONFIG')
config_dir = os.path.expanduser('~/.config/mys')
config_path = os.path.join(config_dir, 'config.toml')
if path is not None:
return path
if not os.path.exists(config_path):
os.makedirs(config_dir, exist_ok=True)
create_file(config_path, '')
return config_path
def load_mys_config():
"""Mys tool configuration.
Add validation when needed.
"""
path = find_config_file()
try:
with open(path) as fin:
return toml.loads(fin.read())
except toml.decoder.TomlDecodeError:
raise Exception(f"failed to load Mys configuration file '{path}'")
def default_jobs():
return max(1, multiprocessing.cpu_count() - 1)
def duration_start():
return time.time()
def duration_stop(start_time):
end_time = time.time()
duration = format_timespan(end_time - start_time)
return f' ({duration})'
def box_print(lines, icon, width=None):
if width is None:
width = 0
for line in lines:
width = max(width, len(strip_color(line)))
print(f'┌{"─" * (width - 3)} {icon} ─┐')
for line in lines:
padding = width - len(strip_color(line))
print(f'│ {line}{" " * padding} │')
print(f'└{"─" * (width + 2)}┘')
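
# For example, box_print(['hello'], BULB) prints:
#
#   ┌── 💡 ─┐
#   │ hello │
#   └───────┘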
SPINNER = [
' ⠋', ' ⠙', ' ⠹', ' ⠸', ' ⠼', ' ⠴', ' ⠦', ' ⠧', ' ⠇', ' ⠏'
]
class Spinner(yaspin.api.Yaspin):
def __init__(self, text):
super().__init__(yaspin.Spinner(SPINNER, 80), text=text, color='yellow')
self._start_time = duration_start()
def __exit__(self, exc_type, exc_val, traceback):
duration = duration_stop(self._start_time)
if exc_type is None:
self.write(green(' ✔ ') + self.text + duration)
else:
self.write(red(' ✘ ') + self.text + duration)
return super().__exit__(exc_type, exc_val, traceback)
def run_with_spinner(command, message, env=None):
output = ''
try:
with Spinner(text=message):
result = subprocess.run(command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
encoding='utf-8',
close_fds=False,
env=env)
output = result.stdout
result.check_returncode()
except Exception:
lines = []
for line in output.splitlines():
if 'make: *** ' in line:
continue
lines.append(line)
raise Exception('\n'.join(lines).rstrip())
def run(command, message, verbose, env=None):
if verbose:
start_time = duration_start()
try:
print('Command:', ' '.join(command))
subprocess.run(command, check=True, close_fds=False, env=env)
print(green(' ✔ ') + message + duration_stop(start_time))
except Exception:
print(red(' ✘ ') + message + duration_stop(start_time))
raise
else:
run_with_spinner(command, message, env)
def git_config_get(item, default=None):
try:
return subprocess.check_output(['git', 'config', '--get', item],
encoding='utf-8').strip()
except Exception:
return default
def find_authors(authors):
if authors is not None:
return ', '.join([f'"{author}"'for author in authors])
user = git_config_get('user.name', getpass.getuser())
email = git_config_get('user.email', f'{user}@example.com')
return f'"{user} <{email}>"'
def validate_package_name(package_name):
if not re.match(r'^[a-z][a-z0-9_]*$', package_name):
raise BadPackageNameError()
def create_file_from_template(path, directory, **kwargs):
    template = read_template_file(os.path.join(directory, path))
create_file(path, template.format(**kwargs))
def create_new_file(path, **kwargs):
create_file_from_template(path, 'new', **kwargs)
def do_new(_parser, args, _mys_config):
package_name = os.path.basename(args.path)
authors = find_authors(args.authors)
try:
with Spinner(text=f"Creating package {package_name}"):
validate_package_name(package_name)
os.makedirs(args.path)
path = os.getcwd()
os.chdir(args.path)
try:
create_new_file('package.toml',
package_name=package_name,
authors=authors)
create_new_file('.gitignore')
create_new_file('.gitattributes')
create_new_file('README.rst',
package_name=package_name,
title=package_name.replace('_', ' ').title(),
line='=' * len(package_name))
create_new_file('LICENSE')
shutil.copyfile(os.path.join(MYS_DIR, 'cli/templates/new/pylintrc'),
'pylintrc')
os.mkdir('src')
create_new_file('src/lib.mys')
create_new_file('src/main.mys')
finally:
os.chdir(path)
except BadPackageNameError:
box_print(['Package names must start with a letter and only',
'contain letters, numbers and underscores. Only lower',
'case letters are allowed.',
'',
'Here are a few examples:',
'',
f'{cyan("mys new foo")}'
f'{cyan("mys new f1")}'
f'{cyan("mys new foo_bar")}'],
ERROR)
raise Exception()
cd = cyan(f'cd {package_name}')
box_print(['Build and run the new package by typing:',
'',
f'{cd}',
f'{cyan("mys run")}'],
BULB,
width=53)
class Author:
def __init__(self, name, email):
self.name = name
self.email = email
class PackageConfig:
def __init__(self):
self.authors = []
self.config = self.load_package_configuration()
def load_package_configuration(self):
with open('package.toml') as fin:
config = toml.loads(fin.read())
package = config.get('package')
if package is None:
raise Exception("'[package]' not found in package.toml.")
for name in ['name', 'version', 'authors']:
if name not in package:
raise Exception(f"'[package].{name}' not found in package.toml.")
for author in package['authors']:
mo = re.match(r'^([^<]+)<([^>]+)>$', author)
if not mo:
raise Exception(f"Bad author '{author}'.")
self.authors.append(Author(mo.group(1).strip(), mo.group(2).strip()))
if 'description' not in package:
package['description'] = ''
if 'dependencies' not in config:
config['dependencies'] = {}
return config
def __getitem__(self, key):
return self.config[key]
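
# A minimal package.toml accepted by load_package_configuration() above
# (illustrative values; authors must match the 'Name <email>' form):
#
#   [package]
#   name = "foo"
#   version = "0.1.0"
#   authors = ["Jane Doe <jane@example.com>"]
#
#   [dependencies]
#   bar = "*"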
def setup_build():
os.makedirs('build/transpiled', exist_ok=True)
os.makedirs('build/dependencies', exist_ok=True)
def rename_one_matching(pattern, to):
paths = glob.glob(pattern)
if len(paths) != 1:
raise Exception(
            f'{len(paths)} paths matched when exactly one was expected')
os.rename(paths[0], to)
def prepare_download_dependency_from_registry(name, version):
if version == '*':
archive = f'mys-{name}-latest.tar.gz'
package_specifier = f'mys-{name}'
else:
archive = f'mys-{name}-{version}.tar.gz'
package_specifier = f'mys-{name}=={version}'
archive_path = f'build/dependencies/{archive}'
if os.path.exists(archive_path):
return None
else:
return (name, version, package_specifier, archive, archive_path)
def extract_dependency(name, version, archive, archive_path):
if version == '*':
rename_one_matching(os.path.join(DOWNLOAD_DIRECTORY, f'mys-{name}-*.tar.gz'),
archive_path)
with Spinner(text=f"Extracting {archive}"):
with tarfile.open(archive_path) as fin:
fin.extractall(DOWNLOAD_DIRECTORY)
if version == '*':
rename_one_matching(os.path.join(DOWNLOAD_DIRECTORY, f'mys-{name}-*/'),
os.path.join(DOWNLOAD_DIRECTORY, f'mys-{name}-latest'))
def download_dependencies(config, verbose):
packages = []
for name, info in config['dependencies'].items():
if isinstance(info, str):
package = prepare_download_dependency_from_registry(name, info)
if package is not None:
packages.append(package)
if not packages:
return
command = [
sys.executable, '-m', 'pip', 'download',
'-d', DOWNLOAD_DIRECTORY
]
command += [package_specifier for _, _, package_specifier, _, _ in packages]
run(command, 'Downloading dependencies', verbose)
for name, version, _, archive, archive_path in packages:
extract_dependency(name, version, archive, archive_path)
def read_package_configuration():
try:
with Spinner('Reading package configuration'):
return PackageConfig()
except FileNotFoundError:
box_print([
'Current directory does not contain a Mys package (package.toml does',
'not exist).',
'',
'Please enter a Mys package directory, and try again.',
'',
f'You can create a new package with {cyan("mys new <name>")}.'],
BULB)
raise Exception()
def find_package_sources(package_name, path, ignore_main=False):
srcs_mys = []
srcs_hpp = []
srcs_cpp = []
oldpath = os.getcwd()
os.chdir(os.path.join(path, 'src'))
try:
for src in glob.glob('**/*.mys', recursive=True):
if ignore_main and src == 'main.mys':
continue
srcs_mys.append((package_name, path, src, os.path.join(path, 'src', src)))
for src in glob.glob('**/*.hpp', recursive=True):
srcs_hpp.append((package_name, path, src, os.path.join(path, 'src', src)))
for src in glob.glob('**/*.cpp', recursive=True):
srcs_cpp.append((package_name, path, src, os.path.join(path, 'src', src)))
finally:
os.chdir(oldpath)
return srcs_mys, srcs_hpp, srcs_cpp
def dependency_path(dependency_name, config):
for package_name, info in config['dependencies'].items():
if package_name == dependency_name:
if isinstance(info, str):
if info == '*':
return f'build/dependencies/mys-{package_name}-latest/'
else:
return f'build/dependencies/mys-{package_name}-{info}/'
elif 'path' in info:
return info['path']
else:
raise Exception('Bad dependency format.')
raise Exception(f'Bad dependency {dependency_name}.')
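
# Examples (illustrative): with bar = "*" under [dependencies] this returns
# 'build/dependencies/mys-bar-latest/', with bar = "1.0" it returns
# 'build/dependencies/mys-bar-1.0/', and with bar = { path = "../bar" } it
# returns '../bar'.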
def find_dependency_sources(config):
srcs_mys = []
srcs_hpp = []
srcs_cpp = []
for package_name in config['dependencies']:
path = dependency_path(package_name, config)
srcs = find_package_sources(package_name, path, ignore_main=True)
srcs_mys += srcs[0]
srcs_hpp += srcs[1]
srcs_cpp += srcs[2]
return srcs_mys, srcs_hpp, srcs_cpp
def create_makefile(config, optimize, no_ccache):
srcs_mys, srcs_hpp, srcs_cpp = find_package_sources(
config['package']['name'],
'.')
if not srcs_mys:
box_print(["'src/' is empty. Please create one or more .mys-files."], ERROR)
raise Exception()
srcs = find_dependency_sources(config)
srcs_mys += srcs[0]
srcs_hpp += srcs[1]
srcs_cpp += srcs[2]
transpile_options = []
transpile_srcs = []
transpile_srcs_paths = []
copy_hpp_and_cpp = []
objs = []
is_application = False
transpiled_cpp = []
hpps = []
for package_name, package_path, src, _path in srcs_mys:
flags = []
if package_name != config['package']['name']:
flags.append('-s yes')
else:
flags.append('-s no')
if src == 'main.mys':
is_application = True
flags.append('-m yes')
else:
flags.append('-m no')
flags = ' '.join(flags)
module_path = f'build/transpiled/src/{package_name}/{src}'
transpile_options.append(
TRANSPILE_OPTIONS_FMT.format(package_name=package_name,
package_path=package_path,
flags=flags))
transpile_srcs.append(src)
transpile_srcs_paths.append(os.path.join(package_path, 'src', src))
objs.append(f'OBJ += {module_path}.$(OBJ_SUFFIX)')
transpiled_cpp.append(f'SRC += {module_path}.cpp')
for package_name, package_path, src, _path in srcs_hpp:
src_path = os.path.join(package_path, 'src', src)
module_path = f'build/transpiled/src/{package_name}/{src}'
copy_hpp_and_cpp.append(COPY_HPP_AND_CPP_FMT.format(src=src_path,
dst=module_path))
hpps.append(module_path)
for package_name, package_path, src, _path in srcs_cpp:
src_path = os.path.join(package_path, 'src', src)
module_path = f'build/transpiled/src/{package_name}/{src}'
copy_hpp_and_cpp.append(COPY_HPP_AND_CPP_FMT.format(src=src_path,
dst=module_path))
objs.append(f'OBJ += {module_path}.o')
transpiled_cpp.append(f'SRC += {module_path}.cpp')
if is_application:
all_deps = '$(EXE)'
else:
all_deps = '$(OBJ)'
if not no_ccache and shutil.which('ccache'):
ccache = 'ccache '
else:
ccache = ''
create_file_from_template('build/Makefile',
'',
mys_dir=MYS_DIR,
mys=f'{sys.executable} -m mys',
ccache=ccache,
objs='\n'.join(objs),
optimize=OPTIMIZE[optimize],
transpile_options=' '.join(transpile_options),
transpile_srcs_paths=' '.join(transpile_srcs_paths),
transpile_srcs=' '.join(transpile_srcs),
hpps=' '.join(hpps),
copy_hpp_and_cpp='\n'.join(copy_hpp_and_cpp),
all_deps=all_deps,
package_name=config['package']['name'],
transpiled_cpp='\n'.join(transpiled_cpp))
return is_application
def build_prepare(verbose, optimize, no_ccache, config=None):
if config is None:
config = read_package_configuration()
if not os.path.exists('build/Makefile'):
setup_build()
download_dependencies(config, verbose)
return create_makefile(config, optimize, no_ccache)
def build_app(debug, verbose, jobs, is_application):
command = ['make', '-f', 'build/Makefile', 'all']
if os.getenv('MAKEFLAGS') is None:
command += ['-j', str(jobs)]
if debug:
command += ['TRANSPILE_DEBUG=--debug']
if not verbose:
command += ['-s']
if is_application:
command += ['APPLICATION=yes']
run(command, 'Building', verbose)
def do_build(_parser, args, _mys_config):
is_application = build_prepare(args.verbose, args.optimize, args.no_ccache)
build_app(args.debug, args.verbose, args.jobs, is_application)
def run_app(args, verbose):
if verbose:
print('./build/app')
subprocess.run(['./build/app'] + args, check=True)
def style_source(code):
return highlight(code,
PythonLexer(),
Terminal256Formatter(style='monokai')).rstrip()
def do_run(_parser, args, _mys_config):
if build_prepare(args.verbose, args.optimize, args.no_ccache):
build_app(args.debug, args.verbose, args.jobs, True)
run_app(args.args, args.verbose)
else:
main_1 = style_source('def main():\n')
main_2 = style_source(" print('Hello, world!')\n")
func = style_source('main()')
box_print([
f"This package is not executable. Create '{cyan('src/main.mys')}' and",
f"implement '{func}' to make the package executable.",
'',
main_1,
main_2], BULB)
raise Exception()
def do_test(_parser, args, _mys_config):
build_prepare(args.verbose, args.optimize, args.no_ccache)
command = [
'make', '-f', 'build/Makefile', 'test', 'TEST=yes'
]
if os.getenv('MAKEFLAGS') is None:
command += ['-j', str(args.jobs)]
if args.debug:
command += ['TRANSPILE_DEBUG=--debug']
run(command, 'Building tests', args.verbose)
run(['./build/test'], 'Running tests', args.verbose)
def do_clean(_parser, _args, _mys_config):
read_package_configuration()
with Spinner(text='Cleaning'):
shutil.rmtree('build', ignore_errors=True)
def print_lint_message(message):
location = f'{message["path"]}:{message["line"]}:{message["column"]}'
level = message['type'].upper()
symbol = message["symbol"]
message = message["message"]
if level == 'ERROR':
level = red(level, style='bold')
elif level == 'WARNING':
level = yellow(level, style='bold')
else:
level = cyan(level, style='bold')
print(f'{location} {level} {message} ({symbol})')
def do_lint(_parser, args, _mys_config):
read_package_configuration()
output = ''
returncode = 1
try:
with Spinner('Linting'):
proc = subprocess.run([sys.executable, '-m', 'pylint',
'-j', str(args.jobs),
'--output-format', 'json'
] + glob.glob('src/**/*.mys', recursive=True),
stdout=subprocess.PIPE)
output = proc.stdout.decode()
returncode = proc.returncode
proc.check_returncode()
except Exception:
pass
for item in json.loads(output):
print_lint_message(item)
if returncode != 0:
raise Exception()
def do_transpile(_parser, args, _mys_config):
sources = []
for i, mysfile in enumerate(args.mysfiles):
mys_path = os.path.join(args.package_path[i], 'src', mysfile)
module_hpp = os.path.join(args.package_name[i], mysfile + '.hpp')
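# module_hpp ends in '.mys.hpp' (8 characters, assuming mysfile keeps its
# '.mys' suffix); stripping it and joining the path components with dots
# yields the dotted module name.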
module = '.'.join(module_hpp[:-8].split('/'))
hpp_path = os.path.join(args.outdir, 'include', module_hpp)
cpp_path = os.path.join(args.outdir,
'src',
args.package_name[i],
mysfile + '.cpp')
with open(mys_path, 'r') as fin:
sources.append(Source(fin.read(),
mysfile,
module,
mys_path,
module_hpp,
args.skip_tests[i] == 'yes',
hpp_path,
cpp_path,
args.main[i] == 'yes'))
generated = transpile(sources)
for source, (hpp_1_code, hpp_2_code, cpp_code) in zip(sources, generated):
os.makedirs(os.path.dirname(source.hpp_path), exist_ok=True)
os.makedirs(os.path.dirname(source.cpp_path), exist_ok=True)
create_file(source.hpp_path[:-3] + 'early.hpp', hpp_1_code)
create_file(source.hpp_path, hpp_2_code)
create_file(source.cpp_path, cpp_code)
def publish_create_release_package(config, verbose, archive):
create_file_from_template('setup.py',
'publish',
name=f"mys-{config['package']['name']}",
version=config['package']['version'],
description=config['package']['description'],
author="'" + ', '.join(
[author.name for author in config.authors]) + "'",
author_email="'" + ', '.join(
[author.email for author in config.authors]) + "'",
dependencies='[]')
create_file_from_template('MANIFEST.in', 'publish')
shutil.copytree('../../src', 'src')
shutil.copy('../../package.toml', 'package.toml')
shutil.copy('../../README.rst', 'README.rst')
run([sys.executable, 'setup.py', 'sdist'], f'Creating {archive}', verbose)
def publish_upload_release_package(verbose, username, password, archive):
# Try to hide the password.
env = os.environ.copy()
if username is None:
username = input('Username: ')
if password is None:
password = getpass.getpass()
env['TWINE_USERNAME'] = username
env['TWINE_PASSWORD'] = password
command = [sys.executable, '-m', 'twine', 'upload']
if verbose:
command += ['--verbose']
command += glob.glob('dist/*')
run(command, f'Uploading {archive}', verbose, env=env)
def do_publish(_parser, args, _mys_config):
config = read_package_configuration()
box_print([
"Mys is currently using Python's Package Index (PyPI). A PyPI",
'account is required to publish your package.'], INFO)
publish_dir = 'build/publish'
shutil.rmtree(publish_dir, ignore_errors=True)
os.makedirs(publish_dir)
path = os.getcwd()
os.chdir(publish_dir)
try:
name = config['package']['name']
version = config['package']['version']
archive = f"mys-{name}-{version}.tar.gz"
publish_create_release_package(config, args.verbose, archive)
publish_upload_release_package(args.verbose,
args.username,
args.password,
archive)
finally:
os.chdir(path)
def install_clean():
if not os.path.exists('package.toml'):
raise Exception('not a package')
with Spinner(text='Cleaning'):
shutil.rmtree('build', ignore_errors=True)
def install_download(args):
command = [
sys.executable, '-m', 'pip', 'download', f'mys-{args.package}'
]
run(command, 'Downloading package', args.verbose)
def install_extract():
archive = glob.glob('mys-*.tar.gz')[0]
with Spinner(text='Extracting package'):
with tarfile.open(archive) as fin:
fin.extractall()
os.remove(archive)
def install_build(args):
config = read_package_configuration()
is_application = build_prepare(args.verbose, 'speed', args.no_ccache, config)
if not is_application:
box_print(['There is no application to build in this package (src/main.mys ',
'missing).'],
ERROR)
raise Exception()
build_app(args.debug, args.verbose, args.jobs, is_application)
return config
def install_install(root, _args, config):
bin_dir = os.path.join(root, 'bin')
bin_name = config['package']['name']
src_file = 'build/app'
dst_file = os.path.join(bin_dir, bin_name)
with Spinner(text=f"Installing {bin_name} in {bin_dir}"):
os.makedirs(bin_dir, exist_ok=True)
shutil.copyfile(src_file, dst_file)
shutil.copymode(src_file, dst_file)
def install_from_current_directory(args, root):
install_clean()
config = install_build(args)
install_install(root, args, config)
def install_from_registry(args, root):
with TemporaryDirectory() as tmp_dir:
os.chdir(tmp_dir)
install_download(args)
install_extract()
os.chdir(glob.glob('mys-*')[0])
config = install_build(args)
install_install(root, args, config)
def do_install(_parser, args, _mys_config):
root = os.path.abspath(os.path.expanduser(args.root))
if args.package is None:
install_from_current_directory(args, root)
else:
install_from_registry(args, root)
def do_style(_parser, _args, _mys_config):
read_package_configuration()
box_print(['This subcommand is not yet implemented.'], ERROR)
raise Exception()
def do_help(parser, _args, _mys_config):
parser.print_help()
DESCRIPTION = f'''\
The Mys programming language package manager.
Available subcommands are:
{cyan('new')} Create a new package.
{cyan('build')} Build the application.
{cyan('run')} Build and run the application.
{cyan('test')} Build and run tests.
{cyan('clean')} Remove build output.
{cyan('lint')} Perform static code analysis.
{cyan('publish')} Publish a release.
{cyan('install')} Install an application from a local package or the registry.
'''
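# Example invocations (illustrative; assumes this module is installed as the
# ``mys`` executable):
#   mys new foo        # create a new package in ./foo
#   mys build -j 8     # build the application with eight parallel jobs
#   mys run            # build and run the application
#   mys install        # install the application into ~/.local/bin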
def add_verbose_argument(subparser):
subparser.add_argument('-v', '--verbose',
action='store_true',
help='Verbose output.')
def add_jobs_argument(subparser):
subparser.add_argument(
'-j', '--jobs',
type=int,
default=default_jobs(),
help='Maximum number of parallel jobs (default: %(default)s).')
def add_optimize_argument(subparser, default):
subparser.add_argument(
'-o', '--optimize',
default=default,
choices=['speed', 'size', 'debug'],
help='Optimize the build for given level (default: %(default)s).')
def add_no_ccache_argument(subparser):
subparser.add_argument('-n', '--no-ccache',
action='store_true',
help='Do not use ccache.')
def create_parser():
parser = argparse.ArgumentParser(
description=DESCRIPTION,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-d', '--debug', action='store_true')
parser.add_argument('--config', help='Configuration file to use.')
parser.add_argument('--version',
action='version',
version=__version__,
help='Print version information and exit.')
subparsers = parser.add_subparsers(dest='subcommand',
help='Subcommand to execute.',
metavar='subcommand')
# The new subparser.
subparser = subparsers.add_parser(
'new',
description='Create a new package.')
subparser.add_argument(
'--author',
dest='authors',
action='append',
help=("Package author as 'Mys Lang <mys.lang@example.com>'. May "
"be given multiple times."))
subparser.add_argument('path')
subparser.set_defaults(func=do_new)
# The build subparser.
subparser = subparsers.add_parser(
'build',
description='Build the application.')
add_verbose_argument(subparser)
add_jobs_argument(subparser)
add_optimize_argument(subparser, 'speed')
add_no_ccache_argument(subparser)
subparser.set_defaults(func=do_build)
# The run subparser.
subparser = subparsers.add_parser(
'run',
description='Build and run the application.')
add_verbose_argument(subparser)
add_jobs_argument(subparser)
add_optimize_argument(subparser, 'speed')
add_no_ccache_argument(subparser)
subparser.add_argument('args', nargs='*')
subparser.set_defaults(func=do_run)
# The test subparser.
subparser = subparsers.add_parser(
'test',
description='Build and run tests.')
add_verbose_argument(subparser)
add_jobs_argument(subparser)
add_optimize_argument(subparser, 'debug')
add_no_ccache_argument(subparser)
subparser.set_defaults(func=do_test)
# The clean subparser.
subparser = subparsers.add_parser(
'clean',
description='Remove build output.')
subparser.set_defaults(func=do_clean)
# The lint subparser.
subparser = subparsers.add_parser(
'lint',
description='Perform static code analysis.')
add_jobs_argument(subparser)
subparser.set_defaults(func=do_lint)
# The transpile subparser.
subparser = subparsers.add_parser(
'transpile',
description='Transpile given Mys file(s) to C++ header and source files.')
subparser.add_argument('-o', '--outdir',
default='.',
help='Output directory.')
subparser.add_argument('-p', '--package-path',
required=True,
action='append',
help='Package path.')
subparser.add_argument('-n', '--package-name',
required=True,
action='append',
help='Package name.')
subparser.add_argument('-s', '--skip-tests',
action='append',
choices=['yes', 'no'],
help='Skip tests.')
subparser.add_argument('-m', '--main',
action='append',
choices=['yes', 'no'],
help='Contains main().')
subparser.add_argument('mysfiles', nargs='+')
subparser.set_defaults(func=do_transpile)
# The publish subparser.
subparser = subparsers.add_parser(
'publish',
description='Publish a release.')
add_verbose_argument(subparser)
subparser.add_argument('-u', '--username',
help='Registry username.')
subparser.add_argument('-p', '--password',
help='Registry password.')
subparser.set_defaults(func=do_publish)
# The install subparser.
subparser = subparsers.add_parser(
'install',
description='Install an application from a local package or the registry.')
add_verbose_argument(subparser)
add_jobs_argument(subparser)
add_no_ccache_argument(subparser)
subparser.add_argument('--root',
default='~/.local',
help='Root folder to install into (default: %(default)s).')
subparser.add_argument(
'package',
nargs='?',
help=('Package to install application from. Installs current package if '
'not given.'))
subparser.set_defaults(func=do_install)
# The style subparser.
subparser = subparsers.add_parser(
'style',
description=(
'Check that the package follows the Mys style guidelines. Automatically '
'fixes trivial errors and prints the rest.'))
subparser.set_defaults(func=do_style)
# The help subparser.
subparser = subparsers.add_parser(
'help',
description='Show this help.')
subparser.set_defaults(func=do_help)
return parser
def main():
parser = create_parser()
args = parser.parse_args()
if not hasattr(args, 'func'):
parser.print_help()
sys.exit(1)
try:
args.func(parser, args, load_mys_config())
except Exception as e:
if args.debug:
print_exc()
sys.exit(str(e))
except KeyboardInterrupt:
print()
if args.debug:
raise
sys.exit(1)
| 30.405331 | 86 | 0.578489 | 3,720 | 33,081 | 4.96586 | 0.127419 | 0.027987 | 0.010827 | 0.011314 | 0.314405 | 0.219293 | 0.171385 | 0.122503 | 0.098197 | 0.080333 | 0 | 0.002063 | 0.296575 | 33,081 | 1,087 | 87 | 30.433303 | 0.790632 | 0.009401 | 0 | 0.223192 | 0 | 0 | 0.169319 | 0.0175 | 0.002494 | 0 | 0 | 0 | 0 | 1 | 0.077307 | false | 0.013716 | 0.034913 | 0.004988 | 0.15212 | 0.032419 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8cf10a22da12300e75cb2c2f6e094b33c9763714 | 13,324 | py | Python | tests/test_hooks/conan-center/test_missing_system_libs.py | conan-io/plugins | f4732622ecaf803b01f556da1eef33a48ab7847a | [
"MIT"
] | 1 | 2018-10-09T16:43:48.000Z | 2018-10-09T16:43:48.000Z | tests/test_hooks/conan-center/test_missing_system_libs.py | conan-io/plugins | f4732622ecaf803b01f556da1eef33a48ab7847a | [
"MIT"
] | 5 | 2018-10-10T18:04:15.000Z | 2018-10-23T08:39:44.000Z | tests/test_hooks/conan-center/test_missing_system_libs.py | conan-io/plugins | f4732622ecaf803b01f556da1eef33a48ab7847a | [
"MIT"
] | 3 | 2018-10-09T16:43:54.000Z | 2018-10-10T16:36:20.000Z | import os
from parameterized import parameterized
import textwrap
import unittest
from conans import tools
from tests.utils.test_cases.conan_client import ConanClientTestCase
from conans import __version__ as conan_version
@unittest.skipUnless(conan_version >= "1.19.0", "Conan >= 1.19.0 needed")
class ConanMissingSystemLibs(ConanClientTestCase):
cmakelists = textwrap.dedent("""\
cmake_minimum_required(VERSION 2.8)
project(hooks_systemlib_test LANGUAGES C)
include(conanbuildinfo.cmake)
conan_basic_setup()
set(LINK_TO {link_lib})
if(LINK_TO)
find_library(LINK_LIB_FULL_PATH {link_lib})
if(LINK_LIB_FULL_PATH)
set(LINK_TO ${{LINK_LIB_FULL_PATH}})
endif()
endif()
add_library({name} simplelib.c)
target_link_libraries({name} ${{LINK_TO}} ${{CONAN_LIBS}})
include(GNUInstallDirs)
install(TARGETS {name}
ARCHIVE DESTINATION ${{CMAKE_INSTALL_LIBDIR}}
LIBRARY DESTINATION ${{CMAKE_INSTALL_LIBDIR}}
RUNTIME DESTINATION ${{CMAKE_INSTALL_BINDIR}})
""")
source_c = textwrap.dedent("""\
#if defined({name}_EXPORTS)
# if defined(_WIN32)
# define API_EXPORTS __declspec(dllexport)
# else
# define API_EXPORTS __attribute__((visibility("default")))
# endif
#else
# define API_EXPORTS
#endif
{includes}
API_EXPORTS
int {name}_function(int arg) {{
{function_call};
return 42;
}}
""")
conanfile = textwrap.dedent("""\
import os
from conans import CMake, ConanFile, tools
class AConan(ConanFile):
exports_sources = "CMakeLists.txt", "simplelib.c"
settings = "os", "arch", "compiler", "build_type"
options = {{"shared": [True, False], "fPIC": [True, False],}}
default_options = {{"shared": False, "fPIC": True,}}
generators = "cmake"
requires = {requires}
def package(self):
cmake = CMake(self)
cmake.configure()
cmake.build()
cmake.install()
def package_info(self):
self.cpp_info.libs = ["{name}"]
if self.options.shared:
self.cpp_info.system_libs = {system_libs_shared}
else:
self.cpp_info.system_libs = {system_libs_static}
self.cpp_info.frameworks = {frameworks}
""")
conanfile_test = textwrap.dedent("""\
from conans import ConanFile
class AConan(ConanFile):
def test(self):
pass
""")
class OSBuildInfo(object):
def __init__(self, includes, link_libs, shlibs_bases, frameworks, function):
self.includes = includes
self.libs = link_libs
self.shlibs_bases = shlibs_bases
self.frameworks = frameworks
self.function = function
@property
def _os_build_info(self):
return {
"Linux": self.OSBuildInfo(["dlfcn.h"], ["libdl.so"], ["dl"], [], "dlclose((void*)0)"),
"Windows": self.OSBuildInfo(["winsock2.h"], ["ws2_32.lib"], ["ws2_32"], [], "WSAStartup(0, 0)"),
"Macos": self.OSBuildInfo(["CoreServices/CoreServices.h"], ["CoreServices"], [],
["CoreServices"],
"FSEventStreamCopyDescription(0)"),
}[tools.detected_os()]
@property
def _os_build_info2(self):
return {
"Linux": self.OSBuildInfo(["mqueue.h"], ["librt.so"], ["rt"], [], "mq_open(0, 0)"),
"Windows": self.OSBuildInfo(["shlwapi.h"], ["shlwapi.lib"], ["shlwapi"], [],
"PathIsDirectory(\"C:\\Windows\")"),
"Macos": self.OSBuildInfo(["CoreFoundation/CoreFoundation.h"], ["CoreFoundation"], [],
["CoreFoundation"],
"CFNumberCreate(kCFAllocatorDefault, 0, 0)"),
}[tools.detected_os()]
@property
def _prefix(self):
return {
"Windows": "",
"Linux": "lib",
"Macos": "lib",
}[tools.detected_os()]
@property
def _shlext(self):
return {
"Windows": "dll",
"Linux": "so",
"Macos": "dylib",
}[tools.detected_os()]
@property
def _attribute(self):
return {
"Windows": "system_libs",
"Linux": "system_libs",
"Macos": "frameworks",
}[tools.detected_os()]
@property
def _shlibdir(self):
return {
"Windows": "bin",
"Linux": "lib",
"Macos": "lib",
}[tools.detected_os()]
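# Point Conan at the hooks under test; the CONAN_HOOKS path is resolved
# relative to this file so the suite works from any working directory.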
def _get_environ(self, **kwargs):
kwargs = super(ConanMissingSystemLibs, self)._get_environ(**kwargs)
kwargs.update({'CONAN_HOOKS': os.path.join(os.path.dirname(__file__), '..', '..', '..',
'hooks', 'conan-center')})
return kwargs
def _write_files(self, osbuildinfo, system_libs_shared, frameworks, system_libs_static,
requires=(), name="simplelib", subdir="."):
tools.save(os.path.join(subdir, "conanfile.py"),
content=self.conanfile.format(name=name,
system_libs_shared=repr(system_libs_shared),
system_libs_static=repr(system_libs_static),
frameworks=repr(frameworks),
requires=repr(requires)))
tools.save(os.path.join(subdir, "CMakeLists.txt"),
content=self.cmakelists.format(name=name,
link_lib=" ".join(osbuildinfo.libs)))
tools.save(os.path.join(subdir, "simplelib.c"),
content=self.source_c.format(name=name,
includes="\n".join("#include <{}>".format(include) for include in osbuildinfo.includes),
function_call=osbuildinfo.function))
tools.save(os.path.join(subdir, "test_package", "conanfile.py"),
content=self.conanfile_test)
def test_no_system_lib(self):
osbuildinfo = self.OSBuildInfo([], [], [], [], "42")
self._write_files(osbuildinfo=osbuildinfo, system_libs_static=[], system_libs_shared=[],
frameworks=[])
output = self.conan(["create", ".", "name/version@user/channel", "-o", "name:shared=True"])
self.assertIn("[MISSING SYSTEM LIBS (KB-H043)] OK", output)
for lib in self._os_build_info.shlibs_bases:
library = os.path.join(".", self._shlibdir, "{}simplelib.{}".format(self._prefix, self._shlext))
self.assertNotIn("[MISSING SYSTEM LIBS (KB-H043)] Library '{library}' links to system "
"library '{syslib}' but it is not in cpp_info.{attribute}.".
format(library=library, shlext=self._shlext, syslib=lib,
attribute=self._attribute), output)
def test_system_lib_correct(self):
self._write_files(osbuildinfo=self._os_build_info,
system_libs_static=self._os_build_info.shlibs_bases,
system_libs_shared=[], frameworks=[])
output = self.conan(["create", ".", "name/version@user/channel", "-o", "name:shared=True"])
self.assertIn("[MISSING SYSTEM LIBS (KB-H043)] OK", output)
for lib in self._os_build_info.shlibs_bases:
library = os.path.join(".", self._shlibdir, "{}simplelib.{}".format(self._prefix, self._shlext))
self.assertNotIn("[MISSING SYSTEM LIBS (KB-H043)] Library '{library}' links to system "
"library '{syslib}' but it is not in cpp_info.{attribute}.".
format(library=library, shlext=self._shlext, syslib=lib,
attribute=self._attribute), output)
def test_system_lib_missing(self):
self._write_files(osbuildinfo=self._os_build_info, system_libs_static=[],
system_libs_shared=[], frameworks=[])
output = self.conan(["create", ".", "name/version@user/channel", "-o", "name:shared=True"])
for lib in self._os_build_info.shlibs_bases:
library = os.path.join(".", self._shlibdir, "{}simplelib.{}".format(self._prefix, self._shlext))
self.assertIn("[MISSING SYSTEM LIBS (KB-H043)] Library '{library}' links to system "
"library '{syslib}' but it is not in cpp_info.{attribute}.".
format(library=library, shlext=self._shlext, syslib=lib,
attribute=self._attribute), output)
@parameterized.expand([
(True,),
(False,),
])
def test_dep_system_lib_ok(self, dep_shared):
self._write_files(subdir="dep", name="dep", osbuildinfo=self._os_build_info,
system_libs_static=self._os_build_info.shlibs_bases,
system_libs_shared=[], frameworks=[])
self.conan(["create", "dep", "dep/version@user/channel", "-o", "dep:shared={}".format(dep_shared)])
self._write_files(subdir="lib", name="lib", requires=("dep/version@user/channel", ),
osbuildinfo=self._os_build_info2,
system_libs_static=self._os_build_info2.shlibs_bases,
system_libs_shared=[], frameworks=[])
output = self.conan(["create", "lib", "lib/version@user/channel", "-o", "lib:shared=True",
"-o", "dep:shared={}".format(dep_shared)])
self.assertIn("[MISSING SYSTEM LIBS (KB-H043)] OK", output)
for lib in self._os_build_info.shlibs_bases:
library = os.path.join(".", self._shlibdir, "{}lib.{}".format(self._prefix, self._shlext))
self.assertNotIn("[MISSING SYSTEM LIBS (KB-H043)] Library '{library}' links to system "
"library '{syslib}' but it is not in cpp_info.{attribute}.".
format(library=library, shlext=self._shlext, syslib=lib,
attribute=self._attribute), output)
for lib in self._os_build_info2.shlibs_bases:
library = os.path.join(".", self._shlibdir, "{}lib.{}".format(self._prefix, self._shlext))
self.assertNotIn("[MISSING SYSTEM LIBS (KB-H043)] Library '{library}' links to system "
"library '{syslib}' but it is not in cpp_info.{attribute}.".
format(library=library, shlext=self._shlext, syslib=lib,
attribute=self._attribute), output)
@parameterized.expand([
("shared_dependency", True),
("static_dependency", False),
])
def test_dep_system_lib_missing(self, name, dep_shared):
self._write_files(subdir="dep", name="dep", osbuildinfo=self._os_build_info,
frameworks=self._os_build_info.frameworks,
system_libs_static=self._os_build_info.shlibs_bases,
system_libs_shared=[])
self.conan(["create", "dep", "dep/version@user/channel", "-o", "dep:shared={}".format(dep_shared)])
self._write_files(subdir="lib", name="lib", requires=("dep/version@user/channel", ),
osbuildinfo=self._os_build_info2,
frameworks=self._os_build_info.frameworks,
system_libs_static=[],
system_libs_shared=[])
output = self.conan(["create", "lib", "lib/version@user/channel", "-o", "lib:shared=True",
"-o", "dep:shared={}".format(dep_shared)])
self.assertIn("[MISSING SYSTEM LIBS (KB-H043)] OK", output)
for lib in self._os_build_info.shlibs_bases:
library = os.path.join(".", self._shlibdir, "{}lib.{}".format(self._prefix, self._shlext))
self.assertNotIn("[MISSING SYSTEM LIBS (KB-H043)] Library '{library}' links to system "
"library '{syslib}' but it is not in cpp_info.{attribute}.".
format(library=library, shlext=self._shlext, syslib=lib,
attribute=self._attribute), output)
for lib in self._os_build_info2.shlibs_bases:
library = os.path.join(".", self._shlibdir, "{}lib.{}".format(self._prefix, self._shlext))
self.assertIn("[MISSING SYSTEM LIBS (KB-H043)] Library '{library}' links to system "
"library '{syslib}' but it is not in cpp_info.{attribute}.".
format(library=library, shlext=self._shlext, syslib=lib,
attribute=self._attribute), output)
| 48.627737 | 136 | 0.545482 | 1,331 | 13,324 | 5.231405 | 0.143501 | 0.053138 | 0.030016 | 0.030159 | 0.575183 | 0.544018 | 0.508114 | 0.490306 | 0.490306 | 0.477955 | 0 | 0.007725 | 0.319874 | 13,324 | 273 | 137 | 48.805861 | 0.760649 | 0 | 0 | 0.460905 | 0 | 0 | 0.341264 | 0.070324 | 0 | 0 | 0 | 0 | 0.045267 | 1 | 0.057613 | false | 0.004115 | 0.041152 | 0.024691 | 0.156379 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8cf52cc5161eaa0e900419590d64152c1523b0c5 | 2,249 | py | Python | proc/scripts/data/unified/convert_examples.py | geokats/eeqa | 419ee7c6a87e4a1b3165681d16da6d5edf3990e9 | [
"MIT"
] | 1 | 2021-07-01T17:00:29.000Z | 2021-07-01T17:00:29.000Z | proc/scripts/data/unified/convert_examples.py | geokats/eeqa | 419ee7c6a87e4a1b3165681d16da6d5edf3990e9 | [
"MIT"
] | null | null | null | proc/scripts/data/unified/convert_examples.py | geokats/eeqa | 419ee7c6a87e4a1b3165681d16da6d5edf3990e9 | [
"MIT"
] | null | null | null | import json
import argparse
def convert_example(example):
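# Convert one ACE-style example into the unified format used downstream.
# Assumed input keys: 'words', 'golden-entity-mentions' and
# 'golden-event-mentions'; entity end offsets are exclusive in the source
# and inclusive in the output (hence the -1 below).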
sentence = example['words']
ner = []
relation = []
event = []
# Get NER information
for example_entity in example['golden-entity-mentions']:
entity_start = example_entity['start']
entity_end = example_entity['end'] - 1
entity_type = example_entity['entity-type']
ner.append([entity_start, entity_end, entity_type])
# Get event trigger and arguments
for example_event in example['golden-event-mentions']:
event_type = example_event['event-type']
trigger = example_event['trigger']
arguments = example_event['arguments']
if trigger['end'] - trigger['start'] != 1:
print(f"WARNING: Trigger \"{trigger['text']}\" is longer than one word! Only first word will be kept.")
converted_event = [[trigger['start'], event_type]]
for argument in arguments:
converted_event.append([argument['start'], argument['end'], argument['role']])
event.append(converted_event)
# Create converted example
converted_example = {
'sentence' : sentence,
's_start' : 0,
'ner' : ner,
'relation' : relation,
'event' : event
}
return converted_example
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("input_file", type=str)
parser.add_argument("output_file", type=str)
args = parser.parse_args()
print(f"Converting examples from {args.input_file} to {args.output_file}...")
with open(args.input_file, 'r') as inpf, open(args.output_file, 'w') as outf:
count = 0
for line in inpf:
# Read and convert example
example = json.loads(line)
converted_example = convert_example(example)
#If sentence is too long for BERT, reject it
if len(converted_example['sentence']) > 500:
print(f"WARNING: Sentence of length {len(converted_example['sentence'])} is too long for BERT")
continue
# Write to output file
json.dump(converted_example, outf)
outf.write("\n")
count += 1
print(f"Converted {count} examples")
| 32.594203 | 115 | 0.618497 | 262 | 2,249 | 5.137405 | 0.332061 | 0.08321 | 0.046805 | 0.029718 | 0.035661 | 0.035661 | 0 | 0 | 0 | 0 | 0 | 0.004834 | 0.264117 | 2,249 | 68 | 116 | 33.073529 | 0.808459 | 0.071587 | 0 | 0 | 0 | 0 | 0.213737 | 0.048031 | 0 | 0 | 0 | 0 | 0 | 1 | 0.020833 | false | 0 | 0.041667 | 0 | 0.083333 | 0.083333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8cf604fadfa255cf2d2a87343356edba2f401340 | 6,680 | py | Python | veles/loader/file_image.py | AkshayJainG/veles | 21106f41a8e7e7e74453cd16a5059a0e6b1c315e | [
"Apache-2.0"
] | 1,007 | 2015-07-20T12:01:41.000Z | 2022-03-30T23:08:35.000Z | veles/loader/file_image.py | AkshayJainG/veles | 21106f41a8e7e7e74453cd16a5059a0e6b1c315e | [
"Apache-2.0"
] | 52 | 2015-07-21T10:26:24.000Z | 2019-01-24T05:46:43.000Z | veles/loader/file_image.py | AkshayJainG/veles | 21106f41a8e7e7e74453cd16a5059a0e6b1c315e | [
"Apache-2.0"
] | 235 | 2015-07-20T09:42:42.000Z | 2021-12-06T18:12:26.000Z | # -*- coding: utf-8 -*-
"""
.. invisible:
_ _ _____ _ _____ _____
| | | | ___| | | ___/ ___|
| | | | |__ | | | |__ \ `--.
| | | | __|| | | __| `--. \
\ \_/ / |___| |___| |___/\__/ /
\___/\____/\_____|____/\____/
Created on Feb 26, 2015
Image loaders which take data from the file system.
███████████████████████████████████████████████████████████████████████████████
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
███████████████████████████████████████████████████████████████████████████████
"""
from __future__ import division
from itertools import chain
import cv2
import numpy
from PIL import Image
from zope.interface import implementer
from veles.compat import from_none
import veles.error as error
from veles.loader.file_loader import AutoLabelFileLoader, FileFilter, \
FileLoaderBase, FileListLoaderBase
from veles.loader.image import ImageLoader, IImageLoader, MODE_COLOR_MAP, \
COLOR_CHANNELS_MAP
class FileImageLoaderBase(ImageLoader, FileFilter):
"""
Base class for loading images from files. Function is_valid_filename()
should be used in child classes as a filter for loading data.
"""
def __init__(self, workflow, **kwargs):
kwargs["file_type"] = "image"
kwargs["file_subtypes"] = kwargs.get("file_subtypes", ["jpeg", "png"])
super(FileImageLoaderBase, self).__init__(workflow, **kwargs)
def get_image_info(self, key):
"""
:param key: The full path to the analysed image.
:return: tuple (image size, number of channels).
"""
try:
with open(key, "rb") as fin:
img = Image.open(fin)
return tuple(reversed(img.size)), MODE_COLOR_MAP[img.mode]
except Exception as e:
self.warning("Failed to read %s with PIL: %s", key, e)
# Unable to read the image with PIL. Fall back to slow OpenCV
# method which reads the whole image.
img = cv2.imread(key, cv2.IMREAD_UNCHANGED)
if img is None:
raise error.BadFormatError("Unable to read %s" % key)
return img.shape[:2], "BGR"
def get_image_data(self, key):
"""
Loads data from image and normalizes it.
Returns:
:class:`numpy.ndarray`: if there was one image in the file.
tuple: `(data, labels)` if there were many images in the file
"""
try:
with open(key, "rb") as fin:
img = Image.open(fin)
if img.mode in ("P", "CMYK"):
return numpy.array(img.convert("RGB"),
dtype=self.source_dtype)
else:
return numpy.array(img, dtype=self.source_dtype)
except (TypeError, KeyboardInterrupt) as e:
raise from_none(e)
except Exception as e:
self.warning("Failed to read %s with PIL: %s", key, e)
img = cv2.imread(key)
if img is None:
raise error.BadFormatError("Unable to read %s" % key)
return img.astype(self.source_dtype)
def get_image_label(self, key):
return self.get_label_from_filename(key)
def analyze_images(self, files, pathname):
# First pass: get the final list of files and shape
self.debug("Analyzing %d images in %s", len(files), pathname)
uniform_files = []
for file in files:
size, color_space = self.get_image_info(file)
shape = size + (COLOR_CHANNELS_MAP[color_space],)
if (not isinstance(self.scale, tuple) and
self.uncropped_shape != tuple() and
shape[:2] != self.uncropped_shape):
self.warning("%s has the different shape %s (expected %s)",
file, shape[:2], self.uncropped_shape)
else:
if self.uncropped_shape == tuple():
self.original_shape = shape
uniform_files.append(file)
return uniform_files
@implementer(IImageLoader)
class FileListImageLoader(FileImageLoaderBase, FileListLoaderBase):
"""
Input: text file, with each line giving an image filename and label
As with ImageLoader, it is useful for large datasets.
"""
MAPPING = "file_list_image"
def get_keys(self, index):
paths = (
self.path_to_test_text_file,
self.path_to_val_text_file,
self.path_to_train_text_file)[index]
if paths is None:
return []
return list(
chain.from_iterable(
self.analyze_images(self.scan_files(p), p) for p in paths))
@implementer(IImageLoader)
class FileImageLoader(FileImageLoaderBase, FileLoaderBase):
"""Loads images from multiple folders. As with ImageLoader, it is useful
for large datasets.
Attributes:
test_paths: list of paths with mask for test set,
for example: ["/tmp/\*.png"].
validation_paths: list of paths with mask for validation set,
for example: ["/tmp/\*.png"].
train_paths: list of paths with mask for train set,
for example: ["/tmp/\*.png"].
Must be overridden in child classes:
get_label_from_filename()
is_valid_filename()
"""
def get_keys(self, index):
paths = (self.test_paths, self.validation_paths,
self.train_paths)[index]
if paths is None:
return []
return list(
chain.from_iterable(
self.analyze_images(self.scan_files(p), p) for p in paths))
class AutoLabelFileImageLoader(FileImageLoader, AutoLabelFileLoader):
"""
FileImageLoader extension which takes labels by regular expression from
file names. Unique selection groups are tracked and enumerated.
"""
MAPPING = "auto_label_file_image"
| 36.304348 | 79 | 0.604641 | 792 | 6,680 | 5.10101 | 0.332071 | 0.014851 | 0.006931 | 0.011881 | 0.210891 | 0.17599 | 0.17599 | 0.142079 | 0.142079 | 0.120792 | 0 | 0.00376 | 0.283383 | 6,680 | 183 | 80 | 36.502732 | 0.807186 | 0.385778 | 0 | 0.340909 | 0 | 0 | 0.06665 | 0.005383 | 0 | 0 | 0 | 0 | 0 | 1 | 0.079545 | false | 0 | 0.113636 | 0.011364 | 0.386364 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8cf6b1a004e3fd55db09e1aed8d64e2a6ecfa304 | 2,991 | py | Python | rally/rally-plugins/netcreate-boot/netcreate_nova_boot_fip_ping.py | zulcss/browbeat | 1aedcebcdabec0d92c0c0002a6ef458858629e88 | [
"Apache-2.0"
] | null | null | null | rally/rally-plugins/netcreate-boot/netcreate_nova_boot_fip_ping.py | zulcss/browbeat | 1aedcebcdabec0d92c0c0002a6ef458858629e88 | [
"Apache-2.0"
] | null | null | null | rally/rally-plugins/netcreate-boot/netcreate_nova_boot_fip_ping.py | zulcss/browbeat | 1aedcebcdabec0d92c0c0002a6ef458858629e88 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rally_openstack import consts
from rally_openstack.scenarios.neutron import utils as neutron_utils
from rally_openstack.scenarios.vm import utils as vm_utils
from rally.task import atomic
from rally.task import scenario
from rally.task import types
from rally.task import validation
@types.convert(image={"type": "glance_image"}, flavor={"type": "nova_flavor"})
@validation.add("image_valid_on_flavor", flavor_param="flavor", image_param="image")
@validation.add("required_services", services=[consts.Service.NEUTRON, consts.Service.NOVA])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["neutron", "nova"], "keypair@openstack": {},
"allow_ssh@openstack": None},
name="BrowbeatPlugin.create_network_nova_boot_ping", platform="openstack")
class CreateNetworkNovaBootPing(vm_utils.VMScenario,
neutron_utils.NeutronScenario):
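# Scenario flow: create a router with an external gateway, a network and a
# subnet, attach the subnet to the router, boot a server with a floating IP
# on that network, and wait until the floating IP answers to ping.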
def run(self, image, flavor, ext_net_id, router_create_args=None,
network_create_args=None, subnet_create_args=None, **kwargs):
ext_net_name = None
if ext_net_id:
ext_net_name = self.clients("neutron").show_network(
ext_net_id)["network"]["name"]
router_create_args["name"] = self.generate_random_name()
router_create_args["tenant_id"] = self.context["tenant"]["id"]
router_create_args.setdefault("external_gateway_info",
{"network_id": ext_net_id, "enable_snat": True})
router = self._create_router(router_create_args)
network = self._create_network(network_create_args or {})
subnet = self._create_subnet(network, subnet_create_args or {})
self._add_interface_router(subnet['subnet'], router['router'])
kwargs["nics"] = [{'net-id': network['network']['id']}]
guest = self._boot_server_with_fip(image, flavor, True,
ext_net_name, **kwargs)
self._wait_for_ping(guest[1]['ip'])
@atomic.action_timer("neutron.create_router")
def _create_router(self, router_create_args):
"""Create neutron router.
:param router_create_args: POST /v2.0/routers request options
:returns: neutron router dict
"""
return self.admin_clients("neutron").create_router({"router": router_create_args})
| 49.85 | 95 | 0.69007 | 376 | 2,991 | 5.25266 | 0.385638 | 0.060759 | 0.06481 | 0.038481 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002926 | 0.200267 | 2,991 | 59 | 96 | 50.694915 | 0.822742 | 0.217987 | 0 | 0 | 0 | 0 | 0.161445 | 0.046562 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054054 | false | 0 | 0.189189 | 0 | 0.297297 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8cfa9847aa4d50f475be53d50a57e4c398958442 | 7,407 | py | Python | quadart.py | ribab/quadart | 143556dd55663f49b7b8a98ec6767e2f4eeb0137 | [
"MIT"
] | 34 | 2019-08-04T18:28:52.000Z | 2022-03-27T09:08:21.000Z | quadart.py | ribab/quadart | 143556dd55663f49b7b8a98ec6767e2f4eeb0137 | [
"MIT"
] | null | null | null | quadart.py | ribab/quadart | 143556dd55663f49b7b8a98ec6767e2f4eeb0137 | [
"MIT"
] | 3 | 2019-08-07T22:05:00.000Z | 2021-05-14T14:44:42.000Z | #!/usr/bin/env python3
from wand.image import Image
from wand.display import display
from wand.color import Color
from wand.drawing import Drawing
import imageio
import click
import numpy as np
import time
def loading_bar(recurse_depth):
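# Fixed-width progress bar driven by the quadtree recursion: each call at
# depth <= load_depth accounts for 4**(load_depth - depth) units of work,
# and the remaining time is a linear extrapolation of the elapsed time.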
global load_progress
global start_time
load_depth=3
try:
load_progress
start_time
except NameError:  # first call: initialise the module-level state
load_progress = 0
start_time = time.time()
print('[' + ' '*(4**load_depth) + ']\r', end='')
if recurse_depth <= load_depth:
load_progress += 4**(load_depth - recurse_depth)
cur_time = time.time()
time_left = 4**load_depth*(cur_time - start_time)/load_progress \
- cur_time + start_time
print('[' + '='*load_progress \
+ ' '*(4**load_depth - load_progress) \
+ '] ' \
+ 'time left: {} secs'.format(int(time_left)).ljust(19) \
+ '\r', end='')
class QuadArt:
def __init__(self, std_thresh=10, draw_type='circle', max_recurse=0):
# max_recurse=0 means unlimited depth; a None default would break the
# integer comparison against recurse_depth in recursive_draw()
self.img = None
self.canvas = None
self.draw = None
self.std_thresh = std_thresh
self.draw_type = draw_type
self.recurse_depth = 0
self.max_recurse_depth = max_recurse
def recursive_draw(self, x, y, w, h):
'''Draw the QuadArt recursively
'''
if (self.max_recurse_depth == 0 or self.recurse_depth < self.max_recurse_depth) \
and self.too_many_colors(int(x), int(y), int(w), int(h)):
self.recurse_depth += 1
self.recursive_draw(x, y, w/2.0, h/2.0)
self.recursive_draw(x + w/2.0, y, w/2.0, h/2.0)
self.recursive_draw(x, y + h/2.0, w/2.0, h/2.0)
self.recursive_draw(x + w/2.0, y + h/2.0, w/2.0, h/2.0)
self.recurse_depth -= 1
if self.recurse_depth == 3:
loading_bar(self.recurse_depth)
else:
self.draw_avg(x, y, w, h)
if self.recurse_depth < 3:
loading_bar(self.recurse_depth)
def too_many_colors(self, x, y, w, h):
if w * self.output_scale <= 2 or w <= 2:
return False
img = self.img[y:y+h,x:x+w]
red = img[:,:,0]
green = img[:,:,1]
blue = img[:,:,2]
red_avg = np.average(red)
green_avg = np.average(green)
blue_avg = np.average(blue)
if red_avg >= 254 and green_avg >= 254 and blue_avg >= 254:
return False
if 255 - red_avg < self.std_thresh and 255 - green_avg < self.std_thresh \
and 255 - blue_avg < self.std_thresh:
return True
red_std = np.std(red)
if red_std > self.std_thresh:
return True
green_std = np.std(green)
if green_std > self.std_thresh:
return True
blue_std = np.std(blue)
if blue_std > self.std_thresh:
return True
return False
def draw_avg(self, x, y, w, h):
avg_color = self.get_color(int(x), int(y), int(w), int(h))
self.draw_in_box(avg_color, x, y, w, h)
return avg_color
def get_color(self, x, y, w, h):
img = self.img[y : y + h,
x : x + w]
red = np.average(img[:,:,0])
green = np.average(img[:,:,1])
blue = np.average(img[:,:,2])
color = Color('rgb(%s,%s,%s)' % (red, green, blue))
return color
def draw_in_box(self, color, x, y, w, h):
if self.draw_type == 'circle':
self.draw_circle_in_box(color, x, y, w, h)
else:
self.draw_square_in_box(color, x, y, w, h)
def draw_circle_in_box(self, color, x, y, w, h):
x *= self.output_scale
y *= self.output_scale
w *= self.output_scale
h *= self.output_scale
self.draw.fill_color = color
self.draw.circle((int(x + w/2.0), int(y + h/2.0)),
(int(x + w/2.0), int(y)))
def draw_square_in_box(self, color, x, y, w, h):
x *= self.output_scale
y *= self.output_scale
w *= self.output_scale
h *= self.output_scale
self.draw.fill_color = color
self.draw.rectangle(x, y, x + w, y + h)
def width(self):
return self.img.shape[1]
def scale_width(self):
return self.width() * self.output_scale
def height(self):
return self.img.shape[0]
def scale_height(self):
return self.height() * self.output_scale
def generate(self, filename,
left=None, right=None, up=None, down=None,
output_size=512):
self.img = imageio.imread(filename)
left = 0 if left is None else int(self.width() * float(left))
right = self.width() if right is None else int(self.width() * float(right))
up = 0 if up is None else int(self.height() * float(up))
down = self.height() if down is None else int(self.height() * float(down))
self.img = self.img[up:down,left:right]
if self.width() < self.height():
difference = self.height() - self.width()
subtract_top = int(difference/2)
subtract_bot = difference - subtract_top
self.img = self.img[subtract_top:-subtract_bot,:]
elif self.height() < self.width():
difference = self.width() - self.height()
subtract_left = int(difference/2)
subtract_right = difference - subtract_left
self.img = self.img[:,subtract_left:-subtract_right]
self.output_scale = float(output_size) / self.width()
self.canvas = Image(width = output_size,
height = output_size,
background = Color('white'))
self.canvas.format = 'png'
self.draw = Drawing()
self.recursive_draw(0, 0, self.width(), self.height())
self.draw(self.canvas)
def display(self):
display(self.canvas)
def save(self, filename):
self.canvas.save(filename=filename)
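# Example programmatic use (paths are illustrative):
#   qa = QuadArt(std_thresh=12, draw_type='circle')
#   qa.generate('input.jpg', output_size=512)
#   qa.save('quadart.png')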
@click.command()
@click.argument('filename')
@click.option('-l', '--left', default=None, help='left pixel of image')
@click.option('-r', '--right', default=None, help='right pixel of image')
@click.option('-u', '--up', default=None, help='top pixel of image')
@click.option('-d', '--down', default=None, help='bottom pixel of image')
@click.option('-o', '--output', default=None, help='name of file to save result to')
@click.option('-s', '--size', default=512, help='Output size')
@click.option('-t', '--type', 'draw_type', default='circle', help='Draw type')
@click.option('--thresh', default=10, help='Standard deviation threshold for color difference')
@click.option('-m', '--max-recurse', 'max_recurse', default=0, help='Maximum allowed recursion depth; 0 means unlimited.')
def main(filename, left, right, up, down, output, size, draw_type, thresh, max_recurse):
quadart = QuadArt(std_thresh=thresh, draw_type=draw_type, max_recurse=max_recurse)
quadart.generate(filename, left=left, right=right,
up=up, down=down,
output_size=size)
if output is None:
quadart.display()
else:
quadart.save(output)
if __name__ == '__main__':
main()
| 34.61215 | 124 | 0.562711 | 1,020 | 7,407 | 3.931373 | 0.137255 | 0.007481 | 0.008978 | 0.010973 | 0.276309 | 0.199501 | 0.166584 | 0.126683 | 0.122195 | 0.112219 | 0 | 0.017398 | 0.301607 | 7,407 | 213 | 125 | 34.774648 | 0.757781 | 0.007965 | 0 | 0.128655 | 0 | 0 | 0.056524 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.099415 | false | 0 | 0.046784 | 0.023392 | 0.22807 | 0.011696 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8cfac0cd213b0dbd5444651160841457f851f434 | 733 | py | Python | backend/tests/serializers/test_user_serializer.py | tuod/bikerides | ef5dfd25c6642f740c36e68cb4548b5c80d7ab44 | [
"Apache-2.0"
] | null | null | null | backend/tests/serializers/test_user_serializer.py | tuod/bikerides | ef5dfd25c6642f740c36e68cb4548b5c80d7ab44 | [
"Apache-2.0"
] | 6 | 2021-06-20T20:20:14.000Z | 2021-06-21T21:33:05.000Z | backend/tests/serializers/test_user_serializer.py | tuod/bikerides | ef5dfd25c6642f740c36e68cb4548b5c80d7ab44 | [
"Apache-2.0"
] | null | null | null | import datetime
import json
import uuid
import pytest
from src.domain.user import User
from src.serializers import user_serializer as srs
class TestSerializeDomainModelUser:
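# Round-trips a domain User through the custom JSON encoder and compares it
# against the expected payload; unsupported types must raise TypeError.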
def test_serialize(self):
code = uuid.uuid4()
user = User(user_id=code, login="user1")
expected_json = """
{{
"user_id": "{}",
"login": "user1"
}}
""".format(
code
)
json_user = json.dumps(user, cls=srs.UserModelEncoder)
assert json.loads(json_user) == json.loads(expected_json)
def test_wrong_type(self):
with pytest.raises(TypeError):
json.dumps(datetime.datetime.now(), cls=srs.UserModelEncoder)
| 22.90625 | 73 | 0.600273 | 80 | 733 | 5.375 | 0.4625 | 0.055814 | 0.055814 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005792 | 0.293315 | 733 | 31 | 74 | 23.645161 | 0.824324 | 0 | 0 | 0 | 0 | 0 | 0.150068 | 0 | 0 | 0 | 0 | 0 | 0.043478 | 1 | 0.086957 | false | 0 | 0.26087 | 0 | 0.391304 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8cfdf7fc3fa416ac84e95748a98c731b243c4079 | 5,490 | py | Python | configs/atss/atss_vovnet_pafpn_Holly_OpenImage_CrowdHuman.py | HAOCHENYE/yehc_mmdet | 491cc13c6ff769996b7a23b871b10f9a5a1c56fa | [
"Apache-2.0"
] | 1 | 2021-12-25T13:22:39.000Z | 2021-12-25T13:22:39.000Z | configs/atss/atss_vovnet_pafpn_Holly_OpenImage_CrowdHuman.py | HAOCHENYE/yehc_mmdet | 491cc13c6ff769996b7a23b871b10f9a5a1c56fa | [
"Apache-2.0"
] | null | null | null | configs/atss/atss_vovnet_pafpn_Holly_OpenImage_CrowdHuman.py | HAOCHENYE/yehc_mmdet | 491cc13c6ff769996b7a23b871b10f9a5a1c56fa | [
"Apache-2.0"
] | 1 | 2021-02-01T13:33:26.000Z | 2021-02-01T13:33:26.000Z | # '''In this config, I try to add more convs in stride4&stride8 to detect more small object'''
dataset_type = 'CocoDataset'
data_root = '/usr/videodate/yehc/'
model = dict(
type='PAA',
backbone=dict(
type='YL_Vovnet',
stem_channels=16,
stage_channels=(16, 16, 16, 32, 32, 32),
concat_channels=(16, 24, 32, 64, 128, 128),
block_per_stage=(1, 1, 3, 4, 2, 2),
layer_per_block=(1, 1, 2, 2, 4, 4),
norm_cfg=dict(type='SyncBN', requires_grad=True)
),
neck=dict(
type='PAFPN',
in_channels=[24, 32, 64, 128, 128],
out_channels=32,
num_outs=5,
start_level=0,
add_extra_convs=False,
norm_cfg=dict(type='SyncBN', requires_grad=True)
),
bbox_head=dict(
type='ATSSPrivateHead',
num_classes=1,
in_channels=32,
stacked_convs=2,
feat_channels=96,
scale=1,
norm_cfg=dict(type='SyncBN', requires_grad=True),
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=3,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)))
# training and testing settings
train_cfg = dict(
assigner=dict(type='ATSSAssigner', topk=9),
allowed_border=-1,
pos_weight=-1,
debug=False)
# train_cfg = dict(
# assigner=dict(
# type='MaxIoUAssigner',
# pos_iou_thr=0.1,
# neg_iou_thr=0.1,
# min_pos_iou=0,
# ignore_iof_thr=-1),
# allowed_border=-1,
# pos_weight=-1,
# debug=False)
test_cfg = dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100)
# optimizer
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='RandomRadiusBlur', prob=0.3, radius=11, std=0),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(
type='Resize',
img_scale=[(480, 320)],
multiscale_mode='range',
keep_ratio=True),
dict(type='PhotoMetricDistortion'),
dict(type='RandomFlip', flip_ratio=0.5),
dict(
type='Normalize',
mean=[127.5, 127.5, 127.5],
std=[128, 128, 128],
to_rgb=True),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(480, 320),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(
type='Normalize',
mean=[127.5, 127.5, 127.5],
std=[128, 128, 128],
to_rgb=True),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img'])
])
]
data = dict(
samples_per_gpu=96,
workers_per_gpu=8,
train=[dict(
type=dataset_type,
ann_file=data_root + '/hollywoodheads/hollywoodhead_train.json',
img_prefix=data_root + '/hollywoodheads/JPEGImages/',
classes=["person"],
pipeline=train_pipeline),
dict(
type=dataset_type,
ann_file=data_root+'ImageDataSets/OpenImageV6_CrowdHuman/OpenImageCrowdHuman_train.json',
img_prefix=data_root+'ImageDataSets/OpenImageV6_CrowdHuman/WIDER_train/images',
classes=["person"],
pipeline=train_pipeline)],
val=dict(
type=dataset_type,
ann_file=data_root + '/hollywoodheads/hollywoodhead_val.json',
img_prefix=data_root + '/hollywoodheads/JPEGImages/',
classes=["person"],
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + '/hollywoodheads/hollywoodhead_val.json',
img_prefix=data_root + '/hollywoodheads/JPEGImages/',
pipeline=test_pipeline)
)
evaluation = dict(interval=5, metric='bbox')
# optimizer = dict(type='AdamW', lr=0.001)
# optimizer_config = dict(grad_clip=None)
optimizer = dict(type='SGD', lr=0.05, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=2000,
warmup_ratio=1.0 / 5,
step=[90, 100, 110])
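# steps are the epochs at which the LR is multiplied by the step policy's
# default gamma of 0.1; with total_epochs = 120 the last decay lands 10
# epochs before the end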
checkpoint_config = dict(interval=5)
log_config = dict(
interval=20,
hooks=[
dict(type='TextLoggerHook'),
dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 120
device_ids = range(1)
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = 'work_dirs/atss_vovnet_pafpn_private_head_SGD_head_OpemImage_CrowdHuman_Holly_blur'
load_from = None
resume_from = None
workflow = [('train', 1)]
| 30 | 97 | 0.599636 | 689 | 5,490 | 4.564586 | 0.345428 | 0.106836 | 0.022893 | 0.010175 | 0.357711 | 0.286169 | 0.238792 | 0.238792 | 0.171065 | 0.171065 | 0 | 0.060904 | 0.258288 | 5,490 | 182 | 98 | 30.164835 | 0.711444 | 0.088525 | 0 | 0.220779 | 0 | 0 | 0.18014 | 0.088666 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8cfdfe2e14171e1416f226629f83a5fb97d70fac | 26,307 | py | Python | APRI/localization_detection.py | andresperezlopez/DCASE2020 | 324f13e3ae9bb7e5677d93fa09e58a55020717a8 | [
"MIT"
] | 6 | 2020-07-02T07:05:14.000Z | 2021-03-24T11:20:04.000Z | APRI/localization_detection.py | andresperezlopez/DCASE2020 | 324f13e3ae9bb7e5677d93fa09e58a55020717a8 | [
"MIT"
] | null | null | null | APRI/localization_detection.py | andresperezlopez/DCASE2020 | 324f13e3ae9bb7e5677d93fa09e58a55020717a8 | [
"MIT"
] | 1 | 2020-10-28T09:29:00.000Z | 2020-10-28T09:29:00.000Z | """
localization_detection.py
This script contains all methods used for localization and detection.
- parse_annotations: groundtruth localization, based on metadata annotation parsing
- ld_oracle: wrapper for the parse_annotations method
- ld_basic: diffuseness mask and simple event grouping by temporal continuity
- ld_basic_dereverb_filter: same as above, but with prior preprocessing using MAR dereverberation,
and post-filtering of the results to remove spurious detections
- ld_particle: diffuseness mask, particle filtering and postprocessing, as used in the PAPAFIL method
All wrapper methods return lists of Events.
"""
from APRI.utils import *
from baseline.cls_feature_class import create_folder
import tempfile
import matlab.engine
from scipy.io import loadmat
def parse_annotations(annotation_file, debug=False):
"""
parse annotation file and return event_list
:param annotation_file: file instance
:return: event_list
"""
############################################
# Delimit events
#
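# The parser walks the annotations frame by frame, keeping at most two
# open events in current_events; an event is closed and appended to
# event_list when the per-frame source count drops or an instantaneous
# class/instance change is detected.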
event_list = []
frames = annotation_file[:, 0]
classIDs = annotation_file[:, 1]
instances = annotation_file[:, 2]
azis = annotation_file[:, 3] * np.pi / 180 # gt is in degrees, but Event likes rads
eles = annotation_file[:, 4] * np.pi / 180
# source count
N = 600
num_sources = np.zeros(N)
for frame_idx in frames:
num_sources[int(frame_idx)] += 1
current_events = [] # maximum two allowed
for frame_idx, frame in enumerate(frames[:-1]):
if debug:
print(frame_idx, frame)
frame = int(frame)
if frame_idx == 0:
last_frame = -1 # avoid problem when starting with 2 sources
else:
last_frame = int(frames[frame_idx - 1])
if (frame - last_frame) > 1:
# if num_sources[frame] == 0:
# clear list of current events
if debug:
print('finish all')
while (len(current_events) != 0):
event_list.append(current_events[-1])
current_events.remove(current_events[-1])
if num_sources[frame] == 1:
# if last was 0, first just started
if num_sources[frame - 1] == 0:
classID = int(classIDs[frame_idx])
instance = instances[frame_idx]
frameNumber = [frame]
azi = [azis[frame_idx]]
ele = [eles[frame_idx]]
e = Event(classID, instance, frameNumber, azi, ele)
if debug:
print('new event---1')
e.print()
current_events.append(e)
# if last was 1, continue as before
elif num_sources[frame - 1] == 1:
# ensure that last event was same as this one
classID = int(classIDs[frame_idx])
instance = instances[frame_idx]
e = current_events[0]
if classIDs[frame_idx] == e.get_classID() and instances[frame_idx] == e.get_instance():
# it is same: just proceed normal
e = current_events[0]
e.add_frame(frame)
e.add_azi(azis[frame_idx])
e.add_ele(eles[frame_idx])
else:
# instantaneous change: remove last and add new
# print('instantaneous change!')
e = current_events[0]
event_list.append(e)
current_events.remove(e)
# add new
frameNumber = [frame]
azi = [azis[frame_idx]]
ele = [eles[frame_idx]]
e = Event(classID, instance, frameNumber, azi, ele)
if debug:
print('new event---1')
e.print()
current_events.append(e)
# if last was 2, second source just finished
elif num_sources[frame - 1] == 2:
if debug:
print('finish event')
classID = int(classIDs[frame_idx])
instance = instances[frame_idx]
# find which of the current events are we: same classID and instance number
class0 = current_events[0].get_classID()
class1 = current_events[1].get_classID()
instance0 = current_events[0].get_instance()
instance1 = current_events[1].get_instance()
both_finished = False
if classID == class0 and instance == instance0:
event_idx = 0
elif classID == class1 and instance == instance1:
event_idx = 1
else:
# This is a strange case happening in 'fold3_room2_mix044_ov2.wav',
# where two sources finish and a new one starts
both_finished = True
if not both_finished:
# first add current event data, as regular
e = current_events[event_idx]
e.add_frame(frame)
e.add_azi(azis[frame_idx])
e.add_ele(eles[frame_idx])
# then remove other event and add it to the main list
event_idx = np.mod(event_idx + 1, 2)
e = current_events[event_idx]
event_list.append(e)
current_events.remove(e)
else:
# Terminate both, and start the new one
e1 = current_events[-1]
event_list.append(e1)
current_events.remove(e1)
e0 = current_events[0]
event_list.append(e0)
current_events.remove(e0)
# add new
frameNumber = [frame]
azi = [azis[frame_idx]]
ele = [eles[frame_idx]]
e = Event(classID, instance, frameNumber, azi, ele)
if debug:
print('new event---1')
e.print()
current_events.append(e)
elif num_sources[frame] == 2:
# check cold start: 2 starting at the same time
if last_frame < frame - 1:
# just add it normal
classID = int(classIDs[frame_idx])
instance = instances[frame_idx]
frameNumber = [frame]
azi = [azis[frame_idx]]
ele = [eles[frame_idx]]
e = Event(classID, instance, frameNumber, azi, ele)
if debug:
print('new event---1')
e.print()
current_events.append(e)
else:
# if last was 1, second just started
if num_sources[frame - 1] == 1:
if len(current_events) == 2:
# 1 to 2 instantaneous change!
last_classID = int(classIDs[frame_idx - 1])
last_instance = instances[frame_idx - 1]
event0 = current_events[0]
event1 = current_events[1]
if event0.get_classID() == last_classID and event0.get_instance() == last_instance:
# remove the other one
event_list.append(event1)
current_events.remove(event1)
else:
# remove this one
event_list.append(event0)
current_events.remove(event0)
# now add the new one normal
classID = int(classIDs[frame_idx])
instance = instances[frame_idx]
frameNumber = [frame]
azi = [azis[frame_idx]]
ele = [eles[frame_idx]]
e = Event(classID, instance, frameNumber, azi, ele)
if debug:
print('new event---2')
e.print()
current_events.append(e)
elif len(current_events) == 1:
e = current_events[0]
# if same class as existing event, it's continuation
# if classIDs[frame_idx] == e.get_classID() and instances[frame_idx] == 0:
if classIDs[frame_idx] == e.get_classID() and instances[frame_idx] == e.get_instance():
# add normally
e.add_frame(frame)
e.add_azi(azis[frame_idx])
e.add_ele(eles[frame_idx])
# if not same class and instance, it's a new event
else:
classID = int(classIDs[frame_idx])
instance = instances[frame_idx]
frameNumber = [frame]
azi = [azis[frame_idx]]
ele = [eles[frame_idx]]
e = Event(classID, instance, frameNumber, azi, ele)
if debug:
print('new event---2')
e.print()
current_events.append(e)
else:
warnings.warn('something weird happened, v2!')
continue
# if last was 2, continue
# elif num_sources[frame - 1] == 2:
elif num_sources[last_frame] == 2:
if len(current_events) == 2:
# we have two active sources, so find the one that corresponds by class nummber
classID = int(classIDs[frame_idx])
instance = instances[frame_idx]
found = False
for e_idx, e in enumerate(current_events):
if e.get_classID() == classID and e.get_instance() == instance:
found = True
e.add_frame(frame)
e.add_azi(azis[frame_idx])
e.add_ele(eles[frame_idx])
last_used_current_event_idx = e_idx
if not found:
# instantaneous change: remove one current event and add another one
# which to remove? the one with different class that our last one, or different instance
# if current_events[0].get_classID() != current_events[1].get_classID():
e = current_events[np.mod(last_used_current_event_idx + 1, 2)]
event_list.append(e)
current_events.remove(e)
classID = int(classIDs[frame_idx])
instance = instances[frame_idx]
frameNumber = [frame]
azi = [azis[frame_idx]]
ele = [eles[frame_idx]]
e = Event(classID, instance, frameNumber, azi, ele)
if debug:
print('new event---2')
e.print()
current_events.append(e)
elif len(current_events) == 1:
# it might happen with a cold start from 0 to 2
# so add the current source
classID = int(classIDs[frame_idx])
instance = instances[frame_idx]
frameNumber = [frame]
azi = [azis[frame_idx]]
ele = [eles[frame_idx]]
e = Event(classID, instance, frameNumber, azi, ele)
if debug:
print('new event---2')
e.print()
current_events.append(e)
else:
                        warnings.warn('something weird happened, v2!')
continue
# release last ongoing event
e = current_events[0]
event_list.append(e)
current_events.remove(e)
if debug:
for e in event_list:
e.print()
print('---')
# ERROR CHECK
# Check that all frames are monotonically increasing by one
if debug:
print('ERROR CHECK...')
for e in event_list:
f = np.asarray(e.get_frames())
if f.size > 1:
assert np.allclose(f[1:] - f[:-1], np.ones(f.size - 1))
if debug:
print('ERROR CHECK OK')
return event_list
def ld_oracle(stft, audio_file_name, gt_folder_path):
"""
parse groundtruth
# TODO: check memory leak on file open
:param stft: this is in fact not used, but kept for compatibility when called from run.py
:return:
"""
metadata_file_name = os.path.splitext(audio_file_name)[0] + '.csv'
metadata_file_path = os.path.join(gt_folder_path, metadata_file_name)
    csv = np.loadtxt(metadata_file_path, delimiter=",")  # loadtxt opens and closes the file itself
return parse_annotations(csv)
def ld_basic(stft, diff_th):
"""
Diffuseness mask
:param stft:
:param diff_th:
:return:
"""
M, K, N = stft.shape
DOA = doa(stft) # Direction of arrival
diff = diffuseness(stft) # Diffuseness
diff_mask = diff <= diff_th
# segment audio based on diffuseness mask
source_activity = np.empty(N)
for n in range(N):
source_activity[n] = np.any(diff_mask[:,n]) # change here discriminative function
# compute statistics of relevant DOAs
active_frames = np.argwhere(source_activity>0).squeeze()
num_active_frames = active_frames.size
estimated_doa_per_frame = np.empty((num_active_frames,2))
for af_idx, af in enumerate(active_frames):
active_bins = diff_mask[:,af]
doas_active_bins = DOA[:,active_bins,af]
for a in range(2): # angle
estimated_doa_per_frame[af_idx,a] = circmedian(doas_active_bins[a])
    # segment active frames into "events"
    frame_changes = np.argwhere(active_frames[1:] - active_frames[:-1] != 1).flatten()
    frame_changes = np.insert(frame_changes, 0, -1)
    # also keep the final segment, which the change-point list alone would drop
    frame_changes = np.append(frame_changes, active_frames.size - 1)
event_list = []
for idx in range(len(frame_changes)-1):
start_frame_idx = frame_changes[idx]+1
end_frame_idx = frame_changes[idx+1]
frames = active_frames[start_frame_idx:end_frame_idx+1]
azis = estimated_doa_per_frame[start_frame_idx:end_frame_idx + 1, 0]
eles = estimated_doa_per_frame[start_frame_idx:end_frame_idx + 1, 1]
event_list.append(Event(-1, -1, frames, azis, eles))
return event_list
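def _demo_segment_active_frames():
    """
    Minimal illustrative sketch (not part of the original pipeline): the
    change-point trick used in ld_basic, applied to a toy array of active
    frame indices.
    """
    active_frames = np.array([2, 3, 4, 9, 10, 15])
    frame_changes = np.argwhere(active_frames[1:] - active_frames[:-1] != 1).flatten()
    frame_changes = np.insert(frame_changes, 0, -1)
    frame_changes = np.append(frame_changes, active_frames.size - 1)
    segments = [active_frames[frame_changes[i] + 1:frame_changes[i + 1] + 1].tolist()
                for i in range(len(frame_changes) - 1)]
    return segments  # [[2, 3, 4], [9, 10], [15]]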
def ld_basic_dereverb_filter(stft, diff_th=0.3, L=5, event_minimum_length=4):
"""
same as basic, but dereverb+filter out events shorter than a given parameter
:param stft:
:param diff_th:
:return:
"""
# dereverb
# L = 5
tau = 1
p = 0.25
i_max = 10
ita = 1e-4
epsilon = 1e-8
stft_dry, _, _ = estimate_MAR_sparse_parallel(stft, L, tau, p, i_max, ita, epsilon)
# l&d
event_list_full = ld_basic(stft_dry, diff_th)
# filter
# todo: probably easy to optimize
event_list_clean = []
for e in event_list_full:
if len(e.get_frames()) >= event_minimum_length:
event_list_clean.append(e)
return event_list_clean
def ld_particle(stft, diff_th, K_th, min_event_length, V_azi, V_ele, in_sd, in_sdn, init_birth, in_cp, num_particles, debug_plot=False, metadata_file_path=None):
"""
find single-source tf-bins, and then feed them into the particle tracker
:param stft:
:param diff_th:
:return:
CHANGES V2
- DISCARD LOW FREQUENCY ACTIVE BINS IN DIFF MASK
- DECIMATION COMPUTED AFTER PARTICLE, JUST DISCARDING ONE OUT OF EACH TWO
"""
# decimate in frequency
M, K, N = stft.shape
stft = stft[:, :K // 2, :]
M, K, N = stft.shape
# parametric analysis
DOA = doa(stft) # Direction of arrival
diff = diffuseness(stft, dt=2) # Diffuseness
diff_mask = diff <= diff_th
    diff_mask[0] = False  # manually mask out low-diffuseness artifacts in the lowest frequency bin
# create masked doa with nans
doa_masked = np.empty((2, K, N))
for k in range(K):
for n in range(N):
if diff_mask[k, n]:
doa_masked[:, k, n] = DOA[:, k, n]
else:
doa_masked[:, k, n] = np.nan
# # decimate DOA in time
# DOA_decimated = np.empty((2, K, N // 2)) # todo fix number
# for n in range(N // 2):
# # todo fix numbers depending on decimation factor
# # todo: nanmean but circular!!!
# meanvalue = np.nanmean([doa_masked[:, :, n * 2], doa_masked[:, :, n * 2 - 1]], axis=0)
# meanvalue2 = np.mean([doa_masked[:, :, n * 2], doa_masked[:, :, n * 2 - 1]], axis=0)
# # DOA_decimated[:, :, n] = meanvalue
# DOA_decimated[:, :, n] = meanvalue2
# # if np.any(~np.isnan(meanvalue)):
# # pass
# M, K, N = DOA_decimated.shape
#
DOA_decimated = doa_masked
    # Create lists of azis and eles for each output frame
    # Filter out spurious candidates
azis = [[] for n in range(N)]
eles = [[] for n in range(N)]
for n in range(N):
a = DOA_decimated[0, :, n]
e = DOA_decimated[1, :, n]
azis_filtered = a[~np.isnan(a)]
if len(azis_filtered) > K_th:
azis[n] = azis_filtered
eles[n] = e[~np.isnan(e)]
# if debug_plot:
# plt.figure()
# # All estimates
# for n in range(N):
# if len(azis[n]) > 0:
# a = np.mod(azis[n] * 180 / np.pi, 360)
# plt.scatter(np.ones(len(a)) * n, a, marker='x', edgecolors='b')
# # Circmedian
# for n in range(N):
# if len(azis[n]) > 0:
# a = np.mod(azis[n] * 180 / np.pi, 360)
# plt.scatter(n, np.mod(circmedian(a, 'deg'), 360), facecolors='none', edgecolors='k')
#
# # circmean and std
# plt.figure()
# for n in range(N):
# if len(azis[n]) > 0:
# a = np.mod(azis[n] * 180 / np.pi, 360)
# plt.errorbar(n, scipy.stats.circmean(a, high=360, low=0), yerr= scipy.stats.circstd(a, high=360, low=0))
# plt.scatter(n, np.mod(circmedian(a, 'deg'), 360), facecolors='none', edgecolors='k')
#
#
# # boxplot
# import seaborn as sns
# a = []
# for n in range(N):
# if len(azis[n]) > 0:
# a.append(np.mod(azis[n] * 180 / np.pi, 360))
# else:
# a.append([])
# plt.figure()
# sns.boxplot(data=a)
#
# # number of single-source bins in frequency for each n
# plt.figure()
# plt.grid()
# for n in range(N):
# if len(azis[n]) > 0:
# plt.scatter(n, len(azis[n]), marker='x', edgecolors='b')
# TODO: separate frames with two overlapping sources
# Save into temp file
fo = tempfile.NamedTemporaryFile()
csv_file_path = fo.name + '.csv'
output_file_path = (os.path.splitext(csv_file_path)[0]) + '.mat'
with open(csv_file_path, 'a') as csvfile:
writer = csv.writer(csvfile)
for n in range(len(azis)):
if len(azis[n]) > 0: # if not empty, write
# time = n * seconds_per_frame
time = n * 0.1
# TODO: IQR TEST
azi = np.mod(circmedian(azis[n]) * 180 / np.pi, 360) # csv needs degrees, range 0..360
ele = 90 - (np.median(eles[n]) * 180 / np.pi) # csv needs degrees
writer.writerow([time, azi, ele])
# Call Matlab
eng = matlab.engine.start_matlab()
this_file_path = os.path.dirname(os.path.abspath(__file__))
matlab_path = this_file_path + '/../multiple-target-tracking-master'
eng.addpath(matlab_path)
eng.func_tracking(csv_file_path, float(V_azi), float(V_ele), float(in_sd),
float(in_sdn), init_birth, in_cp, float(num_particles), nargout=0)
# Load output matlab file
output = loadmat(output_file_path)
output_data = output['tracks'][0]
num_events = output_data.size
# each element of output_data is a different event
# order of stored data is [time][[azis][eles][std_azis][std_eles]]
# convert output data into Events
min_length = min_event_length
event_list = []
for n in range(num_events):
frames = (output_data[n][0][0] / 0.1).astype(int) # frame numbers
# sometimes there are repeated frames; clean them
diff = frames[1:] - frames[:-1]
frames = np.insert(frames[1:][diff != 0], 0, frames[0])
if len(frames) > min_length:
azis = output_data[n][1][0] * np.pi / 180. # in rads
azis = [a - (2*np.pi) if a > np.pi else a for a in azis] # adjust range to [-pi, pi]
            eles = (90 - output_data[n][1][1]) * np.pi / 180.  # in rads, inclination converted to elevation
event_list.append(Event(-1, -1, frames, azis, eles))
def trim_event(e):
frames = e.get_frames()
azis = e.get_azis()
eles = e.get_eles()
diff = frames[1:] - frames[:-1]
# large diffs tend to be at the end, so just discard everything after the peak
        peak = np.argwhere(diff > 40)  # TODO warning: hardcoded threshold
if peak.size>0:
# until the peak
peak_idx = peak[0][0]
new_frames = frames[:peak_idx+1]
new_azis = azis[:peak_idx+1]
new_eles = eles[:peak_idx+1]
else:
# just copy
new_frames = frames
new_azis = azis
new_eles = eles
return Event(-1, -1, np.asarray(new_frames), np.asarray(new_azis), np.asarray(new_eles))
trimmed_event_list = []
for e in event_list:
trimmed_event_list.append(trim_event(e))
event_list = trimmed_event_list
def interpolate_event(e):
frames = e.get_frames()
azis = e.get_azis()
eles = e.get_eles()
new_frames = []
new_azis = []
new_eles = []
frame_dist = frames[1:] - frames[:-1]
for fd_idx, fd in enumerate(frame_dist):
if fd == 1:
                # contiguous, keep the current sample
new_frames.append(frames[fd_idx])
new_azis.append(azis[fd_idx])
new_eles.append(eles[fd_idx])
else:
start = frames[fd_idx]
end = frames[fd_idx+1]
new_frames.extend(np.arange(start, end, 1).tolist())
new_azis.extend(np.linspace(azis[fd_idx], azis[fd_idx+1], fd).tolist())
                new_eles.extend(np.linspace(eles[fd_idx], eles[fd_idx+1], fd).tolist())
        # append the final sample, which the pairwise loop above skips
        new_frames.append(frames[-1])
        new_azis.append(azis[-1])
        new_eles.append(eles[-1])
        return Event(-1, -1, np.asarray(new_frames), np.asarray(new_azis), np.asarray(new_eles))
interpolated_event_list = []
for e in event_list:
interpolated_event_list.append(interpolate_event(e))
event_list = interpolated_event_list
# TODO PARAMETRIZE
def decimate_event(e):
frames = e.get_frames()
azis = e.get_azis()
eles = e.get_eles()
new_frames = []
new_azis = []
new_eles = []
for f_idx, f in enumerate(frames):
if f%2==1: # only odd
new_frames.append(f//2)
new_azis.append(azis[f_idx])
new_eles.append(eles[f_idx])
return Event(-1, -1, np.asarray(new_frames), np.asarray(new_azis), np.asarray(new_eles))
# Decimate list
decimated_event_list = []
for e in event_list:
decimated_event_list.append(decimate_event(e))
event_list = decimated_event_list
# Check that all events have data
filtered_event_list = []
for e in event_list:
if len(e.get_frames()) > 0:
filtered_event_list.append(e)
event_list = filtered_event_list
if debug_plot:
# # plot doa estimates and particle trajectories
# plt.figure()
# plt.grid()
# # framewise estimates
# est_csv = np.loadtxt(open(csv_file_path, "rb"), delimiter=",")
# t = est_csv[:, 0] * 10
# a = est_csv[:, 1]
# e = est_csv[:, 2]
# plt.scatter(t, a, marker='x', edgecolors='b')
# # particle filter
# for e_idx, e in enumerate(event_list):
# azis = np.asarray(e.get_azis()) * 180 / np.pi
# azis = [a + (360) if a < 0 else a for a in azis] # adjust range to [-pi, pi]
# plt.plot(e.get_frames(), azis, marker='.', color='chartreuse')
# PLOT # todo check elevation/inclination
plt.figure()
title_string = str(V_azi) + '_' + str(V_ele) + '_' + str(in_sd) + '_' + str(in_sdn) + '_' + str(
init_birth) + '_' + str(in_cp) + '_' + str(num_particles)
plt.title(title_string)
plt.grid()
# framewise estimates
        est_csv = np.loadtxt(csv_file_path, delimiter=",")
t = est_csv[:, 0] * 10 / 2 # TODO: ADAPTIVE DECIMATION
a = est_csv[:, 1]
e = est_csv[:, 2]
plt.scatter(t, a, marker='x', edgecolors='b')
# groundtruth
        gt_csv = np.loadtxt(metadata_file_path, delimiter=",")
t = gt_csv[:, 0]
a = np.mod(gt_csv[:, 3], 360)
e = gt_csv[:, 4]
plt.scatter(t, a, facecolors='none', edgecolors='r')
# particle filter
for e_idx, e in enumerate(event_list):
azis = e.get_azis() * 180 / np.pi
            azis = [a + 360 if a < 0 else a for a in azis]  # wrap negative azimuths into [0, 360) degrees
plt.plot(e.get_frames(), azis, marker='.', markersize=1, color='chartreuse')
return event_list
| 38.070912 | 161 | 0.524575 | 3,189 | 26,307 | 4.155535 | 0.146127 | 0.038636 | 0.012225 | 0.010791 | 0.405071 | 0.336251 | 0.30652 | 0.292409 | 0.285466 | 0.254905 | 0 | 0.019446 | 0.370548 | 26,307 | 690 | 162 | 38.126087 | 0.780844 | 0.252176 | 0 | 0.451613 | 0 | 0 | 0.015338 | 0.001814 | 0 | 0 | 0 | 0.004348 | 0.002481 | 1 | 0.019851 | false | 0 | 0.012407 | 0 | 0.052109 | 0.057072 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8cfea2816c67d28c0443d15b5e9fb981124249c8 | 4,338 | py | Python | aws_interface/cloud/message/error.py | hubaimaster/aws-interface | 162dd056546d58b6eb29afcae1c3c2d78e4309b2 | [
"Apache-2.0"
] | 53 | 2018-10-02T05:58:54.000Z | 2020-09-15T08:58:26.000Z | aws_interface/cloud/message/error.py | hubaimaster/aws-interface | 162dd056546d58b6eb29afcae1c3c2d78e4309b2 | [
"Apache-2.0"
] | 52 | 2018-09-26T05:16:09.000Z | 2022-03-11T23:51:14.000Z | aws_interface/cloud/message/error.py | hubaimaster/aws-interface | 162dd056546d58b6eb29afcae1c3c2d78e4309b2 | [
"Apache-2.0"
] | 10 | 2019-03-11T16:35:14.000Z | 2019-10-23T08:03:54.000Z | """
Error code and message definitions
"""
PERMISSION_DENIED = {
'code': 1,
'message': 'Permission denied'
}
GUEST_LOGIN_INVALID = {
'code': 2,
'message': 'Guest login has been deactivated'
}
NO_SUCH_GUEST = {
'code': 3,
'message': 'No such guest'
}
EMAIL_LOGIN_INVALID = {
'code': 4,
'message': 'Email login has been deactivated'
}
WRONG_PASSWORD = {
'code': 5,
'message': 'Password did not match'
}
NO_SUCH_ACCOUNT = {
'code': 6,
'message': 'No such account'
}
LOGOUT_FAILED = {
'code': 7,
'message': 'Logout failed'
}
PUT_USER_GROUP_FAILED = {
'code': 8,
'message': 'Insert user group failed'
}
EXISTING_ACCOUNT = {
'code': 9,
'message': 'Existing account'
}
FORBIDDEN_MODIFICATION = {
'code': 10,
'message': 'Forbidden modifications'
}
NO_SUCH_PARTITION = {
'code': 11,
'message': 'No such partition'
}
NUM_OF_BATCH_ITEMS_MUST_BE_LESS_THAN_128 = {
'code': 12,
'message': 'Number of item_ids must be less than 128'
}
LOG_CREATION_FAILED = {
'code': 13,
'message': 'Log creation failed'
}
INVALID_SESSION = {
'code': 14,
'message': 'Invalid session'
}
INVALID_FILE_KEY = {
'code': 15,
'message': 'Invalid file key'
}
INVALID_REQUEST = {
'code': 16,
'message': 'Invalid request, please check parameters'
}
FORBIDDEN_REQUEST = {
'code': 17,
'message': 'Forbidden request'
}
NO_SUCH_FUNCTION = {
'code': 18,
'message': 'No such function'
}
DEFAULT_USER_GROUP_CANNOT_BE_MODIFIED = {
'code': 19,
'message': 'Default user groups can not be modified'
}
EXISTING_FUNCTION = {
'code': 20,
'message': 'Function name already exists'
}
FUNCTION_ERROR = {
'code': 21,
'message': 'Function has errors: {}'
}
NO_SUCH_FUNCTION_TEST = {
'code': 22,
'message': 'No such function test'
}
NO_SUCH_POLICY_MODE = {
'code': 23,
'message': 'No such policy mode'
}
NO_SUCH_ITEM = {
'code': 24,
'message': 'No such item'
}
UNSUPPORTED_FILE_TYPE = {
'code': 25,
'message': 'Unsupported file type'
}
NO_SUCH_FILE = {
'code': 26,
'message': 'No such file'
}
ADMIN_GROUP_CANNOT_BE_MODIFIED = {
'code': 27,
'message': 'An admin group can not be modified'
}
NO_SUCH_LOGIN_METHOD = {
'code': 28,
'message': 'No such login method',
}
REGISTER_POLICY_VIOLATION = {
'code': 29,
'message': 'Register policy violation',
}
EXISTING_TRIGGER = {
'code': 30,
'message': 'Existing trigger'
}
EXISTING_WEBHOOK = {
'code': 31,
'message': 'Existing webhook'
}
NO_SUCH_WEBHOOK = {
'code': 32,
'message': 'No such webhook'
}
EXISTING_SCHEDULE = {
'code': 33,
'message': 'Existing schedule'
}
NO_SUCH_SCHEDULE = {
'code': 34,
'message': 'No such schedule'
}
EXISTING_EMAIL_PROVIDER = {
'code': 35,
'message': 'Existing email provider'
}
NO_SUCH_EMAIL_PROVIDER = {
'code': 36,
'message': 'No such email provider'
}
FACEBOOK_LOGIN_INVALID = {
'code': 37,
'message': 'Facebook login invalid'
}
NAVER_LOGIN_INVALID = {
'code': 38,
'message': 'Naver login invalid'
}
KAKAO_LOGIN_INVALID = {
'code': 39,
'message': 'Kakao login invalid'
}
GOOGLE_LOGIN_INVALID = {
'code': 40,
'message': 'Google login invalid'
}
EXISTING_ACCOUNT_VIA_OTHER_LOGIN_METHOD = {
'code': 41,
'message': 'Existing account via other login method'
}
EXISTING_SORT_KEY = {
'code': 42,
'message': 'Existing sort index key'
}
EXISTING_SLACK_WEBHOOK_NAME = {
'code': 43,
'message': 'Existing slack webhook name'
}
NO_SUCH_SLACK_WEBHOOK = {
'code': 44,
'message': 'No such slack webhook'
}
QUERY_POLICY_VIOLATION = {
'code': 45,
'message': 'Query policy violation'
}
JOIN_POLICY_VIOLATION = {
'code': 46,
'message': 'Join policy violation'
}
UPDATE_POLICY_VIOLATION = {
'code': 47,
'message': 'Update policy violation',
}
DELETE_POLICY_VIOLATION = {
'code': 48,
'message': 'Delete policy violation',
}
CREATE_POLICY_VIOLATION = {
'code': 49,
'message': 'Create policy violation',
}
NOT_USER_PARTITION = {
'code': 50,
'message': 'Not a user partition'
}
READ_POLICY_VIOLATION = {
'code': 51,
'message': 'Read policy violation',
}
UNREGISTERED_PARTITION = {
'code': 52,
'message': 'Unregistered partition',
}
| 20.462264 | 57 | 0.629322 | 504 | 4,338 | 5.194444 | 0.291667 | 0.059587 | 0.064553 | 0.017571 | 0.05806 | 0.025974 | 0 | 0 | 0 | 0 | 0 | 0.029961 | 0.222914 | 4,338 | 211 | 58 | 20.559242 | 0.746663 | 0.007838 | 0 | 0 | 0 | 0 | 0.396415 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.009615 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5065147dac67c5b2ad5f117c6d26864ebd4e5278 | 1,923 | py | Python | microscopic-monks/primal/engine/perlin.py | CharlieADavies/code-jam-6 | 4de1527d0d54207c5b7b09fe2a7ad3ab8071bd95 | [
"MIT"
] | 76 | 2020-01-17T12:09:48.000Z | 2022-03-26T19:17:26.000Z | microscopic-monks/primal/engine/perlin.py | Hypertyz/code-jam-6 | a7eb3b1256ae113c93f0337892c667768e8bc199 | [
"MIT"
] | 17 | 2020-01-21T23:13:34.000Z | 2020-02-07T00:07:04.000Z | microscopic-monks/primal/engine/perlin.py | CharlieADavies/code-jam-6 | 4de1527d0d54207c5b7b09fe2a7ad3ab8071bd95 | [
"MIT"
] | 91 | 2020-01-17T12:01:06.000Z | 2022-03-22T20:38:59.000Z | from math import floor
# Feel free to change these
RND_A = 134775813
RND_B = 1103515245
SEED = 1
# Python3 integers are unlimited size.
# MAX_VAL is set so cubic_noise.py works the same as the other implementations.
MAX_VAL = 2 ** 31 - 1
periodx = MAX_VAL
periody = MAX_VAL
# Don't change MASK. MASK is used for the bitwise operations in randomize()
MASK = 2 ** 32 - 1
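# interpolate() below is a cubic (4-point) interpolation: given samples
# a, b, c, d at consecutive integer positions, it evaluates the cubic curve
# between b and c at fractional position x in [0, 1] (x=0 yields b, x=1 yields c).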
def interpolate(a, b, c, d, x):
p = (d - c) - (a - b)
return x * (x * (x * p + ((a - b) - p)) + (c - a)) + b
def randomize(x, y, seed=SEED):
return (((((x ^ y) * RND_A) ^ (seed + x)) *
(((RND_B * x) << 16) ^ (RND_B * y) - RND_A)) & MASK) / MASK
def tile(coordinate, period=MAX_VAL):
return coordinate % period
def _sample1d(x, seed=SEED, octave=1):
xi = floor(x / octave)
lerp = x / octave - xi
return interpolate(
randomize(tile(xi - 1, periodx), 0, seed),
randomize(tile(xi, periodx), 0, seed),
randomize(tile(xi + 1, periodx), 0, seed),
randomize(tile(xi + 2, periodx), 0, seed),
lerp) * 0.5 + 0.25
def _sample2d(x, y, seed=SEED, octave=1):
xi = floor(x / octave)
lerpx = x / octave - xi
yi = floor(y / octave)
lerpy = y / octave - yi
xSamples = [0, 0, 0, 0]
for ii in range(4):
xSamples[ii] = interpolate(
randomize(tile(xi - 1, periodx), tile(yi - 1 + ii, periody), seed),
randomize(tile(xi, periodx), tile(yi - 1 + ii, periody), seed),
randomize(tile(xi + 1, periodx), tile(yi - 1 + ii, periody), seed),
randomize(tile(xi + 2, periodx), tile(yi - 1 + ii, periody), seed),
lerpx)
return interpolate(*xSamples, lerpy) * 0.5 + 0.25
def sample(x=None, y=None, seed=SEED, octave=1):
if x is None:
return _sample1d(y, seed, octave)
elif y is None:
return _sample1d(x, seed, octave)
else:
return _sample2d(x, y, seed, octave)
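# Hypothetical usage sketch (not part of the original module): sample the
# noise at a few points; values land roughly in [0.25, 0.75].
if __name__ == '__main__':
    print(sample(x=3.7))                    # 1-D sample
    print(sample(x=3.7, y=1.2, octave=4))   # smoother 2-D sample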
| 28.701493 | 79 | 0.574103 | 294 | 1,923 | 3.697279 | 0.27551 | 0.095676 | 0.110396 | 0.104876 | 0.343146 | 0.328427 | 0.288868 | 0.264029 | 0.210672 | 0.210672 | 0 | 0.049027 | 0.278731 | 1,923 | 66 | 80 | 29.136364 | 0.734679 | 0.111284 | 0 | 0.043478 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.130435 | false | 0 | 0.021739 | 0.043478 | 0.326087 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5066b5439aeede979d75a63229a6d0fd6dcbc9da | 3,899 | py | Python | estimagic/logging/update_database.py | SofiaBadini/estimagic | ff4948dc4175cd690b3a021969c6119a6a619f96 | [
"BSD-3-Clause"
] | null | null | null | estimagic/logging/update_database.py | SofiaBadini/estimagic | ff4948dc4175cd690b3a021969c6119a6a619f96 | [
"BSD-3-Clause"
] | null | null | null | estimagic/logging/update_database.py | SofiaBadini/estimagic | ff4948dc4175cd690b3a021969c6119a6a619f96 | [
"BSD-3-Clause"
] | null | null | null | """Functions to update a DataBase in a thread safe way.
All write operation to a database in estimagic should be done via functions from this
module.
Public functions in this module should not require any knowledge of sqlalchemy or
sql in general. This is also the reason why _execute_write_statements is not a public
function.
"""
import datetime as dt
import traceback
import warnings
from pathlib import Path
import pandas as pd
import sqlalchemy
def append_rows(database, tables, rows):
"""Append rows to one or several tables in one transaction.
Using just one transaction ensures that the iteration counters stay correct in
parallel optimizations. It is also faster than using several transactions.
If anything fails, the complete operation is rolled back and the data is stored in
pickle files instead.
Args:
database (sqlalchemy.MetaData):
tables (str or list): A table name or list of table names.
rows (dict, pd.Series or list): The data to append.
"""
if isinstance(tables, str):
tables = [tables]
if isinstance(rows, (dict, pd.Series)):
rows = [rows]
assert len(tables) == len(rows), "There must be one value per table."
rows = [dict(val) for val in rows]
inserts = [
database.tables[tab].insert().values(**row) for tab, row in zip(tables, rows)
]
_execute_write_statements(inserts, database)
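def _example_append_rows():
    """
    Hypothetical usage sketch (not part of estimagic): append one row to a
    table in an in-memory SQLite database. Assumes SQLAlchemy 1.x, where
    MetaData accepts a ``bind`` argument; the table name is an example.
    """
    engine = sqlalchemy.create_engine("sqlite:///:memory:")
    database = sqlalchemy.MetaData(bind=engine)
    sqlalchemy.Table(
        "criterion_history",
        database,
        sqlalchemy.Column("value", sqlalchemy.Float),
    )
    database.create_all()
    append_rows(database, "criterion_history", {"value": 1.5})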
def update_scalar_field(database, table, value):
"""Update the value of a table with one row and one column called "value".
Args:
database (sqlalchemy.MetaData)
table (string): Name of the table to be updated.
value: The new value of the table.
"""
value = {"value": value}
upd = database.tables[table].update().values(**value)
_execute_write_statements(upd, database)
def _execute_write_statements(statements, database):
"""Execute all statements in one atomic transaction.
If any statement fails, the transaction is rolled back, and a warning is issued.
If the statements contain inserts or updates, the values of that statement are
pickled in the same directory as the database.
Args:
statements (list or sqlalchemy statement): List of sqlalchemy statements
or single statement that entail a write operation. Examples are Insert,
Update and Delete.
database (sqlalchemy.MetaData): The bind argument must be set.
"""
if not isinstance(statements, (list, tuple)):
statements = [statements]
engine = database.bind
conn = engine.connect()
# acquire lock
trans = conn.begin()
try:
for stat in statements:
conn.execute(stat)
# release lock
trans.commit()
conn.close()
except (KeyboardInterrupt, SystemExit):
exception_info = traceback.format_exc()
trans.rollback()
conn.close()
_handle_exception(statements, database, exception_info)
raise
except Exception:
exception_info = traceback.format_exc()
trans.rollback()
conn.close()
_handle_exception(statements, database, exception_info)
def _handle_exception(statements, database, exception_info):
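    # Assumes an sqlite URL: str(url)[10:] strips the "sqlite:///" prefix,
    # leaving the path of the database file.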
directory = Path(str(database.bind.url)[10:])
if not directory.is_dir():
directory = Path(".")
directory = directory.resolve()
for stat in statements:
if isinstance(stat, (sqlalchemy.sql.dml.Insert, sqlalchemy.sql.dml.Update)):
values = stat.compile().params
timestamp = dt.datetime.now().strftime("%Y%m%d-%H%M%S-%f")
filename = f"{stat.table.name}_{timestamp}.pickle"
pd.to_pickle(values, directory / filename)
warnings.warn(
f"Unable to write to database. The data was saved in {directory} instead. The "
f"traceback was:\n\n{exception_info}"
)
| 31.699187 | 87 | 0.675301 | 505 | 3,899 | 5.150495 | 0.348515 | 0.029988 | 0.033833 | 0.038062 | 0.09381 | 0.09381 | 0.076125 | 0.076125 | 0.076125 | 0.076125 | 0 | 0.000675 | 0.239549 | 3,899 | 122 | 88 | 31.959016 | 0.87656 | 0.399077 | 0 | 0.189655 | 0 | 0 | 0.09095 | 0.027015 | 0 | 0 | 0 | 0 | 0.017241 | 1 | 0.068966 | false | 0 | 0.103448 | 0 | 0.172414 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
506825dc7f25ceeaf9dc6797d7080fe6412f348c | 9,510 | py | Python | vpnporthole/settings.py | sourcesimian/vpn-porthole | de225ce9c730a40a0358f59c09fc5e16330e9dd7 | [
"MIT"
] | 15 | 2016-07-29T17:44:48.000Z | 2021-09-18T14:43:49.000Z | vpnporthole/settings.py | sourcesimian/vpn-porthole | de225ce9c730a40a0358f59c09fc5e16330e9dd7 | [
"MIT"
] | 2 | 2017-02-16T10:23:07.000Z | 2018-07-24T12:54:36.000Z | vpnporthole/settings.py | sourcesimian/vpn-porthole | de225ce9c730a40a0358f59c09fc5e16330e9dd7 | [
"MIT"
] | 3 | 2016-06-25T10:47:56.000Z | 2020-02-20T09:11:03.000Z | import sys
import os
from configobj import ConfigObj, get_extra_values, DuplicateError
from validate import Validator
from pkg_resources import resource_stream
from vpnporthole.ip import IPv4Subnet
class Settings(object):
__sudo_password = None
__ctx = None
def __init__(self, profile_name):
self.__profile_name = profile_name
self.__ensure_config_setup()
self.__settings = self.__get_settings()
self.__profile = self.__get_profile(profile_name)
if not self.__settings or not self.__profile:
exit(3)
@property
def profile_name(self):
return self.__profile_name
@property
def docker_machine(self):
machine = self.__profile['docker']['machine']
if machine:
return machine
machine = self.__settings['docker']['machine']
if machine:
return machine
return None
def username(self):
usr = self.__extract(self.__profile['username'])
if not usr:
usr = input("")
return usr
def password(self):
pwd = self.__extract(self.__profile['password'])
if not pwd:
import getpass
pwd = getpass.getpass('')
return pwd
def sudo(self):
try:
pwd = self.__settings['system']['sudo']
except KeyError:
pwd = ''
if not pwd:
if self.__sudo_password is not None:
return self.__sudo_password
import getpass
pwd = getpass.getpass('Enter sudo password:')
Settings.__sudo_password = pwd
return self.__extract(pwd)
def build_files(self):
ret = {}
for filename, content in self.__profile['build']['files'].iteritems():
if content:
content = self.__file_content(content)
if filename.endswith('.tmpl'):
content = self.__render_template(content)
filename = filename[:-5]
ret[filename] = content
return ret
def run_hook_files(self):
ret = {}
for filename, content in self.__profile['run']['hooks'].iteritems():
if content:
content = self.__file_content(content)
content = self.__render_template(content)
ret[filename] = content
return ret
def __file_content(self, value):
from textwrap import dedent
if value.startswith((' ', '\n', '\t', '\\')):
return dedent(value[value.find('\n') + 1:]).rstrip(' ')
else:
try:
with open(os.path.expanduser(value), 'rt') as fh:
return fh.read()
except FileNotFoundError:
raise FileNotFoundError('"%s"' % value)
def __render_template(self, content):
from tempita import Template
template = Template(content)
result = template.substitute(**{k: self.ctx[k] for k in self.ctx})
return result
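    # Build files ending in ".tmpl" are rendered with Tempita; expressions use
    # double braces, e.g. {{local.user.name}} or {{vpn.addr}} (names defined
    # in the ctx property below).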
def build_options(self):
ret = {}
for k, v in self.__profile['build']['options'].iteritems():
if v:
ret[k] = self.__extract(v)
return ret
def run_options(self):
args = []
for key in sorted(self.__profile['run']['options'].keys()):
value = self.__profile['run']['options'][key]
args.extend(value.split(' ', 1))
return args
def __extract(self, value):
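        # Values prefixed with "SHELL:" are resolved by running the remainder
        # as a shell command, e.g. a hypothetical config entry:
        #   password = SHELL:pass show my-vpn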
if value and value.startswith('SHELL:'):
import subprocess
value = subprocess.check_output(value[6:], shell=True).decode('utf-8').rstrip()
return value
def vpn(self):
return self.__profile['vpn']
def subnets(self):
return [IPv4Subnet(k)
for k, v in self.__profile['subnets'].items()
if v is True]
def domains(self):
return [k
for k, v in self.__profile['domains'].items()
if v is True]
@property
def ctx(self):
if not self.__ctx:
import pwd
import grp
class dotdict(dict):
__getattr__ = dict.get
__setattr__ = dict.__setitem__
gid = os.getgid()
uid = os.getuid()
user = dotdict()
user.gid = gid
user.group = grp.getgrgid(gid).gr_name
user.uid = uid
user.name = pwd.getpwuid(uid).pw_name
local = dotdict()
local.user = user
vpn = dotdict()
vpn.addr = self.vpn()
option = dotdict()
for k, v in self.build_options().items():
option[k] = v
vpnp = dotdict()
from textwrap import dedent
vpnp.hooks = dedent('''
ADD vpnp/ /vpnp/
RUN sudo chmod +x /vpnp/*
''')
ctx = dotdict()
ctx.local = local
ctx.vpn = vpn
ctx.vpnp = vpnp
ctx.option = option
self.__ctx = ctx
return self.__ctx
@classmethod
def __default_settings_root(cls):
return os.path.expanduser('~/.config/vpn-porthole')
@classmethod
def __ensure_config_setup(cls):
root = cls.__default_settings_root()
if not os.path.exists(root):
os.makedirs(root)
settings_file = os.path.join(root, 'settings.conf')
if not os.path.exists(settings_file):
with open(settings_file, 'w+b') as fh:
content = resource_stream("vpnporthole", "resources/settings.conf").read()
fh.write(content)
print("* Wrote: %s" % settings_file)
root = os.path.join(root, 'profiles')
if not os.path.exists(root):
os.makedirs(root)
profile_file = os.path.join(root, 'example.conf')
if not os.path.exists(profile_file):
with open(profile_file, 'w+b') as fh:
content = resource_stream("vpnporthole", "resources/example.conf").read()
fh.write(content)
print("* Wrote: %s" % profile_file)
@classmethod
def __get_settings(cls):
config_root = cls.__default_settings_root()
settings_file = os.path.join(config_root, 'settings.conf')
settings_spec_lines = resource_stream("vpnporthole", "resources/settings.spec").readlines()
settings = cls.__load_configobj(settings_file, settings_spec_lines)
if not settings:
exit(3)
return settings
@classmethod
def __get_profile(cls, name):
config_root = cls.__default_settings_root()
if name in ('all',):
sys.stderr.write('! Invalid profile name "%s"\n' % name)
exit(1)
session_file = os.path.join(config_root, 'profiles', '%s.conf' % name)
session_spec_lines = resource_stream("vpnporthole", "resources/profile.spec").readlines()
profile = cls.__load_configobj(session_file, session_spec_lines)
return profile
@classmethod
def list_profile_names(cls):
names = []
config_root = cls.__default_settings_root()
sessions_glob = os.path.join(config_root, 'profiles', '*.conf')
from glob import glob
for session_file in glob(sessions_glob):
name = os.path.splitext(os.path.basename(session_file))[0]
names.append(name)
return names
@classmethod
def __load_configobj(cls, config_file, spec_lines):
try:
confobj = ConfigObj(config_file, configspec=spec_lines, raise_errors=True,
interpolation=False)
except DuplicateError as e:
sys.stderr.write('! Bad config file "%s": %s\n' % (config_file, e))
return None
except Exception as e:
sys.stderr.write('! Bad config file "%s": %s\n' % (config_file, e))
return None
bad_values = []
bad_keys = []
result = confobj.validate(Validator())
if result is False:
sys.stderr.write('! Unable to validate config file "%s":\n' % config_file)
return None
if result is not True:
def walk(node, dir):
for key, item in node.items():
path = dir + [key]
if isinstance(item, dict):
walk(item, path)
else:
if item is False:
bad_values.append(path)
walk(result, [])
extra = get_extra_values(confobj)
if extra:
for path, key in extra:
bad_keys.append(list(path) + [key])
if bad_keys:
sys.stderr.write('! Unknown keys in config file "%s":\n' % config_file)
for key in bad_keys:
sys.stderr.write(' - /%s\n' % '/'.join(key))
if bad_values:
sys.stderr.write('! Bad values in settings file "%s":\n' % config_file)
for key in bad_values:
value = confobj
try:
for k in key:
value = value[k]
except KeyError:
value = '<missing>'
sys.stderr.write(' - /%s = %s\n' % ('/'.join(key), value))
if bad_keys or bad_values:
return None
return confobj
| 31.80602 | 99 | 0.542376 | 1,045 | 9,510 | 4.716746 | 0.181818 | 0.035707 | 0.022723 | 0.012173 | 0.284033 | 0.235342 | 0.12538 | 0.117671 | 0.08521 | 0.042605 | 0 | 0.001782 | 0.350894 | 9,510 | 298 | 100 | 31.912752 | 0.796695 | 0 | 0 | 0.247967 | 0 | 0 | 0.080231 | 0.011777 | 0 | 0 | 0 | 0 | 0 | 1 | 0.097561 | false | 0.04065 | 0.060976 | 0.020325 | 0.292683 | 0.00813 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50686c4e7f8c777a527d535c311f3a906fafb7c6 | 12,148 | py | Python | app/main/views.py | gems-uff/sms | 01cfa84bd467617c58f58da04711c5097dd93fe6 | [
"MIT"
] | null | null | null | app/main/views.py | gems-uff/sms | 01cfa84bd467617c58f58da04711c5097dd93fe6 | [
"MIT"
] | null | null | null | app/main/views.py | gems-uff/sms | 01cfa84bd467617c58f58da04711c5097dd93fe6 | [
"MIT"
] | null | null | null | # TODO: export logger from logger.py
import jsonpickle
from flask import (Blueprint, render_template, redirect,
url_for, current_app, session, request, flash)
import sqlalchemy
from flask_login import current_user
from app.extensions import db
from app.logger import logger
from app.auth.decorators import restrict_to_logged_users, permission_required
from app.auth.models import Permission, User
from .models import (
Order, OrderItem, Transaction, Stock, StockProduct, Product, Specification)
from . import forms
from . import services as svc
from . import utils
blueprint = Blueprint('main', __name__)
blueprint.before_request(restrict_to_logged_users)
@blueprint.route('/', methods=['GET'])
@permission_required(Permission.VIEW)
def index():
return redirect(url_for('.show_stock'))
@blueprint.route('/stock', methods=['GET'])
@permission_required(Permission.VIEW)
def show_stock():
template = 'main/index.html'
products = svc.get_products_in_stock(svc.get_stock())
stock = svc.get_stock()
stock_products = sorted(
[sp for sp in stock.stock_products if sp.amount > 0],
key=lambda sp: sp.product.name,
)
return render_template(template,
products=products,
stock_products=stock_products)
@blueprint.route('/catalog', methods=['GET'])
@permission_required(Permission.VIEW)
def show_catalog():
template = 'main/list-products.html'
view = 'main.show_catalog'
products = Product.query.order_by(Product.name).all()
return render_template(template,
products=products)
@blueprint.route('/transactions', methods=['GET'])
@permission_required(Permission.VIEW)
def list_transactions():
template = 'main/list-transactions.html'
view = 'main.list_transactions'
transactions = Transaction.query.order_by(
Transaction.updated_on.desc()).all()
return render_template(template,
transactions=transactions)
@blueprint.route('/orders', methods=['GET'])
@permission_required(Permission.VIEW)
def list_orders():
template = 'main/list-orders.html'
view = 'main.list_orders'
orders = Order.query.order_by(Order.order_date.desc()).all()
return render_template(template,
orders=orders)
# TODO: Implement this method:
@blueprint.route('/transactions/<int:transaction_id>/delete', methods=['GET'])
@permission_required(Permission.DELETE)
def delete_transaction(transaction_id):
flash('Essa funcionalidade ainda não foi implementada.', 'warning')
return redirect(url_for('.list_transactions'))
@blueprint.route('/orders/add', methods=['GET', 'POST'])
@permission_required(Permission.EDIT)
def purchase_product():
logger.info('purchase_product()')
specifications = svc.get_specifications()
form_context = {
'specs': specifications,
}
form = forms.OrderItemForm(**form_context)
order_items = svc.get_order_items_from_session()
for order_item in order_items:
order_item.item = Specification.query.get(order_item.item_id)
if request.method == 'POST':
if form.cancel.data is True:
svc.clear_order_items_session()
return redirect(url_for('.purchase_product'))
if form.finish_order.data is True:
if order_items:
return redirect(url_for('.checkout'))
flash('Pelo menos 1 reativo deve ser adicionado ao carrinho.',
'danger')
return redirect(url_for('.purchase_product'))
if form.validate():
order_item = OrderItem()
form.populate_obj(order_item)
svc.add_order_item_to_session(order_item)
flash('Reativo adicionado ao carrinho', 'success')
return redirect(url_for('.purchase_product'))
return render_template('main/create-order.html',
form=form, order_items=order_items)
@blueprint.route('/orders/checkout', methods=['GET', 'POST'])
@permission_required(Permission.EDIT)
def checkout():
form = forms.OrderForm()
stock = svc.get_stock()
if session.get('order_items') is None:
session['order_items'] = []
order_items = [jsonpickle.decode(item)
for item in session.get('order_items')]
for order_item in order_items:
order_item.item = Specification.query.get(order_item.item_id)
logger.info('Retrieve unpickled order_items from session')
if request.method == 'POST':
logger.info('POSTing to checkout')
if form.cancel.data is True:
logger.info('Cancel order, cleaning session')
session['order_items'] = []
return redirect(url_for('.purchase_product'))
if order_items:
if form.validate():
logger.info('starting check out...')
order = Order()
logger.info(
'populating order with form data and order_items')
form.populate_obj(order)
order.order_items = order_items
order.user = current_user
db.session.add(order)
try:
logger.info('Saving order to database...')
for order_item in order.order_items:
logger.info(
'Adding %s to stock' % order_item)
product = order_item.item.product
lot_number = order_item.lot_number
total_units = order_item.amount * order_item.item.units
expiration_date = order_item.expiration_date
logger.info('stock.add({}, {}, {}, {})'.format(
product, lot_number, expiration_date, total_units))
stock.add(
product,
lot_number,
expiration_date,
total_units)
order_item.added_to_stock = True
db.session.add(order_item)
logger.info('Comitting session...')
db.session.commit()
logger.info(
'Creating transactions from order...')
svc.create_add_transactions_from_order(order, stock)
logger.info(
'Flashing success and returning to index')
flash('Ordem executada com sucesso', 'success')
session['order_items'] = []
return redirect(url_for('.index'))
except (ValueError) as err:
db.session.rollback()
session['order_items'] = []
logger.error('Could not save the order to db. Rollback.')
logger.error(err)
flash('Algo deu errado, contate um administrador!')
return render_template('main/index.html')
else:
logger.info('No item added to cart')
flash('É necessário adicionar pelo menos 1 item ao carrinho.',
'warning')
return redirect(url_for('.purchase_product'))
return render_template('main/checkout.html',
form=form,
order_items=order_items,)
@blueprint.route('/products/consume', methods=['GET', 'POST'])
@permission_required(Permission.EDIT)
def consume_product():
logger.info('consume_product()')
stock = svc.get_stock()
stock_products = sorted(
[sp for sp in stock.stock_products if sp.amount > 0],
key=lambda sp: sp.product.name,
)
for stock_product in stock_products:
stock_product.manufacturer = svc.get_manufacturer_by_lot_number(stock_product.lot_number)
form_context = {
'stock_products': stock_products,
}
form = forms.ConsumeProductForm(**form_context)
if form.validate_on_submit():
logger.info('POSTing a valid form to consume_product')
logger.info('Creating a new SUB Transaction')
try:
selected_stock_product = StockProduct.query.get(
form.stock_product_id.data)
logger.info(
'Retrieving info from selected_stock_product')
product = selected_stock_product.product
lot_number = selected_stock_product.lot_number
amount = form.amount.data
stock.subtract(product, lot_number, amount)
logger.info('Commiting subtraction')
consumer_user = User.query.filter_by(id=form.consumer_id.data).first()
db.session.commit()
logger.info('Creating sub-transaction')
svc.create_sub_transaction(
consumer_user,
product,
lot_number,
amount,
stock
)
flash('{} unidades de {} removidas do estoque com sucesso!'.format(
form.amount.data, selected_stock_product.product.name),
'success',
)
return redirect(url_for('.consume_product'))
except ValueError as err:
logger.error(err)
form.amount.errors.append(
'Não há o suficiente desse reativo em estoque.')
except Exception:
flash('Erro inesperado, contate o administrador.', 'danger')
return render_template('main/consume-product.html', form=form)
@blueprint.route('/products/add', methods=['GET', 'POST'])
@permission_required(Permission.EDIT)
def add_product_to_catalog():
form = forms.AddProductForm()
if form.validate_on_submit():
product = svc.get_product_by_name(form.name.data)
if product:
            flash('Já existe um reativo com esse nome no catálogo. '
                  'Segue abaixo suas especificações', 'warning')
else:
product = svc.create_product(form.name.data)
flash(f'{product.name} adicionado ao catálogo com sucesso',
'success')
return redirect(url_for('.detail_product',
product_id=product.id,
specifications=product.specifications))
return render_template('main/create-product.html', form=form)
@blueprint.route('/products/<int:product_id>/specifications',
methods=['GET', 'POST'])
@permission_required(Permission.EDIT)
def add_specification_to_product(product_id):
product = Product.query.get_or_404(product_id)
form = forms.AddSpecificationForm(product.id)
if form.validate_on_submit():
try:
specification = Specification(
catalog_number=form.catalog_number.data,
manufacturer=form.manufacturer.data,
units=form.units.data,
product_id=product_id,
)
db.session.add(specification)
db.session.commit()
flash('Especificação adicionada com sucesso.', 'success')
return redirect(url_for('.detail_product', product_id=product.id))
except sqlalchemy.exc.IntegrityError:
db.session.rollback()
flash('Já existe uma especificação com esse catálogo e fabricante',
'danger')
return render_template('main/create-specification.html',
form=form, product=product)
@blueprint.route('/products/<int:product_id>', methods=['GET'])
@permission_required(Permission.EDIT)
def detail_product(product_id):
product = Product.query.get_or_404(product_id)
specifications = sorted(
[spec for spec in product.specifications],
key=lambda spec: spec.units,
)
return render_template('main/details-product.html',
product=product,
specifications=specifications)
@blueprint.route('/export/<string:table>')
@permission_required(Permission.VIEW)
def export(table):
response = utils.export_table(table, table + '.csv')
return response
| 39.061093 | 97 | 0.611459 | 1,297 | 12,148 | 5.541249 | 0.1835 | 0.032002 | 0.025324 | 0.033394 | 0.329066 | 0.273689 | 0.225824 | 0.184361 | 0.132044 | 0.09712 | 0 | 0.001154 | 0.286385 | 12,148 | 310 | 98 | 39.187097 | 0.827892 | 0.005186 | 0 | 0.280443 | 0 | 0 | 0.16835 | 0.030707 | 0 | 0 | 0 | 0.003226 | 0 | 1 | 0.04797 | false | 0 | 0.04428 | 0.00369 | 0.180812 | 0.059041 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
506b8da144828f148ebe5ffa500b5da9e7d8f6ee | 12,835 | py | Python | rs/vip_paypal_payments.py | alexander-marquardt/lexalink | d554f3a00699c8a4cdf1b28dd033655f929470fa | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2017-02-09T07:12:25.000Z | 2017-02-09T07:12:25.000Z | rs/vip_paypal_payments.py | alexander-marquardt/lexalink | d554f3a00699c8a4cdf1b28dd033655f929470fa | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | rs/vip_paypal_payments.py | alexander-marquardt/lexalink | d554f3a00699c8a4cdf1b28dd033655f929470fa | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
################################################################################
# LexaLink Copyright information - do not remove this copyright notice
# Copyright (C) 2012
#
# Lexalink - a free social network and dating website platform for the Google App Engine.
#
# Original author: Alexander Marquardt
# Documentation and additional information: http://www.LexaLink.com
# Git source code repository: https://github.com/alexander-marquardt/lexalink
#
# Please consider contributing your enhancements and modifications to the LexaLink community,
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import datetime
import logging
import settings
import urllib, urllib2
import re
from django.http import HttpResponse
import site_configuration
from rs import email_utils
from rs import error_reporting
from rs import vip_payments_common
from rs import utils, utils_top_level
from rs import vip_status_support
from rs.localization_files import currency_by_country
# keep track of which currencies we currently support.
vip_paypal_valid_currencies = ['EUR', 'USD', 'MXN', 'USD_NON_US']
VIP_DEFAULT_CURRENCY = 'USD_NON_US' # International US dollars "$US" instead of just "$"
if settings.TESTING_PAYPAL_SANDBOX:
PP_URL = "https://www.sandbox.paypal.com/cgi-bin/webscr"
else:
PP_URL = "https://www.paypal.com/cgi-bin/webscr"
custom_info_pattern = re.compile(r'site:(.*); username:(.*); nid:(.*); flag:(.*);')
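# e.g. a hypothetical custom string: "site:lexalink; username:alice; nid:42; flag:has_discount;"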
# generate the dictionary that will allow us to do a reverse lookup when we receive a payment amount
# to the corresponding membership category
vip_standard_prices_with_currency_units = vip_payments_common.generate_prices_with_currency_units(
vip_payments_common.vip_standard_membership_prices, vip_paypal_valid_currencies)
vip_discounted_prices_with_currency_units = vip_payments_common.generate_prices_with_currency_units(
vip_payments_common.vip_discounted_membership_prices, vip_paypal_valid_currencies)
vip_paypal_discounted_prices_percentage_savings = vip_payments_common.compute_savings_percentage_discount(
vip_payments_common.vip_discounted_membership_prices, vip_payments_common.vip_standard_membership_prices, vip_paypal_valid_currencies)
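# Illustrative sketch (category name and price are hypothetical): a lookup like
# vip_standard_price_to_membership_category_lookup['EUR']['9.95'] recovers the
# membership category from the amount reported in a payment notification.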
def generate_paypal_radio_options(currency, prices_with_currency_units, original_prices_with_currency_units = []):
# for efficiency don't call this from outside this module, instead perform a lookup in
# paypal_radio_options
generated_html = u''
for member_category in vip_payments_common.vip_membership_categories:
duration = u"%s" % vip_payments_common.vip_option_values[member_category]['duration']
duration_units = u"%s" % vip_payments_common.vip_option_values[member_category]['duration_units']
if member_category == vip_payments_common.DEFAULT_SELECTED_VIP_OPTION:
selected = "checked"
else:
selected = ''
has_discount = True if original_prices_with_currency_units else False
savings_html = vip_payments_common.get_html_showing_savings(currency, member_category, vip_paypal_discounted_prices_percentage_savings, original_prices_with_currency_units)
duration_html = vip_payments_common.get_html_showing_duration(member_category, has_discount)
generated_html += u"""<input type="radio" name="os0" value="%(duration)s %(duration_units)s" %(selected)s>
<strong>%(duration_html)s</strong>: %(total_price)s %(savings_html)s<br>\n""" % {
'duration': duration, 'duration_units' : duration_units,
'duration_html': duration_html,
'selected' : selected,
'savings_html': savings_html,
'total_price' : prices_with_currency_units[currency][member_category]}
return generated_html
def generate_paypal_options_hidden_fields(currency, vip_membership_prices):
# Paypal has a pretty obfuscated manner of passing values to their checkout page.
# First, an option_select[0-9] must be linked to a "value" that the user has selected
# Then, the option_select[0-9] is intrinsically linked to an option_amount[0-9] (price), which allows the
# selected value to pass a price to the paypal checkout page.
# Eg. In order to process a payment for 1 week (in spanish "1 semana"), we would have the following entries
# <input type="radio" name="os0" value="1 semana">1 semana : $5.95 <-- defines the value "1 semana", and shows appropriate text to the user
# <input type="hidden" name="option_select0" value="1 semana"> <-- link from "1 semana" to selector 0
# <input type="hidden" name="option_amount0" value="5.95"> <-- link from selector 0 to the price of 5.95
generated_html = ''
counter = 0
for member_category in vip_payments_common.vip_membership_categories:
generated_html += u'<input type="hidden" name="option_select%d" value="%s %s">' % (
counter, vip_payments_common.vip_option_values[member_category]['duration'], vip_payments_common.vip_option_values[member_category]['duration_units'])
generated_html += u'<input type="hidden" name="option_amount%d" value="%s">' % (counter, vip_membership_prices[currency][member_category])
counter += 1
return generated_html
def generate_paypal_data(request, userobject, http_country_code, user_has_discount):
paypal_data = {}
internal_currency_code = vip_payments_common.get_internal_currency_code(http_country_code, vip_paypal_valid_currencies, VIP_DEFAULT_CURRENCY)
paypal_data['currency_code'] = vip_payments_common.real_currency_codes[internal_currency_code]
paypal_data['testing_paypal_sandbox'] = site_configuration.TESTING_PAYPAL_SANDBOX
if not site_configuration.TESTING_PAYPAL_SANDBOX:
paypal_data['paypal_account'] = site_configuration.PAYPAL_ACCOUNT
else:
paypal_data['paypal_account'] = site_configuration.PAYPAL_SANDBOX_ACCOUNT
if user_has_discount:
paypal_data['radio_options'] = generate_paypal_radio_options(internal_currency_code, vip_discounted_prices_with_currency_units, vip_standard_prices_with_currency_units)
paypal_data['options_hidden_fields'] = generate_paypal_options_hidden_fields(internal_currency_code, vip_payments_common.vip_discounted_membership_prices)
else:
paypal_data['radio_options'] = generate_paypal_radio_options(internal_currency_code, vip_standard_prices_with_currency_units)
paypal_data['options_hidden_fields'] = generate_paypal_options_hidden_fields(internal_currency_code, vip_payments_common.vip_standard_membership_prices)
return paypal_data
def paypal_instant_payment_notification(request):
parameters = None
payment_status = None
try:
logging.info("Received payment notification from paypal")
# Note: apparently PayPal can send a Pending status while waiting for authorization, and then later a Completed
# payment_status -- but I believe that in both cases, it expects a confirmation of the message to be send
# back
payment_status = request.REQUEST.get('payment_status', None) # Completed or Pending are the most interesting .. but there are others as well
status = None
if request.POST:
parameters = request.POST.copy()
else:
parameters = request.GET.copy()
logging.info("parameters %s" % repr(parameters))
if parameters:
parameters['cmd']='_notify-validate'
# parameters['charset'] tells us the type of encoding that was used for the characters. We
# must encode the response to use the same encoding as the request.
charset = parameters['charset']
logging.info("charset = %s" % charset)
#params_decoded = dict([k, v.decode(charset)] for k, v in parameters.items())
params_urlencoded = urllib.urlencode(dict([k, v.encode('utf-8')] for k, v in parameters.items()))
#params_urlencoded = urllib.urlencode(parameters)
req = urllib2.Request(PP_URL, params_urlencoded)
req.add_header("Content-type", "application/x-www-form-urlencoded")
logging.info("request response: %s" % repr(req))
response = urllib2.urlopen(req)
status = response.read()
if not status == "VERIFIED":
logging.error("The request could not be verified, check for fraud. Status:" + str(status))
parameters = None
else:
logging.info("Payment status: %s" % status)
if status == "VERIFIED":
custom = parameters['custom']
match_custom = custom_info_pattern.match(custom)
if match_custom:
nid = match_custom.group(3)
user_has_discount_flag = match_custom.group(4)
else:
raise Exception("Paypal custom value does not match expected format: %s" % custom)
# This is not really a secure way of checking if the user has a discount - someone could fake a discount
                # if they really wanted to - we could prevent this by adding a security hash to the payment if desired in
# the future.
user_has_discount = False
if user_has_discount_flag == vip_payments_common.USER_HAS_DISCOUNT_STRING:
user_has_discount = True
#logging.info("Paypal parameters: %s" % parameters)
donation_type = parameters['item_number']
txn_id = "paypal-" + parameters['txn_id']
currency = parameters['mc_currency']
amount_paid = parameters['mc_gross']
payer_email = parameters['payer_email']
last_name = parameters['last_name']
# os0 is represented as option_selection1
                # We are not presently using this variable, but could use it in the future instead of looking up the membership
# category based on the price.
option_selected = parameters['option_selection1'] # this is language specific (ie. "1 year" in english "1 año" in spanish)
uid = utils.get_uid_from_nid(nid)
userobject = utils_top_level.get_object_from_string(uid)
if currency in vip_paypal_valid_currencies:
if user_has_discount:
membership_category = vip_payments_common.vip_discounted_price_to_membership_category_lookup[currency][amount_paid]
else:
membership_category = vip_payments_common.vip_standard_price_to_membership_category_lookup[currency][amount_paid]
num_days_awarded = vip_payments_common.num_days_in_vip_membership_category[membership_category]
else:
raise Exception("Paypal currency %s not handled by code" % currency)
if vip_status_support.check_payment_and_update_structures(userobject, currency, amount_paid, num_days_awarded, txn_id, "Paypal", payer_email, last_name):
# only process the payment if this is the first time we have seen this txn_id.
vip_status_support.update_userobject_vip_status("paypal", userobject, num_days_awarded, payer_email,
amount_paid, currency, txn_id, custom)
return HttpResponse("OK")
else:
raise Exception("Paypal transaction status is %s" % (status))
except:
# This is serious enough, that it warrants sending an email to the administrator. We don't include any extra
# information such as username, or email address, since these values might not be available, and could cause the
# message to trigger an exception
try:
message_content = """Paypal error - User not awarded VIP status - check paypal to see who has sent funds and
check if status is correctly set"""
email_utils.send_admin_alert_email(message_content, subject = "%s Paypal Error" % settings.APP_NAME)
finally:
error_reporting.log_exception(logging.critical, request=request)
# Return "OK" even though we had a server error - this will stop paypal from re-sending notifications of the
# payment.
return HttpResponse("OK") | 50.531496 | 180 | 0.707207 | 1,642 | 12,835 | 5.261876 | 0.261876 | 0.031829 | 0.04919 | 0.032407 | 0.28669 | 0.239699 | 0.196296 | 0.163657 | 0.134606 | 0.128356 | 0 | 0.004897 | 0.204519 | 12,835 | 254 | 181 | 50.531496 | 0.841332 | 0.285625 | 0 | 0.157143 | 0 | 0.014286 | 0.14771 | 0.022123 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028571 | false | 0 | 0.092857 | 0 | 0.157143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
506cba2bc15b37fb1e0ab9d02e930d786a6a026d | 13,847 | py | Python | torchmps/utils.py | jemisjoky/TorchMPS | c4b092bcef27dd9e96a69e2e99dc3fb1da2daee4 | [
"MIT"
] | 103 | 2019-03-25T15:49:13.000Z | 2022-02-24T02:50:05.000Z | torchmps/utils.py | jemisjoky/TorchMPS | c4b092bcef27dd9e96a69e2e99dc3fb1da2daee4 | [
"MIT"
] | 10 | 2019-03-26T14:45:08.000Z | 2021-08-12T06:53:12.000Z | torchmps/utils.py | jemisjoky/TorchMPS | c4b092bcef27dd9e96a69e2e99dc3fb1da2daee4 | [
"MIT"
] | 25 | 2019-04-04T15:57:12.000Z | 2022-03-28T21:11:11.000Z | import numpy as np
import torch
def svd_flex(tensor, svd_string, max_D=None, cutoff=1e-10, sv_right=True, sv_vec=None):
"""
Split an input tensor into two pieces using a SVD across some partition
Args:
tensor (Tensor): Pytorch tensor with at least two indices
svd_string (str): String of the form 'init_str->left_str,right_str',
where init_str describes the indices of tensor, and
left_str/right_str describe those of the left and
right output tensors. The characters of left_str
and right_str form a partition of the characters in
init_str, but each contain one additional character
representing the new bond which comes from the SVD
Reversing the terms in svd_string to the left and
right of '->' gives an ein_string which can be used
to multiply both output tensors to give a (low rank
approximation) of the input tensor
cutoff (float): A truncation threshold which eliminates any
singular values which are strictly less than cutoff
max_D (int): A maximum allowed value for the new bond. If max_D
                     is specified, the new bond of the returned
                     tensors is padded or truncated to dimension
                     exactly max_D
sv_right (bool): The SVD gives two orthogonal matrices and a matrix
of singular values. sv_right=True merges the SV
matrix with the right output, while sv_right=False
merges it with the left output
sv_vec (Tensor): Pytorch vector with length max_D, which is modified
in place to return the vector of singular values
Returns:
left_tensor (Tensor),
right_tensor (Tensor): Tensors whose indices are described by the
left_str and right_str parts of svd_string
bond_dim: The dimension of the new bond appearing from
the cutoff in our SVD. Note that this generally
won't match the dimension of left_/right_tensor
at this mode, which is padded with zeros
whenever max_D is specified
"""
def prod(int_list):
output = 1
for num in int_list:
output *= num
return output
with torch.no_grad():
# Parse svd_string into init_str, left_str, and right_str
svd_string = svd_string.replace(" ", "")
init_str, post_str = svd_string.split("->")
left_str, right_str = post_str.split(",")
# Check formatting of init_str, left_str, and right_str
assert all([c.islower() for c in init_str + left_str + right_str])
assert len(set(init_str + left_str + right_str)) == len(init_str) + 1
assert len(set(init_str)) + len(set(left_str)) + len(set(right_str)) == len(
init_str
) + len(left_str) + len(right_str)
# Get the special character representing our SVD-truncated bond
bond_char = set(left_str).intersection(set(right_str)).pop()
left_part = left_str.replace(bond_char, "")
right_part = right_str.replace(bond_char, "")
# Permute our tensor into something that can be viewed as a matrix
ein_str = f"{init_str}->{left_part+right_part}"
tensor = torch.einsum(ein_str, [tensor]).contiguous()
left_shape = list(tensor.shape[: len(left_part)])
right_shape = list(tensor.shape[len(left_part) :])
left_dim, right_dim = prod(left_shape), prod(right_shape)
tensor = tensor.view([left_dim, right_dim])
# Get SVD and format so that left_mat * diag(svs) * right_mat = tensor
left_mat, svs, right_mat = torch.svd(tensor)
svs, _ = torch.sort(svs, descending=True)
right_mat = torch.t(right_mat)
# Decrease or increase our tensor sizes in the presence of max_D
if max_D and len(svs) > max_D:
svs = svs[:max_D]
left_mat = left_mat[:, :max_D]
right_mat = right_mat[:max_D]
elif max_D and len(svs) < max_D:
copy_svs = torch.zeros([max_D])
copy_svs[: len(svs)] = svs
copy_left = torch.zeros([left_mat.size(0), max_D])
copy_left[:, : left_mat.size(1)] = left_mat
copy_right = torch.zeros([max_D, right_mat.size(1)])
copy_right[: right_mat.size(0)] = right_mat
svs, left_mat, right_mat = copy_svs, copy_left, copy_right
# If given as input, copy singular values into sv_vec
if sv_vec is not None and svs.shape == sv_vec.shape:
sv_vec[:] = svs
elif sv_vec is not None and svs.shape != sv_vec.shape:
raise TypeError(
f"sv_vec.shape must be {list(svs.shape)}, but is "
f"currently {list(sv_vec.shape)}"
)
# Find the truncation point relative to our singular value cutoff
truncation = 0
for s in svs:
if s < cutoff:
break
truncation += 1
if truncation == 0:
raise RuntimeError(
"SVD cutoff too large, attempted to truncate "
"tensor to bond dimension 0"
)
# Perform the actual truncation
if max_D:
svs[truncation:] = 0
left_mat[:, truncation:] = 0
right_mat[truncation:] = 0
else:
# If max_D wasn't given, set it to the truncation index
max_D = truncation
svs = svs[:truncation]
left_mat = left_mat[:, :truncation]
right_mat = right_mat[:truncation]
# Merge the singular values into the appropriate matrix
if sv_right:
right_mat = torch.einsum("l,lr->lr", [svs, right_mat])
else:
left_mat = torch.einsum("lr,r->lr", [left_mat, svs])
# Reshape the matrices to make them proper tensors
left_tensor = left_mat.view(left_shape + [max_D])
right_tensor = right_mat.view([max_D] + right_shape)
# Finally, permute the indices into the desired order
if left_str != left_part + bond_char:
left_tensor = torch.einsum(
f"{left_part+bond_char}->{left_str}", [left_tensor]
)
if right_str != bond_char + right_part:
right_tensor = torch.einsum(
f"{bond_char+right_part}->{right_str}", [right_tensor]
)
return left_tensor, right_tensor, truncation
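
# Illustrative usage sketch (not part of the original file; values below are
# only assumptions for demonstration). Splitting an order-3 tensor across the
# partition {i} / {j, k}, with the new bond labeled 'b':
#
#     t = torch.randn(4, 5, 6)
#     left, right, bond_dim = svd_flex(t, "ijk->ib,bjk", max_D=3)
#     # reversing the svd_string recombines the pieces into a rank-3 approx
#     approx = torch.einsum("ib,bjk->ijk", [left, right])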


def init_tensor(shape, bond_str, init_method):
    """
    Initialize a tensor with a given shape

    Args:
        shape:       The shape of our output parameter tensor.

        bond_str:    The bond string describing our output parameter tensor,
                     which is used in 'random_eye' initialization method.
                     The characters 'l' and 'r' are used to refer to the
                     left or right virtual indices of our tensor, and are
                     both required to be present for the random_eye and
                     min_random_eye initialization methods.

        init_method: The method used to initialize the entries of our tensor.
                     This can be either a string, or else a tuple whose first
                     entry is an initialization method and whose remaining
                     entries are specific to that method. In each case, std
                     will always refer to a standard deviation for a random
                     normal component of each entry of the tensor.

                     Allowed options are:
                         * ('random_eye', std): Initialize each tensor input
                           slice close to the identity
                         * ('random_zero', std): Initialize each tensor input
                           slice close to the zero matrix
                         * ('min_random_eye', std, init_dim): Initialize each
                           tensor input slice close to a truncated identity
                           matrix, whose truncation leaves init_dim unit
                           entries on the diagonal. If init_dim is larger
                           than either of the bond dimensions, then init_dim
                           is capped at the smaller bond dimension.
    """
    # Unpack init_method if it is a tuple
    if not isinstance(init_method, str):
        init_str = init_method[0]
        std = init_method[1]
        if init_str == "min_random_eye":
            init_dim = init_method[2]
        init_method = init_str
    else:
        std = 1e-9

    # Check that bond_str is properly sized and doesn't have repeat indices
    assert len(shape) == len(bond_str)
    assert len(set(bond_str)) == len(bond_str)

    if init_method not in ["random_eye", "min_random_eye", "random_zero"]:
        raise ValueError(f"Unknown initialization method: {init_method}")

    if init_method in ["random_eye", "min_random_eye"]:
        bond_chars = ["l", "r"]
        assert all([c in bond_str for c in bond_chars])

        # Initialize our tensor slices as identity matrices which each fill
        # some or all of the initially allocated bond space
        if init_method == "min_random_eye":

            # The dimensions for our initial identity matrix. These will each
            # be init_dim, unless init_dim exceeds one of the bond dimensions
            bond_dims = [shape[bond_str.index(c)] for c in bond_chars]
            if all([init_dim <= full_dim for full_dim in bond_dims]):
                bond_dims = [init_dim, init_dim]
            else:
                init_dim = min(bond_dims)

            eye_shape = [init_dim if c in bond_chars else 1 for c in bond_str]
            expand_shape = [
                init_dim if c in bond_chars else shape[i]
                for i, c in enumerate(bond_str)
            ]

        elif init_method == "random_eye":
            eye_shape = [
                shape[i] if c in bond_chars else 1 for i, c in enumerate(bond_str)
            ]
            expand_shape = shape
            bond_dims = [shape[bond_str.index(c)] for c in bond_chars]

        eye_tensor = torch.eye(bond_dims[0], bond_dims[1]).view(eye_shape)
        eye_tensor = eye_tensor.expand(expand_shape)

        tensor = torch.zeros(shape)
        tensor[[slice(dim) for dim in expand_shape]] = eye_tensor

        # Add on a bit of random noise
        tensor += std * torch.randn(shape)

    elif init_method == "random_zero":
        tensor = std * torch.randn(shape)

    return tensor
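
# Illustrative usage sketch (assumption, not part of the original file): an
# MPS core with bond dimension 10 on both virtual indices ('l' and 'r') and a
# physical index of size 2, initialized near the identity:
#
#     core = init_tensor([10, 10, 2], "lri", ("random_eye", 1e-9))
#     core.shape  # torch.Size([10, 10, 2])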


### OLDER MISCELLANEOUS FUNCTIONS ###  # noqa: E266


def onehot(labels, max_value):
    """
    Convert a batch of labels from the set {0, 1, ..., max_value-1} into their
    onehot encoded counterparts
    """
    label_vecs = torch.zeros([len(labels), max_value])
    for i, label in enumerate(labels):
        label_vecs[i, label] = 1.0
    return label_vecs
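
# Illustrative sketch (not part of the original file):
#
#     onehot(torch.tensor([0, 2]), 3)
#     # tensor([[1., 0., 0.],
#     #         [0., 0., 1.]])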


def joint_shuffle(input_data, input_labels):
    """
    Shuffle input data and labels in a joint manner, so each label points to
    its corresponding datum. Works for both regular and CUDA tensors
    """
    assert input_data.is_cuda == input_labels.is_cuda
    use_gpu = input_data.is_cuda
    if use_gpu:
        input_data, input_labels = input_data.cpu(), input_labels.cpu()

    data, labels = input_data.numpy(), input_labels.numpy()

    # Shuffle relative to the same seed
    np.random.seed(0)
    np.random.shuffle(data)
    np.random.seed(0)
    np.random.shuffle(labels)

    data, labels = torch.from_numpy(data), torch.from_numpy(labels)
    if use_gpu:
        data, labels = data.cuda(), labels.cuda()
    return data, labels
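
# Illustrative sketch (not part of the original file): both tensors are
# permuted with the same fixed seed, so pairs stay aligned:
#
#     data = torch.arange(4).unsqueeze(1)
#     labels = torch.arange(4)
#     data, labels = joint_shuffle(data, labels)
#     assert (data.squeeze(1) == labels).all()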


def load_HV_data(length):
    """
    Output a toy "horizontal/vertical" data set of black and white
    images with size length x length. Each image contains a single
    horizontal or vertical stripe, set against a background
    of the opposite color. The labels associated with these images
    are either 0 (horizontal stripe) or 1 (vertical stripe).

    In its current version, this returns two data sets, a training
    set with 75% of the images and a test set with 25% of the
    images.
    """
    num_images = 4 * (2 ** (length - 1) - 1)
    num_patterns = num_images // 2
    split = num_images // 4

    if length > 14:
        print(
            "load_HV_data will generate {} images, "
            "this could take a while...".format(num_images)
        )

    images = np.empty([num_images, length, length], dtype=np.float32)
    # np.int was a deprecated alias of the builtin int and was removed in
    # NumPy 1.24; using int directly is equivalent
    labels = np.empty(num_images, dtype=int)

    # Used to generate the stripe pattern from integer i below
    template = "{:0" + str(length) + "b}"

    for i in range(1, num_patterns + 1):
        pattern = template.format(i)
        pattern = [int(s) for s in pattern]

        for j, val in enumerate(pattern):
            # Horizontal stripe pattern
            images[2 * i - 2, j, :] = val
            # Vertical stripe pattern
            images[2 * i - 1, :, j] = val

        labels[2 * i - 2] = 0
        labels[2 * i - 1] = 1

    # Shuffle and partition into training and test sets
    np.random.seed(0)
    np.random.shuffle(images)
    np.random.seed(0)
    np.random.shuffle(labels)

    train_images, train_labels = images[split:], labels[split:]
    test_images, test_labels = images[:split], labels[:split]

    return (
        torch.from_numpy(train_images),
        torch.from_numpy(train_labels),
        torch.from_numpy(test_images),
        torch.from_numpy(test_labels),
    )
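
# Illustrative sketch (not part of the original file): length=4 gives
# 4 * (2**3 - 1) = 28 images, split 21 train / 7 test:
#
#     train_x, train_y, test_x, test_y = load_HV_data(4)
#     train_x.shape  # torch.Size([21, 4, 4])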
| 39.338068 | 87 | 0.591103 | 1,860 | 13,847 | 4.230645 | 0.192473 | 0.010675 | 0.007117 | 0.00915 | 0.146651 | 0.10624 | 0.085653 | 0.049816 | 0.03787 | 0.019316 | 0 | 0.007134 | 0.331913 | 13,847 | 351 | 88 | 39.450142 | 0.843476 | 0.423702 | 0 | 0.08046 | 0 | 0 | 0.06466 | 0.013405 | 0 | 0 | 0 | 0 | 0.04023 | 1 | 0.034483 | false | 0 | 0.011494 | 0 | 0.08046 | 0.005747 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
506d811711900dbce03597f5fc85d2cd7f4190b0 | 5,250 | py | Python | tools/deep_memory_profiler/subcommands/pprof.py | nagineni/chromium-crosswalk | 5725642f1c67d0f97e8613ec1c3e8107ab53fdf8 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 231 | 2015-01-08T09:04:44.000Z | 2021-12-30T03:03:10.000Z | tools/deep_memory_profiler/subcommands/pprof.py | j4ckfrost/android_external_chromium_org | a1a3dad8b08d1fcf6b6b36c267158ed63217c780 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1 | 2017-02-14T21:55:58.000Z | 2017-02-14T21:55:58.000Z | tools/deep_memory_profiler/subcommands/pprof.py | j4ckfrost/android_external_chromium_org | a1a3dad8b08d1fcf6b6b36c267158ed63217c780 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 268 | 2015-01-21T05:53:28.000Z | 2022-03-25T22:09:01.000Z | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import sys
from lib.bucket import BUCKET_ID, COMMITTED, ALLOC_COUNT, FREE_COUNT
from lib.policy import PolicySet
from lib.subcommand import SubCommand
LOGGER = logging.getLogger('dmprof')


class PProfCommand(SubCommand):
  def __init__(self):
    super(PProfCommand, self).__init__(
        'Usage: %prog pprof [-c COMPONENT] <dump> <policy>')
    self._parser.add_option('-c', '--component', type='string',
                            dest='component',
                            help='restrict to COMPONENT', metavar='COMPONENT')

  def do(self, sys_argv):
    options, args = self._parse_args(sys_argv, 2)

    dump_path = args[1]
    target_policy = args[2]
    component = options.component

    (bucket_set, dump) = SubCommand.load_basic_files(dump_path, False)
    policy_set = PolicySet.load(SubCommand._parse_policy_list(target_policy))

    with open(SubCommand._find_prefix(dump_path) + '.maps', 'r') as maps_f:
      maps_lines = maps_f.readlines()

    PProfCommand._output(
        dump, policy_set[target_policy], bucket_set, maps_lines, component,
        sys.stdout)

    return 0

  @staticmethod
  def _output(dump, policy, bucket_set, maps_lines, component_name, out):
    """Converts the heap profile dump so it can be processed by pprof.

    Args:
        dump: A Dump object.
        policy: A Policy object.
        bucket_set: A BucketSet object.
        maps_lines: A list of strings containing /proc/.../maps.
        component_name: A name of component for filtering.
        out: An IO object to output.
    """
    out.write('heap profile: ')
    com_committed, com_allocs = PProfCommand._accumulate(
        dump, policy, bucket_set, component_name)

    out.write('%6d: %8s [%6d: %8s] @ heapprofile\n' % (
        com_allocs, com_committed, com_allocs, com_committed))

    PProfCommand._output_stacktrace_lines(
        dump, policy, bucket_set, component_name, out)

    out.write('MAPPED_LIBRARIES:\n')
    for line in maps_lines:
      out.write(line)

  @staticmethod
  def _accumulate(dump, policy, bucket_set, component_name):
    """Accumulates size of committed chunks and the number of allocated chunks.

    Args:
        dump: A Dump object.
        policy: A Policy object.
        bucket_set: A BucketSet object.
        component_name: A name of component for filtering.

    Returns:
        Two integers which are the accumulated size of committed regions and
        the number of allocated chunks, respectively.
    """
    com_committed = 0
    com_allocs = 0

    for _, region in dump.iter_map:
      if region[0] != 'hooked':
        continue
      component_match, bucket = policy.find_mmap(region, bucket_set)

      if (component_name and component_name != component_match) or (
          region[1]['committed'] == 0):
        continue

      com_committed += region[1]['committed']
      com_allocs += 1

    for line in dump.iter_stacktrace:
      words = line.split()
      bucket = bucket_set.get(int(words[BUCKET_ID]))
      if not bucket or bucket.allocator_type == 'malloc':
        component_match = policy.find_malloc(bucket)
      elif bucket.allocator_type == 'mmap':
        continue
      else:
        assert False

      if (not bucket or
          (component_name and component_name != component_match)):
        continue

      com_committed += int(words[COMMITTED])
      com_allocs += int(words[ALLOC_COUNT]) - int(words[FREE_COUNT])

    return com_committed, com_allocs

  @staticmethod
  def _output_stacktrace_lines(dump, policy, bucket_set, component_name, out):
    """Prints information of stacktrace lines for pprof.

    Args:
        dump: A Dump object.
        policy: A Policy object.
        bucket_set: A BucketSet object.
        component_name: A name of component for filtering.
        out: An IO object to output.
    """
    for _, region in dump.iter_map:
      if region[0] != 'hooked':
        continue
      component_match, bucket = policy.find_mmap(region, bucket_set)

      if (component_name and component_name != component_match) or (
          region[1]['committed'] == 0):
        continue

      out.write('     1: %8s [     1: %8s] @' % (
          region[1]['committed'], region[1]['committed']))
      for address in bucket.stacktrace:
        out.write(' 0x%016x' % address)
      out.write('\n')

    for line in dump.iter_stacktrace:
      words = line.split()
      bucket = bucket_set.get(int(words[BUCKET_ID]))
      if not bucket or bucket.allocator_type == 'malloc':
        component_match = policy.find_malloc(bucket)
      elif bucket.allocator_type == 'mmap':
        continue
      else:
        assert False

      if (not bucket or
          (component_name and component_name != component_match)):
        continue

      out.write('%6d: %8s [%6d: %8s] @' % (
          int(words[ALLOC_COUNT]) - int(words[FREE_COUNT]),
          words[COMMITTED],
          int(words[ALLOC_COUNT]) - int(words[FREE_COUNT]),
          words[COMMITTED]))
      for address in bucket.stacktrace:
        out.write(' 0x%016x' % address)
      out.write('\n')
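
# Illustrative invocation sketch (assumption, not part of the original file;
# the dump file and policy label below are made up). Within the dmprof tool
# this subcommand converts a heap dump into pprof's text heap-profile format:
#
#     dmprof pprof dump.0001.heap l0 > profile.pprof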
| 32.407407 | 80 | 0.647238 | 661 | 5,250 | 4.953101 | 0.245083 | 0.063531 | 0.027489 | 0.029016 | 0.55956 | 0.55956 | 0.512828 | 0.486255 | 0.475565 | 0.475565 | 0 | 0.01016 | 0.250095 | 5,250 | 161 | 81 | 32.608696 | 0.821438 | 0.201714 | 0 | 0.49 | 0 | 0 | 0.081492 | 0 | 0 | 0 | 0 | 0 | 0.02 | 1 | 0.05 | false | 0 | 0.05 | 0 | 0.13 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
506dd5b7c9347c97881f40b89813ac755d09d068 | 9,474 | py | Python | patrole_tempest_plugin/tests/api/network/test_agents_rbac.py | mail2nsrajesh/patrole | a662f824fea8fe8347391371ab468b1561637bf3 | [
"Apache-2.0"
] | null | null | null | patrole_tempest_plugin/tests/api/network/test_agents_rbac.py | mail2nsrajesh/patrole | a662f824fea8fe8347391371ab468b1561637bf3 | [
"Apache-2.0"
] | null | null | null | patrole_tempest_plugin/tests/api/network/test_agents_rbac.py | mail2nsrajesh/patrole | a662f824fea8fe8347391371ab468b1561637bf3 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 AT&T Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest import test
from patrole_tempest_plugin import rbac_rule_validation
from patrole_tempest_plugin.tests.api.network import rbac_base as base


class AgentsRbacTest(base.BaseNetworkRbacTest):

    @classmethod
    def skip_checks(cls):
        super(AgentsRbacTest, cls).skip_checks()
        if not test.is_extension_enabled('agent', 'network'):
            msg = "agent extension not enabled."
            raise cls.skipException(msg)

    @classmethod
    def resource_setup(cls):
        super(AgentsRbacTest, cls).resource_setup()
        agents = cls.agents_client.list_agents()['agents']
        cls.agent = agents[0]

    @decorators.idempotent_id('f88e38e0-ab52-4b97-8ffa-48a27f9d199b')
    @rbac_rule_validation.action(service="neutron",
                                 rule="get_agent",
                                 expected_error_code=404)
    def test_show_agent(self):
        """Show agent test.

        RBAC test for the neutron get_agent policy
        """
        self.rbac_utils.switch_role(self, toggle_rbac_role=True)
        self.agents_client.show_agent(self.agent['id'])

    @decorators.idempotent_id('8ca68fdb-eaf6-4880-af82-ba0982949dec')
    @rbac_rule_validation.action(service="neutron",
                                 rule="update_agent",
                                 expected_error_code=404)
    def test_update_agent(self):
        """Update agent test.

        RBAC test for the neutron update_agent policy
        """
        original_status = self.agent['admin_state_up']
        agent_status = {'admin_state_up': original_status}

        self.rbac_utils.switch_role(self, toggle_rbac_role=True)
        self.agents_client.update_agent(agent_id=self.agent['id'],
                                        agent=agent_status)


class L3AgentSchedulerRbacTest(base.BaseNetworkRbacTest):

    @classmethod
    def skip_checks(cls):
        super(L3AgentSchedulerRbacTest, cls).skip_checks()
        if not test.is_extension_enabled('l3_agent_scheduler', 'network'):
            msg = "l3_agent_scheduler extension not enabled."
            raise cls.skipException(msg)

    @classmethod
    def resource_setup(cls):
        super(L3AgentSchedulerRbacTest, cls).resource_setup()
        cls.router = cls.create_router()
        cls.agent = None

    def setUp(self):
        super(L3AgentSchedulerRbacTest, self).setUp()
        if self.agent is not None:
            return

        # Find an agent and validate that it is correct.
        agents = self.agents_client.list_agents()['agents']
        agent = {'agent_type': None}
        for a in agents:
            if a['agent_type'] == 'L3 agent':
                agent = a
                break
        self.assertEqual(agent['agent_type'], 'L3 agent', 'Could not find '
                         'L3 agent in agent list though l3_agent_scheduler '
                         'is enabled.')
        self.agent = agent

    @decorators.idempotent_id('5d2bbdbc-40a5-43d2-828a-84dc93fcc453')
    @rbac_rule_validation.action(service="neutron",
                                 rule="get_l3-routers")
    def test_list_routers_on_l3_agent(self):
        """List routers on L3 agent test.

        RBAC test for the neutron get_l3-routers policy
        """
        self.rbac_utils.switch_role(self, toggle_rbac_role=True)
        self.agents_client.list_routers_on_l3_agent(self.agent['id'])

    @decorators.idempotent_id('466b2a10-8747-4c09-855a-bd90a1c86ce7')
    @rbac_rule_validation.action(service="neutron",
                                 rule="create_l3-router")
    def test_create_router_on_l3_agent(self):
        """Create router on L3 agent test.

        RBAC test for the neutron create_l3-router policy
        """
        self.rbac_utils.switch_role(self, toggle_rbac_role=True)
        self.agents_client.create_router_on_l3_agent(
            self.agent['id'], router_id=self.router['id'])
        self.addCleanup(
            test_utils.call_and_ignore_notfound_exc,
            self.agents_client.delete_router_from_l3_agent,
            self.agent['id'], router_id=self.router['id'])

    @decorators.idempotent_id('8138cfc9-3e48-4a34-adf6-894077aa1be4')
    @rbac_rule_validation.action(service="neutron",
                                 rule="delete_l3-router")
    def test_delete_router_from_l3_agent(self):
        """Delete router from L3 agent test.

        RBAC test for the neutron delete_l3-router policy
        """
        self.agents_client.create_router_on_l3_agent(
            self.agent['id'], router_id=self.router['id'])
        self.addCleanup(
            test_utils.call_and_ignore_notfound_exc,
            self.agents_client.delete_router_from_l3_agent,
            self.agent['id'], router_id=self.router['id'])

        self.rbac_utils.switch_role(self, toggle_rbac_role=True)
        self.agents_client.delete_router_from_l3_agent(
            self.agent['id'], router_id=self.router['id'])


class DHCPAgentSchedulersRbacTest(base.BaseNetworkRbacTest):

    @classmethod
    def skip_checks(cls):
        super(DHCPAgentSchedulersRbacTest, cls).skip_checks()
        if not test.is_extension_enabled('dhcp_agent_scheduler', 'network'):
            msg = "dhcp_agent_scheduler extension not enabled."
            raise cls.skipException(msg)

    @classmethod
    def resource_setup(cls):
        super(DHCPAgentSchedulersRbacTest, cls).resource_setup()
        cls.agent = None

    def setUp(self):
        super(DHCPAgentSchedulersRbacTest, self).setUp()
        if self.agent is not None:
            return

        # Find a DHCP agent and validate that it is correct.
        agents = self.agents_client.list_agents()['agents']
        agent = {'agent_type': None}
        for a in agents:
            if a['agent_type'] == 'DHCP agent':
                agent = a
                break
        self.assertEqual(agent['agent_type'], 'DHCP agent', 'Could not find '
                         'DHCP agent in agent list though dhcp_agent_scheduler'
                         ' is enabled.')
        self.agent = agent

    def _create_and_prepare_network_for_agent(self, agent_id):
        """Create network and ensure it is not hosted by agent_id."""
        network_id = self.create_network()['id']

        if self._check_network_in_dhcp_agent(network_id, agent_id):
            self.agents_client.delete_network_from_dhcp_agent(
                agent_id=agent_id, network_id=network_id)

        return network_id

    def _check_network_in_dhcp_agent(self, network_id, agent_id):
        networks = self.agents_client.list_networks_hosted_by_one_dhcp_agent(
            agent_id)['networks'] or []
        return network_id in [network['id'] for network in networks]

    @decorators.idempotent_id('dc84087b-4c2a-4878-8ed0-40370e19da17')
    @rbac_rule_validation.action(service="neutron",
                                 rule="get_dhcp-networks")
    def test_list_networks_hosted_by_one_dhcp_agent(self):
        """List networks hosted by one DHCP agent test.

        RBAC test for the neutron get_dhcp-networks policy
        """
        self.rbac_utils.switch_role(self, toggle_rbac_role=True)
        self.agents_client.list_networks_hosted_by_one_dhcp_agent(
            self.agent['id'])

    @decorators.idempotent_id('14e014ac-f355-46d3-b6d8-98f2c9ec1610')
    @rbac_rule_validation.action(service="neutron",
                                 rule="create_dhcp-network")
    def test_add_dhcp_agent_to_network(self):
        """Add DHCP agent to network test.

        RBAC test for the neutron create_dhcp-network policy
        """
        network_id = self._create_and_prepare_network_for_agent(
            self.agent['id'])

        self.rbac_utils.switch_role(self, toggle_rbac_role=True)
        self.agents_client.add_dhcp_agent_to_network(
            self.agent['id'], network_id=network_id)
        # Clean up is not necessary and might result in 409 being raised.

    @decorators.idempotent_id('937a4302-4b49-407d-9980-5843d7badc38')
    @rbac_rule_validation.action(service="neutron",
                                 rule="delete_dhcp-network")
    def test_delete_network_from_dhcp_agent(self):
        """Delete DHCP agent from network test.

        RBAC test for the neutron delete_dhcp-network policy
        """
        network_id = self._create_and_prepare_network_for_agent(
            self.agent['id'])
        self.agents_client.add_dhcp_agent_to_network(
            self.agent['id'], network_id=network_id)
        # Clean up is not necessary and might result in 409 being raised.

        self.rbac_utils.switch_role(self, toggle_rbac_role=True)
        self.agents_client.delete_network_from_dhcp_agent(
            self.agent['id'], network_id=network_id)
| 39.806723 | 79 | 0.654317 | 1,174 | 9,474 | 5.019591 | 0.17632 | 0.026133 | 0.043441 | 0.032581 | 0.646699 | 0.616494 | 0.588155 | 0.500424 | 0.382488 | 0.328186 | 0 | 0.030003 | 0.254169 | 9,474 | 237 | 80 | 39.974684 | 0.803991 | 0.162022 | 0 | 0.526316 | 0 | 0 | 0.127649 | 0.037209 | 0 | 0 | 0 | 0 | 0.013158 | 1 | 0.118421 | false | 0 | 0.032895 | 0 | 0.197368 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
506f305043860d56b9b329397b0d18878956dec3 | 3,238 | py | Python | tests/test_match.py | Dominik1123/matchable | 693d501a49ca4dc683cb81a69b39200ca784b0f6 | [
"MIT"
] | 1 | 2021-05-17T04:42:09.000Z | 2021-05-17T04:42:09.000Z | tests/test_match.py | Dominik1123/matchable | 693d501a49ca4dc683cb81a69b39200ca784b0f6 | [
"MIT"
] | null | null | null | tests/test_match.py | Dominik1123/matchable | 693d501a49ca4dc683cb81a69b39200ca784b0f6 | [
"MIT"
] | null | null | null | import operator
import pytest
from matchable.match import Comparison, CompareAttr, CompareItem, Condition, IsInstance, Match, MatchAttr, MatchItem


@pytest.fixture
def comparison(type_c):
    return CompareAttr(type_c, 'c', operator.eq, 0)


def test_condition():
    cond = Condition(object)
    with pytest.raises(NotImplementedError):
        cond < cond
    with pytest.raises(NotImplementedError):
        cond == cond
    with pytest.raises(NotImplementedError):
        hash(cond)
    with pytest.raises(NotImplementedError):
        cond.match(0)


def test_comparison_base():
    with pytest.raises(NotImplementedError):
        Comparison(list, 0, operator.eq, 0).retrieve([0])


@pytest.mark.parametrize('attr_name', ['a', 'b', 'c'])
@pytest.mark.parametrize('op_name', ['lt', 'le', 'eq', 'ne', 'ge', 'gt'])
def test_compare_attr(type_c, attr_name, op_name):
    op = getattr(operator, op_name)
    ref = getattr(type_c, attr_name)
    cond = CompareAttr(type_c, attr_name, op, ref)
    obj = type_c()
    assert cond.match(obj) == op(ref, ref)


@pytest.mark.parametrize('obj', [{0: 0, 1: 1}, [0, 1], (0, 1)])
@pytest.mark.parametrize('identifier', [0, 1])
@pytest.mark.parametrize('op_name', ['lt', 'le', 'eq', 'ne', 'ge', 'gt'])
def test_compare_item(obj, identifier, op_name):
    op = getattr(operator, op_name)
    ref = obj[identifier]
    cond = CompareItem(type(obj), identifier, op, ref)
    assert cond.match(obj) == op(ref, ref)


def test_comparison_lt_eq(comparison, type_c):
    assert comparison == comparison
    assert not (comparison < comparison)
    assert Comparison(type_c, 'c', operator.eq, 0) != Comparison(type_c, 'c', operator.eq, 1)
    with pytest.raises(TypeError):
        comparison < 0


def test_comparison_missing_attribute(comparison):
    assert not comparison.match(0)


def test_isinstance(any_type):
    cond = IsInstance(any_type)
    assert cond.match(any_type())
    assert not cond.match(object())


def test_isinstance_lt_eq(type_a, type_b, type_c, comparison):
    cond_a = IsInstance(type_a)
    cond_b = IsInstance(type_b)
    cond_c = IsInstance(type_c)
    assert cond_a < cond_b
    assert cond_a < cond_c
    assert cond_b < cond_c
    assert cond_a < comparison
    assert cond_b < comparison
    assert cond_c < comparison
    assert cond_a == cond_a
    assert cond_a != cond_b


def test_conditions_sorting(type_a, type_b, type_c):
    ca = Comparison(type_a, 'a', operator.eq, 0)
    cb = Comparison(type_b, 'a', operator.eq, 0)
    cc = Comparison(type_c, 'a', operator.eq, 0)
    ia = IsInstance(type_a)
    ib = IsInstance(type_b)
    ic = IsInstance(type_c)
    assert sorted([cc, ca, ib, cb, ic, ia]) == [ia, ib, ic, cc, ca, cb]


def test_match(type_c):
    assert isinstance(Match(type_c).a, MatchAttr)
    assert isinstance(Match(type_c)['a'], MatchItem)


@pytest.mark.parametrize('match_cls,comp_cls', [(MatchAttr, CompareAttr), (MatchItem, CompareItem)])
@pytest.mark.parametrize('op_name', ['lt', 'le', 'eq', 'ne', 'ge', 'gt'])
def test_match_attr_item(type_c, match_cls, comp_cls, op_name):
    op = getattr(operator, op_name)
    cond = op(match_cls(type_c, 'a'), 0)
    assert isinstance(cond, comp_cls)
    assert cond == comp_cls(type_c, 'a', op, 0)
| 31.134615 | 116 | 0.679432 | 465 | 3,238 | 4.533333 | 0.141935 | 0.047438 | 0.069734 | 0.083017 | 0.332068 | 0.291746 | 0.198292 | 0.159867 | 0.129507 | 0.129507 | 0 | 0.009006 | 0.176961 | 3,238 | 103 | 117 | 31.436893 | 0.781989 | 0 | 0 | 0.168831 | 0 | 0 | 0.033663 | 0 | 0 | 0 | 0 | 0 | 0.272727 | 1 | 0.155844 | false | 0 | 0.038961 | 0.012987 | 0.207792 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
506f837106803565513b08b639fd52850a365b4d | 10,101 | py | Python | hack/boilerplate/boilerplate.py | syself/cluster-api-provider-hetzner | fa56f3486738da757e27fc2872a4cc3bc474cfcd | [
"Apache-2.0"
] | 48 | 2021-11-02T17:12:13.000Z | 2022-03-30T19:06:47.000Z | hack/boilerplate/boilerplate.py | syself/cluster-api-provider-hetzner | fa56f3486738da757e27fc2872a4cc3bc474cfcd | [
"Apache-2.0"
] | 14 | 2021-12-22T16:28:10.000Z | 2022-03-26T20:02:39.000Z | hack/boilerplate/boilerplate.py | syself/cluster-api-provider-hetzner | fa56f3486738da757e27fc2872a4cc3bc474cfcd | [
"Apache-2.0"
] | 2 | 2022-03-06T02:54:56.000Z | 2022-03-07T21:36:13.000Z | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import datetime
import difflib
import glob
import os
import re
import sys
import json

parser = argparse.ArgumentParser()
parser.add_argument(
    "filenames",
    help="list of files to check, all files if unspecified",
    nargs='*')

# Rootdir defaults to the directory **above** the repo-infra dir.
rootdir = os.path.dirname(__file__) + "/../../"
rootdir = os.path.abspath(rootdir)
parser.add_argument(
    "--rootdir", default=rootdir, help="root directory to examine")

default_boilerplate_dir = os.path.join(rootdir, "repo-infra/verify/boilerplate")
parser.add_argument(
    "--boilerplate-dir", default=default_boilerplate_dir)

parser.add_argument(
    "-v", "--verbose",
    help="give verbose output regarding why a file does not pass",
    action="store_true")

parser.add_argument(
    "--ensure",
    help="ensure all files which should have appropriate licence headers have them prepended",
    action="store_true")

args = parser.parse_args()

verbose_out = sys.stderr if args.verbose else open("/dev/null", "w")

default_skipped_dirs = ['Godeps', '.git', 'vendor', 'third_party', '_gopath', '_output']

# list all the files that contain 'DO NOT EDIT', but are not generated
default_skipped_not_generated = []


def get_refs():
    refs = {}

    for path in glob.glob(os.path.join(args.boilerplate_dir, "boilerplate.*.txt")):
        extension = os.path.basename(path).split(".")[1]

        ref_file = open(path, 'r')
        ref = ref_file.read().splitlines()
        ref_file.close()
        refs[extension] = ref

    return refs


def is_generated_file(filename, data, regexs, files_to_skip):
    for d in files_to_skip:
        if d in filename:
            return False

    p = regexs["generated"]
    return p.search(data)


def match_and_delete(content, re):
    match = re.search(content)
    if match is None:
        return content, None
    return re.sub("", content, 1), match.group()


def replace_specials(content, extension, regexs):
    # remove build tags from the top of Go files
    if extension == "go" or extension == "generatego":
        re = regexs["go_build_constraints"]
        return match_and_delete(content, re)

    # remove shebang from the top of shell files
    if extension == "sh":
        re = regexs["shebang"]
        return match_and_delete(content, re)

    return content, None


def file_passes(filename, refs, regexs, not_generated_files_to_skip):
    try:
        f = open(filename, 'r')
    except Exception as exc:
        print("Unable to open %s: %s" % (filename, exc), file=verbose_out)
        return False

    data = f.read()
    f.close()

    ref, extension, generated = analyze_file(
        filename, data, refs, regexs, not_generated_files_to_skip)

    return file_content_passes(data, filename, ref, extension, generated, regexs)


def file_content_passes(data, filename, ref, extension, generated, regexs):
    if ref is None:
        return True

    data, _ = replace_specials(data, extension, regexs)
    data = data.splitlines()

    # if our test file is smaller than the reference it surely fails!
    if len(ref) > len(data):
        print('File %s smaller than reference (%d < %d)' %
              (filename, len(data), len(ref)),
              file=verbose_out)
        return False

    # trim our file to the same number of lines as the reference file
    data = data[:len(ref)]

    p = regexs["year"]
    for d in data:
        if p.search(d):
            if generated:
                print('File %s has the YEAR field, but it should not be in generated file' % filename, file=verbose_out)
            else:
                print('File %s has the YEAR field, but missing the year of date' % filename, file=verbose_out)
            return False

    if not generated:
        # Replace all occurrences of the regex "2014|2015|2016|2017|2018" with "YEAR"
        p = regexs["date"]
        for i, d in enumerate(data):
            (data[i], found) = p.subn('YEAR', d)
            if found != 0:
                break

    # if we don't match the reference at this point, fail
    if ref != data:
        print("Header in %s does not match reference, diff:" % filename, file=verbose_out)
        if args.verbose:
            print(file=verbose_out)
            for line in difflib.unified_diff(ref, data, 'reference', filename, lineterm=''):
                print(line, file=verbose_out)
            print(file=verbose_out)
        return False

    return True


def file_extension(filename):
    return os.path.splitext(filename)[1].split(".")[-1].lower()


def read_config_file(conf_path):
    try:
        with open(conf_path) as json_data_file:
            return json.load(json_data_file)
    except ValueError:
        raise
    except:
        return {'dirs_to_skip': default_skipped_dirs,
                'not_generated_files_to_skip': default_skipped_not_generated}


def normalize_files(files, dirs_to_skip):
    newfiles = []
    for pathname in files:
        if any(x in pathname for x in dirs_to_skip):
            continue
        newfiles.append(pathname)

    for i, pathname in enumerate(newfiles):
        if not os.path.isabs(pathname):
            newfiles[i] = os.path.join(args.rootdir, pathname)

    return newfiles


def get_files(extensions, dirs_to_skip):
    files = []
    if len(args.filenames) > 0:
        files = args.filenames
    else:
        for root, dirs, walkfiles in os.walk(args.rootdir):
            # don't visit certain dirs. This is just a performance improvement
            # as we would prune these later in normalize_files(). But doing it
            # cuts down the amount of filesystem walking we do and cuts down
            # the size of the file list
            for d in dirs_to_skip:
                if d in dirs:
                    dirs.remove(d)

            for name in walkfiles:
                pathname = os.path.join(root, name)
                files.append(pathname)

    files = normalize_files(files, dirs_to_skip)

    outfiles = []
    for pathname in files:
        basename = os.path.basename(pathname)
        extension = file_extension(pathname)
        if extension in extensions or basename in extensions:
            outfiles.append(pathname)
    return outfiles


def analyze_file(file_name, file_content, refs, regexs, not_generated_files_to_skip):
    # determine if the file is automatically generated
    generated = is_generated_file(
        file_name, file_content, regexs, not_generated_files_to_skip)

    base_name = os.path.basename(file_name)
    if generated:
        extension = "generatego"
    else:
        extension = file_extension(file_name)

    if extension != "":
        ref = refs[extension]
    else:
        ref = refs.get(base_name, None)

    return ref, extension, generated


def ensure_boilerplate_file(file_name, refs, regexs, not_generated_files_to_skip):
    with open(file_name, mode='r+') as f:
        file_content = f.read()
        ref, extension, generated = analyze_file(
            file_name, file_content, refs, regexs, not_generated_files_to_skip)

        # licence header
        licence_header = os.linesep.join(ref)
        # content without shebang and such
        content_without_specials, special_header = replace_specials(
            file_content, extension, regexs)

        # new content, to be written to the file
        new_content = ''
        # shebang and such
        if special_header is not None:
            new_content += special_header
        # licence header
        current_year = str(datetime.datetime.now().year)
        year_replacer = regexs['year']
        new_content += year_replacer.sub(current_year, licence_header, 1)
        # actual content
        new_content += os.linesep + content_without_specials

        f.seek(0)
        f.write(new_content)


def get_dates():
    years = datetime.datetime.now().year
    return '(%s)' % '|'.join((str(year) for year in range(2014, years + 1)))


def get_regexs():
    regexs = {}
    # Search for "YEAR" which exists in the boilerplate, but shouldn't in the real thing
    regexs["year"] = re.compile('YEAR')
    # get_dates returns 2014, 2015, ... up to the current year as a regex like: "(2014|2015|2016|2017|2018)";
    # company holder names can be anything
    regexs["date"] = re.compile(get_dates())
    # strip // +build \n\n build constraints
    regexs["go_build_constraints"] = re.compile(
        r"^(// \+build.*\n)+\n", re.MULTILINE)
    # strip #!.* from shell scripts
    regexs["shebang"] = re.compile(r"^(#!.*\n)\n*", re.MULTILINE)
    # Search for generated files
    regexs["generated"] = re.compile('DO NOT EDIT')
    return regexs


def main():
    config_file_path = os.path.join(args.rootdir, ".boilerplate.json")
    config = read_config_file(config_file_path)

    regexs = get_regexs()
    refs = get_refs()
    filenames = get_files(refs.keys(), config.get('dirs_to_skip'))
    not_generated_files_to_skip = config.get('not_generated_files_to_skip', [])

    for filename in filenames:
        if not file_passes(filename, refs, regexs, not_generated_files_to_skip):
            if args.ensure:
                print("adding boilerplate header to %s" % filename)
                ensure_boilerplate_file(
                    filename, refs, regexs, not_generated_files_to_skip)
            else:
                print(filename, file=sys.stdout)

    return 0


if __name__ == "__main__":
    sys.exit(main())
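
# Illustrative usage sketch (assumption, not part of the original file; the
# flags are the ones defined by the argparse setup above):
#
#     # report files with missing or incorrect licence headers
#     python boilerplate.py --rootdir=/path/to/repo
#     # prepend the appropriate header to every offending file
#     python boilerplate.py --rootdir=/path/to/repo --ensure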
| 31.272446 | 124 | 0.651223 | 1,347 | 10,101 | 4.727543 | 0.234595 | 0.018844 | 0.022456 | 0.03282 | 0.169284 | 0.101759 | 0.078989 | 0.068624 | 0.053392 | 0.035804 | 0 | 0.010782 | 0.247104 | 10,101 | 322 | 125 | 31.369565 | 0.826561 | 0.177309 | 0 | 0.160194 | 0 | 0 | 0.11318 | 0.010036 | 0 | 0 | 0 | 0 | 0 | 1 | 0.072816 | false | 0.024272 | 0.043689 | 0.004854 | 0.23301 | 0.053398 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50700aab59400aaf9c63459381539f28dc157e13 | 1,145 | py | Python | RegionalSelect.py | longtp12/opencv4nodejs | 05952d1119c1dd9ccf696434bcc5cdf73f06b627 | [
"MIT"
] | null | null | null | RegionalSelect.py | longtp12/opencv4nodejs | 05952d1119c1dd9ccf696434bcc5cdf73f06b627 | [
"MIT"
] | null | null | null | RegionalSelect.py | longtp12/opencv4nodejs | 05952d1119c1dd9ccf696434bcc5cdf73f06b627 | [
"MIT"
] | null | null | null | import cv2
import random

scale = 0.5
circles = []
counter = 0
counter2 = 0
point1 = []
point2 = []
myPoints = []
myColor = []


def mousePoints(event, x, y, flags, params):
    global counter, point1, point2, counter2, circles, myColor
    if event == cv2.EVENT_LBUTTONDOWN:
        if counter == 0:
            point1 = int(x // scale), int(y // scale)
            counter += 1
            myColor = (random.randint(0, 2) * 200, random.randint(0, 2) * 200, random.randint(0, 2) * 200)
        elif counter == 1:
            point2 = int(x // scale), int(y // scale)
            # note: 'type' shadows the builtin of the same name
            type = input('Enter Type ')
            name = input('Enter Name ')
            myPoints.append([point1, point2, type, name])
            counter = 0
        circles.append([x, y, myColor])
        counter2 += 1


img = cv2.imread('cc.jpg')
img = cv2.resize(img, (0, 0), None, scale, scale)

while True:
    for x, y, color in circles:
        cv2.circle(img, (x, y), 3, color, cv2.FILLED)
    cv2.imshow("Original Image ", img)
    cv2.setMouseCallback("Original Image ", mousePoints)
    if cv2.waitKey(1) & 0xFF == ord('s'):
        print(myPoints)
        break
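
# Notes on the interaction (assumption, not part of the original file): click
# twice to mark a region's opposite corners, type its label in the terminal,
# then press 's' in the image window to print the collected regions, e.g.
#
#     [[(20, 40), (220, 440), 'text', 'title'], ...]
#
# Coordinates are divided by `scale` so they refer to the full-size image.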
| 30.131579 | 96 | 0.563319 | 147 | 1,145 | 4.380952 | 0.394558 | 0.012422 | 0.065217 | 0.069876 | 0.139752 | 0.139752 | 0.083851 | 0.083851 | 0.083851 | 0.083851 | 0 | 0.060123 | 0.28821 | 1,145 | 37 | 97 | 30.945946 | 0.730061 | 0 | 0 | 0.057143 | 0 | 0 | 0.053297 | 0 | 0 | 0 | 0.003613 | 0 | 0 | 1 | 0.028571 | false | 0 | 0.057143 | 0 | 0.085714 | 0.028571 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
507023d505edfcc4469619c44608de02dd2d402e | 2,896 | py | Python | blobserver/main.py | Jeremyrqjones/firebase_friendlypix | c30b08995ffe9246dce0ef42edde28c1754d49eb | [
"CC-BY-4.0"
] | null | null | null | blobserver/main.py | Jeremyrqjones/firebase_friendlypix | c30b08995ffe9246dce0ef42edde28c1754d49eb | [
"CC-BY-4.0"
] | null | null | null | blobserver/main.py | Jeremyrqjones/firebase_friendlypix | c30b08995ffe9246dce0ef42edde28c1754d49eb | [
"CC-BY-4.0"
] | null | null | null | # Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Sample application that demonstrates how to use the App Engine Blobstore API.
For more information, see README.md.
"""

# [START all]
from google.appengine.api import users
from google.appengine.api import images
from google.appengine.ext import blobstore
from google.appengine.ext import ndb
from google.appengine.ext.webapp import blobstore_handlers
import webapp2
import logging


# This datastore model keeps track of which users uploaded which photos.
class UserPhoto(ndb.Model):
    user = ndb.StringProperty()
    blob_key = ndb.BlobKeyProperty()


class PhotoUploadFormHandler(webapp2.RequestHandler):
    def get(self):
        # [START upload_url]
        upload_url = blobstore.create_upload_url('/upload_photo')
        # [END upload_url]
        # [START upload_form]
        # To upload files to the blobstore, the request method must be "POST"
        # and enctype must be set to "multipart/form-data".
        self.response.out.write(upload_url)
        # self.response.out.write("""
        # <html><body>
        # <form action="{0}" method="POST" enctype="multipart/form-data">
        #   Upload File: <input type="file" name="file"><br>
        #   <input type="submit" name="submit" value="Submit">
        # </form>
        # </body></html>""".format(upload_url))
        # [END upload_form]


# [START upload_handler]
class PhotoUploadHandler(blobstore_handlers.BlobstoreUploadHandler):
    def post(self):
        logging.info("in upload handler")
        # try:
        upload = self.get_uploads()[0]
        user_photo = UserPhoto(
            user="default_user",
            blob_key=upload.key())
        user_photo.put()
        logging.info(upload.key())
        self.response.out.write(images.get_serving_url(upload.key()))
        # self.redirect('/view_photo/%s' % upload.key())
        # except:
        #     self.error(500)
# [END upload_handler]


# [START download_handler]
class ViewPhotoHandler(blobstore_handlers.BlobstoreDownloadHandler):
    def get(self, photo_key):
        if not blobstore.get(photo_key):
            self.error(404)
        else:
            self.send_blob(photo_key)
# [END download_handler]


app = webapp2.WSGIApplication([
    ('/', PhotoUploadFormHandler),
    ('/upload_photo', PhotoUploadHandler),
    ('/view_photo/([^/]+)?', ViewPhotoHandler),
], debug=True)
# [END all]
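
# Illustrative usage sketch (assumption, not part of the original file; the
# host/port are whatever the dev server listens on). GET '/' returns a
# one-time upload URL, which a file can then be POSTed to:
#
#     UPLOAD_URL=$(curl -s http://localhost:8080/)
#     curl -F "file=@photo.jpg" "$UPLOAD_URL"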
| 31.478261 | 77 | 0.691644 | 369 | 2,896 | 5.341463 | 0.441734 | 0.030441 | 0.048199 | 0.033486 | 0.056824 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008141 | 0.194061 | 2,896 | 91 | 78 | 31.824176 | 0.836332 | 0.480318 | 0 | 0 | 0 | 0 | 0.052019 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.085714 | false | 0 | 0.2 | 0 | 0.457143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50707a687f5cbabec0683d56e704214a48cbdc7b | 7,933 | py | Python | mpl_logoplot/psfm.py | micked/mpl-logoplot | e34257b1791ad75484dd802943beafb7324fd483 | [
"MIT"
] | null | null | null | mpl_logoplot/psfm.py | micked/mpl-logoplot | e34257b1791ad75484dd802943beafb7324fd483 | [
"MIT"
] | null | null | null | mpl_logoplot/psfm.py | micked/mpl-logoplot | e34257b1791ad75484dd802943beafb7324fd483 | [
"MIT"
] | null | null | null | """
Functions to count and cluster amino acid sequences.
"""
import numpy as np
from . import utils


class PSFM:
    """Meta class for a position specific scoring matrix"""

    # note: the parameter was originally spelled 'pssm' while the body used
    # 'psfm', which raised a NameError; the name is unified here
    def __init__(self, psfm, alphabet=utils.AMINO_ACIDS, comments=(), consensus=None):
        self._psfm = psfm
        self.alphabet = alphabet
        self.comments = comments
        self.consensus = consensus

    @classmethod
    def from_txt(cls, lines):
        """Load a scoring/frequency matrix from text"""
        alphabet = None
        dtype = int
        matlines = []
        comments = []
        consensus = []
        for line in lines:
            line = line.strip()
            if line.startswith('#'):
                comments.append(line.lstrip('#'))
            line = line.split('#')[0]
            if not line:
                continue
            if alphabet is None:
                alphabet = tuple(line.strip().split())
            else:
                if dtype is float or '.' in line:
                    dtype = float
                line = line.split()
                consensus.append(line[1])
                matlines.append([dtype(n) for n in line[2:]])
        mat = np.array(matlines)
        return cls(mat, alphabet=''.join(alphabet), comments=comments, consensus=''.join(consensus))

    def to_txt(self, mat=None):
        """Output a matrix in text format."""
        lines = []
        for line in self.comments:
            lines.append(f'#{line}')
        if mat is None:
            # self.pssm is a method, so it must be called to get the matrix
            mat = self.pssm()
        consensus = self.consensus
        if self.consensus is None:
            consensus = ['X'] * mat.shape[0]
        l = len(str(mat.shape[0]))
        lines.append(' ' * (l + 3) + ' '.join([f' {a:<5}' for a in self.alphabet]))
        for i in range(mat.shape[0]):
            line = f'{i+1:>{l}} {consensus[i]} ' + ' '.join([f'{n:>6.3f}' if n else '     0' for n in mat[i]])
            lines.append(line)
        return lines

    def psfm(self):
        return self._psfm

    def pssm(self, bg='blosum62', return_psfm=False):
        if bg is None:
            bg = np.ones(len(self.alphabet)) / len(self.alphabet)
        elif bg in ('blosum62', 'blosum', 'bl62', 'bl'):
            bg = utils.bgfreq_array(self.alphabet)
        psfm = self.psfm()
        pssm = np.zeros_like(psfm, dtype='float')
        mask = psfm > 0
        pssm[mask] = np.log2((psfm / bg)[mask])
        if return_psfm:
            return pssm, psfm
        return pssm

    def shannon_logo(self):
        pssm, psfm = self.pssm(bg=None, return_psfm=True)
        return np.sum(pssm * psfm, axis=1, keepdims=True) * psfm

    def kullback_leibler_logo(self):
        pssm, psfm = self.pssm(bg='blosum62', return_psfm=True)
        return np.sum(pssm * psfm, axis=1, keepdims=True) * psfm * np.sign(pssm)

    def weighted_kullback_leibler_logo(self):
        pssm, psfm = self.pssm(bg='blosum62', return_psfm=True)
        return np.sum(pssm * psfm, axis=1, keepdims=True) * pssm / np.sum(np.abs(pssm), axis=1, keepdims=True)

    def p_weighted_kullback_leibler_logo(self):
        pssm, psfm = self.pssm(bg='blosum62', return_psfm=True)
        return np.sum(pssm * psfm, axis=1, keepdims=True) * pssm * psfm / np.sum(
            np.abs(pssm) * psfm, axis=1, keepdims=True)
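
# Illustrative usage sketch (assumption, not part of the original file; the
# single-position frequency matrix below is made up):
#
#     mat = PSFM(np.array([[0.7, 0.2, 0.1]]), alphabet='ACD')
#     heights = mat.shannon_logo()  # per-position information * frequency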


class AlignmentPSFM(PSFM):
    """Position-specific scoring matrix from a set of sequences"""

    def __init__(self, sequences, alphabet=utils.AMINO_ACIDS, clustering='hobohm1', weight_on_prior=200, **kwargs):
        self.seqlen = None
        self.alphabet = str(alphabet)
        self.sequences = []

        # Validate sequence lengths
        for seq in sequences:
            seq = str(seq)
            self.sequences.append(seq)
            if self.seqlen is None:
                self.seqlen = len(seq)
            elif self.seqlen != len(seq):
                raise Exception('All sequences must be of same length!')

        if isinstance(clustering, str):
            if clustering.lower() in ('hobohm', 'hobohm1'):
                clustering = hobohm1_factory(kwargs.get('hobohm1_threshold', 0.63))
            elif clustering.lower() in ('heuristic', ):
                clustering = heuristic
            else:
                raise Exception(f'Unknown clustering method: {clustering}')
        self.clustering = clustering

        self.weight_on_prior = weight_on_prior
        self.consensus = None
        self.comments = []
        self.gaps = '-.'

    @property
    def alphabet_index(self):
        alpha_index = {a: i for i, a in enumerate(self.alphabet, 1)}
        for gap in self.gaps:
            alpha_index[gap] = 0
        return alpha_index

    def alignment_array(self):
        if set(self.gaps).intersection(set(self.alphabet)):
            raise Exception(f'Alphabet ({self.alphabet}) must not contain gaps ({self.gaps})')
        alphabet_index = self.alphabet_index
        alignment_arr = []
        for seq in self.sequences:
            seq_coded = []
            for letter in seq:
                seq_coded.append(alphabet_index[letter])
            alignment_arr.append(seq_coded)
        return np.array(alignment_arr)

    def sequence_weights(self):
        if self.clustering is None:
            return np.ones(len(self.sequences))
        return self.clustering(self.alignment_array())

    def psfm(self):
        """Position specific frequency matrix"""
        a_index = self.alphabet_index
        sequence_weights = self.sequence_weights()

        countmat = np.zeros([len(self.sequences[0]), len(self.alphabet)], dtype='float')
        for j, seq in enumerate(self.sequences):
            for i, a in enumerate(seq):
                a_idx = a_index[a] - 1
                if a_idx >= 0:
                    countmat[i, a_idx] += sequence_weights[j]
        frequency_matrix = countmat / np.sum(sequence_weights)

        if self.weight_on_prior:
            blosum_mat = utils.blosum62_array(self.alphabet)
            pseudo_counts = np.dot(frequency_matrix, blosum_mat)
            a = np.sum(sequence_weights) - 1
            b = self.weight_on_prior
            frequency_matrix = (a * frequency_matrix + b * pseudo_counts) / (a + b)

        return frequency_matrix


#
# Clustering/weighting

def hobohm1_factory(threshold):
    """Return Hobohm1 clustering func with a given threshold"""
    def _hobohm1(alignment_array):
        return hobohm1(alignment_array, threshold)
    return _hobohm1


def hobohm1(alignment_array, threshold=0.63):
    """Hobohm1 as implemented in seq2logo"""
    n_seqs, seqlen = alignment_array.shape
    seqsort = np.argsort(np.sum(alignment_array == 0, axis=1))
    alignment_array = alignment_array[seqsort]

    clusters = np.arange(n_seqs)
    for i in range(n_seqs):
        for j in range(i + 1, n_seqs):
            if clusters[j] != j:
                continue
            sim = alignment_array[i] == alignment_array[j]
            sim[alignment_array[i] == 0] = False
            sim = np.sum(sim) / np.sum(alignment_array[i] != 0)
            if sim >= threshold:
                clusters[j] = i

    counts = np.bincount(clusters)
    weights = np.ones(n_seqs) / counts[clusters]

    seqsort_undo = np.empty(seqsort.size, 'int')
    seqsort_undo[seqsort] = np.arange(seqsort.size)
    return weights[seqsort_undo]
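
# Illustrative sketch (assumption, not part of the original file): two
# identical coded sequences and one distinct one; the duplicates share one
# unit of weight between them:
#
#     aln = np.array([[1, 2, 3], [1, 2, 3], [4, 5, 6]])
#     hobohm1(aln)  # -> array([0.5, 0.5, 1.])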


def heuristic(alignment_array):
    """Heuristic (position-based) clustering.

    Reference: https://doi.org/10.1016/0022-2836(94)90032-9
    """
    n_seqs, seqlen = alignment_array.shape

    # Calculate weight per position
    # weight(seq, pos) = 1 / (count(letter_seq)_pos * n_uniq_letters_pos)
    weight = np.zeros_like(alignment_array, dtype='float')
    for pos in range(seqlen):
        uniq, indices, counts = np.unique(alignment_array[:, pos], return_counts=True, return_inverse=True)
        weight[:, pos] = (1 / (counts * uniq.size))[indices]

    return np.sum(weight, axis=1)
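
# Illustrative sketch (assumption, not part of the original file): position 0
# has one unique letter shared by both sequences (weight 1/2 each) and
# position 1 has two unique letters (weight 1/2 each), so both sequences get
# total weight 1.0:
#
#     heuristic(np.array([[1, 1], [1, 2]]))  # -> array([1., 1.])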
| 32.646091 | 115 | 0.587924 | 985 | 7,933 | 4.617259 | 0.194924 | 0.052331 | 0.01715 | 0.022427 | 0.120053 | 0.10774 | 0.08905 | 0.083333 | 0.083333 | 0.083333 | 0 | 0.016201 | 0.291945 | 7,933 | 242 | 116 | 32.780992 | 0.793484 | 0.075129 | 0 | 0.066265 | 0 | 0 | 0.043897 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.108434 | false | 0 | 0.012048 | 0.012048 | 0.240964 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
507167233e5bb395bb748bc75fac2fc71d02f021 | 1,945 | py | Python | tests/integration_tests_plugins/cloudify_agent/operations.py | yeshess/cloudify-manager | 04dd199ce7df54355b87e9594f9db9fb1582924b | [
"Apache-2.0"
] | null | null | null | tests/integration_tests_plugins/cloudify_agent/operations.py | yeshess/cloudify-manager | 04dd199ce7df54355b87e9594f9db9fb1582924b | [
"Apache-2.0"
] | 1 | 2021-03-26T00:32:30.000Z | 2021-03-26T00:32:30.000Z | tests/integration_tests_plugins/cloudify_agent/operations.py | yeshess/cloudify-manager | 04dd199ce7df54355b87e9594f9db9fb1582924b | [
"Apache-2.0"
] | 1 | 2019-11-24T12:07:18.000Z | 2019-11-24T12:07:18.000Z | ########
# Copyright (c) 2013 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
from cloudify import ctx
from cloudify.decorators import operation
from integration_tests_plugins.cloudify_agent import consumer
from integration_tests_plugins.utils import update_storage


@operation
def install_plugins(plugins, **_):
    _operate_on_plugins(plugins, 'installed')


@operation
def uninstall_plugins(plugins, **_):
    _operate_on_plugins(plugins, 'uninstalled')


def _operate_on_plugins(plugins, new_state):
    plugin_installer = get_backend()
    func = plugin_installer.install if new_state == 'installed' \
        else plugin_installer.uninstall
    for plugin in plugins:
        with update_storage(ctx) as data:
            if (new_state == 'uninstalled' and
                    'raise_exception_on_delete' in data):
                raise Exception("Exception raised intentionally")
            func(plugin)
            plugin_name = plugin['name']
            task_target = ctx.task_target or 'local'
            data[task_target] = data.get(task_target, {})
            data[task_target][plugin_name] = \
                data[task_target].get(plugin_name, [])
            data[task_target][plugin_name].append(new_state)
            ctx.logger.info('Plugin {0} {1}'.format(plugin['name'], new_state))


def get_backend():
    return consumer.ConsumerBackedPluginInstaller()
| 35.363636 | 79 | 0.701285 | 245 | 1,945 | 5.387755 | 0.465306 | 0.05303 | 0.042424 | 0.052273 | 0.110606 | 0.056061 | 0 | 0 | 0 | 0 | 0 | 0.006498 | 0.20874 | 1,945 | 54 | 80 | 36.018519 | 0.851202 | 0.312082 | 0 | 0.068966 | 0 | 0 | 0.092705 | 0.018997 | 0 | 0 | 0 | 0 | 0 | 1 | 0.137931 | false | 0 | 0.137931 | 0.034483 | 0.310345 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50737f4fc570299443f2fca9634c4ba86dd2983b | 1,036 | py | Python | setup.py | juergberinger/cmdhelper | f1fc1a08835fe4dabf6d4ae8d547b7bc4ee176c4 | [
"MIT"
] | 1 | 2016-03-14T02:58:44.000Z | 2016-03-14T02:58:44.000Z | setup.py | juergberinger/cmdhelper | f1fc1a08835fe4dabf6d4ae8d547b7bc4ee176c4 | [
"MIT"
] | null | null | null | setup.py | juergberinger/cmdhelper | f1fc1a08835fe4dabf6d4ae8d547b7bc4ee176c4 | [
"MIT"
] | null | null | null | from setuptools import setup
from cmdhelper import __version__


def read(fname):
    """Return contents of file with name fname."""
    with open(fname, 'r') as f:
        return f.read()


setup(
    name = 'cmdhelper',
    version = __version__,
    description = 'Python utility for writing command line scripts with consistent look and feel.',
    long_description = read('README.rst'),
    url = 'https://github.com/juergberinger/cmdhelper',
    license = 'MIT',
    author = 'Juerg Beringer',
    author_email = 'juerg.beringer@gmail.com',
    py_modules = ['cmdhelper'],
    include_package_data = True,
    classifiers = [
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Topic :: System :: Shells',
        'Topic :: Utilities'
    ],
    keywords = 'command line utility, scripts',
)
| 31.393939 | 99 | 0.625483 | 109 | 1,036 | 5.825688 | 0.678899 | 0.034646 | 0.07874 | 0.08189 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005135 | 0.24807 | 1,036 | 32 | 100 | 32.375 | 0.810013 | 0.03861 | 0 | 0 | 0 | 0 | 0.472727 | 0.024242 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035714 | false | 0 | 0.071429 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5077376272249297cca7a8309e587377ce706471 | 422 | py | Python | train.py | gtg7784/Yo-RongRong-AI | 285b7e08147081fbf55d6ee0af6687af5f3c40d1 | [
"MIT"
] | 3 | 2020-11-26T02:41:34.000Z | 2021-10-15T05:13:50.000Z | train.py | gtg7784/Yo-RongRong-AI | 285b7e08147081fbf55d6ee0af6687af5f3c40d1 | [
"MIT"
] | 2 | 2021-09-08T02:49:03.000Z | 2022-01-13T03:32:53.000Z | train.py | gtg7784/Yo-RongRong-AI | 285b7e08147081fbf55d6ee0af6687af5f3c40d1 | [
"MIT"
] | null | null | null | import cv2
import numpy as np
from .model import *
from tensorflow.keras.optimizers import Adam


def train():
    input_height = 48
    input_width = 48
    input_channel = 3
    input_shape = (input_height, input_width, input_channel)
    n_classes = 6

    model = YRRModel(input_shape, n_classes)
    adam = Adam()
    model.compile(
        optimizer=adam,
        loss='categorical_crossentropy', metrics=['acc'])

    data_dir = './data'
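    # The source file ends here: train() builds and compiles the model but
    # never fits it. A minimal sketch of how training could continue; the
    # directory layout, batch size, and epoch count below are assumptions,
    # not part of the original file, so the sketch is left commented out.
    #
    # from tensorflow.keras.preprocessing.image import ImageDataGenerator
    # gen = ImageDataGenerator(rescale=1. / 255)
    # flow = gen.flow_from_directory(data_dir, target_size=(48, 48),
    #                                class_mode='categorical', batch_size=32)
    # model.fit(flow, epochs=10)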
| 19.181818 | 58 | 0.7109 | 57 | 422 | 5.052632 | 0.578947 | 0.076389 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.020468 | 0.189573 | 422 | 22 | 59 | 19.181818 | 0.821637 | 0 | 0 | 0 | 0 | 0 | 0.078014 | 0.056738 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.25 | 0 | 0.3125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50775570c9b8df9bbedead1da9c309f1bab65af6 | 5,636 | py | Python | aiida/backends/djsite/utils.py | odarbelaeze/aiida_core | 934b4ccdc73a993f2a6656caf516500470e3da08 | [
"BSD-2-Clause"
] | 1 | 2019-03-15T10:37:53.000Z | 2019-03-15T10:37:53.000Z | aiida/backends/djsite/utils.py | odarbelaeze/aiida_core | 934b4ccdc73a993f2a6656caf516500470e3da08 | [
"BSD-2-Clause"
] | null | null | null | aiida/backends/djsite/utils.py | odarbelaeze/aiida_core | 934b4ccdc73a993f2a6656caf516500470e3da08 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""
This module contains a number of utility functions specific to the
Django backend.
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import django
# pylint: disable=no-name-in-module, no-member, import-error
def load_dbenv(profile=None):
    """
    Load the database environment (Django) and perform some checks.

    :param profile: the string with the profile to use. If not specified,
        use the default one specified in the AiiDA configuration file.
    """
    _load_dbenv_noschemacheck(profile)
    # Check schema version and the existence of the needed tables
    check_schema_version(profile_name=profile)


def _load_dbenv_noschemacheck(profile):  # pylint: disable=unused-argument
    """
    Load the database environment (Django) WITHOUT CHECKING THE SCHEMA VERSION.

    :param profile: the string with the profile to use. If not specified,
        use the default one specified in the AiiDA configuration file.

    This should ONLY be used internally, inside load_dbenv, and for schema
    migrations. DO NOT USE OTHERWISE!
    """
    # This function does not use process and profile because they are read
    # from global variables (set before by load_profile) inside the
    # djsite.settings.settings module.
    os.environ['DJANGO_SETTINGS_MODULE'] = 'aiida.backends.djsite.settings.settings'
    django.setup()


_aiida_autouser_cache = None  # pylint: disable=invalid-name


def migrate_database():
    """Migrate the database to the latest schema version."""
    from django.core.management import call_command

    call_command('migrate')
def check_schema_version(profile_name=None):
    """
    Check if the version stored in the database is the same as the version
    of the code.

    :note: if the DbSetting table does not exist, this function does not
        fail. The reason is to avoid problems before running the first
        migrate call.

    :note: if no version is found, the version is set to the version of the
        code. This is useful to have the code automatically set the DB version
        at the first code execution.

    :raise aiida.common.ConfigurationError: if the two schema versions do not
        match. Otherwise, just return.
    """
    # pylint: disable=duplicate-string-formatting-argument
    from django.db import connection

    import aiida.backends.djsite.db.models
    from aiida.common.exceptions import ConfigurationError

    # Do not do anything if the table does not exist yet
    if 'db_dbsetting' not in connection.introspection.table_names():
        return

    code_schema_version = aiida.backends.djsite.db.models.SCHEMA_VERSION
    db_schema_version = get_db_schema_version()

    if db_schema_version is None:
        # No schema version defined in the DB yet, so set it to the code version
        set_db_schema_version(code_schema_version)
        db_schema_version = get_db_schema_version()

    if code_schema_version != db_schema_version:
        if profile_name is None:
            from aiida.manage.manager import get_manager
            manager = get_manager()
            profile_name = manager.get_profile().name
        raise ConfigurationError(
            'Database schema version {} is outdated compared to the code schema version {}\n'
            'To migrate the database to the current version, run the following commands:'
            '\n verdi -p {} daemon stop\n verdi -p {} database migrate'.format(
                db_schema_version, code_schema_version, profile_name, profile_name))
def set_db_schema_version(version):
    """
    Set the schema version stored in the DB. Use only if you know what
    you are doing.
    """
    from aiida.backends.utils import set_global_setting

    return set_global_setting(
        'db|schemaversion', version,
        description="The version of the schema used in this database.")


def get_db_schema_version():
    """
    Get the current schema version stored in the DB. Return None if
    it is not stored.
    """
    from aiida.backends.utils import get_global_setting

    try:
        return get_global_setting('db|schemaversion')
    except KeyError:
        return None


def delete_nodes_and_connections_django(pks_to_delete):  # pylint: disable=invalid-name
    """
    Delete all nodes corresponding to pks in the input.

    :param pks_to_delete: A list, tuple or set of pks that should be deleted.
    """
    from django.db import transaction
    from django.db.models import Q
    from aiida.backends.djsite.db import models

    with transaction.atomic():
        # This is fixed in pylint-django>=2, but this supports only py3
        # pylint: disable=no-member
        # Delete all links pointing to or from a given node
        models.DbLink.objects.filter(
            Q(input__in=pks_to_delete) | Q(output__in=pks_to_delete)).delete()
        # now delete nodes
        models.DbNode.objects.filter(pk__in=pks_to_delete).delete()
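# Illustrative usage of this module (a sketch that assumes an existing AiiDA
# profile name; not part of the original file):
#
# from aiida.backends.djsite.utils import load_dbenv, migrate_database
# load_dbenv(profile='my-profile')   # set up Django and check the schema
# migrate_database()                 # bring the DB to the latest schema version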
| 38.868966 | 114 | 0.67264 | 743 | 5,636 | 4.955585 | 0.297443 | 0.084737 | 0.040739 | 0.019555 | 0.223248 | 0.120315 | 0.08365 | 0.08365 | 0.08365 | 0.08365 | 0 | 0.000697 | 0.235806 | 5,636 | 144 | 115 | 39.138889 | 0.854191 | 0.465578 | 0 | 0.038462 | 0 | 0 | 0.140226 | 0.022932 | 0 | 0 | 0 | 0 | 0 | 1 | 0.134615 | false | 0 | 0.288462 | 0 | 0.5 | 0.019231 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5077e54797b206b32ca54e5d032a50b4c24c8317 | 3,109 | py | Python | fu/single/test/BranchRTL_test.py | mfkiwl/OpenCGRA-1 | 1d898da829b416d850ec9f94e3021ca4171ece5b | [
"BSD-3-Clause"
] | 49 | 2020-09-17T19:22:53.000Z | 2022-03-25T18:55:13.000Z | fu/single/test/BranchRTL_test.py | zslwyuan/OpenCGRA | 06646c8c277fd53ea3a287047ac8a4ce3e466295 | [
"BSD-3-Clause"
] | 8 | 2020-12-30T20:22:29.000Z | 2022-03-29T04:47:27.000Z | fu/single/test/BranchRTL_test.py | zslwyuan/OpenCGRA | 06646c8c277fd53ea3a287047ac8a4ce3e466295 | [
"BSD-3-Clause"
] | 12 | 2020-12-12T21:38:59.000Z | 2022-03-15T02:21:15.000Z | """
==========================================================================
BranchRTL_test.py
==========================================================================
Test cases for functional unit branch.
Author : Cheng Tan
Date : November 27, 2019
"""
from pymtl3 import *
from pymtl3.stdlib.test import TestSinkCL
from pymtl3.stdlib.test.test_srcs import TestSrcRTL
from ..BranchRTL import BranchRTL
from ....lib.opt_type import *
from ....lib.messages import *
#-------------------------------------------------------------------------
# Test harness
#-------------------------------------------------------------------------
class TestHarness( Component ):

  def construct( s, FunctionUnit, DataType, CtrlType, num_inports, num_outports,
                 data_mem_size, src_data, src_comp, src_opt, sink_if, sink_else ):

    s.src_data  = TestSrcRTL( DataType, src_data )
    s.src_comp  = TestSrcRTL( DataType, src_comp )
    s.src_opt   = TestSrcRTL( CtrlType, src_opt )
    s.sink_if   = TestSinkCL( DataType, sink_if )
    s.sink_else = TestSinkCL( DataType, sink_else )

    s.dut = FunctionUnit( DataType, CtrlType, num_inports, num_outports,
                          data_mem_size )

    connect( s.src_data.send,   s.dut.recv_in[0] )
    connect( s.src_comp.send,   s.dut.recv_in[1] )
    connect( s.src_opt.send,    s.dut.recv_opt )
    connect( s.dut.send_out[0], s.sink_if.recv )
    connect( s.dut.send_out[1], s.sink_else.recv )

  def done( s ):
    return s.src_data.done() and s.src_comp.done() and s.src_opt.done() and \
           s.sink_if.done() and s.sink_else.done()

  def line_trace( s ):
    return s.dut.line_trace()
def run_sim( test_harness, max_cycles=100 ):

  test_harness.elaborate()
  test_harness.apply( SimulationPass() )
  test_harness.sim_reset()

  # Run simulation
  ncycles = 0
  print()
  print( "{}:{}".format( ncycles, test_harness.line_trace() ))
  while not test_harness.done() and ncycles < max_cycles:
    test_harness.tick()
    ncycles += 1
    print( "{}:{}".format( ncycles, test_harness.line_trace() ))

  # Check timeout
  assert ncycles < max_cycles

  test_harness.tick()
  test_harness.tick()
  test_harness.tick()
def test_Branch():
  FU            = BranchRTL
  DataType      = mk_data( 16, 1 )
  CtrlType      = mk_ctrl()
  num_inports   = 2
  num_outports  = 2
  data_mem_size = 8
  FuInType      = mk_bits( clog2( num_inports + 1 ) )
  src_data      = [ DataType(7, 1), DataType(3, 1), DataType(9, 1) ]
  src_comp      = [ DataType(0, 1), DataType(1, 1), DataType(0, 1) ]
  src_opt       = [ CtrlType( OPT_BRH, [FuInType(1), FuInType(2)] ),
                    CtrlType( OPT_BRH, [FuInType(1), FuInType(2)] ),
                    CtrlType( OPT_BRH, [FuInType(1), FuInType(2)] ) ]
  sink_if       = [ DataType(7, 1), DataType(3, 0), DataType(9, 1) ]
  sink_else     = [ DataType(7, 0), DataType(3, 1), DataType(9, 0) ]
  th = TestHarness( FU, DataType, CtrlType, num_inports, num_outports,
                    data_mem_size, src_data, src_comp, src_opt, sink_if,
                    sink_else )
  run_sim( th )
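# Like other pymtl3 test files, this module is meant to be collected by
# pytest; an illustrative invocation (the exact command line is an assumption,
# not stated in the source):
#   pytest fu/single/test/BranchRTL_test.py -v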
| 33.430108 | 84 | 0.576391 | 396 | 3,109 | 4.30303 | 0.234848 | 0.077465 | 0.025822 | 0.045775 | 0.370305 | 0.293427 | 0.239437 | 0.194836 | 0.194836 | 0.194836 | 0 | 0.022923 | 0.214217 | 3,109 | 92 | 85 | 33.793478 | 0.67458 | 0.142168 | 0 | 0.101695 | 0 | 0 | 0.003769 | 0 | 0 | 0 | 0 | 0 | 0.016949 | 1 | 0.084746 | false | 0.016949 | 0.101695 | 0.033898 | 0.237288 | 0.050847 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50795071ddf6e9e44540f641bac07785443f1504 | 3,489 | py | Python | python/demo.py | jasongwq/MTCNN | 168d346461fd7e7c59e4f911296cfa6133dc2ea5 | [
"MIT"
] | null | null | null | python/demo.py | jasongwq/MTCNN | 168d346461fd7e7c59e4f911296cfa6133dc2ea5 | [
"MIT"
] | null | null | null | python/demo.py | jasongwq/MTCNN | 168d346461fd7e7c59e4f911296cfa6133dc2ea5 | [
"MIT"
] | null | null | null | import cv2
import os
import numpy as np
from Mtcnndnn import MTCNNDetector
from PIL import Image
import time
#from MtcnnDetector import MTCNNDetector

colors = [[255, 0, 0], [0, 255, 0], [0, 0, 255]]

# 3D reference coordinates for the five MTCNN facial landmarks (eyes, nose
# tip, mouth corners); these form the model side of the PnP correspondence
objectPoints = np.array([[2.37427, 110.322, 21.7776],
                         [70.0602, 109.898, 20.8234],
                         [36.8301, 78.3185, 52.0345],
                         [14.8498, 51.0115, 30.2378],
                         [58.1825, 51.0115, 29.6224]])
def drawDetection(img, boxes, points):
    """Draw each detection's bounding box and five facial landmarks on img."""
    for i in range(boxes.shape[0]):
        box = boxes[i]
        box = [int(b) for b in box]
        cv2.rectangle(img, (box[0], box[1]), (box[2], box[3]), (0, 255, 0), 2)
        for j in range(5):
            # points holds x coordinates in rows 0-4 and y coordinates in rows 5-9
            cv2.circle(img, (int(points[j, i]), int(points[j + 5, i])), 2, (0, 0, 255), -1)
def estimateHeadPose(img, points):
    """Estimate head pose from the five landmarks via PnP and draw the result."""
    if len(points) == 0 or points.shape[1] == 0:
        return
    height, width, _ = img.shape
    # Approximate intrinsics: focal length derived from the image size,
    # principal point at the image center, and zero lens distortion
    focal_length = (width + height) / 2
    camera_matrix = np.zeros([3, 3], dtype=np.float32)
    camera_matrix[0, :] = [focal_length, 0, width / 2]
    camera_matrix[1, :] = [0, focal_length, height / 2]
    camera_matrix[2, :] = [0, 0, 1]
    dist_coeffs = np.zeros([5, 1], np.float32)
    imagePoints = []
    for i in range(5):
        imagePoints.append([points[i], points[i + 5]])
    imagePoints = np.array(imagePoints, dtype=np.float32).reshape(-1, 2)
    ret, rVec, tVec = cv2.solvePnP(objectPoints, imagePoints, camera_matrix,
                                   dist_coeffs, flags=cv2.SOLVEPNP_EPNP)
    if ret:
        # draw 3d axis anchored at the nose reference point
        axislength = 40
        mp2 = objectPoints[2]
        axis3d = np.array([[mp2[0], mp2[1], mp2[2]],
                           [mp2[0] + axislength, mp2[1], mp2[2]],
                           [mp2[0], mp2[1] + axislength, mp2[2]],
                           [mp2[0], mp2[1], mp2[2] + axislength]], dtype=np.float32)
        axis2d, _ = cv2.projectPoints(axis3d, rVec, tVec, camera_matrix, dist_coeffs)
        axis2d = np.int32(axis2d).reshape(-1, 2)
        pt0 = axis2d[0]
        for i in range(3):
            ptaxis = axis2d[i + 1]
            cv2.line(img, tuple(pt0), tuple(ptaxis), colors[i], 3)
        # Recover Euler angles from the [R|t] projection matrix
        R = cv2.Rodrigues(rVec)[0]
        T = np.hstack((R, tVec))
        roll, pitch, yaw = cv2.decomposeProjectionMatrix(T, camera_matrix)[-1]
        eulers = [roll, pitch, yaw]
        for i in range(3):
            cv2.putText(img, str(eulers[i]), (0, 40 + 20 * i), 3, 1, colors[i])
        return eulers
def test_dir(detector, dir=r"/face_recognition/photo/dji"):
    files = os.listdir(dir)
    for file in files:
        imgpath = dir + "/" + file
        img = cv2.imread(imgpath)
        if img is None:
            continue
        start = time.time()
        boxes, points = detector.detect(img, 100)
        end = time.time()
        print(end - start)
        #estimateHeadPose(img, points)
        drawDetection(img, boxes, points)
        image = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
        print("---------opencv")
        image.save(dir + "/tmp/" + file)
def test_camera(detector):
    cap = cv2.VideoCapture(0)
    while True:
        ret, img = cap.read()
        if not ret:
            break
        boxes, points = detector.detect(img)
        estimateHeadPose(img, points)
        drawDetection(img, boxes, points)
        # `showimg` was undefined in the original; the annotated frame is `img`
        image = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
        print("---------opencv")
        image.show()
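# Editorial note: image.show() opens a new external viewer for every frame;
# for a live preview loop, cv2.imshow('frame', img) followed by cv2.waitKey(1)
# is the usual OpenCV pattern (a suggestion, not part of the original file).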
if __name__ == '__main__':
    detector = MTCNNDetector()
    test_dir(detector)
#test_camera(detector) | 36.726316 | 116 | 0.567211 | 471 | 3,489 | 4.135881 | 0.322718 | 0.043121 | 0.01232 | 0.022587 | 0.189425 | 0.140657 | 0.084189 | 0.084189 | 0.084189 | 0.084189 | 0 | 0.092241 | 0.27601 | 3,489 | 95 | 117 | 36.726316 | 0.678939 | 0.029235 | 0 | 0.072289 | 0 | 0 | 0.020981 | 0.007979 | 0 | 0 | 0 | 0 | 0 | 1 | 0.048193 | false | 0 | 0.060241 | 0 | 0.13253 | 0.036145 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
507b63d82b4670d671cb5139218f09dd1f1dce79 | 4,983 | py | Python | finetuner/tuner/callback/early_stopping.py | rizwandel/finetuner | 7fef9df6b5101d19a4fd710084d54b5be45dc5d5 | [
"Apache-2.0"
] | null | null | null | finetuner/tuner/callback/early_stopping.py | rizwandel/finetuner | 7fef9df6b5101d19a4fd710084d54b5be45dc5d5 | [
"Apache-2.0"
] | 1 | 2022-02-09T23:48:24.000Z | 2022-02-09T23:48:24.000Z | finetuner/tuner/callback/early_stopping.py | rizwandel/finetuner | 7fef9df6b5101d19a4fd710084d54b5be45dc5d5 | [
"Apache-2.0"
] | null | null | null | import logging
from typing import TYPE_CHECKING, Optional

import numpy as np

from .base import BaseCallback

if TYPE_CHECKING:
    from ..base import BaseTuner
class EarlyStopping(BaseCallback):
    """
    Callback to stop training when a monitored metric has stopped improving.

    A `model.fit()` training loop will check at the end of every epoch whether
    the monitored metric is no longer improving.
    """
    def __init__(
        self,
        monitor: str = 'val_loss',
        mode: str = 'auto',
        patience: int = 2,
        min_delta: int = 0,
        baseline: Optional[float] = None,
        verbose: bool = False,
    ):
        """
        :param monitor: if `monitor='loss'` the best model is tracked according
            to the training loss; if `monitor='val_loss'` it is tracked
            according to the validation loss
        :param mode: one of {'auto', 'min', 'max'}. The decision to overwrite
            the current best value is made based on either the maximization or
            the minimization of the monitored quantity. For `val_acc` this
            should be `max`, for `val_loss` this should be `min`, etc. In
            `auto` mode, the mode is set to `max` if the quantities monitored
            are 'acc' or start with 'fmeasure' and is set to `min` for the
            rest of the quantities.
        :param patience: integer, the number of epochs after which the training
            is stopped if there is no improvement; e.g. with `patience = 2`,
            training stops if the model doesn't improve for 2 consecutive
            epochs.
        :param min_delta: minimum change in the monitored quantity to qualify
            as an improvement, i.e. an absolute change of less than min_delta
            counts as no improvement.
        :param baseline: baseline value for the monitored quantity. Training
            will stop if the model doesn't show improvement over the baseline.
        :param verbose: whether to log score improvement events
        """
        self._logger = logging.getLogger('finetuner.' + self.__class__.__name__)
        self._logger.setLevel(logging.INFO if verbose else logging.WARNING)

        self._monitor = monitor
        self._mode = mode
        self._patience = patience
        self._min_delta = min_delta
        self._baseline = baseline

        self._train_losses = []
        self._validation_losses = []
        self._epoch_counter = 0

        if mode not in ['auto', 'min', 'max']:
            self._logger.warning('mode %s is unknown, fallback to auto mode.', mode)
            mode = 'auto'

        if mode == 'min':
            self._monitor_op = np.less
            self._best = np.Inf
        elif mode == 'max':
            self._monitor_op = np.greater
            self._best = -np.Inf
        else:
            if 'acc' in self._monitor:  # to adjust when other metrics are added
                self._monitor_op = np.greater
                self._best = -np.Inf
            else:
                self._monitor_op = np.less
                self._best = np.Inf

        # Fold the comparison direction into the sign of min_delta, so that
        # `current_value - self._min_delta` in _check always demands an
        # improvement of at least min_delta in the monitored direction
        if self._monitor_op == np.greater:
            self._min_delta *= 1
        else:
            self._min_delta *= -1
    def on_epoch_end(self, tuner: 'BaseTuner'):
        """
        Called at the end of a training epoch. Checks whether the model has
        improved for the monitored metric `monitor`; if it hasn't improved for
        more than `patience` epochs, training is stopped.
        """
        self._check(tuner)
        self._train_losses = []
        self._validation_losses = []

    def on_train_batch_end(self, tuner: 'BaseTuner'):
        self._train_losses.append(tuner.state.current_loss)

    def on_val_batch_end(self, tuner: 'BaseTuner'):
        self._validation_losses.append(tuner.state.current_loss)
    def _check(self, tuner):
        """
        Check whether training should be stopped and, if so, signal the tuner
        to stop.
        """
        current_value = None
        if self._baseline is not None:
            self._best = self._baseline

        if self._monitor == 'val_loss':
            current_value = np.mean(self._validation_losses)
        elif self._monitor == 'train_loss':
            current_value = np.mean(self._train_losses)
        else:
            self._logger.warning(f'Metric {self._monitor} not available, skipping.')
            return

        if self._monitor_op(current_value - self._min_delta, self._best):
            self._logger.info(f'Model improved from {self._best} to {current_value}')
            self._best = current_value
            self._epoch_counter = 0
        else:
            self._epoch_counter += 1
            if self._epoch_counter == self._patience:
                self._logger.info(
                    f'Training is stopping, no improvement for {self._patience} epochs'
                )
                tuner.stop_training = True
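# Illustrative usage (a sketch; the fit(...) call below is an assumption, not
# taken from this file):
#
# from finetuner.tuner.callback import EarlyStopping
# early = EarlyStopping(monitor='val_loss', mode='min', patience=3)
# tuner.fit(..., callbacks=[early])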
| 37.75 | 87 | 0.602248 | 625 | 4,983 | 4.6192 | 0.2736 | 0.045722 | 0.027018 | 0.025979 | 0.192587 | 0.163492 | 0.074125 | 0.049186 | 0.049186 | 0.027018 | 0 | 0.002641 | 0.316075 | 4,983 | 131 | 88 | 38.038168 | 0.844484 | 0.34718 | 0 | 0.25 | 0 | 0 | 0.099057 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.065789 | false | 0 | 0.065789 | 0 | 0.157895 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |