hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | 
qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | 
qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
518e296fcd47480555619d28da9b1f803d91b6a5 | 543 | py | Python | test/ep/file_test.py | alanjimenez1/qualibrate-api | d005e35029303ac9dfd8e66cb09c79393a472cad | [
"MIT"
] | null | null | null | test/ep/file_test.py | alanjimenez1/qualibrate-api | d005e35029303ac9dfd8e66cb09c79393a472cad | [
"MIT"
] | null | null | null | test/ep/file_test.py | alanjimenez1/qualibrate-api | d005e35029303ac9dfd8e66cb09c79393a472cad | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Test module for User endpoint responses
"""
__author__ = "@canimus"
__license__ = "MIT"
__revision__ = "1.0"
import unittest
from test.ep.utils import ApiRequest as http_client
class FileEndPointTest(unittest.TestCase):
    '''Files endpoint tests'''

    def test_file_upload(self):
        """
        Upload a valid text file and check the reported MIME type.
        """
        file_uploaded = http_client().post(":5000/files", "user_id=1 file@/sw/apps2/qualibrate-api/LICENSE")
        # assertEqual instead of assertTrue(x == y): on failure it reports both
        # values rather than just "False is not true".
        self.assertEqual(file_uploaded['mime'], 'text/plain')
| 25.857143 | 108 | 0.664825 | 67 | 543 | 5.104478 | 0.716418 | 0.05848 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.020455 | 0.189687 | 543 | 20 | 109 | 27.15 | 0.756818 | 0.198895 | 0 | 0 | 0 | 0 | 0.215539 | 0.092732 | 0 | 0 | 0 | 0 | 0.111111 | 1 | 0.111111 | false | 0 | 0.222222 | 0 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
518f9bd6632a478fd29c87c47bda80684e792457 | 1,286 | py | Python | ZDF/CSCI 6212/QuickSelect.py | zdf0221/leetcode | 66e92e29e6619d138401658fa656f41404ccabe3 | [
"MIT"
] | null | null | null | ZDF/CSCI 6212/QuickSelect.py | zdf0221/leetcode | 66e92e29e6619d138401658fa656f41404ccabe3 | [
"MIT"
] | null | null | null | ZDF/CSCI 6212/QuickSelect.py | zdf0221/leetcode | 66e92e29e6619d138401658fa656f41404ccabe3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: QuickSelect
Description :
Author : zdf
date: 2018/9/26
-------------------------------------------------
Change Activity:
2018/9/26:13:11
-------------------------------------------------
"""
def partition(test, left, right, mid):
    """Lomuto partition of test[left..right] around the value at index `mid`.

    Moves the pivot value to its final sorted position and returns that
    index: afterwards every element left of the returned index is <= the
    pivot and every element right of it is >= the pivot.

    The original implementation returned the untouched `mid` index, but the
    swaps could move the pivot away from that slot, so callers (quickselect)
    could be told the wrong split point and report wrong answers.
    """
    # Park the pivot at the right edge so the single left-to-right scan works.
    test[mid], test[right] = test[right], test[mid]
    pivot = test[right]
    store = left
    for i in range(left, right):
        if test[i] < pivot:
            test[i], test[store] = test[store], test[i]
            store += 1
    # Drop the pivot into its final slot and report it.
    test[store], test[right] = test[right], test[store]
    return store
def quickselect(test, left, right, k):
    """Return the element that lands at index `k` of test[left..right] in order.

    Iterative formulation of the original recursion: the [left, right]
    window is narrowed around the partition point until it collapses onto
    (or hits) k.  Mutates `test` in place through partition().
    """
    while left != right:
        mid = (left + right) // 2
        mid = partition(test, left, right, mid)
        if k == mid:
            return test[k]
        if k < mid:
            right = mid - 1
        else:
            left = mid + 1
    return test[left]
if __name__ == "__main__":
    # Demo run on a small mixed list (duplicates, negatives, a float).
    test = [1, 4, 2, 3.6, -1, 0, 25, -34, 8, 9, 1, 0]
    print("Sorted list:", sorted(test))
    # NOTE(review): quickselect treats k as a 0-based index, so k=1 prints the
    # element at sorted position 1 (the 2nd smallest) and position 0 is never
    # shown -- the "k th smallest" label is off by one.  TODO confirm intent.
    for k in range(1,12):
        print("The", k, "th smallest number is", quickselect(test, 0, len(test) - 1, k))
| 27.361702 | 88 | 0.452566 | 157 | 1,286 | 3.656051 | 0.324841 | 0.156794 | 0.090592 | 0.062718 | 0.184669 | 0.097561 | 0.097561 | 0 | 0 | 0 | 0 | 0.05157 | 0.306376 | 1,286 | 46 | 89 | 27.956522 | 0.591928 | 0.251944 | 0 | 0.142857 | 0 | 0 | 0.046122 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0 | 0 | 0.25 | 0.071429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5194f2f707886fc733209274dbe61c98c74c61c8 | 3,009 | py | Python | main.py | ResByte/torch-gcp-fn | eb343dd144c6a5d828e666db084cb39b70896ff8 | [
"Apache-2.0"
] | 1 | 2020-03-31T21:49:37.000Z | 2020-03-31T21:49:37.000Z | main.py | ResByte/torch-gcp-fn | eb343dd144c6a5d828e666db084cb39b70896ff8 | [
"Apache-2.0"
] | null | null | null | main.py | ResByte/torch-gcp-fn | eb343dd144c6a5d828e666db084cb39b70896ff8 | [
"Apache-2.0"
] | null | null | null | import os
import io
import json
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import models,transforms
from PIL import Image
import time
from flask import jsonify
import logging
logging.basicConfig(level=logging.INFO)
# lazy global: device/model/class-index are populated on first request inside
# handler() rather than at import time, keeping the cold start cheap.
device = None
model = None
imagenet_class_index = None
def img_to_tensor(image_bytes):
    """Decode raw image bytes into a normalized (1, C, H, W) tensor on `device`.

    Args:
        image_bytes: the raw image file contents.

    Returns:
        A batched torch.Tensor, resized to 255, centre-cropped to 224 and
        normalized with the standard ImageNet channel statistics, moved to
        the global `device`.
    """
    pipeline = transforms.Compose([
        transforms.Resize(255),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])
    pil_image = Image.open(io.BytesIO(image_bytes))
    batch = pipeline(pil_image).unsqueeze(0)  # add the batch dimension
    return batch.to(device)
def get_prediction(image_bytes):
    """Classify raw image bytes with the lazily-loaded global model.

    Args:
        image_bytes: raw image file contents received via POST.

    Returns:
        (class_id, class_name, prob): id and label looked up in the global
        imagenet_class_index mapping, plus the softmax confidence of the
        top class.
    """
    batch = img_to_tensor(image_bytes=image_bytes)
    scores = F.softmax(model.forward(batch), dim=1)
    confidence, top = scores.max(1)
    class_id, class_name = imagenet_class_index[str(top.item())]
    return class_id, class_name, confidence.item()
def handler(request):
    """Entry point for the cloud function.

    Lazily initialises the device, the ResNet-18 model and the ImageNet
    class-name mapping on the first call, then classifies the uploaded
    image for POST requests.

    Args:
        request: Flask request carrying the image in the 'file' form field.

    Returns:
        Flask.jsonify result for POSTs, or a plain usage string otherwise.
    """
    global device, model, imagenet_class_index
    if device is None:
        logging.info("device created")
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    if model is None:
        logging.info("creating resnet18 model")
        model = models.resnet18(pretrained=True)
        model.eval()
        model.to(device)
    if imagenet_class_index is None:
        logging.info("loading imagenet class names ")
        imagenet_class_index = json.load(open('imagenet_class_index.json'))
    # Guard clause: anything other than POST gets the usage message.
    if request.method != 'POST':
        return "Please specify image"
    logging.info("postrequest received")
    uploaded = request.files['file']
    class_id, class_name, prob = get_prediction(image_bytes=uploaded.read())
    return jsonify({'class_id': class_id, 'class_name': class_name})
| 27.605505 | 78 | 0.627119 | 365 | 3,009 | 5.030137 | 0.369863 | 0.056645 | 0.068627 | 0.034858 | 0.044662 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017066 | 0.279495 | 3,009 | 109 | 79 | 27.605505 | 0.829797 | 0.25457 | 0 | 0 | 0 | 0 | 0.078695 | 0.011996 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.192982 | 0 | 0.315789 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
519570b06f319b34b830bc38363fe48ded0d8966 | 3,686 | py | Python | ressonantes/core/models/organization/organization.py | ag-castro/brazil-ongs-mapping | 80f7542d437913ad92cd74b6e456760f61be32ad | [
"Unlicense"
] | 1 | 2020-09-07T17:33:42.000Z | 2020-09-07T17:33:42.000Z | ressonantes/core/models/organization/organization.py | ag-castro/brazil-ongs-mapping | 80f7542d437913ad92cd74b6e456760f61be32ad | [
"Unlicense"
] | null | null | null | ressonantes/core/models/organization/organization.py | ag-castro/brazil-ongs-mapping | 80f7542d437913ad92cd74b6e456760f61be32ad | [
"Unlicense"
] | null | null | null | import random
import string
from datetime import date
from django.db import models
from django.core.validators import validate_email
from django.core.validators import validate_slug
from django.core.validators import validate_unicode_slug
from django.contrib.auth import get_user_model
from django.utils.translation import ugettext_lazy as _
from utils.validators.identity.validator import IdentityValidator
User = get_user_model()
def _random_slug() -> str:
    """Return a random 60-character slug of lowercase letters and digits.

    Passed as a *callable* default so each new row gets a fresh value.  The
    original inlined ``''.join(...)`` directly in the field definition, which
    is evaluated once at import time: every row then shared the same default
    slug and the ``unique=True`` constraint would fail on the second insert.
    """
    return ''.join(
        random.choice(string.ascii_lowercase + string.digits)
        for _ in range(60)
    )


class Organization(models.Model):
    """Organization Model definitions: ownership, identity, contact and media."""

    # Account that registered/administers the organization.
    owner = models.ForeignKey(
        User,
        verbose_name=_('Owner'),
        related_name="organizations",
        on_delete=models.DO_NOTHING
    )
    cnpj = models.CharField(
        verbose_name='CNPJ',
        blank=True, null=True,
        max_length=30, unique=True,
        validators=[IdentityValidator()]
    )
    name = models.CharField(
        unique=True,
        verbose_name='Nome da Organização',
        max_length=150, blank=False, null=False,
    )
    intro = models.CharField(
        max_length=255, verbose_name='Apresentação',
        blank=False, null=False
    )
    about = models.TextField(
        verbose_name='Sobre a Organização',
        blank=False, null=False
    )
    founder = models.CharField(
        max_length=150, blank=False, null=False,
        verbose_name='Fundador',
    )
    # NOTE(review): the choices list is built once at import time, so the
    # upper bound freezes at the year the process started -- TODO confirm
    # whether that is acceptable for long-running deployments.
    founded_at = models.IntegerField(
        null=False, blank=False, verbose_name='Desde',
        help_text='Ano em que a Organização foi fundada.',
        choices=[(i, i) for i in range(1900, date.today().year + 1)],
    )
    causes = models.ManyToManyField(
        'core.Cause', blank=True,
        verbose_name='Causas',
        related_name='organization_causes'
    )
    address = models.OneToOneField(
        'core.Address', blank=True, null=True,
        verbose_name='Endereço',
        on_delete=models.DO_NOTHING,
    )
    website = models.URLField(
        blank=True, null=True,
        verbose_name='Web Site',
        help_text='Digite a URL completa do web site.'
    )
    email = models.EmailField(
        blank=True, null=True,
        validators=[validate_email],
        verbose_name='E-mail para Contatos',
        help_text='Digite um e-mail válido.'
    )
    members = models.ManyToManyField(
        'core.Member',
        verbose_name='Membros',
        blank=True
    )
    # NOTE(review): verbose_name 'Membros' looks copy-pasted from `members`
    # above -- confirm the intended label for social networks.
    social_networks = models.ManyToManyField(
        'core.SocialNetwork',
        verbose_name='Membros',
        blank=True
    )
    slug = models.SlugField(
        verbose_name='Slug', max_length=60,
        help_text='URL de exibição da ONG.',
        unique=True, null=False, blank=False,
        auto_created=True, allow_unicode=False,
        validators=[validate_slug, validate_unicode_slug],
        # Callable default: re-evaluated for every new object (see helper).
        default=_random_slug
    )
    # Audit timestamps.  The original defined created_at/updated_at twice;
    # the first pair was dead code (shadowed by the later rebinding), so only
    # one pair is kept here.
    created_at = models.DateTimeField(
        verbose_name='Criado em',
        auto_now_add=True,
    )
    updated_at = models.DateTimeField(
        verbose_name='Editado em',
        auto_now=True
    )
    logo = models.ForeignKey(
        'core.ImageUploader',
        on_delete=models.DO_NOTHING, null=True,
        verbose_name='Logomarca da ONG',
        related_name='logomarca_ong'
    )
    cover_image = models.ForeignKey(
        'core.ImageUploader',
        on_delete=models.DO_NOTHING, null=True,
        verbose_name='Imagem da Capa',
        related_name='coverimage_ong'
    )
| 30.213115 | 69 | 0.645958 | 420 | 3,686 | 5.488095 | 0.330952 | 0.095445 | 0.039046 | 0.027766 | 0.313232 | 0.269848 | 0.196095 | 0.169197 | 0.169197 | 0.169197 | 0 | 0.00726 | 0.252577 | 3,686 | 121 | 70 | 30.46281 | 0.829401 | 0.009767 | 0 | 0.168142 | 0 | 0 | 0.127333 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.088496 | 0 | 0.274336 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5196b7f30facdbb359a3288ac6d9df2fd67191b7 | 988 | py | Python | source/svlib/tests/conftest.py | DomBalint/SecureVision | 311e4744fdc8513079c2e7dcfbc2316035f92793 | [
"MIT"
] | null | null | null | source/svlib/tests/conftest.py | DomBalint/SecureVision | 311e4744fdc8513079c2e7dcfbc2316035f92793 | [
"MIT"
] | null | null | null | source/svlib/tests/conftest.py | DomBalint/SecureVision | 311e4744fdc8513079c2e7dcfbc2316035f92793 | [
"MIT"
] | null | null | null | import os
import pytest
# YAML fixture files expected under ./data_test (consumed by the fixtures below).
files = ['test1.yaml', 'test2.yaml', 'test3.yaml']
@pytest.fixture(scope='module')
def desired_config_files():
    """Absolute paths of the expected YAML fixture files under ./data_test."""
    data_dir = os.path.join(os.getcwd(), 'data_test')
    return [os.path.join(data_dir, name) for name in files]
# One expected-config mapping per file in `files`, in matching order.
params_configs = [
    {
        'mail_server': {
            'host': 'example11.securevison.intra.net',
            'port': '28031'
        }
    },
    {
        'kafka': {
            'bootstrap_servers': 'example22.securevision.intra.net'
        }
    },
    {
        'sql_alchemy': {
            'host': 'example33.securevision.intra.net',
            'port': '27017',
            'user': 'test',
            'password': 'test'
        }
    }
]
# Pair each YAML filename with its expected parsed mapping: [(file, config), ...].
# The original wrapped zip in a redundant tuple() before list(); list(zip(...))
# yields the identical list of 2-tuples.
params_files_configs = list(zip(files, params_configs))
# Parametrized fixture: each test using it runs once per (filename, config) pair.
@pytest.fixture(scope='module', params=params_files_configs)
def desired_configs(request):
    return request.param
@pytest.fixture(scope='module')
def whole_config():
    """Merge every per-file config section into one combined dict."""
    merged = {}
    for section in params_configs:
        merged.update(section)
    return merged
| 20.163265 | 75 | 0.574899 | 105 | 988 | 5.266667 | 0.514286 | 0.070524 | 0.097649 | 0.130199 | 0.097649 | 0 | 0 | 0 | 0 | 0 | 0 | 0.026499 | 0.274292 | 988 | 48 | 76 | 20.583333 | 0.74477 | 0 | 0 | 0.054054 | 0 | 0 | 0.244939 | 0.096154 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081081 | false | 0.027027 | 0.054054 | 0.054054 | 0.216216 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5196f9c7d01c0adec8869c697882fc216857847d | 643 | py | Python | app.py | miccarrer/copbot | 44c549afd6d80ae516c8615d6b18518b745f9020 | [
"MIT"
] | null | null | null | app.py | miccarrer/copbot | 44c549afd6d80ae516c8615d6b18518b745f9020 | [
"MIT"
] | null | null | null | app.py | miccarrer/copbot | 44c549afd6d80ae516c8615d6b18518b745f9020 | [
"MIT"
] | null | null | null | import logging
from src.config import get_env_var, get_yaml_file
from src.discord.bot import DiscordBot
from src.service import AppService
if __name__ == '__main__':
    # Shared settings (including the logging config) come from the base YAML.
    base_config = get_yaml_file('base')
    logging.basicConfig(**base_config['logging'])
    try:
        logging.info('Starting Bot')
        discord_config = get_yaml_file('discord')
        # Token comes from the environment so it never lives in config files.
        token = get_env_var('DISCORD_TOKEN')
        DiscordBot(discord_config, AppService()).run(token)
    except KeyboardInterrupt:
        logging.info('Stop keys detected')
    except Exception as err:
        # Broad catch at the top-level boundary: log the traceback, then shut down.
        logging.exception(err)
    finally:
        logging.info('Stopping Bot')
| 29.227273 | 59 | 0.698289 | 79 | 643 | 5.392405 | 0.443038 | 0.049296 | 0.077465 | 0.079812 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.202177 | 643 | 21 | 60 | 30.619048 | 0.830409 | 0 | 0 | 0 | 0 | 0 | 0.125972 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.222222 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51992a037bb72c62719a2b885a904c938dc3ece2 | 9,821 | py | Python | main.py | zypangpang/example_of_lightgbm | c4862253cbb4233f1143c21608f3dd6ff9f59d78 | [
"MIT"
] | null | null | null | main.py | zypangpang/example_of_lightgbm | c4862253cbb4233f1143c21608f3dd6ff9f59d78 | [
"MIT"
] | null | null | null | main.py | zypangpang/example_of_lightgbm | c4862253cbb4233f1143c21608f3dd6ff9f59d78 | [
"MIT"
] | 1 | 2021-10-22T02:36:10.000Z | 2021-10-22T02:36:10.000Z | import numpy as np
import pandas as pd
import lightgbm as lgb
from pathlib import Path
from functools import reduce
from sklearn.metrics import roc_auc_score
import hyperopt
from hyperopt import STATUS_OK, Trials, hp, space_eval, tpe
import utils, feature_selector, preprocess
# Pipeline-wide knobs, looked up by name throughout this module.
global_params = {
    'CK_var_threshold': 1,          # variance filter threshold for the CK dataset
    'NASA_var_threshold': 30,       # variance filter threshold for the NASA dataset
    'na_threshold': 0.5,            # drop columns with more NaN than this fraction
    'col_threshold': 0.98,          # correlation cutoff for collinear-column removal
    'remove_collinear_threshold': 700,  # only de-collinearize when row count < this
    'lgb_num_round': 200,           # max boosting rounds per LightGBM fit
    'lgb_early_stop_rounds': 100,   # early-stopping patience
    'hyperopt_rounds': 50,          # hyperopt evaluations per search
    'model_num': 15,                # ensemble size (incl. the final full-data model)
    'hyperopt_per_mode': False,     # re-run hyperopt for every ensemble member
    'remove_non_label_cols': True,  # forwarded to preprocess.process_extra_label
    'last_model_weight': 8,         # up-weight for the final full-data model's column
    'global_metric':'auc'           # LightGBM eval metric / best-score key
}
def train_val_split(X_train, y_train, random_seed):
    """Split into ~70% train / ~30% validation, stratified by the -1/1 label.

    Samples 30% of each label class (seeded) into the validation set and
    keeps the remaining rows for training.

    Returns:
        (X_trn, y_trn, X_val, y_val)
    """
    neg_val = y_train.loc[lambda s: s == -1].sample(frac=0.3, random_state=random_seed)
    pos_val = y_train.loc[lambda s: s == 1].sample(frac=0.3, random_state=random_seed)
    y_val = pd.concat([neg_val, pos_val])
    y_trn = y_train.drop(y_val.index)
    return X_train.loc[y_trn.index, :], y_trn, X_train.loc[y_val.index, :], y_val
def train_lightgbm(params, X_trn, y_trn, X_val, y_val):  # , test_data, test_label):
    """Fit one LightGBM model with early stopping on the validation set."""
    dtrain = lgb.Dataset(X_trn, label=y_trn)
    dval = lgb.Dataset(X_val, label=y_val)
    return lgb.train(
        params,
        dtrain,
        global_params['lgb_num_round'],
        dval,
        early_stopping_rounds=global_params['lgb_early_stop_rounds'],
        verbose_eval=100,
    )
def hyperopt_lightgbm(X_train: pd.DataFrame, y_train: pd.Series, X_val, y_val):
    """Search LightGBM hyperparameters with hyperopt/TPE.

    Returns:
        (params, drop_feature, best_iter): the fixed params merged with the
        best hyperparameters found, the union of zero-gain features across
        all trials, and the best trial's early-stopped iteration count.
    """
    ## fixed lightgbm params
    params = {
        "objective": "binary",
        "metric": global_params['global_metric'],
        "verbosity": -1,
        "seed": 1,
        "num_threads": 4,
        "feature_fraction": .6,
        "bagging_fraction": 0.8,
        "bagging_freq": 5,
        "reg_alpha": 0.1,
        "reg_lambda": 0.1,
        # "learning_rate": 0.1,
        # "num_leaves": 32
    }
    ## space for var_threshold search
    # NOTE(review): space1 and var_series below are defined but never used by
    # the active search -- leftovers from a variance-threshold experiment.
    space1 = hp.choice('var_threshold', np.linspace(0, 20, 15, dtype=int))
    ## space for lightgbm hyperparam search (only two dimensions are active)
    space = {
        "learning_rate": hp.loguniform("learning_rate", np.log(0.01), np.log(0.2)),
        # "max_depth": hp.choice("max_depth", [-1, 2, 3, 4, 5, 6]),
        "num_leaves": hp.choice("num_leaves", np.linspace(16, 64, 4, dtype=int)),
        # "feature_fraction": hp.quniform("feature_fraction", 0.5, 1.0, 0.1),
        # "bagging_fraction": hp.quniform("bagging_fraction", 0.5, 1.0, 0.1),
        # "bagging_freq": hp.choice("bagging_freq", np.linspace(0, 10, 1, dtype=int)),
        # "reg_alpha": hp.uniform("reg_alpha", 0, 2),
        # "reg_lambda": hp.uniform("reg_lambda", 0, 2),
        # "min_child_weight": hp.uniform('min_child_weight', 0.5, 10),
        # "scale_pos_weight": hp.uniform('x', 0, 5),
    }
    var_series = X_train.var()

    def objective(hyperparams):
        # Train with the fixed params overridden by this trial's sample, and
        # record zero-gain columns so they can be dropped after the search.
        # X_trn=X_train.loc[:,var_series>hyperparams]
        # X_trn = X_train
        # X_trn, y_trn, X_val, y_val = train_val_split(X_trn, y_train,random_seed)
        model = train_lightgbm({**params, **hyperparams}, X_train, y_train, X_val, y_val)
        # model=train_lightgbm(params,X_trn,y_trn,X_val,y_val)
        score = model.best_score["valid_0"][global_params['global_metric']]
        to_drop = X_train.columns[model.feature_importance('gain') == 0]
        print(f'to drop:{len(to_drop)}')
        # in classification, less is better (hyperopt minimises the loss)
        return {'loss': -score, 'status': STATUS_OK, "drop_feature": to_drop, "best_iter": model.best_iteration}

    trials = Trials()
    best = hyperopt.fmin(fn=objective, space=space, trials=trials,
                         algo=tpe.suggest, max_evals=global_params['hyperopt_rounds'], verbose=1,
                         rstate=np.random.RandomState(1))
    hyperparams = space_eval(space, best)
    print(f"hyperopt auc = {-trials.best_trial['result']['loss']:0.4f} {hyperparams}")
    # Union of the zero-gain feature sets reported by every trial.
    drop_feature = \
        reduce(lambda r1, r2: {'drop_feature': r1['drop_feature'].union(r2['drop_feature'])}, trials.results)[
            'drop_feature']
    print(f'drop features:{len(drop_feature)}')
    return {**params, **hyperparams}, drop_feature, trials.best_trial['result']['best_iter']
def train_all_data(hyperparams, best_num_round, X_train, y_train):
    """Fit a final model on the full training set (no validation, no early stop)."""
    full_data = lgb.Dataset(X_train, label=y_train)
    return lgb.train(hyperparams, full_data, best_num_round, verbose_eval=100)
def lightgbm_predict(models, X_test, y_test):
    """Score the ensemble on the test set and return the ROC-AUC of the blend.

    The last model in `models` is up-weighted by multiplying its prediction
    column by global_params['last_model_weight'] before the row-wise mean.
    """
    predictions = {}
    for idx, mdl in enumerate(models):
        predictions[f'model_{idx}'] = mdl.predict(X_test)
        print(f'model_{idx} predict finished')
    blend = pd.DataFrame(predictions)
    blend.iloc[:, -1] = blend.iloc[:, -1] * global_params['last_model_weight']
    return roc_auc_score(y_test, blend.mean(axis=1))
def test_dataset(dataset_name, train_file_path, test_file_path):
    """Run the full pipeline on one train/test CSV pair and return the test AUC.

    Steps: load + label preprocessing, feature filtering (NaN / constant /
    low-variance / collinear columns), hyperopt search, zero-gain feature
    pruning, greedy importance-based pruning, ensemble training, prediction.
    """
    df = pd.read_csv(str(train_file_path))
    df = preprocess.process_extra_label(df, global_params['remove_non_label_cols'])
    print('finish reading train data')
    df_test = pd.read_csv(str(test_file_path))
    df_test = preprocess.process_extra_label(df_test, global_params['remove_non_label_cols'])
    print('finish reading test data')
    # 'l' is the label column; everything else is a feature.
    df_test_data = df_test.drop(columns=['l'])
    df_test_label = df_test['l']
    # df.isnull().sum().any()
    X_train = df.drop(columns=['l'])
    y_train = df['l']
    y_trn = y_train
    # Feature filtering, in order: NaN-heavy, constant, low-variance columns.
    X_trn = feature_selector.remove_many_na_col(X_train, global_params['na_threshold'])
    print('finish remove na cols')
    X_trn = feature_selector.remove_single_unique(X_trn)
    print('finish remove single unique cols')
    X_trn = feature_selector.remove_small_variance(X_trn, global_params[f'{dataset_name}_var_threshold'])
    print('finish remove small var cols')
    # Collinearity removal is comparatively expensive; only do it on small data.
    if len(X_trn.index) < global_params['remove_collinear_threshold']:
        X_trn = feature_selector.remove_collinear_col(X_trn, global_params['col_threshold'])
        print('finish remove collinear cols')
    X_trn, y_trn, X_val, y_val = train_val_split(X_trn, y_trn, 0)
    print('finish split data')
    # Hyperparameter search also reports zero-gain features to drop.
    hyperparams, drop_features, best_num_round = hyperopt_lightgbm(X_trn, y_trn, X_val, y_val)
    print(f'drop_features: {drop_features}')
    X_trn = X_trn.drop(columns=drop_features)
    X_val = X_val.drop(columns=drop_features)
    # Greedy pruning of the least important features while the score improves.
    to_drop=feature_importance_iter(hyperparams,X_trn,y_trn,X_val,y_val)
    X_trn=X_trn.drop(columns=to_drop)
    print(f'X_trn columns:{X_trn.columns}')
    print(f'X_trn columns:{len(X_trn.columns)}')
    # Ensemble: several split-seeded models plus one model on all train data,
    # trained for the mean of the observed best iteration counts.
    models, num_round_list = train_multiple_models(hyperparams, best_num_round, X_trn, y_trn)
    num_round_list.append(best_num_round)
    final_model = train_all_data(hyperparams, int(np.mean(num_round_list)), X_trn, y_trn)
    #final_model = train_all_data(hyperparams, global_params['lgb_num_round'], X_trn, y_trn)
    print("train all data finished")
    models.append(final_model)
    return lightgbm_predict(models, df_test_data.loc[:, X_trn.columns], df_test_label)
def train_multiple_models(hyperparams, num_rounds, X_train, y_train):
    """Fit model_num - 1 LightGBM models on differently seeded train/val splits.

    `num_rounds` is accepted for interface compatibility but unused; each
    model's own early-stopped best iteration is recorded instead.

    Returns:
        (models, best_iteration_list)
    """
    ensemble = []
    best_iters = []
    for seed in range(global_params['model_num'] - 1):
        X_trn, y_trn, X_val, y_val = train_val_split(X_train, y_train, seed)
        if global_params['hyperopt_per_mode']:
            # Optionally re-tune hyperparameters for every ensemble member.
            hyperparams, drop_features, best_num_round = hyperopt_lightgbm(
                X_trn, y_trn, X_val, y_val)
        fitted = train_lightgbm(hyperparams, X_trn, y_trn, X_val, y_val)
        best_iters.append(fitted.best_iteration)
        ensemble.append(fitted)
        print(f"Train model_{seed} finished")
    return ensemble, best_iters
def feature_importance_iter(hyperparams,X_trn,y_trn,X_val,y_val):
    """Greedily drop the least important features while the val score improves.

    Trains a baseline, ranks features by gain importance (ascending), then
    drops one feature at a time and retrains; stops at the first drop that
    hurts the score.

    Returns:
        to_drop: list of feature names whose removal kept or improved the
        validation metric.
    """
    #X_trn,y_trn,X_val,y_val=train_val_split(X_train,y_train,0)
    model=train_lightgbm(hyperparams, X_trn,y_trn,X_val,y_val)
    best_score = model.best_score["valid_0"][global_params['global_metric']]
    importance_df=pd.DataFrame()
    importance_df['feature']=X_trn.columns
    importance_df['importance']=model.feature_importance('gain')
    # Ascending sort: least important candidates are tried first.
    importance_df=importance_df.sort_values('importance')
    to_drop=[]
    for row in importance_df.iterrows():
        # row is (index, Series); row[1]['feature'] is the column name.
        X_trn=X_trn.drop(columns=[row[1]['feature']])
        X_val=X_val.drop(columns=[row[1]['feature']])
        model=train_lightgbm(hyperparams, X_trn,y_trn,X_val,y_val)
        score = model.best_score["valid_0"][global_params['global_metric']]
        if score>=best_score:
            to_drop.append(row[1]['feature'])
            best_score=score
        else:
            # First regression ends the pruning (the last drop is NOT kept in
            # to_drop, even though X_trn was already mutated locally).
            break
    print(f'best_score: {best_score}')
    print(f'to_drop_imp_features: {to_drop}')
    return to_drop
def run_on_data(ds_name, percentage):
    """Evaluate every train/test file pair for one dataset + split percentage.

    Writes the per-file AUCs to ./results/{ds_name}_{percentage}_results.json.
    """
    train_dir = Path(f'./data_split/{ds_name}/{ds_name}Train/{percentage}')
    test_dir = Path(f'./data_split/{ds_name}/{ds_name}Test/{percentage}')
    aucs = {}
    for train_file in train_dir.iterdir():
        # Test files mirror the train files with 'train' -> 'test' in the name.
        test_file = test_dir / train_file.name.replace('train', 'test')
        aucs[train_file.name] = test_dataset(ds_name, train_file, test_file)
    print(aucs)
    utils.write_to_file(aucs, Path(f'./results/{ds_name}_{percentage}_results.json'))
@utils.debug_wrapper
def main():
    """Single debugging run: full pipeline on the combined CK 30% split."""
    ds_name = 'CK'
    per = '30'
    train_path = Path(f'./data_split/{ds_name}/{ds_name}Train/{per}')
    test_path = Path(f'./data_split/{ds_name}/{ds_name}Test/{per}')
    train_file_path = train_path / 'alltrain.csv'
    test_file_path = test_path / 'alltest.csv'
    auc = test_dataset(ds_name, train_file_path, test_file_path)
    print(f'auc: {auc}')
# Batch experiments: every split percentage for both datasets.
# main() above is kept for one-off debugging runs.
#main()
run_on_data('CK',10)
run_on_data('CK',20)
run_on_data('CK',30)
run_on_data('NASA',30)
run_on_data('NASA',20)
run_on_data('NASA',10)
| 38.363281 | 112 | 0.68099 | 1,496 | 9,821 | 4.131684 | 0.149733 | 0.027827 | 0.01537 | 0.023297 | 0.327132 | 0.260961 | 0.21194 | 0.2011 | 0.191717 | 0.176185 | 0 | 0.016555 | 0.181957 | 9,821 | 255 | 113 | 38.513725 | 0.752801 | 0.103248 | 0 | 0.049451 | 0 | 0 | 0.188404 | 0.063447 | 0 | 0 | 0 | 0 | 0 | 1 | 0.06044 | false | 0 | 0.093407 | 0 | 0.203297 | 0.10989 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
519bee9e5276c118445215e895dd293662cda568 | 2,693 | py | Python | heic-conv.py | shihaocao/heic-converter | 5a6b068dfbf69c982397d20b69d9c67e260eee82 | [
"MIT"
] | null | null | null | heic-conv.py | shihaocao/heic-converter | 5a6b068dfbf69c982397d20b69d9c67e260eee82 | [
"MIT"
] | 1 | 2021-12-24T07:36:25.000Z | 2021-12-25T23:17:20.000Z | heic-conv.py | shihaocao/heic-converter | 5a6b068dfbf69c982397d20b69d9c67e260eee82 | [
"MIT"
] | 1 | 2021-12-24T01:06:11.000Z | 2021-12-24T01:06:11.000Z | '''A python script for batch converting heic files.'''
import argparse
import os
import subprocess
import time
CMD = "heif-convert"
QUALITY = "-q"
QUALITY_ARG = "99"
AUX_FILE_SUFFIX = "-urn:com:apple:photo:2020:aux:hdrgainmap"
def parse_args() -> argparse.Namespace:
    """Parse the converter's command-line options."""
    parser = argparse.ArgumentParser()
    # The three boolean switches share the same shape; declare them in a loop.
    for flag, help_text in (
        ("--daux", "Delete aux files"),
        ("--dorig", "Delete original files"),
        ("--mt", "Run in multi-threaded mode"),
    ):
        parser.add_argument(flag, action="store_true", help=help_text)
    parser.add_argument("-workers", type=int, default=4,
                        help="Number of workers to use in multi-threaded mode")
    return parser.parse_args()
def find_all_heics() -> list:
    """Return all HEIC files in the current directory, regardless of capitalization.

    The original annotated the return type as None and only matched the
    exact spellings '.heic'/'.HEIC'; comparing the lowercased name matches
    any capitalization, as the docstring always claimed.
    """
    return [name for name in os.listdir('.')
            if name.lower().endswith('.heic')]
def convert_and_delete(file_name: str, delete_aux: bool = False, delete_orig: bool = False) -> None:
    """Convert one HEIC file to JPG via heif-convert, optionally cleaning up.

    Args:
        file_name: source file, assumed to end in a 5-char '.heic'/'.HEIC'.
        delete_aux: remove the HDR gain-map side file the conversion emits.
        delete_orig: remove the source HEIC file afterwards.
    """
    stem, extension = file_name[:-5], file_name[-5:]
    target = stem + ".JPG"
    completed = subprocess.run(
        [CMD, QUALITY, QUALITY_ARG, file_name, target],
        stdout=subprocess.PIPE,
    )
    print(completed.stdout.decode("utf-8"))
    if delete_aux:
        aux_path = stem + AUX_FILE_SUFFIX + ".JPG"
        os.remove(aux_path)
        print(f"Deleted aux file: {aux_path}")
    if delete_orig:
        source_path = stem + extension
        os.remove(source_path)
        print(f"Deleted original file: {source_path}\n")
if __name__ == "__main__":
    start_time = time.time()
    args = parse_args()
    heics = find_all_heics()
    print(f'Found {len(heics)} heic files in this directory.')
    if args.mt:
        print("Running in multi-threaded mode.")
        # Imported lazily: only needed on the multi-threaded path.
        from concurrent.futures import ThreadPoolExecutor
        with ThreadPoolExecutor(max_workers=args.workers) as executor:
            for heic in heics:
                executor.submit(convert_and_delete, heic, args.daux, args.dorig)
        # NOTE(review): the returned futures are never inspected, so worker
        # exceptions are silently swallowed -- TODO confirm that's acceptable.
    else:
        for file in heics:
            convert_and_delete(file, args.daux, args.dorig)
    end_time = time.time()
    print(f'Converted {len(heics)} heic files in {end_time - start_time} seconds.')
519d2e3dfdf9c558c8b3e16c368955fda216a00c | 1,251 | py | Python | generate_images.py | rwg1234/sticks | 4d1398c0a659bbc1b4c9e44ae8ea5a5429a96c0f | [
"MIT"
] | null | null | null | generate_images.py | rwg1234/sticks | 4d1398c0a659bbc1b4c9e44ae8ea5a5429a96c0f | [
"MIT"
] | null | null | null | generate_images.py | rwg1234/sticks | 4d1398c0a659bbc1b4c9e44ae8ea5a5429a96c0f | [
"MIT"
] | null | null | null | # Script to create www/assets/sprites/{box,stack}_*.png
# This file is NOT a part of the www/ folder and DOES NOT need to be deployed. It is used for development only.
from PIL import Image
SOURCE_DIRECTORY = "www/assets/matches gens_1-12"
DEST_DIRECTORY = "www/assets/sprites"
for i in range(1, 13):
original_filename = SOURCE_DIRECTORY + f"/{i}MatchInside.png"
im = Image.open(original_filename)
# save the image for the box
(left, upper, right, lower) = (486, 272, 1920, 1356)
box = im.crop((left, upper, right, lower)) # we crop the image because the originals have a lot of transparent space around the box
new_box_filename = DEST_DIRECTORY + f"/box_{i}.png"
box.save(new_box_filename)
# now we construct the image for the stack, by stacking `i` copies of the box on top of each other
box_height = 140 # how much higher is one box than the previous
stack_height = box.height + box_height * (i - 1)
stack = Image.new('RGBA', (box.width, stack_height), (0, 0, 0, 0))
for j in range(i):
y = stack_height - box.height - (box_height * j)
stack.alpha_composite(box, (0, y))
new_stack_filename = DEST_DIRECTORY + f"/stack_{i}.png"
stack.save(new_stack_filename)
print("DONE") | 43.137931 | 135 | 0.692246 | 206 | 1,251 | 4.07767 | 0.456311 | 0.053571 | 0.071429 | 0.033333 | 0.069048 | 0.069048 | 0 | 0 | 0 | 0 | 0 | 0.028971 | 0.19984 | 1,251 | 29 | 136 | 43.137931 | 0.81019 | 0.334932 | 0 | 0 | 0 | 0 | 0.119855 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.052632 | 0 | 0.052632 | 0.052632 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
519e717718802250addcc03ae24b702964950e74 | 15,237 | py | Python | api/backend/app/namespace/api.py | radsec/ottr | 411559a2bac307594c92d4d14667143cd04625ff | [
"Apache-2.0"
] | 207 | 2021-10-29T20:35:04.000Z | 2022-03-02T08:04:06.000Z | api/backend/app/namespace/api.py | wngn123/ottr | 411559a2bac307594c92d4d14667143cd04625ff | [
"Apache-2.0"
] | 3 | 2021-11-05T05:50:57.000Z | 2022-01-03T06:07:18.000Z | api/backend/app/namespace/api.py | wngn123/ottr | 411559a2bac307594c92d4d14667143cd04625ff | [
"Apache-2.0"
] | 19 | 2021-11-03T06:34:46.000Z | 2022-03-21T14:06:54.000Z | """
Copyright 2021-present Airbnb, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import dateutil
import tldextract
from datetime import datetime, timedelta
from flask import abort, request
from flask_restx import Resource
from backend.app.namespace.authorization import validate_token_header
from backend.app.shared.client import query_acme_challenge_records
from backend.app.shared.network import Device
from backend.app.shared import client
from backend.app.models import (api_namespace, authentication_parser,
asset_output, asset_input,
developer_permissions,
privileged_permissions,
admin_permissions, PUBLIC_KEY)
# HTTP Status Codes: https://docs.python.org/3/library/http.html
# Path to the route configuration shipped alongside this package.
CONF_ROUTE_FILE = os.path.join(
    os.path.dirname(__file__), '../config/route.json')

# Module-wide DynamoDB client; region and table come from the environment,
# so importing this module requires AWS_DEFAULT_REGION and TABLE to be set.
dynamodb_client = client.DynamoDBClient(
    region_name=os.environ['AWS_DEFAULT_REGION'], table_name=os.environ['TABLE'])
def authentication_header_parser(value, secret):
    """Validate the Authorization header token; abort with HTTP 401 when invalid."""
    role = validate_token_header(value, secret)
    if role is None:
        abort(401)
    return role
def filter(primary_index, secondary_index):
    """Intersect two query results on system_name.

    Returns the entries of `secondary_index` whose 'system_name' also
    appears in `primary_index`, preserving `secondary_index` order.
    (Note: this intentionally shadows the builtin `filter` within this
    module, matching the existing call sites.)
    """
    wanted = set()
    for entry in primary_index:
        wanted.add(entry['system_name'])
    return [entry for entry in secondary_index if entry['system_name'] in wanted]
def query_expired_certificates(days_until_expiration):
    """Return every scanned host whose certificate expires within the window.

    A host is included when its certificate_expiration minus
    `days_until_expiration` days is already in the past (UTC). Hosts whose
    expiration is the literal string 'None' are skipped.
    """
    window = int(days_until_expiration)
    expired = []
    for host in dynamodb_client.scan_table()['Items']:
        expiration = host['certificate_expiration']
        if expiration == 'None':
            continue
        # ISO-8601 strings compare chronologically when formats match, so the
        # comparison stays on the string form, as stored in the table.
        threshold = (dateutil.parser.parse(expiration)
                     - timedelta(days=window)).isoformat()
        if datetime.utcnow().isoformat() > threshold:
            expired.append(host)
    return expired
@api_namespace.route('/v1/search', methods=['GET'])
class Search(Resource):
    """Read-only search over the asset inventory (developer role or above)."""

    @api_namespace.expect(authentication_parser)
    @api_namespace.doc('search', responses={403: 'Invalid Role'}, params={'host_platform': {'description': 'Host OS Platform', 'in': 'query', 'type': 'str', 'required': False}, 'data_center': {'description': 'Data Center', 'in': 'query', 'type': 'str', 'required': False}, 'ip_address': {'description': 'IPv4 Address: [10.0.0.1]', 'in': 'query', 'type': 'str', 'required': False}, 'system_name': {'description': 'System Name: [subdomain.example.com]', 'in': 'query', 'type': 'str', 'required': False}, 'days_until_expiration': {'description': 'Number of Days (i.e. 30) Or Less Days Until Certificate Expires', 'in': 'query', 'type': 'int', 'required': False}, 'origin': {'description': 'Source of Asset: [API]', 'in': 'query', 'type': 'str', 'required': False}}, description='Search Asset Inventory')
    @api_namespace.response(model=asset_output, code=200, description='Success', as_list=True)
    def get(self):
        """Filter assets by any combination of query parameters.

        Expiration is resolved with a table scan; every other supplied
        field is resolved through its global secondary index, and the
        partial result sets are intersected via `filter`.
        """
        args = authentication_parser.parse_args()
        role = authentication_header_parser(
            args['Authorization'], PUBLIC_KEY)
        try:
            if role in developer_permissions:
                ip_address = request.args.get('ip_address')
                system_name = request.args.get('system_name')
                data_center = request.args.get('data_center')
                host_platform = request.args.get('host_platform')
                days_until_expiration = request.args.get(
                    'days_until_expiration')
                origin = request.args.get('origin')
                # Map of index name -> value requested by the caller.
                conversion = {
                    'ip_address': ip_address,
                    'system_name': system_name,
                    'data_center': data_center,
                    'host_platform': host_platform,
                    'origin': origin
                }
                query = ['ip_address', 'system_name',
                         'data_center', 'host_platform', 'origin']
                unique_list_output = None
                # Scan Table for Expiration
                if days_until_expiration is not None:
                    unique_list_output = query_expired_certificates(
                        days_until_expiration)
                    # No Certificate Expiration Match
                    if len(unique_list_output) == 0:
                        return unique_list_output
                # Query Table Based on Global Secondary Indexes; the first
                # matching parameter seeds the result set, later ones narrow it.
                for elem in query:
                    if conversion[elem] is not None and unique_list_output is None:
                        unique_list_output = dynamodb_client.query_index(
                            '{}_index'.format(elem), elem, conversion[elem])['Items']
                    elif conversion[elem] is not None:
                        query_output = dynamodb_client.query_index(
                            '{}_index'.format(elem), elem, conversion[elem])['Items']
                        unique_list_output = filter(
                            query_output, unique_list_output)
                    else:
                        pass
                return unique_list_output
            else:
                return {'Invalid Permissions': '{} Role Invalid'.format(role)}, 500
        except Exception as error:
            # NOTE(review): error.__doc__ is the exception class docstring,
            # not the message — probably str(error) was intended; confirm
            # before changing the API response shape.
            api_namespace.abort(
                500, error.__doc__, statusCode='500')
@api_namespace.route('/v1/assets', methods=['POST', 'PUT'])
class Assets(Resource):
    """Create and update devices in the asset inventory (privileged role)."""

    @staticmethod
    def _parse_device(json_data):
        """Build a Device from a request payload; origin is always 'API'.

        When subject_alternative_name is absent or empty it defaults to a
        single-entry list containing the common name.
        """
        common_name = json_data.get('common_name')
        subject_alternative_name = json_data.get('subject_alternative_name')
        if not subject_alternative_name:
            subject_alternative_name = [common_name]
        return Device(
            ip_address=json_data.get('ip_address'),
            system_name=json_data.get('system_name'),
            common_name=common_name,
            certificate_authority=json_data.get('certificate_authority'),
            host_platform=json_data.get('host_platform'),
            os_version=json_data.get('os_version'),
            data_center=json_data.get('data_center'),
            device_model=json_data.get('device_model'),
            subject_alternative_name=subject_alternative_name,
            origin='API')

    @api_namespace.doc('create_asset', responses={403: 'Invalid Role', 500: 'Input Validation Error'}, description='Add Device to Asset Inventory')
    @api_namespace.response(model=asset_input, code=201, description='Success')
    @api_namespace.expect(asset_input, authentication_parser)
    def post(self):
        """Add a new device; aborts with 500 when system_name already exists."""
        args = authentication_parser.parse_args()
        role = authentication_header_parser(
            args['Authorization'], PUBLIC_KEY)
        if role not in privileged_permissions:
            return {'Invalid Permissions': '{} Role Invalid'.format(role)}, 500
        json_data = request.json
        system_name = json_data.get('system_name')
        host = self._parse_device(json_data)
        device = dynamodb_client.query_primary_key(
            system_name).get('Items')
        if bool(device):
            api_namespace.abort(500, status="Device Exists: {device}".format(
                device=system_name), statusCode='500')
        else:
            dynamodb_client.create_item(host)
            return api_namespace.marshal(host, asset_input), 201

    # Fixed: the original `responses` mapping repeated the key 500 twice,
    # silently dropping the 'Invalid Role' entry; restored as 403 to match POST.
    @api_namespace.doc('update_asset', responses={403: 'Invalid Role', 500: 'Input Validation Error'}, description='Update Device in Asset Inventory')
    @api_namespace.response(model=asset_input, code=200, description='Success')
    @api_namespace.expect(asset_input, authentication_parser)
    def put(self):
        """Update an existing device; aborts with 500 when system_name is unknown."""
        args = authentication_parser.parse_args()
        role = authentication_header_parser(
            args['Authorization'], PUBLIC_KEY)
        if role not in privileged_permissions:
            return {'Invalid Permissions': '{} Role Invalid'.format(role)}, 500
        json_data = request.json
        system_name = json_data.get('system_name')
        host = self._parse_device(json_data)
        device = dynamodb_client.query_primary_key(
            system_name).get('Items')
        if bool(device):
            dynamodb_client.update_item(host)
            return api_namespace.marshal(host, asset_input), 200
        else:
            api_namespace.abort(500, status="Device Does Not Exist: {device}".format(
                device=system_name), statusCode='500')
@api_namespace.route('/v1/assets/delete/<string:system_name>', methods=['DELETE'])
class DeleteAsset(Resource):
    """Remove a device from the asset inventory (privileged role)."""

    @api_namespace.expect(authentication_parser)
    @api_namespace.doc('delete_asset', responses={204: 'Success', 200: 'Invalid Host', 500: 'Invalid Role'}, description='Delete Device in Asset Inventory')
    def delete(self, system_name):
        """Delete `system_name`; 200 with a message when it does not exist."""
        args = authentication_parser.parse_args()
        role = authentication_header_parser(
            args['Authorization'], PUBLIC_KEY)
        if role in privileged_permissions:
            device = dynamodb_client.query_primary_key(
                system_name).get('Items')
            if not bool(device):
                return {'Invalid Host': '{}'.format(system_name)}, 200
            response = dynamodb_client.delete_item(system_name)
            # NOTE(review): a non-200 DynamoDB status falls through and the
            # handler returns None — confirm whether an error response is wanted.
            if response['ResponseMetadata']['HTTPStatusCode'] == 200:
                return '', 204
        else:
            return {'Invalid Permissions': '{} Role Invalid'.format(role)}, 500
@api_namespace.route('/v1/certificate/rotate/<string:system_name>', methods=['POST'])
class RotateExpiredCertificate(Resource):
    """Trigger certificate rotation for one device (privileged role)."""

    @api_namespace.expect(authentication_parser)
    @api_namespace.doc('rotate_expired_certificate', responses={200: 'Invalid Host', 204: 'Success', 403: 'Invalid Role'}, description='Rotate Certificate for Device')
    def post(self, system_name):
        """Validate the DNS prerequisite, then start the rotation execution."""
        args = authentication_parser.parse_args()
        role = authentication_header_parser(
            args['Authorization'], PUBLIC_KEY)
        if role in privileged_permissions:
            query = dynamodb_client.query_primary_key(system_name)
            if not query['Items']:
                return {'Invalid Host': '{}'.format(system_name)}, 200
            else:
                device = query.get('Items')[0]
                common_name = device.get('common_name')
                # Route53 DNS Mapping: split the common name into
                # registered domain and subdomain for the CNAME lookup.
                output = tldextract.extract(common_name)
                domain = output.domain + '.' + output.suffix
                subdomain = output.subdomain
                # Rotation requires the ACME challenge CNAME to exist first.
                if not query_acme_challenge_records(domain, subdomain):
                    return {'Route53 Error': 'DNS CNAME Record Not Found for {}'.format(common_name)}, 200
                # Hands the device off to the rotation workflow
                # (presumably an AWS Step Functions execution — confirm).
                client.start_execution(device)
                return '', 204
        else:
            return {'Invalid Permissions': '{} Role Invalid'.format(role)}, 403
@api_namespace.route('/v1/management/certificate-validation/unset/<string:system_name>', methods=['PATCH'])
class UnsetCertificateValidation(Resource):
    """Disable certificate verification for one device's next run (admin role)."""

    @api_namespace.expect(authentication_parser)
    @api_namespace.doc('unset_certificate_validation', responses={200: 'Success', 403: 'Invalid Role'}, description='Set Database to Allow HTTP Requests Against Target Device with Self-Signed or Invalid Certificates')
    def patch(self, system_name):
        """Flag `system_name` so the next execution skips certificate checks."""
        args = authentication_parser.parse_args()
        role = authentication_header_parser(
            args['Authorization'], PUBLIC_KEY)
        if role in admin_permissions:
            query = dynamodb_client.query_primary_key(system_name)
            if not query['Items']:
                return {'Invalid Host': '{}'.format(system_name)}, 200
            else:
                response = dynamodb_client.set_certificate_validation(
                    system_name=system_name, status='False')
                # NOTE(review): a non-200 DynamoDB status falls through and
                # returns None — confirm whether an error response is wanted.
                if response['ResponseMetadata']['HTTPStatusCode'] == 200:
                    # Fixed: the dict key was an f-string with no placeholders
                    # (flake8 F541); a plain literal is byte-identical output.
                    return {'Certificate Validation Unset': f'Certificate validation disabled for the next execution on {system_name}. Please ensure this endpoint was only executed if the current certification on {system_name} is either a self-signed or an invalid certificate.'}, 200
        else:
            return {'Invalid Permissions': '{} Role Invalid'.format(role)}, 403
@api_namespace.route('/v1/management/certificate-validation/set/<string:system_name>', methods=['PATCH'])
class SetCertificateValidation(Resource):
    """Re-enable certificate verification for one device (admin role)."""

    @api_namespace.expect(authentication_parser)
    @api_namespace.doc('set_certificate_validation', responses={200: 'Success', 403: 'Invalid Role'}, description='Set Database to Allow Certificate Verification for HTTP Requests on Target Device')
    def patch(self, system_name):
        """Flag `system_name` so future executions verify certificates again."""
        args = authentication_parser.parse_args()
        role = authentication_header_parser(
            args['Authorization'], PUBLIC_KEY)
        if role in admin_permissions:
            query = dynamodb_client.query_primary_key(system_name)
            if not query['Items']:
                return {'Invalid Host': '{}'.format(system_name)}, 200
            else:
                response = dynamodb_client.set_certificate_validation(
                    system_name=system_name, status='True')
                # NOTE(review): a non-200 DynamoDB status falls through and
                # returns None — confirm whether an error response is wanted.
                if response['ResponseMetadata']['HTTPStatusCode'] == 200:
                    # Fixed: the dict key was an f-string with no placeholders
                    # (flake8 F541); a plain literal is byte-identical output.
                    return {'Certificate Validation Enabled': f'Certificate validation enabled on {system_name}. Please ensure {system_name} does not currently have a self-signed or invalid certificate.'}, 200
        else:
            return {'Invalid Permissions': '{} Role Invalid'.format(role)}, 403
| 50.121711 | 800 | 0.64829 | 1,688 | 15,237 | 5.619076 | 0.167654 | 0.048498 | 0.020875 | 0.021402 | 0.558988 | 0.545704 | 0.50058 | 0.482657 | 0.469584 | 0.395888 | 0 | 0.014593 | 0.248934 | 15,237 | 303 | 801 | 50.287129 | 0.814226 | 0.049025 | 0 | 0.491597 | 0 | 0.008403 | 0.204863 | 0.033568 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042017 | false | 0.004202 | 0.046218 | 0 | 0.210084 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
519f171ce6f44a916c5bc406a1cd821a75880bb1 | 2,803 | py | Python | my_robot/launch/robot_description.launch.py | arpit6232/ros2_gazebo_integration | a57c781824166722d68b555e21fc48fcf856a0bd | [
"Apache-2.0"
] | null | null | null | my_robot/launch/robot_description.launch.py | arpit6232/ros2_gazebo_integration | a57c781824166722d68b555e21fc48fcf856a0bd | [
"Apache-2.0"
] | null | null | null | my_robot/launch/robot_description.launch.py | arpit6232/ros2_gazebo_integration | a57c781824166722d68b555e21fc48fcf856a0bd | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import os
import yaml
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument
from launch.actions import IncludeLaunchDescription
from launch.conditions import IfCondition
from launch.launch_description_sources import PythonLaunchDescriptionSource
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import Node
def get_package_file(package, file_path):
    """Resolve `file_path` relative to an ament package's share directory."""
    share_dir = get_package_share_directory(package)
    return os.path.join(share_dir, file_path)
def load_file(file_path):
    """Return the contents of `file_path` as a string, or None when unreadable."""
    try:
        with open(file_path, 'r') as file:
            return file.read()
    except OSError:  # idiomatic spelling: EnvironmentError is an alias of OSError since 3.3
        return None
def load_yaml(file_path):
    """Parse `file_path` as YAML into a dict/list, or None when unreadable."""
    try:
        with open(file_path, 'r') as file:
            return yaml.safe_load(file)
    except OSError:  # idiomatic spelling: EnvironmentError is an alias of OSError since 3.3
        return None
def run_xacro(xacro_file):
    """Expand a .xacro file with the `xacro` tool, writing the output beside it.

    The output file keeps the same name minus the .xacro suffix and its
    path is returned. Raises RuntimeError when the input lacks a .xacro
    extension or when the xacro invocation exits non-zero (previously a
    failed run was silently ignored).
    """
    urdf_file, ext = os.path.splitext(xacro_file)
    if ext != '.xacro':
        raise RuntimeError(f'Input file to xacro must have a .xacro extension, got {xacro_file}')
    status = os.system(f'xacro {xacro_file} -o {urdf_file}')
    if status != 0:
        raise RuntimeError(f'xacro failed with exit status {status} for {xacro_file}')
    return urdf_file
def generate_launch_description():
    """Assemble the launch description: Gazebo plus this robot's expanded URDF.

    `my_robot_node` is constructed but intentionally left out of the
    returned description (matching the original behavior); re-add it in
    the list below when needed. Removed: a leftover debug print of the
    xacro path and several unused local path variables.
    """
    # Expand the xacro model into a plain URDF file on disk.
    xacro_file = get_package_file('my_robot', 'urdf/my_robot.urdf.xacro')
    urdf_file = run_xacro(xacro_file)

    # Bring up Gazebo via its packaged launch file.
    pkg_gazebo_ros = get_package_share_directory('gazebo_ros')
    gazebo = IncludeLaunchDescription(
        PythonLaunchDescriptionSource(
            os.path.join(pkg_gazebo_ros, 'launch', 'gazebo.launch.py'),
        )
    )

    pkg_name = 'my_robot'
    my_robot_node = Node(
        package='my_robot',
        node_executable='my_robot',
        output='screen',
        arguments=[urdf_file],
    )

    return LaunchDescription([
        DeclareLaunchArgument(
            'world',
            default_value=[os.path.join(pkg_name, 'world', 'my_robot.world'), ''],
            description='SDF world file for the robot'
        ),
        gazebo
        # my_robot_node
    ])
| 30.139785 | 99 | 0.699251 | 367 | 2,803 | 5.100817 | 0.283379 | 0.037393 | 0.026709 | 0.051282 | 0.169872 | 0.152778 | 0.126068 | 0.126068 | 0.126068 | 0.09188 | 0 | 0.000453 | 0.212629 | 2,803 | 92 | 100 | 30.467391 | 0.847757 | 0.136639 | 0 | 0.129032 | 0 | 0 | 0.123276 | 0.010029 | 0 | 0 | 0 | 0 | 0 | 1 | 0.080645 | false | 0 | 0.16129 | 0 | 0.354839 | 0.016129 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51a0718e8863a5231ddc644bc610b93556c2f43d | 1,429 | py | Python | wouso/games/grandchallenge/views.py | AlexandruGhergut/wouso | f26244ff58ae626808ae8c58ccc93d21f9f2666f | [
"Apache-2.0"
] | 117 | 2015-01-02T18:07:33.000Z | 2021-01-06T22:36:25.000Z | wouso/games/grandchallenge/views.py | AlexandruGhergut/wouso | f26244ff58ae626808ae8c58ccc93d21f9f2666f | [
"Apache-2.0"
] | 229 | 2015-01-12T07:07:58.000Z | 2019-10-12T08:27:01.000Z | wouso/games/grandchallenge/views.py | AlexandruGhergut/wouso | f26244ff58ae626808ae8c58ccc93d21f9f2666f | [
"Apache-2.0"
] | 96 | 2015-01-07T05:26:09.000Z | 2020-06-25T07:28:51.000Z | from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, render_to_response
from django.template import RequestContext
from models import GrandChallengeGame, GrandChallengeUser
from wouso.core.ui import register_sidebar_block
from wouso.interface import render_string
@login_required
def index(request):
    """Shows all rounds played by the current user.

    Users who are not part of the tournament get an error message page
    instead of the round listing.
    """
    profile = request.user.get_profile()
    gc_user = profile.get_extension(GrandChallengeUser)
    active = gc_user.get_active()
    played = gc_user.get_played()
    # Fixed: `_` (ugettext) was used without ever being imported, raising a
    # NameError for non-participants; the import now lives at the top of the file.
    if gc_user not in GrandChallengeGame.base_query():
        messages.error(request, _('We are sorry, you are not part of the tournament'))
        return render(request, 'grandchallenge/message.html')
    return render_to_response('grandchallenge/index.html',
                              {'active': active, 'played': played, 'gcuser': gc_user, 'gc': GrandChallengeGame},
                              context_instance=RequestContext(request))
def sidebar_widget(context):
    """Render the Grand Challenge sidebar block, or '' when not applicable."""
    game = GrandChallengeGame
    current_user = context.get('user', None)
    if game.disabled():
        return ''
    if not current_user or not current_user.is_authenticated():
        return ''
    extension = current_user.get_profile().get_extension(GrandChallengeUser)
    return render_string('grandchallenge/sidebar.html',
                         {'gc': game, 'gcuser': extension, 'id': 'grandchallenge'})


register_sidebar_block('grandchallenge', sidebar_widget)
51a2c3962a3a17215f77501362d5256d9fd5774a | 4,091 | py | Python | day_16.py | JamesOwers/aoc2017 | 682e73c3c80562e59cb9a3ce44c152fbd1994b97 | [
"MIT"
] | null | null | null | day_16.py | JamesOwers/aoc2017 | 682e73c3c80562e59cb9a3ce44c152fbd1994b97 | [
"MIT"
] | null | null | null | day_16.py | JamesOwers/aoc2017 | 682e73c3c80562e59cb9a3ce44c152fbd1994b97 | [
"MIT"
] | null | null | null | from __future__ import division, print_function
import os
from my_utils.tests import test_function
import string
def spin(inst_string, programs):
    """Rotate `programs` right by int(inst_string) positions, in place.

    Returns the same (mutated) list object.
    """
    count = int(inst_string)
    if count:
        programs[:] = programs[-count:] + programs[:-count]
    return programs
def exchange(inst_string, programs):
    """Swap the programs at the two '/'-separated positions, in place."""
    left, right = (int(part) for part in inst_string.split('/'))
    programs[left], programs[right] = programs[right], programs[left]
    return programs
def partner(inst_string, programs):
    """Swap the positions of the two '/'-separated named programs, in place."""
    name_a, name_b = inst_string.split('/')
    idx_a = programs.index(name_a)
    idx_b = programs.index(name_b)
    programs[idx_a], programs[idx_b] = name_b, name_a
    return programs
# Dispatch table: the first character of a dance move selects its handler.
MOVE_FUN = {
    's': spin,
    'x': exchange,
    'p': partner
}
def part_1(inst_string, programs=None):
    """Function which calculates the solution to part 1

    Runs one full dance and returns the resulting program order.

    Arguments
    ---------
    inst_string : str
        Comma-separated dance moves ('sN', 'xA/B', 'pA/B').
    programs : list of str, optional
        Starting order; defaults to the first 16 lowercase letters.

    Returns
    -------
    str
        Program order after the dance.
    """
    # Build a fresh default on every call: the move functions mutate the
    # list in place, so the old mutable default argument leaked the result
    # of one call into the next.
    if programs is None:
        programs = list(string.ascii_lowercase[:16])
    instruction_list = inst_string.split(',')
    for instr in instruction_list:
        programs = MOVE_FUN[instr[0]](instr[1:], programs)
    return ''.join(programs)
def part_2(inst_string, nr_dances=int(1e9), programs=None):
    """Function which calculates the solution to part 2

    Repeats the dance `nr_dances` times. The sequence of configurations
    eventually cycles, so once a repeat is seen only the remainder after
    the last full cycle is simulated.

    Arguments
    ---------
    inst_string : str
        Comma-separated dance moves ('sN', 'xA/B', 'pA/B').
    nr_dances : int, optional
        Number of dance repetitions (default one billion).
    programs : list of str, optional
        Starting order; defaults to the first 16 lowercase letters.

    Returns
    -------
    str
        Program order after all dances.
    """
    # Fresh default each call — the move functions mutate the list in place,
    # so the old mutable default argument leaked state between calls.
    if programs is None:
        programs = list(string.ascii_lowercase[:16])
    configs = {''.join(programs): 0}
    cycle_detected = 0
    for ii in range(1, nr_dances + 1):
        this_programs = part_1(inst_string, programs=programs)
        if this_programs in configs:
            cycle_detected = 1
            break
        else:
            configs[this_programs] = ii
            programs = list(this_programs)
    if cycle_detected:
        programs = list(this_programs)
        first_occurence = configs[this_programs]
        cycle_len = ii - first_occurence
        # Only the remainder after the last whole cycle still matters.
        remaining_iters = (nr_dances - ii) % cycle_len
        for jj in range(remaining_iters):
            programs = list(part_1(inst_string, programs=programs))
    return ''.join(programs)
def main(test_datas, functions, puzzle_input=None, test_functions=None):
    """Run the self-tests for each part, then solve the puzzle input."""
    if test_functions is None:
        test_functions = functions
    for part_idx, (data, candidate) in enumerate(zip(test_datas, test_functions)):
        if test_function(candidate, data) == 0:
            print('Pt. {} Tests Passed'.format(part_idx + 1))
    if puzzle_input is not None:
        script_name = os.path.basename(__file__)
        for part_idx, solver in enumerate(functions):
            print('{} Pt. {} Solution: {}'.format(
                script_name, part_idx + 1, solver(puzzle_input)))
if __name__ == "__main__":
# Testing data:
# - each element of input list will be passed to function
# - the relative element in output list is the expected output
test_data1 = {
'inputs': ['s1,x3/4,pe/b'],
'outputs': ['baedc']
}
test_data2 = {
'inputs': ['s1,x3/4,pe/b'],
'outputs': ['ceadb'] # contains a cycle of length 4
# this answer at iter 2 % 4
}
# Code to import the actual puzzle input
with open('./inputs/day_16.txt') as f:
puzzle_input = f.read().strip()
# puzzle_input = [line.rstrip('\n') for line in f]
part_1_test = lambda x: part_1(x, programs=list(string.ascii_lowercase[:5]))
part_2_test = lambda x: part_2(x, nr_dances=18, programs=list(string.ascii_lowercase[:5]))
# Main call: performs testing and calculates puzzle outputs
main(test_datas=[test_data2],
functions=[part_2],
puzzle_input=puzzle_input,
test_functions=[part_2_test])
# main(test_datas=[test_data1, test_data2],
# functions=[part_1, part_2],
# puzzle_input=puzzle_input)
| 30.080882 | 94 | 0.615742 | 519 | 4,091 | 4.643545 | 0.271676 | 0.045643 | 0.044813 | 0.038174 | 0.204564 | 0.154357 | 0.078838 | 0.061411 | 0.061411 | 0.061411 | 0 | 0.024124 | 0.260328 | 4,091 | 135 | 95 | 30.303704 | 0.772307 | 0.204595 | 0 | 0.117647 | 0 | 0 | 0.042245 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.070588 | false | 0.011765 | 0.047059 | 0 | 0.188235 | 0.035294 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51a358ddac445393325f7c09b1e0d7e668772ee9 | 1,513 | py | Python | index/handler.py | googleglass/mirror-catfacts-python | 31d957cf9236128f8f4606668ce168b5e1470030 | [
"Apache-2.0"
] | 4 | 2016-10-13T22:17:52.000Z | 2020-08-08T18:29:23.000Z | index/handler.py | googleglass/mirror-catfacts-python | 31d957cf9236128f8f4606668ce168b5e1470030 | [
"Apache-2.0"
] | null | null | null | index/handler.py | googleglass/mirror-catfacts-python | 31d957cf9236128f8f4606668ce168b5e1470030 | [
"Apache-2.0"
] | 5 | 2015-02-21T09:04:13.000Z | 2020-02-02T00:01:38.000Z | # Copyright (C) 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RequestHandlers for starter project."""
__author__ = 'alainv@google.com (Alain Vongsouvanh)'
import jinja2
import webapp2
import util
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader('templates'),
autoescape=True)
class IndexHandler(webapp2.RequestHandler):
"""Request handler to display the index page."""
def get(self):
"""Display the index page."""
approval_prompt = 'auto'
button_display = 'none'
if self.request.get('approvalPrompt') == 'force':
approval_prompt = 'force'
button_display = 'block'
template_data = {
'approvalPrompt': approval_prompt,
'buttonDisplay': button_display,
'clientId': util.get_client_id(),
'scope': ' '.join(util.SCOPES),
}
template = JINJA_ENVIRONMENT.get_template('index.html')
self.response.write(template.render(template_data))
INDEX_ROUTES = [
('/', IndexHandler),
]
| 28.018519 | 74 | 0.708526 | 187 | 1,513 | 5.636364 | 0.625668 | 0.056926 | 0.024668 | 0.030361 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010535 | 0.184402 | 1,513 | 53 | 75 | 28.54717 | 0.843598 | 0.434898 | 0 | 0 | 0 | 0 | 0.163241 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0.12 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51a987bb42f9129c3d434e226fa501c00fe36edf | 9,801 | py | Python | sportrefpy/nfl/player.py | alexkahan/sports_stats | 6fbc435c8fa0dffb0b7152ce42c055a252858c54 | [
"MIT"
] | null | null | null | sportrefpy/nfl/player.py | alexkahan/sports_stats | 6fbc435c8fa0dffb0b7152ce42c055a252858c54 | [
"MIT"
] | null | null | null | sportrefpy/nfl/player.py | alexkahan/sports_stats | 6fbc435c8fa0dffb0b7152ce42c055a252858c54 | [
"MIT"
] | null | null | null | import requests
import os
from bs4 import BeautifulSoup, Comment
import pandas as pd
import numpy as np
import enchant
from sportrefpy.nfl.league import NFL
from sportrefpy.errors.not_found import PlayerNotFound
class NFLPlayer(NFL):
    """A single NFL player page on the reference site.

    Resolves a player's full name against the site's alphabetical index and
    records the player's page URL.  Raises ``PlayerNotFound`` (including a
    spell-checked suggestion when one is available) if the name cannot be
    matched.
    """

    def __init__(self, player):
        """Resolve *player* (full name, case-sensitive) to a reference URL.

        :param player: Player's full name, e.g. ``"Tom Brady"``.
        :raises PlayerNotFound: if the name is not found in the site index.
        """
        super().__init__()
        # Personal word list of known player names, used only to propose a
        # "did you mean ...?" suggestion in the error message.  Built with
        # os.path.join — the original hard-coded Windows "\\" separators,
        # which breaks path resolution on POSIX systems.
        player_dict = enchant.PyPWL(
            os.path.join(
                os.path.dirname(os.path.dirname(__file__)),
                "assets",
                "nfl_players.txt",
            )
        )
        # The site indexes players under the first letter of the last name.
        # Assumes the name contains at least two words — TODO confirm for
        # single-word names.
        first_letter_last_name = player.split()[1][0].upper()
        response = requests.get(self.url + f"/players/{first_letter_last_name}")
        soup = BeautifulSoup(response.text, features="lxml")
        players = soup.find("div", attrs={"id": "div_players"})
        if player in players.text:
            for choice in players:
                if player in choice.text:
                    self.full_name = player
                    self.player_url = self.url + choice.find("a")["href"]
        else:
            try:
                # May raise IndexError when no suggestion exists, or an
                # enchant error; either way fall back to the plain message.
                suggestion = player_dict.suggest(player)[0]
                message = f"""<{player}> not found.
            Is it possible you meant {suggestion}?
            Player names are case-sensitive."""
            except Exception:
                message = f"""<{player}> not found.
            Player names are case-sensitive."""
            raise PlayerNotFound(message)
# def regular_season_batting(self, season=None, stat=None):
# '''
# Returns a players regular seasons batting stats by career.
# '''
# if not self.pitcher:
# batting = pd.read_html(self.player_url, attrs={'id': 'batting_standard'})[0]
# batting.dropna(how='any', axis='rows', subset='Year', inplace=True)
# batting = batting[~batting['Year'].str.contains('Yrs|yrs|yr|Avg')]
# batting = batting[batting['Lg'].str.contains('NL|AL|MLB')]
# batting = batting.apply(pd.to_numeric, errors='ignore')
# batting.set_index('Year', inplace=True)
# elif self.pitcher:
# response = requests.get(self.player_url)
# soup = BeautifulSoup(response.text, features='lxml')
# comments = soup.find_all(string=lambda text:isinstance(text, Comment))
# tables = []
# for comment in comments:
# if 'batting_standard' in str(comment):
# try:
# tables.append(pd.read_html(str(comment)))
# except:
# continue
# batting = tables[0][0]
# batting.dropna(how='any', axis='rows', subset='Year', inplace=True)
# batting = batting[~batting['Year'].str.contains('Yrs|yrs|yr|Avg')]
# batting = batting[batting['Lg'].str.contains('NL|AL|MLB')]
# batting = batting.apply(pd.to_numeric, errors='ignore')
# batting.set_index('Year', inplace=True)
# if season:
# try:
# return batting.loc[season]
# except KeyError:
# return None
# return batting
# def regular_season_pitching(self, season=None):
# '''
# Returns a players regular seasons pitching stats by career.
# '''
# if self.pitcher:
# pitching = pd.read_html(self.player_url, attrs={'id': 'pitching_standard'})[0]
# pitching.dropna(how='any', axis='rows', subset='Year', inplace=True)
# pitching = pitching[~pitching['Year'].str.contains('Yrs|yrs|yr|Avg')]
# pitching = pitching[pitching['Lg'].str.contains('NL|AL|MLB')]
# pitching = pitching.apply(pd.to_numeric, errors='ignore')
# pitching.set_index('Year', inplace=True)
# if season:
# try:
# return pitching.loc[season]
# except KeyError:
# return None
# return pitching
# else:
# return None
# def regular_season_fielding(self, season=None):
# '''
# Returns a players regular seasons fielding stats by career.
# '''
# response = requests.get(self.player_url)
# soup = BeautifulSoup(response.text, features='lxml')
# comments = soup.find_all(string=lambda text:isinstance(text, Comment))
# tables = []
# for comment in comments:
# if 'standard_fielding' in str(comment):
# try:
# tables.append(pd.read_html(str(comment)))
# except:
# continue
# fielding = tables[0][0]
# fielding.dropna(how='any', axis='rows', subset='Year', inplace=True)
# fielding = fielding[~fielding['Year'].str.contains('Seasons')]
# fielding = fielding[fielding['Lg'].str.contains('NL|AL|MLB')]
# fielding = fielding.apply(pd.to_numeric, errors='ignore')
# fielding.set_index('Year', inplace=True)
# if season:
# try:
# return fielding.loc[season]
# except KeyError:
# return None
# return fielding
# def post_season_batting(self, season=None):
# if not self.playoffs:
# return None
# response = requests.get(self.player_url)
# soup = BeautifulSoup(response.text, features='lxml')
# comments = soup.find_all(string=lambda text:isinstance(text, Comment))
# tables = []
# for comment in comments:
# if 'batting_postseason' in str(comment):
# try:
# tables.append(pd.read_html(str(comment)))
# except:
# continue
# batting = tables[0][0]
# batting.dropna(how='any', axis='rows', subset='Year', inplace=True)
# batting = batting[~batting['Year'].str.\
# contains('ALWC|NLWC|ALDS|NLDS|ALCS|NLCS|WS')]
# batting = batting[batting['Lg'].str.contains('NL|AL|MLB')]
# batting = batting.apply(pd.to_numeric, errors='ignore')
# batting.set_index('Year', inplace=True)
# if season:
# try:
# return batting.loc[season]
# except KeyError:
# return None
# return batting
# def post_season_pitching(self, season=None):
# if not self.pitcher:
# return None
# response = requests.get(self.player_url)
# soup = BeautifulSoup(response.text, features='lxml')
# comments = soup.find_all(string=lambda text:isinstance(text, Comment))
# tables = []
# for comment in comments:
# if 'pitching_postseason' in str(comment):
# try:
# tables.append(pd.read_html(str(comment)))
# except:
# continue
# pitching = tables[0][0]
# pitching.dropna(how='any', axis='rows', subset='Year', inplace=True)
# pitching = pitching[~pitching['Year'].str.\
# contains('ALWC|NLWC|ALDS|NLDS|ALCS|NLCS|WS')]
# pitching = pitching[pitching['Lg'].str.contains('NL|AL|MLB')]
# pitching = pitching.apply(pd.to_numeric, errors='ignore')
# pitching.set_index('Year', inplace=True)
# if season:
# try:
# return pitching.loc[season]
# except KeyError:
# return None
# return pitching
# def career_totals_pitching(self, stat=None):
# if self.pitcher:
# reg = pd.read_html(self.player_url, attrs={'id': 'pitching_standard'})[0]
# reg = reg[reg['Year'].str.contains('Yrs', na=False)]
# reg = reg.apply(pd.to_numeric, errors='ignore')
# reg.reset_index(drop=True, inplace=True)
# reg.drop(columns={'Year', 'Age', 'Tm', 'Lg', 'Awards'},
# inplace=True)
# response = requests.get(self.player_url)
# soup = BeautifulSoup(response.text, features='lxml')
# comments = soup.find_all(string=lambda text:isinstance(text, Comment))
# tables = []
# for comment in comments:
# if 'pitching_postseason' in str(comment):
# try:
# tables.append(pd.read_html(str(comment)))
# except:
# continue
# post = tables[0][0]
# post = post[post['Year'].str.contains('Yrs', na=False)]
# post = post.apply(pd.to_numeric, errors='ignore')
# post.drop(columns={'Year', 'Age', 'Tm', 'Lg'},
# inplace=True)
# career = reg.merge(post, how='outer')
# career.drop(columns={'Series', 'Rslt', 'Opp', 'WPA', 'cWPA'}, inplace=True)
# career = pd.DataFrame(career.sum())
# career.columns = ['Totals']
# if stat:
# try:
# return career.loc[stat]
# except KeyError:
# return None
# return career
# else:
# return None
| 42.986842 | 107 | 0.531374 | 1,017 | 9,801 | 5.036382 | 0.158309 | 0.034362 | 0.035143 | 0.02499 | 0.71105 | 0.666927 | 0.604842 | 0.59508 | 0.572433 | 0.556619 | 0 | 0.00277 | 0.336904 | 9,801 | 227 | 108 | 43.176211 | 0.785352 | 0.735741 | 0 | 0.121212 | 0 | 0 | 0.095851 | 0.024066 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030303 | false | 0 | 0.242424 | 0 | 0.30303 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51acd2ce8f5fd62db4d35ce9e5d03738c3f8c0ff | 2,395 | py | Python | undiscord/server/__main__.py | nklapste/undiscord | 221b8387561494f1c721db21ef05729e0abb6b08 | [
"MIT"
] | 3 | 2019-06-14T21:36:08.000Z | 2020-12-21T09:25:30.000Z | undiscord/server/__main__.py | nklapste/undiscord | 221b8387561494f1c721db21ef05729e0abb6b08 | [
"MIT"
] | 3 | 2019-01-13T21:06:04.000Z | 2019-01-14T06:56:44.000Z | undiscord/server/__main__.py | nklapste/undiscord | 221b8387561494f1c721db21ef05729e0abb6b08 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""argparse and entry point script for undiscord flask/cheroot server"""
import argparse
import os
import sys
from logging import getLogger
from cheroot.wsgi import Server as WSGIServer, PathInfoDispatcher
import undiscord.server.server
from undiscord.common import add_log_parser, init_logging
__log__ = getLogger(__name__)
def get_parser() -> argparse.ArgumentParser:
    """Create and return the argparser for undiscord flask/cheroot server"""
    parser = argparse.ArgumentParser(
        description="Start the UnDiscord flask/cheroot server",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )

    # Server-specific options, registered data-driven from one table.
    server_group = parser.add_argument_group("server")
    server_options = [
        (("-d", "--host"),
         {"default": "0.0.0.0", "help": "Hostname to listen on"}),
        (("-p", "--port"),
         {"default": 8080, "type": int, "help": "Port of the webserver"}),
        (("-g", "--graph-dir"),
         {"dest": "graph_dir", "default": "graph",
          "help": "Directory to store generated graphs"}),
        (("--debug",),
         {"action": "store_true",
          "help": "Run the server in Flask debug mode"}),
    ]
    for flags, options in server_options:
        server_group.add_argument(*flags, **options)

    add_log_parser(parser)
    return parser
def main(argv=sys.argv[1:]) -> int:
    """main entry point undiscord flask/cheroot server"""
    args = get_parser().parse_args(argv)
    init_logging(args, "undiscord_server.log")

    # Ensure the graph output directory exists before serving.
    graph_dir = os.path.abspath(args.graph_dir)
    os.makedirs(graph_dir, exist_ok=True)

    __log__.info("starting server: host: {} port: {} graph_dir: {}".format(args.host, args.port, graph_dir))
    undiscord.server.server.GRAPH_DIR = graph_dir

    if args.debug:
        # Flask's built-in development server.
        undiscord.server.server.APP.run(
            host=args.host,
            port=args.port,
            debug=True
        )
    else:
        # Production path: serve the Flask app through cheroot's WSGI server.
        dispatcher = PathInfoDispatcher({'/': undiscord.server.server.APP})
        wsgi_server = WSGIServer((args.host, args.port), dispatcher)
        try:
            wsgi_server.start()
        except KeyboardInterrupt:
            __log__.info("stopping server: KeyboardInterrupt detected")
            wsgi_server.stop()
            return 0
        except Exception:
            __log__.exception("stopping server: unexpected exception")
            raise
# Script entry point: exit with main()'s return code.
if __name__ == "__main__":
    sys.exit(main())
| 31.933333 | 108 | 0.65261 | 282 | 2,395 | 5.333333 | 0.368794 | 0.047872 | 0.055851 | 0.071809 | 0.065824 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005975 | 0.231315 | 2,395 | 74 | 109 | 32.364865 | 0.810972 | 0.093528 | 0 | 0 | 0 | 0 | 0.17688 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0 | 0.134615 | 0 | 0.211538 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51af100bd940df7f4472f28df5f3cb8e0fc2b56d | 8,943 | py | Python | logic/core/game.py | rdelacrz/connect-four | 7123d6e649aadcb76f429c61dde405527211c5b2 | [
"MIT"
] | null | null | null | logic/core/game.py | rdelacrz/connect-four | 7123d6e649aadcb76f429c61dde405527211c5b2 | [
"MIT"
] | null | null | null | logic/core/game.py | rdelacrz/connect-four | 7123d6e649aadcb76f429c61dde405527211c5b2 | [
"MIT"
] | null | null | null | """
Contains the logic needed to run a game of Connect Four.
"""
# Built-in modules
from copy import deepcopy
# User-defined modules
from .components import ConnectFourGrid, Disc
from .exceptions import IllegalAction, IllegalState, InvalidSpace
from ..utilities import js_callback
# Disc colors handed out to players in join order; the list length also caps
# the number of players a single game may have.
DISC_COLORS = [
    '#F5473E',  # red
    '#FEEC49',  # yellow
    '#048B44',  # green
    '#293777',  # blue
]
class Player:
    """A participant in a Connect Four game: an id plus a display name."""

    def __init__(self, player_id: int, name: str):
        self.id = player_id
        self.name = name

    def __deepcopy__(self, memo={}):
        # A fresh instance is a complete copy; both fields are scalars.
        return Player(self.id, self.name)

    @property
    def state(self):
        """JSON-serializable snapshot of this player."""
        return dict(id=self.id, name=self.name)
class ConnectFourGame:
    """
    Encapsulates logic for setting up the initial parameters of a Connect Four
    game and establishing its rules.
    """

    def __init__(self, player_names: 'list[str]' = ['Player One', 'Player Two'], width=7, height=6, victory_condition=4):
        """
        Sets up a game of Connect Four.

        :param `player_names`: List of the names of the players participating in this game.
        :param `width`: Width of the grid.
        :param `height`: Height of the grid.
        :param `victory_condition`: Number of discs that need to line up horizontally,
            vertically, or diagonally on the grid for a single player to win the game.
        :raises IllegalState: if the number of players is below 2 or exceeds the
            number of available disc colors.
        """
        # Checks for valid number of players (at most one per disc color).
        if len(player_names) < 2:
            raise IllegalState('Game cannot be setup without at least two players.')
        elif len(player_names) > len(DISC_COLORS):
            raise IllegalState('Game cannot be setup with more than {0} players.'.format(len(DISC_COLORS)))

        self.players = [Player(index, name) for index, name in enumerate(player_names)]
        self.current_player = 0  # Index into self.players; first player starts.
        self.discs = [Disc(player.id, DISC_COLORS[index]) for index, player in enumerate(self.players)]
        self.grid = ConnectFourGrid(width, height)
        self.victory_condition = victory_condition
        self.winner_id = None  # Set to the winner's player id once the game is won.

    def __repr__(self):
        """
        Produces visual representation of the Connect Four grid (from top to bottom),
        followed by the name of the player who is next to move.

        :return: String representing state of the Connect Four grid.
        """
        board_repr = str(self.grid)
        board_repr += '-------------------------\n'
        board_repr += 'Next Player: {0}'.format(self.players[self.current_player].name)
        return board_repr

    def __deepcopy__(self, memodict={}):
        # width=0/height=0 short-circuits the grid setup for efficiency;
        # every field is then replaced with a copy of this instance's state.
        game = ConnectFourGame(height=0, width=0)
        game.players = deepcopy(self.players)
        game.current_player = self.current_player
        game.discs = deepcopy(self.discs)
        game.grid = deepcopy(self.grid)
        game.victory_condition = self.victory_condition
        game.winner_id = self.winner_id
        return game

    @js_callback
    def get_state(self):
        """Return a JSON-serializable snapshot of the entire game state."""
        state = {
            'players': [player.state for player in self.players],
            'current_player': self.current_player,
            'discs': [disc.state for disc in self.discs],
            'grid': self.grid.state,
            'victory_condition': self.victory_condition,
            'winner_id': self.winner_id
        }
        return state

    def _get_player_chain(self, player_id: int, start_row: int, start_col: int, row_inc: int, col_inc: int):
        """
        Gets a list of discs that belong to the given player, starting from the given
        start row and column, continuing in the direction given by the row and column
        increments, and ending once either a disc belonging to a different player
        (or an empty space) is reached or the edge of the grid is reached.

        :param `player_id`: Player id whose discs are being checked for.
        :param `start_row`: Starting row to check for discs.
        :param `start_col`: Starting column to check for discs.
        :param `row_inc`: Increments the row after every check is made for a disc.
        :param `col_inc`: Increments the column after every check is made for a disc.
        :return: A list of the player's consecutive discs along that direction.
        """
        chain = []
        row = start_row
        col = start_col
        while row >= 0 and row < self.grid.height and col >= 0 and col < self.grid.width:
            disc = self.grid.grid_spaces[col][row].disc
            if disc is not None and disc.player_id == player_id:
                chain.append(self.grid.grid_spaces[col][row].disc)
                row += row_inc
                col += col_inc
            else:
                break
        return chain

    @js_callback
    def check_for_discs_in_row(self, row: int, col: int, discs_in_row: int, player_id: int = None):
        """
        Checks for a line of horizontal, vertical, or diagonal discs that is at
        least the given number of discs long for a single player.

        :param `row`: Starting row to check for discs in a row from.
        :param `col`: Starting column to check for discs in a row from.
        :param `discs_in_row`: Number of discs in a row to check for.
        :param `player_id`: Player id whose discs are being looked for. If none is
            provided, the player id of the disc at the given row and column is used.
        :return: Player id with the given number of discs in a row, or None if no
            such line passes through the given starting row and column.
        :raises InvalidSpace: if (row, col) is outside the grid.
        """
        # Valid indices run 0..height-1 / 0..width-1, so equality with the grid
        # dimensions is already out of bounds (the original used '>', which let
        # row == height / col == width slip through to an IndexError below).
        if row < 0 or row >= self.grid.height or col < 0 or col >= self.grid.width:
            raise InvalidSpace("Attempted to check a space that doesn't exist on the grid!")

        player_id = player_id if player_id is not None else self.grid.grid_spaces[col][row].disc.player_id

        # Checks for vertical line of discs
        upper = self._get_player_chain(player_id, row + 1, col, 1, 0)
        lower = self._get_player_chain(player_id, row - 1, col, -1, 0)
        if len(upper) + len(lower) + 1 >= discs_in_row:
            return player_id

        # Checks for horizontal line of discs
        left = self._get_player_chain(player_id, row, col - 1, 0, -1)
        right = self._get_player_chain(player_id, row, col + 1, 0, 1)
        if len(left) + len(right) + 1 >= discs_in_row:
            return player_id

        # Checks for downward-right diagonal line of discs
        upper_left = self._get_player_chain(player_id, row + 1, col - 1, 1, -1)
        lower_right = self._get_player_chain(player_id, row - 1, col + 1, -1, 1)
        if len(upper_left) + len(lower_right) + 1 >= discs_in_row:
            return player_id

        # Checks for upward-right diagonal line of discs
        lower_left = self._get_player_chain(player_id, row - 1, col - 1, -1, -1)
        upper_right = self._get_player_chain(player_id, row + 1, col + 1, 1, 1)
        if len(lower_left) + len(upper_right) + 1 >= discs_in_row:
            return player_id

        return None

    @js_callback
    def change_player(self, player_id: int = None):
        """
        Changes player. If player id is given, that player id is explicitly set,
        otherwise goes to the next player in the list of players (wrapping around).

        :param `player_id`: Id of player to set.
        :return: Id of player being changed to.
        :raises IllegalAction: if the explicit player id does not exist.
        """
        if player_id is None:
            self.current_player = self.current_player + 1 if self.current_player + 1 < len(self.players) else 0
        else:
            if player_id >= len(self.players):
                raise IllegalAction('Player id does not exist in the list of players')
            self.current_player = player_id
        return self.current_player

    @js_callback
    def drop_disc(self, col_num: int):
        """
        Drops disc belonging to the current player in the given column, and
        switches to the next player unless the drop wins the game.

        :return: Player id if player has won the game, None otherwise.
        """
        disc = self.discs[self.current_player]
        row_num = self.grid.drop_disc(disc, col_num)
        player_id = self.check_for_discs_in_row(row_num, col_num, self.victory_condition)

        # Has next player make move if current player has not won
        if player_id is None:
            self.change_player()
        else:
            self.winner_id = player_id
        return player_id

    @js_callback
    def reset_game(self):
        """
        Starts a new game, reverting grid and game conditions to their initial states.

        :return: State of game after reset.
        """
        self.grid.setup_grid()
        self.current_player = 0
        self.winner_id = None
        return self.get_state()
51b196fb1c16109226ea07074ed93a9af4c9a3cb | 3,321 | py | Python | rdflib/plugins/parsers/pyRdfa/transform/DublinCore.py | gromgull/rdflib | 7c90f646e3734ee6d3081b5d3f699f0f501f6a39 | [
"BSD-3-Clause"
] | 8 | 2019-05-29T09:38:30.000Z | 2021-01-20T03:36:59.000Z | rdflib/plugins/parsers/pyRdfa/transform/DublinCore.py | gromgull/rdflib | 7c90f646e3734ee6d3081b5d3f699f0f501f6a39 | [
"BSD-3-Clause"
] | 12 | 2021-03-09T03:01:16.000Z | 2022-03-11T23:59:36.000Z | rdflib/plugins/parsers/pyRdfa/transform/DublinCore.py | gromgull/rdflib | 7c90f646e3734ee6d3081b5d3f699f0f501f6a39 | [
"BSD-3-Clause"
] | 4 | 2021-06-10T18:54:16.000Z | 2021-10-25T00:42:22.000Z | # -*- coding: utf-8 -*-
"""
Transfomer: handles the Dublin Core recommendation for XHTML for adding DC values. What this means is that:
- DC namespaces are defined via C{<link rel="schema.XX" value="...."/>}
- The 'XX.term' is used much like QNames in C{<link>} and C{<meta>} elements. For the latter, the namespaced names are added to a C{@property} attribute.
This transformer adds "real" namespaces and changes the DC references in link and meta elements to abide to the
RDFa namespace syntax.
@summary: Dublin Core transformer
@requires: U{RDFLib package<http://rdflib.net>}
@organization: U{World Wide Web Consortium<http://www.w3.org>}
@author: U{Ivan Herman<a href="http://www.w3.org/People/Ivan/">}
@license: This software is available for use under the
U{W3C® SOFTWARE NOTICE AND LICENSE<href="http://www.w3.org/Consortium/Legal/2002/copyright-software-20021231">}
@contact: Ivan Herman, ivan@w3.org
"""
"""
@version: $Id: DublinCore.py,v 1.4 2012-01-18 14:16:44 ivan Exp $
$Date: 2012-01-18 14:16:44 $
"""
def DC_transform(html, options, state):
    """
    Transform Dublin Core ``schema.XX`` declarations into RDFa syntax.

    @param html: a DOM node for the top level html element
    @param options: invocation options
    @type options: L{Options<pyRdfa.options>}
    @param state: top level execution state
    @type state: L{State<pyRdfa.state>}
    """
    from ..host import HostLanguage
    if options.host_language not in [HostLanguage.xhtml, HostLanguage.html5, HostLanguage.xhtml5]:
        return

    # The head element is necessary; to be sure, the namespaces are set
    # on that level only.
    try:
        head = html.getElementsByTagName("head")[0]
    except IndexError:
        # No head element: nothing to attach namespaces to, so give up.
        return

    # At first, the DC namespaces must be found: each
    # <link rel="schema.XX" href="..."> becomes an xmlns:XX on <head>.
    dcprefixes = {}
    for link in html.getElementsByTagName("link"):
        if link.hasAttribute("rel"):
            rel = link.getAttribute("rel")
            uri = link.getAttribute("href")
            if uri is not None and rel is not None and rel.startswith("schema."):
                # bingo...
                try:
                    localname = rel.split(".")[1]
                    head.setAttributeNS("", "xmlns:" + localname, uri)
                    dcprefixes[localname] = uri
                except Exception:
                    # problem with the split or the DOM call; just ignore
                    pass

    def _expand_terms(value):
        """Rewrite each space-separated 'prefix.term' token as 'prefix:term'
        for every prefix collected in dcprefixes; other tokens unchanged."""
        new_value = ""
        for term in value.strip().split():
            # see if there is '.' to separate the attributes
            if term.find(".") != -1:
                key, lname = term.split(".", 1)
                if key in dcprefixes and lname != "":
                    # yep, this is one of those...
                    new_value += " " + key + ":" + lname
                else:
                    new_value += " " + term
            else:
                new_value += " " + term
        return new_value.strip()

    # Rewrite the DC references in link@rel in place...
    for link in html.getElementsByTagName("link"):
        if link.hasAttribute("rel"):
            link.setAttribute("rel", _expand_terms(link.getAttribute("rel")))

    # ...and move meta@name values into RDFa's @property attribute.
    for meta in html.getElementsByTagName("meta"):
        if meta.hasAttribute("name"):
            meta.setAttribute("property", _expand_terms(meta.getAttribute("name")))
| 33.887755 | 154 | 0.6486 | 464 | 3,321 | 4.640086 | 0.392241 | 0.013934 | 0.012541 | 0.016721 | 0.196935 | 0.182072 | 0.169066 | 0.169066 | 0.169066 | 0.169066 | 0 | 0.02344 | 0.203553 | 3,321 | 97 | 155 | 34.237113 | 0.79017 | 0.465824 | 0 | 0.489796 | 0 | 0 | 0.04823 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.020408 | false | 0.020408 | 0.020408 | 0 | 0.081633 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51b2b4ac9db120f311d239b140062958cf771fe6 | 2,898 | py | Python | app.py | Yaamboo/suomipelit-api | fd5d0058d4820667dd78669207ae7646055239f4 | [
"Apache-2.0"
] | null | null | null | app.py | Yaamboo/suomipelit-api | fd5d0058d4820667dd78669207ae7646055239f4 | [
"Apache-2.0"
] | null | null | null | app.py | Yaamboo/suomipelit-api | fd5d0058d4820667dd78669207ae7646055239f4 | [
"Apache-2.0"
] | null | null | null | from flask import abort, Flask, jsonify
import sqlite3
import re
from Suomipelit.jsonencoder import OmaEncoder
from Suomipelit.models import Peli, Peliarvostelu, Kappale, Kuva
# Flask application; OmaEncoder serializes the domain objects (Peli, Kuva,
# Kappale, Peliarvostelu) to JSON for the API responses.
app = Flask(__name__)
app.json_encoder = OmaEncoder
@app.route("/api/pelit")
def pelit():
return jsonify(lataa_pelit())
def lataa_pelit():
    """Load the first five games (ordered by id) from the SQLite database.

    :return: list of Peli objects built by muodostaPeli.
    """
    connection = sqlite3.connect("suomipelit.db")
    connection.row_factory = sqlite3.Row
    try:
        c = connection.cursor()
        pelit = []
        for pelirivi in c.execute("SELECT * FROM pelit order by id asc LIMIT 0,5"):
            # Pass the connection, not the cursor: muodostaPeli opens its own
            # cursor from its argument (the original passed the cursor, which
            # has no .cursor() method and crashed on any reviewed game).
            peli = muodostaPeli(pelirivi, connection)
            pelit.append(peli)
        return pelit
    finally:
        # The original leaked the connection on every request.
        connection.close()
@app.route("/api/pelit/<id>")
def peli(id):
#id voi olla vain numeroita
clean_id = int(id)
peli = lataa_peli(clean_id)
if peli is not None:
return jsonify(peli)
abort(404)
def lataa_peli(id):
    """Load a single game by id.

    :param id: numeric primary key of the pelit row.
    :return: a Peli object, or None when no row matches.
    """
    connection = sqlite3.connect("suomipelit.db")
    connection.row_factory = sqlite3.Row
    try:
        c = connection.cursor()
        c.execute("select * from pelit where id = ?", (id,))
        peli = c.fetchone()
        if peli is not None:
            return muodostaPeli(peli, connection)
        return None
    finally:
        # The original leaked the connection on every request.
        connection.close()
def muodostaPeli(pelirivi, connection):
    """Build a Peli object from a pelit table row, including its review.

    :param pelirivi: sqlite3.Row from the pelit table.
    :param connection: sqlite3.Connection used to load review paragraphs.
        A Cursor is also accepted for backward compatibility (its parent
        connection is used), since one caller historically passed a cursor.
    :return: a fully populated Peli.
    """
    peli = Peli(pelirivi["id"])
    peli.nimi = pelirivi["nimi"]
    peli.tekija = pelirivi["tekija"]
    peli.url = pelirivi["url"]
    peli.kuvaus = pelirivi["kuvaus"]
    peli.vaatimukset = pelirivi["vaatimukset"]

    pelikuva = Kuva(pelirivi["id"])
    pelikuva.asemointi = None
    pelikuva.kuvateksti = None
    # Prefer the large image when it is present and non-empty.
    if pelirivi["kuva_iso"] is not None and len(pelirivi["kuva_iso"]) > 0:
        pelikuva.tiedosto = pelirivi["kuva_iso"]
    else:
        pelikuva.tiedosto = pelirivi["kuva"]
    peli.kuva = pelikuva

    # uusittu == 1 marks games with an (updated) review to load.
    if pelirivi["uusittu"] == 1:
        arvostelu = Peliarvostelu()
        arvostelu.julkaistu = pelirivi["paivays"]
        arvostelu.kirjoittaja = pelirivi["user"]
        # Use a fresh cursor so we never disturb a caller's in-progress
        # iteration; a passed-in Cursor exposes its Connection via .connection.
        if isinstance(connection, sqlite3.Connection):
            cursor = connection.cursor()
        else:
            cursor = connection.connection.cursor()
        kappaleet = []
        for rivi in cursor.execute("SELECT * FROM kappale where artikkeli_id = ? and kaytto='PELI' order by artikkeli_id asc, sivu asc, jarjestys", (pelirivi["id"],)):
            kappale = Kappale(rivi["id"], rivi["otsikko"], rivi["teksti"])
            kappale.artikkeliId = rivi["artikkeli_id"]
            kappale.sivu = rivi["sivu"]
            # Guard against NULL kuva (the original crashed on len(None)).
            if rivi["kuva"] is not None and len(rivi["kuva"]) > 0:
                kuva = Kuva(rivi["id"])
                if rivi["kuva_iso"] is not None and len(rivi["kuva_iso"]) > 0:
                    kuva.tiedosto = rivi["kuva_iso"]
                else:
                    kuva.tiedosto = rivi["kuva"]
                kuva.asemointi = rivi["asemointi"]
                kuva.kuvateksti = rivi["kuvateksti"]
            else:
                kuva = None
            kappale.kuva = kuva
            kappaleet.append(kappale)
        arvostelu.kappaleet = kappaleet
        peli.arvostelu = arvostelu
    else:
        peli.arvostelu = None
    return peli
| 28.693069 | 180 | 0.613182 | 332 | 2,898 | 5.286145 | 0.268072 | 0.023932 | 0.02906 | 0.018234 | 0.164103 | 0.118519 | 0.094587 | 0.094587 | 0.094587 | 0.094587 | 0 | 0.006564 | 0.263975 | 2,898 | 100 | 181 | 28.98 | 0.816221 | 0.014493 | 0 | 0.157895 | 0 | 0.013158 | 0.141354 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.065789 | false | 0 | 0.065789 | 0.013158 | 0.210526 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51b31553ef083d8d41b49feb74b1b78a77ce9832 | 1,563 | py | Python | ices_erf32_generator_cli.py | sharkdata/ices | e529a2636f06b942d39b57897ca17023f76fb80d | [
"MIT"
] | null | null | null | ices_erf32_generator_cli.py | sharkdata/ices | e529a2636f06b942d39b57897ca17023f76fb80d | [
"MIT"
] | null | null | null | ices_erf32_generator_cli.py | sharkdata/ices | e529a2636f06b942d39b57897ca17023f76fb80d | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# -*- coding:utf-8 -*-
#
# Copyright (c) 2021-present SMHI, Swedish Meteorological and Hydrological Institute
# License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit).
import pathlib
import click
import ices_erf32_generator_main
# NOTE(review): this declares 'ices_config' while the rest of the module uses
# 'ices_erf32_config'; a 'global' statement at module level is a no-op anyway.
global ices_config
@click.command()
@click.option(
    "--row",
    default=0,
    prompt="Execute row",
    help="Row number used to select which YAML-file to generate ICES-Erf32 from.",
)
def run_erf32_generator_command(row):
    """ """
    # Row 0 means "run every config"; 1..len selects a single config file.
    global ices_erf32_config
    if not 0 <= row <= len(ices_erf32_config):
        print("\n\nERROR: Wrong value. Please try again.\n\n")
        return
    generator = ices_erf32_generator_main.IcesErf32Generator()
    if row:
        generator.generate_erf32(ices_erf32_config[row - 1])
    else:
        for config_file in ices_erf32_config:
            generator.generate_erf32(config_file)
if __name__ == "__main__":
""" """
global ices_erf32_config
ices_erf32_config = []
for file_path in pathlib.Path("erf32_config").glob("ices_erf32_*.yaml"):
ices_erf32_config.append(str(file_path))
ices_erf32_config = sorted(ices_erf32_config)
# Print before command.
print("\n\nICES ERF 3.2 generator.")
print("-----------------------------")
print("Select row number. Press enter to run all.")
print("Press Ctrl-C to terminate.\n")
for index, row in enumerate(ices_erf32_config):
print(index + 1, " ", row)
print("")
# Execute command.
run_erf32_generator_command()
| 29.490566 | 84 | 0.666027 | 206 | 1,563 | 4.81068 | 0.42233 | 0.127144 | 0.151362 | 0.060545 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.042197 | 0.196417 | 1,563 | 52 | 85 | 30.057692 | 0.746815 | 0.152271 | 0 | 0.055556 | 0 | 0 | 0.226994 | 0.022239 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027778 | false | 0 | 0.083333 | 0 | 0.138889 | 0.194444 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51b3dd135b10c21259433ea1463301ed5c72163c | 2,790 | py | Python | test/module_train_test.py | nktankta/PytorchCNNModules | bc1469ceb37477d3f60062f14a750f272e7ceeb0 | [
"MIT"
] | null | null | null | test/module_train_test.py | nktankta/PytorchCNNModules | bc1469ceb37477d3f60062f14a750f272e7ceeb0 | [
"MIT"
] | null | null | null | test/module_train_test.py | nktankta/PytorchCNNModules | bc1469ceb37477d3f60062f14a750f272e7ceeb0 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import numpy as np
from module_easyModel import EasyModel
from module_list import get_test_module
import pytest
test_modules = get_test_module()
transform = transforms.Compose(
[transforms.RandomRotation(15),
transforms.ToTensor(),
transforms.Normalize((0.5, ), (0.5, ))])
trainset = torchvision.datasets.MNIST(root='./data',
train=True,
download=True,
transform=transform)
trainloader = torch.utils.data.DataLoader(trainset,
batch_size=100,
shuffle=True,
num_workers=2)
testset = torchvision.datasets.MNIST(root='./data',
train=False,
download=True,
transform=transform)
testloader = torch.utils.data.DataLoader(testset,
batch_size=100,
shuffle=False,
num_workers=2)
classes = tuple(np.linspace(0, 9, 10, dtype=np.uint8))
criterion = nn.CrossEntropyLoss()
@pytest.mark.parametrize("mode", ["normal","residual","dense"])
@pytest.mark.parametrize("test_module", test_modules)
def test_train_model(test_module,mode):
print("start testing")
net = EasyModel(1,10,test_module,mode=mode).to("cuda")
optimizer = optim.Adam(net.parameters(), lr=0.01)
for epoch in range(2):
running_loss = 0.0
for i, (inputs, labels) in enumerate(trainloader, 0):
inputs = inputs.to("cuda")
labels = labels.to("cuda")
optimizer.zero_grad()
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
if i % 100 == 99:
print('[{:d}, {:5d}] loss: {:.3f}'
.format(epoch + 1, i + 1, running_loss / 100))
running_loss = 0.0
print('Finished Training')
correct = 0
total = 0
with torch.no_grad():
for (images, labels) in testloader:
images = images.cuda()
labels = labels.cuda()
outputs = net(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy: {:.2f} %%'.format(100 * float(correct / total)))
assert float(correct / total)>0.25
| 36.710526 | 69 | 0.524373 | 281 | 2,790 | 5.120996 | 0.398577 | 0.034746 | 0.018068 | 0.038916 | 0.051425 | 0.051425 | 0 | 0 | 0 | 0 | 0 | 0.030474 | 0.364875 | 2,790 | 75 | 70 | 37.2 | 0.781603 | 0 | 0 | 0.151515 | 0 | 0 | 0.047704 | 0 | 0 | 0 | 0 | 0 | 0.015152 | 1 | 0.015152 | false | 0 | 0.136364 | 0 | 0.151515 | 0.060606 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51b52f4360dde9f8fcf753a559f4341aae212c20 | 1,592 | py | Python | Projects/project_2_packages/Team_cool/OLS_team_cool/logit.py | gen-li/modularizationandtesting | 103be0c80bd70ffcf4c700861497745733b72640 | [
"MIT"
] | null | null | null | Projects/project_2_packages/Team_cool/OLS_team_cool/logit.py | gen-li/modularizationandtesting | 103be0c80bd70ffcf4c700861497745733b72640 | [
"MIT"
] | null | null | null | Projects/project_2_packages/Team_cool/OLS_team_cool/logit.py | gen-li/modularizationandtesting | 103be0c80bd70ffcf4c700861497745733b72640 | [
"MIT"
] | null | null | null | import numpy as np
import scipy.stats as st
import statsmodels as sm
from scipy import optimize
# Synthetic data: 100 binary outcomes and two standard-normal regressors.
y = np.random.randint(2, size=(100,1))
x = np.random.normal(0,1,(100,2))
# Reference fit via statsmodels, for comparison with the hand-rolled Logit below.
# NOTE(review): a plain `import statsmodels` may not expose `statsmodels.discrete`;
# this likely needs `import statsmodels.api` — confirm.
res_correct = sm.discrete.discrete_model.Logit(y,x).fit()
res_correct.params
def Logit(b,y,x):
    """Negative log-likelihood of a logistic regression model.

    Parameters
    ----------
    b : coefficient vector, shape (k,)
    y : (n, 1) array of 0/1 outcomes
    x : (n, k) design matrix

    Returns
    -------
    The negative log-likelihood (scalar), suitable for a minimizer.
    """
    n_obs = x.shape[0]
    linear_pred = x @ b
    # P(y=1) under the logistic model is 1 / (1 + exp(-x @ b)).
    success_term = -y.T @ np.log(1 + np.exp(-linear_pred))
    failure_term = (np.ones((n_obs,1)) - y).T @ np.log(1 - 1 / (1 + np.exp(-linear_pred)))
    log_likelihood = success_term + failure_term
    return -log_likelihood[0]
# Sanity-check the objective at an arbitrary coefficient vector.
# Bug fix: Logit's signature is (b, y, x); the coefficients were being
# passed last, which swapped every argument.
Logit(np.array((2,1)), y, x)
s = x.shape[1]
b_0 = np.array((0,0))
# Minimize the negative log-likelihood two ways; both should agree with
# the statsmodels reference fit above.
optimize.minimize(Logit,x0=b_0,args=(y,x))
optimize.fmin_bfgs(Logit, b_0,args=(y,x,))
y.shape
# def OLS(y,x,cf=0.95):
# """
# OLS estimation.
#
# Parameters
# −−−−−−−−−−
# y : Dependent variable
# x : Explanatory variable
# cf: Confidence level
#
# Returns
# −−−−−−−
# beta : Beta
# se: Standard Error
# confidence: Confidence Interval
#
# See Also
# −−−−−−−−
# other_function : This is a related function
# """
#
# beta = np.linalg.inv(x.T @ x) @ (x.T @ y)
#
# se_term1 = ((y - x @ beta).T @ (y - x @ beta)) / (x.shape[0] - 1)
# se_term2 = x.T @ x
# cov_matrix = se_term1 * se_term2
# se = np.sqrt(np.diag(cov_matrix))
#
# confidence = [beta - st.norm.ppf(1 - (1-0.95)/2) * se, beta \
# + st.norm.ppf(1 - (1-0.95)/2) * se]
#
# return {"Beta":beta, "Standard Error":se, "Confidence Interval":confidence}
| 23.411765 | 119 | 0.557161 | 269 | 1,592 | 3.327138 | 0.30855 | 0.017877 | 0.020112 | 0.023464 | 0.215642 | 0.176536 | 0.149721 | 0.149721 | 0.149721 | 0.149721 | 0 | 0.050654 | 0.231156 | 1,592 | 67 | 120 | 23.761194 | 0.660131 | 0.564698 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.222222 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51bc4a4baca841ac8b5c86065dd040c9313df97d | 5,847 | py | Python | fractals/pytorch/model_processor.py | NeilBostian/ML | df487db8755ad074cdd42f1094747815ae555896 | [
"Unlicense"
] | 1 | 2019-10-11T21:36:06.000Z | 2019-10-11T21:36:06.000Z | fractals/pytorch/model_processor.py | NeilBostian/ML | df487db8755ad074cdd42f1094747815ae555896 | [
"Unlicense"
] | null | null | null | fractals/pytorch/model_processor.py | NeilBostian/ML | df487db8755ad074cdd42f1094747815ae555896 | [
"Unlicense"
] | null | null | null | import os
import random
import pickle
import datetime
import torch
import numpy as np
from PIL import Image
from model import Model
from train_data import TrainData
from loss_train_data import get_loss_train_data
class ModelProcessor():
    """Wraps a Model with checkpointing, training loops and sample rendering.

    State is persisted under `path`: a pickled 'index' file (epoch counter,
    loss-trained flag, checkpoint table) plus 'ckpt-<epoch>.pt' weight files.
    """

    def __init__(self, path):
        # Directory holding the index file and checkpoint weights.
        self.path = path
        self.device = torch.device('cuda')
        self.model = Model(self.device)
        self._load_model()

    def train_frames(self):
        """Train the frame predictor; requires train_loss() to have run first."""
        if not self._loss_trained:
            # os.error is an alias of OSError.
            raise os.error('Loss has not been trained yet (call ModelProcessor.train_loss())')
        for x, y, _ in ModelProcessor._train_frames_iter(300000, 1):
            epoch = self._epoch
            loss = self.model.train_frame(x, y)
            print(f'{datetime.datetime.now()} train_frame epoch {epoch} loss={loss}')
            self._epoch = self._epoch + 1
            if (epoch % 500) == 0:
                # Periodic checkpoint: record loss, save weights, render samples.
                self._checkpoints[epoch] = {
                    'epoch': epoch,
                    'loss': loss
                }
                self.model.save(self._path(f'ckpt-{epoch}.pt'))
                self._save_model()
                self._process_sample_images()

    def train_loss(self):
        """Train the learned loss once; marks the model as loss-trained."""
        if self._loss_trained:
            raise os.error('Loss has already been trained on this model')
        for x, y, epoch in ModelProcessor._train_loss_iter(400, 4):
            loss = self.model.train_loss(x, y)
            print(f'{datetime.datetime.now()} train_loss epoch {epoch} loss={loss}')
        # Record the post-loss-training state as checkpoint 1.
        self._loss_trained = True
        self._checkpoints[1] = {
            'epoch': 1,
            'loss': None
        }
        self.model.save(self._path(f'ckpt-1.pt'))
        self._save_model()

    def _load_model(self):
        """Restore index + latest checkpoint, creating a fresh state if absent."""
        if not os.path.exists(self.path):
            os.mkdir(self.path)
        if not os.path.exists(self._path('index')):
            # First run: initialize an empty persisted state.
            self._loss_trained = False
            self._epoch = 1
            self._checkpoints = { }
            self._save_model()
        else:
            with open(self._path('index'), 'rb') as f:
                mdata = pickle.load(f)
                self._loss_trained = mdata['loss_trained']
                self._epoch = mdata['epoch']
                self._checkpoints = mdata['checkpoints']
            if len(self._checkpoints) > 0:
                # Prefer the newest checkpoint; fall back to ckpt-1 and reset
                # the bookkeeping when its weight file is missing.
                latest_checkpoint = max(self._checkpoints)
                ckpt_path = self._path(f'ckpt-{latest_checkpoint}.pt')
                if os.path.exists(ckpt_path):
                    self.model.load(ckpt_path)
                else:
                    self.model.load(self._path('ckpt-1.pt'))
                    self._epoch = 1
                    self._checkpoints = {
                        1: {'epoch': 1, 'loss': None}
                    }

    def _save_model(self):
        """Pickle the bookkeeping state (not the weights) to the index file."""
        with open(self._path('index'), 'wb') as f:
            mdata = {
                'loss_trained': self._loss_trained,
                'epoch': self._epoch,
                'checkpoints': self._checkpoints
            }
            pickle.dump(mdata, f)

    def _path(self, *paths):
        # Join path components under the processor's working directory.
        return os.path.join(self.path, *paths)

    def _train_frames_iter(num_batches, batch_size):
        # NOTE(review): no `self` and no @staticmethod — this only works
        # because it is always invoked as ModelProcessor._train_frames_iter(...).
        def _train_frames_iter_singles():
            # Yield (current frame, next frame, index) pairs from random clips.
            for i in range(0, batch_size * num_batches):
                td = TrainData.get_random()
                x = td.get_train_image()
                y = td.get_next_train_image()
                yield (x, y, i)
        xs = []
        ys = []
        for x, y, i in _train_frames_iter_singles():
            xs.append(x[0])
            ys.append(y[0])
            if len(xs) >= batch_size:
                # Emit a full batch and start accumulating the next one.
                epoch = int((i + 1) / batch_size)
                yield (np.array(xs), np.array(ys), epoch)
                xs = []
                ys = []

    def _train_loss_iter(num_batches, batch_size):
        # NOTE(review): same missing-@staticmethod pattern as above.
        def _train_loss_iter_singles():
            # 50/50 mix of real frames (label 0) and loss-training data (label 1).
            for i in range(0, batch_size * num_batches):
                g = random.randint(0, 1)
                if g == 0:
                    x = TrainData.get_random().get_train_image()
                    y = 0
                else:
                    x = get_loss_train_data()
                    y = 1
                yield (x, y, i)
        xs = []
        ys = []
        for x, y, i in _train_loss_iter_singles():
            xs.append(x[0])
            ys.append(y)
            if len(xs) >= batch_size:
                epoch = int((i + 1) / batch_size)
                yield (np.array(xs), np.array(ys), epoch)
                xs = []
                ys = []

    def _process_sample_images(self):
        """ Processes images in the '.data/model_sample_inputs' directory through the model, each with 5 samples """
        model = self.model
        epoch = self._epoch
        for img in os.listdir('.data/model_sample_inputs'):
            sample_outputs = self._path('sample_outputs')
            if not os.path.exists(sample_outputs):
                os.mkdir(sample_outputs)
            out_dir = self._path(f'sample_outputs', img)
            if not os.path.exists(out_dir):
                os.mkdir(out_dir)
            print(f'process sample {img}')
            try:
                # Save the raw input as sample 0, then feed the model its own
                # output for 4 further iterations (5 images total per epoch).
                x = Image.open(f'.data/model_sample_inputs/{img}')
                x.load()
                x.save(f'{out_dir}/{epoch}-0.png')
                x = TrainData.preprocess_pil_image(x)
                max_iters = 4
                for i in range(1, max_iters + 1):
                    x = model.get_frame(x)
                    y = TrainData.postprocess_pil_image(x)
                    y.save(f'{out_dir}/{epoch}-{i}.png')
                    y.close()
                    print(f'process sample {img} completed {i}/{max_iters}')
            except Exception as e:
                # Best-effort: a bad sample must not abort the checkpoint.
                print(f'exception processing sample {img} {e}')
                pass
51be5f0b52819e31e6e93a3882ca71e420ce2121 | 573 | py | Python | python/Python-Quick-Start/generator_func.py | pepincho/playground | 9202a3dab880ff789e5fb96b259c3e0c2503cb49 | [
"MIT"
] | null | null | null | python/Python-Quick-Start/generator_func.py | pepincho/playground | 9202a3dab880ff789e5fb96b259c3e0c2503cb49 | [
"MIT"
] | null | null | null | python/Python-Quick-Start/generator_func.py | pepincho/playground | 9202a3dab880ff789e5fb96b259c3e0c2503cb49 | [
"MIT"
] | null | null | null | # print all prime numbers in a range with a generator function in python
#that is an utility function
def isprime(n):
    """Return True if n is a prime number.

    Fixes: values below 2 (0 and negatives) previously fell through the
    empty loop's for/else and were reported prime. Trial division now
    stops at sqrt(n) instead of scanning every value up to n.
    """
    if n < 2:
        return False
    for x in range(2, int(n ** 0.5) + 1):
        if n % x == 0:
            return False
    return True
# generator function is used in the for loop as an iterator;
# this function returns an iterator object
def primes(n = 1):
    """Yield successive primes indefinitely, starting the search at n."""
    candidate = n
    while True:
        # yield makes this a generator
        if isprime(candidate):
            yield candidate
        candidate += 1
#for loop use primes function as an iterator
# Print every prime up to 100, stopping at the first one above the limit.
for n in primes():
    if n > 100: break
    print(n)
| 23.875 | 72 | 0.624782 | 94 | 573 | 3.808511 | 0.457447 | 0.02514 | 0.022346 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.020101 | 0.30541 | 573 | 23 | 73 | 24.913043 | 0.879397 | 0.460733 | 0 | 0.133333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0 | 0 | 0.333333 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51c02944976e6d03f939af067e9a4a01386ea663 | 8,577 | py | Python | pygalgen/generator/common/source_file_parsing/parser_discovery_and_init.py | Kulivox/PyGalGen | 816004bce50703737384e2fbdcfe43b61ce2f4dd | [
"MIT"
] | null | null | null | pygalgen/generator/common/source_file_parsing/parser_discovery_and_init.py | Kulivox/PyGalGen | 816004bce50703737384e2fbdcfe43b61ce2f4dd | [
"MIT"
] | null | null | null | pygalgen/generator/common/source_file_parsing/parser_discovery_and_init.py | Kulivox/PyGalGen | 816004bce50703737384e2fbdcfe43b61ce2f4dd | [
"MIT"
] | null | null | null | """
Module responsible for discovery of import statements importing Argument parser
and discovery of the statements initializing the parser itself
"""
import ast
import sys
from typing import Tuple, Optional, Any, Set, List
from .parsing_exceptions import ArgParseImportNotFound, ArgParserNotUsed
from .parsing_commons import Discovery
ARGPARSE_MODULE_NAME = "argparse"
ARGUMENT_PARSER_CLASS_NAME = "ArgumentParser"
class ImportDiscovery(Discovery):
    """
    Class responsible for discovery and extraction of import statements
    """

    def __init__(self, actions: List[ast.AST]):
        super(ImportDiscovery, self).__init__(actions)
        # Name under which the argparse module is visible (None until found).
        self.argparse_module_alias: Optional[str] = None
        # Name under which ArgumentParser itself is visible (None until found).
        self.argument_parser_alias: Optional[str] = None

    def visit_Import(self, node: ast.Import) -> Any:
        """Record `import argparse [as alias]`; also keep stdlib imports."""
        for item in node.names:
            if item.name == ARGPARSE_MODULE_NAME:
                alias = item.asname if item.asname is not None \
                    else ARGPARSE_MODULE_NAME
                self.argparse_module_alias = alias
                self.actions.append(node)
                return
            # stdlib modules should be also imported during this step
            if item.name in sys.stdlib_module_names:
                self.actions.append(node)

    def visit_ImportFrom(self, node: ast.ImportFrom) -> Any:
        """Record `from argparse import ArgumentParser [as alias]`; also keep
        stdlib from-imports."""
        if node.module is None:
            # Bare relative import (`from . import x`) — nothing to inspect.
            return
        for name in node.module.split("."):
            if name in sys.stdlib_module_names and name != \
                    ARGPARSE_MODULE_NAME:
                self.actions.append(node)
                return
        if ARGPARSE_MODULE_NAME not in node.module:
            return
        for item in node.names:
            if item.name == ARGUMENT_PARSER_CLASS_NAME:
                alias = item.asname if item.asname is not None \
                    else ARGUMENT_PARSER_CLASS_NAME
                self.argument_parser_alias = alias
                self.actions.append(node)
                return
        # stdlib modules should be also imported during this step

    def report_findings(self) -> Tuple:
        """Return (actions, argparse alias, ArgumentParser alias).

        Raises ArgParseImportNotFound when neither alias was discovered.
        """
        if self.argparse_module_alias is None and \
                self.argument_parser_alias is None:
            raise ArgParseImportNotFound
        return (self.actions, self.argparse_module_alias,
                self.argument_parser_alias)
class ParserDiscovery(Discovery):
    """
    Class responsible for discovery of ArgumentParser creation and assignment
    """

    class ParserRenameFinder(ast.NodeVisitor):
        """Helper visitor that locates parser references by argument position."""

        def __init__(self, func_name: str):
            self.func_name = func_name
            self.arg_pos: Optional[int] = None
            # Bug fix: `self.keyword = Optional[str] = None` was a chained
            # assignment that subscript-assigns into typing.Optional (a
            # runtime TypeError); a type annotation was intended.
            self.keyword: Optional[str] = None

        def find_by_argument_pos(self, tree: ast.AST, n: int):
            # Reset state and walk the tree looking for argument position n.
            self.arg_pos = n
            self.keyword = None
            self.visit(tree)

    def __init__(self, actions: List[ast.AST], argparse_alias: Optional[str],
                 argument_parser_alias: Optional[str]):
        self.argument_parser_alias = argument_parser_alias
        self.argparse_module_alias = argparse_alias
        # Variable name the ArgumentParser instance is assigned to.
        self.main_parser_name: Optional[str] = None
        super(ParserDiscovery, self).__init__(actions)

    # checks whether this assignment creates argument parser,
    # and removes any arguments from the constructor,
    # because they should not be needed
    def is_this_argparse(self, node: ast.Assign) -> \
            Tuple[bool, Optional[str]]:
        """Return (True, target name) when `node` assigns a new
        ArgumentParser to a plain variable, else (False, None)."""
        if not (len(node.targets) == 1 and
                isinstance(node.targets[0], ast.Name)):
            return False, None
        name = node.targets[0].id
        # ArgumentParser was imported using from ... import
        if (isinstance(node.value, ast.Call) and
                isinstance(node.value.func, ast.Name) and
                node.value.func.id == self.argument_parser_alias):
            node.value.keywords = []
            node.value.args = []
            return True, name
        # ArgumentParser is created using attribute call on imported module
        if (isinstance(node.value, ast.Call) and
                isinstance(node.value.func, ast.Attribute) and
                node.value.func.attr == ARGUMENT_PARSER_CLASS_NAME and
                node.value.func.value.id == self.argparse_module_alias):
            node.value.args = []
            node.value.keywords = []
            return True, name
        return False, None

    def visit_Assign(self, node: ast.Assign):
        # visit into children of this node is not necessary
        is_argparse, name = self.is_this_argparse(node)
        if is_argparse:
            self.main_parser_name = name
            self.actions.append(node)

    def report_findings(self) -> Tuple:
        """Return (actions, parser name); raise ArgParserNotUsed if absent."""
        if self.main_parser_name is None:
            raise ArgParserNotUsed
        return self.actions, self.main_parser_name
# this visitor class goes through the tree and tries to find creation of
# all argument groups
# it works only if the group is assigned a name
# (is created as a normal variable)
class GroupDiscovery(Discovery):
    """
    Class responsible for discovery of statements that initialize argument
    groups
    """

    def __init__(self, actions: List[ast.AST], main_name: str):
        self.main_name = main_name
        self.groups = set()
        super(GroupDiscovery, self).__init__(actions)

    @staticmethod
    def is_this_group_creation(node: ast.Assign):
        """Return (True, variable name) when `node` assigns the result of an
        `add_argument_group` call to a plain variable, else (False, None)."""
        targets = node.targets
        if len(targets) != 1 or not isinstance(targets[0], ast.Name):
            return False, None
        value = node.value
        creates_group = (isinstance(value, ast.Call)
                         and isinstance(value.func, ast.Attribute)
                         and value.func.attr == "add_argument_group")
        if not creates_group:
            return False, None
        return True, targets[0].id

    def visit_Assign(self, node: ast.Assign):
        # Remember both the statement and the group's variable name.
        found, group_name = self.is_this_group_creation(node)
        if found:
            self.groups.add(group_name)
            self.actions.append(node)

    def report_findings(self) -> Tuple:
        """Return the accumulated (actions, parser name, group names)."""
        return self.actions, self.main_name, self.groups
# # this visitor goes through all calls and extracts those to argument
# parser and groups. IMPORTANT! it also renames parsers on which those calls
# are called to ensure everything can be interpreted correctly
class ArgumentCreationDiscovery(Discovery):
    """
    Class responsible for extraction of statements which initialize the input
    arguments. It is able to extract function calls on the original parser,
    and on the argument groups extracted by GroupDiscovery
    """

    def __init__(self, actions: List[ast.AST], main_name: str,
                 groups: Set[str]):
        self.main_name = main_name
        self.sections = groups
        super(ArgumentCreationDiscovery, self).__init__(actions)

    def is_call_on_parser_or_group(self, node: ast.Call):
        # True for `<parser>.add_argument(...)` or `<group>.add_argument(...)`.
        return isinstance(node.func, ast.Attribute) and \
            node.func.attr == "add_argument" and \
            (node.func.value.id in self.sections or
             node.func.value.id ==self.main_name)

    def visit_Call(self, node: ast.Call) -> Any:
        if self.is_call_on_parser_or_group(node):
            assert isinstance(node.func, ast.Attribute)
            # name of the variable needs to be rewritten,
            # because we want to use only one parser
            # NOTE(review): is_call_on_parser_or_group() already guarantees
            # the name is the main parser or a known section, so this rename
            # branch appears unreachable — confirm intent.
            if node.func.value.id != self.main_name and \
                    node.func.value.id not in self.sections:
                node.func.value.id = self.main_name
            # Wrap the call in an Expr so it can be emitted as a statement.
            self.actions.append(ast.Expr(node))
        self.generic_visit(node)

    def report_findings(self) -> Tuple:
        return self.actions, self.main_name, self.sections
def get_parser_init_and_actions(source: ast.Module) -> \
        Tuple[List[ast.AST], str, Set[str]]:
    """
    Function used to extract necessary imports, parser and argument creation
    function calls

    Parameters
    ----------
    source : ast.Module
        source file parsed into AST

    Returns
    -------
    List of extracted AST nodes, the main name of the parser and a set of
    section names
    """
    # Each discovery stage consumes the previous stage's findings.
    discovery_classes = [ImportDiscovery, ParserDiscovery,
                         GroupDiscovery, ArgumentCreationDiscovery]
    # The trailing comma makes this a 1-tuple, so `cls(*findings)` unpacks
    # the initial empty action list as a single argument.
    findings = [],
    for cls in discovery_classes:
        discovery = cls(*findings)
        discovery.visit(source)
        findings = discovery.report_findings()
    actions, main_name, sections = findings
    return actions, main_name, sections
| 35.589212 | 79 | 0.641483 | 1,046 | 8,577 | 5.09847 | 0.17304 | 0.039377 | 0.028502 | 0.025877 | 0.350834 | 0.270954 | 0.238702 | 0.184699 | 0.174198 | 0.174198 | 0 | 0.00097 | 0.279002 | 8,577 | 240 | 80 | 35.7375 | 0.861417 | 0.198205 | 0 | 0.303448 | 0 | 0 | 0.007876 | 0 | 0 | 0 | 0 | 0 | 0.006897 | 1 | 0.131034 | false | 0 | 0.075862 | 0.02069 | 0.365517 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51c0e5193ac5162e3a0fb7638cb34cf6d76fc644 | 358 | py | Python | cpovc_pfs/pmtct/urls.py | uonafya/cpims-ovc-3.0 | ec2768c00fc0855eb4983a94204cfcdee0824e19 | [
"Apache-2.0"
] | 2 | 2022-02-26T14:04:40.000Z | 2022-03-23T17:33:32.000Z | cpovc_pfs/pmtct/urls.py | uonafya/cpims-ovc-3.0 | ec2768c00fc0855eb4983a94204cfcdee0824e19 | [
"Apache-2.0"
] | null | null | null | cpovc_pfs/pmtct/urls.py | uonafya/cpims-ovc-3.0 | ec2768c00fc0855eb4983a94204cfcdee0824e19 | [
"Apache-2.0"
] | 19 | 2022-02-26T13:44:58.000Z | 2022-03-26T17:20:22.000Z | from django.urls import path
from . import views
# This should contain urls related to OVC ONLY
# URL routes for the PMTCT module: home, create, view and edit screens.
urlpatterns = [
    path('', views.pmtct_home, name='pmtct_home'),
    path('new/<int:id>/', views.new_pmtct, name='new_pmtct'),
    path('view/<int:id>/', views.view_pmtct, name='view_pmtct'),
    path('edit/<int:id>/', views.edit_pmtct, name='edit_pmtct'),
]
| 32.545455 | 64 | 0.678771 | 54 | 358 | 4.351852 | 0.407407 | 0.06383 | 0.12766 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.139665 | 358 | 10 | 65 | 35.8 | 0.762987 | 0.122905 | 0 | 0 | 0 | 0 | 0.25641 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51c49423dfa19179ff9a8960299203c4be600c6e | 311 | py | Python | delay.py | fleidloff/effect-pedal | 20680294e70979ec230ec2798c836a6447c49853 | [
"MIT"
] | null | null | null | delay.py | fleidloff/effect-pedal | 20680294e70979ec230ec2798c836a6447c49853 | [
"MIT"
] | null | null | null | delay.py | fleidloff/effect-pedal | 20680294e70979ec230ec2798c836a6447c49853 | [
"MIT"
] | null | null | null | import pyo
from settings import audioSource
# Boot a mono pyo audio server on the configured device.
s = pyo.Server(audio=audioSource, nchnls=1).boot()
s.start()
# Route input channel 0 straight to the output...
a = pyo.Input(chnl=0).out()
# ...and also through a delay line (0.5 s delay, 50% feedback).
delay = pyo.Delay(a, delay=.5, feedback=.5)
delay.out()
# Interactive loop: type a new delay time in seconds, or "q" to quit.
# NOTE(review): raw_input is Python 2 only (NameError on Python 3), and
# `s` is rebound here from the server object to the typed string — confirm.
while True:
    s = raw_input('Delay');
    if s == "q":
        quit()
    delay.setDelay(float(s))
#s.gui(locals())
51c64365386b4968c448213772fdf24be5c8b7b8 | 297 | py | Python | djangoPharma/drugs/validators.py | thodoris/djangoPharma | 76089e67bc9940651a876d078879469127f5ac66 | [
"Apache-2.0"
] | null | null | null | djangoPharma/drugs/validators.py | thodoris/djangoPharma | 76089e67bc9940651a876d078879469127f5ac66 | [
"Apache-2.0"
] | null | null | null | djangoPharma/drugs/validators.py | thodoris/djangoPharma | 76089e67bc9940651a876d078879469127f5ac66 | [
"Apache-2.0"
] | null | null | null | from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
def validate_integer(value):
    """Validator: reject any value whose type is not exactly int.

    The exact type check (rather than isinstance) also rejects bool,
    which is a subclass of int.

    Raises:
        ValidationError: if value is not an int.
    """
    if type(value) is not int:
        # Bug fix: the message claimed the value "is not an even number",
        # but this validator checks integer-ness, not parity.
        raise ValidationError(
            _('%(value)s is not an integer'),
            params={'value': value},
        )
| 27 | 55 | 0.659933 | 36 | 297 | 5.333333 | 0.722222 | 0.104167 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.249158 | 297 | 10 | 56 | 29.7 | 0.860987 | 0 | 0 | 0 | 0 | 0 | 0.121212 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.25 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51c80e982e0d954d47d04ce8e0ca20615f304653 | 6,568 | py | Python | Reto1/unet_training.py | Hackaton-JusticIA-2021/pista-latente-ML-sol | 3aeeae5970539c0b17358e4ac8585b13c9cea07b | [
"MIT"
] | null | null | null | Reto1/unet_training.py | Hackaton-JusticIA-2021/pista-latente-ML-sol | 3aeeae5970539c0b17358e4ac8585b13c9cea07b | [
"MIT"
] | null | null | null | Reto1/unet_training.py | Hackaton-JusticIA-2021/pista-latente-ML-sol | 3aeeae5970539c0b17358e4ac8585b13c9cea07b | [
"MIT"
] | 1 | 2021-08-22T02:38:38.000Z | 2021-08-22T02:38:38.000Z | import numpy as np
import cv2
import os
import keras
import tensorflow as tf
import random
import matplotlib.pyplot as plt
from glob import glob
from keras import layers
from keras.backend.tensorflow_backend import set_session
from tensorflow.python.client import device_lib
# Dataset locations: two image/mask directory pairs merged into one corpus.
input_dir_1 = "unet/images/"
target_dir_1 = "unet/target/"
input_dir_2= "data/images/"
target_dir_2 = "data/target/"
# Patch size, number of mask classes and training batch size.
img_size = (32, 32)
num_classes = 2
batch_size = 32
# Sorted so each input lines up with its target mask by filename.
input_img_paths_1 = sorted(glob(os.path.join(input_dir_1, '*' + '.png')))
target_img_paths_1 = sorted(glob(os.path.join(target_dir_1, '*' + '.png')))
input_img_paths_2 = sorted(glob(os.path.join(input_dir_2, '*' + '.png')))
target_img_paths_2 = sorted(glob(os.path.join(target_dir_2, '*' + '.png')))
input_img_paths = input_img_paths_1 + input_img_paths_2
target_img_paths = target_img_paths_1 + target_img_paths_2
print("Number of samples:", len(input_img_paths))
# Show the first few pairs as a sanity check.
for input_path, target_path in zip(input_img_paths[:10], target_img_paths[:10]):
    print(input_path, "|", target_path)
class Patches(keras.utils.Sequence):
    """Helper to iterate over the data (as Numpy arrays)."""

    def __init__(self, batch_size, img_size, input_img_paths, target_img_paths):
        self.batch_size = batch_size
        self.img_size = img_size
        self.input_img_paths = input_img_paths
        self.target_img_paths = target_img_paths
        self.current_batch = 0

    def __len__(self):
        # Number of full batches per epoch (any remainder is dropped).
        return len(self.target_img_paths) // self.batch_size

    def __getitem__(self, idx):
        """Returns tuple (input, target) correspond to batch #idx."""
        #print(idx)
        i = idx * self.batch_size
        if i == 0:
            # Reshuffle input/target pairs together at the start of each epoch.
            data_zip_list = list(zip(self.input_img_paths, self.target_img_paths))
            random.shuffle(data_zip_list)
            self.input_img_paths, self.target_img_paths = zip(*data_zip_list)
        batch_input_img_paths = self.input_img_paths[i : i + self.batch_size]
        batch_target_img_paths = self.target_img_paths[i : i + self.batch_size]
        # Inputs: random blur augmentation (3x3, 5x5 or none), scaled to [0, 1].
        x = np.zeros((self.batch_size,) + self.img_size + (3,), dtype="float32")
        for j, path in enumerate(batch_input_img_paths):
            img = cv2.imread(path, cv2.IMREAD_COLOR)
            n = np.random.randint(0, 3)
            if n == 0:
                img = cv2.blur(img, (3, 3)) / 255.
            elif n == 1:
                img = cv2.blur(img, (5, 5)) / 255.
            else:
                img = img / 255.
            x[j] = img
        # Targets: single-channel masks loaded as grayscale.
        y = np.zeros((self.batch_size,) + self.img_size + (1,), dtype="float32")
        for j, path in enumerate(batch_target_img_paths):
            img = cv2.imread(path, cv2.IMREAD_GRAYSCALE) * 1.
            y[j] = np.expand_dims(img, 2)
        return x, y
def get_model(img_size, num_classes):
    """Build a small U-Net-style encoder/decoder with residual shortcuts.

    img_size is the full input shape tuple; the final layer uses a sigmoid,
    producing num_classes output channels per pixel.
    """
    inputs = keras.Input(shape=img_size)

    ### [First half of the network: downsampling inputs] ###

    # Entry block
    x = layers.Conv2D(32, 3, strides=2, padding="same")(inputs)
    x = layers.BatchNormalization()(x)
    x = layers.Activation("relu")(x)

    previous_block_activation = x  # Set aside residual

    # Blocks 1, 2, 3 are identical apart from the feature depth.
    for filters in [64, 128, 256]:
        x = layers.Activation("relu")(x)
        x = layers.SeparableConv2D(filters, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)
        x = layers.Activation("relu")(x)
        x = layers.SeparableConv2D(filters, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)
        x = layers.MaxPooling2D(3, strides=2, padding="same")(x)
        # Project residual
        residual = layers.Conv2D(filters, 1, strides=2, padding="same")(previous_block_activation)
        x = layers.add([x, residual])  # Add back residual
        previous_block_activation = x  # Set aside next residual

    ### [Second half of the network: upsampling inputs] ###

    for filters in [256, 128, 64, 32]:
        x = layers.Activation("relu")(x)
        x = layers.Conv2DTranspose(filters, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)
        x = layers.Activation("relu")(x)
        x = layers.Conv2DTranspose(filters, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)
        x = layers.UpSampling2D(2)(x)
        # Project residual
        residual = layers.UpSampling2D(2)(previous_block_activation)
        residual = layers.Conv2D(filters, 1, padding="same")(residual)
        x = layers.add([x, residual])  # Add back residual
        previous_block_activation = x  # Set aside next residual

    # Add a per-pixel classification layer
    outputs = layers.Conv2D(num_classes, 3, activation="sigmoid", padding="same")(x)

    # Define the model
    model = keras.Model(inputs, outputs)
    return model
# Limit TF 1.x GPU usage: 70% of memory on visible device "0".
tf_config = tf.ConfigProto(device_count = {'GPU': 0})
tf_config.gpu_options.per_process_gpu_memory_fraction = 0.7
tf_config.gpu_options.visible_device_list = "0"
set_session(tf.Session(config=tf_config))

# Free up RAM in case the model definition cells were run multiple times
#keras.backend.clear_session()

# Build model
model = get_model((32, 32, 3), 1)
#model.load_weights('oxford_segmentation.h5')
model.summary()

# Split our img paths into a training and a validation set
val_samples = int(0.2*len(input_img_paths))
data_zip_list = list(zip(input_img_paths, target_img_paths))
random.shuffle(data_zip_list)
input_img_paths, target_img_paths = zip(*data_zip_list)
train_input_img_paths = input_img_paths[:-val_samples]
train_target_img_paths = target_img_paths[:-val_samples]
val_input_img_paths = input_img_paths[-val_samples:]
val_target_img_paths = target_img_paths[-val_samples:]

# Instantiate data Sequences for each split
train_gen = Patches(batch_size, img_size, train_input_img_paths, train_target_img_paths)
val_gen = Patches(batch_size, img_size, val_input_img_paths, val_target_img_paths)

# Configure the model for training with binary cross-entropy (single-channel
# 0/1 masks).
# NOTE(review): `opt` is created but never used — compile() receives the
# string "SGD" instead; confirm which optimizer settings are intended.
opt = keras.optimizers.SGD()
model.compile(optimizer="SGD", loss="binary_crossentropy")

callbacks = [keras.callbacks.ModelCheckpoint("oxford_segmentation.h5", save_best_only=True)]

# Train the model, doing validation at the end of each epoch.
epochs = 10
hist = model.fit_generator(train_gen, epochs=epochs, validation_data=val_gen, callbacks=callbacks)

# Plot training vs. validation loss and save it to loss.pdf.
fig = plt.figure()
plt.plot(hist.history['loss'], label = 'Training value', color = 'darkslategray')
plt.plot(hist.history['val_loss'], label = 'Validation value', color = 'darkslategray', linestyle = '--')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.savefig('loss.pdf')
plt.close(fig)
51c897010d686c63214412a25e6bca01df90e345 | 343 | py | Python | wangdai/spiders/zj_sprider.py | jiaoshenmene/wangdai | 82090948602bc756048b4655b41a8a342e58a03e | [
"MIT"
] | null | null | null | wangdai/spiders/zj_sprider.py | jiaoshenmene/wangdai | 82090948602bc756048b4655b41a8a342e58a03e | [
"MIT"
] | null | null | null | wangdai/spiders/zj_sprider.py | jiaoshenmene/wangdai | 82090948602bc756048b4655b41a8a342e58a03e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import scrapy
class Sprider(scrapy.Spider):
    """Scrapes platform names from the wdzj.com rating page."""
    name = "zj"
    start_urls = [
        'https://www.wdzj.com/pingji.html'
    ]

    def parse(self, response):
        # One item per platform-name cell on the ratings table.
        for platform in response.css('div.tb-platname'):
            yield {
                'name': platform.css('a::text').extract_first(),
            }
51c925099b64da573af34c5499717de76a3fec2e | 706 | py | Python | asterisk/forms.py | ahmednamoha/astroapp | 10ff7d2fa92ce430ce39a036c501f64429ddcec7 | [
"MIT"
] | null | null | null | asterisk/forms.py | ahmednamoha/astroapp | 10ff7d2fa92ce430ce39a036c501f64429ddcec7 | [
"MIT"
] | null | null | null | asterisk/forms.py | ahmednamoha/astroapp | 10ff7d2fa92ce430ce39a036c501f64429ddcec7 | [
"MIT"
] | null | null | null | from django.db import models
from django import forms
from django.forms import ModelForm, TextInput, FileField, NumberInput
from .models import Extentions, Queue
class ExtentionsForm(ModelForm):
    """Form for creating/editing an Extentions record (short code + file)."""

    class Meta:
        model = Extentions
        fields = ['exten', 'file']
        # Render the extension code as a numeric input with a hint.
        widgets = {'exten': NumberInput(
            attrs={'class': 'form-control', 'placeholder': 'Short code'})}
class QueueForm(ModelForm):
    """Form for creating/editing a Queue record (name, option, extension)."""

    class Meta:
        model = Queue
        fields = ['name', 'optin', 'exten']
        # Bootstrap-styled widgets with placeholder hints.
        widgets = {'optin': NumberInput(
            attrs={'class': 'form-control', 'placeholder': '1'}), 'name': TextInput(
            attrs={'class': 'form-control', 'placeholder': 'queue name'})}
| 29.416667 | 84 | 0.620397 | 71 | 706 | 6.169014 | 0.422535 | 0.068493 | 0.09589 | 0.143836 | 0.269406 | 0.196347 | 0 | 0 | 0 | 0 | 0 | 0.001848 | 0.233711 | 706 | 23 | 85 | 30.695652 | 0.807763 | 0 | 0 | 0.117647 | 0 | 0 | 0.201133 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.235294 | 0 | 0.470588 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51c93dd8819a23b3597d92e0d07d3b2369c52da0 | 1,983 | py | Python | admin-portal/therapy/models.py | oakbani/ksdp-portal | 8f44b3cb0081a7f31b9c8121883dd51945a05520 | [
"MIT"
] | null | null | null | admin-portal/therapy/models.py | oakbani/ksdp-portal | 8f44b3cb0081a7f31b9c8121883dd51945a05520 | [
"MIT"
] | null | null | null | admin-portal/therapy/models.py | oakbani/ksdp-portal | 8f44b3cb0081a7f31b9c8121883dd51945a05520 | [
"MIT"
] | 1 | 2021-09-19T10:58:17.000Z | 2021-09-19T10:58:17.000Z | from django.db import models
from clients.models import Client
# Create your models here.
class TherapyCenter(models.Model):
    """A physical location where therapy sessions take place."""
    title = models.CharField(max_length=30)
    location = models.CharField(max_length=30)
    phone_no = models.CharField(max_length=15)

    def __str__(self):
        return self.title
class Therapist(models.Model):
    """A therapist and the therapy types they offer (1 = Yes, 2 = No)."""
    name = models.CharField(max_length=30)
    contact = models.CharField(max_length=15)
    # Occupational / physical / speech therapy capability flags.
    OT = models.IntegerField(choices=((1, "Yes"), (2, "No")))
    PT = models.IntegerField(choices=((1, "Yes"), (2, "No")))
    ST = models.IntegerField(choices=((1, "Yes"), (2, "No")))

    def __str__(self):
        return self.name
# Weekday choices shared by the schedule models (1 = Monday ... 7 = Sunday).
days = (
    (1, "Monday"),
    (2, "Tuesday"),
    (3, "Wednesday"),
    (4, "Thursday"),
    (5, "Friday"),
    (6, "Saturday"),
    (7, "Sunday"),
)
class TherapistSchedule(models.Model):
    """A therapist's recurring weekly availability at a therapy center."""
    therapist = models.ForeignKey(Therapist, on_delete=models.CASCADE)
    day = models.IntegerField(choices=days)
    start_time = models.TimeField()
    end_time = models.TimeField()
    therapy_center = models.ForeignKey(TherapyCenter, on_delete=models.CASCADE)

    def __str__(self):
        # `days` is 1-based, so index with day - 1 to get the weekday label.
        return f"{self.therapist}: {days[self.day-1][1]} ({self.start_time}-{self.end_time}) at {self.therapy_center}"
class TherapySlot(models.Model):
    """A bookable therapy session on a specific date."""
    title = models.CharField(null=True, blank=True, max_length=30)
    date = models.DateField()
    start_time = models.TimeField()
    end_time = models.TimeField()
    therapist = models.ForeignKey(Therapist, on_delete=models.CASCADE)
    # 1 = OT, 2 = PT, 3 = ST; optional until a type is assigned.
    therapy_type = models.IntegerField(
        choices=((1, "OT"), (2, "PT"), (3, "ST")), null=True, blank=True
    )
    client = models.ForeignKey(Client, on_delete=models.CASCADE, null=True, blank=True)
    # Slots start out Available (1) and become Booked (2) when claimed.
    status = models.IntegerField(choices=((1, "Available"), (2, "Booked")), default=1)

    def __str__(self):
        return f"Therapist: {self.therapist}, Client: {self.client}, {self.date} ({self.start_time}-{self.end_time})"
| 32.508197 | 118 | 0.665154 | 249 | 1,983 | 5.144578 | 0.301205 | 0.070258 | 0.117096 | 0.093677 | 0.466042 | 0.270101 | 0.232631 | 0.157689 | 0 | 0 | 0 | 0.02011 | 0.172466 | 1,983 | 60 | 119 | 33.05 | 0.760512 | 0.012103 | 0 | 0.217391 | 0 | 0.043478 | 0.145631 | 0.05723 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.043478 | 0.086957 | 0.76087 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51ceefe54c4a73ce82c7712450c6f6534c6876d4 | 3,501 | py | Python | service/handlers/my_handler.py | ran-isenberg/aws-lambda-handler-cookbook | adfe58dacd87315151265818869bb842c7eb4971 | [
"MIT"
] | 61 | 2022-02-07T05:21:14.000Z | 2022-03-27T14:11:30.000Z | service/handlers/my_handler.py | ran-isenberg/aws-lambda-handler-cookbook | adfe58dacd87315151265818869bb842c7eb4971 | [
"MIT"
] | 17 | 2022-02-26T05:25:31.000Z | 2022-03-16T20:02:46.000Z | service/handlers/my_handler.py | ran-isenberg/aws-lambda-handler-cookbook | adfe58dacd87315151265818869bb842c7eb4971 | [
"MIT"
] | 4 | 2022-02-17T16:35:27.000Z | 2022-03-07T03:13:07.000Z | from http import HTTPStatus
from typing import Any, Dict
from aws_lambda_powertools.metrics.metrics import MetricUnit
from aws_lambda_powertools.utilities.feature_flags.exceptions import ConfigurationStoreError, SchemaValidationError
from aws_lambda_powertools.utilities.parser import ValidationError, parse
from aws_lambda_powertools.utilities.parser.envelopes import ApiGatewayEnvelope
from aws_lambda_powertools.utilities.typing import LambdaContext
from service.handlers.schemas.dynamic_configuration import FeatureFlagsNames, MyConfiguration
from service.handlers.schemas.env_vars import MyHandlerEnvVars
from service.handlers.schemas.input import Input
from service.handlers.schemas.output import Output
from service.handlers.utils.dynamic_configuration import get_dynamic_configuration_store, parse_configuration
from service.handlers.utils.env_vars_parser import get_environment_variables, init_environment_variables
from service.handlers.utils.http_responses import build_response
from service.handlers.utils.observability import logger, metrics, tracer
@tracer.capture_method(capture_response=False)
def inner_function_example(my_name: str, order_item_count: int) -> Output:
    """Evaluate the request's feature flags and build the success Output."""
    config_store = get_dynamic_configuration_store()

    def _evaluate_flag(flag_name, context):
        # Feature flags default to False when not found in the store.
        return config_store.evaluate(name=flag_name, context=context, default=False)

    campaign = _evaluate_flag(FeatureFlagsNames.TEN_PERCENT_CAMPAIGN.value, {})
    logger.debug('campaign feature flag value', extra={'campaign': campaign})

    premium = _evaluate_flag(FeatureFlagsNames.PREMIUM.value, {'customer_name': my_name})
    logger.debug('premium feature flag value', extra={'premium': premium})

    return Output(success=True, order_item_count=order_item_count)
@init_environment_variables(model=MyHandlerEnvVars)
@metrics.log_metrics
@tracer.capture_lambda_handler(capture_response=False)
def my_handler(event: Dict[str, Any], context: LambdaContext) -> Dict[str, Any]:
    """Lambda entry point behind API Gateway.

    Loads env vars and dynamic configuration, validates the request body
    against the Input schema, runs inner_function_example, and returns an
    HTTP response dict (500 on configuration errors, 400 on invalid input).
    """
    logger.set_correlation_id(context.aws_request_id)
    logger.info('my_handler is called, calling inner_function_example')
    env_vars: MyHandlerEnvVars = get_environment_variables(model=MyHandlerEnvVars)
    logger.debug('environment variables', extra=env_vars.dict())
    try:
        my_configuration: MyConfiguration = parse_configuration(model=MyConfiguration)
        logger.debug('fetched dynamic configuration', extra={'configuration': my_configuration.dict()})
    except (SchemaValidationError, ConfigurationStoreError) as exc:
        logger.exception(f'dynamic configuration error, error={str(exc)}')
        return build_response(http_status=HTTPStatus.INTERNAL_SERVER_ERROR, body={})
    try:
        # we want to extract and parse the HTTP body from the api gw envelope
        # (renamed from 'input' so the builtin is no longer shadowed)
        request_input: Input = parse(event=event, model=Input, envelope=ApiGatewayEnvelope)
        logger.info('got create request', extra={'order_item_count': request_input.order_item_count})
    except (ValidationError, TypeError) as exc:
        logger.error('event failed input validation', extra={'error': str(exc)})
        return build_response(http_status=HTTPStatus.BAD_REQUEST, body={})
    response: Output = inner_function_example(request_input.my_name, request_input.order_item_count)
    logger.info('inner_function_example finished successfully')
    metrics.add_metric(name='ValidEvents', unit=MetricUnit.Count, value=1)
    return build_response(http_status=HTTPStatus.OK, body=response.dict())
| 51.485294 | 115 | 0.790346 | 420 | 3,501 | 6.37619 | 0.304762 | 0.03286 | 0.056759 | 0.042942 | 0.137043 | 0.113144 | 0.037341 | 0.037341 | 0.037341 | 0 | 0 | 0.000326 | 0.123679 | 3,501 | 67 | 116 | 52.253731 | 0.872555 | 0.028849 | 0 | 0.072727 | 0 | 0 | 0.107153 | 0.012953 | 0 | 0 | 0 | 0 | 0 | 1 | 0.036364 | false | 0 | 0.272727 | 0 | 0.381818 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51cf936521268672d39eaa8e60b4fea15504c4c2 | 327 | py | Python | faktura/breadcrumbs.py | Tethik/faktura | a2ffa7d93d9b4afbaafe02e5ae65c5e3541fd969 | [
"MIT"
] | null | null | null | faktura/breadcrumbs.py | Tethik/faktura | a2ffa7d93d9b4afbaafe02e5ae65c5e3541fd969 | [
"MIT"
] | 1 | 2016-02-16T10:06:34.000Z | 2016-02-16T10:06:34.000Z | faktura/breadcrumbs.py | Tethik/faktura | a2ffa7d93d9b4afbaafe02e5ae65c5e3541fd969 | [
"MIT"
] | null | null | null | class Breadcrumb:
def __init__(self, url, text):
self.url = url
self.text = text
# Display text -> URL for every page a breadcrumb may point at; the keys are
# the short words accepted by breadcrumbs() below.
url_dict = {
    'Main Menu': '/',
    'Invoices': '/invoices',
    'Customers': '/customers',
    'Settings': '/settings'
}
def breadcrumbs(*shortwords):
    """Build a Breadcrumb trail from page names (KeyError on unknown names)."""
    trail = []
    for word in shortwords:
        trail.append(Breadcrumb(url_dict[word], word))
    return trail
| 21.8 | 68 | 0.611621 | 36 | 327 | 5.388889 | 0.527778 | 0.072165 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.232416 | 327 | 14 | 69 | 23.357143 | 0.772908 | 0 | 0 | 0 | 0 | 0 | 0.192661 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0 | 0.083333 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51d3468a11499ea6c16dce5d6cf20348e89cbaf6 | 3,170 | py | Python | workon/contrib/tracking/models.py | dalou/django-workon | ef63c0a81c00ef560ed693e435cf3825f5170126 | [
"BSD-3-Clause"
] | null | null | null | workon/contrib/tracking/models.py | dalou/django-workon | ef63c0a81c00ef560ed693e435cf3825f5170126 | [
"BSD-3-Clause"
] | null | null | null | workon/contrib/tracking/models.py | dalou/django-workon | ef63c0a81c00ef560ed693e435cf3825f5170126 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import unicode_literals
import uuid
try:
from django.contrib.contenttypes.fields import GenericForeignKey
except ImportError:
from django.contrib.contenttypes.generic import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import ugettext_lazy as _, pgettext_lazy
from django.db import models
# Used for object modifications (values stored in TrackingEvent.action)
CREATE = 'CREATE'
UPDATE = 'UPDATE'
DELETE = 'DELETE'
# Used for m2m modifications (also TrackingEvent.action values)
ADD = 'ADD'
REMOVE = 'REMOVE'
CLEAR = 'CLEAR'
class TrackingEvent(models.Model):
    """One audit-log entry recording an action performed on some object.

    Both the tracked object and the acting user are referenced through
    generic foreign keys, with a textual repr kept as a fallback in case
    the referenced row is deleted later.
    """
    ACTIONS = (
        (CREATE, _('Create')),
        (UPDATE, _('Update')),
        (DELETE, _('Delete')),
        (ADD, _('Add')),
        (REMOVE, pgettext_lazy('Remove from something', 'Remove')),
        (CLEAR, _('Clear')),
    )
    # UUID primary key generated client-side via uuid4
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    date = models.DateTimeField(
        _("Date"), auto_now_add=True, editable=False
    )
    action = models.CharField(
        _('Action'), max_length=6, choices=ACTIONS, editable=False
    )
    # Generic FK to the object the event is about
    object_content_type = models.ForeignKey(
        ContentType,
        related_name='workon_tracking_object_content_type',
        editable=False
    )
    object_id = models.PositiveIntegerField(editable=False, null=True)
    object = GenericForeignKey('object_content_type', 'object_id')
    object_repr = models.CharField(
        _("Object representation"),
        help_text=_(
            "Object representation, useful if the object is deleted later."
        ),
        max_length=250,
        editable=False
    )
    # Generic FK to the user who performed the action (nullable)
    user_content_type = models.ForeignKey(
        ContentType,
        related_name='workon_tracking_user_content_type',
        editable=False,
        null=True,
    )
    user_id = models.PositiveIntegerField(editable=False, null=True)
    user = GenericForeignKey('user_content_type', 'user_id')
    user_repr = models.CharField(
        _("User representation"),
        help_text=_(
            "User representation, useful if the user is deleted later."
        ),
        max_length=250,
        editable=False
    )
    class Meta:
        db_table = "workon_tracking_tracking_event"
        verbose_name = _('Tracking event')
        verbose_name_plural = _('Tracking events')
        ordering = ['-date']  # newest events first
class TrackedFieldModification(models.Model):
    """A single field change (old -> new value) belonging to a TrackingEvent."""
    # UUID primary key generated client-side via uuid4
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    event = models.ForeignKey(
        TrackingEvent, verbose_name=_("Event"), related_name='fields',
        editable=False
    )
    field = models.CharField(_("Field"), max_length=40, editable=False)  # field name
    # Values are JSON-serialized text; null when there was no prior/new value
    old_value = models.TextField(
        _("Old value"),
        help_text=_("JSON serialized"),
        null=True,
        editable=False,
    )
    new_value = models.TextField(
        _("New value"),
        help_text=_("JSON serialized"),
        null=True,
        editable=False,
    )
    class Meta:
        db_table = "workon_tracking_tracked_field_modification"
        verbose_name = _('Tracking field modification')
        verbose_name_plural = _('Tracking field modifications')
51d35292854e309612a05a0a9928f4f1a1103650 | 12,377 | py | Python | mister_ed/utils/checkpoints.py | jonasnm/geometric-certificates | 8730abaf2ab0c8972a2d40168d5fe64c8670fc62 | [
"MIT"
] | 40 | 2019-01-17T22:17:42.000Z | 2022-03-23T06:24:00.000Z | mister_ed/utils/checkpoints.py | Mortal12138/geometric-certificates | 8730abaf2ab0c8972a2d40168d5fe64c8670fc62 | [
"MIT"
] | 6 | 2019-08-03T08:49:21.000Z | 2022-03-11T23:43:56.000Z | mister_ed/utils/checkpoints.py | Mortal12138/geometric-certificates | 8730abaf2ab0c8972a2d40168d5fe64c8670fc62 | [
"MIT"
] | 4 | 2020-10-22T05:55:30.000Z | 2022-03-15T06:26:55.000Z | """ Code for saving/loading pytorch models and batches of adversarial images
CHECKPOINT NAMING CONVENTIONS:
<unique_experiment_name>.<architecture_abbreviation>.<6 digits of epoch number>.path
e.g.
fgsm_def.resnet32.20180301.120000.path
All checkpoints are stored in CHECKPOINT_DIR
Checkpoints are state dicts only!!!
"""
import torch
import math
import os
import re
import glob
import config
import numpy as np
import utils.pytorch_utils as utils
import random
CHECKPOINT_DIR = config.MODEL_PATH
OUTPUT_IMAGE_DIR = config.OUTPUT_IMAGE_PATH
##############################################################################
# #
# CHECKPOINTING MODELS #
# #
##############################################################################
def clear_experiment(experiment_name, architecture):
    """ Deletes all saved state dicts for an experiment/architecture pair """
    for ckpt_name in params_to_filename(experiment_name, architecture):
        ckpt_path = os.path.join(CHECKPOINT_DIR, ckpt_name)
        if os.path.exists(ckpt_path):
            os.remove(ckpt_path)
def list_saved_epochs(experiment_name, architecture):
    """ Returns a list of int epochs we've checkpointed for this
        experiment name and architecture
    """
    epochs = []
    for fname in params_to_filename(experiment_name, architecture):
        # second-to-last dot-separated token is the epoch ('000042' or 'best')
        token = fname.split('.')[-2]
        epochs.append(int(token) if token.isdigit() else token)
    return epochs
def params_to_filename(experiment_name, architecture, epoch_val=None):
    """ Outputs string name of file.
    ARGS:
        experiment_name : string - name of experiment we're saving
        architecture : string - abbreviation for model architecture
        epoch_val : int/(intLo, intHi)/None -
                    - if int we return this int exactly
                    - if (intLo, intHi) we return all existing filenames with
                      highest epoch in range (intLo, intHi), in sorted order
                    - if None, we return all existing filenames with params
                      in ascending epoch-sorted order
    RETURNS:
        filenames: string or (possibly empty) string[] of just the base name
        of saved models
    """
    if isinstance(epoch_val, int):
        return '.'.join([experiment_name, architecture, '%06d' % epoch_val,
                         'path'])
    elif epoch_val == 'best':
        return '.'.join([experiment_name, architecture, epoch_val, 'path'])

    glob_prefix = os.path.join(CHECKPOINT_DIR,
                               '%s.%s.*' % (experiment_name, architecture))
    # re.escape protects names that contain regex metacharacters, and raw
    # strings avoid the invalid-'\.'-escape DeprecationWarning the old
    # non-raw literals triggered.
    re_prefix = r'%s\.%s\.' % (re.escape(experiment_name),
                               re.escape(architecture))
    re_suffix = r'\.path'

    valid_name = lambda f: bool(re.match(re_prefix + r'(\d{6}|best)' +
                                         re_suffix, f))
    safe_int_cast = lambda s: int(s) if s.isdigit() else s
    select_epoch = lambda f: safe_int_cast(re.sub(re_prefix, '',
                                                  re.sub(re_suffix, '', f)))
    # 'best' is always accepted; numeric epochs must fall inside the requested
    # range (None means an unbounded (0, inf) range)
    valid_epoch = lambda e: ((e == 'best') or
                             (e >= (epoch_val or (0, 0))[0] and
                              e <= (epoch_val or (0, float('inf')))[1]))

    filename_epoch_pairs = []
    best_filename = []
    for full_path in glob.glob(glob_prefix):
        filename = os.path.basename(full_path)
        if not valid_name(filename):
            continue
        epoch = select_epoch(filename)
        if not valid_epoch(epoch):
            continue
        if epoch == 'best':
            best_filename.append(filename)
        else:
            filename_epoch_pairs.append((filename, epoch))

    # 'best' checkpoints lead, then epoch-ascending order
    return best_filename + \
        [pair[0] for pair in sorted(filename_epoch_pairs,
                                    key=lambda el: el[1])]
def save_state_dict(experiment_name, architecture, epoch_val, model,
                    k_highest=10):
    """ Saves the state dict of a model with the given parameters.
    ARGS:
        experiment_name : string - name of experiment we're saving
        architecture : string - abbreviation for model architecture
        epoch_val : int - which epoch we're saving
        model : model - object we're saving the state dict of
        k_highest : int - if not None, we make sure to not include more than
                    k state_dicts for (experiment_name, architecture) pair,
                    keeping the k-most recent if we overflow
    RETURNS:
        The model we saved
    """
    # First resolve THIS filename
    this_filename = params_to_filename(experiment_name, architecture, epoch_val)

    # Next clear up memory if too many state dicts; '.best.path' checkpoints
    # are exempt from eviction
    current_filenames = [f for f in
                         params_to_filename(experiment_name, architecture)
                         if not f.endswith('.best.path')]
    if k_highest is not None:
        # +1 reserves room for the checkpoint we are about to write
        num_to_delete = len(current_filenames) - k_highest + 1
        if num_to_delete > 0:
            for delete_el in sorted(current_filenames)[:num_to_delete]:
                full_path = os.path.join(CHECKPOINT_DIR, delete_el)
                if os.path.exists(full_path):
                    os.remove(full_path)

    # Finally save the state dict
    torch.save(model.state_dict(),
               os.path.join(CHECKPOINT_DIR, this_filename))
    return model
def load_state_dict_from_filename(filename, model):
    """ Skips the whole parameter argument thing and just loads the whole
        state dict from a filename.
    ARGS:
        filename : string - filename without directories
        model : nn.Module - has 'load_state_dict' method
    RETURNS:
        the model loaded with the weights contained in the file
    """
    full_path = os.path.join(CHECKPOINT_DIR, filename)
    assert len(glob.glob(full_path)) == 1

    # Some checkpoints nest the weights under a 'state_dict' key; unwrap if so.
    loaded = torch.load(full_path)
    model.load_state_dict(loaded.get('state_dict', loaded))
    return model
def load_state_dict(experiment_name, architecture, epoch, model):
    """ Loads a checkpoint that was previously saved
        experiment_name : string - name of experiment we're saving
        architecture : string - abbreviation for model architecture
        epoch : int - which epoch we're loading
    """
    return load_state_dict_from_filename(
        params_to_filename(experiment_name, architecture, epoch), model)
###############################################################################
# #
# CHECKPOINTING DATA #
# #
###############################################################################
"""
This is a hacky fix to save batches of adversarial images along with their
labels.
"""
class CustomDataSaver(object):
    """Persists (examples, labels) minibatches as randomly named .npy pairs."""
    # TODO: make this more pytorch compliant
    def __init__(self, image_subdirectory):
        # Subdirectory of OUTPUT_IMAGE_DIR that receives the saved batches.
        self.image_subdirectory = image_subdirectory

    def save_minibatch(self, examples, labels):
        """ Assigns a random name to this minibatch and saves the examples and
            labels in two separate files:
            <random_name>.examples.npy and <random_name>.labels.npy
        ARGS:
            examples: Variable or Tensor (NxCxHxW) - examples to be saved
            labels : Variable or Tensor (N) - labels matching the examples
        """
        # Convert both tensors to numpy arrays on the CPU
        example_arr = examples.cpu().numpy()
        label_arr = labels.cpu().numpy()

        # Random file prefix shared by the pair
        random_string = str(random.random())[2:]  # DO THIS BETTER WHEN I HAVE INTERNET

        # Write the examples and labels side by side
        out_dir = os.path.join(OUTPUT_IMAGE_DIR, self.image_subdirectory)
        np.save(os.path.join(out_dir, '%s.examples.npy' % random_string),
                example_arr)
        np.save(os.path.join(out_dir, '%s.labels.npy' % random_string),
                label_arr)
class CustomDataLoader(object):
    """Iterates saved (examples, labels) .npy pairs as fixed-size minibatches.

    Counterpart to CustomDataSaver: reads every <prefix>.examples.npy /
    <prefix>.labels.npy pair under OUTPUT_IMAGE_DIR/<image_subdirectory>,
    re-batching them to `batch_size` regardless of how they were saved.
    """
    # TODO: make this more pytorch compliant
    def __init__(self, image_subdirectory, batch_size=128, to_tensor=True,
                 use_gpu=False):
        super(CustomDataLoader, self).__init__()
        self.image_subdirectory = image_subdirectory
        self.batch_size = batch_size
        # use_gpu implies to_tensor (can't move a raw numpy array to GPU)
        assert to_tensor >= use_gpu
        self.to_tensor = to_tensor
        self.use_gpu = use_gpu
    def _prepare_data(self, examples, labels):
        """ Takes in numpy examples and labels and tensor-ifies and cuda's them
            if necessary
        """
        if self.to_tensor:
            examples = torch.Tensor(examples)
            labels = torch.Tensor(labels)
        return utils.cudafy(self.use_gpu, (examples, labels))
    def _base_loader(self, prefix, which):
        # Load one saved array ('examples' or 'labels') by its random prefix.
        assert which in ['examples', 'labels']
        filename = '%s.%s.npy' % (prefix, which)
        full_path = os.path.join(OUTPUT_IMAGE_DIR, self.image_subdirectory,
                                 filename)
        return np.load(full_path)
    def _example_loader(self, prefix):
        """ Loads the numpy array of examples given the random 'prefix' """
        return self._base_loader(prefix, 'examples')
    def _label_loader(self, prefix):
        """ Loads the numpy array of labels given the random 'prefix' """
        return self._base_loader(prefix, 'labels')
    def __iter__(self):
        # First collect all the filenames:
        glob_prefix = os.path.join(OUTPUT_IMAGE_DIR, self.image_subdirectory,
                                   '*')
        files = glob.glob(glob_prefix)
        # every saved file is <random_prefix>.<examples|labels>.npy; collect
        # each unique prefix once
        valid_random_names = set(os.path.basename(_).split('.')[0]
                                 for _ in files)
        # Now loop through filenames and yield out minibatches of correct size
        running_examples, running_labels = [], []
        running_size = 0
        for random_name in valid_random_names:
            # Load data from files and append to 'running' lists
            loaded_examples = self._example_loader(random_name)
            loaded_labels = self._label_loader(random_name)
            running_examples.append(loaded_examples)
            running_labels.append(loaded_labels)
            running_size += loaded_examples.shape[0]
            if running_size < self.batch_size:
                # Load enough data to populate one minibatch, which might
                # take multiple files
                continue
            # Concatenate all images together
            merged_examples = np.concatenate(running_examples, axis=0)
            merged_labels = np.concatenate(running_labels, axis=0)
            # Make minibatches out of concatenated things,
            for batch_no in range(running_size // self.batch_size):
                index_lo = batch_no * self.batch_size
                index_hi = index_lo + self.batch_size
                example_batch = merged_examples[index_lo:index_hi]
                label_batch = merged_labels[index_lo:index_hi]
                yield self._prepare_data(example_batch, label_batch)
            # Handle any remainder for remaining files
            # (the tail of the merged arrays is carried into the next round)
            remainder_idx = (running_size // self.batch_size) * self.batch_size
            running_examples = [merged_examples[remainder_idx:]]
            running_labels = [merged_labels[remainder_idx:]]
            running_size = running_size - remainder_idx
        # If we're out of files, yield this last sub-minibatch of data
        if running_size > 0:
            merged_examples = np.concatenate(running_examples, axis=0)
            merged_labels = np.concatenate(running_labels, axis=0)
            yield self._prepare_data(merged_examples, merged_labels)
| 38.437888 | 88 | 0.594328 | 1,439 | 12,377 | 4.90132 | 0.203614 | 0.039699 | 0.058982 | 0.022118 | 0.327095 | 0.281015 | 0.2321 | 0.191975 | 0.158656 | 0.139232 | 0 | 0.005213 | 0.302577 | 12,377 | 321 | 89 | 38.557632 | 0.811863 | 0.316474 | 0 | 0.106667 | 0 | 0 | 0.019475 | 0 | 0 | 0 | 0 | 0.006231 | 0.02 | 1 | 0.093333 | false | 0 | 0.06 | 0 | 0.24 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51d4ea4507d80e27f773f363a22c466284face7c | 2,219 | py | Python | extra/carbontools.py | carbon-org/carbon | 454d087f85f7fb9408eb0bc10ae702b8de844648 | [
"MIT"
] | 9 | 2021-03-20T13:09:52.000Z | 2022-03-18T07:33:40.000Z | extra/carbontools.py | ThakeeNathees/Carbon | 454d087f85f7fb9408eb0bc10ae702b8de844648 | [
"MIT"
] | 4 | 2020-08-11T07:57:00.000Z | 2020-11-30T21:05:51.000Z | extra/carbontools.py | carbon-org/carbon | 454d087f85f7fb9408eb0bc10ae702b8de844648 | [
"MIT"
] | null | null | null |
import os, sys
import shutil
CARBON_DIR = os.path.dirname(__file__)  # directory containing this script
# NOTE(review): usage text is empty -- main()/error_command print it, so the
# CLI currently shows a blank usage message. Known command: clean [--all|-a].
USAGE = '''\
'''
## USAGE (as a library):
##   sys.path.append('path/to/carbon/')
##   import carbontools.py as cbtools
##   lib = cbtools.GET_CARBON_LIB(env)
def GET_CARBON_LIB(env):
	"""Build and return the carbon static-library node for a SCons env.

	The caller's env is cloned, so the include-path change does not leak
	back into it.
	"""
	## TODO: generate "*.gen.h" files
	SOURCES = []
	cbenv = env.Clone()
	cbenv.Append(CPPPATH=[os.path.join(CARBON_DIR, 'include/')])
	ALL_SOURCES = [
		'src/var/*.cpp',
		'src/core/*.cpp',
		'src/native/*.cpp',
		'src/compiler/*.cpp',
		'src/thirdparty/dlfcn-win32/*.c',
	]
	# BUG FIX: ALL_SOURCES is a plain list; the old code invoked it like a
	# function (ALL_SOURCES(cbenv)), which raised TypeError at runtime.
	for src in ALL_SOURCES:
		SOURCES.append(cbenv.Glob(os.path.join(CARBON_DIR, src)))
	lib = cbenv.Library(
		target = os.path.join(CARBON_DIR, 'bin/carbon'),
		source = SOURCES)
	return lib
def main():
	"""CLI entry point: parse sys.argv and dispatch to the chosen command."""
	argcount = len(sys.argv)
	if argcount < 2:
		# BUG FIX: this printed the undefined name USAGE_STRING, which
		# raised NameError instead of showing the usage text.
		print(USAGE)
		exit()

	## switch commands
	if sys.argv[1] == 'clean':
		cleanall = False
		for i in range(2, argcount):
			if sys.argv[i] in ('--all', '-a'):
				cleanall = True
			else:
				error_command(sys.argv[i])
		clean(cleanall)
	else:
		error_command(sys.argv[1])
## Internal methods ####
def error_command(cmd):
	"""Report an unknown CLI command plus usage, then exit with status -1."""
	print('[*]: ERROR: unknown command "%s"\n%s' % (cmd, USAGE))
	exit(-1)
def error_exit(msg):
	"""Report a fatal error message plus usage, then exit with status -1."""
	# BUG FIX: dropped the stray '"' that was appended after the message
	print('[*]: ERROR: ' + msg + '\n' + USAGE)
	exit(-1)
def get_platform():
	"""Return the build-platform tag: 'windows', 'x11', or 'osx'."""
	tag_by_sys_platform = {
		'win32': 'windows',
		'linux': 'x11',
		'linux2': 'x11',
		'darwin': 'osx',
	}
	tag = tag_by_sys_platform.get(sys.platform)
	if tag is None:
		error_exit("platform(%s) not supported." % sys.platform)
	return tag
def clean(cleanall=False):
	"""Remove build artifacts: scons outputs, VS project files, caches.

	ARGS:
		cleanall: bool - accepted for CLI compatibility ('clean --all'),
		          which main() forwards; currently every known artifact is
		          removed regardless of its value.
	"""
	CLEAN_DIRS = [
		'x64/',
		'debug/',    # BUG FIX: a missing comma used to fuse this entry with
		'release/',  # the next one into the bogus path 'debug/release/'
		'bin/',
		'.vs',
		'.vscode',
	]
	CLEAN_FILES = [
		'.pdb',
		'.idb',
		'.ilk',
		'.obj',
		'.sln',
		'.vcxproj',
		'.vcxproj.filters',
		'.vcxproj.user',
		'.sconsign.dblite',
	]
	os.system('scons -c')
	print('\n[*]: cleaning all files ...')
	for _dir in CLEAN_DIRS:
		try:
			shutil.rmtree(_dir)
			print('[*]: Removed - %s' % _dir)
		except OSError:
			# directory absent or not removable -- best-effort cleanup
			pass
	for path, dirs, files in os.walk('.'):
		for file in files:
			for suffix in CLEAN_FILES:
				if file.endswith(suffix):
					os.remove(os.path.join(path, file))
					print('[*]: Removed - %s' % os.path.join(path, file))
	print('[*]: done cleaning targets.')
# Script entry point: dispatch the CLI command (see USAGE).
if __name__ == '__main__':
	main()
51d59299ccdf71aaf83a73a14cec2a4bba7c6231 | 1,394 | py | Python | tests/profiler/test_smtfprofiler_events.py | aaronmarkham/sagemaker-debugger | d271fbefb0cbe9686100850249c96a40fdc45b06 | [
"Apache-2.0"
] | null | null | null | tests/profiler/test_smtfprofiler_events.py | aaronmarkham/sagemaker-debugger | d271fbefb0cbe9686100850249c96a40fdc45b06 | [
"Apache-2.0"
] | null | null | null | tests/profiler/test_smtfprofiler_events.py | aaronmarkham/sagemaker-debugger | d271fbefb0cbe9686100850249c96a40fdc45b06 | [
"Apache-2.0"
] | null | null | null | # First Party
from smdebug.profiler import SMTFProfilerEvents
def test_smtfprofiler_events(trace_file="./tests/profiler/smtf_profiler_trace.json"):
    """Smoke-test SMTFProfilerEvents against a known trace fixture: event
    counts, point/range queries, and sorted start/end timestamps."""
    trace_json_file = trace_file
    print(f"Reading the trace file {trace_json_file}")
    t_events = SMTFProfilerEvents(trace_json_file)

    # total number of events in the fixture
    all_trace_events = t_events.get_all_events()
    num_trace_events = len(all_trace_events)
    print(f"Number of events read = {num_trace_events}")
    assert num_trace_events == 49

    # point query at a known timestamp
    event_list = t_events.get_events_at(1589314018458800000)  # nanoseconds
    print(f"Number of events at 15013686 are {len(event_list)}")
    assert len(event_list) == 1

    # range query over the first part of the trace
    completed_event_list = t_events.get_events_within_range(0, 1589314018470000000)  # nanoseconds
    print(f"Number of events occurred between 0 and 15013686 are {len(completed_event_list)}")
    assert len(completed_event_list) == 34

    start_time_sorted = t_events.get_events_start_time_sorted()
    start_time_for_first_event = start_time_sorted[0].start_time
    print(f"The first event started at {start_time_for_first_event}")
    assert start_time_for_first_event == 1589314018458743000

    end_time_sorted = t_events.get_events_end_time_sorted()
    end_time_for_last_event = end_time_sorted[-1].end_time
    # BUG FIX: message previously said "The first event started at" -- a
    # copy-paste of the line above; this value is the LAST event's end time.
    print(f"The last event ended at {end_time_for_last_event}")
    assert end_time_for_last_event == 1589314018481947000
| 42.242424 | 98 | 0.781205 | 206 | 1,394 | 4.868932 | 0.271845 | 0.062812 | 0.04985 | 0.063809 | 0.36989 | 0.227318 | 0.063809 | 0.063809 | 0 | 0 | 0 | 0.084448 | 0.142037 | 1,394 | 32 | 99 | 43.5625 | 0.754181 | 0.025108 | 0 | 0 | 0 | 0 | 0.265683 | 0.089299 | 0 | 0 | 0 | 0 | 0.217391 | 1 | 0.043478 | false | 0 | 0.043478 | 0 | 0.086957 | 0.26087 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51d63a3e6da1e5e89a19fcaa83ea91fa806990e0 | 2,551 | py | Python | package/tests/test_PartSegCore/segmentation/test_segmentation_algorithm.py | neuromusic/PartSeg | a4edff1b9fbe55eb7f5e1fc8b5b3f8e730b35caf | [
"BSD-3-Clause"
] | 15 | 2020-03-21T03:27:56.000Z | 2022-03-21T07:46:39.000Z | package/tests/test_PartSegCore/segmentation/test_segmentation_algorithm.py | neuromusic/PartSeg | a4edff1b9fbe55eb7f5e1fc8b5b3f8e730b35caf | [
"BSD-3-Clause"
] | 479 | 2019-10-27T22:57:22.000Z | 2022-03-30T12:48:14.000Z | package/tests/test_PartSegCore/segmentation/test_segmentation_algorithm.py | neuromusic/PartSeg | a4edff1b9fbe55eb7f5e1fc8b5b3f8e730b35caf | [
"BSD-3-Clause"
] | 5 | 2020-02-05T14:25:02.000Z | 2021-12-21T03:44:52.000Z | from typing import Type
import numpy as np
import pytest
from PartSegCore.segmentation import ROIExtractionAlgorithm
from PartSegCore.segmentation.algorithm_base import ROIExtractionResult, SegmentationLimitException
from PartSegCore.segmentation.restartable_segmentation_algorithms import final_algorithm_list as restartable_list
from PartSegCore.segmentation.segmentation_algorithm import (
CellFromNucleusFlow,
ThresholdFlowAlgorithm,
close_small_holes,
)
from PartSegCore.segmentation.segmentation_algorithm import final_algorithm_list as algorithm_list
def empty(*args):
    """No-op progress callback handed to calculation_run."""
    return None
@pytest.fixture(autouse=True)
def fix_threshold_flow(monkeypatch):
    """Patch both flow algorithms so their default thresholds are lowered
    (core/nucleus: 10, base/cell: 5) for every test in this module."""
    flow_defaults = ThresholdFlowAlgorithm.get_default_values()
    flow_defaults["threshold"]["values"]["core_threshold"]["values"]["threshold"] = 10
    flow_defaults["threshold"]["values"]["base_threshold"]["values"]["threshold"] = 5
    monkeypatch.setattr(
        ThresholdFlowAlgorithm, "get_default_values", lambda self: flow_defaults
    )

    nucleus_defaults = CellFromNucleusFlow.get_default_values()
    nucleus_defaults["nucleus_threshold"]["values"]["threshold"] = 10
    nucleus_defaults["cell_threshold"]["values"]["threshold"] = 5
    monkeypatch.setattr(
        CellFromNucleusFlow, "get_default_values", lambda self: nucleus_defaults
    )
@pytest.mark.parametrize("algorithm", restartable_list + algorithm_list)
@pytest.mark.parametrize("masking", [True, False])
def test_segmentation_algorithm(image, algorithm: Type[ROIExtractionAlgorithm], masking):
    """Run every registered ROI-extraction algorithm on the fixture image,
    with and without a mask, checking its declared capabilities and that a
    full calculation either succeeds or raises SegmentationLimitException."""
    assert algorithm.support_z() is True
    assert algorithm.support_time() is False
    assert isinstance(algorithm.get_steps_num(), int)
    instance = algorithm()
    instance.set_image(image)
    if masking:
        # mask = all voxels where channel 0 is non-zero
        instance.set_mask(image.get_channel(0) > 0)
    instance.set_parameters(**instance.get_default_values())
    if not masking and "Need mask" in algorithm.get_fields():
        # mask-requiring algorithms must refuse to run without one
        with pytest.raises(SegmentationLimitException):
            instance.calculation_run(empty)
    else:
        res = instance.calculation_run(empty)
        assert isinstance(instance.get_info_text(), str)
        assert isinstance(res, ROIExtractionResult)
    instance.clean()
@pytest.mark.parametrize("ndim", (2, 3))
@pytest.mark.parametrize("dtype", (np.uint8, bool))
def test_close_small_holes(ndim, dtype):
    """A hole punched strictly inside a filled hypercube must be closed."""
    fill_region = (slice(1, -1),) * ndim
    hole_region = (slice(3, -3),) * ndim

    data = np.zeros((10,) * ndim, dtype=dtype)
    data[fill_region] = 1
    expected = data.copy()      # the hole-free reference
    data[hole_region] = 0       # punch a 4**ndim hole in the middle

    res = close_small_holes(data, 5 ** 2)
    assert np.all(res == expected)
| 35.430556 | 113 | 0.73971 | 290 | 2,551 | 6.317241 | 0.337931 | 0.049127 | 0.07369 | 0.026201 | 0.114629 | 0.058952 | 0 | 0 | 0 | 0 | 0 | 0.012408 | 0.147001 | 2,551 | 71 | 114 | 35.929577 | 0.829504 | 0 | 0 | 0 | 0 | 0 | 0.085849 | 0 | 0 | 0 | 0 | 0 | 0.107143 | 1 | 0.107143 | false | 0.017857 | 0.142857 | 0.035714 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51d655292e154bea97fce427b26a455cc09bac09 | 958 | py | Python | bootstrap.py | ayang818/pyweb-template | d4b8c97b9e99166a6b6d856929ef670771b90fd3 | [
"MIT"
] | null | null | null | bootstrap.py | ayang818/pyweb-template | d4b8c97b9e99166a6b6d856929ef670771b90fd3 | [
"MIT"
] | null | null | null | bootstrap.py | ayang818/pyweb-template | d4b8c97b9e99166a6b6d856929ef670771b90fd3 | [
"MIT"
] | null | null | null | # coding=utf-8
import logging
import os
from flask import Flask
from cloudware_server.route.base import register_routes
def config_logger():
    """
    Set the root logger's level to INFO.
    """
    logging.getLogger().setLevel(logging.INFO)


config_logger()
def create_app(config=None):
    """
    Create and configure the bootstrap Flask app.

    config: optional mapping of settings; when omitted (or falsy),
    instance/config.py is loaded instead (silently skipped if absent).
    """
    app = Flask(__name__, instance_relative_config=True)
    app.config.from_mapping(
        SECRET_KEY='dev',
        DATABASE=os.path.join(app.instance_path, 'flaskr.sqlite'),
    )
    if not config:
        app.config.from_pyfile('config.py', silent=True)
    else:
        app.config.from_mapping(config)
    try:
        # make sure the instance folder exists (holds config + sqlite file);
        # failure is logged but does not abort app creation
        if not os.path.exists(app.instance_path):
            os.makedirs(app.instance_path)
    except OSError as e:
        logging.error('启动失败 %s', e)
    # register routes
    register_routes(app)
    return app
app = create_app()
logging.info("%s", os.path.join(app.instance_path, 'flaskr.sqlite'))
# NOTE(review): host/port hard-coded -- development server only
app.run(host='localhost', port=5000)
| 20.382979 | 68 | 0.656576 | 127 | 958 | 4.787402 | 0.488189 | 0.059211 | 0.098684 | 0.065789 | 0.121711 | 0.121711 | 0.121711 | 0.121711 | 0 | 0 | 0 | 0.006667 | 0.217119 | 958 | 46 | 69 | 20.826087 | 0.804 | 0.043841 | 0 | 0 | 0 | 0 | 0.063348 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074074 | false | 0 | 0.148148 | 0 | 0.259259 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51d7b2bb9f36934a7abf44cab165f9dd936da6f5 | 2,990 | py | Python | src/online/batch_job.py | jack139/fair | fe0ff64f8edbd794c3fb951ab6af420054e9e585 | [
"BSD-3-Clause"
] | 1 | 2019-07-16T09:46:39.000Z | 2019-07-16T09:46:39.000Z | src/online/batch_job.py | jack139/fair | fe0ff64f8edbd794c3fb951ab6af420054e9e585 | [
"BSD-3-Clause"
] | null | null | null | src/online/batch_job.py | jack139/fair | fe0ff64f8edbd794c3fb951ab6af420054e9e585 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import web
from bson.objectid import ObjectId
from config import setting
import helper
# Handle to the web-side database configured for this deployment.
db = setting.db_web

# URL route served by this module (web.py routing convention).
url = ('/online/batch_job')

# - Batch order processing
class handler:
    """web.py request handler that summarises group-buy orders per province.

    NOTE(review): this is Python 2 code (``dict.has_key`` is used throughout).
    """

    def GET(self):
        # Only users holding the BATCH_JOB privilege may view this page.
        if helper.logged(helper.PRIV_USER,'BATCH_JOB'):
            render = helper.create_render()
            #user_data=web.input(start_date='', shop='__ALL__')

            # Look up the shop bound to the current user.
            db_shop = helper.get_shop_by_uid()
            shop_name = helper.get_shop(db_shop['shop'])

            # Collect online orders that are paid but not yet delivered.
            condition = {
                'shop' : db_shop['shop'],
                'status' : {'$in' : ['PAID','DISPATCH','ONROAD']},
                'type' : {'$in' : ['TUAN', 'SINGLE']}, # group-buy only
            }
            db_sale2 = db.order_app.find(condition, {
                'order_id' : 1,
                'paid_time' : 1,
                'cart' : 1,
                'type' : 1,
                'status' : 1,
                'address' : 1,
            })

            # skus maps tuan_id -> {name, tuan_id, <province> -> counters}.
            skus={}
            for i in db_sale2:
                # Derive the province from the ninth address component;
                # fall back to "unknown" when the address is too short.
                sheng = i['address'][8].split(',')[0] if len(i['address'])>=9 else u'未知'
                if skus.has_key(i['cart'][0]['tuan_id']):
                    if skus[i['cart'][0]['tuan_id']].has_key(sheng):
                        # Existing product/province bucket: bump the counters.
                        skus[i['cart'][0]['tuan_id']][sheng]['num'] += 1
                        skus[i['cart'][0]['tuan_id']][sheng]['paid'] += (1 if i['status']=='PAID' else 0)
                        skus[i['cart'][0]['tuan_id']][sheng]['dispatch'] += (1 if i['status']=='DISPATCH' else 0)
                        skus[i['cart'][0]['tuan_id']][sheng]['onroad'] += (1 if i['status']=='ONROAD' else 0)
                    else:
                        # First order of this product seen for this province.
                        skus[i['cart'][0]['tuan_id']][sheng] = {}
                        skus[i['cart'][0]['tuan_id']][sheng]['num'] = 1
                        skus[i['cart'][0]['tuan_id']][sheng]['paid'] = (1 if i['status']=='PAID' else 0)
                        skus[i['cart'][0]['tuan_id']][sheng]['dispatch'] = (1 if i['status']=='DISPATCH' else 0)
                        skus[i['cart'][0]['tuan_id']][sheng]['onroad'] = (1 if i['status']=='ONROAD' else 0)
                else:
                    # First order for this product at all: fetch its title.
                    r = db.pt_store.find_one({'tuan_id':i['cart'][0]['tuan_id']},{'title':1})
                    if r:
                        title = r['title']
                    else:
                        title = 'n/a'
                    skus[i['cart'][0]['tuan_id']] = {
                        'name' : title,
                        'tuan_id' : i['cart'][0]['tuan_id'],
                    }
                    skus[i['cart'][0]['tuan_id']][sheng]={
                        'num' : 1, # includes complimentary items
                        'paid' : 1 if i['status']=='PAID' else 0, # paid, awaiting picking (group-buy)
                        'dispatch' : 1 if i['status']=='DISPATCH' else 0, # paid, awaiting dispatch (group-buy)
                        'onroad' : 1 if i['status']=='ONROAD' else 0, # paid, in transit (group-buy)
                    }

            # Aggregate totals per province across all products.
            total_sum={}
            for i in skus.keys():
                for j in skus[i].keys():
                    # Skip the two metadata keys; everything else is a province.
                    if j in ['name','tuan_id']:
                        continue
                    if total_sum.has_key(j):
                        total_sum[j]['paid'] += skus[i][j]['paid']
                        total_sum[j]['dispatch'] += skus[i][j]['dispatch']
                        total_sum[j]['onroad'] += skus[i][j]['onroad']
                    else:
                        total_sum[j] = {}
                        total_sum[j]['paid'] = skus[i][j]['paid']
                        total_sum[j]['dispatch'] = skus[i][j]['dispatch']
                        total_sum[j]['onroad'] = skus[i][j]['onroad']

            return render.batch_job(helper.get_session_uname(), helper.get_privilege_name(),
                skus, shop_name['name'], total_sum)
        else:
            # Not authorised: bounce back to the landing page.
            raise web.seeother('/')
| 31.473684 | 95 | 0.538462 | 446 | 2,990 | 3.46861 | 0.226457 | 0.061409 | 0.058177 | 0.096962 | 0.46671 | 0.458953 | 0.438268 | 0.404654 | 0.352295 | 0.336134 | 0 | 0.020949 | 0.217726 | 2,990 | 94 | 96 | 31.808511 | 0.640445 | 0.057191 | 0 | 0.067568 | 0 | 0 | 0.211052 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.013514 | false | 0 | 0.054054 | 0 | 0.094595 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51d8ae23bd3580d3a4606d438e3b8518ad7289ac | 8,124 | py | Python | attributes/architecture/main.py | Lufedi/reaper | bdf56b499e5b704c27b9f6c053d798c2a10fa4cf | [
"Apache-2.0"
] | null | null | null | attributes/architecture/main.py | Lufedi/reaper | bdf56b499e5b704c27b9f6c053d798c2a10fa4cf | [
"Apache-2.0"
] | 1 | 2021-03-16T20:28:19.000Z | 2021-03-16T20:28:19.000Z | attributes/architecture/main.py | Lufedi/reaper | bdf56b499e5b704c27b9f6c053d798c2a10fa4cf | [
"Apache-2.0"
] | 1 | 2022-03-04T01:21:09.000Z | 2022-03-04T01:21:09.000Z | import os
import re
import subprocess
import json
import networkx
from pygments import lexers, token, util
# Pygments token types that count as a symbol *reference* when scanning a
# source file (function/class definitions are handled separately as defines).
TOKENTYPE_WHITELIST = [
    token.Name,
    token.Name.Attribute,
    token.Name.Builtin,
    token.Name.Builtin.Pseudo,
    token.Name.Constant,
    token.Name.Decorator,
    token.Name.Entity,
    token.Name.Exception,
    token.Name.Label,
    token.Name.Namespace,
    token.Name.Other,
    token.Name.Tag,
    token.Name.Variable,
    token.Name.Variable.Class,
    token.Name.Variable.Global,
    token.Name.Variable.Instance
]

# Languages supported by ack; populated at runtime by init().
SUPPORTED_LANGUAGES = []

# Regular expression to parse the list of languages supported by ack as listed
# by ack --help-types
# Pattern: "    --[no]python"
RE_ACK_LANGUAGES = re.compile('(?:^\s{4}--\[no\])(\w*)')

# Map GHTorrent's projects.language to ACK compatible language (if necessary).
ACK_LANGUAGE_MAP = {
    'c': 'cc',
    'c++': 'cpp',
    'c#': 'csharp',
    'objective-c': 'objc',
    'ojective-c++': 'objcpp',
    'javascript': 'js'
}
def init(cursor):
    """Populate SUPPORTED_LANGUAGES by querying ``ack --help-types``.

    The ``cursor`` argument is accepted for interface parity with the other
    attribute modules and is not used here.
    """
    global SUPPORTED_LANGUAGES
    proc = subprocess.Popen(
        ['ack', '--help-types'], stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    help_text, _ = (stream.decode('utf-8') for stream in proc.communicate())
    for help_line in help_text.split('\n'):
        hit = RE_ACK_LANGUAGES.match(help_line)
        if hit:
            SUPPORTED_LANGUAGES.append(hit.group(1))
def run(project_id, repo_path, cursor, **options):
    """Compute the architecture (monolithicity) attribute of a project.

    Parameters
    ----------
    project_id : int
        GHTorrent identifier of the project (used to look up its language).
    repo_path : str
        Path to the checked-out repository on disk.
    cursor : database cursor
        Open cursor on the GHTorrent schema.
    **options : dict
        ``threshold`` (required) and ``minimumFiles`` (default 2).

    Returns
    -------
    tuple(bool, float)
        Whether the connectedness meets the threshold, and its value.
    """
    result = 0

    # Look up the project's primary language as recorded by GHTorrent.
    cursor.execute('''
        SELECT
            language
        FROM
            projects
        WHERE
            id = {0}
    '''.format(project_id))
    record = cursor.fetchone()
    language = record[0]
    language = language.lower() if language else language

    ack_language = language
    if ack_language in ACK_LANGUAGE_MAP:
        ack_language = ACK_LANGUAGE_MAP[ack_language]

    # Edge case if the repository language is not supported by us.
    if (ack_language not in SUPPORTED_LANGUAGES) and (language.lower() != 'javascript'):
        return False, result

    # Enumerate the source files written in the project's language.
    file_paths = []
    if language.lower() == 'javascript':
        for root, dirs, files in os.walk(repo_path):
            for _file in files:
                if _file.endswith(".js"):
                    file_paths.append(os.path.join(root, _file))
    else:
        ack_process = subprocess.Popen(
            ['ack', '-f', "--{0}".format(ack_language), repo_path],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE
        )
        lines, _ = [
            x.decode(errors='replace') for x in ack_process.communicate()
        ]
        file_paths = [line for line in lines.split('\n') if line.strip()]

    # Immediately fail the attribute if `minimumFiles` is not met.
    if len(file_paths) < options.get('minimumFiles', 2):
        return False, result

    graph = networkx.Graph()
    if language.lower() == 'javascript':
        # JavaScript: Use external utility
        success = build_js_graph(repo_path, file_paths, graph)
    else:
        lexer = lexers.get_lexer_by_name(language)
        # BUG FIX: build_graph expects the list of file paths; the previous
        # code passed repo_path (a string), which was then iterated character
        # by character, building a graph of bogus single-character nodes.
        success = build_graph(file_paths, graph, lexer)

    if success:
        monolithicity = get_connectedness(graph)
    else:
        monolithicity = 0

    return monolithicity >= options['threshold'], monolithicity
def build_js_graph(repo_path, file_paths, graph):
    """Build an undirected call graph for a JavaScript repository.

    Runs the external ``js-callgraph`` utility, then adds one node per
    ``.js`` file and one edge per cross-file call found in its JSON output.

    Returns True on success, False if the JSON output could not be read.
    """
    # add nodes
    for file_path in file_paths:
        graph.add_node(Node(file_path))
    name = repo_path.split('/')[-1]  # get name of the repository
    # compute and store call graph as json using js-callgraph
    # NOTE(review): ``gtimeout`` is the coreutils timeout binary as named on
    # macOS (Homebrew); on Linux this command is typically ``timeout``.
    graph_process = f"gtimeout 1000 js-callgraph --cg {repo_path} --output {name}_graph.json >/dev/null 2>&1"
    os.system(graph_process)
    try:
        with open('{}_graph.json'.format(name), 'r') as json_file:
            # load the json representation of the call graph
            calls = json.load(json_file)
            for call in calls:
                source_file = call['source']['file']  # identify the source of the call
                target_file = call['target']['file']  # identify the target of the call
                # both source and target should be nodes in the call graph, i.e., .js files
                if source_file.endswith(".js") and target_file.endswith(".js"):
                    graph.add_edge(Node(source_file), Node(target_file))  # add edge
        graph.to_undirected()  # just in case, transform into undirected (should be undirected by default anyway)
        os.remove('{}_graph.json'.format(name))  # delete the json representation of the call graph
        return True
    except IOError as err:
        print(err)
        return False
def build_graph(file_paths, graph, lexer):
    """Build a symbol define/reference graph over the given source files.

    for each file in the set of files
        create a node and add it to the graph
        open the file
        read the contents into memory
        get a list of tokens from the lexer
        for each token in the resulting tokens
            check if the token is defining a symbol
            if true, add the symbol to the file node
    for each file in the set of files
        open the file
        read the contents into memory
        get a list of token from the lexer
        for each token in the resulting tokens
            check if the token is using a symbol
            if true:
                search the graph for the node that has the symbol definition
                create a relationship from the current file to the node with
                the symbol definition
    """
    # NOTE(review): run() passes its ``repo_path`` string here, which this
    # loop would iterate character by character -- confirm callers pass the
    # list of file paths instead.
    for file_path in file_paths:
        node = Node(file_path)
        graph.add_node(node)
        try:
            with open(file_path, 'r', encoding='utf-8') as file:
                contents = file.read()
                tokens = lexer.get_tokens(contents)
                for item in tokens:
                    token_type = item[0]
                    symbol = item[1]
                    # Function/class definitions become "defines"; any other
                    # whitelisted Name token becomes a "reference".
                    if token_type in [token.Name.Function, token.Name.Class]:
                        node.defines.add(symbol)
                    elif token_type in TOKENTYPE_WHITELIST:
                        node.references.add(symbol)
            if 'DEBUG' in os.environ:
                print(node)
        except FileNotFoundError as e:
            continue
        except UnicodeDecodeError:
            continue
    # Connect each file to every file defining a symbol it references
    # (quadratic in the number of files).
    # NOTE(review): nodes_iter() is networkx 1.x API -- confirm the pinned
    # networkx version.
    for caller in graph.nodes_iter():
        for reference in caller.references:
            for callee in graph.nodes_iter():
                if callee is not caller and reference in callee.defines:
                    graph.add_edge(caller, callee)
    return True
def get_connectedness(graph):
    """Return the fraction of nodes in the graph's largest connected component.

    A value close to 1 indicates a monolithic architecture (most files are
    transitively connected).

    Parameters
    ----------
    graph : networkx.Graph
        Undirected file dependency graph.

    Returns
    -------
    float
        |largest component| / |nodes|, or 0 for an empty graph.
    """
    # Guard the empty graph first: the previous code indexed the (empty)
    # component list before checking, raising IndexError.
    if not graph.nodes() or len(graph.nodes()) == 0:
        return 0
    # networkx.connected_component_subgraphs() was removed in networkx 2.4;
    # connected_components() yields node sets, which is all we need here.
    largest_component = max(networkx.connected_components(graph), key=len)
    connectedness = len(largest_component) / len(graph.nodes())
    return connectedness
class Node():
    """A single source file in the dependency graph.

    Nodes hash and compare on ``path`` alone; ``defines`` holds the symbols
    declared in the file and ``references`` the symbols it uses.
    """

    def __init__(self, path):
        self.path = path          # file path identifying this node
        self.defines = set()      # symbols defined in this file
        self.references = set()   # symbols referenced by this file

    def __hash__(self):
        # Hash on the path so equal nodes collide in sets and dicts.
        return hash(self.path)

    def __eq__(self, other):
        return self.path == other.path

    def __str__(self):
        underline = '=' * len(self.path)
        symbols = '\r' + '\n'.join(self.defines)
        return f"{self.path}\n{underline}\n{symbols}"
if __name__ == '__main__':
    # Stand-alone debug entry point: connects to the configured MySQL
    # datasource and runs the attribute on the repository given on the
    # command line (argv[1] = project id, argv[2] = repository path).
    import importlib
    import json
    import mysql.connector
    import sys

    sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))
    from lib.utilities import get_loc

    os.environ['DEBUG'] = '1'  # force per-file debug printing in build_graph

    with open('../../config.json', 'r') as file:
        config = json.load(file)
    mysql_config = config['options']['datasource']
    connection = mysql.connector.connect(**mysql_config)
    connection.connect()
    cursor = connection.cursor()

    init(None)
    result = run(sys.argv[1], sys.argv[2], cursor, threshold=0.75)

    cursor.close()
    connection.close()

    print(result)
else:
    # Imported as a module: expose the shared LOC helper only.
    from lib.utilities import get_loc
| 31.858824 | 117 | 0.616322 | 1,035 | 8,124 | 4.712077 | 0.252174 | 0.033217 | 0.013943 | 0.010662 | 0.143121 | 0.132869 | 0.104162 | 0.089809 | 0.065204 | 0.043879 | 0 | 0.005619 | 0.27708 | 8,124 | 254 | 118 | 31.984252 | 0.824791 | 0.19387 | 0 | 0.111111 | 0 | 0.005556 | 0.08005 | 0.003568 | 0.005556 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.066667 | 0.011111 | 0.177778 | 0.016667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51d8b62bf25917e74c95918a73ec119b3673d41b | 1,065 | py | Python | opencv/colordetect/sendimage.py | ronhandler/gitroot | beb81c4b826939f16e57a98ac5845d8acecf151d | [
"Unlicense"
] | null | null | null | opencv/colordetect/sendimage.py | ronhandler/gitroot | beb81c4b826939f16e57a98ac5845d8acecf151d | [
"Unlicense"
] | null | null | null | opencv/colordetect/sendimage.py | ronhandler/gitroot | beb81c4b826939f16e57a98ac5845d8acecf151d | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python
import sys
import os.path
import cv2
import numpy as np
import boto
from boto.s3.key import Key
cap = cv2.VideoCapture(0)
ret, new_image = cap.read()
if ret == False:
exit(1)
filename = 'new.jpg'
cv2.imwrite(filename, new_image)
bucket_name = 'ronhandler'
AWS_ACCESS_KEY_ID = 'AKIAIYLDR3LU2XDICTSQ'
AWS_SECRET_ACCESS_KEY = '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'
print('Connecting to AWS S3...')
conn = boto.connect_s3(AWS_ACCESS_KEY_ID,
AWS_SECRET_ACCESS_KEY,
# Hardcoding the host parameter is a workaround for bug:
# https://github.com/boto/boto/issues/621
host="s3-eu-west-1.amazonaws.com")
bucket = conn.get_bucket(bucket_name)
k = Key(bucket)
k.key = filename
testfile = "/share/" + filename
print('Uploading "%s" to "%s/%s"...' % (testfile, bucket_name, k.key))
k.set_contents_from_filename(testfile)
print('Notifying the server that we have uploaded a file...')
import urllib2
url = """http://ec2-52-16-188-96.eu-west-1.compute.amazonaws.com/admin/run.php"""
urllib2.urlopen(url).read()
| 25.97561 | 81 | 0.723005 | 163 | 1,065 | 4.588957 | 0.552147 | 0.048128 | 0.032086 | 0.037433 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.043621 | 0.138967 | 1,065 | 40 | 82 | 26.625 | 0.772083 | 0.107981 | 0 | 0 | 0 | 0.034483 | 0.297782 | 0.069694 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.241379 | 0 | 0.241379 | 0.103448 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51dded95316bf909a713965fa3b3d3c363309051 | 2,782 | py | Python | hipp/utils/utils.py | cmcneil-usgs/hipp | be6f9f8cccdc32b7b96be92172977a5c4006500c | [
"MIT"
] | 12 | 2020-10-07T22:12:11.000Z | 2022-02-15T23:10:53.000Z | hipp/utils/utils.py | cmcneil-usgs/hipp | be6f9f8cccdc32b7b96be92172977a5c4006500c | [
"MIT"
] | 7 | 2020-10-11T23:42:55.000Z | 2021-12-15T23:16:43.000Z | hipp/utils/utils.py | cmcneil-usgs/hipp | be6f9f8cccdc32b7b96be92172977a5c4006500c | [
"MIT"
] | 4 | 2020-10-11T19:48:58.000Z | 2022-03-08T21:32:13.000Z | import glob
import os
import cv2
import hipp.io
import hipp.utils
"""
Library for command line tools.
"""
def optimize_geotif(geotif_file_name,
                    output_file_name=None,
                    verbose=False,
                    print_call=False):
    """Rewrite a GeoTIFF as a tiled, LZW-compressed GTiff via gdal_translate.

    When ``output_file_name`` is omitted, ``<name>_optimized<ext>`` next to
    the input is used. With ``print_call=True`` the gdal command is only
    printed, not executed. Returns the output file name.
    """
    if output_file_name is None:
        directory, stem, extension = hipp.io.split_file(geotif_file_name)
        output_file_name = os.path.join(
            directory, stem + '_optimized' + extension
        )
    command = [
        'gdal_translate',
        '-of', 'GTiff',
        '-co', 'TILED=YES',
        '-co', 'COMPRESS=LZW',
        '-co', 'BIGTIFF=IF_SAFER',
        geotif_file_name,
        output_file_name,
    ]
    if print_call == True:
        print(*command)
    else:
        hipp.io.run_command(command, verbose=verbose)
    return output_file_name
def optimize_geotifs(input_directory,
                     keep = False,
                     verbose=False):
    """Optimize every ``.tif`` in ``input_directory`` with gdal_translate.

    With ``keep=False`` (default) each original file is replaced in place by
    its optimized version; with ``keep=True`` the ``_optimized`` copies are
    kept alongside the originals. Returns the list of resulting tif paths.
    """
    print('Optimizing tifs in', input_directory, 'with:')
    print(*['gdal_translate',
            '-of', 'GTiff',
            '-co', 'TILED=YES',
            '-co', 'COMPRESS=LZW',
            '-co', 'BIGTIFF=IF_SAFER'])
    results = []
    for source in sorted(glob.glob(os.path.join(input_directory, '*.tif'))):
        optimized = hipp.utils.optimize_geotif(source, verbose=verbose)
        if keep:
            results.append(optimized)
        else:
            # Swap the optimized file into the original's place.
            os.remove(source)
            os.rename(optimized, source)
            results.append(source)
    return results
def enhance_geotif_resolution(geotif_file_name,
                              output_file_name=None,
                              factor=None,
                              verbose=False,
                              print_call=False):
    """Upsample a GeoTIFF by an integer ``factor`` using cubic resampling.

    Parameters
    ----------
    geotif_file_name : str
        Input GeoTIFF path.
    output_file_name : str, optional
        Destination path; defaults to ``<name>_high_res<ext>`` beside the
        input.
    factor : int
        Multiplier applied to both image dimensions. Required.
    verbose : bool, optional
        Forwarded to the gdal command runner.
    print_call : bool, optional
        When True, print the gdal_translate command instead of running it.

    Returns
    -------
    str
        The output file name.

    Raises
    ------
    ValueError
        If ``factor`` is not provided.
    """
    # Fail early with a clear message instead of the opaque TypeError that
    # ``w * None`` would raise below.
    if factor is None:
        raise ValueError("factor must be provided (e.g. factor=2).")
    if output_file_name is None:
        file_path, file_name, file_extension = hipp.io.split_file(geotif_file_name)
        output_file_name = os.path.join(file_path,
                                        file_name + '_high_res' + file_extension)
    # Read the image only to determine its pixel dimensions.
    img = cv2.imread(geotif_file_name, cv2.IMREAD_GRAYSCALE)
    w, h = img.shape[::-1]
    w, h = w * factor, h * factor
    call = ['gdal_translate',
            '-of', 'GTiff',
            '-co', 'TILED=YES',
            '-co', 'COMPRESS=LZW',
            '-co', 'BIGTIFF=IF_SAFER',
            '-outsize', str(w), str(h),
            '-r', 'cubic',
            geotif_file_name,
            output_file_name]
    if print_call == True:
        print(*call)
    else:
        hipp.io.run_command(call, verbose=verbose)
    return output_file_name
| 29.284211 | 83 | 0.515097 | 300 | 2,782 | 4.513333 | 0.25 | 0.124077 | 0.103397 | 0.088626 | 0.564993 | 0.564993 | 0.564993 | 0.520679 | 0.520679 | 0.520679 | 0 | 0.002311 | 0.377786 | 2,782 | 94 | 84 | 29.595745 | 0.779896 | 0 | 0 | 0.56338 | 0 | 0 | 0.097187 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042254 | false | 0 | 0.070423 | 0 | 0.15493 | 0.112676 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51e08e485ba5b37c52a2c10b26fc9af31001557a | 5,836 | py | Python | tests/test_preprocessing/test_encoding.py | ig248/timeserio | afc2a953a83e763418d417059493ef13a17d349c | [
"MIT"
] | 63 | 2019-07-12T17:16:27.000Z | 2022-02-22T11:06:50.000Z | tests/test_preprocessing/test_encoding.py | ig248/timeserio | afc2a953a83e763418d417059493ef13a17d349c | [
"MIT"
] | 34 | 2019-07-30T11:52:09.000Z | 2022-03-28T12:42:02.000Z | tests/test_preprocessing/test_encoding.py | ig248/timeserio | afc2a953a83e763418d417059493ef13a17d349c | [
"MIT"
] | 12 | 2019-08-14T05:51:22.000Z | 2021-03-15T09:34:15.000Z | import numpy as np
import numpy.testing as npt
import pytest
from sklearn.preprocessing import OneHotEncoder
from timeserio.preprocessing import (
FeatureIndexEncoder, StatelessOneHotEncoder,
StatelessTemporalOneHotEncoder, StatelessPeriodicEncoder
)
from timeserio.preprocessing.encoding import PeriodicEncoder
class TestFeatureIndexEncoder:
    """FeatureIndexEncoder should map string labels to integer indices."""

    @pytest.mark.parametrize(
        'n_labels, expected_encoding', [
            (1, np.arange(1)),
            (2, np.arange(2)),
            (3, np.arange(3)),
        ]
    )
    def test_feature_encoder(self, n_labels, expected_encoding):
        encoder = FeatureIndexEncoder()
        labels = np.array(
            [f'label{idx}' for idx in range(n_labels)]
        ).reshape(-1, 1)
        new_ids = encoder.fit_transform(labels)
        # Output must be a 2D, single-column integer array.
        assert isinstance(new_ids, np.ndarray)
        assert len(new_ids.shape) == 2
        assert new_ids.shape[1] == 1
        # BUG FIX: the original asserted `set(new_ids.ravel() == set(...))`,
        # a misplaced parenthesis that compared an array against a set
        # instead of comparing the two sets of encoded values.
        assert set(new_ids.ravel()) == set(expected_encoding.ravel())
class TestStatelessOneHotEncoder:
    """StatelessOneHotEncoder must match a fitted sklearn OneHotEncoder."""

    # number of random example rows per test
    n_rows = 10

    def test_invalid_n_values(self):
        # 'auto' would require fitting, which a stateless encoder forbids.
        with pytest.raises(ValueError):
            StatelessOneHotEncoder(n_features=1, n_values='auto')

    @pytest.mark.parametrize(
        'n_features, n_values, categories', [
            (1, 3, [[0, 1, 2]]),
            (2, 3, [[0, 1, 2], [0, 1, 2]])
        ]
    )
    def test_same_as_stateful(
        self, n_features, n_values, categories, random
    ):
        # `random` fixture seeds the RNG for reproducibility.
        x = np.random.randint(
            0, np.min(n_values), size=(self.n_rows, n_features)
        )
        stateful_enc = OneHotEncoder(
            categories=categories,
            sparse=False
        )
        stateless_enc = StatelessOneHotEncoder(
            n_features=n_features,
            n_values=n_values,
            sparse=False
        )
        x0 = stateful_enc.fit_transform(x)
        # The stateless encoder transforms without a prior fit.
        x1 = stateless_enc.transform(x)
        npt.assert_allclose(x1, x0)

    @pytest.mark.parametrize(
        'n_features, n_values, categories', [
            (1, [3], [[0, 1, 2]]),
            (2, [3, 2], [[0, 1, 2], [0, 1]])
        ]
    )
    def test_same_as_stateful_for_multiple_n_values(
        self, n_features, n_values, categories, random
    ):
        # Per-feature cardinalities: column i draws from range(n_values[i]).
        x = np.hstack([
            np.random.randint(0, np.min(_n_values), size=(self.n_rows, 1))
            for _n_values in n_values
        ])
        stateful_enc = OneHotEncoder(
            categories=categories,
            sparse=False
        )
        stateless_enc = StatelessOneHotEncoder(
            n_features=n_features,
            n_values=n_values,
            sparse=False
        )
        x0 = stateful_enc.fit_transform(x)
        x1 = stateless_enc.transform(x)
        npt.assert_allclose(x1, x0)
class TestStatelessTemporalOneHotEncoder:
    """Behaviour of StatelessTemporalOneHotEncoder."""

    # kept for parity with the other test classes
    n_rows = 3

    @pytest.mark.parametrize('n_values', ['all', [True], [0]])
    def test_invalid_n_values(self, n_values):
        # Non-integer n_values specifications must be rejected at init time.
        with pytest.raises(ValueError):
            StatelessTemporalOneHotEncoder(n_features=1, n_values=n_values)

    def test_temporal_onehot(self):
        # Four temporal features are one-hot encoded over two values; the
        # outputs are grouped per value rather than per feature.
        features = np.array([
            [0, 0, 1, 1],
            [0, 1, 0, 1],
        ])
        expected = np.array(
            [
                [1, 1, 0, 0, 0, 0, 1, 1],
                [1, 0, 1, 0, 0, 1, 0, 1],
            ]
        )
        encoder = StatelessTemporalOneHotEncoder(
            n_features=features.shape[1], n_values=2, sparse=False
        )
        encoded = encoder.fit_transform(features)
        npt.assert_allclose(encoded, expected)
class TestPeriodicEncoder:
    """PeriodicEncoder maps each periodic feature to a (sin, cos) pair."""

    n_rows = 10
    # One full period sampled over [0, 1] and its expected encodings.
    column = np.linspace(0, 1, num=n_rows)
    column_sin = np.sin(2 * np.pi * column)
    column_cos = np.cos(2 * np.pi * column)
    column_stacked = np.vstack([column_sin, column_cos]).T

    def array(self, n_features):
        # Build an (n_rows, n_features) array, every column equal to `column`.
        x = np.arange(n_features)
        y = self.column
        _, X = np.meshgrid(x, y)
        return X

    @pytest.mark.parametrize('periodic_features', [[], [False]])
    def test_single_column_no_transform(self, periodic_features):
        # With no features marked periodic, the data passes through unchanged.
        enc = PeriodicEncoder(periodic_features=periodic_features, period=1)
        X = self.array(n_features=1)
        Xt = enc.fit_transform(X)
        npt.assert_array_equal(X, Xt)

    @pytest.mark.parametrize('periodic_features', ['all', [0], [True]])
    def test_single_column(self, periodic_features):
        # A single periodic column becomes its (sin, cos) pair.
        enc = PeriodicEncoder(periodic_features=periodic_features, period=1)
        X = self.array(n_features=1)
        Xt = enc.fit_transform(X)
        npt.assert_allclose(Xt, self.column_stacked)

    @pytest.mark.parametrize('n_features', [2])
    @pytest.mark.parametrize(
        'periodic_features', ['all', [0, 1], [True, True]]
    )
    def test_multi_column(self, n_features, periodic_features):
        # sin/cos outputs of the two columns are interleaved column-wise.
        enc = PeriodicEncoder(periodic_features=periodic_features, period=1)
        X = self.array(n_features=2)
        Xt = enc.fit_transform(X)
        npt.assert_allclose(Xt[:, ::2], self.column_stacked)
        npt.assert_allclose(Xt[:, 1::2], self.column_stacked)
class TestStatelessPeriodicEncoder:
    """StatelessPeriodicEncoder must match a fitted PeriodicEncoder."""

    # number of random example rows per test
    n_rows = 10

    @pytest.mark.parametrize(
        'n_features, periodic_features, period', [
            (1, 'all', 1.), (2, 'all', 1.), (2, 'all', [1., 2.]),
            (2, [True, False], 3), (2, [1], 3)
        ]
    )
    def test_same_as_stateful(self, n_features, periodic_features, period):
        x = np.random.randint(0, 10, size=(self.n_rows, n_features))
        stateful_enc = PeriodicEncoder(
            periodic_features=periodic_features, period=period
        )
        stateless_enc = StatelessPeriodicEncoder(
            n_features=n_features,
            periodic_features=periodic_features,
            period=period
        )
        x0 = stateful_enc.fit_transform(x)
        # The stateless encoder must produce the same result without a fit.
        x1 = stateless_enc.transform(x)
        npt.assert_array_equal(x1, x0)
| 32.422222 | 76 | 0.600583 | 699 | 5,836 | 4.792561 | 0.153076 | 0.064478 | 0.056418 | 0.033433 | 0.546866 | 0.460597 | 0.412836 | 0.362687 | 0.333731 | 0.297015 | 0 | 0.028578 | 0.2805 | 5,836 | 179 | 77 | 32.603352 | 0.769231 | 0 | 0 | 0.310127 | 0 | 0 | 0.039239 | 0 | 0 | 0 | 0 | 0 | 0.075949 | 1 | 0.06962 | false | 0 | 0.037975 | 0 | 0.196203 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51e1a411ad9558e7543b3930124feeca7cd75ff5 | 11,654 | py | Python | paper_examples/ex51_validation2D/main.py | jhabriel/mixdim-estimates | aa7041fe3fc7a13b820ef41dacefb759f4b592ff | [
"MIT"
] | 3 | 2022-02-15T14:56:16.000Z | 2022-03-24T10:20:00.000Z | paper_examples/ex51_validation2D/main.py | jhabriel/mixdim-estimates | aa7041fe3fc7a13b820ef41dacefb759f4b592ff | [
"MIT"
] | 3 | 2021-06-15T16:23:46.000Z | 2021-12-05T10:25:41.000Z | paper_examples/ex51_validation2D/main.py | jhabriel/mixdim-estimates | aa7041fe3fc7a13b820ef41dacefb759f4b592ff | [
"MIT"
] | null | null | null | # Importing modules
import numpy as np
import porepy as pp
import itertools
from time import time
from model import model
#%% Functions
def make_constrained_mesh(h=0.1):
    """Create an unstructured grid bucket for the single-fracture geometry.

    Parameters
    ----------
    h : float, optional
        Target mesh size for both the boundary and the fracture.
        The default is 0.1.

    Returns
    -------
    gb : PorePy Object
        Porepy grid bucket object.
    """
    unit_square = {"xmin": 0, "xmax": 1, "ymin": 0, "ymax": 1}
    fracture_network = pp.fracture_importer.network_2d_from_csv(
        "network.csv", domain=unit_square
    )
    # Use the same target length on the boundary and on the fracture;
    # constraints [1, 2] mark auxiliary lines that shape the mesh without
    # becoming lower-dimensional grids.
    mesh_arguments = {"mesh_size_bound": h, "mesh_size_frac": h}
    return fracture_network.mesh(mesh_arguments, constraints=[1, 2])
def create_non_matching_gridbucket(h_2d, h_1d, h_mortar):
    """
    Generates a gridbucket containing non-matching grids

    Parameters
    ----------
    h_2d : Float
        Mesh size of the higher-dimensional grid
    h_1d : Float
        Mesh size of the lower-dimensional grid
    h_mortar : Float
        Mesh size of the mortar grid

    Raises
    ------
    Warning
        If the subdomain cells are smaller than the mortar cell

    Returns
    -------
    gb : PorePy object
        Grid bucket

    """

    # Sanity check
    if (h_2d > h_mortar) or (h_1d > h_mortar):
        warning_msg = "Subdomain cell are smaller than mortar cells "
        warning_msg += "and this may lead to inconsistent results."
        # NOTE(review): this raises the Warning class as an exception instead
        # of emitting it via warnings.warn() -- execution stops here.
        raise Warning(warning_msg)

    # NOTE: The easiest way to construct the non-matching gridbucket is to
    # replace the lower-dimensional grid and the mortar grids into the
    # higher-dimensional grid

    # Create a grid bucket using h_2d as target mesh size
    gb_h = make_constrained_mesh(h_2d)
    gl_old = gb_h.grids_of_dimension(1)[0]  # extract 1d-grid
    mg_old = gb_h.get_mortar_grids()[0]  # extract mortar-grid

    # Obtain fracture and mortar grids to be replaced into
    gl_new = make_constrained_mesh(h_1d).grids_of_dimension(1)[0]
    mg_new = make_constrained_mesh(h_mortar).get_mortar_grids()[0]

    # Create the mapping dictionaries
    g_map = {gl_old: gl_new}
    mg_map = {mg_old: mg_new.side_grids}

    # Replace grids
    gb = gb_h.copy()
    gb.replace_grids(g_map=g_map)
    gb.replace_grids(mg_map=mg_map)

    return gb
#%% Defining numerical methods, and obtaining grid buckets
num_methods = ["TPFA", "MPFA", "RT0", "MVEM"]

levels = 5  # coarsening levels
coarsening_factor = 2
h_2d_ref = 0.003125  # reference 2D mesh size
h_1d_ref = h_2d_ref * 1.5  # reference 1D mesh size
h_mortar_ref = h_2d_ref * 2.0  # reference mortar mesh size
# Mesh-size ladders: each level multiplies the reference sizes by the factor.
h_2d = coarsening_factor ** np.arange(levels) * h_2d_ref
h_1d = coarsening_factor ** np.arange(levels) * h_1d_ref
h_mortar = coarsening_factor ** np.arange(levels) * h_mortar_ref

grid_buckets = []
tic = time()
print("Assembling non-matching grid buckets...", end="")
for counter in range(levels):
    grid_buckets.append(
        create_non_matching_gridbucket(h_2d[counter], h_1d[counter], h_mortar[counter])
    )
# Reverse so the list runs from the coarsest to the finest level.
grid_buckets = grid_buckets[::-1]
print(f"\u2713 Time {time() - tic}\n")

#%% Create dictionary and initialize fields
# d[method] collects, per refinement level, the estimates, true errors and
# efficiency indices reported by model().
d = {k: {} for k in num_methods}
for method in num_methods:
    d[method] = {
        "mesh_size": [],
        "error_estimate_2d": [],
        "true_error_pressure_2d": [],
        "true_error_velocity_2d": [],
        "mesh_size_2d": [],
        "error_estimate_1d": [],
        "true_error_pressure_1d": [],
        "true_error_velocity_1d": [],
        "mesh_size_1d": [],
        "error_estimate_mortar": [],
        "true_error_pressure_mortar": [],
        "true_error_velocity_mortar": [],
        "mesh_size_mortar": [],
        "majorant": [],
        "true_error_pressure": [],
        "true_error_velocity": [],
        "I_eff_pressure": [],
        "I_eff_velocity": [],
        "I_eff_combined": [],
    }

#%% Populate fields (NOTE: This loop may take considerable time)
# Solve every (method, refinement level) combination.
for i in itertools.product(num_methods, grid_buckets):

    # Print info in the console
    print("Solving with", i[0], "for refinement level", grid_buckets.index(i[1]) + 1)

    # Get hold of errors
    tic = time()
    (
        h_max,
        error_estimate_2d,
        true_error_pressure_2d,
        true_error_velocity_2d,
        mesh_size_2d,
        error_estimate_1d,
        true_error_pressure_1d,
        true_error_velocity_1d,
        mesh_size_1d,
        error_estimates_mortar,
        true_error_pressure_mortar,
        true_error_velocity_mortar,
        mesh_size_mortar,
        majorant,
        true_error_pressure,
        true_error_velocity,
        I_eff_pressure,
        I_eff_velocity,
        I_eff_combined,
    ) = model(i[1], i[0])
    print(f"Done. Time {time() - tic}\n")

    # Store errors in the dictionary
    d[i[0]]["mesh_size"].append(h_max)
    d[i[0]]["error_estimate_2d"].append(error_estimate_2d)
    d[i[0]]["true_error_pressure_2d"].append(true_error_pressure_2d)
    d[i[0]]["true_error_velocity_2d"].append(true_error_velocity_2d)
    d[i[0]]["mesh_size_2d"].append(mesh_size_2d)
    d[i[0]]["error_estimate_1d"].append(error_estimate_1d)
    d[i[0]]["true_error_pressure_1d"].append(true_error_pressure_1d)
    d[i[0]]["true_error_velocity_1d"].append(true_error_velocity_1d)
    d[i[0]]["mesh_size_1d"].append(mesh_size_1d)
    d[i[0]]["error_estimate_mortar"].append(error_estimates_mortar)
    d[i[0]]["true_error_pressure_mortar"].append(true_error_pressure_mortar)
    d[i[0]]["true_error_velocity_mortar"].append(true_error_velocity_mortar)
    d[i[0]]["mesh_size_mortar"].append(mesh_size_mortar)
    d[i[0]]["majorant"].append(majorant)
    d[i[0]]["true_error_pressure"].append(true_error_pressure)
    d[i[0]]["true_error_velocity"].append(true_error_velocity)
    d[i[0]]["I_eff_pressure"].append(I_eff_pressure)
    d[i[0]]["I_eff_velocity"].append(I_eff_velocity)
    d[i[0]]["I_eff_combined"].append(I_eff_combined)
#%% Exporting
# Flatten the nested dictionary into parallel columns and dump them as a
# whitespace-separated table (validation2d.txt).

# Permutations
rows = len(num_methods) * len(grid_buckets)

# Initialize lists
num_method_name = []
diam_2d = []
diam_1d = []
diam_mortar = []
col_2d_estimate = []
col_1d_estimate = []
col_mortar_estimate = []
col_majorant = []
col_true_error_pressure = []
col_true_error_velocity = []
I_eff_pressure = []
I_eff_velocity = []
I_eff_combined = []

# Populate lists: one entry per (method, refinement level) pair.
for i in itertools.product(num_methods, range(levels)):
    num_method_name.append(i[0])
    diam_2d.append(d[i[0]]["mesh_size_2d"][i[1]])
    diam_1d.append(d[i[0]]["mesh_size_1d"][i[1]])
    diam_mortar.append(d[i[0]]["mesh_size_mortar"][i[1]])
    col_2d_estimate.append(d[i[0]]["error_estimate_2d"][i[1]])
    col_1d_estimate.append(d[i[0]]["error_estimate_1d"][i[1]])
    col_mortar_estimate.append(d[i[0]]["error_estimate_mortar"][i[1]])
    col_majorant.append(d[i[0]]["majorant"][i[1]])
    col_true_error_pressure.append(d[i[0]]["true_error_pressure"][i[1]])
    col_true_error_velocity.append(d[i[0]]["true_error_velocity"][i[1]])
    I_eff_pressure.append(d[i[0]]["I_eff_pressure"][i[1]])
    I_eff_velocity.append(d[i[0]]["I_eff_velocity"][i[1]])
    I_eff_combined.append(d[i[0]]["I_eff_combined"][i[1]])

# Prepare for exporting: structured array with one field per output column.
export = np.zeros(rows,
    dtype=[ ('var2', 'U6'),
            ('var3', float), ('var4', float),
            ('var5', float), ('var6', float),
            ('var7', float), ('var8', float),
            ('var9', float), ('var10', float),
            ('var11', float), ('var12', float),
            ('var13', float), ('var14', float)
    ])

# Declaring column variables
export['var2'] = num_method_name
export['var3'] = diam_2d
export['var4'] = diam_1d
export['var5'] = diam_mortar
export['var6'] = col_2d_estimate
export['var7'] = col_1d_estimate
export['var8'] = col_mortar_estimate
export['var9'] = col_majorant
export['var10'] = col_true_error_pressure
export['var11'] = col_true_error_velocity
export['var12'] = I_eff_pressure
export['var13'] = I_eff_velocity
export['var14'] = I_eff_combined

# Formatting string
fmt = "%6s %2.5f %2.5f %2.5f %2.2e %2.2e "
fmt += "%2.2e %2.2e %2.2e %2.2e %2.2f %2.2f %2.2f"

# Headers
header = "num_method h_2d, h_1d, h_mortar, eta_2d eta_1d eta_mortar "
header += "majorant true_error_p true_error_u I_eff_p I_eff_u I_eff_pu"

# Writing into txt
np.savetxt('validation2d.txt', export, delimiter=',', fmt=fmt, header=header)
#%% Exporting to LaTeX
# Same table as above, but with a literal '&' column between every value so
# the rows can be pasted directly into a LaTeX tabular environment.

# Permutations
rows = len(num_methods) * len(grid_buckets)

# Initialize lists
# (sic: "ampersend" -- holds the '&' separators between LaTeX columns)
ampersend = []
for i in range(rows): ampersend.append('&')
num_method_name = []
diam_2d = []
diam_1d = []
diam_mortar = []
col_2d_estimate = []
col_1d_estimate = []
col_mortar_estimate = []
col_majorant = []
col_true_error_pressure = []
col_true_error_velocity = []
I_eff_pressure = []
I_eff_velocity = []
I_eff_combined = []

# Populate lists: one entry per (method, refinement level) pair.
for i in itertools.product(num_methods, range(levels)):
    num_method_name.append(i[0])
    diam_2d.append(d[i[0]]["mesh_size_2d"][i[1]])
    diam_1d.append(d[i[0]]["mesh_size_1d"][i[1]])
    diam_mortar.append(d[i[0]]["mesh_size_mortar"][i[1]])
    col_2d_estimate.append(d[i[0]]["error_estimate_2d"][i[1]])
    col_1d_estimate.append(d[i[0]]["error_estimate_1d"][i[1]])
    col_mortar_estimate.append(d[i[0]]["error_estimate_mortar"][i[1]])
    col_majorant.append(d[i[0]]["majorant"][i[1]])
    col_true_error_pressure.append(d[i[0]]["true_error_pressure"][i[1]])
    col_true_error_velocity.append(d[i[0]]["true_error_velocity"][i[1]])
    I_eff_pressure.append(d[i[0]]["I_eff_pressure"][i[1]])
    I_eff_velocity.append(d[i[0]]["I_eff_velocity"][i[1]])
    I_eff_combined.append(d[i[0]]["I_eff_combined"][i[1]])

# Prepare for exporting: value fields interleaved with '&' separator fields.
export = np.zeros(rows,
    dtype=[ ('var2', 'U6'),
            ('var3', float), ('var4', float),
            ('var5', float), ('var6', float),
            ('amp1', 'U6'), ('var7', float),
            ('amp2', 'U6'), ('var8', float),
            ('amp3', 'U6'), ('var9', float),
            ('amp4', 'U6'), ('var10', float),
            ('amp5', 'U6'), ('var11', float),
            ('amp6', 'U6'), ('var12', float),
            ('amp7', 'U6'), ('var13', float),
            ('amp8', 'U6'), ('var14', float)
    ])

# Prepare for exporting
export['var2'] = num_method_name
export['var3'] = diam_2d
export['var4'] = diam_1d
export['var5'] = diam_mortar
export['var6'] = col_2d_estimate
export['amp1'] = ampersend
export['var7'] = col_1d_estimate
export['amp2'] = ampersend
export['var8'] = col_mortar_estimate
export['amp3'] = ampersend
export['var9'] = col_majorant
export['amp4'] = ampersend
export['var10'] = col_true_error_pressure
export['amp5'] = ampersend
export['var11'] = col_true_error_velocity
export['amp6'] = ampersend
export['var12'] = I_eff_pressure
export['amp7'] = ampersend
export['var13'] = I_eff_velocity
export['amp8'] = ampersend
export['var14'] = I_eff_combined

# Formatting string
fmt = "%6s %2.5f %2.5f %2.5f %2.2e %1s %2.2e %1s %2.2e "
fmt += "%1s %2.2e %1s %2.2e %1s %2.2e %1s %2.2f %1s %2.2f %1s %2.2f"

# Headers
header = "num_method h_2d h_1d h_mortar eta_2d & eta_1d & eta_mortar & "
header += "majorant & true_error_p & true_error_u & I_eff_p & I_eff_u & I_eff_pu"

np.savetxt('validation2d_tex.txt',
    export,
    delimiter=',',
    fmt=fmt,
    header=header
)
| 32.282548 | 87 | 0.643041 | 1,706 | 11,654 | 4.094373 | 0.139508 | 0.067001 | 0.018468 | 0.030923 | 0.565211 | 0.538153 | 0.434503 | 0.397423 | 0.393414 | 0.393414 | 0 | 0.039266 | 0.204565 | 11,654 | 360 | 88 | 32.372222 | 0.714239 | 0.144843 | 0 | 0.395062 | 0 | 0.016461 | 0.213826 | 0.037169 | 0 | 0 | 0 | 0 | 0 | 1 | 0.00823 | false | 0 | 0.024691 | 0 | 0.041152 | 0.016461 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51e7204783742c4d06205cf3ac4d3a46079650b3 | 22,987 | py | Python | slottools/PhotometryConfigWidget.py | apodemus/pysalt3 | 97bb790ad7bcf1137e3ffd2a7b32840ae7167358 | [
"BSD-3-Clause"
] | null | null | null | slottools/PhotometryConfigWidget.py | apodemus/pysalt3 | 97bb790ad7bcf1137e3ffd2a7b32840ae7167358 | [
"BSD-3-Clause"
] | null | null | null | slottools/PhotometryConfigWidget.py | apodemus/pysalt3 | 97bb790ad7bcf1137e3ffd2a7b32840ae7167358 | [
"BSD-3-Clause"
] | 1 | 2021-07-15T19:43:59.000Z | 2021-07-15T19:43:59.000Z | ################################# LICENSE ##################################
# Copyright (c) 2009, South African Astronomical Observatory (SAAO) #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# * Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer #
# in the documentation and/or other materials provided with the #
# distribution. #
# * Neither the name of the South African Astronomical Observatory #
# (SAAO) nor the names of its contributors may be used to endorse #
# or promote products derived from this software without specific #
# prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE SAAO ''AS IS'' AND ANY EXPRESS OR #
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED #
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE #
# DISCLAIMED. IN NO EVENT SHALL THE SAAO BE LIABLE FOR ANY #
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL #
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS #
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN #
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
############################################################################
"""
Module containing generic graphical user interface widgets.
"""
# Ensure python 2.5 compatibility
import matplotlib.cm
# General imports
import pyfits
import numpy as np
# Gui library imports
try:
from PyQt4.QtCore import QString
except ImportError:
QString = str
from PyQt4 import QtGui, QtCore
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib.patches import CirclePolygon, Rectangle
# Salt imports
import saltsafeio
from salterror import SaltError, SaltIOError
from saltimagetools import find_object, zscale
class PhotometryConfigWidget(QtGui.QWidget):
    """Configure dialog for photometry.

    Collects, for both the target and the comparison object:

    * position and aperture size
    * background definition

      * type (annulus or rectangular region)
      * the corresponding parameters
    """
def __init__(self, imdisplay, config, imlist=None, number=1, parent=None):
    """Setup widget.

    *imdisplay* a `FitsDisplay` derived fits display widget,
    *imlist* a list of fits image filenames,
    *config* filename used for output configuration file,
    *number* image number to load on startup,
    *parent* parent widget.
    """
    # Set default parameters
    self.imlist=imlist
    self.number=number
    self.config=config
    # Amplifier each object was marked on (see getCurrentAmp)
    self.amp={'target' : 1, 'comparison' : 1 }
    # Set default marker
    self.mark_with='circle'
    # Set default search distance for recentering
    self.distance=5
    # Default line style parameters
    self.line={ 'target' : { 'color' : 'g', 'width' : 2 },
                'comparison' : { 'color' : 'g', 'width' : 2 }}
    # Import gui (deferred so the module can be imported without the ui file)
    from ui_photometryconfigwidget import Ui_PhotometryConfigWidget
    # Setup widget
    QtGui.QWidget.__init__(self, parent)
    # Bind gui to widget
    self.ui = Ui_PhotometryConfigWidget()
    self.ui.setupUi(self)
    # Destroy widget on close
    self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
    # Connect to display window
    self.imdisplay=imdisplay
    # Connect position selected signal from display to event handler
    self.connect(self.imdisplay, QtCore.SIGNAL('positionSelected(float, float)'), self.selectionHandler)
    # Queues of input widgets that will consume the next clicked position
    # (filled by captureHandler, drained by selectionHandler)
    self.xdisplay=[]
    self.ydisplay=[]
    self.rdisplay=[]
    # Keep track of currently displayed objects
    self.display={'target' : {'position' : False,
                              'annulus' : False,
                              'region' : False },
                  'comparison' : {'position' : False,
                                  'annulus' : False,
                                  'region' : False }}
    # Keep track of input widgets
    self.parameters=['x','y','r','r1','r2','x1','y1','x2','y2']
    self.input={'target' : { 'x' : self.ui.tgtXLineEdit,
                             'y' : self.ui.tgtYLineEdit,
                             'r' : self.ui.tgtRLineEdit,
                             'r1' : self.ui.tgtR1LineEdit,
                             'r2' : self.ui.tgtR2LineEdit,
                             'x1' : self.ui.tgtX1LineEdit,
                             'y1' : self.ui.tgtY1LineEdit,
                             'x2' : self.ui.tgtX2LineEdit,
                             'y2' : self.ui.tgtY2LineEdit},
                'comparison' : { 'x' : self.ui.cmpXLineEdit,
                                 'y' : self.ui.cmpYLineEdit,
                                 'r' : self.ui.cmpRLineEdit,
                                 'r1' : self.ui.cmpR1LineEdit,
                                 'r2' : self.ui.cmpR2LineEdit,
                                 'x1' : self.ui.cmpX1LineEdit,
                                 'y1' : self.ui.cmpY1LineEdit,
                                 'x2' : self.ui.cmpX2LineEdit,
                                 'y2' : self.ui.cmpY2LineEdit}}
    # Keep track of capture buttons
    self.buttons=['position','radius','annulus','region']
    self.capture={'target' \
                  : {'position' : self.ui.captureTgt,
                     'radius' : self.ui.captureTgtRadius,
                     'annulus' : self.ui.captureTgtAnulusBackground,
                     'region' : self.ui.captureTgtRegionBackground},
                  'comparison' \
                  : {'position' : self.ui.captureCmp,
                     'radius' : self.ui.captureCmpRadius,
                     'annulus' : self.ui.captureCmpAnulusBackground,
                     'region' : self.ui.captureCmpRegionBackground}}
    # Keep track of checkbox recenter widgets
    self.recenter={'target' : self.ui.tgtRecenterCheckBox,
                   'comparison' : self.ui.cmpRecenterCheckBox}
    self.centered={'target' : False,
                   'comparison' : False}
    # Per-parameter re-entrancy guards so programmatic setText calls in
    # textUpdated do not trigger endless textChanged loops
    self.block={'target' : { 'x' : False,
                             'y' : False,
                             'r' : False,
                             'r1' : False,
                             'r2' : False,
                             'x1' : False,
                             'y1' : False,
                             'x2' : False,
                             'y2' : False},
                'comparison' : { 'x' : False,
                                 'y' : False,
                                 'r' : False,
                                 'r1' : False,
                                 'r2' : False,
                                 'x1' : False,
                                 'y1' : False,
                                 'x2' : False,
                                 'y2' : False}}
    # Set validator to ensure valid input on lineEdit input widgets
    self.validator = QtGui.QDoubleValidator(self)
    for object in ['target','comparison']:
        for key in self.parameters:
            self.input[object][key].setValidator(self.validator)
    # Set signal mapper for lineEdit updates
    self.drawMapper = QtCore.QSignalMapper(self)
    # Connect lineEdit updates to signal mapper
    for object in ['target','comparison']:
        for key in self.parameters:
            # Add signal map entry ("object,parameter" identifies the sender)
            self.drawMapper.setMapping(self.input[object][key],
                                       QString(object+','+key))
            # Connect to signal mapper
            self.connect(self.input[object][key], QtCore.SIGNAL('textChanged(QString)'), self.drawMapper, QtCore.SLOT('map()'))
    # Connect signal mapper to draw handler
    self.connect(self.drawMapper, QtCore.SIGNAL('mapped(QString)'),
                 self.textUpdated)
    # Set signal mapper for capture buttons
    self.captureMapper = QtCore.QSignalMapper(self)
    # Connect capture button signals to signal mapper
    for object in ['target','comparison']:
        for key in self.buttons:
            # Add signal map entry
            self.captureMapper.setMapping(self.capture[object][key],
                                          QString(object+','+key))
            # Connect to signal mapper
            self.connect(self.capture[object][key], QtCore.SIGNAL('clicked()'), self.captureMapper, QtCore.SLOT('map()'))
    # Connect signal mapper to capture handler
    self.connect(self.captureMapper, QtCore.SIGNAL('mapped(QString)'),
                 self.captureHandler)
    # Connect save button
    self.connect(self.ui.saveButton, QtCore.SIGNAL('clicked()'), self.save)
    # If an image list is given
    if self.imlist is not None:
        # Connect image selection spinBox to event handlers
        self.connect(self.ui.imageSpinBox, QtCore.SIGNAL('valueChanged(int)'), self.loadImage)
        self.connect(self.ui.imageSpinBox, QtCore.SIGNAL('valueChanged(int)'), self.redraw)
        # Load first image
        self.setImageNumber(self.number)
    # Hide end selection widgets (not implemented here)
    self.ui.tgtEndPosLabel.hide()
    self.ui.tgtEndXLabel.hide()
    self.ui.tgtEndYLabel.hide()
    self.ui.cmpEndPosLabel.hide()
    self.ui.cmpEndXLabel.hide()
    self.ui.cmpEndYLabel.hide()
    self.ui.tgtXEndLineEdit.hide()
    self.ui.tgtYEndLineEdit.hide()
    self.ui.cmpXEndLineEdit.hide()
    self.ui.cmpYEndLineEdit.hide()
    self.ui.captureTgtEnd.hide()
    self.ui.captureCmpEnd.hide()
def setImageNumber(self, number):
    """Jump the image spin box to *number*.

    Setting the spin box value also fires its valueChanged handlers
    (loadImage/redraw) when an image list was supplied.
    """
    spinbox = self.ui.imageSpinBox
    spinbox.setValue(number)
def loadImage(self, number):
    """Loads a new image.

    *number* is the image number to be loaded.

    This function uses `saltsafeio.getexposure` to get the correct
    exposure from a list of fits files containing an arbitrary number
    of extensions.
    """
    # Emit signal so listeners can track the currently loaded exposure
    self.emit(QtCore.SIGNAL("imageNumberUpdated(int)"), number)
    # Load image from file
    self.img=saltsafeio.get_exposure(self.imlist,number)
    # Display image
    self.imdisplay.loadImage(self.img)
    # Redraw canvas
    self.imdisplay.redraw_canvas()
def mark(self, *args, **kwargs):
    """Draw the configured marker shape on the display widget.

    Dispatches to addSquare or addCircle according to ``self.mark_with``;
    any other value is silently ignored (matching the original if/elif).
    """
    shape_handlers = {'square': self.imdisplay.addSquare,
                      'circle': self.imdisplay.addCircle}
    handler = shape_handlers.get(self.mark_with)
    if handler is not None:
        handler(*args, **kwargs)
def textUpdated(self, key):
    # Handler for every lineEdit textChanged signal.  *key* is the signal
    # mapper string "object,parameter".  Applies recentering and the
    # various lock/align side effects, then redraws the markers.
    # Get object and parameter from key
    obj,par=str(key).split(',')
    # Check block: bail out if this parameter is already being updated
    if self.block[obj][par]:
        return
    # Set block to prevent infinite repeat (our setText calls below would
    # otherwise re-trigger this handler)
    self.block[obj][par]=True
    # Recenter on object if requested
    if par=='x' and self.recenter[obj].isChecked() and not self.centered[obj]:
        x=float(self.input[obj]['x'].text())
        y=float(self.input[obj]['y'].text())
        r=float(self.input[obj]['r'].text())
        x,y=find_object(self.img,x,y,self.distance)
        self.input[obj]['x'].setText(str(x))
        self.input[obj]['y'].setText(str(y))
        # Toggle so the setText above does not recenter a second time
        self.centered[obj]=not(self.centered[obj])
    # Check if object region size locking is on: mirror the aperture
    # radius to the other object
    if self.ui.lockObjectSizes.isChecked():
        if par=='r':
            r=self.input[obj]['r'].text()
            if obj=='target':
                self.input['comparison']['r'].setText(r)
            elif obj=='comparison':
                self.input['target']['r'].setText(r)
    # Check if background size locking is on: mirror background
    # parameters (and the background-type radio button) to the other object
    if self.ui.lockBackgroundSize.isChecked():
        if par in ['r1','r2']:
            r=self.input[obj][par].text()
            if obj=='target':
                self.ui.cmpAnulusRadioButton.setChecked(True)
                self.input['comparison'][par].setText(r)
            elif obj=='comparison':
                self.ui.tgtAnulusRadioButton.setChecked(True)
                self.input['target'][par].setText(r)
        elif par in ['x1','y1','x2','y2']:
            c=self.input[obj][par].text()
            if obj=='target':
                self.ui.cmpRegionRadioButton.setChecked(True)
                self.input['comparison'][par].setText(c)
            elif obj=='comparison':
                self.ui.tgtRegionRadioButton.setChecked(True)
                self.input['target'][par].setText(c)
    # Check if background region centering: keep y1/y2 symmetric about
    # the vertical center of the image
    if self.ui.allignTgtVerticalCenter.isChecked():
        if par in ['y1','y2']:
            y=float(self.input[obj][par].text())
            center=self.img.shape[0]/2.0
            height=abs(y-center)
            self.input[obj]['y1'].setText(str(center+height))
            self.input[obj]['y2'].setText(str(center-height))
    # Draw markers
    self.draw(key)
    # Unset block
    self.block[obj][par]=False
def draw(self, key):
    """Draws markers for object positions, and backgrounds.

    To be called when any input widget value changes.

    *key* is given by the signal mapper and consists of a string with
    the object and parameter separated by a comma.
    """
    # Get object and parameter from key
    obj,par=str(key).split(',')
    # ValueError from float() on empty/partial lineEdit text is expected
    # while the user is typing; it simply skips the redraw (see except).
    try:
        # Remember which amplifier this object was marked on
        self.amp[obj]=self.getCurrentAmp()
        # Draw markers
        if par=='x' or par=='y' or par=='r':
            x=float(self.input[obj]['x'].text())
            y=float(self.input[obj]['y'].text())
            r=float(self.input[obj]['r'].text())
            self.display[obj]['position']=True
            self.mark(obj,x,y,r,color=self.line[obj]['color'],lw=self.line[obj]['width'])
        elif par=='r1' or par=='r2':
            # Annulus is selected so remove region marker
            self.imdisplay.removePatch(obj+'_region')
            x=float(self.input[obj]['x'].text())
            y=float(self.input[obj]['y'].text())
            r=float(self.input[obj][par].text())
            # Keep track of the selected background mode
            self.display[obj]['annulus']=True
            self.display[obj]['region']=False
            self.mark(obj+'_'+par,x,y,r,color=self.line[obj]['color'],lw=self.line[obj]['width'])
        elif par=='x1' or par=='y1' or par=='x2' or par=='y2':
            # Region is selected so remove annulus markers
            self.imdisplay.removePatch(obj+'_r1')
            self.imdisplay.removePatch(obj+'_r2')
            x1=float(self.input[obj]['x1'].text())
            y1=float(self.input[obj]['y1'].text())
            x2=float(self.input[obj]['x2'].text())
            y2=float(self.input[obj]['y2'].text())
            # Keep track of the selected background mode
            self.display[obj]['annulus']=False
            self.display[obj]['region']=True
            self.imdisplay.addRectangle(obj+'_region',x1,y1,x2,y2,
                                        color=self.line[obj]['color'],lw=self.line[obj]['width'])
        # Redraw canvas
        self.imdisplay.redraw_canvas(keepzoom=True)
    except ValueError:
        pass
def redraw(self, number):
    """Redraws object and background markers for all objects on the
    currently displayed amplifier *number*.
    """
    self.imdisplay.reset()
    # Find wich amplifier is currently displayed
    amp=self.getCurrentAmp()
    # (Re)draw markers, but only for objects marked on this amplifier
    for obj in ['target','comparison']:
        if self.amp[obj]==amp:
            if self.display[obj]['position']:
                # drawing 'r' also reads x and y, so one call suffices
                self.draw(obj+','+'r')
            if self.display[obj]['annulus']:
                self.draw(obj+','+'r1')
                self.draw(obj+','+'r2')
            if self.display[obj]['region']:
                # drawing any region parameter redraws the whole rectangle
                self.draw(obj+','+'y2')
def getCurrentAmp(self, namps=4):
    """Return the amplifier (1..namps) of the currently displayed exposure.

    Exposures cycle through the CCD amplifiers, so the amplifier is the
    exposure number modulo *namps*, mapped into the 1-based range.
    """
    exposure = int(self.ui.imageSpinBox.value())
    # Shift to 0-based, wrap, shift back: identical to `exposure % namps`
    # with a 0 result replaced by namps.
    return ((exposure - 1) % namps) + 1
def captureHandler(self, key):
    """Called when a capture button is clicked.

    *key* is given by the signal mapper and consists of a string with
    the object and parameter separated by a comma.

    Depending on the *key* input widgets are added to the current
    display lists.
    Subsequent calls to `self.selectionHandler` get displayed in
    the listed widgets.
    """
    # Get object and parameter from key
    obj,par=str(key).split(',')
    # Add input widgets to lists (queues consumed by selectionHandler)
    if par=='position':
        self.xdisplay=[self.input[obj]['x']]
        self.ydisplay=[self.input[obj]['y']]
        self.rdisplay=[]
    elif par=='radius':
        self.xdisplay=[]
        self.ydisplay=[]
        # remember the object center so the radius can be computed
        self.x=float(self.input[obj]['x'].text())
        self.y=float(self.input[obj]['y'].text())
        self.rdisplay=[self.input[obj]['r']]
    elif par=='annulus':
        self.xdisplay=[]
        self.ydisplay=[]
        self.x=float(self.input[obj]['x'].text())
        self.y=float(self.input[obj]['y'].text())
        # two clicks: inner then outer annulus radius
        self.rdisplay=[self.input[obj]['r1'], self.input[obj]['r2']]
    elif par=='region':
        # two clicks: opposite corners of the background rectangle
        self.xdisplay=[self.input[obj]['x1'], self.input[obj]['x2']]
        self.ydisplay=[self.input[obj]['y1'], self.input[obj]['y2']]
        self.rdisplay=[]
def selectionHandler(self, x, y):
    """Event handler for click in image display window.

    *x*, *y* is the position (in image pixel coordinates) of the click.

    These positions are inserted into the first input widgets in the
    display lists.
    If a radius is requested this is calculated from the position given
    in (self.x, self.y) which should be set to the current object.
    """
    if len(self.xdisplay)>0:
        display=self.xdisplay.pop(0)
        display.setText(str(x))
    if len(self.ydisplay)>0:
        display=self.ydisplay.pop(0)
        display.setText(str(y))
    if len(self.rdisplay)>0:
        # Euclidean distance from the stored object center to the click
        r=np.sqrt((x-self.x)**2+(y-self.y)**2)
        display=self.rdisplay.pop(0)
        display.setText(str(r))
def setSearchDistance(self, distance):
    """Set the pixel search distance used when recentering on an object."""
    self.distance = int(distance)
def setMarkerType(self, marker):
    """Select the marker shape; only 'circle' and 'square' are accepted.

    Raises SaltIOError for any other value.
    """
    if marker not in ('circle', 'square'):
        raise SaltIOError('Unknown marker type '+str(marker))
    self.mark_with=marker
def setLineColor(self, object, color):
    """Set the marker line color for *object* ('target' or 'comparison')."""
    self.line[object]['color'] = color
def setLineWidth(self, object, width):
    """Set the marker line width for *object* ('target' or 'comparison')."""
    self.line[object]['width'] = width
def save(self):
    """Write the photometry configuration to ``self.config`` and close.

    One line per object (1 = target, 2 = comparison)::

        For objects that use an annulus background:
            object amp x y r r1 r2
        For objects that use a region background:
            object amp x y r x1 y1 x2 y2

    Raises SaltError if the two objects use different background types,
    which downstream SLOTPHOT cannot handle.
    """
    if (self.ui.tgtAnulusRadioButton.isChecked() and self.ui.cmpRegionRadioButton.isChecked()) or \
       (self.ui.tgtRegionRadioButton.isChecked() and self.ui.cmpAnulusRadioButton.isChecked()):
        msg='SLOTPREVIEW--SLOTPHOT can not handle different background types'
        raise SaltError(msg)
    # Write values to file
    with open(self.config,'w') as f:
        for i,obj in enumerate(['target','comparison']):
            # Background type follows each object's radio-button selection
            b_type='region'
            if obj=='target':
                if self.ui.tgtAnulusRadioButton.isChecked(): b_type='annulus'
            elif obj=='comparison':
                if self.ui.cmpAnulusRadioButton.isChecked(): b_type='annulus'
            line='%i\t%i\t' % (i+1, self.amp[obj])
            if b_type=='annulus':
                line+=''.join('%3.2f\t' % float(self.input[obj][key].text()) for key in ['x', 'y', 'r', 'r1', 'r2'])
            else:
                # BUG FIX: the key list previously read
                # ['x','y','r','x1','y2','x2','y2'] — writing y2 twice and
                # never y1, contradicting the documented format above.
                line+=''.join('%3.2f\t' % float(self.input[obj][key].text()) for key in ['x', 'y', 'r', 'x1', 'y1', 'x2', 'y2'])
            # Write string to configfile
            f.write(line.rstrip()+'\n')
    # Close the widget after a successful save
    # (a stray debug print of the target radio-button state was removed)
    self.close()
| 39.428816 | 131 | 0.525253 | 2,404 | 22,987 | 5.009567 | 0.206323 | 0.030391 | 0.035871 | 0.028232 | 0.250851 | 0.19638 | 0.17313 | 0.150046 | 0.150046 | 0.150046 | 0 | 0.008578 | 0.355897 | 22,987 | 582 | 132 | 39.496564 | 0.804809 | 0.274938 | 0 | 0.196013 | 0 | 0 | 0.070532 | 0.004204 | 0 | 0 | 0 | 0 | 0 | 1 | 0.049834 | false | 0.003322 | 0.043189 | 0 | 0.10299 | 0.003322 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51e720cfeb8235927a5ad18a3477edc29e509a46 | 1,895 | py | Python | Linked Lists/add_two_numbers.py | fredricksimi/leetcode | f6352c26914ca77f915f5994746ecf0b36efc89b | [
"MIT"
] | null | null | null | Linked Lists/add_two_numbers.py | fredricksimi/leetcode | f6352c26914ca77f915f5994746ecf0b36efc89b | [
"MIT"
] | null | null | null | Linked Lists/add_two_numbers.py | fredricksimi/leetcode | f6352c26914ca77f915f5994746ecf0b36efc89b | [
"MIT"
] | 1 | 2021-12-05T12:27:46.000Z | 2021-12-05T12:27:46.000Z | """
Add Two Numbers: Leetcode 2
You are given two non-empty linked lists representing two non-negative integers.
The digits are stored in reverse order, and each of their nodes contains a single digit.
Add the two numbers and return the sum as a linked list.
You may assume the two numbers do not contain any leading zero, except the number 0 itself.
"""
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution(object):
    """Add two non-negative integers stored as reverse-order linked lists.

    Works exactly like column addition in elementary math: add digit
    pairs from the least-significant end, carrying a 1 whenever a column
    sum exceeds 9::

          111  <- carried values
          |||
          7692
        +3723
        -----
         0426  (digits emitted least-significant first)
    """

    # O(max(m,n)) time | O(max(m,n)) space | m=len(l1), n=len(l2)
    def addTwoNumbers(self, l1, l2):
        node1, node2 = l1, l2
        carry = 0
        # Dummy head node; the real answer starts at head.next.
        head = ListNode()
        tail = head
        # Keep going while either list has digits or a carry is pending
        # (e.g. 119 + 119 needs one extra node for the final carry).
        while node1 is not None or node2 is not None or carry != 0:
            digit1 = digit2 = 0
            if node1 is not None:
                digit1 = node1.val
                node1 = node1.next
            if node2 is not None:
                digit2 = node2.val
                node2 = node2.next
            # A column sum is at most 9 + 9 + 1 = 19, so carry is 0 or 1;
            # divmod splits it into (carry, digit) in one step.
            carry, value = divmod(carry + digit1 + digit2, 10)
            tail.next = ListNode(val=value)
            tail = tail.next
        # Skip the dummy node created during initialization.
        return head.next
| 25.958904 | 91 | 0.543008 | 256 | 1,895 | 3.964844 | 0.472656 | 0.049261 | 0.032512 | 0.011823 | 0.053202 | 0.053202 | 0.053202 | 0 | 0 | 0 | 0 | 0.044764 | 0.375198 | 1,895 | 72 | 92 | 26.319444 | 0.8125 | 0.510818 | 0 | 0.08 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0 | 0 | 0.12 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51e82124f47a81e7a21c3ded6ba32d9e42bbdaa0 | 6,539 | py | Python | darts_socket/darts_server/display.py | y-azuma/opencv-softdarts | 644778be219fb96cdde32b884157899d39fc14e5 | [
"MIT"
] | 9 | 2019-05-01T18:42:47.000Z | 2021-09-05T09:49:44.000Z | darts_socket/darts_server/display.py | y-azuma/opencv-softdarts | 644778be219fb96cdde32b884157899d39fc14e5 | [
"MIT"
] | null | null | null | darts_socket/darts_server/display.py | y-azuma/opencv-softdarts | 644778be219fb96cdde32b884157899d39fc14e5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import tkinter as tk
import sound
import socket
import threading
class ConnClient():
'''
ソケット通信によりラズベリーパイから画像情報を受け取る。
'''
def __init__(self,conn, addr):
self.conn_socket = conn
self.addr = addr
self.recvdata = 0
self.recvdata1 = 0
self.recvdata2 = 0
self.data_list=0
def run(self):
try:
self.recvdata = self.conn_socket.recv(2359296)
self.recvdata1 = self.recvdata.decode('utf-8')
self.recvdata2 = self.recvdata1.split(",")
self.data_list = [int(s) for s in self.recvdata2]
except socket.error:
print("connect error")
def stop(self):
self.conn_socket.close()
def main():
    # Blocking server loop: accept a connection, read one comma-separated
    # integer packet via ConnClient, and feed it to bullsystem().
    # Runs forever; intended to be started on a daemon thread (see buffer()).
    global recvlist
    s_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s_socket.bind((HOSTNAME, PORT))
    s_socket.listen(CLIENTNUM)
    while (1):
        conn, addr = s_socket.accept()
        print("Conneted by" + str(addr))
        connClientThread = ConnClient(conn, addr)
        connClientThread.run()
        recvlist = connClientThread.data_list
        print(recvlist)
        # recvlist[0] is the bull-hit flag consumed by bullsystem()
        bullsystem(recvlist[0])
def bullsystem(flag):
    # Update game state from the latest packet and refresh the GUI.
    # *flag* is 1 when the throw hit the bull.
    global throw_number, score, round_total,recvlist
    # Packet layout inferred from usage (NOTE(review): confirm against the
    # Pi-side sender): [flag, photo order, round total, throw1, throw2, throw3]
    photoorder = recvlist[1]
    throw_number += 1
    round_total = recvlist[2]
    first_throw = recvlist[3]
    second_throw = recvlist[4]
    third_throw = recvlist[5]
    # Show the three per-throw scores on the canvas
    canvas.itemconfig(on_canvas_text1, text=str(first_throw))
    canvas.itemconfig(on_canvas_text2, text=str(second_throw))
    canvas.itemconfig(on_canvas_text3, text=str(third_throw))
    if flag == 1:
        # Bull: play the hit sound, add 50 points, log and redraw the score
        play_sounds.sound1()
        score += 50
        lb.insert(tk.END, str(throw_number)+ "BULL " + str(score))
        canvas.itemconfig(
            on_canvas_text,
            text=str(score)
        )
    else:
        lb.insert(tk.END, str(throw_number)+"NO BULL"+ str(score))
    # After the third photo of a scoring round, show the award image
    if photoorder == 3 and round_total > 0:
        changeimg()
def memo():
    # Append free-form memo text from the entry box to the log listbox.
    value = entry.get()
    if not value:
        # Prompt (Japanese): "please type something"
        lb.insert(tk.END, "入力してね")
    else:
        lb.insert(tk.END, value)
    # Clear the entry box (no-op when it was already empty)
    entry.delete(0, tk.END)
def changeimg():
    """Show the award image for the finished round.

    Moves the score text items off-screen, swaps the canvas image to the
    one matching ``round_total`` (50/100/150 points), then schedules the
    award sound and the restore via rechangeimg().
    """
    global canvas, on_canvas, score, round_total
    # Shove every score text item off-screen while the award image shows.
    for text_item in (on_canvas_text, on_canvas_text1,
                      on_canvas_text2, on_canvas_text3):
        canvas.move(text_item, 1000, 1000)
    if round_total == 50:
        canvas.itemconfig(on_canvas, image=images[1])   # one bull
    elif round_total == 100:
        canvas.itemconfig(on_canvas, image=images[2])   # low ton
    elif round_total == 150:
        canvas.itemconfig(on_canvas, image=images[3])   # hat trick
    root.after(3900, play_sounds.sound2)
    root.after(7000, rechangeimg)
def rechangeimg():
    """Restore the normal board image and bring the score text back on-screen."""
    global root, canvas
    canvas.itemconfig(on_canvas, image=images[0])
    # Undo the +1000/+1000 off-screen shift applied by changeimg().
    for text_item in (on_canvas_text, on_canvas_text1,
                      on_canvas_text2, on_canvas_text3):
        canvas.move(text_item, -1000, -1000)
def buffer():
    """Run the socket-server loop (main) on a background daemon thread.

    A daemon thread dies with the GUI so the blocking accept() loop does
    not keep the process alive after the window closes.
    """
    worker = threading.Thread(target=main, name='main', daemon=True)
    worker.start()
def rungui():
    # Build the Tk main window, canvas, log listbox and controls, then
    # enter the Tk event loop (blocks until the window closes).
    global root, canvas, on_canvas, images, lb, entry, on_canvas_text, score
    global on_canvas_text1, on_canvas_text2, on_canvas_text3
    # Main window
    root = tk.Tk()
    root.geometry("1140x675")
    root.title("DARTS BULL GAME")
    font = ("Helevetica", 14)
    font_log = ("Helevetica", 11)
    # menubar
    menubar = tk.Menu(root)
    root.config(menu=menubar)
    # startmenu ("開始する" = start)
    startmenu = tk.Menu(menubar)
    menubar.add_cascade(label="BULL GAME", menu=startmenu)
    startmenu.add_command(label="開始する", command=lambda: buffer())
    # canvas make
    canvas = tk.Canvas(
        root,
        width=960,
        height=600,
        relief=tk.RIDGE,
        bd=2
    )
    canvas.place(x=175, y=0)
    # images: index 0 = normal board, 1..3 = award images (see changeimg)
    images.append(tk.PhotoImage(file="501.png"))
    images.append(tk.PhotoImage(file="onebull.png"))
    images.append(tk.PhotoImage(file="lowton.png"))
    images.append(tk.PhotoImage(file="hattrick.png"))
    on_canvas = canvas.create_image(
        0,
        0,
        image=images[0],
        anchor=tk.NW
    )
    # Big total-score text plus the three per-throw score texts
    on_canvas_text = canvas.create_text(
        480, 300, text=str(score), font=("Helvetica", 250, "bold")
    )
    on_canvas_text1 = canvas.create_text(
        850, 145, text=0, font=("Helvetica", 40, "bold"), fill='white')
    on_canvas_text2 = canvas.create_text(
        850, 195, text=0, font=("Helvetica", 40, "bold"), fill='white')
    on_canvas_text3 = canvas.create_text(
        850, 245, text=0, font=("Helvetica", 40, "bold"), fill='white')
    # response_area (NOTE(review): assigned locally, shadowing the
    # module-level response_area set in __main__)
    response_area = tk.Label(
        root,
        width=106,
        height=4,
        bg="gray",
        font=font,
        relief=tk.RIDGE,
        bd=2
    )
    response_area.place(x=176, y=600)
    # entrybox for memo text
    entry = tk.Entry(
        root,
        width=75,
        font=font
    )
    entry.place(x=230, y=630)
    entry.focus_set()
    # listbox used as the game/event log
    lb = tk.Listbox(
        root,
        width=20,
        height=43,
        font=font_log
    )
    # scroolbar1
    sb1 = tk.Scrollbar(
        root,
        orient=tk.VERTICAL,
        command=lb.yview
    )
    # Link the scrollbar to the listbox
    lb.configure(yscrollcommand=sb1.set)
    lb.grid(row=0, column=0)
    sb1.grid(row=0, column=1, sticky=tk.NS)
    # button: starts the socket-server thread
    button = tk.Button(
        root,
        bg='black',
        command=lambda: buffer(),
        text="START",
        width=19,
    )
    button.place(x=0, y=655)
    # button2: logs the memo entry
    button2 = tk.Button(
        root,
        width=15,
        text="MEMO",
        command=lambda: memo())
    button2.place(x=950, y=630)
    # mainloop
    root.mainloop()
if __name__ == "__main__":
    # Module-level GUI state shared (via `global`) by the callbacks above;
    # the widget/canvas handles are filled in by rungui().
    lb = None
    on_canvas = None
    on_canvas_text = None
    on_canvas_text1 = None
    on_canvas_text2 = None
    on_canvas_text3 = None
    images = []
    entry = None
    response_area = None
    score = 0
    throw_number = 0
    play_sounds = sound.Sounds()
    # Socket-server settings for the Raspberry Pi connection
    HOSTNAME = "192.168.0.3"
    PORT = 12345
    CLIENTNUM = 1
    rungui()
| 21.093548 | 76 | 0.570577 | 782 | 6,539 | 4.620205 | 0.283887 | 0.070855 | 0.039856 | 0.053141 | 0.217825 | 0.179352 | 0.114863 | 0.099917 | 0.090783 | 0.090783 | 0 | 0.058837 | 0.31121 | 6,539 | 309 | 77 | 21.161812 | 0.743339 | 0.027986 | 0 | 0.240506 | 0 | 0 | 0.040468 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042194 | false | 0 | 0.016878 | 0 | 0.063291 | 0.012658 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51eb7396a14c0a9adcd7a3d4b7b068d93d1985e2 | 2,566 | py | Python | neutron/tests/unit/services/trunk/drivers/openvswitch/agent/test_trunk_manager.py | p0i0/openstack-neutron | df2ee28ae9a43cc511482bd6ece5396eb1288814 | [
"Apache-2.0"
] | null | null | null | neutron/tests/unit/services/trunk/drivers/openvswitch/agent/test_trunk_manager.py | p0i0/openstack-neutron | df2ee28ae9a43cc511482bd6ece5396eb1288814 | [
"Apache-2.0"
] | null | null | null | neutron/tests/unit/services/trunk/drivers/openvswitch/agent/test_trunk_manager.py | p0i0/openstack-neutron | df2ee28ae9a43cc511482bd6ece5396eb1288814 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2016 Red Hat
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_utils import uuidutils
import testtools
from neutron.common import utils as common_utils
from neutron.services.trunk.drivers.openvswitch.agent import trunk_manager
from neutron.tests import base
NATIVE_OVSDB_CONNECTION = (
'neutron.agent.ovsdb.impl_idl.OvsdbIdl.ovsdb_connection')
class TrunkParentPortTestCase(base.BaseTestCase):
def setUp(self):
super(TrunkParentPortTestCase, self).setUp()
# Mock out connecting to ovsdb
mock.patch(NATIVE_OVSDB_CONNECTION).start()
trunk_id = uuidutils.generate_uuid()
port_id = uuidutils.generate_uuid()
trunk_mac = common_utils.get_random_mac('fa:16:3e:00:00:00'.split(':'))
self.trunk = trunk_manager.TrunkParentPort(
trunk_id, port_id, trunk_mac)
def test_multiple_transactions(self):
def method_inner(trunk):
with trunk.ovsdb_transaction() as txn:
return id(txn)
def method_outer(trunk):
with trunk.ovsdb_transaction() as txn:
return method_inner(trunk), id(txn)
with self.trunk.ovsdb_transaction() as txn1:
mock_commit = mock.patch.object(txn1, 'commit').start()
txn_inner_id, txn_outer_id = method_outer(self.trunk)
self.assertFalse(mock_commit.called)
self.assertTrue(mock_commit.called)
self.assertTrue(id(txn1) == txn_inner_id == txn_outer_id)
def test_transaction_raises_error(self):
class MyException(Exception):
pass
with testtools.ExpectedException(MyException):
with self.trunk.ovsdb_transaction() as txn1:
mock.patch.object(txn1, 'commit').start()
raise MyException()
self.assertIsNone(self.trunk._transaction)
with self.trunk.ovsdb_transaction() as txn2:
mock.patch.object(txn2, 'commit').start()
self.assertIsNot(txn1, txn2)
| 37.735294 | 79 | 0.684334 | 324 | 2,566 | 5.274691 | 0.429012 | 0.035108 | 0.061439 | 0.067291 | 0.203043 | 0.167934 | 0.093622 | 0.093622 | 0 | 0 | 0 | 0.013138 | 0.228761 | 2,566 | 67 | 80 | 38.298507 | 0.85043 | 0.243959 | 0 | 0.097561 | 0 | 0 | 0.046778 | 0.028067 | 0 | 0 | 0 | 0 | 0.121951 | 1 | 0.121951 | false | 0.02439 | 0.146341 | 0 | 0.365854 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51ec05cacfb6953f807cc60da921092f7aeb6965 | 873 | py | Python | app/schema/queries/todo.py | rjNemo/graphql_python_template | 14bc5fd657f6bdba8d7293f21cfcec821fa6374f | [
"MIT"
] | 1 | 2021-05-02T01:47:57.000Z | 2021-05-02T01:47:57.000Z | app/schema/queries/todo.py | rjNemo/graphql_python_template | 14bc5fd657f6bdba8d7293f21cfcec821fa6374f | [
"MIT"
] | null | null | null | app/schema/queries/todo.py | rjNemo/graphql_python_template | 14bc5fd657f6bdba8d7293f21cfcec821fa6374f | [
"MIT"
] | null | null | null | """
Defines the query and how to interact with
"""
from app.schema.types.todo import TodoListResponseField, TodoResponseField
from app.usecases.todo import read_all_todos, read_todo_by_id
def resolve_list_todos(self, info) -> TodoListResponseField:
    """GraphQL resolver returning every todo wrapped in a response envelope.

    The resolver never raises: on failure the exception text is reported via
    ``error_message``, ``is_success`` is False and ``todos`` is ``None``.
    """
    todos, is_success, error_message = None, False, None
    try:
        todos = read_all_todos()
        is_success = True
    except Exception as exc:
        error_message = str(exc)
    return TodoListResponseField(
        todos=todos,
        is_success=is_success,
        error_message=error_message,
    )
def resolve_get_todo(self, info, todo_id: str) -> TodoResponseField:
    """GraphQL resolver fetching a single todo by its id.

    ``read_todo_by_id`` reports success via its second return value; when the
    lookup fails a fixed error message is attached to the response.
    """
    todo, is_success = read_todo_by_id(todo_id)
    if is_success:
        error_message = None
    else:
        error_message = "This element does not exist."
    return TodoResponseField(
        todo=todo,
        is_success=is_success,
        error_message=error_message,
    )
| 28.16129 | 78 | 0.717068 | 115 | 873 | 5.173913 | 0.426087 | 0.121008 | 0.040336 | 0.040336 | 0.141176 | 0.141176 | 0.141176 | 0.141176 | 0 | 0 | 0 | 0 | 0.213058 | 873 | 30 | 79 | 29.1 | 0.866084 | 0.04811 | 0 | 0 | 0 | 0 | 0.034022 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.1 | 0 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51ef4f3354164b3ce73be7d1b2f9128704be0733 | 8,884 | py | Python | meiduo_mall/apps/orders/views.py | canarysama/meiduo_project | 906cf667e27fa205b18aeb10b009d76dec19b211 | [
"MIT"
] | null | null | null | meiduo_mall/apps/orders/views.py | canarysama/meiduo_project | 906cf667e27fa205b18aeb10b009d76dec19b211 | [
"MIT"
] | null | null | null | meiduo_mall/apps/orders/views.py | canarysama/meiduo_project | 906cf667e27fa205b18aeb10b009d76dec19b211 | [
"MIT"
] | null | null | null | import json
from datetime import datetime
from decimal import Decimal
from django import http
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import render
# Create your views here.
from django.views import View
from django_redis import get_redis_connection
from apps.goods.models import SKU
from apps.orders.models import OrderInfo, OrderGoods
from apps.users.models import Address, User
from meiduo_mall.settings.dev import logger
from utils.response_code import RETCODE
class OrderSettlementView(LoginRequiredMixin,View):
    """Order settlement (place-order) page.

    Shows the user's addresses, the selected cart items and the computed
    totals before the order is actually committed.
    """

    def get(self, request):
        """Render place_order.html for the logged-in user."""
        user = request.user
        # Fetch the user's non-deleted shipping addresses; fall back to None
        # so the template can still render if the query fails.
        try:
            addresses = Address.objects.filter(user=user,is_deleted=False)
        except Exception as e:
            addresses = None
        # Cart lives in redis as a hash keyed by sku id; values are
        # JSON-encoded dicts like {"count": n, "selected": bool}.
        # Keep only the selected entries.
        redis_client = get_redis_connection('carts')
        carts_data = redis_client.hgetall(user.id)
        carts_dict = {}
        for key,value in carts_data.items():
            sku_key = int(key.decode())
            sku_dict = json.loads(value.decode())
            if sku_dict["selected"]:
                carts_dict[sku_key] = sku_dict
        skus = SKU.objects.filter(id__in = carts_dict.keys())
        # Accumulate money amounts as Decimal to avoid float rounding.
        total_count = 0
        total_amount = Decimal('0.00')
        for sku in skus:
            # Attach per-line count/amount to the SKU objects for the template.
            sku.count = carts_dict[sku.id]['count']
            sku.amount = sku.price * sku.count
            total_count += sku.count
            total_amount += sku.price * sku.count
        # Flat shipping fee.
        freight = Decimal('10.00')
        context = {
            'addresses': addresses,
            'skus': skus,
            'total_count': total_count,
            'total_amount': total_amount,
            'freight': freight,
            'payment_amount': total_amount + freight,
            'default_address_id': user.default_address_id
        }
        return render(request, 'place_order.html', context)
class OrderCommitView(LoginRequiredMixin,View):
    """Create an order (plus its order-goods rows) from the selected cart items.

    The whole write runs inside a DB transaction with a savepoint; stock is
    decremented with an optimistic-lock compare-and-update so concurrent
    checkouts of the same SKU do not oversell.
    """

    def post(self,request):
        # Fix: the original decoded and parsed request.body three separate
        # times (once unused); parse the JSON payload exactly once.
        json_dict = json.loads(request.body.decode())
        address_id = json_dict['address_id']
        pay_method = json_dict['pay_method']
        user = request.user
        # Validate shipping address and payment method.
        try:
            address = Address.objects.get(id=address_id)
        except Address.DoesNotExist:
            return http.HttpResponseForbidden('WUXIAO')
        if pay_method not in [OrderInfo.PAY_METHODS_ENUM['CASH'],OrderInfo.PAY_METHODS_ENUM['ALIPAY']]:
            return http.HttpResponseForbidden('不支持')
        # Order id: timestamp plus the user id zero-padded to 9 digits.
        order_id = datetime.now().strftime('%Y%m%d%H%M%S') + ('%09d' % user.id)
        from django.db import transaction
        with transaction.atomic():
            # Savepoint so any partial failure rolls the whole order back.
            save_id = transaction.savepoint()
            try:
                order = OrderInfo.objects.create(
                    order_id=order_id,
                    user=user,
                    address=address,
                    total_count=0,
                    total_amount=Decimal('0.00'),
                    freight=Decimal("10.00"),
                    pay_method=pay_method,
                    # ALIPAY orders wait for payment; cash-on-delivery orders
                    # go straight to "unsent".
                    status=OrderInfo.ORDER_STATUS_ENUM['UNPAID'] if pay_method == OrderInfo.PAY_METHODS_ENUM['ALIPAY'] else
                    OrderInfo.ORDER_STATUS_ENUM['UNSEND']
                )
                # Selected cart items from redis (JSON-encoded hash values).
                redis_client = get_redis_connection('carts')
                carts_data = redis_client.hgetall(user.id)
                carts_dict = {}
                for key, value in carts_data.items():
                    sku_id = int(key.decode())
                    sku_dict = json.loads(value.decode())
                    if sku_dict['selected']:
                        carts_dict[sku_id] = sku_dict
                sku_ids = carts_dict.keys()
                for sku_id in sku_ids:
                    while True:
                        sku = SKU.objects.get(id=sku_id)
                        original_stock = sku.stock
                        original_sales = sku.sales
                        cart_count = carts_dict[sku_id]['count']
                        # Out of stock: roll back everything written so far.
                        if cart_count > sku.stock:
                            transaction.savepoint_rollback(save_id)
                            return http.JsonResponse({'code': RETCODE.STOCKERR, 'errmsg': '库存不足'})
                        new_stock = original_stock - cart_count
                        new_sales = original_sales + cart_count
                        # Optimistic lock: only update when the stock is still
                        # the value we read; retry if a concurrent checkout
                        # changed it in the meantime.
                        result = SKU.objects.filter(id=sku_id, stock=original_stock).update(stock=new_stock,sales=new_sales)
                        if result == 0:
                            continue
                        sku.stock -= cart_count
                        sku.sales += cart_count
                        sku.save()
                        sku.spu.sales += cart_count
                        sku.spu.save()
                        # Per-line order record.
                        OrderGoods.objects.create(
                            order_id=order_id,
                            sku=sku,
                            count=cart_count,
                            price=sku.price,
                        )
                        # Accumulate totals (freight added once, below).
                        order.total_count += cart_count
                        order.total_amount += sku.price * cart_count
                        break
                order.total_amount += order.freight
                order.save()
            except Exception as e:
                logger.error(e)
                transaction.savepoint_rollback(save_id)
                return http.JsonResponse({'code': RETCODE.STOCKERR, 'errmsg': '库存不足'})
            transaction.savepoint_commit(save_id)
        return http.JsonResponse({'code': RETCODE.OK, 'errmsg': '下单成功', 'order_id': order.order_id})
class OrderSuccessView(View):
    """Order-success page: echoes order id, pay method and amount back."""

    def get(self,request):
        """Render order_success.html from the GET query parameters."""
        params = request.GET
        context = {
            "order_id": params.get("order_id"),
            "pay_method": params.get("pay_method"),
            "payment_amount": params.get("payment_amount"),
        }
        return render(request,'order_success.html',context)
class OrderShowView(LoginRequiredMixin,View):
    """User-center order listing page.

    NOTE(review): most of the aggregation below looks dead or unfinished --
    ``page_orders`` is never populated and ``page_num`` is overwritten with 1
    before rendering; confirm against user_center_order.html before relying
    on this view.
    """

    def get(self,request,page_num):
        # The user is looked up via the username cookie rather than
        # request.user -- presumably to match the cookie set at login;
        # TODO confirm.
        username = request.COOKIES.get('username')
        user = User.objects.get(username=username)
        user_id = user.id
        order_data = OrderInfo.objects.all()
        goods_data = OrderGoods.objects.all()
        order_ids = order_data.filter(user_id=user_id).values('order_id')
        # order_ids = OrderInfo.objects.filter(user_id=user_id)
        page_orders = {}
        # List of all order numbers (deduplicated below).
        order_list = []
        order_id_count = goods_data.values('order_id', 'count')
        order_id_set = set()  # unused
        for order_data_co in order_id_count:
            a = order_data_co['order_id']
            order_list.append(a)
        order_list =list(set(order_list))
        print(order_list)
        for order_id in order_ids:
            order_id = order_id['order_id']  # order number
            time_old = order_data.filter(order_id=order_id).values('create_time')  # creation time
            time = str(time_old[0]['create_time'])
            time_new = time[0:16]  # 'YYYY-MM-DD HH:MM' prefix
            freight = time_old.values('freight')[0]['freight']  # shipping fee
            """<QuerySet [{'address_id': 1, 'user_id': 19, 'total_count': 1,
            'order_id': '20190927003440000000019',
            'status': 1, 'pay_method': 2,
            'create_time': datetime.datetime(2019, 9, 27, 0, 34, 40, 214624, tzinfo=<UTC>),
            'update_time': datetime.datetime(2019, 9, 27, 0, 34, 40, 235034, tzinfo=<UTC>),
            'freight': Decimal('10.00'), 'total_amount': Decimal('6698.00')}]>
            """
            # if total_amount-freight == 0.00 or total_amount == 0.00:
            #     continue
            #
            # page_orders = {}
            # for Goods in goods_data:
            #     page_orders.setdefault(order_id,[time,freight,]).append(Goods)
        page_num = 1
        """
        下单时间 订单号
        商品信息 数量 单价 总价 运费 支付方式 订单状态 """
        context = {
            "page_orders": page_orders,
            # # # total number of pages
            # # 'total_page': total_page,
            # # # current page
            'page_num': page_num,
        }
        return render(request,'user_center_order.html',context)
| 31.842294 | 124 | 0.533431 | 945 | 8,884 | 4.798942 | 0.225397 | 0.038589 | 0.023815 | 0.018523 | 0.261963 | 0.213892 | 0.174201 | 0.158765 | 0.144653 | 0.114223 | 0 | 0.019319 | 0.364926 | 8,884 | 278 | 125 | 31.956835 | 0.784474 | 0.06281 | 0 | 0.173913 | 0 | 0 | 0.060637 | 0.002826 | 0 | 0 | 0 | 0 | 0 | 1 | 0.024845 | false | 0 | 0.093168 | 0 | 0.192547 | 0.006211 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51f094af4d80238e40eb72e75e31a6ae810f3f62 | 11,159 | py | Python | pie/tray_icon.py | sabaatworld/pie-indexing-service-py | f48ee18023f9c15e18fdb4296ba651fd343aef01 | [
"MIT"
] | 2 | 2020-03-30T18:00:40.000Z | 2020-05-30T17:09:04.000Z | pie/tray_icon.py | sabaatworld/pie-indexing-service-py | f48ee18023f9c15e18fdb4296ba651fd343aef01 | [
"MIT"
] | null | null | null | pie/tray_icon.py | sabaatworld/pie-indexing-service-py | f48ee18023f9c15e18fdb4296ba651fd343aef01 | [
"MIT"
] | null | null | null | import json
import logging
import os
import ssl
import webbrowser
from multiprocessing import Event, Queue
from urllib.request import urlopen
import certifi
from PySide2 import QtCore, QtGui, QtWidgets
from packaging import version
from pie.core import IndexDB, IndexingHelper, MediaProcessor
from pie.domain import IndexingTask, Settings
from pie.log_window import LogWindow
from pie.preferences_window import PreferencesWindow
from pie.util import MiscUtils, QWorker
class TrayIcon(QtWidgets.QSystemTrayIcon):
    """System-tray entry point of the application.

    Owns the tray context menu, the preferences/log windows, the Qt thread
    pool used for background work (indexing, deletion, update checks) and the
    shared IndexDB connection.

    Fix: the bare ``except:`` in ``check_for_updates`` (which also swallowed
    SystemExit/KeyboardInterrupt) was narrowed to ``except Exception:``.
    """

    __APP_VER = "1.0.2"
    __logger = logging.getLogger('TrayIcon')

    def __init__(self, log_queue: Queue):
        """Build the tray icon, its menu and the background thread pool."""
        super().__init__(QtGui.QIcon(MiscUtils.get_app_icon_path()))
        self.log_queue = log_queue
        self.preferences_window: PreferencesWindow = None
        self.log_window: LogWindow = None
        self.indexing_stop_event: Event = None
        self.observer = None
        self.indexDB = IndexDB()
        self.threadpool: QtCore.QThreadPool = QtCore.QThreadPool()
        self.__logger.debug("QT multithreading with thread pool size: %s", self.threadpool.maxThreadCount())
        self.setToolTip("Batch Media Compressor")
        self.activated.connect(self.trayIcon_activated)
        # Build the tray context menu; action handles are kept so they can be
        # enabled/disabled while background work runs.
        tray_menu = QtWidgets.QMenu('Main Menu')
        self.startIndexAction = tray_menu.addAction('Start Processing', self.startIndexAction_triggered)
        self.stopIndexAction = tray_menu.addAction('Stop Processing', self.stopIndexAction_triggered)
        self.stopIndexAction.setEnabled(False)
        tray_menu.addSeparator()
        self.clearIndexAction = tray_menu.addAction('Clear Indexed Files', self.clearIndexAction_triggered)
        self.clearOutputDirsAction = tray_menu.addAction('Clear Ouput Directories', self.clearOutputDirsAction_triggered)
        tray_menu.addSeparator()
        self.editPrefAction = tray_menu.addAction('Edit Preferences', self.editPreferencesAction_triggered)
        self.viewLogsAction = tray_menu.addAction('View Logs', self.viewLogsAction_triggered)
        tray_menu.addSeparator()
        self.updateCheckAction = tray_menu.addAction('Check for Updates', self.updateCheckAction_triggered)
        self.coffeeAction = tray_menu.addAction('Buy me a Coffee', self.coffeeAction_triggered)
        tray_menu.addSeparator()
        tray_menu.addAction('Quit', self.quitMenuAction_triggered)
        self.setContextMenu(tray_menu)
        self.apply_process_changed_setting()
        # Kick off a background update check if the user opted in.
        if self.indexDB.get_settings().auto_update_check:
            self.update_check_worker = QWorker(self.auto_update_check)
            self.threadpool.start(self.update_check_worker)

    def trayIcon_activated(self, reason):
        """Tray icon clicked/double-clicked; currently a no-op."""
        pass

    def startIndexAction_triggered(self):
        """Start the indexing worker on the thread pool."""
        if self.indexDB.get_settings().auto_show_log_window:
            self.show_view_logs_window()
        self.background_processing_started()
        self.indexing_stop_event = Event()
        self.indexing_worker = QWorker(self.start_indexing)
        self.indexing_worker.signals.finished.connect(self.background_processing_finished)
        self.threadpool.start(self.indexing_worker)
        self.stopIndexAction.setEnabled(True)

    def stopIndexAction_triggered(self):
        """Ask for confirmation, then signal the running task to stop."""
        response: QtWidgets.QMessageBox.StandardButton = QtWidgets.QMessageBox.question(
            None, "Confirm Action", "Are you sure you want to stop the current task?",
            QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No
        )
        if QtWidgets.QMessageBox.Yes == response:
            self.stopIndexAction.setEnabled(False)
            self.stop_async_tasks()

    def clearIndexAction_triggered(self):
        """Confirm, then clear the index AND delete all output files."""
        response: QtWidgets.QMessageBox.StandardButton = QtWidgets.QMessageBox.question(
            None, "Confirm Action", "Forget indexed files and delete all output files?",
            QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No
        )
        if QtWidgets.QMessageBox.Yes == response:
            self.background_processing_started()
            self.deletion_worker = QWorker(self.start_deletion, True)
            self.deletion_worker.signals.finished.connect(self.background_processing_finished)
            self.threadpool.start(self.deletion_worker)

    def clearOutputDirsAction_triggered(self):
        """Confirm, then delete output files only (index is kept)."""
        response: QtWidgets.QMessageBox.StandardButton = QtWidgets.QMessageBox.question(
            None, "Confirm Action", "Delete all output files?",
            QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No
        )
        if QtWidgets.QMessageBox.Yes == response:
            self.background_processing_started()
            self.deletion_worker = QWorker(self.start_deletion, False)
            self.deletion_worker.signals.finished.connect(self.background_processing_finished)
            self.threadpool.start(self.deletion_worker)

    def start_deletion(self, clearIndex: bool):
        """Worker: optionally clear the index, then wipe the output dirs."""
        MiscUtils.debug_this_thread()
        with IndexDB() as indexDB:
            if clearIndex:
                indexDB.clear_indexed_files()
                self.__logger.info("Index cleared")
            settings: Settings = indexDB.get_settings()
            MiscUtils.recursively_delete_children(settings.output_dir)
            MiscUtils.recursively_delete_children(settings.unknown_output_dir)
            self.__logger.info("Output directories cleared")

    def editPreferencesAction_triggered(self):
        """Lazily create and show the preferences window."""
        if self.preferences_window is None:
            self.preferences_window = PreferencesWindow(self.apply_process_changed_setting)
        self.preferences_window.show()

    def viewLogsAction_triggered(self):
        self.show_view_logs_window()

    def show_view_logs_window(self):
        """Lazily create and show the log window."""
        if self.log_window is None:
            self.log_window = LogWindow(self.threadpool)
        self.log_window.show()

    def updateCheckAction_triggered(self):
        # Manual check: report "no updates" too.
        self.check_for_updates(True)

    def auto_update_check(self):
        """Worker: silent update check at startup."""
        MiscUtils.debug_this_thread()
        self.check_for_updates(False)

    def check_for_updates(self, display_not_found: bool):
        """Query GitHub for the latest release and prompt if it is newer.

        :param display_not_found: also show a dialog when no update exists
            (used for the manual menu action).
        """
        api_url = "https://api.github.com/repos/sabaatworld/batch-media-compressor/releases/latest"
        releases_url = "https://github.com/sabaatworld/batch-media-compressor/releases"
        update_found = False
        try:
            ssl_context = ssl.create_default_context(cafile=certifi.where())
            response = urlopen(api_url, context=ssl_context)
            response_string = response.read().decode('utf-8')
            response_json = json.loads(response_string)
            tag_name: str = response_json["tag_name"]
            if tag_name is not None:
                # Tags look like "v1.0.2"; strip the prefix before parsing.
                release_version = version.parse(tag_name.replace("v", ""))
                current_version = version.parse(self.__APP_VER)
                self.__logger.info("Updated Check successful: Current Version: %s, Latest Release: %s", str(current_version), str(release_version))
                if current_version < release_version:
                    update_found = True
        except Exception:
            # Fix: was a bare "except:", which also caught SystemExit and
            # KeyboardInterrupt.
            self.__logger.exception("Failed to check for updates")
        if update_found:
            if QtWidgets.QMessageBox.information(
                None, "Update Check",
                "New version available. Do you wish to download the latest release now?\n\nCurrent Verion: {}\nNew Version: {}".format(str(current_version), str(release_version)),
                QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No
            ) == QtWidgets.QMessageBox.Yes:
                webbrowser.open(releases_url)
        elif display_not_found:
            QtWidgets.QMessageBox.information(None, "Update Check", "No updates found.\n\nIf you think this is an error, please check your internet connection and try again.", QtWidgets.QMessageBox.Ok)

    def coffeeAction_triggered(self):
        webbrowser.open('https://paypal.me/sabaat')

    def quitMenuAction_triggered(self):
        QtWidgets.QApplication.quit()

    def start_indexing(self):
        """Worker: scan, index and process media files on the thread pool."""
        MiscUtils.debug_this_thread()
        with IndexDB() as indexDB:
            indexing_task = IndexingTask()
            indexing_task.settings = indexDB.get_settings()
            if self.settings_valid(indexing_task.settings):
                misc_utils = MiscUtils(indexing_task)
                misc_utils.create_root_marker()
                indexing_helper = IndexingHelper(indexing_task, self.log_queue, self.indexing_stop_event)
                (scanned_files, _) = indexing_helper.scan_dirs()
                indexing_helper.remove_slate_files(indexDB, scanned_files)
                indexing_helper.lookup_already_indexed_files(indexDB, scanned_files)
                # Each stage re-checks the stop event so a user-requested
                # stop takes effect between stages.
                if not self.indexing_stop_event.is_set():
                    indexing_helper.create_media_files(scanned_files)
                if not self.indexing_stop_event.is_set():
                    media_processor = MediaProcessor(indexing_task, self.log_queue, self.indexing_stop_event)
                    media_processor.save_processed_files(indexDB)
                if not self.indexing_stop_event.is_set():
                    misc_utils.cleanEmptyOutputDirs()

    def settings_valid(self, settings: Settings) -> bool:
        """Validate the three configured directories; log and return False on error."""
        error_msg: str = None
        if settings.monitored_dir is None:
            error_msg = "Directory to scan not configured"
        elif not os.path.isdir(settings.monitored_dir):
            error_msg = "Directory to scan is invalid"
        elif settings.output_dir is None:
            error_msg = "Media with Capture Date directory not configured"
        elif not os.path.isdir(settings.output_dir):
            error_msg = "Media with Capture Date directory is invalid"
        elif settings.unknown_output_dir is None:
            error_msg = "Media without Capture Date directory not configured"
        elif not os.path.isdir(settings.unknown_output_dir):
            error_msg = "Media without Capture Date directory is invalid"
        if error_msg is not None:
            self.__logger.error("Cannot start processing: %s. Please update preferences and try again.", error_msg)
            return False
        else:
            return True

    def background_processing_started(self):
        """Disable conflicting menu actions while a worker runs."""
        self.startIndexAction.setEnabled(False)
        self.clearIndexAction.setEnabled(False)
        self.clearOutputDirsAction.setEnabled(False)
        self.editPrefAction.setEnabled(False)
        if self.preferences_window is not None:
            self.preferences_window.hide()

    def background_processing_finished(self):
        """Re-enable menu actions when a worker finishes."""
        self.startIndexAction.setEnabled(True)
        self.stopIndexAction.setEnabled(False)
        self.clearIndexAction.setEnabled(True)
        self.clearOutputDirsAction.setEnabled(True)
        self.editPrefAction.setEnabled(True)

    def stop_async_tasks(self):
        """Signal the indexing worker (if any) to stop at the next stage."""
        if self.indexing_stop_event:
            self.indexing_stop_event.set()

    def cleanup(self):
        """Release windows and the DB connection before application exit."""
        if self.preferences_window is not None:
            self.preferences_window.cleanup()
        if self.log_window is not None:
            self.log_window.cleanup()
        self.indexDB.disconnect_db()

    def apply_process_changed_setting(self):
        # Placeholder: called when preferences change; currently a no-op.
        pass
| 46.690377 | 201 | 0.692087 | 1,232 | 11,159 | 6.039773 | 0.212662 | 0.056444 | 0.019352 | 0.0254 | 0.384491 | 0.27241 | 0.232899 | 0.213547 | 0.192313 | 0.180218 | 0 | 0.00058 | 0.226992 | 11,159 | 238 | 202 | 46.886555 | 0.862045 | 0 | 0 | 0.183575 | 0 | 0.014493 | 0.111928 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.10628 | false | 0.009662 | 0.072464 | 0 | 0.202899 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51f40f22276db646a119642572727553613d35f0 | 8,903 | py | Python | vis_utils/graphics/geometry/procedural_primitives.py | eherr/vis_utils | b757b01f42e6da02ad62130c3b0e61e9eaa3886f | [
"MIT"
] | 4 | 2020-05-20T03:55:19.000Z | 2020-12-24T06:33:40.000Z | vis_utils/graphics/geometry/procedural_primitives.py | eherr/vis_utils | b757b01f42e6da02ad62130c3b0e61e9eaa3886f | [
"MIT"
] | 1 | 2020-05-18T11:21:35.000Z | 2020-07-07T21:25:57.000Z | vis_utils/graphics/geometry/procedural_primitives.py | eherr/vis_utils | b757b01f42e6da02ad62130c3b0e61e9eaa3886f | [
"MIT"
] | 1 | 2020-07-20T06:57:13.000Z | 2020-07-20T06:57:13.000Z | #!/usr/bin/env python
#
# Copyright 2019 DFKI GmbH.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the
# following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
import math
from copy import copy
import numpy as np
def merge_vertices_and_normals(vertices, normals):
    """Concatenate each vertex with its matching normal.

    Both arguments are equal-length sequences of (typically 3-element)
    lists; element ``i`` of the result is ``vertices[i] + normals[i]``,
    i.e. interleaved ``[x, y, z, nx, ny, nz]`` rows.
    """
    # Idiomatic zip instead of indexing with range(len(...)).
    return [vertex + normal for vertex, normal in zip(vertices, normals)]
def construct_triangle_sphere(slices, stacks, diameter):
    """Build a UV sphere as interleaved vertex data plus triangle indices.

    Returns ``(data, triangles)`` where every entry of ``data`` is
    ``[x, y, z, nx, ny, nz]`` (the "normal" is a plain copy of the position,
    so it is not unit length) and ``triangles`` holds index triples.
    src: http://jacksondunstan.com/articles/1904
    """
    theta_step = (2.0 * math.pi) / slices
    phi_step = math.pi / stacks
    ring_size = slices + 1

    # Pre-compute half the sin/cos of every theta (accumulated, as before,
    # to keep float results bit-identical).
    half_cos = []
    half_sin = []
    theta = 0
    for _ in range(ring_size):
        half_cos.append(math.cos(theta) * 0.5)
        half_sin.append(math.sin(theta) * 0.5)
        theta += theta_step

    # Vertex positions, stack by stack, sweeping phi from pi down to 0.
    positions = []
    normals = []
    phi = math.pi
    for _ in range(stacks + 1):
        y = math.cos(phi) * 0.5 * diameter
        sin_phi = math.sin(phi)
        for s in range(ring_size):
            p = [half_cos[s] * sin_phi * diameter, y, half_sin[s] * sin_phi * diameter]
            positions.append(p)
            normals.append([p[0], p[1], p[2]])
        phi -= phi_step

    # Two triangles per quad between consecutive stacks.
    triangles = []
    below = 0
    above = ring_size
    for _ in range(stacks):
        for s in range(slices):
            # lower triangle of the quad
            triangles.append([below + s + 1, above + s, below + s])
            # upper triangle of the quad
            triangles.append([below + s + 1, above + s + 1, above + s])
        below += ring_size
        above += ring_size

    return merge_vertices_and_normals(positions, normals), triangles
def construct_quad_box(width, height, depth):
    """Build quad vertex data for an axis-aligned box centred at the origin.

    Returns a float32 array of 24 rows (4 per face), each row being
    ``[x, y, z, nx, ny, nz]``.  Faces in order: north (-z), west (-x),
    south (+z), east (+x), bottom (-y), top (+y).

    Fix: removed the stray debug ``print`` the original left in.
    """
    data = np.array([
        # north (-z)
        [-width / 2, -height / 2, -depth / 2, 0, 0, -1],
        [-width / 2, height / 2, -depth / 2, 0, 0, -1],
        [width / 2, height / 2, -depth / 2, 0, 0, -1],
        [width / 2, -height / 2, -depth / 2, 0, 0, -1],
        # west (-x)
        [-width / 2, -height / 2, -depth / 2, -1, 0, 0],
        [-width / 2, height / 2, -depth / 2, -1, 0, 0],
        [-width / 2, height / 2, depth / 2, -1, 0, 0],
        [-width / 2, -height / 2, depth / 2, -1, 0, 0],
        # south (+z)
        [-width / 2, -height / 2, depth / 2, 0, 0, 1],
        [-width / 2, height / 2, depth / 2, 0, 0, 1],
        [width / 2, height / 2, depth / 2, 0, 0, 1],
        [width / 2, -height / 2, depth / 2, 0, 0, 1],
        # east (+x)
        [width / 2, -height / 2, -depth / 2, 1, 0, 0],
        [width / 2, height / 2, -depth / 2, 1, 0, 0],
        [width / 2, height / 2, depth / 2, 1, 0, 0],
        [width / 2, -height / 2, depth / 2, 1, 0, 0],
        # bottom (-y)
        [-width / 2, -height / 2, -depth / 2, 0, -1, 0],
        [-width / 2, -height / 2, depth / 2, 0, -1, 0],
        [width / 2, -height / 2, depth / 2, 0, -1, 0],
        [width / 2, -height / 2, -depth / 2, 0, -1, 0],
        # top (+y)
        [-width / 2, height / 2, -depth / 2, 0, 1, 0],
        [-width / 2, height / 2, depth / 2, 0, 1, 0],
        [width / 2, height / 2, depth / 2, 0, 1, 0],
        [width / 2, height / 2, -depth / 2, 0, 1, 0]
    ], 'f')
    return data
def construct_quad_box_based_on_height(width, height, depth):
    """Build quad vertex data for a box sitting on y=0 (y spans 0..height).

    Returns a float32 array of 24 rows (4 per face), each row being
    ``[x, y, z, nx, ny, nz]``.  Faces in order: north (-z), west (-x),
    south (+z), east (+x), bottom (-y), top (+y).

    Fix: the bottom/top face normals were inverted (bottom had +Y, top had
    -Y), inconsistent with construct_quad_box; corrected so the bottom face
    points down and the top face points up.
    """
    data = np.array([
        # north (-z)
        [-width / 2, 0.0, -depth / 2, 0, 0, -1],
        [-width / 2, height, -depth / 2, 0, 0, -1],
        [width / 2, height, -depth / 2, 0, 0, -1],
        [width / 2, 0.0, -depth / 2, 0, 0, -1],
        # west (-x)
        [-width / 2, 0.0, -depth / 2, -1, 0, 0],
        [-width / 2, height, -depth / 2, -1, 0, 0],
        [-width / 2, height, depth / 2, -1, 0, 0],
        [-width / 2, 0.0, depth / 2, -1, 0, 0],
        # south (+z)
        [-width / 2, 0.0, depth / 2, 0, 0, 1],
        [-width / 2, height, depth / 2, 0, 0, 1],
        [width / 2, height, depth / 2, 0, 0, 1],
        [width / 2, 0.0, depth / 2, 0, 0, 1],
        # east (+x)
        [width / 2, 0.0, -depth / 2, 1, 0, 0],
        [width / 2, height, -depth / 2, 1, 0, 0],
        [width / 2, height, depth / 2, 1, 0, 0],
        [width / 2, 0.0, depth / 2, 1, 0, 0],
        # bottom (-y)
        [-width / 2, 0.0, -depth / 2, 0, -1, 0],
        [-width / 2, 0.0, depth / 2, 0, -1, 0],
        [width / 2, 0.0, depth / 2, 0, -1, 0],
        [width / 2, 0.0, -depth / 2, 0, -1, 0],
        # top (+y)
        [-width / 2, height, -depth / 2, 0, 1, 0],
        [-width / 2, height, depth / 2, 0, 1, 0],
        [width / 2, height, depth / 2, 0, 1, 0],
        [width / 2, height, -depth / 2, 0, 1, 0]
    ], 'f')
    return data
def construct_triangle_cylinder(slices, radius, length):
    """Build a z-aligned cylinder (two caps plus the side wall).

    Returns ``(data, triangles)`` where ``data`` rows are
    ``[x, y, z, nx, ny, nz]`` and ``triangles`` holds index triples.

    NOTE(review): the cap-centre normals are [0, 0, -1] for BOTH the
    +half_length and -half_length caps, while the first cap's rim normals are
    [0, 0, 1] -- these directions look inconsistent; confirm against the
    renderer before relying on them.  The side-wall triangle indices also
    start at the value ``v_idx`` left over from the cap loop, which appears
    to lag the first side vertex by two -- verify the winding/indexing.

    http://monsterden.net/software/ragdoll-pyode-tutorial
    http://wiki.unity3d.com/index.php/ProceduralPrimitives
    """
    half_length = length / 2.0
    vertices = []
    normals = []
    triangles = []
    v_idx = 0
    # bottom cap: centre vertex followed by a full ring of rim vertices
    vertices.append([0, 0, half_length])
    normals.append([0, 0, -1])
    for i in range(0, slices+1):
        angle = i / float(slices) * 2.0 * np.pi
        ca = np.cos(angle)
        sa = np.sin(angle)
        vertices.append([radius * ca, radius * sa, half_length])
        normals.append([0, 0, 1])
    # fan of triangles around the centre vertex (index 0)
    for idx in range(0, slices):
        triangles.append([0, v_idx+1, v_idx+2])
        v_idx += 1
    # side wall: pairs of vertices at +/- half_length with radial normals
    for i in range(0, slices+1):
        angle = i / float(slices) * 2.0 * np.pi
        ca = np.cos(angle)
        sa = np.sin(angle)
        vertices.append([radius * ca, radius * sa, half_length])
        vertices.append([radius * ca, radius * sa, -half_length])
        normals.append([ca, sa, 0])
        normals.append([ca, sa, 0])
    # triangle strip over the side-wall vertex pairs
    for idx in range(0, slices*2):
        triangles.append([v_idx, v_idx + 1, v_idx + 2])
        v_idx += 1
    # top cap: centre vertex plus its rim ring
    start = len(vertices)
    vertices.append([0, 0, -half_length])
    normals.append([0, 0, -1])
    for i in range(0, slices+1):
        angle = i / float(slices) * 2.0 * np.pi
        ca = np.cos(angle)
        sa = np.sin(angle)
        vertices.append([radius * ca, radius * sa, -half_length])
        normals.append([0, 0, -1])
    # fan of triangles around the top centre vertex
    for idx in range(0, slices):
        triangles.append([start, v_idx+1, v_idx + 2])
        v_idx += 1
    return merge_vertices_and_normals(vertices, normals), triangles
def construct_triangle_capsule(slices, stacks, diameter, length, direction="z"):
    """Build a capsule by pulling the two halves of a sphere apart by *length*.

    The sphere's lower half is shifted by -length/2 along Y and the upper
    half by +length/2; the whole mesh is then rotated so the long axis points
    along ``direction`` ("x", "y" or "z").  Returns ``(data, triangles)``.
    """
    data, triangles = construct_triangle_sphere(slices, stacks, diameter)
    data = np.array(data)
    half = int(len(data) / 2.0)
    offset = length / 2
    # Sphere vertices are emitted bottom stack first, so the first half of
    # the rows is the lower hemisphere.
    data[:half, 1] -= offset
    data[half:, 1] += offset
    if direction == "x":
        rotation = np.array([[0, 1, 0],
                             [1, 0, 0],
                             [0, 0, -1]])
        data = transform_vertex_data(data, rotation)
    elif direction == "z":
        rotation = np.array([[1, 0, 0],
                             [0, 0, -1],
                             [0, 1, 0]])
        data = transform_vertex_data(data, rotation)
    return data, triangles
def transform_vertex_data(data, m):
    """Apply the 3x3 matrix ``m`` to every interleaved vertex record.

    Each entry of ``data`` is ``[x, y, z, nx, ny, nz]``; the position and the
    normal halves are rotated independently.  Returns a new list of
    6-element float arrays.
    """
    def _rotate(record):
        out = np.zeros(6)
        out[:3] = np.dot(m, record[:3])
        out[3:] = np.dot(m, record[3:])
        return out

    return [_rotate(record) for record in data]
| 35.75502 | 116 | 0.550826 | 1,255 | 8,903 | 3.858964 | 0.165737 | 0.023126 | 0.099112 | 0.07516 | 0.516209 | 0.479455 | 0.410489 | 0.410489 | 0.404295 | 0.375594 | 0 | 0.064516 | 0.296642 | 8,903 | 248 | 117 | 35.899194 | 0.708879 | 0.173986 | 0 | 0.274854 | 0 | 0 | 0.002062 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.040936 | false | 0 | 0.017544 | 0 | 0.099415 | 0.005848 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51f4f2785dcaeca906879c7e62e621e4a846114b | 9,212 | py | Python | src/tokentype.py | londav28/chasm | a5cd97ab2732af30d20aaf05842f3ddad7618660 | [
"MIT"
] | null | null | null | src/tokentype.py | londav28/chasm | a5cd97ab2732af30d20aaf05842f3ddad7618660 | [
"MIT"
] | null | null | null | src/tokentype.py | londav28/chasm | a5cd97ab2732af30d20aaf05842f3ddad7618660 | [
"MIT"
] | null | null | null | # MIT LICENSE Copyright (c) 2018 David Longnecker
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# Luckily module initialization code is only run once.
_enum_counter = 0  # next enum value to hand out
_enum_strs = []    # enum value -> display string, indexed by value

# So that I can reorder enum values at will.
def _intern(string):
    """Register *string* and return the freshly allocated enum value for it."""
    global _enum_counter
    result = _enum_counter
    _enum_strs.append(string)
    _enum_counter += 1
    return result
# Fetch tokentype name from enum value.
def get_tokentype_str(toktype):
    """Return the display name for a token-type value, or None if out of range."""
    if 0 <= toktype < len(_enum_strs):
        return _enum_strs[toktype]
    return None
# These must appear in _strict_ order corresponding to opcode value.
# (each _intern call allocates the next consecutive value, so insertion
# order defines the numeric opcode)
t_op_nop = _intern('op:nop')
# local/global/field load-store
t_op_ldl = _intern('op:ldl')
t_op_stl = _intern('op:stl')
t_op_ldg = _intern('op:ldg')
t_op_stg = _intern('op:stg')
t_op_lfd = _intern('op:lfd')
t_op_sfd = _intern('op:sfd')
t_op_ldsc = _intern('op:ldsc')
# stack manipulation
t_op_pop = _intern('op:pop')
t_op_swp = _intern('op:swp')
t_op_dup = _intern('op:dup')
# push immediates (byte/short/dword/qword/float/ref/nil)
t_op_psh_b = _intern('op:psh_b')
t_op_psh_s = _intern('op:psh_s')
t_op_psh_d = _intern('op:psh_d')
t_op_psh_q = _intern('op:psh_q')
t_op_psh_f = _intern('op:psh_f')
t_op_psh_a = _intern('op:psh_a')
t_op_psh_nil = _intern('op:psh_nil')
# array allocation per element type
t_op_par_b = _intern('op:par_b')
t_op_par_s = _intern('op:par_s')
t_op_par_d = _intern('op:par_d')
t_op_par_q = _intern('op:par_q')
t_op_par_f = _intern('op:par_f')
t_op_par_a = _intern('op:par_a')
# array element load/store and length
t_op_lai = _intern('op:lai')
t_op_sai = _intern('op:sai')
t_op_alen = _intern('op:alen')
# bitwise ops
t_op_and = _intern('op:and')
t_op_or = _intern('op:or')
t_op_xor = _intern('op:xor')
t_op_not = _intern('op:not')
t_op_shl = _intern('op:shl')
t_op_shr = _intern('op:shr')
# integer (qword) arithmetic
t_op_add_q = _intern('op:add_q')
t_op_sub_q = _intern('op:sub_q')
t_op_mul_q = _intern('op:mul_q')
t_op_div_q = _intern('op:div_q')
t_op_mod_q = _intern('op:mod_q')
t_op_neg_q = _intern('op:neg_q')
# float arithmetic
t_op_add_f = _intern('op:add_f')
t_op_sub_f = _intern('op:sub_f')
t_op_mul_f = _intern('op:mul_f')
t_op_div_f = _intern('op:div_f')
t_op_mod_f = _intern('op:mod_f')
t_op_neg_f = _intern('op:neg_f')
# numeric casts and comparisons
t_op_cst_qf = _intern('op:cst_qf')
t_op_cst_fq = _intern('op:cst_fq')
t_op_cmp_q = _intern('op:cmp_q')
t_op_cmp_f = _intern('op:cmp_f')
t_op_refcmp = _intern('op:refcmp')
# conditional and unconditional jumps
t_op_jmp_eqz = _intern('op:jmp_eqz')
t_op_jmp_nez = _intern('op:jmp_nez')
t_op_jmp_ltz = _intern('op:jmp_ltz')
t_op_jmp_lez = _intern('op:jmp_lez')
t_op_jmp_gtz = _intern('op:jmp_gtz')
t_op_jmp_gez = _intern('op:jmp_gez')
t_op_jmp = _intern('op:jmp')
# misc / control transfer (t_op_throw is the LAST opcode token)
t_op_typeof = _intern('op:typeof')
t_op_call = _intern('op:call')
t_op_ret = _intern('op:ret')
t_op_leave = _intern('op:leave')
t_op_break = _intern('op:break')
t_op_throw = _intern('op:throw')

# Additional values to be used in the lexer/parser!
t_eof = _intern('eof')
t_unknown = _intern('unknown')

# Recognized whitespace tokens.
t_comment = _intern('comment')
t_spaces = _intern('spaces')

# LL(1) formatting characters.
t_newline = _intern('newline')
t_tab = _intern('tab')

# LL(1) braces and brackets.
t_lparen = _intern('lparen')
t_rparen = _intern('rparen')
t_lbrace = _intern('lbrace')
t_rbrace = _intern('rbrace')
t_lbracket = _intern('lbracket')
t_rbracket = _intern('rbracket')

# LL(1) comparison operators.
t_less = _intern('less')
t_greater = _intern('greater')

# LL(1) punctuation characters.
t_semicolon = _intern('semicolon')
t_comma = _intern('comma')
t_period = _intern('period')
t_colon = _intern('colon')

# LL(1) operators and meta symbols.
t_assign = _intern('assign')
t_star = _intern('star')
t_fslash = _intern('fslash')
t_percent = _intern('percent')
t_amper = _intern('amper')
t_at = _intern('at')
t_dollar = _intern('dollar')

# Literal values.
t_int = _intern('int')
t_str = _intern('str')
t_flt = _intern('flt')
t_hex = _intern('hex')
t_bin = _intern('bin')

# There's gonna be a whole lotta these!
t_symbol = _intern('symbol')

# Additional assembler keywords.
t_method = _intern('kw:method')
t_object = _intern('kw:object')
t_try = _intern('kw:try')
t_except = _intern('kw:except')
t_void = _intern('kw:void')
# Relies on opcode tokens being interned first!
def get_opcode_str(op):
    """Return the display name for an opcode token, or 'unknown' when out of range.

    Fix: the original bounds check referenced ``t_op_eox``, a name that is
    never defined in this module, so every call raised ``NameError``.  The
    last interned opcode token is ``t_op_throw``; use it as the upper bound.
    """
    if op < t_op_nop or op > t_op_throw:
        return 'unknown'
    return get_tokentype_str(op)
_keywords = [
t_method,
t_object,
t_try,
t_except,
t_void
]
_whitespace = [
t_comment,
t_spaces,
t_newline,
t_tab
]
_literals = [
t_int,
t_str,
t_flt,
t_hex,
t_bin
]
_instruction = [
t_op_nop,
t_op_ldl,
t_op_stl,
t_op_ldg,
t_op_stg,
t_op_lfd,
t_op_sfd,
t_op_ldsc,
t_op_pop,
t_op_swp,
t_op_dup,
t_op_psh_b,
t_op_psh_s,
t_op_psh_d,
t_op_psh_q,
t_op_psh_f,
t_op_psh_a,
t_op_psh_nil,
t_op_par_b,
t_op_par_s,
t_op_par_d,
t_op_par_q,
t_op_par_f,
t_op_par_a,
t_op_lai,
t_op_sai,
t_op_alen,
t_op_and,
t_op_or,
t_op_xor,
t_op_not,
t_op_shl,
t_op_shr,
t_op_add_q,
t_op_sub_q,
t_op_mul_q,
t_op_div_q,
t_op_mod_q,
t_op_neg_q,
t_op_add_f,
t_op_sub_f,
t_op_mul_f,
t_op_div_f,
t_op_mod_f,
t_op_neg_f,
t_op_cst_qf,
t_op_cst_fq,
t_op_cmp_q,
t_op_cmp_f,
t_op_refcmp,
t_op_jmp_eqz,
t_op_jmp_nez,
t_op_jmp_ltz,
t_op_jmp_lez,
t_op_jmp_gtz,
t_op_jmp_gez,
t_op_jmp,
t_op_typeof,
t_op_call,
t_op_ret,
t_op_leave,
t_op_break,
t_op_throw
]
_jump = [
t_op_jmp_eqz,
t_op_jmp_nez,
t_op_jmp_ltz,
t_op_jmp_lez,
t_op_jmp_gtz,
t_op_jmp_gez,
t_op_jmp
]
_interned_arg = [
t_op_psh_a,
t_op_par_a,
t_op_call,
t_op_ldsc,
t_op_psh_q,
t_op_psh_f
]
_has_immediate_u8 = [
t_op_ldl,
t_op_stl
]
_has_immediate_u16 = [
t_op_ldg,
t_op_stg,
t_op_lfd,
t_op_sfd
]
_has_immediate_u32 = _jump + [
t_op_psh_a,
t_op_par_a,
t_op_call,
t_op_ldsc,
t_op_psh_q,
t_op_psh_f
]
_has_immediate_u64 = []
_has_immediate_i8 = [ t_op_psh_b ]
_has_immediate_i16 = [ t_op_psh_s ]
_has_immediate_i32 = [ t_op_psh_d ]
_has_immediate_i64 = []
_has_immediate_f32 = []
_has_immediate_f64 = []
_has_immediate = (
_has_immediate_u8 +
_has_immediate_u16 +
_has_immediate_u32 +
_has_immediate_u64 +
_has_immediate_i8 +
_has_immediate_i16 +
_has_immediate_i32 +
_has_immediate_i64 +
_has_immediate_f32 +
_has_immediate_f64
)
# Tokens that can have varying values.
_non_static = _literals + [t_symbol] + [t_comment] + [t_spaces]
def is_keyword(v):
return v in _keywords
def is_literal(v):
return v in _literals
def is_non_static(v):
return v in _non_static
def is_whitespace(v):
return v in _whitespace
def is_instruction(v):
return v in _instruction
def is_jump(v):
return v in _jump
def has_interned_arg(v):
return v in _interned_arg
def has_immediate_u8(v):
return v in _has_immediate_u8
def has_immediate_u16(v):
return v in _has_immediate_u16
def has_immediate_u32(v):
return v in _has_immediate_u32
def has_immediate_u64(v):
return v in _has_immediate_u64
def has_immediate_i8(v):
return v in _has_immediate_i8
def has_immediate_i16(v):
return v in _has_immediate_i16
def has_immediate_i32(v):
return v in _has_immediate_i32
def has_immediate_i64(v):
return v in _has_immediate_i64
def has_immediate_f32(v):
return v in _has_immediate_f32
def has_immediate_f64(v):
return v in _has_immediate_f64
def has_immediate(v):
return v in _has_immediate
| 21.98568 | 78 | 0.65469 | 1,536 | 9,212 | 3.434896 | 0.18099 | 0.088704 | 0.026156 | 0.034117 | 0.347612 | 0.272176 | 0.192381 | 0.072024 | 0.069371 | 0.052312 | 0 | 0.012102 | 0.246526 | 9,212 | 418 | 79 | 22.038278 | 0.748019 | 0.180743 | 0 | 0.127148 | 0 | 0 | 0.091962 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.072165 | false | 0 | 0 | 0.061856 | 0.151203 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51f6b55bf08cd3caaf58fe60b17883de8d75c7aa | 1,803 | py | Python | src/cr/sparse/_src/lop/reshape.py | carnot-shailesh/cr-sparse | 989ebead8a8ac37ade643093e1caa31ae2a3eda1 | [
"Apache-2.0"
] | 42 | 2021-06-11T17:11:29.000Z | 2022-03-29T11:51:44.000Z | src/cr/sparse/_src/lop/reshape.py | carnot-shailesh/cr-sparse | 989ebead8a8ac37ade643093e1caa31ae2a3eda1 | [
"Apache-2.0"
] | 19 | 2021-06-04T11:36:11.000Z | 2022-01-22T20:13:39.000Z | src/cr/sparse/_src/lop/reshape.py | carnot-shailesh/cr-sparse | 989ebead8a8ac37ade643093e1caa31ae2a3eda1 | [
"Apache-2.0"
] | 5 | 2021-11-21T21:01:11.000Z | 2022-02-28T07:20:03.000Z | # Copyright 2021 CR.Sparse Development Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import reduce
import jax.numpy as jnp
from .lop import Operator
def reshape(in_shape, out_shape):
"""Returns a linear operator which reshapes vectors from model space to data space
Args:
in_shape (int): Shape of vectors in the model space
out_shape (int): Shape of vectors in the data space
Returns:
(Operator): A reshaping linear operator
"""
in_size = jnp.prod(jnp.array(in_shape))
out_size = jnp.prod(jnp.array(out_shape))
assert in_size == out_size, "Input and output size must be equal"
times = lambda x: jnp.reshape(x, out_shape)
trans = lambda x : jnp.reshape(x, in_shape)
return Operator(times=times, trans=trans, shape=(out_shape,in_shape))
def arr2vec(shape):
"""Returns a linear operator which reshapes arrays to vectors
Args:
shape (int): Shape of arrays in the model space
Returns:
(Operator): An array to vec linear operator
"""
in_size = reduce((lambda x, y: x * y), shape)
out_shape = (in_size,)
times = lambda x: jnp.reshape(x, (in_size,))
trans = lambda x : jnp.reshape(x, shape)
return Operator(times=times, trans=trans, shape=(out_shape,shape))
| 32.781818 | 86 | 0.702718 | 274 | 1,803 | 4.554745 | 0.390511 | 0.044872 | 0.041667 | 0.054487 | 0.298077 | 0.267628 | 0.190705 | 0.083333 | 0.083333 | 0.083333 | 0 | 0.00632 | 0.210205 | 1,803 | 54 | 87 | 33.388889 | 0.870084 | 0.552413 | 0 | 0 | 0 | 0 | 0.047425 | 0 | 0 | 0 | 0 | 0 | 0.0625 | 1 | 0.125 | false | 0 | 0.1875 | 0 | 0.4375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51f6ed71939071c909f6f821aec31a10daf6299c | 637 | py | Python | innuy_lambda/settings/dev.py | innuy/innuy_lambda | 739d2573919513f08925fe63cad6e301b69323f9 | [
"MIT"
] | null | null | null | innuy_lambda/settings/dev.py | innuy/innuy_lambda | 739d2573919513f08925fe63cad6e301b69323f9 | [
"MIT"
] | 1 | 2020-06-05T18:21:24.000Z | 2020-06-05T18:21:24.000Z | innuy_lambda/settings/dev.py | innuy/innuy_lambda | 739d2573919513f08925fe63cad6e301b69323f9 | [
"MIT"
] | null | null | null | from .base import *
INSTALLED_APPS += [
'debug_toolbar',
'zappa_django_utils',
'storages',
]
MIDDLEWARE += [
'debug_toolbar.middleware.DebugToolbarMiddleware',
]
INTERNAL_IPS = [
'127.0.0.1',
]
DATABASES = {
'default': {
'ENGINE': 'zappa_django_utils.db.backends.s3sqlite',
'NAME': 'sqlite.db',
'BUCKET': 'innuylambda'
}
}
ALLOWED_HOSTS = ['*']
AWS_STORAGE_BUCKET_NAME = 'innuylambda-static'
AWS_S3_CUSTOM_DOMAIN = '%s.s3.amazonaws.com' % AWS_STORAGE_BUCKET_NAME
STATIC_URL = "https://%s/" % AWS_S3_CUSTOM_DOMAIN
STATICFILES_STORAGE = 'storages.backends.s3boto.S3BotoStorage' | 18.735294 | 70 | 0.6719 | 70 | 637 | 5.785714 | 0.628571 | 0.059259 | 0.079012 | 0.098765 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.022945 | 0.178964 | 637 | 34 | 71 | 18.735294 | 0.751434 | 0 | 0 | 0 | 0 | 0 | 0.413793 | 0.194357 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.041667 | 0 | 0.041667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51f7369f83b845973e9526f6db9b7f120362742a | 3,658 | py | Python | UNET.py | ArtemBoyarintsev/cell_segmentation | 9c0e70c1edbb20d661e392bab4c42002d13ebf06 | [
"MIT"
] | null | null | null | UNET.py | ArtemBoyarintsev/cell_segmentation | 9c0e70c1edbb20d661e392bab4c42002d13ebf06 | [
"MIT"
] | null | null | null | UNET.py | ArtemBoyarintsev/cell_segmentation | 9c0e70c1edbb20d661e392bab4c42002d13ebf06 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torchvision
class UNET(nn.Module):
THIRD_POOLING_INDEX = 16
FORTH_POOLING_INDEX = 23
def __init__(self, n_class = 1):
super(UNET, self).__init__()
# Contracting Path
self.c1 = UNET.get_conv2d_block(3, 16, 3, 1)
self.p1 = nn.MaxPool2d(2)
self.d1 = nn.Dropout2d()
self.c2 = UNET.get_conv2d_block(16, 32, 3, 1)
self.p2 = nn.MaxPool2d(2)
self.d2 = nn.Dropout2d()
self.c3 = UNET.get_conv2d_block(32, 64, 3, 1)
self.p3 = nn.MaxPool2d(2)
self.d3 = nn.Dropout2d()
self.c4 = UNET.get_conv2d_block(64, 128, 3, 1)
self.p4 = nn.MaxPool2d(2)
self.d4 = nn.Dropout2d()
self.c5 = UNET.get_conv2d_block(128, 256, 3, 1)
self.u6 = nn.ConvTranspose2d(256, 128, kernel_size=2, stride=2, padding=0)
self.d6 = nn.Dropout2d()
self.c6 = UNET.get_conv2d_block(256, 128, 3, 1)
self.u7 = nn.ConvTranspose2d(128, 64, kernel_size=2, stride=2, padding=0)
self.d7 = nn.Dropout2d()
self.c7 = UNET.get_conv2d_block(128, 64, 3, 1)
self.u8 = nn.ConvTranspose2d(64, 32, kernel_size=2, stride=2, padding=0)
self.d8 = nn.Dropout2d()
self.c8 = UNET.get_conv2d_block(64, 32, 3, 1)
self.u9 = nn.ConvTranspose2d(32, 16, kernel_size=2, stride=2, padding=0)
self.d9 = nn.Dropout2d()
self.c9 = UNET.get_conv2d_block(32, 16, 3, 1)
self.c10 = nn.Conv2d(16, 1, 1)
self.activation = nn.Sigmoid()
#outputs = Conv2D(1, (1, 1), activation='sigmoid')(c9)
def forward(self, batch):
c1_output = self.c1(batch)
h = c1_output
h = self.p1(h)
h = self.d1(h)
c2_output = self.c2(h)
h = c2_output
h = self.p2(h)
h = self.d2(h)
c3_output = self.c3(h)
h = c3_output
h = self.p3(h)
h = self.d3(h)
c4_output = self.c4(h)
h = c4_output
h = self.p4(h)
h = self.d4(h)
h = self.c5(h)
u = self.u6(h)
h = torch.cat((u, c4_output), dim=(1))
h = self.d6(h)
h = self.c6(h)
u = self.u7(h)
h = torch.cat((u, c3_output), dim=(1))
h = self.d7(h)
h = self.c7(h)
u = self.u8(h)
h = torch.cat((u, c2_output), dim=(1))
h = self.d8(h)
h = self.c8(h)
u = self.u9(h)
h = torch.cat((u, c1_output), dim=(1))
h = self.d9(h)
h = self.c9(h)
h = self.c10(h)
ret = self.activation(h)
return ret
@staticmethod
def get_conv2d_block(input_size, output_size, kernel_size, padding):
"""Function to add 2 convolutional layers with the parameters passed to it"""
# first layer
# kernel_initializer = 'he_normal', padding = 'same'
conv2d_block = nn.Sequential()
conv2d = nn.Conv2d(input_size, output_size, kernel_size = kernel_size, padding=padding)
conv2d_block.add_module('conv_0', conv2d)
conv2d_block.add_module('batchnorm_0', nn.BatchNorm2d(output_size))
conv2d_block.add_module('relu0', nn.ReLU())
conv2d_2 = nn.Conv2d(output_size, output_size, kernel_size=kernel_size, padding=padding)
conv2d_block.add_module('conv_1', conv2d_2)
conv2d_block.add_module('batchnorm_1', nn.BatchNorm2d(output_size))
conv2d_block.add_module('relu0', nn.ReLU())
return conv2d_block | 31.264957 | 96 | 0.5462 | 523 | 3,658 | 3.659656 | 0.193117 | 0.103448 | 0.073145 | 0.08464 | 0.360502 | 0.212121 | 0.194357 | 0.194357 | 0.131661 | 0.131661 | 0 | 0.092929 | 0.323401 | 3,658 | 117 | 97 | 31.264957 | 0.680404 | 0.056042 | 0 | 0.02381 | 0 | 0 | 0.012772 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035714 | false | 0 | 0.035714 | 0 | 0.130952 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51f8bea04c27110192da44644da602e9cd13a9c7 | 4,538 | py | Python | order_center/api.py | YuaShizuki/order_center | 6f0a8831b7cef82cee5b2f6268822acbee9077c4 | [
"MIT"
] | null | null | null | order_center/api.py | YuaShizuki/order_center | 6f0a8831b7cef82cee5b2f6268822acbee9077c4 | [
"MIT"
] | null | null | null | order_center/api.py | YuaShizuki/order_center | 6f0a8831b7cef82cee5b2f6268822acbee9077c4 | [
"MIT"
] | null | null | null | import frappe
import os
import json
import datetime
import uuid
@frappe.whitelist(allow_guest=True)
def clustering_and_scheduling():
for trip in json.loads(frappe.local.request.values["inputData"]):
build_trip(trip)
def build_trip(dat):
d = frappe.get_doc({
"doctype":"DRS",
"trip_name":dat["tripName"],
"status":"Clustering And Scheduling",
"driver_name":dat["driverName"],
"vehicle":dat["vehicle"],
"shipment_details":parse_shipment_details(dat["shipmentDetails"])
})
d.insert()
frappe.db.commit()
def parse_shipment_details(shdetails):
result = []
for shipment in shdetails:
d = dict()
d["latitude"] = shipment["latitude"]
d["longitude"] = shipment["longitude"]
d["awb"] = shipment["clientShipmentId"]
d["delivery_order"] = shipment["deliveryOrder"]
d["Status"] = "Unknown"
result.append(d)
return result
@frappe.whitelist(allow_guest=True)
def dispatch_start_trip():
start_trip(json.loads(frappe.local.request.values["inputData"]))
def start_trip(trip):
t = frappe.get_list("DRS", fields=["*"],
filters={"trip_name":trip["tripName"]})[0]
tx = frappe.get_doc("DRS", t["name"])
tx.status = "Start Trip"
tx.save()
@frappe.whitelist(allow_guest=True)
def load_items():
val = frappe.local.request.values["inputData"]
awb = json.loads(val)["clientShipmentId"]
t = frappe.get_list("Shipment Details", fields=["*"],
filters={"awb":awb})[0]
tx = frappe.get_doc("Shipment Details", t["name"])
tx.status = "Loaded"
tx.save()
@frappe.whitelist(allow_guest=True)
def pickup():
val = frappe.local.request.values["inputData"]
awb = json.loads(val)["clientShipmentId"]
t = frappe.get_list("Shipment Details", fields=["*"],
filters={"awb":awb})[0]
tx = frappe.get_doc("Shipment Details", t["name"])
tx.status = "Picked Up"
tx.save()
@frappe.whitelist(allow_guest=True)
def delivery_notification():
parcel = json.loads(frappe.local.request.values["inputData"])
set_deliverd(parcel, "Delivered")
def set_deliverd(parcel, status):
awb = parcel["clientShipmentId"]
t = frappe.get_list("Shipment Details", fields=["*"],
filters={"awb":awb})[0]
tx = frappe.get_doc("Shipment Details", t["name"])
tx.status = status
tx.latitude = parcel["latitude"]
tx.longitude = parcel["longitude"]
tx.save()
@frappe.whitelist(allow_guest=True)
def not_deliverd_notification():
parcel = json.loads(frappe.local.request.values["inputData"])
set_deliverd(parcel, "Not Delivered")
@frappe.whitelist(allow_guest=True)
def partial_delivery_notification():
parcel = json.loads(frappe.local.request.values["inputData"])
set_deliverd(parcel, "Partial Delivery")
@frappe.whitelist(allow_guest=True)
def arrival_end_trip():
trip = json.loads(frappe.local.request.values["inputData"])
t = frappe.get_list("DRS", fields=["*"],
filters={"trip_name":trip["tripName"]})[0]
tx = frappe.get_doc("DRS", t["name"])
tx.status = "End Trip"
tx.save()
#---------------------------------------------THROW-----------------------------
@frappe.whitelist(allow_guest=True)
def clear_all_cache():
frappe.clear_cache()
return "cache cleard"
#@frappe.whitelist(allow_guest=True)
#def arrival_end_trip():
# open(os.path.expanduser("~/erp_data/arrival_end_trip.json"),
# "a").write(frappe.local.request.data + "\n")
@frappe.whitelist(allow_guest=True)
def accept():
open(os.path.expanduser("~/erp_data/accept.json"),
"a").write(frappe.local.request.data + "\n")
@frappe.whitelist(allow_guest=True)
def reject():
open(os.path.expanduser("~/erp_data/reject.json"),
"a").write(frappe.local.request.data + "\n")
@frappe.whitelist(allow_guest=True)
def clustering_updates():
open(os.path.expanduser("~/erp_data/clustering_updates.json"),
"a").write(frappe.local.request.data + "\n")
#CombinedMultiDict([ImmutableMultiDict([]), ImmutableMultiDict([('inputData', u'[{"tripName":"TRIP-32","deliveryMediumName":"MEHUL","driverName":"","vehicle":"","shipmentDetails":[{"latitude":19.199272,"longitude":72.857732,"clientShipmentId":"222222201","deliveryOrder":4},{"latitude":19.199272,"longitude":72.857732,"clientShipmentId":"112000003","deliveryOrder":5},{"latitude":19.1076375,"longitude":72.8655789,"clientShipmentId":"test_order","deliveryOrder":6}]}]')])])
| 34.907692 | 473 | 0.653812 | 539 | 4,538 | 5.378479 | 0.211503 | 0.067265 | 0.089686 | 0.112108 | 0.618144 | 0.618144 | 0.547775 | 0.481545 | 0.385995 | 0.35426 | 0 | 0.020323 | 0.154253 | 4,538 | 129 | 474 | 35.178295 | 0.735018 | 0.160864 | 0 | 0.407767 | 0 | 0 | 0.184114 | 0.020516 | 0 | 0 | 0 | 0 | 0 | 1 | 0.15534 | false | 0 | 0.048544 | 0 | 0.223301 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51f968ed80d1979d901e48aea3098c1013002f25 | 1,836 | py | Python | monkeytools/array_methods.py | mr-devs/monkeytools | e56197befcc0a14d9d082eb4463ebe27fb967116 | [
"MIT"
] | null | null | null | monkeytools/array_methods.py | mr-devs/monkeytools | e56197befcc0a14d9d082eb4463ebe27fb967116 | [
"MIT"
] | null | null | null | monkeytools/array_methods.py | mr-devs/monkeytools | e56197befcc0a14d9d082eb4463ebe27fb967116 | [
"MIT"
] | null | null | null | """
A collection of array-based algorithms
1. Find maximum sub-array
- https://en.wikipedia.org/wiki/Maximum_subarray_problem
Author: Matthew R. DeVerna
"""
from .utils import check_array
def max_subarray_kadane(given_array):
"""
Find a contiguous subarray with the largest sum.
Note: This algorithm is implemented with Kadane's algorithm with a slight
change (we do not add 1 to the best_end)
- https://en.wikipedia.org/wiki/Maximum_subarray_problem#Kadane's_algorithm
Complexity:
----------
- O(n)
Parameters:
----------
- given_array (list) : a numerical sequence
Returns:
----------
- best_sum (int) : the total sum between `best_start` and `best_end`
- best_start (int) : the first index in the largest sub-array (inclusive)
- best_end (int) : the last index in the largest sub-array (inclusive)
Exceptions:
----------
- TypeError
Example:
----------
lst = [-45, -78, -2, -60, 27, 21, 71, 80, 22, 59]
max_subarray(lst)
# Output
(280, 4, 10)
Where 280 is the sum between lst[4] (27, inclusive) and lst[9] (59, inclusive)
"""
# Ensure array is a list and contains only numeric values
check_array(given_array)
best_sum = float('-inf')
best_start = best_end = None
current_sum = 0
for current_end, x in enumerate(given_array):
if current_sum <= 0:
# Start a new sequence at the current element
current_start = current_end
current_sum = x
else:
# Extend the existing sequence with the current element
current_sum += x
if current_sum > best_sum:
best_sum = current_sum
best_start = current_start
best_end = current_end
return best_sum, best_start, best_end | 27.818182 | 83 | 0.622004 | 247 | 1,836 | 4.45749 | 0.437247 | 0.038147 | 0.032698 | 0.034514 | 0.143506 | 0.143506 | 0.143506 | 0.081744 | 0 | 0 | 0 | 0.028422 | 0.271786 | 1,836 | 66 | 84 | 27.818182 | 0.795064 | 0.609477 | 0 | 0 | 0 | 0 | 0.0067 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.058824 | 0 | 0.176471 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51fa66f7327624b0362e31f656c33d867492f9a3 | 2,922 | py | Python | Library/ContractUtils.py | rccannizzaro/QC-StrategyBacktest | 847dbd61680466bc60ce7893eced8a8f70d16b2e | [
"Apache-2.0"
] | 11 | 2021-12-02T15:41:47.000Z | 2022-03-14T03:49:22.000Z | Library/ContractUtils.py | ikamanu/QC-StrategyBacktest | 847dbd61680466bc60ce7893eced8a8f70d16b2e | [
"Apache-2.0"
] | null | null | null | Library/ContractUtils.py | ikamanu/QC-StrategyBacktest | 847dbd61680466bc60ce7893eced8a8f70d16b2e | [
"Apache-2.0"
] | 5 | 2022-02-02T12:07:51.000Z | 2022-02-13T02:24:19.000Z | ########################################################################################
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
########################################################################################
from Logger import *
class ContractUtils:
def __init__(self, context):
# Set the context
self.context = context
# Set the logger
self.logger = Logger(context, className = type(self).__name__, logLevel = context.logLevel)
def getUnderlyingLastPrice(self, contract):
# Get the context
context = self.context
# Get the object from the Securities dictionary if available (pull the latest price), else use the contract object itself
if contract.UnderlyingSymbol in context.Securities:
security = context.Securities[contract.UnderlyingSymbol]
# Check if we have found the security
if security != None:
# Get the last known price of the security
return context.GetLastKnownPrice(security).Price
else:
# Get the UnderlyingLastPrice attribute of the contract
return contract.UnderlyingLastPrice
def getSecurity(self, contract):
# Get the Securities object
Securities = self.context.Securities
# Check if we can extract the Symbol attribute
if hasattr(contract, "Symbol") and contract.Symbol in Securities:
# Get the security from the Securities dictionary if available (pull the latest price), else use the contract object itself
security = Securities[contract.Symbol]
else:
# Use the contract itself
security = contract
return security
# Returns the mid-price of an option contract
def midPrice(self, contract):
security = self.getSecurity(contract)
return 0.5*(security.BidPrice + security.AskPrice)
def bidAskSpread(self, contract):
security = self.getSecurity(contract)
return abs(security.AskPrice - security.BidPrice)
| 48.7 | 132 | 0.558522 | 287 | 2,922 | 5.658537 | 0.393728 | 0.036946 | 0.018473 | 0.033251 | 0.166256 | 0.166256 | 0.166256 | 0.105911 | 0.105911 | 0.105911 | 0 | 0.003055 | 0.327858 | 2,922 | 59 | 133 | 49.525424 | 0.823829 | 0.485284 | 0 | 0.153846 | 0 | 0 | 0.004971 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.192308 | false | 0 | 0.038462 | 0 | 0.461538 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51fad0b66c4e3fa317158550a35e046091d71c7e | 24,945 | py | Python | backtester/backtester.py | unbalancedparentheses/backtester_options | 46efd30e405f360c560f8eae8b2ee7d26f4532db | [
"MIT"
] | 91 | 2020-01-31T10:15:35.000Z | 2022-03-27T19:15:12.000Z | backtester/backtester.py | unbalancedparentheses/backtester_options | 46efd30e405f360c560f8eae8b2ee7d26f4532db | [
"MIT"
] | 38 | 2019-05-12T02:00:46.000Z | 2019-12-06T14:54:25.000Z | backtester/backtester.py | unbalancedparentheses/backtester_options | 46efd30e405f360c560f8eae8b2ee7d26f4532db | [
"MIT"
] | 20 | 2020-06-12T08:21:30.000Z | 2022-03-28T05:52:59.000Z | from functools import reduce
import numpy as np
import pandas as pd
import pyprind
from .enums import *
class Backtest:
"""Backtest runner class."""
def __init__(self, allocation, initial_capital=1_000_000, shares_per_contract=100):
assets = ('stocks', 'options', 'cash')
total_allocation = sum(allocation.get(a, 0.0) for a in assets)
self.allocation = {}
for asset in assets:
self.allocation[asset] = allocation.get(asset, 0.0) / total_allocation
self.initial_capital = initial_capital
self.stop_if_broke = True
self.shares_per_contract = shares_per_contract
self._stocks = []
self._options_strategy = None
self._stocks_data = None
self._options_data = None
@property
def stocks(self):
return self._stocks
@stocks.setter
def stocks(self, stocks):
assert np.isclose(sum(stock.percentage for stock in stocks), 1.0,
atol=0.000001), 'Stock percentages must sum to 1.0'
self._stocks = list(stocks)
return self
@property
def options_strategy(self):
return self._options_strategy
@options_strategy.setter
def options_strategy(self, strat):
self._options_strategy = strat
@property
def stocks_data(self):
return self._stocks_data
@stocks_data.setter
def stocks_data(self, data):
self._stocks_schema = data.schema
self._stocks_data = data
@property
def options_data(self):
return self._options_data
@options_data.setter
def options_data(self, data):
self._options_schema = data.schema
self._options_data = data
    def run(self, rebalance_freq=0, monthly=False, sma_days=None):
        """Runs the backtest and returns a `pd.DataFrame` of the orders executed (`self.trade_log`)

        Args:
            rebalance_freq (int, optional): Determines the frequency of portfolio rebalances. Defaults to 0.
            monthly (bool, optional): Iterates through data monthly rather than daily. Defaults to False.
            sma_days (int, optional): SMA window for the stock entry filter; falsy disables it.

        Returns:
            pd.DataFrame: Log of the trades executed.
        """
        # Sanity-check that data and strategy have been set and agree with each other.
        assert self._stocks_data, 'Stock data not set'
        assert all(stock.symbol in self._stocks_data['symbol'].values
                   for stock in self._stocks), 'Ensure all stocks in portfolio are present in the data'
        assert self._options_data, 'Options data not set'
        assert self._options_strategy, 'Options Strategy not set'
        assert self._options_data.schema == self._options_strategy.schema

        # Both datasets must cover exactly the same dates.
        option_dates = self._options_data['date'].unique()
        stock_dates = self.stocks_data['date'].unique()
        assert np.array_equal(stock_dates,
                              option_dates), 'Stock and options dates do not match (check that TZ are equal)'

        # Reset state: empty inventories, full starting cash, empty trade log, and a
        # balance frame seeded one day before the data starts.
        self._initialize_inventories()
        self.current_cash = self.initial_capital
        self.trade_log = pd.DataFrame()
        self.balance = pd.DataFrame({
            'total capital': self.current_cash,
            'cash': self.current_cash
        },
                                    index=[self.stocks_data.start_date - pd.Timedelta(1, unit='day')])

        if sma_days:
            self.stocks_data.sma(sma_days)

        dates = pd.DataFrame(self.options_data._data[['quotedate',
                                                      'volume']]).drop_duplicates('quotedate').set_index('quotedate')

        # First trading day of every `rebalance_freq`-month business period.
        # NOTE(review): when rebalance_freq == 0 this is an empty list, and the
        # `rebalancing_days[-1]` access below raises IndexError — confirm whether
        # rebalance_freq == 0 is meant to be supported.
        rebalancing_days = pd.to_datetime(
            dates.groupby(pd.Grouper(freq=str(rebalance_freq) +
                                     'BMS')).apply(lambda x: x.index.min()).values) if rebalance_freq else []

        data_iterator = self._data_iterator(monthly)
        bar = pyprind.ProgBar(len(stock_dates), bar_char='█')

        # Walk the data; on each rebalancing day, first settle the balance for the
        # elapsed period, then rebalance the portfolio.
        for date, stocks, options in data_iterator:
            if (date in rebalancing_days):
                previous_rb_date = rebalancing_days[rebalancing_days.get_loc(date) -
                                                    1] if rebalancing_days.get_loc(date) != 0 else date
                self._update_balance(previous_rb_date, date)
                self._rebalance_portfolio(date, stocks, options, sma_days)

            bar.update()

        # Update balance for the period between the last rebalancing day and the last day
        self._update_balance(rebalancing_days[-1], self.stocks_data.end_date)

        # Derive aggregate capital and return columns from the per-asset columns
        # that _update_balance filled in ('calls capital', 'puts capital', one
        # column per stock symbol).
        self.balance['options capital'] = self.balance['calls capital'] + self.balance['puts capital']
        self.balance['stocks capital'] = sum(self.balance[stock.symbol] for stock in self._stocks)
        self.balance['stocks capital'].iloc[0] = 0
        self.balance['options capital'].iloc[0] = 0
        self.balance[
            'total capital'] = self.balance['cash'] + self.balance['stocks capital'] + self.balance['options capital']
        self.balance['% change'] = self.balance['total capital'].pct_change()
        self.balance['accumulated return'] = (1.0 + self.balance['% change']).cumprod()

        return self.trade_log
def _initialize_inventories(self):
"""Initialize empty stocks and options inventories."""
columns = pd.MultiIndex.from_product(
[[l.name for l in self._options_strategy.legs],
['contract', 'underlying', 'expiration', 'type', 'strike', 'cost', 'order']])
totals = pd.MultiIndex.from_product([['totals'], ['cost', 'qty', 'date']])
self._options_inventory = pd.DataFrame(columns=columns.append(totals))
self._stocks_inventory = pd.DataFrame(columns=['symbol', 'price', 'qty'])
def _data_iterator(self, monthly):
"""Returns combined iterator for stock and options data.
Each step, it produces a tuple like the following:
(date, stocks, options)
Returns:
generator: Daily/monthly iterator over `self._stocks_data` and `self.options_data`.
"""
if monthly:
it = zip(self._stocks_data.iter_months(), self._options_data.iter_months())
else:
it = zip(self._stocks_data.iter_dates(), self._options_data.iter_dates())
return ((date, stocks, options) for (date, stocks), (_, options) in it)
    def _rebalance_portfolio(self, date, stocks, options, sma_days):
        """Rebalances the portfolio according to `self.allocation` weights.

        Args:
            date (pd.Timestamp): Current date.
            stocks (pd.DataFrame): Stocks data for the current date.
            options (pd.DataFrame): Options data for the current date.
            sma_days (int): SMA window size
        """

        # Close any option positions due to exit (defined elsewhere in this class).
        self._execute_option_exits(date, options)

        # Mark the portfolio to market before computing the new allocations.
        stock_capital = self._current_stock_capital(stocks)
        options_capital = self._current_options_capital(options)
        total_capital = self.current_cash + stock_capital + options_capital

        # buy stocks
        stocks_allocation = self.allocation['stocks'] * total_capital
        self._stocks_inventory = pd.DataFrame(columns=['symbol', 'price', 'qty'])

        # We simulate a sell of the stock positions and then a rebuy.
        # This would **not** work if we added transaction fees.
        # Cash is set to the stock + cash allocations; _buy_stocks then deducts
        # the actual purchase cost from it.
        self.current_cash = stocks_allocation + total_capital * self.allocation['cash']
        self._buy_stocks(stocks, stocks_allocation, sma_days)

        # exit/enter contracts
        # If the options sleeve is under target, enter new contracts with the
        # surplus; otherwise sell down positions to reach the target.
        options_allocation = self.allocation['options'] * total_capital
        if options_allocation >= options_capital:
            self._execute_option_entries(date, options, options_allocation - options_capital)
        else:
            to_sell = options_capital - options_allocation
            current_options = self._get_current_option_quotes(options)
            self._sell_some_options(date, to_sell, current_options)
    def _sell_some_options(self, date, to_sell, current_options):
        """Sell option positions until roughly `to_sell` worth of capital is freed.

        Walks the options inventory row by row, selling as many contracts of each
        position as needed, logging each sale in `self.trade_log` and updating
        `self._options_inventory` and `self.current_cash` in place.

        Args:
            date (pd.Timestamp): Current date, stamped on the log entries.
            to_sell (float): Target amount of capital to free up.
            current_options (list): Per-leg quote frames from
                `self._get_current_option_quotes` (assumed aligned with the
                inventory rows — TODO confirm).
        """

        sold = 0
        # Element-wise sum of the per-leg 'cost' Series: one combined exit cost
        # per inventory row (this is a Series, not a scalar).
        total_costs = sum([current_options[i]['cost'] for i in range(len(current_options))])
        for (exit_cost, (row_index, inventory_row)) in zip(total_costs, self._options_inventory.iterrows()):
            # Only sell from this row while there is still an amount left to free
            # and the row's exit cost could contribute to it.
            if (to_sell - sold > -exit_cost) and (to_sell - sold) > 0:
                qty_to_sell = (to_sell - sold) // exit_cost
                if -qty_to_sell <= inventory_row['totals']['qty']:
                    qty_to_sell = (to_sell - sold) // exit_cost
                else:
                    # Requested more than held: cap at the (negated) held quantity.
                    # NOTE(review): the inner `if qty_to_sell != 0` guard means a
                    # zero request is left at zero — confirm the sign conventions
                    # for qty here (inventory qty vs. qty_to_sell).
                    if qty_to_sell != 0:
                        qty_to_sell = -inventory_row['totals']['qty']
                if qty_to_sell != 0:
                    # Record the (partial) exit: flip each leg's order direction
                    # and stamp current costs, then append to the trade log.
                    trade_log_append = self._options_inventory.loc[row_index].copy()
                    trade_log_append['totals', 'qty'] = -qty_to_sell
                    trade_log_append['totals', 'date'] = date
                    trade_log_append['totals', 'cost'] = exit_cost
                    for i, leg in enumerate(self._options_strategy.legs):
                        trade_log_append[leg.name, 'order'] = ~trade_log_append[leg.name, 'order']
                        trade_log_append[leg.name, 'cost'] = current_options[i].loc[row_index]['cost']

                    # NOTE(review): DataFrame.append was removed in pandas 2.0;
                    # this code assumes an older pandas.
                    self.trade_log = self.trade_log.append(trade_log_append, ignore_index=True)
                    self._options_inventory.at[row_index, ('totals', 'date')] = date
                    self._options_inventory.at[row_index, ('totals', 'qty')] += qty_to_sell
                    sold += (qty_to_sell * exit_cost)
        # Credit the proceeds; any shortfall/overshoot relative to `to_sell`
        # nets out against cash here.
        self.current_cash += sold - to_sell
def _current_stock_capital(self, stocks):
"""Return the current value of the stocks inventory.
Args:
stocks (pd.DataFrame): Stocks data for the current time step.
Returns:
float: Total capital in stocks.
"""
current_stocks = self._stocks_inventory.merge(stocks,
how='left',
left_on='symbol',
right_on=self._stocks_schema['symbol'])
return (current_stocks[self._stocks_schema['adjClose']] * current_stocks['qty']).sum()
def _current_options_capital(self, options):
options_value = self._get_current_option_quotes(options)
values_by_row = [0] * len(options_value[0])
if len(options_value[0]) != 0:
for i in range(len(self._options_strategy.legs)):
values_by_row += options_value[i]['cost'].values
total = -sum(values_by_row * self._options_inventory['totals']['qty'].values)
else:
total = 0
return total
def _buy_stocks(self, stocks, allocation, sma_days):
"""Buys stocks according to their given weight, optionally using an SMA entry filter.
Updates `self._stocks_inventory` and `self.current_cash`.
Args:
stocks (pd.DataFrame): Stocks data for the current time step.
allocation (float): Total capital allocation for stocks.
sma_days (int): SMA window.
"""
stock_symbols = [stock.symbol for stock in self.stocks]
query = '{} in {}'.format(self._stocks_schema['symbol'], stock_symbols)
inventory_stocks = stocks.query(query)
stock_percentages = np.array([stock.percentage for stock in self.stocks])
stock_prices = inventory_stocks[self._stocks_schema['adjClose']]
if sma_days:
qty = np.where(inventory_stocks['sma'] < stock_prices, (allocation * stock_percentages) // stock_prices, 0)
else:
qty = (allocation * stock_percentages) // stock_prices
self.current_cash -= np.sum(stock_prices * qty)
self._stocks_inventory = pd.DataFrame({'symbol': stock_symbols, 'price': stock_prices, 'qty': qty})
def _update_balance(self, start_date, end_date):
    """Updates self.balance in batch in a certain period between rebalancing days"""
    # Slice the stocks and options data to the [start_date, end_date) window.
    stocks_date_col = self._stocks_schema['date']
    stocks_data = self._stocks_data.query('({date_col} >= "{start_date}") & ({date_col} < "{end_date}")'.format(
        date_col=stocks_date_col, start_date=start_date, end_date=end_date))
    options_date_col = self._options_schema['date']
    options_data = self._options_data.query('({date_col} >= "{start_date}") & ({date_col} < "{end_date}")'.format(
        date_col=options_date_col, start_date=start_date, end_date=end_date))
    # Daily value of call and put positions, indexed by date within the window.
    calls_value = pd.Series(0, index=options_data[options_date_col].unique())
    puts_value = pd.Series(0, index=options_data[options_date_col].unique())
    for leg in self._options_strategy.legs:
        leg_inventory = self._options_inventory[leg.name]
        # Positions are valued at the price of the CLOSING side, i.e. the
        # opposite of the entry direction (~leg.direction).
        cost_field = (~leg.direction).value
        for contract in leg_inventory['contract']:
            leg_inventory_contract = leg_inventory.query('contract == "{}"'.format(contract))
            qty = self._options_inventory.loc[leg_inventory_contract.index]['totals']['qty'].values[0]
            options_contract_col = self._options_schema['contract']
            # Left join so dates with no quote for this contract become NaN
            # rows instead of being dropped.
            current = leg_inventory_contract[['contract']].merge(options_data,
                                                                 how='left',
                                                                 left_on='contract',
                                                                 right_on=options_contract_col)
            current.set_index(options_date_col, inplace=True)
            if cost_field == Direction.BUY.value:
                current[cost_field] = -current[cost_field]
            if (leg_inventory_contract['type'] == Type.CALL.value).any():
                calls_value = calls_value.add(current[cost_field] * qty * self.shares_per_contract, fill_value=0)
            else:
                puts_value = puts_value.add(current[cost_field] * qty * self.shares_per_contract, fill_value=0)
    # Daily value of each stock position over the same window.
    stocks_current = self._stocks_inventory[['symbol', 'qty']].merge(stocks_data[['date', 'symbol', 'adjClose']],
                                                                     on='symbol')
    stocks_current['cost'] = stocks_current['qty'] * stocks_current['adjClose']
    # One column per tracked stock, named after the symbol, indexed by date.
    columns = [
        stocks_current[stocks_current['symbol'] == stock.symbol].set_index(stocks_date_col)[[
            'cost'
        ]].rename(columns={'cost': stock.symbol}) for stock in self._stocks
    ]
    add = pd.concat(columns, axis=1)
    add['cash'] = self.current_cash
    add['options qty'] = self._options_inventory['totals']['qty'].sum()
    add['calls capital'] = calls_value
    add['puts capital'] = puts_value
    add['stocks qty'] = self._stocks_inventory['qty'].sum()
    for _index, row in self._stocks_inventory.iterrows():
        symbol = row['symbol']
        add[symbol + ' qty'] = row['qty']
    # sort=False means we're assuming the updates are done in chronological order, i.e,
    # the dates in add are the immediate successors to the ones at the end of self.balance.
    # Pass sort=True to ensure self.balance is always sorted chronologically if needed.
    self.balance = self.balance.append(add, sort=False)
def _execute_option_entries(self, date, options, options_allocation):
    """Enters option positions according to `self._options_strategy`.

    Calls `self._pick_entry_signals` to select from the entry signals given by the strategy.
    Updates `self._options_inventory` and `self.current_cash`.

    Args:
        date (pd.Timestamp): Current date.
        options (pd.DataFrame): Options data for the current time step.
        options_allocation (float): Capital amount allocated to options.
    """
    # The allocation is credited to cash up-front; the cost of any entered
    # positions is deducted again at the end.
    # NOTE(review): on the early `return` below the allocation stays credited
    # as cash — appears intentional (unused allocation remains cash); confirm.
    self.current_cash += options_allocation
    # Remove contracts already in inventory
    inventory_contracts = pd.concat(
        [self._options_inventory[leg.name]['contract'] for leg in self._options_strategy.legs])
    subset_options = options[~options[self._options_schema['contract']].isin(inventory_contracts)]
    entry_signals = []
    for leg in self._options_strategy.legs:
        flt = leg.entry_filter
        cost_field = leg.direction.value
        leg_entries = subset_options[flt(subset_options)]
        # Exit if no entry signals for the current leg
        if leg_entries.empty:
            return
        # Normalize the leg's columns to the canonical signal schema.
        fields = self._signal_fields(cost_field)
        leg_entries = leg_entries.reindex(columns=fields.keys())
        leg_entries.rename(columns=fields, inplace=True)
        order = get_order(leg.direction, Signal.ENTRY)
        leg_entries['order'] = order
        # Change sign of cost for SELL orders
        if leg.direction == Direction.SELL:
            leg_entries['cost'] = -leg_entries['cost']
        leg_entries['cost'] *= self.shares_per_contract
        leg_entries.columns = pd.MultiIndex.from_product([[leg.name], leg_entries.columns])
        entry_signals.append(leg_entries.reset_index(drop=True))
    # Append the 'totals' column to entry_signals
    total_costs = sum([leg_entry.droplevel(0, axis=1)['cost'] for leg_entry in entry_signals])
    # Integer number of multi-leg units affordable with the allocation.
    qty = options_allocation // abs(total_costs)
    totals = pd.DataFrame.from_dict({'cost': total_costs, 'qty': qty, 'date': date})
    totals.columns = pd.MultiIndex.from_product([['totals'], totals.columns])
    entry_signals.append(totals)
    entry_signals = pd.concat(entry_signals, axis=1)
    # Remove signals where qty == 0
    entry_signals = entry_signals[entry_signals['totals']['qty'] > 0]
    entries = self._pick_entry_signals(entry_signals)
    # Update options inventory, trade log and current cash
    self._options_inventory = self._options_inventory.append(entries, ignore_index=True)
    self.trade_log = self.trade_log.append(entries, ignore_index=True)
    self.current_cash -= np.sum(entries['totals']['cost'] * entries['totals']['qty'])
def _execute_option_exits(self, date, options):
    """Exits option positions according to `self._options_strategy`.

    Option positions are closed whenever the strategy signals an exit, when the profit/loss thresholds
    are exceeded or whenever the contracts in `self._options_inventory` are not found in `options`.
    Updates `self._options_inventory` and `self.current_cash`.

    Args:
        date (pd.Timestamp): Current date.
        options (pd.DataFrame): Options data for the current time step.
    """
    strategy = self._options_strategy
    current_options_quotes = self._get_current_option_quotes(options)
    filter_masks = []
    for i, leg in enumerate(strategy.legs):
        flt = leg.exit_filter
        # This mask is to ensure that legs with missing contracts exit.
        missing_contracts_mask = current_options_quotes[i]['cost'].isna()
        filter_masks.append(flt(current_options_quotes[i]) | missing_contracts_mask)
        # Closing a position uses the opposite side of the entry direction.
        fields = self._signal_fields((~leg.direction).value)
        current_options_quotes[i] = current_options_quotes[i].reindex(columns=fields.values())
        current_options_quotes[i].rename(columns=fields, inplace=True)
        current_options_quotes[i].columns = pd.MultiIndex.from_product([[leg.name],
                                                                        current_options_quotes[i].columns])
    exit_candidates = pd.concat(current_options_quotes, axis=1)
    # If a contract is missing we replace the NaN values with those of the inventory
    # except for cost, which we impute as zero.
    exit_candidates = self._impute_missing_option_values(exit_candidates)
    # Append the 'totals' column to exit_candidates
    qtys = self._options_inventory['totals']['qty']
    total_costs = sum([exit_candidates[l.name]['cost'] for l in self._options_strategy.legs])
    totals = pd.DataFrame.from_dict({'cost': total_costs, 'qty': qtys, 'date': date})
    totals.columns = pd.MultiIndex.from_product([['totals'], totals.columns])
    exit_candidates = pd.concat([exit_candidates, totals], axis=1)
    # Compute which contracts need to exit, either because of price thresholds or user exit filters
    threshold_exits = strategy.filter_thresholds(self._options_inventory['totals']['cost'], total_costs)
    # OR together the per-leg exit masks: any leg signalling exit closes the position.
    filter_mask = reduce(lambda x, y: x | y, filter_masks)
    exits_mask = threshold_exits | filter_mask
    exits = exit_candidates[exits_mask]
    total_costs = total_costs[exits_mask] * exits['totals']['qty']
    # Update options inventory, trade log and current cash
    self._options_inventory.drop(self._options_inventory[exits_mask].index, inplace=True)
    self.trade_log = self.trade_log.append(exits, ignore_index=True)
    self.current_cash -= sum(total_costs)
def _pick_entry_signals(self, entry_signals):
    """Returns the entry signals to execute.

    Args:
        entry_signals (pd.DataFrame): DataFrame of option entry signals chosen by the strategy.

    Returns:
        pd.DataFrame: DataFrame of entries to execute.
    """
    if entry_signals.empty:
        return entry_signals
    # FIXME: This is a naive signal selection criterion, it simply picks the first one in `entry_signals`
    return entry_signals.iloc[0]
def _signal_fields(self, cost_field):
    """Map raw option-data column names to the canonical signal column names.

    Args:
        cost_field (str): Schema key of the column that holds the trade cost.

    Returns:
        dict: Mapping from source column names to canonical names, in the
        canonical column order (callers rely on the ordering for reindexing).
    """
    schema = self._options_schema
    mapping = {
        schema[key]: key
        for key in ('contract', 'underlying', 'expiration', 'type', 'strike')
    }
    mapping[schema[cost_field]] = 'cost'
    mapping['order'] = 'order'
    return mapping
def _get_current_option_quotes(self, options):
    """Returns the current quotes for all the options in `self._options_inventory` as a list of DataFrames.

    It also adds a `cost` column with the cost of closing the position in each contract and an `order`
    column with the corresponding exit order type.

    Args:
        options (pd.DataFrame): Options data in the current time step.

    Returns:
        [pd.DataFrame]: List of DataFrames, one for each leg in `self._options_inventory`,
        with the exit cost for the contracts.
    """
    current_options_quotes = []
    for leg in self._options_strategy.legs:
        inventory_leg = self._options_inventory[leg.name]
        # This is a left join to ensure that the result has the same length as the inventory. If the contract
        # isn't in the daily data the values will all be NaN and the filters should all yield False.
        leg_options = inventory_leg[['contract']].merge(options,
                                                        how='left',
                                                        left_on='contract',
                                                        right_on=leg.schema['contract'])
        # leg_options.index needs to be the same as the inventory's so that the exit masks that are constructed
        # from it can be correctly applied to the inventory.
        leg_options.index = self._options_inventory.index
        leg_options['order'] = get_order(leg.direction, Signal.EXIT)
        # Closing uses the opposite side of the entry direction; relies on the
        # Direction enum implementing `__invert__` (defined elsewhere in this file).
        leg_options['cost'] = leg_options[self._options_schema[(~leg.direction).value]]
        # Change sign of cost for SELL orders
        if ~leg.direction == Direction.SELL:
            leg_options['cost'] = -leg_options['cost']
        leg_options['cost'] *= self.shares_per_contract
        current_options_quotes.append(leg_options)
    return current_options_quotes
def _impute_missing_option_values(self, exit_candidates):
    """Fill missing exit-candidate values from the inventory, imputing zero cost.

    Args:
        exit_candidates (pd.DataFrame): DataFrame of exit candidates with possible missing values.

    Returns:
        pd.DataFrame: Exit candidates with imputed values.
    """
    # Use a copy of the inventory as the fill source, but force every leg's
    # cost to zero so a vanished contract closes at no cost.
    fill_source = self._options_inventory.copy()
    for leg in self._options_strategy.legs:
        fill_source.at[:, (leg.name, 'cost')] = 0
    return exit_candidates.fillna(fill_source)
def __repr__(self):
    """Return a debug representation summarizing the backtest state."""
    return (f"Backtest(capital={self.current_cash}, allocation={self.allocation}, "
            f"stocks={self._stocks}, strategy={self._options_strategy})")
| 47.066038 | 119 | 0.621768 | 2,949 | 24,945 | 5.016616 | 0.126823 | 0.049074 | 0.032446 | 0.013992 | 0.296945 | 0.191429 | 0.154049 | 0.107544 | 0.097066 | 0.084494 | 0 | 0.00344 | 0.277571 | 24,945 | 529 | 120 | 47.155009 | 0.817435 | 0.197394 | 0 | 0.09375 | 0 | 0 | 0.072942 | 0 | 0 | 0 | 0 | 0.00189 | 0.021875 | 1 | 0.078125 | false | 0 | 0.015625 | 0.015625 | 0.146875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51fb7539b1f2f6afaf1c56b1fdd1c2bde6b3883c | 7,948 | py | Python | kicktipper/predictor.py | Kricki/kicktipper | fae146c8df0d9ba9bebe84c1f20cf8df6fc39678 | [
"ISC"
] | 1 | 2016-11-23T16:09:46.000Z | 2016-11-23T16:09:46.000Z | kicktipper/predictor.py | Kricki/kicktipper | fae146c8df0d9ba9bebe84c1f20cf8df6fc39678 | [
"ISC"
] | 1 | 2017-04-21T08:38:39.000Z | 2018-07-19T20:46:10.000Z | kicktipper/predictor.py | Kricki/kicktipper | fae146c8df0d9ba9bebe84c1f20cf8df6fc39678 | [
"ISC"
] | null | null | null | import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
class MatchPredictor:
""" Class to calculates the probabilities for different scores (outcomes) of two teams.
Attributes
----------
l1 : float
Projected score for team 1 (expectation value for Poisson distribution)
l2 : float
Projected score for team 2 (expectation value for Poisson distribution)
"""
def __init__(self, l1=0.0, l2=0):
self._poisson_n_bins = 8
self.l1 = l1
self.l2 = l2
def poisson_pmf(self, l, n_bins=None):
""" Returns the probablity mass function of the Poissonian distribution with average number l
See https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.poisson.html
Parameters
----------
l : float
Average number of events per interval ("shape parameter")
n_bins : int
Number of bins. If None (default), the value from the class attribute _poisson_n_bins is used.
Returns
-------
Probability mass function of Poisson distribution
"""
if n_bins is None:
n_bins = self._poisson_n_bins
n = np.arange(0, n_bins)
return stats.poisson.pmf(n, l)
def calculate_score_probs(self, mode='all'):
""" Calculates the probabilities for different scores (outcomes) of two teams. The required information is
the expection value for their goal distributions l1 and l2 (class attributes).
Parameters
----------
mode : str, {'all' (default), 'draws', 'team1_wins', 'team2_wins'}
If 'all', the complete probabiliy matrix is returned. If 'draw' only the diagonal elements (corresponding to
all possible draws) are non-zero. If 'team1_wins', only the elements corresponding to outcomes where team 1
wins are non-zero. 'team2_wins' is analaog to 'team1_wins'.
Returns
-------
nd.array
The returned matrix is a quadratic 2x2 matrix. The first dimension corresponds to team 1, second dimension
to team 2. E.g. score_probs[2,1] gives the probability for the score being 2:1
"""
y1 = self.poisson_pmf(self.l1)
y2 = self.poisson_pmf(self.l2)
score_probs = np.tensordot(y1, y2, axes=0) # vector * vector => matrix
if mode == 'all':
pass
elif mode == 'draws':
# diagonal elements correspond to probabilites of the draws (0:0, 1:1, 2:2, ...)
score_probs = np.diag(np.diag(score_probs))
elif mode == 'team1_wins':
# elements of lower left triangle (excluding diagonals => k=-1) correspond to probabilies for outcomes at
# which team 1 wins (1:0, 2:0, 2:1, ...)
score_probs = np.tril(score_probs, k=-1)
elif mode == 'team2_wins':
# elements of upper right triangle (excluding diagonals => k=1) correspond to probabilies for outcomes at
# which team 2 wins (0:1, 0:2, 1:, ...)
score_probs = np.triu(score_probs, k=1)
else:
raise(ValueError('Invalid value for "mode".'))
return score_probs
@staticmethod
def plot_score_probs(score_probs):
fig, ax = plt.subplots()
fig.set_size_inches(5, 5)
ax.imshow(score_probs, cmap='jet')
ax.set_ylabel('Goals Team 1')
ax.set_xlabel('Goals Team 2')
ax.set_title('Score probabilites (%)')
# write probability (in %) in each element of the matrix
for (j, i), label in np.ndenumerate(score_probs):
ax.text(i, j, round(label*100, 1), ha='center', va='center')
plt.show()
def plot_poisson_pmf(self):
fig, ax = plt.subplots()
fig.set_size_inches(5, 5)
n_bins = np.arange(0, self._poisson_n_bins)
y1 = self.poisson_pmf(self.l1)
y2 = self.poisson_pmf(self.l2)
ax.plot(n_bins, y1, 'o-', color='red', label='Team 1')
ax.plot(n_bins, y2, 'o-', color='blue', label='Team 2')
ax.set_xlabel('Scored goals')
ax.set_ylabel('Probability')
ax.set_title('Poisson distribution')
ax.grid()
ax.legend()
plt.show()
@property
def probs_tendency(self):
""" Calculate the probability for the "tendency" of the outcome for a match played by two teams.
Returns
-------
list with 3 elements
[probability team 1 wins, probability team 2 wins, probabilty for a draw]
"""
p_team1 = np.sum(self.calculate_score_probs(mode='team1_wins'))
p_team2 = np.sum(self.calculate_score_probs(mode='team2_wins'))
p_draw = np.sum(self.calculate_score_probs(mode='draws'))
return [p_team1, p_team2, p_draw]
def prob_goal_difference(self, d, mode='all'):
""" Calculate the probability for the goal difference of the match played by two teams to be d.
Parameters
----------
d : int
Goal difference. Positive: team 1 wins, negative: team 2 wins, 0: draw
mode : str
Passed to call of calculate_score_probs. See definition there.
Returns
-------
float
Probability
"""
score_probs = self.calculate_score_probs(mode=mode)
k = -1*d
# Parameter k: defines which diagonal axis offset to main diagonal is used. The axis offset by -d corresponds to
# the outcomes with a goal difference of d.
return np.sum(np.diag(score_probs, k=k))
def most_likely_goal_difference(self, mode='all'):
# calculate probabilities for all possible goal differences (limited by the width of the Poisson distribution)
d_ar = np.arange(-(self._poisson_n_bins-1), self._poisson_n_bins)
prob = np.zeros(len(d_ar))
for idx, d in enumerate(d_ar):
prob[idx] = self.prob_goal_difference(d, mode)
return d_ar[np.argmax(prob)], np.max(prob)
def most_likely_score(self, d=None, mode='all'):
""" Returns the most likely score.
Parameters "mode" and "d" set furhter constrains on the subset of score probabilites to be considered.
Parameters
----------
d : int
Goal difference. Positive: team 1 wins, negative: team 2 wins, 0: draw
mode : str
Passed to call of calculate_score_probs. See definition there.
Returns
-------
tuple
([result], probability) e.g. ([2,1], 0.06)
"""
score_probs = self.calculate_score_probs(mode=mode)
if d is not None:
# Set all elements except the diagonal offset by -d to zero
# Remaining non-zero elements correspond to results with a goal difference of d.
score_probs = np.diag(np.diag(score_probs, k=-d), k=-d)
result = list(np.unravel_index(np.argmax(score_probs), score_probs.shape)) # gets the indicies with the highest
# probability inside score_probs as list.
# See: https://stackoverflow.com/questions/9482550/argmax-of-numpy-array-returning-non-flat-indices
prob = np.max(score_probs)
return result, prob
@property
def predicted_score(self):
# 1) Calculate most likely tendency
tendency = np.argmax(self.probs_tendency) # 0: team 1 wins, 1: team 2 wins, 2: draw
# 2) What is the most likely goal difference within the tendency
if tendency == 0:
mode ='team1_wins'
elif tendency == 1:
mode = 'team2_wins'
elif tendency == 2:
mode = 'draws'
else:
raise(ValueError('Invalid value for tendendy'))
d, _ = self.most_likely_goal_difference(mode=mode)
# 3) What is the most likely result with the predicted goal difference?
return self.most_likely_score(d=d, mode=mode)
| 37.847619 | 120 | 0.6116 | 1,074 | 7,948 | 4.408752 | 0.234637 | 0.063358 | 0.032101 | 0.016895 | 0.293981 | 0.226822 | 0.196832 | 0.176558 | 0.145723 | 0.145723 | 0 | 0.021854 | 0.28611 | 7,948 | 209 | 121 | 38.028708 | 0.812654 | 0.45848 | 0 | 0.179775 | 0 | 0 | 0.070601 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.11236 | false | 0.011236 | 0.033708 | 0 | 0.235955 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
51fbfcfac2b93e98239de7ce36bcc1077cb951a1 | 7,012 | py | Python | app.py | ai4r/SGToolkit | 684df2cfc830eeb8ea23c95a8af1c9199991ec99 | [
"MIT"
] | 16 | 2021-08-11T08:55:41.000Z | 2022-02-11T02:45:55.000Z | app.py | ai4r/SGToolkit | 684df2cfc830eeb8ea23c95a8af1c9199991ec99 | [
"MIT"
] | 9 | 2021-09-07T14:52:59.000Z | 2022-03-24T13:33:00.000Z | app.py | ai4r/SGToolkit | 684df2cfc830eeb8ea23c95a8af1c9199991ec99 | [
"MIT"
] | 2 | 2021-08-25T06:00:43.000Z | 2021-10-07T00:57:49.000Z | from flask import Flask, render_template, request, send_file
from flask_pymongo import PyMongo
import json
import sg_core_api as sgapi
import os
import pathlib
import numpy as np
from bson.json_util import dumps
from bson.objectid import ObjectId
from datetime import datetime
from scipy.interpolate import CubicSpline
# Application-wide singletons: the Flask app, the (expensive to load) gesture
# generation model, and the MongoDB connection backing the motion-library and
# rule endpoints.
app = Flask(__name__)
gesture_generator = sgapi.get_gesture_generator()
root_path = pathlib.Path(__file__).parent
app.config["MONGO_URI"] = "mongodb://localhost"  # setup your own db to enable motion library and rule functions
mongo = PyMongo(app)
@app.route('/')
def index():
    """Serve the single-page UI."""
    page = 'index.html'
    return render_template(page)
@app.route('/api/motion', methods=['GET', 'POST'])
def motion_library():
    """Create (POST) or list (GET) motions in the motion library.

    POST expects a JSON document with a "motion" field; its poses are
    converted to UI coordinates before storage. GET returns all stored
    motions sorted by name.
    """
    if request.method == 'POST':
        # Renamed the local from `json` — it shadowed the module-level
        # `import json` used elsewhere in this file.
        payload = request.get_json()
        payload["motion"] = sgapi.convert_pose_coordinate_for_ui(np.array(payload["motion"])).tolist()
        result = {}
        try:
            mongo.db.motion.insert_one(payload)
            result['msg'] = "success"
        except Exception:
            result['msg'] = "fail"
        return result
    elif request.method == 'GET':
        try:
            cursor = mongo.db.motion.find().sort("name", 1)
        except AttributeError:
            return {}  # empty library
        motions = sgapi.convert_pose_coordinate_for_ui_for_motion_library(list(cursor))
        return dumps(motions)
    else:
        # Unreachable: Flask only dispatches the declared methods.
        assert False
@app.route('/api/delete_motion/<id>', methods=['GET'])
def delete_motion_library(id):
    """Delete the motion with the given ObjectId; report success or failure."""
    outcome = mongo.db.motion.delete_one({'_id': ObjectId(id)})
    return {'msg': "success" if outcome.deleted_count > 0 else "fail"}
@app.route('/api/rule', methods=['GET', 'POST'])
def rule():
    """Create (POST) or list (GET) gesture rules.

    POST expects a JSON document whose "motion" field is a motion ObjectId
    string. GET returns all rules joined with their motion documents.
    """
    if request.method == 'POST':
        # Renamed the local from `json` — it shadowed the module-level
        # `import json` used elsewhere in this file.
        payload = request.get_json()
        result = {}
        try:
            payload['motion'] = ObjectId(payload['motion'])
            mongo.db.rule.insert_one(payload)
            result['msg'] = "success"
        except Exception as e:
            print(payload)
            print(e)
            result['msg'] = "fail"
        return result
    elif request.method == 'GET':
        # Join each rule with its referenced motion document.
        pipeline = [{'$lookup':
                         {'from': 'motion',
                          'localField': 'motion',
                          'foreignField': '_id',
                          'as': 'motion_info'}},
                    ]
        try:
            cursor = mongo.db.rule.aggregate(pipeline)
        except AttributeError:
            return {}  # empty rules
        rules = sgapi.convert_pose_coordinate_for_ui_for_rule_library(cursor)
        rules = dumps(rules)
        return rules
    else:
        # Unreachable: Flask only dispatches the declared methods.
        assert False
@app.route('/api/delete_rule/<id>', methods=['GET'])
def delete_rule(id):
    """Delete the rule with the given ObjectId; report success or failure."""
    outcome = mongo.db.rule.delete_one({'_id': ObjectId(id)})
    return {'msg': "success" if outcome.deleted_count > 0 else "fail"}
@app.route('/api/input', methods=['POST'])
def input_text_post():
    """Main generation endpoint: produce a gesture sequence for the input text.

    Expects a JSON body with 'text-input' and optional 'keypoint-constraints',
    'style-constraints', 'voice' and 'is-manual-scenario' fields. In manual
    mode the key poses are spline-interpolated; otherwise the gesture
    generation model is run.
    """
    content = request.get_json()
    input_text = content.get('text-input')
    if input_text is None or len(input_text) == 0:
        return {'msg': 'empty'}
    print('--------------------------------------------')
    print('request time:', datetime.now())
    print('request IP:', request.remote_addr)
    print(input_text)
    kp_constraints = content.get('keypoint-constraints')
    if kp_constraints:
        pose_constraints_input = np.array(kp_constraints)
        # The model uses a different pose coordinate convention than the UI.
        pose_constraints = sgapi.convert_pose_coordinate_for_model(np.copy(pose_constraints_input))
    else:
        pose_constraints = None
        pose_constraints_input = None
    style_constraints = content.get('style-constraints')
    if style_constraints:
        style_constraints = np.array(style_constraints)
    else:
        style_constraints = None
    result = {}
    result['msg'] = "success"
    # Echo the inputs back so the client can display what was actually used.
    result['input-pose-constraints'] = pose_constraints_input.tolist() if pose_constraints_input is not None else None
    result['input-style-constraints'] = style_constraints.tolist() if style_constraints is not None else None
    result['input-voice'] = content.get('voice')
    result['is-manual-scenario'] = content.get('is-manual-scenario')
    if content.get('is-manual-scenario'):
        # interpolate key poses
        # Each row: 3 coordinates per joint plus a trailing flag that marks
        # key frames — presumably; confirm against the UI payload format.
        n_frames = pose_constraints_input.shape[0]
        n_joints = int((pose_constraints_input.shape[1] - 1) / 3)
        key_idxs = [i for i, e in enumerate(pose_constraints_input) if e[-1] == 1]
        if len(key_idxs) >= 2:
            out_gesture = np.zeros((n_frames, n_joints * 3))
            xs = np.arange(0, n_frames, 1)
            # Cubic-spline each joint's 3D trajectory through the key frames.
            for i in range(n_joints):
                pts = pose_constraints_input[key_idxs, i * 3:(i + 1) * 3]
                cs = CubicSpline(key_idxs, pts, bc_type='clamped')
                out_gesture[:, i * 3:(i + 1) * 3] = cs(xs)
            result['output-data'] = out_gesture.tolist()
            result['audio-filename'] = os.path.split(result['input-voice'])[
                1]  # WARNING: assumed manual mode uses external audio file
        else:
            # At least two key frames are needed to interpolate.
            result['msg'] = "fail"
    else:
        # run gesture generation model
        output = gesture_generator.generate(input_text, pose_constraints=pose_constraints,
                                            style_values=style_constraints, voice=content.get('voice'))
        if output is None:
            # something wrong
            result['msg'] = "fail"
        else:
            gesture, audio, tts_filename, words_with_timestamps = output
            gesture = sgapi.convert_pose_coordinate_for_ui(gesture)
            result['audio-filename'] = os.path.split(tts_filename)[1]  # filename without path
            result['words-with-timestamps'] = words_with_timestamps
            result['output-data'] = gesture.tolist()
    return result
@app.route('/media/<path:filename>/<path:new_filename>')
def download_audio_file(filename, new_filename):
    """Serve a cached wav file under a caller-chosen download name."""
    audio_path = os.path.join('./cached_wav', filename)
    return send_file(audio_path, as_attachment=True,
                     attachment_filename=new_filename, cache_timeout=0)
@app.route('/mesh/<path:filename>')
def download_mesh_file(filename):
    """Serve a character mesh file from the static mesh directory."""
    mesh_path = root_path / "static" / "mesh" / filename
    return send_file(str(mesh_path), as_attachment=True, cache_timeout=0)
@app.route('/upload_audio', methods=['POST'])
def upload():
    """Save uploaded audio files into the wav cache directory.

    Every uploaded file is stored as "uploaded_audio<ext>"; the response
    lists the paths that were successfully saved.
    """
    upload_dir = './cached_wav'
    file_names = []
    for key in request.files:
        file = request.files[key]
        _, ext = os.path.splitext(file.filename)
        print('uploaded: ', file.filename)
        try:
            upload_path = os.path.join(upload_dir, "uploaded_audio" + ext)
            file.save(upload_path)
            file_names.append(upload_path)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are not swallowed; the best-effort behavior is kept.
            print('save fail: ' + os.path.join(upload_dir, file.filename))
    # `[f for f in file_names]` was a redundant copy; the list serializes identically.
    return json.dumps({'filename': file_names})
if __name__ == '__main__':
    # Development entry point; run behind a proper WSGI server in production.
    app.run()
51fdc866742d67c3e351348526ab6d8be86c0161 | 473 | py | Python | pins/pins.py | evarga/composite-decomposition | 07de8a21d1d1974a8a3b1346be1d5ee0d7764fa5 | [
"MIT"
] | null | null | null | pins/pins.py | evarga/composite-decomposition | 07de8a21d1d1974a8a3b1346be1d5ee0d7764fa5 | [
"MIT"
] | null | null | null | pins/pins.py | evarga/composite-decomposition | 07de8a21d1d1974a8a3b1346be1d5ee0d7764fa5 | [
"MIT"
] | null | null | null | from math import sqrt
def num_pins_full_row(n: int, k: int) -> int:
return (n // k + 1) * k + n % k + (n % k > 0) if n > 0 else 0
def num_pins_square(n: int, k: int) -> int:
m = int(sqrt(n))
used_pins = (m + 1)**2
n -= m * m
if 0 < n <= m:
used_pins += n + 1
elif n > m:
used_pins += n + 2
return used_pins if m > 0 else 0
# Read "n k" from stdin and print the cheaper of the two pin layouts.
data = tuple(map(int, input().split()))
print(min(num_pins_full_row(*data), num_pins_square(*data)))
a40166e450ee628d6a50ace1f3007f22ec4f1689 | 6,239 | py | Python | blocking_utils.py | colebryant/DeepBlocker | e90bbe2c4fa75f53fccea20cbdebf71b9167584d | [
"BSD-3-Clause"
] | null | null | null | blocking_utils.py | colebryant/DeepBlocker | e90bbe2c4fa75f53fccea20cbdebf71b9167584d | [
"BSD-3-Clause"
] | null | null | null | blocking_utils.py | colebryant/DeepBlocker | e90bbe2c4fa75f53fccea20cbdebf71b9167584d | [
"BSD-3-Clause"
] | null | null | null | import pandas as pd
import numpy as np
def topK_neighbors_to_candidate_set(topK_neighbors):
#We create a data frame corresponding to topK neighbors.
# We are given a 2D matrix of the form 1: [a1, a2, a3], 2: [b1, b2, b3]
# where a1, a2, a3 are the top-3 neighbors for tuple 1 and so on.
# We will now create a two column DF fo the form (1, a1), (1, a2), (1, a3), (2, b1), (2, b2), (2, b3)
topK_df = pd.DataFrame(topK_neighbors)
topK_df["ltable_id"] = topK_df.index
melted_df = pd.melt(topK_df, id_vars=["ltable_id"])
melted_df["rtable_id"] = melted_df["value"]
candidate_set_df = melted_df[["ltable_id", "rtable_id"]]
return candidate_set_df
def thresholded_pairs_to_candidate_set(thresholded_pairs):
    """Build a candidate-set DataFrame from parallel arrays of record-pair ids."""
    left_ids, right_ids = thresholded_pairs[0], thresholded_pairs[1]
    pairs = np.vstack((left_ids, right_ids)).T
    return pd.DataFrame(pairs, columns=["ltable_id", "rtable_id"])
def compute_blocking_statistics(candidate_set_df, golden_df, left_df, right_df):
    """Compute precision/recall/CSSR statistics for a blocking candidate set.

    Args:
        candidate_set_df (pd.DataFrame): Candidate pairs (ltable_id, rtable_id).
        golden_df (pd.DataFrame): Ground-truth matching pairs (same columns).
        left_df (pd.DataFrame): Left table (only its length is used).
        right_df (pd.DataFrame): Right table (only its length is used).

    Returns:
        dict: Blocking statistics.
    """
    # Equi-join of candidates and ground truth = true matches found by blocking.
    merged_df = pd.merge(candidate_set_df, golden_df, on=['ltable_id', 'rtable_id'])
    # BUGFIX: false positives are candidate PAIRS that are not ground-truth
    # matches, found via an anti-join. The previous element-wise isin() test
    # under-counted: a wrong pair whose two ids both occur in other true
    # matches was never flagged, inflating precision.
    tagged = candidate_set_df.merge(golden_df, on=['ltable_id', 'rtable_id'],
                                    how='left', indicator=True)
    false_pos = tagged[tagged['_merge'] == 'left_only'].drop(columns='_merge')
    left_num_tuples = len(left_df)
    right_num_tuples = len(right_df)
    num_predicted = len(merged_df) + len(false_pos)
    statistics_dict = {
        "left_num_tuples": left_num_tuples,
        "right_num_tuples": right_num_tuples,
        "candidate_set_length": len(candidate_set_df),
        "golden_set_length": len(golden_df),
        "merged_set_length": len(merged_df),
        "false_positives_length": len(false_pos),
        # Also guard an empty candidate set (previously a ZeroDivisionError).
        "precision": len(merged_df) / num_predicted if len(golden_df) > 0 and num_predicted > 0 else "N/A",
        "recall": len(merged_df) / len(golden_df) if len(golden_df) > 0 else "N/A",
        "cssr": len(candidate_set_df) / (left_num_tuples * right_num_tuples)
    }
    return statistics_dict
def compute_join_percentage(candidate_set_df, left_df, right_df):
    """Estimate whether two tables are joinable from a blocking candidate set.

    For each side, computes the percentage of its tuples that participate in at
    least one candidate pair; predicts "JOIN" when either side exceeds THRESHOLD.

    Args:
        candidate_set_df (pd.DataFrame): Candidate pairs (ltable_id, rtable_id).
        left_df (pd.DataFrame): Left table (only its length is used).
        right_df (pd.DataFrame): Right table (only its length is used).

    Returns:
        dict: Join statistics and the "JOIN"/"NO JOIN" prediction.
    """
    THRESHOLD = 20
    left_num_tuples = len(left_df)
    right_num_tuples = len(right_df)
    left_percent_join = 100 * round(candidate_set_df['ltable_id'].unique().shape[0] / left_num_tuples, 3)
    right_percent_join = 100 * round(candidate_set_df['rtable_id'].unique().shape[0] / right_num_tuples, 3)
    total_percent_join = 100 * round(
        (candidate_set_df['ltable_id'].unique().shape[0] + candidate_set_df['rtable_id'].unique().shape[0])
        / (left_num_tuples + right_num_tuples), 3)
    statistics_dict = {
        "left_num_tuples": left_num_tuples,
        "right_num_tuples": right_num_tuples,
        "candidate_set_length": len(candidate_set_df),
        "left_percent_join": f"{left_percent_join}%",
        # BUGFIX: the "right_percent_join" key was duplicated in this literal;
        # dict literals keep only the last duplicate, so output is unchanged.
        "right_percent_join": f"{right_percent_join}%",
        "total_percent_join": f"{total_percent_join}%",
        "prediction": "JOIN" if max(left_percent_join, right_percent_join) > THRESHOLD else "NO JOIN",
        "cssr": len(candidate_set_df) / (left_num_tuples * right_num_tuples)
    }
    return statistics_dict
def compute_column_statistics(table_names, candidate_set_df, golden_df, left_df, right_df):
    """Compute matching statistics at COLUMN granularity.

    Here ltable_id/rtable_id in `candidate_set_df` are integer column indices;
    they are mapped to "table.column" strings before being compared against the
    golden set (which already uses that string format).

    Args:
        table_names (sequence): [left table name, right table name].
        candidate_set_df (pd.DataFrame): Candidate column pairs (integer indices).
        golden_df (pd.DataFrame): Ground-truth column pairs ("table.column" strings).
        left_df (pd.DataFrame): Left table (its column names and count are used).
        right_df (pd.DataFrame): Right table (its column names and count are used).

    Returns:
        dict: Column-matching statistics, including the intermediate DataFrames.
    """
    candidate_set_df = candidate_set_df.astype('str')
    # Map integer column indices to "<table>.<column>" identifiers.
    candidate_set_df['ltable_id_table'] = candidate_set_df['ltable_id'].apply(lambda x: left_df.columns[int(x)])
    candidate_set_df['ltable_id_table'] = table_names[0] + '.' + candidate_set_df['ltable_id_table']
    candidate_set_df['rtable_id_table'] = candidate_set_df['rtable_id'].apply(lambda x: right_df.columns[int(x)])
    candidate_set_df['rtable_id_table'] = table_names[1] + '.' + candidate_set_df['rtable_id_table']
    candidate_set_df = candidate_set_df[['ltable_id_table', 'rtable_id_table']].rename(columns={'ltable_id_table': 'ltable_id', 'rtable_id_table': 'rtable_id'})
    merged_df = pd.merge(candidate_set_df, golden_df, on=['ltable_id', 'rtable_id'])
    # Added to calculate total false positives
    # NOTE(review): this element-wise isin() test can under-count false
    # positives — a wrong pair whose two ids both occur in other true matches
    # is never flagged. Consider an anti-join on both columns instead.
    false_pos = candidate_set_df[~candidate_set_df['ltable_id'].isin(merged_df['ltable_id'])|(~candidate_set_df['rtable_id'].isin(merged_df['rtable_id']))]
    if len(golden_df) > 0 and (len(merged_df) + len(false_pos)) > 0:
        fp = float(len(merged_df)) / (len(merged_df) + len(false_pos))
    else:
        fp = "N/A"
    left_num_columns = len(left_df.columns)
    right_num_columns = len(right_df.columns)
    statistics_dict = {
        "left_table": table_names[0],
        "right_table": table_names[1],
        "left_num_columns": left_num_columns,
        "right_num_columns": right_num_columns,
        "candidate_set_length": len(candidate_set_df),
        "candidate_set": candidate_set_df,
        "golden_set_length": len(golden_df),
        "golden_set": golden_df,
        "merged_set_length": len(merged_df),
        "merged_set": merged_df,
        "false_positives_length": len(false_pos),
        "false_positives": false_pos,
        "precision": fp,
        "recall": float(len(merged_df)) / len(golden_df) if len(golden_df) > 0 else "N/A",
        "cssr": len(candidate_set_df) / (left_num_columns * right_num_columns)
    }
    return statistics_dict
#This function is useful when you download the preprocessed data from DeepMatcher dataset
# and want to convert to matches format.
#It loads the train/valid/test files, filters the duplicates,
# and saves them to a new file called matches.csv
def process_files(folder_root):
    """Convert preprocessed DeepMatcher splits into a ``matches.csv`` file.

    Loads ``train.csv``, ``valid.csv`` and ``test.csv`` from ``folder_root``,
    keeps only the positive pairs (``label == 1``) and writes their
    ``ltable_id``/``rtable_id`` columns to ``folder_root/matches.csv``.
    """
    splits = [pd.read_csv(folder_root + "/" + split + ".csv")
              for split in ("train", "valid", "test")]
    positives = [split_df[split_df["label"] == 1] for split_df in splits]
    matches = pd.concat(positives, ignore_index=True)
    matches[["ltable_id", "rtable_id"]].to_csv(
        folder_root + "/matches.csv", header=True, index=False)
| 46.559701 | 179 | 0.702036 | 950 | 6,239 | 4.256842 | 0.184211 | 0.133531 | 0.131553 | 0.04451 | 0.56454 | 0.526212 | 0.481454 | 0.44634 | 0.369189 | 0.263106 | 0 | 0.014117 | 0.171181 | 6,239 | 133 | 180 | 46.909774 | 0.767937 | 0.150665 | 0 | 0.362637 | 0 | 0 | 0.197236 | 0.020254 | 0 | 0 | 0 | 0 | 0 | 1 | 0.065934 | false | 0 | 0.021978 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a401b7bfe3f33a4fcdd4603c34778103819c8259 | 1,989 | py | Python | project/simulation/naive/iterated_prisoners.py | horken7/game-theory | c5484e6c338646e8143e90290efdc07acf397f22 | [
"MIT"
] | null | null | null | project/simulation/naive/iterated_prisoners.py | horken7/game-theory | c5484e6c338646e8143e90290efdc07acf397f22 | [
"MIT"
] | null | null | null | project/simulation/naive/iterated_prisoners.py | horken7/game-theory | c5484e6c338646e8143e90290efdc07acf397f22 | [
"MIT"
] | null | null | null |
# coding: utf-8
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Payoff constants for one round of the iterated prisoner's dilemma.
both_coorporate_utility = 3  # payoff to each player when both cooperate
both_defect_utility = 1      # payoff to each player when both defect
looser_utility = 0           # payoff to the cooperator when the other defects
winner_utility = 3           # payoff to the defector when the other cooperates
# NOTE(review): the two resource counters below are not referenced anywhere in
# the visible code — confirm whether they are still needed.
a_resources = 2
b_resources = 2
# Per-round action history of each player (0 = defect, 1 = cooperate) ...
a_actions = []
b_actions = []
# ... and the utility each player earned in the corresponding round.
a_utility = []
b_utility = []
rounds = 20  # number of rounds to simulate
# Defect: action 0
# Cooperate: action 1
def evaluate_strategy(a, b):
    """Return the (a_payoff, b_payoff) tuple for one round.

    Actions: 0 = defect, 1 = cooperate. Returns None for any other input,
    matching the original fall-through behaviour.
    """
    payoff_table = {
        (1, 1): (both_coorporate_utility, both_coorporate_utility),  # both cooperate
        (1, 0): (looser_utility, winner_utility),                    # a cooperates, b defects
        (0, 1): (winner_utility, looser_utility),                    # a defects, b cooperates
        (0, 0): (both_defect_utility, both_defect_utility),          # both defect
    }
    return payoff_table.get((a, b))
def tit_for_tat(me, opponent, t):
    """Tit-for-tat strategy: cooperate on the first round (t == 0), then
    mirror the opponent's previous action."""
    return 1 if t == 0 else opponent[t - 1]
# play the game the defined amount of rounds
for t in range(rounds):
    # Player A follows tit-for-tat against B's history; player B picks a
    # uniformly random action each round.
    a_strategy = tit_for_tat(a_actions, b_actions, t)
    b_strategy = round(np.random.rand()) # random strategy
    a_actions.append(a_strategy)
    b_actions.append(b_strategy)
    # Payoffs for this round, appended to each player's utility history.
    [a_result, b_result] = evaluate_strategy(a_strategy, b_strategy)
    a_utility.append(a_result)
    b_utility.append(b_result)

# Plot the per-round utility earned by each player.
ax = plt.subplot(1,1,1)
ax.plot(np.linspace(1,len(a_utility), len(a_utility)), a_utility, label='Tit for tat')
ax.plot(np.linspace(1,len(b_utility), len(b_utility)), b_utility, label='Random')
ax.set_title('Iteraded prisoners')
ax.set_xlabel('Iterations')
ax.set_ylabel('Utility')
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles, labels)
plt.show()

# Plot the per-round actions (0 = defect, 1 = cooperate) of both players.
ax = plt.subplot(1,1,1)
ax.plot(np.linspace(1,len(a_actions), len(a_actions)), a_actions, label='Tit for tat')
ax.plot(np.linspace(1,len(b_actions), len(b_actions)), b_actions, label='Random')
ax.set_title('Iteraded prisoners')
ax.set_xlabel('Iterations')
ax.set_ylabel('Action')
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles, labels)
plt.show()
| 26.52 | 86 | 0.711413 | 322 | 1,989 | 4.177019 | 0.232919 | 0.035688 | 0.026766 | 0.047584 | 0.331599 | 0.331599 | 0.30632 | 0.30632 | 0.30632 | 0.30632 | 0 | 0.019059 | 0.155857 | 1,989 | 74 | 87 | 26.878378 | 0.782013 | 0.092509 | 0 | 0.226415 | 0 | 0 | 0.057446 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037736 | false | 0 | 0.056604 | 0 | 0.09434 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a403cb1eb639ba6d23d5ad19b32afabf17e2a3db | 844 | py | Python | higher_lower/__init__.py | simonw/higher-lower | 436810573bfcb0175738b6636b6b4d790b81183b | [
"Apache-2.0"
] | 2 | 2021-02-16T08:45:24.000Z | 2021-02-22T01:30:29.000Z | higher_lower/__init__.py | simonw/higher-lower | 436810573bfcb0175738b6636b6b4d790b81183b | [
"Apache-2.0"
] | 1 | 2021-02-16T07:17:21.000Z | 2021-02-16T19:39:12.000Z | higher_lower/__init__.py | simonw/higher-lower | 436810573bfcb0175738b6636b6b4d790b81183b | [
"Apache-2.0"
] | null | null | null | from enum import Enum
class ActualIs(Enum):
    """Outcome of comparing a guessed value against the hidden value."""
    HIGHER = 1
    MATCH = 0
    LOWER = -1


def higher_lower(min_value, max_value, callback):
    """Binary-search for a hidden integer strictly between the two bounds.

    ``callback`` is invoked with each candidate and must return an
    :class:`ActualIs` member saying whether the hidden value is higher,
    lower, or a match. Returns the matched candidate.
    """
    assert isinstance(max_value, int)
    assert isinstance(min_value, int)
    assert max_value > min_value
    guess = midpoint(min_value, max_value)
    while True:
        outcome = callback(guess)
        if outcome is ActualIs.MATCH:
            return guess
        if outcome is ActualIs.LOWER:
            # Hidden value lies below the current guess: shrink from the top.
            max_value = guess
            guess = midpoint(min_value, guess)
        elif outcome is ActualIs.HIGHER:
            # Hidden value lies above the current guess: shrink from the bottom.
            min_value = guess
            guess = midpoint(guess, max_value)
        else:
            assert False, "Should be a ActualIs enum constant"


def midpoint(x, y):
    """Integer midpoint of ``x`` and ``y`` (rounded towards ``x``)."""
    return x + ((y - x) // 2)
| 25.575758 | 62 | 0.61019 | 100 | 844 | 5.02 | 0.36 | 0.095618 | 0.101594 | 0.063745 | 0.115538 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006944 | 0.317536 | 844 | 32 | 63 | 26.375 | 0.864583 | 0.014218 | 0 | 0 | 0 | 0 | 0.041013 | 0 | 0 | 0 | 0 | 0 | 0.166667 | 1 | 0.083333 | false | 0 | 0.041667 | 0.041667 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a405507dac4880c95cf65af8bea272bbc90ef96d | 4,819 | py | Python | satcfe/resposta/enviardadosvenda.py | danielgoncalves/satcfe | b460eaa2fc09b891b68a4ad25db5f7c45a1fcf4f | [
"Apache-2.0"
] | 38 | 2015-05-25T02:57:16.000Z | 2022-01-18T21:01:49.000Z | satcfe/resposta/enviardadosvenda.py | danielgoncalves/satcfe | b460eaa2fc09b891b68a4ad25db5f7c45a1fcf4f | [
"Apache-2.0"
] | 15 | 2015-08-19T13:30:46.000Z | 2022-01-19T22:34:17.000Z | satcfe/resposta/enviardadosvenda.py | danielgoncalves/satcfe | b460eaa2fc09b891b68a4ad25db5f7c45a1fcf4f | [
"Apache-2.0"
] | 13 | 2015-05-07T01:10:12.000Z | 2022-02-04T14:30:01.000Z | # -*- coding: utf-8 -*-
#
# satcfe/resposta/enviardadosvenda.py
#
# Copyright 2015 Base4 Sistemas Ltda ME
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import xml.etree.ElementTree as ET
from decimal import Decimal
from io import StringIO
from builtins import str as text
from satcomum.ersat import dados_qrcode
from ..excecoes import ExcecaoRespostaSAT
from ..util import as_datetime
from ..util import base64_to_str
from .padrao import RespostaSAT
from .padrao import analisar_retorno
EMITIDO_COM_SUCESSO = '06000'
class RespostaEnviarDadosVenda(RespostaSAT):
    """Handles responses of the ``EnviarDadosVenda`` SAT function (see
    :meth:`~satcfe.base.FuncoesSAT.enviar_dados_venda`).

    On success the expected attributes are:

    .. sourcecode:: text

        numeroSessao (int)
        EEEEE (text)
        CCCC (text)
        mensagem (text)
        cod (text)
        mensagemSEFAZ (text)
        arquivoCFeSAT (text)
        timeStamp (datetime.datetime)
        chaveConsulta (text)
        valorTotalCFe (decimal.Decimal)
        CPFCNPJValue (text)
        assinaturaQRCODE (text)

    On failure only the first six attributes above are expected; as a last
    resort the response may carry just the default attributes described in
    :attr:`~satcfe.resposta.padrao.RespostaSAT.CAMPOS`.

    .. note::
        Here ``text`` means ``unicode`` (Python 2) or ``str`` (Python 3);
        see ``builtins.str`` from the ``future`` library.
    """

    def xml(self):
        """Return the CF-e-SAT XML document decoded from Base64.

        :rtype: str
        """
        if not self._sucesso():
            raise ExcecaoRespostaSAT(self)
        return base64_to_str(self.arquivoCFeSAT)

    def qrcode(self):
        """Return the data that make up the QRCode.

        :rtype: str
        """
        if not self._sucesso():
            raise ExcecaoRespostaSAT(self)
        tree = ET.parse(StringIO(self.xml()))
        return dados_qrcode(tree)

    def _sucesso(self):
        # The sale was emitted successfully only for this exact EEEEE code.
        return self.EEEEE == EMITIDO_COM_SUCESSO

    @staticmethod
    def analisar(retorno):
        """Build a :class:`RespostaEnviarDadosVenda` from the raw return.

        :param str retorno: Raw return of the ``EnviarDadosVenda`` function.
        """
        campos_sucesso = (
            ('numeroSessao', int),
            ('EEEEE', text),
            ('CCCC', text),
            ('mensagem', text),
            ('cod', text),
            ('mensagemSEFAZ', text),
            ('arquivoCFeSAT', text),
            ('timeStamp', as_datetime),
            ('chaveConsulta', text),
            ('valorTotalCFe', Decimal),
            ('CPFCNPJValue', text),
            ('assinaturaQRCODE', text),
        )
        # When the sale fails, only the first six fields of the spec should
        # be returned; as a last resort fall back to the default layout in
        # case nothing else matches.
        campos_falha = campos_sucesso[:6]
        resposta = analisar_retorno(
            retorno,
            funcao='EnviarDadosVenda',
            classe_resposta=RespostaEnviarDadosVenda,
            campos=campos_sucesso,
            campos_alternativos=[
                campos_falha,
                RespostaSAT.CAMPOS,
            ]
        )
        if resposta.EEEEE not in (EMITIDO_COM_SUCESSO,):
            raise ExcecaoRespostaSAT(resposta)
        return resposta
| 31.703947 | 76 | 0.564225 | 469 | 4,819 | 5.716418 | 0.45629 | 0.02238 | 0.02984 | 0.035808 | 0.23536 | 0.131294 | 0.131294 | 0.131294 | 0.131294 | 0.131294 | 0 | 0.007334 | 0.349243 | 4,819 | 151 | 77 | 31.913907 | 0.847577 | 0.426645 | 0 | 0.31746 | 0 | 0 | 0.073622 | 0 | 0 | 0 | 0 | 0.006623 | 0 | 1 | 0.063492 | false | 0 | 0.206349 | 0.015873 | 0.349206 | 0.015873 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a4097c2af9c121f0f61a10affe12d058da5aad64 | 2,769 | py | Python | bocadillo_cli/helpers.py | bocadilloproject/bocadillo-cli | f11ec438504eb2edd3c4e8f5d2992e804b3da6b0 | [
"MIT"
] | 6 | 2019-04-17T17:07:46.000Z | 2020-08-09T07:37:34.000Z | bocadillo_cli/helpers.py | bocadilloproject/bocadillo-cli | f11ec438504eb2edd3c4e8f5d2992e804b3da6b0 | [
"MIT"
] | 10 | 2019-04-17T21:27:46.000Z | 2019-06-17T05:45:51.000Z | bocadillo_cli/helpers.py | bocadilloproject/bocadillo-cli | f11ec438504eb2edd3c4e8f5d2992e804b3da6b0 | [
"MIT"
] | 1 | 2019-05-12T17:32:45.000Z | 2019-05-12T17:32:45.000Z | import pathlib
import pkgutil
import typing
from contextlib import contextmanager
import click
from jinja2 import Template
from . import formatutils as fmt
class Templates:
    """Renders Jinja templates bundled with the package against a fixed
    rendering context."""

    def __init__(self, context: dict):
        self.context = context

    @staticmethod
    def _get(name: str) -> Template:
        # Templates ship inside the package under the "templates" directory.
        location = str(pathlib.Path("templates", name))
        raw: bytes = pkgutil.get_data("bocadillo_cli", location)
        if raw is None:
            raise ValueError(f"Template not found: {name}")
        return Template(raw.decode("utf-8"))

    def render(self, name: str) -> str:
        """Render the template called ``name`` (the ``.jinja`` suffix is
        appended automatically) with the stored context."""
        return self._get(f"{name}.jinja").render(self.context)
class Writer:
    """Writes rendered templates to disk, reporting each action and honouring
    dry-run / non-interactive modes."""

    # Pre-formatted action labels echoed next to each path.
    CREATE = fmt.success("CREATE")
    SKIP = fmt.muted("SKIP")

    def __init__(self, dry: bool, no_input: bool, templates: Templates):
        self.dry = dry            # if True, report actions without touching disk
        self.no_input = no_input  # if True, never prompt (existing files are skipped)
        self.templates = templates
        self.root = None          # current target directory while inside cd()

    def mkdir(self, path: pathlib.Path, **kwargs):
        """Create *path* (unless it exists or this is a dry run) and report it."""
        already_there = path.exists()
        action = self.SKIP if already_there else self.CREATE
        if not already_there and not self.dry:
            path.mkdir(**kwargs)
        click.echo(f"{action} {path} {fmt.muted('directory')}")

    def writefile(self, path: pathlib.Path, content: str):
        """Write *content* to *path*, prompting before overwriting an existing
        file unless running non-interactively (then it is skipped)."""
        skipped = path.exists() and (
            self.no_input
            or not click.confirm(
                fmt.pre_warn(
                    f"File {fmt.code(path)} already exists. Overwrite?"
                )
            )
        )
        if skipped:
            nbytes = None
            action = self.SKIP
        else:
            if not self.dry:
                with open(str(path), "w", encoding="utf-8") as f:
                    f.write(content)
                    f.write("\n")
            nbytes = len(content.encode())
            action = self.CREATE
        size_note = fmt.muted(f" ({nbytes} bytes)") if nbytes else ""
        click.echo(f"{action} {path}{size_note}")

    def writetemplate(self, *names: str, root: pathlib.Path = None) -> None:
        """Render each named template and write it under *root* (defaults to
        the directory set by :meth:`cd`)."""
        if root is None:
            assert self.root is not None
            root = self.root
        for template_name in names:
            rendered = self.templates.render(template_name)
            destination = pathlib.Path(root, template_name)
            self.writefile(destination, rendered)

    @contextmanager
    def cd(self, directory: pathlib.Path):
        """Create *directory* and make it the default write target while the
        context is active."""
        self.mkdir(directory, exist_ok=True)
        self.root = directory
        try:
            yield self
        finally:
            self.root = None

    def generate(self, config: typing.Dict[str, typing.List[str]]):
        """Write every configured template: ``config`` maps a directory to the
        list of template names to render inside it."""
        for directory, filenames in config.items():
            with self.cd(directory):
                for filename in filenames:
                    self.writetemplate(filename)
| 29.774194 | 76 | 0.562658 | 320 | 2,769 | 4.80625 | 0.309375 | 0.042913 | 0.029259 | 0.019506 | 0.026008 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001613 | 0.328277 | 2,769 | 92 | 77 | 30.097826 | 0.825269 | 0 | 0 | 0.131579 | 0 | 0 | 0.079812 | 0.017335 | 0 | 0 | 0 | 0 | 0.013158 | 1 | 0.118421 | false | 0 | 0.092105 | 0.013158 | 0.289474 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
a409daf46a3fa04de8f0e145bfe66c2e1af54b0c | 1,637 | py | Python | extras/cleanup-meshes.py | RQWorldblender/io_scene_numdlb | 8a67c092f1aef9536e6de7cc7665dec77c0d52c6 | [
"MIT"
] | 2 | 2020-12-04T14:55:09.000Z | 2021-03-04T02:11:27.000Z | extras/cleanup-meshes.py | RQWorldblender/io_scene_numdlb | 8a67c092f1aef9536e6de7cc7665dec77c0d52c6 | [
"MIT"
] | null | null | null | extras/cleanup-meshes.py | RQWorldblender/io_scene_numdlb | 8a67c092f1aef9536e6de7cc7665dec77c0d52c6 | [
"MIT"
] | 1 | 2021-03-04T02:11:42.000Z | 2021-03-04T02:11:42.000Z | import bpy, os
# Select Expressions
mesh_expr = ["*Blink*", "*Attack*", "*Ouch*", "*Talk*", "*Capture*", "*Ottotto*", "*Escape*", "*Half*", "*Pattern*", "*Result*", "*Harf*","*Hot*", "*Heavy*", "*Voice*", "*Fura*", "*Throw*", "*Catch*", "*Cliff*", "*FLIP*", "*Bound*", "*Down*", "*Bodybig*", "*Final*", "*Result*", "*StepPose*", "*Sorori*", "*Fall*", "*Appeal*", "*DamageFlyFront*", "*CameraHit*"]
# Make collections for each expressions
bpy.ops.object.select_all(action='DESELECT')
for exp in mesh_expr:
bpy.ops.object.select_pattern(pattern=exp)
selectNum = 0
for obj in bpy.data.objects:
if obj.select_get():
selectNum += 1
print(exp + " -> " + obj.name)
co = bpy.data.collections
if selectNum > 0:
if exp in co:
collect = co[exp]
else:
collect = co.new(name=exp)
bpy.context.view_layer.active_layer_collection.collection.children.link(collect)
for obj in bpy.data.objects:
if obj.select_get():
bpy.ops.collection.objects_remove_active()
collect.objects.link(obj)
collect.hide_viewport = True
collect.hide_render = True
bpy.ops.object.select_all(action='DESELECT')
#bpy.ops.object.select_all(action='TOGGLE')
#bpy.ops.object.select_pattern(pattern="*Openblink*")
#bpy.ops.object.select_pattern(pattern="*FaceN*")
# Change image filepaths to be relative to the Blender file
for image in bpy.data.images:
filename = os.path.basename(image.filepath)
image.filepath = os.path.join("//", filename)
| 38.97619 | 362 | 0.596823 | 194 | 1,637 | 4.948454 | 0.469072 | 0.04375 | 0.075 | 0.1125 | 0.276042 | 0.276042 | 0.147917 | 0.075 | 0.075 | 0.075 | 0 | 0.002364 | 0.224801 | 1,637 | 41 | 363 | 39.926829 | 0.754137 | 0.156384 | 0 | 0.222222 | 0 | 0 | 0.189047 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.037037 | 0 | 0.037037 | 0.037037 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cf8bc62feb8e0471e65e54c2a009471153f8ea88 | 636 | py | Python | _sadm/listen/handlers/exec.py | jrmsdev/pysadm | 0d6b3f0c8d870d83ab499c8d9487ec8e3a89fc37 | [
"BSD-3-Clause"
] | 1 | 2019-10-15T08:37:56.000Z | 2019-10-15T08:37:56.000Z | _sadm/listen/handlers/exec.py | jrmsdev/pysadm | 0d6b3f0c8d870d83ab499c8d9487ec8e3a89fc37 | [
"BSD-3-Clause"
] | null | null | null | _sadm/listen/handlers/exec.py | jrmsdev/pysadm | 0d6b3f0c8d870d83ab499c8d9487ec8e3a89fc37 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) Jeremías Casteglione <jrmsdev@gmail.com>
# See LICENSE file.
import json
from bottle import request
from _sadm import log
from _sadm.listen.errors import error
from _sadm.listen.webhook.repo.vcs.git import GitRepo
# NOTE(review): 'exech' does not appear as a definition in the visible code
# (the handler below is named ``handle``) — confirm the export name.
__all__ = ['exech']
# Registry mapping a task identifier to the manager object that services it.
_taskman = {
    'webhook.repo.git': GitRepo(),
}
def handle(task, action):
    """Dispatch an exec request to the manager registered for *task*.

    The request body is parsed as JSON and forwarded to the manager's
    ``hook`` method; any failure is converted into an HTTP 500 error.
    """
    log.debug("exec handle: %s %s" % (task, action))
    manager = _taskman.get(task, None)
    if manager is None:
        raise error(500, "listen.exec task %s: no manager" % task)
    try:
        payload = json.load(request.body)
        manager.hook(action, payload)
    except Exception as err:
        raise error(500, "%s" % err)
    return 'OK\n'
| 22.714286 | 60 | 0.709119 | 95 | 636 | 4.652632 | 0.578947 | 0.054299 | 0.063348 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011215 | 0.158805 | 636 | 27 | 61 | 23.555556 | 0.814953 | 0.113208 | 0 | 0 | 0 | 0 | 0.135472 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.25 | 0 | 0.35 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cf8e095b2dfd6ed697713c2bc61fb4d1890cfd30 | 579 | py | Python | EstruturaDeRepeticao/exercicio19.py | Nicolas-Wursthorn/exercicios-python-brasil | b2b564d48b519be04643636033ec0815e6d99ea1 | [
"MIT"
] | null | null | null | EstruturaDeRepeticao/exercicio19.py | Nicolas-Wursthorn/exercicios-python-brasil | b2b564d48b519be04643636033ec0815e6d99ea1 | [
"MIT"
] | null | null | null | EstruturaDeRepeticao/exercicio19.py | Nicolas-Wursthorn/exercicios-python-brasil | b2b564d48b519be04643636033ec0815e6d99ea1 | [
"MIT"
] | null | null | null | # Altere o programa anterior para que ele aceite apenas números entre 0 e 1000
condition = True
conjunto = []
while condition:
numero = int(input("Digite os números do conjunto (Digite 0 para parar): "))
if numero == 0:
break
elif numero > 1000 or numero < 0:
print("Digite somente números entre 0 e 1000.")
else:
conjunto.append(numero)
print("Soma dos valores do conjunto: {}!".format(sum(conjunto)))
print("O maior valor do conjunto: {}!".format(max(conjunto)))
print("O menor valor do conjunto: {}!".format(min(conjunto))) | 30.473684 | 80 | 0.659758 | 80 | 579 | 4.775 | 0.5375 | 0.104712 | 0.125654 | 0.073298 | 0.094241 | 0 | 0 | 0 | 0 | 0 | 0 | 0.037611 | 0.219344 | 579 | 19 | 81 | 30.473684 | 0.807522 | 0.131261 | 0 | 0 | 0 | 0 | 0.366534 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.307692 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cf8ef2b45d7d458a65d6c785888a76f8daf2c51b | 750 | py | Python | cogs/help.py | moisesjsanchez/anime-movie-discord-bot | 2979dc28cc6250e56f713c2d6483aaaff7688176 | [
"MIT"
] | null | null | null | cogs/help.py | moisesjsanchez/anime-movie-discord-bot | 2979dc28cc6250e56f713c2d6483aaaff7688176 | [
"MIT"
] | 2 | 2021-05-03T04:48:46.000Z | 2021-05-06T08:29:23.000Z | cogs/help.py | moisesjsanchez/anime-movie-discord-bot | 2979dc28cc6250e56f713c2d6483aaaff7688176 | [
"MIT"
] | null | null | null | import discord
from discord.ext import commands
class Help(commands.Cog):
    """Cog providing the bot's custom help command."""

    def __init__(self, client):
        self.client = client

    # Custom replacement for the default help command.
    @commands.command()
    async def help(self, ctx):
        embed = discord.Embed(
            title='Fathom Chan',
            description="A bot for your Fathom anime film related needs. Below are a list of commands:",
            color=0xE69138,
        )
        command_fields = (
            ('.help', 'Calls up list of commands that user can perform'),
            ('.movies', 'Fetchs current Fathom event anime movies playing'),
        )
        for field_name, field_value in command_fields:
            embed.add_field(name=field_name, value=field_value, inline=False)
        await ctx.send(embed=embed)


def setup(client):
    """Entry point used by discord.py to register this cog on the bot."""
    client.add_cog(Help(client))
| 28.846154 | 141 | 0.665333 | 100 | 750 | 4.92 | 0.58 | 0.04065 | 0.056911 | 0.069106 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01049 | 0.237333 | 750 | 25 | 142 | 30 | 0.84965 | 0.049333 | 0 | 0.125 | 0 | 0 | 0.274262 | 0 | 0 | 0 | 0.011252 | 0 | 0 | 1 | 0.125 | false | 0 | 0.125 | 0 | 0.3125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cf8fb65655bb2de90066c1c7dab90f53f736211b | 2,255 | py | Python | summarize/modules/coverage_matrix_attention/coverage_matrix_attention.py | danieldeutsch/summarize | f36a86d58f381ff1f607f356dad3d6ef7b0e0224 | [
"Apache-2.0"
] | 15 | 2019-11-01T11:49:44.000Z | 2021-01-19T06:59:32.000Z | summarize/modules/coverage_matrix_attention/coverage_matrix_attention.py | CogComp/summary-cloze | b38e3e8c7755903477fd92a4cff27125cbf5553d | [
"Apache-2.0"
] | 2 | 2020-03-30T07:54:01.000Z | 2021-11-15T16:27:42.000Z | summarize/modules/coverage_matrix_attention/coverage_matrix_attention.py | CogComp/summary-cloze | b38e3e8c7755903477fd92a4cff27125cbf5553d | [
"Apache-2.0"
] | 3 | 2019-12-06T05:57:51.000Z | 2019-12-11T11:34:21.000Z | import torch
from allennlp.common.registrable import Registrable
from typing import Tuple
class CoverageMatrixAttention(torch.nn.Module, Registrable):
    """
    Abstract base for coverage-aware matrix attention.

    The ``CoverageMatrixAttention`` computes a matrix of attention probabilities
    between the encoder and decoder outputs. The attention function has access
    to the cumulative probabilities that the attention has assigned to each
    input token previously. In addition to the attention probabilities, the function
    should return the coverage vectors which were used to compute the distribution
    at each time step as well as the new coverage vector which takes into account
    the function's computation.

    The module must compute the probabilities instead of the raw scores (like
    the ``MatrixAttention`` module does) because the coverage vector contains
    the accumulated probabilities.
    """
    def forward(self,
                decoder_outputs: torch.Tensor,
                encoder_outputs: torch.Tensor,
                encoder_mask: torch.Tensor,
                coverage_vector: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        Computes a matrix of attention scores and updates the coverage vector.

        Abstract: subclasses registered via ``Registrable`` must override this.

        Parameters
        ----------
        decoder_outputs: (batch_size, num_decoder_tokens, hidden_dim)
            The decoder's outputs.
        encoder_outputs: (batch_size, num_encoder_tokens, hidden_dim)
            The encoder's outputs.
        encoder_mask: (batch_size, num_encoder_tokens)
            The encoder token mask.
        coverage_vector: (batch_size, num_encoder_tokens)
            The cumulative attention probability assigned to each input token
            thus far.

        Returns
        -------
        torch.Tensor: (batch_size, num_decoder_tokens, num_encoder_tokens)
            The attention probabilities between each decoder and encoder hidden representations.
        torch.Tensor: (batch_size, num_decoder_tokens, num_encoder_tokens)
            The coverage vectors used to compute the corresponding attention probabilities.
        torch.Tensor: (batch_size, num_encoder_tokens)
            The latest coverage vector after computing the attention.
        """
        # Intentionally unimplemented: concrete attention variants subclass this.
        raise NotImplementedError
| 45.1 | 98 | 0.701552 | 263 | 2,255 | 5.882129 | 0.34981 | 0.071105 | 0.054299 | 0.061409 | 0.229476 | 0.125404 | 0.071105 | 0.071105 | 0.071105 | 0.071105 | 0 | 0 | 0.244789 | 2,255 | 49 | 99 | 46.020408 | 0.908397 | 0.695344 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.3 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cf938a4c23d460c727e0aec9347ad4f5e6ef02f9 | 6,373 | py | Python | test/test_oscope_tf.py | alexisboukouvalas/OscoNet | f100d1ccfe8f7dad050a3082773a4b6383a4994a | [
"MIT"
] | 1 | 2020-09-03T10:00:44.000Z | 2020-09-03T10:00:44.000Z | test/test_oscope_tf.py | alexisboukouvalas/OscoNet | f100d1ccfe8f7dad050a3082773a4b6383a4994a | [
"MIT"
] | 1 | 2022-02-10T02:22:05.000Z | 2022-02-10T02:22:05.000Z | test/test_oscope_tf.py | alexisboukouvalas/OscoNet | f100d1ccfe8f7dad050a3082773a4b6383a4994a | [
"MIT"
] | 1 | 2019-09-25T16:44:30.000Z | 2019-09-25T16:44:30.000Z | """
TensorFlow 2 OscoNet code
"""
import numpy as np
import pytest
import tensorflow as tf
from OscopeBootstrap import qvalue
from OscopeBootstrap.create_edge_network_represention import create_edge_network_representation
from OscopeBootstrap.oscope_tf import PRECISION_fp, calc_e2, calc_e2_many_genes, find_best_psi_for_each_gene_pair, \
PRECISION_int, get_permuted_cost, get_pvalues, flatten_upper_triangular, get_symmetric_matrix_from_upper_triangular
from OscopeBootstrap.SyntheticDataset import GetSimISyntheticData, true_adj_matrix
from OscopeBootstrap.oscope_tf import bootstrap_hypothesis_test, get_accuracy, get_metrics_for_different_qvalue_thresholds
def calc_e2_np(X, Y, psi):
    """NumPy reference implementation of the Oscope e^2 cost between two
    gene-expression profiles ``X`` and ``Y`` under phase shift ``psi``."""
    residual = np.square(X) + np.square(Y) - 2 * X * Y * np.cos(psi) - np.square(np.sin(psi))
    return np.sum(np.square(residual))
def calc_e2_many_genes_np(X_many_genes: np.ndarray, psi_ng: np.ndarray):
    '''
    NumPy reference: total pairwise e^2 cost over all ordered gene pairs
    (including the diagonal, so each unordered pair is counted twice).

    :param X_many_genes: G X N tensor of gene expression
    :param psi_ng: G X G tensor of phase shift - should be symmetric
    :return: total cost across all genes
    '''
    num_genes = X_many_genes.shape[0]
    total = 0
    for i in range(num_genes):
        xi = X_many_genes[i, :]
        for j in range(num_genes):
            xj = X_many_genes[j, :]
            shift = psi_ng[i, j]
            residual = np.square(xi) + np.square(xj) - 2 * xi * xj * np.cos(shift) - np.square(np.sin(shift))
            total += np.sum(np.square(residual))
    return total
def create_single_group_example(N, std_noise, phase_shift):
    """Build a toy (4, N) expression matrix: rows 0 and 1 are sinusoids over
    one full period (row 1 shifted by ``phase_shift``), rows 2 and 3 carry no
    signal; every row receives Gaussian noise of scale ``std_noise``."""
    t = np.linspace(0, 2 * np.pi, N)
    base_rows = [
        np.sin(t),
        np.sin(t + phase_shift),
        np.zeros(N),
        np.zeros(N),
    ]
    # One noise draw per row, in row order (matches the original draw order).
    return np.vstack([row + std_noise * np.random.randn(N) for row in base_rows])
def test_get_symmetric_matrix_from_upper_triangular():
    # A 4x4 matrix has 6 strictly-upper-triangular entries; the rebuilt
    # matrix must be symmetric.
    upper_entries = np.array([1, 2, 3, 4, 5, 6])
    matrix = get_symmetric_matrix_from_upper_triangular(4, upper_entries)
    np.testing.assert_equal(matrix, matrix.T)
def test_calc_e2():
    np.random.seed(42)
    n_cells = 10

    def as_tensor(value):
        return tf.constant(value, dtype=PRECISION_fp)

    x = as_tensor(np.random.randn(n_cells))
    y = as_tensor(np.random.randn(n_cells))
    phase = as_tensor(np.array(3.))
    zero_phase = as_tensor(np.array(0.))
    # Identical profiles with no phase shift must achieve the minimum cost.
    assert calc_e2(x, x, zero_phase) == 0, 'must get minimum cost for identical gene with 0 phase'
    # The TF implementation should agree with the NumPy reference (loosely,
    # given reduced precision).
    e_tf = calc_e2(x, y, phase)
    e_np = calc_e2_np(x.numpy(), y.numpy(), phase.numpy())
    np.testing.assert_almost_equal(e_tf, e_np, decimal=1)
def test_calc_e2_many_genes():
    num_genes, n_cells = 5, 10
    expression = tf.constant(np.random.randn(num_genes, n_cells), dtype=PRECISION_fp)
    # make sure we include 0 as possible phase
    phases = tf.constant(np.random.randn(num_genes, num_genes), dtype=PRECISION_fp)
    cost = calc_e2_many_genes(expression, phases)
    # NumPy reference is computed but not compared exactly: the TF and NumPy
    # totals differ substantially in low precision.
    cost_np = calc_e2_many_genes_np(expression.numpy(), phases.numpy())
    assert np.all(cost > 0)
def test_find_best_psi_for_each_gene_pair():
    # Seed both RNGs so the synthetic noise and TF permutations are reproducible.
    np.random.seed(42)
    tf.random.set_seed(42)
    # construct example: genes 0 and 1 oscillate with a known phase shift,
    # genes 2 and 3 are pure noise.
    phase_shift = np.pi
    N = 10
    G = 4
    data_np = create_single_group_example(N, 0.1, phase_shift=phase_shift)
    data = tf.constant(data_np, dtype=PRECISION_fp)
    # candidate_psi = tf.linspace(0, 2 * tf.constant(np.pi), dtype=PRECISION)
    # Only two candidate phases: the true shift and a decoy at half of it.
    candidate_psi = tf.constant(np.array([phase_shift, phase_shift/2]), dtype=PRECISION_fp)
    n_permutations = tf.constant(np.array(20), dtype=PRECISION_int)
    # Output buffers, initialised to +inf so untouched entries are detectable.
    psi_ng = tf.Variable(tf.zeros((G, G), dtype=PRECISION_fp) * tf.constant(np.inf, dtype=PRECISION_fp))
    cost_ng = tf.Variable(tf.ones((G, G), dtype=PRECISION_fp) * tf.constant(np.inf, dtype=PRECISION_fp))
    cost_permuted = tf.Variable(tf.ones((G, G, n_permutations), dtype=PRECISION_fp) * tf.constant(np.inf, dtype=PRECISION_fp))
    pvalues = tf.Variable(tf.ones((G, G), dtype=PRECISION_fp) * tf.constant(np.inf, dtype=PRECISION_fp))
    find_best_psi_for_each_gene_pair(psi_ng, cost_ng, data, candidate_psi=candidate_psi)
    assert psi_ng[0, 1] == phase_shift, 'why picked the other phase shift?'
    # Permutation-based null distribution and the resulting p-values.
    get_permuted_cost(cost_permuted, data, candidate_psi, n_permutations)
    get_pvalues(pvalues, cost_ng, cost_permuted)
    # then q-values
    # then check we find the right pair
    pvalue_flatten = flatten_upper_triangular(pvalues.numpy())
    qvalues_flatten, pi0 = qvalue.estimate(pvalue_flatten, verbose=True)
    qvalues = get_symmetric_matrix_from_upper_triangular(pvalues.shape[0], qvalues_flatten)
    # Significant co-oscillation at the 1% q-value level; the adjacency matrix
    # is symmetric, so the (0, 1) pair appears twice.
    adjacency_matrix = qvalues < 0.01
    assert adjacency_matrix[0, 1]
    assert adjacency_matrix[1, 0]
    assert adjacency_matrix.sum() == 2, 'Only one significant pair should exist (0, 1)'
    gene_names = [f'gene{i}' for i in range(4)]
    # Edge weights are the inverse of the fitted cost.
    a = create_edge_network_representation(adjacency_matrix, 1/cost_ng.numpy(), gene_names)
    assert a.shape[1] == 3, 'must have gene1, gene2, weight columns'
    assert a.shape[0] == 1, 'only one gene pair is significant'
@pytest.mark.slow
def test_bootstrap():
    # This is a slow test (>10 secs) so need to run with `pytest --runslow -rs`
    # End-to-end check of the bootstrap hypothesis test on noiseless
    # synthetic data with one oscillating group.
    np.random.seed(42)
    tf.random.set_seed(42)
    NG = 5
    G = 20
    N = 100
    ngroups = 1
    alpha = 0.01  # significance level for test
    data_df, phaseG, angularSpeed = GetSimISyntheticData(NG=NG, G=G, ngroups=ngroups,
                                                        N=N, noiseLevel=0)
    adjacency_matrix, qvalues, cost = bootstrap_hypothesis_test(n_bootstrap=30,
                                                                data=data_df.values,
                                                                alpha=alpha,
                                                                grid_points_in_search=30)
    # Shape and sanity checks on the returned matrices.
    assert qvalues.shape == (G, G)
    assert adjacency_matrix.shape == (G, G)
    assert np.all(~np.isnan(qvalues))
    assert np.all(~np.isnan(adjacency_matrix))
    assert cost.shape == (G, G)
    # Compare the recovered network against the ground-truth adjacency.
    adjacency_matrix_true = true_adj_matrix(G, angularSpeed)
    correct_ratio = get_accuracy(adjacency_matrix, adjacency_matrix_true)
    assert correct_ratio > .98
    TPR, FDR, FPR = get_metrics_for_different_qvalue_thresholds(qvalues,
                                                                adjacency_matrix_true,
                                                                np.array([alpha]))
    # To get appropriate values we need to increase number of bootstrap samples
    assert TPR > 0.75
    assert FDR < 0.3
    assert FPR < 0.1
| 40.852564 | 133 | 0.672682 | 961 | 6,373 | 4.216441 | 0.204995 | 0.062192 | 0.063179 | 0.020731 | 0.293435 | 0.23692 | 0.14462 | 0.10612 | 0.092794 | 0.057749 | 0 | 0.022075 | 0.218108 | 6,373 | 155 | 134 | 41.116129 | 0.79109 | 0.09399 | 0 | 0.093458 | 0 | 0 | 0.036449 | 0 | 0 | 0 | 0 | 0 | 0.17757 | 1 | 0.074766 | false | 0 | 0.074766 | 0.009346 | 0.17757 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cf96ec46a9f75ae061cfdda9d111b67ea90fbbf5 | 458 | py | Python | LC/9.py | szhu3210/LeetCode_Solutions | 64747eb172c2ecb3c889830246f3282669516e10 | [
"MIT"
] | 2 | 2018-02-24T17:20:02.000Z | 2018-02-24T17:25:43.000Z | LC/9.py | szhu3210/LeetCode_Solutions | 64747eb172c2ecb3c889830246f3282669516e10 | [
"MIT"
] | null | null | null | LC/9.py | szhu3210/LeetCode_Solutions | 64747eb172c2ecb3c889830246f3282669516e10 | [
"MIT"
] | null | null | null | class Solution(object):
def isPalindrome(self, x):
"""
:type x: int
:rtype: bool
"""
if x<0:
return False
a=x
b=0
while(a!=0):
# 1. get last digit of a and add to b, b=b*10+lastdigit
b=b*10+a%10
# 2. delete last digit of a
a=a/10
#compare x and b and return
return True if x==b else False | 24.105263 | 67 | 0.423581 | 65 | 458 | 2.984615 | 0.507692 | 0.030928 | 0.113402 | 0.123711 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.054852 | 0.482533 | 458 | 19 | 68 | 24.105263 | 0.763713 | 0.28821 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cf976cb08811d9daaefe1b3400c3ef25e10128c3 | 1,918 | py | Python | live.py | ardamavi/Vocalize-Sign-Language | b00ce8c2a54f7f333ba8b3612567448281abfc61 | [
"Apache-2.0"
] | 65 | 2017-06-10T19:34:42.000Z | 2022-03-15T06:47:29.000Z | live.py | sygops/Vocalize-Sign-Language | b19f6251e48478193c3a8001966edf2d421e8281 | [
"Apache-2.0"
] | 1 | 2021-09-08T04:04:55.000Z | 2021-09-09T03:24:37.000Z | live.py | sygops/Vocalize-Sign-Language | b19f6251e48478193c3a8001966edf2d421e8281 | [
"Apache-2.0"
] | 25 | 2018-01-08T15:02:05.000Z | 2021-11-16T16:31:42.000Z | # Arda Mavi
import os
import cv2
import platform
import numpy as np
from predict import predict
from scipy.misc import imresize
from multiprocessing import Process
from keras.models import model_from_json
img_size = 64
channel_size = 1
def main():
    """Live sign-language vocalizer: capture webcam frames, classify each
    one with the pre-trained model, and speak confident predictions."""
    # Load the model architecture from JSON and its trained weights.
    model_file = open('Data/Model/model.json', 'r')
    model = model_file.read()
    model_file.close()
    model = model_from_json(model)
    model.load_weights("Data/Model/weights.h5")

    print('Press "ESC" button for exit.')

    # Get image from camera, get predict and say it with another process, repeat.
    cap = cv2.VideoCapture(0)
    old_char = ''
    while True:
        ret, img = cap.read()
        if not ret:
            # Camera frame not available; try again.
            continue

        # Crop the frame to a centered square (height x height):
        img_height, img_width = img.shape[:2]
        side_width = int((img_width - img_height) / 2)
        img = img[0:img_height, side_width:side_width + img_height]

        # Show window (mirror effect for easier handling):
        cv2.imshow('VSL', cv2.flip(img, 1))

        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # scipy.misc.imresize was removed in SciPy >= 1.3; cv2.resize is the
        # drop-in replacement (note cv2 takes (width, height)).
        img = cv2.resize(img, (img_size, img_size))
        img = 1 - np.array(img).astype('float32') / 255.
        img = img.reshape(1, img_size, img_size, channel_size)

        Y_string, Y_possibility = predict(model, img)

        if Y_possibility < 0.4:  # Too uncertain; allow re-vocalization later.
            old_char = ''

        # Vocalize only on macOS ('say'), only for new, confident predictions.
        if (platform.system() == 'Darwin') and old_char != Y_string and Y_possibility > 0.6:
            print(Y_string, Y_possibility)
            arg = 'say {0}'.format(Y_string)
            # Say predict with multiprocessing so capture is not blocked.
            Process(target=os.system, args=(arg,)).start()
            old_char = Y_string

        if cv2.waitKey(200) == 27:  # Decimal 27 = Esc
            break

    cap.release()
    cv2.destroyAllWindows()


if __name__ == '__main__':
    main()
| 30.935484 | 101 | 0.618874 | 257 | 1,918 | 4.435798 | 0.412451 | 0.030702 | 0.022807 | 0.019298 | 0.04386 | 0.04386 | 0 | 0 | 0 | 0 | 0 | 0.028592 | 0.270594 | 1,918 | 61 | 102 | 31.442623 | 0.786276 | 0.145464 | 0 | 0.046512 | 0 | 0 | 0.062654 | 0.025799 | 0 | 0 | 0 | 0 | 0 | 1 | 0.023256 | false | 0 | 0.186047 | 0 | 0.209302 | 0.046512 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cf9c1389b641f423d0d37afccaff7445dbc77a66 | 1,028 | py | Python | examples/webhook/server.py | Alma-field/twitcaspy | 25f3e850f2d5aab8a864bd6b7003468587fa3ea7 | [
"MIT"
] | null | null | null | examples/webhook/server.py | Alma-field/twitcaspy | 25f3e850f2d5aab8a864bd6b7003468587fa3ea7 | [
"MIT"
] | 18 | 2021-10-01T13:40:01.000Z | 2021-10-18T12:34:57.000Z | examples/webhook/server.py | Alma-field/twitcaspy | 25f3e850f2d5aab8a864bd6b7003468587fa3ea7 | [
"MIT"
] | null | null | null | # Twitcaspy
# Copyright 2021 Alma-field
# See LICENSE for details.
# Before running this code, run the following command:
# このコードを実行する前に、以下のコマンドを実行してください。
# pip install twitcaspy[webhook]
from flask import Flask, request, make_response, jsonify, abort
app = Flask(__name__)
from twitcaspy import api, TwitcaspyException
@app.route('/', methods=['GET', 'POST'])
def main():
    """Receive TwitCasting webhook notifications.

    POST: parse the incoming webhook payload and print its fields.
    GET: return a simple OK response. (Previously GET fell through and
    returned None, which makes Flask raise a 'did not return a valid
    response' error even though the route accepts GET.)
    """
    if request.method == 'POST':
        webhook = api.incoming_webhook(request.json)
        # Show parse result
        print(f'signature : {webhook.signature}')
        print(f'user_id : {webhook.broadcaster.id}')
        print(f'title : {webhook.movie.title}')
        return make_response(jsonify({'message':'OK'}))
    # GET (liveness / manual check)
    return make_response(jsonify({'message': 'OK'}))
if __name__ == '__main__':
    import json
    # Replay a recorded webhook payload so the handler can be exercised
    # without a live TwitCasting callback hitting this server.
    cassettes_file = '../../cassettes/testincomingwebhook.json'
    # load test webhook data
    with open(cassettes_file, "r", encoding='utf-8')as file:
        data = json.load(file)
    # set signature to api instance (used to validate incoming requests)
    api.signature = data['signature']
    app.run(debug=True)
| 31.151515 | 63 | 0.678988 | 125 | 1,028 | 5.44 | 0.608 | 0.026471 | 0.055882 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006017 | 0.191634 | 1,028 | 32 | 64 | 32.125 | 0.812274 | 0.238327 | 0 | 0 | 0 | 0 | 0.230272 | 0.109961 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.166667 | 0 | 0.277778 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cf9db563a203c43bbc7c0bac94e9bc037f070989 | 7,814 | py | Python | SK_Net_Plus.py | xingshulicc/Channel_Attention_Selection | c51b8de34ddfe5d6a88dd3ab5e846930f53e7476 | [
"MIT"
] | 2 | 2020-10-26T06:44:29.000Z | 2020-10-31T06:06:59.000Z | SK_Net_Plus.py | xingshulicc/Channel_Attention_Selection | c51b8de34ddfe5d6a88dd3ab5e846930f53e7476 | [
"MIT"
] | null | null | null | SK_Net_Plus.py | xingshulicc/Channel_Attention_Selection | c51b8de34ddfe5d6a88dd3ab5e846930f53e7476 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import print_function
"""
Created on Fri Oct 23 13:31:34 2020
@author: Admin
"""
from keras.layers import Input
from keras.layers import Conv2D
from keras.layers import BatchNormalization
from keras.layers import Activation
from keras.layers import MaxPooling2D
from keras.layers import GlobalAveragePooling2D
from keras.layers import Concatenate
from keras.layers import concatenate
from keras.layers import add
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import Lambda
from keras import backend as K
from keras.models import Model
from keras.utils import plot_model
# Channel axis for BatchNormalization/Concatenate depends on the
# backend's image data layout.
bn_axis = 1 if K.image_data_format() == 'channels_first' else -1
def _grouped_conv_block(input_tensor, cardinality, output_filters, kernel_size, block):
    """
    Grouped convolution block: split the input channels into `cardinality`
    groups, convolve each group independently, concatenate the results and
    mix them with two 1x1 conv + BN + ReLU stages.

    Typical values: kernel_size = 3, cardinality = 2. Assumes the input
    channel count is divisible by `cardinality`.

    Parameters
    ----------
    input_tensor : Keras tensor
        4-D feature map (channels-first or channels-last).
    cardinality : int
        Number of channel groups.
    output_filters : int
        Number of output channels of the block.
    kernel_size : int or tuple
        Spatial kernel size of the grouped convolutions.
    block : int
        Index used to build unique layer names.

    Returns
    -------
    Keras tensor with `output_filters` channels, spatial size unchanged
    (stride 1, 'same' padding throughout).
    """
    base_name = 'ek_block_' + str(block) + '_'
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    group_list = []
    # NOTE(review): _keras_shape is a private attribute of older Keras
    # versions — verify against the pinned Keras release.
    input_filters = input_tensor._keras_shape[channel_axis]
    grouped_filters = int(input_filters / cardinality)
    for c in range(cardinality):
        # Slice out the c-th group of channels. The lambda is invoked
        # immediately by the layer call below, so the loop variable `c`
        # is captured at its current value.
        if K.image_data_format() == 'channels_last':
            x = Lambda(lambda z: z[:, :, :, c * grouped_filters:(c + 1) * grouped_filters])(input_tensor)
        else:
            x = Lambda(lambda z: z[:, c * grouped_filters:(c + 1) * grouped_filters, :, :])(input_tensor)
        x = Conv2D(filters = output_filters // cardinality,
                   kernel_size = kernel_size,
                   strides = (1, 1),
                   padding = 'same',
                   name = base_name + 'grouped_conv_' + str(c))(x)
        group_list.append(x)
    group_merge = concatenate(group_list, axis = channel_axis)
    # The shape of group_merge: b, h, w, output_filters
    x_c = BatchNormalization(axis = channel_axis, name = base_name + 'grouped_conv_bn')(group_merge)
    x_c = Activation('relu')(x_c)
    # Two successive 1x1 convolutions mix information across the groups.
    x_c = Conv2D(filters = output_filters,
                 kernel_size = (1, 1),
                 strides = (1, 1),
                 name = base_name + 'mix_conv_1')(x_c)
    x_c = BatchNormalization(axis = channel_axis, name = base_name + 'mix_bn_1')(x_c)
    x_c = Activation('relu')(x_c)
    x_c = Conv2D(filters = output_filters,
                 kernel_size = (1, 1),
                 strides = (1, 1),
                 name = base_name + 'mix_conv_2')(x_c)
    x_c = BatchNormalization(axis = channel_axis, name = base_name + 'mix_bn_2')(x_c)
    x_c = Activation('relu')(x_c)
    return x_c
def _select_kernel(inputs, kernels, filters, cardinality, block):
    """
    Selective-kernel grouped convolution block.

    Each channel group is convolved twice (once per kernel size in
    `kernels`); the per-kernel outputs are summed across groups,
    concatenated, and mixed with two 1x1 conv + BN + ReLU stages.

    Typical values: kernels = [3, 5], cardinality = 2.
    NOTE: the group_list[0..3] indexing below hard-codes cardinality == 2.

    Parameters
    ----------
    inputs : Keras tensor
        4-D feature map.
    kernels : sequence of two kernel sizes
        The two branch kernel sizes, e.g. [3, 5].
    filters : int
        Number of output channels of the block.
    cardinality : int
        Number of channel groups (must be 2, see above).
    block : int
        Index used to build unique layer names.

    Returns
    -------
    Keras tensor with `filters` channels, spatial size unchanged.
    """
    base_name = 'sk_block_' + str(block) + '_'
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    group_list = []
    # NOTE(review): _keras_shape is a private attribute of older Keras
    # versions — verify against the pinned Keras release.
    input_filters = inputs._keras_shape[channel_axis]
    grouped_filters = int(input_filters / cardinality)
    for c in range(cardinality):
        # Slice out the c-th group of input channels.
        if K.image_data_format() == 'channels_last':
            x = Lambda(lambda z: z[:, :, :, c * grouped_filters:(c + 1) * grouped_filters])(inputs)
        else:
            x = Lambda(lambda z: z[:, c * grouped_filters:(c + 1) * grouped_filters, :, :])(inputs)
        # Branch 1: first kernel size.
        x_1 = Conv2D(filters = filters // cardinality,
                     kernel_size = kernels[0],
                     strides = (1, 1),
                     padding = 'same',
                     name = base_name + 'grouped_conv1_' + str(c))(x)
        group_list.append(x_1)
        # Branch 2: second kernel size.
        x_2 = Conv2D(filters = filters // cardinality,
                     kernel_size = kernels[1],
                     strides = (1, 1),
                     padding = 'same',
                     name = base_name + 'grouped_conv2_' + str(c))(x)
        group_list.append(x_2)
    # group_list holds [g0_k1, g0_k2, g1_k1, g1_k2]; o_1 sums the
    # kernel-1 outputs of both groups, o_2 the kernel-2 outputs.
    o_1 = add([group_list[0], group_list[2]])
    o_2 = add([group_list[1], group_list[3]])
    # The shape of o_1, o_2: b, h, w, filters // cardinality
    group_merge = concatenate([o_1, o_2], axis = channel_axis)
    # The shape of group_merge is: b, h, w, filters
    x_c = BatchNormalization(axis = channel_axis, name = base_name + 'grouped_conv_bn')(group_merge)
    x_c = Activation('relu')(x_c)
    # Two successive 1x1 convolutions mix information across the branches.
    x_c = Conv2D(filters = filters,
                 kernel_size = (1, 1),
                 strides = (1, 1),
                 name = base_name + 'mix_conv_1')(x_c)
    x_c = BatchNormalization(axis = channel_axis, name = base_name + 'mix_bn_1')(x_c)
    x_c = Activation('relu')(x_c)
    x_c = Conv2D(filters = filters,
                 kernel_size = (1, 1),
                 strides = (1, 1),
                 name = base_name + 'mix_conv_2')(x_c)
    x_c = BatchNormalization(axis = channel_axis, name = base_name + 'mix_bn_2')(x_c)
    x_c = Activation('relu')(x_c)
    return x_c
def _initial_conv_block(inputs):
    """Network stem: 7x7/2 convolution + BN + ReLU, then 3x3/2 max-pooling
    (overall spatial downsampling by 4, 32 output channels)."""
    stem = Conv2D(filters=32,
                  kernel_size=(7, 7),
                  strides=(2, 2),
                  padding='same',
                  name='init_conv')(inputs)
    stem = BatchNormalization(axis=bn_axis, name='init_conv_bn')(stem)
    stem = Activation('relu')(stem)
    return MaxPooling2D(pool_size=(3, 3),
                        strides=(2, 2),
                        padding='same',
                        name='init_MaxPool')(stem)
def Weakly_DenseNet(input_shape, classes):
    """
    Build the 'Grouped_Weakly_Densenet_19' classifier.

    Stacks selective-kernel blocks (_select_kernel) with dense-style
    concatenation shortcuts between stages, followed by global average
    pooling and a two-layer classifier head. The shape comments below
    assume a 224x224x3 input.

    Parameters
    ----------
    input_shape : tuple
        Input image shape, e.g. (224, 224, 3).
    classes : int
        Number of output classes (softmax).

    Returns
    -------
    keras.Model
    """
    inputs = Input(shape = input_shape)
    # The shape of inputs: 224 x 224 x 3
    x_1 = _initial_conv_block(inputs)
    # The shape of x_1: 56 x 56 x 32
    x_2 = _select_kernel(x_1, [3, 5], 64, 2, 1)
    # The shape of x_2: 56 x 56 x 64
    pool_1 = MaxPooling2D(pool_size = (2, 2), strides = (2, 2), padding = 'same')(x_2)
    # The shape of pool_1: 28 x 28 x 64
    x_3 = Concatenate(axis = bn_axis)([x_1, x_2])
    # The shape of x_3: 56 x 56 x 96
    x_4 = _select_kernel(x_3, [3, 5], 128, 2, 2)
    # The shape of x_4: 56 x 56 x 128
    pool_2 = MaxPooling2D(pool_size = (2, 2), strides = (2, 2), padding = 'same')(x_4)
    # The shape of pool_2: 28 x 28 x 128
    x_5 = Concatenate(axis = bn_axis)([pool_1, pool_2])
    # The shape of x_5: 28 x 28 x 192
    x_6 = _select_kernel(x_5, [3, 5], 256, 2, 3)
    # The shape of x_6: 28 x 28 x 256
    pool_3 = MaxPooling2D(pool_size = (2, 2), strides = (2, 2), padding = 'same')(x_6)
    # The shape of pool_3: 14 x 14 x 256
    x_7 = Concatenate(axis = bn_axis)([pool_2, x_6])
    # The shape of x_7: 28 x 28 x 384
    x_8 = _select_kernel(x_7, [3, 5], 512, 2, 4)
    # The shape of x_8: 28 x 28 x 512
    pool_4 = MaxPooling2D(pool_size = (2, 2), strides = (2, 2), padding = 'same')(x_8)
    # The shape of pool_4: 14 x 14 x 512
    x_9 = Concatenate(axis = bn_axis)([pool_3, pool_4])
    # The shape of x_9: 14 x 14 x 768
    x_10 = _select_kernel(x_9, [3, 5], 512, 2, 5)
    # The shape of x_10: 14 x 14 x 512
    pool_5 = MaxPooling2D(pool_size = (2, 2), strides = (2, 2), padding = 'same')(x_10)
    # The shape of pool_5: 7 x 7 x 512
    # Classifier head: GAP -> 512-unit ReLU FC -> dropout -> softmax.
    output = GlobalAveragePooling2D()(pool_5)
    output = Dense(512, activation = 'relu', name = 'fc_1')(output)
    output = Dropout(rate = 0.5, name = 'dropout')(output)
    output = Dense(classes, activation = 'softmax', name = 'fc_2')(output)
    model = Model(inputs = inputs, outputs = output, name = 'Grouped_Weakly_Densenet_19')
    return model
if __name__ == '__main__':
    # Smoke test: build the network for 224x224 RGB inputs / 10 classes,
    # save an architecture diagram and print the layer count and summary.
    model = Weakly_DenseNet((224, 224, 3), 10)
    plot_model(model, to_file = 'model_SK_Net.png', show_shapes = True, show_layer_names = True)
    print(len(model.layers))
    model.summary()
| 39.664975 | 106 | 0.579473 | 1,115 | 7,814 | 3.803587 | 0.127354 | 0.015091 | 0.044801 | 0.05942 | 0.546333 | 0.501061 | 0.501061 | 0.443056 | 0.42655 | 0.37892 | 0 | 0.061016 | 0.299463 | 7,814 | 196 | 107 | 39.867347 | 0.713738 | 0.10494 | 0 | 0.392593 | 0 | 0 | 0.061096 | 0.003893 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02963 | false | 0 | 0.118519 | 0 | 0.177778 | 0.014815 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cfa0c9cdefacf9cd6963bf8494c977ebb9d0cbfc | 1,386 | py | Python | examples/docs_snippets/docs_snippets/concepts/partitions_schedules_sensors/schedule_from_partitions.py | silentsokolov/dagster | 510bf07bf6906294d5a239d60079c88211002ebf | [
"Apache-2.0"
] | null | null | null | examples/docs_snippets/docs_snippets/concepts/partitions_schedules_sensors/schedule_from_partitions.py | silentsokolov/dagster | 510bf07bf6906294d5a239d60079c88211002ebf | [
"Apache-2.0"
] | null | null | null | examples/docs_snippets/docs_snippets/concepts/partitions_schedules_sensors/schedule_from_partitions.py | silentsokolov/dagster | 510bf07bf6906294d5a239d60079c88211002ebf | [
"Apache-2.0"
] | null | null | null | # isort: skip_file
from .partitioned_job import my_partitioned_config
from dagster import HourlyPartitionsDefinition
# start_marker
from dagster import build_schedule_from_partitioned_job, job
@job(config=my_partitioned_config)
def do_stuff_partitioned():
    """Placeholder partitioned job; its run config comes from ``my_partitioned_config``."""
    ...

# Derive a schedule from the job's partitioning (one tick per partition).
do_stuff_partitioned_schedule = build_schedule_from_partitioned_job(
    do_stuff_partitioned,
)
# end_marker
# start_partitioned_asset_schedule
from dagster import define_asset_job
# Asset job over every asset ("*"), partitioned hourly starting 2022-05-31.
partitioned_asset_job = define_asset_job(
    "partitioned_job",
    selection="*",
    partitions_def=HourlyPartitionsDefinition(start_date="2022-05-31", fmt="%Y-%m-%d"),
)

# Derive a schedule from the asset job's partitioning.
asset_partitioned_schedule = build_schedule_from_partitioned_job(
    partitioned_asset_job,
)
# end_partitioned_asset_schedule
from .static_partitioned_job import continent_job, CONTINENTS
# start_static_partition
from dagster import schedule
@schedule(cron_schedule="0 0 * * *", job=continent_job)
def continent_schedule():
    """Daily schedule requesting one run per static continent partition."""
    for continent in CONTINENTS:
        yield continent_job.run_request_for_partition(
            partition_key=continent, run_key=continent
        )
# end_static_partition
# start_single_partition
@schedule(cron_schedule="0 0 * * *", job=continent_job)
def antarctica_schedule():
    """Daily schedule that always targets the "Antarctica" partition."""
    yield continent_job.run_request_for_partition(
        partition_key="Antarctica", run_key=None
    )
# end_single_partition
| 21 | 87 | 0.786436 | 174 | 1,386 | 5.833333 | 0.270115 | 0.082759 | 0.070936 | 0.082759 | 0.312315 | 0.281773 | 0.281773 | 0.183251 | 0.183251 | 0 | 0 | 0.010033 | 0.137085 | 1,386 | 65 | 88 | 21.323077 | 0.838629 | 0.138528 | 0 | 0.129032 | 0 | 0 | 0.052365 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.096774 | false | 0 | 0.193548 | 0 | 0.290323 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cfa4f9c3c47163b8357b553409d94c4c1cf0c0a0 | 1,019 | py | Python | src/contnext_viewer/graph.py | ContNeXt/web_app | 0ace1077ee07902cadca684e4e06b3e91cea437f | [
"MIT"
] | 3 | 2022-01-14T11:56:08.000Z | 2022-01-14T12:36:42.000Z | src/contnext_viewer/graph.py | ContNeXt/web_app | 0ace1077ee07902cadca684e4e06b3e91cea437f | [
"MIT"
] | null | null | null | src/contnext_viewer/graph.py | ContNeXt/web_app | 0ace1077ee07902cadca684e4e06b3e91cea437f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from contnext_viewer.models import Network, engine
from sqlalchemy.orm import sessionmaker
def create_json_file(id, node):
    """Return ``(nodes, links)`` describing the ego-network of *node*.

    Looks up the network whose identifier equals *id* (the parameter name
    shadows the builtin but is kept for interface compatibility), collects
    all edges incident to *node*, and returns the involved nodes (with
    their stored properties) plus the edges in D3-style dicts. Returns
    ``([], [])`` when the network does not exist or cannot be loaded.
    """
    # Start database session
    Session = sessionmaker(bind=engine)
    sqlsession = Session()
    try:
        # Fetch the single matching network once; the original issued the
        # same query twice and indexed [0], crashing into the bare except
        # when no row matched.
        network = sqlsession.query(Network).filter(Network.identifier == id).first()
        if network is None:
            return [], []
        g = network.data
        properties = network.properties
    except Exception:
        # Keep the original best-effort contract: any lookup failure
        # yields an empty graph instead of propagating.
        return [], []
    finally:
        sqlsession.close()
    # Get edges linked to the node:
    edges = list(g.edges(node))
    node_list = list({endpoint for edge in edges for endpoint in edge[:2]})
    nodes_dic = {name: index for index, name in enumerate(node_list)}
    nodes = [{'id': nodes_dic[str(i)], 'name': str(i), 'connections': properties.get(i).get('connections'),
              'rank': properties.get(i).get('rank'), 'housekeeping': properties.get(i).get('housekeeping')
              } for i in node_list]
    links = [{'source': nodes_dic[u[0]], 'target': nodes_dic[u[1]]} for u in edges]
    return nodes, links
| 37.740741 | 112 | 0.68106 | 154 | 1,019 | 4.435065 | 0.383117 | 0.046852 | 0.035139 | 0.074671 | 0.175695 | 0.175695 | 0.175695 | 0.175695 | 0.175695 | 0.175695 | 0 | 0.007973 | 0.138371 | 1,019 | 26 | 113 | 39.192308 | 0.769932 | 0.069676 | 0 | 0 | 0 | 0 | 0.076271 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.111111 | 0 | 0.277778 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cfa6145bbf7350ea98ed17dde42977836a1d405b | 8,302 | py | Python | crystals/affine.py | priyankism/crystals | 683bf35fbc95d0ded8cafdad0f2dede7adf5b072 | [
"BSD-3-Clause"
] | null | null | null | crystals/affine.py | priyankism/crystals | 683bf35fbc95d0ded8cafdad0f2dede7adf5b072 | [
"BSD-3-Clause"
] | null | null | null | crystals/affine.py | priyankism/crystals | 683bf35fbc95d0ded8cafdad0f2dede7adf5b072 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Linear algebra operations and helpers.
Inspired by Christoph Gohlke's transformation.py <http://www.lfd.uci.edu/~gohlke/>
This module is not directly exported by the `crystals` library. Use it with caution.
"""
import math
import numpy as np
# standard basis: e1 = (1,0,0), e2 = (0,1,0), e3 = (0,0,1)
# (the rows of the 3x3 identity matrix)
e1, e2, e3 = np.eye(3)
def affine_map(array):
    """
    Extends 3x3 transform matrices to 4x4, i.e. general affine transforms.

    Parameters
    ----------
    array : ndarray, shape {(3,3), (4,4)}
        Transformation matrix. If shape = (4,4), returned intact.

    Returns
    -------
    extended : ndarray, shape (4,4)
        Extended array

    Raises
    ------
    ValueError : If the transformation matrix is neither 3x3 or 4x4
    """
    if array.shape == (4, 4):
        # Already a full affine matrix; nothing to do.
        return array
    if array.shape != (3, 3):
        raise ValueError(
            "Array shape not 3x3 or 4x4, and thus is not a transformation matrix."
        )
    # Embed the 3x3 block in the top-left corner of a 4x4 matrix whose
    # bottom-right entry is 1 (no translation component).
    extended = np.zeros(shape=(4, 4), dtype=array.dtype)
    extended[:3, :3] = array
    extended[-1, -1] = 1
    return extended
def transform(matrix, array):
    """
    Applies a matrix transform on an array.

    Parameters
    ----------
    matrix : ndarray, shape {(3,3), (4,4)}
        Transformation matrix.
    array : ndarray, shape {(3,), (3,3), (4,4)}
        Array to be transformed. Either a 1x3 vector, or a transformation
        matrix in 3x3 or 4x4 shape.

    Returns
    -------
    transformed : ndarray
        Transformed array, either a 1D vector or a 4x4 transformation matrix

    Raises
    ------
    ValueError : If the transformation matrix is neither 3x3 or 4x4
    """
    array = np.asarray(array)
    if matrix.shape not in [(3, 3), (4, 4)]:
        raise ValueError(
            f"Input matrix is neither a 3x3 or 4x4 matrix, but \
        rather of shape {matrix.shape}."
        )

    # Work in homogeneous (4x4) coordinates throughout.
    matrix = affine_map(matrix)

    # Case of a vector (e.g. position vector): lift to a homogeneous
    # 4-vector [x, y, z, 1], transform, then drop the last component.
    if array.ndim == 1:
        extended_vector = np.array([0, 0, 0, 1], dtype=array.dtype)
        extended_vector[:3] = array
        return np.dot(matrix, extended_vector)[:3]
    else:
        # Matrix operand: extend it as well and compose the affine maps.
        array = affine_map(array)
        return np.dot(matrix, array)
def translation_matrix(direction):
    """
    Return matrix to translate by direction vector.

    Parameters
    ----------
    direction : array_like, shape (3,)

    Returns
    -------
    translation : `~numpy.ndarray`, shape (4,4)
        4x4 translation matrix.
    """
    # Identity rotation block, translation vector in the last column.
    translation = np.eye(4)
    translation[0:3, 3] = np.asarray(direction)[0:3]
    return translation
def change_of_basis(basis1, basis2=(e1, e2, e3)):
    """
    Returns the matrix that transforms vectors expressed in one basis
    to vectors expressed in another basis.

    Parameters
    ----------
    basis1 : list of array_like, shape (3,)
        First basis
    basis2 : list of array_like, shape (3,), optional
        Second basis. By default, this is the standard basis

    Returns
    -------
    cob : `~numpy.ndarray`, shape (3,3)
        Change-of-basis matrix.
    """
    # Matrix whose columns are the basis-1 vectors: maps coordinates in
    # basis 1 to standard coordinates.
    basis1_to_standard = np.hstack([np.asarray(v).reshape(3, 1) for v in basis1])
    # Inverse of the analogous matrix for basis 2: maps standard
    # coordinates to coordinates in basis 2.
    standard_to_basis2 = np.linalg.inv(
        np.hstack([np.asarray(v).reshape(3, 1) for v in basis2])
    )
    return np.dot(standard_to_basis2, basis1_to_standard)
def is_basis(basis):
    """
    Returns True if the set of vectors forms a basis, i.e. the vectors are
    linearly independent, checked via an eigenvalue calculation.

    Parameters
    ----------
    basis : list of array-like, shape (3,)

    Returns
    -------
    out : bool
        Whether or not the basis is valid.
    """
    eigenvalues = np.linalg.eigvals(np.asarray(basis))
    # NOTE: this is an exact comparison with 0, so numerically
    # near-singular sets still count as a basis.
    return not np.any(eigenvalues == 0)
def is_rotation_matrix(matrix):
    """
    Checks whether a matrix is orthogonal with unit determinant (1 or -1),
    the two defining properties of (possibly improper) rotation matrices.

    Parameters
    ----------
    matrix : ndarray, shape {(3,3), (4,4)}
        Rotation matrix candidate.

    Returns
    -------
    result : bool
        If True, input could be a rotation matrix.
    """
    # NOTE(review): a composite (4,4) translation+rotation matrix is not
    # reduced to its (3,3) block before checking — confirm that is intended.
    inverse_is_transpose = np.allclose(np.linalg.inv(matrix), np.transpose(matrix))
    determinant_is_unit = np.allclose(abs(np.linalg.det(matrix)), 1)
    return inverse_is_transpose and determinant_is_unit
def rotation_matrix(angle, axis=(0, 0, 1)):
    """
    Return matrix to rotate about axis defined by direction around the origin [0,0,0].

    Parameters
    ----------
    angle : float
        Rotation angle [rad]
    axis : array-like of length 3
        Axis about which to rotate

    Returns
    -------
    matrix : `~numpy.ndarray`, shape (3,3)
        Rotation matrix.

    See also
    --------
    translation_rotation_matrix

    Notes
    -----
    To combine rotation and translations, see
    http://www.euclideanspace.com/maths/geometry/affine/matrix4x4/index.htm
    """
    sin_a, cos_a = math.sin(angle), math.cos(angle)

    # Normalize the rotation axis to unit length.
    unit_axis = np.asarray(axis)
    unit_axis = unit_axis / np.linalg.norm(unit_axis)

    # Rodrigues' rotation formula:
    #   R = cos(a) I + (1 - cos(a)) n n^T + sin(a) [n]_x
    rot = np.diag([cos_a, cos_a, cos_a])
    rot += (1.0 - cos_a) * np.outer(unit_axis, unit_axis)
    rot += sin_a * np.array(
        [
            [0.0, -unit_axis[2], unit_axis[1]],
            [unit_axis[2], 0.0, -unit_axis[0]],
            [-unit_axis[1], unit_axis[0], 0.0],
        ]
    )
    return rot
def translation_rotation_matrix(angle, axis, translation):
    """
    Returns a 4x4 matrix that includes a rotation and a translation.

    Parameters
    ----------
    angle : float
        Rotation angle [rad]
    axis : array-like of length 3
        Axis about which to rotate
    translation : array_like, shape (3,)
        Translation vector

    Returns
    -------
    matrix : `~numpy.ndarray`, shape (4,4)
        Affine transform matrix.
    """
    # Embed the 3x3 rotation in a 4x4 affine matrix, then fill in the
    # translation column.
    combined = affine_map(rotation_matrix(angle=angle, axis=axis))
    combined[0:3, 3] = np.asarray(translation)
    return combined
def change_basis_mesh(xx, yy, zz, basis1, basis2):
    """
    Changes the basis of meshgrid arrays.

    Parameters
    ----------
    xx, yy, zz : ndarrays
        Arrays of equal shape, such as produced by numpy.meshgrid.
    basis1 : list of ndarrays, shape(3,)
        Basis of the mesh
    basis2 : list of ndarrays, shape(3,)
        Basis in which to express the mesh

    Returns
    -------
    XX, YY, ZZ : `~numpy.ndarray`
        Mesh coordinates expressed in `basis2`, same shapes as the inputs.
    """
    # Build coordinate array row-wise. NOTE: the np.float alias was
    # removed in NumPy >= 1.24; the builtin float (== np.float64) is the
    # drop-in replacement.
    changed = np.empty(shape=(3, xx.size), dtype=float)
    linearized = np.empty(shape=(3, xx.size), dtype=float)
    linearized[0, :] = xx.ravel()
    linearized[1, :] = yy.ravel()
    linearized[2, :] = zz.ravel()

    # Change the basis at each row (column = one point).
    COB = change_of_basis(basis1, basis2)
    np.dot(COB, linearized, out=changed)
    return (
        changed[0, :].reshape(xx.shape),
        changed[1, :].reshape(yy.shape),
        changed[2, :].reshape(zz.shape),
    )
def minimum_image_distance(xx, yy, zz, lattice):
    """
    Returns a periodic array according to the minimum image convention.

    Parameters
    ----------
    xx, yy, zz : ndarrays
        Arrays of equal shape, such as produced by numpy.meshgrid.
    lattice : list of ndarrays, shape(3,)
        Basis of the mesh

    Returns
    -------
    r : `~numpy.ndarray`
        Minimum image distance over the lattice
    """
    COB = change_of_basis(np.eye(3), lattice)
    # NOTE: the np.float alias was removed in NumPy >= 1.24; the builtin
    # float (== np.float64) is the drop-in replacement.
    linearized = np.empty(shape=(3, xx.size), dtype=float)  # In the standard basis
    ulinearized = np.empty_like(linearized)  # In the unitcell basis

    linearized[0, :] = xx.ravel()
    linearized[1, :] = yy.ravel()
    linearized[2, :] = zz.ravel()

    # Go to unitcell basis, where the cell is cubic of side length 1;
    # wrapping to the nearest image is then a simple round-to-nearest.
    np.dot(COB, linearized, out=ulinearized)
    ulinearized -= np.rint(ulinearized)
    np.dot(np.linalg.inv(COB), ulinearized, out=linearized)

    return np.reshape(np.linalg.norm(linearized, axis=0), xx.shape)
| 26.694534 | 86 | 0.63298 | 1,121 | 8,302 | 4.64496 | 0.219447 | 0.020741 | 0.00941 | 0.016132 | 0.25773 | 0.201844 | 0.172268 | 0.172268 | 0.149222 | 0.122719 | 0 | 0.028747 | 0.237413 | 8,302 | 310 | 87 | 26.780645 | 0.793713 | 0.540713 | 0 | 0.134831 | 0 | 0 | 0.018753 | 0 | 0 | 0 | 0 | 0.003226 | 0 | 1 | 0.11236 | false | 0 | 0.022472 | 0 | 0.269663 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cfa6ad3ae4dd2b7b0f171c22b61ad5f626a27dd6 | 675 | py | Python | app/common/models.py | chaos-soft/velvet | 71edabaa6e25308e76af82b76eb62c159d2b3368 | [
"MIT"
] | null | null | null | app/common/models.py | chaos-soft/velvet | 71edabaa6e25308e76af82b76eb62c159d2b3368 | [
"MIT"
] | null | null | null | app/common/models.py | chaos-soft/velvet | 71edabaa6e25308e76af82b76eb62c159d2b3368 | [
"MIT"
] | null | null | null | import json
from django.db import models
class JSONEncoder(json.JSONEncoder):
    """json.JSONEncoder that always emits non-ASCII characters verbatim
    (forces ensure_ascii=False regardless of what the caller passes)."""

    def __init__(self, *args, **kwargs):
        kwargs.update(ensure_ascii=False)
        super().__init__(*args, **kwargs)
class Document(models.Model):
    """Abstract base model storing a flexible JSON payload.

    On load, every key of ``document`` is copied onto the matching
    instance attribute, so subclasses can expose JSON fields as plain
    attributes. A key with no matching attribute raises KeyError.
    """
    document = models.JSONField(encoder=JSONEncoder, default=dict)
    # Re-initialized per-instance in __init__; the class-level None only
    # serves as a declaration.
    images = None

    class Meta:
        abstract = True

    def __init__(self, *args, **kwargs):
        self.images = []
        super().__init__(*args, **kwargs)
        if self.id:
            for k in self.document:
                if hasattr(self, k):
                    setattr(self, k, self.document[k])
                else:
                    # Name the offending key so the failure is diagnosable
                    # (was a bare `raise KeyError`).
                    raise KeyError(k)
| 24.107143 | 66 | 0.567407 | 73 | 675 | 5.013699 | 0.520548 | 0.10929 | 0.060109 | 0.081967 | 0.114754 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.317037 | 675 | 27 | 67 | 25 | 0.793926 | 0 | 0 | 0.2 | 0 | 0 | 0.017778 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.1 | 0 | 0.45 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cfa6f7d12ac60700054deb918bc90c4c2c0ba1fc | 25,987 | py | Python | wsgi/iportalen_django/events/views.py | I-sektionen/i-portalen | 1713e5814d40c0da1bf3278d60a561e7d3df3550 | [
"MIT"
] | 4 | 2016-09-21T17:06:01.000Z | 2018-02-06T16:36:44.000Z | wsgi/iportalen_django/events/views.py | I-sektionen/i-portalen | 1713e5814d40c0da1bf3278d60a561e7d3df3550 | [
"MIT"
] | 149 | 2016-03-07T23:50:47.000Z | 2022-03-11T23:16:33.000Z | wsgi/iportalen_django/events/views.py | I-sektionen/i-portalen | 1713e5814d40c0da1bf3278d60a561e7d3df3550 | [
"MIT"
] | 1 | 2016-03-07T23:02:06.000Z | 2016-03-07T23:02:06.000Z | from django.core.mail import send_mail
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.http.response import JsonResponse
from django.shortcuts import render, get_object_or_404, redirect
from django.forms import modelformset_factory
from django.contrib.auth.decorators import login_required, permission_required
from django.http import HttpResponseForbidden, HttpResponse
from django.core.exceptions import PermissionDenied, ObjectDoesNotExist
from django.contrib import messages
from django.utils import timezone
from django.db import transaction
import csv
from utils.validators import liu_id_validator
from .forms import EventForm, CheckForm, ImportEntriesForm, RejectionForm, AttachmentForm, \
ImageAttachmentForm, DeleteForm
from .models import Event, EntryAsPreRegistered, EntryAsReserve, EntryAsParticipant, OtherAttachment, \
ImageAttachment
from .exceptions import CouldNotRegisterException
from user_managements.models import IUser
from django.utils.translation import ugettext as _
# Create your views here.
from iportalen import settings
from utils.time import six_months_back
@login_required()
def summarise_noshow(request,pk):
    """Close an event and flag/penalize users who did not show up.

    Marks the event finished, flags every no-show registration, and
    e-mails (in Swedish) users who have now missed their 2nd event
    (warning) or 3rd event (suspension notice). Idempotent: does nothing
    beyond re-saving if the event is already finished. Organizer-only.
    """
    event = get_object_or_404(Event,pk=pk)
    if not event.can_administer(request.user):
        raise PermissionDenied
    if not event.finished:
        event.finished = True
        noshows = event.no_show
        # First pass: persist the no-show flag on each registration.
        for user in noshows:
            noshow = EntryAsPreRegistered.objects.get(event=event, user=user)
            noshow.no_show = True
            noshow.save()
        # Second pass: the counts below include the flags saved above.
        for user in noshows:
            if len(EntryAsPreRegistered.objects.get_noshow(user=user)) == 2:
                subject = "Du har nu missat ditt andra event"
                body = "<p>Hej du har missat 2 event som du har anmält dig på. Om du missar en tredje gång så blir vi tvungna att stänga av dig från " \
                       "framtida event fram tills ett halv år framåt.</p>"
                send_mail(subject, "", settings.EMAIL_HOST_USER, [user.email, ], fail_silently=False, html_message=body)
            elif len(EntryAsPreRegistered.objects.get_noshow(user=user)) == 3:
                subject = "Du har nu missat ditt tredje event"
                body = "<p>Hej igen du har missat 3 event som du har anmält dig på. Du kommer härmed att blir avstängd från " \
                       "framtida event fram tills ett halv år framåt. Ha en bra dag :)</p>"
                send_mail(subject, "", settings.EMAIL_HOST_USER, [user.email, ], fail_silently=False, html_message=body)
    event.save()
    return redirect("events:administer event", pk=pk)
def view_event(request, pk):
    """Display an event page; hidden while unapproved/expired unless the
    requesting user can administer the event."""
    event = get_object_or_404(Event, pk=pk)
    publicly_visible = event.status == Event.APPROVED and event.show_event_before_experation
    if publicly_visible or event.can_administer(request.user):
        return render(request, "events/event.html", {"event": event})
    raise PermissionDenied
@login_required()
def register_to_event(request, pk):
    """Register the current user on the event (POST only), reporting
    success or failure via the messages framework, then redirect back."""
    if request.method == "POST":
        event = get_object_or_404(Event, pk=pk)
        try:
            event.register_user(request.user)
        except CouldNotRegisterException as err:
            messages.error(request,
                           _("Fel, kunde inte registrera dig på ") + err.event.headline + _(" för att ") + err.reason + ".")
        else:
            messages.success(request, _("Du är nu registrerad på eventet."))
    return redirect("events:event", pk=pk)
@login_required()
@transaction.atomic
def import_registrations(request, pk):
    """Bulk-register users on an event from a newline-separated list of LiU ids.

    Organizer-only. Each id is registered individually; failures (could not
    register, unknown id) are reported via the messages framework without
    aborting the remaining registrations.
    """
    event = get_object_or_404(Event, pk=pk)
    if not event.can_administer(request.user):
        raise PermissionDenied
    if request.method == 'POST':
        form = ImportEntriesForm(request.POST)
        if form.is_valid():
            list_of_liu_id = form.cleaned_data['users'].splitlines()
            for liu_id in list_of_liu_id:
                try:
                    event.register_user(IUser.objects.get(username=liu_id))
                except CouldNotRegisterException as err:
                    messages.error(
                        request,
                        "".join([_("Fel, kunde inte registrera"),
                                 " {liu_id} ",
                                 _("på"),
                                 " {hedline} ",
                                 _("för att"),
                                 " {reason}."]).format(
                            liu_id=liu_id,
                            hedline=err.event.headline,
                            reason=err.reason))
                except ObjectDoesNotExist:
                    # BUG FIX: the template uses the named field {liu_id},
                    # so .format() needs a keyword argument — the previous
                    # positional call raised KeyError at runtime.
                    messages.error(request, "".join(["{liu_id} ", _("finns inte i databasen.")]).format(liu_id=liu_id))
    else:
        form = ImportEntriesForm()
    return render(request, "events/import_users.html", {'form': form})
@login_required()
def register_as_reserve(request, pk):
    """Put the current user on the event's reserve list (POST only) and
    report the queue position, then redirect back to the event."""
    if request.method == "POST":
        event = get_object_or_404(Event, pk=pk)
        entry = event.register_reserve(request.user)
        position = str(entry.position())
        messages.success(request,
                         _("Du är nu anmäld som reserv på eventet, du har plats nr. ") + position + ".")
    return redirect("events:event", pk=pk)
@login_required()
def administer_event(request, pk):
    """Render the administration page for an event; organizers only."""
    event = get_object_or_404(Event, pk=pk)
    if not event.can_administer(request.user):
        raise PermissionDenied  # Nope.
    form = DeleteForm(request.POST or None, request.FILES or None,)
    return render(request, 'events/administer_event.html',
                  {'event': event, 'form': form})
@login_required()
def preregistrations_list(request, pk):
    """List the pre-registrations for an event; organizers only."""
    event = get_object_or_404(Event, pk=pk)
    if not event.can_administer(request.user):
        raise PermissionDenied  # Nope.
    return render(request, 'events/event_preregistrations.html',
                  {'event': event})
@login_required()
def participants_list(request, pk):
    """Render the list of checked-in participants for an event; administrators only."""
    event = get_object_or_404(Event, pk=pk)
    if not event.can_administer(request.user):
        raise PermissionDenied
    return render(request, 'events/event_participants.html', {'event': event})
@login_required()
def speech_nr_list(request, pk):
    """Render the speech-number list for an event; administrators only."""
    event = get_object_or_404(Event, pk=pk)
    if not event.can_administer(request.user):
        raise PermissionDenied
    return render(request, 'events/event_speech_nr_list.html', {'event': event})
@login_required()
def reserves_list(request, pk):
    """Render the reserve list for an event; administrators only."""
    event = get_object_or_404(Event, pk=pk)
    if not event.can_administer(request.user):
        raise PermissionDenied
    return render(request, 'events/event_reserves.html', {
        'event': event,
        'event_reserves': event.reserves_object(),
    })
@login_required()
def check_in(request, pk):
    """Render the check-in page with an empty CheckForm; administrators only."""
    event = get_object_or_404(Event, pk=pk)
    if not event.can_administer(request.user):
        raise PermissionDenied
    # can_administer is necessarily True on this path.
    return render(request, 'events/event_check_in.html', {
        'form': CheckForm(), 'event': event, "can_administer": True,
    })
@login_required()
def check_in_api(request, pk):
    """
    AJAX endpoint that checks a user in to an event.

    POST with a CheckForm (LiU id or RFID number, optional force_check_in).
    Returns a JSON dict with "status" of "success" or "error" and a message.
    Raises PermissionDenied if the caller may not administer the event.
    """
    if request.method == 'POST':
        # BUG FIX: the original wrapped both the lookup and the permission
        # check in a bare `except:`, so the deliberate PermissionDenied was
        # swallowed and misreported as "no such event".  Only the lookup is
        # guarded now, and only against the missing-event case.
        try:
            event = Event.objects.get(pk=pk)
        except Event.DoesNotExist:
            return JsonResponse({"status": "error", "message": _("Inget event med detta idnummer.")})
        if not event.can_administer(request.user):
            raise PermissionDenied
        form = CheckForm(request.POST)
        if form.is_valid():
            form_user = form.cleaned_data["user"]
            # Resolve the input first as a username, then as an RFID number.
            try:
                event_user = IUser.objects.get(username=form_user)
            except ObjectDoesNotExist:
                try:
                    event_user = IUser.objects.get(rfid_number=form_user)
                except ObjectDoesNotExist:
                    # NOTE(review): this message says "no event" but the
                    # failure is "no such user" -- looks copy-pasted; kept
                    # byte-identical to avoid breaking translation catalogs.
                    return JsonResponse({"status": "error", "message": _("Inget event med detta idnummer.")})
            prereg = None
            try:
                # Preregistered
                prereg = EntryAsPreRegistered.objects.get(event=event, user=event_user)
            except ObjectDoesNotExist:
                try:
                    prereg = EntryAsReserve.objects.get(event=event, user=event_user)
                    if not form.cleaned_data["force_check_in"]:
                        return JsonResponse({"status": "error", "message": "".join(
                            ["{0} {1} ", _("är anmäld som reserv")]).format(
                            event_user.first_name.capitalize(), event_user.last_name.capitalize())})
                except ObjectDoesNotExist:
                    if not form.cleaned_data["force_check_in"]:
                        return JsonResponse({"status": "error", "message": "".join(
                            ["{0} {1} ", _("är inte anmäld på eventet")]).format(
                            event_user.first_name.capitalize(), event_user.last_name.capitalize())})
            # Refuse a second check-in for the same user.
            try:
                EntryAsParticipant.objects.get(event=event, user=event_user)
                return JsonResponse({"status": "error", "message": _("Redan incheckad.")})
            except ObjectDoesNotExist:
                pass
            participant = EntryAsParticipant(user=event_user, event=event)
            participant.add_speech_nr()
            participant.save()
            # Bump the speech number until it is unique within the event.
            while EntryAsParticipant.objects.filter(event=event, speech_nr=participant.speech_nr).count() > 1:
                participant.add_speech_nr()
                participant.save()
            if event.extra_deadline:
                # prereg is None for force-checked-in walk-ins, hence the
                # AttributeError guard (was a bare except in the original).
                try:
                    if prereg.timestamp < event.extra_deadline:
                        extra_str = _("<br>Anmälde sig i tid för att ") + event.extra_deadline_text + "."
                    else:
                        extra_str = _("<br><span class='errorlist'>Anmälde sig ej i tid för att ") + \
                                    event.extra_deadline_text + ".</span>"
                except (AttributeError, TypeError):
                    extra_str = ""
            else:
                extra_str = ""
            return JsonResponse({"status": "success", "message": "".join(
                ["{0} {1} ", _("checkades in med talarnummer:"), " {2}{3}"]).format(
                event_user.first_name.capitalize(),
                event_user.last_name.capitalize(),
                participant.speech_nr,
                extra_str)})
        return JsonResponse({"status": "error", "message": _("Fyll i Liu-id eller RFID.")})
    return JsonResponse({})
@login_required()
def all_unapproved_events(request):
    """List upcoming events awaiting approval or cancellation; reviewers only."""
    if not request.user.has_perm("events.can_approve_event"):
        raise PermissionDenied
    pending = Event.objects.filter(status=Event.BEING_REVIEWED, end__gte=timezone.now())
    pending_cancel = Event.objects.filter(status=Event.BEING_CANCELD, end__gte=timezone.now())
    return render(request, 'events/approve_event.html',
                  {'events': pending, 'events_to_delete': pending_cancel})
@login_required()
@transaction.atomic
def approve_event(request, event_id):
    """
    Approve a pending event, then return to the unapproved list.

    404s on an unknown id; PermissionDenied if approve() refuses the user.
    """
    # BUG FIX: Event.objects.get() raised an unhandled DoesNotExist (HTTP 500)
    # for unknown ids; get_object_or_404 matches the other views in this file.
    event = get_object_or_404(Event, pk=event_id)
    if event.approve(request.user):
        return redirect(reverse('events:unapproved'))
    raise PermissionDenied
@login_required()
def unapprove_event(request, pk):
    """
    Reject an event with a mandatory rejection message.

    404s on an unknown id; PermissionDenied if reject() refuses the user.
    """
    # BUG FIX: Event.objects.get() raised an unhandled DoesNotExist (HTTP 500)
    # for unknown ids; get_object_or_404 matches the other views in this file.
    event = get_object_or_404(Event, pk=pk)
    form = RejectionForm(request.POST or None)
    if request.method == 'POST':
        if form.is_valid():
            if event.reject(request.user, form.cleaned_data['rejection_message']):
                messages.success(request, _("Eventet har avslagits."))
                return redirect('events:unapproved')
            else:
                raise PermissionDenied
    return render(request, 'events/reject.html', {'form': form, 'event': event})
@login_required()
def CSV_view_participants(request, pk):
    """Download the event's participants (username, name, email) as a CSV attachment."""
    event = get_object_or_404(Event, pk=pk)
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="participants.txt"'
    writer = csv.writer(response)
    writer.writerow(['These are your participants:'])
    for person in event.participants:
        writer.writerow([person.username, person.first_name,
                         person.last_name, person.email])
    return response
@login_required()
def CSV_view_preregistrations(request, pk):
    """Download the event's preregistrations (username, name, email) as a CSV attachment."""
    event = get_object_or_404(Event, pk=pk)
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="preregistrations.txt"'
    writer = csv.writer(response)
    writer.writerow(['These are your preregistrations:'])
    for person in event.preregistrations:
        writer.writerow([person.username, person.first_name,
                         person.last_name, person.email])
    return response
@login_required()
def unregister(request, pk):
    """Deregister the logged-in user from an event (POST only), then redirect back."""
    if request.method == "POST":
        target = get_object_or_404(Event, pk=pk)
        try:
            target.deregister_user(request.user)
        except CouldNotRegisterException as err:
            messages.error(request,
                           "".join([_("Fel, kunde inte avregistrera dig på "),
                                    err.event.headline,
                                    _(" för att "),
                                    err.reason,
                                    "."]))
        else:
            messages.success(request, _("Du är nu avregistrerad på eventet."))
    return redirect("events:event", pk=pk)
def event_calender(request):
    """Render the calendar page template."""
    return render(request, "events/calender.html")
def event_calender_view(request):
    """Render all published events, ordered by start time, in the calendar view."""
    published = Event.objects.published().order_by('start')
    return render(request, "events/calendar_view.html", {'events': published})
@login_required()
def registered_on_events(request):
    """
    List the logged-in user's upcoming preregistrations and reserve entries.

    Only entries whose event has not yet ended are shown.
    """
    # FIX: evaluate "now" once -- the original called timezone.now() for every
    # entry, so the cutoff drifted while iterating.
    now = timezone.now()
    preregistrations_events = [
        entry for entry in EntryAsPreRegistered.objects.filter(user=request.user)
        if entry.event.end >= now]
    reserve_events = [
        entry for entry in EntryAsReserve.objects.filter(user=request.user)
        if entry.event.end >= now]
    return render(request, "events/registerd_on_events.html",
                  {"reserve_events": reserve_events,
                   "preregistrations_events": preregistrations_events})
@login_required()
def events_by_user(request):
    """Show the events belonging to the logged-in user."""
    return render(request, 'events/my_events.html', {
        'user_events': Event.objects.user(request.user),
    })
@login_required()
def create_or_modify_event(request, pk=None):  # TODO: Reduce complexity
    """
    Create a new event, or modify the event with primary key *pk*.

    Modifying an already-published event creates a replacement row
    (replacing_id points at the original).  Drafts are saved quietly;
    events sent for review trigger a notification mail.
    """
    if pk:  # pk given: modify an existing event
        duplicates = Event.objects.filter(replacing_id=pk)
        if duplicates:
            # join() instead of the original quadratic string += loop
            links = "".join(
                "<a href='{0}'>{1}</a><br>".format(d.get_absolute_url(), d.headline)
                for d in duplicates)
            messages.error(request,
                           "".join([_("Det finns redan en ändrad version av det här arrangemanget! "
                                      "Är du säker på att du vill ändra den här?<br>"
                                      "Följande ändringar är redan föreslagna: <br> "),
                                    "{:}"]).format(links),
                           extra_tags='safe')
        event = get_object_or_404(Event, pk=pk)
        if not event.can_administer(request.user):
            raise PermissionDenied
        form = EventForm(request.POST or None, request.FILES or None, instance=event)
    else:  # new event
        form = EventForm(request.POST or None, request.FILES or None)
    if request.method == 'POST':
        if form.is_valid():
            event = form.save(commit=False)
            # FIX: collapse the four-line if/else into a bool() conversion.
            draft = bool(form.cleaned_data['draft'])
            status = event.get_new_status(draft)
            event.status = status["status"]
            event.user = request.user
            if status["new"]:
                # Store as a fresh row that replaces the original event.
                event.replacing_id = event.id
                event.id = None
            event.save()
            form.save_m2m()
            if event.status == Event.DRAFT:
                messages.success(request, _("Dina ändringar har sparats i ett utkast."))
            elif event.status == Event.BEING_REVIEWED:
                # NOTE(review): mail text talks about "artiklar"/"Ny artikel"
                # (articles) although this is the events app -- looks
                # copy-pasted; confirm before changing the wording.
                body = "<h1>Hej!</h1><br><br><p>Det finns nya artiklar att godkänna på i-Portalen.<br><a href='https://www.i-portalen.se/article/unapproved/'>Klicka här!</a></p><br><br><p>Med vänliga hälsningar, <br><br>Admins @ webgroup"
                send_mail('Ny artikel att godkänna', '', settings.EMAIL_HOST_USER, ['infowebb@isektionen.se'], fail_silently=False, html_message=body)
                messages.success(request, _("Dina ändringar har skickats för granskning."))
            return redirect('events:by user')
        messages.error(request, _("Det uppstod ett fel, se detaljer nedan."))
    # Invalid POST and plain GET both fall through to the form page
    # (the original duplicated this render call in the error branch).
    return render(request, 'events/create_event.html', {'form': form})
@login_required()  # FIX: this was the only admin view without it; its image
                   # counterpart (upload_attachments_images) requires login.
def upload_attachments(request, pk):
    """
    Manage (add, replace, delete) file attachments for an event.

    Administrators only.  Uses a model formset over OtherAttachment with up
    to 30 entries, 3 blank extras and per-row delete.
    """
    event = get_object_or_404(Event, pk=pk)
    if not event.can_administer(request.user):
        raise PermissionDenied
    AttachmentFormset = modelformset_factory(OtherAttachment,
                                             form=AttachmentForm,
                                             max_num=30,
                                             extra=3,
                                             can_delete=True)
    existing = OtherAttachment.objects.filter(event=event)
    if request.method == 'POST':
        formset = AttachmentFormset(request.POST, request.FILES, queryset=existing)
        if formset.is_valid():
            for entry in formset.cleaned_data:
                if not entry:  # skip untouched extra rows (empty dicts)
                    continue
                if entry['DELETE']:
                    try:
                        entry['id'].delete()  # TODO: Remove the clear option from html-widget (or make it work).
                    except AttributeError:
                        # a blank extra row marked for deletion has id=None
                        pass
                else:
                    attachment = entry['id'] if entry['id'] else OtherAttachment(event=event)
                    attachment.file_name = entry['file'].name
                    attachment.file = entry['file']
                    attachment.display_name = entry['display_name']
                    attachment.modified_by = request.user
                    attachment.save()
            messages.success(request, 'Dina bilagor har sparats.')
            return redirect('events:manage attachments', pk=event.pk)
        return render(request, "events/attachments.html", {
            'event': event,
            'formset': formset,
        })
    formset = AttachmentFormset(queryset=existing)
    return render(request, "events/attachments.html", {
        'event': event,
        'formset': formset,
    })
@login_required()
def upload_attachments_images(request, pk):
    """
    Manage (add, replace, delete) image attachments for an event.

    Administrators only.  Uses a model formset over ImageAttachment with up
    to 30 entries, 3 blank extras and per-row delete.
    """
    event = get_object_or_404(Event, pk=pk)
    if not event.can_administer(request.user):
        raise PermissionDenied
    ImageFormset = modelformset_factory(ImageAttachment,
                                        form=ImageAttachmentForm,
                                        max_num=30,
                                        extra=3,
                                        can_delete=True)
    existing = ImageAttachment.objects.filter(event=event)
    if request.method == 'POST':
        formset = ImageFormset(request.POST, request.FILES, queryset=existing)
        if formset.is_valid():
            for item in formset.cleaned_data:
                if item == {}:  # untouched extra rows come back empty
                    continue
                if item['DELETE']:
                    try:
                        item['id'].delete()  # TODO: Remove the clear option from html-widget (or make it work).
                    except AttributeError:
                        # blank extra row marked for deletion has id=None
                        pass
                else:
                    image = item['id'] if item['id'] else ImageAttachment(event=event)
                    image.img = item['img']
                    image.caption = item['caption']
                    image.modified_by = request.user
                    image.save()
            messages.success(request, 'Dina bilagor har sparats.')
            return redirect('events:event', event.pk)
        return render(request, "events/attach_images.html", {
            'event': event,
            'formset': formset,
        })
    formset = ImageFormset(queryset=existing)
    return render(request, "events/attach_images.html", {
        'event': event,
        'formset': formset,
    })
@login_required()
def user_view(request, pk):
    """Event page for a checked-in participant; PermissionDenied for anyone else."""
    event = get_object_or_404(Event, pk=pk)
    # Existence check only -- the fetched entry itself is never used.
    try:
        EntryAsParticipant.objects.get(event=event, user=request.user)
    except EntryAsParticipant.DoesNotExist:
        raise PermissionDenied
    return render(request, "events/user_view.html", {'event': event})
def calendar_feed(request):
    """Serve all published events as a downloadable iCalendar (.ics) feed."""
    feed = render(request,
                  template_name='events/feed.ics',
                  context={'events': Event.objects.published()},
                  content_type='text/calendar; charset=UTF-8')
    feed['Filename'] = 'feed.ics'
    feed['Content-Disposition'] = 'attachment; filename=feed.ics'
    return feed
def personal_calendar_feed(request, liu_id):
    """Serve one user's events (looked up by LiU id) as an iCalendar (.ics) feed."""
    liu_user = get_object_or_404(IUser, username=liu_id)
    feed = render(request,
                  template_name='events/feed.ics',
                  context={'liu_user': liu_user,
                           'events': Event.objects.events_by_user(liu_user)},
                  content_type='text/calendar; charset=UTF-8')
    feed['Filename'] = 'feed.ics'
    feed['Content-Disposition'] = 'attachment; filename=feed.ics'
    return feed
@login_required()
@permission_required('events.can_view_no_shows')
def show_noshows(request):
    """
    Summarize recent no-shows grouped per user.

    Relies on order_by("user") putting equal users on adjacent rows; each
    summary dict holds the user, a count, and the individual entries.
    `six_months_back` comes from module scope (not visible in this chunk).
    """
    entries = EntryAsPreRegistered.objects.filter(
        no_show=True, timestamp__gte=six_months_back).order_by("user")
    result = []
    current = {"user": None, "count": 0, "no_shows": []}
    for entry in entries:
        if current["user"] == entry.user:
            current["count"] += 1
        else:
            if current["user"]:
                result.append(current)
            current = {"user": entry.user, "count": 1, "no_shows": []}
        current["no_shows"].append(entry)
    if current["user"]:
        result.append(current)
    return render(request, "events/show_noshows.html",
                  {"user": request.user, "no_shows": result})
@login_required()
@permission_required('events.can_remove_no_shows')
def remove_noshow(request):
    """
    AJAX endpoint that clears a single no-show flag.

    POST with user_id and event_id; answers a JSON status string.  Exactly
    one matching flagged entry must exist for the flag to be cleared.
    """
    if request.method == 'POST':
        # FIX: dropped the bare try/except here -- QueryDict.get() never
        # raises -- and removed a leftover debug print() of the queryset.
        user_id = request.POST.get('user_id')
        event_id = request.POST.get('event_id')
        matches = list(EntryAsPreRegistered.objects.filter(
            user_id=user_id, event_id=event_id, no_show=True))
        if len(matches) == 1:
            entry = matches[0]
            entry.no_show = False
            entry.save()
            return JsonResponse({'status': 'OK'})
        elif len(matches) == 0:
            return JsonResponse({'status': 'Ingen no show hittades'})
        else:
            return JsonResponse({'status': 'Error: fler än ett no show hittades'})
    return JsonResponse({'status': 'fel request'})
@login_required()
def cancel(request, pk=None):
    """
    Request cancellation of an event (administrators only).

    A valid POST marks the event BEING_CANCELD, stores the reason and mails
    the admins; an invalid POST redirects back with an error.  GET re-renders
    the administer page.
    """
    event = get_object_or_404(Event, pk=pk)
    if event.can_administer(request.user):
        # FIX: form and form_user could be referenced before assignment on the
        # non-POST / invalid-form paths (NameError); bind defaults up front.
        form = DeleteForm(request.POST or None)
        form_user = ""
        if request.method == 'POST':
            if form.is_valid():
                event.status = Event.BEING_CANCELD
                event.cancel_message = form.cleaned_data["cancel"]
                event.save()
                form_user = form.cleaned_data["cancel"]
                body = "<h1>Hej!</h1><br><br><p>Det finns nya event att ställa in på i-Portalen.<br><a href='https://www.i-portalen.se/article/unapproved/'>Klicka här!</a></p><br><br><p>Med vänliga hälsningar, <br><br>Admins @ webgroup" + form_user
                send_mail('Nytt event att ställa in', '', settings.EMAIL_HOST_USER, ['admin@isektionen.se'], fail_silently=False, html_message=body)
                messages.success(request, _("Dina ändringar har skickats för granskning."))
                # vill låsa radera knapp
            else:
                messages.error(request, _("Det har ej fyllts i varför eventet önskas raderas."))
                return redirect("events:administer event", pk=pk)
        # vill stanna kvar på sidan
        return render(request, 'events/administer_event.html',
                      {'event': event, 'form': form, 'form_user': form_user})
    raise PermissionDenied
| 41.118671 | 248 | 0.588833 | 2,787 | 25,987 | 5.334051 | 0.147112 | 0.023678 | 0.01453 | 0.038679 | 0.571842 | 0.508408 | 0.433741 | 0.404749 | 0.380802 | 0.348581 | 0 | 0.005414 | 0.303459 | 25,987 | 631 | 249 | 41.183835 | 0.815867 | 0.014161 | 0 | 0.501859 | 0 | 0.007435 | 0.156235 | 0.033169 | 0 | 0 | 0 | 0.001585 | 0 | 1 | 0.057621 | false | 0.005576 | 0.046468 | 0.001859 | 0.197026 | 0.001859 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cfa89d2739c826f494098053f56652fab2675cda | 411 | py | Python | loop_count_sum_average.py | bclead3/python_for_everyone | ccf72c335fe1e9b6419ccb34fc091c9520a69e5c | [
"MIT"
] | null | null | null | loop_count_sum_average.py | bclead3/python_for_everyone | ccf72c335fe1e9b6419ccb34fc091c9520a69e5c | [
"MIT"
] | null | null | null | loop_count_sum_average.py | bclead3/python_for_everyone | ccf72c335fe1e9b6419ccb34fc091c9520a69e5c | [
"MIT"
] | null | null | null | intNum = 0
# Read numbers from the console until 'done', then report count/total/average.
# (intNum is initialized to 0 on the preceding line.)
fltTotal = 0.0
while True:
    strVal = input('Enter a number: ')
    if strVal == 'done':
        break
    try:
        # Keep the try body minimal: only float() can raise here.
        fltVal = float(strVal)
    except ValueError:
        print('Invalid Input value, continuing...')
        continue
    intNum += 1
    fltTotal += fltVal
if intNum:
    print("The number of valid lines:{}, the total:{}, the average:{}".format(intNum, fltTotal, fltTotal / intNum))
else:
    # BUG FIX: typing 'done' before any valid number used to crash with
    # ZeroDivisionError when computing the average.
    print("No valid numbers were entered.")
| 21.631579 | 111 | 0.586375 | 47 | 411 | 5.12766 | 0.659574 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013605 | 0.284672 | 411 | 18 | 112 | 22.833333 | 0.806122 | 0 | 0 | 0 | 0 | 0 | 0.272506 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cfaad53b73796070da96bc425bbb924627d94d3e | 66,608 | py | Python | direfl/api/invert.py | TUM-E21-ThinFilms/direfl | ef60610b1653ab1a93840ec481a0eed3242fcfcc | [
"MIT"
] | null | null | null | direfl/api/invert.py | TUM-E21-ThinFilms/direfl | ef60610b1653ab1a93840ec481a0eed3242fcfcc | [
"MIT"
] | null | null | null | direfl/api/invert.py | TUM-E21-ThinFilms/direfl | ef60610b1653ab1a93840ec481a0eed3242fcfcc | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# This program is public domain
#
# Phase inversion author: Norm Berk
# Translated from Mathematica by Paul Kienzle
#
# Phase reconstruction author: Charles Majkrzak
# Converted from Fortran by Paul Kienzle
#
# Reflectivity calculation author: Paul Kienzle
#
# The National Institute of Standards and Technology makes no representations
# concerning this particular software and is not bound in any wy to correct
# possible errors or to provide extensions, upgrades or any form of support.
#
# This disclaimer must accompany any public distribution of this software.
# Note: save this file as invert to run as a stand-alone program.
"""
Core classes and functions:
* :class:`Interpolator`
Class that performs data interpolation.
* :class:`Inversion`
Class that implements the inversion calculator.
* :class:`SurroundVariation`
Class that performs the surround variation calculation.
* :func:`refl`
Reflectometry as a function of Qz and wavelength.
* :func:`reconstruct`
Phase reconstruction by surround variation magic.
* :func:`valid_f`
Calculate vector function using only the finite elements of the array.
Command line phase reconstruction phase inversion::
invert -u 2.07 -v 6.33 0 --Qmin 0.014 --thickness 1000 qrd1.refl qrd2.refl
Command line phase + inversion only::
invert --thickness=150 --Qmax 0.35 wsh02_re.dat
Scripts can use :func:`reconstruct` and :func:`invert`. For example:
.. doctest::
>>> from direfl.invert import reconstruct, invert
>>> substrate = 2.07
>>> f1, f2 = 0, -0.53
>>> phase = reconstruct("file1", "file2", substrate, f1, f2)
>>> inversion = invert(data=(phase.Q, phase.RealR), thickness=200)
>>> inversion.plot()
>>> inversion.save("profile.dat")
The resulting profile has attributes for the input (*Q*, *RealR*) and the
output (*z*, *rho*, *drho*). There are methods for plotting (*plot*,
*plot_residual*) and storing (*save*). The analysis can be rerun with
different attributes (*run(key=val, ...)*).
See :func:`reconstruct` and :class:`Inversion` for details.
The phase reconstruction algorithm is described in [Majkrzak2003]_. The
phase inversion algorithm is described in [Berk2009]_ and references therein.
It is based on the partial differential equation solver described
in [Sacks1993]_.
References
==========
.. [Majkrzak2003] C. F. Majkrzak, N. F. Berk and U. A. Perez-Salas,
"Phase-Sensitive Neutron Reflectometry", *Langmuir* 19, 7796-7810 (2003).
.. [Berk2009] N. F. Berk and C. F. Majkrzak, "Statistical analysis of
phase-inversion neutron specular reflectivity", *Langmuir* 25, 4132-4144 (2009).
.. [Sacks1993] P.E. Sacks, *Wave Motion* 18, 21-30 (1993).
"""
from __future__ import division, print_function
import os
from functools import reduce
import numpy as np
from numpy import (
pi, inf, nan, sqrt, exp, sin, cos, tan, log,
ceil, floor, real, imag, sign, isinf, isnan, isfinite,
diff, mean, std, arange, diag, isscalar)
from numpy.fft import fft
# The following line is temporarily commented out because Sphinx on Windows
# tries to document the three modules as part of inversion.api.invert when it
# should be skipping over them. The problem may be caused by numpy shipping
# these modules in a dll (mtrand.pyd) instead of in .pyc or .pyo files.
# Furthermore, Sphinx 1.0 generates non-fatal error messages when processing
# these imports and Sphinx 0.6.7 generates fatal errors and will not create the
# documentation. Sphinx on Linux does not exhibit these problems. The
# workaround is to use implicit imports in the functions or methods that use
# these functions.
#from numpy.random import uniform, poisson, normal
from .calc import convolve
from .util import isstr
# Custom colors
DARK_RED = "#990000"
# Common SLDs (scattering length densities, in units of 1e-6 / A^2; see the
# `substrate` parameter of Inversion for how these are used)
silicon = Si = 2.07
sapphire = Al2O3 = 5.0
water = H2O = -0.56
heavywater = D2O = 6.33
lightheavywater = HDO = 2.9 # 50-50 mixture of H2O and D2O
def invert(**kw):
    """
    Invert data returning an :class:`Inversion` object.

    If outfile is specified, save z, rho, drho to the named file.
    If plot=True, show a plot before returning
    """
    show_plot = kw.pop('plot', True)
    save_as = kw.pop('outfile', None)
    inverter = Inversion(**kw)
    inverter.run()
    if save_as is not None:
        inverter.save(save_as)
    if show_plot:
        import pylab
        inverter.plot()
        pylab.ginput(show_clicks=False)
    return inverter
class Inversion():
"""
Class that implements the inversion calculator.
This object holds the data and results associated with the direct inversion
of the real value of the phase from a reflected signal.
Inversion converts a real reflectivity amplitude as computed by
:func:`reconstruct` into a step profile of scattering length density
as a function of depth. This process will only work for real-valued
scattering potentials - with non-negligible absorption the results
will be incorrect. With X-rays, the absorption is too high for this
technique to be used successfully. For details on the underlying
theory, see [Berk2009]_.
The following attributes and methods are of most interest:
**Inputs:**
================= =========================================================
Input Parameters Description
================= =========================================================
*data* The name of an input file or a pair of vectors (Q, RealR)
where RealR is the real portion of the complex
reflectivity amplitude.input filename or Q, RealR data
(required).
*thickness* (400) Defines the total thickness of the film of interest. If
the value chosen is too small, the inverted profile will
not be able to match the input reflection signal. If
the thickness is too large, the film of interest should
be properly reconstructed, but will be extended into a
reconstructed substrate below the film.film thickness.
*substrate* (0) It is the scattering length density of the substrate. The
inversion calculation determines the scattering length
densities (SLDs) within the profile relative to the SLD
of the substrate. Entering the correct value of
substrate will shift the profile back to the correct
values.
*bse* (0) It is the bound state energy correction factor. Films
with large negative potentials at their base sometimes
produce an incorrect inversion, as indicated by an
incorrect value for the substrate portion of a film. A
value of substrate SLD - bound state SLD seems to correct
the reconstruction.
*Qmin* (0)        Minimum Q to use from data. Increase *Qmin* to avoid
                  values at low Q which will not have the correct phase
                  reconstruction when Q is less than Qc^2 for both surround
                  variation measurements used in the phase reconstruction
                  calculation. Use this technique sparingly --- the overall
                  shape of the profile is sensitive to data at low Q.
*Qmax* (None)     Maximum Q to use from data. Reduce *Qmax* to avoid
                  contamination from noise at high Q and improve precision.
                  However, doing this will reduce the size of the features
                  that you are sensitive to in your profile.
*backrefl* (True) Reflection measured through the substrate. It is True if
the film is measured with an incident beam through the
substrate rather than the surface.
================= =========================================================
**Uncertainty controls:**
Uncertainty is handled by averaging over *stages* inversions with noise
added to the input data for each inversion. Usually the measurement
uncertainty is estimated during data reduction and phase reconstruction,
and Gaussian noise is added to the data. This is scaled by a factor of
*noise* so the effects of noisier or quieter input are easy to estimate.
If the uncertainty estimate is not available, 5% relative noise per point
is assumed.
If *monitor* is specified, then Poisson noise is used instead, according to
the following::
*noise* U[-1, 1] (poisson(*monitor* |real R|)/*monitor* - |real R|)
That is, a value is pulled from the Poisson distribution of the expected
counts, and the noise is the difference between this and the actual counts.
This is further scaled by a fudge factor of *noise* and a further random
uniform in [-1, 1].
==================== =======================================================
Uncertainty controls Description
==================== =======================================================
*stages* (10)        number of inversions to average over
*noise* (1) noise scale factor
*monitor* (None) incident beam intensity (poisson noise source)
==================== =======================================================
**Inversion controls:**
=================== ========================================================
Inversions controls Description
=================== ========================================================
*rhopoints* (128) number of steps in the returned profile. If this value
is too low, the profile will be coarse. If it is too
high, the computation will take a long time. The
additional smoothness generated by a high value of
*rhopoints* is illusory --- the information content of
the profile is limited by the number of Q points which
have been measured. Set *rhopoints* to (1/*dz*) for a
step size near *dz* in the profile.
*calcpoints* (4) number of internal steps per profile step. It is used
internally to improve the accuracy of the calculation.
For larger values of *rhopoints*, smaller values of
*calcpoints* are feasible.
*iters* (6) number of iterations to use for inversion. A value of 6
seems to work well. You can observe this by setting
*showiters* to True and looking at the convergence of
each stage of the averaging calculation.
*showiters* (False) set to true to show inversion converging. Click the
graph to move to the next stage.
*ctf_window* (0) cosine transform smoothing. In practice, it is set to 0
for no smoothing.
=================== ========================================================
**Computed profile:**
The reflectivity computed from *z*, *rho* will not match the input data
because the effect of the substrate has been removed in the process of
reconstructing the phase. Instead, you will need to compute reflectivity
from *rho*-*substrate* on the reversed profile. This is done in
:meth:`refl` when no surround material is selected, and can be used to show
the difference between measured and inverted reflectivity. You may need to
increase *calcpoints* or modify *thickness* to get a close match.
====================== ===========================================================
Computed profile Description
====================== ===========================================================
*Qinput*, *RealRinput* input data. The input data *Qinput*, *RealRinput* need to
be placed on an even grid going from 0 to *Qmax* using
linear interpolation. Values below *Qmin* are set to
zero, and the number of points between *Qmin* and *Qmax*
is preserved. This resampling works best when the input
data are equally spaced, starting at k*dQ for some k.
*Q*, *RealR*, *dRealR* output data. The returned *Q*, *RealR*, *dRealR* are the
values averaged over multiple stages with added noise.
The plots show this as the range of input variation used
in approximating the profile variation.
*z* represents the depth into the profile. *z* equals
*thickness* at the substrate. If the thickness is correct,
then *z* will be zero at the top of the film, but in
practice the *thickness* value provided will be larger
than the actual film thickness, and a portion of the vacuum
will be included at the beginning of the profile.
*rho* It is the SLD at depth *z* in units of 10^-6 inv A^2. It
is calculated from the average of the inverted profiles
from the noisy data sets, and includes the correction for
the substrate SLD defined by *substrate*. The inverted
*rho* will contain artifacts from the abrupt cutoff in the
signal at *Qmin* and *Qmax*.
*drho* It is the uncertainty in the SLD profile at depth *z*. It
is calculated from the standard deviation of the inverted
profiles from the noisy data sets. The uncertainty *drho*
does not take into account the possible variation in the
signal above *Qmax*.
*signals* It is a list of the noisy (Q, RealR) input signals generated
by the uncertainty controls.
*profiles* It is a list of the corresponding (z, rho) profiles. The
first stage is computed without noise, so *signals[0]*
contains the meshed input and *profiles[0]* contains the
output of the inversion process without additional noise.
====================== ===========================================================
**Output methods:**
The primary output methods are
============== ===========================================================
Output methods Description
============== ===========================================================
*save* save the profile to a file.
*show* show the profile on the screen.
*plot* plot data and profile.
*refl* compute reflectivity from profile.
*run* run or rerun the inversion with new settings.
============== ===========================================================
**Additional methods for finer control of plots:**
=============== ===========================================================
Output methods Description
=============== ===========================================================
*plot_data* plot just the data.
*plot_profile* plot just the profile.
*plot_residual* plot data minus theory.
=============== ===========================================================
"""
# Global parameters for the class and their default values
# (each control is described in detail in the class docstring above)
substrate = 0          # substrate SLD; shifts the inverted profile
thickness = 400        # total film thickness to invert over
calcpoints = 4         # internal calculation steps per profile step
rhopoints = 128        # number of steps in the returned profile
Qmin = 0               # minimum Q taken from the data
Qmax = None            # maximum Q taken from the data (None = use all)
iters = 6              # number of inversion iterations
stages = 10            # noisy inversions averaged together
ctf_window = 0         # cosine transform smoothing (0 = none)
backrefl = True        # True if measured through the substrate
noise = 1              # noise scale factor
bse = 0                # bound state energy correction factor
showiters = False      # show inversion convergence when True
monitor = None         # incident beam intensity (Poisson noise source)
def __init__(self, data=None, **kw):
# Load the data
if isstr(data):
self._loaddata(data)
else: # assume it is a pair, e.g., a tuple, a list, or an Nx2 array
self._setdata(data)
# Run with current keywords
self._set(**kw)
def _loaddata(self, file):
"""
Load data from a file of Q, real(R), dreal(R).
"""
data = np.loadtxt(file).T
self._setdata(data, name=file)
def _setdata(self, data, name="data"):
"""
Set *Qinput*, *RealRinput* from Q, real(R) vectors.
"""
self.name = name
if len(data) == 3:
q, rer, drer = data
else:
q, rer = data
drer = None
# Force equal spacing by interpolation
self.Qinput, self.RealRinput = np.asarray(q), np.asarray(rer)
self.dRealRinput = np.asarray(drer) if drer is not None else None
def _remesh(self):
    """
    Return (Q, RealR, dRealR) resampled on an evenly spaced grid.

    Trims the data to [Qmin, Qmax], then interpolates onto an even grid
    running from 0 to the maximum measured Q, padding with zero outside
    the measured range.  The number of points between Qmin and Qmax is
    approximately preserved.  This works best when data are equally
    spaced to begin with, starting at k*dQ for some k.  The returned
    dRealR is None when no uncertainty was provided.
    """
    q, rer, drer = self.Qinput, self.RealRinput, self.dRealRinput
    if drer is None:
        drer = 0*rer
    # Trim from Qmin to Qmax
    if self.Qmin is not None:
        idx = q >= self.Qmin
        q, rer, drer = q[idx], rer[idx], drer[idx]
    if self.Qmax is not None:
        idx = q <= self.Qmax
        q, rer, drer = q[idx], rer[idx], drer[idx]
    # Resample on an evenly spaced grid, preserving approximately the
    # number of points between Qmin and Qmax.
    dq = (q[-1]-q[0])/(len(q) - 1)
    npts = int(q[-1]/dq + 1.5)
    # Keep the trimmed input grid: the uncertainty must be interpolated
    # from its original abscissae.  Remeshing [q, drer] after q has
    # already been replaced by the new grid (as previously done) pairs
    # drer with the wrong Q values — and raises in np.interp — whenever
    # the data do not start at Q=0.
    q_in = q
    q, rer = remesh([q_in, rer], 0, q_in[-1], npts, left=0, right=0)
    if self.dRealRinput is not None:
        _, drer = remesh([q_in, drer], 0, q_in[-1], npts, left=0, right=0)
    else:
        drer = None
    return q, rer, drer
def run(self, **kw):
    """
    Run multiple inversions with resynthesized data for each.

    All control keywords from the constructor can be used, except
    *data* and *outfile*.

    Sets *signals* to the list of noisy (Q, RealR) signals and sets
    *profiles* to the list of generated (z, rho) profiles.
    """
    from numpy.random import uniform, poisson, normal
    self._set(**kw)
    q, rer, drer = self._remesh()
    signals = []
    profiles = []
    # A single stage suffices when no noise is to be added.
    stages = self.stages if self.noise > 0 else 1
    for i in range(stages):
        if i == 0:
            # Use data noise for the first stage
            noisyR = rer
        elif self.monitor is not None:
            # Use incident beam as noise source
            pnoise = poisson(self.monitor*abs(rer))/self.monitor - abs(rer)
            unoise = uniform(-1, 1, rer.shape)
            noisyR = rer + self.noise*unoise*pnoise
        elif drer is not None:
            # Use gaussian uncertainty estimate as noise source
            # NOTE(review): normal(0, 1) draws a single scalar, so one
            # deviate scales every point of the stage; a per-point draw
            # would be normal(0, 1, rer.shape) — confirm intent.
            noisyR = rer + normal(0, 1)*self.noise*drer
        else:
            # Use 5% relative amplitude as noise source
            noisyR = rer + normal(0, 1)*self.noise*0.05*abs(rer)
        # Cosine transform of the noisy signal, then iterative inversion.
        ctf = self._transform(noisyR, Qmax=q[-1],
                              bse=self.bse, porder=1)
        qp = self._invert(ctf, iters=self.iters)
        if self.showiters: # Show individual iterations
            import pylab
            pylab.cla()
            for qpi in qp:
                pylab.plot(qpi[0], qpi[1])
            pylab.ginput(show_clicks=False)
        # Resample the final iterate onto the requested profile grid.
        z, rho = remesh(qp[-1], 0, self.thickness, self.rhopoints)
        if not self.backrefl:
            # Front reflectivity: present the profile surface-first.
            z, rho = z[::-1], rho[::-1]
        signals.append((q, noisyR))
        profiles.append((z, rho))
    self.signals, self.profiles = signals, profiles
def chisq(self):
    """
    Return the normalized sum of squared differences between the input
    real R and the real R computed from the inverted profile.

    Points whose uncertainty is essentially zero are excluded so the
    division by *dRealR* stays well defined.
    """
    # (Removed an unused `from numpy.random import normal` and a dead
    # commented-out print from the original.)
    idx = self.dRealR > 1e-15
    q, rer, drer = self.Q[idx], self.RealR[idx], self.dRealR[idx]
    rerinv = real(self.refl(q))
    return np.sum(((rer - rerinv)/drer)**2)/len(q)
# Computed attributes.
def _get_z(self):
"""Inverted SLD profile depth in Angstroms"""
return self.profiles[0][0]
def _get_rho(self):
"""Inverted SLD profile in 10^-6 * inv A^2 units"""
rho = mean([p[1] for p in self.profiles], axis=0) + self.substrate
return rho
def _get_drho(self):
"""Inverted SLD profile uncertainty"""
drho = std([p[1] for p in self.profiles], axis=0)
return drho
def _get_Q(self):
"""Inverted profile calculation points"""
return self.signals[0][0]
def _get_RealR(self):
"""Average inversion free film reflectivity input"""
return mean([p[1] for p in self.signals], axis=0)
def _get_dRealR(self):
"""Free film reflectivity input uncertainty"""
return std([p[1] for p in self.signals], axis=0)
z = property(_get_z)
rho = property(_get_rho)
drho = property(_get_drho)
Q = property(_get_Q)
RealR = property(_get_RealR)
dRealR = property(_get_dRealR)
def show(self):
    """Print z, rho, drho to the screen."""
    header = "# %9s %11s %11s"%("z", "rho", "drho")
    print(header)
    row_format = "%11.4f %11.4f %11.4f"
    for row in zip(self.z, self.rho, self.drho):
        print(row_format%row)
def save(self, outfile=None):
    """
    Save z, rho, drho to a three column text file named *outfile*.

    **Parameters:**
        *outfile:* file
            If *outfile* is not provided, the name of the input file
            will be used, but with the extension replaced by '.amp'.

    **Returns:**
        *None*
    """
    if outfile is None:
        basefile = os.path.splitext(os.path.basename(self.name))[0]
        outfile = basefile+os.extsep+"amp"
    # Context manager closes the file even if savetxt raises; the
    # original leaked the handle on error.
    with open(outfile, "w") as fid:
        fid.write("# Z Rho dRho\n")
        np.savetxt(fid, np.array([self.z, self.rho, self.drho]).T)
def refl(self, Q=None, surround=None):
    """
    Return the complex reflectivity amplitude for the inverted profile.

    **Parameters:**
        *Q:* vector or None
            Use *Q* if provided, otherwise use the evenly spaced Q
            values used for the inversion.
        *surround:* float or None
            If *surround* is provided, compute the reflectivity for the
            free film in the context of the substrate and the surround,
            otherwise compute the reflectivity of the reversed free film
            embedded in the substrate to match against the reflectivity
            amplitude supplied as input.

    **Returns:**
        *r* complex vector of reflectivity amplitudes
    """
    if Q is None:
        Q = self.Q
    if self.backrefl:
        # Back reflectivity is equivalent to -Q inputs
        Q = -Q
    if surround is None:
        # Phase reconstructed free film reflectivity is reversed,
        # and has an implicit substrate in front and behind; negating
        # Q a second time selects the reversed film.
        surround = self.substrate
        Q = -Q
    # Zero-width incident medium and substrate cap the layer stack.
    dz = np.hstack((0, diff(self.z), 0))
    rho = np.hstack((surround, self.rho[1:], self.substrate))
    r = refl(Q, dz, rho)
    return r
def plot(self, details=False, phase=None):
    """
    Plot the data and the inversion.

    **Parameters:**
        *details:* boolean
            If *details* is True, then plot the individual stages used
            to calculate the average, otherwise just plot the envelope.
        *phase:* phase reconstruction object or None
            If provided, also plot the original measurements.

    **Returns:**
        *None*
    """
    import pylab
    if phase:
        # Left column: measured reflectivities and the imaginary part.
        pylab.subplot(221)
        phase.plot_measurement(profile=(self.z, self.rho))
        pylab.subplot(223)
        phase.plot_imaginary()
    # Right column when phase is shown, otherwise the whole figure.
    if phase:
        profile_panel, input_panel = 222, 224
    else:
        profile_panel, input_panel = 211, 212
    pylab.subplot(profile_panel)
    self.plot_profile(details=details)
    pylab.subplot(input_panel)
    self.plot_input(details=details)
def plot6(self, details=False, phase=None):
    """Six-panel alternative to :meth:`plot` for evaluation purposes."""
    import pylab
    if phase:
        # Left column: measurement, imaginary part and phase.
        pylab.subplot(321)
        phase.plot_measurement(profile=(self.z, self.rho))
        pylab.subplot(323)
        phase.plot_imaginary()
        pylab.subplot(325)
        phase.plot_phase()
    panels = (322, 324, 326) if phase else (311, 312, 313)
    pylab.subplot(panels[0])
    self.plot_profile(details=details)
    pylab.subplot(panels[1])
    self.plot_input(details=details)
    pylab.subplot(panels[2])
    self.plot_residual()
def plot_input(self, details=False, lowQ_inset=0):
    """
    Plot the real R vs. the real R computed from inversion.

    **Parameters**
        *details:* boolean
            If *details* is True, then plot the individual stages used
            to calculate the average, otherwise just plot the envelope.
        *lowQ_inset:* integer
            If *lowQ_inset* > 0, then plot a graph of Q, real R values
            below lowQ_inset, without scaling by Q**2.

    **Returns:**
        *None*
    """
    from matplotlib.font_manager import FontProperties
    import pylab
    if details:
        # Overlay every noisy stage on top of the measured input.
        plotamp(self.Qinput, self.RealRinput)
        for p in self.signals:
            plotamp(self.Q, p[1])
    else:
        # Envelope of the stages, the measured input, and the theory
        # computed from the inverted profile.
        plotamp(self.Q, self.RealR, dr=self.dRealR, label=None,
                linestyle='', color="blue")
        plotamp(self.Qinput, self.RealRinput, label="Input",
                color="blue")
        Rinverted = real(self.refl(self.Qinput))
        plotamp(self.Qinput, Rinverted, color=DARK_RED, label="Inverted")
        pylab.legend(prop=FontProperties(size='medium'))
        chisq = self.chisq() # Note: cache calculated profile?
        pylab.text(0.01, 0.01, "chisq=%.1f"%chisq,
                   transform=pylab.gca().transAxes,
                   ha='left', va='bottom')
        if lowQ_inset > 0:
            # Low Q inset
            orig = pylab.gca()
            box = orig.get_position()
            # NOTE(review): 'axisbg' was removed in matplotlib 2.x in
            # favor of 'facecolor' — confirm the supported version.
            ax = pylab.axes([box.xmin+0.02, box.ymin+0.02,
                             box.width/4, box.height/4],
                            axisbg=[0.95, 0.95, 0.65, 0.85])
            ax.plot(self.Qinput, self.RealRinput, color="blue")
            ax.plot(self.Qinput, Rinverted)
            ax.text(0.99, 0.01, "Q, Real R for Q<%g"%lowQ_inset,
                    transform=ax.transAxes, ha='right', va='bottom')
            qmax = lowQ_inset
            ymax = max(max(self.RealRinput[self.Qinput < qmax]),
                       max(Rinverted[self.Qinput < qmax]))
            pylab.setp(ax, xticks=[], yticks=[],
                       xlim=[0, qmax], ylim=[-1, 1.1*(ymax+1)-1])
            pylab.axes(orig)
    plottitle('Reconstructed Phase')
def plot_profile(self, details=False, **kw):
    """
    Plot the computed profiles.

    **Parameters:**
        *details:* boolean
            If *details* is True, then plot the individual stages used
            to calculate the average, otherwise just plot the envelope.

    **Returns:**
        *None*
    """
    import pylab
    pylab.grid(True)
    if details:
        # One curve per Monte Carlo stage, offset by the substrate SLD.
        for p in self.profiles:
            pylab.plot(p[0], p[1]+self.substrate)
    else:
        # Mean profile with a +/- one standard deviation band.
        z, rho, drho = self.z, self.rho, self.drho
        [h] = pylab.plot(z, rho, color=DARK_RED, **kw)
        pylab.fill_between(z, rho-drho, rho+drho,
                           color=h.get_color(), alpha=0.2)
        #pylab.plot(z, rho+drho, '--', color=h.get_color())
        #pylab.plot(z, rho-drho, '--', color=h.get_color())
    # Label which side of the plot is which.
    pylab.text(0.01, 0.01, 'surface',
               transform=pylab.gca().transAxes,
               ha='left', va='bottom')
    pylab.text(0.99, 0.01, 'substrate',
               transform=pylab.gca().transAxes,
               ha='right', va='bottom')
    pylab.ylabel('SLD (inv A^2)')
    pylab.xlabel('Depth (A)')
    plottitle('Depth Profile')
def plot_residual(self, details=False):
    """
    Plot the residuals (inversion minus input), scaled by Q**2.

    *details* is accepted for signature compatibility with the other
    plotting methods; the residual is always computed from the average
    inversion.
    """
    import pylab
    Q, RealR = self.Qinput, self.RealRinput
    amplitude = self.refl(Q)
    residual = Q**2*(real(amplitude)-RealR)
    pylab.plot(Q, residual)
    pylab.ylabel('Residuals [Q^2 * (Real R - input)]')
    pylab.xlabel("Q (inv A)")
    plottitle('Phase Residuals')
def _set(self, **kw):
"""
Set a group of attributes.
"""
for k, v in kw.items():
if hasattr(self, k):
setattr(self, k, v)
else:
raise ValueError("Invalid keyword argument for Inversion class")
self.rhoscale = 1e6 / (4 * pi * self.thickness**2)
def _transform(self, RealR, Qmax=None, bse=0, porder=1):
    """
    Returns the cosine transform function used by inversion.

    *bse* is bound-state energy, with units of 10^-6 inv A^2. It was
    used in the past to handle profiles with negative SLD at the
    beginning, but the plain correction of bse=0 has since been found
    to be good enough for the profiles we are looking at. *porder* is
    the order of the interpolating polynomial, which must be 1 for the
    current interpolation class.
    """
    if not 0 <= porder <= 6:
        raise ValueError("Polynomial order must be between 0 and 6")
    npts = len(RealR)
    dK = 0.5 * Qmax / npts                # spacing of the data in kz
    kappa = sqrt(bse*1e-6)                # bound-state decay constant
    dx = self.thickness/self.rhopoints    # depth grid spacing
    xs = dx*arange(2*self.rhopoints)
    dim = int(2*pi/(dx*dK))               # FFT length for this spacing
    if dim < len(xs):
        raise ValueError("Q spacing is too low for the given thickness")
    # 1/sqrt(dim) is the normalization convention for Mathematica FFT
    ct = real(fft(RealR, dim)/sqrt(dim))
    convertfac = 2*dK/pi * sqrt(dim) * self.thickness
    ctdatax = convertfac * ct[:len(xs)] # * rhoscale
    ## PAK <--
    ## Mathematica guarantees that the interpolation function
    ## goes through the points, so Interpolator(xs, ctall)(xs)
    ## is just the same as ctall, and so newctall is just ctdatax.
    ## Furthermore, "ctf[x_] := newctif[x]" is an identity transform
    ## and is not necessary. In the end, we only need one
    ## interpolator plus the correction for ctf[0] == 0.
    #ctall = ctdatax
    #ctif = Interpolation(xs, ctall, InterpolationOrder -> porder)
    #newctall = ctif(xs)
    #newctif = Interpolation(xs, newctall, InterpolationOrder -> porder)
    #ctf[x_] := newctif[x]
    # This is the uncorrected Cosine Transform
    #newctf[x_] := ctf[x] - exp(-kappa*x) * ctf[0]
    # This is the boundstate-corrected Cosine Transform
    ## PAK -->
    # This is the uncorrected Cosine Transform
    raw_ctf = Interpolator(xs, ctdatax, porder=porder)
    # This is the boundstate-corrected Cosine Transform
    ctf = lambda x: raw_ctf(x) - exp(-kappa*x) * raw_ctf(0)
    return ctf
def _invert(self, ctf, iters):
    """
    Perform the inversion.

    *ctf* is the cosine transform function from :meth:`_transform` and
    *iters* is the number of refinement passes.  Returns the list *qp*
    of (z, rho) iterates: the first entry is the direct estimate and
    each subsequent entry is one refinement; qp[-1] is the final
    profile.
    """
    # Dimensionless depth grid on [0, 2).
    dz = 2/(self.calcpoints*self.rhopoints)
    x = arange(0, ceil(2/dz))*dz
    maxm = len(x)
    # Force an odd point count so the midpoint index mx is exact.
    if maxm%2 == 0:
        maxm += 1
    mx = int(maxm/2+0.5)
    h = 2/(2*mx-3)
    # Sampled transform, zero padded beyond the film.
    g = np.hstack((ctf(x[:-1]*self.thickness), 0, 0, 0))
    q = 2 * diff(g[:-2])/h
    q[-1] = 0
    ut = arange(2*mx-2)*h*self.thickness/2
    if self.ctf_window > 0:
        # Smooth ctf with 3-sample approximation
        du = self.ctf_window*h*self.thickness/2
        qinter = Interpolator(ut, q, porder=1)
        q = (qinter(ut - du) + qinter(ut) + qinter(ut + du))/3
    q = np.hstack((q, 0))
    # First entry: the direct (uniterated) profile estimate.
    qp = [(ut, -2*q*self.rhoscale)]
    Delta = np.zeros((mx, 2*mx), 'd')
    for iter in range(iters):
        # Fill Delta row by row from the two previous rows; this is the
        # successive-approximation step of the inversion.
        for m in range(2, mx):
            n = np.array(range(m, 2*mx-(m+1)))
            Delta[m, n] = (
                h**2 * q[m-1] * (g[m+n] + Delta[m-1, n])
                + Delta[m-1, n+1] + Delta[m-1, n-1] - Delta[m-2, n])
        udiag = -g[:2*mx-2:2] - diag(Delta)[:mx-1]
        mup = len(udiag) - 2
        # Note: h and ut are rebound here, so later passes use the
        # refined grid spacing.
        h = 1/mup
        ut = arange(mup)*h*self.thickness
        q = 2 * diff(udiag[:-1])/h
        qp.append((ut, self.rhoscale*q))
        # Zero pad for the next pass through the recurrence.
        q = np.hstack((q, 0, 0))
    return qp
def plottitle(title):
    """Draw *title* as text just above the current axes."""
    import pylab
    # Drawn as plain text rather than a title object so it stays close
    # to the plot when the window is resized, and placed above the axes
    # so that the legend does not overlap it.
    axes = pylab.gca()
    pylab.text(0.5, 1.07, title, fontsize='medium',
               transform=axes.transAxes,
               ha='center', va='top', backgroundcolor=(0.9, 0.9, 0.6))
def plotamp(Q, r, dr=None, scaled=True, ylabel="Real R", **kw):
    """
    Plot Q, real R data.

    **Parameters:**
        *Q, r:* vectors
            Amplitude data to plot.
        *dr:* vector or None
            Optional uncertainty, drawn as a shaded band.
        *scaled:* boolean
            If True, scale the amplitude by (100 Q)**2.
        *ylabel:* string
            Axis label for the unscaled amplitude.
    """
    import pylab
    scale = 1e4*Q**2 if scaled else 1
    if scaled:
        ylabel = "(100 Q)^2 "+ylabel
    [h] = pylab.plot(Q, scale*r, **kw)
    if dr is not None:
        pylab.fill_between(Q, scale*(r-dr), scale*(r+dr),
                           color=h.get_color(), alpha=0.2)
    pylab.ylabel(ylabel)
    # Raw string: "\A" in a plain literal is an invalid escape sequence
    # (DeprecationWarning, future SyntaxError); the TeX markup is the
    # same either way.
    pylab.xlabel(r"Q $[\AA^{-1}]$")
class Interpolator():
    """
    Piecewise-linear interpolation function built from pairs (xi, yi).

    Calling the instance evaluates the interpolant at *x* using
    :func:`numpy.interp`.  Only polynomial order 1 is supported.
    """
    def __init__(self, xi, yi, porder=1):
        n_x, n_y = len(xi), len(yi)
        if n_x != n_y:
            raise ValueError("xi:%d and yi:%d must have the same length"
                             %(n_x, n_y))
        self.xi = xi
        self.yi = yi
        self.porder = porder
        # Only linear interpolation is implemented; higher orders would
        # need a different scheme.
        if porder != 1:
            raise NotImplementedError(
                "Interpolator only supports polynomial order of 1")
    def __call__(self, x):
        return np.interp(x, self.xi, self.yi)
def phase_shift(q, r, shift=0):
    """Apply the linear phase factor exp(i*shift*q) to the amplitude *r*."""
    phase = exp(1j*shift*q)
    return r*phase
def remesh(data, xmin, xmax, npts, left=None, right=None):
    """
    Resample (x, y) data onto an even grid of *npts* points on
    [xmin, xmax], using linear interpolation.

    *left* and *right* give the values to use outside the range of x
    (passed through to :func:`numpy.interp`).  If fewer than *npts*
    finite points are available, the grid is shrunk to match.

    Returns a 2 x npts array of the new x and interpolated y values.
    """
    x, y = data
    x, y = np.asarray(x), np.asarray(y)
    # Drop non-finite points pairwise.  Masking x and y independently
    # (as previously done) leaves the vectors misaligned — and of
    # different lengths — when only one member of a pair is bad, which
    # makes np.interp raise.
    keep = isfinite(x) & isfinite(y)
    x, y = x[keep], y[keep]
    if npts > len(x):
        npts = len(x)
    newx = np.linspace(xmin, xmax, npts)
    newy = np.interp(newx, x, y, left=left, right=right)
    return np.array((newx, newy))
# This program is public domain.
# Author: Paul Kienzle
"""
Optical matrix form of the reflectivity calculation.
O.S. Heavens, Optical Properties of Thin Solid Films
"""
def refl(Qz, depth, rho, mu=0, wavelength=1, sigma=0):
    """
    Reflectometry as a function of Qz and wavelength.

    **Parameters:**
        *Qz:* float|A
            Scattering vector 4*pi*sin(theta)/wavelength. This is an
            array; a scalar is promoted to a one-element array.
        *depth:* float|A
            Thickness of each layer. The thickness of the incident
            medium and substrate are ignored.
        *rho, mu (uNb):* (float, float)|
            Scattering length density and absorption of each layer.
        *wavelength:* float|A
            Incident wavelength (angstrom).
        *sigma:* float|A
            Interfacial roughness. This is the roughness between a layer
            and the subsequent layer. There is no interface associated
            with the substrate. The sigma array should have at least n-1
            entries, though it may have n with the last entry ignored.

    **Returns:**
        *r* complex array of reflectivity amplitudes, one per Qz.
    """
    if isscalar(Qz):
        Qz = np.array([Qz], 'd')
    n = len(rho)
    nQ = len(Qz)
    # Make everything into arrays
    kz = np.asarray(Qz, 'd')/2
    depth = np.asarray(depth, 'd')
    rho = np.asarray(rho, 'd')
    mu = mu*np.ones(n, 'd') if isscalar(mu) else np.asarray(mu, 'd')
    wavelength = wavelength*np.ones(nQ, 'd') \
        if isscalar(wavelength) else np.asarray(wavelength, 'd')
    sigma = sigma*np.ones(n-1, 'd') if isscalar(sigma) else np.asarray(sigma, 'd')
    # Scale units
    rho = rho*1e-6
    mu = mu*1e-6
    ## For kz < 0 we need to reverse the order of the layers
    ## Note that the interface array sigma is conceptually one
    ## shorter than rho, mu so when reversing it, start at n-1.
    ## This allows the caller to provide an array of length n
    ## corresponding to rho, mu or of length n-1.
    idx = (kz >= 0)
    r = np.empty(len(kz), 'D')
    r[idx] = _refl_calc(kz[idx], wavelength[idx], depth, rho, mu, sigma)
    r[~idx] = _refl_calc(
        abs(kz[~idx]), wavelength[~idx],
        depth[-1::-1], rho[-1::-1], mu[-1::-1],
        sigma[n-2::-1])
    r[abs(kz) < 1.e-6] = -1 # reflectivity at kz=0 is -1
    return r
def _refl_calc(kz, wavelength, depth, rho, mu, sigma):
    """
    Abeles matrix calculation.

    Accumulates the 2x2 characteristic matrix B over the layer stack
    and returns the reflectivity amplitude r = B12/B11 for each kz.
    An empty *kz* is returned unchanged.
    """
    if len(kz) == 0:
        return kz
    ## Complex index of refraction is relative to the incident medium.
    ## We can get the same effect using kz_rel^2 = kz^2 + 4*pi*rho_o
    ## in place of kz^2, and ignoring rho_o.
    kz_sq = kz**2 + 4*pi*rho[0]
    k = kz
    # According to Heavens, the initial matrix should be [ 1 F; F 1],
    # which we do by setting B=I and M0 to [1 F; F 1]. An extra matrix
    # multiply versus some coding convenience.
    B11 = 1
    B22 = 1
    B21 = 0
    B12 = 0
    for i in range(0, len(rho)-1):
        # Wavevector in the next layer, including absorption.
        k_next = sqrt(kz_sq - (4*pi*rho[i+1] + 2j*pi*mu[i+1]/wavelength))
        # Fresnel coefficient for the interface, with the Nevot-Croce
        # roughness factor exp(-2 k k' sigma^2).
        F = (k - k_next) / (k + k_next)
        F *= exp(-2*k*k_next*sigma[i]**2)
        # Propagation phase through layer i (no phase for the incident
        # medium, i == 0).
        M11 = exp(1j*k*depth[i]) if i > 0 else 1
        M22 = exp(-1j*k*depth[i]) if i > 0 else 1
        M21 = F*M11
        M12 = F*M22
        # B = B * M, expanded element by element.
        C1 = B11*M11 + B21*M12
        C2 = B11*M21 + B21*M22
        B11 = C1
        B21 = C2
        C1 = B12*M11 + B22*M12
        C2 = B12*M21 + B22*M22
        B12 = C1
        B22 = C2
        k = k_next
    r = B12/B11
    return r
def reconstruct(file1, file2, u, v1, v2, stages=100):
    r"""
    Two reflectivity measurements of a film with different surrounding
    media :math:`|r_1|^2` and :math:`|r_2|^2` can be combined to compute
    the expected complex reflection amplitude r_reversed of the free
    standing film measured from the opposite side. The calculation can
    be done by varying the fronting media or by varying the backing
    media. For this code we only support measurements through a uniform
    substrate *u*, on two varying surrounding materials *v1*, *v2*.

    We have to be careful about terminology. We will use the term
    substrate to mean the base on which we deposit our film of interest,
    and surface to be the material we put on the other side. The
    fronting or incident medium is the material through which the beam
    enters the sample. The backing material is the material on the other
    side. In back reflectivity, the fronting material is the substrate
    and the backing material is the surface. We are using u for the
    uniform substrate and v for the varying surface material.

    In the experimental setup at the NCNR, we have a liquid reservoir
    which we can place above the film. We measure first with one liquid
    in the reservoir such as heavy water (D2O) and again with air or a
    contrasting liquid such as water (H2O). At approximately 100 um, the
    reservoir depth is much thicker than the effective coherence length
    of the neutron in the z direction, and so can be treated as a
    semi-infinite substrate, even when it is empty.

    .. Note:: You cannot simulate a semi-infinite substrate using a
       large but finitely thick material using the reflectometry
       calculation; at best the resulting reflection will be a high
       frequency signal which smooths after applying the resolution
       correction to a magnitude that is twice the reflection from a
       semi-infinite substrate.

    The incident beam is measured through the substrate, and thus
    subject to the same absorption as the reflected beam. Refraction on
    entering and leaving the substrate is accounted for by a small
    adjustment to Q inside the reflectivity calculation.

    When measuring reflectivity through the substrate, the beam enters
    the substrate from the side, refracts a little because of the steep
    angle of entry, reflects off the sample, and leaves through the
    other side of the substrate with an equal but opposite refraction.
    The reflectivity calculation takes this into account. Traveling
    through several centimeters of substrate, some of the beam will get
    absorbed. We account for this either by entering an incident medium
    transmission coefficient in the reduction process, or by measuring
    the incident beam through the substrate so that it is subject to
    approximately the same absorption.

    The phase cannot be properly computed for Q values which are below
    the critical edge Qc^2 for both surround variations. This problem
    can be avoided by choosing a substrate which is smaller than the
    surround on at least one of the measurements. This measurement will
    not have a critical edge at positive Q. In order to do a correct
    footprint correction the other measurement should use a substrate
    SLD greater than the surround SLD.

    If the input file records uncertainty in the measurement, we perform
    a Monte Carlo uncertainty estimate of the reconstructed complex
    amplitude.

    **Inputs:**

    ================ =============================================================
    Input parameters Description
    ================ =============================================================
    *file1*, *file2* reflectivity measurements at identical Q values. *file1*
                     and *file2* can be pairs of vectors (q1, r1), (q2, r2) or
                     files containing at least two columns (q, r), with the
                     remaining columns such as dr, dq, and lambda ignored. If a
                     third vector, dr, is present in both datasets, then an
                     uncertainty estimate will be calculated for the
                     reconstructed phase.
    *v1*, *v2*       SLD of varying surrounds in *file1* and *file2*
    *u*              SLD of the uniform substrate
    *stages*         number of trials in Monte Carlo uncertainty estimate
    ================ =============================================================

    Returns a :class:`SurroundVariation` object with the following attributes:

    ================== =========================================
    Attributes         Description
    ================== =========================================
    *RealR*, *ImagR*   real and imaginary reflectivity
    *dRealR*, *dImagR* Monte Carlo uncertainty estimate
    *name1*, *name2*   names of the input files
    *save(file)*       save Q, RealR, ImagR to a file
    *show()*, *plot()* display the results
    ================== =========================================

    **Notes:**

    There is a question of how beam effects (scale, background,
    resolution) will show up in the phase reconstruction. To understand
    this we can play with the reverse problem applying beam effects
    (intensity=A, background=B, resolution=G) to the reflectivity
    amplitude $r$ such that the computed $|r|^2$ matches the measured
    $R = A G*|r|^2 + B$, where $*$ is the convolution operator.

    There is a reasonably pretty solution for intensity and background:
    set $s = r \surd A + i r \surd B / |r|$ so that
    $|s|^2 = A |r|^2 + |r|^2 B/|r|^2 = A |r|^2 + B$. Since $r$ is
    complex, the intensity and background will show up in both real and
    imaginary channels of the phase reconstruction.

    It is not so pretty for resolution since the sum of the squares does
    not match the square of the sum:

    .. math::

        G * |r|^2 = \int G(q'-q)|r(q)|^2 dq \ne |\int G(q'-q)r(q)dq|^2 = |G*r|^2

    This is an area that may have been investigated in the 90's when the
    theory of neutron phase reconstruction and inversion was developing,
    but this reconstruction code does not do anything to take resolution
    into account. Given that we know $\Delta q$ for each measured $R$ we
    should be able to deconvolute using a matrix approximation to the
    integral:

    .. math::

        R = G R' \Rightarrow R' = G^{-1} R

    where each row of $G$ is the gaussian weights $G(q_k - q)$ with
    width $\Delta q_k$ evaluated at all measured points $q$. Trying this
    didn't produce a useful (or believable) result. Maybe it was a
    problem with the test code, or maybe it is an effect of applying an
    ill-conditioned linear operator over data that varies by orders of
    magnitude. So question: are there techniques for deconvoluting
    reflectivity curves?

    Going the other direction, we can apply a resolution function to
    $Re(r)$ and $Im(r)$ to see how well they reproduce the resolution
    applied to $|r|^2$. The answer is that it does a pretty good job,
    but the overall smoothing is somewhat less than expected.

    .. figure:: ../images/resolution.png
       :alt: Reflectivity after applying resolution to amplitude.

       Amplitude effects of applying a 2% $\Delta Q/Q$ resolution to the
       complex amplitude prior to squaring.

    I'm guessing that our reconstructed amplitude is going to show a
    similar decay due to resolution. This ought to show up as a rounding
    off of edges in the inverted profile (guessing again from the
    effects of applying windowing functions to reduce ringing in the
    Fourier transform). This is intuitive: poor resolution should show
    less detail in the profile.
    """
    return SurroundVariation(file1, file2, u, v1, v2, stages=stages)
class SurroundVariation():
"""
See :func:`reconstruct` for details.
**Attributes:**
===================== ========================================
Attributes Description
===================== ========================================
*Q*, *RealR*, *ImagR* real and imaginary reflectivity
*dRealR*, *dImagR* Monte Carlo uncertainty estimate or None
*Qin*, *R1*, *R2* input data
*dR1*, *dR2* input uncertainty or None
*name1*, *name2* input file names
*save(file)* save output
*show()*, *plot()* show Q, RealR, ImagR
===================== ========================================
"""
backrefl = True
def __init__(self, file1, file2, u, v1, v2, stages=100):
self.u = u
self.v1, self.v2 = v1, v2
self._load(file1, file2)
self._calc()
self._calc_err(stages=stages)
self.clean()
def optimize(self, z, rho_initial):
"""
Run a quasi-Newton optimizer on a discretized profile.
**Parameters:**
*z:* boolean
Represents the depth into the profile. z equals thickness at
the substrate.
*rho_initial:* boolean
The initial profile *rho_initial* should come from direct
inversion.
**Returns:**
*rho:* (boolean, boolean)|
Returns the final profile rho which minimizes chisq.
"""
from scipy.optimize import fmin_l_bfgs_b as fmin
def cost(rho):
R1, R2 = self.refl(z, rho, resid=True)
return np.sum(R1**2) + np.sum(R2**2)
rho_final = rho_initial
rho_final, f, d = fmin(cost, rho_initial, approx_grad=True, maxfun=20)
return z, rho_final
def refl(self, z, rho, resid=False):
"""
Return the reflectivities R1 and R2 for the film *z*, *rho* in the
context of the substrate and surround variation.
**Parameters:**
*z:* boolean
Represents the depth into the profile. z equals thickness at
the substrate.
*rho:* boolean
If the resolution is known, then return the convolved theory
function.
*resid:* boolean
If *resid* is True, then return the weighted residuals vector.
**Returns:**
*R1, R2:* (boolean, boolean)|
Return the reflectivities R1 and R2 for the film *z*, *rho*.
"""
w = np.hstack((0, np.diff(z), 0))
rho = np.hstack((0, rho[1:], self.u))
rho[0] = self.v1
R1 = self._calc_refl(w, rho)
rho[0] = self.v2
R2 = self._calc_refl(w, rho)
if resid:
R1 = (self.R1in-R1)/self.dR1in
R2 = (self.R2in-R2)/self.dR2in
return R1, R2
def _calc_free(self, z, rho):
# This is more or less cloned code that should be written just once.
w = np.hstack((0, np.diff(z), 0))
rho = np.hstack((self.u, rho[1:], self.u))
rho[0] = self.u
Q = -self.Qin
if self.backrefl:
Q = -Q
r = refl(Q, w, rho)
return r.real, r.imag
def _calc_refl(self, w, rho):
Q, dQ = self.Qin, self.dQin
# Back reflectivity is equivalent to -Q inputs
if self.backrefl:
Q = -Q
r = refl(Q, w, rho)
if dQ is not None:
R = convolve(Q, abs(r)**2, Q, dQ)
else:
R = abs(r)**2
return R
def clean(self):
"""
Remove points which are NaN or Inf from the computed phase.
"""
# Toss invalid values
Q, re, im = self.Qin, self.RealR, self.ImagR
if self.dRealR is not None:
dre, dim = self.dRealR, self.dImagR
keep = reduce(lambda y, x: isfinite(x)&y, [re, im], True)
self.Q, self.RealR, self.dRealR, self.ImagR, self.dImagR \
= [v[keep] for v in (Q, re, dre, im, dim)]
else:
keep = reduce(lambda y, x: isfinite(x)&y, [re, im], True)
self.Q, self.RealR, self.ImagR = [v[keep] for v in (Q, re, im)]
def save(self, outfile=None, uncertainty=True):
"""
Save Q, RealR, ImagR to a three column text file named *outfile*, or
save Q, RealR, ImagR, dRealR, dImagR to a five column text file.
**Parameters:**
*outfile:* file
Include dRealR, dImagR if they exist and if *uncertainty*
is True, making a five column file.
*uncertainity:* boolean
Include dRealR and dImagR if True.
**Returns:**
*None*
"""
if outfile is None:
basefile = os.path.splitext(os.path.basename(self.name1))[0]
outfile = basefile+os.extsep+"amp"
header = "# Q RealR ImagR"
v = [self.Q, self.RealR, self.ImagR]
if self.dRealR is not None and uncertainty:
header += " dRealR dImagR"
v += [self.dRealR, self.dImagR]
fid = open(outfile, "w")
fid.write(header+"\n")
np.savetxt(fid, np.array(v).T)
fid.close()
def save_inverted(self, outfile=None, profile=None):
"""
Save Q, R1, R2, RealR of the inverted profile.
"""
R1, R2 = self.refl(*profile)
rer, imr = self._calc_free(*profile)
data = np.vstack((self.Qin, R1, R2, rer, imr))
fid = open(outfile, "w")
fid.write("# Q R1 R2 RealR ImagR\n")
np.savetxt(fid, np.array(data).T)
fid.close()
def show(self):
"""Print Q, RealR, ImagR to the screen."""
print("# %9s %11s %11s"%("Q", "RealR", "ImagR"))
for point in zip(self.Q, self.RealR, self.ImagR):
print("%11.4g %11.4g %11.4g"%point)
def plot_measurement(self, profile=None):
"""Plot the data, and if available, the inverted theory."""
from matplotlib.font_manager import FontProperties
import pylab
def plot1(Q, R, dR, Rth, surround, label, color):
# Fresnel reflectivity
if self.backrefl:
F = abs(refl(Q, [0, 0], [self.u, surround]))**2
else:
F = abs(refl(Q, [0, 0], [surround, self.u]))**2
pylab.plot(Q, R/F, '.', label=label, color=color)
if Rth is not None:
pylab.plot(Q, Rth/F, '-', label=None, color=color)
if dR is not None:
pylab.fill_between(Q, (R-dR)/F, (R+dR)/F,
color=color, alpha=0.2)
if Rth is not None:
chisq = np.sum(((R-Rth)/dR)**2)
else:
chisq = 0
return chisq, len(Q)
else:
# Doesn't make sense to compute chisq for unweighted
# reflectivity since there are several orders of magnitude
# differences between the data points.
return 0, 1
if profile is not None:
R1, R2 = self.refl(*profile)
else:
R1, R2 = None, None
# Only show file.ext portion of the file specification
name1 = os.path.basename(self.name1)
name2 = os.path.basename(self.name2)
pylab.cla()
chisq1, n1 = plot1(self.Qin, self.R1in, self.dR1in, R1,
self.v1, name1, 'blue')
chisq2, n2 = plot1(self.Qin, self.R2in, self.dR2in, R2,
self.v2, name2, 'green')
pylab.legend(prop=FontProperties(size='medium'))
chisq = (chisq1+chisq2)/(n1+n2)
if chisq != 0:
pylab.text(0.01, 0.01, "chisq=%.1f"%chisq,
transform=pylab.gca().transAxes,
ha='left', va='bottom')
pylab.ylabel('R / Fresnel_R')
pylab.xlabel('Q (inv A)')
plottitle('Reflectivity Measurements')
def plot_phase(self):
from matplotlib.font_manager import FontProperties
import pylab
plotamp(self.Q, self.ImagR, dr=self.dImagR,
color='blue', label='Imag R')
plotamp(self.Q, self.RealR, dr=self.dRealR,
color=DARK_RED, label='Real R')
pylab.legend(prop=FontProperties(size='medium'))
plottitle('Reconstructed Phase')
def plot_imaginary(self):
from matplotlib.font_manager import FontProperties
import pylab
plotamp(self.Q, -self.ImagR, dr=self.dImagR,
color='blue', label='Imag R+')
plotamp(self.Q, self.ImagR, dr=self.dImagR,
color='green', label='Imag R-')
pylab.legend(prop=FontProperties(size='medium'))
pylab.ylabel("(100 Q)^2 Imag R")
pylab.xlabel("Q (inv A)")
plottitle('Reconstructed Phase')
def _load(self, file1, file2):
"""
Load the data from files or from tuples of (Q, R) or (Q, R, dR),
(Q, dQ, R, dR) or (Q, dQ, R, dR, L).
"""
# This code assumes the following data file formats:
# 2-column data: Q, R
# 3-column data: Q, R, dR
# 4-column data: Q, dQ, R, dR
# 5-column data: Q, dQ, R, dR, Lambda
if isstr(file1):
d1 = np.loadtxt(file1).T
name1 = file1
else:
d1 = file1
name1 = "SimData1"
if isstr(file2):
d2 = np.loadtxt(file2).T
name2 = file2
else:
d2 = file2
name2 = "SimData2"
ncols = len(d1)
if ncols <= 1:
raise ValueError("Data file has less than two columns")
elif ncols == 2:
q1, r1 = d1[0:2]
q2, r2 = d2[0:2]
dr1 = dr2 = None
dq1 = dq2 = None
elif ncols == 3:
q1, r1, dr1 = d1[0:3]
q2, r2, dr2 = d2[0:3]
dq1 = dq2 = None
elif ncols == 4:
q1, dq1, r1, dr1 = d1[0:4]
q2, dq2, r2, dr2 = d2[0:4]
elif ncols >= 5:
q1, dq1, r1, dr1, lambda1 = d1[0:5]
q2, dq2, r2, dr2, lanbda2 = d2[0:5]
if not q1.shape == q2.shape or not (q1 == q2).all():
raise ValueError("Q points do not match in data files")
# Note that q2, dq2, lambda1, and lambda2 are currently discarded.
self.name1, self.name2 = name1, name2
self.Qin, self.dQin = q1, dq1
self.R1in, self.R2in = r1, r2
self.dR1in, self.dR2in = dr1, dr2
def _calc(self):
"""
Call the phase reconstruction calculator.
"""
re, im = _phase_reconstruction(self.Qin, self.R1in, self.R2in,
self.u, self.v1, self.v2)
self.RealR, self.ImagR = re, im
self.Q = self.Qin
    def _calc_err(self, stages):
        """
        Estimate uncertainty on the reconstructed amplitude by Monte Carlo
        resampling: draw *stages* noisy realizations of the two reflectivity
        measurements and rerun the phase reconstruction for each.

        Overwrites RealR/ImagR with the per-point mean over all runs and
        stores the per-point spread in dRealR/dImagR.  Does nothing when the
        input data carries no uncertainties.
        """
        if self.dR1in is None:
            return
        from numpy.random import normal
        runs = []
        for i in range(stages):
            # Perturb each measurement within its reported uncertainty.
            R1 = normal(self.R1in, self.dR1in)
            R2 = normal(self.R2in, self.dR2in)
            rer, imr = _phase_reconstruction(self.Qin, R1, R2,
                                             self.u, self.v1, self.v2)
            runs.append((rer, imr))
        rers, rims = zip(*runs)
        # NOTE(review): mean/std are presumably numpy's, brought in by a star
        # import earlier in the module -- confirm.
        self.RealR = valid_f(mean, rers)
        self.ImagR = valid_f(mean, rims)
        self.dRealR = valid_f(std, rers)
        self.dImagR = valid_f(std, rims)
def valid_f(f, A, axis=0):
    """
    Calculate vector function f using only the finite elements of the array *A*.

    *axis* is the axis over which the calculation should be performed, or None
    if the calculation should summarize the entire array.

    :param f: reduction function (e.g. numpy mean or std) accepting an
        ``axis`` keyword and honouring masked arrays.
    :param A: array-like input; NaN/inf entries are excluded.
    :return: plain ndarray with the reduction applied over *axis*.
    """
    A = np.asarray(A)
    # Mask non-finite entries so masked-aware reductions ignore them.  Use
    # np.isfinite explicitly rather than relying on a star-imported name.
    A = np.ma.masked_array(A, mask=~np.isfinite(A))
    return np.asarray(f(A, axis=axis))
def _phase_reconstruction(Q, R1sq, R2sq, rho_u, rho_v1, rho_v2):
"""
Compute phase reconstruction from back reflectivity on paired samples
with varying surface materials.
"Fixed Nonvacuum Fronting, Variable Backing"
Uses eq. (31), (32) from [Majkrzak2003].
Inputs::
*Q* is the measurement positions
*R1sq*, *R2sq* are the measurements in the two conditions
*rho_v1*, *rho_v2* are the backing media SLDs for *R1sq* and *R2sq*
*rho_u* is the fronting medium SLD
Returns RealR, ImagR
"""
# The used notation here is different from the paper [Majkrzak2003].
# To more easily understand the code, take a look at the following translation table
#
# Paper | Code
# f^2 = usq
# f^2(a^2 + f^2b^2) = alpha
# f^2(d^2 + c^2) = beta
# \Sigma^{fh_i} = sigmai with i = 1, 2
# h_1^2, h_1^2 = v1sq, v2sq
Qsq = Q**2 + 16.*pi*rho_u*1e-6
usq, v1sq, v2sq = [(1-16*pi*rho*1e-6/Qsq) for rho in (rho_u, rho_v1, rho_v2)]
with np.errstate(invalid='ignore'):
sigma1 = 2 * sqrt(v1sq*usq) * (1+R1sq) / (1-R1sq)
sigma2 = 2 * sqrt(v2sq*usq) * (1+R2sq) / (1-R2sq)
alpha = usq * (sigma1-sigma2) / (v1sq-v2sq)
beta = (v2sq*sigma1-v1sq*sigma2) / (v2sq-v1sq)
gamma = sqrt(alpha*beta - usq**2)
Rre = (alpha-beta) / (2*usq+alpha+beta)
Rim = -2*gamma / (2*usq+alpha+beta)
return Rre, Rim
def main():
    """
    Drive phase reconstruction and direct inversion from the command line.

    With one argument, the file is treated as a phase-reconstructed amplitude
    (AMP) and inverted directly; with two arguments, the pair of reduced
    reflectivity files (RF1, RF2) is first phase-reconstructed using
    SurroundVariation, then inverted.
    """
    import sys
    import os
    from optparse import OptionParser, OptionGroup

    description = """\
Compute the scattering length density profile from the real portion of the
phase reconstructed reflectivity. Call with a phase reconstructed reflectivity
dataset AMP, or with a pair of reduced reflectivity datasets RF1 and RF2 for
complete phase inversion. Phase inversion requires two surrounding materials
and one substrate material to be specified. The measurement is assumed to come
through the substrate."""
    parser = OptionParser(usage="%prog [options] AMP or RF1 RF2",
                          description=description,
                          version="%prog 1.0")
    inversion_keys = []  # Collect the keywords we are using

    # Sample geometry and material options.
    group = OptionGroup(parser, "Sample description", description=None)
    group.add_option("-t", "--thickness", dest="thickness",
                     default=Inversion.thickness, type="float",
                     help="sample thickness (A)")
    group.add_option("-u", "--substrate", dest="substrate",
                     default=Inversion.substrate, type="float",
                     help="sample substrate material (10^6 * SLD)")
    group.add_option("-v", "--surround", dest="surround",
                     type="float", nargs=2,
                     help="varying materials v1 v2 (10^6 * SLD) [for phase]")
    # fronting is not an inversion key
    inversion_keys += ['thickness', 'substrate']
    parser.add_option_group(group)

    # Data-range and noise options.
    group = OptionGroup(parser, "Data description", description=None)
    group.add_option("--Qmin", dest="Qmin",
                     default=Inversion.Qmin, type="float",
                     help="minimum Q value to use from the data")
    group.add_option("--Qmax", dest="Qmax",
                     default=Inversion.Qmax, type="float",
                     help="maximum Q value to use from the data")
    group.add_option("-n", "--noise", dest="noise",
                     default=Inversion.noise, type="float",
                     help="noise scaling")
    group.add_option("-M", "--monitor", dest="monitor",
                     default=Inversion.monitor, type="int",
                     help="monitor counts used for measurement")
    inversion_keys += ['Qmin', 'Qmax', 'noise', 'monitor']
    parser.add_option_group(group)

    # Output selection; -p/-q share the 'doplot' destination.
    group = OptionGroup(parser, "Outputs", description=None)
    group.add_option("-o", "--outfile", dest="outfile", default=None,
                     help="profile file (infile.prf), use '-' for console")
    group.add_option("--ampfile", dest="ampfile", default=None,
                     help="amplitude file (infile.amp)")
    group.add_option("-p", "--plot", dest="doplot",
                     action="store_true",
                     help="show plot of result")
    group.add_option("-q", "--quiet", dest="doplot",
                     action="store_false", default=True,
                     help="don't show output plot")
    # doplot is a post inversion options
    parser.add_option_group(group)

    # Numerical controls for the inversion itself.
    group = OptionGroup(parser, "Calculation controls", description=None)
    group.add_option("--rhopoints", dest="rhopoints",
                     default=Inversion.rhopoints, type="int",
                     help="number of profile steps [dz=thickness/rhopoints]")
    group.add_option("-z", "--dz", dest="dz",
                     default=None, type="float",
                     help="max profile step size (A) [rhopoints=thickness/dz]")
    group.add_option("--calcpoints", dest="calcpoints",
                     default=Inversion.calcpoints, type="int",
                     help="number of calculation points per profile step")
    group.add_option("--stages", dest="stages",
                     default=Inversion.stages, type="int",
                     help="number of inversions to average over")
    group.add_option("-a", dest="amp_only", default=False,
                     action="store_true",
                     help="calculate amplitude and stop")
    inversion_keys += ['rhopoints', 'calcpoints', 'stages']
    parser.add_option_group(group)

    (options, args) = parser.parse_args()
    if len(args) < 1 or len(args) > 2:
        parser.error("Need real R data file or pair of reflectivities")

    basefile = os.path.splitext(os.path.basename(args[0]))[0]
    if len(args) == 1:
        # Single file: already phase-reconstructed amplitude.
        phase = None
        data = args[0]
    elif len(args) == 2:
        # Two files: run the surround-variation phase reconstruction first.
        if not options.surround or not options.substrate:
            parser.error("Need fronting and backing for phase inversion")
        v1, v2 = options.surround
        u = options.substrate
        phase = SurroundVariation(args[0], args[1], u=u, v1=v1, v2=v2)
        data = phase.Q, phase.RealR, phase.dRealR

    if options.ampfile:
        phase.save(options.ampfile)
    if options.amp_only and options.doplot:
        import pylab
        phase.plot()
        pylab.show()
    if options.amp_only:
        return

    if options.dz:
        # NOTE(review): the -z help text says rhopoints=thickness/dz, but this
        # computes ceil(1/dz) -- confirm whether unit thickness is assumed.
        options.rhopoints = ceil(1/options.dz)
    # Rather than trying to remember which control parameters I
    # have options for, I update the list of parameters that I
    # allow for each group of parameters, and pull the returned
    # values out below.
    res = Inversion(data=data, **dict((key, getattr(options, key))
                                      for key in inversion_keys))
    res.run(showiters=False)

    if options.outfile == None:
        options.outfile = basefile+os.path.extsep+"prf"
    if options.outfile == "-":
        res.show()
    elif options.outfile != None:
        res.save(options.outfile)
    if options.doplot:
        import pylab
        res.plot(phase=phase)
        pylab.show()

if __name__ == "__main__":
    main()
| 38.952047 | 90 | 0.563191 | 8,549 | 66,608 | 4.359574 | 0.152883 | 0.006171 | 0.00601 | 0.003864 | 0.179608 | 0.136786 | 0.111913 | 0.089536 | 0.071183 | 0.06147 | 0 | 0.022703 | 0.31292 | 66,608 | 1,709 | 91 | 38.974839 | 0.791675 | 0.498349 | 0 | 0.181943 | 0 | 0 | 0.083922 | 0.001578 | 0 | 0 | 0 | 0 | 0 | 1 | 0.072503 | false | 0 | 0.045144 | 0.002736 | 0.191518 | 0.00684 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cfab26c2310626960a9eb1bcfe2663950cbe8982 | 1,429 | py | Python | JupiterMag/Tools/TestTrace.py | mattkjames7/JupiterMag | 0c2bc82e9efadc4b026b82f4aeea30b068ba7fbd | [
"MIT"
] | null | null | null | JupiterMag/Tools/TestTrace.py | mattkjames7/JupiterMag | 0c2bc82e9efadc4b026b82f4aeea30b068ba7fbd | [
"MIT"
] | null | null | null | JupiterMag/Tools/TestTrace.py | mattkjames7/JupiterMag | 0c2bc82e9efadc4b026b82f4aeea30b068ba7fbd | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
def TestTrace(IntModel='jrm33', ExtModel='Con2020', fig=None, maps=[1, 1, 0, 0], color='green'):
    """
    Trace field lines from a set of equatorial start points and plot them in
    the x-z plane.

    :param IntModel: internal field model name passed to TraceField.
    :param ExtModel: external field model name passed to TraceField.
    :param fig: existing figure/axes to plot into, or None for a new one.
    :param maps: subplot layout list forwarded to PlotXZ.
    :param color: line color for the traces.
    :return: the matplotlib axes returned by PlotXZ.
    """
    from ..TraceField import TraceField
    from ..Con2020 import Config

    # NOTE(review): the mutable default for 'maps' is kept for interface
    # compatibility; copy it so accidental caller-side mutation of the shared
    # default list cannot leak between calls.
    maps = list(maps)

    # Start points along +/- x on the equatorial plane, 2 to 30 Rj.
    n = 7
    x = np.linspace(2.0, 30.0, n)
    x = np.append(-x[::-1], x)
    y = np.zeros(n*2)
    z = np.zeros(n*2)

    # Trace using the analytic Con2020 equations, restoring the previous
    # configuration afterwards.
    cfg = Config()
    Config(equation_type='analytic')
    T = TraceField(x, y, z, Verbose=True, IntModel=IntModel, ExtModel=ExtModel)
    Config(cfg)

    # Build the legend label from whichever models are active.
    lab = ''
    if not IntModel.upper() == 'NONE':
        lab += IntModel
    if not ExtModel.upper() == 'NONE':
        if not lab == '':
            lab += ' + '
        lab += ExtModel

    ax = T.PlotXZ(fig=fig, maps=maps, label=lab, color=color)
    return ax
def CompareTrace():
    """
    Compare field-line traces of the JRM33 internal model alone against JRM33
    combined with the Con2020 external field, plotted in the rho-z plane.

    :return: the matplotlib axes holding both sets of traces.
    """
    from ..TraceField import TraceField
    from ..Con2020 import Config

    # Start points on a unit sphere at colatitudes (180 - 21..35) degrees.
    n = 8
    colat = (180.0 - np.linspace(21, 35, n))*np.pi/180.0
    r = np.ones(n)
    x = r*np.sin(colat)
    y = np.zeros(n)
    z = r*np.cos(colat)

    # Trace with and without the external field using the analytic Con2020
    # equations, restoring the previous configuration afterwards.
    cfg = Config()
    Config(equation_type='analytic')
    T0 = TraceField(x, y, z, Verbose=True, IntModel='jrm33', ExtModel='none')
    T1 = TraceField(x, y, z, Verbose=True, IntModel='jrm33', ExtModel='Con2020')
    Config(cfg)

    # Overlay both traces on one rho-z axis.
    ax = T0.PlotRhoZ(label='JRM33', color='black')
    ax = T1.PlotRhoZ(fig=ax, label='JRM33 + Con2020', color='red')
    ax.set_xlim(-2.0, 25.0)
    ax.set_ylim(-10.0, 10.0)
    return ax
| 21.651515 | 89 | 0.6669 | 237 | 1,429 | 4.004219 | 0.35443 | 0.00843 | 0.066386 | 0.041096 | 0.314015 | 0.314015 | 0.240253 | 0.206533 | 0.094837 | 0 | 0 | 0.059117 | 0.159552 | 1,429 | 65 | 90 | 21.984615 | 0.731057 | 0.085374 | 0 | 0.285714 | 0 | 0 | 0.071483 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0.142857 | 0 | 0.238095 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cfac305f0f7ce458aca125a8380e57d97d04bc9b | 81,938 | py | Python | openstuder.py | OpenStuder/openstuder-client-python | ade667116afcd084faed93febfa4e267972f5250 | [
"MIT"
] | null | null | null | openstuder.py | OpenStuder/openstuder-client-python | ade667116afcd084faed93febfa4e267972f5250 | [
"MIT"
] | null | null | null | openstuder.py | OpenStuder/openstuder-client-python | ade667116afcd084faed93febfa4e267972f5250 | [
"MIT"
] | null | null | null | from __future__ import annotations
from typing import Callable, Optional, Tuple, List
from enum import Enum, Flag, auto
from threading import Thread
import datetime
import json
import websocket
class SIStatus(Enum):
    """
    Status of operations on the OpenStuder gateway.

    - **SIStatus.SUCCESS**: Operation was successfully completed.
    - **SIStatus.IN_PROGRESS**: Operation is already in progress or another operation is occupying the resource.
    - **SIStatus.ERROR**: General (unspecified) error.
    - **SIStatus.NO_PROPERTY**: The property does not exist or the user's access level does not allow to access the property.
    - **SIStatus.NO_DEVICE**: The device does not exist.
    - **SIStatus.NO_DEVICE_ACCESS**: The device access instance does not exist.
    - **SIStatus.TIMEOUT**: A timeout occurred when waiting for the completion of the operation.
    - **SIStatus.INVALID_VALUE**: A invalid value was passed.
    """
    SUCCESS = 0
    IN_PROGRESS = 1
    ERROR = -1
    NO_PROPERTY = -2
    NO_DEVICE = -3
    NO_DEVICE_ACCESS = -4
    TIMEOUT = -5
    INVALID_VALUE = -6

    @staticmethod
    def from_string(string: str) -> SIStatus:
        """
        Map the protocol's status string to the matching enum member; any
        unknown string maps to SIStatus.ERROR.
        """
        return {
            'Success': SIStatus.SUCCESS,
            'InProgress': SIStatus.IN_PROGRESS,
            'Error': SIStatus.ERROR,
            'NoProperty': SIStatus.NO_PROPERTY,
            'NoDevice': SIStatus.NO_DEVICE,
            'NoDeviceAccess': SIStatus.NO_DEVICE_ACCESS,
            'Timeout': SIStatus.TIMEOUT,
            'InvalidValue': SIStatus.INVALID_VALUE,
        }.get(string, SIStatus.ERROR)
class SIConnectionState(Enum):
    """
    State of the connection to the OpenStuder gateway.

    The client moves through these states in order:
    DISCONNECTED -> CONNECTING -> AUTHORIZING -> CONNECTED.

    - **SIConnectionState.DISCONNECTED**: The client is not connected.
    - **SIConnectionState.CONNECTING**: The client is establishing the WebSocket connection to the gateway.
    - **SIConnectionState.AUTHORIZING**: The WebSocket connection to the gateway has been established and the client is authorizing.
    - **SIConnectionState.CONNECTED**: The WebSocket connection is established and the client is authorized, ready to use.
    """
    DISCONNECTED = auto()
    CONNECTING = auto()
    AUTHORIZING = auto()
    CONNECTED = auto()
class SIAccessLevel(Enum):
    """
    Level of access granted to a client from the OpenStuder gateway.

    - **NONE**: No access at all.
    - **BASIC**: Basic access to device information properties (configuration excluded).
    - **INSTALLER**: Basic access + additional access to most common configuration properties.
    - **EXPERT**: Installer + additional advanced configuration properties.
    - **QUALIFIED_SERVICE_PERSONNEL**: Expert and all configuration and service properties only for qualified service personnel.
    """
    NONE = 0
    BASIC = auto()
    INSTALLER = auto()
    EXPERT = auto()
    QUALIFIED_SERVICE_PERSONNEL = auto()

    @staticmethod
    def from_string(string: str) -> SIAccessLevel:
        """
        Map the protocol's access-level string to the matching enum member;
        any unknown string maps to SIAccessLevel.NONE.
        """
        return {
            'None': SIAccessLevel.NONE,
            'Basic': SIAccessLevel.BASIC,
            'Installer': SIAccessLevel.INSTALLER,
            'Expert': SIAccessLevel.EXPERT,
            'QSP': SIAccessLevel.QUALIFIED_SERVICE_PERSONNEL,
        }.get(string, SIAccessLevel.NONE)
class SIDescriptionFlags(Flag):
    """
    Flags to control the format of the **DESCRIBE** functionality.
    Flags may be OR-combined.

    - **SIDescriptionFlags.NONE**: No description flags.
    - **SIDescriptionFlags.INCLUDE_ACCESS_INFORMATION**: Includes device access instances information.
    - **SIDescriptionFlags.INCLUDE_DEVICE_INFORMATION**: Include device information.
    - **SIDescriptionFlags.INCLUDE_PROPERTY_INFORMATION**: Include device property information.
    - **SIDescriptionFlags.INCLUDE_DRIVER_INFORMATION**: Include device access driver information.
    """
    NONE = 0
    INCLUDE_ACCESS_INFORMATION = auto()
    INCLUDE_DEVICE_INFORMATION = auto()
    INCLUDE_PROPERTY_INFORMATION = auto()
    INCLUDE_DRIVER_INFORMATION = auto()
class SIWriteFlags(Flag):
    """
    Flags to control write property operation.

    - **SIWriteFlags.NONE**: No write flags.
    - **SIWriteFlags.PERMANENT**: Write the change to the persistent storage, eg the change lasts reboots.
    """
    NONE = 0
    PERMANENT = auto()
class SIProtocolError(IOError):
    """
    Exception type raised for all OpenStuder protocol errors.
    """

    def __init__(self, message):
        super().__init__(message)

    def reason(self) -> str:
        """
        Returns the actual reason for the error.

        :return: Reason for the error.
        """
        return self.args[0]
class SIDeviceMessage:
    """
    The SIDeviceMessage class represents a message a device connected to the
    OpenStuder gateway has broadcast.
    """

    def __init__(self, access_id: str, device_id: str, message_id: str, message: str, timestamp: datetime.datetime):
        # Timestamp when the device message was received by the gateway.
        self.timestamp = timestamp
        # ID of the device access driver that received the message.
        self.access_id = access_id
        # ID of the device that broadcast the message.
        self.device_id = device_id
        # Message ID.
        self.message_id = message_id
        # String representation of the message.
        self.message = message

    @staticmethod
    def from_dict(d: dict) -> SIDeviceMessage:
        """
        Build an SIDeviceMessage from a decoded JSON/header dict; the
        timestamp is ISO-8601 with a trailing 'Z' normalized to '+00:00'.

        :raises SIProtocolError: if a required key is missing.
        """
        try:
            when = datetime.datetime.fromisoformat(d['timestamp'].replace("Z", "+00:00"))
            return SIDeviceMessage(d['access_id'], d['device_id'], d['message_id'], d['message'], when)
        except KeyError:
            raise SIProtocolError('invalid json body')
class SIPropertyReadResult:
    """
    The SIDPropertyReadResult class represents the status of a property read result.
    """

    def __init__(self, status: SIStatus, id_: str, value: Optional[any]):
        # Status of the property read operation.
        self.status = status
        # ID of the property read.
        self.id = id_
        # Value that was read from the property, optional.
        self.value = value

    def to_tuple(self) -> Tuple[SIStatus, str, Optional[any]]:
        """Return the result as a (status, id, value) tuple."""
        return self.status, self.id, self.value

    @staticmethod
    def from_dict(d: dict) -> SIPropertyReadResult:
        """
        Build an SIPropertyReadResult from a decoded JSON dict.  The optional
        'value' entry is coerced to float, bool ('true'/'false') or lower-case
        string, in that order of preference.

        :raises SIProtocolError: if 'status' or 'id' is missing.
        """
        try:
            result = SIPropertyReadResult(SIStatus.from_string(d['status']), d['id'], None)
        except KeyError:
            raise SIProtocolError('invalid json body')
        raw = d.get('value')
        if raw is not None:
            try:
                result.value = float(raw)
            except ValueError:
                lowered = raw.lower()
                if lowered == 'true':
                    result.value = True
                elif lowered == 'false':
                    result.value = False
                else:
                    result.value = lowered
        return result
class SIPropertySubscriptionResult:
    """
    The SIDPropertyReadResult class represents the status of a property
    subscription/unsubscription.
    """

    def __init__(self, status: SIStatus, id_: str):
        # Status of the property subscribe or unsubscribe operation.
        self.status = status
        # ID of the property.
        self.id = id_

    def to_tuple(self) -> Tuple[SIStatus, str]:
        """Return the result as a (status, id) tuple."""
        return self.status, self.id

    @staticmethod
    def from_dict(d: dict) -> SIPropertySubscriptionResult:
        """
        Build an SIPropertySubscriptionResult from a decoded JSON dict.

        :raises SIProtocolError: if 'status' or 'id' is missing.
        """
        try:
            status = SIStatus.from_string(d['status'])
            return SIPropertySubscriptionResult(status, d['id'])
        except KeyError:
            raise SIProtocolError('invalid json body')
class _SIAbstractGatewayClient:
    def __init__(self):
        # Stateless base; all protocol encode/decode helpers below are static.
        super(_SIAbstractGatewayClient, self).__init__()
    @staticmethod
    def encode_authorize_frame_without_credentials() -> str:
        """
        Encode an AUTHORIZE frame for guest access (no credentials),
        requesting protocol version 1.
        """
        return 'AUTHORIZE\nprotocol_version:1\n\n'
@staticmethod
def encode_authorize_frame_with_credentials(user: str, password: str) -> str:
return 'AUTHORIZE\nuser:{user}\npassword:{password}\nprotocol_version:1\n\n'.format(user=user, password=password)
    @staticmethod
    def decode_authorized_frame(frame: str) -> Tuple[SIAccessLevel, str]:
        """
        Decode an AUTHORIZED frame into (granted access level, gateway version).
        Only protocol version 1 is accepted.

        :raises SIProtocolError: on ERROR frames, unsupported protocol version
            or otherwise malformed input.
        """
        command, headers, _ = _SIAbstractGatewayClient.decode_frame(frame)
        if command == 'AUTHORIZED' and 'access_level' in headers and 'protocol_version' in headers and 'gateway_version' in headers:
            if headers['protocol_version'] == '1':
                return SIAccessLevel.from_string(headers['access_level']), headers['gateway_version']
            else:
                raise SIProtocolError('protocol version 1 not supported by server')
        elif command == 'ERROR' and 'reason' in headers:
            raise SIProtocolError(headers['reason'])
        else:
            raise SIProtocolError('unknown error during authorization')
    @staticmethod
    def encode_enumerate_frame() -> str:
        """
        Encode an ENUMERATE frame (no headers, no body).
        """
        return 'ENUMERATE\n\n'
    @staticmethod
    def decode_enumerated_frame(frame: str) -> Tuple[SIStatus, int]:
        """
        Decode an ENUMERATED frame into (status, number of devices found).

        :raises SIProtocolError: on ERROR frames or malformed input.
        """
        command, headers, _ = _SIAbstractGatewayClient.decode_frame(frame)
        if command == 'ENUMERATED' and 'status' in headers and 'device_count' in headers:
            return SIStatus.from_string(headers['status']), int(headers['device_count'])
        elif command == 'ERROR' and 'reason' in headers:
            raise SIProtocolError(headers['reason'])
        else:
            raise SIProtocolError('unknown error during device enumeration')
@staticmethod
def encode_describe_frame(device_access_id: Optional[str], device_id: Optional[str], property_id: Optional[int], flags: Optional[SIDescriptionFlags]) -> str:
frame = 'DESCRIBE\n'
if device_access_id is not None:
frame += 'id:{device_access_id}'.format(device_access_id=device_access_id)
if device_id is not None:
frame += '.{device_id}'.format(device_id=device_id)
if property_id is not None:
frame += '.{property_id}'.format(property_id=property_id)
frame += '\n'
if flags is not None and isinstance(flags, SIDescriptionFlags):
frame += 'flags:'
if flags & SIDescriptionFlags.INCLUDE_ACCESS_INFORMATION:
frame += 'IncludeAccessInformation,'
if flags & SIDescriptionFlags.INCLUDE_DEVICE_INFORMATION:
frame += 'IncludeDeviceInformation,'
if flags & SIDescriptionFlags.INCLUDE_PROPERTY_INFORMATION:
frame += 'IncludePropertyInformation,'
if flags & SIDescriptionFlags.INCLUDE_DRIVER_INFORMATION:
frame += 'IncludeDriverInformation,'
frame = frame[:-1]
frame += '\n'
frame += '\n'
return frame
    @staticmethod
    def decode_description_frame(frame: str) -> Tuple[SIStatus, Optional[str], object]:
        """
        Decode a DESCRIPTION frame into (status, optional id, description).
        On success the body is parsed as JSON; otherwise an empty dict is
        returned as the description.

        :raises SIProtocolError: on ERROR frames or malformed input.
        """
        command, headers, body = _SIAbstractGatewayClient.decode_frame(frame)
        if command == 'DESCRIPTION' and 'status' in headers:
            status = SIStatus.from_string(headers['status'])
            if status == SIStatus.SUCCESS:
                description = json.loads(body)
                return status, headers.get('id', None), description
            else:
                return status, headers.get('id', None), {}
        elif command == 'ERROR' and 'reason' in headers:
            raise SIProtocolError(headers['reason'])
        else:
            raise SIProtocolError('unknown error during description')
@staticmethod
def encode_find_properties_frame(property_id: str) -> str:
return 'FIND PROPERTIES\nid:{property_id}\n\n'.format(property_id=property_id)
    @staticmethod
    def decode_properties_found_frame(frame: str) -> (SIStatus, str, int, List[str]):
        """
        Decode a PROPERTIES FOUND frame into (status, id, count, property id
        list).  On success the body is parsed as a JSON list; otherwise an
        empty list is returned.

        :raises SIProtocolError: on ERROR frames or malformed input.
        """
        command, headers, body = _SIAbstractGatewayClient.decode_frame(frame)
        if command == 'PROPERTIES FOUND' and 'status' in headers and 'id' in headers and 'count' in headers:
            status = SIStatus.from_string(headers['status'])
            if status == SIStatus.SUCCESS:
                properties = json.loads(body)
                return status, headers.get('id'), int(headers.get('count', 0)), properties
            else:
                return status, headers.get('id'), int(headers.get('count', 0)), []
        elif command == 'ERROR' and 'reason' in headers:
            raise SIProtocolError(headers['reason'])
        else:
            raise SIProtocolError('unknown error during finding properties')
@staticmethod
def encode_read_property_frame(property_id: str) -> str:
return 'READ PROPERTY\nid:{property_id}\n\n'.format(property_id=property_id)
    @staticmethod
    def decode_property_read_frame(frame: str) -> SIPropertyReadResult:
        """
        Decode a PROPERTY READ frame into an SIPropertyReadResult.  On
        success the value header is coerced to float, bool ('true'/'false')
        or lower-case string, in that order of preference.

        :raises SIProtocolError: on ERROR frames or malformed input.
        """
        command, headers, _ = _SIAbstractGatewayClient.decode_frame(frame)
        if command == 'PROPERTY READ' and 'status' in headers and 'id' in headers:
            status = SIStatus.from_string(headers['status'])
            if status == SIStatus.SUCCESS and 'value' in headers:
                try:
                    value = float(headers['value'])
                except ValueError:
                    string = headers['value'].lower()
                    if string == 'true':
                        value = True
                    elif string == 'false':
                        value = False
                    else:
                        value = string
                return SIPropertyReadResult(status, headers['id'], value)
            else:
                return SIPropertyReadResult(status, headers['id'], None)
        elif command == 'ERROR' and 'reason' in headers:
            raise SIProtocolError(headers['reason'])
        else:
            raise SIProtocolError('unknown error during property read')
@staticmethod
def encode_read_properties_frame(property_ids: List[str]) -> str:
return 'READ PROPERTIES\n\n{property_ids}'.format(property_ids=json.dumps(property_ids))
    @staticmethod
    def decode_properties_read_frame(frame: str) -> List[SIPropertyReadResult]:
        """
        Decode a PROPERTIES READ frame into a list of SIPropertyReadResult,
        parsed from the JSON body.

        :raises SIProtocolError: if the overall status is not SUCCESS, on
            ERROR frames or on malformed input.
        """
        command, headers, body = _SIAbstractGatewayClient.decode_frame(frame)
        if command == 'PROPERTIES READ' and 'status' in headers:
            status = SIStatus.from_string(headers['status'])
            if status == SIStatus.SUCCESS:
                return json.loads(body, object_hook=SIPropertyReadResult.from_dict)
            else:
                raise SIProtocolError(f'error during property read, status={headers["status"]}')
        elif command == 'ERROR' and 'reason' in headers:
            raise SIProtocolError(headers['reason'])
        else:
            raise SIProtocolError('unknown error during properties read')
@staticmethod
def encode_write_property_frame(property_id: str, value: Optional[any], flags: Optional[SIWriteFlags]) -> str:
frame = 'WRITE PROPERTY\nid:{property_id}\n'.format(property_id=property_id)
if flags is not None and isinstance(flags, SIWriteFlags):
frame += 'flags:'
if flags & SIWriteFlags.PERMANENT:
frame += 'Permanent'
frame += '\n'
if value is not None:
frame += 'value:{value}\n'.format(value=value)
frame += '\n'
return frame
    @staticmethod
    def decode_property_written_frame(frame: str) -> Tuple[SIStatus, str]:
        """
        Decode a PROPERTY WRITTEN frame into (status, property id).

        :raises SIProtocolError: on ERROR frames or malformed input.
        """
        command, headers, _ = _SIAbstractGatewayClient.decode_frame(frame)
        if command == 'PROPERTY WRITTEN' and 'status' in headers and 'id' in headers:
            return SIStatus.from_string(headers['status']), headers['id']
        elif command == 'ERROR' and 'reason' in headers:
            raise SIProtocolError(headers['reason'])
        else:
            raise SIProtocolError('unknown error during property write')
@staticmethod
def encode_subscribe_property_frame(property_id: str) -> str:
return 'SUBSCRIBE PROPERTY\nid:{property_id}\n\n'.format(property_id=property_id)
    @staticmethod
    def decode_property_subscribed_frame(frame: str) -> Tuple[SIStatus, str]:
        """
        Decode a PROPERTY SUBSCRIBED frame into (status, property id).

        :raises SIProtocolError: on ERROR frames or malformed input.
        """
        command, headers, _ = _SIAbstractGatewayClient.decode_frame(frame)
        if command == 'PROPERTY SUBSCRIBED' and 'status' in headers and 'id' in headers:
            return SIStatus.from_string(headers['status']), headers['id']
        elif command == 'ERROR' and 'reason' in headers:
            raise SIProtocolError(headers['reason'])
        else:
            raise SIProtocolError('unknown error during property subscribe')
@staticmethod
def encode_subscribe_properties_frame(property_ids: List[str]) -> str:
return 'SUBSCRIBE PROPERTIES\n\n{property_ids}'.format(property_ids=json.dumps(property_ids))
    @staticmethod
    def decode_properties_subscribed_frame(frame: str) -> List[SIPropertySubscriptionResult]:
        """
        Decode a PROPERTIES SUBSCRIBED frame into a list of
        SIPropertySubscriptionResult parsed from the JSON body.

        :raises SIProtocolError: if the overall status is not SUCCESS, on
            ERROR frames or on malformed input.
        """
        command, headers, body = _SIAbstractGatewayClient.decode_frame(frame)
        if command == 'PROPERTIES SUBSCRIBED' and 'status' in headers:
            status = SIStatus.from_string(headers['status'])
            if status == SIStatus.SUCCESS:
                return json.loads(body, object_hook=SIPropertySubscriptionResult.from_dict)
            else:
                # NOTE(review): message says 'properties read' although this is
                # the subscribe decoder -- looks copy-pasted; confirm intent.
                raise SIProtocolError(f'error during properties read, status={headers["status"]}')
        elif command == 'ERROR' and 'reason' in headers:
            raise SIProtocolError(headers['reason'])
        else:
            raise SIProtocolError('unknown error during properties subscribe')
@staticmethod
def encode_unsubscribe_property_frame(property_id: str) -> str:
return 'UNSUBSCRIBE PROPERTY\nid:{property_id}\n\n'.format(property_id=property_id)
    @staticmethod
    def decode_property_unsubscribed_frame(frame: str) -> Tuple[SIStatus, str]:
        """
        Decode a PROPERTY UNSUBSCRIBED frame into (status, property id).

        :raises SIProtocolError: on ERROR frames or malformed input.
        """
        command, headers, _ = _SIAbstractGatewayClient.decode_frame(frame)
        if command == 'PROPERTY UNSUBSCRIBED' and 'status' in headers and 'id' in headers:
            return SIStatus.from_string(headers['status']), headers['id']
        elif command == 'ERROR' and 'reason' in headers:
            raise SIProtocolError(headers['reason'])
        else:
            raise SIProtocolError('unknown error during property unsubscribe')
@staticmethod
def encode_unsubscribe_properties_frame(property_ids: List[str]) -> str:
return 'UNSUBSCRIBE PROPERTIES\n\n{property_ids}'.format(property_ids=json.dumps(property_ids))
    @staticmethod
    def decode_properties_unsubscribed_frame(frame: str) -> List[SIPropertySubscriptionResult]:
        """
        Decode a PROPERTIES UNSUBSCRIBED frame into a list of
        SIPropertySubscriptionResult parsed from the JSON body.

        :raises SIProtocolError: if the overall status is not SUCCESS, on
            ERROR frames or on malformed input.
        """
        command, headers, body = _SIAbstractGatewayClient.decode_frame(frame)
        if command == 'PROPERTIES UNSUBSCRIBED' and 'status' in headers:
            status = SIStatus.from_string(headers['status'])
            if status == SIStatus.SUCCESS:
                return json.loads(body, object_hook=SIPropertySubscriptionResult.from_dict)
            else:
                raise SIProtocolError(f'error during properties unsubscribe, status={headers["status"]}')
        elif command == 'ERROR' and 'reason' in headers:
            raise SIProtocolError(headers['reason'])
        else:
            raise SIProtocolError('unknown error during properties unsubscribe')
@staticmethod
def decode_property_update_frame(frame: str) -> Tuple[str, any]:
command, headers, _ = _SIAbstractGatewayClient.decode_frame(frame)
if command == 'PROPERTY UPDATE' and 'id' in headers and 'value' in headers:
try:
value = float(headers['value'])
except ValueError:
string = headers['value'].lower()
if string == 'true':
value = True
elif string == 'false':
value = False
else:
value = string
return headers['id'], value
elif command == 'ERROR' and 'reason' in headers:
raise SIProtocolError(headers['reason'])
else:
raise SIProtocolError('unknown error receiving property update')
@staticmethod
def encode_read_datalog_frame(property_id: Optional[str], from_: Optional[datetime.datetime], to: Optional[datetime.datetime], limit: Optional[int]) -> str:
frame = 'READ DATALOG\n'
if property_id is not None:
frame += 'id:{property_id}\n'.format(property_id=property_id)
frame += _SIAbstractGatewayClient.get_timestamp_header_if_present('from', from_)
frame += _SIAbstractGatewayClient.get_timestamp_header_if_present('to', to)
if limit is not None:
frame += 'limit:{limit}\n'.format(limit=limit)
frame += '\n'
return frame
    @staticmethod
    def decode_datalog_read_frame(frame: str) -> Tuple[SIStatus, Optional[str], int, str]:
        """
        Decode a DATALOG READ frame into (status, optional id, entry count,
        raw CSV/body text).  The body is returned unparsed.

        :raises SIProtocolError: on ERROR frames or malformed input.
        """
        command, headers, body = _SIAbstractGatewayClient.decode_frame(frame)
        if command == 'DATALOG READ' and 'status' in headers and 'count' in headers:
            return SIStatus.from_string(headers['status']), headers.get('id'), int(headers['count']), body
        elif command == 'ERROR' and 'reason' in headers:
            raise SIProtocolError(headers['reason'])
        else:
            raise SIProtocolError('unknown error receiving datalog read')
@staticmethod
def encode_read_messages_frame(from_: Optional[datetime.datetime], to: Optional[datetime.datetime], limit: Optional[int]) -> str:
frame = 'READ MESSAGES\n'
frame += _SIAbstractGatewayClient.get_timestamp_header_if_present('from', from_)
frame += _SIAbstractGatewayClient.get_timestamp_header_if_present('to', to)
if limit is not None:
frame += 'limit:{limit}\n'.format(limit=limit)
frame += '\n'
return frame
    @staticmethod
    def decode_messages_read_frame(frame: str) -> Tuple[SIStatus, int, List[SIDeviceMessage]]:
        """
        Decode a MESSAGES READ frame into (status, count, message list).  On
        success the body is parsed into SIDeviceMessage objects; otherwise an
        empty list is returned.

        :raises SIProtocolError: on ERROR frames or malformed input.
        """
        command, headers, body = _SIAbstractGatewayClient.decode_frame(frame)
        if command == 'MESSAGES READ' and 'status' in headers and 'count' in headers:
            status = SIStatus.from_string(headers['status'])
            if status == SIStatus.SUCCESS:
                messages = json.loads(body, object_hook=SIDeviceMessage.from_dict)
                return status, int(headers['count']), messages
            else:
                return status, int(headers['count']), []
        elif command == 'ERROR' and 'reason' in headers:
            raise SIProtocolError(headers['reason'])
        else:
            # NOTE(review): message says 'description' although this is the
            # messages decoder -- looks copy-pasted; confirm intent.
            raise SIProtocolError('unknown error during description')
    @staticmethod
    def decode_device_message_frame(frame: str) -> SIDeviceMessage:
        """
        Decode a DEVICE MESSAGE indication frame into an SIDeviceMessage,
        built from the frame headers.

        :raises SIProtocolError: on ERROR frames or malformed input.
        """
        command, headers, _ = _SIAbstractGatewayClient.decode_frame(frame)
        if command == 'DEVICE MESSAGE' and 'access_id' in headers and 'device_id' in headers and 'message_id' in headers and 'message' in headers and 'timestamp' in headers:
            return SIDeviceMessage.from_dict(headers)
        elif command == 'ERROR' and 'reason' in headers:
            raise SIProtocolError(headers['reason'])
        else:
            raise SIProtocolError('unknown error receiving device message')
@staticmethod
def peek_frame_command(frame: str) -> str:
return frame[:frame.index('\n')]
@staticmethod
def decode_frame(frame: str) -> Tuple[str, dict, str]:
lines = frame.split('\n')
if len(lines) < 2:
raise SIProtocolError('invalid frame')
command = lines[0]
line = 1
headers = {}
while line < len(lines) and lines[line]:
components = lines[line].split(':')
if len(components) >= 2:
headers[components[0]] = ':'.join(components[1:])
line += 1
line += 1
if line >= len(lines):
raise SIProtocolError('invalid frame')
body = '\n'.join(lines[line:])
return command, headers, body
@staticmethod
def get_timestamp_header_if_present(key: str, timestamp: Optional[datetime.datetime]):
if timestamp is not None and isinstance(timestamp, datetime.datetime):
return '{key}:{timestamp}\n'.format(key=key, timestamp=timestamp.replace(microsecond=0).isoformat())
else:
return ''
class SIGatewayClient(_SIAbstractGatewayClient):
"""
Simple, synchronous (blocking) OpenStuder gateway client.
This client uses a synchronous model which has the advantage to be much simpler to use than the asynchronous version SIAsyncGatewayClient. The drawback is that device message
indications are ignored by this client and subscriptions to property changes are not possible.
"""
    def __init__(self):
        # Connection state machine starts disconnected; the WebSocket handle,
        # granted access level and gateway version are populated by connect().
        super(SIGatewayClient, self).__init__()
        self.__state: SIConnectionState = SIConnectionState.DISCONNECTED
        self.__ws: Optional[websocket.WebSocket] = None
        self.__access_level: SIAccessLevel = SIAccessLevel.NONE
        self.__gateway_version: str = ''
def connect(self, host: str, port: int = 1987, user: str = None, password: str = None) -> SIAccessLevel:
"""
Establishes the WebSocket connection to the OpenStuder gateway and executes the user authorization process once the connection has been established. This method blocks the
current thread until the operation (authorize) has been completed or an error occurred. The method returns the access level granted to the client during authorization on
success or throws an **SIProtocolError** otherwise.
:param host: Hostname or IP address of the OpenStuder gateway to connect to.
:param port: TCP port used for the connection to the OpenStuder gateway, defaults to 1987.
:param user: Username send to the gateway used for authorization.
:param password: Password send to the gateway used for authorization.
:return: Access Level granted to the client.
:raises SIProtocolError: If the connection could not be established, or the authorization was refused.
"""
# Ensure that the client is in the DISCONNECTED state.
self.__ensure_in_state(SIConnectionState.DISCONNECTED)
# Connect to WebSocket server.
self.__state = SIConnectionState.CONNECTING
self.__ws = websocket.create_connection('ws://{host}:{port}'.format(host=host, port=port))
# Authorize client.
self.__state = SIConnectionState.AUTHORIZING
if user is None or password is None:
self.__ws.send(super(SIGatewayClient, self).encode_authorize_frame_without_credentials())
else:
self.__ws.send(super(SIGatewayClient, self).encode_authorize_frame_with_credentials(user, password))
try:
self.__access_level, self.__gateway_version = super(SIGatewayClient, self).decode_authorized_frame(self.__ws.recv())
except ConnectionRefusedError:
self.__state = SIConnectionState.DISCONNECTED
raise SIProtocolError('WebSocket connection refused')
# Change state to connected.
self.__state = SIConnectionState.CONNECTED
# Return access level.
return self.__access_level
def state(self) -> SIConnectionState:
"""
Returns the current state of the client. See **SIConnectionState** for details.
:return: Current state of the client.
"""
return self.__state
def access_level(self) -> SIAccessLevel:
"""
Return the access level the client has gained on the gateway connected. See **SIAccessLevel** for details.
:return: Access level granted to client.
"""
return self.__access_level
def gateway_version(self) -> str:
"""
Returns the version of the OpenStuder gateway software running on the host the client is connected to.
:return: Version of the gateway software.
"""
return self.__gateway_version
def enumerate(self) -> Tuple[SIStatus, int]:
"""
Instructs the gateway to scan every configured and functional device access driver for new devices and remove devices that do not respond anymore. Returns the status of
the operation, and the number of devices present.
:return: Returns two values. 1: operation status, 2: the number of devices present.
:raises SIProtocolError: On a connection, protocol of framing error.
"""
# Ensure that the client is in the CONNECTED state.
self.__ensure_in_state(SIConnectionState.CONNECTED)
# Encode and send ENUMERATE message to gateway.
self.__ws.send(super(SIGatewayClient, self).encode_enumerate_frame())
# Wait for ENUMERATED message, decode it and return data.
return super(SIGatewayClient, self).decode_enumerated_frame(self.__receive_frame_until_commands(['ENUMERATED', 'ERROR']))
def describe(self, device_access_id: str = None, device_id: str = None, property_id: int = None, flags: SIDescriptionFlags = None) -> Tuple[SIStatus, Optional[str], object]:
"""
This method can be used to retrieve information about the available devices and their properties from the connected gateway. Using the optional device_access_id,
device_id and property_id parameters, the method can either request information about the whole topology, a particular device access instance, a device or a property.
The flags control the level of detail in the gateway's response.
:param device_access_id: Device access ID for which the description should be retrieved.
:param device_id: Device ID for which the description should be retrieved. Note that device_access_id must be present too.
:param property_id: Property ID for which the description should be retrieved. Note that device_access_id and device_id must be present too.
:param flags: Flags to control level of detail of the response.
:return: Returns three values. 1: Status of the operation, 2: the subject's id, 3: the description object.
:raises SIProtocolError: On a connection, protocol of framing error.
"""
# Ensure that the client is in the CONNECTED state.
self.__ensure_in_state(SIConnectionState.CONNECTED)
# Encode and send DESCRIBE message to gateway.
self.__ws.send(super(SIGatewayClient, self).encode_describe_frame(device_access_id, device_id, property_id, flags))
# Wait for DESCRIPTION message, decode it and return data.
return super(SIGatewayClient, self).decode_description_frame(self.__receive_frame_until_commands(['DESCRIPTION', 'ERROR']))
def find_properties(self, property_id: str) -> Tuple[SIStatus, str, int, List[str]]:
"""
This method is used to retrieve a list of existing properties that match the given property ID in the form "<device access ID>.<device ID>.<property ID>". The wildcard
character "*" is supported for <device access ID> and <device ID> fields.
For example "*.inv.3136" represents all properties with ID 3136 on the device with ID "inv" connected through any device access, "demo.*.3136" represents all properties
with ID 3136 on any device that disposes that property connected through the device access "demo" and finally "*.*.3136" represents all properties with ID 3136 on any
device that disposes that property connected through any device access.
:param property_id: The search wildcard ID.
:return: Returns four values: 1: Status of the find operation, 2: the searched ID (including wildcard character), 3: the number of properties found,
4: List of the property IDs.
:raises SIProtocolError: On a connection, protocol of framing error.
"""
# Ensure that the client is in the CONNECTED state.
self.__ensure_in_state(SIConnectionState.CONNECTED)
# Encode and send FIND PROPERTIES message to gateway.
self.__ws.send(super(SIGatewayClient, self).encode_find_properties_frame(property_id))
# Wait for PROPERTIES FOUND message, decode it and return data.
return super(SIGatewayClient, self).decode_properties_found_frame(self.__receive_frame_until_commands(['PROPERTIES FOUND', 'ERROR']))
def read_property(self, property_id: str) -> Tuple[SIStatus, str, Optional[any]]:
"""
This method is used to retrieve the actual value of a given property from the connected gateway. The property is identified by the property_id parameter.
:param property_id: The ID of the property to read in the form '{device access ID}.{device ID}.{property ID}'.
:return: Returns three values: 1: Status of the read operation, 2: the ID of the property read, 3: the value read.
:raises SIProtocolError: On a connection, protocol of framing error.
"""
# Ensure that the client is in the CONNECTED state.
self.__ensure_in_state(SIConnectionState.CONNECTED)
# Encode and send READ PROPERTY message to gateway.
self.__ws.send(super(SIGatewayClient, self).encode_read_property_frame(property_id))
# Wait for PROPERTY READ message, decode it and return data.
return super(SIGatewayClient, self).decode_property_read_frame(self.__receive_frame_until_commands(['PROPERTY READ', 'ERROR'])).to_tuple()
def read_properties(self, property_ids: List[str]) -> List[SIPropertyReadResult]:
"""
This method is used to retrieve the actual value of multiple properties at the same time from the connected gateway. The properties are identified by the property_ids
parameter.
:param property_ids: The IDs of the properties to read in the form '{device access ID}.{device ID}.{property ID}'.
:return: Returns one value: 1: List of statuses and values of all read properties.
:raises SIProtocolError: On a connection, protocol of framing error.
"""
# Ensure that the client is in the CONNECTED state.
self.__ensure_in_state(SIConnectionState.CONNECTED)
# Encode and send READ PROPERTIES message to gateway.
self.__ws.send(super(SIGatewayClient, self).encode_read_properties_frame(property_ids))
# Wait for PROPERTIES READ message, decode it and return data.
return super(SIGatewayClient, self).decode_properties_read_frame(self.__receive_frame_until_commands(['PROPERTIES READ', 'ERROR']))
def write_property(self, property_id: str, value: any = None, flags: SIWriteFlags = None) -> Tuple[SIStatus, str]:
"""
The write_property method is used to change the actual value of a given property. The property is identified by the property_id parameter and the new value is passed by the
optional value parameter.
This value parameter is optional as it is possible to write to properties with the data type "Signal" where there is no actual value written, the write operation rather
triggers an action on the device.
:param property_id: The ID of the property to write in the form '{device access ID}.{<device ID}.{<property ID}'.
:param value: Optional value to write.
:param flags: Write flags, See SIWriteFlags for details, if not provided the flags are not send by the client, and the gateway uses the default flags
(SIWriteFlags.PERMANENT).
:return: Returns two values: 1: Status of the write operation, 2: the ID of the property written.
:raises SIProtocolError: On a connection, protocol of framing error.
"""
# Ensure that the client is in the CONNECTED state.
self.__ensure_in_state(SIConnectionState.CONNECTED)
# Encode and send WRITE PROPERTY message to gateway.
self.__ws.send(super(SIGatewayClient, self).encode_write_property_frame(property_id, value, flags))
# Wait for PROPERTY WRITTEN message, decode it and return data.
return super(SIGatewayClient, self).decode_property_written_frame(self.__receive_frame_until_commands(['PROPERTY WRITTEN', 'ERROR']))
def read_datalog_properties(self, from_: datetime.datetime = None, to: datetime.datetime = None) -> Tuple[SIStatus, List[str]]:
"""
This method is used to retrieve the list of IDs of all properties for whom data is logged on the gateway. If a time window is given using from and to, only data in this
time windows is considered.
:param from_: Optional date and time of the start of the time window to be considered.
:param to: Optional date and time of the end of the time window to be considered.
:return: Returns two values: 1: Status of the operation, 2: List of all properties for whom data is logged on the gateway in the optional time window.
:raises SIProtocolError: On a connection, protocol of framing error.
"""
# Ensure that the client is in the CONNECTED state.
self.__ensure_in_state(SIConnectionState.CONNECTED)
# Encode and send READ DATALOG message to gateway.
self.__ws.send(super(SIGatewayClient, self).encode_read_datalog_frame(None, from_, to, None))
# Wait for DATALOG READ message, decode it and return data.
status, _, _, parameters = super(SIGatewayClient, self).decode_datalog_read_frame(self.__receive_frame_until_commands(['DATALOG READ', 'ERROR']))
return status, parameters.splitlines()
def read_datalog_csv(self, property_id: str, from_: datetime.datetime = None, to: datetime.datetime = None, limit: int = None) -> Tuple[SIStatus, str, int, str]:
"""
This method is used to retrieve all or a subset of logged data of a given property from the gateway.
:param property_id: Global ID of the property for which the logged data should be retrieved. It has to be in the form '{device access ID}.{device ID}.{property ID}'.
:param from_: Optional date and time from which the data has to be retrieved, defaults to the oldest value logged.
:param to: Optional date and time to which the data has to be retrieved, defaults to the current time on the gateway.
:param limit: Using this optional parameter you can limit the number of results retrieved in total.
:return: Returns four values: 1: Status of the operation, 2: id of the property, 3: number of entries, 4: Properties data in CSV format whereas the first column is the
date and time in ISO 8601 extended format, and the second column contains the actual values.
:raises SIProtocolError: On a connection, protocol of framing error.
"""
# Ensure that the client is in the CONNECTED state.
self.__ensure_in_state(SIConnectionState.CONNECTED)
# Encode and send READ DATALOG message to gateway.
self.__ws.send(super(SIGatewayClient, self).encode_read_datalog_frame(property_id, from_, to, limit))
# Wait for DATALOG READ message, decode it and return data.
return super(SIGatewayClient, self).decode_datalog_read_frame(self.__receive_frame_until_commands(['DATALOG READ', 'ERROR']))
def read_messages(self, from_: datetime.datetime = None, to: datetime.datetime = None, limit: int = None) -> Tuple[SIStatus, int, List[SIDeviceMessage]]:
"""
The read_messages() method can be used to retrieve all or a subset of stored messages send by devices on all buses in the past from the gateway.
:param from_: Optional date and time from which the messages have to be retrieved, defaults to the oldest message saved.
:param to: Optional date and time to which the messages have to be retrieved, defaults to the current time on the gateway.
:param limit: Using this optional parameter you can limit the number of messages retrieved in total.
:return: Returns three values. 1: the status of the operation, 2: the number of messages, 3: the list of retrieved messages.
:raises SIProtocolError: On a connection, protocol of framing error.
"""
# Ensure that the client is in the CONNECTED state.
self.__ensure_in_state(SIConnectionState.CONNECTED)
# Encode and send READ MESSAGES message to gateway.
self.__ws.send(super(SIGatewayClient, self).encode_read_messages_frame(from_, to, limit))
# Wait for MESSAGES READ message, decode it and return data.
return super(SIGatewayClient, self).decode_messages_read_frame(self.__receive_frame_until_commands(['MESSAGES READ', 'ERROR']))
def disconnect(self) -> None:
"""
Disconnects the client from the gateway.
"""
# Ensure that the client is in the CONNECTED state.
self.__ensure_in_state(SIConnectionState.CONNECTED)
# Change state to disconnected.
self.__state = SIConnectionState.DISCONNECTED
# Close the WebSocket
self.__ws.close()
def __ensure_in_state(self, state: SIConnectionState) -> None:
if self.__state != state:
raise SIProtocolError("invalid client state")
def __receive_frame_until_commands(self, commands: list) -> str:
while True:
frame = self.__ws.recv()
if super(SIGatewayClient, self).peek_frame_command(frame) in commands:
return frame
class SIAsyncGatewayClientCallbacks:
    """
    Base class containing all callback methods that can be called by the SIAsyncGatewayClient. You can use this as your base class and register it using
    SIAsyncGatewayClient.set_callbacks().
    """

    def on_connected(self, access_level: SIAccessLevel, gateway_version: str) -> None:
        """
        This method is called once the connection to the gateway could be established and the user has been successfully authorized.

        :param access_level: Access level that was granted to the user during authorization.
        :param gateway_version: Version of the OpenStuder software running on the gateway.
        """
        pass

    def on_disconnected(self) -> None:
        """
        Called when the connection to the OpenStuder gateway has been gracefully closed by either side or the connection was lost by any other reason.
        """
        pass

    def on_error(self, reason) -> None:
        """
        Called on severe errors.

        :param reason: Exception that caused the erroneous behavior.
        """
        pass

    def on_enumerated(self, status: SIStatus, device_count: int) -> None:
        """
        Called when the enumeration operation started using enumerate() has completed on the gateway.

        :param status: Operation status.
        :param device_count: Number of devices present.
        """
        pass

    def on_description(self, status: SIStatus, id_: Optional[str], description: object) -> None:
        """
        Called when the gateway returned the description requested using the describe() method.

        :param status: Status of the operation.
        :param id_: Subject's ID.
        :param description: Description object.
        """
        pass

    def on_properties_found(self, status: SIStatus, id_: str, count: int, properties: List[str]):
        """
        Called when the gateway returned the list of found properties requested using the find_properties() method.

        :param status: Status of the find operation.
        :param id_: The searched ID (including wildcard character).
        :param count: The number of properties found.
        :param properties: List of the property IDs.
        """
        pass

    def on_property_read(self, status: SIStatus, property_id: str, value: Optional[any]) -> None:
        """
        Called when the property read operation started using read_property() has completed on the gateway.

        :param status: Status of the read operation.
        :param property_id: ID of the property read.
        :param value: The value read.
        """
        pass

    def on_properties_read(self, results: List[SIPropertyReadResult]) -> None:
        """
        Called when the multiple properties read operation started using read_properties() has completed on the gateway.

        :param results: List of all results of the operation.
        """
        pass

    def on_property_written(self, status: SIStatus, property_id: str) -> None:
        """
        Called when the property write operation started using write_property() has completed on the gateway.

        :param status: Status of the write operation.
        :param property_id: ID of the property written.
        """
        pass

    def on_property_subscribed(self, status: SIStatus, property_id: str) -> None:
        """
        Called when the gateway returned the status of the property subscription requested using the subscribe_to_property() method.

        :param status: The status of the subscription.
        :param property_id: ID of the property.
        """
        pass

    def on_properties_subscribed(self, statuses: List[SIPropertySubscriptionResult]) -> None:
        """
        Called when the gateway returned the status of the properties subscription requested using the subscribe_to_properties() method.

        :param statuses: The statuses of the individual subscriptions.
        """
        pass

    def on_property_unsubscribed(self, status: SIStatus, property_id: str) -> None:
        """
        Called when the gateway returned the status of the property unsubscription requested using the unsubscribe_from_property() method.

        :param status: The status of the unsubscription.
        :param property_id: ID of the property.
        """
        pass

    def on_properties_unsubscribed(self, statuses: List[SIPropertySubscriptionResult]) -> None:
        """
        Called when the gateway returned the status of the properties unsubscription requested using the unsubscribe_from_properties() method.

        :param statuses: The statuses of the individual unsubscriptions.
        """
        pass

    def on_property_updated(self, property_id: str, value: any) -> None:
        """
        This callback is called whenever the gateway send a property update.

        :param property_id: ID of the updated property.
        :param value: The current value of the property.
        """
        pass

    def on_datalog_properties_read(self, status: SIStatus, properties: List[str]) -> None:
        """
        Called when the datalog property list operation started using read_datalog_properties() has completed on the gateway.

        :param status: Status of the operation.
        :param properties: List of the IDs of the properties for whom data is available in the data log.
        """
        pass

    def on_datalog_read_csv(self, status: SIStatus, property_id: str, count: int, values: str) -> None:
        """
        Called when the datalog read operation started using read_datalog() has completed on the gateway. This version of the method returns the data in CSV format suitable to
        be written to a file.

        :param status: Status of the operation.
        :param property_id: ID of the property.
        :param count: Number of entries.
        :param values: Properties data in CSV format whereas the first column is the date and time in ISO 8601 extended format and the second column contains the actual values.
        """
        pass

    def on_device_message(self, message: SIDeviceMessage) -> None:
        """
        This callback is called whenever the gateway send a device message indication.

        :param message: The device message received.
        """
        pass

    def on_messages_read(self, status: SIStatus, count: int, messages: List[SIDeviceMessage]) -> None:
        """
        Called when the gateway returned the status of the read messages operation using the read_messages() method.

        :param status: The status of the operation.
        :param count: Number of messages retrieved.
        :param messages: List of retrieved messages.
        """
        pass
class SIAsyncGatewayClient(_SIAbstractGatewayClient):
"""
Complete, asynchronous (non-blocking) OpenStuder gateway client.
This client uses an asynchronous model which has the disadvantage to be a bit harder to use than the synchronous version. The advantages are that long operations do not block
the main thread as all results are reported using callbacks, device message indications are supported and subscriptions to property changes are possible.
"""
def __init__(self):
    """Initializes the client in the DISCONNECTED state with all callbacks unset."""
    super(SIAsyncGatewayClient, self).__init__()
    # Connection state machine: DISCONNECTED -> CONNECTING -> AUTHORIZING -> CONNECTED.
    self.__state: SIConnectionState = SIConnectionState.DISCONNECTED
    # WebSocketApp driving the connection, created in connect().
    self.__ws: Optional[websocket.WebSocketApp] = None
    # Daemon thread running the WebSocket event loop (background mode only).
    self.__thread: Optional[Thread] = None
    # Access level granted by the gateway during authorization.
    self.__access_level: SIAccessLevel = SIAccessLevel.NONE
    # Gateway software version reported during authorization.
    self.__gateway_version: str = ''
    # Credentials saved by connect(); presumably used for the authorization handshake once the socket opens — confirm in __on_open().
    self.__user: Optional[str] = None
    self.__password: Optional[str] = None
    self.on_connected: Optional[Callable[[SIAccessLevel, str], None]] = None
    """
    This callback is called once the connection to the gateway could be established and the user has been successfully authorized.

    The callback takes two arguments. 1: the access level that was granted to the user during authorization, 2: the version of the OpenStuder software running on the gateway.
    """
    self.on_disconnected: Optional[Callable[[], None]] = None
    """
    Called when the connection to the OpenStuder gateway has been gracefully closed by either side or the connection was lost by any other reason.

    This callback has no parameters.
    """
    self.on_error: Optional[Callable[[Exception], None]] = None
    """
    Called on severe errors.

    The single parameter passed to the callback is the exception that caused the erroneous behavior.
    """
    # NOTE(review): annotated as Callable[[str, ...]] although the sibling SIAsyncGatewayClientCallbacks.on_enumerated receives a SIStatus — confirm the decoded status type.
    self.on_enumerated: Optional[Callable[[str, int], None]] = None
    """
    Called when the enumeration operation started using enumerate() has completed on the gateway.

    The callback takes two arguments. 1: operation status, 2: the number of devices present.
    """
    self.on_description: Optional[Callable[[str, Optional[str], object], None]] = None
    """
    Called when the gateway returned the description requested using the describe() method.

    The callback takes three parameters: 1: Status of the operation, 2: the subject's ID, 3: the description object.
    """
    self.on_properties_found: Optional[Callable[[SIStatus, str, int, List[str]], None]] = None
    """
    Called when the gateway returned the list of found properties requested using the find_properties() method.

    The callback takes four parameters: 1: Status of the find operation, 2: the searched ID (including wildcard character), 3: the number of properties found,
    4: List of the property IDs.
    """
    self.on_property_read: Optional[Callable[[str, str, Optional[any]], None]] = None
    """
    Called when the property read operation started using read_property() has completed on the gateway.

    The callback takes three parameters: 1: Status of the read operation, 2: the ID of the property read, 3: the value read.
    """
    self.on_properties_read: Optional[Callable[[List[SIPropertyReadResult]], None]] = None
    """
    Called when the multiple properties read operation started using read_properties() has completed on the gateway.

    The callback takes one parameter: 1: List of all results of the operation.
    """
    self.on_property_written: Optional[Callable[[str, str], None]] = None
    """
    Called when the property write operation started using write_property() has completed on the gateway.

    The callback takes two parameters: 1: Status of the write operation, 2: the ID of the property written.
    """
    self.on_property_subscribed: Optional[Callable[[str, str], None]] = None
    """
    Called when the gateway returned the status of the property subscription requested using the subscribe_to_property() method.

    The callback takes two parameters: 1: The status of the subscription, 2: The ID of the property.
    """
    self.on_properties_subscribed: Optional[Callable[[List[SIPropertySubscriptionResult]], None]] = None
    """
    Called when the gateway returned the status of the properties subscription requested using the subscribe_to_properties() method.

    The callback takes one parameter: 1: List of statuses of individual subscription requests.
    """
    self.on_property_unsubscribed: Optional[Callable[[str, str], None]] = None
    """
    Called when the gateway returned the status of the property unsubscription requested using the unsubscribe_from_property() method.

    The callback takes two parameters: 1: The status of the unsubscription, 2: The ID of the property.
    """
    self.on_properties_unsubscribed: Optional[Callable[[List[SIPropertySubscriptionResult]], None]] = None
    """
    Called when the gateway returned the status of the properties unsubscription requested using the unsubscribe_from_properties() method.

    The callback takes one parameter: 1: List of statuses of individual unsubscription requests.
    """
    self.on_property_updated: Optional[Callable[[str, any], None]] = None
    """
    This callback is called whenever the gateway send a property update.

    The callback takes two parameters: 1: the ID of the property that has updated, 2: the actual value.
    """
    self.on_datalog_properties_read: Optional[Callable[[SIStatus, List[str]], None]] = None
    """
    Called when the datalog property list operation started using read_datalog_properties() has completed on the gateway.

    The callback takes 2 parameters: 1: Status of the operation, 2: List of the IDs of the properties for whom data is available in the data log.
    """
    self.on_datalog_read_csv: Optional[Callable[[str, str, int, str], None]] = None
    """
    Called when the datalog read operation started using read_datalog() has completed on the gateway. This version of the callback returns the data in CSV format suitable to
    be written to a file.

    The callback takes four parameters: 1: Status of the operation, 2: ID of the property, 3: number of entries, 4: properties data in CSV format whereas the first column is
    the date and time in ISO 8601 extended format and the second column contains the actual values.
    """
    self.on_device_message: Optional[Callable[[SIDeviceMessage], None]] = None
    """
    This callback is called whenever the gateway send a device message indication.

    The callback takes one parameter, the device message object.
    """
    self.on_messages_read: Optional[Callable[[str, Optional[int], List[SIDeviceMessage]], None]] = None
    """
    Called when the gateway returned the status of the read messages operation using the read_messages() method.

    The callback takes three parameters: 1: the status of the operation, 2: the number of messages retrieved, 3: the list of retrieved messages.
    """
def connect(self, host: str, port: int = 1987, user: str = None, password: str = None, background: bool = True) -> None:
    """
    Establishes the WebSocket connection to the OpenStuder gateway and executes the user authorization process once the connection has been established in the background. This
    method returns immediately and does not block the current thread.

    The status of the connection attempt is reported either by the on_connected() callback on success or the on_error() callback if the connection could not be established
    or the authorisation for the given user was rejected by the gateway.

    :param host: Hostname or IP address of the OpenStuder gateway to connect to.
    :param port: TCP port used for the connection to the OpenStuder gateway, defaults to 1987.
    :param user: Username sent to the gateway used for authorization.
    :param password: Password sent to the gateway used for authorization.
    :param background: If true, the handling of the WebSocket connection is done in the background, if false the current thread is taken over.
    :raises SIProtocolError: If there was an error initiating the WebSocket connection.
    """
    # Ensure that the client is in the DISCONNECTED state.
    self.__ensure_in_state(SIConnectionState.DISCONNECTED)

    # Save parameter for later use (the authorization handshake happens asynchronously once the socket is open).
    self.__user = user
    self.__password = password

    # Connect to WebSocket server.
    self.__state = SIConnectionState.CONNECTING
    self.__ws = websocket.WebSocketApp('ws://{host}:{port}'.format(host=host, port=port),
                                       on_open=self.__on_open,
                                       on_message=self.__on_message,
                                       on_error=self.__on_error,
                                       on_close=self.__on_close
                                       )

    # TODO: Start connection timeout.

    # If background mode is selected, start a daemon thread for the connection handling, otherwise take over current thread.
    if background:
        # Pass daemon=True to the constructor: Thread.setDaemon() is deprecated since Python 3.10.
        self.__thread = Thread(target=self.__ws.run_forever, daemon=True)
        self.__thread.start()
    else:
        self.__ws.run_forever()
def set_callbacks(self, callbacks: SIAsyncGatewayClientCallbacks) -> None:
    """
    Configures the client to use all callbacks of the passed abstract client callback class. Using this you can set all callbacks to be called on the given object and avoid
    having to set each callback individually.

    :param callbacks: Object derived from SIAsyncGatewayClientCallbacks to be used for all callbacks.
    """
    # Silently ignore objects that are not derived from the callbacks base class, as before.
    if not isinstance(callbacks, SIAsyncGatewayClientCallbacks):
        return
    # Wire every callback attribute to the bound method of the same name on the callbacks object.
    for callback_name in ('on_connected', 'on_disconnected', 'on_error', 'on_enumerated', 'on_description',
                          'on_properties_found', 'on_property_read', 'on_properties_read', 'on_property_written',
                          'on_property_subscribed', 'on_properties_subscribed', 'on_property_unsubscribed',
                          'on_properties_unsubscribed', 'on_property_updated', 'on_datalog_properties_read',
                          'on_datalog_read_csv', 'on_device_message', 'on_messages_read'):
        setattr(self, callback_name, getattr(callbacks, callback_name))
def state(self) -> SIConnectionState:
    """
    Returns the current state of the client. See **SIConnectionState** for details.

    :return: Current state of the client.
    """
    current_state = self.__state
    return current_state
def access_level(self) -> SIAccessLevel:
    """
    Return the access level the client has gained on the gateway connected. See **SIAccessLevel** for details.

    :return: Access level granted to client.
    """
    granted_level = self.__access_level
    return granted_level
def gateway_version(self) -> str:
    """
    Returns the version of the OpenStuder gateway software running on the host the client is connected to.

    :return: Version of the gateway software.
    """
    reported_version = self.__gateway_version
    return reported_version
def enumerate(self) -> None:
    """
    Instructs the gateway to scan every configured and functional device access driver for new devices and remove devices that do not respond anymore.

    The status of the operation and the number of devices present are reported using the on_enumerated() callback.

    :raises SIProtocolError: If the client is not connected or not yet authorized.
    """
    # Requests are only allowed once the client is fully connected and authorized.
    self.__ensure_in_state(SIConnectionState.CONNECTED)
    # Fire-and-forget: the ENUMERATED reply arrives asynchronously via the message handler.
    request = super(SIAsyncGatewayClient, self).encode_enumerate_frame()
    self.__ws.send(request)
def describe(self, device_access_id: str = None, device_id: str = None, property_id: int = None, flags: SIDescriptionFlags = None) -> None:
    """
    Requests a description of the whole topology, one device access instance,
    one device or one property, depending on which of the optional ID
    parameters are supplied. The result arrives via the on_description()
    callback.

    :param device_access_id: Device access ID for which the description should be retrieved.
    :param device_id: Device ID to describe; requires device_access_id as well.
    :param property_id: Property ID to describe; requires device_access_id and device_id as well.
    :param flags: Optional SIDescriptionFlags controlling the level of detail of the response.
    :raises SIProtocolError: If the client is not connected or not yet authorized.
    """
    # Only allowed in the CONNECTED state.
    self.__ensure_in_state(SIConnectionState.CONNECTED)
    # Build the DESCRIBE frame and push it over the websocket.
    frame = super(SIAsyncGatewayClient, self).encode_describe_frame(device_access_id, device_id, property_id, flags)
    self.__ws.send(frame)
def find_properties(self, property_id: str) -> None:
    """
    Searches for existing properties matching a wildcard ID of the form
    "<device access ID>.<device ID>.<property ID>", where "*" is accepted for
    the device access and device fields. For example "*.inv.3136" matches
    property 3136 of device "inv" on any device access, and "*.*.3136" matches
    property 3136 on any device of any device access.
    Matches are delivered via the on_properties_found() callback.

    :param property_id: The search wildcard ID.
    :raises SIProtocolError: If the client is not connected or not yet authorized.
    """
    # Only allowed in the CONNECTED state.
    self.__ensure_in_state(SIConnectionState.CONNECTED)
    # Build the FIND PROPERTIES frame and push it over the websocket.
    frame = super(SIAsyncGatewayClient, self).encode_find_properties_frame(property_id)
    self.__ws.send(frame)
def read_property(self, property_id: str) -> None:
    """
    Requests the current value of a single property from the connected gateway.
    The read status and value arrive via the on_property_read() callback.

    :param property_id: Property ID in the form '{device access ID}.{device ID}.{property ID}'.
    :raises SIProtocolError: If the client is not connected or not yet authorized.
    """
    # Only allowed in the CONNECTED state.
    self.__ensure_in_state(SIConnectionState.CONNECTED)
    # Build the READ PROPERTY frame and push it over the websocket.
    frame = super(SIAsyncGatewayClient, self).encode_read_property_frame(property_id)
    self.__ws.send(frame)
def read_properties(self, property_ids: List[str]) -> None:
    """
    Requests the current values of several properties at once from the
    connected gateway. The individual read statuses and values arrive via the
    on_properties_read() callback.

    :param property_ids: IDs of the properties to read, each in the form
        '{device access ID}.{device ID}.{property ID}'.
    :raises SIProtocolError: If the client is not connected or not yet authorized.
    """
    # Only allowed in the CONNECTED state.
    self.__ensure_in_state(SIConnectionState.CONNECTED)
    # Build the READ PROPERTIES frame and push it over the websocket.
    frame = super(SIAsyncGatewayClient, self).encode_read_properties_frame(property_ids)
    self.__ws.send(frame)
def write_property(self, property_id: str, value: any = None, flags: SIWriteFlags = None) -> None:
    """
    Writes a new value to a property. The value is optional because properties
    of data type "Signal" carry no payload - writing them just triggers an
    action on the device. The write status arrives via the
    on_property_written() callback.

    :param property_id: Property ID in the form '{device access ID}.{device ID}.{property ID}'.
    :param value: Optional value to write.
    :param flags: Optional SIWriteFlags; when omitted no flags are sent and the
        gateway applies its default (SIWriteFlags.PERMANENT).
    :raises SIProtocolError: If the client is not connected or not yet authorized.
    """
    # Only allowed in the CONNECTED state.
    self.__ensure_in_state(SIConnectionState.CONNECTED)
    # Build the WRITE PROPERTY frame and push it over the websocket.
    frame = super(SIAsyncGatewayClient, self).encode_write_property_frame(property_id, value, flags)
    self.__ws.send(frame)
def subscribe_to_property(self, property_id: str) -> None:
    """
    Subscribes to a single property on the connected gateway. The request
    status arrives via the on_property_subscribed() callback.

    :param property_id: Property ID in the form '{device access ID}.{device ID}.{property ID}'.
    :raises SIProtocolError: If the client is not connected or not yet authorized.
    """
    # Only allowed in the CONNECTED state.
    self.__ensure_in_state(SIConnectionState.CONNECTED)
    # Build the SUBSCRIBE PROPERTY frame and push it over the websocket.
    frame = super(SIAsyncGatewayClient, self).encode_subscribe_property_frame(property_id)
    self.__ws.send(frame)
def subscribe_to_properties(self, property_ids: List[str]) -> None:
    """
    Subscribes to several properties on the connected gateway at once. The
    request status arrives via the on_properties_subscribed() callback.

    :param property_ids: IDs of the properties to subscribe to, each in the
        form '{device access ID}.{device ID}.{property ID}'.
    :raises SIProtocolError: If the client is not connected or not yet authorized.
    """
    # Only allowed in the CONNECTED state.
    self.__ensure_in_state(SIConnectionState.CONNECTED)
    # Build the SUBSCRIBE PROPERTIES frame and push it over the websocket.
    frame = super(SIAsyncGatewayClient, self).encode_subscribe_properties_frame(property_ids)
    self.__ws.send(frame)
def unsubscribe_from_property(self, property_id: str) -> None:
    """
    Unsubscribes from a single property on the connected gateway. The request
    status arrives via the on_property_unsubscribed() callback.

    :param property_id: Property ID in the form '{device access ID}.{device ID}.{property ID}'.
    :raises SIProtocolError: If the client is not connected or not yet authorized.
    """
    # Only allowed in the CONNECTED state.
    self.__ensure_in_state(SIConnectionState.CONNECTED)
    # Build the UNSUBSCRIBE PROPERTY frame and push it over the websocket.
    frame = super(SIAsyncGatewayClient, self).encode_unsubscribe_property_frame(property_id)
    self.__ws.send(frame)
def unsubscribe_from_properties(self, property_ids: List[str]) -> None:
    """
    Unsubscribes from several properties on the connected gateway at once. The
    request status arrives via the on_properties_unsubscribed() callback.

    :param property_ids: IDs of the properties to unsubscribe from, each in the
        form '{device access ID}.{device ID}.{property ID}'.
    :raises SIProtocolError: If the client is not connected or not yet authorized.
    """
    # Only allowed in the CONNECTED state.
    self.__ensure_in_state(SIConnectionState.CONNECTED)
    # Build the UNSUBSCRIBE PROPERTIES frame and push it over the websocket.
    frame = super(SIAsyncGatewayClient, self).encode_unsubscribe_properties_frame(property_ids)
    self.__ws.send(frame)
def read_datalog_properties(self, from_: datetime.datetime = None, to: datetime.datetime = None) -> None:
    """
    Requests the IDs of all properties for which the gateway has logged data,
    optionally restricted to the time window [from_, to]. The status and the
    property list arrive via the on_datalog_properties_read() callback.

    :param from_: Optional start of the time window to consider.
    :param to: Optional end of the time window to consider.
    :raises SIProtocolError: On a connection, protocol or framing error.
    """
    # Only allowed in the CONNECTED state.
    self.__ensure_in_state(SIConnectionState.CONNECTED)
    # A READ DATALOG frame without a property ID (and without a limit) asks
    # for the list of logged properties instead of actual data.
    frame = super(SIAsyncGatewayClient, self).encode_read_datalog_frame(None, from_, to, None)
    self.__ws.send(frame)
def read_datalog(self, property_id: str, from_: datetime.datetime = None, to: datetime.datetime = None, limit: int = None) -> None:
    """
    Requests all or a subset of the logged data of one property from the
    gateway. The status and the values arrive via the on_datalog_read_csv()
    callback.

    :param property_id: Global property ID in the form '{device access ID}.{device ID}.{property ID}'.
    :param from_: Optional start date/time; defaults to the oldest value logged.
    :param to: Optional end date/time; defaults to the current time on the gateway.
    :param limit: Optional cap on the total number of results returned.
    :raises SIProtocolError: If the client is not connected or not yet authorized.
    """
    # Only allowed in the CONNECTED state.
    self.__ensure_in_state(SIConnectionState.CONNECTED)
    # Build the READ DATALOG frame and push it over the websocket.
    frame = super(SIAsyncGatewayClient, self).encode_read_datalog_frame(property_id, from_, to, limit)
    self.__ws.send(frame)
def read_messages(self, from_: datetime.datetime = None, to: datetime.datetime = None, limit: int = None) -> None:
    """
    Requests all or a subset of the device messages stored on the gateway.
    The status and the messages arrive via the on_messages_read() callback.

    :param from_: Optional start date/time; defaults to the oldest message saved.
    :param to: Optional end date/time; defaults to the current time on the gateway.
    :param limit: Optional cap on the total number of messages returned.
    :raises SIProtocolError: If the client is not connected or not yet authorized.
    """
    # Only allowed in the CONNECTED state.
    self.__ensure_in_state(SIConnectionState.CONNECTED)
    # Build the READ MESSAGES frame and push it over the websocket.
    frame = super(SIAsyncGatewayClient, self).encode_read_messages_frame(from_, to, limit)
    self.__ws.send(frame)
def disconnect(self) -> None:
    """
    Disconnects the client from the gateway.

    :raises SIProtocolError: If the client is not currently connected.
    """
    # Disconnecting only makes sense from the CONNECTED state.
    self.__ensure_in_state(SIConnectionState.CONNECTED)
    # Close the underlying websocket connection.
    self.__ws.close()
def __ensure_in_state(self, state: SIConnectionState) -> None:
    """Raise SIProtocolError unless the client is exactly in the given state."""
    # Guard used by all public operations before any frame is sent.
    if self.__state == state:
        return
    raise SIProtocolError("invalid client state")
def __on_open(self, ws) -> None:
    """Websocket open handler: starts the authorization handshake."""
    # The socket is up - move to AUTHORIZING before any frame exchange.
    self.__state = SIConnectionState.AUTHORIZING
    # Send the AUTHORIZE frame, with credentials when both are available.
    if self.__user is not None and self.__password is not None:
        frame = super(SIAsyncGatewayClient, self).encode_authorize_frame_with_credentials(self.__user, self.__password)
    else:
        frame = super(SIAsyncGatewayClient, self).encode_authorize_frame_without_credentials()
    self.__ws.send(frame)
def __on_message(self, ws, frame: str) -> None:
    """
    Websocket message handler: decodes an incoming frame and dispatches it to
    the matching user callback.

    While AUTHORIZING only the AUTHORIZED frame is handled; once CONNECTED the
    frame command selects the callback. Any SIProtocolError raised during
    decoding is forwarded to on_error(); if it occurs during authorization the
    connection is closed and the client returns to DISCONNECTED.
    """
    # Determine the actual command.
    command = super(SIAsyncGatewayClient, self).peek_frame_command(frame)
    try:
        # In AUTHORIZE state we only handle AUTHORIZED messages.
        if self.__state == SIConnectionState.AUTHORIZING:
            self.__access_level, self.__gateway_version = super(SIAsyncGatewayClient, self).decode_authorized_frame(frame)
            # Change state to CONNECTED.
            self.__state = SIConnectionState.CONNECTED
            # Call callback if present.
            if callable(self.on_connected):
                self.on_connected(self.__access_level, self.__gateway_version)
        # In CONNECTED state we handle all messages except the AUTHORIZED message.
        else:
            if command == 'ERROR':
                if callable(self.on_error):
                    _, headers, _ = super(SIAsyncGatewayClient, self).decode_frame(frame)
                    self.on_error(SIProtocolError(headers['reason']))
            elif command == 'ENUMERATED':
                status, device_count = super(SIAsyncGatewayClient, self).decode_enumerated_frame(frame)
                if callable(self.on_enumerated):
                    self.on_enumerated(status, device_count)
            elif command == 'DESCRIPTION':
                status, id_, description = super(SIAsyncGatewayClient, self).decode_description_frame(frame)
                if callable(self.on_description):
                    self.on_description(status, id_, description)
            elif command == 'PROPERTIES FOUND':
                # Fix: the result was previously bound to the name "list",
                # shadowing the builtin within this handler.
                status, id_, count, properties = super(SIAsyncGatewayClient, self).decode_properties_found_frame(frame)
                if callable(self.on_properties_found):
                    self.on_properties_found(status, id_, count, properties)
            elif command == 'PROPERTY READ':
                result = super(SIAsyncGatewayClient, self).decode_property_read_frame(frame)
                if callable(self.on_property_read):
                    self.on_property_read(result.status, result.id, result.value)
            elif command == 'PROPERTIES READ':
                results = super(SIAsyncGatewayClient, self).decode_properties_read_frame(frame)
                if callable(self.on_properties_read):
                    self.on_properties_read(results)
            elif command == 'PROPERTY WRITTEN':
                status, id_ = super(SIAsyncGatewayClient, self).decode_property_written_frame(frame)
                if callable(self.on_property_written):
                    self.on_property_written(status, id_)
            elif command == 'PROPERTY SUBSCRIBED':
                status, id_ = super(SIAsyncGatewayClient, self).decode_property_subscribed_frame(frame)
                if callable(self.on_property_subscribed):
                    self.on_property_subscribed(status, id_)
            elif command == 'PROPERTIES SUBSCRIBED':
                statuses = super(SIAsyncGatewayClient, self).decode_properties_subscribed_frame(frame)
                if callable(self.on_properties_subscribed):
                    self.on_properties_subscribed(statuses)
            elif command == 'PROPERTY UNSUBSCRIBED':
                status, id_ = super(SIAsyncGatewayClient, self).decode_property_unsubscribed_frame(frame)
                if callable(self.on_property_unsubscribed):
                    self.on_property_unsubscribed(status, id_)
            elif command == 'PROPERTIES UNSUBSCRIBED':
                statuses = super(SIAsyncGatewayClient, self).decode_properties_unsubscribed_frame(frame)
                if callable(self.on_properties_unsubscribed):
                    self.on_properties_unsubscribed(statuses)
            elif command == 'PROPERTY UPDATE':
                id_, value = super(SIAsyncGatewayClient, self).decode_property_update_frame(frame)
                if callable(self.on_property_updated):
                    self.on_property_updated(id_, value)
            elif command == 'DATALOG READ':
                status, id_, count, values = super(SIAsyncGatewayClient, self).decode_datalog_read_frame(frame)
                # A missing ID means this frame answers a "list logged
                # properties" request; otherwise it carries CSV data.
                if id_ is None:
                    if callable(self.on_datalog_properties_read):
                        self.on_datalog_properties_read(status, values.splitlines())
                else:
                    if callable(self.on_datalog_read_csv):
                        self.on_datalog_read_csv(status, id_, count, values)
            elif command == 'DEVICE MESSAGE':
                message = super(SIAsyncGatewayClient, self).decode_device_message_frame(frame)
                if callable(self.on_device_message):
                    self.on_device_message(message)
            elif command == 'MESSAGES READ':
                status, count, messages = super(SIAsyncGatewayClient, self).decode_messages_read_frame(frame)
                if callable(self.on_messages_read):
                    self.on_messages_read(status, count, messages)
            else:
                if callable(self.on_error):
                    self.on_error(SIProtocolError('unsupported frame command: {command}'.format(command=command)))
    except SIProtocolError as error:
        if callable(self.on_error):
            self.on_error(error)
        if self.__state == SIConnectionState.AUTHORIZING:
            # A failed authorization is fatal: drop the connection.
            self.__ws.close()
            self.__state = SIConnectionState.DISCONNECTED
def __on_error(self, ws, error: Exception) -> None:
    """Websocket error handler: wraps the error and forwards it to on_error()."""
    if callable(self.on_error):
        # Fix: error.args may hold fewer than two entries (e.g. a plain
        # Exception("msg")), in which case the old error.args[1] lookup
        # itself raised IndexError. Fall back to str(error) then.
        detail = error.args[1] if len(error.args) > 1 else str(error)
        self.on_error(SIProtocolError(detail))
def __on_close(self, ws) -> None:
    """Websocket close handler: resets client state and notifies on_disconnected()."""
    # Change state to DISCONNECTED.
    self.__state = SIConnectionState.DISCONNECTED
    # Change access level to NONE.
    self.__access_level = SIAccessLevel.NONE
    # Call callback.
    if callable(self.on_disconnected):
        self.on_disconnected()
    # Wait for the end of the thread.
    # NOTE(review): if this handler runs on self.__thread itself, joining the
    # current thread would raise RuntimeError - confirm which thread calls this.
    self.__thread.join()
| 48.541469 | 180 | 0.672435 | 9,824 | 81,938 | 5.470684 | 0.051608 | 0.011722 | 0.010029 | 0.007554 | 0.744176 | 0.687295 | 0.646975 | 0.602542 | 0.573013 | 0.543279 | 0 | 0.002771 | 0.251251 | 81,938 | 1,687 | 181 | 48.570243 | 0.873238 | 0.348752 | 0 | 0.398417 | 0 | 0 | 0.071592 | 0.011891 | 0 | 0 | 0 | 0.000593 | 0 | 1 | 0.138522 | false | 0.036939 | 0.009235 | 0.01715 | 0.295515 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cfb605ca953b0b2a145c9dfb2721c7a45cd2d149 | 2,485 | py | Python | Code/Aurora-to-Redshift-SnapshotExport-ELT/python/rds_snap_exp_check_snapshot.py | aws-samples/aurora-and-database-migration-labs | 5f7f4ab7985dda04c2a6cdc8a04fb34491f6a0aa | [
"MIT-0"
] | 23 | 2019-02-18T17:20:17.000Z | 2022-03-31T18:12:49.000Z | Code/Aurora-to-Redshift-SnapshotExport-ELT/python/rds_snap_exp_check_snapshot.py | CloudGuru79/aurora-and-database-migration-labs | e70b6a41747d6117d763a708df035e73db88e107 | [
"MIT-0"
] | 1 | 2021-05-25T14:07:51.000Z | 2021-08-04T16:06:44.000Z | Code/Aurora-to-Redshift-SnapshotExport-ELT/python/rds_snap_exp_check_snapshot.py | CloudGuru79/aurora-and-database-migration-labs | e70b6a41747d6117d763a708df035e73db88e107 | [
"MIT-0"
] | 11 | 2019-03-29T13:11:29.000Z | 2022-03-26T20:47:38.000Z | import boto3
from datetime import datetime, timezone
class SnapshotException(Exception):
    """Raised when no exportable snapshot matching the run date is found."""
def lambda_handler(event, context):
    """
    Find the automated Aurora cluster snapshot created on the run date that is
    ready ('available') for export, and return its ARN.

    All parameters come from the triggering CloudWatch event rule; the
    snapshot-export IAM role is assumed to obtain the RDS client credentials.

    :raises SnapshotException: when no matching snapshot exists.
    """
    # Input from Cloudwatch event rule.
    aurora_cluster_id = event["aurora_cluster_id"]
    s3_bucket_for_rds_snap_exp = event["s3_bucket_for_rds_snap_exp"]
    iam_role_for_rds_snap_exp = event["iam_role_for_rds_snap_exp"]
    kms_key_id_for_rds_snap_exp = event["kms_key_id_for_rds_snap_exp"]
    export_list = event["export_list"]
    run_date = event["run_date"]

    # Default the run date to today (UTC) when the event does not provide one.
    if run_date == "":
        run_date = datetime.now(timezone.utc).strftime('%Y-%m-%d')
    print('Run date is:' + run_date)

    # Assume the snapshot-export role and build an RDS client from the
    # temporary credentials.
    sts_client = boto3.client('sts')
    assumed = sts_client.assume_role(
        DurationSeconds=3600,
        RoleArn=iam_role_for_rds_snap_exp,
        RoleSessionName='snapshot-export-demo-session'
    )
    credentials = assumed['Credentials']
    session = boto3.session.Session(
        aws_access_key_id=credentials['AccessKeyId'],
        aws_secret_access_key=credentials['SecretAccessKey'],
        aws_session_token=credentials['SessionToken']
    )
    rds_client = session.client('rds')

    # List the cluster's automated snapshots.
    snapshots = rds_client.describe_db_cluster_snapshots(
        DBClusterIdentifier=aurora_cluster_id,
        SnapshotType='automated'
    )['DBClusterSnapshots']

    # Pick the first available snapshot created on the run date.
    export_snapshot_arn = ''
    for snapshot in snapshots:
        snapshot_arn = snapshot['DBClusterSnapshotArn']
        snapshot_date = datetime.strftime(snapshot['SnapshotCreateTime'], '%Y-%m-%d')
        if snapshot['Status'] == 'available' and snapshot_date == run_date:
            export_snapshot_arn = snapshot_arn
            print('A valid snapshot to be exported matching the run date found: ' + snapshot_arn)
            break

    if export_snapshot_arn == '':
        print('Valid snapshot to export not found. Exiting...')
        raise SnapshotException("Snapshot Not Found")
    return export_snapshot_arn
cfb76a0451724cb5b9f942bb838e8d1e229a0d3d | 4,932 | py | Python | research/nlp/lstm_crf/eval.py | mindspore-ai/models | 9127b128e2961fd698977e918861dadfad00a44c | [
"Apache-2.0"
] | 77 | 2021-10-15T08:32:37.000Z | 2022-03-30T13:09:11.000Z | research/nlp/lstm_crf/eval.py | mindspore-ai/models | 9127b128e2961fd698977e918861dadfad00a44c | [
"Apache-2.0"
] | 3 | 2021-10-30T14:44:57.000Z | 2022-02-14T06:57:57.000Z | research/nlp/lstm_crf/eval.py | mindspore-ai/models | 9127b128e2961fd698977e918861dadfad00a44c | [
"Apache-2.0"
] | 24 | 2021-10-15T08:32:45.000Z | 2022-03-24T18:45:20.000Z | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
#################train lstm-crf example on CoNLL2000########################
"""
import os
from copy import deepcopy
import numpy as np
from src.util import F1, get_chunks, get_label_lists
from src.model_utils.config import config
from src.dataset import get_data_set
from src.LSTM_CRF import Lstm_CRF
from src.imdb import ImdbParser
from mindspore import Tensor, Model, context
from mindspore.train.serialization import load_checkpoint, load_param_into_net
def modelarts_process():
    """Rebase config.ckpt_file under config.output_path (ModelArts path fix-up, per the function name)."""
    config.ckpt_file = os.path.join(config.output_path, config.ckpt_file)
def eval_lstm_crf():
    """
    Evaluate a trained LSTM-CRF tagger on the CoNLL test split.

    Builds embeddings and the test dataset from the configured paths, restores
    the checkpoint, predicts label sequences and prints token accuracy plus
    chunk-level F1.
    """
    print('\neval.py config: \n', config)
    # Run in graph mode on the configured device.
    context.set_context(
        mode=context.GRAPH_MODE,
        save_graphs=False,
        device_id=config.device_id,
        device_target=config.device_target
    )
    embeddings_size = config.embed_size
    parser = ImdbParser(config.data_CoNLL_path,
                        config.glove_path,
                        config.data_CoNLL_path,
                        embed_size=config.embed_size
                        )
    embeddings, sequence_length, _, _, sequence_index, sequence_tag_index, tags_to_index_map \
        = parser.get_datas_embeddings(seg=['test'], build_data=False)
    embeddings_table = embeddings.astype(np.float32)
    # DynamicRNN in this network on Ascend platform only support the condition that the shape of input_size
    # and hiddle_size is multiples of 16, this problem will be solved later.
    if config.device_target == 'Ascend':
        pad_num = int(np.ceil(config.embed_size / 16) * 16 - config.embed_size)
        if pad_num > 0:
            # Zero-pad the embedding dimension up to the next multiple of 16.
            embeddings_table = np.pad(embeddings_table, [(0, 0), (0, pad_num)], 'constant')
        embeddings_size = int(np.ceil(config.embed_size / 16) * 16)
    ds_test = get_data_set(sequence_index, sequence_tag_index, config.batch_size)
    network = Lstm_CRF(vocab_size=embeddings.shape[0],
                       tag_to_index=tags_to_index_map,
                       embedding_size=embeddings_size,
                       hidden_size=config.num_hiddens,
                       num_layers=config.num_layers,
                       weight=Tensor(embeddings_table),
                       bidirectional=config.bidirectional,
                       batch_size=config.batch_size,
                       seq_length=sequence_length,
                       is_training=False)
    callback = F1(len(tags_to_index_map))
    model = Model(network)
    # Restore the trained weights into the network.
    param_dict = load_checkpoint(os.path.join(config.ckpt_save_path, config.ckpt_path))
    load_param_into_net(network, param_dict)
    print("============== Starting Testing ==============")
    # Accumulate gold and predicted label sequences over the whole test set.
    rest_golds_list = list()
    rest_preds_list = list()
    columns_list = ["feature", "label"]
    for data in ds_test.create_dict_iterator(num_epochs=1):
        input_data = []
        for i in columns_list:
            input_data.append(data[i])
        feature, label = input_data
        logits = model.predict(feature, label)
        logit_ids, label_ids = callback.update(logits, label)
        rest_preds = np.array(logit_ids)
        rest_preds = np.expand_dims(rest_preds, 0)
        rest_labels = deepcopy(label_ids)
        label_ids = np.expand_dims(label_ids, 0)
        rest_labels = np.expand_dims(rest_labels, 0)
        rest_golds, rest_preds = get_label_lists(rest_labels, rest_preds, label_ids)
        rest_golds_list += rest_golds
        rest_preds_list += rest_preds
    # Token accuracy plus chunk-based precision/recall/F1 over all sequences.
    accs = []
    correct_preds, total_correct, total_preds = 0., 0., 0.
    for golds, preds in zip(rest_golds_list, rest_preds_list):
        accs += [a == b for (a, b) in zip(golds, preds)]
        golds_chunks = set(get_chunks(golds, tags_to_index_map))
        preds_chunks = set(get_chunks(preds, tags_to_index_map))
        correct_preds += len(golds_chunks & preds_chunks)
        total_preds += len(preds_chunks)
        total_correct += len(golds_chunks)
    p = correct_preds / total_preds if correct_preds > 0 else 0
    r = correct_preds / total_correct if correct_preds > 0 else 0
    f1 = 2 * p * r / (p + r) if correct_preds > 0 else 0
    acc = np.mean(accs)
    print("acc: {:.6f}%, F1: {:.6f}% ".format(acc*100, f1*100))
# Script entry point: run the evaluation with settings from src.model_utils.config.
if __name__ == '__main__':
    eval_lstm_crf()
| 39.774194 | 107 | 0.651663 | 659 | 4,932 | 4.605463 | 0.326252 | 0.026689 | 0.024712 | 0.023064 | 0.068204 | 0.038221 | 0.018451 | 0.018451 | 0 | 0 | 0 | 0.014803 | 0.232968 | 4,932 | 123 | 108 | 40.097561 | 0.78747 | 0.182482 | 0 | 0 | 0 | 0 | 0.032725 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02381 | false | 0 | 0.119048 | 0 | 0.142857 | 0.035714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cfb7b75e80d6f16aa38ab2c5bb8e48063d63e82b | 2,682 | py | Python | prepare_training_data.py | vijayaganesh/Chrome-DinoGame-AI | 1c28114c3b0ac40dcb3812ba202b5a343de7d93a | [
"MIT"
] | null | null | null | prepare_training_data.py | vijayaganesh/Chrome-DinoGame-AI | 1c28114c3b0ac40dcb3812ba202b5a343de7d93a | [
"MIT"
] | null | null | null | prepare_training_data.py | vijayaganesh/Chrome-DinoGame-AI | 1c28114c3b0ac40dcb3812ba202b5a343de7d93a | [
"MIT"
] | null | null | null |
import cv2
import numpy as np
import tensorflow as tf
import time
import statistics
import h5py
# Extract per-frame training data (dino state, speed, nearest obstacle,
# control action) from a recorded Chrome dino-game video via OpenCV contours.
vid_file = '/home/vijayaganesh/Videos/Google Chrome Dinosaur Game [Bird Update] BEST SCORE OF THE WORLD (No hack).mp4'
data_file = 'training_data.txt'

# Region of interest inside the video frame that contains the game strip.
roi_x = 320
roi_y = 120
roi_w = 459
roi_h = 112

font = cv2.FONT_HERSHEY_SIMPLEX
vid = cv2.VideoCapture(vid_file)

### jump Case - bounding box of the dino while airborne
jx = 0
jy = 48
jw = 30
jh = 40

### Duck Case - bounding box of the dino while ducking
dx = 0
dy = 102
dw = 45
dh = 10

### Idle Case - bounding box of the dino while running on the ground
tx = 0
ty = 68
tw = 30
th = 27

### Variables to store state of jump
prev_j = ty

### Obstacle tracking / speed estimation state
dist = 500
prev_dist = 500
frame_count = 1
speed_list = list()
speed = 0
dino_y = 0
control = ''

# Renamed from "file" to avoid shadowing the builtin; closed after the loop.
data_out = open(data_file, 'w')
while vid.isOpened():
    ret, frame = vid.read()
    # Bug fix: the original ignored the read() flag and crashed on cvtColor(None)
    # when the video ended or a frame failed to decode.
    if not ret or frame is None:
        break
    roi_rgb = frame[roi_y:roi_y + roi_h, roi_x:roi_x + roi_w]
    roi = cv2.cvtColor(roi_rgb, cv2.COLOR_BGR2GRAY)
    print(frame.shape[:2])
    _, roi_thresh = cv2.threshold(roi, 150, 255, cv2.THRESH_BINARY_INV)
    # NOTE(review): the 3-value findContours unpacking requires OpenCV 3.x;
    # OpenCV 4 returns (contours, hierarchy) - confirm the installed version.
    _, contours, hierarchy = cv2.findContours(roi_thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    obstacle_x, obstacle_y = 500, 500
    for c in contours:
        x, y, w, h = cv2.boundingRect(c)
        if w < 7 and h < 7:
            continue                      # noise blob
        if x > 340 and y == 4:
            continue                      # ignore fixed artifact region
        if x == jx and w == jw:
            # Dino column with jump-sized box: detect upward motion as 'u'.
            if prev_j - y > 0 and y < 67 and y > 45:
                control = 'u'
            prev_j = y
            dino_y = y
        elif x == dx and y == dy and w == dw and h == dh:
            control = 'd'                 # ducking
            dino_y = y
        elif x == dx:
            control = 'na'                # idle / no action
            dino_y = y
        if x > 40:
            # Everything right of the dino column is treated as an obstacle.
            cv2.rectangle(frame, (x + roi_x, y + roi_y), (roi_x + x + w, roi_y + y + h), (0, 255, 0), 2)
            if x < obstacle_x:
                obstacle_x = x
                obstacle_y = y
    dist = obstacle_x
    cv2.putText(frame, 'x = ' + repr(obstacle_x) + "," + repr(obstacle_y), (10, 600), font, 4, (255, 0, 0), 2, cv2.LINE_AA)
    # Estimate scroll speed as the modal frame-to-frame distance delta over a
    # 30-frame window.
    if frame_count < 30:
        speed_list.append(prev_dist - dist)
    else:
        speed = max(speed_list, key=speed_list.count)
        speed_list = list()
        frame_count = 0
    cv2.putText(frame, repr(dino_y), (10, 400), font, 4, (0, 0, 255), 2, cv2.LINE_AA)
    cv2.putText(frame, control, (10, 500), font, 4, (0, 0, 255), 2, cv2.LINE_AA)
    cv2.putText(frame, 'dx/dt = ' + repr(speed), (10, 700), font, 4, (255, 0, 0), 2, cv2.LINE_AA)
    prev_dist = dist
    data_out.write(repr(dino_y) + "," + repr(speed) + "," + repr(obstacle_x) + "," + repr(obstacle_y) + "," + control + "\n")
    cv2.imshow('roi', frame)
    frame_count += 1
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Bug fix: the output file was never closed in the original.
data_out.close()
vid.release()
cv2.destroyAllWindows()
| 25.788462 | 118 | 0.590604 | 449 | 2,682 | 3.36971 | 0.336303 | 0.019828 | 0.039656 | 0.026438 | 0.124256 | 0.124256 | 0.072703 | 0.072703 | 0.072703 | 0.046266 | 0 | 0.082707 | 0.256152 | 2,682 | 103 | 119 | 26.038835 | 0.675689 | 0.052573 | 0 | 0.083333 | 0 | 0.011905 | 0.059595 | 0.012714 | 0 | 0 | 0.001589 | 0 | 0 | 1 | 0 | false | 0 | 0.071429 | 0 | 0.071429 | 0.011905 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cfbe82223a8d3479a6dfc0c1cbd4a4db1f89fae3 | 941 | py | Python | crfill/trainers/__init__.py | node21challenge/rank2_node21_generation | 6c1708468b4ba48383c55bc8473ebcc5a83b8995 | [
"Apache-2.0"
] | null | null | null | crfill/trainers/__init__.py | node21challenge/rank2_node21_generation | 6c1708468b4ba48383c55bc8473ebcc5a83b8995 | [
"Apache-2.0"
] | null | null | null | crfill/trainers/__init__.py | node21challenge/rank2_node21_generation | 6c1708468b4ba48383c55bc8473ebcc5a83b8995 | [
"Apache-2.0"
] | 1 | 2022-02-11T12:42:21.000Z | 2022-02-11T12:42:21.000Z | import importlib
def find_trainer_using_name(model_name):
    """
    Import "trainers/<model_name>_trainer.py" and return the trainer class whose
    name matches "<model_name>trainer" (case-insensitive, underscores removed
    from model_name). Terminates the process when no such class exists.
    """
    model_filename = "trainers." + model_name + "_trainer"
    modellib = importlib.import_module(model_filename)
    # In the file, the class called ModelNameModel() will
    # be instantiated. It has to be a subclass of torch.nn.Module,
    # and it is case-insensitive.
    model = None
    target_model_name = model_name.replace('_', '') + 'trainer'
    for name, cls in modellib.__dict__.items():
        if name.lower() == target_model_name.lower():
            model = cls
    if model is None:
        print("In %s.py, there should be a subclass of torch.nn.Module with class name that matches %s in lowercase." % (model_filename, target_model_name))
        # Bug fix: exit with a non-zero status so callers/CI see the failure
        # (the original exited with status 0, signalling success).
        exit(1)
    return model
model = find_trainer_using_name(opt.trainer)
instance = model(opt)
print("model [%s] was created" % (type(instance).__name__))
return instance
| 32.448276 | 156 | 0.682253 | 129 | 941 | 4.744186 | 0.465116 | 0.088235 | 0.073529 | 0.065359 | 0.084967 | 0.084967 | 0.084967 | 0 | 0 | 0 | 0 | 0.001353 | 0.214665 | 941 | 28 | 157 | 33.607143 | 0.826793 | 0.148778 | 0 | 0 | 0 | 0.055556 | 0.185696 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.111111 | 0 | 0.333333 | 0.111111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cfbf0b7ed91177f382813b392b45d5b821b94144 | 3,790 | py | Python | setup.py | zfit/zfit-flavour | 291be3d3d80a8e20907a5de88239098d0ed7e96d | [
"BSD-3-Clause"
] | null | null | null | setup.py | zfit/zfit-flavour | 291be3d3d80a8e20907a5de88239098d0ed7e96d | [
"BSD-3-Clause"
] | null | null | null | setup.py | zfit/zfit-flavour | 291be3d3d80a8e20907a5de88239098d0ed7e96d | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
import io
import re
import os
import glob
from setuptools import find_packages
from setuptools import setup
def read(*names, **kwargs):
    """Return the text of a file located relative to this script.

    The path segments in ``names`` are joined onto the directory that
    contains this file; an ``encoding`` keyword may override the
    default of ``utf8``.
    """
    target = os.path.join(os.path.dirname(__file__), *names)
    encoding = kwargs.get('encoding', 'utf8')
    with io.open(target, encoding=encoding) as handle:
        return handle.read()
here = os.path.abspath(os.path.dirname(__file__))

# Runtime requirements, one requirement specifier per line.
with open(os.path.join(here, 'requirements.txt'), encoding='utf-8') as requirements_file:
    requirements = requirements_file.read().splitlines()

with open(os.path.join(here, 'requirements_dev.txt'), encoding='utf-8') as requirements_dev_file:
    requirements_dev = requirements_dev_file.read().splitlines()

# Split the developer requirements into setup and test requirements.
# requirements_dev.txt must contain exactly one blank line separating the
# two sections, and that blank line must not be the first line.
if not requirements_dev.count("") == 1 or requirements_dev.index("") == 0:
    raise SyntaxError("requirements_dev.txt has the wrong format: setup and test "
                      "requirements have to be separated by one blank line.")
requirements_dev_split = requirements_dev.index("")
setup_requirements = ["pip>9",
                      "setuptools_scm",
                      "setuptools_scm_git_archive"]
test_requirements = requirements_dev[requirements_dev_split + 1:]  # +1: skip empty line

setup(
    name='zfit-flavour',
    use_scm_version={
        'local_scheme': 'dirty-tag',
        'write_to': 'src/zfit_flavour/_version.py',
        'fallback_version': '0.0.1',
    },
    license='BSD-3-Clause',
    description='Flavour physics for zfit',
    # README with the badge block stripped, followed by the changelog with
    # Sphinx roles downgraded to plain literals.
    long_description='%s\n%s' % (
        re.compile('^.. start-badges.*^.. end-badges', re.M | re.S).sub('', read('README.rst')),
        re.sub(':[a-z]+:`~?(.*?)`', r'``\1``', read('CHANGELOG.rst'))
    ),
    author='Jonas Eschle, Rafael Silva Coutinho',
    author_email='Jonas.Eschle@cern.ch, rafael.silva.coutinho@cern.ch',
    url='https://github.com/zfit/zfit-flavour',
    packages=find_packages('src'),
    package_dir={'': 'src'},
    # BUG FIX: the glob previously scanned 'zfit_flavour/*.py', which does
    # not exist under the src/ layout (code lives in src/zfit_flavour), so
    # no loose modules were ever picked up.  Loose modules sit directly in
    # src/ in this layout.
    py_modules=[os.path.splitext(os.path.basename(path))[0] for path in glob.glob('src/*.py')],
    include_package_data=True,
    zip_safe=False,
    classifiers=[
        # complete classifier list: http://pypi.python.org/pypi?%3Aaction=list_classifiers
        # NOTE(review): the 2.7/3.5 classifiers contradict
        # python_requires='>=3.6' below — confirm and prune.
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: Unix',
        'Operating System :: POSIX',
        'Operating System :: Microsoft :: Windows',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
        # uncomment if you test on these interpreters:
        # 'Programming Language :: Python :: Implementation :: IronPython',
        # 'Programming Language :: Python :: Implementation :: Jython',
        # 'Programming Language :: Python :: Implementation :: Stackless',
        'Topic :: Utilities',
    ],
    project_urls={
        'Documentation': 'https://zfit-flavour.readthedocs.io/',
        'Changelog': 'https://zfit-flavour.readthedocs.io/en/latest/changelog.html',
        'Issue Tracker': 'https://github.com/zfit/zfit-flavour/issues',
    },
    keywords=[
        'flavour', 'zfit', 'model fitting'
    ],
    python_requires='>=3.6',
    install_requires=requirements,
    setup_requires=setup_requirements,
    test_suite='tests',
    tests_require=test_requirements,
)
| 38.673469 | 104 | 0.641953 | 435 | 3,790 | 5.462069 | 0.43908 | 0.09596 | 0.126263 | 0.054714 | 0.124579 | 0.077441 | 0.02862 | 0 | 0 | 0 | 0 | 0.009967 | 0.205805 | 3,790 | 97 | 105 | 39.072165 | 0.779402 | 0.11847 | 0 | 0.025641 | 0 | 0 | 0.408163 | 0.031212 | 0 | 0 | 0 | 0 | 0 | 1 | 0.012821 | false | 0 | 0.076923 | 0 | 0.102564 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cfbfa275b0b7f6610c00f6739476900452fbdfa6 | 1,858 | py | Python | engine/utils/scheduler.py | rmarticedeno/Agent_Platform | 12891e96bae4670e50f12e56a2dee258b7b584b4 | [
"MIT"
] | 2 | 2019-12-07T00:20:51.000Z | 2019-12-23T15:54:27.000Z | engine/utils/scheduler.py | rmarticedeno/Agent_Platform | 12891e96bae4670e50f12e56a2dee258b7b584b4 | [
"MIT"
] | null | null | null | engine/utils/scheduler.py | rmarticedeno/Agent_Platform | 12891e96bae4670e50f12e56a2dee258b7b584b4 | [
"MIT"
] | null | null | null | from collections import deque
class TaskScheduler:
    """A round-robin scheduler for cooperative generator-based tasks."""

    def __init__(self):
        # FIFO queue of in-flight generator tasks.
        self._task_deque = deque()

    def new_task(self, task):
        """Admit a newly started task (must be a generator) to the scheduler."""
        self._task_deque.append(task)

    def run(self):
        """Step every queued task in turn until all have finished."""
        while self._task_deque:
            current = self._task_deque.popleft()
            try:
                next(current)  # advance the task to its next yield
            except StopIteration:
                continue  # generator exhausted — drop it
            self._task_deque.append(current)  # still running: requeue
# Two simple generator functions
def __countdown(n):
    """Demo generator: report n down to 1, yielding after each report."""
    for remaining in range(n, 0, -1):
        print('T-minus', remaining)
        yield
    print('Blastoff!')
def __countup(n):
    """Demo generator: report 0 up to n-1, yielding after each report."""
    for value in range(n):
        print('Counting up', value)
        yield
if __name__ == "__main__":
    # Example use: three generator tasks are advanced round-robin, so
    # their print output interleaves as shown in the trace below.
    sched = TaskScheduler()
    sched.new_task(__countdown(10))
    sched.new_task(__countdown(5))
    sched.new_task(__countup(15))
    sched.run()

    # output:
    # T-minus 10
    # T-minus 5
    # Counting up 0
    # T-minus 9
    # T-minus 4
    # Counting up 1
    # T-minus 8
    # T-minus 3
    # Counting up 2
    # T-minus 7
    # T-minus 2
    # Counting up 3
    # T-minus 6
    # T-minus 1
    # Counting up 4
    # T-minus 5
    # Blastoff!
    # Counting up 5
    # T-minus 4
    # Counting up 6
    # T-minus 3
    # Counting up 7
    # T-minus 2
    # Counting up 8
    # T-minus 1
    # Counting up 9
    # Blastoff!
    # Counting up 10
    # Counting up 11
    # Counting up 12
    # Counting up 13
    # Counting up 14
| 19.557895 | 57 | 0.520452 | 236 | 1,858 | 3.944915 | 0.34322 | 0.103115 | 0.069817 | 0.040816 | 0.197637 | 0.038668 | 0 | 0 | 0 | 0 | 0 | 0.039543 | 0.387513 | 1,858 | 94 | 58 | 19.765957 | 0.778559 | 0.353068 | 0 | 0.125 | 0 | 0 | 0.032498 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.15625 | false | 0.03125 | 0.03125 | 0 | 0.21875 | 0.09375 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cfc02d4a4657dd5adc5e53454218d4b69d722cf8 | 2,138 | py | Python | leet_code/editorial/numbers_roman.py | theeksha101/problem_solving | 431c4ff224035bb98ad67ead963860329dd4c9ff | [
"MIT"
] | null | null | null | leet_code/editorial/numbers_roman.py | theeksha101/problem_solving | 431c4ff224035bb98ad67ead963860329dd4c9ff | [
"MIT"
] | null | null | null | leet_code/editorial/numbers_roman.py | theeksha101/problem_solving | 431c4ff224035bb98ad67ead963860329dd4c9ff | [
"MIT"
] | null | null | null | class Solution:
def intToRoman(self, num: int) -> str:
a = {
'I': 1,
'IV': 4,
'V': 5,
'IX': 9,
'X': 10,
'XL': 40,
'L': 50,
'XC': 90,
'C': 100,
'CD': 400,
'D': 500,
'CM': 900,
'M': 1000
}
c = []
for k, v in reversed(a.items()):
while num > 0:
if v <= num:
c.append(k)
num -= v
else:
break
return "".join(c)
# Quick manual checks for Solution (output unchanged).
sol = Solution()
for sample in (1994, 562, 42, 724):
    print(sol.intToRoman(sample))
print("59 -> ", sol.intToRoman(59))
class Solution3:
    """Roman-numeral conversion via greedy division per denomination."""

    def intToRoman(self, num: int) -> str:
        # Denominations ordered largest-first (the original built the
        # ascending list and iterated it reversed).
        table = [
            ("M", 1000), ("CM", 900), ("D", 500), ("CD", 400),
            ("C", 100), ("XC", 90), ("L", 50), ("XL", 40),
            ("X", 10), ("IX", 9), ("V", 5), ("IV", 4), ("I", 1),
        ]
        parts = []
        for symbol, value in table:
            repeats, num = divmod(num, value)
            if repeats:
                parts.append(symbol * repeats)
        return "".join(parts)
# Quick manual check for Solution3 (expected: DCXXV).
sol3 = Solution3()
print(sol3.intToRoman(625))
class Solution2:
    """Roman-numeral conversion built digit by digit (hundreds/tens/ones)."""

    def intToRoman(self, num: int) -> str:
        symbols = {1: 'I', 5: 'V', 10: 'X', 50: 'L',
                   100: 'C', 500: 'D', 1000: 'M'}
        # Thousands are plain repeated 'M's.
        out = (num // 1000) * symbols[1000]
        num %= 1000
        place = 100
        while place:
            digit = num // place
            one, five = symbols[place], symbols[place * 5]
            if digit == 4:
                out += one + five                   # e.g. 4 -> IV
            elif digit == 9:
                out += one + symbols[place * 10]    # e.g. 9 -> IX
            else:
                # Digits 0-3 and 5-8: optional "five" symbol plus
                # repeated "one" symbols.
                out += (digit >= 5) * five + (digit % 5) * one
            num %= place
            place //= 10
        return out
| 25.759036 | 116 | 0.384471 | 233 | 2,138 | 3.454936 | 0.300429 | 0.080745 | 0.089441 | 0.074534 | 0.213665 | 0.213665 | 0.11677 | 0.11677 | 0.11677 | 0.11677 | 0 | 0.107174 | 0.45884 | 2,138 | 82 | 117 | 26.073171 | 0.588591 | 0 | 0 | 0.072464 | 0 | 0 | 0.023854 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0 | 0 | 0.130435 | 0.086957 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cfc2420a8b0d8a4ab0326148009595e910540813 | 2,310 | py | Python | test_incoming.py | james-721/chat_limpet | 88a394d4e6e3f34b9af4dd29d43999f28bd91224 | [
"MIT"
] | null | null | null | test_incoming.py | james-721/chat_limpet | 88a394d4e6e3f34b9af4dd29d43999f28bd91224 | [
"MIT"
] | null | null | null | test_incoming.py | james-721/chat_limpet | 88a394d4e6e3f34b9af4dd29d43999f28bd91224 | [
"MIT"
] | null | null | null | import zlib
import zmq
import simplejson
import sys
import time
import pprint
import math
# Pretty-printer for optional debug dumps of decoded EDDN messages.
pp = pprint.PrettyPrinter(indent=4)
"""
" Configuration
"""
# EDDN relay endpoint (ZeroMQ PUB socket) and receive timeout in ms.
__relayEDDN = 'tcp://eddn.edcd.io:9500'
__timeoutEDDN = 600000
"""
" Start
"""
def distance_finder(input_coords):
    """Return [distance_to_Colonia, distance_to_Ogmar] for a galactic position.

    ``input_coords`` is an (x, y, z) sequence in the same coordinate frame
    as the two reference systems below.
    """
    colonia_coords = [-9530.5, -910.28125, 19808.125]
    ogmar_coords = [-9534, -905.28125, 19802.03125]

    def _euclid(reference):
        # Straight Euclidean distance; summing axis by axis keeps the
        # same float-addition order as the original expression.
        return math.sqrt(sum((r - p) ** 2 for r, p in zip(reference, input_coords)))

    return [_euclid(colonia_coords), _euclid(ogmar_coords)]
def main():
    """Subscribe to the EDDN relay and print a distance for each event.

    Runs forever: connects to the relay, decompresses and JSON-decodes
    every message, and for events carrying StarSystem/StarPos prints the
    gateway timestamp, the system name and its distance to Ogmar.  On a
    ZMQ socket error the connection is torn down and re-established
    after a short pause.
    """
    context = zmq.Context()
    subscriber = context.socket(zmq.SUB)
    subscriber.setsockopt(zmq.SUBSCRIBE, b"")
    subscriber.setsockopt(zmq.RCVTIMEO, __timeoutEDDN)

    while True:
        try:
            subscriber.connect(__relayEDDN)

            while True:
                __message = subscriber.recv()

                if __message == False:
                    subscriber.disconnect(__relayEDDN)
                    break

                # EDDN messages are zlib-compressed JSON.
                __message = zlib.decompress(__message)
                __json = simplejson.loads(__message)
                # call dumps() to ensure double quotes in output
                # pp.pprint(__json)

                try:
                    star_system = __json['message']['StarSystem']
                    star_pos = __json['message']['StarPos']
                    timestamp = __json['header']['gatewayTimestamp']
                    # softwarename is unused below, but looking it up keeps
                    # the original filtering: events without softwareName
                    # are reported as incomplete.
                    softwarename = __json['header']['softwareName']
                    distances = distance_finder(star_pos)
                    print(f'{timestamp} {star_system} {distances[1]}')
                except (KeyError, TypeError):
                    # BUG FIX: was a bare `except:`, which also swallowed
                    # KeyboardInterrupt/SystemExit.  Only missing/odd
                    # message fields are expected here.
                    print('data missing')

                sys.stdout.flush()

        except zmq.ZMQError as e:
            print('ZMQSocketException: ' + str(e))
            sys.stdout.flush()
            subscriber.disconnect(__relayEDDN)
            time.sleep(5)

        time.sleep(.1)
if __name__ == '__main__':
    # Run the EDDN listener when executed as a script.
    main()
cfc3b184faded8ad9ad737db946c5f8bf1633768 | 8,599 | py | Python | bayesian_optimization/algorithms/dt.py | ukritw/autonialm | a5264753dae19b9b5257ca75433a0f55dce0f173 | [
"Apache-2.0"
] | 2 | 2022-02-22T13:16:17.000Z | 2022-03-24T17:37:54.000Z | bayesian_optimization/algorithms/dt.py | ukritw/autonialm | a5264753dae19b9b5257ca75433a0f55dce0f173 | [
"Apache-2.0"
] | null | null | null | bayesian_optimization/algorithms/dt.py | ukritw/autonialm | a5264753dae19b9b5257ca75433a0f55dce0f173 | [
"Apache-2.0"
] | 2 | 2019-03-24T20:58:53.000Z | 2022-03-06T06:51:50.000Z | from __future__ import print_function, division
import warnings; warnings.filterwarnings("ignore")
from nilmtk import DataSet
import pandas as pd
import numpy as np
import datetime
import time
import math
import glob
from sklearn.tree import DecisionTreeRegressor
# Bring packages onto the path
import sys, os
sys.path.append(os.path.abspath('../bayesian_optimization/'))
from utils import metrics_np
from utils.metrics_np import Metrics
# import argparse
def decision_tree(dataset_path, train_building, train_start, train_end, val_building, val_start, val_end, test_building, test_start, test_end, meter_key, sample_period, criterion, min_sample_split):
    """Train and evaluate a DecisionTreeRegressor NILM disaggregator.

    Loads mains (X) and appliance (y) power series for the train/val/test
    windows from a NILMTK HDF5 dataset, fits a decision tree on the
    training data and scores the validation and test predictions.

    Args:
        dataset_path: path to the NILMTK HDF5 file.
        *_building / *_start / *_end: building ids and time windows for
            the train, validation and test splits.
        meter_key: appliance meter label, e.g. 'fridge'.
        sample_period: resampling period in seconds.
        criterion: split criterion passed to DecisionTreeRegressor.
        min_sample_split: DecisionTreeRegressor min_samples_split value.

    Returns:
        dict with 'val_metrics', 'test_metrics', 'time_taken' (seconds,
        2-decimal string) and 'epochs' (always None for trees).
    """
    def _align(X, y):
        # Keep only timestamps present in both mains and appliance series
        # so X and y line up row for row.
        shared = pd.Index(np.sort(list(set(X.index).intersection(set(y.index)))))
        # .loc replaces the deprecated .ix (removed in pandas 1.0).
        return X.loc[shared], y.loc[shared]

    # Start tracking time
    start = time.time()

    # Prepare the three dataset views, each restricted to its window.
    train = DataSet(dataset_path)
    train.set_window(start=train_start, end=train_end)
    val = DataSet(dataset_path)
    val.set_window(start=val_start, end=val_end)
    test = DataSet(dataset_path)
    test.set_window(start=test_start, end=test_end)

    train_elec = train.buildings[train_building].elec
    val_elec = val.buildings[val_building].elec
    test_elec = test.buildings[test_building].elec

    try:  # REDD-style datasets: mains exposed through all_meters()
        X_train = next(train_elec.mains().all_meters()[0].load(sample_period=sample_period)).fillna(0)
        y_train = next(train_elec[meter_key].load(sample_period=sample_period)).fillna(0)
        X_test = next(test_elec.mains().all_meters()[0].load(sample_period=sample_period)).fillna(0)
        y_test = next(test_elec[meter_key].load(sample_period=sample_period)).fillna(0)
        X_val = next(val_elec.mains().all_meters()[0].load(sample_period=sample_period)).fillna(0)
        y_val = next(val_elec[meter_key].load(sample_period=sample_period)).fillna(0)

        X_train, y_train = _align(X_train, y_train)
        X_test, y_test = _align(X_test, y_test)
        X_val, y_val = _align(X_val, y_val)

        X_train = X_train.values
        y_train = y_train.values
        X_test = X_test.values
        y_test = y_test.values
        X_val = X_val.values
        y_val = y_val.values
    except AttributeError:  # UKDALE-style datasets: mains is a single power series
        X_train = train_elec.mains().power_series_all_data(sample_period=sample_period).fillna(0)
        y_train = next(train_elec[meter_key].power_series(sample_period=sample_period)).fillna(0)
        X_test = test_elec.mains().power_series_all_data(sample_period=sample_period).fillna(0)
        y_test = next(test_elec[meter_key].power_series(sample_period=sample_period)).fillna(0)
        # BUG FIX: the validation series were never loaded in this branch,
        # so the later predict(X_val) raised NameError for UKDALE data.
        X_val = val_elec.mains().power_series_all_data(sample_period=sample_period).fillna(0)
        y_val = next(val_elec[meter_key].power_series(sample_period=sample_period)).fillna(0)

        X_train, y_train = _align(X_train, y_train)
        X_test, y_test = _align(X_test, y_test)
        X_val, y_val = _align(X_val, y_val)

        # Series -> 2-D column vectors expected by scikit-learn.
        X_train = X_train.values.reshape(-1, 1)
        y_train = y_train.values.reshape(-1, 1)
        X_test = X_test.values.reshape(-1, 1)
        y_test = y_test.values.reshape(-1, 1)
        X_val = X_val.values.reshape(-1, 1)
        y_val = y_val.values.reshape(-1, 1)

    # Model settings and hyperparameters
    tree_clf = DecisionTreeRegressor(criterion=criterion, min_samples_split=min_sample_split)
    tree_clf.fit(X_train, y_train)

    y_val_predict = tree_clf.predict(X_val)
    y_test_predict = tree_clf.predict(X_test)

    # Metric state boundary at the appliance's on-power threshold.
    on_power_threshold = train_elec[meter_key].on_power_threshold()
    me = Metrics(state_boundaries=[on_power_threshold])
    val_metrics_results_dict = Metrics.compute_metrics(me, y_val_predict, y_val.flatten())
    test_metrics_results_dict = Metrics.compute_metrics(me, y_test_predict, y_test.flatten())

    # end tracking time
    end = time.time()
    time_taken = end - start  # in seconds

    model_result_data = {
        'val_metrics': val_metrics_results_dict,
        'test_metrics': test_metrics_results_dict,
        'time_taken': format(time_taken, '.2f'),
        'epochs': None,
    }

    # Close Dataset files (release HDF5 handles).
    train.store.close()
    val.store.close()
    test.store.close()

    return model_result_data
# def main():
#
# # Take in arguments from command line
# parser = argparse.ArgumentParser(description='Decision Tree Regressor')
# parser.add_argument('--datapath', '-d', type=str, required=True,
# help='hd5 filepath')
#
# parser.add_argument('--train_building', type=int, required=True)
# parser.add_argument('--train_start', type=str, default=None, help='YYYY-MM-DD')
# parser.add_argument('--train_end', type=str, required=True, help='YYYY-MM-DD')
#
# parser.add_argument('--test_building', type=int, required=True)
# parser.add_argument('--test_start', type=str, required=True, help='YYYY-MM-DD')
# parser.add_argument('--test_end', type=str, default=None, help='YYYY-MM-DD')
#
# parser.add_argument('--appliance', type=str, required=True)
# parser.add_argument('--sampling_rate', type=int, required=True)
#
# # Model specific options and hyperparameters
# parser.add_argument('--min_sample_split', type=int, default=100)
# args = parser.parse_args()
#
# hd5_filepath = args.datapath
# train_building = args.train_building
# train_start = pd.Timestamp(args.train_start) if args.train_start != None else None
# train_end = pd.Timestamp(args.train_end)
# test_building = args.test_building
# test_start = pd.Timestamp(args.test_start)
# test_end = pd.Timestamp(args.test_end) if args.test_end != None else None
# appliance = args.appliance
# downsampling_period = args.sampling_rate
# min_sample_split = args.min_sample_split
#
#
# model_result_data = decision_tree(
# dataset_path=hd5_filepath,
# train_building=train_building, train_start=train_start, train_end=train_end,
# test_building=test_building, test_start=test_start, test_end=test_end,
# meter_key=appliance,
# sample_period=downsampling_period,
# criterion="mae",
# min_sample_split=min_sample_split)
#
# # # Write options and results to file
# # with open('dt_json.json', 'a+') as outfile:
# # json.dump(model_result_data, outfile, sort_keys=True,
# # indent=4, separators=(',', ': '))
# print(model_result_data)
#
# if __name__ == "__main__":
# main()
# python algorithms/dt.py --datapath ../data/REDD/redd.h5 --train_building 1 --train_building 1 --train_end 2011-05-10 --test_building 1 --test_start 2011-05-10 --appliance fridge --sampling_rate 20 --min_sample_split 100
# python dt.py --datapath ../data/REDD/redd.h5 --train_building 1 --train_building 1 --train_end 2011-05-10 --test_building 1 --test_start 2011-05-10 --appliance fridge --sampling_rate 20 --min_sample_split 100
# python dt.py --datapath /mnt/data/datasets/wattanavaekin/REDD/redd.h5 --train_building 1 --train_end 2011-05-10 --test_building 1 --test_start 2011-05-10 --appliance fridge --sampling_rate 20 --min_sample_split 100
# python dt.py --datapath /mnt/data/datasets/wattanavaekin/UKDALE/ukdale-2017.h5 --train_building 2 --train_end 2013-08-02 --test_building 2 --test_start 2013-08-02 --appliance fridge --sampling_rate 120 --min_sample_split 100
| 43.211055 | 230 | 0.695662 | 1,219 | 8,599 | 4.62018 | 0.162428 | 0.051136 | 0.02983 | 0.046875 | 0.481889 | 0.415661 | 0.405362 | 0.382635 | 0.350497 | 0.350497 | 0 | 0.020148 | 0.18037 | 8,599 | 198 | 231 | 43.429293 | 0.778944 | 0.437842 | 0 | 0.134831 | 0 | 0 | 0.015388 | 0.00527 | 0 | 0 | 0 | 0 | 0 | 1 | 0.011236 | false | 0 | 0.146067 | 0 | 0.168539 | 0.011236 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cfc4dc6ecc3462bf8a0af5bf57b6f97a39afd525 | 856 | py | Python | Interpolation/New Tab with Special Layers.py | juandelperal/Glyphs-Scripts | 1f3cb71683ec044dff67a46cd895773e8271effa | [
"Apache-2.0"
] | null | null | null | Interpolation/New Tab with Special Layers.py | juandelperal/Glyphs-Scripts | 1f3cb71683ec044dff67a46cd895773e8271effa | [
"Apache-2.0"
] | null | null | null | Interpolation/New Tab with Special Layers.py | juandelperal/Glyphs-Scripts | 1f3cb71683ec044dff67a46cd895773e8271effa | [
"Apache-2.0"
] | null | null | null | #MenuTitle: New Tab with Special Layers
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
from builtins import str
__doc__="""
Opens a new Edit tab containing all special (bracket & brace) layers.
"""
Glyphs.clearLog() # clears log of Macro window
thisFont = Glyphs.font # frontmost font
affectedLayers = []
for thisGlyph in thisFont.glyphs: # loop through all glyphs
for thisLayer in thisGlyph.layers: # loop through all layers
# collect affected layers:
if thisLayer.isSpecialLayer:
affectedLayers.append(thisLayer)
# open a new tab with the affected layers:
if affectedLayers:
newTab = thisFont.newTab()
newTab.layers = affectedLayers
# otherwise send a message:
else:
Message(
title = "Nothing Found",
message = "Could not find any bracket or brace layers in the font.",
OKButton = None
)
| 28.533333 | 70 | 0.748832 | 112 | 856 | 5.633929 | 0.589286 | 0.019017 | 0.031696 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001404 | 0.168224 | 856 | 29 | 71 | 29.517241 | 0.884831 | 0.28271 | 0 | 0 | 0 | 0 | 0.229752 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.095238 | 0 | 0.095238 | 0.047619 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cfc4e76ff268f1b04652ad27abd0acee3bdfb297 | 3,244 | py | Python | tests/wallet/nft_wallet/test_ownership_outer_puzzle.py | Chinilla/chinilla-blockchain | 59bebcf94e65b74fbb53ad4929bbd79cb28be619 | [
"Apache-2.0"
] | null | null | null | tests/wallet/nft_wallet/test_ownership_outer_puzzle.py | Chinilla/chinilla-blockchain | 59bebcf94e65b74fbb53ad4929bbd79cb28be619 | [
"Apache-2.0"
] | null | null | null | tests/wallet/nft_wallet/test_ownership_outer_puzzle.py | Chinilla/chinilla-blockchain | 59bebcf94e65b74fbb53ad4929bbd79cb28be619 | [
"Apache-2.0"
] | null | null | null | from typing import Optional
from clvm_tools.binutils import assemble
from chinilla.types.blockchain_format.program import Program
from chinilla.types.blockchain_format.sized_bytes import bytes32
from chinilla.util.ints import uint16
from chinilla.wallet.nft_wallet.ownership_outer_puzzle import puzzle_for_ownership_layer
from chinilla.wallet.nft_wallet.transfer_program_puzzle import puzzle_for_transfer_program
from chinilla.wallet.outer_puzzles import (
construct_puzzle,
create_asset_id,
get_inner_puzzle,
get_inner_solution,
match_puzzle,
solve_puzzle,
)
from chinilla.wallet.puzzle_drivers import PuzzleInfo, Solver
def test_ownership_outer_puzzle() -> None:
    """Exercise the ownership outer-puzzle driver end to end:
    matching, field access, construction, solving, and execution."""
    ACS = Program.to(1)  # "anyone can spend" inner puzzle (quoted 1)
    NIL = Program.to([])
    owner = bytes32([0] * 32)
    # Minimal transfer program used for matching tests:
    # (mod (current_owner conditions solution)
    #     (list current_owner () conditions)
    # )
    transfer_program = assemble(  # type: ignore
        """
        (c 2 (c () (c 5 ())))
        """
    )
    # A royalty transfer program plus three ownership-layer variants:
    # explicit owner, empty owner, and the default transfer program.
    transfer_program_default: Program = puzzle_for_transfer_program(bytes32([1] * 32), bytes32([2] * 32), uint16(5000))
    ownership_puzzle: Program = puzzle_for_ownership_layer(owner, transfer_program, ACS)
    ownership_puzzle_empty: Program = puzzle_for_ownership_layer(NIL, transfer_program, ACS)
    ownership_puzzle_default: Program = puzzle_for_ownership_layer(owner, transfer_program_default, ACS)
    ownership_driver: Optional[PuzzleInfo] = match_puzzle(ownership_puzzle)
    ownership_driver_empty: Optional[PuzzleInfo] = match_puzzle(ownership_puzzle_empty)
    ownership_driver_default: Optional[PuzzleInfo] = match_puzzle(ownership_puzzle_default)
    transfer_program_driver: Optional[PuzzleInfo] = match_puzzle(transfer_program_default)
    # Every puzzle above must be recognized by the driver layer.
    assert ownership_driver is not None
    assert ownership_driver_empty is not None
    assert ownership_driver_default is not None
    assert transfer_program_driver is not None
    # Matched drivers must expose the fields they were built from.
    assert ownership_driver.type() == "ownership"
    assert ownership_driver["owner"] == owner
    assert ownership_driver_empty["owner"] == NIL
    assert ownership_driver["transfer_program"] == transfer_program
    assert ownership_driver_default["transfer_program"] == transfer_program_driver
    assert transfer_program_driver.type() == "royalty transfer program"
    assert transfer_program_driver["launcher_id"] == bytes32([1] * 32)
    assert transfer_program_driver["royalty_address"] == bytes32([2] * 32)
    assert transfer_program_driver["royalty_percentage"] == 5000
    # match -> construct must round-trip back to the original puzzles.
    assert construct_puzzle(ownership_driver, ACS) == ownership_puzzle
    assert construct_puzzle(ownership_driver_empty, ACS) == ownership_puzzle_empty
    assert construct_puzzle(ownership_driver_default, ACS) == ownership_puzzle_default
    assert get_inner_puzzle(ownership_driver, ownership_puzzle) == ACS
    # Ownership layers carry no asset id.
    assert create_asset_id(ownership_driver) is None
    # Set up for solve: a CREATE_COIN (51) condition plus a [-10]
    # condition (presumably the ownership layer's magic condition — confirm).
    inner_solution = Program.to(
        [
            [51, ACS.get_tree_hash(), 1],
            [-10],
        ]
    )
    solution: Program = solve_puzzle(
        ownership_driver,
        Solver({}),
        ACS,
        inner_solution,
    )
    # The built solution must run without raising, and the inner solution
    # must be recoverable from it.
    ownership_puzzle.run(solution)
    assert get_inner_solution(ownership_driver, solution) == inner_solution
| 42.684211 | 119 | 0.750308 | 389 | 3,244 | 5.910026 | 0.197943 | 0.130492 | 0.073075 | 0.058721 | 0.328839 | 0.176599 | 0.074815 | 0.043497 | 0 | 0 | 0 | 0.017395 | 0.167078 | 3,244 | 75 | 120 | 43.253333 | 0.833457 | 0.034217 | 0 | 0 | 0 | 0 | 0.038611 | 0 | 0 | 0 | 0 | 0 | 0.306452 | 1 | 0.016129 | false | 0 | 0.145161 | 0 | 0.16129 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |