hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7feed2d0e6bbc22ca226fdfeec3d9cd4d713703 | 1,076 | py | Python | auth/token_box.py | ivmfnal/dm_common | 0f390d3da1c52191b017a5493bee47b0335eb6bd | [
"BSD-3-Clause"
] | 1 | 2022-03-18T19:01:29.000Z | 2022-03-18T19:01:29.000Z | auth/token_box.py | ivmfnal/dm_common | 0f390d3da1c52191b017a5493bee47b0335eb6bd | [
"BSD-3-Clause"
] | null | null | null | auth/token_box.py | ivmfnal/dm_common | 0f390d3da1c52191b017a5493bee47b0335eb6bd | [
"BSD-3-Clause"
] | null | null | null | class TokenBox(object):
    def __init__(self, url, username, password, margin = 10, request_now = False):
        """Cache a signed auth token fetched from *url* with HTTP digest auth.

        margin: renew the token this many seconds before its expiration.
        request_now: if True, fetch a token immediately instead of lazily.
        """
        self.URL = url
        self.Username = username
        self.Password = password
        self.Token = None        # decoded token object; None until first fetch
        self.Expiration = 0      # absolute expiration time of the cached token
        self.Encoded = None      # raw encoded token string from the server
        self.Margin = margin     # early-renewal margin (seconds)
        if request_now:
            self.renewIfNeeded()
def renewIfNeeded(self):
need_to_renew = self.Token is None or time.time() > self.Expiration - self.Margin
if need_to_renew:
from .rfc2617 import digest_client
status, body = digest_client(self.URL, self.Username, self.Password)
if status/100 == 2:
encoded = body.strip()
t = SignedToken.decode(encoded)
self.Token = t
self.Encoded = encoded
self.Expiration = t.expiration
else:
raise SignedTokenAuthoriztionError(body)
    @property
    def token(self):
        """Current encoded token string, transparently renewed when stale."""
        self.renewIfNeeded()
        return self.Encoded
| 32.606061 | 89 | 0.555762 | class TokenBox(object):
    def __init__(self, url, username, password, margin = 10, request_now = False):
        """Token cache: fetches from *url* via digest auth, renewing *margin*
        seconds early; *request_now* triggers an immediate first fetch."""
        self.URL = url
        self.Username = username
        self.Password = password
        self.Token = None
        self.Expiration = 0
        self.Encoded = None
        self.Margin = margin
        if request_now:
            self.renewIfNeeded()
    def renewIfNeeded(self):
        """Fetch and decode a new token when missing or near expiration."""
        need_to_renew = self.Token is None or time.time() > self.Expiration - self.Margin
        if need_to_renew:
            from .rfc2617 import digest_client
            status, body = digest_client(self.URL, self.Username, self.Password)
            # NOTE(review): under Python 3 true division 201/100 == 2.01, so
            # this accepts only status 200, not all 2xx; should be status // 100.
            if status/100 == 2:
                encoded = body.strip()
                t = SignedToken.decode(encoded)
                self.Token = t
                self.Encoded = encoded
                self.Expiration = t.expiration
            else:
                raise SignedTokenAuthoriztionError(body)
    @property
    def token(self):
        """Encoded token string; renews first if the cached one is stale."""
        self.renewIfNeeded()
        return self.Encoded
| true | true |
f7feedc493f997aa5760f4fb539749318c3bf7da | 1,760 | py | Python | thumbor/app.py | ravisaini1990S/thumbor | 8312a1e384edd9cb999bc52c8477d926a72f9869 | [
"MIT"
] | 6 | 2015-01-27T05:36:22.000Z | 2019-12-04T05:19:34.000Z | thumbor/app.py | ravisaini1990S/thumbor | 8312a1e384edd9cb999bc52c8477d926a72f9869 | [
"MIT"
] | null | null | null | thumbor/app.py | ravisaini1990S/thumbor | 8312a1e384edd9cb999bc52c8477d926a72f9869 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com thumbor@googlegroups.com
import tornado.web
import tornado.ioloop
from thumbor.handlers.blacklist import BlacklistHandler
from thumbor.handlers.healthcheck import HealthcheckHandler
from thumbor.handlers.upload import ImageUploadHandler
from thumbor.handlers.image_resource import ImageResourceHandler
from thumbor.url import Url
from thumbor.handlers.imaging import ImagingHandler
class ThumborServiceApp(tornado.web.Application):
    """Tornado application wiring up thumbor's HTTP routes."""

    def __init__(self, context):
        """Build the application from a thumbor *context* object."""
        self.context = context
        self.debug = getattr(self.context.server, 'debug', False)
        super(ThumborServiceApp, self).__init__(self.get_handlers(), debug=self.debug)

    def get_handlers(self):
        """Return the route table.  Routes are matched in order, so the
        generic imaging route is registered last."""
        config = self.context.config
        routes = [(config.HEALTHCHECK_ROUTE, HealthcheckHandler)]
        if config.UPLOAD_ENABLED:
            # POST /image uploads a new image.
            routes.append((r'/image', ImageUploadHandler, {'context': self.context}))
            # GET/PUT/DELETE /image/<id> retrieve or modify an existing image.
            routes.append((r'/image/(.*)', ImageResourceHandler, {'context': self.context}))
        if config.USE_BLACKLIST:
            routes.append((r'/blacklist', BlacklistHandler, {'context': self.context}))
        # Imaging handler (GET), catch-all for thumbor URLs.
        routes.append((Url.regex(), ImagingHandler, {'context': self.context}))
        return routes
| 32 | 86 | 0.6625 |
import tornado.web
import tornado.ioloop
from thumbor.handlers.blacklist import BlacklistHandler
from thumbor.handlers.healthcheck import HealthcheckHandler
from thumbor.handlers.upload import ImageUploadHandler
from thumbor.handlers.image_resource import ImageResourceHandler
from thumbor.url import Url
from thumbor.handlers.imaging import ImagingHandler
class ThumborServiceApp(tornado.web.Application):
    """Tornado application exposing thumbor's HTTP endpoints."""
    def __init__(self, context):
        # Debug flag comes from the server config when present.
        self.context = context
        self.debug = getattr(self.context.server, 'debug', False)
        super(ThumborServiceApp, self).__init__(self.get_handlers(), debug=self.debug)
    def get_handlers(self):
        """Build the routing table; the catch-all imaging route goes last."""
        handlers = [
            (self.context.config.HEALTHCHECK_ROUTE, HealthcheckHandler),
        ]
        if self.context.config.UPLOAD_ENABLED:
            # POST /image creates an image; /image/<id> reads/updates/deletes it.
            handlers.append(
                (r'/image', ImageUploadHandler, {'context': self.context})
            )
            handlers.append(
                (r'/image/(.*)', ImageResourceHandler, {'context': self.context})
            )
        if self.context.config.USE_BLACKLIST:
            handlers.append(
                (r'/blacklist', BlacklistHandler, {'context': self.context})
            )
        # Generic imaging route (GET), registered last.
        handlers.append(
            (Url.regex(), ImagingHandler, {'context': self.context})
        )
        return handlers
| true | true |
f7feedfae7ecf52dc44632d9e9328da672bc44fe | 3,187 | py | Python | learn/tests/tests_services/tests_answer.py | Aigrefin/py3learn | 8104315689caff2523bda3b4ad70a807f4a43fa7 | [
"MIT"
] | null | null | null | learn/tests/tests_services/tests_answer.py | Aigrefin/py3learn | 8104315689caff2523bda3b4ad70a807f4a43fa7 | [
"MIT"
] | 1 | 2021-06-10T19:05:49.000Z | 2021-06-10T19:05:49.000Z | learn/tests/tests_services/tests_answer.py | Aigrefin/py3learn | 8104315689caff2523bda3b4ad70a807f4a43fa7 | [
"MIT"
] | null | null | null | from unittest import TestCase
from unittest.mock import MagicMock, patch
from django.contrib.auth.models import User
from django.utils import timezone
from learn.infrastructure.database import Database
from learn.models import Translation, RythmNotation
from learn.services.answer import Answer
class AnswerTests(TestCase):
    """Unit tests for learn.services.answer.Answer using a mocked Database."""
    def setUp(self):
        self.database = MagicMock(spec=Database)
        self.answer = Answer(database=self.database)
        self.user = User()
    def test_shouldReturnTrue_WhenGoodAnswer(self):
        """Answer matching is case-insensitive ('xin chào' vs 'Xin chào')."""
        # Given
        translation = Translation(word_to_learn="Xin chào", known_word="Bonjour")
        # When
        result = self.answer.is_good_answer("xin chào", translation)
        # Then
        self.assertTrue(result)
    def test_shouldReturnFalse_WhenBadAnswer(self):
        """Accents matter: 'xin chao' does not match 'Xin chào'."""
        # Given
        translation = Translation(word_to_learn="Xin chào", known_word="Bonjour")
        # When
        result = self.answer.is_good_answer("xin chao", translation)
        # Then
        self.assertFalse(result)
    def test_shouldRetreiveNotation_FromTranslation_AndUser(self):
        """The notation lookup is keyed on (user, translation), in that order."""
        # Given
        translation = Translation(word_to_learn="Xin chào", known_word="Bonjour")
        # When
        self.answer.update_translation_statistics(True, self.user, translation)
        # Then
        self.assertEqual(self.database.get_matching_notation.call_args_list[0][0][0], self.user)
        self.assertEqual(self.database.get_matching_notation.call_args_list[0][0][1], translation)
    @patch('learn.services.answer.compute_next_repetition')
    def test_shouldImproveTranslationStatistics_WhenGoodAnswer(self, compute_next_repetition_mock):
        """A good answer increments successes (0 -> 1) and saves the computed
        next repetition time."""
        # Given
        translation = Translation(word_to_learn="Xin chào", known_word="Bonjour")
        notation = RythmNotation(translation=translation, successes=0, next_repetition=None)
        self.database.get_matching_notation.return_value = notation
        next_repetition = timezone.now()
        compute_next_repetition_mock.return_value = next_repetition
        # When
        self.answer.update_translation_statistics(True, self.user, translation)
        # Then
        self.assertEqual(self.database.save_rythm_notation.call_args_list[0][0][0], next_repetition)
        self.assertEqual(self.database.save_rythm_notation.call_args_list[0][0][1], 1)
    @patch('learn.services.answer.compute_next_repetition')
    def test_shouldDowngradeTranslationStatistics_WhenBadAnswer(self, compute_next_repetition_mock):
        """A bad answer downgrades successes 42 -> 21 (presumably halved —
        verify against Answer.update_translation_statistics)."""
        # Given
        translation = Translation(word_to_learn="Xin chào", known_word="Bonjour")
        notation = RythmNotation(translation=translation, successes=42, next_repetition=None)
        self.database.get_matching_notation.return_value = notation
        next_repetition = timezone.now()
        compute_next_repetition_mock.return_value = next_repetition
        # When
        self.answer.update_translation_statistics(False, self.user, translation)
        # Then
        self.assertEqual(self.database.save_rythm_notation.call_args_list[0][0][0], next_repetition)
        self.assertEqual(self.database.save_rythm_notation.call_args_list[0][0][1], 21)
| 37.940476 | 100 | 0.72733 | from unittest import TestCase
from unittest.mock import MagicMock, patch
from django.contrib.auth.models import User
from django.utils import timezone
from learn.infrastructure.database import Database
from learn.models import Translation, RythmNotation
from learn.services.answer import Answer
class AnswerTests(TestCase):
    """Tests for the Answer service with the Database mocked out."""
    def setUp(self):
        self.database = MagicMock(spec=Database)
        self.answer = Answer(database=self.database)
        self.user = User()
    def test_shouldReturnTrue_WhenGoodAnswer(self):
        """Case-insensitive match is accepted."""
        translation = Translation(word_to_learn="Xin chào", known_word="Bonjour")
        result = self.answer.is_good_answer("xin chào", translation)
        self.assertTrue(result)
    def test_shouldReturnFalse_WhenBadAnswer(self):
        """Missing accents make the answer wrong."""
        translation = Translation(word_to_learn="Xin chào", known_word="Bonjour")
        result = self.answer.is_good_answer("xin chao", translation)
        self.assertFalse(result)
    def test_shouldRetreiveNotation_FromTranslation_AndUser(self):
        """Notation lookup receives (user, translation) positionally."""
        translation = Translation(word_to_learn="Xin chào", known_word="Bonjour")
        self.answer.update_translation_statistics(True, self.user, translation)
        self.assertEqual(self.database.get_matching_notation.call_args_list[0][0][0], self.user)
        self.assertEqual(self.database.get_matching_notation.call_args_list[0][0][1], translation)
    @patch('learn.services.answer.compute_next_repetition')
    def test_shouldImproveTranslationStatistics_WhenGoodAnswer(self, compute_next_repetition_mock):
        """Good answer: successes 0 -> 1, new repetition time saved."""
        translation = Translation(word_to_learn="Xin chào", known_word="Bonjour")
        notation = RythmNotation(translation=translation, successes=0, next_repetition=None)
        self.database.get_matching_notation.return_value = notation
        next_repetition = timezone.now()
        compute_next_repetition_mock.return_value = next_repetition
        self.answer.update_translation_statistics(True, self.user, translation)
        self.assertEqual(self.database.save_rythm_notation.call_args_list[0][0][0], next_repetition)
        self.assertEqual(self.database.save_rythm_notation.call_args_list[0][0][1], 1)
    @patch('learn.services.answer.compute_next_repetition')
    def test_shouldDowngradeTranslationStatistics_WhenBadAnswer(self, compute_next_repetition_mock):
        """Bad answer: successes 42 -> 21."""
        translation = Translation(word_to_learn="Xin chào", known_word="Bonjour")
        notation = RythmNotation(translation=translation, successes=42, next_repetition=None)
        self.database.get_matching_notation.return_value = notation
        next_repetition = timezone.now()
        compute_next_repetition_mock.return_value = next_repetition
        self.answer.update_translation_statistics(False, self.user, translation)
        self.assertEqual(self.database.save_rythm_notation.call_args_list[0][0][0], next_repetition)
        self.assertEqual(self.database.save_rythm_notation.call_args_list[0][0][1], 21)
| true | true |
f7feef1001806af6273753ae5efc274125de6d6f | 9,018 | py | Python | data/GAScore_latency.py | sharm294/shoal | db7dd08a70882585fb9740a39b57b4b7a48b3081 | [
"MIT"
] | 1 | 2021-04-12T06:41:33.000Z | 2021-04-12T06:41:33.000Z | data/GAScore_latency.py | UofT-HPRC/shoal | db7dd08a70882585fb9740a39b57b4b7a48b3081 | [
"MIT"
] | null | null | null | data/GAScore_latency.py | UofT-HPRC/shoal | db7dd08a70882585fb9740a39b57b4b7a48b3081 | [
"MIT"
] | null | null | null | import os
# This script parses the output from Vivado to compute latency numbers for the
# GAScore.
vivado_output_file = "GAScore_latency.txt"
search_string = "STAT_"
units = 'us'
clock_period = 0.02
# clock_align = 0.01 # signal transitions from sonar occur on negedges
table_template = "| {label:38} | {init:11} | {count:6} | {extra:38} |"
def start_trans(first_beat):
    """Return the transaction start time: one clock period before the
    observed *first_beat*, rounded to 3 decimals (times in ``units``)."""
    return round(first_beat - clock_period, 3)
def cycle_count(base, delta, scaling=1):
    """Return whole clock cycles elapsed from *base* to *delta*, divided by
    *scaling*.  The double round guards against floating-point noise in the
    simulator timestamps before truncating to an integer cycle count."""
    cycles = round(round(delta - base, 3) / clock_period, 3)
    return int(cycles)/scaling
def truncate_string(text_str, length):
    """Clamp *text_str* to *length* characters, ending in '...' when cut."""
    if len(text_str) <= length:
        return text_str
    return text_str[:length - 3] + '...'
def print_stat_abs(label, first_beat, last_beat, prev_beat=None):
    """Print one table row: cycles from *first_beat* to *last_beat*, plus the
    setup cycles since *prev_beat* when given (otherwise 'N/A')."""
    shown_label = truncate_string(label, 38)
    init_cycles = cycle_count(prev_beat, first_beat) if prev_beat is not None else "N/A"
    beat_cycles = cycle_count(first_beat, last_beat)
    print(table_template.format(label=shown_label, init=init_cycles,
                                count=beat_cycles, extra='N/A'))
def print_stat_offset(label, first_beat, last_beat, target=None, prev_beat=None):
    """Print one table row measured from the transaction start (one clock
    period before *first_beat*) to *last_beat*; *target* is the expected
    cycle count, shown in the metadata column when provided."""
    shown_label = truncate_string(label, 38)
    if prev_beat is None:
        init_cycles = "N/A"
    else:
        init_cycles = cycle_count(prev_beat, start_trans(first_beat))
    beat_cycles = cycle_count(start_trans(first_beat), last_beat)
    if target is None:
        extra = 'N/A'
    else:
        extra = truncate_string("Should be " + str(target) + " cycles", 38)
    print(table_template.format(label=shown_label, init=init_cycles,
                                count=beat_cycles, extra=extra))
def create_tuples(start_index, end_index):
    """Return consecutive index pairs (i, i+1) covering start_index..end_index."""
    return [(i, i + 1) for i in range(start_index, end_index)]
def delay(stat_list, tuples_list):
    """Return (max, min) cycle delay between each (i, j) beat pair.

    Preserves the original accumulator semantics: the maximum is never below
    0, and (0, -1) is returned when *tuples_list* is empty.
    """
    delays = [cycle_count(stat_list[i]['time'], stat_list[j]['time'])
              for i, j in tuples_list]
    if not delays:
        return 0, -1
    return max(0, max(delays)), min(delays)
def print_delay(stat_list, start_index, end_index, target_count):
    """If the measured cycle count from beat *start_index* to *end_index*
    differs from *target_count*, print the max/min inter-beat delays (one
    combined row when they are equal) as continuation rows ('^')."""
    # print(start_trans(stat_list[start_index]['time']))
    # print(stat_list[end_index]['time'])
    if cycle_count(start_trans(stat_list[start_index]['time']), stat_list[end_index]['time']) != target_count:
        # print(start_index, end_index)
        # print(create_tuples(start_index, end_index))
        max_delay, min_delay = delay(stat_list, create_tuples(start_index, end_index))
        # print(max_delay)
        # print(min_delay)
        if max_delay != min_delay:
            max_string = "Max delay between beats: " + str(max_delay) + " cycles"
            label_trunc = truncate_string(max_string, 38)
            print(table_template.format(label='^', init='^', count='^', extra=label_trunc))
            min_string = "Min delay between beats: " + str(min_delay) + " cycles"
        else:
            min_string = "Delay between beats: " + str(min_delay) + " cycles"
        label_trunc = truncate_string(min_string, 38)
        print(table_template.format(label='^', init='^', count='^', extra=label_trunc))
def print_header(label, filler):
    """Print a full-width section banner, padding with *filler* characters."""
    padding = filler * (101 - len(label))
    print("| {} {} |".format(label, padding))
# get relative path of the text file (same location as script)
__location__ = os.path.realpath(
    os.path.join(os.getcwd(), os.path.dirname(__file__)))
filename = os.path.join(__location__, vivado_output_file)
stats = []
# Parse lines of the form "STAT_<vector>_<thread>_<index>: <time> ...":
# the text before ':' carries the identifier, and the timestamp is the
# second space-separated token after it (the first is empty/space).
with open(filename, 'r') as f:
    for line in f:
        if line.startswith(search_string):
            line_split = line.split(':')
            stat_id_split = line_split[0].split('_')
            stat_time_split = line_split[1].split(' ')
            stat = {}
            stat['vector'] = stat_id_split[1]       # test vector name, e.g. 'sma'
            stat['thread'] = int(stat_id_split[2])  # thread id within the vector
            stat['index'] = int(stat_id_split[3])   # sample index within the thread
            stat['time'] = float(stat_time_split[1])  # simulation timestamp
            stats.append(stat)
# Table header row.
print(table_template.format(label="Label", init="Init Cycles", count="Cycles", extra='Metadata'))
# parse short message A (sma)
fltr_stats_2 = list(filter(lambda x : x['vector'] == 'sma' and x['thread'] == 2, stats))
fltr_stats_3 = list(filter(lambda x : x['vector'] == 'sma' and x['thread'] == 3, stats))
print_header("Short Message A", "=")
print_header("Thread 1", "-")
# time for VIP to do a AXI-Lite write
print_stat_abs("Handler config register write (VIP)", fltr_stats_2[0]['time'], fltr_stats_2[1]['time'])
# time between first and last beats of incoming network SM
print_stat_offset("Incoming SM from network - third", fltr_stats_2[3]['time'], fltr_stats_2[4]['time'], 2.0)
print_delay(fltr_stats_2, 3, 4, 2.0)
print_stat_offset("Incoming SM from network - fourth", fltr_stats_2[5]['time'], fltr_stats_2[7]['time'], 3.0, fltr_stats_2[4]['time'])
print_delay(fltr_stats_2, 5, 7, 3.0)
# time for the interrupt signal to go high following the SM
print_stat_abs("Interrupt resolution", fltr_stats_2[7]['time'], fltr_stats_2[8]['time'])
print_header("Thread 2", "-")
# time between first and last beat of network reply
print_stat_offset("Outgoing reply to network - third", fltr_stats_3[1]['time'], fltr_stats_3[2]['time'], 2.0)
print_delay(fltr_stats_3, 0, 2, 2.0)
print_stat_offset("Outgoing reply to network - fourth", fltr_stats_3[3]['time'], fltr_stats_3[4]['time'], 2.0)
print_delay(fltr_stats_3, 2, 4, 2.0)
print_header("Inter-thread", "-")
# time between end of SM to beginning of reply
print_stat_abs("SM to reply delay - third", fltr_stats_2[4]['time'], fltr_stats_3[1]['time'])
print_stat_abs("SM to reply delay - fourth", fltr_stats_2[7]['time'], fltr_stats_3[3]['time'])
# parse short message B (smb)
fltr_stats_2 = list(filter(lambda x : x['vector'] == 'smb' and x['thread'] == 2, stats))
fltr_stats_3 = list(filter(lambda x : x['vector'] == 'smb' and x['thread'] == 3, stats))
fltr_stats_4 = list(filter(lambda x : x['vector'] == 'smb' and x['thread'] == 4, stats))
print_header("Short Message B", "=")
print_header("Thread 1", "-")
print_stat_offset("Incoming SM from kernel - 1", fltr_stats_2[0]['time'], fltr_stats_2[2]['time'], 3.0)
print_delay(fltr_stats_2, 0, 2, 3.0)
print_stat_offset("Incoming SM from kernel - 2", fltr_stats_2[3]['time'], fltr_stats_2[5]['time'], 3.0, fltr_stats_2[2]['time'])
print_delay(fltr_stats_2, 3, 5, 3.0)
# time for VIP to do a AXI-Lite write
print_stat_abs("Handler config register write (VIP)", fltr_stats_2[5]['time'], fltr_stats_2[6]['time'])
print_stat_offset("Incoming SM from kernel - 3", fltr_stats_2[8]['time'], fltr_stats_2[9]['time'], 2.0, fltr_stats_2[7]['time'])
print_delay(fltr_stats_2, 8, 9, 2.0)
print_stat_offset("Incoming SM from kernel - 4", fltr_stats_2[10]['time'], fltr_stats_2[12]['time'], 3.0, fltr_stats_2[9]['time'])
print_delay(fltr_stats_2, 10, 12, 3.0)
print_stat_offset("Incoming SM from kernel for network - 5", fltr_stats_2[13]['time'], fltr_stats_2[14]['time'], 2.0, fltr_stats_2[12]['time'])
print_delay(fltr_stats_2, 13, 14, 2.0)
print_stat_offset("Incoming SM from kernel for network - 6", fltr_stats_2[15]['time'], fltr_stats_2[16]['time'], 2.0, fltr_stats_2[14]['time'])
print_delay(fltr_stats_2, 15, 16, 2.0)
print_stat_offset("Incoming SM reply from network", fltr_stats_2[18]['time'], fltr_stats_2[19]['time'], 2.0)
print_delay(fltr_stats_2, 18, 19, 2.0)
print_header("Thread 2", "-")
# ? This needs to be updated after reply messages were no longer forwarded to the kernel
print_stat_offset("Outgoing to network - 7", fltr_stats_3[1]['time'], fltr_stats_3[2]['time'], 2.0)
print_delay(fltr_stats_3, 1, 2, 2.0)
print_stat_offset("Outgoing to network - 8", fltr_stats_3[3]['time'], fltr_stats_3[4]['time'], 2.0)
print_delay(fltr_stats_3, 3, 4, 2.0)
print_header("Inter-thread", "-")
# time between end of SM to beginning of reply
# print_stat_abs("SM to reply delay - 1", fltr_stats_2[0]['time'], fltr_stats_3[1]['time'])
# print_stat_abs("SM to reply delay - 2", fltr_stats_2[3]['time'], fltr_stats_3[2]['time'])
# print_stat_abs("SM to reply delay - 3", fltr_stats_2[8]['time'], fltr_stats_3[3]['time'])
# print_stat_abs("SM to reply delay - 4", fltr_stats_2[10]['time'], fltr_stats_3[4]['time'])
print_stat_abs("SM to reply delay - 5", fltr_stats_2[13]['time'], fltr_stats_3[2]['time'])
print_stat_abs("SM to reply delay - 6", fltr_stats_2[15]['time'], fltr_stats_3[4]['time'])
# print_stat_abs("Net reply to kernel forward delay", fltr_stats_2[20]['time'], fltr_stats_3[10]['time'])
| 45.316583 | 143 | 0.681748 | import os
vivado_output_file = "GAScore_latency.txt"
search_string = "STAT_"
units = 'us'
clock_period = 0.02
ount:6} | {extra:38} |"
def start_trans(first_beat):
return round(first_beat - clock_period, 3)
def cycle_count(base, delta, scaling=1):
cycles = round(round(delta - base, 3) / clock_period, 3)
return int(cycles)/scaling
def truncate_string(text_str, length):
trunc_length = length - 3
return text_str[:trunc_length] + '...' if len(text_str) > length else text_str
def print_stat_abs(label, first_beat, last_beat, prev_beat=None):
label_trunc = truncate_string(label, 38)
if prev_beat is not None:
init_cycles = cycle_count(prev_beat, first_beat)
else:
init_cycles = "N/A"
cycles = cycle_count(first_beat, last_beat)
print(table_template.format(label=label_trunc, init=init_cycles, count=cycles, extra='N/A'))
def print_stat_offset(label, first_beat, last_beat, target=None, prev_beat=None):
label_trunc = truncate_string(label, 38)
if prev_beat is not None:
init_cycles = cycle_count(prev_beat, start_trans(first_beat))
else:
init_cycles = "N/A"
cycles = cycle_count(start_trans(first_beat), last_beat)
if target is not None:
extra = truncate_string("Should be " + str(target) + " cycles", 38)
else:
extra = 'N/A'
print(table_template.format(label=label_trunc, init=init_cycles, count=cycles, extra=extra))
def create_tuples(start_index, end_index):
es_list = []
while(rindex <= end_index):
tuples_list.append((lindex, rindex))
lindex = rindex
rindex += 1
return tuples_list
def delay(stat_list, tuples_list):
max_delay = 0
min_delay = -1
for tuples in tuples_list:
current_delay = cycle_count(stat_list[tuples[0]]['time'], stat_list[tuples[1]]['time'])
if current_delay > max_delay:
max_delay = current_delay
if min_delay == -1:
min_delay = current_delay
elif current_delay < min_delay:
min_delay = current_delay
return max_delay, min_delay
def print_delay(stat_list, start_index, end_index, target_count):
if cycle_count(start_trans(stat_list[start_index]['time']), stat_list[end_index]['time']) != target_count:
max_delay, min_delay = delay(stat_list, create_tuples(start_index, end_index))
if max_delay != min_delay:
max_string = "Max delay between beats: " + str(max_delay) + " cycles"
label_trunc = truncate_string(max_string, 38)
print(table_template.format(label='^', init='^', count='^', extra=label_trunc))
min_string = "Min delay between beats: " + str(min_delay) + " cycles"
else:
min_string = "Delay between beats: " + str(min_delay) + " cycles"
label_trunc = truncate_string(min_string, 38)
print(table_template.format(label='^', init='^', count='^', extra=label_trunc))
def print_header(label, filler):
print("| " + label + " " + filler*(101-len(label)) + " |" )
__location__ = os.path.realpath(
os.path.join(os.getcwd(), os.path.dirname(__file__)))
filename = os.path.join(__location__, vivado_output_file)
stats = []
with open(filename, 'r') as f:
for line in f:
if line.startswith(search_string):
line_split = line.split(':')
stat_id_split = line_split[0].split('_')
stat_time_split = line_split[1].split(' ')
stat = {}
stat['vector'] = stat_id_split[1]
stat['thread'] = int(stat_id_split[2])
stat['index'] = int(stat_id_split[3])
stat['time'] = float(stat_time_split[1])
stats.append(stat)
print(table_template.format(label="Label", init="Init Cycles", count="Cycles", extra='Metadata'))
fltr_stats_2 = list(filter(lambda x : x['vector'] == 'sma' and x['thread'] == 2, stats))
fltr_stats_3 = list(filter(lambda x : x['vector'] == 'sma' and x['thread'] == 3, stats))
print_header("Short Message A", "=")
print_header("Thread 1", "-")
print_stat_abs("Handler config register write (VIP)", fltr_stats_2[0]['time'], fltr_stats_2[1]['time'])
print_stat_offset("Incoming SM from network - third", fltr_stats_2[3]['time'], fltr_stats_2[4]['time'], 2.0)
print_delay(fltr_stats_2, 3, 4, 2.0)
print_stat_offset("Incoming SM from network - fourth", fltr_stats_2[5]['time'], fltr_stats_2[7]['time'], 3.0, fltr_stats_2[4]['time'])
print_delay(fltr_stats_2, 5, 7, 3.0)
print_stat_abs("Interrupt resolution", fltr_stats_2[7]['time'], fltr_stats_2[8]['time'])
print_header("Thread 2", "-")
print_stat_offset("Outgoing reply to network - third", fltr_stats_3[1]['time'], fltr_stats_3[2]['time'], 2.0)
print_delay(fltr_stats_3, 0, 2, 2.0)
print_stat_offset("Outgoing reply to network - fourth", fltr_stats_3[3]['time'], fltr_stats_3[4]['time'], 2.0)
print_delay(fltr_stats_3, 2, 4, 2.0)
print_header("Inter-thread", "-")
print_stat_abs("SM to reply delay - third", fltr_stats_2[4]['time'], fltr_stats_3[1]['time'])
print_stat_abs("SM to reply delay - fourth", fltr_stats_2[7]['time'], fltr_stats_3[3]['time'])
fltr_stats_2 = list(filter(lambda x : x['vector'] == 'smb' and x['thread'] == 2, stats))
fltr_stats_3 = list(filter(lambda x : x['vector'] == 'smb' and x['thread'] == 3, stats))
fltr_stats_4 = list(filter(lambda x : x['vector'] == 'smb' and x['thread'] == 4, stats))
print_header("Short Message B", "=")
print_header("Thread 1", "-")
print_stat_offset("Incoming SM from kernel - 1", fltr_stats_2[0]['time'], fltr_stats_2[2]['time'], 3.0)
print_delay(fltr_stats_2, 0, 2, 3.0)
print_stat_offset("Incoming SM from kernel - 2", fltr_stats_2[3]['time'], fltr_stats_2[5]['time'], 3.0, fltr_stats_2[2]['time'])
print_delay(fltr_stats_2, 3, 5, 3.0)
print_stat_abs("Handler config register write (VIP)", fltr_stats_2[5]['time'], fltr_stats_2[6]['time'])
print_stat_offset("Incoming SM from kernel - 3", fltr_stats_2[8]['time'], fltr_stats_2[9]['time'], 2.0, fltr_stats_2[7]['time'])
print_delay(fltr_stats_2, 8, 9, 2.0)
print_stat_offset("Incoming SM from kernel - 4", fltr_stats_2[10]['time'], fltr_stats_2[12]['time'], 3.0, fltr_stats_2[9]['time'])
print_delay(fltr_stats_2, 10, 12, 3.0)
print_stat_offset("Incoming SM from kernel for network - 5", fltr_stats_2[13]['time'], fltr_stats_2[14]['time'], 2.0, fltr_stats_2[12]['time'])
print_delay(fltr_stats_2, 13, 14, 2.0)
print_stat_offset("Incoming SM from kernel for network - 6", fltr_stats_2[15]['time'], fltr_stats_2[16]['time'], 2.0, fltr_stats_2[14]['time'])
print_delay(fltr_stats_2, 15, 16, 2.0)
print_stat_offset("Incoming SM reply from network", fltr_stats_2[18]['time'], fltr_stats_2[19]['time'], 2.0)
print_delay(fltr_stats_2, 18, 19, 2.0)
print_header("Thread 2", "-")
print_stat_offset("Outgoing to network - 7", fltr_stats_3[1]['time'], fltr_stats_3[2]['time'], 2.0)
print_delay(fltr_stats_3, 1, 2, 2.0)
print_stat_offset("Outgoing to network - 8", fltr_stats_3[3]['time'], fltr_stats_3[4]['time'], 2.0)
print_delay(fltr_stats_3, 3, 4, 2.0)
print_header("Inter-thread", "-")
print_stat_abs("SM to reply delay - 5", fltr_stats_2[13]['time'], fltr_stats_3[2]['time'])
print_stat_abs("SM to reply delay - 6", fltr_stats_2[15]['time'], fltr_stats_3[4]['time'])
| true | true |
f7feefc2e6409d729d7f47aec8f699ae8879d5a6 | 163 | py | Python | mozumder/template/__init__.py | mozumder/django-mozumder | 887ce303249eac2d77de062fd57023dbc4b782dd | [
"MIT"
] | 1 | 2020-06-13T06:12:16.000Z | 2020-06-13T06:12:16.000Z | mozumder/template/__init__.py | mozumder/django-mozumder | 887ce303249eac2d77de062fd57023dbc4b782dd | [
"MIT"
] | 4 | 2020-06-18T03:53:29.000Z | 2021-06-09T17:56:12.000Z | mozumder/template/__init__.py | mozumder/django-mozumder | 887ce303249eac2d77de062fd57023dbc4b782dd | [
"MIT"
] | null | null | null | from .components import *
from .template import MozumderTemplate, Block
from .default import MozumderHTMLMessageTemplate
from .errors import MozumderErrorTemplate
| 32.6 | 48 | 0.858896 | from .components import *
from .template import MozumderTemplate, Block
from .default import MozumderHTMLMessageTemplate
from .errors import MozumderErrorTemplate
| true | true |
f7fef1c85ca7f9bf5ff712c32f46593eb5b8ca11 | 429 | py | Python | nba-stats/str_to_second.py | fndomariano/data-studies | 726ded420ca22eb9a7526ef43bf01506fbf47519 | [
"MIT"
] | null | null | null | nba-stats/str_to_second.py | fndomariano/data-studies | 726ded420ca22eb9a7526ef43bf01506fbf47519 | [
"MIT"
] | null | null | null | nba-stats/str_to_second.py | fndomariano/data-studies | 726ded420ca22eb9a7526ef43bf01506fbf47519 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
def str_to_second(time):
    """Convert a playing-time value to seconds.

    Numbers are treated as a minute count.  Strings may be "H:MM:SS",
    "M:SS" or a bare minute count such as "12".
    """
    if isinstance(time, (int, float)):
        return time * 60
    parts = time.split(':')
    if len(parts) == 3:
        hours, minutes, seconds = parts
        return int(hours) * 3600 + int(minutes) * 60 + int(seconds)
    if len(parts) == 2:
        minutes, seconds = parts
        return int(minutes) * 60 + int(seconds)
    return int(parts[0]) * 60
def str_to_second(time):
if isinstance(time, int) or isinstance(time, float):
return time * 60
time = time.split(':')
if (len(time) == 3):
hours = int(time[0]) * 3600
minutes = int(time[1]) * 60
seconds = int(time[2])
return (hours + minutes + seconds)
if (len(time) == 2):
minutes = int(time[0]) * 60
seconds = int(time[1])
return (minutes + seconds)
return int(time[0]) * 60 | true | true |
f7fef1ed64208e4f7bf8401a5c23a4a3069d0873 | 3,520 | py | Python | setup.py | jbarlow-mcafee/opendxl-thehive-service-python | 371a1fd3d4731e654d4a100069ed8d56c4044624 | [
"Apache-2.0"
] | null | null | null | setup.py | jbarlow-mcafee/opendxl-thehive-service-python | 371a1fd3d4731e654d4a100069ed8d56c4044624 | [
"Apache-2.0"
] | null | null | null | setup.py | jbarlow-mcafee/opendxl-thehive-service-python | 371a1fd3d4731e654d4a100069ed8d56c4044624 | [
"Apache-2.0"
] | null | null | null | # pylint: disable=no-member, no-name-in-module, import-error
from __future__ import absolute_import
import glob
import os
import distutils.command.sdist
import distutils.log
import subprocess
from setuptools import Command, setup
import setuptools.command.sdist
# Patch setuptools' sdist behaviour with distutils' sdist behaviour
setuptools.command.sdist.sdist.run = distutils.command.sdist.sdist.run
VERSION_INFO = {}
CWD = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(CWD, "dxlthehiveservice", "_version.py")) as f:
exec(f.read(), VERSION_INFO) # pylint: disable=exec-used
class LintCommand(Command):
    """
    Custom setuptools command for running lint
    """
    description = 'run lint against project source files'
    user_options = []
    def initialize_options(self):
        # Required by the setuptools Command interface; nothing to set up.
        pass
    def finalize_options(self):
        # Required by the setuptools Command interface; nothing to finalize.
        pass
    def run(self):
        """Run pylint over the package, tests and top-level scripts, then
        the samples (which use a relaxed rcfile).  Propagates
        subprocess.CalledProcessError when pylint exits non-zero."""
        self.announce("Running pylint for library source files and tests",
                      level=distutils.log.INFO)
        subprocess.check_call(["pylint", "dxlthehiveservice", "tests"] +
                              glob.glob("*.py"))
        self.announce("Running pylint for samples", level=distutils.log.INFO)
        subprocess.check_call(["pylint"] + glob.glob("sample/*.py") +
                              glob.glob("sample/**/*.py") +
                              ["--rcfile", ".pylintrc.samples"])
class CiCommand(Command):
"""
Custom setuptools command for running steps that are performed during
Continuous Integration testing.
"""
description = 'run CI steps (lint, test, etc.)'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
self.run_command("lint")
self.run_command("test")
TEST_REQUIREMENTS = ["mock", "nose", "pylint", "requests-mock"]
DEV_REQUIREMENTS = TEST_REQUIREMENTS + ["sphinx"]
setup(
# Package name:
name="dxlthehiveservice",
# Version number:
version=VERSION_INFO["__version__"],
# Package requirements
install_requires=[
"requests",
"dxlbootstrap>=0.2.0",
"dxlclient>=4.1.0.184"
],
tests_require=TEST_REQUIREMENTS,
extras_require={
"dev": DEV_REQUIREMENTS,
"test": TEST_REQUIREMENTS
},
test_suite="nose.collector",
# Package author details:
author="McAfee LLC",
# License
license="Apache License 2.0",
# Keywords
keywords=['opendxl', 'dxl', 'mcafee', 'service', 'thehive'],
# Packages
packages=[
"dxlthehiveservice",
"dxlthehiveservice._config",
"dxlthehiveservice._config.sample",
"dxlthehiveservice._config.app"],
package_data={
"dxlthehiveservice._config.sample" : ['*'],
"dxlthehiveservice._config.app" : ['*']},
# Details
url="http://www.mcafee.com",
description="TheHive DXL Python Service",
long_description=open('README').read(),
# Python version requirements
python_requires=">=2.7.9,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*",
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6"
],
cmdclass={
"ci": CiCommand,
"lint": LintCommand
}
)
| 27.076923 | 77 | 0.621875 |
from __future__ import absolute_import
import glob
import os
import distutils.command.sdist
import distutils.log
import subprocess
from setuptools import Command, setup
import setuptools.command.sdist
setuptools.command.sdist.sdist.run = distutils.command.sdist.sdist.run
VERSION_INFO = {}
CWD = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(CWD, "dxlthehiveservice", "_version.py")) as f:
exec(f.read(), VERSION_INFO)
class LintCommand(Command):
description = 'run lint against project source files'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
self.announce("Running pylint for library source files and tests",
level=distutils.log.INFO)
subprocess.check_call(["pylint", "dxlthehiveservice", "tests"] +
glob.glob("*.py"))
self.announce("Running pylint for samples", level=distutils.log.INFO)
subprocess.check_call(["pylint"] + glob.glob("sample/*.py") +
glob.glob("sample/**/*.py") +
["--rcfile", ".pylintrc.samples"])
class CiCommand(Command):
description = 'run CI steps (lint, test, etc.)'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
self.run_command("lint")
self.run_command("test")
TEST_REQUIREMENTS = ["mock", "nose", "pylint", "requests-mock"]
DEV_REQUIREMENTS = TEST_REQUIREMENTS + ["sphinx"]
setup(
name="dxlthehiveservice",
version=VERSION_INFO["__version__"],
install_requires=[
"requests",
"dxlbootstrap>=0.2.0",
"dxlclient>=4.1.0.184"
],
tests_require=TEST_REQUIREMENTS,
extras_require={
"dev": DEV_REQUIREMENTS,
"test": TEST_REQUIREMENTS
},
test_suite="nose.collector",
author="McAfee LLC",
license="Apache License 2.0",
keywords=['opendxl', 'dxl', 'mcafee', 'service', 'thehive'],
packages=[
"dxlthehiveservice",
"dxlthehiveservice._config",
"dxlthehiveservice._config.sample",
"dxlthehiveservice._config.app"],
package_data={
"dxlthehiveservice._config.sample" : ['*'],
"dxlthehiveservice._config.app" : ['*']},
url="http://www.mcafee.com",
description="TheHive DXL Python Service",
long_description=open('README').read(),
python_requires=">=2.7.9,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*",
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6"
],
cmdclass={
"ci": CiCommand,
"lint": LintCommand
}
)
| true | true |
f7fef337b283f919d64c8d148223da3698cdcaca | 4,028 | py | Python | core/tests/gae_suite.py | tjinjoy/oppia | ed5ccbd95e42078457d40dde1dda02f1ae6a4354 | [
"Apache-2.0"
] | 2 | 2019-03-31T07:03:32.000Z | 2019-04-24T18:12:53.000Z | core/tests/gae_suite.py | tjinjoy/oppia | ed5ccbd95e42078457d40dde1dda02f1ae6a4354 | [
"Apache-2.0"
] | 3 | 2019-08-01T18:38:54.000Z | 2019-08-12T03:02:59.000Z | core/tests/gae_suite.py | tjinjoy/oppia | ed5ccbd95e42078457d40dde1dda02f1ae6a4354 | [
"Apache-2.0"
] | 1 | 2020-03-15T14:29:55.000Z | 2020-03-15T14:29:55.000Z | # Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Oppia test suite.
In general, this script should not be run directly. Instead, invoke
it from the command line by running
python -m scripts.run_backend_tests
from the oppia/ root folder.
"""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import argparse
import os
import sys
import unittest
CURR_DIR = os.path.abspath(os.getcwd())
OPPIA_TOOLS_DIR = os.path.join(CURR_DIR, '..', 'oppia_tools')
THIRD_PARTY_DIR = os.path.join(CURR_DIR, 'third_party')
DIRS_TO_ADD_TO_SYS_PATH = [
os.path.join(
OPPIA_TOOLS_DIR, 'google_appengine_1.9.67', 'google_appengine'),
os.path.join(OPPIA_TOOLS_DIR, 'webtest-2.0.33'),
os.path.join(
OPPIA_TOOLS_DIR, 'google_appengine_1.9.67', 'google_appengine',
'lib', 'webob_0_9'),
os.path.join(OPPIA_TOOLS_DIR, 'browsermob-proxy-0.8.0'),
os.path.join(OPPIA_TOOLS_DIR, 'selenium-3.13.0'),
os.path.join(OPPIA_TOOLS_DIR, 'Pillow-6.0.0'),
os.path.join(OPPIA_TOOLS_DIR, 'psutil-5.6.7'),
CURR_DIR,
os.path.join(THIRD_PARTY_DIR, 'backports.functools_lru_cache-1.5'),
os.path.join(THIRD_PARTY_DIR, 'beautifulsoup4-4.7.1'),
os.path.join(THIRD_PARTY_DIR, 'bleach-3.1.0'),
os.path.join(THIRD_PARTY_DIR, 'callbacks-0.3.0'),
os.path.join(THIRD_PARTY_DIR, 'future-0.17.1'),
os.path.join(THIRD_PARTY_DIR, 'gae-cloud-storage-1.9.22.1'),
os.path.join(THIRD_PARTY_DIR, 'gae-mapreduce-1.9.22.0'),
os.path.join(THIRD_PARTY_DIR, 'gae-pipeline-1.9.22.1'),
os.path.join(THIRD_PARTY_DIR, 'graphy-1.0.0'),
os.path.join(THIRD_PARTY_DIR, 'html5lib-python-1.0.1'),
os.path.join(THIRD_PARTY_DIR, 'mutagen-1.42.0'),
os.path.join(THIRD_PARTY_DIR, 'simplejson-3.16.0'),
os.path.join(THIRD_PARTY_DIR, 'six-1.12.0'),
os.path.join(THIRD_PARTY_DIR, 'soupsieve-1.9.1'),
os.path.join(THIRD_PARTY_DIR, 'webencodings-0.5.1'),
]
_PARSER = argparse.ArgumentParser()
_PARSER.add_argument(
'--test_target',
help='optional dotted module name of the test(s) to run',
type=str)
def create_test_suites(test_target=None):
"""Creates test suites. If test_dir is None, runs all tests."""
if test_target and '/' in test_target:
raise Exception('The delimiter in test_target should be a dot (.)')
loader = unittest.TestLoader()
return (
[loader.loadTestsFromName(test_target)]
if test_target else [loader.discover(
CURR_DIR, pattern='[^core/tests/data]*_test.py',
top_level_dir=CURR_DIR)])
def main(args=None):
"""Runs the tests."""
parsed_args = _PARSER.parse_args(args=args)
for directory in DIRS_TO_ADD_TO_SYS_PATH:
if not os.path.exists(os.path.dirname(directory)):
raise Exception('Directory %s does not exist.' % directory)
sys.path.insert(0, directory)
import dev_appserver
dev_appserver.fix_sys_path()
suites = create_test_suites(test_target=parsed_args.test_target)
results = [unittest.TextTestRunner(verbosity=2).run(suite)
for suite in suites]
for result in results:
if result.errors or result.failures:
raise Exception(
'Test suite failed: %s tests run, %s errors, %s failures.' % (
result.testsRun, len(result.errors), len(result.failures)))
if __name__ == '__main__':
main()
| 35.964286 | 79 | 0.69712 |
from __future__ import absolute_import
from __future__ import unicode_literals
import argparse
import os
import sys
import unittest
CURR_DIR = os.path.abspath(os.getcwd())
OPPIA_TOOLS_DIR = os.path.join(CURR_DIR, '..', 'oppia_tools')
THIRD_PARTY_DIR = os.path.join(CURR_DIR, 'third_party')
DIRS_TO_ADD_TO_SYS_PATH = [
os.path.join(
OPPIA_TOOLS_DIR, 'google_appengine_1.9.67', 'google_appengine'),
os.path.join(OPPIA_TOOLS_DIR, 'webtest-2.0.33'),
os.path.join(
OPPIA_TOOLS_DIR, 'google_appengine_1.9.67', 'google_appengine',
'lib', 'webob_0_9'),
os.path.join(OPPIA_TOOLS_DIR, 'browsermob-proxy-0.8.0'),
os.path.join(OPPIA_TOOLS_DIR, 'selenium-3.13.0'),
os.path.join(OPPIA_TOOLS_DIR, 'Pillow-6.0.0'),
os.path.join(OPPIA_TOOLS_DIR, 'psutil-5.6.7'),
CURR_DIR,
os.path.join(THIRD_PARTY_DIR, 'backports.functools_lru_cache-1.5'),
os.path.join(THIRD_PARTY_DIR, 'beautifulsoup4-4.7.1'),
os.path.join(THIRD_PARTY_DIR, 'bleach-3.1.0'),
os.path.join(THIRD_PARTY_DIR, 'callbacks-0.3.0'),
os.path.join(THIRD_PARTY_DIR, 'future-0.17.1'),
os.path.join(THIRD_PARTY_DIR, 'gae-cloud-storage-1.9.22.1'),
os.path.join(THIRD_PARTY_DIR, 'gae-mapreduce-1.9.22.0'),
os.path.join(THIRD_PARTY_DIR, 'gae-pipeline-1.9.22.1'),
os.path.join(THIRD_PARTY_DIR, 'graphy-1.0.0'),
os.path.join(THIRD_PARTY_DIR, 'html5lib-python-1.0.1'),
os.path.join(THIRD_PARTY_DIR, 'mutagen-1.42.0'),
os.path.join(THIRD_PARTY_DIR, 'simplejson-3.16.0'),
os.path.join(THIRD_PARTY_DIR, 'six-1.12.0'),
os.path.join(THIRD_PARTY_DIR, 'soupsieve-1.9.1'),
os.path.join(THIRD_PARTY_DIR, 'webencodings-0.5.1'),
]
_PARSER = argparse.ArgumentParser()
_PARSER.add_argument(
'--test_target',
help='optional dotted module name of the test(s) to run',
type=str)
def create_test_suites(test_target=None):
if test_target and '/' in test_target:
raise Exception('The delimiter in test_target should be a dot (.)')
loader = unittest.TestLoader()
return (
[loader.loadTestsFromName(test_target)]
if test_target else [loader.discover(
CURR_DIR, pattern='[^core/tests/data]*_test.py',
top_level_dir=CURR_DIR)])
def main(args=None):
parsed_args = _PARSER.parse_args(args=args)
for directory in DIRS_TO_ADD_TO_SYS_PATH:
if not os.path.exists(os.path.dirname(directory)):
raise Exception('Directory %s does not exist.' % directory)
sys.path.insert(0, directory)
import dev_appserver
dev_appserver.fix_sys_path()
suites = create_test_suites(test_target=parsed_args.test_target)
results = [unittest.TextTestRunner(verbosity=2).run(suite)
for suite in suites]
for result in results:
if result.errors or result.failures:
raise Exception(
'Test suite failed: %s tests run, %s errors, %s failures.' % (
result.testsRun, len(result.errors), len(result.failures)))
if __name__ == '__main__':
main()
| true | true |
f7fef3b275b60c6bd9a041b4632d6f8bd6cea8b8 | 5,297 | py | Python | pyscience/algebra/variable.py | m-alzam/pyscience | 63452dd6dc662928613cd45c19b911d48866fabe | [
"MIT"
] | null | null | null | pyscience/algebra/variable.py | m-alzam/pyscience | 63452dd6dc662928613cd45c19b911d48866fabe | [
"MIT"
] | null | null | null | pyscience/algebra/variable.py | m-alzam/pyscience | 63452dd6dc662928613cd45c19b911d48866fabe | [
"MIT"
] | null | null | null | """
pyscience - python science programming
Copyright (c) 2019 Manuel Alcaraz Zambrano
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from pyscience import algebra
from pyscience.math import Fraction
class Variable:
def __init__(self, name='x'):
self.name = name
def evaluate(self, **kwargs):
"""
Evaluate the expression for the given values. Example:
>>> x = Variable(name='x')
>>> x.evaluate(x=3)
3
>>> x.evaluate(y=6)
x # Type: Variable
"""
items = kwargs.keys()
if self.name in list(items):
return kwargs.get(self.name)
return Variable(name=self.name)
def __mul__(self, value):
if isinstance(value, algebra.Monomial):
return algebra.Monomial(variables=value.variables + self.name,
coefficient=value.coefficient)
elif isinstance(value, int):
return algebra.Monomial(variables=self.name, coefficient=value)
elif isinstance(value, Variable):
return algebra.Monomial(variables=self.name + value.name)
elif isinstance(value, Fraction):
return algebra.Monomial(variables=self.name, coefficient=value)
elif isinstance(value, algebra.Polynomial):
return value * self
raise TypeError(f'Cannot multiply Variable by {type(value)}')
def __add__(self, value):
if isinstance(value, algebra.Monomial):
if value.variables == self.name:
return algebra.Monomial(coefficient=1 + value.coefficient, variables=self.name)
else:
return algebra.Polynomial(monomials=[algebra.Monomial(variables=self.name), value])
elif isinstance(value, Variable):
if value.name == self.name:
return algebra.Monomial(coefficient=2, variables=self.name)
else:
return algebra.Polynomial(
monomials=[algebra.Monomial(variables=self.name),
algebra.Monomial(variables=value.name)])
elif isinstance(value, int):
return algebra.Polynomial(monomials=[algebra.Monomial(variables=self.name)],
numerical_term=value)
elif isinstance(value, Fraction):
return Fraction(value.numerator + self * value.denominator, value.denominator)
raise TypeError(f'Cannot add Variable to {type(value)}')
def __radd__(self, value):
return self.__add__(value)
def __sub__(self, value):
if isinstance(value, algebra.Monomial) and value.variables == self.name:
return algebra.Monomial(coefficient=1 - value.coefficient, variables=self.name)
elif isinstance(value, Variable) and value.name == self.name:
return 0
elif isinstance(value, int):
return algebra.Polynomial(monomials=[algebra.Monomial(variables=self.name), ],
numerical_term=-value)
elif isinstance(value, Fraction):
return Fraction(value.numerator - self * value.denominator, value.denominator)
raise ValueError(f'Cannot subtract Variable to {type(value)}')
def __rsub__(self, value):
return (-self) + value
def __truediv__(self, value):
if isinstance(value, (int, Variable)):
return Fraction(self, value)
raise ValueError(f'Cannot divide a Variable by {type(value)}')
def __rtruediv__(self, value):
if isinstance(value, (int, value)):
return Fraction(value, self)
raise ValueError(f'Cannot divide a {type(value)} by a Variable')
def __pow__(self, value, mod=None):
if mod:
raise NotImplementedError
return algebra.Monomial(variables=self.name * value)
def __rmul__(self, value):
return self.__mul__(value)
def __neg__(self):
return algebra.Monomial(variables=self.name, coefficient=-1)
def __pos__(self):
return self
def __eq__(self, other):
if isinstance(other, self.__class__):
return other.name == self.name
return False
def __str__(self):
return self.name
def __repr__(self):
return f'<Variable {self.name}>'
| 37.041958 | 99 | 0.650179 |
from pyscience import algebra
from pyscience.math import Fraction
class Variable:
def __init__(self, name='x'):
self.name = name
def evaluate(self, **kwargs):
items = kwargs.keys()
if self.name in list(items):
return kwargs.get(self.name)
return Variable(name=self.name)
def __mul__(self, value):
if isinstance(value, algebra.Monomial):
return algebra.Monomial(variables=value.variables + self.name,
coefficient=value.coefficient)
elif isinstance(value, int):
return algebra.Monomial(variables=self.name, coefficient=value)
elif isinstance(value, Variable):
return algebra.Monomial(variables=self.name + value.name)
elif isinstance(value, Fraction):
return algebra.Monomial(variables=self.name, coefficient=value)
elif isinstance(value, algebra.Polynomial):
return value * self
raise TypeError(f'Cannot multiply Variable by {type(value)}')
def __add__(self, value):
if isinstance(value, algebra.Monomial):
if value.variables == self.name:
return algebra.Monomial(coefficient=1 + value.coefficient, variables=self.name)
else:
return algebra.Polynomial(monomials=[algebra.Monomial(variables=self.name), value])
elif isinstance(value, Variable):
if value.name == self.name:
return algebra.Monomial(coefficient=2, variables=self.name)
else:
return algebra.Polynomial(
monomials=[algebra.Monomial(variables=self.name),
algebra.Monomial(variables=value.name)])
elif isinstance(value, int):
return algebra.Polynomial(monomials=[algebra.Monomial(variables=self.name)],
numerical_term=value)
elif isinstance(value, Fraction):
return Fraction(value.numerator + self * value.denominator, value.denominator)
raise TypeError(f'Cannot add Variable to {type(value)}')
def __radd__(self, value):
return self.__add__(value)
def __sub__(self, value):
if isinstance(value, algebra.Monomial) and value.variables == self.name:
return algebra.Monomial(coefficient=1 - value.coefficient, variables=self.name)
elif isinstance(value, Variable) and value.name == self.name:
return 0
elif isinstance(value, int):
return algebra.Polynomial(monomials=[algebra.Monomial(variables=self.name), ],
numerical_term=-value)
elif isinstance(value, Fraction):
return Fraction(value.numerator - self * value.denominator, value.denominator)
raise ValueError(f'Cannot subtract Variable to {type(value)}')
def __rsub__(self, value):
return (-self) + value
def __truediv__(self, value):
if isinstance(value, (int, Variable)):
return Fraction(self, value)
raise ValueError(f'Cannot divide a Variable by {type(value)}')
def __rtruediv__(self, value):
if isinstance(value, (int, value)):
return Fraction(value, self)
raise ValueError(f'Cannot divide a {type(value)} by a Variable')
def __pow__(self, value, mod=None):
if mod:
raise NotImplementedError
return algebra.Monomial(variables=self.name * value)
def __rmul__(self, value):
return self.__mul__(value)
def __neg__(self):
return algebra.Monomial(variables=self.name, coefficient=-1)
def __pos__(self):
return self
def __eq__(self, other):
if isinstance(other, self.__class__):
return other.name == self.name
return False
def __str__(self):
return self.name
def __repr__(self):
return f'<Variable {self.name}>'
| true | true |
f7fef4eb25bd85db81b2be7750171bf10fc591df | 21,259 | py | Python | ashpy/losses/gan.py | EmanueleGhelfi/ashpy | 6156b97c636c5b568c5a57c23b77d9ae28421bba | [
"Apache-2.0"
] | null | null | null | ashpy/losses/gan.py | EmanueleGhelfi/ashpy | 6156b97c636c5b568c5a57c23b77d9ae28421bba | [
"Apache-2.0"
] | 2 | 2019-07-16T08:20:27.000Z | 2019-07-16T11:10:45.000Z | ashpy/losses/gan.py | EmanueleGhelfi/ashpy | 6156b97c636c5b568c5a57c23b77d9ae28421bba | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Zuru Tech HK Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GAN losses."""
from abc import ABC
from enum import Enum
from typing import List, Union, Type
import tensorflow as tf
from ashpy.contexts import GANContext
from ashpy.losses.executor import Executor, SumExecutor
class AdversarialLossType(Enum):
"""
Enumeration for Adversarial Losses. Implemented: GAN and LSGAN.
"""
GAN = 0 # classical gan loss (minmax)
LSGAN = 1 # Least Square GAN
class GANExecutor(Executor, ABC):
"""
Executor for GANs. Implements the basic functions needed by the GAN losses
"""
@staticmethod
def get_discriminator_inputs(
context: GANContext,
fake_or_real: tf.Tensor,
condition: tf.Tensor,
training: bool,
) -> Union[tf.Tensor, List[tf.Tensor]]:
"""
Returns the discriminator inputs. If needed it uses the encoder.
The current implementation uses the number of inputs to determine
whether the discriminator is conditioned or not.
Args:
context (:py:class:`ashpy.contexts.gan.GANContext`): context for GAN models
fake_or_real (:py:class:`tf.Tensor`): discriminator input tensor, it can be fake (generated) or real
condition (:py:class:`tf.Tensor`): discriminator condition (it can also be generator noise)
training (:py:class:`bool`): whether is training phase or not
Returns:
The discriminator inputs.
"""
num_inputs = len(context.discriminator_model.inputs)
# Handle encoder
if hasattr(context, "encoder_model"):
if num_inputs == 2:
d_inputs = [
fake_or_real,
context.encoder_model(fake_or_real, training=training),
]
elif num_inputs == 3:
d_inputs = [
fake_or_real,
context.encoder_model(fake_or_real, training=training),
condition,
]
else:
raise ValueError(
f"Context has encoder_model, but generator has only {num_inputs} inputs"
)
else:
if num_inputs == 2:
d_inputs = [fake_or_real, condition]
else:
d_inputs = fake_or_real
return d_inputs
class AdversarialLossG(GANExecutor):
r"""
Base class for the adversarial loss of the generator
"""
def __init__(self, loss_fn=None):
"""
Args:
loss_fn: loss_fn to call passing (tf.ones_like(d_fake_i), d_fake_i)
"""
super().__init__(loss_fn)
@Executor.reduce_loss
def call(self, context, *, fake, condition, training, **kwargs):
r"""
Call: setup the discriminator inputs and calls `loss_fn`
Args:
context: GAN Context
fake: fake images
condition: generator condition
training: if training or evaluation
Returns:
The loss for each example
"""
fake_inputs = self.get_discriminator_inputs(
context=context, fake_or_real=fake, condition=condition, training=training
)
d_fake = context.discriminator_model(fake_inputs, training=training)
# support for Multiscale discriminator
# TODO: Improve
if isinstance(d_fake, list):
value = tf.add_n(
[
tf.reduce_mean(
self._fn(tf.ones_like(d_fake_i), d_fake_i), axis=[1, 2]
)
for d_fake_i in d_fake
]
)
return value
else:
value = self._fn(tf.ones_like(d_fake), d_fake)
value = tf.cond(
tf.equal(tf.rank(d_fake), tf.constant(4)),
lambda: value,
lambda: tf.expand_dims(tf.expand_dims(value, axis=-1), axis=-1),
)
return tf.reduce_mean(value, axis=[1, 2])
class GeneratorBCE(AdversarialLossG):
r"""
The Binary CrossEntropy computed among the generator and the 1 label.
.. math::
L_{G} = E [\log (D( G(z))]
"""
def __init__(self, from_logits=True):
self.name = "GeneratorBCE"
super().__init__(tf.losses.BinaryCrossentropy(from_logits=from_logits))
class GeneratorLSGAN(AdversarialLossG):
r"""
Least Square GAN Loss for generator
Reference: https://arxiv.org/abs/1611.04076
Basically the Mean Squared Error between
the discriminator output when evaluated in fake and 1
.. math::
L_{G} = \frac{1}{2} E [(1 - D(G(z))^2]
"""
def __init__(self):
super().__init__(tf.keras.losses.MeanSquaredError())
self.name = "GeneratorLSGAN"
class GeneratorL1(GANExecutor):
r"""
L1 loss between the generator output and the target.
.. math::
L_G = E ||x - G(z)||_1
where x is the target and G(z) is generated image.
"""
class L1Loss(tf.losses.Loss):
def __init__(self):
super().__init__()
self._reduction = tf.losses.Reduction.SUM_OVER_BATCH_SIZE
@property
def reduction(self):
return self._reduction
@reduction.setter
def reduction(self, value):
self._reduction = value
def call(self, x, y):
"""
For each element the mean of the l1 between x and y
"""
if self._reduction == tf.losses.Reduction.SUM_OVER_BATCH_SIZE:
axis = None
elif self._reduction == tf.losses.Reduction.NONE:
axis = (1, 2, 3)
else:
raise ValueError("L1Loss: unhandled reduction type")
return tf.reduce_mean(tf.abs(x - y), axis=axis)
def __init__(self):
super().__init__(GeneratorL1.L1Loss())
@Executor.reduce_loss
def call(self, context, *, fake, real, **kwargs):
mae = self._fn(fake, real)
return mae
class FeatureMatchingLoss(GeneratorL1):
r"""
Conditional GAN Feature matching loss.
The loss is computed for each example and it's the L1 (MAE) of the feature difference.
Implementation of pix2pix HD: https://github.com/NVIDIA/pix2pixHD
.. math::
\text{FM} = \sum_{i=0}^N \frac{1}{M_i} ||D_i(x, c) - D_i(G(c), c) ||_1
Where:
- D_i is the i-th layer of the discriminator
- N is the total number of layer of the discriminator
- M_i is the number of components for the i-th layer
- x is the target image
- c is the condition
- G(c) is the generated image from the condition c
- || ||_1 stands for norm 1.
This is for a single example: basically for each layer of the discriminator we compute the absolute error between
the layer evaluated in real examples and in fake examples.
Then we average along the batch. In the case where D_i is a multidimensional tensor we simply calculate the mean
over the axis 1,2,3.
"""
@Executor.reduce_loss
def call(self, context, *, fake, real, condition, training, **kwargs):
fake_inputs = self.get_discriminator_inputs(
context, fake_or_real=fake, condition=condition, training=training
)
real_inputs = self.get_discriminator_inputs(
context, fake_or_real=real, condition=condition, training=training
)
_, features_fake = context.discriminator_model(
fake_inputs, training=training, return_features=True
)
_, features_real = context.discriminator_model(
real_inputs, training=training, return_features=True
)
# for each feature the L1 between the real and the fake
# every call to fn should return [batch_size, 1] that is the mean L1
feature_loss = [
self._fn(feat_real_i, feat_fake_i)
for feat_real_i, feat_fake_i in zip(features_real, features_fake)
]
mae = tf.add_n(feature_loss)
return mae
class CategoricalCrossEntropy(Executor):
r"""
Categorical Cross Entropy between generator output and target.
Useful when the output of the generator is a distribution over classes
The target must be represented in one hot notation
"""
def __init__(self):
self.name = "CrossEntropy"
super().__init__(tf.keras.losses.CategoricalCrossentropy())
@Executor.reduce_loss
def call(self, context, *, fake, real, **kwargs):
"""
Compute the categorical cross entropy loss
Args:
context: unused
fake: fake images G(condition)
real: Real images x(c)
**kwargs:
Returns:
The categorical cross entropy loss for each example
"""
loss_value = tf.reduce_mean(self._fn(real, fake), axis=[1, 2])
return loss_value
class Pix2PixLoss(SumExecutor):
r"""
Weighted sum of :py:class:`ashpy.losses.gan.GeneratorL1`, :py:class:`ashpy.losses.gan.AdversarialLossG` and
:py:class:`ashpy.losses.gan.FeatureMatchingLoss`.
Used by Pix2Pix [1] and Pix2PixHD [2]
.. [1] Image-to-Image Translation with Conditional Adversarial Networks
https://arxiv.org/abs/1611.07004
.. [2] High-Resolution Image Synthesis and Semantic Manipulation with Conditional GANs
https://arxiv.org/abs/1711.11585
"""
def __init__(
self,
l1_loss_weight=100.0,
adversarial_loss_weight=1.0,
feature_matching_weight=10.0,
adversarial_loss_type: Union[
AdversarialLossType, int
] = AdversarialLossType.GAN,
use_feature_matching_loss: bool = False,
):
r"""
Weighted sum of :py:class:`ashpy.losses.gan.GeneratorL1`, :py:class:`ashpy.losses.gan.AdversarialLossG` and
:py:class:`ashpy.losses.gan.FeatureMatchingLoss`.
Args:
l1_loss_weight: weight of L1 loss (scalar, :py:class:`tf.Tensor`, callable)
adversarial_loss_weight: weight of adversarial loss (scalar, :py:class:`tf.Tensor`, callable)
feature_matching_weight: weight of the feature matching loss (scalar, :py:class:`tf.Tensor`, callable)
adversarial_loss_type (:py:class:`ashpy.losses.gan.AdversarialLossType`): Adversarial loss type
(:py:class:`ashpy.losses.gan.AdversarialLossType.GAN`
or :py:class:`ashpy.losses.gan.AdversarialLossType.LSGAN`)
use_feature_matching_loss (bool): if True use also :py:class:`ashpy.losses.gan.FeatureMatchingLoss`
"""
executors = [
GeneratorL1() * l1_loss_weight,
get_adversarial_loss_generator(adversarial_loss_type)()
* adversarial_loss_weight,
]
if use_feature_matching_loss:
executors.append(FeatureMatchingLoss() * feature_matching_weight)
super().__init__(executors)
class Pix2PixLossSemantic(SumExecutor):
"""
Weighted sum of :py:class:`ashpy.losses.gan.CategoricalCrossEntropy`, :py:class:`ashpy.losses.gan.AdversarialLossG` and
:py:class:`ashpy.losses.gan.FeatureMatchingLoss`
"""
def __init__(
self,
cross_entropy_weight=100.0,
adversarial_loss_weight=1.0,
feature_matching_weight=10.0,
adversarial_loss_type: AdversarialLossType = AdversarialLossType.GAN,
use_feature_matching_loss: bool = False,
):
r"""
Weighted sum of :py:class:`ashpy.losses.gan.CategoricalCrossEntropy`, :py:class:`ashpy.losses.gan.AdversarialLossG` and
:py:class:`ashpy.losses.gan.FeatureMatchingLoss`
Args:
cross_entropy_weight: weight of the categorical cross entropy loss (scalar, :py:class:`tf.Tensor`, callable)
adversarial_loss_weight: weight of the adversarial loss (scalar, :py:class:`tf.Tensor`, callable)
feature_matching_weight: weight of the feature matching loss (scalar, :py:class:`tf.Tensor`, callable)
adversarial_loss_type (:py:class:`ashpy.losses.gan.AdversarialLossType`): type of adversarial loss,
see :py:class:`ashpy.losses.gan.AdversarialLossType`
use_feature_matching_loss (bool): whether to use feature matching loss or not
"""
executors = [
CategoricalCrossEntropy() * cross_entropy_weight,
get_adversarial_loss_generator(adversarial_loss_type)()
* adversarial_loss_weight,
]
if use_feature_matching_loss:
executors.append(FeatureMatchingLoss() * feature_matching_weight)
super().__init__(executors)
class EncoderBCE(Executor):
"""The Binary Cross Entropy computed among the encoder and the 0 label.
TODO: Check if this supports condition
"""
def __init__(self, from_logits=True):
super().__init__(tf.losses.BinaryCrossentropy(from_logits=from_logits))
@Executor.reduce_loss
def call(self, context, *, real, training, **kwargs):
encode = context.encoder_model(real, training=training)
d_real = context.discriminator_model([real, encode], training=training)
return self._fn(tf.zeros_like(d_real), d_real)
class AdversarialLossD(GANExecutor):
    r"""
    Base class for the adversarial loss of the discriminator
    """
    def __init__(self, loss_fn=None):
        r"""
        Args:
            loss_fn to call passing (d_real, d_fake)
        """
        super().__init__(loss_fn)
    @Executor.reduce_loss
    def call(self, context, *, fake, real, condition, training, **kwargs):
        r"""
        Call: setup the discriminator inputs and calls `loss_fn`
        Args:
            context: GAN Context
            fake: fake images corresponding to the condition G(c)
            real: real images corresponding to the condition x(c)
            condition: condition for the generator and discriminator
            training: if training or evaluation
        Returns:
            The loss for each example
        """
        # Build the (possibly conditioned / encoder-augmented) input lists for D.
        fake_inputs = self.get_discriminator_inputs(
            context, fake_or_real=fake, condition=condition, training=training
        )
        real_inputs = self.get_discriminator_inputs(
            context, fake_or_real=real, condition=condition, training=training
        )
        d_fake = context.discriminator_model(fake_inputs, training=training)
        d_real = context.discriminator_model(real_inputs, training=training)
        # A list output means a multi-output discriminator: compute the loss per
        # output, reduce each over axes [1, 2], and sum across outputs.
        if isinstance(d_fake, list):
            value = tf.add_n(
                [
                    tf.reduce_mean(self._fn(d_real_i, d_fake_i), axis=[1, 2])
                    for d_real_i, d_fake_i in zip(d_real, d_fake)
                ]
            )
            return value
        else:
            value = self._fn(d_real, d_fake)
            # If d_fake is not rank-4, expand the loss with two trailing singleton
            # dims so the reduce_mean over axes [1, 2] below stays valid.
            # NOTE(review): axes [1, 2] are presumably the spatial dims of an
            # NHWC patch output -- confirm against the discriminator's shape.
            value = tf.cond(
                tf.equal(tf.rank(d_fake), tf.constant(4)),
                lambda: value,
                lambda: tf.expand_dims(tf.expand_dims(value, axis=-1), axis=-1),
            )
            return tf.reduce_mean(value, axis=[1, 2])
class DiscriminatorMinMax(AdversarialLossD):
    r"""
    Discriminator side of the classic min-max GAN game.
    .. math::
        L_{D} = - \frac{1}{2} E [\log(D(x)) + \log (1 - D(G(z))]
    """
    class GANLoss(tf.losses.Loss):
        """Average of the BCE on real outputs (label 1, optionally smoothed)
        and the BCE on fake outputs (label 0, never smoothed)."""
        def __init__(self, from_logits=True, label_smoothing=0.0):
            self._positive_bce = tf.losses.BinaryCrossentropy(
                from_logits=from_logits,
                label_smoothing=label_smoothing,
                reduction=tf.losses.Reduction.NONE,
            )
            self._negative_bce = tf.losses.BinaryCrossentropy(
                from_logits=from_logits,
                label_smoothing=0.0,
                reduction=tf.losses.Reduction.NONE,
            )
            super().__init__()
        @property
        def reduction(self):
            return self._positive_bce.reduction
        @reduction.setter
        def reduction(self, value):
            # Keep both wrapped losses in sync with the requested reduction.
            self._positive_bce.reduction = value
            self._negative_bce.reduction = value
        def call(self, d_real, d_fake):
            """Evaluate the min-max game for the given discriminator outputs."""
            real_term = self._positive_bce(tf.ones_like(d_real), d_real)
            fake_term = self._negative_bce(tf.zeros_like(d_fake), d_fake)
            return 0.5 * (real_term + fake_term)
    def __init__(self, from_logits=True, label_smoothing=0.0):
        game_loss = DiscriminatorMinMax.GANLoss(
            from_logits=from_logits, label_smoothing=label_smoothing
        )
        super().__init__(game_loss)
class DiscriminatorLSGAN(AdversarialLossD):
    r"""
    Least-squares loss for the discriminator.
    Reference: Least Squares Generative Adversarial Networks [1]_ .
    Mean squared error of D's output on real samples against 1, plus the MSE
    of D's output on fake samples against 0, halved.
    Unconditioned:
    .. math::
        L_{D} = \frac{1}{2} E[(D(x) - 1)^2 + (0 - D(G(z))^2]
    Conditioned on c:
    .. math::
        L_{D} = \frac{1}{2} E[(D(x, c) - 1)^2 + (0 - D(G(c), c)^2]
    .. [1] https://arxiv.org/abs/1611.04076
    """
    class LeastSquareLoss(tf.losses.Loss):
        """Half the sum of the two MSE terms of the LSGAN discriminator game."""
        def __init__(self):
            self._positive_mse = tf.keras.losses.MeanSquaredError(
                reduction=tf.losses.Reduction.NONE
            )
            self._negative_mse = tf.keras.losses.MeanSquaredError(
                reduction=tf.losses.Reduction.NONE
            )
            super().__init__()
        @property
        def reduction(self):
            return self._positive_mse.reduction
        @reduction.setter
        def reduction(self, value):
            # Keep both wrapped losses in sync with the requested reduction.
            self._positive_mse.reduction = value
            self._negative_mse.reduction = value
        def call(self, d_real, d_fake):
            real_term = self._positive_mse(tf.ones_like(d_real), d_real)
            fake_term = self._negative_mse(tf.zeros_like(d_fake), d_fake)
            return 0.5 * (real_term + fake_term)
    def __init__(self):
        super().__init__(DiscriminatorLSGAN.LeastSquareLoss())
        self.name = "DiscriminatorLSGAN"
###
# Utility functions in order to get the correct loss
###
def get_adversarial_loss_discriminator(
    adversarial_loss_type: Union[AdversarialLossType, int] = AdversarialLossType.GAN
) -> Type[Executor]:
    r"""
    Return the discriminator-side adversarial loss class for the given type.
    Args:
        adversarial_loss_type (:py:class:`ashpy.losses.gan.AdversarialLossType`):
            either the enum member (GAN / LSGAN) or its integer ``.value``
    Returns:
        The matching :py:class:`ashpy.losses.executor.Executor` subclass
        (to be instantiated by the caller)
    Raises:
        ValueError: if the type is neither GAN nor LSGAN
    """
    # Accept both the enum member and its raw integer value.
    gan_variants = (AdversarialLossType.GAN, AdversarialLossType.GAN.value)
    lsgan_variants = (AdversarialLossType.LSGAN, AdversarialLossType.LSGAN.value)
    if adversarial_loss_type in gan_variants:
        return DiscriminatorMinMax
    if adversarial_loss_type in lsgan_variants:
        return DiscriminatorLSGAN
    raise ValueError(
        "Loss type not supported, the implemented losses are GAN or LSGAN"
    )
def get_adversarial_loss_generator(
    adversarial_loss_type: Union[AdversarialLossType, int] = AdversarialLossType.GAN
) -> Type[Executor]:
    r"""
    Return the generator-side adversarial loss class for the given type.
    Args:
        adversarial_loss_type (:py:class:`ashpy.losses.gan.AdversarialLossType`):
            either the enum member (GAN / LSGAN) or its integer ``.value``
    Returns:
        The matching :py:class:`ashpy.losses.executor.Executor` subclass
        (to be instantiated by the caller)
    Raises:
        ValueError: if the type is neither GAN nor LSGAN
    """
    # Accept both the enum member and its raw integer value.
    gan_variants = (AdversarialLossType.GAN, AdversarialLossType.GAN.value)
    lsgan_variants = (AdversarialLossType.LSGAN, AdversarialLossType.LSGAN.value)
    if adversarial_loss_type in gan_variants:
        return GeneratorBCE
    if adversarial_loss_type in lsgan_variants:
        return GeneratorLSGAN
    raise ValueError(
        "Loss type not supported, the implemented losses are GAN or LSGAN"
    )
| 34.06891 | 207 | 0.621901 |
from abc import ABC
from enum import Enum
from typing import List, Union, Type
import tensorflow as tf
from ashpy.contexts import GANContext
from ashpy.losses.executor import Executor, SumExecutor
class AdversarialLossType(Enum):
GAN = 0
LSGAN = 1
class GANExecutor(Executor, ABC):
@staticmethod
def get_discriminator_inputs(
context: GANContext,
fake_or_real: tf.Tensor,
condition: tf.Tensor,
training: bool,
) -> Union[tf.Tensor, List[tf.Tensor]]:
num_inputs = len(context.discriminator_model.inputs)
if hasattr(context, "encoder_model"):
if num_inputs == 2:
d_inputs = [
fake_or_real,
context.encoder_model(fake_or_real, training=training),
]
elif num_inputs == 3:
d_inputs = [
fake_or_real,
context.encoder_model(fake_or_real, training=training),
condition,
]
else:
raise ValueError(
f"Context has encoder_model, but generator has only {num_inputs} inputs"
)
else:
if num_inputs == 2:
d_inputs = [fake_or_real, condition]
else:
d_inputs = fake_or_real
return d_inputs
class AdversarialLossG(GANExecutor):
def __init__(self, loss_fn=None):
super().__init__(loss_fn)
@Executor.reduce_loss
def call(self, context, *, fake, condition, training, **kwargs):
fake_inputs = self.get_discriminator_inputs(
context=context, fake_or_real=fake, condition=condition, training=training
)
d_fake = context.discriminator_model(fake_inputs, training=training)
if isinstance(d_fake, list):
value = tf.add_n(
[
tf.reduce_mean(
self._fn(tf.ones_like(d_fake_i), d_fake_i), axis=[1, 2]
)
for d_fake_i in d_fake
]
)
return value
else:
value = self._fn(tf.ones_like(d_fake), d_fake)
value = tf.cond(
tf.equal(tf.rank(d_fake), tf.constant(4)),
lambda: value,
lambda: tf.expand_dims(tf.expand_dims(value, axis=-1), axis=-1),
)
return tf.reduce_mean(value, axis=[1, 2])
class GeneratorBCE(AdversarialLossG):
def __init__(self, from_logits=True):
self.name = "GeneratorBCE"
super().__init__(tf.losses.BinaryCrossentropy(from_logits=from_logits))
class GeneratorLSGAN(AdversarialLossG):
def __init__(self):
super().__init__(tf.keras.losses.MeanSquaredError())
self.name = "GeneratorLSGAN"
class GeneratorL1(GANExecutor):
class L1Loss(tf.losses.Loss):
def __init__(self):
super().__init__()
self._reduction = tf.losses.Reduction.SUM_OVER_BATCH_SIZE
@property
def reduction(self):
return self._reduction
@reduction.setter
def reduction(self, value):
self._reduction = value
def call(self, x, y):
if self._reduction == tf.losses.Reduction.SUM_OVER_BATCH_SIZE:
axis = None
elif self._reduction == tf.losses.Reduction.NONE:
axis = (1, 2, 3)
else:
raise ValueError("L1Loss: unhandled reduction type")
return tf.reduce_mean(tf.abs(x - y), axis=axis)
def __init__(self):
super().__init__(GeneratorL1.L1Loss())
@Executor.reduce_loss
def call(self, context, *, fake, real, **kwargs):
mae = self._fn(fake, real)
return mae
class FeatureMatchingLoss(GeneratorL1):
@Executor.reduce_loss
def call(self, context, *, fake, real, condition, training, **kwargs):
fake_inputs = self.get_discriminator_inputs(
context, fake_or_real=fake, condition=condition, training=training
)
real_inputs = self.get_discriminator_inputs(
context, fake_or_real=real, condition=condition, training=training
)
_, features_fake = context.discriminator_model(
fake_inputs, training=training, return_features=True
)
_, features_real = context.discriminator_model(
real_inputs, training=training, return_features=True
)
feature_loss = [
self._fn(feat_real_i, feat_fake_i)
for feat_real_i, feat_fake_i in zip(features_real, features_fake)
]
mae = tf.add_n(feature_loss)
return mae
class CategoricalCrossEntropy(Executor):
def __init__(self):
self.name = "CrossEntropy"
super().__init__(tf.keras.losses.CategoricalCrossentropy())
@Executor.reduce_loss
def call(self, context, *, fake, real, **kwargs):
loss_value = tf.reduce_mean(self._fn(real, fake), axis=[1, 2])
return loss_value
class Pix2PixLoss(SumExecutor):
def __init__(
self,
l1_loss_weight=100.0,
adversarial_loss_weight=1.0,
feature_matching_weight=10.0,
adversarial_loss_type: Union[
AdversarialLossType, int
] = AdversarialLossType.GAN,
use_feature_matching_loss: bool = False,
):
executors = [
GeneratorL1() * l1_loss_weight,
get_adversarial_loss_generator(adversarial_loss_type)()
* adversarial_loss_weight,
]
if use_feature_matching_loss:
executors.append(FeatureMatchingLoss() * feature_matching_weight)
super().__init__(executors)
class Pix2PixLossSemantic(SumExecutor):
def __init__(
self,
cross_entropy_weight=100.0,
adversarial_loss_weight=1.0,
feature_matching_weight=10.0,
adversarial_loss_type: AdversarialLossType = AdversarialLossType.GAN,
use_feature_matching_loss: bool = False,
):
executors = [
CategoricalCrossEntropy() * cross_entropy_weight,
get_adversarial_loss_generator(adversarial_loss_type)()
* adversarial_loss_weight,
]
if use_feature_matching_loss:
executors.append(FeatureMatchingLoss() * feature_matching_weight)
super().__init__(executors)
class EncoderBCE(Executor):
def __init__(self, from_logits=True):
super().__init__(tf.losses.BinaryCrossentropy(from_logits=from_logits))
@Executor.reduce_loss
def call(self, context, *, real, training, **kwargs):
encode = context.encoder_model(real, training=training)
d_real = context.discriminator_model([real, encode], training=training)
return self._fn(tf.zeros_like(d_real), d_real)
class AdversarialLossD(GANExecutor):
def __init__(self, loss_fn=None):
super().__init__(loss_fn)
@Executor.reduce_loss
def call(self, context, *, fake, real, condition, training, **kwargs):
fake_inputs = self.get_discriminator_inputs(
context, fake_or_real=fake, condition=condition, training=training
)
real_inputs = self.get_discriminator_inputs(
context, fake_or_real=real, condition=condition, training=training
)
d_fake = context.discriminator_model(fake_inputs, training=training)
d_real = context.discriminator_model(real_inputs, training=training)
if isinstance(d_fake, list):
value = tf.add_n(
[
tf.reduce_mean(self._fn(d_real_i, d_fake_i), axis=[1, 2])
for d_real_i, d_fake_i in zip(d_real, d_fake)
]
)
return value
else:
value = self._fn(d_real, d_fake)
value = tf.cond(
tf.equal(tf.rank(d_fake), tf.constant(4)),
lambda: value,
lambda: tf.expand_dims(tf.expand_dims(value, axis=-1), axis=-1),
)
return tf.reduce_mean(value, axis=[1, 2])
class DiscriminatorMinMax(AdversarialLossD):
class GANLoss(tf.losses.Loss):
def __init__(self, from_logits=True, label_smoothing=0.0):
self._positive_bce = tf.losses.BinaryCrossentropy(
from_logits=from_logits,
label_smoothing=label_smoothing,
reduction=tf.losses.Reduction.NONE,
)
self._negative_bce = tf.losses.BinaryCrossentropy(
from_logits=from_logits,
label_smoothing=0.0,
reduction=tf.losses.Reduction.NONE,
)
super().__init__()
@property
def reduction(self):
return self._positive_bce.reduction
@reduction.setter
def reduction(self, value):
self._positive_bce.reduction = value
self._negative_bce.reduction = value
def call(self, d_real, d_fake):
return 0.5 * (
self._positive_bce(tf.ones_like(d_real), d_real)
+ self._negative_bce(tf.zeros_like(d_fake), d_fake)
)
def __init__(self, from_logits=True, label_smoothing=0.0):
super().__init__(
DiscriminatorMinMax.GANLoss(
from_logits=from_logits, label_smoothing=label_smoothing
)
)
class DiscriminatorLSGAN(AdversarialLossD):
class LeastSquareLoss(tf.losses.Loss):
def __init__(self):
self._positive_mse = tf.keras.losses.MeanSquaredError(
reduction=tf.losses.Reduction.NONE
)
self._negative_mse = tf.keras.losses.MeanSquaredError(
reduction=tf.losses.Reduction.NONE
)
super().__init__()
@property
def reduction(self):
return self._positive_mse.reduction
@reduction.setter
def reduction(self, value):
self._positive_mse.reduction = value
self._negative_mse.reduction = value
def call(self, d_real, d_fake):
return 0.5 * (
self._positive_mse(tf.ones_like(d_real), d_real)
+ self._negative_mse(tf.zeros_like(d_fake), d_fake)
)
def __init__(self):
super().__init__(DiscriminatorLSGAN.LeastSquareLoss())
self.name = "DiscriminatorLSGAN"
def get_adversarial_loss_discriminator(
adversarial_loss_type: Union[AdversarialLossType, int] = AdversarialLossType.GAN
) -> Type[Executor]:
if (
adversarial_loss_type == AdversarialLossType.GAN
or adversarial_loss_type == AdversarialLossType.GAN.value
):
return DiscriminatorMinMax
elif (
adversarial_loss_type == AdversarialLossType.LSGAN
or adversarial_loss_type == AdversarialLossType.LSGAN.value
):
return DiscriminatorLSGAN
else:
raise ValueError(
"Loss type not supported, the implemented losses are GAN or LSGAN"
)
def get_adversarial_loss_generator(
adversarial_loss_type: Union[AdversarialLossType, int] = AdversarialLossType.GAN
) -> Type[Executor]:
if (
adversarial_loss_type == AdversarialLossType.GAN
or adversarial_loss_type == AdversarialLossType.GAN.value
):
return GeneratorBCE
elif (
adversarial_loss_type == AdversarialLossType.LSGAN
or adversarial_loss_type == AdversarialLossType.LSGAN.value
):
return GeneratorLSGAN
else:
raise ValueError(
"Loss type not supported, the implemented losses are GAN or LSGAN"
)
| true | true |
f7fef51f7252ab620407c3abc4b1662f432c9847 | 2,233 | py | Python | app.py | mylovesnsd1998/detectf | 1f79f253c830693d39731a5b11f3a431061c6934 | [
"Apache-2.0"
] | null | null | null | app.py | mylovesnsd1998/detectf | 1f79f253c830693d39731a5b11f3a431061c6934 | [
"Apache-2.0"
] | null | null | null | app.py | mylovesnsd1998/detectf | 1f79f253c830693d39731a5b11f3a431061c6934 | [
"Apache-2.0"
] | null | null | null | from flask import json
import base64
from flask import request, send_from_directory
from label_image import callapi
from flask import Flask, redirect, url_for
from flask import jsonify
from flask_cors import CORS, cross_origin
from flask import send_file
import os
# db.create_all() # enable to create db
# Flask application and CORS setup.
app = Flask(__name__)
# Enable CORS globally; some routes below also use the @cross_origin decorator.
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
# Directory where uploaded images are saved before classification.
# NOTE(review): hard-coded absolute path -- confirm it exists in deployment.
UPLOAD_FOLDER = '/tensorflow-for-poets-2/UPLOAD_FOLDER'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
from werkzeug.utils import secure_filename
@app.route('/api/classifi', methods=["POST"])
def custom_data():
    """Classify the image named in the POSTed JSON body.

    Expects a JSON object with a "name" key; returns a JSON object with the
    classification result in "name", an (unused) "link" field and "code" 200.
    """
    payload = request.json
    image_name = payload["name"]
    # Run the classifier and place its result in the response body.
    body = {
        "name": callapi(image_name),
        "link": "",
        "code": 200,
    }
    return app.response_class(response=json.dumps(body), mimetype='application/json')
# FIX: @cross_origin must be applied *below* @app.route; with the original
# order the route registered the unwrapped function and the decorator had no
# effect (CORS still worked only because of the global CORS(app) above).
@app.route('/api/hello')
@cross_origin()
def downloadFile2():
    """Health-check endpoint: return a static JSON greeting."""
    resp = {
        "name": "hello world",
        "code": 200
    }
    return app.response_class(response=json.dumps(resp), mimetype='application/json')
# FIX: @cross_origin must be applied *below* @app.route (see flask-cors docs);
# the original order made the decorator a no-op on the registered route.
@app.route('/upload', methods=['GET', 'POST'])
@cross_origin()
def upload_file():
    """Accept a multipart file upload, save it and classify it.

    Redirects back to the same URL when no file part / empty filename is
    given; otherwise saves the file under UPLOAD_FOLDER and returns a JSON
    body with the classification result in "predict".
    """
    resp = {
        "name": "hello",
        "predict": "null",
        "code": 200
    }
    # Guard clauses replace the original dead "minh = True / if minh is True"
    # wrapper, which could never be false.
    if 'file' not in request.files:
        return redirect(request.url)
    file = request.files['file']
    # Browsers submit an empty part without a filename when no file is chosen.
    if file.filename == '':
        resp["name"] = "Hello 1"
        return redirect(request.url)
    if file:
        resp["name"] = "Success"
        # secure_filename strips path components from user-supplied names.
        filename = secure_filename(file.filename)
        file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
        file.close()
        # NOTE(review): callapi is given the bare filename, not the saved
        # path -- confirm label_image resolves it against UPLOAD_FOLDER.
        resp["predict"] = callapi(filename)
    return app.response_class(response=json.dumps(resp), mimetype='application/json')
if __name__ == '__main__':
    # Development server only; use a production WSGI server when deployed.
    app.run(debug=True, port=8000, threaded=True)
| 29 | 84 | 0.628751 | from flask import json
import base64
from flask import request, send_from_directory
from label_image import callapi
from flask import Flask, redirect, url_for
from flask import jsonify
from flask_cors import CORS, cross_origin
from flask import send_file
import os
app = Flask(__name__)
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
UPLOAD_FOLDER = '/tensorflow-for-poets-2/UPLOAD_FOLDER'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
from werkzeug.utils import secure_filename
@app.route('/api/classifi', methods=["POST"])
def custom_data():
content = request.json
name = content["name"]
resp = {
"name": "",
"link": "",
"code": 200
}
minh = callapi(name)
resp["name"] = minh
return app.response_class(response=json.dumps(resp),mimetype='application/json')
@cross_origin()
@app.route('/api/hello')
def downloadFile2():
resp = {
"name": "hello world",
"code": 200
}
return app.response_class(response=json.dumps(resp),mimetype='application/json')
@cross_origin()
@app.route('/upload', methods=['GET', 'POST'])
def upload_file():
resp = {
"name": "hello",
"predict": "null",
"code": 200
}
minh = True
if minh is True:
if 'file' not in request.files:
return redirect(request.url)
file = request.files['file']
if file.filename == '':
resp["name"]= "Hello 1"
return redirect(request.url)
if file:
resp["name"]= "Success"
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
file.close()
minh = callapi(filename)
resp["predict"] = minh
return app.response_class(response=json.dumps(resp),mimetype='application/json')
if __name__ == '__main__':
app.run(debug=True, port=8000, threaded=True)
| true | true |
f7fef66186a218d8ae0c5e5b93d3557b4153a76f | 4,423 | py | Python | projecteuler/projectEuler91_rightTriangleCount.py | qingfengxia/python-projecteuler | a2cba042fe7256364f6a5fa55df805a87da9a301 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | projecteuler/projectEuler91_rightTriangleCount.py | qingfengxia/python-projecteuler | a2cba042fe7256364f6a5fa55df805a87da9a301 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | projecteuler/projectEuler91_rightTriangleCount.py | qingfengxia/python-projecteuler | a2cba042fe7256364f6a5fa55df805a87da9a301 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals, absolute_import, division
#!/usr/bin/python
"""
problem 91
weblink:http://projecteuler.net/problem=91
description:
see webpage
Analysis:
method 1:
one point must be (0,0)
recursive RC(1)=3
symmetric, X-axis, Y-axis, 45degree except one case (n,0),(0,n)
some special cases, the right angle is not in (0,0)
method 2 bruteforce:
still consider the symmetric property
50**4
generate combination of coord, then test them!
"""
#from __future__ import *
from projecteulerhelper import *
#timeit is included in projecteulerhelper now
# test the correction by a small dimension first.
# test brute force first, method1
#then, try some smart method!
def isRightTriangle(x, y):
    """Return True if (0,0), x and y are three distinct points forming a
    right triangle (Pythagoras on the squared side lengths)."""
    sq_zx = x[0] * x[0] + x[1] * x[1]          # |ZX|^2, Z = (0,0)
    sq_zy = y[0] * y[0] + y[1] * y[1]          # |ZY|^2
    sq_xy = (y[0] - x[0]) ** 2 + (y[1] - x[1]) ** 2   # |XY|^2
    sides = sorted((sq_zx, sq_zy, sq_xy))
    if sides[0] == 0:
        # A zero side means two of the points coincide: not a triangle.
        return False
    return sides[2] == sides[0] + sides[1]
#~ def countSpecialRightTriangles(n):
#~ """ second point at (i,n) top edge(coord y=0), for i in range(1,n), i.e.
#~ the two boundaries is not (0,n),(n,n) is not tested here!
#~ the third point is not on X axis,
#~ by bruteforce search
#~ """
#~ count=0
#~ for i in range(1,n):
#~ for y0 in range(1,n): # in fact ,only point below, ZX need to be tested
#~ for y1 in range(1,n):
#~ if isRightTriangle((i,n),(y0,y1)): count+=1
#~ return count
#~ def RC(n):
#~ """ it is easy to make error to distinguish special and regular
#~ this method is not correct!"""
#~ if n==1:
#~ return 3
#~ else:
#~ sym=(n-1)*2+1
#~ #new ones with point: x(0,n)->n-1, symX2, but (0,n)+y(n,0)->1
#~ #for 0<i<n , x(i,n)-> 2, non-square grid thus 2 on one side, symX2 for both side of 45degree
#~ sym+=(n-1)*2*2+2 # p1(n,n), sym of 45degree p2 (0,n)/(n,0)
#~ tilt=0
#~ if n%2==0: tilt+=2 # one edge is axis, third point on 45degree
#~ special=2*countSpecialRightTriangles(n) #sym for
#~ return RC(n-1)+sym+special+tilt
def countAppendedRightTriangles(n):
    """Count right triangles gained when growing the grid from n-1 to n.

    One vertex is pinned to (i, n) on the new top edge (i in 0..n-1), the
    other ranges over q = (q0, q1) with q0 < n, q1 <= n; the result is
    doubled for symmetry minus the one double-counted X=(0,n)/Y=(n,0) case,
    then triangles with a vertex at the corner (n, n) are added.
    WARNING (from the original author): this method is known to be incorrect.
    """
    top_edge = sum(
        1
        for i in range(n)
        for q0 in range(n)
        for q1 in range(n + 1)
        if isRightTriangle((i, n), (q0, q1))
    )
    corner = sum(
        1
        for q0 in range(n + 1)
        for q1 in range(n + 1)
        if isRightTriangle((n, n), (q0, q1))
    )
    return 2 * top_edge - 1 + corner
def RTC(n):
    """Total right-triangle count for an n x n grid (defined for n >= 1).

    Iterative form of the original recurrence
    RTC(1) = 3, RTC(n) = RTC(n-1) + countAppendedRightTriangles(n).
    """
    total = 3  # base case: 1 x 1 grid
    for m in range(2, n + 1):
        total += countAppendedRightTriangles(m)
    return total
def bruteforce():
    """Exhaustively count right triangles with one vertex at the origin for
    N=50, then print half the count (every P/Q pair is found twice by
    symmetry, hence the division)."""
    N = 50
    print("bruteforce find right triangles for N=", N)
    doubled = sum(
        1
        for x1 in range(N + 1)
        for x2 in range(N + 1)
        for y1 in range(N + 1)
        for y2 in range(N + 1)
        if isRightTriangle((x1, y1), (x2, y2))
    )
    print(doubled / 2)
def smarter():
    """Print the answer for N=50 using the incremental RTC recurrence."""
    answer = RTC(50)
    print(answer)
def test():
    """Ad-hoc sanity checks printed to stdout (no assertions)."""
    print(isRightTriangle((0, 5), (12, 0)))
    for size in range(2, 5):
        print(size, "=>", RTC(size))
def solve():
    """Solve Project Euler 91 for N=50 via the incremental method."""
    # bruteforce()  # slower exhaustive alternative, kept for cross-checking
    smarter()
if __name__ == "__main__":
    test()
    # timeit comes from projecteulerhelper and times the solve() call.
    timeit(solve)
    #timeit(func, param)
from __future__ import print_function, unicode_literals, absolute_import, division
from projecteulerhelper import *
def isRightTriangle(x,y):
e2l=sorted([x[1]*x[1]+x[0]*x[0], y[1]*y[1]+y[0]*y[0], (y[1]-x[1])**2+(y[0]-x[0])**2 ])
if min(e2l)==0: return False
if e2l[2]==e2l[1]+e2l[0]: return True
else: return False
#~ the two boundaries is not (0,n),(n,n) is not tested here!
#~ the third point is not on X axis,
#~ by bruteforce search
#~ """
#~ this method is not correct!"""
count+=1
count*=2
count-=1
for q0 in range(0,n+1):
for q1 in range(0,n+1):
if isRightTriangle((n,n),(q0,q1)): count+=1
return count
def RTC(n):
if n==1:
return 3
else:
return RTC(n-1)+countAppendedRightTriangles(n)
def bruteforce():
N=50
print("bruteforce find right triangles for N=",N)
count=0
for x1 in range(0,N+1):
for x2 in range(0,N+1):
for y1 in range(0,N+1):
for y2 in range(0,N+1):
if isRightTriangle((x1,y1),(x2,y2)): count+=1
print(count/2)
def smarter():
print(RTC(50))
def test():
print(isRightTriangle((0,5),(12,0)))
for i in range(2,5): print(i,"=>",RTC(i))
def solve():
    smarter()
if __name__ == "__main__":
test()
timeit(solve)
| true | true |
f7fef8065ec691004563e60844761af2644d5fb0 | 4,089 | py | Python | cim/database/db_delete_queries.py | ali-jal/cimdb-1 | be8e13ffb83667f44672c11f1d0d81dfd7d405f5 | [
"MIT"
] | 1 | 2021-09-03T13:49:40.000Z | 2021-09-03T13:49:40.000Z | cim/database/db_delete_queries.py | ali-jal/cimdb-1 | be8e13ffb83667f44672c11f1d0d81dfd7d405f5 | [
"MIT"
] | 20 | 2021-02-02T02:00:36.000Z | 2022-03-20T11:34:29.000Z | cim/database/db_delete_queries.py | ali-jal/cimdb-1 | be8e13ffb83667f44672c11f1d0d81dfd7d405f5 | [
"MIT"
] | 4 | 2021-02-02T02:24:52.000Z | 2021-02-09T06:45:49.000Z | # filename: db_delete_queries
# description: provides DELETE database queries to delete selected data from each of the entity tables
# connect to database
import cim.database.db_connector as db
# Create a module-level connection to the database.
# NOTE(review): delete() below opens its own connection on each call, so this
# object appears unused inside this module -- confirm before removing.
db_connection = db.connect_to_database()
def delete(delete_query_to_run):
    """
    Execute a DELETE statement against CIMDB.

    All deletion helpers in this module share the same steps, so this is a
    validation wrapper that reports whether the delete succeeded.

    Args:
        delete_query_to_run: complete SQL DELETE statement to execute.
    Returns:
        True if the statement executed without raising, False otherwise.
    """
    try:
        # Open a fresh connection per call rather than reusing the
        # module-level one (mirrors the original behavior).
        connection = db.connect_to_database()
        # The original bound the result to an unused `cursor` variable;
        # the return value is not needed here.
        db.execute_query(db_connection=connection, query=delete_query_to_run)
        return True
    # Boundary handler: log the failure to the server log and report it to
    # the caller instead of crashing the request.
    except Exception as e:
        print(f'An error occurred when attempting to delete from CIMDB: {str(e)}')
        return False
def delete_site(site_id_to_delete):
    """Delete the Sites row with the given site_id."""
    # NOTE(review): query built by string interpolation -- SQL-injection risk
    # if the id ever comes from user input; prefer parameterized queries if
    # db.execute_query supports them.
    delete_site_query = """
    DELETE FROM Sites WHERE site_id='%s';
    """%(site_id_to_delete)
    delete(delete_query_to_run=delete_site_query)
def delete_work_order(wo_id):
    """Delete the WorkOrders row with the given wo_id."""
    # NOTE(review): string-concatenated SQL (injection risk); wo_id must be a
    # string -- a non-string argument raises TypeError here.
    delete_work_order_query = """DELETE FROM WorkOrders WHERE wo_id="""+wo_id+""" ;"""
    delete(delete_query_to_run=delete_work_order_query)
def delete_work_order_products_by_wo_id(wo_id):
    """Delete WorkOrderProducts rows linked to the given work order."""
    # NOTE(review): string-concatenated SQL (injection risk).
    delete_work_order_products_by_wo_id_query = """DELETE FROM WorkOrderProducts WHERE wop_wo_id="""+wo_id+""" ;"""
    delete(delete_query_to_run=delete_work_order_products_by_wo_id_query)
def delete_work_order_products_by_product_sn(product_sn):
    """Delete WorkOrderProducts rows linked to the given product serial number."""
    # NOTE(review): string-concatenated SQL (injection risk).
    delete_work_order_products_by_wo_id_query = """DELETE FROM WorkOrderProducts WHERE wop_product_sn="""+product_sn+""" ;"""
    delete(delete_query_to_run=delete_work_order_products_by_wo_id_query)
def delete_employee(employee_id_to_delete):
    """Delete the Employees row with the given employee_id."""
    # NOTE(review): interpolated SQL -- injection risk; prefer parameters.
    delete_employee_query = """
    DELETE FROM Employees WHERE employee_id='%s';
    """%(employee_id_to_delete)
    delete(delete_query_to_run=delete_employee_query)
def delete_location(location_id_to_delete):
    """Delete the Locations row with the given location_id."""
    # NOTE(review): interpolated SQL -- injection risk; prefer parameters.
    delete_location_query = """
    DELETE FROM Locations WHERE location_id='%s';
    """%(location_id_to_delete)
    delete(delete_query_to_run=delete_location_query)
def delete_location_regular_comps():
    """Stub: locations/regular-components deletion is not implemented yet."""
    # The empty query string means delete() executes an empty statement --
    # presumably a placeholder awaiting implementation.
    delete_location_regular_comps_query = """"""
    delete(delete_query_to_run=delete_location_regular_comps_query)
def delete_products_regular_comps(product_sn):
    """Delete ProductsRegularComps rows for the given product serial number."""
    # NOTE(review): string-concatenated SQL (injection risk).
    delete_product_regular_comps_query = """DELETE FROM ProductsRegularComps WHERE prc_product_sn = """+product_sn+""" ;"""
    delete(delete_query_to_run=delete_product_regular_comps_query)
def delete_products_special_comps():
    """Stub: products/special-components deletion is not implemented yet."""
    # Empty placeholder query, same as delete_location_regular_comps.
    delete_product_special_comps_query = """"""
    delete(delete_query_to_run=delete_product_special_comps_query)
def delete_product(product_sn):
    """Delete the Products row with the given serial number."""
    # NOTE(review): string-concatenated SQL (injection risk).
    delete_product_query = """DELETE FROM Products WHERE product_sn = """+product_sn+""" ;"""
    delete(delete_query_to_run=delete_product_query)
def delete_regular_component():
    """Stub: regular-component deletion is not implemented yet."""
    # Empty placeholder query; delete() will execute an empty statement.
    delete_regular_component_query = """"""
    delete(delete_query_to_run=delete_regular_component_query)
def delete_special_component(spec_comp_sn_to_delete):
    """Delete the SpecialComponents row with the given serial number."""
    # NOTE(review): interpolated SQL -- injection risk; prefer parameters.
    delete_special_component_query = """
    DELETE FROM SpecialComponents WHERE sc_sn='%s';
    """%(spec_comp_sn_to_delete)
    delete(delete_query_to_run=delete_special_component_query)
| 35.25 | 122 | 0.802886 |
import cim.database.db_connector as db
db_connection = db.connect_to_database()
def delete(delete_query_to_run):
try:
db_connection = db.connect_to_database()
cursor = db.execute_query(db_connection=db_connection, query=delete_query_to_run)
return True
except Exception as e:
print(f'An error occurred when attempting to delete from CIMDB: {str(e)}')
return False
def delete_site(site_id_to_delete):
delete_site_query = """
DELETE FROM Sites WHERE site_id='%s';
"""%(site_id_to_delete)
delete(delete_query_to_run=delete_site_query)
def delete_work_order(wo_id):
delete_work_order_query = """DELETE FROM WorkOrders WHERE wo_id="""+wo_id+""" ;"""
delete(delete_query_to_run=delete_work_order_query)
def delete_work_order_products_by_wo_id(wo_id):
delete_work_order_products_by_wo_id_query = """DELETE FROM WorkOrderProducts WHERE wop_wo_id="""+wo_id+""" ;"""
delete(delete_query_to_run=delete_work_order_products_by_wo_id_query)
def delete_work_order_products_by_product_sn(product_sn):
delete_work_order_products_by_wo_id_query = """DELETE FROM WorkOrderProducts WHERE wop_product_sn="""+product_sn+""" ;"""
delete(delete_query_to_run=delete_work_order_products_by_wo_id_query)
def delete_employee(employee_id_to_delete):
delete_employee_query = """
DELETE FROM Employees WHERE employee_id='%s';
"""%(employee_id_to_delete)
delete(delete_query_to_run=delete_employee_query)
def delete_location(location_id_to_delete):
delete_location_query = """
DELETE FROM Locations WHERE location_id='%s';
"""%(location_id_to_delete)
delete(delete_query_to_run=delete_location_query)
def delete_location_regular_comps():
delete_location_regular_comps_query = """"""
delete(delete_query_to_run=delete_location_regular_comps_query)
def delete_products_regular_comps(product_sn):
delete_product_regular_comps_query = """DELETE FROM ProductsRegularComps WHERE prc_product_sn = """+product_sn+""" ;"""
delete(delete_query_to_run=delete_product_regular_comps_query)
def delete_products_special_comps():
delete_product_special_comps_query = """"""
delete(delete_query_to_run=delete_product_special_comps_query)
def delete_product(product_sn):
delete_product_query = """DELETE FROM Products WHERE product_sn = """+product_sn+""" ;"""
delete(delete_query_to_run=delete_product_query)
def delete_regular_component():
delete_regular_component_query = """"""
delete(delete_query_to_run=delete_regular_component_query)
def delete_special_component(spec_comp_sn_to_delete):
delete_special_component_query = """
DELETE FROM SpecialComponents WHERE sc_sn='%s';
"""%(spec_comp_sn_to_delete)
delete(delete_query_to_run=delete_special_component_query)
| true | true |
f7fefa2e280bb4991f935c491a9ed8f9d3056831 | 293 | py | Python | kattis/Stand on Zanzibar.py | jaredliw/python-question-bank | 9c8c246623d8d171f875700b57772df0afcbdcdf | [
"MIT"
] | 1 | 2021-04-08T07:49:15.000Z | 2021-04-08T07:49:15.000Z | kattis/Stand on Zanzibar.py | jaredliw/leetcode-solutions | 9c8c246623d8d171f875700b57772df0afcbdcdf | [
"MIT"
] | null | null | null | kattis/Stand on Zanzibar.py | jaredliw/leetcode-solutions | 9c8c246623d8d171f875700b57772df0afcbdcdf | [
"MIT"
] | 1 | 2022-01-23T02:12:24.000Z | 2022-01-23T02:12:24.000Z | # CPU: 0.05 s
for _ in range(int(input())):
population = list(map(int, input().split()))
import_ = 0
for idx in range(1, len(population) - 1):
if population[idx] > population[idx - 1] * 2:
import_ += population[idx] - population[idx - 1] * 2
print(import_)
| 32.555556 | 64 | 0.576792 |
for _ in range(int(input())):
population = list(map(int, input().split()))
import_ = 0
for idx in range(1, len(population) - 1):
if population[idx] > population[idx - 1] * 2:
import_ += population[idx] - population[idx - 1] * 2
print(import_)
| true | true |
f7fefae90a28df8205be8de97c55f1c9e04c2c35 | 772 | py | Python | Week3/get_most_probable_motif.py | arvinddoraiswamy/mybioinfo | e964fa20f1bdea06d2ef26f6ea8ad57847985929 | [
"MIT"
] | null | null | null | Week3/get_most_probable_motif.py | arvinddoraiswamy/mybioinfo | e964fa20f1bdea06d2ef26f6ea8ad57847985929 | [
"MIT"
] | null | null | null | Week3/get_most_probable_motif.py | arvinddoraiswamy/mybioinfo | e964fa20f1bdea06d2ef26f6ea8ad57847985929 | [
"MIT"
] | null | null | null | import sys
import os
#Adding directory to the path where Python searches for modules
module_folder = os.path.dirname('/opt/Courses/TechCourses/Bioinformatics/code/')
sys.path.insert(0, module_folder)
#Importing genemanipulating module. This has a lot of common functions.
import genemanip
if __name__ == "__main__":
    # Read the problem input from stdin:
    #   line 0: DNA text, line 1: k-mer length, lines 2-5: profile-matrix
    #   rows for A, C, G and T (whitespace-separated probabilities).
    import sys
    lines = sys.stdin.read().splitlines()
    Text = lines[0]
    k = int(lines[1])
    A = [float(c) for c in lines[2].split()]
    C = [float(c) for c in lines[3].split()]
    G = [float(c) for c in lines[4].split()]
    T = [float(c) for c in lines[5].split()]
    # Profile maps each nucleotide to its per-position probability row.
    profile = {'A':A, 'C':C, 'G':G, 'T':T}
    #profile= genemanip.generate_profile_matrix(motifs.splitlines())
    # Python 2 print statement -- this script is Python 2 only.
    print genemanip.get_most_probable_motif(Text, k, profile)
| 33.565217 | 80 | 0.680052 | import sys
import os
module_folder = os.path.dirname('/opt/Courses/TechCourses/Bioinformatics/code/')
sys.path.insert(0, module_folder)
import genemanip
if __name__ == "__main__":
import sys
lines = sys.stdin.read().splitlines()
Text = lines[0]
k = int(lines[1])
A = [float(c) for c in lines[2].split()]
C = [float(c) for c in lines[3].split()]
G = [float(c) for c in lines[4].split()]
T = [float(c) for c in lines[5].split()]
profile = {'A':A, 'C':C, 'G':G, 'T':T}
print genemanip.get_most_probable_motif(Text, k, profile)
| false | true |
f7fefb3e5b767e25373665058d4fd6a298fb3d60 | 5,041 | py | Python | python/paddle/fluid/__init__.py | ZongwuYang/Paddle | 6224e61fd94e6ad87f18c2808a76256b516fa3f3 | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/__init__.py | ZongwuYang/Paddle | 6224e61fd94e6ad87f18c2808a76256b516fa3f3 | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/__init__.py | ZongwuYang/Paddle | 6224e61fd94e6ad87f18c2808a76256b516fa3f3 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
# import all class inside framework into fluid module
from . import framework
from .framework import *
# import all class inside executor into fluid module
from . import executor
from .executor import *
from . import trainer
from . import inferencer
from . import io
from . import evaluator
from . import initializer
from . import layers
from . import contrib
from . import nets
from . import optimizer
from . import backward
from . import regularizer
from . import average
from . import metrics
from . import transpiler
from . import distribute_lookup_table
from .param_attr import ParamAttr, WeightNormParamAttr
from .data_feeder import DataFeeder
from .core import LoDTensor, LoDTensorArray, CPUPlace, CUDAPlace, CUDAPinnedPlace, Scope
from .transpiler import DistributeTranspiler, \
memory_optimize, release_memory, DistributeTranspilerConfig
from .lod_tensor import create_lod_tensor, create_random_int_lodtensor
from . import clip
from . import profiler
from . import unique_name
from . import recordio_writer
from . import parallel_executor
from .parallel_executor import *
from paddle.fluid.layers.math_op_patch import monkey_patch_variable
Tensor = LoDTensor
__all__ = framework.__all__ + executor.__all__ + \
trainer.__all__ + inferencer.__all__ + transpiler.__all__ + \
parallel_executor.__all__ + lod_tensor.__all__ + [
'io',
'initializer',
'layers',
'contrib',
'transpiler',
'nets',
'optimizer',
'learning_rate_decay',
'backward',
'regularizer',
'LoDTensor',
'LoDTensorArray',
'CPUPlace',
'CUDAPlace',
'CUDAPinnedPlace',
'Tensor',
'ParamAttr',
'WeightNormParamAttr',
'DataFeeder',
'clip',
'profiler',
'unique_name',
'recordio_writer',
'Scope',
]
def __bootstrap__():
    """
    Enable reading gflags from environment variables.

    Returns:
        None
    """
    import sys
    import os
    import platform
    from . import core
    # Skip slow device init steps when running under unittest (see below).
    in_test = 'unittest' in sys.modules
    # Fall back to a single OpenMP thread if the env var is unset or invalid.
    try:
        num_threads = int(os.getenv('OMP_NUM_THREADS', '1'))
    except ValueError:
        num_threads = 1
    if num_threads > 1:
        print(
            'WARNING: OMP_NUM_THREADS set to {0}, not 1. The computation '
            'speed will not be optimized if you use data parallel. It will '
            'fail if this PaddlePaddle binary is compiled with OpenBlas since'
            ' OpenBlas does not support multi-threads.'.format(num_threads),
            file=sys.stderr)
        print('PLEASE USE OMP_NUM_THREADS WISELY.', file=sys.stderr)
    os.environ['OMP_NUM_THREADS'] = str(num_threads)
    sysstr = platform.system()
    # Base set of gflags that may be overridden from the environment.
    read_env_flags = [
        'check_nan_inf', 'benchmark', 'eager_delete_scope', 'use_mkldnn',
        'use_ngraph', 'initial_cpu_memory_in_mb', 'init_allocated_mem',
        'free_idle_memory', 'paddle_num_threads', "dist_threadpool_size",
        'eager_delete_tensor_gb', 'allocator_strategy',
        'reader_queue_speed_test_mode', 'print_sub_graph_dir'
    ]
    # Platform-specific flags: pinned memory is unsupported on macOS,
    # warpctc/cpu_deterministic are skipped on Windows.
    if 'Darwin' not in sysstr:
        read_env_flags.append('use_pinned_memory')
    if os.name != 'nt':
        read_env_flags.append('warpctc_dir')
        read_env_flags.append('cpu_deterministic')
    # RPC-related flags only apply to distributed builds.
    if core.is_compiled_with_dist():
        read_env_flags.append('rpc_deadline')
        read_env_flags.append('rpc_server_profile_path')
        read_env_flags.append('enable_rpc_profiler')
        read_env_flags.append('rpc_send_thread_num')
        read_env_flags.append('rpc_get_thread_num')
        read_env_flags.append('rpc_prefetch_thread_num')
        read_env_flags.append('rpc_disable_reuse_port')
    # GPU memory / cuDNN tuning flags only apply to CUDA builds.
    if core.is_compiled_with_cuda():
        read_env_flags += [
            'fraction_of_gpu_memory_to_use', 'cudnn_deterministic',
            'enable_cublas_tensor_op_math', 'conv_workspace_size_limit',
            'cudnn_exhaustive_search'
        ]
    core.init_gflags([sys.argv[0]] +
                     ["--tryfromenv=" + ",".join(read_env_flags)])
    core.init_glog(sys.argv[0])
    # don't init_p2p when in unittest to save time.
    core.init_devices(not in_test)
# TODO(panyx0718): Avoid doing complex initialization logic in __init__.py.
# Consider paddle.init(args) or paddle.main(args)
monkey_patch_variable()
__bootstrap__()
| 32.522581 | 88 | 0.694703 |
from __future__ import print_function
import os
from . import framework
from .framework import *
from . import executor
from .executor import *
from . import trainer
from . import inferencer
from . import io
from . import evaluator
from . import initializer
from . import layers
from . import contrib
from . import nets
from . import optimizer
from . import backward
from . import regularizer
from . import average
from . import metrics
from . import transpiler
from . import distribute_lookup_table
from .param_attr import ParamAttr, WeightNormParamAttr
from .data_feeder import DataFeeder
from .core import LoDTensor, LoDTensorArray, CPUPlace, CUDAPlace, CUDAPinnedPlace, Scope
from .transpiler import DistributeTranspiler, \
memory_optimize, release_memory, DistributeTranspilerConfig
from .lod_tensor import create_lod_tensor, create_random_int_lodtensor
from . import clip
from . import profiler
from . import unique_name
from . import recordio_writer
from . import parallel_executor
from .parallel_executor import *
from paddle.fluid.layers.math_op_patch import monkey_patch_variable
Tensor = LoDTensor
__all__ = framework.__all__ + executor.__all__ + \
trainer.__all__ + inferencer.__all__ + transpiler.__all__ + \
parallel_executor.__all__ + lod_tensor.__all__ + [
'io',
'initializer',
'layers',
'contrib',
'transpiler',
'nets',
'optimizer',
'learning_rate_decay',
'backward',
'regularizer',
'LoDTensor',
'LoDTensorArray',
'CPUPlace',
'CUDAPlace',
'CUDAPinnedPlace',
'Tensor',
'ParamAttr',
'WeightNormParamAttr',
'DataFeeder',
'clip',
'profiler',
'unique_name',
'recordio_writer',
'Scope',
]
def __bootstrap__():
import sys
import os
import platform
from . import core
in_test = 'unittest' in sys.modules
try:
num_threads = int(os.getenv('OMP_NUM_THREADS', '1'))
except ValueError:
num_threads = 1
if num_threads > 1:
print(
'WARNING: OMP_NUM_THREADS set to {0}, not 1. The computation '
'speed will not be optimized if you use data parallel. It will '
'fail if this PaddlePaddle binary is compiled with OpenBlas since'
' OpenBlas does not support multi-threads.'.format(num_threads),
file=sys.stderr)
print('PLEASE USE OMP_NUM_THREADS WISELY.', file=sys.stderr)
os.environ['OMP_NUM_THREADS'] = str(num_threads)
sysstr = platform.system()
read_env_flags = [
'check_nan_inf', 'benchmark', 'eager_delete_scope', 'use_mkldnn',
'use_ngraph', 'initial_cpu_memory_in_mb', 'init_allocated_mem',
'free_idle_memory', 'paddle_num_threads', "dist_threadpool_size",
'eager_delete_tensor_gb', 'allocator_strategy',
'reader_queue_speed_test_mode', 'print_sub_graph_dir'
]
if 'Darwin' not in sysstr:
read_env_flags.append('use_pinned_memory')
if os.name != 'nt':
read_env_flags.append('warpctc_dir')
read_env_flags.append('cpu_deterministic')
if core.is_compiled_with_dist():
read_env_flags.append('rpc_deadline')
read_env_flags.append('rpc_server_profile_path')
read_env_flags.append('enable_rpc_profiler')
read_env_flags.append('rpc_send_thread_num')
read_env_flags.append('rpc_get_thread_num')
read_env_flags.append('rpc_prefetch_thread_num')
read_env_flags.append('rpc_disable_reuse_port')
if core.is_compiled_with_cuda():
read_env_flags += [
'fraction_of_gpu_memory_to_use', 'cudnn_deterministic',
'enable_cublas_tensor_op_math', 'conv_workspace_size_limit',
'cudnn_exhaustive_search'
]
core.init_gflags([sys.argv[0]] +
["--tryfromenv=" + ",".join(read_env_flags)])
core.init_glog(sys.argv[0])
core.init_devices(not in_test)
# TODO(panyx0718): Avoid doing complex initialization logic in __init__.py.
# Consider paddle.init(args) or paddle.main(args)
monkey_patch_variable()
__bootstrap__()
| true | true |
f7fefc80986df70cc91eb40096762f055d5f1afe | 2,058 | py | Python | problems/leetcode/lt-3.py | neerajp99/algorithms | 1d6885d2a895821ac511fa8a46913d34db2511ca | [
"MIT"
] | 1 | 2021-06-17T07:59:42.000Z | 2021-06-17T07:59:42.000Z | problems/leetcode/lt-3.py | neerajp99/algorithms | 1d6885d2a895821ac511fa8a46913d34db2511ca | [
"MIT"
] | null | null | null | problems/leetcode/lt-3.py | neerajp99/algorithms | 1d6885d2a895821ac511fa8a46913d34db2511ca | [
"MIT"
] | 1 | 2022-01-13T08:42:31.000Z | 2022-01-13T08:42:31.000Z | # 3. Longest Substring Without Repeating Characters
"""
Given a string s, find the length of the longest substring without repeating characters.
"""
class Solution:
# Solution 1 (Hashmap)
def lengthOfLongestSubstring(self, s: str) -> int:
# Initialise empty hasmap
check_dict = dict()
# Initialise the temporary check and final variable
max_value = 0
max_check = -1
# If there are just 1 charcter or an empty string, return the length of it
if len(s) < 2:
return len(s)
# Iterate over the range of the string length
for i in range(len(s)):
# If the character exists in the hashmap, update the value of the temporary check variale
if s[i] in check_dict:
max_check = max(max_check, check_dict[s[i]])
# Else, append the character to the hashmap
check_dict[s[i]] = i
# The final value would be the max of max_value and current location - location of the found character
max_value = max(max_value, i - max_check)
return max_value
# Solution 2 (Strings)
def lengthOfLongestSubstring(self, s: str) -> int:
# Initialise empty string
check_str = ""
# Initialise the final value as 0
max_value = 0
# Iterate through the string
for i in s:
# If the character is not the character string, add it to it
if i not in check_str:
check_str += i
else:
# Otherwise, the maximum length would be the max of max_value and the length of the string
max_value = max(max_value, len(check_str))
# Update the string by removing the characters before the found character inclusive of it from
# the character and append the character after
check_str = check_str[check_str.index(i) + 1: ] + i
return max(max_value, len(check_str))
| 40.352941 | 114 | 0.590379 |
class Solution:
def lengthOfLongestSubstring(self, s: str) -> int:
check_dict = dict()
max_value = 0
max_check = -1
if len(s) < 2:
return len(s)
for i in range(len(s)):
if s[i] in check_dict:
max_check = max(max_check, check_dict[s[i]])
check_dict[s[i]] = i
max_value = max(max_value, i - max_check)
return max_value
def lengthOfLongestSubstring(self, s: str) -> int:
check_str = ""
max_value = 0
for i in s:
if i not in check_str:
check_str += i
else:
max_value = max(max_value, len(check_str))
check_str = check_str[check_str.index(i) + 1: ] + i
return max(max_value, len(check_str))
| true | true |
f7fefc8ff48ab859635a15ee92ed550e6ffd38c7 | 2,769 | py | Python | han/vocabulary.py | nryotaro/han | ed78f6772f4bf6923d9a3f52dbcc8a55e757631b | [
"MIT"
] | null | null | null | han/vocabulary.py | nryotaro/han | ed78f6772f4bf6923d9a3f52dbcc8a55e757631b | [
"MIT"
] | null | null | null | han/vocabulary.py | nryotaro/han | ed78f6772f4bf6923d9a3f52dbcc8a55e757631b | [
"MIT"
] | null | null | null | """Word embedding."""
import typing as t
import torchtext.vocab as v
import torch
def build_vocabulary(
    sentences: t.Iterator[t.Iterator[str]],
    pad_symbol: str = "<pad>",
    unknown_symbol: str = "<unk>",
) -> v.Vocab:
    """Construct a :class:`torchtext.vocab.Vocab` from tokenized sentences.

    ``sentences`` yields lists of words.  The pad and unknown symbols are
    inserted first (indices 0 and 1), and every out-of-vocabulary word is
    encoded to the index of ``unknown_symbol``.
    """
    vocabulary: v.Vocab = v.build_vocab_from_iterator(
        iter(sentences),
        specials=[pad_symbol, unknown_symbol],
        special_first=True,
    )
    # Unknown words map to index 1 (the unknown symbol).
    vocabulary.set_default_index(1)
    return vocabulary
class EmbeddingProtocol(t.Protocol):
    """Provide the format to provide trained embedding.

    The methods of this protocol follow `torchtext.vocab.Vectors` so that
    such objects can be used directly.
    """
    @property
    def itos(self) -> list[str]:
        """Return the index-to-string list (the word at position i has index i)."""
    @property
    def vectors(self) -> torch.Tensor:
        """Return embeddings.

        The shape of the tensor is (`len(itos)`, embedding_dim).
        """
class VocabularyProtocol(t.Protocol):
    """Map strings to indices."""
    def forward(self, words: list[str]) -> list[int]:
        """Take words and return their indices."""
    def __getitem__(self, s: str) -> int:
        """Take a string and return its index."""
    def __call__(self, words: list[str]) -> list[int]:
        """See `forward`."""
    def __len__(self) -> int:
        """Return the size of the vocabulary."""
class _VocabularyImpl:
    """Dictionary-backed vocabulary; unknown words map to ``default_idx``."""

    def __init__(self, dictionary: dict[str, int], default_idx: int = 1):
        self._dictionary = dictionary
        self._default_idx = default_idx

    def __getitem__(self, s: str) -> int:
        """Return the index of ``s``, or the default index if unknown."""
        try:
            return self._dictionary[s]
        except KeyError:
            return self._default_idx

    def forward(self, words: list[str]) -> list[int]:
        """Encode each word in ``words`` to its index."""
        return [self[word] for word in words]

    def __call__(self, words: list[str]) -> list[int]:
        """Alias for :meth:`forward`."""
        return self.forward(words)

    def __len__(self) -> int:
        """Return the number of known words."""
        return len(self._dictionary)
def create_vocab(
    embedding: EmbeddingProtocol,
    pad_symbol: str = "<pad>",
    unknown_symbol: str = "<unk>",
) -> t.Tuple[VocabularyProtocol, torch.Tensor]:
    """Create a vocabulary and weight tensor that contain pad and unknown symbols.

    Bind `pad_symbol` to 0 and `unknown_symbol` to 1; both get all-zero
    embedding rows.  Every other word of ``embedding`` keeps its pretrained
    vector and is assigned the next consecutive index.
    """
    word_to_index = {pad_symbol: 0, unknown_symbol: 1}
    dim = embedding.vectors.shape[1]
    # Rows 0 and 1 are zero vectors for the pad and unknown symbols.
    weights = [torch.zeros(dim), torch.zeros(dim)]
    # Hoisted out of the loop: the original rebuilt this set per iteration.
    specials = {pad_symbol, unknown_symbol}
    next_index = 2
    for index, word in enumerate(embedding.itos):
        if word in specials:
            # Skip any pretrained row for the special symbols; their zero
            # rows were already added above (matches original behavior).
            continue
        word_to_index[word] = next_index
        next_index += 1
        weights.append(embedding.vectors[index, :])
    return _VocabularyImpl(word_to_index, 1), torch.vstack(weights)
| 26.122642 | 73 | 0.625135 | import typing as t
import torchtext.vocab as v
import torch
def build_vocabulary(
sentences: t.Iterator[t.Iterator[str]],
pad_symbol: str = "<pad>",
unknown_symbol: str = "<unk>",
) -> v.Vocab:
vocab: v.Vocab = v.build_vocab_from_iterator(
(sentence for sentence in sentences),
special_first=True,
specials=[pad_symbol, unknown_symbol],
)
vocab.set_default_index(1)
return vocab
class EmbeddingProtocol(t.Protocol):
@property
def itos(self) -> list[str]:
@property
def vectors(self) -> torch.Tensor:
class VocabularyProtocol(t.Protocol):
def forward(self, words: list[str]) -> list[int]:
def __getitem__(self, s: str) -> int:
def __call__(self, words: list[str]) -> list[int]:
def __len__(self) -> int:
class _VocabularyImpl:
def __init__(self, dictionary: dict[str, int], default_idx: int = 1):
self._dictionary = dictionary
self._default_idx = default_idx
def forward(self, words: list[str]) -> list[int]:
return [self.__getitem__(word) for word in words]
def __getitem__(self, s: str) -> int:
return self._dictionary.get(s, self._default_idx)
def __call__(self, words: list[str]) -> list[int]:
return self.forward(words)
def __len__(self) -> int:
return len(self._dictionary)
def create_vocab(
embedding: EmbeddingProtocol,
pad_symbol: str = "<pad>",
unknown_symbol: str = "<unk>",
) -> t.Tuple[VocabularyProtocol, torch.Tensor]:
d = dict()
d[pad_symbol] = 0
d[unknown_symbol] = 1
c = 2
dim = embedding.vectors.shape[1]
weights = [torch.Tensor([0] * dim), torch.Tensor([0] * dim)]
for index, word in enumerate(embedding.itos):
if word not in set([pad_symbol, unknown_symbol]):
d[word] = c
c += 1
weights.append(embedding.vectors[index, :])
return _VocabularyImpl(d, 1), torch.vstack(weights)
| true | true |
f7fefd0259b96892cf85d5ed37c1c06c7d5285fe | 172 | py | Python | samcli/commands/local/lib/exceptions.py | langn/aws-sam-cli | 160d87ff3c07f092315e1ac71ddc00257fde011b | [
"Apache-2.0"
] | 3 | 2018-11-29T12:57:56.000Z | 2021-02-24T11:58:58.000Z | samcli/commands/local/lib/exceptions.py | langn/aws-sam-cli | 160d87ff3c07f092315e1ac71ddc00257fde011b | [
"Apache-2.0"
] | 1 | 2018-05-23T19:51:18.000Z | 2018-05-23T19:51:18.000Z | samcli/commands/local/lib/exceptions.py | langn/aws-sam-cli | 160d87ff3c07f092315e1ac71ddc00257fde011b | [
"Apache-2.0"
] | 2 | 2018-09-03T11:54:16.000Z | 2021-02-05T03:32:17.000Z | """
Custom exceptions raised by this local library
"""
class NoApisDefined(Exception):
    """Raised when the template contains no API definitions."""
| 15.636364 | 57 | 0.668605 |
class NoApisDefined(Exception):
pass
| true | true |
f7fefecf35025fdf3c21ce31e70c5aa72d32c5d5 | 3,385 | py | Python | origin/origin_channels.py | deathbybandaid/fHDHR_Ceton | 6d7224be0f97e25844afd8933bdb00893f48e88b | [
"WTFPL"
] | null | null | null | origin/origin_channels.py | deathbybandaid/fHDHR_Ceton | 6d7224be0f97e25844afd8933bdb00893f48e88b | [
"WTFPL"
] | null | null | null | origin/origin_channels.py | deathbybandaid/fHDHR_Ceton | 6d7224be0f97e25844afd8933bdb00893f48e88b | [
"WTFPL"
] | null | null | null | import xmltodict
import base64
import re
import threading
class OriginChannels():
    """Scrapes the channel lineup from a Ceton tuner and builds stream URLs."""
    def __init__(self, fhdhr, origin):
        # fhdhr provides config, logging and an HTTP session; origin wraps
        # the Ceton tuner control calls.
        self.fhdhr = fhdhr
        self.origin = origin
    def get_channels(self):
        """Return the cleaned channel list scraped from the Ceton web UI."""
        cleaned_channels = []
        url_headers = {'accept': 'application/xml;q=0.9, */*;q=0.8'}
        count_url = ('http://' + self.fhdhr.config.dict["origin"]["ceton_ip"] +
                     '/view_channel_map.cgi?page=1')
        try:
            countReq = self.fhdhr.web.session.get(count_url, headers=url_headers)
            countReq.raise_for_status()
        except self.fhdhr.web.exceptions.HTTPError as err:
            self.fhdhr.logger.error('Error while getting channel count: %s' % err)
            return []
        # Parse the total channel count out of the "1 to 50 of N" pager text,
        # then derive how many 50-channel pages to request.
        # NOTE(review): re.search returns None if the pager text is absent,
        # which would raise AttributeError on the next line -- confirm the
        # page format is guaranteed.
        count = re.search('(?<=1 to 50 of )\w+', countReq.text)
        count = int(int(count.group(0))/50+2)
        for i in range(1, count):
            stations_url = "http://%s/view_channel_map.cgi?page=%s&xml=1" % (self.fhdhr.config.dict["origin"]["ceton_ip"], i)
            try:
                stationsReq = self.fhdhr.web.session.get(stations_url, headers=url_headers)
                stationsReq.raise_for_status()
            except self.fhdhr.web.exceptions.HTTPError as err:
                self.fhdhr.logger.error('Error while getting stations: %s' % err)
                return []
            stationsRes = xmltodict.parse(stationsReq.content)
            for station_item in stationsRes['channels']['channel']:
                # Channel names are base64-encoded in the XML; decode to ASCII.
                nameTmp = station_item["name"]
                nameTmp_bytes = nameTmp.encode('ascii')
                namebytes = base64.b64decode(nameTmp_bytes)
                name = namebytes.decode('ascii')
                clean_station_item = {
                    "name": name,
                    "callsign": name,
                    "number": station_item["number"],
                    "eia": station_item["eia"],
                    "id": station_item["sourceid"],
                }
                cleaned_channels.append(clean_station_item)
        return cleaned_channels
    def get_channel_stream(self, chandict):
        """Tune a Ceton tuner to the channel and return a local UDP stream URL.

        Returns None when no tuner is available or tuning fails.
        """
        found, instance = self.origin.get_ceton_tuner_status(chandict)
        # 1 to start or 0 to stop
        if found:
            port = self.origin.startstop_ceton_tuner(instance, 1)
        else:
            port = None
            self.fhdhr.logger.error('No Ceton tuners available')
        if port:
            tuned = self.origin.set_ceton_tuner(chandict, instance)
            self.fhdhr.logger.info('Preparing Ceton tuner ' + str(instance) + ' on port:' + str(port))
        else:
            tuned = None
        # NOTE(review): these getvar calls and the watchdog below run even
        # when no tuner was found -- confirm that is intended.
        self.origin.get_ceton_getvar(instance, "Frequency")
        self.origin.get_ceton_getvar(instance, "ProgramNumber")
        self.origin.get_ceton_getvar(instance, "CopyProtectionStatus")
        if tuned:
            self.fhdhr.logger.info('Initiate streaming channel ' +
                                   str(chandict['number']) +
                                   ' from Ceton tuner#: ' + str(instance))
            streamurl = "udp://127.0.0.1:" + str(port)
        else:
            streamurl = None
        # Background watchdog monitors/releases the tuner for this channel.
        wd = threading.Thread(target=self.origin.tuner_watchdog, args=(chandict, instance))
        wd.start()
        return streamurl
| 36.793478 | 125 | 0.550369 | import xmltodict
import base64
import re
import threading
class OriginChannels():
def __init__(self, fhdhr, origin):
self.fhdhr = fhdhr
self.origin = origin
def get_channels(self):
cleaned_channels = []
url_headers = {'accept': 'application/xml;q=0.9, */*;q=0.8'}
count_url = ('http://' + self.fhdhr.config.dict["origin"]["ceton_ip"] +
'/view_channel_map.cgi?page=1')
try:
countReq = self.fhdhr.web.session.get(count_url, headers=url_headers)
countReq.raise_for_status()
except self.fhdhr.web.exceptions.HTTPError as err:
self.fhdhr.logger.error('Error while getting channel count: %s' % err)
return []
count = re.search('(?<=1 to 50 of )\w+', countReq.text)
count = int(int(count.group(0))/50+2)
for i in range(1, count):
stations_url = "http://%s/view_channel_map.cgi?page=%s&xml=1" % (self.fhdhr.config.dict["origin"]["ceton_ip"], i)
try:
stationsReq = self.fhdhr.web.session.get(stations_url, headers=url_headers)
stationsReq.raise_for_status()
except self.fhdhr.web.exceptions.HTTPError as err:
self.fhdhr.logger.error('Error while getting stations: %s' % err)
return []
stationsRes = xmltodict.parse(stationsReq.content)
for station_item in stationsRes['channels']['channel']:
nameTmp = station_item["name"]
nameTmp_bytes = nameTmp.encode('ascii')
namebytes = base64.b64decode(nameTmp_bytes)
name = namebytes.decode('ascii')
clean_station_item = {
"name": name,
"callsign": name,
"number": station_item["number"],
"eia": station_item["eia"],
"id": station_item["sourceid"],
}
cleaned_channels.append(clean_station_item)
return cleaned_channels
def get_channel_stream(self, chandict):
found, instance = self.origin.get_ceton_tuner_status(chandict)
if found:
port = self.origin.startstop_ceton_tuner(instance, 1)
else:
port = None
self.fhdhr.logger.error('No Ceton tuners available')
if port:
tuned = self.origin.set_ceton_tuner(chandict, instance)
self.fhdhr.logger.info('Preparing Ceton tuner ' + str(instance) + ' on port:' + str(port))
else:
tuned = None
self.origin.get_ceton_getvar(instance, "Frequency")
self.origin.get_ceton_getvar(instance, "ProgramNumber")
self.origin.get_ceton_getvar(instance, "CopyProtectionStatus")
if tuned:
self.fhdhr.logger.info('Initiate streaming channel ' +
str(chandict['number']) +
' from Ceton tuner#: ' + str(instance))
streamurl = "udp://127.0.0.1:" + str(port)
else:
streamurl = None
wd = threading.Thread(target=self.origin.tuner_watchdog, args=(chandict, instance))
wd.start()
return streamurl
| true | true |
f7fefee9eff9373e9e99a507d49cc43a2b14c7fa | 237 | py | Python | src/poetry_hooks/__version__.py | jvrana/poetry-hooks | a3f967ea4353c5466516b33a9c4576762f47350f | [
"MIT"
] | null | null | null | src/poetry_hooks/__version__.py | jvrana/poetry-hooks | a3f967ea4353c5466516b33a9c4576762f47350f | [
"MIT"
] | null | null | null | src/poetry_hooks/__version__.py | jvrana/poetry-hooks | a3f967ea4353c5466516b33a9c4576762f47350f | [
"MIT"
] | null | null | null | # __version__.py
# autogenerated by keats 0.2.28
__version__ = "0.4.2"
__name__ = "poetry-hooks"
__title__ = "poetry-hooks"
__authors__ = ['Justin Vrana <justin.vrana@gmail.com>']
__repo__ = None
__homepage__ = None
__description__ = ""
| 23.7 | 55 | 0.734177 |
__version__ = "0.4.2"
__name__ = "poetry-hooks"
__title__ = "poetry-hooks"
__authors__ = ['Justin Vrana <justin.vrana@gmail.com>']
__repo__ = None
__homepage__ = None
__description__ = ""
| true | true |
f7feffee5e417036c331f7b88919179c6b210753 | 119 | py | Python | w4/finterstellar/__init__.py | finterstellar/lecture | fb14fb1c6a842e2ee2f79b0225ac9f4d11c3ca47 | [
"MIT"
] | 2 | 2020-05-14T05:53:15.000Z | 2020-09-29T03:45:59.000Z | w4/finterstellar/__init__.py | finterstellar/lecture | fb14fb1c6a842e2ee2f79b0225ac9f4d11c3ca47 | [
"MIT"
] | null | null | null | w4/finterstellar/__init__.py | finterstellar/lecture | fb14fb1c6a842e2ee2f79b0225ac9f4d11c3ca47 | [
"MIT"
] | 6 | 2020-03-01T13:50:23.000Z | 2022-03-29T05:47:28.000Z | from .common import *
from .prep import *
#from .trading import *
from .valuation import *
from .visualization import * | 23.8 | 28 | 0.747899 | from .common import *
from .prep import *
from .valuation import *
from .visualization import * | true | true |
f7ff00f7410b91bf68dc4e6f17a7b5823635b06c | 11,164 | py | Python | lemur/plugins/lemur_kubernetes/plugin.py | backwardn/lemur | 9f641c14a916d72177216ac82b29c1d9b569d957 | [
"Apache-2.0"
] | 1 | 2020-11-11T22:01:58.000Z | 2020-11-11T22:01:58.000Z | lemur/plugins/lemur_kubernetes/plugin.py | backwardn/lemur | 9f641c14a916d72177216ac82b29c1d9b569d957 | [
"Apache-2.0"
] | 2 | 2021-02-10T02:29:45.000Z | 2021-04-30T21:40:40.000Z | lemur/plugins/lemur_kubernetes/plugin.py | backwardn/lemur | 9f641c14a916d72177216ac82b29c1d9b569d957 | [
"Apache-2.0"
] | null | null | null | """
.. module: lemur.plugins.lemur_kubernetes.plugin
:platform: Unix
:copyright: (c) 2018 by Netflix Inc., see AUTHORS for more
:license: Apache, see LICENSE for more details.
The plugin inserts certificates and the private key as Kubernetes secret that
can later be used to secure service endpoints running in Kubernetes pods
.. moduleauthor:: Mikhail Khodorovskiy <mikhail.khodorovskiy@jivesoftware.com>
"""
import base64
import itertools
import os
import requests
from flask import current_app
from lemur.common.defaults import common_name
from lemur.common.utils import parse_certificate
from lemur.plugins.bases import DestinationPlugin
DEFAULT_API_VERSION = "v1"
def ensure_resource(k8s_api, k8s_base_uri, namespace, kind, name, data):
    """Create the resource, falling back to an update if it already exists.

    Returns None on success, or the raw error response body on failure.
    """
    create_url = _resolve_uri(k8s_base_uri, namespace, kind)
    current_app.logger.debug("K8S POST request URL: %s", create_url)
    create_resp = k8s_api.post(create_url, json=data)
    current_app.logger.debug("K8S POST response: %s", create_resp)

    if 200 <= create_resp.status_code <= 299:
        return None
    if create_resp.json().get("reason", "") != "AlreadyExists":
        return create_resp.content

    # The resource already exists -- replace it with a PUT to the named URI.
    update_url = _resolve_uri(k8s_base_uri, namespace, kind, name)
    current_app.logger.debug("K8S PUT request URL: %s", update_url)
    update_resp = k8s_api.put(update_url, json=data)
    current_app.logger.debug("K8S PUT response: %s", update_resp)

    if not 200 <= update_resp.status_code <= 299:
        return update_resp.content
    return None
def _resolve_ns(k8s_base_uri, namespace, api_ver=DEFAULT_API_VERSION):
    """Build the namespaces URL for the given API version.

    Grouped API versions (those containing "/") live under ``/apis``;
    the core API lives under ``/api``.  When ``namespace`` is falsy, the
    returned URL stops at ``/namespaces``.
    """
    group = "apis" if "/" in api_ver else "api"
    url = "{base}/{api_group}/{api_ver}/namespaces".format(
        base=k8s_base_uri, api_group=group, api_ver=api_ver
    )
    if namespace:
        url += "/" + namespace
    return url
def _resolve_uri(k8s_base_uri, namespace, kind, name=None, api_ver=DEFAULT_API_VERSION):
    """Build the full resource URI, e.g. ``.../namespaces/ns/secrets/name``.

    The kind is pluralized and lower-cased; the trailing name segment is
    included only when ``name`` is given.  A falsy namespace defaults to
    ``"default"``.
    """
    segments = [
        _resolve_ns(k8s_base_uri, namespace or "default", api_ver=api_ver),
        (kind + "s").lower(),
    ]
    if name:
        segments.append(name)
    return "/".join(segments)
# Base64-encode a text string, returning text: base64.b64encode operates on
# bytes, so the input is encoded first and the result decoded back.
def base64encode(string):
    """Return the Base64 encoding of ``string`` as a ``str``."""
    raw = string.encode()
    return base64.b64encode(raw).decode()
def build_secret(secret_format, secret_name, body, private_key, cert_chain):
    """Assemble the Kubernetes Secret manifest for the requested format.

    ``Full`` bundles the combined PEM, chain, key and cert; ``TLS`` produces
    a ``kubernetes.io/tls`` secret; ``Certificate`` carries only the chain.
    All payloads are Base64-encoded as the Secrets API requires.
    """
    manifest = {
        "apiVersion": "v1",
        "kind": "Secret",
        "type": "Opaque",
        "metadata": {"name": secret_name},
    }
    if secret_format == "Full":
        combined = "%s\n%s" % (body, private_key)
        manifest["data"] = {
            "combined.pem": base64encode(combined),
            "ca.crt": base64encode(cert_chain),
            "service.key": base64encode(private_key),
            "service.crt": base64encode(body),
        }
    elif secret_format == "TLS":
        manifest["type"] = "kubernetes.io/tls"
        manifest["data"] = {
            "tls.crt": base64encode(body),
            "tls.key": base64encode(private_key),
        }
    elif secret_format == "Certificate":
        manifest["data"] = {"tls.crt": base64encode(cert_chain)}
    return manifest
class KubernetesDestinationPlugin(DestinationPlugin):
title = "Kubernetes"
slug = "kubernetes-destination"
description = "Allow the uploading of certificates to Kubernetes as secret"
author = "Mikhail Khodorovskiy"
author_url = "https://github.com/mik373/lemur"
options = [
{
"name": "secretNameFormat",
"type": "str",
"required": False,
# Validation is difficult. This regex is used by kubectl to validate secret names:
# [a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*
# Allowing the insertion of "{common_name}" (or any other such placeholder}
# at any point in the string proved very challenging and had a tendency to
# cause my browser to hang. The specified expression will allow any valid string
# but will also accept many invalid strings.
"validation": "(?:[a-z0-9.-]|\\{common_name\\})+",
"helpMessage": 'Must be a valid secret name, possibly including "{common_name}"',
"default": "{common_name}",
},
{
"name": "kubernetesURL",
"type": "str",
"required": False,
"validation": "https?://[a-zA-Z0-9.-]+(?::[0-9]+)?",
"helpMessage": "Must be a valid Kubernetes server URL!",
"default": "https://kubernetes.default",
},
{
"name": "kubernetesAuthToken",
"type": "str",
"required": False,
"validation": "[0-9a-zA-Z-_.]+",
"helpMessage": "Must be a valid Kubernetes server Token!",
},
{
"name": "kubernetesAuthTokenFile",
"type": "str",
"required": False,
"validation": "(/[^/]+)+",
"helpMessage": "Must be a valid file path!",
"default": "/var/run/secrets/kubernetes.io/serviceaccount/token",
},
{
"name": "kubernetesServerCertificate",
"type": "textarea",
"required": False,
"validation": "-----BEGIN CERTIFICATE-----[a-zA-Z0-9/+\\s\\r\\n]+-----END CERTIFICATE-----",
"helpMessage": "Must be a valid Kubernetes server Certificate!",
},
{
"name": "kubernetesServerCertificateFile",
"type": "str",
"required": False,
"validation": "(/[^/]+)+",
"helpMessage": "Must be a valid file path!",
"default": "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt",
},
{
"name": "kubernetesNamespace",
"type": "str",
"required": False,
"validation": "[a-z0-9]([-a-z0-9]*[a-z0-9])?",
"helpMessage": "Must be a valid Kubernetes Namespace!",
},
{
"name": "kubernetesNamespaceFile",
"type": "str",
"required": False,
"validation": "(/[^/]+)+",
"helpMessage": "Must be a valid file path!",
"default": "/var/run/secrets/kubernetes.io/serviceaccount/namespace",
},
{
"name": "secretFormat",
"type": "select",
"required": True,
"available": ["Full", "TLS", "Certificate"],
"helpMessage": "The type of Secret to create.",
"default": "Full",
},
]
def __init__(self, *args, **kwargs):
super(KubernetesDestinationPlugin, self).__init__(*args, **kwargs)
def upload(self, name, body, private_key, cert_chain, options, **kwargs):
try:
k8_base_uri = self.get_option("kubernetesURL", options)
secret_format = self.get_option("secretFormat", options)
k8s_api = K8sSession(self.k8s_bearer(options), self.k8s_cert(options))
cn = common_name(parse_certificate(body))
secret_name_format = self.get_option("secretNameFormat", options)
secret_name = secret_name_format.format(common_name=cn)
secret = build_secret(
secret_format, secret_name, body, private_key, cert_chain
)
err = ensure_resource(
k8s_api,
k8s_base_uri=k8_base_uri,
namespace=self.k8s_namespace(options),
kind="secret",
name=secret_name,
data=secret,
)
except Exception as e:
current_app.logger.exception(
"Exception in upload: {}".format(e), exc_info=True
)
raise
if err is not None:
current_app.logger.error("Error deploying resource: %s", err)
raise Exception("Error uploading secret: " + err)
def k8s_bearer(self, options):
bearer = self.get_option("kubernetesAuthToken", options)
if not bearer:
bearer_file = self.get_option("kubernetesAuthTokenFile", options)
with open(bearer_file, "r") as file:
bearer = file.readline()
if bearer:
current_app.logger.debug("Using token read from %s", bearer_file)
else:
raise Exception(
"Unable to locate token in options or from %s", bearer_file
)
else:
current_app.logger.debug("Using token from options")
return bearer
    def k8s_cert(self, options):
        """Return a filesystem path to the Kubernetes API server CA cert.

        If a PEM is supplied inline via ``kubernetesServerCertificate`` it is
        written to ``k8.cert`` next to this module and that path is returned;
        otherwise the ``kubernetesServerCertificateFile`` option is returned
        unchanged.
        """
        cert_file = self.get_option("kubernetesServerCertificateFile", options)
        cert = self.get_option("kubernetesServerCertificate", options)
        if cert:
            # Persist the inline PEM so requests' ``verify=`` can point at a
            # file path.  NOTE(review): this writes into the installed package
            # directory, which may be read-only in some deployments -- confirm.
            cert_file = os.path.join(
                os.path.abspath(os.path.dirname(__file__)), "k8.cert"
            )
            with open(cert_file, "w") as text_file:
                text_file.write(cert)
            current_app.logger.debug("Using certificate from options")
        else:
            current_app.logger.debug("Using certificate from %s", cert_file)
        return cert_file
def k8s_namespace(self, options):
namespace = self.get_option("kubernetesNamespace", options)
if not namespace:
namespace_file = self.get_option("kubernetesNamespaceFile", options)
with open(namespace_file, "r") as file:
namespace = file.readline()
if namespace:
current_app.logger.debug(
"Using namespace %s from %s", namespace, namespace_file
)
else:
raise Exception(
"Unable to locate namespace in options or from %s", namespace_file
)
else:
current_app.logger.debug("Using namespace %s from options", namespace)
return namespace
class K8sSession(requests.Session):
    """requests.Session preconfigured for talking to the Kubernetes API.

    Installs a ``Bearer`` Authorization header and verifies TLS against the
    given CA certificate file.
    """

    def __init__(self, bearer, cert_file):
        super(K8sSession, self).__init__()
        self.headers.update({"Authorization": "Bearer %s" % bearer})
        self.verify = cert_file

    def request(
        self,
        method,
        url,
        params=None,
        data=None,
        headers=None,
        cookies=None,
        files=None,
        auth=None,
        timeout=30,
        allow_redirects=True,
        proxies=None,
        hooks=None,
        stream=None,
        verify=None,
        cert=None,
        json=None,
    ):
        """
        Override requests' default of no timeout with a 30 second timeout.
        (The previous docstring incorrectly said 10s.)
        """
        # Forward by keyword so this stays correct even if requests ever
        # reorders the positional parameters of Session.request.
        return super(K8sSession, self).request(
            method,
            url,
            params=params,
            data=data,
            headers=headers,
            cookies=cookies,
            files=files,
            auth=auth,
            timeout=timeout,
            allow_redirects=allow_redirects,
            proxies=proxies,
            hooks=hooks,
            stream=stream,
            verify=verify,
            cert=cert,
            json=json,
        )
| 34.45679 | 104 | 0.567539 | import base64
import itertools
import os
import requests
from flask import current_app
from lemur.common.defaults import common_name
from lemur.common.utils import parse_certificate
from lemur.plugins.bases import DestinationPlugin
DEFAULT_API_VERSION = "v1"
def ensure_resource(k8s_api, k8s_base_uri, namespace, kind, name, data):
    """Create the resource, falling back to an update when it already exists.

    Returns ``None`` on success, otherwise the raw error response body.
    """
    create_url = _resolve_uri(k8s_base_uri, namespace, kind)
    current_app.logger.debug("K8S POST request URL: %s", create_url)
    post_response = k8s_api.post(create_url, json=data)
    current_app.logger.debug("K8S POST response: %s", post_response)
    if 200 <= post_response.status_code <= 299:
        return None
    if post_response.json().get("reason", "") != "AlreadyExists":
        return post_response.content
    # The resource exists already -- replace it with a PUT instead.
    update_url = _resolve_uri(k8s_base_uri, namespace, kind, name)
    current_app.logger.debug("K8S PUT request URL: %s", update_url)
    put_response = k8s_api.put(update_url, json=data)
    current_app.logger.debug("K8S PUT response: %s", put_response)
    if 200 <= put_response.status_code <= 299:
        return None
    return put_response.content
def _resolve_ns(k8s_base_uri, namespace, api_ver=DEFAULT_API_VERSION):
    """Build the ``.../namespaces[/<ns>]`` URL for the given API version.

    API versions containing a ``/`` (e.g. ``apps/v1``) live under ``apis``;
    the core API version lives under ``api``.
    """
    api_group = "apis" if "/" in api_ver else "api"
    base = "{base}/{api_group}/{api_ver}/namespaces".format(
        base=k8s_base_uri, api_group=api_group, api_ver=api_ver
    )
    if namespace:
        return base + "/" + namespace
    return base
def _resolve_uri(k8s_base_uri, namespace, kind, name=None, api_ver=DEFAULT_API_VERSION):
    """Build the full resource URL, defaulting to the ``default`` namespace."""
    segments = [
        _resolve_ns(k8s_base_uri, namespace or "default", api_ver=api_ver),
        (kind + "s").lower(),
    ]
    if name:
        segments.append(name)
    return "/".join(segments)
def base64encode(string):
    """Return *string* UTF-8 encoded then base64 encoded, as text."""
    raw = string.encode()
    return base64.b64encode(raw).decode()
def build_secret(secret_format, secret_name, body, private_key, cert_chain):
    """Assemble the Kubernetes Secret manifest for the requested format.

    ``Full`` stores cert, key, chain and a combined PEM; ``TLS`` produces a
    ``kubernetes.io/tls`` secret; ``Certificate`` stores only the chain.
    """
    def encode(text):
        # Kubernetes secret "data" values must be base64-encoded strings.
        return base64.b64encode(text.encode()).decode()

    secret = {
        "apiVersion": "v1",
        "kind": "Secret",
        "type": "Opaque",
        "metadata": {"name": secret_name},
    }
    if secret_format == "Full":
        secret["data"] = {
            "combined.pem": encode("%s\n%s" % (body, private_key)),
            "ca.crt": encode(cert_chain),
            "service.key": encode(private_key),
            "service.crt": encode(body),
        }
    if secret_format == "TLS":
        secret["type"] = "kubernetes.io/tls"
        secret["data"] = {
            "tls.crt": encode(body),
            "tls.key": encode(private_key),
        }
    if secret_format == "Certificate":
        secret["data"] = {"tls.crt": encode(cert_chain)}
    return secret
class KubernetesDestinationPlugin(DestinationPlugin):
    """Destination plugin that uploads certificates into Kubernetes Secrets."""

    title = "Kubernetes"
    slug = "kubernetes-destination"
    description = "Allow the uploading of certificates to Kubernetes as secret"
    author = "Mikhail Khodorovskiy"
    author_url = "https://github.com/mik373/lemur"
    options = [
        {
            "name": "secretNameFormat",
            "type": "str",
            "required": False,
            "validation": "(?:[a-z0-9.-]|\\{common_name\\})+",
            "helpMessage": 'Must be a valid secret name, possibly including "{common_name}"',
            "default": "{common_name}",
        },
        {
            "name": "kubernetesURL",
            "type": "str",
            "required": False,
            "validation": "https?://[a-zA-Z0-9.-]+(?::[0-9]+)?",
            "helpMessage": "Must be a valid Kubernetes server URL!",
            "default": "https://kubernetes.default",
        },
        {
            "name": "kubernetesAuthToken",
            "type": "str",
            "required": False,
            "validation": "[0-9a-zA-Z-_.]+",
            "helpMessage": "Must be a valid Kubernetes server Token!",
        },
        {
            "name": "kubernetesAuthTokenFile",
            "type": "str",
            "required": False,
            "validation": "(/[^/]+)+",
            "helpMessage": "Must be a valid file path!",
            "default": "/var/run/secrets/kubernetes.io/serviceaccount/token",
        },
        {
            "name": "kubernetesServerCertificate",
            "type": "textarea",
            "required": False,
            "validation": "-----BEGIN CERTIFICATE-----[a-zA-Z0-9/+\\s\\r\\n]+-----END CERTIFICATE-----",
            "helpMessage": "Must be a valid Kubernetes server Certificate!",
        },
        {
            "name": "kubernetesServerCertificateFile",
            "type": "str",
            "required": False,
            "validation": "(/[^/]+)+",
            "helpMessage": "Must be a valid file path!",
            "default": "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt",
        },
        {
            "name": "kubernetesNamespace",
            "type": "str",
            "required": False,
            "validation": "[a-z0-9]([-a-z0-9]*[a-z0-9])?",
            "helpMessage": "Must be a valid Kubernetes Namespace!",
        },
        {
            "name": "kubernetesNamespaceFile",
            "type": "str",
            "required": False,
            "validation": "(/[^/]+)+",
            "helpMessage": "Must be a valid file path!",
            "default": "/var/run/secrets/kubernetes.io/serviceaccount/namespace",
        },
        {
            "name": "secretFormat",
            "type": "select",
            "required": True,
            "available": ["Full", "TLS", "Certificate"],
            "helpMessage": "The type of Secret to create.",
            "default": "Full",
        },
    ]

    def __init__(self, *args, **kwargs):
        """Forward all arguments unchanged to the DestinationPlugin base."""
        super(KubernetesDestinationPlugin, self).__init__(*args, **kwargs)

    def upload(self, name, body, private_key, cert_chain, options, **kwargs):
        """Create or update a Kubernetes Secret holding this certificate.

        :raises Exception: if the secret cannot be created or updated
        """
        try:
            k8_base_uri = self.get_option("kubernetesURL", options)
            secret_format = self.get_option("secretFormat", options)
            k8s_api = K8sSession(self.k8s_bearer(options), self.k8s_cert(options))
            cn = common_name(parse_certificate(body))
            secret_name_format = self.get_option("secretNameFormat", options)
            secret_name = secret_name_format.format(common_name=cn)
            secret = build_secret(
                secret_format, secret_name, body, private_key, cert_chain
            )
            err = ensure_resource(
                k8s_api,
                k8s_base_uri=k8_base_uri,
                namespace=self.k8s_namespace(options),
                kind="secret",
                name=secret_name,
                data=secret,
            )
        except Exception as e:
            current_app.logger.exception(
                "Exception in upload: {}".format(e), exc_info=True
            )
            raise
        if err is not None:
            current_app.logger.error("Error deploying resource: %s", err)
            # ``err`` is the raw response body (bytes); str + bytes would
            # itself raise TypeError, so format instead of concatenating.
            raise Exception("Error uploading secret: {}".format(err))

    def k8s_bearer(self, options):
        """Return the bearer token, from options or the token file.

        :raises Exception: if no token can be found in either place
        """
        bearer = self.get_option("kubernetesAuthToken", options)
        if not bearer:
            bearer_file = self.get_option("kubernetesAuthTokenFile", options)
            with open(bearer_file, "r") as file:
                bearer = file.readline()
            if bearer:
                current_app.logger.debug("Using token read from %s", bearer_file)
            else:
                # Interpolate explicitly: Exception() does not apply %-style
                # formatting to its arguments.
                raise Exception(
                    "Unable to locate token in options or from %s" % bearer_file
                )
        else:
            current_app.logger.debug("Using token from options")
        return bearer

    def k8s_cert(self, options):
        """Return a file path to the API server CA cert, writing any inline
        PEM from options to ``k8.cert`` next to this module first."""
        cert_file = self.get_option("kubernetesServerCertificateFile", options)
        cert = self.get_option("kubernetesServerCertificate", options)
        if cert:
            cert_file = os.path.join(
                os.path.abspath(os.path.dirname(__file__)), "k8.cert"
            )
            with open(cert_file, "w") as text_file:
                text_file.write(cert)
            current_app.logger.debug("Using certificate from options")
        else:
            current_app.logger.debug("Using certificate from %s", cert_file)
        return cert_file

    def k8s_namespace(self, options):
        """Return the target namespace, from options or the namespace file.

        :raises Exception: if no namespace can be found in either place
        """
        namespace = self.get_option("kubernetesNamespace", options)
        if not namespace:
            namespace_file = self.get_option("kubernetesNamespaceFile", options)
            with open(namespace_file, "r") as file:
                namespace = file.readline()
            if namespace:
                current_app.logger.debug(
                    "Using namespace %s from %s", namespace, namespace_file
                )
            else:
                # Interpolate explicitly: Exception() does not apply %-style
                # formatting to its arguments.
                raise Exception(
                    "Unable to locate namespace in options or from %s"
                    % namespace_file
                )
        else:
            current_app.logger.debug("Using namespace %s from options", namespace)
        return namespace
class K8sSession(requests.Session):
    """requests.Session preconfigured for talking to the Kubernetes API."""

    def __init__(self, bearer, cert_file):
        super(K8sSession, self).__init__()
        # Authenticate every request with the service-account bearer token and
        # verify TLS against the provided CA certificate file.
        self.headers.update({"Authorization": "Bearer %s" % bearer})
        self.verify = cert_file

    def request(
        self,
        method,
        url,
        params=None,
        data=None,
        headers=None,
        cookies=None,
        files=None,
        auth=None,
        timeout=30,
        allow_redirects=True,
        proxies=None,
        hooks=None,
        stream=None,
        verify=None,
        cert=None,
        json=None,
    ):
        """Same as Session.request, but with a default 30 second timeout."""
        # Forward by keyword for robustness against signature reordering.
        return super(K8sSession, self).request(
            method,
            url,
            params=params,
            data=data,
            headers=headers,
            cookies=cookies,
            files=files,
            auth=auth,
            timeout=timeout,
            allow_redirects=allow_redirects,
            proxies=proxies,
            hooks=hooks,
            stream=stream,
            verify=verify,
            cert=cert,
            json=json,
        )
| true | true |
f7ff00fd5487a03a90cb0e56e83f58c27dad60f7 | 4,091 | py | Python | test_statically.py | AlanCristhian/statically | 01ea9e5cbe047d4b7d69772b6155ef05fe475bb8 | [
"MIT"
] | 208 | 2017-10-30T13:11:52.000Z | 2022-03-14T00:04:24.000Z | test_statically.py | AlanCristhian/statically | 01ea9e5cbe047d4b7d69772b6155ef05fe475bb8 | [
"MIT"
] | 9 | 2017-10-30T21:43:18.000Z | 2019-11-04T15:25:22.000Z | test_statically.py | AlanCristhian/statically | 01ea9e5cbe047d4b7d69772b6155ef05fe475bb8 | [
"MIT"
] | 9 | 2017-10-30T14:26:43.000Z | 2019-07-08T02:25:27.000Z | import unittest
import asyncio
import subprocess
import sys
import os.path
import cython
try:
import IPython
except ImportError:
ipython_installed = False
else:
ipython_installed = True
import statically
ONE_HUNDRED = 100
execute = asyncio.get_event_loop().run_until_complete
def is_cython_function(obj):
return 'cython_function_or_method' in str(type(obj))
def anext(agen):
gen = agen.asend(None)
try:
gen.send(None)
except StopIteration as error:
return error.args[0]
class CompilationSuite(unittest.TestCase):
def test_function(self):
@statically.typed
def identity(x: cython.int):
return x
self.assertEqual(identity(14), 14)
def test_is_compiled(self):
@statically.typed
def compiled(x: cython.int):
return x
self.assertTrue(is_cython_function(compiled))
def test_non_local_var_in_class(self):
one = 1
@statically.typed
class Class:
number = 100 + one
self.assertEqual(Class.number, 101)
def test_non_local_var_in_method(self):
two = 2
class Class:
@statically.typed
def add_two(self, x):
return x + two
obj = Class()
self.assertEqual(obj.add_two(100), 102)
def test_non_local_var_in_function(self):
tree = 3
@statically.typed
def add_tree(x):
return x + tree
self.assertEqual(add_tree(100), 103)
def test_non_local_var_in_generator_function(self):
four = 4
@statically.typed
def add_four(x):
yield x + four
self.assertEqual(next(add_four(100)), 104)
def test_non_local_var_in_coroutine_function(self):
five = 5
@statically.typed
async def add_five(x):
return x + five
self.assertEqual(execute(add_five(100)), 105)
def test_global_var_in_class(self):
@statically.typed
class Class_:
number = 1 + ONE_HUNDRED
self.assertEqual(Class_.number, 101)
def test_global_var_in_method(self):
class Class:
@statically.typed
def add_one_hundred(self, x):
return ONE_HUNDRED + x
obj = Class()
self.assertEqual(obj.add_one_hundred(2), 102)
def test_global_var_in_function(self):
@statically.typed
def add_one_hundred(x):
return ONE_HUNDRED + x
self.assertEqual(add_one_hundred(3), 103)
def test_global_var_in_generator_function(self):
@statically.typed
def add_one_hundred(x):
yield ONE_HUNDRED + x
self.assertEqual(next(add_one_hundred(4)), 104)
def test_global_var_in_coroutine_function(self):
@statically.typed
async def add_one_hundred(x):
return ONE_HUNDRED + x
self.assertEqual(execute(add_one_hundred(5)), 105)
@unittest.skipUnless(statically.has_async_gen_fun, "Test does not apply for this version of Python")
def test_async_generator(self):
message = r"Async generator funcions are not supported."
with self.assertRaisesRegex(TypeError, message):
from test_statically_async import generator
@unittest.skipUnless(ipython_installed, "IPython not installed")
class IPythonSuite(unittest.TestCase):
    """End-to-end check that @statically.typed works inside IPython."""
    def test_ipython(self):
        """Drive a real ipython subprocess and check the compiled marker."""
        base_dir = os.path.dirname(sys.executable)
        # Assumes ``ipython`` lives next to the current interpreter
        # (virtualenv layout) -- TODO confirm on other installs.
        executable = os.path.join(base_dir, "ipython")
        process = subprocess.Popen([executable], stdin=subprocess.PIPE,
                                   stdout=subprocess.PIPE)
        script = "import statically\n" \
                 "@statically.typed\n" \
                 "def add(a: int, b: int): return a + b\n\n" \
                 "'cython_function_or_method' in str(type(add))\n".encode()
        stdout, _ = process.communicate(script)
        lines = stdout.decode().split("\n")
        process.terminate()
        # Fourth-from-last line carries IPython's Out[3] echo of the check.
        self.assertEqual(lines[-4], "In [3]: Out[3]: True")
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
| 28.809859 | 104 | 0.629186 | import unittest
import asyncio
import subprocess
import sys
import os.path
import cython
try:
import IPython
except ImportError:
ipython_installed = False
else:
ipython_installed = True
import statically
ONE_HUNDRED = 100
execute = asyncio.get_event_loop().run_until_complete
def is_cython_function(obj):
return 'cython_function_or_method' in str(type(obj))
def anext(agen):
gen = agen.asend(None)
try:
gen.send(None)
except StopIteration as error:
return error.args[0]
class CompilationSuite(unittest.TestCase):
def test_function(self):
@statically.typed
def identity(x: cython.int):
return x
self.assertEqual(identity(14), 14)
def test_is_compiled(self):
@statically.typed
def compiled(x: cython.int):
return x
self.assertTrue(is_cython_function(compiled))
def test_non_local_var_in_class(self):
one = 1
@statically.typed
class Class:
number = 100 + one
self.assertEqual(Class.number, 101)
def test_non_local_var_in_method(self):
two = 2
class Class:
@statically.typed
def add_two(self, x):
return x + two
obj = Class()
self.assertEqual(obj.add_two(100), 102)
def test_non_local_var_in_function(self):
tree = 3
@statically.typed
def add_tree(x):
return x + tree
self.assertEqual(add_tree(100), 103)
def test_non_local_var_in_generator_function(self):
four = 4
@statically.typed
def add_four(x):
yield x + four
self.assertEqual(next(add_four(100)), 104)
def test_non_local_var_in_coroutine_function(self):
five = 5
@statically.typed
async def add_five(x):
return x + five
self.assertEqual(execute(add_five(100)), 105)
def test_global_var_in_class(self):
@statically.typed
class Class_:
number = 1 + ONE_HUNDRED
self.assertEqual(Class_.number, 101)
def test_global_var_in_method(self):
class Class:
@statically.typed
def add_one_hundred(self, x):
return ONE_HUNDRED + x
obj = Class()
self.assertEqual(obj.add_one_hundred(2), 102)
def test_global_var_in_function(self):
@statically.typed
def add_one_hundred(x):
return ONE_HUNDRED + x
self.assertEqual(add_one_hundred(3), 103)
def test_global_var_in_generator_function(self):
@statically.typed
def add_one_hundred(x):
yield ONE_HUNDRED + x
self.assertEqual(next(add_one_hundred(4)), 104)
def test_global_var_in_coroutine_function(self):
@statically.typed
async def add_one_hundred(x):
return ONE_HUNDRED + x
self.assertEqual(execute(add_one_hundred(5)), 105)
@unittest.skipUnless(statically.has_async_gen_fun, "Test does not apply for this version of Python")
def test_async_generator(self):
message = r"Async generator funcions are not supported."
with self.assertRaisesRegex(TypeError, message):
from test_statically_async import generator
@unittest.skipUnless(ipython_installed, "IPython not installed")
class IPythonSuite(unittest.TestCase):
    """End-to-end check that @statically.typed works inside IPython."""
    def test_ipython(self):
        """Drive a real ipython subprocess and check the compiled marker."""
        base_dir = os.path.dirname(sys.executable)
        # Assumes ``ipython`` lives next to the current interpreter
        # (virtualenv layout) -- TODO confirm on other installs.
        executable = os.path.join(base_dir, "ipython")
        process = subprocess.Popen([executable], stdin=subprocess.PIPE,
                                   stdout=subprocess.PIPE)
        script = "import statically\n" \
                 "@statically.typed\n" \
                 "def add(a: int, b: int): return a + b\n\n" \
                 "'cython_function_or_method' in str(type(add))\n".encode()
        stdout, _ = process.communicate(script)
        lines = stdout.decode().split("\n")
        process.terminate()
        # Fourth-from-last line carries IPython's Out[3] echo of the check.
        self.assertEqual(lines[-4], "In [3]: Out[3]: True")
self.assertEqual(lines[-4], "In [3]: Out[3]: True")
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
| true | true |
f7ff034a956d7a49e5d2f627fe862127ab5a7c41 | 14,792 | py | Python | pelix/rsa/providers/distribution/py4j.py | svidoso/ipopo | 1d4b81207e67890dfccc8f562336c7104f194c17 | [
"Apache-2.0"
] | 65 | 2015-04-21T10:41:18.000Z | 2022-01-02T16:25:40.000Z | pelix/rsa/providers/distribution/py4j.py | svidoso/ipopo | 1d4b81207e67890dfccc8f562336c7104f194c17 | [
"Apache-2.0"
] | 85 | 2015-01-20T14:23:52.000Z | 2022-02-19T17:08:46.000Z | pelix/rsa/providers/distribution/py4j.py | svidoso/ipopo | 1d4b81207e67890dfccc8f562336c7104f194c17 | [
"Apache-2.0"
] | 32 | 2015-03-13T07:43:05.000Z | 2020-04-24T07:56:53.000Z | #!/usr/bin/python
# -- Content-Encoding: UTF-8 --
"""
Py4j-based Distribution and Discovery Provider
:author: Scott Lewis
:copyright: Copyright 2020, Scott Lewis
:license: Apache License 2.0
:version: 1.0.1
..
Copyright 2020 Scott Lewis
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from concurrent.futures import ThreadPoolExecutor
from queue import Queue
from threading import Thread, RLock
import logging
from osgiservicebridge.bridge import (
JavaServiceProxy,
Py4jServiceBridgeEventListener,
Py4jServiceBridge,
PythonService,
)
from osgiservicebridge.protobuf import (
ProtobufJavaServiceProxy,
ProtobufPythonService,
)
from py4j.java_gateway import GatewayParameters, CallbackServerParameters
from py4j.java_gateway import DEFAULT_PORT, DEFAULT_PYTHON_PROXY_PORT
# needed ipopo decorators
from pelix.ipopo.decorators import (
ComponentFactory,
Provides,
Instantiate,
Property,
Validate,
ValidateComponent,
Invalidate,
PostRegistration,
)
from pelix.ipopo.constants import (
ARG_BUNDLE_CONTEXT,
ARG_PROPERTIES,
)
# Providers API
from pelix.rsa import prop_dot_suffix
from pelix.rsa.providers.distribution import (
Container,
ExportContainer,
ImportContainer,
DistributionProvider,
SERVICE_EXPORT_CONTAINER,
SERVICE_IMPORT_CONTAINER,
SERVICE_EXPORT_DISTRIBUTION_PROVIDER,
SERVICE_IMPORT_DISTRIBUTION_PROVIDER,
)
from pelix.rsa.endpointdescription import EndpointDescription
# ------------------------------------------------------------------------------
# Module version
__version_info__ = (1, 0, 1)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
_logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
# Note: These must match the Java-side constants recored in Java interface
# class: org.eclipse.ecf.provider.py4j.Py4jConstants
ECF_PY4J_CONTAINER_CONFIG_TYPE = "ecf.py4j"
ECF_PY4J_NAMESPACE = "ecf.namespace.py4j"
ECF_PY4J_JAVA_HOST_CONFIG_TYPE = "ecf.py4j.host"
ECF_PY4J_JAVA_CONSUMER_CONFIG_TYPE = "ecf.py4j.consumer"
ECF_PY4J_PYTHON_HOST_CONFIG_TYPE = "ecf.py4j.host.python"
ECF_PY4J_PYTHON_CONSUMER_CONFIG_TYPE = "ecf.py4j.consumer.python"
ECF_PY4J_SUPPORTED_INTENTS = [
"exactlyOnce",
"passByReference",
"ordered",
"py4j",
"py4j.async",
"osgi.async",
"osgi.private",
]
# Protobuf
ECF_PY4JPB_JAVA_HOST_CONFIG_TYPE = "ecf.py4j.protobuf.host"
ECF_PY4JPB_JAVA_CONSUMER_CONFIG_TYPE = "ecf.py4j.protobuf.consumer"
ECF_PY4JPB_PYTHON_HOST_CONFIG_TYPE = "ecf.py4j.python.protobuf.host"
ECF_PY4JPB_PYTHON_CONSUMER_CONFIG_TYPE = "ecf.py4j.python.protobuf.consumer"
ECF_PY4JPB_SUPPORTED_INTENTS = [
"exactlyOnce",
"passByReference",
"passByValue",
"ordered",
"py4j",
"py4j.protobuf",
"py4j.async",
"osgi.async",
"osgi.private",
]
ECF_PY4J_JAVA_PORT_PROP = "javaport"
ECF_PY4J_PYTHON_PORT_PROP = "pythonport"
ECF_PY4J_DEFAULT_SERVICE_TIMEOUT = "defaultservicetimeout"
# ------------------------------------------------------------------------------
@ComponentFactory(ECF_PY4J_CONTAINER_CONFIG_TYPE)
@Provides([SERVICE_EXPORT_CONTAINER, SERVICE_IMPORT_CONTAINER])
class Py4jContainer(ExportContainer, ImportContainer):
    """Py4j container that both exports and imports remote services.

    Exported Python services are wrapped as (Protobuf)PythonService and
    registered with the Py4j bridge; imported Java services are wrapped in
    (Protobuf)JavaServiceProxy instances.  A shared thread pool runs the
    asynchronous remote calls in both directions.
    """

    def __init__(self, max_workers=5):
        ExportContainer.__init__(self)
        ImportContainer.__init__(self)
        # Size of the thread pool used for async remote calls.
        self._max_workers = max_workers
        self._executor = None

    @ValidateComponent(ARG_BUNDLE_CONTEXT, ARG_PROPERTIES)
    def _validate_component(self, bundle_context, container_props):
        """Create the executor once the component is validated."""
        Container._validate_component(self, bundle_context, container_props)
        self._executor = ThreadPoolExecutor(max_workers=self._max_workers)

    @Invalidate
    def _invalidate_component(self, _):
        """Shut the executor down when the component is invalidated."""
        Container._invalidate_component(self, _)
        if self._executor:
            self._executor.shutdown()
            self._executor = None

    def get_connected_id(self):
        return ExportContainer.get_connected_id(self)

    def _resolve_timeout(self, endpoint_timeout):
        """Return the endpoint's osgi.basic.timeout, or the configured
        container default (ECF_PY4J_DEFAULT_SERVICE_TIMEOUT), or 30s."""
        if endpoint_timeout:
            return endpoint_timeout
        return self._container_props.get(ECF_PY4J_DEFAULT_SERVICE_TIMEOUT, 30)

    def _export_service(self, svc, ed):
        """Wrap ``svc`` for Py4j and register it with the bridge."""
        # pylint: disable=W0212
        # Consistency fix: honor the configured default service timeout here,
        # as _prepare_proxy already does (previously hard-coded to 30).
        timeout = self._resolve_timeout(ed.get_osgi_basic_timeout())
        args = [
            self._get_distribution_provider()._get_bridge(),
            ed.get_interfaces(),
            svc,
            self._executor,
            timeout,
        ]
        # Pick the protobuf wrapper when the endpoint advertises it.
        if (
            ECF_PY4JPB_PYTHON_HOST_CONFIG_TYPE
            in ed.get_remote_configs_supported()
        ):
            clazz = ProtobufPythonService
        else:
            clazz = PythonService
        psvc = clazz(*args)
        self._get_distribution_provider()._get_bridge().export(
            psvc, ed.get_properties()
        )
        ExportContainer._export_service(self, psvc, ed)
        return True

    def _unexport_service(self, ed):
        """Remove the endpoint from the bridge and the local registry."""
        # pylint: disable=W0212
        dp = self._get_distribution_provider()
        if dp:
            bridge = dp._get_bridge()
            if bridge:
                bridge.unexport(ed.get_id())
        ExportContainer._unexport_service(self, ed)
        return True

    def _prepare_proxy(self, endpoint_description):
        """Create the Java service proxy for an imported endpoint."""
        # pylint: disable=W0212
        bridge = self._get_distribution_provider()._get_bridge()
        proxy = bridge.get_import_endpoint(endpoint_description.get_id())[0]
        timeout = self._resolve_timeout(
            endpoint_description.get_osgi_basic_timeout()
        )
        args = [
            bridge.get_jvm(),
            endpoint_description.get_interfaces(),
            proxy,
            self._executor,
            timeout,
        ]
        clazz = JavaServiceProxy
        if (
            ECF_PY4JPB_JAVA_HOST_CONFIG_TYPE
            in endpoint_description.get_remote_configs_supported()
        ):
            clazz = ProtobufJavaServiceProxy
        return clazz(*args)

    def unimport_service(self, endpoint_description):
        """Drop the imported endpoint from the bridge, then unimport it."""
        # pylint: disable=W0212
        dp = self._get_distribution_provider()
        if dp:
            bridge = dp._get_bridge()
            if bridge:
                bridge.remove_import_endpoint(endpoint_description.get_id())
        ImportContainer.unimport_service(self, endpoint_description)
@ComponentFactory("py4j-distribution-provider-factory")
@Provides(
    [SERVICE_EXPORT_DISTRIBUTION_PROVIDER, SERVICE_IMPORT_DISTRIBUTION_PROVIDER]
)
@Property("_config_name", "config_name", ECF_PY4J_CONTAINER_CONFIG_TYPE)
@Property("_namespace", "namespace", ECF_PY4J_NAMESPACE)
@Property(
    "_supported_configs",
    "supported_configs",
    [ECF_PY4J_PYTHON_HOST_CONFIG_TYPE, ECF_PY4J_PYTHON_CONSUMER_CONFIG_TYPE],
)
@Property("_supported_intents", "supported_intents",
          ECF_PY4J_SUPPORTED_INTENTS)
@Property(
    "_supported_pb_intents",
    "supported_pb_intents",
    ECF_PY4JPB_SUPPORTED_INTENTS,
)
@Property(
    "_javaport",
    prop_dot_suffix(ECF_PY4J_CONTAINER_CONFIG_TYPE, ECF_PY4J_JAVA_PORT_PROP),
    DEFAULT_PORT,
)
@Property(
    "_pythonport",
    prop_dot_suffix(ECF_PY4J_CONTAINER_CONFIG_TYPE, ECF_PY4J_PYTHON_PORT_PROP),
    DEFAULT_PYTHON_PROXY_PORT,
)
@Property(
    "_default_service_timeout",
    prop_dot_suffix(
        ECF_PY4J_CONTAINER_CONFIG_TYPE, ECF_PY4J_DEFAULT_SERVICE_TIMEOUT
    ),
    30,
)
@Instantiate("py4j-distribution-provider")
class Py4jDistributionProvider(
    DistributionProvider, Py4jServiceBridgeEventListener
):
    """Distribution provider backed by a Py4j bridge to a Java VM.

    Bridge callbacks (service_imported/modified/unimported) run on the Py4j
    handler thread; they only enqueue work, which a dedicated daemon worker
    thread delivers to RSA in FIFO order.
    """
    def __init__(self):
        super(Py4jDistributionProvider, self).__init__()
        self._bridge = None
        self._container = None
        # FIFO hand-off between the Py4j callback thread and self._worker.
        self._queue = Queue()
        self._thread = Thread(target=self._worker)
        self._thread.daemon = True
        # Shutdown flag, guarded by self._lock.
        self._done = False
        self._lock = RLock()
        self._py4jcontainer = self._supported_pb_intents = None
        self._javaport = self._pythonport = self._default_service_timeout = None
    def _get_bridge(self):
        """Return the Py4jServiceBridge (None until validated)."""
        return self._bridge
    # Override of DistributionProvider._get_imported_configs: maps the
    # Java-side host config types to the matching Python-side config types.
    def _get_imported_configs(self, exported_configs):
        imported_configs = []
        if ECF_PY4JPB_JAVA_HOST_CONFIG_TYPE in exported_configs:
            imported_configs.append(ECF_PY4JPB_PYTHON_HOST_CONFIG_TYPE)
        if ECF_PY4J_JAVA_HOST_CONFIG_TYPE in exported_configs:
            imported_configs.append(ECF_PY4J_PYTHON_HOST_CONFIG_TYPE)
        return imported_configs
    # Implementation of ImportDistributionProvider
    def supports_import(self, exported_configs, service_intents, import_props):
        # pylint: disable=W0613
        """Return the container when configs and intents match, else None."""
        if ECF_PY4JPB_JAVA_HOST_CONFIG_TYPE in exported_configs:
            if self._match_intents_supported(
                service_intents, self._supported_pb_intents
            ):
                return self._container
        elif ECF_PY4J_JAVA_HOST_CONFIG_TYPE in exported_configs:
            if self._match_intents(service_intents):
                return self._container
        return None
    # Implementation of ExportDistributionProvider
    def supports_export(self, exported_configs, service_intents, export_props):
        # pylint: disable=W0613
        """Return the container when configs and intents match, else None."""
        if self._match_intents(service_intents):
            if (
                ECF_PY4J_PYTHON_HOST_CONFIG_TYPE in exported_configs
                or ECF_PY4JPB_PYTHON_HOST_CONFIG_TYPE in exported_configs
            ):
                return self._container
        return None
    @Validate
    def _validate(self, _):
        """Connect the Py4j bridge and instantiate the container component."""
        # here is where we can get java and python ports and change the
        # defaults for connecting
        try:
            self._bridge = Py4jServiceBridge(
                service_listener=self,
                gateway_parameters=GatewayParameters(port=self._javaport),
                callback_server_parameters=CallbackServerParameters(
                    port=self._pythonport
                ),
            )
            self._bridge.connect()
        except Exception as e:
            self._bridge = None
            raise e
        # Once bridge is connected, instantiate container using bridge id
        container_props = self._prepare_container_props(
            self._supported_intents, None
        )
        if self._default_service_timeout:
            container_props[
                ECF_PY4J_DEFAULT_SERVICE_TIMEOUT
            ] = self._default_service_timeout
        self._container = self._ipopo.instantiate(
            self._config_name, self._bridge.get_id(), container_props
        )
    @Invalidate
    def _invalidate(self, _):
        """Stop the worker thread, tear down the container and the bridge."""
        if self._bridge:
            with self._lock:
                # Set done flag to True
                self._done = True
                # Trigger reading from queue in self._worker
                # with empty task
                self._queue.put((None, None, None))
            try:
                self._ipopo.invalidate(self._bridge.get_id())
            except ValueError:
                pass
            try:
                self._bridge.disconnect()
            except Exception:
                pass
            self._bridge = None
            self._container = None
    # Implementation of Py4jServiceBridgeEventListener
    def service_imported(
        self, servicebridge, endpointid, proxy, endpoint_props
    ):
        # put on task queue so no blocking, but fifo delivery to rsa
        self._queue.put((endpointid, endpoint_props, self._handle_import))
    def service_modified(
        self, servicebridge, endpointid, proxy, endpoint_props
    ):
        # put on task queue so no blocking, but fifo delivery to rsa
        self._queue.put(
            (endpointid, endpoint_props, self._handle_import_update)
        )
    def service_unimported(
        self, servicebridge, endpointid, proxy, endpoint_props
    ):
        # put on task queue so no blocking, but fifo delivery to rsa
        self._queue.put(
            (endpointid,
             endpoint_props,
             self._handle_import_close))
    @PostRegistration
    def _post_reg(self, _):
        # start the thread for processing import_service import requests
        self._thread.start()
    # this is method called by self._thread. All it does is
    # read from queue, and import/unregister imported the discovered service
    def _worker(self):
        while True:
            with self._lock:
                # If self._done flag is set, return and that's it
                if self._done:
                    return
            # otherwise block to get items from queue placed by service_imported,
            # service_modified, and service_unimported
            # called by Py4j handler thread
            item = self._queue.get()
            f = None
            try:
                # get the function from item[2]
                f = item[2]
            except Exception:
                logging.error("Exception getting code in item=%s", item)
            if f:
                try:
                    # get the endpoint description properties from item[1]
                    # and create EndpointDescription instance
                    ed = EndpointDescription(properties=item[1])
                except Exception:
                    logging.error(
                        "Exception creating endpoint description from props=%s",
                        item[1],
                    )
                else:
                    # call appropriate function
                    try:
                        f(ed)
                    except Exception:
                        logging.error("Exception invoking function=%s", f)
            # no matter what, we are done with this task
            self._queue.task_done()
| 33.240449 | 128 | 0.652177 |
from concurrent.futures import ThreadPoolExecutor
from queue import Queue
from threading import Thread, RLock
import logging
from osgiservicebridge.bridge import (
JavaServiceProxy,
Py4jServiceBridgeEventListener,
Py4jServiceBridge,
PythonService,
)
from osgiservicebridge.protobuf import (
ProtobufJavaServiceProxy,
ProtobufPythonService,
)
from py4j.java_gateway import GatewayParameters, CallbackServerParameters
from py4j.java_gateway import DEFAULT_PORT, DEFAULT_PYTHON_PROXY_PORT
from pelix.ipopo.decorators import (
ComponentFactory,
Provides,
Instantiate,
Property,
Validate,
ValidateComponent,
Invalidate,
PostRegistration,
)
from pelix.ipopo.constants import (
ARG_BUNDLE_CONTEXT,
ARG_PROPERTIES,
)
from pelix.rsa import prop_dot_suffix
from pelix.rsa.providers.distribution import (
Container,
ExportContainer,
ImportContainer,
DistributionProvider,
SERVICE_EXPORT_CONTAINER,
SERVICE_IMPORT_CONTAINER,
SERVICE_EXPORT_DISTRIBUTION_PROVIDER,
SERVICE_IMPORT_DISTRIBUTION_PROVIDER,
)
from pelix.rsa.endpointdescription import EndpointDescription
__version_info__ = (1, 0, 1)
__version__ = ".".join(str(x) for x in __version_info__)
__docformat__ = "restructuredtext en"
_logger = logging.getLogger(__name__)
ECF_PY4J_CONTAINER_CONFIG_TYPE = "ecf.py4j"
ECF_PY4J_NAMESPACE = "ecf.namespace.py4j"
ECF_PY4J_JAVA_HOST_CONFIG_TYPE = "ecf.py4j.host"
ECF_PY4J_JAVA_CONSUMER_CONFIG_TYPE = "ecf.py4j.consumer"
ECF_PY4J_PYTHON_HOST_CONFIG_TYPE = "ecf.py4j.host.python"
ECF_PY4J_PYTHON_CONSUMER_CONFIG_TYPE = "ecf.py4j.consumer.python"
ECF_PY4J_SUPPORTED_INTENTS = [
"exactlyOnce",
"passByReference",
"ordered",
"py4j",
"py4j.async",
"osgi.async",
"osgi.private",
]
ECF_PY4JPB_JAVA_HOST_CONFIG_TYPE = "ecf.py4j.protobuf.host"
ECF_PY4JPB_JAVA_CONSUMER_CONFIG_TYPE = "ecf.py4j.protobuf.consumer"
ECF_PY4JPB_PYTHON_HOST_CONFIG_TYPE = "ecf.py4j.python.protobuf.host"
ECF_PY4JPB_PYTHON_CONSUMER_CONFIG_TYPE = "ecf.py4j.python.protobuf.consumer"
ECF_PY4JPB_SUPPORTED_INTENTS = [
"exactlyOnce",
"passByReference",
"passByValue",
"ordered",
"py4j",
"py4j.protobuf",
"py4j.async",
"osgi.async",
"osgi.private",
]
ECF_PY4J_JAVA_PORT_PROP = "javaport"
ECF_PY4J_PYTHON_PORT_PROP = "pythonport"
ECF_PY4J_DEFAULT_SERVICE_TIMEOUT = "defaultservicetimeout"
@ComponentFactory(ECF_PY4J_CONTAINER_CONFIG_TYPE)
@Provides([SERVICE_EXPORT_CONTAINER, SERVICE_IMPORT_CONTAINER])
class Py4jContainer(ExportContainer, ImportContainer):
    """Py4j container that both exports and imports remote services.

    Exported Python services are wrapped as (Protobuf)PythonService and
    registered with the Py4j bridge; imported Java services are wrapped in
    (Protobuf)JavaServiceProxy instances.  A shared thread pool runs the
    asynchronous remote calls in both directions.
    """

    def __init__(self, max_workers=5):
        ExportContainer.__init__(self)
        ImportContainer.__init__(self)
        # Size of the thread pool used for async remote calls.
        self._max_workers = max_workers
        self._executor = None

    @ValidateComponent(ARG_BUNDLE_CONTEXT, ARG_PROPERTIES)
    def _validate_component(self, bundle_context, container_props):
        """Create the executor once the component is validated."""
        Container._validate_component(self, bundle_context, container_props)
        self._executor = ThreadPoolExecutor(max_workers=self._max_workers)

    @Invalidate
    def _invalidate_component(self, _):
        """Shut the executor down when the component is invalidated."""
        Container._invalidate_component(self, _)
        if self._executor:
            self._executor.shutdown()
            self._executor = None

    def get_connected_id(self):
        return ExportContainer.get_connected_id(self)

    def _resolve_timeout(self, endpoint_timeout):
        """Return the endpoint's osgi.basic.timeout, or the configured
        container default (ECF_PY4J_DEFAULT_SERVICE_TIMEOUT), or 30s."""
        if endpoint_timeout:
            return endpoint_timeout
        return self._container_props.get(ECF_PY4J_DEFAULT_SERVICE_TIMEOUT, 30)

    def _export_service(self, svc, ed):
        """Wrap ``svc`` for Py4j and register it with the bridge."""
        # Consistency fix: honor the configured default service timeout here,
        # as _prepare_proxy already does (previously hard-coded to 30).
        timeout = self._resolve_timeout(ed.get_osgi_basic_timeout())
        args = [
            self._get_distribution_provider()._get_bridge(),
            ed.get_interfaces(),
            svc,
            self._executor,
            timeout,
        ]
        # Pick the protobuf wrapper when the endpoint advertises it.
        if (
            ECF_PY4JPB_PYTHON_HOST_CONFIG_TYPE
            in ed.get_remote_configs_supported()
        ):
            clazz = ProtobufPythonService
        else:
            clazz = PythonService
        psvc = clazz(*args)
        self._get_distribution_provider()._get_bridge().export(
            psvc, ed.get_properties()
        )
        ExportContainer._export_service(self, psvc, ed)
        return True

    def _unexport_service(self, ed):
        """Remove the endpoint from the bridge and the local registry."""
        dp = self._get_distribution_provider()
        if dp:
            bridge = dp._get_bridge()
            if bridge:
                bridge.unexport(ed.get_id())
        ExportContainer._unexport_service(self, ed)
        return True

    def _prepare_proxy(self, endpoint_description):
        """Create the Java service proxy for an imported endpoint."""
        bridge = self._get_distribution_provider()._get_bridge()
        proxy = bridge.get_import_endpoint(endpoint_description.get_id())[0]
        timeout = self._resolve_timeout(
            endpoint_description.get_osgi_basic_timeout()
        )
        args = [
            bridge.get_jvm(),
            endpoint_description.get_interfaces(),
            proxy,
            self._executor,
            timeout,
        ]
        clazz = JavaServiceProxy
        if (
            ECF_PY4JPB_JAVA_HOST_CONFIG_TYPE
            in endpoint_description.get_remote_configs_supported()
        ):
            clazz = ProtobufJavaServiceProxy
        return clazz(*args)

    def unimport_service(self, endpoint_description):
        """Drop the imported endpoint from the bridge, then unimport it."""
        dp = self._get_distribution_provider()
        if dp:
            bridge = dp._get_bridge()
            if bridge:
                bridge.remove_import_endpoint(endpoint_description.get_id())
        ImportContainer.unimport_service(self, endpoint_description)
@ComponentFactory("py4j-distribution-provider-factory")
@Provides(
    [SERVICE_EXPORT_DISTRIBUTION_PROVIDER, SERVICE_IMPORT_DISTRIBUTION_PROVIDER]
)
@Property("_config_name", "config_name", ECF_PY4J_CONTAINER_CONFIG_TYPE)
@Property("_namespace", "namespace", ECF_PY4J_NAMESPACE)
@Property(
    "_supported_configs",
    "supported_configs",
    [ECF_PY4J_PYTHON_HOST_CONFIG_TYPE, ECF_PY4J_PYTHON_CONSUMER_CONFIG_TYPE],
)
@Property("_supported_intents", "supported_intents",
    ECF_PY4J_SUPPORTED_INTENTS)
@Property(
    "_supported_pb_intents",
    "supported_pb_intents",
    ECF_PY4JPB_SUPPORTED_INTENTS,
)
@Property(
    "_javaport",
    prop_dot_suffix(ECF_PY4J_CONTAINER_CONFIG_TYPE, ECF_PY4J_JAVA_PORT_PROP),
    DEFAULT_PORT,
)
@Property(
    "_pythonport",
    prop_dot_suffix(ECF_PY4J_CONTAINER_CONFIG_TYPE, ECF_PY4J_PYTHON_PORT_PROP),
    DEFAULT_PYTHON_PROXY_PORT,
)
@Property(
    "_default_service_timeout",
    prop_dot_suffix(
        ECF_PY4J_CONTAINER_CONFIG_TYPE, ECF_PY4J_DEFAULT_SERVICE_TIMEOUT
    ),
    30,
)
@Instantiate("py4j-distribution-provider")
class Py4jDistributionProvider(
    DistributionProvider, Py4jServiceBridgeEventListener
):
    """Distribution provider owning the Py4jServiceBridge.

    Bridge events (import/modify/unimport) arrive on the Py4j callback
    thread and are queued; a dedicated worker thread dequeues them and
    invokes the matching RSA handler.
    """
    def __init__(self):
        super(Py4jDistributionProvider, self).__init__()
        self._bridge = None
        self._container = None
        # queue of (endpointid, endpoint_props, handler) tuples for _worker
        self._queue = Queue()
        self._thread = Thread(target=self._worker)
        self._thread.daemon = True
        self._done = False
        self._lock = RLock()
        self._py4jcontainer = self._supported_pb_intents = None
        # the three below are injected by the @Property decorators above
        self._javaport = self._pythonport = self._default_service_timeout = None
    def _get_bridge(self):
        # Accessor used by the Py4jContainer component.
        return self._bridge
    def _get_imported_configs(self, exported_configs):
        """Map Java-side host config types to the matching Python-side ones."""
        imported_configs = []
        if ECF_PY4JPB_JAVA_HOST_CONFIG_TYPE in exported_configs:
            imported_configs.append(ECF_PY4JPB_PYTHON_HOST_CONFIG_TYPE)
        if ECF_PY4J_JAVA_HOST_CONFIG_TYPE in exported_configs:
            imported_configs.append(ECF_PY4J_PYTHON_HOST_CONFIG_TYPE)
        return imported_configs
    def supports_import(self, exported_configs, service_intents, import_props):
        """Return our container when this provider can import the endpoint,
        else None. Protobuf endpoints are matched against the protobuf
        intent list, plain py4j endpoints against the default intents."""
        if ECF_PY4JPB_JAVA_HOST_CONFIG_TYPE in exported_configs:
            if self._match_intents_supported(
                service_intents, self._supported_pb_intents
            ):
                return self._container
        elif ECF_PY4J_JAVA_HOST_CONFIG_TYPE in exported_configs:
            if self._match_intents(service_intents):
                return self._container
        return None
    def supports_export(self, exported_configs, service_intents, export_props):
        """Return our container when this provider can export the service."""
        if self._match_intents(service_intents):
            if (
                ECF_PY4J_PYTHON_HOST_CONFIG_TYPE in exported_configs
                or ECF_PY4JPB_PYTHON_HOST_CONFIG_TYPE in exported_configs
            ):
                return self._container
        return None
    @Validate
    def _validate(self, _):
        # Connect the Py4j bridge first; then instantiate the
        # Py4jContainer component bound to the bridge id.
        try:
            self._bridge = Py4jServiceBridge(
                service_listener=self,
                gateway_parameters=GatewayParameters(port=self._javaport),
                callback_server_parameters=CallbackServerParameters(
                    port=self._pythonport
                ),
            )
            self._bridge.connect()
        except Exception as e:
            self._bridge = None
            raise e
        container_props = self._prepare_container_props(
            self._supported_intents, None
        )
        if self._default_service_timeout:
            container_props[
                ECF_PY4J_DEFAULT_SERVICE_TIMEOUT
            ] = self._default_service_timeout
        self._container = self._ipopo.instantiate(
            self._config_name, self._bridge.get_id(), container_props
        )
    @Invalidate
    def _invalidate(self, _):
        # Signal the worker to stop, tear down the container, then
        # disconnect the bridge (best effort on both).
        if self._bridge:
            with self._lock:
                self._done = True
                # sentinel item wakes the worker so it can notice _done
                self._queue.put((None, None, None))
            try:
                self._ipopo.invalidate(self._bridge.get_id())
            except ValueError:
                pass
            try:
                self._bridge.disconnect()
            except Exception:
                pass
            self._bridge = None
            self._container = None
    def service_imported(
        self, servicebridge, endpointid, proxy, endpoint_props
    ):
        # Called on the Py4j callback thread; hand off to the worker queue.
        self._queue.put((endpointid, endpoint_props, self._handle_import))
    def service_modified(
        self, servicebridge, endpointid, proxy, endpoint_props
    ):
        self._queue.put(
            (endpointid, endpoint_props, self._handle_import_update)
        )
    def service_unimported(
        self, servicebridge, endpointid, proxy, endpoint_props
    ):
        self._queue.put(
            (endpointid,
            endpoint_props,
            self._handle_import_close))
    @PostRegistration
    def _post_reg(self, _):
        # Start the worker only once the service is actually registered.
        self._thread.start()
    def _worker(self):
        """Drain the event queue, applying each queued handler, until the
        provider is invalidated."""
        while True:
            with self._lock:
                if self._done:
                    return
            # otherwise block to get items from queue placed by service_imported,
            # service_modified, and service_unimported
            # called by Py4j handler thread
            item = self._queue.get()
            f = None
            try:
                # get the function from item[2]
                f = item[2]
            except Exception:
                logging.error("Exception getting code in item=%s", item)
            if f:
                try:
                    # get the endpoint description properties from item[1]
                    # and create EndpointDescription instance
                    ed = EndpointDescription(properties=item[1])
                except Exception:
                    logging.error(
                        "Exception creating endpoint description from props=%s",
                        item[1],
                    )
                else:
                    # call appropriate function
                    try:
                        f(ed)
                    except Exception:
                        logging.error("Exception invoking function=%s", f)
            # no matter what, we are done with this task
            self._queue.task_done()
| true | true |
f7ff03ffafef99ce507bb1fcd20f2653bbb18c58 | 386 | py | Python | course/migrations/0006_alter_coursesmodel_slug.py | dewale005/whitefieldcoursesite | e96277de34d0e7d464482cda787f1ee41fbe64fe | [
"MIT"
] | null | null | null | course/migrations/0006_alter_coursesmodel_slug.py | dewale005/whitefieldcoursesite | e96277de34d0e7d464482cda787f1ee41fbe64fe | [
"MIT"
] | null | null | null | course/migrations/0006_alter_coursesmodel_slug.py | dewale005/whitefieldcoursesite | e96277de34d0e7d464482cda787f1ee41fbe64fe | [
"MIT"
] | null | null | null | # Generated by Django 3.2.6 on 2021-08-25 18:21
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: make CoursesModel.slug a unique SlugField."""
    dependencies = [
        ('course', '0005_coursesmodel_slug'),
    ]
    operations = [
        migrations.AlterField(
            model_name='coursesmodel',
            name='slug',
            # unique=True also creates a unique index on the column
            field=models.SlugField(unique=True),
        ),
    ]
| 20.315789 | 48 | 0.598446 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('course', '0005_coursesmodel_slug'),
]
operations = [
migrations.AlterField(
model_name='coursesmodel',
name='slug',
field=models.SlugField(unique=True),
),
]
| true | true |
f7ff04a0d14c05d62eebf33a07be72d3158395ce | 2,012 | py | Python | tests/testKuhn.py | 1696012928/RoomAI | 37be09590489ab5f7c85083173e83ea31c40b76c | [
"MIT"
] | 1 | 2018-03-02T00:49:31.000Z | 2018-03-02T00:49:31.000Z | tests/testKuhn.py | 1696012928/RoomAI | 37be09590489ab5f7c85083173e83ea31c40b76c | [
"MIT"
] | null | null | null | tests/testKuhn.py | 1696012928/RoomAI | 37be09590489ab5f7c85083173e83ea31c40b76c | [
"MIT"
] | null | null | null | import unittest
import roomai.kuhn
import roomai.common
class KuhnTester(unittest.TestCase):
    """
    Integration tests for the Kuhn poker environment in roomai.
    """
    def testKuhn(self):
        """
        Play 1000 complete games with two always-bet players plus the
        chance player, stepping the env until a terminal state.
        """
        for i in range(1000):
            players = [roomai.kuhn.Example_KuhnPokerAlwaysBetPlayer() for i in range(2)] + [roomai.kuhn.KuhnPokerChancePlayer()]
            env = roomai.kuhn.KuhnPokerEnv()
            infos,public_state,_,_ = env.init()
            for i in range(len(players)):
                players[i].receive_info(infos[i])
            while public_state.is_terminal == False:
                turn = infos[-1].public_state.turn
                action = players[turn].take_action()
                infos,public_state,_,_ = env.forward(action)
                for i in range(len(players)):
                    players[i].receive_info(infos[i])
            print (env.public_state.scores)
    def testKuhnEnvBackward(self):
        # forward chance deal + two "bet" actions, then check that
        # backward() pops exactly one step from the action history
        env = roomai.kuhn.KuhnPokerEnv()
        env.init({"backward_enable":True})
        env.forward(roomai.kuhn.KuhnPokerActionChance.lookup("0,2"))
        action = roomai.kuhn.KuhnPokerAction("bet")
        infos, public_state, person_states, private_state = env.forward(action)
        print (public_state.action_history,person_states[public_state.turn].id)
        assert(len(public_state.action_history) == 2)
        infos, public_state, person_states, private_state = env.forward(roomai.kuhn.KuhnPokerAction("bet"))
        print (public_state.action_history,person_states[public_state.turn].id)
        assert(len(public_state.action_history) == 3)
        infos, public_state, person_states, private_state = env.backward()
        print (public_state.action_history,person_states[public_state.turn].id)
        assert(len(public_state.action_history) == 2)
    def testCompete(self):
        # smoke test: run a full competition with a random chance player
        players = [roomai.kuhn.Example_KuhnPokerAlwaysBetPlayer() for i in range(2)]
        env = roomai.kuhn.KuhnPokerEnv()
        env.compete(env, players + [roomai.common.RandomPlayerChance()])
| 36.581818 | 128 | 0.644135 | import unittest
import roomai.kuhn
import roomai.common
class KuhnTester(unittest.TestCase):
def testKuhn(self):
for i in range(1000):
players = [roomai.kuhn.Example_KuhnPokerAlwaysBetPlayer() for i in range(2)] + [roomai.kuhn.KuhnPokerChancePlayer()]
env = roomai.kuhn.KuhnPokerEnv()
infos,public_state,_,_ = env.init()
for i in range(len(players)):
players[i].receive_info(infos[i])
while public_state.is_terminal == False:
turn = infos[-1].public_state.turn
action = players[turn].take_action()
infos,public_state,_,_ = env.forward(action)
for i in range(len(players)):
players[i].receive_info(infos[i])
print (env.public_state.scores)
def testKuhnEnvBackward(self):
env = roomai.kuhn.KuhnPokerEnv()
env.init({"backward_enable":True})
env.forward(roomai.kuhn.KuhnPokerActionChance.lookup("0,2"))
action = roomai.kuhn.KuhnPokerAction("bet")
infos, public_state, person_states, private_state = env.forward(action)
print (public_state.action_history,person_states[public_state.turn].id)
assert(len(public_state.action_history) == 2)
infos, public_state, person_states, private_state = env.forward(roomai.kuhn.KuhnPokerAction("bet"))
print (public_state.action_history,person_states[public_state.turn].id)
assert(len(public_state.action_history) == 3)
infos, public_state, person_states, private_state = env.backward()
print (public_state.action_history,person_states[public_state.turn].id)
assert(len(public_state.action_history) == 2)
def testCompete(self):
players = [roomai.kuhn.Example_KuhnPokerAlwaysBetPlayer() for i in range(2)]
env = roomai.kuhn.KuhnPokerEnv()
env.compete(env, players + [roomai.common.RandomPlayerChance()])
| true | true |
f7ff0518bda03cb65ae06dad43a0492bb2af5645 | 2,633 | py | Python | sdk/ImageSearch/image_search_client/_image_search_client.py | WMRamadan/bing-search-sdk-for-python | 276d9cd6963c939081b3dec91bdd9aded42b3b35 | [
"MIT"
] | 12 | 2021-03-11T20:24:12.000Z | 2022-02-10T22:55:03.000Z | sdk/ImageSearch/image_search_client/_image_search_client.py | WMRamadan/bing-search-sdk-for-python | 276d9cd6963c939081b3dec91bdd9aded42b3b35 | [
"MIT"
] | null | null | null | sdk/ImageSearch/image_search_client/_image_search_client.py | WMRamadan/bing-search-sdk-for-python | 276d9cd6963c939081b3dec91bdd9aded42b3b35 | [
"MIT"
] | 10 | 2021-03-09T17:02:48.000Z | 2022-02-12T18:40:01.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6320, generator: {generator})
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.core import PipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Optional
from azure.core.credentials import TokenCredential
from ._configuration import ImageSearchClientConfiguration
from .operations import ImagesOperations
from . import models
class ImageSearchClient(object):
    """The Image Search API lets you send a search query to Bing and get back a list of relevant images. This section provides technical details about the query parameters and headers that you use to request images and the JSON response objects that contain them. For examples that show how to make requests, see `Searching the Web for Images <https://docs.microsoft.com/en-us/bing/bing-image-search/overview>`_.
    :ivar images: ImagesOperations operations
    :vartype images: image_search_client.operations.ImagesOperations
    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials.TokenCredential
    :param str base_url: Service URL
    """
    def __init__(
        self,
        credential,  # type: "TokenCredential"
        base_url=None,  # type: Optional[str]
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        if not base_url:
            # default public Bing endpoint
            base_url = 'https://api.bing.microsoft.com/v7.0'
        self._config = ImageSearchClientConfiguration(credential, **kwargs)
        self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs)
        # Collect all model classes so the (de)serializers can resolve them.
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._serialize.client_side_validation = False
        self._deserialize = Deserializer(client_models)
        self.images = ImagesOperations(
            self._client, self._config, self._serialize, self._deserialize)
    def close(self):
        # type: () -> None
        # Close the underlying pipeline client (and its transport).
        self._client.close()
    def __enter__(self):
        # type: () -> ImageSearchClient
        # Enter the pipeline client's context and return self.
        self._client.__enter__()
        return self
    def __exit__(self, *exc_details):
        # type: (Any) -> None
        # Delegate context exit (cleanup) to the pipeline client.
        self._client.__exit__(*exc_details)
| 40.507692 | 412 | 0.6673 |
from typing import TYPE_CHECKING
from azure.core import PipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
from typing import Any, Optional
from azure.core.credentials import TokenCredential
from ._configuration import ImageSearchClientConfiguration
from .operations import ImagesOperations
from . import models
class ImageSearchClient(object):
def __init__(
self,
credential,
base_url=None,
**kwargs
):
if not base_url:
base_url = 'https://api.bing.microsoft.com/v7.0'
self._config = ImageSearchClientConfiguration(credential, **kwargs)
self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
self.images = ImagesOperations(
self._client, self._config, self._serialize, self._deserialize)
def close(self):
self._client.close()
def __enter__(self):
self._client.__enter__()
return self
def __exit__(self, *exc_details):
self._client.__exit__(*exc_details)
| true | true |
f7ff0573d01e715f50b4eb5c23fc49a13a25d067 | 979 | py | Python | src/devint/counter.py | mathieucaroff/metravision | f0bbd4ed1d4b7c8d7a2de4c7a77c5dbe3714bf90 | [
"BSD-3-Clause"
] | 2 | 2019-01-21T09:45:59.000Z | 2019-10-22T12:00:12.000Z | src/devint/counter.py | mathieucaroff/metravision | f0bbd4ed1d4b7c8d7a2de4c7a77c5dbe3714bf90 | [
"BSD-3-Clause"
] | null | null | null | src/devint/counter.py | mathieucaroff/metravision | f0bbd4ed1d4b7c8d7a2de4c7a77c5dbe3714bf90 | [
"BSD-3-Clause"
] | null | null | null | """
Code ajoutant les compteurs à la collection d'images.
"""
import cv2
import numpy as np
def genFilledRegion(height=520, width=720, channelCount=None, dtype=np.uint8, fill_value=0):
    """Return a constant-filled numpy image of shape
    (height, width) or (height, width, channelCount)."""
    dims = (height, width) if channelCount is None else (height, width, channelCount)
    return np.full(shape=dims, dtype=dtype, fill_value=fill_value)
def addCounters(im, segmenter):
    """Render the segmenter's counters onto a fresh white image.

    Draws one "name: count" line per counter (the segment index first,
    then the sorted entries of the current segment, if any) and stores
    the result under im["counting"].
    """
    counting = genFilledRegion(height=200, width=300, fill_value=255)
    segmentIndex = segmenter.segmentIndex
    pairs = [("Segment", segmentIndex)]
    cs = segmenter.currentSegment
    # currentSegment may be None before any segment has started
    if cs is not None:
        pairs.extend(sorted(cs.items()))
    for i, (name, count) in enumerate(pairs):
        text = f"{name}: {count}"
        cv2.putText(
            img=counting,
            text=text,
            org=(12, 45 + 40 * i),  # one text line per counter, 40 px apart
            fontFace=cv2.FONT_HERSHEY_SIMPLEX,
            fontScale=1,
            color=0,
            thickness=2
        )
im["counting"] = counting | 27.194444 | 92 | 0.623085 |
import cv2
import numpy as np
def genFilledRegion(height=520, width=720, channelCount=None, dtype=np.uint8, fill_value=0):
shape = [height, width]
if channelCount is not None:
shape.append(channelCount)
return np.full(shape=shape, dtype=dtype, fill_value=fill_value)
def addCounters(im, segmenter):
counting = genFilledRegion(height=200, width=300, fill_value=255)
segmentIndex = segmenter.segmentIndex
pairs = [("Segment", segmentIndex)]
cs = segmenter.currentSegment
if cs is not None:
pairs.extend(sorted(cs.items()))
for i, (name, count) in enumerate(pairs):
text = f"{name}: {count}"
cv2.putText(
img=counting,
text=text,
org=(12, 45 + 40 * i),
fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=1,
color=0,
thickness=2
)
im["counting"] = counting | true | true |
f7ff05fa1947430fe89f554f001570431d4cf2d3 | 141 | py | Python | core_lib/observer/observer_listener.py | shubham-surya/core-lib | 543db80706746a937e5ed16bd50f2de8d58b32e4 | [
"MIT"
] | null | null | null | core_lib/observer/observer_listener.py | shubham-surya/core-lib | 543db80706746a937e5ed16bd50f2de8d58b32e4 | [
"MIT"
] | 9 | 2021-03-11T02:29:17.000Z | 2022-03-22T19:01:18.000Z | core_lib/observer/observer_listener.py | shubham-surya/core-lib | 543db80706746a937e5ed16bd50f2de8d58b32e4 | [
"MIT"
] | 2 | 2022-01-27T11:19:00.000Z | 2022-02-11T11:33:09.000Z | from abc import ABC, abstractmethod
class ObserverListener(ABC):
    """Abstract observer notified with keyed update events."""
    @abstractmethod
    def update(self, key: str, value):
        """Handle the new *value* published under *key*."""
| 14.1 | 38 | 0.680851 | from abc import ABC, abstractmethod
class ObserverListener(ABC):
@abstractmethod
def update(self, key: str, value):
pass
| true | true |
f7ff0628072d6e40053800e8fdcd09acc29b048c | 11,023 | py | Python | frappe/utils/user.py | AKedar21/frappe | 4c9ce1701caea07e595f81414af3a9f219cccb65 | [
"MIT"
] | null | null | null | frappe/utils/user.py | AKedar21/frappe | 4c9ce1701caea07e595f81414af3a9f219cccb65 | [
"MIT"
] | null | null | null | frappe/utils/user.py | AKedar21/frappe | 4c9ce1701caea07e595f81414af3a9f219cccb65 | [
"MIT"
] | null | null | null | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, json
from frappe import _dict
import frappe.share
from frappe.utils import cint
from frappe.boot import get_allowed_reports
from frappe.permissions import get_roles, get_valid_perms
from frappe.core.doctype.domain_settings.domain_settings import get_active_modules
class UserPermissions:
	"""
	A user permission object can be accessed as `frappe.get_user()`

	Lazily builds, from the DocType permission rules, the lists of
	doctypes this user can read / write / create / import / etc.
	"""
	def __init__(self, name=''):
		# target user; defaults to the current session user
		self.defaults = None
		self.name = name or frappe.session.get('user')
		self.roles = []
		# per-right doctype lists; filled lazily by build_permissions()
		self.all_read = []
		self.can_create = []
		self.can_read = []
		self.can_write = []
		self.can_cancel = []
		self.can_delete = []
		self.can_search = []
		self.can_get_report = []
		self.can_import = []
		self.can_export = []
		self.can_print = []
		self.can_email = []
		self.can_set_user_permissions = []
		self.allow_modules = []
		self.in_create = []
		self.setup_user()
	def setup_user(self):
		"""Load the (cached) User document into `self.doc`; skipped during
		install and tests."""
		def get_user_doc():
			user = None
			try:
				user = frappe.get_doc("User", self.name).as_dict()
			except frappe.DoesNotExistError:
				pass
			except Exception as e:
				# tabUser may not exist yet while the site is being installed
				if not frappe.db.is_table_missing(e): raise
			return user
		if not frappe.flags.in_install_db and not frappe.flags.in_test:
			user_doc = frappe.cache().hget("user_doc", self.name, get_user_doc)
			if user_doc:
				self.doc = frappe.get_doc(user_doc)
	def get_roles(self):
		"""get list of roles"""
		if not self.roles:
			self.roles = get_roles(self.name)
		return self.roles
	def build_doctype_map(self):
		"""build map of special doctype properties"""
		active_domains = frappe.get_active_domains()
		self.doctype_map = {}
		for r in frappe.db.sql("""select name, in_create, issingle, istable,
			read_only, restrict_to_domain, module from tabDocType""", as_dict=1):
			# skip doctypes restricted to a domain that is not active
			if (not r.restrict_to_domain) or (r.restrict_to_domain in active_domains):
				self.doctype_map[r['name']] = r
	def build_perm_map(self):
		"""build map of permissions at level 0"""
		self.perm_map = {}
		for r in get_valid_perms():
			dt = r['parent']
			if not dt in self.perm_map:
				self.perm_map[dt] = {}
			for k in frappe.permissions.rights:
				# a right is granted if any applicable rule grants it
				if not self.perm_map[dt].get(k):
					self.perm_map[dt][k] = r.get(k)
	def build_permissions(self):
		"""build lists of what the user can read / write / create
		quirks:
			read_only => Not in Search
			in_create => Not in create
		"""
		self.build_doctype_map()
		self.build_perm_map()
		user_shared = frappe.share.get_shared_doctypes()
		no_list_view_link = []
		active_modules = get_active_modules() or []
		for dt in self.doctype_map:
			dtp = self.doctype_map[dt]
			p = self.perm_map.get(dt, {})
			# a share grants read access even without a permission rule
			if not p.get("read") and (dt in user_shared):
				p["read"] = 1
			if not dtp.get('istable'):
				if p.get('create') and not dtp.get('issingle'):
					if dtp.get('in_create'):
						self.in_create.append(dt)
					else:
						self.can_create.append(dt)
				elif p.get('write'):
					self.can_write.append(dt)
				elif p.get('read'):
					if dtp.get('read_only'):
						# read_only = "User Cannot Search"
						self.all_read.append(dt)
						no_list_view_link.append(dt)
					else:
						self.can_read.append(dt)
			if p.get('cancel'):
				self.can_cancel.append(dt)
			if p.get('delete'):
				self.can_delete.append(dt)
			if (p.get('read') or p.get('write') or p.get('create')):
				if p.get('report'):
					self.can_get_report.append(dt)
				for key in ("import", "export", "print", "email", "set_user_permissions"):
					if p.get(key):
						getattr(self, "can_" + key).append(dt)
				if not dtp.get('istable'):
					if not dtp.get('issingle') and not dtp.get('read_only'):
						self.can_search.append(dt)
				if dtp.get('module') not in self.allow_modules:
					if active_modules and dtp.get('module') not in active_modules:
						pass
					else:
						self.allow_modules.append(dtp.get('module'))
		# higher rights imply lower ones: create => write => read
		self.can_write += self.can_create
		self.can_write += self.in_create
		self.can_read += self.can_write
		self.shared = frappe.db.sql_list("""select distinct share_doctype from `tabDocShare`
			where `user`=%s and `read`=1""", self.name)
		self.can_read = list(set(self.can_read + self.shared))
		self.all_read += self.can_read
		for dt in no_list_view_link:
			if dt in self.can_read:
				self.can_read.remove(dt)
		if "System Manager" in self.get_roles():
			# materialize as a list: a lazy `filter` object (Python 3) would
			# be exhausted after the first iteration, leaving can_import
			# empty on subsequent load_user() calls
			self.can_import = list(filter(lambda d: d in self.can_create,
				frappe.db.sql_list("""select name from `tabDocType` where allow_import = 1""")))
	def get_defaults(self):
		"""Return (and cache) the user's default values."""
		import frappe.defaults
		self.defaults = frappe.defaults.get_defaults(self.name)
		return self.defaults
	# update recent documents
	def update_recent(self, dt, dn):
		"""Push (dt, dn) to the front of the user's recent-documents cache
		(bounded to 20 entries)."""
		rdl = frappe.cache().hget("user_recent", self.name) or []
		new_rd = [dt, dn]
		# drop an existing entry so it moves to the front instead of duplicating
		for i in range(len(rdl)):
			rd = rdl[i]
			if rd==new_rd:
				del rdl[i]
				break
		if len(rdl) > 19:
			rdl = rdl[:19]
		rdl = [new_rd] + rdl
		frappe.cache().hset("user_recent", self.name, rdl)
	def _get(self, key):
		# lazy accessor for any of the permission lists
		if not self.can_read:
			self.build_permissions()
		return getattr(self, key)
	def get_can_read(self):
		"""return list of doctypes that the user can read"""
		if not self.can_read:
			self.build_permissions()
		return self.can_read
	def load_user(self):
		"""Return a dict of user details plus all permission lists, as sent
		to the client on boot."""
		d = frappe.db.sql("""select email, first_name, last_name, creation,
			email_signature, user_type, language, background_image, background_style,
			mute_sounds, send_me_a_copy from tabUser where name = %s""", (self.name,), as_dict=1)[0]
		if not self.can_read:
			self.build_permissions()
		d.name = self.name
		d.recent = json.dumps(frappe.cache().hget("user_recent", self.name) or [])
		d.roles = self.get_roles()
		d.defaults = self.get_defaults()
		# de-duplicate each permission list before sending it out
		for key in ("can_create", "can_write", "can_read", "can_cancel", "can_delete",
			"can_get_report", "allow_modules", "all_read", "can_search",
			"in_create", "can_export", "can_import", "can_print", "can_email",
			"can_set_user_permissions"):
			d[key] = list(set(getattr(self, key)))
		d.all_reports = self.get_all_reports()
		return d
	def get_all_reports(self):
		"""Return the reports this user is allowed to view."""
		return get_allowed_reports()
def get_user_fullname(user):
	"""Return "first_name last_name" for *user*, or '' when unavailable."""
	rows = frappe.db.sql("SELECT CONCAT_WS(' ', first_name, last_name) FROM `tabUser` WHERE name=%s", (user,))
	if rows and rows[0][0]:
		return rows[0][0]
	return ''
def get_fullname_and_avatar(user):
	"""Return a _dict with keys fullname, avatar (user_image) and name."""
	first_name, last_name, avatar, name = frappe.db.get_value("User",
		user, ["first_name", "last_name", "user_image", "name"])
	# drop empty name parts before joining
	name_parts = [part for part in (first_name, last_name) if part]
	return _dict({
		"fullname": " ".join(name_parts),
		"avatar": avatar,
		"name": name
	})
def get_system_managers(only_name=False):
	"""returns all system manager's user details

	Enabled, non-standard users holding the "System Manager" role,
	newest first. With only_name=True returns plain user names, otherwise
	"Full Name <email>" strings suitable for email headers.
	"""
	import email.utils
	from frappe.core.doctype.user.user import STANDARD_USERS
	# one %s placeholder per standard user to exclude (Administrator, Guest, ...)
	system_managers = frappe.db.sql("""SELECT DISTINCT `name`, `creation`,
		CONCAT_WS(' ',
			CASE WHEN `first_name`= '' THEN NULL ELSE `first_name` END,
			CASE WHEN `last_name`= '' THEN NULL ELSE `last_name` END
		) AS fullname
		FROM `tabUser` AS p
		WHERE `docstatus` < 2
		AND `enabled` = 1
		AND `name` NOT IN ({})
		AND exists
			(SELECT *
				FROM `tabHas Role` AS ur
				WHERE ur.parent = p.name
				AND ur.role='System Manager')
		ORDER BY `creation` DESC""".format(", ".join(["%s"]*len(STANDARD_USERS))),
		STANDARD_USERS, as_dict=True)
	if only_name:
		return [p.name for p in system_managers]
	else:
		return [email.utils.formataddr((p.fullname, p.name)) for p in system_managers]
def add_role(user, role):
	"""Grant *role* to *user* via the User document."""
	user_doc = frappe.get_doc("User", user)
	user_doc.add_roles(role)
def add_system_manager(email, first_name=None, last_name=None, send_welcome_email=False, password=None):
	"""Create an enabled System User for *email* and grant it every role
	except Administrator, Guest and All."""
	# add user
	user = frappe.new_doc("User")
	user.update({
		"name": email,
		"email": email,
		"enabled": 1,
		"first_name": first_name or email,
		"last_name": last_name,
		"user_type": "System User",
		"send_welcome_email": 1 if send_welcome_email else 0
	})
	if password:
		user.update({
			"new_password": password
		})
	user.insert()
	# add roles
	roles = frappe.get_all('Role',
		fields=['name'],
		filters={
			'name': ['not in', ('Administrator', 'Guest', 'All')]
		}
	)
	roles = [role.name for role in roles]
	user.add_roles(*roles)
def get_enabled_system_users():
	"""Return all enabled System Users (email, language, name), excluding
	Administrator and Guest."""
	# add more fields if required
	user_filters = {
		'user_type': 'System User',
		'enabled': 1,
		'name': ['not in', ('Administrator', 'Guest')]
	}
	return frappe.get_all('User',
		fields=['email', 'language', 'name'],
		filters=user_filters
	)
def is_website_user():
	"""True when the current session user's type is "Website User"."""
	user_type = frappe.db.get_value('User', frappe.session.user, 'user_type')
	return user_type == "Website User"
def is_system_user(username):
	"""Return the user name when *username* is an enabled System User
	(falsy otherwise)."""
	lookup = {"name": username, "enabled": 1, "user_type": "System User"}
	return frappe.db.get_value("User", lookup)
def get_users():
	"""Return all system users as dicts with full_name, email and an
	is_system_manager flag."""
	from frappe.core.doctype.user.user import get_system_users
	system_managers = frappe.utils.user.get_system_managers(only_name=True)
	return [
		{
			"full_name": frappe.utils.user.get_user_fullname(user),
			"email": user,
			"is_system_manager": 1 if user in system_managers else 0
		}
		for user in get_system_users()
	]
def set_last_active_to_now(user):
	"""Stamp the User record's last_active field with the current time."""
	from frappe.utils import now_datetime
	timestamp = now_datetime()
	frappe.db.set_value("User", user, "last_active", timestamp)
def disable_users(limits=None):
	"""Disable the newest system users exceeding limits['users'], and cap
	simultaneous sessions to fit within that limit. No-op without limits."""
	if not limits:
		return
	if limits.get('users'):
		# the most recently created system manager is always kept enabled
		system_manager = get_system_managers(only_name=True)[-1]
		#exclude system manager from active user list
		active_users = frappe.db.sql_list("""select name from tabUser
			where name not in ('Administrator', 'Guest', %s) and user_type = 'System User' and enabled=1
			order by creation desc""", system_manager)
		user_limit = cint(limits.get('users')) - 1
		if len(active_users) > user_limit:
			# if allowed user limit 1 then deactivate all additional users
			# else extract additional user from active user list and deactivate them
			if cint(limits.get('users')) != 1:
				active_users = active_users[:-1 * user_limit]
			for user in active_users:
				frappe.db.set_value("User", user, 'enabled', 0)
		from frappe.core.doctype.user.user import get_total_users
		if get_total_users() > cint(limits.get('users')):
			reset_simultaneous_sessions(cint(limits.get('users')))
	frappe.db.commit()
def reset_simultaneous_sessions(user_limit):
	"""Walk enabled system users (newest first) and shrink their
	simultaneous_sessions so the total stays within *user_limit*."""
	for user in frappe.db.sql("""select name, simultaneous_sessions from tabUser
		where name not in ('Administrator', 'Guest') and user_type = 'System User' and enabled=1
		order by creation desc""", as_dict=1):
		if user.simultaneous_sessions < user_limit:
			# this user's allotment fits; consume it from the budget
			user_limit = user_limit - user.simultaneous_sessions
		else:
			# over budget: reduce this user to a single session
			frappe.db.set_value("User", user.name, "simultaneous_sessions", 1)
			user_limit = user_limit - 1
def get_link_to_reset_password(user):
	"""Return {'link': reset-password URL} for *user*, but only while site
	setup is incomplete; otherwise the link stays empty."""
	link = ''
	if not cint(frappe.db.get_single_value('System Settings', 'setup_complete')):
		user = frappe.get_doc("User", user)
		# generate the key without emailing it to the user
		link = user.reset_password(send_email=False)
		frappe.db.commit()
	return {
		'link': link
	}
| 28.856021 | 111 | 0.692008 |
from __future__ import unicode_literals
import frappe, json
from frappe import _dict
import frappe.share
from frappe.utils import cint
from frappe.boot import get_allowed_reports
from frappe.permissions import get_roles, get_valid_perms
from frappe.core.doctype.domain_settings.domain_settings import get_active_modules
class UserPermissions:
def __init__(self, name=''):
self.defaults = None
self.name = name or frappe.session.get('user')
self.roles = []
self.all_read = []
self.can_create = []
self.can_read = []
self.can_write = []
self.can_cancel = []
self.can_delete = []
self.can_search = []
self.can_get_report = []
self.can_import = []
self.can_export = []
self.can_print = []
self.can_email = []
self.can_set_user_permissions = []
self.allow_modules = []
self.in_create = []
self.setup_user()
def setup_user(self):
def get_user_doc():
user = None
try:
user = frappe.get_doc("User", self.name).as_dict()
except frappe.DoesNotExistError:
pass
except Exception as e:
if not frappe.db.is_table_missing(e): raise
return user
if not frappe.flags.in_install_db and not frappe.flags.in_test:
user_doc = frappe.cache().hget("user_doc", self.name, get_user_doc)
if user_doc:
self.doc = frappe.get_doc(user_doc)
def get_roles(self):
if not self.roles:
self.roles = get_roles(self.name)
return self.roles
def build_doctype_map(self):
active_domains = frappe.get_active_domains()
self.doctype_map = {}
for r in frappe.db.sql("""select name, in_create, issingle, istable,
read_only, restrict_to_domain, module from tabDocType""", as_dict=1):
if (not r.restrict_to_domain) or (r.restrict_to_domain in active_domains):
self.doctype_map[r['name']] = r
def build_perm_map(self):
self.perm_map = {}
for r in get_valid_perms():
dt = r['parent']
if not dt in self.perm_map:
self.perm_map[dt] = {}
for k in frappe.permissions.rights:
if not self.perm_map[dt].get(k):
self.perm_map[dt][k] = r.get(k)
def build_permissions(self):
self.build_doctype_map()
self.build_perm_map()
user_shared = frappe.share.get_shared_doctypes()
no_list_view_link = []
active_modules = get_active_modules() or []
for dt in self.doctype_map:
dtp = self.doctype_map[dt]
p = self.perm_map.get(dt, {})
if not p.get("read") and (dt in user_shared):
p["read"] = 1
if not dtp.get('istable'):
if p.get('create') and not dtp.get('issingle'):
if dtp.get('in_create'):
self.in_create.append(dt)
else:
self.can_create.append(dt)
elif p.get('write'):
self.can_write.append(dt)
elif p.get('read'):
if dtp.get('read_only'):
self.all_read.append(dt)
no_list_view_link.append(dt)
else:
self.can_read.append(dt)
if p.get('cancel'):
self.can_cancel.append(dt)
if p.get('delete'):
self.can_delete.append(dt)
if (p.get('read') or p.get('write') or p.get('create')):
if p.get('report'):
self.can_get_report.append(dt)
for key in ("import", "export", "print", "email", "set_user_permissions"):
if p.get(key):
getattr(self, "can_" + key).append(dt)
if not dtp.get('istable'):
if not dtp.get('issingle') and not dtp.get('read_only'):
self.can_search.append(dt)
if dtp.get('module') not in self.allow_modules:
if active_modules and dtp.get('module') not in active_modules:
pass
else:
self.allow_modules.append(dtp.get('module'))
self.can_write += self.can_create
self.can_write += self.in_create
self.can_read += self.can_write
self.shared = frappe.db.sql_list("""select distinct share_doctype from `tabDocShare`
where `user`=%s and `read`=1""", self.name)
self.can_read = list(set(self.can_read + self.shared))
self.all_read += self.can_read
for dt in no_list_view_link:
if dt in self.can_read:
self.can_read.remove(dt)
if "System Manager" in self.get_roles():
self.can_import = filter(lambda d: d in self.can_create,
frappe.db.sql_list("""select name from `tabDocType` where allow_import = 1"""))
def get_defaults(self):
import frappe.defaults
self.defaults = frappe.defaults.get_defaults(self.name)
return self.defaults
def update_recent(self, dt, dn):
rdl = frappe.cache().hget("user_recent", self.name) or []
new_rd = [dt, dn]
for i in range(len(rdl)):
rd = rdl[i]
if rd==new_rd:
del rdl[i]
break
if len(rdl) > 19:
rdl = rdl[:19]
rdl = [new_rd] + rdl
frappe.cache().hset("user_recent", self.name, rdl)
def _get(self, key):
if not self.can_read:
self.build_permissions()
return getattr(self, key)
def get_can_read(self):
if not self.can_read:
self.build_permissions()
return self.can_read
def load_user(self):
d = frappe.db.sql("""select email, first_name, last_name, creation,
email_signature, user_type, language, background_image, background_style,
mute_sounds, send_me_a_copy from tabUser where name = %s""", (self.name,), as_dict=1)[0]
if not self.can_read:
self.build_permissions()
d.name = self.name
d.recent = json.dumps(frappe.cache().hget("user_recent", self.name) or [])
d.roles = self.get_roles()
d.defaults = self.get_defaults()
for key in ("can_create", "can_write", "can_read", "can_cancel", "can_delete",
"can_get_report", "allow_modules", "all_read", "can_search",
"in_create", "can_export", "can_import", "can_print", "can_email",
"can_set_user_permissions"):
d[key] = list(set(getattr(self, key)))
d.all_reports = self.get_all_reports()
return d
def get_all_reports(self):
return get_allowed_reports()
def get_user_fullname(user):
fullname = frappe.db.sql("SELECT CONCAT_WS(' ', first_name, last_name) FROM `tabUser` WHERE name=%s", (user,))
return fullname and fullname[0][0] or ''
def get_fullname_and_avatar(user):
first_name, last_name, avatar, name = frappe.db.get_value("User",
user, ["first_name", "last_name", "user_image", "name"])
return _dict({
"fullname": " ".join(filter(None, [first_name, last_name])),
"avatar": avatar,
"name": name
})
def get_system_managers(only_name=False):
import email.utils
from frappe.core.doctype.user.user import STANDARD_USERS
system_managers = frappe.db.sql("""SELECT DISTINCT `name`, `creation`,
CONCAT_WS(' ',
CASE WHEN `first_name`= '' THEN NULL ELSE `first_name` END,
CASE WHEN `last_name`= '' THEN NULL ELSE `last_name` END
) AS fullname
FROM `tabUser` AS p
WHERE `docstatus` < 2
AND `enabled` = 1
AND `name` NOT IN ({})
AND exists
(SELECT *
FROM `tabHas Role` AS ur
WHERE ur.parent = p.name
AND ur.role='System Manager')
ORDER BY `creation` DESC""".format(", ".join(["%s"]*len(STANDARD_USERS))),
STANDARD_USERS, as_dict=True)
if only_name:
return [p.name for p in system_managers]
else:
return [email.utils.formataddr((p.fullname, p.name)) for p in system_managers]
def add_role(user, role):
frappe.get_doc("User", user).add_roles(role)
def add_system_manager(email, first_name=None, last_name=None, send_welcome_email=False, password=None):
user = frappe.new_doc("User")
user.update({
"name": email,
"email": email,
"enabled": 1,
"first_name": first_name or email,
"last_name": last_name,
"user_type": "System User",
"send_welcome_email": 1 if send_welcome_email else 0
})
if password:
user.update({
"new_password": password
})
user.insert()
roles = frappe.get_all('Role',
fields=['name'],
filters={
'name': ['not in', ('Administrator', 'Guest', 'All')]
}
)
roles = [role.name for role in roles]
user.add_roles(*roles)
def get_enabled_system_users():
return frappe.get_all('User',
fields=['email', 'language', 'name'],
filters={
'user_type': 'System User',
'enabled': 1,
'name': ['not in', ('Administrator', 'Guest')]
}
)
def is_website_user():
return frappe.db.get_value('User', frappe.session.user, 'user_type') == "Website User"
def is_system_user(username):
return frappe.db.get_value("User", {"name": username, "enabled": 1, "user_type": "System User"})
def get_users():
from frappe.core.doctype.user.user import get_system_users
users = []
system_managers = frappe.utils.user.get_system_managers(only_name=True)
for user in get_system_users():
users.append({
"full_name": frappe.utils.user.get_user_fullname(user),
"email": user,
"is_system_manager": 1 if (user in system_managers) else 0
})
return users
def set_last_active_to_now(user):
from frappe.utils import now_datetime
frappe.db.set_value("User", user, "last_active", now_datetime())
def disable_users(limits=None):
if not limits:
return
if limits.get('users'):
system_manager = get_system_managers(only_name=True)[-1]
active_users = frappe.db.sql_list("""select name from tabUser
where name not in ('Administrator', 'Guest', %s) and user_type = 'System User' and enabled=1
order by creation desc""", system_manager)
user_limit = cint(limits.get('users')) - 1
if len(active_users) > user_limit:
if cint(limits.get('users')) != 1:
active_users = active_users[:-1 * user_limit]
for user in active_users:
frappe.db.set_value("User", user, 'enabled', 0)
from frappe.core.doctype.user.user import get_total_users
if get_total_users() > cint(limits.get('users')):
reset_simultaneous_sessions(cint(limits.get('users')))
frappe.db.commit()
def reset_simultaneous_sessions(user_limit):
for user in frappe.db.sql("""select name, simultaneous_sessions from tabUser
where name not in ('Administrator', 'Guest') and user_type = 'System User' and enabled=1
order by creation desc""", as_dict=1):
if user.simultaneous_sessions < user_limit:
user_limit = user_limit - user.simultaneous_sessions
else:
frappe.db.set_value("User", user.name, "simultaneous_sessions", 1)
user_limit = user_limit - 1
def get_link_to_reset_password(user):
link = ''
if not cint(frappe.db.get_single_value('System Settings', 'setup_complete')):
user = frappe.get_doc("User", user)
link = user.reset_password(send_email=False)
frappe.db.commit()
return {
'link': link
}
| true | true |
f7ff06b7c5f070e8226a4692d732ebf30068a5e2 | 6,523 | py | Python | src/modules/blogo_colours.py | mlewis109/blogo | eae7eae4363f0e93f1c40b3096ab51ad90eed8f7 | [
"Unlicense"
] | 2 | 2021-12-16T04:16:53.000Z | 2022-03-23T16:55:18.000Z | src/modules/blogo_colours.py | mlewis109/blogo | eae7eae4363f0e93f1c40b3096ab51ad90eed8f7 | [
"Unlicense"
] | null | null | null | src/modules/blogo_colours.py | mlewis109/blogo | eae7eae4363f0e93f1c40b3096ab51ad90eed8f7 | [
"Unlicense"
] | null | null | null | def all_colours():
colours = {
"indianred":(0.8039,0.3608,0.3608,1),
"lightcoral":(0.9412,0.502,0.502,1),
"salmon":(0.9804,0.502,0.4471,1),
"darksalmon":(0.9137,0.5882,0.4784,1),
"lightsalmon":(1,0.6275,0.4784,1),
"crimson":(0.8627,0.0784,0.2353,1),
"red":(1,0,0,1),
"firebrick":(0.698,0.1333,0.1333,1),
"darkred":(0.5451,0,0,1),
"pink":(1,0.7529,0.7961,1),
"lightpink":(1,0.7137,0.7569,1),
"hotpink":(1,0.4118,0.7059,1),
"deeppink":(1,0.0784,0.5765,1),
"mediumvioletred":(0.7804,0.0824,0.5216,1),
"palevioletred":(0.8588,0.4392,0.5765,1),
"lightsalmon":(1,0.6275,0.4784,1),
"coral":(1,0.498,0.3137,1),
"tomato":(1,0.3882,0.2784,1),
"orangered":(1,0.2706,0,1),
"darkorange":(1,0.549,0,1),
"orange":(1,0.6471,0,1),
"gold":(1,0.8431,0,1),
"yellow":(1,1,0,1),
"lightyellow":(1,1,0.8784,1),
"lemonchiffon":(1,0.9804,0.8039,1),
"lightgoldenrodyellow":(0.9804,0.9804,0.8235,1),
"papayawhip":(1,0.9373,0.8353,1),
"moccasin":(1,0.8941,0.7098,1),
"peachpuff":(1,0.8549,0.7255,1),
"palegoldenrod":(0.9333,0.9098,0.6667,1),
"khaki":(0.9412,0.902,0.549,1),
"darkkhaki":(0.7412,0.7176,0.4196,1),
"lavender":(0.902,0.902,0.9804,1),
"thistle":(0.8471,0.749,0.8471,1),
"plum":(0.8667,0.6275,0.8667,1),
"violet":(0.9333,0.5098,0.9333,1),
"orchid":(0.8549,0.4392,0.8392,1),
"fuchsia":(1,0,1,1),
"magenta":(1,0,1,1),
"mediumorchid":(0.7294,0.3333,0.8275,1),
"mediumpurple":(0.5765,0.4392,0.8588,1),
"rebeccapurple":(0.4,0.2,0.6,1),
"blueviolet":(0.5412,0.1686,0.8863,1),
"darkviolet":(0.5804,0,0.8275,1),
"darkorchid":(0.6,0.1961,0.8,1),
"darkmagenta":(0.5451,0,0.5451,1),
"purple":(0.502,0,0.502,1),
"indigo":(0.2941,0,0.5098,1),
"slateblue":(0.4157,0.3529,0.8039,1),
"darkslateblue":(0.2824,0.2392,0.5451,1),
"mediumslateblue":(0.4824,0.4078,0.9333,1),
"greenyellow":(0.6784,1,0.1843,1),
"chartreuse":(0.498,1,0,1),
"lawngreen":(0.4863,0.9882,0,1),
"lime":(0,1,0,1),
"limegreen":(0.1961,0.8039,0.1961,1),
"palegreen":(0.5961,0.9843,0.5961,1),
"lightgreen":(0.5647,0.9333,0.5647,1),
"mediumspringgreen":(0,0.9804,0.6039,1),
"springgreen":(0,1,0.498,1),
"mediumseagreen":(0.2353,0.702,0.4431,1),
"seagreen":(0.1804,0.5451,0.3412,1),
"forestgreen":(0.1333,0.5451,0.1333,1),
"green":(0,0.502,0,1),
"darkgreen":(0,0.3922,0,1),
"yellowgreen":(0.6039,0.8039,0.1961,1),
"olivedrab":(0.4196,0.5569,0.1373,1),
"olive":(0.502,0.502,0,1),
"darkolivegreen":(0.3333,0.4196,0.1843,1),
"mediumaquamarine":(0.4,0.8039,0.6667,1),
"darkseagreen":(0.5608,0.7373,0.5451,1),
"lightseagreen":(0.1255,0.698,0.6667,1),
"darkcyan":(0,0.5451,0.5451,1),
"teal":(0,0.502,0.502,1),
"aqua":(0,1,1,1),
"cyan":(0,1,1,1),
"lightcyan":(0.8784,1,1,1),
"paleturquoise":(0.6863,0.9333,0.9333,1),
"aquamarine":(0.498,1,0.8314,1),
"turquoise":(0.251,0.8784,0.8157,1),
"mediumturquoise":(0.2824,0.8196,0.8,1),
"darkturquoise":(0,0.8078,0.8196,1),
"cadetblue":(0.3725,0.6196,0.6275,1),
"steelblue":(0.2745,0.5098,0.7059,1),
"lightsteelblue":(0.6902,0.7686,0.8706,1),
"powderblue":(0.6902,0.8784,0.902,1),
"lightblue":(0.6784,0.8471,0.902,1),
"skyblue":(0.5294,0.8078,0.9216,1),
"lightskyblue":(0.5294,0.8078,0.9804,1),
"deepskyblue":(0,0.749,1,1),
"dodgerblue":(0.1176,0.5647,1,1),
"cornflowerblue":(0.3922,0.5843,0.9294,1),
"mediumslateblue":(0.4824,0.4078,0.9333,1),
"royalblue":(0.2549,0.4118,0.8824,1),
"blue":(0,0,1,1),
"mediumblue":(0,0,0.8039,1),
"darkblue":(0,0,0.5451,1),
"navy":(0,0,0.502,1),
"midnightblue":(0.098,0.098,0.4392,1),
"cornsilk":(1,0.9725,0.8627,1),
"blanchedalmond":(1,0.9216,0.8039,1),
"bisque":(1,0.8941,0.7686,1),
"navajowhite":(1,0.8706,0.6784,1),
"wheat":(0.9608,0.8706,0.702,1),
"burlywood":(0.8706,0.7216,0.5294,1),
"tan":(0.8235,0.7059,0.549,1),
"rosybrown":(0.7373,0.5608,0.5608,1),
"sandybrown":(0.9569,0.6431,0.3765,1),
"goldenrod":(0.8549,0.6471,0.1255,1),
"darkgoldenrod":(0.7216,0.5255,0.0431,1),
"peru":(0.8039,0.5216,0.2471,1),
"chocolate":(0.8235,0.4118,0.1176,1),
"saddlebrown":(0.5451,0.2706,0.0745,1),
"sienna":(0.6275,0.3216,0.1765,1),
"brown":(0.6471,0.1647,0.1647,1),
"maroon":(0.502,0,0,1),
"white":(1,1,1,1),
"snow":(1,0.9804,0.9804,1),
"honeydew":(0.9412,1,0.9412,1),
"mintcream":(0.9608,1,0.9804,1),
"azure":(0.9412,1,1,1),
"aliceblue":(0.9412,0.9725,1,1),
"ghostwhite":(0.9725,0.9725,1,1),
"whitesmoke":(0.9608,0.9608,0.9608,1),
"seashell":(1,0.9608,0.9333,1),
"beige":(0.9608,0.9608,0.8627,1),
"oldlace":(0.9922,0.9608,0.902,1),
"floralwhite":(1,0.9804,0.9412,1),
"ivory":(1,1,0.9412,1),
"antiquewhite":(0.9804,0.9216,0.8431,1),
"linen":(0.9804,0.9412,0.902,1),
"lavenderblush":(1,0.9412,0.9608,1),
"mistyrose":(1,0.8941,0.8824,1),
"gainsboro":(0.8627,0.8627,0.8627,1),
"lightgray":(0.8275,0.8275,0.8275,1),
"lightgrey":(0.8275,0.8275,0.8275,1),
"silver":(0.7529,0.7529,0.7529,1),
"darkgray":(0.6627,0.6627,0.6627,1),
"darkgrey":(0.6627,0.6627,0.6627,1),
"gray":(0.502,0.502,0.502,1),
"grey":(0.502,0.502,0.502,1),
"dimgray":(0.4118,0.4118,0.4118,1),
"dimgrey":(0.4118,0.4118,0.4118,1),
"lightslategray":(0.4667,0.5333,0.6,1),
"lightslategrey":(0.4667,0.5333,0.6,1),
"slategray":(0.4392,0.502,0.5647,1),
"slategrey":(0.4392,0.502,0.5647,1),
"darkslategray":(0.1843,0.3098,0.3098,1),
"darkslategrey":(0.1843,0.3098,0.3098,1),
"black":(0,0,0,1)
}
return colours
| 41.814103 | 57 | 0.505289 | def all_colours():
colours = {
"indianred":(0.8039,0.3608,0.3608,1),
"lightcoral":(0.9412,0.502,0.502,1),
"salmon":(0.9804,0.502,0.4471,1),
"darksalmon":(0.9137,0.5882,0.4784,1),
"lightsalmon":(1,0.6275,0.4784,1),
"crimson":(0.8627,0.0784,0.2353,1),
"red":(1,0,0,1),
"firebrick":(0.698,0.1333,0.1333,1),
"darkred":(0.5451,0,0,1),
"pink":(1,0.7529,0.7961,1),
"lightpink":(1,0.7137,0.7569,1),
"hotpink":(1,0.4118,0.7059,1),
"deeppink":(1,0.0784,0.5765,1),
"mediumvioletred":(0.7804,0.0824,0.5216,1),
"palevioletred":(0.8588,0.4392,0.5765,1),
"lightsalmon":(1,0.6275,0.4784,1),
"coral":(1,0.498,0.3137,1),
"tomato":(1,0.3882,0.2784,1),
"orangered":(1,0.2706,0,1),
"darkorange":(1,0.549,0,1),
"orange":(1,0.6471,0,1),
"gold":(1,0.8431,0,1),
"yellow":(1,1,0,1),
"lightyellow":(1,1,0.8784,1),
"lemonchiffon":(1,0.9804,0.8039,1),
"lightgoldenrodyellow":(0.9804,0.9804,0.8235,1),
"papayawhip":(1,0.9373,0.8353,1),
"moccasin":(1,0.8941,0.7098,1),
"peachpuff":(1,0.8549,0.7255,1),
"palegoldenrod":(0.9333,0.9098,0.6667,1),
"khaki":(0.9412,0.902,0.549,1),
"darkkhaki":(0.7412,0.7176,0.4196,1),
"lavender":(0.902,0.902,0.9804,1),
"thistle":(0.8471,0.749,0.8471,1),
"plum":(0.8667,0.6275,0.8667,1),
"violet":(0.9333,0.5098,0.9333,1),
"orchid":(0.8549,0.4392,0.8392,1),
"fuchsia":(1,0,1,1),
"magenta":(1,0,1,1),
"mediumorchid":(0.7294,0.3333,0.8275,1),
"mediumpurple":(0.5765,0.4392,0.8588,1),
"rebeccapurple":(0.4,0.2,0.6,1),
"blueviolet":(0.5412,0.1686,0.8863,1),
"darkviolet":(0.5804,0,0.8275,1),
"darkorchid":(0.6,0.1961,0.8,1),
"darkmagenta":(0.5451,0,0.5451,1),
"purple":(0.502,0,0.502,1),
"indigo":(0.2941,0,0.5098,1),
"slateblue":(0.4157,0.3529,0.8039,1),
"darkslateblue":(0.2824,0.2392,0.5451,1),
"mediumslateblue":(0.4824,0.4078,0.9333,1),
"greenyellow":(0.6784,1,0.1843,1),
"chartreuse":(0.498,1,0,1),
"lawngreen":(0.4863,0.9882,0,1),
"lime":(0,1,0,1),
"limegreen":(0.1961,0.8039,0.1961,1),
"palegreen":(0.5961,0.9843,0.5961,1),
"lightgreen":(0.5647,0.9333,0.5647,1),
"mediumspringgreen":(0,0.9804,0.6039,1),
"springgreen":(0,1,0.498,1),
"mediumseagreen":(0.2353,0.702,0.4431,1),
"seagreen":(0.1804,0.5451,0.3412,1),
"forestgreen":(0.1333,0.5451,0.1333,1),
"green":(0,0.502,0,1),
"darkgreen":(0,0.3922,0,1),
"yellowgreen":(0.6039,0.8039,0.1961,1),
"olivedrab":(0.4196,0.5569,0.1373,1),
"olive":(0.502,0.502,0,1),
"darkolivegreen":(0.3333,0.4196,0.1843,1),
"mediumaquamarine":(0.4,0.8039,0.6667,1),
"darkseagreen":(0.5608,0.7373,0.5451,1),
"lightseagreen":(0.1255,0.698,0.6667,1),
"darkcyan":(0,0.5451,0.5451,1),
"teal":(0,0.502,0.502,1),
"aqua":(0,1,1,1),
"cyan":(0,1,1,1),
"lightcyan":(0.8784,1,1,1),
"paleturquoise":(0.6863,0.9333,0.9333,1),
"aquamarine":(0.498,1,0.8314,1),
"turquoise":(0.251,0.8784,0.8157,1),
"mediumturquoise":(0.2824,0.8196,0.8,1),
"darkturquoise":(0,0.8078,0.8196,1),
"cadetblue":(0.3725,0.6196,0.6275,1),
"steelblue":(0.2745,0.5098,0.7059,1),
"lightsteelblue":(0.6902,0.7686,0.8706,1),
"powderblue":(0.6902,0.8784,0.902,1),
"lightblue":(0.6784,0.8471,0.902,1),
"skyblue":(0.5294,0.8078,0.9216,1),
"lightskyblue":(0.5294,0.8078,0.9804,1),
"deepskyblue":(0,0.749,1,1),
"dodgerblue":(0.1176,0.5647,1,1),
"cornflowerblue":(0.3922,0.5843,0.9294,1),
"mediumslateblue":(0.4824,0.4078,0.9333,1),
"royalblue":(0.2549,0.4118,0.8824,1),
"blue":(0,0,1,1),
"mediumblue":(0,0,0.8039,1),
"darkblue":(0,0,0.5451,1),
"navy":(0,0,0.502,1),
"midnightblue":(0.098,0.098,0.4392,1),
"cornsilk":(1,0.9725,0.8627,1),
"blanchedalmond":(1,0.9216,0.8039,1),
"bisque":(1,0.8941,0.7686,1),
"navajowhite":(1,0.8706,0.6784,1),
"wheat":(0.9608,0.8706,0.702,1),
"burlywood":(0.8706,0.7216,0.5294,1),
"tan":(0.8235,0.7059,0.549,1),
"rosybrown":(0.7373,0.5608,0.5608,1),
"sandybrown":(0.9569,0.6431,0.3765,1),
"goldenrod":(0.8549,0.6471,0.1255,1),
"darkgoldenrod":(0.7216,0.5255,0.0431,1),
"peru":(0.8039,0.5216,0.2471,1),
"chocolate":(0.8235,0.4118,0.1176,1),
"saddlebrown":(0.5451,0.2706,0.0745,1),
"sienna":(0.6275,0.3216,0.1765,1),
"brown":(0.6471,0.1647,0.1647,1),
"maroon":(0.502,0,0,1),
"white":(1,1,1,1),
"snow":(1,0.9804,0.9804,1),
"honeydew":(0.9412,1,0.9412,1),
"mintcream":(0.9608,1,0.9804,1),
"azure":(0.9412,1,1,1),
"aliceblue":(0.9412,0.9725,1,1),
"ghostwhite":(0.9725,0.9725,1,1),
"whitesmoke":(0.9608,0.9608,0.9608,1),
"seashell":(1,0.9608,0.9333,1),
"beige":(0.9608,0.9608,0.8627,1),
"oldlace":(0.9922,0.9608,0.902,1),
"floralwhite":(1,0.9804,0.9412,1),
"ivory":(1,1,0.9412,1),
"antiquewhite":(0.9804,0.9216,0.8431,1),
"linen":(0.9804,0.9412,0.902,1),
"lavenderblush":(1,0.9412,0.9608,1),
"mistyrose":(1,0.8941,0.8824,1),
"gainsboro":(0.8627,0.8627,0.8627,1),
"lightgray":(0.8275,0.8275,0.8275,1),
"lightgrey":(0.8275,0.8275,0.8275,1),
"silver":(0.7529,0.7529,0.7529,1),
"darkgray":(0.6627,0.6627,0.6627,1),
"darkgrey":(0.6627,0.6627,0.6627,1),
"gray":(0.502,0.502,0.502,1),
"grey":(0.502,0.502,0.502,1),
"dimgray":(0.4118,0.4118,0.4118,1),
"dimgrey":(0.4118,0.4118,0.4118,1),
"lightslategray":(0.4667,0.5333,0.6,1),
"lightslategrey":(0.4667,0.5333,0.6,1),
"slategray":(0.4392,0.502,0.5647,1),
"slategrey":(0.4392,0.502,0.5647,1),
"darkslategray":(0.1843,0.3098,0.3098,1),
"darkslategrey":(0.1843,0.3098,0.3098,1),
"black":(0,0,0,1)
}
return colours
| true | true |
f7ff06b88d5280b788b13c5b425763edcbe6b864 | 303 | py | Python | data/multilingual/Latn.EUS/Sans_12/pdf_to_json_test_Latn.EUS_Sans_12.py | antoinecarme/pdf_to_json_tests | d57a024fde862e698d916a1178f285883d7a3b2f | [
"BSD-3-Clause"
] | 1 | 2021-09-19T19:47:35.000Z | 2021-09-19T19:47:35.000Z | data/multilingual/Latn.EUS/Sans_12/pdf_to_json_test_Latn.EUS_Sans_12.py | antoinecarme/pdf_to_json_tests | d57a024fde862e698d916a1178f285883d7a3b2f | [
"BSD-3-Clause"
] | null | null | null | data/multilingual/Latn.EUS/Sans_12/pdf_to_json_test_Latn.EUS_Sans_12.py | antoinecarme/pdf_to_json_tests | d57a024fde862e698d916a1178f285883d7a3b2f | [
"BSD-3-Clause"
] | null | null | null | import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.EUS/Sans_12/udhr_Latn.EUS_Sans_12.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
| 30.3 | 73 | 0.811881 | import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.EUS/Sans_12/udhr_Latn.EUS_Sans_12.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
| true | true |
f7ff07662b3e96ced8491b8279428f96107213e1 | 743 | py | Python | orange3/Orange/preprocess/setup.py | rgschmitz1/BioDepot-workflow-builder | f74d904eeaf91ec52ec9b703d9fb38e9064e5a66 | [
"MIT"
] | 54 | 2017-01-08T17:21:49.000Z | 2021-11-02T08:46:07.000Z | orange3/Orange/preprocess/setup.py | Synthia-3/BioDepot-workflow-builder | 4ee93abe2d79465755e82a145af3b6a6e1e79fd4 | [
"MIT"
] | 22 | 2017-03-28T06:03:14.000Z | 2021-07-28T05:43:55.000Z | orange3/Orange/preprocess/setup.py | Synthia-3/BioDepot-workflow-builder | 4ee93abe2d79465755e82a145af3b6a6e1e79fd4 | [
"MIT"
] | 21 | 2017-01-26T21:12:09.000Z | 2022-01-31T21:34:59.000Z | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD Style.
import os
import numpy
def configuration(parent_package="", top_path=None):
from numpy.distutils.misc_util import Configuration
libraries = []
if os.name == "posix":
libraries.append("m")
config = Configuration("preprocess", parent_package, top_path)
for source in ("_discretize.c", "_relieff.cpp"):
config.add_extension(
source.rsplit(".", 1)[0],
sources=[source],
include_dirs=[numpy.get_include()],
libraries=libraries,
)
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration(top_path="").todict())
| 24.766667 | 66 | 0.644684 |
import os
import numpy
def configuration(parent_package="", top_path=None):
from numpy.distutils.misc_util import Configuration
libraries = []
if os.name == "posix":
libraries.append("m")
config = Configuration("preprocess", parent_package, top_path)
for source in ("_discretize.c", "_relieff.cpp"):
config.add_extension(
source.rsplit(".", 1)[0],
sources=[source],
include_dirs=[numpy.get_include()],
libraries=libraries,
)
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration(top_path="").todict())
| true | true |
f7ff0b9e6832e4686cd6461296f8f65650340966 | 46,343 | py | Python | calm/dsl/providers/plugins/azure_vm/main.py | tuxtof/calm-dsl | 5af67435d8304b97e170a690068f2d5975e9bfe6 | [
"Apache-2.0"
] | 37 | 2019-12-23T15:23:20.000Z | 2022-03-15T11:12:11.000Z | calm/dsl/providers/plugins/azure_vm/main.py | tuxtof/calm-dsl | 5af67435d8304b97e170a690068f2d5975e9bfe6 | [
"Apache-2.0"
] | 144 | 2020-03-09T11:22:09.000Z | 2022-03-28T21:34:09.000Z | calm/dsl/providers/plugins/azure_vm/main.py | abhijeetkaurav1st/calm-dsl | 6487a896967b3fd667b9320e2ad3a397c9960497 | [
"Apache-2.0"
] | 46 | 2020-01-23T14:28:04.000Z | 2022-03-09T04:17:10.000Z | import click
from ruamel import yaml
from distutils.version import LooseVersion as LV
from calm.dsl.api import get_resource_api, get_api_client
from calm.dsl.providers import get_provider_interface
from calm.dsl.store import Version
from .constants import AZURE as azure
Provider = get_provider_interface()
class AzureVmProvider(Provider):
    """Provider plugin registration for Azure VM substrates.

    Registers itself with the provider interface under type ``AZURE_VM``
    and points at the jinja2 template used to render provider specs.
    """

    provider_type = "AZURE_VM"
    package_name = __name__
    spec_template_file = "azure_vm_provider_spec.yaml.jinja2"

    @classmethod
    def create_spec(cls):
        """Interactively build an Azure VM provider spec using the API client."""
        create_spec(get_api_client())

    @classmethod
    def get_api_obj(cls):
        """Return an ``Azure`` helper bound to the current client connection."""
        return Azure(get_api_client().connection)
class Azure:
    """Thin wrapper over the Calm API's Azure provider list endpoints.

    Every public method issues a filtered ``list`` call against one
    provider-specific resource endpoint and reshapes the response into
    plain names, name lists, or name->value maps for interactive use.
    """

    def __init__(self, connection):
        # Calm API connection used to construct resource API objects.
        self.connection = connection

    def _list_entities(self, resource_type, filter_str):
        """Issue a filtered list call and return the response "entities" list.

        Args:
            resource_type: endpoint constant from the AZURE constants module.
            filter_str: value for the ``filter`` key of the request payload.

        Raises:
            Exception: if the API reports an error (code + error message).
        """
        Obj = get_resource_api(resource_type, self.connection)
        res, err = Obj.list({"filter": filter_str})
        if err:
            raise Exception("[{}] - {}".format(err["code"], err["error"]))
        return res.json()["entities"]

    def _entity_names(self, resource_type, filter_str):
        """Return ``status.name`` of every entity listed for the filter."""
        return [
            entity["status"]["name"]
            for entity in self._list_entities(resource_type, filter_str)
        ]

    def resource_groups(self, account_id):
        """Return the names of resource groups visible to the account."""
        return self._entity_names(
            azure.RESOURCE_GROUPS, "account_uuid=={};".format(account_id)
        )

    def availability_sets(self, account_id, resource_group):
        """Return {availability-set name: id} for the given resource group."""
        entities = self._list_entities(
            azure.AVAILABILTY_SETS,
            "account_uuid=={};resource_group=={}".format(account_id, resource_group),
        )
        return {
            entity["status"]["resources"]["name"]: entity["status"]["resources"]["id"]
            for entity in entities
        }

    def locations(self, account_id):
        """Return {display name: internal name} of locations for the account."""
        entities = self._list_entities(
            azure.LOCATIONS, "account_uuid=={};".format(account_id)
        )
        return {
            entity["status"]["resources"]["displayName"]: entity["status"][
                "resources"
            ]["name"]
            for entity in entities
        }

    def availability_zones(self, account_id, resource_group, location):
        """Return {zone name: zone value} for the resource group + location.

        Entities without a "zones" key contribute nothing to the result.
        """
        entities = self._list_entities(
            azure.AVAILABILITY_ZONES,
            "account_uuid=={};resource_group=={};location=={}".format(
                account_id, resource_group, location
            ),
        )
        name_value_map = {}
        for entity in entities:
            for zone in entity["status"]["resources"].get("zones", []):
                name_value_map[zone["name"]] = zone["value"]
        return name_value_map

    def hardware_profiles(self, account_id, location):
        """Return {vm-size name: max data disk count} for the location."""
        entities = self._list_entities(
            azure.VM_SIZES,
            "account_uuid=={};location=={}".format(account_id, location),
        )
        return {
            entity["status"]["resources"]["name"]: entity["status"]["resources"][
                "maxDataDiskCount"
            ]
            for entity in entities
        }

    def custom_images(self, account_id, location):
        """Return {image name: image id} of subscription (custom) images."""
        entities = self._list_entities(
            azure.SUBSCRIPTION_IMAGES,
            "account_uuid=={};location=={}".format(account_id, location),
        )
        return {
            entity["status"]["resources"]["name"]: entity["status"]["resources"]["id"]
            for entity in entities
        }

    def image_publishers(self, account_id, location):
        """Return names of marketplace image publishers for the location."""
        return self._entity_names(
            azure.IMAGE_PUBLISHERS,
            "account_uuid=={};location=={}".format(account_id, location),
        )

    def image_offers(self, account_id, location, publisher):
        """Return names of image offers for the given publisher."""
        return self._entity_names(
            azure.IMAGE_OFFERS,
            "account_uuid=={};location=={};publisher=={}".format(
                account_id, location, publisher
            ),
        )

    def image_skus(self, account_id, location, publisher, offer):
        """Return names of image SKUs for the given publisher + offer."""
        return self._entity_names(
            azure.IMAGE_SKUS,
            "account_uuid=={};location=={};publisher=={};offer=={}".format(
                account_id, location, publisher, offer
            ),
        )

    def image_versions(self, account_id, location, publisher, offer, sku):
        """Return names of image versions for the given publisher/offer/SKU."""
        return self._entity_names(
            azure.IMAGE_VERSIONS,
            "account_uuid=={};location=={};publisher=={};offer=={};sku=={}".format(
                account_id, location, publisher, offer, sku
            ),
        )

    def security_groups(self, account_id, resource_group, location):
        """Return names of network security groups in the resource group."""
        return self._entity_names(
            azure.SECURITY_GROUPS,
            "account_uuid=={};location=={};resource_group=={}".format(
                account_id, location, resource_group
            ),
        )

    def virtual_networks(self, account_id, resource_group, location):
        """Return names of virtual networks in the resource group."""
        return self._entity_names(
            azure.VIRTUAL_NETWORKS,
            "account_uuid=={};location=={};resource_group=={}".format(
                account_id, location, resource_group
            ),
        )

    def subnets(self, account_id, resource_group, virtual_network):
        """Return names of subnets in the given virtual network."""
        return self._entity_names(
            azure.SUBNETS,
            "account_uuid=={};virtual_network=={};resource_group=={}".format(
                account_id, virtual_network, resource_group
            ),
        )
def highlight_text(text, **kwargs):
    """Return *text* rendered in the standard highlight style (blue, non-bold).

    Extra keyword arguments are forwarded to ``click.style``.
    """
    rendered = "{}".format(text)
    return click.style(rendered, fg="blue", bold=False, **kwargs)
def create_spec(client):
    """Interactively build, validate and print an Azure VM provider spec.

    Walks the operator through project/account selection, VM basics
    (name, OS, resource group, location), availability config (set or
    zone, depending on the Calm version), hardware profile, OS/storage/
    network profiles and optional tags, then validates the assembled
    spec and dumps it as YAML.
    """
    CALM_VERSION = Version.get_version("Calm")
    spec = {}
    Obj = Azure(client.connection)
    account_id = ""
    resource_group = ""
    location = ""
    vm_os = ""
    # VM Configuration
    projects = client.project.get_name_uuid_map()
    project_list = list(projects.keys())
    if not project_list:
        click.echo(highlight_text("No projects found!!!"))
        click.echo(highlight_text("Please add first"))
        return
    click.echo("\nChoose from given projects:")
    for ind, name in enumerate(project_list):
        click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name)))
    project_id = ""
    while True:
        ind = click.prompt("\nEnter the index of project", default=1)
        if (ind > len(project_list)) or (ind <= 0):
            click.echo("Invalid index !!! ")
        else:
            project_id = projects[project_list[ind - 1]]
            click.echo("{} selected".format(highlight_text(project_list[ind - 1])))
            break
    res, err = client.project.read(project_id)
    if err:
        raise Exception("[{}] - {}".format(err["code"], err["error"]))
    project = res.json()
    # Accounts registered to the chosen project; only Azure accounts
    # from this list are offered below.
    accounts = project["status"]["resources"]["account_reference_list"]
    reg_accounts = []
    for account in accounts:
        reg_accounts.append(account["uuid"])
    payload = {"filter": "type==azure"}
    res, err = client.account.list(payload)
    if err:
        raise Exception("[{}] - {}".format(err["code"], err["error"]))
    res = res.json()
    azure_accounts = {}
    for entity in res["entities"]:
        entity_name = entity["metadata"]["name"]
        entity_id = entity["metadata"]["uuid"]
        if entity_id in reg_accounts:
            azure_accounts[entity_name] = entity_id
    accounts = list(azure_accounts.keys())
    spec["resources"] = {}
    click.echo("\nChoose from given AZURE accounts")
    # NOTE(review): if `accounts` is empty this loop never terminates —
    # the emptiness check below is unreachable; confirm and hoist it.
    for ind, name in enumerate(accounts):
        click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name)))
    while True:
        res = click.prompt("\nEnter the index of account to be used", default=1)
        if (res > len(accounts)) or (res <= 0):
            click.echo("Invalid index !!! ")
        else:
            account_name = accounts[res - 1]
            account_id = azure_accounts[account_name]  # TO BE USED
            spec["resources"]["account_uuid"] = account_id
            click.echo("{} selected".format(highlight_text(account_name)))
            break
    if not account_id:
        click.echo(
            highlight_text("No azure account found registered in this project !!!")
        )
        click.echo("Please add one !!!")
        return
    click.echo("\nChoose from given Operating System types:")
    os_types = azure.OPERATING_SYSTEMS
    for ind, name in enumerate(os_types):
        click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name)))
    while True:
        ind = click.prompt("\nEnter the index of operating system", default=1)
        if (ind > len(os_types)) or (ind <= 0):
            click.echo("Invalid index !!! ")
        else:
            vm_os = os_types[ind - 1]
            click.echo("{} selected".format(highlight_text(vm_os)))
            break
    click.echo("\n\t\t", nl=False)
    click.secho("VM Configuration", bold=True, underline=True)
    # Default name uses Calm runtime macros for uniqueness per replica.
    vm_name = "vm-@@{calm_unique_hash}@@-@@{calm_array_index}@@"
    spec["resources"]["vm_name"] = click.prompt(
        "\nEnter instance name", default=vm_name
    )
    # Add resource group
    resource_groups = Obj.resource_groups(account_id)
    if not resource_groups:
        click.echo("\n{}".format(highlight_text("No resource group present")))
    else:
        click.echo("\nChoose from given resource groups")
        for ind, name in enumerate(resource_groups):
            click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name)))
        while True:
            res = click.prompt("\nEnter the index of resource group", default=1)
            if (res > len(resource_groups)) or (res <= 0):
                click.echo("Invalid index !!! ")
            else:
                resource_group = resource_groups[res - 1]  # TO BE USED
                spec["resources"]["resource_group"] = resource_group
                click.echo("{} selected".format(highlight_text(resource_group)))
                break
    # Add location
    locations = Obj.locations(account_id)
    if not locations:
        click.echo("\n{}".format(highlight_text("No location group present")))
    else:
        click.echo("\nChoose from given locations")
        location_names = list(locations.keys())
        for ind, name in enumerate(location_names):
            click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name)))
        while True:
            # NOTE(review): prompt text says "resource group" but this
            # selects a *location* — the message should be fixed.
            res = click.prompt("\nEnter the index of resource group", default=1)
            if (res > len(location_names)) or (res <= 0):
                click.echo("Invalid index !!! ")
            else:
                location = location_names[res - 1]
                click.echo("{} selected".format(highlight_text(location)))
                # Map display name to the API value expected downstream.
                location = locations[location]
                spec["resources"]["location"] = location
                break
    # Calm < 3.2.0 only supports availability sets; newer versions let
    # the user pick between availability sets and availability zones.
    if LV(CALM_VERSION) < LV("3.2.0"):
        # Add availabililty set
        choice = click.prompt(
            "\n{}(y/n)".format(highlight_text("Want to add a availabilty set")),
            default="n",
        )
        if choice[0] == "y":
            availability_sets = Obj.availability_sets(account_id, resource_group)
            avl_set_list = list(availability_sets.keys())
            if not avl_set_list:
                click.echo("\n{}".format(highlight_text("No availability_set present")))
            else:
                click.echo("\nChoose from given availabilty set")
                for ind, name in enumerate(avl_set_list):
                    click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name)))
                while True:
                    res = click.prompt(
                        "\nEnter the index of availabilty set", default=1
                    )
                    if (res > len(avl_set_list)) or (res <= 0):
                        click.echo("Invalid index !!! ")
                    else:
                        avl_set = avl_set_list[res - 1]
                        spec["resources"]["availability_set_id"] = availability_sets[
                            avl_set
                        ]
                        click.echo("{} selected".format(highlight_text(avl_set)))
                        break
    else:
        # Add availability option
        choice = click.prompt(
            "\n{}(y/n)".format(highlight_text("Want to select availability options")),
            default="n",
        )
        if choice[0] == "y":
            availability_options = ["Availability Sets", "Availability Zones"]
            click.echo("\nChoose from given availability options")
            for ind, name in enumerate(availability_options):
                click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name)))
            while True:
                res = click.prompt("\nEnter the index of option", default=1)
                if (res > len(availability_options)) or (res <= 0):
                    click.echo("Invalid index !!! ")
                else:
                    # Spec value is the option name without spaces,
                    # e.g. "AvailabilitySets".
                    spec["resources"]["availability_option"] = availability_options[
                        res - 1
                    ].replace(" ", "")
                    click.echo(
                        "{} selected".format(
                            highlight_text(availability_options[res - 1])
                        )
                    )
                    if res == 1:
                        availability_sets = Obj.availability_sets(
                            account_id, spec["resources"]["resource_group"]
                        )
                        avl_set_list = list(availability_sets.keys())
                        if not avl_set_list:
                            click.echo(
                                "\n{}".format(
                                    highlight_text("No availability_set present")
                                )
                            )
                        else:
                            click.echo("\nChoose from given availabilty set")
                            for ind, name in enumerate(avl_set_list):
                                click.echo(
                                    "\t {}. {}".format(
                                        str(ind + 1), highlight_text(name)
                                    )
                                )
                            while True:
                                res = click.prompt(
                                    "\nEnter the index of availabilty set", default=1
                                )
                                if (res > len(avl_set_list)) or (res <= 0):
                                    click.echo("Invalid index !!! ")
                                else:
                                    avl_set = avl_set_list[res - 1]
                                    spec["resources"][
                                        "availability_set_id"
                                    ] = availability_sets[avl_set]
                                    click.echo(
                                        "{} selected".format(highlight_text(avl_set))
                                    )
                                    break
                    else:
                        availability_zones = Obj.availability_zones(
                            account_id,
                            spec["resources"]["resource_group"],
                            spec["resources"]["location"],
                        )
                        if not availability_zones:
                            click.echo(
                                "\n{}".format(
                                    highlight_text(
                                        "Selected location does not support Availability Zones"
                                    )
                                )
                            )
                        else:
                            click.echo("\nChoose from the given zones")
                            zones = list(availability_zones.keys())
                            for ind, name in enumerate(zones):
                                click.echo(
                                    "\t {}. {}".format(
                                        str(ind + 1), highlight_text(name)
                                    )
                                )
                            while True:
                                res = click.prompt(
                                    "\nEnter the index of zone", default=1
                                )
                                if (res > len(availability_zones)) or (res <= 0):
                                    click.echo("Invalid index !!! ")
                                else:
                                    click.echo(
                                        "{} selected".format(
                                            highlight_text(zones[res - 1])
                                        )
                                    )
                                    spec["resources"][
                                        "availability_zone"
                                    ] = availability_zones[zones[res - 1]]
                                    break
                    break
    hardware_profiles = Obj.hardware_profiles(account_id, location)
    if not hardware_profiles:
        click.echo("\n{}".format(highlight_text("No hardware profile present")))
    else:
        click.echo("\nChoose from given Hardware Profiles")
        hw_profile_names = list(hardware_profiles.keys())
        for ind, name in enumerate(hw_profile_names):
            click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name)))
        while True:
            res = click.prompt("\nEnter the index of Hardware Profile", default=1)
            if (res > len(hw_profile_names)) or (res <= 0):
                click.echo("Invalid index !!! ")
            else:
                hw_profile = hw_profile_names[res - 1]
                click.echo("{} selected".format(highlight_text(hw_profile)))
                spec["resources"]["hw_profile"] = {
                    "vm_size": hw_profile,
                    "max_data_disk_count": hardware_profiles[hw_profile],
                }
                break
    # OS Profile
    spec["resources"]["os_profile"] = get_os_profile(vm_os)
    # Storage Profile
    spec["resources"]["storage_profile"] = get_storage_profile(
        Obj, account_id, location
    )
    # Network Profile
    spec["resources"]["nw_profile"] = {}
    spec["resources"]["nw_profile"]["nic_list"] = get_nw_profile(
        Obj, account_id, resource_group, location
    )
    # Add tags
    choice = click.prompt(
        "\n{}(y/n)".format(highlight_text("Want to add any tags")), default="n"
    )
    if choice[0] == "y":
        tags = []
        while True:
            key = click.prompt("\n\tKey")
            value = click.prompt("\tValue")
            tag = {"key": key, "value": value}
            tags.append(tag)
            choice = click.prompt(
                "\n{}(y/n)".format(highlight_text("Want to add more tags")), default="n"
            )
            if choice[0] == "n":
                spec["resources"]["tag_list"] = tags
                break
    AzureVmProvider.validate_spec(spec)
    click.secho("\nCreate spec for your AZURE VM:\n", underline=True)
    click.echo(highlight_text(yaml.dump(spec, default_flow_style=False)))
def get_os_profile(os_type):
    """Interactively collect the OS profile section of the spec.

    Prompts for key-vault secrets (each with optional vault
    certificates), then delegates to the Linux or Windows config helper
    depending on *os_type*. Certificate URLs gathered here are passed on
    to the Windows config so WinRM HTTPS listeners can reference them.
    """
    click.echo("\n\t\t", nl=False)
    click.secho("OS PROFILE DETAILS", bold=True, underline=True)
    choice = click.prompt(
        "\n{}(y/n)".format(highlight_text("Want to add secrets")), default="n"
    )
    res = {}
    res["secrets"] = []
    certificate_list = []
    # Outer loop: one iteration per key-vault secret.
    while choice[0] == "y":
        vault_id = click.prompt("\n\tEnter Vault ID ", default="")
        choice = click.prompt(
            "\n{}(y/n)".format(highlight_text("Add Vault Certificate Details")),
            default="n",
        )
        vault_certificates = []
        # Inner loop: certificates attached to this vault secret.
        while choice[0] == "y":
            certificate_store = ""
            certificate_url = click.prompt("\n\tEnter Certificate URL", default="URL")
            # The certificate store is a Windows-only concept.
            if os_type == "Windows":
                certificate_store = click.prompt(
                    "\n\tEnter Certificate Store", default="Store"
                )
            vault_certificates.append(
                {
                    "certificate_url": certificate_url,
                    "certificate_store": certificate_store,
                }
            )
            if certificate_url:
                certificate_list.append(certificate_url)
            choice = click.prompt(
                "\n{}(y/n)".format(highlight_text("Add more certificates")), default="n"
            )
        res["secrets"].append(
            {"source_vault_id": vault_id, "vault_certificates": vault_certificates}
        )
        choice = click.prompt(
            "\n{}(y/n)".format(highlight_text("Add more secrets")), default="n"
        )
    # OS-specific sub-config; anything non-Linux is treated as Windows.
    if os_type == "Linux":
        res["linux_config"] = get_linux_config()
    else:
        res["windows_config"] = get_windows_config(certificate_list)
    return res
def get_linux_config():
    """Prompt for Linux OS-profile settings (cloud-init script only)."""
    script = click.prompt("\nEnter Cloud Init Script", default="")
    return {"custom_data": script}
def get_windows_config(certificate_list):
    """Interactively collect the Windows-specific OS profile config.

    Args:
        certificate_list: certificate URLs gathered while adding vault
            secrets; HTTPS WinRM listeners must reference one of them.

    Returns:
        dict with keys ``winrm_listeners``, ``additional_unattend_content``,
        ``provision_vm_agent`` and ``auto_updates``.
    """
    provision_vm_agent = click.prompt(
        "\n{}(y/n)".format(highlight_text("Enable Provision Windows Guest Agent")),
        default="n",
    )
    provision_vm_agent = True if provision_vm_agent[0] == "y" else False
    auto_updates = click.prompt(
        "\n{}(y/n)".format(highlight_text("Enable Automatic OS Upgrades")), default="n"
    )
    auto_updates = True if auto_updates[0] == "y" else False
    unattend_content = []
    choice = click.prompt(
        "\n{}(y/n)".format(highlight_text("Want to add ADDITIONAL UNATTENDED CONTENT")),
        default="n",
    )
    # BUGFIX: copy the constant before popping from it. The original
    # aliased azure.UNATTENDED_SETTINGS directly, so each selection
    # permanently removed the setting from the module-level list and a
    # later call to this function offered fewer settings.
    settings = list(azure.UNATTENDED_SETTINGS)
    while (choice[0] == "y") and settings:
        click.echo("\nChoose from given Setting Names")
        setting = ""
        for ind, name in enumerate(settings):
            click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name)))
        while True:
            res = click.prompt("\nEnter the index of Setting", default=1)
            if (res > len(settings)) or (res <= 0):
                click.echo("Invalid index !!! ")
            else:
                setting = settings[res - 1]
                # Each setting may be used at most once per call.
                settings.pop(res - 1)
                click.echo("{} selected".format(highlight_text(setting)))
                break
        xml_content = click.prompt(
            "\nEnter XML Content(Please use <{}> as the root element)".format(setting),
            default="",
        )
        unattend_content.append({"setting_name": setting, "xml_content": xml_content})
        choice = click.prompt(
            "\n{}(y/n)".format(highlight_text("Want to add more Unattended content")),
            default="n",
        )
    winrm_listeners = []
    choice = click.prompt(
        "\n{}(y/n)".format(highlight_text("Want to add WINRM LISTENERS")), default="n"
    )
    # list() already copies here, so popping is safe.
    protocols = list(azure.PROTOCOLS.keys())
    while (choice[0] == "y") and protocols:
        click.echo("\nChoose from given Protocols")
        protocol = ""
        for ind, name in enumerate(protocols):
            click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name)))
        while True:
            res = click.prompt("\nEnter the index of protocol", default=1)
            if (res > len(protocols)) or (res <= 0):
                click.echo("Invalid index !!! ")
            else:
                protocol = protocols[res - 1]
                protocols.pop(res - 1)
                click.echo("{} selected".format(highlight_text(protocol)))
                break
        if protocol == "HTTPS":
            # HTTPS listeners need a certificate from the vault secrets.
            cert_url = ""
            click.echo("Choose from given certificate URLs")
            for ind, name in enumerate(certificate_list):
                click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name)))
            while True:
                res = click.prompt("\nEnter the index of certificate URL", default=1)
                if (res > len(certificate_list)) or (res <= 0):
                    click.echo("Invalid index !!! ")
                else:
                    cert_url = certificate_list[res - 1]
                    click.echo("{} selected".format(highlight_text(cert_url)))
                    break
            winrm_listeners.append(
                {"protocol": azure.PROTOCOLS[protocol], "certificate_url": cert_url}
            )
        else:
            winrm_listeners.append({"protocol": azure.PROTOCOLS[protocol]})
        choice = click.prompt(
            "\n{}(y/n)".format(highlight_text("Want to add more Winrm Listeners")),
            default="n",
        )
    return {
        "winrm_listeners": winrm_listeners,
        "additional_unattend_content": unattend_content,
        "provision_vm_agent": provision_vm_agent,
        "auto_updates": auto_updates,
    }
def get_storage_profile(azure_obj, account_id, location):
    """Interactively collect the storage profile: VM image, OS disk and
    data disks. Image selection branches on whether a custom
    (subscription) image or a marketplace image is used."""
    click.echo("\n\t\t", nl=False)
    click.secho("STORAGE PROFILE DETAILS", bold=True, underline=True)
    click.secho("\n1. VM Image Details", underline=True)
    answer = click.prompt(
        "\n{}(y/n)".format(highlight_text("Want to use custom image")), default="n"
    )
    use_custom_image = answer[0] == "y"
    if use_custom_image:
        vm_image = get_custom_vm_image(azure_obj, account_id, location)
    else:
        vm_image = get_non_custom_vm_image(azure_obj, account_id, location)
    click.secho("\n2. OS Disk Details", underline=True)
    # Custom images force the FROMIMAGE create option inside get_os_disk.
    os_disk = get_os_disk(use_custom_image)
    click.secho("\n3. Data Disk Details", underline=True)
    data_disks = get_data_disks()
    return {
        "is_managed": True,  # Hardcoded in UI
        "os_disk_details": os_disk,
        "data_disk_list": data_disks,
        "image_details": vm_image,
    }
def get_data_disks():
    """Interactively collect zero or more data-disk definitions.

    Returns a list of dicts with keys ``size_in_gb``, ``name``,
    ``storage_type``, ``caching_type`` and ``lun``. ``storage_type``
    stays "" when the user declines to pick one.
    """
    disks = []
    choice = click.prompt(
        "\n{}(y/n)".format(highlight_text("Want to add data disks")), default="n"
    )
    disk_index = 0
    # One iteration per data disk.
    while choice[0] == "y":
        click.echo("\n\t\t", nl=False)
        click.secho("Data-Disk {}".format(disk_index + 1), underline=True)
        storage_type = ""
        # Default name uses Calm macros plus the disk's ordinal.
        disk_name = "data-disk-@@{calm_unique_hash}@@-@@{calm_array_index}@@-" + str(
            disk_index
        )
        disk_name = click.prompt("\nEnter data disk name", default=disk_name)
        # Add storage type
        choice = click.prompt(
            "\n{}(y/n)".format(highlight_text("Want to add storage type to disk")),
            default="n",
        )
        if choice[0] == "y":
            storage_types = azure.STORAGE_TYPES
            display_names = list(storage_types.keys())
            click.echo("\nChoose from given storage types")
            for ind, name in enumerate(display_names):
                click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name)))
            while True:
                res = click.prompt("\nEnter the index of storage type", default=1)
                if (res > len(display_names)) or (res <= 0):
                    click.echo("Invalid index !!! ")
                else:
                    storage_type = display_names[res - 1]
                    click.echo("{} selected".format(highlight_text(storage_type)))
                    # Map display name to the API value.
                    storage_type = storage_types[storage_type]
                    break
        # Add cache type
        cache_types = azure.CACHE_TYPES
        display_names = list(cache_types.keys())
        click.echo("\nChoose from given cache types")
        for ind, name in enumerate(display_names):
            click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name)))
        while True:
            res = click.prompt("\nEnter the index of cache type", default=1)
            if (res > len(display_names)) or (res <= 0):
                click.echo("Invalid index !!! ")
            else:
                cache_type = display_names[res - 1]
                click.echo("{} selected".format(highlight_text(cache_type)))
                cache_type = cache_types[cache_type]
                break
        # Add disk size
        disk_size = click.prompt("\nEnter the size for disk(in GiB)", default=1)
        # Add disk lun
        disk_lun = click.prompt("\nEnter the Disk LUN", default=0)
        disks.append(
            {
                "size_in_gb": disk_size,
                "name": disk_name,
                "storage_type": storage_type,
                "caching_type": cache_type,
                "lun": disk_lun,
            }
        )
        disk_index += 1
        choice = click.prompt(
            "\n{}(y/n)".format(highlight_text("Want to add more data disks")),
            default="n",
        )
    return disks
def get_os_disk(use_custom_image):
    """Interactively collect the OS-disk definition.

    Args:
        use_custom_image: when True the create option is forced to
            DISK_CREATE_OPTIONS["FROMIMAGE"] instead of being prompted.

    Returns a dict with keys ``name``, ``storage_type``,
    ``caching_type`` and ``create_option``.
    """
    disk_create_option = ""
    cache_type = ""
    storage_type = ""
    disk_name = "os-@@{calm_unique_hash}@@-@@{calm_array_index}@@-disk"
    disk_name = click.prompt("\nEnter os disk name", default=disk_name)
    # Add storage type
    choice = click.prompt(
        "\n{}(y/n)".format(highlight_text("Want to add storage type to os disk")),
        default="n",
    )
    if choice[0] == "y":
        storage_types = azure.STORAGE_TYPES
        display_names = list(storage_types.keys())
        click.echo("\nChoose from given storage types")
        for ind, name in enumerate(display_names):
            click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name)))
        while True:
            res = click.prompt("\nEnter the index of storage type", default=1)
            if (res > len(display_names)) or (res <= 0):
                click.echo("Invalid index !!! ")
            else:
                storage_type = display_names[res - 1]
                click.echo("{} selected".format(highlight_text(storage_type)))
                # Map display name to the API value.
                storage_type = storage_types[storage_type]
                break
    # Add cache type
    cache_types = azure.CACHE_TYPES
    display_names = list(cache_types.keys())
    click.echo("\nChoose from given cache types")
    for ind, name in enumerate(display_names):
        click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name)))
    while True:
        res = click.prompt("\nEnter the index of cache type", default=1)
        if (res > len(display_names)) or (res <= 0):
            click.echo("Invalid index !!! ")
        else:
            cache_type = display_names[res - 1]
            click.echo("{} selected".format(highlight_text(cache_type)))
            cache_type = cache_types[cache_type]
            break
    # Add Disk Create Option
    if use_custom_image:
        # Custom images only support creating the OS disk from the image.
        disk_create_option = azure.DISK_CREATE_OPTIONS["FROMIMAGE"]
        click.secho(
            "\nNote: In case of custom vm image, Os Disk Create Option : {}".format(
                disk_create_option
            )
        )
    else:
        disk_create_options = azure.DISK_CREATE_OPTIONS
        display_names = list(disk_create_options.keys())
        click.echo("\nChoose from given disk create option")
        for ind, name in enumerate(display_names):
            click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name)))
        while True:
            res = click.prompt("\nEnter the index of disk create option", default=1)
            if (res > len(display_names)) or (res <= 0):
                click.echo("Invalid index !!! ")
            else:
                disk_create_option = display_names[res - 1]
                click.echo("{} selected".format(highlight_text(disk_create_option)))
                disk_create_option = disk_create_options[disk_create_option]
                break
    return {
        "name": disk_name,
        "storage_type": storage_type,
        "caching_type": cache_type,
        "create_option": disk_create_option,
    }
def get_non_custom_vm_image(azure_obj, account_id, location):
    """Interactively select a marketplace image by walking the Azure
    hierarchy publisher -> offer -> sku -> version.

    Each level's choices are fetched with the previous level's
    selection; if a level returns nothing its value stays "" and is
    passed on as-is to the next lookup.
    """
    image_publisher = ""
    image_offer = ""
    image_sku = ""
    image_version = ""
    # Add image publisher
    publishers = azure_obj.image_publishers(account_id, location)
    if not publishers:
        click.echo("\n{}".format(highlight_text("No image publisher present")))
    else:
        click.echo("\nChoose from given image publisher")
        for ind, name in enumerate(publishers):
            click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name)))
        while True:
            res = click.prompt("\nEnter the index of image publisher", default=1)
            if (res > len(publishers)) or (res <= 0):
                click.echo("Invalid index !!! ")
            else:
                image_publisher = publishers[res - 1]
                click.echo("{} selected".format(highlight_text(image_publisher)))
                break
    # Add image offer
    image_offers = azure_obj.image_offers(account_id, location, image_publisher)
    if not image_offers:
        click.echo("\n{}".format(highlight_text("No image offer present")))
    else:
        click.echo("\nChoose from given image offer")
        for ind, name in enumerate(image_offers):
            click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name)))
        while True:
            res = click.prompt("\nEnter the index of image offer", default=1)
            if (res > len(image_offers)) or (res <= 0):
                click.echo("Invalid index !!! ")
            else:
                image_offer = image_offers[res - 1]
                click.echo("{} selected".format(highlight_text(image_offer)))
                break
    # Add Image SKU
    image_skus = azure_obj.image_skus(
        account_id, location, image_publisher, image_offer
    )
    if not image_skus:
        click.echo("\n{}".format(highlight_text("No image sku present")))
    else:
        click.echo("\nChoose from given image sku")
        for ind, name in enumerate(image_skus):
            click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name)))
        while True:
            res = click.prompt("\nEnter the index of image sku", default=1)
            if (res > len(image_skus)) or (res <= 0):
                click.echo("Invalid index !!! ")
            else:
                image_sku = image_skus[res - 1]
                click.echo("{} selected".format(highlight_text(image_sku)))
                break
    # Add Image Version
    image_versions = azure_obj.image_versions(
        account_id, location, image_publisher, image_offer, image_sku
    )
    if not image_versions:
        click.echo("\n{}".format(highlight_text("No image version present")))
    else:
        click.echo("\nChoose from given image version")
        for ind, name in enumerate(image_versions):
            click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name)))
        while True:
            res = click.prompt("\nEnter the index of image version", default=1)
            if (res > len(image_versions)) or (res <= 0):
                click.echo("Invalid index !!! ")
            else:
                image_version = image_versions[res - 1]
                click.echo("{} selected".format(highlight_text(image_version)))
                break
    return {
        "sku": image_sku,
        "publisher": image_publisher,
        "offer": image_offer,
        "version": image_version,
        "use_custom_image": False,
    }
def get_custom_vm_image(azure_obj, account_id, location):
    """Interactively pick a custom (subscription) VM image and return
    its id in the shape the spec expects."""
    selected_id = ""
    image_map = azure_obj.custom_images(account_id, location)
    image_names = list(image_map.keys())
    if not image_names:
        click.echo("\n{}".format(highlight_text("No custom image present")))
    else:
        click.echo("\nChoose from given custom images")
        for idx, label in enumerate(image_names):
            click.echo("\t {}. {}".format(str(idx + 1), highlight_text(label)))
        while True:
            sel = click.prompt("\nEnter the index of custom image", default=1)
            if sel <= 0 or sel > len(image_names):
                click.echo("Invalid index !!! ")
                continue
            picked = image_names[sel - 1]
            selected_id = image_map[picked]
            click.echo("{} selected".format(highlight_text(picked)))
            break
    return {"source_image_id": selected_id, "use_custom_image": True}
def get_nw_profile(azure_obj, account_id, resource_grp, location):
    """Interactively collect zero or more NIC definitions.

    Each NIC gets a name, an optional security group, virtual network
    and subnet, plus public- and private-IP configs.

    Returns:
        list of dicts with keys ``nsg_name``, ``vnet_name``,
        ``private_ip_info``, ``nic_name``, ``subnet_name`` and
        ``public_ip_info``.
    """
    click.echo("\n\t\t", nl=False)
    click.secho("NETWORK PROFILE DETAILS", bold=True, underline=True)
    nics = []
    choice = click.prompt(
        "\n{}(y/n)".format(highlight_text("Want to add NICs")), default="n"
    )
    nic_index = 0
    # One iteration per NIC.
    while choice[0] == "y":
        click.echo("\n\t\t", nl=False)
        click.secho("Nic {}".format(nic_index + 1), underline=True)
        nic_name = "nic-@@{calm_unique_hash}@@-@@{calm_array_index}@@-" + str(nic_index)
        nic_name = click.prompt("\nEnter nic name", default=nic_name)
        security_group = ""
        virtual_network = ""
        subnet = ""
        # Add security group
        security_groups = azure_obj.security_groups(account_id, resource_grp, location)
        if not security_groups:
            click.echo("\n{}".format(highlight_text("No security group present")))
        else:
            click.echo("\nChoose from given security groups")
            for ind, name in enumerate(security_groups):
                click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name)))
            while True:
                res = click.prompt("\nEnter the index of security group", default=1)
                if (res > len(security_groups)) or (res <= 0):
                    click.echo("Invalid index !!! ")
                else:
                    security_group = security_groups[res - 1]
                    click.echo("{} selected".format(highlight_text(security_group)))
                    break
        # Add virtual network
        virtual_networks = azure_obj.virtual_networks(
            account_id, resource_grp, location
        )
        if not virtual_networks:
            click.echo("\n{}".format(highlight_text("No virtual network present")))
        else:
            # BUGFIX: corrected user-facing typo "networtks" -> "networks".
            click.echo("\nChoose from given virtual networks")
            for ind, name in enumerate(virtual_networks):
                click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name)))
            while True:
                res = click.prompt("\nEnter the index of virtual network", default=1)
                if (res > len(virtual_networks)) or (res <= 0):
                    click.echo("Invalid index !!! ")
                else:
                    virtual_network = virtual_networks[res - 1]
                    click.echo("{} selected".format(highlight_text(virtual_network)))
                    break
        # Add subnet (depends on the virtual network chosen above)
        subnets = azure_obj.subnets(account_id, resource_grp, virtual_network)
        if not subnets:
            click.echo("\n{}".format(highlight_text("No subnet present")))
        else:
            click.echo("\nChoose from given subnets")
            for ind, name in enumerate(subnets):
                click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name)))
            while True:
                res = click.prompt("\nEnter the index of subnet", default=1)
                if (res > len(subnets)) or (res <= 0):
                    click.echo("Invalid index !!! ")
                else:
                    subnet = subnets[res - 1]
                    click.echo("{} selected".format(highlight_text(subnet)))
                    break
        click.secho("\nPublic IP Config", underline=True)
        public_ip_info = get_public_ip_info(nic_index)
        click.secho("\nPrivate IP Config", underline=True)
        private_ip_info = get_private_ip_info()
        nics.append(
            {
                "nsg_name": security_group,
                "vnet_name": virtual_network,
                "private_ip_info": private_ip_info,
                "nic_name": nic_name,
                "subnet_name": subnet,
                "public_ip_info": public_ip_info,
            }
        )
        nic_index += 1
        choice = click.prompt(
            "\n{}(y/n)".format(highlight_text("Want to add more nics")), default="n"
        )
    return nics
def get_public_ip_info(nic_index=0):
    """Prompt for the public-IP configuration of one NIC and return it."""
    default_ip_name = "public-ip-@@{calm_unique_hash}@@-@@{calm_array_index}@@-" + str(
        nic_index
    )
    ip_name = click.prompt("\nEnter public ip name", default=default_ip_name)
    default_dns = "dns-@@{calm_unique_hash}@@-@@{calm_array_index}@@-" + str(nic_index)
    dns_label = click.prompt("\nEnter DNS Label", default=default_dns)
    methods = azure.ALLOCATION_METHODS
    click.echo("\nChoose from given ip allocation method")
    for idx, method in enumerate(methods):
        click.echo("\t {}. {}".format(str(idx + 1), highlight_text(method)))
    while True:
        sel = click.prompt("\nEnter the index of allocation methods", default=1)
        if sel <= 0 or sel > len(methods):
            click.echo("Invalid index !!! ")
            continue
        allocation_method = methods[sel - 1]
        click.echo("{} selected".format(highlight_text(allocation_method)))
        break
    return {
        "ip_allocation_method": allocation_method,
        "dns_label": dns_label,
        "ip_name": ip_name,
    }
def get_private_ip_info():
    """Prompt for the private-IP configuration of one NIC and return it."""
    chosen_method = ""
    ip_address = ""
    methods = azure.ALLOCATION_METHODS
    click.echo("\nChoose from given ip allocation method")
    for idx, method in enumerate(methods):
        click.echo("\t {}. {}".format(str(idx + 1), highlight_text(method)))
    while True:
        sel = click.prompt("\nEnter the index of allocation methods", default=1)
        if sel <= 0 or sel > len(methods):
            click.echo("Invalid index !!! ")
            continue
        chosen_method = methods[sel - 1]
        click.echo("{} selected".format(highlight_text(chosen_method)))
        break
    # Only a static allocation needs an explicit address.
    if chosen_method == "Static":
        ip_address = click.prompt("\nEnter IP Address", default="")
    return {"ip_allocation_method": chosen_method, "ip_address": ip_address}
| 35.241825 | 95 | 0.541894 | import click
from ruamel import yaml
from distutils.version import LooseVersion as LV
from calm.dsl.api import get_resource_api, get_api_client
from calm.dsl.providers import get_provider_interface
from calm.dsl.store import Version
from .constants import AZURE as azure
Provider = get_provider_interface()
class AzureVmProvider(Provider):
provider_type = "AZURE_VM"
package_name = __name__
spec_template_file = "azure_vm_provider_spec.yaml.jinja2"
@classmethod
def create_spec(cls):
client = get_api_client()
create_spec(client)
@classmethod
def get_api_obj(cls):
client = get_api_client()
return Azure(client.connection)
class Azure:
def __init__(self, connection):
self.connection = connection
def resource_groups(self, account_id):
Obj = get_resource_api(azure.RESOURCE_GROUPS, self.connection)
payload = {"filter": "account_uuid=={};".format(account_id)}
res, err = Obj.list(payload)
if err:
raise Exception("[{}] - {}".format(err["code"], err["error"]))
res_groups = []
res = res.json()
for entity in res["entities"]:
res_groups.append(entity["status"]["name"])
return res_groups
def availability_sets(self, account_id, resource_group):
Obj = get_resource_api(azure.AVAILABILTY_SETS, self.connection)
payload = {
"filter": "account_uuid=={};resource_group=={}".format(
account_id, resource_group
)
}
res, err = Obj.list(payload)
if err:
raise Exception("[{}] - {}".format(err["code"], err["error"]))
name_id_map = {}
res = res.json()
for entity in res["entities"]:
name = entity["status"]["resources"]["name"]
entity_uuid = entity["status"]["resources"]["id"]
name_id_map[name] = entity_uuid
return name_id_map
def locations(self, account_id):
Obj = get_resource_api(azure.LOCATIONS, self.connection)
payload = {"filter": "account_uuid=={};".format(account_id)}
res, err = Obj.list(payload)
if err:
raise Exception("[{}] - {}".format(err["code"], err["error"]))
res = res.json()
name_value_map = {}
for entity in res["entities"]:
name = entity["status"]["resources"]["displayName"]
value = entity["status"]["resources"]["name"]
name_value_map[name] = value
return name_value_map
def availability_zones(self, account_id, resource_group, location):
Obj = get_resource_api(azure.AVAILABILITY_ZONES, self.connection)
payload = {
"filter": "account_uuid=={};resource_group=={};location=={}".format(
account_id, resource_group, location
)
}
res, err = Obj.list(payload)
if err:
raise Exception("[{}] - {}".format(err["code"], err["error"]))
res = res.json()
name_value_map = dict()
for entity in res["entities"]:
if "zones" in entity["status"]["resources"]:
zones = entity["status"]["resources"]["zones"]
for zone in zones:
name_value_map[zone["name"]] = zone["value"]
return name_value_map
def hardware_profiles(self, account_id, location):
Obj = get_resource_api(azure.VM_SIZES, self.connection)
payload = {
"filter": "account_uuid=={};location=={}".format(account_id, location)
}
res, err = Obj.list(payload)
if err:
raise Exception("[{}] - {}".format(err["code"], err["error"]))
res = res.json()
hwprofiles = {}
for entity in res["entities"]:
name = entity["status"]["resources"]["name"]
max_disk_count = entity["status"]["resources"]["maxDataDiskCount"]
hwprofiles[name] = max_disk_count
return hwprofiles
def custom_images(self, account_id, location):
Obj = get_resource_api(azure.SUBSCRIPTION_IMAGES, self.connection)
payload = {
"filter": "account_uuid=={};location=={}".format(account_id, location)
}
res, err = Obj.list(payload)
if err:
raise Exception("[{}] - {}".format(err["code"], err["error"]))
res = res.json()
name_id_map = {}
for entity in res["entities"]:
name = entity["status"]["resources"]["name"]
id = entity["status"]["resources"]["id"]
name_id_map[name] = id
return name_id_map
def image_publishers(self, account_id, location):
Obj = get_resource_api(azure.IMAGE_PUBLISHERS, self.connection)
payload = {
"filter": "account_uuid=={};location=={}".format(account_id, location)
}
res, err = Obj.list(payload)
if err:
raise Exception("[{}] - {}".format(err["code"], err["error"]))
res = res.json()
entity_list = []
for entity in res["entities"]:
name = entity["status"]["name"]
entity_list.append(name)
return entity_list
def image_offers(self, account_id, location, publisher):
Obj = get_resource_api(azure.IMAGE_OFFERS, self.connection)
payload = {
"filter": "account_uuid=={};location=={};publisher=={}".format(
account_id, location, publisher
)
}
res, err = Obj.list(payload)
if err:
raise Exception("[{}] - {}".format(err["code"], err["error"]))
res = res.json()
entity_list = []
for entity in res["entities"]:
name = entity["status"]["name"]
entity_list.append(name)
return entity_list
def image_skus(self, account_id, location, publisher, offer):
Obj = get_resource_api(azure.IMAGE_SKUS, self.connection)
payload = {
"filter": "account_uuid=={};location=={};publisher=={};offer=={}".format(
account_id, location, publisher, offer
)
}
res, err = Obj.list(payload)
if err:
raise Exception("[{}] - {}".format(err["code"], err["error"]))
res = res.json()
entity_list = []
for entity in res["entities"]:
name = entity["status"]["name"]
entity_list.append(name)
return entity_list
def image_versions(self, account_id, location, publisher, offer, sku):
Obj = get_resource_api(azure.IMAGE_VERSIONS, self.connection)
payload = {
"filter": "account_uuid=={};location=={};publisher=={};offer=={};sku=={}".format(
account_id, location, publisher, offer, sku
)
}
res, err = Obj.list(payload)
if err:
raise Exception("[{}] - {}".format(err["code"], err["error"]))
res = res.json()
entity_list = []
for entity in res["entities"]:
name = entity["status"]["name"]
entity_list.append(name)
return entity_list
def security_groups(self, account_id, resource_group, location):
Obj = get_resource_api(azure.SECURITY_GROUPS, self.connection)
payload = {
"filter": "account_uuid=={};location=={};resource_group=={}".format(
account_id, location, resource_group
)
}
res, err = Obj.list(payload)
if err:
raise Exception("[{}] - {}".format(err["code"], err["error"]))
res = res.json()
entity_list = []
for entity in res["entities"]:
name = entity["status"]["name"]
entity_list.append(name)
return entity_list
def virtual_networks(self, account_id, resource_group, location):
Obj = get_resource_api(azure.VIRTUAL_NETWORKS, self.connection)
payload = {
"filter": "account_uuid=={};location=={};resource_group=={}".format(
account_id, location, resource_group
)
}
res, err = Obj.list(payload)
if err:
raise Exception("[{}] - {}".format(err["code"], err["error"]))
res = res.json()
entity_list = []
for entity in res["entities"]:
name = entity["status"]["name"]
entity_list.append(name)
return entity_list
def subnets(self, account_id, resource_group, virtual_network):
Obj = get_resource_api(azure.SUBNETS, self.connection)
payload = {
"filter": "account_uuid=={};virtual_network=={};resource_group=={}".format(
account_id, virtual_network, resource_group
)
}
res, err = Obj.list(payload)
if err:
raise Exception("[{}] - {}".format(err["code"], err["error"]))
res = res.json()
entity_list = []
for entity in res["entities"]:
name = entity["status"]["name"]
entity_list.append(name)
return entity_list
def highlight_text(text, **kwargs):
    """Return *text* styled for CLI emphasis (blue foreground, not bold)."""
    rendered = "{}".format(text)
    return click.style(rendered, fg="blue", bold=False, **kwargs)
def create_spec(client):
    """Interactively build, validate and print a create-spec for an Azure VM.

    Walks the user through project/account selection, OS type, VM config,
    availability options (version-dependent), hardware profile, OS/storage/
    network profiles and tags, then validates the spec with AzureVmProvider
    and dumps it as YAML to stdout. Returns None.
    """
    CALM_VERSION = Version.get_version("Calm")
    spec = {}
    Obj = Azure(client.connection)
    account_id = ""
    resource_group = ""
    location = ""
    vm_os = ""
    # --- Project selection ---
    projects = client.project.get_name_uuid_map()
    project_list = list(projects.keys())
    if not project_list:
        click.echo(highlight_text("No projects found!!!"))
        click.echo(highlight_text("Please add first"))
        return
    click.echo("\nChoose from given projects:")
    for ind, name in enumerate(project_list):
        click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name)))
    project_id = ""
    while True:
        # 1-based index from the user; re-prompts until valid.
        ind = click.prompt("\nEnter the index of project", default=1)
        if (ind > len(project_list)) or (ind <= 0):
            click.echo("Invalid index !!! ")
        else:
            project_id = projects[project_list[ind - 1]]
            click.echo("{} selected".format(highlight_text(project_list[ind - 1])))
            break
    res, err = client.project.read(project_id)
    if err:
        raise Exception("[{}] - {}".format(err["code"], err["error"]))
    project = res.json()
    # Accounts registered to the chosen project.
    accounts = project["status"]["resources"]["account_reference_list"]
    reg_accounts = []
    for account in accounts:
        reg_accounts.append(account["uuid"])
    # --- Azure account selection (intersection of azure accounts and project accounts) ---
    payload = {"filter": "type==azure"}
    res, err = client.account.list(payload)
    if err:
        raise Exception("[{}] - {}".format(err["code"], err["error"]))
    res = res.json()
    azure_accounts = {}
    for entity in res["entities"]:
        entity_name = entity["metadata"]["name"]
        entity_id = entity["metadata"]["uuid"]
        if entity_id in reg_accounts:
            azure_accounts[entity_name] = entity_id
    accounts = list(azure_accounts.keys())
    spec["resources"] = {}
    click.echo("\nChoose from given AZURE accounts")
    for ind, name in enumerate(accounts):
        click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name)))
    # NOTE(review): if `accounts` is empty this loop never terminates with a
    # selection; the empty-account guard below is only reached via default
    # index handling — confirm intended behavior when no account is registered.
    while True:
        res = click.prompt("\nEnter the index of account to be used", default=1)
        if (res > len(accounts)) or (res <= 0):
            click.echo("Invalid index !!! ")
        else:
            account_name = accounts[res - 1]
            account_id = azure_accounts[account_name]
            spec["resources"]["account_uuid"] = account_id
            click.echo("{} selected".format(highlight_text(account_name)))
            break
    if not account_id:
        click.echo(
            highlight_text("No azure account found registered in this project !!!")
        )
        click.echo("Please add one !!!")
        return
    # --- Operating system type ---
    click.echo("\nChoose from given Operating System types:")
    os_types = azure.OPERATING_SYSTEMS
    for ind, name in enumerate(os_types):
        click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name)))
    while True:
        ind = click.prompt("\nEnter the index of operating system", default=1)
        if (ind > len(os_types)) or (ind <= 0):
            click.echo("Invalid index !!! ")
        else:
            vm_os = os_types[ind - 1]
            click.echo("{} selected".format(highlight_text(vm_os)))
            break
    # --- VM configuration ---
    click.echo("\n\t\t", nl=False)
    click.secho("VM Configuration", bold=True, underline=True)
    # Default name uses Calm runtime macros expanded at deploy time.
    vm_name = "vm-@@{calm_unique_hash}@@-@@{calm_array_index}@@"
    spec["resources"]["vm_name"] = click.prompt(
        "\nEnter instance name", default=vm_name
    )
    # --- Resource group ---
    resource_groups = Obj.resource_groups(account_id)
    if not resource_groups:
        click.echo("\n{}".format(highlight_text("No resource group present")))
    else:
        click.echo("\nChoose from given resource groups")
        for ind, name in enumerate(resource_groups):
            click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name)))
        while True:
            res = click.prompt("\nEnter the index of resource group", default=1)
            if (res > len(resource_groups)) or (res <= 0):
                click.echo("Invalid index !!! ")
            else:
                resource_group = resource_groups[res - 1]
                spec["resources"]["resource_group"] = resource_group
                click.echo("{} selected".format(highlight_text(resource_group)))
                break
    # --- Location ---
    locations = Obj.locations(account_id)
    if not locations:
        click.echo("\n{}".format(highlight_text("No location group present")))
    else:
        click.echo("\nChoose from given locations")
        location_names = list(locations.keys())
        for ind, name in enumerate(location_names):
            click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name)))
        while True:
            # NOTE(review): prompt text says "resource group" but this picks a
            # location — the wording should be fixed.
            res = click.prompt("\nEnter the index of resource group", default=1)
            if (res > len(location_names)) or (res <= 0):
                click.echo("Invalid index !!! ")
            else:
                location = location_names[res - 1]
                click.echo("{} selected".format(highlight_text(location)))
                # Store the location *value*, not its display name.
                location = locations[location]
                spec["resources"]["location"] = location
                break
    # --- Availability (pre-3.2.0: only availability sets; 3.2.0+: sets or zones) ---
    if LV(CALM_VERSION) < LV("3.2.0"):
        choice = click.prompt(
            "\n{}(y/n)".format(highlight_text("Want to add a availabilty set")),
            default="n",
        )
        if choice[0] == "y":
            availability_sets = Obj.availability_sets(account_id, resource_group)
            avl_set_list = list(availability_sets.keys())
            if not avl_set_list:
                click.echo("\n{}".format(highlight_text("No availability_set present")))
            else:
                click.echo("\nChoose from given availabilty set")
                for ind, name in enumerate(avl_set_list):
                    click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name)))
                while True:
                    res = click.prompt(
                        "\nEnter the index of availabilty set", default=1
                    )
                    if (res > len(avl_set_list)) or (res <= 0):
                        click.echo("Invalid index !!! ")
                    else:
                        avl_set = avl_set_list[res - 1]
                        spec["resources"]["availability_set_id"] = availability_sets[
                            avl_set
                        ]
                        click.echo("{} selected".format(highlight_text(avl_set)))
                        break
    else:
        choice = click.prompt(
            "\n{}(y/n)".format(highlight_text("Want to select availability options")),
            default="n",
        )
        if choice[0] == "y":
            availability_options = ["Availability Sets", "Availability Zones"]
            click.echo("\nChoose from given availability options")
            for ind, name in enumerate(availability_options):
                click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name)))
            while True:
                res = click.prompt("\nEnter the index of option", default=1)
                if (res > len(availability_options)) or (res <= 0):
                    click.echo("Invalid index !!! ")
                else:
                    # Spec value is the display name without spaces, e.g. "AvailabilitySets".
                    spec["resources"]["availability_option"] = availability_options[
                        res - 1
                    ].replace(" ", "")
                    click.echo(
                        "{} selected".format(
                            highlight_text(availability_options[res - 1])
                        )
                    )
                    if res == 1:
                        # Availability Sets branch.
                        availability_sets = Obj.availability_sets(
                            account_id, spec["resources"]["resource_group"]
                        )
                        avl_set_list = list(availability_sets.keys())
                        if not avl_set_list:
                            click.echo(
                                "\n{}".format(
                                    highlight_text("No availability_set present")
                                )
                            )
                        else:
                            click.echo("\nChoose from given availabilty set")
                            for ind, name in enumerate(avl_set_list):
                                click.echo(
                                    "\t {}. {}".format(
                                        str(ind + 1), highlight_text(name)
                                    )
                                )
                            while True:
                                res = click.prompt(
                                    "\nEnter the index of availabilty set", default=1
                                )
                                if (res > len(avl_set_list)) or (res <= 0):
                                    click.echo("Invalid index !!! ")
                                else:
                                    avl_set = avl_set_list[res - 1]
                                    spec["resources"][
                                        "availability_set_id"
                                    ] = availability_sets[avl_set]
                                    click.echo(
                                        "{} selected".format(highlight_text(avl_set))
                                    )
                                    break
                    else:
                        # Availability Zones branch.
                        availability_zones = Obj.availability_zones(
                            account_id,
                            spec["resources"]["resource_group"],
                            spec["resources"]["location"],
                        )
                        if not availability_zones:
                            click.echo(
                                "\n{}".format(
                                    highlight_text(
                                        "Selected location does not support Availability Zones"
                                    )
                                )
                            )
                        else:
                            click.echo("\nChoose from the given zones")
                            zones = list(availability_zones.keys())
                            for ind, name in enumerate(zones):
                                click.echo(
                                    "\t {}. {}".format(
                                        str(ind + 1), highlight_text(name)
                                    )
                                )
                            while True:
                                res = click.prompt(
                                    "\nEnter the index of zone", default=1
                                )
                                if (res > len(availability_zones)) or (res <= 0):
                                    click.echo("Invalid index !!! ")
                                else:
                                    click.echo(
                                        "{} selected".format(
                                            highlight_text(zones[res - 1])
                                        )
                                    )
                                    spec["resources"][
                                        "availability_zone"
                                    ] = availability_zones[zones[res - 1]]
                                    break
                    break
    # --- Hardware profile (VM size) ---
    hardware_profiles = Obj.hardware_profiles(account_id, location)
    if not hardware_profiles:
        click.echo("\n{}".format(highlight_text("No hardware profile present")))
    else:
        click.echo("\nChoose from given Hardware Profiles")
        hw_profile_names = list(hardware_profiles.keys())
        for ind, name in enumerate(hw_profile_names):
            click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name)))
        while True:
            res = click.prompt("\nEnter the index of Hardware Profile", default=1)
            if (res > len(hw_profile_names)) or (res <= 0):
                click.echo("Invalid index !!! ")
            else:
                hw_profile = hw_profile_names[res - 1]
                click.echo("{} selected".format(highlight_text(hw_profile)))
                spec["resources"]["hw_profile"] = {
                    "vm_size": hw_profile,
                    "max_data_disk_count": hardware_profiles[hw_profile],
                }
                break
    # --- OS / storage / network profiles (delegated to interactive helpers) ---
    spec["resources"]["os_profile"] = get_os_profile(vm_os)
    spec["resources"]["storage_profile"] = get_storage_profile(
        Obj, account_id, location
    )
    spec["resources"]["nw_profile"] = {}
    spec["resources"]["nw_profile"]["nic_list"] = get_nw_profile(
        Obj, account_id, resource_group, location
    )
    # --- Optional tags ---
    choice = click.prompt(
        "\n{}(y/n)".format(highlight_text("Want to add any tags")), default="n"
    )
    if choice[0] == "y":
        tags = []
        while True:
            key = click.prompt("\n\tKey")
            value = click.prompt("\tValue")
            tag = {"key": key, "value": value}
            tags.append(tag)
            choice = click.prompt(
                "\n{}(y/n)".format(highlight_text("Want to add more tags")), default="n"
            )
            if choice[0] == "n":
                spec["resources"]["tag_list"] = tags
                break
    # Validate and show the final spec as YAML.
    AzureVmProvider.validate_spec(spec)
    click.secho("\nCreate spec for your AZURE VM:\n", underline=True)
    click.echo(highlight_text(yaml.dump(spec, default_flow_style=False)))
def get_os_profile(os_type):
    """Interactively collect the OS profile (vault secrets + per-OS config).

    Args:
        os_type: "Linux" or "Windows"; controls the certificate-store prompt
            and which sub-config helper is used.

    Returns:
        dict with "secrets" plus either "linux_config" or "windows_config".
    """
    click.echo("\n\t\t", nl=False)
    click.secho("OS PROFILE DETAILS", bold=True, underline=True)
    choice = click.prompt(
        "\n{}(y/n)".format(highlight_text("Want to add secrets")), default="n"
    )
    res = {}
    res["secrets"] = []
    # Certificate URLs gathered here are re-offered later for Windows WinRM
    # HTTPS listeners.
    certificate_list = []
    # Note: `choice` is reused by the nested certificate loop and then
    # re-prompted for "Add more secrets", so loop order matters.
    while choice[0] == "y":
        vault_id = click.prompt("\n\tEnter Vault ID ", default="")
        choice = click.prompt(
            "\n{}(y/n)".format(highlight_text("Add Vault Certificate Details")),
            default="n",
        )
        vault_certificates = []
        while choice[0] == "y":
            certificate_store = ""
            certificate_url = click.prompt("\n\tEnter Certificate URL", default="URL")
            if os_type == "Windows":
                # Certificate store only applies to Windows guests.
                certificate_store = click.prompt(
                    "\n\tEnter Certificate Store", default="Store"
                )
            vault_certificates.append(
                {
                    "certificate_url": certificate_url,
                    "certificate_store": certificate_store,
                }
            )
            if certificate_url:
                certificate_list.append(certificate_url)
            choice = click.prompt(
                "\n{}(y/n)".format(highlight_text("Add more certificates")), default="n"
            )
        res["secrets"].append(
            {"source_vault_id": vault_id, "vault_certificates": vault_certificates}
        )
        choice = click.prompt(
            "\n{}(y/n)".format(highlight_text("Add more secrets")), default="n"
        )
    if os_type == "Linux":
        res["linux_config"] = get_linux_config()
    else:
        res["windows_config"] = get_windows_config(certificate_list)
    return res
def get_linux_config():
    """Prompt for a cloud-init script and return the Linux OS config dict."""
    return {"custom_data": click.prompt("\nEnter Cloud Init Script", default="")}
def get_windows_config(certificate_list):
    """Interactively collect the Windows-specific OS profile configuration.

    Args:
        certificate_list: certificate URLs gathered from vault secrets;
            offered as choices for HTTPS WinRM listeners.

    Returns:
        dict with "winrm_listeners", "additional_unattend_content",
        "provision_vm_agent" and "auto_updates" keys.
    """
    provision_vm_agent = click.prompt(
        "\n{}(y/n)".format(highlight_text("Enable Provision Windows Guest Agent")),
        default="n",
    )
    provision_vm_agent = provision_vm_agent[0] == "y"
    auto_updates = click.prompt(
        "\n{}(y/n)".format(highlight_text("Enable Automatic OS Upgrades")), default="n"
    )
    auto_updates = auto_updates[0] == "y"
    unattend_content = []
    choice = click.prompt(
        "\n{}(y/n)".format(highlight_text("Want to add ADDITIONAL UNATTENDED CONTENT")),
        default="n",
    )
    # Copy the module-level constant: the selection loop below pops chosen
    # entries, and aliasing azure.UNATTENDED_SETTINGS directly (as before)
    # mutated the shared list, leaving it depleted for later calls.
    settings = list(azure.UNATTENDED_SETTINGS)
    while (choice[0] == "y") and settings:
        click.echo("\nChoose from given Setting Names")
        setting = ""
        for ind, name in enumerate(settings):
            click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name)))
        while True:
            res = click.prompt("\nEnter the index of Setting", default=1)
            if (res > len(settings)) or (res <= 0):
                click.echo("Invalid index !!! ")
            else:
                setting = settings[res - 1]
                # Each setting may be chosen only once; drop it from the menu.
                settings.pop(res - 1)
                click.echo("{} selected".format(highlight_text(setting)))
                break
        xml_content = click.prompt(
            "\nEnter XML Content(Please use <{}> as the root element)".format(setting),
            default="",
        )
        unattend_content.append({"setting_name": setting, "xml_content": xml_content})
        choice = click.prompt(
            "\n{}(y/n)".format(highlight_text("Want to add more Unattended content")),
            default="n",
        )
    winrm_listeners = []
    choice = click.prompt(
        "\n{}(y/n)".format(highlight_text("Want to add WINRM LISTENERS")), default="n"
    )
    # list() copies the keys here, so popping below is safe for azure.PROTOCOLS.
    protocols = list(azure.PROTOCOLS.keys())
    while (choice[0] == "y") and protocols:
        click.echo("\nChoose from given Protocols")
        protocol = ""
        for ind, name in enumerate(protocols):
            click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name)))
        while True:
            res = click.prompt("\nEnter the index of protocol", default=1)
            if (res > len(protocols)) or (res <= 0):
                click.echo("Invalid index !!! ")
            else:
                protocol = protocols[res - 1]
                protocols.pop(res - 1)
                click.echo("{} selected".format(highlight_text(protocol)))
                break
        if protocol == "HTTPS":
            cert_url = ""
            click.echo("Choose from given certificate URLs")
            for ind, name in enumerate(certificate_list):
                click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name)))
            # NOTE(review): if certificate_list is empty, no index is valid and
            # this loop cannot exit — confirm whether HTTPS should be offered
            # only when certificates exist.
            while True:
                res = click.prompt("\nEnter the index of certificate URL", default=1)
                if (res > len(certificate_list)) or (res <= 0):
                    click.echo("Invalid index !!! ")
                else:
                    cert_url = certificate_list[res - 1]
                    click.echo("{} selected".format(highlight_text(cert_url)))
                    break
            winrm_listeners.append(
                {"protocol": azure.PROTOCOLS[protocol], "certificate_url": cert_url}
            )
        else:
            winrm_listeners.append({"protocol": azure.PROTOCOLS[protocol]})
        choice = click.prompt(
            "\n{}(y/n)".format(highlight_text("Want to add more Winrm Listeners")),
            default="n",
        )
    return {
        "winrm_listeners": winrm_listeners,
        "additional_unattend_content": unattend_content,
        "provision_vm_agent": provision_vm_agent,
        "auto_updates": auto_updates,
    }
def get_storage_profile(azure_obj, account_id, location):
    """Interactively collect the storage profile: image, OS disk, data disks."""
    click.echo("\n\t\t", nl=False)
    click.secho("STORAGE PROFILE DETAILS", bold=True, underline=True)
    click.secho("\n1. VM Image Details", underline=True)
    answer = click.prompt(
        "\n{}(y/n)".format(highlight_text("Want to use custom image")), default="n"
    )
    use_custom_image = answer[0] == "y"
    if use_custom_image:
        vm_image = get_custom_vm_image(azure_obj, account_id, location)
    else:
        vm_image = get_non_custom_vm_image(azure_obj, account_id, location)
    click.secho("\n2. OS Disk Details", underline=True)
    os_disk = get_os_disk(use_custom_image)
    click.secho("\n3. Data Disk Details", underline=True)
    data_disks = get_data_disks()
    return {
        "is_managed": True,
        "os_disk_details": os_disk,
        "data_disk_list": data_disks,
        "image_details": vm_image,
    }
def get_data_disks():
    """Interactively collect zero or more data-disk specs.

    Returns:
        list of dicts with size_in_gb, name, storage_type, caching_type, lun.
    """
    disks = []
    choice = click.prompt(
        "\n{}(y/n)".format(highlight_text("Want to add data disks")), default="n"
    )
    disk_index = 0
    while choice[0] == "y":
        click.echo("\n\t\t", nl=False)
        click.secho("Data-Disk {}".format(disk_index + 1), underline=True)
        # storage_type stays "" when the user declines to pick one.
        storage_type = ""
        # Default name uses Calm runtime macros plus the per-VM disk index.
        disk_name = "data-disk-@@{calm_unique_hash}@@-@@{calm_array_index}@@-" + str(
            disk_index
        )
        disk_name = click.prompt("\nEnter data disk name", default=disk_name)
        # `choice` is reused here for the storage-type question and re-prompted
        # at the bottom of the loop for "more data disks".
        choice = click.prompt(
            "\n{}(y/n)".format(highlight_text("Want to add storage type to disk")),
            default="n",
        )
        if choice[0] == "y":
            storage_types = azure.STORAGE_TYPES
            display_names = list(storage_types.keys())
            click.echo("\nChoose from given storage types")
            for ind, name in enumerate(display_names):
                click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name)))
            while True:
                res = click.prompt("\nEnter the index of storage type", default=1)
                if (res > len(display_names)) or (res <= 0):
                    click.echo("Invalid index !!! ")
                else:
                    storage_type = display_names[res - 1]
                    click.echo("{} selected".format(highlight_text(storage_type)))
                    # Convert display name to the API value.
                    storage_type = storage_types[storage_type]
                    break
        # Cache type is always asked, unlike storage type.
        cache_types = azure.CACHE_TYPES
        display_names = list(cache_types.keys())
        click.echo("\nChoose from given cache types")
        for ind, name in enumerate(display_names):
            click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name)))
        while True:
            res = click.prompt("\nEnter the index of cache type", default=1)
            if (res > len(display_names)) or (res <= 0):
                click.echo("Invalid index !!! ")
            else:
                cache_type = display_names[res - 1]
                click.echo("{} selected".format(highlight_text(cache_type)))
                cache_type = cache_types[cache_type]
                break
        disk_size = click.prompt("\nEnter the size for disk(in GiB)", default=1)
        disk_lun = click.prompt("\nEnter the Disk LUN", default=0)
        disks.append(
            {
                "size_in_gb": disk_size,
                "name": disk_name,
                "storage_type": storage_type,
                "caching_type": cache_type,
                "lun": disk_lun,
            }
        )
        disk_index += 1
        choice = click.prompt(
            "\n{}(y/n)".format(highlight_text("Want to add more data disks")),
            default="n",
        )
    return disks
def get_os_disk(use_custom_image):
    """Interactively collect the OS-disk spec.

    Args:
        use_custom_image: when True the create option is forced to
            DISK_CREATE_OPTIONS["FROMIMAGE"]; otherwise the user picks one.

    Returns:
        dict with name, storage_type, caching_type, create_option.
    """
    disk_create_option = ""
    cache_type = ""
    # storage_type stays "" when the user declines to pick one.
    storage_type = ""
    # Default name uses Calm runtime macros expanded at deploy time.
    disk_name = "os-@@{calm_unique_hash}@@-@@{calm_array_index}@@-disk"
    disk_name = click.prompt("\nEnter os disk name", default=disk_name)
    choice = click.prompt(
        "\n{}(y/n)".format(highlight_text("Want to add storage type to os disk")),
        default="n",
    )
    if choice[0] == "y":
        storage_types = azure.STORAGE_TYPES
        display_names = list(storage_types.keys())
        click.echo("\nChoose from given storage types")
        for ind, name in enumerate(display_names):
            click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name)))
        while True:
            res = click.prompt("\nEnter the index of storage type", default=1)
            if (res > len(display_names)) or (res <= 0):
                click.echo("Invalid index !!! ")
            else:
                storage_type = display_names[res - 1]
                click.echo("{} selected".format(highlight_text(storage_type)))
                # Convert display name to the API value.
                storage_type = storage_types[storage_type]
                break
    # Cache type is always asked, unlike storage type.
    cache_types = azure.CACHE_TYPES
    display_names = list(cache_types.keys())
    click.echo("\nChoose from given cache types")
    for ind, name in enumerate(display_names):
        click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name)))
    while True:
        res = click.prompt("\nEnter the index of cache type", default=1)
        if (res > len(display_names)) or (res <= 0):
            click.echo("Invalid index !!! ")
        else:
            cache_type = display_names[res - 1]
            click.echo("{} selected".format(highlight_text(cache_type)))
            cache_type = cache_types[cache_type]
            break
    if use_custom_image:
        # Custom images only support creating the OS disk from the image.
        disk_create_option = azure.DISK_CREATE_OPTIONS["FROMIMAGE"]
        click.secho(
            "\nNote: In case of custom vm image, Os Disk Create Option : {}".format(
                disk_create_option
            )
        )
    else:
        disk_create_options = azure.DISK_CREATE_OPTIONS
        display_names = list(disk_create_options.keys())
        click.echo("\nChoose from given disk create option")
        for ind, name in enumerate(display_names):
            click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name)))
        while True:
            res = click.prompt("\nEnter the index of disk create option", default=1)
            if (res > len(display_names)) or (res <= 0):
                click.echo("Invalid index !!! ")
            else:
                disk_create_option = display_names[res - 1]
                click.echo("{} selected".format(highlight_text(disk_create_option)))
                disk_create_option = disk_create_options[disk_create_option]
                break
    return {
        "name": disk_name,
        "storage_type": storage_type,
        "caching_type": cache_type,
        "create_option": disk_create_option,
    }
def get_non_custom_vm_image(azure_obj, account_id, location):
    """Interactively pick a marketplace image: publisher, offer, sku, version.

    Each step filters the next; an empty choice list leaves that field "".
    """

    def _choose(options, label):
        # Generic index-based picker shared by all four steps; returns ""
        # when there is nothing to pick (matching the original fall-through).
        if not options:
            click.echo("\n{}".format(highlight_text("No {} present".format(label))))
            return ""
        click.echo("\nChoose from given {}".format(label))
        for ind, name in enumerate(options):
            click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name)))
        while True:
            res = click.prompt("\nEnter the index of {}".format(label), default=1)
            if (res > len(options)) or (res <= 0):
                click.echo("Invalid index !!! ")
            else:
                selection = options[res - 1]
                click.echo("{} selected".format(highlight_text(selection)))
                return selection

    image_publisher = _choose(
        azure_obj.image_publishers(account_id, location), "image publisher"
    )
    image_offer = _choose(
        azure_obj.image_offers(account_id, location, image_publisher), "image offer"
    )
    image_sku = _choose(
        azure_obj.image_skus(account_id, location, image_publisher, image_offer),
        "image sku",
    )
    image_version = _choose(
        azure_obj.image_versions(
            account_id, location, image_publisher, image_offer, image_sku
        ),
        "image version",
    )
    return {
        "sku": image_sku,
        "publisher": image_publisher,
        "offer": image_offer,
        "version": image_version,
        "use_custom_image": False,
    }
def get_custom_vm_image(azure_obj, account_id, location):
    """Interactively pick a custom (subscription) image; returns its id."""
    image_id = ""
    images = azure_obj.custom_images(account_id, location)
    image_names = list(images.keys())
    if not image_names:
        click.echo("\n{}".format(highlight_text("No custom image present")))
    else:
        click.echo("\nChoose from given custom images")
        for ind, name in enumerate(image_names):
            click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name)))
        while True:
            res = click.prompt("\nEnter the index of custom image", default=1)
            if (res > len(image_names)) or (res <= 0):
                click.echo("Invalid index !!! ")
            else:
                chosen = image_names[res - 1]
                image_id = images[chosen]
                click.echo("{} selected".format(highlight_text(chosen)))
                break
    return {"source_image_id": image_id, "use_custom_image": True}
def get_nw_profile(azure_obj, account_id, resource_grp, location):
    """Interactively collect zero or more NIC specs for the network profile.

    Returns:
        list of dicts with nsg_name, vnet_name, private_ip_info, nic_name,
        subnet_name and public_ip_info keys.
    """
    click.echo("\n\t\t", nl=False)
    click.secho("NETWORK PROFILE DETAILS", bold=True, underline=True)
    nics = []
    choice = click.prompt(
        "\n{}(y/n)".format(highlight_text("Want to add NICs")), default="n"
    )
    nic_index = 0
    while choice[0] == "y":
        click.echo("\n\t\t", nl=False)
        click.secho("Nic {}".format(nic_index + 1), underline=True)
        # Default name uses Calm runtime macros plus the per-VM NIC index.
        nic_name = "nic-@@{calm_unique_hash}@@-@@{calm_array_index}@@-" + str(nic_index)
        nic_name = click.prompt("\nEnter nic name", default=nic_name)
        # Empty string means "not selected" (no options available).
        security_group = ""
        virtual_network = ""
        subnet = ""
        # --- Network security group ---
        security_groups = azure_obj.security_groups(account_id, resource_grp, location)
        if not security_groups:
            click.echo("\n{}".format(highlight_text("No security group present")))
        else:
            click.echo("\nChoose from given security groups")
            for ind, name in enumerate(security_groups):
                click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name)))
            while True:
                res = click.prompt("\nEnter the index of security group", default=1)
                if (res > len(security_groups)) or (res <= 0):
                    click.echo("Invalid index !!! ")
                else:
                    security_group = security_groups[res - 1]
                    click.echo("{} selected".format(highlight_text(security_group)))
                    break
        # --- Virtual network ---
        virtual_networks = azure_obj.virtual_networks(
            account_id, resource_grp, location
        )
        if not virtual_networks:
            click.echo("\n{}".format(highlight_text("No virtual network present")))
        else:
            # NOTE(review): "networtks" typo in the user-facing message below.
            click.echo("\nChoose from given virtual networtks")
            for ind, name in enumerate(virtual_networks):
                click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name)))
            while True:
                res = click.prompt("\nEnter the index of virtual network", default=1)
                if (res > len(virtual_networks)) or (res <= 0):
                    click.echo("Invalid index !!! ")
                else:
                    virtual_network = virtual_networks[res - 1]
                    click.echo("{} selected".format(highlight_text(virtual_network)))
                    break
        # --- Subnet (scoped to the chosen virtual network) ---
        subnets = azure_obj.subnets(account_id, resource_grp, virtual_network)
        if not subnets:
            click.echo("\n{}".format(highlight_text("No subnet present")))
        else:
            click.echo("\nChoose from given subnets")
            for ind, name in enumerate(subnets):
                click.echo("\t {}. {}".format(str(ind + 1), highlight_text(name)))
            while True:
                res = click.prompt("\nEnter the index of subnet", default=1)
                if (res > len(subnets)) or (res <= 0):
                    click.echo("Invalid index !!! ")
                else:
                    subnet = subnets[res - 1]
                    click.echo("{} selected".format(highlight_text(subnet)))
                    break
        # --- IP configuration ---
        click.secho("\nPublic IP Config", underline=True)
        public_ip_info = get_public_ip_info(nic_index)
        click.secho("\nPrivate IP Config", underline=True)
        private_ip_info = get_private_ip_info()
        nics.append(
            {
                "nsg_name": security_group,
                "vnet_name": virtual_network,
                "private_ip_info": private_ip_info,
                "nic_name": nic_name,
                "subnet_name": subnet,
                "public_ip_info": public_ip_info,
            }
        )
        nic_index += 1
        choice = click.prompt(
            "\n{}(y/n)".format(highlight_text("Want to add more nics")), default="n"
        )
    return nics
def get_public_ip_info(nic_index=0):
    """Prompt for a public-IP config (name, DNS label, allocation method)."""
    # Defaults embed Calm runtime macros plus the per-VM NIC index.
    default_ip_name = "public-ip-@@{calm_unique_hash}@@-@@{calm_array_index}@@-" + str(
        nic_index
    )
    ip_name = click.prompt("\nEnter public ip name", default=default_ip_name)
    default_dns = "dns-@@{calm_unique_hash}@@-@@{calm_array_index}@@-" + str(nic_index)
    dns_label = click.prompt("\nEnter DNS Label", default=default_dns)
    methods = azure.ALLOCATION_METHODS
    click.echo("\nChoose from given ip allocation method")
    for idx, method in enumerate(methods):
        click.echo("\t {}. {}".format(str(idx + 1), highlight_text(method)))
    while True:
        selected = click.prompt("\nEnter the index of allocation methods", default=1)
        if (selected > len(methods)) or (selected <= 0):
            click.echo("Invalid index !!! ")
        else:
            allocation_method = methods[selected - 1]
            click.echo("{} selected".format(highlight_text(allocation_method)))
            break
    return {
        "ip_allocation_method": allocation_method,
        "dns_label": dns_label,
        "ip_name": ip_name,
    }
def get_private_ip_info():
    """Prompt for a private-IP config; asks for an address only when Static."""
    selected_method = ""
    ip_address = ""
    methods = azure.ALLOCATION_METHODS
    click.echo("\nChoose from given ip allocation method")
    for idx, method in enumerate(methods):
        click.echo("\t {}. {}".format(str(idx + 1), highlight_text(method)))
    while True:
        selected = click.prompt("\nEnter the index of allocation methods", default=1)
        if (selected > len(methods)) or (selected <= 0):
            click.echo("Invalid index !!! ")
        else:
            selected_method = methods[selected - 1]
            click.echo("{} selected".format(highlight_text(selected_method)))
            break
    if selected_method == "Static":
        ip_address = click.prompt("\nEnter IP Address", default="")
    return {"ip_allocation_method": selected_method, "ip_address": ip_address}
| true | true |
f7ff0c4b7b09af616779ab7b5e32f33941ef961b | 1,509 | py | Python | farmguru/health/migrations/0002_auto_20150409_0841.py | savioabuga/farmguru | 41d6b357a64e69f510070a4acf0a89053b03f80e | [
"BSD-3-Clause"
] | null | null | null | farmguru/health/migrations/0002_auto_20150409_0841.py | savioabuga/farmguru | 41d6b357a64e69f510070a4acf0a89053b03f80e | [
"BSD-3-Clause"
] | null | null | null | farmguru/health/migrations/0002_auto_20150409_0841.py | savioabuga/farmguru | 41d6b357a64e69f510070a4acf0a89053b03f80e | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
import model_utils.fields
class Migration(migrations.Migration):
dependencies = [
('groups', '0002_pasture'),
('health', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='AnimalGroupTreatment',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('type', models.CharField(max_length=20, choices=[(b'vaccination', 'Vaccination')])),
('date', models.DateField(null=True, blank=True)),
('description', models.TextField(blank=True)),
('notes', models.TextField(blank=True)),
('animal', models.ForeignKey(to='groups.AnimalGroup')),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
migrations.RemoveField(
model_name='grouptreatment',
name='animal',
),
migrations.DeleteModel(
name='GroupTreatment',
),
]
| 35.928571 | 147 | 0.587806 |
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
import model_utils.fields
class Migration(migrations.Migration):
dependencies = [
('groups', '0002_pasture'),
('health', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='AnimalGroupTreatment',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('type', models.CharField(max_length=20, choices=[(b'vaccination', 'Vaccination')])),
('date', models.DateField(null=True, blank=True)),
('description', models.TextField(blank=True)),
('notes', models.TextField(blank=True)),
('animal', models.ForeignKey(to='groups.AnimalGroup')),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
migrations.RemoveField(
model_name='grouptreatment',
name='animal',
),
migrations.DeleteModel(
name='GroupTreatment',
),
]
| true | true |
f7ff0c4d80d5303b76e53d98529529995f0ec483 | 1,190 | py | Python | web_app.py | palolo02/web-scraping-challenge | 1ba3235fc5dd29a8e85b58c3e1dca94c15b2e1ad | [
"ADSL"
] | null | null | null | web_app.py | palolo02/web-scraping-challenge | 1ba3235fc5dd29a8e85b58c3e1dca94c15b2e1ad | [
"ADSL"
] | null | null | null | web_app.py | palolo02/web-scraping-challenge | 1ba3235fc5dd29a8e85b58c3e1dca94c15b2e1ad | [
"ADSL"
] | null | null | null | # Paolo Vega
# SQLAlchemy Challenge
# Bootcamp
# Versión 1.0.0 May-24-2020
# Versión 1.0.1 May-24-2020
# Versión 1.0.2 May-24-2020
#################################################
# Import Modules
#################################################
from flask import Flask
from flask import render_template
from flask import redirect
import pymongo
import scrape_mars as sm
#################################################
# DB Connection
#################################################
app = Flask(__name__)
url = f'mongodb://localhost:27017/news_db'
#################################################
# Flask Routes
#################################################
@app.route("/")
def home():
print("======================================")
conn = url
client = pymongo.MongoClient(conn)
# Define database and collection
db = client.news_web
collection = db.items
print(collection)
mars_data = collection.find_one()
return render_template("index.html", data = mars_data)
@app.route("/scrape")
def scrape():
# Add Mongo Validation
sm.scrape_info()
return redirect("/", code=302)
if __name__ == "__main__":
app.run(debug=True) | 23.333333 | 58 | 0.497479 | true | true | |
f7ff0d752fa5d7ed8297479d22ec4e804e7964a6 | 3,738 | py | Python | pyansiwrapper/core/task_executor.py | stonelake/pyansiwrapper | 147a979f6ec68b270f0855b8dea99b4f8fd2ef64 | [
"MIT"
] | null | null | null | pyansiwrapper/core/task_executor.py | stonelake/pyansiwrapper | 147a979f6ec68b270f0855b8dea99b4f8fd2ef64 | [
"MIT"
] | null | null | null | pyansiwrapper/core/task_executor.py | stonelake/pyansiwrapper | 147a979f6ec68b270f0855b8dea99b4f8fd2ef64 | [
"MIT"
] | null | null | null | from collections import namedtuple
import ansible
from ansible.parsing.dataloader import DataLoader
from ansible.utils.display import Display
from ansible.vars import VariableManager
from ansible.inventory import Inventory
from ansible.playbook.play import Play
from ansible.executor.task_queue_manager import TaskQueueManager
class TaskExecutor(object):
def __init__(self, hosts, inventory_file=None, verbose=4):
self.verbose = verbose
self.inventory_file = inventory_file
self.hosts = hosts
self.tasks = []
def add_tasks(self, task_dicts):
self.tasks.extend(task_dicts)
def clear_tasks(self):
self.tasks = []
def run(self):
display = Display(verbosity=self.verbose)
import __main__ as main
setattr(main, "display", display)
default_options = {'subset': None, 'ask_pass': False,
'listtags': None,
'become_user': 'root', 'sudo': False,
'private_key_file': None,
'syntax': None, 'skip_tags': None, 'diff': False,
'sftp_extra_args': '', 'check': False,
'force_handlers': False,
'remote_user': None, 'become_method': 'sudo',
'vault_password_file': None, 'listtasks': None,
'output_file': None, 'ask_su_pass': False,
'new_vault_password_file': None,
'listhosts': None, 'ssh_extra_args': '',
'tags': 'all', 'become_ask_pass': False,
'start_at_task': None,
'flush_cache': None, 'step': None,
'module_path': None,
'su_user': None, 'ask_sudo_pass': False,
'su': False,
'scp_extra_args': '', 'connection': 'smart',
'ask_vault_pass': False, 'timeout': 30,
'become': False,
'sudo_user': None, 'ssh_common_args': ''}
default_options.update(
verbosity=self.verbose,
forks=ansible.constants.DEFAULT_FORKS,
remote_user=ansible.constants.DEFAULT_REMOTE_USER,
private_key_file=ansible.constants.DEFAULT_PRIVATE_KEY_FILE,
)
options = namedtuple('Options', default_options.keys())(
**default_options)
# initialize needed objects
variable_manager = VariableManager()
loader = DataLoader()
passwords = dict(vault_pass='secret')
# create inventory and pass to var manager
inventory = Inventory(loader=loader,
variable_manager=variable_manager,
host_list=self.inventory_file)
variable_manager.set_inventory(inventory)
# create play with tasks
play_source = dict(
name="Ansible AdHoc Play",
hosts=self.hosts,
tasks=self.tasks
)
play = Play().load(play_source, variable_manager=variable_manager,
loader=loader)
# actually run it
tqm = None
try:
tqm = TaskQueueManager(
inventory=inventory,
variable_manager=variable_manager,
loader=loader,
options=options,
passwords=passwords,
stdout_callback='default',
)
result = tqm.run(play)
return result
finally:
if tqm is not None:
tqm.cleanup()
| 37.38 | 76 | 0.53344 | from collections import namedtuple
import ansible
from ansible.parsing.dataloader import DataLoader
from ansible.utils.display import Display
from ansible.vars import VariableManager
from ansible.inventory import Inventory
from ansible.playbook.play import Play
from ansible.executor.task_queue_manager import TaskQueueManager
class TaskExecutor(object):
def __init__(self, hosts, inventory_file=None, verbose=4):
self.verbose = verbose
self.inventory_file = inventory_file
self.hosts = hosts
self.tasks = []
def add_tasks(self, task_dicts):
self.tasks.extend(task_dicts)
def clear_tasks(self):
self.tasks = []
def run(self):
display = Display(verbosity=self.verbose)
import __main__ as main
setattr(main, "display", display)
default_options = {'subset': None, 'ask_pass': False,
'listtags': None,
'become_user': 'root', 'sudo': False,
'private_key_file': None,
'syntax': None, 'skip_tags': None, 'diff': False,
'sftp_extra_args': '', 'check': False,
'force_handlers': False,
'remote_user': None, 'become_method': 'sudo',
'vault_password_file': None, 'listtasks': None,
'output_file': None, 'ask_su_pass': False,
'new_vault_password_file': None,
'listhosts': None, 'ssh_extra_args': '',
'tags': 'all', 'become_ask_pass': False,
'start_at_task': None,
'flush_cache': None, 'step': None,
'module_path': None,
'su_user': None, 'ask_sudo_pass': False,
'su': False,
'scp_extra_args': '', 'connection': 'smart',
'ask_vault_pass': False, 'timeout': 30,
'become': False,
'sudo_user': None, 'ssh_common_args': ''}
default_options.update(
verbosity=self.verbose,
forks=ansible.constants.DEFAULT_FORKS,
remote_user=ansible.constants.DEFAULT_REMOTE_USER,
private_key_file=ansible.constants.DEFAULT_PRIVATE_KEY_FILE,
)
options = namedtuple('Options', default_options.keys())(
**default_options)
variable_manager = VariableManager()
loader = DataLoader()
passwords = dict(vault_pass='secret')
inventory = Inventory(loader=loader,
variable_manager=variable_manager,
host_list=self.inventory_file)
variable_manager.set_inventory(inventory)
play_source = dict(
name="Ansible AdHoc Play",
hosts=self.hosts,
tasks=self.tasks
)
play = Play().load(play_source, variable_manager=variable_manager,
loader=loader)
tqm = None
try:
tqm = TaskQueueManager(
inventory=inventory,
variable_manager=variable_manager,
loader=loader,
options=options,
passwords=passwords,
stdout_callback='default',
)
result = tqm.run(play)
return result
finally:
if tqm is not None:
tqm.cleanup()
| true | true |
f7ff0e630a6b36b12eea97a51dcae73edf1abd36 | 1,475 | py | Python | api/helpers/search.py | kanav-mehra/solve-iwmi | d1db518a71f3343f39bfa14eb9234b033e7335eb | [
"MIT"
] | 1 | 2021-05-19T16:55:12.000Z | 2021-05-19T16:55:12.000Z | api/helpers/search.py | kanav-mehra/solve-iwmi | d1db518a71f3343f39bfa14eb9234b033e7335eb | [
"MIT"
] | null | null | null | api/helpers/search.py | kanav-mehra/solve-iwmi | d1db518a71f3343f39bfa14eb9234b033e7335eb | [
"MIT"
] | null | null | null | import sys
from pprint import pprint
from database import es
from helpers.filters import createQueryFilters
def createTableRows(filters):
query = createQueryFilters(filters)
body={
'query':query
}
if filters['sort'] !='id':
body['sort'] = [
{filters['sort']:{"order":filters["direction"]}},
"_score"
]
body['size'] = filters['size']*2
body['from'] = filters['from']
if 'search' in filters and filters['search']:
query['bool']['must'].append({
"match": {
"is_retweet": {
"query": False
}
}
})
body['highlight'] = {
"pre_tags" : ["<mark><b>"],
"post_tags" : ["</b></mark>"],
"fragment_size":500,
"fields": {
"full_text_trans": {
"highlight_query": {
"bool": {
"must": [{
"match": {
"full_text_trans": {
"query": filters['search']
}
}
}]
}
}
}
}
}
sys.stdout.flush()
rows = es.search(index = 'twitter',body=body)
return rows['hits'] | 25.877193 | 70 | 0.364068 | import sys
from pprint import pprint
from database import es
from helpers.filters import createQueryFilters
def createTableRows(filters):
query = createQueryFilters(filters)
body={
'query':query
}
if filters['sort'] !='id':
body['sort'] = [
{filters['sort']:{"order":filters["direction"]}},
"_score"
]
body['size'] = filters['size']*2
body['from'] = filters['from']
if 'search' in filters and filters['search']:
query['bool']['must'].append({
"match": {
"is_retweet": {
"query": False
}
}
})
body['highlight'] = {
"pre_tags" : ["<mark><b>"],
"post_tags" : ["</b></mark>"],
"fragment_size":500,
"fields": {
"full_text_trans": {
"highlight_query": {
"bool": {
"must": [{
"match": {
"full_text_trans": {
"query": filters['search']
}
}
}]
}
}
}
}
}
sys.stdout.flush()
rows = es.search(index = 'twitter',body=body)
return rows['hits'] | true | true |
f7ff0e7764ef9bc42482fe9df04c06eaacb2668e | 1,992 | py | Python | source/webanalyzer.py | hrbolek/func2pipe | 9e8d56239382d06af044f0f020547283444390a4 | [
"MIT"
] | 1 | 2020-06-01T21:12:19.000Z | 2020-06-01T21:12:19.000Z | source/webanalyzer.py | hrbolek/func2pipe | 9e8d56239382d06af044f0f020547283444390a4 | [
"MIT"
] | null | null | null | source/webanalyzer.py | hrbolek/func2pipe | 9e8d56239382d06af044f0f020547283444390a4 | [
"MIT"
] | null | null | null | import re
import os
import io
def webdelay(urlpart, minseconds, maxseconds):
def inner(func):
def result(url):
if (urlpart in url):
waitfor = random.randrange(minseconds, maxseconds)
time.sleep(waitfor)
return func(url)
return result
return inner
def filecache(basedir, encoding = 'utf-8'):
def inner(func):
def result(url):
filename = url.replace(':', '_').replace('/', '_').replace('?', '_').replace('$', '_').replace('*', '_').replace('&', '_')
fullFilename = basedir + filename
cacheexist = False
if (os.path.isfile(fullFilename)):
cacheexist = True
html = ""
if (cacheexist):
file = io.open(fullFilename, "r", encoding=encoding)
html = file.read()
file.close()
else:
html = func(url)
file = io.open(fullFilename, "w", encoding=encoding)
file.write(html)
file.close()
return html
return result
return inner
def html_analyze(pageContent, patternList):
result = {}
for pat in patternList:
currentName = pat["name"]
currentPattern = pat["pattern"]
currentSaveMulti = pat["saveMulti"]
currentValue = re.findall(currentPattern, pageContent)
if (type(currentName) == type(['b'])):
#defined multiplae names
index = 0;
for name in currentName:
result[name] = currentValue[index]
index = index + 1
else:
#defined single name
if len(currentValue) > 0:
if currentSaveMulti:
currentValue = currentValue
else:
currentValue = currentValue[0]
else:
currentValue = ""
result[currentName] = currentValue
return result
| 31.619048 | 134 | 0.515562 | import re
import os
import io
def webdelay(urlpart, minseconds, maxseconds):
def inner(func):
def result(url):
if (urlpart in url):
waitfor = random.randrange(minseconds, maxseconds)
time.sleep(waitfor)
return func(url)
return result
return inner
def filecache(basedir, encoding = 'utf-8'):
def inner(func):
def result(url):
filename = url.replace(':', '_').replace('/', '_').replace('?', '_').replace('$', '_').replace('*', '_').replace('&', '_')
fullFilename = basedir + filename
cacheexist = False
if (os.path.isfile(fullFilename)):
cacheexist = True
html = ""
if (cacheexist):
file = io.open(fullFilename, "r", encoding=encoding)
html = file.read()
file.close()
else:
html = func(url)
file = io.open(fullFilename, "w", encoding=encoding)
file.write(html)
file.close()
return html
return result
return inner
def html_analyze(pageContent, patternList):
result = {}
for pat in patternList:
currentName = pat["name"]
currentPattern = pat["pattern"]
currentSaveMulti = pat["saveMulti"]
currentValue = re.findall(currentPattern, pageContent)
if (type(currentName) == type(['b'])):
index = 0;
for name in currentName:
result[name] = currentValue[index]
index = index + 1
else:
if len(currentValue) > 0:
if currentSaveMulti:
currentValue = currentValue
else:
currentValue = currentValue[0]
else:
currentValue = ""
result[currentName] = currentValue
return result
| true | true |
f7ff0f079284ec661e60c8dcc5b12c3dfca11e78 | 680 | py | Python | var/spack/repos/builtin/packages/r-pfam-db/package.py | nkianggiss/spack | 3477d3375142a30f5714bb5966a6d8bb22c33c06 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 3 | 2019-06-27T13:26:50.000Z | 2019-07-01T16:24:54.000Z | var/spack/repos/builtin/packages/r-pfam-db/package.py | openbiox/spack | bb6ec7fb40c14b37e094a860e3625af53f633174 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 75 | 2016-07-27T11:43:00.000Z | 2020-12-08T15:56:53.000Z | var/spack/repos/builtin/packages/r-pfam-db/package.py | openbiox/spack | bb6ec7fb40c14b37e094a860e3625af53f633174 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 8 | 2015-10-16T13:51:49.000Z | 2021-10-18T13:58:03.000Z | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RPfamDb(RPackage):
"""A set of protein ID mappings for PFAM assembled using data from
public repositories."""
homepage = "https://www.bioconductor.org/packages/PFAM.db/"
url = "https://www.bioconductor.org/packages/3.5/data/annotation/src/contrib/PFAM.db_3.4.1.tar.gz"
version('3.4.1', '65ed35887ecc44f5ac9f9c8563e03f44')
depends_on('r@3.4.0:3.4.9', when='@3.4.1')
depends_on('r-annotationdbi', type=('build', 'run'))
| 34 | 107 | 0.707353 |
from spack import *
class RPfamDb(RPackage):
homepage = "https://www.bioconductor.org/packages/PFAM.db/"
url = "https://www.bioconductor.org/packages/3.5/data/annotation/src/contrib/PFAM.db_3.4.1.tar.gz"
version('3.4.1', '65ed35887ecc44f5ac9f9c8563e03f44')
depends_on('r@3.4.0:3.4.9', when='@3.4.1')
depends_on('r-annotationdbi', type=('build', 'run'))
| true | true |
f7ff0f76aad12c68577a42277b18ac5fcdc4010a | 5,048 | py | Python | src/core/depth_map_utils.py | salarim/scene_vis | 8e146195599aaa7598137dd223e9ce2b9e0b25a3 | [
"MIT"
] | 33 | 2019-07-16T19:52:43.000Z | 2022-03-17T15:30:59.000Z | src/core/depth_map_utils.py | salarim/scene_vis | 8e146195599aaa7598137dd223e9ce2b9e0b25a3 | [
"MIT"
] | null | null | null | src/core/depth_map_utils.py | salarim/scene_vis | 8e146195599aaa7598137dd223e9ce2b9e0b25a3 | [
"MIT"
] | 8 | 2019-07-26T03:24:35.000Z | 2022-03-02T01:51:00.000Z | import cv2
import numpy as np
import png
from datasets.kitti.obj import calib_utils
def read_depth_map(depth_map_path):
depth_image = cv2.imread(depth_map_path, cv2.IMREAD_ANYDEPTH)
depth_map = depth_image / 256.0
# Discard depths less than 10cm from the camera
depth_map[depth_map < 0.1] = 0.0
return depth_map.astype(np.float32)
def save_depth_map(save_path, depth_map,
version='cv2', png_compression=3):
"""Saves depth map to disk as uint16 png
Args:
save_path: path to save depth map
depth_map: depth map numpy array [h w]
version: 'cv2' or 'pypng'
png_compression: Only when version is 'cv2', sets png compression level.
A lower value is faster with larger output,
a higher value is slower with smaller output.
"""
# Convert depth map to a uint16 png
depth_image = (depth_map * 256.0).astype(np.uint16)
if version == 'cv2':
ret = cv2.imwrite(save_path, depth_image, [cv2.IMWRITE_PNG_COMPRESSION, png_compression])
if not ret:
raise RuntimeError('Could not save depth map')
elif version == 'pypng':
with open(save_path, 'wb') as f:
depth_image = (depth_map * 256.0).astype(np.uint16)
writer = png.Writer(width=depth_image.shape[1],
height=depth_image.shape[0],
bitdepth=16,
greyscale=True)
writer.write(f, depth_image)
else:
raise ValueError('Invalid version', version)
def get_depth_point_cloud(depth_map, cam_p, min_v=0, flatten=True, in_cam0_frame=True):
"""Calculates the point cloud from a depth map given the camera parameters
Args:
depth_map: depth map
cam_p: camera p matrix
min_v: amount to crop off the top
flatten: flatten point cloud to (3, N), otherwise return the point cloud
in xyz_map (3, H, W) format. (H, W, 3) points can be retrieved using
xyz_map.transpose(1, 2, 0)
in_cam0_frame: (optional) If True, shifts the point cloud into cam_0 frame.
If False, returns the point cloud in the provided camera frame
Returns:
point_cloud: (3, N) point cloud
"""
depth_map_shape = depth_map.shape[0:2]
if min_v > 0:
# Crop top part
depth_map[0:min_v] = 0.0
xx, yy = np.meshgrid(
np.linspace(0, depth_map_shape[1] - 1, depth_map_shape[1]),
np.linspace(0, depth_map_shape[0] - 1, depth_map_shape[0]))
# Calibration centre x, centre y, focal length
centre_u = cam_p[0, 2]
centre_v = cam_p[1, 2]
focal_length = cam_p[0, 0]
i = xx - centre_u
j = yy - centre_v
# Similar triangles ratio (x/i = d/f)
ratio = depth_map / focal_length
x = i * ratio
y = j * ratio
z = depth_map
if in_cam0_frame:
# Return the points in cam_0 frame
# Get x offset (b_cam) from calibration: cam_p[0, 3] = (-f_x * b_cam)
x_offset = -cam_p[0, 3] / focal_length
valid_pixel_mask = depth_map > 0
x[valid_pixel_mask] += x_offset
# Return the points in the provided camera frame
point_cloud_map = np.asarray([x, y, z])
if flatten:
point_cloud = np.reshape(point_cloud_map, (3, -1))
return point_cloud.astype(np.float32)
else:
return point_cloud_map.astype(np.float32)
def project_depths(point_cloud, cam_p, image_shape, max_depth=100.0):
"""Projects a point cloud into image space and saves depths per pixel.
Args:
point_cloud: (3, N) Point cloud in cam0
cam_p: camera projection matrix
image_shape: image shape [h, w]
max_depth: optional, max depth for inversion
Returns:
projected_depths: projected depth map
"""
# Only keep points in front of the camera
all_points = point_cloud.T
# Save the depth corresponding to each point
points_in_img = calib_utils.project_pc_to_image(all_points.T, cam_p)
points_in_img_int = np.int32(np.round(points_in_img))
# Remove points outside image
valid_indices = \
(points_in_img_int[0] >= 0) & (points_in_img_int[0] < image_shape[1]) & \
(points_in_img_int[1] >= 0) & (points_in_img_int[1] < image_shape[0])
all_points = all_points[valid_indices]
points_in_img_int = points_in_img_int[:, valid_indices]
# Invert depths
all_points[:, 2] = max_depth - all_points[:, 2]
# Only save valid pixels, keep closer points when overlapping
projected_depths = np.zeros(image_shape)
valid_indices = [points_in_img_int[1], points_in_img_int[0]]
projected_depths[valid_indices] = [
max(projected_depths[
points_in_img_int[1, idx], points_in_img_int[0, idx]],
all_points[idx, 2])
for idx in range(points_in_img_int.shape[1])]
projected_depths[valid_indices] = \
max_depth - projected_depths[valid_indices]
return projected_depths.astype(np.float32)
| 32.152866 | 97 | 0.642235 | import cv2
import numpy as np
import png
from datasets.kitti.obj import calib_utils
def read_depth_map(depth_map_path):
depth_image = cv2.imread(depth_map_path, cv2.IMREAD_ANYDEPTH)
depth_map = depth_image / 256.0
depth_map[depth_map < 0.1] = 0.0
return depth_map.astype(np.float32)
def save_depth_map(save_path, depth_map,
version='cv2', png_compression=3):
depth_image = (depth_map * 256.0).astype(np.uint16)
if version == 'cv2':
ret = cv2.imwrite(save_path, depth_image, [cv2.IMWRITE_PNG_COMPRESSION, png_compression])
if not ret:
raise RuntimeError('Could not save depth map')
elif version == 'pypng':
with open(save_path, 'wb') as f:
depth_image = (depth_map * 256.0).astype(np.uint16)
writer = png.Writer(width=depth_image.shape[1],
height=depth_image.shape[0],
bitdepth=16,
greyscale=True)
writer.write(f, depth_image)
else:
raise ValueError('Invalid version', version)
def get_depth_point_cloud(depth_map, cam_p, min_v=0, flatten=True, in_cam0_frame=True):
depth_map_shape = depth_map.shape[0:2]
if min_v > 0:
depth_map[0:min_v] = 0.0
xx, yy = np.meshgrid(
np.linspace(0, depth_map_shape[1] - 1, depth_map_shape[1]),
np.linspace(0, depth_map_shape[0] - 1, depth_map_shape[0]))
centre_u = cam_p[0, 2]
centre_v = cam_p[1, 2]
focal_length = cam_p[0, 0]
i = xx - centre_u
j = yy - centre_v
ratio = depth_map / focal_length
x = i * ratio
y = j * ratio
z = depth_map
if in_cam0_frame:
x_offset = -cam_p[0, 3] / focal_length
valid_pixel_mask = depth_map > 0
x[valid_pixel_mask] += x_offset
point_cloud_map = np.asarray([x, y, z])
if flatten:
point_cloud = np.reshape(point_cloud_map, (3, -1))
return point_cloud.astype(np.float32)
else:
return point_cloud_map.astype(np.float32)
def project_depths(point_cloud, cam_p, image_shape, max_depth=100.0):
all_points = point_cloud.T
points_in_img = calib_utils.project_pc_to_image(all_points.T, cam_p)
points_in_img_int = np.int32(np.round(points_in_img))
valid_indices = \
(points_in_img_int[0] >= 0) & (points_in_img_int[0] < image_shape[1]) & \
(points_in_img_int[1] >= 0) & (points_in_img_int[1] < image_shape[0])
all_points = all_points[valid_indices]
points_in_img_int = points_in_img_int[:, valid_indices]
all_points[:, 2] = max_depth - all_points[:, 2]
projected_depths = np.zeros(image_shape)
valid_indices = [points_in_img_int[1], points_in_img_int[0]]
projected_depths[valid_indices] = [
max(projected_depths[
points_in_img_int[1, idx], points_in_img_int[0, idx]],
all_points[idx, 2])
for idx in range(points_in_img_int.shape[1])]
projected_depths[valid_indices] = \
max_depth - projected_depths[valid_indices]
return projected_depths.astype(np.float32)
| true | true |
f7ff0faec391bb87ffd7d16b6284a8ca90f7521b | 22,903 | py | Python | tests/test_dbapi20.py | Hema-Mathiyazhagan/nzpy | e71bf64f88dcfe5211c5973fd087721f3449006e | [
"Apache-2.0"
] | null | null | null | tests/test_dbapi20.py | Hema-Mathiyazhagan/nzpy | e71bf64f88dcfe5211c5973fd087721f3449006e | [
"Apache-2.0"
] | null | null | null | tests/test_dbapi20.py | Hema-Mathiyazhagan/nzpy | e71bf64f88dcfe5211c5973fd087721f3449006e | [
"Apache-2.0"
] | null | null | null | import time
import warnings
import nzpy
import pytest
''' Python DB API 2.0 driver compliance unit test suite.
This software is Public Domain and may be used without restrictions.
"Now we have booze and barflies entering the discussion, plus rumours of
DBAs on drugs... and I won't tell you what flashes through my mind each
time I read the subject line with 'Anal Compliance' in it. All around
this is turning out to be a thoroughly unwholesome unit test."
-- Ian Bicking
'''
__rcs_id__ = '$Id: dbapi20.py,v 1.10 2003/10/09 03:14:14 zenzen Exp $'
__version__ = '$Revision: 1.10 $'[11:-2]
__author__ = 'Stuart Bishop <zen@shangri-la.dropbear.id.au>'
# $Log: dbapi20.py,v $
# Revision 1.10 2003/10/09 03:14:14 zenzen
# Add test for DB API 2.0 optional extension, where database exceptions
# are exposed as attributes on the Connection object.
#
# Revision 1.9 2003/08/13 01:16:36 zenzen
# Minor tweak from Stefan Fleiter
#
# Revision 1.8 2003/04/10 00:13:25 zenzen
# Changes, as per suggestions by M.-A. Lemburg
# - Add a table prefix, to ensure namespace collisions can always be avoided
#
# Revision 1.7 2003/02/26 23:33:37 zenzen
# Break out DDL into helper functions, as per request by David Rushby
#
# Revision 1.6 2003/02/21 03:04:33 zenzen
# Stuff from Henrik Ekelund:
# added test_None
# added test_nextset & hooks
#
# Revision 1.5 2003/02/17 22:08:43 zenzen
# Implement suggestions and code from Henrik Eklund - test that
# cursor.arraysize defaults to 1 & generic cursor.callproc test added
#
# Revision 1.4 2003/02/15 00:16:33 zenzen
# Changes, as per suggestions and bug reports by M.-A. Lemburg,
# Matthew T. Kromer, Federico Di Gregorio and Daniel Dittmar
# - Class renamed
# - Now a subclass of TestCase, to avoid requiring the driver stub
# to use multiple inheritance
# - Reversed the polarity of buggy test in test_description
# - Test exception heirarchy correctly
# - self.populate is now self._populate(), so if a driver stub
# overrides self.ddl1 this change propogates
# - VARCHAR columns now have a width, which will hopefully make the
# DDL even more portible (this will be reversed if it causes more problems)
# - cursor.rowcount being checked after various execute and fetchXXX methods
# - Check for fetchall and fetchmany returning empty lists after results
# are exhausted (already checking for empty lists if select retrieved
# nothing
# - Fix bugs in test_setoutputsize_basic and test_setinputsizes
#
''' Test a database self.driver for DB API 2.0 compatibility.
This implementation tests Gadfly, but the TestCase
is structured so that other self.drivers can subclass this
test case to ensure compiliance with the DB-API. It is
expected that this TestCase may be expanded in the future
if ambiguities or edge conditions are discovered.
The 'Optional Extensions' are not yet being tested.
self.drivers should subclass this test, overriding setUp, tearDown,
self.driver, connect_args and connect_kw_args. Class specification
should be as follows:
import dbapi20
class mytest(dbapi20.DatabaseAPI20Test):
[...]
Don't 'import DatabaseAPI20Test from dbapi20', or you will
confuse the unit tester - just 'import dbapi20'.
'''
# The self.driver module. This should be the module where the 'connect'
# method is to be found
driver = nzpy
table_prefix = 'dbapi20test_' # If you need to specify a prefix for tables
ddl1 = 'create table %sbooze (name varchar(20))' % table_prefix
ddl2 = 'create table %sbarflys (name varchar(20))' % table_prefix
xddl1 = 'drop table %sbooze' % table_prefix
xddl2 = 'drop table %sbarflys' % table_prefix
# Name of stored procedure to convert
# string->lowercase
lowerfunc = 'lower'
# Some drivers may need to override these helpers, for example adding
# a 'commit' after the execute.
def executeDDL1(cursor):
cursor.execute(ddl1)
def executeDDL2(cursor):
cursor.execute(ddl2)
@pytest.fixture
def db(request, con):
def fin():
with con.cursor() as cur:
for ddl in (xddl1, xddl2):
try:
cur.execute(ddl)
con.commit()
except driver.Error:
# Assume table didn't exist. Other tests will check if
# execute is busted.
pass
request.addfinalizer(fin)
return con
def test_apilevel():
# Must exist
apilevel = driver.apilevel
# Must equal 2.0
assert apilevel == '2.0'
def test_threadsafety():
try:
# Must exist
threadsafety = driver.threadsafety
# Must be a valid value
assert threadsafety in (0, 1, 2, 3)
except AttributeError:
assert False, "Driver doesn't define threadsafety"
def test_paramstyle():
try:
# Must exist
paramstyle = driver.paramstyle
# Must be a valid value
assert paramstyle in (
'qmark', 'numeric', 'named', 'format', 'pyformat')
except AttributeError:
assert False, "Driver doesn't define paramstyle"
def test_Exceptions():
# Make sure required exceptions exist, and are in the
# defined heirarchy.
assert issubclass(driver.Warning, Exception)
assert issubclass(driver.Error, Exception)
assert issubclass(driver.InterfaceError, driver.Error)
assert issubclass(driver.DatabaseError, driver.Error)
assert issubclass(driver.OperationalError, driver.Error)
assert issubclass(driver.IntegrityError, driver.Error)
assert issubclass(driver.InternalError, driver.Error)
assert issubclass(driver.ProgrammingError, driver.Error)
assert issubclass(driver.NotSupportedError, driver.Error)
def test_ExceptionsAsConnectionAttributes(con):
# OPTIONAL EXTENSION
# Test for the optional DB API 2.0 extension, where the exceptions
# are exposed as attributes on the Connection object
# I figure this optional extension will be implemented by any
# driver author who is using this test suite, so it is enabled
# by default.
warnings.simplefilter("ignore")
drv = driver
assert con.Warning is drv.Warning
assert con.Error is drv.Error
assert con.InterfaceError is drv.InterfaceError
assert con.DatabaseError is drv.DatabaseError
assert con.OperationalError is drv.OperationalError
assert con.IntegrityError is drv.IntegrityError
assert con.InternalError is drv.InternalError
assert con.ProgrammingError is drv.ProgrammingError
assert con.NotSupportedError is drv.NotSupportedError
warnings.resetwarnings()
def test_commit(con):
# Commit must work, even if it doesn't do anything
con.commit()
def test_rollback(con):
# If rollback is defined, it should either work or throw
# the documented exception
if hasattr(con, 'rollback'):
try:
con.rollback()
except driver.NotSupportedError:
pass
def test_cursor(con):
con.cursor()
def test_cursor_isolation(con):
# Make sure cursors created from the same connection have
# the documented transaction isolation level
cur1 = con.cursor()
cur2 = con.cursor()
executeDDL1(cur1)
cur1.execute(
"insert into %sbooze values ('Victoria Bitter')" % (table_prefix))
cur2.execute("select name from %sbooze" % table_prefix)
booze = cur2.fetchall()
assert len(booze) == 1
assert len(booze[0]) == 1
assert booze[0][0] == 'Victoria Bitter'
cur1.execute(xddl1)
def test_description(con):
cur = con.cursor()
executeDDL1(cur)
assert cur.description is None, \
'cursor.description should be none after executing a ' \
'statement that can return no rows (such as DDL)'
cur.execute('select name from %sbooze' % table_prefix)
assert len(cur.description) == 1, \
'cursor.description describes too many columns'
assert len(cur.description[0]) == 2, \
'cursor.description[x] tuples must have 2 elements'
assert cur.description[0][0].lower() == 'name', \
'cursor.description[x][0] must return column name'
assert cur.description[0][1] == driver.STRING, \
'cursor.description[x][1] must return column type. Got %r' \
% cur.description[0][1]
# Make sure self.description gets reset
executeDDL2(cur)
assert cur.description is None, \
'cursor.description not being set to None when executing ' \
'no-result statements (eg. DDL)'
cur.execute(xddl1)
cur.execute(xddl2)
def test_rowcount(cursor):
executeDDL1(cursor)
assert cursor.rowcount == -1, \
'cursor.rowcount should be -1 after executing no-result ' \
'statements'
cursor.execute(
"insert into %sbooze values ('Victoria Bitter')" % (table_prefix))
assert cursor.rowcount in (-1, 1), \
'cursor.rowcount should == number or rows inserted, or ' \
'set to -1 after executing an insert statement'
cursor.execute("select name from %sbooze" % table_prefix)
assert cursor.rowcount in (-1, 1), \
'cursor.rowcount should == number of rows returned, or ' \
'set to -1 after executing a select statement'
executeDDL2(cursor)
assert cursor.rowcount == -1, \
'cursor.rowcount not being reset to -1 after executing ' \
'no-result statements'
cursor.execute(xddl1)
cursor.execute(xddl2)
lower_func = 'lower'
def test_callproc(cursor):
if lower_func and hasattr(cursor, 'callproc'):
r = cursor.callproc(lower_func, ('FOO',))
assert len(r) == 1
assert r[0] == 'FOO'
r = cursor.fetchall()
assert len(r) == 1, 'callproc produced no result set'
assert len(r[0]) == 1, 'callproc produced invalid result set'
assert r[0][0] == 'foo', 'callproc produced invalid results'
def test_close(con):
    """After connection.close(), cursor and connection operations
    must fail, and a second close() must raise InterfaceError."""
    cur = con.cursor()
    con.close()
    # cursor.execute should raise an Error if called after connection
    # closed
    with pytest.raises(ValueError):
        executeDDL1(cur)
    # connection.commit should raise an Error if called after connection
    # closed
    with pytest.raises(ValueError):
        con.commit()
    # connection.close should raise an Error if called more than once
    with pytest.raises(nzpy.core.InterfaceError):
        con.close()
def test_execute(con):
    """cursor.execute works with the driver's paramstyle; the real
    checks are delegated to _paraminsert."""
    cursor = con.cursor()
    _paraminsert(cursor)
def _paraminsert(cur):
    """Insert one literal row and one parameterized row into booze,
    using whichever placeholder syntax driver.paramstyle declares,
    then verify both rows come back via fetchall.

    Drops the booze table (xddl1) on success.
    """
    executeDDL1(cur)
    cur.execute(
        "insert into %sbooze values ('Victoria Bitter')" % (table_prefix))
    assert cur.rowcount in (-1, 1)
    # Parameterized insert of the second row, per driver.paramstyle.
    if driver.paramstyle == 'qmark':
        cur.execute(
            'insert into %sbooze values (?)' % table_prefix, ("Coopers",))
    elif driver.paramstyle == 'numeric':
        cur.execute(
            'insert into %sbooze values (:1)' % table_prefix, ("Coopers",))
    elif driver.paramstyle == 'named':
        # Bug fix: insert "Coopers" (was "Cooper's") so this branch is
        # consistent with every other paramstyle branch and with the
        # `beers[0] == "Coopers"` assertion below.
        cur.execute(
            'insert into %sbooze values (:beer)' % table_prefix,
            {'beer': "Coopers"})
    elif driver.paramstyle == 'format':
        cur.execute(
            'insert into %sbooze values (%%s)' % table_prefix, ("Coopers",))
    elif driver.paramstyle == 'pyformat':
        cur.execute(
            'insert into %sbooze values (%%(beer)s)' % table_prefix,
            {'beer': "Coopers"})
    else:
        assert False, 'Invalid paramstyle'
    assert cur.rowcount in (-1, 1)
    cur.execute('select name from %sbooze' % table_prefix)
    res = cur.fetchall()
    assert len(res) == 2, 'cursor.fetchall returned too few rows'
    beers = [res[0][0], res[1][0]]
    beers.sort()
    assert beers[0] == "Coopers", \
        'cursor.fetchall retrieved incorrect data, or data inserted ' \
        'incorrectly'
    assert beers[1] == "Victoria Bitter", \
        'cursor.fetchall retrieved incorrect data, or data inserted ' \
        'incorrectly'
    cur.execute(xddl1)
def test_executemany(cursor):
    """executemany must insert one row per parameter set, using the
    driver's declared paramstyle, and set rowcount to -1 or 2."""
    executeDDL1(cursor)
    # Positional args for qmark/numeric/format; mapping args for
    # named/pyformat.
    largs = [("Coopers",), ("Boags",)]
    margs = [{'beer': "Coopers"}, {'beer': "Boags"}]
    if driver.paramstyle == 'qmark':
        cursor.executemany(
            'insert into %sbooze values (?)' % table_prefix, largs)
    elif driver.paramstyle == 'numeric':
        cursor.executemany(
            'insert into %sbooze values (:1)' % table_prefix, largs)
    elif driver.paramstyle == 'named':
        cursor.executemany(
            'insert into %sbooze values (:beer)' % table_prefix, margs)
    elif driver.paramstyle == 'format':
        cursor.executemany(
            'insert into %sbooze values (%%s)' % table_prefix, largs)
    elif driver.paramstyle == 'pyformat':
        cursor.executemany(
            'insert into %sbooze values (%%(beer)s)' % (table_prefix), margs)
    else:
        assert False, 'Unknown paramstyle'
    assert cursor.rowcount in (-1, 2), \
        'insert using cursor.executemany set cursor.rowcount to ' \
        'incorrect value %r' % cursor.rowcount
    cursor.execute('select name from %sbooze' % table_prefix)
    res = cursor.fetchall()
    assert len(res) == 2, 'cursor.fetchall retrieved incorrect number of rows'
    beers = [res[0][0], res[1][0]]
    beers.sort()
    assert beers[0] == "Boags", 'incorrect data retrieved'
    assert beers[1] == "Coopers", 'incorrect data retrieved'
    cursor.execute(xddl1)
def test_fetchone(cursor):
    """fetchone must raise before any select, return None on an empty
    result set, return one row at a time, and then return None."""
    # cursor.fetchone should raise an Error if called before
    # executing a select-type query
    with pytest.raises(driver.Error):
        cursor.fetchone()
    # cursor.fetchone should raise an Error if called after
    # executing a query that cannot return rows
    executeDDL1(cursor)
    with pytest.raises(driver.Error):
        cursor.fetchone()
    cursor.execute('select name from %sbooze' % table_prefix)
    assert cursor.fetchone() is None, \
        'cursor.fetchone should return None if a query retrieves ' \
        'no rows'
    assert cursor.rowcount in (-1, 0)
    # cursor.fetchone should raise an Error if called after
    # executing a query that cannot return rows
    cursor.execute(
        "insert into %sbooze values ('Victoria Bitter')" % (table_prefix))
    with pytest.raises(driver.Error):
        cursor.fetchone()
    cursor.execute('select name from %sbooze' % table_prefix)
    r = cursor.fetchone()
    assert len(r) == 1, 'cursor.fetchone should have retrieved a single row'
    assert r[0] == 'Victoria Bitter', \
        'cursor.fetchone retrieved incorrect data'
    assert cursor.fetchone() is None, \
        'cursor.fetchone should return None if no more rows available'
    assert cursor.rowcount in (-1, 1)
    cursor.execute(xddl1)
# Beer names inserted by _populate().  Kept in ascending sorted order
# because the fetch tests sort retrieved rows and compare index-wise
# against this list.
samples = [
    'Carlton Cold',
    'Carlton Draft',
    'Mountain Goat',
    'Redback',
    'Victoria Bitter',
    'XXXX'
]
def _populate():
    """Return the SQL insert statements that load `samples` into the
    booze table for the fetch tests."""
    template = "insert into %sbooze values ('%s')"
    return [template % (table_prefix, name) for name in samples]
def test_fetchmany(cursor):
    """Exercise cursor.fetchmany: it must raise before a query,
    default its size to cursor.arraysize, honour an explicit size,
    return an empty sequence once results are exhausted, and return
    an empty sequence for a query with no rows."""
    # cursor.fetchmany should raise an Error if called without
    # issuing a query
    with pytest.raises(driver.Error):
        cursor.fetchmany(4)
    executeDDL1(cursor)
    for sql in _populate():
        cursor.execute(sql)
    cursor.execute('select name from %sbooze' % table_prefix)
    r = cursor.fetchmany()
    assert len(r) == 1, \
        'cursor.fetchmany retrieved incorrect number of rows, ' \
        'default of arraysize is one.'
    cursor.arraysize = 10
    r = cursor.fetchmany(3)  # Should get 3 rows
    assert len(r) == 3, 'cursor.fetchmany retrieved incorrect number of rows'
    r = cursor.fetchmany(4)  # Should get 2 more
    assert len(r) == 2, 'cursor.fetchmany retrieved incorrect number of rows'
    r = cursor.fetchmany(4)  # Should be an empty sequence
    assert len(r) == 0, \
        'cursor.fetchmany should return an empty sequence after ' \
        'results are exhausted'
    assert cursor.rowcount in (-1, 6)
    # Same as above, using cursor.arraysize
    cursor.arraysize = 4
    cursor.execute('select name from %sbooze' % table_prefix)
    r = cursor.fetchmany()  # Should get 4 rows
    assert len(r) == 4, 'cursor.arraysize not being honoured by fetchmany'
    r = cursor.fetchmany()  # Should get 2 more
    assert len(r) == 2
    r = cursor.fetchmany()  # Should be an empty sequence
    assert len(r) == 0
    assert cursor.rowcount in (-1, 6)
    cursor.arraysize = 6
    cursor.execute('select name from %sbooze' % table_prefix)
    rows = cursor.fetchmany()  # Should get all rows
    assert cursor.rowcount in (-1, 6)
    # Fix: the original asserted len(rows) == 6 twice in a row; the
    # duplicate has been removed.
    assert len(rows) == 6
    rows = [row[0] for row in rows]
    rows.sort()
    # Make sure we get the right data back out
    for i in range(0, 6):
        assert rows[i] == samples[i], \
            'incorrect data retrieved by cursor.fetchmany'
    rows = cursor.fetchmany()  # Should return an empty list
    assert len(rows) == 0, \
        'cursor.fetchmany should return an empty sequence if ' \
        'called after the whole result set has been fetched'
    assert cursor.rowcount in (-1, 6)
    executeDDL2(cursor)
    cursor.execute('select name from %sbarflys' % table_prefix)
    r = cursor.fetchmany()  # Should get empty sequence
    assert len(r) == 0, \
        'cursor.fetchmany should return an empty sequence if ' \
        'query retrieved no rows'
    assert cursor.rowcount in (-1, 0)
    cursor.execute(xddl1)
    cursor.execute(xddl2)
def test_fetchall(cursor):
    """fetchall must raise before a row-returning query, return every
    remaining row after a select, return an empty list once the result
    set is exhausted, and return an empty list for a rowless query."""
    # cursor.fetchall should raise an Error if called
    # without executing a query that may return rows (such
    # as a select)
    with pytest.raises(driver.Error):
        cursor.fetchall()
    executeDDL1(cursor)
    for sql in _populate():
        cursor.execute(sql)
    # cursor.fetchall should raise an Error if called
    # after executing a statement that cannot return rows
    with pytest.raises(driver.Error):
        cursor.fetchall()
    cursor.execute('select name from %sbooze' % table_prefix)
    rows = cursor.fetchall()
    assert cursor.rowcount in (-1, len(samples))
    assert len(rows) == len(samples), \
        'cursor.fetchall did not retrieve all rows'
    rows = [r[0] for r in rows]
    rows.sort()
    for i in range(0, len(samples)):
        assert rows[i] == samples[i], \
            'cursor.fetchall retrieved incorrect rows'
    rows = cursor.fetchall()
    assert len(rows) == 0, \
        'cursor.fetchall should return an empty list if called ' \
        'after the whole result set has been fetched'
    assert cursor.rowcount in (-1, len(samples))
    executeDDL2(cursor)
    cursor.execute('select name from %sbarflys' % table_prefix)
    rows = cursor.fetchall()
    assert cursor.rowcount in (-1, 0)
    assert len(rows) == 0, \
        'cursor.fetchall should return an empty list if ' \
        'a select query returns no rows'
    cursor.execute(xddl1)
    cursor.execute(xddl2)
def test_mixedfetch(cursor):
    """Interleaving fetchone, fetchmany and fetchall on one result set
    must together yield exactly the six sample rows, each once."""
    executeDDL1(cursor)
    for sql in _populate():
        cursor.execute(sql)
    cursor.execute('select name from %sbooze' % table_prefix)
    rows1 = cursor.fetchone()
    rows23 = cursor.fetchmany(2)
    rows4 = cursor.fetchone()
    rows56 = cursor.fetchall()
    assert cursor.rowcount in (-1, 6)
    assert len(rows23) == 2, 'fetchmany returned incorrect number of rows'
    assert len(rows56) == 2, 'fetchall returned incorrect number of rows'
    # Flatten the mixed results and compare against the sorted samples.
    rows = [rows1[0]]
    rows.extend([rows23[0][0], rows23[1][0]])
    rows.append(rows4[0])
    rows.extend([rows56[0][0], rows56[1][0]])
    rows.sort()
    for i in range(0, len(samples)):
        assert rows[i] == samples[i], 'incorrect data retrieved or inserted'
    cursor.execute(xddl1)
def help_nextset_setUp(cur):
    """Create a procedure called deleteme that returns two result
    sets: first the number of rows in booze, then the names from
    booze.

    Drivers exercising test_nextset must override this stub.
    """
    raise NotImplementedError('Helper not implemented')
def help_nextset_tearDown(cur):
    """Clean up after the nextset test; override if needed."""
    raise NotImplementedError('Helper not implemented')
def test_nextset(cursor):
    """If the cursor supports nextset, a procedure returning two
    result sets must expose both via nextset(), which then returns
    None when no sets remain."""
    if not hasattr(cursor, 'nextset'):
        return
    try:
        executeDDL1(cursor)
        # Fix: removed the dead `sql = _populate()` binding that was
        # immediately shadowed by the loop variable below.
        for sql in _populate():
            cursor.execute(sql)
        help_nextset_setUp(cursor)
        cursor.callproc('deleteme')
        numberofrows = cursor.fetchone()
        assert numberofrows[0] == len(samples)
        assert cursor.nextset()
        names = cursor.fetchall()
        assert len(names) == len(samples)
        s = cursor.nextset()
        assert s is None, 'No more return sets, should return None'
    finally:
        # NOTE(review): this cleanup runs even when setUp raised, and
        # xddl2 drops a table this test never creates — confirm against
        # a driver that actually overrides the helpers.
        help_nextset_tearDown(cursor)
        cursor.execute(xddl1)
        cursor.execute(xddl2)
def test_arraysize(cursor):
# Not much here - rest of the tests for this are in test_fetchmany
assert hasattr(cursor, 'arraysize'), 'cursor.arraysize must be defined'
def test_setinputsizes(cursor):
    """setinputsizes must be accepted without breaking the cursor."""
    sizes = (25,)
    cursor.setinputsizes(sizes)
    _paraminsert(cursor)  # the cursor must remain usable afterwards
def test_setoutputsize_basic(cursor):
    """setoutputsize must be accepted (with and without a column
    index) without breaking the cursor."""
    for args in ((1000,), (2000, 0)):
        cursor.setoutputsize(*args)
    _paraminsert(cursor)  # the cursor must remain usable afterwards
def test_None(cursor):
    """A SQL NULL value must round-trip back to Python None."""
    executeDDL1(cursor)
    cursor.execute('insert into %sbooze values (NULL)' % table_prefix)
    cursor.execute('select name from %sbooze' % table_prefix)
    r = cursor.fetchall()
    assert len(r) == 1
    assert len(r[0]) == 1
    assert r[0][0] is None, 'NULL value not returned as None'
def test_Date():
    """The module must provide the Date and DateFromTicks
    constructors required by DB-API 2.0."""
    ticks = time.mktime((2002, 12, 25, 0, 0, 0, 0, 0, 0))
    driver.Date(2002, 12, 25)
    driver.DateFromTicks(ticks)
    # The API does not require str() equality of the two objects,
    # so no comparison is made here.
def test_Time():
    """The module must provide the Time and TimeFromTicks
    constructors required by DB-API 2.0."""
    ticks = time.mktime((2001, 1, 1, 13, 45, 30, 0, 0, 0))
    driver.Time(13, 45, 30)
    driver.TimeFromTicks(ticks)
    # The API does not require str() equality of the two objects.
def test_Timestamp():
    """The module must provide the Timestamp and TimestampFromTicks
    constructors required by DB-API 2.0."""
    ticks = time.mktime((2002, 12, 25, 13, 45, 30, 0, 0, 0))
    driver.Timestamp(2002, 12, 25, 13, 45, 30)
    driver.TimestampFromTicks(ticks)
    # The API does not require str() equality of the two objects.
def test_Binary():
    """driver.Binary must accept both non-empty and empty byte
    strings."""
    for payload in (b'Something', b''):
        driver.Binary(payload)
def test_STRING():
    """The driver module must expose the STRING type object."""
    defined = hasattr(driver, 'STRING')
    assert defined, 'module.STRING must be defined'
def test_BINARY():
    """The driver module must expose the BINARY type object."""
    defined = hasattr(driver, 'BINARY')
    assert defined, 'module.BINARY must be defined.'
def test_NUMBER():
    """The driver module must expose the NUMBER type object."""
    defined = hasattr(driver, 'NUMBER')
    assert defined, 'module.NUMBER must be defined.'
def test_DATETIME():
    """The driver module must expose the DATETIME type object."""
    defined = hasattr(driver, 'DATETIME')
    assert defined, 'module.DATETIME must be defined.'
def test_ROWID():
    """The driver module must expose the ROWID type object."""
    defined = hasattr(driver, 'ROWID')
    assert defined, 'module.ROWID must be defined.'
| 33.289244 | 79 | 0.664498 | import time
import warnings
import nzpy
import pytest
__rcs_id__ = '$Id: dbapi20.py,v 1.10 2003/10/09 03:14:14 zenzen Exp $'
__version__ = '$Revision: 1.10 $'[11:-2]
__author__ = 'Stuart Bishop <zen@shangri-la.dropbear.id.au>'
driver = nzpy
table_prefix = 'dbapi20test_'
ddl1 = 'create table %sbooze (name varchar(20))' % table_prefix
ddl2 = 'create table %sbarflys (name varchar(20))' % table_prefix
xddl1 = 'drop table %sbooze' % table_prefix
xddl2 = 'drop table %sbarflys' % table_prefix
lowerfunc = 'lower'
def executeDDL1(cursor):
cursor.execute(ddl1)
def executeDDL2(cursor):
cursor.execute(ddl2)
@pytest.fixture
def db(request, con):
def fin():
with con.cursor() as cur:
for ddl in (xddl1, xddl2):
try:
cur.execute(ddl)
con.commit()
except driver.Error:
# execute is busted.
pass
request.addfinalizer(fin)
return con
def test_apilevel():
# Must exist
apilevel = driver.apilevel
# Must equal 2.0
assert apilevel == '2.0'
def test_threadsafety():
try:
# Must exist
threadsafety = driver.threadsafety
# Must be a valid value
assert threadsafety in (0, 1, 2, 3)
except AttributeError:
assert False, "Driver doesn't define threadsafety"
def test_paramstyle():
try:
paramstyle = driver.paramstyle
assert paramstyle in (
'qmark', 'numeric', 'named', 'format', 'pyformat')
except AttributeError:
assert False, "Driver doesn't define paramstyle"
def test_Exceptions():
# Make sure required exceptions exist, and are in the
# defined heirarchy.
assert issubclass(driver.Warning, Exception)
assert issubclass(driver.Error, Exception)
assert issubclass(driver.InterfaceError, driver.Error)
assert issubclass(driver.DatabaseError, driver.Error)
assert issubclass(driver.OperationalError, driver.Error)
assert issubclass(driver.IntegrityError, driver.Error)
assert issubclass(driver.InternalError, driver.Error)
assert issubclass(driver.ProgrammingError, driver.Error)
assert issubclass(driver.NotSupportedError, driver.Error)
def test_ExceptionsAsConnectionAttributes(con):
# OPTIONAL EXTENSION
# Test for the optional DB API 2.0 extension, where the exceptions
# are exposed as attributes on the Connection object
# I figure this optional extension will be implemented by any
# driver author who is using this test suite, so it is enabled
# by default.
warnings.simplefilter("ignore")
drv = driver
assert con.Warning is drv.Warning
assert con.Error is drv.Error
assert con.InterfaceError is drv.InterfaceError
assert con.DatabaseError is drv.DatabaseError
assert con.OperationalError is drv.OperationalError
assert con.IntegrityError is drv.IntegrityError
assert con.InternalError is drv.InternalError
assert con.ProgrammingError is drv.ProgrammingError
assert con.NotSupportedError is drv.NotSupportedError
warnings.resetwarnings()
def test_commit(con):
# Commit must work, even if it doesn't do anything
con.commit()
def test_rollback(con):
if hasattr(con, 'rollback'):
try:
con.rollback()
except driver.NotSupportedError:
pass
def test_cursor(con):
con.cursor()
def test_cursor_isolation(con):
cur1 = con.cursor()
cur2 = con.cursor()
executeDDL1(cur1)
cur1.execute(
"insert into %sbooze values ('Victoria Bitter')" % (table_prefix))
cur2.execute("select name from %sbooze" % table_prefix)
booze = cur2.fetchall()
assert len(booze) == 1
assert len(booze[0]) == 1
assert booze[0][0] == 'Victoria Bitter'
cur1.execute(xddl1)
def test_description(con):
cur = con.cursor()
executeDDL1(cur)
assert cur.description is None, \
'cursor.description should be none after executing a ' \
'statement that can return no rows (such as DDL)'
cur.execute('select name from %sbooze' % table_prefix)
assert len(cur.description) == 1, \
'cursor.description describes too many columns'
assert len(cur.description[0]) == 2, \
'cursor.description[x] tuples must have 2 elements'
assert cur.description[0][0].lower() == 'name', \
'cursor.description[x][0] must return column name'
assert cur.description[0][1] == driver.STRING, \
'cursor.description[x][1] must return column type. Got %r' \
% cur.description[0][1]
executeDDL2(cur)
assert cur.description is None, \
'cursor.description not being set to None when executing ' \
'no-result statements (eg. DDL)'
cur.execute(xddl1)
cur.execute(xddl2)
def test_rowcount(cursor):
executeDDL1(cursor)
assert cursor.rowcount == -1, \
'cursor.rowcount should be -1 after executing no-result ' \
'statements'
cursor.execute(
"insert into %sbooze values ('Victoria Bitter')" % (table_prefix))
assert cursor.rowcount in (-1, 1), \
'cursor.rowcount should == number or rows inserted, or ' \
'set to -1 after executing an insert statement'
cursor.execute("select name from %sbooze" % table_prefix)
assert cursor.rowcount in (-1, 1), \
'cursor.rowcount should == number of rows returned, or ' \
'set to -1 after executing a select statement'
executeDDL2(cursor)
assert cursor.rowcount == -1, \
'cursor.rowcount not being reset to -1 after executing ' \
'no-result statements'
cursor.execute(xddl1)
cursor.execute(xddl2)
lower_func = 'lower'
def test_callproc(cursor):
if lower_func and hasattr(cursor, 'callproc'):
r = cursor.callproc(lower_func, ('FOO',))
assert len(r) == 1
assert r[0] == 'FOO'
r = cursor.fetchall()
assert len(r) == 1, 'callproc produced no result set'
assert len(r[0]) == 1, 'callproc produced invalid result set'
assert r[0][0] == 'foo', 'callproc produced invalid results'
def test_close(con):
cur = con.cursor()
con.close()
with pytest.raises(ValueError):
executeDDL1(cur)
# closed.'
with pytest.raises(ValueError):
con.commit()
with pytest.raises(nzpy.core.InterfaceError):
con.close()
def test_execute(con):
cur = con.cursor()
_paraminsert(cur)
def _paraminsert(cur):
executeDDL1(cur)
cur.execute(
"insert into %sbooze values ('Victoria Bitter')" % (table_prefix))
assert cur.rowcount in (-1, 1)
if driver.paramstyle == 'qmark':
cur.execute(
'insert into %sbooze values (?)' % table_prefix, ("Coopers",))
elif driver.paramstyle == 'numeric':
cur.execute(
'insert into %sbooze values (:1)' % table_prefix, ("Coopers",))
elif driver.paramstyle == 'named':
cur.execute(
'insert into %sbooze values (:beer)' % table_prefix,
{'beer': "Cooper's"})
elif driver.paramstyle == 'format':
cur.execute(
'insert into %sbooze values (%%s)' % table_prefix, ("Coopers",))
elif driver.paramstyle == 'pyformat':
cur.execute(
'insert into %sbooze values (%%(beer)s)' % table_prefix,
{'beer': "Coopers"})
else:
assert False, 'Invalid paramstyle'
assert cur.rowcount in (-1, 1)
cur.execute('select name from %sbooze' % table_prefix)
res = cur.fetchall()
assert len(res) == 2, 'cursor.fetchall returned too few rows'
beers = [res[0][0], res[1][0]]
beers.sort()
assert beers[0] == "Coopers", \
'cursor.fetchall retrieved incorrect data, or data inserted ' \
'incorrectly'
assert beers[1] == "Victoria Bitter", \
'cursor.fetchall retrieved incorrect data, or data inserted ' \
'incorrectly'
cur.execute(xddl1)
def test_executemany(cursor):
executeDDL1(cursor)
largs = [("Coopers",), ("Boags",)]
margs = [{'beer': "Coopers"}, {'beer': "Boags"}]
if driver.paramstyle == 'qmark':
cursor.executemany(
'insert into %sbooze values (?)' % table_prefix, largs)
elif driver.paramstyle == 'numeric':
cursor.executemany(
'insert into %sbooze values (:1)' % table_prefix, largs)
elif driver.paramstyle == 'named':
cursor.executemany(
'insert into %sbooze values (:beer)' % table_prefix, margs)
elif driver.paramstyle == 'format':
cursor.executemany(
'insert into %sbooze values (%%s)' % table_prefix, largs)
elif driver.paramstyle == 'pyformat':
cursor.executemany(
'insert into %sbooze values (%%(beer)s)' % (table_prefix), margs)
else:
assert False, 'Unknown paramstyle'
assert cursor.rowcount in (-1, 2), \
'insert using cursor.executemany set cursor.rowcount to ' \
'incorrect value %r' % cursor.rowcount
cursor.execute('select name from %sbooze' % table_prefix)
res = cursor.fetchall()
assert len(res) == 2, 'cursor.fetchall retrieved incorrect number of rows'
beers = [res[0][0], res[1][0]]
beers.sort()
assert beers[0] == "Boags", 'incorrect data retrieved'
assert beers[1] == "Coopers", 'incorrect data retrieved'
cursor.execute(xddl1)
def test_fetchone(cursor):
# cursor.fetchone should raise an Error if called before
# executing a select-type query
with pytest.raises(driver.Error):
cursor.fetchone()
# cursor.fetchone should raise an Error if called after
# executing a query that cannnot return rows
executeDDL1(cursor)
with pytest.raises(driver.Error):
cursor.fetchone()
cursor.execute('select name from %sbooze' % table_prefix)
assert cursor.fetchone() is None, \
'cursor.fetchone should return None if a query retrieves ' \
'no rows'
assert cursor.rowcount in (-1, 0)
# cursor.fetchone should raise an Error if called after
# executing a query that cannnot return rows
cursor.execute(
"insert into %sbooze values ('Victoria Bitter')" % (table_prefix))
with pytest.raises(driver.Error):
cursor.fetchone()
cursor.execute('select name from %sbooze' % table_prefix)
r = cursor.fetchone()
assert len(r) == 1, 'cursor.fetchone should have retrieved a single row'
assert r[0] == 'Victoria Bitter', \
'cursor.fetchone retrieved incorrect data'
assert cursor.fetchone() is None, \
'cursor.fetchone should return None if no more rows available'
assert cursor.rowcount in (-1, 1)
cursor.execute(xddl1)
samples = [
'Carlton Cold',
'Carlton Draft',
'Mountain Goat',
'Redback',
'Victoria Bitter',
'XXXX'
]
def _populate():
populate = [
"insert into %sbooze values ('%s')" % (table_prefix, s)
for s in samples]
return populate
def test_fetchmany(cursor):
# cursor.fetchmany should raise an Error if called without
# issuing a query
with pytest.raises(driver.Error):
cursor.fetchmany(4)
executeDDL1(cursor)
for sql in _populate():
cursor.execute(sql)
cursor.execute('select name from %sbooze' % table_prefix)
r = cursor.fetchmany()
assert len(r) == 1, \
'cursor.fetchmany retrieved incorrect number of rows, ' \
'default of arraysize is one.'
cursor.arraysize = 10
r = cursor.fetchmany(3) # Should get 3 rows
assert len(r) == 3, 'cursor.fetchmany retrieved incorrect number of rows'
r = cursor.fetchmany(4) # Should get 2 more
assert len(r) == 2, 'cursor.fetchmany retrieved incorrect number of rows'
r = cursor.fetchmany(4) # Should be an empty sequence
assert len(r) == 0, \
'cursor.fetchmany should return an empty sequence after ' \
'results are exhausted'
assert cursor.rowcount in (-1, 6)
# Same as above, using cursor.arraysize
cursor.arraysize = 4
cursor.execute('select name from %sbooze' % table_prefix)
r = cursor.fetchmany() # Should get 4 rows
assert len(r) == 4, 'cursor.arraysize not being honoured by fetchmany'
r = cursor.fetchmany() # Should get 2 more
assert len(r) == 2
r = cursor.fetchmany() # Should be an empty sequence
assert len(r) == 0
assert cursor.rowcount in (-1, 6)
cursor.arraysize = 6
cursor.execute('select name from %sbooze' % table_prefix)
rows = cursor.fetchmany() # Should get all rows
assert cursor.rowcount in (-1, 6)
assert len(rows) == 6
assert len(rows) == 6
rows = [row[0] for row in rows]
rows.sort()
# Make sure we get the right data back out
for i in range(0, 6):
assert rows[i] == samples[i], \
'incorrect data retrieved by cursor.fetchmany'
rows = cursor.fetchmany() # Should return an empty list
assert len(rows) == 0, \
'cursor.fetchmany should return an empty sequence if ' \
'called after the whole result set has been fetched'
assert cursor.rowcount in (-1, 6)
executeDDL2(cursor)
cursor.execute('select name from %sbarflys' % table_prefix)
r = cursor.fetchmany() # Should get empty sequence
assert len(r) == 0, \
'cursor.fetchmany should return an empty sequence if ' \
'query retrieved no rows'
assert cursor.rowcount in (-1, 0)
cursor.execute(xddl1)
cursor.execute(xddl2)
def test_fetchall(cursor):
# cursor.fetchall should raise an Error if called
# without executing a query that may return rows (such
# as a select)
with pytest.raises(driver.Error):
cursor.fetchall()
executeDDL1(cursor)
for sql in _populate():
cursor.execute(sql)
# cursor.fetchall should raise an Error if called
# after executing a a statement that cannot return rows
with pytest.raises(driver.Error):
cursor.fetchall()
cursor.execute('select name from %sbooze' % table_prefix)
rows = cursor.fetchall()
assert cursor.rowcount in (-1, len(samples))
assert len(rows) == len(samples), \
'cursor.fetchall did not retrieve all rows'
rows = [r[0] for r in rows]
rows.sort()
for i in range(0, len(samples)):
assert rows[i] == samples[i], \
'cursor.fetchall retrieved incorrect rows'
rows = cursor.fetchall()
assert len(rows) == 0, \
'cursor.fetchall should return an empty list if called ' \
'after the whole result set has been fetched'
assert cursor.rowcount in (-1, len(samples))
executeDDL2(cursor)
cursor.execute('select name from %sbarflys' % table_prefix)
rows = cursor.fetchall()
assert cursor.rowcount in (-1, 0)
assert len(rows) == 0, \
'cursor.fetchall should return an empty list if ' \
'a select query returns no rows'
cursor.execute(xddl1)
cursor.execute(xddl2)
def test_mixedfetch(cursor):
executeDDL1(cursor)
for sql in _populate():
cursor.execute(sql)
cursor.execute('select name from %sbooze' % table_prefix)
rows1 = cursor.fetchone()
rows23 = cursor.fetchmany(2)
rows4 = cursor.fetchone()
rows56 = cursor.fetchall()
assert cursor.rowcount in (-1, 6)
assert len(rows23) == 2, 'fetchmany returned incorrect number of rows'
assert len(rows56) == 2, 'fetchall returned incorrect number of rows'
rows = [rows1[0]]
rows.extend([rows23[0][0], rows23[1][0]])
rows.append(rows4[0])
rows.extend([rows56[0][0], rows56[1][0]])
rows.sort()
for i in range(0, len(samples)):
assert rows[i] == samples[i], 'incorrect data retrieved or inserted'
cursor.execute(xddl1)
def help_nextset_setUp(cur):
raise NotImplementedError('Helper not implemented')
def help_nextset_tearDown(cur):
raise NotImplementedError('Helper not implemented')
def test_nextset(cursor):
if not hasattr(cursor, 'nextset'):
return
try:
executeDDL1(cursor)
sql = _populate()
for sql in _populate():
cursor.execute(sql)
help_nextset_setUp(cursor)
cursor.callproc('deleteme')
numberofrows = cursor.fetchone()
assert numberofrows[0] == len(samples)
assert cursor.nextset()
names = cursor.fetchall()
assert len(names) == len(samples)
s = cursor.nextset()
assert s is None, 'No more return sets, should return None'
finally:
help_nextset_tearDown(cursor)
cursor.execute(xddl1)
cursor.execute(xddl2)
def test_arraysize(cursor):
# Not much here - rest of the tests for this are in test_fetchmany
assert hasattr(cursor, 'arraysize'), 'cursor.arraysize must be defined'
def test_setinputsizes(cursor):
cursor.setinputsizes((25,))
_paraminsert(cursor) # Make sure cursor still works
def test_setoutputsize_basic(cursor):
# Basic test is to make sure setoutputsize doesn't blow up
cursor.setoutputsize(1000)
cursor.setoutputsize(2000, 0)
_paraminsert(cursor)
def test_None(cursor):
executeDDL1(cursor)
cursor.execute('insert into %sbooze values (NULL)' % table_prefix)
cursor.execute('select name from %sbooze' % table_prefix)
r = cursor.fetchall()
assert len(r) == 1
assert len(r[0]) == 1
assert r[0][0] is None, 'NULL value not returned as None'
def test_Date():
driver.Date(2002, 12, 25)
driver.DateFromTicks(time.mktime((2002, 12, 25, 0, 0, 0, 0, 0, 0)))
# self.assertEqual(str(d1),str(d2))
def test_Time():
driver.Time(13, 45, 30)
driver.TimeFromTicks(time.mktime((2001, 1, 1, 13, 45, 30, 0, 0, 0)))
# Can we assume this? API doesn't specify, but it seems implied
def test_Timestamp():
driver.Timestamp(2002, 12, 25, 13, 45, 30)
driver.TimestampFromTicks(time.mktime((2002, 12, 25, 13, 45, 30, 0, 0, 0)))
# self.assertEqual(str(t1),str(t2))
def test_Binary():
driver.Binary(b'Something')
driver.Binary(b'')
def test_STRING():
assert hasattr(driver, 'STRING'), 'module.STRING must be defined'
def test_BINARY():
assert hasattr(driver, 'BINARY'), 'module.BINARY must be defined.'
def test_NUMBER():
assert hasattr(driver, 'NUMBER'), 'module.NUMBER must be defined.'
def test_DATETIME():
assert hasattr(driver, 'DATETIME'), 'module.DATETIME must be defined.'
def test_ROWID():
assert hasattr(driver, 'ROWID'), 'module.ROWID must be defined.'
| true | true |
f7ff117b02c17cdb8dd98b7183a3ecda2fa8e4cd | 3,303 | py | Python | h2o-py/tests/testdir_munging/pyunit_interaction.py | kyoren/https-github.com-h2oai-h2o-3 | 77b27109c84c4739f9f1b7a3078f8992beefc813 | [
"Apache-2.0"
] | 1 | 2016-09-30T05:58:18.000Z | 2016-09-30T05:58:18.000Z | h2o-py/tests/testdir_munging/pyunit_interaction.py | kyoren/https-github.com-h2oai-h2o-3 | 77b27109c84c4739f9f1b7a3078f8992beefc813 | [
"Apache-2.0"
] | null | null | null | h2o-py/tests/testdir_munging/pyunit_interaction.py | kyoren/https-github.com-h2oai-h2o-3 | 77b27109c84c4739f9f1b7a3078f8992beefc813 | [
"Apache-2.0"
] | null | null | null | import sys
sys.path.insert(1, "../../")
import h2o, tests
def interaction_check():
# Connect to a pre-existing cluster
iris = h2o.import_file(path=tests.locate("smalldata/iris/iris.csv"))
# add a couple of factor columns to iris
iris = iris.cbind(iris[4] == "Iris-setosa")
iris[5] = iris[5].asfactor()
iris.set_name(5,"C6")
iris = iris.cbind(iris[4] == "Iris-virginica")
iris[6] = iris[6].asfactor()
iris.set_name(6, name="C7")
# create a frame of the two-way interactions
two_way_interactions = h2o.interaction(iris, factors=[4,5,6], pairwise=True, max_factors=10000, min_occurrence=1)
assert two_way_interactions.nrow == 150 and two_way_interactions.ncol == 3, \
"Expected 150 rows and 3 columns, but got {0} rows and {1} " \
"columns".format(two_way_interactions.nrow, two_way_interactions.ncol)
levels1 = two_way_interactions[0].levels()
levels2 = two_way_interactions[1].levels()
levels3 = two_way_interactions[2].levels()
assert levels1 == ["Iris-setosa_1", "Iris-versicolor_0", "Iris-virginica_0"], \
"Expected the following levels {0}, but got {1}".format(["Iris-setosa_1", "Iris-versicolor_0", "Iris-virginica_0"],
levels1)
assert levels2 == ["Iris-setosa_0", "Iris-versicolor_0", "Iris-virginica_1"], \
"Expected the following levels {0}, but got {1}".format(["Iris-setosa_0", "Iris-versicolor_0", "Iris-virginica_1"],
levels2)
assert levels3 == ["0_0", "1_0", "0_1"], "Expected the following levels {0}, but got {1}".format(["0_0", "1_0", "0_1"],
levels3)
# do the same thing, but set 'factors' arg to list of column names
two_way_interactions = h2o.interaction(iris, factors=["C5","C6","C7"], pairwise=True, max_factors=10000, min_occurrence=1)
assert two_way_interactions.nrow == 150 and two_way_interactions.ncol == 3, \
"Expected 150 rows and 3 columns, but got {0} rows and {1} " \
"columns".format(two_way_interactions.nrow, two_way_interactions.ncol)
levels1 = two_way_interactions[0].levels()
levels2 = two_way_interactions[1].levels()
levels3 = two_way_interactions[2].levels()
assert levels1 == ["Iris-setosa_1", "Iris-versicolor_0", "Iris-virginica_0"], \
"Expected the following levels {0}, but got {1}".format(["Iris-setosa_1", "Iris-versicolor_0", "Iris-virginica_0"],
levels1)
assert levels2 == ["Iris-setosa_0", "Iris-versicolor_0", "Iris-virginica_1"], \
"Expected the following levels {0}, but got {1}".format(["Iris-setosa_0", "Iris-versicolor_0", "Iris-virginica_1"],
levels2)
assert levels3 == ["0_0", "1_0", "0_1"], "Expected the following levels {0}, but got {1}".format(["0_0", "1_0", "0_1"],
levels3)
#TODO: allow factors to be list of lists
if __name__ == "__main__":
tests.run_test(sys.argv, interaction_check)
| 54.147541 | 126 | 0.582198 | import sys
sys.path.insert(1, "../../")
import h2o, tests
def interaction_check():
iris = h2o.import_file(path=tests.locate("smalldata/iris/iris.csv"))
iris = iris.cbind(iris[4] == "Iris-setosa")
iris[5] = iris[5].asfactor()
iris.set_name(5,"C6")
iris = iris.cbind(iris[4] == "Iris-virginica")
iris[6] = iris[6].asfactor()
iris.set_name(6, name="C7")
two_way_interactions = h2o.interaction(iris, factors=[4,5,6], pairwise=True, max_factors=10000, min_occurrence=1)
assert two_way_interactions.nrow == 150 and two_way_interactions.ncol == 3, \
"Expected 150 rows and 3 columns, but got {0} rows and {1} " \
"columns".format(two_way_interactions.nrow, two_way_interactions.ncol)
levels1 = two_way_interactions[0].levels()
levels2 = two_way_interactions[1].levels()
levels3 = two_way_interactions[2].levels()
assert levels1 == ["Iris-setosa_1", "Iris-versicolor_0", "Iris-virginica_0"], \
"Expected the following levels {0}, but got {1}".format(["Iris-setosa_1", "Iris-versicolor_0", "Iris-virginica_0"],
levels1)
assert levels2 == ["Iris-setosa_0", "Iris-versicolor_0", "Iris-virginica_1"], \
"Expected the following levels {0}, but got {1}".format(["Iris-setosa_0", "Iris-versicolor_0", "Iris-virginica_1"],
levels2)
assert levels3 == ["0_0", "1_0", "0_1"], "Expected the following levels {0}, but got {1}".format(["0_0", "1_0", "0_1"],
levels3)
two_way_interactions = h2o.interaction(iris, factors=["C5","C6","C7"], pairwise=True, max_factors=10000, min_occurrence=1)
assert two_way_interactions.nrow == 150 and two_way_interactions.ncol == 3, \
"Expected 150 rows and 3 columns, but got {0} rows and {1} " \
"columns".format(two_way_interactions.nrow, two_way_interactions.ncol)
levels1 = two_way_interactions[0].levels()
levels2 = two_way_interactions[1].levels()
levels3 = two_way_interactions[2].levels()
assert levels1 == ["Iris-setosa_1", "Iris-versicolor_0", "Iris-virginica_0"], \
"Expected the following levels {0}, but got {1}".format(["Iris-setosa_1", "Iris-versicolor_0", "Iris-virginica_0"],
levels1)
assert levels2 == ["Iris-setosa_0", "Iris-versicolor_0", "Iris-virginica_1"], \
"Expected the following levels {0}, but got {1}".format(["Iris-setosa_0", "Iris-versicolor_0", "Iris-virginica_1"],
levels2)
assert levels3 == ["0_0", "1_0", "0_1"], "Expected the following levels {0}, but got {1}".format(["0_0", "1_0", "0_1"],
levels3)
if __name__ == "__main__":
tests.run_test(sys.argv, interaction_check)
| true | true |
f7ff11e70134f83ebfb3e63680787b3672a6a232 | 9,204 | py | Python | src/config.py | EthanJian/botty | 2afb05363e9b0cd2439faaebb4b9b6f76b809bb8 | [
"MIT"
] | null | null | null | src/config.py | EthanJian/botty | 2afb05363e9b0cd2439faaebb4b9b6f76b809bb8 | [
"MIT"
] | null | null | null | src/config.py | EthanJian/botty | 2afb05363e9b0cd2439faaebb4b9b6f76b809bb8 | [
"MIT"
] | null | null | null | import configparser
import numpy as np
import os
class Config:
    """Aggregated view over botty's INI configuration files.

    Parses config/params.ini, config/game.ini, config/pickit.ini and
    config/shop.ini, plus optional per-user overrides from
    config/custom.ini, and exposes the converted values as plain dicts
    (self.general, self.char, self.items, ...).
    """

    def _select_val(self, section: str, key: "str | None" = None):
        # Lookup precedence: custom.ini overrides always win, then each
        # bundled file is consulted for the section it owns; game.ini is
        # the final fallback.  Returns the raw configparser string value.
        if section in self._custom and key in self._custom[section]:
            return self._custom[section][key]
        elif section in self._config:
            return self._config[section][key]
        elif section in self._pickit_config:
            return self._pickit_config[section][key]
        elif section in self._shop_config:
            return self._shop_config[section][key]
        else:
            return self._game_config[section][key]

    def __init__(self, print_warnings: bool = False):
        # print_warnings, what a hack... here it is, not making the effort
        # passing a single config instance through bites me in the ass
        self._print_warnings = print_warnings
        self._config = configparser.ConfigParser()
        self._config.read('config/params.ini')
        self._game_config = configparser.ConfigParser()
        self._game_config.read('config/game.ini')
        self._pickit_config = configparser.ConfigParser()
        self._pickit_config.read('config/pickit.ini')
        self._shop_config = configparser.ConfigParser()
        self._shop_config.read('config/shop.ini')
        self._custom = configparser.ConfigParser()
        # custom.ini is optional and is ignored entirely under the test env
        if os.environ.get('RUN_ENV') != "test" and os.path.exists('config/custom.ini'):
            self._custom.read('config/custom.ini')
        # General bot behaviour (hotkeys, logging, run length, ...).
        self.general = {
            "saved_games_folder": self._select_val("general", "saved_games_folder"),
            "name": self._select_val("general", "name"),
            "monitor": int(self._select_val("general", "monitor")),
            "max_game_length_s": float(self._select_val("general", "max_game_length_s")),
            "exit_key": self._select_val("general", "exit_key"),
            "resume_key": self._select_val("general", "resume_key"),
            "auto_settings_key": self._select_val("general", "auto_settings_key"),
            "graphic_debugger_key": self._select_val("general", "graphic_debugger_key"),
            "logg_lvl": self._select_val("general", "logg_lvl"),
            "randomize_runs": bool(int(self._select_val("general", "randomize_runs"))),
            "difficulty": self._select_val("general", "difficulty"),
            "custom_message_hook": self._select_val("general", "custom_message_hook"),
            # False when unset; otherwise the integer value from the ini
            "discord_status_count": False if not self._select_val("general", "discord_status_count") else int(self._select_val("general", "discord_status_count")),
            "info_screenshots": bool(int(self._select_val("general", "info_screenshots"))),
            "loot_screenshots": bool(int(self._select_val("general", "loot_screenshots"))),
        }
        # Added for dclone ip hunting
        self.dclone = {
            "region_ips": self._select_val("dclone", "region_ips"),
            "dclone_hotip": self._select_val("dclone", "dclone_hotip"),
        }
        # Which runs are enabled; every key in [routes] becomes a bool.
        self.routes = {}
        for key in self._config["routes"]:
            self.routes[key] = bool(int(self._select_val("routes", key)))
        # Character setup: keybindings, potion/belt layout, attack timings.
        self.char = {
            "type": self._select_val("char", "type"),
            "show_items": self._select_val("char", "show_items"),
            "inventory_screen": self._select_val("char", "inventory_screen"),
            "stand_still": self._select_val("char", "stand_still"),
            "force_move": self._select_val("char", "force_move"),
            "num_loot_columns": int(self._select_val("char", "num_loot_columns")),
            "take_health_potion": float(self._select_val("char", "take_health_potion")),
            "take_mana_potion": float(self._select_val("char", "take_mana_potion")),
            "take_rejuv_potion_health": float(self._select_val("char", "take_rejuv_potion_health")),
            "take_rejuv_potion_mana": float(self._select_val("char", "take_rejuv_potion_mana")),
            "heal_merc": float(self._select_val("char", "heal_merc")),
            "heal_rejuv_merc": float(self._select_val("char", "heal_rejuv_merc")),
            "chicken": float(self._select_val("char", "chicken")),
            "merc_chicken": float(self._select_val("char", "merc_chicken")),
            "tp": self._select_val("char", "tp"),
            "belt_rows": int(self._select_val("char", "belt_rows")),
            "show_belt": self._select_val("char", "show_belt"),
            "potion1": self._select_val("char", "potion1"),
            "potion2": self._select_val("char", "potion2"),
            "potion3": self._select_val("char", "potion3"),
            "potion4": self._select_val("char", "potion4"),
            "belt_rejuv_columns": int(self._select_val("char", "belt_rejuv_columns")),
            "belt_hp_columns": int(self._select_val("char", "belt_hp_columns")),
            "belt_mp_columns": int(self._select_val("char", "belt_mp_columns")),
            "stash_gold": bool(int(self._select_val("char", "stash_gold"))),
            "gold_trav_only": bool(int(self._select_val("char", "gold_trav_only"))),
            "use_merc": bool(int(self._select_val("char", "use_merc"))),
            "pre_buff_every_run": bool(int(self._select_val("char", "pre_buff_every_run"))),
            "cta_available": bool(int(self._select_val("char", "cta_available"))),
            "weapon_switch": self._select_val("char", "weapon_switch"),
            "battle_orders": self._select_val("char", "battle_orders"),
            "battle_command": self._select_val("char", "battle_command"),
            "casting_frames": int(self._select_val("char", "casting_frames")),
            "atk_len_trav": float(self._select_val("char", "atk_len_trav")),
            "atk_len_pindle": float(self._select_val("char", "atk_len_pindle")),
            "atk_len_eldritch": float(self._select_val("char", "atk_len_eldritch")),
            "atk_len_shenk": float(self._select_val("char", "atk_len_shenk")),
            "atk_len_nihlatak": float(self._select_val("char", "atk_len_nihlatak")),
        }
        # Per-build sections; custom.ini entries overwrite the defaults.
        self.sorceress = dict(self._config["sorceress"])
        if "sorceress" in self._custom:
            self.sorceress.update(dict(self._custom["sorceress"]))
        self.hammerdin = self._config["hammerdin"]
        if "hammerdin" in self._custom:
            self.hammerdin.update(self._custom["hammerdin"])
        self.trapsin = self._config["trapsin"]
        if "trapsin" in self._custom:
            self.trapsin.update(self._custom["trapsin"])
        self.advanced_options = {
            # clamped to the range [1, 10]
            "pathing_delay_factor": min(max(int(self._select_val("advanced_options", "pathing_delay_factor")), 1), 10),
            "message_headers": self._select_val("advanced_options", "message_headers"),
            "message_body_template": self._select_val("advanced_options", "message_body_template"),
            "message_highlight": bool(int(self._select_val("advanced_options", "message_highlight"))),
        }
        # Pickit item flags; warn when an enabled item has no template image.
        self.items = {}
        for key in self._pickit_config["items"]:
            self.items[key] = int(self._select_val("items", key))
            if self.items[key] and not os.path.exists(f"./assets/items/{key}.png") and self._print_warnings:
                print(f"Warning: You activated {key} in pickit, but there is no img available in assets/items")
        # Colors are stored as "r1,g1,b1,r2,g2,b2" and split into two arrays.
        self.colors = {}
        for key in self._game_config["colors"]:
            self.colors[key] = np.split(np.array([int(x) for x in self._select_val("colors", key).split(",")]), 2)
        self.ui_pos = {}
        for key in self._game_config["ui_pos"]:
            self.ui_pos[key] = int(self._select_val("ui_pos", key))
        self.ui_roi = {}
        for key in self._game_config["ui_roi"]:
            self.ui_roi[key] = np.array([int(x) for x in self._select_val("ui_roi", key).split(",")])
        # Paths are flat comma lists reshaped into (n, 2) coordinate arrays.
        self.path = {}
        for key in self._game_config["path"]:
            self.path[key] = np.reshape(np.array([int(x) for x in self._select_val("path", key).split(",")]), (-1, 2))
        self.shop = {
            "shop_trap_claws": bool(int(self._select_val("claws", "shop_trap_claws"))),
            "shop_melee_claws": bool(int(self._select_val("claws", "shop_melee_claws"))),
            "shop_3_skills_ias_gloves": bool(int(self._select_val("gloves", "shop_3_skills_ias_gloves"))),
            "shop_2_skills_ias_gloves": bool(int(self._select_val("gloves", "shop_2_skills_ias_gloves"))),
            "trap_min_score": int(self._select_val("claws", "trap_min_score")),
            "melee_min_score": int(self._select_val("claws", "melee_min_score")),
        }
if __name__ == "__main__":
    # Standalone sanity check: cross-validate pickit entries against the
    # template images under assets/items.
    config = Config(print_warnings=True)
    # Check if any added items miss templates
    for k in config.items:
        if not os.path.exists(f"./assets/items/{k}.png"):
            print(f"Template not found: {k}")
    # Check if any item templates miss a config
    for filename in os.listdir(f'assets/items'):
        filename = filename.lower()
        if filename.endswith('.png'):
            item_name = filename[:-4]
            # templates prefixed "bl__" are blacklist images; they need no
            # pickit entry
            blacklist_item = item_name.startswith("bl__")
            if item_name not in config.items and not blacklist_item:
                print(f"Config not found for: " + filename)
| 53.824561 | 163 | 0.627119 | import configparser
import numpy as np
import os
class Config:
def _select_val(self, section: str, key: str = None):
if section in self._custom and key in self._custom[section]:
return self._custom[section][key]
elif section in self._config:
return self._config[section][key]
elif section in self._pickit_config:
return self._pickit_config[section][key]
elif section in self._shop_config:
return self._shop_config[section][key]
else:
return self._game_config[section][key]
def __init__(self, print_warnings: bool = False):
self._print_warnings = print_warnings
self._config = configparser.ConfigParser()
self._config.read('config/params.ini')
self._game_config = configparser.ConfigParser()
self._game_config.read('config/game.ini')
self._pickit_config = configparser.ConfigParser()
self._pickit_config.read('config/pickit.ini')
self._shop_config = configparser.ConfigParser()
self._shop_config.read('config/shop.ini')
self._custom = configparser.ConfigParser()
if os.environ.get('RUN_ENV') != "test" and os.path.exists('config/custom.ini'):
self._custom.read('config/custom.ini')
self.general = {
"saved_games_folder": self._select_val("general", "saved_games_folder"),
"name": self._select_val("general", "name"),
"monitor": int(self._select_val("general", "monitor")),
"max_game_length_s": float(self._select_val("general", "max_game_length_s")),
"exit_key": self._select_val("general", "exit_key"),
"resume_key": self._select_val("general", "resume_key"),
"auto_settings_key": self._select_val("general", "auto_settings_key"),
"graphic_debugger_key": self._select_val("general", "graphic_debugger_key"),
"logg_lvl": self._select_val("general", "logg_lvl"),
"randomize_runs": bool(int(self._select_val("general", "randomize_runs"))),
"difficulty": self._select_val("general", "difficulty"),
"custom_message_hook": self._select_val("general", "custom_message_hook"),
"discord_status_count": False if not self._select_val("general", "discord_status_count") else int(self._select_val("general", "discord_status_count")),
"info_screenshots": bool(int(self._select_val("general", "info_screenshots"))),
"loot_screenshots": bool(int(self._select_val("general", "loot_screenshots"))),
}
self.dclone = {
"region_ips": self._select_val("dclone", "region_ips"),
"dclone_hotip": self._select_val("dclone", "dclone_hotip"),
}
self.routes = {}
for key in self._config["routes"]:
self.routes[key] = bool(int(self._select_val("routes", key)))
self.char = {
"type": self._select_val("char", "type"),
"show_items": self._select_val("char", "show_items"),
"inventory_screen": self._select_val("char", "inventory_screen"),
"stand_still": self._select_val("char", "stand_still"),
"force_move": self._select_val("char", "force_move"),
"num_loot_columns": int(self._select_val("char", "num_loot_columns")),
"take_health_potion": float(self._select_val("char", "take_health_potion")),
"take_mana_potion": float(self._select_val("char", "take_mana_potion")),
"take_rejuv_potion_health": float(self._select_val("char", "take_rejuv_potion_health")),
"take_rejuv_potion_mana": float(self._select_val("char", "take_rejuv_potion_mana")),
"heal_merc": float(self._select_val("char", "heal_merc")),
"heal_rejuv_merc": float(self._select_val("char", "heal_rejuv_merc")),
"chicken": float(self._select_val("char", "chicken")),
"merc_chicken": float(self._select_val("char", "merc_chicken")),
"tp": self._select_val("char", "tp"),
"belt_rows": int(self._select_val("char", "belt_rows")),
"show_belt": self._select_val("char", "show_belt"),
"potion1": self._select_val("char", "potion1"),
"potion2": self._select_val("char", "potion2"),
"potion3": self._select_val("char", "potion3"),
"potion4": self._select_val("char", "potion4"),
"belt_rejuv_columns": int(self._select_val("char", "belt_rejuv_columns")),
"belt_hp_columns": int(self._select_val("char", "belt_hp_columns")),
"belt_mp_columns": int(self._select_val("char", "belt_mp_columns")),
"stash_gold": bool(int(self._select_val("char", "stash_gold"))),
"gold_trav_only": bool(int(self._select_val("char", "gold_trav_only"))),
"use_merc": bool(int(self._select_val("char", "use_merc"))),
"pre_buff_every_run": bool(int(self._select_val("char", "pre_buff_every_run"))),
"cta_available": bool(int(self._select_val("char", "cta_available"))),
"weapon_switch": self._select_val("char", "weapon_switch"),
"battle_orders": self._select_val("char", "battle_orders"),
"battle_command": self._select_val("char", "battle_command"),
"casting_frames": int(self._select_val("char", "casting_frames")),
"atk_len_trav": float(self._select_val("char", "atk_len_trav")),
"atk_len_pindle": float(self._select_val("char", "atk_len_pindle")),
"atk_len_eldritch": float(self._select_val("char", "atk_len_eldritch")),
"atk_len_shenk": float(self._select_val("char", "atk_len_shenk")),
"atk_len_nihlatak": float(self._select_val("char", "atk_len_nihlatak")),
}
self.sorceress = dict(self._config["sorceress"])
if "sorceress" in self._custom:
self.sorceress.update(dict(self._custom["sorceress"]))
self.hammerdin = self._config["hammerdin"]
if "hammerdin" in self._custom:
self.hammerdin.update(self._custom["hammerdin"])
self.trapsin = self._config["trapsin"]
if "trapsin" in self._custom:
self.trapsin.update(self._custom["trapsin"])
self.advanced_options = {
"pathing_delay_factor": min(max(int(self._select_val("advanced_options", "pathing_delay_factor")), 1), 10),
"message_headers": self._select_val("advanced_options", "message_headers"),
"message_body_template": self._select_val("advanced_options", "message_body_template"),
"message_highlight": bool(int(self._select_val("advanced_options", "message_highlight"))),
}
self.items = {}
for key in self._pickit_config["items"]:
self.items[key] = int(self._select_val("items", key))
if self.items[key] and not os.path.exists(f"./assets/items/{key}.png") and self._print_warnings:
print(f"Warning: You activated {key} in pickit, but there is no img available in assets/items")
self.colors = {}
for key in self._game_config["colors"]:
self.colors[key] = np.split(np.array([int(x) for x in self._select_val("colors", key).split(",")]), 2)
self.ui_pos = {}
for key in self._game_config["ui_pos"]:
self.ui_pos[key] = int(self._select_val("ui_pos", key))
self.ui_roi = {}
for key in self._game_config["ui_roi"]:
self.ui_roi[key] = np.array([int(x) for x in self._select_val("ui_roi", key).split(",")])
self.path = {}
for key in self._game_config["path"]:
self.path[key] = np.reshape(np.array([int(x) for x in self._select_val("path", key).split(",")]), (-1, 2))
self.shop = {
"shop_trap_claws": bool(int(self._select_val("claws", "shop_trap_claws"))),
"shop_melee_claws": bool(int(self._select_val("claws", "shop_melee_claws"))),
"shop_3_skills_ias_gloves": bool(int(self._select_val("gloves", "shop_3_skills_ias_gloves"))),
"shop_2_skills_ias_gloves": bool(int(self._select_val("gloves", "shop_2_skills_ias_gloves"))),
"trap_min_score": int(self._select_val("claws", "trap_min_score")),
"melee_min_score": int(self._select_val("claws", "melee_min_score")),
}
if __name__ == "__main__":
config = Config(print_warnings=True)
for k in config.items:
if not os.path.exists(f"./assets/items/{k}.png"):
print(f"Template not found: {k}")
for filename in os.listdir(f'assets/items'):
filename = filename.lower()
if filename.endswith('.png'):
item_name = filename[:-4]
blacklist_item = item_name.startswith("bl__")
if item_name not in config.items and not blacklist_item:
print(f"Config not found for: " + filename)
| true | true |
f7ff1237763303b117d4c70444399ea1f9253802 | 1,821 | py | Python | Module 3/9324OS_13_code/old/observer.py | real-slim-chadi/Python_Master-the-Art-of-Design-Patterns | 95ec92272374e330b04d931208abbb184c7c7908 | [
"MIT"
] | 73 | 2016-09-15T23:07:04.000Z | 2022-03-05T15:09:48.000Z | Module 3/9324OS_13_code/old/observer.py | real-slim-chadi/Python_Master-the-Art-of-Design-Patterns | 95ec92272374e330b04d931208abbb184c7c7908 | [
"MIT"
] | null | null | null | Module 3/9324OS_13_code/old/observer.py | real-slim-chadi/Python_Master-the-Art-of-Design-Patterns | 95ec92272374e330b04d931208abbb184c7c7908 | [
"MIT"
] | 51 | 2016-10-07T20:47:51.000Z | 2021-12-22T21:00:24.000Z | class Publisher:
    def __init__(self):
        # Subscribed observers; each must expose a notify(publisher) method.
        self.observers = []
def add(self, observer):
if observer not in self.observers:
self.observers.append(observer)
else:
print('Failed to add: {}'.format(observer))
def remove(self, observer):
try:
self.observers.remove(observer)
except ValueError:
print('Failed to remove: {}'.format(observer))
def notify(self):
[o.notify(self) for o in self.observers]
class DefaultFormatter(Publisher):
    """Publisher holding an integer payload; observers are told on each change."""

    def __init__(self, name):
        super().__init__()
        self.name = name
        self._data = 0

    def __str__(self):
        cls_name = type(self).__name__
        return "{}: '{}' has data = {}".format(cls_name, self.name, self._data)

    @property
    def data(self):
        """Current integer payload."""
        return self._data

    @data.setter
    def data(self, new_value):
        # Keep the previous value when conversion fails, but always notify.
        try:
            parsed = int(new_value)
        except ValueError as e:
            print('Error: {}'.format(e))
        else:
            self._data = parsed
        self.notify()
class HexFormatter:
    """Observer that prints the publisher's data rendered in hexadecimal."""

    def notify(self, publisher):
        template = "{}: '{}' has now hex data = {}"
        print(template.format(type(self).__name__, publisher.name, hex(publisher.data)))
class BinaryFormatter:
    """Observer that prints the publisher's data rendered in binary."""

    def notify(self, publisher):
        template = "{}: '{}' has now bin data = {}"
        print(template.format(type(self).__name__, publisher.name, bin(publisher.data)))
def main():
    # Demo of the observer pattern: one publisher with hex/binary observers
    # attached and detached over a series of updates.
    df = DefaultFormatter('test1')
    print(df)
    print()

    hf = HexFormatter()
    df.add(hf)
    df.data = 3
    print(df)
    print()

    bf = BinaryFormatter()
    df.add(bf)
    df.data = 21
    print(df)
    print()

    df.remove(hf)
    df.data = 40
    print(df)
    print()

    # Exercise the failure paths: hf was already removed above, and bf is
    # already subscribed, so both calls print failure messages.
    df.remove(hf)
    df.add(bf)
    df.data = 'hello'  # invalid: prints an error, data stays unchanged
    print(df)
    print()

    df.data = 15.8  # floats are truncated by int()
    print(df)


if __name__ == '__main__':
    main()
| 21.939759 | 112 | 0.569467 | class Publisher:
def __init__(self):
self.observers = []
def add(self, observer):
if observer not in self.observers:
self.observers.append(observer)
else:
print('Failed to add: {}'.format(observer))
def remove(self, observer):
try:
self.observers.remove(observer)
except ValueError:
print('Failed to remove: {}'.format(observer))
def notify(self):
[o.notify(self) for o in self.observers]
class DefaultFormatter(Publisher):
def __init__(self, name):
Publisher.__init__(self)
self.name = name
self._data = 0
def __str__(self):
return "{}: '{}' has data = {}".format(type(self).__name__, self.name, self._data)
@property
def data(self):
return self._data
@data.setter
def data(self, new_value):
try:
self._data = int(new_value)
except ValueError as e:
print('Error: {}'.format(e))
self.notify()
class HexFormatter:
def notify(self, publisher):
print("{}: '{}' has now hex data = {}".format(type(self).__name__, publisher.name, hex(publisher.data)))
class BinaryFormatter:
def notify(self, publisher):
print("{}: '{}' has now bin data = {}".format(type(self).__name__, publisher.name, bin(publisher.data)))
def main():
df = DefaultFormatter('test1')
print(df)
print()
hf = HexFormatter()
df.add(hf)
df.data = 3
print(df)
print()
bf = BinaryFormatter()
df.add(bf)
df.data = 21
print(df)
print()
df.remove(hf)
df.data = 40
print(df)
print()
df.remove(hf)
df.add(bf)
df.data = 'hello'
print(df)
print()
df.data = 15.8
print(df)
if __name__ == '__main__':
main()
| true | true |
f7ff12420621511535f18333efbdd5bf6d01c34a | 1,917 | py | Python | var/spack/repos/builtin/packages/libspatialite/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/libspatialite/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 8 | 2021-11-09T20:28:40.000Z | 2022-03-15T03:26:33.000Z | var/spack/repos/builtin/packages/libspatialite/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2019-02-08T20:37:20.000Z | 2019-03-31T15:19:26.000Z | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from spack.package import *
class Libspatialite(AutotoolsPackage):
    """SpatiaLite is an open source library intended to extend the
    SQLite core to support fully fledged Spatial SQL capabilities."""

    homepage = "https://www.gaia-gis.it"
    url = "https://www.gaia-gis.it/gaia-sins/libspatialite-sources/libspatialite-4.3.0a.tar.gz"
    # One listed version can only be fetched by hand (see below).
    manual_download = True

    version('5.0.1', sha256='eecbc94311c78012d059ebc0fae86ea5ef6eecb13303e6e82b3753c1b3409e98')
    version('5.0.0', sha256='7b7fd70243f5a0b175696d87c46dde0ace030eacc27f39241c24bac5dfac6dac')
    # Must download manually from:
    # https://www.gaia-gis.it/fossil/libspatialite/info/c7f67038bf06d98d
    # For instructions on the file:// below..
    # https://github.com/spack/spack/issues/2489
    version('5.0.0.2.c7f67038bf',
            sha256='f8100f71b769c7db066c6f938af6b00e920e4b90ac14c00a4f3ed7171565caab',
            url="file://%s/SpatiaLite-c7f67038bf.tar.gz" % os.getcwd())
    version('5.0.0-beta0', sha256='caacf5378a5cfab9b8e98bb361e2b592e714e21f5c152b795df80d0ab1da1c42')
    version('4.3.0a',
            sha256='88900030a4762904a7880273f292e5e8ca6b15b7c6c3fb88ffa9e67ee8a5a499')
    version('3.0.1', sha256='4983d6584069fd5ff0cfcccccee1015088dab2db177c0dc7050ce8306b68f8e6')

    depends_on('pkgconfig', type='build')
    # SQLite must be built with the R*Tree module for spatial indexing.
    depends_on('sqlite+rtree')
    depends_on('proj@:5', when='@:4')
    # PROJ.6 is OK w/ newer versions
    # https://www.gaia-gis.it/fossil/libspatialite/wiki?name=PROJ.6
    depends_on('proj')
    depends_on('geos')
    depends_on('freexl')
    depends_on('iconv')
    depends_on('libxml2')
    depends_on('minizip', when='@5.0.0:')
    depends_on('librttopo', when='@5.0.1:')
| 41.673913 | 101 | 0.72144 |
import os
from spack.package import *
class Libspatialite(AutotoolsPackage):
homepage = "https://www.gaia-gis.it"
url = "https://www.gaia-gis.it/gaia-sins/libspatialite-sources/libspatialite-4.3.0a.tar.gz"
manual_download = True
version('5.0.1', sha256='eecbc94311c78012d059ebc0fae86ea5ef6eecb13303e6e82b3753c1b3409e98')
version('5.0.0', sha256='7b7fd70243f5a0b175696d87c46dde0ace030eacc27f39241c24bac5dfac6dac')
version('5.0.0.2.c7f67038bf',
sha256='f8100f71b769c7db066c6f938af6b00e920e4b90ac14c00a4f3ed7171565caab',
url="file://%s/SpatiaLite-c7f67038bf.tar.gz" % os.getcwd())
version('5.0.0-beta0', sha256='caacf5378a5cfab9b8e98bb361e2b592e714e21f5c152b795df80d0ab1da1c42')
version('4.3.0a',
sha256='88900030a4762904a7880273f292e5e8ca6b15b7c6c3fb88ffa9e67ee8a5a499')
version('3.0.1', sha256='4983d6584069fd5ff0cfcccccee1015088dab2db177c0dc7050ce8306b68f8e6')
depends_on('pkgconfig', type='build')
depends_on('sqlite+rtree')
depends_on('proj@:5', when='@:4')
depends_on('proj')
depends_on('geos')
depends_on('freexl')
depends_on('iconv')
depends_on('libxml2')
depends_on('minizip', when='@5.0.0:')
depends_on('librttopo', when='@5.0.1:')
| true | true |
f7ff12a64cee41ab112a3716230fa615d78fefa0 | 3,442 | py | Python | fairseq/criterions/interlingua_loss.py | carlosep93/LANGSPEC | 8c8f55d999d79628a56f48d4e1a8918f8c426f72 | [
"BSD-3-Clause"
] | null | null | null | fairseq/criterions/interlingua_loss.py | carlosep93/LANGSPEC | 8c8f55d999d79628a56f48d4e1a8918f8c426f72 | [
"BSD-3-Clause"
] | null | null | null | fairseq/criterions/interlingua_loss.py | carlosep93/LANGSPEC | 8c8f55d999d79628a56f48d4e1a8918f8c426f72 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import math
import torch
from fairseq import utils
from . import FairseqCriterion, register_criterion
@register_criterion('interlingua_label_smoothed_cross_entropy')
class InterlinguaLabelSmoothedCrossEntropyCriterion(FairseqCriterion):
    """Label-smoothed cross-entropy criterion that moves the resulting
    losses to the last CUDA device (or CPU when at most one GPU is
    available) before returning them.
    """

    def __init__(self, args, task):
        super().__init__(args, task)
        # label smoothing epsilon from --label-smoothing (0 disables it)
        self.eps = args.label_smoothing

    @staticmethod
    def add_args(parser):
        """Add criterion-specific arguments to the parser."""
        parser.add_argument('--label-smoothing', default=0., type=float, metavar='D',
                            help='epsilon for label smoothing, 0 means no label smoothing')

    def forward(self, model, sample, reduce=True):
        """Compute the loss for the given sample.
        Returns a tuple with three elements:
        1) the loss
        2) the sample size, which is used as the denominator for the gradient
        3) logging outputs to display while training
        """
        net_output = model(**sample['net_input'])
        loss, nll_loss = self.compute_loss(model, net_output, sample, reduce=reduce)
        # sample size normalizes the gradient: sentences or tokens,
        # depending on --sentence-avg
        sample_size = sample['target'].size(0) if self.args.sentence_avg else sample['ntokens']
        logging_output = {
            'loss': utils.item(loss.data) if reduce else loss.data,
            'nll_loss': utils.item(nll_loss.data) if reduce else nll_loss.data,
            'ntokens': sample['ntokens'],
            'nsentences': sample['target'].size(0),
            'sample_size': sample_size,
        }
        return loss, sample_size, logging_output

    def compute_loss(self, model, net_output, sample, reduce=True):
        # Flatten log-probs to (batch*time, vocab) and targets to a column.
        lprobs = model.get_normalized_probs(net_output, log_probs=True)
        lprobs = lprobs.view(-1, lprobs.size(-1))
        target = model.get_targets(sample, net_output).view(-1, 1)
        # Padding positions are excluded from both loss terms.
        non_pad_mask = target.ne(self.padding_idx)
        nll_loss = -lprobs.gather(dim=-1, index=target)[non_pad_mask]
        smooth_loss = -lprobs.sum(dim=-1, keepdim=True)[non_pad_mask]
        if reduce:
            nll_loss = nll_loss.sum()
            smooth_loss = smooth_loss.sum()
        # Standard label smoothing: mix the NLL with the uniform-over-vocab
        # term, with eps spread across the vocabulary.
        eps_i = self.eps / lprobs.size(-1)
        loss = (1. - self.eps) * nll_loss + eps_i * smooth_loss
        # Move both losses to the last CUDA device when more than one GPU is
        # present, otherwise to CPU.  Presumably this gathers losses where
        # the final pipeline stage lives -- TODO confirm against the trainer.
        nd = torch.cuda.device_count()
        d0 = torch.device("cuda:" + str(nd-1)) if nd > 1 else torch.device("cpu:0")
        return loss.to(d0), nll_loss.to(d0)

    @staticmethod
    def aggregate_logging_outputs(logging_outputs):
        """Aggregate logging outputs from data parallel training."""
        ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)
        losses = sum(log.get('loss', 0) for log in logging_outputs)
        nll_losses = sum(log.get('nll_loss', 0) for log in logging_outputs)
        nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)
        sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)
        # Losses are reported in bits (divide by log 2).
        d = {
            'loss': losses / sample_size / math.log(2),
            'nll_loss': nll_losses / ntokens / math.log(2),
            'ntokens': ntokens,
            'nsentences': nsentences,
            'sample_size': sample_size,
        }
        return d
| 42.493827 | 95 | 0.647879 |
import math
import torch
from fairseq import utils
from . import FairseqCriterion, register_criterion
@register_criterion('interlingua_label_smoothed_cross_entropy')
class InterlinguaLabelSmoothedCrossEntropyCriterion(FairseqCriterion):
def __init__(self, args, task):
super().__init__(args, task)
self.eps = args.label_smoothing
@staticmethod
def add_args(parser):
parser.add_argument('--label-smoothing', default=0., type=float, metavar='D',
help='epsilon for label smoothing, 0 means no label smoothing')
def forward(self, model, sample, reduce=True):
net_output = model(**sample['net_input'])
loss, nll_loss = self.compute_loss(model, net_output, sample, reduce=reduce)
sample_size = sample['target'].size(0) if self.args.sentence_avg else sample['ntokens']
logging_output = {
'loss': utils.item(loss.data) if reduce else loss.data,
'nll_loss': utils.item(nll_loss.data) if reduce else nll_loss.data,
'ntokens': sample['ntokens'],
'nsentences': sample['target'].size(0),
'sample_size': sample_size,
}
return loss, sample_size, logging_output
def compute_loss(self, model, net_output, sample, reduce=True):
lprobs = model.get_normalized_probs(net_output, log_probs=True)
lprobs = lprobs.view(-1, lprobs.size(-1))
target = model.get_targets(sample, net_output).view(-1, 1)
non_pad_mask = target.ne(self.padding_idx)
nll_loss = -lprobs.gather(dim=-1, index=target)[non_pad_mask]
smooth_loss = -lprobs.sum(dim=-1, keepdim=True)[non_pad_mask]
if reduce:
nll_loss = nll_loss.sum()
smooth_loss = smooth_loss.sum()
eps_i = self.eps / lprobs.size(-1)
loss = (1. - self.eps) * nll_loss + eps_i * smooth_loss
nd = torch.cuda.device_count()
d0 = torch.device("cuda:" + str(nd-1)) if nd > 1 else torch.device("cpu:0")
return loss.to(d0), nll_loss.to(d0)
@staticmethod
def aggregate_logging_outputs(logging_outputs):
ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)
losses = sum(log.get('loss', 0) for log in logging_outputs)
nll_losses = sum(log.get('nll_loss', 0) for log in logging_outputs)
nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)
sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)
d = {
'loss': losses / sample_size / math.log(2),
'nll_loss': nll_losses / ntokens / math.log(2),
'ntokens': ntokens,
'nsentences': nsentences,
'sample_size': sample_size,
}
return d
| true | true |
f7ff130e2a91ce553a5e256194defbcff6bdbd8a | 1,891 | py | Python | src/04_sync/philosopher.py | rurumimic/concurrency-python | 3eb7875dd4848872226f8035d295a31a40e32bf0 | [
"MIT"
] | null | null | null | src/04_sync/philosopher.py | rurumimic/concurrency-python | 3eb7875dd4848872226f8035d295a31a40e32bf0 | [
"MIT"
] | null | null | null | src/04_sync/philosopher.py | rurumimic/concurrency-python | 3eb7875dd4848872226f8035d295a31a40e32bf0 | [
"MIT"
] | null | null | null | import random
import threading
import time
class Philosopher(threading.Thread):
    """A dining philosopher: a thread that alternates thinking and eating.

    Eating requires acquiring the left fork first, then the right fork;
    both are released afterwards (right before left, via nested
    try/finally).  Because every philosopher grabs its left fork first,
    a circular hold-and-wait (deadlock) is possible when all philosophers
    hold their left fork at the same time.
    """

    def __init__(self, name, leftFork, rightFork):
        print(f'{name} Has Sat Down At the Table')
        threading.Thread.__init__(self, name=name)
        self.leftFork = leftFork
        self.rightFork = rightFork

    def run(self):
        print(f'{threading.current_thread().name} has started thinking')
        while True:
            # Think for a random 1-5 s before reaching for the forks.
            time.sleep(random.randint(1, 5))
            print(f'{threading.current_thread().name} has finished thinking')
            self.leftFork.acquire()
            time.sleep(random.randint(1, 5))
            try:
                print(
                    f'{threading.current_thread().name} has acquired the left fork')
                self.rightFork.acquire()
                try:
                    print(
                        f'{threading.current_thread().name} has attained both forks, currently eating')
                finally:
                    self.rightFork.release()
                    print(
                        f'{threading.current_thread().name} has released the right fork')
            finally:
                self.leftFork.release()
                print(
                    f'{threading.current_thread().name} has released the left fork')
# Five forks on the table; philosopher i shares forks[i] and forks[(i+1) % 5]
# with the neighbours, so the last philosopher wraps around to the first fork.
forks = [threading.RLock() for _ in range(5)]
names = ['Kant', 'Aristotle', 'Spinoza', 'Marx', 'Russell']
philosophers = [
    Philosopher(name, forks[i], forks[(i + 1) % 5])
    for i, name in enumerate(names)
]
for philosopher in philosophers:
    philosopher.start()
for philosopher in philosophers:
    philosopher.join()
| 31 | 103 | 0.618191 | import random
import threading
import time
class Philosopher(threading.Thread):
def __init__(self, name, leftFork, rightFork):
print(f'{name} Has Sat Down At the Table')
threading.Thread.__init__(self, name=name)
self.leftFork = leftFork
self.rightFork = rightFork
def run(self):
print(f'{threading.current_thread().name} has started thinking')
while True:
time.sleep(random.randint(1, 5))
print(f'{threading.current_thread().name} has finished thinking')
self.leftFork.acquire()
time.sleep(random.randint(1, 5))
try:
print(
f'{threading.current_thread().name} has acquired the left fork')
self.rightFork.acquire()
try:
print(
f'{threading.current_thread().name} has attained both forks, currently eating')
finally:
self.rightFork.release()
print(
f'{threading.current_thread().name} has released the right fork')
finally:
self.leftFork.release()
print(
f'{threading.current_thread().name} has released the left fork')
fork1 = threading.RLock()
fork2 = threading.RLock()
fork3 = threading.RLock()
fork4 = threading.RLock()
fork5 = threading.RLock()
philosopher1 = Philosopher('Kant', fork1, fork2)
philosopher2 = Philosopher('Aristotle', fork2, fork3)
philosopher3 = Philosopher('Spinoza', fork3, fork4)
philosopher4 = Philosopher('Marx', fork4, fork5)
philosopher5 = Philosopher('Russell', fork5, fork1)
philosopher1.start()
philosopher2.start()
philosopher3.start()
philosopher4.start()
philosopher5.start()
philosopher1.join()
philosopher2.join()
philosopher3.join()
philosopher4.join()
philosopher5.join()
| true | true |
f7ff13e4b4de122bc1af7115f845b8db159ae3c1 | 692 | py | Python | Data Scientist Career Path/7. Summary Statistics/7. Visualizing Categorical Data/2. Pie/3. better.py | myarist/Codecademy | 2ba0f104bc67ab6ef0f8fb869aa12aa02f5f1efb | [
"MIT"
] | 23 | 2021-06-06T15:35:55.000Z | 2022-03-21T06:53:42.000Z | Data Scientist Career Path/7. Summary Statistics/7. Visualizing Categorical Data/2. Pie/3. better.py | shivaniverma1/Data-Scientist | f82939a411484311171465591455880c8e354750 | [
"MIT"
] | null | null | null | Data Scientist Career Path/7. Summary Statistics/7. Visualizing Categorical Data/2. Pie/3. better.py | shivaniverma1/Data-Scientist | f82939a411484311171465591455880c8e354750 | [
"MIT"
] | 9 | 2021-06-08T01:32:04.000Z | 2022-03-18T15:38:09.000Z | import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import codecademylib3
# Load the per-major data and the department-level aggregation.
major_data = pd.read_csv("major_data.csv")
print(major_data.head())
major_data_agg = pd.read_csv("major_data_agg.csv")
print(major_data_agg.head())

# Each chart spec: (wedge sizes, wedge labels, subplot title).
charts = [
    (major_data["proportion"], major_data["major"], "Too Many Slices"),
    (major_data_agg["proportion"], major_data_agg["department"], "Good Number of Slices"),
]
for position, (wedges, labels, title) in enumerate(charts, start=1):
    plt.subplot(2, 1, position)
    plt.pie(wedges, labels=labels)
    plt.axis('Equal')
    plt.title(title)
    plt.tight_layout()
plt.show()
| 21.625 | 50 | 0.771676 | import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import codecademylib3
major_data = pd.read_csv("major_data.csv")
print(major_data.head())
major_data_agg = pd.read_csv("major_data_agg.csv")
print(major_data_agg.head())
pie_wedges = major_data["proportion"]
pie_labels = major_data["major"]
pie_wedges_agg = major_data_agg["proportion"]
pie_labels_agg = major_data_agg["department"]
plt.subplot(2,1,1)
plt.pie(pie_wedges, labels = pie_labels)
plt.axis('Equal')
plt.title("Too Many Slices")
plt.tight_layout()
plt.subplot(2,1,2)
plt.pie(pie_wedges_agg, labels = pie_labels_agg)
plt.axis('Equal')
plt.title("Good Number of Slices")
plt.tight_layout()
plt.show()
| true | true |
f7ff154ee539df909268c32975d808718debd77f | 4,225 | py | Python | recipes/m4/all/conanfile.py | ngerke/conan-center-index | 758e929499e06754c6a9fd081cf5faa0f9be4dd2 | [
"MIT"
] | null | null | null | recipes/m4/all/conanfile.py | ngerke/conan-center-index | 758e929499e06754c6a9fd081cf5faa0f9be4dd2 | [
"MIT"
] | null | null | null | recipes/m4/all/conanfile.py | ngerke/conan-center-index | 758e929499e06754c6a9fd081cf5faa0f9be4dd2 | [
"MIT"
] | null | null | null | from conans import ConanFile, tools, AutoToolsBuildEnvironment
from contextlib import contextmanager
import os
class M4Conan(ConanFile):
    """Conan package recipe that builds GNU M4 from source with autotools."""
    name = "m4"
    description = "GNU M4 is an implementation of the traditional Unix macro processor"
    topics = ("conan", "m4", "macro", "macro processor")
    url = "https://github.com/conan-io/conan-center-index"
    homepage = "https://www.gnu.org/software/m4/"
    license = "GPL-3.0-only"
    exports_sources = ["patches/*.patch"]
    settings = "os", "arch", "compiler"
    # Cached AutoToolsBuildEnvironment helper and in-tree folder names.
    _autotools = None
    _source_subfolder = "source_subfolder"
    _build_subfolder = "build_subfolder"
    @property
    def _is_msvc(self):
        """True when building with the Visual Studio compiler."""
        return self.settings.compiler == "Visual Studio"
    @property
    def _is_clang(self):
        """True for clang-family compilers (clang, apple-clang, ...)."""
        return str(self.settings.compiler).endswith("clang")
    def build_requirements(self):
        """Require msys2 on Windows unless a bash environment already exists."""
        if tools.os_info.is_windows and "CONAN_BASH_PATH" not in os.environ and \
                tools.os_info.detect_windows_subsystem() != "msys2":
            self.build_requires("msys2/20190524")
    def source(self):
        """Download the m4 tarball and unpack it into the source subfolder."""
        tools.get(**self.conan_data["sources"][self.version])
        os.rename("m4-" + self.version, self._source_subfolder)
    def _configure_autotools(self):
        """Create (once) and return the configured autotools build helper."""
        if self._autotools:
            return self._autotools
        conf_args = []
        self._autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)
        build_canonical_name = None
        host_canonical_name = None
        if self.settings.compiler == "Visual Studio":
            # The somewhat older configure script of m4 does not understand the canonical names of Visual Studio
            build_canonical_name = False
            host_canonical_name = False
        self._autotools.configure(args=conf_args, configure_dir=self._source_subfolder, build=build_canonical_name, host=host_canonical_name)
        return self._autotools
    @contextmanager
    def _build_context(self):
        """Yield with the environment adjusted for the active compiler.

        For MSVC, the autotools wrapper tools (cl, lib, link, dumpbin) are
        injected under vcvars; for clang, compiler-rt is selected as the
        runtime library via CFLAGS.
        """
        env = {}
        if self.settings.compiler == "Visual Studio":
            with tools.vcvars(self.settings):
                env.update({
                    "AR": "{}/build-aux/ar-lib lib".format(tools.unix_path(self._source_subfolder)),
                    "CC": "cl -nologo",
                    "CXX": "cl -nologo",
                    "LD": "link",
                    "NM": "dumpbin -symbols",
                    "OBJDUMP": ":",
                    "RANLIB": ":",
                    "STRIP": ":",
                })
                with tools.environment_append(env):
                    yield
        else:
            if self._is_clang:
                env["CFLAGS"] = "-rtlib=compiler-rt"
            with tools.environment_append(env):
                yield
    def _patch_sources(self):
        """Apply every patch declared in conandata.yml for this version."""
        for patch in self.conan_data["patches"][self.version]:
            tools.patch(**patch)
    def build(self):
        """Patch, configure and compile m4; optionally run its test suite."""
        self._patch_sources()
        with self._build_context():
            autotools = self._configure_autotools()
            autotools.make()
            # Opt-in test run, controlled by the CONAN_RUN_TESTS env var.
            if bool(os.environ.get("CONAN_RUN_TESTS", "")):
                self.output.info("Running m4 checks...")
                with tools.chdir("tests"):
                    autotools.make(target="check")
    def package(self):
        """Install the built files plus the license into the package folder."""
        self.copy(pattern="COPYING", dst="licenses", src=self._source_subfolder)
        with self._build_context():
            autotools = self._configure_autotools()
            autotools.install()
        tools.rmdir(os.path.join(self.package_folder, "share"))
    def package_id(self):
        """Include build settings in the package id (m4 is a build tool)."""
        self.info.include_build_settings()
    def package_info(self):
        """Expose bin/ on PATH and publish the M4 environment variable."""
        bin_path = os.path.join(self.package_folder, "bin")
        self.output.info("Appending PATH environment variable: {}".format(bin_path))
        self.env_info.PATH.append(bin_path)
        bin_ext = ".exe" if self.settings.os == "Windows" else ""
        m4_bin = os.path.join(self.package_folder, "bin", "m4{}".format(bin_ext)).replace("\\", "/")
        # M4 environment variable is used by a lot of scripts as a way to override a hard-coded embedded m4 path
        self.output.info("Setting M4 environment variable: {}".format(m4_bin))
        self.env_info.M4 = m4_bin
| 38.761468 | 141 | 0.607574 | from conans import ConanFile, tools, AutoToolsBuildEnvironment
from contextlib import contextmanager
import os
class M4Conan(ConanFile):
name = "m4"
description = "GNU M4 is an implementation of the traditional Unix macro processor"
topics = ("conan", "m4", "macro", "macro processor")
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://www.gnu.org/software/m4/"
license = "GPL-3.0-only"
exports_sources = ["patches/*.patch"]
settings = "os", "arch", "compiler"
_autotools = None
_source_subfolder = "source_subfolder"
_build_subfolder = "build_subfolder"
@property
def _is_msvc(self):
return self.settings.compiler == "Visual Studio"
@property
def _is_clang(self):
return str(self.settings.compiler).endswith("clang")
def build_requirements(self):
if tools.os_info.is_windows and "CONAN_BASH_PATH" not in os.environ and \
tools.os_info.detect_windows_subsystem() != "msys2":
self.build_requires("msys2/20190524")
def source(self):
tools.get(**self.conan_data["sources"][self.version])
os.rename("m4-" + self.version, self._source_subfolder)
def _configure_autotools(self):
if self._autotools:
return self._autotools
conf_args = []
self._autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)
build_canonical_name = None
host_canonical_name = None
if self.settings.compiler == "Visual Studio":
build_canonical_name = False
host_canonical_name = False
self._autotools.configure(args=conf_args, configure_dir=self._source_subfolder, build=build_canonical_name, host=host_canonical_name)
return self._autotools
@contextmanager
def _build_context(self):
env = {}
if self.settings.compiler == "Visual Studio":
with tools.vcvars(self.settings):
env.update({
"AR": "{}/build-aux/ar-lib lib".format(tools.unix_path(self._source_subfolder)),
"CC": "cl -nologo",
"CXX": "cl -nologo",
"LD": "link",
"NM": "dumpbin -symbols",
"OBJDUMP": ":",
"RANLIB": ":",
"STRIP": ":",
})
with tools.environment_append(env):
yield
else:
if self._is_clang:
env["CFLAGS"] = "-rtlib=compiler-rt"
with tools.environment_append(env):
yield
def _patch_sources(self):
for patch in self.conan_data["patches"][self.version]:
tools.patch(**patch)
def build(self):
self._patch_sources()
with self._build_context():
autotools = self._configure_autotools()
autotools.make()
if bool(os.environ.get("CONAN_RUN_TESTS", "")):
self.output.info("Running m4 checks...")
with tools.chdir("tests"):
autotools.make(target="check")
def package(self):
self.copy(pattern="COPYING", dst="licenses", src=self._source_subfolder)
with self._build_context():
autotools = self._configure_autotools()
autotools.install()
tools.rmdir(os.path.join(self.package_folder, "share"))
def package_id(self):
self.info.include_build_settings()
def package_info(self):
bin_path = os.path.join(self.package_folder, "bin")
self.output.info("Appending PATH environment variable: {}".format(bin_path))
self.env_info.PATH.append(bin_path)
bin_ext = ".exe" if self.settings.os == "Windows" else ""
m4_bin = os.path.join(self.package_folder, "bin", "m4{}".format(bin_ext)).replace("\\", "/")
self.output.info("Setting M4 environment variable: {}".format(m4_bin))
self.env_info.M4 = m4_bin
| true | true |
f7ff1607025948cbe197fe6999d784f564b5cee6 | 2,295 | py | Python | servers/dta/config_default.py | apache/incubator-milagro-mfa-server | b33dfe864ff0bcb8a26a46745b9c596d72d22ccf | [
"Apache-2.0"
] | 21 | 2016-09-18T19:13:58.000Z | 2021-11-10T18:35:30.000Z | servers/dta/config_default.py | apache/incubator-milagro-mfa-server | b33dfe864ff0bcb8a26a46745b9c596d72d22ccf | [
"Apache-2.0"
] | 3 | 2016-09-21T14:58:41.000Z | 2019-05-29T23:35:32.000Z | servers/dta/config_default.py | apache/incubator-milagro-mfa-server | b33dfe864ff0bcb8a26a46745b9c596d72d22ccf | [
"Apache-2.0"
] | 15 | 2016-05-24T11:15:47.000Z | 2021-11-10T18:35:22.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
"""HTTP server settings"""
address = "127.0.0.1"
port = 8001
"""Time synchronization
To be able to perform time based verification, by default D-TA syncs its time
with MIRACL servers. If you set it to False, you should still sync the server
using an accurate NTP time server!
"""
# syncTime = False
"""The location of your keys file (relative to mpin-backend/servers/dta)."""
credentialsFile = '%CREDENTIALSFILE%'
"""Entropy sources
D-TA supports multiple ways to gather entropy random, urandom, certivox or
combination of those.
"""
# EntropySources = 'dev_urandom:100' # Default
# EntropySources = 'certivox:100'
# EntropySources = 'dev_urandom:60,certivox:40'
"""Backup master secret
D-TA supports storing the master secret in a file rather than generating it every
time on startup. It is enabled by default, set to False to disable. Master secret
will be encrypted by default unless disabled by settingencrypt_master_secret to
False. Master secret will be encoded with passphrase and salt to be provided
- salt in the config file
- passphrase - supplied on startup or in the config (not encouraged)
Passphrase can be changed by running the service with changePassphrase option.
To change the location of the backup file change backup_file option (relative to
mpin-backend/servers/dta).
"""
# backup = False
backup_file = '%BACKUP_FILE%'
# encrypt_master_secret = False
passphrase = '%PASSPHRASE%'
salt = '%SALT%'
"""Debug options"""
# logLevel = "INFO"
| 34.772727 | 81 | 0.76732 |
from __future__ import unicode_literals
address = "127.0.0.1"
port = 8001
credentialsFile = '%CREDENTIALSFILE%'
up_file = '%BACKUP_FILE%'
passphrase = '%PASSPHRASE%'
salt = '%SALT%'
| true | true |
f7ff161b0418f98ead8a2de0684d91ab248d7ee3 | 3,945 | py | Python | netbox_client/models/width.py | nrfta/python-netbox-client | 68ba6dd4d7306513dc1ad38f3ac59122ba4f70a8 | [
"MIT"
] | null | null | null | netbox_client/models/width.py | nrfta/python-netbox-client | 68ba6dd4d7306513dc1ad38f3ac59122ba4f70a8 | [
"MIT"
] | null | null | null | netbox_client/models/width.py | nrfta/python-netbox-client | 68ba6dd4d7306513dc1ad38f3ac59122ba4f70a8 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
NetBox API
API to access NetBox # noqa: E501
OpenAPI spec version: 2.8
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Width(object):
    """Swagger-generated model for a NetBox rack-width choice.

    Carries a human-readable ``label`` and its numeric ``value``; both are
    validated on assignment through their property setters.
    """

    # Attribute name -> swagger type, consumed by to_dict().
    swagger_types = {
        'label': 'str',
        'value': 'int'
    }

    # Attribute name -> JSON key in the API payload.
    attribute_map = {
        'label': 'label',
        'value': 'value'
    }

    def __init__(self, label=None, value=None):
        """Width - a model defined in Swagger"""
        self._label = None
        self._value = None
        self.discriminator = None
        # Assign through the properties so validation runs on construction.
        self.label = label
        self.value = value

    @property
    def label(self):
        """Human-readable width label, e.g. ``"19 inches"``."""
        return self._label

    @label.setter
    def label(self, label):
        """Validate and set the label; must be one of the allowed choices."""
        if label is None:
            raise ValueError("Invalid value for `label`, must not be `None`")
        allowed_values = ["10 inches", "19 inches", "21 inches", "23 inches"]
        if label not in allowed_values:
            raise ValueError(
                "Invalid value for `label` ({0}), must be one of {1}"
                .format(label, allowed_values)
            )
        self._label = label

    @property
    def value(self):
        """Numeric rack width."""
        return self._value

    @value.setter
    def value(self, value):
        """Validate and set the numeric value; ``None`` is rejected."""
        if value is None:
            raise ValueError("Invalid value for `value`, must not be `None`")
        self._value = value

    def to_dict(self):
        """Return the model properties as a dict, recursing into nested models."""
        out = {}
        for name in self.swagger_types:
            val = getattr(self, name)
            if isinstance(val, list):
                out[name] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in val
                ]
            elif hasattr(val, "to_dict"):
                out[name] = val.to_dict()
            elif isinstance(val, dict):
                out[name] = {
                    key: entry.to_dict() if hasattr(entry, "to_dict") else entry
                    for key, entry in val.items()
                }
            else:
                out[name] = val
        # Generated escape hatch for dict subclasses; never true for Width.
        if issubclass(Width, dict):
            for key, val in self.items():
                out[key] = val
        return out

    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Two Width models are equal when all their attributes match."""
        return isinstance(other, Width) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        return not self == other
import pprint
import re
import six
class Width(object):
swagger_types = {
'label': 'str',
'value': 'int'
}
attribute_map = {
'label': 'label',
'value': 'value'
}
def __init__(self, label=None, value=None):
self._label = None
self._value = None
self.discriminator = None
self.label = label
self.value = value
@property
def label(self):
return self._label
@label.setter
def label(self, label):
if label is None:
raise ValueError("Invalid value for `label`, must not be `None`")
allowed_values = ["10 inches", "19 inches", "21 inches", "23 inches"]
if label not in allowed_values:
raise ValueError(
"Invalid value for `label` ({0}), must be one of {1}"
.format(label, allowed_values)
)
self._label = label
@property
def value(self):
return self._value
@value.setter
def value(self, value):
if value is None:
raise ValueError("Invalid value for `value`, must not be `None`")
self._value = value
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Width, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, Width):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
f7ff179043d1abfbb8cfad3b21f483f8593a6738 | 6,090 | py | Python | src/pur/core/txs/SlaveTransaction.py | pur-token/pur-core | ce372be274262a839c45436dfee58ba4ea105074 | [
"MIT"
] | null | null | null | src/pur/core/txs/SlaveTransaction.py | pur-token/pur-core | ce372be274262a839c45436dfee58ba4ea105074 | [
"MIT"
] | null | null | null | src/pur/core/txs/SlaveTransaction.py | pur-token/pur-core | ce372be274262a839c45436dfee58ba4ea105074 | [
"MIT"
] | null | null | null | from pypurlib.pypurlib import bin2hstr
from pur.core.State import State
from pur.core.StateContainer import StateContainer
from pur.core.misc import logger
from pur.core.txs.Transaction import Transaction
from pur.generated.pur_pb2 import SlaveMetadata
class SlaveTransaction(Transaction):
    """Transaction that registers slave public keys for a master address.

    Carries parallel lists of slave public keys and their access types
    (validated to be 0 or 1; exact semantics of each access type are
    defined elsewhere — NOTE(review): confirm against Transaction docs).
    """
    def __init__(self, protobuf_transaction=None):
        """Wrap an (optional) existing protobuf transaction message."""
        super(SlaveTransaction, self).__init__(protobuf_transaction)
    @property
    def slave_pks(self):
        """Slave public keys carried by this transaction."""
        return self._data.slave.slave_pks
    @property
    def access_types(self):
        """Access type per slave pk (parallel to ``slave_pks``; each 0 or 1)."""
        return self._data.slave.access_types
    def get_data_bytes(self) -> bytes:
        """Return the byte string that is hashed and signed for this tx."""
        # Master address + fee, then each (pk, access_type) pair in order.
        tmptxhash = (self.master_addr +
                     self.fee.to_bytes(8, byteorder='big', signed=False))
        for index in range(0, len(self.slave_pks)):
            tmptxhash = (tmptxhash +
                         self.slave_pks[index] +
                         self.access_types[index].to_bytes(8, byteorder='big', signed=False))
        return tmptxhash
    @staticmethod
    def create(slave_pks: list, access_types: list, fee: int, purss_pk: bytes, master_addr: bytes = None):
        """Build a new SlaveTransaction and run stateless validation.

        Raises via ``validate_or_raise`` when the custom validation fails.
        """
        transaction = SlaveTransaction()
        if master_addr:
            transaction._data.master_addr = master_addr
        for slave_pk in slave_pks:
            transaction._data.slave.slave_pks.append(slave_pk)
        for access_type in access_types:
            transaction._data.slave.access_types.append(access_type)
        transaction._data.fee = fee
        transaction._data.public_key = purss_pk
        transaction.validate_or_raise(verify_signature=False)
        return transaction
    def _validate_custom(self) -> bool:
        """Stateless checks: parallel lists, unique pks, valid types, fee >= 0."""
        if len(self.slave_pks) != len(self.access_types):
            logger.warning('Number of slave pks are not equal to the number of access types provided')
            logger.warning('Slave pks len %s', len(self.slave_pks))
            logger.warning('Access types len %s', len(self.access_types))
            return False
        if len(set(self.slave_pks)) != len(self.slave_pks):
            logger.warning('Duplicate Slave PKS found')
            logger.warning('Unique Slave pks len %s', len(set(self.slave_pks)))
            logger.warning('Slave pks len %s', len(self.slave_pks))
            return False
        for access_type in self.access_types:
            if access_type not in [0, 1]:
                logger.warning('Invalid Access type %s', access_type)
                return False
        if self.fee < 0:
            logger.info('Slave: State validation failed for %s because: Negative send', bin2hstr(self.txhash))
            return False
        return True
    def _validate_extended(self, state_container: StateContainer) -> bool:
        """Stateful checks against the current chain state.

        Verifies output-count limits, sufficient balance for the fee, pk
        length (pre-hard-fork only), and that no pk is already a slave of
        this address.
        """
        if (len(self.slave_pks) > state_container.current_dev_config.transaction_multi_output_limit or
                len(self.access_types) > state_container.current_dev_config.transaction_multi_output_limit):
            logger.warning('List has more than %s slave pks or access_types',
                           state_container.current_dev_config.transaction_multi_output_limit)
            logger.warning('Slave pks len %s', len(self.slave_pks))
            logger.warning('Access types len %s', len(self.access_types))
            return False
        tx_balance = state_container.addresses_state[self.addr_from].balance
        if tx_balance < self.fee:
            logger.info('Slave: State validation failed for %s because: Insufficient funds', bin2hstr(self.txhash))
            logger.info('balance: %s, amount: %s', tx_balance, self.fee)
            return False
        for i in range(len(self.slave_pks)):
            slave_pk = self.slave_pks[i]
            # The pk length limit only applies before the first hard fork.
            if state_container.block_number < state_container.current_dev_config.hard_fork_heights[0]:
                if len(slave_pk) > state_container.current_dev_config.slave_pk_max_length:
                    logger.info("[Slave Transaction] Slave PK length is beyond limit")
                    return False
            if (self.addr_from, slave_pk) in state_container.slaves.data:
                logger.info("[Slave Transaction] Invalid slave transaction as %s is already a slave for this address",
                            slave_pk)
                return False
        return True
    def set_affected_address(self, addresses_set: set):
        """Add the addresses touched by this transaction to ``addresses_set``."""
        super().set_affected_address(addresses_set)
    def apply(self,
              state: State,
              state_container: StateContainer) -> bool:
        """Apply state changes: deduct the fee, record slave metadata, and
        update the paginated slave/tx hash indexes for the sender."""
        address_state = state_container.addresses_state[self.addr_from]
        address_state.update_balance(state_container, self.fee, subtract=True)
        for idx in range(0, len(self.slave_pks)):
            state_container.slaves.data[(self.addr_from,
                                         self.slave_pks[idx])] = SlaveMetadata(access_type=self.access_types[idx],
                                                                               tx_hash=self.txhash)
        state_container.paginated_slaves_hash.insert(address_state, self.txhash)
        state_container.paginated_tx_hash.insert(address_state, self.txhash)
        return self._apply_state_changes_for_PK(state_container)
    def revert(self,
               state: State,
               state_container: StateContainer) -> bool:
        """Undo ``apply``: refund the fee, mark the slave metadata entries as
        deleted, and remove the paginated index entries."""
        address_state = state_container.addresses_state[self.addr_from]
        address_state.update_balance(state_container, self.fee)
        for idx in range(0, len(self.slave_pks)):
            state_container.slaves.data[(self.addr_from,
                                         self.slave_pks[idx])] = SlaveMetadata(access_type=self.access_types[idx],
                                                                               tx_hash=self.txhash,
                                                                               delete=True)
        state_container.paginated_slaves_hash.remove(address_state, self.txhash)
        state_container.paginated_tx_hash.remove(address_state, self.txhash)
        return self._revert_state_changes_for_PK(state_container)
| 44.452555 | 118 | 0.635304 | from pypurlib.pypurlib import bin2hstr
from pur.core.State import State
from pur.core.StateContainer import StateContainer
from pur.core.misc import logger
from pur.core.txs.Transaction import Transaction
from pur.generated.pur_pb2 import SlaveMetadata
class SlaveTransaction(Transaction):
def __init__(self, protobuf_transaction=None):
super(SlaveTransaction, self).__init__(protobuf_transaction)
@property
def slave_pks(self):
return self._data.slave.slave_pks
@property
def access_types(self):
return self._data.slave.access_types
def get_data_bytes(self) -> bytes:
tmptxhash = (self.master_addr +
self.fee.to_bytes(8, byteorder='big', signed=False))
for index in range(0, len(self.slave_pks)):
tmptxhash = (tmptxhash +
self.slave_pks[index] +
self.access_types[index].to_bytes(8, byteorder='big', signed=False))
return tmptxhash
@staticmethod
def create(slave_pks: list, access_types: list, fee: int, purss_pk: bytes, master_addr: bytes = None):
transaction = SlaveTransaction()
if master_addr:
transaction._data.master_addr = master_addr
for slave_pk in slave_pks:
transaction._data.slave.slave_pks.append(slave_pk)
for access_type in access_types:
transaction._data.slave.access_types.append(access_type)
transaction._data.fee = fee
transaction._data.public_key = purss_pk
transaction.validate_or_raise(verify_signature=False)
return transaction
def _validate_custom(self) -> bool:
if len(self.slave_pks) != len(self.access_types):
logger.warning('Number of slave pks are not equal to the number of access types provided')
logger.warning('Slave pks len %s', len(self.slave_pks))
logger.warning('Access types len %s', len(self.access_types))
return False
if len(set(self.slave_pks)) != len(self.slave_pks):
logger.warning('Duplicate Slave PKS found')
logger.warning('Unique Slave pks len %s', len(set(self.slave_pks)))
logger.warning('Slave pks len %s', len(self.slave_pks))
return False
for access_type in self.access_types:
if access_type not in [0, 1]:
logger.warning('Invalid Access type %s', access_type)
return False
if self.fee < 0:
logger.info('Slave: State validation failed for %s because: Negative send', bin2hstr(self.txhash))
return False
return True
def _validate_extended(self, state_container: StateContainer) -> bool:
if (len(self.slave_pks) > state_container.current_dev_config.transaction_multi_output_limit or
len(self.access_types) > state_container.current_dev_config.transaction_multi_output_limit):
logger.warning('List has more than %s slave pks or access_types',
state_container.current_dev_config.transaction_multi_output_limit)
logger.warning('Slave pks len %s', len(self.slave_pks))
logger.warning('Access types len %s', len(self.access_types))
return False
tx_balance = state_container.addresses_state[self.addr_from].balance
if tx_balance < self.fee:
logger.info('Slave: State validation failed for %s because: Insufficient funds', bin2hstr(self.txhash))
logger.info('balance: %s, amount: %s', tx_balance, self.fee)
return False
for i in range(len(self.slave_pks)):
slave_pk = self.slave_pks[i]
if state_container.block_number < state_container.current_dev_config.hard_fork_heights[0]:
if len(slave_pk) > state_container.current_dev_config.slave_pk_max_length:
logger.info("[Slave Transaction] Slave PK length is beyond limit")
return False
if (self.addr_from, slave_pk) in state_container.slaves.data:
logger.info("[Slave Transaction] Invalid slave transaction as %s is already a slave for this address",
slave_pk)
return False
return True
def set_affected_address(self, addresses_set: set):
super().set_affected_address(addresses_set)
def apply(self,
state: State,
state_container: StateContainer) -> bool:
address_state = state_container.addresses_state[self.addr_from]
address_state.update_balance(state_container, self.fee, subtract=True)
for idx in range(0, len(self.slave_pks)):
state_container.slaves.data[(self.addr_from,
self.slave_pks[idx])] = SlaveMetadata(access_type=self.access_types[idx],
tx_hash=self.txhash)
state_container.paginated_slaves_hash.insert(address_state, self.txhash)
state_container.paginated_tx_hash.insert(address_state, self.txhash)
return self._apply_state_changes_for_PK(state_container)
def revert(self,
state: State,
state_container: StateContainer) -> bool:
address_state = state_container.addresses_state[self.addr_from]
address_state.update_balance(state_container, self.fee)
for idx in range(0, len(self.slave_pks)):
state_container.slaves.data[(self.addr_from,
self.slave_pks[idx])] = SlaveMetadata(access_type=self.access_types[idx],
tx_hash=self.txhash,
delete=True)
state_container.paginated_slaves_hash.remove(address_state, self.txhash)
state_container.paginated_tx_hash.remove(address_state, self.txhash)
return self._revert_state_changes_for_PK(state_container)
| true | true |
f7ff17e7b6f129a68378dbaf00f8b7e5e713d191 | 1,378 | py | Python | examples/signature_downloadFile.py | apinsard/yousign-api-client-python | 27e00bcd00c4ca180b76ef0d096f5b0b5962a690 | [
"Apache-2.0"
] | null | null | null | examples/signature_downloadFile.py | apinsard/yousign-api-client-python | 27e00bcd00c4ca180b76ef0d096f5b0b5962a690 | [
"Apache-2.0"
] | null | null | null | examples/signature_downloadFile.py | apinsard/yousign-api-client-python | 27e00bcd00c4ca180b76ef0d096f5b0b5962a690 | [
"Apache-2.0"
] | 1 | 2019-12-06T13:08:23.000Z | 2019-12-06T13:08:23.000Z | import base64
import suds
import os
import ysApi
import string
import random
if __name__ == "__main__":
    # Config file; credentials can also be passed explicitly:
    # c = ysApi.ApiClient(None, "username", "password", "apikey", "environment")
    c = ysApi.ApiClient('../config/config.ini')

    def id_generator(size=5, chars=string.ascii_lowercase + string.digits):
        """Return a random lowercase/digit string to keep file names unique."""
        return ''.join(random.choice(chars) for _ in range(size))

    print("Getting Signed file(s) ...")
    try:
        # Fetch the most recent signature demand and its signed files.
        last = c.getListSign(count=1)
        idDemand = last[0]['cosignatureEvent']
        res = c.getSignedFilesFromDemand(idDemand)
        # Iterate content/name pairs directly.  The original looked each name
        # up with list.index(), which always resolves duplicate file names to
        # the first occurrence and would save the wrong content.
        for entry in res:
            content = entry['file']
            name = os.path.basename(entry['fileName'])
            pathFile = 'documents/' + id_generator() + '_' + name
            print(pathFile)
            # b64decode() returns raw bytes, so open in binary mode; the
            # context manager also guarantees the handle is closed (the
            # original leaked it and wrote bytes to a text-mode file).
            with open(pathFile, 'wb') as signedFile:
                signedFile.write(base64.b64decode(content))
            print('Signed file saved in : ' + os.getcwd() + '/' + pathFile)
    except suds.WebFault as detail:
        print(detail)
import suds
import os
import ysApi
import string
import random
if __name__ == "__main__":
c = ysApi.ApiClient('../config/config.ini')
def id_generator(size=5, chars= string.ascii_lowercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
print("Getting Signed file(s) ...")
try :
last = c.getListSign(count=1)
idDemand = last[0]['cosignatureEvent']
res = c.getSignedFilesFromDemand(idDemand)
file = []
fileName =[]
for el in res:
file.append(el['file'])
for el in res:
fileName.append(os.path.basename(el['fileName']))
for el in fileName :
pathFile= 'documents/'+id_generator()+'_'+fileName[fileName.index(el)]
print(pathFile)
signedFile = open(pathFile, 'w')
signedFile.write(base64.b64decode(file[fileName.index(el)]))
print('Signed file saved in : '+os.getcwd()+'/'+pathFile)
except suds.WebFault as detail:
print(detail)
| true | true |
f7ff1836f4cfbfab8e90ffd4451894d24b97beea | 2,293 | py | Python | src/daisyDelight/madPlayer.py | Neppord/daisy-delight | a3efe83427239f137a8b2cc7138799cd2a3a005d | [
"MIT"
] | null | null | null | src/daisyDelight/madPlayer.py | Neppord/daisy-delight | a3efe83427239f137a8b2cc7138799cd2a3a005d | [
"MIT"
] | null | null | null | src/daisyDelight/madPlayer.py | Neppord/daisy-delight | a3efe83427239f137a8b2cc7138799cd2a3a005d | [
"MIT"
] | null | null | null | #! /usr/bin/python
# coding: latin1
import ossaudiodev, mad
class MadPlayer:
    """MP3 clip player: decodes with pymad and writes PCM to an OSS device.

    Observes a shared playback-state object; ``currClip`` appears to be a
    (start_seconds, end_seconds, path) triple -- TODO confirm against the
    state class.  Playback runs on a background thread.
    """
    def __init__(self, state):
        self.output = ossaudiodev.open('w')  # OSS audio device, write mode
        self.Clip = None                     # current (start, end, path) clip
        self.file = None                     # mad.MadFile decoder for the clip
        self.state = state
        state.addObserver(self)              # register for state-change callbacks
        self.isPlaying = False               # polled by the worker thread
        self.src = None                      # path currently loaded into self.file
        self.t = None                        # background playback thread
    def update(self, observable, *args):
        # Observer hook: keep playback in sync with the observed state.
        if "clip" in observable.hasChanged:
            self.stop()
            self.Clip = observable.currClip
            self.loadAudioFile()
        if observable.isPaused and self.isPlaying:
            self.stop()
        if not observable.isPaused and not self.isPlaying:
            self.play()
    def play(self):
        """Start (or restart) playback of the current clip on a worker thread."""
        def setOutParam(self):
            # Configure the OSS device to match the decoded stream.
            format = ossaudiodev.AFMT_S16_BE
            numberOfChannels = 2 # self.file.mode()
            samplerate = self.file.samplerate()
            (f, n, s) = self.output.setparameters(format, numberOfChannels,
                                                  samplerate)
            # print "Frormat %d/%d Channels %d/%d samlplerate %d/%d"%(f,format,n,numberOfChannels,s,samplerate)
            if f != format or s != samplerate or n != numberOfChannels:
                print "format not suported"
                import sys
                sys.exit(1)
        def playhelper(self):
            # Worker loop: stream decoded frames until the clip's end time
            # (milliseconds), a stop request, or end of data; then advance.
            setOutParam(self)
            buff = self.file.read()
            while buff and self.isPlaying and self.file.current_time() < (
                    self.Clip[1] * 1000):
                self.output.write(buff)
                buff = self.file.read()
            if self.isPlaying:
                self.state.skip()
        import threading
        # Wait for a previous worker to finish before starting a new one
        # (unless we *are* that worker, which would deadlock on join).
        # NOTE(review): `self.t.isAlive` is not called (missing parentheses),
        # so this condition is always truthy; likely meant isAlive().
        if self.t and self.t.isAlive and self.t != threading.currentThread():
            self.t.join()
        self.isPlaying = True
        self.t = threading.Thread(target=playhelper, args=(self,))
        self.t.start()
    def stop(self):
        # The worker thread polls this flag and exits on its own.
        self.isPlaying = False
    def loadAudioFile(self):
        """(Re)open the decoder as needed and seek to the clip's start time."""
        # Reuse the open decoder when the same file is loaded and we have
        # not yet passed the clip's start; otherwise reopen from scratch.
        if not self.src == self.Clip[2] or self.file.current_time() > self.Clip[
                0] * 1000:
            self.file = mad.MadFile(self.Clip[2])
            self.src = self.Clip[2]
        # Decode (and discard) frames until the start position is reached.
        while self.file.current_time() < int(self.Clip[0] * 1000):
            self.buff = self.file.read()
import ossaudiodev, mad
class MadPlayer:
def __init__(self, state):
self.output = ossaudiodev.open('w')
self.Clip = None
self.file = None
self.state = state
state.addObserver(self)
self.isPlaying = False
self.src = None
self.t = None
def update(self, observable, *args):
if "clip" in observable.hasChanged:
self.stop()
self.Clip = observable.currClip
self.loadAudioFile()
if observable.isPaused and self.isPlaying:
self.stop()
if not observable.isPaused and not self.isPlaying:
self.play()
def play(self):
def setOutParam(self):
format = ossaudiodev.AFMT_S16_BE
numberOfChannels = 2
samplerate = self.file.samplerate()
(f, n, s) = self.output.setparameters(format, numberOfChannels,
samplerate)
if f != format or s != samplerate or n != numberOfChannels:
print "format not suported"
import sys
sys.exit(1)
def playhelper(self):
setOutParam(self)
buff = self.file.read()
while buff and self.isPlaying and self.file.current_time() < (
self.Clip[1] * 1000):
self.output.write(buff)
buff = self.file.read()
if self.isPlaying:
self.state.skip()
import threading
if self.t and self.t.isAlive and self.t != threading.currentThread():
self.t.join()
self.isPlaying = True
self.t = threading.Thread(target=playhelper, args=(self,))
self.t.start()
def stop(self):
self.isPlaying = False
def loadAudioFile(self):
if not self.src == self.Clip[2] or self.file.current_time() > self.Clip[
0] * 1000:
self.file = mad.MadFile(self.Clip[2])
self.src = self.Clip[2]
while self.file.current_time() < int(self.Clip[0] * 1000):
self.buff = self.file.read()
| false | true |
f7ff18bbb8352db2837c5331db24f4ab0aef194e | 1,220 | py | Python | lib/python3.6/site-packages/conda/common/signals.py | PhonPhey/Magnezi | bf96246d69edc6882653ba5e1332c0eff8d10294 | [
"MIT"
] | 2 | 2021-11-28T12:47:01.000Z | 2021-12-04T16:58:16.000Z | lib/python3.6/site-packages/conda/common/signals.py | PhonPhey/Magnezi | bf96246d69edc6882653ba5e1332c0eff8d10294 | [
"MIT"
] | 2 | 2021-12-04T12:51:07.000Z | 2021-12-04T16:49:18.000Z | lib/python3.6/site-packages/conda/common/signals.py | PhonPhey/Magnezi | bf96246d69edc6882653ba5e1332c0eff8d10294 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from contextlib import contextmanager
from logging import getLogger
import signal
from .compat import iteritems
log = getLogger(__name__)
INTERRUPT_SIGNALS = (
'SIGABRT',
'SIGINT',
'SIGTERM',
'SIGQUIT',
'SIGBREAK',
)
def get_signal_name(signum):
    """Return the symbolic name (e.g. ``'SIGINT'``) for *signum*.

    Scans the ``signal`` module namespace for an attribute equal to
    *signum*, skipping the ``SIG_*`` handler constants
    (``SIG_DFL``/``SIG_IGN``).  Returns ``None`` when nothing matches.

    Examples:
        >>> from signal import SIGINT
        >>> get_signal_name(SIGINT)
        'SIGINT'
    """
    # dict.items() behaves identically on Python 2 and 3 for this tiny
    # namespace, so the iteritems() compat helper is unnecessary here.
    return next((k for k, v in signal.__dict__.items()
                 if v == signum and k.startswith('SIG') and not k.startswith('SIG_')),
                None)
@contextmanager
def signal_handler(handler):
    """Install *handler* for every interrupt signal while the block runs.

    Registers *handler* for each signal named in INTERRUPT_SIGNALS that
    exists on this platform (e.g. SIGBREAK is Windows-only), then restores
    the previous handlers on exit -- including when the block raises.
    """
    previous_handlers = []
    for signame in INTERRUPT_SIGNALS:
        sig = getattr(signal, signame, None)
        if sig:
            log.debug("registering handler for %s", signame)
            prev_handler = signal.signal(sig, handler)
            # Keep the name alongside the handler so the de-registration
            # log below names the right signal (the original reused the
            # loop variable, which only named the *last* registered one).
            previous_handlers.append((sig, signame, prev_handler))
    try:
        yield
    finally:
        for sig, signame, previous_handler in previous_handlers:
            log.debug("de-registering handler for %s", signame)
            signal.signal(sig, previous_handler)
from __future__ import absolute_import, division, print_function, unicode_literals
from contextlib import contextmanager
from logging import getLogger
import signal
from .compat import iteritems
log = getLogger(__name__)
INTERRUPT_SIGNALS = (
'SIGABRT',
'SIGINT',
'SIGTERM',
'SIGQUIT',
'SIGBREAK',
)
def get_signal_name(signum):
return next((k for k, v in iteritems(signal.__dict__)
if v == signum and k.startswith('SIG') and not k.startswith('SIG_')),
None)
@contextmanager
def signal_handler(handler):
previous_handlers = []
for signame in INTERRUPT_SIGNALS:
sig = getattr(signal, signame, None)
if sig:
log.debug("registering handler for %s", signame)
prev_handler = signal.signal(sig, handler)
previous_handlers.append((sig, prev_handler))
try:
yield
finally:
for sig, previous_handler in previous_handlers:
log.debug("de-registering handler for %s", signame)
signal.signal(sig, previous_handler)
| true | true |
f7ff198333448b7829141cb9e9ae779a2c2fcbca | 2,188 | py | Python | tests/__init__.py | dmr/ramdisk-mounter | fa00f59932bfdc56571d6218e43bbe6b733d2f77 | [
"MIT"
] | 3 | 2019-12-18T22:28:41.000Z | 2021-02-13T23:48:30.000Z | tests/__init__.py | dmr/ramdisk-mounter | fa00f59932bfdc56571d6218e43bbe6b733d2f77 | [
"MIT"
] | null | null | null | tests/__init__.py | dmr/ramdisk-mounter | fa00f59932bfdc56571d6218e43bbe6b733d2f77 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import string, random
import time
import os
import ramdisk_mounter
# Directory the tests mount their ramdisk over; created at import time so
# every test starts with an existing mount point.
fold = os.path.abspath(
    os.path.join(
        os.path.dirname(__file__),
        '_folder_of_ram_disk')
)
if not os.path.exists(fold):
    os.makedirs(fold)
class TestRaiseErrorToOuterContext(unittest.TestCase):
    """An exception raised inside the Ramdisk context must propagate out."""

    def test_error_inside_with_is_raised(self):
        # The AssertionError raised in the `with` body must reach the
        # caller: the context manager may clean up (unmount) but must not
        # swallow the error.
        def run_with_failing_body():
            with ramdisk_mounter.Ramdisk(folder=fold, size=128) as mount_point:
                assert not ramdisk_mounter.check_is_same_device_as_root_fs(mount_point)
                assert 0

        self.assertRaises(AssertionError, run_with_failing_body)
def test_tmpfs_doesnt_exist_outside_with_and_unicode_name_possible():
    """A file written inside the mounted ramdisk vanishes after unmount."""
    target = os.path.join(fold, 'temporary_file.txt')
    with ramdisk_mounter.Ramdisk(folder=fold, size=128):
        with open(target, 'w') as handle:
            handle.write('test')
        # While mounted, the file is visible through the mount point.
        assert os.path.exists(target)
    # After unmount the backing tmpfs (and the file on it) is gone.
    assert not os.path.exists(target)
def test_compare_speed():
    """Writing a large text file to the ramdisk should beat the disk write."""
    tmp_file = os.path.join(fold, 'temporary_file_compare_speed.txt')
    # Build a large body of random 5-letter words so the write is big
    # enough to time meaningfully.
    r_word = lambda length: "".join(
        [random.choice(string.letters) for _ in range(length)])
    text = " ".join([r_word(5) for _ in range(1000000)])
    #import sys
    #print sys.getsizeof(text)
    # Baseline: write straight to the on-disk folder.
    before = time.time()
    with open(tmp_file, 'w') as fp:
        fp.write(text)
    speed_without_ramdisk = time.time() - before
    # Same write with a tmpfs mounted over the folder.
    with ramdisk_mounter.Ramdisk(
        folder=fold,
        size=128
    ):
        before = time.time()
        with open(tmp_file, 'w') as fp:
            fp.write(text)
        speed_with_ramdisk = time.time() - before
    assert speed_with_ramdisk < speed_without_ramdisk
    print "Without RAMDISK: %s\n--> With RAMDISK: %s" % (
        speed_without_ramdisk, speed_with_ramdisk)
    #get_file_size = lambda file_name: os.stat(file_name).st_size
    #print get_file_size(tmp_file)
# Slow benchmark: exclude from automatic test collection.
test_compare_speed.__test__ = False
| 27.012346 | 86 | 0.64808 |
import unittest
import string, random
import time
import os
import ramdisk_mounter
fold = os.path.abspath(
os.path.join(
os.path.dirname(__file__),
'_folder_of_ram_disk')
)
if not os.path.exists(fold):
os.makedirs(fold)
class TestRaiseErrorToOuterContext(unittest.TestCase):
def test_error_inside_with_is_raised(self):
def fct_with_error_n_ramdisk():
with ramdisk_mounter.Ramdisk(
folder=fold,
size=128
) as tmp_folder:
assert not ramdisk_mounter.check_is_same_device_as_root_fs(tmp_folder)
assert 0
self.assertRaises(
AssertionError,
fct_with_error_n_ramdisk
)
def test_tmpfs_doesnt_exist_outside_with_and_unicode_name_possible():
tmp_file = os.path.join(fold, 'temporary_file.txt')
with ramdisk_mounter.Ramdisk(
folder=fold,
size=128
):
with open(tmp_file, 'w') as f:
f.write('test')
assert os.path.exists(tmp_file)
assert not os.path.exists(tmp_file)
def test_compare_speed():
tmp_file = os.path.join(fold, 'temporary_file_compare_speed.txt')
r_word = lambda length: "".join(
[random.choice(string.letters) for _ in range(length)])
text = " ".join([r_word(5) for _ in range(1000000)])
before = time.time()
with open(tmp_file, 'w') as fp:
fp.write(text)
speed_without_ramdisk = time.time() - before
with ramdisk_mounter.Ramdisk(
folder=fold,
size=128
):
before = time.time()
with open(tmp_file, 'w') as fp:
fp.write(text)
speed_with_ramdisk = time.time() - before
assert speed_with_ramdisk < speed_without_ramdisk
print "Without RAMDISK: %s\n--> With RAMDISK: %s" % (
speed_without_ramdisk, speed_with_ramdisk)
test_compare_speed.__test__ = False
| false | true |
f7ff1aba9de5d328718b1aa37091a9a0d4d6db4b | 1,999 | py | Python | custom_components/tuya_v2/remote.py | tbratfisch/tuya-home-assistant | 2805792d599b68de8ed101a96c48f2b89452362d | [
"MIT"
] | null | null | null | custom_components/tuya_v2/remote.py | tbratfisch/tuya-home-assistant | 2805792d599b68de8ed101a96c48f2b89452362d | [
"MIT"
] | null | null | null | custom_components/tuya_v2/remote.py | tbratfisch/tuya-home-assistant | 2805792d599b68de8ed101a96c48f2b89452362d | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""Support for Tuya switches."""
from __future__ import annotations
import logging
from typing import Any
from tuya_iot import TuyaHomeManager
from homeassistant.components.remote import RemoteEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .base import TuyaHaDevice
from .const import (
DOMAIN,
TUYA_HOME_MANAGER
)
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
    hass: HomeAssistant, _entry: ConfigEntry, async_add_entities
):
    """Query the Tuya home manager for scenes and register one entity each."""
    _LOGGER.info("scenes remote init")
    home_manager = hass.data[DOMAIN][TUYA_HOME_MANAGER]
    # query_scenes is a blocking call, so run it in the executor pool.
    scenes = await hass.async_add_executor_job(home_manager.query_scenes)
    async_add_entities([TuyaHAScene(scene) for scene in scenes])
class TuyaHAScene(TuyaHaDevice, RemoteEntity):
    """Remote entity representing a single Tuya scene."""

    def __init__(self, scene) -> None:
        """Wrap a Tuya scene object as a Home Assistant entity."""
        super().__init__()
        self.scene = scene
        self.entity_id = f"tuya_v2.ty{self.scene.scene_id}"

    @property
    def should_poll(self) -> bool:
        """Scenes carry no state to poll."""
        return False

    @property
    def unique_id(self) -> str | None:
        """Stable ID derived from the Tuya scene id."""
        return f"tys{self.scene.scene_id}"

    @property
    def name(self) -> str | None:
        """Scene display name as configured in Tuya."""
        return self.scene.name

    @property
    def device_info(self):
        """Device-registry description for this scene."""
        return {
            "identifiers": {(DOMAIN, f"{self.scene.scene_id}")},
            "manufacturer": "tuya",
            "name": self.scene.name,
            "model": "Tuya Scene",
        }

    @property
    def available(self) -> bool:
        """Mirror the scene's enabled flag from Tuya."""
        return self.scene.enabled
| 25.628205 | 97 | 0.65983 |
from __future__ import annotations
import logging
from typing import Any
from tuya_iot import TuyaHomeManager
from homeassistant.components.remote import RemoteEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .base import TuyaHaDevice
from .const import (
DOMAIN,
TUYA_HOME_MANAGER
)
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: HomeAssistant, _entry: ConfigEntry, async_add_entities
):
_LOGGER.info("scenes remote init")
entities = []
scenes = await hass.async_add_executor_job(hass.data[DOMAIN][TUYA_HOME_MANAGER].query_scenes)
for scene in scenes:
entities.append(TuyaHAScene(scene))
async_add_entities(entities)
class TuyaHAScene(TuyaHaDevice, RemoteEntity):
def __init__(self, scene) -> None:
super().__init__()
self.scene = scene
self.entity_id = f"tuya_v2.ty{self.scene.scene_id}"
@property
def should_poll(self) -> bool:
return False
@property
def unique_id(self) -> str | None:
return f"tys{self.scene.scene_id}"
@property
def name(self) -> str | None:
return self.scene.name
@property
def device_info(self):
_device_info = {
"identifiers": {(DOMAIN, f"{self.scene.scene_id}")},
"manufacturer": "tuya",
"name": self.scene.name,
"model": "Tuya Scene",
}
return _device_info
@property
def available(self) -> bool:
return self.scene.enabled
| true | true |
f7ff1b0e720b165e3e9612df9e3b0354dc781ebb | 1,428 | py | Python | var/spack/repos/builtin/packages/beast2/package.py | xiki-tempula/spack | 9d66c05e93ab8a933fc59915040c0e0c86a4aac4 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 9 | 2018-04-18T07:51:40.000Z | 2021-09-10T03:56:57.000Z | var/spack/repos/builtin/packages/beast2/package.py | xiki-tempula/spack | 9d66c05e93ab8a933fc59915040c0e0c86a4aac4 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 907 | 2018-04-18T11:17:57.000Z | 2022-03-31T13:20:25.000Z | var/spack/repos/builtin/packages/beast2/package.py | xiki-tempula/spack | 9d66c05e93ab8a933fc59915040c0e0c86a4aac4 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 29 | 2018-11-05T16:14:23.000Z | 2022-02-03T16:07:09.000Z | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Beast2(Package):
"""BEAST is a cross-platform program for Bayesian inference using MCMC
of molecular sequences. It is entirely orientated towards rooted,
time-measured phylogenies inferred using strict or relaxed molecular
clock models. It can be used as a method of reconstructing phylogenies
but is also a framework for testing evolutionary hypotheses without
conditioning on a single tree topology."""
homepage = "http://beast2.org/"
url = "https://github.com/CompEvol/beast2/releases/download/v2.4.6/BEAST.v2.4.6.Linux.tgz"
version('2.5.2', sha256='2feb2281b4f7cf8f7de1a62de50f52a8678ed0767fc72f2322e77dde9b8cd45f')
version('2.4.6', sha256='84029c5680cc22f95bef644824130090f5f12d3d7f48d45cb4efc8e1d6b75e93')
depends_on('java')
def setup_run_environment(self, env):
env.set('BEAST', self.prefix)
def install(self, spec, prefix):
install_tree('bin', prefix.bin)
install_tree('examples', join_path(self.prefix, 'examples'))
install_tree('images', join_path(self.prefix, 'images'))
install_tree('lib', prefix.lib)
install_tree('templates', join_path(self.prefix, 'templates'))
| 42 | 99 | 0.72409 |
from spack import *
class Beast2(Package):
homepage = "http://beast2.org/"
url = "https://github.com/CompEvol/beast2/releases/download/v2.4.6/BEAST.v2.4.6.Linux.tgz"
version('2.5.2', sha256='2feb2281b4f7cf8f7de1a62de50f52a8678ed0767fc72f2322e77dde9b8cd45f')
version('2.4.6', sha256='84029c5680cc22f95bef644824130090f5f12d3d7f48d45cb4efc8e1d6b75e93')
depends_on('java')
def setup_run_environment(self, env):
env.set('BEAST', self.prefix)
def install(self, spec, prefix):
install_tree('bin', prefix.bin)
install_tree('examples', join_path(self.prefix, 'examples'))
install_tree('images', join_path(self.prefix, 'images'))
install_tree('lib', prefix.lib)
install_tree('templates', join_path(self.prefix, 'templates'))
| true | true |
f7ff1b7770561ff6571ca0f499434d65f091c05a | 3,595 | py | Python | spyder/middlewares.py | rean23/cityData | 886d19b9f3289ca1faefb6333aaed361829cfe43 | [
"MIT"
] | 3 | 2019-01-28T09:18:25.000Z | 2021-04-01T15:52:15.000Z | spyder/middlewares.py | rean23/cityData | 886d19b9f3289ca1faefb6333aaed361829cfe43 | [
"MIT"
] | null | null | null | spyder/middlewares.py | rean23/cityData | 886d19b9f3289ca1faefb6333aaed361829cfe43 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class SpyderSpiderMiddleware(object):
    """Pass-through spider middleware (scrapy template defaults).

    Every hook keeps scrapy's default behaviour: responses, items and
    requests flow through unchanged; spider start-up is logged.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Scrapy instantiates the middleware through this factory and we
        # subscribe to spider_opened so start-up gets logged.
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened, signal=signals.spider_opened)
        return middleware

    def process_spider_input(self, response, spider):
        # None -> let the response continue into the spider untouched.
        return None

    def process_spider_output(self, response, result, spider):
        # Forward the spider's results unchanged.
        for item in result:
            yield item

    def process_spider_exception(self, response, exception, spider):
        # No special handling: fall through to scrapy's default behaviour.
        pass

    def process_start_requests(self, start_requests, spider):
        # Forward the start requests untouched.
        for request in start_requests:
            yield request

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the downloader middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
# Called for each request that goes through the downloader
# middleware.
# Must either:
# - return None: continue processing this request
# - or return a Response object
# - or return a Request object
# - or raise IgnoreRequest: process_exception() methods of
# installed downloader middleware will be called
return None
def process_response(self, request, response, spider):
# Called with the response returned from the downloader.
# Must either;
# - return a Response object
# - return a Request object
# - or raise IgnoreRequest
return response
def process_exception(self, request, exception, spider):
# Called when a download handler or a process_request()
# (from other downloader middleware) raises an exception.
# Must either:
# - return None: continue processing this exception
# - return a Response object: stops process_exception() chain
# - return a Request object: stops process_exception() chain
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
| 34.567308 | 78 | 0.665925 |
from scrapy import signals
class SpyderSpiderMiddleware(object):
@classmethod
def from_crawler(cls, crawler):
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
return None
def process_spider_output(self, response, result, spider):
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
pass
def process_start_requests(self, start_requests, spider):
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class SpyderDownloaderMiddleware(object):
@classmethod
def from_crawler(cls, crawler):
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
return None
def process_response(self, request, response, spider):
return response
def process_exception(self, request, exception, spider):
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
| true | true |
f7ff1b902b248457fefb06001e70abbd513a8e77 | 25,214 | py | Python | flask_web/app/system_state_module.py | bopopescu/local_scda | 40fa4a586f140dc00b8d3f53c732e22e022be338 | [
"MIT"
] | null | null | null | flask_web/app/system_state_module.py | bopopescu/local_scda | 40fa4a586f140dc00b8d3f53c732e22e022be338 | [
"MIT"
] | null | null | null | flask_web/app/system_state_module.py | bopopescu/local_scda | 40fa4a586f140dc00b8d3f53c732e22e022be338 | [
"MIT"
] | 2 | 2020-07-23T21:55:21.000Z | 2021-01-14T12:27:19.000Z | # This is the System State Modules
#
#
#
import json as simplejson
import json
import time
import os
import cherrypy
from urlparse import *
from shutil import *
import urllib
from cherrypy.lib.httputil import parse_query_string
import redis
import base64
redis_handle_15 = redis.StrictRedis(host='localhost', port=6379, db=15)
# Index-aligned mapping from the numeric mode id sent by the web UI to the
# command string understood by the sprinkler control process (used by
# change_mode); index 0 is a guard value that should never be requested.
mode_string = [
    "This should not happen",
    "OFFLINE",
    "QUEUE_SCHEDULE",
    "QUEUE_SCHEDULE_STEP",
    "QUEUE_SCHEDULE_STEP_TIME",
    "CLEAN_FILTER",
    "OPEN_MASTER_VALVE",
    "CLOSE_MASTER_VALVE",
    "RESTART_PROGRAM",
    "RESET_SYSTEM" ,
    "CHECK_OFF",
    "SHUT_DOWN",
    "TURN_ON",
    "SKIP_STATION"
]
class System_state_modules:
    def __init__(self, module_dictionary ):
        """Register this module's HTTP endpoint handlers.

        Each key is the URL path the web front end requests; the value is
        the bound method that services it.  Note a few endpoints are
        registered under a name that differs from the method name
        (e.g. "mode_request.html" -> mode_request_data).
        """
        module_dictionary["redis_get_status.html"] = self.redis_get_status
        module_dictionary["get_flow_sensor_name.html"] = self.get_flow_sensor_name
        module_dictionary["get_irrigation_queue.html"] = self.get_irrigation_queue
        module_dictionary["load_controller_pins.html"] = self.load_controller_pins
        module_dictionary["mode_request.html"] = self.mode_request_data
        module_dictionary["schedule_data.html"] = self.schedule_data
        module_dictionary["mode_change.html"] = self.change_mode
        module_dictionary["controller_pin_turn_off.html"] = self.pin_off
        module_dictionary["controller_pin_turn_on.html"] = self.pin_on
        module_dictionary["change_rain_flag.html"] = self.change_rain_flag
        module_dictionary["change_eto_flag.html"] = self.change_eto_flag
        module_dictionary["rain_flag.html"] = self.get_rain_flag
        module_dictionary["eto_flag.html"] = self.get_eto_flag
        module_dictionary["get_queue_entry.html"] = self.get_queue_entry
        module_dictionary["delete_queue_element.html"] = self.delete_queue
        module_dictionary["get_eto_entries.html"] = self.get_eto_entries
        module_dictionary["save_eto_data.html"] = self.save_eto_data
        module_dictionary["flow_sensor_name.html"] = self.flow_sensor_name
        module_dictionary["get_flow_queue.html"] = self.get_queue
        module_dictionary["recent_plc.html"] = self.recent_plc
        module_dictionary["recent_coil.html"] = self.recent_coil
        module_dictionary["start_time_update.html"] = self.start_time_update
        module_dictionary["run_time_update.html"] = self.update_run_time
        module_dictionary["delete_schedule.html"] = self.delete_schedule
        module_dictionary["insert_schedule.html"] = self.insert_schedule
        module_dictionary["copy_schedule.html"] = self.copy_schedule
        module_dictionary["change_schedule.html"] = self.change_schedule
        module_dictionary["schedule_entry.html"] = self.schedule_entry
        module_dictionary["load_valve_groups.html"] = self.load_valve_groups
        module_dictionary["get_cleaning_interval.html"] = self.get_cleaning_interval
        module_dictionary["set_cleaning_interval.html"] = self.set_cleaning_interval
        module_dictionary["set_max_flow_rate_cut_off.html"] = self.set_max_flow_rate_cut_off
        module_dictionary["get_max_flow_rate_cut_off.html"] = self.get_max_flow_rate_cut_off
def get_max_flow_rate_cut_off(self, url_list, redis_handle, cherrypy ):
temp = redis_handle.get( "max_flow_rate_cutoff")
if temp == None:
max_flow_rate_cutoff = 0
else:
max_flow_rate_cutoff = float(temp)
temp = redis_handle.get( "max_flow_rate_time")
if temp == None:
max_flow_rate_time = 0
else:
max_flow_rate_time = float(temp)
temp = json.dumps([ max_flow_rate_cutoff, max_flow_rate_time ] )
return temp
def set_max_flow_rate_cut_off(self, url_list, redis_handle, cherrypy ):
json_object = cherrypy.request.params["JSON"]
redis_handle.set("max_flow_rate_cutoff",int(json_object[0]))
redis_handle.set("max_flow_rate_time",int(json_object[1]))
return json.dumps("SUCCESS")
def set_cleaning_interval(self, url_list, redis_handle, cherrypy ):
json_object = cherrypy.request.params["JSON"]
json_object = float( json_object )
redis_handle.set("cleaning_interval",json_object)
return json.dumps("SUCCESS")
def get_cleaning_interval(self, url_list, redis_handle, cherrypy ):
temp = redis_handle.get( "cleaning_interval")
if temp == None:
temp = 0
else:
temp = float(temp)
temp = json.dumps(temp)
return temp
def redis_get_status(self, url_list, redis_handle, cherrypy ):
return_data = {}
return_data["controller_time_stamp"] = redis_handle.get("sprinkler_time_stamp")
return_data["flow_rate"] = redis_handle.get( "global_flow_sensor")
return_data["op_mode"] = redis_handle.get( "sprinkler_ctrl_mode")
return_data["schedule"] = redis_handle.get( "schedule_name" )
return_data["step"] = redis_handle.get( "schedule_step")
return_data["time_of_step"] = redis_handle.get( "schedule_time_max" )
return_data["current_duration"] = redis_handle.get( "schedule_time_count")
return_data["derating_factor"] = redis_handle.get("derating_factor")
return_data["rain_day"] = redis_handle.get("rain_day" )
return_data["pcl_current"] = redis_handle.get( "plc_current" )
return_data["coil_current"] = redis_handle.get( "coil_current" )
return_data["eto_yesterday"] = redis_handle.get( "YESTERDAY_ETO" )
return_data["eto_current"] = redis_handle.get( "CURRENT_ETO" )
return_data["eto_master_valve"] = redis_handle.get("MASTER_VALVE_SETUP")
return_data["eto_managment_flag"] = redis_handle.get("ETO_MANAGE_FLAG")
temp = json.dumps(return_data)
return temp
def get_flow_sensor_name( self, url_list, redis_handle, cherrypy ):
return_data = []
json_data=open("/media/mmc1/system_data_files/global_sensors.json")
data = json.load(json_data)
for i in data:
temp = []
temp.append(i[0])
temp.append(i[3])
return_data.append(temp)
temp = json.dumps(return_data)
return temp
# this is generating data for a bar graph program on java script side
# Essentially what we are doing is generating a list for each schedule
# unspecified is for a step scheduling
# We return the cummulative total as a dummy value and a list of
# All elements in the queue
    def get_irrigation_queue( self, url_list, redis_handle, cherrypy ):
        """Summarize the IRRIGATION_QUEUE redis list for a stacked bar graph.

        Walks the queue from the tail (oldest first); IRRIGATION_STEP
        entries accumulate run times into the current bar, and an
        END_SCHEDULE entry closes the bar under its schedule name.  Each
        bar is {"name": ..., "value": [running_total, step_time, ...]}; a
        trailing group with no END_SCHEDULE marker is emitted as
        "unspecified".  Returns the JSON-encoded list of bars.
        """
        return_data = []
        queue_len = redis_handle.llen("IRRIGATION_QUEUE")
        element_list = []
        if queue_len > 0 :
            name = "unspecified"
            total = 0
            sub_total = 0  # NOTE(review): assigned but never used
            element_list.append(0) # first element of the list is the total
            state = 0      # NOTE(review): assigned but never used
            for i in range(0, queue_len):
                # Index from the end so entries are processed oldest-first.
                data = redis_handle.lindex("IRRIGATION_QUEUE", queue_len - i-1)
                if data != None:
                    data = json.loads(data)
                    if data["type"] == "END_SCHEDULE":
                        # Close out the current bar under the schedule's name.
                        element = {}
                        name = data["schedule_name"]
                        element["name"] = name # this is the name of the bar graph
                        element["value"] = element_list # these are values in a stacked bar graph
                        return_data.append(element) # adding to return array
                        element_list = []
                        element_list.append(total) # first element of the list is the total
                        name = "unspecified"
                    if data["type"] == "IRRIGATION_STEP" :
                        total = total + float( data["run_time"])
                        element_list.append( float( data["run_time"] ))
            if len(element_list) > 1 : # we have an element with out an END_SCHEDULE ELEMENT
                element = {} # generating a list value for the return_data array
                element["name"] = name
                element["value"] = element_list
                return_data.append( element )
        json_string = json.dumps(return_data)
        print "json_string--------------->",json_string
        return json_string
    def load_valve_groups( self, url_list, redis_handle, cherrypy ):
        """Return the valve-group assignment config file re-encoded as JSON."""
        # NOTE(review): the handle is never closed -- harmless for a
        # short-lived request, but a with-block would be safer.
        json_data=open("/media/mmc1/system_data_files/valve_group_assignments.json")
        print "json data ",json_data
        data = json.load(json_data)
        return json.dumps(data)
    def load_controller_pins( self, url_list, redis_handle, cherrypy ):
        """Return the controller cable-assignment config file as JSON."""
        # NOTE(review): the handle is never closed -- harmless for a
        # short-lived request, but a with-block would be safer.
        json_data=open("/media/mmc1/system_data_files/controller_cable_assignment.json")
        print "json data ",json_data
        data = json.load(json_data)
        return json.dumps(data)
def mode_request_data(self, url_list, redis_handle, cherrypy ):
return_data = {}
mode_object = {
"SHOULD NOT HAPPEN!":0,
"OFFLINE": 1,
"QUEUE_SCHEDULE":2,
"QUEUE_SCHEDULE_STEP":3,
"QUEUE_SCHEDULE_STEP_TIME":4,
"CLEAN_FILTER":5,
"OPEN_MASTER_VALVE":6,
"CLOSE_MASTER_VALVE":7,
"RESTART_PROGRAM":8,
"RESET_SYSTEM":9,
"CHECK_OFF":10,
"SHUT_DOWN":11,
"TURN_ON":12,
"SKIP_STATION":13
}
temp = redis_handle.get( "sprinkler_ctrl_mode")
if mode_object.has_key( temp ):
id = mode_object[temp]
else:
id = 0
return_data["mode"] = id
return_data["step"] = 0
return_data["run_time"] = 0
return json.dumps(return_data)
def generate_steps( self, file_data):
returnValue = []
controller_pins = []
if file_data["schedule"] != None:
schedule = file_data["schedule"]
for i in schedule:
returnValue.append(i[0][2])
temp = []
for l in i:
temp.append( [ l[0], l[1][0] ] )
controller_pins.append(temp)
return len(returnValue), returnValue, controller_pins
def schedule_data( self, url_list, redis_handle,cherrypy):
json_data=open("/media/mmc1/app_data_files/sprinkler_ctrl.json")
sprinkler_ctrl = json.load(json_data)
returnValue = []
for j in sprinkler_ctrl:
json_data=open("/media/mmc1/app_data_files/"+j["link"])
temp = json.load(json_data)
j["step_number"], j["steps"], j["controller_pins"] = self.generate_steps(temp)
returnValue.append(j)
return json.dumps(returnValue)
def change_mode( self, url_list, redis_handle, cherrypy ):
json_object = cherrypy.request.params["JSON"]
mode = int(json_object["mode"])
schedule_name = json_object["schedule_name"]
step = int(json_object["step"])
run_time = int(json_object["run_time"])
if (mode == 0 ) or (mode==1 ) :
schedule_name = "offline"
step = 1
run_time = 1
temp = {}
temp["command"] = mode_string[mode]
temp["schedule_name"] = schedule_name
temp["step"] = step
temp["run_time"] = run_time
scratch = json.dumps(temp)
redis_handle.lpush("sprinkler_ctrl_queue", base64.b64encode(scratch) )
return json.dumps("SUCCESS")
def pin_off( self, url_list, redis_handle, cherrypy ):
    """Queue an OFFLINE command, turning every controller pin off.

    The request JSON payload is read but unused; the parameter list is
    the common handler signature.

    Fix: base64.b64encode() requires bytes on Python 3, so the JSON is
    explicitly encoded as UTF-8 first (a no-op for the ASCII JSON this
    produces on Python 2).
    """
    json_object = cherrypy.request.params["JSON"]
    # Key order matters only cosmetically; kept identical to the original.
    command = {
        "command": "OFFLINE",
        "schedule_name": "offline",
        "step": 1,
        "run_time": 1,
    }
    payload = base64.b64encode(json.dumps(command).encode("utf-8"))
    redis_handle.lpush("sprinkler_ctrl_queue", payload)
    return json.dumps("SUCCESS")
def pin_on( self, url_list, redis_handle, cherrypy ):
    # Turn a single controller pin on for a given run time.
    # Request JSON: {"controller": str, "pin": pin id, "run_time": duration}
    # (units not shown here -- confirm against the queue consumer).
    json_object = cherrypy.request.params["JSON"]
    # Force everything off first so only the requested pin ends up on.
    self.pin_off( url_list,redis_handle, cherrypy) # sending offline command before native mode command
    temp = {}
    temp["command"] = "NATIVE_SPRINKLER"
    temp["schedule_remote_queue"] = json_object["controller"]
    temp["schedule_pin_queue"] = json_object["pin"]
    temp["schedule_time_queue"] = json_object["run_time"]
    # Base64-wrapped JSON, same envelope as every other queued command.
    scratch = json.dumps(temp)
    redis_handle.lpush("sprinkler_ctrl_queue", base64.b64encode(scratch) )
    return json.dumps("SUCCESS")
def change_eto_flag( self, url_list, redis_handle, cherrypy ):
    """Store the ETO-management enable flag from the request JSON."""
    flags = cherrypy.request.params["JSON"]
    redis_handle.set("ETO_MANAGE_FLAG", flags["eto_flag"])
    return json.dumps("SUCCESS")
def change_rain_flag( self, url_list, redis_handle, cherrypy ):
    """Store the rain-day flag from the request JSON."""
    flags = cherrypy.request.params["JSON"]
    redis_handle.set("rain_day", flags["rain_flag"])
    return json.dumps("SUCCESS")
def get_rain_flag( self, url_list, redis_handle, cherrypy ):
    """Return {"rain_flag": <stored value>} as JSON."""
    return json.dumps({"rain_flag": redis_handle.get("rain_day")})
def get_eto_flag( self, url_list, redis_handle, cherrypy ):
    """Return {"eto_flag": <stored value>} as JSON."""
    return json.dumps({"eto_flag": redis_handle.get("ETO_MANAGE_FLAG")})
def get_queue_entry( self, url_list, redis_handle, cherrypy ):
    """Summarize the IRRIGATION_QUEUE as JSON.

    Walks the redis list from oldest to newest entry, summing the
    run_time of IRRIGATION_STEP records; each END_SCHEDULE record closes
    a schedule and emits {"name": schedule, "value": total}.  Steps after
    the last END_SCHEDULE are reported under "unspecified".

    Fix: removed the dead ``element_list`` locals (built but never read,
    a leftover from get_irrigation_queue()).
    """
    json_object = []
    length = redis_handle.llen("IRRIGATION_QUEUE")
    if length > 0:
        name = "unspecified"
        total = 0
        for i in range(0, length):
            # Index length-1-i walks the lpush-built list oldest-first.
            data = redis_handle.lindex("IRRIGATION_QUEUE", length - 1 - i)
            if data is not None:
                data = json.loads(data)
                if data["type"] == "END_SCHEDULE":
                    json_object.append({"name": data["schedule_name"], "value": total})
                    name = "unspecified"
                    total = 0
                if data["type"] == "IRRIGATION_STEP":
                    total = total + int(data["run_time"])
        if total > 0:
            # Steps queued after the last END_SCHEDULE marker.
            json_object.append({"name": name, "value": total})
    return json.dumps(json_object)
def delete_queue( self, url_list, redis_handle, cherrypy ):
    # Remove user-selected schedules from the pending IRRIGATION_QUEUE.
    # Request JSON: sequence indexed by schedule position (oldest first);
    # a non-zero value means "delete that schedule's queue records".
    json_object = cherrypy.request.params["JSON"]
    length = redis_handle.llen( "IRRIGATION_QUEUE" )
    if length > 0 :
        queue_index = 0
        for i in range(0,length):
            queue_index_temp = queue_index
            # Index length-1-i walks the lpush-built list oldest-first.
            data = redis_handle.lindex( "IRRIGATION_QUEUE", length - 1 -i)
            if data != None:
                data = json.loads(data)
                if data["type"] == "END_SCHEDULE" :
                    # Marker closes the current schedule; following records
                    # belong to the next index.
                    queue_index_temp = queue_index +1
                if json_object[ queue_index ] != 0 :
                    # Overwrite with a sentinel then remove by value, so the
                    # remaining indices are not disturbed mid-scan.
                    redis_handle.lset( "IRRIGATION_QUEUE",length - 1 -i,"NULL/NULL")
                    redis_handle.lrem( "IRRIGATION_QUEUE", 1, "NULL/NULL" )
                queue_index = queue_index_temp
    json_string = json.dumps("SUCCESS")
    return json_string
def get_eto_entries( self, url_list, redis_handle, cherrypy ):
    """Return every ETO resource and its stored value as a JSON list.

    ETO_RESOURCE_LIST holds a colon-separated list of redis key names;
    the reply is [{"name": key, "data": value}, ...] in that order.
    """
    resource_names = redis_handle.get("ETO_RESOURCE_LIST").split(":")
    entries = [{"name": key, "data": redis_handle.get(key)} for key in resource_names]
    return json.dumps(entries)
def save_eto_data( self, url_list, redis_handle, cherrypy ):
    """Persist each {"name", "data"} entry from the request JSON to redis."""
    for entry in cherrypy.request.params["JSON"]:
        redis_handle.set(entry["name"], entry["data"])
    return json.dumps("SUCCESS")
def flow_sensor_name( self, url_list, redis_handle, cherrypy ):
    """Return [column0, column3] pairs for every row of the global sensor
    table (presumably sensor id and display name -- confirm against the
    table's writer).

    Fix: the original never closed the file handle; a with-block does.
    """
    with open("/media/mmc1/system_data_files/global_sensors.json") as data:
        flow_sensor_data = json.load(data)
    json_object = [[j[0], j[3]] for j in flow_sensor_data]
    return json.dumps(json_object)
def get_queue( self, url_list, redis_handle, cherrypy ):
    # Return the contents of one flow-measurement queue as
    # {"flow_queue": [...]}.  The raw query string is used directly as
    # the queue-name suffix.
    json_object = {}
    json_string = cherrypy.request.query_string
    queue = json_string
    # Debug trace (Python 2 print statement).
    print "-------------------------->",queue,"----------------------------"
    json_object["flow_queue"] = []
    # Flow queues live in the module-level db-15 redis connection, not
    # the handle passed in.
    length = redis_handle_15.llen("redis_flow_queue_"+queue )
    for i in range(0,length):
        data = redis_handle_15.lindex("redis_flow_queue_"+queue, i )
        json_object["flow_queue"].append(data)
    json_string = json.dumps( json_object )
    return json_string
def recent_plc( self, url_list, redis_handle, cherrypy ):
    """Return the recent PLC current readings as {"plc_current_queue": [...]}.

    Readings are kept in the module-level db-15 redis connection.
    """
    queue_length = redis_handle_15.llen("plc_current_queue")
    readings = [redis_handle_15.lindex("plc_current_queue", i)
                for i in range(0, queue_length)]
    return json.dumps({"plc_current_queue": readings})
def recent_coil( self, url_list, redis_handle, cherrypy ):
    """Return the recent coil current readings as {"coil_current_queue": [...]}.

    Bug fix: the queue length was taken from "plc_current_queue" while the
    entries were read from "coil_current_queue"; whenever the two lists
    differed in length this returned truncated or None-padded data.  Both
    operations now use "coil_current_queue".
    """
    length = redis_handle_15.llen("coil_current_queue")
    json_object = {}
    json_object["coil_current_queue"] = []
    for i in range(0, length):
        data = redis_handle_15.lindex("coil_current_queue", i)
        json_object["coil_current_queue"].append(data)
    json_string = json.dumps(json_object)
    return json_string
def find_step( self, sprinkler_ctrl, schedule_name ):
    """Return the index of the schedule named schedule_name, or None."""
    for position, schedule in enumerate(sprinkler_ctrl):
        if schedule["name"] == schedule_name:
            return position
    return None
def start_time_update( self, url_list, redis_handle, cherrypy ):
    """Update a schedule's start/end times and days-of-week in the index.

    Request JSON: {"schedule_name", "start_time", "end_time", "dow"}.

    Fix: the original opened the read handle, then rebound the name to a
    write handle without closing either; with-blocks close both.
    """
    json_object = cherrypy.request.params["JSON"]
    with open("/media/mmc1/app_data_files/sprinkler_ctrl.json") as json_file:
        sprinkler_ctrl = json.load(json_file)
    step = self.find_step(sprinkler_ctrl, json_object["schedule_name"])
    sprinkler_ctrl[step]["start_time"] = json_object["start_time"]
    sprinkler_ctrl[step]["end_time"] = json_object["end_time"]
    sprinkler_ctrl[step]["dow"] = json_object["dow"]
    with open("/media/mmc1/app_data_files/sprinkler_ctrl.json", 'w') as json_file:
        json.dump(sprinkler_ctrl, json_file)
    return json.dumps("SUCCESS")
def update_run_time( self, url_list, redis_handle, cherrypy ):
    # Change the run time of one step of one schedule.
    # Request JSON: {"schedule_name", "schedule_step", "runtime_step"}.
    json_object = cherrypy.request.params["JSON"]
    json_file = open("/media/mmc1/app_data_files/sprinkler_ctrl.json")
    sprinkler_ctrl = json.load(json_file)
    json_object["schedule_step"] = int(json_object["schedule_step"])
    json_object["runtime_step"] = int(json_object["runtime_step"])
    step = self.find_step( sprinkler_ctrl, json_object["schedule_name"])
    # Load the per-schedule link file and patch the run-time field of the
    # selected step's first entry ([0][2] -- see generate_steps()).
    json_file = open("/media/mmc1/app_data_files/"+sprinkler_ctrl[step]["link"] )
    temp = json.load(json_file)
    temp["schedule"][json_object["schedule_step"]][0][2] = json_object["runtime_step"]
    # NOTE(review): handles are never closed and the write reuses the same
    # name; consider with-blocks.
    json_file = open("/media/mmc1/app_data_files/"+sprinkler_ctrl[step]["link"],'w' )
    json.dump( temp, json_file )
    return json.dumps("SUCCESS")
def delete_schedule( self, url_list, redis_handle, cherrypy ):
    # Remove a schedule: delete its link file and drop its entry from the
    # master index.  Request JSON: {"deleted_schedule": name}.
    json_object = cherrypy.request.params["JSON"]
    # Debug trace left in by the author.
    print("------------------------ made it here -----------------")
    json_file = open("/media/mmc1/app_data_files/sprinkler_ctrl.json")
    sprinkler_ctrl = json.load(json_file)
    step = self.find_step( sprinkler_ctrl, json_object["deleted_schedule"])
    link_file = "/media/mmc1/app_data_files/"+sprinkler_ctrl[step]["link"]
    os.remove( link_file )
    del sprinkler_ctrl[step]
    # NOTE(review): handles are never closed; consider with-blocks.
    json_file = open("/media/mmc1/app_data_files/sprinkler_ctrl.json",'w' )
    json.dump( sprinkler_ctrl, json_file )
    return json.dumps("SUCCESS")
def insert_schedule( self, url_list, redis_handle, cherrypy ):
    """Create a new, empty schedule and register it in the master index.

    Request JSON: {"insert_schedule": name}.  Appends a default entry to
    sprinkler_ctrl.json and writes an empty <name>.json link file.
    """
    json_object = cherrypy.request.params["JSON"]
    insert_schedule = json_object["insert_schedule"]
    # Default index entry: zeroed times, all seven days disabled.
    index_entry = {
        "name": insert_schedule,
        "description": "",
        "end_time": [0, 0],
        "start_time": [0, 0],
        "dow": [0, 0, 0, 0, 0, 0, 0],
        "link": insert_schedule + ".json",
    }
    json_file = open("/media/mmc1/app_data_files/sprinkler_ctrl.json")
    sprinkler_ctrl = json.load(json_file)
    sprinkler_ctrl.append(index_entry)
    json_file = open("/media/mmc1/app_data_files/sprinkler_ctrl.json", 'w')
    json.dump(sprinkler_ctrl, json_file)
    # Empty link file: default bit names, no schedule steps yet.
    link_contents = {
        "bits": ["C201", "C2", "DS2"],
        "schedule": None,
    }
    json_file = open("/media/mmc1/app_data_files/" + insert_schedule + ".json", 'w')
    json.dump(link_contents, json_file)
    return json.dumps("SUCCESS")
def copy_schedule( self, url_list, redis_handle, cherrypy ):
    # Duplicate an existing schedule under a new name.
    # Request JSON: {"copy_source", "copy_destination"}.
    json_object = cherrypy.request.params["JSON"]
    copy_source = json_object["copy_source"]
    copy_destination = json_object["copy_destination"]
    json_file = open("/media/mmc1/app_data_files/sprinkler_ctrl.json")
    sprinkler_ctrl = json.load(json_file)
    step = self.find_step(sprinkler_ctrl,copy_source)
    # dumps/loads round-trip deep-copies the index entry before renaming.
    temp = json.dumps(sprinkler_ctrl[step])
    temp = json.loads(temp)
    temp["name"] =copy_destination
    temp["link"] = copy_destination+".json"
    sprinkler_ctrl.append(temp)
    json_file = open("/media/mmc1/app_data_files/sprinkler_ctrl.json",'w' )
    json.dump( sprinkler_ctrl, json_file )
    # copyfile comes from the module's "from shutil import *".
    copyfile("/media/mmc1/app_data_files/"+copy_source+".json",
             "/media/mmc1/app_data_files/"+copy_destination+".json" )
    return json.dumps("SUCCESS")
def change_schedule( self, url_list, redis_handle, cherrypy ):
    """Replace a schedule's index entry and rewrite its link file.

    Request JSON: {"schedule_name", "description", "start_time",
    "end_time", "dow", "grid_data"}.  grid_data rows carry
    "station_1".."station_5" values of the form "controller:pin" plus a
    per-row "time".

    Bug fix: the station columns were kept in a set, whose iteration
    order is arbitrary; combined with the break-on-first-missing-column
    logic below, stations could be dropped or reordered.  An ordered
    tuple restores the intended station_1..station_5 scan.  The
    Python-2-only has_key() was replaced with the "in" operator.
    """
    field_map = ("station_1", "station_2", "station_3", "station_4", "station_5")
    json_object = cherrypy.request.params["JSON"]
    temp = {}
    temp["name"] = json_object["schedule_name"]
    temp["description"] = json_object["description"]
    temp["end_time"] = []
    for i in range(0, 2):
        temp["end_time"].append(json_object["end_time"][i])
    temp["start_time"] = []
    for i in range(0, 2):
        temp["start_time"].append(json_object["start_time"][i])
    temp["dow"] = []
    for i in range(0, 7):
        temp["dow"].append(json_object["dow"][i])
    temp["link"] = json_object["schedule_name"] + ".json"
    json_file = open("/media/mmc1/app_data_files/sprinkler_ctrl.json")
    sprinkler_ctrl = json.load(json_file)
    index = self.find_step(sprinkler_ctrl, json_object["schedule_name"])
    sprinkler_ctrl[index] = temp
    json_file = open("/media/mmc1/app_data_files/sprinkler_ctrl.json", 'w')
    json.dump(sprinkler_ctrl, json_file)
    # Link-file contents: bit names keyed 1..3 (serialized as "1".."3" by
    # json.dump) plus the expanded schedule grid.
    temp = {}
    temp["bits"] = {}
    temp["bits"][1] = "C201"
    temp["bits"][2] = "C2"
    temp["bits"][3] = "DS2"
    if json_object["grid_data"] is None:
        temp["schedule"] = None
    else:
        temp["schedule"] = []
        for j in json_object["grid_data"]:
            temp_schedule = []
            for m in field_map:
                if m in j:
                    # Builds one entry: [ "satellite_1", [2], 15 ]
                    controller_pin = j[m].split(":")
                    temp_element = []
                    temp_element.append(controller_pin[0])
                    temp_element.append([])
                    temp_element[1].append(int(controller_pin[1]))
                    temp_element.append(int(j["time"]))
                    temp_schedule.append(temp_element)
                else:
                    # Stations fill left-to-right; the first missing
                    # column ends the row.
                    break
            temp["schedule"].append(temp_schedule)
    json_file = open("/media/mmc1/app_data_files/" + json_object["schedule_name"] + ".json", 'w')
    json.dump(temp, json_file)
    return json.dumps("SUCCESS")
def schedule_entry( self, url_list, redis_handle, cherrypy ):
    # Return one schedule's expanded entry (steps, controller pins) as
    # JSON.  The query string is URL-encoded JSON: {"schedule_name": name}.
    returnValue = []
    query_string = cherrypy.request.query_string
    # urllib.unquote is Python-2-only (urllib.parse.unquote in Python 3).
    json_string = urllib.unquote(query_string)
    json_object = json.loads(json_string)
    schedule_name = json_object["schedule_name"]
    json_file = open("/media/mmc1/app_data_files/sprinkler_ctrl.json")
    sprinkler_ctrl = json.load(json_file)
    # Expand every schedule, then pick the requested one by index.
    for j in sprinkler_ctrl:
        json_file = open("/media/mmc1/app_data_files/"+j["link"])
        temp = json.load(json_file)
        j["step_number"], j["steps"], j["controller_pins"] = self.generate_steps(temp)
        returnValue.append(j)
    index = self.find_step( sprinkler_ctrl, schedule_name )
    returnValue = returnValue[index];
    return json.dumps( returnValue )
| 34.398363 | 108 | 0.627667 |
import json as simplejson
import json
import time
import os
import cherrypy
from urlparse import *
from shutil import *
import urllib
from cherrypy.lib.httputil import parse_query_string
import redis
import base64
redis_handle_15 = redis.StrictRedis(host='localhost', port=6379, db=15)
# Command names pushed to the controller queue, index-aligned with the
# numeric mode ids produced by mode_request_data(); index 0 is an
# intentionally invalid placeholder.
mode_string = [
    "This should not happen",
    "OFFLINE",
    "QUEUE_SCHEDULE",
    "QUEUE_SCHEDULE_STEP",
    "QUEUE_SCHEDULE_STEP_TIME",
    "CLEAN_FILTER",
    "OPEN_MASTER_VALVE",
    "CLOSE_MASTER_VALVE",
    "RESTART_PROGRAM",
    "RESET_SYSTEM" ,
    "CHECK_OFF",
    "SHUT_DOWN",
    "TURN_ON",
    "SKIP_STATION"
]
class System_state_modules:
def __init__(self, module_dictionary ):
    # Register every HTTP endpoint this class serves.  module_dictionary
    # maps the request page name to the bound handler method; all
    # handlers share the (url_list, redis_handle, cherrypy) signature.
    module_dictionary["redis_get_status.html"] = self.redis_get_status
    module_dictionary["get_flow_sensor_name.html"] = self.get_flow_sensor_name
    module_dictionary["get_irrigation_queue.html"] = self.get_irrigation_queue
    module_dictionary["load_controller_pins.html"] = self.load_controller_pins
    module_dictionary["mode_request.html"] = self.mode_request_data
    module_dictionary["schedule_data.html"] = self.schedule_data
    module_dictionary["mode_change.html"] = self.change_mode
    module_dictionary["controller_pin_turn_off.html"] = self.pin_off
    module_dictionary["controller_pin_turn_on.html"] = self.pin_on
    module_dictionary["change_rain_flag.html"] = self.change_rain_flag
    module_dictionary["change_eto_flag.html"] = self.change_eto_flag
    module_dictionary["rain_flag.html"] = self.get_rain_flag
    module_dictionary["eto_flag.html"] = self.get_eto_flag
    module_dictionary["get_queue_entry.html"] = self.get_queue_entry
    module_dictionary["delete_queue_element.html"] = self.delete_queue
    module_dictionary["get_eto_entries.html"] = self.get_eto_entries
    module_dictionary["save_eto_data.html"] = self.save_eto_data
    module_dictionary["flow_sensor_name.html"] = self.flow_sensor_name
    module_dictionary["get_flow_queue.html"] = self.get_queue
    module_dictionary["recent_plc.html"] = self.recent_plc
    module_dictionary["recent_coil.html"] = self.recent_coil
    module_dictionary["start_time_update.html"] = self.start_time_update
    module_dictionary["run_time_update.html"] = self.update_run_time
    module_dictionary["delete_schedule.html"] = self.delete_schedule
    module_dictionary["insert_schedule.html"] = self.insert_schedule
    module_dictionary["copy_schedule.html"] = self.copy_schedule
    module_dictionary["change_schedule.html"] = self.change_schedule
    module_dictionary["schedule_entry.html"] = self.schedule_entry
    module_dictionary["load_valve_groups.html"] = self.load_valve_groups
    module_dictionary["get_cleaning_interval.html"] = self.get_cleaning_interval
    module_dictionary["set_cleaning_interval.html"] = self.set_cleaning_interval
    module_dictionary["set_max_flow_rate_cut_off.html"] = self.set_max_flow_rate_cut_off
    module_dictionary["get_max_flow_rate_cut_off.html"] = self.get_max_flow_rate_cut_off
def get_max_flow_rate_cut_off(self, url_list, redis_handle, cherrypy ):
    """Return [max_flow_rate_cutoff, max_flow_rate_time] as JSON.

    Either key missing from redis reads as 0.
    """
    cutoff_raw = redis_handle.get("max_flow_rate_cutoff")
    cutoff = 0 if cutoff_raw is None else float(cutoff_raw)
    window_raw = redis_handle.get("max_flow_rate_time")
    window = 0 if window_raw is None else float(window_raw)
    return json.dumps([cutoff, window])
def set_max_flow_rate_cut_off(self, url_list, redis_handle, cherrypy ):
    """Store [cutoff, time] from the request JSON, truncated to ints."""
    values = cherrypy.request.params["JSON"]
    redis_handle.set("max_flow_rate_cutoff", int(values[0]))
    redis_handle.set("max_flow_rate_time", int(values[1]))
    return json.dumps("SUCCESS")
def set_cleaning_interval(self, url_list, redis_handle, cherrypy ):
    """Store the filter-cleaning interval; the request JSON is a bare number."""
    interval = float(cherrypy.request.params["JSON"])
    redis_handle.set("cleaning_interval", interval)
    return json.dumps("SUCCESS")
def get_cleaning_interval(self, url_list, redis_handle, cherrypy ):
    """Return the stored cleaning interval as JSON; an unset key reads as 0."""
    stored = redis_handle.get("cleaning_interval")
    interval = 0 if stored is None else float(stored)
    return json.dumps(interval)
def redis_get_status(self, url_list, redis_handle, cherrypy ):
    """Snapshot the controller's status keys from redis as one JSON object."""
    # (response field, redis key) pairs; insertion order is preserved in
    # the serialized JSON.
    field_to_key = [
        ("controller_time_stamp", "sprinkler_time_stamp"),
        ("flow_rate", "global_flow_sensor"),
        ("op_mode", "sprinkler_ctrl_mode"),
        ("schedule", "schedule_name"),
        ("step", "schedule_step"),
        ("time_of_step", "schedule_time_max"),
        ("current_duration", "schedule_time_count"),
        ("derating_factor", "derating_factor"),
        ("rain_day", "rain_day"),
        ("pcl_current", "plc_current"),
        ("coil_current", "coil_current"),
        ("eto_yesterday", "YESTERDAY_ETO"),
        ("eto_current", "CURRENT_ETO"),
        ("eto_master_valve", "MASTER_VALVE_SETUP"),
        ("eto_managment_flag", "ETO_MANAGE_FLAG"),
    ]
    status = {}
    for field, key in field_to_key:
        status[field] = redis_handle.get(key)
    return json.dumps(status)
def get_flow_sensor_name( self, url_list, redis_handle, cherrypy ):
    """Return [column0, column3] pairs for every row of the global sensor
    table (presumably sensor id and display name -- confirm against the
    table's writer).

    Fix: the original never closed the file handle; a with-block does.
    """
    with open("/media/mmc1/system_data_files/global_sensors.json") as json_data:
        data = json.load(json_data)
    return_data = []
    for i in data:
        return_data.append([i[0], i[3]])
    return json.dumps(return_data)
def get_irrigation_queue( self, url_list, redis_handle, cherrypy ):
    # Report the pending IRRIGATION_QUEUE grouped by schedule.  Each reply
    # element is {"name": schedule, "value": [running_total, step_time,
    # ...]} -- the list variant of get_queue_entry()'s scalar summary.
    return_data = []
    queue_len = redis_handle.llen("IRRIGATION_QUEUE")
    element_list = []
    if queue_len > 0 :
        name = "unspecified"
        total = 0
        sub_total = 0          # NOTE(review): written but never read
        element_list.append(0)
        state = 0              # NOTE(review): written but never read
        for i in range(0, queue_len):
            # Index queue_len-i-1 walks the lpush-built list oldest-first.
            data = redis_handle.lindex("IRRIGATION_QUEUE", queue_len - i-1)
            if data != None:
                data = json.loads(data)
                if data["type"] == "END_SCHEDULE":
                    # Close out the current schedule group.
                    element = {}
                    name = data["schedule_name"]
                    element["name"] = name
                    element["value"] = element_list
                    return_data.append(element)
                    element_list = []
                    element_list.append(total)
                    name = "unspecified"
                if data["type"] == "IRRIGATION_STEP" :
                    total = total + float( data["run_time"])
                    element_list.append( float( data["run_time"] ))
        if len(element_list) > 1 :
            # Steps queued after the last END_SCHEDULE marker.
            element = {}
            element["name"] = name
            element["value"] = element_list
            return_data.append( element )
    json_string = json.dumps(return_data)
    # Debug trace (Python 2 print statement).
    print "json_string--------------->",json_string
    return json_string
def load_valve_groups( self, url_list, redis_handle, cherrypy ):
    """Return the valve-group assignment table verbatim as JSON.

    Fixes: the original leaked the file handle (now closed by a
    with-block) and carried a Python-2-only debug print statement, which
    has been removed.
    """
    with open("/media/mmc1/system_data_files/valve_group_assignments.json") as json_data:
        data = json.load(json_data)
    return json.dumps(data)
def load_controller_pins( self, url_list, redis_handle, cherrypy ):
    """Return the controller cable-assignment table verbatim as JSON.

    Same fixes as load_valve_groups(): handle closed, debug print removed.
    """
    with open("/media/mmc1/system_data_files/controller_cable_assignment.json") as json_data:
        data = json.load(json_data)
    return json.dumps(data)
def mode_request_data(self, url_list, redis_handle, cherrypy ):
return_data = {}
mode_object = {
"SHOULD NOT HAPPEN!":0,
"OFFLINE": 1,
"QUEUE_SCHEDULE":2,
"QUEUE_SCHEDULE_STEP":3,
"QUEUE_SCHEDULE_STEP_TIME":4,
"CLEAN_FILTER":5,
"OPEN_MASTER_VALVE":6,
"CLOSE_MASTER_VALVE":7,
"RESTART_PROGRAM":8,
"RESET_SYSTEM":9,
"CHECK_OFF":10,
"SHUT_DOWN":11,
"TURN_ON":12,
"SKIP_STATION":13
}
temp = redis_handle.get( "sprinkler_ctrl_mode")
if mode_object.has_key( temp ):
id = mode_object[temp]
else:
id = 0
return_data["mode"] = id
return_data["step"] = 0
return_data["run_time"] = 0
return json.dumps(return_data)
def generate_steps( self, file_data):
returnValue = []
controller_pins = []
if file_data["schedule"] != None:
schedule = file_data["schedule"]
for i in schedule:
returnValue.append(i[0][2])
temp = []
for l in i:
temp.append( [ l[0], l[1][0] ] )
controller_pins.append(temp)
return len(returnValue), returnValue, controller_pins
def schedule_data( self, url_list, redis_handle,cherrypy):
json_data=open("/media/mmc1/app_data_files/sprinkler_ctrl.json")
sprinkler_ctrl = json.load(json_data)
returnValue = []
for j in sprinkler_ctrl:
json_data=open("/media/mmc1/app_data_files/"+j["link"])
temp = json.load(json_data)
j["step_number"], j["steps"], j["controller_pins"] = self.generate_steps(temp)
returnValue.append(j)
return json.dumps(returnValue)
def change_mode( self, url_list, redis_handle, cherrypy ):
json_object = cherrypy.request.params["JSON"]
mode = int(json_object["mode"])
schedule_name = json_object["schedule_name"]
step = int(json_object["step"])
run_time = int(json_object["run_time"])
if (mode == 0 ) or (mode==1 ) :
schedule_name = "offline"
step = 1
run_time = 1
temp = {}
temp["command"] = mode_string[mode]
temp["schedule_name"] = schedule_name
temp["step"] = step
temp["run_time"] = run_time
scratch = json.dumps(temp)
redis_handle.lpush("sprinkler_ctrl_queue", base64.b64encode(scratch) )
return json.dumps("SUCCESS")
def pin_off( self, url_list, redis_handle, cherrypy ):
json_object = cherrypy.request.params["JSON"]
temp = {}
temp["command"] = "OFFLINE"
temp["schedule_name"] = "offline"
temp["step"] = 1
temp["run_time"] = 1
scratch = json.dumps(temp)
redis_handle.lpush("sprinkler_ctrl_queue", base64.b64encode(scratch) )
return json.dumps("SUCCESS")
def pin_on( self, url_list, redis_handle, cherrypy ):
json_object = cherrypy.request.params["JSON"]
self.pin_off( url_list,redis_handle, cherrypy)
temp = {}
temp["command"] = "NATIVE_SPRINKLER"
temp["schedule_remote_queue"] = json_object["controller"]
temp["schedule_pin_queue"] = json_object["pin"]
temp["schedule_time_queue"] = json_object["run_time"]
scratch = json.dumps(temp)
redis_handle.lpush("sprinkler_ctrl_queue", base64.b64encode(scratch) )
return json.dumps("SUCCESS")
def change_eto_flag( self, url_list, redis_handle, cherrypy ):
json_object = cherrypy.request.params["JSON"]
redis_handle.set( "ETO_MANAGE_FLAG", json_object["eto_flag"] )
return json.dumps("SUCCESS")
def change_rain_flag( self, url_list, redis_handle, cherrypy ):
json_object = cherrypy.request.params["JSON"]
redis_handle.set( "rain_day", json_object["rain_flag"] )
return json.dumps("SUCCESS")
def get_rain_flag( self, url_list, redis_handle, cherrypy ):
json_object = {}
json_object["rain_flag"] = redis_handle.get( "rain_day" )
return json.dumps( json_object )
def get_eto_flag( self, url_list, redis_handle, cherrypy ):
json_object = {}
json_object["eto_flag"] = redis_handle.get( "ETO_MANAGE_FLAG" )
return json.dumps( json_object )
def get_queue_entry( self, url_list, redis_handle, cherrypy ):
json_object = []
length = redis_handle.llen( "IRRIGATION_QUEUE" )
if length > 0 :
name = "unspecified"
total = 0
for i in range(0, length):
data = redis_handle.lindex( "IRRIGATION_QUEUE", length-1 -i)
if data != None :
data = json.loads(data)
if data["type"] == "END_SCHEDULE" :
element = {}
name = data["schedule_name"]
element["name"] = name
element["value"] = total
json_object.append( element)
element_list = []
element_list.append(total)
name = "unspecified"
total = 0
if data["type"] == "IRRIGATION_STEP" :
total = total + int( data["run_time"])
if total > 0 :
element = {}
element["name"] = name
element["value"] = total
json_object.append(element)
json_string = json.dumps(json_object)
return json_string
def delete_queue( self, url_list, redis_handle, cherrypy ):
json_object = cherrypy.request.params["JSON"]
length = redis_handle.llen( "IRRIGATION_QUEUE" )
if length > 0 :
queue_index = 0
for i in range(0,length):
queue_index_temp = queue_index
data = redis_handle.lindex( "IRRIGATION_QUEUE", length - 1 -i)
if data != None:
data = json.loads(data)
if data["type"] == "END_SCHEDULE" :
queue_index_temp = queue_index +1
if json_object[ queue_index ] != 0 :
redis_handle.lset( "IRRIGATION_QUEUE",length - 1 -i,"NULL/NULL")
redis_handle.lrem( "IRRIGATION_QUEUE", 1, "NULL/NULL" )
queue_index = queue_index_temp
json_string = json.dumps("SUCCESS")
return json_string
def get_eto_entries( self, url_list, redis_handle, cherrypy ):
json_object = []
eto_dictionary = redis_handle.get( "ETO_RESOURCE_LIST")
eto_list = eto_dictionary.split(":")
for j in eto_list:
temp = {}
temp["name"] = j
temp["data"] = redis_handle.get( j )
json_object.append(temp)
json_string = json.dumps( json_object )
return json_string
def save_eto_data( self, url_list, redis_handle, cherrypy ):
json_object = cherrypy.request.params["JSON"]
for j in json_object:
redis_handle.set(j["name"],j["data"])
return json.dumps("SUCCESS")
def flow_sensor_name( self, url_list, redis_handle, cherrypy ):
data =open("/media/mmc1/system_data_files/global_sensors.json")
flow_sensor_data = json.load(data)
json_object = []
for j in flow_sensor_data:
json_object.append( [ j[0], j[3] ] )
json_string = json.dumps(json_object)
return json_string
def get_queue( self, url_list, redis_handle, cherrypy ):
json_object = {}
json_string = cherrypy.request.query_string
queue = json_string
print "-------------------------->",queue,"----------------------------"
json_object["flow_queue"] = []
length = redis_handle_15.llen("redis_flow_queue_"+queue )
for i in range(0,length):
data = redis_handle_15.lindex("redis_flow_queue_"+queue, i )
json_object["flow_queue"].append(data)
json_string = json.dumps( json_object )
return json_string
def recent_plc( self, url_list, redis_handle, cherrypy ):
length = redis_handle_15.llen("plc_current_queue" )
json_object = {}
json_object["plc_current_queue"] = []
for i in range(0,length):
data = redis_handle_15.lindex("plc_current_queue", i )
json_object["plc_current_queue"].append(data)
json_string = json.dumps( json_object )
return json_string
def recent_coil( self, url_list, redis_handle, cherrypy ):
    """Return the recent coil current readings as {"coil_current_queue": [...]}.

    Bug fix: the queue length was taken from "plc_current_queue" while the
    entries were read from "coil_current_queue"; whenever the two lists
    differed in length this returned truncated or None-padded data.  Both
    operations now use "coil_current_queue".
    """
    length = redis_handle_15.llen("coil_current_queue")
    json_object = {}
    json_object["coil_current_queue"] = []
    for i in range(0, length):
        data = redis_handle_15.lindex("coil_current_queue", i)
        json_object["coil_current_queue"].append(data)
    json_string = json.dumps(json_object)
    return json_string
def find_step( self, sprinkler_ctrl, schedule_name ):
returnValue = None
count = 0
for j in sprinkler_ctrl:
if j["name"] == schedule_name:
returnValue = count
return returnValue
count = count +1
return returnValue
def start_time_update( self, url_list, redis_handle, cherrypy ):
json_object = cherrypy.request.params["JSON"]
json_file = open("/media/mmc1/app_data_files/sprinkler_ctrl.json")
sprinkler_ctrl = json.load(json_file)
step = self.find_step( sprinkler_ctrl, json_object["schedule_name"] );
sprinkler_ctrl[step]["start_time"] = json_object["start_time"];
sprinkler_ctrl[step]["end_time"] = json_object["end_time"];
sprinkler_ctrl[step]["dow"] = json_object["dow"];
json_file = open("/media/mmc1/app_data_files/sprinkler_ctrl.json",'w' )
json.dump( sprinkler_ctrl, json_file )
return json.dumps("SUCCESS")
def update_run_time( self, url_list, redis_handle, cherrypy ):
json_object = cherrypy.request.params["JSON"]
json_file = open("/media/mmc1/app_data_files/sprinkler_ctrl.json")
sprinkler_ctrl = json.load(json_file)
json_object["schedule_step"] = int(json_object["schedule_step"])
json_object["runtime_step"] = int(json_object["runtime_step"])
step = self.find_step( sprinkler_ctrl, json_object["schedule_name"])
json_file = open("/media/mmc1/app_data_files/"+sprinkler_ctrl[step]["link"] )
temp = json.load(json_file)
temp["schedule"][json_object["schedule_step"]][0][2] = json_object["runtime_step"]
json_file = open("/media/mmc1/app_data_files/"+sprinkler_ctrl[step]["link"],'w' )
json.dump( temp, json_file )
return json.dumps("SUCCESS")
def delete_schedule( self, url_list, redis_handle, cherrypy ):
json_object = cherrypy.request.params["JSON"]
print("------------------------ made it here -----------------")
json_file = open("/media/mmc1/app_data_files/sprinkler_ctrl.json")
sprinkler_ctrl = json.load(json_file)
step = self.find_step( sprinkler_ctrl, json_object["deleted_schedule"])
link_file = "/media/mmc1/app_data_files/"+sprinkler_ctrl[step]["link"]
os.remove( link_file )
del sprinkler_ctrl[step]
json_file = open("/media/mmc1/app_data_files/sprinkler_ctrl.json",'w' )
json.dump( sprinkler_ctrl, json_file )
return json.dumps("SUCCESS")
def insert_schedule( self, url_list, redis_handle, cherrypy ):
json_object = cherrypy.request.params["JSON"]
insert_schedule = json_object["insert_schedule"]
temp = {}
temp["name"] = insert_schedule
temp["description"] = ""
temp["end_time"] = []
temp["start_time"] = []
for i in range(0,2):
temp["start_time"].append(0)
for i in range(0,2):
temp["end_time"].append(0)
temp["dow"] = []
for i in range(0,7):
temp["dow"].append(0)
temp["link"] = insert_schedule+".json"
json_file = open("/media/mmc1/app_data_files/sprinkler_ctrl.json")
sprinkler_ctrl = json.load(json_file)
sprinkler_ctrl.append(temp)
json_file = open("/media/mmc1/app_data_files/sprinkler_ctrl.json",'w' )
json.dump( sprinkler_ctrl, json_file )
temp = {}
temp["bits"] = []
temp["bits"].append("C201")
temp["bits"].append("C2")
temp["bits"].append("DS2")
temp["schedule"] = None
json_file = open("/media/mmc1/app_data_files/"+insert_schedule+".json",'w' )
json.dump( temp, json_file )
return json.dumps("SUCCESS")
def copy_schedule( self, url_list, redis_handle, cherrypy ):
json_object = cherrypy.request.params["JSON"]
copy_source = json_object["copy_source"]
copy_destination = json_object["copy_destination"]
json_file = open("/media/mmc1/app_data_files/sprinkler_ctrl.json")
sprinkler_ctrl = json.load(json_file)
step = self.find_step(sprinkler_ctrl,copy_source)
temp = json.dumps(sprinkler_ctrl[step])
temp = json.loads(temp)
temp["name"] =copy_destination
temp["link"] = copy_destination+".json"
sprinkler_ctrl.append(temp)
json_file = open("/media/mmc1/app_data_files/sprinkler_ctrl.json",'w' )
json.dump( sprinkler_ctrl, json_file )
copyfile("/media/mmc1/app_data_files/"+copy_source+".json",
"/media/mmc1/app_data_files/"+copy_destination+".json" )
return json.dumps("SUCCESS")
def change_schedule( self, url_list, redis_handle, cherrypy ):
    """Replace a schedule's index entry and rewrite its link file.

    Request JSON: {"schedule_name", "description", "start_time",
    "end_time", "dow", "grid_data"}.  grid_data rows carry
    "station_1".."station_5" values of the form "controller:pin" plus a
    per-row "time".

    Bug fix: the station columns were kept in a set, whose iteration
    order is arbitrary; combined with the break-on-first-missing-column
    logic below, stations could be dropped or reordered.  An ordered
    tuple restores the intended station_1..station_5 scan.  The
    Python-2-only has_key() was replaced with the "in" operator.
    """
    field_map = ("station_1", "station_2", "station_3", "station_4", "station_5")
    json_object = cherrypy.request.params["JSON"]
    temp = {}
    temp["name"] = json_object["schedule_name"]
    temp["description"] = json_object["description"]
    temp["end_time"] = []
    for i in range(0, 2):
        temp["end_time"].append(json_object["end_time"][i])
    temp["start_time"] = []
    for i in range(0, 2):
        temp["start_time"].append(json_object["start_time"][i])
    temp["dow"] = []
    for i in range(0, 7):
        temp["dow"].append(json_object["dow"][i])
    temp["link"] = json_object["schedule_name"] + ".json"
    json_file = open("/media/mmc1/app_data_files/sprinkler_ctrl.json")
    sprinkler_ctrl = json.load(json_file)
    index = self.find_step(sprinkler_ctrl, json_object["schedule_name"])
    sprinkler_ctrl[index] = temp
    json_file = open("/media/mmc1/app_data_files/sprinkler_ctrl.json", 'w')
    json.dump(sprinkler_ctrl, json_file)
    # Link-file contents: bit names keyed 1..3 (serialized as "1".."3" by
    # json.dump) plus the expanded schedule grid.
    temp = {}
    temp["bits"] = {}
    temp["bits"][1] = "C201"
    temp["bits"][2] = "C2"
    temp["bits"][3] = "DS2"
    if json_object["grid_data"] is None:
        temp["schedule"] = None
    else:
        temp["schedule"] = []
        for j in json_object["grid_data"]:
            temp_schedule = []
            for m in field_map:
                if m in j:
                    # Builds one entry: [ "satellite_1", [2], 15 ]
                    controller_pin = j[m].split(":")
                    temp_element = []
                    temp_element.append(controller_pin[0])
                    temp_element.append([])
                    temp_element[1].append(int(controller_pin[1]))
                    temp_element.append(int(j["time"]))
                    temp_schedule.append(temp_element)
                else:
                    # Stations fill left-to-right; the first missing
                    # column ends the row.
                    break
            temp["schedule"].append(temp_schedule)
    json_file = open("/media/mmc1/app_data_files/" + json_object["schedule_name"] + ".json", 'w')
    json.dump(temp, json_file)
    return json.dumps("SUCCESS")
def schedule_entry( self, url_list, redis_handle, cherrypy ):
returnValue = []
query_string = cherrypy.request.query_string
json_string = urllib.unquote(query_string)
json_object = json.loads(json_string)
schedule_name = json_object["schedule_name"]
json_file = open("/media/mmc1/app_data_files/sprinkler_ctrl.json")
sprinkler_ctrl = json.load(json_file)
for j in sprinkler_ctrl:
json_file = open("/media/mmc1/app_data_files/"+j["link"])
temp = json.load(json_file)
j["step_number"], j["steps"], j["controller_pins"] = self.generate_steps(temp)
returnValue.append(j)
index = self.find_step( sprinkler_ctrl, schedule_name )
returnValue = returnValue[index];
return json.dumps( returnValue )
| false | true |
f7ff1b91856c517d6d8fc3445a5f591fbd1c39cf | 6,992 | py | Python | lale/lib/imblearn/random_over_sampler.py | gbdrt/lale | 291f824a6b96f088e787979ca768f50d7758424e | [
"Apache-2.0"
] | null | null | null | lale/lib/imblearn/random_over_sampler.py | gbdrt/lale | 291f824a6b96f088e787979ca768f50d7758424e | [
"Apache-2.0"
] | null | null | null | lale/lib/imblearn/random_over_sampler.py | gbdrt/lale | 291f824a6b96f088e787979ca768f50d7758424e | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from imblearn.over_sampling import RandomOverSampler as OrigModel
import lale.docstrings
import lale.operators
from lale.lib.imblearn.base_resampler import (
BaseResamplerImpl,
_input_decision_function_schema,
_input_fit_schema,
_input_predict_proba_schema,
_input_predict_schema,
_input_transform_schema,
_output_decision_function_schema,
_output_predict_proba_schema,
_output_predict_schema,
_output_transform_schema,
)
class RandomOverSamplerImpl(BaseResamplerImpl):
    """Higher-order resampling operator backed by imblearn's RandomOverSampler."""

    def __init__(self, operator=None, sampling_strategy="auto", random_state=None):
        # The wrapped pipeline is mandatory; the sampler settings have defaults.
        if operator is None:
            raise ValueError("Operator is a required argument.")
        hyperparams = {
            "sampling_strategy": sampling_strategy,
            "random_state": random_state,
        }
        self._hyperparams = hyperparams
        super(RandomOverSamplerImpl, self).__init__(
            operator=operator, resampler=OrigModel(**hyperparams)
        )
_hyperparams_schema = {
"allOf": [
{
"type": "object",
"required": ["operator"],
"relevantToOptimizer": ["operator"],
"additionalProperties": False,
"properties": {
"operator": {
"description": """Trainable Lale pipeline that is trained using the data obtained from the current imbalance corrector.
Predict, transform, predict_proba or decision_function would just be forwarded to the trained pipeline.
If operator is a Planned pipeline, the current imbalance corrector can't be trained without using an optimizer to
choose a trainable operator first. Please refer to lale/examples for more examples.""",
"anyOf": [{"laleType": "operator"}],
},
"sampling_strategy": {
"description": """sampling_strategy : float, str, dict or callable, default='auto'.
Sampling information to resample the data set.
""",
"anyOf": [
{
"description": """When ``float``,
it corresponds to the desired ratio of the number of
samples in the minority class over the number of samples in the
majority class after resampling. Therefore, the ratio is expressed as
:math:`\\alpha_{os} = N_{rm} / N_{M}` where :math:`N_{rm}` is the
number of samples in the minority class after resampling and
:math:`N_{M}` is the number of samples in the majority class.
.. warning::
``float`` is only available for **binary** classification. An
error is raised for multi-class classification.""",
"type": "number",
},
{
"description": """When ``str``, specify the class targeted by the resampling.
The number of samples in the different classes will be equalized.
Possible choices are:
``'minority'``: resample only the minority class;
``'not minority'``: resample all classes but the minority class;
``'not majority'``: resample all classes but the majority class;
``'all'``: resample all classes;
``'auto'``: equivalent to ``'not majority'``.""",
"enum": [
"minority",
"not minority",
"not majority",
"all",
"auto",
],
},
{
"description": """- When ``dict``, the keys correspond to the targeted classes.
The values correspond to the desired number of samples for each targeted
class.""",
"type": "object",
},
{
"description": """When callable, function taking ``y`` and returns a ``dict``.
The keys correspond to the targeted classes. The values correspond to the
desired number of samples for each class.""",
"laleType": "callable",
},
],
"default": "auto",
},
"random_state": {
"description": "Control the randomization of the algorithm.",
"anyOf": [
{
"description": "RandomState used by np.random",
"enum": [None],
},
{
"description": "The seed used by the random number generator",
"type": "integer",
},
{
"description": "Random number generator instance.",
"laleType": "numpy.random.RandomState",
},
],
"default": None,
},
},
}
]
}
# Combined operator schema: ties the hyperparameter schema above and the
# per-method input/output schemas (imported from base_resampler) into the
# single JSON document Lale uses to describe and document this operator.
_combined_schemas = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "description": """Class to perform random over-sampling, i.e. over-sample the minority class(es) by picking samples at random with replacement.""",
    "documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.imblearn.random_over_sampler.html",
    "import_from": "imblearn.over_sampling",
    "type": "object",
    "tags": {
        "pre": [],
        "op": [
            "transformer",
            "estimator",
            "resampler",
        ],  # transformer and estimator both as a higher-order operator
        "post": [],
    },
    "properties": {
        "hyperparams": _hyperparams_schema,
        "input_fit": _input_fit_schema,
        "input_transform": _input_transform_schema,
        "output_transform": _output_transform_schema,
        "input_predict": _input_predict_schema,
        "output_predict": _output_predict_schema,
        "input_predict_proba": _input_predict_proba_schema,
        "output_predict_proba": _output_predict_proba_schema,
        "input_decision_function": _input_decision_function_schema,
        "output_decision_function": _output_decision_function_schema,
    },
}
# Attach schema-derived docstrings to the impl class, then wrap it as a Lale
# operator; ``RandomOverSampler`` is the public, planned-operator entry point.
lale.docstrings.set_docstrings(RandomOverSamplerImpl, _combined_schemas)
RandomOverSampler = lale.operators.make_operator(
    RandomOverSamplerImpl, _combined_schemas
)
| 40.888889 | 151 | 0.581236 |
from imblearn.over_sampling import RandomOverSampler as OrigModel
import lale.docstrings
import lale.operators
from lale.lib.imblearn.base_resampler import (
BaseResamplerImpl,
_input_decision_function_schema,
_input_fit_schema,
_input_predict_proba_schema,
_input_predict_schema,
_input_transform_schema,
_output_decision_function_schema,
_output_predict_proba_schema,
_output_predict_schema,
_output_transform_schema,
)
class RandomOverSamplerImpl(BaseResamplerImpl):
def __init__(self, operator=None, sampling_strategy="auto", random_state=None):
if operator is None:
raise ValueError("Operator is a required argument.")
self._hyperparams = {
"sampling_strategy": sampling_strategy,
"random_state": random_state,
}
resampler_instance = OrigModel(**self._hyperparams)
super(RandomOverSamplerImpl, self).__init__(
operator=operator, resampler=resampler_instance
)
_hyperparams_schema = {
"allOf": [
{
"type": "object",
"required": ["operator"],
"relevantToOptimizer": ["operator"],
"additionalProperties": False,
"properties": {
"operator": {
"description": """Trainable Lale pipeline that is trained using the data obtained from the current imbalance corrector.
Predict, transform, predict_proba or decision_function would just be forwarded to the trained pipeline.
If operator is a Planned pipeline, the current imbalance corrector can't be trained without using an optimizer to
choose a trainable operator first. Please refer to lale/examples for more examples.""",
"anyOf": [{"laleType": "operator"}],
},
"sampling_strategy": {
"description": """sampling_strategy : float, str, dict or callable, default='auto'.
Sampling information to resample the data set.
""",
"anyOf": [
{
"description": """When ``float``,
it corresponds to the desired ratio of the number of
samples in the minority class over the number of samples in the
majority class after resampling. Therefore, the ratio is expressed as
:math:`\\alpha_{os} = N_{rm} / N_{M}` where :math:`N_{rm}` is the
number of samples in the minority class after resampling and
:math:`N_{M}` is the number of samples in the majority class.
.. warning::
``float`` is only available for **binary** classification. An
error is raised for multi-class classification.""",
"type": "number",
},
{
"description": """When ``str``, specify the class targeted by the resampling.
The number of samples in the different classes will be equalized.
Possible choices are:
``'minority'``: resample only the minority class;
``'not minority'``: resample all classes but the minority class;
``'not majority'``: resample all classes but the majority class;
``'all'``: resample all classes;
``'auto'``: equivalent to ``'not majority'``.""",
"enum": [
"minority",
"not minority",
"not majority",
"all",
"auto",
],
},
{
"description": """- When ``dict``, the keys correspond to the targeted classes.
The values correspond to the desired number of samples for each targeted
class.""",
"type": "object",
},
{
"description": """When callable, function taking ``y`` and returns a ``dict``.
The keys correspond to the targeted classes. The values correspond to the
desired number of samples for each class.""",
"laleType": "callable",
},
],
"default": "auto",
},
"random_state": {
"description": "Control the randomization of the algorithm.",
"anyOf": [
{
"description": "RandomState used by np.random",
"enum": [None],
},
{
"description": "The seed used by the random number generator",
"type": "integer",
},
{
"description": "Random number generator instance.",
"laleType": "numpy.random.RandomState",
},
],
"default": None,
},
},
}
]
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """Class to perform random over-sampling, i.e. over-sample the minority class(es) by picking samples at random with replacement.""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.imblearn.random_over_sampler.html",
"import_from": "imblearn.over_sampling",
"type": "object",
"tags": {
"pre": [],
"op": [
"transformer",
"estimator",
"resampler",
], # transformer and estimator both as a higher-order operator
"post": [],
},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
"input_predict_proba": _input_predict_proba_schema,
"output_predict_proba": _output_predict_proba_schema,
"input_decision_function": _input_decision_function_schema,
"output_decision_function": _output_decision_function_schema,
},
}
lale.docstrings.set_docstrings(RandomOverSamplerImpl, _combined_schemas)
RandomOverSampler = lale.operators.make_operator(
RandomOverSamplerImpl, _combined_schemas
)
| true | true |
f7ff1cd170e27a543a04f599ad76ecbd73806bfe | 844 | py | Python | frappe/core/doctype/view_log/test_view_log.py | vigneshbarani/frappe | 5e7ac14ddff9939882c44019b542ce6eb5f9c267 | [
"MIT"
] | null | null | null | frappe/core/doctype/view_log/test_view_log.py | vigneshbarani/frappe | 5e7ac14ddff9939882c44019b542ce6eb5f9c267 | [
"MIT"
] | 5 | 2020-12-04T21:08:07.000Z | 2022-03-12T00:39:56.000Z | frappe/core/doctype/view_log/test_view_log.py | vigneshbarani/frappe | 5e7ac14ddff9939882c44019b542ce6eb5f9c267 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2018, Frappe Technologies and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
class TestViewLog(unittest.TestCase):
	"""Verify that opening a document records the viewer in View Log."""

	def tearDown(self):
		# Restore the privileged session so later tests are unaffected.
		frappe.set_user('Administrator')

	def test_if_user_is_added(self):
		# Create a document that can subsequently be viewed.
		event = frappe.get_doc({
			'doctype': 'Event',
			'subject': 'test event for view logs',
			'starts_on': '2018-06-04 14:11:00',
			'event_type': 'Public'
		}).insert()

		# View the document as a non-Administrator user.
		frappe.set_user('test@gmail.com')
		from frappe.desk.form.load import getdoc
		getdoc('Event', event.name)

		# The View Log entry must record exactly the viewing user.
		viewed_by = frappe.get_value(
			doctype="View Log",
			filters={
				"reference_doctype": "Event",
				"reference_name": event.name
			},
			fieldname=['viewed_by']
		)
		self.assertEqual('test@gmail.com', viewed_by)
		self.assertNotEqual('test1@gmail.com', viewed_by)
from __future__ import unicode_literals
import frappe
import unittest
class TestViewLog(unittest.TestCase):
def tearDown(self):
frappe.set_user('Administrator')
def test_if_user_is_added(self):
ev = frappe.get_doc({
'doctype': 'Event',
'subject': 'test event for view logs',
'starts_on': '2018-06-04 14:11:00',
'event_type': 'Public'
}).insert()
frappe.set_user('test@gmail.com')
from frappe.desk.form.load import getdoc
getdoc('Event', ev.name)
a = frappe.get_value(
doctype="View Log",
filters={
"reference_doctype": "Event",
"reference_name": ev.name
},
fieldname=['viewed_by']
)
self.assertEqual('test@gmail.com', a)
self.assertNotEqual('test1@gmail.com', a) | true | true |
f7ff1e3ec66b203f2e86e07ab73c0dfa4b3c0576 | 3,199 | py | Python | environment/figure_library.py | JannerM/spatial-reasoning | e163003a33177e41ca02d5feefee3fdfca5ba154 | [
"MIT"
] | 54 | 2017-07-14T01:08:57.000Z | 2021-07-09T12:46:57.000Z | environment/figure_library.py | jannerm/spatial-reasoning | e163003a33177e41ca02d5feefee3fdfca5ba154 | [
"MIT"
] | null | null | null | environment/figure_library.py | jannerm/spatial-reasoning | e163003a33177e41ca02d5feefee3fdfca5ba154 | [
"MIT"
] | 16 | 2017-07-16T03:18:19.000Z | 2021-05-28T13:04:12.000Z | spritepath = 'sprites/'
objects = {
'grass': {
'index': 0,
'value': 0,
'sprite': 'sprites/grass_figure_4.png', # 'sprites/white.png',
'background': True,
'unique': False,
},
'puddle': {
'index': 1,
'value': -1,
'sprite': 'sprites/water_figure_2.png',
'background': True,
'unique': False,
},
## unique
'star': {
'index': 2,
'value': 0,
'sprite': 'sprites/star_figure-01.png', ## white_alpha.png
'background': False,
'unique': True,
},
'circle': {
'index': 3,
'value': 0,
'sprite': 'sprites/circle_figure-01.png',
'background': False,
'unique': True,
},
'triangle': {
'index': 4,
'value': 0,
'sprite': 'sprites/triangle_figure-01.png',
'background': False,
'unique': True,
},
'heart': {
'index': 5,
'value': 0,
'sprite': 'sprites/heart_figure-01.png',
'background': False,
'unique': True,
},
'spade': {
'index': 6,
'value': 0,
'sprite': 'sprites/spade_figure-01.png',
'background': False,
'unique': True,
},
'diamond': {
'index': 7,
'value': 0,
'sprite': 'sprites/diamond_figure-01.png',
'background': False,
'unique': True,
},
## non-unique
'rock': {
'index': 8,
'value': 0,
'sprite': 'sprites/rock_figure-01.png',
'background': False,
'unique': False,
},
'tree': {
'index': 9,
'value': 0,
'sprite': 'sprites/tree_figure-01.png',
'background': False,
'unique': False,
},
'house': {
'index': 10,
'value': 0,
'sprite': 'sprites/house_figure-01.png',
'background': False,
'unique': False,
},
'horse': {
'index': 11,
'value': 0,
'sprite': 'sprites/horse_figure-01.png',
'background': False,
'unique': False,
},
}
unique_instructions = {
## original
'to top left of': (-1, -1),
'on top of': (-1, 0),
'to top right of': (-1, 1),
'to left of': (0, -1),
'with': (0, 0),
'to right of': (0, 1),
'to bottom left of': (1, -1),
'on bottom of': (1, 0),
'to bottom right of': (1, 1),
## two steps away
'two to the left and two above': (-2, -2),
'one to the left and two above': (-2, -1),
'two above': (-2, 0),
'one to the right and two above': (-2, 1),
'two to the right and two above': (-2, 2),
'two to the right and one above': (-1, 2),
'two to the right of': (0, 2),
'two to the right and one below': (1, 2),
'two to the right and two below': (2, 2),
'one to the right and two below': (2, 1),
'two below': (2, 0),
'one to the left and two below': (2, -1),
'two to the left and two below': (2, -2),
'two to the left and one below': (1, -2),
'two to the left': (0, -2),
'two to the left and one above': (-1, -2)
}
background = 'sprites/grass_figure_4.png'
# print objects
| 25.388889 | 70 | 0.467334 | spritepath = 'sprites/'
objects = {
'grass': {
'index': 0,
'value': 0,
'sprite': 'sprites/grass_figure_4.png',
'background': True,
'unique': False,
},
'puddle': {
'index': 1,
'value': -1,
'sprite': 'sprites/water_figure_2.png',
'background': True,
'unique': False,
},
ar': {
'index': 2,
'value': 0,
'sprite': 'sprites/star_figure-01.png', und': False,
'unique': True,
},
'circle': {
'index': 3,
'value': 0,
'sprite': 'sprites/circle_figure-01.png',
'background': False,
'unique': True,
},
'triangle': {
'index': 4,
'value': 0,
'sprite': 'sprites/triangle_figure-01.png',
'background': False,
'unique': True,
},
'heart': {
'index': 5,
'value': 0,
'sprite': 'sprites/heart_figure-01.png',
'background': False,
'unique': True,
},
'spade': {
'index': 6,
'value': 0,
'sprite': 'sprites/spade_figure-01.png',
'background': False,
'unique': True,
},
'diamond': {
'index': 7,
'value': 0,
'sprite': 'sprites/diamond_figure-01.png',
'background': False,
'unique': True,
},
{
'index': 8,
'value': 0,
'sprite': 'sprites/rock_figure-01.png',
'background': False,
'unique': False,
},
'tree': {
'index': 9,
'value': 0,
'sprite': 'sprites/tree_figure-01.png',
'background': False,
'unique': False,
},
'house': {
'index': 10,
'value': 0,
'sprite': 'sprites/house_figure-01.png',
'background': False,
'unique': False,
},
'horse': {
'index': 11,
'value': 0,
'sprite': 'sprites/horse_figure-01.png',
'background': False,
'unique': False,
},
}
unique_instructions = {
p left of': (-1, -1),
'on top of': (-1, 0),
'to top right of': (-1, 1),
'to left of': (0, -1),
'with': (0, 0),
'to right of': (0, 1),
'to bottom left of': (1, -1),
'on bottom of': (1, 0),
'to bottom right of': (1, 1),
left and two above': (-2, -2),
'one to the left and two above': (-2, -1),
'two above': (-2, 0),
'one to the right and two above': (-2, 1),
'two to the right and two above': (-2, 2),
'two to the right and one above': (-1, 2),
'two to the right of': (0, 2),
'two to the right and one below': (1, 2),
'two to the right and two below': (2, 2),
'one to the right and two below': (2, 1),
'two below': (2, 0),
'one to the left and two below': (2, -1),
'two to the left and two below': (2, -2),
'two to the left and one below': (1, -2),
'two to the left': (0, -2),
'two to the left and one above': (-1, -2)
}
background = 'sprites/grass_figure_4.png'
| true | true |
f7ff1ea3188cc4c77411fa2c5529a7e4972e5b4e | 8,264 | py | Python | trdemo/python/antchain_sdk_trdemo/models.py | alipay/antchain-openapi-prod-sdk | f78549e5135d91756093bd88d191ca260b28e083 | [
"MIT"
] | 6 | 2020-06-28T06:40:50.000Z | 2022-02-25T11:02:18.000Z | trdemo/python/antchain_sdk_trdemo/models.py | alipay/antchain-openapi-prod-sdk | f78549e5135d91756093bd88d191ca260b28e083 | [
"MIT"
] | null | null | null | trdemo/python/antchain_sdk_trdemo/models.py | alipay/antchain-openapi-prod-sdk | f78549e5135d91756093bd88d191ca260b28e083 | [
"MIT"
] | 6 | 2020-06-30T09:29:03.000Z | 2022-01-07T10:42:22.000Z | # -*- coding: utf-8 -*-
# This file is auto-generated, don't edit it. Thanks.
from Tea.model import TeaModel
class Config(TeaModel):
    """
    Model for initing client
    """
    def __init__(
        self,
        access_key_id: str = None,
        access_key_secret: str = None,
        security_token: str = None,
        protocol: str = None,
        read_timeout: int = None,
        connect_timeout: int = None,
        http_proxy: str = None,
        https_proxy: str = None,
        endpoint: str = None,
        no_proxy: str = None,
        max_idle_conns: int = None,
        user_agent: str = None,
        socks_5proxy: str = None,
        socks_5net_work: str = None,
        max_idle_time_millis: int = None,
        keep_alive_duration_millis: int = None,
        max_requests: int = None,
        max_requests_per_host: int = None,
    ):
        # Credential triple used to sign requests.
        self.access_key_id = access_key_id
        self.access_key_secret = access_key_secret
        self.security_token = security_token
        # Transport settings: wire protocol and timeouts.
        self.protocol = protocol
        self.read_timeout = read_timeout
        self.connect_timeout = connect_timeout
        # Proxy configuration and service endpoint.
        self.http_proxy = http_proxy
        self.https_proxy = https_proxy
        self.endpoint = endpoint
        self.no_proxy = no_proxy  # proxy bypass white list
        self.max_idle_conns = max_idle_conns
        self.user_agent = user_agent
        self.socks_5proxy = socks_5proxy
        self.socks_5net_work = socks_5net_work
        # Keep-alive tuning: max idle time / max lifetime of a pooled
        # connection, in milliseconds.
        self.max_idle_time_millis = max_idle_time_millis
        self.keep_alive_duration_millis = keep_alive_duration_millis
        # Connection-pool limits: overall, and per target host.
        self.max_requests = max_requests
        self.max_requests_per_host = max_requests_per_host

    def validate(self):
        """All settings are optional, so there is nothing to validate."""
        pass

    def to_map(self):
        """Serialize every non-None setting into a camelCase-keyed dict."""
        field_pairs = [
            ('access_key_id', 'accessKeyId'),
            ('access_key_secret', 'accessKeySecret'),
            ('security_token', 'securityToken'),
            ('protocol', 'protocol'),
            ('read_timeout', 'readTimeout'),
            ('connect_timeout', 'connectTimeout'),
            ('http_proxy', 'httpProxy'),
            ('https_proxy', 'httpsProxy'),
            ('endpoint', 'endpoint'),
            ('no_proxy', 'noProxy'),
            ('max_idle_conns', 'maxIdleConns'),
            ('user_agent', 'userAgent'),
            ('socks_5proxy', 'socks5Proxy'),
            ('socks_5net_work', 'socks5NetWork'),
            ('max_idle_time_millis', 'maxIdleTimeMillis'),
            ('keep_alive_duration_millis', 'keepAliveDurationMillis'),
            ('max_requests', 'maxRequests'),
            ('max_requests_per_host', 'maxRequestsPerHost'),
        ]
        result = dict()
        for attr_name, map_key in field_pairs:
            attr_value = getattr(self, attr_name)
            if attr_value is not None:
                result[map_key] = attr_value
        return result

    def from_map(self, m: dict = None):
        """Populate settings from a camelCase-keyed dict; returns self."""
        field_pairs = [
            ('access_key_id', 'accessKeyId'),
            ('access_key_secret', 'accessKeySecret'),
            ('security_token', 'securityToken'),
            ('protocol', 'protocol'),
            ('read_timeout', 'readTimeout'),
            ('connect_timeout', 'connectTimeout'),
            ('http_proxy', 'httpProxy'),
            ('https_proxy', 'httpsProxy'),
            ('endpoint', 'endpoint'),
            ('no_proxy', 'noProxy'),
            ('max_idle_conns', 'maxIdleConns'),
            ('user_agent', 'userAgent'),
            ('socks_5proxy', 'socks5Proxy'),
            ('socks_5net_work', 'socks5NetWork'),
            ('max_idle_time_millis', 'maxIdleTimeMillis'),
            ('keep_alive_duration_millis', 'keepAliveDurationMillis'),
            ('max_requests', 'maxRequests'),
            ('max_requests_per_host', 'maxRequestsPerHost'),
        ]
        m = m or dict()
        for attr_name, map_key in field_pairs:
            map_value = m.get(map_key)
            if map_value is not None:
                setattr(self, attr_name, map_value)
        return self
class QueryLoadtestmarkRequest(TeaModel):
    """Request model for the load-test-mark query API."""

    # Field names double as wire keys for this model.
    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        time_limit: str = None,
    ):
        # OAuth authorization token.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Test field (the only mandatory one).
        self.time_limit = time_limit

    def validate(self):
        """Require ``time_limit`` to be set."""
        self.validate_required(self.time_limit, 'time_limit')

    def to_map(self):
        """Serialize every non-None field into a dict."""
        result = dict()
        for field_name in ('auth_token', 'product_instance_id', 'time_limit'):
            field_value = getattr(self, field_name)
            if field_value is not None:
                result[field_name] = field_value
        return result

    def from_map(self, m: dict = None):
        """Populate fields from a dict; returns self."""
        m = m or dict()
        for field_name in ('auth_token', 'product_instance_id', 'time_limit'):
            field_value = m.get(field_name)
            if field_value is not None:
                setattr(self, field_name, field_value)
        return self
class QueryLoadtestmarkResponse(TeaModel):
    """Response model for the load-test-mark query API."""

    # Field names double as wire keys for this model.
    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
    ):
        # Unique request id, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; "OK" generally indicates success.
        self.result_code = result_code
        # Human-readable description of any error.
        self.result_msg = result_msg

    def validate(self):
        """All fields are optional, so there is nothing to validate."""
        pass

    def to_map(self):
        """Serialize every non-None field into a dict."""
        result = dict()
        for field_name in ('req_msg_id', 'result_code', 'result_msg'):
            field_value = getattr(self, field_name)
            if field_value is not None:
                result[field_name] = field_value
        return result

    def from_map(self, m: dict = None):
        """Populate fields from a dict; returns self."""
        m = m or dict()
        for field_name in ('req_msg_id', 'result_code', 'result_msg'):
            field_value = m.get(field_name)
            if field_value is not None:
                setattr(self, field_name, field_value)
        return self
| 36.405286 | 79 | 0.607696 |
from Tea.model import TeaModel
class Config(TeaModel):
def __init__(
self,
access_key_id: str = None,
access_key_secret: str = None,
security_token: str = None,
protocol: str = None,
read_timeout: int = None,
connect_timeout: int = None,
http_proxy: str = None,
https_proxy: str = None,
endpoint: str = None,
no_proxy: str = None,
max_idle_conns: int = None,
user_agent: str = None,
socks_5proxy: str = None,
socks_5net_work: str = None,
max_idle_time_millis: int = None,
keep_alive_duration_millis: int = None,
max_requests: int = None,
max_requests_per_host: int = None,
):
# accesskey id
self.access_key_id = access_key_id
# accesskey secret
self.access_key_secret = access_key_secret
# security token
self.security_token = security_token
# http protocol
self.protocol = protocol
# read timeout
self.read_timeout = read_timeout
# connect timeout
self.connect_timeout = connect_timeout
# http proxy
self.http_proxy = http_proxy
# https proxy
self.https_proxy = https_proxy
# endpoint
self.endpoint = endpoint
# proxy white list
self.no_proxy = no_proxy
# max idle conns
self.max_idle_conns = max_idle_conns
# user agent
self.user_agent = user_agent
# socks5 proxy
self.socks_5proxy = socks_5proxy
# socks5 network
self.socks_5net_work = socks_5net_work
# 长链接最大空闲时长
self.max_idle_time_millis = max_idle_time_millis
# 长链接最大连接时长
self.keep_alive_duration_millis = keep_alive_duration_millis
# 最大连接数(长链接最大总数)
self.max_requests = max_requests
# 每个目标主机的最大连接数(分主机域名的长链接最大总数
self.max_requests_per_host = max_requests_per_host
def validate(self):
pass
def to_map(self):
result = dict()
if self.access_key_id is not None:
result['accessKeyId'] = self.access_key_id
if self.access_key_secret is not None:
result['accessKeySecret'] = self.access_key_secret
if self.security_token is not None:
result['securityToken'] = self.security_token
if self.protocol is not None:
result['protocol'] = self.protocol
if self.read_timeout is not None:
result['readTimeout'] = self.read_timeout
if self.connect_timeout is not None:
result['connectTimeout'] = self.connect_timeout
if self.http_proxy is not None:
result['httpProxy'] = self.http_proxy
if self.https_proxy is not None:
result['httpsProxy'] = self.https_proxy
if self.endpoint is not None:
result['endpoint'] = self.endpoint
if self.no_proxy is not None:
result['noProxy'] = self.no_proxy
if self.max_idle_conns is not None:
result['maxIdleConns'] = self.max_idle_conns
if self.user_agent is not None:
result['userAgent'] = self.user_agent
if self.socks_5proxy is not None:
result['socks5Proxy'] = self.socks_5proxy
if self.socks_5net_work is not None:
result['socks5NetWork'] = self.socks_5net_work
if self.max_idle_time_millis is not None:
result['maxIdleTimeMillis'] = self.max_idle_time_millis
if self.keep_alive_duration_millis is not None:
result['keepAliveDurationMillis'] = self.keep_alive_duration_millis
if self.max_requests is not None:
result['maxRequests'] = self.max_requests
if self.max_requests_per_host is not None:
result['maxRequestsPerHost'] = self.max_requests_per_host
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('accessKeyId') is not None:
self.access_key_id = m.get('accessKeyId')
if m.get('accessKeySecret') is not None:
self.access_key_secret = m.get('accessKeySecret')
if m.get('securityToken') is not None:
self.security_token = m.get('securityToken')
if m.get('protocol') is not None:
self.protocol = m.get('protocol')
if m.get('readTimeout') is not None:
self.read_timeout = m.get('readTimeout')
if m.get('connectTimeout') is not None:
self.connect_timeout = m.get('connectTimeout')
if m.get('httpProxy') is not None:
self.http_proxy = m.get('httpProxy')
if m.get('httpsProxy') is not None:
self.https_proxy = m.get('httpsProxy')
if m.get('endpoint') is not None:
self.endpoint = m.get('endpoint')
if m.get('noProxy') is not None:
self.no_proxy = m.get('noProxy')
if m.get('maxIdleConns') is not None:
self.max_idle_conns = m.get('maxIdleConns')
if m.get('userAgent') is not None:
self.user_agent = m.get('userAgent')
if m.get('socks5Proxy') is not None:
self.socks_5proxy = m.get('socks5Proxy')
if m.get('socks5NetWork') is not None:
self.socks_5net_work = m.get('socks5NetWork')
if m.get('maxIdleTimeMillis') is not None:
self.max_idle_time_millis = m.get('maxIdleTimeMillis')
if m.get('keepAliveDurationMillis') is not None:
self.keep_alive_duration_millis = m.get('keepAliveDurationMillis')
if m.get('maxRequests') is not None:
self.max_requests = m.get('maxRequests')
if m.get('maxRequestsPerHost') is not None:
self.max_requests_per_host = m.get('maxRequestsPerHost')
return self
class QueryLoadtestmarkRequest(TeaModel):
def __init__(
self,
auth_token: str = None,
product_instance_id: str = None,
time_limit: str = None,
):
# OAuth模式下的授权token
self.auth_token = auth_token
self.product_instance_id = product_instance_id
# 测试字段
self.time_limit = time_limit
def validate(self):
self.validate_required(self.time_limit, 'time_limit')
def to_map(self):
result = dict()
if self.auth_token is not None:
result['auth_token'] = self.auth_token
if self.product_instance_id is not None:
result['product_instance_id'] = self.product_instance_id
if self.time_limit is not None:
result['time_limit'] = self.time_limit
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('auth_token') is not None:
self.auth_token = m.get('auth_token')
if m.get('product_instance_id') is not None:
self.product_instance_id = m.get('product_instance_id')
if m.get('time_limit') is not None:
self.time_limit = m.get('time_limit')
return self
class QueryLoadtestmarkResponse(TeaModel):
def __init__(
self,
req_msg_id: str = None,
result_code: str = None,
result_msg: str = None,
):
# 请求唯一ID,用于链路跟踪和问题排查
self.req_msg_id = req_msg_id
# 结果码,一般OK表示调用成功
self.result_code = result_code
# 异常信息的文本描述
self.result_msg = result_msg
def validate(self):
pass
def to_map(self):
result = dict()
if self.req_msg_id is not None:
result['req_msg_id'] = self.req_msg_id
if self.result_code is not None:
result['result_code'] = self.result_code
if self.result_msg is not None:
result['result_msg'] = self.result_msg
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('req_msg_id') is not None:
self.req_msg_id = m.get('req_msg_id')
if m.get('result_code') is not None:
self.result_code = m.get('result_code')
if m.get('result_msg') is not None:
self.result_msg = m.get('result_msg')
return self
| true | true |
f7ff1edbffb05123c5d50c0f38fef92ed675cf97 | 2,298 | py | Python | fabtools/require/system.py | pahaz/fabtools | 6ccf400a6b0d2b0097e1f764822b6e45a6e48b88 | [
"BSD-2-Clause"
] | null | null | null | fabtools/require/system.py | pahaz/fabtools | 6ccf400a6b0d2b0097e1f764822b6e45a6e48b88 | [
"BSD-2-Clause"
] | null | null | null | fabtools/require/system.py | pahaz/fabtools | 6ccf400a6b0d2b0097e1f764822b6e45a6e48b88 | [
"BSD-2-Clause"
] | null | null | null | """
System settings
===============
"""
from __future__ import with_statement
from re import escape
from fabric.api import sudo, warn
from fabric.contrib.files import append, uncomment
from fabtools.files import is_file, watch
from fabtools.system import (
get_hostname, set_hostname,
get_sysctl, set_sysctl,
supported_locales,
)
def sysctl(key, value, persist=True):
    """
    Require a kernel parameter to have a specific value.

    Applies the value immediately via ``set_sysctl`` when it differs from
    the current one, and (when *persist* is true) writes it to a file under
    ``/etc/sysctl.d`` so it survives reboots.
    """
    if get_sysctl(key) != value:
        set_sysctl(key, value)
    if persist:
        # Deferred import to avoid a circular import at module load time.
        from fabtools import require
        filename = '/etc/sysctl.d/60-%s.conf' % key
        with watch(filename, use_sudo=True) as config:
            require.file(filename,
                contents='%(key)s = %(value)s\n' % locals(),
                use_sudo=True)
            # NOTE(review): ``config.changed`` is read *inside* the ``watch``
            # block; if fabtools only computes ``changed`` on __exit__, this
            # branch may never fire — confirm against fabtools.files.watch.
            if config.changed:
                sudo('service procps start')
def hostname(name):
    """
    Require the hostname to have a specific value.
    """
    current = get_hostname()
    if current != name:
        set_hostname(name)
def locales(names):
    """
    Require the list of locales to be available.

    Names not present in the system's supported locale list are skipped with
    a warning. Locales are regenerated only when the config file changed.
    """
    config_file = '/var/lib/locales/supported.d/local'
    # fall back to the alternate config location when the default file is absent
    if not is_file(config_file):
        config_file = '/etc/locale.gen'
    # Regenerate locales if config file changes
    with watch(config_file, use_sudo=True) as config:
        # Add valid locale names to the config file
        supported = dict(supported_locales())
        for name in names:
            if name in supported:
                charset = supported[name]
                locale = "%s %s" % (name, charset)
                # uncomment an existing (commented) entry, or append a new one
                uncomment(config_file, escape(locale), use_sudo=True)
                append(config_file, locale, use_sudo=True)
            else:
                warn('Unsupported locale name "%s"' % name)
    if config.changed:
        sudo('dpkg-reconfigure --frontend=noninteractive locales')
def locale(name):
    """
    Require the locale to be available.

    Thin convenience wrapper around :func:`locales` for a single name.
    """
    locales([name])
def default_locale(name):
    """
    Require the locale to be the default.
    """
    # imported lazily to avoid a circular import with fabtools.require
    from fabtools import require

    # The locale must exist before it can be made the default
    locale(name)

    # Persist it as the system-wide default
    require.file('/etc/default/locale', 'LANG="%s"\n' % name, use_sudo=True)
| 24.709677 | 69 | 0.614447 | from __future__ import with_statement
from re import escape
from fabric.api import sudo, warn
from fabric.contrib.files import append, uncomment
from fabtools.files import is_file, watch
from fabtools.system import (
get_hostname, set_hostname,
get_sysctl, set_sysctl,
supported_locales,
)
def sysctl(key, value, persist=True):
if get_sysctl(key) != value:
set_sysctl(key, value)
if persist:
from fabtools import require
filename = '/etc/sysctl.d/60-%s.conf' % key
with watch(filename, use_sudo=True) as config:
require.file(filename,
contents='%(key)s = %(value)s\n' % locals(),
use_sudo=True)
if config.changed:
sudo('service procps start')
def hostname(name):
if get_hostname() != name:
set_hostname(name)
def locales(names):
config_file = '/var/lib/locales/supported.d/local'
if not is_file(config_file):
config_file = '/etc/locale.gen'
with watch(config_file, use_sudo=True) as config:
supported = dict(supported_locales())
for name in names:
if name in supported:
charset = supported[name]
locale = "%s %s" % (name, charset)
uncomment(config_file, escape(locale), use_sudo=True)
append(config_file, locale, use_sudo=True)
else:
warn('Unsupported locale name "%s"' % name)
if config.changed:
sudo('dpkg-reconfigure --frontend=noninteractive locales')
def locale(name):
locales([name])
def default_locale(name):
from fabtools import require
locale(name)
contents = 'LANG="%s"\n' % name
require.file('/etc/default/locale', contents, use_sudo=True)
| true | true |
f7ff1f6d98d56b13bc8f5ee441e552b8127305bd | 5,062 | py | Python | utils/custom.py | hyyc554/drf_rbac | 258743114a5214684d223aff5859b0e2174a9968 | [
"MIT"
] | 24 | 2020-01-27T11:57:17.000Z | 2022-01-14T05:36:09.000Z | utils/custom.py | hyyc554/drf_rbac | 258743114a5214684d223aff5859b0e2174a9968 | [
"MIT"
] | 11 | 2020-07-08T05:55:26.000Z | 2022-01-13T02:12:55.000Z | utils/custom.py | hyyc554/drf_rbac | 258743114a5214684d223aff5859b0e2174a9968 | [
"MIT"
] | 9 | 2020-07-23T10:08:51.000Z | 2021-05-12T03:36:34.000Z | # @Time : 2019/1/13 11:28
# @Author : xufqing
# import celery, logging, redis
import logging
from rest_framework import serializers
from rest_framework.generics import ListAPIView
from rest_framework.pagination import PageNumberPagination
from rest_framework.permissions import BasePermission
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import exception_handler
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
error_logger = logging.getLogger('error')
info_logger = logging.getLogger('info')
def xops_exception_handler(exc, context):
    # Wrap DRF's default exception payload in a uniform envelope:
    # {"code": <http status>, "message": <成功/失败>, "detail": <original data>}
    response = exception_handler(exc, context)
    if response is not None:
        status = response.status_code
        response.data = {
            'code': status,
            'message': '失败' if status >= 400 else '成功',
            'detail': response.data,
        }
    return response
class CommonPagination(PageNumberPagination):
    '''
    Pagination settings: 10 items per page by default, with the page size
    overridable through the "size" query parameter.
    '''
    page_size = 10
    page_size_query_param = 'size'
class RbacPermission(BasePermission):
    """
    Permission check class for the role-based access control (RBAC) system.
    """
    @classmethod
    def get_permission_from_role(cls, request):
        # Collect the distinct permission "method" aliases granted by all of
        # the user's roles. Returns None when the user has no .roles attribute
        # (e.g. anonymous requests).
        try:
            perms = request.user.roles.values(
                'permissions__method',
            ).distinct()
            return [p['permissions__method'] for p in perms]
        except AttributeError:
            return None
    def has_permission(self, request, view):
        """
        Permission check logic.

        :param request: the incoming DRF request
        :param view: the view being accessed; may declare a ``perms_map``
        :return: True when access is granted; otherwise falls through and
                 implicitly returns None (treated as deny by DRF)
        """
        perms = self.get_permission_from_role(request)
        if perms:
            if 'admin' in perms:
                return True
            elif not hasattr(view, 'perms_map'):
                # views without a perms_map are open to any role-holding user
                return True
            else:
                perms_map = view.perms_map
                _method = request._request.method.lower()
                for i in perms_map:
                    for method, alias in i.items():
                        # a '*' key in the map matches any HTTP method
                        if (_method == method or method == '*') and alias in perms:
                            return True
class ObjPermission(BasePermission):
    '''
    Object-level permission control for password management.

    Admin users may access any object; other users may only access objects
    they own (the object's ``uid_id`` matches the requesting user's id).
    '''
    def has_object_permission(self, request, view, obj):
        perms = RbacPermission.get_permission_from_role(request)
        # perms is None for users without roles (e.g. anonymous requests);
        # the unguarded `'admin' in perms` raised TypeError in that case
        if perms and 'admin' in perms:
            return True
        elif request.user.id == obj.uid_id:
            return True
class TreeSerializer(serializers.Serializer):
    # Flat node representation used to build tree responses:
    # "label" mirrors the model's "name" field, "pid" is the parent's id.
    id = serializers.IntegerField()
    label = serializers.CharField(max_length=20, source='name')
    pid = serializers.PrimaryKeyRelatedField(read_only=True)
class TreeAPIView(ListAPIView):
    '''
    Custom tree-structure list view: returns the queryset as a nested tree,
    attaching each node to its parent's "children" list via the "pid" field.
    '''
    serializer_class = TreeSerializer
    authentication_classes = (JSONWebTokenAuthentication,)
    permission_classes = (IsAuthenticated,)
    def list(self, request, *args, **kwargs):
        queryset = self.filter_queryset(self.get_queryset())
        page = self.paginate_queryset(queryset)
        serializer = self.get_serializer(queryset, many=True)
        tree_dict = {}
        tree_data = []
        try:
            # index every node by id, then hang each node off its parent;
            # nodes without a pid become roots of the returned forest
            for item in serializer.data:
                tree_dict[item['id']] = item
            for i in tree_dict:
                if tree_dict[i]['pid']:
                    pid = tree_dict[i]['pid']
                    parent = tree_dict[pid]
                    parent.setdefault('children', []).append(tree_dict[i])
                else:
                    tree_data.append(tree_dict[i])
            results = tree_data
        except KeyError:
            # a node referenced a parent outside the queryset; fall back to
            # the flat serialized list
            results = serializer.data
        if page is not None:
            return self.get_paginated_response(results)
        return Response(results)
# class CeleryTools(object):
# '''
# Celery的一些工具
# '''
#
# def get_celery_worker_status(self):
# d = None
# try:
# insp = celery.task.control.inspect()
# if not insp.stats():
# d = '没有找到可用的celery workers.'
# except IOError as e:
# msg = '无法连接celery backend: ' + str(e)
# if len(e.args) > 0 and errorcode.get(e.args[0]) == 'ECONNREFUSED':
# msg += '请检查RabbitMQ是否运行.'
# d = msg
# except ImportError as e:
# d = str(e)
# return d
#
#
# class RedisObj(object):
# def __init__(self, host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=settings.REDIS_DB,
# password=settings.REDIS_PASSWORD):
# try:
# self.__conn = redis.StrictRedis(host=host, port=port, db=db, password=password,decode_responses=True)
# except Exception as e:
# msg = 'Redis连接失败,错误信息:%s' % e
# error_logger.error(msg)
# print(msg)
#
# def __getattr__(self, command):
# def _(*args):
# return getattr(self.__conn, command)(*args) # 重新组装方法调用
#
# return _
| 31.055215 | 115 | 0.600948 |
import logging
from rest_framework import serializers
from rest_framework.generics import ListAPIView
from rest_framework.pagination import PageNumberPagination
from rest_framework.permissions import BasePermission
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import exception_handler
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
error_logger = logging.getLogger('error')
info_logger = logging.getLogger('info')
def xops_exception_handler(exc, context):
response = exception_handler(exc, context)
if response is not None:
msg = '失败' if response.status_code >= 400 else '成功'
notification_response = {}
notification_response['code'] = response.status_code
notification_response['message'] = msg
notification_response['detail'] = response.data
response.data = notification_response
return response
class CommonPagination(PageNumberPagination):
page_size = 10
page_size_query_param = 'size'
class RbacPermission(BasePermission):
@classmethod
def get_permission_from_role(cls, request):
try:
perms = request.user.roles.values(
'permissions__method',
).distinct()
return [p['permissions__method'] for p in perms]
except AttributeError:
return None
def has_permission(self, request, view):
perms = self.get_permission_from_role(request)
if perms:
if 'admin' in perms:
return True
elif not hasattr(view, 'perms_map'):
return True
else:
perms_map = view.perms_map
_method = request._request.method.lower()
for i in perms_map:
for method, alias in i.items():
if (_method == method or method == '*') and alias in perms:
return True
class ObjPermission(BasePermission):
def has_object_permission(self, request, view, obj):
perms = RbacPermission.get_permission_from_role(request)
if 'admin' in perms:
return True
elif request.user.id == obj.uid_id:
return True
class TreeSerializer(serializers.Serializer):
id = serializers.IntegerField()
label = serializers.CharField(max_length=20, source='name')
pid = serializers.PrimaryKeyRelatedField(read_only=True)
class TreeAPIView(ListAPIView):
serializer_class = TreeSerializer
authentication_classes = (JSONWebTokenAuthentication,)
permission_classes = (IsAuthenticated,)
def list(self, request, *args, **kwargs):
queryset = self.filter_queryset(self.get_queryset())
page = self.paginate_queryset(queryset)
serializer = self.get_serializer(queryset, many=True)
tree_dict = {}
tree_data = []
try:
for item in serializer.data:
tree_dict[item['id']] = item
for i in tree_dict:
if tree_dict[i]['pid']:
pid = tree_dict[i]['pid']
parent = tree_dict[pid]
parent.setdefault('children', []).append(tree_dict[i])
else:
tree_data.append(tree_dict[i])
results = tree_data
except KeyError:
results = serializer.data
if page is not None:
return self.get_paginated_response(results)
return Response(results)
# Celery的一些工具
# '''
| true | true |
f7ff1fb6723cb9c2d11d65934096f062bdf1f1c9 | 2,635 | py | Python | main.py | rtspeaks360/exchange_rates_extractor | 7eb17da1b0c06ae084e22e8f0668accca9541dee | [
"MIT"
] | null | null | null | main.py | rtspeaks360/exchange_rates_extractor | 7eb17da1b0c06ae084e22e8f0668accca9541dee | [
"MIT"
] | null | null | null | main.py | rtspeaks360/exchange_rates_extractor | 7eb17da1b0c06ae084e22e8f0668accca9541dee | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# @Author: rish
# @Date: 2020-08-04 00:16:57
# @Last Modified by: rish
# @Last Modified time: 2020-08-10 12:14:31
### Imports START
import os
import sys
import time
import logging
import parser
### Imports END
# Logger settings
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
# Get script name and extract script path.
script_name = sys.argv[0]
script_path = script_name[:-8]
# Get arguments received
args = parser.parser_args()
FIRST = True
if args.env == 'prod':
logger.info('prod environment')
os.environ['ENV-INDICATOR'] = 'PROD'
os.environ['SCPATH'] = script_path
# Activate virtual environment with installed dependencies
# activate_this = script_path + 'env/bin/activate_this.py'
# with open(activate_this) as file_:
# exec(file_.read(), dict(__file__=activate_this))
# Use project directory
sys.path.insert(0, script_path)
else:
os.environ['ENV-INDICATOR'] = 'DEV'
os.environ['SCPATH'] = script_path
from er_extractor import core as er_extractor
from er_dashboard.core import app
# [START Main function for the pipeline]
def main(args):
    '''
    Main function for the extraction pipeline as well as the exploration
    dashboard.

    Dispatches on ``args.run_as``: "extractor" runs the exchange-rate
    extraction pipeline, "dashboard" serves the exploration web app
    (initializing the DB schema first on the initial run inside Docker),
    and ``args.initdb`` only sets up the database schema.

    Args:
        - args: parsed command-line namespace from parser.parser_args()
    Returns:
        - None
    '''
    global FIRST
    if args.run_as == 'extractor':
        logger.info('Running application as extractor process')
        logger.info('')
        er_extractor.get_exchange_rates(
            args.get_data_by, args.start_date, args.end_date,
            args.num_of_threads, args.multithreading,
            args.multithreading_after
        )
    elif args.run_as == 'dashboard':
        # simplified from os.environ.__contains__(...) + equality check
        if os.environ.get('DOCKER') == 'True' and FIRST is True:
            logger.info('Waiting for DB Container to initialize.')
            time.sleep(2)
            logger.info('Setting up schema')
            er_extractor.utils.initdb()
            FIRST = False
        # bug fix: this branch previously logged "extractor" via the root
        # logging module instead of the module logger
        logger.info('Running application as dashboard process')
        logger.info('')
        app.run(host='0.0.0.0', port=8000)
    elif args.initdb:
        er_extractor.utils.initdb()
    else:
        logger.warning('Invalid `run_as` argument.')
    return
# [END]
if __name__ == '__main__':
    # Record wall-clock start time so total runtime can be reported
    process_start = time.time()

    logger.info('Your namespace - ' + str(args))
    logger.info('')

    # Run the selected pipeline / dashboard
    main(args)

    elapsed = time.time() - process_start
    mins = int(elapsed / 60)
    secs = int(elapsed % 60)
    logger.info(
        'Total time consumed: {mins} minutes {secs} seconds'
        .format(mins=mins, secs=secs)
    )
    logger.info('')
    logger.info('-*-*-*-*-*-*-*-*-*-*-*-*-END-*-*-*-*-*-*-*-*-*-*-*-*-')
    logger.info('')
| 22.330508 | 69 | 0.69222 |
me
import logging
import parser
vel=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
script_name = sys.argv[0]
script_path = script_name[:-8]
args = parser.parser_args()
FIRST = True
if args.env == 'prod':
logger.info('prod environment')
os.environ['ENV-INDICATOR'] = 'PROD'
os.environ['SCPATH'] = script_path
sys.path.insert(0, script_path)
else:
os.environ['ENV-INDICATOR'] = 'DEV'
os.environ['SCPATH'] = script_path
from er_extractor import core as er_extractor
from er_dashboard.core import app
def main(args):
global FIRST
if args.run_as == 'extractor':
logger.info('Running application as extractor process')
logger.info('')
er_extractor.get_exchange_rates(
args.get_data_by, args.start_date, args.end_date,
args.num_of_threads, args.multithreading,
args.multithreading_after
)
elif args.run_as == 'dashboard':
if os.environ.__contains__('DOCKER')\
and os.environ['DOCKER'] == 'True'\
and FIRST is True:
logger.info('Waiting for DB Container to initialize.')
time.sleep(2)
logger.info('Setting up schema')
er_extractor.utils.initdb()
FIRST = False
logging.info('Running application as extractor process')
logger.info('')
app.run(host='0.0.0.0', port=8000)
elif args.initdb:
er_extractor.utils.initdb()
else:
logger.warning('Invalid `run_as` argument.')
return
if __name__ == '__main__':
process_start = time.time()
logger.info('Your namespace - ' + str(args))
logger.info('')
main(args)
process_time = time.time() - process_start
mins = int(process_time / 60)
secs = int(process_time % 60)
logger.info(
'Total time consumed: {mins} minutes {secs} seconds'
.format(mins=mins, secs=secs)
)
logger.info('')
logger.info('-*-*-*-*-*-*-*-*-*-*-*-*-END-*-*-*-*-*-*-*-*-*-*-*-*-')
logger.info('')
| true | true |
f7ff202c924fee2e44ab31ff578cc03c2e91730d | 31,736 | py | Python | shap/explainers/_partition.py | aaronwtr/shap | 5a7b3740a6eccd772bcc3450dee3188487c18104 | [
"MIT"
] | null | null | null | shap/explainers/_partition.py | aaronwtr/shap | 5a7b3740a6eccd772bcc3450dee3188487c18104 | [
"MIT"
] | null | null | null | shap/explainers/_partition.py | aaronwtr/shap | 5a7b3740a6eccd772bcc3450dee3188487c18104 | [
"MIT"
] | null | null | null | import types
import copy
import inspect
from ..utils import MaskedModel
import numpy as np
import warnings
import time
from tqdm.auto import tqdm
import queue
from ..utils import assert_import, record_import_error, safe_isinstance, make_masks, OpChain
from .. import Explanation
from .. import maskers
from ._explainer import Explainer
from .. import links
import cloudpickle
import pickle
from ..maskers import Masker
from ..models import Model
from numba import jit
# .shape[0] messes up pylint a lot here
# pylint: disable=unsubscriptable-object
class Partition(Explainer):
    def __init__(self, model, masker, *, output_names=None, link=links.identity, linearize_link=True,
                 feature_names=None, **call_args):
        """ Uses the Partition SHAP method to explain the output of any function.

        Partition SHAP computes Shapley values recursively through a hierarchy of features, this
        hierarchy defines feature coalitions and results in the Owen values from game theory. The
        PartitionExplainer has two particularly nice properties: 1) PartitionExplainer is
        model-agnostic but when using a balanced partition tree only has quadratic exact runtime
        (in terms of the number of input features). This is in contrast to the exponential exact
        runtime of KernelExplainer or SamplingExplainer. 2) PartitionExplainer always assigns to groups of
        correlated features the credit that set of features would have had if treated as a group. This
        means if the hierarchical clustering given to PartitionExplainer groups correlated features
        together, then feature correlations are "accounted for" ... in the sense that the total credit assigned
        to a group of tightly dependent features does not depend on how they behave if their correlation
        structure was broken during the explanation's perturbation process. Note that for linear models
        the Owen values that PartitionExplainer returns are the same as the standard non-hierarchical
        Shapley values.

        Parameters
        ----------
        model : function
            User supplied function that takes a matrix of samples (# samples x # features) and
            computes the output of the model for those samples.

        masker : function or numpy.array or pandas.DataFrame or tokenizer
            The function used to "mask" out hidden features of the form `masker(mask, x)`. It takes a
            single input sample and a binary mask and returns a matrix of masked samples. These
            masked samples will then be evaluated using the model function and the outputs averaged.
            As a shortcut for the standard masking used by SHAP you can pass a background data matrix
            instead of a function and that matrix will be used for masking. Domain specific masking
            functions are available in shap such as shap.maskers.Image for images and shap.maskers.Text
            for text.

        partition_tree : None or function or numpy.array
            A hierarchical clustering of the input features represented by a matrix that follows the format
            used by scipy.cluster.hierarchy (see the notebooks_html/partition_explainer directory an example).
            If this is a function then the function produces a clustering matrix when given a single input
            example. If you are using a standard SHAP masker object then you can pass masker.clustering
            to use that masker's built-in clustering of the features, or if partition_tree is None then
            masker.clustering will be used by default.

        Examples
        --------
        See `Partition explainer examples <https://shap.readthedocs.io/en/latest/api_examples/explainers/Partition.html>`_
        """

        super().__init__(model, masker, link=link, linearize_link=linearize_link, algorithm="partition", \
                         output_names = output_names, feature_names=feature_names)

        # convert dataframes
        # if safe_isinstance(masker, "pandas.core.frame.DataFrame"):
        #     masker = TabularMasker(masker)
        # elif safe_isinstance(masker, "numpy.ndarray") and len(masker.shape) == 2:
        #     masker = TabularMasker(masker)
        # elif safe_isinstance(masker, "transformers.PreTrainedTokenizer"):
        #     masker = TextMasker(masker)
        # self.masker = masker

        # TODO: maybe? if we have a tabular masker then we build a PermutationExplainer that we
        # will use for sampling
        # shape of a single input sample (None when the masker exposes no fixed shape)
        self.input_shape = masker.shape[1:] if hasattr(masker, "shape") and not callable(masker.shape) else None
        # self.output_names = output_names
        # wrap plain callables so the model exposes the shap.models.Model interface
        if not safe_isinstance(self.model, "shap.models.Model"):
            self.model = Model(self.model)#lambda *args: np.array(model(*args))
        self.expected_value = None
        # cached base value; reused across rows when the masker has a fixed background
        self._curr_base_value = None
        if getattr(self.masker, "clustering", None) is None:
            raise ValueError("The passed masker must have a .clustering attribute defined! Try shap.maskers.Partition(data) for example.")

        # if partition_tree is None:
        #     if not hasattr(masker, "partition_tree"):
        #         raise ValueError("The passed masker does not have masker.clustering, so the partition_tree must be passed!")
        #     self.partition_tree = masker.clustering
        # else:
        #     self.partition_tree = partition_tree

        # handle higher dimensional tensor inputs
        if self.input_shape is not None and len(self.input_shape) > 1:
            self._reshaped_model = lambda x: self.model(x.reshape(x.shape[0], *self.input_shape))
        else:
            self._reshaped_model = self.model

        # if we don't have a dynamic clustering algorithm then we can precompute
        # a lot of information
        if not callable(self.masker.clustering):
            self._clustering = self.masker.clustering
            self._mask_matrix = make_masks(self._clustering)

        # if we have gotten default arguments for the call function we need to wrap ourselves in a new class that
        # has a call function with those new default arguments
        if len(call_args) > 0:
            # dynamically subclass so the new defaults live in the subclass's __call__ signature
            class Partition(self.__class__):
                # this signature should match the __call__ signature of the class defined below
                def __call__(self, *args, max_evals=500, fixed_context=None, main_effects=False, error_bounds=False, batch_size="auto",
                             outputs=None, silent=False):
                    return super().__call__(
                        *args, max_evals=max_evals, fixed_context=fixed_context, main_effects=main_effects, error_bounds=error_bounds,
                        batch_size=batch_size, outputs=outputs, silent=silent
                    )
            Partition.__call__.__doc__ = self.__class__.__call__.__doc__
            self.__class__ = Partition
            # override the keyword-only defaults with the user supplied values
            for k, v in call_args.items():
                self.__call__.__kwdefaults__[k] = v
# note that changes to this function signature should be copied to the default call argument wrapper above
def __call__(self, *args, max_evals=500, fixed_context=None, main_effects=False, error_bounds=False, batch_size="auto",
outputs=None, silent=False):
""" Explain the output of the model on the given arguments.
"""
return super().__call__(
*args, max_evals=max_evals, fixed_context=fixed_context, main_effects=main_effects, error_bounds=error_bounds, batch_size=batch_size,
outputs=outputs, silent=silent
)
def explain_row(self, *row_args, max_evals, main_effects, error_bounds, batch_size, outputs, silent, fixed_context = "auto"):
""" Explains a single row and returns the tuple (row_values, row_expected_values, row_mask_shapes).
"""
if fixed_context == "auto":
# if isinstance(self.masker, maskers.Text):
# fixed_context = 1 # we err on the side of speed for text models
# else:
fixed_context = None
elif fixed_context not in [0, 1, None]:
raise Exception("Unknown fixed_context value passed (must be 0, 1 or None): %s" %fixed_context)
# build a masked version of the model for the current input sample
fm = MaskedModel(self.model, self.masker, self.link, self.linearize_link, *row_args)
# make sure we have the base value and current value outputs
M = len(fm)
m00 = np.zeros(M, dtype=np.bool)
# if not fixed background or no base value assigned then compute base value for a row
if self._curr_base_value is None or not getattr(self.masker, "fixed_background", False):
self._curr_base_value = fm(m00.reshape(1, -1), zero_index=0)[0] # the zero index param tells the masked model what the baseline is
f11 = fm(~m00.reshape(1, -1))[0]
if callable(self.masker.clustering):
self._clustering = self.masker.clustering(*row_args)
self._mask_matrix = make_masks(self._clustering)
if hasattr(self._curr_base_value, 'shape') and len(self._curr_base_value.shape) > 0:
if outputs is None:
outputs = np.arange(len(self._curr_base_value))
elif isinstance(outputs, OpChain):
outputs = outputs.apply(Explanation(f11)).values
out_shape = (2*self._clustering.shape[0]+1, len(outputs))
else:
out_shape = (2*self._clustering.shape[0]+1,)
if max_evals == "auto":
max_evals = 500
self.values = np.zeros(out_shape)
self.dvalues = np.zeros(out_shape)
self.owen(fm, self._curr_base_value, f11, max_evals - 2, outputs, fixed_context, batch_size, silent)
# if False:
# if self.multi_output:
# return [self.dvalues[:,i] for i in range(self.dvalues.shape[1])], oinds
# else:
# return self.dvalues.copy(), oinds
# else:
# drop the interaction terms down onto self.values
self.values[:] = self.dvalues
lower_credit(len(self.dvalues) - 1, 0, M, self.values, self._clustering)
return {
"values": self.values[:M].copy(),
"expected_values": self._curr_base_value if outputs is None else self._curr_base_value[outputs],
"mask_shapes": [s + out_shape[1:] for s in fm.mask_shapes],
"main_effects": None,
"hierarchical_values": self.dvalues.copy(),
"clustering": self._clustering,
"output_indices": outputs,
"output_names": getattr(self.model, "output_names", None)
}
def __str__(self):
return "shap.explainers.Partition()"
def owen(self, fm, f00, f11, max_evals, output_indexes, fixed_context, batch_size, silent):
""" Compute a nested set of recursive Owen values based on an ordering recursion.
"""
#f = self._reshaped_model
#r = self.masker
#masks = np.zeros(2*len(inds)+1, dtype=np.int)
M = len(fm)
m00 = np.zeros(M, dtype=np.bool)
#f00 = fm(m00.reshape(1,-1))[0]
base_value = f00
#f11 = fm(~m00.reshape(1,-1))[0]
#f11 = self._reshaped_model(r(~m00, x)).mean(0)
ind = len(self.dvalues)-1
# make sure output_indexes is a list of indexes
if output_indexes is not None:
# assert self.multi_output, "output_indexes is only valid for multi-output models!"
# inds = output_indexes.apply(f11, 0)
# out_len = output_indexes_len(output_indexes)
# if output_indexes.startswith("max("):
# output_indexes = np.argsort(-f11)[:out_len]
# elif output_indexes.startswith("min("):
# output_indexes = np.argsort(f11)[:out_len]
# elif output_indexes.startswith("max(abs("):
# output_indexes = np.argsort(np.abs(f11))[:out_len]
f00 = f00[output_indexes]
f11 = f11[output_indexes]
q = queue.PriorityQueue()
q.put((0, 0, (m00, f00, f11, ind, 1.0)))
eval_count = 0
total_evals = min(max_evals, (M-1)*M) # TODO: (M-1)*M is only right for balanced clusterings, but this is just for plotting progress...
pbar = None
start_time = time.time()
while not q.empty():
# if we passed our execution limit then leave everything else on the internal nodes
if eval_count >= max_evals:
while not q.empty():
m00, f00, f11, ind, weight = q.get()[2]
self.dvalues[ind] += (f11 - f00) * weight
break
# create a batch of work to do
batch_args = []
batch_masks = []
while not q.empty() and len(batch_masks) < batch_size and eval_count + len(batch_masks) < max_evals:
# get our next set of arguments
m00, f00, f11, ind, weight = q.get()[2]
# get the left and right children of this cluster
lind = int(self._clustering[ind-M, 0]) if ind >= M else -1
rind = int(self._clustering[ind-M, 1]) if ind >= M else -1
# get the distance of this cluster's children
if ind < M:
distance = -1
else:
if self._clustering.shape[1] >= 3:
distance = self._clustering[ind-M, 2]
else:
distance = 1
# check if we are a leaf node (or other negative distance cluster) and so should terminate our decent
if distance < 0:
self.dvalues[ind] += (f11 - f00) * weight
continue
# build the masks
m10 = m00.copy() # we separate the copy from the add so as to not get converted to a matrix
m10[:] += self._mask_matrix[lind, :]
m01 = m00.copy()
m01[:] += self._mask_matrix[rind, :]
batch_args.append((m00, m10, m01, f00, f11, ind, lind, rind, weight))
batch_masks.append(m10)
batch_masks.append(m01)
batch_masks = np.array(batch_masks)
# run the batch
if len(batch_args) > 0:
fout = fm(batch_masks)
if output_indexes is not None:
fout = fout[:,output_indexes]
eval_count += len(batch_masks)
if pbar is None and time.time() - start_time > 5:
pbar = tqdm(total=total_evals, disable=silent, leave=False)
pbar.update(eval_count)
if pbar is not None:
pbar.update(len(batch_masks))
# use the results of the batch to add new nodes
for i in range(len(batch_args)):
m00, m10, m01, f00, f11, ind, lind, rind, weight = batch_args[i]
# get the evaluated model output on the two new masked inputs
f10 = fout[2*i]
f01 = fout[2*i+1]
new_weight = weight
if fixed_context is None:
new_weight /= 2
elif fixed_context == 0:
self.dvalues[ind] += (f11 - f10 - f01 + f00) * weight # leave the interaction effect on the internal node
elif fixed_context == 1:
self.dvalues[ind] -= (f11 - f10 - f01 + f00) * weight # leave the interaction effect on the internal node
if fixed_context is None or fixed_context == 0:
# recurse on the left node with zero context
args = (m00, f00, f10, lind, new_weight)
q.put((-np.max(np.abs(f10 - f00)) * new_weight, np.random.randn(), args))
# recurse on the right node with zero context
args = (m00, f00, f01, rind, new_weight)
q.put((-np.max(np.abs(f01 - f00)) * new_weight, np.random.randn(), args))
if fixed_context is None or fixed_context == 1:
# recurse on the left node with one context
args = (m01, f01, f11, lind, new_weight)
q.put((-np.max(np.abs(f11 - f01)) * new_weight, np.random.randn(), args))
# recurse on the right node with one context
args = (m10, f10, f11, rind, new_weight)
q.put((-np.max(np.abs(f11 - f10)) * new_weight, np.random.randn(), args))
if pbar is not None:
pbar.close()
self.last_eval_count = eval_count
return output_indexes, base_value
    def owen3(self, fm, f00, f11, max_evals, output_indexes, fixed_context, batch_size, silent):
        """ Compute a nested set of recursive Owen values based on an ordering recursion.

        Unlike ``owen``, this variant carries a per-node recursion ``context``
        (which side of each split is held fixed: 0, 1, or None) through the
        priority queue, and may temporarily ignore a fixed context for a
        subtree when the remaining evaluation budget allows exploring both
        sides. Credit for every hierarchy node accumulates into
        ``self.dvalues``; returns ``(output_indexes, base_value)``.

        Parameters mirror ``owen``: ``fm`` is the masked model for this row,
        ``f00``/``f11`` are the all-masked / all-unmasked model outputs,
        ``max_evals`` bounds the number of model evaluations,
        ``output_indexes`` optionally restricts multi-output models, and
        ``batch_size``/``silent`` control batching and progress display.
        """
        #f = self._reshaped_model
        #r = self.masker
        #masks = np.zeros(2*len(inds)+1, dtype=np.int)
        M = len(fm)
        m00 = np.zeros(M, dtype=np.bool)  # all-features-masked starting mask
        #f00 = fm(m00.reshape(1,-1))[0]
        base_value = f00
        #f11 = fm(~m00.reshape(1,-1))[0]
        #f11 = self._reshaped_model(r(~m00, x)).mean(0)
        ind = len(self.dvalues)-1  # index of the partition tree's root node
        # make sure output_indexes is a list of indexes
        if output_indexes is not None:
            # assert self.multi_output, "output_indexes is only valid for multi-output models!"
            # inds = output_indexes.apply(f11, 0)
            # out_len = output_indexes_len(output_indexes)
            # if output_indexes.startswith("max("):
            #     output_indexes = np.argsort(-f11)[:out_len]
            # elif output_indexes.startswith("min("):
            #     output_indexes = np.argsort(f11)[:out_len]
            # elif output_indexes.startswith("max(abs("):
            #     output_indexes = np.argsort(np.abs(f11))[:out_len]
            f00 = f00[output_indexes]
            f11 = f11[output_indexes]
        # our starting plan is to evaluate all the nodes with a fixed_context
        evals_planned = M
        # the queue orders nodes by (negative) impact so the most influential
        # splits are expanded first; the random tiebreaker avoids comparing
        # the (uncomparable) argument tuples when priorities are equal
        q = queue.PriorityQueue()
        q.put((0, 0, (m00, f00, f11, ind, 1.0, fixed_context))) # (m00, f00, f11, tree_index, weight, context)
        eval_count = 0
        total_evals = min(max_evals, (M-1)*M) # TODO: (M-1)*M is only right for balanced clusterings, but this is just for plotting progress...
        pbar = None
        start_time = time.time()
        while not q.empty():
            # if we passed our execution limit then leave everything else on the internal nodes
            if eval_count >= max_evals:
                while not q.empty():
                    m00, f00, f11, ind, weight, _ = q.get()[2]
                    self.dvalues[ind] += (f11 - f00) * weight
                break
            # create a batch of work to do
            batch_args = []
            batch_masks = []
            while not q.empty() and len(batch_masks) < batch_size and eval_count < max_evals:
                # get our next set of arguments
                m00, f00, f11, ind, weight, context = q.get()[2]
                # get the left and right children of this cluster
                lind = int(self._clustering[ind-M, 0]) if ind >= M else -1
                rind = int(self._clustering[ind-M, 1]) if ind >= M else -1
                # get the distance of this cluster's children
                if ind < M:
                    distance = -1
                else:
                    distance = self._clustering[ind-M, 2]
            # check if we are a leaf node (or other negative distance cluster) and so should terminate our decent
                if distance < 0:
                    self.dvalues[ind] += (f11 - f00) * weight
                    continue
                # build the masks
                m10 = m00.copy() # we separate the copy from the add so as to not get converted to a matrix
                m10[:] += self._mask_matrix[lind, :]
                m01 = m00.copy()
                m01[:] += self._mask_matrix[rind, :]
                batch_args.append((m00, m10, m01, f00, f11, ind, lind, rind, weight, context))
                batch_masks.append(m10)
                batch_masks.append(m01)
            batch_masks = np.array(batch_masks)
            # run the batch
            if len(batch_args) > 0:
                fout = fm(batch_masks)
                if output_indexes is not None:
                    fout = fout[:,output_indexes]
            eval_count += len(batch_masks)
            if pbar is None and time.time() - start_time > 5:
                pbar = tqdm(total=total_evals, disable=silent, leave=False)
                pbar.update(eval_count)
            if pbar is not None:
                pbar.update(len(batch_masks))
            # use the results of the batch to add new nodes
            for i in range(len(batch_args)):
                m00, m10, m01, f00, f11, ind, lind, rind, weight, context = batch_args[i]
                # get the the number of leaves in this cluster
                if ind < M:
                    num_leaves = 0
                else:
                    num_leaves = self._clustering[ind-M, 3]
                # get the evaluated model output on the two new masked inputs
                f10 = fout[2*i]
                f01 = fout[2*i+1]
                # see if we have enough evaluations left to get both sides of a fixed context
                if max_evals - evals_planned > num_leaves:
                    evals_planned += num_leaves
                    ignore_context = True
                else:
                    ignore_context = False
                new_weight = weight
                if context is None or ignore_context:
                    new_weight /= 2
                if context is None or context == 0 or ignore_context:
                    self.dvalues[ind] += (f11 - f10 - f01 + f00) * weight # leave the interaction effect on the internal node
                    # recurse on the left node with zero context, flip the context for all decendents if we are ignoring it
                    args = (m00, f00, f10, lind, new_weight, 0 if context == 1 else context)
                    q.put((-np.max(np.abs(f10 - f00)) * new_weight, np.random.randn(), args))
                    # recurse on the right node with zero context, flip the context for all decendents if we are ignoring it
                    args = (m00, f00, f01, rind, new_weight, 0 if context == 1 else context)
                    q.put((-np.max(np.abs(f01 - f00)) * new_weight, np.random.randn(), args))
                if context is None or context == 1 or ignore_context:
                    self.dvalues[ind] -= (f11 - f10 - f01 + f00) * weight # leave the interaction effect on the internal node
                    # recurse on the left node with one context, flip the context for all decendents if we are ignoring it
                    args = (m01, f01, f11, lind, new_weight, 1 if context == 0 else context)
                    q.put((-np.max(np.abs(f11 - f01)) * new_weight, np.random.randn(), args))
                    # recurse on the right node with one context, flip the context for all decendents if we are ignoring it
                    args = (m10, f10, f11, rind, new_weight, 1 if context == 0 else context)
                    q.put((-np.max(np.abs(f11 - f10)) * new_weight, np.random.randn(), args))
        if pbar is not None:
            pbar.close()
        self.last_eval_count = eval_count
        return output_indexes, base_value
# def owen2(self, fm, f00, f11, max_evals, output_indexes, fixed_context, batch_size, silent):
# """ Compute a nested set of recursive Owen values based on an ordering recursion.
# """
# #f = self._reshaped_model
# #r = self.masker
# #masks = np.zeros(2*len(inds)+1, dtype=np.int)
# M = len(fm)
# m00 = np.zeros(M, dtype=np.bool)
# #f00 = fm(m00.reshape(1,-1))[0]
# base_value = f00
# #f11 = fm(~m00.reshape(1,-1))[0]
# #f11 = self._reshaped_model(r(~m00, x)).mean(0)
# ind = len(self.dvalues)-1
# # make sure output_indexes is a list of indexes
# if output_indexes is not None:
# # assert self.multi_output, "output_indexes is only valid for multi-output models!"
# # inds = output_indexes.apply(f11, 0)
# # out_len = output_indexes_len(output_indexes)
# # if output_indexes.startswith("max("):
# # output_indexes = np.argsort(-f11)[:out_len]
# # elif output_indexes.startswith("min("):
# # output_indexes = np.argsort(f11)[:out_len]
# # elif output_indexes.startswith("max(abs("):
# # output_indexes = np.argsort(np.abs(f11))[:out_len]
# f00 = f00[output_indexes]
# f11 = f11[output_indexes]
# fc_owen(m00, m11, 1)
# fc_owen(m00, m11, 0)
# def fc_owen(m00, m11, context):
# # recurse on the left node with zero context
# args = (m00, f00, f10, lind, new_weight)
# q.put((-np.max(np.abs(f10 - f00)) * new_weight, np.random.randn(), args))
# # recurse on the right node with zero context
# args = (m00, f00, f01, rind, new_weight)
# q.put((-np.max(np.abs(f01 - f00)) * new_weight, np.random.randn(), args))
# fc_owen(m00, m11, 1)
# m00 m11
# owen(fc=1)
# owen(fc=0)
# q = queue.PriorityQueue()
# q.put((0, 0, (m00, f00, f11, ind, 1.0, 1)))
# eval_count = 0
# total_evals = min(max_evals, (M-1)*M) # TODO: (M-1)*M is only right for balanced clusterings, but this is just for plotting progress...
# pbar = None
# start_time = time.time()
# while not q.empty():
# # if we passed our execution limit then leave everything else on the internal nodes
# if eval_count >= max_evals:
# while not q.empty():
# m00, f00, f11, ind, weight, _ = q.get()[2]
# self.dvalues[ind] += (f11 - f00) * weight
# break
# # create a batch of work to do
# batch_args = []
# batch_masks = []
# while not q.empty() and len(batch_masks) < batch_size and eval_count < max_evals:
# # get our next set of arguments
# m00, f00, f11, ind, weight, context = q.get()[2]
# # get the left and right children of this cluster
# lind = int(self._clustering[ind-M, 0]) if ind >= M else -1
# rind = int(self._clustering[ind-M, 1]) if ind >= M else -1
# # get the distance of this cluster's children
# if ind < M:
# distance = -1
# else:
# if self._clustering.shape[1] >= 3:
# distance = self._clustering[ind-M, 2]
# else:
# distance = 1
# # check if we are a leaf node (or other negative distance cluster) and so should terminate our decent
# if distance < 0:
# self.dvalues[ind] += (f11 - f00) * weight
# continue
# # build the masks
# m10 = m00.copy() # we separate the copy from the add so as to not get converted to a matrix
# m10[:] += self._mask_matrix[lind, :]
# m01 = m00.copy()
# m01[:] += self._mask_matrix[rind, :]
# batch_args.append((m00, m10, m01, f00, f11, ind, lind, rind, weight, context))
# batch_masks.append(m10)
# batch_masks.append(m01)
# batch_masks = np.array(batch_masks)
# # run the batch
# if len(batch_args) > 0:
# fout = fm(batch_masks)
# if output_indexes is not None:
# fout = fout[:,output_indexes]
# eval_count += len(batch_masks)
# if pbar is None and time.time() - start_time > 5:
# pbar = tqdm(total=total_evals, disable=silent, leave=False)
# pbar.update(eval_count)
# if pbar is not None:
# pbar.update(len(batch_masks))
# # use the results of the batch to add new nodes
# for i in range(len(batch_args)):
# m00, m10, m01, f00, f11, ind, lind, rind, weight, context = batch_args[i]
# # get the evaluated model output on the two new masked inputs
# f10 = fout[2*i]
# f01 = fout[2*i+1]
# new_weight = weight
# if fixed_context is None:
# new_weight /= 2
# elif fixed_context == 0:
# self.dvalues[ind] += (f11 - f10 - f01 + f00) * weight # leave the interaction effect on the internal node
# elif fixed_context == 1:
# self.dvalues[ind] -= (f11 - f10 - f01 + f00) * weight # leave the interaction effect on the internal node
# if fixed_context is None or fixed_context == 0:
# self.dvalues[ind] += (f11 - f10 - f01 + f00) * weight # leave the interaction effect on the internal node
# # recurse on the left node with zero context
# args = (m00, f00, f10, lind, new_weight)
# q.put((-np.max(np.abs(f10 - f00)) * new_weight, np.random.randn(), args))
# # recurse on the right node with zero context
# args = (m00, f00, f01, rind, new_weight)
# q.put((-np.max(np.abs(f01 - f00)) * new_weight, np.random.randn(), args))
# if fixed_context is None or fixed_context == 1:
# self.dvalues[ind] -= (f11 - f10 - f01 + f00) * weight # leave the interaction effect on the internal node
# # recurse on the left node with one context
# args = (m01, f01, f11, lind, new_weight)
# q.put((-np.max(np.abs(f11 - f01)) * new_weight, np.random.randn(), args))
# # recurse on the right node with one context
# args = (m10, f10, f11, rind, new_weight)
# q.put((-np.max(np.abs(f11 - f10)) * new_weight, np.random.randn(), args))
# if pbar is not None:
# pbar.close()
# return output_indexes, base_value
def output_indexes_len(output_indexes):
    """Return the number of outputs selected by an ``output_indexes`` spec.

    ``output_indexes`` may be either a plain sequence of output indexes
    (in which case its length is returned) or a string specification of
    the form ``"max(k)"``, ``"min(k)"`` or ``"max(abs(k))"``, in which
    case the embedded integer ``k`` is returned.  An unrecognized string
    yields ``None`` (matching the historical implicit behavior).

    Fixes two unreachable branches of the original: the ``isinstance``
    test must come first (non-strings have no ``.startswith`` and raised
    AttributeError), and ``"max(abs("`` must be tested before ``"max("``
    because the latter is a prefix of the former (so ``"max(abs(k))"``
    previously raised ValueError on ``int("abs(k)")``).
    """
    if not isinstance(output_indexes, str):
        # a plain sequence of indexes selects exactly its own length
        return len(output_indexes)
    if output_indexes.startswith("max(abs("):
        # must be checked before "max(" which is a prefix of this form
        return int(output_indexes[8:-2])
    if output_indexes.startswith("max("):
        return int(output_indexes[4:-1])
    if output_indexes.startswith("min("):
        return int(output_indexes[4:-1])
    return None  # unrecognized string specification
@jit
def lower_credit(i, value, M, values, clustering):
    """Recursively push credit accumulated on node ``i`` down to the leaves.

    ``values[i]`` absorbs the incoming ``value``; for internal nodes the
    resulting total is then split between the two children in proportion
    to the number of leaves (column 3 of ``clustering``) on each side.
    ``M`` is the number of leaves, so indexes below ``M`` are leaf nodes.
    """
    # leaves simply absorb whatever credit reaches them
    if i < M:
        values[i] += value
        return
    # internal node: look up its children and their subtree sizes
    left = int(clustering[i-M, 0])
    right = int(clustering[i-M, 1])
    group_size = int(clustering[i-M, 3])
    left_size = 1 if left < M else int(clustering[left-M, 3])
    right_size = 1 if right < M else int(clustering[right-M, 3])
    assert left_size + right_size == group_size
    values[i] += value
    # distribute this node's accumulated credit proportionally to each side
    lower_credit(left, values[i] * left_size / group_size, M, values, clustering)
    lower_credit(right, values[i] * right_size / group_size, M, values, clustering)
import copy
import inspect
from ..utils import MaskedModel
import numpy as np
import warnings
import time
from tqdm.auto import tqdm
import queue
from ..utils import assert_import, record_import_error, safe_isinstance, make_masks, OpChain
from .. import Explanation
from .. import maskers
from ._explainer import Explainer
from .. import links
import cloudpickle
import pickle
from ..maskers import Masker
from ..models import Model
from numba import jit
class Partition(Explainer):
def __init__(self, model, masker, *, output_names=None, link=links.identity, linearize_link=True,
feature_names=None, **call_args):
super().__init__(model, masker, link=link, linearize_link=linearize_link, algorithm="partition", \
output_names = output_names, feature_names=feature_names)
self.input_shape = masker.shape[1:] if hasattr(masker, "shape") and not callable(masker.shape) else None
if not safe_isinstance(self.model, "shap.models.Model"):
self.model = Model(self.model)
self.expected_value = None
self._curr_base_value = None
if getattr(self.masker, "clustering", None) is None:
raise ValueError("The passed masker must have a .clustering attribute defined! Try shap.maskers.Partition(data) for example.")
if self.input_shape is not None and len(self.input_shape) > 1:
self._reshaped_model = lambda x: self.model(x.reshape(x.shape[0], *self.input_shape))
else:
self._reshaped_model = self.model
# a lot of information
if not callable(self.masker.clustering):
self._clustering = self.masker.clustering
self._mask_matrix = make_masks(self._clustering)
# if we have gotten default arguments for the call function we need to wrap ourselves in a new class that
# has a call function with those new default arguments
if len(call_args) > 0:
class Partition(self.__class__):
# this signature should match the __call__ signature of the class defined below
def __call__(self, *args, max_evals=500, fixed_context=None, main_effects=False, error_bounds=False, batch_size="auto",
outputs=None, silent=False):
return super().__call__(
*args, max_evals=max_evals, fixed_context=fixed_context, main_effects=main_effects, error_bounds=error_bounds,
batch_size=batch_size, outputs=outputs, silent=silent
)
Partition.__call__.__doc__ = self.__class__.__call__.__doc__
self.__class__ = Partition
for k, v in call_args.items():
self.__call__.__kwdefaults__[k] = v
# note that changes to this function signature should be copied to the default call argument wrapper above
def __call__(self, *args, max_evals=500, fixed_context=None, main_effects=False, error_bounds=False, batch_size="auto",
outputs=None, silent=False):
return super().__call__(
*args, max_evals=max_evals, fixed_context=fixed_context, main_effects=main_effects, error_bounds=error_bounds, batch_size=batch_size,
outputs=outputs, silent=silent
)
def explain_row(self, *row_args, max_evals, main_effects, error_bounds, batch_size, outputs, silent, fixed_context = "auto"):
if fixed_context == "auto":
# if isinstance(self.masker, maskers.Text):
# fixed_context = 1 # we err on the side of speed for text models
# else:
fixed_context = None
elif fixed_context not in [0, 1, None]:
raise Exception("Unknown fixed_context value passed (must be 0, 1 or None): %s" %fixed_context)
# build a masked version of the model for the current input sample
fm = MaskedModel(self.model, self.masker, self.link, self.linearize_link, *row_args)
# make sure we have the base value and current value outputs
M = len(fm)
m00 = np.zeros(M, dtype=np.bool)
# if not fixed background or no base value assigned then compute base value for a row
if self._curr_base_value is None or not getattr(self.masker, "fixed_background", False):
self._curr_base_value = fm(m00.reshape(1, -1), zero_index=0)[0] # the zero index param tells the masked model what the baseline is
f11 = fm(~m00.reshape(1, -1))[0]
if callable(self.masker.clustering):
self._clustering = self.masker.clustering(*row_args)
self._mask_matrix = make_masks(self._clustering)
if hasattr(self._curr_base_value, 'shape') and len(self._curr_base_value.shape) > 0:
if outputs is None:
outputs = np.arange(len(self._curr_base_value))
elif isinstance(outputs, OpChain):
outputs = outputs.apply(Explanation(f11)).values
out_shape = (2*self._clustering.shape[0]+1, len(outputs))
else:
out_shape = (2*self._clustering.shape[0]+1,)
if max_evals == "auto":
max_evals = 500
self.values = np.zeros(out_shape)
self.dvalues = np.zeros(out_shape)
self.owen(fm, self._curr_base_value, f11, max_evals - 2, outputs, fixed_context, batch_size, silent)
# if False:
# if self.multi_output:
# return [self.dvalues[:,i] for i in range(self.dvalues.shape[1])], oinds
# else:
# return self.dvalues.copy(), oinds
# else:
# drop the interaction terms down onto self.values
self.values[:] = self.dvalues
lower_credit(len(self.dvalues) - 1, 0, M, self.values, self._clustering)
return {
"values": self.values[:M].copy(),
"expected_values": self._curr_base_value if outputs is None else self._curr_base_value[outputs],
"mask_shapes": [s + out_shape[1:] for s in fm.mask_shapes],
"main_effects": None,
"hierarchical_values": self.dvalues.copy(),
"clustering": self._clustering,
"output_indices": outputs,
"output_names": getattr(self.model, "output_names", None)
}
def __str__(self):
return "shap.explainers.Partition()"
def owen(self, fm, f00, f11, max_evals, output_indexes, fixed_context, batch_size, silent):
#f = self._reshaped_model
#r = self.masker
#masks = np.zeros(2*len(inds)+1, dtype=np.int)
M = len(fm)
m00 = np.zeros(M, dtype=np.bool)
#f00 = fm(m00.reshape(1,-1))[0]
base_value = f00
#f11 = fm(~m00.reshape(1,-1))[0]
#f11 = self._reshaped_model(r(~m00, x)).mean(0)
ind = len(self.dvalues)-1
# make sure output_indexes is a list of indexes
if output_indexes is not None:
# assert self.multi_output, "output_indexes is only valid for multi-output models!"
# inds = output_indexes.apply(f11, 0)
# out_len = output_indexes_len(output_indexes)
# if output_indexes.startswith("max("):
# output_indexes = np.argsort(-f11)[:out_len]
# elif output_indexes.startswith("min("):
# output_indexes = np.argsort(f11)[:out_len]
# elif output_indexes.startswith("max(abs("):
# output_indexes = np.argsort(np.abs(f11))[:out_len]
f00 = f00[output_indexes]
f11 = f11[output_indexes]
q = queue.PriorityQueue()
q.put((0, 0, (m00, f00, f11, ind, 1.0)))
eval_count = 0
total_evals = min(max_evals, (M-1)*M) # TODO: (M-1)*M is only right for balanced clusterings, but this is just for plotting progress...
pbar = None
start_time = time.time()
while not q.empty():
# if we passed our execution limit then leave everything else on the internal nodes
if eval_count >= max_evals:
while not q.empty():
m00, f00, f11, ind, weight = q.get()[2]
self.dvalues[ind] += (f11 - f00) * weight
break
# create a batch of work to do
batch_args = []
batch_masks = []
while not q.empty() and len(batch_masks) < batch_size and eval_count + len(batch_masks) < max_evals:
# get our next set of arguments
m00, f00, f11, ind, weight = q.get()[2]
# get the left and right children of this cluster
lind = int(self._clustering[ind-M, 0]) if ind >= M else -1
rind = int(self._clustering[ind-M, 1]) if ind >= M else -1
# get the distance of this cluster's children
if ind < M:
distance = -1
else:
if self._clustering.shape[1] >= 3:
distance = self._clustering[ind-M, 2]
else:
distance = 1
if distance < 0:
self.dvalues[ind] += (f11 - f00) * weight
continue
m10 = m00.copy()
m10[:] += self._mask_matrix[lind, :]
m01 = m00.copy()
m01[:] += self._mask_matrix[rind, :]
batch_args.append((m00, m10, m01, f00, f11, ind, lind, rind, weight))
batch_masks.append(m10)
batch_masks.append(m01)
batch_masks = np.array(batch_masks)
if len(batch_args) > 0:
fout = fm(batch_masks)
if output_indexes is not None:
fout = fout[:,output_indexes]
eval_count += len(batch_masks)
if pbar is None and time.time() - start_time > 5:
pbar = tqdm(total=total_evals, disable=silent, leave=False)
pbar.update(eval_count)
if pbar is not None:
pbar.update(len(batch_masks))
for i in range(len(batch_args)):
m00, m10, m01, f00, f11, ind, lind, rind, weight = batch_args[i]
f10 = fout[2*i]
f01 = fout[2*i+1]
new_weight = weight
if fixed_context is None:
new_weight /= 2
elif fixed_context == 0:
self.dvalues[ind] += (f11 - f10 - f01 + f00) * weight
elif fixed_context == 1:
self.dvalues[ind] -= (f11 - f10 - f01 + f00) * weight
if fixed_context is None or fixed_context == 0:
args = (m00, f00, f10, lind, new_weight)
q.put((-np.max(np.abs(f10 - f00)) * new_weight, np.random.randn(), args))
args = (m00, f00, f01, rind, new_weight)
q.put((-np.max(np.abs(f01 - f00)) * new_weight, np.random.randn(), args))
if fixed_context is None or fixed_context == 1:
args = (m01, f01, f11, lind, new_weight)
q.put((-np.max(np.abs(f11 - f01)) * new_weight, np.random.randn(), args))
args = (m10, f10, f11, rind, new_weight)
q.put((-np.max(np.abs(f11 - f10)) * new_weight, np.random.randn(), args))
if pbar is not None:
pbar.close()
self.last_eval_count = eval_count
return output_indexes, base_value
def owen3(self, fm, f00, f11, max_evals, output_indexes, fixed_context, batch_size, silent):
M = len(fm)
m00 = np.zeros(M, dtype=np.bool)
base_value = f00
ind = len(self.dvalues)-1
if output_indexes is not None:
f00 = f00[output_indexes]
f11 = f11[output_indexes]
evals_planned = M
q = queue.PriorityQueue()
q.put((0, 0, (m00, f00, f11, ind, 1.0, fixed_context)))
eval_count = 0
total_evals = min(max_evals, (M-1)*M)
pbar = None
start_time = time.time()
while not q.empty():
if eval_count >= max_evals:
while not q.empty():
m00, f00, f11, ind, weight, _ = q.get()[2]
self.dvalues[ind] += (f11 - f00) * weight
break
batch_args = []
batch_masks = []
while not q.empty() and len(batch_masks) < batch_size and eval_count < max_evals:
m00, f00, f11, ind, weight, context = q.get()[2]
lind = int(self._clustering[ind-M, 0]) if ind >= M else -1
rind = int(self._clustering[ind-M, 1]) if ind >= M else -1
if ind < M:
distance = -1
else:
distance = self._clustering[ind-M, 2]
# check if we are a leaf node (or other negative distance cluster) and so should terminate our decent
if distance < 0:
self.dvalues[ind] += (f11 - f00) * weight
continue
# build the masks
m10 = m00.copy() # we separate the copy from the add so as to not get converted to a matrix
m10[:] += self._mask_matrix[lind, :]
m01 = m00.copy()
m01[:] += self._mask_matrix[rind, :]
batch_args.append((m00, m10, m01, f00, f11, ind, lind, rind, weight, context))
batch_masks.append(m10)
batch_masks.append(m01)
batch_masks = np.array(batch_masks)
# run the batch
if len(batch_args) > 0:
fout = fm(batch_masks)
if output_indexes is not None:
fout = fout[:,output_indexes]
eval_count += len(batch_masks)
if pbar is None and time.time() - start_time > 5:
pbar = tqdm(total=total_evals, disable=silent, leave=False)
pbar.update(eval_count)
if pbar is not None:
pbar.update(len(batch_masks))
# use the results of the batch to add new nodes
for i in range(len(batch_args)):
m00, m10, m01, f00, f11, ind, lind, rind, weight, context = batch_args[i]
# get the the number of leaves in this cluster
if ind < M:
num_leaves = 0
else:
num_leaves = self._clustering[ind-M, 3]
# get the evaluated model output on the two new masked inputs
f10 = fout[2*i]
f01 = fout[2*i+1]
# see if we have enough evaluations left to get both sides of a fixed context
if max_evals - evals_planned > num_leaves:
evals_planned += num_leaves
ignore_context = True
else:
ignore_context = False
new_weight = weight
if context is None or ignore_context:
new_weight /= 2
if context is None or context == 0 or ignore_context:
self.dvalues[ind] += (f11 - f10 - f01 + f00) * weight # leave the interaction effect on the internal node
# recurse on the left node with zero context, flip the context for all decendents if we are ignoring it
args = (m00, f00, f10, lind, new_weight, 0 if context == 1 else context)
q.put((-np.max(np.abs(f10 - f00)) * new_weight, np.random.randn(), args))
# recurse on the right node with zero context, flip the context for all decendents if we are ignoring it
args = (m00, f00, f01, rind, new_weight, 0 if context == 1 else context)
q.put((-np.max(np.abs(f01 - f00)) * new_weight, np.random.randn(), args))
if context is None or context == 1 or ignore_context:
self.dvalues[ind] -= (f11 - f10 - f01 + f00) * weight # leave the interaction effect on the internal node
# recurse on the left node with one context, flip the context for all decendents if we are ignoring it
args = (m01, f01, f11, lind, new_weight, 1 if context == 0 else context)
q.put((-np.max(np.abs(f11 - f01)) * new_weight, np.random.randn(), args))
# recurse on the right node with one context, flip the context for all decendents if we are ignoring it
args = (m10, f10, f11, rind, new_weight, 1 if context == 0 else context)
q.put((-np.max(np.abs(f11 - f10)) * new_weight, np.random.randn(), args))
if pbar is not None:
pbar.close()
self.last_eval_count = eval_count
return output_indexes, base_value
# def owen2(self, fm, f00, f11, max_evals, output_indexes, fixed_context, batch_size, silent):
# """ Compute a nested set of recursive Owen values based on an ordering recursion.
# """
# #f = self._reshaped_model
# #r = self.masker
# #masks = np.zeros(2*len(inds)+1, dtype=np.int)
# M = len(fm)
# m00 = np.zeros(M, dtype=np.bool)
# #f00 = fm(m00.reshape(1,-1))[0]
# base_value = f00
# #f11 = fm(~m00.reshape(1,-1))[0]
# #f11 = self._reshaped_model(r(~m00, x)).mean(0)
# ind = len(self.dvalues)-1
# # make sure output_indexes is a list of indexes
# if output_indexes is not None:
# # assert self.multi_output, "output_indexes is only valid for multi-output models!"
# # inds = output_indexes.apply(f11, 0)
# # out_len = output_indexes_len(output_indexes)
# # if output_indexes.startswith("max("):
# # output_indexes = np.argsort(-f11)[:out_len]
# # elif output_indexes.startswith("min("):
# # output_indexes = np.argsort(f11)[:out_len]
# # elif output_indexes.startswith("max(abs("):
# # output_indexes = np.argsort(np.abs(f11))[:out_len]
# f00 = f00[output_indexes]
# f11 = f11[output_indexes]
# fc_owen(m00, m11, 1)
# fc_owen(m00, m11, 0)
# def fc_owen(m00, m11, context):
# # recurse on the left node with zero context
# args = (m00, f00, f10, lind, new_weight)
# q.put((-np.max(np.abs(f10 - f00)) * new_weight, np.random.randn(), args))
# # recurse on the right node with zero context
# args = (m00, f00, f01, rind, new_weight)
# q.put((-np.max(np.abs(f01 - f00)) * new_weight, np.random.randn(), args))
# fc_owen(m00, m11, 1)
# m00 m11
# owen(fc=1)
# owen(fc=0)
# q = queue.PriorityQueue()
# q.put((0, 0, (m00, f00, f11, ind, 1.0, 1)))
# eval_count = 0
# total_evals = min(max_evals, (M-1)*M) # TODO: (M-1)*M is only right for balanced clusterings, but this is just for plotting progress...
# pbar = None
# start_time = time.time()
# while not q.empty():
# # if we passed our execution limit then leave everything else on the internal nodes
# if eval_count >= max_evals:
# while not q.empty():
# m00, f00, f11, ind, weight, _ = q.get()[2]
# self.dvalues[ind] += (f11 - f00) * weight
# break
# # create a batch of work to do
# batch_args = []
# batch_masks = []
# while not q.empty() and len(batch_masks) < batch_size and eval_count < max_evals:
# # get our next set of arguments
# m00, f00, f11, ind, weight, context = q.get()[2]
# # get the left and right children of this cluster
# lind = int(self._clustering[ind-M, 0]) if ind >= M else -1
# rind = int(self._clustering[ind-M, 1]) if ind >= M else -1
# # get the distance of this cluster's children
@jit
def lower_credit(i, value, M, values, clustering):
if i < M:
values[i] += value
return
li = int(clustering[i-M,0])
ri = int(clustering[i-M,1])
group_size = int(clustering[i-M,3])
lsize = int(clustering[li-M,3]) if li >= M else 1
rsize = int(clustering[ri-M,3]) if ri >= M else 1
assert lsize+rsize == group_size
values[i] += value
lower_credit(li, values[i] * lsize / group_size, M, values, clustering)
lower_credit(ri, values[i] * rsize / group_size, M, values, clustering) | true | true |
f7ff2053a632e3635774f6bfa710cc06f514a39c | 2,996 | py | Python | examples/twistedweb_server.py | ShadowJonathan/txredisapi | 5da94b91e7936af8fddba824da21cde428581996 | [
"Apache-2.0"
] | 104 | 2015-01-05T16:10:44.000Z | 2019-10-14T14:59:10.000Z | examples/twistedweb_server.py | ShadowJonathan/txredisapi | 5da94b91e7936af8fddba824da21cde428581996 | [
"Apache-2.0"
] | 55 | 2015-01-22T11:25:20.000Z | 2019-11-19T21:39:32.000Z | examples/twistedweb_server.py | ShadowJonathan/txredisapi | 5da94b91e7936af8fddba824da21cde428581996 | [
"Apache-2.0"
] | 50 | 2015-03-01T10:26:28.000Z | 2019-11-17T23:26:51.000Z | #!/usr/bin/env twistd -ny
# coding: utf-8
# Copyright 2009 Alexandre Fiori
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# run:
# twistd -ny twistwedweb_server.py
import txredisapi as redis
from twisted.application import internet
from twisted.application import service
from twisted.internet import defer
from twisted.web import server
from twisted.web import xmlrpc
from twisted.web.resource import Resource
class Root(Resource):
    # site root container; isLeaf = False so twisted.web keeps traversing the
    # URL path into the children registered below via root.putChild(...)
    isLeaf = False
class BaseHandler(object):
    # handlers are leaf resources: no further URL traversal happens below them
    isLeaf = True
    def __init__(self, db):
        # shared txredisapi connection pool used by every handler instance
        self.db = db
        Resource.__init__(self)  # mixin style: subclasses also inherit Resource
class IndexHandler(BaseHandler, Resource):
    """HTTP front end for simple redis key access.

    ``GET /?key=k`` fetches a value and ``POST`` with ``key``/``value``
    form fields stores one.  The response is written asynchronously once
    the redis deferred fires, so both render methods return
    ``server.NOT_DONE_YET``.
    """
    def _success(self, value, request, message):
        # deferred callback: report the redis result and close the request
        request.write(message % repr(value))
        request.finish()
    def _failure(self, error, request, message):
        # deferred errback: report the failure and close the request
        request.write(message % str(error))
        request.finish()
    def render_GET(self, request):
        try:
            key = request.args["key"][0]
        except (KeyError, IndexError):
            # was a bare ``except:`` which also swallowed unrelated errors;
            # only a missing/empty "key" argument should produce a 404
            request.setResponseCode(404, "not found")
            return ""
        d = self.db.get(key)
        d.addCallback(self._success, request, "get: %s\n")
        d.addErrback(self._failure, request, "get failed: %s\n")
        return server.NOT_DONE_YET
    def render_POST(self, request):
        try:
            key = request.args["key"][0]
            value = request.args["value"][0]
        except (KeyError, IndexError):
            # narrowed from a bare ``except:`` — see render_GET
            request.setResponseCode(404, "not found")
            return ""
        d = self.db.set(key, value)
        d.addCallback(self._success, request, "set: %s\n")
        d.addErrback(self._failure, request, "set failed: %s\n")
        return server.NOT_DONE_YET
class InfoHandler(BaseHandler, Resource):
    # debug endpoint: GET /info returns the repr() of the redis connection pool
    def render_GET(self, request):
        return "redis: %s\n" % repr(self.db)
class XmlrpcHandler(BaseHandler, xmlrpc.XMLRPC):
    # XML-RPC endpoint exposing get/set; allowNone lets None be marshalled in
    # responses (presumably so a GET of a missing key can be returned — confirm)
    allowNone = True
    @defer.inlineCallbacks
    def xmlrpc_get(self, key):
        # asynchronous redis GET, resolved via the inlineCallbacks generator
        value = yield self.db.get(key)
        defer.returnValue(value)
    @defer.inlineCallbacks
    def xmlrpc_set(self, key, value):
        # asynchronous redis SET; yields the redis status reply
        result = yield self.db.set(key, value)
        defer.returnValue(result)
# redis connection
# lazyConnectionPool defers the actual connection until first use, so service
# startup does not block on redis being available
_db = redis.lazyConnectionPool()
# http resources
root = Root()
root.putChild("", IndexHandler(_db))        # site root: GET/POST key access
root.putChild("info", InfoHandler(_db))     # /info: connection debug info
root.putChild("xmlrpc", XmlrpcHandler(_db)) # /xmlrpc: XML-RPC get/set
# `application` is the name twistd looks for when run as "twistd -ny <file>"
# (see the shebang at the top of this file)
application = service.Application("webredis")
srv = internet.TCPServer(8888, server.Site(root), interface="127.0.0.1")
srv.setServiceParent(application)
| 27.740741 | 74 | 0.676235 |
import txredisapi as redis
from twisted.application import internet
from twisted.application import service
from twisted.internet import defer
from twisted.web import server
from twisted.web import xmlrpc
from twisted.web.resource import Resource
class Root(Resource):
isLeaf = False
class BaseHandler(object):
isLeaf = True
def __init__(self, db):
self.db = db
Resource.__init__(self)
class IndexHandler(BaseHandler, Resource):
def _success(self, value, request, message):
request.write(message % repr(value))
request.finish()
def _failure(self, error, request, message):
request.write(message % str(error))
request.finish()
def render_GET(self, request):
try:
key = request.args["key"][0]
except:
request.setResponseCode(404, "not found")
return ""
d = self.db.get(key)
d.addCallback(self._success, request, "get: %s\n")
d.addErrback(self._failure, request, "get failed: %s\n")
return server.NOT_DONE_YET
def render_POST(self, request):
try:
key = request.args["key"][0]
value = request.args["value"][0]
except:
request.setResponseCode(404, "not found")
return ""
d = self.db.set(key, value)
d.addCallback(self._success, request, "set: %s\n")
d.addErrback(self._failure, request, "set failed: %s\n")
return server.NOT_DONE_YET
class InfoHandler(BaseHandler, Resource):
def render_GET(self, request):
return "redis: %s\n" % repr(self.db)
class XmlrpcHandler(BaseHandler, xmlrpc.XMLRPC):
allowNone = True
@defer.inlineCallbacks
def xmlrpc_get(self, key):
value = yield self.db.get(key)
defer.returnValue(value)
@defer.inlineCallbacks
def xmlrpc_set(self, key, value):
result = yield self.db.set(key, value)
defer.returnValue(result)
_db = redis.lazyConnectionPool()
root = Root()
root.putChild("", IndexHandler(_db))
root.putChild("info", InfoHandler(_db))
root.putChild("xmlrpc", XmlrpcHandler(_db))
application = service.Application("webredis")
srv = internet.TCPServer(8888, server.Site(root), interface="127.0.0.1")
srv.setServiceParent(application)
| true | true |
f7ff20e76088b0b92d2df80c3fbeb09a7de2e990 | 1,465 | py | Python | networking/graphviz/second_program.py | maciej233/PYTHON | 7e635a2250890f1a293983b3988bdcc5f5e71ccf | [
"MIT"
] | null | null | null | networking/graphviz/second_program.py | maciej233/PYTHON | 7e635a2250890f1a293983b3988bdcc5f5e71ccf | [
"MIT"
] | null | null | null | networking/graphviz/second_program.py | maciej233/PYTHON | 7e635a2250890f1a293983b3988bdcc5f5e71ccf | [
"MIT"
] | null | null | null | import glob, re
from graphviz import Digraph, Source
pattern = re.compile('Et[23]/[0123]')
device_lldp_neighbors = []
for file_name in glob.glob("/tmp/logi_lldp/*"):
device = file_name.split("/")[3].split("_")[0]
#print("device: " + device)
with open(file_name, 'r') as f:
for line in f.readlines():
line = eval(line) # EVAL LINE AS LIST
for item in line[0]:
# match pattern
if re.search(pattern, item):
#print(" neighors: " + item.split()[0].split('.')[0])
device_lldp_neighbors.append((device, item.split()[0].split('.')[0]))
#print(device_lldp_neighbors)
print("*"*10)
print("Edges: " + str(device_lldp_neighbors))
my_graph = Digraph("My_Network")
my_graph.edge("Client", "s07")
my_graph.edge("s13", "Server")
my_graph.edge("s01", "r1")
my_graph.edge("r1", "r2")
my_graph.edge("r2", "s10")
# make neigbors edges for graph
for neighbors in device_lldp_neighbors:
node1, node2 = neighbors
my_graph.edge(node1, node2)
source = my_graph.source
original_text = "digraph My_Network {"
new_text = 'digraph My_Network {\n\
{rank=same s01 r1 r2 s10}\n\
{rank=same s02 s03 s04}\n\
{rank=same s05 s06}\n\
{rank=same Client s07 s08}\n\
{rank=same s11 s12}\n'
new_source = source.replace(original_text, new_text)
print(new_source)
new_graph = Source(new_source)
new_graph.render("output/fourh_graph.gv") | 31.170213 | 89 | 0.631399 | import glob, re
from graphviz import Digraph, Source
pattern = re.compile('Et[23]/[0123]')
device_lldp_neighbors = []
for file_name in glob.glob("/tmp/logi_lldp/*"):
device = file_name.split("/")[3].split("_")[0]
with open(file_name, 'r') as f:
for line in f.readlines():
line = eval(line)
for item in line[0]:
if re.search(pattern, item):
device_lldp_neighbors.append((device, item.split()[0].split('.')[0]))
print("*"*10)
print("Edges: " + str(device_lldp_neighbors))
my_graph = Digraph("My_Network")
my_graph.edge("Client", "s07")
my_graph.edge("s13", "Server")
my_graph.edge("s01", "r1")
my_graph.edge("r1", "r2")
my_graph.edge("r2", "s10")
for neighbors in device_lldp_neighbors:
node1, node2 = neighbors
my_graph.edge(node1, node2)
source = my_graph.source
original_text = "digraph My_Network {"
new_text = 'digraph My_Network {\n\
{rank=same s01 r1 r2 s10}\n\
{rank=same s02 s03 s04}\n\
{rank=same s05 s06}\n\
{rank=same Client s07 s08}\n\
{rank=same s11 s12}\n'
new_source = source.replace(original_text, new_text)
print(new_source)
new_graph = Source(new_source)
new_graph.render("output/fourh_graph.gv") | true | true |
f7ff216a1168b984d5e82978f869dc3123c6f78b | 11,935 | py | Python | doc/source/notebooks/intro_to_gpflow2.pct.py | BracketJohn/GPflow | 33178689c34d773a05532d50e3d4d97e7d5d6d60 | [
"Apache-2.0"
] | null | null | null | doc/source/notebooks/intro_to_gpflow2.pct.py | BracketJohn/GPflow | 33178689c34d773a05532d50e3d4d97e7d5d6d60 | [
"Apache-2.0"
] | null | null | null | doc/source/notebooks/intro_to_gpflow2.pct.py | BracketJohn/GPflow | 33178689c34d773a05532d50e3d4d97e7d5d6d60 | [
"Apache-2.0"
] | null | null | null | # ---
# jupyter:
# jupytext:
# formats: ipynb,.pct.py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.3.3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# GPflow with TensorFlow 2
# ===
#
# ##### Small steps big changes
#
# <br>
#
#
# %%
from typing import Tuple, Optional
from pathlib import Path
import datetime
import io
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import gpflow
from gpflow.config import default_float
import warnings
warnings.filterwarnings('ignore')
# %% [markdown]
# Make `tensorboard` work inside notebook:
# %%
output_logdir = "/tmp/tensorboard"
# !rm -rf "{output_logdir}"
# !mkdir "{output_logdir}"
# %load_ext tensorboard
# %matplotlib inline
def enumerated_logdir(_logdir_id: int = [0]):
logdir = Path(output_logdir, str(_logdir_id[0]))
_logdir_id[0] += 1
return str(logdir)
# %% [markdown]
# Set up random seeds and default float for `gpflow` tensors:
# %%
gpflow.config.set_default_float(np.float64)
np.random.seed(0)
tf.random.set_seed(0)
# %% [markdown]
# ## Loading data using TensorFlow Datasets
#
# For this example, we create a synthetic dataset (noisy sine function):
# %%
def noisy_sin(x):
return tf.math.sin(x) + 0.1 * tf.random.normal(x.shape, dtype=default_float())
num_train_data, num_test_data = 100, 500
X = tf.random.uniform((num_train_data, 1), dtype=default_float()) * 10
Xtest = tf.random.uniform((num_test_data, 1), dtype=default_float()) * 10
Y = noisy_sin(X)
Ytest = noisy_sin(Xtest)
data = (X, Y)
plt.plot(X, Y, 'xk')
plt.show()
# %% [markdown]
# Working with TensorFlow Datasets is an efficient way to rapidly shuffle, iterate, and batch from data.
# %%
train_dataset = tf.data.Dataset.from_tensor_slices((X, Y))
test_dataset = tf.data.Dataset.from_tensor_slices((Xtest, Ytest))
batch_size = 32
num_features = 10
prefetch_size = num_train_data // 2
shuffle_buffer_size = num_train_data // 2
num_batches_per_epoch = num_train_data // batch_size
original_train_dataset = train_dataset
train_dataset = train_dataset.repeat()\
.prefetch(prefetch_size)\
.shuffle(buffer_size=shuffle_buffer_size)\
.batch(batch_size)
print(f"prefetch_size={prefetch_size}")
print(f"shuffle_buffer_size={shuffle_buffer_size}")
print(f"num_batches_per_epoch={num_batches_per_epoch}")
# %% [markdown]
# ## Define a GP model
#
# In GPflow 2.0, we use `tf.Module` (or the very thin `gpflow.base.Module` wrapper) to build all our models, as well as their components (kernels, likelihoods, parameters, and so on).
# %%
kernel = gpflow.kernels.SquaredExponential(variance=2.)
likelihood = gpflow.likelihoods.Gaussian()
inducing_variable = np.linspace(0, 10, num_features).reshape(-1, 1)
model = gpflow.models.SVGP(kernel=kernel, likelihood=likelihood, inducing_variable=inducing_variable)
# %% [markdown]
# You can set a module (or a particular parameter) to be non-trainable using the auxiliary method ```set_trainable(module, False)```:
# %%
from gpflow.utilities import set_trainable
set_trainable(likelihood, False)
set_trainable(kernel.variance, False)
set_trainable(likelihood, True)
set_trainable(kernel.variance, True)
# %% [markdown]
# We can use ```param.assign(value)``` to assign a value to a parameter:
# %%
kernel.lengthscale.assign(0.5)
# %% [markdown]
# All these changes are reflected when we use ```print_summary(model)``` to print a detailed summary of the model. By default the output is displayed in a minimalistic and simple table.
# %%
from gpflow.utilities import print_summary
print_summary(model) # same as print_summary(model, fmt="simple")
# %% [markdown]
# We can change default printing so that it will look nicer in our notebook:
# %%
gpflow.config.set_default_summary_fmt("notebook")
print_summary(model) # same as print_summary(model, fmt="notebook")
# %% [markdown]
# Jupyter notebooks also format GPflow classes (that are subclasses of `gpflow.base.Module`) in the same nice way when at the end of a cell (this is independent of the `default_summary_fmt`):
# %%
model
# %% [markdown]
# ## Training using Gradient Tapes
#
# In TensorFlow 2, we can optimize (trainable) model parameters with TensorFlow optimizers using `tf.GradientTape`. In this simple example, we perform one gradient update of the Adam optimizer to minimize the negative marginal log likelihood (or ELBO) of our model.
# %%
optimizer = tf.optimizers.Adam()
with tf.GradientTape() as tape:
tape.watch(model.trainable_variables)
obj = - model.elbo(data)
grads = tape.gradient(obj, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
# %% [markdown]
# For a more elaborate example of a gradient update we can define an ```optimization_step``` that uses the decorator ```tf.function``` on a closure. A closure is a callable that returns the model objective evaluated at a given dataset when called.
# %%
def optimization_step(model: gpflow.models.SVGP, batch: Tuple[tf.Tensor, tf.Tensor]):
with tf.GradientTape(watch_accessed_variables=False) as tape:
tape.watch(model.trainable_variables)
obj = - model.elbo(batch)
grads = tape.gradient(obj, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
# %% [markdown]
# We can use the functionality of TensorFlow Datasets to define a simple training loop that iterates over batches of the training dataset:
# %%
def simple_training_loop(model: gpflow.models.SVGP, epochs: int = 1, logging_epoch_freq: int = 10):
batches = iter(train_dataset)
tf_optimization_step = tf.function(optimization_step, autograph=False)
for epoch in range(epochs):
for _ in range(num_batches_per_epoch):
tf_optimization_step(model, next(batches))
epoch_id = epoch + 1
if epoch_id % logging_epoch_freq == 0:
tf.print(f"Epoch {epoch_id}: ELBO (train) {model.elbo(data)}")
# %%
simple_training_loop(model, epochs=10, logging_epoch_freq=2)
# %% [markdown]
# ## Monitoring
#
# We can monitor the training procedure using `tf.summary`. First we create a summary writer object through which we can write scalars and images.
# %%
from intro_to_gpflow2_plotting import plotting_regression, summary_matplotlib_image
samples_input = tf.cast(np.linspace(0, 10, 100).reshape(100, 1), default_float())
def monitored_training_loop(model: gpflow.models.SVGP, logdir: str,
epochs: int = 1, logging_epoch_freq: int = 10,
num_samples: int = 10):
summary_writer = tf.summary.create_file_writer(logdir)
tf_optimization_step = tf.function(optimization_step)
batches = iter(train_dataset)
with summary_writer.as_default():
for epoch in range(epochs):
for _ in range(num_batches_per_epoch):
tf_optimization_step(model, next(batches))
epoch_id = epoch + 1
if epoch_id % logging_epoch_freq == 0:
tf.print(f"Epoch {epoch_id}: ELBO (train) {model.elbo(data)}")
mean, var = model.predict_f(samples_input)
samples = model.predict_f_samples(samples_input, num_samples)
fig = plotting_regression(X, Y, samples_input, mean, var, samples)
summary_matplotlib_image(dict(model_samples=fig), step=epoch)
tf.summary.scalar('elbo', data=model.elbo(data), step=epoch)
tf.summary.scalar('likelihood/variance', data=model.likelihood.variance, step=epoch)
tf.summary.scalar('kernel/lengthscale', data=model.kernel.lengthscale, step=epoch)
tf.summary.scalar('kernel/variance', data=model.kernel.variance, step=epoch)
# %%
model = gpflow.models.SVGP(kernel=kernel, likelihood=likelihood, inducing_variable=inducing_variable)
output_logdir = enumerated_logdir()
monitored_training_loop(model, output_logdir, epochs=1000, logging_epoch_freq=100)
# %% [markdown]
# Then, we can use TensorBoard to examine the training procedure in more detail
# %%
# # %tensorboard --logdir "{output_logdir}"
# %% [markdown]
# ## Checkpointing: saving and loading models
#
# With the help of `tf.train.CheckpointManager` and `tf.train.Checkpoint`, we can checkpoint the model throughout the training procedure. Let's start with a simple example using checkpointing to save and load a `tf.Variable`:
# %%
initial_value = 1.2
a = tf.Variable(initial_value)
# %% [markdown]
# Create `Checkpoint` object:
# %%
ckpt = tf.train.Checkpoint(a=a)
manager = tf.train.CheckpointManager(ckpt, output_logdir, max_to_keep=3)
# %% [markdown]
# Save the variable `a` and change its value right after:
# %%
manager.save()
_ = a.assign(0.33)
# %% [markdown]
# Now we can restore the old variable value:
# %%
print(f"Current value of variable a: {a.numpy():0.3f}")
ckpt.restore(manager.latest_checkpoint)
print(f"Value of variable a after restore: {a.numpy():0.3f}")
# %% [markdown]
# In the example below, we modify a simple training loop to save the model every 100 epochs using the `CheckpointManager`.
# %%
model = gpflow.models.SVGP(kernel=kernel, likelihood=likelihood, inducing_variable=inducing_variable)
def checkpointing_training_loop(model: gpflow.models.SVGP,
batch_size: int,
epochs: int,
manager: tf.train.CheckpointManager,
logging_epoch_freq: int = 100,
epoch_var: Optional[tf.Variable] = None,
step_var: Optional[tf.Variable] = None):
tf_optimization_step = tf.function(optimization_step)
batches = iter(train_dataset)
for epoch in range(epochs):
for step in range(num_batches_per_epoch):
tf_optimization_step(model, next(batches))
if step_var is not None:
step_var.assign(epoch * num_batches_per_epoch + step + 1)
if epoch_var is not None:
epoch_var.assign(epoch + 1)
epoch_id = epoch + 1
if epoch_id % logging_epoch_freq == 0:
ckpt_path = manager.save()
tf.print(f"Epoch {epoch_id}: ELBO (train) {model.elbo(data)}, saved at {ckpt_path}")
# %%
step_var = tf.Variable(1, dtype=tf.int32, trainable=False)
epoch_var = tf.Variable(1, dtype=tf.int32, trainable=False)
ckpt = tf.train.Checkpoint(model=model, step=step_var, epoch=epoch_var)
manager = tf.train.CheckpointManager(ckpt, output_logdir, max_to_keep=5)
print(f"Checkpoint folder path at: {output_logdir}")
checkpointing_training_loop(model, batch_size=batch_size, epochs=1000, manager=manager, epoch_var=epoch_var, step_var=step_var)
# %% [markdown]
# After the models have been saved, we can restore them using ```tf.train.Checkpoint.restore``` and assert that their performance corresponds to that logged during training.
# %%
for i, recorded_checkpoint in enumerate(manager.checkpoints):
ckpt.restore(recorded_checkpoint)
print(f"{i} restored model from epoch {int(epoch_var)} [step:{int(step_var)}] : ELBO training set {model.elbo(data)}")
# %% [markdown]
# ## Copying (hyper)parameter values between models
#
# It is easy to interact with the set of all parameters of a model or a subcomponent programmatically.
#
# The following returns a dictionary of all parameters within
# %%
model = gpflow.models.SGPR(data, kernel=kernel, inducing_variable=inducing_variable)
# %%
gpflow.utilities.parameter_dict(model)
# %% [markdown]
# Such a dictionary can be assigned back to this model (or another model with the same tree of parameters) as follows:
# %%
params = gpflow.utilities.parameter_dict(model)
gpflow.utilities.multiple_assign(model, params)
| 32.69863 | 265 | 0.704985 |
umpy as np
import tensorflow as tf
import gpflow
from gpflow.config import default_float
import warnings
warnings.filterwarnings('ignore')
output_logdir = "/tmp/tensorboard"
def enumerated_logdir(_logdir_id: int = [0]):
logdir = Path(output_logdir, str(_logdir_id[0]))
_logdir_id[0] += 1
return str(logdir)
gpflow.config.set_default_float(np.float64)
np.random.seed(0)
tf.random.set_seed(0)
, dtype=default_float())
num_train_data, num_test_data = 100, 500
X = tf.random.uniform((num_train_data, 1), dtype=default_float()) * 10
Xtest = tf.random.uniform((num_test_data, 1), dtype=default_float()) * 10
Y = noisy_sin(X)
Ytest = noisy_sin(Xtest)
data = (X, Y)
plt.plot(X, Y, 'xk')
plt.show()
train_dataset = tf.data.Dataset.from_tensor_slices((X, Y))
test_dataset = tf.data.Dataset.from_tensor_slices((Xtest, Ytest))
batch_size = 32
num_features = 10
prefetch_size = num_train_data // 2
shuffle_buffer_size = num_train_data // 2
num_batches_per_epoch = num_train_data // batch_size
original_train_dataset = train_dataset
train_dataset = train_dataset.repeat()\
.prefetch(prefetch_size)\
.shuffle(buffer_size=shuffle_buffer_size)\
.batch(batch_size)
print(f"prefetch_size={prefetch_size}")
print(f"shuffle_buffer_size={shuffle_buffer_size}")
print(f"num_batches_per_epoch={num_batches_per_epoch}")
onential(variance=2.)
likelihood = gpflow.likelihoods.Gaussian()
inducing_variable = np.linspace(0, 10, num_features).reshape(-1, 1)
model = gpflow.models.SVGP(kernel=kernel, likelihood=likelihood, inducing_variable=inducing_variable)
from gpflow.utilities import set_trainable
set_trainable(likelihood, False)
set_trainable(kernel.variance, False)
set_trainable(likelihood, True)
set_trainable(kernel.variance, True)
kernel.lengthscale.assign(0.5)
from gpflow.utilities import print_summary
print_summary(model)
gpflow.config.set_default_summary_fmt("notebook")
print_summary(model)
model
s tape:
tape.watch(model.trainable_variables)
obj = - model.elbo(data)
grads = tape.gradient(obj, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
def optimization_step(model: gpflow.models.SVGP, batch: Tuple[tf.Tensor, tf.Tensor]):
with tf.GradientTape(watch_accessed_variables=False) as tape:
tape.watch(model.trainable_variables)
obj = - model.elbo(batch)
grads = tape.gradient(obj, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
def simple_training_loop(model: gpflow.models.SVGP, epochs: int = 1, logging_epoch_freq: int = 10):
batches = iter(train_dataset)
tf_optimization_step = tf.function(optimization_step, autograph=False)
for epoch in range(epochs):
for _ in range(num_batches_per_epoch):
tf_optimization_step(model, next(batches))
epoch_id = epoch + 1
if epoch_id % logging_epoch_freq == 0:
tf.print(f"Epoch {epoch_id}: ELBO (train) {model.elbo(data)}")
simple_training_loop(model, epochs=10, logging_epoch_freq=2)
2_plotting import plotting_regression, summary_matplotlib_image
samples_input = tf.cast(np.linspace(0, 10, 100).reshape(100, 1), default_float())
def monitored_training_loop(model: gpflow.models.SVGP, logdir: str,
epochs: int = 1, logging_epoch_freq: int = 10,
num_samples: int = 10):
summary_writer = tf.summary.create_file_writer(logdir)
tf_optimization_step = tf.function(optimization_step)
batches = iter(train_dataset)
with summary_writer.as_default():
for epoch in range(epochs):
for _ in range(num_batches_per_epoch):
tf_optimization_step(model, next(batches))
epoch_id = epoch + 1
if epoch_id % logging_epoch_freq == 0:
tf.print(f"Epoch {epoch_id}: ELBO (train) {model.elbo(data)}")
mean, var = model.predict_f(samples_input)
samples = model.predict_f_samples(samples_input, num_samples)
fig = plotting_regression(X, Y, samples_input, mean, var, samples)
summary_matplotlib_image(dict(model_samples=fig), step=epoch)
tf.summary.scalar('elbo', data=model.elbo(data), step=epoch)
tf.summary.scalar('likelihood/variance', data=model.likelihood.variance, step=epoch)
tf.summary.scalar('kernel/lengthscale', data=model.kernel.lengthscale, step=epoch)
tf.summary.scalar('kernel/variance', data=model.kernel.variance, step=epoch)
model = gpflow.models.SVGP(kernel=kernel, likelihood=likelihood, inducing_variable=inducing_variable)
output_logdir = enumerated_logdir()
monitored_training_loop(model, output_logdir, epochs=1000, logging_epoch_freq=100)
train.Checkpoint(a=a)
manager = tf.train.CheckpointManager(ckpt, output_logdir, max_to_keep=3)
# %% [markdown]
# Save the variable `a` and change its value right after:
# %%
manager.save()
_ = a.assign(0.33)
# %% [markdown]
# Now we can restore the old variable value:
# %%
print(f"Current value of variable a: {a.numpy():0.3f}")
ckpt.restore(manager.latest_checkpoint)
print(f"Value of variable a after restore: {a.numpy():0.3f}")
# %% [markdown]
# In the example below, we modify a simple training loop to save the model every 100 epochs using the `CheckpointManager`.
# %%
model = gpflow.models.SVGP(kernel=kernel, likelihood=likelihood, inducing_variable=inducing_variable)
def checkpointing_training_loop(model: gpflow.models.SVGP,
batch_size: int,
epochs: int,
manager: tf.train.CheckpointManager,
logging_epoch_freq: int = 100,
epoch_var: Optional[tf.Variable] = None,
step_var: Optional[tf.Variable] = None):
tf_optimization_step = tf.function(optimization_step)
batches = iter(train_dataset)
for epoch in range(epochs):
for step in range(num_batches_per_epoch):
tf_optimization_step(model, next(batches))
if step_var is not None:
step_var.assign(epoch * num_batches_per_epoch + step + 1)
if epoch_var is not None:
epoch_var.assign(epoch + 1)
epoch_id = epoch + 1
if epoch_id % logging_epoch_freq == 0:
ckpt_path = manager.save()
tf.print(f"Epoch {epoch_id}: ELBO (train) {model.elbo(data)}, saved at {ckpt_path}")
# %%
step_var = tf.Variable(1, dtype=tf.int32, trainable=False)
epoch_var = tf.Variable(1, dtype=tf.int32, trainable=False)
ckpt = tf.train.Checkpoint(model=model, step=step_var, epoch=epoch_var)
manager = tf.train.CheckpointManager(ckpt, output_logdir, max_to_keep=5)
print(f"Checkpoint folder path at: {output_logdir}")
checkpointing_training_loop(model, batch_size=batch_size, epochs=1000, manager=manager, epoch_var=epoch_var, step_var=step_var)
# %% [markdown]
# After the models have been saved, we can restore them using ```tf.train.Checkpoint.restore``` and assert that their performance corresponds to that logged during training.
# %%
for i, recorded_checkpoint in enumerate(manager.checkpoints):
ckpt.restore(recorded_checkpoint)
print(f"{i} restored model from epoch {int(epoch_var)} [step:{int(step_var)}] : ELBO training set {model.elbo(data)}")
# %% [markdown]
# ## Copying (hyper)parameter values between models
#
# It is easy to interact with the set of all parameters of a model or a subcomponent programmatically.
#
# The following returns a dictionary of all parameters within
# %%
model = gpflow.models.SGPR(data, kernel=kernel, inducing_variable=inducing_variable)
# %%
gpflow.utilities.parameter_dict(model)
# %% [markdown]
# Such a dictionary can be assigned back to this model (or another model with the same tree of parameters) as follows:
# %%
params = gpflow.utilities.parameter_dict(model)
gpflow.utilities.multiple_assign(model, params)
| true | true |
f7ff219d0bf42aa764301ad0fe2f8f1cd022eaf5 | 8,190 | py | Python | test/functional/wallet_basic.py | Trittium/trittium | 1342377171ee59aeb505d7a95cd87074ca52684a | [
"MIT"
] | 20 | 2018-07-05T07:38:37.000Z | 2021-11-28T14:57:47.000Z | test/functional/wallet_basic.py | Trittium/trittium | 1342377171ee59aeb505d7a95cd87074ca52684a | [
"MIT"
] | 4 | 2019-04-08T06:50:39.000Z | 2021-03-31T15:09:47.000Z | test/functional/wallet_basic.py | Trittium/trittium | 1342377171ee59aeb505d7a95cd87074ca52684a | [
"MIT"
] | 23 | 2018-05-08T14:37:26.000Z | 2021-03-09T17:02:07.000Z | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the wallet."""
from test_framework.test_framework import PivxTestFramework
from test_framework.util import (
assert_array_result,
assert_equal,
assert_fee_amount,
assert_raises_rpc_error,
connect_nodes,
Decimal,
wait_until,
)
class WalletTest(PivxTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.setup_clean_chain = True
def setup_network(self):
self.add_nodes(4)
self.start_node(0)
self.start_node(1)
self.start_node(2)
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[1], 2)
connect_nodes(self.nodes[0], 2)
self.sync_all([self.nodes[0:3]])
def get_vsize(self, txn):
return self.nodes[0].decoderawtransaction(txn)['size']
def run_test(self):
# Check that there's no UTXO on none of the nodes
assert_equal(len(self.nodes[0].listunspent()), 0)
assert_equal(len(self.nodes[1].listunspent()), 0)
assert_equal(len(self.nodes[2].listunspent()), 0)
self.log.info("Mining blocks...")
self.nodes[0].generate(1)
walletinfo = self.nodes[0].getwalletinfo()
assert_equal(walletinfo['immature_balance'], 250)
assert_equal(walletinfo['balance'], 0)
self.sync_all([self.nodes[0:3]])
self.nodes[1].generate(101)
self.sync_all([self.nodes[0:3]])
assert_equal(self.nodes[0].getbalance(), 250)
assert_equal(self.nodes[1].getbalance(), 250)
assert_equal(self.nodes[2].getbalance(), 0)
# Check that only first and second nodes have UTXOs
utxos = self.nodes[0].listunspent()
assert_equal(len(utxos), 1)
assert_equal(len(self.nodes[1].listunspent()), 1)
assert_equal(len(self.nodes[2].listunspent()), 0)
walletinfo = self.nodes[0].getwalletinfo()
assert_equal(walletinfo['immature_balance'], 0)
# Exercise locking of unspent outputs
unspent_0 = self.nodes[1].listunspent()[0]
unspent_0 = {"txid": unspent_0["txid"], "vout": unspent_0["vout"]}
self.nodes[1].lockunspent(False, [unspent_0])
assert_raises_rpc_error(-4, "Insufficient funds", self.nodes[1].sendtoaddress, self.nodes[1].getnewaddress(), 20)
assert_equal([unspent_0], self.nodes[1].listlockunspent())
self.nodes[1].lockunspent(True, [unspent_0])
assert_equal(len(self.nodes[1].listlockunspent()), 0)
# Send 21 TRTT from 1 to 0 using sendtoaddress call.
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 21)
self.nodes[1].generate(1)
self.sync_all([self.nodes[0:3]])
# Node0 should have two unspent outputs.
# Create a couple of transactions to send them to node2, submit them through
# node1, and make sure both node0 and node2 pick them up properly:
node0utxos = self.nodes[0].listunspent(1)
assert_equal(len(node0utxos), 2)
# create both transactions
fee_per_kbyte = Decimal('0.001')
txns_to_send = []
for utxo in node0utxos:
inputs = []
outputs = {}
inputs.append({ "txid" : utxo["txid"], "vout" : utxo["vout"]})
outputs[self.nodes[2].getnewaddress()] = float(utxo["amount"]) - float(fee_per_kbyte)
raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
txns_to_send.append(self.nodes[0].signrawtransaction(raw_tx))
# Have node 1 (miner) send the transactions
self.nodes[1].sendrawtransaction(txns_to_send[0]["hex"], True)
self.nodes[1].sendrawtransaction(txns_to_send[1]["hex"], True)
# Have node1 mine a block to confirm transactions:
self.nodes[1].generate(1)
self.sync_all([self.nodes[0:3]])
assert_equal(self.nodes[0].getbalance(), 0)
node_2_expected_bal = Decimal('250') + Decimal('21') - 2 * fee_per_kbyte
node_2_bal = self.nodes[2].getbalance()
assert_equal(node_2_bal, node_2_expected_bal)
# Send 10 TRTT normal
address = self.nodes[0].getnewaddress("test")
self.nodes[2].settxfee(float(fee_per_kbyte))
txid = self.nodes[2].sendtoaddress(address, 10, "", "")
fee = self.nodes[2].gettransaction(txid)["fee"]
node_2_bal -= (Decimal('10') - fee)
assert_equal(self.nodes[2].getbalance(), node_2_bal)
self.nodes[2].generate(1)
self.sync_all([self.nodes[0:3]])
node_0_bal = self.nodes[0].getbalance()
assert_equal(node_0_bal, Decimal('10'))
# Sendmany 10 TRTT
txid = self.nodes[2].sendmany('', {address: 10}, 0, "")
fee = self.nodes[2].gettransaction(txid)["fee"]
self.nodes[2].generate(1)
self.sync_all([self.nodes[0:3]])
node_0_bal += Decimal('10')
node_2_bal -= (Decimal('10') - fee)
assert_equal(self.nodes[2].getbalance(), node_2_bal)
assert_equal(self.nodes[0].getbalance(), node_0_bal)
assert_fee_amount(-fee, self.get_vsize(self.nodes[2].getrawtransaction(txid)), fee_per_kbyte)
# This will raise an exception since generate does not accept a string
assert_raises_rpc_error(-1, "not an integer", self.nodes[0].generate, "2")
# Import address and private key to check correct behavior of spendable unspents
# 1. Send some coins to generate new UTXO
address_to_import = self.nodes[2].getnewaddress()
self.nodes[0].sendtoaddress(address_to_import, 1)
self.nodes[0].generate(1)
self.sync_all([self.nodes[0:3]])
# 2. Import address from node2 to node1
self.nodes[1].importaddress(address_to_import)
# 3. Validate that the imported address is watch-only on node1
assert(self.nodes[1].validateaddress(address_to_import)["iswatchonly"])
# 4. Check that the unspents after import are not spendable
listunspent = self.nodes[1].listunspent(1, 9999999, [], 2)
assert_array_result(listunspent,
{"address": address_to_import},
{"spendable": False})
# 5. Import private key of the previously imported address on node1
priv_key = self.nodes[2].dumpprivkey(address_to_import)
self.nodes[1].importprivkey(priv_key)
# 6. Check that the unspents are now spendable on node1
assert_array_result(self.nodes[1].listunspent(),
{"address": address_to_import},
{"spendable": True})
# check if wallet or blochchain maintenance changes the balance
self.sync_all([self.nodes[0:3]])
blocks = self.nodes[0].generate(2)
self.sync_all([self.nodes[0:3]])
balance_nodes = [self.nodes[i].getbalance() for i in range(3)]
block_count = self.nodes[0].getblockcount()
maintenance = [
'-rescan',
'-reindex',
]
for m in maintenance:
self.log.info("check " + m)
self.stop_nodes()
# set lower ancestor limit for later
self.start_node(0, [m])
self.start_node(1, [m])
self.start_node(2, [m])
if m == '-reindex':
# reindex will leave rpc warm up "early"; Wait for it to finish
wait_until(lambda: [block_count] * 3 == [self.nodes[i].getblockcount() for i in range(3)])
assert_equal(balance_nodes, [self.nodes[i].getbalance() for i in range(3)])
# Exercise listsinceblock with the last two blocks
coinbase_tx_1 = self.nodes[0].listsinceblock(blocks[0])
assert_equal(coinbase_tx_1["lastblock"], blocks[1])
assert_equal(len(coinbase_tx_1["transactions"]), 1)
assert_equal(coinbase_tx_1["transactions"][0]["blockhash"], blocks[1])
assert_equal(len(self.nodes[0].listsinceblock(blocks[1])["transactions"]), 0)
if __name__ == '__main__':
WalletTest().main()
| 41.573604 | 121 | 0.62906 |
from test_framework.test_framework import PivxTestFramework
from test_framework.util import (
assert_array_result,
assert_equal,
assert_fee_amount,
assert_raises_rpc_error,
connect_nodes,
Decimal,
wait_until,
)
class WalletTest(PivxTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.setup_clean_chain = True
def setup_network(self):
self.add_nodes(4)
self.start_node(0)
self.start_node(1)
self.start_node(2)
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[1], 2)
connect_nodes(self.nodes[0], 2)
self.sync_all([self.nodes[0:3]])
def get_vsize(self, txn):
return self.nodes[0].decoderawtransaction(txn)['size']
def run_test(self):
assert_equal(len(self.nodes[0].listunspent()), 0)
assert_equal(len(self.nodes[1].listunspent()), 0)
assert_equal(len(self.nodes[2].listunspent()), 0)
self.log.info("Mining blocks...")
self.nodes[0].generate(1)
walletinfo = self.nodes[0].getwalletinfo()
assert_equal(walletinfo['immature_balance'], 250)
assert_equal(walletinfo['balance'], 0)
self.sync_all([self.nodes[0:3]])
self.nodes[1].generate(101)
self.sync_all([self.nodes[0:3]])
assert_equal(self.nodes[0].getbalance(), 250)
assert_equal(self.nodes[1].getbalance(), 250)
assert_equal(self.nodes[2].getbalance(), 0)
# Check that only first and second nodes have UTXOs
utxos = self.nodes[0].listunspent()
assert_equal(len(utxos), 1)
assert_equal(len(self.nodes[1].listunspent()), 1)
assert_equal(len(self.nodes[2].listunspent()), 0)
walletinfo = self.nodes[0].getwalletinfo()
assert_equal(walletinfo['immature_balance'], 0)
# Exercise locking of unspent outputs
unspent_0 = self.nodes[1].listunspent()[0]
unspent_0 = {"txid": unspent_0["txid"], "vout": unspent_0["vout"]}
self.nodes[1].lockunspent(False, [unspent_0])
assert_raises_rpc_error(-4, "Insufficient funds", self.nodes[1].sendtoaddress, self.nodes[1].getnewaddress(), 20)
assert_equal([unspent_0], self.nodes[1].listlockunspent())
self.nodes[1].lockunspent(True, [unspent_0])
assert_equal(len(self.nodes[1].listlockunspent()), 0)
# Send 21 TRTT from 1 to 0 using sendtoaddress call.
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 21)
self.nodes[1].generate(1)
self.sync_all([self.nodes[0:3]])
# Node0 should have two unspent outputs.
# Create a couple of transactions to send them to node2, submit them through
# node1, and make sure both node0 and node2 pick them up properly:
node0utxos = self.nodes[0].listunspent(1)
assert_equal(len(node0utxos), 2)
# create both transactions
fee_per_kbyte = Decimal('0.001')
txns_to_send = []
for utxo in node0utxos:
inputs = []
outputs = {}
inputs.append({ "txid" : utxo["txid"], "vout" : utxo["vout"]})
outputs[self.nodes[2].getnewaddress()] = float(utxo["amount"]) - float(fee_per_kbyte)
raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
txns_to_send.append(self.nodes[0].signrawtransaction(raw_tx))
# Have node 1 (miner) send the transactions
self.nodes[1].sendrawtransaction(txns_to_send[0]["hex"], True)
self.nodes[1].sendrawtransaction(txns_to_send[1]["hex"], True)
# Have node1 mine a block to confirm transactions:
self.nodes[1].generate(1)
self.sync_all([self.nodes[0:3]])
assert_equal(self.nodes[0].getbalance(), 0)
node_2_expected_bal = Decimal('250') + Decimal('21') - 2 * fee_per_kbyte
node_2_bal = self.nodes[2].getbalance()
assert_equal(node_2_bal, node_2_expected_bal)
# Send 10 TRTT normal
address = self.nodes[0].getnewaddress("test")
self.nodes[2].settxfee(float(fee_per_kbyte))
txid = self.nodes[2].sendtoaddress(address, 10, "", "")
fee = self.nodes[2].gettransaction(txid)["fee"]
node_2_bal -= (Decimal('10') - fee)
assert_equal(self.nodes[2].getbalance(), node_2_bal)
self.nodes[2].generate(1)
self.sync_all([self.nodes[0:3]])
node_0_bal = self.nodes[0].getbalance()
assert_equal(node_0_bal, Decimal('10'))
# Sendmany 10 TRTT
txid = self.nodes[2].sendmany('', {address: 10}, 0, "")
fee = self.nodes[2].gettransaction(txid)["fee"]
self.nodes[2].generate(1)
self.sync_all([self.nodes[0:3]])
node_0_bal += Decimal('10')
node_2_bal -= (Decimal('10') - fee)
assert_equal(self.nodes[2].getbalance(), node_2_bal)
assert_equal(self.nodes[0].getbalance(), node_0_bal)
assert_fee_amount(-fee, self.get_vsize(self.nodes[2].getrawtransaction(txid)), fee_per_kbyte)
# This will raise an exception since generate does not accept a string
assert_raises_rpc_error(-1, "not an integer", self.nodes[0].generate, "2")
# Import address and private key to check correct behavior of spendable unspents
# 1. Send some coins to generate new UTXO
address_to_import = self.nodes[2].getnewaddress()
self.nodes[0].sendtoaddress(address_to_import, 1)
self.nodes[0].generate(1)
self.sync_all([self.nodes[0:3]])
# 2. Import address from node2 to node1
self.nodes[1].importaddress(address_to_import)
# 3. Validate that the imported address is watch-only on node1
assert(self.nodes[1].validateaddress(address_to_import)["iswatchonly"])
# 4. Check that the unspents after import are not spendable
listunspent = self.nodes[1].listunspent(1, 9999999, [], 2)
assert_array_result(listunspent,
{"address": address_to_import},
{"spendable": False})
# 5. Import private key of the previously imported address on node1
priv_key = self.nodes[2].dumpprivkey(address_to_import)
self.nodes[1].importprivkey(priv_key)
# 6. Check that the unspents are now spendable on node1
assert_array_result(self.nodes[1].listunspent(),
{"address": address_to_import},
{"spendable": True})
# check if wallet or blochchain maintenance changes the balance
self.sync_all([self.nodes[0:3]])
blocks = self.nodes[0].generate(2)
self.sync_all([self.nodes[0:3]])
balance_nodes = [self.nodes[i].getbalance() for i in range(3)]
block_count = self.nodes[0].getblockcount()
maintenance = [
'-rescan',
'-reindex',
]
for m in maintenance:
self.log.info("check " + m)
self.stop_nodes()
# set lower ancestor limit for later
self.start_node(0, [m])
self.start_node(1, [m])
self.start_node(2, [m])
if m == '-reindex':
# reindex will leave rpc warm up "early"; Wait for it to finish
wait_until(lambda: [block_count] * 3 == [self.nodes[i].getblockcount() for i in range(3)])
assert_equal(balance_nodes, [self.nodes[i].getbalance() for i in range(3)])
# Exercise listsinceblock with the last two blocks
coinbase_tx_1 = self.nodes[0].listsinceblock(blocks[0])
assert_equal(coinbase_tx_1["lastblock"], blocks[1])
assert_equal(len(coinbase_tx_1["transactions"]), 1)
assert_equal(coinbase_tx_1["transactions"][0]["blockhash"], blocks[1])
assert_equal(len(self.nodes[0].listsinceblock(blocks[1])["transactions"]), 0)
if __name__ == '__main__':
WalletTest().main()
| true | true |
f7ff23475ff5889fc69efe2ea1de0977e3323ebb | 5,707 | py | Python | accelbyte_py_sdk/api/iam/operations/roles/get_role.py | AccelByte/accelbyte-python-sdk | dcd311fad111c59da828278975340fb92e0f26f7 | [
"MIT"
] | null | null | null | accelbyte_py_sdk/api/iam/operations/roles/get_role.py | AccelByte/accelbyte-python-sdk | dcd311fad111c59da828278975340fb92e0f26f7 | [
"MIT"
] | 1 | 2021-10-13T03:46:58.000Z | 2021-10-13T03:46:58.000Z | accelbyte_py_sdk/api/iam/operations/roles/get_role.py | AccelByte/accelbyte-python-sdk | dcd311fad111c59da828278975340fb92e0f26f7 | [
"MIT"
] | null | null | null | # Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template file: justice_py_sdk_codegen/__main__.py
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
# justice-iam-service (5.10.1)
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from .....core import Operation
from .....core import HeaderStr
from .....core import HttpResponse
from ...models import ModelRoleResponse
class GetRole(Operation):
"""Get Role (GetRole)
Required permission 'ROLE [READ]'
Required Permission(s):
- ROLE [READ]
Properties:
url: /iam/roles/{roleId}
method: GET
tags: ["Roles"]
consumes: ["application/json"]
produces: ["application/json"]
securities: [BEARER_AUTH]
role_id: (roleId) REQUIRED str in path
Responses:
200: OK - ModelRoleResponse (OK)
401: Unauthorized - (Unauthorized access)
403: Forbidden - (Forbidden)
404: Not Found - (Data not found)
"""
# region fields
_url: str = "/iam/roles/{roleId}"
_method: str = "GET"
_consumes: List[str] = ["application/json"]
_produces: List[str] = ["application/json"]
_securities: List[List[str]] = [["BEARER_AUTH"]]
_location_query: str = None
role_id: str # REQUIRED in [path]
# endregion fields
# region properties
@property
def url(self) -> str:
return self._url
@property
def method(self) -> str:
return self._method
@property
def consumes(self) -> List[str]:
return self._consumes
@property
def produces(self) -> List[str]:
return self._produces
@property
def securities(self) -> List[List[str]]:
return self._securities
@property
def location_query(self) -> str:
return self._location_query
# endregion properties
# region get methods
# endregion get methods
# region get_x_params methods
def get_all_params(self) -> dict:
return {
"path": self.get_path_params(),
}
def get_path_params(self) -> dict:
result = {}
if hasattr(self, "role_id"):
result["roleId"] = self.role_id
return result
# endregion get_x_params methods
# region is/has methods
# endregion is/has methods
# region with_x methods
def with_role_id(self, value: str) -> GetRole:
self.role_id = value
return self
# endregion with_x methods
# region to methods
def to_dict(self, include_empty: bool = False) -> dict:
result: dict = {}
if hasattr(self, "role_id") and self.role_id:
result["roleId"] = str(self.role_id)
elif include_empty:
result["roleId"] = ""
return result
# endregion to methods
# region response methods
# noinspection PyMethodMayBeStatic
def parse_response(self, code: int, content_type: str, content: Any) -> Tuple[Union[None, ModelRoleResponse], Union[None, HttpResponse]]:
"""Parse the given response.
200: OK - ModelRoleResponse (OK)
401: Unauthorized - (Unauthorized access)
403: Forbidden - (Forbidden)
404: Not Found - (Data not found)
---: HttpResponse (Undocumented Response)
---: HttpResponse (Unexpected Content-Type Error)
---: HttpResponse (Unhandled Error)
"""
pre_processed_response, error = self.pre_process_response(code=code, content_type=content_type, content=content)
if error is not None:
return None, None if error.is_no_content() else error
code, content_type, content = pre_processed_response
if code == 200:
return ModelRoleResponse.create_from_dict(content), None
if code == 401:
return None, HttpResponse.create(code, "Unauthorized")
if code == 403:
return None, HttpResponse.create(code, "Forbidden")
if code == 404:
return None, HttpResponse.create(code, "Not Found")
return None, self.handle_undocumented_response(code=code, content_type=content_type, content=content)
# endregion response methods
# region static methods
@classmethod
def create(
cls,
role_id: str,
) -> GetRole:
instance = cls()
instance.role_id = role_id
return instance
@classmethod
def create_from_dict(cls, dict_: dict, include_empty: bool = False) -> GetRole:
instance = cls()
if "roleId" in dict_ and dict_["roleId"] is not None:
instance.role_id = str(dict_["roleId"])
elif include_empty:
instance.role_id = ""
return instance
@staticmethod
def get_field_info() -> Dict[str, str]:
return {
"roleId": "role_id",
}
@staticmethod
def get_required_map() -> Dict[str, bool]:
return {
"roleId": True,
}
# endregion static methods
| 25.591928 | 141 | 0.62362 |
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from .....core import Operation
from .....core import HeaderStr
from .....core import HttpResponse
from ...models import ModelRoleResponse
class GetRole(Operation):
_url: str = "/iam/roles/{roleId}"
_method: str = "GET"
_consumes: List[str] = ["application/json"]
_produces: List[str] = ["application/json"]
_securities: List[List[str]] = [["BEARER_AUTH"]]
_location_query: str = None
role_id: str
@property
def url(self) -> str:
return self._url
@property
def method(self) -> str:
return self._method
@property
def consumes(self) -> List[str]:
return self._consumes
@property
def produces(self) -> List[str]:
return self._produces
@property
def securities(self) -> List[List[str]]:
return self._securities
@property
def location_query(self) -> str:
return self._location_query
def get_all_params(self) -> dict:
return {
"path": self.get_path_params(),
}
def get_path_params(self) -> dict:
result = {}
if hasattr(self, "role_id"):
result["roleId"] = self.role_id
return result
def with_role_id(self, value: str) -> GetRole:
self.role_id = value
return self
def to_dict(self, include_empty: bool = False) -> dict:
result: dict = {}
if hasattr(self, "role_id") and self.role_id:
result["roleId"] = str(self.role_id)
elif include_empty:
result["roleId"] = ""
return result
def parse_response(self, code: int, content_type: str, content: Any) -> Tuple[Union[None, ModelRoleResponse], Union[None, HttpResponse]]:
pre_processed_response, error = self.pre_process_response(code=code, content_type=content_type, content=content)
if error is not None:
return None, None if error.is_no_content() else error
code, content_type, content = pre_processed_response
if code == 200:
return ModelRoleResponse.create_from_dict(content), None
if code == 401:
return None, HttpResponse.create(code, "Unauthorized")
if code == 403:
return None, HttpResponse.create(code, "Forbidden")
if code == 404:
return None, HttpResponse.create(code, "Not Found")
return None, self.handle_undocumented_response(code=code, content_type=content_type, content=content)
@classmethod
def create(
cls,
role_id: str,
) -> GetRole:
instance = cls()
instance.role_id = role_id
return instance
@classmethod
def create_from_dict(cls, dict_: dict, include_empty: bool = False) -> GetRole:
instance = cls()
if "roleId" in dict_ and dict_["roleId"] is not None:
instance.role_id = str(dict_["roleId"])
elif include_empty:
instance.role_id = ""
return instance
@staticmethod
def get_field_info() -> Dict[str, str]:
return {
"roleId": "role_id",
}
@staticmethod
def get_required_map() -> Dict[str, bool]:
return {
"roleId": True,
}
| true | true |
f7ff23b8ef6be5806b413294ee2ac4762875de0e | 1,466 | py | Python | classification/Inception-V3_PyTorch/dataloader.py | Divyanshu23/model-zoo | 2eea6df691d302e182bb1ff8ec5af3542de562ba | [
"MIT"
] | 43 | 2020-05-16T21:05:34.000Z | 2022-02-08T11:33:29.000Z | classification/Inception-V3_PyTorch/dataloader.py | Divyanshu23/model-zoo | 2eea6df691d302e182bb1ff8ec5af3542de562ba | [
"MIT"
] | 52 | 2020-05-14T16:18:08.000Z | 2021-11-02T19:13:47.000Z | classification/Inception-V3_PyTorch/dataloader.py | Divyanshu23/model-zoo | 2eea6df691d302e182bb1ff8ec5af3542de562ba | [
"MIT"
] | 69 | 2020-05-14T13:39:23.000Z | 2021-07-30T00:51:27.000Z | import shutil
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader, random_split
from torchvision import transforms, datasets
def load_cifar():
transform = transforms.Compose([transforms.Resize((32, 32)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.5], std=[0.5])])
train_dataset = datasets.CIFAR10(
'./data', train=True, download=True, transform=transform)
test_dataset = datasets.CIFAR10(
'./data', train=False, download=True, transform=transform)
# Split dataset into training set and validation set.
train_dataset, val_dataset = random_split(train_dataset, (45000, 5000))
print("Image Shape: {}".format(
train_dataset[0][0].numpy().shape), end='\n\n')
print("Training Set: {} samples".format(len(train_dataset)))
print("Validation Set: {} samples".format(len(val_dataset)))
print("Test Set: {} samples".format(len(test_dataset)))
BATCH_SIZE = 32
# Create iterator.
train_loader = DataLoader(
train_dataset, batch_size=BATCH_SIZE, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=BATCH_SIZE, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=True)
# Delete the data/ folder.
shutil.rmtree('./data')
return train_loader, val_loader, test_loader
| 35.756098 | 82 | 0.651432 | import shutil
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader, random_split
from torchvision import transforms, datasets
def load_cifar():
transform = transforms.Compose([transforms.Resize((32, 32)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.5], std=[0.5])])
train_dataset = datasets.CIFAR10(
'./data', train=True, download=True, transform=transform)
test_dataset = datasets.CIFAR10(
'./data', train=False, download=True, transform=transform)
train_dataset, val_dataset = random_split(train_dataset, (45000, 5000))
print("Image Shape: {}".format(
train_dataset[0][0].numpy().shape), end='\n\n')
print("Training Set: {} samples".format(len(train_dataset)))
print("Validation Set: {} samples".format(len(val_dataset)))
print("Test Set: {} samples".format(len(test_dataset)))
BATCH_SIZE = 32
train_loader = DataLoader(
train_dataset, batch_size=BATCH_SIZE, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=BATCH_SIZE, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=True)
shutil.rmtree('./data')
return train_loader, val_loader, test_loader
| true | true |
f7ff24ec1f7ee94b7822f70639f2d123168ebfe5 | 11,105 | py | Python | tensorflow/contrib/batching/python/ops/batch_ops_test.py | imdone/tensorflow | bb4d1ef3861c83627ee9586b85ac3070a7d38335 | [
"Apache-2.0"
] | 1 | 2021-04-16T14:53:22.000Z | 2021-04-16T14:53:22.000Z | tensorflow/contrib/batching/python/ops/batch_ops_test.py | imdone/tensorflow | bb4d1ef3861c83627ee9586b85ac3070a7d38335 | [
"Apache-2.0"
] | 10 | 2018-02-04T18:41:52.000Z | 2018-05-02T09:00:46.000Z | tensorflow/contrib/batching/python/ops/batch_ops_test.py | imdone/tensorflow | bb4d1ef3861c83627ee9586b85ac3070a7d38335 | [
"Apache-2.0"
] | 4 | 2018-01-17T14:22:49.000Z | 2018-02-27T15:06:41.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the currently experimental in-graph batch ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import time
from tensorflow.contrib.batching.python.ops import batch_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import script_ops
from tensorflow.python.platform import test
def delayed_plus1(x):
  """Return ``x + 1`` after pausing for 100 milliseconds.

  Used (via py_func) to artificially delay one unbatch pipeline so that
  the timeout behavior of the other pipeline can be exercised.
  """
  incremented = x + 1
  time.sleep(0.1)
  return incremented
class BatchOpsTest(test.TestCase):
  """Tests for batch_ops.{un,}batch.

  Each test drives the batch op from two concurrent callers (the main thread
  plus one worker thread), since a batch is only emitted once enough inputs
  have arrived (max_batch_size) or the batch timeout fires.
  """
  def testBasicBatch(self):
    """Tests that a single batched tensor executes together and only once."""
    with self.test_session() as sess:
      inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
      # batch_timeout_micros is huge (36s) so the batch can only be emitted
      # because both feeds arrived (max_batch_size=2), never by timeout.
      batched, index, _ = batch_ops.batch(
          [inp], num_batch_threads=1, max_batch_size=2,
          batch_timeout_micros=36000000, grad_timeout_micros=0,
          batching_queue="")
      thread_results = []
      def worker():
        thread_results.extend(
            sess.run([batched, index], feed_dict={inp: [1]}))
      worker_thread = threading.Thread(target=worker)
      worker_thread.start()
      main_results = sess.run([batched, index], feed_dict={inp: [2]})
      worker_thread.join()
      # At this point either the thread or the main did the batch and the other
      # should have empty results.
      if list(thread_results[0][0]):
        batch_t = thread_results[0][0]
        index_t = thread_results[1]
        empty_b = main_results[0][0]
        empty_m = main_results[1]
      else:
        batch_t = main_results[0][0]
        index_t = main_results[1]
        empty_b = thread_results[0][0]
        empty_m = thread_results[1]
      # Check that both the inputs made it out exactly once.
      self.assertAllEqual(sorted(batch_t), (1, 2))
      # Check that we get 2 rows in the index tensor (one per batched input).
      self.assertEqual(len(index_t), 2)
      # Check that the other ones are empty.
      self.assertEqual(len(empty_b), 0)
      self.assertEqual(len(empty_m), 0)
  def testBatchWithPadding(self):
    """Test that batching with padding up to an allowed batch size works."""
    with self.test_session() as sess:
      inp = array_ops.placeholder(dtype=dtypes.int32, shape=[2])
      batched, index, _ = batch_ops.batch(
          [inp], num_batch_threads=1, max_batch_size=10,
          batch_timeout_micros=100000,  # 100ms
          allowed_batch_sizes=[5, 10],
          grad_timeout_micros=0, batching_queue="")
      thread_results = []
      def worker():
        thread_results.extend(
            sess.run([batched, index], feed_dict={inp: [1, 3]}))
      worker_thread = threading.Thread(target=worker)
      worker_thread.start()
      main_results = sess.run([batched, index], feed_dict={inp: [2, 4]})
      worker_thread.join()
      # At this point either the thread or the main did the batch and the other
      # should have empty results.
      if list(thread_results[0][0]):
        batch_t = thread_results[0][0]
      else:
        batch_t = main_results[0][0]
      # Check that the batch tensor incorporates the padding: two size-2 feeds
      # give 4 rows, padded up to the next allowed batch size of 5.
      self.assertEqual(len(batch_t), 5)
  def testMultipleBatch(self):
    """Tests that multiple batched tensors execute together."""
    with self.test_session() as sess:
      inp0 = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
      inp1 = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
      batched, _, _ = batch_ops.batch(
          [inp0, inp1],
          num_batch_threads=1,
          max_batch_size=2,
          batch_timeout_micros=36000000,
          grad_timeout_micros=0,
          batching_queue="")
      thread_results = []
      def worker():
        thread_results.extend(
            sess.run([batched], feed_dict={inp0: [1],
                                           inp1: [2]}))
      worker_thread = threading.Thread(target=worker)
      worker_thread.start()
      main_results = sess.run([batched], feed_dict={inp0: [2], inp1: [3]})
      worker_thread.join()
      # At this point either the thread or the main did the batch and the other
      # should have empty results.
      if list(thread_results[0][0]):
        batch_t = thread_results[0]
        empty_t = main_results[0]
      else:
        batch_t = main_results[0]
        empty_t = thread_results[0]
      # Assert that the tensors were batched together: both positions of the
      # input list carry both callers' values.
      self.assertAllEqual(sorted(batch_t[0]), [1, 2])
      self.assertAllEqual(sorted(batch_t[1]), [2, 3])
      self.assertAllEqual(empty_t[0], [])
      self.assertAllEqual(empty_t[1], [])
  def testIllegalBatchDifferentDim0Sizes(self):
    """Tests illegally feeding tensors with different dim0 sizes."""
    with self.test_session() as sess:
      inp0 = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
      inp1 = array_ops.placeholder(dtype=dtypes.int32, shape=[2])
      batched, index, _ = batch_ops.batch(
          [inp0, inp1], num_batch_threads=1, max_batch_size=2,
          batch_timeout_micros=0, grad_timeout_micros=0, batching_queue="")
      # All tensors in one batch call must share the same 0th-dimension size;
      # feeding [1] next to [1, 2] must be rejected by the op.
      with self.assertRaises(Exception) as raised:
        _ = sess.run([batched, index], feed_dict={inp0: [0], inp1: [1, 2]})
      self.assertGreater(
          raised.exception.message.find("must have equal 0th-dimension size"),
          0)
  def testBasicUnbatch(self):
    """Tests that batch and unbatch work together."""
    with self.test_session() as sess:
      inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
      batched, index, id_t = batch_ops.batch(
          [inp], num_batch_threads=1, max_batch_size=10,
          batch_timeout_micros=100000,  # 100ms
          allowed_batch_sizes=[3, 10],
          grad_timeout_micros=0, batching_queue="")
      # Per-element computation applied to the batched tensor; unbatch routes
      # each result row back to the caller that fed it.
      computation = batched[0] + 1
      result = batch_ops.unbatch(computation, index, id_t,
                                 timeout_micros=1000000, shared_name="unbatch")
      thread_results = []
      def worker():
        thread_results.extend(sess.run([result], feed_dict={inp: [1]}))
      worker_thread = threading.Thread(target=worker)
      worker_thread.start()
      main_results = sess.run([result], feed_dict={inp: [2]})
      worker_thread.join()
      # Each caller gets back its own input plus one.
      self.assertEqual(thread_results[0], [2])
      self.assertEqual(main_results[0], [3])
  def testBasicUnbatchDecorated(self):
    """Tests that the batch_function decorator works."""
    with self.test_session() as sess:
      # batch_function wires up batch -> computation -> unbatch internally;
      # args are (num_batch_threads, max_batch_size, batch_timeout_micros).
      @batch_ops.batch_function(1, 10, 100000)
      def computation(in_t):
        return in_t + 1
      inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
      result = computation(inp)
      thread_results = []
      def worker():
        thread_results.extend(sess.run([result], feed_dict={inp: [1]}))
      worker_thread = threading.Thread(target=worker)
      worker_thread.start()
      main_results = sess.run([result], feed_dict={inp: [2]})
      worker_thread.join()
      self.assertEqual(thread_results[0], [2])
      self.assertEqual(main_results[0], [3])
  def testUnbatchTimeout(self):
    """Tests that the unbatch timeout works."""
    with self.test_session() as sess:
      inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
      batched, index, id_t = batch_ops.batch(
          [inp], num_batch_threads=1, max_batch_size=2,
          batch_timeout_micros=36000000, grad_timeout_micros=0,
          batching_queue="")
      computation = batched[0] + 1
      # Deliberately tiny (10us) so the waiting unbatch call times out before
      # the delayed pipeline delivers the batched tensor.
      timeout_micros = 10
      result = batch_ops.unbatch(computation, index, id_t, timeout_micros,
                                 shared_name="shared_unbatch")
      # Set up a parallel pipeline that delays the computation, but uses the
      # same unbatch resource object as the non-delayed pipeline.
      computation_delayed = script_ops.py_func(delayed_plus1,
                                               [batched[0]],
                                               dtypes.int32)
      result_delayed = batch_ops.unbatch(computation_delayed,
                                         index,
                                         id_t,
                                         timeout_micros,
                                         shared_name="shared_unbatch")
      thread_results = []
      def worker():
        # A first call using the non-delayed pipeline. The batcher will send an
        # empty tensor along the non-delayed pipeline.
        thread_results.extend(sess.run([result], feed_dict={inp: [1]}))
      worker_thread = threading.Thread(target=worker)
      worker_thread.start()
      time.sleep(0.1)  # Ensure the thread's call starts first.
      # A second call using the delayed pipeline. The batcher will send the
      # batched tensor along the delayed pipeline, thus delaying the arrival of
      # the batched tensor at the unbatch op, relative to the empty tensor.
      #
      # TODO (olston, apassos): Avoid relying on the order in which the batch op id:560
      # https://github.com/imdone/tensorflow/issues/561
      # emits the empty tensor versus the batched one.
      _ = sess.run([result_delayed], feed_dict={inp: [2]})
      worker_thread.join()
      # The thread's call should hit the timeout, and thus get 0 results.
      self.assertEqual(len(thread_results), 0)
  def testUnbatchGrad(self):
    """Tests that batch and unbatch are differentiable."""
    with self.test_session() as sess:
      inp = array_ops.placeholder(dtype=dtypes.float32, shape=[1])
      batched, index, id_t = batch_ops.batch(
          [inp], num_batch_threads=1, max_batch_size=2,
          batch_timeout_micros=36000000, grad_timeout_micros=1000000,
          batching_queue="")
      computation = batched[0] * batched[0]
      result = batch_ops.unbatch(computation, index, id_t,
                                 timeout_micros=1000000, shared_name="unbatch")
      grad = gradients_impl.gradients(result, inp)
      thread_results = []
      def worker():
        thread_results.extend(sess.run([grad], feed_dict={inp: [1]}))
      worker_thread = threading.Thread(target=worker)
      worker_thread.start()
      main_results = sess.run([grad], feed_dict={inp: [2]})
      worker_thread.join()
      # d(x*x)/dx = 2x evaluated at each caller's own input (1 and 2).
      self.assertEqual(thread_results[0], [2])
      self.assertEqual(main_results[0], [4])
self.assertEqual(main_results[0], [4])
if __name__ == "__main__":
  # Run all test cases via the TensorFlow test runner.
  test.main()
| 39.946043 | 87 | 0.642954 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import time
from tensorflow.contrib.batching.python.ops import batch_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import script_ops
from tensorflow.python.platform import test
def delayed_plus1(x):
  """Sleep for 100 ms, then return ``x + 1``."""
  time.sleep(0.1)
  return x + 1
class BatchOpsTest(test.TestCase):
def testBasicBatch(self):
with self.test_session() as sess:
inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
batched, index, _ = batch_ops.batch(
[inp], num_batch_threads=1, max_batch_size=2,
batch_timeout_micros=36000000, grad_timeout_micros=0,
batching_queue="")
thread_results = []
def worker():
thread_results.extend(
sess.run([batched, index], feed_dict={inp: [1]}))
worker_thread = threading.Thread(target=worker)
worker_thread.start()
main_results = sess.run([batched, index], feed_dict={inp: [2]})
worker_thread.join()
if list(thread_results[0][0]):
batch_t = thread_results[0][0]
index_t = thread_results[1]
empty_b = main_results[0][0]
empty_m = main_results[1]
else:
batch_t = main_results[0][0]
index_t = main_results[1]
empty_b = thread_results[0][0]
empty_m = thread_results[1]
self.assertAllEqual(sorted(batch_t), (1, 2))
self.assertEqual(len(index_t), 2)
self.assertEqual(len(empty_b), 0)
self.assertEqual(len(empty_m), 0)
def testBatchWithPadding(self):
with self.test_session() as sess:
inp = array_ops.placeholder(dtype=dtypes.int32, shape=[2])
batched, index, _ = batch_ops.batch(
[inp], num_batch_threads=1, max_batch_size=10,
batch_timeout_micros=100000,
allowed_batch_sizes=[5, 10],
grad_timeout_micros=0, batching_queue="")
thread_results = []
def worker():
thread_results.extend(
sess.run([batched, index], feed_dict={inp: [1, 3]}))
worker_thread = threading.Thread(target=worker)
worker_thread.start()
main_results = sess.run([batched, index], feed_dict={inp: [2, 4]})
worker_thread.join()
if list(thread_results[0][0]):
batch_t = thread_results[0][0]
else:
batch_t = main_results[0][0]
self.assertEqual(len(batch_t), 5)
def testMultipleBatch(self):
with self.test_session() as sess:
inp0 = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
inp1 = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
batched, _, _ = batch_ops.batch(
[inp0, inp1],
num_batch_threads=1,
max_batch_size=2,
batch_timeout_micros=36000000,
grad_timeout_micros=0,
batching_queue="")
thread_results = []
def worker():
thread_results.extend(
sess.run([batched], feed_dict={inp0: [1],
inp1: [2]}))
worker_thread = threading.Thread(target=worker)
worker_thread.start()
main_results = sess.run([batched], feed_dict={inp0: [2], inp1: [3]})
worker_thread.join()
if list(thread_results[0][0]):
batch_t = thread_results[0]
empty_t = main_results[0]
else:
batch_t = main_results[0]
empty_t = thread_results[0]
self.assertAllEqual(sorted(batch_t[0]), [1, 2])
self.assertAllEqual(sorted(batch_t[1]), [2, 3])
self.assertAllEqual(empty_t[0], [])
self.assertAllEqual(empty_t[1], [])
def testIllegalBatchDifferentDim0Sizes(self):
with self.test_session() as sess:
inp0 = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
inp1 = array_ops.placeholder(dtype=dtypes.int32, shape=[2])
batched, index, _ = batch_ops.batch(
[inp0, inp1], num_batch_threads=1, max_batch_size=2,
batch_timeout_micros=0, grad_timeout_micros=0, batching_queue="")
with self.assertRaises(Exception) as raised:
_ = sess.run([batched, index], feed_dict={inp0: [0], inp1: [1, 2]})
self.assertGreater(
raised.exception.message.find("must have equal 0th-dimension size"),
0)
def testBasicUnbatch(self):
with self.test_session() as sess:
inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
batched, index, id_t = batch_ops.batch(
[inp], num_batch_threads=1, max_batch_size=10,
batch_timeout_micros=100000,
allowed_batch_sizes=[3, 10],
grad_timeout_micros=0, batching_queue="")
computation = batched[0] + 1
result = batch_ops.unbatch(computation, index, id_t,
timeout_micros=1000000, shared_name="unbatch")
thread_results = []
def worker():
thread_results.extend(sess.run([result], feed_dict={inp: [1]}))
worker_thread = threading.Thread(target=worker)
worker_thread.start()
main_results = sess.run([result], feed_dict={inp: [2]})
worker_thread.join()
self.assertEqual(thread_results[0], [2])
self.assertEqual(main_results[0], [3])
def testBasicUnbatchDecorated(self):
with self.test_session() as sess:
@batch_ops.batch_function(1, 10, 100000)
def computation(in_t):
return in_t + 1
inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
result = computation(inp)
thread_results = []
def worker():
thread_results.extend(sess.run([result], feed_dict={inp: [1]}))
worker_thread = threading.Thread(target=worker)
worker_thread.start()
main_results = sess.run([result], feed_dict={inp: [2]})
worker_thread.join()
self.assertEqual(thread_results[0], [2])
self.assertEqual(main_results[0], [3])
  def testUnbatchTimeout(self):
    """An unbatch op whose result arrives too late must time out with 0 results."""
    with self.test_session() as sess:
      inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
      # Huge batch timeout: the batch only fires once two callers are present.
      batched, index, id_t = batch_ops.batch(
          [inp], num_batch_threads=1, max_batch_size=2,
          batch_timeout_micros=36000000, grad_timeout_micros=0,
          batching_queue="")
      computation = batched[0] + 1
      # Deliberately tiny unbatch timeout so the waiting thread gives up.
      timeout_micros = 10
      result = batch_ops.unbatch(computation, index, id_t, timeout_micros,
                                 shared_name="shared_unbatch")
      # A second pipeline that delays the batched tensor (via a slow py_func)
      # before it reaches the shared unbatch op.
      computation_delayed = script_ops.py_func(delayed_plus1,
                                               [batched[0]],
                                               dtypes.int32)
      result_delayed = batch_ops.unbatch(computation_delayed,
                                         index,
                                         id_t,
                                         timeout_micros,
                                         shared_name="shared_unbatch")
      thread_results = []
      def worker():
        thread_results.extend(sess.run([result], feed_dict={inp: [1]}))
      worker_thread = threading.Thread(target=worker)
      worker_thread.start()
      time.sleep(0.1)
      # A second call using the delayed pipeline. The batcher will send the
      # batched tensor along the delayed pipeline, thus delaying the arrival of
      # the batched tensor at the unbatch op, relative to the empty tensor.
      #
      # TODO(olston, apassos): Avoid relying on the order in which the batch op
      # emits the empty tensor versus the batched one.
      _ = sess.run([result_delayed], feed_dict={inp: [2]})
      worker_thread.join()
      # The thread's call should hit the timeout, and thus get 0 results.
      self.assertEqual(len(thread_results), 0)
def testUnbatchGrad(self):
with self.test_session() as sess:
inp = array_ops.placeholder(dtype=dtypes.float32, shape=[1])
batched, index, id_t = batch_ops.batch(
[inp], num_batch_threads=1, max_batch_size=2,
batch_timeout_micros=36000000, grad_timeout_micros=1000000,
batching_queue="")
computation = batched[0] * batched[0]
result = batch_ops.unbatch(computation, index, id_t,
timeout_micros=1000000, shared_name="unbatch")
grad = gradients_impl.gradients(result, inp)
thread_results = []
def worker():
thread_results.extend(sess.run([grad], feed_dict={inp: [1]}))
worker_thread = threading.Thread(target=worker)
worker_thread.start()
main_results = sess.run([grad], feed_dict={inp: [2]})
worker_thread.join()
self.assertEqual(thread_results[0], [2])
self.assertEqual(main_results[0], [4])
if __name__ == "__main__":
test.main()
| true | true |
f7ff2684d053e255af1625b8e32e0fbfe593614d | 1,228 | py | Python | crom/bootstrap/bootstrap.py | mropert/crom | b871b756c348952de2a044b22b36c9fbb0e76132 | [
"MIT"
] | null | null | null | crom/bootstrap/bootstrap.py | mropert/crom | b871b756c348952de2a044b22b36c9fbb0e76132 | [
"MIT"
] | null | null | null | crom/bootstrap/bootstrap.py | mropert/crom | b871b756c348952de2a044b22b36c9fbb0e76132 | [
"MIT"
] | null | null | null | from crom.project import Project
def with_directory(file, dir):
    """Return *file* prefixed with *dir* and a '/', or *file* unchanged when
    *dir* is None."""
    if dir is None:
        return file
    return "/".join([dir, file])
def list_files(files, dir):
    """Map every name in *files* through with_directory(name, dir).

    Returns the same lazy ``map`` object shape as before so callers that
    merely iterate are unaffected.
    """
    return map(lambda name: with_directory(name, dir), files)
def get_files(files, dir):
    """Return a new dict whose keys are the keys of *files* prefixed with
    *dir*, mapped to their original values."""
    return {with_directory(path, dir): value for path, value in files.items()}
class Bootstrap:
    """Template describing a project's files, used to build a Project and to
    materialize the files on disk.

    ``sources``/``headers``/``tests`` map relative file names to associated
    values (presumably file contents — confirm against callers);
    ``test_deps`` lists dependencies needed only by the tests.
    """

    def __init__(self, name, type, sources=None, headers=None, tests=None,
                 test_deps=None):
        # None sentinels instead of mutable default arguments ({} / []):
        # mutable defaults are shared between calls, which is a classic
        # Python pitfall. Behavior for existing callers is unchanged.
        self.name = name
        self.type = type
        self.sources = {} if sources is None else sources
        self.headers = {} if headers is None else headers
        self.tests = {} if tests is None else tests
        self.test_deps = [] if test_deps is None else test_deps

    def to_project(self, src_dir=None, include_dir=None, test_dir=None):
        """Build a Project whose file lists are prefixed with the given
        directories (a None directory leaves names unprefixed)."""
        return Project(self.name, self.type, list_files(self.sources.keys(), src_dir),
                       list_files(self.headers.keys(), include_dir),
                       list_files(self.tests.keys(), test_dir),
                       test_deps=self.test_deps)

    def get_all_files(self, src_dir=None, include_dir=None, test_dir=None):
        """Return a single dict of every file (sources, headers and tests),
        each key prefixed with its corresponding directory."""
        files = get_files(self.sources, src_dir)
        files.update(get_files(self.headers, include_dir))
        files.update(get_files(self.tests, test_dir))
        return files
| 31.487179 | 86 | 0.62785 | from crom.project import Project
def with_directory(file, dir):
    """Return *file* prefixed with *dir* and a '/', or *file* unchanged when
    *dir* is None."""
    if dir is None:
        return file
    else:
        return '/'.join([dir, file])
def list_files(files, dir):
    """Lazily map every name in *files* through with_directory(name, dir)."""
    return map(lambda f: with_directory(f, dir), files)
def get_files(files, dir):
    """Return a new dict with each key of *files* prefixed with *dir*,
    mapped to its original value."""
    return dict(map(lambda p: (with_directory(p[0], dir), p[1]), files.items()))
class Bootstrap:
    """Template describing a project's files, used to build a Project and to
    materialize the files on disk.

    ``sources``/``headers``/``tests`` map relative file names to associated
    values (presumably file contents — confirm against callers);
    ``test_deps`` lists dependencies needed only by the tests.
    """

    # NOTE(review): the mutable default arguments ({} / []) are shared across
    # calls; harmless as long as they are never mutated, but None sentinels
    # would be safer.
    def __init__(self, name, type, sources={}, headers={}, tests={}, test_deps=[]):
        self.name = name
        self.type = type
        self.sources = sources
        self.headers = headers
        self.tests = tests
        self.test_deps = test_deps

    def to_project(self, src_dir=None, include_dir=None, test_dir=None):
        """Build a Project whose file lists are prefixed with the given
        directories (a None directory leaves names unprefixed)."""
        return Project(self.name, self.type, list_files(self.sources.keys(), src_dir),
                       list_files(self.headers.keys(), include_dir),
                       list_files(self.tests.keys(), test_dir),
                       test_deps=self.test_deps)

    def get_all_files(self, src_dir=None, include_dir=None, test_dir=None):
        """Return a single dict of every file (sources, headers and tests),
        each key prefixed with its corresponding directory."""
        files = get_files(self.sources, src_dir)
        files.update(get_files(self.headers, include_dir))
        files.update(get_files(self.tests, test_dir))
        return files
| true | true |
f7ff284ed125ec95d0f2dc9fcdee3b0ae82ac8bb | 7,600 | py | Python | tests/test_subcluster.py | mmascher/osg-configure | 0a8490f87ff1b3340796f94ed657b62b19602347 | [
"Apache-2.0"
] | null | null | null | tests/test_subcluster.py | mmascher/osg-configure | 0a8490f87ff1b3340796f94ed657b62b19602347 | [
"Apache-2.0"
] | null | null | null | tests/test_subcluster.py | mmascher/osg-configure | 0a8490f87ff1b3340796f94ed657b62b19602347 | [
"Apache-2.0"
] | null | null | null | """Module for unit testing subcluster / resource entry configuration"""
# pylint: disable=W0703
# pylint: disable=R0904
from __future__ import print_function
import os
import sys
import unittest
import ConfigParser
import logging
# setup system library path
pathname = os.path.realpath('../')
sys.path.insert(0, pathname)
# NullHandler is only available in Python 2.7+
try:
    NullHandler = logging.NullHandler
except AttributeError:
    # Fallback for Python < 2.7, where logging.NullHandler does not exist:
    # a handler that silently discards every record.
    class NullHandler(logging.Handler):
        def emit(self, record):
            pass

# Module-level logger with a no-op handler so importing this module never
# emits "no handlers could be found" warnings.
global_logger = logging.getLogger(__name__)
global_logger.addHandler(NullHandler())
from osg_configure.configure_modules import localsettings
from osg_configure.modules import exceptions
try:
from osg_configure.modules import subcluster
except ImportError:
subcluster = None
print("subcluster not found -- skipping subcluster tests")
from osg_configure.modules import utilities
from osg_configure.modules.utilities import get_test_config
class TestSubcluster(unittest.TestCase):
    """Class for unit testing subcluster / resource entry configuration code.

    Every test bails out early (``if not subcluster: return``) when the
    subcluster module could not be imported above.
    """

    def test_missing_sc(self):
        """
        Make sure that we have failures when there is no configured SC.
        """
        if not subcluster: return
        config_parser = ConfigParser.SafeConfigParser()
        config_file = get_test_config("subcluster/red-missing-sc.ini")
        config_parser.read(config_file)
        self.assertFalse(subcluster.check_config(config_parser), msg="Did not properly detect a missing SC.")

    def test_changeme4(self):
        """
        Make sure that we have failures because SC CHANGEME section is present.
        """
        if not subcluster: return
        config_parser = ConfigParser.SafeConfigParser()
        config_file = get_test_config("subcluster/changeme_section_sc.ini")
        config_parser.read(config_file)
        self.assertRaises(exceptions.SettingError, subcluster.check_config, config_parser)  # detect enabled CHANGEME section.

    def test_missing_attributes(self):
        """
        Make sure that we have failures when there are missing attributes.
        """
        if not subcluster: return
        config_parser = ConfigParser.SafeConfigParser()
        config_file = get_test_config("subcluster/red-missing-attributes.ini")
        config_parser.read(config_file)
        self.assertRaises(exceptions.SettingError, subcluster.check_config, config_parser)  # detect missing attrs.

    def test_new_config(self):
        """
        Make sure that we can correctly parse a correct new-style GIP config.
        """
        if not subcluster: return
        config_parser = ConfigParser.SafeConfigParser()
        config_file = get_test_config("subcluster/red-new-gip-config.ini")
        config_parser.read(config_file)
        self.assertTrue(subcluster.check_config(config_parser))

    def test_local_settings(self):
        """
        Test to see if the local settings parsing works.
        """
        if not subcluster: return
        config_parser = ConfigParser.SafeConfigParser()
        # Keep option names case-sensitive ('Foo' vs 'bar' below).
        config_parser.optionxform = str
        config_file = get_test_config("subcluster/local_settings.ini")
        config_parser.read(config_file)
        local_settings = localsettings.LocalSettings(logger=global_logger)
        local_settings.parse_configuration(config_parser)
        attributes = local_settings.get_attributes()
        self.assertTrue('default' not in attributes,
                        msg="Attributes set that weren't in the test config file")
        self.assertTrue('Foo' in attributes and attributes['Foo'] == 'value1',
                        msg="Incorrectly named key." \
                            " Desired name: Foo; only found %s." %
                            (" ".join(attributes.keys())))
        self.assertTrue(attributes['Foo'] == 'value1',
                        msg="Incorrect value wanted value1, " \
                            "got %s" % attributes['Foo'])
        # NOTE(review): the next two assertions test the identical condition
        # with different messages; the second was presumably meant to check
        # only the value, mirroring the Foo pair above.
        self.assertTrue('bar' in attributes and attributes['bar'] == 'value2',
                        msg="Incorrectly named key." \
                            " Desired name: bar; only found %s." %
                            (" ".join(attributes.keys())))
        self.assertTrue('bar' in attributes and attributes['bar'] == 'value2',
                        msg="Incorrect value wanted value2, " \
                            "got %s" % attributes['bar'])

    def test_hepspec_valid(self):
        """
        Make sure a valid HEPSPEC value is accepted.
        """
        if not subcluster: return
        did_fail = False
        config_parser = ConfigParser.SafeConfigParser()
        config_file = get_test_config("subcluster/sc_samples.ini")
        config_parser.read(config_file)
        try:
            subcluster.check_section(config_parser, "Subcluster Valid")
        except exceptions.SettingError:
            did_fail = True
        self.assertFalse(did_fail, msg="Valid HEPSPEC entry threw an exception.")

    def test_hepspec_invalid(self):
        """
        Make sure an invalid HEPSPEC value no longer causes an error.
        """
        if not subcluster: return
        config_parser = ConfigParser.SafeConfigParser()
        config_file = get_test_config("subcluster/sc_samples.ini")
        config_parser.read(config_file)
        try:
            subcluster.check_section(config_parser, "Subcluster Bad HEPSPEC")
        except exceptions.SettingError:
            self.fail(msg="Invalid HEPSPEC entry threw an exception.")
        try:
            subcluster.check_section(config_parser, "Subcluster Formerly Bad Cores")
        except exceptions.SettingError:
            self.fail(msg="Formerly Bad Cores entry threw an exception")

    def test_no_name(self):
        """
        Make sure a missing name causes an error
        """
        if not subcluster: return
        config_parser = ConfigParser.SafeConfigParser()
        config_file = get_test_config("subcluster/sc_samples.ini")
        config_parser.read(config_file)
        self.assertRaises(exceptions.SettingError, subcluster.check_section, config_parser, "Subcluster No Name")

    def test_resource_entry(self):
        """
        Make sure a Resource Entry section is detected
        """
        if not subcluster: return
        config_parser = ConfigParser.SafeConfigParser()
        config_file = get_test_config("subcluster/resourceentry.ini")
        config_parser.read(config_file)
        found_scs = subcluster.check_config(config_parser)
        self.assertTrue(found_scs, msg="Resource Entry Valid not found.")

    def test_resource_entry_2(self):
        """
        Make sure most subcluster attributes are optional for a
        Resource Entry section
        """
        if not subcluster: return
        config_parser = ConfigParser.SafeConfigParser()
        config_file = get_test_config("subcluster/resourceentry.ini")
        config_parser.read(config_file)
        did_fail = False
        for section in ["Resource Entry Valid Old Attribs",
                        "Resource Entry Valid New Attribs"]:
            try:
                subcluster.check_section(config_parser, section)
            except exceptions.SettingError:
                did_fail = True
        # NOTE(review): on failure the message names only the last section
        # iterated, not necessarily the one that raised.
        self.assertFalse(did_fail, msg="Section %s threw an exception." % section)
if __name__ == '__main__':
    # Echo ERROR-level log records to the console while the tests run.
    console = logging.StreamHandler()
    console.setLevel(logging.ERROR)
    global_logger.addHandler(console)
    unittest.main()
| 39.583333 | 125 | 0.653289 |
from __future__ import print_function
import os
import sys
import unittest
import ConfigParser
import logging
pathname = os.path.realpath('../')
sys.path.insert(0, pathname)
# NullHandler is only available in Python 2.7+.
try:
    NullHandler = logging.NullHandler
except AttributeError:
    # Fallback for older Pythons: a handler that discards every record.
    class NullHandler(logging.Handler):
        def emit(self, record):
            pass

# Module-level logger with a no-op handler so importing this module never
# emits "no handlers could be found" warnings.
global_logger = logging.getLogger(__name__)
global_logger.addHandler(NullHandler())
from osg_configure.configure_modules import localsettings
from osg_configure.modules import exceptions
try:
from osg_configure.modules import subcluster
except ImportError:
subcluster = None
print("subcluster not found -- skipping subcluster tests")
from osg_configure.modules import utilities
from osg_configure.modules.utilities import get_test_config
class TestSubcluster(unittest.TestCase):
    """Unit tests for subcluster / resource entry configuration code.

    Every test bails out early (``if not subcluster: return``) when the
    subcluster module could not be imported above.
    """

    def test_missing_sc(self):
        """A configuration with no SC section must fail the check."""
        if not subcluster: return
        config_parser = ConfigParser.SafeConfigParser()
        config_file = get_test_config("subcluster/red-missing-sc.ini")
        config_parser.read(config_file)
        self.assertFalse(subcluster.check_config(config_parser), msg="Did not properly detect a missing SC.")

    def test_changeme4(self):
        """An enabled SC CHANGEME section must raise a SettingError."""
        if not subcluster: return
        config_parser = ConfigParser.SafeConfigParser()
        config_file = get_test_config("subcluster/changeme_section_sc.ini")
        config_parser.read(config_file)
        self.assertRaises(exceptions.SettingError, subcluster.check_config, config_parser)

    def test_missing_attributes(self):
        """Missing required attributes must raise a SettingError."""
        if not subcluster: return
        config_parser = ConfigParser.SafeConfigParser()
        config_file = get_test_config("subcluster/red-missing-attributes.ini")
        config_parser.read(config_file)
        self.assertRaises(exceptions.SettingError, subcluster.check_config, config_parser)

    def test_new_config(self):
        """A correct new-style GIP config must parse cleanly."""
        if not subcluster: return
        config_parser = ConfigParser.SafeConfigParser()
        config_file = get_test_config("subcluster/red-new-gip-config.ini")
        config_parser.read(config_file)
        self.assertTrue(subcluster.check_config(config_parser))

    def test_local_settings(self):
        """Local-settings parsing preserves option names and values."""
        if not subcluster: return
        config_parser = ConfigParser.SafeConfigParser()
        # Keep option names case-sensitive ('Foo' vs 'bar' below).
        config_parser.optionxform = str
        config_file = get_test_config("subcluster/local_settings.ini")
        config_parser.read(config_file)
        local_settings = localsettings.LocalSettings(logger=global_logger)
        local_settings.parse_configuration(config_parser)
        attributes = local_settings.get_attributes()
        self.assertTrue('default' not in attributes,
                        msg="Attributes set that weren't in the test config file")
        self.assertTrue('Foo' in attributes and attributes['Foo'] == 'value1',
                        msg="Incorrectly named key." \
                            " Desired name: Foo; only found %s." %
                            (" ".join(attributes.keys())))
        self.assertTrue(attributes['Foo'] == 'value1',
                        msg="Incorrect value wanted value1, " \
                            "got %s" % attributes['Foo'])
        # NOTE(review): the next two assertions test the identical condition
        # with different messages; the second was presumably meant to check
        # only the value, mirroring the Foo pair above.
        self.assertTrue('bar' in attributes and attributes['bar'] == 'value2',
                        msg="Incorrectly named key." \
                            " Desired name: bar; only found %s." %
                            (" ".join(attributes.keys())))
        self.assertTrue('bar' in attributes and attributes['bar'] == 'value2',
                        msg="Incorrect value wanted value2, " \
                            "got %s" % attributes['bar'])

    def test_hepspec_valid(self):
        """A valid HEPSPEC value must be accepted without raising."""
        if not subcluster: return
        did_fail = False
        config_parser = ConfigParser.SafeConfigParser()
        config_file = get_test_config("subcluster/sc_samples.ini")
        config_parser.read(config_file)
        try:
            subcluster.check_section(config_parser, "Subcluster Valid")
        except exceptions.SettingError:
            did_fail = True
        self.assertFalse(did_fail, msg="Valid HEPSPEC entry threw an exception.")

    def test_hepspec_invalid(self):
        """An invalid HEPSPEC value must no longer cause an error."""
        if not subcluster: return
        config_parser = ConfigParser.SafeConfigParser()
        config_file = get_test_config("subcluster/sc_samples.ini")
        config_parser.read(config_file)
        try:
            subcluster.check_section(config_parser, "Subcluster Bad HEPSPEC")
        except exceptions.SettingError:
            self.fail(msg="Invalid HEPSPEC entry threw an exception.")
        try:
            subcluster.check_section(config_parser, "Subcluster Formerly Bad Cores")
        except exceptions.SettingError:
            self.fail(msg="Formerly Bad Cores entry threw an exception")

    def test_no_name(self):
        """A subcluster section without a name must raise a SettingError."""
        if not subcluster: return
        config_parser = ConfigParser.SafeConfigParser()
        config_file = get_test_config("subcluster/sc_samples.ini")
        config_parser.read(config_file)
        self.assertRaises(exceptions.SettingError, subcluster.check_section, config_parser, "Subcluster No Name")

    def test_resource_entry(self):
        """A Resource Entry section must be detected by check_config."""
        if not subcluster: return
        config_parser = ConfigParser.SafeConfigParser()
        config_file = get_test_config("subcluster/resourceentry.ini")
        config_parser.read(config_file)
        found_scs = subcluster.check_config(config_parser)
        self.assertTrue(found_scs, msg="Resource Entry Valid not found.")

    def test_resource_entry_2(self):
        """Most subcluster attributes must be optional for Resource Entry
        sections (both old- and new-style attribute names)."""
        if not subcluster: return
        config_parser = ConfigParser.SafeConfigParser()
        config_file = get_test_config("subcluster/resourceentry.ini")
        config_parser.read(config_file)
        did_fail = False
        for section in ["Resource Entry Valid Old Attribs",
                        "Resource Entry Valid New Attribs"]:
            try:
                subcluster.check_section(config_parser, section)
            except exceptions.SettingError:
                did_fail = True
        # NOTE(review): on failure the message names only the last section
        # iterated, not necessarily the one that raised.
        self.assertFalse(did_fail, msg="Section %s threw an exception." % section)
if __name__ == '__main__':
    # Echo ERROR-level log records to the console while the tests run.
    console = logging.StreamHandler()
    console.setLevel(logging.ERROR)
    global_logger.addHandler(console)
    unittest.main()
| true | true |
f7ff28e0d7921f6698ca0444b0b371e2e820261c | 7,140 | py | Python | exam_system/stud_app/migrations/0001_initial.py | hiruthikj/exam-system | 952cb87bd43b31f6337aac1f1e57e05a68e7c531 | [
"Apache-2.0"
] | 3 | 2020-11-16T17:32:56.000Z | 2021-04-07T14:16:24.000Z | exam_system/stud_app/migrations/0001_initial.py | hiruthikj/exam-system | 952cb87bd43b31f6337aac1f1e57e05a68e7c531 | [
"Apache-2.0"
] | null | null | null | exam_system/stud_app/migrations/0001_initial.py | hiruthikj/exam-system | 952cb87bd43b31f6337aac1f1e57e05a68e7c531 | [
"Apache-2.0"
] | 1 | 2020-11-03T17:10:20.000Z | 2020-11-03T17:10:20.000Z | # Generated by Django 3.1.3 on 2020-11-16 09:43
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial schema for the stud_app exam application.

    FK fields whose target models are created later in ``operations`` are
    attached afterwards via AddField.
    """

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # One Attendee row records one student's attempt at one exam.
        migrations.CreateModel(
            name='Attendee',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('total_marks', models.FloatField(blank=True, null=True)),
                ('submitted_on', models.DateTimeField(auto_now_add=True, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Choice',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('choice_text', models.CharField(max_length=200)),
                ('is_selected', models.BooleanField(blank=True, default=False, null=True, verbose_name='Selected Answer')),
                ('is_correct', models.BooleanField(default=False, verbose_name='Correct Answer')),
            ],
        ),
        migrations.CreateModel(
            name='Course',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('course_code', models.CharField(max_length=6, unique=True)),
                ('course_name', models.CharField(max_length=50)),
                ('course_desc', models.TextField(blank=True, max_length=100, null=True, verbose_name='Course Description')),
            ],
        ),
        migrations.CreateModel(
            name='Department',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('dept_code', models.CharField(max_length=3, unique=True)),
                ('dept_name', models.CharField(max_length=50)),
            ],
        ),
        migrations.CreateModel(
            name='Exam',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('exam_name', models.CharField(max_length=40, unique=True)),
                ('qn_mark', models.FloatField(blank=True, default=4, null=True)),
                ('neg_mark', models.FloatField(blank=True, default=1, null=True)),
                ('start_time', models.DateTimeField(blank=True, null=True)),
                ('end_time', models.DateTimeField(blank=True, null=True)),
                ('time_limit', models.DurationField(help_text='HH:MM:SS format')),
                ('is_active', models.BooleanField(default=True)),
                ('created_on', models.DateTimeField(auto_now_add=True)),
                ('updated_on', models.DateTimeField(auto_now=True)),
                ('pub_date', models.DateTimeField(auto_now_add=True, verbose_name='Date Published')),
                ('course_fk', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='stud_app.course', verbose_name='Course')),
            ],
            options={
                'ordering': ['start_time'],
            },
        ),
        migrations.CreateModel(
            name='Question',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('qn_text', models.TextField(max_length=200, verbose_name='Question Description')),
                ('qn_image', models.ImageField(blank=True, null=True, upload_to='', verbose_name='Question Image')),
                ('pub_date', models.DateTimeField(auto_now_add=True, verbose_name='date published')),
                ('exams', models.ManyToManyField(to='stud_app.Exam')),
            ],
        ),
        migrations.CreateModel(
            name='Student',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('birth_date', models.DateField(blank=True, null=True)),
                ('phone_no', models.CharField(help_text='10-digit phone number', max_length=10, unique=True)),
                ('joined_on', models.DateField(blank=True, null=True)),
                ('course_fk', models.ManyToManyField(to='stud_app.Course')),
                ('dept_fk', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='stud_app.department')),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='student_user', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ['dept_fk__dept_name'],
            },
        ),
        # Response keeps the chosen Choice nullable (SET_NULL) so grading
        # history survives choice/question deletion.
        migrations.CreateModel(
            name='Response',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('attendee_fk', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='stud_app.attendee')),
                ('choice', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='stud_app.choice')),
                ('question', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='stud_app.question')),
            ],
        ),
        migrations.CreateModel(
            name='Faculty',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('phone_no', models.CharField(help_text='10-digit phone number', max_length=10, unique=True)),
                ('joined_on', models.DateField(blank=True, null=True)),
                ('course_fk', models.ManyToManyField(to='stud_app.Course')),
                ('dept_fk', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='stud_app.department')),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='faculty_user', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ['dept_fk__dept_name'],
            },
        ),
        # FKs to models created after their referrers are attached here.
        migrations.AddField(
            model_name='course',
            name='dept_fk',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='stud_app.department'),
        ),
        migrations.AddField(
            model_name='choice',
            name='question',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='stud_app.question'),
        ),
        migrations.AddField(
            model_name='attendee',
            name='exam_fk',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='stud_app.exam'),
        ),
        migrations.AddField(
            model_name='attendee',
            name='student_fk',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='stud_app.student'),
        ),
    ]
| 51 | 150 | 0.592437 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial schema for the stud_app exam application.

    FK fields whose target models are created later in ``operations`` are
    attached afterwards via AddField.
    """

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # One Attendee row records one student's attempt at one exam.
        migrations.CreateModel(
            name='Attendee',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('total_marks', models.FloatField(blank=True, null=True)),
                ('submitted_on', models.DateTimeField(auto_now_add=True, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Choice',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('choice_text', models.CharField(max_length=200)),
                ('is_selected', models.BooleanField(blank=True, default=False, null=True, verbose_name='Selected Answer')),
                ('is_correct', models.BooleanField(default=False, verbose_name='Correct Answer')),
            ],
        ),
        migrations.CreateModel(
            name='Course',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('course_code', models.CharField(max_length=6, unique=True)),
                ('course_name', models.CharField(max_length=50)),
                ('course_desc', models.TextField(blank=True, max_length=100, null=True, verbose_name='Course Description')),
            ],
        ),
        migrations.CreateModel(
            name='Department',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('dept_code', models.CharField(max_length=3, unique=True)),
                ('dept_name', models.CharField(max_length=50)),
            ],
        ),
        migrations.CreateModel(
            name='Exam',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('exam_name', models.CharField(max_length=40, unique=True)),
                ('qn_mark', models.FloatField(blank=True, default=4, null=True)),
                ('neg_mark', models.FloatField(blank=True, default=1, null=True)),
                ('start_time', models.DateTimeField(blank=True, null=True)),
                ('end_time', models.DateTimeField(blank=True, null=True)),
                ('time_limit', models.DurationField(help_text='HH:MM:SS format')),
                ('is_active', models.BooleanField(default=True)),
                ('created_on', models.DateTimeField(auto_now_add=True)),
                ('updated_on', models.DateTimeField(auto_now=True)),
                ('pub_date', models.DateTimeField(auto_now_add=True, verbose_name='Date Published')),
                ('course_fk', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='stud_app.course', verbose_name='Course')),
            ],
            options={
                'ordering': ['start_time'],
            },
        ),
        migrations.CreateModel(
            name='Question',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('qn_text', models.TextField(max_length=200, verbose_name='Question Description')),
                ('qn_image', models.ImageField(blank=True, null=True, upload_to='', verbose_name='Question Image')),
                ('pub_date', models.DateTimeField(auto_now_add=True, verbose_name='date published')),
                ('exams', models.ManyToManyField(to='stud_app.Exam')),
            ],
        ),
        migrations.CreateModel(
            name='Student',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('birth_date', models.DateField(blank=True, null=True)),
                ('phone_no', models.CharField(help_text='10-digit phone number', max_length=10, unique=True)),
                ('joined_on', models.DateField(blank=True, null=True)),
                ('course_fk', models.ManyToManyField(to='stud_app.Course')),
                ('dept_fk', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='stud_app.department')),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='student_user', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ['dept_fk__dept_name'],
            },
        ),
        # Response keeps the chosen Choice nullable (SET_NULL) so grading
        # history survives choice/question deletion.
        migrations.CreateModel(
            name='Response',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('attendee_fk', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='stud_app.attendee')),
                ('choice', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='stud_app.choice')),
                ('question', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='stud_app.question')),
            ],
        ),
        migrations.CreateModel(
            name='Faculty',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('phone_no', models.CharField(help_text='10-digit phone number', max_length=10, unique=True)),
                ('joined_on', models.DateField(blank=True, null=True)),
                ('course_fk', models.ManyToManyField(to='stud_app.Course')),
                ('dept_fk', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='stud_app.department')),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='faculty_user', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ['dept_fk__dept_name'],
            },
        ),
        # FKs to models created after their referrers are attached here.
        migrations.AddField(
            model_name='course',
            name='dept_fk',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='stud_app.department'),
        ),
        migrations.AddField(
            model_name='choice',
            name='question',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='stud_app.question'),
        ),
        migrations.AddField(
            model_name='attendee',
            name='exam_fk',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='stud_app.exam'),
        ),
        migrations.AddField(
            model_name='attendee',
            name='student_fk',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='stud_app.student'),
        ),
    ]
| true | true |
f7ff294ad6a5df593bbd854409a5e766cca7cf79 | 204 | py | Python | practice_py/love.py | RootProgrammer/Python | d3308af735934d40df5ca2b115cf1deffcae5fac | [
"MIT"
] | 1 | 2021-04-18T08:14:41.000Z | 2021-04-18T08:14:41.000Z | practice_py/love.py | RootProgrammer/Python | d3308af735934d40df5ca2b115cf1deffcae5fac | [
"MIT"
] | null | null | null | practice_py/love.py | RootProgrammer/Python | d3308af735934d40df5ca2b115cf1deffcae5fac | [
"MIT"
] | null | null | null | from turtle import *
color("blue","black")
pensize(1)
speed(1)
begin_fill()
left(50)
forward(100)
circle(40,180)
left(260)
circle(40,177)
forward(100)
end_fill()
hideturtle()
done() | 10.736842 | 22 | 0.642157 | from turtle import *
color("blue","black")
pensize(1)
speed(1)
begin_fill()
left(50)
forward(100)
circle(40,180)
left(260)
circle(40,177)
forward(100)
end_fill()
hideturtle()
done() | true | true |
f7ff29715fcfb8b3ac8fa348a68ca50e86816ab2 | 562 | py | Python | muro/dashboards/migrations/0006_auto_20180316_1404.py | edupo/muro | 618ed01a37c417ba2d67c613dbc53366b81dd734 | [
"Apache-2.0"
] | null | null | null | muro/dashboards/migrations/0006_auto_20180316_1404.py | edupo/muro | 618ed01a37c417ba2d67c613dbc53366b81dd734 | [
"Apache-2.0"
] | 4 | 2018-02-28T08:32:59.000Z | 2018-04-24T10:50:38.000Z | muro/dashboards/migrations/0006_auto_20180316_1404.py | edupo/muro | 618ed01a37c417ba2d67c613dbc53366b81dd734 | [
"Apache-2.0"
] | 1 | 2018-02-09T18:04:32.000Z | 2018-02-09T18:04:32.000Z | # Generated by Django 2.0.2 on 2018-03-16 14:04
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add optional fromtime/totime display-window fields to Dashboard."""

    dependencies = [
        ('dashboards', '0005_auto_20180301_2056'),
    ]

    operations = [
        migrations.AddField(
            model_name='dashboard',
            name='fromtime',
            field=models.TimeField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='dashboard',
            name='totime',
            field=models.TimeField(blank=True, null=True),
        ),
    ]
| 23.416667 | 58 | 0.580071 |
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add optional fromtime/totime display-window fields to Dashboard."""

    dependencies = [
        ('dashboards', '0005_auto_20180301_2056'),
    ]

    operations = [
        migrations.AddField(
            model_name='dashboard',
            name='fromtime',
            field=models.TimeField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='dashboard',
            name='totime',
            field=models.TimeField(blank=True, null=True),
        ),
    ]
| true | true |
f7ff29ac0c977aea009765d623946d669ae89db0 | 3,349 | py | Python | wagtailgeowidget/edit_handlers.py | mariusboe/wagtail-geo-widget | bced21950685d0d1843acd77bb0740b7df3b1415 | [
"MIT"
] | null | null | null | wagtailgeowidget/edit_handlers.py | mariusboe/wagtail-geo-widget | bced21950685d0d1843acd77bb0740b7df3b1415 | [
"MIT"
] | null | null | null | wagtailgeowidget/edit_handlers.py | mariusboe/wagtail-geo-widget | bced21950685d0d1843acd77bb0740b7df3b1415 | [
"MIT"
] | null | null | null | import warnings
from wagtail.admin.edit_handlers import FieldPanel
from wagtailgeowidget import geocoders
from wagtailgeowidget.app_settings import GEO_WIDGET_ZOOM
from wagtailgeowidget.widgets import GeocoderField, GoogleMapsField, LeafletField
class GoogleMapsPanel(FieldPanel):
    """FieldPanel that edits a geo point field on a Google Maps widget."""

    def __init__(self, *args, **kwargs):
        # Pop the widget-specific options before FieldPanel sees the kwargs.
        self.classname = kwargs.pop("classname", "")
        self.address_field = kwargs.pop("address_field", "")
        self.zoom_field = kwargs.pop("zoom_field", "")
        self.hide_latlng = kwargs.pop("hide_latlng", False)
        self.zoom = kwargs.pop("zoom", GEO_WIDGET_ZOOM)
        super().__init__(*args, **kwargs)

    def widget_overrides(self):
        """Build the GoogleMapsField widget, mirroring the model field's SRID."""
        model_field = self.model._meta.get_field(self.field_name)
        widget = GoogleMapsField(
            address_field=self.address_field,
            zoom_field=self.zoom_field,
            hide_latlng=self.hide_latlng,
            zoom=self.zoom,
            srid=getattr(model_field, "srid", 4326),
            id_prefix="id_",
        )
        return {self.field_name: widget}

    def clone(self):
        """Return a fresh panel carrying the same configuration."""
        return self.__class__(
            field_name=self.field_name,
            classname=self.classname,
            address_field=self.address_field,
            zoom_field=self.zoom_field,
            hide_latlng=self.hide_latlng,
            zoom=self.zoom,
        )
class GeoPanel(GoogleMapsPanel):
    """Deprecated alias for GoogleMapsPanel, kept for backward compatibility."""
    def __init__(self, *args, **kwargs):
        # `warnings` is already imported at module level; the previous
        # function-local re-import was redundant and has been removed.
        warnings.warn(
            "GeoPanel will be deprecated in version 7, use GoogleMapsPanel instead",
            PendingDeprecationWarning,
        )
        super().__init__(*args, **kwargs)
class GeoAddressPanel(FieldPanel):
    """Edit handler that renders an address field backed by a geocoder widget."""
    def __init__(self, *args, **kwargs):
        # Extract the panel-specific option before delegating to FieldPanel.
        self.geocoder = kwargs.pop("geocoder", geocoders.NOMINATIM)
        super().__init__(*args, **kwargs)
    def widget_overrides(self):
        """Map the panel's field to a GeocoderField using the chosen geocoder."""
        return {self.field_name: GeocoderField(geocoder=self.geocoder)}
    def clone(self):
        """Return a fresh panel instance with the same configuration."""
        return self.__class__(field_name=self.field_name, geocoder=self.geocoder)
class LeafletPanel(FieldPanel):
    """Wagtail edit handler that renders a geo field with a Leaflet map widget.

    Mirrors GoogleMapsPanel but emits a LeafletField widget instead.
    """
    def __init__(self, *args, **kwargs):
        # Pop the widget-specific options before FieldPanel sees the kwargs.
        self.classname = kwargs.pop("classname", "")
        self.address_field = kwargs.pop("address_field", "")
        self.zoom_field = kwargs.pop("zoom_field", "")
        self.hide_latlng = kwargs.pop("hide_latlng", False)
        self.zoom = kwargs.pop("zoom", GEO_WIDGET_ZOOM)
        super().__init__(*args, **kwargs)
    def widget_overrides(self):
        """Return the LeafletField widget configured for this panel's field."""
        field = self.model._meta.get_field(self.field_name)
        # Fields without an `srid` attribute fall back to WGS84 (4326).
        srid = getattr(field, "srid", 4326)
        return {
            self.field_name: LeafletField(
                address_field=self.address_field,
                zoom_field=self.zoom_field,
                hide_latlng=self.hide_latlng,
                zoom=self.zoom,
                srid=srid,
                id_prefix="id_",
            )
        }
    def clone(self):
        """Return a new panel instance carrying the same configuration."""
        return self.__class__(
            field_name=self.field_name,
            classname=self.classname,
            address_field=self.address_field,
            zoom_field=self.zoom_field,
            hide_latlng=self.hide_latlng,
            zoom=self.zoom,
        )
| 29.901786 | 84 | 0.597193 | import warnings
from wagtail.admin.edit_handlers import FieldPanel
from wagtailgeowidget import geocoders
from wagtailgeowidget.app_settings import GEO_WIDGET_ZOOM
from wagtailgeowidget.widgets import GeocoderField, GoogleMapsField, LeafletField
class GoogleMapsPanel(FieldPanel):
def __init__(self, *args, **kwargs):
self.classname = kwargs.pop("classname", "")
self.address_field = kwargs.pop("address_field", "")
self.zoom_field = kwargs.pop("zoom_field", "")
self.hide_latlng = kwargs.pop("hide_latlng", False)
self.zoom = kwargs.pop("zoom", GEO_WIDGET_ZOOM)
super().__init__(*args, **kwargs)
def widget_overrides(self):
field = self.model._meta.get_field(self.field_name)
srid = getattr(field, "srid", 4326)
return {
self.field_name: GoogleMapsField(
address_field=self.address_field,
zoom_field=self.zoom_field,
hide_latlng=self.hide_latlng,
zoom=self.zoom,
srid=srid,
id_prefix="id_",
)
}
def clone(self):
return self.__class__(
field_name=self.field_name,
classname=self.classname,
address_field=self.address_field,
zoom_field=self.zoom_field,
hide_latlng=self.hide_latlng,
zoom=self.zoom,
)
class GeoPanel(GoogleMapsPanel):
def __init__(self, *args, **kwargs):
import warnings
warnings.warn(
"GeoPanel will be deprecated in version 7, use GoogleMapsPanel instead",
PendingDeprecationWarning,
)
super().__init__(*args, **kwargs)
class GeoAddressPanel(FieldPanel):
def __init__(self, *args, **kwargs):
self.geocoder = kwargs.pop("geocoder", geocoders.NOMINATIM)
super().__init__(*args, **kwargs)
def widget_overrides(self):
return {
self.field_name: GeocoderField(
geocoder=self.geocoder,
)
}
def clone(self):
return self.__class__(
field_name=self.field_name,
geocoder=self.geocoder,
)
class LeafletPanel(FieldPanel):
def __init__(self, *args, **kwargs):
self.classname = kwargs.pop("classname", "")
self.address_field = kwargs.pop("address_field", "")
self.zoom_field = kwargs.pop("zoom_field", "")
self.hide_latlng = kwargs.pop("hide_latlng", False)
self.zoom = kwargs.pop("zoom", GEO_WIDGET_ZOOM)
super().__init__(*args, **kwargs)
def widget_overrides(self):
field = self.model._meta.get_field(self.field_name)
srid = getattr(field, "srid", 4326)
return {
self.field_name: LeafletField(
address_field=self.address_field,
zoom_field=self.zoom_field,
hide_latlng=self.hide_latlng,
zoom=self.zoom,
srid=srid,
id_prefix="id_",
)
}
def clone(self):
return self.__class__(
field_name=self.field_name,
classname=self.classname,
address_field=self.address_field,
zoom_field=self.zoom_field,
hide_latlng=self.hide_latlng,
zoom=self.zoom,
)
| true | true |
f7ff2b76a5718666b526e8bc6752d78cfcc79232 | 8,202 | py | Python | carl/jaccard.py | zaratec/carl | 9d655c2cb75d90ddc6b2d101073248a2fc3c252e | [
"MIT"
] | null | null | null | carl/jaccard.py | zaratec/carl | 9d655c2cb75d90ddc6b2d101073248a2fc3c252e | [
"MIT"
] | null | null | null | carl/jaccard.py | zaratec/carl | 9d655c2cb75d90ddc6b2d101073248a2fc3c252e | [
"MIT"
] | 1 | 2020-11-19T23:41:28.000Z | 2020-11-19T23:41:28.000Z | import itertools
import operator
from carl import charts
from carl import common
from carl import storage
from carl.analysis import table_to_dict, map_items_to_parent, print_tabulated
def gen_view_sets(pages):
    """Build one set of request values per view for each page in `pages`.

    Returns {page_id: {view: set(values)}}; only pages whose HAR file was
    saved successfully ("har_status" == "success") are included.
    """
    views = common.VIEWS
    reqs = table_to_dict("req")
    page_req_lists = map_items_to_parent(reqs, pages)
    view_sets = {}
    for page_id, req_list in page_req_lists.iteritems():
        # exclude any pages that did not successfully save a HAR file
        if pages[page_id].data["har_status"] == "success":
            # prep empty sets per page for each view
            view_sets[page_id] = {}
            for v in views:
                view_sets[page_id][v] = set()
            # accumulate requests per page by view
            for r in req_list:
                for v in views:
                    view_sets[page_id][v].add(r.data[v])
    return view_sets
def print_jaccard_by_url(verbose, filt):
    """Print a summary table of per-URL Jaccard values.

    Args:
        verbose: also print per-URL cardinality and jaccard details.
        filt: drop URLs that failed too often (see filter_url_result).

    Rows are sorted on the first view's column (a formatted string, so the
    sort is lexical on "val (union size)").
    """
    data = jaccard_by_url()
    if filt:
        data = filter_url_result(data)
    views = common.VIEWS
    headers = ["url", "loads"] + views
    headers += ["pair avg {}".format(v) for v in views]
    table = []
    for url in data:
        # extract url data
        page_set = data[url]["page_set"]
        jac = data[url]["jaccard"]
        pair_jac = data[url]["pair_jaccard"]
        # print per url details
        if verbose:
            print("site: {} : loads: {}".format(url, len(page_set)))
            print_page_set_cardinality(page_set)
            print_jaccard(jac, pair_jac)
            print("#"*40)
        # construct summary row: "jaccard (union size)" per view, then the
        # pairwise averages per view
        row = [url, len(page_set)]
        for view in views:
            view_jac = jac[view]
            view_str = "{:.2f} ({})".format(
                view_jac["val"], len(view_jac["u"]))
            row.append(view_str)
        for view in views:
            row.append("{:.2f}".format(pair_jac[view]))
        table.append(row)
    table = sorted(table, key=operator.itemgetter(headers.index(views[0])))
    print_tabulated(table, headers)
def filter_url_result(results):
    """Drop URLs whose page loads failed too often.

    Keeps only URLs with more than one load and at least half as many loads
    as the best-covered URL. Prints what was removed/kept and returns the
    filtered mapping. An empty input now returns {} instead of raising
    ValueError (max of empty sequence) / ZeroDivisionError.
    """
    if not results:
        print("Filtered out 0 due to failing more than half the time: []")
        print("Keeping 0/0 (0.00)")
        return {}
    max_size = max(len(data["page_set"]) for data in results.values())
    valid = {}
    removed = []
    for url, data in results.items():
        num_loads = len(data["page_set"])
        # keep URLs that loaded at least half as often as the best URL,
        # and more than once
        if num_loads >= max_size / 2.0 and num_loads > 1:
            valid[url] = data
        else:
            removed.append(url)
    print("Filtered out {} due to failing more than half the time: {}".format(
        len(removed), removed))
    percent = float(len(valid)) / float(len(results))
    print("Keeping {}/{} ({:.2f})".format(len(valid), len(results), percent))
    return valid
def jaccard_by_url():
    """Compute Jaccard statistics for every URL known to storage.

    Returns {url: {"page_set": {page_id: view_sets},
                   "jaccard": per-view jaccard over all loads,
                   "pair_jaccard": per-view average over all load pairs}}.
    """
    # collect necessary data
    pages = table_to_dict("page")
    page_sets = gen_view_sets(pages)
    urls = storage.get("urls")
    # initialize data structures
    page_set_by_url = {}
    result = {}
    for u in urls:
        url = u["url"]
        page_set_by_url[url] = {}
        result[url] = {"page_set": None, "jaccard": None}
    # group page_sets by url
    for page_id, view_set in page_sets.iteritems():
        url = pages[page_id].data["url"]
        page_set_by_url[url][page_id] = view_set
    # calculate the jaccard across each page set
    for url, page_set in page_set_by_url.iteritems():
        result[url]["page_set"] = page_set
        jac = calculate_jaccard_over_pages(page_set)
        result[url]["jaccard"] = jac
        # calculate jaccard across all pairs of loads
        pair_jac_results = []
        for pair in list(itertools.combinations(page_set.items(), 2)):
            pair_jac = calculate_jaccard_over_pages(dict(pair))
            pair_jac_results.append(pair_jac)
        result[url]["pair_jaccard"] = summarize_pairs(pair_jac_results)
    return result
def calculate_jaccard_over_pages(page_sets):
    """Compute the per-view Jaccard index across a set of page loads.

    Args:
        page_sets: {page_id: {view: set(values)}} as built by gen_view_sets.

    Returns:
        {view: {"i": intersection set, "u": union set, "val": float}} where
        val = |intersection| / |union|, defined as 0 when there is nothing
        to compare (previously this raised ZeroDivisionError for an empty
        union, and TypeError when a view never appeared in any page set).
    """
    views = common.VIEWS
    jaccard = {}
    # initialize sets
    for v in views:
        jaccard[v] = {"i": None, "u": set()}
    for page, view_sets in page_sets.iteritems():
        for view, value in view_sets.iteritems():
            jaccard[view]["u"] = jaccard[view]["u"].union(value)
            if jaccard[view]["i"] is None:
                # the first page initializes the running intersection
                jaccard[view]["i"] = value
            else:
                jaccard[view]["i"] = jaccard[view]["i"].intersection(value)
    for view in views:
        if len(page_sets) > 0:
            if jaccard[view]["i"] is None:
                # view never appeared in any page's view_sets
                jaccard[view]["i"] = set()
            i = len(jaccard[view]["i"])
            u = len(jaccard[view]["u"])
            # an empty union means every set was empty; define jaccard as 0
            jaccard[view]["val"] = float(i) / float(u) if u else 0.0
        else:
            jaccard[view] = {"i": set(), "u": set(), "val": 0}
    return jaccard
def print_page_set_cardinality(page_sets):
    """Print, per view, the number of collected items for each page load.

    Columns are page ids truncated to 4 characters; rows are views.
    NOTE(review): the loop rebinds the `page_sets` parameter name; harmless
    because the iterator is already bound, but confusing to read.
    """
    views = common.VIEWS
    headers = ["page"]
    rows = dict(zip(views, [[v] for v in views]))
    for page, page_sets in page_sets.iteritems():
        headers.append(page[:4])
        for view, value in page_sets.iteritems():
            rows[view].append(len(value))
    table = [rows[view] for view in views]
    print_tabulated(table, headers)
def print_jaccard(jaccard, pairs=None):
    """Print intersection/union sizes and the jaccard value per view.

    Args:
        jaccard: result of calculate_jaccard_over_pages.
        pairs: optional per-view pairwise averages; adds a "pair avg" row.
    """
    views = common.VIEWS
    inter = ["inter"]
    union = ["union"]
    value = ["value"]
    pair_avg = ["pair avg"]
    for view in views:
        inter.append(len(jaccard[view]["i"]))
        union.append(len(jaccard[view]["u"]))
        value.append("{:.2f}".format(jaccard[view]["val"]))
        if pairs:
            pair_avg.append("{:.2f}".format(pairs[view]))
    table = [inter, union, value]
    if pairs:
        table.append(pair_avg)
    headers = ["measure"]+views
    print_tabulated(table, headers)
def chart_jaccard(filt):
    """Plot ECDF and density charts of jaccard values across all URLs.

    For each view, collects the overall jaccard value and the pairwise
    average per URL, sorts each series, then hands them to the charts module.
    """
    views = common.VIEWS
    data = jaccard_by_url()
    if filt:
        data = filter_url_result(data)
    # initialize empty lists, one series per view plus one for pair averages
    out = {}
    for v in views:
        out[v] = []
        out[v+"_pair_avg"] = []
    # accumulate jaccard values by url
    for url in data:
        jac = data[url]["jaccard"]
        pair = data[url]["pair_jaccard"]
        for v in views:
            out[v].append(jac[v]["val"])
            out[v+"_pair_avg"].append(pair[v])
    # sort all data sets
    for v in views:
        out[v] = sorted(out[v])
        out[v+"_pair_avg"] = sorted(out[v+"_pair_avg"])
    charts.ecdf(out)
    charts.density(out)
def print_page_set_view(pages, jac, view):
    """Print a presence matrix of `view` items per page load.

    Rows are page loads (ids truncated to 4 chars), columns are the items
    in the union set; "#" marks presence, "0" absence. Also prints the
    index-annotated union and, per page, the items beyond the intersection.
    NOTE: this function uses Python 2 print statements and list-returning
    range(); it is not Python 3 compatible as written.
    """
    headers = list(jac[view]["u"])
    table = []
    for page_id, view_sets in pages.iteritems():
        if len(view_sets[view]) > 0:
            row = [page_id[:4]]
            for item in headers:
                if item in view_sets[view]:
                    row.append("#")
                else:
                    row.append("0")
            table.append(row)
    headers = ["page"] + range(len(headers))
    print_tabulated(table, headers)
    print "Union across all loads"
    print ["{} {}".format(i, h) for i, h in enumerate(list(jac[view]["u"]))]
    print "\nVariance from intersection"
    intersection = jac[view]["i"]
    for page_id, view_sets in pages.iteritems():
        if len(view_sets[view]) > 0:
            diff = list(view_sets[view].difference(intersection))
            res = "{}:{}".format(page_id[:4], diff)
            if len(diff) > 0:
                print res
def inspect_url(url):
    """Print detailed jaccard diagnostics for a single URL.

    Prepends "http://" when the scheme is missing, loads the URL's page rows
    from storage, then prints cardinalities, jaccard values, and the
    presence matrix for the first view.
    """
    if not url.startswith("http"):
        url = "http://{}".format(url)
    page_rows = storage.get("pages_for_url", (url,))
    pages = {}
    for row in page_rows:
        item = storage.ITEMS["page"].from_sql_row(row)
        pages[item.data["page_id"]] = item
    view_sets = gen_view_sets(pages)
    print_page_set_cardinality(view_sets)
    jac = calculate_jaccard_over_pages(view_sets)
    print_jaccard(jac)
    print_page_set_view(view_sets, jac, common.VIEWS[0])
def summarize_pairs(jac_list):
    """Average the per-view "val" entries over a list of jaccard results."""
    averages = {}
    for view in common.VIEWS:
        values = [jac[view]["val"] for jac in jac_list]
        # an empty list of pairs averages to 0
        averages[view] = sum(values) / float(len(values)) if values else 0
    return averages
| 30.043956 | 78 | 0.584492 | import itertools
import operator
from carl import charts
from carl import common
from carl import storage
from carl.analysis import table_to_dict, map_items_to_parent, print_tabulated
def gen_view_sets(pages):
""" Generates the view_set for each page in pages"""
views = common.VIEWS
reqs = table_to_dict("req")
page_req_lists = map_items_to_parent(reqs, pages)
view_sets = {}
for page_id, req_list in page_req_lists.iteritems():
if pages[page_id].data["har_status"] == "success":
view_sets[page_id] = {}
for v in views:
view_sets[page_id][v] = set()
for r in req_list:
for v in views:
view_sets[page_id][v].add(r.data[v])
return view_sets
def print_jaccard_by_url(verbose, filt):
data = jaccard_by_url()
if filt:
data = filter_url_result(data)
views = common.VIEWS
headers = ["url", "loads"] + views
headers += ["pair avg {}".format(v) for v in views]
table = []
for url in data:
page_set = data[url]["page_set"]
jac = data[url]["jaccard"]
pair_jac = data[url]["pair_jaccard"]
if verbose:
print("site: {} : loads: {}".format(url, len(page_set)))
print_page_set_cardinality(page_set)
print_jaccard(jac, pair_jac)
print("#"*40)
row = [url, len(page_set)]
for view in views:
view_jac = jac[view]
view_str = "{:.2f} ({})".format(
view_jac["val"], len(view_jac["u"]))
row.append(view_str)
for view in views:
row.append("{:.2f}".format(pair_jac[view]))
table.append(row)
table = sorted(table, key=operator.itemgetter(headers.index(views[0])))
print_tabulated(table, headers)
def filter_url_result(results):
sizes = {}
for url in results:
sizes[url] = len(results[url]["page_set"])
max_size = max(sizes.values())
valid = {}
invalid = {}
for url in results:
num_loads = len(results[url]["page_set"])
if num_loads >= max_size/2.0 and num_loads > 1:
valid[url] = results[url]
else:
invalid[url] = results[url]
removed = [url for url in invalid]
print("Filtered out {} due to failing more than half the time: {}".format(
len(removed), removed))
percent = float(len(valid))/float(len(results))
print("Keeping {}/{} ({:.2f})".format(len(valid), len(results), percent))
return valid
def jaccard_by_url():
pages = table_to_dict("page")
page_sets = gen_view_sets(pages)
urls = storage.get("urls")
page_set_by_url = {}
result = {}
for u in urls:
url = u["url"]
page_set_by_url[url] = {}
result[url] = {"page_set": None, "jaccard": None}
for page_id, view_set in page_sets.iteritems():
url = pages[page_id].data["url"]
page_set_by_url[url][page_id] = view_set
for url, page_set in page_set_by_url.iteritems():
result[url]["page_set"] = page_set
jac = calculate_jaccard_over_pages(page_set)
result[url]["jaccard"] = jac
pair_jac_results = []
for pair in list(itertools.combinations(page_set.items(), 2)):
pair_jac = calculate_jaccard_over_pages(dict(pair))
pair_jac_results.append(pair_jac)
result[url]["pair_jaccard"] = summarize_pairs(pair_jac_results)
return result
def calculate_jaccard_over_pages(page_sets):
views = common.VIEWS
jaccard = {}
for v in views:
jaccard[v] = {"i": None, "u": set()}
for page, view_sets in page_sets.iteritems():
for view, value in view_sets.iteritems():
jaccard[view]["u"] = jaccard[view]["u"].union(value)
if jaccard[view]["i"] is None:
jaccard[view]["i"] = value
else:
jaccard[view]["i"] = jaccard[view]["i"].intersection(value)
for view in views:
if len(page_sets) > 0:
i = len(jaccard[view]["i"])
u = len(jaccard[view]["u"])
jaccard[view]["val"] = (float(i)/float(u))
else:
jaccard[view] = {"i": set(), "u": set(), "val": 0}
return jaccard
def print_page_set_cardinality(page_sets):
views = common.VIEWS
headers = ["page"]
rows = dict(zip(views, [[v] for v in views]))
for page, page_sets in page_sets.iteritems():
headers.append(page[:4])
for view, value in page_sets.iteritems():
rows[view].append(len(value))
table = [rows[view] for view in views]
print_tabulated(table, headers)
def print_jaccard(jaccard, pairs=None):
views = common.VIEWS
inter = ["inter"]
union = ["union"]
value = ["value"]
pair_avg = ["pair avg"]
for view in views:
inter.append(len(jaccard[view]["i"]))
union.append(len(jaccard[view]["u"]))
value.append("{:.2f}".format(jaccard[view]["val"]))
if pairs:
pair_avg.append("{:.2f}".format(pairs[view]))
table = [inter, union, value]
if pairs:
table.append(pair_avg)
headers = ["measure"]+views
print_tabulated(table, headers)
def chart_jaccard(filt):
views = common.VIEWS
data = jaccard_by_url()
if filt:
data = filter_url_result(data)
out = {}
for v in views:
out[v] = []
out[v+"_pair_avg"] = []
for url in data:
jac = data[url]["jaccard"]
pair = data[url]["pair_jaccard"]
for v in views:
out[v].append(jac[v]["val"])
out[v+"_pair_avg"].append(pair[v])
for v in views:
out[v] = sorted(out[v])
out[v+"_pair_avg"] = sorted(out[v+"_pair_avg"])
charts.ecdf(out)
charts.density(out)
def print_page_set_view(pages, jac, view):
headers = list(jac[view]["u"])
table = []
for page_id, view_sets in pages.iteritems():
if len(view_sets[view]) > 0:
row = [page_id[:4]]
for item in headers:
if item in view_sets[view]:
row.append("#")
else:
row.append("0")
table.append(row)
headers = ["page"] + range(len(headers))
print_tabulated(table, headers)
print "Union across all loads"
print ["{} {}".format(i, h) for i, h in enumerate(list(jac[view]["u"]))]
print "\nVariance from intersection"
intersection = jac[view]["i"]
for page_id, view_sets in pages.iteritems():
if len(view_sets[view]) > 0:
diff = list(view_sets[view].difference(intersection))
res = "{}:{}".format(page_id[:4], diff)
if len(diff) > 0:
print res
def inspect_url(url):
if not url.startswith("http"):
url = "http://{}".format(url)
page_rows = storage.get("pages_for_url", (url,))
pages = {}
for row in page_rows:
item = storage.ITEMS["page"].from_sql_row(row)
pages[item.data["page_id"]] = item
view_sets = gen_view_sets(pages)
print_page_set_cardinality(view_sets)
jac = calculate_jaccard_over_pages(view_sets)
print_jaccard(jac)
print_page_set_view(view_sets, jac, common.VIEWS[0])
def summarize_pairs(jac_list):
views = common.VIEWS
result = {}
for v in views:
view_vals = [jac[v]["val"] for jac in jac_list]
if len(view_vals) > 0:
result[v] = sum(view_vals)/float(len(view_vals))
else:
result[v] = 0
return result
| false | true |
f7ff2b88a5f2e3746635f10ada3446d842d5baea | 1,248 | py | Python | tests/test_openwebifpy.py | fbradyirl/openwebifpy | e40454fbf6e67568a032c67700818aaf6d8e81df | [
"MIT"
] | 5 | 2019-04-07T09:37:37.000Z | 2021-12-01T11:30:23.000Z | tests/test_openwebifpy.py | fbradyirl/openwebifpy | e40454fbf6e67568a032c67700818aaf6d8e81df | [
"MIT"
] | 6 | 2019-03-01T16:16:17.000Z | 2021-05-21T14:52:06.000Z | tests/test_openwebifpy.py | fbradyirl/openwebifpy | e40454fbf6e67568a032c67700818aaf6d8e81df | [
"MIT"
] | 1 | 2020-11-13T14:42:02.000Z | 2020-11-13T14:42:02.000Z | """
tests.test_api
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests the api
Copyright (c) 2015 Finbarr Brady <https://github.com/fbradyirl>
Licensed under the MIT license.
"""
# pylint: disable=protected-access
import unittest
import openwebif.api
from openwebif.error import OpenWebIfError, MissingParamError
class TestAPI(unittest.TestCase):
    """Unit tests for the openwebif.api module."""
    def test_create(self):
        """Constructing a device without a host must raise MissingParamError."""
        with self.assertRaises(MissingParamError):
            openwebif.api.CreateDevice()
    def test_get_picon_name(self):
        """Picon names are lower-cased with spaces and accents stripped."""
        picon = openwebif.api.CreateDevice.get_picon_name('RTÉ One')
        self.assertEqual(picon, "rteone")
# def test_status(self):
# """ Test getting version and status. """
# # Use this to test on real box
# client = openwebif.api.CreateDevice('vuduo2.local')
# self.assertEqual("OWIF 1.3.6", client.get_version())
# self.assertTrue(len(client.get_status_info()) > 8)
# # Test that an exception doesnt get thrown
# result = client.is_box_in_standby()
# self.assertTrue(result is True or result is False)
| 32 | 93 | 0.66266 |
import unittest
import openwebif.api
from openwebif.error import OpenWebIfError, MissingParamError
class TestAPI(unittest.TestCase):
def test_create(self):
self.assertRaises(MissingParamError, lambda: openwebif.api.CreateDevice())
def test_get_picon_name(self):
self.assertEqual(openwebif.api.CreateDevice.get_picon_name('RTÉ One'), "rteone")
| true | true |
f7ff2bafcd87a9be7eec1b6ce21916f115d5b7af | 1,058 | py | Python | tests/functional/scripts/pyi_load_dll_using_ctypes.py | hawkhai/pyinstaller | 016a24479b34de161792c72dde455a81ad4c78ae | [
"Apache-2.0"
] | 9,267 | 2015-01-01T04:08:45.000Z | 2022-03-31T11:42:38.000Z | tests/functional/scripts/pyi_load_dll_using_ctypes.py | hawkhai/pyinstaller | 016a24479b34de161792c72dde455a81ad4c78ae | [
"Apache-2.0"
] | 5,150 | 2015-01-01T12:09:56.000Z | 2022-03-31T18:06:12.000Z | tests/functional/scripts/pyi_load_dll_using_ctypes.py | hawkhai/pyinstaller | 016a24479b34de161792c72dde455a81ad4c78ae | [
"Apache-2.0"
] | 2,101 | 2015-01-03T10:25:27.000Z | 2022-03-30T11:04:42.000Z | #-----------------------------------------------------------------------------
# Copyright (c) 2005-2021, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
import os
import sys
from ctypes import CDLL
from pyi_get_datadir import get_data_dir
# Library name based on platform.
if sys.platform.startswith('win32'):
    name = 'ctypes_dylib.dll'
elif sys.platform.startswith("darwin"):
    name = 'ctypes_dylib.dylib'
else:
    # any other platform (Linux, BSDs, ...) uses the ELF shared-object suffix
    name = 'ctypes_dylib.so'
# Test resolving dynamic libraries loaded in Python code at runtime by Python module 'ctypes'.
tct = CDLL(os.path.join(get_data_dir(), 'ctypes_dylib', name))
# The "dummy" function in ctypes_dylib returning value + 12.
assert tct.dummy(42) == (42 + 12)
| 35.266667 | 94 | 0.640832 |
import os
import sys
from ctypes import CDLL
from pyi_get_datadir import get_data_dir
if sys.platform.startswith('win32'):
name = 'ctypes_dylib.dll'
elif sys.platform.startswith("darwin"):
name = 'ctypes_dylib.dylib'
else:
name = 'ctypes_dylib.so'
tct = CDLL(os.path.join(get_data_dir(), 'ctypes_dylib', name))
assert tct.dummy(42) == (42 + 12)
| true | true |
f7ff2e0ebe354bc5ae9421ae9279e46ddcffc9d9 | 1,477 | py | Python | google/ads/googleads/v10/errors/types/access_invitation_error.py | JakobSteixner/google-ads-python | df2b802cc7e78295a4ece21cc7ef3787cd35dab0 | [
"Apache-2.0"
] | null | null | null | google/ads/googleads/v10/errors/types/access_invitation_error.py | JakobSteixner/google-ads-python | df2b802cc7e78295a4ece21cc7ef3787cd35dab0 | [
"Apache-2.0"
] | null | null | null | google/ads/googleads/v10/errors/types/access_invitation_error.py | JakobSteixner/google-ads-python | df2b802cc7e78295a4ece21cc7ef3787cd35dab0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
# Proto-plus module descriptor: registers this module's messages under the
# google.ads.googleads.v10.errors proto package.
__protobuf__ = proto.module(
    package="google.ads.googleads.v10.errors",
    marshal="google.ads.googleads.v10",
    manifest={"AccessInvitationErrorEnum",},
)
# NOTE: auto-generated code; the numeric values mirror the Google Ads API
# AccessInvitationError enum and must not be changed by hand.
class AccessInvitationErrorEnum(proto.Message):
    r"""Container for enum describing possible AccessInvitation
    errors.
    """
    class AccessInvitationError(proto.Enum):
        r"""Enum describing possible AccessInvitation errors."""
        UNSPECIFIED = 0
        UNKNOWN = 1
        INVALID_EMAIL_ADDRESS = 2
        EMAIL_ADDRESS_ALREADY_HAS_ACCESS = 3
        INVALID_INVITATION_STATUS = 4
        GOOGLE_CONSUMER_ACCOUNT_NOT_ALLOWED = 5
        INVALID_INVITATION_ID = 6
        EMAIL_ADDRESS_ALREADY_HAS_PENDING_INVITATION = 7
        PENDING_INVITATIONS_LIMIT_EXCEEDED = 8
        EMAIL_DOMAIN_POLICY_VIOLATED = 9
__all__ = tuple(sorted(__protobuf__.manifest))
| 31.425532 | 74 | 0.725796 |
import proto
__protobuf__ = proto.module(
package="google.ads.googleads.v10.errors",
marshal="google.ads.googleads.v10",
manifest={"AccessInvitationErrorEnum",},
)
class AccessInvitationErrorEnum(proto.Message):
class AccessInvitationError(proto.Enum):
UNSPECIFIED = 0
UNKNOWN = 1
INVALID_EMAIL_ADDRESS = 2
EMAIL_ADDRESS_ALREADY_HAS_ACCESS = 3
INVALID_INVITATION_STATUS = 4
GOOGLE_CONSUMER_ACCOUNT_NOT_ALLOWED = 5
INVALID_INVITATION_ID = 6
EMAIL_ADDRESS_ALREADY_HAS_PENDING_INVITATION = 7
PENDING_INVITATIONS_LIMIT_EXCEEDED = 8
EMAIL_DOMAIN_POLICY_VIOLATED = 9
__all__ = tuple(sorted(__protobuf__.manifest))
| true | true |
f7ff2e201982a9655fd15644da752ad00b36de5d | 103 | py | Python | setup.py | chanhakim/RTGraph | 71a0054a574d1e23cc31420cc6f3124d4f8c2cc1 | [
"MIT"
] | null | null | null | setup.py | chanhakim/RTGraph | 71a0054a574d1e23cc31420cc6f3124d4f8c2cc1 | [
"MIT"
] | null | null | null | setup.py | chanhakim/RTGraph | 71a0054a574d1e23cc31420cc6f3124d4f8c2cc1 | [
"MIT"
] | null | null | null | from setuptools import setup
setup(name="mbci lab", app=["mbci_lab/app.py"], setup_requires=['py2app']) | 51.5 | 74 | 0.747573 | from setuptools import setup
setup(name="mbci lab", app=["mbci_lab/app.py"], setup_requires=['py2app']) | true | true |
f7ff2e55f9e4bfd191e519db80c4ee04a2aaa64b | 1,648 | py | Python | gui/prototxt_editor/editor_slider.py | anglebinbin/Barista-tool | 2d51507fb3566881923f0b273127f59d23ed317f | [
"MIT"
] | 1 | 2020-02-11T19:05:17.000Z | 2020-02-11T19:05:17.000Z | gui/prototxt_editor/editor_slider.py | anglebinbin/Barista-tool | 2d51507fb3566881923f0b273127f59d23ed317f | [
"MIT"
] | null | null | null | gui/prototxt_editor/editor_slider.py | anglebinbin/Barista-tool | 2d51507fb3566881923f0b273127f59d23ed317f | [
"MIT"
] | null | null | null | from PyQt5.QtWidgets import QWidget, QSlider, QHBoxLayout
from PyQt5.QtCore import QMargins, QObject, Qt, pyqtSignal
from PyQt5.QtWidgets import QHBoxLayout, QSlider, QWidget
# widget containig the slider
# direct subclassing of QSlider leads to errors
valueChanged = pyqtSignal(int)
def __init__(self, vmin, vcur, vmax, parent):
QWidget.__init__(self, parent)
self.editor = parent
self.setFocusPolicy(0)
#layouts
self.layout = QHBoxLayout(self)
self.layout.setContentsMargins(0, 0, 0, 0)
#add the slider
self.slider = QSlider()
self.slider.setRange(vmin, vmax)
self.slider.setValue(vcur)
self.slider.setOrientation(1)
self.slider.setFocusPolicy(0)
self.layout.addWidget(self.slider)
self.slider.valueChanged.connect(self._passThrough)
def updatePosition(self):
'''set the position of the slider'''
rect = self.editor.rect()
dy = 0
dx = 0
#check for scrollbars
if self.editor.verticalScrollBar().isVisible():
dx = self.editor.verticalScrollBar().width()
if self.editor.horizontalScrollBar().isVisible():
dy = self.editor.horizontalScrollBar().height()
mx = rect.width() - self.width() - dx - 5
my = rect.height() - self.height() - dy - 5
self.move(mx, my)
def paintEvent(self, event):
self.updatePosition()
super(EditorSlider, self).paintEvent(event)
def _passThrough(self, value):
'''pass through the valueChanged signal of the QSlider'''
self.valueChanged.emit(value)
| 31.09434 | 65 | 0.63835 | from PyQt5.QtWidgets import QWidget, QSlider, QHBoxLayout
from PyQt5.QtCore import QMargins
from PyQt5.QtCore import QObject, pyqtSignal
class EditorSlider(QWidget):
valueChanged = pyqtSignal(int)
def __init__(self, vmin, vcur, vmax, parent):
QWidget.__init__(self, parent)
self.editor = parent
self.setFocusPolicy(0)
self.layout = QHBoxLayout(self)
self.layout.setContentsMargins(0, 0, 0, 0)
self.slider = QSlider()
self.slider.setRange(vmin, vmax)
self.slider.setValue(vcur)
self.slider.setOrientation(1)
self.slider.setFocusPolicy(0)
self.layout.addWidget(self.slider)
self.slider.valueChanged.connect(self._passThrough)
def updatePosition(self):
rect = self.editor.rect()
dy = 0
dx = 0
if self.editor.verticalScrollBar().isVisible():
dx = self.editor.verticalScrollBar().width()
if self.editor.horizontalScrollBar().isVisible():
dy = self.editor.horizontalScrollBar().height()
mx = rect.width() - self.width() - dx - 5
my = rect.height() - self.height() - dy - 5
self.move(mx, my)
def paintEvent(self, event):
self.updatePosition()
super(EditorSlider, self).paintEvent(event)
def _passThrough(self, value):
self.valueChanged.emit(value)
| true | true |
f7ff2ead94d10a1453750fc5a531f65fbb53f3cf | 2,387 | py | Python | NLPCode/named_entity_recognition/utils.py | trusthlt/dp-across-nlp-tasks | ec3e03511420044cdb0bb1a3574925d354ff03f4 | [
"Apache-2.0"
] | 1 | 2021-12-21T14:05:34.000Z | 2021-12-21T14:05:34.000Z | NLPCode/named_entity_recognition/utils.py | trusthlt/dp-across-nlp-tasks | ec3e03511420044cdb0bb1a3574925d354ff03f4 | [
"Apache-2.0"
] | null | null | null | NLPCode/named_entity_recognition/utils.py | trusthlt/dp-across-nlp-tasks | ec3e03511420044cdb0bb1a3574925d354ff03f4 | [
"Apache-2.0"
] | null | null | null | import time
import torch
from queue import Queue
import numpy as np
from sklearn.metrics import precision_recall_fscore_support
def get_acc_pre_rec_f1(y_true, y_pred):
    """Return (accuracy, macro precision, macro recall, macro F1) for the labels."""
    assert len(y_true) == len(y_pred)
    # accuracy: fraction of positions where prediction matches the gold label
    correct = sum(1 for gold, pred in zip(y_true, y_pred) if gold == pred)
    # precision, recall, f1 (macro-averaged over classes)
    precision, recall, f1, _ = precision_recall_fscore_support(
        y_true, y_pred, average='macro')
    return correct / len(y_true), precision, recall, f1
def take_no_pad(seqlens, y_pred, y):
    """Strip padding from predicted and gold tag sequences.

    For each sequence i, keeps only the first seqlens[i] positions of
    y_pred[i] and y[i]. Assumes both are torch tensors (.cpu().detach()
    is called before converting to numpy -- TODO confirm with callers).
    Returns (y_true_noPad, y_pred_noPad) as lists of numpy arrays.
    """
    # for measurement save only the non-padded tags
    y_true_noPad = []
    y_pred_noPad = []
    for i, seqlen in enumerate(seqlens):
        y_pred_noPad.append(y_pred[i][:seqlen].cpu().detach().numpy())
        y_true_noPad.append(y[i][:seqlen].cpu().detach().numpy())
        if not (len(y_true_noPad[i]) == seqlens[i] and len(y_pred_noPad[i]) == seqlens[i]):
            # debug dump before the assertion below fires
            print(y_pred)
            print(len(y_pred))
            print(y)
            print(len(y))
            print(f'{len(y_true_noPad[i])} == {seqlens[i]} and {len(y_pred_noPad[i])} == {seqlens[i]}')
            print(f'{y_true_noPad[i]} with length: {seqlens[i]}')
            print(f'{y_pred_noPad[i]} with length: {seqlens[i]}')
        # sanity check if seq len is actual length of sequence
        assert(len(y_true_noPad[i]) == seqlens[i] and len(y_pred_noPad[i]) == seqlens[i])
    return y_true_noPad, y_pred_noPad
def epoch_time(start_time, end_time):
    """Split the elapsed time between two timestamps into (minutes, seconds).

    Assumes end_time >= start_time; fractional seconds are truncated.
    """
    elapsed_mins, elapsed_secs = divmod(int(end_time - start_time), 60)
    return elapsed_mins, elapsed_secs
class EarlyStopping:
    """Signal early stopping after `patience` checks without improvement.

    Fix: the previous implementation called Queue.put on a full queue when
    should_stop was invoked again after reaching `patience`, which blocks
    forever. It also returned None instead of False on the non-stop path.
    The `q`, `max_acc` and `counter` attributes are kept for backward
    compatibility with any external readers.
    """
    def __init__(self, patience):
        self.patience = patience
        # queue of non-improving accuracies since the last best (capped)
        self.q = Queue(maxsize=self.patience)
        self.max_acc = -1
        self.counter = 0
    def should_stop(self, accuracy):
        """Record `accuracy`; return True once patience is exhausted."""
        # a new best accuracy empties the queue and resets the counter
        if accuracy > self.max_acc:
            self.q.queue.clear()
            self.max_acc = accuracy
            self.counter = 0
            return False
        self.counter += 1
        # guard: Queue.put would block forever on a full queue
        if not self.q.full():
            self.q.put(accuracy)
        # stop once we have seen `patience` non-improvements in a row
        return self.counter >= self.patience
import torch
from queue import Queue
import numpy as np
from sklearn.metrics import precision_recall_fscore_support
def get_acc_pre_rec_f1(y_true, y_pred):
assert (len(y_true) == len(y_pred))
acc = 0
for t, p in zip(y_true, y_pred):
if t == p:
acc += 1
pr_epoch, rec_epoch, f1_epoch, _ = precision_recall_fscore_support(y_true, y_pred, average='macro')
return acc / len(y_true), pr_epoch, rec_epoch, f1_epoch
def take_no_pad(seqlens, y_pred, y):
y_true_noPad = []
y_pred_noPad = []
for i, seqlen in enumerate(seqlens):
y_pred_noPad.append(y_pred[i][:seqlen].cpu().detach().numpy())
y_true_noPad.append(y[i][:seqlen].cpu().detach().numpy())
if not (len(y_true_noPad[i]) == seqlens[i] and len(y_pred_noPad[i]) == seqlens[i]):
print(y_pred)
print(len(y_pred))
print(y)
print(len(y))
print(f'{len(y_true_noPad[i])} == {seqlens[i]} and {len(y_pred_noPad[i])} == {seqlens[i]}')
print(f'{y_true_noPad[i]} with length: {seqlens[i]}')
print(f'{y_pred_noPad[i]} with length: {seqlens[i]}')
assert(len(y_true_noPad[i]) == seqlens[i] and len(y_pred_noPad[i]) == seqlens[i])
return y_true_noPad, y_pred_noPad
def epoch_time(start_time, end_time):
elapsed_time = end_time - start_time
elapsed_mins = int(elapsed_time / 60)
elapsed_secs = int(elapsed_time - (elapsed_mins * 60))
return elapsed_mins, elapsed_secs
class EarlyStopping:
    """Track validation accuracy and signal when it has stagnated for
    `patience` consecutive evaluations (the queue's capacity is the
    patience budget)."""

    def __init__(self, patience):
        self.patience = patience
        self.q = Queue(maxsize = self.patience)
        self.max_acc = -1
        self.counter = 0

    def should_stop(self, accuracy):
        """Register one evaluation; return True when patience runs out.

        Returns None (falsy) on the non-stopping paths, exactly like the
        original implementation.
        """
        improved = accuracy > self.max_acc
        if improved:
            # New best value: empty the stagnation queue and restart counting.
            self.q.queue.clear()
            self.max_acc = accuracy
            self.counter = 0
        else:
            self.q.put(accuracy)
            self.counter += 1
            if self.q.full():
                return True
f7ff2ecf83ed29ff957f9409894c59ba5e464f59 | 367 | py | Python | src/gym_cpr_grid/setup.py | karthiks1701/cpr-appropriation-1 | 2c7d4be27b5e5aa09bb778fadf1d2ddf4a9d80fd | [
"MIT"
] | 1 | 2022-03-05T13:34:21.000Z | 2022-03-05T13:34:21.000Z | src/gym_cpr_grid/setup.py | karthiks1701/cpr-appropriation-1 | 2c7d4be27b5e5aa09bb778fadf1d2ddf4a9d80fd | [
"MIT"
] | null | null | null | src/gym_cpr_grid/setup.py | karthiks1701/cpr-appropriation-1 | 2c7d4be27b5e5aa09bb778fadf1d2ddf4a9d80fd | [
"MIT"
] | 1 | 2022-03-04T11:54:17.000Z | 2022-03-04T11:54:17.000Z | from setuptools import setup
# Minimal packaging metadata for the `gym_cpr_grid` OpenAI Gym environment;
# the only runtime dependency is `gym` itself.
setup(
    name="gym_cpr_grid",
    version="0.0.1",
    description="CPR appropriation grid world compatible with OpenAI Gym",
    url="http://github.com/Wadaboa/cpr-appropriation",
    author="Alessio Falai",
    author_email="falai.alessio@gmail.com",
    license="MIT",
    install_requires=["gym"],
    packages=["gym_cpr_grid"],
)
| 26.214286 | 74 | 0.689373 | from setuptools import setup
setup(
name="gym_cpr_grid",
version="0.0.1",
description="CPR appropriation grid world compatible with OpenAI Gym",
url="http://github.com/Wadaboa/cpr-appropriation",
author="Alessio Falai",
author_email="falai.alessio@gmail.com",
license="MIT",
install_requires=["gym"],
packages=["gym_cpr_grid"],
)
| true | true |
f7ff2f594e64999c5ca7288ea1b09a0061b030bc | 23,429 | py | Python | Task 12-14 - Fill births and deaths/Wikipedia/update_births_deaths.py | maurusian/DarijaBot | e2e70378dd5e6645a97359b7495fc2bba6ab185d | [
"MIT"
] | null | null | null | Task 12-14 - Fill births and deaths/Wikipedia/update_births_deaths.py | maurusian/DarijaBot | e2e70378dd5e6645a97359b7495fc2bba6ab185d | [
"MIT"
] | null | null | null | Task 12-14 - Fill births and deaths/Wikipedia/update_births_deaths.py | maurusian/DarijaBot | e2e70378dd5e6645a97359b7495fc2bba6ab185d | [
"MIT"
] | null | null | null | from openpyxl import Workbook, load_workbook
import re
import pywikibot
from pgvbotLib import *
from urllib.request import urlopen, quote, Request
from urllib.error import URLError
import json, sys, os
#import SPARQLWrapper
import requests
date_pattern = r'[-]{0,1}[0-9]+-[0-9]+-[0-9]+'
#print(re.match(date_pattern,'t2391385487'))
filename = './data/query.sparql'
export = './data/dict_list.json'
BIRTH_PAGE_PART = "قالب:ناس تزادو ف"
DEATH_PAGE_PART = "قالب:ناس توفاو ف"
BOT_NOTICE = "<noinclude>{{پاج كيعمرها بوت}}</noinclude>"
DARIJABOT_CAT = "<noinclude>[[تصنيف:قوالب زادهوم داريجابوت]]</noinclude>"
SAVE_MESSAGE = "لپاج تعمّرات ب معلومات من ويكيداطا"
BC = "ق.م."
NAME_SEPARATOR = " {{•}} "
TIMEQUERY = """
SELECT ?time ?timeprecision
WHERE
{ SERVICE wikibase:label { bd:serviceParam wikibase:language "en". }
{ wd:{1} p:{2}/psv:{2} ?timenode. }
?timenode wikibase:timeValue ?time.
?timenode wikibase:timePrecision ?timeprecision.
}
"""
#added to corresponding person page and potentially created
BIRTH_YEAR_CAT_PATTERN = "تصنيف:زيادة {year}{BC}"
DEATH_YEAR_CAT_PATTERN = "تصنيف:وفيات {year}{BC}"
#added to corresponding year cat page and potentially created
MAIN_YEAR_CAT_PATTERN = "تصنيف:{year}{BC}"
#added to all main year cat pages
GENERAL_YEAR_CAT = "تصنيف:لعوام"
#added to all year cat pages of the same type
BIRTHS_BY_YEAR_CAT = "تصنيف:زيادات علا حساب لعام"
DEATHS_BY_YEAR_CAT = "تصنيف:وفيات علا حساب لعام"
#added to corresponding year cat pages of the same type and potentially created (should be calculated)
BIRTH_DECADE_CAT_PATTERN = "تصنيف:زيادة ف عوام {decade}{BC}"
DEATH_DECADE_CAT_PATTERN = "تصنيف:وفيات ف عوام {decade}{BC}"
#added to corresponding decade cat page
MAIN_DECADE_CAT_PATTERN = "تصنيف:عوام {decade}{BC}"
#added to all main decade cat pages
GENERAL_DECADE_CAT = "تصنيف:لعقود"
#added to all decade cat pages of the same type
BIRTHS_BY_DECADE_CAT = "تصنيف:زيادات علا حساب لعقد"
DEATHS_BY_DECADE_CAT = "تصنيف:وفيات علا حساب لعقد"
#added to corresponding decade pages of the same type and potentially created (should be calculated)
BIRTH_CENT_CAT_PATTERN = "تصنيف:زيادة ف لقرن {century}{BC}"
DEATH_CENT_CAT_PATTERN = "تصنيف:وفيات ف لقرن {century}{BC}"
#added to corresponding century cat page
MAIN_CENT_CAT_PATTERN = "تصنيف:لقرن {century}{BC}"
#added to all century cat pages of the same type
BIRTHS_BY_CENT_CAT = "تصنيف:زيادات علا حساب لقرن"
DEATHS_BY_CENT_CAT = "تصنيف:وفيات علا حساب لقرن"
#added to all main century cat pages
GENERAL_CENT_CAT = "تصنيف:لقرون"
#added to corresponding century pages of the same type and potentially created (should be calculated)
BIRTH_MILN_CAT_PATTERN = "تصنيف:زيادات ف لألفية {millennium}{BC}"
DEATH_MILN_CAT_PATTERN = "تصنيف:وفيات ف لألفية {millennium}{BC}"
MAIN_MILN_CAT_PATTERN = "تصنيف:لألفية {millennium}{BC}"
CAT_ADDED_MESSAGE = "تصنيف تزاد"
CAT_PAGE_CREATED_MSG = "پاج د تّصنيف تقادات"
CAT_FIXED_MESSAGE = "تّصنيف تّصلح"
DARIJABOT_CAT_CATEGORY_PAGE = "[[تصنيف:تصنيفات زادهوم داريجابوت]]"
BC = " قبل لميلاد"
CENTURY_NUM_NAMES = {1:'لول'
,2:'تاني'
,3:'تالت'
,4:'رابع'
,5:'لخامس'
,6:'سات'
,7:'سابع'
,8:'تامن'
,9:'تاسع'
,10:'لعاشر'
,11:'لحاضش'
,12:'طناش'
,13:'تلطاش'
,14:'ربعطاش'
,15:'خمسطاش'
,16:'سطاش'
,17:'سبعطاش'
,18:'تمنطاش'
,19:'تسعطاش'
,20:'لعشرين'
,21:'لواحد ؤ عشرين'
,22:'تنين ؤ عشرين'
,23:'تلاتة ؤ عشرين'
,24:'ربعة عشرين'
,25:'خمسة ؤ عشرين'
,26:'ستة ؤ عشرين'
,27:'سبعة ؤ عشرين'
,28:'تمنية ؤ عشرين'
,29:'تسعود ؤ عشرين'
,30:'تلاتين'
}
MILLENIUM_NUM_NAMES = {1:'لولة'
,2:'تانية'
,3:'تالتة'
,4:'رابعة'}
def BC_value(year):
    """Return the Arabic "BC" suffix (module constant BC) for negative
    years, and an empty string for year >= 0."""
    return BC if year < 0 else ""
def get_decade_value(year):
    """Floor `year` to the first year of its decade (1987 -> 1980).

    Uses Python floor division, so negative (BC) years floor toward the
    earlier decade exactly like the original `year - year % 10`.
    """
    return (year // 10) * 10
def get_century_value(year):
    """Return the Arabic ordinal name of the century containing `year`
    (1-100 -> first, 101-200 -> second, ...).

    Raises KeyError for year <= 0 or beyond the names table, exactly like
    the original two-step computation.
    """
    # Ceiling division: exact multiples of 100 close their century.
    return CENTURY_NUM_NAMES[-(-year // 100)]
def get_millennium_value(year):
    """Return the Arabic ordinal name of the millennium containing `year`
    (1-1000 -> first, 1001-2000 -> second, ...).

    Raises KeyError outside the names table, like the original.
    """
    # Ceiling division: exact multiples of 1000 close their millennium.
    return MILLENIUM_NUM_NAMES[-(-year // 1000)]
def get_precision(objectCode,date_type,date):
    """Query Wikidata for the precision of one statement's time value.

    Args:
        objectCode: Wikidata entity id (e.g. "Q42").
        date_type: property id of the date statement ("P569" birth,
            "P570" death).
        date: the time value string whose precision is requested.

    Returns:
        The highest Wikidata precision number recorded for `date`
        (e.g. 9 = year, 11 = day), or 0 when no binding matches.
    """
    query = TIMEQUERY.replace('{1}',objectCode).replace('{2}',date_type)
    url = "https://darijabot@query.wikidata.org/sparql?query=%s&format=json" % quote(query)
    # Headers are required: without a User-Agent the Wikidata server refuses
    # the connection, and without the charset a Unicode error ensues.
    headers = {
        'User-Agent': 'DarijaBot/0.1 (Edition Windows 10 Home, Version 20H2, OS build 19042.1165, Windows Feature Experience Pack 120.2212.3530.0) Python3.9.0',
        'Content-Type': 'text/text; charset=utf-8'
    }
    response = requests.get(url, headers=headers)
    # Parse the body exactly once. The original parsed it twice and guarded
    # on `response is not None`, which is dead code: requests.get() never
    # returns None, and a failed first parse would already have raised.
    res = response.json()
    values = [
        int(binding['timeprecision']['value'])
        for binding in res['results']['bindings']
        if binding['time']['value'] == date
    ]
    return max(values) if values else 0
def simplify_json(jason):
    """
    Converts json response from Wikidata server into a simpler dictionary list,
    that only has the required values.

    Each output dict always holds 'personLabel'; when present in the binding
    it also gains 'dateOfBirth' / 'dateOfDeath' plus the matching
    'birthPrecision' / 'deathPrecision' fetched via get_precision (0 when the
    precision lookup fails for any reason).
    """
    dict_list = []
    for i in range(len(jason['results']['bindings'])):
        dict_list.append({})
        dict_list[i]['personLabel'] = jason['results']['bindings'][i]['personLabel']['value']
        try:
            dict_list[i]['dateOfBirth'] = jason['results']['bindings'][i]['dateOfBirth']['value']
        except KeyError:
            # Date of birth not available for this person: leave the key out.
            pass
        if 'dateOfBirth' in dict_list[i].keys():
            objectCode = jason['results']['bindings'][i]['person']['value'].split('/')[-1]
            date_type = 'P569'  # Wikidata property: date of birth
            date = dict_list[i]['dateOfBirth']
            try:
                dict_list[i]['birthPrecision'] = get_precision(objectCode,date_type,date)
            except:
                # Best-effort: network/parse failure maps to precision 0.
                dict_list[i]['birthPrecision'] = 0
        try:
            if 'dateOfDeath' in jason['results']['bindings'][i].keys():
                dict_list[i]['dateOfDeath'] = jason['results']['bindings'][i]['dateOfDeath']['value']
        except KeyError:
            # Date of death not available for this person: leave the key out.
            pass
        if 'dateOfDeath' in dict_list[i].keys():
            objectCode = jason['results']['bindings'][i]['person']['value'].split('/')[-1]
            date_type = 'P570'  # Wikidata property: date of death
            date = dict_list[i]['dateOfDeath']
            try:
                dict_list[i]['deathPrecision'] = get_precision(objectCode,date_type,date)
            except:
                dict_list[i]['deathPrecision'] = 0
    return dict_list
def wikidata_rest_query(filename):
    """Run the SPARQL query stored in `filename` against the Wikidata
    endpoint and return the decoded JSON response."""
    with open(filename,'r',encoding='utf8') as f:
        query = f.read()
    # Headers are necessary: without a User-Agent the Wikidata server refuses
    # to connect, and without the charset a Unicode error ensues.
    headers = {
        'User-Agent': 'DarijaBot/0.1 (Edition Windows 10 Home, Version 20H2, OS build 19042.1165, Windows Feature Experience Pack 120.2212.3530.0) Python3.9.0',
        'Content-Type': 'text/text; charset=utf-8'
    }
    url = "https://query.wikidata.org/sparql?query=%s&format=json" % quote(query)
    response = requests.get(url, headers=headers)
    return response.json()
def get_dict_by_new_key(key_index,value_index,raw_dict,min_prec):
    """Re-key `raw_dict` by one element of its value tuples.

    `raw_dict` maps a person name to a list of (year, daymonth, precision)
    tuples.  The tuple element at `key_index` becomes the new dictionary
    key, and each entry collects (name, tuple[value_index]) pairs.

    Filtering: year keys (key_index == 0) require precision >= min_prec;
    daymonth keys (key_index == 1) additionally require that precision only
    for '0101', because imprecise Wikidata dates default to 1 January.
    Any other key_index yields an empty result, as before.

    Args:
        key_index: tuple index that becomes the new key (0 or 1).
        value_index: tuple index kept as the paired value.
        raw_dict: mapping name -> list of (year, daymonth, precision).
        min_prec: minimum Wikidata precision to accept.

    Returns:
        dict mapping the chosen element to a list of (name, value) pairs.
    """
    new_dict = {}
    for name, tuples in raw_dict.items():
        # The original special-cased single-element lists with duplicated
        # filtering logic; one loop covers both cases (and empty lists)
        # with identical results and ordering.
        for v in tuples:
            by_year_ok = key_index == 0 and v[2] >= min_prec
            by_day_ok = key_index == 1 and (v[key_index] != '0101' or v[2] >= min_prec)
            if by_year_ok or by_day_ok:
                new_dict.setdefault(v[key_index], []).append((name, v[value_index]))
    return new_dict
def get_daymonth(key):
    """Turn an 'MMDD' string into an Arabic 'day month-name' label,
    using the MONTHS table's 'ary_name' entries."""
    month_index = int(key[:2]) - 1
    day_label = str(int(key[2:]))  # drop a leading zero from the day, if any
    return day_label + ' ' + MONTHS[month_index]['ary_name']
def save_dict_list(dict_list):
    """Cache the simplified Wikidata records to the module-level `export`
    path as a Python literal.

    NOTE(review): written with str() and read back with eval() by
    load_dict_list — the two functions must stay in sync.
    """
    with open(export,'w',encoding='utf-8') as f:
        f.write(str(dict_list))
def load_dict_list():
    """Load the cached records previously written by save_dict_list.

    SECURITY NOTE(review): eval() executes arbitrary code from the cache
    file; this is tolerable only because the file is produced locally by
    save_dict_list, but ast.literal_eval would be a safer replacement.
    """
    with open(export,'r',encoding='utf-8') as f:
        dict_list = eval(f.read())
    return dict_list
def create_add_all_categories(site,_type,year,title):
    """Create/refresh the category tree for one birth or death year.

    Args:
        site: pywikibot Site to edit.
        _type: 'b' for births, 'd' for deaths; any other value aborts
            with a console message and returns None.
        year: signed year (negative means BC).
        title: title of the person's article page.

    Adds the year category to the person page, then ensures the year,
    decade, century and millennium category pages exist and carry the
    right parent categories, saving each page it creates or changes.
    """
    abs_year = abs(year)
    # Category names use the absolute year plus an optional BC suffix.
    MAIN_YEAR_CAT = MAIN_YEAR_CAT_PATTERN.replace('{year}',str(abs_year)).replace('{BC}',BC_value(year))
    MAIN_DECADE_CAT = MAIN_DECADE_CAT_PATTERN.replace('{decade}',str(get_decade_value(abs_year))).replace('{BC}',BC_value(year))
    MAIN_CENT_CAT = MAIN_CENT_CAT_PATTERN.replace('{century}',str(get_century_value(abs_year))).replace('{BC}',BC_value(year))
    MAIN_MILN_CAT = MAIN_MILN_CAT_PATTERN.replace('{millennium}',str(get_millennium_value(abs_year))).replace('{BC}',BC_value(year))
    if _type == 'b':
        # Birth variants of the year/decade/century/millennium categories.
        YEAR_CAT = BIRTH_YEAR_CAT_PATTERN.replace('{year}',str(abs_year)).replace('{BC}',BC_value(year))
        DECADE_CAT = BIRTH_DECADE_CAT_PATTERN.replace('{decade}',str(get_decade_value(abs_year))).replace('{BC}',BC_value(year))
        CENT_CAT = BIRTH_CENT_CAT_PATTERN.replace('{century}',str(get_century_value(abs_year))).replace('{BC}',BC_value(year))
        MILN_CAT = BIRTH_MILN_CAT_PATTERN.replace('{millennium}',str(get_millennium_value(abs_year))).replace('{BC}',BC_value(year))
        BY_YEAR_CAT = BIRTHS_BY_YEAR_CAT
        BY_DECADE_CAT = BIRTHS_BY_DECADE_CAT
        BY_CENT_CAT = BIRTHS_BY_CENT_CAT
    elif _type == 'd':
        # Death variants of the same category set.
        YEAR_CAT = DEATH_YEAR_CAT_PATTERN.replace('{year}',str(abs_year)).replace('{BC}',BC_value(year))
        DECADE_CAT = DEATH_DECADE_CAT_PATTERN.replace('{decade}',str(get_decade_value(abs_year))).replace('{BC}',BC_value(year))
        CENT_CAT = DEATH_CENT_CAT_PATTERN.replace('{century}',str(get_century_value(abs_year))).replace('{BC}',BC_value(year))
        MILN_CAT = DEATH_MILN_CAT_PATTERN.replace('{millennium}',str(get_millennium_value(abs_year))).replace('{BC}',BC_value(year))
        BY_YEAR_CAT = DEATHS_BY_YEAR_CAT
        BY_DECADE_CAT = DEATHS_BY_DECADE_CAT
        BY_CENT_CAT = DEATHS_BY_CENT_CAT
    else:
        print("Unknown type value in function create_add_all_categories")
        return None
    # Add the birth/death year category to the person's page (only if the
    # page exists and does not already carry it).
    page = pywikibot.Page(site,title)
    if page.text != '':
        if '[['+YEAR_CAT+']]' not in page.text:
            page.text+='\n[['+YEAR_CAT+']]'
            save_page(page,CAT_ADDED_MESSAGE)
    else:
        print('Page '+title+' not found!') # TODO: replace with a log line
    # Create the birth/death year category page, or patch its parents.
    # NOTE(review): membership here is tested without the [[ ]] brackets,
    # unlike the person-page check above — confirm this is intentional.
    page = pywikibot.Page(site,YEAR_CAT)
    if page.text == '':
        page.text = '[['+BY_YEAR_CAT+']]\n'+'[['+DECADE_CAT+']]\n'+'[['+MAIN_YEAR_CAT+']]\n'+DARIJABOT_CAT_CATEGORY_PAGE
        save_page(page,CAT_PAGE_CREATED_MSG)
    else:
        temp = page.text
        if BY_YEAR_CAT not in page.text:
            page.text += '\n[['+BY_YEAR_CAT+']]'
        if DECADE_CAT not in page.text:
            page.text += '\n[['+DECADE_CAT+']]'
        if MAIN_YEAR_CAT not in page.text:
            page.text += '\n[['+MAIN_YEAR_CAT+']]'
        if temp != page.text:
            save_page(page,CAT_ADDED_MESSAGE)
    # Create the main year category page if missing (never patched).
    page = pywikibot.Page(site,MAIN_YEAR_CAT)
    if page.text == '':
        page.text = '[['+GENERAL_YEAR_CAT+']]\n'+'[['+MAIN_DECADE_CAT+']]\n'+DARIJABOT_CAT_CATEGORY_PAGE
        save_page(page,CAT_PAGE_CREATED_MSG)
    # Create the birth/death decade category page if missing.
    page = pywikibot.Page(site,DECADE_CAT)
    if page.text == '':
        page.text = '[['+BY_DECADE_CAT+']]\n'+'[['+CENT_CAT+']]\n'+'[['+MAIN_DECADE_CAT+']]\n'+DARIJABOT_CAT_CATEGORY_PAGE
        save_page(page,CAT_PAGE_CREATED_MSG)
    # Create the main decade category page if missing.
    page = pywikibot.Page(site,MAIN_DECADE_CAT)
    if page.text == '':
        page.text = '[['+GENERAL_DECADE_CAT+']]\n'+'[['+MAIN_CENT_CAT+']]\n'+DARIJABOT_CAT_CATEGORY_PAGE
        save_page(page,CAT_PAGE_CREATED_MSG)
    # Create the birth/death century category page if missing.
    page = pywikibot.Page(site,CENT_CAT)
    if page.text == '':
        page.text = '[['+BY_CENT_CAT+']]\n'+'[['+MILN_CAT+']]\n'+'[['+MAIN_CENT_CAT+']]\n'+DARIJABOT_CAT_CATEGORY_PAGE
        save_page(page,CAT_PAGE_CREATED_MSG)
    # Create the main century category page if missing.
    page = pywikibot.Page(site,MAIN_CENT_CAT)
    if page.text == '':
        page.text = '[['+GENERAL_CENT_CAT+']]\n'+'[['+MAIN_MILN_CAT+']]\n'+DARIJABOT_CAT_CATEGORY_PAGE
        save_page(page,CAT_PAGE_CREATED_MSG)
#load data from Wikidata
print("Loading data from Wikidata")
if os.path.exists(export):
dict_list = load_dict_list()
else:
dict_list = simplify_json(wikidata_rest_query(filename))
save_dict_list(dict_list)
print("Data loaded")
dict_by_person_birth = {}
dict_by_person_death = {}
for i in range(len(dict_list)):
if ('dateOfBirth' in dict_list[i].keys() and dict_list[i]['dateOfBirth'] != ''):
if dict_list[i]['personLabel'] not in dict_by_person_birth.keys():
dict_by_person_birth[dict_list[i]['personLabel']] = []
fulldob = dict_list[i]['dateOfBirth'].split('T')[0]
#print(dict_list[i]['dateOfBirth'])
#print(fulldob)
if re.match(date_pattern,fulldob):
if 'birthPrecision' in dict_list[i].keys():
print("adding birth date precision")
prec = dict_list[i]['birthPrecision']
else:
prec = 0
if fulldob[0] == '-':
year = 0-int(fulldob.split('-')[1])
#print((year,fulldob[-5:].replace('-','')))
else:
year = int(fulldob.split('-')[0])
tupl = (year,fulldob[-5:].replace('-',''),prec)
if tupl not in dict_by_person_birth[dict_list[i]['personLabel']]:
dict_by_person_birth[dict_list[i]['personLabel']].append(tupl)
if ('dateOfDeath' in dict_list[i].keys() and dict_list[i]['dateOfDeath'] != ''):
if dict_list[i]['personLabel'] not in dict_by_person_death.keys():
dict_by_person_death[dict_list[i]['personLabel']] = []
fulldod = dict_list[i]['dateOfDeath'].split('T')[0]
if re.match(date_pattern,fulldod):
if 'deathPrecision' in dict_list[i].keys():
print("adding death date precision")
prec = dict_list[i]['deathPrecision']
else:
prec = 0
if fulldod[0] == '-':
year = 0-int(fulldod.split('-')[1])
#print((year,fulldod[-5:].replace('-','')))
else:
year = int(fulldod.split('-')[0])
tupl = (year,fulldod[-5:].replace('-',''),prec)
if tupl not in dict_by_person_death[dict_list[i]['personLabel']]:
dict_by_person_death[dict_list[i]['personLabel']].append(tupl)
#print(dict_by_person_birth['لويس أنطوان دو بوݣانڤيل'])
dict_by_day_birth = get_dict_by_new_key(1,0,dict_by_person_birth,11)
#print(dict_by_day_birth)
for key, value in dict_by_day_birth.items():
dict_by_day_birth[key] = sorted(value,key=lambda x:x[1])
dict_by_day_death = get_dict_by_new_key(1,0,dict_by_person_death,11)
#print(dict_by_day_death)
for key, value in dict_by_day_death.items():
dict_by_day_death[key] = sorted(value,key=lambda x:x[1])
dict_by_year_birth = get_dict_by_new_key(0,1,dict_by_person_birth,9)
#print(dict_by_year_birth)
for key, value in dict_by_year_birth.items():
dict_by_year_birth[key] = sorted(value,key=lambda x:x[1])
print(dict_by_year_birth)
dict_by_year_death = get_dict_by_new_key(0,1,dict_by_person_death,9)
#print(dict_by_year_death)
for key, value in dict_by_year_death.items():
dict_by_year_death[key] = sorted(value,key=lambda x:x[1])
site = pywikibot.Site()
current_year = None
for key, value in dict_by_day_birth.items():
if len(key) == 4:
#print(key)
daymonth = get_daymonth(key)
title = BIRTH_PAGE_PART+' '+daymonth
page = pywikibot.Page(site,title)
temp = page.text #temporary variable to compare
text = BOT_NOTICE+'\n\n'
name_list = []
current_year = value[0][1]
for v in value:
if current_year != v[1]:
#print(current_year)
text+= '\n* '
if current_year < 0:
text+="'''"+str(0-current_year)+" "+BC+":''' "
print(str(0-current_year))
else:
text+="'''"+str(current_year)+":''' "
#print('namelist: '+str(name_list))
if len(name_list)>0:
text+=NAME_SEPARATOR.join(["[["+name+"]]" for name in name_list])
name_list = []
current_year = v[1]
name_list.append(v[0])
#text+= '* [['+v[0]+']]\n'
text+= '\n* '
if current_year < 0:
text+="'''"+str(0-current_year)+" "+BC+":''' "
print(str(0-current_year))
else:
text+="'''"+str(current_year)+":''' "
if len(name_list)>0:
text+=NAME_SEPARATOR.join(["[["+name+"]]" for name in name_list])
if temp != text:
#text+='\n'+DARIJABOT_CAT
page.text = text
#print(text)
save_page(page,SAVE_MESSAGE)
else:
print("Invalid key: "+key+" for record "+str(value))
for key, value in dict_by_day_death.items():
if len(key) == 4:
#print(key)
daymonth = get_daymonth(key)
title = DEATH_PAGE_PART+' '+daymonth
page = pywikibot.Page(site,title)
temp = page.text #temporary variable to compare
text = BOT_NOTICE+'\n\n'
name_list = []
current_year = value[0][1]
for v in value:
if current_year != v[1]:
#print(current_year)
text+= '\n* '
if current_year < 0:
text+="'''"+str(0-current_year)+" "+BC+":''' "
print(str(0-current_year))
else:
text+="'''"+str(current_year)+":''' "
#print('namelist: '+str(name_list))
if len(name_list)>0:
text+=NAME_SEPARATOR.join(["[["+name+"]]" for name in name_list])
name_list = []
current_year = v[1]
name_list.append(v[0])
#text+= '* [['+v[0]+']]\n'
text+= '\n* '
if current_year < 0:
text+="'''"+str(0-current_year)+" "+BC+":''' "
print(str(0-current_year))
else:
text+="'''"+str(current_year)+":''' "
if len(name_list)>0:
text+=NAME_SEPARATOR.join(["[["+name+"]]" for name in name_list])
if temp != text:
#text+='\n'+DARIJABOT_CAT
page.text = text
#print(text)
save_page(page,SAVE_MESSAGE)
else:
print("Invalid key: "+key+" for record "+str(value))
for key, value in dict_by_year_birth.items():
year = key
for v in value:
name = v[0]
create_add_all_categories(site=site,_type='b',year=year,title=name)
for key, value in dict_by_year_death.items():
year = key
for v in value:
name = v[0]
create_add_all_categories(site=site,_type='d',year=year,title=name)
"""
for i in range(-600,2022):
abs_year = abs(i)
year = i
title = DEATH_YEAR_CAT_PATTERN.replace('{year}',str(abs_year)).replace('{BC}',BC_value(year))
page = pywikibot.Page(site,title)
temp = page.text
if page.text != '':
page.text = page.text.replace(BIRTHS_BY_YEAR_CAT,DEATHS_BY_YEAR_CAT)
if temp != page.text:
save_page(page,CAT_FIXED_MESSAGE)
for i in range(-600,2022,10):
abs_decade = abs(i)
year = i
title = DEATH_DECADE_CAT_PATTERN.replace('{decade}',str(abs_decade)).replace('{BC}',BC_value(year))
page = pywikibot.Page(site,title)
temp = page.text
if page.text != '':
page.text = page.text.replace(BIRTHS_BY_DECADE_CAT,DEATHS_BY_DECADE_CAT)
if temp != page.text:
save_page(page,CAT_FIXED_MESSAGE)
for century in CENTURY_NUM_NAMES.values():
title = DEATH_CENT_CAT_PATTERN.replace('{century}',century).replace('{BC}',"")
page = pywikibot.Page(site,title)
temp = page.text
if page.text != '':
page.text = page.text.replace(BIRTHS_BY_CENT_CAT,DEATHS_BY_CENT_CAT)
if temp != page.text:
save_page(page,CAT_FIXED_MESSAGE)
title = DEATH_CENT_CAT_PATTERN.replace('{century}',century).replace('{BC}',BC)
page = pywikibot.Page(site,title)
temp = page.text
if page.text != '':
page.text = page.text.replace(BIRTHS_BY_CENT_CAT,DEATHS_BY_CENT_CAT)
if temp != page.text:
save_page(page,CAT_FIXED_MESSAGE)
"""
| 36.550702 | 160 | 0.597934 | from openpyxl import Workbook, load_workbook
import re
import pywikibot
from pgvbotLib import *
from urllib.request import urlopen, quote, Request
from urllib.error import URLError
import json, sys, os
import requests
date_pattern = r'[-]{0,1}[0-9]+-[0-9]+-[0-9]+'
filename = './data/query.sparql'
export = './data/dict_list.json'
BIRTH_PAGE_PART = "قالب:ناس تزادو ف"
DEATH_PAGE_PART = "قالب:ناس توفاو ف"
BOT_NOTICE = "<noinclude>{{پاج كيعمرها بوت}}</noinclude>"
DARIJABOT_CAT = "<noinclude>[[تصنيف:قوالب زادهوم داريجابوت]]</noinclude>"
SAVE_MESSAGE = "لپاج تعمّرات ب معلومات من ويكيداطا"
BC = "ق.م."
NAME_SEPARATOR = " {{•}} "
TIMEQUERY = """
SELECT ?time ?timeprecision
WHERE
{ SERVICE wikibase:label { bd:serviceParam wikibase:language "en". }
{ wd:{1} p:{2}/psv:{2} ?timenode. }
?timenode wikibase:timeValue ?time.
?timenode wikibase:timePrecision ?timeprecision.
}
"""
BIRTH_YEAR_CAT_PATTERN = "تصنيف:زيادة {year}{BC}"
DEATH_YEAR_CAT_PATTERN = "تصنيف:وفيات {year}{BC}"
MAIN_YEAR_CAT_PATTERN = "تصنيف:{year}{BC}"
GENERAL_YEAR_CAT = "تصنيف:لعوام"
BIRTHS_BY_YEAR_CAT = "تصنيف:زيادات علا حساب لعام"
DEATHS_BY_YEAR_CAT = "تصنيف:وفيات علا حساب لعام"
BIRTH_DECADE_CAT_PATTERN = "تصنيف:زيادة ف عوام {decade}{BC}"
DEATH_DECADE_CAT_PATTERN = "تصنيف:وفيات ف عوام {decade}{BC}"
MAIN_DECADE_CAT_PATTERN = "تصنيف:عوام {decade}{BC}"
GENERAL_DECADE_CAT = "تصنيف:لعقود"
BIRTHS_BY_DECADE_CAT = "تصنيف:زيادات علا حساب لعقد"
DEATHS_BY_DECADE_CAT = "تصنيف:وفيات علا حساب لعقد"
BIRTH_CENT_CAT_PATTERN = "تصنيف:زيادة ف لقرن {century}{BC}"
DEATH_CENT_CAT_PATTERN = "تصنيف:وفيات ف لقرن {century}{BC}"
MAIN_CENT_CAT_PATTERN = "تصنيف:لقرن {century}{BC}"
BIRTHS_BY_CENT_CAT = "تصنيف:زيادات علا حساب لقرن"
DEATHS_BY_CENT_CAT = "تصنيف:وفيات علا حساب لقرن"
GENERAL_CENT_CAT = "تصنيف:لقرون"
BIRTH_MILN_CAT_PATTERN = "تصنيف:زيادات ف لألفية {millennium}{BC}"
DEATH_MILN_CAT_PATTERN = "تصنيف:وفيات ف لألفية {millennium}{BC}"
MAIN_MILN_CAT_PATTERN = "تصنيف:لألفية {millennium}{BC}"
CAT_ADDED_MESSAGE = "تصنيف تزاد"
CAT_PAGE_CREATED_MSG = "پاج د تّصنيف تقادات"
CAT_FIXED_MESSAGE = "تّصنيف تّصلح"
DARIJABOT_CAT_CATEGORY_PAGE = "[[تصنيف:تصنيفات زادهوم داريجابوت]]"
BC = " قبل لميلاد"
CENTURY_NUM_NAMES = {1:'لول'
,2:'تاني'
,3:'تالت'
,4:'رابع'
,5:'لخامس'
,6:'سات'
,7:'سابع'
,8:'تامن'
,9:'تاسع'
,10:'لعاشر'
,11:'لحاضش'
,12:'طناش'
,13:'تلطاش'
,14:'ربعطاش'
,15:'خمسطاش'
,16:'سطاش'
,17:'سبعطاش'
,18:'تمنطاش'
,19:'تسعطاش'
,20:'لعشرين'
,21:'لواحد ؤ عشرين'
,22:'تنين ؤ عشرين'
,23:'تلاتة ؤ عشرين'
,24:'ربعة عشرين'
,25:'خمسة ؤ عشرين'
,26:'ستة ؤ عشرين'
,27:'سبعة ؤ عشرين'
,28:'تمنية ؤ عشرين'
,29:'تسعود ؤ عشرين'
,30:'تلاتين'
}
MILLENIUM_NUM_NAMES = {1:'لولة'
,2:'تانية'
,3:'تالتة'
,4:'رابعة'}
def BC_value(year):
if year < 0:
return BC
else:
return ""
def get_decade_value(year):
return year - year%10
def get_century_value(year):
century_num = year//100
if year%100 != 0:
century_num += 1
return CENTURY_NUM_NAMES[century_num]
def get_millennium_value(year):
millennium_num = year//1000
if year%1000 != 0:
millennium_num += 1
return MILLENIUM_NUM_NAMES[millennium_num]
def get_precision(objectCode,date_type,date):
query = TIMEQUERY.replace('{1}',objectCode).replace('{2}',date_type)
url = "https://darijabot@query.wikidata.org/sparql?query=%s&format=json" % quote(query)
headers = {
'User-Agent': 'DarijaBot/0.1 (Edition Windows 10 Home, Version 20H2, OS build 19042.1165, Windows Feature Experience Pack 120.2212.3530.0) Python3.9.0',
'Content-Type': 'text/text; charset=utf-8'
}
response = requests.get(url, headers=headers)
res = response.json()
if response is not None:
res = response.json()
values = []
for i in range(len(res['results']['bindings'])):
if res['results']['bindings'][i]['time']['value'] == date:
values.append(int(res['results']['bindings'][i]['timeprecision']['value']))
if len(values)>0:
return max(values)
return 0
def simplify_json(jason):
dict_list = []
for i in range(len(jason['results']['bindings'])):
dict_list.append({})
dict_list[i]['personLabel'] = jason['results']['bindings'][i]['personLabel']['value']
try:
dict_list[i]['dateOfBirth'] = jason['results']['bindings'][i]['dateOfBirth']['value']
except KeyError:
pass
if 'dateOfBirth' in dict_list[i].keys():
objectCode = jason['results']['bindings'][i]['person']['value'].split('/')[-1]
date_type = 'P569'
date = dict_list[i]['dateOfBirth']
try:
dict_list[i]['birthPrecision'] = get_precision(objectCode,date_type,date)
except:
dict_list[i]['birthPrecision'] = 0
try:
if 'dateOfDeath' in jason['results']['bindings'][i].keys():
dict_list[i]['dateOfDeath'] = jason['results']['bindings'][i]['dateOfDeath']['value']
except KeyError:
pass
if 'dateOfDeath' in dict_list[i].keys():
objectCode = jason['results']['bindings'][i]['person']['value'].split('/')[-1]
date_type = 'P570'
date = dict_list[i]['dateOfDeath']
try:
dict_list[i]['deathPrecision'] = get_precision(objectCode,date_type,date)
except:
dict_list[i]['deathPrecision'] = 0
return dict_list
def wikidata_rest_query(filename):
with open(filename,'r',encoding='utf8') as f:
query = f.read()
headers = {
'User-Agent': 'DarijaBot/0.1 (Edition Windows 10 Home, Version 20H2, OS build 19042.1165, Windows Feature Experience Pack 120.2212.3530.0) Python3.9.0',
'Content-Type': 'text/text; charset=utf-8'
}
url = "https://query.wikidata.org/sparql?query=%s&format=json" % quote(query)
response = requests.get(url, headers=headers)
return response.json()
def get_dict_by_new_key(key_index,value_index,raw_dict,min_prec):
new_dict = {}
for key,value in raw_dict.items():
if len(value)== 1:
if (key_index == 0 and value[0][2] >= min_prec) or (key_index == 1 and (value[0][key_index]!='0101' or value[0][2] >= min_prec)):
if value[0][key_index] not in new_dict.keys():
new_dict[value[0][key_index]] = []
new_dict[value[0][key_index]].append((key,value[0][value_index]))
elif len(value)>1:
for v in value:
#make sure the precision is at least equal to min_prec, for daymonth values if it is different from 1 January, the precision doesn't matter
if (key_index == 0 and v[2] >= min_prec) or (key_index == 1 and (v[key_index]!='0101' or v[2] >= min_prec)):
if v[key_index] not in new_dict.keys():
new_dict[v[key_index]] = []
new_dict[v[key_index]].append((key,v[value_index]))
return new_dict
def get_daymonth(key):
day_number = key[2:]
if day_number[0] == '0':
day_number = day_number[-1]
month_number = int(key[:2])
month = MONTHS[month_number-1]['ary_name']
return day_number+' '+month
def save_dict_list(dict_list):
with open(export,'w',encoding='utf-8') as f:
f.write(str(dict_list))
def load_dict_list():
with open(export,'r',encoding='utf-8') as f:
dict_list = eval(f.read())
return dict_list
def create_add_all_categories(site,_type,year,title):
    """Create (if missing) the year/decade/century/millennium category tree
    for a birth (_type 'b') or death (_type 'd') in *year*, and add the
    year category to the article *title*.

    Category names come from the *_PATTERN module constants; '{BC}' is
    expanded via BC_value() for negative years.  Pages are only saved when
    their text actually changed.
    """
    abs_year = abs(year)
    MAIN_YEAR_CAT = MAIN_YEAR_CAT_PATTERN.replace('{year}',str(abs_year)).replace('{BC}',BC_value(year))
    MAIN_DECADE_CAT = MAIN_DECADE_CAT_PATTERN.replace('{decade}',str(get_decade_value(abs_year))).replace('{BC}',BC_value(year))
    MAIN_CENT_CAT = MAIN_CENT_CAT_PATTERN.replace('{century}',str(get_century_value(abs_year))).replace('{BC}',BC_value(year))
    MAIN_MILN_CAT = MAIN_MILN_CAT_PATTERN.replace('{millennium}',str(get_millennium_value(abs_year))).replace('{BC}',BC_value(year))
    # Pick the birth- or death-specific category name patterns.
    if _type == 'b':
        YEAR_CAT = BIRTH_YEAR_CAT_PATTERN.replace('{year}',str(abs_year)).replace('{BC}',BC_value(year))
        DECADE_CAT = BIRTH_DECADE_CAT_PATTERN.replace('{decade}',str(get_decade_value(abs_year))).replace('{BC}',BC_value(year))
        CENT_CAT = BIRTH_CENT_CAT_PATTERN.replace('{century}',str(get_century_value(abs_year))).replace('{BC}',BC_value(year))
        MILN_CAT = BIRTH_MILN_CAT_PATTERN.replace('{millennium}',str(get_millennium_value(abs_year))).replace('{BC}',BC_value(year))
        BY_YEAR_CAT = BIRTHS_BY_YEAR_CAT
        BY_DECADE_CAT = BIRTHS_BY_DECADE_CAT
        BY_CENT_CAT = BIRTHS_BY_CENT_CAT
    elif _type == 'd':
        YEAR_CAT = DEATH_YEAR_CAT_PATTERN.replace('{year}',str(abs_year)).replace('{BC}',BC_value(year))
        DECADE_CAT = DEATH_DECADE_CAT_PATTERN.replace('{decade}',str(get_decade_value(abs_year))).replace('{BC}',BC_value(year))
        CENT_CAT = DEATH_CENT_CAT_PATTERN.replace('{century}',str(get_century_value(abs_year))).replace('{BC}',BC_value(year))
        MILN_CAT = DEATH_MILN_CAT_PATTERN.replace('{millennium}',str(get_millennium_value(abs_year))).replace('{BC}',BC_value(year))
        BY_YEAR_CAT = DEATHS_BY_YEAR_CAT
        BY_DECADE_CAT = DEATHS_BY_DECADE_CAT
        BY_CENT_CAT = DEATHS_BY_CENT_CAT
    else:
        print("Unknown type value in function create_add_all_categories")
        return None
    # Tag the article itself with its birth/death year category.
    page = pywikibot.Page(site,title)
    if page.text != '':
        if '[['+YEAR_CAT+']]' not in page.text:
            page.text+='\n[['+YEAR_CAT+']]'
            save_page(page,CAT_ADDED_MESSAGE)
    else:
        print('Page '+title+' not found!')
    # Create or update the year category page, linking it to its parents.
    page = pywikibot.Page(site,YEAR_CAT)
    if page.text == '':
        page.text = '[['+BY_YEAR_CAT+']]\n'+'[['+DECADE_CAT+']]\n'+'[['+MAIN_YEAR_CAT+']]\n'+DARIJABOT_CAT_CATEGORY_PAGE
        save_page(page,CAT_PAGE_CREATED_MSG)
    else:
        temp = page.text
        if BY_YEAR_CAT not in page.text:
            page.text += '\n[['+BY_YEAR_CAT+']]'
        if DECADE_CAT not in page.text:
            page.text += '\n[['+DECADE_CAT+']]'
        if MAIN_YEAR_CAT not in page.text:
            page.text += '\n[['+MAIN_YEAR_CAT+']]'
        if temp != page.text:
            save_page(page,CAT_ADDED_MESSAGE)
    # The remaining levels (main year, decade, century, millennium) are
    # only created when absent; existing pages are left untouched.
    page = pywikibot.Page(site,MAIN_YEAR_CAT)
    if page.text == '':
        page.text = '[['+GENERAL_YEAR_CAT+']]\n'+'[['+MAIN_DECADE_CAT+']]\n'+DARIJABOT_CAT_CATEGORY_PAGE
        save_page(page,CAT_PAGE_CREATED_MSG)
    page = pywikibot.Page(site,DECADE_CAT)
    if page.text == '':
        page.text = '[['+BY_DECADE_CAT+']]\n'+'[['+CENT_CAT+']]\n'+'[['+MAIN_DECADE_CAT+']]\n'+DARIJABOT_CAT_CATEGORY_PAGE
        save_page(page,CAT_PAGE_CREATED_MSG)
    page = pywikibot.Page(site,MAIN_DECADE_CAT)
    if page.text == '':
        page.text = '[['+GENERAL_DECADE_CAT+']]\n'+'[['+MAIN_CENT_CAT+']]\n'+DARIJABOT_CAT_CATEGORY_PAGE
        save_page(page,CAT_PAGE_CREATED_MSG)
    page = pywikibot.Page(site,CENT_CAT)
    if page.text == '':
        page.text = '[['+BY_CENT_CAT+']]\n'+'[['+MILN_CAT+']]\n'+'[['+MAIN_CENT_CAT+']]\n'+DARIJABOT_CAT_CATEGORY_PAGE
        save_page(page,CAT_PAGE_CREATED_MSG)
    page = pywikibot.Page(site,MAIN_CENT_CAT)
    if page.text == '':
        page.text = '[['+GENERAL_CENT_CAT+']]\n'+'[['+MAIN_MILN_CAT+']]\n'+DARIJABOT_CAT_CATEGORY_PAGE
        save_page(page,CAT_PAGE_CREATED_MSG)
# --- Script body: load birth/death records, using the on-disk cache when
# available; otherwise query Wikidata and cache the simplified result.
print("Loading data from Wikidata")
if os.path.exists(export):
    dict_list = load_dict_list()
else:
    dict_list = simplify_json(wikidata_rest_query(filename))
    save_dict_list(dict_list)
print("Data loaded")
# Build per-person dictionaries mapping a person label to a de-duplicated
# list of (year, 'MMDD', precision) tuples for births and deaths.
# Dates arrive as ISO-like strings ('-' prefix marks BC years); precision
# comes from the Wikidata statement when present, 0 otherwise.
dict_by_person_birth = {}
dict_by_person_death = {}
for i in range(len(dict_list)):
    if ('dateOfBirth' in dict_list[i].keys() and dict_list[i]['dateOfBirth'] != ''):
        if dict_list[i]['personLabel'] not in dict_by_person_birth.keys():
            dict_by_person_birth[dict_list[i]['personLabel']] = []
        fulldob = dict_list[i]['dateOfBirth'].split('T')[0]
        if re.match(date_pattern,fulldob):
            if 'birthPrecision' in dict_list[i].keys():
                print("adding birth date precision")
                prec = dict_list[i]['birthPrecision']
            else:
                prec = 0
            # A leading '-' marks a BC date; store BC years as negatives.
            if fulldob[0] == '-':
                year = 0-int(fulldob.split('-')[1])
            else:
                year = int(fulldob.split('-')[0])
            tupl = (year,fulldob[-5:].replace('-',''),prec)
            if tupl not in dict_by_person_birth[dict_list[i]['personLabel']]:
                dict_by_person_birth[dict_list[i]['personLabel']].append(tupl)
    if ('dateOfDeath' in dict_list[i].keys() and dict_list[i]['dateOfDeath'] != ''):
        if dict_list[i]['personLabel'] not in dict_by_person_death.keys():
            dict_by_person_death[dict_list[i]['personLabel']] = []
        fulldod = dict_list[i]['dateOfDeath'].split('T')[0]
        if re.match(date_pattern,fulldod):
            if 'deathPrecision' in dict_list[i].keys():
                print("adding death date precision")
                prec = dict_list[i]['deathPrecision']
            else:
                prec = 0
            if fulldod[0] == '-':
                year = 0-int(fulldod.split('-')[1])
            else:
                year = int(fulldod.split('-')[0])
            tupl = (year,fulldod[-5:].replace('-',''),prec)
            if tupl not in dict_by_person_death[dict_list[i]['personLabel']]:
                dict_by_person_death[dict_list[i]['personLabel']].append(tupl)
# Regroup the per-person data: by day-of-year (key_index 1, day precision
# >= 11) for the "born/died on <day>" pages, and by year (key_index 0,
# year precision >= 9) for the category tree.  Each group is sorted by
# the stored value (year or daymonth respectively).
dict_by_day_birth = get_dict_by_new_key(1,0,dict_by_person_birth,11)
for key, value in dict_by_day_birth.items():
    dict_by_day_birth[key] = sorted(value,key=lambda x:x[1])
dict_by_day_death = get_dict_by_new_key(1,0,dict_by_person_death,11)
for key, value in dict_by_day_death.items():
    dict_by_day_death[key] = sorted(value,key=lambda x:x[1])
dict_by_year_birth = get_dict_by_new_key(0,1,dict_by_person_birth,9)
for key, value in dict_by_year_birth.items():
    dict_by_year_birth[key] = sorted(value,key=lambda x:x[1])
print(dict_by_year_birth)
dict_by_year_death = get_dict_by_new_key(0,1,dict_by_person_death,9)
for key, value in dict_by_year_death.items():
    dict_by_year_death[key] = sorted(value,key=lambda x:x[1])
# Write one "births on <day>" wiki page per MMDD key: a bulleted list of
# '''year:''' [[name]], ... lines, grouped by year; BC years are shown
# positive with the BC suffix.  Pages are saved only when text changed.
site = pywikibot.Site()
current_year = None
for key, value in dict_by_day_birth.items():
    if len(key) == 4:
        daymonth = get_daymonth(key)
        title = BIRTH_PAGE_PART+' '+daymonth
        page = pywikibot.Page(site,title)
        temp = page.text
        text = BOT_NOTICE+'\n\n'
        name_list = []
        current_year = value[0][1]
        for v in value:
            # Year changed: flush the accumulated names as one bullet line.
            if current_year != v[1]:
                text+= '\n* '
                if current_year < 0:
                    text+="'''"+str(0-current_year)+" "+BC+":''' "
                    print(str(0-current_year))
                else:
                    text+="'''"+str(current_year)+":''' "
                if len(name_list)>0:
                    text+=NAME_SEPARATOR.join(["[["+name+"]]" for name in name_list])
                name_list = []
                current_year = v[1]
            name_list.append(v[0])
        # Flush the final year group.
        text+= '\n* '
        if current_year < 0:
            text+="'''"+str(0-current_year)+" "+BC+":''' "
            print(str(0-current_year))
        else:
            text+="'''"+str(current_year)+":''' "
        if len(name_list)>0:
            text+=NAME_SEPARATOR.join(["[["+name+"]]" for name in name_list])
        if temp != text:
            page.text = text
            save_page(page,SAVE_MESSAGE)
    else:
        print("Invalid key: "+key+" for record "+str(value))
# Same page-building loop as above, for the "deaths on <day>" pages.
for key, value in dict_by_day_death.items():
    if len(key) == 4:
        daymonth = get_daymonth(key)
        title = DEATH_PAGE_PART+' '+daymonth
        page = pywikibot.Page(site,title)
        temp = page.text
        text = BOT_NOTICE+'\n\n'
        name_list = []
        current_year = value[0][1]
        for v in value:
            # Year changed: flush the accumulated names as one bullet line.
            if current_year != v[1]:
                text+= '\n* '
                if current_year < 0:
                    text+="'''"+str(0-current_year)+" "+BC+":''' "
                    print(str(0-current_year))
                else:
                    text+="'''"+str(current_year)+":''' "
                if len(name_list)>0:
                    text+=NAME_SEPARATOR.join(["[["+name+"]]" for name in name_list])
                name_list = []
                current_year = v[1]
            name_list.append(v[0])
        # Flush the final year group.
        text+= '\n* '
        if current_year < 0:
            text+="'''"+str(0-current_year)+" "+BC+":''' "
            print(str(0-current_year))
        else:
            text+="'''"+str(current_year)+":''' "
        if len(name_list)>0:
            text+=NAME_SEPARATOR.join(["[["+name+"]]" for name in name_list])
        if temp != text:
            page.text = text
            save_page(page,SAVE_MESSAGE)
    else:
        print("Invalid key: "+key+" for record "+str(value))
# Finally, create/attach the full category hierarchy for every person
# with a year-precision birth or death date.
for key, value in dict_by_year_birth.items():
    year = key
    for v in value:
        name = v[0]
        create_add_all_categories(site=site,_type='b',year=year,title=name)
for key, value in dict_by_year_death.items():
    year = key
    for v in value:
        name = v[0]
        create_add_all_categories(site=site,_type='d',year=year,title=name)
| true | true |
f7ff2ff48a6488f28ebfeed1405cab6a6fc0502f | 1,551 | py | Python | update_judicial_data.py | ronaldshaooo/OpenDataJudicial | ee55b234276511b824a04d836607741f9189ebd8 | [
"MIT"
] | null | null | null | update_judicial_data.py | ronaldshaooo/OpenDataJudicial | ee55b234276511b824a04d836607741f9189ebd8 | [
"MIT"
] | null | null | null | update_judicial_data.py | ronaldshaooo/OpenDataJudicial | ee55b234276511b824a04d836607741f9189ebd8 | [
"MIT"
] | null | null | null | from bs4 import BeautifulSoup
import os
import patoolib
import requests
import shutil
# Destination folders: every extracted judgement archive, and the
# financial ('金') subset copied out of it.
judicial_history_directory = 'judicial_history'
fincial_judgement_directory = 'fincial_judgement'
def copy_fincial_judgement():
    """Copy every downloaded judgement whose filename contains '金'
    (finance-related) into the financial-judgement folder."""
    for dirpath, _subdirs, filenames in os.walk(judicial_history_directory):
        print('check {}'.format(dirpath))
        for name in filenames:
            if '金' in name:
                shutil.copy(os.sep.join([dirpath, name]), fincial_judgement_directory)
                print('copy {}'.format(name))
def check_history(link):
    """Download and extract the archive at *link* unless it has already
    been extracted into the history directory.

    The existence check now uses the module-level directory constant so it
    always matches the path patoolib extracts into below (previously the
    directory name was duplicated as a hard-coded string).
    """
    file_dir = link.replace('rar/', '').replace('.rar', '')
    if os.path.isdir('{}/{}'.format(judicial_history_directory, file_dir)):
        print('{} exists'.format(file_dir))
    else:
        download_link = 'http://data.judicial.gov.tw/{}'.format(link)
        print(download_link)
        # Download to a scratch file, then extract it into its own folder.
        with open('file.rar', "wb") as file:
            response = requests.get(download_link)
            file.write(response.content)
        patoolib.extract_archive('file.rar', outdir='{}/{}'.format(judicial_history_directory, file_dir))
def refresh():
    """Scrape the judicial open-data index page and download every linked
    archive that is not already extracted locally."""
    # Make sure both destination folders exist before downloading.
    if not os.path.isdir(judicial_history_directory):
        os.mkdir(judicial_history_directory)
    if not os.path.isdir(fincial_judgement_directory):
        os.mkdir(fincial_judgement_directory)
    response = requests.get("http://data.judicial.gov.tw/")
    soup = BeautifulSoup(response.text, "html.parser")
    for item in soup.find_all('a', href=True):
        link = item['href']
        # The deletion log is not an archive; skip it.
        if 'Delete-Infor.csv' in link:
            continue
        check_history(link)
| 33 | 106 | 0.659574 | from bs4 import BeautifulSoup
import os
import patoolib
import requests
import shutil
judicial_history_directory = 'judicial_history'
fincial_judgement_directory = 'fincial_judgement'
def copy_fincial_judgement():
for root, _dirs, files in os.walk(judicial_history_directory):
print('check {}'.format(root))
for item in files:
if '金' in item:
shutil.copy(os.sep.join([root, item]), fincial_judgement_directory)
print('copy {}'.format(item))
def check_history(link):
file_dir = link.replace('rar/','').replace('.rar','')
if os.path.isdir('judicial_history/{}'.format(file_dir)):
print('{} exists'.format(file_dir))
else:
download_link = 'http://data.judicial.gov.tw/{}'.format(link)
print(download_link)
with open('file.rar', "wb") as file:
response = requests.get(download_link)
file.write(response.content)
patoolib.extract_archive('file.rar', outdir= '{}/{}'.format(judicial_history_directory, file_dir))
def refresh():
if not os.path.isdir(judicial_history_directory):
os.mkdir(judicial_history_directory)
if not os.path.isdir(fincial_judgement_directory):
os.mkdir(fincial_judgement_directory)
response = requests.get("http://data.judicial.gov.tw/")
soup = BeautifulSoup(response.text, "html.parser")
for item in soup.find_all('a', href=True):
link = item['href']
if 'Delete-Infor.csv' in link:
continue
check_history(link)
| true | true |
f7ff3017c4d0c54c1a82b058504ce4532e958323 | 26,132 | py | Python | mexdex/pvutils.py | parallelworks/welding | eb1fe04e9f1be1d374782f7476767dcf2197fe36 | [
"MIT"
] | null | null | null | mexdex/pvutils.py | parallelworks/welding | eb1fe04e9f1be1d374782f7476767dcf2197fe36 | [
"MIT"
] | null | null | null | mexdex/pvutils.py | parallelworks/welding | eb1fe04e9f1be1d374782f7476767dcf2197fe36 | [
"MIT"
] | null | null | null | from paraview.simple import *
import sys
import data_IO
import os
import subprocess
import shutil
# For saving plots as pngs
import matplotlib
import numpy as np
import warnings
def getParaviewVersion():
    """ Return paraview version as a double number: e.g. 5.4"""
    proxy_manager = paraview.servermanager.vtkSMProxyManager
    major = proxy_manager.GetVersionMajor()
    minor = proxy_manager.GetVersionMinor()
    # Minor version goes into the hundredths place (5.4 -> 5.04).
    return major + minor / 100.0
def planeNormalFromName(planeName):
    """Return the unit normal [x, y, z] of an axis-aligned plane named
    'x', 'y' or 'z' (case-insensitive).

    Raises ValueError for an unknown name; the previous fall-through
    produced a confusing NameError on the unbound local instead.
    """
    normals = {
        "x": [1.0, 0.0, 0.0],
        "y": [0.0, 1.0, 0.0],
        "z": [0.0, 0.0, 1.0],
    }
    try:
        return normals[planeName.lower()]
    except KeyError:
        raise ValueError("Unknown plane name: %s" % planeName)
def setviewposition(position_key, camera):
    """Expand a whitespace-separated position spec into a flat XYZ list.

    Each coordinate token is either a number or the word "center", which
    is replaced by the matching component of the camera focal point.  The
    spec may contain several points (3 tokens each); all expanded
    coordinates are returned in one flat list.

    Fix: the point count now uses integer division — ``len(...)/3`` is a
    float on Python 3 and made ``range()`` raise TypeError.
    """
    tokens = position_key.split()
    nPoints = len(tokens) // 3
    positionXYZ = []
    for iPoint in range(nPoints):
        # Start from the focal point, then override the explicit tokens.
        positionXYZ.extend(list(camera.GetFocalPoint()))
        for i in range(iPoint * 3, 3 + iPoint * 3):
            if tokens[i] != "center":
                positionXYZ[i] = float(tokens[i])
    return positionXYZ
def read_csv(f):
    """Parse an open KPI csv file into {kpi_name: {column: value}}.

    The header row supplies the column names; the first cell of every
    following row is the KPI name keying the remaining cells.
    """
    header = [col.replace("\n", "") for col in f.readline().split(",")]
    kpihash = {}
    for line in f:
        row = [cell.replace("\n", "") for cell in line.split(",")]
        kpihash[row[0]] = {header[idx]: cell
                           for idx, cell in enumerate(row) if idx != 0}
    return kpihash
def getfieldsfromkpihash(kpihash):
    """Return the unique 'field' names referenced by the KPI definitions."""
    unique_fields = {metric['field'] for metric in kpihash.values()
                     if 'field' in metric}
    return list(unique_fields)
def isfldScalar(arrayInfo):
    """True when the field described by *arrayInfo* has one component."""
    return arrayInfo.GetNumberOfComponents() == 1
def getfldComponentMap(arrayInfo):
    """Map component names to indices for a multi-component field.

    Index -1 is included because ParaView reports a name for it (the
    magnitude pseudo-component).  Scalar fields yield an empty map.
    """
    numComps = arrayInfo.GetNumberOfComponents()
    if numComps <= 1:
        return {}
    return {arrayInfo.GetComponentName(i): i for i in range(-1, numComps)}
def getfldCompNumber(arrayInfo, kpiComp):
    """Return the numeric index of *kpiComp* (0 when no component given)."""
    if not kpiComp:
        return 0
    return getfldComponentMap(arrayInfo)[kpiComp]
def getdatarange(datasource, kpifld, kpifldcomp):
    """Return the (min, max) range of one component of a point field."""
    arrayInfo = datasource.PointData[kpifld]
    return arrayInfo.GetRange(getfldCompNumber(arrayInfo, kpifldcomp))
def extractStatsOld(d, kpi, kpifield, kpiComp, kpitype, fp_csv_metrics, ave=[]):
    # Legacy stats extraction (superseded by extractStats): computes an
    # average per KPI type and appends "kpi,average,min,max" to the open
    # csv file.  NOTE(review): `ave=[]` is a mutable default argument; it
    # is only read (for the "Line" type), never mutated, so it is harmless
    # here but should be a scalar/None default.
    datarange = getdatarange(d, kpifield, kpiComp)
    if kpitype == "Probe":
        # Probe: midpoint of the (degenerate) range at the probe point.
        average=(datarange[0]+datarange[1])/2
    elif kpitype == "Line":
        # Line: the caller supplies the precomputed average.
        average=ave
    elif kpitype == "Slice":
        # get kpi field value and area - average = value/area
        integrateVariables = IntegrateVariables(Input=d)
        average = getdatarange(integrateVariables, kpifield, kpiComp)[0]\
            / integrateVariables.CellData['Area'].GetRange()[0]
    elif kpitype == "Volume" or kpitype == "Clip":
        # Volume-weighted average: integral of the field over the cell volume.
        integrateVariables = IntegrateVariables(Input=d)
        average = getdatarange(integrateVariables, kpifield, kpiComp)[0]\
            / integrateVariables.CellData['Volume'].GetRange()[0]
    fp_csv_metrics.write(",".join([kpi, str(average), str(datarange[0]),str(datarange[1])]) + "\n")
def extractStats(dataSource, kpi, kpifield, kpiComp, kpitype, fp_csv_metrics):
    """Run ParaView's Descriptive Statistics on one field (or one vector
    component) and append "kpi,mean,min,max,stddev" to the open csv file.

    For vector fields a Calculator filter first extracts the requested
    component (or magnitude) into a scalar array named field_component.
    """
    # If kpifield is a vector, add a calculater on top and extract the component of the vector
    # as a scalar
    arrayInfo = dataSource.PointData[kpifield]
    if isfldScalar(arrayInfo):
        statVarName = kpifield
    else:
        # create a new 'Calculator'
        statVarName = kpifield + '_' + kpiComp
        calc1 = Calculator(Input=dataSource)
        calc1.ResultArrayName = statVarName
        if kpiComp == 'Magnitude':
            calc1.Function = 'mag('+kpifield+')'
        else:
            # ParaView exposes vector components as "<field>_<comp>", which
            # is exactly the result-array name built above.
            calc1.Function = calc1.ResultArrayName
        UpdatePipeline()
        dataSource = calc1
    # create a new 'Descriptive Statistics'
    dStats = DescriptiveStatistics(Input=dataSource, ModelInput=None)
    dStats.VariablesofInterest = [statVarName]
    UpdatePipeline()
    dStatsDataInfo = dStats.GetDataInformation()
    dStatsStatsInfo = dStatsDataInfo.GetRowDataInformation()
    numStats = dStatsDataInfo.GetRowDataInformation().GetNumberOfArrays()
    # Pick the four statistics of interest out of the row-data arrays.
    for iStat in range(numStats):
        statName = dStatsStatsInfo.GetArrayInformation(iStat).GetName()
        statValue = dStatsStatsInfo.GetArrayInformation(iStat).GetComponentRange(0)[0]
        if statName == 'Maximum':
            maxaximum = statValue
        elif statName == 'Minimum' :
            minimum = statValue
        elif statName == 'Mean':
            average = statValue
        elif statName == 'Standard Deviation':
            stanDev = statValue
    fp_csv_metrics.write(",".join([kpi, str(average), str(minimum), str(maxaximum), str(stanDev)]) + "\n")
def correctfieldcomponent(datasource, metrichash):
    """
    Normalise metrichash['fieldComponent']: an empty string for scalar
    fields, and 'Magnitude' by default for vector/tensor fields when no
    component was specified.
    """
    arrayInfo = datasource.PointData[metrichash['field']]
    if isfldScalar(arrayInfo):
        metrichash['fieldComponent'] = ''
    elif 'fieldComponent' not in metrichash:
        metrichash['fieldComponent'] = 'Magnitude'
    return metrichash
def getReaderTypeFromfileAddress(dataFileAddress):
    """Infer the reader type from a data file path.

    Paths ending in 'system/controlDict' are OpenFOAM cases; otherwise
    the file extension (without the dot) names the reader, e.g. 'exo',
    'vtk', 'stl'.  Exits the program when the path cannot be parsed.
    """
    if dataFileAddress.endswith('system/controlDict'):
        return 'openFOAM'
    try:
        _, file_extension = os.path.splitext(dataFileAddress)
        return file_extension.replace('.', '')
    except Exception:  # narrowed from a bare except; still exits as before
        print('Error: Reader type cannot be set. Please check data file address')
        sys.exit(1)
def readDataFile(dataFileAddress, dataarray):
    """Create and return the ParaView reader matching the file type of
    *dataFileAddress*, loading only the arrays listed in *dataarray*
    (where the reader supports selective loading)."""
    readerType = getReaderTypeFromfileAddress(dataFileAddress)
    if readerType == 'exo':
        # Read the results file : create a new 'ExodusIIReader'
        dataReader = ExodusIIReader(FileName=dataFileAddress)
        dataReader.ElementBlocks = ['PNT', 'C3D20 C3D20R', 'COMPOSITE LAYER C3D20', 'Beam B32 B32R',
                                    'CPS8 CPE8 CAX8 S8 S8R', 'C3D8 C3D8R', 'TRUSS2', 'TRUSS2',
                                    'CPS4R CPE4R S4 S4R', 'CPS4I CPE4I', 'C3D10', 'C3D4', 'C3D15',
                                    'CPS6 CPE6 S6', 'C3D6', 'CPS3 CPE3 S3',
                                    '2-node 1d network entry elem', '2-node 1d network exit elem',
                                    '2-node 1d genuine network elem']
        # only load the data that is needed
        dataReader.PointVariables = dataarray
    elif readerType == 'openFOAM':
        # create a new 'OpenFOAMReader'
        dataReader = OpenFOAMReader(FileName=dataFileAddress)
        dataReader.MeshRegions = ['internalMesh']
        dataReader.CellArrays = dataarray
    elif readerType == 'vtk':
        dataReader = LegacyVTKReader(FileNames=[dataFileAddress])
    elif readerType == 'stl':
        dataReader = STLReader(FileNames=[dataFileAddress])
    return dataReader
def getTimeSteps():
    """Return the data time steps of the active animation scene as a list.

    TimestepValues is a single number when there is exactly one step and a
    sequence otherwise; both shapes are normalised to a list.
    """
    # get animation scene
    animationScene1 = GetAnimationScene()
    # update animation scene based on data timesteps
    animationScene1.UpdateAnimationUsingDataTimeSteps()
    values = animationScene1.TimeKeeper.TimestepValues
    # isinstance replaces the old type(...) == int/float comparison chain.
    if isinstance(values, (int, float)):
        return [values]
    return list(values)
def setFrame2latestTime(renderView1):
    """Advance *renderView1* to the last available data time step."""
    latesttime = getTimeSteps()[-1]
    print("Setting view to latest Time: " + str(latesttime))
    renderView1.ViewTime = latesttime
    return renderView1
def initRenderView (dataReader, viewSize, backgroundColor):
    """Set up the active render view (size, background, latest time step),
    show *dataReader* in it, and return (renderView, readerDisplay)."""
    # get active view
    renderView1 = GetActiveViewOrCreate('RenderView')
    try:
        renderView1 = setFrame2latestTime(renderView1)
    except:
        # Steady-state data has no time steps; keep the default view time.
        pass
    # set the view size
    renderView1.ViewSize = viewSize
    renderView1.Background = backgroundColor
    # show data in view
    readerDisplay = Show(dataReader, renderView1)
    # reset view to fit data
    renderView1.ResetCamera()
    return renderView1, readerDisplay
def colorMetric(d, metrichash):
    """Colour the displayed object *d* by the KPI field/component and
    configure its colour bar (preset, range, discretisation, labels,
    placement) from the metrichash settings.

    Optional settings are applied inside broad try/excepts so a missing
    or malformed entry simply leaves the ParaView default in place.
    """
    display = GetDisplayProperties(d)
    kpifld = metrichash['field']
    kpifldcomp = metrichash['fieldComponent']
    ColorBy(display, ('POINTS', kpifld, kpifldcomp))
    Render()
    UpdateScalarBars()
    ctf = GetColorTransferFunction(kpifld)
    try:
        ctf.ApplyPreset(metrichash["colorscale"], True)
    except:
        pass
    try:
        if data_IO.str2bool(metrichash["invertcolor"]):
            ctf.InvertTransferFunction()
    except:
        pass
    try:
        # Range is the data range unless min/max override 'auto'.
        datarange = getdatarange(d, kpifld, kpifldcomp)
        min = datarange[0]
        max = datarange[1]
        if metrichash["min"] != "auto":
            min = float(metrichash["min"])
        if metrichash["max"] != "auto":
            max = float(metrichash["max"])
        ctf.RescaleTransferFunction(min, max)
        if int(metrichash["discretecolors"]) > 0:
            ctf.Discretize = 1
            ctf.NumberOfTableValues = int(metrichash["discretecolors"])
        else:
            ctf.Discretize = 0
    except:
        pass
    renderView1 = GetActiveViewOrCreate('RenderView')
    ctfColorBar = GetScalarBar(ctf, renderView1)
    ctfColorBar.Orientation = "Horizontal"
    # Properties modified on uLUTColorBar
    if 'barTitle' in metrichash:
        ctfColorBar.Title = metrichash["barTitle"]
    if 'ComponentTitle' in metrichash:
        ctfColorBar.ComponentTitle = metrichash["ComponentTitle"]
    if 'FontColor' in metrichash:
        ctfColorBar.TitleColor = data_IO.read_floats_from_string(metrichash["FontColor"])
        ctfColorBar.LabelColor = data_IO.read_floats_from_string(metrichash["FontColor"])
    else:
        ctfColorBar.TitleColor = [0, 0, 0]
        ctfColorBar.LabelColor = [0, 0, 0]
    if 'FontSize' in metrichash:
        ctfColorBar.TitleFontSize = int(metrichash["FontSize"])
        ctfColorBar.LabelFontSize = int(metrichash["FontSize"])
    if 'LabelFormat' in metrichash:
        ctfColorBar.LabelFormat = metrichash["LabelFormat"]
        ctfColorBar.RangeLabelFormat = metrichash["LabelFormat"]
    # Colour-bar placement depends on the image type and on the ParaView
    # version: WindowLocation only exists from 5.04 onwards.
    imgtype=metrichash['image'].split("_")[0]
    PVversion = getParaviewVersion()
    if (imgtype!="iso"):
        # center
        if PVversion < 5.04:
            ctfColorBar.Position = [0.25,0.05]
            ctfColorBar.Position2 = [0.5,0] # no such property in PV 5.04
        else:
            ctfColorBar.WindowLocation = 'LowerCenter'
    else:
        # left
        if PVversion < 5.04:
            ctfColorBar.Position = [0.05,0.025]
            ctfColorBar.Position2 = [0.4,0] # no such property in PV 5.04
        else:
            ctfColorBar.WindowLocation = 'LowerLeftCorner'
    #if individualImages == False:
    #    display.SetScalarBarVisibility(renderView1, False)
def createSlice(metrichash, dataReader, dataDisplay):
    """Add an axis-aligned plane slice through the data, coloured by the
    KPI field, and return the Slice filter."""
    camera = GetActiveCamera()
    renderView1 = GetActiveViewOrCreate('RenderView')
    opacity=float(metrichash['opacity'])
    bodyopacity=float(metrichash['bodyopacity'])
    # Fade the full body so the slice stands out.
    dataDisplay.Opacity = bodyopacity
    dataDisplay.ColorArrayName = ['POINTS', '']
    slicetype = "Plane"
    plane = metrichash['plane']
    s = Slice(Input=dataReader)
    s.SliceType = slicetype
    # "center" tokens in the position default to the camera focal point.
    s.SliceType.Origin = setviewposition(metrichash['position'], camera)
    s.SliceType.Normal = planeNormalFromName(plane)
    sDisplay = Show(s, renderView1)
    sDisplay.ColorArrayName = [None, '']
    sDisplay.SetRepresentationType('Surface')
    sDisplay.DiffuseColor = [0.0, 1.0, 0.0]
    sDisplay.Specular = 0
    sDisplay.Opacity = opacity
    colorMetric(s, metrichash)
    return s
def createStreamTracer(metrichash, data_reader, data_display):
    """Trace streamlines of the KPI vector field, seeded from a line or a
    plane, render them as tubes coloured by 'colorByField', and return
    the Tube filter."""
    camera = GetActiveCamera()
    renderView1 = GetActiveViewOrCreate('RenderView')
    opacity = float(metrichash['opacity'])
    bodyopacity = float(metrichash['bodyopacity'])
    data_display.Opacity = bodyopacity
    data_display.ColorArrayName = ['POINTS', '']
    seedPosition = setviewposition(metrichash['position'], camera)
    if metrichash['seedType'].lower() == 'line':
        # Seed along a line between the two points of the position spec.
        streamTracer = StreamTracer(Input=data_reader,
                                    SeedType='High Resolution Line Source')
        streamTracer.SeedType.Point1 = seedPosition[0:3]
        streamTracer.SeedType.Point2 = seedPosition[3:6]
        streamTracer.SeedType.Resolution = int(metrichash['resolution'])
    elif metrichash['seedType'].lower() == 'plane':
        # create a new 'Point Plane Interpolator' for seeding the stream lines
        pointPlaneInterpolator = PointPlaneInterpolator(Input=data_reader, Source='Bounded Plane')
        pointPlaneInterpolator.Source.Center = setviewposition(metrichash['center'], camera)
        pointPlaneInterpolator.Source.BoundingBox = seedPosition
        pointPlaneInterpolator.Source.Normal = planeNormalFromName(metrichash['plane'])
        pointPlaneInterpolator.Source.Resolution = int(metrichash['resolution'])
        UpdatePipeline()
        streamTracer = StreamTracerWithCustomSource(Input=data_reader,
                                                    SeedSource=pointPlaneInterpolator)
    kpifld = metrichash['field'] #!!!!!!!
    streamTracer.Vectors = ['POINTS', kpifld]
    streamTracer.IntegrationDirection = metrichash['integralDirection'] # 'BACKWARD', 'FORWARD' or 'BOTH'
    streamTracer.IntegratorType = 'Runge-Kutta 4'
    # To do : Add a default value based on domain size ?
    streamTracer.MaximumStreamlineLength = float(metrichash['maxStreamLength'])
    ##
    # create a new 'Tube'
    tube = Tube(Input=streamTracer)
    tube.Radius = float(metrichash['tubeRadius'])
    # show data in view
    tubeDisplay = Show(tube, renderView1)
    # trace defaults for the display properties.
    tubeDisplay.Representation = 'Surface'
    tubeDisplay.ColorArrayName = [None, '']
    tubeDisplay.EdgeColor = [0.0, 0.0, 0.0]
    tubeDisplay.DiffuseColor = [0.0, 1.0, 0.0]
    tubeDisplay.Specular = 0
    tubeDisplay.Opacity = opacity
    # Colour the tubes by the (possibly different) colorByField setting.
    metrichash['field'] = metrichash['colorByField']
    if 'colorByFieldComponent' in metrichash:
        metrichash['fieldComponent'] = metrichash['colorByFieldComponent']
    metrichash = correctfieldcomponent(streamTracer, metrichash)
    colorMetric(tube, metrichash)
    try:
        # "_solo" images hide the body and show only the streamlines.
        if metrichash['image'].split("_")[1] == "solo":
            Hide(data_reader, renderView1)
    except:
        pass
    return tube
def createClip(metrichash, data_reader, data_display):
    """Clip the data with an axis-aligned plane (optionally inverted),
    colour the clipped surface by the KPI field, and return the filter."""
    camera = GetActiveCamera()
    renderView1 = GetActiveViewOrCreate('RenderView')
    opacity = float(metrichash['opacity'])
    bodyopacity = float(metrichash['bodyopacity'])
    data_display.Opacity = bodyopacity
    data_display.ColorArrayName = ['POINTS', '']
    cliptype = "Plane"
    plane = metrichash['plane']
    # 'invert' flips which side of the plane is kept.
    if 'invert' in metrichash.keys():
        invert = data_IO.str2bool(metrichash['invert'])
    else:
        invert = 0
    s = Clip(Input=data_reader)
    s.ClipType = cliptype
    s.ClipType.Origin = camera.GetFocalPoint()
    s.InsideOut = invert
    s.ClipType.Origin = setviewposition(metrichash['position'],camera)
    s.ClipType.Normal = planeNormalFromName(plane)
    sDisplay = Show(s, renderView1)
    sDisplay.ColorArrayName = [None, '']
    sDisplay.SetRepresentationType('Surface')
    sDisplay.DiffuseColor = [0.0, 1.0, 0.0]
    sDisplay.Specular = 0
    sDisplay.Opacity = opacity
    colorMetric(s, metrichash)
    try:
        # "_solo" images hide the body and show only the clipped region.
        if metrichash['image'].split("_")[1] == "solo":
            Hide(data_reader, renderView1)
    except:
        pass
    return s
def createProbe(metrichash, data_reader):
    """Probe the data at a single point and mark the location with a small
    red sphere; return the ProbeLocation filter."""
    camera = GetActiveCamera()
    renderView1 = GetActiveViewOrCreate('RenderView')
    p = ProbeLocation(Input=data_reader, ProbeType='Fixed Radius Point Source')
    p.PassFieldArrays = 1
    #p.ProbeType.Center = [1.2176899909973145, 1.2191989705897868, 1.5207239668816328]
    p.ProbeType.Center = setviewposition(metrichash['position'], camera)
    p.ProbeType.NumberOfPoints = 1
    p.ProbeType.Radius = 0.0
    # Visual marker for the probe location.
    ps = Sphere(Radius=0.025, ThetaResolution=32)
    ps.Center = setviewposition(metrichash['position'], camera)
    psDisplay = Show(ps, renderView1)
    psDisplay.DiffuseColor = [1.0, 0.0, 0.0]
    psDisplay.Opacity = 0.8
    return p
def createVolume(metrichash, data_reader):
    """Clip the data to the box given in 'position' (xmin xmax ymin ymax
    zmin zmax) and show it translucently; return the Clip filter."""
    bounds = [float(x) for x in metrichash['position'].split(" ")]
    renderView1 = GetActiveViewOrCreate('RenderView')
    c = Clip(Input=data_reader)
    c.ClipType = 'Box'
    # (xmin,xmax,ymin,ymax,zmin,zmax)
    #c.ClipType.Bounds = [0.1, 3, 0.1, 2.3, 0.15, 2.3]
    c.ClipType.Bounds = bounds
    c.InsideOut = 1
    cDisplay = Show(c, renderView1)
    cDisplay.ColorArrayName = ['Points', metrichash['field']]
    cDisplay.SetRepresentationType('Surface')
    cDisplay.DiffuseColor = [1.0, 1.0, 0.0]
    cDisplay.Specular = 0
    cDisplay.Opacity = 0.1
    return c
def createBasic(metrichash, dataReader, dataDisplay):
    """Render the full dataset, coloured by the KPI field unless the field
    is the literal string 'None'."""
    camera = GetActiveCamera()  # kept for parity with the other create* helpers
    renderView1 = GetActiveViewOrCreate('RenderView')
    dataDisplay.Opacity = float(metrichash['bodyopacity'])
    if metrichash['field'] != 'None':
        colorMetric(dataReader, metrichash)
    else:
        ColorBy(dataDisplay, ('POINTS', ''))
    return dataReader
def plotLine(infile, imageName) :
    """Plot the second column of the csv *infile* against point index and
    save the figure to *imageName* (headless Agg backend)."""
    matplotlib.use('Agg')
    import matplotlib.pyplot as plt
    warnings.filterwarnings('ignore')
    # First pass reads just the header names; second reads the numbers.
    header = np.genfromtxt(infile, delimiter=',', names=True).dtype.names
    data = np.genfromtxt(infile, delimiter=',', skip_header=1)
    x = data[:, 0]
    y = data[:, 1]
    plt.figure(figsize=(10, 6))
    plt.plot(x, y)
    locs, labels = plt.yticks()
    plt.yticks(locs, map(lambda x: "%g" % x, locs))
    plt.xlabel('Point')
    plt.ylabel(header[1])
    # plt.title(infile.replace(".csv", "").replace("plot_", "") + ' Plot')
    plt.grid(True)
    plt.savefig(imageName)
def createLine(metrichash, data_reader, outputDir=".", caseNumber=""):
    """Sample the KPI field along a line (PlotOverLine); when the metric's
    image type is "plot", also write a csv of the sampled values and save
    a line plot.  Returns the PlotOverLine filter."""
    resolution = int(metrichash['resolution'])
    try:
        image = metrichash['image']
    except:
        image = None
    point = [x for x in metrichash['position'].split()]
    camera = GetActiveCamera()
    renderView1 = GetActiveViewOrCreate('RenderView')
    # Replace "center" tokens with the matching focal-point component,
    # separately for each endpoint.
    if point[0] == "center":
        point[0] = camera.GetFocalPoint()[0]
    if point[3] == "center":
        point[3] = camera.GetFocalPoint()[0]
    if point[1] == "center":
        point[1] = camera.GetFocalPoint()[1]
    if point[4] == "center":
        point[4] = camera.GetFocalPoint()[1]
    if point[2] == "center":
        point[2] = camera.GetFocalPoint()[2]
    if point[5] == "center":
        point[5] = camera.GetFocalPoint()[2]
    point1=[float(point[0]),float(point[1]),float(point[2])]
    point2=[float(point[3]),float(point[4]),float(point[5])]
    l = PlotOverLine(Input=data_reader, Source='High Resolution Line Source')
    l.PassPartialArrays = 1
    l.Source.Point1 = point1
    l.Source.Point2 = point2
    l.Source.Resolution = resolution
    lDisplay = Show(l, renderView1)
    lDisplay.DiffuseColor = [1.0, 0.0, 0.0]
    lDisplay.Specular = 0
    lDisplay.Opacity = 1
    # Get the line data
    pl = servermanager.Fetch(l)
    kpifld = metrichash['field']
    kpiComp = metrichash['fieldComponent']
    if (image == "plot"):
        if not (os.path.exists(outputDir)):
            os.makedirs(outputDir)
        if caseNumber:
            metrichash['imageName'] = metrichash['imageName'].format(int(caseNumber))
        imageFullPath = outputDir + '/' + metrichash['imageName']
        imageName, imageExtension = os.path.splitext(imageFullPath)
        csvFileName = imageName + ".csv"
        f=open(csvFileName,"w")
        f.write("point,"+kpifld)
        if kpiComp:
            f.write("_" + kpiComp)
        f.write("\n")
    # Locate the sampled KPI array among the fetched point-data arrays.
    METRIC_INDEX=0
    for a in range(0,pl.GetPointData().GetNumberOfArrays()):
        if kpifld == pl.GetPointData().GetArrayName(a):
            METRIC_INDEX = a
    sum=0
    num=pl.GetPointData().GetArray(METRIC_INDEX).GetNumberOfTuples()
    # Get the component numbers from the input of line filter (data_reader) (?)
    compNumber = getfldCompNumber(data_reader.PointData[kpifld], kpiComp)
    for t in range(0,num):
        dataPoint = pl.GetPointData().GetArray(METRIC_INDEX).GetTuple(t)[compNumber]
        # NaNs (points outside the mesh) are skipped in the running sum.
        if str(float(dataPoint)).lower() != "nan":
            sum += dataPoint
        if image == "plot":
            f.write(",".join([str(t), str(dataPoint)])+"\n")
    if image == "plot":
        f.close()
        plotLine(csvFileName, imageFullPath)
    # NOTE(review): `ave` is computed but not returned; only the filter is.
    ave = sum/pl.GetPointData().GetArray(METRIC_INDEX).GetNumberOfTuples()
    return l
def adjustCamera(view, renderView1, metrichash):
    """Point the camera according to *view*: an isometric preset, one of
    the axis-aligned names (+X/-X/.../top/bottom), or "customize", which
    reads explicit camera settings from metrichash."""
    camera=GetActiveCamera()
    if view.startswith("iso"):
        camera.SetFocalPoint(0, 0, 0)
        if (view == "iso-flipped"):
            camera.SetPosition(0, 1, 0)
        else:
            camera.SetPosition(0, -1, 0)
        renderView1.ResetCamera()
        # adjust for scale margin
        camera.SetFocalPoint(camera.GetFocalPoint()[0],camera.GetFocalPoint()[1],camera.GetFocalPoint()[2]-0.25)
        camera.SetPosition(camera.GetPosition()[0],camera.GetPosition()[1],camera.GetPosition()[2]-1)
        camera.Elevation(45)
        camera.Azimuth(45)
    elif view == "+X" or view == "+x" or view == "back":
        camera.SetFocalPoint(0,0,0)
        camera.SetPosition(1,0,0)
        renderView1.ResetCamera()
    elif view == "-X" or view == "-x" or view == "front":
        camera.SetFocalPoint(0,0,0)
        camera.SetPosition(-1,0,0)
        renderView1.ResetCamera()
    elif view == "+Y" or view == "+y" or view == "right":
        camera.SetFocalPoint(0,0,0)
        camera.SetPosition(0,1,0)
        renderView1.ResetCamera()
    elif view == "-Y" or view == "-y" or view == "left":
        camera.SetFocalPoint(0,0,0)
        camera.SetPosition(0,-1,0)
        renderView1.ResetCamera()
    elif view == "+Z" or view == "+z" or view == "top":
        camera.SetFocalPoint(0,0,0)
        camera.SetPosition(0,0,1)
        renderView1.ResetCamera()
        # camera.Roll(90)
    elif view == "-Z" or view == "-z" or view == "bottom":
        camera.SetFocalPoint(0,0,0)
        camera.SetPosition(0,0,-1)
        renderView1.ResetCamera()
        # camera.Roll(-90)
    elif view == "customize":
        # Fully explicit camera placement from the metric settings.
        renderView1.InteractionMode = '3D'
        renderView1.CameraPosition = data_IO.read_floats_from_string(metrichash["CameraPosition"])
        renderView1.CameraFocalPoint = data_IO.read_floats_from_string(metrichash["CameraFocalPoint"])
        renderView1.CameraViewUp = data_IO.read_floats_from_string(metrichash["CameraViewUp"])
        renderView1.CameraParallelScale = float(metrichash["CameraParallelScale"])
        renderView1.CameraParallelProjection = int(metrichash["CameraParallelProjection"])
def makeAnimation(outputDir, kpi, magnification, animationName, deleteFrames=True):
animationFramesDir = outputDir + '/animFrames'
if not (os.path.exists(animationFramesDir)):
os.makedirs(animationFramesDir)
WriteAnimation(animationFramesDir + "/out_" + kpi + ".png", Magnification=magnification, FrameRate=15.0,
Compression=False)
subprocess.call(["convert", "-delay", "15", "-loop", "0",
animationFramesDir + "/out_" + kpi + ".*.png",
outputDir + "/" + animationName])
if deleteFrames:
shutil.rmtree(animationFramesDir)
def exportx3d(outputDir,kpi, metricObj, dataReader, renderBody, blenderContext):
blenderFramesDir = outputDir + kpi + '_blender'
if not (os.path.exists(blenderFramesDir)):
os.makedirs(blenderFramesDir)
try:
TimeSteps = getTimeSteps()
firstTimeStep = TimeSteps[0]
renderView1 = GetActiveViewOrCreate('RenderView')
renderView1.ViewTime = firstTimeStep
for num, time in enumerate(TimeSteps):
name_solo = blenderFramesDir + '/' + str(num) + '_solo.x3d'
Show(metricObj, renderView1)
Hide(dataReader, renderView1)
ExportView(name_solo, view=renderView1)
if renderBody == "true":
name_body = blenderFramesDir + '/' + str(num) + '_body.x3d'
Show(dataReader, renderView1)
Hide(metricObj, renderView1)
ExportView(name_body, view=renderView1)
animationScene1 = GetAnimationScene()
animationScene1.GoToNext()
except:
renderView1 = GetActiveViewOrCreate('RenderView')
name_body = blenderFramesDir + '/' + 'body.x3d'
Show(dataReader, renderView1)
ExportView(name_body, view=renderView1)
if blenderContext != None and len(blenderContext) > 0:
for i in blenderContext:
dataReaderTmp = readDataFile(i, None)
renderViewTmp = CreateView('RenderView')
readerDisplayTmp = Show(dataReaderTmp, renderViewTmp)
name_body = blenderFramesDir + '/' + os.path.splitext(os.path.basename(i))[0] + '.x3d'
ExportView(name_body, view=renderViewTmp)
# tar the directory
data_IO.tarDirectory(blenderFramesDir + ".tar", blenderFramesDir)
shutil.rmtree(blenderFramesDir)
def saveSTLfile(renderView,filename,magnification,quality):
adjustCamera("iso", renderView, None, "false")
SaveScreenshot(filename, magnification=magnification, quality=quality)
| 35.361299 | 120 | 0.650046 | from paraview.simple import *
import sys
import data_IO
import os
import subprocess
import shutil
import matplotlib
import numpy as np
import warnings
def getParaviewVersion():
PVversionMajor = paraview.servermanager.vtkSMProxyManager.GetVersionMajor()
PVversionMinor = paraview.servermanager.vtkSMProxyManager.GetVersionMinor()
PVversion = PVversionMajor + PVversionMinor/100.0
return PVversion
def planeNormalFromName(planeName):
if planeName.lower() == "x":
normal = [1.0, 0.0, 0.0]
if planeName.lower() == "y":
normal = [0.0, 1.0, 0.0]
if planeName.lower() == "z":
normal = [0.0, 0.0, 1.0]
return normal
def setviewposition(position_key, camera):
center = position_key.split()
nPoints = len(center)/3
positionXYZ = []
for iPoint in range(nPoints):
positionXYZ.extend(list(camera.GetFocalPoint()))
for i in range(iPoint*3, 3+iPoint*3):
if center[i] != "center":
positionXYZ[i] = float(center[i])
return positionXYZ
def read_csv(f):
kpihash = {}
cols = [l.replace("\n", "") for l in f.readline().split(",")]
for i, line in enumerate(f):
data = [l.replace("\n", "") for l in line.split(",")]
kpihash[data[0]] = {}
for ii, v in enumerate(data):
if ii != 0:
kpihash[data[0]][cols[ii]] = v
return kpihash
def getfieldsfromkpihash(kpihash):
cellsarrays = []
for kpi in kpihash:
if 'field' in kpihash[kpi]:
cellsarrays.append(kpihash[kpi]['field'])
ca = set(cellsarrays)
cellsarrays = list(ca)
return cellsarrays
def isfldScalar(arrayInfo):
numComps = arrayInfo.GetNumberOfComponents()
if numComps == 1:
return True
else:
return False
def getfldComponentMap(arrayInfo):
compName2num = {}
numComps = arrayInfo.GetNumberOfComponents()
if numComps>1:
for iComp in range(-1,numComps):
compName2num[arrayInfo.GetComponentName(iComp)] = iComp
return compName2num
def getfldCompNumber(arrayInfo, kpiComp):
compNumberMap = getfldComponentMap(arrayInfo)
if not kpiComp:
compNum = 0
else:
compNum = compNumberMap[kpiComp]
return compNum
def getdatarange(datasource, kpifld, kpifldcomp):
arrayInfo = datasource.PointData[kpifld]
compNumber = getfldCompNumber(arrayInfo, kpifldcomp)
datarange = arrayInfo.GetRange(compNumber)
return datarange
def extractStatsOld(d, kpi, kpifield, kpiComp, kpitype, fp_csv_metrics, ave=[]):
datarange = getdatarange(d, kpifield, kpiComp)
if kpitype == "Probe":
average=(datarange[0]+datarange[1])/2
elif kpitype == "Line":
average=ave
elif kpitype == "Slice":
integrateVariables = IntegrateVariables(Input=d)
average = getdatarange(integrateVariables, kpifield, kpiComp)[0]\
/ integrateVariables.CellData['Area'].GetRange()[0]
elif kpitype == "Volume" or kpitype == "Clip":
integrateVariables = IntegrateVariables(Input=d)
average = getdatarange(integrateVariables, kpifield, kpiComp)[0]\
/ integrateVariables.CellData['Volume'].GetRange()[0]
fp_csv_metrics.write(",".join([kpi, str(average), str(datarange[0]),str(datarange[1])]) + "\n")
def extractStats(dataSource, kpi, kpifield, kpiComp, kpitype, fp_csv_metrics):
arrayInfo = dataSource.PointData[kpifield]
if isfldScalar(arrayInfo):
statVarName = kpifield
else:
statVarName = kpifield + '_' + kpiComp
calc1 = Calculator(Input=dataSource)
calc1.ResultArrayName = statVarName
if kpiComp == 'Magnitude':
calc1.Function = 'mag('+kpifield+')'
else:
calc1.Function = calc1.ResultArrayName
UpdatePipeline()
dataSource = calc1
dStats = DescriptiveStatistics(Input=dataSource, ModelInput=None)
dStats.VariablesofInterest = [statVarName]
UpdatePipeline()
dStatsDataInfo = dStats.GetDataInformation()
dStatsStatsInfo = dStatsDataInfo.GetRowDataInformation()
numStats = dStatsDataInfo.GetRowDataInformation().GetNumberOfArrays()
for iStat in range(numStats):
statName = dStatsStatsInfo.GetArrayInformation(iStat).GetName()
statValue = dStatsStatsInfo.GetArrayInformation(iStat).GetComponentRange(0)[0]
if statName == 'Maximum':
maxaximum = statValue
elif statName == 'Minimum' :
minimum = statValue
elif statName == 'Mean':
average = statValue
elif statName == 'Standard Deviation':
stanDev = statValue
fp_csv_metrics.write(",".join([kpi, str(average), str(minimum), str(maxaximum), str(stanDev)]) + "\n")
def correctfieldcomponent(datasource, metrichash):
kpifld = metrichash['field']
arrayInfo = datasource.PointData[kpifld]
if isfldScalar(arrayInfo):
metrichash['fieldComponent'] = ''
else:
if not 'fieldComponent' in metrichash:
metrichash['fieldComponent'] = 'Magnitude'
return metrichash
def getReaderTypeFromfileAddress(dataFileAddress):
if dataFileAddress.endswith('system/controlDict'):
readerType = 'openFOAM'
else:
try:
filename, file_extension = os.path.splitext(dataFileAddress)
readerType = file_extension.replace('.', '')
except:
print('Error: Reader type cannot be set. Please check data file address')
sys.exit(1)
return readerType
def readDataFile(dataFileAddress, dataarray):
readerType = getReaderTypeFromfileAddress(dataFileAddress)
if readerType == 'exo':
dataReader = ExodusIIReader(FileName=dataFileAddress)
dataReader.ElementBlocks = ['PNT', 'C3D20 C3D20R', 'COMPOSITE LAYER C3D20', 'Beam B32 B32R',
'CPS8 CPE8 CAX8 S8 S8R', 'C3D8 C3D8R', 'TRUSS2', 'TRUSS2',
'CPS4R CPE4R S4 S4R', 'CPS4I CPE4I', 'C3D10', 'C3D4', 'C3D15',
'CPS6 CPE6 S6', 'C3D6', 'CPS3 CPE3 S3',
'2-node 1d network entry elem', '2-node 1d network exit elem',
'2-node 1d genuine network elem']
dataReader.PointVariables = dataarray
elif readerType == 'openFOAM':
dataReader = OpenFOAMReader(FileName=dataFileAddress)
dataReader.MeshRegions = ['internalMesh']
dataReader.CellArrays = dataarray
elif readerType == 'vtk':
dataReader = LegacyVTKReader(FileNames=[dataFileAddress])
elif readerType == 'stl':
dataReader = STLReader(FileNames=[dataFileAddress])
return dataReader
def getTimeSteps():
animationScene1 = GetAnimationScene()
animationScene1.UpdateAnimationUsingDataTimeSteps()
timeSteps = []
if type(animationScene1.TimeKeeper.TimestepValues)== int or type(animationScene1.TimeKeeper.TimestepValues)== float:
timeSteps.append(animationScene1.TimeKeeper.TimestepValues)
else:
timeSteps = list(animationScene1.TimeKeeper.TimestepValues)
return timeSteps
def setFrame2latestTime(renderView1):
TimeSteps = getTimeSteps()
latesttime = TimeSteps[-1]
print("Setting view to latest Time: " + str(latesttime))
renderView1.ViewTime = latesttime
return renderView1
def initRenderView (dataReader, viewSize, backgroundColor):
renderView1 = GetActiveViewOrCreate('RenderView')
try:
renderView1 = setFrame2latestTime(renderView1)
except:
pass
renderView1.ViewSize = viewSize
renderView1.Background = backgroundColor
readerDisplay = Show(dataReader, renderView1)
renderView1.ResetCamera()
return renderView1, readerDisplay
def colorMetric(d, metrichash):
display = GetDisplayProperties(d)
kpifld = metrichash['field']
kpifldcomp = metrichash['fieldComponent']
ColorBy(display, ('POINTS', kpifld, kpifldcomp))
Render()
UpdateScalarBars()
ctf = GetColorTransferFunction(kpifld)
try:
ctf.ApplyPreset(metrichash["colorscale"], True)
except:
pass
try:
if data_IO.str2bool(metrichash["invertcolor"]):
ctf.InvertTransferFunction()
except:
pass
try:
datarange = getdatarange(d, kpifld, kpifldcomp)
min = datarange[0]
max = datarange[1]
if metrichash["min"] != "auto":
min = float(metrichash["min"])
if metrichash["max"] != "auto":
max = float(metrichash["max"])
ctf.RescaleTransferFunction(min, max)
if int(metrichash["discretecolors"]) > 0:
ctf.Discretize = 1
ctf.NumberOfTableValues = int(metrichash["discretecolors"])
else:
ctf.Discretize = 0
except:
pass
renderView1 = GetActiveViewOrCreate('RenderView')
ctfColorBar = GetScalarBar(ctf, renderView1)
ctfColorBar.Orientation = "Horizontal"
if 'barTitle' in metrichash:
ctfColorBar.Title = metrichash["barTitle"]
if 'ComponentTitle' in metrichash:
ctfColorBar.ComponentTitle = metrichash["ComponentTitle"]
if 'FontColor' in metrichash:
ctfColorBar.TitleColor = data_IO.read_floats_from_string(metrichash["FontColor"])
ctfColorBar.LabelColor = data_IO.read_floats_from_string(metrichash["FontColor"])
else:
ctfColorBar.TitleColor = [0, 0, 0]
ctfColorBar.LabelColor = [0, 0, 0]
if 'FontSize' in metrichash:
ctfColorBar.TitleFontSize = int(metrichash["FontSize"])
ctfColorBar.LabelFontSize = int(metrichash["FontSize"])
if 'LabelFormat' in metrichash:
ctfColorBar.LabelFormat = metrichash["LabelFormat"]
ctfColorBar.RangeLabelFormat = metrichash["LabelFormat"]
imgtype=metrichash['image'].split("_")[0]
PVversion = getParaviewVersion()
if (imgtype!="iso"):
if PVversion < 5.04:
ctfColorBar.Position = [0.25,0.05]
ctfColorBar.Position2 = [0.5,0]
else:
ctfColorBar.WindowLocation = 'LowerCenter'
else:
if PVversion < 5.04:
ctfColorBar.Position = [0.05,0.025]
ctfColorBar.Position2 = [0.4,0]
else:
ctfColorBar.WindowLocation = 'LowerLeftCorner'
def createSlice(metrichash, dataReader, dataDisplay):
camera = GetActiveCamera()
renderView1 = GetActiveViewOrCreate('RenderView')
opacity=float(metrichash['opacity'])
bodyopacity=float(metrichash['bodyopacity'])
dataDisplay.Opacity = bodyopacity
dataDisplay.ColorArrayName = ['POINTS', '']
slicetype = "Plane"
plane = metrichash['plane']
s = Slice(Input=dataReader)
s.SliceType = slicetype
s.SliceType.Origin = setviewposition(metrichash['position'], camera)
s.SliceType.Normal = planeNormalFromName(plane)
sDisplay = Show(s, renderView1)
sDisplay.ColorArrayName = [None, '']
sDisplay.SetRepresentationType('Surface')
sDisplay.DiffuseColor = [0.0, 1.0, 0.0]
sDisplay.Specular = 0
sDisplay.Opacity = opacity
colorMetric(s, metrichash)
return s
def createStreamTracer(metrichash, data_reader, data_display):
camera = GetActiveCamera()
renderView1 = GetActiveViewOrCreate('RenderView')
opacity = float(metrichash['opacity'])
bodyopacity = float(metrichash['bodyopacity'])
data_display.Opacity = bodyopacity
data_display.ColorArrayName = ['POINTS', '']
seedPosition = setviewposition(metrichash['position'], camera)
if metrichash['seedType'].lower() == 'line':
streamTracer = StreamTracer(Input=data_reader,
SeedType='High Resolution Line Source')
streamTracer.SeedType.Point1 = seedPosition[0:3]
streamTracer.SeedType.Point2 = seedPosition[3:6]
streamTracer.SeedType.Resolution = int(metrichash['resolution'])
elif metrichash['seedType'].lower() == 'plane':
pointPlaneInterpolator = PointPlaneInterpolator(Input=data_reader, Source='Bounded Plane')
pointPlaneInterpolator.Source.Center = setviewposition(metrichash['center'], camera)
pointPlaneInterpolator.Source.BoundingBox = seedPosition
pointPlaneInterpolator.Source.Normal = planeNormalFromName(metrichash['plane'])
pointPlaneInterpolator.Source.Resolution = int(metrichash['resolution'])
UpdatePipeline()
streamTracer = StreamTracerWithCustomSource(Input=data_reader,
SeedSource=pointPlaneInterpolator)
kpifld = metrichash['field']
streamTracer.Vectors = ['POINTS', kpifld]
streamTracer.IntegrationDirection = metrichash['integralDirection']
streamTracer.IntegratorType = 'Runge-Kutta 4'
streamTracer.MaximumStreamlineLength = float(metrichash['maxStreamLength'])
tube = Tube(Input=streamTracer)
tube.Radius = float(metrichash['tubeRadius'])
tubeDisplay = Show(tube, renderView1)
tubeDisplay.Representation = 'Surface'
tubeDisplay.ColorArrayName = [None, '']
tubeDisplay.EdgeColor = [0.0, 0.0, 0.0]
tubeDisplay.DiffuseColor = [0.0, 1.0, 0.0]
tubeDisplay.Specular = 0
tubeDisplay.Opacity = opacity
metrichash['field'] = metrichash['colorByField']
if 'colorByFieldComponent' in metrichash:
metrichash['fieldComponent'] = metrichash['colorByFieldComponent']
metrichash = correctfieldcomponent(streamTracer, metrichash)
colorMetric(tube, metrichash)
try:
if metrichash['image'].split("_")[1] == "solo":
Hide(data_reader, renderView1)
except:
pass
return tube
def createClip(metrichash, data_reader, data_display):
camera = GetActiveCamera()
renderView1 = GetActiveViewOrCreate('RenderView')
opacity = float(metrichash['opacity'])
bodyopacity = float(metrichash['bodyopacity'])
data_display.Opacity = bodyopacity
data_display.ColorArrayName = ['POINTS', '']
cliptype = "Plane"
plane = metrichash['plane']
if 'invert' in metrichash.keys():
invert = data_IO.str2bool(metrichash['invert'])
else:
invert = 0
s = Clip(Input=data_reader)
s.ClipType = cliptype
s.ClipType.Origin = camera.GetFocalPoint()
s.InsideOut = invert
s.ClipType.Origin = setviewposition(metrichash['position'],camera)
s.ClipType.Normal = planeNormalFromName(plane)
sDisplay = Show(s, renderView1)
sDisplay.ColorArrayName = [None, '']
sDisplay.SetRepresentationType('Surface')
sDisplay.DiffuseColor = [0.0, 1.0, 0.0]
sDisplay.Specular = 0
sDisplay.Opacity = opacity
colorMetric(s, metrichash)
try:
if metrichash['image'].split("_")[1] == "solo":
Hide(data_reader, renderView1)
except:
pass
return s
def createProbe(metrichash, data_reader):
camera = GetActiveCamera()
renderView1 = GetActiveViewOrCreate('RenderView')
p = ProbeLocation(Input=data_reader, ProbeType='Fixed Radius Point Source')
p.PassFieldArrays = 1
p.ProbeType.Center = setviewposition(metrichash['position'], camera)
p.ProbeType.NumberOfPoints = 1
p.ProbeType.Radius = 0.0
ps = Sphere(Radius=0.025, ThetaResolution=32)
ps.Center = setviewposition(metrichash['position'], camera)
psDisplay = Show(ps, renderView1)
psDisplay.DiffuseColor = [1.0, 0.0, 0.0]
psDisplay.Opacity = 0.8
return p
def createVolume(metrichash, data_reader):
bounds = [float(x) for x in metrichash['position'].split(" ")]
renderView1 = GetActiveViewOrCreate('RenderView')
c = Clip(Input=data_reader)
c.ClipType = 'Box'
c.ClipType.Bounds = bounds
c.InsideOut = 1
cDisplay = Show(c, renderView1)
cDisplay.ColorArrayName = ['Points', metrichash['field']]
cDisplay.SetRepresentationType('Surface')
cDisplay.DiffuseColor = [1.0, 1.0, 0.0]
cDisplay.Specular = 0
cDisplay.Opacity = 0.1
return c
def createBasic(metrichash, dataReader, dataDisplay):
camera = GetActiveCamera()
renderView1 = GetActiveViewOrCreate('RenderView')
bodyopacity=float(metrichash['bodyopacity'])
dataDisplay.Opacity = bodyopacity
if not (metrichash['field'] == 'None'):
colorMetric(dataReader, metrichash)
else:
ColorBy(dataDisplay, ('POINTS', ''))
return dataReader
def plotLine(infile, imageName) :
matplotlib.use('Agg')
import matplotlib.pyplot as plt
warnings.filterwarnings('ignore')
header = np.genfromtxt(infile, delimiter=',', names=True).dtype.names
data = np.genfromtxt(infile, delimiter=',', skip_header=1)
x = data[:, 0]
y = data[:, 1]
plt.figure(figsize=(10, 6))
plt.plot(x, y)
locs, labels = plt.yticks()
plt.yticks(locs, map(lambda x: "%g" % x, locs))
plt.xlabel('Point')
plt.ylabel(header[1])
plt.grid(True)
plt.savefig(imageName)
def createLine(metrichash, data_reader, outputDir=".", caseNumber=""):
resolution = int(metrichash['resolution'])
try:
image = metrichash['image']
except:
image = None
point = [x for x in metrichash['position'].split()]
camera = GetActiveCamera()
renderView1 = GetActiveViewOrCreate('RenderView')
if point[0] == "center":
point[0] = camera.GetFocalPoint()[0]
if point[3] == "center":
point[3] = camera.GetFocalPoint()[0]
if point[1] == "center":
point[1] = camera.GetFocalPoint()[1]
if point[4] == "center":
point[4] = camera.GetFocalPoint()[1]
if point[2] == "center":
point[2] = camera.GetFocalPoint()[2]
if point[5] == "center":
point[5] = camera.GetFocalPoint()[2]
point1=[float(point[0]),float(point[1]),float(point[2])]
point2=[float(point[3]),float(point[4]),float(point[5])]
l = PlotOverLine(Input=data_reader, Source='High Resolution Line Source')
l.PassPartialArrays = 1
l.Source.Point1 = point1
l.Source.Point2 = point2
l.Source.Resolution = resolution
lDisplay = Show(l, renderView1)
lDisplay.DiffuseColor = [1.0, 0.0, 0.0]
lDisplay.Specular = 0
lDisplay.Opacity = 1
pl = servermanager.Fetch(l)
kpifld = metrichash['field']
kpiComp = metrichash['fieldComponent']
if (image == "plot"):
if not (os.path.exists(outputDir)):
os.makedirs(outputDir)
if caseNumber:
metrichash['imageName'] = metrichash['imageName'].format(int(caseNumber))
imageFullPath = outputDir + '/' + metrichash['imageName']
imageName, imageExtension = os.path.splitext(imageFullPath)
csvFileName = imageName + ".csv"
f=open(csvFileName,"w")
f.write("point,"+kpifld)
if kpiComp:
f.write("_" + kpiComp)
f.write("\n")
METRIC_INDEX=0
for a in range(0,pl.GetPointData().GetNumberOfArrays()):
if kpifld == pl.GetPointData().GetArrayName(a):
METRIC_INDEX = a
sum=0
num=pl.GetPointData().GetArray(METRIC_INDEX).GetNumberOfTuples()
compNumber = getfldCompNumber(data_reader.PointData[kpifld], kpiComp)
for t in range(0,num):
dataPoint = pl.GetPointData().GetArray(METRIC_INDEX).GetTuple(t)[compNumber]
if str(float(dataPoint)).lower() != "nan":
sum += dataPoint
if image == "plot":
f.write(",".join([str(t), str(dataPoint)])+"\n")
if image == "plot":
f.close()
plotLine(csvFileName, imageFullPath)
ave = sum/pl.GetPointData().GetArray(METRIC_INDEX).GetNumberOfTuples()
return l
def adjustCamera(view, renderView1, metrichash):
camera=GetActiveCamera()
if view.startswith("iso"):
camera.SetFocalPoint(0, 0, 0)
if (view == "iso-flipped"):
camera.SetPosition(0, 1, 0)
else:
camera.SetPosition(0, -1, 0)
renderView1.ResetCamera()
camera.SetFocalPoint(camera.GetFocalPoint()[0],camera.GetFocalPoint()[1],camera.GetFocalPoint()[2]-0.25)
camera.SetPosition(camera.GetPosition()[0],camera.GetPosition()[1],camera.GetPosition()[2]-1)
camera.Elevation(45)
camera.Azimuth(45)
elif view == "+X" or view == "+x" or view == "back":
camera.SetFocalPoint(0,0,0)
camera.SetPosition(1,0,0)
renderView1.ResetCamera()
elif view == "-X" or view == "-x" or view == "front":
camera.SetFocalPoint(0,0,0)
camera.SetPosition(-1,0,0)
renderView1.ResetCamera()
elif view == "+Y" or view == "+y" or view == "right":
camera.SetFocalPoint(0,0,0)
camera.SetPosition(0,1,0)
renderView1.ResetCamera()
elif view == "-Y" or view == "-y" or view == "left":
camera.SetFocalPoint(0,0,0)
camera.SetPosition(0,-1,0)
renderView1.ResetCamera()
elif view == "+Z" or view == "+z" or view == "top":
camera.SetFocalPoint(0,0,0)
camera.SetPosition(0,0,1)
renderView1.ResetCamera()
elif view == "-Z" or view == "-z" or view == "bottom":
camera.SetFocalPoint(0,0,0)
camera.SetPosition(0,0,-1)
renderView1.ResetCamera()
elif view == "customize":
renderView1.InteractionMode = '3D'
renderView1.CameraPosition = data_IO.read_floats_from_string(metrichash["CameraPosition"])
renderView1.CameraFocalPoint = data_IO.read_floats_from_string(metrichash["CameraFocalPoint"])
renderView1.CameraViewUp = data_IO.read_floats_from_string(metrichash["CameraViewUp"])
renderView1.CameraParallelScale = float(metrichash["CameraParallelScale"])
renderView1.CameraParallelProjection = int(metrichash["CameraParallelProjection"])
def makeAnimation(outputDir, kpi, magnification, animationName, deleteFrames=True):
animationFramesDir = outputDir + '/animFrames'
if not (os.path.exists(animationFramesDir)):
os.makedirs(animationFramesDir)
WriteAnimation(animationFramesDir + "/out_" + kpi + ".png", Magnification=magnification, FrameRate=15.0,
Compression=False)
subprocess.call(["convert", "-delay", "15", "-loop", "0",
animationFramesDir + "/out_" + kpi + ".*.png",
outputDir + "/" + animationName])
if deleteFrames:
shutil.rmtree(animationFramesDir)
def exportx3d(outputDir,kpi, metricObj, dataReader, renderBody, blenderContext):
blenderFramesDir = outputDir + kpi + '_blender'
if not (os.path.exists(blenderFramesDir)):
os.makedirs(blenderFramesDir)
try:
TimeSteps = getTimeSteps()
firstTimeStep = TimeSteps[0]
renderView1 = GetActiveViewOrCreate('RenderView')
renderView1.ViewTime = firstTimeStep
for num, time in enumerate(TimeSteps):
name_solo = blenderFramesDir + '/' + str(num) + '_solo.x3d'
Show(metricObj, renderView1)
Hide(dataReader, renderView1)
ExportView(name_solo, view=renderView1)
if renderBody == "true":
name_body = blenderFramesDir + '/' + str(num) + '_body.x3d'
Show(dataReader, renderView1)
Hide(metricObj, renderView1)
ExportView(name_body, view=renderView1)
animationScene1 = GetAnimationScene()
animationScene1.GoToNext()
except:
renderView1 = GetActiveViewOrCreate('RenderView')
name_body = blenderFramesDir + '/' + 'body.x3d'
Show(dataReader, renderView1)
ExportView(name_body, view=renderView1)
if blenderContext != None and len(blenderContext) > 0:
for i in blenderContext:
dataReaderTmp = readDataFile(i, None)
renderViewTmp = CreateView('RenderView')
readerDisplayTmp = Show(dataReaderTmp, renderViewTmp)
name_body = blenderFramesDir + '/' + os.path.splitext(os.path.basename(i))[0] + '.x3d'
ExportView(name_body, view=renderViewTmp)
data_IO.tarDirectory(blenderFramesDir + ".tar", blenderFramesDir)
shutil.rmtree(blenderFramesDir)
def saveSTLfile(renderView,filename,magnification,quality):
adjustCamera("iso", renderView, None, "false")
SaveScreenshot(filename, magnification=magnification, quality=quality)
| true | true |
f7ff3078f4a8fe151ccb117d86d20ae3a168ed98 | 1,574 | py | Python | PyFlow/Packages/PyFlowBase/Nodes/cliexit.py | liaokongVFX/PyFlow | 337462746acf087432f4dd3248e3a1349c3a3c79 | [
"Apache-2.0"
] | null | null | null | PyFlow/Packages/PyFlowBase/Nodes/cliexit.py | liaokongVFX/PyFlow | 337462746acf087432f4dd3248e3a1349c3a3c79 | [
"Apache-2.0"
] | null | null | null | PyFlow/Packages/PyFlowBase/Nodes/cliexit.py | liaokongVFX/PyFlow | 337462746acf087432f4dd3248e3a1349c3a3c79 | [
"Apache-2.0"
] | 1 | 2019-08-21T07:36:20.000Z | 2019-08-21T07:36:20.000Z | ## Copyright 2015-2019 Ilgar Lunin, Pedro Cabrera
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
## http://www.apache.org/licenses/LICENSE-2.0
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
from PyFlow.Core import NodeBase
from PyFlow.Core.Common import *
from PyFlow.Core.GraphManager import GraphManagerSingleton
from PyFlow.Core.NodeBase import NodePinsSuggestionsHelper
from PyFlow.Packages.PyFlowBase.Nodes import FLOW_CONTROL_COLOR
class cliexit(NodeBase):
def __init__(self, name):
super(cliexit, self).__init__(name)
self.inp0 = self.createInputPin(DEFAULT_IN_EXEC_NAME, 'ExecPin', None, self.compute)
@staticmethod
def pinTypeHints():
helper = NodePinsSuggestionsHelper()
helper.addInputDataType('ExecPin')
helper.addInputStruct(PinStructure.Single)
return helper
@staticmethod
def category():
return 'CLI'
@staticmethod
def keywords():
return []
@staticmethod
def description():
return 'Stops cli program loop'
def compute(self, *args, **kwargs):
man = GraphManagerSingleton().get()
man.terminationRequested = True
| 31.48 | 92 | 0.720457 | xecPin')
helper.addInputStruct(PinStructure.Single)
return helper
@staticmethod
def category():
return 'CLI'
@staticmethod
def keywords():
return []
@staticmethod
def description():
return 'Stops cli program loop'
def compute(self, *args, **kwargs):
man = GraphManagerSingleton().get()
man.terminationRequested = True
| true | true |
f7ff3081486b687c2eaf174841864e919416c8b7 | 2,396 | py | Python | tensorflow_io/core/python/experimental/serialization_ops.py | pshiko/io | a1793e6b41ed7a8db572249aba15a8e513a348a5 | [
"Apache-2.0"
] | null | null | null | tensorflow_io/core/python/experimental/serialization_ops.py | pshiko/io | a1793e6b41ed7a8db572249aba15a8e513a348a5 | [
"Apache-2.0"
] | null | null | null | tensorflow_io/core/python/experimental/serialization_ops.py | pshiko/io | a1793e6b41ed7a8db572249aba15a8e513a348a5 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Serialization Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow_io.core.python.ops import core_ops
# _NamedTensorSpec allows adding a `named` key while traversing,
# so that it is possible to build up the `/R/Foo` JSON Pointers.
class _NamedTensorSpec(tf.TensorSpec):
"""_NamedTensorSpec"""
def named(self, named=None):
if named is not None:
self._named = named
return self._named
# named_spec updates named field for JSON Pointers while traversing.
def named_spec(specs, name=''):
"""named_spec"""
if isinstance(specs, _NamedTensorSpec):
specs.named(name)
return
if isinstance(specs, dict):
for k in specs.keys():
named_spec(specs[k], "{}/{}".format(name, k))
return
for k, _ in enumerate(specs):
named_spec(specs[k], "{}/{}".format(name, k))
return
def decode_json(json, specs, name=None):
"""
Decode JSON string into Tensors.
TODO: support batch (1-D) input
Args:
json: A String Tensor. The JSON strings to decode.
specs: A structured TensorSpecs describing the signature
of the JSON elements.
name: A name for the operation (optional).
Returns:
A structured Tensors.
"""
# Make a copy of specs to keep the original specs
named = tf.nest.map_structure(lambda e: _NamedTensorSpec(e.shape, e.dtype), specs)
named_spec(named)
named = tf.nest.flatten(named)
names = [e.named() for e in named]
shapes = [e.shape for e in named]
dtypes = [e.dtype for e in named]
values = core_ops.io_decode_json(json, names, shapes, dtypes, name=name)
return tf.nest.pack_sequence_as(specs, values)
| 32.378378 | 84 | 0.698664 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow_io.core.python.ops import core_ops
class _NamedTensorSpec(tf.TensorSpec):
def named(self, named=None):
if named is not None:
self._named = named
return self._named
def named_spec(specs, name=''):
if isinstance(specs, _NamedTensorSpec):
specs.named(name)
return
if isinstance(specs, dict):
for k in specs.keys():
named_spec(specs[k], "{}/{}".format(name, k))
return
for k, _ in enumerate(specs):
named_spec(specs[k], "{}/{}".format(name, k))
return
def decode_json(json, specs, name=None):
named = tf.nest.map_structure(lambda e: _NamedTensorSpec(e.shape, e.dtype), specs)
named_spec(named)
named = tf.nest.flatten(named)
names = [e.named() for e in named]
shapes = [e.shape for e in named]
dtypes = [e.dtype for e in named]
values = core_ops.io_decode_json(json, names, shapes, dtypes, name=name)
return tf.nest.pack_sequence_as(specs, values)
| true | true |
f7ff310d356e03af98ca58a20d2d8a005d10a589 | 154 | py | Python | core/tests/unittests/test_import_version.py | zhiqiangdon/autogluon | 71ee7ef0f05d8f0aad112d8c1719174aa33194d9 | [
"Apache-2.0"
] | 4,462 | 2019-12-09T17:41:07.000Z | 2022-03-31T22:00:41.000Z | core/tests/unittests/test_import_version.py | zhiqiangdon/autogluon | 71ee7ef0f05d8f0aad112d8c1719174aa33194d9 | [
"Apache-2.0"
] | 1,408 | 2019-12-09T17:48:59.000Z | 2022-03-31T20:24:12.000Z | core/tests/unittests/test_import_version.py | zhiqiangdon/autogluon | 71ee7ef0f05d8f0aad112d8c1719174aa33194d9 | [
"Apache-2.0"
] | 623 | 2019-12-10T02:04:18.000Z | 2022-03-20T17:11:01.000Z | import autogluon.core
def test_import_version():
assert isinstance(autogluon.core.__version__, str)
assert len(autogluon.core.__version__) != 0
| 22 | 54 | 0.766234 | import autogluon.core
def test_import_version():
assert isinstance(autogluon.core.__version__, str)
assert len(autogluon.core.__version__) != 0
| true | true |
f7ff31a3b74859e4cd30da654696681c7bbe8c86 | 292 | py | Python | image_annotations/abstract.py | brandontrabucco/image_annotations | 4140fce44c6465bfd5de90eb935e014cec4c024e | [
"MIT"
] | null | null | null | image_annotations/abstract.py | brandontrabucco/image_annotations | 4140fce44c6465bfd5de90eb935e014cec4c024e | [
"MIT"
] | null | null | null | image_annotations/abstract.py | brandontrabucco/image_annotations | 4140fce44c6465bfd5de90eb935e014cec4c024e | [
"MIT"
] | null | null | null | """Author: Brandon Trabucco
An abstract class for defining usage the dataset.
"""
class Abstract(object):
"""Scaffold the dataset annotator and loader.
"""
def start(self):
"""Will eventually house computation and return an interesting result.
"""
pass | 20.857143 | 78 | 0.65411 |
class Abstract(object):
def start(self):
pass | true | true |
f7ff31f42e5b26cbf413226fb48b06e483fc6c0e | 95,067 | py | Python | lib/matplotlib/transforms.py | pmarshwx/matplotlib | 12be528dbf2114f7c25abf60de8100cb2d4494af | [
"MIT",
"BSD-3-Clause"
] | null | null | null | lib/matplotlib/transforms.py | pmarshwx/matplotlib | 12be528dbf2114f7c25abf60de8100cb2d4494af | [
"MIT",
"BSD-3-Clause"
] | null | null | null | lib/matplotlib/transforms.py | pmarshwx/matplotlib | 12be528dbf2114f7c25abf60de8100cb2d4494af | [
"MIT",
"BSD-3-Clause"
] | null | null | null | """
matplotlib includes a framework for arbitrary geometric
transformations that is used determine the final position of all
elements drawn on the canvas.
Transforms are composed into trees of :class:`TransformNode` objects
whose actual value depends on their children. When the contents of
children change, their parents are automatically invalidated. The
next time an invalidated transform is accessed, it is recomputed to
reflect those changes. This invalidation/caching approach prevents
unnecessary recomputations of transforms, and contributes to better
interactive performance.
For example, here is a graph of the transform tree used to plot data
to the graph:
.. image:: ../_static/transforms.png
The framework can be used for both affine and non-affine
transformations. However, for speed, we want to use the backend
renderers to perform affine transformations whenever possible.
Therefore, it is possible to perform just the affine or non-affine
part of a transformation on a set of data. The affine is always
assumed to occur after the non-affine. For any transform::
full transform == non-affine part + affine part
The backends are not expected to handle non-affine transformations
themselves.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import numpy as np
from numpy import ma
from matplotlib._path import (affine_transform, count_bboxes_overlapping_bbox,
update_path_extents)
from numpy.linalg import inv
from weakref import WeakValueDictionary
import warnings
try:
set
except NameError:
from sets import Set as set
from .path import Path
DEBUG = False
MaskedArray = ma.MaskedArray
class TransformNode(object):
    """
    :class:`TransformNode` is the base class for anything that
    participates in the transform tree and needs to invalidate its
    parents or be invalidated. This includes classes that are not
    really transforms, such as bounding boxes, since some transforms
    depend on bounding boxes to compute their values.
    """
    # Node id counter; not referenced in this chunk -- presumably used
    # elsewhere in the file or by subclasses (TODO confirm).
    _gid = 0

    # Invalidation may affect only the affine part. If the
    # invalidation was "affine-only", the _invalid member is set to
    # INVALID_AFFINE_ONLY
    INVALID_NON_AFFINE = 1
    INVALID_AFFINE = 2
    INVALID = INVALID_NON_AFFINE | INVALID_AFFINE

    # Some metadata about the transform, used to determine whether an
    # invalidation is affine-only
    is_affine = False
    is_bbox = False

    pass_through = False
    """
    If pass_through is True, all ancestors will always be
    invalidated, even if 'self' is already invalid.
    """

    def __init__(self, shorthand_name=None):
        """
        Creates a new :class:`TransformNode`.

        **shorthand_name** - a string representing the "name" of this
                             transform. The name carries no significance
                             other than to improve the readability of
                             ``str(transform)`` when DEBUG=True.
        """
        # Parents are stored in a WeakValueDictionary, so that if the
        # parents are deleted, references from the children won't keep
        # them alive.
        self._parents = WeakValueDictionary()

        # TransformNodes start out as invalid until their values are
        # computed for the first time.
        self._invalid = 1
        self._shorthand_name = shorthand_name or ''

    if DEBUG:
        def __str__(self):
            # either just return the name of this TransformNode, or its repr
            return self._shorthand_name or repr(self)

    def __getstate__(self):
        """Return a picklable state dict (weak-ref map made concrete)."""
        d = self.__dict__.copy()
        # turn the weakkey dictionary into a normal dictionary
        d['_parents'] = dict(six.iteritems(self._parents))
        return d

    def __setstate__(self, data_dict):
        """Restore pickled state, rebuilding the weak-ref parent map."""
        self.__dict__ = data_dict
        # turn the normal dictionary back into a WeakValueDictionary
        self._parents = WeakValueDictionary(self._parents)

    def __copy__(self, *args):
        # Copying a node would corrupt the parent/child invalidation
        # bookkeeping, so it is forbidden outright.
        raise NotImplementedError(
            "TransformNode instances can not be copied. " +
            "Consider using frozen() instead.")
    __deepcopy__ = __copy__

    def invalidate(self):
        """
        Invalidate this :class:`TransformNode` and triggers an
        invalidation of its ancestors. Should be called any
        time the transform changes.
        """
        # A purely affine node can at most be invalid in its affine part.
        value = self.INVALID
        if self.is_affine:
            value = self.INVALID_AFFINE
        return self._invalidate_internal(value, invalidating_node=self)

    def _invalidate_internal(self, value, invalidating_node):
        """
        Called by :meth:`invalidate` and subsequently ascends the transform
        stack calling each TransformNode's _invalidate_internal method.
        """
        # determine if this call will be an extension to the invalidation
        # status. If not, then a shortcut means that we needn't invoke an
        # invalidation up the transform stack as it will already have been
        # invalidated.

        # N.B This makes the invalidation sticky, once a transform has been
        # invalidated as NON_AFFINE, then it will always be invalidated as
        # NON_AFFINE even when triggered with a AFFINE_ONLY invalidation.
        # In most cases this is not a problem (i.e. for interactive panning and
        # zooming) and the only side effect will be on performance.
        status_changed = self._invalid < value

        if self.pass_through or status_changed:
            self._invalid = value

            # Snapshot the values: parents may be garbage-collected (weak
            # refs) while we iterate.
            for parent in list(six.itervalues(self._parents)):
                parent._invalidate_internal(value=value,
                                            invalidating_node=self)

    def set_children(self, *children):
        """
        Set the children of the transform, to let the invalidation
        system know which transforms can invalidate this transform.
        Should be called from the constructor of any transforms that
        depend on other transforms.
        """
        # Register self as a parent of each child, keyed by id() so the
        # weak-value mapping never keeps the parent alive.
        for child in children:
            child._parents[id(self)] = self

    if DEBUG:
        _set_children = set_children

        def set_children(self, *children):
            # In debug mode, also record the children so write_graphviz()
            # can walk the tree.
            self._set_children(*children)
            self._children = children
        set_children.__doc__ = _set_children.__doc__

    def frozen(self):
        """
        Returns a frozen copy of this transform node. The frozen copy
        will not update when its children change. Useful for storing
        a previously known state of a transform where
        ``copy.deepcopy()`` might normally be used.
        """
        return self

    if DEBUG:
        def write_graphviz(self, fobj, highlight=[]):
            """
            For debugging purposes.

            Writes the transform tree rooted at 'self' to a graphviz "dot"
            format file. This file can be run through the "dot" utility
            to produce a graph of the transform tree.

            Affine transforms are marked in blue. Bounding boxes are
            marked in yellow.

            *fobj*: A Python file-like object

            Once the "dot" file has been created, it can be turned into a
            png easily with::

                $> dot -Tpng -o $OUTPUT_FILE $DOT_FILE

            """
            seen = set()

            def recurse(root):
                # Depth-first dump; 'seen' guards against shared subtrees.
                if root in seen:
                    return
                seen.add(root)
                props = {}
                label = root.__class__.__name__
                if root._invalid:
                    # Bracketed label marks a currently-invalid node.
                    label = '[%s]' % label
                if root in highlight:
                    props['style'] = 'bold'
                props['shape'] = 'box'
                props['label'] = '"%s"' % label
                props = ' '.join(['%s=%s' % (key, val)
                                  for key, val
                                  in six.iteritems(props)])

                fobj.write('%s [%s];\n' %
                           (hash(root), props))

                if hasattr(root, '_children'):
                    for child in root._children:
                        # Label the edge with the attribute name holding
                        # the child, when one can be found.
                        name = '?'
                        for key, val in six.iteritems(root.__dict__):
                            if val is child:
                                name = key
                                break
                        fobj.write('"%s" -> "%s" [label="%s", fontsize=10];\n'
                                   % (hash(root),
                                      hash(child),
                                      name))
                        recurse(child)

            fobj.write("digraph G {\n")
            recurse(self)
            fobj.write("}\n")
class BboxBase(TransformNode):
    """
    This is the base class of all bounding boxes, and provides
    read-only access to its data. A mutable bounding box is provided
    by the :class:`Bbox` class.

    The canonical representation is as two points, with no
    restrictions on their ordering. Convenience properties are
    provided to get the left, bottom, right and top edges and width
    and height, but these are not stored explicitly.
    """
    is_bbox = True
    is_affine = True

    #* Redundant: Removed for performance
    #
    # def __init__(self):
    #     TransformNode.__init__(self)

    if DEBUG:
        def _check(points):
            # Debug-only sanity check: warn on masked or degenerate bounds.
            if ma.isMaskedArray(points):
                warnings.warn("Bbox bounds are a masked array.")
            points = np.asarray(points)
            if (points[1, 0] - points[0, 0] == 0 or
                    points[1, 1] - points[0, 1] == 0):
                warnings.warn("Singular Bbox.")
        _check = staticmethod(_check)

    def frozen(self):
        return Bbox(self.get_points().copy())
    # Fix: copy the overridden *method's* docstring; previously this copied
    # the TransformNode class docstring by mistake.
    frozen.__doc__ = TransformNode.frozen.__doc__

    def __array__(self, *args, **kwargs):
        # Numpy array interface: a BboxBase converts to its 2x2 points.
        return self.get_points()

    def is_unit(self):
        """
        Returns True if the :class:`Bbox` is the unit bounding box
        from (0, 0) to (1, 1).
        """
        return list(self.get_points().flatten()) == [0., 0., 1., 1.]

    def _get_x0(self):
        return self.get_points()[0, 0]
    x0 = property(_get_x0, None, None, """
         (property) :attr:`x0` is the first of the pair of *x* coordinates that
         define the bounding box. :attr:`x0` is not guaranteed to be
         less than :attr:`x1`. If you require that, use :attr:`xmin`.""")

    def _get_y0(self):
        return self.get_points()[0, 1]
    y0 = property(_get_y0, None, None, """
         (property) :attr:`y0` is the first of the pair of *y* coordinates that
         define the bounding box. :attr:`y0` is not guaranteed to be
         less than :attr:`y1`. If you require that, use :attr:`ymin`.""")

    def _get_x1(self):
        return self.get_points()[1, 0]
    x1 = property(_get_x1, None, None, """
         (property) :attr:`x1` is the second of the pair of *x* coordinates
         that define the bounding box. :attr:`x1` is not guaranteed to be
         greater than :attr:`x0`. If you require that, use :attr:`xmax`.""")

    def _get_y1(self):
        return self.get_points()[1, 1]
    y1 = property(_get_y1, None, None, """
         (property) :attr:`y1` is the second of the pair of *y* coordinates
         that define the bounding box. :attr:`y1` is not guaranteed to be
         greater than :attr:`y0`. If you require that, use :attr:`ymax`.""")

    def _get_p0(self):
        return self.get_points()[0]
    p0 = property(_get_p0, None, None, """
         (property) :attr:`p0` is the first pair of (*x*, *y*) coordinates
         that define the bounding box. It is not guaranteed to be the
         bottom-left corner. For that, use :attr:`min`.""")

    def _get_p1(self):
        return self.get_points()[1]
    p1 = property(_get_p1, None, None, """
         (property) :attr:`p1` is the second pair of (*x*, *y*) coordinates
         that define the bounding box. It is not guaranteed to be the
         top-right corner. For that, use :attr:`max`.""")

    def _get_xmin(self):
        return min(self.get_points()[:, 0])
    xmin = property(_get_xmin, None, None, """
        (property) :attr:`xmin` is the left edge of the bounding box.""")

    def _get_ymin(self):
        return min(self.get_points()[:, 1])
    ymin = property(_get_ymin, None, None, """
        (property) :attr:`ymin` is the bottom edge of the bounding box.""")

    def _get_xmax(self):
        return max(self.get_points()[:, 0])
    xmax = property(_get_xmax, None, None, """
        (property) :attr:`xmax` is the right edge of the bounding box.""")

    def _get_ymax(self):
        return max(self.get_points()[:, 1])
    ymax = property(_get_ymax, None, None, """
        (property) :attr:`ymax` is the top edge of the bounding box.""")

    def _get_min(self):
        return [min(self.get_points()[:, 0]),
                min(self.get_points()[:, 1])]
    min = property(_get_min, None, None, """
        (property) :attr:`min` is the bottom-left corner of the bounding
        box.""")

    def _get_max(self):
        return [max(self.get_points()[:, 0]),
                max(self.get_points()[:, 1])]
    max = property(_get_max, None, None, """
        (property) :attr:`max` is the top-right corner of the bounding box.""")

    def _get_intervalx(self):
        return self.get_points()[:, 0]
    intervalx = property(_get_intervalx, None, None, """
        (property) :attr:`intervalx` is the pair of *x* coordinates that define
        the bounding box. It is not guaranteed to be sorted from left to
        right.""")

    def _get_intervaly(self):
        return self.get_points()[:, 1]
    intervaly = property(_get_intervaly, None, None, """
        (property) :attr:`intervaly` is the pair of *y* coordinates that define
        the bounding box. It is not guaranteed to be sorted from bottom to
        top.""")

    def _get_width(self):
        points = self.get_points()
        return points[1, 0] - points[0, 0]
    width = property(_get_width, None, None, """
        (property) The width of the bounding box. It may be negative if
        :attr:`x1` < :attr:`x0`.""")

    def _get_height(self):
        points = self.get_points()
        return points[1, 1] - points[0, 1]
    height = property(_get_height, None, None, """
        (property) The height of the bounding box. It may be negative if
        :attr:`y1` < :attr:`y0`.""")

    def _get_size(self):
        points = self.get_points()
        return points[1] - points[0]
    size = property(_get_size, None, None, """
        (property) The width and height of the bounding box. May be negative,
        in the same way as :attr:`width` and :attr:`height`.""")

    def _get_bounds(self):
        x0, y0, x1, y1 = self.get_points().flatten()
        return (x0, y0, x1 - x0, y1 - y0)
    bounds = property(_get_bounds, None, None, """
        (property) Returns (:attr:`x0`, :attr:`y0`, :attr:`width`,
        :attr:`height`).""")

    def _get_extents(self):
        return self.get_points().flatten().copy()
    extents = property(_get_extents, None, None, """
        (property) Returns (:attr:`x0`, :attr:`y0`, :attr:`x1`,
        :attr:`y1`).""")

    def get_points(self):
        """
        Return the points of the bounding box as an array of the form
        [[x0, y0], [x1, y1]]. Must be overridden by subclasses.
        """
        # Bug fix: the exception must be *raised*, not returned, so that a
        # subclass missing the override fails loudly instead of silently
        # handing an exception instance to callers.
        raise NotImplementedError()

    def containsx(self, x):
        """
        Returns True if *x* is between or equal to :attr:`x0` and
        :attr:`x1`.
        """
        x0, x1 = self.intervalx
        return ((x0 < x1
                 and (x >= x0 and x <= x1))
                or (x >= x1 and x <= x0))

    def containsy(self, y):
        """
        Returns True if *y* is between or equal to :attr:`y0` and
        :attr:`y1`.
        """
        y0, y1 = self.intervaly
        return ((y0 < y1
                 and (y >= y0 and y <= y1))
                or (y >= y1 and y <= y0))

    def contains(self, x, y):
        """
        Returns *True* if (*x*, *y*) is a coordinate inside the
        bounding box or on its edge.
        """
        return self.containsx(x) and self.containsy(y)

    def overlaps(self, other):
        """
        Returns True if this bounding box overlaps with the given
        bounding box *other*.
        """
        ax1, ay1, ax2, ay2 = self._get_extents()
        bx1, by1, bx2, by2 = other._get_extents()
        # Any NaN coordinate makes the overlap undecidable; treat as none.
        if any(np.isnan(v) for v in [ax1, ay1, ax2, ay2, bx1, by1, bx2, by2]):
            return False

        # Normalize both boxes so that (1) <= (2) in each dimension.
        if ax2 < ax1:
            ax2, ax1 = ax1, ax2
        if ay2 < ay1:
            ay2, ay1 = ay1, ay2
        if bx2 < bx1:
            bx2, bx1 = bx1, bx2
        if by2 < by1:
            by2, by1 = by1, by2

        return not ((bx2 < ax1) or
                    (by2 < ay1) or
                    (bx1 > ax2) or
                    (by1 > ay2))

    def fully_containsx(self, x):
        """
        Returns True if *x* is between but not equal to :attr:`x0` and
        :attr:`x1`.
        """
        x0, x1 = self.intervalx
        return ((x0 < x1
                 and (x > x0 and x < x1))
                or (x > x1 and x < x0))

    def fully_containsy(self, y):
        """
        Returns True if *y* is between but not equal to :attr:`y0` and
        :attr:`y1`.
        """
        y0, y1 = self.intervaly
        return ((y0 < y1
                 and (y > y0 and y < y1))
                or (y > y1 and y < y0))

    def fully_contains(self, x, y):
        """
        Returns True if (*x*, *y*) is a coordinate inside the bounding
        box, but not on its edge.
        """
        return self.fully_containsx(x) \
            and self.fully_containsy(y)

    def fully_overlaps(self, other):
        """
        Returns True if this bounding box overlaps with the given
        bounding box *other*, but not on its edge alone.
        """
        ax1, ay1, ax2, ay2 = self._get_extents()
        bx1, by1, bx2, by2 = other._get_extents()

        if ax2 < ax1:
            ax2, ax1 = ax1, ax2
        if ay2 < ay1:
            ay2, ay1 = ay1, ay2
        if bx2 < bx1:
            bx2, bx1 = bx1, bx2
        if by2 < by1:
            by2, by1 = by1, by2

        return not ((bx2 <= ax1) or
                    (by2 <= ay1) or
                    (bx1 >= ax2) or
                    (by1 >= ay2))

    def transformed(self, transform):
        """
        Return a new :class:`Bbox` object, statically transformed by
        the given transform.
        """
        # Transform three corners (ll, ul, lr); the fourth is implied,
        # which keeps the result axis-aligned for affine transforms.
        pts = self.get_points()
        ll, ul, lr = transform.transform(np.array([pts[0],
            [pts[0, 0], pts[1, 1]], [pts[1, 0], pts[0, 1]]]))
        return Bbox([ll, [lr[0], ul[1]]])

    def inverse_transformed(self, transform):
        """
        Return a new :class:`Bbox` object, statically transformed by
        the inverse of the given transform.
        """
        return self.transformed(transform.inverted())

    # Fractional (x, y) anchor positions used by anchored(); compass-style
    # keys ('N' = top-center, 'SE' = bottom-right, etc.).
    coefs = {'C': (0.5, 0.5),
             'SW': (0, 0),
             'S': (0.5, 0),
             'SE': (1.0, 0),
             'E': (1.0, 0.5),
             'NE': (1.0, 1.0),
             'N': (0.5, 1.0),
             'NW': (0, 1.0),
             'W': (0, 0.5)}

    def anchored(self, c, container=None):
        """
        Return a copy of the :class:`Bbox`, shifted to position *c*
        within a container.

        *c*: may be either:

          * a sequence (*cx*, *cy*) where *cx* and *cy* range from 0
            to 1, where 0 is left or bottom and 1 is right or top

          * a string:
            - 'C' for centered
            - 'S' for bottom-center
            - 'SE' for bottom-right
            - 'E' for right
            - etc.

        Optional argument *container* is the box within which the
        :class:`Bbox` is positioned; it defaults to the initial
        :class:`Bbox`.
        """
        if container is None:
            container = self
        l, b, w, h = container.bounds
        if isinstance(c, six.string_types):
            cx, cy = self.coefs[c]
        else:
            cx, cy = c
        L, B, W, H = self.bounds
        # NOTE(review): relies on the subclass storing ``self._points``
        # (Bbox does; TransformedBbox only sets it after get_points()).
        return Bbox(self._points +
                    [(l + cx * (w - W)) - L,
                     (b + cy * (h - H)) - B])

    def shrunk(self, mx, my):
        """
        Return a copy of the :class:`Bbox`, shrunk by the factor *mx*
        in the *x* direction and the factor *my* in the *y* direction.
        The lower left corner of the box remains unchanged. Normally
        *mx* and *my* will be less than 1, but this is not enforced.
        """
        w, h = self.size
        return Bbox([self._points[0],
                     self._points[0] + [mx * w, my * h]])

    def shrunk_to_aspect(self, box_aspect, container=None, fig_aspect=1.0):
        """
        Return a copy of the :class:`Bbox`, shrunk so that it is as
        large as it can be while having the desired aspect ratio,
        *box_aspect*. If the box coordinates are relative---that
        is, fractions of a larger box such as a figure---then the
        physical aspect ratio of that figure is specified with
        *fig_aspect*, so that *box_aspect* can also be given as a
        ratio of the absolute dimensions, not the relative dimensions.
        """
        if box_aspect <= 0 or fig_aspect <= 0:
            raise ValueError("'box_aspect' and 'fig_aspect' must be positive")
        if container is None:
            container = self
        w, h = container.size
        # Fit the tallest box of the requested aspect inside (w, h).
        H = w * box_aspect / fig_aspect
        if H <= h:
            W = w
        else:
            W = h * fig_aspect / box_aspect
            H = h
        return Bbox([self._points[0],
                     self._points[0] + (W, H)])

    def splitx(self, *args):
        """
        e.g., ``bbox.splitx(f1, f2, ...)``

        Returns a list of new :class:`Bbox` objects formed by
        splitting the original one with vertical lines at fractional
        positions *f1*, *f2*, ...
        """
        boxes = []
        xf = [0] + list(args) + [1]
        x0, y0, x1, y1 = self._get_extents()
        w = x1 - x0
        for xf0, xf1 in zip(xf[:-1], xf[1:]):
            boxes.append(Bbox([[x0 + xf0 * w, y0], [x0 + xf1 * w, y1]]))
        return boxes

    def splity(self, *args):
        """
        e.g., ``bbox.splity(f1, f2, ...)``

        Returns a list of new :class:`Bbox` objects formed by
        splitting the original one with horizontal lines at fractional
        positions *f1*, *f2*, ...
        """
        boxes = []
        yf = [0] + list(args) + [1]
        x0, y0, x1, y1 = self._get_extents()
        h = y1 - y0
        for yf0, yf1 in zip(yf[:-1], yf[1:]):
            boxes.append(Bbox([[x0, y0 + yf0 * h], [x1, y0 + yf1 * h]]))
        return boxes

    def count_contains(self, vertices):
        """
        Count the number of vertices contained in the :class:`Bbox`.

        *vertices* is a Nx2 Numpy array.
        """
        if len(vertices) == 0:
            return 0
        vertices = np.asarray(vertices)
        x0, y0, x1, y1 = self._get_extents()
        with np.errstate(invalid='ignore'):
            # A vertex is inside iff it is on opposite (or equal) sides of
            # both pairs of edges, i.e. the sign sums cancel.
            dx0 = np.sign(vertices[:, 0] - x0)
            dy0 = np.sign(vertices[:, 1] - y0)
            dx1 = np.sign(vertices[:, 0] - x1)
            dy1 = np.sign(vertices[:, 1] - y1)
            inside = ((abs(dx0 + dx1) + abs(dy0 + dy1)) == 0)
        return np.sum(inside)

    def count_overlaps(self, bboxes):
        """
        Count the number of bounding boxes that overlap this one.

        bboxes is a sequence of :class:`BboxBase` objects
        """
        return count_bboxes_overlapping_bbox(self, [np.array(x) for x in bboxes])

    def expanded(self, sw, sh):
        """
        Return a new :class:`Bbox` which is this :class:`Bbox`
        expanded around its center by the given factors *sw* and
        *sh*.
        """
        width = self.width
        height = self.height
        deltaw = (sw * width - width) / 2.0
        deltah = (sh * height - height) / 2.0
        a = np.array([[-deltaw, -deltah], [deltaw, deltah]])
        return Bbox(self._points + a)

    def padded(self, p):
        """
        Return a new :class:`Bbox` that is padded on all four sides by
        the given value.
        """
        points = self.get_points()
        return Bbox(points + [[-p, -p], [p, p]])

    def translated(self, tx, ty):
        """
        Return a copy of the :class:`Bbox`, statically translated by
        *tx* and *ty*.
        """
        return Bbox(self._points + (tx, ty))

    def corners(self):
        """
        Return an array of points which are the four corners of this
        rectangle. For example, if this :class:`Bbox` is defined by
        the points (*a*, *b*) and (*c*, *d*), :meth:`corners` returns
        (*a*, *b*), (*a*, *d*), (*c*, *b*) and (*c*, *d*).
        """
        l, b, r, t = self.get_points().flatten()
        return np.array([[l, b], [l, t], [r, b], [r, t]])

    def rotated(self, radians):
        """
        Return a new bounding box that bounds a rotated version of
        this bounding box by the given radians. The new bounding box
        is still aligned with the axes, of course.
        """
        corners = self.corners()
        corners_rotated = Affine2D().rotate(radians).transform(corners)
        bbox = Bbox.unit()
        bbox.update_from_data_xy(corners_rotated, ignore=True)
        return bbox

    @staticmethod
    def union(bboxes):
        """
        Return a :class:`Bbox` that contains all of the given bboxes.
        """
        if not len(bboxes):
            raise ValueError("'bboxes' cannot be empty")

        if len(bboxes) == 1:
            return bboxes[0]

        x0 = np.inf
        y0 = np.inf
        x1 = -np.inf
        y1 = -np.inf

        for bbox in bboxes:
            points = bbox.get_points()
            xs = points[:, 0]
            ys = points[:, 1]
            x0 = min(x0, np.min(xs))
            y0 = min(y0, np.min(ys))
            x1 = max(x1, np.max(xs))
            y1 = max(y1, np.max(ys))

        return Bbox.from_extents(x0, y0, x1, y1)

    @staticmethod
    def intersection(bbox1, bbox2):
        """
        Return the intersection of the two bboxes or None
        if they do not intersect.

        Implements the algorithm described at:

            http://www.tekpool.com/node/2687
        """
        intersects = not (bbox2.xmin > bbox1.xmax or
                          bbox2.xmax < bbox1.xmin or
                          bbox2.ymin > bbox1.ymax or
                          bbox2.ymax < bbox1.ymin)

        if intersects:
            x0 = max([bbox1.xmin, bbox2.xmin])
            x1 = min([bbox1.xmax, bbox2.xmax])
            y0 = max([bbox1.ymin, bbox2.ymin])
            y1 = min([bbox1.ymax, bbox2.ymax])
            return Bbox.from_extents(x0, y0, x1, y1)

        return None
class Bbox(BboxBase):
    """
    A mutable bounding box.
    """

    def __init__(self, points, **kwargs):
        """
        *points*: a 2x2 numpy array of the form [[x0, y0], [x1, y1]]

        If you need to create a :class:`Bbox` object from another form
        of data, consider the static methods :meth:`unit`,
        :meth:`from_bounds` and :meth:`from_extents`.
        """
        BboxBase.__init__(self, **kwargs)
        points = np.asarray(points, np.float_)
        if points.shape != (2, 2):
            raise ValueError('Bbox points must be of the form '
                             '"[[x0, y0], [x1, y1]]".')
        self._points = points
        # Smallest strictly-positive coordinates seen; maintained by
        # update_from_path() via update_path_extents().
        self._minpos = np.array([0.0000001, 0.0000001])
        self._ignore = True
        # it is helpful in some contexts to know if the bbox is a
        # default or has been mutated; we store the orig points to
        # support the mutated methods
        self._points_orig = self._points.copy()

    if DEBUG:
        ___init__ = __init__

        def __init__(self, points, **kwargs):
            # Debug build validates the points before normal construction.
            self._check(points)
            self.___init__(points, **kwargs)

        def invalidate(self):
            self._check(self._points)
            TransformNode.invalidate(self)

    @staticmethod
    def unit():
        """
        (staticmethod) Create a new unit :class:`Bbox` from (0, 0) to
        (1, 1).
        """
        # np.float_ (float64) instead of the deprecated np.float alias,
        # for consistency with from_extents()/_set_bounds() and to keep
        # working on NumPy >= 1.24 where np.float was removed.
        return Bbox(np.array([[0.0, 0.0], [1.0, 1.0]], np.float_))

    @staticmethod
    def null():
        """
        (staticmethod) Create a new null :class:`Bbox` from (inf, inf) to
        (-inf, -inf).
        """
        return Bbox(np.array([[np.inf, np.inf], [-np.inf, -np.inf]],
                             np.float_))

    @staticmethod
    def from_bounds(x0, y0, width, height):
        """
        (staticmethod) Create a new :class:`Bbox` from *x0*, *y0*,
        *width* and *height*.

        *width* and *height* may be negative.
        """
        return Bbox.from_extents(x0, y0, x0 + width, y0 + height)

    @staticmethod
    def from_extents(*args):
        """
        (staticmethod) Create a new Bbox from *left*, *bottom*,
        *right* and *top*.

        The *y*-axis increases upwards.
        """
        points = np.array(args, dtype=np.float_).reshape(2, 2)
        return Bbox(points)

    def __format__(self, fmt):
        return (
            'Bbox(x0={0.x0:{1}}, y0={0.y0:{1}}, x1={0.x1:{1}}, y1={0.y1:{1}})'.
            format(self, fmt))

    def __str__(self):
        return format(self, '')

    def __repr__(self):
        return 'Bbox([[{0.x0}, {0.y0}], [{0.x1}, {0.y1}]])'.format(self)

    def ignore(self, value):
        """
        Set whether the existing bounds of the box should be ignored
        by subsequent calls to :meth:`update_from_data` or
        :meth:`update_from_data_xy`.

        *value*:

           - When True, subsequent calls to :meth:`update_from_data`
             will ignore the existing bounds of the :class:`Bbox`.

           - When False, subsequent calls to :meth:`update_from_data`
             will include the existing bounds of the :class:`Bbox`.
        """
        self._ignore = value

    def update_from_data(self, x, y, ignore=None):
        """
        Update the bounds of the :class:`Bbox` based on the passed in
        data. After updating, the bounds will have positive *width*
        and *height*; *x0* and *y0* will be the minimal values.

        *x*: a numpy array of *x*-values

        *y*: a numpy array of *y*-values

        *ignore*:
           - when True, ignore the existing bounds of the :class:`Bbox`.
           - when False, include the existing bounds of the :class:`Bbox`.
           - when None, use the last value passed to :meth:`ignore`.
        """
        warnings.warn(
            "update_from_data requires a memory copy -- please replace with "
            "update_from_data_xy")
        xy = np.hstack((x.reshape((len(x), 1)), y.reshape((len(y), 1))))
        return self.update_from_data_xy(xy, ignore)

    def update_from_path(self, path, ignore=None, updatex=True, updatey=True):
        """
        Update the bounds of the :class:`Bbox` based on the passed in
        data. After updating, the bounds will have positive *width*
        and *height*; *x0* and *y0* will be the minimal values.

        *path*: a :class:`~matplotlib.path.Path` instance

        *ignore*:
           - when True, ignore the existing bounds of the :class:`Bbox`.
           - when False, include the existing bounds of the :class:`Bbox`.
           - when None, use the last value passed to :meth:`ignore`.

        *updatex*: when True, update the x values

        *updatey*: when True, update the y values
        """
        if ignore is None:
            ignore = self._ignore

        if path.vertices.size == 0:
            return

        points, minpos, changed = update_path_extents(
            path, None, self._points, self._minpos, ignore)

        if changed:
            self.invalidate()
            # Apply each axis independently so callers can freeze one.
            if updatex:
                self._points[:, 0] = points[:, 0]
                self._minpos[0] = minpos[0]
            if updatey:
                self._points[:, 1] = points[:, 1]
                self._minpos[1] = minpos[1]

    def update_from_data_xy(self, xy, ignore=None, updatex=True, updatey=True):
        """
        Update the bounds of the :class:`Bbox` based on the passed in
        data. After updating, the bounds will have positive *width*
        and *height*; *x0* and *y0* will be the minimal values.

        *xy*: a numpy array of 2D points

        *ignore*:
           - when True, ignore the existing bounds of the :class:`Bbox`.
           - when False, include the existing bounds of the :class:`Bbox`.
           - when None, use the last value passed to :meth:`ignore`.

        *updatex*: when True, update the x values

        *updatey*: when True, update the y values
        """
        if len(xy) == 0:
            return

        path = Path(xy)
        self.update_from_path(path, ignore=ignore,
                              updatex=updatex, updatey=updatey)

    def _set_x0(self, val):
        self._points[0, 0] = val
        self.invalidate()
    x0 = property(BboxBase._get_x0, _set_x0)

    def _set_y0(self, val):
        self._points[0, 1] = val
        self.invalidate()
    y0 = property(BboxBase._get_y0, _set_y0)

    def _set_x1(self, val):
        self._points[1, 0] = val
        self.invalidate()
    x1 = property(BboxBase._get_x1, _set_x1)

    def _set_y1(self, val):
        self._points[1, 1] = val
        self.invalidate()
    y1 = property(BboxBase._get_y1, _set_y1)

    def _set_p0(self, val):
        self._points[0] = val
        self.invalidate()
    p0 = property(BboxBase._get_p0, _set_p0)

    def _set_p1(self, val):
        self._points[1] = val
        self.invalidate()
    p1 = property(BboxBase._get_p1, _set_p1)

    def _set_intervalx(self, interval):
        self._points[:, 0] = interval
        self.invalidate()
    intervalx = property(BboxBase._get_intervalx, _set_intervalx)

    def _set_intervaly(self, interval):
        self._points[:, 1] = interval
        self.invalidate()
    intervaly = property(BboxBase._get_intervaly, _set_intervaly)

    def _set_bounds(self, bounds):
        l, b, w, h = bounds
        points = np.array([[l, b], [l + w, b + h]], np.float_)
        # Only invalidate when the bounds actually change.
        if np.any(self._points != points):
            self._points = points
            self.invalidate()
    bounds = property(BboxBase._get_bounds, _set_bounds)

    def _get_minpos(self):
        return self._minpos
    minpos = property(_get_minpos)

    def _get_minposx(self):
        return self._minpos[0]
    minposx = property(_get_minposx)

    def _get_minposy(self):
        return self._minpos[1]
    minposy = property(_get_minposy)

    def get_points(self):
        """
        Get the points of the bounding box directly as a numpy array
        of the form: [[x0, y0], [x1, y1]].
        """
        self._invalid = 0
        return self._points

    def set_points(self, points):
        """
        Set the points of the bounding box directly from a numpy array
        of the form: [[x0, y0], [x1, y1]]. No error checking is
        performed, as this method is mainly for internal use.
        """
        if np.any(self._points != points):
            self._points = points
            self.invalidate()

    def set(self, other):
        """
        Set this bounding box from the "frozen" bounds of another
        :class:`Bbox`.
        """
        # NOTE(review): adopts other's points array without copying, so the
        # two boxes may share storage afterwards -- confirm callers rely on
        # this before changing it.
        if np.any(self._points != other.get_points()):
            self._points = other.get_points()
            self.invalidate()

    def mutated(self):
        'return whether the bbox has changed since init'
        return self.mutatedx() or self.mutatedy()

    def mutatedx(self):
        'return whether the x-limits have changed since init'
        return (self._points[0, 0] != self._points_orig[0, 0] or
                self._points[1, 0] != self._points_orig[1, 0])

    def mutatedy(self):
        'return whether the y-limits have changed since init'
        return (self._points[0, 1] != self._points_orig[0, 1] or
                self._points[1, 1] != self._points_orig[1, 1])
class TransformedBbox(BboxBase):
    """
    A bounding box whose bounds track a child :class:`Bbox` passed
    through a 2D :class:`Transform`. Whenever the child bbox or the
    transform is invalidated, the transformed points are lazily
    recomputed on the next access.
    """

    def __init__(self, bbox, transform, **kwargs):
        """
        *bbox*: the child :class:`Bbox` to be transformed

        *transform*: a 2D :class:`Transform` applied to *bbox*
        """
        if not bbox.is_bbox:
            raise ValueError("'bbox' is not a bbox")
        if not isinstance(transform, Transform):
            msg = ("'transform' must be an instance of"
                   " 'matplotlib.transform.Transform'")
            raise ValueError(msg)
        if transform.input_dims != 2 or transform.output_dims != 2:
            msg = "The input and output dimensions of 'transform' must be 2"
            raise ValueError(msg)

        BboxBase.__init__(self, **kwargs)
        self._bbox = bbox
        self._transform = transform
        # Register with the invalidation machinery so changes to either
        # child propagate to this node.
        self.set_children(bbox, transform)
        # Cache of the transformed points; filled lazily by get_points().
        self._points = None

    def __repr__(self):
        return "TransformedBbox(%r, %r)" % (self._bbox, self._transform)

    def get_points(self):
        if self._invalid:
            # Recompute the cache: transform the child's points and
            # replace any masked values with 0.0.
            transformed = self._transform.transform(self._bbox.get_points())
            self._points = np.ma.filled(transformed, 0.0)
            self._invalid = 0
        return self._points
    get_points.__doc__ = Bbox.get_points.__doc__

    if DEBUG:
        _get_points = get_points

        def get_points(self):
            # Debug wrapper: validate the cached points on every access.
            pts = self._get_points()
            self._check(pts)
            return pts
class Transform(TransformNode):
"""
The base class of all :class:`TransformNode` instances that
actually perform a transformation.
All non-affine transformations should be subclasses of this class.
New affine transformations should be subclasses of
:class:`Affine2D`.
Subclasses of this class should override the following members (at
minimum):
- :attr:`input_dims`
- :attr:`output_dims`
- :meth:`transform`
- :attr:`is_separable`
- :attr:`has_inverse`
- :meth:`inverted` (if :attr:`has_inverse` is True)
If the transform needs to do something non-standard with
:class:`matplotlib.path.Path` objects, such as adding curves
where there were once line segments, it should override:
- :meth:`transform_path`
"""
input_dims = None
"""
The number of input dimensions of this transform.
Must be overridden (with integers) in the subclass.
"""
output_dims = None
"""
The number of output dimensions of this transform.
Must be overridden (with integers) in the subclass.
"""
has_inverse = False
"""True if this transform has a corresponding inverse transform."""
is_separable = False
"""True if this transform is separable in the x- and y- dimensions."""
def __add__(self, other):
"""
Composes two transforms together such that *self* is followed
by *other*.
"""
if isinstance(other, Transform):
return composite_transform_factory(self, other)
raise TypeError(
"Can not add Transform to object of type '%s'" % type(other))
def __radd__(self, other):
"""
Composes two transforms together such that *self* is followed
by *other*.
"""
if isinstance(other, Transform):
return composite_transform_factory(other, self)
raise TypeError(
"Can not add Transform to object of type '%s'" % type(other))
def __eq__(self, other):
# equality is based on transform object id. Hence:
# Transform() != Transform().
# Some classes, such as TransformWrapper & AffineBase, will override.
return self is other
def _iter_break_from_left_to_right(self):
"""
Returns an iterator breaking down this transform stack from left to
right recursively. If self == ((A, N), A) then the result will be an
iterator which yields I : ((A, N), A), followed by A : (N, A),
followed by (A, N) : (A), but not ((A, N), A) : I.
This is equivalent to flattening the stack then yielding
``flat_stack[:i], flat_stack[i:]`` where i=0..(n-1).
"""
yield IdentityTransform(), self
@property
def depth(self):
"""
Returns the number of transforms which have been chained
together to form this Transform instance.
.. note::
For the special case of a Composite transform, the maximum depth
of the two is returned.
"""
return 1
def contains_branch(self, other):
"""
Return whether the given transform is a sub-tree of this transform.
This routine uses transform equality to identify sub-trees, therefore
in many situations it is object id which will be used.
For the case where the given transform represents the whole
of this transform, returns True.
"""
if self.depth < other.depth:
return False
# check that a subtree is equal to other (starting from self)
for _, sub_tree in self._iter_break_from_left_to_right():
if sub_tree == other:
return True
return False
def contains_branch_seperately(self, other_transform):
"""
Returns whether the given branch is a sub-tree of this transform on
each seperate dimension.
A common use for this method is to identify if a transform is a blended
transform containing an axes' data transform. e.g.::
x_isdata, y_isdata = trans.contains_branch_seperately(ax.transData)
"""
if self.output_dims != 2:
raise ValueError('contains_branch_seperately only supports '
'transforms with 2 output dimensions')
# for a non-blended transform each seperate dimension is the same, so
# just return the appropriate shape.
return [self.contains_branch(other_transform)] * 2
def __sub__(self, other):
    """
    Returns a transform stack which goes all the way down self's transform
    stack, and then ascends back up other's stack. If it can, this is
    optimised::

        # normally
        A - B == a + b.inverted()

        # sometimes, when A contains the tree B there is no need to
        # descend all the way down to the base of A (via B), instead we
        # can just stop at B.

        (A + B) - (B)^-1 == A

        # similarly, when B contains tree A, we can avoid decending A at
        # all, basically:
        A - (A + B) == ((B + A) - A).inverted() or B^-1

    For clarity, the result of ``(A + B) - B + B == (A + B)``.
    """
    # we only know how to do this operation if other is a Transform.
    if not isinstance(other, Transform):
        return NotImplemented
    # Shortcut 1: *other* is a right-hand sub-tree of self; the left-hand
    # remainder is the answer and no inversion is needed.
    for remainder, sub_tree in self._iter_break_from_left_to_right():
        if sub_tree == other:
            return remainder
    # Shortcut 2: self is a right-hand sub-tree of *other*; the inverse of
    # other's left-hand remainder is the answer.
    for remainder, sub_tree in other._iter_break_from_left_to_right():
        if sub_tree == self:
            if not remainder.has_inverse:
                raise ValueError("The shortcut cannot be computed since "
                                 "other's transform includes a non-invertable component.")
            return remainder.inverted()
    # if we have got this far, then there was no shortcut possible
    if other.has_inverse:
        return self + other.inverted()
    else:
        raise ValueError('It is not possible to compute transA - transB '
                         'since transB cannot be inverted and there is no '
                         'shortcut possible.')
def __array__(self, *args, **kwargs):
    """
    Array interface to get at this Transform's affine matrix.
    """
    # np.array(transform) yields the 3x3 matrix of the affine part only.
    return self.get_affine().get_matrix()
def transform(self, values):
    """
    Performs the transformation on the given array of values.

    Accepts a numpy array of shape (N x :attr:`input_dims`) and
    returns a numpy array of shape (N x :attr:`output_dims`).

    Alternatively, accepts a numpy array of length :attr:`input_dims`
    and returns a numpy array of length :attr:`output_dims`.

    Raises ValueError if the input has more than 2 dimensions.
    """
    # Ensure that values is a 2d array (but remember whether
    # we started with a 1d or 2d array).
    values = np.asanyarray(values)
    ndim = values.ndim
    values = values.reshape((-1, self.input_dims))

    # Apply the non-affine part first, then the affine part.
    res = self.transform_affine(self.transform_non_affine(values))

    # Convert the result back to the shape of the input values.
    if ndim == 0:
        assert not np.ma.is_masked(res)  # just to be on the safe side
        return res[0, 0]
    if ndim == 1:
        return res.reshape(-1)
    elif ndim == 2:
        return res
    else:
        raise ValueError(
            "Input values must have shape (N x {dims}) "
            "or ({dims}).".format(dims=self.input_dims))
    # NOTE: the original ended with an unreachable ``return res`` after
    # this exhaustive if/elif/else; that dead code has been removed.
def transform_affine(self, values):
    """
    Performs only the affine part of this transformation on the
    given array of values.

    ``transform(values)`` is always equivalent to
    ``transform_affine(transform_non_affine(values))``.

    In non-affine transformations, this is generally a no-op. In
    affine transformations, this is equivalent to
    ``transform(values)``.

    Accepts a numpy array of shape (N x :attr:`input_dims`) and
    returns a numpy array of shape (N x :attr:`output_dims`).

    Alternatively, accepts a numpy array of length :attr:`input_dims`
    and returns a numpy array of length :attr:`output_dims`.
    """
    # Delegate to whatever this transform reports as its affine part.
    return self.get_affine().transform(values)
def transform_non_affine(self, values):
    """
    Performs only the non-affine part of the transformation.

    ``transform(values)`` is always equivalent to
    ``transform_affine(transform_non_affine(values))``.

    In non-affine transformations, this is generally equivalent to
    ``transform(values)``. In affine transformations, this is
    always a no-op.

    Accepts a numpy array of shape (N x :attr:`input_dims`) and
    returns a numpy array of shape (N x :attr:`output_dims`).

    Alternatively, accepts a numpy array of length :attr:`input_dims`
    and returns a numpy array of length :attr:`output_dims`.
    """
    # Base implementation is the identity; non-affine subclasses override.
    return values
def transform_bbox(self, bbox):
    """
    Transform the given bounding box.

    Note, for smarter transforms including caching (a common
    requirement for matplotlib figures), see :class:`TransformedBbox`.
    """
    # Transform the bbox's corner points and build a fresh Bbox from them.
    return Bbox(self.transform(bbox.get_points()))
def get_affine(self):
    """
    Get the affine part of this transform.
    """
    # The base class carries no affine component of its own.
    return IdentityTransform()
def get_matrix(self):
    """
    Get the Affine transformation array for the affine part
    of this transform.
    """
    # Delegate to the affine part returned by get_affine().
    return self.get_affine().get_matrix()
def transform_point(self, point):
    """
    A convenience function that returns the transformed copy of a
    single point.

    The point is given as a sequence of length :attr:`input_dims`.
    The transformed point is returned as a sequence of length
    :attr:`output_dims`.
    """
    if len(point) != self.input_dims:
        raise ValueError("The length of 'point' must be 'self.input_dims'")
    # Wrap the point in a one-row array, transform it, and unwrap.
    single_row = np.asarray([point])
    return self.transform(single_row)[0]
def transform_path(self, path):
    """
    Returns a transformed path.

    *path*: a :class:`~matplotlib.path.Path` instance.

    In some cases, this transform may insert curves into the path
    that began as line segments.
    """
    # Apply the non-affine part first, then the affine part, mirroring
    # the decomposition used by transform().
    return self.transform_path_affine(self.transform_path_non_affine(path))
def transform_path_affine(self, path):
    """
    Returns a path, transformed only by the affine part of
    this transform.

    *path*: a :class:`~matplotlib.path.Path` instance.

    ``transform_path(path)`` is equivalent to
    ``transform_path_affine(transform_path_non_affine(values))``.
    """
    # Delegate to the affine portion of this transform.
    return self.get_affine().transform_path_affine(path)
def transform_path_non_affine(self, path):
    """
    Returns a path, transformed only by the non-affine
    part of this transform.

    *path*: a :class:`~matplotlib.path.Path` instance.

    ``transform_path(path)`` is equivalent to
    ``transform_path_affine(transform_path_non_affine(values))``.
    """
    new_vertices = self.transform_non_affine(path.vertices)
    # Carry the original codes and rendering hints over to the new path.
    attributes = {'interpolation_steps': path._interpolation_steps,
                  'should_simplify': path.should_simplify}
    return Path._fast_from_codes_and_verts(new_vertices, path.codes,
                                           attributes)
def transform_angles(self, angles, pts, radians=False, pushoff=1e-5):
    """
    Performs transformation on a set of angles anchored at
    specific locations.

    The *angles* must be a column vector (i.e., numpy array).

    The *pts* must be a two-column numpy array of x,y positions
    (angle transforms currently only work in 2D). This array must
    have the same number of rows as *angles*.

    *radians* indicates whether or not input angles are given in
    radians (True) or degrees (False; the default).

    *pushoff* is the distance to move away from *pts* for
    determining transformed angles (see discussion of method
    below).

    The transformed angles are returned in an array with the same
    size as *angles*.

    The generic version of this method uses a very generic
    algorithm that transforms *pts*, as well as locations very
    close to *pts*, to find the angle in the transformed system.
    """
    # Must be 2D
    if self.input_dims != 2 or self.output_dims != 2:
        raise NotImplementedError('Only defined in 2D')
    if pts.shape[1] != 2:
        raise ValueError("'pts' must be array with 2 columns for x,y")
    if angles.ndim != 1 or angles.shape[0] != pts.shape[0]:
        raise ValueError("'angles' must be a column vector and have same number of"
                         " rows as 'pts'")

    # Convert to radians if desired
    if not radians:
        angles = angles / 180.0 * np.pi

    # Nudge each anchor a short distance along its angle's direction.
    offset_pts = pts + pushoff * np.c_[np.cos(angles), np.sin(angles)]

    # Transform both the anchors and the nudged points.
    transformed = self.transform(pts)
    transformed_off = self.transform(offset_pts)

    # The transformed angle is the direction of the transformed offset.
    delta = transformed_off - transformed
    result = np.arctan2(delta[:, 1], delta[:, 0])

    # Convert back to degrees if desired
    if not radians:
        result = result * 180.0 / np.pi

    return result
def inverted(self):
    """
    Return the corresponding inverse transformation.

    The return value of this method should be treated as
    temporary. An update to *self* does not cause a corresponding
    update to its inverted copy.

    ``x === self.inverted().transform(self.transform(x))``
    """
    # Concrete subclasses must provide their own inverse.
    raise NotImplementedError()
class TransformWrapper(Transform):
    """
    A helper class that holds a single child transform and acts
    equivalently to it.

    This is useful if a node of the transform tree must be replaced at
    run time with a transform of a different type. This class allows
    that replacement to correctly trigger invalidation.

    Note that :class:`TransformWrapper` instances must have the same
    input and output dimensions during their entire lifetime, so the
    child transform may only be replaced with another child transform
    of the same dimensions.
    """
    # Invalidation passes straight through this node to its dependants.
    pass_through = True

    def __init__(self, child):
        """
        *child*: A class:`Transform` instance. This child may later
        be replaced with :meth:`set`.
        """
        if not isinstance(child, Transform):
            msg = ("'child' must be an instance of"
                   " 'matplotlib.transform.Transform'")
            raise ValueError(msg)
        Transform.__init__(self)
        # Dimensions are fixed for the wrapper's lifetime; set() enforces
        # that any replacement child matches them.
        self.input_dims = child.input_dims
        self.output_dims = child.output_dims
        self._set(child)
        self._invalid = 0

    def __eq__(self, other):
        # Equality is delegated entirely to the wrapped child.
        return self._child.__eq__(other)

    if DEBUG:
        def __str__(self):
            return str(self._child)

    def __getstate__(self):
        # only store the child
        return {'child': self._child}

    def __setstate__(self, state):
        # re-initialise the TransformWrapper with the state's child
        self.__init__(state['child'])

    def __repr__(self):
        return "TransformWrapper(%r)" % self._child

    def frozen(self):
        return self._child.frozen()
    frozen.__doc__ = Transform.frozen.__doc__

    def _set(self, child):
        # Rebind the child's bound methods directly onto this instance so
        # calls bypass a delegation layer; set() refreshes these bindings
        # when the child is replaced.
        self._child = child
        self.set_children(child)

        self.transform = child.transform
        self.transform_affine = child.transform_affine
        self.transform_non_affine = child.transform_non_affine
        self.transform_path = child.transform_path
        self.transform_path_affine = child.transform_path_affine
        self.transform_path_non_affine = child.transform_path_non_affine
        self.get_affine = child.get_affine
        self.inverted = child.inverted
        self.get_matrix = child.get_matrix

        # note we do not wrap other properties here since the transform's
        # child can be changed with WrappedTransform.set and so checking
        # is_affine and other such properties may be dangerous.

    def set(self, child):
        """
        Replace the current child of this transform with another one.

        The new child must have the same number of input and output
        dimensions as the current child.
        """
        if (child.input_dims != self.input_dims or
                child.output_dims != self.output_dims):
            msg = ("The new child must have the same number of input and"
                   " output dimensions as the current child.")
            raise ValueError(msg)

        self._set(child)

        # Notify dependants of the change, then mark ourselves clean.
        self._invalid = 0
        self.invalidate()
        self._invalid = 0

    def _get_is_affine(self):
        return self._child.is_affine
    is_affine = property(_get_is_affine)

    def _get_is_separable(self):
        return self._child.is_separable
    is_separable = property(_get_is_separable)

    def _get_has_inverse(self):
        return self._child.has_inverse
    has_inverse = property(_get_has_inverse)
class AffineBase(Transform):
    """
    The base class of all affine transformations of any number of
    dimensions.
    """
    # Affine transforms know their affine-ness statically.
    is_affine = True

    def __init__(self, *args, **kwargs):
        Transform.__init__(self, *args, **kwargs)
        # Cache for the lazily-computed inverse (see subclasses).
        self._inverted = None

    def __array__(self, *args, **kwargs):
        # optimises the access of the transform matrix vs the superclass
        return self.get_matrix()

    @staticmethod
    def _concat(a, b):
        """
        Concatenates two transformation matrices (represented as numpy
        arrays) together.
        """
        # Matrix product in application order: *a* is applied first.
        return np.dot(b, a)

    def __eq__(self, other):
        if getattr(other, "is_affine", False):
            # Two affine transforms are equal iff their matrices are.
            return np.all(self.get_matrix() == other.get_matrix())
        return NotImplemented

    def transform(self, values):
        # An affine transform is entirely its affine part.
        return self.transform_affine(values)
    transform.__doc__ = Transform.transform.__doc__

    def transform_affine(self, values):
        raise NotImplementedError('Affine subclasses should override this '
                                  'method.')
    transform_affine.__doc__ = Transform.transform_affine.__doc__

    def transform_non_affine(self, points):
        # The non-affine part of an affine transform is the identity.
        return points
    transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__

    def transform_path(self, path):
        return self.transform_path_affine(path)
    transform_path.__doc__ = Transform.transform_path.__doc__

    def transform_path_affine(self, path):
        return Path(self.transform_affine(path.vertices),
                    path.codes, path._interpolation_steps)
    transform_path_affine.__doc__ = Transform.transform_path_affine.__doc__

    def transform_path_non_affine(self, path):
        return path
    transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__

    def get_affine(self):
        return self
    get_affine.__doc__ = Transform.get_affine.__doc__
class Affine2DBase(AffineBase):
    """
    The base class of all 2D affine transformations.

    2D affine transformations are performed using a 3x3 numpy array::

        a c e
        b d f
        0 0 1

    This class provides the read-only interface. For a mutable 2D
    affine transformation, use :class:`Affine2D`.

    Subclasses of this class will generally only need to override a
    constructor and :meth:`get_matrix` that generates a custom 3x3 matrix.
    """
    has_inverse = True

    input_dims = 2
    output_dims = 2

    def frozen(self):
        # Snapshot the current matrix into an independent mutable copy.
        return Affine2D(self.get_matrix().copy())
    frozen.__doc__ = AffineBase.frozen.__doc__

    def _get_is_separable(self):
        # Separable iff there are no cross (shear/rotation) terms.
        mtx = self.get_matrix()
        return mtx[0, 1] == 0.0 and mtx[1, 0] == 0.0
    is_separable = property(_get_is_separable)

    def to_values(self):
        """
        Return the values of the matrix as a sequence (a,b,c,d,e,f)
        """
        mtx = self.get_matrix()
        return tuple(mtx[:2].swapaxes(0, 1).flatten())

    @staticmethod
    def matrix_from_values(a, b, c, d, e, f):
        """
        (staticmethod) Create a new transformation matrix as a 3x3
        numpy array of the form::

          a c e
          b d f
          0 0 1
        """
        return np.array([[a, c, e], [b, d, f], [0.0, 0.0, 1.0]], np.float_)

    def transform_affine(self, points):
        mtx = self.get_matrix()
        if isinstance(points, MaskedArray):
            # Transform the raw data and re-attach the original mask.
            tpoints = affine_transform(points.data, mtx)
            return ma.MaskedArray(tpoints, mask=ma.getmask(points))
        return affine_transform(points, mtx)

    def transform_point(self, point):
        mtx = self.get_matrix()
        return affine_transform([point], mtx)[0]
    transform_point.__doc__ = AffineBase.transform_point.__doc__

    if DEBUG:
        # In DEBUG mode, wrap transform_affine with a type check that warns
        # about non-numpy inputs (a common performance trap).
        _transform_affine = transform_affine

        def transform_affine(self, points):
            # The major speed trap here is just converting to the
            # points to an array in the first place. If we can use
            # more arrays upstream, that should help here.
            if (not ma.isMaskedArray(points) and
                    not isinstance(points, np.ndarray)):
                warnings.warn(
                    ('A non-numpy array of type %s was passed in for ' +
                     'transformation. Please correct this.')
                    % type(points))
            return self._transform_affine(points)
        transform_affine.__doc__ = AffineBase.transform_affine.__doc__

    def inverted(self):
        # Recompute the cached inverse only when it is missing or stale.
        if self._inverted is None or self._invalid:
            mtx = self.get_matrix()
            shorthand_name = None
            if self._shorthand_name:
                shorthand_name = '(%s)-1' % self._shorthand_name
            self._inverted = Affine2D(inv(mtx), shorthand_name=shorthand_name)
            self._invalid = 0
        return self._inverted
    inverted.__doc__ = AffineBase.inverted.__doc__
class Affine2D(Affine2DBase):
    """
    A mutable 2D affine transformation.
    """

    def __init__(self, matrix=None, **kwargs):
        """
        Initialize an Affine transform from a 3x3 numpy float array::

          a c e
          b d f
          0 0 1

        If *matrix* is None, initialize with the identity transform.
        """
        Affine2DBase.__init__(self, **kwargs)
        if matrix is None:
            matrix = np.identity(3)
        elif DEBUG:
            # Validate shape/dtype only in DEBUG mode; the input is
            # trusted otherwise for speed.
            matrix = np.asarray(matrix, np.float_)
            assert matrix.shape == (3, 3)
        self._mtx = matrix
        self._invalid = 0

    def __repr__(self):
        return "Affine2D(%s)" % repr(self._mtx)

    @staticmethod
    def from_values(a, b, c, d, e, f):
        """
        (staticmethod) Create a new Affine2D instance from the given
        values::

          a c e
          b d f
          0 0 1

        .
        """
        return Affine2D(
            np.array([a, c, e, b, d, f, 0.0, 0.0, 1.0], np.float_)
            .reshape((3, 3)))

    def get_matrix(self):
        """
        Get the underlying transformation matrix as a 3x3 numpy array::

          a c e
          b d f
          0 0 1

        .
        """
        # Reading the matrix satisfies any pending invalidation.
        self._invalid = 0
        return self._mtx

    def set_matrix(self, mtx):
        """
        Set the underlying transformation matrix from a 3x3 numpy array::

          a c e
          b d f
          0 0 1

        .
        """
        self._mtx = mtx
        self.invalidate()

    def set(self, other):
        """
        Set this transformation from the frozen copy of another
        :class:`Affine2DBase` object.
        """
        if not isinstance(other, Affine2DBase):
            msg = ("'other' must be an instance of"
                   " 'matplotlib.transform.Affine2DBase'")
            raise ValueError(msg)
        self._mtx = other.get_matrix()
        self.invalidate()

    @staticmethod
    def identity():
        """
        (staticmethod) Return a new :class:`Affine2D` object that is
        the identity transform.

        Unless this transform will be mutated later on, consider using
        the faster :class:`IdentityTransform` class instead.
        """
        return Affine2D(np.identity(3))

    def clear(self):
        """
        Reset the underlying matrix to the identity transform.
        """
        self._mtx = np.identity(3)
        self.invalidate()
        return self

    def rotate(self, theta):
        """
        Add a rotation (in radians) to this transform in place.

        Returns *self*, so this method can easily be chained with more
        calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
        and :meth:`scale`.
        """
        a = np.cos(theta)
        b = np.sin(theta)
        rotate_mtx = np.array(
            [[a, -b, 0.0], [b, a, 0.0], [0.0, 0.0, 1.0]],
            np.float_)
        # Left-multiply so the rotation is applied after the existing
        # transform.
        self._mtx = np.dot(rotate_mtx, self._mtx)
        self.invalidate()
        return self

    def rotate_deg(self, degrees):
        """
        Add a rotation (in degrees) to this transform in place.

        Returns *self*, so this method can easily be chained with more
        calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
        and :meth:`scale`.
        """
        return self.rotate(degrees * np.pi / 180.)

    def rotate_around(self, x, y, theta):
        """
        Add a rotation (in radians) around the point (x, y) in place.

        Returns *self*, so this method can easily be chained with more
        calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
        and :meth:`scale`.
        """
        # Translate the pivot to the origin, rotate, translate back.
        return self.translate(-x, -y).rotate(theta).translate(x, y)

    def rotate_deg_around(self, x, y, degrees):
        """
        Add a rotation (in degrees) around the point (x, y) in place.

        Returns *self*, so this method can easily be chained with more
        calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
        and :meth:`scale`.
        """
        return self.translate(-x, -y).rotate_deg(degrees).translate(x, y)

    def translate(self, tx, ty):
        """
        Adds a translation in place.

        Returns *self*, so this method can easily be chained with more
        calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
        and :meth:`scale`.
        """
        translate_mtx = np.array(
            [[1.0, 0.0, tx], [0.0, 1.0, ty], [0.0, 0.0, 1.0]],
            np.float_)
        self._mtx = np.dot(translate_mtx, self._mtx)
        self.invalidate()
        return self

    def scale(self, sx, sy=None):
        """
        Adds a scale in place.

        If *sy* is None, the same scale is applied in both the *x*- and
        *y*-directions.

        Returns *self*, so this method can easily be chained with more
        calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
        and :meth:`scale`.
        """
        if sy is None:
            sy = sx
        scale_mtx = np.array(
            [[sx, 0.0, 0.0], [0.0, sy, 0.0], [0.0, 0.0, 1.0]],
            np.float_)
        self._mtx = np.dot(scale_mtx, self._mtx)
        self.invalidate()
        return self

    def skew(self, xShear, yShear):
        """
        Adds a skew in place.

        *xShear* and *yShear* are the shear angles along the *x*- and
        *y*-axes, respectively, in radians.

        Returns *self*, so this method can easily be chained with more
        calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
        and :meth:`scale`.
        """
        rotX = np.tan(xShear)
        rotY = np.tan(yShear)
        skew_mtx = np.array(
            [[1.0, rotX, 0.0], [rotY, 1.0, 0.0], [0.0, 0.0, 1.0]],
            np.float_)
        self._mtx = np.dot(skew_mtx, self._mtx)
        self.invalidate()
        return self

    def skew_deg(self, xShear, yShear):
        """
        Adds a skew in place.

        *xShear* and *yShear* are the shear angles along the *x*- and
        *y*-axes, respectively, in degrees.

        Returns *self*, so this method can easily be chained with more
        calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate`
        and :meth:`scale`.
        """
        return self.skew(np.deg2rad(xShear), np.deg2rad(yShear))

    def _get_is_separable(self):
        mtx = self.get_matrix()
        return mtx[0, 1] == 0.0 and mtx[1, 0] == 0.0
    is_separable = property(_get_is_separable)
class IdentityTransform(Affine2DBase):
    """
    A special class that does one thing, the identity transform, in a
    fast way.
    """
    # Shared identity matrix; never mutated, so a class attribute is safe.
    _mtx = np.identity(3)

    def frozen(self):
        # The identity is immutable, so it is its own frozen copy.
        return self
    frozen.__doc__ = Affine2DBase.frozen.__doc__

    def __repr__(self):
        return "IdentityTransform()"

    def get_matrix(self):
        return self._mtx
    get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__

    def transform(self, points):
        # No-op apart from normalising the input to an array.
        return np.asanyarray(points)
    transform.__doc__ = Affine2DBase.transform.__doc__

    # All transform variants are the same no-op for the identity.
    transform_affine = transform
    transform_affine.__doc__ = Affine2DBase.transform_affine.__doc__

    transform_non_affine = transform
    transform_non_affine.__doc__ = Affine2DBase.transform_non_affine.__doc__

    def transform_path(self, path):
        return path
    transform_path.__doc__ = Affine2DBase.transform_path.__doc__

    transform_path_affine = transform_path
    transform_path_affine.__doc__ = Affine2DBase.transform_path_affine.__doc__

    transform_path_non_affine = transform_path
    transform_path_non_affine.__doc__ = Affine2DBase.transform_path_non_affine.__doc__

    def get_affine(self):
        return self
    get_affine.__doc__ = Affine2DBase.get_affine.__doc__

    # The inverse of the identity is itself.
    inverted = get_affine
    inverted.__doc__ = Affine2DBase.inverted.__doc__
class BlendedGenericTransform(Transform):
    """
    A "blended" transform uses one transform for the *x*-direction, and
    another transform for the *y*-direction.

    This "generic" version can handle any given child transform in the
    *x*- and *y*-directions.
    """
    input_dims = 2
    output_dims = 2
    is_separable = True
    pass_through = True

    def __init__(self, x_transform, y_transform, **kwargs):
        """
        Create a new "blended" transform using *x_transform* to
        transform the *x*-axis and *y_transform* to transform the
        *y*-axis.

        You will generally not call this constructor directly but use
        the :func:`blended_transform_factory` function instead, which
        can determine automatically which kind of blended transform to
        create.
        """
        # Here we ask: "Does it blend?"
        Transform.__init__(self, **kwargs)
        self._x = x_transform
        self._y = y_transform
        self.set_children(x_transform, y_transform)
        # Cache for the lazily-computed affine part (see get_affine).
        self._affine = None

    def __eq__(self, other):
        # Note, this is an exact copy of BlendedAffine2D.__eq__
        if isinstance(other, (BlendedAffine2D, BlendedGenericTransform)):
            return (self._x == other._x) and (self._y == other._y)
        elif self._x == self._y:
            # Both children identical: compare as a plain transform.
            return self._x == other
        else:
            return NotImplemented

    def contains_branch_seperately(self, transform):
        # Note, this is an exact copy of BlendedAffine2D.contains_branch_seperately
        return self._x.contains_branch(transform), self._y.contains_branch(transform)

    @property
    def depth(self):
        return max([self._x.depth, self._y.depth])

    def contains_branch(self, other):
        # a blended transform cannot possibly contain a branch from two different transforms.
        return False

    def _get_is_affine(self):
        return self._x.is_affine and self._y.is_affine
    is_affine = property(_get_is_affine)

    def _get_has_inverse(self):
        return self._x.has_inverse and self._y.has_inverse
    has_inverse = property(_get_has_inverse)

    def frozen(self):
        return blended_transform_factory(self._x.frozen(), self._y.frozen())
    frozen.__doc__ = Transform.frozen.__doc__

    def __repr__(self):
        return "BlendedGenericTransform(%s,%s)" % (self._x, self._y)

    def transform_non_affine(self, points):
        # Fully-affine children mean there is no non-affine work to do.
        if self._x.is_affine and self._y.is_affine:
            return points
        x = self._x
        y = self._y

        if x == y and x.input_dims == 2:
            # Same 2D transform on both axes: apply it once.
            return x.transform_non_affine(points)

        # Transform each axis through its own child, coercing 1D children
        # to column vectors so the results can be concatenated.
        if x.input_dims == 2:
            x_points = x.transform_non_affine(points)[:, 0:1]
        else:
            x_points = x.transform_non_affine(points[:, 0])
            x_points = x_points.reshape((len(x_points), 1))

        if y.input_dims == 2:
            y_points = y.transform_non_affine(points)[:, 1:]
        else:
            y_points = y.transform_non_affine(points[:, 1])
            y_points = y_points.reshape((len(y_points), 1))

        if isinstance(x_points, MaskedArray) or isinstance(y_points, MaskedArray):
            # Preserve masking when either child produced a masked array.
            return ma.concatenate((x_points, y_points), 1)
        else:
            return np.concatenate((x_points, y_points), 1)
    transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__

    def inverted(self):
        return BlendedGenericTransform(self._x.inverted(), self._y.inverted())
    inverted.__doc__ = Transform.inverted.__doc__

    def get_affine(self):
        if self._invalid or self._affine is None:
            if self._x == self._y:
                self._affine = self._x.get_affine()
            else:
                x_mtx = self._x.get_affine().get_matrix()
                y_mtx = self._y.get_affine().get_matrix()
                # This works because we already know the transforms are
                # separable, though normally one would want to set b and
                # c to zero.
                mtx = np.vstack((x_mtx[0], y_mtx[1], [0.0, 0.0, 1.0]))
                self._affine = Affine2D(mtx)
            self._invalid = 0
        return self._affine
    get_affine.__doc__ = Transform.get_affine.__doc__
class BlendedAffine2D(Affine2DBase):
    """
    A "blended" transform uses one transform for the *x*-direction, and
    another transform for the *y*-direction.

    This version is an optimization for the case where both child
    transforms are of type :class:`Affine2DBase`.
    """
    is_separable = True

    def __init__(self, x_transform, y_transform, **kwargs):
        """
        Create a new "blended" transform using *x_transform* to
        transform the *x*-axis and *y_transform* to transform the
        *y*-axis.

        Both *x_transform* and *y_transform* must be 2D affine
        transforms.

        You will generally not call this constructor directly but use
        the :func:`blended_transform_factory` function instead, which
        can determine automatically which kind of blended transform to
        create.
        """
        # This specialised class is only valid for separable affine
        # children; reject anything else up front.
        is_affine = x_transform.is_affine and y_transform.is_affine
        is_separable = x_transform.is_separable and y_transform.is_separable
        is_correct = is_affine and is_separable
        if not is_correct:
            msg = ("Both *x_transform* and *y_transform* must be 2D affine"
                   " transforms.")
            raise ValueError(msg)

        Transform.__init__(self, **kwargs)
        self._x = x_transform
        self._y = y_transform
        self.set_children(x_transform, y_transform)

        Affine2DBase.__init__(self)
        # The blended matrix is computed lazily in get_matrix.
        self._mtx = None

    def __eq__(self, other):
        # Note, this is an exact copy of BlendedGenericTransform.__eq__
        if isinstance(other, (BlendedAffine2D, BlendedGenericTransform)):
            return (self._x == other._x) and (self._y == other._y)
        elif self._x == self._y:
            return self._x == other
        else:
            return NotImplemented

    def contains_branch_seperately(self, transform):
        # Note, this is an exact copy of BlendedTransform.contains_branch_seperately
        return self._x.contains_branch(transform), self._y.contains_branch(transform)

    def __repr__(self):
        return "BlendedAffine2D(%s,%s)" % (self._x, self._y)

    def get_matrix(self):
        if self._invalid:
            if self._x == self._y:
                self._mtx = self._x.get_matrix()
            else:
                x_mtx = self._x.get_matrix()
                y_mtx = self._y.get_matrix()
                # This works because we already know the transforms are
                # separable, though normally one would want to set b and
                # c to zero.
                self._mtx = np.vstack((x_mtx[0], y_mtx[1], [0.0, 0.0, 1.0]))
            self._inverted = None
            self._invalid = 0
        return self._mtx
    get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
def blended_transform_factory(x_transform, y_transform):
    """
    Create a new "blended" transform using *x_transform* to transform
    the *x*-axis and *y_transform* to transform the *y*-axis.

    A faster version of the blended transform is returned for the case
    where both child transforms are affine.
    """
    # The affine specialisation is only valid when both children are
    # themselves 2D affine transforms.
    both_affine = (isinstance(x_transform, Affine2DBase) and
                   isinstance(y_transform, Affine2DBase))
    blend_cls = BlendedAffine2D if both_affine else BlendedGenericTransform
    return blend_cls(x_transform, y_transform)
class CompositeGenericTransform(Transform):
    """
    A composite transform formed by applying transform *a* then
    transform *b*.

    This "generic" version can handle any two arbitrary
    transformations.
    """
    # Invalidation passes through this node to its children.
    pass_through = True

    def __init__(self, a, b, **kwargs):
        """
        Create a new composite transform that is the result of
        applying transform *a* then transform *b*.

        You will generally not call this constructor directly but use
        the :func:`composite_transform_factory` function instead,
        which can automatically choose the best kind of composite
        transform instance to create.
        """
        if a.output_dims != b.input_dims:
            msg = ("The output dimension of 'a' must be equal to the input"
                   " dimensions of 'b'")
            raise ValueError(msg)
        self.input_dims = a.input_dims
        self.output_dims = b.output_dims

        Transform.__init__(self, **kwargs)
        self._a = a
        self._b = b
        self.set_children(a, b)

    # NOTE: a redundant ``is_affine = property(lambda self: ...)`` that was
    # immediately shadowed by ``is_affine = property(_get_is_affine)``
    # below has been removed; behavior is unchanged.

    def frozen(self):
        self._invalid = 0
        frozen = composite_transform_factory(self._a.frozen(), self._b.frozen())
        if not isinstance(frozen, CompositeGenericTransform):
            # The factory simplified the composite; freeze the result too.
            return frozen.frozen()
        return frozen
    frozen.__doc__ = Transform.frozen.__doc__

    def _invalidate_internal(self, value, invalidating_node):
        # In some cases for a composite transform, an invalidating call to AFFINE_ONLY needs
        # to be extended to invalidate the NON_AFFINE part too. These cases are when the right
        # hand transform is non-affine and either:
        # (a) the left hand transform is non affine
        # (b) it is the left hand node which has triggered the invalidation
        if value == Transform.INVALID_AFFINE \
                and not self._b.is_affine \
                and (not self._a.is_affine or invalidating_node is self._a):
            value = Transform.INVALID
        Transform._invalidate_internal(self, value=value,
                                       invalidating_node=invalidating_node)

    def __eq__(self, other):
        if isinstance(other, (CompositeGenericTransform, CompositeAffine2D)):
            return self is other or (self._a == other._a and self._b == other._b)
        else:
            return False

    def _iter_break_from_left_to_right(self):
        # Recurse into both children, re-attaching the untouched side of
        # the composite to each yielded remainder/sub-tree pair.
        for lh_compliment, rh_compliment in self._a._iter_break_from_left_to_right():
            yield lh_compliment, rh_compliment + self._b
        for lh_compliment, rh_compliment in self._b._iter_break_from_left_to_right():
            yield self._a + lh_compliment, rh_compliment

    @property
    def depth(self):
        return self._a.depth + self._b.depth

    def _get_is_affine(self):
        return self._a.is_affine and self._b.is_affine
    is_affine = property(_get_is_affine)

    def _get_is_separable(self):
        return self._a.is_separable and self._b.is_separable
    is_separable = property(_get_is_separable)

    if DEBUG:
        def __str__(self):
            return '(%s, %s)' % (self._a, self._b)

    def __repr__(self):
        return "CompositeGenericTransform(%r, %r)" % (self._a, self._b)

    def transform_affine(self, points):
        return self.get_affine().transform(points)
    transform_affine.__doc__ = Transform.transform_affine.__doc__

    def transform_non_affine(self, points):
        # Three cases: fully affine (no-op), non-affine only on the left,
        # or a non-affine right child (which requires the full left
        # transform first).
        if self._a.is_affine and self._b.is_affine:
            return points
        elif not self._a.is_affine and self._b.is_affine:
            return self._a.transform_non_affine(points)
        else:
            return self._b.transform_non_affine(
                self._a.transform(points))
    transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__

    def transform_path_non_affine(self, path):
        # Mirrors the case analysis in transform_non_affine, but for paths.
        if self._a.is_affine and self._b.is_affine:
            return path
        elif not self._a.is_affine and self._b.is_affine:
            return self._a.transform_path_non_affine(path)
        else:
            return self._b.transform_path_non_affine(
                self._a.transform_path(path))
    transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__

    def get_affine(self):
        if not self._b.is_affine:
            # With a non-affine right child, only its trailing affine part
            # belongs to this composite's affine component.
            return self._b.get_affine()
        else:
            return Affine2D(np.dot(self._b.get_affine().get_matrix(),
                                   self._a.get_affine().get_matrix()))
    get_affine.__doc__ = Transform.get_affine.__doc__

    def inverted(self):
        # (a then b) inverted is (b inverted then a inverted).
        return CompositeGenericTransform(self._b.inverted(), self._a.inverted())
    inverted.__doc__ = Transform.inverted.__doc__

    def _get_has_inverse(self):
        return self._a.has_inverse and self._b.has_inverse
    has_inverse = property(_get_has_inverse)
class CompositeAffine2D(Affine2DBase):
    """
    A composite transform formed by applying transform *a* then transform *b*.

    This version is an optimization that handles the case where both *a*
    and *b* are 2D affines.
    """
    def __init__(self, a, b, **kwargs):
        """
        Create a new composite transform that is the result of
        applying transform *a* then transform *b*.

        Both *a* and *b* must be instances of :class:`Affine2DBase`.

        You will generally not call this constructor directly but use
        the :func:`composite_transform_factory` function instead,
        which can automatically choose the best kind of composite
        transform instance to create.
        """
        if not a.is_affine or not b.is_affine:
            raise ValueError("'a' and 'b' must be affine transforms")
        if a.output_dims != b.input_dims:
            msg = ("The output dimension of 'a' must be equal to the input"
                   " dimensions of 'b'")
            raise ValueError(msg)
        self.input_dims = a.input_dims
        self.output_dims = b.output_dims

        Affine2DBase.__init__(self, **kwargs)
        self._a = a
        self._b = b
        self.set_children(a, b)
        # The composite matrix is computed lazily in get_matrix.
        self._mtx = None

    if DEBUG:
        def __str__(self):
            return '(%s, %s)' % (self._a, self._b)

    @property
    def depth(self):
        return self._a.depth + self._b.depth

    def _iter_break_from_left_to_right(self):
        # Recurse into both children, re-attaching the untouched side of
        # the composite to each yielded remainder/sub-tree pair.
        for lh_compliment, rh_compliment in self._a._iter_break_from_left_to_right():
            yield lh_compliment, rh_compliment + self._b
        for lh_compliment, rh_compliment in self._b._iter_break_from_left_to_right():
            yield self._a + lh_compliment, rh_compliment

    def __repr__(self):
        return "CompositeAffine2D(%r, %r)" % (self._a, self._b)

    def get_matrix(self):
        if self._invalid:
            # Matrix product in application order: *a* first, then *b*.
            self._mtx = np.dot(
                self._b.get_matrix(),
                self._a.get_matrix())
            self._inverted = None
            self._invalid = 0
        return self._mtx
    get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
def composite_transform_factory(a, b):
    """
    Create a new composite transform that is the result of applying
    transform *a* then transform *b*.

    Shortcut versions of the blended transform are provided for the
    case where both child transforms are affine, or one or the other
    is the identity transform.

    Composite transforms may also be created using the '+' operator,
    e.g.::

      c = a + b
    """
    # Identity children can simply be dropped.  isinstance (rather than
    # equality) is used because TransformWrappers are mutable: only a true
    # IdentityTransform is *guaranteed* to stay an identity forever.
    if isinstance(a, IdentityTransform):
        return b
    if isinstance(b, IdentityTransform):
        return a
    # Two plain 2D affines compose into a single cached-matrix affine.
    if isinstance(a, Affine2D) and isinstance(b, Affine2D):
        return CompositeAffine2D(a, b)
    return CompositeGenericTransform(a, b)
class BboxTransform(Affine2DBase):
    """
    :class:`BboxTransform` linearly transforms points from one
    :class:`Bbox` to another :class:`Bbox`.
    """
    is_separable = True
    def __init__(self, boxin, boxout, **kwargs):
        """
        Create a new :class:`BboxTransform` that linearly transforms
        points from *boxin* to *boxout*.
        """
        if not boxin.is_bbox or not boxout.is_bbox:
            msg = "'boxin' and 'boxout' must be bbox"
            raise ValueError(msg)
        Affine2DBase.__init__(self, **kwargs)
        self._boxin = boxin
        self._boxout = boxout
        # Register with both boxes so resizing either invalidates the cache.
        self.set_children(boxin, boxout)
        # Lazily-computed 3x3 affine matrix and its cached inverse.
        self._mtx = None
        self._inverted = None
    def __repr__(self):
        return "BboxTransform(%r, %r)" % (self._boxin, self._boxout)
    def get_matrix(self):
        if self._invalid:
            inl, inb, inw, inh = self._boxin.bounds
            outl, outb, outw, outh = self._boxout.bounds
            # Scale maps boxin's extent onto boxout's; the translation
            # column moves boxin's lower-left corner onto boxout's.
            x_scale = outw / inw
            y_scale = outh / inh
            if DEBUG and (x_scale == 0 or y_scale == 0):
                raise ValueError("Transforming from or to a singular bounding box.")
            self._mtx = np.array([[x_scale, 0.0    , (-inl*x_scale+outl)],
                                  [0.0    , y_scale, (-inb*y_scale+outb)],
                                  [0.0    , 0.0    , 1.0        ]],
                                 np.float_)
            self._inverted = None
            self._invalid = 0
        return self._mtx
    get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
class BboxTransformTo(Affine2DBase):
    """
    :class:`BboxTransformTo` is a transformation that linearly
    transforms points from the unit bounding box to a given
    :class:`Bbox`.
    """
    is_separable = True
    def __init__(self, boxout, **kwargs):
        """
        Create a new :class:`BboxTransformTo` that linearly transforms
        points from the unit bounding box to *boxout*.
        """
        if not boxout.is_bbox:
            raise ValueError("'boxout' must be bbox")
        Affine2DBase.__init__(self, **kwargs)
        self._boxout = boxout
        # Invalidate the cached matrix whenever the target box changes.
        self.set_children(boxout)
        self._mtx = None
        self._inverted = None
    def __repr__(self):
        return "BboxTransformTo(%r)" % (self._boxout)
    def get_matrix(self):
        if self._invalid:
            outl, outb, outw, outh = self._boxout.bounds
            if DEBUG and (outw == 0 or outh == 0):
                raise ValueError("Transforming to a singular bounding box.")
            # Unit box -> boxout: scale by width/height, translate to corner.
            self._mtx = np.array([[outw,  0.0, outl],
                                  [ 0.0, outh, outb],
                                  [ 0.0,  0.0,  1.0]],
                                 np.float_)
            self._inverted = None
            self._invalid = 0
        return self._mtx
    get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
class BboxTransformToMaxOnly(BboxTransformTo):
    """
    :class:`BboxTransformToMaxOnly` is a transformation that linearly
    transforms points from the unit bounding box to a given
    :class:`Bbox` with a fixed upper left of (0, 0).
    """
    def __repr__(self):
        return "BboxTransformToMaxOnly(%r)" % (self._boxout)
    def get_matrix(self):
        if self._invalid:
            # Only the box maximum is used: no translation, pure scaling.
            xmax, ymax = self._boxout.max
            if DEBUG and (xmax == 0 or ymax == 0):
                raise ValueError("Transforming to a singular bounding box.")
            self._mtx = np.array([[xmax,  0.0, 0.0],
                                  [ 0.0, ymax, 0.0],
                                  [ 0.0,  0.0, 1.0]],
                                 np.float_)
            self._inverted = None
            self._invalid = 0
        return self._mtx
    get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
class BboxTransformFrom(Affine2DBase):
    """
    :class:`BboxTransformFrom` linearly transforms points from a given
    :class:`Bbox` to the unit bounding box.
    """
    is_separable = True
    def __init__(self, boxin, **kwargs):
        """
        Create a new :class:`BboxTransformFrom` that linearly transforms
        points from *boxin* to the unit bounding box.
        """
        if not boxin.is_bbox:
            raise ValueError("'boxin' must be bbox")
        Affine2DBase.__init__(self, **kwargs)
        self._boxin = boxin
        # Invalidate the cached matrix whenever the source box changes.
        self.set_children(boxin)
        self._mtx = None
        self._inverted = None
    def __repr__(self):
        return "BboxTransformFrom(%r)" % (self._boxin)
    def get_matrix(self):
        if self._invalid:
            inl, inb, inw, inh = self._boxin.bounds
            if DEBUG and (inw == 0 or inh == 0):
                raise ValueError("Transforming from a singular bounding box.")
            # Inverse of BboxTransformTo: normalize by width/height and
            # shift boxin's lower-left corner to the origin.
            x_scale = 1.0 / inw
            y_scale = 1.0 / inh
            self._mtx = np.array([[x_scale, 0.0    , (-inl*x_scale)],
                                  [0.0    , y_scale, (-inb*y_scale)],
                                  [0.0    , 0.0    , 1.0        ]],
                                 np.float_)
            self._inverted = None
            self._invalid = 0
        return self._mtx
    get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
class ScaledTranslation(Affine2DBase):
    """
    A transformation that translates by *xt* and *yt*, after *xt* and *yt*
    have been transformed by the given transform *scale_trans*.
    """
    def __init__(self, xt, yt, scale_trans, **kwargs):
        Affine2DBase.__init__(self, **kwargs)
        # Raw (untransformed) offset; scaled through scale_trans on demand.
        self._t = (xt, yt)
        self._scale_trans = scale_trans
        # Invalidate the cached matrix whenever scale_trans changes.
        self.set_children(scale_trans)
        self._mtx = None
        self._inverted = None
    def __repr__(self):
        return "ScaledTranslation(%r)" % (self._t,)
    def get_matrix(self):
        if self._invalid:
            # Pure-translation affine by the scaled offset.
            xt, yt = self._scale_trans.transform_point(self._t)
            self._mtx = np.array([[1.0, 0.0, xt],
                                  [0.0, 1.0, yt],
                                  [0.0, 0.0, 1.0]],
                                 np.float_)
            self._invalid = 0
            self._inverted = None
        return self._mtx
    get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
class TransformedPath(TransformNode):
    """
    A :class:`TransformedPath` caches a non-affine transformed copy of
    the :class:`~matplotlib.path.Path`.  This cached copy is
    automatically updated when the non-affine part of the transform
    changes.
    .. note::
        Paths are considered immutable by this class. Any update to the
        path's vertices/codes will not trigger a transform recomputation.
    """
    def __init__(self, path, transform):
        """
        Create a new :class:`TransformedPath` from the given
        :class:`~matplotlib.path.Path` and :class:`Transform`.
        """
        if not isinstance(transform, Transform):
            msg = ("'transform' must be an instance of"
                   " 'matplotlib.transform.Transform'")
            raise ValueError(msg)
        TransformNode.__init__(self)
        self._path = path
        self._transform = transform
        # Register with the transform so its invalidation reaches us.
        self.set_children(transform)
        # Caches: the non-affine-transformed path and the transformed
        # vertex-only path; both rebuilt lazily by _revalidate().
        self._transformed_path = None
        self._transformed_points = None
    def _revalidate(self):
        # only recompute if the invalidation includes the non_affine part of the transform
        if ((self._invalid & self.INVALID_NON_AFFINE == self.INVALID_NON_AFFINE)
                or self._transformed_path is None):
            self._transformed_path = \
                self._transform.transform_path_non_affine(self._path)
            # Points-only variant: transforms the raw vertices without
            # any path interpolation (codes are dropped).
            self._transformed_points = \
                Path._fast_from_codes_and_verts(
                    self._transform.transform_non_affine(self._path.vertices),
                    None,
                    {'interpolation_steps': self._path._interpolation_steps,
                     'should_simplify': self._path.should_simplify})
        self._invalid = 0
    def get_transformed_points_and_affine(self):
        """
        Return a copy of the child path, with the non-affine part of
        the transform already applied, along with the affine part of
        the path necessary to complete the transformation.  Unlike
        :meth:`get_transformed_path_and_affine`, no interpolation will
        be performed.
        """
        self._revalidate()
        return self._transformed_points, self.get_affine()
    def get_transformed_path_and_affine(self):
        """
        Return a copy of the child path, with the non-affine part of
        the transform already applied, along with the affine part of
        the path necessary to complete the transformation.
        """
        self._revalidate()
        return self._transformed_path, self.get_affine()
    def get_fully_transformed_path(self):
        """
        Return a fully-transformed copy of the child path.
        """
        self._revalidate()
        return self._transform.transform_path_affine(self._transformed_path)
    def get_affine(self):
        # Delegate to the wrapped transform's affine part.
        return self._transform.get_affine()
def nonsingular(vmin, vmax, expander=0.001, tiny=1e-15, increasing=True):
    '''
    Modify the endpoints of a range as needed to avoid singularities.

    *vmin*, *vmax*
        the initial endpoints.
    *tiny*
        threshold for the ratio of the interval to the maximum absolute
        value of its endpoints.  If the interval is smaller than
        this, it will be expanded.  This value should be around
        1e-15 or larger; otherwise the interval will be approaching
        the double precision resolution limit.
    *expander*
        fractional amount by which *vmin* and *vmax* are expanded if
        the original interval is too small, based on *tiny*.
    *increasing*: [True | False]
        If True (default), swap *vmin*, *vmax* if *vmin* > *vmax*

    Returns *vmin*, *vmax*, expanded and/or swapped if necessary.
    If either input is inf or NaN, or if both inputs are 0,
    returns -*expander*, *expander*.
    '''
    # Non-finite endpoints cannot be repaired meaningfully; fall back to a
    # small symmetric interval around zero.
    if not (np.isfinite(vmin) and np.isfinite(vmax)):
        return -expander, expander

    swapped = vmax < vmin
    if swapped:
        vmin, vmax = vmax, vmin

    # Expand intervals that are degenerate relative to the magnitude of
    # their endpoints (including the exactly-equal case).
    if vmax - vmin <= max(abs(vmin), abs(vmax)) * tiny:
        if vmax == 0 and vmin == 0:
            vmin, vmax = -expander, expander
        else:
            vmin -= expander * abs(vmin)
            vmax += expander * abs(vmax)

    # Restore the caller's original ordering if requested.
    if swapped and not increasing:
        vmin, vmax = vmax, vmin
    return vmin, vmax
def interval_contains(interval, val):
    """
    Return whether *val* lies in the closed *interval* ``(a, b)``,
    regardless of whether the endpoints are given in increasing or
    decreasing order.
    """
    lo, hi = interval
    if hi < lo:
        lo, hi = hi, lo
    return lo <= val <= hi
def interval_contains_open(interval, val):
    """
    Return whether *val* lies strictly inside the open *interval*
    ``(a, b)``, regardless of endpoint ordering.
    """
    lo, hi = interval
    if hi < lo:
        lo, hi = hi, lo
    return lo < val < hi
def offset_copy(trans, fig=None, x=0.0, y=0.0, units='inches'):
    '''
    Return a new transform with an added offset.

    args:
      trans is any transform
    kwargs:
      fig is the current figure; it can be None if units are 'dots'
      x, y give the offset
      units is 'inches', 'points' or 'dots'
    '''
    # Dot offsets are device units and need no figure for conversion.
    if units == 'dots':
        return trans + Affine2D().translate(x, y)
    # inches/points are resolved through the figure's dpi scale transform.
    if fig is None:
        raise ValueError('For units of inches or points a fig kwarg is needed')
    if units not in ('inches', 'points'):
        raise ValueError('units must be dots, points, or inches')
    if units == 'points':
        # 72 points per inch.
        x, y = x / 72.0, y / 72.0
    return trans + ScaledTranslation(x, y, fig.dpi_scale_trans)
| 34.159899 | 94 | 0.591541 |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import numpy as np
from numpy import ma
from matplotlib._path import (affine_transform, count_bboxes_overlapping_bbox,
update_path_extents)
from numpy.linalg import inv
from weakref import WeakValueDictionary
import warnings
try:
set
except NameError:
from sets import Set as set
from .path import Path
DEBUG = False
MaskedArray = ma.MaskedArray
class TransformNode(object):
    """
    The base class for anything that participates in the transform tree
    and needs to invalidate its parents or be invalidated.  This includes
    classes that are not really transforms, such as bounding boxes, since
    some transforms depend on bounding boxes to compute their values.
    """
    _gid = 0
    # Invalidation may affect only the affine part.  If the non-affine
    # part changes, we invalidate with these bit flags (combinable).
    INVALID_NON_AFFINE = 1
    INVALID_AFFINE = 2
    INVALID = INVALID_NON_AFFINE | INVALID_AFFINE
    # Overridden in subclasses that are purely affine / bbox-like.
    is_affine = False
    is_bbox = False
    # If pass_through is True, invalidation is always propagated to
    # parents even when this node's own invalid state did not change.
    pass_through = False
    def __init__(self, shorthand_name=None):
        # Parents are stored in a WeakValueDictionary so that references
        # from children do not keep otherwise-dead parents alive.
        self._parents = WeakValueDictionary()
        # TransformNodes start out as invalid until their values are
        # computed for the first time.
        self._invalid = 1
        self._shorthand_name = shorthand_name or ''
    if DEBUG:
        def __str__(self):
            # either just return the name of this TransformNode, or it's repr
            return self._shorthand_name or repr(self)
    def __getstate__(self):
        # Weak dictionaries cannot be pickled; convert to a regular dict.
        d = self.__dict__.copy()
        d['_parents'] = dict(six.iteritems(self._parents))
        return d
    def __setstate__(self, data_dict):
        self.__dict__ = data_dict
        # Restore the weak-reference semantics dropped in __getstate__.
        self._parents = WeakValueDictionary(self._parents)
    def __copy__(self, *args):
        raise NotImplementedError(
            "TransformNode instances can not be copied. " +
            "Consider using frozen() instead.")
    __deepcopy__ = __copy__
    def invalidate(self):
        """
        Invalidate this node and propagate the invalidation to its parents.
        Affine nodes only ever invalidate their affine part.
        """
        value = self.INVALID
        if self.is_affine:
            value = self.INVALID_AFFINE
        return self._invalidate_internal(value, invalidating_node=self)
    def _invalidate_internal(self, value, invalidating_node):
        # If a node is already at this (or a stronger) invalidation level,
        # there is no need to propagate up the transform stack as it will
        # already have been invalidated.
        # N.B This makes the invalidation sticky, once a transform has been
        # invalidated as NON_AFFINE, then it will always be invalidated as
        # NON_AFFINE even when triggered with a AFFINE_ONLY invalidation.
        # In most cases this is not a problem (i.e. for interactive panning and
        # zooming) and the only side effect will be on performance.
        status_changed = self._invalid < value
        if self.pass_through or status_changed:
            self._invalid = value
            for parent in list(six.itervalues(self._parents)):
                parent._invalidate_internal(value=value,
                                            invalidating_node=self)
    def set_children(self, *children):
        """
        Register *children* as dependencies of this node so that their
        invalidation propagates here.
        """
        for child in children:
            child._parents[id(self)] = self
    if DEBUG:
        _set_children = set_children
        def set_children(self, *children):
            # In DEBUG mode also remember the children for graph output.
            self._set_children(*children)
            self._children = children
        set_children.__doc__ = _set_children.__doc__
    def frozen(self):
        """
        Return a frozen copy of this node.  The base implementation
        returns *self*; mutable subclasses override this.
        """
        return self
    if DEBUG:
        def write_graphviz(self, fobj, highlight=[]):
            # Dump the transform dependency tree in graphviz "dot" format.
            seen = set()
            def recurse(root):
                if root in seen:
                    return
                seen.add(root)
                props = {}
                label = root.__class__.__name__
                if root._invalid:
                    label = '[%s]' % label
                if root in highlight:
                    props['style'] = 'bold'
                props['shape'] = 'box'
                props['label'] = '"%s"' % label
                props = ' '.join(['%s=%s' % (key, val)
                                  for key, val
                                  in six.iteritems(props)])
                fobj.write('%s [%s];\n' %
                           (hash(root), props))
                if hasattr(root, '_children'):
                    for child in root._children:
                        name = '?'
                        for key, val in six.iteritems(root.__dict__):
                            if val is child:
                                name = key
                                break
                        fobj.write('"%s" -> "%s" [label="%s", fontsize=10];\n'
                                   % (hash(root),
                                      hash(child),
                                      name))
                        recurse(child)
            fobj.write("digraph G {\n")
            recurse(self)
            fobj.write("}\n")
class BboxBase(TransformNode):
    """
    This is the base class of all bounding boxes, and provides
    read-only access to its data.  A mutable bounding box is provided
    by the :class:`Bbox` class.

    The canonical representation is as two points, with no restrictions
    on their ordering within each dimension.
    """
    is_bbox = True
    is_affine = True
    #* Redundant: Removed for performance
    #
    # def __init__(self):
    #     TransformNode.__init__(self)
    if DEBUG:
        def _check(points):
            # Warn about degenerate or masked bbox data in DEBUG mode.
            if ma.isMaskedArray(points):
                warnings.warn("Bbox bounds are a masked array.")
            points = np.asarray(points)
            if (points[1, 0] - points[0, 0] == 0 or
                points[1, 1] - points[0, 1] == 0):
                warnings.warn("Singular Bbox.")
        _check = staticmethod(_check)
    def frozen(self):
        # An immutable snapshot of the current points.
        return Bbox(self.get_points().copy())
    # BUGFIX: previously copied TransformNode's *class* docstring; the
    # intent is clearly to inherit the frozen() method's docstring.
    frozen.__doc__ = TransformNode.frozen.__doc__
    def __array__(self, *args, **kwargs):
        return self.get_points()
    def is_unit(self):
        """Return True if this is the unit box (0, 0) to (1, 1)."""
        return list(self.get_points().flatten()) == [0., 0., 1., 1.]
    def _get_x0(self):
        return self.get_points()[0, 0]
    x0 = property(_get_x0, None, None, """
         (property) :attr:`x0` is the first of the pair of *x* coordinates that
         define the bounding box. :attr:`x0` is not guaranteed to be
         less than :attr:`x1`.  If you require that, use :attr:`xmin`.""")
    def _get_y0(self):
        return self.get_points()[0, 1]
    y0 = property(_get_y0, None, None, """
         (property) :attr:`y0` is the first of the pair of *y* coordinates that
         define the bounding box. :attr:`y0` is not guaranteed to be
         less than :attr:`y1`.  If you require that, use :attr:`ymin`.""")
    def _get_x1(self):
        return self.get_points()[1, 0]
    x1 = property(_get_x1, None, None, """
         (property) :attr:`x1` is the second of the pair of *x* coordinates
         that define the bounding box. :attr:`x1` is not guaranteed to be
         greater than :attr:`x0`.  If you require that, use :attr:`xmax`.""")
    def _get_y1(self):
        return self.get_points()[1, 1]
    y1 = property(_get_y1, None, None, """
         (property) :attr:`y1` is the second of the pair of *y* coordinates
         that define the bounding box. :attr:`y1` is not guaranteed to be
         greater than :attr:`y0`.  If you require that, use :attr:`ymax`.""")
    def _get_p0(self):
        return self.get_points()[0]
    p0 = property(_get_p0, None, None, """
         (property) :attr:`p0` is the first pair of (*x*, *y*) coordinates
         that define the bounding box.  It is not guaranteed to be the
         bottom-left corner.  For that, use :attr:`min`.""")
    def _get_p1(self):
        return self.get_points()[1]
    p1 = property(_get_p1, None, None, """
         (property) :attr:`p1` is the second pair of (*x*, *y*) coordinates
         that define the bounding box.  It is not guaranteed to be the
         top-right corner.  For that, use :attr:`max`.""")
    def _get_xmin(self):
        return min(self.get_points()[:, 0])
    xmin = property(_get_xmin, None, None, """
        (property) :attr:`xmin` is the left edge of the bounding box.""")
    def _get_ymin(self):
        return min(self.get_points()[:, 1])
    ymin = property(_get_ymin, None, None, """
        (property) :attr:`ymin` is the bottom edge of the bounding box.""")
    def _get_xmax(self):
        return max(self.get_points()[:, 0])
    xmax = property(_get_xmax, None, None, """
        (property) :attr:`xmax` is the right edge of the bounding box.""")
    def _get_ymax(self):
        return max(self.get_points()[:, 1])
    ymax = property(_get_ymax, None, None, """
        (property) :attr:`ymax` is the top edge of the bounding box.""")
    def _get_min(self):
        return [min(self.get_points()[:, 0]),
                min(self.get_points()[:, 1])]
    min = property(_get_min, None, None, """
        (property) :attr:`min` is the bottom-left corner of the bounding
        box.""")
    def _get_max(self):
        return [max(self.get_points()[:, 0]),
                max(self.get_points()[:, 1])]
    max = property(_get_max, None, None, """
        (property) :attr:`max` is the top-right corner of the bounding box.""")
    def _get_intervalx(self):
        return self.get_points()[:, 0]
    intervalx = property(_get_intervalx, None, None, """
        (property) :attr:`intervalx` is the pair of *x* coordinates that define
        the bounding box. It is not guaranteed to be sorted from left to
        right.""")
    def _get_intervaly(self):
        return self.get_points()[:, 1]
    intervaly = property(_get_intervaly, None, None, """
        (property) :attr:`intervaly` is the pair of *y* coordinates that define
        the bounding box.  It is not guaranteed to be sorted from bottom to
        top.""")
    def _get_width(self):
        points = self.get_points()
        return points[1, 0] - points[0, 0]
    width = property(_get_width, None, None, """
        (property) The width of the bounding box.  It may be negative if
        :attr:`x1` < :attr:`x0`.""")
    def _get_height(self):
        points = self.get_points()
        return points[1, 1] - points[0, 1]
    height = property(_get_height, None, None, """
        (property) The height of the bounding box.  It may be negative if
        :attr:`y1` < :attr:`y0`.""")
    def _get_size(self):
        points = self.get_points()
        return points[1] - points[0]
    size = property(_get_size, None, None, """
        (property) The width and height of the bounding box.  May be negative,
        in the same way as :attr:`width` and :attr:`height`.""")
    def _get_bounds(self):
        x0, y0, x1, y1 = self.get_points().flatten()
        return (x0, y0, x1 - x0, y1 - y0)
    bounds = property(_get_bounds, None, None, """
        (property) Returns (:attr:`x0`, :attr:`y0`, :attr:`width`,
        :attr:`height`).""")
    def _get_extents(self):
        return self.get_points().flatten().copy()
    extents = property(_get_extents, None, None, """
        (property) Returns (:attr:`x0`, :attr:`y0`, :attr:`x1`,
        :attr:`y1`).""")
    def get_points(self):
        """
        Return the points of the bounding box as an array of the form
        ``[[x0, y0], [x1, y1]]``.  Must be overridden in subclasses.
        """
        # BUGFIX: this previously *returned* a NotImplementedError instance
        # instead of raising it, silently handing callers an exception
        # object where an array was expected.
        raise NotImplementedError()
    def containsx(self, x):
        """Return True if *x* is between or equal to the *x* bounds."""
        x0, x1 = self.intervalx
        return ((x0 < x1
                 and (x >= x0 and x <= x1))
                or (x >= x1 and x <= x0))
    def containsy(self, y):
        """Return True if *y* is between or equal to the *y* bounds."""
        y0, y1 = self.intervaly
        return ((y0 < y1
                 and (y >= y0 and y <= y1))
                or (y >= y1 and y <= y0))
    def contains(self, x, y):
        """Return True if (*x*, *y*) is inside (closed) this bounding box."""
        return self.containsx(x) and self.containsy(y)
    def overlaps(self, other):
        """Return True if this bounding box overlaps with *other*."""
        ax1, ay1, ax2, ay2 = self._get_extents()
        bx1, by1, bx2, by2 = other._get_extents()
        # Any NaN extent makes the overlap undecidable; treat as disjoint.
        if any(np.isnan(v) for v in [ax1, ay1, ax2, ay2, bx1, by1, bx2, by2]):
            return False
        # Normalize both boxes to (min, max) ordering before comparing.
        if ax2 < ax1:
            ax2, ax1 = ax1, ax2
        if ay2 < ay1:
            ay2, ay1 = ay1, ay2
        if bx2 < bx1:
            bx2, bx1 = bx1, bx2
        if by2 < by1:
            by2, by1 = by1, by2
        return not ((bx2 < ax1) or
                    (by2 < ay1) or
                    (bx1 > ax2) or
                    (by1 > ay2))
    def fully_containsx(self, x):
        """Return True if *x* is strictly between the *x* bounds."""
        x0, x1 = self.intervalx
        return ((x0 < x1
                 and (x > x0 and x < x1))
                or (x > x1 and x < x0))
    def fully_containsy(self, y):
        """Return True if *y* is strictly between the *y* bounds."""
        y0, y1 = self.intervaly
        return ((y0 < y1
                 and (y > y0 and y < y1))
                or (y > y1 and y < y0))
    def fully_contains(self, x, y):
        """Return True if (*x*, *y*) is strictly inside this bounding box."""
        return self.fully_containsx(x) \
            and self.fully_containsy(y)
    def fully_overlaps(self, other):
        """Return True if this box overlaps *other*, excluding shared edges."""
        ax1, ay1, ax2, ay2 = self._get_extents()
        bx1, by1, bx2, by2 = other._get_extents()
        if ax2 < ax1:
            ax2, ax1 = ax1, ax2
        if ay2 < ay1:
            ay2, ay1 = ay1, ay2
        if bx2 < bx1:
            bx2, bx1 = bx1, bx2
        if by2 < by1:
            by2, by1 = by1, by2
        return not ((bx2 <= ax1) or
                    (by2 <= ay1) or
                    (bx1 >= ax2) or
                    (by1 >= ay2))
    def transformed(self, transform):
        """
        Return a new :class:`Bbox` object, statically transformed by
        the given transform.  Three corners are transformed so the
        result remains axis-aligned even for rotations/shears.
        """
        pts = self.get_points()
        ll, ul, lr = transform.transform(np.array([pts[0],
            [pts[0, 0], pts[1, 1]], [pts[1, 0], pts[0, 1]]]))
        return Bbox([ll, [lr[0], ul[1]]])
    def inverse_transformed(self, transform):
        """
        Return a new :class:`Bbox` object, statically transformed by
        the inverse of the given transform.
        """
        return self.transformed(transform.inverted())
    # Anchor-name -> fractional (x, y) position used by anchored().
    coefs = {'C': (0.5, 0.5),
             'SW': (0, 0),
             'S': (0.5, 0),
             'SE': (1.0, 0),
             'E': (1.0, 0.5),
             'NE': (1.0, 1.0),
             'N': (0.5, 1.0),
             'NW': (0, 1.0),
             'W': (0, 0.5)}
    def anchored(self, c, container=None):
        """
        Return a copy of this box, anchored at compass point *c* (a key
        of :attr:`coefs` or an (cx, cy) pair) within *container*
        (defaults to the box itself).
        """
        if container is None:
            container = self
        l, b, w, h = container.bounds
        if isinstance(c, six.string_types):
            cx, cy = self.coefs[c]
        else:
            cx, cy = c
        L, B, W, H = self.bounds
        return Bbox(self._points +
                    [(l + cx * (w - W)) - L,
                     (b + cy * (h - H)) - B])
    def shrunk(self, mx, my):
        """
        Return a copy shrunk by factors *mx*, *my* about its lower-left
        corner.
        """
        w, h = self.size
        return Bbox([self._points[0],
                    self._points[0] + [mx * w, my * h]])
    def shrunk_to_aspect(self, box_aspect, container=None, fig_aspect=1.0):
        """
        Return a copy shrunk so that it is as large as it can be while
        having the desired *box_aspect* (height / width), corrected for
        the figure aspect *fig_aspect*.
        """
        if box_aspect <= 0 or fig_aspect <= 0:
            raise ValueError("'box_aspect' and 'fig_aspect' must be positive")
        if container is None:
            container = self
        w, h = container.size
        H = w * box_aspect / fig_aspect
        if H <= h:
            W = w
        else:
            W = h * fig_aspect / box_aspect
            H = h
        return Bbox([self._points[0],
                     self._points[0] + (W, H)])
    def splitx(self, *args):
        """
        Split the box into vertical strips at the fractional *x*
        positions given in *args*; returns a list of Bboxes.
        """
        boxes = []
        xf = [0] + list(args) + [1]
        x0, y0, x1, y1 = self._get_extents()
        w = x1 - x0
        for xf0, xf1 in zip(xf[:-1], xf[1:]):
            boxes.append(Bbox([[x0 + xf0 * w, y0], [x0 + xf1 * w, y1]]))
        return boxes
    def splity(self, *args):
        """
        Split the box into horizontal strips at the fractional *y*
        positions given in *args*; returns a list of Bboxes.
        """
        boxes = []
        yf = [0] + list(args) + [1]
        x0, y0, x1, y1 = self._get_extents()
        h = y1 - y0
        for yf0, yf1 in zip(yf[:-1], yf[1:]):
            boxes.append(Bbox([[x0, y0 + yf0 * h], [x1, y0 + yf1 * h]]))
        return boxes
    def count_contains(self, vertices):
        """
        Count the number of vertices (an Nx2 array) contained in this
        (closed) bounding box.
        """
        if len(vertices) == 0:
            return 0
        vertices = np.asarray(vertices)
        x0, y0, x1, y1 = self._get_extents()
        with np.errstate(invalid='ignore'):
            # A point is inside when the sign of its offset from the two
            # opposite edges cancels in both dimensions.
            dx0 = np.sign(vertices[:, 0] - x0)
            dy0 = np.sign(vertices[:, 1] - y0)
            dx1 = np.sign(vertices[:, 0] - x1)
            dy1 = np.sign(vertices[:, 1] - y1)
            inside = ((abs(dx0 + dx1) + abs(dy0 + dy1)) == 0)
        return np.sum(inside)
    def count_overlaps(self, bboxes):
        """
        Count the number of bounding boxes in *bboxes* that overlap this
        one.
        """
        return count_bboxes_overlapping_bbox(self, [np.array(x) for x in bboxes])
    def expanded(self, sw, sh):
        """
        Return a copy expanded about its center by factors *sw*, *sh*.
        """
        width = self.width
        height = self.height
        deltaw = (sw * width - width) / 2.0
        deltah = (sh * height - height) / 2.0
        a = np.array([[-deltaw, -deltah], [deltaw, deltah]])
        return Bbox(self._points + a)
    def padded(self, p):
        """Return a copy padded by *p* on all four sides."""
        points = self.get_points()
        return Bbox(points + [[-p, -p], [p, p]])
    def translated(self, tx, ty):
        """Return a copy translated by *tx*, *ty*."""
        return Bbox(self._points + (tx, ty))
    def corners(self):
        """
        Return an array of the four corner points, in the order
        lower-left, upper-left, lower-right, upper-right.
        """
        l, b, r, t = self.get_points().flatten()
        return np.array([[l, b], [l, t], [r, b], [r, t]])
    def rotated(self, radians):
        """
        Return the axis-aligned box that bounds this box rotated by
        *radians* about the origin.
        """
        corners = self.corners()
        corners_rotated = Affine2D().rotate(radians).transform(corners)
        bbox = Bbox.unit()
        bbox.update_from_data_xy(corners_rotated, ignore=True)
        return bbox
    @staticmethod
    def union(bboxes):
        """Return a :class:`Bbox` that contains all of the given *bboxes*."""
        if not len(bboxes):
            raise ValueError("'bboxes' cannot be empty")
        if len(bboxes) == 1:
            return bboxes[0]
        x0 = np.inf
        y0 = np.inf
        x1 = -np.inf
        y1 = -np.inf
        for bbox in bboxes:
            points = bbox.get_points()
            xs = points[:, 0]
            ys = points[:, 1]
            x0 = min(x0, np.min(xs))
            y0 = min(y0, np.min(ys))
            x1 = max(x1, np.max(xs))
            y1 = max(y1, np.max(ys))
        return Bbox.from_extents(x0, y0, x1, y1)
    @staticmethod
    def intersection(bbox1, bbox2):
        """
        Return the intersection of the two bboxes as a new Bbox, or None
        if they do not intersect.
        """
        intersects = not (bbox2.xmin > bbox1.xmax or
                          bbox2.xmax < bbox1.xmin or
                          bbox2.ymin > bbox1.ymax or
                          bbox2.ymax < bbox1.ymin)
        if intersects:
            x0 = max([bbox1.xmin, bbox2.xmin])
            x1 = min([bbox1.xmax, bbox2.xmax])
            y0 = max([bbox1.ymin, bbox2.ymin])
            y1 = min([bbox1.ymax, bbox2.ymax])
            return Bbox.from_extents(x0, y0, x1, y1)
        return None
class Bbox(BboxBase):
    """
    A mutable bounding box.
    """
    def __init__(self, points, **kwargs):
        """
        *points*: a 2x2 numpy array of the form [[x0, y0], [x1, y1]]
        """
        BboxBase.__init__(self, **kwargs)
        points = np.asarray(points, np.float_)
        if points.shape != (2, 2):
            raise ValueError('Bbox points must be of the form '
                             '"[[x0, y0], [x1, y1]]".')
        self._points = points
        # Smallest positive values seen so far (used for log-scale limits).
        self._minpos = np.array([0.0000001, 0.0000001])
        self._ignore = True
        # it is helpful in some contexts to know if the bbox is a
        # default or has been mutated; we store the orig points to
        # support the mutated methods
        self._points_orig = self._points.copy()
    if DEBUG:
        ___init__ = __init__
        def __init__(self, points, **kwargs):
            self._check(points)
            self.___init__(points, **kwargs)
        def invalidate(self):
            self._check(self._points)
            TransformNode.invalidate(self)
    @staticmethod
    def unit():
        """Create a new unit :class:`Bbox` from (0, 0) to (1, 1)."""
        # BUGFIX: use np.float_ (as everywhere else in this module) instead
        # of the deprecated builtin alias np.float.
        return Bbox(np.array([[0.0, 0.0], [1.0, 1.0]], np.float_))
    @staticmethod
    def null():
        """Create a new inverted-infinite :class:`Bbox` (null box)."""
        # BUGFIX: np.float -> np.float_ (deprecated alias; removed in
        # NumPy >= 1.24).
        return Bbox(np.array([[np.inf, np.inf], [-np.inf, -np.inf]], np.float_))
    @staticmethod
    def from_bounds(x0, y0, width, height):
        """
        Create a new :class:`Bbox` from *x0*, *y0*, *width* and *height*.
        *width* and *height* may be negative.
        """
        return Bbox.from_extents(x0, y0, x0 + width, y0 + height)
    @staticmethod
    def from_extents(*args):
        """
        Create a new Bbox from *left*, *bottom*, *right* and *top*.
        The *y*-axis increases upwards.
        """
        points = np.array(args, dtype=np.float_).reshape(2, 2)
        return Bbox(points)
    def __format__(self, fmt):
        return (
            'Bbox(x0={0.x0:{1}}, y0={0.y0:{1}}, x1={0.x1:{1}}, y1={0.y1:{1}})'.
            format(self, fmt))
    def __str__(self):
        return format(self, '')
    def __repr__(self):
        return 'Bbox([[{0.x0}, {0.y0}], [{0.x1}, {0.y1}]])'.format(self)
    def ignore(self, value):
        """
        Set whether the existing bounds of the box should be ignored
        by subsequent calls to :meth:`update_from_data` or
        :meth:`update_from_data_xy`.
        """
        self._ignore = value
    def update_from_data(self, x, y, ignore=None):
        """
        Update the bounds of the :class:`Bbox` based on the passed in
        data.  Deprecated in favour of :meth:`update_from_data_xy`,
        which avoids the memory copy made here.
        """
        warnings.warn(
            "update_from_data requires a memory copy -- please replace with "
            "update_from_data_xy")
        xy = np.hstack((x.reshape((len(x), 1)), y.reshape((len(y), 1))))
        return self.update_from_data_xy(xy, ignore)
    def update_from_path(self, path, ignore=None, updatex=True, updatey=True):
        """
        Update the bounds of the :class:`Bbox` based on the passed in
        *path*.  *ignore* (default: self._ignore) discards the existing
        bounds; *updatex*/*updatey* select which dimensions to update.
        """
        if ignore is None:
            ignore = self._ignore
        if path.vertices.size == 0:
            return
        # update_path_extents is the C helper from matplotlib._path;
        # presumably returns (new points, new minpos, changed flag) —
        # inferred from usage here, confirm against _path sources.
        points, minpos, changed = update_path_extents(
            path, None, self._points, self._minpos, ignore)
        if changed:
            self.invalidate()
            if updatex:
                self._points[:, 0] = points[:, 0]
                self._minpos[0] = minpos[0]
            if updatey:
                self._points[:, 1] = points[:, 1]
                self._minpos[1] = minpos[1]
    def update_from_data_xy(self, xy, ignore=None, updatex=True, updatey=True):
        """
        Update the bounds of the :class:`Bbox` based on the passed in
        *xy* (an Nx2 array of points).
        """
        if len(xy) == 0:
            return
        path = Path(xy)
        self.update_from_path(path, ignore=ignore,
                              updatex=updatex, updatey=updatey)
    def _set_x0(self, val):
        self._points[0, 0] = val
        self.invalidate()
    x0 = property(BboxBase._get_x0, _set_x0)
    def _set_y0(self, val):
        self._points[0, 1] = val
        self.invalidate()
    y0 = property(BboxBase._get_y0, _set_y0)
    def _set_x1(self, val):
        self._points[1, 0] = val
        self.invalidate()
    x1 = property(BboxBase._get_x1, _set_x1)
    def _set_y1(self, val):
        self._points[1, 1] = val
        self.invalidate()
    y1 = property(BboxBase._get_y1, _set_y1)
    def _set_p0(self, val):
        self._points[0] = val
        self.invalidate()
    p0 = property(BboxBase._get_p0, _set_p0)
    def _set_p1(self, val):
        self._points[1] = val
        self.invalidate()
    p1 = property(BboxBase._get_p1, _set_p1)
    def _set_intervalx(self, interval):
        self._points[:, 0] = interval
        self.invalidate()
    intervalx = property(BboxBase._get_intervalx, _set_intervalx)
    def _set_intervaly(self, interval):
        self._points[:, 1] = interval
        self.invalidate()
    intervaly = property(BboxBase._get_intervaly, _set_intervaly)
    def _set_bounds(self, bounds):
        l, b, w, h = bounds
        points = np.array([[l, b], [l + w, b + h]], np.float_)
        # Only invalidate when the points actually change.
        if np.any(self._points != points):
            self._points = points
            self.invalidate()
    bounds = property(BboxBase._get_bounds, _set_bounds)
    def _get_minpos(self):
        return self._minpos
    minpos = property(_get_minpos)
    def _get_minposx(self):
        return self._minpos[0]
    minposx = property(_get_minposx)
    def _get_minposy(self):
        return self._minpos[1]
    minposy = property(_get_minposy)
    def get_points(self):
        """
        Get the points of the bounding box directly as an array of the
        form: [[x0, y0], [x1, y1]].
        """
        self._invalid = 0
        return self._points
    def set_points(self, points):
        """
        Set the points of the bounding box directly from an array of the
        form [[x0, y0], [x1, y1]].  No error checking is performed.
        """
        if np.any(self._points != points):
            self._points = points
            self.invalidate()
    def set(self, other):
        """
        Set this bounding box from the "frozen" bounds of another
        :class:`Bbox`.
        """
        if np.any(self._points != other.get_points()):
            self._points = other.get_points()
            self.invalidate()
    def mutated(self):
        """Return whether the bbox has changed since init."""
        return self.mutatedx() or self.mutatedy()
    def mutatedx(self):
        """Return whether the x-limits have changed since init."""
        return (self._points[0, 0] != self._points_orig[0, 0] or
                self._points[1, 0] != self._points_orig[1, 0])
    def mutatedy(self):
        """Return whether the y-limits have changed since init."""
        return (self._points[0, 1] != self._points_orig[0, 1] or
                self._points[1, 1] != self._points_orig[1, 1])
class TransformedBbox(BboxBase):
    """
    A :class:`Bbox` that is automatically transformed by a given
    transform.  When either the child bounding box or transform
    changes, the bounds of this bbox will update accordingly.
    """
    def __init__(self, bbox, transform, **kwargs):
        """
        *bbox*: a child :class:`Bbox`
        *transform*: a 2D :class:`Transform` applied to *bbox*
        """
        if not bbox.is_bbox:
            raise ValueError("'bbox' is not a bbox")
        if not isinstance(transform, Transform):
            msg = ("'transform' must be an instance of"
                   " 'matplotlib.transform.Transform'")
            raise ValueError(msg)
        if transform.input_dims != 2 or transform.output_dims != 2:
            msg = "The input and output dimensions of 'transform' must be 2"
            raise ValueError(msg)
        BboxBase.__init__(self, **kwargs)
        self._bbox = bbox
        self._transform = transform
        # Changes to either dependency invalidate the cached points.
        self.set_children(bbox, transform)
        self._points = None
    def __repr__(self):
        return "TransformedBbox(%r, %r)" % (self._bbox, self._transform)
    def get_points(self):
        if self._invalid:
            points = self._transform.transform(self._bbox.get_points())
            # Replace any masked values with 0 so downstream math is safe.
            points = np.ma.filled(points, 0.0)
            self._points = points
            self._invalid = 0
        return self._points
    get_points.__doc__ = Bbox.get_points.__doc__
    if DEBUG:
        _get_points = get_points
        def get_points(self):
            points = self._get_points()
            self._check(points)
            return points
class Transform(TransformNode):
input_dims = None
output_dims = None
has_inverse = False
is_separable = False
def __add__(self, other):
if isinstance(other, Transform):
return composite_transform_factory(self, other)
raise TypeError(
"Can not add Transform to object of type '%s'" % type(other))
def __radd__(self, other):
if isinstance(other, Transform):
return composite_transform_factory(other, self)
raise TypeError(
"Can not add Transform to object of type '%s'" % type(other))
def __eq__(self, other):
# equality is based on transform object id. Hence:
# Transform() != Transform().
# Some classes, such as TransformWrapper & AffineBase, will override.
return self is other
def _iter_break_from_left_to_right(self):
yield IdentityTransform(), self
@property
def depth(self):
return 1
def contains_branch(self, other):
if self.depth < other.depth:
return False
# check that a subtree is equal to other (starting from self)
for _, sub_tree in self._iter_break_from_left_to_right():
if sub_tree == other:
return True
return False
def contains_branch_seperately(self, other_transform):
if self.output_dims != 2:
raise ValueError('contains_branch_seperately only supports '
'transforms with 2 output dimensions')
# for a non-blended transform each seperate dimension is the same, so
# just return the appropriate shape.
return [self.contains_branch(other_transform)] * 2
def __sub__(self, other):
# we only know how to do this operation if other is a Transform.
if not isinstance(other, Transform):
return NotImplemented
for remainder, sub_tree in self._iter_break_from_left_to_right():
if sub_tree == other:
return remainder
for remainder, sub_tree in other._iter_break_from_left_to_right():
if sub_tree == self:
if not remainder.has_inverse:
raise ValueError("The shortcut cannot be computed since "
"other's transform includes a non-invertable component.")
return remainder.inverted()
if other.has_inverse:
return self + other.inverted()
else:
raise ValueError('It is not possible to compute transA - transB '
'since transB cannot be inverted and there is no '
'shortcut possible.')
def __array__(self, *args, **kwargs):
return self.get_affine().get_matrix()
def transform(self, values):
values = np.asanyarray(values)
ndim = values.ndim
values = values.reshape((-1, self.input_dims))
res = self.transform_affine(self.transform_non_affine(values))
if ndim == 0:
assert not np.ma.is_masked(res)
return res[0, 0]
if ndim == 1:
return res.reshape(-1)
elif ndim == 2:
return res
else:
raise ValueError(
"Input values must have shape (N x {dims}) "
"or ({dims}).".format(dims=self.input_dims))
return res
def transform_affine(self, values):
return self.get_affine().transform(values)
def transform_non_affine(self, values):
return values
def transform_bbox(self, bbox):
return Bbox(self.transform(bbox.get_points()))
def get_affine(self):
return IdentityTransform()
def get_matrix(self):
return self.get_affine().get_matrix()
def transform_point(self, point):
if len(point) != self.input_dims:
msg = "The length of 'point' must be 'self.input_dims'"
raise ValueError(msg)
return self.transform(np.asarray([point]))[0]
def transform_path(self, path):
return self.transform_path_affine(self.transform_path_non_affine(path))
    def transform_path_affine(self, path):
        """Apply only the affine part of this transform to Path *path*."""
        return self.get_affine().transform_path_affine(path)
    def transform_path_non_affine(self, path):
        """Apply only the non-affine part to *path*'s vertices,
        preserving the path's codes and simplification settings."""
        x = self.transform_non_affine(path.vertices)
        return Path._fast_from_codes_and_verts(x, path.codes,
                {'interpolation_steps': path._interpolation_steps,
                 'should_simplify': path.should_simplify})
    def transform_angles(self, angles, pts, radians=False, pushoff=1e-5):
        """
        Transform a set of angles anchored at points *pts* (2D only).

        Each angle is transformed numerically: its anchor point is
        nudged a tiny step (*pushoff*) along the angle's direction,
        both points are transformed, and the direction of the
        transformed step gives the new angle.  Angles are in degrees
        unless *radians* is True.
        """
        if self.input_dims != 2 or self.output_dims != 2:
            raise NotImplementedError('Only defined in 2D')
        if pts.shape[1] != 2:
            raise ValueError("'pts' must be array with 2 columns for x,y")
        if angles.ndim != 1 or angles.shape[0] != pts.shape[0]:
            msg = "'angles' must be a column vector and have same number of"
            msg += " rows as 'pts'"
            raise ValueError(msg)
        # Work in radians internally.
        if not radians:
            angles = angles / 180.0 * np.pi
        # Nudge each point a small step along its angle's direction.
        pts2 = pts + pushoff * np.c_[np.cos(angles), np.sin(angles)]
        tpts = self.transform(pts)
        tpts2 = self.transform(pts2)
        # The direction of the transformed step is the new angle.
        d = tpts2 - tpts
        a = np.arctan2(d[:, 1], d[:, 0])
        if not radians:
            a = a * 180.0 / np.pi
        return a
    def inverted(self):
        """Return the inverse of this transform; concrete subclasses
        must implement this."""
        raise NotImplementedError()
class TransformWrapper(Transform):
    """
    A pass-through node that delegates everything to a replaceable
    child transform.  The child can be swapped via :meth:`set` without
    changing the wrapper's identity, so dependents keep a stable
    reference.
    """
    pass_through = True
    def __init__(self, child):
        # The wrapper copies the child's dimensionality and rebinds all
        # transform methods to the child in _set().
        if not isinstance(child, Transform):
            msg = ("'child' must be an instance of"
                   " 'matplotlib.transform.Transform'")
            raise ValueError(msg)
        Transform.__init__(self)
        self.input_dims = child.input_dims
        self.output_dims = child.output_dims
        self._set(child)
        self._invalid = 0
    def __eq__(self, other):
        # Equality is delegated entirely to the wrapped child.
        return self._child.__eq__(other)
    if DEBUG:
        def __str__(self):
            return str(self._child)
    def __getstate__(self):
        # Only the child is pickled; __setstate__ rebuilds the rest.
        return {'child': self._child}
    def __setstate__(self, state):
        self.__init__(state['child'])
    def __repr__(self):
        return "TransformWrapper(%r)" % self._child
    def frozen(self):
        return self._child.frozen()
    frozen.__doc__ = Transform.frozen.__doc__
    def _set(self, child):
        # Rebind the child's bound methods directly onto this instance
        # so lookups bypass the wrapper entirely.
        self._child = child
        self.set_children(child)
        self.transform = child.transform
        self.transform_affine = child.transform_affine
        self.transform_non_affine = child.transform_non_affine
        self.transform_path = child.transform_path
        self.transform_path_affine = child.transform_path_affine
        self.transform_path_non_affine = child.transform_path_non_affine
        self.get_affine = child.get_affine
        self.inverted = child.inverted
        self.get_matrix = child.get_matrix
        # note we do not wrap other properties here since the transform's
    def set(self, child):
        """Replace the current child with *child*, which must have the
        same input/output dimensions; dependents are invalidated."""
        if (child.input_dims != self.input_dims or
            child.output_dims != self.output_dims):
            msg = ("The new child must have the same number of input and"
                   " output dimensions as the current child.")
            raise ValueError(msg)
        self._set(child)
        self._invalid = 0
        self.invalidate()
        self._invalid = 0
    def _get_is_affine(self):
        return self._child.is_affine
    is_affine = property(_get_is_affine)
    def _get_is_separable(self):
        return self._child.is_separable
    is_separable = property(_get_is_separable)
    def _get_has_inverse(self):
        return self._child.has_inverse
    has_inverse = property(_get_has_inverse)
class AffineBase(Transform):
    """
    Base class for affine transforms of any dimensionality.  The
    non-affine parts of the Transform interface are implemented as
    no-ops here, so subclasses only supply the affine matrix.
    """
    is_affine = True
    def __init__(self, *args, **kwargs):
        Transform.__init__(self, *args, **kwargs)
        # Cache for the inverse transform; reset on invalidation.
        self._inverted = None
    def __array__(self, *args, **kwargs):
        return self.get_matrix()
    @staticmethod
    def _concat(a, b):
        # Matrix composition: apply a, then b (note the reversed dot).
        return np.dot(b, a)
    def __eq__(self, other):
        if getattr(other, "is_affine", False):
            return np.all(self.get_matrix() == other.get_matrix())
        return NotImplemented
    def transform(self, values):
        # For a pure affine transform, the whole transform is affine.
        return self.transform_affine(values)
    transform.__doc__ = Transform.transform.__doc__
    def transform_affine(self, values):
        raise NotImplementedError('Affine subclasses should override this '
                                  'method.')
    transform_affine.__doc__ = Transform.transform_affine.__doc__
    def transform_non_affine(self, points):
        # The non-affine part of an affine transform is the identity.
        return points
    transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
    def transform_path(self, path):
        return self.transform_path_affine(path)
    transform_path.__doc__ = Transform.transform_path.__doc__
    def transform_path_affine(self, path):
        return Path(self.transform_affine(path.vertices),
                    path.codes, path._interpolation_steps)
    transform_path_affine.__doc__ = Transform.transform_path_affine.__doc__
    def transform_path_non_affine(self, path):
        return path
    transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
    def get_affine(self):
        return self
    get_affine.__doc__ = Transform.get_affine.__doc__
class Affine2DBase(AffineBase):
    """
    Base class for 2D affine transforms, represented by a 3x3 matrix
    in homogeneous coordinates.  Subclasses provide :meth:`get_matrix`.
    """
    has_inverse = True
    input_dims = 2
    output_dims = 2
    def frozen(self):
        # A frozen copy is a plain Affine2D snapshot of the current matrix.
        return Affine2D(self.get_matrix().copy())
    frozen.__doc__ = AffineBase.frozen.__doc__
    def _get_is_separable(self):
        # Separable iff there are no shear (off-diagonal) terms.
        mtx = self.get_matrix()
        return mtx[0, 1] == 0.0 and mtx[1, 0] == 0.0
    is_separable = property(_get_is_separable)
    def to_values(self):
        """Return the matrix as the 6-tuple (a, b, c, d, e, f)."""
        mtx = self.get_matrix()
        return tuple(mtx[:2].swapaxes(0, 1).flatten())
    @staticmethod
    def matrix_from_values(a, b, c, d, e, f):
        """Build a 3x3 homogeneous matrix from the 6 affine values."""
        return np.array([[a, c, e], [b, d, f], [0.0, 0.0, 1.0]], np.float_)
    def transform_affine(self, points):
        mtx = self.get_matrix()
        # Preserve masks on masked input.
        if isinstance(points, MaskedArray):
            tpoints = affine_transform(points.data, mtx)
            return ma.MaskedArray(tpoints, mask=ma.getmask(points))
        return affine_transform(points, mtx)
    def transform_point(self, point):
        mtx = self.get_matrix()
        return affine_transform([point], mtx)[0]
    transform_point.__doc__ = AffineBase.transform_point.__doc__
    if DEBUG:
        # In DEBUG mode, wrap transform_affine to warn about non-array input.
        _transform_affine = transform_affine
        def transform_affine(self, points):
            if (not ma.isMaskedArray(points) and
                not isinstance(points, np.ndarray)):
                warnings.warn(
                    ('A non-numpy array of type %s was passed in for ' +
                     'transformation. Please correct this.')
                    % type(points))
            return self._transform_affine(points)
        transform_affine.__doc__ = AffineBase.transform_affine.__doc__
    def inverted(self):
        # The inverse is cached and recomputed only when invalidated.
        if self._inverted is None or self._invalid:
            mtx = self.get_matrix()
            shorthand_name = None
            if self._shorthand_name:
                shorthand_name = '(%s)-1' % self._shorthand_name
            self._inverted = Affine2D(inv(mtx), shorthand_name=shorthand_name)
            self._invalid = 0
        return self._inverted
    inverted.__doc__ = AffineBase.inverted.__doc__
class Affine2D(Affine2DBase):
    """
    A mutable 2D affine transformation backed by an explicit 3x3
    matrix, with in-place chainable mutators (rotate, translate, scale,
    skew).
    """
    def __init__(self, matrix=None, **kwargs):
        """
        Initialize from a 3x3 homogeneous *matrix*; defaults to the
        identity when *matrix* is None.
        """
        Affine2DBase.__init__(self, **kwargs)
        if matrix is None:
            matrix = np.identity(3)
        elif DEBUG:
            matrix = np.asarray(matrix, np.float_)
            assert matrix.shape == (3, 3)
        self._mtx = matrix
        self._invalid = 0

    def __repr__(self):
        return "Affine2D(%s)" % repr(self._mtx)

    @staticmethod
    def from_values(a, b, c, d, e, f):
        # NOTE: this header was corrupted in the source ("lues(...)");
        # restored to the documented Affine2D.from_values constructor,
        # which builds the matrix [[a, c, e], [b, d, f], [0, 0, 1]].
        """Create an Affine2D from the 6 affine values a, b, c, d, e, f."""
        return Affine2D(
            np.array([a, c, e, b, d, f, 0.0, 0.0, 1.0], np.float_)
            .reshape((3, 3)))

    def get_matrix(self):
        """Return the underlying 3x3 matrix (also clears the dirty flag)."""
        self._invalid = 0
        return self._mtx

    def set_matrix(self, mtx):
        """Set the underlying 3x3 matrix directly and invalidate."""
        self._mtx = mtx
        self.invalidate()

    def set(self, other):
        """Copy the matrix from another Affine2DBase and invalidate."""
        if not isinstance(other, Affine2DBase):
            msg = ("'other' must be an instance of"
                   " 'matplotlib.transform.Affine2DBase'")
            raise ValueError(msg)
        self._mtx = other.get_matrix()
        self.invalidate()

    @staticmethod
    def identity():
        """Return a new identity Affine2D."""
        return Affine2D(np.identity(3))

    def clear(self):
        """Reset to the identity transform; returns self for chaining."""
        self._mtx = np.identity(3)
        self.invalidate()
        return self

    def rotate(self, theta):
        """Left-compose a rotation by *theta* radians; returns self."""
        a = np.cos(theta)
        b = np.sin(theta)
        rotate_mtx = np.array(
            [[a, -b, 0.0], [b, a, 0.0], [0.0, 0.0, 1.0]],
            np.float_)
        self._mtx = np.dot(rotate_mtx, self._mtx)
        self.invalidate()
        return self

    def rotate_deg(self, degrees):
        """Left-compose a rotation by *degrees*; returns self."""
        return self.rotate(degrees * np.pi / 180.)

    def rotate_around(self, x, y, theta):
        """Rotate by *theta* radians about the point (x, y); returns self."""
        return self.translate(-x, -y).rotate(theta).translate(x, y)

    def rotate_deg_around(self, x, y, degrees):
        """Rotate by *degrees* about the point (x, y); returns self."""
        return self.translate(-x, -y).rotate_deg(degrees).translate(x, y)

    def translate(self, tx, ty):
        """Left-compose a translation by (tx, ty); returns self."""
        translate_mtx = np.array(
            [[1.0, 0.0, tx], [0.0, 1.0, ty], [0.0, 0.0, 1.0]],
            np.float_)
        self._mtx = np.dot(translate_mtx, self._mtx)
        self.invalidate()
        return self

    def scale(self, sx, sy=None):
        """Left-compose a scale by (sx, sy); sy defaults to sx."""
        if sy is None:
            sy = sx
        scale_mtx = np.array(
            [[sx, 0.0, 0.0], [0.0, sy, 0.0], [0.0, 0.0, 1.0]],
            np.float_)
        self._mtx = np.dot(scale_mtx, self._mtx)
        self.invalidate()
        return self

    def skew(self, xShear, yShear):
        """Left-compose a skew by angles *xShear*, *yShear* (radians)."""
        rotX = np.tan(xShear)
        rotY = np.tan(yShear)
        skew_mtx = np.array(
            [[1.0, rotX, 0.0], [rotY, 1.0, 0.0], [0.0, 0.0, 1.0]],
            np.float_)
        self._mtx = np.dot(skew_mtx, self._mtx)
        self.invalidate()
        return self

    def skew_deg(self, xShear, yShear):
        """Left-compose a skew by angles in degrees."""
        return self.skew(np.deg2rad(xShear), np.deg2rad(yShear))

    def _get_is_separable(self):
        mtx = self.get_matrix()
        return mtx[0, 1] == 0.0 and mtx[1, 0] == 0.0
    is_separable = property(_get_is_separable)
class IdentityTransform(Affine2DBase):
    """
    The identity transform.  All transform methods are no-ops that
    return their input unchanged; a single shared identity matrix is
    used for every instance.
    """
    _mtx = np.identity(3)
    def frozen(self):
        # Already immutable, so freezing returns self.
        return self
    frozen.__doc__ = Affine2DBase.frozen.__doc__
    def __repr__(self):
        return "IdentityTransform()"
    def get_matrix(self):
        return self._mtx
    get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
    def transform(self, points):
        return np.asanyarray(points)
    transform.__doc__ = Affine2DBase.transform.__doc__
    # The affine/non-affine parts of the identity are the identity too,
    # so the method objects are simply aliased.
    transform_affine = transform
    transform_affine.__doc__ = Affine2DBase.transform_affine.__doc__
    transform_non_affine = transform
    transform_non_affine.__doc__ = Affine2DBase.transform_non_affine.__doc__
    def transform_path(self, path):
        return path
    transform_path.__doc__ = Affine2DBase.transform_path.__doc__
    transform_path_affine = transform_path
    transform_path_affine.__doc__ = Affine2DBase.transform_path_affine.__doc__
    transform_path_non_affine = transform_path
    transform_path_non_affine.__doc__ = Affine2DBase.transform_path_non_affine.__doc__
    def get_affine(self):
        return self
    get_affine.__doc__ = Affine2DBase.get_affine.__doc__
    # The inverse of the identity is itself.
    inverted = get_affine
    inverted.__doc__ = Affine2DBase.inverted.__doc__
class BlendedGenericTransform(Transform):
    """
    A 2D transform that uses one (possibly non-affine) transform for
    the x coordinate and another for the y coordinate.
    """
    input_dims = 2
    output_dims = 2
    is_separable = True
    pass_through = True
    def __init__(self, x_transform, y_transform, **kwargs):
        Transform.__init__(self, **kwargs)
        self._x = x_transform
        self._y = y_transform
        self.set_children(x_transform, y_transform)
        # Cached affine part; rebuilt lazily in get_affine().
        self._affine = None
    def __eq__(self, other):
        if isinstance(other, (BlendedAffine2D, BlendedGenericTransform)):
            return (self._x == other._x) and (self._y == other._y)
        elif self._x == self._y:
            # A blend of two identical transforms compares equal to
            # that transform itself.
            return self._x == other
        else:
            return NotImplemented
    def contains_branch_seperately(self, transform):
        # Each axis answers independently via its own child.
        return self._x.contains_branch(transform), self._y.contains_branch(transform)
    @property
    def depth(self):
        return max([self._x.depth, self._y.depth])
    def contains_branch(self, other):
        # A blended transform as a whole never contains a branch.
        return False
    def _get_is_affine(self):
        return self._x.is_affine and self._y.is_affine
    is_affine = property(_get_is_affine)
    def _get_has_inverse(self):
        return self._x.has_inverse and self._y.has_inverse
    has_inverse = property(_get_has_inverse)
    def frozen(self):
        return blended_transform_factory(self._x.frozen(), self._y.frozen())
    frozen.__doc__ = Transform.frozen.__doc__
    def __repr__(self):
        return "BlendedGenericTransform(%s,%s)" % (self._x, self._y)
    def transform_non_affine(self, points):
        """Apply each child's non-affine part to its own column of
        *points*, preserving masked arrays."""
        if self._x.is_affine and self._y.is_affine:
            return points
        x = self._x
        y = self._y
        if x == y and x.input_dims == 2:
            return x.transform_non_affine(points)
        # A 2D child consumes both columns but we keep only its axis.
        if x.input_dims == 2:
            x_points = x.transform_non_affine(points)[:, 0:1]
        else:
            x_points = x.transform_non_affine(points[:, 0])
            x_points = x_points.reshape((len(x_points), 1))
        if y.input_dims == 2:
            y_points = y.transform_non_affine(points)[:, 1:]
        else:
            y_points = y.transform_non_affine(points[:, 1])
            y_points = y_points.reshape((len(y_points), 1))
        if isinstance(x_points, MaskedArray) or isinstance(y_points, MaskedArray):
            return ma.concatenate((x_points, y_points), 1)
        else:
            return np.concatenate((x_points, y_points), 1)
    transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
    def inverted(self):
        return BlendedGenericTransform(self._x.inverted(), self._y.inverted())
    inverted.__doc__ = Transform.inverted.__doc__
    def get_affine(self):
        # Rebuild the cached affine: row 0 from x's affine, row 1 from y's.
        if self._invalid or self._affine is None:
            if self._x == self._y:
                self._affine = self._x.get_affine()
            else:
                x_mtx = self._x.get_affine().get_matrix()
                y_mtx = self._y.get_affine().get_matrix()
                mtx = np.vstack((x_mtx[0], y_mtx[1], [0.0, 0.0, 1.0]))
                self._affine = Affine2D(mtx)
            self._invalid = 0
        return self._affine
    get_affine.__doc__ = Transform.get_affine.__doc__
class BlendedAffine2D(Affine2DBase):
    """
    A blended transform specialized for the case where both the x and
    y child transforms are separable 2D affines; the blend is then
    itself a single affine matrix.
    """
    is_separable = True
    def __init__(self, x_transform, y_transform, **kwargs):
        # Both children must be affine AND separable for the blend to
        # reduce to one affine matrix.
        is_affine = x_transform.is_affine and y_transform.is_affine
        is_separable = x_transform.is_separable and y_transform.is_separable
        is_correct = is_affine and is_separable
        if not is_correct:
            msg = ("Both *x_transform* and *y_transform* must be 2D affine"
                   " transforms.")
            raise ValueError(msg)
        Transform.__init__(self, **kwargs)
        self._x = x_transform
        self._y = y_transform
        self.set_children(x_transform, y_transform)
        Affine2DBase.__init__(self)
        self._mtx = None
    def __eq__(self, other):
        if isinstance(other, (BlendedAffine2D, BlendedGenericTransform)):
            return (self._x == other._x) and (self._y == other._y)
        elif self._x == self._y:
            return self._x == other
        else:
            return NotImplemented
    def contains_branch_seperately(self, transform):
        return self._x.contains_branch(transform), self._y.contains_branch(transform)
    def __repr__(self):
        return "BlendedAffine2D(%s,%s)" % (self._x, self._y)
    def get_matrix(self):
        # Lazily rebuild: row 0 from x's matrix, row 1 from y's matrix.
        if self._invalid:
            if self._x == self._y:
                self._mtx = self._x.get_matrix()
            else:
                x_mtx = self._x.get_matrix()
                y_mtx = self._y.get_matrix()
                self._mtx = np.vstack((x_mtx[0], y_mtx[1], [0.0, 0.0, 1.0]))
            self._inverted = None
            self._invalid = 0
        return self._mtx
    get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
def blended_transform_factory(x_transform, y_transform):
    """
    Create a transform using *x_transform* for the x axis and
    *y_transform* for the y axis, choosing the cheaper BlendedAffine2D
    when both inputs are affine.
    """
    x_is_affine = isinstance(x_transform, Affine2DBase)
    y_is_affine = isinstance(y_transform, Affine2DBase)
    if x_is_affine and y_is_affine:
        return BlendedAffine2D(x_transform, y_transform)
    return BlendedGenericTransform(x_transform, y_transform)
class CompositeGenericTransform(Transform):
    """
    The composition of two transforms: apply *a*, then *b*.  Either
    child may be non-affine.
    """
    pass_through = True
    def __init__(self, a, b, **kwargs):
        # Dimensions must chain: a's output feeds b's input.
        if a.output_dims != b.input_dims:
            msg = ("The output dimension of 'a' must be equal to the input"
                   " dimensions of 'b'")
            raise ValueError(msg)
        self.input_dims = a.input_dims
        self.output_dims = b.output_dims
        Transform.__init__(self, **kwargs)
        self._a = a
        self._b = b
        self.set_children(a, b)
    is_affine = property(lambda self: self._a.is_affine and self._b.is_affine)
    def frozen(self):
        self._invalid = 0
        frozen = composite_transform_factory(self._a.frozen(), self._b.frozen())
        # The factory may return a simpler type; re-freeze in that case.
        if not isinstance(frozen, CompositeGenericTransform):
            return frozen.frozen()
        return frozen
    frozen.__doc__ = Transform.frozen.__doc__
    def _invalidate_internal(self, value, invalidating_node):
        # An affine-only invalidation must be upgraded to a full one
        # when it would pass through a non-affine stage of this chain.
        if value == Transform.INVALID_AFFINE \
            and not self._b.is_affine \
            and (not self._a.is_affine or invalidating_node is self._a):
            value = Transform.INVALID
        Transform._invalidate_internal(self, value=value,
                                       invalidating_node=invalidating_node)
    def __eq__(self, other):
        if isinstance(other, (CompositeGenericTransform, CompositeAffine2D)):
            return self is other or (self._a == other._a and self._b == other._b)
        else:
            return False
    def _iter_break_from_left_to_right(self):
        # Yield every (left, right) split of this chain, recursing into
        # both children.
        for lh_compliment, rh_compliment in self._a._iter_break_from_left_to_right():
            yield lh_compliment, rh_compliment + self._b
        for lh_compliment, rh_compliment in self._b._iter_break_from_left_to_right():
            yield self._a + lh_compliment, rh_compliment
    @property
    def depth(self):
        return self._a.depth + self._b.depth
    def _get_is_affine(self):
        return self._a.is_affine and self._b.is_affine
    is_affine = property(_get_is_affine)
    def _get_is_separable(self):
        return self._a.is_separable and self._b.is_separable
    is_separable = property(_get_is_separable)
    if DEBUG:
        def __str__(self):
            return '(%s, %s)' % (self._a, self._b)
    def __repr__(self):
        return "CompositeGenericTransform(%r, %r)" % (self._a, self._b)
    def transform_affine(self, points):
        return self.get_affine().transform(points)
    transform_affine.__doc__ = Transform.transform_affine.__doc__
    def transform_non_affine(self, points):
        # Only the non-affine stages of the chain are applied here; the
        # affine tail is handled by get_affine().
        if self._a.is_affine and self._b.is_affine:
            return points
        elif not self._a.is_affine and self._b.is_affine:
            return self._a.transform_non_affine(points)
        else:
            return self._b.transform_non_affine(
                                self._a.transform(points))
    transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
    def transform_path_non_affine(self, path):
        if self._a.is_affine and self._b.is_affine:
            return path
        elif not self._a.is_affine and self._b.is_affine:
            return self._a.transform_path_non_affine(path)
        else:
            return self._b.transform_path_non_affine(
                                    self._a.transform_path(path))
    transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
    def get_affine(self):
        # If b is non-affine, the affine tail is just b's affine part;
        # otherwise both affine parts compose into one matrix.
        if not self._b.is_affine:
            return self._b.get_affine()
        else:
            return Affine2D(np.dot(self._b.get_affine().get_matrix(),
                                self._a.get_affine().get_matrix()))
    get_affine.__doc__ = Transform.get_affine.__doc__
    def inverted(self):
        # (a + b)^-1 == b^-1 + a^-1
        return CompositeGenericTransform(self._b.inverted(), self._a.inverted())
    inverted.__doc__ = Transform.inverted.__doc__
    def _get_has_inverse(self):
        return self._a.has_inverse and self._b.has_inverse
    has_inverse = property(_get_has_inverse)
class CompositeAffine2D(Affine2DBase):
    """
    The composition of two 2D affine transforms (apply *a*, then *b*),
    collapsed into a single cached matrix product.
    """
    def __init__(self, a, b, **kwargs):
        if not a.is_affine or not b.is_affine:
            raise ValueError("'a' and 'b' must be affine transforms")
        if a.output_dims != b.input_dims:
            msg = ("The output dimension of 'a' must be equal to the input"
                   " dimensions of 'b'")
            raise ValueError(msg)
        self.input_dims = a.input_dims
        self.output_dims = b.output_dims
        Affine2DBase.__init__(self, **kwargs)
        self._a = a
        self._b = b
        self.set_children(a, b)
        # Cached product matrix; rebuilt lazily in get_matrix().
        self._mtx = None
    if DEBUG:
        def __str__(self):
            return '(%s, %s)' % (self._a, self._b)
    @property
    def depth(self):
        return self._a.depth + self._b.depth
    def _iter_break_from_left_to_right(self):
        # Yield every (left, right) split of this chain.
        for lh_compliment, rh_compliment in self._a._iter_break_from_left_to_right():
            yield lh_compliment, rh_compliment + self._b
        for lh_compliment, rh_compliment in self._b._iter_break_from_left_to_right():
            yield self._a + lh_compliment, rh_compliment
    def __repr__(self):
        return "CompositeAffine2D(%r, %r)" % (self._a, self._b)
    def get_matrix(self):
        if self._invalid:
            # b on the left so a is applied first.
            self._mtx = np.dot(
                self._b.get_matrix(),
                self._a.get_matrix())
            self._inverted = None
            self._invalid = 0
        return self._mtx
    get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
def composite_transform_factory(a, b):
    """
    Compose transforms *a* then *b*, short-circuiting identities: an
    IdentityTransform on either side is elided, two Affine2D inputs
    collapse into a single CompositeAffine2D, and anything else yields
    a CompositeGenericTransform.
    """
    if isinstance(a, IdentityTransform):
        return b
    if isinstance(b, IdentityTransform):
        return a
    if isinstance(a, Affine2D) and isinstance(b, Affine2D):
        return CompositeAffine2D(a, b)
    return CompositeGenericTransform(a, b)
class BboxTransform(Affine2DBase):
    """
    An affine transform that linearly maps coordinates from one
    bounding box (*boxin*) to another (*boxout*).
    """
    is_separable = True
    def __init__(self, boxin, boxout, **kwargs):
        if not boxin.is_bbox or not boxout.is_bbox:
            msg = "'boxin' and 'boxout' must be bbox"
            raise ValueError(msg)
        Affine2DBase.__init__(self, **kwargs)
        self._boxin = boxin
        self._boxout = boxout
        self.set_children(boxin, boxout)
        # Matrix and inverse are cached; rebuilt lazily when invalid.
        self._mtx = None
        self._inverted = None
    def __repr__(self):
        return "BboxTransform(%r, %r)" % (self._boxin, self._boxout)
    def get_matrix(self):
        if self._invalid:
            inl, inb, inw, inh = self._boxin.bounds
            outl, outb, outw, outh = self._boxout.bounds
            # Scale by the ratio of the box sizes, then translate so
            # boxin's lower-left maps onto boxout's lower-left.
            x_scale = outw / inw
            y_scale = outh / inh
            if DEBUG and (x_scale == 0 or y_scale == 0):
                raise ValueError("Transforming from or to a singular bounding box.")
            self._mtx = np.array([[x_scale, 0.0    , (-inl*x_scale+outl)],
                                  [0.0    , y_scale, (-inb*y_scale+outb)],
                                  [0.0    , 0.0    , 1.0        ]],
                                 np.float_)
            self._inverted = None
            self._invalid = 0
        return self._mtx
    get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
class BboxTransformTo(Affine2DBase):
    """
    An affine transform that maps unit-box coordinates ([0, 1] x
    [0, 1]) into the given bounding box *boxout*.
    """
    is_separable = True
    def __init__(self, boxout, **kwargs):
        if not boxout.is_bbox:
            raise ValueError("'boxout' must be bbox")
        Affine2DBase.__init__(self, **kwargs)
        self._boxout = boxout
        self.set_children(boxout)
        # Matrix and inverse are cached; rebuilt lazily when invalid.
        self._mtx = None
        self._inverted = None
    def __repr__(self):
        return "BboxTransformTo(%r)" % (self._boxout)
    def get_matrix(self):
        if self._invalid:
            outl, outb, outw, outh = self._boxout.bounds
            if DEBUG and (outw == 0 or outh == 0):
                raise ValueError("Transforming to a singular bounding box.")
            # Scale by the box size and translate to its lower-left.
            self._mtx = np.array([[outw,  0.0, outl],
                                  [ 0.0, outh, outb],
                                  [ 0.0,  0.0,  1.0]],
                                 np.float_)
            self._inverted = None
            self._invalid = 0
        return self._mtx
    get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
class BboxTransformToMaxOnly(BboxTransformTo):
    """
    Like BboxTransformTo, but scales only by the box's maximum corner
    (no translation), i.e. maps the unit box to (0, 0)..(xmax, ymax).
    """
    def __repr__(self):
        return "BboxTransformToMaxOnly(%r)" % (self._boxout)
    def get_matrix(self):
        if self._invalid:
            xmax, ymax = self._boxout.max
            if DEBUG and (xmax == 0 or ymax == 0):
                raise ValueError("Transforming to a singular bounding box.")
            self._mtx = np.array([[xmax,  0.0, 0.0],
                                  [ 0.0, ymax, 0.0],
                                  [ 0.0,  0.0, 1.0]],
                                 np.float_)
            self._inverted = None
            self._invalid = 0
        return self._mtx
    get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
class BboxTransformFrom(Affine2DBase):
    """
    An affine transform that maps coordinates from the given bounding
    box *boxin* into unit-box coordinates ([0, 1] x [0, 1]).
    """
    is_separable = True
    def __init__(self, boxin, **kwargs):
        if not boxin.is_bbox:
            raise ValueError("'boxin' must be bbox")
        Affine2DBase.__init__(self, **kwargs)
        self._boxin = boxin
        self.set_children(boxin)
        # Matrix and inverse are cached; rebuilt lazily when invalid.
        self._mtx = None
        self._inverted = None
    def __repr__(self):
        return "BboxTransformFrom(%r)" % (self._boxin)
    def get_matrix(self):
        if self._invalid:
            inl, inb, inw, inh = self._boxin.bounds
            if DEBUG and (inw == 0 or inh == 0):
                raise ValueError("Transforming from a singular bounding box.")
            # Normalize by the box size and shift its lower-left to 0.
            x_scale = 1.0 / inw
            y_scale = 1.0 / inh
            self._mtx = np.array([[x_scale, 0.0    , (-inl*x_scale)],
                                  [0.0    , y_scale, (-inb*y_scale)],
                                  [0.0    , 0.0    , 1.0        ]],
                                 np.float_)
            self._inverted = None
            self._invalid = 0
        return self._mtx
    get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
class ScaledTranslation(Affine2DBase):
    """
    A translation by (xt, yt), where the offset is first passed through
    *scale_trans* (e.g. a dpi scale transform), so the effective shift
    follows changes in that transform.
    """
    def __init__(self, xt, yt, scale_trans, **kwargs):
        Affine2DBase.__init__(self, **kwargs)
        self._t = (xt, yt)
        self._scale_trans = scale_trans
        self.set_children(scale_trans)
        # Matrix and inverse are cached; rebuilt lazily when invalid.
        self._mtx = None
        self._inverted = None
    def __repr__(self):
        return "ScaledTranslation(%r)" % (self._t,)
    def get_matrix(self):
        if self._invalid:
            # The raw offset is transformed into the target space first.
            xt, yt = self._scale_trans.transform_point(self._t)
            self._mtx = np.array([[1.0, 0.0, xt],
                                  [0.0, 1.0, yt],
                                  [0.0, 0.0, 1.0]],
                                 np.float_)
            self._invalid = 0
            self._inverted = None
        return self._mtx
    get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
class TransformedPath(TransformNode):
    """
    A Path paired with a Transform, caching the expensive non-affine
    part of the transformed result and revalidating it only when the
    transform's non-affine component has been invalidated.
    """
    def __init__(self, path, transform):
        if not isinstance(transform, Transform):
            msg = ("'transform' must be an instance of"
                   " 'matplotlib.transform.Transform'")
            raise ValueError(msg)
        TransformNode.__init__(self)
        self._path = path
        self._transform = transform
        self.set_children(transform)
        # Caches for the non-affine-transformed path and vertices.
        self._transformed_path = None
        self._transformed_points = None
    def _revalidate(self):
        # Recompute only when the non-affine part is stale (an
        # affine-only invalidation leaves these caches usable).
        if ((self._invalid & self.INVALID_NON_AFFINE == self.INVALID_NON_AFFINE)
            or self._transformed_path is None):
            self._transformed_path = \
                self._transform.transform_path_non_affine(self._path)
            self._transformed_points = \
                Path._fast_from_codes_and_verts(
                    self._transform.transform_non_affine(self._path.vertices),
                    None,
                    {'interpolation_steps': self._path._interpolation_steps,
                     'should_simplify': self._path.should_simplify})
        self._invalid = 0
    def get_transformed_points_and_affine(self):
        """Return the non-affine-transformed vertices (codes dropped)
        plus the remaining affine part to apply."""
        self._revalidate()
        return self._transformed_points, self.get_affine()
    def get_transformed_path_and_affine(self):
        """Return the non-affine-transformed path plus the remaining
        affine part to apply."""
        self._revalidate()
        return self._transformed_path, self.get_affine()
    def get_fully_transformed_path(self):
        """Return the path with the full transform (non-affine and
        affine) applied."""
        self._revalidate()
        return self._transform.transform_path_affine(self._transformed_path)
    def get_affine(self):
        return self._transform.get_affine()
def nonsingular(vmin, vmax, expander=0.001, tiny=1e-15, increasing=True):
    """
    Ensure the interval (vmin, vmax) has a nonzero, finite span.

    Nonfinite endpoints collapse to (-expander, expander).  A span that
    is tiny relative to the endpoint magnitudes is widened by
    *expander* times each endpoint's magnitude (or to +/-expander
    around zero).  When *increasing* is False, an originally reversed
    interval is returned reversed.
    """
    if not (np.isfinite(vmin) and np.isfinite(vmax)):
        return -expander, expander
    flipped = vmax < vmin
    if flipped:
        vmin, vmax = vmax, vmin
    span_is_tiny = (vmax - vmin) <= max(abs(vmin), abs(vmax)) * tiny
    if span_is_tiny:
        if vmax == 0 and vmin == 0:
            vmin, vmax = -expander, expander
        else:
            vmin = vmin - expander * abs(vmin)
            vmax = vmax + expander * abs(vmax)
    if flipped and not increasing:
        return vmax, vmin
    return vmin, vmax
def interval_contains(interval, val):
    """Return True if *val* lies inside the closed *interval*, whose
    endpoints may be given in either order."""
    a, b = interval
    if a < b:
        return a <= val <= b
    return b <= val <= a
def interval_contains_open(interval, val):
    """Return True if *val* lies strictly inside the open *interval*,
    whose endpoints may be given in either order."""
    a, b = interval
    if a < b:
        return a < val < b
    return b < val < a
def offset_copy(trans, fig=None, x=0.0, y=0.0, units='inches'):
    """
    Return a copy of *trans* shifted by (x, y) in the given *units*.

    'dots' offsets in device units directly; 'points' and 'inches' are
    resolved through *fig*'s dpi scale transform, so *fig* is required
    for those units.
    """
    if units == 'dots':
        return trans + Affine2D().translate(x, y)
    if fig is None:
        raise ValueError('For units of inches or points a fig kwarg is needed')
    if units == 'points':
        # 72 points per inch.
        x, y = x / 72.0, y / 72.0
    elif units != 'inches':
        raise ValueError('units must be dots, points, or inches')
    return trans + ScaledTranslation(x, y, fig.dpi_scale_trans)
| true | true |
f7ff321f48554463a337e8af0e6b695428971983 | 3,724 | py | Python | qml_workdir/data_work/feat_sel_12.py | quantum13/mlbootcamp5 | 2b473074daadce8ee7c859dcec6f6171464c3a43 | [
"MIT"
] | 2 | 2017-07-18T06:32:09.000Z | 2017-09-21T12:26:01.000Z | qml_workdir/data_work/feat_sel_12.py | quantum13/mlbootcamp5 | 2b473074daadce8ee7c859dcec6f6171464c3a43 | [
"MIT"
] | null | null | null | qml_workdir/data_work/feat_sel_12.py | quantum13/mlbootcamp5 | 2b473074daadce8ee7c859dcec6f6171464c3a43 | [
"MIT"
] | 2 | 2017-07-18T18:42:06.000Z | 2021-10-09T14:26:40.000Z | import datetime
# Feature-selection experiment: fit an averaged XGBoost model (8-fold
# average of a fixed hyperopt-tuned configuration) and run incremental
# feature-selection over gender-scaled candidate features, logging to
# qml_workdir/logs/feat12.txt.
import numpy as np
import os
import sys
sys.path.insert(0, os.getcwd())

from hyperopt import hp, fmin, tpe

import qml_workdir.classes.config
from qml.cv import QCV
from qml.models import QXgb, QAvg, QAvgOneModelData
from qml_workdir.classes.models import qm

# trash wrong model
# NOTE: the line above was bare text in the source (a stripped comment)
# and made the script a SyntaxError; restored as a comment.
cv = QCV(qm)

# Fixed XGBoost configuration found by a previous hyperopt run.
model_id = qm.add_by_params(
    QXgb(
        ** {"alpha": 0.008, "booster": "gbtree", "colsample_bylevel": 0.9, "colsample_bytree": 0.9, "eta": 0.024, "eval_metric": "logloss",
            "gamma": 0.04, "max_depth": 4, "num_boost_round": 261, "objective": "binary:logistic", "subsample": 0.7, "tree_method": "hist"}
    ),
    'hyperopt xgb',
)
# Average the model over 8 seeds/folds to stabilize the CV score.
model_id = qm.add_by_params(QAvgOneModelData(model_id, 8), level=-2)

# Base feature set (always included) followed by the candidate pool of
# gender-scaled features to try adding one at a time.
cv.features_sel_add(model_id, 45, [
    'age',
    'height',
    'weight',
    'ap_hi',
    'ap_lo',
    'smoke',
    'alco',
    'active',
    'gender_male',
    'height_low',
    'weight_low',
    'cholesterol_all',
    'gluc_all',
    'cholesterol_1',
    'cholesterol_2',
    'cholesterol_3',
    'gluc_1',
    'gluc_2',
    'gluc_3',
    'ap_error',
    'ap_error_swap',
    'imt',
    'imt_class_all',
    'imt_class_0',
    'imt_class_1',
    'imt_class_2',
    'imt_class_3',
    'imt_class_4',
    'imt_class_5',
    'imt_class_6',
    'x__age__gluc_all',
    'x__ap_hi__cholesterol_all',
    'div6__height__gluc_all__imt',
    'plus__age_norm__ap_hi_norm__gluc_all_norm',
    'x__age__weight',
    'div1__age__weight__cholesterol_all',
    'div6__age__weight__cholesterol_all',
    'plus__height_norm__weight_norm__gluc_all_norm',
    'div1__ap_hi__ap_lo__cholesterol_all',
    'div6__ap_hi__ap_lo__cholesterol_all',
    'plus__age_norm__gluc_all_norm__imt_norm',
    'minus6__ap_hi_norm__ap_lo_norm__cholesterol_all_norm',
    'minus1__ap_hi_norm__ap_lo_norm__cholesterol_all_norm',
    'minus6__age_norm__ap_lo_norm__cholesterol_all_norm',
    'minus1__age_norm__ap_lo_norm__cholesterol_all_norm',
    'div6__height__weight__ap_lo',
    'div2__ap_lo__cholesterol_all__gluc_all',
    'x__age__ap_hi__gluc_all',
    'div5__ap_lo__cholesterol_all__gluc_all',
    'score_scale_val_v3',
    'score_scale_val',
    'k15_0',
    'k15_1',
    'k15_2',
    'k15_3',
    'k15_4',
    'k15_5',
    'k15_6',
    'k15_7',
    'k15_8',
    'k15_9',
    'k15_10',
    'k15_11',
    'k15_12',
    'k15_13',
    'k15_14',
    'k7_0',
    'k7_1',
    'k7_2',
    'k7_3',
    'k7_4',
    'k7_5',
    'k7_6',
    'k3_0',
    'k3_1',
    'k3_2',
    'div6__height__gluc_all__imt__gender__scale',
],
[
    'imt_class_all__gender__scale',
    'minus6__ap_hi_norm__ap_lo_norm__cholesterol_all_norm__gender__scale',
    'ap_lo__gender__scale',
    'imt__gender__scale',
    'gluc_all__gender__scale',
    'cholesterol_all__gender__scale',
    'plus__age_norm__ap_hi_norm__gluc_all_norm__gender__scale',
    'minus1__age_norm__ap_lo_norm__cholesterol_all_norm__gender__scale',
    'minus1__ap_hi_norm__ap_lo_norm__cholesterol_all_norm__gender__scale',
    'score_scale_val__gender__scale',
    'x__ap_hi__cholesterol_all__gender__scale',
    'minus6__age_norm__ap_lo_norm__cholesterol_all_norm__gender__scale',
    'div6__ap_hi__ap_lo__cholesterol_all__gender__scale',
    'div1__ap_hi__ap_lo__cholesterol_all__gender__scale',
    'div6__height__weight__ap_lo__gender__scale',
    'div1__age__weight__cholesterol_all__gender__scale',
    'weight__gender__scale',
    'x__age__gluc_all__gender__scale',
    'plus__age_norm__gluc_all_norm__imt_norm__gender__scale',
    'plus__height_norm__weight_norm__gluc_all_norm__gender__scale',
    'height__gender__scale',
    'div6__age__weight__cholesterol_all__gender__scale',
    'div5__ap_lo__cholesterol_all__gluc_all__gender__scale',
    'x__age__weight__gender__scale',
    'age__gender__scale',
    'x__age__ap_hi__gluc_all__gender__scale',
    'div2__ap_lo__cholesterol_all__gluc_all__gender__scale',
    'ap_hi__gender__scale',
    # Stop a candidate early when the CV logloss exceeds the threshold.
], early_stop_cv=lambda x: x > 0.5414, log_file='qml_workdir/logs/feat12.txt')
| 26.6 | 131 | 0.771482 | import datetime
import numpy as np
import os
import sys
sys.path.insert(0, os.getcwd())
from hyperopt import hp, fmin, tpe
import qml_workdir.classes.config
from qml.cv import QCV
from qml.models import QXgb, QAvg, QAvgOneModelData
from qml_workdir.classes.models import qm
trash wrong model
cv = QCV(qm)
model_id = qm.add_by_params(
QXgb(
** {"alpha": 0.008, "booster": "gbtree", "colsample_bylevel": 0.9, "colsample_bytree": 0.9, "eta": 0.024, "eval_metric": "logloss",
"gamma": 0.04, "max_depth": 4, "num_boost_round": 261, "objective": "binary:logistic", "subsample": 0.7, "tree_method": "hist"}
),
'hyperopt xgb',
)
model_id =qm.add_by_params(QAvgOneModelData(model_id, 8), level=-2)
cv.features_sel_add(model_id, 45, [
'age',
'height',
'weight',
'ap_hi',
'ap_lo',
'smoke',
'alco',
'active',
'gender_male',
'height_low',
'weight_low',
'cholesterol_all',
'gluc_all',
'cholesterol_1',
'cholesterol_2',
'cholesterol_3',
'gluc_1',
'gluc_2',
'gluc_3',
'ap_error',
'ap_error_swap',
'imt',
'imt_class_all',
'imt_class_0',
'imt_class_1',
'imt_class_2',
'imt_class_3',
'imt_class_4',
'imt_class_5',
'imt_class_6',
'x__age__gluc_all',
'x__ap_hi__cholesterol_all',
'div6__height__gluc_all__imt',
'plus__age_norm__ap_hi_norm__gluc_all_norm',
'x__age__weight',
'div1__age__weight__cholesterol_all',
'div6__age__weight__cholesterol_all',
'plus__height_norm__weight_norm__gluc_all_norm',
'div1__ap_hi__ap_lo__cholesterol_all',
'div6__ap_hi__ap_lo__cholesterol_all',
'plus__age_norm__gluc_all_norm__imt_norm',
'minus6__ap_hi_norm__ap_lo_norm__cholesterol_all_norm',
'minus1__ap_hi_norm__ap_lo_norm__cholesterol_all_norm',
'minus6__age_norm__ap_lo_norm__cholesterol_all_norm',
'minus1__age_norm__ap_lo_norm__cholesterol_all_norm',
'div6__height__weight__ap_lo',
'div2__ap_lo__cholesterol_all__gluc_all',
'x__age__ap_hi__gluc_all',
'div5__ap_lo__cholesterol_all__gluc_all',
'score_scale_val_v3',
'score_scale_val',
'k15_0',
'k15_1',
'k15_2',
'k15_3',
'k15_4',
'k15_5',
'k15_6',
'k15_7',
'k15_8',
'k15_9',
'k15_10',
'k15_11',
'k15_12',
'k15_13',
'k15_14',
'k7_0',
'k7_1',
'k7_2',
'k7_3',
'k7_4',
'k7_5',
'k7_6',
'k3_0',
'k3_1',
'k3_2',
'div6__height__gluc_all__imt__gender__scale',
],
[
'imt_class_all__gender__scale',
'minus6__ap_hi_norm__ap_lo_norm__cholesterol_all_norm__gender__scale',
'ap_lo__gender__scale',
'imt__gender__scale',
'gluc_all__gender__scale',
'cholesterol_all__gender__scale',
'plus__age_norm__ap_hi_norm__gluc_all_norm__gender__scale',
'minus1__age_norm__ap_lo_norm__cholesterol_all_norm__gender__scale',
'minus1__ap_hi_norm__ap_lo_norm__cholesterol_all_norm__gender__scale',
'score_scale_val__gender__scale',
'x__ap_hi__cholesterol_all__gender__scale',
'minus6__age_norm__ap_lo_norm__cholesterol_all_norm__gender__scale',
'div6__ap_hi__ap_lo__cholesterol_all__gender__scale',
'div1__ap_hi__ap_lo__cholesterol_all__gender__scale',
'div6__height__weight__ap_lo__gender__scale',
'div1__age__weight__cholesterol_all__gender__scale',
'weight__gender__scale',
'x__age__gluc_all__gender__scale',
'plus__age_norm__gluc_all_norm__imt_norm__gender__scale',
'plus__height_norm__weight_norm__gluc_all_norm__gender__scale',
'height__gender__scale',
'div6__age__weight__cholesterol_all__gender__scale',
'div5__ap_lo__cholesterol_all__gluc_all__gender__scale',
'x__age__weight__gender__scale',
'age__gender__scale',
'x__age__ap_hi__gluc_all__gender__scale',
'div2__ap_lo__cholesterol_all__gluc_all__gender__scale',
'ap_hi__gender__scale',
], early_stop_cv=lambda x: x > 0.5414, log_file='qml_workdir/logs/feat12.txt')
| false | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.