id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
1787742 | from challenges.data_structure.stacks_and_queues.stacks_and_queues import *
import pytest
import unittest
''' Stack tests '''
def test_push(stack_test):
excpected = "three\ntwo\none"
actual = f"{stack_test}"
assert excpected == actual
def test_push_to_empty():
    """A single push onto a fresh stack becomes its sole rendered item."""
    stack = Stack()
    stack.push("one")
    assert f"{stack}" == "one"
def test_peek(stack_test):
stack = stack_test
excpected = "three"
actual = f"{stack.peek()}"
assert excpected == actual
def test_empty_stack():
    """A brand-new stack reports itself as empty."""
    # Equality (not identity) check preserved from the original assertion.
    assert Stack().is_empty() == True  # noqa: E712
def test_empty_stack_peek():
    """peek on an empty stack must raise EmptyStackException."""
    with pytest.raises(EmptyStackException):
        Stack().peek()
def test_empty_stack_pop():
    """pop on an empty stack must raise EmptyStackException.

    Renamed from ``test_empty_queue_pop``: the body exercises Stack, not
    Queue, so the old name was misleading and clashed conceptually with
    the queue test suite below.
    """
    stack = Stack()
    with pytest.raises(EmptyStackException):
        stack.pop()
def test_pop(stack_test):
stack_test.pop()
excpected = "two\none"
actual = f"{stack_test}"
assert excpected == actual
def test_emptying_stack(stack_test):
for i in range(3):
stack_test.pop()
excpected = True
actual = stack_test.is_empty()
assert excpected == actual
def test_adding_multiple_to_stack():
    """Integers pushed 1, 2, 3 render top-first as '3\\n2\\n1'."""
    stack = Stack()
    for number in (1, 2, 3):
        stack.push(number)
    assert f"{stack}" == "3\n2\n1"
''' queue tests '''
def test_enqueue(queue_test):
excpected = "one\ntwo\nthree"
actual = f"{queue_test}"
assert excpected == actual
def test_enqueue_to_empty():
    """A single enqueue onto a fresh queue becomes its sole rendered item."""
    queue = Queue()
    queue.enqueue("one")
    assert f"{queue}" == "one"
def test_peek(queue_test):
excpected = "one"
actual = f"{queue_test.peek()}"
assert excpected == actual
def test_empty_queue():
    """A brand-new queue reports itself as empty."""
    # Equality (not identity) check preserved from the original assertion.
    assert Queue().is_empty() == True  # noqa: E712
def test_empty_queue_peek():
    """peek on an empty queue must raise EmptyStackException.

    (EmptyStackException is what the implementation raises for queues too.)
    """
    with pytest.raises(EmptyStackException):
        Queue().peek()
def test_empty_queue_dequeue():
    """dequeue on an empty queue must raise EmptyStackException."""
    with pytest.raises(EmptyStackException):
        Queue().dequeue()
def test_dequeue(queue_test):
queue_test.dequeue()
excpected = "two\nthree"
actual = f"{queue_test}"
assert excpected == actual
def test_emptying_queue(queue_test):
for i in range(3):
queue_test.dequeue()
excpected = True
actual = queue_test.is_empty()
assert excpected == actual
def test_adding_multiple_to_queue():
    """Integers enqueued 1, 2, 3 render front-first as '1\\n2\\n3'."""
    queue = Queue()
    for number in (1, 2, 3):
        queue.enqueue(number)
    assert f"{queue}" == "1\n2\n3"
@pytest.fixture
def stack_test():
    """Stack preloaded with 'one', 'two', 'three' ('three' on top)."""
    preloaded = Stack()
    for item in ("one", "two", "three"):
        preloaded.push(item)
    return preloaded
@pytest.fixture
def queue_test():
    """Queue preloaded with 'one', 'two', 'three' ('one' at the front)."""
    preloaded = Queue()
    for item in ("one", "two", "three"):
        preloaded.enqueue(item)
    return preloaded
| StarcoderdataPython |
60900 | # filename: board/widgets.py
from rakmai.widgets import (
SummernoteBs4Widget, SummernoteLiteWidget
)
class MessageSummernoteBs4Widget(SummernoteBs4Widget):
    """Summernote (Bootstrap 4) rich-text widget for board messages.

    Extends the base widget's Django ``Media`` manifest so the
    board-specific AJAX upload script is loaded with the form.
    """

    class Media:
        # Django form-media declaration; served relative to STATIC_URL.
        js = (
            'js/board/message-summernote-ajax.js',
        )
class MessageAdminSummernoteLiteWidget(SummernoteLiteWidget):
    """Summernote Lite widget for admin-side message editing.

    Bundles the summernote stylesheet and the post-admin AJAX upload
    script via the Django ``Media`` manifest.
    """

    class Media:
        css = {
            'all': (
                'css/django-summernote.css',
            )
        }
        js = (
            'js/post-admin-summernote-ajax.js',
        )
| StarcoderdataPython |
4825097 | from NERTranserLearning.Experiment import Experiment
class ExperimentTransferTddTraining(Experiment):
    """
    Contains the configurations of the transfer-learning training on various
    datasets for both Elmo and naive embeddings with a TDD output layer.

    It's crucial that the trainings run in sequential order, as they depend
    on one another: each step's 'input_checkpoint' is a previous step's
    'output_checkpoint'. Comment out all configurations but the one you wish
    to run, then run this code as a module.
    """

    @property
    def EXPERIMENT_PLAN(self):
        # Each entry pairs a 'train' config (dataset, checkpoint chain, model,
        # trainer, layer-freeze flags) with the datasets to evaluate afterwards.
        # Exactly one entry should be uncommented at a time (see class docstring).
        return [
            {
                'train': {
                    'dataset': 'gmb_dataset',
                    'input_checkpoint': None,
                    'output_checkpoint': 'trained/transfer_tdd/gmb-tdd-simple.ckpt',
                    'model': 'tdd_simple_embedding_model',
                    'train': 'train_tdd_output',
                    'freeze_bi_lstm': False,
                    'freeze_output_layer': False
                },
                'test': ['btc_dataset','ritter_dataset'],
            },
            # {
            #     'train': {
            #         'dataset': 'btc_dataset_train',
            #         'input_checkpoint': 'trained/transfer_tdd/gmb-tdd-simple.ckpt',
            #         'output_checkpoint': 'trained/transfer_tdd/gmb+btc_train-tdd-simple.ckpt',
            #         'model': 'tdd_simple_embedding_model',
            #         'train': 'train_tdd_output',
            #         'freeze_bi_lstm': False,
            #         'freeze_output_layer': False
            #     },
            #     'test': ['btc_dataset_test','ritter_dataset'],
            # },
            # {
            #     'train': {
            #         'dataset': 'btc_dataset',
            #         'input_checkpoint': 'trained/transfer_tdd/gmb-tdd-simple.ckpt',
            #         'output_checkpoint': 'trained/transfer_tdd/gmb+btc-tdd-simple.ckpt',
            #         'model': 'tdd_simple_embedding_model',
            #         'train': 'train_tdd_output',
            #         'freeze_bi_lstm': False,
            #         'freeze_output_layer': False
            #     },
            #     'test': ['ritter_dataset'],
            # },
            # {
            #     'train': {
            #         'dataset': 'ritter_dataset_train',
            #         'input_checkpoint': 'trained/transfer_tdd/gmb+btc-tdd-simple.ckpt',
            #         'output_checkpoint': 'trained/transfer_tdd/gmb+btc+ritter_train-tdd-simple.ckpt',
            #         'model': 'tdd_simple_embedding_model',
            #         'train': 'train_tdd_output',
            #         'freeze_bi_lstm': False,
            #         'freeze_output_layer': False
            #     },
            #     'test': ['ritter_dataset_test'],
            # },
            # {
            #     'train': {
            #         'dataset': 'ritter_dataset',
            #         'input_checkpoint': 'trained/transfer_tdd/gmb+btc-tdd-simple.ckpt',
            #         'output_checkpoint': 'trained/transfer_tdd/gmb+btc+ritter-tdd-simple.ckpt',
            #         'model': 'tdd_simple_embedding_model',
            #         'train': 'train_tdd_output',
            #         'freeze_bi_lstm': False,
            #         'freeze_output_layer': False
            #     },
            #     'test': ['wsj_dataset'],
            # },
            # {
            #     'train': {
            #         'dataset': 'ritter_wsj_dataset_train',
            #         'input_checkpoint': 'trained/transfer_tdd/gmb+btc-tdd-simple.ckpt',
            #         'output_checkpoint': 'trained/transfer_tdd/gmb+btc+ritter_wsj_train-tdd-simple.ckpt',
            #         'model': 'tdd_simple_embedding_model',
            #         'train': 'train_tdd_output',
            #         'freeze_bi_lstm': False,
            #         'freeze_output_layer': False
            #     },
            #     'test': ['wsj_dataset_test'],
            # },
            # {
            #     'train': {
            #         'dataset': 'gmb_dataset',
            #         'input_checkpoint': None,
            #         'output_checkpoint': 'trained/transfer_tdd/gmb-tdd-elmo.ckpt',
            #         'model': 'tdd_elmo_embedding_model',
            #         'train': 'train_tdd_output',
            #         'freeze_bi_lstm': False,
            #         'freeze_output_layer': False
            #     },
            #     'test': ['btc_dataset','ritter_dataset'],
            # },
            # {
            #     'train': {
            #         'dataset': 'btc_dataset_train',
            #         'input_checkpoint': 'trained/transfer_tdd/gmb-tdd-elmo.ckpt',
            #         'output_checkpoint': 'trained/transfer_tdd/gmb+btc_train-tdd-elmo.ckpt',
            #         'model': 'tdd_elmo_embedding_model',
            #         'train': 'train_tdd_output',
            #         'freeze_bi_lstm': False,
            #         'freeze_output_layer': False
            #     },
            #     'test': ['btc_dataset_test','ritter_dataset'],
            # },
            # {
            #     'train': {
            #         'dataset': 'btc_dataset',
            #         'input_checkpoint': 'trained/transfer_tdd/gmb-tdd-elmo.ckpt',
            #         'output_checkpoint': 'trained/transfer_tdd/gmb+btc-tdd-elmo.ckpt',
            #         'model': 'tdd_elmo_embedding_model',
            #         'train': 'train_tdd_output',
            #         'freeze_bi_lstm': False,
            #         'freeze_output_layer': False
            #     },
            #     'test': ['ritter_dataset'],
            # },
            # {
            #     'train': {
            #         'dataset': 'ritter_dataset_train',
            #         'input_checkpoint': 'trained/transfer_tdd/gmb+btc-tdd-elmo.ckpt',
            #         'output_checkpoint': 'trained/transfer_tdd/gmb+btc+ritter_train-tdd-elmo.ckpt',
            #         'model': 'tdd_elmo_embedding_model',
            #         'train': 'train_tdd_output',
            #         'freeze_bi_lstm': False,
            #         'freeze_output_layer': False
            #     },
            #     'test': ['ritter_dataset_test'],
            # },
            # {
            #     'train': {
            #         'dataset': 'ritter_dataset',
            #         'input_checkpoint': 'trained/transfer_tdd/gmb+btc-tdd-elmo.ckpt',
            #         'output_checkpoint': 'trained/transfer_tdd/gmb+btc+ritter-tdd-elmo.ckpt',
            #         'model': 'tdd_elmo_embedding_model',
            #         'train': 'train_tdd_output',
            #         'freeze_bi_lstm': False,
            #         'freeze_output_layer': False
            #     },
            #     'test': ['wsj_dataset'],
            # },
            # {
            #     'train': {
            #         'dataset': 'ritter_wsj_dataset_train',
            #         'input_checkpoint': 'trained/transfer_tdd/gmb+btc-tdd-elmo.ckpt',
            #         'output_checkpoint': 'trained/transfer_tdd/gmb+btc+ritter_wsj_train-tdd-elmo.ckpt',
            #         'model': 'tdd_elmo_embedding_model',
            #         'train': 'train_tdd_output',
            #         'freeze_bi_lstm': False,
            #         'freeze_output_layer': False
            #     },
            #     'test': ['wsj_dataset_test'],
            # },
        ]
if __name__ == '__main__':
    # Run the single currently-uncommented configuration from the plan.
    experiment = ExperimentTransferTddTraining()
    experiment.run_experiment()
| StarcoderdataPython |
1616432 | #-------------------------------------------------------------------------------
# Name: module2
# Purpose:
#
# Author: user
#
# Created: 28/03/2019
# Copyright: (c) user 2019
# Licence: <your licence>
#-------------------------------------------------------------------------------
# Beginner exercise script: demonstrates float conversion of input and the
# arithmetic-operator precedence/associativity rules (BEDMAS/PEDMAS).
num1=input('Please enter a number: ')
num2=input('Please enter another number: ')
result=float(num1)+float(num2)
print(result)
print("""Mathematical operator order is - BEDMAS
or PEDMAS""")
print("""print("BEDMAS" "=") will show , ("BEDMAS" "=") """)
# Adjacent string literals are concatenated by the parser; the fourth quote
# after "=" starts a triple-quoted string whose first character is '"'.
print("BEDMAS" "=" """"Brackets, Exponents, Division,
Multiplication, Addition, Subtraction""")
print("""PEDMAS = Parenthecis (), Exponents **, Division /,
Multiplication *, Addition + , Subtraction - """)
print("""left to right or left associative rule
for * / - + operators""")
print(("print(6-3+2) =") + str(6-3+2))
print(("print(6-3+2) =") + str(6-3+2))
print(("print(6-3+2) =") + str(6-3+2))
print(("print(6/3*2) =") + str(6/3*2))
print("left to right or left associative rule for * / - + operators")
#----------------------
# ** is the one right-associative arithmetic operator: 2**3**2 == 2**(3**2).
print("but for ** right to left or right associative rule for * / - + operators")
print(("print(2**3**2) =") + str(2**3**2))
print(("2+3")+ "=" + str(2+3))
print(2+(3-1))
print("(2+(3-1))"+ "=" + str(2+(3-1)))
# In Python 3, / is always true (float) division regardless of operand types.
print(9/3)
print(("9/3")+ "=" + str(9/3))
print(9/3.0)
print(("9/3.0")+ "=" + str(9/3.0))
print(9.0/3)
print(("9.0/3")+ "=" + str(9.0/3))
print(9*3)
print(("9*3")+ "=" + str(9*3))
print(9.0*3)
print(9*3.0)
print(("9*3.0")+ "=" + str(9*3.0))
print((9-1+2)+(9/3*3))
print(("((9-1+2)+(9/3*3))")+ "=" + str((9-1+2)+(9/3*3)))
print(("(((-9)-1+2)+(9/3*(-3)))")+ "=" + str(((-9)-1+2)+(9/3*(-3))))
print((-9)*(-3))
print(("(-9)*(-3)")+ "=" + str((-9)*(-3)))
print(("(-9)/(-3)")+ "=" + str((-9)/(-3)))
print(("-9/-3")+ "=" + str(-9/-3))
print(("-9*-3")+ "=" + str(-9*-3))
print("idleAllin2.py")
print("================03-03-2019=====================04-03-2019=========\n==========================\n=====================")
# NOTE: // floors toward negative infinity, so -10/3 != -(10/3) when floored.
print(("10/3")+ "=" + str(10/3))
print(("-10/3")+ "=" + str(-10/3))
print(("10/-3")+ "=" + str(10/-3))
# Float literals are printed with their shortest round-trippable repr, so
# trailing zeros and digits beyond double precision are dropped.
print(9.73800000000)
print(("9.73800000000")+ "=" + str(9.73800000000))
print(("9.738043625475237500")+ "=" + str(9.738043625475237500))
print(("9.73804362547523750082364862865821")+ "=" + str(9.73804362547523750082364862865821))
print(("10**3")+ "=" + str(10**3))
print((" Quotient of " ) + ( " 10//3 ")+ " = " + str(10//3))
print((" Remainder of " ) + ( " 10%3 " )+ " = " + str(10%3))
A=14
B=40
print(""" IF input is A=14 and B=40 then,
modolous // and remainder % is """)
print("print(A//B) = " , A//B)
print("print(A%B) = " , (A%B))
print('''
''')
# Minimal console calculator: reads two floats and an operator token, then
# prints "<a> <op> <b> = <result>" exactly as the original if/elif ladder did.
a1 = float(input("Enter the first number: "))
op = input("Enter the operator or sign : ")
b1 = float(input("Enter the second number: "))
# Dispatch table replaces the seven-branch if/elif chain; one entry per operator.
_operations = {
    "+": lambda x, y: x + y,
    "-": lambda x, y: x - y,
    "/": lambda x, y: x / y,
    "*": lambda x, y: x * y,
    "**": lambda x, y: x ** y,
    "//": lambda x, y: x // y,
    "%": lambda x, y: x % y,
}
if op in _operations:
    try:
        print(str(a1), op, str(b1), "=", _operations[op](a1, b1))
    except ZeroDivisionError:
        # BUG FIX: the original crashed with an unhandled traceback when the
        # user divided (/, // or %) by zero.
        print("cannot divide by zero")
else:
    print("invalid operator or number")
# Demonstrates eval()/exec() on user-typed text.
# SECURITY NOTE: eval/exec on raw input() runs arbitrary code with the
# script's privileges — never do this with untrusted input.
print('''
print(eval(input()))
''')
print(eval(input()))
print('''
op=input( """please input a calculating or math operetion:
like 12+14 or 14*30 or 34//22 etc
""" )
print(eval(op))
''')
op=input( """please input a calculating or math operetion:
like 12+14 or 14*30 or 34//22 etc
""" )
print(eval(op))
print('''
''')
# NOTE(review): the print() below nests ''' inside a '''-quoted string — the
# inner ''' terminates the outer literal, so this statement does not parse as
# (presumably) intended; it looks like extraction/copy damage in the source.
print('''
codex=input(''' please input your code by typing
copy paste will not work till now :
''' )
print(codex)
print("The above code that you typed - will give output")
exec(codex)
''')
codex=input(''' please input your code by typing
copy paste will not work till now :
''' )
print(codex)
print("The above code that you typed - will give output")
exec(codex)
exec(codex) | StarcoderdataPython |
1641120 | # filename: examples/plot_interpolate_bad_channels.py (gh_stars: 1-10)
"""
================================================
Interpolate bad channels using spherical splines
================================================
This example shows how to interpolate bad EEG channels using spherical
splines as described in [1].
References
----------
[1] <NAME>., <NAME>., <NAME>. and <NAME>. (1989) Spherical
splines for scalp potential and current density mapping.
Electroencephalography and Clinical Neurophysiology, Feb; 72(2):184-7.
"""
# Authors: <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
import mne
from mne import io
from mne.datasets import sample
print(__doc__)

# Download (if needed) and locate the MNE sample dataset on disk.
data_path = sample.data_path()

###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
event_id, tmin, tmax = 1, -0.2, 0.5  # event code and epoch window (seconds)

# Setup for reading the raw data.
# NOTE(review): io.Raw on a filename is legacy MNE API — modern code uses
# mne.io.read_raw_fif; confirm against the MNE version pinned for this example.
raw = io.Raw(raw_fname)
events = mne.read_events(event_fname)

raw.info['bads'] = ['EEG 053']  # mark bad channels

# pick EEG channels and keep bads for interpolation (exclude=[] keeps them)
picks = mne.pick_types(raw.info, meg=False, eeg=True, eog=True,
                       exclude=[])

# Read epochs; drop any epoch exceeding the EOG/EEG peak-to-peak thresholds.
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0),
                    reject=dict(eog=150e-6, eeg=80e-6),
                    preload=True)

# plot evoked response with bads still present
evoked_before = epochs.average()
evoked_before.plot(exclude=[])

# compute interpolation (also works with Raw and Epochs objects)
# NOTE(review): interpolate_bads_eeg is the old method name; newer MNE exposes
# interpolate_bads() — verify against the targeted MNE release.
evoked_after = evoked_before.copy().interpolate_bads_eeg()

# plot interpolated (previous bads)
evoked_after.plot(exclude=[])
| StarcoderdataPython |
114612 | # -*- coding: utf-8 -*-
"""コントローラ
本ライブラリにおけるコントローラとは人間に代わってアクション選択を行うプログラムを指す。
"""
import random
class Randomizer(object):
    """Controller that picks uniformly at random among the selectable actions.

    (A "controller" in this library is a program that chooses actions in
    place of a human player.)
    """

    def __init__(self, params):
        """Configure the controller.

        @param params: dict of settings:
            'actions': number of selectable actions (defaults to 1)
            'seed': optional seed for the module-level random generator
        """
        self.actions = int(params['actions']) if 'actions' in params else 1
        if 'seed' in params:
            random.seed(params['seed'])

    def action(self, observation):
        """Choose an action index uniformly at random.

        @param observation: environment observation (unused by this controller)
        @return: chosen action index in [0, actions - 1]
        """
        return random.randrange(self.actions)
| StarcoderdataPython |
1748807 | import os
import datetime
# change your parent dir accordingly — demonstrates common os / os.path
# operations: directory creation, path inspection, access() checks,
# renaming, file sizes and timestamps.
try:
    directory = "ExDirFiles"
    parent_dir = "E:/GitHub/1) Git_Tutorials_Repo_Projects/core-python/Core_Python/"
    path = os.path.join(parent_dir, directory)
    # mode=0o666 sets read+write permission bits; mkdir also works without an
    # explicit mode. os.path.exists(path) would work for the check as well.
    if not os.path.isdir(path):
        # For nested directories (e.g. .../a/b) use os.makedirs instead.
        os.mkdir(path, mode=0o666)
    f = open(os.path.join(path + "/", "DirMethods.txt"), 'a')
    print("seperator : ", os.sep, "\nos path seperator : ", os.pathsep)
    print("Operating system name Name : ", os.name)
    print("Current Working Directory : ", os.getcwd())
    print("Change Directory : ", os.chdir(path))
    print("Current Working Directory after change directory : ", os.getcwd())
    print("Access (test the existence of path) : ", os.access(path, mode=os.F_OK))
    print("Access (test the readability of path) : ", os.access(path, os.R_OK))
    print("Access (test the writbaility of path) : ", os.access(path, os.W_OK))
    print("Access (determine if path can be executed.) : ", os.access(path, os.X_OK))
    print("Path : ", f.name)
    base_name = os.path.basename(path + "/" + "DirMethods.txt")
    print("File name with extension : ", base_name)
    print("File name with out extension (tuple) : ", os.path.splitext(base_name))
    print("File name with out extension (file name) : ", os.path.splitext(base_name)[0], "\nExtension name : ",
          os.path.splitext(base_name)[1])
    print("Before Rename : ", os.listdir(path))
    # NOTE(review): this rename only succeeds on the first run — afterwards
    # OldFoo.txt no longer exists and the except block below prints the error.
    os.rename(os.path.join(path + "/", "OldFoo.txt"), os.path.join(path + "/", "NewFoo.txt"))
    print("After Rename : ", os.listdir(path))
    f_path = os.path.join(path + "/", "NewFoo.txt")
    print("File size in bytes : ", os.path.getsize(f_path))
    # BUG FIX: the original called os.path.getsize() on both "Last modified"
    # lines, printing the file SIZE under a timestamp label. getmtime()
    # returns the real modification time (UNIX seconds since 1970-01-01).
    print("Last modified Date (UNIX Timestamp) : ", os.path.getmtime(f_path))
    print("Last modified Date (Datetime Module) : ", datetime.datetime.fromtimestamp(os.path.getmtime(f_path)))
    print("Absolute path : ", os.path.abspath("NewFoo.txt"))
    f.close()
except (IOError, FileNotFoundError, OSError, Exception) as e:
    # Broad catch keeps the demo from crashing; note IOError and
    # FileNotFoundError are already OSError subclasses, and Exception
    # subsumes them all — the tuple is kept for instructional symmetry.
    print(e)
| StarcoderdataPython |
3245055 | # -*- coding: utf-8 -*-
"""
Solution to Project Euler problem 346
Author: <NAME>
https://github.com/jaimeliew1/Project_Euler_Solutions
"""
'''
x = 1111_2 is a 4 digit number in base 2 consisting of only ones.
x = 2^0 + 2^1 + 2^2 + 2^3 = 15
y = 111...111_b is an n digit number in base b consisting of only ones.
y = b^0 + b^1 + ... + b^(n-1)
lemma 1:
y is a repunit in base y-1 with length n=2. So ignore these and start search
with n=3.
lemma 2:
the base of repunit y is always less than y.
lemma 3:
there are only two numbers which are repnumbers in 3 bases (or more): 31
and ?. so use a set.
lemma 4:
10e12^(1/2) = 1000000. This is the largest base that needs to b searched as
we are only looking for repunits with length => 3. y_b,3 = 1 + b + b^2
'''
def run(limit=10**12):
    """Solve Project Euler 346: sum all "strong repunits" below *limit*.

    A strong repunit is a number that is 111...1 (length >= 3) in at least
    one base b >= 2; every y is trivially "11" in base y-1, so only lengths
    of 3 or more are searched (plus the special case 1, a repunit in every
    base). Results are collected in a set so numbers that are repunits in
    several bases (e.g. 31) are counted once.

    The original hard-coded limit of 1e12 is now the default parameter, so
    run() is unchanged for existing callers.

    @param limit: exclusive upper bound on the repunits to sum
    @return: the sum as an int
    """
    # Largest base worth checking: 1 + b + b^2 < limit requires b <= sqrt(limit).
    # +1 guards against float truncation; oversized bases fail the while below.
    base_limit = int(limit ** 0.5) + 1
    repunits = {1}
    for base in range(2, base_limit + 1):
        value = 1 + base + base * base  # repunit of length 3 in this base
        while value < limit:
            repunits.add(value)
            value = value * base + 1  # append one more "1" digit
    return sum(repunits)


if __name__ == "__main__":
    print(run())
| StarcoderdataPython |
3302286 | # filename: python/flowdec/__init__.py
import os.path as osp
# Absolute path to the installed flowdec package directory.
pkg_dir = osp.abspath(osp.dirname(__file__))
# Bundled sample datasets shipped inside the package.
data_dir = osp.normpath(osp.join(pkg_dir, 'datasets'))
# TensorFlow graph files two levels above the package — this resolves only in
# a source checkout, not from an installed wheel (presumably intentional;
# TODO confirm).
tf_graph_dir = osp.normpath(osp.join(pkg_dir, '../../tensorflow'))
| StarcoderdataPython |
1743772 | # filename: commands/cmdsets/standard.py
"""
Basic starting cmdsets for characters. Each of these
cmdsets attempts to represent some aspect of how
characters function, so that different conditions
on characters can extend/modify/remove functionality
from them without explicitly calling individual commands.
"""
import traceback
from importlib import import_module


def _safe_import(module_path, attr=None, alias=None, label=None):
    """Bind *module_path* (or a single *attr* from it) into this module's globals.

    Import failures are logged (traceback + "<<ERROR>>" line) but swallowed,
    so one broken command module does not prevent the rest of this module
    from loading. A failed import simply leaves its name unbound; the cmdset
    below that references it then raises NameError — the same net behavior
    as the ~150 lines of copy-pasted per-import try/except blocks this
    helper replaces.

    @param module_path: dotted path of the module to import
    @param attr: optional attribute to pull out of the module instead
    @param alias: optional name to bind as (defaults to attr or last path part)
    @param label: human-readable label used in the error message
    """
    name = alias or attr or module_path.rsplit(".", 1)[-1]
    try:
        module = import_module(module_path)
        globals()[name] = getattr(module, attr) if attr else module
    except Exception as err:  # deliberately broad: mirror original behavior
        traceback.print_exc()
        print("<<ERROR>>: Error encountered in loading %s: %s" % (label or name, err))


# Unguarded in the original as well: a failure here is fatal by design.
from commands.base_commands import exchanges

# (module_path, attr, alias, label) for every failure-tolerant import.
for _module_path, _attr, _alias, _label in (
    ("evennia.commands.default.help", None, None, "default commands"),
    ("evennia.commands.default.admin", None, None, "default commands"),
    ("evennia.commands.default.system", None, None, "default commands"),
    ("evennia.commands.default.building", None, None, "default commands"),
    ("evennia.commands.default.batchprocess", None, None, "default commands"),
    ("evennia.commands.default.general", None, "default_general", "default.general commands"),
    ("commands.base_commands.staff_commands", None, None, "staff_commands"),
    ("commands.base_commands.roster", None, None, "roster commands"),
    ("commands.base_commands.general", None, None, "general commands"),
    ("typeclasses.rooms", None, "extended_room", "extended_room"),
    ("commands.base_commands.social", None, None, "social commands"),
    ("commands.base_commands.xp", None, None, "xp commands"),
    ("commands.base_commands.maps", None, None, "maps commands"),
    ("typeclasses.places.cmdset_places", None, None, "places commands"),
    ("commands.cmdsets.combat", None, None, "combat commands"),
    ("world.dominion.general_dominion_commands", None, "domcommands", "dominion commands"),
    ("world.dominion.agent_commands", None, None, "agent commands"),
    ("commands.base_commands.crafting", None, None, "crafting commands"),
    ("commands.cmdsets.home", None, None, "home commands"),
    ("web.character.investigation", None, None, "investigation commands"),
    ("commands.base_commands.overrides", None, None, "override commands"),
    ("typeclasses.consumable.use_commands", "CmdApplyConsumable", None, "consumable commands"),
    ("typeclasses.gambling.cmdset_gambling", None, "gambling", "gambling commands"),
    ("commands.base_commands.rolling", None, None, "roll commands"),
    ("commands.base_commands.story_actions", None, None, "storyaction commands"),
    ("world.conditions.condition_commands", None, None, "condition commands"),
    ("world.fashion.fashion_commands", None, None, "fashion commands"),
    ("world.petitions.petitions_commands", None, None, "petition commands"),
    ("typeclasses.containers.container", "CmdRoot", None, "container commands"),
    ("world.weather.weather_commands", None, None, "weather commands"),
    # Label fixed: the original printed "container commands" here (copy-paste slip).
    ("world.templates.template_commands", "CmdTemplateForm", None, "template commands"),
    ("world.exploration.exploration_commands", None, None, "exploration commands"),
    ("world.dominion.plots.plot_commands", None, None, "plot commands"),
    ("web.character.goal_commands", None, None, "goal commands"),
    ("world.magic.magic_commands", None, None, "magic commands"),
):
    _safe_import(_module_path, _attr, _alias, _label)

# Unguarded in the original too: these must import or the module fails to load.
from world.stat_checks import check_commands
from world.prayer import prayer_commands
from evennia.commands.cmdset import CmdSet
class OOCCmdSet(CmdSet):
    """Character-specific OOC commands. Most OOC commands defined in player."""
    key = "OOCCmdSet"

    def at_cmdset_creation(self):
        """
        Populate the set with command instances.

        This is the only method defined in a cmdset, called during its
        creation. Note that it can also take other cmdsets as arguments,
        which will be used by the character default cmdset to add all of
        these onto the internal cmdset stack; they can then be removed or
        replaced as needed.
        """
        # Inventory / account utilities
        self.add(overrides.CmdInventory())
        self.add(default_general.CmdNick())
        self.add(default_general.CmdAccess())
        # Dice and stat-check commands
        self.add(rolling.CmdDiceString())
        self.add(rolling.CmdDiceCheck())
        self.add(rolling.CmdOldSpoofCheck())
        self.add(check_commands.CmdStatCheck())
        self.add(check_commands.CmdHarm())
        self.add(check_commands.CmdSpoofCheck())
        # Display / room utilities
        self.add(general.CmdBriefMode())
        self.add(general.CmdTidyUp())
        self.add(extended_room.CmdGameTime())
        self.add(extended_room.CmdSetGameTimescale())
        self.add(extended_room.CmdStudyRawAnsi())
        self.add(xp.CmdVoteXP())
        # Social/OOC quality-of-life commands
        self.add(social.CmdPosebreak())
        self.add(social.CmdSocialNotable())
        self.add(social.CmdSocialNominate())
        self.add(social.CmdSocialReview())
        # self.add(social.CmdFavor()) #when enabled, please re-add "favor" to random_rp_command_keys in CmdRandomScene
        self.add(overrides.SystemNoMatch())
        self.add(weather_commands.CmdAdminWeather())
        self.add(roster.CmdPropriety())
        # Exploration!
        self.add(exploration_commands.CmdExplorationCmdSet())
class StateIndependentCmdSet(CmdSet):
    """
    Character commands that will always exist, regardless of character state.

    Poses and emits, for example, should be allowed even when a character is
    dead, because they might be posing something about the corpse, etc.
    """
    key = "StateIndependentCmdSet"

    def at_cmdset_creation(self):
        """Register the always-available commands."""
        self.add(overrides.CmdPose())
        # emit was originally an admin command. Replaced those with gemit
        self.add(overrides.CmdEmit())
        self.add(overrides.CmdArxTime())
        self.add(general.CmdOOCSay())
        self.add(general.CmdDirections())
        self.add(general.CmdKeyring())
        self.add(general.CmdGlance())
        # sorta IC commands, since information is interpreted by the
        # character and may not be strictly accurate.
        self.add(extended_room.CmdExtendedLook())
        self.add(roster.CmdHere())
        self.add(social.CmdHangouts())
        self.add(social.CmdWhere())
        self.add(social.CmdJournal())
        self.add(social.CmdMessenger())
        self.add(social.CmdRoomHistory())
        self.add(social.CmdRoomMood())
        self.add(social.CmdRandomScene())
        self.add(social.CmdRoomTitle())
        self.add(social.CmdTempDesc())
        self.add(social.CmdLanguages())
        self.add(maps.CmdMap())
        # Story/plot participation commands
        self.add(story_actions.CmdAction())
        self.add(plot_commands.CmdPlots())
        self.add(goal_commands.CmdGoals())
        self.add(combat.CmdHeal())
        # Magic!
        self.add(magic_commands.MagicCmdSet())
class MobileCmdSet(CmdSet):
    """
    Commands that should only be allowed if the character is able to move.

    Thought about making a 'living' cmdset, but there honestly aren't any
    current commands that could be executed while a player is alive but
    unable to move. The sets are just equal.
    """
    key = "MobileCmdSet"

    def at_cmdset_creation(self):
        """Register commands that require physical mobility."""
        # Object manipulation and speech
        self.add(overrides.CmdGet())
        self.add(overrides.CmdDrop())
        self.add(exchanges.CmdGive())
        self.add(exchanges.CmdTrade())
        self.add(overrides.CmdArxSay())
        self.add(general.CmdWhisper())
        self.add(general.CmdFollow())
        self.add(general.CmdDitch())
        self.add(general.CmdShout())
        self.add(general.CmdPut())
        self.add(general.CmdLockObject())
        # Advancement
        self.add(xp.CmdTrain())
        self.add(xp.CmdUseXP())
        self.add(cmdset_places.CmdListPlaces())
        # Combat
        self.add(combat.CmdStartCombat())
        self.add(combat.CmdProtect())
        self.add(combat.CmdAutoattack())
        self.add(combat.CmdCombatStats())
        self.add(combat.CmdOldHarm())
        self.add(combat.CmdFightStatus())
        self.add(agent_commands.CmdGuards())
        # Dominion (economy/realm) commands
        self.add(domcommands.CmdPlotRoom())
        # self.add(domcommands.CmdTask())
        # self.add(domcommands.CmdSupport())
        self.add(domcommands.CmdWork())
        self.add(domcommands.CmdCleanupDomain())
        # Crafting
        self.add(crafting.CmdCraft())
        self.add(crafting.CmdRecipes())
        self.add(crafting.CmdJunk())
        # Social interactions requiring presence
        self.add(social.CmdPraise())
        # self.add(social.CmdCondemn())
        self.add(social.CmdThink())
        self.add(social.CmdFeel())
        self.add(social.CmdDonate())
        self.add(social.CmdFirstImpression())
        self.add(social.CmdGetInLine())
        # Investigations and miscellany
        self.add(investigation.CmdInvestigate())
        self.add(investigation.CmdAssistInvestigation())
        self.add(general.CmdDump())
        self.add(CmdApplyConsumable())
        self.add(gambling.CmdDice())
        self.add(fashion_commands.CmdFashionModel())
        self.add(fashion_commands.CmdFashionOutfit())
        self.add(petitions_commands.CmdPetition())
        self.add(condition_commands.CmdKnacks())
        self.add(prayer_commands.CmdPray())
        self.add(plot_commands.CmdStlist())
class StaffCmdSet(CmdSet):
    """OOC staff and building commands. Character-based due to interacting with game world."""
    key = "StaffCmdSet"

    def at_cmdset_creation(self):
        """Register staff/builder commands."""
        # The help system
        self.add(help.CmdSetHelp())
        # System commands
        self.add(overrides.CmdArxScripts())
        self.add(system.CmdObjects())
        self.add(system.CmdAccounts())
        self.add(system.CmdService())
        self.add(system.CmdAbout())
        self.add(system.CmdServerLoad())
        # Admin commands
        self.add(admin.CmdBoot())
        self.add(admin.CmdBan())
        self.add(admin.CmdUnban())
        self.add(admin.CmdPerm())
        self.add(admin.CmdWall())
        # Building and world manipulation
        self.add(overrides.CmdTeleport())
        self.add(building.CmdSetObjAlias())
        self.add(building.CmdListCmdSets())
        self.add(building.CmdWipe())
        self.add(building.CmdName())
        self.add(building.CmdCpAttr())
        self.add(building.CmdMvAttr())
        self.add(building.CmdCopy())
        self.add(building.CmdFind())
        self.add(building.CmdOpen())
        self.add(building.CmdLink())
        self.add(building.CmdUnLink())
        self.add(building.CmdCreate())
        self.add(overrides.CmdDig())
        self.add(building.CmdTunnel())
        self.add(overrides.CmdArxDestroy())
        self.add(overrides.CmdArxExamine())
        self.add(building.CmdTypeclass())
        self.add(overrides.CmdArxLock())
        self.add(building.CmdScript())
        self.add(building.CmdSetHome())
        self.add(overrides.CmdArxTag())
        # Batchprocessor commands
        self.add(batchprocess.CmdBatchCommands())
        self.add(batchprocess.CmdBatchCode())
        # more recently implemented staff commands
        self.add(staff_commands.CmdGemit())
        self.add(staff_commands.CmdWall())
        self.add(staff_commands.CmdHome())
        self.add(staff_commands.CmdResurrect())
        self.add(staff_commands.CmdKill())
        self.add(staff_commands.CmdForce())
        self.add(staff_commands.CmdCcolor())
        self.add(staff_commands.CmdGMDisguise())
        self.add(staff_commands.CmdGMEvent())
        self.add(staff_commands.CmdRelocateExit())
        self.add(staff_commands.CmdAdminKey())
        self.add(staff_commands.CmdAdminPropriety())
        self.add(staff_commands.CmdAdjustFame())
        self.add(staff_commands.CmdAdjust())
        # Plot/goal GM tools
        self.add(plot_commands.CmdGMPlots())
        self.add(plot_commands.CmdStoryCoordinators())
        self.add(goal_commands.CmdGMGoals())
        self.add(extended_room.CmdExtendedDesc())
        self.add(xp.CmdAdjustSkill())
        self.add(xp.CmdAwardXP())
        self.add(maps.CmdMapCreate())
        self.add(maps.CmdMapRoom())
        # Combat GM tools
        self.add(combat.CmdObserveCombat())
        self.add(combat.CmdAdminCombat())
        self.add(combat.CmdCreateAntagonist())
        self.add(combat.CmdStandYoAssUp())
        self.add(domcommands.CmdSetRoom())
        self.add(condition_commands.CmdModifiers())
        # home commands
        self.add(home.CmdAllowBuilding())
        self.add(home.CmdBuildRoom())
        self.add(home.CmdManageRoom())
        self.add(CmdRoot())
        # still pending implementation of additional details
        self.add(CmdTemplateForm())
| StarcoderdataPython |
1611903 | # repo: Christian-B/my_spinnaker
from collections import namedtuple
import time
from typing import NamedTuple
class Foo(object):
    """Slotted record with direct attribute writes (no properties)."""

    __slots__ = ("alpha", "beta", "gamma")

    def __init__(self, alpha, beta, gamma):
        self.alpha, self.beta, self.gamma = alpha, beta, gamma
# Immutable three-field record: the namedtuple benchmark case.
Bar = namedtuple("Bar", "alpha beta gamma")
class Gamma(object):
    """Slotted record exposing its fields through read-only properties."""

    __slots__ = ("_alpha", "_beta", "_gamma")

    def __init__(self, alpha, beta, gamma):
        self._alpha = alpha
        self._beta = beta
        self._gamma = gamma

    # property() calls instead of @property decorators — same descriptors.
    alpha = property(lambda self: self._alpha, doc="stored alpha value")
    beta = property(lambda self: self._beta, doc="stored beta value")
    gamma = property(lambda self: self._gamma, doc="stored gamma value")
class Epsilon(NamedTuple):
    """Typed NamedTuple benchmark case with three int fields."""

    alpha: int
    beta: int
    gamma: int
class Bacon(object):
    """Like Gamma but without __slots__: per-instance __dict__ plus properties."""

    def __init__(self, alpha, beta, gamma):
        for name, value in zip(("_alpha", "_beta", "_gamma"), (alpha, beta, gamma)):
            setattr(self, name, value)

    @property
    def alpha(self):
        """Stored alpha value."""
        return self._alpha

    @property
    def beta(self):
        """Stored beta value."""
        return self._beta

    @property
    def gamma(self):
        """Stored gamma value."""
        return self._gamma
class Eggs(object):
    """Plain attribute bag: the no-slots, no-properties baseline case."""

    def __init__(self, alpha, beta, gamma):
        self.alpha, self.beta, self.gamma = alpha, beta, gamma
# Micro-benchmark: construct each subject `loops` times and read all three
# attributes, timing each attribute-storage style.
loops = 1000000
start = time.time()
for i in range(loops):
    foo = Foo(1, 2, 3)
    a = foo.alpha
    b = foo.beta
    c = foo.gamma
end = time.time()
print("Foo", end-start)
start = time.time()
for i in range(loops):
    bar = Bar(1, 2, 3)
    a = bar.alpha
    b = bar.beta
    c = bar.gamma  # bug fix: previously read foo.gamma, skewing the Bar timing
end = time.time()
print("Bar", end-start)
start = time.time()
for i in range(loops):
    gamma = Gamma(1, 2, 3)
    a = gamma.alpha
    b = gamma.beta  # bug fix: result was previously bound to `g`, not `b`
    c = gamma.gamma
end = time.time()
print("Gamma", end-start)
start = time.time()
for i in range(loops):
    epsilon = Epsilon(1, 2, 3)
    a = epsilon.alpha
    b = epsilon.beta
    c = epsilon.gamma
end = time.time()
print("Epsilon", end-start)
# NamedTuple annotations are not enforced at runtime:
boo = Epsilon("a", "2", "3")
print(boo)
start = time.time()
for i in range(loops):
    bacon = Bacon(1, 2, 3)
    a = bacon.alpha
    b = bacon.beta
    c = bacon.gamma
end = time.time()
print("Bacon", end-start)
start = time.time()
for i in range(loops):
    eggs = Eggs(1, 2, 3)
    a = eggs.alpha
    b = eggs.beta
    c = eggs.gamma
end = time.time()
print("Eggs", end-start)
| StarcoderdataPython |
169056 | from ctypes import util
from typing import List
from ._augment import Augment
from ._augment_groups import AugmentGroups
from effect import *
from effect import EffectTypes as ET
from util import many_effs_with_same_amount
# Group metadata: every "tria" augment belongs to TRIA and conflicts with
# any other augment from the same group.
GROUP = AugmentGroups.TRIA
CONFLICT = (GROUP,)
augments: List[Augment] = []
# Primary stat suffixes, paired one-to-one with an offensive-potency effect
# of amount 1.0225 (presumably a +2.25% multiplier — confirm Effect semantics).
_primary_names = ("mel", "ra", "tech")
_primary_effs = many_effs_with_same_amount(OFFENSIVE_POT, 1.0225)
# Secondary prefixes, paired positionally with the drawback effects below.
_secondary_names = ("staro", "spiro", "deftro", "guaro")
_secondary_effs = (
    Effect(ET.HP, -5),
    Effect(ET.PP, -3),
    Effect(ET.FLOOR_POT, 0.99),
    Effect(ET.DMG_RES, 0.99),
)
# -----------------------------------------------
# Build every primary/secondary combination as a "tria <sec><pri>" augment.
for primary_label, primary_eff in zip(_primary_names, _primary_effs):
    for secondary_label, secondary_eff in zip(_secondary_names, _secondary_effs):
        # Additive secondary effects must precede the primary effect;
        # otherwise the primary effect comes first.
        if secondary_eff.eff in ADDITIVE_EFFECT_TYPE:
            ordered_effs = (secondary_eff, primary_eff)
        else:
            ordered_effs = (primary_eff, secondary_eff)
        augments.append(
            Augment(
                f"tria {secondary_label}{primary_label}",
                0,
                6,
                ordered_effs,
                GROUP,
                CONFLICT,
            )
        )
# -----------------------------------------------
| StarcoderdataPython |
3300184 | '''
You are given an n x n 2D matrix representing an image.
Rotate the image by 90 degrees (clockwise).
Note:
You have to rotate the image in-place, which means you have to modify the input 2D matrix directly. DO NOT allocate another 2D matrix and do the rotation.
Example 1:
Given input matrix =
[
[1,2,3],
[4,5,6],
[7,8,9]
],
rotate the input matrix in-place such that it becomes:
[
[7,4,1],
[8,5,2],
[9,6,3]
]
Example 2:
Given input matrix =
[
[ 5, 1, 9,11],
[ 2, 4, 8,10],
[13, 3, 6, 7],
[15,14,12,16]
],
rotate the input matrix in-place such that it becomes:
[
[15,13, 2, 5],
[14, 3, 4, 1],
[12, 6, 8, 9],
[16, 7,10,11]
]
SOLUTION:
- Transpose the matrix. (For each [i, j] swap arr[i, j] with arr[j, i]).
- Reverse each row.
Complexity -> O(n*n)
'''
| StarcoderdataPython |
1638860 | <filename>visualizer/visualizer.py
# TODO 1. Implement parent visualizer class
# TODO 2. Integrate to GUI
import cv2
import numpy as np
import re
import enum
from jcr.rnn import JcrGRU
from jcr.trainer import load_checkpoint
from jcr.evaluator import evaluate_frame
from utils.misc import get_folders_and_files
from utils.converter import *
class _SensorJointNumber(enum.IntEnum):
openni = 15
kinect_v1 = 20
kinect_v2 = 25
class Visualizer(object):
    """Base class for dataset visualizers (placeholder; see TODO 1 at top of file)."""
    def __init__(self, directory: str, trained_model, labels):
        # Intentionally empty: concrete subclasses perform their own setup.
        pass
class VisualizerUTKinect(Visualizer):
    """Visualizer for the UTKinect dataset: draws the skeleton joints and the
    model's per-frame action prediction on top of each RGB frame."""
    def __init__(self, directory: str, trained_model, labels):
        super().__init__(directory, trained_model, labels)
        # Bug fix: str.replace returns a new string; the result was previously
        # discarded, so backslash (Windows) paths were never normalized.
        directory = directory.replace('\\', '/')
        if directory[-1] != '/':
            directory += '/'
        self.root_dir = directory
        self.seq_ids = list()
        # UTKinect was captured with a Kinect v1 (20 joints per person).
        self._joints_per_person = _SensorJointNumber.kinect_v1
        self._val_model = trained_model
        self._labels = labels
        self.load_sequence_list()
        self.seq_ids = sorted(self.seq_ids)
    def load_sequence_list(self):
        """Collect the sequence ids (sub-folder names) under <root>/RGB/."""
        rgb_dir = self.root_dir + 'RGB/'
        seq_dirs, _ = get_folders_and_files(rgb_dir)
        self.seq_ids.extend(seq_dirs)
    def visualize_sequence(self, idx):
        """Play back sequence `idx`: evaluate the model on each frame's joints
        and overlay joints plus the predicted label on the image (Esc aborts)."""
        seq_dir = self.root_dir + 'RGB/' + self.seq_ids[idx] + '/'
        _, file_list = get_folders_and_files(seq_dir)
        # Frame numbers are taken from the digits embedded in each .jpg name.
        img_ids = []
        for filename in file_list:
            if filename.endswith('.jpg'):
                img_ids.append(int(re.findall(r'\d+', filename)[0]))
            else:
                continue
        img_ids = sorted(img_ids)
        not_visited_joint_frames = []
        joint_seq = np.array([], dtype=np.float32).reshape(0, self._joints_per_person*3)
        with open(self.root_dir + "joints/" + "joints_" + self.seq_ids[idx] + ".txt") as joint_file:
            not_visited_joint_frames.extend(img_ids)
            for line in joint_file:
                line = line.rstrip()
                if line == '':  # end of file
                    break
                temp_list = line.split()
                current_frame_idx = int(temp_list[0])
                if current_frame_idx in not_visited_joint_frames:
                    not_visited_joint_frames.remove(current_frame_idx)  # There can be frame duplicates
                    frame = []
                    # Joints are stored as (x, y, z) triples; scale to millimeters.
                    for joint_idx in range(self._joints_per_person):
                        frame.append(float(temp_list[joint_idx * 3 + 1]) * 1000.)  # x
                        frame.append(float(temp_list[joint_idx * 3 + 2]) * 1000.)  # y
                        frame.append(float(temp_list[joint_idx * 3 + 3]) * 1000.)  # z
                    joint_seq = np.vstack((joint_seq, np.array(frame, dtype=np.float32)))
        assert len(joint_seq) == len(img_ids), 'Image sequence length and joint sequence length do not match.'
        joint_seq_3d = reshape_2d_sequence_to_3d(joint_seq)
        for frame_idx, img_id in enumerate(img_ids, 0):
            joints = joint_seq_3d[frame_idx]
            pred_label_idx, pred_label_conf, _, _ = evaluate_frame(joint_seq[frame_idx], self._val_model)
            img = cv2.imread(seq_dir + 'colorImg' + str(img_id) + '.jpg')
            cv2.putText(img,
                        'Class prediction: ' + self._labels[pred_label_idx] +
                        ', Confidence: ' + str(round(pred_label_conf * 100., 3)) + '%',
                        (0, 20),
                        cv2.FONT_HERSHEY_COMPLEX_SMALL,
                        1,
                        (0, 64, 224),
                        2)
            for joint_coords in joints:
                # NOTE(review): 531.15 looks like the Kinect v1 focal length in
                # pixels for a 640x480 frame — confirm against world_coords_to_image.
                joint_img_coords = world_coords_to_image(joint_coords, 531.15, 640, 480)
                cv2.circle(img, joint_img_coords, 5, (255, 255, 0), thickness=2)
            cv2.namedWindow('UTKinect')
            cv2.imshow('UTKinect', img)
            # ~30 FPS playback; Esc (27) stops the sequence early.
            if cv2.waitKey(34) == 27:
                break
| StarcoderdataPython |
1783661 | """
Install Module
"""
# Django
from django.core.management import execute_from_command_line
# local Django
from app.modules.util.helpers import Helpers
from app.modules.entity.option_entity import Option_Entity
from app.modules.entity.user_entity import User_Entity
class Install():
    """Installs the application: runs migrations, seeds the default options
    and creates the initial admin user."""
    __option_entity = None
    __user_entity = None
    # Default option rows seeded on install; set_app_data() appends to this.
    # NOTE(review): __options and __admin are class-level and mutated in
    # place, so they are shared across Install instances — confirm intended.
    __options = [
        {"key": "app_installed", "value": "true", "autoload": True},
        {"key": "app_description", "value": "", "autoload": False},
        {"key": "google_analytics_account", "value": "", "autoload": True},
        {"key": "reset_mails_messages_count", "value": "5", "autoload": False},
        {"key": "reset_mails_expire_after", "value": "24", "autoload": False},
        {"key": "access_tokens_expire_after", "value": "48", "autoload": False}
    ]
    # Template for the initial superuser; filled in via set_admin_data().
    __admin = {
        "username": "",
        "email": "",
        "password": "",
        "is_superuser": True,
        "is_active": True,
        "is_staff": False
    }
    __helpers = None
    __logger = None
    def __init__(self):
        self.__option_entity = Option_Entity()
        self.__user_entity = User_Entity()
        self.__helpers = Helpers()
        self.__logger = self.__helpers.get_logger(__name__)
    def is_installed(self):
        """Return True if the app_installed option row exists."""
        # get_one_by_key returns False when the option row is missing;
        # simplified from the previous `False if x == False else True`.
        return self.__option_entity.get_one_by_key("app_installed") != False
    def set_app_data(self, name, email, url):
        """Queue the basic application options for insertion."""
        self.__options.append({"key": "app_name", "value": name, "autoload": True})
        self.__options.append({"key": "app_email", "value": email, "autoload": True})
        self.__options.append({"key": "app_url", "value": url, "autoload": True})
    def set_admin_data(self, username, email, password):
        """Set the credentials used for the initial admin account."""
        self.__admin["username"] = username
        self.__admin["email"] = email
        self.__admin["password"] = password
    def install(self):
        """Run migrations, then insert the options and the admin user.

        Returns True only if every step succeeded.
        """
        try:
            execute_from_command_line(["manage.py", "migrate"])
        except Exception as e:
            self.__logger.error("Error While Running Migrations: %s" % e)
            return False
        status = True
        status &= self.__option_entity.insert_many(self.__options)
        status &= (self.__user_entity.insert_one(self.__admin) != False)
        return status
# Short human-readable description of this template.
desc = "Windows based eth0 interface file template for private cloud"
# Windows batch-script template for first-boot network setup; the
# placeholders ({ipv4_addr}, {ipv4_mask}, {ipv4_gw}, {ipv4_dns1},
# {ipv4_dns2}) are expected to be filled in via str.format().
data = """@echo off
echo ##############################################################################
echo # #
echo # FIRST BOOT SETUP #
echo # #
echo # Please don't close the window it will closed automatically #
echo # #
echo ##############################################################################
REM IPv4 Public
netsh interface ipv4 delete dnsservers "Ethernet" all
netsh interface ipv4 reset
netsh interface ipv4 set address "Ethernet" static {ipv4_addr} {ipv4_mask} {ipv4_gw}
netsh interface ipv4 add dnsservers "Ethernet" {ipv4_dns1}
netsh interface ipv4 add dnsservers "Ethernet" {ipv4_dns2} index=2"""
| StarcoderdataPython |
3279990 | <reponame>askprash/pyCycle
import sys
import openmdao.api as om
import pycycle.api as pyc
class WetTurbojet(pyc.Cycle):
    """Single-spool turbojet cycle whose inlet air includes water vapor ("wet" air)."""
    def initialize(self):
        """Declare cycle-level options."""
        self.options.declare('design', default=True,
                              desc='Switch between on-design and off-design calculation.')
    def setup(self):
        """Build the engine elements, flow connections, balances and solver."""
        wet_thermo_spec = pyc.species_data.wet_air #special species library is called that allows for using initial compositions that include both H and C
        janaf_thermo_spec = pyc.species_data.janaf #standard species library is called for use in and after burner
        design = self.options['design']
        # Add engine elements
        self.pyc_add_element('fc', pyc.FlightConditions(thermo_data=wet_thermo_spec, use_WAR=True,
                                    elements=pyc.WET_AIR_ELEMENTS))#WET_AIR_ELEMENTS contains standard dry air compounds as well as H2O
        self.pyc_add_element('inlet', pyc.Inlet(design=design, thermo_data=wet_thermo_spec,
                                    elements=pyc.WET_AIR_ELEMENTS))
        self.pyc_add_element('comp', pyc.Compressor(map_data=pyc.AXI5, design=design,
                                    thermo_data=wet_thermo_spec, elements=pyc.WET_AIR_ELEMENTS,),
                                    promotes_inputs=['Nmech'])
        ###Note###
        #The Combustor element automatically assumes that the thermo data to use for both the inflowing air
        #and the outflowing mixed air and fuel is the data specified by the thermo_data option
        #unless the inflow_thermo_data option is set. If the inflow_thermo_data option is set,
        #the Combustor element will use the thermo data specified by inflow_thermo_data for the inflowing air
        #to the burner, and it will use the thermo data specified by thermo_data for the outflowing mixed
        #air and fuel. This is necessary to do if the airflow upstream of the burner contains both C and H
        #within its compounds, because without the addition of the hydrocarbons from fuel, the solver has
        #a difficult time converging the trace amount of hydrocarbons "present" in the original flow.
        self.pyc_add_element('burner', pyc.Combustor(design=design,inflow_thermo_data=wet_thermo_spec,
                                    thermo_data=janaf_thermo_spec, inflow_elements=pyc.WET_AIR_ELEMENTS,
                                    air_fuel_elements=pyc.AIR_FUEL_ELEMENTS,
                                    fuel_type='JP-7'))
        self.pyc_add_element('turb', pyc.Turbine(map_data=pyc.LPT2269, design=design,
                                    thermo_data=janaf_thermo_spec, elements=pyc.AIR_FUEL_ELEMENTS,),
                                    promotes_inputs=['Nmech'])
        self.pyc_add_element('nozz', pyc.Nozzle(nozzType='CD', lossCoef='Cv',
                                    thermo_data=janaf_thermo_spec, elements=pyc.AIR_FUEL_ELEMENTS))
        self.pyc_add_element('shaft', pyc.Shaft(num_ports=2),promotes_inputs=['Nmech'])
        self.pyc_add_element('perf', pyc.Performance(num_nozzles=1, num_burners=1))
        # Connect flow stations
        self.pyc_connect_flow('fc.Fl_O', 'inlet.Fl_I', connect_w=False)
        self.pyc_connect_flow('inlet.Fl_O', 'comp.Fl_I')
        self.pyc_connect_flow('comp.Fl_O', 'burner.Fl_I')
        self.pyc_connect_flow('burner.Fl_O', 'turb.Fl_I')
        self.pyc_connect_flow('turb.Fl_O', 'nozz.Fl_I')
        # Connect turbomachinery elements to shaft
        self.connect('comp.trq', 'shaft.trq_0')
        self.connect('turb.trq', 'shaft.trq_1')
        # Connnect nozzle exhaust to freestream static conditions
        self.connect('fc.Fl_O:stat:P', 'nozz.Ps_exhaust')
        # Connect outputs to pefromance element
        self.connect('inlet.Fl_O:tot:P', 'perf.Pt2')
        self.connect('comp.Fl_O:tot:P', 'perf.Pt3')
        self.connect('burner.Wfuel', 'perf.Wfuel_0')
        self.connect('inlet.F_ram', 'perf.ram_drag')
        self.connect('nozz.Fg', 'perf.Fg_0')
        # Add balances for design and off-design
        balance = self.add_subsystem('balance', om.BalanceComp())
        if design:
            # On-design: solve for mass flow, fuel-air ratio and turbine PR.
            balance.add_balance('W', units='lbm/s', eq_units='lbf')
            self.connect('balance.W', 'inlet.Fl_I:stat:W')
            self.connect('perf.Fn', 'balance.lhs:W')
            balance.add_balance('FAR', eq_units='degR', lower=1e-4, val=.017)
            self.connect('balance.FAR', 'burner.Fl_I:FAR')
            self.connect('burner.Fl_O:tot:T', 'balance.lhs:FAR')
            balance.add_balance('turb_PR', val=1.5, lower=1.001, upper=8, eq_units='hp', rhs_val=0.)
            self.connect('balance.turb_PR', 'turb.PR')
            self.connect('shaft.pwr_net', 'balance.lhs:turb_PR')
        else:
            # Off-design: solve for fuel-air ratio, shaft speed and mass flow.
            balance.add_balance('FAR', eq_units='lbf', lower=1e-4, val=.3)
            self.connect('balance.FAR', 'burner.Fl_I:FAR')
            self.connect('perf.Fn', 'balance.lhs:FAR')
            balance.add_balance('Nmech', val=1.5, units='rpm', lower=500., eq_units='hp', rhs_val=0.)
            self.connect('balance.Nmech', 'Nmech')
            self.connect('shaft.pwr_net', 'balance.lhs:Nmech')
            balance.add_balance('W', val=168.0, units='lbm/s', eq_units='inch**2')
            self.connect('balance.W', 'inlet.Fl_I:stat:W')
            self.connect('nozz.Throat:stat:area', 'balance.lhs:W')
        # Setup solver to converge engine
        self.set_order(['balance', 'fc', 'inlet', 'comp', 'burner', 'turb', 'nozz', 'shaft', 'perf'])
        newton = self.nonlinear_solver = om.NewtonSolver()
        newton.options['atol'] = 1e-6
        newton.options['rtol'] = 1e-6
        newton.options['iprint'] = 2
        newton.options['maxiter'] = 15
        newton.options['solve_subsystems'] = True
        newton.options['max_sub_solves'] = 100
        newton.options['reraise_child_analysiserror'] = False
        newton.linesearch = om.BoundsEnforceLS()
        # newton.linesearch = ArmijoGoldsteinLS()
        # newton.linesearch.options['c'] = .0001
        newton.linesearch.options['bound_enforcement'] = 'scalar'
        newton.linesearch.options['iprint'] = -1
        self.linear_solver = om.DirectSolver(assemble_jac=True)
def viewer(prob, pt, file=sys.stdout):
    """
    print a report of all the relevant cycle properties
    """
    def _qualified(names):
        # Prefix each element name with the current cycle point.
        return [f'{pt}.{n}' for n in names]
    summary_keys = ('fc.Fl_O:stat:MN', 'fc.alt', 'inlet.Fl_O:stat:W', 'perf.Fn',
                    'perf.Fg', 'inlet.F_ram', 'perf.OPR', 'perf.TSFC')
    summary_data = tuple(prob[f'{pt}.{key}'] for key in summary_keys)
    for _ in range(3):
        print(file=file, flush=True)
    print("----------------------------------------------------------------------------", file=file, flush=True)
    print(" POINT:", pt, file=file, flush=True)
    print("----------------------------------------------------------------------------", file=file, flush=True)
    print(" PERFORMANCE CHARACTERISTICS", file=file, flush=True)
    print(" Mach Alt W Fn Fg Fram OPR TSFC ", file=file, flush=True)
    print(" %7.5f %7.1f %7.3f %7.1f %7.1f %7.1f %7.3f %7.5f" % summary_data, file=file, flush=True)
    pyc.print_flow_station(
        prob,
        _qualified(['fc.Fl_O', 'inlet.Fl_O', 'comp.Fl_O', 'burner.Fl_O',
                    'turb.Fl_O', 'nozz.Fl_O']),
        file=file)
    pyc.print_compressor(prob, _qualified(['comp']), file=file)
    pyc.print_burner(prob, [f'{pt}.burner'])
    pyc.print_turbine(prob, _qualified(['turb']), file=file)
    pyc.print_nozzle(prob, _qualified(['nozz']), file=file)
    pyc.print_shaft(prob, _qualified(['shaft']), file=file)
class MPWetTurbojet(pyc.MPCycle):
    """Multi-point wet-turbojet cycle: one DESIGN point plus off-design points."""
    def setup(self):
        """Build the DESIGN point, set its defaults and cycle parameters, and
        add the off-design point(s) with their flight conditions."""
        # Create design instance of model
        self.pyc_add_pnt('DESIGN', WetTurbojet())
        # Fixed: removed the stray trailing commas that turned each of these
        # statements into a throwaway one-element tuple.
        self.set_input_defaults('DESIGN.fc.alt', 0.0, units='ft')
        self.set_input_defaults('DESIGN.fc.MN', 0.000001)
        self.set_input_defaults('DESIGN.balance.rhs:FAR', 2370.0, units='degR')
        self.set_input_defaults('DESIGN.balance.rhs:W', 11800.0, units='lbf')
        self.set_input_defaults('DESIGN.Nmech', 8070.0, units='rpm')
        self.set_input_defaults('DESIGN.inlet.MN', 0.60)
        self.set_input_defaults('DESIGN.comp.MN', 0.20)
        self.set_input_defaults('DESIGN.burner.MN', 0.20)
        self.set_input_defaults('DESIGN.turb.MN', 0.4)
        # Parameters shared by every cycle point.
        self.pyc_add_cycle_param('burner.dPqP', .03)
        self.pyc_add_cycle_param('nozz.Cv', 0.99)
        self.pyc_add_cycle_param('fc.WAR', .001)
        # Off-design point definitions (one entry per point).
        self.od_pts = ['OD1']
        self.od_MNs = [0.000001,]
        # NOTE(review): od_alts has two entries for a single off-design point;
        # only the first is used below — confirm the second is intentional.
        self.od_alts = [0,0,]
        self.od_pwrs = [11000.0,]
        for i, pt in enumerate(self.od_pts):
            self.pyc_add_pnt(pt, WetTurbojet(design=False))
            self.set_input_defaults(pt+'.fc.MN', self.od_MNs[i])
            self.set_input_defaults(pt+'.fc.alt', self.od_alts[i], units='ft')
            self.set_input_defaults(pt+'.balance.rhs:FAR', self.od_pwrs[i], units='lbf')
        self.pyc_use_default_des_od_conns()
        self.pyc_connect_des_od('nozz.Throat:stat:area', 'balance.rhs:W')
if __name__ == "__main__":
    import time
    from openmdao.api import Problem, IndepVarComp
    from openmdao.utils.units import convert_units as cu
    prob = om.Problem()
    prob.model = mp_wet_turbojet = MPWetTurbojet()
    prob.setup()
    # Define the design point (trailing commas removed: they turned each
    # statement into a throwaway one-element tuple).
    prob.set_val('DESIGN.comp.PR', 13.5)
    prob.set_val('DESIGN.comp.eff', 0.83)
    prob.set_val('DESIGN.turb.eff', 0.86)
    # Set initial guesses for balances
    prob['DESIGN.balance.FAR'] = 0.0175506829934
    prob['DESIGN.balance.W'] = 168.453135137
    prob['DESIGN.balance.turb_PR'] = 4.46138725662
    prob['DESIGN.fc.balance.Pt'] = 14.6955113159
    prob['DESIGN.fc.balance.Tt'] = 518.665288153
    # Initial guesses for each off-design point.
    for i, pt in enumerate(mp_wet_turbojet.od_pts):
        prob[pt+'.balance.W'] = 166.073
        prob[pt+'.balance.FAR'] = 0.01680
        prob[pt+'.balance.Nmech'] = 8197.38
        prob[pt+'.fc.balance.Pt'] = 15.703
        prob[pt+'.fc.balance.Tt'] = 558.31
        prob[pt+'.turb.PR'] = 4.6690
    st = time.time()
    prob.set_solver_print(level=-1)
    prob.set_solver_print(level=2, depth=1)
    prob.run_model()
    for pt in ['DESIGN']+mp_wet_turbojet.od_pts:
        viewer(prob, pt)
        print()
    # Fixed: a stray dataset artifact previously corrupted this line.
    print("time", time.time() - st)
1688238 | <gh_stars>0
from rest_framework.viewsets import ModelViewSet
from api.serializers.application_form import ApplicationFormSerializer, \
ApplicationFormCreateSerializer
from api.models.go_electric_rebate_application import GoElectricRebateApplication
class ApplicationFormViewset(ModelViewSet):
    """CRUD viewset for Go Electric rebate applications.

    Uses a dedicated serializer for the `create` action and a default
    serializer for every other action.
    """
    queryset = GoElectricRebateApplication.objects.all()
    serializer_classes = {
        'default': ApplicationFormSerializer,
        'create': ApplicationFormCreateSerializer,
    }
    def get_serializer_class(self):
        """Return the serializer registered for the current action, falling
        back to the 'default' entry."""
        # Membership test directly on the dict; no need to materialize keys.
        if self.action in self.serializer_classes:
            return self.serializer_classes.get(self.action)
        return self.serializer_classes.get('default')
| StarcoderdataPython |
158214 | <gh_stars>0
import random
def random_mirror(data_numpy):
    """Mirror the skeleton data with probability 0.5.

    Negates channel 0 (presumably the x coordinates — confirm against the
    dataset layout) in place; mutates and returns data_numpy.
    """
    # TODO(review): randomness is unseeded here (original TODO: "random!!!").
    flip = random.random() < 0.5
    if flip:
        data_numpy[0] = -data_numpy[0]
    return data_numpy
def augment(data_numpy):
    """Apply the data-augmentation pipeline (currently only random mirroring).

    Mutates and returns data_numpy.
    """
    data_numpy = random_mirror(data_numpy)
    # Fixed: a stray dataset artifact previously corrupted this return line.
    return data_numpy
57245 | <reponame>ohpauleez/aupy
#!/usr/bin/env python
from __future__ import with_statement
import time
import sys
from SocketServer import ThreadingMixIn, TCPServer, StreamRequestHandler
#import pydysh
from aupy2 import Utility
class Master(object):
    """Python 2 master node: launches slave monitors and serves their reports."""
    def __init__(self):
        # Lazily created by initMasterServer().
        self.master_server = None
        super(Master, self).__init__()
    def startMonitor(self, ips, port=9999):
        """Launch one slave process per IP in a background thread.

        NOTE(review): neither `ip` nor `port` is actually used in the spawned
        command — the slave address is hard-coded; confirm intended.
        """
        for ip in ips:
            print "%10.5f :: Using pydsh to start up a slave" % time.time()
            #pydsh(python slave.py -master_ip 127.0.0.1 -master_port 8888)
            import os, threading
            t = threading.Thread(target=os.popen, args=["python slave.py --master_ip 127.0.0.1 --master_port 8888"])
            t.start()
    def initMasterServer(self, ip, port=8888):
        """Create the TCP server (idempotent: only if not already created)."""
        if not self.master_server:
            self.master_server = MasterServer((ip, port), MasterRequestHandler)
    def serve_forever(self):
        """Block serving requests; raises if initMasterServer was never called."""
        if self.master_server:
            self.master_server.serve_forever()
        else:
            raise Exception("You need to init the server first")
# Module-level state shared by MasterRequestHandler across requests: the
# last observed log-write count / timestamp and the previously computed rate.
past_log_writes = None
past_timestamp = None
past_rate = 1.0
class MasterRequestHandler(StreamRequestHandler):
    """Handles one slave report line: parses it and checks the log-write rate."""
    def logUtility(self, timestamp, log_writes):
        """Update the running write-rate from a slave sample.

        Returns True on the first sample, False when the rate jumped by more
        than 2 (a "log explosion"), otherwise the rate delta (or True when
        the delta is 0, via the `or` fallback).
        """
        global past_log_writes, past_timestamp, past_rate
        if not past_timestamp:
            print "FIRST"
            past_log_writes = log_writes
            past_timestamp = timestamp
            past_rate = 1.0
            return True
        print "old write", past_log_writes
        print "new write", log_writes
        print "old time", past_timestamp
        print "new time", timestamp
        delta_writes = log_writes - past_log_writes
        past_log_writes = log_writes
        delta_time = timestamp - past_timestamp
        past_timestamp = timestamp
        print "delta write: %d delta t: %f" % (delta_writes, delta_time)
        rate = float(delta_writes)/delta_time
        delta_rate = rate - past_rate
        past_rate = rate
        print "%10.5f :: Delta rate = %f" % (time.time(), delta_rate)
        if delta_rate > 2:
            print "%10.5f :: LOG EXPLOSION" % time.time()
            return False
        return delta_rate or True
    def explosionHandler(self, slave_addr):
        """Callback invoked when logUtility signals an explosion."""
        print "%10.5f :: handling (callback) from the explosion on %s" % (time.time(), str(slave_addr))
        #sys.exit(0)
    def handle(self):
        """Parse one colon-separated request line:
        timestamp:command:write_count:...:ip:port
        """
        print "%10.5f :: Handling request" % time.time()
        req = self.rfile.readline().strip()
        req = req.split(":")
        if req[1] == "log utility":
            print "%10.5f :: Log request" % time.time()
            # Last two fields are the slave's (ip, port).
            slave_addr = tuple(req[-2:])
            slave_ip = slave_addr[0]
            slave_port = slave_addr[1]
            log_writes = int(req[2])
            timestamp = float(req[0])
            print "WE GOT TIME:", timestamp
            try:
                with Utility.contextPreUtility(self.logUtility, timestamp, log_writes, evalFunc=bool,
                        utilityCallback=self.explosionHandler, utilityCallbackArgs=[(slave_addr)]) as status:
                    if not isinstance(status, Utility.UtilityError):
                        print "%10.5f :: No Explosion Before :: slave->%s" %(time.time(), str(slave_addr))
            except RuntimeError:
                # you should never see this.. you should check "status" for an exception
                print "\tSEE TODO: yield was never reached in the generator"
            else:
                pass
class MasterServer(ThreadingMixIn, TCPServer):
    """Threaded TCP server that dispatches slave reports to MasterRequestHandler."""
    # Fixed: the attribute is spelled allow_reuse_address; the previous
    # misspelling (allow_reuse_addr) was never read by TCPServer, so
    # SO_REUSEADDR was not actually enabled.
    allow_reuse_address = 1
    def handle_error(self, request, client_address):
        #TODO We need to exit and stop everything when we see the error from handler's sys.exit() call
        #raise Exception("Most Likely An Explosion")
        #sys.exit(0)
        # Deliberately swallow handler errors for now (see TODO above).
        pass
if __name__ == "__main__":
    # Wire up the master server, spawn one local slave, then serve until
    # interrupted.
    master = Master()
    master.initMasterServer("127.0.0.1", port=8888)
    master.startMonitor(["127.0.0.1"], port=9999)
    try:
        master.serve_forever()
    except KeyboardInterrupt:
        print "\nGodspeed and Rock 'n Roll!"
        sys.exit(0)
    except:
        # NOTE(review): bare except silently exits on any error — consider
        # logging the exception before exiting.
        sys.exit(0)
3376673 |
# Re-export the loss classes at package level.
from neural.loss.naive_entropy import NaiveEntropy
from neural.loss.mse import MeanSquaredError
__all__ = ['NaiveEntropy', 'MeanSquaredError']
| StarcoderdataPython |
3385671 | <reponame>mlin/sqlite_zstd_vfs
#!/usr/bin/env python3
import sys
import os
import subprocess
import contextlib
import time
import sqlite3
import argparse
import json
HERE = os.path.dirname(__file__)
BUILD = os.path.abspath(os.path.join(HERE, "..", "build"))
DB_URL = "https://github.com/mlin/sqlite_web_vfs/releases/download/test-db-v1/TPC-H.db"
queries = {}
queries[
"Q1"
] = """
select
l_returnflag,
l_linestatus,
sum(l_quantity) as sum_qty,
sum(l_extendedprice) as sum_base_price,
sum(l_extendedprice*(1-l_discount)) as sum_disc_price,
sum(l_extendedprice*(1-l_discount)*(1+l_tax)) as sum_charge,
avg(l_quantity) as avg_qty,
avg(l_extendedprice) as avg_price,
avg(l_discount) as avg_disc, count(*) as count_order
from lineitem
where l_shipdate <= date('1998-12-01', '-90 day')
group by l_returnflag, l_linestatus order by l_returnflag, l_linestatus;
"""
queries[
"Q8"
] = """
select
o_year,
sum(case when nation = 'BRAZIL' then volume else 0 end) / sum(volume) as mkt_share
from
(select
strftime("%Y", o_orderdate) as o_year,
l_extendedprice * (1-l_discount) as volume,
n2.n_name as nation
from part, supplier, lineitem, orders, customer, nation n1, nation n2, region
where
p_partkey = l_partkey
and s_suppkey = l_suppkey
and l_orderkey = o_orderkey
and o_custkey = c_custkey
and c_nationkey = n1.n_nationkey
and n1.n_regionkey = r_regionkey
and r_name = 'AMERICA'
and s_nationkey = n2.n_nationkey
and o_orderdate between date('1995-01-01') and date('1996-12-31')
and p_type = 'ECONOMY ANODIZED STEEL')
as all_nations
group by o_year order by o_year;
"""
def main():
    """Parse CLI options, run the TPC-H compression benchmark, and print the
    timing results as JSON."""
    # Page sizes SQLite accepts (powers of two, in KiB).
    valid_page_KiB = [1, 2, 4, 8, 16, 32, 64]
    parser = argparse.ArgumentParser()
    parser.add_argument("--cache-MiB", type=int, help="SQLite page cache size (default ~2)")
    parser.add_argument("--level", type=int, help="zstd compression level (default 3)", default=3)
    parser.add_argument(
        "--threads", type=int, help="background compression threads (default 1)", default=1
    )
    # Fixed: page-size validation is now done by argparse `choices` (with a
    # proper error message) instead of `assert`, which is stripped under -O.
    parser.add_argument(
        "--inner-page-KiB",
        type=int,
        choices=valid_page_KiB,
        help="inner db page size (before compression; default 4)",
        default=4,
    )
    parser.add_argument(
        "--outer-page-KiB", type=int, choices=valid_page_KiB,
        help="outer db page size (default 4)", default=4
    )
    args = parser.parse_args(sys.argv[1:])
    ans = run(args.cache_MiB, args.level, args.threads, args.inner_page_KiB, args.outer_page_KiB)
    print(json.dumps(ans, indent=2))
def run(cache_MiB, level, threads, inner_page_KiB, outer_page_KiB):
    """Download the TPC-H database, benchmark the queries against an
    uncompressed copy and a zstd-compressed copy, and return a dict of
    timings and database sizes (seconds, rounded to ms)."""
    tmpdir = os.environ.get("TMPDIR", "/tmp")
    # download db to /tmp/TPC-H.db; --continue=true avoids re-download
    subprocess.run(
        f"aria2c -x 10 -j 10 -s 10 --file-allocation=none --continue=true {DB_URL} >&2",
        check=True,
        shell=True,
        cwd=tmpdir,
    )
    # VACUUM INTO a fresh (uncompressed) copy
    timings = {}
    try:
        os.unlink(os.path.join(tmpdir, "TPC-H.vacuum.db"))
    except FileNotFoundError:
        pass
    # Warm the filesystem cache for the source database before timing.
    subprocess.run(f"cat {os.path.join(tmpdir, 'TPC-H.db')} > /dev/null", check=True, shell=True)
    con = sqlite3.connect(f"file:{os.path.join(tmpdir, 'TPC-H.db')}?mode=ro", uri=True)
    with timer(timings, "load"):
        con.execute(f"PRAGMA page_size={1024*inner_page_KiB}")
        con.execute(f"VACUUM INTO '{os.path.join(tmpdir, 'TPC-H.vacuum.db')}'")
    con.close()
    # run each query twice, with a fresh db connection, and measure the second run; so it should
    # have a hot filesystem cache and cold db page cache.
    expected_results = {}
    for query_name, query_sql in queries.items():
        con = sqlite3.connect(f"file:{os.path.join(tmpdir, 'TPC-H.vacuum.db')}?mode=ro", uri=True)
        expected_results[query_name] = list(con.execute(query_sql))
        con.close()
        con = sqlite3.connect(f"file:{os.path.join(tmpdir, 'TPC-H.vacuum.db')}?mode=ro", uri=True)
        if cache_MiB:
            # Negative cache_size means "size in KiB" to SQLite.
            con.execute(f"PRAGMA cache_size={-1024*cache_MiB}")
        with timer(timings, query_name):
            results = list(con.execute(query_sql))
        con.close()
        assert results == expected_results[query_name]
    # create zstd-compressed db using VACUUM INTO
    try:
        os.unlink(os.path.join(tmpdir, "TPC-H.zstd.db"))
    except FileNotFoundError:
        pass
    con = sqlite3.connect(f"file:{os.path.join(tmpdir, 'TPC-H.vacuum.db')}?mode=ro", uri=True)
    con.enable_load_extension(True)
    con.load_extension(os.path.join(BUILD, "zstd_vfs"))
    with timer(timings, "load_zstd"):
        con.execute(f"PRAGMA page_size={1024*inner_page_KiB}")
        con.execute(
            f"VACUUM INTO 'file:{os.path.join(tmpdir, 'TPC-H.zstd.db')}?vfs=zstd&outer_unsafe=true&outer_page_size={1024*outer_page_KiB}&level={level}&threads={threads}'"
        )
    con.close()
    timings["db_size"] = os.path.getsize(os.path.join(tmpdir, "TPC-H.vacuum.db"))
    timings["zstd_db_size"] = os.path.getsize(os.path.join(tmpdir, "TPC-H.zstd.db"))
    # repeat queries on compressed db
    for query_name, query_sql in queries.items():
        con = connect_zstd(
            os.path.join(tmpdir, "TPC-H.zstd.db"), cache_MiB=cache_MiB, threads=threads
        )
        results = list(con.execute(query_sql))
        con.close()
        assert results == expected_results[query_name]
        con = connect_zstd(
            os.path.join(tmpdir, "TPC-H.zstd.db"), cache_MiB=cache_MiB, threads=threads
        )
        with timer(timings, "zstd_" + query_name):
            results = list(con.execute(query_sql))
        con.close()
        assert results == expected_results[query_name]
    # verify outer application_id
    outer = sqlite3.connect(os.path.join(tmpdir, "TPC-H.zstd.db"))
    # 0x7A737464 == b"zstd" interpreted as a big-endian 32-bit integer.
    assert next(outer.execute("PRAGMA application_id"))[0] == 0x7A737464
    return timings
def connect_zstd(dbfn, mode="ro", cache_MiB=None, threads=1):
    """Open `dbfn` through the zstd VFS and return the sqlite3 connection.

    cache_MiB, if given, sets the page-cache size; threads is passed to the
    VFS as the background-compression thread count.
    """
    # Loading the extension on this throwaway in-memory connection presumably
    # registers the "zstd" VFS process-wide so the URI below can select it —
    # confirm against sqlite_zstd_vfs docs. The first connection is then
    # deliberately discarded by rebinding `con`.
    con = sqlite3.connect(f":memory:")
    con.enable_load_extension(True)
    con.load_extension(os.path.join(BUILD, "zstd_vfs"))
    con = sqlite3.connect(f"file:{dbfn}?vfs=zstd&mode={mode}&threads={threads}", uri=True)
    if cache_MiB:
        # Negative cache_size means "size in KiB" to SQLite.
        con.execute(f"PRAGMA cache_size={-1024*cache_MiB}")
    return con
@contextlib.contextmanager
def timer(timings_dict, name):
    """Record the with-block's elapsed wall-clock seconds (rounded to ms)
    under `name` in `timings_dict`."""
    began = time.time()
    yield
    elapsed = time.time() - began
    timings_dict[name] = round(elapsed, 3)
if __name__ == "__main__":
main()
| StarcoderdataPython |
3377342 | """
Tests for `kolibri` module.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import mock
import kolibri
from kolibri.utils import version
#: Because we don't want to call the original (decorated function), it uses
#: caching and will return the result of the first call always. We call
#: the wrapped function `__wrapped__` directly.
get_version = version.get_version.__wrapped__ # @UndefinedVariable
def dont_call_me_maybe(msg):
    """Fail unconditionally with *msg*; used to assert a code path is never hit."""
    raise AssertionError(msg)
class TestKolibriVersion(unittest.TestCase):
def test_version(self):
"""
Test that the major version is set as expected
"""
major_version_tuple = "{}.{}".format(*kolibri.VERSION[0:2])
self.assertIn(major_version_tuple, kolibri.__version__)
@mock.patch('kolibri.utils.version.get_git_describe', return_value=None)
@mock.patch('kolibri.utils.version.get_version_file', return_value=None)
def test_alpha_0_version(self, file_mock, describe_mock):
"""
Test that when doing something with a 0th alpha doesn't provoke any
hickups with ``git describe --tag``.
"""
v = get_version((0, 1, 0, "alpha", 0))
self.assertIn("0.1.0.dev", v)
@mock.patch('kolibri.utils.version.get_git_describe', return_value=None)
@mock.patch('kolibri.utils.version.get_version_file', return_value=None)
def test_alpha_1_version(self, file_mock, describe_mock):
"""
Test some normal alpha version, but don't assert that the
``git describe --tag`` is consistent (it will change in future test
runs)
"""
v = get_version((0, 1, 0, "alpha", 1))
self.assertIn("0.1.0a1", v)
@mock.patch('kolibri.utils.version.get_git_describe', return_value=None)
def test_alpha_1_version_no_git(self, describe_mock):
"""
Not running from git and no VERSION file.
"""
# Simple mocking
get_version_file = version.get_version_file
version.get_version_file = lambda: None
try:
v = get_version((0, 1, 0, "alpha", 1))
self.assertIn("0.1.0a1", v)
finally:
version.get_version_file = get_version_file
@mock.patch('kolibri.utils.version.get_version_file', return_value="0.1.0a1")
@mock.patch('kolibri.utils.version.get_git_describe', return_value=None)
def test_alpha_1_version_file(self, describe_mock, file_mock):
"""
Test that a simple 0.1a1 works when loaded from a VERSION file
"""
v = get_version((0, 1, 0, "alpha", 1))
self.assertIn("0.1.0a1", v)
@mock.patch('kolibri.utils.version.get_version_file', return_value="0.1.0a1\n")
@mock.patch('kolibri.utils.version.get_git_describe', return_value=None)
def test_version_file_linebreaks(self, describe_mock, file_mock):
"""
Test that line breaks don't get included in the final version
See: https://github.com/learningequality/kolibri/issues/2464
"""
v = get_version((0, 1, 0, "alpha", 1))
self.assertIn("0.1.0a1", v)
@mock.patch('kolibri.utils.version.get_version_file', return_value="0.7.1b1.dev+git.2.gfd48a7a")
@mock.patch('kolibri.utils.version.get_git_describe', return_value=None)
def test_version_file_local_git_version(self, describe_mock, file_mock):
"""
Test that a version file with git describe output is correctly parsed
"""
v = get_version((0, 7, 1, "beta", 1))
self.assertIn("0.7.1b1.dev+git.2.gfd48a7a", v)
@mock.patch('kolibri.utils.version.get_git_describe', return_value=None)
@mock.patch('kolibri.utils.version.get_git_changeset', return_value=None)
def test_alpha_0_inconsistent_version_file(self, get_git_changeset_mock, describe_mock):
"""
Test that inconsistent file data also just fails
"""
# Simple mocking
get_version_file = version.get_version_file
inconsistent_versions = ("0.2.0a1", "0.1.1a1", "0.1.0")
for v in inconsistent_versions:
version.get_version_file = lambda: v
try:
self.assertRaises(
AssertionError,
get_version,
(0, 1, 0, "alpha", 0)
)
finally:
version.get_version_file = get_version_file
@mock.patch('kolibri.utils.version.get_git_describe', return_value=None)
@mock.patch('kolibri.utils.version.get_git_changeset', return_value=None)
def test_alpha_1_inconsistent_version_file(self, get_git_changeset_mock, describe_mock):
"""
Test that inconsistent file data also just fails
"""
# Simple mocking
get_version_file = version.get_version_file
inconsistent_versions = ("0.2.0a1", "0.1.1a1", "0.1.0")
for v in inconsistent_versions:
version.get_version_file = lambda: v
try:
self.assertRaises(
AssertionError,
get_version,
(0, 1, 0, "alpha", 1)
)
finally:
version.get_version_file = get_version_file
@mock.patch('kolibri.utils.version.get_version_file', return_value="0.1.0b1")
@mock.patch('kolibri.utils.version.get_git_describe', return_value=None)
@mock.patch('kolibri.utils.version.get_git_changeset', return_value=None)
def test_alpha_0_consistent_version_file(self, get_git_changeset_mock, describe_mock, file_mock):
"""
Test that a VERSION file can overwrite an alpha-0 (dev) state.
Because a prerelease can be made with a version file.
"""
assert get_version((0, 1, 0, "alpha", 0)) == "0.1.0b1"
@mock.patch('kolibri.utils.version.get_version_file', return_value=None)
@mock.patch('kolibri.utils.version.get_git_describe', return_value="v0.1.0-alpha1-123-abcdfe12")
def test_alpha_0_consistent_git(self, describe_mock, file_mock):
"""
Tests that git describe data for an alpha-1 tag generates an a1 version
string.
"""
assert get_version((0, 1, 0, "alpha", 0)) == "0.1.0a1.dev+git.123.abcdfe12"
@mock.patch('kolibri.utils.version.get_version_file', return_value=None)
@mock.patch('kolibri.utils.version.get_git_describe', return_value="v0.1.0-alpha1-123-abcdfe12")
def test_alpha_1_consistent_git(self, describe_mock, file_mock):
"""
Tests that git describe data for an alpha-1 tag generates an a1 version
string.
"""
assert get_version((0, 1, 0, "alpha", 1)) == "0.1.0a1.dev+git.123.abcdfe12"
@mock.patch('kolibri.utils.version.get_version_file', return_value="0.1.0b2")
@mock.patch('kolibri.utils.version.get_git_describe', return_value=None)
@mock.patch('kolibri.utils.version.get_git_changeset', return_value=None)
def test_beta_1_consistent_version_file(self, get_git_changeset_mock, describe_mock, file_mock):
"""
Test that a VERSION file can overwrite an beta-1 state in case the
version was bumped in ``kolibri.VERSION``.
"""
assert get_version((0, 1, 0, "beta", 1)) == "0.1.0b2"
@mock.patch('kolibri.utils.version.get_version_file', return_value="0.7.1b1.dev+git.12.g2a8fe31")
@mock.patch('kolibri.utils.version.get_git_describe', return_value=None)
@mock.patch('kolibri.utils.version.get_git_changeset', return_value=None)
def test_beta_1_consistent_dev_release_version_file(self, get_git_changeset_mock, describe_mock, file_mock):
"""
Test that a VERSION file can overwrite an beta-1 state in case the
version was bumped in ``kolibri.VERSION``.
"""
assert get_version((0, 7, 1, "alpha", 0)) == "0.7.1b1.dev+git.12.g2a8fe31"
@mock.patch('kolibri.utils.version.get_version_file', return_value="0.1.0b1")
@mock.patch('kolibri.utils.version.get_git_describe', return_value="v0.0.1")
@mock.patch('kolibri.utils.version.get_git_changeset', return_value="+git123")
def test_version_file_ignored(self, get_git_changeset_mock, describe_mock, file_mock):
"""
Test that the VERSION file is NOT used where git data is available
"""
assert get_version((0, 1, 0, "alpha", 0)) == "0.1.0.dev+git123"
@mock.patch('kolibri.utils.version.get_version_file', return_value="0.1.0")
@mock.patch('kolibri.utils.version.get_git_describe', return_value=None)
@mock.patch('kolibri.utils.version.get_git_changeset', return_value=None)
def test_version_file_final(self, get_git_changeset_mock, describe_mock, file_mock):
"""
Test that a VERSION specifying a final version will work when the
kolibri.VERSION tuple is consistent.
"""
assert get_version((0, 1, 0, "final", 0)) == "0.1.0"
def test_alpha_1_inconsistent_git(self):
"""
Test that we fail when git returns inconsistent data
"""
# Simple mocking
git_describe = version.get_git_describe
try:
version.get_git_describe = lambda *x: 'v0.2.0-beta1'
self.assertRaises(
AssertionError,
get_version,
(0, 1, 0, "alpha", 1)
)
version.get_git_describe = lambda *x: 'v0.2.0-beta2'
self.assertRaises(
AssertionError,
get_version,
(0, 1, 0, "beta", 0)
)
version.get_git_describe = lambda *x: 'v0.1.0'
self.assertRaises(
AssertionError,
get_version,
(0, 1, 0, "alpha", 0)
)
finally:
version.get_git_describe = git_describe
@mock.patch('kolibri.utils.version.get_git_describe', return_value="v0.1.0-beta1-123-abcdfe12")
def test_alpha_1_beta_1_consistent_git(self, describe_mock):
"""
Test that a beta1 git tag can override kolibri.__version__ reading
alpha0.
"""
assert get_version((0, 1, 0, "alpha", 1)) == "0.1.0b1.dev+git.123.abcdfe12"
@mock.patch('subprocess.Popen')
def test_git_describe_parser(self, popen_mock):
"""
Test that we get the git describe data when it's there
"""
process_mock = mock.Mock()
attrs = {'communicate.return_value': ('v0.1.0-beta1-123-abcdfe12', '')}
process_mock.configure_mock(**attrs)
popen_mock.return_value = process_mock
assert get_version((0, 1, 0, "alpha", 1)) == "0.1.0b1.dev+git.123.abcdfe12"
@mock.patch('subprocess.Popen')
@mock.patch('kolibri.utils.version.get_version_file', return_value=None)
def test_git_random_tag(self, file_mock, popen_mock):
"""
Test that we don't fail if some random tag appears
"""
process_mock = mock.Mock()
attrs = {'communicate.return_value': ('foobar', '')}
process_mock.configure_mock(**attrs)
popen_mock.return_value = process_mock
assert get_version((0, 1, 0, "alpha", 1)) == "0.1.0a1"
@mock.patch('subprocess.Popen', side_effect=EnvironmentError())
@mock.patch('kolibri.utils.version.get_version_file', return_value="0.1.0a2")
def test_prerelease_no_git(self, file_mock, popen_mock):
"""
Test that we don't fail and that the version file is used
"""
assert get_version((0, 1, 0, "alpha", 1)) == "0.1.0a2"
@mock.patch('kolibri.utils.version.get_complete_version', side_effect=lambda x: x if x else (0, 2, 0, 'alpha', 2))
@mock.patch('kolibri.utils.version.get_git_describe', return_value="v0.2.0-beta1")
def test_beta_1_git(self, describe_mock, complete_mock):
"""
Test that we use git tag data when our version is alpha
"""
self.assertEqual(
get_version(),
'0.2.0b1'
)
@mock.patch('kolibri.utils.version.get_git_describe', return_value=None)
def test_final(self, describe_mock):
"""
Test that the major version is set as expected on a final release
"""
v = get_version((0, 1, 0, "final", 0))
self.assertEqual(v, "0.1.0")
assert describe_mock.call_count == 0
@mock.patch('kolibri.utils.version.get_git_describe')
def test_final_patch(self, describe_mock):
"""
Test that the major version is set as expected on a final release
"""
v = get_version((0, 1, 1, "final", 0))
self.assertEqual(v, "0.1.1")
assert describe_mock.call_count == 0
@mock.patch('kolibri.utils.version.get_git_describe')
def test_final_post(self, describe_mock):
"""
Test that the major version is set as expected on a final release
"""
v = get_version((0, 1, 1, "final", 1))
self.assertEqual(v, "0.1.1.post1")
assert describe_mock.call_count == 0
def test_version_compat(self):
"""
Test that our version glue works for some really old releases of
setuptools, like the one in Ubuntu 14.04.
We don't have a reference implementation, but parse_version will return
a tuple, and this is from a live system::
test@test-VirtualBox:~$ python
Python 2.7.6 (default, Jun 22 2015, 17:58:13)
[GCC 4.8.2] on linux2
Type "help", "copyright", "credits" or "license" for more information.
>>> from pkg_resources import parse_version
>>> parse_version("1.2.3")
('00000001', '00000002', '00000003', '*final')
>>> parse_version("1.2.3.dev0")
('00000001', '00000002', '00000003', '*@', '*final')
>>> parse_version("1.2.3a1")
('00000001', '00000002', '00000003', '*a', '00000001', '*final')
>>> parse_version("1.2.3a0")
('00000001', '00000002', '00000003', '*a', '*final')
>>> parse_version("1.2.3b1")
('00000001', '00000002', '00000003', '*b', '00000001', '*final')
>>> parse_version("1.2.3b1+git.123")
('00000001', '00000002', '00000003', '*b', '00000001', '*+', '*git', '*final-', '00000123', '*final')
"""
from kolibri.utils.compat import VersionCompat
assert VersionCompat(
('00000001', '00000002', '00000003', '*final')
).base_version == "1.2.3"
assert VersionCompat(
('00000001', '00000002', '00000003', '*@', '*final')
).base_version == "1.2.3"
assert VersionCompat(
('00000001', '00000002', '00000003', '*a', '00000001', '*final')
).base_version == "1.2.3"
assert VersionCompat(
('00000001', '00000002', '00000003', '*b', '00000001', '*final')
).base_version == "1.2.3"
assert VersionCompat(
('00000001', '00000002', '00000003', '*b', '00000001', '*+', '*git', '*final-', '00000123', '*final')
).base_version == "1.2.3"
assert VersionCompat(
('00000000', '00000002', '00000003', '*b', '00000001', '*+', '*git', '*final-', '00000123', '*final')
).base_version == "0.2.3"
| StarcoderdataPython |
3338085 | from io import BytesIO
from PIL import Image
from django.core.files import File
def get_image_file(name='test.png', ext='png', size=(50, 50), color=(255, 0, 0)):
    """Create an in-memory test image wrapped in a Django ``File``.

    :param name: file name reported by the returned ``File``.
    :param ext: image format passed to Pillow's ``save`` (e.g. 'png').
    :param size: (width, height) of the generated image in pixels.
    :param color: RGB fill color; each channel must be in 0-255.
        Fix: the old default of (256, 0, 0) was out of the valid 8-bit
        range -- (255, 0, 0) is the intended solid red.
    :return: a ``django.core.files.File`` positioned at the start of the data.
    """
    file_obj = BytesIO()
    image = Image.new("RGB", size=size, color=color)
    image.save(file_obj, ext)
    file_obj.seek(0)  # rewind so consumers read from the beginning
    return File(file_obj, name=name)
163751 | # Copyright (C)2016, International Business Machines Corporation
# All rights reserved.
import testutils as tt
import shutil
import sys
def test_execution(name, python_command):
    '''Execute the ContentRankingSample scenario end to end.

    Runs the ModelBuilder standalone, installs the pre-built model files that
    match the major Python version of *python_command*, runs the main
    standalone, and diffs its output against the expected file.

    NOTE(review): this module uses Python 2 print statements, so the script
    itself must run under Python 2 even when the Streams job uses Python 3.
    '''
    #tt.assert_pass(err != 0, stdout, stderr)
    print "Execute scenario ContentRankingSample"
    tt.run_checked(['output/ModelBuilder/bin/standalone', 'pythonCommand='+python_command])
    # the test script runs in python2
    # check the python version in the environment since the Streams job might use a different python version
    ver = tt.get_major_version(python_command)
    # Pick the pre-built model artifacts matching the job's Python major version.
    shutil.copy('data/model_KB/d_lemms.json.provided'+str(ver), 'data/model_KB/d_lemms.json')
    shutil.copy('data/model_KB/kb_lstm_model.pklz.provided'+str(ver), 'data/model_KB/kb_lstm_model.pklz')
    print 'XXXXXXXXXXXXXXXXXXXXXXXXXXX'
    tt.run_checked(['output/bin/standalone', 'pythonCommand='+python_command])
    print 'XXXXXXXXXXXXXXXXXXXXXXXXXXX'
    # diff exits non-zero on mismatch, which run_checked turns into a failure.
    tt.run_checked(['diff', 'data/out.txt', 'data/expected'+str(ver)+'.txt'])
def test_cleanup(name):
    '''Delete every artifact produced during test execution.'''
    for artifact in ('data/out.txt',
                     'data/model_KB/d_lemms.json',
                     'data/model_KB/kb_lstm_model.pklz'):
        tt.remove_f(artifact)
| StarcoderdataPython |
23369 | """
stanCode Breakout Project
Adapted from <NAME>'s Breakout by
<NAME>, <NAME>, <NAME>,
and <NAME>
File: breakoutgraphics.py
Name: <NAME>
-------------------------
This python file will create a class named BreakoutGraphics for the break out game.
This class will contain the building block for creating that game.
"""
from campy.graphics.gwindow import GWindow
from campy.graphics.gobjects import GOval, GRect, GLabel
from campy.gui.events.mouse import onmouseclicked, onmousemoved
import random
BRICK_SPACING = 5      # Space between bricks (in pixels). Used for both horizontal and vertical spacing.
BRICK_WIDTH = 40       # Width of a brick (in pixels).
BRICK_HEIGHT = 15      # Height of a brick (in pixels).
BRICK_ROWS = 10        # Number of rows of bricks.
BRICK_COLS = 10        # Number of columns of bricks.
BRICK_OFFSET = 50      # Vertical offset of the topmost brick from the window top (in pixels).
BALL_RADIUS = 10       # Radius of the ball (in pixels).
PADDLE_WIDTH = 75      # Width of the paddle (in pixels).
PADDLE_HEIGHT = 15     # Height of the paddle (in pixels).
PADDLE_OFFSET = 50     # Vertical offset of the paddle from the window bottom (in pixels).
INITIAL_Y_SPEED = 7    # Initial vertical speed for the ball.
MAX_X_SPEED = 5        # Maximum initial horizontal speed for the ball.
class BreakoutGraphics:
    """Graphical model of a Breakout game: window, paddle, ball, and bricks.

    The ball velocity is kept private (``__dx``/``__dy``) and exposed via
    getters/setters.  Mouse handlers are wired in ``__init__``: a click
    launches the ball (``is_start_game``) and mouse movement drags the
    paddle (``moving_paddle``).
    """

    # One color per band of two brick rows, top of the grid first.  Rows
    # beyond the last band keep the GRect default fill, as before.
    _ROW_COLORS = ('red', 'orange', 'yellow', 'green', 'blue', 'teal', 'chocolate')

    def __init__(self, ball_radius=BALL_RADIUS, paddle_width=PADDLE_WIDTH,
                 paddle_height=PADDLE_HEIGHT, paddle_offset=PADDLE_OFFSET,
                 brick_rows=BRICK_ROWS, brick_cols=BRICK_COLS,
                 brick_width=BRICK_WIDTH, brick_height=BRICK_HEIGHT,
                 brick_offset=BRICK_OFFSET, brick_spacing=BRICK_SPACING,
                 title='Breakout'):
        """Create the window and lay out the paddle, ball, and brick grid.

        :param ball_radius: Radius of the ball (pixels).
        :param paddle_width: Width of the paddle (pixels).
        :param paddle_height: Height of the paddle (pixels).
        :param paddle_offset: Distance between the paddle and the window bottom.
        :param brick_rows: Number of brick rows.
        :param brick_cols: Number of brick columns.
        :param brick_width: Width of each brick (pixels).
        :param brick_height: Height of each brick (pixels).
        :param brick_offset: Distance between the first brick row and the window top.
        :param brick_spacing: Gap between adjacent bricks (pixels).
        :param title: Window title.
        """
        # Window sized so the brick grid exactly spans the width.
        self.window_width = brick_cols * (brick_width + brick_spacing) - brick_spacing
        self.window_height = brick_offset + 3 * (brick_rows * (brick_height + brick_spacing) - brick_spacing)
        self.window = GWindow(width=self.window_width, height=self.window_height, title=title)

        # Paddle: centered horizontally, a fixed distance above the bottom edge.
        self.paddle_width = paddle_width
        self.paddle_height = paddle_height
        self.paddle_offset = paddle_offset
        self.paddle = GRect(paddle_width, paddle_height,
                            x=(self.window_width - paddle_width) / 2,
                            y=self.window_height - paddle_offset)
        self.paddle.filled = True
        self.paddle.color = 'black'
        self.paddle.fill_color = 'black'
        self.window.add(self.paddle)

        # Ball: centered in the window.
        self.ball_radius = ball_radius
        self.ball = GOval(ball_radius * 2, ball_radius * 2,
                          x=(self.window_width - ball_radius * 2) / 2,
                          y=(self.window_height - ball_radius * 2) / 2)
        self.ball.filled = True
        self.ball.fill_color = 'black'
        self.window.add(self.ball)

        # Velocity stays (0, 0) until the first click; zero velocity doubles
        # as the "waiting for launch" marker checked by is_start_game.
        self.__dx = 0
        self.__dy = 0

        # Brick grid: a fresh GRect per cell (each object can be added once).
        for col in range(brick_cols):
            for row in range(brick_rows):
                brick = GRect(brick_width, brick_height)
                brick.x = (brick_width + brick_spacing) * col
                brick.y = brick_offset + (brick_height + brick_spacing) * row
                brick.filled = True
                band = row // 2
                if band < len(self._ROW_COLORS):
                    brick.fill_color = self._ROW_COLORS[band]
                self.window.add(brick)

        # Mouse wiring: click launches the ball, movement drags the paddle.
        onmouseclicked(self.is_start_game)
        onmousemoved(self.moving_paddle)

        # Number of bricks initially on the board (used by the main loop).
        self.total_bricks = brick_cols * brick_rows

    def is_start_game(self, event):
        """Mouse-click handler that launches the ball.

        Only fires while the ball is stationary (dx == dy == 0), so clicks
        during play are ignored.

        :param event: Mouse event carrying the click position (x, y).
        """
        if event.x != -1 and event.y != -1 and self.__dx == 0 and self.__dy == 0:
            self.__dx = random.randint(1, MAX_X_SPEED)
            self.__dy = INITIAL_Y_SPEED
            # Launch left or right with equal probability.
            if random.random() > 0.5:
                self.__dx = -self.__dx

    def check_for_collisions(self):
        """Probe the four corners of the ball's bounding box for an object.

        Side effect: the first object found is stored in ``self.obj`` so the
        caller can inspect or remove it.

        :return: True if any corner touches an object, otherwise False.
        """
        size = 2 * self.ball_radius
        corners = ((self.ball.x, self.ball.y),
                   (self.ball.x + size, self.ball.y),
                   (self.ball.x, self.ball.y + size),
                   (self.ball.x + size, self.ball.y + size))
        for corner_x, corner_y in corners:
            obj = self.window.get_object_at(corner_x, corner_y)
            if obj is not None:
                self.obj = obj
                return True
        # Fix: return an explicit False instead of falling through to None.
        return False

    def check_object_type(self):
        """Classify the last collision by the ball's vertical position.

        :return: True if the ball is in the lower half of the window
            (paddle zone); False if it is in the upper half (brick zone).
        """
        return self.ball.y > self.window.height / 2

    def moving_ball(self):
        """Advance the ball one step along its current velocity."""
        self.ball.move(self.__dx, self.__dy)

    def moving_paddle(self, event):
        """Mouse-move handler keeping the paddle centered under the cursor.

        The paddle only moves while it stays fully inside the window.

        :param event: Mouse event carrying the cursor position (x, y).
        """
        new_x = event.x - self.paddle_width / 2
        if 0 <= new_x <= self.window_width - self.paddle_width:
            self.paddle.x = new_x

    def reset_ball(self):
        """Recreate the ball at the window center with zero velocity.

        Called after the ball falls past the paddle while the game continues.
        """
        self.ball = GOval(self.ball_radius * 2, self.ball_radius * 2,
                          x=(self.window_width - self.ball_radius * 2) / 2,
                          y=(self.window_height - self.ball_radius * 2) / 2)
        self.ball.filled = True
        self.ball.fill_color = 'black'
        self.window.add(self.ball)
        # Zero velocity re-arms the click-to-launch handler.
        self.__dx = 0
        self.__dy = 0

    def set_dx(self, new_dx):
        """Set the ball's horizontal velocity."""
        self.__dx = new_dx

    def set_dy(self, new_dy):
        """Set the ball's vertical velocity."""
        self.__dy = new_dy

    def get_dx(self):
        """Return the ball's horizontal velocity."""
        return self.__dx

    def get_dy(self):
        """Return the ball's vertical velocity."""
        return self.__dy

    def set_dx_collision(self, new_dx):
        """Set the horizontal velocity after a collision, with a random sign."""
        if random.random() > 0.5:
            self.__dx = new_dx
        else:
            self.__dx = -new_dx

    def _show_banner(self, text):
        """Add a large centered label to the window (shared by win/lose)."""
        # NOTE: position and font are tuned for the default 10x10 brick grid;
        # other grid sizes may need different values.
        label = GLabel(text)
        label.font = '-40'
        self.window.add(label, x=self.window_width / 2 - 100, y=self.window_height / 2 + 100)

    def game_over(self):
        """Show the losing banner."""
        self._show_banner('Game Over!!!')

    def game_win(self):
        """Show the winning banner."""
        self._show_banner('You Win!!!')
# Package __init__: re-export the public contents of the submodules below.
from .transaction import *
from .investor import *
from .ROICalculator import *
| StarcoderdataPython |
51617 | import numpy as np
from scipy.spatial.distance import pdist, squareform
from scipy import exp
from scipy.linalg import eigh
def rbf_kernel_pca(X, gamma, n_components):
    """RBF kernel PCA implementation.

    Parameters
    ----------
    X : ndarray, shape (n_samples, n_features)
        Input data.
    gamma : float
        Tuning parameter of the RBF kernel exp(-gamma * ||x_i - x_j||^2).
    n_components : int
        Number of principal components to return.

    Returns
    -------
    X_pc : ndarray, shape (n_samples, n_components)
        Projected dataset; column i is sqrt(lambda_i) * a_i, i.e. the samples
        expressed in the principal-component basis.
    """
    # Pairwise squared Euclidean distances (condensed, then square form).
    sq_dists = pdist(X, 'sqeuclidean')
    mat_sq_dists = squareform(sq_dists)
    # Symmetric RBF kernel matrix.  np.exp replaces the scipy.exp alias,
    # which was deprecated and then removed from SciPy.
    K = np.exp(-gamma * mat_sq_dists)
    # Center the kernel matrix in feature space.
    N = K.shape[0]
    one_n = np.ones((N, N)) / N
    K = K - one_n.dot(K) - K.dot(one_n) + one_n.dot(K).dot(one_n)
    # eigh returns eigenpairs in ascending order of eigenvalue.
    eigvals, eigvecs = eigh(K)
    # Scale each leading (unit-norm) eigenvector a_i by sqrt(lambda_i): since
    # X v_i = sqrt(lambda_i) a_i, the scaled columns are the projected samples.
    # A list (not a generator) is passed: generators to column_stack are
    # deprecated in NumPy.
    X_pc = np.column_stack([np.sqrt(eigvals[-i]) * eigvecs[:, -i]
                            for i in range(1, n_components + 1)])
    return X_pc
##
# Variant introduced later in the chapter: identical computation, but it
# returns the (unit-norm) eigenvectors together with their eigenvalues so
# that new points can be projected onto the components afterwards.
#
def rbf_kernel_pca2(X, gamma, n_components):
    """RBF kernel PCA returning eigenvectors and eigenvalues.

    Parameters
    ----------
    X : ndarray, shape (n_samples, n_features)
        Input data.
    gamma : float
        Tuning parameter of the RBF kernel exp(-gamma * ||x_i - x_j||^2).
    n_components : int
        Number of principal components to return.

    Returns
    -------
    alphas : ndarray, shape (n_samples, n_components)
        Top eigenvectors (projected samples, unscaled), largest eigenvalue first.
    lambdas : list of float
        Corresponding eigenvalues, largest first.
    """
    # Pairwise squared Euclidean distances of the samples.
    sq_dists = pdist(X, 'sqeuclidean')
    mat_sq_dists = squareform(sq_dists)
    # Symmetric RBF kernel matrix; np.exp replaces the removed scipy.exp alias.
    K = np.exp(-gamma * mat_sq_dists)
    # Center the kernel matrix in feature space.
    N = K.shape[0]
    one_n = np.ones((N, N)) / N
    K = K - one_n.dot(K) - K.dot(one_n) + one_n.dot(K).dot(one_n)
    # eigh returns eigenpairs in ascending order of eigenvalue.
    eigvals, eigvecs = eigh(K)
    # Top-k eigenvectors (a list is passed: generators to column_stack are
    # deprecated in NumPy) and their matching eigenvalues.
    alphas = np.column_stack([eigvecs[:, -i] for i in range(1, n_components + 1)])
    lambdas = [eigvals[-i] for i in range(1, n_components + 1)]
    return alphas, lambdas
import numpy as np
from scipy.spatial.distance import pdist, squareform
from scipy import exp
from scipy.linalg import eigh
##
# Linear-kernel variant of rbf_kernel_pca (used by test_kpca.py).  With the
# plain inner-product kernel this reduces to ordinary PCA.
#
def linear_kernel_pca(X, n_components):
    """Kernel PCA with a linear (inner-product) kernel.

    Parameters
    ----------
    X : ndarray, shape (n_samples, n_features)
        Input data.
    n_components : int
        Number of principal components to return.

    Returns
    -------
    X_pc : ndarray, shape (n_samples, n_components)
        Projected dataset; column i is sqrt(lambda_i) * a_i, i.e. the samples
        expressed in the principal-component basis.
    """
    # Gram matrix of pairwise inner products.  X.dot(X.T) replaces the
    # original O(N^2) Python double loop with one BLAS call.
    K = X.dot(X.T)
    # Center the kernel matrix.
    N = K.shape[0]
    one_n = np.ones((N, N)) / N
    K = K - one_n.dot(K) - K.dot(one_n) + one_n.dot(K).dot(one_n)
    # eigh returns eigenpairs in ascending order of eigenvalue.
    eigvals, eigvecs = eigh(K)
    # Scale each leading eigenvector a_i by sqrt(lambda_i): since
    # X v_i = sqrt(lambda_i) a_i, the scaled columns are the projected
    # samples.  A list (not a generator) is passed to column_stack.
    X_pc = np.column_stack([np.sqrt(eigvals[-i]) * eigvecs[:, -i]
                            for i in range(1, n_components + 1)])
    return X_pc
| StarcoderdataPython |
# plexapi/media.py
# -*- coding: utf-8 -*-
import xml
from urllib.parse import quote_plus
from plexapi import log, settings, utils
from plexapi.base import PlexObject
from plexapi.exceptions import BadRequest
from plexapi.utils import cast
@utils.registerPlexObject
class Media(PlexObject):
    """ Container object for all MediaPart objects. Provides useful data about the
        video or audio this media belongs to such as video framerate, resolution, etc.

        Attributes:
            TAG (str): 'Media'
            aspectRatio (float): The aspect ratio of the media (ex: 2.35).
            audioChannels (int): The number of audio channels of the media (ex: 6).
            audioCodec (str): The audio codec of the media (ex: ac3).
            audioProfile (str): The audio profile of the media (ex: dts).
            bitrate (int): The bitrate of the media (ex: 1624).
            container (str): The container of the media (ex: avi).
            duration (int): The duration of the media in milliseconds (ex: 6990483).
            height (int): The height of the media in pixels (ex: 256).
            id (int): The unique ID for this media on the server.
            has64bitOffsets (bool): True if video has 64 bit offsets.
            optimizedForStreaming (bool): True if video is optimized for streaming.
            parts (List<:class:`~plexapi.media.MediaPart`>): List of media part objects.
            proxyType (int): Equals 42 for optimized versions.
            target (str): The media version target name.
            title (str): The title of the media.
            videoCodec (str): The video codec of the media (ex: ac3).
            videoFrameRate (str): The video frame rate of the media (ex: 24p).
            videoProfile (str): The video profile of the media (ex: high).
            videoResolution (str): The video resolution of the media (ex: sd).
            width (int): The width of the video in pixels (ex: 608).

            The following attributes are only available for photos:
                aperture (str): The aperture used to take the photo.
                exposure (str): The exposure used to take the photo.
                iso (int): The iso used to take the photo.
                lens (str): The lens used to take the photo.
                make (str): The make of the camera used to take the photo.
                model (str): The model of the camera used to take the photo.
    """
    TAG = 'Media'

    def _loadData(self, data):
        """ Load attribute values from Plex XML response. """
        self._data = data
        # Every attribute comes straight from the XML element; cast() coerces
        # the string value and yields None when the attribute is absent.
        self.aspectRatio = cast(float, data.attrib.get('aspectRatio'))
        self.audioChannels = cast(int, data.attrib.get('audioChannels'))
        self.audioCodec = data.attrib.get('audioCodec')
        self.audioProfile = data.attrib.get('audioProfile')
        self.bitrate = cast(int, data.attrib.get('bitrate'))
        self.container = data.attrib.get('container')
        self.duration = cast(int, data.attrib.get('duration'))
        self.height = cast(int, data.attrib.get('height'))
        self.id = cast(int, data.attrib.get('id'))
        self.has64bitOffsets = cast(bool, data.attrib.get('has64bitOffsets'))
        self.optimizedForStreaming = cast(bool, data.attrib.get('optimizedForStreaming'))
        self.parts = self.findItems(data, MediaPart)
        self.proxyType = cast(int, data.attrib.get('proxyType'))
        self.target = data.attrib.get('target')
        self.title = data.attrib.get('title')
        self.videoCodec = data.attrib.get('videoCodec')
        self.videoFrameRate = data.attrib.get('videoFrameRate')
        self.videoProfile = data.attrib.get('videoProfile')
        self.videoResolution = data.attrib.get('videoResolution')
        self.width = cast(int, data.attrib.get('width'))

        # Photo items carry EXIF-style camera attributes on the same element.
        if self._isChildOf(etag='Photo'):
            self.aperture = data.attrib.get('aperture')
            self.exposure = data.attrib.get('exposure')
            self.iso = cast(int, data.attrib.get('iso'))
            self.lens = data.attrib.get('lens')
            self.make = data.attrib.get('make')
            self.model = data.attrib.get('model')

    @property
    def isOptimizedVersion(self):
        """ Returns True if the media is a Plex optimized version. """
        # Optimized versions are flagged through a sentinel proxyType value.
        return self.proxyType == utils.SEARCHTYPES['optimizedVersion']

    def delete(self):
        """ Delete this media item from the server.

            Raises:
                :exc:`plexapi.exceptions.BadRequest`: if the server refuses the
                    deletion (commonly because media deletion is disabled in
                    the server settings); the error is logged and re-raised.
        """
        part = self._initpath + '/media/%s' % self.id
        try:
            return self._server.query(part, method=self._server._session.delete)
        except BadRequest:
            log.error("Failed to delete %s. This could be because you havn't allowed "
                      "items to be deleted" % part)
            raise
@utils.registerPlexObject
class MediaPart(PlexObject):
    """ Represents a single media part (often a single file) for the media this belongs to.

        Attributes:
            TAG (str): 'Part'
            accessible (bool): True if the file is accessible.
            audioProfile (str): The audio profile of the file.
            container (str): The container type of the file (ex: avi).
            decision (str): Unknown.
            deepAnalysisVersion (int): The Plex deep analysis version for the file.
            duration (int): The duration of the file in milliseconds.
            exists (bool): True if the file exists.
            file (str): The path to this file on disk (ex: /media/Movies/Cars (2006)/Cars (2006).mkv)
            has64bitOffsets (bool): True if the file has 64 bit offsets.
            hasThumbnail (bool): True if the file (track) has an embedded thumbnail.
            id (int): The unique ID for this media part on the server.
            indexes (str, None): sd if the file has generated BIF thumbnails.
            key (str): API URL (ex: /library/parts/46618/1389985872/file.mkv).
            optimizedForStreaming (bool): True if the file is optimized for streaming.
            packetLength (int): The packet length of the file.
            requiredBandwidths (str): The required bandwidths to stream the file.
            size (int): The size of the file in bytes (ex: 733884416).
            streams (List<:class:`~plexapi.media.MediaPartStream`>): List of stream objects.
            syncItemId (int): The unique ID for this media part if it is synced.
            syncState (str): The sync state for this media part.
            videoProfile (str): The video profile of the file.
    """
    TAG = 'Part'

    def _loadData(self, data):
        """ Load attribute values from Plex XML response. """
        self._data = data
        # Attributes are read straight off the XML element; cast() coerces the
        # string value and yields None when the attribute is absent.
        self.accessible = cast(bool, data.attrib.get('accessible'))
        self.audioProfile = data.attrib.get('audioProfile')
        self.container = data.attrib.get('container')
        self.decision = data.attrib.get('decision')
        self.deepAnalysisVersion = cast(int, data.attrib.get('deepAnalysisVersion'))
        self.duration = cast(int, data.attrib.get('duration'))
        self.exists = cast(bool, data.attrib.get('exists'))
        self.file = data.attrib.get('file')
        self.has64bitOffsets = cast(bool, data.attrib.get('has64bitOffsets'))
        self.hasThumbnail = cast(bool, data.attrib.get('hasThumbnail'))
        self.id = cast(int, data.attrib.get('id'))
        self.indexes = data.attrib.get('indexes')
        self.key = data.attrib.get('key')
        self.optimizedForStreaming = cast(bool, data.attrib.get('optimizedForStreaming'))
        self.packetLength = cast(int, data.attrib.get('packetLength'))
        self.requiredBandwidths = data.attrib.get('requiredBandwidths')
        self.size = cast(int, data.attrib.get('size'))
        self.streams = self._buildStreams(data)
        self.syncItemId = cast(int, data.attrib.get('syncItemId'))
        self.syncState = data.attrib.get('syncState')
        self.videoProfile = data.attrib.get('videoProfile')

    def _buildStreams(self, data):
        # Collect every known stream flavor; each stream class picks out the
        # XML children matching its own STREAMTYPE constant.
        streams = []
        for cls in (VideoStream, AudioStream, SubtitleStream, LyricStream):
            items = self.findItems(data, cls, streamType=cls.STREAMTYPE)
            streams.extend(items)
        return streams

    def videoStreams(self):
        """ Returns a list of :class:`~plexapi.media.VideoStream` objects in this MediaPart. """
        return [stream for stream in self.streams if isinstance(stream, VideoStream)]

    def audioStreams(self):
        """ Returns a list of :class:`~plexapi.media.AudioStream` objects in this MediaPart. """
        return [stream for stream in self.streams if isinstance(stream, AudioStream)]

    def subtitleStreams(self):
        """ Returns a list of :class:`~plexapi.media.SubtitleStream` objects in this MediaPart. """
        return [stream for stream in self.streams if isinstance(stream, SubtitleStream)]

    def lyricStreams(self):
        """ Returns a list of :class:`~plexapi.media.LyricStream` objects in this MediaPart. """
        return [stream for stream in self.streams if isinstance(stream, LyricStream)]

    def setDefaultAudioStream(self, stream):
        """ Set the default :class:`~plexapi.media.AudioStream` for this MediaPart.

            Parameters:
                stream (:class:`~plexapi.media.AudioStream` or int): stream object,
                    or a bare stream id, to set as default.
        """
        # Accept either a stream object or a raw stream id.
        if isinstance(stream, AudioStream):
            key = "/library/parts/%d?audioStreamID=%d&allParts=1" % (self.id, stream.id)
        else:
            key = "/library/parts/%d?audioStreamID=%d&allParts=1" % (self.id, stream)
        self._server.query(key, method=self._server._session.put)

    def setDefaultSubtitleStream(self, stream):
        """ Set the default :class:`~plexapi.media.SubtitleStream` for this MediaPart.

            Parameters:
                stream (:class:`~plexapi.media.SubtitleStream` or int): stream object,
                    or a bare stream id, to set as default.
        """
        # Accept either a stream object or a raw stream id.
        if isinstance(stream, SubtitleStream):
            key = "/library/parts/%d?subtitleStreamID=%d&allParts=1" % (self.id, stream.id)
        else:
            key = "/library/parts/%d?subtitleStreamID=%d&allParts=1" % (self.id, stream)
        self._server.query(key, method=self._server._session.put)

    def resetDefaultSubtitleStream(self):
        """ Set default subtitle of this MediaPart to 'none'. """
        # A subtitleStreamID of 0 clears the selection on the server.
        key = "/library/parts/%d?subtitleStreamID=0&allParts=1" % (self.id)
        self._server.query(key, method=self._server._session.put)
class MediaPartStream(PlexObject):
    """ Base class for media streams. These consist of video, audio, subtitles, and lyrics.

        Attributes:
            bitrate (int): The bitrate of the stream.
            codec (str): The codec of the stream (ex: srt, ac3, mpeg4).
            default (bool): True if this is the default stream.
            displayTitle (str): The display title of the stream.
            extendedDisplayTitle (str): The extended display title of the stream.
            key (str): API URL (/library/streams/<id>)
            id (int): The unique ID for this stream on the server.
            index (int): The index of the stream.
            language (str): The language of the stream (ex: English, ไทย).
            languageCode (str): The Ascii language code of the stream (ex: eng, tha).
            requiredBandwidths (str): The required bandwidths to stream the file.
            selected (bool): True if this stream is selected.
            streamType (int): The stream type (1= :class:`~plexapi.media.VideoStream`,
                2= :class:`~plexapi.media.AudioStream`, 3= :class:`~plexapi.media.SubtitleStream`,
                4= :class:`~plexapi.media.LyricStream`).
            title (str): The title of the stream.
            type (int): Alias for streamType.
    """

    def _loadData(self, data):
        """ Load attribute values from Plex XML response. """
        self._data = data
        self.bitrate = cast(int, data.attrib.get('bitrate'))
        self.codec = data.attrib.get('codec')
        self.default = cast(bool, data.attrib.get('default'))
        self.displayTitle = data.attrib.get('displayTitle')
        self.extendedDisplayTitle = data.attrib.get('extendedDisplayTitle')
        self.key = data.attrib.get('key')
        self.id = cast(int, data.attrib.get('id'))
        self.index = cast(int, data.attrib.get('index', '-1'))
        self.language = data.attrib.get('language')
        self.languageCode = data.attrib.get('languageCode')
        self.requiredBandwidths = data.attrib.get('requiredBandwidths')
        self.selected = cast(bool, data.attrib.get('selected', '0'))
        self.streamType = cast(int, data.attrib.get('streamType'))
        self.title = data.attrib.get('title')
        # alias: 'type' mirrors the raw streamType attribute
        self.type = cast(int, data.attrib.get('streamType'))
@utils.registerPlexObject
class VideoStream(MediaPartStream):
    """ Represents a video stream within a :class:`~plexapi.media.MediaPart`.

        Attributes:
            TAG (str): 'Stream'
            STREAMTYPE (int): 1
            anamorphic (str): If the video is anamorphic.
            bitDepth (int): The bit depth of the video stream (ex: 8).
            cabac (int): The context-adaptive binary arithmetic coding.
            chromaLocation (str): The chroma location of the video stream.
            chromaSubsampling (str): The chroma subsampling of the video stream (ex: 4:2:0).
            codecID (str): The codec ID (ex: XVID).
            codedHeight (int): The coded height of the video stream in pixels.
            codedWidth (int): The coded width of the video stream in pixels.
            colorPrimaries (str): The color primaries of the video stream.
            colorRange (str): The color range of the video stream.
            colorSpace (str): The color space of the video stream (ex: bt2020).
            colorTrc (str): The color trc of the video stream.
            DOVIBLCompatID (int): Dolby Vision base layer compatibility ID.
            DOVIBLPresent (bool): True if Dolby Vision base layer is present.
            DOVIELPresent (bool): True if Dolby Vision enhancement layer is present.
            DOVILevel (int): Dolby Vision level.
            DOVIPresent (bool): True if Dolby Vision is present.
            DOVIProfile (int): Dolby Vision profile.
            DOVIRPUPresent (bool): True if Dolby Vision reference processing unit is present.
            DOVIVersion (float): The Dolby Vision version.
            duration (int): The duration of video stream in milliseconds.
            frameRate (float): The frame rate of the video stream (ex: 23.976).
            frameRateMode (str): The frame rate mode of the video stream.
            hasScallingMatrix (bool): True if video stream has a scaling matrix.
            height (int): The height of the video stream in pixels (ex: 1080).
            level (int): The codec encoding level of the video stream (ex: 41).
            profile (str): The profile of the video stream (ex: asp).
            pixelAspectRatio (str): The pixel aspect ratio of the video stream.
            pixelFormat (str): The pixel format of the video stream.
            refFrames (int): The number of reference frames of the video stream.
            scanType (str): The scan type of the video stream (ex: progressive).
            streamIdentifier (int): The stream identifier of the video stream.
            width (int): The width of the video stream in pixels (ex: 1920).
    """
    TAG = 'Stream'
    STREAMTYPE = 1

    def _loadData(self, data):
        """ Load attribute values from Plex XML response. """
        super(VideoStream, self)._loadData(data)
        self.anamorphic = data.attrib.get('anamorphic')
        self.bitDepth = cast(int, data.attrib.get('bitDepth'))
        self.cabac = cast(int, data.attrib.get('cabac'))
        self.chromaLocation = data.attrib.get('chromaLocation')
        self.chromaSubsampling = data.attrib.get('chromaSubsampling')
        self.codecID = data.attrib.get('codecID')
        self.codedHeight = cast(int, data.attrib.get('codedHeight'))
        self.codedWidth = cast(int, data.attrib.get('codedWidth'))
        self.colorPrimaries = data.attrib.get('colorPrimaries')
        self.colorRange = data.attrib.get('colorRange')
        self.colorSpace = data.attrib.get('colorSpace')
        self.colorTrc = data.attrib.get('colorTrc')
        self.DOVIBLCompatID = cast(int, data.attrib.get('DOVIBLCompatID'))
        self.DOVIBLPresent = cast(bool, data.attrib.get('DOVIBLPresent'))
        self.DOVIELPresent = cast(bool, data.attrib.get('DOVIELPresent'))
        self.DOVILevel = cast(int, data.attrib.get('DOVILevel'))
        self.DOVIPresent = cast(bool, data.attrib.get('DOVIPresent'))
        self.DOVIProfile = cast(int, data.attrib.get('DOVIProfile'))
        self.DOVIRPUPresent = cast(bool, data.attrib.get('DOVIRPUPresent'))
        self.DOVIVersion = cast(float, data.attrib.get('DOVIVersion'))
        self.duration = cast(int, data.attrib.get('duration'))
        self.frameRate = cast(float, data.attrib.get('frameRate'))
        self.frameRateMode = data.attrib.get('frameRateMode')
        # attribute name mirrors the (misspelled) XML attribute 'hasScallingMatrix'
        self.hasScallingMatrix = cast(bool, data.attrib.get('hasScallingMatrix'))
        self.height = cast(int, data.attrib.get('height'))
        self.level = cast(int, data.attrib.get('level'))
        self.profile = data.attrib.get('profile')
        self.pixelAspectRatio = data.attrib.get('pixelAspectRatio')
        self.pixelFormat = data.attrib.get('pixelFormat')
        self.refFrames = cast(int, data.attrib.get('refFrames'))
        self.scanType = data.attrib.get('scanType')
        self.streamIdentifier = cast(int, data.attrib.get('streamIdentifier'))
        self.width = cast(int, data.attrib.get('width'))
@utils.registerPlexObject
class AudioStream(MediaPartStream):
    """ Represents an audio stream within a :class:`~plexapi.media.MediaPart`.

        Attributes:
            TAG (str): 'Stream'
            STREAMTYPE (int): 2
            audioChannelLayout (str): The audio channel layout of the audio stream (ex: 5.1(side)).
            bitDepth (int): The bit depth of the audio stream (ex: 16).
            bitrateMode (str): The bitrate mode of the audio stream (ex: cbr).
            channels (int): The number of audio channels of the audio stream (ex: 6).
            duration (int): The duration of audio stream in milliseconds.
            profile (str): The profile of the audio stream.
            samplingRate (int): The sampling rate of the audio stream in Hz (ex: 44100).
            streamIdentifier (int): The stream identifier of the audio stream.

            <Track_only_attributes>: The following attributes are only available for tracks.

                * albumGain (float): The gain for the album.
                * albumPeak (float): The peak for the album.
                * albumRange (float): The range for the album.
                * endRamp (str): The end ramp for the track.
                * gain (float): The gain for the track.
                * loudness (float): The loudness for the track.
                * lra (float): The lra for the track.
                * peak (float): The peak for the track.
                * startRamp (str): The start ramp for the track.
    """
    TAG = 'Stream'
    STREAMTYPE = 2

    def _loadData(self, data):
        """ Load attribute values from Plex XML response. """
        super(AudioStream, self)._loadData(data)
        self.audioChannelLayout = data.attrib.get('audioChannelLayout')
        self.bitDepth = cast(int, data.attrib.get('bitDepth'))
        self.bitrateMode = data.attrib.get('bitrateMode')
        self.channels = cast(int, data.attrib.get('channels'))
        self.duration = cast(int, data.attrib.get('duration'))
        self.profile = data.attrib.get('profile')
        self.samplingRate = cast(int, data.attrib.get('samplingRate'))
        self.streamIdentifier = cast(int, data.attrib.get('streamIdentifier'))
        # loudness/gain attributes only exist when this stream belongs to a music Track
        if self._isChildOf(etag='Track'):
            self.albumGain = cast(float, data.attrib.get('albumGain'))
            self.albumPeak = cast(float, data.attrib.get('albumPeak'))
            self.albumRange = cast(float, data.attrib.get('albumRange'))
            self.endRamp = data.attrib.get('endRamp')
            self.gain = cast(float, data.attrib.get('gain'))
            self.loudness = cast(float, data.attrib.get('loudness'))
            self.lra = cast(float, data.attrib.get('lra'))
            self.peak = cast(float, data.attrib.get('peak'))
            self.startRamp = data.attrib.get('startRamp')
@utils.registerPlexObject
class SubtitleStream(MediaPartStream):
    """ Represents a subtitle stream within a :class:`~plexapi.media.MediaPart`.

        Attributes:
            TAG (str): 'Stream'
            STREAMTYPE (int): 3
            container (str): The container of the subtitle stream.
            forced (bool): True if this is a forced subtitle.
            format (str): The format of the subtitle stream (ex: srt).
            headerCompression (str): The header compression of the subtitle stream.
            transient (str): Unknown.
    """
    TAG = 'Stream'
    STREAMTYPE = 3

    def _loadData(self, data):
        """ Load attribute values from Plex XML response. """
        super(SubtitleStream, self)._loadData(data)
        self.container = data.attrib.get('container')
        self.forced = cast(bool, data.attrib.get('forced', '0'))
        self.format = data.attrib.get('format')
        self.headerCompression = data.attrib.get('headerCompression')
        self.transient = data.attrib.get('transient')
class LyricStream(MediaPartStream):
    """ Represents a lyric stream within a :class:`~plexapi.media.MediaPart`.

        Attributes:
            TAG (str): 'Stream'
            STREAMTYPE (int): 4
            format (str): The format of the lyric stream (ex: lrc).
            minLines (int): The minimum number of lines in the (timed) lyric stream.
            provider (str): The provider of the lyric stream (ex: com.plexapp.agents.lyricfind).
            timed (bool): True if the lyrics are timed to the track.
    """
    TAG = 'Stream'
    STREAMTYPE = 4

    def _loadData(self, data):
        """ Load attribute values from Plex XML response. """
        super(LyricStream, self)._loadData(data)
        self.format = data.attrib.get('format')
        self.minLines = cast(int, data.attrib.get('minLines'))
        self.provider = data.attrib.get('provider')
        # lyrics are considered untimed unless the attribute is present
        self.timed = cast(bool, data.attrib.get('timed', '0'))
@utils.registerPlexObject
class Session(PlexObject):
    """ Represents a current session.

        Attributes:
            TAG (str): 'Session'
            id (str): The unique identifier for the session.
            bandwidth (int): The Plex streaming brain reserved bandwidth for the session.
            location (str): The location of the session (lan, wan, or cellular)
    """
    TAG = 'Session'

    def _loadData(self, data):
        """ Load attribute values from Plex XML response. """
        self._data = data
        self.id = data.attrib.get('id')
        # use the module-level cast() like every other _loadData in this file
        self.bandwidth = cast(int, data.attrib.get('bandwidth'))
        self.location = data.attrib.get('location')
@utils.registerPlexObject
class TranscodeSession(PlexObject):
    """ Represents a current transcode session.

        Attributes:
            TAG (str): 'TranscodeSession'
            audioChannels (int): The number of audio channels of the transcoded media.
            audioCodec (str): The audio codec of the transcoded media.
            audioDecision (str): The transcode decision for the audio stream.
            complete (bool): True if the transcode is complete.
            container (str): The container of the transcoded media.
            context (str): The context for the transcode sesson.
            duration (int): The duration of the transcoded media in milliseconds.
            height (int): The height of the transcoded media in pixels.
            key (str): API URL (ex: /transcode/sessions/<id>).
            maxOffsetAvailable (float): Unknown.
            minOffsetAvailable (float): Unknown.
            progress (float): The progress percentage of the transcode.
            protocol (str): The protocol of the transcode.
            remaining (int): Unknown.
            size (int): The size of the transcoded media in bytes.
            sourceAudioCodec (str): The audio codec of the source media.
            sourceVideoCodec (str): The video codec of the source media.
            speed (float): The speed of the transcode.
            subtitleDecision (str): The transcode decision for the subtitle stream
            throttled (bool): True if the transcode is throttled.
            timestamp (float): The epoch timestamp when the transcode started.
            transcodeHwDecoding (str): The hardware transcoding decoder engine.
            transcodeHwDecodingTitle (str): The title of the hardware transcoding decoder engine.
            transcodeHwEncoding (str): The hardware transcoding encoder engine.
            transcodeHwEncodingTitle (str): The title of the hardware transcoding encoder engine.
            transcodeHwFullPipeline (str): True if hardware decoding and encoding is being used for the transcode.
            transcodeHwRequested (str): True if hardware transcoding was requested for the transcode.
            videoCodec (str): The video codec of the transcoded media.
            videoDecision (str): The transcode decision for the video stream.
            width (int): The width of the transcoded media in pixels.
    """
    TAG = 'TranscodeSession'

    def _loadData(self, data):
        """ Load attribute values from Plex XML response. """
        self._data = data
        self.audioChannels = cast(int, data.attrib.get('audioChannels'))
        self.audioCodec = data.attrib.get('audioCodec')
        self.audioDecision = data.attrib.get('audioDecision')
        self.complete = cast(bool, data.attrib.get('complete', '0'))
        self.container = data.attrib.get('container')
        self.context = data.attrib.get('context')
        self.duration = cast(int, data.attrib.get('duration'))
        self.height = cast(int, data.attrib.get('height'))
        self.key = data.attrib.get('key')
        self.maxOffsetAvailable = cast(float, data.attrib.get('maxOffsetAvailable'))
        self.minOffsetAvailable = cast(float, data.attrib.get('minOffsetAvailable'))
        self.progress = cast(float, data.attrib.get('progress'))
        self.protocol = data.attrib.get('protocol')
        self.remaining = cast(int, data.attrib.get('remaining'))
        self.size = cast(int, data.attrib.get('size'))
        self.sourceAudioCodec = data.attrib.get('sourceAudioCodec')
        self.sourceVideoCodec = data.attrib.get('sourceVideoCodec')
        self.speed = cast(float, data.attrib.get('speed'))
        self.subtitleDecision = data.attrib.get('subtitleDecision')
        self.throttled = cast(bool, data.attrib.get('throttled', '0'))
        # NOTE: the XML attribute is 'timeStamp' (capital S), not 'timestamp'
        self.timestamp = cast(float, data.attrib.get('timeStamp'))
        self.transcodeHwDecoding = data.attrib.get('transcodeHwDecoding')
        self.transcodeHwDecodingTitle = data.attrib.get('transcodeHwDecodingTitle')
        self.transcodeHwEncoding = data.attrib.get('transcodeHwEncoding')
        self.transcodeHwEncodingTitle = data.attrib.get('transcodeHwEncodingTitle')
        self.transcodeHwFullPipeline = cast(bool, data.attrib.get('transcodeHwFullPipeline', '0'))
        self.transcodeHwRequested = cast(bool, data.attrib.get('transcodeHwRequested', '0'))
        self.videoCodec = data.attrib.get('videoCodec')
        self.videoDecision = data.attrib.get('videoDecision')
        self.width = cast(int, data.attrib.get('width'))
@utils.registerPlexObject
class TranscodeJob(PlexObject):
    """ Represents an Optimizing job.

        TranscodeJobs are the process for optimizing conversions.
        Active or paused optimization items. Usually one item as a time.

        Attributes:
            TAG (str): 'TranscodeJob'
    """
    TAG = 'TranscodeJob'

    def _loadData(self, data):
        """ Load attribute values from Plex XML response. """
        self._data = data
        # NOTE(review): values are kept as raw strings (no cast()) — confirm callers expect str
        self.generatorID = data.attrib.get('generatorID')
        self.key = data.attrib.get('key')
        self.progress = data.attrib.get('progress')
        self.ratingKey = data.attrib.get('ratingKey')
        self.size = data.attrib.get('size')
        self.targetTagID = data.attrib.get('targetTagID')
        self.thumb = data.attrib.get('thumb')
        self.title = data.attrib.get('title')
        self.type = data.attrib.get('type')
@utils.registerPlexObject
class Optimized(PlexObject):
    """ Represents an Optimized item.

        Optimized items are optimized and queued conversions items.

        Attributes:
            TAG (str): 'Item'
    """
    TAG = 'Item'

    def _loadData(self, data):
        """ Load attribute values from Plex XML response. """
        self._data = data
        self.id = data.attrib.get('id')
        self.composite = data.attrib.get('composite')
        self.title = data.attrib.get('title')
        self.type = data.attrib.get('type')
        self.target = data.attrib.get('target')
        self.targetTagID = data.attrib.get('targetTagID')

    def remove(self):
        """ Remove an Optimized item. """
        key = '%s/%s' % (self._initpath, self.id)
        self._server.query(key, method=self._server._session.delete)

    def rename(self, title):
        """ Rename an Optimized item.

            Parameters:
                title (str): The new title for this item.
        """
        # quote the title so spaces/special characters survive the query string
        key = '%s/%s?Item[title]=%s' % (self._initpath, self.id, quote_plus(title))
        self._server.query(key, method=self._server._session.put)

    def reprocess(self, ratingKey):
        """ Reprocess a removed Conversion item that is still a listed Optimize item. """
        key = '%s/%s/%s/enable' % (self._initpath, self.id, ratingKey)
        self._server.query(key, method=self._server._session.put)
@utils.registerPlexObject
class Conversion(PlexObject):
    """ Represents a Conversion item.

        Conversions are items queued for optimization or being actively optimized.

        Attributes:
            TAG (str): 'Video'
    """
    TAG = 'Video'

    def _loadData(self, data):
        """ Load attribute values from Plex XML response. """
        self._data = data
        # NOTE(review): values are kept as raw strings (no cast()) — confirm callers expect str
        self.addedAt = data.attrib.get('addedAt')
        self.art = data.attrib.get('art')
        self.chapterSource = data.attrib.get('chapterSource')
        self.contentRating = data.attrib.get('contentRating')
        self.duration = data.attrib.get('duration')
        self.generatorID = data.attrib.get('generatorID')
        self.generatorType = data.attrib.get('generatorType')
        self.guid = data.attrib.get('guid')
        self.key = data.attrib.get('key')
        self.lastViewedAt = data.attrib.get('lastViewedAt')
        self.librarySectionID = data.attrib.get('librarySectionID')
        self.librarySectionKey = data.attrib.get('librarySectionKey')
        self.librarySectionTitle = data.attrib.get('librarySectionTitle')
        self.originallyAvailableAt = data.attrib.get('originallyAvailableAt')
        self.playQueueItemID = data.attrib.get('playQueueItemID')
        self.playlistID = data.attrib.get('playlistID')
        self.primaryExtraKey = data.attrib.get('primaryExtraKey')
        self.rating = data.attrib.get('rating')
        self.ratingKey = data.attrib.get('ratingKey')
        self.studio = data.attrib.get('studio')
        self.summary = data.attrib.get('summary')
        self.tagline = data.attrib.get('tagline')
        self.target = data.attrib.get('target')
        self.thumb = data.attrib.get('thumb')
        self.title = data.attrib.get('title')
        self.type = data.attrib.get('type')
        self.updatedAt = data.attrib.get('updatedAt')
        self.userID = data.attrib.get('userID')
        self.username = data.attrib.get('username')
        self.viewOffset = data.attrib.get('viewOffset')
        self.year = data.attrib.get('year')

    def remove(self):
        """ Remove Conversion from queue. """
        key = '/playlists/%s/items/%s/%s/disable' % (self.playlistID, self.generatorID, self.ratingKey)
        self._server.query(key, method=self._server._session.put)

    def move(self, after):
        """ Move Conversion items position in queue.

            Parameters:
                after (int): Place item after specified playQueueItemID. '-1' is the active conversion.

            Example:

                Move 5th conversion Item to active conversion
                    conversions[4].move('-1')

                Move 4th conversion Item to 3rd in conversion queue
                    conversions[3].move(conversions[1].playQueueItemID)
        """
        key = '%s/items/%s/move?after=%s' % (self._initpath, self.playQueueItemID, after)
        self._server.query(key, method=self._server._session.put)
class MediaTag(PlexObject):
    """ Base class for media tags used for filtering and searching your library
        items or navigating the metadata of media items in your library. Tags are
        the construct used for things such as Country, Director, Genre, etc.

        Attributes:
            server (:class:`~plexapi.server.PlexServer`): Server this client is connected to.
            id (int): Tag ID (This seems meaningless except to use it as a unique id).
            role (str): Unknown
            tag (str): Name of the tag. This will be Animation, SciFi etc for Genres. The name of
                the person for Directors and Roles (ex: Animation, etc).

            <Hub_Search_Attributes>: Attributes only applicable in search results from
                PlexServer :func:`~plexapi.server.PlexServer.search`. They provide details of which
                library section the tag was found as well as the url to dig deeper into the results.

                * key (str): API URL to dig deeper into this tag (ex: /library/sections/1/all?actor=9081).
                * librarySectionID (int): Section ID this tag was generated from.
                * librarySectionTitle (str): Library section title this tag was found.
                * librarySectionType (str): Media type of the library section this tag was found.
                * tagType (int): Tag type ID.
                * thumb (str): URL to thumbnail image.
    """

    def _loadData(self, data):
        """ Load attribute values from Plex XML response. """
        self._data = data
        self.id = cast(int, data.attrib.get('id'))
        self.role = data.attrib.get('role')
        self.tag = data.attrib.get('tag')
        # additional attributes only from hub search
        self.key = data.attrib.get('key')
        self.librarySectionID = cast(int, data.attrib.get('librarySectionID'))
        self.librarySectionTitle = data.attrib.get('librarySectionTitle')
        self.librarySectionType = data.attrib.get('librarySectionType')
        self.tagType = cast(int, data.attrib.get('tagType'))
        self.thumb = data.attrib.get('thumb')

    def items(self, *args, **kwargs):
        """ Return the list of items within this tag. This function is only applicable
            in search results from PlexServer :func:`~plexapi.server.PlexServer.search`.

            Raises:
                :exc:`plexapi.exceptions.BadRequest`: If this tag has no key (i.e. it did
                    not come from a hub search result).
        """
        if not self.key:
            raise BadRequest('Key is not defined for this tag: %s' % self.tag)
        return self.fetchItems(self.key)
class GuidTag(PlexObject):
    """ Base class for guid tags used only for Guids, as they contain only a string identifier.

        Attributes:
            id (str): The guid for external metadata sources (e.g. IMDB, TMDB, TVDB).
    """

    def _loadData(self, data):
        """ Load attribute values from Plex XML response. """
        self._data = data
        self.id = data.attrib.get('id')
@utils.registerPlexObject
class Collection(MediaTag):
    """ Represents a single Collection media tag.
        See :class:`~plexapi.media.MediaTag` for the common tag attributes.

        Attributes:
            TAG (str): 'Collection'
            FILTER (str): 'collection'
    """
    TAG = 'Collection'
    FILTER = 'collection'
@utils.registerPlexObject
class Label(MediaTag):
    """ Represents a single Label media tag.
        See :class:`~plexapi.media.MediaTag` for the common tag attributes.

        Attributes:
            TAG (str): 'Label'
            FILTER (str): 'label'
    """
    TAG = 'Label'
    FILTER = 'label'
@utils.registerPlexObject
class Tag(MediaTag):
    """ Represents a single Tag media tag.

        Attributes:
            TAG (str): 'Tag'
            FILTER (str): 'tag'
    """
    TAG = 'Tag'
    FILTER = 'tag'

    def _loadData(self, data):
        """ Load attribute values from Plex XML response. """
        self._data = data
        self.id = cast(int, data.attrib.get('id', 0))
        self.filter = data.attrib.get('filter')
        self.tag = data.attrib.get('tag')
        # convenience alias so Tag exposes a title like other objects
        self.title = self.tag
@utils.registerPlexObject
class Country(MediaTag):
    """ Represents a single Country media tag.
        See :class:`~plexapi.media.MediaTag` for the common tag attributes.

        Attributes:
            TAG (str): 'Country'
            FILTER (str): 'country'
    """
    TAG = 'Country'
    FILTER = 'country'
@utils.registerPlexObject
class Director(MediaTag):
    """ Represents a single Director media tag.
        See :class:`~plexapi.media.MediaTag` for the common tag attributes.

        Attributes:
            TAG (str): 'Director'
            FILTER (str): 'director'
    """
    TAG = 'Director'
    FILTER = 'director'
@utils.registerPlexObject
class Genre(MediaTag):
    """ Represents a single Genre media tag.
        See :class:`~plexapi.media.MediaTag` for the common tag attributes.

        Attributes:
            TAG (str): 'Genre'
            FILTER (str): 'genre'
    """
    TAG = 'Genre'
    FILTER = 'genre'
@utils.registerPlexObject
class Guid(GuidTag):
    """ Represents a single Guid media tag.
        See :class:`~plexapi.media.GuidTag` for the common attributes.

        Attributes:
            TAG (str): 'Guid'
    """
    TAG = "Guid"
@utils.registerPlexObject
class Mood(MediaTag):
    """ Represents a single Mood media tag.
        See :class:`~plexapi.media.MediaTag` for the common tag attributes.

        Attributes:
            TAG (str): 'Mood'
            FILTER (str): 'mood'
    """
    TAG = 'Mood'
    FILTER = 'mood'
@utils.registerPlexObject
class Style(MediaTag):
    """ Represents a single Style media tag.
        See :class:`~plexapi.media.MediaTag` for the common tag attributes.

        Attributes:
            TAG (str): 'Style'
            FILTER (str): 'style'
    """
    TAG = 'Style'
    FILTER = 'style'
class BaseImage(PlexObject):
    """ Base class for all Art, Banner, and Poster objects.

        Attributes:
            TAG (str): 'Photo'
            key (str): API URL (/library/metadata/<ratingkey>).
            provider (str): The source of the poster or art.
            ratingKey (str): Unique key identifying the poster or art.
            selected (bool): True if the poster or art is currently selected.
            thumb (str): The URL to retrieve the poster or art thumbnail.
    """
    TAG = 'Photo'

    def _loadData(self, data):
        """ Load attribute values from Plex XML response. """
        self._data = data
        self.key = data.attrib.get('key')
        self.provider = data.attrib.get('provider')
        self.ratingKey = data.attrib.get('ratingKey')
        self.selected = cast(bool, data.attrib.get('selected'))
        self.thumb = data.attrib.get('thumb')

    def select(self):
        """ Select this image as the active art/banner/poster for its item. """
        # the selection endpoint is the listing endpoint minus its final character
        key = self._initpath[:-1]
        data = '%s?url=%s' % (key, quote_plus(self.ratingKey))
        try:
            self._server.query(data, method=self._server._session.put)
        except xml.etree.ElementTree.ParseError:
            # NOTE(review): server response is apparently not parseable XML on
            # success — confirm it is safe to swallow this error
            pass
class Art(BaseImage):
    """ Represents a single Art object. See :class:`~plexapi.media.BaseImage` for attributes. """
class Banner(BaseImage):
    """ Represents a single Banner object. See :class:`~plexapi.media.BaseImage` for attributes. """
class Poster(BaseImage):
    """ Represents a single Poster object. See :class:`~plexapi.media.BaseImage` for attributes. """
@utils.registerPlexObject
class Producer(MediaTag):
    """ Represents a single Producer media tag.
        See :class:`~plexapi.media.MediaTag` for the common tag attributes.

        Attributes:
            TAG (str): 'Producer'
            FILTER (str): 'producer'
    """
    TAG = 'Producer'
    FILTER = 'producer'
@utils.registerPlexObject
class Role(MediaTag):
    """ Represents a single Role (actor/actress) media tag.
        See :class:`~plexapi.media.MediaTag` for the common tag attributes.

        Attributes:
            TAG (str): 'Role'
            FILTER (str): 'role'
    """
    TAG = 'Role'
    FILTER = 'role'
@utils.registerPlexObject
class Similar(MediaTag):
    """ Represents a single Similar media tag.
        See :class:`~plexapi.media.MediaTag` for the common tag attributes.

        Attributes:
            TAG (str): 'Similar'
            FILTER (str): 'similar'
    """
    TAG = 'Similar'
    FILTER = 'similar'
@utils.registerPlexObject
class Writer(MediaTag):
    """ Represents a single Writer media tag.
        See :class:`~plexapi.media.MediaTag` for the common tag attributes.

        Attributes:
            TAG (str): 'Writer'
            FILTER (str): 'writer'
    """
    TAG = 'Writer'
    FILTER = 'writer'
@utils.registerPlexObject
class Chapter(PlexObject):
    """ Represents a single Chapter media tag.

        Attributes:
            TAG (str): 'Chapter'
    """
    TAG = 'Chapter'

    def _loadData(self, data):
        """ Load attribute values from Plex XML response. """
        self._data = data
        self.id = cast(int, data.attrib.get('id', 0))
        self.filter = data.attrib.get('filter')  # I couldn't filter on it anyways
        self.tag = data.attrib.get('tag')
        self.title = self.tag
        self.index = cast(int, data.attrib.get('index'))
        # start/end offsets appear to be milliseconds (cf. Marker) — TODO confirm
        self.start = cast(int, data.attrib.get('startTimeOffset'))
        self.end = cast(int, data.attrib.get('endTimeOffset'))
@utils.registerPlexObject
class Marker(PlexObject):
    """ Represents a single Marker media tag.

        Attributes:
            TAG (str): 'Marker'
    """
    TAG = 'Marker'

    def __repr__(self):
        name = self._clean(self.firstAttr('type'))
        start = utils.millisecondToHumanstr(self._clean(self.firstAttr('start')))
        end = utils.millisecondToHumanstr(self._clean(self.firstAttr('end')))
        return '<%s:%s %s - %s>' % (self.__class__.__name__, name, start, end)

    def _loadData(self, data):
        """ Load attribute values from Plex XML response. """
        self._data = data
        self.type = data.attrib.get('type')
        # start/end offsets are in milliseconds (formatted by millisecondToHumanstr above)
        self.start = cast(int, data.attrib.get('startTimeOffset'))
        self.end = cast(int, data.attrib.get('endTimeOffset'))
@utils.registerPlexObject
class Field(PlexObject):
    """ Represents a single Field.

        Attributes:
            TAG (str): 'Field'
            name (str): The name of the locked/edited field.
            locked (bool): True if the field is locked.
    """
    TAG = 'Field'

    def _loadData(self, data):
        """ Load attribute values from Plex XML response. """
        self._data = data
        self.name = data.attrib.get('name')
        self.locked = cast(bool, data.attrib.get('locked'))
@utils.registerPlexObject
class SearchResult(PlexObject):
    """ Represents a single SearchResult.

        Attributes:
            TAG (str): 'SearchResult'
            guid (str): The guid of the matched result.
            lifespanEnded (str): Unknown.
            name (str): The name of the matched result.
            score (int): The match score of the result.
            year (str): The year of the matched result.
    """
    TAG = 'SearchResult'

    def __repr__(self):
        name = self._clean(self.firstAttr('name'))
        score = self._clean(self.firstAttr('score'))
        return '<%s>' % ':'.join([p for p in [self.__class__.__name__, name, score] if p])

    def _loadData(self, data):
        """ Load attribute values from Plex XML response. """
        self._data = data
        self.guid = data.attrib.get('guid')
        self.lifespanEnded = data.attrib.get('lifespanEnded')
        self.name = data.attrib.get('name')
        self.score = cast(int, data.attrib.get('score'))
        self.year = data.attrib.get('year')
@utils.registerPlexObject
class Agent(PlexObject):
    """ Represents a single Agent.

        Attributes:
            TAG (str): 'Agent'
    """
    TAG = 'Agent'

    def __repr__(self):
        uid = self._clean(self.firstAttr('shortIdentifier'))
        return '<%s>' % ':'.join([p for p in [self.__class__.__name__, uid] if p])

    def _loadData(self, data):
        """ Load attribute values from Plex XML response. """
        self._data = data
        self.hasAttribution = data.attrib.get('hasAttribution')
        self.hasPrefs = data.attrib.get('hasPrefs')
        self.identifier = data.attrib.get('identifier')
        self.primary = data.attrib.get('primary')
        # [-1] keeps the behavior for dotted identifiers but avoids an
        # IndexError when the identifier contains no '.'
        self.shortIdentifier = self.identifier.rsplit('.', 1)[-1]
        if 'mediaType' in self._initpath:
            self.name = data.attrib.get('name')
            self.languageCode = [code.attrib.get('code') for code in data]
        else:
            self.mediaTypes = [AgentMediaType(server=self._server, data=d) for d in data]

    def _settings(self):
        """ Return the list of :class:`~plexapi.settings.Setting` objects for this agent. """
        key = '/:/plugins/%s/prefs' % self.identifier
        data = self._server.query(key)
        return self.findItems(data, cls=settings.Setting)
class AgentMediaType(Agent):
    """ Represents a single Agent MediaType. """

    def __repr__(self):
        uid = self._clean(self.firstAttr('name'))
        return '<%s>' % ':'.join([p for p in [self.__class__.__name__, uid] if p])

    def _loadData(self, data):
        """ Load attribute values from Plex XML response. """
        self._data = data  # keep the raw XML element, consistent with other PlexObjects
        self.mediaType = cast(int, data.attrib.get('mediaType'))
        self.name = data.attrib.get('name')
        self.languageCode = [code.attrib.get('code') for code in data]
| StarcoderdataPython |
83027 | """
Useful tools when working with Figura configs.
"""
import os
from .settings import get_setting
from .errors import ConfigError, ConfigParsingError, ConfigValueError
from .path import to_figura_path
from .container import ConfigContainer
from .parser import ConfigParser
from .importutils import is_importable_path, figura_importing
################################################################################
# convenience functions
@figura_importing
def read_config(path, enable_path_spliting=True, should_step_in_package=True):
    """
    Flexibly read/process a Figura config file.

    The path can point to:

    - a config file. E.g. ``figura.tests.config.basic1``
    - a config directory. E.g. ``figura.tests.config``
    - a value (or section) inside a config. E.g. ``figura.tests.config.basic1.some_params.a``

    :param path: a string or a `FiguraPath <#figura.path.FiguraPath>`_.
    :param enable_path_spliting: set to False if the path points to a file (as opposed to
        PATH.TO.FILE.PATH.TO.ATTR), if you want to suppress auto-splitting.
    :param should_step_in_package: if True and ``path`` names a package (directory),
        recursively read every config module under it into the returned container.
    :return: a `ConfigContainer <#figura.container.ConfigContainer>`_.
        In case of a deep path, the return value is the value from inside the
        container, which is not necessarily a ConfigContainer.

    .. testsetup::

        from figura.utils import read_config

    >>> read_config('figura.tests.config.basic1').some_params.a  # read a config file
    1
    >>> read_config('figura.tests.config.basic1.some_params.a')  # read a value inside a config
    1
    >>> read_config('figura.tests.config').basic1.some_params.a  # read a dir of config files
    1
    """
    return _read_config(
        path,
        enable_path_spliting=enable_path_spliting,
        should_step_in_package=should_step_in_package,
    )
def _read_config(path, enable_path_spliting=True, should_step_in_package=True):
    """
    Implementation of `read_config <#figura.utils.read_config>`_.

    Should be called from inside a FiguraImportContext_.

    :param path: a string or FiguraPath pointing at a config file, directory, or value.
    :param enable_path_spliting: when True, split ``path`` into file-path and attr-path parts.
    :param should_step_in_package: when True and ``path`` is a package, recursively parse
        all config modules under it into the returned container.
    :raises ConfigParsingError: if no config file is found for ``path``.
    :raises ConfigValueError: if the attr-path does not exist in the parsed config.
    """
    if enable_path_spliting:
        # process the path, split into file-path and attr-path
        file_path, attr_path = to_figura_path(path).split_parts()
    else:
        # path is known to be a file-path, so avoid the lookups
        file_path = path
        attr_path = ''
    if not file_path:
        raise ConfigParsingError('No config file found for path: %r' % str(path))
    # parse the path:
    parser = ConfigParser()
    config = parser.parse(file_path)
    is_pkg = config.__package__ == path
    if should_step_in_package and is_pkg:
        # support reading all modules under a package, and create a ConfigContainer
        # reflecting the structure:
        pkg_module = _get_module_of_config_container(config)
        for filename, rel_mod_path, ispkg in _figura_walk_packages(pkg_module):
            mod_path = '%s.%s' % (file_path, rel_mod_path)
            sub_config = parser.parse(mod_path)
            config.deep_setattr(rel_mod_path, sub_config)
    # apply the attr-path:
    if attr_path:
        try:
            config = config.deep_getattr(attr_path)
        except AttributeError:
            raise ConfigValueError('Attribute %r is missing from config loaded from %r' % (
                attr_path, config.__file__))
    return config
def _figura_walk_packages(pkg_module, prefix=''):
    """
    ``pkgutil.walk_packages`` is completely broken, so we use our own implementation.

    Recursively walk the config files and sub-packages under ``pkg_module``,
    yielding ``(abs_filename, relative_module_path, is_package)`` tuples.
    Names starting with an underscore are skipped.

    :param pkg_module: the python module object of the package to walk.
    :param prefix: dotted prefix prepended to yielded relative module paths (used
        internally for recursion).
    """
    fig_ext = get_setting('CONFIG_FILE_EXT')
    base_path = pkg_module.__name__
    base_dir = os.path.dirname(pkg_module.__file__)
    suffix = '.%s' % fig_ext
    for rel_filename in os.listdir(base_dir):
        if rel_filename.startswith('_'):
            # private, skip.
            # NOTE: this captures both ``__init__.fig`` and ``_privateconf.fig``.
            continue
        abs_filename = os.path.join(base_dir, rel_filename)
        if os.path.isdir(abs_filename):
            # a sub-directory
            # check if it contains configs:
            rel_mod_path = rel_filename
            full_mod_path = '%s.%s' % (base_path, rel_mod_path)
            result_rel_mod_path = prefix + rel_mod_path
            if is_importable_path(full_mod_path, with_ext=fig_ext):
                yield (abs_filename, result_rel_mod_path, True)
                parser = ConfigParser()
                subpkg_config = parser.parse(full_mod_path)
                subpkg_module = _get_module_of_config_container(subpkg_config)
                yield from _figura_walk_packages(subpkg_module, prefix=result_rel_mod_path + '.')
        elif rel_filename.endswith(suffix):
            # a config file
            rel_mod_path = rel_filename[:-len(suffix)]
            # guard against a file named exactly ".fig" (empty module name)
            if rel_mod_path:
                result_rel_mod_path = prefix + rel_mod_path
                yield (abs_filename, result_rel_mod_path, False)
def _get_module_of_config_container(conf):
    """Return the python module object backing the given config container."""
    # The container's __name__ is its fully-qualified module path.
    module_path = conf.__name__
    return ConfigParser().get_module(module_path)
@figura_importing
def build_config(*paths, **kwargs):
    """
    Build a configuration by reading Figura configs and optional
    override sets, and combining them into a final configuration.
    `read_config <#figura.utils.read_config>`_ is called for processing each path.
    :param paths: paths (strings or FiguraPaths) to config files. All but the first are
        treated as override-sets, and are applied to the base config.
        The first may also be an override-set, in which case,
        ``default_config`` must be specified, and is used as the base
        config.
    :param kwargs['default_config']: if the first path in ``paths`` is an override-set,
        default_config is used as the base config
    :param kwargs['extra_overrides']: a ConfigContainer of extra overrides to
        be applied to the config. ``extra_overrides`` are applied last.
    :param kwargs['enforce_override_set']:
        ensure that an override-set is not used as base-config, and that a non-override-set
        is not used for overriding.
    :return: a `ConfigContainer <#figura.container.ConfigContainer>`_
    :raises TypeError: if an unrecognized keyword argument is passed.
    :raises ConfigError: if an override-set is used as the base config while
        ``enforce_override_set`` is True.
    """
    default_config = kwargs.pop('default_config', None)
    extra_overrides = kwargs.pop('extra_overrides', None)
    enforce_override_set = kwargs.pop('enforce_override_set', True)
    # Anything left in kwargs was not recognized above.
    if kwargs:
        raise TypeError('build_config() got an invalid keyword argument: %s' % list(kwargs)[0])
    configs = [_to_config(conf) for conf in paths]
    # using the default_config if the first config passed is an overrideset
    # (or if no configs were passed at all):
    use_default = (len(configs) == 0) or \
                  (isinstance(configs[0], ConfigContainer) and configs[0].get_metadata().is_override_set)
    if default_config is not None and use_default:
        configs = [_to_config(default_config)] + configs
    # read each config and combine them:
    is_first = True
    config = ConfigContainer()
    for cur_config in configs:
        if is_first:
            # This is the base config
            if enforce_override_set and cur_config.get_metadata().is_override_set:
                raise ConfigError('Attempting to use an override-set as a base config', cur_config)
            config = cur_config
            is_first = False
        else:
            # This is an override set to apply to the config
            config.apply_overrides(cur_config, enforce_override_set=enforce_override_set)
    if extra_overrides:
        # extra_overrides are applied last, on top of everything else.
        config.apply_overrides(extra_overrides, enforce_override_set=enforce_override_set)
    return config
def _to_config(x):
    """Coerce *x* to a ConfigContainer, reading it as a config path if needed."""
    return x if isinstance(x, ConfigContainer) else _read_config(x)
################################################################################
| StarcoderdataPython |
3294326 | <gh_stars>0
import pygame
import random, math
from entities.entity import Entity
class Chaser(Entity):
    """Enemy that pursues the player, alternating 'chase'/'charge'/'idle' modes."""
    def __init__(self, x=0, y=0) -> None:
        """Spawn at (x, y) in chase mode with randomized attack timing."""
        super().__init__(x=x, y=y)
        self.mode = 'chase'
        self.mode_ticker = 0          # ticks spent in the current mode
        self.switch_readiness = 100   # countdown used for the pre-charge flash
        self.goal = [0, 0]            # charge target, relative then absolute use below
        # to introduce variability to individual attack patterns
        self.chase_period = random.randint(97, 103)
        self.charge_intensity = 0.95 + random.random() / 4
    def accept_stimuli(self, app):  # change modes depending on stimuli
        """Transition between modes based on player contact, timers and distance."""
        if self.rect.colliderect(app.player.rect):
            self.mode = 'idle'
            self.mode_ticker = 0
            # NOTE(review): self.mode was just set to 'idle', so this message
            # always reports 'idle' -- possibly meant to report the pre-hit mode.
            app.player.get_hurt(app, 30, f"Player was hit by chaser in {self.mode} for 30 damage.")
        else:
            if self.mode == 'chase':
                # After 5 periods of chasing, commit to a charge.
                if self.mode_ticker >= 5 * self.chase_period:
                    self.mode = 'charge'
                    self.mode_ticker = 0
                elif self.mode_ticker == int(4.5 * self.chase_period):
                    # Start the warning flash half a period before charging.
                    self.switch_readiness = 50
            elif self.mode == 'charge':
                # Occasionally give up the charge after 4 periods...
                if self.mode_ticker >= 4 * self.chase_period and random.random() < 0.1:
                    self.mode = 'chase'
                    self.charge_intensity = 1
                    self.mode_ticker = 0
                # ...or abort when far from the player (distance threshold in
                # pixels, scaled off chase_period).
                elif math.sqrt(pow(self.pos[0] - app.player.pos[0],2) +\
                    pow(self.pos[1] - app.player.pos[1],2)) > 3 * self.chase_period:
                    self.mode = 'chase'
                    self.mode_ticker = 0
            elif self.mode == 'idle':
                # Recover for 2 periods after hitting the player, standing still.
                if self.mode_ticker >= 2 * self.chase_period:
                    self.mode = 'chase'
                    self.mode_ticker = 0
                elif self.i_vel[0] != 0 or self.i_vel[1] != 0:
                    self.i_vel = [0, 0]
            else:
                # Unknown mode: fall back to chasing.
                self.mode = 'chase'
    def mode_act(self, app):  # direct mode-specific behaviors
        """Advance timers and set this tick's intended velocity for the mode."""
        self.mode_ticker += 1  # increment/decrement timer values
        if self.switch_readiness > 0:
            self.switch_readiness -= 1
        if self.mode == 'chase':  # move straight toward player
            self.i_vel[0] = (app.player.pos[0] - self.pos[0]) / 400
            self.i_vel[1] = (app.player.pos[1] - self.pos[1]) / 400
        elif self.mode == 'charge':  # move straight towards a goal
            if self.mode_ticker == 0:
                # Lead the player by twice their current velocity.
                self.goal[0] = (app.player.pos[0] - self.pos[0] +
                                app.player.vel[0] * 2)
                self.goal[1] = (app.player.pos[1] - self.pos[1] +
                                app.player.vel[1] * 2)
                d = math.sqrt(pow(self.pos[0] - self.goal[0],2) +\
                    pow(self.pos[1] - self.goal[1],2))
                angle = math.atan2(self.goal[1], self.goal[0])
                # Initial burst scaled by distance and this chaser's intensity.
                self.i_vel[0] = (100 * math.cos(angle) * max(d / 70, 1) +\
                    (self.goal[0] - self.pos[0]) / 250) * self.charge_intensity
                self.i_vel[1] = (100 * math.sin(angle) * max(d / 70, 1) +\
                    (self.goal[1] - self.pos[1]) / 250) * self.charge_intensity
            else:
                # Accelerate slightly every tick while charging.
                self.i_vel[0] *= 1.007
                self.i_vel[1] *= 1.007
    def render(self, canvas):
        """Draw the chaser; flashes white just before charging."""
        # flash just before charging
        if self.switch_readiness % 5 == 1 and self.mode == 'chase':
            canvas.create_rectangle(self.rect.x, self.rect.y, self.rect.right,
                                    self.rect.bottom, fill="#ffffff")
        # self.invincibility presumably comes from Entity -- skips frames to
        # blink while invincible (TODO confirm).
        elif self.invincibility % 6 != 1:
            canvas.create_rectangle(self.rect.x, self.rect.y, self.rect.right,
                                    self.rect.bottom, fill=self.color)
class TChaser(Chaser):  # a Chaser that occasionally teleports
    """Chaser variant that teleports to the far side of the player."""
    def __init__(self, x=0, y=0):
        super().__init__(x=x, y=y)
        self.t_goal = [0, 0]          # teleport destination
        self.teleport_counter = 80    # ticks until teleport fires (-1 = locked)
        self.color = "#0d0da0"  # navy blue
    def teleport(self, app, inclusive=False):
        """Pick a teleport goal roughly opposite the pack, then arm the countdown.

        :param inclusive: when True, average over all entities, not just Chasers.
        """
        # get average position of all Chasers
        avg_pos = [0, 0]
        cc = 0
        for ap in app.entities:
            if isinstance(ap, Chaser) or inclusive:
                cc += 1
                avg_pos[0] += ap.pos[0]
                avg_pos[1] += ap.pos[1]
        # NOTE(review): assumes at least one Chaser (this instance) is in
        # app.entities; cc == 0 would divide by zero -- confirm.
        avg_pos[0] /= cc
        avg_pos[1] /= cc
        # and get a teleportation goal exactly opposite of that position
        # NOTE(review): atan2 is called as atan2(dx, dy) rather than the
        # conventional atan2(dy, dx) -- confirm this mirroring is intended.
        angle = math.atan2(app.player.pos[0] - avg_pos[0], app.player.pos[1] - avg_pos[1])
        angle += math.pi
        multiplier = random.randrange(100, 300)
        self.t_goal[0] = app.player.pos[0] + math.cos(angle) * multiplier
        self.t_goal[1] = app.player.pos[1] + math.sin(angle) * multiplier
        self.teleport_counter = self.chase_period // 2
    def mode_act(self, app):
        """Run normal Chaser behavior plus the teleport state machine."""
        if self.teleport_counter > 0:
            self.teleport_counter -= 1
        super().mode_act(app)
        # actually teleport
        if self.teleport_counter == 0:
            self.pos[0] = self.t_goal[0]
            self.pos[1] = self.t_goal[1]
            self.teleport_counter = -1  # lock teleportation until activation
        # teleport randomly
        elif random.random() < 0.05 and self.mode_ticker % (self.chase_period // 2) == 0:
            self.teleport(app)
        # teleport if touching another enemy (only early in the countdown)
        elif self.mode_ticker % 5 == 0 and self.teleport_counter > 46:
            for e in app.entities:
                if e is not self and e.rect.colliderect(self.rect):
                    self.teleport(app, True)
    def render(self, canvas):
        """Draw the chaser; flashes cyan and previews the destination pre-teleport."""
        # flash just before teleporting
        if self.teleport_counter % 3 == 1:
            canvas.create_rectangle(self.rect.x, self.rect.y, self.rect.right,
                                    self.rect.bottom, fill="#0affff")
            # Outline the square the chaser will occupy after teleporting.
            canvas.create_rectangle(self.t_goal[0], self.t_goal[1],
                                    self.t_goal[0] + self.rect.w,
                                    self.t_goal[1] + self.rect.w, outline="#00ffff")
        else:
            super().render(canvas)
| StarcoderdataPython |
162957 | <filename>test_plugins/BondingTest.py<gh_stars>0
import os
import requests
import tempfile
import nanome
from nanome.util import Logs
import sys
import time
NAME = "<NAME>"
DESCRIPTION = "Tests add_bonds."
CATEGORY = "testing"
HAS_ADVANCED_OPTIONS = False
class BondingTest(nanome.PluginInstance):
    """Nanome plugin that loads a local PDB, runs add_bonds and uploads the result."""
    def start(self):
        """Plugin entry point: immediately run the bonding test."""
        self.on_run()
    def on_run(self):
        """Load 1TYL from the test assets and kick off asynchronous bonding."""
        # Earlier variant fetched the PDB over HTTP instead of from disk:
        # load_url = 'https://files.rcsb.org/download/1tyl.pdb'
        # response = requests.get(load_url)
        # temp = tempfile.NamedTemporaryFile(delete=False)
        # temp.write(response.text.encode("utf-8"))
        # temp.close()
        # complex = nanome.structure.Complex.io.from_pdb(path=temp.name)
        # os.remove(temp.name)
        pdb_path = os.path.join(os.getcwd(), "testing/test_assets/pdb/1tyl.pdb")
        complex = nanome.structure.Complex.io.from_pdb(path=pdb_path)
        def bonding_done(complex_list):
            # Callback invoked by nanome once bonds are computed.
            self.add_to_workspace(complex_list)
            print('done')
        self.add_bonds([complex], bonding_done)
        print('starting bonding')
nanome.Plugin.setup(NAME, DESCRIPTION, CATEGORY, False, BondingTest) | StarcoderdataPython |
139961 | <reponame>timwoj/tlmbot
import os
import sqlite3
import sys
import unittest
from furl import furl
from urllib.parse import urlparse,parse_qs
from datetime import datetime
def connect(path, read_only=False):
    """Open the bot's sqlite database, initializing the schema if writable.

    :param path: filesystem path of the sqlite database file.
    :param read_only: when True, open in read-only mode (no schema DDL is run).
        Defaults to False so existing one-argument callers keep working.
    :return: a ``sqlite3.Connection`` with ``Row`` row_factory, or None on failure.
    """
    if read_only:
        # Bug fix: a 'file:...?mode=ro' URI is only honored when uri=True is
        # passed to sqlite3.connect(); previously the whole string was treated
        # as a literal filename, silently creating a bogus database file.
        full_path = f'file:{path}?mode=ro'
    else:
        full_path = path
    try:
        conn = sqlite3.connect(full_path, uri=read_only)
    except sqlite3.Error:
        # Keep quiet while running under a test runner.
        if 'unittest' not in sys.modules.keys():
            print(f'Failed to load database file \'{path}\': {sys.exc_info()[1]}')
        return None
    # Ensure we get Row objects back for queries. This makes handling
    # the results a little easier later.
    conn.row_factory = sqlite3.Row
    if not read_only:
        # Create the tables if they don't exist already. DDL would fail on a
        # read-only connection, so only writable connections do this.
        cur = conn.cursor()
        cur.execute('create table if not exists urls('
                    'url text primary key not null, '
                    'count integer default 1, '
                    'latest datetime not null default current_timestamp, '
                    'orig_paster text not null, '
                    'orig_date datetime not null default current_timestamp)')
        cur.execute('create table if not exists karma('
                    'string text primary key not null, '
                    'count integer default 1, '
                    'orig_paster text not null, '
                    'orig_date datetime not null default current_timestamp)')
        conn.commit()
    return conn
# Parses a URL and strips unwanted tracking parameters from it.
def filter_query(url):
    """Normalize *url* by stripping tracking junk.

    - amazon hosts: drop all query params; drop a trailing 'ref=...' path segment.
    - twitter hosts: drop all query params.
    - everything else: drop only 'utm_*' tracking params.

    :return: the cleaned URL string.
    """
    f = furl(url)
    if f.host.find('amazon') != -1:
        # for Amazon URLs, all params are useless and can be removed; the
        # ending path segment starting with 'ref=' can also be removed.
        f.args.clear()
        # NOTE(review): assumes amazon URLs always have at least one path
        # segment; segments[-1] would raise IndexError on a bare host.
        if f.path.segments[-1].startswith('ref='):
            f.path.segments.pop()
    elif f.host.find('twitter') != -1:
        # For Twitter, the same holds about params.
        f.args.clear()
    else:
        # Bug fix: the original re-checked for 'amazon' inside this branch,
        # which is unreachable here; only utm_* params need removing.
        keys_to_remove = {k for k in f.args if k.startswith('utm_')}
        for k in keys_to_remove:
            f.args.pop(k)
    return f.url
def store_url(db, url, paster):
    """Record a pasted URL; return prior-sighting info, or None if it is new.

    The returned dict (for repeats) holds the previous 'count', the original
    'paster' and the original paste date 'when'.
    """
    # trim all of the tracking stuff off it
    cleaned = filter_query(url)
    cursor = db.cursor()
    cursor.execute('select * from urls where url = ?', [cleaned])
    existing = cursor.fetchone()
    if existing is None:
        # First sighting: insert with the schema's default count and dates.
        cursor.execute('insert into urls (url, orig_paster) values(?, ?)',
                       [cleaned, paster])
        db.commit()
        return None
    # Seen before: bump the counter and refresh the last-seen timestamp.
    timestamp = datetime.now().replace(microsecond=0)
    cursor.execute('update urls set count = ?, latest = ? where url = ?',
                   [existing['count'] + 1, timestamp, cleaned])
    db.commit()
    return {
        'count': existing['count'],
        'paster': existing['orig_paster'],
        'when': existing['orig_date'],
    }
def _query_urls(db, command, stack):
cur = db.cursor()
if stack:
cur.execute(command, stack)
else:
cur.execute(command)
results = cur.fetchall()
ret = []
for r in results:
ret.append({'url': r['url'],
'count': r['count'],
'when': r['latest']})
return ret
def get_urls(db, day_range=None, start_date=None, end_date=None):
    """Fetch stored URLs, newest first.

    Selection precedence: day_range == 'all' returns everything; any other
    truthy day_range is a sqlite date modifier (e.g. '-7 days') relative to
    now; otherwise an epoch-seconds [start_date, end_date] window; otherwise
    just today's URLs.
    """
    params = []
    if day_range == 'all':
        # No date restriction at all.
        clause = ''
    elif day_range:
        clause = ('where date(latest) between date("now", "%s", "localtime") '
                  'and date("now","localtime")' % day_range)
    elif start_date:
        clause = ('where datetime(latest) between datetime(?,"unixepoch") '
                  'and datetime(?,"unixepoch")')
        params = [start_date, end_date]
    else:
        clause = 'where date(latest) = date("now","localtime")'
    sql = 'select * from urls ' + clause + ' order by latest desc'
    return _query_urls(db, sql, params)
def search_urls(db, text):
    """Return stored URLs containing *text* as a substring, newest first."""
    # Fix: removed an unused ``cur = db.cursor()`` — _query_urls makes its own.
    command = 'select * from urls where url like ? order by latest desc'
    # Wrap in %...% for a substring LIKE match.
    stack = [f'%{text}%']
    return _query_urls(db, command, stack)
def set_karma(db, text, paster, increase):
    """Adjust karma for *text* by +/-1, inserting a fresh row when unseen.

    A brand-new string gets the schema's default count of 1 regardless of
    the direction of the adjustment.
    """
    cursor = db.cursor()
    cursor.execute('select * from karma where string = ?', [text])
    row = cursor.fetchone()
    if row is None:
        # First mention: rely on the table's default count.
        cursor.execute('insert into karma (string, orig_paster) values(?, ?)',
                       [text, paster])
    else:
        delta = 1 if increase else -1
        cursor.execute('update karma set count = ? where string = ?',
                       [int(row['count']) + delta, text])
    db.commit()
def get_karma(db, text):
    """Return the karma count for *text*, or 0 if it has never been scored."""
    cursor = db.cursor()
    cursor.execute('select * from karma where string = ?', [text])
    row = cursor.fetchone()
    return int(row['count']) if row else 0
class _TestCases(unittest.TestCase):
    """Unit tests for the database helpers.

    Bug fix: every test previously called ``connect()`` with a single
    argument, while ``connect(path, read_only)`` requires two — each test
    raised TypeError before asserting anything. All calls now pass the
    ``read_only`` flag explicitly. Several bodies remain placeholders.
    """

    def _temp_path(self):
        # Scratch database path shared by the setup/teardown-style tests.
        import tempfile
        return os.path.join(tempfile.gettempdir(), 'testing-dbutils-sqlite.db')

    def test_connect_failure(self):
        # Directory does not exist, so connect() should swallow the error
        # and return None.
        db = connect('/tmp/dbutil-test/sqlite.db', False)
        self.assertEqual(db, None)

    def test_connect_success(self):
        path = self._temp_path()
        db = connect(str(path), False)
        self.assertNotEqual(db, None)
        db.close()
        os.unlink(str(path))

    def test_filter_query(self):
        a = filter_query('https://test.com')
        self.assertEqual(a, 'https://test.com')
        a = filter_query('https://test.com?test=abcd&utm_thing=goaway')
        self.assertEqual(a, 'https://test.com?test=abcd')
        a = filter_query('https://www.amazon.com/Minions-Super-Size-Blaster-Sounds-Realistic/dp/B082G4ZZWH/ref=dp_fod_1?pd_rd_i=B082G4ZZWH&psc=1')
        self.assertEqual(a, 'https://www.amazon.com/Minions-Super-Size-Blaster-Sounds-Realistic/dp/B082G4ZZWH')
        # TODO: decide how best to test the rest of these

    def test_store_url(self):
        # TODO: add real assertions; currently only exercises setup/teardown.
        path = self._temp_path()
        db = connect(str(path), False)
        db.close()
        os.unlink(str(path))

    def test_get_urls(self):
        # TODO: add real assertions; currently only exercises setup/teardown.
        path = self._temp_path()
        db = connect(str(path), False)
        db.close()
        os.unlink(str(path))

    def test_set_karma(self):
        # TODO: add real assertions; currently only exercises setup/teardown.
        path = self._temp_path()
        db = connect(str(path), False)
        db.close()
        os.unlink(str(path))

    def test_get_karma(self):
        # TODO: add real assertions; currently only exercises setup/teardown.
        path = self._temp_path()
        db = connect(str(path), False)
        db.close()
        os.unlink(str(path))
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
3226180 | <filename>jni-build/jni/include/tensorflow/python/summary/summary.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=line-too-long
"""This module contains ops for generating summaries.
## Summary Ops
@@tensor_summary
"""
# pylint: enable=line-too-long
# Optimizers.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=g-bad-import-order,unused-import
from tensorflow.python.ops.summary_ops import tensor_summary
from tensorflow.python.util.all_util import make_all
__all__ = make_all(__name__)
| StarcoderdataPython |
1790306 | <reponame>stay-whimsical/screamchess
"""
Code used to play various sounds, blink LEDs, and manage the media generally.
"""
import random
import sound
from chess.models import King, Rook, Bishop, Knight, Queen, Pawn
sound.create_sound_bank()
def test_sound(gamestate, events):
    """Play one random piece/action sound, retrying until a valid pair is found."""
    try:
        sound.play_sound(_random_piece(), sound.random_action())
    except ValueError:
        # NOTE(review): retries recursively with no depth limit; if every
        # piece/action pair raised ValueError this would hit the recursion
        # limit -- confirm the sound bank always has playable pairs.
        test_sound(gamestate, events)
    return gamestate
def test_sound_sequence(gamestate, events):
    """Play two random piece/action sounds back to back; gamestate is unchanged."""
    sound.play_sounds([(_random_piece(), sound.random_action()),
                       (_random_piece(), sound.random_action())])
    return gamestate
def _random_piece():
    """Return a uniformly random chess piece from the currently enabled set.

    Commented-out entries are pieces deliberately excluded from the sound
    bank for now; keep them so they are easy to re-enable.
    """
    pieces = [
        Rook('white', 1),
        Knight('white', 1),
        Bishop('white', 1),
        Queen('white'),
        King('white'),
        Bishop('white', 2),
        Knight('white', 2),
        Rook('white', 2),
        Pawn('white', 1),
        # Pawn('white', 2),
        # Pawn('white', 3),
        Pawn('white', 4),
        Pawn('white', 5),
        # Pawn('white', 6),
        # Pawn('white', 7),
        # Pawn('white', 8),
        # Rook('black', 1),
        Knight('black', 1),
        Bishop('black', 1),
        # Queen('black'),
        # King('black'),
        Bishop('black', 2),
        Knight('black', 2),
        # Rook('black', 2),
        # Pawn('black', 1),
        # Pawn('black', 2),
        # Pawn('black', 3),
        Pawn('black', 4),
        Pawn('black', 5)
        # Pawn('black', 6),
        # Pawn('black', 7),
        # Pawn('black', 8),
    ]
    # random.choice is the idiomatic (and equivalent) form of
    # pieces[random.randint(0, len(pieces) - 1)].
    return random.choice(pieces)
| StarcoderdataPython |
1739153 | from pyramid.config import Configurator
def main(global_config, **settings):
    """ This function returns a Pyramid WSGI application.

    :param global_config: paste-deploy global configuration (unused here).
    :param settings: application settings from the config file.
    """
    config = Configurator(settings=settings)
    # Serve /static assets with a 1-hour client cache.
    config.add_static_view('static', 'static', cache_max_age=3600)
    config.add_route('measure', '/measure')
    config.add_route('measuredata', '/measuredata')
    config.add_route('home', '/')
    # Pick up @view_config-decorated views in this package.
    config.scan()
    return config.make_wsgi_app()
| StarcoderdataPython |
1752340 | <gh_stars>0
import pickle
import re
import sys
import time
import pytest
from loguru import logger
from .conftest import default_threading_excepthook
class NotPicklable:
    """Test double whose instances refuse to be pickled."""

    def __getstate__(self):
        # Abort serialization with the exact message the tests match on.
        message = "You shall not serialize me!"
        raise pickle.PicklingError(message)

    def __setstate__(self, state):
        # Deserialization side is a harmless no-op.
        pass
class NotUnpicklable:
    """Test double that pickles fine but refuses to be unpickled."""

    def __getstate__(self):
        # Serialize to an innocuous placeholder state.
        return "..."

    def __setstate__(self, state):
        message = "You shall not de-serialize me!"
        raise pickle.UnpicklingError(message)
class NotWritable:
    """Sink that raises when a record is marked with extra={'fail': ...}."""

    def write(self, message):
        extra = message.record["extra"]
        if "fail" not in extra:
            print(message, end="")
            return
        raise RuntimeError("You asked me to fail...")
def test_enqueue():
    """An enqueue=True sink receives nothing until complete() drains the queue."""
    x = []
    def sink(message):
        time.sleep(0.1)
        x.append(message)
    logger.add(sink, format="{message}", enqueue=True)
    logger.debug("Test")
    # The slow sink hasn't been reached yet: the message sits in the queue.
    assert len(x) == 0
    logger.complete()
    assert len(x) == 1
    assert x[0] == "Test\n"
def test_enqueue_with_exception():
    """Exception records survive the queue and format with a full traceback."""
    x = []
    def sink(message):
        time.sleep(0.1)
        x.append(message)
    logger.add(sink, format="{message}", enqueue=True)
    try:
        1 / 0
    except ZeroDivisionError:
        logger.exception("Error")
    assert len(x) == 0
    logger.complete()
    assert len(x) == 1
    lines = x[0].splitlines()
    assert lines[0] == "Error"
    assert lines[-1] == "ZeroDivisionError: division by zero"
def test_caught_exception_queue_put(writer, capsys):
    """catch=True: an unpicklable record is reported on stderr and skipped."""
    logger.add(writer, enqueue=True, catch=True, format="{message}")
    logger.info("It's fine")
    logger.bind(broken=NotPicklable()).info("Bye bye...")
    logger.info("It's fine again")
    logger.remove()
    out, err = capsys.readouterr()
    lines = err.strip().splitlines()
    assert writer.read() == "It's fine\nIt's fine again\n"
    assert out == ""
    assert lines[0] == "--- Logging error in Loguru Handler #0 ---"
    assert re.match(r"Record was: \{.*Bye bye.*\}", lines[1])
    assert lines[-2].endswith("PicklingError: You shall not serialize me!")
    assert lines[-1] == "--- End of logging error ---"
def test_caught_exception_queue_get(writer, capsys):
    """catch=True: a record failing on dequeue is reported (record unavailable)."""
    logger.add(writer, enqueue=True, catch=True, format="{message}")
    logger.info("It's fine")
    logger.bind(broken=NotUnpicklable()).info("Bye bye...")
    logger.info("It's fine again")
    logger.remove()
    out, err = capsys.readouterr()
    lines = err.strip().splitlines()
    assert writer.read() == "It's fine\nIt's fine again\n"
    assert out == ""
    assert lines[0] == "--- Logging error in Loguru Handler #0 ---"
    assert lines[1] == "Record was: None"
    assert lines[-2].endswith("UnpicklingError: You shall not de-serialize me!")
    assert lines[-1] == "--- End of logging error ---"
def test_caught_exception_sink_write(capsys):
    """catch=True: a sink raising on write is reported; other records still pass."""
    logger.add(NotWritable(), enqueue=True, catch=True, format="{message}")
    logger.info("It's fine")
    logger.bind(fail=True).info("Bye bye...")
    logger.info("It's fine again")
    logger.remove()
    out, err = capsys.readouterr()
    lines = err.strip().splitlines()
    assert out == "It's fine\nIt's fine again\n"
    assert lines[0] == "--- Logging error in Loguru Handler #0 ---"
    assert re.match(r"Record was: \{.*Bye bye.*\}", lines[1])
    assert lines[-2] == "RuntimeError: You asked me to fail..."
    assert lines[-1] == "--- End of logging error ---"
def test_not_caught_exception_queue_put(writer, capsys):
    """catch=False: an enqueue (pickling) failure propagates to the caller."""
    logger.add(writer, enqueue=True, catch=False, format="{message}")
    logger.info("It's fine")
    with pytest.raises(pickle.PicklingError, match=r"You shall not serialize me!"):
        logger.bind(broken=NotPicklable()).info("Bye bye...")
    logger.remove()
    out, err = capsys.readouterr()
    assert writer.read() == "It's fine\n"
    assert out == ""
    assert err == ""
def test_not_caught_exception_queue_get(writer, capsys):
    """catch=False: a dequeue failure surfaces via the worker thread's excepthook."""
    logger.add(writer, enqueue=True, catch=False, format="{message}")
    with default_threading_excepthook():
        logger.info("It's fine")
        logger.bind(broken=NotUnpicklable()).info("Bye bye...")
        logger.info("It's not fine")
        logger.remove()
    out, err = capsys.readouterr()
    lines = err.strip().splitlines()
    assert writer.read() == "It's fine\n"
    assert out == ""
    assert lines[0].startswith("Exception")
    assert lines[-1].endswith("UnpicklingError: You shall not de-serialize me!")
def test_not_caught_exception_sink_write(capsys):
    """catch=False: a sink write failure surfaces via the worker thread's excepthook."""
    logger.add(NotWritable(), enqueue=True, catch=False, format="{message}")
    with default_threading_excepthook():
        logger.info("It's fine")
        logger.bind(fail=True).info("Bye bye...")
        logger.info("It's not fine")
        logger.remove()
    out, err = capsys.readouterr()
    lines = err.strip().splitlines()
    assert out == "It's fine\n"
    assert lines[0].startswith("Exception")
    assert lines[-1] == "RuntimeError: You asked me to fail..."
def test_not_caught_exception_sink_write_then_complete(capsys):
    """complete() after a failing sink write still reports the worker error."""
    logger.add(NotWritable(), enqueue=True, catch=False, format="{message}")
    with default_threading_excepthook():
        logger.bind(fail=True).info("Bye bye...")
        logger.complete()
        logger.remove()
    out, err = capsys.readouterr()
    lines = err.strip().splitlines()
    assert out == ""
    assert lines[0].startswith("Exception")
    assert lines[-1] == "RuntimeError: You asked me to fail..."
def test_not_caught_exception_queue_get_then_complete(writer, capsys):
    """complete() after a failing dequeue still reports the worker error."""
    logger.add(writer, enqueue=True, catch=False, format="{message}")
    with default_threading_excepthook():
        logger.bind(broken=NotUnpicklable()).info("Bye bye...")
        logger.complete()
        logger.remove()
    out, err = capsys.readouterr()
    lines = err.strip().splitlines()
    assert writer.read() == ""
    assert out == ""
    assert lines[0].startswith("Exception")
    assert lines[-1].endswith("UnpicklingError: You shall not de-serialize me!")
def test_wait_for_all_messages_enqueued(capsys):
    """complete() blocks until a slow sink has flushed every queued message, in order."""
    def slow_sink(message):
        time.sleep(0.01)
        sys.stderr.write(message)
    logger.add(slow_sink, enqueue=True, catch=False, format="{message}")
    for i in range(10):
        logger.info(i)
    logger.complete()
    out, err = capsys.readouterr()
    assert out == ""
    assert err == "".join("%d\n" % i for i in range(10))
def test_logging_not_picklable_exception():
    """An exception whose value can't be pickled degrades to (type, None, None)."""
    exception = None
    def sink(message):
        nonlocal exception
        exception = message.record["exception"]
    logger.add(sink, enqueue=True, catch=False)
    try:
        raise ValueError(NotPicklable())
    except Exception:
        logger.exception("Oups")
    logger.remove()
    type_, value, traceback_ = exception
    assert type_ is ValueError
    assert value is None
    assert traceback_ is None
@pytest.mark.skip(reason="No way to safely deserialize exception yet")
def test_logging_not_unpicklable_exception():
    """Placeholder for the unpicklable-exception case (currently skipped)."""
    exception = None
    def sink(message):
        nonlocal exception
        exception = message.record["exception"]
    logger.add(sink, enqueue=True, catch=False)
    try:
        raise ValueError(NotUnpicklable())
    except Exception:
        logger.exception("Oups")
    logger.remove()
    type_, value, traceback_ = exception
    assert type_ is ValueError
    assert value is None
    assert traceback_ is None
| StarcoderdataPython |
122811 | <reponame>TristenSeth/campy
import namesurferentry
class NameSurferDatabase:
    """Case-insensitive lookup table of name-surfer entries loaded from a file."""

    def __init__(self, filename):
        """Parse one entry per line of *filename*, indexing entries by lowercased name."""
        with open(filename) as data_file:
            entries = [namesurferentry.from_line(line) for line in data_file]
        # Later duplicates win, matching a sequential-insert loop.
        self._lookup = {entry.name.lower(): entry for entry in entries}

    def __getitem__(self, name):
        """Return the entry for *name* (any casing), or None when absent."""
        return self._lookup.get(name.lower())
| StarcoderdataPython |
177540 | import math
import time
#compute \pi using formula shown below:
#\pi=\int_{0}^{1}\frac{4}{1+x^2}dx \sim =\frac{1}{n}\sum_{i=0}^{n-1}\frac{4}{1+(\frac{i+0.5}{n})^2}
#
def compute_pi(num_step):
    """Approximate pi by the midpoint rule on the integral of 4/(1+x^2) over [0, 1].

    :param num_step: number of equal-width slices to evaluate.
    :return: the midpoint-rule approximation of pi.
    """
    width = 1.0 / num_step
    # Evaluate the integrand at each slice midpoint and scale by the width.
    return width * sum(
        4.0 / (1.0 + (width * (i + 0.5)) ** 2) for i in range(num_step)
    )
def main():
    """Approximate pi with 1e8 midpoint steps, reporting the error and runtime."""
    n=int(1.0e8)
    start=time.time()
    pi=compute_pi(n)
    # Compare against the math module's reference value.
    diff=abs(pi-math.pi)
    print("pi is approximately %.16f,diff=compute_pi-math.pi is %.16f" % (pi,diff))
    elapsed_time=time.time()-start
    print("elapsed_time=%f" % elapsed_time )
if __name__ == '__main__':
main() | StarcoderdataPython |
106886 | # -*- coding: utf-8 -*-
"""
Created Nov 2018
@author: henss
"""
# import built in libarys
import os
from urllib.request import urlretrieve, urlopen
# import 3rd party libarys
from bs4 import BeautifulSoup
# import local libarys
# define classes and functions
class Weblink():
"""
Class for weblinks containing Filetypes to be downloaded.
"""
def __init__(self, weblink, LinkList=list()):
"""
Class constructor
"""
self.Weblink = weblink
self.LinkList = LinkList
def tweak_links(self):
"""
Instance method to tweak relative links to absolute links
"""
for tp in enumerate(self.LinkList):
if not(tp[1].startswith('http:') or
tp[1].startswith('https:') or
tp[1].startswith('www.')):
if self.LinkList[tp[0]][0] == '/':
first_part = self.Weblink.partition('www.')[0]
sec_part = self.Weblink.partition('www.')[2]
sec_part = sec_part.partition('/')[0]+tp[1]
self.LinkList[tp[0]] = str(first_part + sec_part)
else:
first_part = self.Weblink.partition('www.')[0]
sec_part = self.Weblink.partition('www.')[2]
sec_part = sec_part.partition('/')[0]+'/'+tp[1]
self.LinkList[tp[0]] = str(first_part + sec_part)
return(self.LinkList)
def fetch_links(self, filetype='pdf'):
"""
Instance method to fetch only links of the given filetype from weblink
"""
html = urlopen(self.Weblink).read()
pagesoup = BeautifulSoup(html, 'html.parser')
tp_link = ''
for tp in pagesoup.find_all('a'):
tp_link = tp.get('href')
try:
if (tp_link.endswith(filetype) and tp_link is not None):
self.LinkList.append(tp_link)
except AttributeError:
pass
# html.close()
self.tweak_links()
return(self.LinkList)
def download_links(self, localpath):
"""
Instance method to download Links
"""
os.chdir(localpath)
for tp in enumerate(self.LinkList):
link_html = self.LinkList[tp[0]]
link_name = self.LinkList[tp[0]].rpartition('/')[2]
urlretrieve(link_html, link_name)
print("Links are downloaed to: " + str(localpath))
| StarcoderdataPython |
3374618 | from django import forms
from django.core.exceptions import FieldDoesNotExist
from django.http import HttpResponseRedirect
from django.views.generic.edit import FormMixin
class AssignUserMixin(FormMixin):
    """Form-view mixin that stamps the requesting user onto models with a ``user`` field."""
    def form_valid(self, form: forms.ModelForm) -> HttpResponseRedirect:
        """Save the form instance, attaching request.user when the model has a ``user`` field."""
        instance = form.save(commit=False)
        try:
            # Probe for a concrete 'user' field; models without one save untouched.
            instance._meta.get_field("user")
            instance.user = self.request.user
        except FieldDoesNotExist:
            pass
        instance.save()
        self.object = instance
        return HttpResponseRedirect(self.get_success_url())
class UserFormKwargsMixin(FormMixin):
    """Form-view mixin that passes the requesting user to the form constructor."""
    def get_form_kwargs(self) -> dict:
        """Extend the form kwargs with 'user' unless already supplied."""
        kwargs = super().get_form_kwargs()
        kwargs.setdefault("user", self.request.user)
        return kwargs
| StarcoderdataPython |
4702 | <reponame>JamesWang007/Open3D-PointNet<gh_stars>100-1000
#!/usr/bin/env python3
# Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
"""Download big files from Google Drive."""
import shutil
import sys
import requests
import os
import time
import urllib.request
import zipfile
def reporthook(count, block_size, total_size):
    """urlretrieve progress hook: prints percent, size and transfer speed.

    :param count: number of blocks transferred so far (0 on first call).
    :param block_size: size of each block in bytes.
    :param total_size: total download size in bytes.
    """
    global start_time
    if count == 0:
        start_time = time.time()
        return
    duration = time.time() - start_time
    # Bug fix: the first real callback can arrive within the clock's
    # resolution, making duration == 0 and the speed computation divide
    # by zero. Clamp to a tiny positive interval instead.
    if duration <= 0:
        duration = 1e-6
    progress_size = int(count * block_size)
    speed = int(progress_size / (1024 * duration))
    percent = int(count * block_size * 100 / total_size)
    # Only refresh the line every 5 percent to limit console spam.
    if percent % 5 == 0:
        sys.stdout.write("\r...%d%%, %d MB, %d KB/s, %d seconds passed" %
                         (percent, progress_size / (1024 * 1024), speed, duration))
        sys.stdout.flush()
def sizeof_fmt(num, suffix='B'):
    """Render *num* bytes as a human-readable decimal (power-of-1000) size string."""
    # https://stackoverflow.com/a/1094933/5308925
    units = ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']
    idx = 0
    while idx < len(units) and abs(num) >= 1000.0:
        num /= 1000.0
        idx += 1
    if idx < len(units):
        return "%3.2f%s%s" % (num, units[idx], suffix)
    # Anything past zetta falls through to the yotta-ish catch-all.
    return "%.2f%s%s" % (num, 'Yi', suffix)
def print_status(destination, progress):
    """Overwrite the current console line with download progress for *destination*."""
    message = "Downloading %s... %s" % (destination, sizeof_fmt(progress))
    # Pad to the terminal width so leftovers from a longer prior line are erased
    # (negative padding simply produces an empty string).
    padding = shutil.get_terminal_size((80, 20)).columns - len(message)
    sys.stdout.write('\r' + message + padding * ' ')
    sys.stdout.flush()
def download_file_from_google_drive(id, destination):
    """Download a (possibly large) Google Drive file to *destination*.

    Handles Drive's virus-scan confirmation token for big files.
    NOTE(review): parameter ``id`` shadows the ``id`` builtin; kept for
    interface compatibility.
    """
    # https://stackoverflow.com/a/39225039/5308925
    def save_response_content(response, destination):
        # Stream the body to disk in 32 KiB chunks, updating the console.
        chunk_size = 32768
        written_size = 0
        with open(destination, "wb") as f:
            for chunk in response.iter_content(chunk_size):
                if chunk:  # filter out keep-alive new chunks
                    f.write(chunk)
                    written_size += chunk_size
                    print_status(destination, written_size)
        print('Done.')
    def get_confirm_token(response):
        # Drive sets a 'download_warning' cookie when it wants confirmation.
        for key, value in response.cookies.items():
            if key.startswith('download_warning'):
                return value
        return None
    url = "https://docs.google.com/uc?export=download"
    session = requests.Session()
    response = session.get(url, params={'id': id}, stream=True)
    token = get_confirm_token(response)
    if token:
        # Re-request with the confirmation token to get the real file.
        params = {'id': id, 'confirm': token}
        response = session.get(url, params=params, stream=True)
    save_response_content(response, destination)
def download_contents():
    """Fetch the pretrained model and the ShapeNet part-segmentation dataset,
    then unzip the dataset into the current directory.

    :return: 0 on completion.
    """
    # download model
    model_path = './cls_model.pth'
    if os.path.isfile(model_path):
        print('Model file already downloaded in', model_path)
    else:
        download_file_from_google_drive('1WWf5B5fmik5_P1dwxltJ-atRkYeCcCC5', './cls_model.pth')

    # download dataset
    # Bug fix: dataset_url is now always bound. Previously it was assigned
    # only inside the else-branch, so the unzip step below raised NameError
    # whenever the zip file already existed on disk.
    dataset_url = 'https://shapenet.cs.stanford.edu/ericyi/shapenetcore_partanno_segmentation_benchmark_v0.zip'
    dataset_path = './shapenetcore_partanno_segmentation_benchmark_v0.zip'
    if os.path.isfile(dataset_path):
        print('Dataset file already downloaded in', dataset_path)
    else:
        urllib.request.urlretrieve(dataset_url, os.path.basename(dataset_url), reporthook)

    # unzip dataset (announce BEFORE extracting; the original printed the
    # "wait" message only after extraction had already finished)
    print('Now unzipping...Wait for 2 minutes ish...!')
    with zipfile.ZipFile(dataset_path, 'r') as zip_ref:
        zip_ref.extractall('.')
    return 0
if __name__ == '__main__':
download_contents()
| StarcoderdataPython |
3281619 | <reponame>wqu-bom/pybufrkit
"""
pybufrkit.script
~~~~~~~~~~~~~~~~
"""
from __future__ import absolute_import
from __future__ import print_function
import functools
import ast
from pybufrkit.dataquery import QueryResult
from pybufrkit.query import BufrMessageQuerent
__all__ = ['process_embedded_query_expr', 'ScriptRunner']
STATE_IDLE = ''
STATE_EMBEDDED_QUERY = '${'
STATE_SINGLE_QUOTE = "'"
STATE_DOUBLE_QUOTE = '"'
STATE_COMMENT = '#'
def process_embedded_query_expr(input_string):
    """
    This function scans through the given script and identifies any path/metadata
    expressions (``${...}``). For each expression found, a unique python variable
    name is generated, and the expression is substituted by the variable name.
    Expressions inside quoted strings or comments are left untouched.
    :param str input_string: The input script
    :return: A 2-element tuple of the substituted string and a dict of substitutions
        (keyed by query expression, valued by the generated variable name)
    :rtype: (str, dict)
    """
    keep = []
    state = ''
    idx_char = idx_var = 0
    substitutions = {}  # keyed by query expression
    query_expr = []  # characters of the ${...} expression being collected
    while idx_char < len(input_string):
        c = input_string[idx_char]
        if state == STATE_EMBEDDED_QUERY:
            if c == '}':
                # End of the ${...} expression: emit (or reuse) its variable.
                state = STATE_IDLE
                s = ''.join(query_expr).strip()
                query_expr = []
                if s not in substitutions:
                    varname = 'PBK_{}'.format(idx_var)
                    idx_var += 1
                    substitutions[s] = varname
                else:
                    varname = substitutions[s]
                keep.append(varname)
            else:
                query_expr.append(c)
        elif (c == "'" or c == '"') and state != STATE_EMBEDDED_QUERY:
            if state == c:  # quoting pair found, pop it
                state = STATE_IDLE
            elif state == '':  # new quote begins
                state = c
            keep.append(c)
        elif c == '$' and state == STATE_IDLE:  # an unquoted $
            if idx_char + 1 < len(input_string) and input_string[idx_char + 1] == '{':
                state = STATE_EMBEDDED_QUERY
                # Once it enters the embedded query state, any pound sign or
                # double/single quotes will be ignored
                idx_char += 1
            else:
                keep.append(c)
        elif c == '#' and state == STATE_IDLE:
            # Comment runs to end of line; $ inside it is not substituted.
            state = STATE_COMMENT
            keep.append(c)
        elif c == '\n' and state == STATE_COMMENT:
            state = STATE_IDLE
            keep.append(c)
        else:
            keep.append(c)
        idx_char += 1
    return ''.join(keep), substitutions
# The following constants represent the nesting levels for values from BUFR data
# section. The nesting levels are decided by the level of parenthesis, which is
# represented by the numbers. A number Zero means no parenthesis at all, i.e.
# scalar. A number One means one level of parenthesis, i.e. a simple list with
# no nesting.
DATA_VALUES_NEST_LEVEL_0 = 0 # flatten to scalar by return only the first element
DATA_VALUES_NEST_LEVEL_1 = 1 # flatten to a list with no nesting, this is the default
DATA_VALUES_NEST_LEVEL_2 = 2 # flatten to a list nested with subsets
DATA_VALUES_NEST_LEVEL_4 = 4 # no flatten at all, fully nested by subsets, replications
class ScriptRunner(object):
    """
    This class is responsible for running the given script against BufrMessage
    object.

    .. attribute:: code_string
        The processed/substituted source code.

    .. attribute:: code_object
        The compiled code object from the code string.

    .. attribute:: pragma
        Extra processing directives

    .. attribute:: metadata_only
        Whether the script requires only metadata part of the BUFR message to work.

    .. attribute:: querent
        The BufrMessageQuerent object for performing the values query.
    """

    def __init__(self, input_string,
                 data_values_nest_level=None):
        """
        :param str input_string: script source containing embedded ``${...}`` queries
        :param data_values_nest_level: optional override for the nest-level pragma,
            one of the DATA_VALUES_NEST_LEVEL_* constants
        """
        # Replace each embedded query expression with a generated PBK_n variable.
        self.code_string, self.substitutions = process_embedded_query_expr(input_string)
        self.pragma = {
            'data_values_nest_level': DATA_VALUES_NEST_LEVEL_1,
        }
        # Read pragma from inside the script
        self.process_pragma()
        # Pragma passed from function call has higher priority
        if data_values_nest_level is not None:
            self.pragma['data_values_nest_level'] = data_values_nest_level
        self.code_object = compile(self.code_string, '', 'exec')
        # Metadata-only unless some query expression does not start with '%'
        # (those expressions query the data section).
        self.metadata_only = True
        for query_str in self.substitutions.keys():
            if not query_str.startswith('%'):
                self.metadata_only = False
                break
        self.querent = BufrMessageQuerent()

    def run(self, bufr_message):
        """
        Execute the compiled script with each substitution variable bound to its
        query result, plus PBK_BUFR_MESSAGE and PBK_FILENAME.

        :return: the globals dict after execution (includes script-defined names)
        """
        variables = {
            varname: self.get_query_result(bufr_message, query_string)
            for query_string, varname in self.substitutions.items()
        }
        variables.update(
            {
                'PBK_BUFR_MESSAGE': bufr_message,
                'PBK_FILENAME': bufr_message.filename,
            }
        )
        exec (self.code_object, variables)
        return variables

    def get_query_result(self, bufr_message, query_expr):
        """Run a single query; data-section results are flattened per the pragma."""
        qr = self.querent.query(bufr_message, query_expr)
        if isinstance(qr, QueryResult):
            return self.flatten_data_values(qr)
        return qr

    def flatten_data_values(self, qr):
        """Flatten a QueryResult according to the data_values_nest_level pragma."""
        data_values_nest_level = self.pragma['data_values_nest_level']
        if data_values_nest_level == DATA_VALUES_NEST_LEVEL_0:
            # Scalar: first element of the fully flattened values, or None if empty
            values = qr.all_values(flat=True)
            values = functools.reduce(lambda x, y: x + y, values, [])
            return values[0] if len(values) > 0 else None
        elif data_values_nest_level == DATA_VALUES_NEST_LEVEL_1:
            # One flat list across all subsets
            values = qr.all_values(flat=True)
            return functools.reduce(lambda x, y: x + y, values, [])
        elif data_values_nest_level == DATA_VALUES_NEST_LEVEL_2:
            # One flat list per subset
            return qr.all_values(flat=True)
        else:  # No flatten, fully nested
            return qr.all_values()

    def process_pragma(self):
        """
        Read '#$' pragma lines from the top of the script; parsing stops at the
        first non-pragma line. Each pragma line holds comma-separated k=v pairs.
        """
        for line in self.code_string.splitlines():
            if not line.startswith('#$'):
                return
            # line[3:] skips '#$' plus one extra character -- assumes a
            # '#$ k=v' layout with a single space; TODO confirm pragma format.
            for assignment in line[3:].split(','):
                k, v = assignment.split('=')
                k = k.strip()
                if k in self.pragma:
                    self.pragma[k] = ast.literal_eval(v.strip())
| StarcoderdataPython |
1693039 | """
Plugin for getting data from a Google sheet and generating a PDF from it
"""
import json
import os
import os.path
import calendar
import time
from datetime import datetime
from urllib.parse import urlencode
import gspread
from gspread.exceptions import SpreadsheetNotFound
import requests
from requests.auth import HTTPDigestAuth
from googleapiclient.discovery import build
from oauth2client.service_account import ServiceAccountCredentials
from interface import implements
from src.pdf.base.interfaces.plugin import Plugin
import logging
class GoogleDocsSheetsPluginOld:
    """
    Plugin class which implements the PDFPlugin interface
    """
def __init__(self):
"""
get googledoc-config.json file content and then save this data to class config variable
"""
# Get the logger specified in the file
self.logger = logging.getLogger()
with open(os.path.dirname(__file__) + '/googledoc-config.json') as json_file:
config = json.load(json_file)
self.config = config
self.raw_data = None
self.tags = None
def set_raw_data(self, raw_data):
"""
initialize raw data
:param raw_data:
:return:
"""
self.raw_data = raw_data
    def _get_token(self):
        """ The file token.pickle stores the user's access and refresh tokens, and is
        created automatically when the authorization flow completes for the first
        time.

        Authorizes a gspread client using the service-account key file shipped
        next to this module.

        :return: (client, creds) -- both None when authorization fails
        """
        client = None
        creds = None
        try:
            sheet_scopes = [
                'https://spreadsheets.google.com/feeds',
                'https://www.googleapis.com/auth/drive'
            ]
            base_path = os.path.dirname(__file__)
            creds = ServiceAccountCredentials.from_json_keyfile_name(base_path + '/gcs-gdrive_dev_creds.json',
                                                                     sheet_scopes)
            client = gspread.authorize(creds)
        except Exception as ex:
            # Failure is swallowed and signalled to callers via the None return.
            print(ex)
            self.logger.error("Exception occurred", exc_info=True)
        return client, creds
def _get_session_cookie(self):
error = cookie = None
cookie_request = requests.get(
self.raw_data['SESSIONCOOKIEBASEURL'],
auth=HTTPDigestAuth(self.raw_data['ODKUSERNAME'],
self.raw_data['ODKPASSWORD']),
timeout=10) # Sending the digest authorization request
headers = str(cookie_request.headers)
if cookie_request.status_code == 200: # Check if request is OK
start_index = headers.find('JSESSIONID=')
end_index = headers.find(';')
session_cookie = headers[
start_index + 11:
end_index] # Getting the value of json cookie from the string
if len(session_cookie) == 32: # Making sure got the cookie value right
cookie = session_cookie # Saving the cookie value
else:
error = "No session cookie found"
else:
error = "Authorization error"
return error, cookie
def get_sheetvalues(self, sheet_id, var_mapping):
"""
get google sheet data of the specified sheet id and range
"""
error = None
try:
client = self._get_token()[0]
base_sheet = client.open_by_key(sheet_id)
sheet = base_sheet.worksheet(var_mapping)
values = sheet.get_all_values()
# print(values)
if not values:
error = "No Mapping details found"
else:
mapping_values = values
except SpreadsheetNotFound as ex:
error = "Failed to fetch mapping detials"
mapping_values = None
self.logger.error("Exception occurred", exc_info=True)
except Exception as ex:
error = "Failed to fetch mapping detials"
mapping_values = None
self.logger.error("Exception occurred", exc_info=True)
return mapping_values, error
def get_tags(self):
"""
this method return all the tags on the basis of which we filter the request
"""
tags = dict()
tags["SHEETID"] = self.config["SHEETID"]
tags["SHEETNAME"] = self.config["SHEETNAME"]
tags["RANGE"] = self.config["RANGE"]
tags["MAPPINGDETAILS"] = self.config["MAPPINGDETAILS"]
tags["OPTIONSSHEET"] = self.config["OPTIONSSHEET"]
tags["DOCTEMPLATEID"] = self.config["DOCTEMPLATEID"]
tags["APPLICATIONID"] = self.config["APPLICATIONID"]
self.tags = tags
return tags
def publish_message(self, producer_instance, topic_name, key, value):
"""
publish message to kafka
:param producer_instance:
:param topic_name: name of topic
:param key: message key
:param value: message value
:return: status
"""
error = ''
try:
key_bytes = bytes(key, encoding='utf-8')
value_bytes = bytes(value, encoding='utf-8')
producer_instance.send(topic_name, key=key_bytes, value=value_bytes)
producer_instance.flush()
except Exception as ex:
self.logger.error('Exception in publishing message')
error = str(ex)
self.logger.error("Exception occurred", exc_info=True)
return error
    def connect_kafka_producer(self):
        """
        connect with kafka producer

        :return: KafkaProducer object, or None when the connection fails
        """
        _producer = None
        try:
            # NOTE(review): KafkaProducer and KAFKA_CREDENTIAL are not imported
            # in this module; as written this raises NameError (caught below)
            # and always returns None -- confirm the missing import/injection.
            _producer = KafkaProducer(bootstrap_servers=KAFKA_CREDENTIAL['bootstrap_servers'],
                                      security_protocol=KAFKA_CREDENTIAL['security_protocol'],
                                      sasl_mechanism=KAFKA_CREDENTIAL['sasl_mechanism'],
                                      sasl_plain_username=KAFKA_CREDENTIAL['sasl_plain_username'],
                                      sasl_plain_password=KAFKA_CREDENTIAL['sasl_plain_password'],
                                      api_version=(0, 10))
        except Exception as ex:
            self.logger.error('Exception while connecting Kafka')
            print(str(ex))
            self.logger.error("Exception occurred", exc_info=True)
        return _producer
    def fetch_data(self):
        """
        this method fetches the data from google sheet and return it as raw_data and also send tag

        Each data row is converted to a dict keyed by the header row, merged
        with the config tags and published to the 'form-data' Kafka topic.

        :return: error message from the (last) publish, or None
        """
        error = None
        tags = None
        try:
            range_name = self.config['SHEETNAME']
            # call class method which return sheet data and error if permission is not there
            get_value_mapping = self.get_sheetvalues(self.config['SHEETID'], range_name)
            mapping_error = get_value_mapping[1]  # Error in fetching mapping
            mapping_values = get_value_mapping[0]  # mapping values list
            if not mapping_error:
                raw_data = mapping_values
                # Create a JSON from data.
                column_names = raw_data[0]  # first row is the header
                # NOTE(review): data rows start at index 2, so the second sheet
                # row is skipped -- presumably a sub-header; confirm the layout.
                for data in raw_data[2:]:
                    single_data = dict()
                    counter = 0
                    for col in column_names:
                        # Columns with an empty header are skipped entirely.
                        if col != '':
                            single_data[col] = data[counter]
                        counter += 1
                    tags = self.get_tags()
                    all_data = dict()
                    all_data['req_data'] = single_data
                    all_data.update(self.config)  # merge tags with sheet each row data
                    raw_data = dict()
                    raw_data['reqd_data'] = all_data
                    raw_data['tags'] = tags
                    kafka_producer = self.connect_kafka_producer()
                    value = json.dumps(raw_data)
                    # NOTE(review): `error` is overwritten per row, so only the
                    # last publish result is reported to the caller.
                    error = self.publish_message(kafka_producer, 'form-data', 'form-data', value)
            else:
                error = "No Mapping details found"
        except Exception as ex:
            error = "Failed to fetch mapping detials"
            mapping_values = None
            self.logger.error("Exception occurred", exc_info=True)
        return error
    def fetch_mapping(self, data):
        """
        this method fetches mapping values and options from google sheet and update this in raw_data
        return it as raw_data

        :param data: request payload containing SHEETID / MAPPINGDETAILS /
            OPTIONSSHEET keys
        :return: (raw_data, error) -- the payload extended with both mapping
            lists, or (None, message) on failure
        """
        error = None
        raw_data = None
        try:
            self.raw_data = data
            self.get_tags()
            # NOTE(review): info_log is not imported in this module -- confirm
            # it is provided elsewhere, otherwise this raises NameError.
            info_log(self.logger.info, "Step3 Fetch Mapping Start", self.raw_data)
            get_value_mapping = self.get_sheetvalues(data['SHEETID'], data['MAPPINGDETAILS'])
            mapping_error = get_value_mapping[1]  # Error in fetching mapping
            mapping_values = get_value_mapping[0]  # mapping values list
            get_options_mapping = self.get_sheetvalues(data['SHEETID'],
                                                       data['OPTIONSSHEET'])
            options_error = get_options_mapping[1]  # Error in fetching options
            options_mapping = get_options_mapping[0]  # options mapping list
            if not mapping_error and not options_error:
                raw_data = dict()
                raw_data['value_mapping'] = mapping_values
                raw_data['options_mapping'] = options_mapping
                # Merge the fetched mappings back into the incoming payload.
                data.update(raw_data)
                raw_data = data
                self.raw_data = raw_data
            else:
                error = str(mapping_error) + str(options_error)
            info_log(self.logger.info, "Step3 Fetch Mapping End", self.raw_data)
        except Exception as ex:
            error = "Failed to fetch mapping detials"
            info_log(self.logger.error, "Error1 " + error, self.raw_data)
            self.logger.error("Exception occurred", exc_info=True)
        return raw_data, error
    def _map_data(self, all_data, mapping_values, options_mapping):
        """
        Order the submitted values according to the mapping sheet, translating
        option codes (a, b, c ...) into their display text via the options sheet.

        NOTE: pop(0) below mutates the caller's mapping_values/options_mapping
        lists (removes their header rows).

        :param all_data: dict of submitted field values keyed by column name
        :param mapping_values: mapping-sheet rows (row[1] = type, row[2] = field name)
        :param options_mapping: options-sheet rows
        :return: (final_data, error) -- ordered value list, or (None, message)
        """
        error = None
        final_data = None
        try:
            info_log(self.logger.info, "Step4.1 Mapping Start", self.raw_data)
            mapping_values.pop(0)  # removing the header row of mapping sheet
            final_data = []  # List to hold the final values
            options_mapping.pop(0)  # removing the header row of options sheet
            for row in mapping_values:
                if row[1] == 'options':
                    # row[2] option value name
                    # str(all_data[row[2]]) option value a,b,c,d
                    # options_mapping[] the list with valuename and options value
                    this_list = ''.join(
                        str(opt) for opt in options_mapping)  # Converted list to string
                    new_list = this_list.split(str(
                        row[2]), 1)[1]  # List split to start with required string
                    index_end = new_list.find("]")  # Find the stopping point
                    # The semi final string to find values from obtained
                    str_to_check = new_list[:
                                            index_end]
                    option_value_start = str_to_check.find(str(all_data[row[2]]))
                    if option_value_start == -1:
                        all_data[row[
                            2]] = 'NO_TEXT_FOUND'  # If the particular option is not found
                        # Appending the received data to the final list
                        final_data.append(all_data[row[2]])
                    else:
                        # Slice out the option's display text that follows the
                        # matched code inside the stringified options row.
                        length = len(all_data[row[2]])
                        option_value_end = str_to_check.find("'", option_value_start)
                        option_value = str_to_check[option_value_start + length + 2:
                                                    option_value_end]
                        final_data.append(
                            option_value
                        )  # Appending the correct option to the final list
                else:
                    if not all_data[row[2]]:
                        all_data[row[2]] = 'NO_TEXT_FOUND'  # If data is None
                    final_data.append(all_data[row[
                        2]])  # Appending the received data to the final list
            info_log(self.logger.info, "Step4.1 Mapping End", self.raw_data)
        except Exception as ex:
            error = "Failed to map data"
            info_log(self.logger.error, "Error3 " + error, self.raw_data)
            self.logger.error("Exception occurred", exc_info=True)
        return final_data, error
def get_config(self):
"""
return config
"""
return self.config
    def _generate_file_drive(self, url):
        """
        Call the Google App Script endpoint that fills the doc template and
        stores the result on Drive.

        :param url: fully-encoded GAS url (base url + query payload)
        :return: (document_id, file_name, pdf_url, error)
        """
        error = document_id = file_name = pdf_url = None
        try:
            info_log(self.logger.info, "Step4.2 Generate File Drive Start", self.raw_data)
            # call the app script url
            contents = requests.get(url, timeout=60).json()
            print(contents)
            # The response apparently encodes "no error" as the literal strings
            # "null" / "undefined" rather than JSON null -- both treated as OK.
            if contents.get("error") != "null":
                error = contents.get('error')
                if error == "undefined":
                    error = None
            document_id = contents.get("documentId")
            file_name = contents.get("fileName")
            pdf_url = contents.get("url")
            info_log(self.logger.info, "Step4.2 Generate File Drive End", self.raw_data)
        except Exception as ex:
            error = "Failed to get response from App Script"
            info_log(self.logger.error, "Error4 " + error, self.raw_data)
            self.logger.error("Exception occurred", exc_info=True)
        return document_id, file_name, pdf_url, error
    def build_pdf(self, raw_data, file_name):
        """
        this method get raw_data and file name and generate pdf having this file_name

        Maps the submitted values via the mapping/options sheets, optionally
        attaches an ODK session cookie, then calls the App Script that renders
        the template.

        :return: (pdf_name, error, pdf_url)
        """
        error = None
        pdf_name = ''
        pdf_url = ''
        try:
            data = raw_data['req_data']
            info_log(self.logger.info, "Step4 Build Pdf Start", self.raw_data)
            mapping_values = raw_data['value_mapping']
            options_mapping = raw_data['options_mapping']
            mapped_data = self._map_data(data, mapping_values, options_mapping)
            mapping_error = mapped_data[1]
            final_data = mapped_data[0]
            if not mapping_error:
                # URL of google app script
                final_data_str = json.dumps(final_data)
                # Optional override: name the file after a chosen field plus a
                # UTC timestamp to keep it unique.
                if 'FILENAMEFIELD' in raw_data and raw_data['FILENAMEFIELD'] in data:
                    file_name = data[raw_data['FILENAMEFIELD']] + '_' + str(
                        calendar.timegm(time.gmtime()))
                print(file_name)
                payload = {
                    "fileName": file_name,
                    "mylist": final_data_str,
                    "templateId": raw_data['DOCTEMPLATEID']
                }  # Encoding the url with payload
                # When ODK credentials are supplied, a session cookie plus the
                # credentials are forwarded to the App Script in the payload.
                if ('ODKUSERNAME' in self.raw_data.keys() and self.raw_data['ODKUSERNAME']
                        and 'ODKPASSWORD' in self.raw_data.keys() and self.raw_data['ODKPASSWORD']):
                    call_session_cookie = self._get_session_cookie()
                    if not call_session_cookie[0]:
                        session_cookie = call_session_cookie[1]
                    else:
                        error = call_session_cookie[0]
                    if not error:
                        payload['sessionCookie'] = session_cookie
                        payload['username'] = self.raw_data['ODKUSERNAME']
                        payload['password'] = self.raw_data['ODKPASSWORD']
                if not error:
                    gas_url = self.config['URL'] + urlencode(payload)
                    # Calling the GAS url and Getting the GAS response
                    app_script_response = self._generate_file_drive(gas_url)
                    error = app_script_response[3]
                    if not error:
                        pdf_url = app_script_response[2]
                        pdf_name = app_script_response[1] + '.pdf'
            else:
                error = mapping_error
            info_log(self.logger.info, "Step4 Build Pdf End", self.raw_data)
        except Exception as ex:
            error = "Failed to generate pdf"
            info_log(self.logger.error, "Error2 " + error, self.raw_data)
            self.logger.error("Exception occurred", exc_info=True)
        return pdf_name, error, pdf_url
    def upload_pdf(self, key, file_url):
        """
        Uploads a file to the local server and if we specify UPLOADTO in config file then save this
        file to cdn and delete file from local server.

        :param key: target file name
        :param file_url: url to download the generated file from
        :return: (upload_file_url, error, expire_timestamp)
        """
        error = ''
        upload_file_url = None
        expire_timestamp = None
        try:
            info_log(self.logger.info, "Step5 Upload Pdf Start", self.raw_data)
            # Download the rendered file and store it under DIRPATH locally.
            response = requests.get(file_url)
            base_path = os.path.dirname(__file__) + self.config['DIRPATH']
            if not os.path.exists(base_path):
                os.makedirs(base_path)
            with open(base_path + key, 'wb') as file_obj:
                file_obj.write(response.content)
            upload_file_url = base_path + key
            base_path = os.path.dirname(__file__)
            if ('UPLOADTO' in self.config.keys() and self.config['UPLOADTO']):
                info_log(self.logger.info, "Step5.1 Upload To Cdn Start", self.raw_data)
                # NOTE(review): FileUploader is not imported in this module --
                # confirm it is provided elsewhere before relying on this path.
                if self.config['UPLOADTO'] == 's3':
                    cdn_upload = FileUploader(self.config['UPLOADTO'], self.config['ACCESSKEY'],
                                              self.config['SECRETKEY'])
                else:
                    cdn_upload = FileUploader(self.config['UPLOADTO'],
                                              base_path + '/' +
                                              self.config['GOOGLE_APPLICATION_CREDENTIALS'])
                resp = cdn_upload.upload_file(base_path + self.config['DIRPATH'] + key,
                                              self.config['BUCKET'], key)
                url = resp[0]
                error = resp[1]
                if url:
                    upload_file_url = url
                    expire_timestamp = resp[2]
                    # Remove the local copy once the CDN upload succeeded.
                    os.remove(os.path.dirname(__file__) + self.config['DIRPATH'] + key)
                else:
                    info_log(self.logger.error, "Error6 " + error, self.raw_data)
                info_log(self.logger.info, "Step5.1 Upload To Cdn End", self.raw_data)
            # self._delete_file_drive(file_url)
            info_log(self.logger.info, "Step5 Upload Pdf End", self.raw_data)
        except Exception as ex:
            error = "Failed to download file from drive"
            info_log(self.logger.error, "Error5 " + error, self.raw_data)
            self.logger.error("Exception occurred", exc_info=True)
        return upload_file_url, error, expire_timestamp
def retrieve_pdf(self, key):
"""
this method return pdf url
"""
filedata = ''
error = None
file_name = self.config['DIRPATH'] + key + '.pdf'
try:
with open(file_name, 'rb') as file_obj:
filedata = file_obj.read()
except Exception as ex:
error = 'File not found'
self.logger.error("Exception occurred", exc_info=True)
return filedata, error
    def _delete_file_drive(self, file):
        """
        Google drive API to Permanently delete a file, skipping the trash.

        :param file: full Drive url; the file id is taken from the 6th path
            segment -- TODO confirm this matches the urls actually used.
        :return: (error, done)
        """
        error = done = None
        try:
            creds = None
            creds = self._get_token()[1]
            service = build('drive', 'v3', credentials=creds)
            doc_id = file.split('/')
            file_id = doc_id[5]  # find file id from url here
            service.files().delete(fileId=file_id).execute()
            done = True
        except Exception as ex:
            error = 'Failed to delete file'
            print(ex)
            self.logger.error("Exception occurred", exc_info=True)
        return error, done
def delete_file_drive_google_script(self, file):
"""
Trash Google drive file using google app script.
"""
error = done = None
try:
# fileId = '1Bk48xG8buQu6Y1z7QlXc-GffRwoRsR3ciDb7aeTQQMo'
payload = {
"fileId": file
}
url = self.config['DRIVE_DELETE_URL']
gas_url = url + urlencode(payload)
contents = requests.get(gas_url, timeout=60).json()
print(contents)
if contents.get("error") != "null":
error = contents.get('error')
if error == "undefined":
error = None
if error:
self.logger.error("Error occurred in delete file drive " + error)
else:
done = True
except Exception as ex:
error = 'Failed to delete file'
print(ex)
self.logger.error("Exception occurred", exc_info=True)
return error, done
    def shorten_url(self, url, doc_url):
        """
        Generate short url

        Shortens the PDF url via the Polr API, then -- depending on per-form
        config -- notifies the submitter over WhatsApp and/or email.

        :param url: long pdf url to shorten
        :param doc_url: Google-doc export url of the generated document
        :return: (short_url, error)
        """
        info_log(self.logger.info, "Step6 Shorten Url Start", self.raw_data)
        short_url = None
        error = None
        api_key = self.config['POLRACCESSTOKEN']
        try:
            querystring = {"key": api_key,
                           "url": url}
            resp = requests.request("GET", self.config['POLRAPIURL'], params=querystring)
            if resp.status_code == 200:
                # The raw response body is used as the short url.
                short_url = resp._content.decode("utf-8")
                tags = self.get_tags()
                # Turn the export link into an editable document link.
                new_doc_url = doc_url.replace('export?format=pdf', 'edit')
                print(new_doc_url)
                print(short_url)
                # NOTE(review): tags["FORMID"] is read below, but get_tags()
                # does not populate a FORMID key -- confirm where it is set,
                # otherwise this raises KeyError (caught by the except below).
                if 'SENDMSG' in self.config[tags["FORMID"]].keys() and \
                        self.config[tags["FORMID"]]['SENDMSG']:
                    info_log(self.logger.info, "Step6.2 Msg Send Start", self.raw_data)
                    raw_data = self.raw_data
                    req_data = raw_data['req_data']
                    name = req_data[self.config[tags["FORMID"]]['NAMEFIELD']]
                    mobile = req_data[self.config[tags["FORMID"]]['MSGFIELD']]
                    print(name)
                    print(mobile)
                    # req_data = raw_data['req_data']
                    msg_result = send_whatsapp_msg(mobile,
                                                   short_url,
                                                   name, new_doc_url)
                    info_log(self.logger.info, "Step6.2 Msg Send End", self.raw_data)
                    msg_error = msg_result[0]
                    msg_resp = msg_result[1]
                    # Report the message delivery status to the admin mailbox.
                    info_log(self.logger.info, "Step6.4 Email Send to admin Start", self.raw_data)
                    now = datetime.now()
                    dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
                    content = msg_resp.__dict__
                    content = content['_content'].decode('utf-8')
                    custom_fields = {'mobile': mobile, "sent_time": dt_string,
                                     "msg_status": content}
                    # print(custom_fields)
                    # req_data = raw_data['req_data']
                    # NOTE(review): recipient placeholder '<EMAIL>' and the
                    # hard-coded template id 5 -- confirm intended values.
                    mail_result = send_mail('<EMAIL>', '',
                                            custom_fields,
                                            'resume', 5)
                    mail_error = mail_result[0]
                    mail_resp = mail_result[1]
                    info_log(self.logger.info, "Step6.4 Email Send to admin End", self.raw_data)
                    if msg_error:
                        error = msg_error
                        if all(raw_key in raw_data for raw_key in ("INSTANCEID", "FORMID")) and \
                                raw_data['INSTANCEID'] and raw_data['FORMID']:
                            self.logger.error(
                                "Step6.2 Send Msg Error %s - instance id %s - Form id %s",
                                msg_resp.__dict__,
                                raw_data['INSTANCEID'], raw_data['FORMID'])
                    info_log(self.logger.info, "Step6.2 Msg Send End", self.raw_data)
                if 'SENDEMAIL' in self.config[tags["FORMID"]].keys() and \
                        self.config[tags["FORMID"]]['SENDEMAIL']:
                    info_log(self.logger.info, "Step6.3 Email Send Start", self.raw_data)
                    raw_data = self.raw_data
                    req_data = raw_data['req_data']
                    name = req_data[self.config[tags["FORMID"]]['NAMEFIELD']]
                    email = req_data[self.config[tags["FORMID"]]['EMAILFIELD']]
                    template_id = self.config[tags["FORMID"]]['EMAILTEMPLATEID']
                    print(name)
                    print(email)
                    custom_fields = {'FULL_NAME': name, "LINK": short_url, "DOC_LINK": new_doc_url}
                    # req_data = raw_data['req_data']
                    mail_result = send_mail(email,
                                            url, custom_fields,
                                            'resume', template_id)
                    mail_error = mail_result[0]
                    mail_resp = mail_result[1]
                    if mail_error:
                        if all(raw_key in raw_data for raw_key in ("INSTANCEID", "FORMID")) and \
                                raw_data['INSTANCEID'] and raw_data['FORMID']:
                            self.logger.error(
                                "Step6.2 Email Send Error %s - instance id %s - Form id %s",
                                mail_resp.__dict__,
                                raw_data['INSTANCEID'], raw_data['FORMID'])
                        # Email errors are appended to any earlier message error.
                        if error:
                            error += mail_error
                        else:
                            error = mail_error
                    info_log(self.logger.info, "Step6.3 Email Send End", self.raw_data)
            else:
                error = resp._content.decode("utf-8")
                info_log(self.logger.error, "Error7 " + error, self.raw_data)
            info_log(self.logger.info, "Step6 Shorten Url End", self.raw_data)
        except Exception as ex:
            error = "Unable to shorten a url"
            info_log(self.logger.error, "Error7 " + error, self.raw_data)
            self.logger.error("Exception occurred", exc_info=True)
        return short_url, error
| StarcoderdataPython |
3377319 | <gh_stars>0
from datetime import date
from time import sleep


def categoria(idade):
    """Return the swim-competition category name for a given age in years."""
    if idade <= 9:
        return 'MIRIM'
    if idade <= 14:
        return 'INFANTIL'
    if idade <= 19:
        return 'JÚNIOR'
    if idade <= 25:
        return 'SÊNIOR'
    return 'MASTER'


def main():
    """Interactive entry point: ask for the birth year, show age and category."""
    # idade == current year - birth year
    idade = date.today().year - int(input('Em que ano você nasceu: '))
    print('Analisando dados...')
    sleep(1)
    print('Determinando sua categoria...')
    sleep(2)
    print('Você tem {} anos \nCategoria: {}'.format(idade, categoria(idade)))


if __name__ == '__main__':
    # Guard keeps the interactive flow from running on import (enables testing).
    main()
| StarcoderdataPython |
1774090 | <reponame>rachelaus/capstone
import pytest
import json
from scripts import update_snippets
from capdb.models import Snippet
@pytest.mark.django_db(databases=['capdb'])
def test_map_numbers(case_factory, jurisdiction):
    """map_numbers snippet aggregates case/volume/page counts per jurisdiction."""
    for _ in range(3):
        case_factory(jurisdiction=jurisdiction)
    update_snippets.update_map_numbers()
    parsed = json.loads(Snippet.objects.get(label="map_numbers").contents)
    assert len(parsed) == 1
    stats = parsed[jurisdiction.slug]
    assert stats['case_count'] == 3
    assert stats['volume_count'] == 3
    assert stats['page_count'] == 15
@pytest.mark.django_db(databases=['capdb'])
def test_cases_by_decision_date(case_factory):
    """cases_by_decision_date snippet emits one TSV row per distinct date."""
    for decision_date in ("2000", "2000-04", "2000-04", "2000-04-15"):
        case_factory(decision_date_original=decision_date)
    update_snippets.cases_by_decision_date_tsv()
    snippet = Snippet.objects.get(label='cases_by_decision_date')
    expected = (
        '"2000"\t4\t"https://api.case.test:8000/v1/cases/?decision_date__gte=2000&decision_date__lte=2000-12-31"\r\n'
        '"2000-04"\t3\t"https://api.case.test:8000/v1/cases/?decision_date__gte=2000-04&decision_date__lte=2000-04-31"\r\n'
        '"2000-04-15"\t1\t"https://api.case.test:8000/v1/cases/?decision_date__gte=2000-04-15&decision_date__lte=2000-04-15"\r\n'
    )
    assert snippet.contents == expected
@pytest.mark.django_db(databases=['capdb'])
def test_cases_by_jurisdiction(jurisdiction, case_factory):
    """cases_by_jurisdiction snippet lists the jurisdiction name and case count."""
    for _ in range(3):
        case_factory(jurisdiction=jurisdiction)
    update_snippets.cases_by_jurisdiction_tsv()
    contents = Snippet.objects.get(label='cases_by_jurisdiction').contents
    rows = contents.split("\r\n")[:-1]
    assert len(rows) == 1
    fields = rows[0].split("\t")
    assert fields[1] == '"%s"' % jurisdiction.name_long
    assert fields[2] == '3'
@pytest.mark.django_db(databases=['capdb'])
def test_cases_by_reporter(reporter, case_factory):
    """cases_by_reporter snippet lists the reporter full name and case count."""
    for _ in range(3):
        case_factory(reporter=reporter)
    update_snippets.cases_by_reporter_tsv()
    contents = Snippet.objects.get(label='cases_by_reporter').contents
    rows = contents.split("\r\n")[:-1]
    assert len(rows) == 1
    fields = rows[0].split("\t")
    assert fields[1] == '"%s"' % reporter.full_name
    assert fields[2] == '3'
@pytest.mark.django_db(databases=['capdb'])
def test_search_jurisdiction_list(jurisdiction):
    """search_jurisdiction_list snippet includes the jurisdiction's long name."""
    update_snippets.search_jurisdiction_list()
    snippet = Snippet.objects.get(label='search_jurisdiction_list')
    assert json.loads(snippet.contents)[0][1] == jurisdiction.name_long
@pytest.mark.django_db(databases=['capdb'])
def test_search_court_list(court):
    """search_court_list snippet labels courts as 'jurisdiction: court name'."""
    update_snippets.search_court_list()
    snippet = Snippet.objects.get(label='search_court_list')
    expected = '%s: %s' % (court.jurisdiction.name_long, court.name)
    assert json.loads(snippet.contents)[0][1] == expected
@pytest.mark.django_db(databases=['capdb'])
def test_court_abbrev_list(court):
    """court_abbrev_list snippet includes the court's display name."""
    update_snippets.court_abbrev_list()
    snippet = Snippet.objects.get(label='court_abbrev_list')
    assert json.loads(snippet.contents)[0][1] == court.name
@pytest.mark.django_db(databases=['capdb'])
def test_search_reporter_list(reporter):
    """search_reporter_list snippet formats entries as 'short- full' names."""
    update_snippets.search_reporter_list()
    parsed = json.loads(Snippet.objects.get(label='search_reporter_list').contents)
    assert len(parsed) == 1
    assert parsed[0][1] == '%s- %s' % (reporter.short_name, reporter.full_name)
| StarcoderdataPython |
3282298 | import mock
import time
from copy import deepcopy
from unittest import TestCase
from authlib.common.urls import url_encode
from authlib.integrations.httpx_client import (
OAuthError,
OAuth2Client,
)
from tests.py3.utils import MockDispatch
class OAuth2ClientTest(TestCase):
def setUp(self):
self.token = {
'token_type': 'Bearer',
'access_token': 'a',
'refresh_token': 'b',
'expires_in': '3600',
'expires_at': int(time.time()) + 3600,
}
self.client_id = 'foo'
def test_invalid_token_type(self):
token = {
'token_type': 'invalid',
'access_token': 'a',
'refresh_token': 'b',
'expires_in': '3600',
'expires_at': int(time.time()) + 3600,
}
with OAuth2Client(self.client_id, token=token) as client:
self.assertRaises(OAuthError, client.get, 'https://i.b')
def test_add_token_to_header(self):
def assert_func(request):
token = 'Bearer ' + self.token['access_token']
auth_header = request.headers.get('authorization')
self.assertEqual(auth_header, token)
mock_response = MockDispatch({'a': 'a'}, assert_func=assert_func)
with OAuth2Client(self.client_id, token=self.token, dispatch=mock_response) as client:
resp = client.get('https://i.b')
data = resp.json()
self.assertEqual(data['a'], 'a')
def test_add_token_to_body(self):
def assert_func(request):
self.assertIn(self.token['access_token'], request.content.decode())
mock_response = MockDispatch({'a': 'a'}, assert_func=assert_func)
with OAuth2Client(
self.client_id,
token=self.token,
token_placement='body',
dispatch=mock_response
) as client:
resp = client.get('https://i.b')
data = resp.json()
self.assertEqual(data['a'], 'a')
def test_add_token_to_uri(self):
def assert_func(request):
self.assertIn(self.token['access_token'], str(request.url))
mock_response = MockDispatch({'a': 'a'}, assert_func=assert_func)
with OAuth2Client(
self.client_id,
token=self.token,
token_placement='uri',
dispatch=mock_response
) as client:
resp = client.get('https://i.b')
data = resp.json()
self.assertEqual(data['a'], 'a')
def test_create_authorization_url(self):
url = 'https://example.com/authorize?foo=bar'
sess = OAuth2Client(client_id=self.client_id)
auth_url, state = sess.create_authorization_url(url)
self.assertIn(state, auth_url)
self.assertIn(self.client_id, auth_url)
self.assertIn('response_type=code', auth_url)
sess = OAuth2Client(client_id=self.client_id, prompt='none')
auth_url, state = sess.create_authorization_url(
url, state='foo', redirect_uri='https://i.b', scope='profile')
self.assertEqual(state, 'foo')
self.assertIn('i.b', auth_url)
self.assertIn('profile', auth_url)
self.assertIn('prompt=none', auth_url)
def test_code_challenge(self):
sess = OAuth2Client(client_id=self.client_id, code_challenge_method='S256')
url = 'https://example.com/authorize'
auth_url, _ = sess.create_authorization_url(
url, code_verifier='hello')
self.assertIn('code_challenge', auth_url)
self.assertIn('code_challenge_method=S256', auth_url)
def test_token_from_fragment(self):
sess = OAuth2Client(self.client_id)
response_url = 'https://i.b/callback#' + url_encode(self.token.items())
self.assertEqual(sess.token_from_fragment(response_url), self.token)
token = sess.fetch_token(authorization_response=response_url)
self.assertEqual(token, self.token)
    def test_fetch_token_post(self):
        """fetch_token POSTs the authorization-code grant by default."""
        url = 'https://example.com/token'

        def assert_func(request):
            # Grant parameters must be form-encoded into the request body.
            body = request.content.decode()
            self.assertIn('code=v', body)
            self.assertIn('client_id=', body)
            self.assertIn('grant_type=authorization_code', body)

        mock_response = MockDispatch(self.token, assert_func=assert_func)
        with OAuth2Client(self.client_id, dispatch=mock_response) as client:
            # Code extracted from the redirect uri's query string.
            token = client.fetch_token(url, authorization_response='https://i.b/?code=v')
            self.assertEqual(token, self.token)
        with OAuth2Client(
                self.client_id,
                token_endpoint_auth_method='none',
                dispatch=mock_response
        ) as client:
            # Explicit code argument, no client authentication.
            token = client.fetch_token(url, code='v')
            self.assertEqual(token, self.token)
        mock_response = MockDispatch({'error': 'invalid_request'})
        with OAuth2Client(self.client_id, dispatch=mock_response) as client:
            # An OAuth error payload surfaces as OAuthError.
            self.assertRaises(OAuthError, client.fetch_token, url)
    def test_fetch_token_get(self):
        """fetch_token with method='GET' encodes the grant into the query string."""
        url = 'https://example.com/token'

        def assert_func(request):
            # Grant parameters must appear in the url, not the body.
            url = str(request.url)
            self.assertIn('code=v', url)
            self.assertIn('client_id=', url)
            self.assertIn('grant_type=authorization_code', url)

        mock_response = MockDispatch(self.token, assert_func=assert_func)
        with OAuth2Client(self.client_id, dispatch=mock_response) as client:
            authorization_response = 'https://i.b/?code=v'
            token = client.fetch_token(
                url, authorization_response=authorization_response, method='GET')
            self.assertEqual(token, self.token)
        with OAuth2Client(
                self.client_id,
                token_endpoint_auth_method='none',
                dispatch=mock_response
        ) as client:
            token = client.fetch_token(url, code='v', method='GET')
            self.assertEqual(token, self.token)
            # An endpoint url that already has a query string must be extended.
            token = client.fetch_token(url + '?q=a', code='v', method='GET')
            self.assertEqual(token, self.token)
def test_token_auth_method_client_secret_post(self):
url = 'https://example.com/token'
def assert_func(request):
body = request.content.decode()
self.assertIn('code=v', body)
self.assertIn('client_id=', body)
self.assertIn('client_secret=bar', body)
self.assertIn('grant_type=authorization_code', body)
mock_response = MockDispatch(self.token, assert_func=assert_func)
with OAuth2Client(
self.client_id, 'bar',
token_endpoint_auth_method='client_secret_post',
dispatch=mock_response
) as client:
token = client.fetch_token(url, code='v')
self.assertEqual(token, self.token)
    def test_access_token_response_hook(self):
        """A registered access_token_response hook sees the raw token response."""
        url = 'https://example.com/token'

        def _access_token_response_hook(resp):
            self.assertEqual(resp.json(), self.token)
            return resp

        access_token_response_hook = mock.Mock(side_effect=_access_token_response_hook)
        dispatch = MockDispatch(self.token)
        with OAuth2Client(self.client_id, token=self.token, dispatch=dispatch) as sess:
            sess.register_compliance_hook(
                'access_token_response',
                access_token_response_hook
            )
            self.assertEqual(sess.fetch_token(url), self.token)
            # The hook must have been invoked during fetch_token.
            self.assertTrue(access_token_response_hook.called)
def test_password_grant_type(self):
url = 'https://example.com/token'
def assert_func(request):
body = request.content.decode()
self.assertIn('username=v', body)
self.assertIn('scope=profile', body)
self.assertIn('grant_type=password', body)
dispatch = MockDispatch(self.token, assert_func=assert_func)
with OAuth2Client(self.client_id, scope='profile', dispatch=dispatch) as sess:
token = sess.fetch_token(url, username='v', password='v')
self.assertEqual(token, self.token)
token = sess.fetch_token(
url, username='v', password='v', grant_type='password')
self.assertEqual(token, self.token)
def test_client_credentials_type(self):
url = 'https://example.com/token'
def assert_func(request):
body = request.content.decode()
self.assertIn('scope=profile', body)
self.assertIn('grant_type=client_credentials', body)
dispatch = MockDispatch(self.token, assert_func=assert_func)
with OAuth2Client(self.client_id, scope='profile', dispatch=dispatch) as sess:
token = sess.fetch_token(url)
self.assertEqual(token, self.token)
token = sess.fetch_token(url, grant_type='client_credentials')
self.assertEqual(token, self.token)
    def test_cleans_previous_token_before_fetching_new_one(self):
        """Makes sure the previous token is cleaned before fetching a new one.

        The reason behind it is that, if the previous token is expired, this
        method shouldn't fail with a TokenExpiredError, since it's attempting
        to get a new one (which shouldn't be expired).
        """
        now = int(time.time())
        new_token = deepcopy(self.token)
        past = now - 7200
        # The client's current token is already expired...
        self.token['expires_at'] = past
        # ...while the token the mocked server hands out is still valid.
        new_token['expires_at'] = now + 3600
        url = 'https://example.com/token'
        dispatch = MockDispatch(new_token)
        # Freeze time so the expiry comparison is deterministic.
        with mock.patch('time.time', lambda: now):
            with OAuth2Client(self.client_id, token=self.token, dispatch=dispatch) as sess:
                self.assertEqual(sess.fetch_token(url), new_token)
def test_token_status(self):
token = dict(access_token='a', token_type='bearer', expires_at=100)
sess = OAuth2Client('foo', token=token)
self.assertTrue(sess.token.is_expired())
    def test_auto_refresh_token(self):
        # A request made with an expired token must trigger an automatic
        # refresh (via the stored refresh_token) and report the new token
        # through the update_token callback.

        def _update_token(token, refresh_token=None, access_token=None):
            self.assertEqual(refresh_token, 'b')
            self.assertEqual(token, self.token)

        update_token = mock.Mock(side_effect=_update_token)

        # expires_at=100 is far in the past, so this token is expired.
        old_token = dict(
            access_token='a', refresh_token='b',
            token_type='bearer', expires_at=100
        )

        dispatch = MockDispatch(self.token)
        with OAuth2Client(
            'foo', token=old_token, token_endpoint='https://i.b/token',
            update_token=update_token, dispatch=dispatch
        ) as sess:
            sess.get('https://i.b/user')
            self.assertTrue(update_token.called)

        # Without a refresh_token the expired token cannot be refreshed,
        # so the request must fail instead of silently retrying.
        old_token = dict(
            access_token='a',
            token_type='bearer',
            expires_at=100
        )
        with OAuth2Client(
            'foo', token=old_token, token_endpoint='https://i.b/token',
            update_token=update_token, dispatch=dispatch
        ) as sess:
            self.assertRaises(OAuthError, sess.get, 'https://i.b/user')
    def test_auto_refresh_token2(self):
        # Expired token *without* a refresh_token: a refresh is only performed
        # when the client is configured for the client_credentials grant AND
        # an update_token callback was supplied.

        def _update_token(token, refresh_token=None, access_token=None):
            self.assertEqual(access_token, 'a')
            self.assertEqual(token, self.token)

        update_token = mock.Mock(side_effect=_update_token)

        # expires_at=100 is far in the past, so this token is expired.
        old_token = dict(
            access_token='a',
            token_type='bearer',
            expires_at=100
        )

        dispatch = MockDispatch(self.token)

        # No update_token callback: nothing is refreshed or recorded.
        with OAuth2Client(
            'foo', token=old_token,
            token_endpoint='https://i.b/token',
            grant_type='client_credentials',
            dispatch=dispatch
        ) as sess:
            sess.get('https://i.b/user')
            self.assertFalse(update_token.called)

        # With the callback, the client_credentials refresh is performed.
        with OAuth2Client(
            'foo', token=old_token, token_endpoint='https://i.b/token',
            update_token=update_token, grant_type='client_credentials',
            dispatch=dispatch
        ) as sess:
            sess.get('https://i.b/user')
            self.assertTrue(update_token.called)
    def test_revoke_token(self):
        # revoke_token() must POST to the revocation endpoint, accept an
        # optional token_type_hint, and run the 'revoke_token_request'
        # compliance hook when one is registered.
        answer = {'status': 'ok'}
        dispatch = MockDispatch(answer)

        def _revoke_token_request(url, headers, data):
            self.assertEqual(url, 'https://i.b/token')
            return url, headers, data

        # Wrap in a Mock so we can assert the hook actually ran.
        revoke_token_request = mock.Mock(side_effect=_revoke_token_request)

        with OAuth2Client('a', dispatch=dispatch) as sess:
            resp = sess.revoke_token('https://i.b/token', 'hi')
            self.assertEqual(resp.json(), answer)

            resp = sess.revoke_token(
                'https://i.b/token', 'hi',
                token_type_hint='access_token'
            )
            self.assertEqual(resp.json(), answer)

            sess.register_compliance_hook(
                'revoke_token_request',
                revoke_token_request,
            )
            sess.revoke_token(
                'https://i.b/token', 'hi',
                body='',
                token_type_hint='access_token'
            )
            self.assertTrue(revoke_token_request.called)
def test_request_without_token(self):
with OAuth2Client('a') as client:
self.assertRaises(OAuthError, client.get, 'https://i.b/token')
| StarcoderdataPython |
1770482 | import os
import time
import datetime
import shutil
from shared.logger_factory import LoggerFactory
from shared.utils import read_data_from_json, write_data_to_json
ROOT_DIR = os.environ['ROOT_DIR']
class DataManager:
    """Collects the enhanced data from the scraping process, manages backups and
    composes the data that will be uploaded to elasticsearch.

    - After the enhancement process, the results are stored in a backup folder
      named after the time of the backup.
    - The upload folder contains the data that will be uploaded to elasticsearch.

    For the upload data, the most recent backup data is selected. The amount of
    posts in the selected dataset is compared against older backups according to
    the value set in ``fallback_depth``. A fallback_depth of 2 means that the
    data from the last 2 backups that are older than the currently selected
    backup is compared with the upload data.

    If the current dataset for the upload contains less than X% of the posts in
    the backup dataset, where X is the defined ``threshold``, the backup dataset
    is selected instead. This is done on a file-by-file basis, meaning the
    upload can contain a mixture of files from different backups.

    The source of all files in the upload folder at the end of the process gets
    logged."""

    # manages how many backups are to be kept. if the number of existing backups
    # would exceed this threshold, the oldest backup gets deleted
    max_number_of_backups = 7

    # manages how many backups into the past should be considered for the upload
    fallback_depth = 3

    # defines the percentage threshold at which data from an older backup may be
    # used. data from an older backup may be used if the current data selected
    # for upload contains less than [threshold] * [number of posts in backup];
    # the data from the backup is selected for upload instead
    threshold = 0.75

    # where the enhancement step leaves its output, and where backups/upload live
    enhanced_data_location = os.path.join(ROOT_DIR, 'data_enhancement/data')
    backup_directory = os.path.join(ROOT_DIR, 'data_management', 'backups')
    upload_directory = os.path.join(ROOT_DIR, 'data_management', 'upload')
    file_upload_data_origin = os.path.join(ROOT_DIR, 'logs', 'upload_data_origin.log')

    # backup folders are named by date using this format
    mask_timestamp = '%d.%m.%Y'

    logger = LoggerFactory.get_datamanagement_logger()

    # maps file name -> backup folder the upload copy was taken from
    data_origin = dict()

    @staticmethod
    def timestamp_to_datestring(timestamp):
        """Converts unix timestamp into datestring"""
        DataManager.logger.debug('timestamp_to_datestring()')
        return datetime.datetime.fromtimestamp(timestamp).strftime(DataManager.mask_timestamp)

    @staticmethod
    def datestring_to_timestamp(datestring):
        """Converts datestring into unix timestamp"""
        DataManager.logger.debug('datestring_to_timestamp()')
        return time.mktime(datetime.datetime.strptime(datestring, DataManager.mask_timestamp).timetuple())

    @staticmethod
    def save_upload_data_origin(upload_data_origin):
        """Saves the information about the origin of the data inside the upload folder into a text file"""
        # Bug fix: use a context manager so the handle is closed even when a
        # write fails (previously a bare open()/close() pair).
        with open(DataManager.file_upload_data_origin, 'w', encoding='utf-8') as file:
            file.write(f'last upload: {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}\n')
            file.write('Source for upload data:')
            file.write(upload_data_origin)

    @staticmethod
    def copy_from_backup(backup):
        """Copies all the files from a backup into the upload folder and documents the files origin"""
        DataManager.logger.debug('copy_from_backup()')
        path_backup = os.path.join(DataManager.backup_directory, backup)
        for file in os.listdir(path_backup):
            shutil.copy(os.path.join(path_backup, file), os.path.join(DataManager.upload_directory, file))
            DataManager.data_origin[file] = backup

    @staticmethod
    def backup_current_data():
        """Creates a backup for the data in einander-helfen/etl/data_enhancement/data with current date as timestamp"""
        DataManager.logger.debug('backup_current_data()')
        backup_location = os.path.join(DataManager.backup_directory, DataManager.timestamp_to_datestring(time.time()))
        # only one backup per day is kept: a rerun replaces today's backup
        if os.path.exists(backup_location):
            DataManager.logger.warning('There already exists a backup from today, deleting old backup')
            shutil.rmtree(backup_location)
        os.makedirs(backup_location)
        enhancement_files = os.listdir(DataManager.enhanced_data_location)
        for file in enhancement_files:
            enhancement_file = os.path.join(DataManager.enhanced_data_location, file)
            if os.path.isfile(enhancement_file):
                shutil.copy(enhancement_file, backup_location)

    @staticmethod
    def get_sorted_list_of_backups():
        """Returns a list containing all the backup folders in a sorted order from old to new"""
        DataManager.logger.debug('get_sorted_list_of_backups()')
        backups = os.listdir(DataManager.backup_directory)
        # sort on the parsed timestamps, then map back to folder names
        backup_timestamps = list()
        for folder in backups:
            backup_timestamps.append(DataManager.datestring_to_timestamp(folder))
        backup_timestamps.sort()
        sorted_filenames = list()
        for timestamp in backup_timestamps:
            sorted_filenames.append(DataManager.timestamp_to_datestring(timestamp))
        return sorted_filenames

    @staticmethod
    def remove_old_backups():
        """Checks if the backup folder contains more than the maximum of set backups and deletes surplus"""
        DataManager.logger.debug('remove_old_backups()')
        backups = DataManager.get_sorted_list_of_backups()
        if len(backups) > DataManager.max_number_of_backups:
            DataManager.logger.info(f'More than {DataManager.max_number_of_backups} backups exist({len(backups)})'
                                    f', deleting {len(backups)- DataManager.max_number_of_backups} backup(s)')
            # backups are sorted old -> new, so the surplus sits at the front
            for file in backups[:len(backups)-DataManager.max_number_of_backups]:
                DataManager.logger.info(f'Deleting backup {file}')
                shutil.rmtree(os.path.join(DataManager.backup_directory, file))

    @staticmethod
    def clear_upload():
        """Clears the upload folder as preparation for the fresh upload data"""
        DataManager.logger.debug('clear_upload()')
        shutil.rmtree(DataManager.upload_directory)
        os.makedirs(DataManager.upload_directory)

    @staticmethod
    def get_eligible_backups():
        """Returns list of backups that are eligible as a fallback"""
        DataManager.logger.debug('get_eligible_backups()')
        # clamp the fallback depth to the number of backups that actually exist
        if len(DataManager.get_sorted_list_of_backups()) < DataManager.fallback_depth:
            DataManager.fallback_depth = len(DataManager.get_sorted_list_of_backups())
        # the +1 includes the most recent backup on top of `fallback_depth` older ones
        return DataManager.get_sorted_list_of_backups()[-DataManager.fallback_depth-1:]

    @staticmethod
    def initialise_upload_data(backups):
        """Copies files from all backups within fallback depth into upload folder, the most recent backup is the last
        to get copied. As a result, upload now contains all files from the most recent scrape and any additional
        files from older backups within fallback range."""
        DataManager.logger.debug('initialise_upload_data()')
        for backup_folder in backups[-DataManager.fallback_depth-1:]:
            DataManager.copy_from_backup(backup_folder)

    @staticmethod
    def build_string_data_origin():
        """Builds string with summary of which backup files in the upload folder are taken from"""
        DataManager.logger.debug('build_string_data_origin()')
        # right-align the file names for a readable two-column summary
        max_length = 0
        for entry in DataManager.data_origin:
            if len(entry) > max_length:
                max_length = len(entry)
        string_data_origin = ''
        for entry in DataManager.data_origin:
            string_data_origin = string_data_origin+"\n" + \
                f'{entry.rjust(max_length)} : {DataManager.data_origin[entry]}'
        return string_data_origin

    @staticmethod
    def compose_upload():
        """Composes the upload according to the general behaviour described for this class and the set parameters"""
        DataManager.logger.debug('compose_upload()')
        DataManager.clear_upload()
        eligible_backups = DataManager.get_eligible_backups()
        DataManager.initialise_upload_data(eligible_backups)
        eligible_backups = eligible_backups[:-1]  # ignore most recent backup
        for backup in eligible_backups:
            for upload_file in os.listdir(DataManager.upload_directory):
                if os.path.isfile(os.path.join(DataManager.backup_directory, backup, upload_file)):
                    data_in_upload = read_data_from_json(os.path.join(DataManager.upload_directory, upload_file))
                    data_in_backup = read_data_from_json(os.path.join(DataManager.backup_directory, backup,
                                                                      upload_file))
                    # replace the upload copy when it shrank below the threshold
                    if len(data_in_upload) < DataManager.threshold*len(data_in_backup):
                        DataManager.logger.info(f'{upload_file} contains less than 75% of the posts in backup '
                                                f'\'{backup}\' ({len(data_in_upload)} posts vs {len(data_in_backup)} '
                                                f'posts). Current data for {upload_file} will be replaced with backup '
                                                f'data')
                        write_data_to_json(os.path.join(DataManager.upload_directory, upload_file), data_in_backup)
                        DataManager.data_origin[upload_file] = backup
        upload_data_origin = DataManager.build_string_data_origin()
        DataManager.save_upload_data_origin(upload_data_origin)
        DataManager.logger.info(f'Source for upload data: {upload_data_origin}')

    @staticmethod
    def init():
        """Sets up the required folders and corrects set parameters if needed"""
        DataManager.logger.debug('init()')
        if not os.path.exists(DataManager.backup_directory):
            DataManager.logger.info('Creating backup directory')
            os.makedirs(DataManager.backup_directory)
        if not os.path.exists(DataManager.upload_directory):
            DataManager.logger.info('Creating upload directory')
            os.makedirs(DataManager.upload_directory)
        if DataManager.fallback_depth > DataManager.max_number_of_backups:
            DataManager.logger.warning(f'fallback depth exceeds maximal number of backups ('
                                       f'{DataManager.fallback_depth} > {DataManager.max_number_of_backups}), '
                                       f'fallback depth will be limited to number of backups')
            DataManager.fallback_depth = DataManager.max_number_of_backups

    @staticmethod
    def run_backup_process():
        """Runs the datamanagement process for creating backups"""
        DataManager.logger.debug('run_backup_process()')
        DataManager.init()
        DataManager.backup_current_data()
        DataManager.remove_old_backups()

    @staticmethod
    def run_compose_upload_process():
        """Runs the datamanagement process for composing the upload"""
        DataManager.logger.debug('run_compose_upload_process()')
        DataManager.init()
        DataManager.compose_upload()
| StarcoderdataPython |
188303 | from bs4 import BeautifulSoup
import requests
import time
import json
import os.path
import concurrent.futures
from kivymd.uix.card import MDCardSwipe
from kivy.properties import StringProperty
from kivymd.utils import asynckivy
#make this asynchronous
class SwipeStockItem(MDCardSwipe):
    """Swipeable card for one stock; Tick() refreshes its displayed last price."""

    # PSE ticker symbol, bound from the kv rule.
    sCode = StringProperty()

    def Tick(self, *args):
        async def fetch_latest_price():
            # Scrape the latest trade price from Investagrams and push it
            # into the card's price label.
            page = requests.get("https://www.investagrams.com/Stock/PSE:" + self.sCode)
            soup = BeautifulSoup(page.text, "lxml")
            latest = soup.find('span', {'id': 'lblStockLatestLastPrice'}).text
            self.ids.curr_price.text = f'{latest}'

        asynckivy.start(fetch_latest_price())
class ProcessManager():
    """Keeps track of watched stock codes and persists their resistance/support
    prices to a JSON file."""

    # stock codes that have been appended to an existing JSON file
    stocksList = []

    def WriteToJSON(self, data, file='data.json'):
        """Serialize ``data`` to ``file`` as pretty-printed JSON."""
        with open(file, 'w') as f:
            json.dump(data, f, indent=4)

    def ReadJSON(self):
        # Placeholder; reading is not implemented yet.
        return 0

    def CreateJSON(self, sCard, resPrice, supPrice, file="data.json"):
        """Build (or, when ``file`` already exists, extend) the JSON structure
        that stores resistance/support prices per stock code.

        Returns the resulting data structure; the caller is responsible for
        writing it back via WriteToJSON().
        """
        if not os.path.exists(file):  # first run: build the structure from scratch
            data = {"stock": [{sCard: {
                "resist": resPrice,
                "support": supPrice
            }}]}
        else:
            # Bug fix: the existence check above used `file`, but the read
            # always opened the hard-coded 'data.json'; honor the parameter.
            with open(file) as json_file:
                data = json.load(json_file)
                temp = data['stock']
                # python object to be appended
                obj = {sCard: {
                    "resist": resPrice,
                    "support": supPrice
                }}
                temp.append(obj)
                self.stocksList.append(sCard)
        return data
| StarcoderdataPython |
3303482 | import json
from django.http import HttpResponse
from datetime import datetime
import json
def hello_world(request):
    """Plain-text greeting that echoes the current server time."""
    timestamp = datetime.now().strftime("%b, %dth, %Y - %H, %M hrs")
    return HttpResponse(f'Oh, hi! current server time is {timestamp}')
def hi(request):
    """Sort the comma-separated integers in the ``numbers`` query parameter
    and return them as a pretty-printed JSON payload.

    Raises ValueError (-> HTTP 500) when an item is not an integer, and a
    KeyError-style lookup error when the parameter is missing.
    """
    # Fixed: removed a leftover debug print() and dead commented-out code.
    numbers = [int(x) for x in request.GET['numbers'].split(',')]
    sorted_int = sorted(numbers)
    data = {
        'status': 'ok',
        'numbers': sorted_int,
        'message': 'Integers sorted succesfully'  # NOTE(review): typo kept to avoid changing the API payload
    }
    # indent=4 keeps the payload human-readable in the browser
    return HttpResponse(json.dumps(data, indent=4), content_type='application/json')
def say_hi(request, name, age):
    """Greet visitors aged 12 or older; apologize to younger ones."""
    greeting = f'Welcome {name}' if age >= 12 else f'Sorry {name}'
    return HttpResponse(greeting)
| StarcoderdataPython |
127869 | import numpy as np
import pandas as pd
from scipy import interpolate
from astropy.cosmology import LambdaCDM
import Corrfunc
from Corrfunc.utils import convert_rp_pi_counts_to_wp
from Corrfunc.mocks.DDrppi_mocks import DDrppi_mocks
from astropy.cosmology import LambdaCDM
import time
import plotter
def main():
    # Python 2 script: compute the projected correlation function w_p(r_p)
    # for one mock catalog and (optionally) plot the results.
    #nd = 1012
    nd = 10
    data1fn = '../lss/mangler/samples/a0.6452_0001.v5_ngc_ifield_ndata{}.rdzw'.format(nd)
    rand1fn = '../lss/mangler/samples/a0.6452_rand20x.dr12d_cmass_ngc_ifield_ndata{}.rdz'.format(nd)
    # autocorrelation: the second catalog is the same as the first
    data2fn = data1fn
    rand2fn = rand1fn
    print 'Running for n_data={}'.format(nd)
    K = 20  # number of logarithmic r_p bins
    pimax = 40 #Mpc/h
    rpmin = 0.5
    rpmax = 80 #Mpc/h
    #rpbins = np.array([0.1, 1., 10.])
    rpbins = np.logspace(np.log10(rpmin), np.log10(rpmax), K+1)
    rpbins_avg = 0.5 * (rpbins[1:] + rpbins[:-1])  # bin centers, for plotting
    start = time.time()
    wp, wprp_corrfunc, wprp_nopi = run_corrfunc(data1fn, rand1fn, data2fn, rand2fn, rpbins, pimax)
    end = time.time()
    print 'Time: {:3f} s'.format(end-start)
    # rps = [rpbins_avg, rpbins_avg, rpbins_avg]
    # wprps = [wp, wprp_corrfunc, wprp_nopi]
    # labels = ['wp built-in', 'wp calculated', 'wp no pi']
    rps = [rpbins_avg]
    wprps = [wp]
    labels = ['wp built-in']
    #plotter.plot_wprp(rps, wprps, labels, wp_tocompare='wp built-in')
def run_corrfunc(data1fn, rand1fn, data2fn, rand2fn, rpbins, pimax):
    """Load data/random catalogs from CSV and delegate to counts().

    NOTE(review): data2fn/rand2fn are loaded but never passed on -- only
    autocorrelations are supported right now (see comment below).
    """
    print 'Loading data'
    data1 = pd.read_csv(data1fn)
    rand1 = pd.read_csv(rand1fn)
    data2 = pd.read_csv(data2fn)
    rand2 = pd.read_csv(rand2fn)
    #can only do autocorrelations right now
    wp, wprp_corrfunc, wprp_nopi = counts(data1['ra'].values, data1['dec'].values, data1['z'].values,
                                          rand1['ra'].values, rand1['dec'].values, rand1['z'].values,
                                          rpbins, pimax, comoving=True)
    return wp, wprp_corrfunc, wprp_nopi
# flat LambdaCDM cosmology used for the redshift -> comoving distance conversion
cosmo = LambdaCDM(H0=70, Om0=0.3, Ode0=0.7)
def counts(ra_data, dec_data, z_data, ra_rand, dec_rand, z_rand, rpbins, pimax,
           weights_data=None, weights_rand=None, pibinwidth=1, comoving=False):
    """Run DD/DR/RR pair counts with Corrfunc and form w_p(r_p) three ways:
    via Corrfunc's convert_rp_pi_counts_to_wp, via a manual Landy-Szalay sum
    over pi bins, and via a no-pi-binning variant.

    NOTE(review): weights_data/weights_rand are accepted but never used.
    """
    assert(len(ra_data)==len(dec_data) and len(ra_data)==len(z_data))
    assert(len(ra_rand)==len(dec_rand) and len(ra_rand)==len(z_rand))
    ndata = len(ra_data)
    nrand = len(ra_rand)
    nbins = len(rpbins)-1
    pibins = np.arange(0, pimax + pibinwidth, pibinwidth)
    if comoving:
        # convert redshifts to comoving distances (Mpc/h), column-wise
        zdf = pd.DataFrame(z_data)
        z_data = zdf.apply(get_comoving_dist)[0].values
        rzdf = pd.DataFrame(z_rand)
        z_rand = rzdf.apply(get_comoving_dist)[0].values
    dd_res_corrfunc = DDrppi_mocks(1, 2, 0, pimax, rpbins, ra_data, dec_data, z_data, is_comoving_dist=comoving)
    dr_res_corrfunc = DDrppi_mocks(0, 2, 0, pimax, rpbins, ra_data, dec_data, z_data,
                                   RA2=ra_rand, DEC2=dec_rand, CZ2=z_rand, is_comoving_dist=comoving)
    rr_res_corrfunc = DDrppi_mocks(1, 2, 0, pimax, rpbins, ra_rand, dec_rand, z_rand, is_comoving_dist=comoving)
    wp = convert_rp_pi_counts_to_wp(ndata, ndata, nrand, nrand, dd_res_corrfunc, dr_res_corrfunc,
                                    dr_res_corrfunc, rr_res_corrfunc, nbins, pimax)
    # Reshape Corrfunc's flat result arrays (pi varies fastest within each rp
    # bin) into (n_pi_bins, n_rp_bins) grids of pair counts (field index 4).
    dd_rp_pi_corrfunc = np.zeros((len(pibins) - 1, len(rpbins) - 1))
    dr_rp_pi_corrfunc = np.zeros((len(pibins) - 1, len(rpbins) - 1))
    rr_rp_pi_corrfunc = np.zeros((len(pibins) - 1, len(rpbins) - 1))
    for m in range(len(pibins)-1):
        for n in range(len(rpbins)-1):
            idx = (len(pibins)-1) * n + m
            dd_rp_pi_corrfunc[m][n] = dd_res_corrfunc[idx][4]
            dr_rp_pi_corrfunc[m][n] = dr_res_corrfunc[idx][4]
            rr_rp_pi_corrfunc[m][n] = rr_res_corrfunc[idx][4]
    estimator_corrfunc = calc_ls(dd_rp_pi_corrfunc, dr_rp_pi_corrfunc, rr_rp_pi_corrfunc, ndata, nrand)
    # integrate xi(rp, pi) over pi: sum * 2*binwidth (symmetric about pi=0)
    wprp_corrfunc = 2*pibinwidth*np.sum(estimator_corrfunc, axis=0)
    est_ls, wprp_nopi = calc_wprp_nopi(dd_rp_pi_corrfunc, dr_rp_pi_corrfunc, rr_rp_pi_corrfunc, ndata, nrand)
    return wp, wprp_corrfunc, wprp_nopi
def calc_wprp_nopi(dd, dr, rr, ndata, nrand):
    """Collapse the pi axis of the (pi, rp) pair-count grids first, then form
    a single Landy-Szalay estimate and the corresponding w_p(r_p) = 2 * xi."""
    collapsed_dd, collapsed_dr, collapsed_rr = (
        np.sum(grid, axis=0) for grid in (dd, dr, rr)
    )
    est_ls = calc_ls(collapsed_dd, collapsed_dr, collapsed_rr, ndata, nrand)
    return est_ls, 2 * est_ls
def get_comoving_dist(z):
    """Comoving distance to redshift z, scaled by h (uses module-level `cosmo`)."""
    comov = cosmo.comoving_distance(z)
    return comov.value*cosmo.h
def calc_ls(dd_counts, dr_counts, rr_counts, ndata, nrand):
    """Landy-Szalay estimator (DD - 2DR + RR) / RR, with the data counts
    rescaled by the random-to-data ratio so all terms are comparable."""
    ratio = float(nrand) / float(ndata)
    scaled_dd = ratio * ratio * dd_counts
    scaled_dr = 2.0 * ratio * dr_counts
    return (scaled_dd - scaled_dr + rr_counts) / rr_counts
# Script entry point; guarded so the module can be imported without running.
if __name__=='__main__':
    main()
1631126 | <gh_stars>1-10
import numpy as np
import am_sim as ams
from utilities.dataset_interface import load_all_datasets
# only executed in the main process, not in child processes
if __name__ == '__main__':

    # setup numpy random seed for reproducibility
    np.random.seed(1)

    # initial value of model parameters (starting point of the search)
    par_i = ams.st_par()
    par_i['k_consumption_per_day'] = 2.e-05
    par_i['mu_i'] = -14.6
    par_i['sigma_i'] = 1.6
    par_i['g_1d'] = 0.5
    par_i['g_4d'] = 0.5
    par_i['a_selection'] = 0.2
    par_i['b_selection'] = 0.2
    par_i['alpha_C'] = 0.025

    # load all datasets
    dsets = load_all_datasets()

    # name of the folder in which to save the results.
    # It must be an empty or non-existent folder.
    save_folder = 'reproduce_inference_results'

    # parameters whose maximum-likelihood estimate must be retrieved
    pars_to_mutate = ['k_consumption_per_day', 'mu_i', 'sigma_i',
                      'g_1d', 'g_4d', 'a_selection', 'b_selection', 'alpha_C']

    # number of iterations of the parallel-tempering algorithm
    T_max = 10000
    # number of layers in the parallel-tempering algorithm
    n_layers = 10

    # launch search (intermediate state is checkpointed every `save_every` rounds)
    inference = ams.parallel_tempering(dset_list=dsets,
                                       par_i=par_i,
                                       n_layers=n_layers,
                                       T_max=T_max,
                                       pars_to_mutate=pars_to_mutate,
                                       save_folder=save_folder,
                                       save_every=100)

    # run the parallelized version of the inference algorithm
    inference.search_parallel()
    # for the non parallelized version uncomment the following line
    # inference.search()

    # results will be saved in 'save_folder' in a .csv file, that will be
    # progressively updated during the course of the search. This file contains
    # the value of all parameters updates performed during the search, for all
    # layers of the parallel-tempering algorithm. Each parameter set saved is
    # accompanied by other information such as its log-likelihood, the round at
    # which it was saved, the temperature of the layer it belongs to, etc.
| StarcoderdataPython |
151860 | <reponame>kingjr/jr-tools
import numpy as np
from numpy.testing import assert_equal, assert_array_almost_equal
from .. import align_signals
from .. import fast_mannwhitneyu
def test_align_signal():
    """align_signals must recover the lag between a sparse spike train and a
    zero-padded copy of it, in both directions and for odd/even lengths."""
    spikes = np.asarray(np.random.rand(1000) > .9, float)
    for pad in (10, 11, 0):
        shifted = np.hstack((np.zeros(pad), spikes))
        for first, second, sign in ((spikes, shifted, 1), (shifted, spikes, -1)):
            expected = sign * pad
            # full-length signals
            assert_equal(align_signals(first, second), expected)
            # trimmed variants: even/even, even/odd, odd/even lengths
            assert_equal(align_signals(first[:-1], second[:-1]), expected)
            assert_equal(align_signals(first[:-1], second), expected)
            assert_equal(align_signals(first, second[:-1]), expected)
def test_auc():
    """fast_mannwhitneyu's AUC must be antisymmetric under swapping the two
    groups and must agree with sklearn's ROC-AUC on random data."""
    from sklearn.metrics import roc_auc_score
    for _ in range(10):
        X = np.random.rand(20, 50)
        y = np.random.randint(0, 2, 20)
        group0, group1 = X[y == 0, ...], X[y == 1, ...]
        _, _, auc_forward = fast_mannwhitneyu(group0, group1)
        _, _, auc_reverse = fast_mannwhitneyu(group1, group0)
        reference = [roc_auc_score(y, x) for x in X.T]
        assert_array_almost_equal(auc_forward, 1. - auc_reverse)
        assert_array_almost_equal(auc_forward, reference)
| StarcoderdataPython |
1794643 | <reponame>jasmineyadeta/PythonPractice
# function to find average marks
def find_average_marks(marks):
    """Return the arithmetic mean of a non-empty sequence of marks."""
    return sum(marks) / len(marks)
# function to calculate grade and return it
def grading_scale(average_mark):
    """Map an average mark to a letter grade.

    >= 80 -> 'A'; 60-79 -> 'B'; 50-59 -> 'C'; below 50 -> 'F'.
    """
    # The elif chain already guarantees the upper bound at each step, so the
    # redundant `and average_mark < X` checks from the original are dropped.
    if average_mark >= 80:
        return "A"
    if average_mark >= 60:
        return "B"
    if average_mark >= 50:
        return "C"
    return "F"
# Demo run: average a fixed set of marks and report the letter grade.
subject_marks = [55, 64, 75, 80, 65, 99, 50]
mean_mark = find_average_marks(subject_marks)
print("Your average mark is:", mean_mark)
print("Your grade is:", grading_scale(mean_mark))
| StarcoderdataPython |
73994 | import re
def Remove_Duplicates(Test_string):
    """Collapse runs of consecutively repeated words (case-insensitive),
    keeping only the first occurrence of each run."""
    repeated_word = re.compile(r"\b(\w+)(?:\W\1\b)+", flags=re.IGNORECASE)
    return repeated_word.sub(r"\1", Test_string)
# Demo: each call prints the input with consecutive duplicate words collapsed.
Test_string1 = "Good bye bye world world"
Test_string2 = "Ram went went to to his home"
Test_string3 = "Hello hello world world"
print(Remove_Duplicates(Test_string1))
print(Remove_Duplicates(Test_string2))
print(Remove_Duplicates(Test_string3))
| StarcoderdataPython |
3264905 | import os
import json
import yaml
import logging
import click
import re
from .project import Project
from .plugin import PluginType, Plugin
from .plugin.factory import plugin_factory
from .config_service import ConfigService
from .utils import setting_env
class ProjectAddCustomService:
    """Interactive CLI flow that gathers everything needed to register a
    custom plugin (namespace, pip_url, executable, capabilities, settings)
    and writes it into the project's plugin configuration."""

    def __init__(self, project: Project, config_service: ConfigService = None):
        self.project = project
        # fall back to a ConfigService bound to this project
        self.config_service = config_service or ConfigService(project)

    def add(self, plugin_type: PluginType, plugin_name: str):
        """Prompt the user for each attribute of the custom plugin and
        persist it.

        Returns the result of ConfigService.add_to_file() for the installed
        plugin definition.
        """
        click.secho(
            f"Adding new custom {plugin_type.descriptor} with name '{plugin_name}'...",
            fg="green",
        )
        click.echo()

        # --- namespace ----------------------------------------------------
        click.echo(
            f"Specify the plugin's {click.style('namespace', fg='blue')}, which will serve as the:"
        )
        click.echo("- prefix for configuration environment variables")
        click.echo("- identifier to find related/compatible plugins")
        if plugin_type == PluginType.EXTRACTORS:
            click.echo("- default value for the `schema` setting when used")
            click.echo(" with loader target-postgres or target-snowflake")
        elif plugin_type == PluginType.LOADERS:
            click.echo("- default value for the `target` setting when used")
            click.echo(" with transformer dbt")
        click.echo()
        # loaders conventionally strip the `target-` prefix; everything else
        # just swaps dashes for underscores
        if plugin_type == PluginType.LOADERS:
            default_namespace = re.sub(r"^.*target-", "", plugin_name)
            default_description = "plugin name without `target-` prefix"
        else:
            default_namespace = plugin_name.replace("-", "_")
            default_description = "plugin name with underscores instead of dashes"
        click.echo(f"Hit Return to accept the default: {default_description}")
        click.echo()
        namespace = click.prompt(
            click.style("(namespace)", fg="blue"), type=str, default=default_namespace
        )

        # --- pip install argument -----------------------------------------
        click.echo()
        click.echo(
            f"Specify the plugin's {click.style('`pip install` argument', fg='blue')}, for example:"
        )
        click.echo("- PyPI package name:")
        click.echo(f"\t{plugin_name}")
        click.echo("- VCS repository URL:")
        click.echo(f"\tgit+https://gitlab.com/meltano/{plugin_name}.git")
        click.echo("- local directory, in editable/development mode:")
        click.echo(f"\t-e extract/{plugin_name}")
        click.echo()
        click.echo("Default: plugin name as PyPI package name")
        click.echo()
        pip_url = click.prompt(
            click.style("(pip_url)", fg="blue"), type=str, default=plugin_name
        )

        # --- executable ---------------------------------------------------
        click.echo()
        click.echo(f"Specify the package's {click.style('executable name', fg='blue')}")
        click.echo()
        click.echo("Default: package name derived from `pip_url`")
        click.echo()
        package_name, _ = os.path.splitext(os.path.basename(pip_url))
        executable = click.prompt(
            click.style("(executable)", fg="blue"), default=package_name
        )

        # --- capabilities (extractors only) -------------------------------
        capabilities = []
        if plugin_type == PluginType.EXTRACTORS:
            click.echo()
            click.echo(
                f"Specify the tap's {click.style('supported Singer features', fg='blue')} (executable flags), for example:"
            )
            click.echo("\t`catalog`: supports the `--catalog` flag")
            click.echo("\t`discover`: supports the `--discover` flag")
            click.echo("\t`properties`: supports the `--properties` flag")
            click.echo("\t`state`: supports the `--state` flag")
            click.echo()
            click.echo(
                "To find out what features a tap supports, reference its documentation or try one"
            )
            click.echo(
                "of the tricks under https://meltano.com/docs/contributor-guide.html#how-to-test-a-tap."
            )
            click.echo()
            click.echo("Multiple capabilities can be separated using commas.")
            click.echo()
            click.echo("Default: no capabilities")
            click.echo()
            capabilities = click.prompt(
                click.style("(capabilities)", fg="blue"),
                type=list,
                default=[],
                value_proc=lambda value: [c.strip() for c in value.split(",")],
            )

        # --- settings (extractors and loaders) ----------------------------
        settings = []
        if plugin_type in (PluginType.EXTRACTORS, PluginType.LOADERS):
            singer_type = "tap" if plugin_type == PluginType.EXTRACTORS else "target"
            click.echo()
            click.echo(
                f"Specify the {singer_type}'s {click.style('supported settings', fg='blue')} (`config.json` keys)"
            )
            click.echo()
            click.echo("Nested properties can be represented using the `.` seperator,")
            click.echo('e.g. `auth.username` for `{ "auth": { "username": value } }`.')
            click.echo()
            click.echo(
                f"To find out what settings a {singer_type} supports, reference its documentation."
            )
            click.echo()
            click.echo("Multiple setting names (keys) can be separated using commas.")
            click.echo()
            click.echo("Default: no settings")
            click.echo()
            settings = click.prompt(
                click.style("(settings)", fg="blue"),
                type=list,
                default=[],
                value_proc=lambda value: [c.strip() for c in value.split(",")],
            )

        # assemble the plugin definition and persist it to the project file
        plugin = Plugin(
            plugin_type,
            plugin_name,
            namespace,
            pip_url=pip_url,
            executable=executable,
            capabilities=capabilities,
            settings=[
                {"name": name, "env": setting_env(namespace, name)} for name in settings
            ],
        )
        installed = plugin.as_installed(custom=True)
        return self.config_service.add_to_file(installed)

    def add_related(self, *args, **kwargs):
        # Custom plugins have no known related plugins; always empty.
        return []
| StarcoderdataPython |
1617256 | import yaml
class File:
    """Thin read-only wrapper around a file path with YAML and raw-text accessors."""

    def __init__(self, url):
        # Path to the underlying file.
        self._url = url

    @property
    def yaml_dict(self):
        """Parsed YAML content, or None (after printing the error) on parse failure."""
        with open(self._url, 'r') as stream:
            try:
                return yaml.safe_load(stream)
            except yaml.YAMLError as exc:
                print(exc)
                return None

    @property
    def content(self):
        """Raw text content, or None (after printing the error) if reading fails."""
        with open(self._url, 'r') as handle:
            try:
                return handle.read()
            except Exception as error:
                print(str(error))
                return None
| StarcoderdataPython |
1768049 | #
# copyright (c) 2010 <NAME> <<EMAIL>>
#
from BuildSystem.AutoToolsBuildSystem import *
from Package.PackageBase import *
from Packager.TypePackager import *
from Source.MultiSource import *
class AutoToolsPackageBase(PackageBase, MultiSource, AutoToolsBuildSystem, TypePackager):
    """provides a base class for autotools based packages from any source"""

    def __init__(self):
        # Each base class is initialised explicitly (no super() chain is used
        # in this hierarchy), in declaration order.
        CraftCore.log.debug("AutoToolsPackageBase.__init__ called")
        PackageBase.__init__(self)
        MultiSource.__init__(self)
        AutoToolsBuildSystem.__init__(self)
        TypePackager.__init__(self)
        # needed to run autogen.sh; this is needed in all checkouts but
        # normally not in a tarball
        if self.subinfo.hasSvnTarget():
            self.subinfo.options.configure.bootstrap = True
3310211 | <reponame>Expert37/python_lesson_3
# Tuples are the same thing as lists, only immutable.
# The same objects that can be stored in a list: when kept in a tuple instead,
# access to them is faster and they occupy less memory.
# Tuples are written with (). Access and slices work exactly as with lists.

# Initialisation with ()
temp_tuple = (1,2,3)
print(type(temp_tuple), temp_tuple)

# Accessing tuple elements (same as with lists)
print(temp_tuple[0])

# Iterate over the tuple
for i in range(len(temp_tuple)):
    print(temp_tuple[i])

# Functions on tuples
#--------- all the same as with lists
# Operations on tuples
#--------- all the same as with lists
# Methods
#--------- all the same as with lists

# Compare how much memory a list and a tuple occupy
temp_list = [1,2,3]
temp_tuple = (1,2,3)
print(temp_list.__sizeof__()) # __sizeof__ - reports the object's memory footprint
print(temp_tuple.__sizeof__())

# If you need to change something in a tuple, convert it to a list and back:
temp_tuple = (1,2,3)
temp_list = list(temp_tuple)
temp_list.append(4)
temp_tuple = tuple(temp_list)
print(type(temp_tuple), temp_tuple)
print(type(temp_list), temp_list)
| StarcoderdataPython |
class Solution:
    def removeStones(self, stones):
        """Maximum number of stones removable when a stone may be removed iff
        it shares a row or column with another remaining stone.

        Rows and columns are union-find nodes (columns encoded as ~col so the
        two id spaces never collide). One stone must survive per connected
        component, so the answer is len(stones) - number_of_components.

        :type stones: List[List[int]]
        :rtype: int
        """
        parent = {}

        def find(node):
            # iterative find with full path compression
            root = node
            while parent[root] != root:
                root = parent[root]
            while parent[node] != root:
                parent[node], node = root, parent[node]
            return root

        def union(a, b):
            parent.setdefault(a, a)
            parent.setdefault(b, b)
            parent[find(a)] = find(b)

        for row, col in stones:
            union(row, ~col)

        components = {find(node) for node in parent}
        return len(stones) - len(components)
72960 | <filename>scripts/prepare_data_multi_process.py
import redis
import json
import h5py
import pickle
import numpy as np
import random
import jieba
import multiprocessing
# shared state filled in by the workers / combiner
word2idx, idx2word ,allwords, corpus = None, None,{},[]
DUMP_FILE = 'data/basic_data_700k_v2.pkl'
check_sample_size = 10
# vocabulary pruning thresholds: keep words with tf > TF_THRES or df > DF_THRES
TF_THRES = 5
DF_THRES = 2
# the crawled documents are sharded across two local redis databases
r0 = redis.StrictRedis(host='localhost', port=6379, db=0)
r1 = redis.StrictRedis(host='localhost', port=6379, db=1)
# reserved vocabulary ids: begin / end-of-sequence / padding / unknown
id_beg = 0
id_eos = 1
id_emp = 2
id_unk = 3
r = None
class Word:
    """A vocabulary entry together with its corpus statistics.

    Attributes:
        val: the word itself.
        tf: term frequency (total occurrences across the corpus).
        df: document frequency (number of documents containing the word).
    """

    def __init__(self, val, tf, df):
        self.val = val
        self.tf = tf
        self.df = df

    def __repr__(self):
        # Bug fix: the original body was `pass`, so repr(word) returned None
        # and raised "TypeError: __repr__ returned non-string".
        return 'Word(val=%r, tf=%r, df=%r)' % (self.val, self.tf, self.df)
def parse_all_crawled_data(keys, idx):
    """Fetch the crawled documents for *keys* from redis db *idx*.

    Returns a list of (group_id, title, abstract) tuples; tabs in the text
    are flattened to spaces, and an empty abstract falls back to the title.
    """
    conn = r0 if idx == 0 else r1
    docs = []
    for raw in conn.mget(keys):
        record = json.loads(raw)
        title = record.get("title", "").replace('\t', ' ')
        abstract = record.get("abstract", "").replace('\t', ' ')
        if abstract == "":
            abstract = title
        docs.append((record.get("group_id"), title, abstract))
    return docs
def cal_word_tf_df(corpus):
    """Tokenize every document and accumulate per-term tf/df statistics.

    Returns (words, title_abstract_pairs) where *words* maps token -> Word
    and each pair is (title tokens, abstract tokens).
    """
    words = {}
    title_abstract_pairs = []
    for doc in corpus:
        title_tokens = list(jieba.cut(doc[1].lower(), cut_all=False))
        abstract_tokens = list(jieba.cut(doc[2].lower(), cut_all=False))
        title_abstract_pairs.append((title_tokens, abstract_tokens))
        all_tokens = title_tokens + abstract_tokens
        # term frequency: every occurrence counts
        for token in all_tokens:
            if token in words:
                words[token].tf += 1
            else:
                words[token] = Word(val=token, tf=1, df=0)
        # document frequency: each document counts a term at most once
        for token in set(all_tokens):
            words[token].df += 1
    return words, title_abstract_pairs
def build_idx_for_words_tf_df(chars, tf_thres=TF_THRES, df_thres=DF_THRES):
    """Build word<->id mappings for terms that are frequent enough to keep.

    Args:
        chars: iterable of Word objects (``.val``/``.tf``/``.df``).
        tf_thres: a term is kept when its term frequency exceeds this...
        df_thres: ...or when its document frequency exceeds this.
    Returns:
        (char2idx, idx2char) dictionaries; the four reserved symbols
        <beg>/<eos>/<emp>/<unk> always occupy ids 0-3.
    """
    start_idx = id_unk + 1
    char2idx = {}
    char2idx['<eos>'] = id_eos
    char2idx['<unk>'] = id_unk
    char2idx['<emp>'] = id_emp
    char2idx['<beg>'] = id_beg
    # Keep only terms with tf > tf_thres OR df > df_thres.  (The original
    # comment claimed "tf>20 and df>10", which matched neither the operator
    # nor the thresholds; the code is the source of truth.)
    chars = filter(lambda char: char.tf > tf_thres or char.df > df_thres, chars)
    char2idx.update(dict([(char.val, start_idx + idx) for idx, char in enumerate(chars)]))
    # (Removed the original's dead pre-assignment ``idx2char = {}``.)
    idx2char = dict([(idx, char) for char, idx in char2idx.items()])
    return char2idx, idx2char
def prt(label, x):
    """Print *label* and the words for the id sequence *x* on one line.

    Python 2 print-statement syntax: the trailing commas suppress newlines.
    Padding ids (<emp>) are skipped; relies on the module-level idx2word map.
    """
    print label+':',
    for w in x:
        if w == id_emp:
            # <emp> is padding and carries no content
            continue
        print idx2word[w],
    print
def worker(i,keys,idx):
    """Process one slice of redis keys in a pool worker.

    Fetches the documents for *keys* from redis db *idx* and returns
    (per-term Word statistics, tokenized (title, abstract) pairs), which the
    parent process merges via combine_results().
    """
    print "worker [%2d] started with keys:[%d]!"%(i,len(keys))
    corpus = parse_all_crawled_data(keys, idx)
    print "worker [%2d] get docs :[%d]!"%(i,len(corpus))
    words,sub_corpus = cal_word_tf_df(corpus)
    return words,sub_corpus
def combine_results(res):
    """Merge one worker's (words, sub_corpus) result into the global state.

    Runs in the parent process as the apply_async callback: extends the
    shared corpus, folds the worker's tf/df counts into ``allwords`` and
    rebuilds the word<->id maps.
    """
    # Typo fix: the original declared ``global copurs`` instead of ``corpus``.
    # ``corpus`` is only mutated in place so it worked by accident, but
    # word2idx/idx2word are rebound and genuinely need the declaration.
    global corpus, word2idx, idx2word
    words, sub_corpus = res[0], res[1]
    corpus.extend(sub_corpus)
    for word in words:
        if word not in allwords:
            allwords[word] = Word(val=word, tf=0, df=0)
        allwords[word].tf += words[word].tf
        allwords[word].df += words[word].df
    word2idx, idx2word = build_idx_for_words_tf_df(allwords.values())
def dump_all_results():
    """Pickle the vocabulary and id-encoded titles/abstracts to DUMP_FILE.

    Out-of-vocabulary tokens are encoded as id_unk.
    """
    titles, abstracts = [], []
    for ts_, as_ in corpus:
        titles.append([word2idx.get(word, id_unk) for word in ts_])
        abstracts.append([word2idx.get(word, id_unk) for word in as_])
    # Bug fix: the original never closed the file handle; ``with`` guarantees
    # the pickle stream is flushed and the descriptor released.
    with open(DUMP_FILE, 'wb') as datafile:
        pickle.dump((allwords, word2idx, idx2word, titles, abstracts), datafile, -1)
def check_dump():
    """Reload DUMP_FILE and print a few random samples as a sanity check."""
    # NOTE(review): the file is opened in text mode and never closed; pickle
    # normally requires 'rb' — confirm this behaves on the target Python 2.
    allwords, word2idx, idx2word, titles, abstracts = pickle.load(open(DUMP_FILE))
    print "allwords size is:",len(allwords)
    print "word2idx size is:",len(word2idx)
    print "titles size is:",len(titles)
    for k in range(check_sample_size):
        # the loop index is deliberately replaced by a random sample position
        k = random.randint(0,len(titles) - 1)
        print "[%s]th Example"%(k)
        prt('title',titles[k])
        prt('abstract',abstracts[k])
# --- driver: fan the redis keys out over a pool of worker processes ---------
worker_size = 10
pool = multiprocessing.Pool()
for idx,conn in enumerate([r0,r1]):
    keys = conn.keys()
    # Python 2 semantics: integer division; each worker gets ``batch`` keys.
    batch = len(keys) / worker_size
    for i in range(worker_size):
        if i == worker_size - 1:
            # the last worker also takes the remainder slice
            sub_keys = keys[i * batch : ]
        else:
            sub_keys = keys[i * batch : i * batch + batch]
        pool.apply_async(worker,(idx * 10 + i,sub_keys,idx,),callback=combine_results)
pool.close()
pool.join()
# combine_results has merged every worker's output by the time join() returns.
dump_all_results()
check_dump()
print "all job finished!"
| StarcoderdataPython |
101416 | <gh_stars>10-100
#!/usr/bin/env python3
import os
import sys
import time
import numpy as np
import simpleaudio as sa
class Player:
    """Plays musical notes as pure sine tones through ``simpleaudio``.

    Notes are strings such as ``'A'``, ``'C#'``, ``'A4'`` or ``'B3b'``:
    a letter A-G, an optional octave digit 0-8 (defaults to 4) and an
    optional semitone symbol ``#``/``b``.  ``play_note('pause', d)`` plays
    *d* seconds of silence.  Invalid notes print to stderr and are skipped.
    """

    def __init__(self, volume: float = 0.3,
                 mute_output: bool = False):
        if volume < 0 or volume > 1:
            raise ValueError("Volume must be a float between 0 and 1")
        # Frequencies for the lowest octave
        self.note_frequencies = {
            'A': 27.50000,
            'B': 30.86771,
            'C': 16.35160,
            'D': 18.35405,
            'E': 20.60172,
            'F': 21.82676,
            'G': 24.49971
        }
        self.volume = volume
        self.mute_output = mute_output
        self.rate = 44100            # sample rate in Hz
        self.freq = 0                # frequency of the most recently parsed note
        self.fade = 800              # fade-in/out length in samples (click removal)
        self._valid_note = True
        self._fade_in = np.arange(0., 1., 1 / self.fade)
        self._fade_out = np.arange(1., 0., -1 / self.fade)
        self._play_obj = None
        self._destructor_sleep = 0   # seconds __del__ waits so the last note finishes

    def __set_base_frequency(self, note: str):
        """Set self.freq to the octave-0 frequency of the note's letter."""
        letter = note[:1].upper()
        try:
            self.freq = self.note_frequencies[letter]
        except KeyError:
            # Narrowed from a bare ``except``: only a missing letter is expected.
            self._valid_note = False
            print("Error: invalid note: '"
                  + note[:1]
                  + "'",
                  file=sys.stderr)

    def __set_octave(self, octave: str = '4'):
        """Scale self.freq up to the requested octave (0-8)."""
        if not self._valid_note:
            return
        try:
            octave_value = int(octave)
            if octave_value < 0 or octave_value > 8:
                raise ValueError('octave value error')
            # each octave doubles the frequency
            self.freq *= (2 ** octave_value)
        except ValueError:
            # Narrowed from a bare ``except``: covers both int() failures
            # and the explicit range check above.
            self._valid_note = False
            print("Error: invalid octave: '"
                  + octave
                  + "'",
                  file=sys.stderr)

    def __set_semitone(self, symbol: str):
        """Shift self.freq one semitone up ('#') or down ('b')."""
        if not self._valid_note:
            return
        if symbol == '#':
            self.freq *= (2 ** (1. / 12.))
        elif symbol == 'b':
            self.freq /= (2 ** (1. / 12.))
        else:
            self._valid_note = False
            print("Error: invalid symbol: '"
                  + symbol
                  + "'",
                  file=sys.stderr)

    def __calc_frequency(self, note: str):
        """Parse *note* and leave its playback frequency in self.freq."""
        self.__set_base_frequency(note)
        if len(note) == 1:
            self.__set_octave()
        elif len(note) == 2:
            if note[1:2] == '#' or note[1:2] == 'b':
                # letter + semitone only: octave defaults to 4
                self.__set_octave()
                self.__set_semitone(note[1:2])
            else:
                self.__set_octave(note[1:2])
        elif len(note) == 3:
            self.__set_octave(note[1:2])
            self.__set_semitone(note[2:3])
        else:
            if self._valid_note:
                # Typo fix: the original printed "Errror".
                print("Error: invalid note: '"
                      + note
                      + "'",
                      file=sys.stderr)
            self._valid_note = False

    def __wait_for_prev_sound(self):
        """Block until the previously queued buffer has finished playing."""
        # Busy-wait kept from the original to preserve its exact timing;
        # simpleaudio's play_obj.wait_done() is the blocking alternative.
        if self._play_obj is not None:
            while self._play_obj.is_playing():
                pass

    def __write_stream(self, duration: float):
        """Synthesize self.freq for *duration* seconds and start playback."""
        t = np.linspace(0, duration, int(duration * self.rate), False)
        audio = np.sin(self.freq * t * 2 * np.pi)
        audio *= 32767 / np.max(np.abs(audio))   # normalize into int16 range
        audio *= self.volume
        if len(audio) > self.fade:
            # taper the edges to avoid clicks
            audio[:self.fade] *= self._fade_in
            audio[-self.fade:] *= self._fade_out
        audio = audio.astype(np.int16)
        self.__wait_for_prev_sound()
        self._play_obj = sa.play_buffer(audio, 1, 2, self.rate)

    def __print_played_note(self, note: str, duration: float):
        """Log what is being played unless muted or the note was invalid."""
        if self.mute_output or not self._valid_note:
            return
        if note == 'pause':
            print("Pausing for " + str(duration) + "s")
        else:
            print("Playing " + note + " (" + format(self.freq, '.2f') + " Hz) for " + str(duration) + "s")

    def play_note(self, note: str, duration: float = 0.5):
        """Play *note* (or 'pause') for *duration* seconds (asynchronously)."""
        self._valid_note = True
        if note == 'pause':
            self.__wait_for_prev_sound()
            self.__print_played_note(note, duration)
            time.sleep(duration)
            self._destructor_sleep = 0
        else:
            self.__calc_frequency(note)
            if self._valid_note:
                self.__write_stream(duration)
                self.__print_played_note(note, duration)
                self._destructor_sleep = duration

    def __del__(self):
        # Give the final asynchronous buffer time to finish before teardown.
        time.sleep(self._destructor_sleep)
1759484 | <reponame>AkasDutta/veros
from veros.core import friction
from veros.pyom_compat import get_random_state
from test_base import compare_state
# Shared model configuration for every friction test below: a small
# 70x60x50 grid with one-hour time steps, and every friction-related
# feature flag switched on so each code path is exercised.
TEST_SETTINGS = dict(
    nx=70,
    ny=60,
    nz=50,
    dt_tracer=3600,
    dt_mom=3600,
    enable_cyclic_x=True,
    enable_conserve_energy=True,
    enable_bottom_friction_var=True,
    enable_hor_friction_cos_scaling=True,
    enable_momentum_sources=True,
    r_ray=1,
    r_bot=1,
    r_quad_bot=1,
    A_h=1,
    A_hbi=1,
)
def test_explicit_vert_friction(pyom2_lib):
    """Explicit vertical friction must match the pyOM2 reference."""
    state, ref_model = get_random_state(pyom2_lib, extra_settings=TEST_SETTINGS)
    state.variables.update(friction.explicit_vert_friction(state))
    ref_model.explicit_vert_friction()
    compare_state(state, ref_model)
def test_implicit_vert_friction(pyom2_lib):
    """Implicit vertical friction must match the pyOM2 reference."""
    state, ref_model = get_random_state(pyom2_lib, extra_settings=TEST_SETTINGS)
    state.variables.update(friction.implicit_vert_friction(state))
    ref_model.implicit_vert_friction()
    compare_state(state, ref_model)
def test_rayleigh_friction(pyom2_lib):
    """Rayleigh friction must match the pyOM2 reference."""
    state, ref_model = get_random_state(pyom2_lib, extra_settings=TEST_SETTINGS)
    state.variables.update(friction.rayleigh_friction(state))
    ref_model.rayleigh_friction()
    compare_state(state, ref_model)
def test_linear_bottom_friction(pyom2_lib):
    """Linear bottom friction must match the pyOM2 reference."""
    state, ref_model = get_random_state(pyom2_lib, extra_settings=TEST_SETTINGS)
    state.variables.update(friction.linear_bottom_friction(state))
    ref_model.linear_bottom_friction()
    compare_state(state, ref_model)
def test_quadratic_bottom_friction(pyom2_lib):
    """Quadratic bottom friction must match the pyOM2 reference."""
    state, ref_model = get_random_state(pyom2_lib, extra_settings=TEST_SETTINGS)
    state.variables.update(friction.quadratic_bottom_friction(state))
    ref_model.quadratic_bottom_friction()
    compare_state(state, ref_model)
def test_harmonic_friction(pyom2_lib):
    """Harmonic (Laplacian) friction must match the pyOM2 reference."""
    state, ref_model = get_random_state(pyom2_lib, extra_settings=TEST_SETTINGS)
    state.variables.update(friction.harmonic_friction(state))
    ref_model.harmonic_friction()
    compare_state(state, ref_model)
def test_biharmonic_friction(pyom2_lib):
    """Biharmonic friction must match the pyOM2 reference."""
    state, ref_model = get_random_state(pyom2_lib, extra_settings=TEST_SETTINGS)
    state.variables.update(friction.biharmonic_friction(state))
    ref_model.biharmonic_friction()
    compare_state(state, ref_model)
def test_momentum_sources(pyom2_lib):
    """External momentum sources must match the pyOM2 reference."""
    state, ref_model = get_random_state(pyom2_lib, extra_settings=TEST_SETTINGS)
    state.variables.update(friction.momentum_sources(state))
    ref_model.momentum_sources()
    compare_state(state, ref_model)
| StarcoderdataPython |
36669 | <gh_stars>0
# -*- coding: utf-8 -*-
# FOGLAMP_BEGIN
# See: http://foglamp.readthedocs.io/
# FOGLAMP_END
"""HTTP Listener handler for sensor readings"""
import asyncio
import copy
import sys
from aiohttp import web
from foglamp.common import logger
from foglamp.common.web import middleware
from foglamp.plugins.common import utils
from foglamp.services.south.ingest import Ingest
__author__ = "<NAME>"
__copyright__ = "Copyright (c) 2017 OSIsoft, LLC"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
# Module-level logger; level 20 == logging.INFO.
_LOGGER = logger.setup(__name__, level=20)
_CONFIG_CATEGORY_NAME = 'HTTP_SOUTH'
_CONFIG_CATEGORY_DESCRIPTION = 'South Plugin HTTP Listener'
# Default configuration presented to FogLAMP's configuration manager;
# each entry carries {description, type, default} (values are strings).
_DEFAULT_CONFIG = {
    'plugin': {
        'description': 'South Plugin HTTP Listener',
        'type': 'string',
        'default': 'http_south'
    },
    'port': {
        'description': 'Port to listen on',
        'type': 'integer',
        'default': '6683',
    },
    'host': {
        'description': 'Address to accept data on',
        'type': 'string',
        'default': '0.0.0.0',
    },
    'uri': {
        'description': 'URI to accept data on',
        'type': 'string',
        'default': 'sensor-reading',
    },
    'management_host': {
        'description': 'Management host',
        'type': 'string',
        'default': '127.0.0.1',
    }
}
def plugin_info():
    """Return the descriptor that registers this plugin with the south service."""
    return {
        'name': 'http_south',
        'version': '1.0',
        'mode': 'async',
        'type': 'south',
        'interface': '1.0',
        'config': _DEFAULT_CONFIG
    }
def plugin_init(config):
    """Registers HTTP Listener handler to accept sensor readings

    Args:
        config: JSON configuration document for the South device
            configuration category
    Returns:
        handle: JSON object to be used in future calls to the plugin
    """
    # The configuration document itself serves as the plugin handle.
    return config
def plugin_start(data):
    """Start the aiohttp listener described by the plugin handle *data*.

    Creates the POST route on the configured host/port/URI and stashes the
    app/handler/server objects back into *data* so _plugin_stop can tear
    them down later.  Errors are logged, not raised.
    """
    try:
        host = data['host']['value']
        port = data['port']['value']
        uri = data['uri']['value']
        loop = asyncio.get_event_loop()
        app = web.Application(middlewares=[middleware.error_middleware])
        app.router.add_route('POST', '/{}'.format(uri), HttpSouthIngest.render_post)
        handler = app.make_handler()
        server_coro = loop.create_server(handler, host, port)
        future = asyncio.ensure_future(server_coro)
        data['app'] = app
        data['handler'] = handler
        # Filled in asynchronously by f_callback once the socket is bound.
        data['server'] = None
        def f_callback(f):
            # _LOGGER.info(repr(f.result()))
            """ <Server sockets=
            [<socket.socket fd=17, family=AddressFamily.AF_INET, type=2049,proto=6, laddr=('0.0.0.0', 6683)>]>"""
            data['server'] = f.result()
        future.add_done_callback(f_callback)
    except Exception as e:
        _LOGGER.exception(str(e))
def plugin_reconfigure(handle, new_config):
    """ Reconfigures the plugin

    Called when the plugin's configuration changes while the South service is
    running; returns the new handle, with ``restart`` set to 'yes' when a key
    setting changed (forcing a stop/re-init) and 'no' otherwise.

    Args:
        handle: handle returned by the plugin initialisation call
        new_config: JSON object representing the new configuration category
    Returns:
        new_handle: new handle to be used in the future calls
    """
    _LOGGER.info("Old config for HTTP_SOUTH plugin {} \n new config {}".format(handle, new_config))
    # Find diff between old config and new config
    diff = utils.get_diff(handle, new_config)
    # A change to any key setting requires a full re-initialisation.
    if {'port', 'host', 'management_host'} & set(diff):
        _plugin_stop(handle)
        new_handle = plugin_init(new_config)
        new_handle['restart'] = 'yes'
        _LOGGER.info("Restarting HTTP_SOUTH plugin due to change in configuration keys [{}]".format(', '.join(diff)))
        return new_handle
    new_handle = copy.deepcopy(handle)
    new_handle['restart'] = 'no'
    return new_handle
def _plugin_stop(handle):
    """ Stops the plugin doing required cleanup, to be called prior to the
    South device service being shut down.

    Args:
        handle: handle returned by the plugin initialisation call
    Raises:
        re-raises whatever the teardown fails with, after logging it
    """
    _LOGGER.info('Stopping South HTTP plugin.')
    try:
        app = handle['app']
        handler = handle['handler']
        server = handle['server']
        if server:
            # Stop accepting new connections, then let in-flight requests
            # drain (60 s grace for the handler) before cleaning up the app.
            server.close()
            asyncio.ensure_future(server.wait_closed())
            asyncio.ensure_future(app.shutdown())
            asyncio.ensure_future(handler.shutdown(60.0))
            asyncio.ensure_future(app.cleanup())
    except Exception as e:
        _LOGGER.exception(str(e))
        raise
def plugin_shutdown(handle):
    """Shut the plugin down cleanly before the South service exits.

    Args:
        handle: handle returned by the plugin initialisation call
    """
    _plugin_stop(handle)
    _LOGGER.info('South HTTP plugin shut down.')
# TODO: Implement FOGL-701 (implement AuditLogger which logs to DB and can be used by all ) for this class
class HttpSouthIngest(object):
    """Handles incoming sensor readings from HTTP Listener"""
    @staticmethod
    async def render_post(request):
        """Store sensor readings posted over HTTP into FogLAMP.

        The JSON payload looks like::

            {
                "timestamp": "2017-01-02T01:02:03.23232Z-05:00",
                "asset": "pump1",
                "key": "80a43623-ebe5-40d6-8d80-3f892da9b3b4",
                "readings": {"humidity": 0.0, "temperature": -40.0}
            }

        Example:
            curl -X POST http://localhost:6683/sensor-reading -d '{"timestamp": "2017-01-02T01:02:03.23232Z-05:00", "asset": "pump1", "key": "80a43623-ebe5-40d6-8d80-3f892da9b3b4", "readings": {"humidity": 0.0, "temperature": -40.0}}'

        Responds 400 for malformed payloads, 500 for unexpected failures.
        """
        # TODO: The payload is documented at
        # https://docs.google.com/document/d/1rJXlOqCGomPKEKx2ReoofZTXQt9dtDiW_BHU7FYsj-k/edit#
        # and will be moved to a .rst file
        # TODO: Decide upon the correct format of message
        message = {'result': 'success'}
        try:
            if not Ingest.is_available():
                message = {'busy': True}
                # NOTE(review): this 503 is raised *inside* the try, so the
                # generic ``except Exception`` below converts it into a 500
                # and counts it as a discarded reading — confirm intent.
                raise web.HTTPServiceUnavailable(reason=message)
            try:
                payload = await request.json()
            except Exception:
                raise ValueError('Payload must be a dictionary')
            asset = payload['asset']
            timestamp = payload['timestamp']
            key = payload['key']
            # readings or sensor_values are optional
            try:
                readings = payload['readings']
            except KeyError:
                readings = payload['sensor_values']  # sensor_values is deprecated
            # if optional then
            # TODO: confirm, do we want to check this?
            if not isinstance(readings, dict):
                raise ValueError('readings must be a dictionary')
            await Ingest.add_readings(asset=asset, timestamp=timestamp, key=key, readings=readings)
        except (KeyError, ValueError, TypeError) as e:
            # malformed payload -> 400, and the reading is counted as discarded
            Ingest.increment_discarded_readings()
            _LOGGER.exception("%d: %s", web.HTTPBadRequest.status_code, str(e))
            raise web.HTTPBadRequest(reason=str(e))
        except Exception as ex:
            # anything unexpected -> 500
            Ingest.increment_discarded_readings()
            _LOGGER.exception("%d: %s", web.HTTPInternalServerError.status_code, str(ex))
            raise web.HTTPInternalServerError(reason=str(ex))
        return web.json_response(message)
| StarcoderdataPython |
1606685 | #!/usr/bin/env python
# Prefer setuptools, falling back to distutils on minimal installs.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup
# In a catkin/ROS environment, let catkin_pkg derive the setup arguments
# from package.xml; otherwise parse package.xml by hand below.
try:
    from catkin_pkg.python_setup import generate_distutils_setup
    d = generate_distutils_setup()
except ImportError:
    # extract information from package.xml manually when catkin_pkg is unavailable
    from xml.etree import ElementTree
    tree = ElementTree.parse('package.xml')
    root = tree.getroot()
    d = {
        'name': root.find('./name').text,
        'version': root.find('./version').text,
        'maintainer': root.findall('./maintainer')[0].text,
        'maintainer_email': root.findall('./maintainer')[0].attrib['email'],
        'license': ', '.join([x.text for x in root.findall('./license')]),
        'url': root.findall('./url')[0].text,
        'author': ', '.join([x.text for x in root.findall('./author')]),
    }
    description = root.find('./description').text.strip()
    # PyPI's short description is limited; truncate long ones with an ellipsis.
    if len(description) <= 200:
        d['description'] = description
    else:
        d['description'] = description[:197] + '...'
    d['long_description'] = description
# Settings common to both paths.
d.update({
    'packages': [d['name']],
    'package_dir': {'': 'src'},
    'classifiers': [
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'License :: OSI Approved :: BSD License',
        'License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)',
        'License :: OSI Approved :: GNU General Public License (GPL)',
    ],
})
setup(**d)
| StarcoderdataPython |
1601623 | """XML Utilities"""
import xml.etree.ElementTree as ET
def quote(tag):
    """
    Turn a namespace-prefixed tag name into a Clark-notation qualified tag
    name for lxml. For example, ``quote('w:cSld')`` returns
    ``'{http://schemas.openxmlformats.org/wordprocessingml/2006/main}cSld'``.

    Only the ``w`` (WordprocessingML) prefix is mapped; any other prefix
    raises KeyError.  (Doc fix: the original example used ``qn('p:cSld')`` —
    the wrong function name and a prefix that is not in the map.)

    Source: https://github.com/python-openxml/python-docx/
    """
    nsmap = {
        'w': 'http://schemas.openxmlformats.org/wordprocessingml/2006/main'}
    prefix, tagroot = tag.split(':')
    uri = nsmap[prefix]
    return '{{{}}}{}'.format(uri, tagroot)
def unquote(tag):
    # type: (str) -> str
    """Strip a Clark-notation ``{namespace}`` prefix from *tag*.

    See: [Python issue 18304](https://bugs.python.org/issue18304)

    Arguments:
        tag {str} -- (possibly-)namespaced tag

    Returns:
        str -- tag name without namespace; unchanged if no '}' is present
    """
    return tag.rpartition('}')[2]
def parse(xml_bytes):
    # type: (bytes) -> ET.Element
    """Parse raw XML bytes and return the root Element."""
    return ET.fromstring(xml_bytes)
| StarcoderdataPython |
3316886 | <gh_stars>1-10
from flask import render_template, Blueprint, redirect
from app.models import card
from app.forms import card_form
from flask_user import roles_accepted, login_required
# Blueprint grouping all card views; registered under /cards by the app.
cards_blueprint = Blueprint('cards', __name__)
@cards_blueprint.route('/')
def index():
    """Render the list of all cards."""
    return render_template('cards/index.jinja2', cards=card.query.all())
@cards_blueprint.route('/new', methods=('GET', 'POST'))
@login_required
@roles_accepted('user', 'admin')
def new():
    """Create a card from the submitted form, or render the blank form."""
    form = card_form()
    if not form.validate_on_submit():
        # GET, or POST with validation errors: show the form.
        return render_template('cards/new.jinja2', form=form)
    created = card(name=form.name.data, size=form.size.data)
    created.save()
    return redirect('/cards')
@cards_blueprint.route('/edit/<id>', methods=('GET', 'POST'))
@login_required
@roles_accepted('user', 'admin')
def edit(id):
    """Edit an existing card.

    GET pre-fills the form from the stored card; a valid POST persists the
    changes and redirects back to the card list.
    """
    form = card_form()
    data = card.query.filter(card.id == id).first()
    # NOTE(review): ``data`` is None when no card matches ``id`` — the
    # attribute accesses below would raise; confirm whether a 404 is wanted.
    if form.validate_on_submit():
        # Bug fix: the original line ended with a trailing comma
        # (``data.name = form.name.data,``), which stored a one-element
        # tuple instead of the submitted string.
        data.name = form.name.data
        data.size = form.size.data
        data.save()
        return redirect('/cards')
    form.name.data = data.name
    form.size.data = data.size
    return render_template('cards/edit.jinja2', form=form)
| StarcoderdataPython |
3253974 | <reponame>sixhobbits/nps-sample-data
import random
from collections import Counter
from datetime import datetime
from datetime import timedelta
# Score weightings (index = score 0..10) tuned so sampling yields an NPS of
# around 70 before the random adjustment pass.
BASE_WEIGHTS = [0.05, 0.045, 0.002, 0.002, 0.002, 0.01, 0.029, 0.05, 0.06, 0.3, 0.45]
# All possible NPS scores.
POPULATION = [0,1,2,3,4,5,6,7,8,9,10]
def flip_coin(probability=0.5):
    """Return True with the given probability, False otherwise."""
    outcome = random.choices(population=[0, 1],
                             weights=[1 - probability, probability])[0]
    return bool(outcome)
def adjust_by_x_with_probability(n, adjustment, probability=0.1):
    """With the given probability return n + adjustment; otherwise n unchanged."""
    if flip_coin(probability):
        return n + adjustment
    return n
def get_random_date(start, end):
    # https://stackoverflow.com/questions/553303/generate-a-random-date-between-two-other-dates
    """Return a uniformly random datetime in the half-open range [start, end)."""
    span = end - start
    span_seconds = span.days * 24 * 60 * 60 + span.seconds
    offset = random.randrange(span_seconds)
    return start + timedelta(seconds=offset)
def generate_random_nps(weights, num_samples=5000, adjust=1, adjust_chance=0.25):
    """Draw NPS scores using *weights*, then randomly nudge each one.

    Each drawn score is shifted by *adjust* with probability *adjust_chance*
    and clamped back into the valid 0..10 range.
    """
    raw_scores = random.choices(
        population=POPULATION,
        weights=weights,
        k=num_samples
    )
    scores = []
    for score in raw_scores:
        score = adjust_by_x_with_probability(score, adjust, adjust_chance)
        # clamp into the valid NPS range
        scores.append(min(10, max(0, score)))
    return scores
def get_adjusted_weights(original_weights, adjustments):
    """Element-wise sum of the two weight lists, rounded to 5 decimal places."""
    return [round(weight + delta, 5)
            for weight, delta in zip(original_weights, adjustments)]
def calculate_nps(scores):
    """Compute the Net Promoter Score for a list of 0-10 ratings.

    Promoters are 9-10, passives 7-8, detractors 0-6; any other value raises.
    Returns an int in -100..100.
    """
    total = len(scores)
    counts = {'promoter': 0, 'passive': 0, 'detractor': 0}
    for score in scores:
        if score in (9, 10):
            counts['promoter'] += 1
        elif score in (7, 8):
            counts['passive'] += 1
        elif score in (0, 1, 2, 3, 4, 5, 6):
            counts['detractor'] += 1
        else:
            raise Exception("Invalid NPS Score: {}".format(score))
    nps = (counts['promoter'] / total) - (counts['detractor'] / total)
    return round(nps * 100)
# Open 'eeee' for read/write; offer to create it when it does not exist.
try:
    file = open('eeee', 'r+')
except IOError:
    # Narrowed from ``except Exception``: only open() failures are expected.
    print('There is no file named as eeee')
    response = input("do you want to create a eeee file?(y/n) ")
    if response == 'y':
        # Bug fix: the original leaked this handle; close it once created.
        file = open('eeee', 'w')
        file.close()
else:
    # The file existed: append-free write at position 0, then close.
    file.write('hahahaha')
    file.close()
| StarcoderdataPython |
134858 | <gh_stars>0
#!/bin/python3
import math
import os
import random
import re
import sys
if __name__ == '__main__':
    s = input()
    from collections import Counter
    # Count every character in one pass.  (The original called s.count(c)
    # per distinct character, which is O(n^2) overall.)
    counts = Counter(s)
    # Most frequent first; ties broken alphabetically — identical ordering
    # to the original (total - count, char) ascending sort.
    frequencies = sorted(counts.items(), key=lambda kv: (-kv[1], kv[0]))
    # Print the top three characters with their counts.
    for character, occurrences in frequencies[:3]:
        print(character, occurrences)
3328772 | #!/usr/bin/env python3
# The Expat License
#
# Copyright (c) 2017, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
# Compatibility shims: on Python 3 map the removed ``long``/``xrange``
# builtins onto ``int``/``range`` so the Python 2-style code below runs.
if sys.version_info > (3,):
    long = int
    xrange = range
MOD = 1000000007
# E[b] = 6 * 5**(b-1) (mod MOD), precomputed for every exponent used later.
E = [0, 6]
while len(E) < 3001136:
    E.append((E[-1] * 5) % MOD)
def pascal_sum(n, p):
    """Return (sum over k=0..p of C(n-1, k) * E[n-k]) mod MOD.

    Binomial coefficients are updated incrementally:
    C(n-1, k+1) = C(n-1, k) * (n-1-k) // (k+1), which is always exact.
    """
    b = n
    l_int = long(1)
    C = E[b] * l_int
    for k in xrange(p):
        b -= 1
        # Bug fix: the original used true division ``/``, which under the
        # file's own Python 3 shims produces floats and corrupts the exact
        # big-integer arithmetic; floor division is identical on Python 2.
        l_int = l_int * (n - 1 - k) // (k + 1)
        C += E[b] * l_int
    return int(C % MOD)
if __name__ == "__main__":
    s_n = 0  # running answer, kept reduced mod MOD
    # a0 a1 a2 a3 | a4
    # b0 b1 b2 b3 | b4
    # S[b4] = S[a4] * 6 - (S[a4]-S[a3]) = 5*S[a4]+S[a3]
    primes_s = open("primes.txt").read()
    # ascending prime list; the trailing -1 sentinel never matches an n
    primes = [int(p) for p in (str(primes_s)).split("\n") if len(p) > 0] + [-1]
    def base6(n):
        # NOTE(review): unused helper; ``n/6`` would also break the int
        # recursion under Python 3 (true division) if it were ever called.
        if n == 0:
            return ''
        return base6(n/6) + str(n % 6)
    sums = [6, 6, 6]  # NOTE(review): never read afterwards
    pi = 0  # count of primes <= n so far
    # PS = 1000
    PS = 1  # progress-print stride
    pa = PS
    # for n in xrange(1,50+1):
    for n in xrange(1, 50000000 + 1):
        if primes[0] == n:
            # n is the next prime: consume it and widen the pascal sum
            primes.pop(0)
            pi += 1
        C = pascal_sum(n, pi)
        s_n += C
        s_n %= MOD
        if n == pa:
            pa += PS
            print("C(%d) = %d; S = %d" % (n, C, s_n))
| StarcoderdataPython |
151019 | # coding=utf-8
# Copyright (c) 2016-2018, F5 Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
from f5_openstack_agent.lbaasv2.drivers.bigip.utils import get_filter
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
class ResourceType(Enum):
    u"""Defines supported BIG-IP resource types.

    Each member is mapped by BigIPResourceHelper._resource()/_collection()
    onto the matching F5 SDK endpoint; the numeric values are arbitrary
    local identifiers and are never sent to the device.
    """

    nat = 1
    pool = 2
    sys = 3
    virtual = 4
    member = 5
    folder = 6
    # health monitors
    http_monitor = 7
    https_monitor = 8
    tcp_monitor = 9
    ping_monitor = 10
    node = 11
    # SNAT-related objects
    snat = 12
    snatpool = 13
    snat_translation = 14
    selfip = 15
    rule = 16
    vlan = 17
    arp = 18
    route_domain = 19
    tunnel = 20
    virtual_address = 21
    l7policy = 22
    # profiles
    client_ssl_profile = 23
    server_ssl_profile = 24
    tcp_profile = 25
    # persistence methods
    persistence = 26
    cookie_persistence = 27
    dest_addr_persistence = 28
    hash_persistence = 29
    msrdp_persistence = 30
    sip_persistence = 31
    source_addr_persistence = 32
    ssl_persistence = 33
    universal_persistence = 34
    ssl_cert_file = 35
    http_profile = 36
    oneconnect = 37
class BigIPResourceHelper(object):
u"""Helper class for creating, updating and deleting BIG-IP resources.
Reduces some of the boilerplate that surrounds using the F5 SDK.
Example usage:
bigip = BigIP("10.1.1.1", "admin", "admin")
pool = {"name": "pool1",
"partition": "Common",
"description": "Default pool",
"loadBalancingMode": "round-robin"}
pool_helper = BigIPResourceHelper(ResourceType.pool)
p = pool_helper.create(bigip, pool)
"""
    def __init__(self, resource_type):
        """Initialize a resource helper.

        :param resource_type: a :class:`ResourceType` member selecting which
            BIG-IP endpoint this helper operates on.
        """
        self.resource_type = resource_type
def create(self, bigip, model):
u"""Create/update resource (e.g., pool) on a BIG-IP system.
First checks to see if resource has been created and creates
it if not.
:param bigip: BigIP instance to use for creating resource.
:param model: Dictionary of BIG-IP attributes to add resource. Must
include name and partition.
:returns: created or updated resource object.
"""
resource = self._resource(bigip)
obj = resource.create(**model)
return obj
def exists(self, bigip, name=None, partition=None):
"""Test for the existence of a resource."""
resource = self._resource(bigip)
return resource.exists(name=name, partition=partition)
def delete(self, bigip, name=None, partition=None):
u"""Delete a resource on a BIG-IP system.
Checks if resource exists and deletes it. Returns without error
if resource does not exist.
:param bigip: BigIP instance to use for creating resource.
:param name: Name of resource to delete.
:param partition: Partition name for resou
"""
resource = self._resource(bigip)
if resource.exists(name=name, partition=partition):
obj = resource.load(name=name, partition=partition)
obj.delete()
def load(self, bigip, name=None, partition=None):
u"""Retrieve a BIG-IP resource from a BIG-IP.
Populates a resource object with attributes for instance on a
BIG-IP system.
:param bigip: BigIP instance to use for creating resource.
:param name: Name of resource to load.
:param partition: Partition name for resource.
:returns: created or updated resource object.
"""
resource = self._resource(bigip)
return resource.load(name=name, partition=partition)
def update(self, bigip, model):
u"""Update a resource (e.g., pool) on a BIG-IP system.
Modifies a resource on a BIG-IP system using attributes
defined in the model object.
:param bigip: BigIP instance to use for creating resource.
:param model: Dictionary of BIG-IP attributes to update resource.
Must include name and partition in order to identify resource.
"""
partition = None
if "partition" in model:
partition = model["partition"]
resource = self.load(bigip, name=model["name"], partition=partition)
resource.modify(**model)
return resource
    def get_resources(self, bigip, partition=None,
                      expand_subcollections=False):
        u"""Retrieve a collection of BIG-IP resources of this helper's type.

        :param bigip: BigIP instance to read from.
        :param partition: if given, filter the collection to this partition.
        :param expand_subcollections: ask the device to inline
            sub-collections (only honoured when a partition filter is set).
        :returns: list of resource objects (possibly empty).
        """
        resources = []
        try:
            collection = self._collection(bigip)
        except KeyError as err:
            # NOTE(review): ``err.message`` is a Python 2-ism; under Python 3
            # this line itself raises AttributeError — confirm the runtime.
            LOG.exception(err.message)
            raise err
        if collection:
            params = {'params': ''}
            if partition:
                # get_filter may return a dict or a query string depending on
                # the BIG-IP version; both shapes are handled below.
                params['params'] = get_filter(
                    bigip, 'partition', 'eq', partition)
                if expand_subcollections and \
                        isinstance(params['params'], dict):
                    params['params']['expandSubcollections'] = 'true'
                elif expand_subcollections:
                    params['params'] += '&expandSubCollections=true'
                resources = collection.get_collection(requests_params=params)
            else:
                resources = collection.get_collection()
        return resources
def exists_in_collection(self, bigip, name, partition='Common'):
collection = self.get_resources(bigip, partition='Common')
for item in collection:
if item.name == name:
return True
return False
    def _resource(self, bigip):
        """Map self.resource_type onto the matching F5 SDK resource object.

        Raises KeyError for a ResourceType with no entry in this table.
        """
        return {
            ResourceType.nat: lambda bigip: bigip.tm.ltm.nats.nat,
            ResourceType.pool: lambda bigip: bigip.tm.ltm.pools.pool,
            ResourceType.sys: lambda bigip: bigip.tm.sys,
            ResourceType.virtual: lambda bigip: bigip.tm.ltm.virtuals.virtual,
            ResourceType.member: lambda bigip: bigip.tm.ltm.pools.pool.member,
            ResourceType.folder: lambda bigip: bigip.tm.sys.folders.folder,
            ResourceType.http_monitor:
                lambda bigip: bigip.tm.ltm.monitor.https.http,
            ResourceType.https_monitor:
                lambda bigip: bigip.tm.ltm.monitor.https_s.https,
            ResourceType.tcp_monitor:
                lambda bigip: bigip.tm.ltm.monitor.tcps.tcp,
            ResourceType.ping_monitor:
                lambda bigip: bigip.tm.ltm.monitor.gateway_icmps.gateway_icmp,
            ResourceType.node: lambda bigip: bigip.tm.ltm.nodes.node,
            ResourceType.snat: lambda bigip: bigip.tm.ltm.snats.snat,
            ResourceType.snatpool:
                lambda bigip: bigip.tm.ltm.snatpools.snatpool,
            ResourceType.snat_translation:
                lambda bigip: bigip.tm.ltm.snat_translations.snat_translation,
            ResourceType.selfip:
                lambda bigip: bigip.tm.net.selfips.selfip,
            ResourceType.rule:
                lambda bigip: bigip.tm.ltm.rules.rule,
            ResourceType.vlan:
                lambda bigip: bigip.tm.net.vlans.vlan,
            ResourceType.arp:
                lambda bigip: bigip.tm.net.arps.arp,
            ResourceType.route_domain:
                lambda bigip: bigip.tm.net.route_domains.route_domain,
            ResourceType.tunnel:
                lambda bigip: bigip.tm.net.tunnels.tunnels.tunnel,
            ResourceType.virtual_address:
                lambda bigip: bigip.tm.ltm.virtual_address_s.virtual_address,
            ResourceType.l7policy:
                lambda bigip: bigip.tm.ltm.policys.policy,
            ResourceType.client_ssl_profile:
                lambda bigip: bigip.tm.ltm.profile.client_ssls.client_ssl,
            ResourceType.server_ssl_profile:
                lambda bigip: bigip.tm.ltm.profile.server_ssls.server_ssl,
            ResourceType.tcp_profile:
                lambda bigip: bigip.tm.ltm.profile.tcps.tcp,
            ResourceType.persistence:
                lambda bigip: bigip.tm.ltm.persistence,
            ResourceType.cookie_persistence:
                lambda bigip: bigip.tm.ltm.persistence.cookies.cookie,
            ResourceType.dest_addr_persistence:
                lambda bigip: bigip.tm.ltm.persistence.dest_addrs.dest_addr,
            ResourceType.hash_persistence:
                lambda bigip: bigip.tm.ltm.persistence.hashs.hash,
            ResourceType.msrdp_persistence:
                lambda bigip: bigip.tm.ltm.persistence.msrdp,
            ResourceType.sip_persistence:
                lambda bigip: bigip.tm.ltm.persistence.sips,
            ResourceType.source_addr_persistence:
                lambda bigip: bigip.tm.ltm.persistence.source_addr,
            ResourceType.ssl_persistence:
                lambda bigip: bigip.tm.ltm.persistence.ssl,
            ResourceType.universal_persistence:
                lambda bigip: bigip.tm.ltm.persistence.universal,
            ResourceType.ssl_cert_file:
                lambda bigip: bigip.tm.sys.file.ssl_certs.ssl_cert
        }[self.resource_type](bigip)
    def _collection(self, bigip):
        """Return the f5-sdk collection organizing object for this resource type.

        :param bigip: ManagementRoot connection object for a BIG-IP device.
        :returns: SDK collection object matching ``self.resource_type``.
        :raises KeyError: if the resource type has no collection mapping.
        """
        # Map each supported ResourceType to the SDK attribute chain that
        # yields its collection. Lambdas defer attribute access until a
        # concrete bigip connection is supplied.
        collection_map = {
            ResourceType.nat: lambda bigip: bigip.tm.ltm.nats,
            ResourceType.pool: lambda bigip: bigip.tm.ltm.pools,
            ResourceType.sys: lambda bigip: bigip.tm.sys,
            ResourceType.virtual: lambda bigip: bigip.tm.ltm.virtuals,
            ResourceType.member: lambda bigip: bigip.tm.ltm.pools.pool.member,
            ResourceType.folder: lambda bigip: bigip.tm.sys.folders,
            ResourceType.http_monitor:
                lambda bigip: bigip.tm.ltm.monitor.https,
            ResourceType.https_monitor:
                lambda bigip: bigip.tm.ltm.monitor.https_s,
            ResourceType.tcp_monitor:
                lambda bigip: bigip.tm.ltm.monitor.tcps,
            ResourceType.ping_monitor:
                lambda bigip: bigip.tm.ltm.monitor.gateway_icmps,
            ResourceType.node: lambda bigip: bigip.tm.ltm.nodes,
            ResourceType.snat: lambda bigip: bigip.tm.ltm.snats,
            ResourceType.snatpool:
                lambda bigip: bigip.tm.ltm.snatpools,
            ResourceType.snat_translation:
                lambda bigip: bigip.tm.ltm.snat_translations,
            ResourceType.selfip:
                lambda bigip: bigip.tm.net.selfips,
            ResourceType.rule:
                lambda bigip: bigip.tm.ltm.rules,
            ResourceType.route_domain:
                lambda bigip: bigip.tm.net.route_domains,
            ResourceType.vlan:
                lambda bigip: bigip.tm.net.vlans,
            ResourceType.arp:
                lambda bigip: bigip.tm.net.arps,
            ResourceType.tunnel:
                lambda bigip: bigip.tm.net.tunnels.tunnels,
            ResourceType.virtual_address:
                lambda bigip: bigip.tm.ltm.virtual_address_s,
            ResourceType.l7policy:
                lambda bigip: bigip.tm.ltm.policys,
            ResourceType.client_ssl_profile:
                lambda bigip: bigip.tm.ltm.profile.client_ssls,
            ResourceType.server_ssl_profile:
                lambda bigip: bigip.tm.ltm.profile.server_ssls,
            ResourceType.tcp_profile:
                lambda bigip: bigip.tm.ltm.profile.tcps,
            ResourceType.persistence:
                lambda bigip: bigip.tm.ltm.persistence,
            ResourceType.cookie_persistence:
                lambda bigip: bigip.tm.ltm.persistence.cookies,
            ResourceType.dest_addr_persistence:
                lambda bigip: bigip.tm.ltm.persistence.dest_addrs,
            ResourceType.hash_persistence:
                lambda bigip: bigip.tm.ltm.persistence.hashs,
            ResourceType.msrdp_persistence:
                lambda bigip: bigip.tm.ltm.persistence.msrdps,
            ResourceType.sip_persistence:
                lambda bigip: bigip.tm.ltm.persistence.sips,
            ResourceType.source_addr_persistence:
                lambda bigip: bigip.tm.ltm.persistence.source_addrs,
            ResourceType.ssl_persistence:
                lambda bigip: bigip.tm.ltm.persistence.ssls,
            ResourceType.universal_persistence:
                lambda bigip: bigip.tm.ltm.persistence.universals,
            ResourceType.ssl_cert_file:
                lambda bigip: bigip.tm.sys.file.ssl_certs,
            ResourceType.http_profile:
                lambda bigip: bigip.tm.ltm.profile.https,
            ResourceType.oneconnect:
                lambda bigip: bigip.tm.ltm.profile.one_connects
        }
        if self.resource_type in collection_map:
            return collection_map[self.resource_type](bigip)
        else:
            LOG.error("Error attempting to get collection for "
                      "resource %s", self.resource_type)
            raise KeyError("No collection available for %s" %
                           (self.resource_type))
def get_stats(self, bigip, name=None, partition=None, stat_keys=[]):
"""Returns dictionary of stats.
Use by calling with an array of stats to get from resource. Return
value will be a dict with key/value pairs. The stat key will only
be included in the return dict if the resource includes that stat.
:param bigip: BIG-IP to get stats from.
:param name: name of resource object.
:param partition: partition where to get resource.
:param stat_keys: Array of strings that define stats to collect.
:return: dictionary with key/value pairs where key is string
defined in input array, if present in resource stats, and value
as the value of resource stats 'value' key.
"""
collected_stats = {}
# get resource, then its stats
if self.exists(bigip, name=name, partition=partition):
resource = self.load(bigip, name=name, partition=partition)
collected_stats = self.collect_stats(resource, stat_keys)
return collected_stats
def collect_stats(self, resource, stat_keys=[]):
collected_stats = {}
resource_stats = resource.stats.load()
stat_entries = resource_stats.entries
# Difference between 11.6 and 12.1. Stats in 12.1 are embedded
# in nestedStats. In 11.6, they are directly accessible in entries.
if stat_keys[0] not in stat_entries:
# find nestedStats
for key in stat_entries.keys():
value = stat_entries.get(key, None)
if 'nestedStats' in value:
stat_entries = value['nestedStats']['entries']
# add stats defined in input stats array
for stat_key in stat_keys:
if stat_key in stat_entries:
if 'value' in stat_entries[stat_key]:
collected_stats[stat_key] = stat_entries[stat_key][
'value']
elif 'description' in stat_entries[stat_key]:
collected_stats[stat_key] = \
stat_entries[stat_key]['description']
return collected_stats
| StarcoderdataPython |
3203198 | <gh_stars>10-100
# Copyright 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test 'list'.
"""
# isort: FIRSTPARTY
from dbus_client_gen import DbusClientUniqueResultError
# isort: LOCAL
from stratis_cli import StratisCliErrorCodes
from .._misc import RUNNER, TEST_RUNNER, SimTestCase, device_name_list
_DEVICE_STRATEGY = device_name_list(1)
class ListTestCase(SimTestCase):
    """
    Test listing devices for a non-existent pool.
    """

    # CLI menu prefix shared by every test in this case; --propagate makes
    # errors surface as exceptions instead of exit codes.
    _MENU = ["--propagate", "blockdev", "list"]
    _POOLNAME = "deadpool"

    def test_list(self):
        """
        Listing the devices must fail since the pool does not exist.
        """
        command_line = self._MENU + [self._POOLNAME]
        self.check_error(
            DbusClientUniqueResultError, command_line, StratisCliErrorCodes.ERROR
        )

    def test_list_empty(self):
        """
        Listing the devices should succeed without a pool name specified.
        The list should be empty.
        """
        command_line = self._MENU
        TEST_RUNNER(command_line)

    def test_list_default(self):
        """
        Blockdev subcommand should default to listing all blockdevs for all
        pools. The list should be empty.
        """
        # Drop the trailing "list" so only the bare "blockdev" subcommand runs.
        command_line = self._MENU[:-1]
        TEST_RUNNER(command_line)
class List2TestCase(SimTestCase):
    """
    Test listing devices in an existing pool.
    """

    _MENU = ["--propagate", "blockdev", "list"]
    _POOLNAME = "deadpool"

    def setUp(self):
        """
        Start the stratisd daemon with the simulator.
        """
        super().setUp()
        # Create the pool up front so the listing tests operate on an
        # existing pool with at least one block device.
        command_line = ["pool", "create"] + [self._POOLNAME] + _DEVICE_STRATEGY()
        RUNNER(command_line)

    def test_list(self):
        """
        Listing the devices should succeed.
        """
        command_line = self._MENU + [self._POOLNAME]
        TEST_RUNNER(command_line)

    def test_list_empty(self):
        """
        Listing the devices should succeed without a pool name specified.
        """
        command_line = self._MENU
        TEST_RUNNER(command_line)

    def test_list_default(self):
        """
        Blockdev subcommand should default to listing all blockdevs for all
        pools.
        """
        # Drop the trailing "list" so only the bare "blockdev" subcommand runs.
        command_line = self._MENU[:-1]
        TEST_RUNNER(command_line)
| StarcoderdataPython |
72040 | """Forward and back projector for PET data reconstruction"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2018"
#------------------------------------------------------------------------------
import numpy as np
import sys
import os
import logging
import petprj
from niftypet.nipet.img import mmrimg
from niftypet.nipet import mmraux
#=========================================================================
# forward projector
#-------------------------------------------------------------------------
def frwd_prj(im, scanner_params, isub=np.array([-1], dtype=np.int32), dev_out=False, attenuation=False):
    ''' Calculate forward projection (a set of sinograms) for the provided input image.
        Arguments:
        im -- input image (can be emission or mu-map image).
        scanner_params -- dictionary of all scanner parameters, containing scanner constants,
            transaxial and axial look up tables (LUT).
        isub -- array of transaxial indices of all sinograms (angles x bins) used for subsets.
            when the first element is negative, all transaxial bins are used (as in pure EM-ML).
        dev_out -- if True, output sinogram is in the device form, i.e., with two dimensions
            (# bins/angles, # sinograms) instead of default three (# sinograms, # bins, # angles).
        attenuation -- controls whether emission or LOR attenuation probability sinogram
            is calculated; the default is False, meaning emission sinogram; for attenuation
            calculations (attenuation=True), the exponential of the negative of the integrated
            mu-values along LOR path is taken at the end.
        Raises:
        ValueError -- when the span setting or the image shape is unsupported.
    '''
    log = logging.getLogger(__name__)

    # Get particular scanner parameters: Constants, transaxial and axial LUTs
    Cnt = scanner_params['Cnt']
    txLUT = scanner_params['txLUT']
    axLUT = scanner_params['axLUT']

    #>choose between attenuation forward projection (mu-map is the input)
    #>or the default for emission image forward projection
    if attenuation:
        att = 1
    else:
        att = 0

    if Cnt['SPN'] == 1:
        # number of rings calculated for the given ring range (optionally we can use only part of the axial FOV)
        NRNG_c = Cnt['RNG_END'] - Cnt['RNG_STRT']
        # number of sinos in span-1
        nsinos = NRNG_c**2
        # correct for the max. ring difference in the full axial extent (don't use ring range (1,63) as for this case no correction)
        if NRNG_c == 64:
            nsinos -= 12
    elif Cnt['SPN'] == 11:
        nsinos = Cnt['NSN11']
    elif Cnt['SPN'] == 0:
        nsinos = Cnt['NSEG0']
    else:
        # Bug fix: previously an unsupported span left `nsinos` unbound,
        # producing a confusing NameError later on.
        raise ValueError('unsupported span setting SPN=%s' % Cnt['SPN'])

    if im.shape[0]==Cnt['SO_IMZ'] and im.shape[1]==Cnt['SO_IMY'] and im.shape[2]==Cnt['SO_IMX']:
        ims = mmrimg.convert2dev(im, Cnt)
    elif im.shape[0]==Cnt['SZ_IMX'] and im.shape[1]==Cnt['SZ_IMY'] and im.shape[2]==Cnt['SZ_IMZ']:
        ims = im
    elif im.shape[0]==Cnt['rSO_IMZ'] and im.shape[1]==Cnt['SO_IMY'] and im.shape[2]==Cnt['SO_IMX']:
        ims = mmrimg.convert2dev(im, Cnt)
    elif im.shape[0]==Cnt['SZ_IMX'] and im.shape[1]==Cnt['SZ_IMY'] and im.shape[2]==Cnt['rSZ_IMZ']:
        ims = im
    else:
        # Bug fix: previously this branch only logged the error and fell
        # through, crashing with NameError on the undefined `ims`.
        log.error('wrong image size; it has to be one of these: (z,y,x) = (127,344,344) or (y,x,z) = (320,320,128)')
        raise ValueError('wrong image size; it has to be one of these: (z,y,x) = (127,344,344) or (y,x,z) = (320,320,128)')

    log.debug('number of sinos:%d' % nsinos)

    #predefine the sinogram. if subsets are used then only preallocate those bins which will be used.
    if isub[0] < 0:
        sinog = np.zeros((txLUT['Naw'], nsinos), dtype=np.float32)
    else:
        sinog = np.zeros((len(isub), nsinos), dtype=np.float32)

    # --------------------
    petprj.fprj(sinog, ims, txLUT, axLUT, isub, Cnt, att)
    # --------------------
    # get the sinogram bins in a proper sinogram
    sino = np.zeros((txLUT['Naw'], nsinos), dtype=np.float32)
    if isub[0] >= 0:
        sino[isub, :] = sinog
    else:
        sino = sinog

    # put the gaps back to form displayable sinogram
    if not dev_out:
        sino = mmraux.putgaps(sino, txLUT, Cnt)

    return sino
#=========================================================================
# back projector
#-------------------------------------------------------------------------
def back_prj(sino, scanner_params, isub=np.array([-1], dtype=np.int32)):
    ''' Calculate back projection of the input sinogram to image space.
        Arguments:
        sino -- input emission sinogram to be back projected to the image space.
        scanner_params -- dictionary of all scanner parameters, containing scanner constants,
            transaxial and axial look up tables (LUT).
        isub -- array of transaxial indices of all sinograms (angles x bins) used for subsets;
            when the first element is negative, all transaxial bins are used (as in pure EM-ML).
        Raises:
        ValueError -- when the span setting or the sinogram shape is unsupported.
    '''
    # Get particular scanner parameters: Constants, transaxial and axial LUTs
    Cnt = scanner_params['Cnt']
    txLUT = scanner_params['txLUT']
    axLUT = scanner_params['axLUT']

    if Cnt['SPN'] == 1:
        # number of rings calculated for the given ring range (optionally we can use only part of the axial FOV)
        NRNG_c = Cnt['RNG_END'] - Cnt['RNG_STRT']
        # number of sinos in span-1
        nsinos = NRNG_c**2
        # correct for the max. ring difference in the full axial extent (don't use ring range (1,63) as for this case no correction)
        if NRNG_c == 64:
            nsinos -= 12
    elif Cnt['SPN'] == 11:
        nsinos = Cnt['NSN11']
    elif Cnt['SPN'] == 0:
        nsinos = Cnt['NSEG0']
    else:
        # Bug fix: previously an unsupported span left `nsinos` unbound,
        # producing a confusing NameError later on.
        raise ValueError('unsupported span setting SPN=%s' % Cnt['SPN'])

    #> check first the Siemens default sinogram;
    #> for this default shape only full sinograms are expected--no subsets.
    if len(sino.shape) == 3:
        if sino.shape[0]!=nsinos or sino.shape[1]!=Cnt['NSANGLES'] or sino.shape[2]!=Cnt['NSBINS']:
            raise ValueError('Unexpected sinogram array dimensions/shape for Siemens defaults.')
        sinog = mmraux.remgaps(sino, txLUT, Cnt)
    elif len(sino.shape) == 2:
        if isub[0]<0 and sino.shape[0]!=txLUT["Naw"]:
            raise ValueError('Unexpected number of transaxial elements in the full sinogram.')
        elif isub[0]>=0 and sino.shape[0]!=len(isub):
            raise ValueError('Unexpected number of transaxial elements in the subset sinogram.')
        #> check if the number of sinograms is correct
        if sino.shape[1] != nsinos:
            raise ValueError('Inconsistent number of sinograms in the array.')
        #> when found the dimensions/shape are fine:
        sinog = sino
    else:
        raise ValueError('Unexpected shape of the input sinogram.')

    #predefine the output image depending on the number of rings used
    if Cnt['SPN'] == 1 and 'rSZ_IMZ' in Cnt:
        nvz = Cnt['rSZ_IMZ']
    else:
        nvz = Cnt['SZ_IMZ']
    bimg = np.zeros((Cnt['SZ_IMX'], Cnt['SZ_IMY'], nvz), dtype=np.float32)

    #> run back-projection
    petprj.bprj(bimg, sinog, txLUT, axLUT, isub, Cnt)

    #> change from GPU optimised image dimensions to the standard Siemens shape
    bimg = mmrimg.convert2e7(bimg, Cnt)

    return bimg
#-------------------------------------------------------------------------
| StarcoderdataPython |
4824551 | from .case import Case
from .abstract import Point, Form
from . import colors
import numpy as np
import shelve
from pygame.locals import *
class Painter:
    """Interactive painting app: a color board plus one or more paint canvases."""

    def __init__(self, *args, **kwargs):
        """Create a painter with its color board and an initial paint.

        args/kwargs are forwarded to the initial Paint (position, size).
        """
        self.paints = [Board(), Paint(*args, **kwargs)]
        self.paint_brush = PaintBrush()
        # Index of the active paint; None would mean "no painting yet".
        self.painting = 0

    def __call__(self, surface):
        """Main loop of the painter."""
        while surface.open:
            surface.check()
            surface.control()
            surface.clear()
            surface.show()
            self.show(surface)
            self.control(surface)
            surface.flip()

    def control(self, surface):
        """Control the painter: report state, then handle paint input."""
        # The cursor is computed in paint() where it is used; the previous
        # duplicate computation here was dead code and has been removed.
        self.print(surface)
        self.paint(surface)

    def print(self, surface):
        """Print the state of the painter on the surface."""
        # Idiom fix: identity comparison with None (was `== None`).
        if self.painting is None:
            surface.print("Create a new painting.", [-10, 12])

    def paint(self, surface):
        """Paint using the surface and the paint under the cursor."""
        keys = surface.press()
        click = surface.click()
        cursor = surface.point()
        # Snap the cursor to the nearest case boundary.
        cursor = [round(c + 1 / 2) for c in cursor]
        self.paint_brush.setPosition(cursor)
        p = self.getPaint(cursor)
        if p is not None:
            c = self.paints[p].getCase(cursor)
        if keys[K_r]:
            self.paint_brush.setRandomColor()
        if keys[K_a]:
            self.paint_brush.lightenColor()
        if keys[K_b]:
            self.paint_brush.darkenColor()
        if keys[K_f]:
            self.refreshBoard()
        if p is None:
            # Clicking outside every paint starts a new one there.
            if click:
                self.createPaint(cursor)
            return
        if keys[K_s]:
            self.save(self.paints[p])
        if keys[K_l]:
            self.load(p)
        if c is None:
            return
        if keys[K_c]:
            self.paint_brush.copyColor(self.paints[p].cases[c])
        if not click:
            return
        self.paint_brush.paint(surface, self.paints[p], c)

    def createPaint(self, position):
        """Create a new 20x20 paint anchored at the given position."""
        size = [20, 20]
        self.paints.append(Paint(position, size))

    def save(self, paint):
        """Persist the given paint under the fixed 'test' shelve key."""
        print("File saved")
        with shelve.open('paints') as p:
            p["test"] = paint

    def load(self, p):
        """Replace paint index p with the paint stored under 'test'."""
        print("File loaded")
        with shelve.open("paints") as paints:
            paint = paints["test"]
            self.paints[p] = paint

    def refreshBoard(self):
        """Change the colors of the board."""
        self.paints[0].generate()

    def show(self, surface):
        """Show the paints of the painter, then the brush on top."""
        for paint in self.paints:
            paint.show(surface)
        self.paint_brush.show(surface)

    def getPaint(self, position):
        """Return the index of the paint containing position, if any."""
        for i in range(len(self.paints)):
            if position in self.paints[i]:
                return i
class PaintBrush:
    """Brush holding the current color and position used to paint cases."""

    def __init__(self, position=None, size=None, color=colors.GREEN):
        """Create a paint brush for the painter."""
        # None defaults avoid sharing a mutable list between instances.
        self.position = [0, 0] if position is None else position
        self.size = [1, 1] if size is None else size
        self.color = color

    def paint(self, surface, paint, c):
        """Color case index c of the given paint with the brush color."""
        paint.cases[c].color = self.color

    def copyColor(self, case):
        """Copy the color of the case."""
        self.color = case.color

    def setRandomColor(self):
        """Set the color of the brush to a random color."""
        self.color = colors.random()

    def lightenColor(self, surface=None):
        """Lighten the brush color.

        `surface` is unused; it is kept optional for backward compatibility
        with the old required-parameter signature (callers invoke this with
        no arguments).
        """
        self.color = colors.lighten(self.color)

    def darkenColor(self, surface=None):
        """Darken the brush color (`surface` unused, kept for compatibility)."""
        self.color = colors.darken(self.color)

    # Bug fix: the method was originally defined as all-lowercase
    # `darkencolor` while callers use `darkenColor`; keep the old name as
    # an alias for backward compatibility.
    darkencolor = darkenColor

    def setPosition(self, position):
        """Set the position of the brush."""
        self.position = position

    def show(self, surface):
        """Show the paint brush outline on the surface."""
        x, y = self.position
        case = Case((x - 1, y - 1), size=self.size, color=self.color)
        case.show(surface, fill=False, side_color=colors.RED)
class Paint:
    """Paint object reserves an area to draw objects in."""

    @classmethod
    def random(cls, position=None, size=None):
        """Create a random paint."""
        position = [0, 0] if position is None else position
        size = [10, 10] if size is None else size
        return cls(position, size)

    def __init__(self, position=None, size=None):
        """Create a paint covering `size` cases anchored at `position`."""
        # None defaults avoid sharing a mutable list between instances.
        self.position = [0, 0] if position is None else position
        self.size = [10, 10] if size is None else size
        self.cases = []
        self.generate()

    def getCorners(self):
        """Return the (xmin, ymin, xmax, ymax) corners of the paint."""
        px, py = self.position
        sx, sy = self.size
        corners = (px, py, px + sx, py + sy)
        return corners

    def generate(self):
        """Generate blank (white) cases all over the paint area."""
        cases = []
        xmin, ymin, xmax, ymax = self.getCorners()
        for y in np.arange(ymin, ymax):
            for x in np.arange(xmin, xmax):
                case = Case([float(x), float(y)], color=colors.WHITE)
                cases.append(case)
        self.cases = cases

    def __contains__(self, position):
        """Determine if the point is inside the paint rectangle."""
        x, y = position
        xmin, ymin, xmax, ymax = self.getCorners()
        # Bug fix: the y coordinate was never compared against the vertical
        # bounds (the original expression read `ymin <= ymax`).
        return (xmin <= x <= xmax) and (ymin <= y <= ymax)

    def getCase(self, position):
        """Return the index of the case containing position, if any."""
        for i in range(len(self.cases)):
            if position in self.cases[i]:
                return i

    def getForm(self):
        """Return the form corresponding to the area of the painting."""
        xmin, ymin, xmax, ymax = self.getCorners()
        ps = [Point(xmin, ymin), Point(xmax, ymin),
              Point(xmax, ymax), Point(xmin, ymax)]
        return Form(ps)

    def show(self, surface):
        """Show the paint by showing all its cases and its outline."""
        f = self.getForm()
        for case in self.cases:
            case.show(surface, side_color=colors.WHITE)
        f.side_color = colors.WHITE
        f.side_width = 3
        f.show(surface)

    def save(self):
        """Save the paint under the 'test' shelve key."""
        # Bug fix: `paints[test]` used the undefined name `test`; use the
        # same string key that Painter.save/load use.
        with shelve.open('paints') as paints:
            paints["test"] = self
class Board(Paint):
    """Single-column palette of randomly colored cases shown beside the canvas."""

    def __init__(self):
        """Create the color-board accessory for the painter."""
        self.position = [-12, -10]
        self.size = [1, 20]
        self.generate()

    def generate(self):
        """Fill the board column with randomly colored cases."""
        column_x = self.position[0]
        height = self.size[1]
        fresh_cases = []
        for row in range(height):
            fresh_cases.append(
                Case([column_x, row - height // 2], color=colors.random()))
        self.cases = fresh_cases

    def show(self, surface):
        """Draw every case, the black outline, and the 'Board' label."""
        outline = self.getForm()
        for case in self.cases:
            case.show(surface, side_color=colors.BLACK)
        outline.side_color = colors.BLACK
        outline.side_width = 3
        outline.show(surface)
        outline[0].showText(surface, "Board")
if __name__ == "__main__":
    # Manual demo: open a drawing surface and run the painter's main loop.
    from .context import Surface
    from .zone import Zone
    surface = Surface(name="Painter")
    painter = Painter([0, 0], [8, 8])
    #print([0,0] in painter.paints[0])
    painter(surface)
| StarcoderdataPython |
4828025 | <gh_stars>0
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
def get_device():
    """Return the torch device to use: the first CUDA GPU if available, else CPU."""
    if not torch.cuda.is_available():
        # No GPU present -- fall back to the CPU.
        return torch.device('cpu')
    return torch.device('cuda:0')
class PytorchDataset(Dataset):
    """
    Pytorch dataset wrapping feature/target arrays as float tensors.

    Attributes
    ----------
    X_tensor : torch.Tensor
        Features tensor
    y_tensor : torch.Tensor
        Target tensor
    """

    def __init__(self, X, y):
        self.X_tensor = self.to_tensor(X)
        self.y_tensor = self.to_tensor(y)

    def __getitem__(self, index):
        """Return the (features, target) pair at the given index."""
        features = self.X_tensor[index]
        target = self.y_tensor[index]
        return features, target

    def __len__(self):
        """Return the number of observations."""
        return len(self.X_tensor)

    def to_tensor(self, data):
        """Convert array-like data (e.g. a Pandas Series) to a float tensor."""
        as_array = np.array(data)
        return torch.Tensor(as_array)
class PytorchMultiClass(nn.Module):
    """Single-hidden-layer softmax classifier over 105 classes."""

    def __init__(self, num_features):
        super(PytorchMultiClass, self).__init__()
        # Attribute names are part of the checkpoint format -- keep them.
        self.layer_1 = nn.Linear(num_features, 32)
        self.layer_out = nn.Linear(32, 105)
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        hidden = F.relu(self.layer_1(x))
        # Dropout is active only while self.training is True.
        hidden = F.dropout(hidden, training=self.training)
        logits = self.layer_out(hidden)
        return self.softmax(logits)
def train_classification(train_data, model, criterion, optimizer, batch_size, device, scheduler=None, generate_batch=None):
    """Train a Pytorch multi-class classification model for one epoch.

    Parameters
    ----------
    train_data : torch.utils.data.Dataset
        Pytorch dataset
    model: torch.nn.Module
        Pytorch Model
    criterion: function
        Loss function
    optimizer: torch.optim
        Optimizer
    batch_size : int
        Number of observations per batch
    device : str
        Name of the device used for the model
    scheduler : torch.optim.lr_scheduler
        Pytorch Scheduler used for updating learning rate
    generate_batch : function
        Collate function defining required pre-processing steps

    Returns
    -------
    Float
        Average loss per observation
    Float:
        Accuracy Score
    """

    # Set model to training mode
    model.train()
    train_loss = 0
    train_acc = 0

    # Create data loader
    data = DataLoader(train_data, batch_size=batch_size, shuffle=True, collate_fn=generate_batch)

    # Iterate through data by batch of observations
    for feature, target_class in data:

        # Reset gradients
        optimizer.zero_grad()

        # Load data to specified device
        feature, target_class = feature.to(device), target_class.to(device)

        # Make predictions
        output = model(feature)

        # Calculate loss for given batch
        loss = criterion(output, target_class.long())

        # Calculate global loss
        train_loss += loss.item()

        # Calculate gradients
        loss.backward()

        # Update Weights
        optimizer.step()

        # Calculate global accuracy (count of correct argmax predictions)
        train_acc += (output.argmax(1) == target_class).sum().item()

    # Adjust the learning rate once per epoch, after all batches
    if scheduler:
        scheduler.step()

    return train_loss / len(train_data), train_acc / len(train_data)
def test_classification(test_data, model, criterion, batch_size, device, generate_batch=None):
    """Calculate performance of a Pytorch multi-class classification model.

    Parameters
    ----------
    test_data : torch.utils.data.Dataset
        Pytorch dataset
    model: torch.nn.Module
        Pytorch Model
    criterion: function
        Loss function
    batch_size : int
        Number of observations per batch
    device : str
        Name of the device used for the model
    generate_batch : function
        Collate function defining required pre-processing steps

    Returns
    -------
    Float
        Average loss per observation
    Float:
        Accuracy Score
    """

    # Set model to evaluation mode (disables dropout etc.)
    model.eval()
    test_loss = 0
    test_acc = 0

    # Create data loader
    data = DataLoader(test_data, batch_size=batch_size, collate_fn=generate_batch)

    # Iterate through data by batch of observations
    for feature, target_class in data:

        # Load data to specified device
        feature, target_class = feature.to(device), target_class.to(device)

        # Set no update to gradients
        with torch.no_grad():

            # Make predictions
            output = model(feature)

            # Calculate loss for given batch
            loss = criterion(output, target_class.long())

            # Calculate global loss
            test_loss += loss.item()

            # Calculate global accuracy (count of correct argmax predictions)
            test_acc += (output.argmax(1) == target_class).sum().item()

    return test_loss / len(test_data), test_acc / len(test_data)
103252 | import os
import numpy as np
from data.dataset import VoxelizationDataset, DatasetPhase
class S3DISDataset(VoxelizationDataset):
category = ['wall', 'floor', 'beam', 'chair', 'sofa', 'table',
'door', 'window', 'bookcase', 'column', 'clutter', 'ceiling', 'board']
CLIP_SIZE = None
CLIP_BOUND = None
LOCFEAT_IDX = 2
ROTATION_AXIS = 'z'
NUM_LABELS = 12
# Voxelization arguments
CLIP_BOUND = None
TEST_CLIP_BOUND = None
CLIP_BOUND = None
IGNORE_LABELS = [] # remove stairs, following SegCloud
# Augmentation arguments
ELASTIC_DISTORT_PARAMS = ((20, 100), (80, 320))
ROTATION_AUGMENTATION_BOUND = ((-np.pi / 32, np.pi / 32), (-np.pi / 32, np.pi / 32), (-np.pi, np.pi))
SCALE_AUGMENTATION_BOUND = (0.8, 1.2)
TRANSLATION_AUGMENTATION_RATIO_BOUND = ((-0.2, 0.2), (-0.2, 0.2), (-0.05, 0.05))
def __init__(self, config, prevoxel_transform=None, input_transform=None, target_transform=None, cache=False, augment_data=True, elastic_distortion=False, phase=DatasetPhase.Train):
voxel_size = 0.02
if phase == DatasetPhase.Train:
data_root = os.path.join(config["data_path"], 'train')
voxel_size = config["train_voxel_size"]
elif phase == DatasetPhase.Val:
data_root = os.path.join(config["data_path"], 'val')
voxel_size = config["val_voxel_size"]
VoxelizationDataset.__init__(
self,
os.listdir(data_root),
data_root=data_root,
input_transform=input_transform,
target_transform=target_transform,
ignore_label=config["ignore_label"],
return_transformation=config["return_transformation"],
augment_data=augment_data,
elastic_distortion=elastic_distortion,
voxel_size=voxel_size)
| StarcoderdataPython |
62590 | from typing import Generator
import pytest
from fastapi.testclient import TestClient
from app.main import app
@pytest.fixture(scope="module")
def test_data():
    """Module-scoped fixture: a minimal payload identifying one user."""
    sample_data = {"user_handle": 1}
    return sample_data
@pytest.fixture()
def client() -> Generator:
    """Yield a FastAPI TestClient and reset dependency overrides afterwards."""
    with TestClient(app) as _client:
        yield _client
    # Ensure overrides injected by individual tests do not leak between tests.
    app.dependency_overrides = {}
| StarcoderdataPython |
4840196 | <gh_stars>10-100
#! /usr/bin/python3
from Parser import Parser
from Commands import Commands
from Data import Data
from Code import Code
from Resolver import Resolver
from Configuration import Configuration
from Generator import Generator
from Operators import Operators
from sys import argv
if __name__ == "__main__":
    # Build the assembler pipeline: operator table, configuration, data and
    # code stores, command handlers, and the parser that drives them.
    operators = Operators()
    configuration = Configuration()
    data = Data(configuration)
    code = Code(data, configuration, operators)
    commands = Commands(data, code)
    parser = Parser(commands)
    # argv[1] is the source file to assemble.
    parser.parse_file(argv[1])
    # Won't need these after Parsing and Allocation.
    # So let's enforce that for our own discipline.
    # State is carried in Code and Data.
    del parser
    del commands
    print("Parsing and Allocation Done")
    # Dump initial state of code and data
    # immediately after Allocation
    configuration.filedump("LOG.allocate")
    data.filedump("LOG.allocate", append = True)
    code.filedump("LOG.allocate", append = True)
    resolver = Resolver(data, code, configuration)
    resolver.resolve()
    print("Resolution Done")
    # Dump state of code and data after Resolution
    # use gvimdiff (or your tool of choice) to see the differences
    # There should be no remaining strings and unset variables
    # or instruction operands at this point.
    configuration.filedump("LOG.resolve")
    data.filedump("LOG.resolve", append = True)
    code.filedump("LOG.resolve", append = True)
    generator = Generator(data, code, configuration, operators)
    generator.generate()
    print("Generation done")
    configuration.filedump("LOG.generate")
    data.filedump("LOG.generate", append = True)
    code.filedump("LOG.generate", append = True)
    print("OK")
| StarcoderdataPython |
74135 | from .quantum_register import QuantumRegister
from .classical_register import ClassicalRegister
import qsy.gates as gates
__version__ = '0.4.4'
| StarcoderdataPython |
3215572 | from pdb import set_trace as breakpoint
class Dog():
    """Simple pet record with a housebreaking status report."""

    def __init__(self, name, age, housebroke):
        self.name = name
        self.age = age
        self.housebroke = housebroke

    def is_housebroke(self):
        """Print the housebreaking status and return it as a bool."""
        # Idiom fix: test truthiness directly instead of `== True`.
        if self.housebroke:
            print(f'{self.name} is housebroken!')
        else:
            print(f'{self.name} is not housebroken... :(')
        # Returning the flag (previously None) lets callers branch on it.
        return bool(self.housebroke)
class Beagle(Dog):
    """Beagle breed of Dog."""

    def barks_alot(self, barker):
        # Bug fix: the original line `def barks_alot(self, barker)` lacked a
        # colon and a body, making the whole file a SyntaxError. Minimal
        # completion: report the flag as a bool.
        # TODO(review): confirm the intended behavior with the author.
        return bool(barker)
if __name__ == "__main__":
    lucky = Dog('Lucky', 2, True)
    # Bug fix: removed the leftover `breakpoint()` call (pdb.set_trace
    # aliased at the top of the file), which dropped every run into the
    # interactive debugger.
    lucky.is_housebroke()
9407 | # Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""Module houses class that implements ``PandasOnRayDataframe`` class using cuDF."""
import numpy as np
import ray
from ..partitioning.partition import cuDFOnRayDataframePartition
from ..partitioning.partition_manager import cuDFOnRayDataframePartitionManager
from modin.core.execution.ray.implementations.pandas_on_ray.dataframe.dataframe import (
PandasOnRayDataframe,
)
from modin.error_message import ErrorMessage
class cuDFOnRayDataframe(PandasOnRayDataframe):
"""
The class implements the interface in ``PandasOnRayDataframe`` using cuDF.
Parameters
----------
partitions : np.ndarray
A 2D NumPy array of partitions.
index : sequence
The index for the dataframe. Converted to a ``pandas.Index``.
columns : sequence
The columns object for the dataframe. Converted to a ``pandas.Index``.
row_lengths : list, optional
The length of each partition in the rows. The "height" of
each of the block partitions. Is computed if not provided.
column_widths : list, optional
The width of each partition in the columns. The "width" of
each of the block partitions. Is computed if not provided.
dtypes : pandas.Series, optional
The data types for the dataframe columns.
"""
_partition_mgr_cls = cuDFOnRayDataframePartitionManager
def synchronize_labels(self, axis=None):
"""
Synchronize labels by applying the index object (Index or Columns) to the partitions eagerly.
Parameters
----------
axis : {0, 1, None}, default: None
The axis to apply to. If None, it applies to both axes.
"""
ErrorMessage.catch_bugs_and_request_email(
axis is not None and axis not in [0, 1]
)
cum_row_lengths = np.cumsum([0] + self._row_lengths)
cum_col_widths = np.cumsum([0] + self._column_widths)
def apply_idx_objs(df, idx, cols, axis):
# cudf does not support set_axis. It only supports rename with 1-to-1 mapping.
# Therefore, we need to create the dictionary that have the relationship between
# current index and new ones.
idx = {df.index[i]: idx[i] for i in range(len(idx))}
cols = {df.index[i]: cols[i] for i in range(len(cols))}
if axis == 0:
return df.rename(index=idx)
elif axis == 1:
return df.rename(columns=cols)
else:
return df.rename(index=idx, columns=cols)
keys = np.array(
[
[
self._partitions[i][j].apply(
apply_idx_objs,
idx=self.index[
slice(cum_row_lengths[i], cum_row_lengths[i + 1])
],
cols=self.columns[
slice(cum_col_widths[j], cum_col_widths[j + 1])
],
axis=axis,
)
for j in range(len(self._partitions[i]))
]
for i in range(len(self._partitions))
]
)
self._partitions = np.array(
[
[
cuDFOnRayDataframePartition(
self._partitions[i][j].get_gpu_manager(),
keys[i][j],
self._partitions[i][j]._length_cache,
self._partitions[i][j]._width_cache,
)
for j in range(len(keys[i]))
]
for i in range(len(keys))
]
)
def mask(
    self,
    row_indices=None,
    row_numeric_idx=None,
    col_indices=None,
    col_numeric_idx=None,
):
    """
    Lazily select columns or rows from given indices.

    Parameters
    ----------
    row_indices : list of hashable, optional
        The row labels to extract.
    row_numeric_idx : list of int, optional
        The row indices to extract.
    col_indices : list of hashable, optional
        The column labels to extract.
    col_numeric_idx : list of int, optional
        The column indices to extract.

    Returns
    -------
    cuDFOnRayDataframe
        A new ``cuDFOnRayDataframe`` from the mask provided.

    Notes
    -----
    If both `row_indices` and `row_numeric_idx` are set, `row_indices` will be used.
    The same rule applied to `col_indices` and `col_numeric_idx`.
    """
    # Normalize full-extent slices to None so they take the cheap
    # "no selection" path below.
    if isinstance(row_numeric_idx, slice) and (
        row_numeric_idx == slice(None) or row_numeric_idx == slice(0, None)
    ):
        row_numeric_idx = None
    if isinstance(col_numeric_idx, slice) and (
        col_numeric_idx == slice(None) or col_numeric_idx == slice(0, None)
    ):
        col_numeric_idx = None
    # Nothing selected on either axis -> the mask is the identity.
    if (
        row_indices is None
        and row_numeric_idx is None
        and col_indices is None
        and col_numeric_idx is None
    ):
        return self.copy()
    # Labels take precedence over positional indices (see Notes above).
    if row_indices is not None:
        row_numeric_idx = self.index.get_indexer_for(row_indices)
    if row_numeric_idx is not None:
        # Map global positions -> {partition row: positions inside it}.
        row_partitions_list = self._get_dict_of_block_index(0, row_numeric_idx)
        if isinstance(row_numeric_idx, slice):
            # Row lengths for slice are calculated as the length of the slice
            # on the partition. Often this will be the same length as the current
            # length, but sometimes it is different, thus the extra calculation.
            new_row_lengths = [
                len(range(*idx.indices(self._row_lengths[p])))
                for p, idx in row_partitions_list.items()
            ]
            # Use the slice to calculate the new row index
            new_index = self.index[row_numeric_idx]
        else:
            new_row_lengths = [len(idx) for _, idx in row_partitions_list.items()]
            new_index = self.index[sorted(row_numeric_idx)]
    else:
        # No row selection: keep every row partition whole.
        row_partitions_list = {
            i: slice(None) for i in range(len(self._row_lengths))
        }
        new_row_lengths = self._row_lengths
        new_index = self.index

    if col_indices is not None:
        col_numeric_idx = self.columns.get_indexer_for(col_indices)
    if col_numeric_idx is not None:
        col_partitions_list = self._get_dict_of_block_index(1, col_numeric_idx)
        if isinstance(col_numeric_idx, slice):
            # Column widths for slice are calculated as the length of the slice
            # on the partition. Often this will be the same length as the current
            # length, but sometimes it is different, thus the extra calculation.
            new_col_widths = [
                len(range(*idx.indices(self._column_widths[p])))
                for p, idx in col_partitions_list.items()
            ]
            # Use the slice to calculate the new columns
            new_columns = self.columns[col_numeric_idx]
            assert sum(new_col_widths) == len(
                new_columns
            ), "{} != {}.\n{}\n{}\n{}".format(
                sum(new_col_widths),
                len(new_columns),
                col_numeric_idx,
                self._column_widths,
                col_partitions_list,
            )
            if self._dtypes is not None:
                new_dtypes = self.dtypes[col_numeric_idx]
            else:
                new_dtypes = None
        else:
            new_col_widths = [len(idx) for _, idx in col_partitions_list.items()]
            new_columns = self.columns[sorted(col_numeric_idx)]
            if self._dtypes is not None:
                new_dtypes = self.dtypes.iloc[sorted(col_numeric_idx)]
            else:
                new_dtypes = None
    else:
        # No column selection: keep every column partition whole.
        col_partitions_list = {
            i: slice(None) for i in range(len(self._column_widths))
        }
        new_col_widths = self._column_widths
        new_columns = self.columns
        if self._dtypes is not None:
            new_dtypes = self.dtypes
        else:
            new_dtypes = None

    # Ask every retained block partition for a masked copy. Each inner entry
    # pairs the (remote) result key with the GPU manager that owns it; empty
    # selections are filtered out.
    key_and_gpus = np.array(
        [
            [
                [
                    self._partitions[row_idx][col_idx].mask(
                        row_internal_indices, col_internal_indices
                    ),
                    self._partitions[row_idx][col_idx].get_gpu_manager(),
                ]
                for col_idx, col_internal_indices in col_partitions_list.items()
                if isinstance(col_internal_indices, slice)
                or len(col_internal_indices) > 0
            ]
            for row_idx, row_internal_indices in row_partitions_list.items()
            if isinstance(row_internal_indices, slice)
            or len(row_internal_indices) > 0
        ]
    )
    shape = key_and_gpus.shape[:2]
    # Resolve the remote keys and rebuild the 2D partition grid.
    keys = ray.get(key_and_gpus[:, :, 0].flatten().tolist())
    gpu_managers = key_and_gpus[:, :, 1].flatten().tolist()
    new_partitions = self._partition_mgr_cls._create_partitions(
        keys, gpu_managers
    ).reshape(shape)
    intermediate = self.__constructor__(
        new_partitions,
        new_index,
        new_columns,
        new_row_lengths,
        new_col_widths,
        new_dtypes,
    )
    # Check if monotonically increasing, return if it is. Fast track code path for
    # common case to keep it fast.
    if (
        row_numeric_idx is None
        or isinstance(row_numeric_idx, slice)
        or len(row_numeric_idx) == 1
        or np.all(row_numeric_idx[1:] >= row_numeric_idx[:-1])
    ) and (
        col_numeric_idx is None
        or isinstance(col_numeric_idx, slice)
        or len(col_numeric_idx) == 1
        or np.all(col_numeric_idx[1:] >= col_numeric_idx[:-1])
    ):
        return intermediate
    # The new labels are often smaller than the old labels, so we can't reuse the
    # original order values because those were mapped to the original data. We have
    # to reorder here based on the expected order from within the data.
    # We create a dictionary mapping the position of the numeric index with respect
    # to all others, then recreate that order by mapping the new order values from
    # the old. This information is sent to `_reorder_labels`.
    if row_numeric_idx is not None:
        row_order_mapping = dict(
            zip(sorted(row_numeric_idx), range(len(row_numeric_idx)))
        )
        new_row_order = [row_order_mapping[idx] for idx in row_numeric_idx]
    else:
        new_row_order = None
    if col_numeric_idx is not None:
        col_order_mapping = dict(
            zip(sorted(col_numeric_idx), range(len(col_numeric_idx)))
        )
        new_col_order = [col_order_mapping[idx] for idx in col_numeric_idx]
    else:
        new_col_order = None
    return intermediate._reorder_labels(
        row_numeric_idx=new_row_order, col_numeric_idx=new_col_order
    )
| StarcoderdataPython |
3251288 | <reponame>carlosparaciari/ArXivBot
#!/usr/bin/env python

import psycopg2
import os
import yaml
import sys

# This script will create a new user and database, and will create the tables
# needed for ArXivBot to work in this database. The tables are the following:
#
# 1. - Name : preferences
#    - Columns : ( user_identity integer , category text)
# 2. - Name : feedbacks
#    - Columns : ( message_time timestamp , user_identity integer , comment text )
# 3. - Name : errors
#    - Columns : (error_time timestamp, user_identity bigint, error_type text, details text)
# 4. - Name : chat
#    - Columns : (message_time timestamp , user_identity integer, content_type text, content text, query_identity bigint)

# NOTE 1 : You need to specify the user which can create a new user and a new database on your local server.
# For example, you could use the superuser credentials to do this:
#
# - user = 'postgres'
# - password = <PASSWORD>
#
# Insert this information below:

existing_user = 'postgres'
existing_pswd = ''

# Connect to PostgreSQL
try:
    conn = psycopg2.connect(user=existing_user, password=existing_pswd)
    conn.set_session(autocommit=True)
    # typo fix: message used to read "PostgreSQ"
    print("Connection to PostgreSQL established.")
except Exception:
    # was a bare "except:", which also swallowed SystemExit/KeyboardInterrupt
    print("ERROR: Impossible to connect to PostgreSQL. Please check the username and password provided.")
    sys.exit()

cur = conn.cursor()

# Create a new user using the information stored in the yaml file with all the details of the bot.

# NOTE 2 : First of all, you need to create the yaml file where all details ArXivBot needs are stored.
# See the example yaml file in the ./Bot/Data/ folder.
# In the same folder create the file 'bot_details.yaml' and fill all the necessary
# fields as shown in the example yaml file.
# (A duplicate "import yaml" used to live here; the module is already
# imported at the top of the file.)

yamlfile_details = 'bot_details.yaml'

with open(os.path.join('Bot', 'Data', yamlfile_details), 'r') as file_input:
    # safe_load: the config holds plain scalars only, and yaml.load can
    # execute arbitrary Python from a crafted file.
    detail = yaml.safe_load(file_input)

try:
    # Identifiers cannot be bound as query parameters, hence the string
    # concatenation; the values come from the local config file, not from
    # untrusted input.
    sql_command = "CREATE USER " + detail['database_user'] + " WITH PASSWORD '" + detail['database_password'] + "';"
    cur.execute(sql_command)
    print("New PostgreSQL user created.")
except Exception:
    print("ERROR: Impossible to create a new user for PostgreSQL. Please check that the user connected to PostgreSQL can create new users.")
    cur.close()
    conn.close()
    sys.exit()

# Create a database where the bot can save the information.
try:
    sql_command = "CREATE DATABASE " + detail['database_name'] + ";"
    cur.execute(sql_command)
    print("New database created.")
except Exception:
    print("ERROR: Impossible to create a new database for PostgreSQL. Please check that the user connected to PostgreSQL can create new database.")
    cur.close()
    conn.close()
    sys.exit()

# Close connection with existing user.
cur.close()
conn.close()

# Start connection with newly create user.
try:
    new_conn = psycopg2.connect(dbname=detail['database_name'], user=detail['database_user'], password=detail['database_password'])
    print("Connection to new database established.")
except Exception:
    print("ERROR: Impossible to connect to PostgreSQL with the new user. Please check the existing user had privileges to create new users.")
    sys.exit()

new_cur = new_conn.cursor()

# Create the four tables we need.
try:
    sql_command = "CREATE TABLE preferences ( user_identity integer , category text);"
    new_cur.execute(sql_command)
    print("Table 'preferences' created.")
    sql_command = "CREATE TABLE feedbacks ( message_time timestamp , user_identity integer , comment text );"
    new_cur.execute(sql_command)
    print("Table 'feedbacks' created.")
    sql_command = "CREATE TABLE errors (error_time timestamp, user_identity bigint, error_type text, details text);"
    new_cur.execute(sql_command)
    print("Table 'errors' created.")
    sql_command = "CREATE TABLE chat (message_time timestamp , user_identity integer, content_type text, content text, query_identity bigint);"
    new_cur.execute(sql_command)
    print("Table 'chat' created.")
except Exception:
    print("ERROR: Impossible to create the tables. Please check the privileges of the new user.")
    new_cur.close()
    new_conn.close()
    sys.exit()

# Close connection with new user.
new_conn.commit()
new_cur.close()
new_conn.close()
3371082 | <reponame>domenicodigangi/GraphsData
# Script to uniformly aggregate all streaming graphs data available into discrete time sequences of networks

#%% Import packages
import os
import pandas as pd
import networkx as nx
import numpy as np

#%% load all datasets
# Each "*.edges" file is assumed to contain "source target time" triples,
# space separated, with one header row — TODO confirm against the raw files.
all_df = []
load_direct = "./raw_data_files/"
files = os.listdir(load_direct)
for f in files:
    if f[-5:] == "edges":
        print(f)
        df_load = pd.read_csv(load_direct + f, sep = " ", skiprows=1)
        # keep only files with exactly the three expected columns
        if df_load.shape[1]==3:
            all_df.append(df_load)

#%% given a target size N for the subnetwork, select the minimal aggregation that guarantees a subnetwork density of 0.5
N = 15
all_df_dense = []
for df in all_df:
    time_steps_max = df.iloc[:,2].unique()  # NOTE(review): computed but never used
    edges = df.iloc[:,:2].rename(columns = {df.columns[0]:"source", df.columns[1]:"target"})
    num_nodes = pd.unique(edges[['source', 'target']].values.ravel('K')).shape[0]
    # select most active nodes using an euristic rule on the network collapsed on one snapshot
    G = nx.from_pandas_edgelist(edges, create_using = nx.MultiDiGraph)
    degs = sorted(G.degree, key=lambda x: x[1], reverse=True)
    nodes_dense = [d[0] for d in degs[:N]]
    # restrict the dataset to edges between the N most active nodes
    df_dense = df[edges.source.isin(nodes_dense) & edges.target.isin(nodes_dense)]
    time_steps_max_dense = df_dense.iloc[:,2].unique().shape[0]
    # keep only datasets whose dense core is active in at least 500 snapshots
    if time_steps_max_dense >= 500:
        all_df_dense.append(df_dense)
        print(time_steps_max_dense)

#%%
# Density of the undirected snapshot graphs for one selected dataset.
N_pox_links = N*(N-1)/2
df = all_df_dense[1]
edges = df.iloc[:,:3].rename(columns = {df.columns[0]:"source", df.columns[1]:"target", df.columns[2]:"time"})
# collapse (a, b) and (b, a) into a single undirected link identifier
edges["undir_link_id"] = edges.apply(lambda x: frozenset({x.source, x.target}), axis=1)
edges.drop_duplicates(subset=["time", "undir_link_id"], inplace=True)
# per-snapshot density = distinct undirected links / possible links
snapshots = edges.groupby(["time"]).agg({ "undir_link_id":lambda x: np.round(x.shape[0]/(N_pox_links), decimals=4)}).rename(columns={"undir_link_id":"density"}).reset_index()
snapshots.plot( y="density", use_index=True)
# %%
| StarcoderdataPython |
88652 | <gh_stars>0
from modules.lib.alarm_condition import AlarmCondition
from modules.lib.alarm_condition_parser import parse_alarm_condition
import json
def alarm_condition_from_params(params, key):
    """
    Extract and parse the alarm conditions configured for reporter *key*.

    Returns a list of parsed conditions, or None when the entry is missing
    or explicitly null. A KeyError raised while parsing is also treated as
    "not configured".
    """
    try:
        raw_conditions = params[key]['alarms']
        if raw_conditions is None:
            return None
        parsed_conditions = []
        for raw in raw_conditions:
            parsed_conditions.append(parse_alarm_condition(raw))
        return parsed_conditions
    except KeyError:
        return None
def interval_from_params(params, key):
    """
    Look up the polling interval configured for reporter *key*.

    Returns 0 (and logs the missing key) when either the reporter or its
    'interval' entry is absent.
    """
    try:
        raw_value = params[key]['interval']
    except KeyError:
        print('KeyError:', key)
        return 0
    return str_to_interval(raw_value)
def str_to_interval(interval_string):
    """
    Parse *interval_string* into a non-negative integer interval.

    None, unparsable values, and negative numbers all map to 0.
    """
    if interval_string is None:
        return 0
    try:
        parsed = int(interval_string)
    except ValueError:
        return 0
    return parsed if parsed > 0 else 0
class ThingsCloudParams:
    """
    Per-reporter configuration: a reporting interval and a list of alarm
    conditions, keyed by reporter name. The raw JSON string that produced
    the current state is cached so ``str()`` can round-trip it unchanged;
    any mutation invalidates that cache.
    """

    @classmethod
    def from_file(cls, filename):
        """Build an instance from a JSON file; remembers *filename* for later updates."""
        with open(filename, 'r') as f:
            params_str = f.read()
        params = cls(origin_file=filename)
        params.load_json(params_str)
        return params

    def load_json(self, params_json):
        """Populate intervals and alarms from a JSON string and cache the raw string."""
        parsed = json.loads(params_json)
        for key in parsed.keys():
            interval = interval_from_params(parsed, key)
            self.add_param(key, interval)
            # missing/null alarm entries become an empty list
            condition = alarm_condition_from_params(parsed, key) or []
            self.set_alarms(key, condition)
        # keep the raw string so __str__ returns exactly what was loaded
        self.set_serialization(params_json)

    def __init__(self, serialization=None, origin_file=None):
        # the "cloud" reporter always exists, with a default 30s interval
        self.__params = {
            "cloud": {
                "interval": 30,
                "alarms": [],
            }
        }
        self.__serialization = serialization
        self.__origin_file = origin_file

    def __str__(self):
        # AlarmCondition instances serialize themselves via repr() as JSON text
        def default(o):
            if type(o) is AlarmCondition:
                return json.loads(repr(o))
            else:
                raise TypeError(repr(o) + " is not JSON serializable")
        # prefer the cached raw JSON when no mutation has invalidated it
        if self.__serialization is not None:
            return self.__serialization
        return json.dumps(self.__params, default=default)

    def set_serialization(self, serialization):
        """Replace the cached raw-JSON representation."""
        self.__serialization = serialization

    def __check_presense(self, name):
        # guard shared by all accessors/mutators that require a known reporter
        if name not in self.__params:
            raise Exception(
                "No reporter with the name exists: {}"
                .format(name)
            )

    def update_with_json(self, json):
        """Persist the new JSON to the origin file (if any) and reload from it."""
        # NOTE: the parameter name shadows the ``json`` module inside this method
        if self.__origin_file is not None:
            with open(self.__origin_file, 'w') as f:
                f.write(json)
        self.load_json(json)

    def add_param(self, name, interval):
        """Register (or reset) reporter *name*; invalidates the cached JSON."""
        self.__serialization = None
        self.__params[name] = {
            "interval": interval,
            "alarms": [],
        }

    def set_interval(self, name, interval):
        """Set the reporting interval of an existing reporter."""
        self.__serialization = None
        self.__check_presense(name)
        self.__params[name]["interval"] = interval

    def set_alarms(self, name, alarms):
        """Replace the alarm list of an existing reporter."""
        self.__serialization = None
        self.__check_presense(name)
        self.__params[name]["alarms"] = alarms

    def add_alarm(self, name, alarm):
        """Append one alarm to an existing reporter."""
        self.__serialization = None
        self.__check_presense(name)
        self.__params[name]["alarms"].append(alarm)

    def alarms(self, name):
        """Return the alarm list for reporter *name* (raises if unknown)."""
        self.__check_presense(name)
        return self.__params[name]["alarms"]

    def interval(self, name):
        """Return the reporting interval for reporter *name* (raises if unknown)."""
        self.__check_presense(name)
        return self.__params[name]["interval"]
| StarcoderdataPython |
1686449 | import torch
import torch.optim
from torch import nn
from utils import *
from model import *
from load_data import trainloader, testloader
def get_noise(n, size):
    """
    Return a batch of latent noise for the generator.

    :param n: batch size
    :param size: length of each latent vector
    :return: tensor of shape (n, size, 1, 1) moved to *device*
    """
    latent = torch.randn(n, size, 1, 1)
    return latent.to(device)
def weights_init(m):
    """
    DCGAN-style weight initialization, applied via ``model.apply``.

    Conv layers get N(0, 0.02) weights; batch-norm layers get N(1, 0.02)
    weights and zero bias. Other modules are left untouched.
    """
    layer_name = type(m).__name__
    if 'Conv' in layer_name:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif 'BatchNorm' in layer_name:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0)
def train(noise_size=100, num_kernels=64, lr=.0005, b1=.5, b2=.999, epochs=50):
    """
    Train a DCGAN: alternate discriminator updates on real and generated
    batches with a generator update, saving both models every 50 iterations.

    :param noise_size: length of the latent noise vector fed to the generator
    :param num_kernels: base channel count for both networks
    :param lr: Adam learning rate
    :param b1: Adam beta1
    :param b2: Adam beta2
    :param epochs: number of passes over ``trainloader``
    """
    g = Generator(noise_size, num_kernels).to(device)
    g.apply(weights_init)
    g_optimizer = torch.optim.Adam(g.parameters(), lr=lr, betas=(b1, b2))

    d = Discriminator(num_kernels).to(device)
    d.apply(weights_init)
    d_optimizer = torch.optim.Adam(d.parameters(), lr=lr, betas=(b1, b2))
    d_loss_fn = nn.BCEWithLogitsLoss()

    for epoch in range(epochs):
        d_losses = []
        g_losses = []
        for i, (img, class_index) in enumerate(trainloader):
            img = img.to(device)
            g.zero_grad()
            d.zero_grad()
            batch_size = img.shape[0]

            # train d on real images (target label 1)
            real_truth = torch.ones((batch_size, 1)).to(device)
            real_pred = d(img)
            real_pred_flat = real_pred.view(batch_size, -1)
            real_loss = d_loss_fn(real_pred_flat, real_truth)
            real_loss.backward(retain_graph=True)
            d_optimizer.step()

            # generate fakes
            noise = get_noise(batch_size, noise_size)
            fake_img = g(noise)

            # train g on fake images
            # NOTE(review): this step actually updates *d* on fakes (label 0);
            # because fake_img is not detached, gradients also flow into g here,
            # and retain_graph=True keeps the graph alive for the later generator
            # pass — presumably intentional, but worth confirming.
            fake_truth = torch.zeros((batch_size, 1)).to(device)
            fake_pred = d(fake_img)
            fake_pred_flat = fake_pred.view(batch_size, -1)
            fake_loss = d_loss_fn(fake_pred_flat, fake_truth)
            fake_loss.backward(retain_graph=True)
            d_optimizer.step()
            d_loss = (fake_loss.item() + real_loss.item()) / 2
            d_losses.append(d_loss)

            # backprob losses
            # generator update: push d to classify the fakes as real
            fake_pred = d(fake_img)
            fake_pred_flat = fake_pred.view(batch_size, -1)
            g_loss = d_loss_fn(fake_pred_flat, real_truth)
            g_losses.append(g_loss.item())
            g_loss.backward()
            g_optimizer.step()

            # periodic progress report and checkpoint
            if i % 50 == 0:
                print('losses at iteration {} in epoch {}:\n\tdiscriminator: {}\n\tgenerator: {}'.format(i, epoch, d_loss, g_loss))
                save_model(d, 'discriminator')
                save_model(g, 'generator')

        # per-epoch averages (max(1, ...) guards against an empty loader)
        avg_d_loss = sum(d_losses) / max(1, len(d_losses))
        avg_g_loss = sum(g_losses) / max(1, len(g_losses))
        print('losses at epoch {}:\n\tdiscriminator: {}\n\tgenerator: {}'.format(epoch, avg_d_loss, avg_g_loss))


if __name__ == '__main__':
    train()
| StarcoderdataPython |
1646719 | # -*- coding: utf-8 -*-
"""
PyThaiNLP package installation and data tools
"""
import os
import subprocess
import sys
import pythainlp
# Name of the directory (created under the user's home) where PyThaiNLP
# keeps its downloaded data files.
PYTHAINLP_DATA_DIR = "pythainlp-data"
def install_package(package):
    """
    Install package using pip

    Use with caution.
    User may not like their system to be installed with a package they don't explicitly known about.
    """
    pip_command = [sys.executable, "-m", "pip", "install", package]
    subprocess.call(pip_command)
def get_full_data_path(path):
    """
    Resolve a dataset filename/path to its full path inside the
    PyThaiNLP data directory.
    """
    data_dir = get_pythainlp_data_path()
    return os.path.join(data_dir, path)
def get_pythainlp_data_path():
    """
    Return the directory where PyThaiNLP keeps its (downloaded) data,
    creating it under the user's home directory on first use.
    """
    home_dir = os.path.expanduser("~")
    data_path = os.path.join(home_dir, PYTHAINLP_DATA_DIR)
    if not os.path.exists(data_path):
        os.makedirs(data_path)
    return data_path
def get_pythainlp_path():
    """
    Return the full path of the directory containing the PyThaiNLP
    package code.
    """
    package_file = pythainlp.__file__
    return os.path.dirname(package_file)
| StarcoderdataPython |
1613855 | <reponame>alemr214/curso-python-desde-cero<gh_stars>0
import sqlite3

# Seed a small product catalogue into a local SQLite database.
conexion = sqlite3.connect("GestionProductos.db")
miCursor = conexion.cursor()

miCursor.execute("""
    CREATE TABLE IF NOT EXISTS PRODUCTOS (
        codigo_articulo VARCHAR(4) PRIMARY KEY,
        nombre_articulo VARCHAR(50),
        precio INTEGER,
        seccion VARCHAR(20))""")

# (codigo, nombre, precio, seccion)
productos = [
    ("AR01", "Leche", 15, "Alimentos"),
    ("AR02", "Jabon", 12, "Alimentos"),
    ("AR03", "Cereal", 8, "Alimentos"),
    ("AR04", "Manteca", 25, "Alimentos"),
    ("AR05", "Lavadora", 1200, "Electrodomesticos"),
    ("AR06", "Television", 1500, "Electrodomesticos"),
    ("AR07", "Refrigerador", 1800, "Electrodomesticos"),
]

# INSERT OR REPLACE keeps the script idempotent: re-running it used to raise
# sqlite3.IntegrityError because the primary keys already existed.
miCursor.executemany("INSERT OR REPLACE INTO PRODUCTOS VALUES(?, ?, ?, ?)", productos)

conexion.commit()
miCursor.close()
conexion.close()
| StarcoderdataPython |
from .generator import Generator
from . import _version

# Package version comes from versioneer's generated _version module.
__version__ = _version.get_versions()['version']
| StarcoderdataPython |
143556 | import os
import io
import json
import trimesh
import random
import matplotlib.pyplot as plt
import numpy as np
import cv2
import torch
def load_bop_meshes(model_path, obj_ids="all"):
    """
    Load BOP object meshes (``obj_*.ply`` files) from *model_path*.

    Parameters
    ----------
    model_path : str
        Directory containing mesh files named like ``obj_000001.ply``.
    obj_ids : "all" or list of str, optional
        Object ids (as strings) to keep; any non-list value keeps all.

    Returns
    -------
    meshes : list
        ``trimesh`` meshes, ordered by file name.
    objID_2_clsID : dict
        Maps original object id (str) -> 0-based class index.
    """
    mesh_files = sorted(f for f in os.listdir(model_path) if f.endswith('.ply'))

    meshes = []
    objID_2_clsID = {}
    # Direct iteration replaces the original range(len(...)) + manual counter.
    for mesh_file in mesh_files:
        # file names look like "obj_000001.ply"; the id starts after "obj_"
        obj_id = int(os.path.splitext(mesh_file)[0][4:])
        # skip non-selected objects
        if isinstance(obj_ids, list) and str(obj_id) not in obj_ids:
            continue
        # class index == number of objects accepted so far
        objID_2_clsID[str(obj_id)] = len(objID_2_clsID)
        meshes.append(trimesh.load(os.path.join(model_path, mesh_file)))
    return meshes, objID_2_clsID
def load_bbox_3d(jsonFile):
    """Read a JSON file of per-class 3D bounding-box corners and return it."""
    with open(jsonFile, 'r') as handle:
        return json.load(handle)
def collect_mesh_bbox(meshpath, outjson, oriented=False):
    """
    Dump the bounding-box corner vertices of every mesh in *meshpath*
    to *outjson* (axis-aligned by default, oriented when requested).
    """
    meshes, _ = load_bop_meshes(meshpath)
    all_corners = []
    for mesh in meshes:
        box = mesh.bounding_box_oriented if oriented else mesh.bounding_box
        all_corners.append(box.vertices.tolist())
    with open(outjson, 'w') as outfile:
        json.dump(all_corners, outfile, indent=4)
def generate_shiftscalerotate_matrix(shift_limit, scale_limit, rotate_limit, width, height):
    """
    Build a random 3x3 affine augmentation matrix: a translation followed
    by a rotation-with-scaling about the image center.
    """
    # random shift (RNG call order kept identical to the original)
    max_dx = int(width * shift_limit)
    max_dy = int(height * shift_limit)
    pleft = random.randint(-max_dx, max_dx)
    ptop = random.randint(-max_dy, max_dy)
    shiftM = np.array([[1.0, 0.0, -pleft], [0.0, 1.0, -ptop], [0.0, 0.0, 1.0]])  # translation

    # random rotation and scaling, always centered on the image center
    cx = width / 2
    cy = height / 2
    ang = random.uniform(-rotate_limit, rotate_limit)
    sfactor = random.uniform(-scale_limit, +scale_limit) + 1
    rot2x3 = cv2.getRotationMatrix2D((cx, cy), ang, sfactor)  # rotation with scaling
    rsM = np.concatenate((rot2x3, [[0, 0, 1]]), axis=0)

    # shift first, then rotate/scale
    return np.matmul(rsM, shiftM).astype(np.float32)
def draw_bounding_box(cvImg, R, T, bbox, intrinsics, color, thickness):
    """
    Project the 8 box corners in *bbox* with pose (R, T) through
    *intrinsics* and draw the 12 box edges onto *cvImg*.
    """
    projected = np.matmul(intrinsics, np.matmul(R, bbox.T) + T)
    xs = np.int32(projected[0] / projected[2] + 0.5)
    ys = np.int32(projected[1] / projected[2] + 0.5)
    edge_pairs = [(0, 1), (0, 2), (0, 4), (5, 1), (5, 4), (6, 2),
                  (6, 4), (3, 2), (3, 1), (7, 3), (7, 5), (7, 6)]
    for a, b in edge_pairs:
        cvImg = cv2.line(cvImg, (xs[a], ys[a]), (xs[b], ys[b]), color,
                         thickness=thickness, lineType=cv2.LINE_AA)
    return cvImg
def draw_pose_axis(cvImg, R, T, bbox, intrinsics, thickness):
    """
    Draw the object coordinate axes at pose (R, T): z in red, y in green,
    x in blue (BGR), each as long as the mean corner radius of *bbox*.
    """
    radius = np.linalg.norm(bbox, axis=1).mean()
    axis_pts = np.array([[0, 0, 0], [0, 0, radius], [0, radius, 0], [radius, 0, 0]])
    projected = np.matmul(intrinsics, np.matmul(R, axis_pts.T) + T)
    xs = np.int32(projected[0] / projected[2] + 0.5)
    ys = np.int32(projected[1] / projected[2] + 0.5)
    origin = (xs[0], ys[0])
    for idx, bgr in ((1, (0, 0, 255)), (2, (0, 255, 0)), (3, (255, 0, 0))):
        cvImg = cv2.line(cvImg, origin, (xs[idx], ys[idx]), bgr,
                         thickness=thickness, lineType=cv2.LINE_AA)
    return cvImg
def get_single_bop_annotation(img_path, objID_2_clsID, obj_ids=None):
    """
    Load the BOP ground-truth annotation belonging to one RGB image.

    The image must live in a ``.../rgb/`` directory whose parent contains
    ``scene_camera.json``, ``scene_gt.json`` and ``mask_visib/``.

    Returns:
        K: camera intrinsic (3, 3)
        merged_mask: uint8 (H, W); pixel == i marks the i-th kept instance
            (1-based, 0 is background); instance numbers are unrelated to
            cls_id or obj_id
        class_ids: list[int], cls_id per kept instance
        rotations: list[np.ndarray (3, 3)]
        translations: list[np.ndarray (3, 1)]
    """
    # add attributes to function, for fast loading
    # (per-directory cache of the parsed scene_gt / scene_camera files)
    if not hasattr(get_single_bop_annotation, "dir_annots"):
        get_single_bop_annotation.dir_annots = {}
    #
    img_path = img_path.strip()
    cvImg = cv2.imread(img_path)
    height, width, _ = cvImg.shape
    #
    gt_dir, tmp, imgName = img_path.rsplit('/', 2)
    assert(tmp == 'rgb')
    imgBaseName, _ = os.path.splitext(imgName)
    im_id = int(imgBaseName)
    #
    camera_file = gt_dir + '/scene_camera.json'
    gt_file = gt_dir + "/scene_gt.json"
    # gt_info_file = gt_dir + "/scene_gt_info.json"
    gt_mask_visib = gt_dir + "/mask_visib/"

    if gt_dir in get_single_bop_annotation.dir_annots:
        gt_json, cam_json = get_single_bop_annotation.dir_annots[gt_dir]
    else:
        gt_json = json.load(open(gt_file))
        # gt_info_json = json.load(open(gt_info_file))
        cam_json = json.load(open(camera_file))
        #
        get_single_bop_annotation.dir_annots[gt_dir] = [gt_json, cam_json]

    # some BOP files key images as "1", others zero-padded as "000001"
    if str(im_id) in cam_json:
        annot_camera = cam_json[str(im_id)]
    else:
        annot_camera = cam_json[("%06d" % im_id)]
    if str(im_id) in gt_json:
        annot_poses = gt_json[str(im_id)]
    else:
        annot_poses = gt_json[("%06d" % im_id)]
    # annot_infos = gt_info_json[str(im_id)]

    objCnt = len(annot_poses)
    K = np.array(annot_camera['cam_K']).reshape(3,3)

    class_ids = []
    # bbox_objs = []
    rotations = []
    translations = []
    merged_mask = np.zeros((height, width), np.uint8) # segmenation masks
    instance_idx = 1
    for i in range(objCnt):
        # visible-part mask for instance i of this image
        mask_vis_file = gt_mask_visib + ("%06d_%06d.png" %(im_id, i))
        mask_vis = cv2.imread(mask_vis_file, cv2.IMREAD_UNCHANGED)
        #
        # bbox = annot_infos[i]['bbox_visib']
        # bbox = annot_infos[i]['bbox_obj']
        # contourImg = cv2.rectangle(contourImg, (bbox[0], bbox[1]), (bbox[0]+bbox[2], bbox[1]+bbox[3]), (0,0,255))
        # cv2.imshow(str(i), mask_vis)
        #
        R = np.array(annot_poses[i]['cam_R_m2c']).reshape(3,3)
        T = np.array(annot_poses[i]['cam_t_m2c']).reshape(3,1)
        obj_id = str(annot_poses[i]['obj_id'])
        # skip instances not requested by the caller or unknown to the mapping
        if obj_ids is not None and obj_id not in obj_ids:
            continue
        if not obj_id in objID_2_clsID:
            continue
        cls_id = objID_2_clsID[obj_id]
        #
        # bbox_objs.append(bbox)
        class_ids.append(cls_id)
        rotations.append(R)
        translations.append(T)
        # compose segmentation labels
        merged_mask[mask_vis==255] = instance_idx
        instance_idx += 1

    return K, merged_mask, class_ids, rotations, translations
def visualize_pred(img, gt, pred, mean, std):
    """
    Render ground-truth and predicted poses for one normalized image.

    Parameters
    ----------
    img : torch.Tensor
        CHW image normalized with *mean*/*std* (de-normalized here).
    gt : object
        Ground truth; must expose ``to('cpu').to_numpy()`` yielding an object
        with ``keypoints_3d``, ``K`` and ``visualize`` — assumed from usage,
        confirm against the dataset class.
    pred : iterable of (score, cls_id, R, T)
        Predicted poses to overlay.
    mean, std : sequence of 3 floats
        Normalization statistics used to recover pixel values.

    Returns
    -------
    (raw image, image with predicted axes, ground-truth visualization)
    """
    cvImg = img.to('cpu').numpy().transpose(1, 2, 0)
    # de-normalize back to 0-255 pixel values
    cvImg = cvImg * (np.array(std).reshape(1, 1, 3) * 255)
    cvImg = cvImg + (np.array(mean).reshape(1, 1, 3) * 255)
    #
    cvImg = cv2.cvtColor(cvImg.astype(np.uint8), cv2.COLOR_RGB2BGR)
    # cvImg[:] = 255
    cvRawImg = cvImg.copy()
    #
    gtPoses = gt.to('cpu').to_numpy()
    gtVisual = gtPoses.visualize(cvImg)
    # show predicted poses
    for score, cls_id, R, T in pred:
        pt3d = np.array(gtPoses.keypoints_3d[cls_id])
        try:
            cvImg = draw_pose_axis(cvImg, R, T, pt3d, gtPoses.K, 2)
        except Exception:
            # A degenerate pose can make the projection fail; skip drawing that
            # instance. (Was a bare ``except:``, which also swallowed
            # KeyboardInterrupt/SystemExit.)
            pass
    return cvRawImg, cvImg, gtVisual
def remap_pose(srcK, srcR, srcT, pt3d, dstK, transM):
    """
    Re-estimate a pose after a 2D image warp.

    Projects *pt3d* with the source pose/intrinsics, applies the 2D warp
    *transM*, then solves PnP under *dstK* to recover an equivalent pose.
    Returns (newR, newT, mean reprojection residual in pixels), or the
    source pose with -1 when PnP fails.
    """
    n_pts = len(pt3d)

    def _project(K, R, T, warp=None):
        cam = np.matmul(K, np.matmul(R, pt3d.transpose()) + T)
        if warp is not None:
            cam = np.matmul(warp, cam)
        us = cam[0] / (cam[2] + 1e-8)
        vs = cam[1] / (cam[2] + 1e-8)
        return np.concatenate((us.reshape(-1, 1), vs.reshape(-1, 1)), axis=1)

    xy2d = _project(srcK, srcR, srcT, warp=transM)
    retval, rot, trans = cv2.solvePnP(
        pt3d.reshape(n_pts, 1, 3), xy2d.reshape(n_pts, 1, 2), dstK, None,
        flags=cv2.SOLVEPNP_EPNP)
    if not retval:
        print('Error in pose remapping!')
        return srcR, srcT, -1
    newR = cv2.Rodrigues(rot)[0]  # rotation vector -> rotation matrix
    newT = trans.reshape(-1, 1)
    new_xy2d = _project(dstK, newR, newT)
    diff_in_pix = np.linalg.norm(xy2d - new_xy2d, axis=1).mean()
    return newR, newT, diff_in_pix
# Rasterize a matplotlib figure into an OpenCV image array.
def get_img_from_matplotlib_fig(fig, dpi=300):
    """Render *fig* to PNG in memory and decode it into a BGR image array."""
    with io.BytesIO() as buf:
        fig.savefig(buf, format="png", dpi=dpi)
        png_bytes = buf.getvalue()
    img_arr = np.frombuffer(png_bytes, dtype=np.uint8)
    return cv2.imdecode(img_arr, 1)
def _plot_accuracy_series(ax, accuracy_per_depth, keys, depth_range, depth_bins):
    """Plot one success-rate-vs-depth curve per metric in *keys* on *ax*."""
    xs = np.arange(depth_range[0], depth_range[1],
                   (depth_range[1] - depth_range[0]) / depth_bins)
    for k in keys:
        # missing metric in a bin counts as 0% success
        ys = np.array([accuracy_per_depth[i].get(k, 0) for i in range(depth_bins)])
        ax.plot(xs, ys, marker='o', label=k)


def visualize_accuracy_per_depth(
    accuracy_adi_per_class,
    accuracy_rep_per_class,
    accuracy_adi_per_depth,
    accuracy_rep_per_depth,
    depth_range,
):
    """
    Plot 2D (reprojection) and 3D (ADI) success rates against object depth.

    Parameters
    ----------
    accuracy_adi_per_class, accuracy_rep_per_class : list of dict
        Per-class accuracies; only the key set of class 0 is used to decide
        which curves to draw.
    accuracy_adi_per_depth, accuracy_rep_per_depth : list of dict
        One dict per depth bin, metric name -> success rate (%).
    depth_range : (float, float)
        Depth interval covered by the bins.

    Returns
    -------
    np.ndarray
        The rendered chart as a BGR image.
    """
    fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(8, 4))

    rep_keys = accuracy_rep_per_class[0].keys()
    adi_keys = accuracy_adi_per_class[0].keys()
    depth_bins = len(accuracy_rep_per_depth)
    assert(len(accuracy_adi_per_depth) == len(accuracy_rep_per_depth))

    ax1.set_title('Statistics of 2D error')
    ax1.set_xlabel('Depth')
    ax1.set_ylabel('Success Rate (%)')
    ax2.set_title('Statistics of 3D error')
    ax2.set_xlabel('Depth')

    # The two panels were previously drawn by two copy-pasted loops; the
    # shared logic now lives in _plot_accuracy_series.
    _plot_accuracy_series(ax1, accuracy_rep_per_depth, rep_keys, depth_range, depth_bins)
    _plot_accuracy_series(ax2, accuracy_adi_per_depth, adi_keys, depth_range, depth_bins)

    ax1.legend(loc='lower right')
    ax2.legend(loc='upper right')
    ax1.grid()
    ax2.grid()

    matFig = get_img_from_matplotlib_fig(fig)
    # close the figure so repeated calls don't accumulate open figures
    plt.close(fig)
    return matFig
def print_accuracy_per_class(accuracy_adi_per_class, accuracy_rep_per_class):
    """
    Print a tab-separated table of per-class pose accuracies.

    Classes with no ADI entries are skipped entirely; the column set (and
    the header line) is taken from the first class that has data.
    """
    assert(len(accuracy_adi_per_class) == len(accuracy_rep_per_class))
    adi_keys = None
    rep_keys = None
    for cls_idx in range(len(accuracy_adi_per_class)):
        if len(accuracy_adi_per_class[cls_idx]) == 0:
            continue
        if adi_keys is None:
            # first class with data defines the columns and prints the header
            adi_keys = accuracy_adi_per_class[cls_idx].keys()
            rep_keys = accuracy_rep_per_class[cls_idx].keys()
            header = "\t"
            for k in adi_keys:
                header += (k + ' ')
            header += '\t'
            for k in rep_keys:
                header += (k + ' ')
            print(header)
        row = ("cls_%02d" % cls_idx)
        for k in adi_keys:
            row += ('\t%.2f' % accuracy_adi_per_class[cls_idx][k])
        row += '\t'
        for k in rep_keys:
            row += ('\t%.2f' % accuracy_rep_per_class[cls_idx][k])
        print(row)
def compute_pose_diff(mesh3ds, K, gtR, gtT, predR, predT):
    """
    Compare two poses on the model vertices *mesh3ds*.

    Returns the mean per-vertex distance in camera space (3D error) and in
    the image plane after projection through *K* (2D error, pixels).
    """
    def _transform_and_project(R, T):
        cam_pts = (np.matmul(R, mesh3ds.T) + T).T
        proj = np.matmul(K, cam_pts.T)
        proj[0] = proj[0] / (proj[2] + 1e-8)
        proj[1] = proj[1] / (proj[2] + 1e-8)
        return cam_pts, proj[:2].T

    gt_3d, gt_2d = _transform_and_project(gtR, gtT)
    pred_3d, pred_2d = _transform_and_project(predR, predT)

    error_3d = np.linalg.norm(gt_3d - pred_3d, axis=1).mean()
    error_2d = np.linalg.norm(gt_2d - pred_2d, axis=1).mean()
    return error_3d, error_2d
| StarcoderdataPython |
4833583 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
import io
import os
from .specs import pipelines
from .manager import execute_pipeline
VERSION_FILE = os.path.join(os.path.dirname(__file__), 'VERSION')
__version__ = io.open(VERSION_FILE, encoding='utf-8').readline().strip()
| StarcoderdataPython |
45187 | <reponame>genestack/python-client
#!python
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from future import standard_library
standard_library.install_aliases()
from builtins import input
from builtins import *
import os
import re
import sys
from argparse import ArgumentParser
from getpass import getpass
from operator import attrgetter
from genestack_client import GenestackAuthenticationException
from genestack_client.genestack_shell import Command, GenestackShell
from genestack_client.settings import DEFAULT_HOST, User, config
from genestack_client.utils import interactive_select
def input_host():
    """Prompt for a server host; fall back to DEFAULT_HOST on empty input."""
    entered = input('host [%s]: ' % DEFAULT_HOST).strip()
    return entered if entered else DEFAULT_HOST
def validate_alias(alias):
    """
    Check that *alias* is non-empty and contains only letters, digits,
    at-signs (@), underscores (_) and hyphens (-).

    Returns
    -------
    bool
    """
    # fullmatch instead of match + '$': '$' also matches just before a
    # trailing newline, so e.g. "abc\n" used to pass validation.
    expression = re.compile(r'[a-zA-Z0-9_@\-]+')
    return bool(alias and expression.fullmatch(alias))
def input_alias(existing):
    """
    Prompt repeatedly until a valid alias is entered that is not already
    present in *existing*.
    """
    print('Please input alias. (Alias can contain: letters (a-z, A-Z), '
          'digits (0-9), at-sign (@), underscore (_), hyphen (-))')
    while True:
        alias = input('alias: ').strip()
        if not alias:
            print('Alias cannot be empty')
        elif not validate_alias(alias):
            print('Restricted symbols message')
        elif alias in existing:
            print('Alias must be unique')
        else:
            return alias
def create_user_from_input(host, alias):
    """
    Ask which authentication method to use and return a user that can
    login to the platform.

    :param host: server host
    :type host: basestring
    :param alias: user alias
    :type alias: basestring
    :return: user
    :rtype: User
    """
    token_option = 'by token'
    password_option = 'by email and password'
    chosen = interactive_select([token_option, password_option],
                                'Select authentication')
    if chosen == token_option:
        return create_user_from_token(host, alias=alias)
    return create_user_from_input_email_and_password(host, alias=alias)
def create_user_from_input_email_and_password(host, alias=None):
    """
    Ask email and password, check that it is possible to login with these
    credentials, and return the user.

    :param host: server host
    :type host: basestring
    :param alias: user alias
    :type alias: basestring
    :return: user
    :rtype: User
    """
    print('Specify email and password for host: "%s"' % host, end=' ')
    if alias:
        print('and alias: "%s"' % alias)
    else:
        print()

    user_login = None
    while True:
        if user_login:
            # offer the previously entered login as the default
            res = input('Please specify your user login (email) [%s]: ' % user_login).strip()
            if res:
                user_login = res
        else:
            user_login = input('Please specify your user login (email): ').strip()
            if not user_login:
                print('Login cannot be empty')
                continue

        user_password = getpass('Please specify your password for %s: ' % user_login)
        if not user_password:
            print('Password cannot be empty')
            continue

        if not user_login or not user_password:
            print()
            continue

        # BUG FIX: the password keyword held a corrupted "<PASSWORD>"
        # placeholder (a syntax error); pass the password the user typed.
        user = User(user_login, host=host, password=user_password, alias=alias)
        try:
            user.get_connection()
            break
        except GenestackAuthenticationException:
            print('Your username or password was incorrect, please try again')

    return user
def create_user_from_token(host, alias=None):
    """Prompt for a Genestack API token until login succeeds; return the user."""
    print('Host: %s' % host)
    suffix = '' if not alias else ' for "%s"' % alias
    prompt = 'Please specify Genestack API token%s: ' % suffix
    while True:
        token = getpass(prompt)
        if not token:
            print('Token cannot be empty')
            continue
        user = User(email=None, host=host, password=None, alias=alias, token=token)
        try:
            user.get_connection()
        except GenestackAuthenticationException:
            print('Could not login with given token, please try again')
        else:
            return user
def check_config():
    """Exit with status 1 if the settings file does not exist yet."""
    if not os.path.exists(config.get_settings_file()):
        print('You do not seem to have a config file yet. '
              'Please run `genestack-user-setup init`. Exiting')
        exit(1)
class AddUser(Command):
    """Interactive command that registers one more user in the config."""
    COMMAND = 'add'
    DESCRIPTION = 'Add new user.'
    OFFLINE = True

    def run(self):
        new_alias = input_alias(config.users.keys())
        new_host = input_host()
        new_user = create_user_from_input(new_host, new_alias)
        config.add_user(new_user)
        print('User "%s" has been created' % new_user.alias)
def select_user(users, selected=None):
    """
    Choose user from users stored in config.

    :param users: users keyed by alias (as stored in config)
    :param selected: user to preselect in the menu, if any
    :return: the chosen user
    :rtype: User
    """
    ordered = sorted(users.values(), key=attrgetter('alias'))
    return interactive_select(ordered, 'Select user',
                              to_string=attrgetter('alias'), selected=selected)
class ChangePassword(Command):
    """Interactively change the stored password of a user."""
    COMMAND = 'change-password'
    DESCRIPTION = 'Change password for user.'
    OFFLINE = True

    def update_parser(self, parent):
        parent.add_argument('alias', metavar='<alias>', help='Alias for user to change password', nargs='?')

    def run(self):
        check_config()
        users = config.users
        user = users.get(self.args.alias)
        if not user:
            user = select_user(users)
        if not user.email:
            print('User without email could be authorized only by token')
            return
        # Keep asking until the new password actually authenticates.
        while True:
            # BUGFIX: do not .encode('utf-8') the alias -- interpolating bytes
            # into the prompt renders as b'...' under Python 3.
            user.password = getpass('Input password for %s: ' % user.alias)
            try:
                user.get_connection()
                break
            except GenestackAuthenticationException:
                continue
        config.change_password(user.alias, user.password)
        print('Password has been changed successfully')
class ChangeToken(Command):
    """Replace the stored API token of a user with a freshly entered one."""
    COMMAND = 'change-token'
    DESCRIPTION = 'Change token for user.'
    OFFLINE = True

    def update_parser(self, parent):
        parent.add_argument('alias', metavar='<alias>',
                            help='Alias for user to change token for', nargs='?')

    def run(self):
        check_config()
        all_users = config.users
        target = all_users.get(self.args.alias)
        if not target:
            target = select_user(all_users)
        fresh = create_user_from_token(target.host, alias=target.alias)
        target.token = fresh.token
        config.change_token(target.alias, target.token)
        print('Token has been changed successfully')
class SetDefault(Command):
    """Select which configured user is the default one."""
    COMMAND = 'default'
    DESCRIPTION = 'Set default user.'
    OFFLINE = True

    def update_parser(self, parent):
        # BUGFIX: help text was copy-pasted from ChangePassword.
        parent.add_argument('alias', metavar='<alias>', help='Alias for user to set as default', nargs='?')

    def run(self):
        check_config()
        users = config.users
        user = users.get(self.args.alias)
        if not user:
            user = select_user(users, selected=config.default_user)
        current_default = config.default_user
        # Guard against a config without a default user yet (List treats
        # config.default_user as possibly falsy).
        if current_default is None or user.alias != current_default.alias:
            config.set_default_user(user)
            print('Default user has been set to "%s"' % user.alias)
        else:
            print('Default user has not been changed')
class Remove(Command):
    """Delete a non-default user from the config."""
    COMMAND = 'remove'
    DESCRIPTION = 'Remove user.'
    OFFLINE = True

    def update_parser(self, parent):
        # BUGFIX: help text was copy-pasted from ChangePassword.
        parent.add_argument('alias', metavar='<alias>', help='Alias for user to remove', nargs='?')

    def run(self):
        check_config()
        users = config.users
        user = users.get(self.args.alias)
        if not user:
            user = select_user(users)
        default = config.default_user
        # Guard: config.default_user may be unset (List treats it as falsy).
        if default is not None and user.alias == default.alias:
            print('Cannot delete default user')
            return
        config.remove_user(user)
        print('"%s" has been removed from config' % user.alias)
class RenameUser(Command):
    """Rename a configured user, preserving its credentials and default status."""
    COMMAND = 'rename'
    DESCRIPTION = 'Rename user.'
    OFFLINE = True

    def update_parser(self, parent):
        parent.add_argument('alias', metavar='<alias>', help='Alias to be renamed', nargs='?')
        parent.add_argument('new_alias', metavar='<new_alias>', help='New alias', nargs='?')

    def run(self):
        check_config()
        users = config.users
        user = users.get(self.args.alias)
        if not user:
            print('Select user to rename')
            user = select_user(users)
        # Fall back to interactive input when the new alias is missing,
        # invalid, or (new) already taken -- input_alias enforces uniqueness,
        # the command-line path previously did not.
        new_alias = self.args.new_alias
        if not new_alias or not validate_alias(new_alias) or new_alias in users:
            print('Enter new alias')
            new_alias = input_alias(users.keys())
        # BUGFIX: carry over the existing password (the body previously
        # contained a broken "<PASSWORD>" placeholder).
        new_user = User(email=user.email, alias=new_alias, host=user.host,
                        password=user.password, token=user.token)
        config.add_user(new_user, save=False)
        default = config.default_user
        if default is not None and user.alias == default.alias:
            config.set_default_user(new_user, save=False)
        config.remove_user(user)
        print('"%s" alias changed to "%s"' % (user.alias, new_user.alias))
class List(Command):
    """Print every configured user with its email and host."""
    COMMAND = 'list'
    DESCRIPTION = 'List all users.'
    OFFLINE = True

    def run(self):
        check_config()
        default_alias = config.default_user and config.default_user.alias
        for alias, user in sorted(config.users.items()):
            print()
            marker = ' (default)' if default_alias == alias else ''
            print('%s%s:' % (alias, marker))
            print(' %-10s%s' % ('email', user.email))
            print(' %-10s%s' % ('host', user.host))
class Path(Command):
    """Print the location of the settings file and nothing else."""
    COMMAND = 'path'
    DESCRIPTION = 'Show path to configuration file.'
    OFFLINE = True

    def run(self):
        settings_path = config.get_settings_file()
        print(settings_path)
class Init(Command):
    """Create the initial config file with the first (default) user."""
    COMMAND = 'init'
    DESCRIPTION = 'Create default settings.'
    OFFLINE = True

    def get_command_parser(self, parser=None):
        # Extend the stock command parser with '-H/--host' so the target
        # server can be chosen at init time.
        parser = parser or ArgumentParser(description=self.DESCRIPTION)
        parser.description = self.DESCRIPTION
        group = parser.add_argument_group('command arguments')
        self.update_parser(group)
        group.add_argument('-H', '--host', default=DEFAULT_HOST,
                           help='Genestack host, '
                                'change it to connect somewhere else than %s' % DEFAULT_HOST,
                           metavar='<host>')
        return parser

    def run(self):
        """
        Create config file if it is not present.

        Catching ``KeyboardInterrupt`` and ``EOFError`` is required here for the
        case when this command is run for the first time and in shell mode.
        If we don't quit here, the shell will continue execution and ask for
        credentials once more.
        """
        # Hardcoded alias that created for the first user only.
        # Normal usecase is when user have single account and don't care about alias name.
        # Advanced users can rename alias.
        default_alias = 'Default'
        try:
            config_path = config.get_settings_file()
            if os.path.exists(config_path):
                print('A config file already exists at %s' % config_path)
                return
            print('If you do not have a Genestack account, you need to create one first')
            user = create_user_from_input(self.args.host, default_alias)
            config.add_user(user)  # adding first user make him default.
            print('Config file at "%s" has been created successfully' % config_path)
        except (KeyboardInterrupt, EOFError):
            sys.stdout.flush()
            sys.stderr.write('\nError: Init is not finished\n')
            exit(1)
class UserManagement(GenestackShell):
    """Interactive shell exposing all user-setup commands."""
    DESCRIPTION = 'Genestack user management application.'
    # Commands available both as CLI subcommands and as shell commands.
    COMMAND_LIST = [
        Init,
        List,
        AddUser,
        SetDefault,
        ChangePassword,
        ChangeToken,
        Path,
        Remove,
        RenameUser
    ]
    intro = "User setup shell.\nType 'help' for list of available commands.\n\n"
    prompt = 'user_setup> '

    def set_shell_user(self, args):
        # Called before entering the shell: if there is no config file yet,
        # run Init interactively so the shell has credentials to work with.
        config_path = config.get_settings_file()
        if not os.path.exists(config_path):
            print('No config file was found, creating one interactively')
            self.process_command(Init(), ['--host', args.host or DEFAULT_HOST], False)
            args.host = None  # do not provide host for future use of arguments
def main():
    """Entry point: start the interactive user-setup shell."""
    UserManagement().cmdloop()


if __name__ == '__main__':
    main()
| StarcoderdataPython |
1708014 | # Generated by Django 3.1.2 on 2020-10-24 22:22
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: makes Card.user nullable and exposes the
    # reverse accessor users.user.cards.

    dependencies = [
        ('users', '0001_initial'),
        ('cards', '0003_card_user'),
    ]

    operations = [
        migrations.AlterField(
            model_name='card',
            name='user',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='cards', to='users.user'),
        ),
    ]
| StarcoderdataPython |
1660716 | import unittest
from main import tilingCost
class TestTiles(unittest.TestCase):
    """Tests for tilingCost(width, length, cost=...): expected totals for
    sample areas, and ValueError when either dimension is zero."""

    def test_Cost_of_tiles_1(self):
        self.assertEqual(1_700, tilingCost(4, 5, cost=25))

    def test_Cost_of_tiles_2(self):
        self.assertEqual(2_100, tilingCost(3, 7, cost=40))

    def test_Cost_of_tiles_3(self):
        self.assertEqual(1_680, tilingCost(4, 4, cost=45))

    def test_Raise_ValueError_on_zero_width(self):
        with self.assertRaises(ValueError):
            tilingCost(0, 4, cost=25)

    def test_Raise_ValueError_on_zero_length(self):
        with self.assertRaises(ValueError):
            tilingCost(4, 0, cost=25)
if __name__ == '__main__':
unittest.main() | StarcoderdataPython |
1697182 | <reponame>chudur-budur/visualization<gh_stars>0
"""scatter.py -- A customized scatter plotting module.
This module provides a customized scatter plotting functions for
high-dimensional Pareto-optimal fronts. It also provides different
relevant parameters, tools and utilities.
Copyright (C) 2016
Computational Optimization and Innovation (COIN) Laboratory
Department of Computer Science and Engineering
Michigan State University
428 S. Shaw Lane, Engineering Building
East Lansing, MI 48824-1226, USA
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as mc
from matplotlib.colors import ListedColormap
from mpl_toolkits.mplot3d import Axes3D
from viz.plotting.utils import pop, group_labels_by_appearance
__all__ = ["camera_angles", "plot", "camera_angles"]
# Some good camera angles for scatter plots.
camera_angles = {
'dtlz2': {'3d': (60,10), '4d':(105,15), '8d': (15,15)}, \
'dtlz2-nbi': {'3d': (60,10), '4d':(105,15), '5d': (105,15), '8d': (110,15)}, \
'debmdk': {'3d': (115,5), '4d': (105,15), '8d': (110,15)}, \
'debmdk-nbi': {'3d': (115,5), '4d': (105,15), '8d': (110,15)}, \
'debmdk-all': {'3d': (115,5), '4d': (105,15), '8d': (110,15)}, \
'debmdk-all-nbi': {'3d': (115,5), '4d': (105,15), '8d': (110,15)}, \
'dtlz8': {'3d': (110,15), '4d': (-65,15), '6d': (-65,15), '8d': (-65,15)}, \
'dtlz8-nbi': {'3d': (110,15), '4d': (-65,15), '6d': (-65,15), '8d': (-65,15)}, \
'c2dtlz2': {'3d': (30,10), '4d': (-75,15), '5d': (-65,15), '8d': (110,15)}, \
'c2dtlz2-nbi': {'3d': (30,10), '4d': (-75,15), '5d': (-65,15), '8d': (110,15)}, \
'cdebmdk': {'3d': (30,10), '4d': (-75,15), '8d': (110,15)}, \
'cdebmdk-nbi': {'3d': (30,10), '4d': (-75,15), '8d': (110,15)}, \
'c0dtlz2': {'3d': (30,10), '4d': (-75,15), '8d': (110,15)}, \
'c0dtlz2-nbi': {'3d': (30,10), '4d': (-75,15), '8d': (110,15)}, \
'crash-nbi': {'3d': (30,15)}, 'crash-c1-nbi': {'3d': (30,15)}, 'crash-c2-nbi': {'3d': (30,15)}, \
'gaa': {'10d': (-65,15)}, \
'gaa-nbi': {'10d': (-65,15)}
}
def plot(A, ax=None, s=1, c=mc.TABLEAU_COLORS['tab:blue'], **kwargs):
    r"""A scatter plot function.

    This uses `matplotlib.axes.Axes.scatter` function to do a scatter plot.

    Parameters
    ----------
    A : ndarray
        `n` number of `m` dim. points to be plotted.
    ax : An `mpl_toolkits.mplot3d.axes.Axes3D` or an `matplotlib.axes.Axes` object, optional
        Axes to be used for plotting. Default `None` when optional.
    s : int or 1-D array_like, optional
        Point size, or an array of point sizes. Default 1 when optional.
    c : A `matplotlib.colors` object, str or an array RGBA color values.
        Colors to be used. Default `mc.TABLEAU_COLORS['tab:blue']` when optional.

    Other Parameters
    ----------------
    euler : tuple (i.e. a pair) of int, optional
        The azimuth and elevation angle. Default `(-60,30)` when optional.
    title : str, optional
        The plot title. Default `None` when optional.
    axes : tuple of int, optional
        The list of columns of `A` to be plotted. Default `(0, 1, 2)` when optional.
    labels : str, array_like or list of str, optional
        A string or an array/list of strings for labeling each point. Which basically
        means the class label of each row. Default `None` when optional. This will be
        used to set the legend in the figure. If `None` there will be no legend.
    colorbar : (Cbc, Cbg, Cbl, Cbp) a tuple of (ndarray, ndarray, str, float), optional
        If a user wants to put a colorbar, a tuple `(Cbc, Cbg, Cbl)` tuple can be
        provided. `Cbc` is an array of RGBA color values or an `matplotlib.colors`
        object. The gradient of the colorbar is specified in `Cbg` which is an 1-D
        array of float. Cbl is the label of the colorbar, a string. `Cbp` is the
        colorbar padding width in `float`. `colorbar` is default `None` when optional.
    xlim : tuple (i.e. a pair) of int, optional
        The limits on the X-axis. Default `None` when optional.
    ylim : tuple (i.e. a pair) of int, optional
        The limits on the Y-axis. Default `None` when optional.
    zlim : tuple (i.e. a pair) of int, optional
        The limits on the Z-axis. Default `None` when optional.
    label_prefix : str, optional
        The axis-label-prefix to be used, default `r"$f_{{{:d}}}$"` when optional.
    label_fontsize : str or int, optional
        The fontsize for the axes labels. Default `'large'` when optional.
    **kwargs : dict
        All other keyword args for matplotlib's `scatter()` function.

    Returns
    -------
    ax : An `mpl_toolkits.mplot3d.axes.Axes3D` or `matplotlib.axes.Axes` object
        An `mpl_toolkits.mplot3d.axes.Axes3D` or an `matplotlib.axes.Axes` object.
    """

    # all other parameters
    euler = kwargs['euler'] if kwargs and 'euler' in kwargs else (-60, 30)
    title = kwargs['title'] if kwargs and 'title' in kwargs else None
    axes = kwargs['axes'] if kwargs and 'axes' in kwargs else (0, 1, 2)
    labels = kwargs['labels'] if kwargs and 'labels' in kwargs else None
    colorbar = kwargs['colorbar'] if kwargs and 'colorbar' in kwargs else None
    xlim = kwargs['xlim'] if kwargs and 'xlim' in kwargs else None
    ylim = kwargs['ylim'] if kwargs and 'ylim' in kwargs else None
    zlim = kwargs['zlim'] if kwargs and 'zlim' in kwargs else None
    label_prefix = kwargs['label_prefix'] if kwargs and 'label_prefix' in kwargs else r"$f_{{{:d}}}$"
    label_fontsize = kwargs['label_fontsize'] if kwargs and 'label_fontsize' in kwargs else 'large'

    # remove once they are read -- whatever is left in kwargs is forwarded
    # verbatim to matplotlib's scatter().
    kwargs = pop(kwargs, 'euler')
    kwargs = pop(kwargs, 'title')
    kwargs = pop(kwargs, 'axes')
    kwargs = pop(kwargs, 'labels')
    kwargs = pop(kwargs, 'colorbar')
    kwargs = pop(kwargs, 'xlim')
    kwargs = pop(kwargs, 'ylim')
    kwargs = pop(kwargs, 'zlim')
    kwargs = pop(kwargs, 'label_prefix')
    kwargs = pop(kwargs, 'label_fontsize')

    # decide on what kind of axes to use: a 3D axes when A has more than
    # two columns, a plain 2D axes otherwise
    if not ax:
        ax = Axes3D(plt.figure()) if A.shape[1] > 2 else plt.figure().gca()

    if ax:
        # do the plot
        if labels is not None:
            if isinstance(labels, str):
                # one label string for the whole point cloud
                if A.shape[1] > 2:
                    ax.scatter(A[:,axes[0]], A[:,axes[1]], A[:,axes[2]], s=s, c=c, label=labels, **kwargs)
                else:
                    ax.scatter(A[:,axes[0]], A[:,axes[1]], s=s, c=c, label=labels, **kwargs)
            else:
                # per-row labels: one scatter call per label group so that
                # each group gets its own legend entry and z-order
                if isinstance(labels, np.ndarray):
                    labels = labels.tolist()
                label_groups = group_labels_by_appearance(labels)
                for i,v in enumerate(label_groups):
                    if A.shape[1] > 2:
                        ax.scatter(A[v[0],axes[0]], A[v[0],axes[1]], A[v[0],axes[2]], \
                                s=s[v[0]], c=c[v[0]], label=v[1], zorder=label_groups.shape[0]-i, **kwargs)
                    else:
                        ax.scatter(A[v[0],axes[0]], A[v[0],axes[1]], s=s[v[0]], c=c[v[0]], \
                                label=v[1], zorder=label_groups.shape[0]-i, **kwargs)
        else:
            if A.shape[1] > 2:
                ax.scatter(A[:,axes[0]], A[:,axes[1]], A[:,axes[2]], s=s, c=c, **kwargs)
            else:
                ax.scatter(A[:,axes[0]], A[:,axes[1]], s=s, c=c, **kwargs)

        # set limits, put labels, fix labels, view and title
        if A.shape[1] > 2:
            ax.set_xlim(ax.get_xlim() if xlim is None else xlim)
            ax.set_ylim(ax.get_ylim() if ylim is None else ylim)
            ax.set_zlim(ax.get_zlim() if zlim is None else zlim)
            if len(axes) > 0:
                ax.set_xlabel(label_prefix.format(axes[0] + 1), fontsize=label_fontsize)
            if len(axes) > 1:
                ax.set_ylabel(label_prefix.format(axes[1] + 1), fontsize=label_fontsize)
            if len(axes) > 2:
                ax.set_zlabel(label_prefix.format(axes[2] + 1), fontsize=label_fontsize)
            ax.xaxis.set_rotate_label(False)
            ax.yaxis.set_rotate_label(False)
            ax.zaxis.set_rotate_label(False)
            ax.view_init(euler[1], euler[0])
            ax.set_title(title, pad=0.1)
        else:
            ax.set_xlim(ax.get_xlim() if xlim is None else xlim)
            ax.set_ylim(ax.get_ylim() if ylim is None else ylim)
            if len(axes) > 0:
                ax.set_xlabel(label_prefix.format(axes[0] + 1), fontsize=label_fontsize)
            if len(axes) > 1:
                ax.set_ylabel(label_prefix.format(axes[1] + 1), fontsize=label_fontsize)
            ax.set_title(title, y=ax.get_ylim()[-1]-0.05)

        # colorbar? build a discrete colormap from the (color, gradient)
        # pairs sorted by gradient value
        if colorbar and isinstance(colorbar, tuple) and len(colorbar) >= 2 \
            and isinstance(colorbar[0], np.ndarray) and isinstance(colorbar[1], np.ndarray):
            vmin,vmax = 0.0, 1.0
            cbc, cbg = colorbar[0], colorbar[1]
            cbl = colorbar[2] if len(colorbar) > 2 and colorbar[2] else None
            cbp = colorbar[3] if len(colorbar) > 3 and colorbar[3] else 0.01
            Id = np.column_stack((cbg,cbc)).astype(object)
            Id = Id[np.argsort(Id[:, 0])]
            c, g = Id[:,1:].astype(float), Id[:,0].astype(float)
            vmin, vmax = np.min(g), np.max(g)
            norm = mc.Normalize(vmin=vmin, vmax=vmax)
            cmap = ListedColormap(c)
            if cbl:
                ax.figure.colorbar(cm.ScalarMappable(norm=norm, cmap=cmap), \
                        orientation='vertical', label=cbl, pad=cbp, shrink=0.5)
            else:
                ax.figure.colorbar(cm.ScalarMappable(norm=norm, cmap=cmap), \
                        orientation='vertical', pad=cbp, shrink=0.5)

        # where to put the legend
        if labels:
            ax.legend(loc="best", ncol=1)
        return ax
    else:
        raise TypeError("A valid `mpl_toolkits.mplot3d.axes.Axes3D`/`matplotlib.axes.Axes` "
                + "object is not found.")
| StarcoderdataPython |
3375536 | from array import array
from random import random
from math import sin
import arcade
import imgui
from imflo.node import Node
from imflo.pin import Input
class MeterNode(Node):
    """Flow-graph node with one input pin that draws a demo sine-wave plot."""

    def __init__(self, page):
        super().__init__(page)
        # 100 samples of sin(0.1 * x) used as the plotted signal.
        self.values = array('f', [sin(x * 0.1) for x in range(100)])
        self.input = Input(self, 'input')
        self.add_pin(self.input)

    def draw(self):
        #imgui.set_next_window_position(self.window.width - 256 - 16, 32, imgui.ONCE)
        #imgui.set_next_window_size(256, 256, imgui.ONCE)
        imgui.begin("Meter")
        self.mark_input(self.input)
        imgui.text('input')
        imgui.same_line(spacing=16)
        imgui.plot_lines("Sin(t)", self.values)
        imgui.end()
| StarcoderdataPython |
3245565 | import vdomr as vd
import spiketoolkit as st
import spikeextractors as se
from .unitwaveformswidget import UnitWaveformsWidget
from .correlogramswidget import CorrelogramsWidget
from .timeserieswidget import TimeseriesWidget
class ButtonList(vd.Component):
    """Render a row of buttons from (label, callback) pairs."""

    def __init__(self, data):
        vd.Component.__init__(self)
        self._data = data

    def render(self):
        button_style = {'margin': '3px'}
        rendered = [
            vd.button(label, onclick=callback, style=button_style)
            for label, callback in self._data
        ]
        return vd.div(rendered)
class ScrollArea(vd.Component):
    """Wrap a child component in a fixed-size scrollable div."""

    def __init__(self, child, *, size):
        vd.Component.__init__(self)
        self._child = child
        self._size = size

    def render(self):
        width, height = self._size
        style = dict(overflow='auto',
                     width='{}px'.format(width),
                     height='{}px'.format(height))
        return vd.div(self._child, style=style)
class TabBarTab(vd.Component):
    """A single clickable tab that highlights itself when selected."""

    def __init__(self, *, height, label, key):
        vd.Component.__init__(self)
        self._height = height
        self._label = label
        self._key = key
        self._selected = False
        self._click_handlers = []

    def key(self):
        return self._key

    def setSelected(self, selected):
        self._selected = selected
        self.refresh()

    def onClick(self, handler):
        self._click_handlers.append(handler)

    def _on_click(self):
        for handler in self._click_handlers:
            handler()

    def render(self):
        css_classes = 'tabbartab selected' if self._selected else 'tabbartab'
        style = dict(
            border='solid 1px gray',
            height='{}px'.format(self._height),
            cursor='pointer'
        )
        return vd.div(self._label, style=style, class_=css_classes, onclick=self._on_click)
class TabBar(vd.Component):
    """Horizontal bar of TabBarTab components with a single current tab."""

    def __init__(self, height=25):
        vd.Component.__init__(self)
        self._height = height
        self._tabs = []
        self._current_tab = None
        self._current_tab_changed_handlers = []

    def height(self):
        return self._height

    def onCurrentTabChanged(self, handler):
        self._current_tab_changed_handlers.append(handler)

    def setCurrentTab(self, tab):
        # No-op when the tab is already current -- avoids redundant
        # refreshes and handler notifications.
        if tab == self._current_tab:
            return
        self._current_tab = tab
        # NOTE: the loop variable shadows the parameter below; `tab` is not
        # used again afterwards, so this is harmless.
        for tab in self._tabs:
            if tab == self._current_tab:
                tab.setSelected(True)
            else:
                tab.setSelected(False)
        for handler in self._current_tab_changed_handlers:
            handler()

    def currentTabKey(self):
        # Returns the key object the current tab was registered with
        # (None when no tab is current).
        if not self._current_tab:
            return None
        return self._current_tab.key()

    def addTab(self, key, label):
        # The newly added tab immediately becomes the current one.
        tab = TabBarTab(height=self._height, label=label, key=key)
        def set_current_tab():
            self.setCurrentTab(tab)
            self.refresh()
        tab.onClick(set_current_tab)
        self._tabs.append(tab)
        self.setCurrentTab(tab)
        self.refresh()

    def render(self):
        style0 = dict(
            position='absolute',
            left='0px',
            bottom='0px',
            height='{}px'.format(self._height)
        )
        divs = [vd.div(tab, style={'float':'left'}) for tab in self._tabs]
        return vd.div(divs, style=style0)
class ViewFrame(vd.Component):
    """Thin wrapper that renders exactly one child view inside a div."""

    def __init__(self, child):
        vd.Component.__init__(self)
        self._child = child

    def child(self):
        # The wrapped view component.
        return self._child

    def render(self):
        return vd.div(self._child)
class Container(vd.Component):
    """Fixed-size, positioned div holding child components.

    :param position: (left, top) in pixels
    :param size: (width, height) in pixels
    :param position_mode: CSS position mode, default 'absolute'
    :param style: extra CSS properties merged into the computed style
    """

    def __init__(self, *args, position, size, position_mode='absolute', style=None):
        vd.Component.__init__(self)
        self._children = list(args)
        self._position = position
        self._size = size
        self._position_mode = position_mode
        # BUGFIX: the default used to be a mutable `dict()` shared across all
        # instances and mutated in render(), leaking styles between containers.
        self._style = style if style is not None else {}

    def render(self):
        # Work on a copy so the caller-supplied style dict is never mutated.
        style = dict(self._style)
        style['position'] = self._position_mode
        style['width'] = '{}px'.format(self._size[0])
        style['height'] = '{}px'.format(self._size[1])
        style['left'] = '{}px'.format(self._position[0])
        style['top'] = '{}px'.format(self._position[1])
        return vd.div(
            self._children,
            style=style
        )
class ViewLauncher(vd.Component):
    """Button list that notifies handlers to launch a registered view class."""

    def __init__(self, context, view_classes):
        vd.Component.__init__(self)
        self._context = context
        self._registered_view_classes = view_classes
        self._launch_view_handlers = []

    def render(self):
        entries = [
            (view_class.LABEL, self._create_launch_function(view_class))
            for view_class in self._registered_view_classes
        ]
        return vd.div(ButtonList(entries))

    def onLaunchView(self, handler):
        self._launch_view_handlers.append(handler)

    def _create_launch_function(self, view_class):
        # Bind view_class into a closure so each button launches its own view.
        def launch():
            for handler in self._launch_view_handlers:
                handler(view_class)
        return launch
class HighlightBox(vd.Component):
    """Full-size background overlay that optionally draws a highlight border."""

    def __init__(self):
        vd.Component.__init__(self)
        self._highlight = True

    def setHighlight(self, val):
        if self._highlight != val:
            self._highlight = val
            self.refresh()

    def render(self):
        style = dict(width='100%', height='100%', position='absolute')
        style['z-index'] = '-1'  # stay behind the content
        if self._highlight:
            style['border'] = 'solid 2px black'
        return vd.div(style=style)
class ViewContainer(vd.Component):
    """Tabbed container: each added view is wrapped in a ViewFrame and gets
    a tab in the internal TabBar (the view object is the tab key)."""

    def __init__(self):
        vd.Component.__init__(self)
        self._view_frames = []
        self._current_frame = None
        self._size = (0, 0)
        self._tab_bar = TabBar()
        self._tab_bar.onCurrentTabChanged(self._on_current_tab_changed)
        self._click_handlers = []
        self._highlight_box = HighlightBox()
        # NOTE: a dead method `_current_frame(self)` that was shadowed by the
        # instance attribute of the same name has been removed.

    def onClick(self, handler):
        self._click_handlers.append(handler)

    def setSize(self, size):
        self._size = size

    def addView(self, view):
        view.setSize((self._size[0] - self._tab_bar.height() - 10,
                      self._size[1] - self._tab_bar.height() - 10))
        frame = ViewFrame(view)
        self._view_frames.append(frame)
        self._tab_bar.addTab(view, view.tabLabel())
        self._current_frame = frame
        self.refresh()

    def setHighlight(self, val):
        self._highlight_box.setHighlight(val)

    def _frame_for_view(self, view):
        # Map a tab key (the view object) back to its wrapping ViewFrame.
        for frame in self._view_frames:
            if frame.child() is view:
                return frame
        return None

    def _on_current_tab_changed(self):
        # BUGFIX: the tab key is the *view*, not a ViewFrame; previously the
        # raw key was stored in _current_frame, which broke currentView()
        # (it calls .child() on the stored object).
        self._current_frame = self._frame_for_view(self._tab_bar.currentTabKey())
        self.refresh()

    def currentView(self):
        f = self._current_frame
        if not f:
            return None
        return f.child()

    def _on_click(self):
        for handler in self._click_handlers:
            handler()

    def render(self):
        f = self._current_frame
        style0 = dict(width='100%', height='100%', position='absolute')
        style1 = dict(left='5px', right='5px', top='5px', bottom='5px', position='absolute')
        onclick = self._on_click
        if not f:
            style1['background-color'] = 'lightgray'
            f = ''
        return vd.div(self._highlight_box, vd.div(f, style=style1), self._tab_bar,
                      style=style0, onclick=onclick)
class VIEW_GeneralInfo(vd.Component):
    """Summary table about the context's recording and, when present,
    its sorting result."""
    LABEL='General info'
    def __init__(self,context):
        vd.Component.__init__(self)
        self._context=context
        self._size=(300,300)
    def tabLabel(self):
        return 'General info'
    def setSize(self,size):
        self._size=size
    def render(self):
        rec=self._context.recording
        res=self._context.sorting_result
        rows=[]
        rows.append(vd.tr(
            vd.th('Study'),vd.td(rec.study().name())
        ))
        rows.append(vd.tr(
            vd.th('Recording'),vd.td(rec.name())
        ))
        rows.append(vd.tr(
            vd.th('Directory'),vd.td(rec.directory())
        ))
        true_units=rec.trueUnitsInfo(format='json')
        rows.append(vd.tr(
            vd.th('Num. true units'),vd.td('{}'.format(len(true_units)))
        ))
        RX=rec.recordingExtractor()
        rows.append(vd.tr(
            vd.th('Num. channels'),vd.td('{}'.format(len(RX.getChannelIds())))
        ))
        rows.append(vd.tr(
            vd.th('Samplerate'),vd.td('{}'.format(RX.getSamplingFrequency()))
        ))
        # Show a download link when the raw recording is not on disk.
        # NOTE(review): _on_download_recording_file is not defined in this
        # class -- presumably provided elsewhere; verify before relying on it.
        recording_file_is_local=rec.recordingFileIsLocal()
        if recording_file_is_local:
            elmt='True'
        else:
            elmt=vd.span('False',' ',vd.a('(download)',onclick=self._on_download_recording_file))
        rows.append(vd.tr(
            vd.th('raw.mda is downloaded'),vd.td(elmt))
        )
        # Same pattern for the ground-truth firings file.
        firings_true_file_is_local=rec.firingsTrueFileIsLocal()
        if firings_true_file_is_local:
            elmt='True'
        else:
            elmt=vd.span('False',' ',vd.a('(download)',onclick=self._on_download_firings_true_file))
        rows.append(vd.tr(
            vd.th('firings_true.mda is downloaded'),vd.td(elmt))
        )
        # Sorting-result rows only exist when a result is attached.
        if res:
            rows.append(vd.tr(
                vd.th('Sorting result'),vd.td(res.sorterName())
            ))
            sorting=res.sorting()
            rows.append(vd.tr(
                vd.th('Num. sorted units'),vd.td('{}'.format(len(sorting.getUnitIds())))
            ))
        table=vd.table(rows,style={'text-align':'left','width':'auto','font-size':'13px'},class_='table')
        return ScrollArea(vd.div(table),size=self._size)
class VIEW_Timeseries(vd.Component):
    """View of a bandpass-filtered (300-6000 Hz) snippet of the recording."""
    LABEL='Timeseries'
    def __init__(self,context):
        vd.Component.__init__(self)
        self._context=context
        rx=self._context.recording.recordingExtractor()
        sf=rx.getSamplingFrequency()
        print(self._context.recording.recordingFileIsLocal())
        # Show 10 seconds when the raw file is local; only 1 second when it
        # would have to be fetched remotely.
        if self._context.recording.recordingFileIsLocal():
            rx=se.SubRecordingExtractor(parent_recording=rx,start_frame=int(sf*0),end_frame=int(sf*10))
        else:
            rx=se.SubRecordingExtractor(parent_recording=rx,start_frame=int(sf*0),end_frame=int(sf*1))
        rx=st.preprocessing.bandpass_filter(recording=rx,freq_min=300,freq_max=6000)
        self._timeseries_widget=TimeseriesWidget(recording=rx)
    def tabLabel(self):
        return 'Timeseries'
    def setSize(self,size):
        # Size is delegated straight to the inner widget.
        self._timeseries_widget.setSize(size)
    def render(self):
        return vd.div(self._timeseries_widget)
class VIEW_TrueUnitWaveforms(vd.Component):
    """Waveforms of the ground-truth units; selection synced with context."""
    LABEL='True waveforms'
    def __init__(self,context):
        vd.Component.__init__(self)
        self._context=context
        rx=self._context.recording.recordingExtractor()
        sf=rx.getSamplingFrequency()
        rx=st.preprocessing.bandpass_filter(recording=rx,freq_min=300,freq_max=6000)
        sx=self._context.recording.sortingTrue()
        self._widget=UnitWaveformsWidget(recording=rx,sorting=sx)
        self._size=(300,300)
        # Keep widget selection in sync with the shared context selection.
        self._update_selection()
        self._context.onSelectionChanged(self._update_selection)
    def _update_selection(self):
        self._widget.setSelectedUnitIds(self._context.selectedTrueUnitIds())
    def tabLabel(self):
        return 'True waveforms'
    def setSize(self,size):
        self._size=size
        self.refresh()
    def render(self):
        return ScrollArea(self._widget,size=self._size)
class VIEW_UnitWaveforms(vd.Component):
    """Waveforms of the sorted units; selection synced with context."""
    LABEL='Waveforms'
    def __init__(self,context):
        vd.Component.__init__(self)
        self._context=context
        rx=self._context.recording.recordingExtractor()
        sf=rx.getSamplingFrequency()
        rx=st.preprocessing.bandpass_filter(recording=rx,freq_min=300,freq_max=6000)
        sx=self._context.sorting_result.sorting()
        self._widget=UnitWaveformsWidget(recording=rx,sorting=sx)
        self._size=(300,300)
        # Keep widget selection in sync with the shared context selection.
        self._update_selection()
        self._context.onSelectionChanged(self._update_selection)
    def _update_selection(self):
        self._widget.setSelectedUnitIds(self._context.selectedUnitIds())
    def tabLabel(self):
        return 'Waveforms'
    def setSize(self,size):
        self._size=size
        self.refresh()
    def render(self):
        return ScrollArea(self._widget,size=self._size)
class VIEW_TrueAutocorrelograms(vd.Component):
    """Autocorrelograms of the ground-truth units; selection synced with context."""
    LABEL='True autocorrelograms'
    def __init__(self,context):
        vd.Component.__init__(self)
        self._context=context
        rx=self._context.recording.recordingExtractor()
        sf=rx.getSamplingFrequency()
        sx=self._context.recording.sortingTrue()
        self._widget=CorrelogramsWidget(sorting=sx,samplerate=sf)
        self._size=(300,300)
        # Keep widget selection in sync with the shared context selection.
        self._update_selection()
        self._context.onSelectionChanged(self._update_selection)
    def _update_selection(self):
        self._widget.setSelectedUnitIds(self._context.selectedTrueUnitIds())
    def tabLabel(self):
        return 'True autocorrelograms'
    def setSize(self,size):
        self._size=size
        self.refresh()
    def render(self):
        return ScrollArea(self._widget,size=self._size)
class VIEW_Autocorrelograms(vd.Component):
    """Autocorrelograms of the sorted units; selection synced with context."""
    LABEL='Autocorrelograms'
    def __init__(self,context):
        vd.Component.__init__(self)
        self._context=context
        rx=self._context.recording.recordingExtractor()
        sf=rx.getSamplingFrequency()
        sx=self._context.sorting_result.sorting()
        self._widget=CorrelogramsWidget(sorting=sx,samplerate=sf)
        self._size=(300,300)
        # Keep widget selection in sync with the shared context selection.
        self._update_selection()
        self._context.onSelectionChanged(self._update_selection)
    def _update_selection(self):
        self._widget.setSelectedUnitIds(self._context.selectedUnitIds())
    def tabLabel(self):
        return 'Autocorrelograms'
    def setSize(self,size):
        self._size=size
        self.refresh()
    def render(self):
        return ScrollArea(self._widget,size=self._size)
class Context():
    """Shared state for the views: the recording / sorting result under
    inspection plus the currently selected unit ids (true and sorted)."""

    def __init__(self):
        # BUGFIX: these were plain local variables, so instances started
        # without the attributes that the VIEW_* classes read
        # (self._context.recording / self._context.sorting_result).
        self.sorting_result = None
        self.recording = None
        self._selected_true_unit_ids = []
        self._selected_unit_ids = []
        self._selection_changed_handlers = []

    def selectedTrueUnitIds(self):
        return self._selected_true_unit_ids

    def selectedUnitIds(self):
        return self._selected_unit_ids

    def setSelectedTrueUnitIds(self, ids):
        self._selected_true_unit_ids = ids
        for handler in self._selection_changed_handlers:
            handler()

    def setSelectedUnitIds(self, ids):
        self._selected_unit_ids = ids
        for handler in self._selection_changed_handlers:
            handler()

    def onSelectionChanged(self, handler):
        self._selection_changed_handlers.append(handler)
def _f3(num):
return '{:.5g}'.format(float(num))
class CheckBox(vd.Component):
    """Checkbox component that tracks its own checked state and notifies
    registered handlers when toggled."""
    def __init__(self,checked=False,onchange=None,**kwargs):
        vd.Component.__init__(self)
        # Extra kwargs are forwarded verbatim to the underlying vd.input.
        self._kwargs=kwargs
        self._checked=checked
        self._on_change_handlers=[]
        if onchange:
            self._on_change_handlers.append(onchange)
    def checked(self):
        return self._checked
    def setChecked(self,val):
        self._checked=val
        self.refresh()
    def onChange(self,handler):
        self._on_change_handlers.append(handler)
    def _onchange(self,value): # somehow the value is useless here, so we just toggle
        self._checked = (not self._checked)
        for handler in self._on_change_handlers:
            handler()
    def render(self):
        # HTML convention: presence of the 'checked' attribute marks the box.
        attrs=dict()
        if self._checked:
            attrs['checked']='checked'
        X=vd.input(type='checkbox',**attrs,onchange=self._onchange,**self._kwargs)
        return X
class Table(vd.Component):
    """Generic HTML table with optional per-row checkbox selection."""
    def __init__(self):
        vd.Component.__init__(self)
        self._column_labels=[]
        self._rows=[]
        self._size=(300,300)
        self._selection_mode='none' # 'none', 'single', 'multiple'
        self._selection_changed_handlers=[]
    def setColumnLabels(self,labels):
        self._column_labels=labels
    def clearRows(self):
        self._rows=[]
    def addRow(self,*,id,values):
        # Each row carries its own CheckBox; toggling it fires the table's
        # selection-changed handlers.
        cb=CheckBox(onchange=self._on_checkbox_changed)
        row0=dict(
            values=values,
            id=id,
            checkbox=cb
        )
        self._rows.append(row0)
    def setSize(self,size):
        self._size=size
    def setSelectionMode(self,mode):
        self._selection_mode=mode
    def selectedRowIds(self):
        # Ids of all rows whose checkbox is currently checked.
        ret=[]
        for row in self._rows:
            if row['checkbox'].checked():
                ret.append(row['id'])
        return ret
    def setSelectedRowIds(self,ids):
        for row in self._rows:
            row['checkbox'].setChecked(row['id'] in ids)
    def onSelectionChanged(self,handler):
        self._selection_changed_handlers.append(handler)
    def _on_checkbox_changed(self):
        for handler in self._selection_changed_handlers:
            handler()
    def render(self):
        self._checkboxes=[]  # NOTE(review): written but never read -- looks vestigial
        rows=[]
        elmts=[vd.th(val) for val in self._column_labels]
        if self._selection_mode=='multiple':
            elmts = [vd.th('')] + elmts
        rows.append(vd.tr(elmts))
        for row in self._rows:
            elmts=[vd.td(str(val)) for val in row['values']]
            if self._selection_mode=='multiple':
                # Prepend the selection checkbox column.
                elmts = [vd.td(row['checkbox'])] + elmts
            rows.append(vd.tr(elmts))
        table=vd.table(rows,class_='table')
        return ScrollArea(vd.div(table),size=self._size)
class VIEW_TrueUnitsTable(Table):
    """Selectable table of ground-truth units, annotated with
    comparison-with-truth metrics when a sorting result is available.

    BUGFIX: removed stray '| StarcoderdataPython' text fused onto the final
    statement (it would raise NameError at runtime), and _comparison_by_unit
    is now always initialized even when loading the units info fails.
    """
    LABEL='True units table'

    def __init__(self,context):
        Table.__init__(self)
        self.setSelectionMode('multiple')
        self._context=context
        # Always defined so _update() can rely on it.
        self._comparison_by_unit=dict()
        try:
            self._true_units_info=context.recording.trueUnitsInfo(format='json')
            SR=self._context.sorting_result
            cwt_list=SR.comparisonWithTruth(format='json')
            # Index comparison entries by ground-truth unit id.
            for i in cwt_list:
                item=cwt_list[i]
                self._comparison_by_unit[item['unit_id']]=item
        except Exception as err:
            print('warning: ',err)
            self._true_units_info=None
        self.onSelectionChanged(self._on_selection_changed)
        self._context.onSelectionChanged(self._update_selection)
        self._update()

    def _update_selection(self):
        # Context -> table: reflect the shared selection in the checkboxes.
        self.setSelectedRowIds(self._context.selectedTrueUnitIds())

    def tabLabel(self):
        return 'True units'

    def _on_selection_changed(self):
        # Table -> context: publish the checkbox selection.
        self._context.setSelectedTrueUnitIds(self.selectedRowIds())

    def _update(self):
        # Rebuild all rows from the cached true-units info.
        self.setColumnLabels([
            'Unit ID','SNR','Peak channel',
            'Num. events','Firing rate','Accuracy',
            'Best unit','Matched unit',
            'False neg rate','False pos rate',
            'Num matches','Num false neg','Num false pos'
        ])
        self.clearRows()
        if not self._true_units_info:
            print('WARNING: _true_units_info is null.')
            return
        for unit in self._true_units_info:
            unit_id=unit['unit_id']
            if unit_id in self._comparison_by_unit:
                item=self._comparison_by_unit[unit_id]
                accuracy=item['accuracy']
                best_unit=item['best_unit']
                matched_unit=item['matched_unit']
                f_n=item['f_n']
                f_p=item['f_p']
                num_matches=item['num_matches']
                num_false_negatives=item.get('num_false_negatives','')
                num_false_positives=item.get('num_false_positives','')
            else:
                # No comparison data for this unit -- show blanks.
                accuracy=''
                best_unit=''
                matched_unit=''
                f_n=''
                f_p=''
                num_matches=''
                num_false_negatives=''
                num_false_positives=''
            self.addRow(
                id=unit_id,
                values=[
                    unit_id,
                    _f3(unit['snr']),
                    unit['peak_channel'],
                    unit['num_events'],
                    _f3(unit['firing_rate']),
                    _f3(accuracy),
                    best_unit,
                    matched_unit,
                    _f3(f_n),
                    _f3(f_p),
                    num_matches,
                    num_false_negatives,
                    num_false_positives
                ]
            )
        self._update_selection()
        self.refresh()
1707449 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Dataent Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import os
from six import iteritems
import logging
from werkzeug.wrappers import Request
from werkzeug.local import LocalManager
from werkzeug.exceptions import HTTPException, NotFound
from werkzeug.contrib.profiler import ProfilerMiddleware
from werkzeug.wsgi import SharedDataMiddleware
import dataent
import dataent.handler
import dataent.auth
import dataent.api
import dataent.utils.response
import dataent.website.render
from dataent.utils import get_site_name
from dataent.middlewares import StaticDataMiddleware
from dataent.utils.error import make_error_snapshot
from dataent.core.doctype.communication.comment import update_comments_in_parent_after_request
from dataent import _
# imports - third-party imports
import pymysql
from pymysql.constants import ER
# imports - module imports
# Releases dataent.local (thread/greenlet-local state) after each request.
local_manager = LocalManager([dataent.local])
# Optional fixed site name and sites directory, overridden by serve().
_site = None
_sites_path = os.environ.get("SITES_PATH", ".")
class RequestContext(object):
    """Context manager: binds a WSGI *environ*'s request to dataent on entry
    and tears the site/local state down on exit."""
    def __init__(self, environ):
        self.request = Request(environ)
    def __enter__(self):
        init_request(self.request)
    def __exit__(self, type, value, traceback):
        # Always clean up dataent.local, even when the body raised.
        dataent.destroy()
@Request.application
def application(request):
    """WSGI entry point: dispatch *request* to the matching dataent handler
    and guarantee rollback of uncommitted writes plus local-state cleanup."""
    response = None
    try:
        # Assume rollback; after_request() clears this once a commit happens.
        rollback = True
        init_request(request)
        if dataent.local.form_dict.cmd:
            # RPC-style call selected by the ?cmd=<dotted.path> parameter.
            response = dataent.handler.handle()
        elif dataent.request.path.startswith("/api/"):
            if dataent.local.form_dict.data is None:
                dataent.local.form_dict.data = request.get_data()
            response = dataent.api.handle()
        elif dataent.request.path.startswith('/backups'):
            response = dataent.utils.response.download_backup(request.path)
        elif dataent.request.path.startswith('/private/files/'):
            response = dataent.utils.response.download_private_file(request.path)
        elif dataent.local.request.method in ('GET', 'HEAD', 'POST'):
            # Everything else is rendered as a website page.
            response = dataent.website.render.render()
        else:
            raise NotFound
    except HTTPException as e:
        # werkzeug HTTP exceptions are themselves valid WSGI responses.
        return e
    except dataent.SessionStopped as e:
        response = dataent.utils.response.handle_session_stopped()
    except Exception as e:
        response = handle_exception(e)
    else:
        rollback = after_request(rollback)
    finally:
        # Roll back any writes that were never committed.
        if dataent.local.request.method in ("POST", "PUT") and dataent.db and rollback:
            dataent.db.rollback()
        # set cookies
        if response and hasattr(dataent.local, 'cookie_manager'):
            dataent.local.cookie_manager.flush_cookies(response=response)
        dataent.destroy()
    return response
def init_request(request):
    """Bind *request* to dataent.local and initialize the target site.

    Raises NotFound when the site cannot be resolved and
    dataent.SessionStopped when the site is in maintenance mode.
    """
    dataent.local.request = request
    dataent.local.is_ajax = dataent.get_request_header("X-Requested-With")=="XMLHttpRequest"
    # Site resolution order: explicit override > request header > host name.
    site = _site or request.headers.get('X-Dataent-Site-Name') or get_site_name(request.host)
    dataent.init(site=site, sites_path=_sites_path)
    if not (dataent.local.conf and dataent.local.conf.db_name):
        # site does not exist
        raise NotFound
    if dataent.local.conf.get('maintenance_mode'):
        raise dataent.SessionStopped
    make_form_dict(request)
    # Sets up DB connection, session and authentication for this request.
    dataent.local.http_request = dataent.auth.HTTPRequest()
def make_form_dict(request):
    """Parse the request payload (JSON body, form data, or query args)
    into dataent.local.form_dict."""
    import json
    if 'application/json' in (request.content_type or '') and request.data:
        args = json.loads(request.data)
    else:
        args = request.form or request.args
    try:
        # Flatten single-element list/tuple values (werkzeug MultiDict style).
        dataent.local.form_dict = dataent._dict({ k:v[0] if isinstance(v, (list, tuple)) else v \
            for k, v in iteritems(args) })
    except IndexError:
        # An empty list/tuple value: fall back to storing the raw mapping.
        dataent.local.form_dict = dataent._dict(args)
    if "_" in dataent.local.form_dict:
        # _ is passed by $.ajax so that the request is not cached by the browser. So, remove _ from form_dict
        dataent.local.form_dict.pop("_")
def handle_exception(e):
    """Convert an unhandled exception into an HTTP response.

    AJAX/JSON clients get a serialized error report; browser requests get a
    rendered web page. May return None when dataent.respond_as_web_page has
    already prepared the response elsewhere.
    """
    response = None
    # Domain exceptions may carry their own status; default to 500.
    http_status_code = getattr(e, "http_status_code", 500)
    return_as_message = False
    if dataent.get_request_header('Accept') and (dataent.local.is_ajax or 'application/json' in dataent.get_request_header('Accept')):
        # handle ajax responses first
        # if the request is ajax, send back the trace or error message
        response = dataent.utils.response.report_error(http_status_code)
    elif (http_status_code==500
        and isinstance(e, pymysql.InternalError)
        and e.args[0] in (ER.LOCK_WAIT_TIMEOUT, ER.LOCK_DEADLOCK)):
        # Deadlock / lock-wait timeout: surface as 508 so clients may retry.
        http_status_code = 508
    elif http_status_code==401:
        dataent.respond_as_web_page(_("Session Expired"),
            _("Your session has expired, please login again to continue."),
            http_status_code=http_status_code, indicator_color='red')
        return_as_message = True
    elif http_status_code==403:
        dataent.respond_as_web_page(_("Not Permitted"),
            _("You do not have enough permissions to complete the action"),
            http_status_code=http_status_code, indicator_color='red')
        return_as_message = True
    elif http_status_code==404:
        dataent.respond_as_web_page(_("Not Found"),
            _("The resource you are looking for is not available"),
            http_status_code=http_status_code, indicator_color='red')
        return_as_message = True
    else:
        # Generic server error: show the traceback unless it is disabled.
        traceback = "<pre>"+dataent.get_traceback()+"</pre>"
        if dataent.local.flags.disable_traceback:
            traceback = ""
        dataent.respond_as_web_page("Server Error",
            traceback, http_status_code=http_status_code,
            indicator_color='red', width=640)
        return_as_message = True
    if e.__class__ == dataent.AuthenticationError:
        if hasattr(dataent.local, "login_manager"):
            # Failed authentication: drop the session cookies.
            dataent.local.login_manager.clear_cookies()
    if http_status_code >= 500:
        # Server-side failures are logged and snapshotted for diagnosis.
        dataent.logger().error('Request Error', exc_info=True)
        make_error_snapshot(e)
    if return_as_message:
        response = dataent.website.render.render("message",
            http_status_code=http_status_code)
    return response
def after_request(rollback):
    """Commit DB writes for mutating requests and session updates.

    Returns the possibly-updated *rollback* flag: False once a commit has
    happened, so the caller's finally-block won't roll it back.
    """
    if (dataent.local.request.method in ("POST", "PUT") or dataent.local.flags.commit) and dataent.db:
        if dataent.db.transaction_writes:
            dataent.db.commit()
            rollback = False
    # update session
    if getattr(dataent.local, "session_obj", None):
        updated_in_db = dataent.local.session_obj.update()
        if updated_in_db:
            dataent.db.commit()
            rollback = False
    update_comments_in_parent_after_request()
    return rollback
# Wrap the WSGI callable so local_manager releases dataent.local per request.
application = local_manager.make_middleware(application)
def serve(port=8000, profile=False, no_reload=False, no_threading=False, site=None, sites_path='.'):
    """Run the development server (werkzeug run_simple) for a site.

    Optionally wraps the app with a profiler and with static-file
    middlewares for /assets and /files.
    """
    global application, _site, _sites_path
    _site = site
    _sites_path = sites_path
    from werkzeug.serving import run_simple
    if profile:
        application = ProfilerMiddleware(application, sort_by=('cumtime', 'calls'))
    if not os.environ.get('NO_STATICS'):
        # Serve static paths directly when no front-end proxy handles them.
        application = SharedDataMiddleware(application, {
            '/assets': os.path.join(sites_path, 'assets'),
        })
        application = StaticDataMiddleware(application, {
            '/files': os.path.abspath(sites_path)
        })
    application.debug = True
    application.config = {
        'SERVER_NAME': 'localhost:8000'
    }
    in_test_env = os.environ.get('CI')
    if in_test_env:
        # Quiet werkzeug's per-request logging on CI.
        log = logging.getLogger('werkzeug')
        log.setLevel(logging.ERROR)
    run_simple('0.0.0.0', int(port), application,
        use_reloader=False if in_test_env else not no_reload,
        use_debugger=not in_test_env,
        use_evalex=not in_test_env,
        threaded=not no_threading)
| StarcoderdataPython |
8820 | from typing import (Any, Union, Type) # noqa: F401
from ..keys.datatypes import (
LazyBackend,
PublicKey,
PrivateKey,
Signature,
)
from eth_keys.exceptions import (
ValidationError,
)
from eth_keys.validation import (
validate_message_hash,
)
# These must be aliased due to a scoping issue in mypy
# https://github.com/python/mypy/issues/1775
_PublicKey = PublicKey
_PrivateKey = PrivateKey
_Signature = Signature
class KeyAPI(LazyBackend):
    """High-level ECDSA operations, proxied to a lazily-configured backend.

    Argument types and backend return types are validated at this boundary
    so backends can be swapped without weakening the public contract.
    """
    #
    # datatype shortcuts
    #
    PublicKey = PublicKey # type: Type[_PublicKey]
    PrivateKey = PrivateKey # type: Type[_PrivateKey]
    Signature = Signature # type: Type[_Signature]
    #
    # Proxy method calls to the backends
    #
    def ecdsa_sign(self,
                   message_hash, # type: bytes
                   private_key # type: _PrivateKey
                   ):
        # type: (...) -> _Signature
        """Sign *message_hash* with *private_key* via the backend.

        Raises ValidationError for an invalid hash, a wrong private-key
        type, or a backend that returns a non-Signature value.
        """
        validate_message_hash(message_hash)
        if not isinstance(private_key, PrivateKey):
            raise ValidationError(
                "The `private_key` must be an instance of `eth_keys.datatypes.PrivateKey`"
            )
        signature = self.backend.ecdsa_sign(message_hash, private_key)
        if not isinstance(signature, Signature):
            raise ValidationError(
                "Backend returned an invalid signature. Return value must be "
                "an instance of `eth_keys.datatypes.Signature`"
            )
        return signature
    def ecdsa_verify(self,
                     message_hash, # type: bytes
                     signature, # type: _Signature
                     public_key # type: _PublicKey
                     ) -> bool:
        """Return True iff *signature* over *message_hash* was produced by
        the holder of *public_key* (checked via key recovery)."""
        if not isinstance(public_key, PublicKey):
            raise ValidationError(
                "The `public_key` must be an instance of `eth_keys.datatypes.PublicKey`"
            )
        # Recover-and-compare; ecdsa_recover validates hash and signature.
        return self.ecdsa_recover(message_hash, signature) == public_key
    def ecdsa_recover(self,
                      message_hash, # type: bytes
                      signature # type: _Signature
                      ):
        # type: (...) -> _PublicKey
        """Recover and return the public key that signed *message_hash*."""
        validate_message_hash(message_hash)
        if not isinstance(signature, Signature):
            raise ValidationError(
                "The `signature` must be an instance of `eth_keys.datatypes.Signature`"
            )
        public_key = self.backend.ecdsa_recover(message_hash, signature)
        if not isinstance(public_key, _PublicKey):
            raise ValidationError(
                "Backend returned an invalid public_key. Return value must be "
                "an instance of `eth_keys.datatypes.PublicKey`"
            )
        return public_key
    def private_key_to_public_key(self, private_key):
        """Derive and return the PublicKey for *private_key*."""
        if not isinstance(private_key, PrivateKey):
            raise ValidationError(
                "The `private_key` must be an instance of `eth_keys.datatypes.PrivateKey`"
            )
        public_key = self.backend.private_key_to_public_key(private_key)
        if not isinstance(public_key, PublicKey):
            raise ValidationError(
                "Backend returned an invalid public_key. Return value must be "
                "an instance of `eth_keys.datatypes.PublicKey`"
            )
        return public_key
# This creates an easy to import backend which will lazily fetch whatever
# backend has been configured at runtime (as opposed to import or instantiation time).
lazy_key_api = KeyAPI(backend=None)
| StarcoderdataPython |
1636553 | import pathlib
import tempfile
from flask import Flask
from flask_restful import Resource, Api, reqparse
from werkzeug.datastructures import FileStorage
from predict import predict
# Flask application plus the flask-restful wrapper hosting the REST resources.
app = Flask(__name__)
api = Api(app)
# Accepted upload suffixes (compared against the uploaded filename's extension).
ALLOWED_EXTENSIONS = ['.jpeg']
class ClassifyImage(Resource):
    """REST resource: accept an uploaded image and return model predictions."""
    def post(self):
        """
        Get classification of image.

        Expects a multipart/form-data body with a 'file' field. Returns the
        model's predictions, or an error message with HTTP 400 when the file
        is missing or has an unsupported extension.
        """
        # Parse arguments and obtain file name.
        parse = reqparse.RequestParser()
        parse.add_argument('file', type=FileStorage, location='files')
        args = parse.parse_args()
        fs_object = args['file']
        # Reject a missing upload explicitly instead of crashing with
        # AttributeError on fs_object.filename below.
        if fs_object is None:
            return "No file uploaded under the 'file' field.", 400
        # Case-insensitive suffix check ('.JPEG' is accepted too), and
        # return 400 instead of an implicit 200 for the error message.
        if pathlib.Path(fs_object.filename).suffix.lower() not in ALLOWED_EXTENSIONS:
            return f"File extension must be: {ALLOWED_EXTENSIONS}", 400
        # Spool to a temp file so the predictor can read from a real path.
        with tempfile.NamedTemporaryFile() as temp_img_file:
            fs_object.save(temp_img_file.name)
            predictions = predict.get_pred_from_file(temp_img_file.name)
        return predictions
# Expose the classifier at POST /classify/.
api.add_resource(ClassifyImage, '/classify/')
if __name__ == '__main__':
    # Development server only; use a proper WSGI server in production.
    app.run()
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.