id stringlengths 2 8 | text stringlengths 16 264k | dataset_id stringclasses 1 value |
|---|---|---|
288595 | <reponame>deviant-syndrome/spear2sc
# -*- coding: utf-8 -*-
"""spear2sc.spear_utils: Utitlity methods to read SPEAR files"""
def process_line(line):
    """ (list of str) -> list of list of float
    Parses line, a line of time, frequency and amplitude data output by
    SPEAR in the 'text - partials' format.
    Returns a list of timepoints. Each timepoint is a list of floats
    in the form: [<time in s>, <frequency in Hz>, <amplitude 0.0-1.0>]
    >>> process_line('0.145 443.309723 0.112565 0.1575 443.597656 0.124895')
    [[0.145, 443.309723, 0.112565], [0.1575, 443.597656, 0.124895]]
    """
    split_line = line.strip().split()
    partial = []
    # Walk the flat field list in strides of 3 (time, freq, amp).
    # Indexing instead of list.pop(0) avoids the quadratic cost of
    # shifting the remaining fields on every read.  A trailing partial
    # triple still raises IndexError, exactly as pop(0) did.
    pos = 0
    while pos < len(split_line):
        time_point = [float(split_line[pos + i]) for i in range(3)]
        partial.append(time_point)
        pos += 3
    return pad_duration(partial)
# Column indices into a timepoint triple [time, frequency, amplitude].
index_time = 0
index_freq = 1
index_amp = 2
def get_durations(partial):
    """Converts partial's absolute time offsets into durations
    Note, that the size of duration's list is one element smaller than partial's entry count.
    :param partial: Sound partial, [<time in s>, <frequency in Hz>, <amplitude 0.0-1.0>]
    :type partial: list
    :return: A list of partial's duration, e.g. partial's time envelope
    :rtype: list
    """
    times = [point[index_time] for point in partial]
    # Pairwise difference of consecutive absolute offsets.
    return [later - earlier for earlier, later in zip(times, times[1:])]
def pad_duration(partial):
    """Pads the envelope of the partial if it has a time offset
    Auxiliary node added to the envelope to smooth the transition.
    Coefficients are empirical
    :param partial: Sound partial, a list of [time, freq, amp] timepoints
    :type partial: list
    :return: the partial prefixed with three ramp-in nodes when it does not
        start at time 0; otherwise the partial unchanged
    :rtype: list
    """
    offset = partial[0][index_time]
    if offset > 0:
        # Ramp toward the second node's freq/amp as before, but fall back
        # to the first (only) node so a single-timepoint partial no longer
        # raises IndexError on partial[1].
        next_node = partial[1] if len(partial) > 1 else partial[0]
        pad_node = [[0, 0, 0],
                    [offset * 0.99, 0, 0],
                    [offset * 0.999, next_node[index_freq] * 0.9, next_node[index_amp] * 0.9]]
        padded_partial = pad_node + partial
        return padded_partial
    return partial
| StarcoderdataPython |
1731593 | <gh_stars>1-10
from importlib import import_module
def import_spec_object(obj_str):
    """Resolve a dotted path such as ``"package.module.attr"`` to the named object."""
    module_path, attr_name = obj_str.rsplit('.', maxsplit=1)
    module = import_module(module_path)
    target = getattr(module, attr_name)
    return target
| StarcoderdataPython |
1668830 | <reponame>webhacking/finance
from math import nan
class BaseProfile:
    """Base class for a single-symbol company profile.

    Subclasses implement :meth:`parse` to fill in the fundamentals; all
    numeric fields default to NaN until populated, so the derived ratios
    propagate NaN instead of failing on missing data.
    """

    def __init__(self, symbol: str):
        self.symbol = symbol
        self.name = None
        # Fundamentals default to NaN until parse() fills them in.
        self.current_price = nan
        self.outstanding_shares = nan
        self.eps = nan
        self.bps = nan

    def parse(self, raw: str):
        """Populate the profile from provider-specific raw data (subclass hook)."""
        raise NotImplementedError

    @property
    def market_cap(self):
        """Market capitalization: current price times shares outstanding."""
        return self.current_price * self.outstanding_shares

    @property
    def per(self):
        """Price/earnings ratio; NaN when EPS is non-positive or unknown."""
        if self.eps > 0:
            return self.current_price / self.eps
        else:
            return nan

    @property
    def pbr(self):
        """Price/book ratio; NaN when BPS is non-positive or unknown.

        Guarded like ``per`` so a zero book value yields NaN instead of
        raising ZeroDivisionError.
        """
        if self.bps > 0:
            return self.current_price / self.bps
        return nan
| StarcoderdataPython |
1924915 | '''
/*
* @Author: <NAME>
* @Date: 2021-01-22 22:54:13
* @Last Modified by: <NAME>
* @Last Modified time: 2021-01-23 01:10:54
*/
'''
import tkinter as tk
from PIL import Image,ImageDraw,ImageTk
import numpy as np
import cv2
import os
import joblib
model = joblib.load('English_Char_SVC.sav')
win = tk.Tk()
count = 0
width = 500
height = 500
font_btn = 'Helvetica 20 bold'
font_lbl = 'Helvetica 22 bold'
lbl_dict = {0:'A',1:'B',2:'C',3:'D',4:'E',5:'F',6:'G',7:'H',8:'I',9:'J',10:'K',11:'L',12:'M',13:'N',14:'O',15:'P',16:'Q',17:'R',18:'S',19:'T',20:'U',21:'V',22:'W',23:'X',24:'Y',25:'Z'}
def event_function(event):
    """Paint a 40px-diameter black dot at the mouse position on both the
    visible canvas and the off-screen PIL image used later for prediction.
    Bound to <B1-Motion>, so dragging draws a stroke of overlapping dots.
    """
    x = event.x
    y = event.y
    # Bounding box of the dot, 20px in every direction.
    x1 = x-20
    x2 = x+20
    y1 = y-20
    y2 = y+20
    canvas.create_oval((x1,y1,x2,y2),fill='black')
    img_draw.ellipse((x1,y1,x2,y2),fill='black')
def save():
    """Save the current drawing to data/<count>.jpg and bump the counter."""
    global count
    if not os.path.exists('data'):
        os.makedirs('data')
    # NOTE(review): np.array(img) is RGB but cv2.imwrite expects BGR, so the
    # red/blue channels are swapped on disk -- harmless here since the
    # drawing is black on white; confirm if colour is ever used.
    img_array = np.array(img)
    path = os.path.join('data',str(count)+'.jpg')
    cv2.imwrite(path,img_array)
    count = count + 1
def clear():
    """Wipe the canvas, start a fresh off-screen image, and reset the label."""
    global img,img_draw
    canvas.delete('all')
    # Recreate the PIL image/draw pair so old strokes don't leak into the
    # next prediction.
    img = Image.new('RGB',(width,height),(255,255,255))
    img_draw = ImageDraw.Draw(img)
    pLbl.config(text='PREDICTED CHARACTER: NONE')
def predict():
    """Classify the current drawing: grayscale, downscale to 28x28, flatten
    to a 784-vector, run the pre-trained SVC, and show the predicted letter.
    """
    img_array = np.array(img)
    img_array = cv2.cvtColor(img_array,cv2.COLOR_BGR2GRAY)
    img_array = cv2.resize(img_array,(28,28))
    # Flatten to the (1, 784) shape the SVC was trained on.
    img_array = np.reshape(img_array,(1,784))
    result = model.predict(img_array)[0]
    #print(lbl_dict[result])
    sCharacter = lbl_dict[result]
    pLbl.config(text='PREDICTED CHARACTER:'+sCharacter)
canvas = tk.Canvas(win,width=width,height=height,bg='white')
canvas.grid(row=0,column=0,columnspan=4)
saveBtn = tk.Button(win,text='SAVE',bg='green',fg='white',font=font_btn,command=save)
saveBtn.grid(row=1,column=0)
predictBtn = tk.Button(win,text='PREDICT',bg='blue',fg='white',font=font_btn,command=predict)
predictBtn.grid(row=1,column=1)
clearBtn = tk.Button(win,text='CLEAR',bg='yellow',fg='white',font=font_btn,command=clear)
clearBtn.grid(row=1,column=2)
exitBtn = tk.Button(win,text='EXIT',bg='red',fg='white',font=font_btn,command=win.destroy)
exitBtn.grid(row=1,column=3)
pLbl = tk.Label(win,text='PREDICTED CHARACTER: NONE',bg='white',font=font_lbl)
pLbl.grid(row=2,column=0,columnspan=4)
canvas.bind('<B1-Motion>',event_function)
img = Image.new('RGB',(width,height),(255,255,255))
img_draw = ImageDraw.Draw(img)
#win.iconbitmap('src/icon.ico')
win.title('English Character Recognizer')
win.mainloop() | StarcoderdataPython |
11282584 | <reponame>frank-gear/tiny_python_projects
#!/usr/bin/env python3
"""tests for sampler.py"""
import os
import random
import re
import string
from subprocess import getstatusoutput
from Bio import SeqIO
from Bio.SeqUtils import GC
from numpy import mean
from itertools import chain
from shutil import rmtree
prg = './sampler.py'
n1k = './n1k.fa'
n10k = './n10k.fa'
n100k = './n100k.fa'
n1m = './n1m.fa'
# --------------------------------------------------
def random_string():
    """Return five random characters drawn from A-Z and 0-9."""
    pool = string.ascii_uppercase + string.digits
    return ''.join(random.choices(pool, k=5))
# --------------------------------------------------
def test_exists():
    """usage"""
    required = (prg, n1k, n10k, n100k, n1m)
    for path in required:
        assert os.path.isfile(path)
# --------------------------------------------------
def test_usage():
    """usage"""
    for flag in ('-h', '--help'):
        rv, out = getstatusoutput(f'{prg} {flag}')
        assert rv == 0
        assert re.match("usage", out, re.IGNORECASE)
# --------------------------------------------------
def test_bad_file():
    """die on bad file"""
    missing = random_string()
    rv, out = getstatusoutput(f'{prg} {missing}')
    assert rv != 0
    assert re.match('usage:', out, re.I)
    expected = f"No such file or directory: '{missing}'"
    assert re.search(expected, out)
# --------------------------------------------------
def test_bad_pct():
    """die on bad pct"""
    pct = random.randint(1, 10)
    rv, out = getstatusoutput(f'{prg} -p {pct} {n1k}')
    assert rv != 0
    assert re.match('usage:', out, re.I)
    message = f'--pct "{float(pct)}" must be between 0 and 1'
    assert re.search(message, out)
# --------------------------------------------------
def test_defaults():
    """runs on good input"""
    out_dir = 'out'
    try:
        # Make sure a stale output directory does not skew the counts.
        if os.path.isdir(out_dir):
            rmtree(out_dir)
        # Fixed seed (-s 10) makes the sampled subset deterministic.
        rv, out = getstatusoutput(f'{prg} -s 10 {n1k}')
        assert rv == 0
        expected = (' 1: n1k.fa\n'
                    'Wrote 108 sequences from 1 file to directory "out"')
        assert out == expected
        assert os.path.isdir(out_dir)
        files = os.listdir(out_dir)
        assert len(files) == 1
        out_file = os.path.join(out_dir, 'n1k.fa')
        assert os.path.isfile(out_file)
        # correct number of seqs
        seqs = list(SeqIO.parse(out_file, 'fasta'))
        assert len(seqs) == 108
    finally:
        # Always clean up, even when an assertion failed mid-test.
        if os.path.isdir(out_dir):
            rmtree(out_dir)
# --------------------------------------------------
def test_options():
    """runs on good input"""
    # Random directory name avoids collisions with other tests.
    out_dir = random_string()
    try:
        if os.path.isdir(out_dir):
            rmtree(out_dir)
        # Seed 4, 25% sample, three input files of increasing size.
        cmd = f'{prg} -s 4 -o {out_dir} -p .25 {n1k} {n10k} {n100k}'
        print(cmd)
        rv, out = getstatusoutput(cmd)
        assert rv == 0
        assert re.search('1: n1k.fa', out)
        assert re.search('2: n10k.fa', out)
        assert re.search('3: n100k.fa', out)
        assert re.search(
            f'Wrote 27,688 sequences from 3 files to directory "{out_dir}"',
            out)
        assert os.path.isdir(out_dir)
        files = os.listdir(out_dir)
        assert len(files) == 3
        # The per-file totals must add up to the reported grand total.
        seqs_written = 0
        for file in files:
            seqs_written += len(
                list(SeqIO.parse(os.path.join(out_dir, file), 'fasta')))
        assert seqs_written == 27688
    finally:
        if os.path.isdir(out_dir):
            rmtree(out_dir)
| StarcoderdataPython |
6503485 | import re
import os
import sys
import time
import numpy as np
import tensorflow as tf
import tensorflow.keras as keras
from GAE import *
from utils import npytar, save_volume
def atoi(text):
    """Return int(text) for all-digit strings, otherwise text unchanged."""
    if text.isdigit():
        return int(text)
    return text
def natural_keys(text):
    """Split text into an alternating str/int sort key for natural ordering
    (e.g. 'a10' sorts after 'a2')."""
    chunks = re.split(r'(\d+)', text)
    return [int(chunk) if chunk.isdigit() else chunk for chunk in chunks]
def data_loader(fname):
    """Load all voxel arrays from a tar of .npy entries into one float32
    batch, ordered by natural (human) filename order for reproducibility.

    NOTE(review): ``input_shape`` is not defined in this module --
    presumably exported by ``from GAE import *``; confirm.
    """
    x_dic = {}
    reader = npytar.NpyTarReader(fname)
    for ix, (x, name) in enumerate(reader):
        x_dic[name] = x.astype(np.float32)
    # reopen so reader.length() can be queried after full iteration.
    reader.reopen()
    xc = np.zeros((reader.length(), ) + input_shape, dtype = np.float32)
    i = 0
    for ik in sorted(x_dic.keys(), key = natural_keys):
        xc[i] = x_dic[ik]
        i += 1
    return xc
if __name__ == '__main__':
    # Build the graph autoencoder and unpack its components.
    model = get_model()
    inputs = model['inputs']
    indices = model['indices']
    outputs = model['outputs']
    z = model['z']
    encoder = model['encoder']
    decoder = model['decoder']
    gae = model['gae']
    # Evaluation only: load pretrained weights and freeze the model.
    gae.load_weights('results_gae/gae.h5')
    gae.trainable = False
    data_test = data_loader('datasets/chairs_32.tar')
    # Export the latent vectors for inspection.
    z_vectors = encoder.predict(data_test)
    np.savetxt('results_gae/z_vectors.csv', z_vectors, delimiter = ',')
    shape_indices = np.array(range(data_test.shape[0]))
    start_time = time.time()
    reconstructions = gae.predict([data_test, shape_indices])
    end_time = time.time()
    # Binarize both reconstructions and ground truth before comparing.
    reconstructions[reconstructions >= 0.5] = 1
    reconstructions[reconstructions < 0.5] = 0
    data_test[data_test > 0] = 1
    data_test[data_test < 0] = 0
    error_rates = []
    # Redirect stdout so the per-shape report lands in test.out.
    fout = open('results_gae/test.out', 'w')
    sys.stdout = fout
    if not os.path.exists('results_gae/reconstructions'):
        os.makedirs('results_gae/reconstructions')
    for i in range(reconstructions.shape[0]):
        save_volume.save_output(reconstructions[i, 0, :], 32, 'results_gae/reconstructions', i)
        error_rate = np.mean((reconstructions[i, 0, :] - data_test[i, 0, :]) ** 2)
        error_rates.append(error_rate)
        print('Mean squared error of shape {}: {}'.format(i, error_rate))
    error_rate_total = np.mean((reconstructions - data_test) ** 2)
    print('Mean squared error total: {}'.format(error_rate_total))
    print('Prediction time per shape: {}'.format((end_time - start_time) / reconstructions.shape[0]))
    np.savetxt('results_gae/test_loss_gae.csv', error_rates, delimiter = ',')
    fout.close()
| StarcoderdataPython |
3464395 | <filename>daedalus/lexer.py
#! cd .. && python3 -m daedalus.lexer
# TODO: better support for regex
import logging
import sys
import io
from .token import Token, TokenError
class LexError(TokenError):
    """Raised when the lexer meets an invalid token or unexpected end of input."""
    pass
# special characters that never combine with other characters
chset_special1 = "{}[](),~;:#"
# special characters that may combine with other special characters
chset_special2 = "+-*/&|^=<>%!@?"
chset_number_base = "0123456789"
# characters that may exist in a number, either:
# int, oct, hex, float, imaginary
# this includes all possible and many impossible combinations
# the compiler will determine if the token is valid
chset_number = "0123456789nxob_.jtgmkABCDEFabcdef"
# the number of characters to read for a string encoding
char_len = {'o': 3, 'x': 2, 'u': 4, 'U': 8}
# the base used to convert a string to a single character
char_base = {'o': 8, 'x': 16, 'u': 16, 'U': 16}
reserved_types = {
'boolean', 'byte', 'char', 'double', 'int', 'long', 'short',
'false'
}
reserved_words = {
# types
'boolean', 'byte', 'char', 'double', 'int', 'long', 'short',
'false', 'true', 'null', 'undefined',
# keywords
'async', 'await', 'break', 'case',
'class', 'const', 'continue', 'debugger', 'default',
'delete', 'do', 'else', 'enum', 'eval', 'export', 'extends',
'final', 'finally', 'float', 'for', 'function', 'goto', 'if',
'implements', 'import', 'in', 'instanceof', 'interface', 'let',
'new', 'package',
'return', 'static', 'super', 'switch',
'this', 'throw', 'throws', 'transient', 'try', 'typeof', 'var',
'void', 'volatile', 'while', 'with', 'yield', 'do',
}
# TODO: e.x. {public: 'abc123'} will fail since public is a keyword
reserved_words_extra = {
'private', 'protected', 'public', 'native',
'abstract', 'arguments', 'synchronized', 'from',
'module', "pyimport", "catch",
"constexpr",
}
# symbols for operators that have length 1
operators1 = set("+-~*/%@&^|!:.,;=(){}[]#")
# operators composed of 2 or more special characters
# which do not form a prefix of some other operator
# used to break longers strings of special characters into valid operators
operators2 = {
"+=", "-=", "*=", "**=", "/=", "%=", "@=", "^=", "<<=", ">>>=",
"<", "<=", ">", ">=", "===", "!==",
"=>",
"|>",
"++", "--",
"->", "=>", "?.", "...", "??=", "||=", "&&="
}
# operators composed of 2 or more characters that are also a prefix
# of an operator found in the previous list.
operators2_extra = set(["?", "==", "!=", "**",
">>", "<<", "||", "??", "&&", "|=", "&=", ">>=", ">>>"])
# the set of all valid operators for this language
# if an operator is not in this list, then it is a syntax error
operators3 = operators1 | operators2 | operators2_extra
def char_reader(f):
    """Yield characters one at a time from a file-like object,
    reading in 1 KiB chunks."""
    while True:
        chunk = f.read(1024)
        if not chunk:
            break
        yield from chunk
class LexerBase(object):
    """
    base class for a generic look-ahead-by-N lexer

    Note: using an array and ''.join() cut the time spent in _putch
    down from 30 seconds to 1 second for inputs as long as 1024 * 1024 bytes.
    previous it was well over a minute when running under cProfile.

    using the char_reader for files is slower than loading the whole file
    into memory first
    """
    def __init__(self):
        super(LexerBase, self).__init__()

    def _init(self, seq, default_type):
        """Reset all lexer state for a new input sequence.

        seq may be a file-like object (consumed lazily via char_reader)
        or any iterable of characters (materialized into a list).
        """
        # the line of the most recently consumed character
        self._line = 1
        # the column of the line of the most recently consumed character
        self._index = -1
        self._default_type = default_type
        # the type of the current token
        self._type = default_type
        # the value of the current token
        self._tok = []
        self._len = 0
        # the line where the current token began
        self._initial_line = -1
        # the column of the current line where the token began
        self._initial_index = -1
        # list of characters read from the input stream, but not consumed
        self._peek_char = []
        # the last token successfully pushed
        self._prev_token = None
        # define an iterator (generator) which
        # yields individual (utf-8) characters from
        # either an open file or an existing iterable
        if hasattr(seq, 'read'):
            self.g = char_reader(seq)
            self.g_iter = True
        else:
            self.g = list(seq)
            self.g_iter = False
            self.g_idx = 0
            self.g_len = len(self.g)
        self.tokens = []

    def _getch_impl(self):
        """ read one character from the input stream"""
        # Raises StopIteration at end of input in both modes; callers rely
        # on that to detect EOF.
        if self.g_iter:
            c = next(self.g)
        else:
            if self.g_idx >= self.g_len:
                raise StopIteration()
            c = self.g[self.g_idx]
            self.g_idx += 1
        # Track line/column of the consumed character for error reporting.
        if c == '\n':
            self._line += 1
            self._index = -1
        else:
            self._index += 1
        return c

    def _getch(self):
        """ return the next character """
        # Drain the peek buffer before reading from the stream.
        if self._peek_char:
            c = self._peek_char.pop(0)
        else:
            c = self._getch_impl()
        return c

    def _getstr(self, n):
        """ return the next N characters """
        s = []
        for i in range(n):
            s.append(self._getch())
        return ''.join(s)

    def _peekch(self):
        """ return the next character, do not advance the iterator """
        if not self._peek_char:
            self._peek_char.append(self._getch_impl())
        return self._peek_char[0]

    def _peekstr(self, n):
        """ return the next N characters, do not advance the iterator """
        while len(self._peek_char) < n:
            self._peek_char.append(self._getch_impl())
        return ''.join(self._peek_char[:n])

    def _putch(self, c):
        """ append a character to the current token """
        # Record where the token started the first time a char is added.
        if self._initial_line < 0:
            self._initial_line = self._line
            self._initial_index = self._index
        self._tok.append(c)

    def _gettok(self):
        # Join the buffered characters into the token's string value.
        return ''.join(self._tok)

    def _restok(self):
        # Reset the character buffer for the next token.
        self._tok = []

    def _push_endl(self):
        """ push an end of line token """
        # Collapse consecutive newlines into a single T_NEWLINE token.
        if self.tokens and self.tokens[-1].type == Token.T_NEWLINE:
            return
        self.tokens.append(Token(
            Token.T_NEWLINE,
            self._line,
            0,
            "")
        )
        self._type = self._default_type
        self._initial_line = -1
        self._initial_index = -1
        self._restok()

    def _push(self):
        """ push a new token """
        self._prev_token = Token(
            self._type,
            self._initial_line,
            self._initial_index,
            self._gettok()
        )
        self.tokens.append(self._prev_token)
        self._type = self._default_type
        self._initial_line = -1
        self._initial_index = -1
        self._restok()

    def _maybe_push(self):
        """ push a new token if there is a token to push """
        if self._tok:
            self._push()

    def _error(self, message):
        """Raise a LexError at the current token position (never returns).

        Some callers write ``raise self._error(...)``; the outer raise is
        unreachable but harmless since this method raises itself.

        NOTE(review): self._tok is passed as the token value without being
        joined -- the Token receives the raw character list rather than
        _gettok(); confirm whether Token tolerates a list value here.
        """
        token = Token(self._type, self._initial_line, self._initial_index, self._tok)
        raise LexError(token, message)
class Lexer(LexerBase):
    """
    read tokens from a file or string

    Produces a flat Token list; no parsing is done here.  Options:
    ``preserve_documentation`` keeps ``/** */`` blocks as tokens instead
    of discarding them like ordinary comments.
    """
    def __init__(self, opts=None):
        super(Lexer, self).__init__()
        if not opts:
            opts = {}
        self.preserve_documentation = opts.get('preserve_documentation', False)

    def lex(self, seq):
        """Tokenize seq (string or file-like) and return the token list.

        Raises LexError if the input ends in the middle of a token.
        """
        self._init(seq, Token.T_TEXT)
        error = 0
        try:
            self._lex()
        except StopIteration:
            error = 1
        if error:
            tok = Token("", self._line, self._index, "")
            raise LexError(tok, "Unexpected End of Sequence")
        return self.tokens

    def _lex(self):
        """Main dispatch loop: route each character to the proper sub-lexer."""
        while True:
            try:
                c = self._getch()
            except StopIteration:
                break
            if c == '\n':
                self._maybe_push()
                self._push_endl()
            elif c == '/':
                # comment, regex, division, or /= -- decided in _lex_comment
                self._lex_comment()
            elif c == '\\':
                # a backslash is only legal as a line continuation
                c = self._peekch()
                if c != '\n':
                    raise self._error("expected newline after '\\'. found '%s'" % c)
                self._getch()  # consume the newline
            elif c == '\'' or c == '\"' or c == '`':
                self._lex_string(c)
            elif c in chset_special1:
                # single-character operators that never combine
                self._maybe_push()
                self._putch(c)
                self._type = Token.T_SPECIAL
                self._push()
            elif c == '*':
                self._maybe_push()
                # generator keywords mix special charactes and alpha characters
                # this allows for space between the *, which would normally
                # be a syntax error
                if len(self.tokens) and self.tokens[-1].value in ('function', 'yield'):
                    self.tokens[-1].value += c
                else:
                    self._putch(c)
                    self._lex_special2()
            elif c == '?':
                # collect optional chaining operator when a . follows ?
                # otherwise collect the ternary operator
                self._maybe_push()
                self._type = Token.T_SPECIAL
                self._putch(c)
                #try:
                nc = self._peekch()
                #except StopIteration:
                #    nc = None
                if nc:
                    if nc == '.':
                        self._putch(self._getch())
                    elif nc == '?':
                        # collect ?. or ??
                        self._putch(self._getch())
                        nc = self._peekch()
                        if nc == '=':
                            self._putch(self._getch())
                self._push()
                #else:
                #    self._push()
            elif c in chset_special2:
                self._maybe_push()
                self._putch(c)
                self._lex_special2()
            elif c == '.':
                # '.', '..', '...' or the start of a float like '.5'
                self._maybe_push()
                self._putch(c)
                try:
                    nc = self._peekch()
                except StopIteration:
                    nc = None
                if nc and nc == '.':
                    # collect .. and ...
                    self._type = Token.T_SPECIAL
                    self._putch(self._getch())
                    try:
                        nc = self._peekch()
                    except StopIteration:
                        nc = None
                    if nc and nc == '.':
                        self._putch(self._getch())
                    self._push()
                elif nc and nc in chset_number_base:
                    self._lex_number()
                else:
                    self._type = Token.T_SPECIAL
                    self._push()
            elif not self._tok and c in chset_number_base:
                # a digit only starts a number when no identifier is open
                self._maybe_push()
                self._putch(c)
                self._lex_number()
            elif c == ' ' or c == '\t' or ord(c) < 0x20:
                # ignore white space and ASCII control codes
                # newline was already processed above
                self._maybe_push()
            else:
                self._putch(c)
        # flush whatever token was open when the input ended
        self._maybe_push()

    def _lex_special2(self):
        """
        lex sequences of special characters

        break these characters apart using prefix matching
        e.g. '=-' becomes '=' and '-'
        e.g. '+++' becomes '++' and '+'
        """
        self._type = Token.T_SPECIAL
        while True:
            try:
                nc = self._peekch()
            except StopIteration:
                nc = None
            if nc and nc in chset_special2:
                if nc == "/":
                    # next time around the loop lex as a comment/regex/etc
                    break
                else:
                    # if extending the current run would no longer be a
                    # valid operator prefix, push what we have first
                    if self._gettok() + nc not in operators3:
                        self._push()
                        self._type = Token.T_SPECIAL
                    self._putch(self._getch())
                    self._maybe_push_op()
            else:
                self._maybe_push()
                self._type = Token.T_TEXT
                break

    def _lex_string(self, string_terminal):
        """ read a string from the stream, terminated by the given character
        strings are read with no processing so that the compiler can produce
        an identical string token.
        """
        self._maybe_push()
        self._type = Token.T_TEMPLATE_STRING if string_terminal == '`' else Token.T_STRING
        self._putch(string_terminal)
        escape = False  # NOTE(review): unused; escapes are handled inline below
        while True:
            try:
                c = self._getch()
            except StopIteration:
                c = None
            if c is None:
                raise self._error("unterminated string")
            elif c == "\\":
                # expect exactly one character after an escape
                # pass through unmodified, let the downstream
                # parser/compiler handle string processing.
                self._putch(c)
                try:
                    c = self._getch()
                except StopIteration:
                    c = None
                if c is None:
                    raise self._error("expected character")
                self._putch(c)
            elif c == "\n" and string_terminal != '`':
                # only template strings may span lines
                raise self._error("unterminated string")
            elif c == string_terminal:
                self._putch(string_terminal)
                prev_tok = self._prev()
                if prev_tok and \
                   prev_tok.value and \
                   prev_tok.value[-1] == string_terminal:
                    # merge consecutive strings of the same type (single-quote, double-quote, backtick)
                    # if strings are on multiple lines, then the newline character must be escaped.
                    # TODO: consider using self._prev() to automatically skip previous new lines
                    prev_tok.value = prev_tok.value[:-1] + self._gettok()[1:]
                    self._type = self._default_type
                    self._initial_line = -1
                    self._initial_index = -1
                    self._restok()
                else:
                    self._push()
                break
            else:
                self._putch(c)

    def _lex_number(self):
        """ read a number from the stream """
        # Accepts any run of chset_number characters; validity (hex vs oct
        # vs float, etc.) is decided later by the compiler.
        self._type = Token.T_NUMBER
        while True:
            try:
                c = self._peekch()
            except StopIteration:
                break
            if c == 'e':
                # numbers with exponents allow for unary signs
                # as part of the number
                # allow for 1e-5, 1e+5, 1e5
                self._putch(self._getch())
                try:
                    c = self._peekch()
                except StopIteration:
                    break
                if c in "+-" or c in chset_number:
                    self._putch(self._getch())
                else:
                    self._push()
                    break
            elif c in chset_number:
                self._putch(self._getch())
            else:
                self._push()
                break

    def _lex_comment(self):
        """
        the character '/' is overloaded to mean 1 of 5 things
        - //      : single line comment
        - /* */   : multi line comment
        - /** */  : multi line documentation
        - a / b   : division
        - a /= b  : inplace division
        - /^$/    : regular expression
        comments produce no token
        """
        self._maybe_push()
        c = self._peekch()
        if c == '/':
            self._lex_single_comment()
        elif c == '=':
            # TODO: this can be cleaned up to produce
            # one token after reading two chars
            self._putch('/')
            self._lex_special2()
        elif c == '*':
            # TODO: remove _peekstr
            s = self._peekstr(2)
            if s == '**' and self.preserve_documentation:
                self._lex_documentation()
            else:
                self._lex_multi_comment()
        elif not Token.basicType(self._prev()):
            # a '/' after an operator/keyword cannot be division,
            # so it must start a regex literal
            self._type = Token.T_REGEX
            self._putch('/')
            self._lex_regex()
        else:
            self._type = Token.T_SPECIAL
            self._putch('/')
            self._push()

    def _lex_single_comment(self):
        """ read a comment and produce no token """
        while True:
            try:
                c = self._getch()
            except StopIteration:
                break
            if c == '\n':
                self._push_endl()
                break

    def _lex_multi_comment(self):
        """ read a comment and produce no token """
        error = 0
        while True:
            try:
                c = self._getch()
            except StopIteration:
                error = 1
                break
            if c == '*':
                try:
                    c = self._peekch()
                except StopIteration:
                    error = 1
                    break
                if c == '/':
                    self._getch()
                    break
        if error:
            self._error("unterminated multiline comment")

    def _lex_documentation(self):
        """ read a comment and produce a documentation token
        - /** */ : multi line documentation
        """
        error = 0
        self._type = Token.T_DOCUMENTATION
        self._putch("/")
        while True:
            try:
                c = self._getch()
            except StopIteration:
                error = 1
                break
            if c == '*':
                try:
                    c2 = self._peekch()
                except StopIteration:
                    error = 1
                    break
                self._putch(c)
                if c2 == '/':
                    self._putch(self._getch())
                    self._push()
                    break
            else:
                self._putch(c)
        if error:
            self._error("unterminated documentation comment")

    def _lex_regex(self):
        """Read the body of a regex literal up to the closing '/'."""
        error = 0
        while True:
            try:
                c = self._getch()
            except StopIteration:
                error = 1
                break
            if c == '\\':
                # keep the escape and the escaped character verbatim
                self._putch(c)
                self._putch(self._getch())
            elif c == '/':
                # terminate the regex parsing but
                # don't push the token
                # this will allow for arbitrary
                # flags to be appended
                # TODO: enumerate the allowed flags
                self._putch(c)
                break
            else:
                self._putch(c)
        if error:
            self._error("unterminated regex")

    def _prev(self):
        """Return the most recent non-newline token, or None."""
        i = len(self.tokens) - 1
        while i >= 0:
            if self.tokens[i].type != Token.T_NEWLINE:
                return self.tokens[i]
            i -= 1
        return None

    def _maybe_push_op(self):
        """Push the current run if it is a complete (non-prefix) operator."""
        if self._tok and self._gettok() in operators2:
            self._push()
            self._type = Token.T_SPECIAL

    def _push(self):
        """Classify the token (keyword / operator) before pushing it."""
        t = self._gettok()
        if self._type == Token.T_TEXT and t in reserved_words:
            self._type = Token.T_KEYWORD
        if self._type == Token.T_SPECIAL and t not in operators3:
            self._error("unknown operator")
        super()._push()
Lexer.reserved_words = {*reserved_words, *reserved_words_extra}
import timeit
import cProfile
import re
def perf():  # pragma: no cover
    """Micro-benchmark: lex a ~1 MB string literal (driven by cProfile in mainx)."""
    text = "const x = '%s';" % ("a" * (1024 * 1024))
    lexer = Lexer()
    lexer.lex(text)
    return 0
def mainx():  # pragma: no cover
    """Profile perf() with cProfile and report completion."""
    cProfile.run("perf()")
    print("done")
def main():  # pragma: no cover
    """Lex a built-in regex/comment sample (or stdin when invoked with '-')
    and print every token."""
    # r.match(/filename[^;=\\n]*=((['"]).*?\\2|[^;\\n]*)/)
    # r.match(2/3)
    text1 = """
    //var f=/\\{ *([\\w_-]+) *\\}/g
    /**/
    """
    if len(sys.argv) == 2 and sys.argv[1] == "-":
        text1 = sys.stdin.read()
    print(text1)
    tokens = Lexer({"preserve_documentation": True}).lex(text1)
    for token in tokens:
        print(token)
if __name__ == '__main__': # pragma: no cover
main()
| StarcoderdataPython |
11349465 | #!/usr/bin/env python3
#
# Tolka loggfil med transaktioner från Nexo.
#
# Nexo tolkas som en låneplattform. Dvs, all insättning och uttag är realisering.
# Interna transkationer (låsning) ger ingen realisering.
# Räntor och utdelningar blir ränta.
#
# Exempel: nexo_transactions.csv
#
# Transaction,Type,Currency,Amount,USD Equivalent,Details,Outstanding Loan,Date / Time
# NXTIISMjM2KCJ,Interest,NEXONEXO,0.17381980,$0.38,approved / 0.0042342262 AVAX,$0.00,2022-02-08 07:00:06
# NXTiucm8Fwbub,FixedTermInterest,NEXONEXO,1.99298599,$1.344756180701,approved / Term Deposit Interest,$0.00,2022-01-29 07:01:17
# NXT6atyFs1z2Q,UnlockingTermDeposit,AVAX,4.04993264,$63.63259997825,approved / Transfer from Term Wallet to Savings Wallet,$0.00,2022-01-29 07:01:17
# NXToTtEZ9Mutj,LockingTermDeposit,AVAX,-4.04993264,$516.4234202672,approved / Transfer from Savings Wallet to Term Wallet,$0.00,2021-12-29 08:39:33
# Olika Transaction Type:
POLICY_IGNORE = [
'LockingTermDeposit',
'UnlockingTermDeposit',
'ExchangeDepositedOn' # Extra onödig?
]
# Ränta
POLICY_INTEREST = [
'Interest',
'FixedTermInterest',
'Dividend'
]
POLICY_OTHER = [
'Deposit', # Utlåning
'Withdrawal', # Retur
'Exchange', # Krypto till krypto, OBS: loggfil brister, hanteras manuellt
'DepositToExchange' # Köp krypto för fiat
]
import sys, valuta
UTFIL = "resultat_nexo.csv"
def main():
    """CLI entry point: parse the Nexo transaction log named on the command line."""
    if len(sys.argv) < 2:
        print("Ange csv-filens namn som indata (crypto.com transaktionslogg)!")
        print("Utdata hamnar alltid i resultat_nexo.csv")
        exit(1)
    processfile(sys.argv[1], UTFIL)
def processfile(loggfil, utfil):
    """Parse a Nexo transaction CSV (newest first) and write a normalized,
    oldest-first CSV of buy/sell/interest events to `utfil`.

    Nexo is modelled as a lending platform: deposits and withdrawals are
    realizations (sell one asset, buy its synthetic nexo-<currency> twin or
    vice versa); interest and dividends become interest income.  Amounts are
    converted from USD to local currency via valuta.lookup.
    """
    infil = open(loggfil)
    infil.readline()  # skip header line
    lines = infil.readlines()
    infil.close()
    f = open(utfil, "w")
    print("Nexo", file=f)
    print("Datum,Var,Händelse,Antal,Valuta,Belopp", file=f)
    # The log is newest-first; iterate reversed for chronological order.
    for line in reversed(lines):
        splitted = line.rstrip().split(",")
        _, kind, currency1, amount1, amountUSD, desc, _, date_time = splitted
        if kind in POLICY_IGNORE:
            continue
        date = date_time.split(" ")[0]
        if currency1 == "NEXONEXO":
            currency1 = "NEXO"
        if kind == 'Exchange':
            # The log lacks the counter-currency for Exchange rows.
            print("Varning: hantera Exchange manuellt, loggfil saknar information:")
            print("  ", date, kind, amount1, currency1)
            continue
        amount2 = 0.0
        # amountUSD comes with a leading '$'.
        amount1, amountUSD = [float(amount1), float(amountUSD[1:])]
        if kind in POLICY_INTEREST:
            # Becomes interest income; it accrues on the synthetic
            # nexo-<currency> (debt) asset.
            usdkurs = valuta.lookup(date, "usd")
            print(f"{date},{kind},ränta," +
                  f"{amount1},nexo{currency1},{amountUSD*usdkurs},,{desc}", file=f)
        elif kind in POLICY_OTHER:
            usdkurs = valuta.lookup(date, "usd")
            if kind == 'Deposit':
                # Deposit: exchange into the synthetic nexo-<currency> asset.
                amount1 = -amount1
                currency2 = "nexo" + currency1
                amount2 = -amount1
                amountUSD = amountUSD
                print(f"{date},{kind},sälj,{amount1},{currency1},{amountUSD*usdkurs}" +
                      f",,{desc}", file=f)
                print(f",,köp,{amount2},{currency2},{amountUSD*usdkurs}", file=f)
            elif kind == 'Withdrawal':
                # Withdrawal: the mirror trade -- sell the synthetic asset,
                # buy back the real currency.
                currency2, amount2 = currency1, -amount1
                currency1 = "nexo" + currency2
                print(f"{date},{kind},sälj,{amount1},{currency1},{amountUSD*usdkurs}" +
                      f",,{desc}", file=f)
                print(f",,köp,{amount2},{currency2},{amountUSD*usdkurs}", file=f)
            elif kind == 'DepositToExchange':
                # If this is EUR it arguably should not be treated as crypto,
                # but it is simplest to handle it like everything else.
                print(f"{date},{kind},köp,{amount1},nexo{currency1},{amountUSD*usdkurs}" +
                      f",,{desc}", file=f)
            else:
                raise Exception("Okänd POLICY_OTHER:", kind)
        else:
            raise Exception("Okänd typ (kind) i loggen:", kind)
    f.close()
if __name__ == "__main__":
main()
| StarcoderdataPython |
127099 | <reponame>NOBLEGG/RPS<gh_stars>0
def active_message(domain, uidb64, token):
    """Build the account-activation e-mail body containing the confirmation link."""
    link = f"https://{domain}/activate/{uidb64}/{token}"
    return (
        "아래 링크를 클릭하시면 인증이 완료되며, 바로 로그인하실 수 있습니다."
        f"\n\n링크 : {link}\n\n감사합니다."
    )
def reset_message(domain, uidb64, token):
    """Build the password-reset e-mail body containing the reset link."""
    link = f"https://{domain}/reset/{uidb64}/{token}"
    return (
        "아래 링크를 클릭하시면 비밀번호 변경을 진행하실 수 있습니다."
        f"\n\n링크 : {link}\n\n감사합니다."
    )
| StarcoderdataPython |
9608101 | '''
***
Modified generic daemon class
***
Author: http://www.jejik.com/articles/2007/02/
a_simple_unix_linux_daemon_in_python/www.boxedice.com
License: http://creativecommons.org/licenses/by-sa/3.0/
Changes: 23rd Jan 2009 (<NAME> <<EMAIL>>)
- Replaced hard coded '/dev/null in __init__ with os.devnull
- Added OS check to conditionally remove code that doesn't
work on OS X
- Added output to console on completion
- Tidied up formatting
11th Mar 2009 (<NAME> <<EMAIL>>)
- Fixed problem with daemon exiting on Python 2.4
(before SystemExit was part of the Exception base)
13th Aug 2010 (<NAME> <<EMAIL>>
- Fixed unhandled exception if PID file is empty
3rd May 2014 (<NAME> <<EMAIL>>)
- Ported to Python 3
29th August 2014 (<NAME> <<EMAIL>>)
- Removed PID file handling, which is not used by jobmon
- Changed sys.exit to os._exit, to avoid unittest catching
the SystemExit and doing odd things.
- Allowed the parent process to stick around, which also aids
unit testing
27th November 2013
- Added the option to kill the parent, rather than forcing it
to stick around for no reason.
'''
# Core modules
import atexit
import os
import sys
import time
import signal
class Daemon(object):
"""
A generic daemon class.
Usage: subclass the Daemon class and override the run() method
"""
def __init__(self, stdin=os.devnull, stdout=os.devnull,
stderr=os.devnull, home_dir='.', umask=0o22,
kill_parent=True, verbose=1):
self.kill_parent = kill_parent
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.home_dir = home_dir
self.verbose = verbose
self.umask = umask
self.daemon_alive = True
def daemonize(self):
"""
Do the UNIX double-fork magic, see Stevens' "Advanced
Programming in the UNIX Environment" for details (ISBN 0201563177)
http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
"""
try:
pid = os.fork()
if pid > 0:
if self.kill_parent:
os._exit(0)
else:
# Let the first parent continue, since that could be running
# from the CLI, or running a test, or something
return False
except OSError as e:
sys.stderr.write(
"fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
os._exit(1)
# Decouple from parent environment
os.chdir(self.home_dir)
os.setsid()
os.umask(self.umask)
# Do second fork
try:
pid = os.fork()
if pid > 0:
# Exit from second parent
os._exit(0)
except OSError as e:
sys.stderr.write(
"fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
os._exit(1)
if sys.platform != 'darwin': # This block breaks on OS X
# Redirect standard file descriptors
sys.stdout.flush()
sys.stderr.flush()
si = open(self.stdin, 'r')
so = open(self.stdout, 'a+')
if self.stderr:
se = open(self.stderr, 'a+')
else:
se = so
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
def sigtermhandler(signum, frame):
self.daemon_alive = False
signal.signal(signal.SIGTERM, sigtermhandler)
signal.signal(signal.SIGINT, sigtermhandler)
return True
def start(self, *args, **kwargs):
"""
Start the daemon
"""
# Start the daemon
if self.daemonize():
self.run(*args, **kwargs)
def stop(self):
"""
Stop the daemon
"""
# Try killing the daemon process
try:
i = 0
while 1:
os.kill(pid, signal.SIGTERM)
time.sleep(0.1)
i = i + 1
if i % 10 == 0:
os.kill(pid, signal.SIGHUP)
except OSError as err:
err = str(err)
if err.find("No such process") > 0:
if os.path.exists(self.pidfile):
os.remove(self.pidfile)
else:
print(str(err))
os._exit(1)
    def restart(self):
        """
        Restart the daemon: stop any running instance, then start anew.
        """
        self.stop()
        self.start()
    def run(self):
        """
        You should override this method when you subclass Daemon.
        It will be called after the process has been
        daemonized by start() or restart().

        Implementations should poll ``self.daemon_alive`` and exit
        their loop once it becomes False (set by the signal handlers
        installed in daemonize()).
        """
| StarcoderdataPython |
3458205 | <filename>goethe/utils/context/methods.py<gh_stars>1-10
import itertools as it
from collections import deque
import random
import spacy
class ContextMethod:
    """Base class for word-context extraction strategies.

    Subclasses implement :meth:`context`, returning an iterable of context
    tokens for a given token; this base class turns that into (word, context)
    pairs or per-word lines, truncated to ``window`` context items.
    """

    def __init__(self, window=None, **kwargs):
        # Maximum number of context items kept per word (None = unlimited;
        # passed straight to itertools.islice).
        self.window = window

    def context(self, token):
        """Return an iterable of context tokens for *token* (subclass hook)."""
        raise NotImplementedError

    def apply_context(self, token):
        """Apply to each context token; default is its surface text."""
        return token.text

    def pairs(self, doc):
        """Yield context in (word, context_item) pairs.
        """
        for token, context in self.tokenwise_context(doc):
            yield from zip(it.repeat(token), context)

    def lines(self, doc):
        """Yield context in lists with the first element being the word.
        """
        for token, context in self.tokenwise_context(doc):
            yield (token, *context)

    def tokenwise_context(self, doc):
        """Go over each word in *doc* and yield (text, context iterator).

        Note: the original class defined this method twice; the first
        definition (a NotImplementedError stub) was dead code shadowed by
        this one and has been removed.
        """
        for token in doc:
            context = self.context(token)
            context = (self.apply_context(c) for c in context)
            context = it.islice(context, self.window)
            yield token.text, context
class TreeOrder(ContextMethod):
    """Context ordered by distance in the dependency tree.

    Tree distances are computed with a breadth-first search from the start
    token over spaCy's dependency links (children plus head).
    """

    def contextsort(self, tokens, tdists, start):
        """Sort a `start` words context `tokens` given the
        tree distances (`tdists`).

        Two stable sorts: tree distance is the primary key, linear
        (sentence-position) distance breaks ties.
        """
        tokens = sorted(tokens, key=lambda t: abs(t.i - start.i))
        tokens = sorted(tokens, key=lambda t: tdists[t.i])
        return tokens

    def context(self, start):
        """Take `start` word and return iterable over its context.
        """
        # tdists[i] = BFS tree distance from `start` to the token at index i.
        tdists = [0] * len(start.doc)
        queue = deque([start])
        seen = {start}

        def neighbors(token):
            # A ROOT token's head is itself in spaCy, so skip it to avoid
            # a self-loop.
            is_head = token.dep_ == 'ROOT'
            return it.chain(token.children, [] if is_head else [token.head])
        while queue:
            t = queue.popleft()
            nbrs = [n for n in neighbors(t)
                    if n not in seen]
            for n in nbrs:
                tdists[n.i] = tdists[t.i] + 1
            seen.update(nbrs)
            queue.extend(nbrs)
        # Every reachable token except the start word itself is context.
        tokens = (t for t in seen if t is not start)
        tokens = self.contextsort(tokens, tdists, start)
        return tokens
class POSTreeOrder(TreeOrder):
    """Tree-ordered context where every context word carries its POS tag."""

    @staticmethod
    def fine(token):
        """Return a token's fine-grained POS tag.
        """
        return token.tag_

    @staticmethod
    def coarse(token):
        """Return a token's coarse-grained POS tag.
        """
        return token.pos_

    def __init__(self, fine=False, **kwargs):
        super().__init__(**kwargs)
        # Pick the tagging function once, at construction time.
        if fine:
            self.pos = self.fine
        else:
            self.pos = self.coarse

    def apply_context(self, token):
        """Render a context token as "<text>/<pos>"."""
        tag = self.pos(token)
        return f'{token.text}/{tag}'
class MinTreeOrder(TreeOrder):
    """Tree-ordered context keyed by min(tree distance, linear distance)."""

    def contextsort(self, tokens, tdists, start):
        """Sort a `start` words context `tokens` given the
        tree distances (`tdists`). As key, use
        min(tree distance, sentence distance).

        Bug fix: the parameter was misspelled ``stkrt`` while the body
        referenced ``start``, so every call raised NameError.
        """
        def key(t):
            return min(tdists[t.i], abs(t.i - start.i))
        tokens = sorted(tokens, key=key)
        return tokens
class ShuffleTreeOrder(TreeOrder):
    """Tree-ordered context with random ordering among equal-distance tokens.

    NOTE: unlike its siblings, ``contextsort`` here returns (token, distance)
    tuples, and the matching ``apply_context`` below unpacks them — the two
    methods must be kept in sync.
    """

    def contextsort(self, tokens, tdists, start):
        """Sort a `start` words context `tokens` given the
        tree distances (`tdists`). Shuffle tokens with equal tree distance.
        """
        tokens = [(t, tdists[t.i]) for t in tokens]
        # Shuffle first; the subsequent sort is stable, so ties stay random.
        random.shuffle(tokens)
        return sorted(tokens, key=lambda t: t[1])

    def apply_context(self, token):
        """Apply to each (token, distance) pair, rendering "<text>/<dist>".
        """
        c, dist = token
        return f'{c.text}/{dist}'
class TreeTraverse(ContextMethod):
    """Context produced by a breadth-first traversal of the dependency tree,
    limited to ``max_level`` hops from the start word."""

    def __init__(self, max_level, **kwargs):
        # Maximum tree depth (in dependency hops) to explore.
        self.max_level = max_level
        super().__init__(**kwargs)

    def context(self, word):
        # FIFO of (level, token) candidates still to be yielded/expanded.
        candidates = deque()
        seen = {word}

        def add_to_queue(word, level):
            # Enqueue unseen neighbours (children + head) at the given level.
            # NOTE(review): unlike TreeOrder, a ROOT token's self-referential
            # head is filtered only by the `seen` set here.
            for w in it.chain(word.children, [word.head]):
                if w not in seen:
                    seen.add(w)
                    candidates.append((level, w))
        add_to_queue(word, 1)
        while candidates:
            level, candidate = candidates.popleft()
            yield candidate
            if level < self.max_level:
                add_to_queue(candidate, level + 1)
class LevyGoldberg:
    """Dependency-based contexts in the style of Levy & Goldberg (2014):
    for each non-root token, emit (head, child/label) and the inverse
    (child, head/label-1) pair."""

    def __init__(self, **kwargs):
        # Stateless; **kwargs accepted only for interface parity with the
        # other context methods.
        pass

    def pairs(self, doc):
        """Yield both dependency-context pairs for every non-root token."""
        non_roots = (tok for tok in doc if tok.dep_ != 'ROOT')
        for tok in non_roots:
            governor = tok.head
            yield (governor.text, f'{tok.text}/{tok.dep_}')
            yield (tok.text, f'{governor.text}/{tok.dep_}-1')

    def lines(self, doc):
        """Line-style output is undefined for dependency contexts."""
        raise NotImplementedError
class Linear(ContextMethod):
    """Classic linear (sentence-order) context, nearest neighbours first."""

    def window_sort(self, tokens, n):
        """Return the neighbours of position *n* interleaved left/right by
        increasing distance: n-1, n+1, n-2, n+2, ..."""
        ordered = []
        left = n - 1
        right = n + 1
        while left >= 0 or right < len(tokens):
            if left >= 0:
                ordered.append(tokens[left])
            if right < len(tokens):
                ordered.append(tokens[right])
            left -= 1
            right += 1
        return ordered

    def tokenwise_context(self, doc):
        """Go over each position in *doc*, yielding (text, context iterator).
        """
        for pos, token in enumerate(doc):
            neighbours = self.window_sort(doc, pos)
            rendered = (self.apply_context(c) for c in neighbours)
            limited = it.islice(rendered, self.window)
            yield token.text, limited
| StarcoderdataPython |
11270227 | <reponame>meissnert/StarCluster-Plugins
from starcluster.clustersetup import ClusterSetup
from starcluster.logger import log
class OmicsPipeInstaller(ClusterSetup):
    """StarCluster plugin that provisions each cluster node with the runtime
    dependencies of omics_pipe: Docker, locales, graphviz, mdadm, s3cmd,
    lvm2, xfs tools, pigz and assorted compression libraries.

    All commands are executed over SSH (as root) on every node at cluster
    start-up.
    """

    def run(self, nodes, master, user, user_shell, volumes):
        # Provision every node, master included (it is part of `nodes`).
        for node in nodes:
            log.info("Installing Docker on %s " % (node.alias))
            #node.ssh.execute('wget https://bitbucket.org/sulab/omics_pipe/downloads/apt_sources -O /etc/apt/sources.list')
            node.ssh.execute('apt-get -y update')
            # Kernel extras are required for Docker's storage drivers.
            node.ssh.execute('apt-get -y install linux-image-extra-`uname -r`')
            # Trust and register the (legacy) get.docker.io apt repository.
            node.ssh.execute('apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9')
            node.ssh.execute('echo deb http://get.docker.io/ubuntu docker main > /etc/apt/sources.list.d/docker.list')
            node.ssh.execute('apt-get -y update')
            node.ssh.execute('apt-get -y install lxc-docker')
            log.info("Configuring perl locale on %s " % (node.alias))
            node.ssh.execute('locale-gen en_US en_US.UTF-8 hu_HU hu_HU.UTF-8')
            node.ssh.execute('dpkg-reconfigure locales')
            log.info("Installing Graphviz on %s " % (node.alias))
            node.ssh.execute('apt-get install -y graphviz')
            log.info("Installing mdadm on %s " % (node.alias))
            # Non-interactive frontend prevents mdadm's debconf prompt from
            # hanging the unattended install.
            node.ssh.execute('export DEBIAN_FRONTEND=noninteractive && apt-get -q -y install mdadm --no-install-recommends')
            log.info("Installing s3cmd on %s " % (node.alias))
            node.ssh.execute('apt-get -y install s3cmd')
            log.info("Installing lvm2 on %s " % (node.alias))
            node.ssh.execute('apt-get -y install lvm2')
            log.info("Installing xfs on %s " % (node.alias))
            node.ssh.execute('apt-get -y install xfs xfsprogs')
            log.info("Installing pigz on %s " % (node.alias))
            node.ssh.execute('apt-get -y install pigz')
            node.ssh.execute('apt-get -y install libjemalloc-dev libbz2-dev libsnappy-dev')
| StarcoderdataPython |
94963 | <reponame>alonbg/acos-client
# Copyright 2014, <NAME>, A10 Networks.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import acos_client.v21.base as base
from virtual_port import VirtualPort
class VirtualServer(base.BaseV21):
    """aXAPI v2.1 wrapper for A10 SLB virtual-server operations.

    Each method maps one-to-one onto an ``slb.virtual_server.*`` API action;
    the HTTP plumbing lives in ``base.BaseV21``.
    """

    @property
    def vport(self):
        # Child accessor for virtual-port operations, sharing our client.
        return VirtualPort(self.client)

    def all(self, **kwargs):
        """Return all virtual servers."""
        return self._get("slb.virtual_server.getAll", **kwargs)

    def get(self, name, **kwargs):
        """Look up a virtual server by name."""
        return self._post("slb.virtual_server.search", {'name': name},
                          **kwargs)

    def _set(self, action, name, ip_address=None, status=1, **kwargs):
        """Shared payload builder for create/update actions.

        minimal_dict drops None-valued fields so an update without
        ip_address does not clobber the existing address.
        """
        params = {
            "virtual_server": self.minimal_dict({
                "name": name,
                "address": ip_address,
                "status": status,
            }),
        }
        self._post(action, params, **kwargs)

    def create(self, name, ip_address, status=1, **kwargs):
        """Create a virtual server."""
        self._set("slb.virtual_server.create", name, ip_address, status,
                  **kwargs)

    def update(self, name, ip_address=None, status=1, **kwargs):
        """Update a virtual server; omitted fields are left unchanged."""
        self._set("slb.virtual_server.update", name, ip_address, status,
                  **kwargs)

    def delete(self, name, **kwargs):
        """Delete a virtual server by name."""
        self._post("slb.virtual_server.delete", {"name": name}, **kwargs)

    def stats(self, name, **kwargs):
        """Fetch statistics for a single virtual server."""
        return self._post("slb.virtual_server.fetchStatistics", {"name": name},
                          **kwargs)

    def all_stats(self, **kwargs):
        """Fetch statistics for all virtual servers."""
        return self._get("slb.virtual_server.fetchAllStatistics", **kwargs)
| StarcoderdataPython |
3518152 | <filename>ros2bag/test/test_record.py
# Copyright 2020 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pathlib import Path
import shutil
import tempfile
import unittest
from launch import LaunchDescription
from launch.actions import ExecuteProcess
import launch_testing
import launch_testing.actions
import launch_testing.asserts
from launch_testing.asserts import EXIT_OK
import pytest
@pytest.mark.launch_test
def generate_test_description():
    """Launch `ros2 bag record -a` into a temp directory for the tests below.

    Returns the LaunchDescription plus ``locals()``; launch_testing injects
    those locals (e.g. ``record_all_process``, ``tmp_dir_name``) as fixtures
    into the test methods.
    """
    tmp_dir_name = tempfile.mkdtemp()
    # mkdtemp creates the dir, but `ros2 bag record` requires the output
    # path itself to not exist yet — hence the subdirectory.
    output_path = Path(tmp_dir_name) / 'ros2bag_test_record'
    record_all_process = ExecuteProcess(
        cmd=['ros2', 'bag', 'record', '-a', '--output', output_path.as_posix()],
        name='ros2bag-cli',
        output='screen',
    )
    return LaunchDescription([
        record_all_process,
        launch_testing.actions.ReadyToTest()
    ]), locals()
class TestRecord(unittest.TestCase):
    """Runs while the `ros2 bag record` process is still alive."""

    def test_output(self, record_all_process, proc_output):
        # The recorder must announce discovery and subscribe to the two
        # topics that always exist on a ROS 2 system.
        proc_output.assertWaitFor(
            'Listening for topics...',
            process=record_all_process
        )
        proc_output.assertWaitFor(
            "Subscribed to topic '/rosout'",
            process=record_all_process
        )
        proc_output.assertWaitFor(
            "Subscribed to topic '/parameter_events'",
            process=record_all_process
        )
@launch_testing.post_shutdown_test()
class TestRecordAfterShutdown(unittest.TestCase):
    """Runs after the recorder process has been shut down."""

    def test_exit_code(self, tmp_dir_name, record_all_process, proc_info):
        # Cleanup the bag directory created by generate_test_description().
        shutil.rmtree(tmp_dir_name, ignore_errors=True)
        # Check that the process exited with code 0
        launch_testing.asserts.assertExitCodes(
            proc_info,
            # SIGINT (2) is the typical exit code we see coming from rclcpp
            # On Windows, we get value '1'
            allowable_exit_codes=[EXIT_OK, 2] if os.name != 'nt' else [EXIT_OK, 1, 2],
            process=record_all_process
        )
| StarcoderdataPython |
# Read the weight of 5 people and report the heaviest and the lightest.
maior = 0
menor = 0
for pessoa in range(1, 6):
    peso = float(input(f'Digite o {pessoa}° peso: '))
    if pessoa == 1:
        # The first reading seeds both extremes.
        maior = menor = peso
    else:
        maior = max(maior, peso)
        menor = min(menor, peso)
print(f'O maior peso lido foi de {maior}KG')
print(f'O menor peso lido foi de {menor}KG')
| StarcoderdataPython |
309406 | from queue import Queue
import jsonpickle
from handlers.JobEventHandler import JobEventHandler
from infrastructor.multi_processing.ProcessManager import ProcessManager
from scheduler.JobSchedulerService import JobSchedulerService
class JobSchedulerEvent:
    """APScheduler event listeners that persist job events via
    JobSchedulerService and forward them to a handler process.

    All state is class-level; listeners are registered as static methods.
    """

    # Populated externally / by create_event_handler().
    job_scheduler_type = None
    job_event_queue: Queue = None
    process_manager: ProcessManager = None

    def __del__(self):
        # NOTE(review): this deletes the *class* attribute, not instance
        # state — any instance being garbage-collected tears down the shared
        # process manager.
        del JobSchedulerEvent.process_manager

    @staticmethod
    def create_event_handler():
        """Start the background process that drains the job event queue."""
        JobSchedulerEvent.process_manager = ProcessManager()
        JobSchedulerEvent.job_event_queue = JobSchedulerEvent.process_manager.create_queue()
        process_kwargs = {
            "event_queue": JobSchedulerEvent.job_event_queue,
        }
        JobSchedulerEvent.process_manager.start_processes(target_method=JobEventHandler.start_job_event_handler_process,
                                                          kwargs=process_kwargs)

    # Plain function used as a decorator during class-body evaluation
    # (deliberately NOT a staticmethod): wraps each listener so it receives
    # a fresh, fully configured JobSchedulerService.
    def event_service_handler(func):
        def inner(*args, **kwargs):
            service = JobSchedulerService()
            if service.job_scheduler_type is None:
                service.set_job_scheduler_type(job_scheduler_type=JobSchedulerEvent.job_scheduler_type)
            if service.job_event_queue is None:
                service.set_job_event_queue(job_event_queue=JobSchedulerEvent.job_event_queue)
            # args[0] is the APScheduler event object.
            result = func(service=service, event=args[0], **kwargs)
            del service
            return result
        return inner

    @staticmethod
    @event_service_handler
    def listener_job_added(service: JobSchedulerService, event, *args, **kwargs):
        """Persist a newly added job and log its configuration."""
        job = service.get_job(event.job_id)
        service.add_job(event)
        service.add_job_event(event)
        service.add_log(event, f'{job.name} added with funct_ref:{job.func_ref}, max_instances:{job.max_instances}')

    @staticmethod
    @event_service_handler
    def listener_job_removed(service: JobSchedulerService, event, *args, **kwargs):
        """Record the removal of a single job."""
        service.add_job_event(event)
        service.remove_job(event)
        service.add_log(event, f'Job removed')

    @staticmethod
    @event_service_handler
    def listener_all_jobs_removed(service: JobSchedulerService, event, *args, **kwargs):
        """Record the removal of all jobs."""
        service.add_job_event(event)
        service.remove_job(event)
        service.add_log(event, f'Jobs removed')

    @staticmethod
    @event_service_handler
    def listener_finish(service: JobSchedulerService, event, *args, **kwargs):
        """Log a job's completion: exception/traceback on failure, the
        jsonpickle-encoded return value on success."""
        service.add_job_event(event)
        if hasattr(event, 'exception') and event.exception:
            if hasattr(event, 'traceback') and event.traceback:
                service.add_log(event,
                                f'exception:{event.exception} traceback:{event.traceback}')
            else:
                service.add_log(event, f'exception:{event.exception}')
        else:
            retval = None
            if hasattr(event, 'retval') and event.retval:
                retval = event.retval
            retval_string = jsonpickle.encode(retval)
            service.add_log(event, f'return value:{retval_string}')

    @staticmethod
    @event_service_handler
    def listener_job_submitted(service: JobSchedulerService, event, *args, **kwargs):
        """Update a submitted job and log its next run time (ms precision)."""
        job = service.get_job(event.job_id)
        next_run_time = None
        if hasattr(job, 'next_run_time') and job.next_run_time:
            # Trim microseconds to milliseconds for readability.
            next_run_time = job.next_run_time.strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
        service.add_job_event(event)
        service.update_job(event)
        service.add_log(event, f'Next Run Time:{next_run_time}')

    @staticmethod
    @event_service_handler
    def listener_job_others(service: JobSchedulerService, event, *args, **kwargs):
        """Catch-all for other job-level events."""
        service.add_job_event(event)
        service.add_log(event, f'')

    @staticmethod
    @event_service_handler
    def listener_scheduler_other_events(service: JobSchedulerService, event, *args, **kwargs):
        """Catch-all for scheduler-level events (log only, no event record)."""
        service.add_log(event, f'')
| StarcoderdataPython |
# This sample tests a variety of unicode characters including those that
# require two-code (surrogate) forms. Each assignment exercises identifier
# characters from a different script; the final one is intentionally invalid.

# Old Italic
𐌎𐌘𐌟𐌁 = 42

# Egyptian hieroglyphs
𓃘𓐭𓇀𓅨𓆙 = 2

# Linear B Ideograms
𐂂𐃪𐃯 = ""

# Cuneiform
𒀟𒀕𒀰𒁜𒂐𒄊 = ""

# Old Persian
𐎠𐏊𐏏 = 3

# Lydian
𐤢𐤷𐤬𐤮 = 4

# Phoenician
𐤔𐤑𐤇 = 4

# Nabataean
𐢖𐢊ﬗ = 0

# This should generate an error because "𐢭" is outside the range of
# characters supported by the Python standard.
𐢭 = 0
| StarcoderdataPython |
5125250 | <reponame>GBHULLAR-POST/field_runner
from pygame.locals import*
import pygame
import sys
import random
import math
import effects
import levels
# --- Global game setup: display, clock, palette and tuning constants. ---
pygame.init()
SCREEN_SIZE = (1200, 700)
SCREEN = pygame.display.set_mode(SCREEN_SIZE)
pygame.display.set_caption('FIELD RUNNER')
clock = pygame.time.Clock()

# RGB colour palette used throughout the game.
white = (255, 255, 255)
black = (0, 0, 0)
red = (220, 20, 60)
grey = (128, 128, 128)
purple = (138, 43, 226)
blue = (0, 0, 255)
light_blue = (173, 188, 230)
tor = (64, 224, 208)
aqua = (0, 255, 191)
light_purple = (147, 112, 219)

# Horizontal walk speeds (pixels per frame).
right = 3
left = -3
mouse = [0, 0]
bullet_particle = -4

# NOTE(review): per pygame docs, mixer.pre_init only takes effect when
# called *before* pygame.init() — here it runs after; confirm intent.
pygame.mixer.pre_init(44100, -16, 2, 4096)
pygame.mixer.set_num_channels(16)
# ------------------------------------------------------------------------------------------------------------------------------------------------
class player_class:
    """The player avatar: a purple square with double jump, mouse-aimed
    shooting with recoil, a shockwave attack, and a "stick" mode in which
    the player merges with the child into the pre-rendered ``combined``
    surface/rect."""

    def __init__(self, combined):
        # `combined` is the surface drawn while stuck to the child.
        self.color = purple
        self.dimension = (40, 40)
        self.player_rect = pygame.Rect(
            SCREEN_SIZE[0]/2, SCREEN_SIZE[1]/2, self.dimension[0], self.dimension[1])
        self.moving_right = False
        self.moving_left = False
        self.player_movement = [0, 0]  # per-frame [dx, dy]
        self.y_momentum = 0
        self.fall = True
        self.jump = False
        self.jump_count = 0   # double jump: max 2, reset on landing
        self.stick = False    # True while merged with the child
        self.x_momentum = 0   # horizontal recoil velocity from shooting
        self.shoot_right = False
        self.shoot_left = False
        self.player_hit = False
        self.combined = combined
        self.combined_dimension = (40, 70)
        self.combined_rect = self.combined.get_rect()
        self.player_c_child = False   # touching the child's side
        self.player_c_moving = False  # riding a moving platform
        self.direction = 0            # moving-platform drift direction
        self.shooting = False
        self.shock = False            # shockwave fired this frame
        self.menu = False
        self.control = True           # keyboard input enabled

    def draw(self, SCREEN):
        """Draw the combined sprite while stuck, else the plain square."""
        if self.stick:
            SCREEN.blit(self.combined,
                        (self.combined_rect[0], self.combined_rect[1]))
        else:
            pygame.draw.rect(SCREEN, self.color, [
                self.player_rect[0], self.player_rect[1], self.dimension[0], self.dimension[1]])

    def get_input(self, child, platform_rects, moving_platform_rects, enemies_rects, enemy_bullet_list, stick_power, bullet_list, impact_particles, shockwave_list, afterwave_list, total_wave, jump_sound, shoot_sound, hit_sound, wave_sound, stick_sound):
        """Process the pygame event queue, then run this frame's movement."""
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                sys.exit()
            if event.type == pygame.MOUSEBUTTONDOWN:
                # Fire toward the cursor; recoil pushes the opposite way.
                shoot_sound.play()
                self.shooting = True
                mouse = pygame.mouse.get_pos()
                bullet_list.append(bullet_class(self.player_rect, mouse))
                if mouse[0]-self.player_rect[0] > 0:
                    self.x_momentum = -1
                    self.shoot_right = True
                    self.moving_left = True
                else:
                    self.x_momentum = 1
                    self.shoot_left = True
                    self.shoot_right = False
                    self.moving_right = True
            elif event.type == pygame.MOUSEBUTTONUP:
                self.shooting = False
            elif event.type == pygame.KEYDOWN and self.control:
                if event.key == pygame.K_m and total_wave > 0:
                    # Shockwave attack, limited by the remaining wave count.
                    wave_sound.play()
                    shockwave_list.append(effects.shockwave_class(
                        [self.player_rect.center[0], self.player_rect.center[1]], self.color))
                    afterwave_list.append(effects.afterwave_class(
                        [self.player_rect.center[0], self.player_rect.center[1]], light_purple))
                    self.shock = True
                if event.key == pygame.K_ESCAPE:
                    self.menu = True
                if event.key == pygame.K_d:
                    self.moving_right = True
                if event.key == pygame.K_a:
                    self.moving_left = True
                if event.key == pygame.K_w and self.jump_count < 2:
                    # Double jump: allowed twice before touching ground.
                    jump_sound.play()
                    self.fall = False
                    self.y_momentum = -8
                    self.jump = True
                    self.jump_count += 1
                if event.key == pygame.K_SPACE and self.player_c_child == True:
                    # Merge with the child (consumes stick power elsewhere);
                    # from here on player_rect aliases combined_rect.
                    stick_sound.play()
                    if stick_power.combined_dimension[0] > 0:
                        self.moving_right = False
                        self.moving_left = False
                        self.stick = True
                        self.combined_rect[0] = self.player_rect[0]
                        self.combined_rect[1] = self.player_rect[1]+30
                        self.player_rect = self.combined_rect
            elif event.type == pygame.KEYUP:
                if event.key == pygame.K_d:
                    self.moving_right = False
                if event.key == pygame.K_a:
                    self.moving_left = False
        self.movement(child, platform_rects, moving_platform_rects,
                      enemies_rects, enemy_bullet_list, impact_particles, hit_sound)

    def movement(self, child, platform_rects, moving_platform_rects, enemies_rects, enemy_bullet_list, impact_particles, hit_sound):
        """Accumulate this frame's velocity, then resolve collisions."""
        self.player_movement = [0, 0]
        if self.player_c_moving:
            # Drift along with the moving platform under our feet.
            self.player_movement[0] += 1*self.direction
        if self.moving_left and self.x_momentum == 0:
            self.player_movement[0] += left
        if self.moving_right and self.x_momentum == 0:
            self.player_movement[0] += right
        for bullet in enemy_bullet_list:
            if bullet.bullet_rect.colliderect(self.player_rect):
                hit_sound.play()
                for i in range(10):
                    impact_particles.append(effects.impact_particles_class([bullet.x, bullet.y], [
                        self.player_rect.center[0], self.player_rect.center[1]], self.color))
                self.player_hit = True
                enemy_bullet_list.remove(bullet)
                break
        else:
            # for-else: only reached when no bullet hit this frame.
            self.player_hit = False
        if self.shoot_left:
            # Recoil decays back toward zero by 0.1/frame.
            self.player_movement[0] += self.x_momentum
            self.x_momentum -= 0.1
            if self.x_momentum <= 0:
                self.moving_right = False
                self.x_momentum = 0
                self.shoot_left = False
        if self.shoot_right:
            self.player_movement[0] += self.x_momentum
            self.x_momentum += 0.1
            if self.x_momentum >= 0:
                self.moving_left = False
                self.x_momentum = 0
                self.shoot_right = False
        if self.fall:
            # Gravity: 0.3 px/frame^2.
            self.player_movement[1] += self.y_momentum
            self.y_momentum += 0.3
        if self.jump:
            self.player_movement[1] += self.y_momentum
            self.y_momentum += 0.3
            if self.y_momentum >= 0:
                # Apex reached: switch from jumping to falling.
                self.fall = True
                self.jump = False
        self.check_move(child, platform_rects,
                        moving_platform_rects, enemies_rects)

    def collision_test(self, tile_rects):
        """Collect the static rects currently overlapping the player."""
        self.tiles_hit = []
        for tile in tile_rects:
            if self.player_rect.colliderect(tile):
                self.tiles_hit.append(tile)

    def collision_test_moving(self, tile_rects):
        """Collect overlapping moving-platform rects; each entry is
        (rect, direction), and the platform's direction is remembered."""
        self.tiles_hit = []
        for tile in tile_rects:
            if self.player_rect.colliderect(tile[0]):
                self.tiles_hit.append(tile[0])
                self.direction = tile[1]

    def check_move(self, child, platform_rects, moving_platform_rects, enemies_rects):
        """Axis-separated collision resolution: X first, then Y, against
        platforms, moving platforms, enemies and the child."""
        self.player_rect.x += self.player_movement[0]
        self.collision_test(platform_rects)
        if self.stick == False:
            # Track whether we are beside the child (blocks landing on it).
            # NOTE(review): self.side is only assigned here; if stick is True
            # on the very first frame it would be unset below — confirm.
            if self.player_rect.colliderect(child.child_rect):
                self.side = True
            else:
                self.side = False
        if self.moving_right:
            for tile in self.tiles_hit:
                self.player_rect.right = tile.left
        if self.moving_left:
            for tile in self.tiles_hit:
                self.player_rect.left = tile.right
        self.collision_test(enemies_rects)
        if self.moving_right:
            for tile in self.tiles_hit:
                # Bumping an enemy cancels movement and recoil.
                self.player_rect.right = tile.left
                self.shoot_left = False
                self.moving_right = False
                self.x_momentum = 0
        if self.moving_left:
            for tile in self.tiles_hit:
                self.player_rect.left = tile.right
                self.moving_left = False
                self.shoot_right = False
                self.x_momentum = 0
        self.player_rect.y += self.player_movement[1]
        self.collision_test(platform_rects)
        if self.side == False and self.stick == False:
            # Land on / bonk against the child only when not beside it.
            if self.fall:
                if self.player_rect.colliderect(child.child_rect):
                    self.player_rect.bottom = child.child_rect.top
                    self.jump_count = 0
                    self.y_momentum = 0
            if self.jump:
                if self.player_rect.colliderect(child.child_rect):
                    self.player_rect.top = child.child_rect.bottom
                    self.y_momentum = 0
        if self.fall:
            for tile in self.tiles_hit:
                self.player_rect.bottom = tile.top
                self.player_c_moving = False
                self.jump_count = 0
                self.y_momentum = 0
        if self.jump:
            for tile in self.tiles_hit:
                self.player_rect.top = tile.bottom
                self.y_momentum = 0
        self.collision_test_moving(moving_platform_rects)
        if self.side == False and self.stick == False:
            if self.fall:
                if self.player_rect.colliderect(child.child_rect):
                    self.player_rect.bottom = child.child_rect.top
                    self.jump_count = 0
                    self.y_momentum = 0
            if self.jump:
                if self.player_rect.colliderect(child.child_rect):
                    self.player_rect.top = child.child_rect.bottom
                    self.y_momentum = 0
        if self.fall:
            for tile in self.tiles_hit:
                # Landing on a moving platform: drift with it unless the
                # player is actively walking.
                self.player_rect.bottom = tile.top
                self.player_c_moving = True
                self.jump_count = 0
                self.y_momentum = 0
                if self.moving_right or self.moving_left:
                    self.player_c_moving = False
        if self.jump:
            for tile in self.tiles_hit:
                self.player_rect.top = tile.bottom
                self.y_momentum = 0
        self.collision_test(enemies_rects)
        if self.fall:
            for tile in self.tiles_hit:
                self.player_rect.bottom = tile.top
                self.jump_count = 0
                self.y_momentum = 0
        if self.jump:
            for tile in self.tiles_hit:
                self.player_rect.top = tile.bottom
                self.y_momentum = 0
# ------------------------------------------------------------------------------------------------------------------------------------------------
class child_class:
    """The companion ("child") square the player can push, carry and merge
    with. It enters the level with a decaying horizontal glide, gets knocked
    around by bullets, and attaches to whichever side the player pushes."""

    def __init__(self):
        self.color = light_blue
        self.dimension = [30, 30]
        self.child_rect = pygame.Rect(
            100, 400, self.dimension[0], self.dimension[1])
        self.fall = True
        self.y_momentum = -15      # initial upward pop on spawn
        self.child_movement = [0, 0]
        self.side_hit = 'none'     # which side the player is pushing from
        self.moving_right = False
        self.moving_left = False
        self.x_momentum = 0        # knock-back velocity
        self.hit_right = False
        self.hit_left = False
        self.child_c_moving = False  # riding a moving platform
        self.direction = 0
        self.child_hit = False
        self.start = 0             # frames elapsed in the entry glide
        self.letter = True         # True during the entry glide
        self.rate = 10             # entry glide speed, decays by 0.1/frame
        # NOTE(review): duplicate assignment kept from the original.
        self.child_hit = False

    def draw(self, SCREEN):
        """Draw the child as a light blue square."""
        pygame.draw.rect(SCREEN, self.color, [self.child_rect.x, self.child_rect.y, int(
            self.dimension[0]), int(self.dimension[1])])

    def movement(self, player, platform_rects, enemies_rects, enemy, bullet_list, enemy_bullet_list, moving_platform_rects, impact_particles, hit_sound):
        """Accumulate this frame's velocity (entry glide, pushes, hits,
        gravity), then resolve collisions."""
        self.child_movement = [0, 0]
        for bullet in enemy_bullet_list:
            if bullet.bullet_rect.colliderect(self.child_rect):
                for i in range(10):
                    impact_particles.append(effects.impact_particles_class([bullet.x, bullet.y], [
                        self.child_rect.center[0], self.child_rect.center[1]], self.color))
                self.child_hit = True
                enemy_bullet_list.remove(bullet)
                break
        else:
            # for-else: no enemy bullet hit this frame.
            self.child_hit = False
        if self.letter:
            # Entry glide: speed decays by 0.1 per frame ...
            self.child_movement[0] += self.rate
            self.rate -= 0.1
        if self.letter:
            # ... and ends after 90 frames.
            self.start += 1
            if self.start >= 90:
                self.letter = False
        if self.child_c_moving:
            self.child_movement[0] += self.direction
        if player.player_rect.colliderect(self.child_rect):
            # Remember which side the player is pushing from.
            if player.player_movement[0] > 0:
                self.side_hit = 'right'
            elif player.player_movement[0] < 0:
                self.side_hit = 'left'
        else:
            self.side_hit = 'none'
            self.moving_right = False
            self.moving_left = False
        if self.side_hit != 'none':
            player.player_c_child = True
        else:
            player.player_c_child = False
        if self.side_hit == 'right':
            # Pin the child against the pushing player.
            self.child_rect.left = player.player_rect.right
            self.moving_right = True
        elif self.side_hit == 'left':
            self.child_rect.right = player.player_rect.left
            self.moving_left = True
        for bullet in bullet_list:
            if bullet.bullet_rect.colliderect(self.child_rect):
                # Friendly fire: knock-back away from the bullet direction.
                hit_sound.play()
                for i in range(10):
                    impact_particles.append(effects.impact_particles_class([bullet.x, bullet.y], [
                        self.child_rect.center[0], self.child_rect.center[1]], self.color))
                self.child_hit = True
                if bullet.bullet_movement[0] > 0:
                    self.x_momentum = 2
                    self.hit_left = True
                elif bullet.bullet_movement[0] < 0:
                    self.x_momentum = -3
                    self.hit_right = True
                bullet_list.remove(bullet)
                break
        else:
            self.child_hit = False
        for bullet in enemy_bullet_list:
            if bullet.bullet_rect.colliderect(self.child_rect):
                hit_sound.play()
                # NOTE(review): reads bullet.enemy_bullet_movement — confirm
                # the enemy bullet class exposes that attribute name.
                if bullet.enemy_bullet_movement[0] > 0:
                    self.x_momentum = 2
                    self.hit_left = True
                elif bullet.enemy_bullet_movement[0] < 0:
                    self.x_momentum = -2
                    self.hit_right = True
                enemy_bullet_list.remove(bullet)
                break
        self.child_movement[0] += self.x_momentum
        if self.hit_right:
            # Knock-back decays by 0.3/frame until it reaches zero.
            self.x_momentum += 0.3
            if self.x_momentum >= 0:
                self.x_momentum = 0
                self.hit_right = False
        elif self.hit_left:
            self.x_momentum -= 0.3
            if self.x_momentum <= 0:
                self.x_momentum = 0
                self.hit_left = False
        if self.fall:
            # Gravity: 0.5 px/frame^2 (heavier than the player's 0.3).
            self.child_movement[1] += self.y_momentum
            self.y_momentum += 0.5
        self.check_move(player, platform_rects,
                        enemies_rects, moving_platform_rects)

    def check_move(self, player, platform_rects, enemies_rects, moving_platform_rects):
        """Axis-separated collision resolution; also repositions the player
        when the child being pushed hits a wall."""
        self.child_rect[0] += self.child_movement[0]
        self.collision_test(platform_rects)
        for tile in self.tiles_hit:
            # Blocked horizontally: undo the whole X move.
            self.child_rect[0] = self.child_rect[0]-self.child_movement[0]
            self.x_momentum = 0
            break
        if self.moving_right:
            for tile in self.tiles_hit:
                self.child_rect.right = tile.left
                player.player_rect.right = self.child_rect.left
        if self.moving_left:
            for tile in self.tiles_hit:
                self.child_rect.left = tile.right
                player.player_rect.left = self.child_rect.right
        self.collision_test(enemies_rects)
        for tile in self.tiles_hit:
            if self.hit_left:
                self.child_rect.right = tile.left
            elif self.hit_right:
                self.child_rect.left = tile.right
            self.x_momentum = 0
            break
        if self.moving_right:
            for tile in self.tiles_hit:
                self.child_rect.right = tile.left
                player.player_rect.right = self.child_rect.left
                self.moving_left = False
        if self.moving_left:
            for tile in self.tiles_hit:
                self.child_rect.left = tile.right
                player.player_rect.left = self.child_rect.right
                self.moving_right = False
        self.child_rect[1] += int(self.child_movement[1])
        self.collision_test(platform_rects)
        if self.child_rect.colliderect(player.player_rect):
            # Land on the player only when not beside them.
            if player.side == False:
                self.child_rect.bottom = player.player_rect.top
                self.y_momentum = 0
        if self.fall:
            for tile in self.tiles_hit:
                self.child_rect.bottom = tile.top
                self.y_momentum = 0
                self.child_c_moving = False
        self.moving_collision_test(moving_platform_rects)
        if self.child_rect.colliderect(player.player_rect):
            if player.side == False:
                self.child_rect.bottom = player.player_rect.top
                self.y_momentum = 0
        if self.fall:
            for tile in self.tiles_hit:
                # Landed on a moving platform: drift with it next frame.
                self.child_rect.bottom = tile.top
                self.y_momentum = 0
                self.child_c_moving = True
        self.collision_test(enemies_rects)
        if self.fall:
            for tile in self.tiles_hit:
                self.child_rect.bottom = tile.top
                self.y_momentum = 0

    def collision_test(self, tile_rects):
        """Collect the static rects currently overlapping the child."""
        self.tiles_hit = []
        for tile in tile_rects:
            if self.child_rect.colliderect(tile):
                self.tiles_hit.append(tile)

    def moving_collision_test(self, tile_rects):
        """Collect overlapping moving-platform rects ((rect, direction)
        entries) and remember the platform's direction."""
        self.tiles_hit = []
        for tile in tile_rects:
            if self.child_rect.colliderect(tile[0]):
                self.tiles_hit.append(tile[0])
                self.direction = tile[1]
# ------------------------------------------------------------------------------------------------------------------------------------------------
class enemy_class():
    """A red enemy square that drops in from the top of the screen,
    periodically lunges at the player, takes knock-back from player
    bullets, and feeds the stick-power meter when hit."""

    def __init__(self, player, offset):
        self.color = red
        self.dimension = (40, 40)
        if offset == 0:
            # First wave: always spawn to the player's right.
            self.enemy_rect = pygame.Rect(random.randint(
                player.player_rect[0]+140, SCREEN_SIZE[0]), -40, self.dimension[0], self.dimension[1])
        else:
            # Later waves: spawn on a random side of the player.
            self.enemy_rect = random.choice([pygame.Rect(random.randint(0, player.player_rect[0]-100), -40, self.dimension[0], self.dimension[1]), pygame.Rect(
                random.randint(player.player_rect[0]+140, SCREEN_SIZE[0]), -40, self.dimension[0], self.dimension[1])])
        self.fall = True
        self.hit_right = False
        self.hit_left = False
        self.y_momentum = 0
        self.x_momentum = 0
        self.moving_right = False
        self.moving_left = False
        self.enemy_c_moving = False  # riding a moving platform
        self.direction = 0
        self.shoot_right = False
        self.shoot_left = False
        self.enemy_health = effects.enemy_health_bar(self.enemy_rect)
        self.time = 1  # frame counter driving the periodic lunge

    def draw(self, SCREEN):
        """Draw the enemy and its health bar."""
        pygame.draw.rect(SCREEN, self.color, [self.enemy_rect.x, self.enemy_rect.y, int(
            self.dimension[0]), int(self.dimension[1])])
        self.enemy_health.update(self.enemy_rect, SCREEN)

    def movement(self, platform_rects, enemies_rects, bullet_list, player, child, stick_power, moving_platform_rects, enemy_list, impact_particles, total_score, hit_sound):
        """AI + physics for one frame: periodic lunge, bullet knock-back,
        gravity, then collision resolution."""
        self.enemy_movement = [0, 0]
        self.time += 1
        if self.time > 500:
            self.time = 1
        # Every 100 frames, lunge if the player is within 200 px.
        if self.time % 100 == 0 and abs(player.player_rect[0]-self.enemy_rect[0]) < 200:
            if self.enemy_rect[0]-player.player_rect[0] > 0:
                self.hit_left = True
                self.moving_right = True
                self.x_momentum = 4
            elif self.enemy_rect[0]-player.player_rect[0] < 0:
                self.hit_right = True
                self.moving_left = True
                self.x_momentum = -4
        if self.enemy_c_moving:
            self.enemy_movement[0] += self.direction
        # NOTE(review): the inner `enemy` loop variable is unused, so each
        # bullet is tested once per entry in enemy_list; the bullet is also
        # removed from bullet_list while iterating it — confirm intended.
        for bullet in bullet_list:
            for enemy in enemy_list:
                if bullet.bullet_rect.colliderect(self.enemy_rect):
                    hit_sound.play()
                    # NOTE(review): total_score is a local int here; this
                    # increment never reaches the caller.
                    total_score += 1
                    for i in range(10):
                        impact_particles.append(effects.impact_particles_class([bullet.x, bullet.y], [
                            self.enemy_rect.center[0], self.enemy_rect.center[1]], self.color))
                    # Each hit grows the stick-power meter and drains health.
                    stick_power.combined_dimension[0] += 10
                    self.enemy_health.decrease = 10
                    if self.enemy_rect.center[0]-player.player_rect.center[0] > 0:
                        self.x_momentum = 3
                        self.hit_left = True
                        self.moving_right = True
                    else:
                        self.x_momentum = -3
                        self.hit_right = True
                        self.moving_left = True
                    bullet_list.remove(bullet)
                    break
        self.enemy_movement[0] += self.x_momentum
        if self.hit_right:
            # Knock-back decays by 0.3/frame until it reaches zero.
            self.x_momentum += 0.3
            if self.x_momentum >= 0:
                self.x_momentum = 0
                self.hit_right = False
                self.moving_left = False
        elif self.hit_left:
            self.x_momentum -= 0.3
            if self.x_momentum <= 0:
                self.x_momentum = 0
                self.hit_left = False
                self.moving_right = False
        if self.fall:
            # Gravity: 0.3 px/frame^2.
            self.enemy_movement[1] += self.y_momentum
            self.y_momentum += 0.3
        self.check_move(platform_rects, enemies_rects,
                        bullet_list, child, player, moving_platform_rects)

    def check_move(self, platform_rects, enemies_rects, bullet_list, child, player, moving_platform_rects):
        """Axis-separated collision resolution against platforms, the child,
        other enemies and moving platforms."""
        self.enemy_rect.x += self.enemy_movement[0]
        self.collision_test(platform_rects)
        for tile in self.tiles_hit:
            # Push out of the wall on the side we moved into.
            if self.enemy_movement[0] > 0:
                self.enemy_rect.right = tile.left
            else:
                self.enemy_rect.left = tile.right
            break
        # Treat the child as a one-off horizontal obstacle.
        self.tiles_hit = []
        if self.enemy_rect.colliderect(child.child_rect):
            self.tiles_hit.append(child.child_rect)
        if self.moving_right:
            for tile in self.tiles_hit:
                self.enemy_rect.right = tile.left
        if self.moving_left:
            for tile in self.tiles_hit:
                self.enemy_rect.left = tile.right
        self.collision_test(enemies_rects)
        if self.moving_right:
            for tile in self.tiles_hit:
                self.enemy_rect.right = tile.left
                self.x_momentum = 0
        if self.moving_left:
            for tile in self.tiles_hit:
                self.enemy_rect.left = tile.right
                self.x_momentum = 0
        self.enemy_rect.y += self.enemy_movement[1]
        self.collision_test(platform_rects)
        if self.fall:
            for tile in self.tiles_hit:
                self.enemy_rect.bottom = tile.top
                self.y_momentum = 0
                self.enemy_c_moving = False
        self.collision_test(enemies_rects)
        if self.fall:
            for tile in self.tiles_hit:
                self.enemy_rect.bottom = tile.top
                self.y_momentum = 0
        self.moving_collision_test(moving_platform_rects)
        if self.fall:
            for tile in self.tiles_hit:
                # Landing on a moving platform: drift with it next frame.
                self.enemy_rect.bottom = tile.top
                self.enemy_c_moving = True
                self.y_momentum = 0
        self.collision_test(enemies_rects)
        if self.fall:
            for tile in self.tiles_hit:
                self.enemy_rect.bottom = tile.top
                self.y_momentum = 0
                self.enemy_c_moving = True

    def collision_test(self, tile_rects):
        """Collect the static rects currently overlapping the enemy."""
        self.tiles_hit = []
        for tile in tile_rects:
            if self.enemy_rect.colliderect(tile):
                self.tiles_hit.append(tile)

    def moving_collision_test(self, tile_rects):
        """Collect overlapping moving-platform rects ((rect, direction)
        entries) and remember the platform's direction."""
        self.tiles_hit = []
        for tile in tile_rects:
            if self.enemy_rect.colliderect(tile[0]):
                self.tiles_hit.append(tile[0])
                self.direction = tile[1]
# ------------------------------------------------------------------------------------------------------------------------------------------------
class bullet_class:
    """A player projectile fired from the player's centre toward the mouse."""

    def __init__(self, player_rect, mouse):
        self.color = blue
        self.bullet_speed = 8
        self.radius = 1
        # Spawn at the centre of the player sprite.
        self.x = player_rect.center[0]
        self.y = player_rect.center[1]
        self.bullet_rect = pygame.Rect(self.x, self.y, 10, 10)
        # Aim vector from the player's top-left corner toward the mouse.
        self.angle = math.atan2(mouse[1]-player_rect.y, mouse[0]-player_rect.x)
        self.dx = self.bullet_speed * math.cos(self.angle)
        self.dy = self.bullet_speed * math.sin(self.angle)
        self.status = 'not_collided'
        self.bullet_movement = [0, 0]

    def draw(self, SCREEN):
        """Render the bullet as a small filled circle."""
        centre = [self.bullet_rect[0], self.bullet_rect[1]]
        pygame.draw.circle(SCREEN, self.color, centre, int(self.radius))

    def move(self):
        """Advance one frame along the aim vector and sync the hitbox."""
        self.x += self.dx
        self.y += self.dy
        self.bullet_movement = [self.dx, self.dy]
        self.bullet_rect[0] = self.x
        # The hitbox trails 15px above the drawn position.
        self.bullet_rect[1] = self.y-15

    def check_move(self, platform_rects):
        """Refresh ``self.status`` against the level geometry."""
        self.collision_test(platform_rects)

    def collision_test(self, tile_rects):
        """Set ``self.status`` to 'collided' iff any tile overlaps the bullet."""
        if any(tile.colliderect(self.bullet_rect) for tile in tile_rects):
            self.status = 'collided'
        else:
            self.status = 'not_collided'
# ------------------------------------------------------------------------------------------------------------------------------------------------
class enemy_bullet_class:
    """An enemy projectile fired from the enemy's centre toward the player."""

    def __init__(self, enemy, player, child):
        # *child* is unused but kept for call-site compatibility.
        self.color = red
        self.bullet_speed = 7
        self.radius = 1
        # Spawn at the centre of the enemy sprite.
        self.x = enemy.enemy_rect.center[0]
        self.y = enemy.enemy_rect.center[1]
        self.bullet_rect = pygame.Rect(self.x, self.y, 10, 10)
        # Aim vector from the enemy's top-left corner toward the player's.
        self.angle = math.atan2(
            player.player_rect.y-enemy.enemy_rect.y, player.player_rect.x-enemy.enemy_rect.x)
        self.dx = self.bullet_speed * math.cos(self.angle)
        self.dy = self.bullet_speed * math.sin(self.angle)
        self.status = 'not_collided'
        self.enemy_bullet_movement = [0, 0]

    def draw(self, SCREEN):
        """Render the bullet as a small filled circle at its float position."""
        centre = [int(self.x), int(self.y)]
        pygame.draw.circle(SCREEN, self.color, centre, int(self.radius))

    def move(self):
        """Advance one frame along the aim vector and sync the hitbox."""
        self.x += self.dx
        self.y += self.dy
        self.enemy_bullet_movement = [self.dx, self.dy]
        self.bullet_rect[0] = int(self.x)
        self.bullet_rect[1] = int(self.y)

    def check_move(self, platform_rects):
        """Refresh ``self.status`` against the level geometry."""
        self.collision_test(platform_rects)

    def collision_test(self, tile_rects):
        """Set ``self.status`` to 'collided' iff any tile overlaps the bullet."""
        if any(tile.colliderect(self.bullet_rect) for tile in tile_rects):
            self.status = 'collided'
        else:
            self.status = 'not_collided'
# ------------------------------------------------------------------------------------------------------------------------------------------------
class gun_class:
    """The player's gun sprite, rotated every frame to track the mouse.

    Fix over the original: ``self.glock`` is now also built in ``__init__``
    (previously it was only assigned in ``update``, so calling ``draw``
    before the first ``update`` raised AttributeError), and the duplicated
    aim/rotation code is shared via ``_aim``.
    """

    def __init__(self, player, glock1, glock2):
        self.dimension = (20, 10)
        self._aim(player, glock1, glock2)
        self.glock_rect = self.org_glock.get_rect()

    def _aim(self, player, glock1, glock2):
        """Recompute the aim angle (degrees) and the rotated gun sprite.

        ``glock1`` is used while aiming into the right half-plane
        (-90..90 degrees), ``glock2`` otherwise.
        """
        mouse = pygame.mouse.get_pos()
        self.angle = (180/math.pi)*-math.atan2(
            mouse[1]-player.player_rect.center[1],
            mouse[0]-player.player_rect.center[0])
        if self.angle <= 90 and self.angle >= -90:
            self.org_glock = glock1
        else:
            self.org_glock = glock2
        # rotozoom rotates with smoothing; scale factor stays 1.
        self.glock = pygame.transform.rotozoom(
            self.org_glock, int(self.angle), 1)

    def draw(self, SCREEN, player):
        """Blit the current rotated sprite at its rect."""
        SCREEN.blit(self.glock, self.glock_rect)

    def update(self, SCREEN, player, glock1, glock2):
        """Re-aim at the mouse, follow the player, and draw."""
        self._aim(player, glock1, glock2)
        self.glock_rect.center = player.player_rect.center
        self.draw(SCREEN, player)
# ------------------------------------------------------------------------------------------------------------------------------------------------
class enemy_gun_class:
    """An enemy's gun sprite, rotated every frame to track the player.

    Mirrors ``gun_class``: ``self.glock`` is now also built in ``__init__``
    (previously only ``update`` assigned it, so ``draw`` before the first
    ``update`` raised AttributeError), and the duplicated aim/rotation code
    is shared via ``_aim``.
    """

    def __init__(self, enemy, player, glock1, glock2):
        self.color = white
        self.dimension = (20, 10)
        self._aim(enemy, player, glock1, glock2)
        self.glock_rect = self.org_glock.get_rect()

    def _aim(self, enemy, player, glock1, glock2):
        """Recompute the enemy-to-player aim angle and the rotated sprite.

        ``glock1`` is used while aiming into the right half-plane
        (-90..90 degrees), ``glock2`` otherwise.
        """
        self.angle = (180/math.pi)*-math.atan2(
            player.player_rect.center[1]-enemy.enemy_rect.center[1],
            player.player_rect.center[0]-enemy.enemy_rect.center[0])
        if self.angle <= 90 and self.angle >= -90:
            self.org_glock = glock1
        else:
            self.org_glock = glock2
        # rotozoom rotates with smoothing; scale factor stays 1.
        self.glock = pygame.transform.rotozoom(
            self.org_glock, int(self.angle), 1)

    def draw(self, SCREEN, enemy):
        """Blit the current rotated sprite at its rect."""
        SCREEN.blit(self.glock, self.glock_rect)

    def update(self, SCREEN, enemy, player, glock1, glock2):
        """Re-aim at the player, follow the enemy, and draw."""
        self._aim(enemy, player, glock1, glock2)
        self.glock_rect.center = enemy.enemy_rect.center
        self.draw(SCREEN, enemy)
# ------------------------------------------------------------------------------------------------------------------------------------------------
def check(platform, rects):
    """Return 'collided' if *platform* overlaps any rect in *rects*.

    Returns ``None`` (implicitly the original behaviour) when nothing
    overlaps or *rects* is empty.
    """
    if any(platform.colliderect(rect) for rect in rects):
        return 'collided'
    return None
# ------------------------------------------------------------------------------------------------------------------------------------------------
def main():
    """Run one full gameplay session (the in-level loop).

    Loads all sounds/images, creates the player, the child companion and the
    HUD objects, then runs a 120 FPS loop that scrolls the world so the
    player stays near x=620, spawns/updates enemies and bullets, resolves
    lava and health-bar deaths, and fades out back to the menu when
    ``player.menu`` becomes True.
    """
    # --- per-session state flags ---------------------------------------
    display = True                 # player is alive / should be drawn
    display_childupdate = True     # child is alive / should be drawn
    offset = 0                     # total world scroll applied so far
    time = 1
    scroll = [0, 0]
    enemy_bullet_list = []
    bullet_list = []
    tiles_hit = []                 # NOTE(review): never used in this function
    enemy_list = []
    particle_list = []
    lava_particles = []
    impact_particles = []
    shockwave_list = []
    afterwave_list = []
    # --- assets ---------------------------------------------------------
    jump_sound = pygame.mixer.Sound('data/jump.wav')
    shoot_sound = pygame.mixer.Sound('data/shoot.wav')
    explosion_sound = pygame.mixer.Sound('data/explosion.wav')
    hit_sound = pygame.mixer.Sound('data/hit.wav')
    wave_sound = pygame.mixer.Sound('data/wave_sound.wav')
    stick_sound = pygame.mixer.Sound('data/stick.wav')
    no_stick_sound = pygame.mixer.Sound('data/no_stick.wav')
    wave_timer_sound = pygame.mixer.Sound('data/wave_timer.wav')
    glock1 = pygame.image.load('data/glock1.png').convert_alpha()
    glock2 = pygame.image.load('data/glock2.png').convert_alpha()
    combined = pygame.image.load('data/combined.png').convert()
    letter_box1 = pygame.image.load('data/letter_box1.png').convert()
    letter_box2 = pygame.image.load('data/letter_box2.png').convert()
    glock1 = pygame.transform.scale(glock1, (40, 40))
    glock2 = pygame.transform.scale(glock2, (40, 40))
    letter_box1 = pygame.transform.scale(letter_box1, (60, 60))
    letter_box2 = pygame.transform.scale(letter_box2, (60, 60))
    letter_box_rect1 = letter_box1.get_rect()
    letter_box_rect2 = letter_box2.get_rect()
    letter_box_rect1[0] = 100      # start mailbox
    letter_box_rect2[0] = 4700     # goal mailbox at level end
    combined.set_colorkey(white)
    moving_platform_rects = []
    # --- actors and HUD --------------------------------------------------
    child = child_class()
    player = player_class(combined)
    player_bar = effects.player_health_bar()
    child_bar = effects.child_health_bar()
    lava_timer = effects.lava_timer_class()
    gun = gun_class(player, glock1, glock2)
    stick_power = effects.stick_timer(player, child)
    explosion_particles = []
    text_1 = pygame.font.Font(
        "data/impact.ttf", 30).render('SHOOT HERE', True, white)
    text_2 = pygame.font.Font(
        "data/impact.ttf", 30).render('SHOOT HERE', True, grey)
    text_1_rect = text_1.get_rect()
    text_1_rect[0] = 600
    text_1_rect[1] = 100
    text_2_rect = text_2.get_rect()
    text_2_rect[0] = 600
    text_2_rect[1] = 100
    platform_time = -300
    shoot_timer = False
    text1 = True
    text2 = False
    shockwave_time = 0
    fade_surf = pygame.Surface(SCREEN_SIZE)
    fade_surf.fill(black)
    alpha = 0
    play = True
    p_lava_hit = False
    c_lava_hit = False
    letter_box_combined = False
    shock_wave_timer = 0
    child_play = True              # one-shot guards so death sounds play once
    player_play = True
    total_score = 0
    total_wave = 1
    score = pygame.font.Font(
        "data/impact.ttf", 30).render('SCORE :'+str(total_score), True, white)
    wave = pygame.font.Font(
        "data/impact.ttf", 30).render('SHOCK_WAVES :'+str(total_wave), True, white)
    while play:
        # Camera: keep the player pinned near x=620 and scroll the world.
        scroll[0] += player.player_rect.center[0]-scroll[0]-620
        offset += scroll[0]
        if len(enemy_list) < 3:
            enemy_list.append(enemy_class(player, offset))
        player.player_rect[0] -= scroll[0]
        child.child_rect[0] -= scroll[0]
        platform_rects = []
        lava_rects = []
        enemies_rects = []
        moving_platform_rects = []
        letter_box_rect1[0] -= scroll[0]
        letter_box_rect1[1] = 440
        letter_box_rect2[0] -= scroll[0]
        letter_box_rect2[1] = 440
        # Win condition: the (stuck-together or child-alone) pair reaches
        # the goal mailbox; exact pixel offsets 40/30 match sprite widths.
        if player.stick and abs(player.player_rect[0]-letter_box_rect2[0]) == 40:
            letter_box_combined = True
        if abs(child.child_rect[0]-letter_box_rect2[0]) == 30:
            letter_box_combined = True
        platform_rects.append(letter_box_rect1)
        platform_rects.append(letter_box_rect2)
        text_2_rect[0] -= scroll[0]
        text_1_rect[0] -= scroll[0]
        SCREEN.fill(black)
        # --- draw/collect static level geometry --------------------------
        for platform in levels.platforms:
            platform[0] -= scroll[0]
            pygame.draw.rect(SCREEN, white, platform)
            platform_rects.append(pygame.Rect(platform))
        for lava in levels.lava:
            lava[0] -= scroll[0]
            pygame.draw.rect(SCREEN, red, lava)
            lava_rects.append(pygame.Rect(lava))
        # Moving platforms bounce off static geometry (platform[4] = speed).
        for platform in levels.moving_platforms:
            platform[0] -= scroll[0]
            platform[0] += platform[4]
            if check(pygame.Rect([platform[0], platform[1], platform[2], platform[3]]), platform_rects) == 'collided':
                platform[4] = -platform[4]
            pygame.draw.rect(
                SCREEN, white, [platform[0], platform[1], platform[2], platform[3]])
            moving_platform_rects.append(
                [pygame.Rect([platform[0], platform[1], platform[2], platform[3]]), platform[4]])
        # --- enemy update/cull loop --------------------------------------
        # NOTE(review): enemy_list is mutated (remove) while being iterated,
        # which can skip the element after a removed one for a frame.
        for enemy in enemy_list:
            if enemy.time % 100 == 0 and abs(player.player_rect[0]-enemy.enemy_rect[0]) < 250:
                shoot_sound.play()
                enemy_bullet_list.append(
                    enemy_bullet_class(enemy, player, child))
            enemy_gun = enemy_gun_class(enemy, player, glock1, glock2)
            enemy.enemy_rect[0] -= scroll[0]
            enemy.draw(SCREEN)
            enemy_gun.update(SCREEN, enemy, player, glock1, glock2)
            enemy.movement(platform_rects, enemies_rects, bullet_list, player, child, stick_power,
                           moving_platform_rects, enemy_list, impact_particles, total_score, hit_sound)
            # Shockwave kills every enemy processed while player.shock holds.
            if player.shock:
                total_score += 1
                explosion_sound.play()
                for i in range(10):
                    explosion_particles.append(effects.explosion_particles_class(
                        enemy.color, [enemy.enemy_rect.center[0], enemy.enemy_rect.center[1]]))
                enemy_list.remove(enemy)
                shockwave_time += 1
            if enemy.enemy_health.dimension[0] <= 0:
                explosion_sound.play()
                total_score += 1
                for i in range(10):
                    explosion_particles.append(effects.explosion_particles_class(
                        enemy.color, [enemy.enemy_rect.center[0], enemy.enemy_rect.center[1]]))
                enemy_list.remove(enemy)
                continue
            if enemy.enemy_rect.center[0] < -20 or enemy.enemy_rect.center[0] > 1240:
                enemy_list.remove(enemy)
                continue
            for rect in lava_rects:
                if enemy.enemy_rect.colliderect(rect):
                    explosion_sound.play()
                    for i in range(10):
                        explosion_particles.append(effects.explosion_particles_class(
                            enemy.color, [enemy.enemy_rect.center[0], enemy.enemy_rect.center[1]]))
                    enemy_list.remove(enemy)
                    break
            # Large lava droplets (radius > 10) are lethal to enemies too.
            for lava in lava_particles:
                if lava.radius > 10:
                    if enemy.enemy_rect.colliderect(lava.lava_rect):
                        explosion_sound.play()
                        for i in range(10):
                            explosion_particles.append(effects.explosion_particles_class(
                                enemy.color, [enemy.enemy_rect.center[0], enemy.enemy_rect.center[1]]))
                        enemy_list.remove(enemy)
                        break
            enemies_rects.append(enemy.enemy_rect)
        for particle in particle_list:
            particle.draw(SCREEN)
            particle.move()
            if particle.radius <= 0:
                particle_list.remove(particle)
        player.get_input(child, platform_rects, moving_platform_rects, enemies_rects, enemy_bullet_list, stick_power, bullet_list,
                         impact_particles, shockwave_list, afterwave_list, total_wave, jump_sound, shoot_sound, hit_sound, wave_sound, stick_sound)
        if player.stick == False:
            # NOTE(review): `enemy` here is the loop variable leaked from the
            # enemy loop above (last enemy processed); it raises NameError if
            # that loop never ran. Presumably intentional — confirm.
            child.movement(player, platform_rects, enemies_rects, enemy, bullet_list,
                           enemy_bullet_list, moving_platform_rects, impact_particles, hit_sound)
        if display_childupdate:
            child.draw(SCREEN)
        if display:
            player.draw(SCREEN)
        # --- player bullets ----------------------------------------------
        for bullet in bullet_list:
            particle_list.append(effects.particle_class(bullet, 'player'))
            bullet.move()
            # Hitting the tutorial "SHOOT HERE" target starts the lava timer.
            if bullet.bullet_rect.colliderect(text_1_rect):
                bullet_list.remove(bullet)
                shoot_timer = True
                text1 = False
                text2 = True
                continue
            if bullet.bullet_rect[0] < 0 or bullet.bullet_rect[0] > 1200:
                bullet_list.remove(bullet)
                continue
            if bullet.bullet_rect[1] < 0 or bullet.bullet_rect[1] > 700:
                bullet_list.remove(bullet)
                continue
            bullet.check_move(platform_rects)
            if bullet.status == 'collided':
                bullet_list.remove(bullet)
            if bullet.bullet_rect.center[0] < -20 or bullet.bullet_rect.center[1] > 1240:
                bullet_list.remove(bullet)
        # --- enemy bullets -----------------------------------------------
        for bullet in enemy_bullet_list:
            particle_list.append(effects.particle_class(bullet, 'enemy'))
            bullet.move()
            bullet.check_move(platform_rects)
            if bullet.status == 'collided':
                enemy_bullet_list.remove(bullet)
                continue
            if bullet.bullet_rect.center[0] < -20 or bullet.bullet_rect.center[1] > 1240:
                enemy_bullet_list.remove(bullet)
        # Lava rain only falls in the early section until the target is shot.
        if shoot_timer == False and offset < 1000:
            if offset > 20:
                lava_particles.append(effects.lava_class(offset))
        for lava in lava_particles:
            lava.move(scroll)
            lava.check_move(lava_rects, platform_rects)
            lava.draw(SCREEN)
            if lava.center[1] > 1000 or lava.radius <= 0:
                lava_particles.remove(lava)
        for lava in lava_particles:
            if lava.radius > 10:
                if player.player_rect.colliderect(lava.lava_rect):
                    if player_play:
                        explosion_sound.play()
                        player_play = False
                    p_lava_hit = True
                    display = False
                    break
        for lava in lava_particles:
            if lava.radius > 10:
                if child.child_rect.colliderect(lava.lava_rect):
                    if child_play:
                        explosion_sound.play()
                        child_play = False
                    c_lava_hit = True
                    display_childupdate = False
                    break
        if display:
            gun.update(SCREEN, player, glock1, glock2)
        # Touching a lava pool kills the player / child.
        for tile in lava_rects:
            if player.player_rect.colliderect(tile):
                if player_play:
                    explosion_sound.play()
                    player_play = False
                player.player_rect.bottom = tile.top
                player.y_momentum = 0
                display = False
                p_lava_hit = True
                break
        for tile in lava_rects:
            if child.child_rect.colliderect(tile):
                if child_play:
                    explosion_sound.play()
                    child_play = False
                child.child_rect.bottom = tile.top
                child.y_momentum = 0
                display_childupdate = False
                c_lava_hit = True
                break
        # --- death handling (health bar emptied or lava) -----------------
        if len(player_bar.list) == 0 or len(child_bar.list) == 0 or p_lava_hit == True or c_lava_hit:
            if len(player_bar.list) == 0:
                if player_play:
                    explosion_sound.play()
                    player_play = False
                display = False
            if len(child_bar.list) == 0:
                display_childupdate = False
                if child_play:
                    explosion_sound.play()
                    child_play = False
            pygame.time.delay(20)
            player.control = False
            if len(player_bar.list) == 0 or p_lava_hit:
                explosion_particles.append(effects.explosion_particles_class(
                    player.color, [player.player_rect.center[0], player.player_rect.center[1]]))
            if len(child_bar.list) == 0 or c_lava_hit:
                explosion_particles.append(effects.explosion_particles_class(
                    child.color, [child.child_rect.center[0], child.child_rect.center[1]]))
        if display:
            player_bar.update(player)
            player_bar.draw(SCREEN, player)
            player.player_hit = False
        if display_childupdate:
            child_bar.update(child)
            child_bar.draw(SCREEN, child)
            child.child_hit = False
        time += 1
        if time > 500:
            time = 1
        # Shockwave ends after it has consumed a few enemies.
        if shockwave_time > 2:
            player.shock = False
            shockwave_time = 0
        # Stick power exhausted: split the combined pair back into
        # separate player and child rects, popping the child upward.
        if stick_power.combined_dimension[0] < 0:
            no_stick_sound.play()
            stick_power.combined_dimension[0] = 0
            player.stick = False
            player.player_rect = pygame.Rect(
                player.combined_rect[0], player.combined_rect[1]+30, player.dimension[0], player.dimension[1])
            child.child_rect = pygame.Rect(
                player.player_rect[0]+5, player.player_rect[1]-40, child.dimension[0], child.dimension[1])
            child.y_momentum = -5
        if display_childupdate:
            stick_power.draw(SCREEN, player, child)
        for particle in explosion_particles:
            particle.move(scroll)
            particle.check_move(lava_rects, platform_rects)
            particle.draw(SCREEN)
            if particle.length <= 0 or particle.breadth <= 0:
                explosion_particles.remove(particle)
        for particle in impact_particles:
            particle.move(SCREEN)
            particle.update()
            if particle.radius <= 0:
                impact_particles.remove(particle)
        for enemy in enemy_list:
            enemy.enemy_health.draw(SCREEN)
        if text1:
            SCREEN.blit(text_1, text_1_rect)
        elif text2:
            SCREEN.blit(text_2, text_2_rect)
        if shoot_timer == False:
            lava_timer.location[0] -= scroll[0]
        if shoot_timer:
            lava_timer.update(SCREEN, scroll)
            if lava_timer.dimension[0] <= 0:
                shoot_timer = False
                lava_timer.dimension[0] = 140
                text1 = True
                text2 = False
        for circle in shockwave_list:
            if circle.width <= 2:
                shockwave_list.remove(circle)
            circle.update(SCREEN)
        for circle in afterwave_list:
            if circle.width <= 2:
                afterwave_list.remove(circle)
            circle.update(SCREEN)
        platform_time += 1
        if platform_time > 300:
            platform_time = -300
        SCREEN.blit(letter_box1, letter_box_rect1)
        SCREEN.blit(letter_box2, letter_box_rect2)
        if child.letter:
            pygame.time.delay(10)
        # Falling off the bottom of the screen counts as death.
        if player.player_rect[1] > 700:
            display = False
        if child.child_rect[1] > 700:
            display_childupdate = False
        # HUD text is re-rendered one frame late (blit, then render next).
        SCREEN.blit(score, (100, 100))
        score = pygame.font.Font(
            "data/impact.ttf", 20).render('SCORE :'+str(total_score), True, white)
        SCREEN.blit(wave, (100, 150))
        wave = pygame.font.Font(
            "data/impact.ttf", 20).render('SHOCK_WAVE :'+str(total_wave), True, white)
        # Fade to black on any death or on reaching the goal mailbox.
        if display == False or display_childupdate == False:
            fade_surf.set_alpha(alpha)
            SCREEN.blit(fade_surf, (0, 0))
            alpha += 2
        if letter_box_combined:
            fade_surf.set_alpha(alpha)
            SCREEN.blit(fade_surf, (0, 0))
            alpha += 2
        if alpha == 300:
            player.menu = True
        # Leaving to the menu: undo the world scroll so the level data is
        # back at its original coordinates for the next session.
        if player.menu:
            play = False
            for platform in levels.platforms:
                platform[0] += offset
            for platform in levels.lava:
                platform[0] += offset
            for platform in levels.moving_platforms:
                platform[0] += offset
        # Shockwave recharge: one wave restored every ~1000 frames.
        shock_wave_timer += 1
        if shock_wave_timer > 1000:
            if total_wave == 0:
                total_wave += 1
                wave_timer_sound.play()
            shock_wave_timer = 0
        if player.shock:
            stick_power.combined_dimension[0] = 30
            total_wave = 0
        pygame.display.update()
        clock.tick(120)
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# Top-level menu loop: shows the intro title, the main menu (START /
# INSTRUCTIONS / QUIT) with a bouncing-square background animation, then
# fades out and runs main(); when main() returns we loop back to the menu.
while True:
    play = False
    Quit = False  # NOTE(review): never read — presumably vestigial.
    alpha = 0
    fade_surf = pygame.Surface(SCREEN_SIZE)
    fade_surf.fill((0, 0, 0))
    # Palette used by both the menu and (as globals) the game classes.
    white = (255, 255, 255)
    grey = (128, 128, 128)
    blue = (0, 191, 255)
    pink = (255, 0, 127)
    yellow = (255, 255, 0)
    red = (255, 0, 0)
    green = (0, 255, 0)
    tor = (64, 224, 208)
    main_menu_surf = pygame.Surface(SCREEN_SIZE)
    instruction_surf = pygame.Surface(SCREEN_SIZE)
    animation_surf = pygame.Surface(SCREEN_SIZE)
    instructions = [pygame.font.Font("data/impact.ttf", 30).render(
        'PLAYER CONTROLS:', True, tor), pygame.font.SysFont("Impact", 30).render('POWERS:', True, tor)]
    intro_surf = pygame.Surface(SCREEN_SIZE)
    intro = pygame.font.Font(
        "data/impact.ttf", 100).render('FIELD RUNNER', True, white)
    intro_timer = 0
    player_controls_surf = pygame.Surface((400, 400))
    player_powers_surf = pygame.Surface((400, 400))
    # --------------------- pre-rendered controls text
    control_list = []
    up_1 = pygame.font.Font(
        "data/arial.ttf", 30).render('UP/DOUBLE JUMP : W', True, white)
    left_1 = pygame.font.Font(
        "data/arial.ttf", 30).render('LEFT : A', True, white)
    right_1 = pygame.font.Font(
        "data/arial.ttf", 30).render('RIGHT : D', True, white)
    click = pygame.font.Font(
        "data/arial.ttf", 30).render('FIRE : LEFT CLICK', True, white)
    control_list.append(up_1)
    control_list.append(left_1)
    control_list.append(right_1)
    control_list.append(click)
    # --------------------- pre-rendered powers text
    power_list = []
    wave = pygame.font.Font(
        "data/arial.ttf", 30).render('SHOCK WAVE : M', True, white)
    stick = pygame.font.Font(
        "data/arial.ttf", 30).render('STICK : SPACE + HOLD(A/D)', True, white)
    power_list.append(wave)
    power_list.append(stick)
    # --------------------- menu options, [highlighted, dimmed] per entry
    start_option1 = pygame.font.Font(
        "data/impact.ttf", 40).render('START', True, tor)
    start_option2 = pygame.font.Font(
        "data/impact.ttf", 30).render('START', True, grey)
    instruction_option1 = pygame.font.Font(
        "data/impact.ttf", 40).render('INSTRUCTIONS', True, tor)
    instruction_option2 = pygame.font.Font(
        "data/impact.ttf", 30).render('INSTRUCTIONS', True, grey)
    quit_option1 = pygame.font.Font(
        "data/impact.ttf", 40).render('QUIT', True, tor)
    quit_option2 = pygame.font.Font(
        "data/impact.ttf", 30).render('QUIT', True, grey)
    options = [[start_option1, start_option2], [instruction_option1,
                                                instruction_option2], [quit_option1, quit_option2]]
    location = 0          # currently highlighted menu entry (0..2)
    main_menu = True
    instruction_menu = False
    particle_list = []
    add = False           # whether explosion_effect should spawn particles
    explosion = False
    explosion_location = [0, 0]

    def explosion_effect(color):
        # Spawns (while `add`) and animates rectangle debris on
        # animation_surf; gravity pulls particles down while they shrink.
        # NOTE(review): particle_list is mutated (remove) while being
        # iterated, which can skip the next particle for one frame.
        if add:
            particle_list.append([explosion_location[0], explosion_location[1], random.randint(
                50, 80), random.randint(50, 80), random.choice([-5, -4, -3, 0, 3, 4]), random.choice([-20, -6, 0])])
        for particle in particle_list:
            pygame.draw.rect(animation_surf, color, [
                             particle[0], particle[1], particle[2], particle[3]])
            particle[0] += particle[4]
            particle[1] += particle[5]
            particle[5] += 0.5
            particle[3] -= 0.5
            particle[2] -= 0.5
            if particle[2] <= 0 or particle[3] <= 0:
                particle_list.remove(particle)

    class square_class:
        # A decorative square launched upward from the bottom of the menu
        # screen; it grows each frame and explodes once it is big enough.
        def __init__(self):
            self.color = random.choice([blue, pink, yellow, red, green])
            self.dimension = [30, 30]
            self.square_rect = pygame.Rect(
                SCREEN_SIZE[0]/2, SCREEN_SIZE[1]-40, self.dimension[0], self.dimension[1])
            self.y_momentum = -10
            self.x_momentum = random.choice(
                [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5])
            self.square_surf = pygame.Surface(self.dimension)
            self.rate = random.choice([-1, 1])
        def draw(self, animation_surf):
            pygame.draw.rect(animation_surf, self.color, [
                             self.square_rect[0], self.square_rect[1], self.dimension[0], self.dimension[1]])
        def move(self):
            # NOTE(review): 'sqaure_movement' is a typo for 'square_movement'
            # and the attribute is never read anywhere visible.
            self.sqaure_movement = [0, 0]
            self.dimension[0] += 1
            self.dimension[1] += 1
            self.square_surf = pygame.Surface(self.dimension)
            self.square_surf.fill(blue)
            self.square_rect[1] += self.y_momentum
            self.y_momentum += 0.1
    length = 350
    time = 0
    square_list = []
    # --- menu event/render loop; exits when START is chosen (play=True) ---
    while not play:
        intro_timer += 1
        SCREEN.fill((0, 0, 0))
        # First ~150 frames: intro title with a growing underline.
        if intro_timer <= 150:
            SCREEN.blit(intro_surf, (0, 0))
            intro_surf.blit(intro, (350, 200))
            if length > 880:
                length = 350
            length += 4.5
            pygame.draw.line(SCREEN, white, (350, 320), (int(length), 320), 3)
            fade_surf.set_alpha(alpha)
            SCREEN.blit(fade_surf, (0, 0))
            alpha += 2
            pygame.time.delay(30)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                sys.exit()
            if event.type == pygame.KEYDOWN:
                # RETURN activates the highlighted entry; BACKSPACE returns
                # from the instructions; UP/DOWN move the highlight.
                if event.key == pygame.K_RETURN and location == 1:
                    main_menu = False
                    instruction_menu = True
                if event.key == pygame.K_RETURN and location == 0:
                    main_menu = False
                    instruction_menu = False
                    play = True
                if event.key == pygame.K_RETURN and location == 2:
                    pygame.quit()
                    sys.exit()
                if event.key == pygame.K_BACKSPACE:
                    main_menu = True
                    instruction_menu = False
                if event.key == pygame.K_UP:
                    if main_menu:
                        location -= 1
                if event.key == pygame.K_DOWN:
                    if main_menu:
                        location += 1
        if intro_timer > 150:
            time += 1
            # ------------------------background_animation------------------------------------
            if len(square_list) < 1:
                square_list.append(square_class())
            if main_menu:
                i = 0
                spacing = 0
                # Wrap the highlight selection around the three entries.
                if location < 0:
                    location = 2
                if location > 2:
                    location = 0
                SCREEN.blit(main_menu_surf, (0, 0))
                for option in options:
                    if i == location:
                        SCREEN.blit(option[0], (900, 400+spacing))
                    else:
                        SCREEN.blit(option[1], (900, 400+spacing))
                    i += 1
                    spacing += 100
            if instruction_menu:
                t = 0
                for i in control_list:
                    SCREEN.blit(i, (150, 200+t))
                    t += 100
                t = 0
                for i in power_list:
                    SCREEN.blit(i, (750, 200+t))
                    t += 100
                i = 0
                spacing = 0
                pygame.draw.line(SCREEN, tor, (600, 100), (600, 600), 1)
                for instruction in instructions:
                    SCREEN.blit(instruction, (100+spacing, 100))
                    i += 1
                    spacing += 600
            for square in square_list:
                square.move()
                square.draw(animation_surf)
                # Big enough: convert the square into an explosion burst.
                if square.dimension[0] > 150:
                    explosion = True
                    add = True
                    color = square.color
                    explosion_location = [
                        square.square_rect.center[0], square.square_rect.center[1]]
                    square_list.remove(square)
                    break
            if explosion:
                explosion_effect(color)
                if time > 4:
                    time = 0
                    add = False
            # Ghost the animation layer behind the menu, then clear it.
            animation_surf.set_alpha(70)
            SCREEN.blit(animation_surf, (0, 0))
            animation_surf.fill((0, 0, 0))
        pygame.display.update()
        clock.tick(60)
    alpha = 0
    # Fade to black before entering the game.
    if play:
        while alpha < 300:
            fade_surf.set_alpha(alpha)
            SCREEN.blit(fade_surf, (0, 0))
            alpha += 10
            pygame.time.delay(50)
            pygame.display.update()
            clock.tick(60)
    # Run one game session, then fade out its final frame and loop back.
    if play:
        main()
        alpha = 0
        pygame.mixer.music.fadeout(2000)
        while alpha < 300:
            fade_surf.set_alpha(alpha)
            SCREEN.blit(fade_surf, (0, 0))
            alpha += 10
            pygame.time.delay(50)
            pygame.display.update()
            clock.tick(120)
| StarcoderdataPython |
11273469 | <gh_stars>100-1000
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['DashboardArgs', 'Dashboard']
@pulumi.input_type
class DashboardArgs:
    """Input arguments for constructing a ``Dashboard`` resource.

    Generated by the Pulumi Terraform Bridge (tfgen); the @pulumi.input_type
    decorator introspects this class, so keep the structure as generated.
    Optional arguments are only stored when a value was actually supplied.
    """
    def __init__(__self__, *,
                 resource_group_name: pulumi.Input[str],
                 dashboard_properties: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        The set of arguments for constructing a Dashboard resource.
        :param pulumi.Input[str] resource_group_name: The name of the resource group in which to
               create the dashboard.
        :param pulumi.Input[str] dashboard_properties: JSON data representing dashboard body. See above for details on how to obtain this from the Portal.
        :param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
        :param pulumi.Input[str] name: Specifies the name of the Shared Dashboard. This should be be 64 chars max, only alphanumeric and hyphens (no spaces). For a more friendly display name, add the `hidden-title` tag.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
        """
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        if dashboard_properties is not None:
            pulumi.set(__self__, "dashboard_properties", dashboard_properties)
        if location is not None:
            pulumi.set(__self__, "location", location)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The name of the resource group in which to
        create the dashboard.
        """
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)

    @property
    @pulumi.getter(name="dashboardProperties")
    def dashboard_properties(self) -> Optional[pulumi.Input[str]]:
        """
        JSON data representing dashboard body. See above for details on how to obtain this from the Portal.
        """
        return pulumi.get(self, "dashboard_properties")

    @dashboard_properties.setter
    def dashboard_properties(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "dashboard_properties", value)

    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "location")

    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the name of the Shared Dashboard. This should be be 64 chars max, only alphanumeric and hyphens (no spaces). For a more friendly display name, add the `hidden-title` tag.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A mapping of tags to assign to the resource.
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
@pulumi.input_type
class _DashboardState:
def __init__(__self__, *,
dashboard_properties: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
Input properties used for looking up and filtering Dashboard resources.
:param pulumi.Input[str] dashboard_properties: JSON data representing dashboard body. See above for details on how to obtain this from the Portal.
:param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: Specifies the name of the Shared Dashboard. This should be be 64 chars max, only alphanumeric and hyphens (no spaces). For a more friendly display name, add the `hidden-title` tag.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to
create the dashboard.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
"""
if dashboard_properties is not None:
pulumi.set(__self__, "dashboard_properties", dashboard_properties)
if location is not None:
pulumi.set(__self__, "location", location)
if name is not None:
pulumi.set(__self__, "name", name)
if resource_group_name is not None:
pulumi.set(__self__, "resource_group_name", resource_group_name)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="dashboardProperties")
def dashboard_properties(self) -> Optional[pulumi.Input[str]]:
"""
JSON data representing dashboard body. See above for details on how to obtain this from the Portal.
"""
return pulumi.get(self, "dashboard_properties")
@dashboard_properties.setter
def dashboard_properties(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "dashboard_properties", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the name of the Shared Dashboard. This should be be 64 chars max, only alphanumeric and hyphens (no spaces). For a more friendly display name, add the `hidden-title` tag.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the resource group in which to
create the dashboard.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class Dashboard(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 dashboard_properties: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 __props__=None):
        """
        Manages a shared dashboard in the Azure Portal.

        It is recommended to create the dashboard in the Portal first, fetch its
        JSON representation, and pass the contents of its `properties: {}` object
        as `dashboard_properties`, interpolating variables into the JSON as
        needed.

        ## Import

        Dashboards can be imported using the `resource id`, e.g.

        ```sh
         $ pulumi import azure:dashboard/dashboard:Dashboard my-board /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg1/providers/Microsoft.Portal/dashboards/00000000-0000-0000-0000-000000000000
        ```

        The URI in the above sample can be found using the Resource Explorer tool
        in the Azure Portal.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] dashboard_properties: JSON data representing dashboard body.
        :param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
        :param pulumi.Input[str] name: Specifies the name of the Shared Dashboard (64 chars max, only alphanumeric and hyphens, no spaces). For a more friendly display name, add the `hidden-title` tag.
        :param pulumi.Input[str] resource_group_name: The name of the resource group in which to
               create the dashboard.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
        """
        ...

    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: DashboardArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Manages a shared dashboard in the Azure Portal.

        See the keyword-argument overload for notes on how to obtain the
        `dashboard_properties` JSON payload from the Portal.

        :param str resource_name: The name of the resource.
        :param DashboardArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...

    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Normalise the two overload shapes onto _internal_init: either a
        # DashboardArgs bundle was supplied, or bare keyword arguments were.
        bundled_args, opts = _utilities.get_resource_args_opts(DashboardArgs, pulumi.ResourceOptions, *args, **kwargs)
        if bundled_args is None:
            __self__._internal_init(resource_name, *args, **kwargs)
        else:
            __self__._internal_init(resource_name, opts, **bundled_args.__dict__)

    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 dashboard_properties: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 __props__=None):
        # Validate resource options, assemble the input property bag and
        # register the resource with the Pulumi engine.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a brand-new resource: callers may not pass a pre-built
            # property bag; that is only valid when looking up by id.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = DashboardArgs.__new__(DashboardArgs)
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            props = __props__.__dict__
            props["dashboard_properties"] = dashboard_properties
            props["location"] = location
            props["name"] = name
            props["resource_group_name"] = resource_group_name
            props["tags"] = tags
        super(Dashboard, __self__).__init__(
            'azure:dashboard/dashboard:Dashboard',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            dashboard_properties: Optional[pulumi.Input[str]] = None,
            location: Optional[pulumi.Input[str]] = None,
            name: Optional[pulumi.Input[str]] = None,
            resource_group_name: Optional[pulumi.Input[str]] = None,
            tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'Dashboard':
        """
        Get an existing Dashboard resource's state with the given name, id, and
        optional extra properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] dashboard_properties: JSON data representing dashboard body.
        :param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
        :param pulumi.Input[str] name: Specifies the name of the Shared Dashboard (64 chars max, only alphanumeric and hyphens, no spaces). For a more friendly display name, add the `hidden-title` tag.
        :param pulumi.Input[str] resource_group_name: The name of the resource group in which to
               create the dashboard.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        state = _DashboardState.__new__(_DashboardState)
        state.__dict__.update(
            dashboard_properties=dashboard_properties,
            location=location,
            name=name,
            resource_group_name=resource_group_name,
            tags=tags,
        )
        return Dashboard(resource_name, opts=opts, __props__=state)

    @property
    @pulumi.getter(name="dashboardProperties")
    def dashboard_properties(self) -> pulumi.Output[str]:
        """
        JSON data representing dashboard body. See above for details on how to obtain this from the Portal.
        """
        return pulumi.get(self, "dashboard_properties")

    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[str]:
        """
        Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Specifies the name of the Shared Dashboard (64 chars max, only alphanumeric and hyphens, no spaces). For a more friendly display name, add the `hidden-title` tag.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Output[str]:
        """
        The name of the resource group in which to
        create the dashboard.
        """
        return pulumi.get(self, "resource_group_name")

    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        A mapping of tags to assign to the resource.
        """
        return pulumi.get(self, "tags")
| StarcoderdataPython |
227162 | import unittest
import numpy as np
import numdifftools.core as nd
import numdifftools.nd_algopy as nda
import numdifftools.nd_statsmodels as nds
from numpy.testing import assert_array_almost_equal
from numdifftools.example_functions import function_names, get_function
class TestExampleFunctions(unittest.TestCase):
    """Cross-check numdifftools' derivative backends against the analytic
    derivatives shipped with the example functions."""

    @staticmethod
    def test_high_order_derivative():
        x = 0.5
        # Minimum number of decimals each method must agree to, used as a
        # floor regardless of the backend's own error estimate.
        min_dm = dict(complex=2, forward=2, backward=2, central=4)
        methods = [ 'complex', 'central', 'backward', 'forward']
        for i, derivative in enumerate([nd.Derivative, nda.Derivative]):
            for name in function_names:
                if i>0 and name in ['arcsinh', 'exp2']:
                    # These examples are skipped for the algopy backend.
                    continue
                for n in range(1, 11):  # derivative orders 1..10
                    f, true_df = get_function(name, n=n)
                    if true_df is None:
                        continue
                    # i == 0 (core backend): all four methods (methods[0:]);
                    # i == 1 (algopy): only 'forward' (methods[3:]).
                    for method in methods[3*i:]:
                        if i==0 and n > 7 and method not in ['complex']:
                            # High-order finite differences are too noisy;
                            # only the complex-step method stays accurate.
                            continue
                        df = derivative(f, method=method, n=n, full_output=True)
                        val, info = df(x)
                        # Derive the comparison precision from the reported
                        # error estimate, floored by min_dm for the method.
                        dm = max(int(-np.log10(info.error_estimate + 1e-16))-1,
                                 min_dm.get(method, 4))
                        print(i, name, method, n, dm)
                        tval = true_df(x)
                        assert_array_almost_equal(val, tval, decimal=dm)

    def test_first_order_derivative(self):
        x = 0.5
        methods = [ 'complex', 'central', 'backward', 'forward']
        for i, derivative in enumerate([nd.Derivative, nds.Gradient, nda.Derivative]):
            for name in function_names:
                if i>1 and name in ['arcsinh', 'exp2']:
                    # Skipped for the algopy backend (i == 2).
                    continue
                f, true_df = get_function(name, n=1)
                if true_df is None:
                    continue
                # core and statsmodels backends run all methods; algopy
                # (i == 2) only 'forward' (methods[3:]).
                for method in methods[3*(i>1):]:
                    df = derivative(f, method=method)
                    val = df(x)
                    tval = true_df(x)
                    dm = 7  # fixed precision for first derivatives
                    print(i, name, method, dm, np.abs(val-tval))
                    assert_array_almost_equal(val, tval, decimal=dm)
# Allow the test module to be run directly as a script.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
6650570 | import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import pywt
from scipy import signal
# Module-level 1x1-inch figure reused by cwt() for every saved image.
plt.figure(figsize=(1, 1))
fig = plt.gcf()
# NOTE(review): hard-coded absolute path — assumes the PPG/ABP recording
# lives at this location; presumably columns 'PPG' and 'ABP' hold the
# photoplethysmogram and arterial blood pressure signals.
csv = pd.read_csv(r'C:\Users\<NAME>\Documents\data\PPG.csv', low_memory=False)
data = csv.iloc()[:]
_PPG = list(data['PPG'])
ABP = data['ABP']
def smooth(a, WSZ):
    """Centred moving average of `a` with window size `WSZ`.

    `WSZ` must be odd (same assumption as the original snippet). The edge
    samples are averaged over progressively smaller odd windows so the
    result has the same length as the input.
    """
    window = np.ones(WSZ, dtype=int)
    core = np.convolve(a, window, 'valid') / WSZ
    # Widths of the shrinking edge windows: 1, 3, 5, ...
    widths = np.arange(1, WSZ - 1, 2)
    head = np.cumsum(a[:WSZ - 1])[::2] / widths
    tail = (np.cumsum(a[:-WSZ:-1])[::2] / widths)[::-1]
    return np.concatenate((head, core, tail))
def cwt(data, path, name):
    """Render the continuous wavelet transform of `data` as a borderless
    scalogram and save it as `<path><name>.jpg` using the module-level figure.

    Uses the complex Gaussian wavelet 'cgau1' with scales 1..len(data)-1.
    """
    plt.rcParams['savefig.dpi'] = 224
    plt.axis('off')
    # Strip ticks and margins so the saved image is only the scalogram.
    # Bug fix: the second locator call used xaxis twice; the y axis was
    # never cleared (standard borderless-save snippet clears both axes).
    plt.gca().xaxis.set_major_locator(plt.NullLocator())
    plt.gca().yaxis.set_major_locator(plt.NullLocator())
    plt.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)
    plt.margins(0, 0)
    t = np.linspace(0, 1, len(data), endpoint=False)
    cwtmatr1, freqs1 = pywt.cwt(data, np.arange(1, len(data)), 'cgau1')
    plt.contourf(t, freqs1, abs(cwtmatr1))
    fig.savefig(path + '%s.jpg' % name)
    # Clear the shared figure so the next call starts from a blank canvas.
    plt.clf()
def meanBP(indexes, base):
    """Average the module-level ABP signal at the given `indexes`,
    each offset by `base`."""
    samples = [ABP[base + idx] for idx in indexes]
    return np.mean(samples)
# pre-process: remove the slow baseline drift by subtracting a very wide
# moving average of the raw PPG signal.
# NOTE(review): smooth() assumes an odd window; len(_PPG) - 1 is only odd
# when the recording has an even number of samples — verify.
s = smooth(_PPG, len(_PPG) - 1)
PPG = []
for (index, _) in enumerate(_PPG):
    PPG.append(_PPG[index] - s[index])
# Split the recording into fixed-size segments and compute the mean
# systolic pressure (mean of ABP peaks) for each segment.
total = 311000
interval = 300
SBPs = []
for i in range(0,total,interval):
    SBPs.append(meanBP(signal.find_peaks(ABP[i:i+interval])[0], 0))
# Save each segment's scalogram into Normal/ or Abnormal/ depending on
# whether its mean systolic pressure is below 120 mmHg.
index = 0
pre = 'PPG_'
for i in range(0,total,interval):
    if SBPs[index] < 120.:
        cwt(PPG[i:i+interval], r'C:\Users\<NAME>\Documents\data\Normal\\', pre + str(i))
    else:
        cwt(PPG[i:i+interval], r'C:\Users\<NAME>\Documents\data\Abnormal\\', pre + str(i))
index += 1 | StarcoderdataPython |
144154 | # Made for TeleBot
# Re-written by @
# Kangers, keep the credits
# Made by @
# From Nekos API
import datetime
from telethon import events
from telethon.errors.rpcerrorlist import YouBlockedUserError
from telethon.tl.functions.account import UpdateNotifySettingsRequest
from uniborg.util import admin_cmd
@borg.on(admin_cmd("sologif"))
async def _(event):
if event.fwd_from:
return
chat = ""
await event.edit("```Finding an Anime Solo GIF..```\n**It's Barely SFW**")
async with borg.conversation(chat) as conv:
try:
response = conv.wait_event(events.NewMessage(incoming=True,from_users=1212429864))
await borg.send_message(chat, "/sologif")
response = await response
except YouBlockedUserError:
await event.reply("```Please unblock @KeikoSDbot and try again```")
return
if response.text.startswith("Forward"):
await event.edit("```can you kindly disable your forward privacy settings for good?```")
else:
await event.delete()
await borg.send_file(event.chat_id, response.message.media)
#By @WhySooSerious
@borg.on(admin_cmd("cumgif"))
async def _(event):
if event.fwd_from:
return
chat = "@KeikoSDbot"
await event.edit("```Finding a Henti Cum GIF..```\n**WARNING : It's NSFW**")
async with borg.conversation(chat) as conv:
try:
response = conv.wait_event(events.NewMessage(incoming=True,from_users=1212429864))
await borg.send_message(chat, "/cumgif")
response = await response
except YouBlockedUserError:
await event.reply("```Please unblock @KeikoSDbot and try again```")
return
if response.text.startswith("Forward"):
await event.edit("```can you kindly disable your forward privacy settings for good?```")
else:
await event.delete()
await borg.send_file(event.chat_id, response.message.media)
#By @WhySooSerious
@borg.on(admin_cmd("ngif"))
async def _(event):
if event.fwd_from:
return
chat = "@KeikoSDbot"
await event.edit("```Finding an Anime Neko GIF..```\n**It's SFW**")
async with borg.conversation(chat) as conv:
try:
response = conv.wait_event(events.NewMessage(incoming=True,from_users=1212429864))
await borg.send_message(chat, "/ngif")
response = await response
except YouBlockedUserError:
await event.reply("```Please unblock @KeikoSDbot and try again```")
return
if response.text.startswith("Forward"):
await event.edit("```can you kindly disable your forward privacy settings for good?```")
else:
await event.delete()
await borg.send_file(event.chat_id, response.message.media)
#By @WhySooSerious
@borg.on(admin_cmd("tickle"))
async def _(event):
if event.fwd_from:
return
chat = "@KeikoSDbot"
await event.edit("```Finding an Anime Tickle GIF..```\n**It's SFW**")
async with borg.conversation(chat) as conv:
try:
response = conv.wait_event(events.NewMessage(incoming=True,from_users=1212429864))
await borg.send_message(chat, "/tickle")
response = await response
except YouBlockedUserError:
await event.reply("```Please unblock @KeikoSDbot and try again```")
return
if response.text.startswith("Forward"):
await event.edit("```can you kindly disable your forward privacy settings for good?```")
else:
await event.delete()
await borg.send_file(event.chat_id, response.message.media)
#By @WhySooSerious
@borg.on(admin_cmd("feed"))
async def _(event):
if event.fwd_from:
return
chat = "@KeikoSDbot"
await event.edit("```Finding an Anime Feeding GIF..```\n**It's SFW**")
async with borg.conversation(chat) as conv:
try:
response = conv.wait_event(events.NewMessage(incoming=True,from_users=1212429864))
await borg.send_message(chat, "/feed")
response = await response
except YouBlockedUserError:
await event.reply("```Please unblock @KeikoSDbot and try again```")
return
if response.text.startswith("Forward"):
await event.edit("```can you kindly disable your forward privacy settings for good?```")
else:
await event.delete()
await borg.send_file(event.chat_id, response.message.media)
#By @WhySooSerious
@borg.on(admin_cmd("bjgif"))
async def _(event):
if event.fwd_from:
return
chat = "@KeikoSDbot"
await event.edit("```Finding a Hentai Blow Job GIF..```\n**WARNING : It's NSFW**")
async with borg.conversation(chat) as conv:
try:
response = conv.wait_event(events.NewMessage(incoming=True,from_users=1212429864))
await borg.send_message(chat, "/bjgif")
response = await response
except YouBlockedUserError:
await event.reply("```Please unblock @KeikoSDbot and try again```")
return
if response.text.startswith("Forward"):
await event.edit("```can you kindly disable your forward privacy settings for good?```")
else:
await event.delete()
await borg.send_file(event.chat_id, response.message.media)
#By @WhySooSerious
@borg.on(admin_cmd("analgif"))
async def _(event):
if event.fwd_from:
return
chat = "@KeikoSDbot"
await event.edit("```Finding a Hentai Anal GIF..```\n**WARNING : It's NSFW**")
async with borg.conversation(chat) as conv:
try:
response = conv.wait_event(events.NewMessage(incoming=True,from_users=1212429864))
await borg.send_message(chat, "/bj")
response = await response
except YouBlockedUserError:
await event.reply("```Please unblock @KeikoSDbot and try again```")
return
if response.text.startswith("Forward"):
await event.edit("```can you kindly disable your forward privacy settings for good?```")
else:
await event.delete()
await borg.send_file(event.chat_id, response.message.media)
#By @WhySooSerious
@borg.on(admin_cmd("poke"))
async def _(event):
if event.fwd_from:
return
chat = "@KeikoSDbot"
await event.edit("```Finding an Anime Poke GIF..```\n**Oh! It's SFW**")
async with borg.conversation(chat) as conv:
try:
response = conv.wait_event(events.NewMessage(incoming=True,from_users=1212429864))
await borg.send_message(chat, "/poke")
response = await response
except YouBlockedUserError:
await event.reply("```Please unblock @KeikoSDbot and try again```")
return
if response.text.startswith("Forward"):
await event.edit("```can you kindly disable your forward privacy settings for good?```")
else:
await event.delete()
await borg.send_file(event.chat_id, response.message.media)
#By @WhySooSerious
@borg.on(admin_cmd("pussygif"))
async def _(event):
if event.fwd_from:
return
chat = "@KeikoSDbot"
await event.edit("```Finding a Hentai Pussy GIF..```\n**WARNING : It's NSFW**")
async with borg.conversation(chat) as conv:
try:
response = conv.wait_event(events.NewMessage(incoming=True,from_users=1212429864))
await borg.send_message(chat, "/pussygif")
response = await response
except YouBlockedUserError:
await event.reply("```Please unblock @KeikoSDbot and try again```")
return
if response.text.startswith("Forward"):
await event.edit("```can you kindly disable your forward privacy settings for good?```")
else:
await event.delete()
await borg.send_file(event.chat_id, response.message.media)
#By @WhySooSerious
@borg.on(admin_cmd("hentaigif"))
async def _(event):
if event.fwd_from:
return
chat = "@KeikoSDbot"
await event.edit("```Finding a Hentai GIF..```\n**WARNING : It's NSFW**")
async with borg.conversation(chat) as conv:
try:
response = conv.wait_event(events.NewMessage(incoming=True,from_users=1212429864))
await borg.send_message(chat, "/hentaigif")
response = await response
except YouBlockedUserError:
await event.reply("```Please unblock @KeikoSDbot and try again```")
return
if response.text.startswith("Forward"):
await event.edit("```can you kindly disable your forward privacy settings for good?```")
else:
await event.delete()
await borg.send_file(event.chat_id, response.message.media)
#By @WhySooSerious
@borg.on(admin_cmd("classic"))
async def _(event):
if event.fwd_from:
return
chat = "@KeikoSDbot"
await event.edit("```Finding a Hentai Classic GIF..```\n**WARNING : It's NSFW**")
async with borg.conversation(chat) as conv:
try:
response = conv.wait_event(events.NewMessage(incoming=True,from_users=1212429864))
await borg.send_message(chat, "/classic")
response = await response
except YouBlockedUserError:
await event.reply("```Please unblock @KeikoSDbot and try again```")
return
if response.text.startswith("Forward"):
await event.edit("```can you kindly disable your forward privacy settings for good?```")
else:
await event.delete()
await borg.send_file(event.chat_id, response.message.media)
#By @WhySooSerious
@borg.on(admin_cmd("kuni"))
async def _(event):
if event.fwd_from:
return
chat = "@KeikoSDbot"
await event.edit("```Finding a Hentai Pussy Lick GIF..```\n**WARNING : It's NSFW**")
async with borg.conversation(chat) as conv:
try:
response = conv.wait_event(events.NewMessage(incoming=True,from_users=1212429864))
await borg.send_message(chat, "/kuni")
response = await response
except YouBlockedUserError:
await event.reply("```Please unblock @KeikoSDbot and try again```")
return
if response.text.startswith("Forward"):
await event.edit("```can you kindly disable your forward privacy settings for good?```")
else:
await event.delete()
await borg.send_file(event.chat_id, response.message.media)
#By @WhySooSerious
@borg.on(admin_cmd("cuddle"))
async def _(event):
if event.fwd_from:
return
chat = "@KeikoSDbot"
await event.edit("```Finding an Anime Cuddle GIF..```\n**WARNING : It's Really Kawaii**")
async with borg.conversation(chat) as conv:
try:
response = conv.wait_event(events.NewMessage(incoming=True,from_users=1212429864))
await borg.send_message(chat, "/cuddle")
response = await response
except YouBlockedUserError:
await event.reply("```Please unblock @KeikoSDbot and try again```")
return
if response.text.startswith("Forward"):
await event.edit("```can you kindly disable your forward privacy settings for good?```")
else:
await event.delete()
await borg.send_file(event.chat_id, response.message.media)
#By @WhySooSerious
@borg.on(admin_cmd("titsgif"))
async def _(event):
if event.fwd_from:
return
chat = "@KeikoSDbot"
await event.edit("```Finding a Hentai Tits GIF..```\n**WARNING : It's NSFW**")
async with borg.conversation(chat) as conv:
try:
response = conv.wait_event(events.NewMessage(incoming=True,from_users=1212429864))
await borg.send_message(chat, "/titsgif")
response = await response
except YouBlockedUserError:
await event.reply("```Please unblock @KeikoSDbot and try again```")
return
if response.text.startswith("Forward"):
await event.edit("```can you kindly disable your forward privacy settings for good?```")
else:
await event.delete()
await borg.send_file(event.chat_id, response.message.media)
#by @WhySooSerious
@borg.on(admin_cmd("smug"))
async def _(event):
if event.fwd_from:
return
chat = "@KeikoSDbot"
await event.edit("```Finding an Anime Smug GIF..```\n**It's SFW**")
async with borg.conversation(chat) as conv:
try:
response = conv.wait_event(events.NewMessage(incoming=True,from_users=1212429864))
await borg.send_message(chat, "/smug")
response = await response
except YouBlockedUserError:
await event.reply("```Please unblock @KeikoSDbot and try again```")
return
if response.text.startswith("Forward"):
await event.edit("```can you kindly disable your forward privacy settings for good?```")
else:
await event.delete()
await borg.send_file(event.chat_id, response.message.media)
#By @WhySooSerious
@borg.on(admin_cmd("baka"))
async def _(event):
if event.fwd_from:
return
chat = "@KeikoSDbot"
await event.edit("```Finding an Anime Baka GIF..```\n**It's SFW**")
async with borg.conversation(chat) as conv:
try:
response = conv.wait_event(events.NewMessage(incoming=True,from_users=1212429864))
await borg.send_message(chat, "/bj")
response = await response
except YouBlockedUserError:
await event.reply("```Please unblock @KeikoSDbot and try again```")
return
if response.text.startswith("Forward"):
await event.edit("```can you kindly disable your forward privacy settings for good?```")
else:
await event.delete()
await borg.send_file(event.chat_id, response.message.media)
#By @WhySooSerious
@borg.on(admin_cmd("lesbian"))
async def _(event):
if event.fwd_from:
return
chat = "@KeikoSDbot"
await event.edit("```Finding an A Hentai Lesbian GIF..```\n**WARNING : It's NSFW**")
async with borg.conversation(chat) as conv:
try:
response = conv.wait_event(events.NewMessage(incoming=True,from_users=1212429864))
await borg.send_message(chat, "/lesbian")
response = await response
except YouBlockedUserError:
await event.reply("```Please unblock @KeikoSDbot and try again```")
return
if response.text.startswith("Forward"):
await event.edit("```can you kindly disable your forward privacy settings for good?```")
else:
await borg.send_file(event.chat_id, response.message.media)
#By @WhySooSerious
@borg.on(admin_cmd("nsfwneko"))
async def _(event):
if event.fwd_from:
return
chat = "@KeikoSDbot"
await event.edit("```Finding an A Hentai Neko GIF..```\n**WARNING : It's NSFW**")
async with borg.conversation(chat) as conv:
try:
response = conv.wait_event(events.NewMessage(incoming=True,from_users=1212429864))
await borg.send_message(chat, "/nekonsfw")
response = await response
except YouBlockedUserError:
await event.reply("```Please unblock @KeikoSDbot and try again```")
return
if response.text.startswith("Forward"):
await event.edit("```can you kindly disable your forward privacy settings for good?```")
else:
await borg.send_file(event.chat_id, response.message.media)
#By @WhySooSerious
@borg.on(admin_cmd("kiss"))
async def _(event):
if event.fwd_from:
return
chat = "@KeikoSDbot"
await event.edit("```Finding an an Anime Kissing GIF..```\n**WARNING : It's NSFW**")
async with borg.conversation(chat) as conv:
try:
response = conv.wait_event(events.NewMessage(incoming=True,from_users=1212429864))
await borg.send_message(chat, "/kiss")
response = await response
except YouBlockedUserError:
await event.reply("```Please unblock @KeikoSDbot and try again```")
return
if response.text.startswith("Forward"):
await event.edit("```can you kindly disable your forward privacy settings for good?```")
else:
await borg.send_file(event.chat_id, response.message.media)
#By @WhySooSerious
| StarcoderdataPython |
3426279 | import os
from tempfile import mkdtemp
from unittest import TestCase
from warnings import catch_warnings
from testfixtures.mock import Mock
from testfixtures import (
TempDirectory, Replacer, ShouldRaise, compare, OutputCapture
)
from ..compat import Unicode, PY3
from ..rmtree import rmtree
# Fixture constants holding a pound sign (U+00A3): on Python 3 the bytes
# value is its UTF-8 encoding; on Python 2 the same two-byte sequence is a
# str literal that is decoded to produce the text form.
if PY3:
    some_bytes = '\xa3'.encode('utf-8')
    some_text = '\xa3'
else:
    some_bytes = '\xc2\xa3'
    some_text = '\xc2\xa3'.decode('utf-8')
class TestTempDirectory(TestCase):
    """Lifecycle smoke tests for TempDirectory."""

    def test_cleanup(self):
        # cleanup() must remove the directory and everything written into it.
        d = TempDirectory()
        p = d.path
        assert os.path.exists(p) is True
        p = d.write('something', b'stuff')
        d.cleanup()
        assert os.path.exists(p) is False

    def test_cleanup_all(self):
        # The class-level registry lets cleanup_all() sweep every instance.
        d1 = TempDirectory()
        d2 = TempDirectory()
        assert os.path.exists(d1.path) is True
        p1 = d1.path
        assert os.path.exists(d2.path) is True
        p2 = d2.path
        TempDirectory.cleanup_all()
        assert os.path.exists(p1) is False
        assert os.path.exists(p2) is False

    def test_with_statement(self):
        # Used as a context manager, the directory exists inside the block
        # and is removed on exit.
        with TempDirectory() as d:
            p = d.path
            assert os.path.exists(p) is True
            d.write('something', b'stuff')
            assert os.listdir(p) == ['something']
            with open(os.path.join(p, 'something')) as f:
                assert f.read() == 'stuff'
        assert os.path.exists(p) is False

    def test_listdir_sort(self):  # pragma: no branch
        # listdir() output must be printed in a stable, case-sensitive order.
        with TempDirectory() as d:
            d.write('ga', b'')
            d.write('foo1', b'')
            d.write('Foo2', b'')
            d.write('g.o', b'')
            with OutputCapture() as output:
                d.listdir()
            output.compare('Foo2\nfoo1\ng.o\nga')
class TempDirectoryTests(TestCase):
    # The following tests verify the sandbox guard: paths starting with '/'
    # that would escape the temporary directory are rejected with ValueError.

    def test_write_with_slash_at_start(self):
        with TempDirectory() as d:
            with ShouldRaise(ValueError(
                'Attempt to read or write outside the temporary Directory'
            )):
                d.write('/some/folder', 'stuff')

    def test_makedir_with_slash_at_start(self):
        with TempDirectory() as d:
            with ShouldRaise(ValueError(
                'Attempt to read or write outside the temporary Directory'
            )):
                d.makedir('/some/folder')

    def test_read_with_slash_at_start(self):
        with TempDirectory() as d:
            with ShouldRaise(ValueError(
                'Attempt to read or write outside the temporary Directory'
            )):
                d.read('/some/folder')

    def test_listdir_with_slash_at_start(self):
        with TempDirectory() as d:
            with ShouldRaise(ValueError(
                'Attempt to read or write outside the temporary Directory'
            )):
                d.listdir('/some/folder')

    def test_compare_with_slash_at_start(self):
        with TempDirectory() as d:
            with ShouldRaise(ValueError(
                'Attempt to read or write outside the temporary Directory'
            )):
                d.compare((), path='/some/folder')

    def test_read_with_slash_at_start_ok(self):
        # An absolute path that is *inside* the temp directory is allowed.
        with TempDirectory() as d:
            path = d.write('foo', b'bar')
            compare(d.read(path), b'bar')
    def test_dont_cleanup_with_path(self):
        # When an existing directory is adopted via path=..., cleanup() must
        # leave it and its contents untouched: only directories TempDirectory
        # created itself get removed.
        d = mkdtemp()
        fp = os.path.join(d, 'test')
        with open(fp, 'w') as f:
            f.write('foo')
        try:
            td = TempDirectory(path=d)
            self.assertEqual(d, td.path)
            td.cleanup()
            # checks
            self.assertEqual(os.listdir(d), ['test'])
            with open(fp) as f:
                self.assertEqual(f.read(), 'foo')
        finally:
            rmtree(d)
def test_dont_create_with_path(self):
d = mkdtemp()
rmtree(d)
td = TempDirectory(path=d)
self.assertEqual(d, td.path)
self.failIf(os.path.exists(d))
def test_deprecated_check(self):
with TempDirectory() as d:
d.write('x', b'')
d.check('x')
def test_deprecated_check_dir(self):
with TempDirectory() as d:
d.write('foo/x', b'')
d.check_dir('foo', 'x')
def test_deprecated_check_all(self):
with TempDirectory() as d:
d.write('a/b/c', b'')
d.check_all('', 'a/', 'a/b/', 'a/b/c')
d.check_all('a', 'b/', 'b/c')
def test_compare_sort_actual(self):
with TempDirectory() as d:
d.write('ga', b'')
d.write('foo1', b'')
d.write('Foo2', b'')
d.write('g.o', b'')
d.compare(['Foo2', 'foo1', 'g.o', 'ga'])
def test_compare_sort_expected(self):
with TempDirectory() as d:
d.write('ga', b'')
d.write('foo1', b'')
d.write('Foo2', b'')
d.write('g.o', b'')
d.compare(['Foo2', 'ga', 'foo1', 'g.o'])
def test_compare_path_tuple(self):
with TempDirectory() as d:
d.write('a/b/c', b'')
d.compare(path=('a', 'b'),
expected=['c'])
def test_recursive_ignore(self):
with TempDirectory(ignore=['.svn']) as d:
d.write('.svn/rubbish', b'')
d.write('a/.svn/rubbish', b'')
d.write('a/b/.svn', b'')
d.write('a/b/c', b'')
d.write('a/d/.svn/rubbish', b'')
d.compare([
'a/',
'a/b/',
'a/b/c',
'a/d/',
])
def test_files_only(self):
with TempDirectory() as d:
d.write('a/b/c', b'')
d.compare(['a/b/c'], files_only=True)
def test_path(self):
with TempDirectory() as d:
expected1 = d.makedir('foo')
expected2 = d.write('baz/bob', b'')
expected3 = d.getpath('a/b/c')
actual1 = d.getpath('foo')
actual2 = d.getpath('baz/bob')
actual3 = d.getpath(('a', 'b', 'c'))
self.assertEqual(expected1, actual1)
self.assertEqual(expected2, actual2)
self.assertEqual(expected3, actual3)
def test_atexit(self):
# http://bugs.python.org/issue25532
from testfixtures.mock import call
m = Mock()
with Replacer() as r:
# make sure the marker is false, other tests will
# probably have set it
r.replace('testfixtures.TempDirectory.atexit_setup', False)
r.replace('atexit.register', m.register)
d = TempDirectory()
expected = [call.register(d.atexit)]
compare(expected, m.mock_calls)
with catch_warnings(record=True) as w:
d.atexit()
self.assertTrue(len(w), 1)
compare(str(w[0].message), ( # pragma: no branch
"TempDirectory instances not cleaned up by shutdown:\n" +
d.path
))
d.cleanup()
compare(set(), TempDirectory.instances)
# check re-running has no ill effects
d.atexit()
def test_read_decode(self):
with TempDirectory() as d:
with open(os.path.join(d.path, 'test.file'), 'wb') as f:
f.write(b'\xc2\xa3')
compare(d.read('test.file', 'utf8'), some_text)
def test_read_no_decode(self):
with TempDirectory() as d:
with open(os.path.join(d.path, 'test.file'), 'wb') as f:
f.write(b'\xc2\xa3')
compare(d.read('test.file'), b'\xc2\xa3')
def test_write_bytes(self):
with TempDirectory() as d:
d.write('test.file', b'\xc2\xa3')
with open(os.path.join(d.path, 'test.file'), 'rb') as f:
compare(f.read(), b'\xc2\xa3')
def test_write_unicode(self):
with TempDirectory() as d:
d.write('test.file', some_text, 'utf8')
with open(os.path.join(d.path, 'test.file'), 'rb') as f:
compare(f.read(), b'\xc2\xa3')
def test_write_unicode_bad(self):
if PY3:
expected = TypeError(
"a bytes-like object is required, not 'str'"
)
else:
expected = UnicodeDecodeError(
'ascii', '\xa3', 0, 1, 'ordinal not in range(128)'
)
with TempDirectory() as d:
with ShouldRaise(expected):
d.write('test.file', Unicode('\xa3'))
def test_just_empty_non_recursive(self):
with TempDirectory() as d:
d.makedir('foo/bar')
d.makedir('foo/baz')
d.compare(path='foo',
expected=['bar', 'baz'],
recursive=False)
def test_just_empty_dirs(self):
with TempDirectory() as d:
d.makedir('foo/bar')
d.makedir('foo/baz')
d.compare(['foo/', 'foo/bar/', 'foo/baz/'])
def test_symlink(self):
with TempDirectory() as d:
d.write('foo/bar.txt', b'x')
os.symlink(d.getpath('foo'), d.getpath('baz'))
d.compare(['baz/', 'foo/', 'foo/bar.txt'])
def test_follow_symlinks(self):
with TempDirectory() as d:
d.write('foo/bar.txt', b'x')
os.symlink(d.getpath('foo'), d.getpath('baz'))
d.compare(['baz/', 'baz/bar.txt', 'foo/', 'foo/bar.txt'],
followlinks=True)
def test_trailing_slash(self):
with TempDirectory() as d:
d.write('source/foo/bar.txt', b'x')
d.compare(path='source/', expected=['foo/', 'foo/bar.txt'])
def test_default_encoding(self):
encoded = b'\xc2\xa3'
decoded = encoded.decode('utf-8')
with TempDirectory(encoding='utf-8') as d:
d.write('test.txt', decoded)
compare(d.read('test.txt'), expected=decoded)
def test_override_default_encoding(self):
encoded = b'\xc2\xa3'
decoded = encoded.decode('utf-8')
with TempDirectory(encoding='ascii') as d:
d.write('test.txt', decoded, encoding='utf-8')
compare(d.read('test.txt', encoding='utf-8'), expected=decoded)
| StarcoderdataPython |
3541707 | <reponame>terasakisatoshi/pythonCodes
"""
reference
http://qiita.com/_329_/items/bcc306194d52f7b81b5a
"""
from sklearn.datasets import fetch_mldata
from sklearn.cross_validation import train_test_split
import numpy as np
import chainer
from chainer import functions as F
from chainer import links as L
from chainer import optimizers
class MnistModel(chainer.Chain):
    """Three-layer fully-connected network (784-100-100-10) for MNIST."""

    def __init__(self):
        super(MnistModel, self).__init__(
            l1=L.Linear(784, 100),
            l2=L.Linear(100, 100),
            l3=L.Linear(100, 10)
        )

    def __call__(self, x, t, train):
        """Forward pass.

        Returns (softmax cross-entropy loss, accuracy) when train is True,
        otherwise accuracy only.
        """
        x = chainer.Variable(x)
        t = chainer.Variable(t)
        hidden = F.relu(self.l1(x))
        hidden = F.relu(self.l2(hidden))
        logits = self.l3(hidden)
        if not train:
            return F.accuracy(logits, t)
        return F.softmax_cross_entropy(logits, t), F.accuracy(logits, t)
def main():
    """Fetch MNIST, train the MLP full-batch for 100 steps, then evaluate."""
    mnist = fetch_mldata('MNIST original', data_home=".")
    # Scale pixel values into [0, 1] and use integer class labels.
    mnist.data = mnist.data.astype(np.float32) / 255.0
    mnist.target = mnist.target.astype(np.int32)

    # Hold out 10k samples for testing (fixed seed for reproducibility).
    train_data, test_data, train_label, test_label = train_test_split(
        mnist.data, mnist.target, test_size=10000, random_state=222)

    print("data shape", mnist.data.dtype, mnist.data.shape)
    print("label shape", mnist.target.dtype, mnist.target.shape)

    model = MnistModel()
    optimizer = optimizers.Adam()
    optimizer.setup(model)

    # Full-batch gradient descent: 100 update steps over the whole train set.
    for step in range(100):
        model.zerograds()
        loss, acc = model(train_data, train_label, train=True)
        loss.backward()
        optimizer.update()
        print("acc", acc.data)

    acc = model(test_data, test_label, train=False)
    print("acc test", acc.data)
# Script entry point: run training/evaluation only when executed directly,
# not when this module is imported.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3260907 | import numpy as np
import cv2
import matplotlib.pyplot as plt
from helper_functions import *
# Define a class to receive the characteristics of each line detection
class Line():
    """State and fitting logic for a single lane line in a warped road image.

    Tracks detected lane pixels across video frames, fits a polynomial to
    them, and smooths the fit over the last ``n_iteration`` frames.
    Supports two detection strategies: sliding windows from a base x
    position (``first_fit_polynomial``) and searching around the previous
    frame's fit (``search_around_poly``).

    Fix: ``np.int`` (deprecated in NumPy 1.20, removed in 1.24) replaced
    with the builtin ``int`` — ``np.int`` was an alias for it, so behavior
    is unchanged.
    """

    def __init__(self, image_shape, debug=False):
        """Initialize hyperparameters and per-frame state.

        :param image_shape: (height, width) of the warped binary image
        :param debug: when True, print diagnostic messages
        """
        # HYPERPARAMETERS
        # Number of sliding windows
        self.nwindows = 12
        # Width of the windows +/- margin
        self.margin = 50
        # Minimum number of pixels found to recenter window
        self.minpix = 50
        # Iteration number to average the polynomial coefficients
        self.n_iteration = 10
        # Image size
        self.image_height = image_shape[0]
        self.image_width = image_shape[1]
        # y values of the line, spaced by 1 pixel
        self.line_y = np.linspace(0, self.image_height-1, self.image_height )
        # was the line detected in the last iteration?
        self.detected = False
        # x values of the last n fits of the line
        self.recent_xfitted = []
        # average x values of the fitted line over the last n iterations
        self.bestx = None
        # polynomial coefficients averaged over the last n iterations
        self.best_fit = None
        # polynomial coefficients for the most recent fit
        self.current_fit = [np.array([False])]
        # radius of curvature of the line in some units
        self.radius_of_curvature = None
        # distance in meters of vehicle center from the line
        self.line_base_pos = None
        # difference in fit coefficients between last and new fits
        self.diffs = np.array([0,0,0], dtype='float')
        # x values for detected line pixels
        self.allx = None
        # y values for detected line pixels
        self.ally = None
        self.debug = debug

    def find_lane_pixels(self, binary_warped, base, out_img = None):
        """Collect lane-pixel coordinates with sliding windows.

        Starting from x position ``base`` at the bottom of the image, slide
        ``nwindows`` windows upward, recentering each on the mean x of the
        pixels it contains. Results are stored in ``self.allx``/``self.ally``.

        :param binary_warped: binary (thresholded, perspective-warped) image
        :param base: starting x position for the bottom window
        :param out_img: optional RGB image to draw the windows on
        """
        # Set height of windows - based on nwindows above and image shape
        window_height = int(binary_warped.shape[0]//self.nwindows)
        # Identify the x and y positions of all nonzero pixels in the image
        nonzero = binary_warped.nonzero()
        nonzeroy = np.array(nonzero[0])
        nonzerox = np.array(nonzero[1])
        # Current positions to be updated later for each window in nwindows
        x_current = base
        # Create empty lists to receive the lane pixel indices
        lane_inds = []
        # Consecutive windows with too few pixels (abort after 4)
        counter_empty_win = 0
        # Recent per-window x shifts, used to extrapolate the next center
        dx_current = []

        # Step through the windows one by one
        for window in range(self.nwindows):
            # Identify window boundaries in x and y
            win_y_low = binary_warped.shape[0] - (window+1)*window_height
            win_y_high = binary_warped.shape[0] - window*window_height
            win_x_low = x_current - self.margin
            win_x_high = x_current + self.margin

            ## For Visualization - Draw the windows on the visualization image ##
            if out_img is not None:
                cv2.rectangle(out_img,(win_x_low,win_y_low),
                    (win_x_high,win_y_high),(0,255,0), 2)

            # Identify the nonzero pixels in x and y within the window
            good_inds = ((nonzerox>=win_x_low) & (nonzerox<win_x_high) &
                         (nonzeroy>=win_y_low) & (nonzeroy<win_y_high)).nonzero()[0]

            # Append these indices to the lists
            lane_inds.append(good_inds)

            # If > minpix pixels, recenter next window on their mean position,
            # extrapolated by the average of recent shifts.
            if len(good_inds) > self.minpix:
                new_x_current = int(np.mean(nonzerox[good_inds]))
                dx_current.append(new_x_current - x_current)
                x_current = int(new_x_current + sum(dx_current)/len(dx_current))
                counter_empty_win = 0
            else:
                # No reliable pixels: keep extrapolating from recent shifts.
                if len(dx_current) > 0:
                    x_current = int(x_current + sum(dx_current)/len(dx_current))
                counter_empty_win +=1
                # if 4 sequence windows have few pixels, it is better to stop searching more
                if counter_empty_win == 4:
                    self.allx = []
                    self.ally = []
                    return

            # Stop when the extrapolated window would leave the image.
            if ((x_current - self.margin ) <= 0) or ((x_current + self.margin)>= self.image_width):
                if self.debug:
                    print("The curve crosses the lateral boundaries")
                break

        # Concatenate the arrays of indices (previously was a list of lists of pixels)
        try:
            lane_inds = np.concatenate(lane_inds)
        except ValueError:
            # Avoids an error if the above is not implemented fully
            pass

        # Extract left and right line pixel positions
        self.allx = nonzerox[lane_inds]
        self.ally = nonzeroy[lane_inds]

    def update_poly(self, order = 2, out_img = None):
        """Fit a polynomial to the stored pixels and smooth it over frames.

        Appends the fitted x values to ``recent_xfitted`` (bounded ring of
        ``n_iteration`` entries) and recomputes ``bestx``/``best_fit`` as the
        average over that window. Sets ``self.detected`` accordingly.

        :param order: polynomial order of the fit
        :param out_img: optional RGB image for visualization
        """
        if len(self.allx) > 0 and len(self.ally)>0:
            # Fit a polynomial to each using `np.polyfit'
            self.current_fit = np.polyfit(self.ally, self.allx, order )

            # Get the relative difference in fit coefficients between last and new fits
            if self.best_fit is not None:
                self.diffs = (self.best_fit - self.current_fit) / self.best_fit

            # Generate x and y values for plotting
            try:
                # Evaluate the polynomial at every y in line_y.
                x = self.current_fit[order]
                for i in range(order):
                    x += self.current_fit[i]*self.line_y**(order-i)

                # Keep at most n_iteration past fits.
                if len(self.recent_xfitted) == self.n_iteration:
                    self.recent_xfitted.pop(0)
                # Pad a short fit so all stored fits have equal length.
                if (len(self.recent_xfitted) > 1) and (len(x) < len(self.recent_xfitted[0])):
                    if self.debug:
                        print("Before concatenating x values", len(x), len(self.recent_xfitted[0]))
                    x = np.concatenate([np.array([x[0]]*(len(self.recent_xfitted[0]) - len(x))), x])
                self.recent_xfitted.append(x)
            except TypeError:
                # Avoids an error
                print('The function failed to fit a line!')
                self.detected = False

            # Calculate the average x values of the fitted line over the last 'self.n_iteration' iterations
            self.bestx = np.mean(self.recent_xfitted, axis = 0)
            self.best_fit = np.polyfit(self.line_y, self.bestx, order )
            self.detected = True
        else:
            if self.debug:
                print('No x, y points fitting the line')
            self.detected = False

        ## For Visualization ##
        if out_img is not None:
            # Colors in the left and right lane regions
            out_img[self.ally, self.allx] = [255, 0, 0]
            # Plots the left and right polynomials on the lane lines
            plt.plot(self.bestx, self.line_y, color='yellow')

    def first_fit_polynomial(self, binary_warped, basex, order = 2, out_img = None):
        """Detect the line from scratch: sliding windows, then a fresh fit.

        :param binary_warped: binary warped image
        :param basex: starting x position for the bottom window
        :param order: polynomial order of the fit
        :param out_img: optional RGB image for visualization
        """
        # Find the lane pixels first
        self.find_lane_pixels(binary_warped, basex, out_img)
        # Update the polynomial
        self.update_poly(order, out_img)

    def search_around_poly(self, binary_warped, order, out_img = None):
        """Re-detect the line by searching around the previous frame's fit.

        Selects activated pixels within +/- ``margin`` of the polynomial
        given by ``self.best_fit`` and refits.

        :param binary_warped: binary warped image
        :param order: polynomial order of the fit
        :param out_img: optional RGB image for visualization
        """
        # Grab activated pixels
        nonzero = binary_warped.nonzero()
        nonzeroy = np.array(nonzero[0])
        nonzerox = np.array(nonzero[1])

        # Set the area of search based on activated x-values
        # within the +/- margin of our polynomial function
        x_current = 0
        for i in range(order+1):
            x_current += self.best_fit[i]*nonzeroy**(order-i)
        win_x_low = x_current - self.margin
        win_x_high = x_current + self.margin

        lane_inds = ((nonzerox>=win_x_low) & (nonzerox<win_x_high)).nonzero()[0]

        # Again, extract left and right line pixel positions
        self.allx = nonzerox[lane_inds]
        self.ally = nonzeroy[lane_inds]

        self.update_poly(order, out_img)

        ## For Visualization ##
        if out_img is not None:
            # Create an image to draw on and an image to show the selection window
            window_img = np.zeros_like(out_img)
            # Generate a polygon to illustrate the search window area
            # And recast the x and y points into usable format for cv2.fillPoly()
            line_window1 = np.array([np.transpose(np.vstack([self.bestx-self.margin, self.line_y]))])
            line_window2 = np.array([np.flipud(np.transpose(np.vstack([self.bestx+self.margin,
                                      self.line_y])))])
            line_pts = np.hstack((line_window1, line_window2))

            # Draw the lane onto the warped blank image
            cv2.fillPoly(window_img, np.int_([line_pts]), (0,255, 0))
            out_img = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)
| StarcoderdataPython |
#!/usr/bin/env python
# Reads the PAUSE button using interrupts and sets the LED
# Pin table at https://github.com/beagleboard/beaglebone-blue/blob/master/BeagleBone_Blue_Pin_Table.csv
# Import PyBBIO library:
import Adafruit_BBIO.GPIO as GPIO
import time  # NOTE(review): imported but unused in this script

button="P8_9" # PAUSE=P8_9, MODE=P8_10
LED ="USR3"

# Set the GPIO pins:
GPIO.setup(LED, GPIO.OUT)
GPIO.setup(button, GPIO.IN)
print("Running...")
# Mirror the button state onto the LED, then block until the button
# changes state (either edge) before reporting and looping again.
while True:
    state = GPIO.input(button)
    GPIO.output(LED, state)
    GPIO.wait_for_edge(button, GPIO.BOTH)
    print("Pressed")
3343262 | <filename>atlas/foundations_contrib/src/foundations_contrib/option.py
def Option(value):
    """Lift *value* into the option type.

    Returns the value unchanged if it is already a ``Something`` or
    ``Nothing`` (idempotent), ``Nothing()`` for ``None``, and
    ``Something(value)`` otherwise.

    Idiom fix: the two chained ``isinstance`` calls are collapsed into a
    single tuple-form ``isinstance`` — semantics are identical.
    """
    from foundations_contrib.something import Something
    from foundations_contrib.nothing import Nothing

    # Already-wrapped options pass through unchanged.
    if isinstance(value, (Nothing, Something)):
        return value
    return Nothing() if value is None else Something(value)
| StarcoderdataPython |
8193833 | <gh_stars>1-10
import os
import shutil
from PyQt5 import QtCore, QtGui, QtWidgets
# import time
from SeaBASSHeader import SeaBASSHeader
from ConfigFile import ConfigFile
from MainConfig import MainConfig
class SeaBASSHeaderWindow(QtWidgets.QDialog):
def __init__(self, name, inputDir, parent=None):
super().__init__(parent)
self.setModal(True)
self.name = name
self.inputDirectory = inputDir
# print(self.inputDirectory)
self.initUI()
# self.outputDirectory = outputDirectory
    def initUI(self):
        """Build the two-column header-editing form.

        Left column: investigator/cruise/instrument metadata fields.
        Right column: comments plus fields that, when left blank, are
        auto-populated from processed files. Each field is a QLabel /
        QLineEdit pair initialised from ``SeaBASSHeader.settings``.
        """
        # print("ConfigWindow - initUI")
        #self.label = QtWidgets.QLabel("Popup", self)
        # intValidator = QtGui.QIntValidator()
        # doubleValidator = QtGui.QDoubleValidator()

        self.nameLabel = QtWidgets.QLabel(f'Editing: {self.name}', self)
        linkSeaBASSLabel = QtWidgets.QLabel(
            "Separate multiple entries with commas, and replace spaces with underscores. For input assistance, go to \
            <a href=\"https://seabass.gsfc.nasa.gov/wiki/metadataheaders\"> SeaBASS Metadata Headers</a>")
        linkSeaBASSLabel.setOpenExternalLinks(True)

        instructionLabel = QtWidgets.QLabel("Separate multiple entries with commas, and replace spaces with underscores.")
        instructionLabel_font = instructionLabel.font()
        instructionLabel_font.setPointSize(10)
        instructionLabel_font.setBold(True)
        instructionLabel.setFont(instructionLabel_font)

        # NOTE(review): "verion" typo below is in a user-visible string —
        # fix separately, not as part of a documentation pass.
        versionLabel = QtWidgets.QLabel("SeaBASS submission verion (e.g. 'R1', 'R2')", self)
        self.versionLineEdit = QtWidgets.QLineEdit(self)
        self.versionLineEdit.setText(str(SeaBASSHeader.settings["version"]))

        instructionLabelSub = QtWidgets.QLabel("To match fields to existing SeaBASS entries,")
        instructionLabelSub1 = QtWidgets.QLabel(
            "check the 'Lists' pull-down menu<a href=\"https://seabass.gsfc.nasa.gov\"> here</a>.")
        instructionLabelSub1.setOpenExternalLinks(True)

        # --- left-column fields: one QLabel/QLineEdit pair per header key ---
        investigatorsLabel = QtWidgets.QLabel("Investigators", self)
        self.investigatorsLineEdit = QtWidgets.QLineEdit(self)
        self.investigatorsLineEdit.setText(str(SeaBASSHeader.settings["investigators"]))

        affiliationsLabel = QtWidgets.QLabel("affiliations", self)
        self.affiliationsLineEdit = QtWidgets.QLineEdit(self)
        self.affiliationsLineEdit.setText(str(SeaBASSHeader.settings["affiliations"]))

        contactLabel = QtWidgets.QLabel("contact", self)
        self.contactLineEdit = QtWidgets.QLineEdit(self)
        self.contactLineEdit.setText(str(SeaBASSHeader.settings["contact"]))

        experimentLabel = QtWidgets.QLabel("experiment", self)
        self.experimentLineEdit = QtWidgets.QLineEdit(self)
        self.experimentLineEdit.setText(str(SeaBASSHeader.settings["experiment"]))

        cruiseLabel = QtWidgets.QLabel("cruise (different from experiment)", self)
        self.cruiseLineEdit = QtWidgets.QLineEdit(self)
        self.cruiseLineEdit.setText(str(SeaBASSHeader.settings["cruise"]))

        stationLabel = QtWidgets.QLabel("station (RAW filename if blank)", self)
        self.stationLineEdit = QtWidgets.QLineEdit(self)
        self.stationLineEdit.setText(str(SeaBASSHeader.settings["station"]))

        documentsLabel = QtWidgets.QLabel("documents", self)
        self.documentsLineEdit = QtWidgets.QLineEdit(self)
        self.documentsLineEdit.setText(str(SeaBASSHeader.settings["documents"]))

        instrument_manufacturerLabel = QtWidgets.QLabel("instrument_manufacturer", self)
        self.instrument_manufacturerLineEdit = QtWidgets.QLineEdit(self)
        self.instrument_manufacturerLineEdit.setText(str(SeaBASSHeader.settings["instrument_manufacturer"]))

        instrument_modelLabel = QtWidgets.QLabel("instrument_model", self)
        self.instrument_modelLineEdit = QtWidgets.QLineEdit(self)
        self.instrument_modelLineEdit.setText(str(SeaBASSHeader.settings["instrument_model"]))

        calibration_dateLabel = QtWidgets.QLabel("calibration_date (YYYYMMDD)", self)
        self.calibration_dateLineEdit = QtWidgets.QLineEdit(self)
        self.calibration_dateLineEdit.setText(str(SeaBASSHeader.settings["calibration_date"]))

        # These will always be refreshed from the ConfigFile
        SeaBASSHeader.refreshCalibrationFiles()
        calibration_filesLabel = QtWidgets.QLabel("calibration_files", self)
        self.calibration_filesLineEdit = QtWidgets.QLineEdit(self)
        self.calibration_filesLineEdit.setText(str(SeaBASSHeader.settings["calibration_files"]))

        data_typeLabel = QtWidgets.QLabel("data_type", self)
        self.data_typeLineEdit = QtWidgets.QLineEdit(self)
        self.data_typeLineEdit.setText(str(SeaBASSHeader.settings["data_type"]))

        data_statusLabel = QtWidgets.QLabel("data_status (e.g. preliminary)", self)
        self.data_statusLineEdit = QtWidgets.QLineEdit(self)
        self.data_statusLineEdit.setText(str(SeaBASSHeader.settings["data_status"]))

        water_depthLabel = QtWidgets.QLabel("water_depth (use -999 for missing)", self)
        self.water_depthLineEdit = QtWidgets.QLineEdit(self)
        self.water_depthLineEdit.setText(str(SeaBASSHeader.settings["water_depth"]))
        ''' doubleValidator causing a block to data entry'''
        # self.water_depthLineEdit.setValidator(doubleValidator)

        measurement_depthLabel = QtWidgets.QLabel("measurement_depth", self)
        self.measurement_depthLineEdit = QtWidgets.QLineEdit(self)
        self.measurement_depthLineEdit.setText(str(SeaBASSHeader.settings["measurement_depth"]))
        # self.measurement_depthLineEdit.setValidator(doubleValidator)

        cloud_percentLabel = QtWidgets.QLabel("cloud_percent", self)
        self.cloud_percentLineEdit = QtWidgets.QLineEdit(self)
        self.cloud_percentLineEdit.setText(str(SeaBASSHeader.settings["cloud_percent"]))
        ''' intValidator causing block to data entry '''
        # self.cloud_percentLineEdit.setValidator(intValidator)

        wave_heightLabel = QtWidgets.QLabel("wave_height", self)
        self.wave_heightLineEdit = QtWidgets.QLineEdit(self)
        self.wave_heightLineEdit.setText(str(SeaBASSHeader.settings["wave_height"]))
        # self.wave_heightLineEdit.setValidator(doubleValidator)

        secchi_depthLabel = QtWidgets.QLabel("secchi_depth", self)
        self.secchi_depthLineEdit = QtWidgets.QLineEdit(self)
        self.secchi_depthLineEdit.setText(str(SeaBASSHeader.settings["secchi_depth"]))
        # self.secchi_depthLineEdit.setValidator(doubleValidator)

        #############################
        commentsLabel = QtWidgets.QLabel("Config Comments (lead with !)", self)
        self.commentsLineEdit = QtWidgets.QTextEdit(self)
        self.commentsLineEdit.setPlainText(SeaBASSHeader.settings["comments"])

        self.configUpdateButton = QtWidgets.QPushButton("Update from Config Window")
        self.configUpdateButton.clicked.connect(lambda: self.configUpdateButtonPressed( 'local'))

        other_commentsLabel = QtWidgets.QLabel("Other Comments", self)
        other_commentsLabel2 = QtWidgets.QLabel("(lead with !)", self)
        self.other_commentsLineEdit = QtWidgets.QTextEdit(self)
        self.other_commentsLineEdit.setPlainText(SeaBASSHeader.settings["other_comments"])
        #############################

        instructionLabel2 = QtWidgets.QLabel(" If left blank, the entries below will be extracted from processed files")
        instructionLabel2_font = instructionLabel2.font()
        instructionLabel2_font.setPointSize(10)
        instructionLabel2_font.setBold(True)
        instructionLabel2.setFont(instructionLabel2_font)

        # --- right-column fields: auto-populated from processed files if blank ---
        data_file_nameLabel = QtWidgets.QLabel("data_file_name", self)
        self.data_file_nameLineEdit = QtWidgets.QLineEdit(self)
        self.data_file_nameLineEdit.setText(str(SeaBASSHeader.settings["data_file_name"]))

        original_file_nameLabel = QtWidgets.QLabel("original_file_name", self)
        self.original_file_nameLineEdit = QtWidgets.QLineEdit(self)
        self.original_file_nameLineEdit.setText(str(SeaBASSHeader.settings["original_file_name"]))

        start_dateLabel = QtWidgets.QLabel("start_date (RAW data should be in GMT)", self)
        self.start_dateLineEdit = QtWidgets.QLineEdit(self)
        self.start_dateLineEdit.setText(str(SeaBASSHeader.settings["start_date"]))

        end_dateLabel = QtWidgets.QLabel("end_date [GMT]", self)
        self.end_dateLineEdit = QtWidgets.QLineEdit(self)
        self.end_dateLineEdit.setText(str(SeaBASSHeader.settings["end_date"]))

        start_timeLabel = QtWidgets.QLabel("start_time [GMT]", self)
        self.start_timeLineEdit = QtWidgets.QLineEdit(self)
        self.start_timeLineEdit.setText(str(SeaBASSHeader.settings["start_time"]))

        end_timeLabel = QtWidgets.QLabel("end_time [GMT]", self)
        self.end_timeLineEdit = QtWidgets.QLineEdit(self)
        self.end_timeLineEdit.setText(str(SeaBASSHeader.settings["end_time"]))

        north_latitudeLabel = QtWidgets.QLabel("north_latitude [dec deg]", self)
        self.north_latitudeLineEdit = QtWidgets.QLineEdit(self)
        self.north_latitudeLineEdit.setText(str(SeaBASSHeader.settings["north_latitude"]))
        # self.north_latitudeLineEdit.setValidator(doubleValidator)

        south_latitudeLabel = QtWidgets.QLabel("south_latitude", self)
        self.south_latitudeLineEdit = QtWidgets.QLineEdit(self)
        self.south_latitudeLineEdit.setText(str(SeaBASSHeader.settings["south_latitude"]))
        # self.south_latitudeLineEdit.setValidator(doubleValidator)

        east_longitudeLabel = QtWidgets.QLabel("east_longitude", self)
        self.east_longitudeLineEdit = QtWidgets.QLineEdit(self)
        self.east_longitudeLineEdit.setText(str(SeaBASSHeader.settings["east_longitude"]))
        # self.east_longitudeLineEdit.setValidator(doubleValidator)

        west_longitudeLabel = QtWidgets.QLabel("west_longitude", self)
        self.west_longitudeLineEdit = QtWidgets.QLineEdit(self)
        self.west_longitudeLineEdit.setText(str(SeaBASSHeader.settings["west_longitude"]))
        # self.west_longitudeLineEdit.setValidator(doubleValidator)

        wind_speedLabel = QtWidgets.QLabel("wind_speed (only autopopulated at L4)", self)
        self.wind_speedLineEdit = QtWidgets.QLineEdit(self)
        self.wind_speedLineEdit.setText(str(SeaBASSHeader.settings["wind_speed"]))
        # self.wind_speedLineEdit.setValidator(doubleValidator)
        ##

        self.openButton = QtWidgets.QPushButton("Open/Copy")
        self.saveButton = QtWidgets.QPushButton("Save")
        self.saveAsButton = QtWidgets.QPushButton("Save As")
        self.cancelButton = QtWidgets.QPushButton("Cancel")
        self.openButton.clicked.connect(self.openButtonPressed)
        self.saveButton.clicked.connect(self.saveButtonPressed)
        self.saveAsButton.clicked.connect(self.saveAsButtonPressed)
        self.cancelButton.clicked.connect(self.cancelButtonPressed)

        # ####################################################################################
        # Whole Window Box
        VBox = QtWidgets.QVBoxLayout()
        VBox.addWidget(self.nameLabel)
        VBox.addWidget(linkSeaBASSLabel)

        VBox1 = QtWidgets.QVBoxLayout()
        VBox1.addSpacing(10)

        #VBox1.addWidget(instructionLabel)

        HBoxVersion = QtWidgets.QHBoxLayout()
        HBoxVersion.addWidget(versionLabel)
        HBoxVersion.addWidget(self.versionLineEdit)
        VBox1.addLayout(HBoxVersion)

        VBox1.addWidget(instructionLabelSub)
        VBox1.addWidget(instructionLabelSub1)

        # Horizontal Box
        HBox1 = QtWidgets.QHBoxLayout()
        HBox1.addWidget(investigatorsLabel)
        HBox1.addWidget(self.investigatorsLineEdit)
        VBox1.addLayout(HBox1)

        HBox2 = QtWidgets.QHBoxLayout()
        HBox2.addWidget(affiliationsLabel)
        HBox2.addWidget(self.affiliationsLineEdit)
        VBox1.addLayout(HBox2)

        HBox3 = QtWidgets.QHBoxLayout()
        HBox3.addWidget(contactLabel)
        HBox3.addWidget(self.contactLineEdit)
        VBox1.addLayout(HBox3)

        HBox4 = QtWidgets.QHBoxLayout()
        HBox4.addWidget(experimentLabel)
        HBox4.addWidget(self.experimentLineEdit)
        VBox1.addLayout(HBox4)

        HBox5 = QtWidgets.QHBoxLayout()
        HBox5.addWidget(cruiseLabel)
        HBox5.addWidget(self.cruiseLineEdit)
        VBox1.addLayout(HBox5)

        HBox9 = QtWidgets.QHBoxLayout()
        HBox9.addWidget(documentsLabel)
        HBox9.addWidget(self.documentsLineEdit)
        VBox1.addLayout(HBox9)

        HBox27 = QtWidgets.QHBoxLayout()
        HBox27.addWidget(instrument_manufacturerLabel)
        HBox27.addWidget(self.instrument_manufacturerLineEdit)
        VBox1.addLayout(HBox27)

        HBox28 = QtWidgets.QHBoxLayout()
        HBox28.addWidget(instrument_modelLabel)
        HBox28.addWidget(self.instrument_modelLineEdit)
        VBox1.addLayout(HBox28)

        HBox29 = QtWidgets.QHBoxLayout()
        HBox29.addWidget(calibration_dateLabel)
        HBox29.addWidget(self.calibration_dateLineEdit)
        VBox1.addLayout(HBox29)

        HBox10 = QtWidgets.QHBoxLayout()
        HBox10.addWidget(calibration_filesLabel)
        HBox10.addWidget(self.calibration_filesLineEdit)
        VBox1.addLayout(HBox10)

        HBox11 = QtWidgets.QHBoxLayout()
        HBox11.addWidget(data_typeLabel)
        HBox11.addWidget(self.data_typeLineEdit)
        VBox1.addLayout(HBox11)

        HBox12 = QtWidgets.QHBoxLayout()
        HBox12.addWidget(data_statusLabel)
        HBox12.addWidget(self.data_statusLineEdit)
        VBox1.addLayout(HBox12)

        HBox21 = QtWidgets.QHBoxLayout()
        HBox21.addWidget(water_depthLabel)
        HBox21.addWidget(self.water_depthLineEdit)
        VBox1.addLayout(HBox21)

        HBox22 = QtWidgets.QHBoxLayout()
        HBox22.addWidget(measurement_depthLabel)
        HBox22.addWidget(self.measurement_depthLineEdit)
        VBox1.addLayout(HBox22)

        HBox23 = QtWidgets.QHBoxLayout()
        HBox23.addWidget(cloud_percentLabel)
        HBox23.addWidget(self.cloud_percentLineEdit)
        VBox1.addLayout(HBox23)

        HBox25 = QtWidgets.QHBoxLayout()
        HBox25.addWidget(wave_heightLabel)
        HBox25.addWidget(self.wave_heightLineEdit)
        VBox1.addLayout(HBox25)

        HBox31 = QtWidgets.QHBoxLayout()
        HBox31.addWidget(secchi_depthLabel)
        HBox31.addWidget(self.secchi_depthLineEdit)
        VBox1.addLayout(HBox31)

        ##############
        VBox2 = QtWidgets.QVBoxLayout()
        #############
        HBoxSub = QtWidgets.QHBoxLayout()
        VBoxSub = QtWidgets.QVBoxLayout()
        VBoxSub.addWidget(commentsLabel)
        # VBoxSub.addWidget(self.configUpdateButton)
        HBoxSub.addLayout(VBoxSub)
        HBoxSub.addWidget(self.commentsLineEdit)
        # HBoxSub.addLayout(VBoxSub)
        VBox2.addLayout(HBoxSub)
        ############
        HBox30 = QtWidgets.QHBoxLayout()
        HBox30.addWidget(other_commentsLabel)
        HBox30.addWidget(other_commentsLabel2)
        HBox30.addWidget(self.other_commentsLineEdit)
        VBox2.addLayout(HBox30)

        # VBox1.addSpacing(20)
        VBox2.addWidget(instructionLabel2)

        HBox6 = QtWidgets.QHBoxLayout()
        HBox6.addWidget(stationLabel)
        HBox6.addWidget(self.stationLineEdit)
        VBox2.addLayout(HBox6)

        HBox7 = QtWidgets.QHBoxLayout()
        HBox7.addWidget(data_file_nameLabel)
        HBox7.addWidget(self.data_file_nameLineEdit)
        VBox2.addLayout(HBox7)

        HBox8 = QtWidgets.QHBoxLayout()
        HBox8.addWidget(original_file_nameLabel)
        HBox8.addWidget(self.original_file_nameLineEdit)
        VBox2.addLayout(HBox8)

        HBox13 = QtWidgets.QHBoxLayout()
        HBox13.addWidget(start_dateLabel)
        HBox13.addWidget(self.start_dateLineEdit)
        VBox2.addLayout(HBox13)

        HBox14 = QtWidgets.QHBoxLayout()
        HBox14.addWidget(end_dateLabel)
        HBox14.addWidget(self.end_dateLineEdit)
        VBox2.addLayout(HBox14)

        HBox15 = QtWidgets.QHBoxLayout()
        HBox15.addWidget(start_timeLabel)
        HBox15.addWidget(self.start_timeLineEdit)
        VBox2.addLayout(HBox15)

        HBox16 = QtWidgets.QHBoxLayout()
        HBox16.addWidget(end_timeLabel)
        HBox16.addWidget(self.end_timeLineEdit)
        VBox2.addLayout(HBox16)

        HBox17 = QtWidgets.QHBoxLayout()
        HBox17.addWidget(north_latitudeLabel)
        HBox17.addWidget(self.north_latitudeLineEdit)
        VBox2.addLayout(HBox17)

        HBox18 = QtWidgets.QHBoxLayout()
        HBox18.addWidget(south_latitudeLabel)
        HBox18.addWidget(self.south_latitudeLineEdit)
        VBox2.addLayout(HBox18)

        HBox19 = QtWidgets.QHBoxLayout()
        HBox19.addWidget(east_longitudeLabel)
        HBox19.addWidget(self.east_longitudeLineEdit)
        VBox2.addLayout(HBox19)

        HBox20 = QtWidgets.QHBoxLayout()
        HBox20.addWidget(west_longitudeLabel)
        HBox20.addWidget(self.west_longitudeLineEdit)
        VBox2.addLayout(HBox20)

        HBox24 = QtWidgets.QHBoxLayout()
        HBox24.addWidget(wind_speedLabel)
        HBox24.addWidget(self.wind_speedLineEdit)
        VBox2.addLayout(HBox24)

        # Add 3 Vertical Boxes to Horizontal Box hBox
        hBox = QtWidgets.QHBoxLayout()
        hBox.addLayout(VBox1)
        hBox.addLayout(VBox2)

        # Save/Cancel
        saveHBox = QtWidgets.QHBoxLayout()
        saveHBox.addStretch(1)
        saveHBox.addWidget(self.openButton)
        saveHBox.addWidget(self.saveButton)
        saveHBox.addWidget(self.saveAsButton)
        saveHBox.addWidget(self.cancelButton)

        # Adds hBox and saveHBox to primary VBox
        VBox.addLayout(hBox)
        VBox.addLayout(saveHBox)

        self.setLayout(VBox)
        self.setGeometry(300, 100, 0, 0)
        self.setWindowTitle('Edit SeaBASS Header')
    def configUpdateButtonPressed(self, caller):
        """Regenerate the '!'-prefixed comment block from ConfigFile settings.

        Translates the current processing configuration (filters, thresholds,
        glint/NIR corrections, defaults) into SeaBASS header comment lines
        and stores them in ``SeaBASSHeader.settings["comments"]``. When
        *caller* is 'local', the comments text box in this window is
        refreshed as well.
        """
        print("Updating comments from values in ConFigWindow")
        # This will update subsequently from the ConfigFile on demand

        # Map each boolean config switch to an "On"/"Off" string for the header.
        if ConfigFile.settings["bL1aCleanSZA"]:
            szaFilt = "On"
        else:
            szaFilt = "Off"
        if ConfigFile.settings["bL1cCleanPitchRoll"]:
            pitchRollFilt = "On"
        else:
            pitchRollFilt = "Off"
        if ConfigFile.settings["bL1cRotatorAngle"]:
            cleanRotFilt = "On"
        else:
            cleanRotFilt = "Off"
        if ConfigFile.settings["bL1cCleanSunAngle"]:
            cleanRelAzFilt = "On"
        else:
            cleanRelAzFilt = "Off"
        if ConfigFile.settings["bL1dDeglitch"]:
            deglitchFilt = "On"
        else:
            deglitchFilt = "Off"
        if ConfigFile.settings["bL2EnableSpecQualityCheck"]:
            specFilt = "On"
        else:
            specFilt = "Off"
        if ConfigFile.settings["bL2EnableQualityFlags"]:
            metFilt = "On"
        else:
            metFilt = "Off"
        if ConfigFile.settings["bL2EnablePercentLt"]:
            ltFilt = "On"
        else:
            ltFilt = "Off"
        # Glint-correction method: 3C takes precedence, then Zhang2017,
        # otherwise Mobley1999.
        if ConfigFile.settings["bL23CRho"]:
            rhoCorr = "3C"
        elif ConfigFile.settings["bL2ZhangRho"]:
            rhoCorr = "Zhang2017"
        else:
            rhoCorr = f"Mobley1999"
        # NIR residual correction method, or Off.
        if ConfigFile.settings["bL2PerformNIRCorrection"]:
            if ConfigFile.settings["bL2SimpleNIRCorrection"]:
                NIRFilt = "Hooker2003"
            else:
                NIRFilt = "Ruddick2006"
        else:
            NIRFilt = "Off"
        if ConfigFile.settings["bL2NegativeSpec"]:
            NegativeFilt = "On"
        else:
            NegativeFilt = "Off"

        # Build the full '!'-prefixed comment block, one line per setting.
        SeaBASSHeader.settings["comments"] =\
            f'! HyperInSPACE vers = {MainConfig.settings["version"]}\n'+\
            f'! HyperInSPACE Config = {ConfigFile.filename}\n'+\
            f'! SZA Filter = {szaFilt}\n'+\
            f'! SZA Max = {ConfigFile.settings["fL1aCleanSZAMax"]}\n'+\
            f'! Rotator Home Angle = {ConfigFile.settings["fL1cRotatorHomeAngle"]}\n'+\
            f'! Rotator Delay = {ConfigFile.settings["fL1cRotatorDelay"]}\n'+\
            f'! Pitch/Roll Filter = {pitchRollFilt}\n'+\
            f'! Max Pitch/Roll = {ConfigFile.settings["fL1cPitchRollPitch"]}\n'+\
            f'! Rotator Min/Max Filter = {cleanRotFilt}\n'+\
            f'! Rotator Min = {ConfigFile.settings["fL1cRotatorAngleMin"]}\n'+\
            f'! Rotator Max = {ConfigFile.settings["fL1cRotatorAngleMax"]}\n'+\
            f'! Rel Azimuth Filter = {cleanRelAzFilt}\n'+\
            f'! Rel Azimuth Min = {ConfigFile.settings["fL1cSunAngleMin"]}\n'+\
            f'! Rel Azimuth Max = {ConfigFile.settings["fL1cSunAngleMax"]}\n'+\
            f'! Deglitch Filter = {deglitchFilt}\n'+\
            f'! ES Dark Window = {ConfigFile.settings["fL1dESWindowDark"]}\n'+\
            f'! ES Light Window = {ConfigFile.settings["fL1dESWindowLight"]}\n'+\
            f'! ES Dark Sigma = {ConfigFile.settings["fL1dESSigmaDark"]}\n'+\
            f'! ES Light Sigma = {ConfigFile.settings["fL1dESSigmaLight"]}\n'+\
            f'! LI Dark Window = {ConfigFile.settings["fL1dLIWindowDark"]}\n'+\
            f'! LI Light Window = {ConfigFile.settings["fL1dLIWindowLight"]}\n'+\
            f'! LI Dark Sigma = {ConfigFile.settings["fL1dLISigmaDark"]}\n'+\
            f'! LI Light Sigma = {ConfigFile.settings["fL1dLISigmaLight"]}\n'+\
            f'! LT Dark Window = {ConfigFile.settings["fL1dLTWindowDark"]}\n'+\
            f'! LT Light Window = {ConfigFile.settings["fL1dLTWindowLight"]}\n'+\
            f'! LT Dark Sigma = {ConfigFile.settings["fL1dLTSigmaDark"]}\n'+\
            f'! LT Light Sigma = {ConfigFile.settings["fL1dLTSigmaLight"]}\n'+\
            f'! Wavelength Interp Int = {ConfigFile.settings["fL1eInterpInterval"]}\n'+\
            f'! Max Wind = {ConfigFile.settings["fL2MaxWind"]}\n'+\
            f'! Min SZA = {ConfigFile.settings["fL2SZAMin"]}\n'+\
            f'! Max SZA = {ConfigFile.settings["fL2SZAMax"]}\n'+\
            f'! Spectral Filter = {specFilt}\n'+\
            f'! Filter Sigma Es = {ConfigFile.settings["fL2SpecFilterEs"]}\n'+\
            f'! Filter Sigma Li = {ConfigFile.settings["fL2SpecFilterLi"]}\n'+\
            f'! Filter Sigma Lt = {ConfigFile.settings["fL2SpecFilterLt"]}\n'+\
            f'! Meteorological Filter = {metFilt}\n'+\
            f'! Cloud Flag = {ConfigFile.settings["fL2CloudFlag"]}\n'+\
            f'! Es Flag = {ConfigFile.settings["fL2SignificantEsFlag"]}\n'+\
            f'! Dawn/Dusk Flag = {ConfigFile.settings["fL2DawnDuskFlag"]}\n'+\
            f'! Rain/Humidity Flag = {ConfigFile.settings["fL2RainfallHumidityFlag"]}\n'+\
            f'! Ensemble Interval = {ConfigFile.settings["fL2TimeInterval"]}\n'+\
            f'! Percent Lt Filter = {ltFilt}\n'+\
            f'! Percent Light = {ConfigFile.settings["fL2PercentLt"]}\n'+\
            f'! Glint_Correction = {rhoCorr}\n'+\
            f'! Default Wind = {ConfigFile.settings["fL2DefaultWindSpeed"]}\n'+\
            f'! Default AOD = {ConfigFile.settings["fL2DefaultAOD"]}\n'+\
            f'! Default Salt = {ConfigFile.settings["fL2DefaultSalt"]}\n'+\
            f'! Default SST = {ConfigFile.settings["fL2DefaultSST"]}\n'+\
            f'! NIR Correction = {NIRFilt}\n'+\
            f'! Remove Negatives = {NegativeFilt}'
            # f'! Processing DateTime = {time.asctime()}'

        # Only refresh the on-screen text box when invoked from this window.
        if caller == 'local':
            self.commentsLineEdit.setPlainText(SeaBASSHeader.settings["comments"])
            self.commentsLineEdit.update()
        # print(SeaBASSHeader.settings["comments"])
def openButtonPressed(self):
    """Copy an existing SeaBASS header (.hdr) file into this editor.

    Opens a file dialog; when a file is chosen, loads it via
    SeaBASSHeader.loadSeaBASSHeader and refreshes the window.
    """
    print('SeaBASSHeaderWindow - Open/Copy Pressed')
    caption = "Select .hdr File to Copy"
    directory = "Config"
    # getOpenFileNames returns (path_list, selected_filter). The filter
    # string is truthy even on cancel, so the original `if ok:` check did
    # not protect the `fpfToCopy[0]` access, which raised IndexError when
    # the user cancelled. Check the path list itself instead.
    fpfToCopy, ok = QtWidgets.QFileDialog.getOpenFileNames(self, caption, directory, filter="*.hdr")
    if fpfToCopy:
        (_, fileToCopy) = os.path.split(fpfToCopy[0])
        print("Copying SeaBASS Header: ", fileToCopy)
        SeaBASSHeader.loadSeaBASSHeader(fileToCopy)
        self.name = fileToCopy
        SeaBASSHeaderWindow.refreshWindow(self)
def saveButtonPressed(self):
    """Copy every editor field back into SeaBASSHeader.settings, persist the
    header file, record it in ConfigFile, and close the window."""
    print("SeaBASSHeaderWindow - Save Pressed")
    SeaBASSHeader.settings["version"] = self.versionLineEdit.text()
    SeaBASSHeader.settings["investigators"] = self.investigatorsLineEdit.text()
    SeaBASSHeader.settings["affiliations"] = self.affiliationsLineEdit.text()
    SeaBASSHeader.settings["contact"] = self.contactLineEdit.text()
    SeaBASSHeader.settings["experiment"] = self.experimentLineEdit.text()
    SeaBASSHeader.settings["cruise"] = self.cruiseLineEdit.text()
    SeaBASSHeader.settings["station"] = self.stationLineEdit.text()
    SeaBASSHeader.settings["documents"] = self.documentsLineEdit.text()
    SeaBASSHeader.settings["calibration_files"] = self.calibration_filesLineEdit.text()
    SeaBASSHeader.settings["data_type"] = self.data_typeLineEdit.text()
    SeaBASSHeader.settings["data_status"] = self.data_statusLineEdit.text()
    SeaBASSHeader.settings["water_depth"] = self.water_depthLineEdit.text()
    SeaBASSHeader.settings["measurement_depth"] = self.measurement_depthLineEdit.text()
    SeaBASSHeader.settings["cloud_percent"] = self.cloud_percentLineEdit.text()
    SeaBASSHeader.settings["wave_height"] = self.wave_heightLineEdit.text()
    SeaBASSHeader.settings["secchi_depth"] = self.secchi_depthLineEdit.text()
    SeaBASSHeader.settings["instrument_manufacturer"] = self.instrument_manufacturerLineEdit.text()
    SeaBASSHeader.settings["instrument_model"] = self.instrument_modelLineEdit.text()
    SeaBASSHeader.settings["calibration_date"] = self.calibration_dateLineEdit.text()
    SeaBASSHeader.settings["data_file_name"] = self.data_file_nameLineEdit.text()
    SeaBASSHeader.settings["original_file_name"] = self.original_file_nameLineEdit.text()
    SeaBASSHeader.settings["start_date"] = self.start_dateLineEdit.text()
    SeaBASSHeader.settings["end_date"] = self.end_dateLineEdit.text()
    SeaBASSHeader.settings["start_time"] = self.start_timeLineEdit.text()
    # BUGFIX: the original assigned "end_date" a second time here (copy-paste),
    # so "end_time" was never saved. Assumes an end_timeLineEdit widget exists
    # to match start_timeLineEdit — TODO confirm against the window layout.
    SeaBASSHeader.settings["end_time"] = self.end_timeLineEdit.text()
    SeaBASSHeader.settings["north_latitude"] = self.north_latitudeLineEdit.text()
    SeaBASSHeader.settings["south_latitude"] = self.south_latitudeLineEdit.text()
    SeaBASSHeader.settings["east_longitude"] = self.east_longitudeLineEdit.text()
    SeaBASSHeader.settings["west_longitude"] = self.west_longitudeLineEdit.text()
    SeaBASSHeader.settings["wind_speed"] = self.wind_speedLineEdit.text()
    SeaBASSHeader.settings["comments"] = self.commentsLineEdit.toPlainText()
    SeaBASSHeader.settings["other_comments"] = self.other_commentsLineEdit.toPlainText()
    SeaBASSHeader.saveSeaBASSHeader(self.name)
    ConfigFile.settings["seaBASSHeaderFileName"] = self.name
    self.close()
def refreshWindow(self):
    """Reload the (subset of) editor fields from SeaBASSHeader.settings.

    Only the identity/metadata fields are refreshed here; the remaining
    fields keep whatever the widgets currently show.
    """
    print("SeaBASSHeaderWindow - refreshWindow")
    self.nameLabel.setText(f'Editing: {self.name}')
    self.versionLineEdit.setText(str(SeaBASSHeader.settings["version"]))
    self.investigatorsLineEdit.setText(str(SeaBASSHeader.settings["investigators"]))
    self.affiliationsLineEdit.setText(str(SeaBASSHeader.settings["affiliations"]))
    self.contactLineEdit.setText(str(SeaBASSHeader.settings["contact"]))
    self.experimentLineEdit.setText(str(SeaBASSHeader.settings["experiment"]))
    self.cruiseLineEdit.setText(str(SeaBASSHeader.settings["cruise"]))
    self.stationLineEdit.setText(str(SeaBASSHeader.settings["station"]))
    self.documentsLineEdit.setText(str(SeaBASSHeader.settings["documents"]))
    self.instrument_manufacturerLineEdit.setText(str(SeaBASSHeader.settings["instrument_manufacturer"]))
    self.instrument_modelLineEdit.setText(str(SeaBASSHeader.settings["instrument_model"]))
    self.calibration_dateLineEdit.setText(str(SeaBASSHeader.settings["calibration_date"]))
    self.commentsLineEdit.setPlainText(SeaBASSHeader.settings["comments"])
    # CONSISTENCY FIX: other_comments is read back with toPlainText() in
    # saveButtonPressed, so restore it with setPlainText() as well (the
    # original used setText here).
    self.other_commentsLineEdit.setPlainText(SeaBASSHeader.settings["other_comments"])
def saveAsButtonPressed(self):
    """Prompt for a new header file name, then save under that name."""
    # CONSISTENCY FIX: the original printed "ConfigWindow - Save As Pressed";
    # every other handler in this window logs as SeaBASSHeaderWindow.
    print("SeaBASSHeaderWindow - Save As Pressed")
    self.name, ok = QtWidgets.QInputDialog.getText(self, 'Save As SeaBASS Header File', 'Enter File Name')
    if ok:
        print("Create SeaBASS Header: ", self.name)
        if not self.name.endswith(".hdr"):
            self.name = self.name + ".hdr"
        # NOTE(review): update() repaints but the label text is only changed
        # inside saveButtonPressed->refresh paths — confirm intended.
        self.nameLabel.update()
        SeaBASSHeaderWindow.saveButtonPressed(self)
        ConfigFile.settings["seaBASSHeaderFileName"] = self.name
def cancelButtonPressed(self):
    """Discard edits and close the window."""
    # CONSISTENCY FIX: the original printed "SeaBASSWindow - Cancel Pressed".
    print("SeaBASSHeaderWindow - Cancel Pressed")
    self.close()
import os
from dotenv import load_dotenv
from abelardoBot import AbelardoBot
import discord
import random
from discord.ext import commands
# Load environment configuration from a local .env file.
load_dotenv()
TOKEN = os.getenv('DISCORD_BOT_TOKEN')  # bot credentials; must be set in the environment
VERBOSE_LEVEL = os.getenv('VERBOSE_LEVEL')  # optional verbosity override (string or None)
# Commands use the '!' prefix; verbosity defaults to 0 when unset.
bot = AbelardoBot(command_prefix='!',
                  verbose_level=0 if VERBOSE_LEVEL is None else VERBOSE_LEVEL)
@bot.event
async def on_member_join(member):
    # Delegate new-member handling (e.g. greeting) to the bot instance.
    await bot.procesar_member_join(member)
#@bot.event
#async def on_message(message):
# if not message.content.startswith(bot.command_prefix):
# await bot.procesar_mensaje(message)
# else:
# await bot.super().on_message()
@bot.event
async def on_error(event, *args, **kwargs):
    # Delegate unhandled event errors to the bot's own handler/logging.
    await bot.procesar_error(event, *args, **kwargs)
@bot.command(name='raise-exception', help='Lanza una excepcion de prueba')
async def nine_nine(ctx):
    # Test command: reply, then raise deliberately to exercise error handling.
    await ctx.send('_choca_ **fuertemente**')
    raise discord.DiscordException
@bot.command(name='dados', help='Simula el lanzamiento de un dado.')
async def roll(ctx, numero_dado: int, numero_caras: int):
    """Roll `numero_dado` dice with `numero_caras` faces and report the results."""
    caras = range(1, numero_caras + 1)
    tiradas = []
    for _ in range(numero_dado):
        tiradas.append(str(random.choice(caras)))
    await ctx.send(f'resultados: {", ".join(tiradas)}')
@bot.command(name='soy-admin', help='te reconoce como administrador')
@commands.has_role('admin')  # only members holding the 'admin' role may invoke this
async def que_soy(ctx):
    # Confirm to the caller that they hold the admin role.
    await ctx.send(f'{ctx.message.author.name} eres admin')
@bot.event
async def on_command_error(ctx, error):
    # Delegate command errors (e.g. missing role, bad args) to the bot instance.
    await bot.procesar_command_error(ctx, error)
if __name__ == "__main__":
    # Entry point: start the Discord client event loop (blocks until shutdown).
    print("inicializando el bot")
    bot.run(TOKEN)
# proxy module
from __future__ import absolute_import
from blockcanvas.function_tools.rest_html import *
import argparse
import sys
from logging import DEBUG as logging_DEBUG
from logging import INFO as logging_INFO
from logging import FileHandler, Formatter, StreamHandler, getLogger
# One shared line layout for both log destinations.
_LOG_FORMAT = Formatter(
    "%(asctime)s [%(levelname)s] - %(filename)s - %(funcName)s() --- %(message)s"
)


def _low_severity_only(record):
    # Keep WARNING and above off stdout; they go to the file handler only.
    return record.levelno <= logging_INFO


# Console destination: DEBUG..INFO (DEBUG appears only once the logger
# itself is lowered to DEBUG, e.g. via --debug in main()).
stdout_handler = StreamHandler(stream=sys.stdout)
stdout_handler.setLevel(logging_DEBUG)
stdout_handler.addFilter(_low_severity_only)
stdout_handler.setFormatter(_LOG_FORMAT)

# File destination: records everything the logger lets through.
file_handler = FileHandler("out.log")
file_handler.setLevel(logging_DEBUG)
file_handler.setFormatter(_LOG_FORMAT)

logger = getLogger(__name__)
for _handler in (stdout_handler, file_handler):
    logger.addHandler(_handler)
logger.setLevel(logging_INFO)  # DEBUG suppressed by default
def main():
    """Demonstrate the configured logger by emitting one record per severity.

    ``--debug`` lowers the logger threshold so the DEBUG record is emitted too.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--debug", action="store_true")
    args = parser.parse_args()
    if args.debug:
        logger.setLevel(logging_DEBUG)  # also emit DEBUG records
    logger.debug("This is a debug log.")
    logger.info("This is a info log.")
    logger.warning("This is a warning log.")
    logger.error("This is a error log.")
    logger.critical("This is a critical log.")
if __name__ == "__main__":
    # CLI entry point.
    main()
import os
import unittest
from PIL import Image
from directdemod import constants
from directdemod.georeferencer import tif_to_png
class TestConvert(unittest.TestCase):
    """Regression test for georeferencer.tif_to_png conversion."""

    @classmethod
    def setUpClass(cls):
        base = constants.MODULE_PATH + '/tests/data/tif_to_png/'
        cls.tif = base + 'sample.tif'
        cls.png = base + 'sample.png'      # reference output
        cls.f = base + '_sample.png'       # file produced during the test

    @classmethod
    def tearDownClass(cls):
        # Remove the PNG produced by the test run, if it was created.
        if os.path.isfile(cls.f):
            os.remove(cls.f)

    def test_tif_to_png(self):
        tif_to_png(self.tif, self.f)
        produced = Image.open(self.f)
        reference = Image.open(self.png)
        self.assertEqual(reference, produced)
        # Unsupported conversion flag must raise NotImplementedError.
        self.assertRaises(NotImplementedError, tif_to_png, '', '', False)
# Day 21: Splitting Code Into Multiple Files
# Exercises
# For today's (only) exercise, we're giving you a bunch of code that is all in one file.
# Your task is to split that code into multiple files. You can choose how many and which files you want to split the code into, but think about why you're putting each piece on code in each file!
print("Did this locally.") | StarcoderdataPython |
from flask import Flask, jsonify
import numpy as np
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
# Connect to the local SQLite database of Hawaii climate observations.
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# Mapped classes generated by automap from the reflected schema.
Station = Base.classes.station
Measurement = Base.classes.measurement
# Module-level session shared by most route handlers.
session = Session(engine)
app = Flask(__name__)
@app.route("/")
def welcome():
"""List all available api routes."""
return (
f"Available Routes:<br/>"
f"/api/v1.0/precipitation<br/>"
f"/api/v1.0/stations<br/>"
f"/api/v1.0/tobs<br/>"
f"/api/v1.0/<start><br/>"
f"/api/v1.0/<start>/<end>"
)
@app.route("/api/v1.0/precipitation")
def precipitation():
data = session.query(Measurement.date, Measurement.prcp).filter(Measurement.date >= "2016-08-24").all()
session.close()
all_prcp = []
for date,prcp in data:
prcp_dict = {}
prcp_dict["date"] = date
prcp_dict["prcp"] = prcp
all_prcp.append(prcp_dict)
return jsonify(all_prcp)
@app.route("/api/v1.0/stations")
def stations():
stations = session.query(Station.station).\
order_by(Station.station).all()
session.close()
list_stations = list(np.ravel(stations))
return jsonify(list_stations)
# Same structure as the precipitation route, restricted to one station.
@app.route("/api/v1.0/tobs")
def tobs():
session = Session(engine)
temps = session.query(Measurement.date, Measurement.tobs,Measurement.prcp).filter(Measurement.date >= '2016-08-23').filter(Measurement.station=='USC00519281').order_by(Measurement.date).all()
session.close()
list_tobs = []
for prcp, date,tobs in temps:
tobs_dict = {}
tobs_dict["prcp"] = prcp
tobs_dict["date"] = date
tobs_dict["tobs"] = tobs
list_tobs.append(tobs_dict)
return jsonify(list_tobs)
@app.route("/api/v1.0/<start_date>")
def start_date(start_date):
results = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).filter(Measurement.date >= start_date).all()
session.close()
sdate_tobs = []
for min, avg, max in results:
sdate_tobs_dict = {}
sdate_tobs_dict["min_temp"] = min
sdate_tobs_dict["avg_temp"] = avg
sdate_tobs_dict["max_temp"] = max
sdate_tobs.append(sdate_tobs_dict)
return jsonify(sdate_tobs)
# Mirrors the start_date route, adding an upper date bound.
@app.route("/api/v1.0/<start_date>/<end_date>")
def start_end_date(start_date, end_date):
results = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all()
session.close()
start_end_tobs = []
for min, avg, max in results:
start_end_tobs_dict = {}
start_end_tobs_dict["min_temp"] = min
start_end_tobs_dict["avg_temp"] = avg
start_end_tobs_dict["max_temp"] = max
start_end_tobs.append(start_end_tobs_dict)
return jsonify(start_end_tobs)
if __name__ == '__main__':
    # Development entry point (stray dataset artifact removed from this line).
    app.run(debug=True)
# coco-dst/run_demo.py
"""
Copyright (c) 2020, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
import argparse
import logging
import numpy as np
import torch
import random
from utility.data import *
from transformers import (
T5ForConditionalGeneration,
T5Tokenizer
)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO,
)
logger = logging.getLogger(__name__)
MAX_LENGTH = int(10000)  # Hardcoded max length to avoid infinite loop
# Registry of supported architectures: name -> (model class, tokenizer class).
MODEL_CLASSES = {
    "t5": (T5ForConditionalGeneration, T5Tokenizer)
}
def set_seed(args):
    """Seed every RNG in use (python, numpy, torch) from args.seed.

    When GPUs are in use (args.n_gpu > 0) the CUDA generators are seeded too.
    """
    seed = args.seed
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(seed)
def adjust_length_to_model(length, max_sequence_length):
    """Clamp a requested generation length to what the model supports.

    A negative ``length`` means "unspecified": use the model maximum when
    one exists, otherwise fall back to MAX_LENGTH so generation is bounded.
    """
    if max_sequence_length > 0:
        if length < 0 or length > max_sequence_length:
            return max_sequence_length
        return length
    # No model limit known: only replace an unspecified (negative) length.
    return MAX_LENGTH if length < 0 else length
def main():
    """Interactive demo: load a fine-tuned T5 counterfactual generator and
    generate user-utterance candidates for MultiWOZ dialogue turns.

    The loop asks for a dataset index and an optional belief-state override,
    encodes the prompt, and decodes ``num_return_sequences`` candidates.
    Exits only via "exit"/"quit" at the index prompt.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type",
        default=None,
        type=str,
        required=True,
        help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()),
    )
    parser.add_argument("--eval_data_file",
                        default=None,
                        type=str,
                        required=True,
                        help="Dataset to do eval")
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(MODEL_CLASSES.keys()),
    )
    parser.add_argument("--length", type=int, default=20)
    parser.add_argument("--stop_token", type=str, default=None, help="Token at which text generation is stopped")
    parser.add_argument(
        "--temperature",
        type=float,
        default=1.0,
        help="temperature of 1.0 has no effect, lower tend toward greedy sampling",
    )
    parser.add_argument(
        "--repetition_penalty", type=float, default=1.0, help="primarily useful for CTRL model; in that case, use 1.2"
    )
    parser.add_argument("--k", type=int, default=0)
    parser.add_argument("--num_beams", type=int, default=5)
    parser.add_argument("--p", type=float, default=0.9)
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
    parser.add_argument("--do_sample", action="store_true", help="Do sample during decoding")
    parser.add_argument("--num_return_sequences", type=int, default=1, help="The number of samples to generate.")
    parser.add_argument("--shuffle_turn_label", action='store_true', help="if we shuffle conditional turn label")
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
    args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    set_seed(args)
    # Initialize the model and tokenizer
    try:
        args.model_type = args.model_type.lower()
        model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    except KeyError:
        raise KeyError("the model {} you specified is not supported. You are welcome to add it and open a PR :)")
    tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path)
    model = model_class.from_pretrained(args.model_name_or_path)
    model.to(args.device)
    # Clamp the requested generation length to the model's positional capacity.
    args.length = adjust_length_to_model(args.length, max_sequence_length=model.config.max_position_embeddings)
    dataset = MultiWOZForT5_Interact(data_dir=args.eval_data_file, tokenizer=tokenizer,
                                     shuffle_turn_label=args.shuffle_turn_label)
    logger.info(args)
    play = True
    # NOTE(review): `play` is never set False — the loop exits only via sys.exit().
    while (play):
        print("------------------------------------------")
        print("----------------- NEXT -------------------")
        print("Dataset len: ", len(dataset))
        valid_input_flag = True
        # Prompt until a valid dataset index is entered; "exit"/"quit" terminates.
        while (valid_input_flag):
            try:
                data_idx = input("Dataset index >>> ")
                idx = int(data_idx.strip())
                assert idx >= 0 and idx < len(dataset)
                dataset.print_value(idx)
                valid_input_flag = False
            except:
                # NOTE(review): if input() itself raised, data_idx is unbound here.
                if ("exit" in data_idx or "quit" in data_idx):
                    import sys
                    sys.exit()
                print("Index out of boundary or not valid")
        valid_input_flag = True
        # Prompt for an optional belief-state override (blank keeps the default).
        while (valid_input_flag):
            try:
                print(
                    "Input your belief state as "'domain1-name1-value1 , domain2-name2-value2'" or ENTER to use default belief state")
                prompt_text = input(">>> ").strip().lower()
                dataset.prompt_text = prompt_text
                valid_input_flag = False
            except:
                valid_input_flag = True
        encoded_prompt = dataset[idx]
        print("-------------------------")
        print("Input Tokens:")
        print(tokenizer.decode(encoded_prompt))
        print("-------------------------")
        print("Generated User Utterence Candidates:")
        print("-------------------------")
        # Shape the token ids as a single-example batch on the chosen device.
        encoded_prompt = torch.tensor(encoded_prompt, dtype=torch.long).view(1, -1)
        encoded_prompt = encoded_prompt.to(args.device)
        if encoded_prompt.size()[-1] == 0:
            input_ids = None
        else:
            input_ids = encoded_prompt
        output_sequences = model.generate(
            input_ids=input_ids,
            max_length=args.length + len(encoded_prompt[0]),
            temperature=args.temperature,
            top_k=args.k,
            top_p=args.p,
            num_beams=args.num_beams,
            repetition_penalty=args.repetition_penalty,
            do_sample=args.do_sample,
            num_return_sequences=args.num_return_sequences,
        )
        # Remove the batch dimension when returning multiple sequences
        if len(output_sequences.shape) > 2:
            output_sequences.squeeze_()
        generated_sequences = []
        for generated_sequence_idx, generated_sequence in enumerate(output_sequences):
            generated_sequence = generated_sequence.tolist()
            text = tokenizer.decode(generated_sequence, clean_up_tokenization_spaces=True)
            # Truncate at the stop token, if one was supplied.
            text = text[: text.find(args.stop_token) if args.stop_token else None]
            generated_sequences.append(text)
            print("(" + str(generated_sequence_idx) + "): " + text)


if __name__ == "__main__":
    main()
# Copyright (c) 2013, <NAME> and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
def execute(filters=None):
    """Frappe report entry point: return (columns, data) for the given filters."""
    data = get_data(filters)
    columns = get_columns(filters)
    return columns, data
def get_columns(filters):
    """Return the report column definitions.

    The date-wise and aggregate layouts share every column; date-wise mode
    simply inserts a Date column after Workstation Head. (The original
    duplicated the entire column list for each mode.)
    """
    columns = [
        {
            "fieldname": "workstation_head",
            "label": _("Workstation Head"),
            "fieldtype": "Data",
            "width": 150
        },
        {
            "fieldname": "machine_helper_id",
            "label": _("Machine Helper ID"),
            "fieldtype": "Link",
            "options": "Employee",
            "width": 150
        },
        {
            "fieldname": "machine_helper",
            "label": _("Machine Helper"),
            "fieldtype": "Data",
            "width": 150
        },
        {
            "fieldname": "total_stock",
            "label": _("Total Stock"),
            "fieldtype": "Float",
            "width": 150,
            "precision": 4
        },
        {
            "fieldname": "total_pay",
            "label": _("Total Pay"),
            "fieldtype": "Float",
            "width": 150,
            "precision": 4
        },
        {
            # NOTE(review): "dumy"/"Empty" looks like a spacer column; the
            # misspelled fieldname is kept because it is part of the report's
            # external contract.
            "fieldname": "dumy",
            "label": _("Empty"),
            "fieldtype": "Data",
            "width": 150,
            "precision": 4
        }
    ]
    if filters.date_wise:
        columns.insert(1, {
            "fieldname": "date",
            "label": _("Date"),
            "fieldtype": "Date",
            "width": 150
        })
    return columns
def get_data(filters=None):
    """Fetch aggregated packing totals (stock and pay) per machine helper.

    SECURITY FIX: filter values are passed to frappe.db.sql as bind
    parameters instead of being interpolated into the SQL string, closing
    an SQL-injection hole (the original used str.format on user filters).
    """
    conditions = ""
    values = {}
    columns = """workstation_head,
        machine_helper_id,
        machine_helper,
        sum(total_shift_stock),
        sum(total_shift_stock * rate)"""
    group_by = ""
    if filters.from_date:
        conditions += " and date >= %(from_date)s "
        values["from_date"] = filters.from_date
    if filters.to_date:
        conditions += " and date <= %(to_date)s "
        values["to_date"] = filters.to_date
    if filters.workstation_head:
        # LIKE wildcards belong in the bound value, not the SQL text.
        conditions += " and workstation_head like %(workstation_head)s "
        values["workstation_head"] = "%{0}%".format(filters.workstation_head)
    if filters.date_wise:
        columns = """workstation_head,
            date,
            machine_helper_id,
            machine_helper,
            sum(total_shift_stock),
            sum(total_shift_stock * rate)"""
        group_by = " ,date"
    return frappe.db.sql(
        """ select {2} from `tabPNI Packing` where docstatus = 1 {0} group by machine_helper {1};""".format(
            conditions, group_by, columns),
        values,
    )
"""
SET SQL_SAFE_UPDATES = 0;
update `tabPNI Packing` as packing, `tabWorkstation` as workstation set packing.rate = workstation.pni_rate where packing.workstation = workstation.name and packing.name <> "" and workstation.name <> "";
SET SQL_SAFE_UPDATES = 1;
"""
# cybersource_hosted_checkout/tests/test_utils.py
from django.conf import settings
from django.test import TestCase
from cybersource_hosted_checkout.utils import create_sha256_signature, sign_fields_to_context
class UtilTests(TestCase):
    """Unit tests for the CyberSource signing helpers."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_sha256_signature(self):
        # Known-answer test: signature of a fixed field string under the
        # configured secret key.
        signed = create_sha256_signature(
            settings.CYBERSOURCE_SECRET_KEY,
            'key1=value1,key2=value2,key3=value3',
        )
        self.assertEqual(signed, 'A8ew8SEYdgbyeiiQBWFYHsW1pcAAZFroS331gMDzBaI=')

    def test_sign_fields(self):
        context = {'contextkey1': 'contextvalue1'}
        fields = {'key1': 'value1', 'key2': 'value2', 'key3': 'value3'}
        result = sign_fields_to_context(fields, context)
        # Existing context keys are preserved.
        self.assertEqual(result['contextkey1'], 'contextvalue1')
        # Input fields pass through unchanged.
        self.assertEqual(result['fields']['key1'], 'value1')
        # The canonical list of signed field names is appended.
        self.assertEqual(
            result['fields']['signed_field_names'],
            'key1,key2,key3,signed_date_time,unsigned_field_names,signed_field_names',
        )
        self.assertEqual(
            result['url'],
            'https://testsecureacceptance.cybersource.com/pay',
        )
# tests/test_model_bundle.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `model_bundle` package."""
import unittest
from model_bundle.model_bundle import ModelBundle
class TestModelBundle(unittest.TestCase):
    """Tests for `model_bundle` package."""

    def setUp(self):
        """Build a blob dataset plus a small random out-of-distribution batch."""
        import numpy as np
        import matplotlib.pyplot as plt
        from sklearn.datasets import make_blobs
        from model_bundle.model_params import model_list_pca, model_params_pca

        self.model_list, self.model_params = model_list_pca, model_params_pca
        X, y = make_blobs(n_samples=200, n_features=10, centers=3, random_state=42)
        # Random points with the same shape as the first four samples,
        # used as "unknown" inputs.
        X_rand = np.random.random(X[:4].shape)
        self.X_test = X_rand
        self.X, self.y = X, y

    def tearDown(self):
        """Tear down test fixtures, if any."""

    def test_000_init(self):
        """A bundle can be constructed with no arguments."""
        ModelBundle()

    def test_001_init(self):
        """A bundle accepts an explicit model list."""
        ModelBundle(model_list=self.model_list)

    def test_002_fit(self):
        """PCA-style bundles fit without error."""
        bundle = ModelBundle(model_list=self.model_list)
        bundle.fit(self.X)

    def test_002_fit_cluster(self):
        """Cluster-style bundles fit without error."""
        from model_bundle.model_params import model_list_cluster, model_params_cluster
        bundle = ModelBundle(model_list=model_list_cluster)
        bundle.fit(self.X)

    def test_003_fit(self):
        """A fitted bundle can transform the training data."""
        bundle = ModelBundle(model_list=self.model_list)
        bundle.fit(self.X)
        bundle.transform(self.X)

    def test_000_model_params(self):
        pass

    def test_do_nonthing(self):
        pass
if __name__ == "__main__":
    # BUGFIX: unittest.TestCase() only instantiates an empty test case;
    # unittest.main() actually discovers and runs the tests in this module.
    unittest.main()
# webverify/webverify.py
import logging
import time
import base64
from binascii import Error as BinAsciiError
import hmac
import hashlib
import uuid
from flask import Flask
from flask import request, g, current_app
from flask import render_template, redirect, url_for
import requests
from requests.exceptions import Timeout as RequestsTimeout
import psycopg2
# Detect whether we are running inside a uWSGI worker; the `uwsgi` module is
# only importable there, where it provides the shared cache API.
IS_UWSGI = False
try:
    import uwsgi
    IS_UWSGI = True
except ImportError:
    # BUGFIX: the original used a bare `except:`, which would also swallow
    # SystemExit/KeyboardInterrupt; only a failed import is expected here.
    pass
# Flask application object; the `application` alias is the default callable
# name a WSGI server looks for.
app = application = Flask(__name__)
cfg = app.config
cfg.from_pyfile('../webverify.cfg')
app.logger.setLevel(logging.INFO)
# Optionally remount the app under a sub-path (e.g. behind a reverse proxy);
# anything outside the mount point gets a plain 404.
if 'MOUNT_POINT' in cfg and cfg['MOUNT_POINT'] != '/' and cfg['MOUNT_POINT'][0] == '/':
    from werkzeug.middleware.dispatcher import DispatcherMiddleware
    from werkzeug.wrappers import Response
    default_app = Response('Not Found', status=404)
    mounts = {cfg['MOUNT_POINT']: app.wsgi_app}
    app.wsgi_app = DispatcherMiddleware(default_app, mounts)
class WebverifyException(Exception):
    """Base class for all webverify-specific errors."""
    pass
class DbException(WebverifyException):
    """Database connection/operation failure (wraps the psycopg2 error)."""
    pass
class DbRetryDelayException(WebverifyException):
    """Raised when a reconnect is attempted within the PG_RECONN_DELAY window."""
    pass
class TokenException(WebverifyException):
    """Verification-token failure; str(exc) is the error code shown to users."""
    pass
class CaptchaException(WebverifyException):
    """hCaptcha verification failure; str(exc) is the provider/synthetic code."""
    pass
from werkzeug.exceptions import InternalServerError, HTTPException
@app.errorhandler(HTTPException)
def unhandled_exception(e):
    """Catch-all HTTP error handler: log with a correlation id and render a
    generic error page (always reported as 500)."""
    error_id = uuid.uuid4()
    app.logger.error("unhandled exception error_id=%s, path=%s", error_id, request.path)
    return render_template("error_exception.html", error_id=error_id), 500
@app.before_first_request
def before_first_request():
    """Open the initial database connection; on failure install a DummyConn
    (always reports closed) so before_request() retries on the next request."""
    current_app.last_failed_db_conn = None
    try:
        current_app.conn = db_connect()
    except DbException:
        current_app.conn = DummyConn()
@app.before_request
def before_request():
    """Expose a live DB connection as g.conn, reconnecting if the cached
    connection is closed; renders a 503 error page when reconnect fails."""
    conn = current_app.conn
    if conn.closed != 0:
        try:
            conn = db_connect()
        except DbException as e:
            error_id = uuid.uuid4()
            app.logger.error("database exception error_id=%s: %s", error_id, str(e))
            return render_template("error_db.html", error_id=error_id), 503
        # Cache the fresh connection for subsequent requests.
        current_app.conn = conn
    g.conn = conn
class DummyConn:
    """Stand-in for a psycopg2 connection that always reports itself closed,
    forcing before_request() to attempt a real reconnect."""
    def __init__(self):
        self.closed = 1  # nonzero mimics psycopg2's "connection closed" state
def db_connect():
    """Create a new autocommit PostgreSQL connection.

    Implements a simple fail-fast back-off: if the previous attempt failed
    less than cfg['PG_RECONN_DELAY'] seconds ago, raise without contacting
    the server.

    Raises:
        DbException: on connection failure or while the retry delay is active.
    """
    try:
        if current_app.last_failed_db_conn:
            if time.time() - current_app.last_failed_db_conn <= cfg['PG_RECONN_DELAY']:
                raise DbRetryDelayException('retry-delay')
            else:
                # Delay window has elapsed; clear the marker and try again.
                current_app.last_failed_db_conn = None
        conn = psycopg2.connect(
            host=cfg['PG_HOST'], port=cfg['PG_PORT'], sslmode=cfg['PG_SSLMODE'],
            database=cfg['PG_DATABASE'], user=cfg['PG_USER'], password=cfg['PG_PASSWORD'],
            connect_timeout=cfg['PG_TIMEOUT'], application_name='webverify',
            keepalives=1, keepalives_idle=30, keepalives_interval=10, keepalives_count=5,
            target_session_attrs='read-write',
        )
    except (psycopg2.OperationalError, DbRetryDelayException) as e:
        # Record the failure time to start (or keep) the retry-delay window.
        if current_app.last_failed_db_conn == None:
            current_app.last_failed_db_conn = time.time()
        raise DbException(e)
    conn.autocommit = True
    return conn
@app.route('/')
def index():
    """Landing page; shown when no verification token is supplied."""
    return render_template('index.html')
@app.route('/<token>', methods=['GET', 'POST'])
def verify(token=None):
    """Main verification endpoint.

    Validates the signed token from the URL, then either shows the captcha
    form (GET) or processes the captcha submission (POST).
    """
    try:
        key = cfg['TOKEN_HMAC_KEY']
        if cfg['HASH_TOKEN_HMAC_KEY_BEFORE_USE'] == True:
            key = hashlib.sha1(key).digest()
        nick = validate_token(token, key, cfg['TOKEN_EXPIRE'], int(time.time()))
    except TokenException as e:
        error = str(e)
        error_id = None
        # A token timestamped in the future implies clock skew or forgery —
        # log loudly with a correlation id.
        if error == 'token-future':
            error_id = uuid.uuid4()
            app.logger.fatal("token exception error=%s, error_id=%s, token=%s", error, error_id, token)
        return render_template('error_token.html', error=error, error_id=error_id), 400
    try:
        verified = is_nick_verified(nick)
        if verified is None:
            # Nick no longer exists.
            return render_template("error_nonick.html", nick=nick), 404
        elif verified is True:
            return render_template("success_already.html", nick=nick)
        else:
            if request.method == 'GET':
                return get(token, nick)
            elif request.method == 'POST':
                return post(token, nick)
    except DbException as e:
        error_id = uuid.uuid4()
        app.logger.error("database exception error_id=%s: %s", error_id, str(e))
        return render_template("error_db.html", error_id=error_id), 503
def get(token, nick):
    """Render the captcha form for `nick`, embedding the token for round-trip."""
    return render_template('form.html',
                           hcaptcha_site_key=cfg['HCAPTCHA_SITE_KEY'],
                           token=token, nick=nick, auto_submit=cfg['AUTO_SUBMIT'])
def post(token, nick):
    """Handle the captcha form submission for `nick`.

    Re-checks the token round-tripped through the form, validates the
    hCaptcha response server-side, and marks the nick verified on success.
    """
    if not 'token' in request.form:
        return render_template('error_token.html', error='token-missing'), 400
    if not 'h-captcha-response' in request.form:
        # Form submitted without completing the captcha widget.
        return redirect(url_for('index'), code=303)
    if not token == request.form['token']:
        return render_template('error_token.html', error='token-mismatch'), 400
    try:
        if validate_hcaptcha(request.form['h-captcha-response']):
            did_set = set_nick_verified(nick)
            if did_set == None:
                app.logger.info("verify no_nick nick='%s' token='%s'", nick, token)
                return render_template("error_nonick.html", nick=nick), 404
            elif did_set == True:
                app.logger.info("verify success nick='%s' token='%s'", nick, token)
                return render_template('success.html', nick=nick)
            else:
                # did_set is False: the DB declined to verify this nick.
                app.logger.warning("verify fail nick='%s' token='%s'", nick, token)
                return render_template('failure.html')
    except CaptchaException as e:
        error = str(e)
        # Common/expected captcha error codes are not logged, to avoid noise.
        skip_log = [
            'invalid-or-already-seen-response', 'missing-input-response',
            'invalid-input-response', 'captcha-timeout', 'captcha-bad-response'
        ]
        if error not in skip_log:
            error_id = uuid.uuid4()
            app.logger.critical("captcha exception error=%s, error_id=%s", error, error_id)
            return render_template('error_captcha.html', token=token, error=error, error_id=error_id)
        return render_template('error_captcha.html', token=token, error=error)
def validate_token(token, key, expire=3600, now=None):
    """Validate a signed verification token and return the nick it names.

    Token format: hex(nick) + ':' + unix_time + ':' +
    hex(HMAC-SHA1(key, nick + ':' + unix_time)).

    Raises:
        TokenException: 'token-invalid' for any malformed or mis-signed
            token, 'token-future' for a timestamp ahead of `now`,
            'token-expired' when older than `expire` seconds.
    """
    if not token:
        raise TokenException('token-invalid')
    if now is None:
        now = int(time.time())
    parts = token.split(':')
    if len(parts) != 3:
        raise TokenException('token-invalid')
    nick_hex, issued_str, mac_hex = parts
    try:
        nick = base64.b16decode(nick_hex, casefold=True).decode()
        supplied_mac = base64.b16decode(mac_hex, casefold=True)
    except BinAsciiError:
        raise TokenException('token-invalid')
    expected_mac = hmac.new(key, (nick + ':' + issued_str).encode(), hashlib.sha1).digest()
    # Constant-time comparison to avoid HMAC timing side channels.
    if not hmac.compare_digest(expected_mac, supplied_mac):
        raise TokenException('token-invalid')
    try:
        issued = int(issued_str)
    except ValueError:
        raise TokenException('token-invalid')
    if now < issued:
        raise TokenException('token-future')
    if (now - issued) > expire:
        raise TokenException('token-expired')
    return nick
def is_nick_verified(nick):
    """
    Returns True if verified, False if not, and None if nick doesn't
    exist.

    If running in uWSGI, it attempts to use a cached value set in
    `maybe_set_cache`.
    """
    if IS_UWSGI:
        # Cache key is SHA1(nick); values: b'1' = verified, b'0' = nonexistent.
        key = hashlib.sha1(nick.encode()).digest()
        val = uwsgi.cache_get(key, 'verified')
        if val == b'1':
            return True
        elif val == b'0':
            return None
    # Cache miss (or not under uWSGI): ask the database and refresh the cache.
    is_verified = run_query(r'SELECT webverify_check(%s)', (nick,))
    maybe_set_cache(nick, is_verified)
    return is_verified
def set_nick_verified(nick):
    """Mark `nick` verified in the database and refresh the uWSGI cache.

    Returns whatever webverify_verify() yields; callers treat True as
    success, None as nonexistent nick, and anything else as failure.
    """
    did_verify = run_query(r'SELECT webverify_verify(%s)', (nick,))
    maybe_set_cache(nick, did_verify)
    return did_verify
def run_query(query, params):
    """Run a single-value query on the request's connection (g.conn) and
    return the first column of the first row; wraps connection errors in
    DbException so callers can render the DB error page."""
    try:
        with g.conn.cursor() as cur:
            cur.execute(query, params)
            return cur.fetchone()[0]
    except psycopg2.OperationalError as e:
        raise DbException(e)
def maybe_set_cache(nick, verified):
    """
    If running in uWSGI, it sets a key to a value depending on if it verified
    successfully or if the nick no longer exists.

    key is the sha1 hash of the nick, and the value is 1 for successful
    verification and 0 for nonexistent.
    """
    if not IS_UWSGI:
        return
    # b'1' = verified, b'0' = nick does not exist. False is deliberately not
    # cached, so an unverified nick is re-checked against the DB each time.
    if verified == True:
        value = b'1'
    elif verified == None:
        value = b'0'
    else:
        return
    # DRY: the original duplicated the hash+cache_set call in both branches.
    key = hashlib.sha1(nick.encode()).digest()
    uwsgi.cache_set(key, value, cfg['CACHE_EXPIRE'], 'verified')
def validate_hcaptcha(response):
    """Verify an hCaptcha widget response against the provider's API.

    Returns True on success; raises CaptchaException with the provider's
    error code (or a synthetic code) otherwise — it never returns False.
    """
    data = { 'response': response, 'secret': cfg['HCAPTCHA_SECRET_KEY'] }
    try:
        r = requests.post('https://hcaptcha.com/siteverify', data=data, timeout=cfg.get('REQUESTS_TIMEOUT', (2, 5)))
    except RequestsTimeout:
        raise CaptchaException('captcha-timeout')
    # Anything other than a 200 JSON body is treated as a bad provider response.
    if not (r.status_code == 200 and r.headers['Content-Type'] == 'application/json'):
        raise CaptchaException('captcha-bad-response')
    try:
        resp = r.json()
    except ValueError:
        raise CaptchaException('captcha-bad-response')
    if 'success' in resp and resp['success'] == True:
        return True
    else:
        if not 'error-codes' in resp:
            raise CaptchaException('captcha-bad-response')
        errors = resp['error-codes']
        # Single error codes pass through verbatim for the skip_log check
        # in post(); multiple codes are wrapped under one synthetic code.
        if len(errors) == 1:
            raise CaptchaException(errors[0])
        else:
            raise CaptchaException('captcha-multiple-errors', errors)
# rdr_service/alembic/versions/fdc0fb9ca67a_rename_participant_view.py
"""rename participant_view
Revision ID: fdc0fb9ca67a
Revises: <KEY>
Create Date: 2019-06-06 10:40:31.617393
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = "fdc0fb9ca67a"
down_revision = "<KEY>"
branch_labels = None
depends_on = None
from rdr_service.dao.alembic_utils import ReplaceableObject
PARTICIPANT_VIEW = ReplaceableObject(
"participant_view",
"""
SELECT
p.participant_id,
p.sign_up_time,
p.withdrawal_status,
p.withdrawal_time,
p.suspension_status,
p.suspension_time,
hpo.name hpo,
ps.zip_code,
state_code.value state,
recontact_method_code.value recontact_method,
language_code.value language,
TIMESTAMPDIFF(YEAR, ps.date_of_birth, CURDATE()) age_years,
gender_code.value gender,
sex_code.value sex,
sexual_orientation_code.value sexual_orientation,
education_code.value education,
income_code.value income,
ps.enrollment_status,
ps.race,
ps.physical_measurements_status,
ps.physical_measurements_finalized_time,
ps.physical_measurements_time,
ps.physical_measurements_created_site_id,
ps.physical_measurements_finalized_site_id,
ps.consent_for_study_enrollment,
ps.consent_for_study_enrollment_time,
ps.consent_for_electronic_health_records,
ps.consent_for_electronic_health_records_time,
ps.questionnaire_on_overall_health,
ps.questionnaire_on_overall_health_time,
ps.questionnaire_on_lifestyle,
ps.questionnaire_on_lifestyle_time,
ps.questionnaire_on_the_basics,
ps.questionnaire_on_the_basics_time,
ps.questionnaire_on_healthcare_access,
ps.questionnaire_on_healthcare_access_time,
ps.questionnaire_on_medical_history,
ps.questionnaire_on_medical_history_time,
ps.questionnaire_on_medications,
ps.questionnaire_on_medications_time,
ps.questionnaire_on_family_health,
ps.questionnaire_on_family_health_time,
ps.biospecimen_status,
ps.biospecimen_order_time,
ps.biospecimen_source_site_id,
ps.biospecimen_collected_site_id,
ps.biospecimen_processed_site_id,
ps.biospecimen_finalized_site_id,
ps.sample_order_status_1sst8,
ps.sample_order_status_1sst8_time,
ps.sample_order_status_1pst8,
ps.sample_order_status_1pst8_time,
ps.sample_order_status_1hep4,
ps.sample_order_status_1hep4_time,
ps.sample_order_status_1ed04,
ps.sample_order_status_1ed04_time,
ps.sample_order_status_1ed10,
ps.sample_order_status_1ed10_time,
ps.sample_order_status_2ed10,
ps.sample_order_status_2ed10_time,
ps.sample_order_status_1ur10,
ps.sample_order_status_1ur10_time,
ps.sample_order_status_1sal,
ps.sample_order_status_1sal_time,
ps.sample_order_status_1sal2,
ps.sample_order_status_1sal2_time,
ps.sample_order_status_1cfd9,
ps.sample_order_status_1cfd9_time,
ps.sample_order_status_1pxr2,
ps.sample_order_status_1pxr2_time,
ps.sample_status_1sst8,
ps.sample_status_1sst8_time,
ps.sample_status_1pst8,
ps.sample_status_1pst8_time,
ps.sample_status_1hep4,
ps.sample_status_1hep4_time,
ps.sample_status_1ed04,
ps.sample_status_1ed04_time,
ps.sample_status_1ed10,
ps.sample_status_1ed10_time,
ps.sample_status_2ed10,
ps.sample_status_2ed10_time,
ps.sample_status_1ur10,
ps.sample_status_1ur10_time,
ps.sample_status_1sal,
ps.sample_status_1sal_time,
ps.sample_status_1sal2,
ps.sample_status_1sal2_time,
ps.sample_status_1cfd9,
ps.sample_status_1cfd9_time,
ps.sample_status_1pxr2,
ps.sample_status_1pxr2_time,
ps.num_completed_baseline_ppi_modules,
ps.num_completed_ppi_modules,
ps.num_baseline_samples_arrived,
ps.samples_to_isolate_dna,
ps.consent_for_cabor,
ps.consent_for_cabor_time,
(SELECT IFNULL(GROUP_CONCAT(
IF(ac.value = 'WhatRaceEthnicity_RaceEthnicityNoneOfThese',
'NoneOfThese',
TRIM(LEADING 'WhatRaceEthnicity_' FROM
TRIM(LEADING 'PMI_' FROM ac.value)))),
'None')
FROM questionnaire_response qr, questionnaire_response_answer qra,
questionnaire_question qq, code c, code ac
WHERE qra.end_time IS NULL AND
qr.questionnaire_response_id = qra.questionnaire_response_id AND
qra.question_id = qq.questionnaire_question_id AND
qq.code_id = c.code_id AND c.value = 'Race_WhatRaceEthnicity' AND
qr.participant_id = p.participant_id AND
qra.value_code_id = ac.code_id AND
ac.value != 'WhatRaceEthnicity_Hispanic'
) race_codes,
(SELECT COUNT(ac.value)
FROM questionnaire_response qr, questionnaire_response_answer qra,
questionnaire_question qq, code c, code ac
WHERE qra.end_time IS NULL AND
qr.questionnaire_response_id = qra.questionnaire_response_id AND
qra.question_id = qq.questionnaire_question_id AND
qq.code_id = c.code_id AND c.value = 'Race_WhatRaceEthnicity' AND
qr.participant_id = p.participant_id AND
qra.value_code_id = ac.code_id AND
ac.value = 'WhatRaceEthnicity_Hispanic'
) hispanic
FROM
participant p
LEFT OUTER JOIN hpo ON p.hpo_id = hpo.hpo_id
LEFT OUTER JOIN participant_summary ps ON p.participant_id = ps.participant_id
LEFT OUTER JOIN code state_code ON ps.state_id = state_code.code_id
LEFT OUTER JOIN code recontact_method_code ON ps.recontact_method_id = recontact_method_code.code_id
LEFT OUTER JOIN code language_code ON ps.language_id = language_code.code_id
LEFT OUTER JOIN code gender_code ON ps.gender_identity_id = gender_code.code_id
LEFT OUTER JOIN code sex_code ON ps.sex_id = sex_code.code_id
LEFT OUTER JOIN code sexual_orientation_code ON ps.sexual_orientation_id = sexual_orientation_code.code_id
LEFT OUTER JOIN code education_code ON ps.education_id = education_code.code_id
LEFT OUTER JOIN code income_code ON ps.income_id = income_code.code_id
WHERE (ps.email IS NULL OR ps.email NOT LIKE <EMAIL>') AND
(hpo.name IS NULL OR hpo.name != 'TEST')
AND p.is_ghost_id IS NOT TRUE
""",
)
def upgrade(engine_name):
    """Dispatch to this module's engine-specific upgrade routine."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to this module's engine-specific downgrade routine."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_rdr():
    # ### commands auto generated by Alembic - please adjust! ###
    # Install this revision's participant_view in place of the f512f8ca07c2 version.
    op.replace_view(PARTICIPANT_VIEW, replaces="f512f8ca07c2.PARTICIPANT_VIEW")
    # ### end Alembic commands ###
def downgrade_rdr():
    # ### commands auto generated by Alembic - please adjust! ###
    # Restore the f512f8ca07c2 revision's participant_view.
    op.replace_view(PARTICIPANT_VIEW, replace_with="f512f8ca07c2.PARTICIPANT_VIEW")
    # ### end Alembic commands ###
def upgrade_metrics():
    # ### commands auto generated by Alembic - please adjust! ###
    # No metrics-database changes in this revision.
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    # ### commands auto generated by Alembic - please adjust! ###
    # No metrics-database changes in this revision.
    pass
    # ### end Alembic commands ###
| StarcoderdataPython |
# Convert the strings "true"/"false" into the integers 1/0.
word = "true"
# The comparison yields a bool; int() maps True -> 1 and False -> 0.
word = int(word == "true")
print(word)
word1 = "false"
word1 = int(word1 == "true")
print(word1)
8132647 | # ***************************************************************
# Copyright (c) 2020 Jittor. Authors: <NAME> <<EMAIL>>. All Rights Reserved.
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
# ***************************************************************
import unittest
import jittor as jt
import numpy as np
from jittor import compile_extern
class TestFetcher(unittest.TestCase):
    """Checks that jt.fetch delivers the realized array value to its callback."""
    def test_fetch(self):
        a = jt.array([1,2,3])
        a = a*2
        v = []
        # fetch() registers a callback receiving the computed value;
        # sync_all(True) forces execution, so the callback must have fired.
        jt.fetch([a], lambda a: v.append(a))
        jt.sync_all(True)
        assert len(v)==1 and (v[0]==[2,4,6]).all()
@unittest.skipIf(not jt.has_cuda, "Cuda not found")
class TestFetcherCuda(TestFetcher):
    """Re-runs the inherited TestFetcher cases with the CUDA backend enabled."""
    # NOTE(review): these are classmethods, so the first parameter is really
    # the class; it is named ``self`` here — consider renaming to ``cls``.
    @classmethod
    def setUpClass(self):
        jt.flags.use_cuda = 1
    @classmethod
    def tearDownClass(self):
        # Restore the CPU backend so later test classes are unaffected.
        jt.flags.use_cuda = 0
if __name__ == "__main__":
unittest.main() | StarcoderdataPython |
12838703 | <reponame>ibnmasud/AI-102-AIEngineer
import os
from dotenv import load_dotenv
from azure.core.exceptions import ResourceNotFoundError
from azure.ai.formrecognizer import FormRecognizerClient
from azure.ai.formrecognizer import FormTrainingClient
from azure.core.credentials import AzureKeyCredential
def main():
    """Run a trained Azure Form Recognizer custom model on test1.jpg and print fields."""
    try:
        # Get configuration settings
        load_dotenv()
        form_endpoint = os.getenv('FORM_ENDPOINT')
        form_key = os.getenv('FORM_KEY')
        # Create client using endpoint and key
        form_recognizer_client = FormRecognizerClient(form_endpoint, AzureKeyCredential(form_key))
        form_training_client = FormTrainingClient(form_endpoint, AzureKeyCredential(form_key))
        # Model ID from when you trained your model.
        model_id = os.getenv('MODEL_ID')
        # Test trained model with a new form
        with open('test1.jpg', "rb") as f:
            poller = form_recognizer_client.begin_recognize_custom_forms(
                model_id=model_id, form=f)
        # Block until the long-running recognition operation completes.
        result = poller.result()
        for recognized_form in result:
            print("Form type: {}".format(recognized_form.form_type))
            for name, field in recognized_form.fields.items():
                # Fall back to the field name when no label text was detected.
                print("Field '{}' has label '{}' with value '{}' and a confidence score of {}".format(
                    name,
                    field.label_data.text if field.label_data else name,
                    field.value,
                    field.confidence
                ))
    except Exception as ex:
        print(ex)
if __name__ == '__main__':
main() | StarcoderdataPython |
174523 | import pickle
import xlsxwriter
import numpy as np
import os
def load(filename):
    """Unpickle and return the object stored in *filename*.

    Bug fix: the original bound the result to ``loaded_dict`` but returned
    the builtin ``dict`` type instead of the loaded object, and it never
    closed the file handle.
    """
    with open(filename, 'rb') as fh:
        return pickle.load(fh)
def np_2darray_converter(matrix):
    """Coerce *matrix* into a 2-D numpy array and report its shape.

    Dicts are flattened to a two-column (key, str(value)) table first.

    Returns:
        (rows, cols, array), or (-1, -1, -1) for unsupported data that numpy
        can only represent with object dtype (e.g. nested dicts).
    """
    # isinstance() instead of ``type(x) == type({})`` — also accepts dict
    # subclasses such as OrderedDict.
    if isinstance(matrix, dict):  # make the dictionary suitable for excel
        keys = list(matrix.keys())
        values = [str(value) for value in matrix.values()]
        matrix = np.array([keys, values]).transpose()
    new_matrix = np.array(matrix, ndmin=2)
    if new_matrix.dtype == 'O':  # object dtype cannot be written as plain cells
        return -1, -1, -1
    rows = new_matrix.shape[0]
    cols = new_matrix.shape[1]
    return rows, cols, new_matrix
def create_dat(filename, keys, data=None):
    """Pickle *keys* to *filename*, or a dict built from keys/data pairs.

    Args:
        filename: Destination path for the pickle file.
        keys: Object to dump directly, or the dict keys when *data* is given.
        data: Optional sequence of values, indexed in step with *keys*.
    """
    if data is None:
        payload = keys
    else:
        # Same pairing as the original loop: *data* must cover every key index.
        payload = {key: data[i] for i, key in enumerate(keys)}
    # Bug fix: context manager ensures the handle is flushed and closed
    # (the original leaked the file object passed to pickle.dump).
    with open(filename, 'wb') as fh:
        pickle.dump(payload, fh)
def create_xlsx(filename, keys, data = None, dat = False):
    """Render a dict (or parallel keys/data sequences) of arrays into an .xlsx sheet.

    Each entry gets a formatted title cell followed by its values; entries are
    laid out left-to-right with a dark separator column between them.
    When ``dat`` is True, a pickled copy is also written next to the workbook.
    """
    if(dat == True):
        fname, fext = os.path.splitext(filename)
        create_dat(fname + '.dat', keys, data)
    workbook = xlsxwriter.Workbook(filename)
    worksheet = workbook.add_worksheet()
    title_format = workbook.add_format({ 'font_size':16, 'font_color':'#02172C',
                                         'bold':True, 'bg_color':'#7FF000'}) #A9E063
    separator_format = workbook.add_format({'bg_color':'#434446'}) #A72307
    # Accept either a single dict (data omitted) or separate keys/data sequences.
    if(data == None):
        try:
            d_keys = list(keys.keys())
            d_data = list(keys.values())
        except:
            print('A dictionary is expected as 2nd positional argument')
            return
    else:
        d_keys = keys
        d_data = data
    current_column = 3
    current_row = 3
    for i in range(len(d_data)):
        worksheet.write(current_row, current_column, d_keys[i], title_format)
        rows, cols, current_data = np_2darray_converter(d_data[i])
        # np_2darray_converter signals unsupported (object-dtype) data with -1.
        if(rows == -1):
            continue
        if(rows == 1):
            # 1-D data: write as a single column, then advance past a separator.
            worksheet.write_column(current_row + 2, current_column, current_data[0])
            current_column += 4
            worksheet.set_column(current_column-2, current_column-2, width = 3, cell_format = separator_format)
        else:
            # 2-D data: one worksheet row per array row, title band across the top.
            worksheet.conditional_format(current_row, current_column + 1, current_row, current_column + cols - 1, {'type':'blanks',
                                         'format':title_format})
            for j in range(rows):
                worksheet.write_row(current_row + 2 + j, current_column, current_data[j])
            current_column += cols + 3
            worksheet.set_column(current_column-2, current_column-2,width = 3, cell_format = separator_format)
    workbook.close()
| StarcoderdataPython |
9604742 | """ Entity Extraction from OCR text Analysis """
import extract as extract
import ocr as ocr
import pandas as pd
import numpy as np
from fuzzywuzzy import fuzz
# Read the csv file to be evaluated
df = pd.read_csv("data/ocrdata.csv")
# Process Text
df["Processed Text"] = df["OCR Text"].str.split()
ocrmodel = ocr.handwrittenOCR()
df["Processed Text"] = df["Processed Text"].apply(ocrmodel.processText)
# Predict the entities from the input OCR text
model = extract.handwrittenText()
df["Predicted Scientific Name"] = (
df["Processed Text"].dropna().apply(model.findScientificName)
)
df["Predicted Geography"] = df["Processed Text"].dropna().apply(model.findGeography)
df["Predicted Collector"] = df["Processed Text"].dropna().apply(model.findCollector)
df["Predicted Year"] = df["Processed Text"].dropna().apply(model.findYear)
# Calculate the number of approximately matched entities
df["Barcode Matches"] = np.where(df["Barcode"] == df["Predicted Barcode"], True, False)
df["Scientific Name Matches"] = df.apply(
lambda x: fuzz.partial_ratio(x["Scientific Name"], x["Predicted Scientific Name"]),
axis=1,
)
df["Geography Matches"] = df.apply(
lambda x: fuzz.partial_ratio(x["Geography"], x["Predicted Geography"]),
axis=1,
)
df["Collector Matches"] = df.apply(
lambda x: fuzz.partial_ratio(x["Collector"], x["Predicted Collector"]),
axis=1,
)
df["Year Matches"] = df.apply(
lambda x: fuzz.partial_ratio(x["Year"], x["Predicted Year"]),
axis=1,
)
# Calculate and Print Accuracy for each entity
barcode_accuracy = 100 * df["Barcode Matches"].values.sum() / df.shape[0]
scientific_name_accuracy = df["Scientific Name Matches"].mean(skipna=True)
geography_accuracy = df["Geography Matches"].mean(skipna=True)
collector_accuracy = df["Collector Matches"].mean(skipna=True)
year_accuracy = df["Year Matches"].mean(skipna=True)
# Print the accuracies
print("******************Accuracy******************")
print("Barcode : {:}".format(barcode_accuracy))
print("Scientific Name : {:}".format(scientific_name_accuracy))
print("Geography : {:}".format(geography_accuracy))
print("Collector : {:}".format(collector_accuracy))
print("Year : {:}".format(year_accuracy))
# Export the predicted results to a csv file
outfile = "data/outputocr.csv"
df.to_csv(outfile, index=True)
| StarcoderdataPython |
286627 | <reponame>davidsoergel/tensorboard
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Demo data for the profile dashboard"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
RUNS = ['foo', 'bar']
TRACE_ONLY = ['foo']
TRACES = {}
TRACES['foo'] = """
devices { key: 2 value {
name: 'Foo2'
device_id: 2
resources { key: 2 value {
resource_id: 2
name: 'R2.2'
} }
} }
devices { key: 1 value {
name: 'Foo1'
device_id: 1
resources { key: 2 value {
resource_id: 1
name: 'R1.2'
} }
} }
trace_events {
device_id: 1
resource_id: 2
name: "E1.2.1"
timestamp_ps: 100
duration_ps: 10
}
trace_events {
device_id: 2
resource_id: 2
name: "E2.2.1"
timestamp_ps: 90
duration_ps: 40
}
"""
TRACES['bar'] = """
devices { key: 2 value {
name: 'Bar2'
device_id: 2
resources { key: 2 value {
resource_id: 2
name: 'R2.2'
} }
} }
devices { key: 1 value {
name: 'Bar1'
device_id: 1
resources { key: 2 value {
resource_id: 1
name: 'R1.2'
} }
} }
trace_events {
device_id: 1
resource_id: 2
name: "E1.2.1"
timestamp_ps: 10
duration_ps: 1000
}
trace_events {
device_id: 2
resource_id: 2
name: "E2.2.1"
timestamp_ps: 105
}
"""
| StarcoderdataPython |
6497116 | <filename>tests/writer/test_xml_writer.py
"""
Test objconfig.writer.Xml
"""
from objconfig.writer import Xml as XmlWriter
from objconfig.reader import Xml as XmlReader
from objconfig.writer import AbstractWriter
from objconfig.writer import WriterInterface
import os
def test_emptyinstantiation_xml():
    """A default-constructed Xml writer implements both writer base types."""
    writer = XmlWriter()
    checks = (
        (AbstractWriter, "Xml not child of AbstractWriter"),
        (WriterInterface, "Xml not child of WriterInterface"),
    )
    for base, message in checks:
        assert isinstance(writer, base), message
def test_render_xml():
    """Round-trip a nested config dict through the Xml writer and reader."""
    expected = {
        "webhost": "www.example.com",
        "database": {
            "adapter": "pdo_mysql",
            "params": {
                "host": "db.example.com",
                "username": "dbuser",
                "password": "<PASSWORD>",
                "dbname": "dbproduction",
            },
        },
    }
    rendered = XmlWriter().toString(expected)
    roundtrip = XmlReader().fromString(rendered)
    assert expected == roundtrip, "Xml improperly rendered"
def test_render_tofile_xml():
    """Write a config dict to an XML file, read it back, and compare."""
    expected = {
        "webhost": "www.example.com",
        "database": {
            "adapter": "pdo_mysql",
            "params": {
                "host": "db.example.com",
                "username": "dbuser",
                "password": "<PASSWORD>",
                "dbname": "dbproduction",
            },
        },
    }
    target = os.path.join(os.path.dirname(os.path.realpath(__file__)), "test.xml")
    XmlWriter().toFile(target, expected)
    roundtrip = XmlReader().fromFile(target)
    os.remove(target)
    assert expected == roundtrip, "Xml improperly rendered in file"
| StarcoderdataPython |
3477148 | #!/usr/bin/env python3
#A shell is a command line interface used to interact with your operating system"
#EX: Zsh Fish Bash(<-for Linux shell)
#environment variables
#The commands:
#Echo: print text in Linux shell terminal $Variable
import os
#Python
#os.environ dictionary to access environment variables in Python
#.get([variable to find], [if var not found]) Method to access dictionary values in environ
print("HOME: " + os.environ.get("HOME", ""))
print("SHELL: " + os.environ.get("SHELL", ""))
print("FRUIT: " + os.environ.get("FRUIT", ""))
| StarcoderdataPython |
11372546 | <filename>spinup/utils/constants.py
class PlottingConstants:
    """String keys used to label logged statistics for plotting.

    The ``*_STATISTICS`` lists bundle related keys for bulk iteration.
    Groups: whole-run aggregates, successful-episode aggregates, per-episode
    averages, raw single-episode traces, and whole-experiment series.
    """
    # --- Aggregates computed over an entire run ---
    EPISODE_TIME_STEPS = 'episode_time_steps'
    EPISODE_REWARD = 'episode_reward'
    MINIMAL_DISTANCE_PER_EPISODE = 'minimal_distance_per_episode'
    AVERAGE_DISTANCE_PER_EPISODE = 'average_distance_per_episode'
    NUMBER_OF_FAILURES = 'number_of_failures'
    WHOLE_RUN_STATISTICS = [EPISODE_TIME_STEPS, EPISODE_REWARD,
                            MINIMAL_DISTANCE_PER_EPISODE,
                            AVERAGE_DISTANCE_PER_EPISODE,
                            NUMBER_OF_FAILURES]
    # --- Aggregates restricted to successful episodes ---
    EPISODE_TIME_STEPS_SUCCESSFUL = 'episode_time_steps_successful'
    EPISODE_REWARD_SUCCESSFUL = 'episode_reward_successful'
    MINIMAL_DISTANCE_PER_EPISODE_SUCCESSFUL = \
        'minimal_distance_per_episode_successful'
    AVERAGE_DISTANCE_PER_EPISODE_SUCCESSFUL = \
        'average_distance_per_episode_successful'
    AVERAGE_SUCCESSFUL_EPISODE_EXPERIMENT_STATISTICS = [
        EPISODE_TIME_STEPS_SUCCESSFUL,
        EPISODE_REWARD_SUCCESSFUL,
        MINIMAL_DISTANCE_PER_EPISODE_SUCCESSFUL,
        AVERAGE_DISTANCE_PER_EPISODE_SUCCESSFUL]
    # --- Per-episode averages over successful episodes only ---
    AVERAGE_ACTION_SUCCESSFUL = 'average_action_successful'
    AVERAGE_LOCATION_SUCCESSFUL = 'average_location_successful'
    AVERAGE_DISTANCE_SUCCESSFUL = 'average_distance_successful'
    AVERAGE_LINEAR_VELOCITY_SUCCESSFUL = 'average_linear_velocity_successful'
    AVERAGE_ANGLE_SUCCESSFUL = 'average_angle_successful'
    AVERAGE_ANGULAR_VELOCITY_SUCCESSFUL = 'average_angular_velocity_successful'
    AVERAGE_SUCCESSFUL_EPISODE_STATISTICS = [AVERAGE_ACTION_SUCCESSFUL,
                                             AVERAGE_LOCATION_SUCCESSFUL,
                                             AVERAGE_DISTANCE_SUCCESSFUL,
                                             AVERAGE_LINEAR_VELOCITY_SUCCESSFUL,
                                             AVERAGE_ANGLE_SUCCESSFUL,
                                             AVERAGE_ANGULAR_VELOCITY_SUCCESSFUL]
    # --- Per-episode averages over all episodes ---
    AVERAGE_ACTION = 'average_action'
    AVERAGE_LOCATION = 'average_location'
    AVERAGE_DISTANCE = 'average_distance'
    AVERAGE_LINEAR_VELOCITY = 'average_linear_velocity'
    AVERAGE_ANGLE = 'average_angle'
    AVERAGE_ANGULAR_VELOCITY = 'average_angular_velocity'
    AVERAGE_HEIGHT = 'average_height'
    # NOTE(review): AVERAGE_HEIGHT is defined but not included in this list —
    # confirm whether that is intentional (HEIGHT is likewise excluded below).
    AVERAGE_EPISODE_STATISTICS = [AVERAGE_ACTION,
                                  AVERAGE_LOCATION,
                                  AVERAGE_DISTANCE,
                                  AVERAGE_LINEAR_VELOCITY,
                                  AVERAGE_ANGLE,
                                  AVERAGE_ANGULAR_VELOCITY]
    # --- Raw single-episode traces ---
    ACTION = 'action'
    LOCATION = 'location'
    DISTANCE = 'distance'
    LINEAR_VELOCITY = 'linear_velocity'
    ANGLE = 'angle'
    ANGULAR_VELOCITY = 'angular_velocity'
    HEIGHT = 'height'
    SINGLE_EPISODE_STATISTICS = [ACTION,
                                 LOCATION,
                                 DISTANCE,
                                 LINEAR_VELOCITY,
                                 ANGLE,
                                 ANGULAR_VELOCITY]
    # --- Whole-experiment time series ---
    WHOLE_EXPERIMENT_REWARDS = 'whole_experiment_rewards'
    WHOLE_EXPERIMENT_TIME_STEPS = 'whole_experiment_time_steps'
    WHOLE_EXPERIMENT_STATISTICS = [WHOLE_EXPERIMENT_REWARDS,
                                   WHOLE_EXPERIMENT_TIME_STEPS]
| StarcoderdataPython |
201160 | #////////////////////////////////////////
#////////////////////////////////////////
#////////<NAME> -- @SP1D5R//////////
#////////////////////////////////////////
#////////////////////////////////////////
from qiskit import QuantumProgram
QP = QuantumProgram() #Definine QuantumProgram as pq
QR = QP.create_quantum_register('QR', 2) #Defining a Quantum Register With 2 Qubits
CR = QP.create_classical_register('CR',2) #Defining a Classical Register With 2 bits
QC = QP.create_circuit('QC', [QR], [CR]) #Creating a Quantum Circuit named as QC and passing it through the QuantumRegister (QR) and the ClassicalRegister (CR)
#//////////////////////
#//////////////////////
#Creating components of the objects just creates and store them in variables
#//////////////////////
#//////////////////////
Circuit = QP.get_circuit('QC') #Create a variable named Circuit and its the quantum program object and the method in the object is called get_circuit and the circuit we are getting is teh QuantumCircuit (QC)
Quantum_R = QP.get_quantum_register('QR') # Create a variable named quantum register and thats holding the results of running the get_quantum_register on the QuantumRegister
Classical_R = QP.get_classical_register('CR') # Does the same thing as above but for the classical register
#We can add operators to the circuit i.e. gates, joins, nots and things... the gates can simulate an applied magnetic field to a wave function which can collapse it in a certain direction once measured
Circuit.x(Quantum_R[0]) #on line 11 we defined that there are 2 qubits on the quantum register, but it's zero-origin indexed, so we act on the first one, applying an X gate
Circuit.y(Quantum_R[1]) # now we apply a Y gate to the second qubit.
Circuit.cx(Quantum_R[0], Quantum_R[1]) #we are running a controlled not gate (cx)
# now we take a measurement of that and compare it to the classical one
Circuit.measure(Quantum_R[0], Classical_R[0])
Circuit.measure(Quantum_R[1], Classical_R[1])
QC.measure(QR, CR)
#setting the backend
Backend = 'local_qasm_simulator'
Circuits = ['QC']
# pass that through a variable named results which will run and compile the circuits
Result = QP.execute(Circuits, Backend, wait = 2, timeout = 240)
print(Result) #print that the circuit has been completed
Result.get_counts('QC')
OUT = Result.get_ran_qasm('QC')
print(OUT)
''' The Output
1. COMPLETED
2. OPENQASM 2.0;
3. include "qelib1.inc";
4. qreg QR[2];
5. creg CR[2];
6. u3(3.14159265358979,1.57079632679490,1.57079632679490) QR[1];
7. u3(3.14159265358979,0,3.14159265358979) QR[0];
8. cx QR[0],QR[1];
9. measure QR[0] -> CR[0];
10. measure QR[1] -> CR[1];
'''
# line 1. shows the COMPLETED as the result from line 46
# We got out expectation values from line 49 in line 6. is pi, pi/2, pi/2
# And for the first Quantum bit we got pi, pi/0, pi/1
| StarcoderdataPython |
6537242 | <reponame>EllenRoberts/MA
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 15 09:44:32 2019
@author: efr587
"""
import pandas
import pandas as pd
import numpy as np
#read csv into pandas with tab delineation------
genre_data = pandas.read_csv('U:/Git/MA_Data_Processing/Milton_texts_classifier.csv', sep=',', encoding='utf-8')
Milton_data = pandas.read_csv('U:/Git/data_files/data_files_milton/Milton_data_all.csv', sep='\t', encoding='utf-8')
# Left-join genre labels onto the full dataset (merge keys inferred from shared columns).
new_df = pd.merge(Milton_data, genre_data, how='left')
new_df.to_csv('U:/Git/data_files/data_files_milton/Milton_data_all_with_genres.csv', sep='\t', encoding='utf-8')
5089845 | <filename>final/170401024/istemci.py
#E<NAME>AN 170401024
import socket
import os
from time import gmtime,strftime
from datetime import datetime
import time
import datetime
# Client: receives clock offset and an epoch timestamp from the server, then
# sets the local system clock accordingly.
host = input("Sunucu ip adresini girin: ")
#host = '192.168.1.36'
port = 142
msg = "gecikme hesabi"
kmt = 'sudo date --set='
frmt = '%m/%d/%Y %H:%M:%S.%f'
try:
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((host, port))
    print("Sunucu ile bağlanılıyor.")
except socket.error as err:
    # Bug fix: the original caught ``s.error`` (socket objects have no such
    # attribute) and closed an undefined name ``sock``; also stop here instead
    # of falling through to recv() on a dead socket.
    print("Hata!", err)
    raise SystemExit(1)
msj = s.recv(1024)
offset = s.recv(1024)
offset = offset.decode()
data = s.recv(1024)
data = data.decode()
print("data: ", data)
print(str(data)+' UTC'+str(offset))
# The server sends epoch milliseconds; convert to seconds for fromtimestamp().
a = float(data)/1000.0
saat = datetime.datetime.fromtimestamp(a).strftime(frmt)
print("Tarih ve Saat: ", saat)
kmt = kmt + '"' + saat + '"'
print(kmt, 'komut')
# NOTE(review): ``saat`` is derived from network data and interpolated into a
# shell command — validate it before os.system() in untrusted environments.
os.system(kmt)
s.close()
| StarcoderdataPython |
5058077 | #!/usr/bin/env python
import time, os, sys, argparse, pwd, grp
from scapy.all import *
# Many systems require root access for packet sniffing, but we probably
# don't want to be running any commands as root. Drop to whatever user
# has been requested before we run anything.
def dropPrivileges(username):
    """Drop root privileges to *username*; no-op when not root or no user given."""
    if( os.getuid() != 0 or username == None ):
        return
    uid = pwd.getpwnam(username).pw_uid
    gid = pwd.getpwnam(username).pw_gid
    # Remove existing groups
    os.setgroups([])
    # And set the new credentials
    # Note: Set GID *first* so we still have root power to set uid after
    os.setgid(gid)
    os.setuid(uid)
def trigger(username, command, addr):
    """Fork; the child drops privileges, runs *command*, and exits with its status.

    The parent returns immediately.  ``addr`` (the knocking client) is
    currently unused by the command.
    """
    pid = os.fork()
    if( pid == 0 ): # Child
        dropPrivileges(username)
        # We *should* use exec here, but that requires parsing the command
        # to split arguments, and getting the path to the executable
        ret = os.system(command)
        sys.exit(ret)
def clearOldKnocks(maxAge, clients):
    """Forget knock sequences whose first knock is more than *maxAge* seconds old."""
    now = int(time.strftime("%s"))
    for c in list(clients.keys()):
        if now - clients[c][0] > maxAge:
            del clients[c]

def addKnock(maxAge, sequence, addr, port, clients):
    """Record a knock on *port* from *addr*; return True when the sequence completes.

    Stale half-finished sequences are expired first.  A knock that breaks the
    expected order discards the client's progress.
    """
    now = int(time.strftime("%s"))
    clearOldKnocks(maxAge, clients)
    if addr in clients:
        numEntered = len(clients[addr])
        # Must be the next port in the sequence, otherwise start over.
        if port == sequence[numEntered]:
            clients[addr].append(now)
        else:
            del clients[addr]
            return False
    elif port == sequence[0]:
        # First knock of a new sequence.
        clients[addr] = [now]
    else:
        # Bug fix: the original indexed clients[addr] below even when the
        # first knock was wrong, raising KeyError whenever a later port of
        # the sequence arrived first.
        return False
    if len(clients[addr]) == len(sequence):
        # Bug fix: clear the completed entry; the original kept it, so the
        # next knock from the same address indexed past the end of sequence.
        del clients[addr]
        return True
    return False
def process(maxAge, sequence, command, username, clients):
    """Return a scapy packet callback that tracks knocks and fires *command* on success."""
    def process_packet(packet):
        # NOTE(review): assumes packet[1] is the IP layer and packet[2] the TCP
        # layer (scapy layer indexing) — confirm for non-IP/TCP traffic.
        src = packet[1].src
        port = packet[2].dport
        if( port in sequence ):
            triggered = addKnock(maxAge, sequence, src, port, clients)
            if( triggered ):
                trigger(username, command, src)
        # Sequence broken
        elif( src in clients ):
            del clients[src]
    return process_packet
class Parser(argparse.ArgumentParser):
    """ArgumentParser that prints full usage help after any argument error."""
    def error(self, message):
        sys.stderr.write("error: %s\n" % message)
        self.print_help()
        sys.exit(2)
def parseOptions():
    """Parse command-line options; return [maxAge, sequence, command, username]."""
    descr = "A trivial port knocker."
    parser = Parser(description=descr)
    parser.add_argument("-t", "--timeout",
        action="store", type=int, dest="maxAge", default=10,
        metavar="TIMEOUT", help="Maximum timeout for knocking sequence")
    parser.add_argument("-u", "--user",
        action="store", type=str, dest="user", default=None,
        metavar="USER", help="User to run commands as if script running as root")
    parser.add_argument("-s", "--sequence",
        action="store", nargs="+", type=int, required=True, dest="sequence",
        metavar="SEQUENCE", help="Port knock sequence to listen for")
    parser.add_argument("command", metavar="<command>", nargs=1,
        help="Command to run on successful port knock")
    options = parser.parse_args()
    sequence = options.sequence
    # nargs=1 yields a single-element list; unwrap it.
    command = options.command[0]
    username = options.user
    maxAge = options.maxAge
    return [maxAge, sequence, command, username]
if __name__ == "__main__":
[maxAge, sequence, command, username] = parseOptions()
sniff(filter="tcp", prn=process(maxAge, sequence, command, username, dict()))
| StarcoderdataPython |
247374 | <reponame>mammothb/syndata-generation
import argparse
import random
import signal
from collections import namedtuple
from functools import partial
from multiprocessing import Pool
from pathlib import Path
import yaml
from PIL import Image
from defaults import CONFIG_FILE
from util_bbox import overlap
from util_image import (
add_localized_distractor,
blend_object,
get_annotation_from_mask,
get_annotation_from_mask_file,
invert_mask,
linear_motion_blur_3c,
perspective_transform,
pil_to_array_3c,
scale_object,
rotate_object,
)
from util_io import (
get_labels,
get_occlusion_coords,
get_list_of_images,
get_mask_file,
print_paths,
write_imageset_file,
write_labels_file,
)
Rectangle = namedtuple("Rectangle", "xmin ymin xmax ymax")
CWD = Path(__file__).resolve().parent
# SEED = 123
# random.seed(SEED)
def keep_selected_labels(img_files, labels, occ_coords, conf):
    """Keep only the entries whose label is listed in ``conf["selected"]``.

    Args:
        img_files(list): Image paths in the root directory.
        labels(list): Label for each image, aligned by index.
        occ_coords(list): Localized-distractor coordinates, aligned by index.
        conf(dict): Config options; ``conf["selected"]`` names the labels to keep.

    Returns:
        Tuple-like triple of filtered (img_files, labels, occ_coords) lists.
    """
    selected = conf["selected"]
    new_img_files = []
    new_labels = []
    new_occ_coords = []
    for idx, img_file in enumerate(img_files):
        label = labels[idx]
        if label not in selected:
            continue
        new_img_files.append(img_file)
        new_labels.append(label)
        new_occ_coords.append(occ_coords[idx])
    return new_img_files, new_labels, new_occ_coords
def create_image_anno_wrapper(
    args, conf, opt, blending_list=["none"],
):
    """Wrapper used to pass params to workers"""
    # Unpacks the per-image argument tuple for multiprocessing Pool workers.
    # NOTE(review): mutable default list is safe only while callees never
    # mutate blending_list — confirm before extending.
    return create_image_anno(*args, conf, opt, blending_list=blending_list)
def constrained_randint(frac, fg_dim, bg_dim):
    """Random placement offset allowing at most *frac* truncation of the foreground.

    Draws uniformly from [int(-frac*fg_dim), int(bg_dim - fg_dim + frac*fg_dim)].

    Args:
        frac(float): Max allowed truncation fraction.
        fg_dim(int): Foreground dimension (height or width).
        bg_dim(int): Background dimension (height or width).
    """
    low = int(-frac * fg_dim)
    high = int(bg_dim - fg_dim + frac * fg_dim)
    return random.randint(low, high)
def constrained_rand_num(obj_type, max_num, conf):
return min(
random.randint(conf[f"min_{obj_type}_num"], conf[f"max_{obj_type}_num"]),
max_num,
)
def create_image_anno(
objects,
distractor_objects,
localized_distractors,
img_file,
anno_file,
bg_file,
conf,
opt,
blending_list=["none"],
):
"""Add data augmentation, synthesizes images and generates annotations according to given parameters
Args:
objects(list): List of objects whose annotations are also important
distractor_objects(list): List of distractor objects that will be synthesized but whose annotations are not required
img_file(str): Image file name
anno_file(str): Annotation file name
bg_file(str): Background image path
conf(dict): Config options
opt(Namespace): Contains options to:
1. Add scale data augmentation
2. Add rotation data augmentation
3. Generate images with occlusion
4. Add distractor objects whose annotations are not required
blending_list(list): List of blending modes to synthesize for each image
"""
if "none" not in img_file.name:
return
print(f"Working on {img_file}")
if anno_file.exists():
return anno_file
all_objects = objects + distractor_objects
assert len(all_objects) > 0
w = conf["width"]
h = conf["height"]
while True:
boxes = []
background = Image.open(bg_file)
background = background.resize((w, h), Image.ANTIALIAS)
backgrounds = []
for _ in blending_list:
backgrounds.append(background.copy())
if not opt.occlude:
already_syn = []
for idx, obj in enumerate(all_objects):
foreground = Image.open(obj[0])
mask_file = get_mask_file(obj[0])
xmin, xmax, ymin, ymax = get_annotation_from_mask_file(
mask_file, conf["inverted_mask"]
)
mask = Image.open(mask_file)
if conf["inverted_mask"]:
mask = invert_mask(mask)
mask_bb = mask.copy()
if (
xmin == -1
or ymin == -1
or xmax - xmin < conf["min_width"]
or ymax - ymin < conf["min_height"]
):
continue
# foreground_crop = foreground.crop((xmin, ymin, xmax, ymax))
# orig_w, orig_h = foreground_crop.size
orig_w, orig_h = xmax - xmin, ymax - ymin
if idx < len(objects) and localized_distractors[idx]:
for distractor in localized_distractors[idx]:
foreground, mask, mask_bb = add_localized_distractor(
distractor,
foreground.size,
(orig_w, orig_h),
conf,
opt,
foreground,
mask,
mask_bb,
)
xmin, xmax, ymin, ymax = get_annotation_from_mask(mask)
foreground = foreground.crop((xmin, ymin, xmax, ymax))
mask = mask.crop((xmin, ymin, xmax, ymax))
mask_bb = mask_bb.crop((xmin, ymin, xmax, ymax))
orig_w, orig_h = foreground.size
rel_scale = 1
if orig_w > orig_h and orig_w > w * 0.75:
rel_scale = w * 0.75 / orig_w
elif orig_h > orig_w and orig_h > h * 0.75:
rel_scale = h * 0.75 / orig_h
orig_w, orig_h = int(orig_w * rel_scale), int(orig_h * rel_scale)
foreground = foreground.resize((orig_w, orig_h), Image.ANTIALIAS)
mask = mask.resize((orig_w, orig_h), Image.ANTIALIAS)
mask_bb = mask_bb.resize((orig_w, orig_h), Image.ANTIALIAS)
o_w, o_h = orig_w, orig_h
if opt.scale:
foreground, mask, mask_bb = scale_object(
foreground, mask, mask_bb, h, w, orig_h, orig_w, conf
)
if opt.rotate:
foreground, mask, mask_bb = rotate_object(
foreground, mask, mask_bb, h, w, conf
)
o_w, o_h = foreground.size
if opt.perspective:
foreground, mask, mask_bb = perspective_transform(
foreground, mask, mask_bb, o_h, o_w, conf
)
o_w, o_h = foreground.size
xmin, xmax, ymin, ymax = get_annotation_from_mask(mask_bb)
attempt = 0
while True:
attempt += 1
x = constrained_randint(conf["max_truncation_frac"], o_w, w)
y = constrained_randint(conf["max_truncation_frac"], o_h, h)
if opt.occlude:
break
found = True
for prev in already_syn:
ra = Rectangle(prev[0], prev[2], prev[1], prev[3])
rb = Rectangle(x + xmin, y + ymin, x + xmax, y + ymax)
if overlap(ra, rb, conf["max_allowed_iou"]):
found = False
break
if found:
break
if attempt == conf["max_attempts"]:
break
if not opt.occlude:
already_syn.append([x + xmin, x + xmax, y + ymin, y + ymax])
for i, blending in enumerate(blending_list):
backgrounds[i] = blend_object(
blending, backgrounds[i], foreground, mask, x, y
)
if idx >= len(objects):
continue
x_min = max(1, x + xmin)
x_max = min(w, x + xmax)
y_min = max(1, y + ymin)
y_max = min(h, y + ymax)
boxes.append(
f"{str(obj[1])} "
f"{(x_min + x_max) / 2 / w} "
f"{(y_min + y_max) / 2 / h} "
f"{(x_max - x_min) / w} "
f"{(y_max - y_min) / h}"
)
if attempt == conf["max_attempts"]:
continue
else:
break
for i, blending in enumerate(blending_list):
if blending == "motion":
backgrounds[i] = linear_motion_blur_3c(pil_to_array_3c(backgrounds[i]))
backgrounds[i].save(str(img_file).replace("none", blending))
with open(anno_file, "w") as f:
f.write("\n".join(boxes))
def gen_syn_data(
    img_files, labels, occ_coords, img_dir, anno_dir, conf, opt,
):
    """Creates the lists of objects and distractor objects to be pasted,
    then spawns worker processes that synthesize the images.

    Args:
        img_files(list): List of image files
        labels(list): List of labels for each image
        occ_coords(list): List of coordinates for localized distractor
        img_dir(str): Directory where synthesized images will be stored
        anno_dir(str): Directory where corresponding annotations will be stored
        conf(dict): Config options
        opt(Namespace): Contains options to:
            1. Add scale data augmentation
            2. Add rotation data augmentation
            3. Generate images with occlusion
            4. Add distractor objects whose annotations are not required

    Returns:
        tuple: (list of output image paths, list of output annotation paths)
    """
    # Candidate backgrounds to paste objects onto.
    background_files = list(
        (CWD / conf["background_dir"]).glob(conf["background_glob"])
    )
    print(f"Number of background images: {len(background_files)}")
    # Bundle each image with its label and occlusion coordinates.
    img_labels = list(zip(img_files, labels, occ_coords))
    random.shuffle(img_labels)
    if opt.distract:
        distractor_list = []
        for label in conf["distractor"]:
            distractor_list += list(
                (CWD / conf["distractor_dir"] / label).glob(conf["distractor_glob"])
            )
        # Pair each distractor with a None label so tuples match object tuples.
        distractor_files = list(zip(distractor_list, len(distractor_list) * [None]))
        random.shuffle(distractor_files)
    else:
        distractor_files = []
    print_paths("List of distractor files collected:", distractor_files)
    idx = 0
    # NOTE(review): this rebinds the *parameter* ``img_files``; from here on it
    # accumulates output paths, not the input list passed by the caller.
    img_files = []
    anno_files = []
    params_list = []
    while len(img_labels) > 0:
        # Get list of objects
        objects = []
        n = constrained_rand_num("object", len(img_labels), conf)
        for _ in range(n):
            objects.append(img_labels.pop())
        # Get list of distractor objects
        distractor_objects = []
        if opt.distract:
            n = constrained_rand_num("distractor", len(distractor_files), conf)
            for _ in range(n):
                distractor_objects.append(random.choice(distractor_files))
            print_paths("Chosen distractor objects:", distractor_objects)
        localized_distractors = []
        if opt.localized_distractor:
            # One random distractor per occlusion coordinate of each object.
            # (``random.random() < 1.0`` is always true; presumably a placeholder
            # for a sampling probability -- TODO confirm.)
            for obj in objects:
                localized_distractors.append(
                    [
                        (random.choice(distractor_files), coord)
                        for coord in obj[2]
                        if random.random() < 1.0
                    ]
                )
        idx += 1
        bg_file = random.choice(background_files)
        # One work item per blending mode for this composite image.
        for blur in conf["blending"]:
            img_file = img_dir / f"{idx}_{blur}.jpg"
            anno_file = anno_dir / f"{idx}.txt"
            params = (
                objects,
                distractor_objects,
                localized_distractors,
                img_file,
                anno_file,
                bg_file,
            )
            params_list.append(params)
            img_files.append(img_file)
            anno_files.append(anno_file)
    # Freeze the shared arguments so workers only receive per-image params.
    partial_func = partial(
        create_image_anno_wrapper, conf=conf, opt=opt, blending_list=conf["blending"],
    )
    # Workers ignore SIGINT (init_worker); the parent handles Ctrl+C below.
    p = Pool(conf["num_workers"], init_worker)
    try:
        p.map(partial_func, params_list)
    except KeyboardInterrupt:
        print("....\nCaught KeyboardInterrupt, terminating workers")
        p.terminate()
    else:
        p.close()
        p.join()
    # Serial fallback for debugging:
    # for params in params_list:
    #     create_image_anno_wrapper(
    #         params, conf=conf, opt=opt, blending_list=conf["blending"],
    #     )
    return img_files, anno_files
def init_worker():
    """Make pool worker processes ignore SIGINT (Ctrl+C).

    The parent process catches KeyboardInterrupt itself and terminates the
    pool, so workers must not die mid-task from the same signal.
    (Also fixes the "termiante" typo in the original docstring.)
    """
    signal.signal(signal.SIGINT, signal.SIG_IGN)
def generate_synthetic_dataset(opt):
    """Generate a synthetic dataset according to the parsed CLI args.

    Reads inputs under ``opt.root``, loads the YAML config, and writes
    images/annotations plus index files under ``opt.exp``.
    """
    # Collect input images (each repeated opt.num times) and their metadata.
    img_files = get_list_of_images(CWD / opt.root, opt.num)
    labels = get_labels(img_files)
    occ_coords = get_occlusion_coords(img_files)
    with open(CONFIG_FILE, "r") as infile:
        conf = yaml.safe_load(infile)
    if opt.selected:
        # Filter down to the label subset configured in conf.
        img_files, labels, occ_coords = keep_selected_labels(
            img_files, labels, occ_coords, conf
        )
    exp_dir = CWD / opt.exp
    exp_dir.mkdir(parents=True, exist_ok=True)
    write_labels_file(exp_dir, labels)
    # Output layout: <exp>/images/*.jpg and <exp>/annotations/*.txt
    anno_dir = exp_dir / "annotations"
    img_dir = exp_dir / "images"
    anno_dir.mkdir(parents=True, exist_ok=True)
    img_dir.mkdir(parents=True, exist_ok=True)
    syn_img_files, anno_files = gen_syn_data(
        img_files, labels, occ_coords, img_dir, anno_dir, conf, opt
    )
    # Record the (image, annotation) pairs for downstream training scripts.
    write_imageset_file(exp_dir, syn_img_files, anno_files)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Create dataset with augmentations")
    parser.add_argument("root", help="Root directory containing images and annotations")
    parser.add_argument("exp", help="Output directory for images and annotations")
    # store_true flags: feature is OFF unless the flag is passed.
    parser.add_argument(
        "--selected",
        help="Keep only selected instances in the test dataset. Default False",
        action="store_true",
    )
    parser.add_argument(
        "--distract", help="Add distractors objects. Default False", action="store_true"
    )
    parser.add_argument(
        "--occlude",
        help="Allow objects with full occlusion. Default False",
        action="store_true",
    )
    # NOTE: the following use store_false -- the augmentation is ON by
    # default and passing the flag *disables* it.
    parser.add_argument(
        "--perspective",
        help="Add perspective transform. Default True",
        action="store_false",
    )
    parser.add_argument(
        "--rotate", help="Add rotation augmentation. Default True", action="store_false"
    )
    parser.add_argument(
        "--scale", help="Add scale augmentation. Default True", action="store_false"
    )
    parser.add_argument(
        "--num",
        help="Number of times each image will be in dataset",
        default=1,
        type=int,
    )
    parser.add_argument(
        "--localized_distractor",
        help="Add occluding distractors to specified spots. Default False",
        action="store_true",
    )
    opt = parser.parse_args()
    generate_synthetic_dataset(opt)
| StarcoderdataPython |
196575 | import re
from datetime import datetime
from moto.core import get_account_id, BaseBackend
from moto.core.utils import iso_8601_datetime_without_milliseconds, BackendDict
from .exceptions import (
InvalidInputException,
ResourceAlreadyExistsException,
ResourceNotFoundException,
ValidationException,
)
class DatasetGroup:
    """In-memory model of an AWS Forecast dataset group (moto mock)."""

    # NOTE(review): the class "[a-z-A-Z0-9_]" also admits a literal "-";
    # the AWS-documented pattern is "[a-zA-Z0-9_]*". Left unchanged because
    # the pattern text is echoed verbatim in validation error messages --
    # confirm against moto's expected messages before tightening.
    accepted_dataset_group_name_format = re.compile(r"^[a-zA-Z][a-z-A-Z0-9_]*")
    accepted_dataset_group_arn_format = re.compile(r"^[a-zA-Z0-9\-\_\.\/\:]+$")
    accepted_dataset_types = [
        "INVENTORY_PLANNING",
        "METRICS",
        "RETAIL",
        "EC2_CAPACITY",
        "CUSTOM",
        "WEB_TRAFFIC",
        "WORK_FORCE",
    ]

    def __init__(
        self, region_name, dataset_arns, dataset_group_name, domain, tags=None
    ):
        self.creation_date = iso_8601_datetime_without_milliseconds(datetime.now())
        self.modified_date = self.creation_date
        self.arn = (
            "arn:aws:forecast:"
            + region_name
            + ":"
            + str(get_account_id())
            + ":dataset-group/"
            + dataset_group_name
        )
        self.dataset_arns = dataset_arns if dataset_arns else []
        self.dataset_group_name = dataset_group_name
        self.domain = domain
        self.tags = tags
        # Raises ValidationException if name or domain is invalid.
        self._validate()

    def update(self, dataset_arns):
        """Replace the member dataset ARNs and refresh the modification time."""
        self.dataset_arns = dataset_arns
        # Fix: refresh ``modified_date`` (the attribute initialised in
        # __init__); previously only a separate ``last_modified_date`` was
        # written, leaving ``modified_date`` permanently stale. The old
        # attribute is still set for backward compatibility.
        self.modified_date = iso_8601_datetime_without_milliseconds(datetime.now())
        self.last_modified_date = self.modified_date

    def _validate(self):
        """Collect all validation errors and raise a single ValidationException."""
        errors = []
        errors.extend(self._validate_dataset_group_name())
        errors.extend(self._validate_dataset_group_name_len())
        errors.extend(self._validate_dataset_group_domain())
        if errors:
            # Mimic the AWS "N validation error(s) detected: ..." wording.
            err_count = len(errors)
            message = str(err_count) + " validation error"
            message += "s" if err_count > 1 else ""
            message += " detected: "
            message += "; ".join(errors)
            raise ValidationException(message)

    def _validate_dataset_group_name(self):
        """Return error strings if the name fails the regex constraint."""
        errors = []
        if not re.match(
            self.accepted_dataset_group_name_format, self.dataset_group_name
        ):
            errors.append(
                "Value '"
                + self.dataset_group_name
                + "' at 'datasetGroupName' failed to satisfy constraint: Member must satisfy regular expression pattern "
                + self.accepted_dataset_group_name_format.pattern
            )
        return errors

    def _validate_dataset_group_name_len(self):
        """Return error strings if the name exceeds 63 characters."""
        errors = []
        if len(self.dataset_group_name) >= 64:
            errors.append(
                "Value '"
                + self.dataset_group_name
                + "' at 'datasetGroupName' failed to satisfy constraint: Member must have length less than or equal to 63"
            )
        return errors

    def _validate_dataset_group_domain(self):
        """Return error strings if the domain is not an accepted dataset type."""
        errors = []
        if self.domain not in self.accepted_dataset_types:
            errors.append(
                "Value '"
                + self.domain
                + "' at 'domain' failed to satisfy constraint: Member must satisfy enum value set "
                + str(self.accepted_dataset_types)
            )
        return errors
class ForecastBackend(BaseBackend):
    """Moto backend holding Forecast dataset groups and datasets per region."""

    def __init__(self, region_name):
        super().__init__()
        self.dataset_groups = {}  # arn -> DatasetGroup
        self.datasets = {}  # arn -> dataset
        self.region_name = region_name

    def create_dataset_group(self, dataset_group_name, domain, dataset_arns, tags):
        """Create and register a dataset group.

        Raises InvalidInputException for unknown dataset ARNs and
        ResourceAlreadyExistsException for a duplicate group ARN.
        """
        dataset_group = DatasetGroup(
            region_name=self.region_name,
            dataset_group_name=dataset_group_name,
            domain=domain,
            dataset_arns=dataset_arns,
            tags=tags,
        )
        if dataset_arns:
            for dataset_arn in dataset_arns:
                if dataset_arn not in self.datasets:
                    raise InvalidInputException(
                        "Dataset arns: [" + dataset_arn + "] are not found"
                    )
        if self.dataset_groups.get(dataset_group.arn):
            raise ResourceAlreadyExistsException(
                "A dataset group already exists with the arn: " + dataset_group.arn
            )
        self.dataset_groups[dataset_group.arn] = dataset_group
        return dataset_group

    def describe_dataset_group(self, dataset_group_arn):
        """Return the dataset group for *dataset_group_arn* or raise not-found."""
        try:
            dataset_group = self.dataset_groups[dataset_group_arn]
        except KeyError:
            raise ResourceNotFoundException("No resource found " + dataset_group_arn)
        return dataset_group

    def delete_dataset_group(self, dataset_group_arn):
        """Remove the dataset group, raising if it does not exist."""
        try:
            del self.dataset_groups[dataset_group_arn]
        except KeyError:
            raise ResourceNotFoundException("No resource found " + dataset_group_arn)

    def update_dataset_group(self, dataset_group_arn, dataset_arns):
        """Replace the group's dataset ARNs.

        NOTE(review): new ARNs are validated against the group's *current*
        ARN list rather than ``self.datasets``, so an update can never add
        a previously absent dataset -- confirm this matches the intended
        behaviour before relying on it.
        """
        try:
            dsg = self.dataset_groups[dataset_group_arn]
        except KeyError:
            raise ResourceNotFoundException("No resource found " + dataset_group_arn)
        for dataset_arn in dataset_arns:
            if dataset_arn not in dsg.dataset_arns:
                raise InvalidInputException(
                    "Dataset arns: [" + dataset_arn + "] are not found"
                )
        dsg.update(dataset_arns)

    def list_dataset_groups(self):
        """Return all registered dataset groups."""
        # Equivalent to iterating .items() and discarding the keys.
        return list(self.dataset_groups.values())

    def reset(self):
        """Wipe all state but keep the region binding."""
        region_name = self.region_name
        self.__dict__ = {}
        self.__init__(region_name)
forecast_backends = BackendDict(ForecastBackend, "forecast")
| StarcoderdataPython |
9736570 | <gh_stars>0
# !/usr/bin/python
# -*- coding: UTF-8 -*-
"""
Created on OCT 29, 2020
@author: <EMAIL>
"""
import cProfile
import functools
import os
import pstats
import time
def do_cprofile(filename):
    """Decorator factory: profile the wrapped function with cProfile.

    Profiling is opt-in: it only runs when the ``PROFILING`` environment
    variable is set (checked at call time, so it can be toggled between
    calls). Stats are sorted by internal time and dumped to *filename*
    for later inspection with :mod:`pstats`.
    """
    def wrapper(func):
        @functools.wraps(func)  # preserve __name__/__doc__ of the target
        def profiled_func(*args, **kwargs):
            # Flag for do profiling or not.
            if os.getenv("PROFILING"):
                profile = cProfile.Profile()
                profile.enable()
                try:
                    result = func(*args, **kwargs)
                finally:
                    # Always stop the profiler, even if func raises.
                    profile.disable()
                # Sort stat by internal time.
                sortby = "tottime"
                ps = pstats.Stats(profile).sort_stats(sortby)
                ps.dump_stats(filename)
            else:
                result = func(*args, **kwargs)
            return result
        return profiled_func
    return wrapper
def print_run_time(func):
    """Decorator that prints the wall-clock run time of each call."""
    @functools.wraps(func)  # keep the wrapped function's metadata
    def wrapper(*args, **kw):
        local_time = time.time()
        res = func(*args, **kw)
        print("Current function : {function}, time used : {temps}".format(
            function=func.__name__, temps=time.time() - local_time))
        return res
    return wrapper
| StarcoderdataPython |
import json
import prox
from flask import Flask
app = Flask(__name__)
# GET /check/<ip>/<port>: probe the given proxy and return the result as JSON.
@app.route("/check/<ip>/<int:port>")
def check(ip, port):
    # prox.check_proxy performs the actual connectivity test; its return
    # value is assumed to be JSON-serialisable -- TODO confirm.
    return json.dumps(prox.check_proxy(ip, port))
if __name__ == '__main__':
    # Listen on all interfaces; debug mode is for development only.
    app.run(host="0.0.0.0", debug=True)
| StarcoderdataPython |
#!flask/bin/python
# Development entry point: import the Flask application object from the
# local ``app`` package and serve it with the built-in debug server.
from app import app
app.run(debug=True)
| StarcoderdataPython |
8056624 | <filename>tests/test_wrapped_vc_validators.py
# -*- coding: utf-8 -*-
import datetime
import uuid
from immutable_data_validation import validate_datetime
from immutable_data_validation import validate_float
from immutable_data_validation import validate_int
from immutable_data_validation import validate_str
from immutable_data_validation import validate_uuid
from immutable_data_validation.errors import ValidationCollectionCannotCoerceError
from immutable_data_validation.errors import ValidationCollectionEmptyValueError
from immutable_data_validation.errors import ValidationCollectionMaximumLengthError
from immutable_data_validation.errors import ValidationCollectionMaximumValueError
from immutable_data_validation.errors import ValidationCollectionMinimumLengthError
from immutable_data_validation.errors import ValidationCollectionMinimumValueError
from immutable_data_validation.errors import ValidationCollectionNotAnIntegerError
import pytest
# Shared fixtures for the parametrized tests below: a single UUID and
# datetime generated once at import time, plus the substring every
# "empty value" validation error message is expected to contain.
GENERIC_UUID = uuid.uuid4()
GENERIC_DATETIME = datetime.datetime.now()
EMPTY_VALUE_ERROR_EXPECTED_TEXT = "was empty"
@pytest.mark.parametrize(
    "func,value,kwargs,expected_error,expected_text_in_error,test_description",
    [
        (
            validate_str,
            None,
            {"extra_error_msg": "errmsg26"},
            ValidationCollectionEmptyValueError,
            (EMPTY_VALUE_ERROR_EXPECTED_TEXT, "errmsg26"),
            "null value",
        ),
        (
            validate_str,
            None,
            {"allow_null": True},
            None,
            None,
            "allows None when specified",
        ),
        (
            validate_str,
            293,
            {"extra_error_msg": "errmsg25"},
            ValidationCollectionCannotCoerceError,
            ("was not coerced", "errmsg25"),
            "not a string",
        ),
        (
            validate_str,
            "a",
            {"minimum_length": 2, "extra_error_msg": "errmsg24"},
            ValidationCollectionMinimumLengthError,
            ("below the minimum length", "errmsg24"),
            "too short",
        ),
        (
            validate_str,
            "eli",
            {"maximum_length": 1, "extra_error_msg": "errmsg23"},
            ValidationCollectionMaximumLengthError,
            ("exceeds maximum length", "errmsg23"),
            "too long",
        ),
        (
            validate_uuid,
            None,
            {"extra_error_msg": "error message 7"},
            ValidationCollectionEmptyValueError,
            (EMPTY_VALUE_ERROR_EXPECTED_TEXT, "error message 7"),
            "null value",
        ),
        (
            validate_uuid,
            "notrealuuid",
            {"extra_error_msg": "what error"},
            ValidationCollectionCannotCoerceError,
            ("coerced to a valid UUID", "what error"),
            "not a uuid",
        ),
        (
            validate_uuid,
            None,
            {"allow_null": True},
            None,
            None,
            "allows None when specified",
        ),
        (
            validate_int,
            None,
            {"extra_error_msg": "bobs error"},
            ValidationCollectionEmptyValueError,
            (EMPTY_VALUE_ERROR_EXPECTED_TEXT, "bobs error"),
            "null value",
        ),
        (
            validate_int,
            None,
            {"allow_null": True},
            None,
            None,
            "allows None when specified",
        ),
        (
            validate_int,
            293.9,
            {"extra_error_msg": "error things"},
            ValidationCollectionNotAnIntegerError,
            ("not an integer-type", "error things"),
            "not an int",
        ),
        (
            validate_int,
            -1,
            {"minimum": 0, "extra_error_msg": "error stuff"},
            ValidationCollectionMinimumValueError,
            ("less than minimum", "error stuff"),
            "too low",
        ),
        (
            validate_int,
            300,
            {"maximum": 200, "extra_error_msg": "error info"},
            ValidationCollectionMaximumValueError,
            ("exceeds maximum (", "error info"),
            "too high",
        ),
        (
            validate_int,
            "bob",
            {"extra_error_msg": "error9928"},
            ValidationCollectionCannotCoerceError,
            ("coerced to a numeric", "error9928"),
            "a string",
        ),
        (
            validate_datetime,
            None,
            {"extra_error_msg": "special error info"},
            ValidationCollectionEmptyValueError,
            (EMPTY_VALUE_ERROR_EXPECTED_TEXT, "special error info"),
            "null value",
        ),
        (
            validate_datetime,
            None,
            {"allow_null": True},
            None,
            None,
            "allows None when specified",
        ),
        (
            validate_datetime,
            "two thousand nineteen",
            {"extra_error_msg": "new message"},
            ValidationCollectionCannotCoerceError,
            ("datetime object", "new message"),
            "not a datetime",
        ),
        (
            validate_datetime,
            GENERIC_DATETIME,
            {
                "minimum": datetime.date(year=5000, month=2, day=5),
                "extra_error_msg": "mymessage",
            },
            ValidationCollectionMinimumValueError,
            ("is before the minimum", "mymessage"),
            "too soon",
        ),
        (
            validate_datetime,
            GENERIC_DATETIME,
            {
                "maximum": datetime.date(year=1990, month=2, day=5),
                "extra_error_msg": "dates and times",
            },
            ValidationCollectionMaximumValueError,
            ("after the maximum", "dates and times"),
            "too late",
        ),
        (
            validate_float,
            None,
            {"extra_error_msg": "what a float"},
            ValidationCollectionEmptyValueError,
            [EMPTY_VALUE_ERROR_EXPECTED_TEXT, "what a float"],
            "null value",
        ),
        (
            validate_float,
            None,
            {"allow_null": True},
            None,
            None,
            "allows None when specified",
        ),
        (
            validate_float,
            "pi",
            {"extra_error_msg": "cool float"},
            ValidationCollectionCannotCoerceError,
            ["coerced to a numeric form", "cool float"],
            "not a float",
        ),
        (
            validate_float,
            -1,
            {"minimum": 0},
            ValidationCollectionMinimumValueError,
            "less than minimum",
            "too low",
        ),
        (
            validate_float,
            300,
            {"maximum": 200},
            ValidationCollectionMaximumValueError,
            "exceeds maximum (",
            "too high",
        ),
        (
            validate_float,
            300,
            {
                "maximum": 200,
                "minimum": 23,
                "extra_error_msg": "my_favorite_float is cool",
            },
            ValidationCollectionMaximumValueError,
            " my_favorite_float is cool",
            "extra info in error message",
        ),
        (
            validate_float,
            5,
            {
                "maximum": 200,
                "minimum": 23,
                "extra_error_msg": "my_favorite_float is cool",
            },
            ValidationCollectionMinimumValueError,
            " my_favorite_float is cool",
            "extra info in error message",
        ),
    ],
)
def test_wrapped_error(
    func, value, kwargs, expected_error, expected_text_in_error, test_description
):
    """Each validator raises the expected error containing the expected text.

    When ``expected_error`` is None, the call is simply expected to succeed.
    ``expected_text_in_error`` may be a single substring or a list/tuple of
    substrings that must all appear in the exception message.
    """
    if kwargs is None:
        kwargs = dict()
    def run_func():
        return func(value, **kwargs)
    if expected_error is not None:
        with pytest.raises(expected_error) as e:
            run_func()
        if expected_text_in_error is not None:
            if not isinstance(expected_text_in_error, (list, tuple)):
                expected_text_in_error = [expected_text_in_error]
            for this_expected_text_in_error in expected_text_in_error:
                # Check the exception message itself; str(e) on the
                # ExceptionInfo wrapper is deprecated in pytest and its
                # output format is not guaranteed.
                assert this_expected_text_in_error in str(e.value)
    else:
        run_func()
@pytest.mark.parametrize(
    "func,value,kwargs,expected,test_description",
    [
        (validate_str, "bob", None, "bob", "standard"),
        (validate_uuid, GENERIC_UUID, None, GENERIC_UUID, "standard"),
        (validate_int, 173, None, 173, "standard"),
        (validate_datetime, GENERIC_DATETIME, None, GENERIC_DATETIME, "standard"),
        (validate_float, 3.1415, None, 3.1415, "standard"),
    ],
)
def test_returns_valid_value(func, value, kwargs, expected, test_description):
    """Each validator passes a valid input through unchanged."""
    call_kwargs = {} if kwargs is None else kwargs
    assert func(value, **call_kwargs) == expected
| StarcoderdataPython |
4811811 | #########################################
nrb = geo[0]
from caid.cad_geometry import cad_nurbs
C = np.zeros_like(nrb.points)
_C = np.genfromtxt("u.txt")
shape = list(nrb.shape)
C = np.zeros(shape+[3])
C[...,0] = _C
srf = cad_nurbs(nrb.knots, C, weights= nrb.weights)
#print srf.points
ntx = 80
nty = 80
#nty = 40
#ntx = 60
#nty = 60
tx = np.linspace(0., 1., ntx)
ty = np.linspace(0., 1., nty)
#tx = np.unique(nrb.knots[0])
#ty = np.unique(nrb.knots[1])
## ...
#P = nrb.evaluate_deriv(tx,ty,nderiv=1)
#x = P[0,:,:,0]
#xdu = P[1,:,:,0]
#xdv = P[2,:,:,0]
#
#y = P[0,:,:,1]
#ydu = P[1,:,:,1]
#ydv = P[2,:,:,1]
#
#jac = xdu * ydv - xdv * ydu
## ...
#
## ...
#D = srf.evaluate_deriv(tx,ty,nderiv=1)
#Udu = D[1,...,0]
#Udv = D[2,...,0]
#
#Udx = ydv * Udu - ydu * Udv
#Udx /= jac
#Udy = - xdv * Udu + xdu * Udv
#Udy /= jac
## ...
# ...
P = nrb.evaluate_deriv(tx,ty,nderiv=2)
x = P[0,:,:,0]
xdu = P[1,:,:,0]
xdv = P[2,:,:,0]
xduu = P[3,:,:,0]
xduv = P[4,:,:,0]
xdvv = P[5,:,:,0]
y = P[0,:,:,1]
ydu = P[1,:,:,1]
ydv = P[2,:,:,1]
yduu = P[3,:,:,1]
yduv = P[4,:,:,1]
ydvv = P[5,:,:,1]
jac = xdu * ydv - xdv * ydu
# ...
# ...
D = srf.evaluate_deriv(tx,ty,nderiv=2)
Udu = D[1,...,0]
Udv = D[2,...,0]
Uduu = D[3,...,0]
Uduv = D[4,...,0]
Udvv = D[5,...,0]
Udx = ydv * Udu - ydu * Udv
Udx /= jac
Udy = - xdv * Udu + xdu * Udv
Udy /= jac
C1 = Uduu - xduu * Udx - yduu * Udy
C2 = Uduv - xduv * Udx - yduv * Udy
C3 = Udvv - xdvv * Udx - ydvv * Udy
Udxx = C1 * ydv**2 - 2 * C2 * ydu * ydv + C3 * ydu**2
Udxy = - C1 * xdv * ydv + C2 *(xdu * ydv + xdv * ydu) - C3 * xdu * ydu
Udyy = C1 * xdv**2 - 2 * C2 * xdu * xdv + C3 * xdu**2
# ...
# ...
#P = srf(u=tx,v=ty)
#x = P[:,:,0]
#y = P[:,:,1]
fig = plt.figure()
Udx[:,0] = 0.
Udx[:,-1] = 1.
Udy[0,:] = 0.
Udy[-1,:] = 1.
for i,v in enumerate(ty):
# phidx = Udu[:,i]
# phidy = Udv[:,i]
phidx = Udx[:,i]
phidy = Udy[:,i]
plt.plot(phidx, phidy, '-b')
for i,u in enumerate(tx):
# phidx = Udu[i,:]
# phidy = Udv[i,:]
phidx = Udx[i,:]
phidy = Udy[i,:]
plt.plot(phidx, phidy, '-b')
plt.show()
#########################################
| StarcoderdataPython |
11223691 | <filename>solutions/0187.Repeated_DNA_Sequences/python_solution.py
class Solution:
    def findRepeatedDnaSequences(self, s):
        """Return all 10-letter substrings occurring more than once in *s*.

        Results are ordered by first occurrence (Counter preserves
        insertion order). LeetCode 187.
        """
        # Fix: Counter was used without ever being imported in this file,
        # which raised NameError at call time.
        from collections import Counter

        Count = Counter(s[i - 10:i] for i in range(10, len(s) + 1))
        return [key for key in Count if Count[key] > 1]
11316982 | import mbuild as mb
import numpy as np
class UCer2(mb.Compound):
    def __init__(self):
        """Build a CER NS C24 compound with the head-to-tail vector along +z.

        Loads the structure from ``ucer2.pdb`` next to this module, then
        aligns it so that atom 2 is the new origin and atom 58 lies on the
        z axis.
        """
        super(UCer2, self).__init__(name='ucer2')
        mb.load('ucer2.pdb', compound=self, relative_to_module=self.__module__)
        # Orient the molecule: particle indices 2 and 58 define the axis --
        # presumably head and tail atoms of the ceramide; verify against the
        # PDB file if the geometry looks wrong.
        mb.coordinate_transform.z_axis_transform(self,
                new_origin=self[2], point_on_z_axis=self[58])
| StarcoderdataPython |
11311564 | <filename>research/minimax/domino_ux.py
from minimax import Minimax
from minimax_domino import Game
from random import Random
def generate_game(seed=None):
    """Deal the 28 domino tiles into four hands of seven and build a Game.

    Passing ``seed`` makes the deal reproducible.
    """
    rng = Random(seed)
    # Every tile (j, i) with j <= i for pip values 0..6 -- 28 in total.
    full_set = [(j, i) for i in range(7) for j in range(i + 1)]
    assert len(full_set) == 28
    shuffled = rng.sample(full_set, 28)
    # Split the shuffled deck into consecutive hands of seven tiles each.
    hands = [shuffled[k:k + 7] for k in range(0, 28, 7)]
    return Game(hands)
def main(seed=None):
    """Play a full game with first-available moves, checking minimax stability.

    ``seed`` (new, optional, defaults to None so existing ``main()`` calls
    still work) seeds the deal. The previous version referenced an undefined
    global ``seed`` and crashed with NameError; the unused ``team`` local
    was also removed.
    """
    game = generate_game(seed)
    cur_state = game.first_state()
    mm = Minimax(game)
    # Print each player's dealt hand.
    for tokens in game.pieces:
        print(tokens)
    first_value = mm.find(cur_state)
    # Play the first available move until the game ends; the script asserts
    # that the minimax value of every intermediate state stays constant.
    while not game.is_over(cur_state):
        print(cur_state)
        value = mm.find(cur_state)
        print(value)
        moves = mm.get_moves(cur_state)
        move = moves[0]
        print(move)
        cur_state = game.apply(cur_state, move)
        assert first_value == value
if __name__ == '__main__':
main() | StarcoderdataPython |
9631464 | <filename>tests/utils.py
# -*- coding: utf-8 -*-
from django.core.urlresolvers import get_script_prefix, set_script_prefix
class script_prefix(object):
    """Context manager that temporarily overrides Django's URL script prefix.

    The previous prefix is captured at construction time and restored on
    exit, so nested/underlying prefixes are preserved.
    """
    def __init__(self, newpath):
        self.newpath = newpath
        self.oldprefix = get_script_prefix()
    def __enter__(self):
        set_script_prefix(self.newpath)
    def __exit__(self, type, value, traceback):
        # Restore unconditionally, even if the body raised.
        set_script_prefix(self.oldprefix)
| StarcoderdataPython |
3584655 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras.applications.resnet50 import preprocess_input
import json
import os
import glob
import sys
import pandas as pd
import itk
from sklearn.model_selection import train_test_split
from sklearn.utils import class_weight
class bcolors:
    """ANSI terminal escape codes for coloured console output.

    Usage: ``print(bcolors.OKGREEN, msg, bcolors.ENDC)`` -- ENDC resets
    all attributes.
    """
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKCYAN = '\033[96m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
class DatasetGenerator:
    """Wraps a DataFrame of image patches as a tf.data pipeline.

    Expects *df* to have a ``patch_im`` column (image file path) and a
    ``sev`` column (severity label). Yields centre-cropped 256x256x3
    float images with a single integer label each, batched one at a time.
    """
    def __init__(self, df):
        self.df = df
        self.dataset = tf.data.Dataset.from_generator(self.generator,
            output_types=(tf.float32, tf.int32),
            output_shapes=((256, 256, 3), [1])
            )
        self.dataset = self.dataset.batch(1)
        self.dataset = self.dataset.prefetch(48)
    def get(self):
        """Return the prepared tf.data.Dataset."""
        return self.dataset
    def generator(self):
        """Yield (image, label) pairs, one per DataFrame row."""
        for idx, row in self.df.iterrows():
            img = row["patch_im"]
            sev = row["sev"]
            img_np = itk.GetArrayViewFromImage(itk.imread(img))
            # Compute the top-left corner of a centred 256x256 crop
            # (channels are kept whole: divisor 1 on the last axis).
            img_np_orig = (np.array(img_np.shape) - np.array((256,256,3)))/np.array([2, 2, 1])
            img_np_orig = img_np_orig.astype(int)
            img_np_end = img_np_orig + np.array([256,256,3])
            img_np_end = img_np_end.astype(int)
            # Centre crop; assumes every source patch is at least 256x256 --
            # TODO confirm upstream guarantees this.
            img_np = img_np[img_np_orig[0]:img_np_end[0], img_np_orig[1]:img_np_end[1], img_np_orig[2]:img_np_end[2]]
            # img_np = tf.image.random_crop(img_np, size=(256, 256, 3))
            yield img_np, np.array([sev])
# Select which physical GPUs TensorFlow may see (by index).
gpus_index = [1]
print("Using gpus:", gpus_index)
gpus = tf.config.list_physical_devices('GPU')
if gpus:
    # Restrict TensorFlow to only use the first GPU
    try:
        gpu_visible_devices = []
        for i in gpus_index:
            gpu_visible_devices.append(gpus[i])
        print(bcolors.OKGREEN, "Using gpus:", gpu_visible_devices, bcolors.ENDC)
        tf.config.set_visible_devices(gpu_visible_devices, 'GPU')
        # strategy = tf.distribute.MirroredStrategy(cross_device_ops=tf.distribute.HierarchicalCopyAllReduce())
    except RuntimeError as e:
        # Visible devices must be set before GPUs have been initialized
        print(bcolors.FAIL, e, bcolors.ENDC)
# else:
#     # strategy = tf.distribute.get_strategy()
# Run inference on the test patches and save per-row predictions.
fname = "/work/jprieto/data/remote/EGower/hinashah/patch_training_07162021/test_patches.csv"
df = pd.read_csv(fname)
checkpoint_path = "/work/jprieto/data/remote/EGower/train/patch_training_08252021_2class_random_crop"
dataset = DatasetGenerator(df).get()
model = tf.keras.models.load_model(checkpoint_path, custom_objects={'tf': tf})
model.summary()
predictions = model.predict(dataset)
# Round sigmoid outputs to hard 0/1 predictions; binarise the ground-truth
# severity (>= 1 -> positive) so both columns are directly comparable.
df["prediction"] = np.around(predictions).astype(int)
df["sev"] = (df["sev"] >= 1).astype(int)
df.to_csv(fname.replace(".csv", "_prediction_2class_random_crop.csv"), index=False)
| StarcoderdataPython |
1954056 | <reponame>rcbops/glance-buildpackage
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import os
import shutil
import unittest
import stubout
from glance.tests import stubs
from glance.tests import utils as test_utils
class IsolatedUnitTest(unittest.TestCase):
    """
    Unit test case that establishes a mock environment within
    a testing directory (in isolation)
    """
    def setUp(self):
        # Fresh per-test sandbox directory plus stubbed-out registry/store
        # servers so no real services are contacted.
        self.test_id, self.test_dir = test_utils.get_isolated_test_env()
        self.stubs = stubout.StubOutForTesting()
        stubs.stub_out_registry_and_store_server(self.stubs, self.test_dir)
        # Each test gets its own copy of the default policy file.
        policy_file = self._copy_data_file('policy.json', self.test_dir)
        options = {'sql_connection': 'sqlite://',
                   'verbose': False,
                   'debug': False,
                   'default_store': 'filesystem',
                   'filesystem_store_datadir': os.path.join(self.test_dir),
                   'policy_file': policy_file}
        self.conf = test_utils.TestConfigOpts(options)
    def _copy_data_file(self, file_name, dst_dir):
        """Copy a fixture from glance/tests/etc into *dst_dir*; return its path."""
        src_file_name = os.path.join('glance/tests/etc', file_name)
        shutil.copy(src_file_name, dst_dir)
        dst_file_name = os.path.join(dst_dir, file_name)
        return dst_file_name
    def set_policy_rules(self, rules):
        """Overwrite the sandboxed policy file with *rules* (a dict)."""
        fap = open(self.conf.policy_file, 'w')
        fap.write(json.dumps(rules))
        fap.close()
    def tearDown(self):
        # Undo all stubs and remove the sandbox directory.
        self.stubs.UnsetAll()
        if os.path.exists(self.test_dir):
            shutil.rmtree(self.test_dir)
| StarcoderdataPython |
8087730 | <filename>examples/python/gift_giver/models/attendee.py
from orator import Model
class Attendee(Model):
    """Orator ORM model for rows of the ``attendees`` table."""
    # Explicit table name (otherwise derived from the class name).
    __table__ = 'attendees'
    # Columns that may be mass-assigned via create()/fill().
    __fillable__ = ['name', 'vendor_user_id', 'rsvp_answer', 'awarded']
    pass  # redundant, kept to leave the code tokens untouched
| StarcoderdataPython |
11220706 | <reponame>ismailqau/libxayagame
# Copyright (C) 2020 The Xaya developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from xayagametest.testcase import XayaGameTest
import os
import os.path
class NonFungibleTest (XayaGameTest):
  """
  An integration test for the non-fungible GSP.
  """

  def __init__ (self):
    # Locate the nonfungibled binary inside the build tree; the
    # top_builddir environment variable overrides the default "../..".
    top_builddir = os.getenv ("top_builddir")
    if top_builddir is None:
      top_builddir = "../.."
    nfd = os.path.join (top_builddir, "nonfungible", "nonfungibled")
    # "nf" is the game ID handled by this GSP.
    super ().__init__ ("nf", nfd)

  def getRpc (self, method, *args, **kwargs):
    """
    Calls a custom-state RPC method and returns the data field.
    """
    return self.getCustomState ("data", method, *args, **kwargs)
1855633 | ## ##
## GEOG 485 Final Project ##
## ##
## This script downloads earthquake data from USGS feeds in KML and in ##
## GeoJSON format. The script converts the GeoJSON into a geodatabase ##
## if the user has the Data Interoperability extension. If not the ##
## GeoJSON is converted to a shapefile using an online converter. ##
## Earthquake data in KML format is converted using and arcpy tool ##
## which creates a file geodatabase containing a feature class within ##
## a feature dataset. ##
## ##
## Author: <NAME> ##
# Import the necessary modules
import zipfile
import arcpy
import urllib, urllib2
import os
import datetime
arcpy.env.overwiteOutput = True
# Designate the urls of the earthquake data feeds. The KML feed contains all
# earthquakes in the last month greater than 2.5+ magnitude. The GeoJSON feed
# contains all the earthquakes in the last month greater than 4.5+ magnitude.
kmlUrl = 'https://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/2.5_month_age.kml'
geojsonUrl = 'https://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/4.5_month.geojson'
# Designate the target folder for script output. Create it if it does not already exist.
targetFolder = 'C:/EQData'
if not os.path.exists(targetFolder):
os.makedirs(targetFolder)
# Create a string with date information to label output datasets.
date = datetime.datetime.now()
dateString = '_' + date.strftime('%b') + '_' + date.strftime('%d') + '_' + date.strftime('%Y')
# Attempt to download data over HTTP. Create new files with the open command with
# path and date information previously designated. Write the downloaded data to each
# file handle. Close the file handles after.
try:
kmlData = urllib2.urlopen(kmlUrl).read()
geojsonData = urllib2.urlopen(geojsonUrl).read()
kmlPath = targetFolder + '/2.5_month' + dateString + '.kml'
geojsonPath = targetFolder + '/4.5_month' + dateString + '.geojson'
kmlHand = open(kmlPath, 'w')
geojsonHand = open(geojsonPath, 'w')
kmlHand.write(kmlData)
geojsonHand.write(geojsonData)
kmlHand.close()
geojsonHand.close()
except:
print('Error retrieving earthquake data')
# Check if the user has the Data Interoperability extension for ArcGIS
if arcpy.CheckExtension('DataInteroperability') == 'Available':
# Try using the QuickImport tool to create the output geodatabase.
try:
gdbPath = targetFolder + '/4.5_month' + dateString + '.gdb'
arcpy.CheckOutExtension('DataInteroperability')
arcpy.QuickImport_interop(geojsonPath, gdbPath)
arcpy.CheckInExtension('DataInteroperability')
except:
print(arcpy.GetMessages())
# Also try using the KMLToLayer tool to create output datasets.
try:
arcpy.KMLToLayer_conversion(kmlPath, targetFolder)
except:
print(arcpy.GetMessages())
# If the user does not have the required extension, then attempt use the online GeoJSON
# converter.
else:
try:
# Define the url and the desired output name,
url = 'http://ogre.adc4gis.com/convertJson/'
outputName = '4.5_month' + dateString
# Encode the parameters and attempt to download the data with a POST request
params = urllib.urlencode({'jsonUrl' : geojsonUrl, 'outputName' : outputName})
data = urllib2.urlopen(url, params).read()
# Designate where the file should be written.
zipPath = targetFolder + '/4.5_month' + dateString + '.zip'
# Create a file handle to write to, write the data, and close the handle
zipHand = open(zipPath, 'wb')
zipHand.write(data)
zipHand.close()
# Create a zipfile object and extract the files to a desired folder
zip = zipfile.ZipFile(zipPath, 'r')
unzipPath = targetFolder + '/4.5_month' + dateString
zip.extractall(unzipPath)
# Rename all the zipped shapefile components so that they have descriptive names.
# Some work is done as all files come with the same name but different extensions.
for file in os.listdir(unzipPath):
nameWoExt = os.path.splitext(file)[0]
ext = os.path.splitext(file)[1]
newNameWoExt = outputName
newName = newNameWoExt + ext
os.rename(os.path.join(unzipPath, file), os.path.join(unzipPath, newName))
except:
print('Problem retrieving shapefiles from webpage GeoJSON converter')
# Also try using the KMLToLayer tool to create output datasets.
try:
arcpy.KMLToLayer_conversion(kmlPath, targetFolder)
except:
print(arcpy.GetMessage())
| StarcoderdataPython |
# Toggle the Glyph Nanny observer overlay in the current font editor session.
from glyphNanny import toggleObserverVisibility
toggleObserverVisibility()
11327308 | <reponame>pavel-paulau/moveit
from setuptools import setup

# Packaging metadata for the "moveit" ns_server master-events analyzer.
setup(
    name='moveit',
    version='0.7.5',
    description='ns_server master events analyzer',
    author='<NAME>',
    author_email='<EMAIL>',
    packages=[
        'moveit',
    ],
    # Console commands installed into the user's PATH.
    entry_points={
        'console_scripts': [
            'flow = moveit.flow:main',
            'moveit = moveit.moveit:main',
        ]
    },
    # svgwrite is used to render the flow diagrams.
    install_requires=[
        'svgwrite',
    ],
    classifiers=[
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.3',
        'Development Status :: 5 - Production/Stable',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
)
| StarcoderdataPython |
155061 | from output.models.ms_data.regex.re_l15_xsd.re_l15 import Doc
__all__ = [
"Doc",
]
| StarcoderdataPython |
4912717 | <filename>docker/website-builder/createPayoutPages.py
# coding=utf-8
import json
import sys
def build_delegator_pages(raw_payouts):
    """Build the per-delegator payout pages from the raw payouts JSON.

    Returns a dict mapping delegator address -> list of Markdown lines
    (Jekyll front matter, heading, and one table row per rewarded cycle).
    """
    delegators = {}
    for cycle, cycle_val in raw_payouts["payoutsByCycle"].items():
        for delegator, delegator_val in cycle_val["delegators"].items():
            if delegator not in delegators:
                # First sighting of this delegator: front matter + table header.
                delegators[delegator] = ["---"]
                delegators[delegator].append("layout: about")
                delegators[delegator].append("---")
                delegators[delegator].append("### Payout to address [%s](https://tezblock.io/account/%s):" % (delegator, delegator))
                delegators[delegator].append("")
                delegators[delegator].append("|Cycle|Balance|Payout|Actual fee|Payout operation|")
                delegators[delegator].append("|-----|-------|------|----------|----------------|")
            if "payoutOperationHash" in delegator_val:
                payout_operation = "[%s...](https://tezblock.io/transaction/%s)" % (delegator_val["payoutOperationHash"][0:7], delegator_val["payoutOperationHash"])
            else:
                payout_operation = ""
            if delegator_val["estimatedRewards"] != "0":
                # Amounts are in mutez (1 tez = 1,000,000 mutez).  The fee column
                # is only computable once "finalRewards" is present; the formula
                # is explained in the comment block at the bottom of this file.
                delegators[delegator].append("|%s|%sꜩ|%sꜩ|%s|%s|" % (cycle, int(delegator_val["balance"]) / 1000000,
                                             int(delegator_val["estimatedRewards"]) / 1000000,
                                             "%s%%" % round((1 - (0.95 * int(delegator_val["estimatedRewards"]) / int(delegator_val["finalRewards"]))) * 100, 3) if "finalRewards" in delegator_val else "Not yet known",
                                             payout_operation))
    return delegators


def write_pages(delegators, save_path):
    """Append each delegator page, plus the FAQ footer, to <save_path>/<address>.md."""
    for delegator, lines in delegators.items():
        lines.append("")
        lines.append("[How do payouts work ?](https://hodl.farm/faq.html#how-do-payouts-work-)")
        # Use a context manager so the handle is closed deterministically;
        # the original opened the file inline in print() and leaked it.
        with open("%s/%s.md" % (save_path, delegator), "a") as page:
            print("\n".join(lines), file=page)


if __name__ == "__main__":
    save_path = sys.argv[1]
    with open('payouts.json') as json_file:
        raw_payouts = json.load(json_file)
    write_pages(build_delegator_pages(raw_payouts), save_path)

# Explanation of "actual fee"
# How to calculate the actual fee with the estimated rewards and actual rewards:
# An example when the nominal fee is 5%
# Your estimated rewards (that we pay you) is x
# Your share of our idealized earnings is x/(1-fee) i.e x/0.95
# The actual rewards we should have paid you is y
# Your share of actual earnings is y/0.95
# The effective fee is ( y/0.95 - x ) / ( y/0.95) = 1 - 0.95 ( x / y )
# We can confirm this calculation with the following hypothesis:
# Let's say the network behaved optimally. In that case, y = x
# Then the effective fee is 1 - 0.95 = 0.05, which is correct
| StarcoderdataPython |
1988415 | from Node_Depths import nodeDepths, BinaryTree
def findNode(nodes, id):
    """Recursively materialise the node with the given *id* as a BinaryTree.

    Returns None for a missing child reference (id of None) or an unknown id.
    """
    if id is None:
        return None
    record = next((entry for entry in nodes if entry["id"] == id), None)
    if record is None:
        return None
    subtree = BinaryTree(record["value"])
    subtree.left = findNode(nodes, record["left"])
    subtree.right = findNode(nodes, record["right"])
    return subtree
def buildTree(tree):
    """Build a BinaryTree from the serialized {'nodes': [...], 'root': id} form."""
    return findNode(tree['nodes'], tree['root'])
# Each case deserialises a tree description with buildTree() and checks that
# nodeDepths() returns the expected sum of all node depths (root depth = 0).
def test_nodeDepths_case_1():
    tree = {'nodes': [{'id': '1', 'left': '2', 'right': '3', 'value': 1}, {'id': '2', 'left': '4', 'right': '5', 'value': 2}, {'id': '3', 'left': '6', 'right': '7', 'value': 3}, {'id': '4', 'left': '8', 'right': '9', 'value': 4}, {'id': '5', 'left': None, 'right': None, 'value': 5}, {'id': '6', 'left': None, 'right': None, 'value': 6}, {'id': '7', 'left': None, 'right': None, 'value': 7}, {'id': '8', 'left': None, 'right': None, 'value': 8}, {'id': '9', 'left': None, 'right': None, 'value': 9}], 'root': '1'}
    tree = buildTree(tree)
    res = 16
    assert nodeDepths(tree) == res
def test_nodeDepths_case_2():
    # Single-node tree: total depth is 0.
    tree = {'nodes': [{'id': '1', 'left': None, 'right': None, 'value': 1}], 'root': '1'}
    tree = buildTree(tree)
    res = 0
    assert nodeDepths(tree) == res
def test_nodeDepths_case_3():
    tree = {'nodes': [{'id': '1', 'left': '2', 'right': None, 'value': 1}, {'id': '2', 'left': None, 'right': None, 'value': 2}], 'root': '1'}
    tree = buildTree(tree)
    res = 1
    assert nodeDepths(tree) == res
def test_nodeDepths_case_4():
    tree = {'nodes': [{'id': '1', 'left': '2', 'right': '3', 'value': 1}, {'id': '2', 'left': None, 'right': None, 'value': 2}, {'id': '3', 'left': None, 'right': None, 'value': 3}], 'root': '1'}
    tree = buildTree(tree)
    res = 2
    assert nodeDepths(tree) == res
def test_nodeDepths_case_5():
    tree = {'nodes': [{'id': '1', 'left': '2', 'right': '3', 'value': 1}, {'id': '2', 'left': '4', 'right': None, 'value': 2}, {'id': '3', 'left': None, 'right': None, 'value': 3}, {'id': '4', 'left': None, 'right': None, 'value': 4}], 'root': '1'}
    tree = buildTree(tree)
    res = 4
    assert nodeDepths(tree) == res
def test_nodeDepths_case_6():
    # Degenerate (linked-list shaped) tree.
    tree = {'nodes': [{'id': '1', 'left': '2', 'right': None, 'value': 1}, {'id': '2', 'left': '3', 'right': None, 'value': 2}, {'id': '3', 'left': '4', 'right': None, 'value': 3}, {'id': '4', 'left': '5', 'right': None, 'value': 4}, {'id': '5', 'left': '6', 'right': None, 'value': 5}, {'id': '6', 'left': None, 'right': '7', 'value': 6}, {'id': '7', 'left': None, 'right': None, 'value': 7}], 'root': '1'}
    tree = buildTree(tree)
    res = 21
    assert nodeDepths(tree) == res
def test_nodeDepths_case_7():
    tree = {'nodes': [{'id': '1', 'left': '2', 'right': '8', 'value': 1}, {'id': '2', 'left': '3', 'right': None, 'value': 2}, {'id': '3', 'left': '4', 'right': None, 'value': 3}, {'id': '4', 'left': '5', 'right': None, 'value': 4}, {'id': '5', 'left': '6', 'right': None, 'value': 5}, {'id': '6', 'left': None, 'right': '7', 'value': 6}, {'id': '7', 'left': None, 'right': None, 'value': 7}, {'id': '8', 'left': None, 'right': '9', 'value': 8}, {'id': '9', 'left': None, 'right': '10', 'value': 9}, {'id': '10', 'left': None, 'right': '11', 'value': 10}, {'id': '11', 'left': None, 'right': '12', 'value': 11}, {'id': '12', 'left': '13', 'right': None, 'value': 12}, {'id': '13', 'left': None, 'right': None, 'value': 13}], 'root': '1'}
    tree = buildTree(tree)
    res = 42
    assert nodeDepths(tree) == res
def test_nodeDepths_case_8():
    tree = {'nodes': [{'id': '1', 'left': '2', 'right': '3', 'value': 1}, {'id': '2', 'left': '4', 'right': '5', 'value': 2}, {'id': '3', 'left': '6', 'right': '7', 'value': 3}, {'id': '4', 'left': '8', 'right': '9', 'value': 4}, {'id': '5', 'left': None, 'right': None, 'value': 5}, {'id': '6', 'left': '10', 'right': None, 'value': 6}, {'id': '7', 'left': None, 'right': None, 'value': 7}, {'id': '8', 'left': None, 'right': None, 'value': 8}, {'id': '9', 'left': None, 'right': None, 'value': 9}, {'id': '10', 'left': None, 'right': '11', 'value': 10}, {'id': '11', 'left': '12', 'right': '13', 'value': 11}, {'id': '12', 'left': '14', 'right': None, 'value': 12}, {'id': '13', 'left': '15', 'right': '16', 'value': 13}, {'id': '14', 'left': None, 'right': None, 'value': 14}, {'id': '15', 'left': None, 'right': None, 'value': 15}, {'id': '16', 'left': None, 'right': None, 'value': 16}], 'root': '1'}
    tree = buildTree(tree)
    res = 51
    assert nodeDepths(tree) == res
def test_nodeDepths_case_9():
    # Left-only chain of 9 nodes: 0+1+...+8 = 36.
    tree = {'nodes': [{'id': '1', 'left': '2', 'right': None, 'value': 1}, {'id': '2', 'left': '3', 'right': None, 'value': 2}, {'id': '3', 'left': '4', 'right': None, 'value': 3}, {'id': '4', 'left': '5', 'right': None, 'value': 4}, {'id': '5', 'left': '6', 'right': None, 'value': 5}, {'id': '6', 'left': '7', 'right': None, 'value': 6}, {'id': '7', 'left': '8', 'right': None, 'value': 7}, {'id': '8', 'left': '9', 'right': None, 'value': 8}, {'id': '9', 'left': None, 'right': None, 'value': 9}], 'root': '1'}
    tree = buildTree(tree)
    res = 36
    assert nodeDepths(tree) == res
| StarcoderdataPython |
3465599 | <filename>gym-xplane/gym_xplane/space_definition.py
import numpy as np
from gym import spaces
class xplane_space():
    """Gym action/observation space definitions for the X-Plane environment."""

    def Action_space(self):
        """Return the continuous control box.

        Axes are [latitudinal stick, longitudinal stick, rudder, throttle];
        stick/rudder span [-1, 1] and throttle's lower bound is -0.25.
        (A richer spaces.Dict layout with gear/flaps/speedbrakes existed
        historically but was replaced by this flat Box.)
        """
        low = np.array([-1, -1, -1, -1 / 4])
        high = np.array([1, 1, 1, 1])
        return spaces.Box(low, high)

    def Observation_space(self):
        """Return the continuous state box.

        Eleven axes covering position (lat/lon/alt), attitude
        (pitch/roll/heading) and further state channels; bounds below mirror
        the ranges of the legacy spaces.Dict definition.
        """
        low = np.array([-360, -360, 0, -290, -100, -360, -360, -1000, -1300, -1000, -1000])
        high = np.array([360, 360, 8500, 290, 100, 360, 360, 1000, 1300, 1000, 1000])
        return spaces.Box(low, high)
| StarcoderdataPython |
3577811 | <filename>app1/views.py
from django.shortcuts import render,HttpResponse
from app1.models import Contact
from app1.forms import ContactForm,NewsletterForm
from django.contrib import messages
# Create your views here.
from django.http import HttpResponse,JsonResponse,HttpResponseRedirect
def index(request):
    """Render the landing page."""
    return render(request,'app1/index.html')
def about(request):
    """Render the static "about" page."""
    return render(request,'app1/about.html')
def elements(request):
    """Render the static UI-elements demo page."""
    return render(request,'app1/elements.html')
def contact(request):
    """Handle the contact form: validate, save, and flash a status message.

    GET renders an empty form; POST validates and persists the submission.
    NOTE(review): after saving, the contact's name is overwritten with
    "unknown" and saved again -- presumably deliberate anonymisation, but
    worth confirming with the author.
    """
    if request.method == 'POST':
        form=ContactForm(request.POST)
        if form.is_valid():
            # name=form.cleaned_data.get('name')
            form=form.save()  # `form` now holds the saved Contact instance
            form.name="unknown"
            form.save()
            messages.add_message(request, messages.SUCCESS, 'aqa shod.')
        else:
            messages.add_message(request, messages.ERROR, 'aqa nashod.')
    # Always re-render the page with a fresh, empty form.
    form=ContactForm()
    return render(request,'app1/contact.html',{'form':form})
def newsletter(request):
    """Handle newsletter sign-ups, then always redirect to the home page."""
    if request.method == 'POST':
        submitted = NewsletterForm(request.POST)
        if submitted.is_valid():
            submitted.save()
            messages.add_message(request, messages.SUCCESS, 'aqa shodaaa.')
        else:
            messages.add_message(request, messages.ERROR, 'aqa nashodaaa.')
    # Every path -- valid POST, invalid POST, or plain GET -- redirects home.
    return HttpResponseRedirect('/')
| StarcoderdataPython |
6608614 | <filename>programme.py
from argparse import ArgumentParser
from pathlib import Path
from libprogramme import lecture, conversion, ecriture
def main():
    """Parse the CLI arguments and convert every Markdown source file to HTML."""
    parser = ArgumentParser(description='Générateur de site statique')
    # Positional argument kept for CLI compatibility (its value is not used below).
    parser.add_argument('convert', help="Lance la conversion du markdown en html")
    # Arguments optionnels
    parser.add_argument("-i", "--input_directory",
                        help="Chemin du dossier de fichiers source (contenant les fichiers markdown",
                        default='./md')
    parser.add_argument("-o", "--output_directory",
                        help="Chemin du dossier où seront mis les fichiers générés pour le site statique",
                        default='./html')
    parser.add_argument("-t", "--template_directory",
                        help="Dossier contenant des modèles de pages web à compléter",
                        default='./template')
    args = parser.parse_args()
    if args.input_directory:
        print(f'Le chemin du dossier de fichiers source : {args.input_directory}')
    if args.output_directory:
        print(f'Le chemin du dossier avec les fichiers créés : {args.output_directory}')
    if args.template_directory:
        print(f'Le chemin du dossier contenant des modèles de pages web : {args.template_directory}')
    racine = Path('.')
    files_md = sorted(racine.glob(args.input_directory + '/*.md'))
    files_temp = sorted(racine.glob(args.template_directory + '/*.html'))
    for md in files_md:
        lignes = lecture(md)
        contenu = '\n'.join(conversion(lignes))
        # Path.stem yields the base name whatever the input directory is;
        # the old str(md)[3:-3] silently assumed a 3-character "md/" prefix
        # and produced broken output paths for any other -i value.
        chemin = args.output_directory + '/' + md.stem
        for temp in files_temp:
            # as_posix() normalises Windows back-slashes; keep the "./" prefix
            # that templates were previously addressed with.
            template = './' + temp.as_posix()
            ecriture(chemin, contenu, template)


if __name__ == '__main__':
    main()
| StarcoderdataPython |
1709334 | <gh_stars>0
from bs4 import BeautifulSoup
import requests
from urllib.request import Request, urlopen
import sqlalchemy
from sqlalchemy.sql.functions import user
import creds
import db
from sqlalchemy import Table, Column, Integer, String, MetaData
from datetime import datetime
def main():
    """Scrape UFC event pages from Sherdog and insert them into MySQL."""
    username = creds.db['username']
    password = creds.db['password']
    db_name = creds.db['name']
    host = creds.db['host']
    port = creds.db['port']
    # Connect to the MySQL server, then select the working database.
    engine = sqlalchemy.create_engine('mysql://' + username + ':' + password + '@' + host + ':' + str(port))
    engine.execute("USE " + db_name)

    events = requests.get("https://www.sherdog.com/organizations/Ultimate-Fighting-Championship-UFC-2")
    events_soup = BeautifulSoup(events.content, 'html.parser')
    links = [anchor.get('href') for anchor in events_soup.find_all('a')]

    # Bind values instead of interpolating them into the SQL string: event
    # titles are untrusted scraped text and may contain quotes (the old
    # f-string INSERT was injectable and broke on apostrophes).
    insert_event = sqlalchemy.text(
        "INSERT INTO events (date, name, source) VALUES (:date, :name, :source)"
    )
    for link in links:
        if '/events/' in link and 'UFC' in link:
            # e.g. https://www.sherdog.com/events/UFC-Fight-Night-192-Smith-vs-Spann-89440
            page = requests.get('https://www.sherdog.com' + link)
            page_soup = BeautifulSoup(page.content, 'html.parser')
            # The meta content looks like "2021-09-25T18:00:00-04:00";
            # keep only the date part before the 'T'.
            date = page_soup.find_all(class_='date')[1].meta['content']
            date = str(date).split('T')
            print(date[0])
            datetime_object = datetime.strptime(date[0], '%Y-%m-%d')
            sherdog_link = 'https://www.sherdog.com' + link
            engine.execute(insert_event, date=datetime_object,
                           name=page_soup.title.contents[0],
                           source=sherdog_link)
            print(page_soup.title.contents[0])
            print(datetime_object)
            print(sherdog_link)
            print('===============================================')


if __name__ == '__main__':
    main()
| StarcoderdataPython |
1752586 | from django.urls import path
# URL namespace used when reversing this app's routes ("accounts:<name>").
app_name = 'accounts'
# No routes are registered for this app yet.
urlpatterns = [
]
| StarcoderdataPython |
3474438 | # Generated by Django 2.2.10 on 2020-05-08 13:33
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
    # Auto-generated initial schema for the dockerapi app.  Avoid editing by
    # hand: Django tracks applied migrations by name and content.
    initial = True
    dependencies = [
    ]
    operations = [
        # Metadata describing a vulnerability Docker image.
        migrations.CreateModel(
            name='ImageInfo',
            fields=[
                ('image_id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('image_name', models.CharField(max_length=256, unique=True, verbose_name='Docker镜像名称')),
                ('image_vul_name', models.CharField(max_length=256, verbose_name='漏洞名称')),
                ('image_port', models.CharField(max_length=256, verbose_name='暴露端口')),
                ('image_desc', models.TextField(null=True, verbose_name='镜像描述')),
                ('rank', models.FloatField(verbose_name='Rank')),
                ('is_ok', models.BooleanField(default=True, verbose_name='镜像是否可用')),
                ('create_date', models.DateTimeField(auto_now_add=True, verbose_name='Docker创建时间,默认为当前时间')),
                ('update_date', models.DateTimeField(auto_now=True, verbose_name='Docker更新时间,默认为当前时间')),
            ],
            options={
                'db_table': 'image_info',
            },
        ),
        # Simple key/value store for system-wide configuration.
        migrations.CreateModel(
            name='SysConfig',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('config_key', models.CharField(max_length=255, unique=True, verbose_name='配置名称对应key')),
                ('config_value', models.TextField(default='', null=True, verbose_name='对应值')),
            ],
            options={
                'db_table': 'sys_config',
            },
        ),
        # Audit log of user operations (who did what, from which IP).
        migrations.CreateModel(
            name='SysLog',
            fields=[
                ('log_id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False, verbose_name='ID')),
                ('user_id', models.IntegerField(verbose_name='使用用户ID')),
                ('operation_type', models.CharField(max_length=255, verbose_name='操作类型')),
                ('operation_name', models.CharField(max_length=255, verbose_name='操作名称')),
                ('operation_value', models.CharField(max_length=255, verbose_name='操作内容')),
                ('operation_args', models.TextField(default='', null=True, verbose_name='参数')),
                ('ip', models.CharField(max_length=255, verbose_name='IP地址')),
                ('create_date', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
            ],
            options={
                'db_table': 'sys_log',
            },
        ),
        # A running vulnerability container launched from an ImageInfo image.
        migrations.CreateModel(
            name='ContainerVul',
            fields=[
                ('container_id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False, verbose_name='漏洞容器创建ID')),
                ('docker_container_id', models.CharField(max_length=255, verbose_name='Docker容器运行进ID')),
                ('user_id', models.IntegerField(verbose_name='用户ID')),
                ('vul_host', models.CharField(max_length=255, verbose_name='容器漏洞URL')),
                ('container_status', models.CharField(max_length=255, verbose_name='容器当前状态')),
                ('container_port', models.CharField(max_length=255, verbose_name='容器端口')),
                ('vul_port', models.TextField(default='', verbose_name='容器对应端口')),
                ('container_flag', models.CharField(max_length=255, verbose_name='flag')),
                ('create_date', models.DateTimeField(auto_now_add=True, verbose_name='容器创建时间,默认为当前时间')),
                ('is_check', models.BooleanField(default=False, verbose_name='Flag是否通过')),
                ('is_check_date', models.DateTimeField(null=True, verbose_name='Flag提交时间')),
                ('time_model_id', models.CharField(max_length=255, verbose_name='时间模式 ID')),
                ('image_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='dockerapi.ImageInfo', verbose_name='Docker ID')),
            ],
            options={
                'db_table': 'container_vul',
            },
        ),
    ]
| StarcoderdataPython |
1771405 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
## @package TBTKview
# @file plotMAG.py
# @brief Plot magnetization
#
# @author <NAME>
import h5py
import numpy
import matplotlib.pyplot
import matplotlib.axes
import matplotlib.cm
import scipy.ndimage.filters
import mpl_toolkits.mplot3d
import sys
import math
import cmath
def main():
    """Plot the magnetization projected onto the (theta, phi) axis.

    Usage: plotMAG.py <file.hdf5> <theta> <phi>
    Reads the 'Magnetization' dataset and saves a 3D surface plot to
    figures/MAG.png.
    """
    if len(sys.argv) != 4:
        # The old message claimed "one argument" while requiring three.
        print("Error: Needs three arguments: .hdf5-filename, theta, phi")
        exit(1)
    filename = sys.argv[1]
    theta = float(sys.argv[2])
    phi = float(sys.argv[3])

    file = h5py.File(filename, 'r')
    dataset = file['Magnetization']
    data_dimensions = dataset.shape
    # The last two dimensions hold the spin-matrix elements and the
    # real/imaginary decomposition; the rest are physical dimensions.
    physical_dimensions = len(data_dimensions) - 2
    print("Dimensions: " + str(physical_dimensions))
    if physical_dimensions != 2:
        print("Error, can only plot for 2 physical dimensions")
        exit(1)  # was exit(0): a refused plot is a failure, not success

    size_x = data_dimensions[0]
    size_y = data_dimensions[1]
    x = numpy.arange(0, size_x, 1)
    y = numpy.arange(0, size_y, 1)
    X, Y = numpy.meshgrid(x, y)

    Z = numpy.zeros((size_x, size_y))
    for xp in range(0, size_x):
        for yp in range(0, size_y):
            # Spin components (up-up, up-down, down-up, down-down), stored as
            # [real, imag] pairs in the last dataset dimension.
            uu = dataset[xp, yp, 0, 0] + 1j*dataset[xp, yp, 0, 1]
            ud = dataset[xp, yp, 1, 0] + 1j*dataset[xp, yp, 1, 1]
            du = dataset[xp, yp, 2, 0] + 1j*dataset[xp, yp, 2, 1]
            dd = dataset[xp, yp, 3, 0] + 1j*dataset[xp, yp, 3, 1]
            # Projection of the local magnetization onto the (theta, phi) axis.
            Z[xp, yp] = numpy.real(
                + (ud + du)*math.sin(theta)*math.cos(phi)
                - 1j*(ud - du)*math.sin(theta)*math.sin(phi)
                + (uu - dd)*math.cos(theta)
            )

    fig = matplotlib.pyplot.figure()
    ax = fig.gca(projection='3d')
    ax.plot_surface(X.transpose(), Y.transpose(), Z, rstride=1, cstride=1,
                    cmap=matplotlib.cm.coolwarm, linewidth=0, antialiased=False)
    ax.set_zlim(numpy.min(Z), numpy.max(Z))
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_zlabel('Magnetization')
    fig.savefig('figures/MAG.png')


if __name__ == "__main__":
    main()
| StarcoderdataPython |
8042267 | from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
import os
from scipy.interpolate import interp1d
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import argparse
def get_rid_of_num(name):
    """Return *name* (a string or list of characters) without trailing digits.

    The original implementation deleted characters from the list in place and
    raised IndexError when the name consisted only of digits; this version is
    side-effect free and returns '' in that case.  Uses str.isdigit() so the
    accepted digit set matches the previous behaviour exactly.
    """
    text = ''.join(name)
    end = len(text)
    while end > 0 and text[end - 1].isdigit():
        end -= 1
    return text[:end]
def merge_runs(dir_path, result_name, new_dir_name='tf_merged', tensorboard=False):
    """Aggregate TensorBoard runs that share a run-type prefix and plot them.

    Runs in *dir_path* are grouped by their name with trailing digits removed
    (e.g. "dqn1", "dqn2" -> "dqn").  For each group, the 'episode_reward'
    scalars are linearly interpolated onto a common step range, and the plot
    shows mean +/- one standard deviation per group; the figure is saved to
    *result_name*.  If *tensorboard* is true, the merged values are also
    written back as histograms under ./log/<new_dir_name>.
    """
    # One merged group per distinct run-type prefix.
    diff_run_types = list(set([get_rid_of_num(list(name)) for name in os.listdir(dir_path) if name != 'merge_runs.py']))
    summary_iterators = []
    for name in diff_run_types:
        summary_iterators.append([EventAccumulator(os.path.join(dir_path, dname)).Reload()
                                  for dname in os.listdir(dir_path) if name in dname])
    # All runs within a group must expose the same scalar tags.
    tags = [iterator[0].Tags()['scalars'] for iterator in summary_iterators]
    for idx, sum_it in enumerate(summary_iterators):
        for it in sum_it:
            assert it.Tags()['scalars'] == tags[idx]
    to_merge = ['episode_reward']
    for tag in to_merge:
        fig, ax = plt.subplots(1)
        ax.set_title(tag)
        ax.set_xlabel('steps')
        ax.set_ylabel('episode reward')
        ax.grid()
        colors = ['red', 'green', 'blue', 'yellow']
        fig.tight_layout()
        for idx, sum_it in enumerate(summary_iterators):
            summaries_events = [summary.Scalars(tag) for summary in sum_it]
            # Only the step range covered by every run in the group is usable.
            end_point = min([events[-1].step for events in summaries_events])
            start_point = max([events[0].step for events in summaries_events])
            steps = [step for step in range(start_point, end_point + 1)]
            # Interpolate each run onto the common integer step grid.
            interpolated_data = []
            for events in summaries_events:
                event_steps = [event.step for event in events]
                event_data = [event.value for event in events]
                interpolated_data.append(interp1d(event_steps, event_data))
            matrix_form = []
            for step in steps:
                matrix_form.append([data(step).item(0) for data in interpolated_data])
            matrix_form = np.asarray(matrix_form)
            max_values = np.amax(matrix_form, axis=1)
            min_values = np.amin(matrix_form, axis=1)
            mean = matrix_form.mean(axis=1)
            sigma = matrix_form.std(axis=1)
            #fig, ax = plt.subplots(1)
            # Mean curve plus a +/- sigma band per run-type.
            ax.plot(steps, mean, lw=1, label=diff_run_types[idx], color=colors[idx % len(colors)])
            ax.fill_between(steps, mean + sigma, mean - sigma, facecolor=colors[idx % len(colors)], alpha=0.5)
            if tensorboard:
                # Uses TF1-style placeholders/sessions -- TODO confirm the
                # installed TensorFlow still supports this API.
                merged_data_ = tf.placeholder(tf.float32)
                summary_op = tf.summary.histogram(tag + '_merged', merged_data_)
                with tf.Session() as sess:
                    writer = tf.summary.FileWriter('./log/' + new_dir_name)
                    for step in steps:
                        merged_summary = sess.run(summary_op, feed_dict={merged_data_: [data(step).item(0) for data in interpolated_data]})
                        writer.add_summary(merged_summary, step)
        lgd = ax.legend(loc='upper left')
        plt.savefig(result_name, bbox_extra_artists=(lgd,), bbox_inches='tight')
        plt.close()
        # plt.show()
if __name__ == '__main__':
    # CLI entry point: merge all runs found in the given work directory.
    parser = argparse.ArgumentParser()
    parser.add_argument('work_directory', type=str, help='work directory path')
    parser.add_argument('result_name', type=str, help='result graph name')
    args = parser.parse_args()
    merge_runs(args.work_directory, args.result_name)
| StarcoderdataPython |
3562678 | from setuptools import setup
# Packaging metadata for the py-pacman game / Gym environment.
setup(
    name='py-pacman',
    version='1.0.0',
    # Runtime dependencies required by the game and its Gym wrapper.
    install_requires=['pygame', 'gym', 'numpy', 'pygame-menu'],
)
8104934 | """
Regression tests for defer() / only() behavior.
"""
from django.db import models
class Item(models.Model):
    # Mix of field types (char/text/int) used to exercise defer()/only().
    name = models.CharField(max_length=15)
    text = models.TextField(default="xyzzy")
    value = models.IntegerField()
    other_value = models.IntegerField(default=0)
    def __unicode__(self):
        return self.name
class RelatedItem(models.Model):
    # Forward relation onto Item for select_related/defer interaction tests.
    item = models.ForeignKey(Item)
class Child(models.Model):
    name = models.CharField(max_length=10)
    value = models.IntegerField()
class Leaf(models.Model):
    # Two foreign keys to the same model, one nullable, to test deferred
    # loading across multiple relations.
    name = models.CharField(max_length=10)
    child = models.ForeignKey(Child)
    second_child = models.ForeignKey(Child, related_name="other", null=True)
    value = models.IntegerField(default=42)
    def __unicode__(self):
        return self.name
class ResolveThis(models.Model):
    num = models.FloatField()
    name = models.CharField(max_length=16)
class Proxy(Item):
    # Proxy model over Item: same table, used for defer() on proxies.
    class Meta:
        proxy = True
class SimpleItem(models.Model):
    name = models.CharField(max_length=15)
    value = models.IntegerField()
    def __unicode__(self):
        return self.name
class Feature(models.Model):
    item = models.ForeignKey(SimpleItem)
9748099 | <filename>test/nn/test_initializers.py
# Copyright 2021 The NetKet Authors - All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from math import sqrt
import jax
import netket as nk
import numpy as np
import pytest
from jax import numpy as jnp
from netket.jax.utils import dtype_real
from netket.nn.initializers import _complex_truncated_normal
from scipy.stats import kstest
seed = 12345
@pytest.mark.parametrize("dtype", [jnp.complex64, jnp.complex128])
@pytest.mark.parametrize("ndim", [2, 3, 4])
def test_complex_truncated_normal(ndim, dtype):
    """Check shape/dtype/moments of _complex_truncated_normal and compare its
    distribution against rejection-sampled complex normals via a KS test."""
    np.random.seed(seed)
    key, rand_key = jax.random.split(nk.jax.PRNGKey(seed))
    # The lengths of the weight dimensions and the input dimension are random
    shape = tuple(np.random.randint(1, 10) for _ in range(ndim - 1))
    # The length of the output dimension is a statistically large number, but not too large that OOM
    len_out = int(10 ** 6 / np.prod(shape))
    shape += (len_out,)
    upper = 2
    # Expected std of the unit complex normal truncated at |z| = 2 --
    # presumably precomputed analytically; verify against the derivation
    # if this constant is ever touched.
    stddev = 0.96196182800821354
    param = _complex_truncated_normal(key, upper, shape, dtype)
    assert param.shape == shape
    assert param.dtype == dtype
    assert param.mean() == pytest.approx(0, abs=2e-3)
    assert param.std() == pytest.approx(stddev, abs=1e-3)
    # The truncation bound should be (almost) attained by the max magnitude.
    assert jnp.abs(param).max() == pytest.approx(upper, abs=1e-3)
    # Draw random samples using rejection sampling, and test if `param` and
    # `samples` are from the same distribution
    rand_shape = (10 ** 4,)
    rand_dtype = dtype_real(dtype)
    rand_stddev = 1 / sqrt(2)
    samples = jax.random.normal(rand_key, rand_shape, rand_dtype) * rand_stddev
    samples = samples[jnp.abs(samples) < upper]
    _, pvalue = kstest(param.flatten(), samples)
    assert pvalue > 0.01
| StarcoderdataPython |
4971823 | """
Display Hooks for pycairo, cairocffi Surfaces and Contexts.
"""
from io import BytesIO
from IPython.core import display
def display_cairo_surface(surface):
    """Displayhook for cairo surfaces: render the surface as embedded PNG."""
    buffer = BytesIO()
    surface.write_to_png(buffer)
    png_bytes = buffer.getvalue()
    ip_img = display.Image(data=png_bytes, format='png', embed=True)
    return ip_img._repr_png_()
def display_cairo_context(ctx):
    """Displayhook for cairo Contexts: render the context's target surface as PNG."""
    return display_cairo_surface(ctx.get_target())
def load_ipython_extension(ipython):
# register display func with PNG formatter:
png_formatter = get_ipython().display_formatter.formatters['image/png']
try:
import cairo
dpi = png_formatter.for_type(cairo.Surface, display_cairo_surface)
dpi = png_formatter.for_type(cairo.Context, display_cairo_context)
except ImportError:
pass
try:
import cairocffi
dpi = png_formatter.for_type(cairocffi.surfaces.ImageSurface, display_cairo_surface)
dpi = png_formatter.for_type(cairocffi.Surface, display_cairo_surface)
dpi = png_formatter.for_type(cairocffi.Context, display_cairo_context)
except ImportError:
pass
def unload_ipython_extension(ipython):
pass
| StarcoderdataPython |
102702 | <reponame>neale/CS-program
from __future__ import print_function
import sys
import numpy as np
from Layers import Linear, ReLU, Sigmoid
from Loss import CrossEntropy
class FullyConnected(object):
    """A 1-hidden-layer binary classifier (Linear -> ReLU -> Linear -> Sigmoid)
    trained with cross-entropy loss, built on the project's Layers/Loss modules.

    Call order implied by the method bodies: initLayers(), initAcivations(),
    initGradients(), then per batch forward(), collect_gradients(),
    update_weights().
    """
    def __init__(self, input_dims, hidden_units, batch_size):
        # Hyper-parameters and per-batch buffers; nOutput is fixed to 1
        # (single sigmoid output for binary classification).
        self.grad = None
        self.outputs = None
        self.dInput = input_dims
        self.nHidden = hidden_units
        self.nOutput = 1
        self.lossF = 0.
        self.lossG = 0.
        self.lr = .1
        self.momentum = 0.9
        self.batch_size = batch_size
    def initLayers(self):
        """Instantiate the layer objects (weights initialised with mean 0, std .1)."""
        # NOTE(review): the commented line passes (in, hidden); the live line
        # passes (hidden, in) -- presumably Linear takes (n_out, n_in).  Confirm
        # against the Layers module.
        #self.hiddenLayer = Linear(0, .1, self.dInput, self.nHidden)
        self.hiddenLayer = Linear(0, .1, self.nHidden, self.dInput)
        self.reluLayer = ReLU()
        self.outputLayer = Linear(0, .1, self.nOutput, self.nHidden)
        self.sigmoidLayer = Sigmoid()
        self.lossLayer = CrossEntropy()
    def initAcivations(self):
        """Allocate per-sample activation buffers for one batch."""
        self.startOfBatch = True
        self.hiddenActivations = np.zeros((self.batch_size, self.nOutput, self.nHidden))
        self.reluActivations = np.zeros((self.batch_size, self.nOutput, self.nHidden))
        self.outputActivations = np.zeros((self.batch_size, self.nOutput, self.nOutput))
        self.sigmoidActivations = np.zeros((self.batch_size, self.nOutput, self.nOutput))
        self.outputs = np.zeros((self.batch_size, self.nOutput))
    def initGradients(self):
        """Allocate per-sample gradient buffers for one batch."""
        self.startOfGrad = True
        self.lossG = np.zeros((self.batch_size, 1, self.nOutput ))
        self.gradSigmoid = np.zeros((self.batch_size, 1, self.nOutput ))
        self.gradOutput = np.zeros((self.batch_size, 1, self.nHidden ))
        self.gradRelu = np.zeros((self.batch_size, 1, self.nHidden ))
        self.gradHidden = np.zeros((self.batch_size, 1, self.dInput ))
    def forward(self, x, y):
        """Run the forward pass for every sample in batch x, recording all
        intermediate activations and the loss against labels y."""
        for i in range(len(x)):
            self.input = np.array(x[i])
            self.hiddenActivations[i] = self.hiddenLayer.forward(self.input)
            self.reluActivations[i] = self.reluLayer.forward(self.hiddenActivations[i])
            self.outputActivations[i] = self.outputLayer.forward(self.reluActivations[i].reshape(-1))
            self.sigmoidActivations[i] = self.sigmoidLayer.forward(self.outputActivations[i])
            self.outputs[i] = self.sigmoidActivations[i]
            self.loss(self.outputs[i], y[i])
    def loss(self, x, y):
        """Store the mean cross-entropy of prediction x against label y."""
        self.lossF = np.mean(self.lossLayer.forward(x, y))
    def collect_gradients(self, y):
        """Back-propagate through the network for every sample, filling the
        gradient buffers allocated by initGradients()."""
        for i in range(len(y)):
            self.lossG[i] = self.lossLayer.backward(self.outputs[i], y[i])
            self.gradSigmoid[i] = self.sigmoidLayer.backward(self.outputActivations[i], self.lossG[i])
            self.gradOutput[i] = self.outputLayer.backward(self.gradSigmoid[i].reshape(-1))
            self.gradRelu[i] = self.reluLayer.backward(self.hiddenActivations[i], self.gradOutput[i])
            self.gradHidden[i] = self.hiddenLayer.backward(self.gradRelu[i].reshape(-1))
    def update_weights(self, x, lr):
        """Apply one momentum-SGD step to both Linear layers."""
        self.outputLayer.update(self.reluActivations, self.gradSigmoid, lr, self.momentum)
        self.hiddenLayer.update(x, self.gradRelu, lr, self.momentum)
    def forward_single(self, x):
        """Forward a single sample without touching the batch buffers;
        returns the sigmoid output."""
        input = np.array(x)
        hidden = self.hiddenLayer.forward(input)
        relu = self.reluLayer.forward(hidden)
        output = self.outputLayer.forward(relu.reshape(-1))
        return self.sigmoidLayer.forward(output)
    def classify(self, prob):
        """Threshold a probability at 0.5 into a 0/1 label.
        NOTE(review): abs() makes values below -0.5 classify as 1 -- harmless
        for sigmoid outputs in [0, 1], but confirm that is the only caller."""
        if abs(prob) > .5: return 1
        else : return 0
| StarcoderdataPython |
12847897 | <filename>oscar/lib/python2.7/site-packages/whoosh/analysis/ngrams.py
# Copyright 2007 <NAME>. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
from whoosh.compat import text_type
from whoosh.compat import xrange
from whoosh.analysis.acore import Token
from whoosh.analysis.filters import Filter, LowercaseFilter
from whoosh.analysis.tokenizers import Tokenizer, RegexTokenizer
# Tokenizer
class NgramTokenizer(Tokenizer):
    """Splits input text into N-grams instead of words.
    >>> ngt = NgramTokenizer(4)
    >>> [token.text for token in ngt("hi there")]
    ["hi t", "i th", " the", "ther", "here"]
    Note that this tokenizer does NOT use a regular expression to extract
    words, so the grams emitted by it will contain whitespace, punctuation,
    etc. You may want to massage the input or add a custom filter to this
    tokenizer's output.
    Alternatively, if you only want sub-word grams without whitespace, you
    could combine a RegexTokenizer with NgramFilter instead.
    """
    __inittypes__ = dict(minsize=int, maxsize=int)
    def __init__(self, minsize, maxsize=None):
        """
        :param minsize: The minimum size of the N-grams.
        :param maxsize: The maximum size of the N-grams. If you omit
            this parameter, maxsize == minsize.
        """
        self.min = minsize
        # maxsize=None (or 0) falls back to minsize, giving fixed-size grams.
        self.max = maxsize or minsize
    def __eq__(self, other):
        # Tokenizers compare equal when they are the same class with the
        # same gram-size range (used by schema/field equality checks).
        if self.__class__ is other.__class__:
            if self.min == other.min and self.max == other.max:
                return True
        return False
    def __call__(self, value, positions=False, chars=False, keeporiginal=False,
                 removestops=True, start_pos=0, start_char=0, mode='',
                 **kwargs):
        assert isinstance(value, text_type), "%r is not unicode" % value
        inlen = len(value)
        # A single Token instance is reused and mutated for every yield;
        # consumers must copy it if they need to keep it.
        t = Token(positions, chars, removestops=removestops, mode=mode)
        pos = start_pos
        if mode == "query":
            # Query mode: emit only grams of the largest usable size, one
            # per start offset, so queries match what was indexed.
            size = min(self.max, inlen)
            for start in xrange(0, inlen - size + 1):
                end = start + size
                if end > inlen:
                    continue
                t.text = value[start:end]
                if keeporiginal:
                    t.original = t.text
                t.stopped = False
                if positions:
                    t.pos = pos
                if chars:
                    # Character offsets are relative to start_char (the
                    # position of `value` within the original text).
                    t.startchar = start_char + start
                    t.endchar = start_char + end
                yield t
                pos += 1
        else:
            # Index mode: emit every gram length between min and max at
            # every start offset.
            for start in xrange(0, inlen - self.min + 1):
                for size in xrange(self.min, self.max + 1):
                    end = start + size
                    if end > inlen:
                        continue
                    t.text = value[start:end]
                    if keeporiginal:
                        t.original = t.text
                    t.stopped = False
                    if positions:
                        t.pos = pos
                    if chars:
                        t.startchar = start_char + start
                        t.endchar = start_char + end
                    yield t
                    pos += 1
# Filter
class NgramFilter(Filter):
    """Splits token text into N-grams.

    >>> rext = RegexTokenizer()
    >>> stream = rext("hello there")
    >>> ngf = NgramFilter(4)
    >>> [token.text for token in ngf(stream)]
    ["hell", "ello", "ther", "here"]
    """

    __inittypes__ = dict(minsize=int, maxsize=int)

    def __init__(self, minsize, maxsize=None, at=None):
        """
        :param minsize: The minimum size of the N-grams.
        :param maxsize: The maximum size of the N-grams. If you omit this
            parameter, maxsize == minsize.
        :param at: If 'start', only take N-grams from the start of each word.
            if 'end', only take N-grams from the end of each word. Otherwise,
            take all N-grams from the word (the default).
        """
        self.min = minsize
        self.max = maxsize or minsize
        # Internal encoding of ``at``: -1 = start only, 1 = end only,
        # 0 = all grams.
        self.at = 0
        if at == "start":
            self.at = -1
        elif at == "end":
            self.at = 1

    def __eq__(self, other):
        # NOTE: ``self.at`` deliberately does not take part in equality here.
        return other and self.__class__ is other.__class__\
        and self.min == other.min and self.max == other.max

    def __call__(self, tokens):
        """Yield N-gram tokens derived from each incoming token's text.

        Tokens shorter than ``self.min`` are dropped. The incoming Token
        objects are mutated in place and re-yielded.
        """
        assert hasattr(tokens, "__iter__")
        at = self.at
        for t in tokens:
            text = t.text
            if len(text) < self.min:
                continue

            chars = t.chars
            if chars:
                startchar = t.startchar
            # Token positions don't mean much for N-grams,
            # so we'll leave the token's original position
            # untouched.

            if t.mode == "query":
                # Query mode: only one gram size, the largest that fits.
                size = min(self.max, len(t.text))
                if at == -1:
                    t.text = text[:size]
                    if chars:
                        t.endchar = startchar + size
                    yield t
                elif at == 1:
                    t.text = text[0 - size:]
                    if chars:
                        t.startchar = t.endchar - size
                    yield t
                else:
                    for start in xrange(0, len(text) - size + 1):
                        t.text = text[start:start + size]
                        if chars:
                            t.startchar = startchar + start
                            t.endchar = startchar + start + size
                        yield t
            else:
                if at == -1:
                    # Grams anchored at the start of the word, growing from
                    # self.min up to at most the word length.
                    limit = min(self.max, len(text))
                    for size in xrange(self.min, limit + 1):
                        t.text = text[:size]
                        if chars:
                            t.endchar = startchar + size
                        yield t
                elif at == 1:
                    # Grams anchored at the end of the word; remember the
                    # original start before the loop mutates t.startchar.
                    if chars:
                        original_startchar = t.startchar
                    start = max(0, len(text) - self.max)
                    for i in xrange(start, len(text) - self.min + 1):
                        t.text = text[i:]
                        if chars:
                            t.startchar = original_startchar + i
                        yield t
                else:
                    # All grams of every size in [self.min, self.max].
                    for start in xrange(0, len(text) - self.min + 1):
                        for size in xrange(self.min, self.max + 1):
                            end = start + size
                            if end > len(text):
                                continue
                            t.text = text[start:end]
                            if chars:
                                t.startchar = startchar + start
                                t.endchar = startchar + end
                            yield t
# Analyzers
def NgramAnalyzer(minsize, maxsize=None):
    """Composes an NgramTokenizer and a LowercaseFilter.

    >>> ana = NgramAnalyzer(4)
    >>> [token.text for token in ana("hi there")]
    ["hi t", "i th", " the", "ther", "here"]
    """
    gram_tokenizer = NgramTokenizer(minsize, maxsize=maxsize)
    return gram_tokenizer | LowercaseFilter()
def NgramWordAnalyzer(minsize, maxsize=None, tokenizer=None, at=None):
    """Composes a word tokenizer, a LowercaseFilter, and an NgramFilter.

    Unlike :func:`NgramAnalyzer`, the text is first split into words, so
    the emitted grams are sub-word grams without whitespace.

    :param minsize: The minimum size of the N-grams.
    :param maxsize: The maximum size of the N-grams; defaults to minsize.
    :param tokenizer: tokenizer used to split out words; any falsy value is
        replaced with a default RegexTokenizer.
    :param at: passed through to NgramFilter ("start", "end" or None).
    """
    word_tokenizer = tokenizer or RegexTokenizer()
    return word_tokenizer | LowercaseFilter() | NgramFilter(minsize, maxsize, at=at)
| StarcoderdataPython |
9779961 | DSN = "dbname=test user=test password=<PASSWORD> host=localhost" | StarcoderdataPython |
8037977 | <reponame>Irish-Gambit73/Python-Project
from playsound import playsound

# Play the track; playsound blocks until playback finishes by default.
playsound("jikan no seihou.mp3")
9634414 | import os.path
import torch.utils.data as data
from data.image_folder import make_dataset
from PIL import Image
import random
import torchvision.transforms as transforms
import numpy as np
import torch
class BaseDataset(data.Dataset):
    """Minimal base class for datasets in this project.

    Subclasses are expected to override ``name`` and ``initialize``.
    """

    def __init__(self):
        super().__init__()

    def name(self):
        """Return a human-readable identifier for this dataset."""
        return 'BaseDataset'

    def initialize(self, opt):
        """Hook for subclasses to consume options; no-op here."""
        pass
class RemovalDataset(BaseDataset):
    """Dataset of image triplets/quadruplets stored under ``opt.dataroot``.

    ``<phase>C`` always holds the input images. In train mode, ``<phase>A``
    and ``<phase>B`` hold additional images and ``<phase>W`` holds per-pixel
    weight arrays saved as ``.npy`` files.
    NOTE(review): the exact roles of A vs. B are not visible here -- confirm
    against the training code that consumes this dataset.
    """

    def initialize(self, opt):
        """Collect (and sort) the file lists for the requested phase."""
        self.opt = opt
        self.root = opt.dataroot
        self.phase = opt.phase
        self.dir_C = os.path.join(opt.dataroot, opt.phase + 'C')
        if opt.phase == 'train':
            self.dir_A = os.path.join(opt.dataroot, opt.phase + 'A')
            self.dir_B = os.path.join(opt.dataroot, opt.phase + 'B')
            self.dir_W = os.path.join(opt.dataroot, opt.phase + 'W')
        self.C_paths = make_dataset(self.dir_C)
        if opt.phase == 'train':
            self.A_paths = make_dataset(self.dir_A)
            self.B_paths = make_dataset(self.dir_B)
            self.W_paths = make_dataset(self.dir_W)
        # Sorting keeps A/B/C/W entries aligned by index across directories.
        self.C_paths = sorted(self.C_paths)
        if opt.phase == 'train':
            self.A_paths = sorted(self.A_paths)
            self.B_paths = sorted(self.B_paths)
            self.W_paths = sorted(self.W_paths)
        self.C_size = len(self.C_paths)

    # NOTE(review): get_transforms_0/get_transforms_1 are not referenced by
    # __getitem__ -- presumably kept for external callers; confirm before
    # removing.
    def get_transforms_0(self, img, i, j):
        # Fixed 256x256 crop at offset (i, j).
        img = transforms.functional.crop(img, i, j, 256, 256)
        return img

    def get_transforms_1(self, img):
        # Fixed 512x512 center crop.
        transform = transforms.CenterCrop(512)
        img = transform(img)
        return img

    def get_transforms_2(self, img):
        # PIL image -> tensor scaled to [-1, 1] per channel.
        transform_list = []
        transform_list.append(transforms.ToTensor())
        transform_list.append(transforms.Normalize((0.5, 0.5, 0.5),
                                                   (0.5, 0.5, 0.5)))
        transform = transforms.Compose(transform_list)
        img = transform(img)
        return img

    def __getitem__(self, index):
        """Return a dict with 'C' (+ 'A', 'B', 'W' in train mode) tensors."""
        C_path = self.C_paths[index]
        if self.opt.phase == 'train':
            # Modulo is defensive; C_size == len(C_paths) already bounds index.
            A_path = self.A_paths[index%self.C_size]
            B_path = self.B_paths[index%self.C_size]
            W_path = self.W_paths[index%self.C_size]
        C_img = Image.open(C_path).convert('RGB')
        if self.opt.phase == 'train':
            A_img = Image.open(A_path).convert('RGB')
            B_img = Image.open(B_path).convert('RGB')
            W_np = np.load(W_path)
        C = self.get_transforms_2(C_img)
        if self.opt.phase == 'train':
            A = self.get_transforms_2(A_img)
            B = self.get_transforms_2(B_img)
            # Weight arrays are reshaped to (3, loadSize, loadSize); this
            # assumes W_np has exactly that many elements.
            W = torch.from_numpy(W_np).view(3, self.opt.loadSize, self.opt.loadSize)
        if self.opt.phase == 'train':
            return {'A': A, 'B': B, 'C': C, 'W': W,
                    'C_path': C_path}
        return {'C': C, 'C_path': C_path}

    def __len__(self):
        return self.C_size

    def name(self):
        return 'RemovalDataset'
| StarcoderdataPython |
1764663 | # Copyright 2017 University of Chicago. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from multiprocessing.pool import ThreadPool
from blazar_tempest_plugin.tests.scenario import (
resource_reservation_scenario as rrs)
class TestReservationConcurrencyScenario(rrs.ResourceReservationScenarioTest):
    """A Scenario test class checking Blazar handles concurrent requests."""

    # Number of lease-list requests fired in parallel.
    MAX_CONCURRENCY = 10

    def setUp(self):
        super(TestReservationConcurrencyScenario, self).setUp()

    def tearDown(self):
        super(TestReservationConcurrencyScenario, self).tearDown()

    def test_concurrent_list_lease(self):
        # run lease-list requests in parallel to check service concurrency
        results = []
        pool = ThreadPool(self.MAX_CONCURRENCY)
        for i in range(0, self.MAX_CONCURRENCY):
            results.append(
                pool.apply_async(self.reservation_client.list_lease, ()))
        pool.close()
        pool.join()
        # .get() re-raises any exception raised inside a worker, so a failed
        # request fails the test here rather than being silently dropped.
        results = [r.get() for r in results]
        for r in results:
            self.assertEqual('200', r.response['status'])
12818651 | <filename>sdk/python/pulumi_aws/dms/endpoint.py<gh_stars>0
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
from .. import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['Endpoint']
class Endpoint(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
certificate_arn: Optional[pulumi.Input[str]] = None,
database_name: Optional[pulumi.Input[str]] = None,
elasticsearch_settings: Optional[pulumi.Input[pulumi.InputType['EndpointElasticsearchSettingsArgs']]] = None,
endpoint_id: Optional[pulumi.Input[str]] = None,
endpoint_type: Optional[pulumi.Input[str]] = None,
engine_name: Optional[pulumi.Input[str]] = None,
extra_connection_attributes: Optional[pulumi.Input[str]] = None,
kafka_settings: Optional[pulumi.Input[pulumi.InputType['EndpointKafkaSettingsArgs']]] = None,
kinesis_settings: Optional[pulumi.Input[pulumi.InputType['EndpointKinesisSettingsArgs']]] = None,
kms_key_arn: Optional[pulumi.Input[str]] = None,
mongodb_settings: Optional[pulumi.Input[pulumi.InputType['EndpointMongodbSettingsArgs']]] = None,
password: Optional[pulumi.Input[str]] = None,
port: Optional[pulumi.Input[float]] = None,
s3_settings: Optional[pulumi.Input[pulumi.InputType['EndpointS3SettingsArgs']]] = None,
server_name: Optional[pulumi.Input[str]] = None,
service_access_role: Optional[pulumi.Input[str]] = None,
ssl_mode: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
username: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Provides a DMS (Data Migration Service) endpoint resource. DMS endpoints can be created, updated, deleted, and imported.
> **Note:** All arguments including the password will be stored in the raw state as plain-text.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
# Create a new endpoint
test = aws.dms.Endpoint("test",
certificate_arn="arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012",
database_name="test",
endpoint_id="test-dms-endpoint-tf",
endpoint_type="source",
engine_name="aurora",
extra_connection_attributes="",
kms_key_arn="arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012",
password="<PASSWORD>",
port=3306,
server_name="test",
ssl_mode="none",
tags={
"Name": "test",
},
username="test")
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] certificate_arn: The Amazon Resource Name (ARN) for the certificate.
:param pulumi.Input[str] database_name: The name of the endpoint database.
:param pulumi.Input[pulumi.InputType['EndpointElasticsearchSettingsArgs']] elasticsearch_settings: Configuration block with Elasticsearch settings. Detailed below.
:param pulumi.Input[str] endpoint_id: The database endpoint identifier.
:param pulumi.Input[str] endpoint_type: The type of endpoint. Can be one of `source | target`.
:param pulumi.Input[str] engine_name: The type of engine for the endpoint. Can be one of `aurora | aurora-postgresql| azuredb | db2 | docdb | dynamodb | elasticsearch | kafka | kinesis | mariadb | mongodb | mysql | oracle | postgres | redshift | s3 | sqlserver | sybase`.
:param pulumi.Input[str] extra_connection_attributes: Additional attributes associated with the connection. For available attributes see [Using Extra Connection Attributes with AWS Database Migration Service](http://docs.aws.amazon.com/dms/latest/userguide/CHAP_Introduction.ConnectionAttributes.html).
:param pulumi.Input[pulumi.InputType['EndpointKafkaSettingsArgs']] kafka_settings: Configuration block with Kafka settings. Detailed below.
:param pulumi.Input[pulumi.InputType['EndpointKinesisSettingsArgs']] kinesis_settings: Configuration block with Kinesis settings. Detailed below.
:param pulumi.Input[str] kms_key_arn: The Amazon Resource Name (ARN) for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for `kms_key_arn`, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region.
:param pulumi.Input[pulumi.InputType['EndpointMongodbSettingsArgs']] mongodb_settings: Configuration block with MongoDB settings. Detailed below.
:param pulumi.Input[str] password: The password to be used to login to the endpoint database.
:param pulumi.Input[float] port: The port used by the endpoint database.
:param pulumi.Input[pulumi.InputType['EndpointS3SettingsArgs']] s3_settings: Configuration block with S3 settings. Detailed below.
:param pulumi.Input[str] server_name: The host name of the server.
:param pulumi.Input[str] service_access_role: The Amazon Resource Name (ARN) used by the service access IAM role for dynamodb endpoints.
:param pulumi.Input[str] ssl_mode: The SSL mode to use for the connection. Can be one of `none | require | verify-ca | verify-full`
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource.
:param pulumi.Input[str] username: The user name to be used to login to the endpoint database.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['certificate_arn'] = certificate_arn
__props__['database_name'] = database_name
__props__['elasticsearch_settings'] = elasticsearch_settings
if endpoint_id is None:
raise TypeError("Missing required property 'endpoint_id'")
__props__['endpoint_id'] = endpoint_id
if endpoint_type is None:
raise TypeError("Missing required property 'endpoint_type'")
__props__['endpoint_type'] = endpoint_type
if engine_name is None:
raise TypeError("Missing required property 'engine_name'")
__props__['engine_name'] = engine_name
__props__['extra_connection_attributes'] = extra_connection_attributes
__props__['kafka_settings'] = kafka_settings
__props__['kinesis_settings'] = kinesis_settings
__props__['kms_key_arn'] = kms_key_arn
__props__['mongodb_settings'] = mongodb_settings
__props__['password'] = password
__props__['port'] = port
__props__['s3_settings'] = s3_settings
__props__['server_name'] = server_name
__props__['service_access_role'] = service_access_role
__props__['ssl_mode'] = ssl_mode
__props__['tags'] = tags
__props__['username'] = username
__props__['endpoint_arn'] = None
super(Endpoint, __self__).__init__(
'aws:dms/endpoint:Endpoint',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
certificate_arn: Optional[pulumi.Input[str]] = None,
database_name: Optional[pulumi.Input[str]] = None,
elasticsearch_settings: Optional[pulumi.Input[pulumi.InputType['EndpointElasticsearchSettingsArgs']]] = None,
endpoint_arn: Optional[pulumi.Input[str]] = None,
endpoint_id: Optional[pulumi.Input[str]] = None,
endpoint_type: Optional[pulumi.Input[str]] = None,
engine_name: Optional[pulumi.Input[str]] = None,
extra_connection_attributes: Optional[pulumi.Input[str]] = None,
kafka_settings: Optional[pulumi.Input[pulumi.InputType['EndpointKafkaSettingsArgs']]] = None,
kinesis_settings: Optional[pulumi.Input[pulumi.InputType['EndpointKinesisSettingsArgs']]] = None,
kms_key_arn: Optional[pulumi.Input[str]] = None,
mongodb_settings: Optional[pulumi.Input[pulumi.InputType['EndpointMongodbSettingsArgs']]] = None,
password: Optional[pulumi.Input[str]] = None,
port: Optional[pulumi.Input[float]] = None,
s3_settings: Optional[pulumi.Input[pulumi.InputType['EndpointS3SettingsArgs']]] = None,
server_name: Optional[pulumi.Input[str]] = None,
service_access_role: Optional[pulumi.Input[str]] = None,
ssl_mode: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
username: Optional[pulumi.Input[str]] = None) -> 'Endpoint':
"""
Get an existing Endpoint resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] certificate_arn: The Amazon Resource Name (ARN) for the certificate.
:param pulumi.Input[str] database_name: The name of the endpoint database.
:param pulumi.Input[pulumi.InputType['EndpointElasticsearchSettingsArgs']] elasticsearch_settings: Configuration block with Elasticsearch settings. Detailed below.
:param pulumi.Input[str] endpoint_arn: The Amazon Resource Name (ARN) for the endpoint.
:param pulumi.Input[str] endpoint_id: The database endpoint identifier.
:param pulumi.Input[str] endpoint_type: The type of endpoint. Can be one of `source | target`.
:param pulumi.Input[str] engine_name: The type of engine for the endpoint. Can be one of `aurora | aurora-postgresql| azuredb | db2 | docdb | dynamodb | elasticsearch | kafka | kinesis | mariadb | mongodb | mysql | oracle | postgres | redshift | s3 | sqlserver | sybase`.
:param pulumi.Input[str] extra_connection_attributes: Additional attributes associated with the connection. For available attributes see [Using Extra Connection Attributes with AWS Database Migration Service](http://docs.aws.amazon.com/dms/latest/userguide/CHAP_Introduction.ConnectionAttributes.html).
:param pulumi.Input[pulumi.InputType['EndpointKafkaSettingsArgs']] kafka_settings: Configuration block with Kafka settings. Detailed below.
:param pulumi.Input[pulumi.InputType['EndpointKinesisSettingsArgs']] kinesis_settings: Configuration block with Kinesis settings. Detailed below.
:param pulumi.Input[str] kms_key_arn: The Amazon Resource Name (ARN) for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for `kms_key_arn`, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region.
:param pulumi.Input[pulumi.InputType['EndpointMongodbSettingsArgs']] mongodb_settings: Configuration block with MongoDB settings. Detailed below.
:param pulumi.Input[str] password: The password to be used to login to the endpoint database.
:param pulumi.Input[float] port: The port used by the endpoint database.
:param pulumi.Input[pulumi.InputType['EndpointS3SettingsArgs']] s3_settings: Configuration block with S3 settings. Detailed below.
:param pulumi.Input[str] server_name: The host name of the server.
:param pulumi.Input[str] service_access_role: The Amazon Resource Name (ARN) used by the service access IAM role for dynamodb endpoints.
:param pulumi.Input[str] ssl_mode: The SSL mode to use for the connection. Can be one of `none | require | verify-ca | verify-full`
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource.
:param pulumi.Input[str] username: The user name to be used to login to the endpoint database.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["certificate_arn"] = certificate_arn
__props__["database_name"] = database_name
__props__["elasticsearch_settings"] = elasticsearch_settings
__props__["endpoint_arn"] = endpoint_arn
__props__["endpoint_id"] = endpoint_id
__props__["endpoint_type"] = endpoint_type
__props__["engine_name"] = engine_name
__props__["extra_connection_attributes"] = extra_connection_attributes
__props__["kafka_settings"] = kafka_settings
__props__["kinesis_settings"] = kinesis_settings
__props__["kms_key_arn"] = kms_key_arn
__props__["mongodb_settings"] = mongodb_settings
__props__["password"] = password
__props__["port"] = port
__props__["s3_settings"] = s3_settings
__props__["server_name"] = server_name
__props__["service_access_role"] = service_access_role
__props__["ssl_mode"] = ssl_mode
__props__["tags"] = tags
__props__["username"] = username
return Endpoint(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="certificateArn")
def certificate_arn(self) -> pulumi.Output[str]:
"""
The Amazon Resource Name (ARN) for the certificate.
"""
return pulumi.get(self, "certificate_arn")
@property
@pulumi.getter(name="databaseName")
def database_name(self) -> pulumi.Output[Optional[str]]:
"""
The name of the endpoint database.
"""
return pulumi.get(self, "database_name")
@property
@pulumi.getter(name="elasticsearchSettings")
def elasticsearch_settings(self) -> pulumi.Output[Optional['outputs.EndpointElasticsearchSettings']]:
"""
Configuration block with Elasticsearch settings. Detailed below.
"""
return pulumi.get(self, "elasticsearch_settings")
@property
@pulumi.getter(name="endpointArn")
def endpoint_arn(self) -> pulumi.Output[str]:
"""
The Amazon Resource Name (ARN) for the endpoint.
"""
return pulumi.get(self, "endpoint_arn")
@property
@pulumi.getter(name="endpointId")
def endpoint_id(self) -> pulumi.Output[str]:
"""
The database endpoint identifier.
"""
return pulumi.get(self, "endpoint_id")
@property
@pulumi.getter(name="endpointType")
def endpoint_type(self) -> pulumi.Output[str]:
"""
The type of endpoint. Can be one of `source | target`.
"""
return pulumi.get(self, "endpoint_type")
@property
@pulumi.getter(name="engineName")
def engine_name(self) -> pulumi.Output[str]:
"""
The type of engine for the endpoint. Can be one of `aurora | aurora-postgresql| azuredb | db2 | docdb | dynamodb | elasticsearch | kafka | kinesis | mariadb | mongodb | mysql | oracle | postgres | redshift | s3 | sqlserver | sybase`.
"""
return pulumi.get(self, "engine_name")
@property
@pulumi.getter(name="extraConnectionAttributes")
def extra_connection_attributes(self) -> pulumi.Output[str]:
"""
Additional attributes associated with the connection. For available attributes see [Using Extra Connection Attributes with AWS Database Migration Service](http://docs.aws.amazon.com/dms/latest/userguide/CHAP_Introduction.ConnectionAttributes.html).
"""
return pulumi.get(self, "extra_connection_attributes")
@property
@pulumi.getter(name="kafkaSettings")
def kafka_settings(self) -> pulumi.Output[Optional['outputs.EndpointKafkaSettings']]:
"""
Configuration block with Kafka settings. Detailed below.
"""
return pulumi.get(self, "kafka_settings")
@property
@pulumi.getter(name="kinesisSettings")
def kinesis_settings(self) -> pulumi.Output[Optional['outputs.EndpointKinesisSettings']]:
"""
Configuration block with Kinesis settings. Detailed below.
"""
return pulumi.get(self, "kinesis_settings")
@property
@pulumi.getter(name="kmsKeyArn")
def kms_key_arn(self) -> pulumi.Output[str]:
"""
The Amazon Resource Name (ARN) for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for `kms_key_arn`, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region.
"""
return pulumi.get(self, "kms_key_arn")
@property
@pulumi.getter(name="mongodbSettings")
def mongodb_settings(self) -> pulumi.Output[Optional['outputs.EndpointMongodbSettings']]:
"""
Configuration block with MongoDB settings. Detailed below.
"""
return pulumi.get(self, "mongodb_settings")
@property
@pulumi.getter
def password(self) -> pulumi.Output[Optional[str]]:
"""
The password to be used to login to the endpoint database.
"""
return pulumi.get(self, "password")
@property
@pulumi.getter
def port(self) -> pulumi.Output[Optional[float]]:
"""
The port used by the endpoint database.
"""
return pulumi.get(self, "port")
@property
@pulumi.getter(name="s3Settings")
def s3_settings(self) -> pulumi.Output[Optional['outputs.EndpointS3Settings']]:
"""
Configuration block with S3 settings. Detailed below.
"""
return pulumi.get(self, "s3_settings")
@property
@pulumi.getter(name="serverName")
def server_name(self) -> pulumi.Output[Optional[str]]:
"""
The host name of the server.
"""
return pulumi.get(self, "server_name")
@property
@pulumi.getter(name="serviceAccessRole")
def service_access_role(self) -> pulumi.Output[Optional[str]]:
"""
The Amazon Resource Name (ARN) used by the service access IAM role for dynamodb endpoints.
"""
return pulumi.get(self, "service_access_role")
@property
@pulumi.getter(name="sslMode")
def ssl_mode(self) -> pulumi.Output[str]:
"""
The SSL mode to use for the connection. Can be one of `none | require | verify-ca | verify-full`
"""
return pulumi.get(self, "ssl_mode")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
A map of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def username(self) -> pulumi.Output[Optional[str]]:
"""
The user name to be used to login to the endpoint database.
"""
return pulumi.get(self, "username")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| StarcoderdataPython |
1851006 | <reponame>osgirl/python-veml6070
from setuptools import setup
setup(name='veml6070',
version='1.0',
url='http://github.com/cmur2/python-veml6070',
author='<NAME>',
description=' A python library for accessing the VEML6070 digital UV light sensor from Vishay.',
packages=['veml6070'],
long_description=open('README.md').read(),
requires=['python (>= 2.7)', 'smbus (>= 0.4.1)'],
install_requires=['smbus-cffi']) | StarcoderdataPython |
9654009 | # pylint: disable=missing-module-docstring,missing-function-docstring,unused-argument
from itertools import chain
from typing import List
import pytest
from _pytest.config import Config
from _pytest.config.exceptions import UsageError
from pytest_profiles.profile import profile
@pytest.mark.parametrize("profiles", [["single"], ["first", "second"]])
def test_addoption(pytester: pytest.Pytester, profiles: List[str]) -> None:
    """Repeated --profile flags should accumulate into a list of names."""
    # Each profile name becomes its own ("--profile", name) pair on the CLI.
    config = pytester.parseconfig(
        *chain.from_iterable(("--profile", profile) for profile in profiles)
    )
    assert config.option.profile == profiles
def test_addoption_usage_error(pytester: pytest.Pytester) -> None:
    """--profile requires an argument; omitting it is a CLI usage error."""
    with pytest.raises(UsageError):
        pytester.parseconfig("--profile")
def test_configure(pytester: pytest.Pytester) -> None:
    """A decorator-registered profile is applied when selected by name."""
    # Registering via @profile is a side effect; the function name doubles
    # as the profile's CLI name.
    @profile
    def set_verbose(config: Config) -> None:
        config.option.verbose = 1

    # Without the profile, default verbosity is untouched...
    config = pytester.parseconfigure()
    assert config.option.verbose == 0
    # ...and selecting the profile applies its config mutation.
    config = pytester.parseconfigure("--profile", "set_verbose")
    assert config.option.verbose == 1
| StarcoderdataPython |
8175709 | <filename>mvn_xsens_carla/carla_client.py
#!/usr/bin/env python3.6
import carla
import random
import time
from receive_from_xsens import get_data
# Xsens MVN segment id -> segment name, as delivered in the network stream.
# Note there is no entry for id 24; ids 25-28 are the optional "Prop" sensors.
SEGMENTS_IDS = {
    1: "Pelvis",
    2: "L5",
    3: "L3",
    4: "T12",
    5: "T8",
    6: "Neck",
    7: "Head",
    8: "Right Shoulder",
    9: "Right Upper Arm",
    10: "Right Forearm",
    11: "Right Hand",
    12: "Left Shoulder",
    13: "Left Upper Arm",
    14: "Left Forearm",
    15: "Left Hand",
    16: "Right Upper Leg",
    17: "Right Lower Leg",
    18: "Right Foot",
    19: "Right Toe",
    20: "Left Upper Leg",
    21: "Left Lower Leg",
    22: "Left Foot",
    23: "Left Toe",
    25: "Prop1",
    26: "Prop2",
    27: "Prop3",
    28: "Prop4",
}

# Xsens segment name -> CARLA walker bone name. Segments without an entry
# (toes, spine L5/L3/T12/T8, props) have no CARLA counterpart and raise
# KeyError in seg_to_carla, which the caller treats as "skip this segment".
SEG_TO_CARLA = {
    "Left Upper Leg": "crl_thigh__L",
    "Right Upper Leg": "crl_thigh__R",
    "Left Lower Leg": "crl_leg__L",
    "Right Lower Leg": "crl_leg__R",
    "Head": "crl_Head__C",
    "Pelvis": "crl_hips__C",
    # NOTE(review): "ctrl_shoulder__L" looks like a typo for
    # "crl_shoulder__L" (every other bone uses the "crl_" prefix) --
    # confirm against the CARLA walker skeleton.
    "Left Shoulder": "ctrl_shoulder__L",
    "Right Shoulder": "crl_shoulder__R",
    "Left Upper Arm": "crl_arm__L",
    "Right Upper Arm": "crl_arm__R",
    "Left Forearm": "crl_foreArm__L",
    "Right Forearm": "crl_foreArm__R",
    "Left Hand": "crl_hand__L",
    "Right Hand": "crl_hand__R",
    "Neck": "crl_neck__C",
}

# World-space point where the walker is spawned and to which the Xsens
# reference position is re-based.
START_POINT = dict(x=10, y=20, z=1)
# Segment id used as the body's reference point (7 == "Head").
MAIN_POINT = 7
def seg_to_carla(segment_id):
    """Map an Xsens segment id to its CARLA bone name.

    Raises KeyError when the id is unknown or the segment has no CARLA
    counterpart.
    """
    return SEG_TO_CARLA[SEGMENTS_IDS[segment_id]]
def get_location(poseEuler, segment_id, ref_point=None):
    """Build a carla.Location for one Xsens body segment.

    ``poseEuler`` is a datagram: a header item followed by per-segment items
    of the form [id, x, y, z, roll, pitch, yaw]. Positions arrive in
    centimetres and are converted to metres in a right-handed frame.
    When ``ref_point`` (a dict with x/y/z keys) is given, the position is
    re-based so that ``ref_point`` maps onto START_POINT.
    """
    _, *segments = poseEuler
    pos, *_ = [seg[1:4] for seg in segments if seg[0] == segment_id]
    x_cm, y_cm, z_cm = pos
    # cm -> m, then swap axes into the right-handed coordinate system.
    x, y, z = x_cm / 100, -(z_cm / 100), y_cm / 100
    if ref_point is None:
        return carla.Location(x, y, z)
    x_start, y_start, z_start = START_POINT.values()
    x_ref, y_ref, z_ref = ref_point.values()
    return carla.Location(x - x_ref + x_start,
                          y - y_ref + y_start,
                          z - z_ref + z_start)
def get_rotation(poseEuler, segment_id, ref_point=None):
    """Build a carla.Rotation for one Xsens body segment.

    Euler angles arrive in degrees and are remapped into CARLA's
    right-handed (pitch, yaw, roll) convention. When ``ref_point`` (a dict
    with pitch/yaw/roll keys) is given, the rotation is expressed relative
    to it.
    """
    _, *segments = poseEuler
    angles, *_ = [seg[4:7] for seg in segments if seg[0] == segment_id]
    roll_in, pitch_in, yaw_in = angles
    # Axis remap: degrees, right-handed coordinate system.
    roll, pitch, yaw = roll_in, -yaw_in, pitch_in
    if ref_point is None:
        return carla.Rotation(pitch, yaw, roll)
    pitch_ref, yaw_ref, roll_ref = ref_point.values()
    return carla.Rotation(pitch - pitch_ref,
                          yaw - yaw_ref,
                          roll - roll_ref)
def set_body_transform(poseEuler, walker, ref_point, rotation_ref_point):
    """Move the whole walker to the MAIN_POINT segment's pose.

    The walker's current z is preserved so the avatar is not pushed into or
    above the ground by the tracked head height.
    """
    location = get_location(poseEuler, MAIN_POINT, ref_point)
    location = carla.Location(
        x=location.x,
        y=location.y,
        z=walker.get_location().z
    )
    rotation = get_rotation(poseEuler, MAIN_POINT, rotation_ref_point)
    walker.set_transform(carla.Transform(location, rotation))
def main():
    """Stream Xsens MVN segment poses onto a CARLA walker's bones."""
    try:
        actor_list = []
        # Connect to a locally running CARLA server.
        client = carla.Client("localhost", 2000)
        client.set_timeout(5.0)
        world = client.get_world()
        blueprint_library = world.get_blueprint_library()
        location = carla.Location(**START_POINT)
        # Park the spectator camera slightly behind/above the spawn point.
        spectator = world.get_spectator()
        spectator.set_transform(
            carla.Transform(
                location + carla.Location(x=-5, z=1),
                carla.Rotation()
            )
        )
        blueprint = random.choice(blueprint_library.filter('walker.*'))
        walker = world.spawn_actor(blueprint, carla.Transform(location))
        actor_list.append(walker)
        # Use the first datagram to establish reference position/rotation of
        # the MAIN_POINT segment; subsequent frames are expressed relative
        # to these so the avatar starts at START_POINT.
        first_data = next(get_data())
        ref_point = get_location(first_data, MAIN_POINT)
        ref_point = dict(x=ref_point.x, y=ref_point.y, z=ref_point.z)
        rotation_ref_point = get_rotation(first_data, MAIN_POINT)
        rotation_ref_point = dict(pitch=rotation_ref_point.pitch, yaw=rotation_ref_point.yaw, roll=rotation_ref_point.roll)
        for poseEuler in get_data():
            _, *items = poseEuler
            seg_ids = [int(i[0]) for i in items]
            for seg_id in seg_ids:
                # set_body_transform(poseEuler, walker, ref_point, rotation_ref_point)
                try:
                    control = carla.WalkerBoneControl()
                    bone = seg_to_carla(seg_id)
                    transform = carla.Transform(
                        get_location(poseEuler, seg_id, ref_point),
                        get_rotation(poseEuler, seg_id, rotation_ref_point)
                    )
                    control.bone_transforms = [(bone, transform)]
                    walker.apply_control(control)
                except KeyError:
                    # Segment has no CARLA bone mapping; skip it.
                    continue
    finally:
        print('Destroying actors')
        for actor in actor_list:
            actor.destroy()
        print('Done')
if __name__ == "__main__":
main()
| StarcoderdataPython |
6581066 | <gh_stars>1-10
import importlib
import inspect
from copy import deepcopy
from typing import Dict, Callable, Any, List, Optional
from kallisticore.exceptions import UnknownModuleName, CouldNotFindFunction
from kallisticore.lib.credential import Credential
from kallisticore.lib.expectation import Expectation
from kallisticore.models.step import Step
from kallisticore.utils.singleton import Singleton
class KallistiFunctionCache(metaclass=Singleton):
    """Process-wide cache of resolved action callables.

    Keys are (module_name, function_name) tuples; the Singleton metaclass
    ensures every FunctionLoader shares the same cache instance.

    Bug fixed: add() and get() previously called _function_key with their
    arguments in opposite orders, so entries were stored under
    (module, function) but looked up under (function, module) — the cache
    never produced a hit and every lookup fell through to a module scan.
    """

    def __init__(self):
        # (module_name, function_name) -> callable
        self.functions = {}

    def add(self, module_name: str, function_name: str,
            function_implementation: Callable) -> None:
        """Store *function_implementation* under (module_name, function_name)."""
        key = self._function_key(module_name, function_name)
        self.functions[key] = function_implementation

    def get(self, module_name, function_name) -> Callable:
        """Return the cached callable, or None when not cached."""
        key = self._function_key(module_name, function_name)
        return self.functions.get(key, None)

    def _function_key(self, module_name: str, function_name: str):
        # Single key builder used by both add() and get(), with one argument
        # order, so the two can never disagree again.
        return module_name, function_name
class FunctionLoader:
    """Looks up Kallisti action functions inside a registered module tree."""

    def __init__(self, module_map: dict, module_name: str):
        """
        :param module_map: map of action modules
        :param module_name: the name of the module to search, e.g. "cf".
        """
        self._functions = KallistiFunctionCache()
        self._module_path = module_name
        self.module = FunctionLoader.get_module(module_map, self._module_path)

    def get_function(self, function_name: str) -> Callable:
        """Return the callable for *function_name*, consulting the cache first.

        On a cache miss the module (and any declared sub-modules) are
        scanned and the result is memoized for subsequent calls. Raises
        CouldNotFindFunction when nothing exports the name.
        """
        cached = self._functions.get(self._module_path, function_name)
        if cached:
            return cached
        resolved = self._find_function(function_name)
        self._functions.add(self._module_path, function_name, resolved)
        return resolved

    def _find_function(self, function_name: str) -> Callable:
        # Only names explicitly exported via __all__ count as actions.
        for candidate in self._get_modules_to_search():
            exported = getattr(candidate, "__all__", ())
            if function_name in exported:
                return getattr(candidate, function_name)
        raise CouldNotFindFunction(self._module_path + "." + function_name)

    def _get_modules_to_search(self) -> list:
        search_space = [self.module]
        if hasattr(self.module, "__actions_modules__"):
            search_space += self._get_sub_modules_to_search()
        return search_space

    def _get_sub_modules_to_search(self) -> list:
        # __actions_modules__ lists dotted module names to import lazily.
        return [importlib.import_module(name)
                for name in getattr(self.module, "__actions_modules__")]

    @staticmethod
    def get_module(module_map: dict, namespace: str):
        """Import and return the module registered under *namespace*.

        Raises UnknownModuleName when the namespace is not in the map.
        """
        module_name = module_map.get(namespace)
        if not module_name:
            raise UnknownModuleName(namespace)
        return importlib.import_module(module_name)
class Action:
    """Executable wrapper around one experiment step's function call."""

    # Loader class used to resolve the step's function; subclasses may override.
    func_loader_class = FunctionLoader

    @classmethod
    def build(cls, step: Step, action_module_map: dict,
              credential_class_map: dict):
        """Construct an Action from a Step specification.

        :param step: Object of type Step
        :param action_module_map: Map of action modules
        :param credential_class_map: Map of credential classes
        :return: a new instance of *cls* ready to execute
        """
        description = step.description
        # deepcopy so popping 'credentials' below never mutates the Step.
        arguments = deepcopy(step.where)
        module_name = step.get_namespace()
        function_name = step.get_function_name()
        func_loader = cls.func_loader_class(action_module_map, module_name)
        module_func = func_loader.get_function(function_name)
        credential = None
        if 'credentials' in arguments:
            # 'credentials' is meta-config, not a function argument.
            cred_dict = arguments.pop('credentials')
            credential = Credential.build(credential_class_map, cred_dict)
        expectations = []
        for expect_spec in step.expect:
            expectations.append(Expectation.build(expect_spec))
        return cls(module_func, arguments, expectations, description,
                   credential)

    def __init__(self, module_func: Callable, arguments: Dict,
                 expectations: Optional[List[Expectation]] = None,
                 name: str = None, credential: Credential = None):
        """
        :param module_func: Action module function (or a class; see below)
        :param arguments: Arguments required by function to be executed
        :param expectations: Expectation of action's result
        :param name: Description for the action to be execute
        :param credential: Holds credential required for action to be executed
        :type credential: Credential
        """
        self.expectations = expectations if expectations else []
        self.name = name
        self.func = module_func
        if inspect.isclass(self.func):
            # Class-based actions: instantiate now with the arguments and
            # execute() later takes none.
            self.func = self.func(**arguments).execute
            self.arguments = {}
        else:
            self.arguments = arguments
        self.credential = credential

    def execute(self) -> Any:
        """Run the action's function and check expectations on its result.

        Exceptions from the function or from expectation checks propagate
        to the caller.

        :return: whatever the underlying action function returns
        """
        result = self.func(**self.arguments)
        self.check_result_for_expectations(result)
        return result

    def check_result_for_expectations(self, result):
        # Each Expectation raises on mismatch; order follows the spec.
        for expect_spec in self.expectations:
            expect_spec.execute(result)
def make_action(step: Step, action_module_map: dict,
                credential_class_map: dict) -> Action:
    """Create the Action for *step*, honouring per-module action classes.

    The step's namespace module may declare __action_class__ to substitute
    a specialised Action subclass; otherwise the base Action is used.

    :param step: the action specification in json
        eg.g. {"step":"", "do":"", "where":{}}
    :param action_module_map: Action module map
    :param credential_class_map: Credential class map
    :returns a Kallisti Action object.
    """
    namespace = step.get_namespace()
    target_module = FunctionLoader.get_module(action_module_map, namespace)
    chosen_class = getattr(target_module, '__action_class__', Action)
    return chosen_class.build(step, action_module_map, credential_class_map)
| StarcoderdataPython |
1689063 | # To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
# Dependencies
import numpy as np
import pandas as pd
import requests
import unidecode
import datetime
import dateutil
import subprocess
import sys
import json
import tempfile
import os
import re
# Install missing dependencies
def install(package):
    """Install *package* into the current interpreter's environment via pip.

    Raises subprocess.CalledProcessError when pip exits non-zero.
    """
    pip_command = [sys.executable, "-m", "pip", "install", package]
    subprocess.check_call(pip_command)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
#for dirname, _, filenames in os.walk('/kaggle/input'):
# for filename in filenames:
# print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# %% [markdown]
# ---
# %% [markdown]
# # Colombia Covid19 Time Line
# Dataset obtained from [Instituto Nacional de Salud](https://www.ins.gov.co/Noticias/Paginas/Coronavirus.aspx) daily report Covid19 from Colombia.
#
# You can get the official dataset here:
# [INS - Official Report](https://www.datos.gov.co/Salud-y-Protecci-n-Social/Casos-positivos-de-COVID-19-en-Colombia/gt2j-8ykr)
#
# The number of new cases are increasing day by day around the world.
# This dataset has information about reported cases from 32 Colombia departments.
#
# Also you can get the dataset Google COVID-19 Community Mobility Reports - Colombia.
#
# You can view and collaborate to the analysis here:
# [colombia_covid_19_analysis](https://www.kaggle.com/sebaxtian/colombia-covid-19-analysis) Kaggle Notebook Kernel.
# %% [markdown]
# ---
# %% [markdown]
# ## Data Sources
# %%
# Input data files are available in the "../input/" directory.
# NOTE(review): INPUT_DIR is computed but never used below — the CSVs are
# read from OUTPUT_DIR, presumably produced by an earlier pipeline stage.
INPUT_DIR = './'
if os.path.split(os.path.abspath('.'))[-1] == 'src':
    INPUT_DIR = '../input'
# Output data files are available in the "../output/" directory.
OUTPUT_DIR = './'
if os.path.split(os.path.abspath('.'))[-1] == 'src':
    OUTPUT_DIR = '../output'
# %%
# Covid19 Colombia Dataset (one row per reported case).
covid19co = pd.read_csv(os.path.join(OUTPUT_DIR, 'covid19co.csv'))
# Total Covid19 Colombia — (rows, columns), echoed by the notebook cell.
covid19co.shape
# %%
# Covid19 Colombia Samples Processed Dataset
covid19co_samples_processed = pd.read_csv(os.path.join(OUTPUT_DIR, 'covid19co_samples_processed.csv'))
# Total Covid19 Colombia Samples Processed
covid19co_samples_processed.shape
# %% [markdown]
# ---
# %% [markdown]
# ## Official Date Report
# %%
# URL Bogota Date
URL_BOGOTA_TIME = 'http://worldtimeapi.org/api/timezone/America/Bogota'
# Get Bogota Date (local date in Bogota, used as a fallback below).
with requests.get(URL_BOGOTA_TIME) as bogota_time:
    bogota_time = bogota_time.json()
# Bogota Date
#print(bogota_time)
bogota_date = datetime.date.fromtimestamp(bogota_time['unixtime']).isoformat()
print('Bogota Date:', bogota_date)
try:
    # URL Date Report — infogram feed embedding the INS daily report date.
    URL_DATE_REPORT = 'https://e.infogram.com/api/live/flex/efcb7f88-4bd0-4e26-a497-14ae28f6d199/a90dbc02-108d-44be-8178-b1eb6ea1fdd9?'
    # Get Official Date Report
    with requests.get(URL_DATE_REPORT) as official_date_report:
        official_date_report = official_date_report.json()
    # Official Date Report
    #print(official_date_report['data'][0][1][0])
    official_date_report = official_date_report['data'][0][1][0]
    #print(official_date_report)
    # Date Format: 5th whitespace-separated token, 'DD-MM-YYYY'.
    date_format = official_date_report.split(' ')[4].split('-')
    # YEAR-MONTH-DAY
    official_date_report = datetime.date(int(date_format[2]), int(date_format[1]), int(date_format[0]))
except Exception:
    # Was a bare `except:` — narrowed so SystemExit/KeyboardInterrupt are not
    # swallowed. Any network/parse failure falls back to Bogota's date.
    # NOTE(review): the fallback is an ISO string while the happy path yields
    # a datetime.date; pd.date_range(end=...) accepts both.
    official_date_report = bogota_date
# Print
print('Official Date Report:', official_date_report)
# %% [markdown]
# ---
# %% [markdown]
# ## Time Line Reported, Recupered and Deceased
# %%
# Show dataframe (last rows of the per-case dataset).
covid19co.tail()
# %%
# Get Time Line
def get_time_line(dfreport):
    """Build a daily time line for the cases in *dfreport*.

    Returns a DataFrame with columns:
      date  - 'dd/mm/YYYY' strings from 2020-03-01 through the module-level
              ``official_date_report`` (read as a global).
      total - count of rows whose 'FECHA REPORTE WEB' equals that date
              (0 for days with no reports).
      accum - running cumulative sum of ``total``.
    """
    # Time Line [date, total, accum]
    dfreport_time_line = pd.DataFrame(columns=['date', 'total', 'accum'])
    dfreport_time_line['date'] = [dti.strftime('%d/%m/%Y') for dti in pd.date_range(start='2020-03-01', end=official_date_report, freq='D')]
    # Total by Date
    total_by_date = {}
    # Group by 'FECHA REPORTE WEB'
    group_by_date = dfreport.groupby(['FECHA REPORTE WEB'], sort=False)
    # For each date, count case ids reported that day.
    for date_report in group_by_date.groups.keys():
        total_by_date[date_report] = group_by_date.get_group(date_report)['ID DE CASO'].count()
    # Update Total by Date (0 when a calendar day had no reports).
    dfreport_time_line['total'] = dfreport_time_line['date'].transform(lambda date: total_by_date[date] if date in total_by_date else 0)
    # Update Accumulative Sum Cases Reported by Date
    dfreport_time_line['accum'] = dfreport_time_line['total'].cumsum()
    # Drop the last one if doesn't have total
    #index_empty = dfreport_time_line[dfreport_time_line['date'] == datetime.date.today().strftime('%d/%m/%Y')]
    #index_empty = index_empty[index_empty['total'] == 0].index
    #dfreport_time_line.drop(index_empty, inplace=True)
    # Return
    return dfreport_time_line
# %%
# Get Reported Time Line (all cases, nationwide).
reported_time_line = get_time_line(covid19co)
# Rename columns
reported_time_line.columns = ['date', 'total_reported', 'accum_reported']
# Show dataframe
reported_time_line.tail()
# %%
# Get Recupered Time Line
dfrecupered = covid19co[covid19co['ATENCION'] == 'Recuperado']
# Get Recupered Time Line
recupered_time_line = get_time_line(dfrecupered)
# Rename columns
recupered_time_line.columns = ['date_recupered', 'total_recupered', 'accum_recupered']
# Show dataframe
recupered_time_line.tail()
# %%
# Get Deceased Time Line
dfdeceased = covid19co[covid19co['ATENCION'] == 'Fallecido']
# Get Deceased Time Line
deceased_time_line = get_time_line(dfdeceased)
# Rename columns
deceased_time_line.columns = ['date_deceased', 'total_deceased', 'accum_deceased']
# Show dataframe
deceased_time_line.tail()
# %%
# Merge Time Lines — the three frames share the same date index by
# construction (same start/end), so a column-wise concat lines up.
covid19co_time_line = pd.concat([reported_time_line, recupered_time_line, deceased_time_line], axis=1, sort=False)
# Delete Columns (duplicate date columns from the renamed frames).
covid19co_time_line.drop(columns=['date_recupered', 'date_deceased'], inplace=True)
# Show dataframe
covid19co_time_line.tail()
# %% [markdown]
# ## Time Line Reported, Recupered and Deceased
# > ***Output file***: covid19co_time_line.csv
# %%
# Save dataframe
covid19co_time_line.to_csv(os.path.join(OUTPUT_DIR, 'covid19co_time_line.csv'), index=False)
# %% [markdown]
# ---
# %% [markdown]
# ## Time Line Reported, Recupered and Deceased by City
# %%
# List of Cities (unique values; order is arbitrary because of set()).
cities = list(set(covid19co['CIUDAD DE UBICACION'].values))
# Show total cities
len(cities)
# %%
# Time Line by City
time_line_by_city = {}
# For each city
# NOTE(review): this loop rebinds covid19co_time_line, clobbering the
# nationwide frame built above — harmless here because the national CSV was
# already written, but fragile if cells are re-ordered.
for city in cities:
    # Filter by City
    covid19co_city = covid19co[covid19co['CIUDAD DE UBICACION'] == city]
    # Get Reported Time Line
    reported_time_line = get_time_line(covid19co_city)
    # Rename columns
    reported_time_line.columns = ['date', 'total_reported', 'accum_reported']
    # Get Recupered Time Line
    dfrecupered = covid19co_city[covid19co_city['ATENCION'] == 'Recuperado']
    # Get Recupered Time Line
    recupered_time_line = get_time_line(dfrecupered)
    # Rename columns
    recupered_time_line.columns = ['date_recupered', 'total_recupered', 'accum_recupered']
    # Get Deceased Time Line
    dfdeceased = covid19co_city[covid19co_city['ATENCION'] == 'Fallecido']
    # Get Deceased Time Line
    deceased_time_line = get_time_line(dfdeceased)
    # Rename columns
    deceased_time_line.columns = ['date_deceased', 'total_deceased', 'accum_deceased']
    # Merge Time Lines
    covid19co_time_line = pd.concat([reported_time_line, recupered_time_line, deceased_time_line], axis=1, sort=False)
    # Delete Columns
    covid19co_time_line.drop(columns=['date_recupered', 'date_deceased'], inplace=True)
    # Create key city: strip accents, drop non-alphanumerics, TitleCase, no spaces.
    key_city = ''.join(x for x in re.sub('[^A-Za-z0-9 ]+', '', unidecode.unidecode(city)).title() if not x.isspace())
    # Add to dict
    time_line_by_city[key_city] = covid19co_time_line
# %%
# Show time line by city keys
#list(time_line_by_city.keys())
# %%
# Show dataframe
time_line_by_city['Cali'].tail()
# %% [markdown]
# ## Time Line Reported, Recupered and Deceased by City
# > ***Output file***: covid19co_time_line_{key_city}.csv
# %%
for key_city in time_line_by_city:
    # Save dataframe
    time_line_by_city[key_city].to_csv(os.path.join(OUTPUT_DIR, 'covid19co_time_line_' + key_city + '.csv'), index=False)
# %% [markdown]
# ---
# %% [markdown]
# ## Time Line Reported, Recupered and Deceased by Department
# %%
# List of Departments
# NOTE(review): the column name 'DEPARTAMENTO O DISTRITO ' has a trailing
# space — intentional, it matches the source CSV header.
departs = list(set(covid19co['DEPARTAMENTO O DISTRITO '].values))
# Show total departments
len(departs)
# %%
# Time Line by Department (same shape as the per-city loop above).
time_line_by_depto = {}
# For each deparment
for deparment in departs:
    # Filter by Department
    covid19co_depto = covid19co[covid19co['DEPARTAMENTO O DISTRITO '] == deparment]
    # Get Reported Time Line
    reported_time_line = get_time_line(covid19co_depto)
    # Rename columns
    reported_time_line.columns = ['date', 'total_reported', 'accum_reported']
    # Get Recupered Time Line
    dfrecupered = covid19co_depto[covid19co_depto['ATENCION'] == 'Recuperado']
    # Get Recupered Time Line
    recupered_time_line = get_time_line(dfrecupered)
    # Rename columns
    recupered_time_line.columns = ['date_recupered', 'total_recupered', 'accum_recupered']
    # Get Deceased Time Line
    dfdeceased = covid19co_depto[covid19co_depto['ATENCION'] == 'Fallecido']
    # Get Deceased Time Line
    deceased_time_line = get_time_line(dfdeceased)
    # Rename columns
    deceased_time_line.columns = ['date_deceased', 'total_deceased', 'accum_deceased']
    # Merge Time Lines
    covid19co_time_line = pd.concat([reported_time_line, recupered_time_line, deceased_time_line], axis=1, sort=False)
    # Delete Columns
    covid19co_time_line.drop(columns=['date_recupered', 'date_deceased'], inplace=True)
    # Create key depto: strip accents, drop non-alphanumerics, TitleCase, no spaces.
    key_depto = ''.join(x for x in re.sub('[^A-Za-z0-9 ]+', '', unidecode.unidecode(deparment)).title() if not x.isspace())
    # Add to dict
    time_line_by_depto[key_depto] = covid19co_time_line
# %%
# Show time line by deparment keys
#list(time_line_by_depto.keys())
# %%
# Show dataframe
time_line_by_depto['ValleDelCauca'].tail()
# %% [markdown]
# ## Time Line Reported, Recupered and Deceased by Department
# > ***Output file***: covid19co_time_line_{key_depto}.csv
# %%
for key_depto in time_line_by_depto:
    # Save dataframe
    time_line_by_depto[key_depto].to_csv(os.path.join(OUTPUT_DIR, 'covid19co_time_line_' + key_depto + '.csv'), index=False)
# %% [markdown]
# ---
# %% [markdown]
# ## Time Line Samples Processed
# %%
# Show dataframe
covid19co_samples_processed.head()
# %%
# Rename columns
covid19co_samples_processed.columns = ['date', 'accum_samples']
# Fill NaN (days with no published figure become 0).
covid19co_samples_processed['accum_samples'].fillna(0, inplace=True)
# Update column type
covid19co_samples_processed['accum_samples'] = covid19co_samples_processed['accum_samples'].astype('int64')
# Show dataframe
covid19co_samples_processed.head()
# %%
# Time Line [date, accum]
covid19co_samples_time_line = pd.DataFrame(columns=['date', 'accum'])
covid19co_samples_time_line['date'] = [dti.strftime('%d/%m/%Y') for dti in pd.date_range(start='2020-03-01', end=official_date_report, freq='D')]
# Get Accumulative Samples
def get_accum(date_sample):
    """Return the accumulated sample count published for *date_sample*, or 0.

    Looks the 'dd/mm/YYYY' string up in the module-level
    ``covid19co_samples_processed`` frame (read as a global).
    """
    accum = covid19co_samples_processed[covid19co_samples_processed['date'] == date_sample]['accum_samples'].values
    return accum[0] if len(accum) > 0 else 0
# Update accum
covid19co_samples_time_line['accum'] = covid19co_samples_time_line['date'].transform(lambda value: get_accum(value))
# Add samples without date
#covid19co_samples_time_line.iloc[2] = list(covid19co_samples_processed.iloc[0])
# Show dataframe
covid19co_samples_time_line.tail()
# %% [markdown]
# ## Time Line Samples Processed
# > ***Output file***: covid19co_samples_time_line.csv
# %%
# Save dataframe
covid19co_samples_time_line.to_csv(os.path.join(OUTPUT_DIR, 'covid19co_samples_time_line.csv'), index=False)
# %% [markdown]
# ---
# %%
| StarcoderdataPython |
4960776 | import random
from collections import defaultdict, Counter
# full credits to <NAME>!
# source: https://eli.thegreenplace.net/2018/elegant-python-code-for-a-markov-chain-text-generator/
def generate(file):
    """Train a character-level Markov chain on *file* and write a sample.

    Credits: Eli Bendersky,
    https://eli.thegreenplace.net/2018/elegant-python-code-for-a-markov-chain-text-generator/

    Samples 500,000 characters, drops the (likely partial) first line, and
    writes the rest to 'generated.txt' in the working directory. Returns
    the closed output file object, matching the original interface.

    Fixes: renamed locals that shadowed the builtin ``next`` and the
    ``file`` parameter; removed a redundant ``f.close()`` inside ``with``;
    per-character write loop replaced by a single join+write.
    """
    STATE_LEN = 10
    with open(file, 'r', encoding='utf-8') as source:
        data = source.read()

    model = defaultdict(Counter)
    print('Learning model...')
    # For every STATE_LEN-character window, count the characters that follow it.
    for i in range(len(data) - STATE_LEN):
        state = data[i:i + STATE_LEN]
        next_char = data[i + STATE_LEN]
        model[state][next_char] += 1

    print('Sampling...')
    state = random.choice(list(model))
    out = list(state)
    for _ in range(500000):
        # Weighted draw over the successors observed for the current state.
        out.extend(random.choices(list(model[state]), model[state].values()))
        state = state[1:] + out[-1]
    print("Finished sampling")

    # Skip everything up to and including the first newline.
    skip_first_line = out.index('\n') + 1
    with open('generated.txt', 'w', encoding='utf-8') as f:
        # One buffered write instead of 500k single-character writes.
        f.write(''.join(out[skip_first_line:]))
    return f
| StarcoderdataPython |
1755680 | """Given a target image and a directory of source images, makes a photomosaic.
"""
import argparse
import cv2
import numpy as np
import os
import random
import sys
from glob import glob
from scipy.spatial import cKDTree
from skimage import color
def get_cell_tree(cell_images):
    """Build a KD-tree over the mean colour of each cell image.

    cell_images: sequence of HxWx3 arrays (Lab colour space per the caller).
    The i-th tree point is (mean ch0, mean ch1, mean ch2) of cell_images[i],
    enabling fast nearest-colour lookups via cKDTree.query.

    Fix: the original passed a bare ``zip(...)`` to cKDTree, which only
    works on Python 2 where zip returns a list; the points are now
    materialized so the code runs on Python 3 as well.
    """
    mean_colors = [[np.mean(im[:, :, c]) for c in range(3)]
                   for im in cell_images]
    return cKDTree(mean_colors)
def read_images(image_dir, size):
    """Load every image in *image_dir*, resize to *size*, convert to Lab.

    size is (width, height) as expected by cv2.resize. Returns a list of
    Lab float arrays; a directory with no matching files yields [].

    Fix: the original used the bare builtin ``reduce`` to flatten the glob
    results, which only exists on Python 2; a flattening comprehension is
    equivalent (same file order) and works on both major versions.
    """
    extensions = ["bmp", "jpeg", "jpg", "png", "tif", "tiff", "JPEG"]
    search_paths = [os.path.join(image_dir, '*.' + ext) for ext in extensions]
    image_files = [f for matches in map(glob, search_paths) for f in matches]
    return [color.rgb2lab(cv2.resize(cv2.imread(f), size, interpolation=cv2.INTER_AREA))
            for f in image_files]
def main(argv=None):
    """Build a photomosaic of the target image from a directory of sources.

    NOTE(review): this module is Python 2 code — bare ``print`` statement
    below and integer division for cell sizes; do not run under Python 3
    without porting.
    """
    if argv is not None:
        sys.argv = argv
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('target', help='the target image to produce')
    parser.add_argument('source', help='directory of images used to make mosaic')
    parser.add_argument('-r', '--num-rows', type=int, default=50,
                        help='number of rows of source images in final mosaic')
    parser.add_argument('-c', '--num-cols', type=int, default=100,
                        help='number of columns of source images in final mosaic')
    parser.add_argument('-d', '--repeat-distance', type=int, default=10,
                        help='minimum distance between repeating images')
    parser.add_argument('-a', '--alpha', type=float, default=0.25,
                        help='amount of cell colorization to perform')
    parser.add_argument('-o', '--output-filename',
                        default=os.path.join(os.getcwd(), 'mosaic.png'),
                        help='the output photomosaic image')
    parser.add_argument('-m', '--output-size-multiplier', type=int, default=2,
                        help='amount to multiply size of output image')
    args = parser.parse_args()
    # k nearest candidates per cell: enough to guarantee one candidate falls
    # outside the (2d+1)^2 exclusion window around the cell.
    k = 2 * (args.repeat_distance**2 + args.repeat_distance)
    target_image = cv2.imread(args.target)
    target_size = (args.output_size_multiplier * target_image.shape[1],
                   args.output_size_multiplier * target_image.shape[0])
    target_image = cv2.resize(target_image, target_size, cv2.INTER_CUBIC)
    # Integer division (Python 2): truncates to whole pixels per cell.
    cell_height = target_image.shape[0] / args.num_rows
    cell_width = target_image.shape[1] / args.num_cols
    cell_images = read_images(args.source, (cell_width, cell_height))
    assert len(cell_images) >= (2 * args.repeat_distance + 1)**2, \
        "Not enough images in source directory for specified repeat distance."
    cell_tree = get_cell_tree(cell_images)
    # Snap the target to an exact grid of cells, then work in Lab space.
    target_size = (args.num_cols * cell_width, args.num_rows * cell_height)
    target_image = cv2.resize(target_image, target_size, cv2.INTER_CUBIC)
    target_image = color.rgb2lab(target_image)
    output_image = np.zeros_like(target_image)
    num_complete = 0
    num_total = args.num_rows * args.num_cols
    # -1 marks "no source image placed here yet".
    used_indices = np.full((args.num_rows, args.num_cols), -1, dtype=int)
    for row in range(args.num_rows):
        i = row * cell_height
        first_row = max(row - args.repeat_distance, 0)
        for col in range(args.num_cols):
            j = col * cell_width
            first_col = max(col - args.repeat_distance, 0)
            last_col = min(col + args.repeat_distance, used_indices.shape[1] - 1)
            # Window of already-placed images that the chosen image must avoid.
            nearby_used_indices = used_indices[first_row:row+1,first_col:last_col+1]
            target_window = target_image[i:i+cell_height,j:j+cell_width,:]
            target_color = [np.mean(target_window[:,:,x]) for x in range(3)]
            # Closest-colour candidate not used within the repeat distance.
            best_index = [index for index in cell_tree.query(target_color, k=k)[1]
                          if index not in nearby_used_indices][0]
            best_match = cell_images[best_index]
            # Blend alpha of the target cell colour into the chosen image.
            for x in range(3):
                output_image[i:i+cell_height,j:j+cell_width,x] = (
                    (args.alpha * target_color[x] + (1.0 - args.alpha) * best_match[:,:,x]))
            used_indices[row,col] = best_index
            num_complete += 1
            sys.stdout.flush()
            sys.stdout.write("\r%.2f%% complete" % (100.0 * num_complete / num_total))
    print
    output_image = 255 * color.lab2rgb(output_image)
    cv2.imwrite(args.output_filename, output_image)
if __name__ == '__main__':
    # Propagate main()'s return value as the process exit status.
    sys.exit(main())
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.