id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
1787707 | <gh_stars>0
from . import *
from .window import *
from .keycodes import *
from .keyboard import *
from .timer import *
from .output import *
from .ui import *
from .audio import *
| StarcoderdataPython |
3328180 | <filename>move_ur_action_server.py
#!/usr/bin/env python
# Copyright 2018 NoMagic Sp. z o.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import actionlib
import rospy
import tf.transformations as trans
from nomagic_ur_driver.msg import MoveURAction, MoveURGoal, MoveURResult
def _joints2command(command, joints):
command.input_double_register_0 = joints[0]
command.input_double_register_1 = joints[1]
command.input_double_register_2 = joints[2]
command.input_double_register_3 = joints[3]
command.input_double_register_4 = joints[4]
command.input_double_register_5 = joints[5]
class MoveURActionServer:
    """Action server exposing UR robot motions on the '/move_robot' action.

    Translates an incoming MoveURGoal into the driver's input registers and
    forwards it through the supplied command sender.
    """

    def __init__(self, command_sender):
        """Start the SimpleActionServer.

        Args:
            command_sender: object owning the reusable ``command`` message and
                a ``send_command(keep_running)`` method.
        """
        self._command_sender = command_sender
        self._as = actionlib.SimpleActionServer('/move_robot', MoveURAction,
                                                execute_cb=self.execute_cb,
                                                auto_start=False)
        self._as.start()

    def _fill_pose_registers(self, command, goal):
        """Write the goal pose (xyz + RPY) and motion limits into registers 0-7.

        Returns:
            The (roll, pitch, yaw) tuple derived from the goal orientation
            quaternion, for logging by the caller.
        """
        orientation = goal.target_pose.orientation
        rpy = trans.euler_from_quaternion([orientation.x, orientation.y,
                                           orientation.z, orientation.w])
        command.input_double_register_0 = goal.target_pose.position.x
        command.input_double_register_1 = goal.target_pose.position.y
        command.input_double_register_2 = goal.target_pose.position.z
        command.input_double_register_3 = rpy[0]
        command.input_double_register_4 = rpy[1]
        command.input_double_register_5 = rpy[2]
        command.input_double_register_6 = goal.velocity
        command.input_double_register_7 = goal.acceleration
        return rpy

    def _prepare_command(self, goal):
        """Populate the driver command registers according to the goal type."""
        command = self._command_sender.command
        command.input_int_register_0 = goal.move_type
        if goal.move_type in (MoveURGoal.MOVE_JOINT, MoveURGoal.MOVE_LINEAR_FK):
            _joints2command(command, goal.target_joints)
            command.input_double_register_6 = goal.velocity
            command.input_double_register_7 = goal.acceleration
        elif goal.move_type in (MoveURGoal.MOVE_JOINT_IK, MoveURGoal.MOVE_LINEAR):
            rpy = self._fill_pose_registers(command, goal)
            rospy.loginfo("Target position: {}".format(goal.target_pose.position))
            rospy.loginfo("Target orientation: {}".format(goal.target_pose.orientation))
            rospy.loginfo("Target RPY: {}".format(rpy))
        elif goal.move_type in (MoveURGoal.MOVE_GRIPPER, MoveURGoal.MOVE_GRIPPER_DC):
            command.input_double_register_0 = goal.gripper_width
            command.input_double_register_1 = goal.gripper_force
        elif goal.move_type in (MoveURGoal.FT_SEARCH, MoveURGoal.MOVE_TO_SUCK):
            self._fill_pose_registers(command, goal)
            # Force/torque thresholds occupy registers 8-15.
            command.input_double_register_8 = goal.ft_threshold.force.x
            command.input_double_register_9 = goal.ft_threshold.force.y
            command.input_double_register_10 = goal.ft_threshold.force.z
            # BUGFIX: registers 11 and 12 previously copied torque.z three
            # times; they must carry torque.x and torque.y respectively.
            command.input_double_register_11 = goal.ft_threshold.torque.x
            command.input_double_register_12 = goal.ft_threshold.torque.y
            command.input_double_register_13 = goal.ft_threshold.torque.z
            command.input_double_register_14 = goal.ft_threshold.force_mag
            command.input_double_register_15 = goal.ft_threshold.torque_mag

    def execute_cb(self, goal):
        """Action execution callback: prepare, send, and report success."""
        self._prepare_command(goal)
        # STOP_PROGRAM is the only goal type that must not keep the robot
        # program running after the command is sent.
        self._command_sender.send_command(goal.move_type != MoveURGoal.STOP_PROGRAM)
        res = MoveURResult()
        self._as.set_succeeded(res)
| StarcoderdataPython |
# Scope
# A scope is the region of a program in which a variable is accessible.

b = 20  # global variable


def fn():
    a = 10  # 'a' is defined inside the function, so its scope is the function body; it cannot be accessed from outside
    print('函数内部:', 'a =', a)
    print('函数内部:', 'b =', b)


# fn()
# print('函数外部:','a =',a)
# print('函数外部:','b =',b)

# Python has two kinds of scope:
# Global scope
#   - created when the program starts executing and destroyed when it ends
#   - everything outside of any function is in the global scope
#   - variables defined in the global scope are global variables; they can be
#     accessed from anywhere in the program
#
# Function scope
#   - created when the function is called and destroyed when the call returns
#   - every call to the function produces a fresh function scope
#   - variables defined in a function scope are local variables, visible only
#     inside that function
#
# Variable lookup
#   - when a variable is used, the current scope is searched first; if the
#     name is found there it is used,
#     otherwise the search continues in the enclosing scope, and so on,
#     until the global scope is reached; if the name is still not found,
#     an exception is raised:
#     NameError: name 'a' is not defined


def fn2():
    def fn3():
        print('fn3中:', 'a =', a)
    fn3()


# fn2()

a = 20


def fn3():
    # a = 10  # assignments inside a function create/modify local variables by default
    # To modify a global variable from inside a function, declare it with the
    # 'global' keyword.
    global a  # declares that 'a' inside this function is the global 'a'; assigning to it now modifies the global
    a = 10  # modify the global variable
    print('函数内部:', 'a =', a)


# fn3()
# print('函数外部:','a =',a)

# Namespaces
# A namespace is where variables are stored; every variable lives in some
# namespace.
# Each scope has a corresponding namespace.
# The global namespace holds the global variables; a function namespace holds
# that function's variables.
# A namespace is effectively a dictionary dedicated to storing variables.
# locals() returns the namespace of the current scope:
#   called in the global scope it returns the global namespace,
#   called in a function scope it returns that function's namespace.
# The return value is a dict.
scope = locals()  # current namespace
print(type(scope))
# print(a)
# print(scope['a'])

# Add a key-value pair to scope
scope['c'] = 1000  # adding a key to this dict effectively creates a global variable (generally not recommended)
# print(c)


def fn4():
    a = 10
    # scope = locals()  # calling locals() inside a function returns the function's namespace
    # scope['b'] = 20  # the function namespace can be manipulated through scope, but this is also not recommended
    # globals() can be called anywhere to obtain the global namespace
    global_scope = globals()
    # print(global_scope['a'])
    global_scope['a'] = 30
    # print(scope)


fn4()
| StarcoderdataPython |
6604575 | <reponame>ryanstocks00/positron-cross-section
"""Module for calculating properties of gasses."""
from pathlib import Path
from typing import Any, TypeVar
import numpy as np
import pandas
from numpy.typing import NDArray
# Physical constants (SI units).
GAS_CONSTANT = 8.31446261815324      # molar gas constant, J / (mol K)
AVOGADROS_CONSTANT = 6.02214086e23   # particles per mole
MTORR_TO_PASCALS = 0.13332237        # pascals per mTorr

# Scalar-or-ndarray float type used by the functions in this module.
FloatLike = TypeVar("FloatLike", float, NDArray[np.float64])


def numeric_density(pressure: FloatLike, temperature: float = 300) -> FloatLike:
    """Calculate the numeric density of an ideal gas.

    Args:
        pressure: pressure in mTorr (scalar or array)
        temperature: temperature in kelvin (default 300)

    Returns:
        Numeric density (atoms per cubic meter), same shape as ``pressure``.
    """
    pascals = pressure * MTORR_TO_PASCALS
    molar_density = pascals / (temperature * GAS_CONSTANT)  # mol / m^3
    return molar_density * AVOGADROS_CONSTANT
def _plot_tcs_csv(ax: Any, filename: Path, **errorbar_kwargs: Any) -> None:
    """Add one error-bar series from *filename* to *ax* if the file exists.

    The CSV must provide ``Energy``, ``TCS`` and ``Error`` columns.
    """
    if filename.exists():
        previous_tcs = pandas.read_csv(filename)
        ax.errorbar(
            previous_tcs["Energy"],
            previous_tcs["TCS"],
            yerr=previous_tcs["Error"],
            **errorbar_kwargs,
        )


def plot_existing_GTCS_data(ax: Any, target: str) -> None:
    """Plot previously published total cross section reference data.

    Looks for per-target CSV files in the repository's ``previous_results``
    directory and, for each one that exists, adds an error-bar series to *ax*.

    Args:
        ax: matplotlib axes to draw on
        target: target/gas name; lower-cased to build the CSV file names
    """
    results_dir = Path(__file__).parent.parent.parent / "previous_results"
    _plot_tcs_csv(
        ax,
        results_dir / f"{target.lower()}_tcs.csv",
        fmt="-d",
        color="lightgray",
        ecolor="black",
        capsize=4,
        label="Chiari et. al.",
    )
    _plot_tcs_csv(
        ax,
        results_dir / f"{target.lower()}_tcs_floeder.csv",
        fmt="-^",
        color="lightpink",
        ecolor="white",
        capsize=0,
        label="Floeder et. al.",
    )
| StarcoderdataPython |
310424 | <gh_stars>10-100
import matplotlib.pyplot as plt

from copylot import CoPylot

## Minimum working example -> Must update path to weather file
cp = CoPylot()
r = cp.data_create()  # handle for a new data instance
# Point the instance at a weather file; assert verifies the call succeeded.
assert cp.data_set_string(
    r,
    "ambient.0.weather_file",
    "../climate_files/USA CA Daggett (TMY2).csv",
)
assert cp.generate_layout(r)      # build the heliostat field layout
field = cp.get_layout_info(r)     # layout table (has 'x_location'/'y_location' columns)
assert cp.simulate(r)             # run the simulation
flux = cp.get_fluxmap(r)          # 2-D flux map for plotting
assert cp.data_free(r)            # release the data instance

# Plotting (default) solar field and flux map
# Solar Field
plt.scatter(field['x_location'], field['y_location'], s=1.5)
plt.tight_layout()
plt.show()

# flux
im = plt.imshow(flux)
plt.colorbar(im)
plt.tight_layout()
plt.show()
| StarcoderdataPython |
# UDP demo: listens for 'on'/'off' text datagrams and drives a GPIO pin.
# works with ledcontrol.py
#
# <NAME>
# <EMAIL>
import RPi.GPIO as GPIO
import time,socket,traceback

pin = 17        # BCM pin number driving the LED
IP = ''         # empty string = bind to all interfaces
PORT = 50006

# UDP listening socket for the on/off commands.
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind((IP, PORT))

GPIO.setmode(GPIO.BCM)
GPIO.setup(pin, GPIO.OUT)
GPIO.output(pin, GPIO.LOW)  # start with the pin low (LED off)

print('Listening...')
while True:
    try:
        data, addr = sock.recvfrom(1024)
        m = data.decode()
        # print(m)
        if 'on' in m:
            print('{} wants it {}'.format(addr[0], m))
            GPIO.output(pin, GPIO.HIGH)
        elif 'off' in m:
            print('{} wants it {}'.format(addr[0], m))
            GPIO.output(pin, GPIO.LOW)
    except KeyboardInterrupt:
        break  # Ctrl-C leaves the loop so cleanup below can run
    except Exception:
        # Was a bare 'except:', which would also swallow SystemExit;
        # log the error and keep serving.
        traceback.print_exc()

print('Cleaning up...')
GPIO.cleanup(pin)
print('Done.')
| StarcoderdataPython |
#!/usr/bin/env python3
"""Replace FASTA records in a sequence database with updated versions.

Reads a replacement FASTA file, then streams the input database to the output
path, substituting any record whose description matches a replacement record,
and finally prints a processing summary via ProcLog.
"""
from ArgditLib.ProcLog import ProcLog
from Bio import SeqIO
from Bio.Seq import Seq
import argparse
import sys

parser = argparse.ArgumentParser()
parser.add_argument('seq_db_path', help = 'nucleotide/protein database FASTA file path')
parser.add_argument('replace_seq_file_path', help = 'FASTA file path for replacement sequences')
parser.add_argument('output_seq_db_path', help = 'output database file path')
args = parser.parse_args()

# Index the replacement records by their full FASTA description line.
# Note: the 'U' open mode was removed in Python 3.11; universal newlines
# is the default for text mode, so plain 'r' is equivalent.
replace_seq_record_count = 0
with open(args.replace_seq_file_path, 'r') as f:
    replace_seq_records = dict()
    for seq_record in SeqIO.parse(f, 'fasta'):
        replace_seq_records[seq_record.description] = seq_record
        replace_seq_record_count += 1

input_seq_record_count = 0
export_seq_record_count = 0
direct_export_count = 0
replace_count = 0
# Stream the database, writing either the replacement or the original record.
with open(args.output_seq_db_path, 'w') as fw:
    with open(args.seq_db_path, 'r') as f:
        for seq_record in SeqIO.parse(f, 'fasta'):
            input_seq_record_count += 1
            if seq_record.description in replace_seq_records:
                SeqIO.write(replace_seq_records[seq_record.description], fw, 'fasta')
                replace_count += 1
            else:
                SeqIO.write(seq_record, fw, 'fasta')
                direct_export_count += 1
            # Every input record is exported exactly once either way.
            export_seq_record_count += 1

# Assemble and print the summary lines.
summary = list()
summary_stmt = ProcLog.create_summary_stmt(input_seq_record_count, 'read from {}'.format(args.seq_db_path))
summary.append(summary_stmt)
summary_stmt = ProcLog.create_summary_stmt(replace_seq_record_count, 'read from {}'.format(args.replace_seq_file_path),
                                           'replacement')
summary.append(summary_stmt)
summary_stmt = ProcLog.create_summary_stmt(direct_export_count,
                                           'directly exported to {}'.format(args.output_seq_db_path), 'input')
summary.append(summary_stmt)
summary_stmt = ProcLog.create_summary_stmt(replace_count, 'exported to {}'.format(args.output_seq_db_path),
                                           'replacement')
summary.append(summary_stmt)
summary_stmt = ProcLog.create_summary_stmt(export_seq_record_count, 'exported in total')
summary.append(summary_stmt)
ProcLog.export_ext_summary(sys.stdout, summary)
| StarcoderdataPython |
9792139 | from PyObjCTools.TestSupport import *
import array
from CoreFoundation import *
import os
from Foundation import NSURL
# Python 2/3 compatibility: on Python 3 the builtin names 'unicode' and
# 'long' no longer exist, so alias them to 'str' and 'int'.
try:
    unicode
except NameError:
    unicode = str

try:
    long
except NameError:
    long = int
class TestURL (TestCase):
    """Tests for the CoreFoundation CFURL API as exposed through PyObjC."""

    def testTypes(self):
        """CFURLRef is toll-free bridged to NSURL."""
        self.assertIs(CFURLRef, NSURL)

    def testTypeID(self):
        """CFURLGetTypeID returns an integer type identifier."""
        val = CFURLGetTypeID()
        self.assertIsInstance(val, (int, long))

    def testCreateWithBytes(self):
        """Create URLs from byte buffers (bytes and array.array)."""
        url = b"http://www.omroep.nl/"
        ref = CFURLCreateWithBytes(None, url, len(url), kCFStringEncodingUTF8, None)
        self.assertIsInstance(ref, CFURLRef)
        strval = CFURLGetString(ref)
        self.assertEqual(strval, unicode(url, "utf-8"))
        ref2 = CFURLCreateWithBytes(None, url, len(url), kCFStringEncodingUTF8, ref)
        self.assertIsInstance(ref2, CFURLRef)
        a = array.array('b', b'http://www.nu.nl/')
        ref3 = CFURLCreateWithBytes(None, a, len(a), kCFStringEncodingUTF8, None)
        self.assertIsInstance(ref3, CFURLRef)
        # Explicitly test that a str/unicode argument is rejected: the API
        # takes a byte buffer, not a text string.
        self.assertRaises((ValueError, TypeError), CFURLCreateWithBytes, None, unicode(url), len(url), kCFStringEncodingUTF8, None)

    def testCreateData(self):
        """CFURLCreateData round-trips the URL bytes, percent-escaping spaces."""
        url = b"http://www.omroep.nl/ blank"
        ref = CFURLCreateWithBytes(None, url, len(url), kCFStringEncodingUTF8, None)
        self.assertIsInstance(ref, CFURLRef)
        data = CFURLCreateData(None, ref, kCFStringEncodingUTF8, False)
        self.assertIsInstance(data, CFDataRef)
        val = CFDataGetBytes(data, (0, CFDataGetLength(data)), None)
        self.assertEqual(val, url.replace(b' ', b'%20'))
        data = CFURLCreateData(None, ref, kCFStringEncodingUTF8, True)
        self.assertIsInstance(data, CFDataRef)
        val = CFDataGetBytes(data, (0, CFDataGetLength(data)), None)
        self.assertEqual(val, url.replace(b' ', b'%20'))

    def testCreateWithString(self):
        """Create URLs from text strings, with and without a base URL."""
        url = b"http://www.omroep.nl/".decode('ascii')
        ref = CFURLCreateWithString(None, url, None)
        self.assertIsInstance(ref, CFURLRef)
        strval = CFURLGetString(ref)
        self.assertEqual(strval, url)
        ref2 = CFURLCreateWithString(None, url, ref)
        self.assertIsInstance(ref2, CFURLRef)

    def testCreateAbsolute(self):
        """Resolve relative byte paths against a base URL."""
        url = b"http://www.omroep.nl/sport/".decode('ascii')
        baseref = CFURLCreateWithString(None, url, None)
        self.assertArgHasType(CFURLCreateAbsoluteURLWithBytes, 1, b'n^v')
        self.assertArgSizeInArg(CFURLCreateAbsoluteURLWithBytes, 1, 2)
        ref = CFURLCreateAbsoluteURLWithBytes(None, b"socker", len(b"socker"), kCFStringEncodingUTF8, baseref, True)
        self.assertIsInstance(ref, CFURLRef)
        strval = CFURLGetString(ref)
        self.assertEqual(strval, b"http://www.omroep.nl/sport/socker".decode('ascii'))
        relpath = b"../../../dummy"
        ref = CFURLCreateAbsoluteURLWithBytes(None, relpath, len(relpath), kCFStringEncodingUTF8, baseref, True)
        self.assertIsInstance(ref, CFURLRef)
        strval = CFURLGetString(ref)
        self.assertEqual(strval, b"http://www.omroep.nl/dummy".decode('ascii'))
        relpath = b"../../../dummy"
        # With compaction disabled the surplus '..' components are kept.
        ref = CFURLCreateAbsoluteURLWithBytes(None, relpath, len(relpath), kCFStringEncodingUTF8, baseref, False)
        self.assertIsInstance(ref, CFURLRef)
        strval = CFURLGetString(ref)
        self.assertEqual(strval, b"http://www.omroep.nl/../../dummy".decode('ascii'))

    def testCopyAbs(self):
        """CFURLCopyAbsoluteURL resolves a relative URL against its base."""
        base = CFURLCreateWithString(None, b"http://www.omroep.nl/".decode('ascii'), None)
        self.assertIsInstance(base, CFURLRef)
        ref = CFURLCreateWithString(None, b"/sport".decode('ascii'), base)
        self.assertIsInstance(ref, CFURLRef)
        self.assertEqual(CFURLGetString(ref), b"/sport".decode('ascii'))
        abs = CFURLCopyAbsoluteURL(ref)
        self.assertIsInstance(abs, CFURLRef)
        self.assertEqual(CFURLGetString(abs), b"http://www.omroep.nl/sport".decode('ascii'))

    def testPaths(self):
        """File-system path constructors and representations."""
        url = CFURLCreateWithFileSystemPath(None, b"/tmp/".decode('ascii'), kCFURLPOSIXPathStyle, True)
        self.assertIsInstance(url, CFURLRef)
        self.assertTrue(CFURLHasDirectoryPath(url))
        url = CFURLCreateWithFileSystemPath(None, b"/etc/hosts".decode('ascii'), kCFURLPOSIXPathStyle, False)
        self.assertIsInstance(url, CFURLRef)
        self.assertFalse(CFURLHasDirectoryPath(url))
        p = os.path.expanduser('~')
        p = p.encode('utf-8')
        self.assertArgHasType(CFURLCreateFromFileSystemRepresentation, 1, b'n^t')
        self.assertArgIsNullTerminated(CFURLCreateFromFileSystemRepresentation, 1)
        url = CFURLCreateFromFileSystemRepresentation(None, p, len(p), True)
        self.assertIsInstance(url, CFURLRef)
        # A text string is not a valid file-system representation buffer.
        self.assertRaises((ValueError, TypeError),
            CFURLCreateFromFileSystemRepresentation, None,
            b"/tmp/".decode('ascii'), 4, True)
        base = CFURLCreateWithFileSystemPath(None, b"/tmp".decode('ascii'), kCFURLPOSIXPathStyle, True)
        self.assertIsInstance(base, CFURLRef)
        self.assertArgIsBOOL(CFURLCreateWithFileSystemPathRelativeToBase, 3)
        url = CFURLCreateWithFileSystemPathRelativeToBase(None, b"filename".decode('ascii'), kCFURLPOSIXPathStyle, True, base)
        self.assertIsInstance(url, CFURLRef)
        strval = CFURLGetString(url)
        self.assertEqual(strval, b"filename/".decode('ascii'))
        self.assertArgIsBOOL(CFURLCreateFromFileSystemRepresentationRelativeToBase, 3)
        url = CFURLCreateFromFileSystemRepresentationRelativeToBase(None, b"filename2", 9, False, base)
        self.assertIsInstance(url, CFURLRef)
        strval = CFURLGetString(url)
        self.assertEqual(strval, b"filename2".decode('ascii'))
        ok, strval = CFURLGetFileSystemRepresentation(url, True, None, 100)
        self.assertTrue(ok)
        # Unfortunately metadata doesn't allow describing what we actually
        # need; trim the returned buffer at the first NUL byte.
        if b'\0' in strval:
            strval = strval[:strval.index(b'\0')]
        self.assertEqual(strval, b"/tmp/filename2")

    def testParts(self):
        """Accessors for the individual components of a URL."""
        base = CFURLCreateWithString(None, b"http://www.omroep.nl/".decode('ascii'), None)
        self.assertIsInstance(base, CFURLRef)
        ref = CFURLCreateWithString(None, b"/sport".decode('ascii'), base)
        self.assertIsInstance(ref, CFURLRef)
        self.assertEqual(CFURLGetBaseURL(base), None)
        self.assertEqual(CFURLGetBaseURL(ref), base)
        self.assertTrue(CFURLCanBeDecomposed(ref) is True)
        self.assertEqual(CFURLCopyScheme(ref), b"http".decode('ascii'))
        self.assertEqual(CFURLCopyNetLocation(ref), b"www.omroep.nl".decode('ascii'))
        self.assertEqual(CFURLCopyPath(ref), b"/sport".decode('ascii'))
        path, isDir = CFURLCopyStrictPath(ref, None)
        self.assertEqual(path, b"sport".decode('ascii'))
        self.assertEqual(isDir, True)
        path = CFURLCopyFileSystemPath(ref, kCFURLPOSIXPathStyle)
        self.assertEqual(path, b"/sport".decode('ascii'))
        path = CFURLCopyFileSystemPath(ref, kCFURLPOSIXPathStyle)
        self.assertEqual(path, b"/sport".decode('ascii'))
        path = CFURLCopyFileSystemPath(ref, kCFURLWindowsPathStyle)
        self.assertEqual(path, b"\\sport".decode('ascii'))
        self.assertFalse(CFURLHasDirectoryPath(ref))
        v = CFURLCopyResourceSpecifier(ref)
        self.assertEqual(v, None)
        v = CFURLCopyHostName(ref)
        self.assertEqual(v, "www.omroep.nl")
        v = CFURLGetPortNumber(ref)
        self.assertEqual(v, -1)
        # A fully populated URL: credentials, port, query and fragment.
        ref = CFURLCreateWithString(None, b"https://ronald:<EMAIL>@www.<EMAIL>:42/sport/results.cgi?qs=1#anchor".decode('ascii'), None)
        v = CFURLGetPortNumber(ref)
        self.assertEqual(v, 42)
        v = CFURLCopyResourceSpecifier(ref)
        self.assertEqual(v, b"?qs=1#anchor".decode('ascii'))
        v = CFURLCopyUserName(ref)
        self.assertEqual(v, "ronald")
        v = CFURLCopyPassword(ref)
        self.assertEqual(v, "test")
        v = CFURLCopyParameterString(ref, None)
        self.assertEqual(v, None)
        v = CFURLCopyQueryString(ref, None)
        self.assertEqual(v, "qs=1")
        v = CFURLCopyLastPathComponent(ref)
        self.assertEqual(v, "results.cgi")
        v = CFURLCopyPathExtension(ref)
        self.assertEqual(v, "cgi")
        cnt, bytes = CFURLGetBytes(ref, None, 100)
        self.assertEqual(cnt, 62)
        self.assertEqual(bytes,
            b"https://ronald:test@www.<EMAIL>:42/sport/results.cgi?qs=1#anchor")
        cnt, bytes = CFURLGetBytes(ref, objc.NULL, 0)
        self.assertEqual(cnt, 62)
        self.assertEqual(bytes, objc.NULL)
        rng1, rng2 = CFURLGetByteRangeForComponent(ref, kCFURLComponentHost, None)
        self.assertIsInstance(rng1, CFRange)
        self.assertIsInstance(rng2, CFRange)

    def testUpdating(self):
        """Derive new URLs by appending/deleting path components/extensions."""
        base = CFURLCreateWithString(None, b"http://www.omroep.nl/sport".decode('ascii'), None)
        self.assertIsInstance(base, CFURLRef)
        url = CFURLCreateCopyAppendingPathComponent(None, base, "soccer", True)
        self.assertIsInstance(url, CFURLRef)
        strval = CFURLGetString(url)
        self.assertEqual(strval, "http://www.omroep.nl/sport/soccer/")
        url = CFURLCreateCopyDeletingLastPathComponent(None, base)
        self.assertIsInstance(url, CFURLRef)
        strval = CFURLGetString(url)
        self.assertEqual(strval, "http://www.omroep.nl/")
        url = CFURLCreateCopyAppendingPathExtension(None, base, "cgi")
        self.assertIsInstance(url, CFURLRef)
        strval = CFURLGetString(url)
        self.assertEqual(strval, "http://www.omroep.nl/sport.cgi")
        url2 = CFURLCreateCopyDeletingPathExtension(None, base)
        self.assertIsInstance(url2, CFURLRef)
        strval = CFURLGetString(url2)
        self.assertEqual(strval, "http://www.omroep.nl/sport")

    def testStringEncoding(self):
        """Percent-escape handling when converting URLs to/from strings."""
        base = b"http://www.omroep.nl/sport%20en%20%73%70el".decode('ascii')
        strval = CFURLCreateStringByReplacingPercentEscapes(None, base, objc.NULL)
        self.assertEqual(strval, "http://www.omroep.nl/sport%20en%20%73%70el")
        strval = CFURLCreateStringByReplacingPercentEscapes(None, base, "")
        self.assertEqual(strval, "http://www.omroep.nl/sport en spel")
        strval = CFURLCreateStringByReplacingPercentEscapes(None, base, " ")
        self.assertEqual(strval, "http://www.omroep.nl/sport%20en%20spel")
        strval = CFURLCreateStringByReplacingPercentEscapesUsingEncoding(None, base, "", kCFStringEncodingISOLatin1)
        self.assertEqual(strval, "http://www.omroep.nl/sport en spel")
        base = b"http://www.omroep.nl/sport en spel".decode('ascii')
        strval = CFURLCreateStringByAddingPercentEscapes(None, base, "", "",
                kCFStringEncodingISOLatin1)
        self.assertEqual(strval, b"http://www.omroep.nl/sport%20en%20spel".decode('ascii'))
        strval = CFURLCreateStringByAddingPercentEscapes(None, base, " ", "s",
                kCFStringEncodingISOLatin1)
        self.assertEqual(strval, b"http://www.omroep.nl/%73port en %73pel".decode('ascii'))

    def testFSRef(self):
        """Round-trip between CFURL and the (legacy) FSRef type."""
        ref = CFURLCreateWithFileSystemPath(None, os.getcwd(), kCFURLPOSIXPathStyle, True)
        self.assertIsInstance(ref, CFURLRef)
        ok, fsref = CFURLGetFSRef(ref, None)
        self.assertTrue(ok)
        self.assertIsInstance(fsref, objc.FSRef)
        self.assertEqual(fsref.as_pathname(), os.getcwd())
        ref2 = CFURLCreateFromFSRef(None, fsref)
        self.assertEqual(ref, ref2)

    def testConstants(self):
        """Numeric values of the path-style and component enums."""
        self.assertEqual(kCFURLPOSIXPathStyle, 0)
        self.assertEqual(kCFURLHFSPathStyle, 1)
        self.assertEqual(kCFURLWindowsPathStyle, 2)
        self.assertEqual(kCFURLComponentScheme, 1)
        self.assertEqual(kCFURLComponentNetLocation, 2)
        self.assertEqual(kCFURLComponentPath, 3)
        self.assertEqual(kCFURLComponentResourceSpecifier, 4)
        self.assertEqual(kCFURLComponentUser, 5)
        self.assertEqual(kCFURLComponentPassword, 6)
        self.assertEqual(kCFURLComponentUserInfo, 7)
        self.assertEqual(kCFURLComponentHost, 8)
        self.assertEqual(kCFURLComponentPort, 9)
        self.assertEqual(kCFURLComponentParameterString, 10)
        self.assertEqual(kCFURLComponentQuery, 11)
        self.assertEqual(kCFURLComponentFragment, 12)

    @min_os_level('10.6')
    def testFunctions10_6(self):
        """Resource properties and bookmark data APIs added in 10.6."""
        # Create a scratch file for the file-URL based APIs; removed in
        # the finally block below.
        fp = open("/tmp/pyobjc.test", "w")
        fp.close()
        try:
            baseURL = CFURLCreateWithFileSystemPath(None,
                    os.path.realpath(b"/tmp/pyobjc.test".decode('ascii')),
                    kCFURLPOSIXPathStyle, False)
            self.assertIsInstance(baseURL, CFURLRef)
            self.assertResultIsCFRetained(CFURLCreateFileReferenceURL)
            url, err = CFURLCreateFileReferenceURL(None, baseURL, None)
            self.assertIsInstance(url, CFURLRef)
            self.assertEqual(err, None)
            self.assertResultIsCFRetained(CFURLCreateFilePathURL)
            url, err = CFURLCreateFilePathURL(None, baseURL, None)
            self.assertIsInstance(url, CFURLRef)
            self.assertEqual(err, None)
            self.assertResultIsBOOL(CFURLCopyResourcePropertyForKey)
            self.assertArgIsCFRetained(CFURLCopyResourcePropertyForKey, 2)
            self.assertArgIsOut(CFURLCopyResourcePropertyForKey, 2)
            self.assertArgIsOut(CFURLCopyResourcePropertyForKey, 3)
            ok, value, error = CFURLCopyResourcePropertyForKey(url, kCFURLNameKey, None, None)
            self.assertTrue(ok)
            self.assertIsInstance(value, unicode)
            self.assertEqual(error, None)
            ok, value, error = CFURLCopyResourcePropertyForKey(url, kCFURLIsRegularFileKey, None, None)
            self.assertTrue(ok)
            self.assertIsInstance(value, bool)
            self.assertEqual(error, None)
            self.assertResultIsCFRetained(CFURLCreateFilePathURL)
            self.assertArgIsOut(CFURLCopyResourcePropertyForKey, 2)
            values, error = CFURLCopyResourcePropertiesForKeys(url, [kCFURLNameKey, kCFURLIsRegularFileKey], None)
            self.assertIsInstance(values, CFDictionaryRef)
            self.assertEqual(error, None)
            CFURLClearResourcePropertyCacheForKey(url, kCFURLIsRegularFileKey)
            CFURLClearResourcePropertyCache(url)
            self.assertResultIsBOOL(CFURLResourceIsReachable)
            v, err = CFURLResourceIsReachable(url, None)
            self.assertIsInstance(v, bool)
            self.assertEqual(err, None)
            CFURLSetTemporaryResourcePropertyForKey(url, "pyobjc.test", b"hello".decode('ascii'))
            ok, v, err = CFURLCopyResourcePropertyForKey(url, "pyobjc.test", None, None)
            self.assertTrue(ok)
            self.assertEqual(v, b"hello".decode('ascii'))
            # Flip the hidden flag, verify, then restore the original value.
            ok, cur, err = CFURLCopyResourcePropertyForKey(url, kCFURLIsHiddenKey, None, None)
            self.assertTrue(ok)
            ok, err = CFURLSetResourcePropertyForKey(url, kCFURLIsHiddenKey, not cur, None)
            self.assertTrue(ok)
            ok, new, err = CFURLCopyResourcePropertyForKey(url, kCFURLIsHiddenKey, None, None)
            self.assertTrue(ok)
            self.assertEqual(new, not cur)
            self.assertEqual(err, None)
            ok, err = CFURLSetResourcePropertiesForKeys(url, {kCFURLIsHiddenKey:cur}, None)
            self.assertTrue(ok)
            self.assertEqual(err, None)
            ok, new, err = CFURLCopyResourcePropertyForKey(url, kCFURLIsHiddenKey, None, None)
            self.assertTrue(ok)
            self.assertEqual(new, cur)
            self.assertEqual(err, None)
            self.assertResultIsCFRetained(CFURLCreateBookmarkData)
            data, err = CFURLCreateBookmarkData(None, url, kCFURLBookmarkCreationSuitableForBookmarkFile, [kCFURLNameKey, kCFURLIsHiddenKey], None, None)
            self.assertIs(err, None)
            self.assertIsInstance(data, CFDataRef)
            self.assertResultIsCFRetained(CFURLCreateByResolvingBookmarkData)
            u, stale, err = CFURLCreateByResolvingBookmarkData(None, data, 0, None, None, None, None)
            self.assertEqual(u, url)
            self.assertIsInstance(stale, bool)
            self.assertFalse(stale)
            self.assertIs(err, None)
            self.assertResultIsCFRetained(CFURLCreateResourcePropertiesForKeysFromBookmarkData)
            v = CFURLCreateResourcePropertiesForKeysFromBookmarkData(None, [kCFURLNameKey], data)
            self.assertIsInstance(v, CFDictionaryRef)
            self.assertResultIsCFRetained(CFURLCreateResourcePropertyForKeyFromBookmarkData)
            v = CFURLCreateResourcePropertyForKeyFromBookmarkData(None, kCFURLNameKey, data)
            self.assertIsInstance(v, unicode)
            refURL = CFURLCreateWithFileSystemPath(None,
                    b"/tmp/pyobjc.test.2".decode('ascii'), kCFURLPOSIXPathStyle, False)
            ok, err = CFURLWriteBookmarkDataToFile(data, refURL, 0, None)
            self.assertTrue(ok)
            self.assertIs(err, None)
            self.assertTrue(os.path.exists('/tmp/pyobjc.test.2'))
            self.assertResultIsCFRetained(CFURLCreateBookmarkDataFromFile)
            n, err = CFURLCreateBookmarkDataFromFile(None, refURL, None)
            self.assertIsInstance(n, CFDataRef)
            self.assertIs(err, None)
            self.assertResultIsCFRetained(CFURLCreateBookmarkDataFromAliasRecord)
            self.assertArgHasType(CFURLCreateBookmarkDataFromAliasRecord, 0, b'^{__CFAllocator=}')
            self.assertArgHasType(CFURLCreateBookmarkDataFromAliasRecord, 1, b'^{__CFData=}')
        finally:
            os.unlink('/tmp/pyobjc.test')
            if os.path.exists('/tmp/pyobjc.test.2'):
                os.unlink('/tmp/pyobjc.test.2')

    @min_os_level('10.8')
    def testFunctions10_8(self):
        """Security-scoped resource API added in 10.8."""
        self.assertResultIsBOOL(CFURLStartAccessingSecurityScopedResource)

    @min_os_level('10.8')
    def testConstants10_8(self):
        """Constants added in 10.8."""
        self.assertIsInstance(kCFURLIsExcludedFromBackupKey, unicode)
        self.assertIsInstance(kCFURLPathKey, unicode)
        self.assertEqual(kCFBookmarkResolutionWithoutUIMask, 1 << 8)
        self.assertEqual(kCFBookmarkResolutionWithoutMountingMask, 1 << 9)
        self.assertEqual(kCFURLBookmarkResolutionWithSecurityScope, 1 << 10)

    @min_os_level('10.6')
    def testConstants10_6(self):
        """Resource-property key constants added in 10.6."""
        self.assertIsInstance(kCFURLNameKey, unicode)
        self.assertIsInstance(kCFURLLocalizedNameKey, unicode)
        self.assertIsInstance(kCFURLIsRegularFileKey, unicode)
        self.assertIsInstance(kCFURLIsDirectoryKey, unicode)
        self.assertIsInstance(kCFURLIsSymbolicLinkKey, unicode)
        self.assertIsInstance(kCFURLIsVolumeKey, unicode)
        self.assertIsInstance(kCFURLIsPackageKey, unicode)
        self.assertIsInstance(kCFURLIsSystemImmutableKey, unicode)
        self.assertIsInstance(kCFURLIsUserImmutableKey, unicode)
        self.assertIsInstance(kCFURLIsHiddenKey, unicode)
        self.assertIsInstance(kCFURLHasHiddenExtensionKey, unicode)
        self.assertIsInstance(kCFURLCreationDateKey, unicode)
        self.assertIsInstance(kCFURLContentAccessDateKey, unicode)
        self.assertIsInstance(kCFURLContentModificationDateKey, unicode)
        self.assertIsInstance(kCFURLAttributeModificationDateKey, unicode)
        self.assertIsInstance(kCFURLLinkCountKey, unicode)
        self.assertIsInstance(kCFURLParentDirectoryURLKey, unicode)
        self.assertIsInstance(kCFURLVolumeURLKey, unicode)
        self.assertIsInstance(kCFURLTypeIdentifierKey, unicode)
        self.assertIsInstance(kCFURLLocalizedTypeDescriptionKey, unicode)
        self.assertIsInstance(kCFURLLabelNumberKey, unicode)
        self.assertIsInstance(kCFURLLabelColorKey, unicode)
        self.assertIsInstance(kCFURLLocalizedLabelKey, unicode)
        self.assertIsInstance(kCFURLEffectiveIconKey, unicode)
        self.assertIsInstance(kCFURLCustomIconKey, unicode)
        self.assertIsInstance(kCFURLFileSizeKey, unicode)
        self.assertIsInstance(kCFURLFileAllocatedSizeKey, unicode)
        self.assertIsInstance(kCFURLIsAliasFileKey, unicode)
        self.assertIsInstance(kCFURLVolumeLocalizedFormatDescriptionKey, unicode)
        self.assertIsInstance(kCFURLVolumeTotalCapacityKey, unicode)
        self.assertIsInstance(kCFURLVolumeAvailableCapacityKey, unicode)
        self.assertIsInstance(kCFURLVolumeResourceCountKey, unicode)
        self.assertIsInstance(kCFURLVolumeSupportsPersistentIDsKey, unicode)
        self.assertIsInstance(kCFURLVolumeSupportsSymbolicLinksKey, unicode)
        self.assertIsInstance(kCFURLVolumeSupportsHardLinksKey, unicode)
        self.assertIsInstance(kCFURLVolumeSupportsJournalingKey, unicode)
        self.assertIsInstance(kCFURLVolumeIsJournalingKey, unicode)
        self.assertIsInstance(kCFURLVolumeSupportsSparseFilesKey, unicode)
        self.assertIsInstance(kCFURLVolumeSupportsZeroRunsKey, unicode)
        self.assertIsInstance(kCFURLVolumeSupportsCaseSensitiveNamesKey, unicode)
        self.assertIsInstance(kCFURLVolumeSupportsCasePreservedNamesKey, unicode)
        self.assertEqual(kCFURLBookmarkCreationPreferFileIDResolutionMask, 1<<8)
        self.assertEqual(kCFURLBookmarkCreationMinimalBookmarkMask, 1<<9)
        self.assertEqual(kCFURLBookmarkCreationSuitableForBookmarkFile, 1<<10)
        self.assertEqual(kCFBookmarkResolutionWithoutUIMask, 1<<8)
        self.assertEqual(kCFBookmarkResolutionWithoutMountingMask, 1<<9)

    @min_os_level('10.7')
    def testConstants10_7(self):
        """Resource-property key constants added in 10.7."""
        self.assertIsInstance(kCFURLKeysOfUnsetValuesKey, unicode)
        self.assertIsInstance(kCFURLFileResourceIdentifierKey, unicode)
        self.assertIsInstance(kCFURLVolumeIdentifierKey, unicode)
        self.assertIsInstance(kCFURLPreferredIOBlockSizeKey, unicode)
        self.assertIsInstance(kCFURLIsReadableKey, unicode)
        self.assertIsInstance(kCFURLIsWritableKey, unicode)
        self.assertIsInstance(kCFURLIsExecutableKey, unicode)
        self.assertIsInstance(kCFURLFileSecurityKey, unicode)
        self.assertIsInstance(kCFURLFileResourceTypeKey, unicode)
        self.assertIsInstance(kCFURLFileResourceTypeNamedPipe, unicode)
        self.assertIsInstance(kCFURLFileResourceTypeCharacterSpecial, unicode)
        self.assertIsInstance(kCFURLFileResourceTypeDirectory, unicode)
        self.assertIsInstance(kCFURLFileResourceTypeBlockSpecial, unicode)
        self.assertIsInstance(kCFURLFileResourceTypeRegular, unicode)
        self.assertIsInstance(kCFURLFileResourceTypeSymbolicLink, unicode)
        self.assertIsInstance(kCFURLFileResourceTypeSocket, unicode)
        self.assertIsInstance(kCFURLFileResourceTypeUnknown, unicode)
        self.assertIsInstance(kCFURLTotalFileSizeKey, unicode)
        self.assertIsInstance(kCFURLTotalFileAllocatedSizeKey, unicode)
        self.assertIsInstance(kCFURLIsMountTriggerKey, unicode)
        self.assertIsInstance(kCFURLVolumeSupportsRootDirectoryDatesKey, unicode)
        self.assertIsInstance(kCFURLVolumeSupportsVolumeSizesKey, unicode)
        self.assertIsInstance(kCFURLVolumeSupportsRenamingKey, unicode)
        self.assertIsInstance(kCFURLVolumeSupportsAdvisoryFileLockingKey, unicode)
        self.assertIsInstance(kCFURLVolumeSupportsExtendedSecurityKey, unicode)
        self.assertIsInstance(kCFURLVolumeIsBrowsableKey, unicode)
        self.assertIsInstance(kCFURLVolumeMaximumFileSizeKey, unicode)
        self.assertIsInstance(kCFURLVolumeIsEjectableKey, unicode)
        self.assertIsInstance(kCFURLVolumeIsRemovableKey, unicode)
        self.assertIsInstance(kCFURLVolumeIsInternalKey, unicode)
        self.assertIsInstance(kCFURLVolumeIsAutomountedKey, unicode)
        self.assertIsInstance(kCFURLVolumeIsLocalKey, unicode)
        self.assertIsInstance(kCFURLVolumeIsReadOnlyKey, unicode)
        self.assertIsInstance(kCFURLVolumeCreationDateKey, unicode)
        self.assertIsInstance(kCFURLVolumeURLForRemountingKey, unicode)
        self.assertIsInstance(kCFURLVolumeUUIDStringKey, unicode)
        self.assertIsInstance(kCFURLVolumeNameKey, unicode)
        self.assertIsInstance(kCFURLVolumeLocalizedNameKey, unicode)
        self.assertIsInstance(kCFURLIsUbiquitousItemKey, unicode)
        self.assertIsInstance(kCFURLUbiquitousItemHasUnresolvedConflictsKey, unicode)
        self.assertIsInstance(kCFURLUbiquitousItemIsDownloadedKey, unicode)
        self.assertIsInstance(kCFURLUbiquitousItemIsDownloadingKey, unicode)
        self.assertIsInstance(kCFURLUbiquitousItemIsUploadedKey, unicode)
        self.assertIsInstance(kCFURLUbiquitousItemIsUploadingKey, unicode)
        self.assertIsInstance(kCFURLUbiquitousItemPercentDownloadedKey, unicode)
        self.assertIsInstance(kCFURLUbiquitousItemPercentUploadedKey, unicode)
# Allow running this test file directly; main() is provided by
# PyObjCTools.TestSupport (imported via *).
if __name__ == "__main__":
    main()
| StarcoderdataPython |
5112604 | import numpy as np
class CosineDistance:
    """Callable computing the cosine of the angle between two vectors.

    NOTE(review): despite the name, this returns cosine *similarity*
    (1.0 for parallel vectors, 0.0 for orthogonal ones), not a distance —
    confirm that callers expect that orientation.
    """

    def __call__(self, y: np.ndarray, y_pred: np.ndarray) -> np.float64:
        """Allow the instance itself to be used as a loss function."""
        return self.loss(y, y_pred)

    def loss(self, y: np.ndarray, y_pred: np.ndarray) -> np.float64:
        """Return dot(y, y_pred) normalised by the product of the two norms."""
        denominator = np.linalg.norm(y) * np.linalg.norm(y_pred)
        return np.dot(y, y_pred) / denominator
| StarcoderdataPython |
4891029 | <reponame>hase1128/dragonfly<filename>examples/synthetic/park1_3/park1_3_mf.py
"""
Park1 function with three domains.
-- <EMAIL>
"""
# pylint: disable=invalid-name
# Support both package-relative execution and running this file directly:
# fall back to an absolute import when the relative one is unavailable.
# (The original fallback repeated the same relative import, so it could
# never succeed where the first attempt failed.)
try:
    from .park1_3 import park1_3_z_x
except ImportError:
    from park1_3 import park1_3_z_x
def park1_3_mf(z, x):
    """ Computes the park1 function at fidelity z and point x. """
    # Normalise the three fidelity inputs into [0, 1]-ish fractions.
    fidelities = [
        len(z[0][0]) / 10.0,
        len(z[0][1]) / 10.0,
        (z[1] - 21.3) / (243.9 - 21.3),
    ]
    return park1_3_z_x(fidelities, x)
def cost(z):
    """ Cost function: evaluation cost as a function of the fidelity z. """
    frac_a = len(z[0][0]) / 10.0
    frac_b = len(z[0][1]) / 10.0
    frac_c = (z[1] - 21.3) / (243.9 - 21.3)
    # Base cost plus the mean of the first two fractions plus a term that
    # grows quadratically with the third fidelity fraction.
    return 0.1 + 0.5 * (frac_a + frac_b) + 1.3 * frac_b * frac_c ** 2
# Write a function like this called obj.
def objective(z, x):
    """ Objective: thin wrapper around the multi-fidelity park1 function. """
    result = park1_3_mf(z, x)
    return result
def main(z, x):
    """ main function: return (function value, evaluation cost). """
    value = park1_3_mf(z, x)
    return value, cost(z)
| StarcoderdataPython |
8148163 | <gh_stars>0
# Read weight (kg) and height (m), then classify the body-mass index.
p = float(input('Me informe seu peso: '))
a = float(input('Me informe sua altura: '))
imc = p / (a ** 2)  # BMI = weight / height^2
print(f'SEU IMC {imc:.1f}')
# Contiguous elif ranges. The original used lower bounds of 18.51, 25.01
# and 30.01, so values in the gaps (e.g. imc = 25.005) fell through to the
# final else branch and were misreported as "OBESIDADE MÓRBIDA".
if imc <= 18.50:
    print('Você se encontra ABAIXO DO PESO')
elif imc <= 25.00:
    print('Você se encontra no PESO IDEAL')
elif imc < 30.00:
    print('Você se encontra SOBREPESO')
elif imc < 40.00:
    print('Você se encontra com OBESIDADE')
else:
    print('Você se encontra com OBESIDADE MÓRBIDA')
| StarcoderdataPython |
3266484 | """
Fixtures for testing
"""
import pytest
import seamm_datastore
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
@pytest.fixture()
def session():
    """Provide a SQLAlchemy session bound to a fresh in-memory SQLite DB."""
    engine = create_engine("sqlite:///:memory:")
    # Configure a Session factory for the throwaway engine, then build one.
    session_factory = sessionmaker(bind=engine)
    return session_factory()
@pytest.fixture(scope="function")
def connection():
    """Fresh datastore connection, logged in as the first non-admin user."""
    db = seamm_datastore.connect(initialize=True)
    # Pick any user other than "admin" to exercise non-privileged access.
    for record in db.get_users():
        if record["username"] != "admin":
            user = record["username"]
            break
    db.login(username=user, password="<PASSWORD>")
    return db
@pytest.fixture(scope="function")
def admin_connection():
    """Fresh datastore connection, logged in as the admin user."""
    datastore = seamm_datastore.connect(initialize=True)
    datastore.login(username="admin", password="<PASSWORD>")
    return datastore
@pytest.fixture(scope="function")
def connection_nologin():
    """Fresh datastore connection with no user logged in."""
    datastore = seamm_datastore.connect(initialize=True)
    return datastore
| StarcoderdataPython |
3228963 | <reponame>jmrozanec/features-generator
import datetime
import os
# https://stackoverflow.com/questions/682504/what-is-a-clean-pythonic-way-to-have-multiple-constructors-in-python
class CreateKeyDataframeAction():
    """Action recording the key name and columns used to key a dataframe.

    Bug fix: the original constructor discarded its arguments and always
    stored ``None``; it now stores the values that were passed in.
    """
    def __init__(self, key_name=None, columns=None):
        self.key_name = key_name  # name of the key to create
        self.columns = columns    # columns the key is built from
class SquashNumericDataframeAction():
    """Action describing how numeric columns are aggregated per key."""
    def __init__(self, key_name=None, squash_strategy=None):
        # Grouping key and aggregation strategy (e.g. mean, sum).
        self.key_name, self.squash_strategy = key_name, squash_strategy
# TODO!
class JoinDataframeAction():
    # NOTE(review): JoinBuilder instantiates this class with five positional
    # join arguments (join_type, left/right df names, column lists), but this
    # stub still carries the squash-action signature — reconcile before use.
    def __init__(self, key_name=None, squash_strategy=None):
        self.key_name = key_name
        self.squash_strategy = squash_strategy
class RemoveDummyColsDataframeAction():
    """Action flagging that dummy columns should be dropped from a dataframe."""
    def __init__(self, df_name):
        # Name of the dataframe whose dummy columns will be removed.
        self.df_name = df_name
class DatasetSplitOnDate():
    """Train/val/test split taken at fixed date boundaries (stub)."""
    def __init__(self, val, test):
        self.val = val    # date boundary (timedelta) for the validation set
        self.test = test  # date boundary (timedelta) for the test set
    def execute(self):
        # Bug fix: ``execute`` lacked ``self`` and could not be called as a
        # method. The split itself is still a placeholder.
        print("TODO: dummy split")
class DatasetSplitOnPercentage():
    """Train/val/test split taken as dataset fractions (stub)."""
    def __init__(self, val, test):
        self.val = val    # fraction of rows for the validation set
        self.test = test  # fraction of rows for the test set
    def execute(self):
        # Bug fix: ``execute`` lacked ``self`` and could not be called as a
        # method. The split itself is still a placeholder.
        print("TODO: dummy split")
class DataframeBuilder():
    """Fluent builder that records dataframe-preparation actions.

    Fixes relative to the draft, which did not parse or run:
    * ``as``/``and`` are Python keywords — renamed ``as_``/``and_``.
    * ``__init__`` read an undefined ``dataset_path``; it is now a parameter,
      matching how ``ExperimentBuilder.load_dataframe`` calls this class.
    * Several methods lacked ``self``; ``with_key`` used an undefined
      ``key_name`` (now an optional keyword argument).
    """
    def __init__(self, experiment_builder, dataset_path):
        self.experiment_builder = experiment_builder  # parent fluent builder
        self.dataset_path = dataset_path
        self.is_path = True  # False once a repository dataset is selected
        # Default dataframe name: the file name without its extension.
        base = os.path.basename(dataset_path)
        self.df_name = os.path.splitext(base)[0]
        self.actions = []  # ordered list of recorded actions
    def from_repository(self, dataset_name):
        """Use a named dataset from the repository instead of a file path."""
        self.is_path = False
        return self
    def from_file(self, dataset_path):
        """Use a dataset loaded from a file path (the default)."""
        return self
    def as_(self, df_name):
        """Override the dataframe name derived from the file name."""
        self.df_name = df_name
        return self
    def with_key(self, columns, key_name=None):
        """Record creation of a key from the given columns."""
        self.actions.append(CreateKeyDataframeAction(key_name=key_name, columns=columns))
        return self
    def squash_numeric(self, key_name, squash_strategy):
        """Record aggregation of numeric columns per key (e.g. mean, sum)."""
        self.actions.append(SquashNumericDataframeAction(key_name=key_name, squash_strategy=squash_strategy))
        return self
    def remove_dummy_cols(self):
        print("TODO: dummy remove_dummy_cols")
        return self
    def remove_dummy_rows(self):
        print("TODO: dummy remove_dummy_rows")
        return self
    def create_lag_features(self):
        print("TODO: dummy create_lag_features")
        return self
    def ratio_for_lagged(self):
        print("TODO: dummy ratio_for_lagged")
        return self
    def and_(self):
        """Return to the parent builder so the fluent chain can continue."""
        return self.experiment_builder
class JoinBuilder():
    """Fluent builder describing a join between two named dataframes.

    Fixes relative to the draft: ``as``/``and`` are Python keywords
    (renamed ``as_``/``and_``) and both methods lacked ``self``.
    """
    def __init__(self, experiment_builder, join_type, left_df_name, right_df_name, columns_left, columns_right):
        self.experiment_builder = experiment_builder
        # NOTE(review): JoinDataframeAction currently only accepts
        # (key_name, squash_strategy); this call follows the intended join
        # signature and requires the action class to be updated to match.
        self.action = JoinDataframeAction(join_type, left_df_name, right_df_name, columns_left, columns_right)
        # Default result name, e.g. "cars-inner-trains".
        self.df_name = "{}-{}-{}".format(left_df_name, join_type, right_df_name)
    def as_(self, df_name):
        """Override the generated result-dataframe name."""
        self.df_name = df_name
        return self
    def and_(self):
        """Return to the parent ExperimentBuilder."""
        return self.experiment_builder
class DatasetSplitBuilder():
    """Builds a dataset-split action from validation/test sizes.

    Fixes relative to the draft: ``build`` lacked ``self``; the type checks
    compared ``type(val)`` against string literals and used the invalid
    ``||`` operator; the type mismatch now raises (implementing the draft's
    "TODO throw an error") instead of only printing.
    """
    def __init__(self, experiment_builder, split_type, val, test):
        self.experiment_builder = experiment_builder
        self.split_type = split_type
        self.val = val
        self.test = test
    def build(self):
        """Return the split implementation matching the val/test types.

        Numeric sizes mean percentage splits; ``datetime.timedelta`` sizes
        mean date-based splits.
        """
        if type(self.val) is not type(self.test):
            raise ValueError("Types for val and test should be the same")
        if isinstance(self.val, (int, float)):
            return DatasetSplitOnPercentage(self.val, self.test)
        if isinstance(self.val, datetime.timedelta):
            return DatasetSplitOnDate(self.val, self.test)
# TODO put into another package
class ModelTrainBuilder():
    """Configures model training (stub).

    Fixes relative to the draft: ``__init__`` had no body (a syntax error)
    and the other methods lacked ``self``. Methods return ``self`` to allow
    the chaining shown in the module's usage comments.
    """
    def __init__(self, builder):
        self.builder = builder  # parent builder in the fluent chain
    def with_validation_metrics(self):
        print("ModelTrainBuilder::with_validation_metrics()")
        return self
    def saving_best(self):
        print("ModelTrainBuilder::saving_best()")
        return self
class ModelTestBuilder():
    """Configures model testing (stub).

    Fixes relative to the draft: ``__init__`` had no body (a syntax error),
    ``with_test_metrics`` lacked ``self``, and its log message named the
    wrong class (copy-paste from ModelTrainBuilder).
    """
    def __init__(self, builder):
        self.builder = builder  # parent builder in the fluent chain
    def with_test_metrics(self):
        print("ModelTestBuilder::with_test_metrics()")
        return self
class GBRegressorBuilder():
    """Fluent builder collecting gradient-boosting hyper-parameters.

    Every ``with_*`` method stores one hyper-parameter in ``params`` and
    returns ``self`` so calls can be chained. Fixes relative to the draft:
    all ``with_*`` methods lacked ``self`` (so their ``self.params``
    references raised NameError) and ``and`` is a keyword (renamed ``and_``).
    """
    def __init__(self, experiment_builder):
        self.params = {}
        self.experiment_builder = experiment_builder
    def with_colsample_bytree(self, colsample_bytree):
        self.params['colsample_bytree'] = colsample_bytree
        return self
    def with_gamma(self, gamma):
        self.params['gamma'] = gamma
        return self
    def with_learning_rate(self, learning_rate):
        self.params['learning_rate'] = learning_rate
        return self
    def with_max_depth(self, max_depth):
        self.params['max_depth'] = max_depth
        return self
    def with_min_child_weight(self, min_child_weight):
        self.params['min_child_weight'] = min_child_weight
        return self
    def with_estimators(self, estimators):
        self.params['estimators'] = estimators
        return self
    def with_reg_alpha(self, reg_alpha):
        self.params['reg_alpha'] = reg_alpha
        return self
    def with_reg_lambda(self, reg_lambda):
        self.params['reg_lambda'] = reg_lambda
        return self
    def with_subsample(self, subsample):
        self.params['subsample'] = subsample
        return self
    def and_(self):
        """Return to the parent ExperimentBuilder."""
        return self.experiment_builder
# the builder abstraction
class ExperimentBuilder():
    """Entry point of the fluent experiment-definition API.

    Fixes relative to the draft: ``__init__`` called ``self.steps.append()``
    with no argument (a TypeError); ``set_seed`` annotated ``seed: 1234``
    instead of giving a default; half the methods lacked ``self``; and
    ``create_model`` omitted the required parent-builder argument.
    """
    def __init__(self):
        self.steps = []   # ordered builder steps recorded so far
        self.seed = 1234  # default RNG seed
        # TODO: record a seed-setting step once one exists.
    def set_seed(self, seed=1234):
        """Set the experiment's RNG seed; returns self for chaining."""
        self.seed = seed
        return self
    def load_dataframe(self, dataset_path):
        """Start describing a dataframe loaded from *dataset_path*."""
        step = DataframeBuilder(self, dataset_path)
        self.steps.append(step)
        return step
    def join(self, join_type, left_df_name, right_df_name, columns_left, columns_right):
        """Start describing a join between two previously named dataframes."""
        step = JoinBuilder(self, join_type, left_df_name, right_df_name, columns_left, columns_right)
        self.steps.append(step)
        return step
    # We shall accept val/test: https://docs.python.org/3/library/datetime.html#datetime.timedelta
    def split_trainvaltest(self, val=0.1, test=0.2):
        """Record a train/validation/test split of the given sizes."""
        # split_type is not yet modelled by the API; pass None for now.
        step = DatasetSplitBuilder(self, None, val, test)
        self.steps.append(step)
        return step
    def create_model(self, model_type):
        """Return a model builder for the requested model type."""
        if model_type == 'gbr':
            # Fix: GBRegressorBuilder requires the parent builder.
            return GBRegressorBuilder(self)
    def train(self):
        # NOTE(review): TrainAbstractionBuilder is not defined anywhere in
        # this module — confirm the intended class (ModelTrainBuilder?).
        step = TrainAbstractionBuilder()
        self.steps.append(step)
        return step
    def execute(self):
        print("TODO: dummy execution")
    def describe(self):
        print("TODO: dummy experiment_builder describe")
    def report(self):
        print("TODO: dummy report")
    def summary(self):
        print("TODO: dummy summary")
# TODO create a summary report considering ex.: involved datasets and configurations.
# NOTE(review): this smoke test fails against the builder as drafted —
# ``set_seed``'s ``1234`` is a type annotation (not a default value), and
# ``load_dataframe`` requires a ``dataset_path`` argument.
f = ExperimentBuilder()
f.set_seed().load_dataframe()
# squash_strategy=mean, sum
#ExperimentBuilder()
# .load_dataframe('cars', '/home/datasets/cars.csv').with_key(key_name, [columns]).squash_numeric('dm-key', squash_strategy)
# .load_dataframe('trains', '/home/datasets/cars.csv').with_key(key_name)
# .inner_join(left, right, columns_left, columns_right)
# .create_lag_features(column, prefix, lag_range)
# .ratio_for_lagged([columns], lagged_column_prefix, source_lag_range, target_offset, target_lag_range_end)
#ExperimentBuilder()
# .load_dataframe('/home/datasets/cars.csv').with_key(key_name, [columns]).squash_numeric('dm-key', squash_strategy).as('cars') -> dataframe builder
# .and() -> experiment builder
# .load_dataframe('trains', '/home/datasets/cars.csv').with_key(key_name).and()
# .create_dataframe_as_join('df1', [left], [right], columns_left, columns_right) -> experiment builder
# .for('df1') -> dataframe builder
# .create_lag_features(column, prefix, lag_range) -> dataframe builder
# .ratio_for_lagged([columns], lagged_column_prefix, source_lag_range, target_offset, target_lag_range_end) -> dataframe builder
# .split_trainvaltest(val=0.1, test=0.2, policy='last') # TODO: we should randomize the dataset and get the required splits
# .split_trainvaltest(val=1.month, test=2.months, policy='any') # TODO: we should take required amount of months (months selected randomly) and then randomize each part
# .split_trainvaltest(val=1.month, test=2.months, policy='last') # TODO: we should sort by date if policy is 'last' and after division randomize each part
# .normalize().and() # TODO: auto-normalize or set manually?
# .feature_selection().and()
# .create_model('gbt').and()
# .train().with_validation_metrics().saving_best().and()
# .test().with_test_metrics().and()
# .report()
# .execute()
| StarcoderdataPython |
3358092 | from inspect import getfile
from numbers import Number
from os.path import realpath
from pathlib import Path
from typing import Dict, Iterable, AnyStr, Sequence
from typing import Union
from ppb import Vector
from ppb.events import EventMixin
# Names of the four sprite edges; used as keys into Side.sides below.
TOP = "top"
BOTTOM = "bottom"
LEFT = "left"
RIGHT = "right"
# Template for the AttributeError text raised by Side._attribute_gate;
# side_attribute_error_message is the template's bound .format method.
error_message = "'{klass}' object does not have attribute '{attribute}'"
side_attribute_error_message = error_message.format
class Side:
    """Accessor for one edge of a sprite.

    A Side compares and does arithmetic like the scalar coordinate of its
    edge (``value``), and also exposes corner/midpoint vectors (``top``,
    ``left``, ``center``, ...) relative to that edge.
    """
    # Maps side name -> (axis, sign of the offset from the sprite's center).
    # y grows downward here (TOP is -1, BOTTOM is +1), i.e. screen-style
    # coordinates.
    sides = {
        LEFT: ('x', -1),
        RIGHT: ('x', 1),
        TOP: ('y', -1),
        BOTTOM: ('y', 1)
    }
    def __init__(self, parent: 'BaseSprite',side: AnyStr):
        # parent: the sprite this edge belongs to.
        # side: one of TOP/BOTTOM/LEFT/RIGHT.
        self.side = side
        self.parent = parent
    def __repr__(self):
        return "Side({}, {})".format(self.parent, self.side)
    def __str__(self):
        return str(self.value)
    # Arithmetic and comparison delegate to the scalar edge coordinate, so a
    # Side can be used interchangeably with a number.
    def __add__(self, other):
        return self.value + other
    def __radd__(self, other):
        return other + self.value
    def __sub__(self, other):
        return self.value - other
    def __rsub__(self, other):
        return other - self.value
    def __eq__(self, other):
        return self.value == other
    def __le__(self, other):
        return self.value <= other
    def __ge__(self, other):
        return self.value >= other
    def __ne__(self, other):
        return self.value != other
    def __gt__(self, other):
        return self.value > other
    def __lt__(self, other):
        return self.value < other
    @property
    def value(self):
        """Scalar coordinate of this edge: center coordinate +/- half size."""
        coordinate, multiplier = self.sides[self.side]
        offset = self.parent._offset_value
        # NOTE(review): indexes the parent's position Vector by axis name
        # ('x'/'y') — assumes ppb.Vector supports string indexing; confirm.
        return self.parent.position[coordinate] + (offset * multiplier)
    # The corner accessors below are only valid on a perpendicular side:
    # e.g. ``sprite.left.top`` is the top-left corner, but ``sprite.top.top``
    # is rejected by _attribute_gate.
    @property
    def top(self):
        self._attribute_gate(TOP, [TOP, BOTTOM])
        return Vector(self.value, self.parent.top.value)
    @top.setter
    def top(self, value):
        self._attribute_gate(TOP, [TOP, BOTTOM])
        # Move this (vertical) side to value[0] and the sprite's top to value[1].
        setattr(self.parent, self.side, value[0])
        self.parent.top = value[1]
    @property
    def bottom(self):
        self._attribute_gate(BOTTOM, [TOP, BOTTOM])
        return Vector(self.value, self.parent.bottom.value)
    @bottom.setter
    def bottom(self, value):
        self._attribute_gate(BOTTOM, [TOP, BOTTOM])
        setattr(self.parent, self.side, value[0])
        self.parent.bottom = value[1]
    @property
    def left(self):
        self._attribute_gate(LEFT, [LEFT, RIGHT])
        return Vector(self.parent.left.value, self.value)
    @left.setter
    def left(self, value):
        self._attribute_gate(LEFT, [LEFT, RIGHT])
        setattr(self.parent, self.side, value[1])
        self.parent.left = value[0]
    @property
    def right(self):
        self._attribute_gate(RIGHT, [LEFT, RIGHT])
        return Vector(self.parent.right.value, self.value)
    @right.setter
    def right(self, value):
        self._attribute_gate(RIGHT, [LEFT, RIGHT])
        setattr(self.parent, self.side, value[1])
        self.parent.right = value[0]
    @property
    def center(self):
        """Midpoint of this edge as a Vector."""
        if self.side in (TOP, BOTTOM):
            return Vector(self.parent.center.x, self.value)
        else:
            return Vector(self.value, self.parent.center.y)
    @center.setter
    def center(self, value):
        # NOTE(review): the two branches are asymmetric — one mutates
        # ``parent.center.x``, the other ``parent.position.y`` — and both
        # rely on the returned Vector being mutable; confirm intended.
        if self.side in (TOP, BOTTOM):
            setattr(self.parent, self.side, value[1])
            self.parent.center.x = value[0]
        else:
            setattr(self.parent, self.side, value[0])
            self.parent.position.y = value[1]
    def _attribute_gate(self, attribute, bad_sides):
        """Raise AttributeError when *attribute* is meaningless for this side."""
        if self.side in bad_sides:
            name = type(self).__name__
            message = side_attribute_error_message(klass=name,
                                                   attribute=attribute)
            raise AttributeError(message)
class BaseSprite(EventMixin):
    """
    The base Sprite class. All sprites should inherit from this (directly or
    indirectly).

    Attributes:
    * image (str): The image file
    * resource_path (pathlib.Path): The path that image is relative to
    * position: Location of the sprite
    * facing: The direction of the "top" of the sprite (rendering only)
    * size: The width/height of the sprite (sprites are square)
    """
    # Class-level defaults; __init__ copies the Vectors per instance.
    image = None
    resource_path = None
    position: Vector = Vector(0, 0)
    facing: Vector = Vector(0, -1)
    size: Union[int, float] = 1

    def __init__(self, **kwargs):
        super().__init__()
        # Make these instance properties with fresh instances
        # Don't use Vector.convert() because we need copying
        self.position = Vector(*self.position)
        self.facing = Vector(*self.facing)
        # Initialize things
        for k, v in kwargs.items():
            # Abbreviations
            if k == 'pos':
                k = 'position'
            # Castings
            if k in ('position', 'facing'):
                v = Vector(*v)  # Vector.convert() when that ships.
            setattr(self, k, v)
        # Trigger some calculations
        self.size = self.size

    @property
    def center(self) -> Vector:
        """The sprite's center point (an alias for its position)."""
        return self.position

    @center.setter
    def center(self, value: Sequence[float]):
        # Accepts any two-element sequence and updates position in place.
        x = value[0]
        y = value[1]
        self.position.x = x
        self.position.y = y

    # Each edge accessor returns a Side object (see Side above); the setters
    # move the sprite so that the given scalar becomes that edge's coordinate.
    @property
    def left(self) -> Side:
        return Side(self, LEFT)

    @left.setter
    def left(self, value: float):
        self.position.x = value + self._offset_value

    @property
    def right(self) -> Side:
        return Side(self, RIGHT)

    @right.setter
    def right(self, value):
        self.position.x = value - self._offset_value

    @property
    def top(self):
        return Side(self, TOP)

    @top.setter
    def top(self, value):
        self.position.y = value + self._offset_value

    @property
    def bottom(self):
        return Side(self, BOTTOM)

    @bottom.setter
    def bottom(self, value):
        self.position.y = value - self._offset_value

    @property
    def _offset_value(self):
        # Half the sprite's edge length: the distance from center to any side.
        return self.size / 2

    def rotate(self, degrees: Number):
        """Rotate the facing vector in place by *degrees*."""
        self.facing.rotate(degrees)

    def __image__(self):
        # Default image name: lower-cased class name with a .png extension.
        if self.image is None:
            self.image = f"{type(self).__name__.lower()}.png"
        return self.image

    def __resource_path__(self):
        # Default resource path: the directory containing the subclass's
        # source file.
        if self.resource_path is None:
            self.resource_path = Path(realpath(getfile(type(self)))).absolute().parent
        return self.resource_path
| StarcoderdataPython |
3548130 | <reponame>Tech-With-Tim/models
from typing import List
from pydantic import BaseModel
from functools import cached_property
from .permission import BasePermission
class BaseCategory(BaseModel):
    """
    Base class for a permission category

    Attributes:
        :param str name: The name of the permission category
        :param List[BasePermission]: A list of the permissions this category has
    """
    name: str
    permissions: List[BasePermission]

    class Config:
        # cached_property stores its result on the instance; telling pydantic
        # to leave it untouched keeps it from being treated as a model field.
        keep_untouched = (cached_property,)

    @cached_property
    def all_permissions(self) -> int:
        """Bitwise OR of every permission value in this category (computed once)."""
        perms = 0
        for perm in self.permissions:
            perms |= perm.value
        return perms
| StarcoderdataPython |
391843 | <gh_stars>1-10
__author__ = 'Orthocenter'
| StarcoderdataPython |
6533007 | from yalul.parser import Parser
from yalul.lex.token import Token
from yalul.lex.token_type import TokenType
from yalul.parsers.ast.nodes.statements.expressions.func_call import FuncCall
from yalul.parsers.ast.nodes.statements.expressions.variable import Variable
class TestFuncCallExpressions:
    """Test parser generating function call expressions"""
    def test_parser_run_generates_correct_ast_with_func_call_expressions(self):
        """
        Validates if parser is generating a correct AST for function calls
        """
        # Token stream equivalent to the source ``sum(20 42);`` — an
        # identifier callee followed by two integer arguments.
        tokens = [
            Token(TokenType.IDENTIFIER, 'sum'),
            Token(TokenType.LEFT_PAREN, "Left Paren"),
            Token(TokenType.INTEGER, 20),
            Token(TokenType.INTEGER, 42),
            Token(TokenType.RIGHT_PAREN, "Right Paren"),
            Token(TokenType.END_STATEMENT, "End of Statement"),
            Token(TokenType.EOF, "End of File")
        ]
        parser_response = Parser(tokens).parse()
        # Parsing must succeed without reported errors.
        assert len(parser_response.errors()) == 0
        first_statement_ast = parser_response.ast.statements[0]
        # The single statement is a call whose callee is a variable reference
        # and whose two arguments carry the literal integer values.
        assert type(first_statement_ast) is FuncCall
        assert type(first_statement_ast.callee) is Variable
        assert len(first_statement_ast.arguments) == 2
        assert first_statement_ast.arguments[0].value == 20
        assert first_statement_ast.arguments[1].value == 42
| StarcoderdataPython |
3320068 | import testflows.settings as settings
from testflows.core import *
@TestStep(Given)
def instrument_clickhouse_server_log(self, node=None, test=None,
        clickhouse_server_log="/var/log/clickhouse-server/clickhouse-server.log"):
    """Instrument clickhouse-server.log for the current test (default)
    by adding start and end messages that include test name to log
    of the specified node. If we are in the debug mode and the test
    fails then dump the messages from the log for this test.
    """
    # Resolve defaults: the currently running test and the context's node.
    if test is None:
        test = current()
    if node is None:
        node = self.context.node
    with By("getting current log size"):
        # Remember the byte offset of the log before the test starts so the
        # test's own messages can be extracted later.
        cmd = node.command(f"stat --format=%s {clickhouse_server_log}")
        start_logsize = cmd.output.split(" ")[0].strip()
    try:
        with And("adding test name start message to the clickhouse-server.log"):
            node.command(f"echo -e \"\\n-- start: {test.name} --\\n\" >> {clickhouse_server_log}")
        # Generator step: hand control back to the test body here; the
        # finally-block below runs during teardown.
        yield
    finally:
        # Skip all teardown work if the test is being force-terminated.
        if test.terminating is True:
            return
        with Finally("adding test name end message to the clickhouse-server.log", flags=TE):
            node.command(f"echo -e \"\\n-- end: {test.name} --\\n\" >> {clickhouse_server_log}")
        with And("getting current log size at the end of the test"):
            cmd = node.command(f"stat --format=%s {clickhouse_server_log}")
            end_logsize = cmd.output.split(" ")[0].strip()
        with And("checking if test has failing result"):
            # Only dump the log slice in debug mode and when the parent test
            # has no passing result.
            if settings.debug and not self.parent.result:
                with Then("dumping clickhouse-server.log for this test"):
                    # Print exactly the bytes appended between the start and
                    # end offsets recorded above.
                    node.command(f"tail -c +{start_logsize} {clickhouse_server_log}"
                                 f" | head -c {int(end_logsize) - int(start_logsize)}")
| StarcoderdataPython |
9631221 | <filename>solutions/hydrothermal_venture.py<gh_stars>0
"""
Part One
========
Determine the number of points where at least two lines overlap. In the
above example, this is anywhere in the diagram with a 2 or larger - a
total of 5 points.
Consider only horizontal and vertical lines. At how many points do at
least two lines overlap?
"""
from collections import defaultdict
from dataclasses import dataclass, field
from typing import DefaultDict, List, Optional
@dataclass(eq=True)
class Point:
    """x and y coordinates for a point on a grid."""

    x: int
    y: int

    def __repr__(self) -> str:
        """Render the point as "(x, y)"."""
        return "({}, {})".format(self.x, self.y)
@dataclass
class Line:
    """Grid representation of a line from two points."""

    start: Point
    end: Point
    points: List[Point] = field(default_factory=list)
    _delta_x: Optional[int] = None
    _delta_y: Optional[int] = None

    def __post_init__(self) -> None:
        """Materialise the line's points as soon as it is constructed."""
        self.draw()

    @classmethod
    def from_string(cls, string: str) -> "Line":
        """Build a line from the puzzle format "x1,y1 -> x2,y2"."""
        raw_start, raw_end = string.split(" -> ")
        start_coords = [int(part) for part in raw_start.split(",")]
        end_coords = [int(part) for part in raw_end.split(",")]
        return cls(Point(*start_coords), Point(*end_coords))

    @property
    def delta_x(self) -> int:
        """The change in x from start to end (computed once, then cached)."""
        if self._delta_x is None:
            self._delta_x = self.end.x - self.start.x
        return self._delta_x

    @property
    def delta_y(self) -> int:
        """The change in y from start to end (computed once, then cached)."""
        if self._delta_y is None:
            self._delta_y = self.end.y - self.start.y
        return self._delta_y

    @property
    def is_horizontal(self) -> bool:
        """True when the line never changes its y coordinate."""
        return self.delta_y == 0

    @property
    def is_vertical(self) -> bool:
        """True when the line never changes its x coordinate."""
        return self.delta_x == 0

    def draw(self) -> None:
        """Walk unit steps from start to end, recording every grid point."""
        # Per-axis unit step: the sign of the corresponding delta.
        step_x = (self.delta_x > 0) - (self.delta_x < 0)
        step_y = (self.delta_y > 0) - (self.delta_y < 0)
        current = self.start
        self.points.append(current)
        while current != self.end:
            current = Point(current.x + step_x, current.y + step_y)
            self.points.append(current)
@dataclass
class Grid:
    """Grid on which to map the lines."""

    @staticmethod
    def map(lines: List[Line]) -> int:
        """Count grid points covered by at least two of the given lines."""
        coverage: DefaultDict[str, int] = defaultdict(int)
        for line in lines:
            for point in line.points:
                coverage[str(point)] += 1
        return len([count for count in coverage.values() if count > 1])

    @staticmethod
    def map_horizontal_and_vertical(lines: List[Line]) -> int:
        """Like :meth:`map`, but considering only axis-aligned lines."""
        coverage: DefaultDict[str, int] = defaultdict(int)
        for line in lines:
            if line.is_horizontal or line.is_vertical:
                for point in line.points:
                    coverage[str(point)] += 1
        return len([count for count in coverage.values() if count > 1])
if __name__ == "__main__":
    # One vent-line definition per input row, e.g. "0,9 -> 5,9".
    with open("inputs/hydrothermal_venture.txt") as input:
        lines = [Line.from_string(raw_line) for raw_line in input]
    # Part One counts overlaps of axis-aligned lines only; Part Two of all.
    print(f"Part One: {Grid.map_horizontal_and_vertical(lines)}")
    print(f"Part Two: {Grid.map(lines)}")
| StarcoderdataPython |
# Read (name, score) pairs from stdin, then print — in alphabetical order —
# the names of every student holding the second-lowest score.
students = []
scores_lst = []
for _ in range(int(input())):
    name = input()
    score = float(input())
    scores_lst.append(score)
    students.append([name, score])
# Deduplicate scores, sort them, and take the second-smallest value.
second_lowest = sorted(set(scores_lst))[1]
matching_names = sorted(name for name, score in students if score == second_lowest)
print("\n".join(matching_names))
| StarcoderdataPython |
3402211 | # tileDEMs.py
# This script looks in the master shapefile for a series of polygons. It then looks for all the DEMs in the directory, finds which DEM
# the polygon is in, and then clips the raster to this polygon. It writes the raster to a tile sub-directory.
# FJC 30/06/21
#import osgeo.gdal as gdal
import pandas as pd
import geopandas as gpd
import os
import numpy as np
import subprocess
def create_raster_from_tile(row):
    """
    loop through the dataframe and create a raster from the geometry of each tile.
    You need to have a column called "location" which tells us the raster that each
    tile corresponds to.
    This writes a temporary shapefile for each tile.
    """
    # NOTE(review): this function reads module-level globals
    # ``tiles_plus_rasters`` and ``data_dir`` that are only defined further
    # down the script — it must not be called before that setup runs.
    if(str(row['raster']) != 'nan'):
        print("TILE ID: ", row['id'])
        # Single-row GeoDataFrame for this tile, written out as a temporary
        # shapefile so gdalwarp can use it as a cutline polygon.
        sub_df = tiles_plus_rasters.iloc[[row.name]]
        temp_shp = sub_df.to_file(data_dir+'tile_temp.shp')
        this_tile_fname = 'tile_'+str(int(row['id']))
        # create a new directory for this tile
        if not os.path.isdir(data_dir+this_tile_fname):
            os.mkdir(data_dir+this_tile_fname)
        # define the destination and source files
        dest_dst = data_dir+this_tile_fname+'/tile_'+str(int(row['id']))+'.bil'
        raster_name = str(row['raster'])
        # NOTE(review): drops the last 5 characters of the raster name, but
        # the '_mask' suffix is already stripped at module level
        # (``.str[:-5]``) — confirm this is not a double trim.
        src_dst = data_dir+raster_name[:len(raster_name) - 5]+'.'+row['ext']
        print(dest_dst, src_dst)
        # do the clipping to a new raster for the tile
        gdal_cmd = 'gdalwarp -cutline {}tile_temp.shp -crop_to_cutline -dstnodata -9999 -of ENVI {} {}'.format(data_dir, src_dst, dest_dst)
        # NOTE(review): both clipping calls below are commented out, so this
        # function currently only prints what it would do — confirm intended.
        #subprocess.run(gdal_cmd, check=True, shell=True)
        print(row['ext'])
        #gdal.Warp(dest_dst, src_dst, cutlineDSName=data_dir+'tile_temp.shp', cropToCutline=True, format='ENVI')
# first read in the tiling shapefile
data_dir = '/raid/fclubb/san_andreas/USGS_LidarB3/'
tiles = gpd.read_file(data_dir+'USGS_LidarB3_tiles.shp')
if not os.path.isfile(data_dir+'USGS_LidarB3_Rasters.shp'):
if not os.path.isfile(data_dir+'USGS_LPC_CA_Sonoma_1m_DTM_mask.shp'):
# loop through the rasters in the directory and create a footprint shapefile, then merge
for f in os.listdir(data_dir):
if f.endswith('.tif') or f.endswith('.bil'):
prefix = f.split('.')[0]
print(prefix)
# first gdal command - create a masking raster with an alpha band
gdal_cmd = 'gdalwarp -dstnodata 0 -dstalpha -of GTiff {} {}_mask.tif'.format(data_dir+f, data_dir+prefix)
print(gdal_cmd)
subprocess.run(gdal_cmd, check=True, shell=True)
# second step - polygonise masking raster to shapefile
gdal_cmd = 'gdal_polygonize.py {}_mask.tif -b 2 -f "ESRI Shapefile" {}_mask.shp'.format(data_dir+prefix, data_dir+prefix)
print(gdal_cmd)
subprocess.run(gdal_cmd, check=True, shell=True)
# merge these to a single shapefile for all the rasters
ogr_cmd = 'ogrmerge.py -o {}USGS_LidarB3_Rasters.shp {}*_mask.shp -single -f "ESRI Shapefile" -src_layer_field_name raster'.format(data_dir, data_dir)
subprocess.run(ogr_cmd, check=True, shell=True)
# read in the raster footprint shapefile
rasters = gpd.read_file(data_dir+'USGS_LidarB3_Rasters.shp')
# polygon intersection opereation to find out the raster corresponding to each tile.
tiles_plus_rasters = gpd.sjoin(tiles, rasters, how='left', op='intersects')
tiles_plus_rasters.drop(columns='DN', inplace=True)
tiles_plus_rasters = tiles_plus_rasters[tiles_plus_rasters['raster'].str.len() > 3]
tiles_plus_rasters.drop_duplicates(subset='id', keep='first', inplace=True)
tiles_plus_rasters['raster'] = tiles_plus_rasters['raster'].str[:-5]
# filter the DF to only have one row for each tile (remove overlapping rasters)
#print(tiles_plus_rasters)
# loop through the directory and add a column for whether it's a bil or a tif
raster_list = []
ext_list = []
for f in os.listdir(data_dir):
if f.endswith('.tif') or f.endswith('.bil'):
if not 'mask' in f:
raster_list.append(f.split('.')[0])
ext_list.append(f.split('.')[1])
dir_rasters = pd.DataFrame({'raster': raster_list, 'ext': ext_list})
del raster_list, ext_list
#print(dir_rasters)
# merge with the tiles df
tiles_plus_rasters = tiles_plus_rasters.merge(dir_rasters, left_on='raster', right_on='raster')
#print(tiles_plus_rasters)
# this should give a new column with the raster that each tile corresponds to. now use gdal to do the tiling
tiles_plus_rasters.apply(create_raster_from_tile, axis=1)
| StarcoderdataPython |
3500190 | <filename>Python/ReverseNumber.py
#https://leetcode.com/problems/reverse-integer/
#Given a 32-bit signed integer, reverse digits of an integer.
#Note:
#Assume we are dealing with an environment that could only store integers within the 32-bit signed integer range: [−231, 231 − 1]. For the purpose of this problem, assume that your function returns 0 when the reversed integer overflows.
#Example 1:
#Input: x = 123
#Output: 321
#Example 2:
#Input: x = -123
#Output: -321
#Example 3:
#Input: x = 120
#Output: 21
#Example 4:
#Input: x = 0
#Output: 0
#Constraints:
#-231 <= x <= 231 - 1
class Solution(object):
    def reverse(self, x):
        """
        Reverse the decimal digits of x, preserving its sign.
        Returns 0 when the reversed value overflows a signed 32-bit integer.
        :type x: int
        :rtype: int
        """
        sign = -1 if x < 0 else 1
        # Reverse the digits of the magnitude, then reapply the sign.
        reversed_magnitude = int(str(abs(x))[::-1])
        result = sign * reversed_magnitude
        # 32-bit signed range check: [-2**31, 2**31 - 1].
        if result > 2 ** 31 - 1 or result < -(2 ** 31):
            return 0
        return result
| StarcoderdataPython |
class DurgaError(Exception):
    """Main exception class."""


class ObjectNotFoundError(DurgaError):
    """The requested object does not exist."""


class MultipleObjectsReturnedError(DurgaError):
    """The request returned multiple objects when only one was expected.

    That is, if a GET request returns more than one element.
    """

    def __str__(self):
        return 'Your GET request returned multiple results.'


class ValidationError(DurgaError):
    """The value did not pass the validator."""
| StarcoderdataPython |
class Solution(object):
    def maxWidthOfVerticalArea(self, points):
        """
        Return the width of the widest vertical area between two points:
        the largest gap between consecutive x-coordinates after sorting.
        :type points: List[List[int]]
        :rtype: int
        """
        # Sort a copy of the x-coordinates only, instead of mutating the
        # caller's points list in place as the original did.
        xs = sorted(point[0] for point in points)
        # The widest area is the maximum gap between x-adjacent points;
        # this replaces the original's second full sort of the gap list.
        return max(b - a for a, b in zip(xs, xs[1:]))
# Example from the problem statement; expected output: 3.
points=[[3,1],[9,0],[1,0],[1,4],[5,3],[8,8]]
print(Solution().maxWidthOfVerticalArea(points))
| StarcoderdataPython |
3214207 | from jetbot import ObjectDetector
from jetbot import Camera
import cv2
import numpy as np
# Load the TensorRT SSD detector and open the camera at the model's
# expected 300x300 input resolution.
model = ObjectDetector('ssd_mobilenet_v2_coco.engine')
camera = Camera.instance(width=300, height=300)
# NOTE(review): this first result is discarded (recomputed in the loop) —
# presumably a warm-up inference; confirm it is needed.
detections = model(camera.value)
while True:
    # compute all detected objects
    detections = model(camera.value)
    img = camera.value
    # Single-channel mask matching the frame size; ORB keypoints are only
    # searched where this mask is non-zero.
    mask = np.zeros(img.shape[:2],dtype = np.uint8)
    # draw all detections on image
    for det in detections[0]:
        # bbox coordinates are normalised to [0, 1]; scale to the 300px frame.
        bbox = det['bbox']
        # NOTE(review): thickness 2 draws only the rectangle outline on the
        # mask, so keypoints are restricted to box borders — confirm whether
        # a filled rectangle (thickness -1) was intended.
        cv2.rectangle(mask, (int(300 * bbox[0]), int(300 * bbox[1])), (int(300 * bbox[2]), int(300 * bbox[3])), (255, 0, 0), 2)
    # Detect ORB keypoints inside the mask, compute their descriptors, and
    # render them onto the current frame.
    orb = cv2.ORB_create()
    kp = orb.detect(camera.value,mask)
    kp,des = orb.compute(camera.value,kp)
    img = cv2.drawKeypoints(camera.value,kp,camera.value)
    cv2.imshow('detects',img)
    cv2.waitKey(1)
| StarcoderdataPython |
9730057 | """
Loss Development
================
"""
import numpy as np
import copy
import warnings
from sklearn.base import BaseEstimator
from chainladder import WeightedRegression
class DevelopmentBase(BaseEstimator):
    """Shared behavior for loss-development estimators: fit_transform and
    the cumulative development factor (``cdf_``) derived from ``ldf_``."""

    def fit_transform(self, X, y=None, sample_weight=None):
        """ Equivalent to fit(X).transform(X)

        Parameters
        ----------
        X : Triangle-like
            Set of LDFs based on the model.
        y : Ignored
        sample_weight : Ignored

        Returns
        -------
        X_new : New triangle with transformed attributes.
        """
        self.fit(X, y, sample_weight)
        return self.transform(X)

    @property
    def cdf_(self):
        """Cumulative development factors implied by ``ldf_``.

        Returns None when the estimator has not been fitted yet.
        """
        if self.__dict__.get('ldf_', None) is None:
            return
        else:
            # Deep copy so the stored ldf_ triangle is not mutated.
            obj = copy.deepcopy(self.ldf_)
            # CDF at age k is the product of all LDFs from age k onward:
            # reverse the development axis, cumulative-product, reverse back.
            cdf_ = np.flip(np.cumprod(np.flip(obj.triangle, -1), -1), -1)
            obj.triangle = cdf_
            return obj
class Development(DevelopmentBase):
""" A Transformer that allows for basic loss development pattern selection.
Parameters
----------
n_periods : integer, optional (default=-1)
number of origin periods to be used in the ldf average calculation. For
all origin periods, set n_periods=-1
average : string, optional (default='volume')
type of averaging to use for ldf average calculation. Options include
'volume', 'simple', and 'regression'
sigma_interpolation : string optional (default='log-linear')
Options include 'log-linear' and 'mack'
Attributes
----------
ldf_ : Triangle
The estimated loss development patterns
cdf_ : Triangle
The estimated cumulative development patterns
sigma_ : Triangle
Sigma of the ldf regression
std_err_ : Triangle
Std_err of the ldf regression
w_ : Triangle
The weight used in the ldf regression
"""
def __init__(self, n_periods=-1, average='volume',
sigma_interpolation='log-linear'):
self.n_periods = n_periods
self.average = average
self.sigma_interpolation = sigma_interpolation
def _assign_n_periods_weight(self, X):
if type(self.n_periods) is int:
return self._assign_n_periods_weight_int(X, self.n_periods)[..., :-1]
elif type(self.n_periods) is list:
if len(self.n_periods) != X.triangle.shape[-1]-1:
raise ValueError(f'n_periods list must be of lenth {X.triangle.shape[-1]-1}.')
else:
return self._assign_n_periods_weight_list(X)
else:
raise ValueError('n_periods must be of type <int> or <list>')
def _assign_n_periods_weight_list(self, X):
dict_map = {item: self._assign_n_periods_weight_int(X, item)
for item in set(self.n_periods)}
conc = [dict_map[item][..., num:num+1, :]
for num, item in enumerate(self.n_periods)]
return np.swapaxes(np.concatenate(tuple(conc), -2), -2, -1)
def _assign_n_periods_weight_int(self, X, n_periods):
''' Zeros out weights depending on number of periods desired
Only works for type(n_periods) == int
'''
if n_periods < 1 or n_periods >= X.shape[-2] - 1:
return X.triangle*0+1
else:
flip_nan = np.nan_to_num(X.triangle*0+1)
k, v, o, d = flip_nan.shape
w = np.concatenate((1-flip_nan[..., -(o-n_periods-1):, :],
np.ones((k, v, n_periods+1, d))), 2)*flip_nan
return w*X.expand_dims(X.nan_triangle())
def fit(self, X, y=None, sample_weight=None):
    """Fit the model with X.

    Parameters
    ----------
    X : Triangle-like
        Set of LDFs to which the munich adjustment will be applied.
    y : Ignored
    sample_weight : Ignored

    Returns
    -------
    self : object
        Returns the instance itself.
    """
    tri_array = X.triangle.copy()
    # Zeros would produce infinite/degenerate link ratios; treat as missing.
    tri_array[tri_array == 0] = np.nan
    # Broadcast a single average method across all development ages.
    if type(self.average) is str:
        average = [self.average] * (tri_array.shape[-1] - 1)
    else:
        average = self.average
    average = np.array(average)
    self.average_ = average
    # Exponent on x in the regression weight 1/x**val:
    # regression -> 0? no: maps to 2, volume -> 1, simple -> 0 applied below.
    weight_dict = {'regression': 2, 'volume': 1, 'simple': 0}
    _x = tri_array[..., :-1]
    _y = tri_array[..., 1:]
    # Unknown averages fall back to 2 (regression-style weighting).
    val = np.array([weight_dict.get(item.lower(), 2)
                    for item in average])
    # Expand val to the full (k, v, o, d) shape of the triangle.
    for i in [2, 1, 0]:
        val = np.repeat(np.expand_dims(val, 0), tri_array.shape[i], axis=0)
    # Mask val where y is missing so those cells carry zero weight.
    val = np.nan_to_num(val * (_y * 0 + 1))
    _w = self._assign_n_periods_weight(X) / (_x**(val))
    self.w_ = self._assign_n_periods_weight(X)
    params = WeightedRegression(_w, _x, _y, axis=2, thru_orig=True).fit()
    if self.n_periods != 1:
        params = params.sigma_fill(self.sigma_interpolation)
    else:
        warnings.warn('Setting n_periods=1 does not allow enough degrees of'
                      ' freedom to support calculation of all regression '
                      'statistics.  Only LDFs have been calculated.')
    # Backfill std_err where missing using sigma / sqrt(sum of weights),
    # leaving already-computed std_err values untouched.
    params.std_err_ = np.nan_to_num(params.std_err_) + \
        np.nan_to_num((1-np.nan_to_num(params.std_err_*0+1)) *
                      params.sigma_/np.swapaxes(np.sqrt(_x**(2-val))[..., 0:1, :], -1, -2))
    # Stack slope/sigma/std_err into one array, then split into attributes.
    params = np.concatenate((params.slope_,
                             params.sigma_,
                             params.std_err_), 3)
    params = np.swapaxes(params, 2, 3)
    self.ldf_ = self._param_property(X, params, 0)
    self.sigma_ = self._param_property(X, params, 1)
    self.std_err_ = self._param_property(X, params, 2)
    return self
def transform(self, X):
    """ If X and self are of different shapes, align self to X, else
    return self.

    Parameters
    ----------
    X : Triangle
        The triangle to be transformed

    Returns
    -------
    X_new : New triangle with transformed attributes.
    """
    X_new = copy.deepcopy(X)
    # Copy the fitted estimator attributes onto the (deep-copied) triangle.
    for attr in ('std_err_', 'cdf_', 'ldf_', 'sigma_',
                 'sigma_interpolation', 'average_', 'w_'):
        setattr(X_new, attr, getattr(self, attr))
    return X_new
def _param_property(self, X, params, idx):
    # Wrap one parameter slice (ldf/sigma/std_err) in a Triangle-shaped
    # copy of X so it carries X's metadata.
    obj = copy.deepcopy(X)
    # Broadcast the selected parameter row across X's origin dimension.
    obj.triangle = np.ones(X.shape)[..., :-1]*params[..., idx:idx+1, :]
    # Development labels come from the link-ratio view of X.
    obj.ddims = X.link_ratio.ddims
    obj.valuation = obj._valuation_triangle(obj.ddims)
    # Parameters are not subject to the usual NaN-triangle masking.
    obj.nan_override = True
    return obj
| StarcoderdataPython |
159089 | <reponame>Pistak/ha-smartthinq-sensors
import enum
from .device import STATE_OPTIONITEM_NONE
# Dryer State
class STATE_DRYER(enum.Enum):
    """Operational states reported by the dryer.

    NOTE: DRYING and RUNNING share the value "Drying", so by enum
    semantics RUNNING is an *alias* of DRYING
    (``STATE_DRYER.RUNNING is STATE_DRYER.DRYING``).
    """
    POWER_OFF = STATE_OPTIONITEM_NONE
    COOLING = "Cooling"
    DRYING = "Drying"
    END = "End"
    ERROR = "Error"
    ERRORSTATE = "An error occurred"
    INITIAL = "Select Course"
    PAUSE = "Paused"
    RUNNING = "Drying"
    SMART_DIAGNOSIS = "Smart Diagnosis"
    WRINKLE_CARE = "Wrinkle Care"
# Dryer Level
class STATE_DRYER_DRYLEVEL(enum.Enum):
    """Dry-level options; values are the strings shown to the user."""
    OFF = STATE_OPTIONITEM_NONE
    NO_SELECT = "Not selected"
    COOLING = "Cooling"
    CUPBOARD = "Cupboard"
    DAMP = "Damp"
    ECO = "Eco"
    EXTRA = "Extra"
    ENERGY = "Energy"
    IRON = "Iron"
    LESS = "Less"
    LOW = "Low"
    MORE = "More"
    NORMAL = "Normal"
    SPEED = "Speed"
    VERY = "Very"
    TIME_30 = "30 min"
    TIME_60 = "60 min"
    TIME_90 = "90 min"
    TIME_120 = "120 min"
    TIME_150 = "150 min"
# Dryer Temp
class STATE_DRYER_TEMP(enum.Enum):
    """Drying temperature options; values are display strings."""
    OFF = STATE_OPTIONITEM_NONE
    NO_SELECT = "Not selected"
    HIGH = "High"
    LOW = "Low"
    MEDIUM = "Medium"
    MID_HIGH = "Medium High"
    ULTRA_LOW = "Ultra Low"
# Dryer Error
class STATE_DRYER_ERROR(enum.Enum):
    """Error codes with user-facing remediation messages as values."""
    OFF = STATE_OPTIONITEM_NONE
    NO_ERROR = "Normal"
    ERROR_AE = "AE - Contact Service Center"
    ERROR_CE1 = "CE1 - Contact Service Center"
    ERROR_DE = "Door open - Please close the door"
    ERROR_DE4 = "DE4 - Contact Service Center"
    ERROR_EMPTYWATER = "Error Empty Water"
    ERROR_F1 = "F1 - Contact Service Center"
    ERROR_LE1 = "LE1 - Contact Service Center"
    ERROR_LE2 = "LE2 - Contact Service Center"
    ERROR_NOFILTER = "Error No Filter"
    ERROR_NP = "NP - Contact Service Center"
    ERROR_OE = "Drain error - Please make sure the pipe is not clogged/frozen"
    ERROR_PS = "PS - Contact Service Center"
    ERROR_TE1 = "TE1 - Contact Service Center"
    ERROR_TE2 = "TE2 - Contact Service Center"
    ERROR_TE5 = "TE5 - Contact Service Center"
    ERROR_TE6 = "TE6 - Contact Service Center"
"""------------------for Dryers"""
DRYERSTATES = {
"ERROR": STATE_DRYER.ERROR,
"@WM_STATE_COOLING_W": STATE_DRYER.COOLING,
"@WM_STATE_END_W": STATE_DRYER.END,
"@WM_STATE_ERROR_W": STATE_DRYER.ERRORSTATE,
"@WM_STATE_DRYING_W": STATE_DRYER.DRYING,
"@WM_STATE_INITIAL_W": STATE_DRYER.INITIAL,
"@WM_STATE_POWER_OFF_W": STATE_DRYER.POWER_OFF,
"@WM_STATE_PAUSE_W": STATE_DRYER.PAUSE,
"@WM_STATE_RUNNING_W": STATE_DRYER.RUNNING,
"@WM_STATE_SMART_DIAGNOSIS_W": STATE_DRYER.SMART_DIAGNOSIS,
"@WM_STATE_WRINKLECARE_W": STATE_DRYER.WRINKLE_CARE,
}
# Maps raw dry-level keys to enum members. Several model families
# (generic "@WM_OPTION", DRY24, DRY27, TITAN2, FL24_TITAN) use different
# keys for the same logical level.
DRYERDRYLEVELS = {
    "-": STATE_DRYER_DRYLEVEL.OFF,
    "OFF": STATE_DRYER_DRYLEVEL.OFF,
    "NOT_SELECTED": STATE_DRYER_DRYLEVEL.NO_SELECT,
    "@WM_TERM_NO_SELECT_W": STATE_DRYER_DRYLEVEL.NO_SELECT,
    "@WM_OPTION_DRY_DAMP_DRY_W": STATE_DRYER_DRYLEVEL.DAMP,
    "@WM_OPTION_DRY_LESS_DRY_W": STATE_DRYER_DRYLEVEL.LESS,
    "@WM_OPTION_DRY_MORE_DRY_W": STATE_DRYER_DRYLEVEL.MORE,
    "@WM_OPTION_DRY_NORMAL_W": STATE_DRYER_DRYLEVEL.NORMAL,
    "@WM_OPTION_DRY_VERY_DRY_W": STATE_DRYER_DRYLEVEL.VERY,
    "@WM_DRY24_DRY_LEVEL_CUPBOARD_W": STATE_DRYER_DRYLEVEL.CUPBOARD,
    "@WM_DRY24_DRY_LEVEL_DAMP_W": STATE_DRYER_DRYLEVEL.DAMP,
    "@WM_DRY24_DRY_LEVEL_EXTRA_W": STATE_DRYER_DRYLEVEL.EXTRA,
    "@WM_DRY24_DRY_LEVEL_IRON_W": STATE_DRYER_DRYLEVEL.IRON,
    "@WM_DRY24_DRY_LEVEL_LESS_W": STATE_DRYER_DRYLEVEL.LESS,
    "@WM_DRY24_DRY_LEVEL_MORE_W": STATE_DRYER_DRYLEVEL.MORE,
    "@WM_DRY24_DRY_LEVEL_NORMAL_W": STATE_DRYER_DRYLEVEL.NORMAL,
    "@WM_DRY24_DRY_LEVEL_VERY_W": STATE_DRYER_DRYLEVEL.VERY,
    "@WM_DRY27_DRY_LEVEL_CUPBOARD_W": STATE_DRYER_DRYLEVEL.CUPBOARD,
    "@WM_DRY27_DRY_LEVEL_DAMP_W": STATE_DRYER_DRYLEVEL.DAMP,
    "@WM_DRY27_DRY_LEVEL_EXTRA_W": STATE_DRYER_DRYLEVEL.EXTRA,
    "@WM_DRY27_DRY_LEVEL_IRON_W": STATE_DRYER_DRYLEVEL.IRON,
    "@WM_DRY27_DRY_LEVEL_LESS_W": STATE_DRYER_DRYLEVEL.LESS,
    "@WM_DRY27_DRY_LEVEL_MORE_W": STATE_DRYER_DRYLEVEL.MORE,
    "@WM_DRY27_DRY_LEVEL_NORMAL_W": STATE_DRYER_DRYLEVEL.NORMAL,
    "@WM_DRY27_DRY_LEVEL_VERY_W": STATE_DRYER_DRYLEVEL.VERY,
    "@WM_TITAN2_OPTION_DRY_NORMAL_W": STATE_DRYER_DRYLEVEL.NORMAL,
    "@WM_TITAN2_OPTION_DRY_ECO_W": STATE_DRYER_DRYLEVEL.ECO,
    "@WM_TITAN2_OPTION_DRY_VERY_W": STATE_DRYER_DRYLEVEL.VERY,
    "@WM_TITAN2_OPTION_DRY_IRON_W": STATE_DRYER_DRYLEVEL.IRON,
    "@WM_TITAN2_OPTION_DRY_LOW_W": STATE_DRYER_DRYLEVEL.LOW,
    "@WM_TITAN2_OPTION_DRY_ENERGY_W": STATE_DRYER_DRYLEVEL.ENERGY,
    "@WM_TITAN2_OPTION_DRY_SPEED_W": STATE_DRYER_DRYLEVEL.SPEED,
    "@WM_TITAN2_OPTION_DRY_COOLING_W": STATE_DRYER_DRYLEVEL.COOLING,
    "@WM_TITAN2_OPTION_DRY_30_W": STATE_DRYER_DRYLEVEL.TIME_30,
    "@WM_TITAN2_OPTION_DRY_60_W": STATE_DRYER_DRYLEVEL.TIME_60,
    "@WM_TITAN2_OPTION_DRY_90_W": STATE_DRYER_DRYLEVEL.TIME_90,
    "@WM_TITAN2_OPTION_DRY_120_W": STATE_DRYER_DRYLEVEL.TIME_120,
    "@WM_TITAN2_OPTION_DRY_150_W": STATE_DRYER_DRYLEVEL.TIME_150,
    "@WM_FL24_TITAN_DRY_NORMAL_W": STATE_DRYER_DRYLEVEL.NORMAL,
    "@WM_FL24_TITAN_DRY_ECO_W": STATE_DRYER_DRYLEVEL.ECO,
    "@WM_FL24_TITAN_DRY_VERY_W": STATE_DRYER_DRYLEVEL.VERY,
    "@WM_FL24_TITAN_DRY_IRON_W": STATE_DRYER_DRYLEVEL.IRON,
    "@WM_FL24_TITAN_DRY_LOW_W": STATE_DRYER_DRYLEVEL.LOW,
    "@WM_FL24_TITAN_DRY_ENERGY_W": STATE_DRYER_DRYLEVEL.ENERGY,
    "@WM_FL24_TITAN_DRY_SPEED_W": STATE_DRYER_DRYLEVEL.SPEED,
    "@WM_FL24_TITAN_DRY_COOLING_W": STATE_DRYER_DRYLEVEL.COOLING,
    "@WM_FL24_TITAN_DRY_30_W": STATE_DRYER_DRYLEVEL.TIME_30,
    "@WM_FL24_TITAN_DRY_60_W": STATE_DRYER_DRYLEVEL.TIME_60,
    "@WM_FL24_TITAN_DRY_90_W": STATE_DRYER_DRYLEVEL.TIME_90,
    "@WM_FL24_TITAN_DRY_120_W": STATE_DRYER_DRYLEVEL.TIME_120,
    "@WM_FL24_TITAN_DRY_150_W": STATE_DRYER_DRYLEVEL.TIME_150,
}
# Maps raw temperature keys (generic and DRY27-family) to enum members.
DRYERTEMPS = {
    "-": STATE_DRYER_TEMP.OFF,
    "OFF": STATE_DRYER_TEMP.OFF,
    "@WM_TERM_NO_SELECT_W": STATE_DRYER_TEMP.NO_SELECT,
    "@WM_OPTION_TEMP_HIGH_W": STATE_DRYER_TEMP.HIGH,
    "@WM_OPTION_TEMP_LOW_W": STATE_DRYER_TEMP.LOW,
    "@WM_OPTION_TEMP_MEDIUM_W": STATE_DRYER_TEMP.MEDIUM,
    "@WM_OPTION_TEMP_MEDIUM_HIGH_W": STATE_DRYER_TEMP.MID_HIGH,
    "@WM_OPTION_TEMP_ULTRA_LOW_W": STATE_DRYER_TEMP.ULTRA_LOW,
    "@WM_DRY27_TEMP_HIGH_W": STATE_DRYER_TEMP.HIGH,
    "@WM_DRY27_TEMP_LOW_W": STATE_DRYER_TEMP.LOW,
    "@WM_DRY27_TEMP_MEDIUM_W": STATE_DRYER_TEMP.MEDIUM,
    "@WM_DRY27_TEMP_MID_HIGH_W": STATE_DRYER_TEMP.MID_HIGH,
    "@WM_DRY27_TEMP_ULTRA_LOW_W": STATE_DRYER_TEMP.ULTRA_LOW,
}
# Maps human-readable "<code> Error" strings to STATE_DRYER_ERROR members.
DRYERREFERRORS = {
    "OFF": STATE_DRYER_ERROR.OFF,
    "No Error": STATE_DRYER_ERROR.NO_ERROR,
    "AE Error": STATE_DRYER_ERROR.ERROR_AE,
    "CE1 Error": STATE_DRYER_ERROR.ERROR_CE1,
    "DE Error": STATE_DRYER_ERROR.ERROR_DE,
    "DE4 Error": STATE_DRYER_ERROR.ERROR_DE4,
    "EMPTYWATER Error": STATE_DRYER_ERROR.ERROR_EMPTYWATER,
    "F1 Error": STATE_DRYER_ERROR.ERROR_F1,
    "LE1 Error": STATE_DRYER_ERROR.ERROR_LE1,
    "LE2 Error": STATE_DRYER_ERROR.ERROR_LE2,
    "NOFILTER Error": STATE_DRYER_ERROR.ERROR_NOFILTER,
    "NP Error": STATE_DRYER_ERROR.ERROR_NP,
    "OE Error": STATE_DRYER_ERROR.ERROR_OE,
    "PS Error": STATE_DRYER_ERROR.ERROR_PS,
    "TE1 Error": STATE_DRYER_ERROR.ERROR_TE1,
    "TE2 Error": STATE_DRYER_ERROR.ERROR_TE2,
    "TE5 Error": STATE_DRYER_ERROR.ERROR_TE5,
    "TE6 Error": STATE_DRYER_ERROR.ERROR_TE6,
}
# this is not used
# Kept for reference: maps raw "@WM_US_DRYER_ERROR_*" keys to enum members.
DRYERERRORS = {
    "OFF": STATE_DRYER_ERROR.OFF,
    "ERROR_NOERROR": STATE_DRYER_ERROR.NO_ERROR,
    "@WM_US_DRYER_ERROR_AE_W": STATE_DRYER_ERROR.ERROR_AE,
    "@WM_US_DRYER_ERROR_CE1_W": STATE_DRYER_ERROR.ERROR_CE1,
    "@WM_US_DRYER_ERROR_DE_W": STATE_DRYER_ERROR.ERROR_DE,
    "@WM_WW_FL_ERROR_DE4_W": STATE_DRYER_ERROR.ERROR_DE4,
    "@WM_US_DRYER_ERROR_EMPTYWATER_W": STATE_DRYER_ERROR.ERROR_EMPTYWATER,
    "@WM_US_DRYER_ERROR_F1_W": STATE_DRYER_ERROR.ERROR_F1,
    "@WM_US_DRYER_ERROR_LE1_W": STATE_DRYER_ERROR.ERROR_LE1,
    "@WM_US_DRYER_ERROR_LE2_W": STATE_DRYER_ERROR.ERROR_LE2,
    "@WM_US_DRYER_ERROR_NOFILTER_W": STATE_DRYER_ERROR.ERROR_NOFILTER,
    "@WM_US_DRYER_ERROR_NP_GAS_W": STATE_DRYER_ERROR.ERROR_NP,
    "@WM_US_DRYER_ERROR_PS_W": STATE_DRYER_ERROR.ERROR_PS,
    "@WM_US_DRYER_ERROR_OE_W": STATE_DRYER_ERROR.ERROR_OE,
    "@WM_US_DRYER_ERROR_TE1_W": STATE_DRYER_ERROR.ERROR_TE1,
    "@WM_US_DRYER_ERROR_TE2_W": STATE_DRYER_ERROR.ERROR_TE2,
    "@WM_US_DRYER_ERROR_TE5_W": STATE_DRYER_ERROR.ERROR_TE5,
    "@WM_US_DRYER_ERROR_TE6_W": STATE_DRYER_ERROR.ERROR_TE6,
}
| StarcoderdataPython |
9778759 | #!/usr/bin/env python3
"""
Licensed under Apache License 2.0
Original source:
https://github.com/lucidsoftware/apt-boto-s3
It was adapted to python3:
https://github.com/Kirill888/apt-boto-s3
This provides S3 transport for apt-get.
Installation
------------
```
install -m 755 ./s3.py /usr/lib/apt/methods/s3
apt-get install python3-pip
pip3 install boto3
```
"""
import boto3
import botocore
import collections
import hashlib
import os
from queue import Queue
import re
import signal
import socket
import sys
import threading
import urllib.request
import urllib.error
import urllib.parse
class Settings(object):
    """Mutable runtime configuration for the S3 apt transport."""

    def __init__(self):
        # Defaults: a few metadata-service attempts, short timeout, and
        # signature version auto-detected per request.
        self.metadata_service_num_attempts = 5
        self.metadata_service_timeout = 1
        self.signature_version = None

    def botocore_session(self):
        """Create a botocore session carrying the metadata-service knobs."""
        session = botocore.session.get_session()
        overrides = (
            ('metadata_service_num_attempts', self.metadata_service_num_attempts),
            ('metadata_service_timeout', self.metadata_service_timeout),
        )
        for variable, value in overrides:
            if value is not None:
                session.set_config_variable(variable, value)
        return session
# Module-wide settings singleton, mutated by 601 Configuration messages.
settings = Settings()
class Interrupt():
    """Thread-safe, one-shot interrupt flag."""

    def __init__(self):
        self.lock = threading.Lock()
        self.interrupted = False

    def __bool__(self):
        return self.interrupted

    def interupt(self):
        # (sic) method name kept for compatibility with existing callers.
        # Returns True only for the call that flips the flag.
        with self.lock:
            was_set = self.interrupted
            self.interrupted = True
            return not was_set
class MessageHeader(collections.namedtuple('MessageHeader_', ['status_code', 'status_info'])):
    """One 'NNN Info' header line of an apt transport message."""

    def __str__(self):
        return f'{self.status_code} {self.status_info}'

    @staticmethod
    def parse(line):
        """Parse 'NNN Info' into a MessageHeader with an int status code."""
        code, info = line.split(' ', 1)
        return MessageHeader(int(code), info)
class MessageHeaders:
    """Well-known status headers of the APT method interface protocol."""
    CAPABILITIES = MessageHeader(100, 'Capabilities')
    STATUS = MessageHeader(102, 'Status')
    URI_FAILURE = MessageHeader(400, 'URI Failure')
    GENERAL_FAILURE = MessageHeader(401, 'General Failure')
    URI_START = MessageHeader(200, 'URI Start')
    URI_DONE = MessageHeader(201, 'URI Done')
    URI_ACQUIRE = MessageHeader(600, 'URI Acquire')
    CONFIGURATION = MessageHeader(601, 'Configuration')
class Message(collections.namedtuple('Message_', ['header', 'fields'])):
    """A full apt transport message: a header and (name, value) fields."""

    @staticmethod
    def parse_lines(lines):
        """Parse a message block: header line first, then 'Name: value' lines."""
        # maxsplit passed as keyword: the positional form is deprecated in
        # re.split (DeprecationWarning since Python 3.13).
        return Message(MessageHeader.parse(lines[0]),
                       tuple(re.split(': *', line, maxsplit=1)
                             for line in lines[1:]))

    def get_field(self, field_name):
        """Return the first value for *field_name* (case-insensitive), or None."""
        return next(self.get_fields(field_name), None)

    def get_fields(self, field_name):
        """Yield every value whose field name matches, case-insensitively."""
        return (value for name, value in self.fields
                if name.lower() == field_name.lower())

    def __str__(self):
        # Wire format: header, fields, then a blank line terminator.
        lines = [str(self.header)]
        lines.extend('{}: {}'.format(name, value) for name, value in self.fields)
        lines.append('\n')
        return '\n'.join(lines)
# Pair of file-like streams: apt writes requests to `input`, replies go to `output`.
Pipes = collections.namedtuple('Pipes', ['input', 'output'])
class AptIO(object):
    """Readers/writers for the apt transport wire protocol."""

    @staticmethod
    def input(input):
        """Return an iterator of Message objects parsed from *input*.

        Messages are blocks of non-empty lines terminated by a blank
        line; iteration stops at EOF.
        """
        def read_one():
            lines = []
            while True:
                line = input.readline()
                if not line:
                    # EOF: None is the sentinel that stops iter() below.
                    return None
                line = line.rstrip('\n')
                if line:
                    lines.append(line)
                elif lines:
                    # Blank line closes a non-empty block -> one message.
                    return Message.parse_lines(lines)
        return iter(read_one, None)

    @staticmethod
    def output(output):
        """Return a callable that serializes one Message to *output*."""
        def send_one(message):
            output.write(str(message))
            output.flush()
        return send_one
class AptMethod(object):
    """Base apt method: wires a pair of pipes to message readers/writers."""
    def __init__(self, pipes):
        self.input = AptIO.input(pipes.input)
        self.output = AptIO.output(pipes.output)
class AptRequest(object):
    """Base class handling a single apt message.

    Subclasses implement ``_handle_message``; any exception is reported
    back to apt as a 401 General Failure instead of crashing the method.
    """
    def __init__(self, output):
        self.output = output

    def handle_message(self, message):
        try:
            self._handle_message(message)
        except Exception as ex:
            exc_tb = sys.exc_info()[2]
            # NOTE(review): tb_frame/tb_lineno here come from the outermost
            # traceback entry, not necessarily where the exception was raised.
            message = '{} ({}, line {})'.format(ex, exc_tb.tb_frame.f_code.co_filename, exc_tb.tb_lineno)
            self.output(Message(MessageHeaders.GENERAL_FAILURE, (('Message', message),)))
class PipelinedAptMethod(AptMethod):
    """Apt method that handles requests concurrently (Pipeline capability).

    Each incoming message runs on its own thread and writes to its own
    queue; a single drain thread emits the queues in request order so
    replies to apt are never interleaved.
    """

    class Output(object):
        """Per-request output queue, registered with the method on creation."""
        def __init__(self, method):
            self.method = method
            self.queue = Queue()
            self.method.queues.put(self.queue)

        def __enter__(self):
            # Hand out the raw queue.put as the per-request output callable.
            return self.queue.put

        def __exit__(self, type, value, traceback):
            # None is the sentinel marking the end of this request's output.
            self.queue.put(None)

        def send(self, message):
            # NOTE(review): appears unused — __enter__ returns queue.put
            # directly, bypassing this failure-filtering logic.
            if message.header != MessageHeaders.GENERAL_FAILURE:
                self.queue.put(message)
            elif self.method.interrupt:
                self.queue.put(message)

    def __init__(self, method_type, pipes):
        super(PipelinedAptMethod, self).__init__(pipes)
        self.interrupt = Interrupt()
        self.method_type = method_type
        # Queue of per-request queues, in arrival order.
        self.queues = Queue()

    def _send_queue_thread(self):
        """Start the drain thread that serializes all request output."""
        def f():
            # try:
            for queue in iter(self.queues.get, None):
                for message in iter(queue.get, None):
                    self.output(message)
            # except IOError:
            #     pass
        thread = threading.Thread(target=f)
        thread.start()
        return thread

    def _handle_message_thread(self, message):
        """Handle one message on a fresh thread with its own output queue."""
        pipelined_output = self.Output(self)
        def f():
            with pipelined_output as output:
                self.method_type.request(output).handle_message(message)
        thread = threading.Thread(target=f)
        thread.start()
        return thread

    def run(self):
        """Announce capabilities, then process messages until EOF."""
        self.output(Message(MessageHeaders.CAPABILITIES, self.method_type.capabilities()))
        # TODO: Use a proper executor. concurrent.futures has them, but only in Python 3.2+.
        threads = [self._send_queue_thread()]
        for message in self.input:
            if self.interrupt:
                break
            threads.append(self._handle_message_thread(message))
        # Sentinel stops the drain thread once all request queues finish.
        self.queues.put(None)
        for thread in threads:
            thread.join()
class S3AptMethodType(object):
    """Factory describing the S3 apt method and its capabilities."""

    def request(self, output):
        """Create a request handler that writes its replies via *output*."""
        return S3AptRequest(output)

    def capabilities(self):
        """Fields advertised to apt in the 100 Capabilities message."""
        caps = [
            ('Send-Config', 'true'),
            ('Pipeline', 'true'),
            ('Single-Instance', 'yes'),
        ]
        return tuple(caps)
class S3AptRequest(AptRequest):
    """Handles apt messages for the s3:// transport.

    Understands 601 Configuration (S3:: options) and 600 URI Acquire
    (download an object into apt's target file).
    """

    def __init__(self, output):
        super(S3AptRequest, self).__init__(output)

    class S3Uri:
        """Parses an apt URI of the form s3://[key:secret@]host/bucket/key."""

        def __init__(self, request, raw_uri):
            self.request = request
            self.uri = urllib.parse.urlparse(raw_uri)
            # Parse host as if it were an AWS endpoint. Group 1 captures a
            # virtual-host bucket including its trailing dot; group 2 the
            # region embedded in the endpoint (s3-<region> / s3.<region>).
            # BUG FIX: raw string with escaped dots — previously the '.'
            # before "amazonaws" and "com" matched any character.
            match = re.match(r'(.+\.|)?s3(?:[-.]([^.]*))?\.amazonaws\.com',
                             self.uri.hostname)
            self.virtual_host_bucket, self.region = (
                match.groups() if match else (None, None))

        def user_host(self):
            """Return (userinfo_or_None, host) from the URI authority."""
            parts = self.uri.netloc.split('@', 1)
            return parts if len(parts) == 2 else (None, parts[0])

        def endpoint_url(self):
            return 'https://{}/'.format(self.user_host()[1])

        def credentials(self):
            """Return an (access_key, secret_key, session_token) triple.

            Precedence: credentials embedded in the URI, then a role from
            $role_arn via STS, else (None, None, None) so boto3's default
            credential chain applies.
            """
            user, _ = self.user_host()
            if user:
                user_parts = user.split(':', 1)
                if len(user_parts) == 2:
                    access_key, secret_key = (
                        urllib.parse.unquote(part) for part in user_parts)
                    # BUG FIX: previously returned only two values, which
                    # broke the caller's three-way unpacking. URI
                    # credentials carry no session token.
                    return access_key, secret_key, None
                else:
                    raise Exception('Access key and secret are specified improperly in the URL')
            role_arn = os.environ.get("role_arn", None)
            if role_arn:
                creds_rsp = boto3.client('sts').assume_role(
                    RoleArn=role_arn,
                    RoleSessionName=socket.gethostname().replace('.', '-'),
                )
                if "Credentials" in creds_rsp:
                    creds = creds_rsp["Credentials"]
                    # BUG FIX: a stray trailing comma made this return a
                    # 1-tuple and left the remaining lines unreachable.
                    return (creds["AccessKeyId"],
                            creds["SecretAccessKey"],
                            creds["SessionToken"])
            return None, None, None

        def bucket_key(self):
            """Return (bucket, key) for both URL addressing styles."""
            if self.virtual_host_bucket:
                # BUG FIX: 'bucket' was left unbound on this branch. The
                # regex captures the bucket with its trailing dot.
                bucket = self.virtual_host_bucket.rstrip('.')
                key = self.uri.path[1:]
            else:
                _, bucket, key = list(map(urllib.parse.unquote,
                                          self.uri.path.split('/', 2)))
            return bucket, key

        def signature_version(self):
            global settings
            if settings.signature_version:
                return settings.signature_version
            elif self.virtual_host_bucket == '':
                # Path-style endpoint: default to Signature Version 4.
                return 's3v4'

    def _handle_message(self, message):
        global settings
        if message.header.status_code == MessageHeaders.CONFIGURATION.status_code:
            for config in message.get_fields('Config-Item'):
                key, value = config.split('=', 1)
                if key == 'S3::Signature::Version':
                    try:
                        settings.signature_version = {'2': 's3', '4': 's3v4'}[value]
                    except KeyError:
                        raise Exception('Invalid value for S3::Signature::Version')
                elif key == 'S3::Credentials::RoleArn':
                    os.environ["role_arn"] = value
                elif key == 'S3::MetadataService::Retries':
                    try:
                        metadata_service_num_attempts = int(value) + 1
                        if metadata_service_num_attempts < 1:
                            metadata_service_num_attempts = 1
                        settings.metadata_service_num_attempts = metadata_service_num_attempts
                    except ValueError:
                        raise Exception('Invalid value for S3::MetadataService::Retries')
                elif key == 'S3::MetadataService::Timeout':
                    # BUG FIX: this branch previously repeated the
                    # '...::Retries' key, making it unreachable.
                    try:
                        metadata_service_timeout = int(value)
                        if metadata_service_timeout < 1:
                            metadata_service_timeout = None
                        settings.metadata_service_timeout = metadata_service_timeout
                    except ValueError:
                        raise Exception('Invalid value for S3::MetadataService::Timeout')
        elif message.header.status_code == MessageHeaders.URI_ACQUIRE.status_code:
            uri = message.get_field('URI')
            filename = message.get_field('Filename')
            s3_uri = self.S3Uri(self, uri)
            access_key, access_secret, token = s3_uri.credentials()
            bucket, key = s3_uri.bucket_key()
            region = s3_uri.region
            botocore_session = settings.botocore_session()
            if not region and s3_uri.virtual_host_bucket:
                # Endpoint did not name a region: ask S3 for the bucket's
                # location (us-east-1 reports an empty/None constraint).
                session = boto3.session.Session(
                    aws_access_key_id=access_key,
                    aws_secret_access_key=access_secret,
                    aws_session_token=token,
                    region_name='us-east-1',
                    botocore_session=botocore_session,
                )
                s3_client = session.client('s3')
                region = s3_client.get_bucket_location(Bucket=bucket)['LocationConstraint'] or 'us-east-1'
            session = boto3.session.Session(
                aws_access_key_id=access_key,
                aws_secret_access_key=access_secret,
                aws_session_token=token,
                region_name=region or 'us-east-1',
                botocore_session=botocore_session,
            )
            s3 = session.resource(
                's3',
                config=botocore.client.Config(signature_version=s3_uri.signature_version()),
                endpoint_url=s3_uri.endpoint_url(),
            )
            s3_object = s3.Bucket(bucket).Object(key)
            self.output(Message(MessageHeaders.STATUS, (
                ('Message', 'Requesting {}/{}'.format(bucket, key)),
                ('URI', uri),
            )))
            try:
                s3_request = {}
                last_modified = message.get_field('Last-Modified')
                if last_modified:
                    # NOTE(review): passed through as the raw header string;
                    # confirm boto3 accepts this for IfModifiedSince.
                    s3_request['IfModifiedSince'] = last_modified
                s3_response = s3_object.get(**s3_request)
            except botocore.exceptions.ClientError as error:
                if error.response['Error']['Code'] == '304':
                    # Not modified since apt's cached copy: cache hit.
                    self.output(Message(MessageHeaders.URI_DONE, (
                        ('Filename', filename),
                        ('IMS-Hit', 'true'),
                        ('URI', uri),
                    )))
                else:
                    self.output(Message(MessageHeaders.URI_FAILURE, (
                        ('Message', error.response['Error']['Message']),
                        ('URI', uri),
                    )))
            else:
                self.output(Message(MessageHeaders.URI_START, (
                    ('Last-Modified', s3_response['LastModified'].isoformat()),
                    ('Size', s3_response['ContentLength']),
                    ('URI', uri),
                )))
                # Stream the body to disk while computing every digest apt
                # may ask for.
                md5 = hashlib.md5()
                sha1 = hashlib.sha1()
                sha256 = hashlib.sha256()
                sha512 = hashlib.sha512()
                with open(filename, 'wb') as f:
                    while True:
                        bytes = s3_response['Body'].read(16 * 1024)
                        if not bytes:
                            break
                        f.write(bytes)
                        md5.update(bytes)
                        sha1.update(bytes)
                        sha256.update(bytes)
                        sha512.update(bytes)
                self.output(Message(MessageHeaders.URI_DONE, (
                    ('Filename', filename),
                    ('Last-Modified', s3_response['LastModified'].isoformat()),
                    ('MD5-Hash', md5.hexdigest()),
                    ('MD5Sum-Hash', md5.hexdigest()),
                    ('SHA1-Hash', sha1.hexdigest()),
                    ('SHA256-Hash', sha256.hexdigest()),
                    ('SHA512-Hash', sha512.hexdigest()),
                    ('Size', s3_response['ContentLength']),
                    ('URI', uri),
                )))
if __name__ == '__main__':
    # interrupt signals are sometimes sent: ignore SIGINT so in-flight
    # transfers can finish and the method can shut down cleanly.
    def signal_handler(signal, frame):
        pass
    signal.signal(signal.SIGINT, signal_handler)
    PipelinedAptMethod(S3AptMethodType(), Pipes(sys.stdin, sys.stdout)).run()
| StarcoderdataPython |
5189651 | <filename>remindme/cli.py
'''
Command-line runner for Application
'''
import argparse
import sys
from . import config
from . import utils
from .Repository import Repository
def arg_parser():
    '''Argument Parser.

    Builds the remindme CLI and returns the parsed arguments as a dict
    (keys are option names; 'keywords' holds the positional title words).
    '''
    parser = argparse.ArgumentParser(
        description='Reminds you of something you knew before',
        epilog="See LICENSE at {0}".format(config.METADATA.LICENSE)
    )
    parser.add_argument('keywords',
                        metavar='TITLE', nargs='*',
                        help='title for remindme')
    parser.add_argument('-l', '--list',
                        action='store_true',
                        help='list all remindme titles')
    parser.add_argument('-a', '--add',
                        action='store_true',
                        help='add new remindme')
    parser.add_argument('-e', '--edit',
                        action='store_true',
                        help='edit old remindme')
    parser.add_argument('-i', '--in',
                        action='store_true',
                        help='pipe-in input for a new remindme')
    parser.add_argument('-o', '--raw',
                        action='store_true',
                        help='provide unformatted output; suitable for piping')
    parser.add_argument('-b', '--rename',
                        metavar='new_title', nargs='+',
                        help='rename a remindme')
    parser.add_argument('-r', '--remove',
                        action='store_true',
                        help='remove a remindme')
    parser.add_argument('-Ra', '--remove-all',
                        action='store_true',
                        help='remove all remindmes')
    parser.add_argument('-x', '--encrypt',
                        action='store_true',
                        help='encrypt before storing')
    parser.add_argument('-p', '--plain',
                        action='store_true',
                        help='store as plain text')
    parser.add_argument('-n', '--index',
                        action='store_true',
                        help='use title as index in list')
    parser.add_argument('-v', '--version',
                        action='version',
                        version='%(prog)s {0}'.format(config.METADATA.__version__))
    args = parser.parse_args()
    # Returned as a plain dict so callers can use args['list'], args['in'], etc.
    args = vars(args)
    return args
def main():
    '''Run actions.

    Dispatches on the parsed CLI flags; returns 0 on success and 1 on any
    user-facing error (the return value becomes the process exit code).

    NOTE: three statements in this function were corrupted in the source
    ("password = <PASSWORD>..."); they have been reconstructed from the
    parallel branches and surrounding context — confirm against upstream.
    '''
    settings = utils.Settings()
    console = utils.Console("runner", settings)
    gui = utils.GUI()
    repository = Repository(config.PATHS["db_file"])
    args = arg_parser()
    retry_decryption = settings.get("retry_decryption")
    remindme_title = ' '.join(args['keywords']) or None

    def try_decrypt(remindme):
        """Return (content, password), prompting until decryption succeeds."""
        if not remindme.is_encrypted():
            return remindme.get_content(), None
        content = None
        password = None
        while 1:
            password = get_password()
            content = remindme.get_content(password=password)
            if content:
                break
            console.error("could not decrypt content")
            # TODO: max retries!
            if retry_decryption:
                continue
            raise utils.DecryptionError("could not decrypt content for remindme '%s'" % (remindme.get_title()))
        return content, password

    def get_password(retry=False):
        """Ask for a password when settings/flags require encryption."""
        # determining whether to ask for a password based on need to encrypt
        encryption_disabled = settings.get("disable_encryption")
        encrypt_by_default = settings.get("encrypt_by_default")
        retry_password = retry and settings.get("retry_password_match")
        encryption_requested = args["encrypt"] or False
        plaintext_requested = args["plain"] or False
        password = None
        # ensure encryption is not disabled
        if encryption_disabled:
            console.info("encryption is disabled")
            return password
        # if encryption has been requested
        if encryption_requested:
            password = console.get_password(retry=retry_password)
        # if encryption is by default and plain text has not been requested
        elif encrypt_by_default and not plaintext_requested:
            # Reconstructed from the corrupted source; mirrors the branch above.
            password = console.get_password(retry=retry_password)
        # warn the user that no password was captured, if the case is so
        if password is None:
            console.info("NO password was captured. Storing as plain text.")
        return password

    def get_remindme(title):
        """Look up a remindme by list index (-n) or by exact title."""
        if args['index']:
            try:
                return repository.find_at_index(title)
            except ValueError:
                console.error("index provided is not an integer")
                return None
        else:
            return repository.find_by_title(title)

    if args['list']:
        if remindme_title:
            remindmes = repository.find(lambda r: r.get_title().startswith(remindme_title))
        else:
            remindmes = repository.get_remindmes()
        titles = repository.titles_in_order(remindmes)
        num = len(titles)
        console.success('Found {0} remindmes'.format(num))
        if num == 0:
            return
        number = 0
        display_content = ""
        for title in titles:
            number += 1
            display_content = ''.join([display_content, '%-2d - %s\n' % (number, title)])
        console.raw(display_content)
        return

    # from here on, we require that the keywords have
    # been provided. Otherwise, exit early
    if not remindme_title:
        console.error("Title of remindme has not been provided")
        return 1

    if args['add']:
        if repository.find_by_title(remindme_title):
            console.error("A remindme already has that title")
            return 1
        # use editor if available, otherwise use console
        editor_cmd = settings.get("editor")
        if editor_cmd:
            try:
                content = gui.editor(editor_cmd)
            except utils.EditorError:
                console.error("External editor (%s) exited with a non-zero status code" % (editor_cmd))
                return 1
        else:
            message = "Enter what you remember now"
            content = console.get_long_input(message)
        # we need content, we do not create empty remindmes!
        if not content:
            console.error("We have nothing to save!")
            return 1
        # ask for the password to use
        password = get_password(retry=True)
        if not repository.create_remindme(remindme_title, content, password=password):
            console.error('Remindme failed during creation.')
            return 1
        console.success('Remindme will remind you next time.')
        return 0

    if args['edit']:
        remindme = get_remindme(remindme_title)
        if not remindme:
            console.error("no such remindme exists")
            return 1
        # we require an external editor for this
        editor_cmd = settings.get("editor")
        if not editor_cmd:
            console.error("you need to set an external editor for editing existing remindmes")
            return 1
        # editing encrypted content
        content = None
        password = None  # reconstructed; was corrupted in the source
        try:
            content, password = try_decrypt(remindme)
            content = gui.editor(editor_cmd, content=content)
        except utils.DecryptionError:
            return 1
        except utils.EditorError:
            console.error("External editor (%s) exited with a non-zero status code" % (editor_cmd))
            return 1
        # update content, only if we got some content
        if not content:
            console.error("No content for update.")
            console.log("Consider removing the remindme instead.")
            return 1
        remindme.set_content(content, password=password)
        if not repository.update_remindme(remindme):
            console.error('Remindme failed to save the remindme.')
            return 1
        console.success('The remindme has been updated.')
        return 0

    if args['in']:
        if repository.find_by_title(remindme_title):
            console.error("A remindme already has that title")
            return 1
        content = sys.stdin.read().strip()
        # BUG FIX: was `content is ''` — identity comparison with a literal.
        if content == '':
            console.error('Remindme got no data from stdin')
            return 1
        password = get_password()  # reconstructed; was corrupted in the source
        if not repository.create_remindme(remindme_title, content, password=password):
            console.error('Remindme failed to create remindme.')
            return 1
        console.success('Remindme created.')
        return 0

    if args['rename']:
        new_title = ' '.join(args['rename'])
        remindme = get_remindme(remindme_title)
        if not remindme:
            console.error('No such remindme to rename')
            return 1
        if not remindme.set_title(new_title):
            console.error('Remindme could not be renamed')
            return 1
        console.success('Remindme renamed successfully')
        return 0

    if args['remove']:
        remindme = get_remindme(remindme_title)
        if not remindme:
            console.error("No such remindme exists")
            return 1
        if not remindme.delete():
            console.error("Failed to remove remindme")
            return 1
        console.success('remindme successfully removed')
        return 0

    if args['remove_all']:
        confirm = console.get_input("remove All Remindmes(yes/NO)")
        if confirm is None or confirm.strip().lower() != "yes":
            console.error("removal cancelled")
            return 1
        if not repository.remove_remindmes():
            console.error('failed to remove all')
            return 1
        console.success('removed all of them')
        return 0

    # the fallback action, when only a title of a
    # remindme has been provided.
    if remindme_title:
        remindme = get_remindme(remindme_title)
        if not remindme:
            console.error('I too can\'t remember that')
            return 1
        content, __ = try_decrypt(remindme)
        if content is None:
            console.error('empty remindme content')
            return 1
        # if we are to spit out unmodified content
        if args['raw']:
            console.raw(content)
            return 0
        console.success('Reminding you:')
        lines = content.split("\n")
        number = 0
        for line in lines:
            number += 1
            console.raw("%-2d %s\n" % (number, line))
        return 0
    return 0
def run():
    '''Execute the process and exit with proper exit code.'''
    # main() returns 0 on success, 1 on user-facing errors.
    sys.exit(main())

if __name__ == '__main__':
    run()
| StarcoderdataPython |
4903400 | <reponame>aforalee/rallyALi
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from rally.plugins.openstack.scenarios.sahara import jobs
from tests.unit import test
CONF = cfg.CONF
# Dotted paths used as mock.patch targets in the tests below.
SAHARA_JOB = "rally.plugins.openstack.scenarios.sahara.jobs.SaharaJob"
SAHARA_UTILS = "rally.plugins.openstack.scenarios.sahara.utils"
class SaharaJobTestCase(test.ScenarioTestCase):
def setUp(self):
    """Prepare a test context and zero the Sahara polling intervals
    so the scenarios under test never sleep."""
    super(SaharaJobTestCase, self).setUp()
    self.context = test.get_test_context()
    CONF.set_override("sahara_cluster_check_interval", 0, "benchmark")
    CONF.set_override("sahara_job_check_interval", 0, "benchmark")
@mock.patch("rally.common.utils.generate_random_name",
return_value="job_42")
@mock.patch(SAHARA_JOB + "._run_job_execution")
def test_create_launch_job_java(self, mock__run_job_execution,
mock_generate_random_name):
self.clients("sahara").jobs.create.return_value = mock.MagicMock(
id="42")
self.context.update({
"tenant": {
"sahara_image": "test_image",
"sahara_mains": ["main_42"],
"sahara_libs": ["lib_42"],
"sahara_cluster": "cl_42",
"sahara_input": "in_42"
}
})
jobs_scenario = jobs.SaharaJob(self.context)
jobs_scenario.create_launch_job(
job_type="java",
configs={"conf_key": "conf_val"},
job_idx=0
)
self.clients("sahara").jobs.create.assert_called_once_with(
name=mock_generate_random_name.return_value,
type="java",
description="",
mains=["main_42"],
libs=["lib_42"]
)
mock__run_job_execution.assert_called_once_with(
job_id="42",
cluster_id="cl_42",
input_id=None,
output_id=None,
configs={"conf_key": "conf_val"},
job_idx=0
)
@mock.patch("rally.common.utils.generate_random_name",
return_value="job_42")
@mock.patch(SAHARA_JOB + "._run_job_execution")
@mock.patch(SAHARA_JOB + "._create_output_ds",
return_value=mock.MagicMock(id="out_42"))
def test_create_launch_job_pig(self, mock__create_output_ds,
mock__run_job_execution,
mock_generate_random_name):
self.clients("sahara").jobs.create.return_value = mock.MagicMock(
id="42")
self.context.update({
"tenant": {
"sahara_image": "test_image",
"sahara_mains": ["main_42"],
"sahara_libs": ["lib_42"],
"sahara_cluster": "cl_42",
"sahara_input": "in_42"
}
})
jobs_scenario = jobs.SaharaJob(self.context)
jobs_scenario.create_launch_job(
job_type="pig",
configs={"conf_key": "conf_val"},
job_idx=0
)
self.clients("sahara").jobs.create.assert_called_once_with(
name=mock_generate_random_name.return_value,
type="pig",
description="",
mains=["main_42"],
libs=["lib_42"]
)
mock__run_job_execution.assert_called_once_with(
job_id="42",
cluster_id="cl_42",
input_id="in_42",
output_id="out_42",
configs={"conf_key": "conf_val"},
job_idx=0
)
    @mock.patch("rally.common.utils.generate_random_name",
                return_value="job_42")
    @mock.patch(SAHARA_JOB + "._run_job_execution")
    def test_create_launch_job_sequence(self, mock__run_job_execution,
                                        mock_generate_random_name):
        """A sequence of two java jobs creates two jobs and executes them in
        order with consecutive job indices (0, 1).
        """
        self.clients("sahara").jobs.create.return_value = mock.MagicMock(
            id="42")
        # Tenant context normally prepared by Rally's sahara context plugins.
        self.context.update({
            "tenant": {
                "sahara_image": "test_image",
                "sahara_mains": ["main_42"],
                "sahara_libs": ["lib_42"],
                "sahara_cluster": "cl_42",
                "sahara_input": "in_42"
            }
        })
        jobs_scenario = jobs.SaharaJob(self.context)
        jobs_scenario.create_launch_job_sequence(
            jobs=[
                {
                    "job_type": "java",
                    "configs": {"conf_key": "conf_val"}
                }, {
                    "job_type": "java",
                    "configs": {"conf_key2": "conf_val2"}
                }])
        # Both jobs are created with identical metadata (only configs differ).
        jobs_create_call = mock.call(
            name=mock_generate_random_name.return_value,
            type="java",
            description="",
            mains=["main_42"],
            libs=["lib_42"])
        self.clients("sahara").jobs.create.assert_has_calls([jobs_create_call,
                                                             jobs_create_call])
        # Executions run in order; job_idx increments per job in the sequence.
        mock__run_job_execution.assert_has_calls([
            mock.call(
                job_id="42",
                cluster_id="cl_42",
                input_id=None,
                output_id=None,
                configs={"conf_key": "conf_val"},
                job_idx=0),
            mock.call(
                job_id="42",
                cluster_id="cl_42",
                input_id=None,
                output_id=None,
                configs={"conf_key2": "conf_val2"},
                job_idx=1)]
        )
    @mock.patch("rally.common.utils.generate_random_name",
                return_value="job_42")
    @mock.patch(SAHARA_JOB + "._run_job_execution")
    @mock.patch(SAHARA_JOB + "._scale_cluster")
    def test_create_launch_job_sequence_with_scaling(
            self, mock__scale_cluster, mock__run_job_execution,
            mock_generate_random_name):
        """With two scaling deltas, the two-job sequence is expected to run
        three times in total (initially and again after each scaling), i.e.
        six job executions overall.
        """
        self.clients("sahara").jobs.create.return_value = mock.MagicMock(
            id="42")
        # The cluster must be reported as active for scaling to proceed.
        self.clients("sahara").clusters.get.return_value = mock.MagicMock(
            id="cl_42", status="active")
        # Tenant context normally prepared by Rally's sahara context plugins.
        self.context.update({
            "tenant": {
                "sahara_image": "test_image",
                "sahara_mains": ["main_42"],
                "sahara_libs": ["lib_42"],
                "sahara_cluster": "cl_42",
                "sahara_input": "in_42"
            }
        })
        jobs_scenario = jobs.SaharaJob(self.context)
        jobs_scenario.create_launch_job_sequence_with_scaling(
            jobs=[
                {
                    "job_type": "java",
                    "configs": {"conf_key": "conf_val"}
                }, {
                    "job_type": "java",
                    "configs": {"conf_key2": "conf_val2"}
                }],
            deltas=[1, -1])
        jobs_create_call = mock.call(
            name=mock_generate_random_name.return_value,
            type="java",
            description="",
            mains=["main_42"],
            libs=["lib_42"])
        self.clients("sahara").jobs.create.assert_has_calls([jobs_create_call,
                                                             jobs_create_call])
        je_0 = mock.call(job_id="42", cluster_id="cl_42", input_id=None,
                         output_id=None, configs={"conf_key": "conf_val"},
                         job_idx=0)
        je_1 = mock.call(job_id="42", cluster_id="cl_42", input_id=None,
                         output_id=None,
                         configs={"conf_key2": "conf_val2"}, job_idx=1)
        # Whole sequence re-runs after each of the two scalings: 3 passes.
        mock__run_job_execution.assert_has_calls(
            [je_0, je_1, je_0, je_1, je_0, je_1])
| StarcoderdataPython |
import turtle
import math
# Import-time side effect: announces the package import.
# "导入a包" means "importing package a".
print("导入a包")
# Public submodules re-exported by `from <package> import *`.
__all__=["module_A","module_A2"]
6516832 | import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(sys.argv[0]))), 'libs'))
from cpp import parser, tree
from optparse import OptionParser
import traceback
try:
import cPickle as pickle
except ImportError:
import pickle
# Command-line interface: kernel_ast.py <kernel_name> <kernel_input> <task_output.ast>
option_decl = OptionParser()
option_decl.set_usage('kernel_ast.py kernel_name kernel_input task_output.ast')
option_decl.add_option(
    "-d", dest="macro_file", action="append", help="Add the content of <macrofile> to the macros, one macro per line"
)
option_decl.add_option("-m", "--module", dest="module", help="Module root")
option_decl.add_option("-t", "--tmp", dest="tmp_dir", help="Directory to store temporary/cached files", default=".")
# Compiler-specific keywords to treat as macros when parsing C++ sources.
# True: the macro takes an argument list; False: it is a bare keyword.
global_macro_map = {
    "__declspec": True,
    "__attribute__": True,
    "CALLBACK": False,
    "WINAPI": False,
    "__cdecl": False,
    "__fastcall": False,
    "__stdcall": False,
    "PASCAL": False,
}
# Entry point: parse a C++ kernel source, locate methods tagged with the
# '__kernel' attribute, validate their signatures and pickle the result.
if __name__ == '__main__':
    (options, arguments) = option_decl.parse_args()
    if not arguments:
        option_decl.print_help()
        sys.exit(1)
    elif len(arguments) != 3:
        # Exactly three positionals required: name, input file, output file.
        option_decl.print_help()
        sys.exit(1)
    else:
        try:
            # Parse the C++ source (arguments[1]) with a cached grammar.
            result = parser.parse(
                arguments[1], os.path.join(options.tmp_dir, 'cpp_grammar.pickle'), options.macro_file, options.module,
                'BugEngine'
            )
            if not result:
                sys.exit(1)
            kernels = []
            # Collect every method carrying the '__kernel' attribute.
            for m in result.objects:
                if isinstance(m, tree.OverloadedMethod):
                    for overload in m.overloads:
                        if '__kernel' in overload.attributes:
                            if len(m.overloads) > 1:
                                raise Exception("cannot overload a kernel method")
                            m = m.overloads[0]
                            args = []
                            # The first two parameters must be (const) u32
                            # (presumably index/count of the kernel batch —
                            # TODO confirm against the kernel ABI).
                            arg0 = m.parameters[0]
                            if arg0.type.strip() != 'u32' and arg0.type.strip() != 'const u32':
                                raise Exception("invalid signature for method kmain")
                            arg1 = m.parameters[1]
                            if arg1.type.strip() != 'u32' and arg1.type.strip() != 'const u32':
                                raise Exception("invalid signature for method kmain")
                            # Remaining parameters become the kernel arguments.
                            for arg in m.parameters[2:]:
                                args.append((arg.name, arg.type))
                            kernels.append((m, args))
            if len(kernels) == 0:
                raise Exception("could not locate any kernel method in kernel")
            # Serialize: (kernel name, (includes, usings), source path, kernels).
            with open(arguments[2], 'wb') as out_file:
                pickle.dump((arguments[0], (result.includes, result.usings), arguments[1], kernels), out_file)
            sys.exit(result.error_count)
        except Exception as e:
            print(e)
            traceback.print_exc()
            sys.exit(1)
| StarcoderdataPython |
8064853 | from cassiopeia import riotapi
from cassiopeia.type.core.common import LoadPolicy
import csv
import urllib
import configparser
import mysql.connector
def main():
    """Download League of Legends summoner-spell metadata and icons and load
    the spell records into the MySQL ``SummonerSpell`` table.

    Reads the Riot API key and DB connection settings from ``settings.ini``.
    Icons are written to ``img/spell-<id>.png``.
    """
    # Local imports for names the module header does not provide:
    # urllib.urlretrieve does not exist on Python 3 (it lives in
    # urllib.request), and `errorcode` was previously referenced without
    # ever being imported (NameError inside the except handler).
    from urllib.request import urlretrieve
    from mysql.connector import errorcode

    config = configparser.ConfigParser()
    config.read('settings.ini')

    riotapi.set_api_key(config.get('LoL API', 'key'))
    riotapi.set_load_policy(LoadPolicy.lazy)
    riotapi.print_calls(False)
    riotapi.set_region('NA')

    cnx = None
    try:
        cnx = mysql.connector.connect(
            user=config.get('DB', 'username'),
            password=config.get('DB', 'password'),
            host=config.get('DB', 'host'),
            database=config.get('DB', 'database'))
        cursor = cnx.cursor()
        # Parameterized statement: avoids the quoting/injection problems of
        # the previous str.format()-built SQL.
        insert_spell = ('INSERT INTO SummonerSpell (id,name,description) '
                        'VALUES (%s, %s, %s)')
        spells = riotapi.get_summoner_spells()
        for spell in spells:
            imageurl = ('http://ddragon.leagueoflegends.com/cdn/6.24.1/'
                        'img/spell/' + spell.image.link)
            destPath = 'img/spell-' + str(spell.id) + '.png'
            try:
                urlretrieve(imageurl, destPath)
            except IOError:
                print("Error retreiving " + str(spell.id) + '.png')
            print('Inserting spell {}: {}'.format(spell.id, spell.name))
            cursor.execute(insert_spell, (spell.id, spell.name, spell.description))
        # The original never committed, so inserts were silently discarded
        # when autocommit was off (the connector default).
        cnx.commit()
        cursor.close()
    except mysql.connector.Error as err:
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
    finally:
        # Close exactly once, on both the success and the error path
        # (the old try/except/else closed twice on success and not at all
        # on some error branches).
        if cnx is not None:
            cnx.close()

if __name__ == "__main__":
    main()
| StarcoderdataPython |
3210245 | """
Taken and adapted from https://github.com/graphdeeplearning/graphtransformer
"""
import torch.nn as nn
import dgl
from net.blocks import MLPReadout
from net.layer import GraphTransformerLayer
class GraphTransformerNet(nn.Module):
    """Graph Transformer for graph-level regression (one scalar per graph).

    Pipeline: linear embeddings of node/edge features plus Laplacian
    positional encodings -> stack of GraphTransformerLayer blocks ->
    graph readout (sum/max/mean) -> MLP head.
    """
    def __init__(self, net_params):
        super().__init__()

        # Hyper-parameters arrive as a plain dict; the keys read below
        # define the expected schema.
        num_atom_features = net_params['num_atom_features']
        num_edge_input_dim = net_params['num_edge_input_dim']
        hidden_dim = net_params['hidden_dim']
        num_heads = net_params['n_heads']
        out_dim = net_params['out_dim']
        in_feat_dropout = net_params['in_feat_dropout']
        dropout = net_params['dropout']
        mlp_dropout = net_params['mlp_dropout']
        n_layers = net_params['L']
        pos_enc_dim = net_params['pos_enc_dim']
        type_loss = net_params['type_loss']
        self.readout = net_params['readout']
        self.layer_norm = net_params['layer_norm']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.device = net_params['device']

        # Input embeddings: positional encodings, node and edge features.
        self.embedding_lap_pos_enc = nn.Linear(pos_enc_dim, hidden_dim)
        self.embedding_h = nn.Linear(num_atom_features, hidden_dim)
        self.embedding_e = nn.Linear(num_edge_input_dim, hidden_dim)
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)

        # n_layers total: n_layers-1 hidden->hidden blocks plus one final
        # hidden->out_dim block.
        self.layers = nn.ModuleList([GraphTransformerLayer(hidden_dim, hidden_dim, num_heads, dropout,
                                                           self.layer_norm, self.batch_norm, self.residual) for _ in
                                     range(n_layers - 1)])
        self.layers.append(
            GraphTransformerLayer(hidden_dim, out_dim, num_heads, dropout, self.layer_norm, self.batch_norm,
                                  self.residual))
        self.MLP_layer = MLPReadout(out_dim, 1, drop=mlp_dropout)  # 1 out dim since regression problem

        if type_loss == "MSE":
            self.func_loss = nn.MSELoss()
        elif type_loss == "MAE":
            self.func_loss = nn.L1Loss()

    def forward(self, g, h, e, h_lap_pos_enc):
        """Run the network on a (batched) DGL graph.

        Args:
            g: DGL graph (possibly a batch of graphs).
            h: node feature matrix.
            e: edge feature matrix.
            h_lap_pos_enc: Laplacian positional encodings per node.

        Returns:
            Per-graph prediction from the MLP readout head.
        """
        # input embedding
        # Node Embedding and Positional Encoding
        h = self.embedding_h(h)
        h = self.in_feat_dropout(h)
        h_lap_pos_enc = self.embedding_lap_pos_enc(h_lap_pos_enc.float())
        h = h + h_lap_pos_enc
        # Edge Embedding
        e = self.embedding_e(e)

        # convnets
        for conv in self.layers:
            h, e = conv(g, h, e)
        g.ndata['h'] = h

        # Graph-level readout; defaults to mean for any unknown setting.
        if self.readout == "sum":
            hg = dgl.sum_nodes(g, 'h')
        elif self.readout == "max":
            hg = dgl.max_nodes(g, 'h')
        else:
            hg = dgl.mean_nodes(g, 'h')

        return self.MLP_layer(hg)

    def loss(self, scores, targets):
        """Regression loss (MSE or MAE, selected via net_params['type_loss'])."""
        return self.func_loss(scores.float(), targets.float())
9696004 | <reponame>alishaar/lyric_scraper
# -*- coding: utf-8 -*-
"""lyrics_scraper.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1FHuLFfoEoAVWb2I1v7stRVT7m86FemxL
"""
# !pip install bs4   (notebook shell magic; not valid Python in a plain .py
# file — install beautifulsoup4 from the command line instead)
import requests
from bs4 import BeautifulSoup
import sys
import re
import os
import base64
from urllib.parse import urlencode
from google.colab import drive
drive.mount('/content/drive')
import sys
sys.path.append('/content/drive/My Drive/Colab Notebooks')
import configs
def getSpotifyToken():
    """Obtain an app-level Spotify access token via the client-credentials flow."""
    credentials = f"{configs.clientID}:{configs.clientSecret}"
    encoded = base64.b64encode(credentials.encode()).decode()
    response = requests.post(
        "https://accounts.spotify.com/api/token",
        data={"grant_type": "client_credentials"},
        headers={"Authorization": f"Basic {encoded}"},
    )
    return str(response.json()['access_token'])
def getArtistId(name, apiToken):
    """Return the Spotify ID of the top artist search hit for *name*."""
    query = urlencode({'q': name, "type": "artist"})
    search_url = f"https://api.spotify.com/v1/search?{query}"
    payload = requests.get(
        search_url, headers={'Authorization': 'Bearer ' + apiToken}).json()
    return str(payload["artists"]["items"][0]["id"])
def getRelatedArtists(name):
    """List the names of Spotify's related artists for *name*."""
    token = getSpotifyToken()
    artist_id = getArtistId(name, token)
    url = "https://api.spotify.com/v1/artists/" + artist_id + "/related-artists"
    payload = requests.get(url, headers={'Authorization': 'Bearer ' + token}).json()
    # One name per related-artist entry, coerced to plain str.
    return [str(entry['name']) for entry in payload['artists']]
def getArtistInfo(name, page):
    """Query the Genius search API for *name* (10 hits per page)."""
    search_url = "https://api.genius.com" + '/search?per_page=10&page=' + str(page)
    return requests.get(
        search_url,
        data={'q': name},
        headers={'Authorization': 'Bearer ' + configs.apiToken},
    )
def getSongUrl(name):
  """Page through Genius search results and collect the lyric-page URLs of
  every song whose primary artist matches *name*.
  """
  page = 1
  songs = []
  while True:
    response = getArtistInfo(name, page)
    json = response.json()  # NOTE: shadows the imported ``json`` module locally
    # Keep only hits whose primary artist matches the requested name.
    songInfo = []
    for hit in json['response']['hits']:
      if name.lower() in hit['result']['primary_artist']['name'].lower():
        songInfo.append(hit)
    # ``url`` doubles as a sentinel: it stays "" when a page yields no
    # matching hits, which terminates the pagination loop.
    url=""
    for song in songInfo:
      url = song['result']['url']
      songs.append(url)
    if url == "":
      break
    else:
      page+=1
  print('Found {} songs by {}'.format(len(songs), name))
  return songs
def scrapeLyrics(name):
  """Fetch and clean the lyrics of every song found for *name*.

  Returns a pair ``(lyrics, titles)`` of equal-length lists; entries default
  to "-" when a page cannot be parsed (best-effort scraping).
  """
  songs= getSongUrl(name)
  lyrics=[]
  titles=[]
  for i in range (len(songs)):
    page = requests.get(songs[i])
    html = BeautifulSoup(page.text, 'html.parser')
    # "-" placeholders survive if extraction fails below.
    lyric="-"
    title="-"
    try:
      # NOTE(review): relies on Genius's historical page markup
      # (div.lyrics and the "<artist> – <title> Lyrics | Genius Lyrics"
      # page title) — bare except deliberately swallows parse failures.
      lyric = html.find('div', class_='lyrics').get_text()
      titleString=html.title.string
      title=re.search('%s(.*)%s' % (" – ", " Lyrics | Genius Lyrics"), titleString).group(1)
    except:
      pass
    try:
      # Make the title filesystem-safe (it becomes a filename later).
      title=title.replace("/", "-")
    except:
      pass
    # Strip bracketed annotations like [Chorus] / (x2), then drop blank lines.
    lyric = re.sub(r'[\(\[].*?[\)\]]', '', lyric)
    lyric = os.linesep.join([s for s in lyric.splitlines() if s])
    lyrics.append(lyric)
    titles.append(title)
  return lyrics,titles
def createDataSet(name, filePath):
    """Scrape all lyrics for *name* and write one ``<title>.txt`` file per
    song under ``filePath + name``.

    File contents are the lower-cased cleaned lyrics.
    """
    lyrics, titles = scrapeLyrics(name)
    # makedirs(exist_ok=True) replaces the old mkdir-inside-bare-except
    # pattern: already-existing directories are fine, but real failures
    # (e.g. permissions) now surface instead of being silently swallowed.
    artist_dir = filePath + name
    os.makedirs(artist_dir, exist_ok=True)
    for title, lyric in zip(titles, lyrics):
        comp_path = os.path.join(artist_dir, str(title) + ".txt")
        # ``with`` closes the file reliably; the original called ``f.close``
        # without parentheses, so the handle was never explicitly closed.
        with open(comp_path, "w+") as f:
            f.write(lyric.lower())
        print(title)
def main():
  """Build a lyrics dataset for an artist plus their (transitively) related
  artists, writing everything under /content/datasets/<name> dataset/.
  """
  name = input("name of artist:")
  filePath="/content/datasets/" + name + " dataset/"
  # Seed list: the artist's related artists plus the artist themselves.
  artists=getRelatedArtists(name)
  artists.append(name)
  # One level of expansion: related artists of the original seed entries
  # (``length`` is fixed before the list grows, so newly appended artists
  # are not expanded further).
  length = len(artists)
  for i in range (0,length-1):
    otherArtists=getRelatedArtists(artists[i])
    for artist in otherArtists:
      if artist not in artists:
        artists.append(artist)
  print(artists)
  for artist in artists:
    createDataSet(artist,filePath)
# Runs immediately on import (notebook-style script, no __main__ guard).
main()
3400640 | # -*- coding: utf-8 -*-
"""
OpenTabulate configuration file parser and class.
This reads '$HOME/.config/opentabulate.conf' using the ConfigParser class and
configures the OpenTabulate command line tool. It stores information such as where
the OpenTabulate root directory is, tabulation parameters (e.g. output encoding,
indexing, specific output formatting) and the available target output columns listed
in groups. The command line arguments take higher priority over the configuration
file unless stated otherwise.
Created and written by <NAME>, with support and funding from the
*Center for Special Business Projects* (CSBP) at *Statistics Canada*.
"""
import os
import sys
from configparser import ConfigParser
from ast import literal_eval
# Default locations for the user configuration directory and file.
DEFAULT_PATHS = {'conf_dir' : os.path.expanduser('~') + '/.config',
                 'conf_file' :os.path.expanduser('~') + '/.config/opentabulate.conf'}
# Encodings accepted for 'target_encoding' in the 'general' section.
SUPPORTED_ENCODINGS = ('utf-8', 'cp1252')
# Accepted values for 'output_encoding_errors' (codecs error handlers).
ENCODING_ERRORS = ('strict', 'replace', 'ignore')
class ConfigError(Exception):
    """Raised when the OpenTabulate configuration file fails validation."""
class Configuration(ConfigParser):
    """
    Child class of the built-in ConfigParser. Adapted to read a hard-coded
    location for a configuration file upon initialization and to validate
    its contents.

    Attributes:
        conf_path (str): Configuration file path.
    """
    def __init__(self, conf_path=None):
        """
        Initialize the ConfigParser with a configuration path.

        Args:
            conf_path (str): Path to the configuration file; if None, the
                default path DEFAULT_PATHS['conf_file'] is used.
        """
        super().__init__(strict=True, empty_lines_in_values=False)
        if conf_path is None:
            self.conf_path = DEFAULT_PATHS['conf_file']
        else:
            self.conf_path = conf_path

    def load(self):
        """
        Load the configuration file.

        Raises:
            FileNotFoundError: configuration file is missing from path
        """
        if not os.path.exists(self.conf_path):
            raise FileNotFoundError("No configuration file found in %s" % self.conf_path)
        self.read(self.conf_path)

    def validate(self):
        """
        Validates the contents of the configuration file.

        Note: Existence of the OpenTabulate root directory and its folder contents are
              not validated. This is handled separately by the command line argument
              handler due to how the --initialize flag is handled.

        Raises:
            ConfigError: Validation error of loaded configuration
        """
        base_sections = ('general', 'labels')
        general_section = ('root_directory', 'add_index', 'target_encoding',
                           'output_encoding_errors', 'clean_whitespace', 'lowercase_output',
                           'log_level')
        # column names that user-defined labels may not use
        reserved_cols = ('idx', 'provider')

        # the 'general' section and its 'root_directory' option are mandatory
        if 'general' not in self.sections():
            raise ConfigError("Missing 'general' section")
        if 'root_directory' not in self['general']:
            raise ConfigError("Missing required 'root_directory' option in 'general' section")

        # check if configuration sections are valid
        for sec in self.sections():
            if sec not in base_sections:
                raise ConfigError("'%s' is not a valid section" % sec)

        # check if 'general' section has invalid options
        for option in self['general']:
            if option not in general_section:
                raise ConfigError("'%s' is not a valid option in 'general' section" % option)

        # check that label names do not collide with 'general' option names;
        # the 'labels' section itself is optional (guard fixes a KeyError the
        # original raised when the section was absent)
        if self.has_section('labels'):
            for option in self['labels']:
                if option in general_section:
                    raise ConfigError("Cannot define label '%s', is a reserved word" % option)

        # add default settings for anything not configured explicitly
        defaults = {'target_encoding' : 'utf-8',
                    'output_encoding_errors' : 'strict',
                    'add_index' : 'false',
                    'clean_whitespace' : 'false',
                    'lowercase_output' : 'false',
                    'log_level' : '3'}

        for def_opt in defaults:
            if def_opt not in self['general']:
                self.set('general', def_opt, defaults[def_opt])

        # validate boolean options
        boolean_options = ('add_index', 'clean_whitespace', 'lowercase_output')

        for option in boolean_options:
            try:
                self.getboolean('general', option)
            except ValueError:
                raise ConfigError("Option '%s' in 'general' section is not a"
                                  " boolean value" % option)

        # validate verbosity level; the original interpolated a stale loop
        # variable into this message and used `assert` (stripped under -O)
        try:
            log_level = self.getint('general', 'log_level')
        except ValueError:
            log_level = -1
        if not 0 <= log_level <= 3:
            raise ConfigError("Option 'log_level' in 'general' is not an integer value"
                              " between 0 and 3 (inclusive)")

        # validate encoding
        encoding = self.get('general', 'target_encoding')
        if encoding not in SUPPORTED_ENCODINGS:
            raise ConfigError("'%s' is not a supported output encoding" % encoding)

        # validate output encoding error handling; the original message was
        # missing the '%' interpolation entirely
        handler = self.get('general', 'output_encoding_errors')
        if handler not in ENCODING_ERRORS:
            raise ConfigError("'%s' is not an output encoding error handler" % handler)

        # validate labels: values must parse as tuples and column names must
        # not be reserved words
        if self.has_section('labels'):
            for option in self['labels']:
                try:
                    value = literal_eval(self.get('labels', option))
                    if not isinstance(value, tuple):
                        raise ValueError
                except (ValueError, SyntaxError, TypeError):
                    # original message was missing the '%' interpolation
                    raise ConfigError("Value of label '%s' is not a tuple" % option)
                for col in value:
                    if col in reserved_cols:
                        raise ConfigError("Column name '%s' cannot be used, is a reserved"
                                          " word" % col)
5051498 | <filename>openwisp_users/api/permissions.py
from django.utils.translation import gettext_lazy as _
from rest_framework.permissions import BasePermission
from swapper import load_model
Organization = load_model('openwisp_users', 'Organization')
class BaseOrganizationPermission(BasePermission):
    """DRF permission base that ties object access to the requesting user's
    relation with the object's organization.

    Subclasses implement ``validate_membership`` to define the required
    relation (member / manager / owner).
    """
    def has_object_permission(self, request, view, obj):
        # Resolve the organization the object belongs to, then delegate the
        # membership check to the subclass.
        organization = self.get_object_organization(view, obj)
        return self.validate_membership(request.user, organization)

    def has_permission(self, request, view):
        # View-level gate: only authenticated users reach the object check.
        return request.user and request.user.is_authenticated

    def get_object_organization(self, view, obj):
        # ``organization_field`` on the view may be a '__'-separated path
        # (e.g. "device__organization") traversed attribute by attribute.
        organization_field = getattr(view, 'organization_field', 'organization')
        fields = organization_field.split('__')
        accessed_object = obj
        for field in fields:
            accessed_object = getattr(accessed_object, field, None)
            if not accessed_object:
                raise AttributeError(
                    _(
                        'Organization not found, `organization_field` '
                        'not implemented correctly.'
                    )
                )
        return accessed_object

    def validate_membership(self, user, org):
        # Abstract hook: subclasses must override this.
        raise NotImplementedError(
            _(
                'View\'s permission_classes not implemented correctly.'
                'Please use one of the child classes: IsOrganizationMember, '
                'IsOrganizationManager or IsOrganizationOwner.'
            )
        )
class IsOrganizationMember(BaseOrganizationPermission):
    """Grant access only to members (or superusers) of the object's
    organization."""
    message = _(
        'User is not a member of the organization to which the '
        'requested resource belongs.'
    )

    def validate_membership(self, user, org):
        # A falsy ``org`` short-circuits exactly like ``org and ...``.
        if not org:
            return org
        if user.is_superuser:
            return True
        return user.is_member(org)
class IsOrganizationManager(BaseOrganizationPermission):
    """Grant access only to managers (or superusers) of the object's
    organization."""
    message = _(
        'User is not a manager of the organization to which the '
        'requested resource belongs.'
    )

    def validate_membership(self, user, org):
        # A falsy ``org`` short-circuits exactly like ``org and ...``.
        if not org:
            return org
        return True if user.is_superuser else user.is_manager(org)
class IsOrganizationOwner(BaseOrganizationPermission):
    """Grant access only to owners (or superusers) of the object's
    organization."""
    message = _(
        'User is not a owner of the organization to which the '
        'requested resource belongs.'
    )

    def validate_membership(self, user, org):
        # Falsy org short-circuits; superusers always pass.
        return org and (user.is_superuser or user.is_owner(org))
| StarcoderdataPython |
3238134 | #!/usr/bin/env python
"""
Torture-test Afterglow Core API
"""
import argparse
import base64
import json
import random
import requests
import time
import traceback
import warnings
from multiprocessing import Process
from typing import Any, Dict, Optional, Union
def api_call(host, port, https, root, api_version, token, method, resource,
             params=None) -> Optional[Union[Dict[str, Any], str, bytes]]:
    """Issue a single Afterglow Core API request and return the decoded
    response (JSON payload, text, raw bytes, or None if no Content-Type).

    For non-GET requests, a CSRF token is best-effort extracted from the
    bearer token and sent as X-CSRF-Token.
    """
    method = method.upper()
    headers = {'Authorization': 'Bearer {}'.format(token)}

    if method != 'GET':
        # Extract CSRF token from access/refresh token. The JSON payload is
        # found by incrementally growing the parse window, since the decoded
        # data is a concatenation of JSON documents with no delimiters.
        # NOTE(review): base64.decodebytes expects bytes, but ``s`` here is a
        # str slice of the token; the broad except below masks any resulting
        # TypeError, silently skipping the CSRF header — confirm intended.
        # noinspection PyBroadException
        try:
            s = token[:token.rfind('.')]
            s = base64.decodebytes(s + '='*((4 - len(s) % 4) % 4))
            i = 1
            while i <= len(s):
                try:
                    json.loads(s[:i])
                except ValueError:
                    i += 1
                else:
                    break
            s = s[i:]
            i = 1
            while i <= len(s):
                try:
                    headers['X-CSRF-Token'] = json.loads(s[:i])['csrf']
                    break
                except ValueError:
                    i += 1
        except Exception:
            pass

    # Remote hosts default to the '/core' root; ensure any explicit root
    # starts with a slash.
    if not root and host not in ('localhost', '127.0.0.1'):
        root = '/core'
    elif root and not root.startswith('/'):
        root = '/' + root

    url = 'http{}://{}:{:d}{}/'.format('s' if https else '', host, port, root)
    # OAuth2/AJAX endpoints are unversioned; everything else goes under
    # /api/v<version>/.
    if not resource.startswith('oauth2') and not resource.startswith('ajax'):
        url += 'api/v{}/'.format(api_version)
    url += resource

    json_data = None
    if method not in ('GET', 'HEAD', 'OPTIONS') and params:
        # For requests other than GET, we must pass parameters as JSON
        params, json_data = None, params

    # Self-signed test certs: suppress the unverified-HTTPS warning.
    warnings.filterwarnings('ignore', 'Unverified HTTPS request is being made')
    r = requests.request(
        method, url, verify=False, params=params, headers=headers,
        json=json_data)

    try:
        content_type = r.headers['Content-Type'].split(';')[0].strip()
    except KeyError:
        return

    if content_type.split('/')[-1].lower() == 'json':
        # Afterglow wraps payloads in {'data': ...} or {'error': ...}.
        res = r.json()
        if 'data' in res:
            return res['data']
        if 'error' in res:
            raise RuntimeError(str(res['error']))
        return res

    if content_type.split('/')[0].lower() == 'text':
        return r.text

    return r.content
def run_job(host, port, https, root, api_version, token, job_type, params):
    """Submit an Afterglow job, poll once per second until completion,
    print any errors from the result, and return the result payload."""
    conn = (host, port, https, root, api_version, token)
    payload = {'type': job_type}
    payload.update(params)
    job_id = api_call(*conn, 'POST', 'jobs', payload)['id']
    # Poll job state at 1 Hz until the server reports completion.
    while True:
        time.sleep(1)
        state = api_call(*conn, 'GET', 'jobs/{}/state'.format(job_id))
        if state['status'] == 'completed':
            break
    result = api_call(*conn, 'GET', 'jobs/{}/result'.format(job_id))
    if result['errors']:
        print(result['errors'])
    return result
def test_process(
        proc_id, host, port, https, root, api_version, token, obs_id, cycles):
    """Worker body for the torture test: import one observation, then run
    ``cycles`` rounds of pixel retrieval, stacking, source extraction and
    photometry against it, retrying transient server failures forever.
    """
    # Import observation; retry every 5 s until file ids come back.
    while True:
        # noinspection PyBroadException
        try:
            file_ids = run_job(
                host, port, https, root, api_version, token, 'batch_import',
                {'settings': [{
                    'provider_id': '1', 'duplicates': 'append',
                    'path': 'User Observations/{}/reduced'.format(obs_id)
                }]})['file_ids']
        except Exception:
            time.sleep(5)
        else:
            if file_ids:
                break
            time.sleep(5)

    for cycle in range(cycles):
        # Failures within a cycle are logged and the next cycle proceeds.
        # noinspection PyBroadException
        try:
            # Retrieve pixel data
            for i in file_ids:
                api_call(
                    host, port, https, root, api_version, token,
                    'GET', 'data-files/{}/pixels'.format(i))

            # Stack images (random jitter desynchronizes the workers)
            time.sleep(random.uniform(0, 10))
            temp_file_id = run_job(
                host, port, https, root, api_version, token, 'stacking',
                {'file_ids': file_ids})['file_id']
            # Delete the temporary stacked file, retrying until it succeeds.
            while True:
                # noinspection PyBroadException
                try:
                    api_call(
                        host, port, https, root, api_version, token,
                        'DELETE', 'data-files/{}'.format(temp_file_id))
                except Exception:
                    time.sleep(5)
                else:
                    break

            # Extract sources from the first image
            time.sleep(random.uniform(0, 10))
            sources = run_job(
                host, port, https, root, api_version, token,
                'source_extraction', {'file_ids': [file_ids[0]]})['data']

            # Photometer sources in all images
            time.sleep(random.uniform(0, 10))
            run_job(
                host, port, https, root, api_version, token, 'photometry',
                {'file_ids': file_ids, 'sources': sources, 'settings': {
                    'a': 10, 'a_in': 15, 'a_out': 20}})
        except Exception:
            traceback.print_exc()

        # Progress marker: "<worker>: <cycle>" (both 1-based).
        print('{}: {}'.format(proc_id + 1, cycle + 1))
# Entry point: parse CLI options, fan out worker processes, and always clean
# up the server's data files afterwards.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        '--host', metavar='HOSTNAME', default='localhost',
        help='Afterglow API server hostname or IP address')
    # noinspection PyTypeChecker
    parser.add_argument(
        '--port', metavar='PORT', type=int, default=5000,
        help='Afterglow API server port')
    parser.add_argument(
        '-s', '--https', action='store_true', help='use HTTPS instead of HTTP')
    parser.add_argument('-r', '--root', default='', help='API root')
    parser.add_argument(
        '-v', '--api-version', default='1', help='server API version')
    parser.add_argument(
        '-t', '--token', help='authenticate with this personal token')
    parser.add_argument(
        '-o', '--obs', metavar='N', help='test observation ID')
    parser.add_argument(
        '-w', '--workers', metavar='N', type=int, default=100,
        help='number of worker processes')
    parser.add_argument(
        '-c', '--cycles', metavar='N', type=int, default=100,
        help='number of test cycles')
    args = parser.parse_args()
    print('Starting {} processes with {} test cycles'
          .format(args.workers, args.cycles))
    # One process per worker, all hammering the same observation.
    processes = [Process(target=test_process, args=(
        i, args.host, args.port, args.https, args.root, args.api_version,
        args.token, args.obs, args.cycles)) for i in range(args.workers)]
    for p in processes:
        p.start()
    try:
        for p in processes:
            p.join()
    finally:
        # Cleanup: delete every data file left on the server, retrying each
        # deletion until it succeeds (runs even if joining was interrupted).
        data_files = api_call(
            args.host, args.port, args.https, args.root, args.api_version,
            args.token, 'GET', 'data-files')
        print('Deleting {} data files'.format(len(data_files)))
        for f in data_files:
            while True:
                # noinspection PyBroadException
                try:
                    # noinspection PyTypeChecker
                    api_call(
                        args.host, args.port, args.https, args.root,
                        args.api_version, args.token,
                        'DELETE', 'data-files/{}'.format(f['id']))
                except Exception:
                    time.sleep(1)
                else:
                    break
6508582 | from models.property import PropertyModel
import json
def test_get_properties(client, test_database):
    """the server should successfully retrieve all properties"""
    # No auth headers: the list endpoint is expected to be public.
    response = client.get("/api/properties")
    assert response.status_code == 200
def test_post_property(client, auth_headers, new_property):
    """POST /api/properties requires admin credentials, succeeds once, and
    rejects duplicates by name."""
    # Reuse the fixture's payload under a fresh, unique name.
    property = new_property.json()
    property['name'] = "new_property"

    """The server should check for the correct credentials when posting a new property"""
    response = client.post("/api/properties", json=property)
    assert response.status_code == 401

    """The server should successfully add a new property"""
    response = client.post("/api/properties", json=property, headers=auth_headers["admin"])
    assert response.status_code == 201

    """The server should return with an error if a duplicate property is posted"""
    response = client.post("/api/properties", json=property, headers=auth_headers["admin"])
    assert response.get_json() == {'message': 'A property with this name already exists'}
    assert response.status_code == 401
def test_get_property_by_name(client, auth_headers, test_database):
    """The get property by name returns a successful response code."""
    response = client.get("/api/properties/test1", headers=auth_headers["admin"])
    assert response.status_code == 200

    """The server responds with an error if the URL contains a non-existent property name"""
    # NOTE(review): comparing a response object directly to an int relies on
    # the test client's custom __eq__; confirm, else use .status_code == 404.
    responseBadPropertyName = client.get("/api/properties/this_property_does_not_exist", headers=auth_headers["admin"])
    assert responseBadPropertyName == 404
def test_archive_property_by_id(client, auth_headers, new_property, test_database):
    """Archiving requires admin rights, flips the 'archived' flag, and fails
    for unknown property ids."""
    test_property = PropertyModel.find_by_name(new_property.name)

    """The server responds with a 401 error if a non-admin tries to archive"""
    # NOTE(review): response-to-int comparison relies on the client's custom
    # __eq__; confirm, else assert on .status_code.
    responseNoAdmin = client.post(f"/api/properties/archive/{test_property.id}")
    assert responseNoAdmin == 401

    """The archive property endpoint should return a 201 code when successful"""
    responseSuccess = client.post(f'/api/properties/archive/{test_property.id}', headers=auth_headers["admin"])
    assert responseSuccess.status_code == 201

    """The property should have its 'archived' key set to True"""
    responseArchivedProperty = client.get(f'/api/properties/{test_property.name}', headers=auth_headers["admin"])
    assert json.loads(responseArchivedProperty.data)["archived"]

    """The server responds with a 400 error if the URL contains a non-existent property id"""
    responseBadPropertyID = client.post("/api/properties/archive/99999", headers=auth_headers["admin"])
    assert responseBadPropertyID.get_json() == {'message': 'Property cannot be archived'}
    assert responseBadPropertyID.status_code == 400
def test_delete_property_by_name(client, auth_headers, new_property, test_database):
    """Deleting a property removes it, requires admin rights, and 404s for
    unknown names."""
    test_property = PropertyModel.find_by_name(new_property.name)

    """First verify that the property exists"""
    response = client.get(f"/api/properties/{test_property.name}", headers=auth_headers["admin"])
    assert response.status_code == 200

    response = client.delete(f"/api/properties/{test_property.name}", headers=auth_headers["admin"])
    assert response.status_code == 200

    """Now verify that the property no longer exists"""
    response = client.get(f"/api/properties/{test_property.name}", headers=auth_headers["admin"])
    assert response.status_code == 404

    """The server responds with a 401 error if a non-admin tries to delete"""
    # NOTE(review): response-to-int comparisons below rely on the client's
    # custom __eq__; confirm, else assert on .status_code.
    responseNoAdmin = client.delete(f"/api/properties/{test_property.name}")
    assert responseNoAdmin == 401

    """The server responds with a 404 error if property not exist"""
    response = client.delete(f"/api/properties/propertyNotInDB", headers=auth_headers["admin"])
    assert response == 404
def test_update_property_by_name(client, auth_headers, new_property, test_database):
    """PUT /api/properties/<name> persists field changes (address here)."""
    test_property = PropertyModel.find_by_name(new_property.name)
    new_property_address = "123 NE Flanders St"
    test_property.address = new_property_address
    # NOTE(review): the PUT response is not asserted on; consider checking
    # responseUpdateProperty.status_code as well.
    responseUpdateProperty = client.put( f'/api/properties/{test_property.name}'
                                        , headers=auth_headers["admin"]
                                        , json=test_property.json()
                                        )

    """The property should have a new address"""
    test_changed_property = client.get(f'/api/properties/{test_property.name}', headers=auth_headers["admin"])
    test_changed_property = json.loads(test_changed_property.data)
    assert test_changed_property["address"] == new_property_address
| StarcoderdataPython |
1991412 | <filename>set_up/group_settings.py
import discord
from discord.ext import commands
from set_up.settings import BotSettings, SettingTypes
from database.database import Database, SelectType
from tools.string import StringTools
import tools.channels as ChannelTools
import tools.error as Error
import tools.weather as Weather
import pytz
from typing import Dict
# GroupInfo: Stores specific information about a group
class GroupInfo():
    """Holds the phrasing and database strings that distinguish one settings
    group (server vs. user) from another.

    Attributes:
        object: phrase appended after "set" in prompts (e.g. " the server")
        table: database table holding the group's account rows
        possess: possessive phrase used in messages (e.g. "the server's")
    """

    def __init__(self, object: str, table: str, possess: str):
        self.object, self.table, self.possess = object, table, possess
# Maps each settings category to the phrasing/table info used by the prompt
# and database logic below.
GROUP_DICT = {SettingTypes.Server: GroupInfo(" the server", "Server_Accounts", "the server's"),
              SettingTypes.User: GroupInfo("", "User_Accounts", "your")}
# GroupSettings: settings for servers or private dms
class GroupSettings(BotSettings):
    """Timezone/region settings handling shared by servers and private DMs."""

    def __init__(self, client: discord.Client):
        super().__init__(client)
# get_conditions(category, ctx) Get the specific conditions needed to look
# for in the database for a certain table
def get_conditions(self, category: SettingTypes, ctx: commands.Context) -> Dict[str, str]:
if (category == SettingTypes.Server):
conditions = {"id": f"{ctx.guild.id}"}
elif (category == SettingTypes.User):
conditions = {"id": f"{ctx.author.id}"}
return conditions
    # change_timezone(self, ctx, timezone) Changes the server's timezone
    # effects: sends embeds
    #          deletes and edits messages
    async def change_timezone(self, ctx: commands.Context, timezone: str, category: SettingTypes):
        """Change the timezone of the group (server or user) after an
        interactive confirmation; rejects names not in pytz.all_timezones.
        """
        error = False
        if (category == SettingTypes.Server):
            error = await ChannelTools.validate_activity_channel(ctx, error)
        timezone = StringTools.convert_str(timezone)
        # Reject timezone names pytz does not recognize.
        if (timezone not in pytz.all_timezones):
            error = True
            embeded_message = Error.display_error(self.client, 10, element = timezone, group = "pytz.all_timezones", parameter = "timezones")
            await ctx.send(embed = embeded_message)
        # Server changes additionally require a guild context.
        elif (not error and ((category == SettingTypes.User) or (category == SettingTypes.Server and ctx.guild is not None))):
            table = GROUP_DICT[category].table
            conditions = self.get_conditions(category, ctx)
            group_possess = GROUP_DICT[category].possess
            cap_group_possess = group_possess.capitalize()
            question_message = f"Do you want to change {group_possess} timezone to `{timezone}`?"
            question_title = "Change Timezone?"
            # Whether the group currently syncs its timezone to its region.
            sync = Database.default_select(self.default_setting, SelectType.List, ["sync_time", table], {"conditions": conditions}, [ctx], {})[0]
            fields = None
            if (sync):
                fields = {"Warning \U000026A0": f"*After this change,{group_possess} timezone will stop being synchronized with {group_possess} region and {group_possess} timezone will instead follow the timezone that you selected*"}
            answer = await self.en_question_setting(ctx, question_message, question_title, True, fields = fields)
            if (answer in StringTools.TRUE):
                update_data = {"timezone": f"'{timezone}'"}
                # An explicit timezone choice turns region-sync off.
                if (sync):
                    update_data["sync_time"] = f"{int(not(sync))}"
                    fields = {"Note \U0001F4DD": f"*{cap_group_possess} timezone stopped synchronizing with {group_possess} region*"}
                Database.update(update_data, table, conditions=conditions)
                answer_message = f"{cap_group_possess} timezone has been updated to `{timezone}`"
                answer_title = "Successfully Changed Timezone"
                await self.en_answer_setting(ctx, answer_message, answer_title, True, fields = fields)
    # change_region(self, ctx, region) Changes the server's region
    # effects: sends embeds
    #          deletes and edits messages
    async def change_region(self, ctx: commands.Context, region: str, category: str):
        """Change the region of the group (server or user) after confirming
        the geocoded match; when region-sync is on, the timezone follows.
        """
        error = False
        if (category == SettingTypes.Server):
            error = await ChannelTools.validate_activity_channel(ctx, error)
        region = StringTools.convert_str(region)
        # Geocode the query; None means no matching region was found.
        weather_info = await Weather.get_weather(region)
        if (weather_info is None):
            embeded_message = Error.display_error(self.client, 17, member = "region", member_search_type = "search query", search_member = region)
            await ctx.send(embed = embeded_message)
        # Server changes additionally require a guild context.
        elif (not error and ((category == SettingTypes.User) or (category == SettingTypes.Server and ctx.guild is not None))):
            object = GROUP_DICT[category].object
            table = GROUP_DICT[category].table
            conditions = self.get_conditions(category, ctx)
            group_possess = GROUP_DICT[category].possess
            cap_group_possess = group_possess.capitalize()
            question = f"Is this the region that you want to set{object}?"
            question_title = "Change Region"
            # Show the matched region with its coordinates for confirmation.
            fields = {"Region \U0001F5FA": f"```bash\n'found region': {weather_info.name}\n\n'latitude': {weather_info.latitude}\n'longitude': {weather_info.longitude}\n```"}
            answer = await self.en_question_setting(ctx, question, question_title, True, fields = fields)
            if (answer in StringTools.TRUE):
                sync = Database.default_select(self.default_setting, SelectType.List, ["sync_time", table], {"conditions": conditions}, [ctx], {})[0]
                update_data = {"region": f"'{weather_info.name}'"}
                # With region-sync on, the timezone tracks the new region.
                if (sync):
                    update_data["timezone"] = f"'{weather_info.tz_offset}'"
                Database.update(update_data, table, conditions=conditions)
                answer_message = f"{cap_group_possess} region has been updated to `{weather_info.name}`"
                answer_title = "Successfully Changed Region"
                await self.en_answer_setting(ctx, answer_message, answer_title, True, fields = fields)
# sync_time(ctx, category) Changes whether a server synchronizes their
# timezone with their region
# effects: sends embeds
# deletes and edits messages
async def sync_time(self, ctx: commands.Context, category: str):
error = False
if (category == SettingTypes.Server):
error = await ChannelTools.validate_activity_channel(ctx, error)
if (not error and ((category == SettingTypes.User) or (category == SettingTypes.Server and ctx.guild is not None))):
columns_needed = ["region", "sync_time"]
table = GROUP_DICT[category].table
conditions = self.get_conditions(category, ctx)
group_possess = GROUP_DICT[category].possess
cap_group_possess = group_possess.capitalize()
region_data = Database.default_select(self.default_setting, SelectType.Formatted, [columns_needed, columns_needed, table], {"conditions": conditions}, [ctx], {})[0]
weather_info = await Weather.get_weather(region_data["region"])
question = f"Do you want to synchronize {group_possess} timezone to {group_possess} selected region"
question_title = "Synchronize Time with Region"
sync = not(bool(region_data["sync_time"]))
fields = {"Server's Region \U0001F5FA": f"```bash\n'Region': {weather_info.name}\n\n'latitude': {weather_info.latitude}\n'longitude': {weather_info.longitude}\n```"}
answer = await self.question_setting(ctx, question, question_title, sync, {"synchronize": "unsynchronize", "Synchronize": "Unsynchronize"}, fields = fields)
if (answer in StringTools.TRUE):
update_data = {columns_needed[1]: f"{int(sync)}"}
if (sync):
timezone = str(weather_info.tz_offset)
update_data["timezone"] = str(weather_info.tz_offset)
Database.update(update_data, table, conditions=conditions)
answer_message = f"{cap_group_possess} timezone has been synchronized with {group_possess} region"
answer_title = "Successfully Synchronized Time with Region"
await self.answer_setting(ctx, answer_message, answer_title, sync, {"synchronized": "unsynchronized", "Synchronized": "Unsynchronized"}, fields = fields)
# update_time() Updates the time for timezones with only a time difference
# to account for daylight savings
async def update_time(self):
found_weather = {}
table = "Server_Accounts"
columns_needed = ["id", "timezone", "region"]
time_data = Database.formatted_select(columns_needed, columns_needed, table, conditions = {"sync_time": "1"})
for d in time_data:
try:
tz_float = float(d["timezone"])
except:
pass
else:
region = d["region"]
if (region not in list(found_weather.keys())):
found_weather[region] = await Weather.get_weather(region)
latest_weather = found_weather[region]
if (latest_weather.tz_offset != tz_float):
Database.update({"timezone": f"'{latest_weather.tz_offset}'"}, table, conditions = {"id": f"{d['id']}"})
| StarcoderdataPython |
6556319 | from mayan.apps.testing.tests.base import BaseTestCase
from ..events import event_cache_created, event_cache_purged
from ..models import Cache
from .mixins import CacheTestMixin
class CacheEventsTestCase(CacheTestMixin, BaseTestCase):
    """Verify that cache lifecycle operations emit the expected audit events."""

    def _assert_single_self_event(self, expected_verb):
        # Shared assertion: exactly one event was recorded, acted and
        # targeted by the most recently stored cache, with ``expected_verb``.
        events = self._get_test_events()
        self.assertEqual(events.count(), 1)

        latest_cache = Cache.objects.last()
        event = events[0]
        self.assertEqual(event.action_object, None)
        self.assertEqual(event.actor, latest_cache)
        self.assertEqual(event.target, latest_cache)
        self.assertEqual(event.verb, expected_verb)

    def test_cache_create_event(self):
        self._clear_events()

        self._create_test_cache()

        self._assert_single_self_event(expected_verb=event_cache_created.id)

    def test_cache_purge_event(self):
        self._create_test_cache()
        self._clear_events()

        self.test_cache.purge()

        self._assert_single_self_event(expected_verb=event_cache_purged.id)
| StarcoderdataPython |
8143450 | #!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
TOKEN-BASED AUTH MIDDLEWARE
This WSGI component performs multiple jobs:
- it verifies that incoming client requests have valid tokens by verifying
tokens with the auth service.
- it will reject unauthenticated requests UNLESS it is in 'delay_auth_decision'
mode, which means the final decision is delegated to the downstream WSGI
component (usually the OpenStack service)
- it will collect and forward identity information from a valid token
such as user name, groups, etc...
Refer to: http://wiki.openstack.org/openstack-authn
This WSGI component has been derived from Keystone's auth_token
middleware module. It contains some specialization for Quantum.
HEADERS
=======
Headers starting with ``HTTP_`` is a standard http header
Headers starting with ``HTTP_X`` is an extended http header
Coming in from initial call from client or customer
---------------------------------------------------
HTTP_X_AUTH_TOKEN
The client token being passed in
HTTP_X_STORAGE_TOKEN
The client token being passed in (legacy Rackspace use) to support
cloud files
Used for communication between components
-----------------------------------------
www-Authenticate
Only used if this component is being used remotely
HTTP_AUTHORIZATION
Basic auth password used to validate the connection
What we add to the request for use by the OpenStack service
-----------------------------------------------------------
HTTP_X_AUTHORIZATION
The client identity being passed in
"""
import httplib
import json
import logging
import urllib
from urlparse import urlparse
from webob.exc import HTTPUnauthorized, Request, Response
from keystone.common.bufferedhttp import http_connect_raw as http_connect
PROTOCOL_NAME = "Quantum Token Authentication"
logger = logging.getLogger(__name__) # pylint: disable=C0103
# pylint: disable=R0902
class AuthProtocol(object):
    """WSGI auth middleware that validates client tokens against Keystone.

    Runs either inside a WSGI pipeline (passing validated requests to the
    wrapped app) or standalone, proxying requests to a remote Quantum
    service.
    """

    def _init_protocol_common(self, app, conf):
        """Initialization shared by all auth protocols."""
        logger.info("Starting the %s component", PROTOCOL_NAME)
        self.conf = conf
        self.app = app
        #if app is set, then we are in a WSGI pipeline and requests get passed
        # on to app. If it is not set, this component should forward requests
        # where to find the Quantum service (if not in local WSGI chain)
        # these settings are only used if this component is acting as a proxy
        # and the OpenSTack service is running remotely
        if not self.app:
            self.service_protocol = conf.get('quantum_protocol', 'https')
            self.service_host = conf.get('quantum_host')
            # NOTE(review): int() raises TypeError if 'quantum_port' is
            # missing from conf -- presumably required in proxy mode; confirm.
            self.service_port = int(conf.get('quantum_port'))
            self.service_url = '%s://%s:%s' % (self.service_protocol,
                                               self.service_host,
                                               self.service_port)
        # delay_auth_decision means we still allow unauthenticated requests
        # through and we let the downstream service make the final decision
        self.delay_auth_decision = int(conf.get('delay_auth_decision', 0))
    def _init_protocol(self, _app, conf):
        """Protocol specific initialization: auth-service location and creds."""
        # where to find the auth service (we use this to validate tokens)
        self.auth_host = conf.get('auth_host')
        self.auth_port = int(conf.get('auth_port'))
        self.auth_protocol = conf.get('auth_protocol', 'http')
        self.cert_file = conf.get('certfile', None)
        self.key_file = conf.get('keyfile', None)
        self.auth_timeout = conf.get('auth_timeout', 30)
        self.auth_api_version = conf.get('auth_version', '2.0')
        self.auth_location = "%s://%s:%s" % (self.auth_protocol,
                                             self.auth_host,
                                             self.auth_port)
        # URI advertised to clients in the WWW-Authenticate header.
        self.auth_uri = conf.get('auth_uri', self.auth_location)
        logger.debug("Authentication Service:%s", self.auth_location)
        # Credentials used to verify this component with the Auth service
        # since validating tokens is a privileged call
        self.admin_user = conf.get('admin_user')
        self.admin_password = conf.get('admin_password')
        self.admin_token = conf.get('admin_token')
        # bind to one or more service instances
        service_ids = conf.get('service_ids')
        # Optional query string scoping token calls to specific service ids.
        self.serviceId_qs = ''
        if service_ids:
            self.serviceId_qs = '?HP-IDM-serviceId=%s' % \
                                (urllib.quote(service_ids))
def _build_token_uri(self, claims=None):
claim_str = "/%s" % claims if claims else ""
return "/v%s/tokens%s%s" % (self.auth_api_version, claim_str,
self.serviceId_qs or '')
    def __init__(self, app, conf):
        """Common initialization code."""
        # Defining instance variables here for improving pylint score
        # NOTE(salvatore-orlando): the following vars are assigned values
        # either in init_protocol or init_protocol_common. We should not
        # worry about them being initialized to None
        self.admin_password = None
        self.admin_token = None
        self.admin_user = None
        self.auth_api_version = None
        self.auth_host = None
        self.auth_location = None
        self.auth_uri = None
        self.auth_port = None
        self.auth_protocol = None
        self.auth_timeout = None
        self.cert_file = None
        self.key_file = None
        self.service_host = None
        self.service_port = None
        self.service_protocol = None
        self.service_url = None
        # NOTE(review): the fields below hold per-request state set in
        # __call__; storing them on the middleware instance looks unsafe
        # under concurrent requests -- confirm deployment model.
        self.proxy_headers = None
        self.start_response = None
        self.app = None
        self.conf = None
        self.env = None
        self.delay_auth_decision = None
        self.expanded = None
        self.claims = None
        self._init_protocol_common(app, conf)  # Applies to all protocols
        self._init_protocol(app, conf)  # Specific to this protocol
# pylint: disable=R0912
    def __call__(self, env, start_response):
        """Handle an incoming request: authenticate, then send downstream.

        When claims are missing or invalid, either rejects with 401 or (in
        delay_auth_decision mode) forwards with X-Identity-Status: Invalid
        so the downstream service can decide. Valid claims are expanded
        into identity headers before forwarding.
        """
        logger.debug("entering AuthProtocol.__call__")
        logger.debug("start response:%s", start_response)
        self.start_response = start_response
        self.env = env
        #Prep headers to forward request to local or remote downstream service
        self.proxy_headers = env.copy()
        # NOTE(review): this mutates the dict while iterating iterkeys();
        # in Python 2 that is undefined once keys are added/removed --
        # confirm, or iterate over a snapshot of the keys.
        for header in self.proxy_headers.iterkeys():
            if header[0:5] == 'HTTP_':
                # Strip the WSGI 'HTTP_' prefix for the proxied header map.
                self.proxy_headers[header[5:]] = self.proxy_headers[header]
                del self.proxy_headers[header]
        #Look for authentication claims
        logger.debug("Looking for authentication claims")
        self.claims = self._get_claims(env)
        if not self.claims:
            #No claim(s) provided
            logger.debug("No claims provided")
            if self.delay_auth_decision:
                #Configured to allow downstream service to make final decision.
                #So mark status as Invalid and forward the request downstream
                self._decorate_request("X_IDENTITY_STATUS", "Invalid")
            else:
                #Respond to client as appropriate for this auth protocol
                return self._reject_request()
        else:
            # this request is presenting claims. Let's validate them
            logger.debug("Claims found. Validating.")
            valid = self._validate_claims(self.claims)
            if not valid:
                # Keystone rejected claim
                if self.delay_auth_decision:
                    # Downstream service will receive call still and decide
                    self._decorate_request("X_IDENTITY_STATUS", "Invalid")
                else:
                    #Respond to client as appropriate for this auth protocol
                    return self._reject_claims()
            else:
                self._decorate_request("X_IDENTITY_STATUS", "Confirmed")
            #Collect information about valid claims
            if valid:
                logger.debug("Validation successful")
                claims = self._expound_claims()
                # Store authentication data
                if claims:
                    # TODO(Ziad): add additional details we may need,
                    # like tenant and group info
                    # NOTE(review): claims['user'] is a dict here, so the
                    # header value renders the whole dict -- confirm intent.
                    self._decorate_request('X_AUTHORIZATION', "Proxy %s" %
                                           claims['user'])
                    self._decorate_request('X_TENANT_ID',
                                           claims['tenant']['id'],)
                    self._decorate_request('X_TENANT_NAME',
                                           claims['tenant']['name'])
                    self._decorate_request('X_USER_ID',
                                           claims['user']['id'])
                    self._decorate_request('X_USER_NAME',
                                           claims['user']['name'])
                    # Deprecated duplicates kept for older consumers.
                    self._decorate_request('X_TENANT', claims['tenant']['id'])
                    self._decorate_request('X_USER', claims['user']['id'])
                    if 'group' in claims:
                        self._decorate_request('X_GROUP', claims['group'])
                    if 'roles' in claims and len(claims['roles']) > 0:
                        if claims['roles'] is not None:
                            # Comma-join the role names into one header value.
                            roles = ''
                            for role in claims['roles']:
                                if len(roles) > 0:
                                    roles += ','
                                roles += role
                            self._decorate_request('X_ROLE', roles)
                    # NOTE(todd): unused
                    self.expanded = True
        logger.debug("About to forward request")
        #Send request downstream
        return self._forward_request()
# NOTE(salvatore-orlando): this function is now used again
    def get_admin_auth_token(self, username, password):
        """
        This function gets an admin auth token to be used by this service to
        validate a user's token. Validate_token is a priviledged call so
        it needs to be authenticated by a service that is calling it.

        Returns the raw JSON response body (bytes/str) from Keystone.
        """
        headers = {
            "Content-type": "application/json",
            "Accept": "application/json"}
        # Keystone v2.0 password-credentials auth request body.
        params = {
            "auth":
            {
                "passwordCredentials":
                {
                    "username": username,
                    "password": password
                }
            }
        }
        if self.auth_protocol == "http":
            conn = httplib.HTTPConnection(self.auth_host, self.auth_port)
        else:
            conn = httplib.HTTPSConnection(self.auth_host, self.auth_port,
                                           cert_file=self.cert_file)
        conn.request("POST", self._build_token_uri(), json.dumps(params), \
                     headers=headers)
        response = conn.getresponse()
        data = response.read()
        return data
@staticmethod
def _get_claims(env):
"""Get claims from request"""
claims = env.get('HTTP_X_AUTH_TOKEN', env.get('HTTP_X_STORAGE_TOKEN'))
return claims
    def _reject_request(self):
        """Reject an unauthenticated request with 401 + Keystone pointer.

        The WWW-Authenticate header tells the client where to obtain a token.
        """
        return HTTPUnauthorized("Authentication required",
                                [("WWW-Authenticate",
                                  "Keystone uri='%s'" % self.auth_uri)])(self.env,
                                                                         self.start_response)

    def _reject_claims(self):
        """Reject a request whose presented token failed validation (401)."""
        return HTTPUnauthorized()(self.env, self.start_response)
    def _validate_claims(self, claims, retry=False):
        """Validate *claims* against Keystone; return True when accepted.

        On a 404 the cached admin token may simply have expired, so a fresh
        admin token is fetched and the validation retried exactly once
        (guarded by *retry*).
        """
        # Step 1: We need to auth with the keystone service, so get an
        # admin token
        # TODO(ziad): Need to properly implement this, where to store creds
        # for now using token from ini
        # NOTE(salvatore-orlando): Temporarily restoring auth token retrieval,
        # with credentials in configuration file
        if not self.admin_token:
            auth = self.get_admin_auth_token(self.admin_user,
                                             self.admin_password)
            self.admin_token = json.loads(auth)["access"]["token"]["id"]
        # Step 2: validate the user's token with the auth service
        # since this is a priviledged op,m we need to auth ourselves
        # by using an admin token
        headers = {"Content-type": "application/json",
                   "Accept": "application/json",
                   "X-Auth-Token": self.admin_token}
        conn = http_connect(self.auth_host, self.auth_port, 'GET',
                            self._build_token_uri(claims), headers=headers,
                            ssl=(self.auth_protocol == 'https'),
                            key_file=self.key_file, cert_file=self.cert_file,
                            timeout=self.auth_timeout)
        resp = conn.getresponse()
        # pylint: disable=E1103
        conn.close()
        if not str(resp.status).startswith('20'):
            # Keystone rejected claim
            # In case a 404 error it might just be that the token has expired
            # Therefore try and get a new token
            # of course assuming admin credentials have been specified
            # Note(salvatore-orlando): the 404 here is not really
            # what should be returned
            if self.admin_user and self.admin_password and \
                    not retry and str(resp.status) == '404':
                logger.warn("Unable to validate token." +
                            "Admin token possibly expired.")
                self.admin_token = None
                return self._validate_claims(claims, True)
            return False
        else:
            #TODO(Ziad): there is an optimization we can do here. We have just
            #received data from Keystone that we can use instead of making
            #another call in _expound_claims
            logger.info("Claims successfully validated")
            return True
def _expound_claims(self):
# Valid token. Get user data and put it in to the call
# so the downstream service can use it
headers = {"Content-type": "application/json",
"Accept": "application/json",
"X-Auth-Token": self.admin_token}
conn = http_connect(self.auth_host, self.auth_port, 'GET',
self._build_token_uri(self.claims),
headers=headers,
ssl=(self.auth_protocol == 'https'),
key_file=self.key_file, cert_file=self.cert_file,
timeout=self.auth_timeout)
resp = conn.getresponse()
data = resp.read()
# pylint: disable=E1103
conn.close()
if not str(resp.status).startswith('20'):
raise LookupError('Unable to locate claims: %s' % resp.status)
token_info = json.loads(data)
#TODO(Ziad): make this more robust
#first_group = token_info['auth']['user']['groups']['group'][0]
roles = []
rolegrants = token_info["access"]["user"]["roles"]
if rolegrants is not None:
roles = [rolegrant["id"] for rolegrant in rolegrants]
token_info = json.loads(data)
roles = [role['name'] for role in token_info[
"access"]["user"]["roles"]]
# in diablo, there were two ways to get tenant data
tenant = token_info['access']['token'].get('tenant')
if tenant:
# post diablo
tenant_id = tenant['id']
tenant_name = tenant['name']
else:
# diablo only
tenant_id = token_info['access']['user'].get('tenantId')
tenant_name = token_info['access']['user'].get('tenantName')
verified_claims = {
'user': {
'id': token_info['access']['user']['id'],
'name': token_info['access']['user']['name'],
},
'tenant': {
'id': tenant_id,
'name': tenant_name
},
'roles': roles}
return verified_claims
def _decorate_request(self, index, value):
"""Add headers to request"""
self.proxy_headers[index] = value
self.env["HTTP_%s" % index] = value
    def _forward_request(self):
        """Token/Auth processed & claims added to headers; pass the call on."""
        #now decide how to pass on the call
        if self.app:
            # Pass to downstream WSGI component
            return self.app(self.env, self.start_response)
            #.custom_start_response)
        else:
            # We are forwarding to a remote service (no downstream WSGI app)
            # NOTE(review): Request/Response are imported from webob.exc at
            # the top of this module; they normally live in the webob package
            # root -- confirm this import works with the pinned webob version.
            req = Request(self.proxy_headers)
            # pylint: disable=E1101
            parsed = urlparse(req.url)
            conn = http_connect(self.service_host,
                                self.service_port,
                                req.method,
                                parsed.path,
                                self.proxy_headers,
                                ssl=(self.service_protocol == 'https'))
            resp = conn.getresponse()
            data = resp.read()
            return Response(status=resp.status, body=data)(self.proxy_headers,
                                                           self.start_response)
def filter_factory(global_conf, **local_conf):
    """Returns a WSGI filter app for use with paste.deploy.

    Local configuration values override the global ones.
    """
    conf = dict(global_conf, **local_conf)

    def auth_filter(application):
        # Wrap the downstream WSGI application with token authentication.
        return AuthProtocol(application, conf)

    return auth_filter
def app_factory(global_conf, **local_conf):
    """Return a standalone AuthProtocol app (no downstream WSGI component).

    Local configuration values override the global ones.
    """
    conf = dict(global_conf, **local_conf)
    return AuthProtocol(None, conf)
| StarcoderdataPython |
9680134 | <gh_stars>0
from typing import Union, Optional
__all__ = ("Surah", "Ayah", "Search", "ApiError", "ContentTypeError", "NumberError", "WrongLang")
class ApiError(Exception):
    """Raised when the alquran.cloud API reports an error response.

    ``status`` may also be supplied via the ``code`` keyword: every caller
    in this module raises ``ApiError(code=..., msg=...)``, which previously
    crashed with a TypeError because only ``(status, msg)`` was accepted.
    """

    def __init__(self, status: int = None, msg: str = "", *, code: int = None) -> None:
        if status is None:
            status = code
        super().__init__(f"Api has an error, return code: {status}.\n{msg}")


class ContentTypeError(Exception):
    """Raised when the API returns a non-JSON (unexpected mimetype) body."""

    def __init__(self, class_: str, mode: str, first_query: Optional[Union[str, int]], second_query: Optional[Union[str, int]]) -> None:
        # Ayah URLs use "surah:ayah"; other endpoints use "query/query".
        super().__init__(f"Attempt to decode JSON with unexpected mimetype: Return code: None\nlink: http://api.alquran.cloud/v1/{mode}/{first_query}:{second_query}/en.asad" if class_ == "Ayah" else f"Attempt to decode JSON with unexpected mimetype: Return code: None\nlink: http://api.alquran.cloud/v1/{mode}/{first_query}/{second_query}/en.asad")


class NumberError(Exception):
    """Raised when a surah/ayah number is outside its valid range."""

    def __init__(self, mode: int, obj: str, first_query: int, second_query: Optional[Union[str, int]]) -> None:
        # mode 0: lower-bound violation; mode 1: outside a closed range.
        super().__init__(f"{obj} must above {first_query}" if mode == 0 else f"{obj} must be between {first_query} to {second_query}")


class WrongLang(Exception):
    """Raised when an unsupported language code is requested."""

    def __init__(self, lang: str) -> None:
        super().__init__(f"The lang '{lang}' is not supported, it only support arabic(ar) and english(eng)")
class Surah:
    """Wrapper around the alquran.cloud surah endpoint (en.asad edition)."""

    def __init__(self, surah: int = None):
        # Surah number (1-114). Data is attached by request/async_request.
        self.surah = surah

    @classmethod
    def request(
        cls,
        surah: int = None
    ):
        """Synchronously fetch surah data; requires the ``requests`` package."""
        try:
            import requests
        except ImportError:
            raise ImportError(
                "Please Install the requests module if you want to make a sync request."
            )
        self = cls(surah)
        self.request = requests.get(
            f"https://api.alquran.cloud/v1/surah/{surah}/en.asad"
        ).json()
        # Check the API code *before* touching the payload: on errors the
        # "data" field is a message string and has no "ayahs" key (the
        # original crashed here instead of raising ApiError).
        if self.request["code"] > 202:
            raise ApiError(
                self.request['code'], self.request['data']
            )
        self.data = self.request["data"]
        self.ayah = self.data["ayahs"]
        return self

    @classmethod
    async def async_request(
        cls,
        surah: int = None, *,
        loop=None
    ):
        """Asynchronously fetch surah data; requires the ``aiohttp`` package."""
        try:
            import aiohttp
        except ImportError:
            raise ImportError(
                "Please Install the aiohttp module if you want to make an async request."
            )
        self = cls(surah)
        async with aiohttp.ClientSession(loop=loop) as session:
            async with session.get(
                f"https://api.alquran.cloud/v1/surah/{surah}/en.asad"
            ) as resp:
                self.request = await resp.json()
        if self.request["code"] > 202:
            raise ApiError(
                self.request['code'], self.request['data']
            )
        self.data = self.request["data"]
        self.ayah = self.data["ayahs"]
        return self

    @property
    def api_code(self):
        """HTTP-style status code reported by the API."""
        return self.request["code"]

    @property
    def api_status(self):
        """Status string reported by the API."""
        return self.request["status"]

    def name(self, lang: str = "ar"):
        """Return the surah name in Arabic ("ar") or English ("eng")."""
        if lang == "ar" or lang.lower() == "arabic":
            return self.data["name"]
        elif lang == "eng" or lang.lower() == "english":
            return self.data["englishName"]
        else:
            raise WrongLang(lang=lang)

    @property
    def name_mean(self):
        """English translation of the surah name."""
        return self.data["englishNameTranslation"]

    @property
    def revelation_type(self):
        """Where the surah was revealed (Meccan/Medinan)."""
        return self.data["revelationType"]

    @property
    def number_ayahs(self):
        """Number of ayahs in this surah."""
        return self.data["numberOfAyahs"]

    def request_ayahs(self):
        """Return the text of every ayah in this surah.

        Uses the already-fetched data; the original issued a redundant
        network request just to obtain the loop bound.
        """
        return [self.ayah[number]['text'] for number in range(self.number_ayahs)]

    async def ayah_info(
        self,
        ayah: int = 1, *,
        text: bool = True,
        number_in_quran: bool = True,
        number_in_surah: bool = True,
        juz: bool = True,
        manzil: bool = True,
        page: bool = True,
        ruku: bool = True,
        hizbquarter: bool = True,
        sajda: bool = True
    ):
        """Return selected fields of one ayah (1-based index).

        Each keyword flag controls whether the matching entry is populated;
        disabled entries are set to None. Raises NumberError when ``ayah``
        is out of range.
        """
        if ayah <= 0:
            raise NumberError(mode=0, obj="ayah", first_query=1, second_query=None)
        elif ayah > int(self.number_ayahs):
            raise NumberError(mode=1, obj="ayah", first_query=1, second_query=self.number_ayahs)
        ayah -= 1
        # Fix: each field is now gated by its *own* flag; the original
        # reused text/number_in_quran/number_in_surah/juz for the last
        # four entries (copy-paste error).
        data = {
            "text": self.ayah[ayah]["text"] if text else None,
            "number in quran": self.ayah[ayah]["number"]
            if number_in_quran
            else None,
            "number in surah": self.ayah[ayah]["numberInSurah"]
            if number_in_surah
            else None,
            "juz": self.ayah[ayah]["juz"] if juz else None,
            "manzil": self.ayah[ayah]["manzil"] if manzil else None,
            "page": self.ayah[ayah]["page"] if page else None,
            "ruku": self.ayah[ayah]["ruku"] if ruku else None,
            "hizbquarter": self.ayah[ayah]["hizbQuarter"] if hizbquarter else None,
            "sajda": self.ayah[ayah]["sajda"] if sajda else None
        }
        return data
class Ayah:
    """Wrapper around the alquran.cloud ayah endpoint (en.asad edition)."""

    def __init__(self, surah: int = None, *, ayah: int = None):
        self.surah = surah
        self.ayah = ayah

    @classmethod
    def request(
        cls,
        surah: int = None, *,
        ayah: int = None,
        loop=None
    ):
        """Synchronously fetch one ayah; requires ``requests``.

        ``loop`` is accepted only for signature parity with
        ``async_request`` and is unused here.
        """
        try:
            import requests
        except ImportError:
            raise ImportError(
                "Please Install the requests module if you want to make a sync request."
            )
        self = cls(surah, ayah=ayah)
        self.request = requests.get(
            f"http://api.alquran.cloud/v1/ayah/{surah}:{ayah}/en.asad"
        ).json()
        # Raise before touching the payload so error responses surface as
        # ApiError (the original accessed "data" first and used keyword
        # arguments ApiError did not accept).
        if self.request["code"] > 202:
            raise ApiError(
                self.request['code'], self.request['data']
            )
        self.data = self.request["data"]
        return self

    @classmethod
    async def async_request(
        cls,
        surah: int = None, *,
        ayah: int = None,
        loop=None
    ):
        """Asynchronously fetch one ayah; requires ``aiohttp``."""
        try:
            import aiohttp
        except ImportError:
            raise ImportError(
                "Please Install the aiohttp module if you want to make an async request."
            )
        self = cls(surah, ayah=ayah)
        try:
            async with aiohttp.ClientSession(loop=loop) as session:
                async with session.get(
                    f"http://api.alquran.cloud/v1/ayah/{surah}:{ayah}/en.asad"
                ) as resp:
                    self.request = await resp.json()
        except aiohttp.client_exceptions.ContentTypeError:
            # Fix: report the ayah number; the original passed surah twice.
            raise ContentTypeError(class_="Ayah", mode='ayah', first_query=surah, second_query=ayah)
        if self.request["code"] > 202:
            raise ApiError(
                self.request['code'], self.request['data']
            )
        self.data = self.request["data"]
        return self

    @property
    def api_code(self):
        """HTTP-style status code reported by the API."""
        return self.request['code']

    @property
    def api_status(self):
        """Status string reported by the API."""
        return self.request['status']
class Search:
    """Wrapper around the alquran.cloud search endpoint (en.pickthall)."""

    def __init__(self, mention: str = None, *, surah: Optional[Union[str, int]] = None, request: int = None):
        self.mention = mention
        self.surah = surah
        # Maximum number of matches that ``find`` returns (None = all).
        self.req = request

    @classmethod
    async def async_request(
        cls,
        mention: str = None, *,
        surah: Optional[Union[str, int]] = None,
        request: int = None,
        loop=None
    ):
        """Asynchronously search the Quran; requires ``aiohttp``."""
        try:
            import aiohttp
        except ImportError:
            raise ImportError(
                "Please Install the aiohttp module if you want to make an async request."
            )
        self = cls(mention, surah=surah, request=request)
        try:
            async with aiohttp.ClientSession(loop=loop) as session:
                async with session.get(
                    f"http://api.alquran.cloud/v1/search/{mention}/{surah}/en.pickthall"
                ) as resp:
                    self.request = await resp.json()
        except aiohttp.client_exceptions.ContentTypeError:
            raise ContentTypeError(class_="Search", mode='search', first_query=mention, second_query=surah)
        # Raise before touching the payload so error responses surface as
        # ApiError (the original accessed "matches" first and used keyword
        # arguments ApiError did not accept).
        if self.request["code"] > 202:
            raise ApiError(
                self.request['code'], self.request['data']
            )
        self.data = self.request["data"]
        self.matches = self.data['matches']
        return self

    @classmethod
    def request(
        cls,
        mention: str = None, *,
        surah: Optional[Union[str, int]] = None,
        request: int = None
    ):
        """Synchronously search the Quran; requires ``requests``."""
        try:
            import requests
        except ImportError:
            raise ImportError(
                "Please Install the requests module if you want to make a sync request."
            )
        self = cls(mention, surah=surah, request=request)
        self.request = requests.get(
            f"http://api.alquran.cloud/v1/search/{mention}/{surah}/en.pickthall"
        ).json()
        if self.request["code"] > 202:
            raise ApiError(
                self.request['code'], self.request['data']
            )
        self.data = self.request["data"]
        self.matches = self.data['matches']
        return self

    @property
    def count(self):
        """Total number of matches reported by the API."""
        return self.data['count']

    @property
    def api_code(self):
        """HTTP-style status code reported by the API."""
        return self.request['code']

    @property
    def api_status(self):
        """Status string reported by the API."""
        return self.request['status']

    def find(self):
        """Return matched ayah texts, capped at the requested limit if set."""
        # ``== None`` replaced with ``is None``; both branches collapsed.
        limit = self.data['count'] if self.req is None else self.req
        return [self.matches[num]['text'] for num in range(limit)]
| StarcoderdataPython |
3326410 | import argparse
import pathlib
import sys
from typing import List
from typing import Optional
import black
from lib3to6 import checker_base as cb
from lib3to6 import checkers
from lib3to6 import common
from lib3to6 import fixer_base as fb
from lib3to6 import fixers
from lib3to6 import transpile
# Lookup table from black's TargetVersion names (e.g. "PY36") to the enum
# members, used to translate the "--target-version" CLI value.
BLACK_TARGET_VERSIONS = {vs.name: vs for vs in black.TargetVersion}
def format_code(contents: str, black_mode: black.FileMode) -> str:
    """Format *contents* with black, returning the input unchanged on failure.

    Black errors must never break the downgrade step, so every exception is
    deliberately swallowed.
    """
    try:
        return black.format_str(contents, mode=black_mode)
    except Exception:
        return contents
def untype_source(
    files: List[pathlib.Path],
    pkg_path: pathlib.Path,
    target_version: str,
    checkers_list: List[str],
    fixers_list: List[str],
    skip_black_formatting: bool = False,
) -> int:
    """Transpile *files* to *target_version* into ``<pkg_path>/downgraded``.

    Each file is run through lib3to6 with the given checkers/fixers and
    (optionally) reformatted with black. Returns 1 when any output file was
    (re)written, 0 when everything was already up to date — suitable as a
    pre-commit style exit code.
    """
    exitcode = 0
    for src_file in files:
        relative = src_file.relative_to(pkg_path)
        destdir = pkg_path / "downgraded"
        destdir.mkdir(parents=True, exist_ok=True)
        # Make the downgraded directory an importable package.
        destdir.joinpath("__init__.py").touch()
        dest = destdir / relative
        dest.parent.mkdir(parents=True, exist_ok=True)
        if dest.exists():
            prev_contents = dest.read_text()
        else:
            prev_contents = None
        ctx = common.init_build_context(
            checkers=",".join(checkers_list),
            fixers=",".join(fixers_list),
            target_version=target_version,
            filepath=src_file.name,
        )
        source_text = src_file.read_text()
        try:
            fixed_source_text = transpile.transpile_module(ctx, source_text)
        except common.CheckError as err:
            # Prefix the error message with "<file>@<line>" so the failure
            # can be located, then re-raise the original exception.
            loc = str(src_file)
            if err.lineno >= 0:
                loc += "@" + str(err.lineno)
            err.args = (loc + " - " + err.args[0],) + err.args[1:]
            raise
        if skip_black_formatting is False:
            # Map e.g. "3.6" -> black.TargetVersion.PY36; fall back to
            # black's auto-detection when the version is unknown.
            black_target_version = BLACK_TARGET_VERSIONS.get(
                "PY{}".format(target_version.replace(".", ""))
            )
            if black_target_version:
                target_versions = [black_target_version]
            else:
                target_versions = None
            black_mode = black.FileMode(target_versions=target_versions, string_normalization=False)
            fixed_source_text = format_code(fixed_source_text, black_mode)
        if not prev_contents or prev_contents != fixed_source_text:
            print(f"Untyping {src_file} -> {dest}")
            dest.write_text(fixed_source_text)
            exitcode = 1
    return exitcode
def main(argv: Optional[List[str]] = None) -> None:
    """CLI entry point: parse arguments and run :func:`untype_source`.

    Always terminates via ``parser.exit``; the exit status is 1 when any
    file was rewritten or an argument was invalid, 0 otherwise.
    """
    if argv is None:
        argv = sys.argv[1:]
    # Discover all available lib3to6 checkers and fixers by introspection.
    checkers_list = list(transpile.get_available_classes(checkers, cb.CheckerBase))
    fixers_list = list(transpile.get_available_classes(fixers, fb.FixerBase))
    parser = argparse.ArgumentParser(prog=__name__)
    parser.add_argument(
        "--target-version",
        default="3.5",
        help="The target version to translate the source code into.",
    )
    parser.add_argument(
        "--pkg-path",
        type=pathlib.Path,
        required=True,
        help="Path to package. For example, `--pkg-source=src/mypackage`",
    )
    parser.add_argument("--list-checkers", action="store_true")
    parser.add_argument(
        "--sc",
        "--skip-checker",
        dest="skip_checkers",
        action="append",
        default=[],
        help="List checkers to skip. Check all of them by passing --list-checkers",
    )
    parser.add_argument("--list-fixers", action="store_true")
    parser.add_argument(
        "--sf",
        "--skip-fixer",
        dest="skip_fixers",
        action="append",
        default=[],
        help="List fixers to skip. Check all of them by passing --list-fixers",
    )
    parser.add_argument(
        "--no-black",
        action="store_true",
        default=False,
        help="Don't format the 'downgraded' code with Black.",
    )
    parser.add_argument(
        "files",
        nargs="*",
        type=pathlib.Path,
        default=[],
        help="Space separated list of files.",
    )
    options = parser.parse_args(argv)
    # The two --list-* flags print the discovered names and exit early.
    if options.list_fixers:
        parser.exit(
            status=0,
            message="Fixers List:\n{}\n".format("\n".join([f" - {item}" for item in fixers_list])),
        )
    if options.list_checkers:
        parser.exit(
            status=0,
            message="Checkers List:\n{}\n".format(
                "\n".join([f" - {item}" for item in checkers_list])
            ),
        )
    # Validate every skip request against the discovered names.
    for checker in options.skip_checkers:
        if checker not in checkers_list:
            parser.exit(
                status=1,
                message=f"{checker} is not a valid checker. Pass --list-checkers for the full allowed list",
            )
    for fixer in options.skip_fixers:
        if fixer not in fixers_list:
            parser.exit(
                status=1,
                message=f"{fixer} is not a valid fixer. Pass --list-fixers for the full allowed list",
            )
    if not options.files:
        parser.exit(status=1, message="No files were passed")
    exitcode = untype_source(
        files=options.files,
        pkg_path=options.pkg_path,
        target_version=options.target_version,
        checkers_list=[ck for ck in checkers_list if ck not in options.skip_checkers],
        fixers_list=[fx for fx in fixers_list if fx not in options.skip_fixers],
        skip_black_formatting=options.no_black is True,
    )
    parser.exit(status=exitcode)
# Allow running this module as a script: delegate to main() with the CLI args.
if __name__ == "__main__":
    main(sys.argv[1:])
| StarcoderdataPython |
49230 | <gh_stars>1-10
from .heroku_client import HerokuClient
__version__ = "1.2.0"
__all__ = ["HerokuClient"]
| StarcoderdataPython |
11212010 | '''Version 0.32'''
import json
import csv
import glob
import sys
import importlib
from pprint import pprint
from collections import Counter
# init is an optional flag to indicate you're starting
# over; old autograder results are written over and column
# headers are printed to the file.
team = "0"
init = False
# Any CLI arg equal to "init" sets the init flag; any other arg is taken
# as the team number (last one wins).
for arg in sys.argv:
    if arg == "init":
        init = True
    else:
        team = arg
# Dynamically load the team's submission package, e.g. Team3.recipe_api.
api = importlib.import_module("Team%s.recipe_api" % team)
def check_tools(answer, stud):
score = 0
expans = dict([[a, a.split()] for a in answer])
for s in stud:
if s in answer:
print s
score += 1
answer.remove(s)
stud.remove(s)
expans = dict([[a, {'words': a.split(), 'matches': Counter()}] for a in answer])
expstud = dict([[a, a.split()] for a in stud])
for s in expstud:
tmpscore = -1
for word in expans:
complement = set(expstud[s]) ^ set(expans[word]['words'])
intersection = set(expstud[s]) & set(expans[word]['words'])
newscore = float(len(intersection))/(len(intersection)+len(complement))
print "%s, %s, %d, %d, %f" % (s, word, len(intersection), len(complement), newscore)
if newscore > tmpscore:
tmpscore = newscore
tmpmatch = word
if tmpscore > 0:
expans[tmpmatch]['matches'][s] = tmpscore
stud.remove(s)
for word in expans:
match = expans[word]['matches'].most_common(1)
if len(match) > 0:
score += expans[word]['matches'].most_common(1)[0][1]
return score
def check_ingredients(answer, stud):
    """Score the student's ingredient list against the answer key,
    field by field, capping each ingredient at its 'max' score.

    ``answer`` and ``stud`` are parallel lists of ingredient dicts.
    """
    scores = []
    score = 0
    # Only compare up to the shorter of the two lists.
    for x in range(min([len(answer), len(stud)])):
        for ind in ['name', 'measurement', 'quantity', 'descriptor', 'preparation', 'prep-description']:
            if ind in stud[x]:
                print "\nYour answer: %s"%str(stud[x][ind])
                print "Valid answers: %s"%str(answer[x][ind])
                if ind == 'quantity':
                    # Quantities may be strings or numbers; numeric values
                    # are compared after rounding to 2 decimal places.
                    flag = False
                    for val in answer[x][ind]:
                        if type(stud[x][ind]) is str:
                            if val == stud[x][ind]:
                                flag = True
                        elif val == stud[x][ind]:
                            flag = True
                        elif float('%.2f'%stud[x][ind]) == val:
                            flag = True
                    if flag:
                        score += 1
                    else:
                        # NOTE(review): indentation was lost in extraction;
                        # printing "Match!" on the non-match branch looks
                        # inverted -- confirm against the original source.
                        print "Match!"
                elif stud[x][ind] in answer[x][ind]:
                    score += 1
        # Cap the per-ingredient score at the key's declared maximum.
        scores.append(min([score, answer[x]['max']]))
        print "Score: %s\n---"%str(scores[-1])
        score = 0
    return sum(scores)
def get_file(fn):
    """Load and return the JSON document stored at path *fn*."""
    with open(fn, 'r') as handle:
        return json.load(handle)
def main(team, init=False):
    """Pass 'init' as a command line variable if this is your
    first time running the program and you want it to print the
    column headers to the file.

    Runs the team's autograder over every answer key in Recipes/*.json,
    accumulates per-category scores, and appends one row per team to
    parsegrades.csv.
    """
    keys = ['ingredients', 'primary cooking method', 'cooking methods', 'cooking tools']
    if init:
        # Fresh run: truncate the results file and write the header row.
        with open('parsegrades.csv', 'wb') as csvfile:
            csvwriter = csv.writer(csvfile, delimiter='\t')
            csvwriter.writerow(keys)
    scores = Counter(dict(zip(keys, [0]*len(keys))))
    cnt = 1
    for answer in (get_file(fn) for fn in glob.iglob('Recipes/*.json')):
        # Call the student's autograder entry point on the recipe URL.
        stud = getattr(api, "autograder")(answer['url'])
        temp = Counter(dict(zip(keys, [0]*len(keys))))
        # Accept either a JSON string or an already-parsed dict.
        if type(stud) == str:
            stud = json.loads(stud)
        if type(stud) == dict:
            # Tool/method scores are normalized by the key's max score.
            temp['cooking tools'] = min([check_tools(answer['cooking tools'], stud['cooking tools']), answer['max']['cooking tools']])/float(answer['max']['cooking tools'])
            temp['cooking methods'] = min([check_tools(answer['cooking methods'], stud['cooking methods']), answer['max']['cooking methods']])/float(answer['max']['cooking methods'])
            if stud['primary cooking method'] == answer['primary cooking method']:
                temp['primary cooking method'] = 1
            stud = stud['ingredients']
            temp['ingredients'] = check_ingredients(answer['ingredients'], stud)/float(answer['max']['ingredients'])
            scores += temp
            print "%s\t%s\t%s\t%s\t%s" % ("Recipe", 'Ingredients', 'Primary Method', 'Methods', 'Tools')
            print "Recipe %d:\t%.3f\t%d\t%.3f\t%.3f" % (cnt, temp['ingredients'], temp['primary cooking method'], temp['cooking methods'], temp['cooking tools'])
            cnt += 1
        else:
            print "student answer formatting error"
    # Append the team's cumulative scores to the results file.
    row = ["Team %s" % team]
    row.extend([scores[k] for k in keys])
    with open('parsegrades.csv', 'ab') as csvfile:
        csvwriter = csv.writer(csvfile, delimiter='\t')
        csvwriter.writerow(row)
# Entry point: team and init were parsed from sys.argv at import time above.
if __name__ == '__main__':
    main(team, init)
| StarcoderdataPython |
6600013 | import os
import time
import yaml
import datetime
import linecache
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pymatgen import MPRester
from pymatgen.io.cif import CifWriter
from diffpy.Structure import loadStructure
from diffpy.srreal.structureadapter import nosymmetry
from diffpy.srreal.pdfcalculator import DebyePDFCalculator
from diffpy.srreal.pdfcalculator import PDFCalculator
#from pdf_lib.glbl import glbl
#from glbl import glbl
class PdfLibBuild(object):
    ''' a class to look up cif data and calculate pdf automatically

    Parameters:
    -----------
    API_key : str
        user-id-like key generated from the Materials Project
    lib_dir : str
        directory (under the user's home) where cif and pdf_data are stored
    '''
    def __init__(self, API_key, lib_dir=None):
        # set up API_key; constructing an MPRester validates it early
        self.API_key = API_key
        test_m = MPRester(API_key)
        print('You are using %s as API key' % API_key)
        # create working dir under the user's home directory
        if not lib_dir:
            lib_dir = time.strftime('PDF_Lib_%Y-%m-%d')
        working_dir = os.path.expanduser('~/' + lib_dir)
        self.working_dir = working_dir
        self._makedirs(working_dir)
        print('Lib dir %s has been built' % working_dir)
        # output lib directories; populated by cif_lib_build/gr_lib_build
        self.data_dir = None
        self.cif_dir = None  # overwrite it later
        self.output_dir = None  # overwrite it later
        self.crystal_system = None  # overwrite it later

    def get_symbol_list(self, crystal_system):
        ''' Get space-group short symbols for one crystal system,
        scraped from the bundled Space_group_ref.txt reference file.
        '''
        ref = "Space_group_ref.txt"
        ind_list = []
        ref_path = os.path.join(os.path.dirname(__file__), ref)  # relative path
        if os.path.isfile(ref_path):
            print('open {} as reference'.format(ref_path))
        # The symbol line sits two lines above the crystal-system line
        # in the reference file; collect those indices first.
        with open(ref_path, 'r') as f:
            for ind, line in enumerate(f):
                if crystal_system in line:
                    ind_list.append(ind-2)
        symb_list = []
        for ind in ind_list:
            read = linecache.getline(ref_path, ind)
            # The symbol is the text between the two double quotes.
            position_ind = [ind for ind, x in enumerate(read) if x == '"']
            if position_ind:
                head = min(position_ind)
                tail = max(position_ind)
                symb = read[head+1:tail]
                symb_list.append(symb)
            else:
                pass
        return symb_list

    def _makedirs(self, path_name):
        '''Create path_name if missing (python2-compatible makedirs).'''
        if os.path.isdir(path_name):
            pass
        else:
            os.makedirs(path_name)

    def cif_lib_build(self, crystal_system, size_limit=None):
        ''' function to build cif and pdf library based on space group symbol

        Parameters
        ----------
        crystal_system: str
            name of crystal system. It is capitalized, like CUBIC.
            space group symbols will be generated by get_symbol_list method
        size_limit : int
            optional. Upper limit of data sets pulled out per symbol
        '''
        self.crystal_system = crystal_system
        space_group_symbol = self.get_symbol_list(crystal_system)
        if isinstance(space_group_symbol, list):
            space_group_symbol_set = space_group_symbol
        else:
            # FIX: was list(spac_group_symbol) -- a NameError typo
            space_group_symbol_set = list(space_group_symbol)
        ## changing dir
        data_dir = os.path.join(self.working_dir, crystal_system)
        self.data_dir = data_dir
        self._makedirs(data_dir)
        os.chdir(data_dir)
        if os.getcwd() == data_dir:
            print('Library will be built at %s' % data_dir)
        else:
            e = 'Werid, return'
            raise RuntimeError(e)
        # summary lists
        missed_list = []  # symbols with no query result
        m_id_list = []  # material ids already fetched
        time_info = time.strftime('%Y-%m-%d')
        # create dirs, cif and calculated dir
        cif_dir = os.path.join(data_dir, 'cif_data')
        self._makedirs(cif_dir)
        self.cif_dir = cif_dir
        # looping
        for space_group_symbol in space_group_symbol_set:
            print('Building library with space_group symbol: {}'.format(space_group_symbol))
            ## search query
            m = MPRester(self.API_key)
            search = m.query(criteria={"spacegroup.symbol": space_group_symbol},
                             properties=["material_id"])
            if search:
                # FIX: was a hard-coded 400 that both ignored the caller's
                # limit and could exceed len(search) -> IndexError.
                if size_limit:
                    dim = min(len(search), size_limit)
                else:
                    dim = len(search)
                print('Pull out %s data sets' % dim)
                print('Now, starts to save cif and compute pdf...')
                for i in range(dim):
                    # part 1: grab cif files from data base
                    m_id = search[i]['material_id']
                    m_id_list.append(m_id)
                    m_struc = m.get_structure_by_material_id(m_id)
                    m_formula = m_struc.formula
                    m_name = m_formula.replace(' ', '')  # material name
                    cif_w = CifWriter(m_struc)
                    cif_name = '{}_{}.cif'.format(space_group_symbol, m_name)
                    cif_w_name = os.path.join(cif_dir, cif_name)
                    if os.path.isfile(cif_w_name):
                        print('already have {}, skip'.format(cif_name))
                        pass  # skip files already exist
                    else:
                        cif_w.write_file(cif_w_name)
                        print('{} has been saved'.format(cif_name))
            else:
                print('Hmm, no reasult. Something wrong')
                missed_list.append(space_group_symbol)
                pass
        m_id_list_name = '{}_{}_material_id.txt'.format(crystal_system, time_info)
        m_id_list_w_name = os.path.join(data_dir, m_id_list_name)
        # FIX: material ids are strings ('mp-1234'); savetxt needs fmt="%s"
        # or it raises on a non-numeric dtype.
        np.savetxt(m_id_list_w_name, m_id_list, fmt="%s")
        print('''SUMMARY: for {} cystsal sytem,
Symbols {} can't be found from data base'''.format(crystal_system, missed_list))
        return cif_dir

    def gr_lib_build(self, cif_lib_path):
        ''' method to calculate G(r) based on path of cif library located at.

        Paramters of G(r) calculation are set via glbl.<attribute>, one can tune it based on purpose of building library.
        After entire method, text file contains all G(r), space_group_symbol and material name will be saved respectively

        Parameters
        ----------
        cif_lib_path : str
            path to lib of cif files
        '''
        el_list = []  # data column
        space_group_symbol_list = []  # reference for searches done in the past
        # set up calculation environment
        # NOTE(review): `glbl` is referenced below but its import is
        # commented out at the top of this module -- confirm the intended
        # import (`from pdf_lib.glbl import glbl`) before running.
        pdf = PDFCalculator()
        pdf.rstep = glbl.rstep
        cfg = {'qmin': glbl.q_min, 'qmax': glbl.q_max, 'rmin': glbl.r_min, 'rmax': glbl.r_max}
        Bisoequiv = glbl.Bisoequiv  # FIXME: current value = 0.5, need to figure out the most suitable value
        print('====Parameter used in this PDF calculator is: {}===='.format(cfg))
        print('====Bisoequiv used in this PDF calculator is: {}===='.format(Bisoequiv))
        # step 1: list cif dir
        output_dir = os.path.join(self.data_dir, 'lib_data')
        self._makedirs(output_dir)
        self.output_dir = output_dir
        cif_f_list = [f for f in os.listdir(self.cif_dir)]
        # seed the result array from the first structure, since numpy
        # cannot vstack onto a truly empty array
        struc = loadStructure(os.path.join(self.cif_dir, cif_f_list[0]))
        struc.Bisoequiv = Bisoequiv
        (r, g) = pdf(nosymmetry(struc), **cfg)
        r_grid = np.array(r)  # data x-axis
        gr_list = np.empty_like(np.array(g))  # data y-axis
        for cif in cif_f_list:
            # part 2: calculate PDF with diffpy
            struc = loadStructure(os.path.join(self.cif_dir, cif))
            struc.Bisoequiv = Bisoequiv
            (r, g) = pdf(struc, **cfg)
            print('Finished calculation of G(r) on {}'.format(cif))
            # file name convention is '<symbol>_<material>.cif'
            sep = cif.index('_')
            space_group_symbol = cif[:sep]
            m_name = cif[sep+1:]
            # part 3: save data
            gr_list = np.vstack((gr_list, g))
            space_group_symbol_list.append(space_group_symbol)
            el_list.append(m_name)
        time_info = time.strftime('%Y-%m-%d')
        gr_list_name = '{}_{}_Gr'.format(self.crystal_system, time_info)
        gr_list_w_name = os.path.join(output_dir, gr_list_name)
        print('Saving {}'.format(gr_list_w_name))
        np.save(gr_list_w_name, gr_list)
        r_grid_name = '{}_{}_rgrid'.format(self.crystal_system, time_info)
        r_grid_w_name = os.path.join(output_dir, r_grid_name)
        np.save(r_grid_w_name, r)
        space_group_symbol_list_name = '{}_{}_SpaceGroupSymbol'.format(self.crystal_system, time_info)
        space_group_symbol_list_w_name = os.path.join(output_dir, space_group_symbol_list_name)
        np.save(space_group_symbol_list_w_name, space_group_symbol_list)
        el_list_name = '{}_{}_Element'.format(self.crystal_system, time_info)
        el_list_w_name = os.path.join(output_dir, el_list_name)
        np.save(el_list_w_name, el_list)
        print('''====SUMMARY====:
for {} cystsal sytem,
Number of cif pulled out and G(r) calculated is {}'''.format(self.crystal_system, np.shape(gr_list)))
        return gr_list
| StarcoderdataPython |
88720 | <reponame>otovo/python-sanity-html<gh_stars>10-100
from __future__ import annotations
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, cast
from portabletext_html.utils import get_default_marker_definitions
if TYPE_CHECKING:
from typing import Literal, Optional, Tuple, Type, Union
from portabletext_html.marker_definitions import MarkerDefinition
@dataclass(frozen=True)
class Span:
    """Class representation of a Portable Text span.

    A span is the standard way to express inline text within a block.
    """

    _type: Literal['span']
    text: str  # the literal inline text content
    _key: Optional[str] = None  # unique key assigned by the CMS, if any
    marks: list[str] = field(default_factory=list)  # keys that correspond with block.mark_definitions
    style: Literal['normal'] = 'normal'  # spans only ever carry the 'normal' style
@dataclass
class Block:
    """Class representation of a Portable Text block.

    A block is what's typically recognized as a section of a text, e.g. a paragraph or a heading.

    listItem and markDefs are camelCased to support dictionary unpacking.
    """

    _type: Literal['block']
    _key: Optional[str] = None
    style: Literal['h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'blockquote', 'normal'] = 'normal'
    level: Optional[int] = None  # nesting level for list items
    listItem: Optional[Literal['bullet', 'number', 'square']] = None
    children: list[dict] = field(default_factory=list)  # raw span dicts
    markDefs: list[dict] = field(default_factory=list)  # annotation mark definitions
    marker_definitions: dict[str, Type[MarkerDefinition]] = field(default_factory=dict)
    marker_frequencies: dict[str, int] = field(init=False)  # computed in __post_init__

    def __post_init__(self) -> None:
        """
        Add custom fields after init.

        To make handling of span `marks` simpler, we define marker_definitions as a dict, from which
        we can directly look up both annotation marks or decorator marks.
        """
        marker_definitions = get_default_marker_definitions(self.markDefs)
        # user-supplied definitions override the defaults
        marker_definitions.update(self.marker_definitions)
        self.marker_definitions = marker_definitions
        self.marker_frequencies = self._compute_marker_frequencies()

    def _compute_marker_frequencies(self) -> dict[str, int]:
        # NOTE(review): the first occurrence of a mark is recorded as 0 and
        # each later occurrence increments, so the stored value is
        # (occurrences - 1). This looks like an off-by-one but may be the
        # contract consumers rely on -- confirm against callers before changing.
        counts: dict[str, int] = {}
        for child in self.children:
            for mark in child.get('marks', []):
                if mark in counts:
                    counts[mark] += 1
                else:
                    counts[mark] = 0
        return counts

    def get_node_siblings(self, node: Union[dict, Span]) -> Tuple[Optional[dict], Optional[dict]]:
        """Return the sibling nodes (prev, next) to the given node."""
        if not self.children:
            return None, None
        try:
            if type(node) == dict:
                node = cast(dict, node)
                node_idx = self.children.index(node)
            elif type(node) == Span:
                node = cast(Span, node)
                # Spans are located by text equality against the raw child dicts.
                for index, item in enumerate(self.children):
                    if 'text' in item and node.text == item['text']:
                        # Is it possible to handle several identical texts?
                        node_idx = index
                        break
            else:
                raise ValueError(f'Expected dict or Span but received {type(node)}')
        except ValueError:
            # node not found among children
            return None, None

        prev_node = None
        next_node = None

        if node_idx != 0:
            prev_node = self.children[node_idx - 1]
        if node_idx != len(self.children) - 1:
            next_node = self.children[node_idx + 1]

        return prev_node, next_node
| StarcoderdataPython |
8005477 | <gh_stars>10-100
__author__ = 'shukkkur'
'''
https://codeforces.com/problemset/problem/148/A
'''
# Read the four dragon intervals and the total number of princesses.
k = int(input())
l = int(input())
m = int(input())
n = int(input())
d = int(input())

# A princess numbered `day` is taken if her number is divisible by any
# of the four intervals; count them directly and print the total.
damaged = sum(
    1
    for day in range(1, d + 1)
    if day % k == 0 or day % l == 0 or day % m == 0 or day % n == 0
)
print(damaged)
| StarcoderdataPython |
100745 | import json
import logging
import shutil
import requests
from spreads.vendor.pathlib import Path
from spreadsplug.web import task_queue
from util import find_stick, mount_stick
from persistence import get_workflow, save_workflow
logger = logging.getLogger('spreadsplug.web.tasks')
@task_queue.task()
def transfer_to_stick(workflow_id):
    """Background task: copy a finished workflow's directory onto a
    removable USB stick, updating the workflow's step/step_done state."""
    stick = find_stick()
    workflow = get_workflow(workflow_id)
    with mount_stick(stick) as p:
        workflow.step = 'transfer'
        workflow.step_done = False
        # Filter out problematic characters
        clean_name = (workflow.path.name.replace(':', '_')
                      .replace('/', '_'))
        target_path = Path(p)/clean_name
        # Re-transferring: wipe any previous copy first.
        if target_path.exists():
            shutil.rmtree(unicode(target_path))
        try:
            shutil.copytree(unicode(workflow.path), unicode(target_path))
        except shutil.Error as e:
            # Error 38 means that some permissions could not be copied, this is
            # expected behaviour for filesystems like FAT32 or exFAT, so we
            # silently ignore it here, since the actual data will have been
            # copied nevertheless.
            if any("[Errno 38]" not in exc for src, dst, exc in e[0]):
                raise e
        workflow.step_done = True
| StarcoderdataPython |
1900624 | #
# This file is subject to the terms and conditions defined in the
# file 'LICENSE', which is part of this source code package.
#
# Copyright (c) 2018 <NAME> - All Rights Reserved.
#
import os
import sqlite3
from salty_orm.db.base_provider import BaseDBConnection, NotConnectedError, ConnectionFailedError, \
ExecStatementFailedError, InvalidStatementError
def dict_factory(cursor, row):
    """
    sqlite3 row_factory that maps each result row to a
    {column_name: value} dict, so fields can be looked up by name.
    """
    column_names = (description[0] for description in cursor.description)
    return dict(zip(column_names, row))
class SqliteDBConnection(BaseDBConnection):
    """
    Used by the system service code base to connect to the sandtrap.db
    """

    _db_path = None  # Path to sqlite3 database
    provider = 'sqlite3'

    @staticmethod
    def _params(args):
        """Turn the optional args dict into a positional parameter tuple.

        Fixes a crash in db_exec/db_exec_stmt/db_exec_commit: the previous
        code called args.values() unconditionally, raising AttributeError
        whenever the documented default of args=None was used.
        """
        return tuple(args.values()) if args else ()

    def __del__(self):
        # Best-effort cleanup when the wrapper is garbage collected.
        self.db_close()

    def db_connect(self, alt_db_path=None) -> bool:
        """
        Connect to a local sqlite3 database

        :param alt_db_path: Alternate database path to use besides hardcoded path
        :return: True if connected otherwise False
        """
        db_path = self._db_path
        if alt_db_path:
            db_path = alt_db_path

        if os.path.exists(db_path):
            self._db_path = db_path
        else:
            raise FileNotFoundError('database path not found ({0})'.format(db_path))

        try:
            self._handle = sqlite3.connect(db_path)
            # rows come back as {column: value} dicts
            self._handle.row_factory = dict_factory
            self._connected = True
            return True
        except Exception as e:
            raise NotConnectedError("connection attempt to database failed")

    def db_close(self):
        """
        Disconnect from the local sqlite3 database if we are connected
        """
        if self._connected and self._handle:
            self._handle.close()
        self._connected = False
        self._handle = None

    def db_connected(self) -> bool:
        """
        Return the connection state

        :return: True if connected otherwise False
        """
        return self._connected

    def db_test_connection(self) -> bool:
        """
        Test the connection for connection errors with a simple Select statement

        :return: True if connection is good
        :raises NotConnectedError: if not connected
        :raises ConnectionFailedError: if the test statement fails
        """
        if self.db_connected() is False:
            raise NotConnectedError("not connected to a database")

        try:
            sql = 'SELECT * FROM sqlite_master LIMIT 1;'
            cursor = self._handle.cursor()
            cursor.execute(sql)
            cursor.close()
        except Exception as e:
            raise ConnectionFailedError(e)

        return True

    def db_exec(self, stmt: str, args: dict=None) -> bool:
        """Execute a statement (no result set) and commit."""
        if self.db_connected() is False:
            raise NotConnectedError("not connected to a database")

        try:
            cursor = self._handle.cursor()
            cursor.execute(stmt, self._params(args))
            cursor.close()
            self._handle.commit()
        except Exception as e:
            raise ExecStatementFailedError(e)

        return True

    def db_commit(self) -> bool:
        """
        Call database commit
        """
        if self.db_connected() is False:
            raise NotConnectedError("not connected to a database")

        try:
            self._handle.commit()
        except Exception as e:
            raise ExecStatementFailedError(e)

        return True

    def db_exec_stmt(self, stmt: str, args: dict=None) -> dict:
        """
        Execute a select statement

        :param stmt: sql statement
        :param args: argument list
        :return: list of row dicts (possibly empty)
        """
        if self.db_connected() is False:
            raise NotConnectedError("not connected to a database")

        if not stmt:
            raise InvalidStatementError('sql statement is missing')

        try:
            cursor = self._handle.cursor()
            cursor.execute(stmt, self._params(args))
            data = cursor.fetchall()
            cursor.close()
            return data
        except Exception as e:
            raise ExecStatementFailedError(e)

    def db_exec_select_by_id_all(self, table: str, pk: int) -> dict:
        """Select all columns of one record by primary key."""
        if self.db_connected() is False:
            raise NotConnectedError("not connected to a database")
        tmp_l = list()
        tmp_l.append(pk)
        return self.db_exec_select("SELECT * FROM {0} WHERE id = ?".format(table), tuple(tmp_l))

    def db_exec_commit(self, stmt: str, args: dict=None) -> int:
        """
        Execute sql statement and commit

        :return: lastrowid of the insert, or 1 when unavailable
        """
        if self.db_connected() is False:
            raise NotConnectedError("not connected to a database")

        if not stmt:
            raise InvalidStatementError('sql statement is missing')

        try:
            cursor = self._handle.cursor()
            cursor.execute(stmt, self._params(args))
            lastrowid = cursor.lastrowid
            self._handle.commit()
            cursor.close()
            return lastrowid if lastrowid else 1
        except Exception as e:
            raise ExecStatementFailedError(e)

    def db_attach_database(self, alias: str, db_path: str=None) -> bool:
        """
        Attach a database to the current connection with the specified alias

        :param alias: alias of connected database
        :param db_path: path to database
        :return: True if attached, otherwise false
        """
        if self.db_connected() is False:
            raise NotConnectedError("not connected to a database")

        if not db_path or not os.path.exists(db_path):
            raise NotConnectedError("database path given to attach is invalid")

        stmt = "ATTACH DATABASE ? AS ?"
        # FIX: the alias parameter previously received db_path, so the
        # database was attached under the wrong schema name.
        args = {"db_path": db_path, "alias": alias}
        return self.db_exec(stmt, args)

    def db_detach_database(self, alias: str) -> bool:
        """
        Detach an attached database on the current connection

        :param alias: alias of connected database
        :return: True if attached, otherwise false
        """
        if self.db_connected() is False:
            raise NotConnectedError("not connected to a database")

        sql = 'DETACH DATABASE ?'
        args = {"alias": alias}
        return self.db_exec(sql, args)

    def db_get_table_record_count(self, table: str):
        """ return the record count of a table """
        if self.db_connected() is False:
            raise NotConnectedError("not connected to a database")

        data = self.db_exec_select("SELECT COUNT(1) AS count FROM {0}".format(table))
        if data:
            return int(data['count'])
        return 0

    def db_get_record_info(self, fields, table: str, pk: int):
        """ get the given fields of a table record by primary key """
        tmp_l = list()
        tmp_l.append(pk)
        data = self.db_exec_select("SELECT {0} FROM {1} WHERE id = ?".format(fields, table), tuple(tmp_l))
        if data:
            return data
        return None

    def db_get_all_info(self, table: str, fields: str=None):
        """ get the id, modified and optional extra fields of a table """
        # (removed a dead tmp_l list that appended the builtin `id`)
        if fields:
            fields = '"id", "modified", {0}'.format(fields)
        else:
            fields = '"id", "modified"'
        data = self.db_exec_select("SELECT {0} FROM {1} ORDER BY id".format(fields, table))
        if data:
            return data
        return None

    def db_get_record_all(self, table: str, pk: str):
        """ get all the fields of a table record """
        tmp_l = list()
        tmp_l.append(pk)
        data = self.db_exec_select("SELECT * FROM {0} WHERE id = ?".format(table), tuple(tmp_l))
        if data:
            return data
        return None
| StarcoderdataPython |
8012505 | <filename>src/utils.py
import logging
import os
import re
import sqlite3
import subprocess
import time
import uuid
import ppadb.client
import ppadb.command.serial
import ppadb.device
REGEX_USER: re.Pattern = re.compile(r"UserInfo\{([0-9]*):([a-zA-Z ]*):.*")
REGEX_FOCUS: re.Pattern = re.compile(r"mCurrentFocus.*com.Psyonix.RL2D.*")
REGEX_V4L2: re.Pattern = re.compile(r"v4l2loopback ")
PSYONIX_PACKAGE_NAME: str = "com.Psyonix.RL2D"
PSYONIX_ACTIVITY_NAME: str = "com.epicgames.ue4.SplashActivity"
ADB_HOST: str = "127.0.0.1"
ADB_PORT: int = 5037
DELAY_DETECT: int = 1
DATA_FOLDER = "data/"
UserList = list[tuple[int, str]]
def tap(pos) -> None:
    # Simulate a screen tap at (x, y) via the module-level `device`
    # (populated by connect_adb()).
    device.shell(f"input tap {pos[0]} {pos[1]}")
def motion(pos, type) -> None:
    # Send a raw motion event (e.g. DOWN/UP/MOVE) at (x, y); `type` is the
    # adb "input motionevent" action string.
    device.shell(f"input motionevent {type} {pos[0]} {pos[1]}")
def connect_adb() -> ppadb.device.Device:
    """Connect to the local adb server and select the single attached
    device, storing it in the module-level `device` global."""
    client: ppadb.client.Client = ppadb.client.Client(host=ADB_HOST, port=ADB_PORT)
    devices: list[ppadb.device.Device] = client.devices()
    devices_names: list[ppadb.command.serial.Serial] = list(map(lambda d: d.serial, devices))
    logging.debug(f"detected devices: {devices_names}")
    global device
    if len(devices) == 1:
        device = devices[0]
    elif len(devices) == 0:
        print("No device connected")
        exit(1)
    else:
        # Multiple devices: interactive selection not implemented yet.
        print("Select a device :")
        raise NotImplementedError  # TODO
    logging.debug(f"selected device: {device.serial}")
    return device
def get_users(device: ppadb.device.Device) -> UserList:
    """Return the Android users on the device as (id, name) pairs,
    parsed from `pm list users` output via REGEX_USER."""
    device_users: str = device.shell("pm list users")
    users: UserList = REGEX_USER.findall(device_users)
    logging.debug(f"detected users: {users}")
    return users
def detect_game(device: ppadb.device.Device, users: UserList) -> UserList:
    """Filter `users` down to those that have the Psyonix game installed
    (pm path prints a `package:` line when the package exists)."""
    playable_users: UserList = list()
    for (id, name) in users:
        if "package:" in device.shell(f"pm path --user {id} {PSYONIX_PACKAGE_NAME}"):
            playable_users.append((id, name))
    logging.debug(f"playable users: {playable_users}")
    return playable_users
def start_game(device: ppadb.device.Device, users: UserList) -> None:
    """Launch the game's splash activity for the single playable user."""
    if len(users) == 1:
        (user_id, user_name) = users[0]
    elif len(users) == 0:
        print("No Playable users detected")
        exit(1)
    else:
        # Multiple playable users: interactive selection not implemented yet.
        print("Select a user to game with :")
        raise NotImplementedError  # TODO
    logging.debug(f"selected user: {user_name}")
    device.shell(f"am start --user {user_id} -n {PSYONIX_PACKAGE_NAME}/{PSYONIX_ACTIVITY_NAME}")
    logging.debug("game activity started")
def is_zen_mode(device: ppadb.device.Device) -> bool:
    # Do-Not-Disturb state: the settings shell command echoes "1\n" when on.
    return device.shell("settings get global zen_mode") == "1\n"
def is_focused(device: ppadb.device.Device) -> bool:
    # True when the game window holds focus (REGEX_FOCUS matches the
    # mCurrentFocus line of the activity dump).
    activity_dump = device.shell("dumpsys activity activities")
    result = REGEX_FOCUS.search(activity_dump)
    return result is not None
def is_v4l2_loaded() -> bool:
    # True when the v4l2loopback kernel module shows up in lsmod on the
    # host machine (str() of the bytes output is enough for the regex).
    lsmod = str(subprocess.check_output("lsmod"))
    result = REGEX_V4L2.search(lsmod)
    return result is not None
def start_scrpy() -> None:
    # Start scrcpy in the background, mirroring the device into the
    # /dev/video2 v4l2 loopback sink without displaying a window (-N).
    subprocess.Popen(
        "scrcpy --v4l2-sink=/dev/video2 --disable-screensaver -N -m 512".split(),
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
    )
def startup(need_focus=True) -> None:
    """Full bring-up: load the v4l2 loopback module, connect adb, launch
    the game, optionally wait for DND + focus + module readiness, then
    start the scrcpy mirror."""
    subprocess.call(("sudo modprobe v4l2loopback").split())
    device = connect_adb()
    users = get_users(device)
    users = detect_game(device, users)
    start_game(device, users)
    # Poll until the environment is ready (or immediately if not required).
    while need_focus and not (is_zen_mode(device) and is_focused(device) and is_v4l2_loaded()):
        time.sleep(DELAY_DETECT)
    time.sleep(DELAY_DETECT)
    start_scrpy()
def screenshot(filename=None, folder=DATA_FOLDER) -> uuid.UUID:
    """Capture one cropped frame from the v4l2 loopback device into
    ``folder/<filename>.jpg`` and return the filename used.

    When *filename* is omitted a fresh UUID is generated.
    """
    if filename is None:  # idiom fix: was `== None`
        filename = uuid.uuid1()
    subprocess.Popen(
        # FIX: the output path previously ignored `filename` entirely
        # (a literal placeholder), so every capture overwrote one file.
        f"ffmpeg -f video4linux2 -i /dev/video2 -frames:v 1 "
        f"-filter:v crop=400:150:45:30 -y {folder}/{filename}.jpg".split(),
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
    )
    return filename
def insert(database: sqlite3.Connection, uuid: uuid.UUID, loadout: tuple, x_rotation: int, y_rotation: int) -> None:
    """Persist one labelled screenshot row into the ``data`` table.

    ``loadout`` is ``(wheel, (model, sticker), hat, team, primary_color,
    secondary_color)``; the column order written matches the original
    schema: uuid, model, team, colors, hat, sticker, wheel, rotations.
    """
    (wheel, (model, sticker), hat, team, primary_color, secondary_color) = loadout
    # Parameterized query instead of f-string interpolation: same row
    # written, but immune to SQL injection and quoting bugs.
    database.execute(
        "INSERT INTO data VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
        (str(uuid), model, team, primary_color, secondary_color,
         hat, sticker, wheel, x_rotation, y_rotation),
    )
    database.commit()
| StarcoderdataPython |
9735509 | <filename>app.py
import os
from flask import Flask
from twilio import twiml
import requests
# Declare and configure application
app = Flask(__name__, static_url_path='/static')
app.config.from_pyfile('local_settings.py')
app.config['API_PATH'] = \
"http://api.nytimes.com/svc/mostpopular/v2/"\
"mostviewed/all-sections/1.json?api-key="
# Specify Conference Room
# Specify Conference Room
@app.route('/conference/<conference_room>', methods=['POST'])
def voice(conference_room):
    """TwiML endpoint: dial the caller into the named conference room,
    playing /wait content while the conference assembles."""
    response = twiml.Response()
    with response.dial() as dial:
        dial.conference(conference_room, waitUrl="/wait")
    return str(response)
# Conference Room Hold Music Reading Headlines from New York Times
@app.route('/wait', methods=['POST'])
def waitUrl():
    """Hold content: read today's New York Times headlines to callers
    waiting for the conference; fall back to /music on any failure."""
    response = twiml.Response()
    if app.config['NYTIMES_API_KEY']:
        api_request = requests.get("%s%s" % (app.config['API_PATH'],
                                   app.config['NYTIMES_API_KEY']))
        if api_request.status_code == 200:
            json_response = api_request.json()
            if json_response:
                response.say("While you wait for your conference to connect," \
                    " here are today's headlines from the New York Times.",
                    voice="alice")
                # Read out each story abstract with a pause between them.
                for result in json_response['results']:
                    response.say(result['abstract'], voice='alice')
                    response.pause()
            else:
                response.say("Unable to parse result from New York Times API.")
                response.say("Check your configuration and logs.")
                response.redirect("/music")
        else:
            response.say("Unable to reach New York Times API.")
            response.say("Check your configuration and logs for the error.")
            response.redirect("/music")
    else:
        response.say("Configuration error: You need to set your New York " \
            "Times API Key environment variable. See the README for " \
            "more information.")
        response.redirect("/music")
    return str(response)
# In the event of a failure, deliver hold music.
# In the event of a failure, deliver hold music.
@app.route('/music', methods=['POST'])
def music():
    """Fallback hold content: a spoken line followed by an MP3 stream."""
    response = twiml.Response()
    response.say("Now, enjoy this normal hold music.")
    response.play("http://com.twilio.music.soft-rock.s3.amazonaws.com/"\
        "Fireproof_Babies_-_Melancholy_4_a_Sun-lit_day.mp3")
    return str(response)
# If PORT not specified by environment, assume development config.
if __name__ == '__main__':
    port = int(os.environ.get("PORT", 5000))
    if port == 5000:
        # Default port implies local development: enable the debugger.
        app.debug = True
    app.run(host='0.0.0.0', port=port)
| StarcoderdataPython |
5123902 | <filename>sdk/python/pulumi_google_native/artifactregistry/v1beta2/get_repository.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetRepositoryResult',
'AwaitableGetRepositoryResult',
'get_repository',
'get_repository_output',
]
@pulumi.output_type
class GetRepositoryResult:
def __init__(__self__, create_time=None, description=None, format=None, kms_key_name=None, labels=None, maven_config=None, name=None, update_time=None):
if create_time and not isinstance(create_time, str):
raise TypeError("Expected argument 'create_time' to be a str")
pulumi.set(__self__, "create_time", create_time)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if format and not isinstance(format, str):
raise TypeError("Expected argument 'format' to be a str")
pulumi.set(__self__, "format", format)
if kms_key_name and not isinstance(kms_key_name, str):
raise TypeError("Expected argument 'kms_key_name' to be a str")
pulumi.set(__self__, "kms_key_name", kms_key_name)
if labels and not isinstance(labels, dict):
raise TypeError("Expected argument 'labels' to be a dict")
pulumi.set(__self__, "labels", labels)
if maven_config and not isinstance(maven_config, dict):
raise TypeError("Expected argument 'maven_config' to be a dict")
pulumi.set(__self__, "maven_config", maven_config)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if update_time and not isinstance(update_time, str):
raise TypeError("Expected argument 'update_time' to be a str")
pulumi.set(__self__, "update_time", update_time)
@property
@pulumi.getter(name="createTime")
def create_time(self) -> str:
"""
The time when the repository was created.
"""
return pulumi.get(self, "create_time")
@property
@pulumi.getter
def description(self) -> str:
"""
The user-provided description of the repository.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def format(self) -> str:
"""
The format of packages that are stored in the repository.
"""
return pulumi.get(self, "format")
@property
@pulumi.getter(name="kmsKeyName")
def kms_key_name(self) -> str:
"""
The Cloud KMS resource name of the customer managed encryption key that’s used to encrypt the contents of the Repository. Has the form: `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. This value may not be changed after the Repository has been created.
"""
return pulumi.get(self, "kms_key_name")
    @property
    @pulumi.getter
    def labels(self) -> Mapping[str, str]:
        """
        Labels with user-defined metadata. This field may contain up to 64 entries. Label keys and values may be no longer than 63 characters. Label keys must begin with a lowercase letter and may only contain lowercase letters, numeric characters, underscores, and dashes.
        """
        # Reads the value stashed under this key by __init__ via pulumi.set().
        return pulumi.get(self, "labels")
    @property
    @pulumi.getter(name="mavenConfig")
    def maven_config(self) -> 'outputs.MavenRepositoryConfigResponse':
        """
        Maven repository config contains repository level configuration for the repositories of maven type.
        """
        # Reads the value stashed under this key by __init__ via pulumi.set().
        return pulumi.get(self, "maven_config")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the repository, for example: "projects/p1/locations/us-central1/repositories/repo1".
        """
        # Reads the value stashed under this key by __init__ via pulumi.set().
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="updateTime")
    def update_time(self) -> str:
        """
        The time when the repository was last updated.
        """
        # Reads the value stashed under this key by __init__ via pulumi.set().
        return pulumi.get(self, "update_time")
class AwaitableGetRepositoryResult(GetRepositoryResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` makes __await__ a generator function, which
        # is what `await` requires; the result is already resolved, so the
        # "coroutine" simply returns a plain GetRepositoryResult copy.
        if False:
            yield self
        return GetRepositoryResult(
            create_time=self.create_time,
            description=self.description,
            format=self.format,
            kms_key_name=self.kms_key_name,
            labels=self.labels,
            maven_config=self.maven_config,
            name=self.name,
            update_time=self.update_time)
def get_repository(location: Optional[str] = None,
                   project: Optional[str] = None,
                   repository_id: Optional[str] = None,
                   opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetRepositoryResult:
    """
    Gets a repository.
    """
    # Invoke arguments keyed by the provider's camelCase parameter names.
    __args__ = {
        'location': location,
        'project': project,
        'repositoryId': repository_id,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    invoke_result = pulumi.runtime.invoke(
        'google-native:artifactregistry/v1beta2:getRepository',
        __args__, opts=opts, typ=GetRepositoryResult).value
    return AwaitableGetRepositoryResult(
        create_time=invoke_result.create_time,
        description=invoke_result.description,
        format=invoke_result.format,
        kms_key_name=invoke_result.kms_key_name,
        labels=invoke_result.labels,
        maven_config=invoke_result.maven_config,
        name=invoke_result.name,
        update_time=invoke_result.update_time)
@_utilities.lift_output_func(get_repository)
def get_repository_output(location: Optional[pulumi.Input[str]] = None,
                          project: Optional[pulumi.Input[Optional[str]]] = None,
                          repository_id: Optional[pulumi.Input[str]] = None,
                          opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetRepositoryResult]:
    """
    Gets a repository.
    """
    # Body intentionally empty: lift_output_func wraps get_repository so this
    # variant accepts and produces pulumi Outputs.
    ...
| StarcoderdataPython |
6440491 | <gh_stars>10-100
from .viewer import *
from PySide2.QtWidgets import QSizePolicy, QVBoxLayout, QTextEdit
from PySide2.QtGui import QFont
class DataViewerRaw(DataViewer):
    """Read-only hex-dump viewer for the raw bytes of a VFS node."""

    def __init__(self):
        DataViewer.__init__(self)

        # Monospaced, read-only, non-wrapping text area so the hex/ASCII
        # columns stay aligned.
        self.text_box = QTextEdit()
        self.text_box.setReadOnly(True)
        font = QFont("Courier", 8)
        self.text_box.setFont(font)
        self.text_box.setLineWrapMode(QTextEdit.LineWrapMode.NoWrap)
        size = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
        self.text_box.setSizePolicy(size)

        self.main_layout = QVBoxLayout()
        self.main_layout.addWidget(self.text_box)
        self.setLayout(self.main_layout)

    def vnode_process(self, vfs: VfsProcessor, vnode: VfsNode):
        """Render *vnode*'s contents as a classic hex dump (at most 1024 rows)."""
        with vfs.file_obj_from(vnode) as f:
            buf = f.read(vnode.size_u)

        line_len = 16  # bytes per dump row

        def clean(v):
            # Printable ASCII passes through; everything else is shown as a block.
            return chr(v) if 0x20 <= v <= 0x7f else '░'

        lines = [
            ' '.join('{:02x}'.format(i) for i in range(line_len)),
            '-'.join('--' for _ in range(line_len)),
        ]

        n = len(buf)
        max_pos = min(n, 1024 * line_len)  # cap the dump at 1024 rows
        for start in range(0, max_pos, line_len):
            chunk = buf[start:min(n, start + line_len)]
            hex_part = ' '.join('{:02X}'.format(v) for v in chunk)
            # Pad a short final row so the ASCII column stays aligned.
            # Each missing byte accounts for exactly three characters
            # ("XX "); the original padded one extra space per missing byte,
            # skewing the last row's separator.
            hex_part += '   ' * (line_len - len(chunk))
            lines.append(hex_part + ' | ' + ''.join(clean(v) for v in chunk))

        # Trailing newline kept for parity with the previous output.
        self.text_box.setText('\n'.join(lines) + '\n')
| StarcoderdataPython |
5015863 | <filename>proto_matcher/matcher/matcher.py
from typing import Optional, Set, Tuple, Union
from google.protobuf import message
from google.protobuf import text_format
from hamcrest.core.base_matcher import BaseMatcher
from hamcrest.core.description import Description
from hamcrest.core.helpers.wrap_matcher import wrap_matcher
from hamcrest.core.matcher import Matcher
from proto_matcher.compare import proto_compare, ProtoComparisonOptions
from proto_matcher.compare import ProtoComparisonScope
from proto_matcher.compare import ProtoFloatComparison
from proto_matcher.compare import RepeatedFieldComparison
# A protobuf expectation may be a message instance or a textproto string.
_ProtoValue = Union[str, message.Message]
# Base Hamcrest matcher type specialized to protobuf messages.
_ProtoMatcher = BaseMatcher[message.Message]
class _EqualsProto(_ProtoMatcher):
    """Hamcrest matcher asserting protobuf equality.

    The expectation may be a message instance or a textproto string; a
    string is parsed against the concrete type of the message under test.
    """

    def __init__(self, msg: _ProtoValue):
        self._expected = msg
        self._options = ProtoComparisonOptions()

    def mut_options(self) -> ProtoComparisonOptions:
        """Expose the mutable comparison options for decorator helpers."""
        return self._options

    def matches(self,
                item: message.Message,
                mismatch_description: Optional[Description] = None) -> bool:
        if isinstance(self._expected, str):
            # Parse the textproto using the runtime type of `item`.
            expected_msg = text_format.Parse(self._expected, type(item)())
        else:
            expected_msg = self._expected
        result = proto_compare(item, expected_msg, opts=self._options)
        if mismatch_description and not result.is_equal:
            mismatch_description.append_text(result.explanation)
        return result.is_equal

    def describe_mismatch(self, item: message.Message,
                          mismatch_description: Description):
        self.matches(item, mismatch_description)

    def describe_to(self, description: Description):
        description.append_text(f"a protobuf of:\n{self._expected}")
def equals_proto(expected: message.Message) -> _ProtoMatcher:
    """Build a matcher asserting equality against *expected* (message or textproto string)."""
    return _EqualsProto(expected)
def partially(matcher: _ProtoMatcher) -> _ProtoMatcher:
    """Set the comparison scope to PARTIAL on *matcher* and return it for chaining."""
    matcher.mut_options().scope = ProtoComparisonScope.PARTIAL
    return matcher
def approximately(matcher: _ProtoMatcher,
                  float_margin: Optional[float] = None,
                  float_fraction: Optional[float] = None) -> _ProtoMatcher:
    """Switch *matcher* to approximate float comparison.

    Args:
        matcher: the proto matcher to reconfigure (mutated in place).
        float_margin: absolute margin to use, if given.
        float_fraction: relative fraction to use, if given.

    Returns:
        The same matcher, for chaining.
    """
    opts = matcher.mut_options()
    opts.float_comp = ProtoFloatComparison.APPROXIMATE
    # Compare against None explicitly: 0.0 is falsy but is still a
    # legitimate caller-supplied margin/fraction, and the parameters
    # default to None, not 0.
    if float_margin is not None:
        opts.float_margin = float_margin
    if float_fraction is not None:
        opts.float_fraction = float_fraction
    return matcher
def ignoring_field_paths(field_paths: Set[Tuple[str]],
                         matcher: _ProtoMatcher) -> _ProtoMatcher:
    """Record *field_paths* as ignored on *matcher*'s options and return it."""
    opts = matcher.mut_options()
    opts.ignore_field_paths = field_paths
    return matcher
def ignoring_repeated_field_ordering(matcher: _ProtoMatcher) -> _ProtoMatcher:
    """Set repeated-field comparison to AS_SET (order-insensitive) on *matcher*."""
    opts = matcher.mut_options()
    opts.repeated_field_comp = RepeatedFieldComparison.AS_SET
    return matcher
| StarcoderdataPython |
1740622 | <gh_stars>0
# Create a SQL alchemy session maker to be used
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
# NOTE(review): credentials are hard-coded in this URL; consider reading them
# from the environment before deploying.
SQLALCHEMY_DATABASE_URL = "postgresql+psycopg2://admin:admin@localhost/pht_conductor"

engine = create_engine(
    SQLALCHEMY_DATABASE_URL, # connect_args={"check_same_thread": False} For sqlite db
)

# Session factory bound to the engine; call SessionLocal() to obtain a session.
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
| StarcoderdataPython |
5199468 | from typing import Union
import h5py
class SdsDatasets:
    """Accessor for the science datasets (SDS) inside a GEDI HDF5 granule."""

    def __init__(self, h5_obj: Union[str, h5py.File]):
        """
        Class to handle data loading from GEDI h5 datasets
        :param h5_obj: Path of h5 file or h5 file loaded using h5py.File
        """
        if isinstance(h5_obj, str):
            self._h5 = h5py.File(h5_obj, 'r')
        elif isinstance(h5_obj, h5py.File):
            self._h5 = h5_obj
        else:
            raise NotImplementedError('Unsupported input for SdsLayers class.')

        # Flat list of every group/dataset path inside the file.
        self.paths = []
        self._h5.visit(self.paths.append)
        # Product level parsed from the granule short-name metadata attribute.
        self.level = self._h5['METADATA']['DatasetIdentification'].attrs['shortName'].split('_')[1]
        # Names of the eight GEDI beam groups.
        self._GEDI_BEAMS = [
            'BEAM0000',
            'BEAM0001',
            'BEAM0010',
            'BEAM0011',
            'BEAM0101',
            'BEAM0110',
            'BEAM1000',
            'BEAM1011'
        ]
        # Load SDS
        self.sds_datasets = [x for x in self.paths if isinstance(self._h5[x], h5py.Dataset)]

    def _beam_sds(self, beam):
        """Filter SDS data by beam"""
        # Substring match on the full path, e.g. 'BEAM0000' in 'BEAM0000/rh100'.
        return [x for x in self.paths if beam in x]

    def dem(self, beam):
        """Get Tandem-X DEM data array"""
        return self._h5[[g for g in self._beam_sds(beam) if g.endswith('/digital_elevation_model')][0]][()]

    def elevation(self, beam):
        """Get ground elevation array"""
        return self._h5[[g for g in self._beam_sds(beam) if g.endswith('/elev_lowestmode')][0]][()]

    def latitude(self, beam):
        """Get latitude array"""
        return self._h5[[g for g in self._beam_sds(beam) if g.endswith('/lat_lowestmode')][0]][()]

    def longitude(self, beam):
        """Get longitude array"""
        return self._h5[[g for g in self._beam_sds(beam) if g.endswith('/lon_lowestmode')][0]][()]

    def canopy_height(self, beam):
        """Get canopy height (rh100) array"""
        return self._h5[[g for g in self._beam_sds(beam) if g.endswith('/rh100')][0]][()]

    def canopy_elevation(self, beam):
        """Get canopy elevation array"""
        return self._h5[[g for g in self._beam_sds(beam) if g.endswith('/elev_highestreturn')][0]][()]

    def shot_number(self, beam):
        """Get shot number array"""
        return self._h5[f'{beam}/shot_number'][()]

    def quality_flag(self, beam, processing):
        """Get quality flag array"""
        # `processing` selects the dataset, e.g. 'l2a' -> '.../l2a_quality_flag'.
        return self._h5[[g for g in self._beam_sds(beam) if g.endswith(f'/{processing.lower()}_quality_flag')][0]][()]

    def degrade_flag(self, beam):
        """Get degrade flag array"""
        return self._h5[[g for g in self._beam_sds(beam) if g.endswith(f'/degrade_flag')][0]][()]

    def pavd_z(self, beam):
        """Get pavd_z array"""
        return self._h5[f'{beam}/pavd_z'][()]

    def pai(self, beam):
        """Get pai array"""
        return self._h5[f'{beam}/pai'][()]

    def sensitivity(self, beam):
        """Get sensitivity array"""
        return self._h5[[g for g in self._beam_sds(beam) if g.endswith(f'/sensitivity')][0]][()]

    def agbd(self, beam):
        """Get above ground biomass density array"""
        return self._h5[[g for g in self._beam_sds(beam) if g.endswith(f'/agbd')][0]][()]

    def agbd_se(self, beam):
        """Get above ground biomass density array"""
        # NOTE(review): presumably the standard error of agbd, going by the
        # '_se' suffix — confirm against the product documentation.
        return self._h5[[g for g in self._beam_sds(beam) if g.endswith(f'/agbd_se')][0]][()]

    def load_dataset(self, target_sds, beam=None):
        """
        Load a dataset in the H5 file
        :param target_sds: Target SDS to load
        :param beam: Load data from a specific beam
        :return:
        """
        out_data = []
        # Load data for all beams
        if not beam:
            for beam in self._GEDI_BEAMS:
                out_data.extend(self._h5[self.search_path(target_sds, beam)][()])
        else:
            # Load data for one beam only
            out_data.extend(self._h5[self.search_path(target_sds, beam)][()])
        return out_data

    def search_path(self, target_sds, beam_or_group=None):
        """
        Search h5 data for specific SDS path
        :param target_sds: Name of SDS
        :param beam_or_group: Beam or H5 group used as an additional filter for string matching.
        In GEDI data this would be something like the BEAMXXXX name, ANCILLARY, or METADATA groups.
        :return:
        """
        # NOTE(review): the return type differs by branch — a single path
        # string when `beam_or_group` is given, a list of paths otherwise.
        # Internal callers always pass a beam; verify before external use.
        # Search for path with a particular beam
        if beam_or_group:
            results = [x for x in self.paths if x.endswith(target_sds) and beam_or_group in x]
            if not results:
                raise Exception(f'No SDS found with name "{target_sds}" and beam "{beam_or_group}"')
            results = results[0]
        else:
            results = []
            for beam in self._GEDI_BEAMS:
                results_initial = [x for x in self.paths if x.endswith(target_sds) and beam in x]
                if not results_initial:
                    raise Exception(f'No SDS found with name "{target_sds}" and beam "{beam}"')
                results += results_initial
        return results
if __name__ == '__main__':
    # Module is library-only; nothing to run directly.
    pass
| StarcoderdataPython |
8100283 | <filename>fatd/holders/transitions.py
import numpy as np
import fatd.transform.tools.training
import fatd.holders
# From data to model
class Data2Model(object):
    """Transition that fits a model on the training split of a data set."""

    def __init__(self, splitting_function=None):
        # Indices of the two partitions; populated by transform().
        self.training_indices = None
        self.test_indices = None

        if splitting_function is not None:
            self.splitting_function = splitting_function
        else:
            # Default: an 80/20 split with a fixed seed for reproducibility.
            self.splitting_function = \
                lambda X, y: fatd.transform.tools.training.train_test_split(
                    X, y, train_share=.8, seed=42
                )

    # data_to_model
    def transform(self, data_object, model_object):
        """Split *data_object*, fit *model_object* on the training part, return it."""
        train_idx, test_idx = self.splitting_function(data_object.data,
                                                      data_object.target)
        self.training_indices = train_idx
        self.test_indices = test_idx
        model_object.fit(data_object.data[train_idx],
                         data_object.target[train_idx])
        return model_object
# From model to predictions
class Model2Predictions(object):
    """Transition that turns a fitted model plus data into a Predictions holder."""

    def __init__(self):
        pass

    # model_to_predictions
    def transform(self, model_object, data_object, data_to_model_object=None):
        """Predict with *model_object* on *data_object*.

        If *data_to_model_object* carries a non-empty test partition, only
        that partition is predicted; otherwise the whole data set is used.
        Returns a fatd.holders.Predictions wrapping the predictions, the
        predicted data, its ground truth and the model's label set.
        """
        labels_set = np.unique(model_object.unique_labels)
        if data_to_model_object is not None:
            test_indices = data_to_model_object.test_indices
            # Check for None BEFORE calling len(): the original tested
            # `len(test_indices) == 0 or test_indices is None`, which raised
            # a TypeError whenever test_indices was None.
            if test_indices is None or len(test_indices) == 0:
                print('Missing test partition in the data_to_model object. Using the whole data set.')
                data_to_predict = data_object.data
                ground_truth_to_predict = data_object.target
            else:
                print('Using test partition of the data based on the data_to_model object.')
                data_to_predict = data_object.data[test_indices]
                ground_truth_to_predict = data_object.target[test_indices]
        else:
            print('Missing data_to_model object. Using the whole data set.')
            data_to_predict = data_object.data
            ground_truth_to_predict = data_object.target

        predictions = model_object.predict(data_to_predict)
        return fatd.holders.Predictions(predictions,
                                        data_to_predict,
                                        ground_truth_to_predict,
                                        labels_set)
| StarcoderdataPython |
3374364 | <gh_stars>1-10
import os
import time
import getpass
username = getpass.getuser()
def find():
    """Change the working directory to the current user's home directory.

    The original implementation shelled out with ``cd`` (which only affects
    the short-lived subshell, never this process) and then crashed with a
    NameError on ``"cd "%s``; ``os.chdir`` performs the intended change.
    """
    os.chdir(os.path.join("/home", username))
def kill():
    """Ensure that 'cat.txt' exists in the current directory, creating it
    if necessary.

    The original referenced the bare names ``start.py`` and ``cat.txt``
    (NameError at runtime) instead of string literals, and leaked the file
    handle it opened.
    """
    file_path = 'cat.txt'
    try:
        fp = open(file_path)
    except IOError:
        # If not exists, create the file
        fp = open(file_path, 'w+')
    fp.close()
    print("lol")
def execute():
    """Prompt the user for confirmation and return the choice as an int.

    The caller compares the result against the integer 1, but Python 3's
    input() returns a str, so the reply is converted before returning.
    """
    return int(input("You are about to run a malicous script, do you want to coninue? 1 is Y/ 2 is N"))
x = execute()
# NOTE(review): input() returns a str under Python 3, so `x == 1` only holds
# if execute() converts the reply to an int — verify before relying on the
# affirmative branch ever being taken.
if x == 1:
    find()
else:
    quit()
| StarcoderdataPython |
1734932 | <reponame>eldad-a/BioCRNPyler
from biocrnpyler.chemical_reaction_network import Species, Reaction, ComplexSpecies, ChemicalReactionNetwork
print("Start")
#Names of different supported propensities
propensity_types = ['hillpositive', 'proportionalhillpositive', 'hillnegative', 'proportionalhillnegative', 'massaction', 'general']

# Rate constants shared by all of the example CRNs below.
kb = 100
ku = 10
kex = 1.
kd = .1

G = Species(name = "G", material_type = "dna") #DNA
A = Species(name = "A", material_type = "protein") #Activator
GA = ComplexSpecies([G, A, A]) #Activated Gene
X = Species(name = "X", material_type = "protein")
# Degradation of X, shared by every network.
rxnd = Reaction([X], [], kd)
#Massaction Unregulated
species1 = [G, A, GA, X]
rxn0_1 = Reaction([G, A, A], [GA], k=kb, k_rev = ku)
rxn0_2 = Reaction([GA], [GA, X], k=kex)
CRN0 = ChemicalReactionNetwork(species1, [rxn0_1, rxn0_2, rxnd])
rxn1_1 = Reaction([G, A, A], [GA], k=kb, k_rev = ku)
rxn1_2 = Reaction([G], [G, X], k=kex)
CRN1 = ChemicalReactionNetwork(species1, [rxn1_1, rxn1_2, rxnd])
#hill positive
species2 = [G, A, X]
rxn2_1 = Reaction([G], [G, X], propensity_type = "hillpositive", propensity_params = {"k":kex, "n":2, "K":float(kb/ku), "s1":A})
CRN2 = ChemicalReactionNetwork(species2, [rxn2_1, rxnd])
#proportional hill positive
rxn3_1 = Reaction([G], [G, X], propensity_type = "proportionalhillpositive", propensity_params = {"k":kex, "n":2, "K":float(kb/ku), "s1":A, "d":G})
CRN3 = ChemicalReactionNetwork(species2, [rxn3_1, rxnd])
#hill Negative
rxn4_1 = Reaction([G], [G, X], propensity_type = "hillnegative", propensity_params = {"k":kex, "n":2, "K":float(kb/ku), "s1":A})
CRN4 = ChemicalReactionNetwork(species2, [rxn4_1, rxnd])
#proportional hill negative
rxn5_1 = Reaction([G], [G, X], propensity_type = "proportionalhillnegative", propensity_params = {"k":kex, "n":2, "K":float(kb/ku), "s1":A, "d":G})
CRN5 = ChemicalReactionNetwork(species2, [rxn5_1, rxnd])

import numpy as np
import pylab as plt
x0 = {repr(G):2, repr(A):10}
timepoints = np.linspace(0, 100, 200)
fname = "CRN.xml"
CRNs = [CRN0, CRN1, CRN2, CRN4, CRN3, CRN5]
LSs = ["-", "--", ":"]
plt.figure()
for i in range(6):
    plt.subplot(3, 2, i+1)
    CRN = CRNs[i]
    # Each CRN is round-tripped through SBML and simulated with bioscrape.
    CRN.write_sbml_file(file_name = fname)
    print("Saved")
    from bioscrape.types import Model
    from bioscrape.simulator import py_simulate_model
    from bioscrape.sbmlutil import *
    M = Model(sbml_filename = fname)
    # Sweep the activator's initial count and overlay the X trajectories.
    A_list = [0, 1, 2, 5, 10]
    for ind in range(len(A_list)):
        x0[repr(A)] = A_list[ind]
        M.set_species(x0)
        R = py_simulate_model(timepoints, Model = M)
        plt.plot(timepoints, R["protein_X"], label ="A="+str(A_list[ind]),color = 'blue', alpha = (ind+1)/len(A_list))
    txt = ""
    for rxn in CRN.reactions:
        txt += repr(rxn)+"\n"
    plt.title(txt[:-1], fontsize = 8)
    plt.legend()
plt.show()
#CRN.simulate_with_bioscrape(timepoints, initial_condition_dict = x0)
#CRN.simulate_with_bioscrape_via_sbml(timepoints, file = fname, initial_condition_dict = x0)
3223591 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-05-03 19:56
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the `items` and `log_records`
    # tables. Generated migrations should not be edited by hand.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Item',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('owner', models.PositiveIntegerField(db_index=True)),
                ('data', django.contrib.postgres.fields.jsonb.JSONField(default='{}')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
            options={
                'db_table': 'items',
            },
        ),
        migrations.CreateModel(
            name='LogRecord',
            fields=[
                ('id', models.BigAutoField(primary_key=True, serialize=False)),
                ('transaction', models.UUIDField(default=uuid.uuid4)),
                ('item', models.UUIDField()),
                ('data', django.contrib.postgres.fields.jsonb.JSONField(default='{}')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
            ],
            options={
                'db_table': 'log_records',
            },
        ),
    ]
| StarcoderdataPython |
5122760 | # Code to distill the knowledge from a Teacher to Student using data generated by a Generator
from __future__ import print_function
import argparse
import os
import random
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torch.nn.functional as F
from tensorboardX import SummaryWriter
import numpy as np
from dcgan_model import Generator, Discriminator
from alexnet import AlexNet
from alexnet_half import AlexNet_half
# Global TensorBoard writer used to log losses/accuracies during distillation.
writer = SummaryWriter()
# CUDA_VISIBLE_DEVICES=0 python KD_dfgan.py --dataroot ../../../../datasets --cuda --outf models --manualSeed 108 --niter 5000 --lambda_ 1 --temp 20 --netG ../../train_generator/out_cifar/netG_epoch_199.pth --netC ./best_model.pth
if __name__ == '__main__':
    # ---- Command-line interface -------------------------------------------
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataroot', required=True, help='path to test dataset')
    parser.add_argument('--workers', type=int, help='number of data loading workers', default=2)
    parser.add_argument('--batchSize', type=int, default=64, help='input batch size')
    parser.add_argument('--nz', type=int, default=100, help='size of the latent z vector')
    parser.add_argument('--niter', type=int, default=25, help='number of epochs to train for')
    parser.add_argument('--lr', type=float, default=0.001, help='learning rate, default=0.0002')
    parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')
    parser.add_argument('--cuda', action='store_true', help='enables cuda')
    parser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use')
    parser.add_argument('--netG', required=True, help="path to Generator network weights")
    parser.add_argument('--netC', required=True, help="path to Teacher network weights")
    parser.add_argument('--netS', default='', help="path to Student network weights (to continue training)")
    parser.add_argument('--outf', default='.', help='folder to output images and model checkpoints')
    parser.add_argument('--manualSeed', type=int, help='manual seed')
    parser.add_argument('--temp', default=10, type=float, help='Temperature for KD')
    parser.add_argument('--lambda_', default=1, type=float, help='Weight of KD Loss during distillation')
    parser.add_argument('--nBatches', default=256, type=float, help='Number of Batches')
    opt = parser.parse_args()
    print(opt)

    try:
        os.makedirs(opt.outf)
    except OSError:
        pass  # output directory already exists

    # Seed all RNGs for reproducibility.
    if opt.manualSeed is None:
        opt.manualSeed = random.randint(1, 10000)
    print("Random Seed: ", opt.manualSeed)
    random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)
    cudnn.benchmark = True
    if torch.cuda.is_available() and not opt.cuda:
        print("WARNING: You have a CUDA device, so you should probably run with --cuda")

    nc=3  # number of image channels

    # CIFAR-10 test set, used only to report real-data accuracy.
    transform=transforms.Compose([
        transforms.ToTensor(),
        ])
    test_loader = torch.utils.data.DataLoader(
        dset.CIFAR10(opt.dataroot, train=False, download=True, transform=transform),
        batch_size=opt.batchSize, shuffle=False)

    device = torch.device("cuda:0" if opt.cuda else "cpu")
    ngpu = int(opt.ngpu)
    nz = int(opt.nz)

    # Frozen generator that synthesizes the distillation training data.
    netG = Generator(ngpu).to(device)
    netG.load_state_dict(torch.load(opt.netG))
    print(netG)
    netG.eval()

    # Frozen teacher network.
    netC = AlexNet().to(device)
    netC.load_state_dict(torch.load(opt.netC))
    print(netC)
    netC.eval()

    # Student network (half-width AlexNet) to be trained.
    netS = AlexNet_half().to(device)
    if opt.netS != '':
        netS.load_state_dict(torch.load(opt.netS))
    print(netS)

    temp = opt.temp
    batch_size = int(opt.batchSize)
    n_batches = int(opt.nBatches)
    # setup optimizer
    threshold = []
    best_val_acc = 0
    cnt = 0  # epochs since the last validation improvement (early stopping)
    n_epochs_lr1 = 0
    # Two-stage schedule: full learning rate first, then 10x smaller starting
    # from the best stage-1 checkpoint.
    for lr_cnt in range(2):
        if lr_cnt == 0:
            lrate = opt.lr
        else:
            lrate = opt.lr * 0.1
            netS.load_state_dict(torch.load('models/best_model_lr1.pth'))
            train_st_acc_lr2 = train_st_acc_lr1
            val_st_acc_lr2 = val_st_acc_lr1
            test_st_acc_lr2 = test_st_acc_lr1
            test_acc_lr2 = test_acc_lr1
            torch.save(netS.state_dict(), "models/best_model_lr2.pth")
        optimizerS = optim.Adam(netS.parameters(), lr=lrate, betas=(opt.beta1, 0.999))

        for epoch in range(1, opt.niter+1):
            # ---- Training on generator-synthesized batches ----
            loss_kd_sum = 0
            loss_ce_sum = 0
            loss_all_sum = 0
            teacher_student_correct_sum = 0
            netS.train()
            for i in range(n_batches):
                optimizerS.zero_grad()
                noise_rand = torch.randn(batch_size, nz, 1, 1, device=device)
                fake_train = netG(noise_rand)
                fake_train_class = netC(fake_train)
                fake_student_class = netS(fake_train)
                # Temperature-scaled logits for the KD (soft-label) loss.
                fake_train_class_ht = fake_train_class/temp
                fake_student_class_ht = fake_student_class/temp
                sm_teacher_ht = F.softmax(fake_train_class_ht, dim=1)
                sm_student_ht = F.softmax(fake_student_class_ht, dim=1)
                sm_teacher = F.softmax(fake_train_class, dim=1)
                sm_student = F.softmax(fake_student_class, dim=1)
                pred_class_argmax_teacher = sm_teacher.max(1, keepdim=True)[1]
                loss_kd = nn.KLDivLoss(reduction='batchmean')(F.log_softmax(fake_student_class_ht, dim=1),F.softmax(fake_train_class_ht, dim=1))
                # Hard-label CE against the teacher's argmax predictions.
                loss_ce = F.cross_entropy(fake_student_class, pred_class_argmax_teacher.view(batch_size))
                loss_all = opt.lambda_*temp*temp*loss_kd + (1-opt.lambda_)*loss_ce
                loss_kd_sum = loss_kd_sum + loss_kd
                loss_ce_sum = loss_ce_sum + loss_ce
                loss_all_sum = loss_all_sum + loss_all
                loss_all.backward()
                optimizerS.step()
                pred_class_argmax_student = sm_student.max(1, keepdim=True)[1]
                pred_class_argmax_teacher = pred_class_argmax_teacher.view(sm_teacher.shape[0])
                pred_class_argmax_student = pred_class_argmax_student.view(sm_teacher.shape[0])
                teacher_student_correct = torch.sum(pred_class_argmax_student==pred_class_argmax_teacher)
                teacher_student_correct_sum = teacher_student_correct_sum + (teacher_student_correct).cpu().data.numpy()

            # do checkpointing
            torch.save(netS.state_dict(), '%s/netS_epoch_%d.pth' % (opt.outf, epoch + n_epochs_lr1))

            # ---- Validation on freshly generated batches ----
            loss_kd_val_sum = 0
            loss_ce_val_sum = 0
            loss_all_val_sum = 0
            teacher_student_correct_val_sum = 0
            netS.eval()
            with torch.no_grad():
                for i in range(int(np.floor(n_batches/4))):
                    noise_rand = torch.randn(batch_size, nz, 1, 1, device=device)
                    fake_train = netG(noise_rand)
                    fake_train_class = netC(fake_train)
                    fake_student_class = netS(fake_train)
                    fake_train_class_ht = fake_train_class/temp
                    fake_student_class_ht = fake_student_class/temp
                    sm_teacher_ht = F.softmax(fake_train_class_ht, dim=1)
                    sm_student_ht = F.softmax(fake_student_class_ht, dim=1)
                    sm_teacher = F.softmax(fake_train_class, dim=1)
                    sm_student = F.softmax(fake_student_class, dim=1)
                    pred_class_argmax_teacher = sm_teacher.max(1, keepdim=True)[1]
                    loss_kd = nn.KLDivLoss(reduction='batchmean')(F.log_softmax(fake_student_class_ht, dim=1),F.softmax(fake_train_class_ht, dim=1))
                    loss_ce = F.cross_entropy(fake_student_class, pred_class_argmax_teacher.view(batch_size))
                    loss_all = opt.lambda_*temp*temp*loss_kd + (1-opt.lambda_)*loss_ce
                    loss_kd_val_sum = loss_kd_val_sum + loss_kd
                    loss_ce_val_sum = loss_ce_val_sum + loss_ce
                    loss_all_val_sum = loss_all_val_sum + loss_all
                    pred_class_argmax_student = sm_student.max(1, keepdim=True)[1]
                    pred_class_argmax_teacher = pred_class_argmax_teacher.view(sm_teacher.shape[0])
                    pred_class_argmax_student = pred_class_argmax_student.view(sm_teacher.shape[0])
                    teacher_student_correct = torch.sum(pred_class_argmax_student==pred_class_argmax_teacher)
                    teacher_student_correct_val_sum = teacher_student_correct_val_sum + (teacher_student_correct).cpu().data.numpy()

                # ---- Evaluation on the real CIFAR-10 test set ----
                teacher_acc_sum = 0.0
                student_acc_sum = 0.0
                teacher_student_correct_test_sum = 0.0
                num = 0.0
                for data, target in test_loader:
                    data, target = data.to(device), target.to(device)
                    data = data*2 - 1
                    test_class_teacher = netC(data)
                    test_class_student = netS(data)
                    sm_teacher_test = F.softmax(test_class_teacher, dim=1)
                    sm_student_test = F.softmax(test_class_student, dim=1)
                    pred_class_argmax_teacher_test = sm_teacher_test.max(1, keepdim=True)[1]
                    pred_class_argmax_student_test = sm_student_test.max(1, keepdim=True)[1]
                    pred_class_argmax_teacher_test = pred_class_argmax_teacher_test.view(target.shape[0])
                    pred_class_argmax_student_test = pred_class_argmax_student_test.view(target.shape[0])
                    teacher_acc = torch.sum(pred_class_argmax_teacher_test==target)
                    student_acc = torch.sum(pred_class_argmax_student_test==target)
                    teacher_acc_sum = teacher_acc_sum + teacher_acc
                    student_acc_sum = student_acc_sum + student_acc
                    num = num + target.shape[0]
                    teacher_student_correct = torch.sum(pred_class_argmax_student_test==pred_class_argmax_teacher_test)
                    teacher_student_correct_test_sum = teacher_student_correct_test_sum + (teacher_student_correct).cpu().data.numpy()

            teacher_acc_mean = float(teacher_acc_sum) / float(num)
            student_acc_mean = float(student_acc_sum) / float(num)
            teacher_student_correct_test_mean = float(teacher_student_correct_test_sum) / float(num)
            val_student_acc = teacher_student_correct_val_sum / (float(np.floor(n_batches/4))*batch_size)
            train_student_acc = teacher_student_correct_sum/ float(n_batches*batch_size)

            # ---- Best-model bookkeeping (per learning-rate stage) ----
            if val_student_acc > best_val_acc:
                print("Saving best model...")
                if lr_cnt ==0 :
                    torch.save(netS.state_dict(), "models/best_model_lr1.pth")
                    train_st_acc_lr1 = train_student_acc
                    val_st_acc_lr1 = val_student_acc
                    test_st_acc_lr1 = teacher_student_correct_test_mean
                    test_acc_lr1 = student_acc_mean
                else:
                    torch.save(netS.state_dict(), "models/best_model_lr2.pth")
                    train_st_acc_lr2 = train_student_acc
                    val_st_acc_lr2 = val_student_acc
                    test_st_acc_lr2 = teacher_student_correct_test_mean
                    test_acc_lr2 = student_acc_mean
                best_val_acc = val_student_acc
                cnt = 0
            else:
                cnt += 1

            print("Epoch",epoch + n_epochs_lr1,"/",opt.niter)
            print("Teacher accuracy=",round(teacher_acc_mean*100,2),"%, Student accuracy=",round(student_acc_mean*100,2),"%")
            writer.add_scalar("KD loss train", loss_kd_sum/ n_batches, epoch + n_epochs_lr1)
            writer.add_scalar("KD loss val", loss_kd_val_sum/ float(np.floor(n_batches/4)), epoch + n_epochs_lr1)
            writer.add_scalar("CE loss train", loss_ce_sum/ n_batches, epoch + n_epochs_lr1)
            writer.add_scalar("CE loss val", loss_ce_val_sum/ float(np.floor(n_batches/4)), epoch + n_epochs_lr1)
            writer.add_scalar("Total loss train", loss_all_sum/ n_batches, epoch + n_epochs_lr1)
            writer.add_scalar("Total loss val", loss_all_val_sum/ float(np.floor(n_batches/4)), epoch + n_epochs_lr1)
            writer.add_scalar("Student test accuracy", student_acc_mean, epoch + n_epochs_lr1)
            writer.add_scalar("Teacher-Student train accuracy", train_student_acc, epoch + n_epochs_lr1)
            writer.add_scalar("Teacher-Student val accuracy", val_student_acc, epoch + n_epochs_lr1)
            writer.add_scalar("Teacher-Student test accuracy", teacher_student_correct_test_mean, epoch + n_epochs_lr1)
            writer.export_scalars_to_json("./all_scalars.json")

            # Early stopping: no validation improvement for 100 epochs.
            if cnt > 100:
                print('Model has converged with learning rate = {}!'.format(lrate))
                cnt = 0
                break

        if lr_cnt == 0:
            n_epochs_lr1 = epoch
        else:
            n_epochs_lr2 = epoch

    print('Number of epochs with lr = {} are {} and number of epochs with lr = {} are {}'.format(
        opt.lr, n_epochs_lr1, opt.lr*0.1, n_epochs_lr2))
    print('Accuracy with lr = {}: Train ST accuracy = {:.2f}%, Validation ST accuracy = {:.2f}%, Test ST accuracy = {:.2f}%, Test accuracy = {:.2f}%'.format(
        opt.lr, train_st_acc_lr1*100, val_st_acc_lr1*100, test_st_acc_lr1*100, test_acc_lr1*100))
    print('Accuracy with lr = {}: Train ST accuracy = {:.2f}%, Validation ST accuracy = {:.2f}%, Test ST accuracy = {:.2f}% Test accuracy = {:.2f}%'.format(
        opt.lr*0.1, train_st_acc_lr2*100, val_st_acc_lr2*100, test_st_acc_lr2*100, test_acc_lr2*100))
    writer.close()
12851962 | # Generated by Django 4.0.1 on 2022-01-10 09:29
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: relaxes `Registration.degree` to a nullable
    # 12-character CharField. Generated migrations should not be edited by hand.

    dependencies = [
        ('firstApp', '0005_alter_registration_firstname'),
    ]

    operations = [
        migrations.AlterField(
            model_name='registration',
            name='degree',
            field=models.CharField(max_length=12, null=True),
        ),
    ]
| StarcoderdataPython |
6538023 | <gh_stars>1-10
from unittest import TestCase
import numpy as np
import pandas as pd
from pybleau.app.plotting.api import factory_from_config
from pybleau.app.plotting.bar_factory import BarPlotFactory
from pybleau.app.plotting.heatmap_factory import HeatmapPlotFactory
from pybleau.app.plotting.histogram_factory import HistogramPlotFactory
from pybleau.app.plotting.line_factory import LinePlotFactory
from pybleau.app.plotting.plot_config import BarPlotConfigurator, \
HeatmapPlotConfigurator, \
HistogramPlotConfigurator, LinePlotConfigurator, ScatterPlotConfigurator
from pybleau.app.plotting.plot_factories import DEFAULT_FACTORIES
from pybleau.app.plotting.scatter_factories import ScatterPlotFactory
# Mixed numeric/categorical frame used by most of the factory tests below.
TEST_DF = pd.DataFrame({"Col_1": [1, 2, 3, 4, 5, 6, 7, 8],
                        "Col_2": np.array([1, 2, 3, 4, 5, 6, 7, 8])[::-1],
                        "Col_3": ["aa_aaa", "bb_bbb", "aa_aaa", "cc_ccc",
                                  "ee_eee", "dd_ddd", "ff_fff", "gg_ggg"],
                        "Col_4": np.random.randn(8),
                        })

# 2x2 grid frame used by the heatmap test (Col_1/Col_2 span the grid axes).
TEST_DF2 = pd.DataFrame({"Col_1": [1, 2, 1, 2],
                         "Col_2": [1, 1, 2, 2],
                         "Col_3": np.random.randn(4)})
class TestAPI(TestCase):
    """factory_from_config should map each configurator type to its factory."""

    def _factory_for(self, config):
        # Resolution always goes through the shared default factory table.
        return factory_from_config(config, DEFAULT_FACTORIES)

    def test_heatmap_config_returns_heatmap_factory(self):
        config = HeatmapPlotConfigurator(data_source=TEST_DF2)
        config.x_col_name = "Col_1"
        config.y_col_name = "Col_2"
        config.z_col_name = "Col_3"
        self.assertIsInstance(self._factory_for(config), HeatmapPlotFactory)

    def test_histogram_config_returns_historgram_factory(self):
        config = HistogramPlotConfigurator(data_source=TEST_DF)
        config.x_col_name = "Col_4"
        self.assertIsInstance(self._factory_for(config), HistogramPlotFactory)

    def test_bar_config_returns_bar_factory(self):
        config = BarPlotConfigurator(data_source=TEST_DF)
        config.x_col_name = "Col_3"
        config.y_col_name = "Col_1"
        self.assertIsInstance(self._factory_for(config), BarPlotFactory)

    def test_line_config_returns_line_factory(self):
        config = LinePlotConfigurator(data_source=TEST_DF)
        config.x_col_name = "Col_1"
        config.y_col_name = "Col_2"
        self.assertIsInstance(self._factory_for(config), LinePlotFactory)

    def test_scatter_config_returns_scatter_factory(self):
        config = ScatterPlotConfigurator(data_source=TEST_DF)
        config.x_col_name = "Col_1"
        config.y_col_name = "Col_2"
        config.z_col_name = "Col_3"
        self.assertIsInstance(self._factory_for(config), ScatterPlotFactory)
| StarcoderdataPython |
9686745 | <reponame>74gigi8/Learning-Path-Learn-Web-Development-with-Python<gh_stars>10-100
from rest_framework import serializers
from posts import models
class PostSerializer(serializers.ModelSerializer):
    """Serialize a Post, exposing the author's username and the message."""

    posted_by = serializers.SerializerMethodField()

    class Meta:
        model = models.Post
        fields = ("posted_by", "message",)

    def get_posted_by(self, obj):
        """Resolve the `posted_by` field to the posting user's username."""
        author = obj.posted_by
        return author.username
| StarcoderdataPython |
296579 | import sublime_plugin
import sublime
# 20171211 - read in states via settings file - also use settings to define which states
# are considered to be active - so that only those states are displayed for "All Active"
# 20171106 - added in capability to choose all (including done) and all-active (not done).
# See corresponding way to search for inclusion in the list of states in "show_instances.py"
# 20171108 - corrected last line to pass a single element list instead of a text string if
# one item selected: self.a[index] --> [self.a[index]]
class TaskInterfaceCommand(sublime_plugin.TextCommand):
    """Prompt for a task state keyword (or "All-Active") and show the
    matching task instances via the `show_instances` command."""

    def run(self, edit):
        settings = sublime.load_settings("Task.sublime-settings")
        # List of task state keywords
        keywords = settings.get('keywords')
        # Parallel list of booleans: True where the same-index keyword is
        # considered active (including waiting states).
        self.active = settings.get('active')
        self.a = ["All-Active"]
        self.a.extend(keywords)
        # timeout fix at https://github.com/tosher/Mediawiker/blob/master/mediawiker.py
        sublime.set_timeout(lambda: self.view.window().show_quick_panel(self.a, self.on_done), 1)

    def on_done(self, index):
        if index == -1:
            return
        if self.a[index] == "All-Active":
            self.a.remove("All-Active")  # parse out inactive tasks below
            # Bug fix: the old `range(0, len(self.a) - 1)` loop skipped the
            # last keyword. zip pairs every keyword with its active flag.
            self.a = [kw for kw, is_active in zip(self.a, self.active) if is_active]
            self.view.run_command("show_instances", {"args": {'text': self.a}})
        else:
            # Single selection is passed as a one-element list.
            self.view.run_command("show_instances", {"args": {'text': [self.a[index]]}})
| StarcoderdataPython |
3593851 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by <NAME> and <NAME>
# published by PACKT Publishing
#
# It is made available under the MIT License
from __future__ import print_function
import logging
import gensim
import numpy as np
# Set up logging in order to get progress information as the model is being built:
logging.basicConfig(
    format='%(asctime)s : %(levelname)s : %(message)s',
    level=logging.INFO)
# Load the preprocessed corpus (id2word & mm):
id2word = gensim.corpora.Dictionary.load_from_text(
    'data/wiki_en_output_wordids.txt.bz2')
mm = gensim.corpora.MmCorpus('data/wiki_en_output_tfidf.mm')
# Calling the constructor is enough to build the model
# This call will take a few hours!
model = gensim.models.hdpmodel.HdpModel(
    corpus=mm,
    id2word=id2word,
    chunksize=10000)
# Save the model so we do not need to learn it again.
model.save('wiki_hdp.pkl')
# Compute the document/topic matrix: rows are documents, columns are HDP
# topics; each cell accumulates the topic weights the model assigns to
# that document.
topics = np.zeros((len(mm), model.num_topics))
for di,doc in enumerate(mm):
    doc_top = model[doc]
    for ti,tv in doc_top:
        topics[di,ti] += tv
np.save('topics_hdp.npy', topics)
| StarcoderdataPython |
6647146 | import matplotlib.pyplot as plt
from networkx import Graph, draw_networkx_nodes, draw_networkx_edges, \
draw_networkx_labels, spring_layout
from similarity import group_similarity
def author_graph(authorSubs):
    """Build a complete weighted graph over authors.

    Every pair of authors gets an edge whose `weight` attribute is the
    group_similarity of their submission sets.
    """
    graph = Graph()
    names = list(authorSubs.keys())
    total = len(names)
    for idx in range(total):
        first = names[idx]
        for second in names[idx + 1:total]:
            weight = group_similarity(authorSubs[first], authorSubs[second])
            graph.add_edge(first, second, weight=weight)
    return graph
def plot_graph(G):
    """Draw graph G with a spring layout; edge opacity encodes similarity.

    Warnings from matplotlib/networkx are suppressed for the whole drawing
    sequence, not just the layout computation.
    """
    import warnings
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        pos = spring_layout(G) # positions for all nodes
        draw_networkx_nodes(G, pos, node_size=700)
        # Edges are drawn one at a time so each can get its own alpha,
        # taken from its similarity weight (assumed to lie in [0, 1] --
        # TODO confirm group_similarity's range).
        for (u,v,d) in G.edges(data=True):
            draw_networkx_edges(G, pos, edgelist=[(u,v)], alpha=d['weight'],
                                width=3)
        draw_networkx_labels(G, pos)
        plt.axis('off')
        plt.show()
| StarcoderdataPython |
11231576 | <reponame>ryanohoro/natlas
from flask import redirect, url_for, flash, render_template, request, current_app, session
from flask_login import login_user, logout_user, current_user
from app import db
from app.auth.forms import LoginForm, RegistrationForm, ResetPasswordRequestForm, \
ResetPasswordForm, InviteConfirmForm
from app.models import User, EmailToken
from app.auth.email import send_password_reset_email
from app.auth import bp
from werkzeug.urls import url_parse
@bp.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate a user by email/password and redirect appropriately.

    Already-authenticated visitors go straight to the index.  On a valid
    POST, a safe ``next`` query parameter (same-site only) is honoured for
    the post-login redirect.
    """
    if current_user.is_authenticated:
        return redirect(url_for('main.index'))
    form = LoginForm()
    if form.validate_on_submit():
        user = User.query.filter_by(email=User.validate_email(form.email.data)).first()
        if user is None or not user.check_password(form.password.data):
            flash('Invalid email or password', 'danger')
            return redirect(url_for('auth.login'))
        login_user(user, remember=form.remember_me.data)
        next_page = request.args.get('next')
        # Reject absolute URLs (non-empty netloc) to prevent open redirects.
        if not next_page or url_parse(next_page).netloc != '':
            next_page = url_for('main.index')
        return redirect(next_page)
    return render_template('auth/login.html', title='Sign In', form=form)
@bp.route('/logout')
def logout():
    """Log the current user out and return to the index page."""
    logout_user()
    return redirect(url_for('main.index'))
@bp.route('/register', methods=['GET', 'POST'])
def register():
    """Create a new account, honouring the instance's REGISTER_ALLOWED flag.

    The submitted address is normalised/verified by User.validate_email
    before the account is created; the password is stored hashed via
    set_password.
    """
    if current_user.is_authenticated:
        return redirect(url_for('main.index'))
    if not current_app.config['REGISTER_ALLOWED']:
        flash("Sorry, we're not currently accepting new users. If you feel you've received this message in error, please contact an administrator.", "warning")
        return redirect(url_for('auth.login'))
    form = RegistrationForm()
    if form.validate_on_submit():
        validemail = User.validate_email(form.email.data)
        if not validemail:
            flash("%s does not appear to be a valid, deliverable email address." % form.email.data, "danger")
            return redirect(url_for('auth.register'))
        user = User(email=validemail)
        user.set_password(form.password.data)
        db.session.add(user)
        db.session.commit()
        flash('Congratulations, you are now a registered user!', 'success')
        return redirect(url_for('auth.login'))
    return render_template('auth/register.html', title='Register', form=form)
@bp.route('/reset_password', methods=['GET', 'POST'])
def reset_password_request():
    """Send a password-reset email for the submitted address."""
    if current_user.is_authenticated:
        return redirect(url_for('main.index'))
    form = ResetPasswordRequestForm()
    if form.validate_on_submit():
        validemail = User.validate_email(form.email.data)
        if not validemail:
            flash("%s does not appear to be a valid, deliverable email address." % form.email.data, "danger")
            return redirect(url_for('auth.reset_password_request'))
        user = User.query.filter_by(email=validemail).first()
        if user:
            send_password_reset_email(user)
        # Deliberately flashes the same message whether or not the account
        # exists, so this form cannot be used to enumerate registered users.
        flash('Check your email for the instructions to reset your password', "info")
        return redirect(url_for('auth.login'))
    return render_template(
        'auth/password_reset.html',
        title='Request Password Reset',
        form=form,
        pwrequest=True
    )
@bp.route('/reset_password/<token>', methods=['GET'])
def reset_password(token):
    """Stash the emailed reset token in the session, then redirect to the form.

    Keeping the token server-side means it does not appear in the reset
    form's URL.
    """
    if current_user.is_authenticated:
        return redirect(url_for('main.index'))
    session['reset_token'] = token
    return redirect(url_for('auth.do_password_reset'))
@bp.route('/reset_password/reset', methods=['GET', 'POST'])
def do_password_reset():
    """Validate the session-held reset token and let the user set a password."""
    # Bug fix: session['reset_token'] raised KeyError when the key was
    # absent, making the "Token not found" guard below unreachable;
    # .get() returns None instead so the guard works.
    token = session.get('reset_token')
    if not token:
        flash("Token not found!", "danger")
        return redirect(url_for('auth.login'))
    user = User.verify_reset_password_token(token)
    if not user:
        flash("Password reset token is invalid or has expired.", "danger")
        session.pop('reset_token', None)  # remove the invalid token from the session
        return redirect(url_for('auth.login'))
    form = ResetPasswordForm()
    if form.validate_on_submit():
        user.set_password(form.password.data)
        EmailToken.expire_token(tokenstr=token)
        session.pop('reset_token', None)  # remove the reset token from the session
        # No need to db.session.commit() because expire_token commits the session for us
        flash('Your password has been reset.', "success")
        return redirect(url_for('auth.login'))
    return render_template('auth/password_reset.html', title="Reset Password", form=form)
@bp.route('/invite/<token>', methods=['GET'])
def invite_user(token):
    """Stash the emailed invite token in the session, then redirect to the
    acceptance form (mirrors the reset_password flow)."""
    if current_user.is_authenticated:
        return redirect(url_for('main.index'))
    session['invite_token'] = token
    return redirect(url_for('auth.accept_invite'))
@bp.route('/invite/accept', methods=['GET', 'POST'])
def accept_invite():
    """Validate the session-held invite token and let the invitee set a password."""
    # Bug fix: session['invite_token'] raised KeyError when the key was
    # absent, so the "Token not found" branch below could never run;
    # .get() yields None instead.
    token = session.get('invite_token')
    if not token:
        flash("Token not found!", "danger")
        return redirect(url_for('auth.login'))
    user = User.verify_invite_token(token)
    if not user:
        flash("Invite token is invalid or has expired", "danger")
        session.pop('invite_token', None)  # remove the invalid token from the session
        return redirect(url_for('auth.login'))
    form = InviteConfirmForm()
    if form.validate_on_submit():
        user.set_password(form.password.data)
        EmailToken.expire_token(tokenstr=token)
        session.pop('invite_token', None)  # remove the invite token from the session
        # No need to db.session.commit() because expire_token commits the session for us
        flash('Your password has been set.', "success")
        return redirect(url_for('auth.login'))
    return render_template('auth/accept_invite.html', title="Accept Invitation", form=form)
| StarcoderdataPython |
1983311 | """Coordinates calculations.
This package is in an early development stage and may be deeply changed or even removed.

Currently it allows image/WCS coordinate calculation using the external xy2sky/sky2xy
tools from WCStools (which support the SExtractor FITS header format for distortion),
and matching multiple catalogs at once.
"""
from .coord_tools import *
from .CoordMatch import CoordMatch
from .XY2Sky import XY2Sky
from .Sky2XY import Sky2XY
#from .plots import plot_coords | StarcoderdataPython |
256692 | <reponame>cristianCarrerasCastillo/django_cafeteria
from django.shortcuts import get_object_or_404, render
from .models import Page
# Create your views here.
def page(request, page_id):
    """Render the Page with primary key *page_id*, or raise Http404."""
    requested_page = get_object_or_404(Page, id=page_id)
    context = {'page': requested_page}
    return render(request, 'pages/sample.html', context)
| StarcoderdataPython |
4959022 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-07-03 17:43
from __future__ import unicode_literals
from django.db import migrations
import waldur_core.core.fields
class Migration(migrations.Migration):
    """Alter Project.action_details to a JSONField defaulting to an empty dict."""

    dependencies = [
        ('waldur_jira', '0018_project_runtime_state'),
    ]
    operations = [
        migrations.AlterField(
            model_name='project',
            name='action_details',
            # default=dict (the callable) gives each row a fresh empty dict.
            field=waldur_core.core.fields.JSONField(default=dict),
        ),
    ]
| StarcoderdataPython |
12827797 | from kivy.app import App
from kivy.factory import Factory
from kivy.properties import ObjectProperty
from kivy.lang import Builder
from electrum_ltc.i18n import _
Builder.load_string('''
#:import _ electrum_ltc_gui.kivy.i18n._
<CheckpointDialog@Popup>
id: popup
title: _('Blockchain')
size_hint: 1, 1
cp_height: 0
cp_value: ''
BoxLayout:
orientation: 'vertical'
padding: '10dp'
spacing: '10dp'
TopLabel:
height: '48dp'
id: bc_height
text: _("Verified headers: %d blocks.")% app.num_blocks
TopLabel:
height: '48dp'
id: bc_status
text: _("Connected to %d nodes.")% app.num_nodes if app.num_nodes else _("Not connected?")
Widget:
size_hint: 1, 0.1
TopLabel:
text: _("In order to verify the history returned by your main server, Electrum downloads block headers from random nodes. These headers are then used to check that transactions sent by the server really are in the blockchain.")
font_size: '6pt'
Widget:
size_hint: 1, 0.1
GridLayout:
orientation: 'horizontal'
cols: 2
height: '36dp'
TopLabel:
text: _('Checkpoint') + ':'
height: '36dp'
TextInput:
id: height_input
multiline: False
input_type: 'number'
height: '36dp'
size_hint_y: None
text: '%d'%root.cp_height
on_focus: root.on_height_str()
TopLabel:
text: _('Block hash') + ':'
TxHashLabel:
data: root.cp_value
Widget:
size_hint: 1, 0.1
Label:
font_size: '6pt'
text: _('If there is a fork of the blockchain, you need to configure your checkpoint in order to make sure that you are on the correct side of the fork. Enter a block number to fetch a checkpoint from your main server, and check its value from independent sources.')
halign: 'left'
text_size: self.width, None
size: self.texture_size
Widget:
size_hint: 1, 0.3
BoxLayout:
orientation: 'horizontal'
size_hint: 1, 0.2
Button:
text: _('Cancel')
size_hint: 0.5, None
height: '48dp'
on_release: popup.dismiss()
Button:
text: _('OK')
size_hint: 0.5, None
height: '48dp'
on_release:
root.callback(root.cp_height, root.cp_value)
popup.dismiss()
''')
class CheckpointDialog(Factory.Popup):
    """Popup for inspecting and changing the blockchain checkpoint.

    `cp_height`/`cp_value` mirror the current checkpoint; editing the height
    field fetches the corresponding block hash from the main server.
    """

    def __init__(self, network, callback):
        Factory.Popup.__init__(self)
        self.network = network
        self.cp_height, self.cp_value = self.network.blockchain.get_checkpoint()
        self.callback = callback

    def on_height_str(self):
        """Fetch and display the block hash for the height the user typed."""
        # Bug fix: the bare `except:` here also swallowed KeyboardInterrupt /
        # SystemExit; only the int() conversion can fail, so catch ValueError.
        try:
            new_height = int(self.ids.height_input.text)
        except ValueError:
            new_height = self.cp_height
        self.ids.height_input.text = '%d'%new_height
        if new_height == self.cp_height:
            return
        try:
            header = self.network.synchronous_get(('blockchain.block.get_header', [new_height]), 5)
            new_value = self.network.blockchain.hash_header(header)
        except BaseException as e:
            # Any network/server failure is reported and treated as "no hash".
            self.network.print_error(str(e))
            new_value = ''
        if new_value:
            self.cp_height = new_height
            self.cp_value = new_value
| StarcoderdataPython |
87882 | <reponame>kemusiro/microc-compiler<filename>llvmgen.py
from util import *
# Return the LLVM name for an identifier, preferring the function's own
# symbol table over the enclosing program's; None if the name is unknown.
def llvm_id(func, value):
    for table in (func.symtable, func.program.symtable):
        if table.get_sym(value) is not None:
            return table.get_sym(value, 'llvm_name')
    return None
def llvm_num(func, value):
    # Numeric literals are emitted verbatim; `func` is unused but kept so
    # all llvm_* converters share the same signature.
    return value
def llvm_type(func, value):
    """Map a source type name ('int' / 'boolean') to its LLVM IR type.

    Raises KeyError for unknown type names; `func` is unused but kept for a
    uniform converter signature.
    """
    return {'int': 'i32', 'boolean': 'i1'}[value]
def llvm_label(func, value):
    # Labels are emitted verbatim; `func` is unused but kept so all llvm_*
    # converters share the same signature.
    return value
def llvm_term(func, term):
    """Convert a term to its LLVM text via the matching llvm_* converter.

    The predicates are tried in the original order (id, num, label, type);
    anything else falls through to tval().
    """
    dispatch = (
        (is_id, lambda t: llvm_id(func, id_name(t))),
        (is_num, lambda t: llvm_num(func, num_value(t))),
        (is_label, lambda t: llvm_label(func, label_name(t))),
        (is_type, lambda t: llvm_type(func, type_name(t))),
    )
    for predicate, convert in dispatch:
        if predicate(term):
            return convert(term)
    return tval(term)
# Build the LLVM representation of a phi function's argument list.
def create_phi_arg(func, terms):
    """Return a list of [value, '%pred_label'] pairs, one per phi argument."""
    result = []
    for pos, t in enumerate(terms):
        if is_id(t):
            # An SSA variable knows which basic block defined it.
            bb = func.symtable.get_sym(id_name(t), 'bb')
            result.append([llvm_term(func, t),
                           '%{}'.format(llvm_label(func, bb))])
        elif is_num(t):
            # For a numeric literal, assume it arrives from the predecessor
            # block whose position matches this argument's position in the
            # phi function.
            curbb = func.context['current_bb']
            pred_bb = func.bbtable[curbb].pred[pos]
            result.append([llvm_term(func, t),
                           '%{}'.format(llvm_label(func, func.bbtable[pred_bb].name))])
        else:
            print('WARNING: invalid phi arg {}'.format(t))
            result.append([])
    return result
# Convert a single intermediate-code instruction to LLVM IR text, appending
# the emitted line to `result`.
def gen_inst(func, inst, result):
    llvm_binop = {'+': 'add', '-': 'sub', '*': 'mul', '/': 'sdiv'}
    llvm_relop = {'<': 'slt', '<=': 'sle', '>': 'sgt', '>=': 'sge', '==': 'eq', '!=': 'ne'}
    if op(inst) == 'deflabel':
        result.append('{}:'.format(llvm_term(func, right(inst, 1))))
    elif op(inst) == 'defparam':
        # Parameters are declared in the function signature; nothing to emit.
        pass
    elif op(inst) == 'goto':
        result.append(' br label %{}'.format(llvm_term(func, right(inst, 1))))
    elif op(inst) == 'if':
        # Conditional branch: operand 1 is the condition, 2/3 the targets.
        result.append(' br {} {}, label %{}, label %{}'
                      .format(llvm_type(func, term_type(func, right(inst, 1))),
                              llvm_term(func, right(inst, 1)),
                              llvm_term(func, right(inst, 2)),
                              llvm_term(func, right(inst, 3))))
    elif op(inst) == 'phi':
        phiarg = create_phi_arg(func, right(inst))
        argstr = []
        for a in phiarg:
            argstr.append('[{}, {}]'.format(*a))
        result.append(' {} = phi {} {}'
                      .format(llvm_term(func, left(inst)),
                              llvm_type(func, term_type(func, left(inst))),
                              ', '.join(argstr)))
    elif op(inst) == 'return':
        result.append(' ret {} {}'
                      .format(llvm_type(func, term_type(func, right(inst, 1))),
                              llvm_term(func, right(inst, 1))))
    elif op(inst) == 'call':
        # Operand 1 is the callee; the rest are arguments.
        argstr = []
        for a in right(inst)[1:]:
            argstr.append('{} {}'.format(llvm_type(func, term_type(func, a)),
                                         llvm_term(func, a)))
        result.append(' {} = call i32 {} ({})'
                      .format(llvm_term(func, left(inst)),
                              llvm_term(func, right(inst, 1)),
                              ', '.join(argstr)))
    elif op(inst) in llvm_binop:
        result.append(' {} = {} {} {}, {}'
                      .format(llvm_term(func, left(inst)),
                              llvm_binop[op(inst)],
                              llvm_type(func, term_type(func, right(inst, 1))),
                              llvm_term(func, right(inst, 1)),
                              llvm_term(func, right(inst, 2))))
    elif op(inst) in llvm_relop:
        result.append(' {} = icmp {} {} {}, {}'
                      .format(llvm_term(func, left(inst)),
                              llvm_relop[op(inst)],
                              llvm_type(func, term_type(func, right(inst, 1))),
                              llvm_term(func, right(inst, 1)),
                              llvm_term(func, right(inst, 2))))
    elif op(inst) == '=':
        # A copy statement has no direct LLVM IR form (?), so it is emitted
        # as an addition with zero instead.
        result.append(' {} = {} {} {}, {}'
                      .format(llvm_term(func, left(inst)),
                              llvm_binop['+'],
                              llvm_type(func, term_type(func, right(inst, 1))),
                              llvm_term(func, right(inst, 1)),
                              llvm_num(func, '0')))
# Generate LLVM IR for the whole program.
def llvmgen(p):
    """Return the program `p` rendered as a list of LLVM IR text lines."""
    # Register identifier names that follow LLVM naming conventions:
    # functions become globals prefixed with '@'.
    for item in p.symtable.sym_enumerator(kind='func'):
        p.symtable.set_sym(item, {'llvm_name': '@{}'.format(item)})
    for func in p.func_list:
        counter = 0
        symtable = func.symtable
        for item in func.symtable.sym_enumerator(kind='ssavar'):
            origin = func.symtable.get_sym(item, 'origin')
            if func.symtable.get_sym(origin, 'kind') == 'temp':
                # Temporaries become sequentially numbered locals (%0, %1, ...).
                func.symtable.set_sym(item, {'llvm_name': '%{}'.format(counter)})
                counter += 1
            elif func.symtable.get_sym(origin, 'kind') in ('localvar', 'param'):
                # Named locals/params keep their SSA name, '%'-prefixed.
                func.symtable.set_sym(item, {'llvm_name': '%{}'.format(item)})
    result = []
    # Emit the LLVM IR of each function.
    for func in p.func_list:
        argstr = []
        for param in func.params:
            argstr.append('{} {}'.format(llvm_term(func, param[0]),
                                         llvm_term(func, param[1])))
        result.append('define {} {}({}) {{'.format(llvm_type(func, func.ftype),
                                                   llvm_id(func, func.name),
                                                   ', '.join(argstr)))
        # current_bb is consulted by create_phi_arg while emitting phi nodes.
        for k in func.bbtable.keys():
            func.context['current_bb'] = k
            for inst in func.bbtable[k].insts:
                gen_inst(func, inst, result)
        result.append('}')
        del func.context['current_bb']
    return result
| StarcoderdataPython |
149932 | # -*- coding: utf-8 -*-
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk
from .popup import PopUp
class Mechanic:
    """Mechanic's window of the workshop GTK application.

    Lets the mechanic close work orders, assign car parts to services and
    car models, and check or withdraw part quantities from stock.  All
    persistence goes through the supplied DB connection using
    parameterized queries.
    """

    def __init__(self, conndb):
        self.conn = conndb
        mechanik_builder = Gtk.Builder()
        mechanik_builder.add_from_file("glade/mechanic.glade")
        self.mechanik_window = mechanik_builder.get_object("mechanik_window")
        self.mechanik_comboboxtext1_1b = mechanik_builder.get_object("mechanik_comboboxtext1_1b")
        self.mechanik_button1_1c = mechanik_builder.get_object("mechanik_button1_1c")
        self.mechanik_comboboxtext2_1b = mechanik_builder.get_object("mechanik_comboboxtext2_1b")
        self.mechanik_comboboxtext2_2b = mechanik_builder.get_object("mechanik_comboboxtext2_2b")
        self.mechanik_comboboxtext2_3b = mechanik_builder.get_object("mechanik_comboboxtext2_3b")
        self.mechanik_button2_4a = mechanik_builder.get_object("mechanik_button2_4a")
        self.mechanik_button2_4b = mechanik_builder.get_object("mechanik_button2_4b")
        self.mechanik_comboboxtext3_1b = mechanik_builder.get_object("mechanik_comboboxtext3_1b")
        self.mechanik_entry3_2b = mechanik_builder.get_object("mechanik_entry3_2b")
        self.mechanik_button3_3a = mechanik_builder.get_object("mechanik_button3_3a")
        self.mechanik_button3_3b = mechanik_builder.get_object("mechanik_button3_3b")
        self.__load_ids(self.mechanik_comboboxtext1_1b, "zlecenia")
        self.__load_ids(self.mechanik_comboboxtext2_1b, "carparts")
        self.__load_ids(self.mechanik_comboboxtext2_2b, "uslugi")
        self.__load_ids(self.mechanik_comboboxtext2_3b, "cars")
        self.__load_ids(self.mechanik_comboboxtext3_1b, "carparts")
        mechanik_builder.connect_signals(self)
        self.mechanik_window.show()

    def __load_ids(self, comboboxtext, tablename):
        """Load identifiers (primary keys) from the given table into the combo box."""
        cur = self.conn.cursor()
        if tablename == "carparts":
            cur.execute("SELECT id FROM carparts;")
        elif tablename == "uslugi":
            cur.execute("SELECT nazwa FROM uslugi;")
        elif tablename == "cars":
            cur.execute("SELECT model FROM cars;")
        elif tablename == "zlecenia":
            # Only open (not yet realized) orders can still be completed.
            cur.execute("SELECT id FROM zlecenia WHERE data_real IS NULL;")
        idents = cur.fetchall()
        self.conn.commit()
        cur.close()
        for s in [str(i[0]) for i in idents]:
            comboboxtext.append_text(s)
        comboboxtext.set_active(0)

    def mechanik_window_destroy_cb(self, window):
        """Close the mechanic window and shut the application down."""
        self.conn.close()
        Gtk.main_quit()

    def mechanik_button1_1c_clicked_cb(self, button):
        """Handle the "complete order" button: stamp the order with now()."""
        ident = self.mechanik_comboboxtext1_1b.get_active_text() # SQL integer
        args = [int(ident)]
        cur = self.conn.cursor()
        try:
            # Bug fix: "UPDATE TABLE zlecenia" is invalid SQL; the TABLE
            # keyword does not belong in an UPDATE statement.
            cur.execute("UPDATE zlecenia SET data_real = now() WHERE id = %s", args)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are not swallowed.
            self.conn.rollback()
            PopUp("WYSTĄPIŁ BŁĄD WEWNĘTRZNY BAZY. PRZERWANO.").show()
        else:
            self.conn.commit()
            PopUp("ZLECENIE " + str(ident) + " ZOSTAŁO PO<NAME>OŃCZONE.").show()
        finally:
            cur.close()

    def mechanik_button2_4a_clicked_cb(self, button):
        """Handle assigning a car part to a service (czeusl link table)."""
        ident = self.mechanik_comboboxtext2_1b.get_active_text() # SQL integer
        nazwa = self.mechanik_comboboxtext2_2b.get_active_text() # SQL text
        args = [int(ident), nazwa]
        cur = self.conn.cursor()
        try:
            cur.execute("INSERT INTO czeusl(cze_id, usl_nazwa) VALUES(%s, %s);", args)
        except Exception:
            self.conn.rollback()
            # The redundant cur.close() here was removed: `finally` closes it.
            PopUp("WYSTĄPIŁ BŁĄD WEWNĘTRZNY BAZY. PRZERWANO.").show()
        else:
            self.conn.commit()
            PopUp("POMYŚLNIE PRZYPISANO CZĘŚĆ DO USŁUGI.").show()
        finally:
            cur.close()

    def mechanik_button2_4b_clicked_cb(self, button):
        """Handle assigning a car part to a car model (czesam link table)."""
        ident = self.mechanik_comboboxtext2_1b.get_active_text() # SQL integer
        model = self.mechanik_comboboxtext2_3b.get_active_text() # SQL text
        args = [int(ident), model]
        cur = self.conn.cursor()
        try:
            cur.execute("INSERT INTO czesam(cze_id, sam_model) VALUES(%s, %s);", args)
        except Exception:
            self.conn.rollback()
            PopUp("WYSTĄPIŁ BŁĄD WEWNĘTRZNY BAZY. PRZERWANO.").show()
        else:
            self.conn.commit()
            PopUp("POMYŚLNIE PRZYPISANO CZĘŚĆ DO MODELU SAMOCHODU.").show()
        finally:
            cur.close()

    def mechanik_button3_3a_clicked_cb(self, button):
        """Handle the "show part quantity" button."""
        ident = self.mechanik_comboboxtext3_1b.get_active_text() # SQL integer
        args = [int(ident)]
        cur = self.conn.cursor()
        try:
            cur.execute("SELECT ilosc FROM carparts WHERE id = %s", args)
            wyn = cur.fetchone()[0]
        except Exception:
            self.conn.rollback()
            PopUp("WYSTĄPIŁ BŁĄD WEWNĘTRZNY BAZY. PRZERWANO.").show()
        else:
            self.conn.commit()
            PopUp("W MAGAZYNIE ZNAJDUJE SIĘ " + str(wyn) +
                  " CZĘŚCI NUMER " + str(ident) + ".").show()
        finally:
            cur.close()

    def mechanik_button3_3b_clicked_cb(self, button):
        """Handle withdrawing a given quantity of a part from stock."""
        ident = self.mechanik_comboboxtext3_1b.get_active_text() # SQL integer
        ilosc = self.mechanik_entry3_2b.get_text() # SQL integer
        args = [int(ilosc), int(ident)]
        cur = self.conn.cursor()
        try:
            # Bug fix: "UPDATE TABLE carparts" is invalid SQL ("TABLE" removed).
            cur.execute("UPDATE carparts SET ilosc = ilosc-%s WHERE id = %s", args)
        except Exception:
            self.conn.rollback()
            PopUp("WYSTĄPIŁ BŁĄD WEWNĘTRZNY BAZY. PRZERWANO.").show()
        else:
            self.conn.commit()
            PopUp("POBRANO " + str(ilosc) +
                  " <NAME>ŚCI NUMER " + str(ident) + ".").show()
        finally:
            cur.close()
| StarcoderdataPython |
5011775 | from Urutu import *
import numpy as np
@Urutu("CU")
def add(e,f,d,a,b,c):
    # Urutu translates this body into a CUDA kernel; `tx` is the implicit
    # per-thread index supplied by the framework (one element per thread).
    c[tx] = a[tx] + b[tx]
    d[tx] = a[tx] - b[tx]
    e[tx] = a[tx] * b[tx]
    # NOTE(review): this duplicates c (a + b); possibly division was
    # intended here -- confirm against the original intent.
    f[tx] = a[tx] + b[tx]
    return c,d,e,f
# Driver (Python 2 print syntax): two random integer vectors and four
# output buffers of matching shape.
a = np.random.randint(10,size=100)
b = np.random.randint(10,size=100)
c = np.empty_like(b)
d = np.empty_like(b)
e = np.empty_like(b)
f = np.empty_like(b)
# The two leading list arguments are the kernel launch configuration
# (presumably grid and block dimensions -- confirm against the Urutu API).
print a,b,add([100,1,1],[1,1,1],e,f,d,a,b,c)
| StarcoderdataPython |
3479354 | """google_calendar_helpers
Simple helpers to deal with Google calendar, and the replies it sends.
"""
from datetime import datetime
from typing import Any, Dict, List, Union
from dateutil import parser
from ..classes.calendar_event_class import CalendarEvent
def convert_events(
    events: List[CalendarEvent], format_string: str
) -> List[CalendarEvent]:
    """convert_events

    Render each event's start/end timestamps into human-readable strings
    using *format_string*, returning new CalendarEvent instances.
    """
    return [
        CalendarEvent(
            name=event.name,
            start=get_time(event.start).strftime(format_string),
            end=get_time(event.end).strftime(format_string),
        )
        for event in events
    ]
def get_time(time_to_convert: str) -> datetime:
    """get_time

    Parse a Google Calendar timestamp.  Google returns either a 'dateTime'
    or a bare 'date' string depending on whether the event is timed or
    all-day; dateutil's parser accepts both forms.
    """
    return parser.parse(time_to_convert)
def format_google_events(
    events_list: List[Dict[str, Any]], diary_date: str
) -> List[CalendarEvent]:
    """format_google_events

    Formats a list of GCal events down to the event name, and the
    start and end date of the event.  Only events whose start date equals
    *diary_date* (an ISO 'YYYY-MM-DD' string) are kept.
    """
    filtered_events: List[CalendarEvent] = []
    for event in events_list:
        # Timed events carry 'dateTime'; all-day events carry 'date' (EAFP).
        try:
            event_start = event["start"]["dateTime"]
            event_end = event["end"]["dateTime"]
        except KeyError:
            event_start = event["start"]["date"]
            event_end = event["end"]["date"]
        event_date: str = str(get_time(event_start).date())
        # If its an event not from today, then don't show it.
        # This is needed since it can return some late events somehow.
        if event_date != diary_date:
            continue
        filtered_events.append(
            CalendarEvent(name=event["summary"], start=event_start, end=event_end)
        )
    return filtered_events
def create_google_event(event: CalendarEvent, timezone: str) -> Dict[str, Any]:
    """create_google_event

    Build the Google Calendar API request body for *event*, attaching the
    given *timezone* to both endpoints.
    """
    def _endpoint(timestamp: str) -> Dict[str, Any]:
        return {"timeZone": timezone, "dateTime": parser.parse(timestamp).isoformat()}

    return {
        "summary": event.name,
        "start": _endpoint(event.start),
        "end": _endpoint(event.end),
    }
def get_calendar_objects(
    events: Union[List[CalendarEvent], List[Dict[str, Any]]]
) -> List[CalendarEvent]:
    """get_calendar_objects

    Convert the loaded dicts to Objects, if they are not already.
    This is easier for a number of reasons, the main of which is
    that naming is kept consistent, versus dicts which require more
    careful usage.

    Note: the result is not input-ordered -- existing CalendarEvent
    objects come first, followed by the converted dicts.
    """
    events_to_convert: List[Dict[str, Any]] = [
        event for event in events if isinstance(event, dict)
    ]
    event_objects: List[CalendarEvent] = [
        event for event in events if not isinstance(event, dict)
    ]
    for event in events_to_convert:
        event_objects.append(
            CalendarEvent(name=event["name"], start=event["start"], end=event["end"])
        )
    return event_objects
| StarcoderdataPython |
9761603 | """Configuration of PMF with PSIS-LOO for BO optimization."""
import pmf_objectives as model
import numpy as np
import time
import logging
logger = logging.getLogger(__name__)
# BO configuration
n_init = 2
num_iterations = n_init + 1000
# Search-space bounds per dimension:
# [K, log10(mu_t), log10(sigma_t), log10(mu_b), log10(sigma_b)]
lower = np.array([1, -4,-4,-4,-4])
upper = np.array([100, 2,2,2,2])
X_init = None
Y_init = None
def abcd2musigma(a, b, c, d, env=np):
    """Convert two Gamma (shape, rate) pairs to (mean, std) pairs.

    Returns (mu_t, sigma_t, mu_b, sigma_b); `env` supplies sqrt so either
    numpy arrays or scalars work.
    """
    mean_t, std_t = a / b, env.sqrt(a) / b
    mean_b, std_b = c / d, env.sqrt(c) / d
    return mean_t, std_t, mean_b, std_b
def musigma2abcd(mut, sigmat, mub, sigmab):
    """Inverse of abcd2musigma: (mean, std) pairs back to Gamma (shape, rate)."""
    var_t = sigmat * sigmat
    var_b = sigmab * sigmab
    return mut * mut / var_t, mut / var_t, mub * mub / var_b, mub / var_b
def objective_function(x):
    """Evaluate PMF at hyperparameters *x* and return the negated objective.

    x = [K, log10(mu_t), log10(sigma_t), log10(mu_b), log10(sigma_b)].
    Uses certain global variable OUTFILE (results are appended to
    OUTFILE + ".csv").
    """
    start_time = time.time()
    #K, a, b, c, d = int(np.round(x[0])), 10**x[1], 10**x[2], 10**x[3], 10**x[4] # parametrization abcd
    K, mut, sigmat, mub, sigmab = int(np.round(x[0])), 10**x[1], 10**x[2], 10**x[3], 10**x[4] #parametrization mu-sigma
    a, b, c, d = musigma2abcd(mut, sigmat, mub, sigmab)
    logger.info("[objective_function] Evaluating at %s %s %s %s %s" % (K,a,b,c,d))
    res = model.posterior_objective_psisloo(K=K, a=a, b=b, c=c, d=d, seed=123,
                                            NSAMPLES=100, verbose=True,
                                            psis_data=model.DATA_TRAIN)
    obj = res["obj"] # train data LOO
    fitting_time = time.time()
    loo = res["model"].psis(model.DATA_TEST, NSAMPLES=100) # test data LOO
    end_time = time.time()
    logger.info("[objective_function] writting to %s" % (OUTFILE+".csv"))
    f = open(OUTFILE+".csv", "a")
    f.write("%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\n" % (K,a,b,c,d,obj,loo,start_time,fitting_time,end_time))
    f.close()
    logger.info("[objective_function] Evaluating at %s %s %s %s %s => train:%s test:%s" % (K,a,b,c,d,obj,loo))
    return -obj # BO minimizes its objective
| StarcoderdataPython |
6450192 | <filename>construct_dict.py
import os
import torch
import torch.nn.functional as F
from torchvision import datasets, transforms
from torch.autograd import Variable
import torch.nn as nn
import load_activations as la
import hierarhical_tree_gpu as ht
from load_data import get_data
# from train_utils import train, test
from qlnet_model_quantized import BS_Net
from train_utils_quantized import train, test
from training_parameters import get_params
def if_exist(path):
    """Create directory *path* (including parents) if it does not exist.

    Uses makedirs(..., exist_ok=True) to avoid the check-then-create race
    of the original `if not os.path.exists(path): os.makedirs(path)`.
    """
    os.makedirs(path, exist_ok=True)
## 1. Load model
args = get_params()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = BS_Net()
model.load_state_dict(torch.load('mnist_baseline.pth'))
model.to(device).eval()
case_number = args.case_number
if case_number >= 4:
if case_number == 4:
layer_id = 0
layer0 = 'layer' + str(layer_id)
activation_folder0 = os.path.join('./activations', layer0)
if_exist(activation_folder0)
### 2. Load train data
train_loader = get_data(args, dataset='mnist', ifTrain=True)
## 2. Extract activations for futher look-up dictionary construction
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
_, activations = model(data)
#print(len(activations))
activation0 = activations[0].cpu().data.numpy()
torch.save(activation0, os.path.join(activation_folder0, layer0 + '_'+str(batch_idx)+'.npy'))
if batch_idx>6:break
# 3 Construct Look-up Dictionary
# parameters for look-up dictionary construction
n_cl = 10
max_depth = 1
# Load activations
print('Load activations')
data0 = la.load_data(activation_folder0) # load patched data of input layer
print('Construct tree input layer')
tree0 = ht.construct(data0, n_cl, 1, max_depth)
torch.save(tree0, 'tree_' + layer0)
elif case_number == 5:
layer_id = 0
layer0 = 'layer' + str(layer_id)
activation_folder0 = os.path.join('./activations', layer0)
if_exist(activation_folder0)
layer_id = 1
layer1 = 'layer' + str(layer_id)
activation_folder1 = os.path.join('./activations', layer1)
if_exist(activation_folder1)
### 2. Load train data
train_loader = get_data(args, dataset='mnist', ifTrain=True)
## 2. Extract activations for futher look-up dictionary construction
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
_, activations = model(data)
activation0 = activations[0].cpu().data.numpy()
activation1 = activations[1].cpu().data.numpy()
torch.save(activation0, os.path.join(activation_folder0, layer0 + '_'+str(batch_idx)+'.npy'))
torch.save(activation1, os.path.join(activation_folder1, layer1 + '_'+str(batch_idx)+'.npy'))
if batch_idx>6:break
# 3 Construct Look-up Dictionary
# parameters for look-up dictionary construction
n_cl = 10
density = 30
max_depth = 1
# Load activations
print('Load activations')
data0 = la.load_data(activation_folder0) # load patched data of input layer
data1 = la.load_data(activation_folder1) # load patched data of layer1
print('Construct tree input layer')
tree0 = ht.construct(data0, n_cl, 1, max_depth)
torch.save(tree0, 'tree_' + layer0)
print('Construct tree layer 1')
tree1 = ht.construct(data1, n_cl, density, max_depth)
torch.save(tree1, 'tree_' + layer1)
elif case_number == 6:
layer_id = 0
layer0 = 'layer' + str(layer_id)
activation_folder0 = os.path.join('./activations', layer0)
if_exist(activation_folder0)
layer_id = 2
layer2 = 'layer' + str(layer_id)
activation_folder2 = os.path.join('./activations', layer2)
if_exist(activation_folder2)
### 2. Load train data
train_loader = get_data(args, dataset='mnist', ifTrain=True)
## 2. Extract activations for futher look-up dictionary construction
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
_, activations = model(data)
activation0 = activations[0].cpu().data.numpy()
activation2 = activations[2].cpu().data.numpy()
torch.save(activation0, os.path.join(activation_folder0, layer0 + '_'+str(batch_idx)+'.npy'))
torch.save(activation2, os.path.join(activation_folder2, layer2 + '_'+str(batch_idx)+'.npy'))
if batch_idx>6:break
# 3 Construct Look-up Dictionary
# parameters for look-up dictionary construction
n_cl = 10
density = 30
max_depth = 1
# Load activations
print('Load activations')
data0 = la.load_data(activation_folder0) # load patched data of input layer
data2 = la.load_data(activation_folder2) # load patched data of layer2
print('Construct tree input layer')
tree0 = ht.construct(data0, n_cl, 1, max_depth)
torch.save(tree0, 'tree_' + layer0)
print('Construct tree layer 2')
tree2 = ht.construct(data2, n_cl, density, max_depth)
torch.save(tree2, 'tree_' + layer2)
# Case 7: same pipeline as the other cases, but for all three layers at once
# (input layer 0, layer 1 and layer 2): prepare folders, dump activations for
# a handful of batches, then build one look-up tree per layer.
elif case_number == 7:
layer_id = 0
layer0 = 'layer' + str(layer_id)
activation_folder0 = os.path.join('./activations', layer0)
if_exist(activation_folder0)
layer_id = 1
layer1 = 'layer' + str(layer_id)
activation_folder1 = os.path.join('./activations', layer1)
if_exist(activation_folder1)
layer_id = 2
layer2 = 'layer' + str(layer_id)
activation_folder2 = os.path.join('./activations', layer2)
if_exist(activation_folder2)
### 2. Load train data
train_loader = get_data(args, dataset='mnist', ifTrain=True)
## 2. Extract activations for futher look-up dictionary construction
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
_, activations = model(data)
#print(len(activations))
activation0 = activations[0].cpu().data.numpy()
activation1 = activations[1].cpu().data.numpy()
activation2 = activations[2].cpu().data.numpy()
torch.save(activation0, os.path.join(activation_folder0, layer0 + '_'+str(batch_idx)+'.npy'))
torch.save(activation1, os.path.join(activation_folder1, layer1 + '_'+str(batch_idx)+'.npy'))
torch.save(activation2, os.path.join(activation_folder2, layer2 + '_'+str(batch_idx)+'.npy'))
# Only the first 8 batches are dumped.
if batch_idx>6:break
# 3 Construct Look-up Dictionary
# parameters for look-up dictionary construction
n_cl = 10
density = 30
max_depth = 1
# Load activations
print('Load activations')
data0 = la.load_data(activation_folder0) # load patched data of input layer
data1 = la.load_data(activation_folder1) # load patched data of layer1
data2 = la.load_data(activation_folder2) # load patched data of layer2
print('Construct tree input layer')
# Input layer uses density 1; the hidden layers share `density`.
tree0 = ht.construct(data0, n_cl, 1, max_depth)
torch.save(tree0, 'tree_' + layer0)
print('Construct tree layer 1')
tree1 = ht.construct(data1, n_cl, density, max_depth)
torch.save(tree1, 'tree_' + layer1)
print('Construct tree layer 2')
tree2 = ht.construct(data2, n_cl, density, max_depth)
torch.save(tree2, 'tree_' + layer2)
# Fallback dispatch: the remaining single-/two-layer cases.
# Case 1 -> layer 1 only, case 2 -> layer 2 only, case 3 -> layers 1 and 2.
else:
if case_number == 1:
layer_id = 1
layer1 = 'layer' + str(layer_id)
activation_folder1 = os.path.join('./activations', layer1)
if_exist(activation_folder1)
### 2. Load train data
train_loader = get_data(args, dataset='mnist', ifTrain=True)
## 2. Extract activations for futher look-up dictionary construction
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
_, activations = model(data)
activation1 = activations[1].cpu().data.numpy()
torch.save(activation1, os.path.join(activation_folder1, layer1 + '_'+str(batch_idx)+'.npy'))
# Only the first 8 batches are dumped.
if batch_idx>6:break
# 3 Construct Look-up Dictionary
# parameters for look-up dictionary construction
n_cl = 10
density = 30
max_depth = 1
# Load activations
print('Load activations')
data1 = la.load_data(activation_folder1) # load patched data of layer1
print('Construct tree 1')
tree1 = ht.construct(data1, n_cl, density, max_depth)
torch.save(tree1, 'tree_' + layer1)
elif case_number == 2:
layer_id = 2
layer2 = 'layer' + str(layer_id)
activation_folder2 = os.path.join('./activations', layer2)
if_exist(activation_folder2)
### 2. Load train data
train_loader = get_data(args, dataset='mnist', ifTrain=True)
## 2. Extract activations for futher look-up dictionary construction
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
_, activations = model(data)
activation2 = activations[2].cpu().data.numpy()
torch.save(activation2, os.path.join(activation_folder2, layer2 + '_'+str(batch_idx)+'.npy'))
if batch_idx>6:break
# 3 Construct Look-up Dictionary
# parameters for look-up dictionary construction
n_cl = 10
density = 30
max_depth = 1
# Load activations
print('Load activations')
data2 = la.load_data(activation_folder2) # load patched data of layer2
print('Construct tree 2')
tree2 = ht.construct(data2, n_cl, density, max_depth)
torch.save(tree2, 'tree_' + layer2)
elif case_number == 3:
# Two-layer variant: layers 1 and 2 processed in one pass.
layer_id = 1
layer1 = 'layer' + str(layer_id)
activation_folder1 = os.path.join('./activations', layer1)
if_exist(activation_folder1)
layer_id = 2
layer2 = 'layer' + str(layer_id)
activation_folder2 = os.path.join('./activations', layer2)
if_exist(activation_folder2)
### 2. Load train data
train_loader = get_data(args, dataset='mnist', ifTrain=True)
## 2. Extract activations for futher look-up dictionary construction
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
_, activations = model(data)
activation1 = activations[1].cpu().data.numpy()
activation2 = activations[2].cpu().data.numpy()
torch.save(activation1, os.path.join(activation_folder1, layer1 + '_'+str(batch_idx)+'.npy'))
torch.save(activation2, os.path.join(activation_folder2, layer2 + '_'+str(batch_idx)+'.npy'))
if batch_idx>6:break
# 3 Construct Look-up Dictionary
# parameters for look-up dictionary construction
n_cl = 10
density = 30
max_depth = 1
# Load activations
print('Load activations')
data1 = la.load_data(activation_folder1) # load patched data of layer1
data2 = la.load_data(activation_folder2) # load patched data of layer2
print('Construct tree 1')
tree1 = ht.construct(data1, n_cl, density, max_depth)
torch.save(tree1, 'tree_' + layer1)
print('Construct tree 2')
tree2 = ht.construct(data2, n_cl, density, max_depth)
torch.save(tree2, 'tree_' + layer2)
| StarcoderdataPython |
8188404 | import os
import numpy as np
from random import shuffle
from collections import namedtuple
from glob import glob
import tensorflow as tf
from tensorflow.keras.optimizers import Adam
from tf2_module import build_generator, build_discriminator_classifier, softmax_criterion
from tf2_utils import get_now_datetime, save_midis
# Binary domain classifier (domain A vs. domain B music phrases) used to
# evaluate CycleGAN-style symbolic-music style transfer. Built on TF2 eager
# mode with tf.train.Checkpoint-based persistence.
class Classifier(object):
# Reads all hyper-parameters from the parsed CLI `args`, builds the model
# and prepares checkpointing.
def __init__(self, args):
self.dataset_A_dir = args.dataset_A_dir
self.dataset_B_dir = args.dataset_B_dir
self.sample_dir = args.sample_dir
self.batch_size = args.batch_size
self.time_step = args.time_step
self.pitch_range = args.pitch_range
self.input_c_dim = args.input_nc # number of input image channels
# sigma_c: std-dev of Gaussian noise added to classifier inputs;
# sigma_d: noise level used by the generator/discriminator test runs
# (appears in test-output directory names below).
self.sigma_c = args.sigma_c
self.sigma_d = args.sigma_d
self.lr = args.lr
self.model = args.model
self.generator = build_generator
self.discriminator = build_discriminator_classifier
# Bundle the network-shape options into an immutable record.
OPTIONS = namedtuple('OPTIONS', 'batch_size '
'time_step '
'input_nc '
'output_nc '
'pitch_range '
'gf_dim '
'df_dim '
'is_training')
self.options = OPTIONS._make((args.batch_size,
args.time_step,
args.input_nc,
args.output_nc,
args.pitch_range,
args.ngf,
args.ndf,
args.phase == 'train'))
# Timestamp used to namespace checkpoints and test outputs.
self.now_datetime = get_now_datetime()
self._build_model(args)
print("Initializing classifier...")
# Instantiate the classifier network, its optimizer and the checkpoint
# machinery (checkpoints are namespaced by datasets, timestamp and sigma_c).
def _build_model(self, args):
# build classifier
self.classifier = self.discriminator(self.options,
name='Classifier')
# optimizer
self.classifier_optimizer = Adam(self.lr,
beta_1=args.beta1)
# checkpoints
model_name = "classifier.model"
model_dir = "classifier_{}2{}_{}_{}".format(self.dataset_A_dir,
self.dataset_B_dir,
self.now_datetime,
str(self.sigma_c))
self.checkpoint_dir = os.path.join(args.checkpoint_dir,
model_dir,
model_name)
if not os.path.exists(self.checkpoint_dir):
os.makedirs(self.checkpoint_dir)
self.checkpoint = tf.train.Checkpoint(classifier_optimizer=self.classifier_optimizer,
classifier=self.classifier)
self.checkpoint_manager = tf.train.CheckpointManager(self.checkpoint,
self.checkpoint_dir,
max_to_keep=5)
# Train the classifier on phrases from datasets A and B, reporting test
# accuracy as it goes. Samples are rescaled from [0, 1] to [-1, 1] and the
# *test* set is perturbed once with Gaussian noise of std sigma_c.
def train(self, args):
# create training list (origin data with corresponding label)
# Label for A is (1, 0), for B is (0, 1)
dataA = glob('./datasets/{}/train/*.*'.format(self.dataset_A_dir))
dataB = glob('./datasets/{}/train/*.*'.format(self.dataset_B_dir))
labelA = [(1.0, 0.0) for _ in range(len(dataA))]
labelB = [(0.0, 1.0) for _ in range(len(dataB))]
data_origin = dataA + dataB
label_origin = labelA + labelB
training_list = [pair for pair in zip(data_origin, label_origin)]
print('Successfully create training list!')
# create test list (origin data with corresponding label)
dataA = glob('./datasets/{}/test/*.*'.format(self.dataset_A_dir))
dataB = glob('./datasets/{}/test/*.*'.format(self.dataset_B_dir))
labelA = [(1.0, 0.0) for _ in range(len(dataA))]
labelB = [(0.0, 1.0) for _ in range(len(dataB))]
data_origin = dataA + dataB
label_origin = labelA + labelB
testing_list = [pair for pair in zip(data_origin, label_origin)]
print('Successfully create testing list!')
# Entire test set is loaded into memory up front; rescale to [-1, 1].
data_test = [np.load(pair[0]) * 2. - 1. for pair in testing_list]
data_test = np.array(data_test).astype(np.float32)
gaussian_noise = np.random.normal(0,
self.sigma_c,
[data_test.shape[0],
data_test.shape[1],
data_test.shape[2],
data_test.shape[3]])
data_test += gaussian_noise
label_test = [pair[1] for pair in testing_list]
label_test = np.array(label_test).astype(np.float32).reshape(len(label_test), 2)
if args.continue_train:
if self.checkpoint.restore(self.checkpoint_manager.latest_checkpoint):
print(" [*] Load checkpoint succeeded!")
else:
print(" [!] Load checkpoint failed...")
counter = 1
for epoch in range(args.epoch):
# shuffle the training samples
shuffle(training_list)
# get the correct batch number
# NOTE(review): despite the name, `batch_idx` is the number of batches
# per epoch, not an index.
batch_idx = len(training_list) // self.batch_size
# learning rate would decay after certain epochs
# Linear decay from the current lr to 0 between epoch_step and epoch.
self.lr = self.lr if epoch < args.epoch_step else self.lr * (args.epoch-epoch) / (args.epoch-args.epoch_step)
for idx in range(batch_idx):
# data samples in batch
batch = training_list[idx * self.batch_size:(idx + 1) * self.batch_size]
batch_data = [np.load(pair[0]) * 2. - 1. for pair in batch]
batch_data = np.array(batch_data).astype(np.float32)
# data labels in batch
batch_label = [pair[1] for pair in batch]
batch_label = np.array(batch_label).astype(np.float32).reshape(len(batch_label), 2)
with tf.GradientTape(persistent=True) as tape:
# Origin samples passed through the classifier
origin = self.classifier(batch_data,
training=True)
# NOTE(review): the full test set is run through the network every
# training step just to report accuracy — expensive for large sets.
test = self.classifier(data_test,
training=True)
# loss
loss = softmax_criterion(origin, batch_label)
# test accuracy
test_softmax = tf.nn.softmax(test)
test_prediction = tf.equal(tf.argmax(test_softmax, 1), tf.argmax(label_test, 1))
test_accuracy = tf.reduce_mean(tf.cast(test_prediction, tf.float32))
# calculate gradients
classifier_gradients = tape.gradient(target=loss,
sources=self.classifier.trainable_variables)
# apply gradients to the optimizer
self.classifier_optimizer.apply_gradients(zip(classifier_gradients,
self.classifier.trainable_variables))
if idx % 100 == 0:
print('=================================================================')
print(("Epoch: [%2d] [%4d/%4d] loss: %6.2f, accuracy: %6.2f" %
(epoch, idx, batch_idx, loss, test_accuracy)))
counter += 1
print('=================================================================')
# `loss`/`test_accuracy` here are from the last batch of the epoch.
print(("Epoch: [%2d] loss: %6.2f, accuracy: %6.2f" % (epoch, loss, test_accuracy)))
# save the checkpoint per epoch
self.checkpoint_manager.save(epoch)
# Score previously generated origin/transfer/cycle phrase triples with the
# trained classifier, write MIDI files whose names carry the predicted
# probability, and emit a ranking file plus overall accuracies.
def test(self, args):
# load the origin samples in npy format and sorted in ascending order
sample_files_origin = glob('./test/{}2{}_{}_{}_{}/{}/npy/origin/*.*'.format(self.dataset_A_dir,
self.dataset_B_dir,
self.model,
self.sigma_d,
self.now_datetime,
args.which_direction))
# Sort numerically on the leading integer of the file name.
sample_files_origin.sort(key=lambda x: int(os.path.splitext(os.path.basename(x))[0].split('_')[0]))
# load the origin samples in npy format and sorted in ascending order
sample_files_transfer = glob('./test/{}2{}_{}_{}_{}/{}/npy/transfer/*.*'.format(self.dataset_A_dir,
self.dataset_B_dir,
self.model,
self.sigma_d,
self.now_datetime,
args.which_direction))
sample_files_transfer.sort(key=lambda x: int(os.path.splitext(os.path.basename(x))[0].split('_')[0]))
# load the origin samples in npy format and sorted in ascending order
sample_files_cycle = glob('./test/{}2{}_{}_{}_{}/{}/npy/cycle/*.*'.format(self.dataset_A_dir,
self.dataset_B_dir,
self.model,
self.sigma_d,
self.now_datetime,
args.which_direction))
sample_files_cycle.sort(key=lambda x: int(os.path.splitext(os.path.basename(x))[0].split('_')[0]))
# put the origin, transfer and cycle of the same phrase in one zip
sample_files = list(zip(sample_files_origin,
sample_files_transfer,
sample_files_cycle))
if self.checkpoint.restore(self.checkpoint_manager.latest_checkpoint):
print(" [*] Load checkpoint succeeded!")
else:
print(" [!] Load checkpoint failed...")
# create a test path to store the generated sample midi files attached with probability
test_dir_mid = os.path.join(args.test_dir, '{}2{}_{}_{}_{}/{}/mid_attach_prob'.format(self.dataset_A_dir,
self.dataset_B_dir,
self.model,
self.sigma_d,
self.now_datetime,
args.which_direction))
if not os.path.exists(test_dir_mid):
os.makedirs(test_dir_mid)
# Counters for how many phrases are classified as the *source* domain.
count_origin = 0
count_transfer = 0
count_cycle = 0
line_list = []
for idx in range(len(sample_files)):
print('Classifying midi: ', sample_files[idx])
# load sample phrases in npy formats
origin = np.load(sample_files[idx][0])
transfer = np.load(sample_files[idx][1])
cycle = np.load(sample_files[idx][2])
# get the probability for each sample phrase
# Inputs are rescaled from [0, 1] to [-1, 1], matching training.
origin_softmax = tf.nn.softmax(self.classifier(origin * 2. - 1.,
training=False))
transfer_softmax = tf.nn.softmax(self.classifier(transfer * 2. - 1.,
training=False))
cycle_softmax = tf.nn.softmax(self.classifier(cycle * 2. - 1.,
training=False))
origin_transfer_diff = np.abs(origin_softmax - transfer_softmax)
content_diff = np.mean((origin * 1.0 - transfer * 1.0) ** 2)
# labels: (1, 0) for A, (0, 1) for B
if args.which_direction == 'AtoB':
line_list.append((idx + 1,
content_diff,
origin_transfer_diff[0][0],
origin_softmax[0][0],
transfer_softmax[0][0],
cycle_softmax[0][0]))
# for the accuracy calculation
count_origin += 1 if np.argmax(origin_softmax[0]) == 0 else 0
count_transfer += 1 if np.argmax(transfer_softmax[0]) == 0 else 0
count_cycle += 1 if np.argmax(cycle_softmax[0]) == 0 else 0
# create paths for origin, transfer and cycle samples attached with probability
path_origin = os.path.join(test_dir_mid, '{}_origin_{}.mid'.format(idx + 1,
origin_softmax[0][0]))
path_transfer = os.path.join(test_dir_mid, '{}_transfer_{}.mid'.format(idx + 1,
transfer_softmax[0][0]))
path_cycle = os.path.join(test_dir_mid, '{}_cycle_{}.mid'.format(idx + 1,
cycle_softmax[0][0]))
else:
# BtoA direction: index 1 is the B-domain probability.
line_list.append((idx + 1,
content_diff,
origin_transfer_diff[0][1],
origin_softmax[0][1],
transfer_softmax[0][1],
cycle_softmax[0][1]))
# for the accuracy calculation
count_origin += 1 if np.argmax(origin_softmax[0]) == 1 else 0
count_transfer += 1 if np.argmax(transfer_softmax[0]) == 1 else 0
count_cycle += 1 if np.argmax(cycle_softmax[0]) == 1 else 0
# create paths for origin, transfer and cycle samples attached with probability
path_origin = os.path.join(test_dir_mid, '{}_origin_{}.mid'.format(idx + 1,
origin_softmax[0][1]))
path_transfer = os.path.join(test_dir_mid, '{}_transfer_{}.mid'.format(idx + 1,
transfer_softmax[0][1]))
path_cycle = os.path.join(test_dir_mid, '{}_cycle_{}.mid'.format(idx + 1,
cycle_softmax[0][1]))
# generate sample MIDI files
save_midis(origin, path_origin)
save_midis(transfer, path_transfer)
save_midis(cycle, path_cycle)
# sort the line_list based on origin_transfer_diff and write to a ranking txt file
line_list.sort(key=lambda x: x[2], reverse=True)
with open(os.path.join(test_dir_mid, 'Rankings_{}.txt'.format(args.which_direction)), 'w') as f:
f.write('Id Content_diff P_O - P_T Prob_Origin Prob_Transfer Prob_Cycle')
for i in range(len(line_list)):
f.writelines("\n%5d %5f %5f %5f %5f %5f" % (line_list[i][0],
line_list[i][1],
line_list[i][2],
line_list[i][3],
line_list[i][4],
line_list[i][5]))
# NOTE(review): redundant close — the `with` block already closes `f`.
f.close()
# calculate the accuracy
accuracy_origin = count_origin * 1.0 / len(sample_files)
accuracy_transfer = count_transfer * 1.0 / len(sample_files)
accuracy_cycle = count_cycle * 1.0 / len(sample_files)
print('Accuracy of this classifier on test datasets is :', accuracy_origin, accuracy_transfer, accuracy_cycle)
# Score one hard-coded famous song (origin vs. transferred version) and print
# the average source/target-domain probabilities per phrase.
def test_famous(self, args):
song_origin = np.load('./datasets/famous_songs/C2J/merged_npy/Scenes from Childhood (Schumann).npy')
song_transfer = np.load('./datasets/famous_songs/C2J/transfer/Scenes from Childhood (Schumann).npy')
print(song_origin.shape, song_transfer.shape)
if self.checkpoint.restore(self.checkpoint_manager.latest_checkpoint):
print(" [*] Load checkpoint succeeded!")
else:
print(" [!] Load checkpoint failed...")
# Running sums of per-phrase probabilities; averaged at the end.
sum_origin_A = 0
sum_origin_B = 0
sum_transfer_A = 0
sum_transfer_B = 0
for idx in range(song_transfer.shape[0]):
phrase_origin = song_origin[idx]
# Add batch and channel dimensions for the classifier.
phrase_origin = phrase_origin.reshape(1, phrase_origin.shape[0], phrase_origin.shape[1], 1)
origin_softmax = tf.nn.softmax(self.classifier(phrase_origin * 2. - 1.,
training=False))
phrase_transfer = song_transfer[idx]
phrase_transfer = phrase_transfer.reshape(1, phrase_transfer.shape[0], phrase_transfer.shape[1], 1)
transfer_softmax = tf.nn.softmax(self.classifier(phrase_transfer * 2. - 1.,
training=False))
sum_origin_A += origin_softmax[0][0]
sum_origin_B += origin_softmax[0][1]
sum_transfer_A += transfer_softmax[0][0]
sum_transfer_B += transfer_softmax[0][1]
print("origin, source:", sum_origin_A / song_transfer.shape[0],
"target:", sum_origin_B / song_transfer.shape[0])
print("transfer, source:", sum_transfer_A / song_transfer.shape[0],
"target:", sum_transfer_B / song_transfer.shape[0])
| StarcoderdataPython |
4908324 | <reponame>dh-ab93/OSS-SAKI<filename>4-deep-q-learning/traders/trusting_trader.py
from typing import List
from framework.company import Company
from framework.interface_expert import IExpert
from framework.interface_trader import ITrader
from framework.logger import logger
from framework.portfolio import Portfolio
from framework.stock_data import StockData
from framework.stock_market_data import StockMarketData
from framework.order import Order, OrderType
from framework.vote import Vote
class TrustingTrader(ITrader):
    """
    A trader that follows its experts' advice unconditionally.

    When both experts vote BUY, stock A is effectively preferred over stock B:
    the whole cash balance is spent on the first BUY vote processed, and
    company A is handled before company B.
    """

    def __init__(self, expert_a: IExpert, expert_b: IExpert, color: str = 'black', name: str = 'tt_trader'):
        """
        Constructor
        """
        super().__init__(color, name)
        assert expert_a is not None
        assert expert_b is not None
        self.__expert_a = expert_a
        self.__expert_b = expert_b

    def trade(self, portfolio: Portfolio, stock_market_data: StockMarketData) -> List[Order]:
        """
        Generate actions to be taken on the "stock market"

        Args:
          portfolio : current Portfolio of this traders
          stock_market_data : StockMarketData for evaluation

        Returns:
          A OrderList instance, may be empty never None
        """
        # Each company is advised by its dedicated expert.
        expert_for = {Company.A: self.__expert_a, Company.B: self.__expert_b}
        order_list: List[Order] = []
        for company in stock_market_data.get_companies():
            assert company in expert_for
            stock_data = stock_market_data[company]
            vote = expert_for[company].vote(stock_data)
            self.__follow_expert_vote(company, stock_data, vote, portfolio, order_list)
        return order_list

    def __follow_expert_vote(self, company: Company, stock_data: StockData, vote: Vote, portfolio: Portfolio,
                             order_list: List[Order]):
        """Translate one expert vote into at most one order appended to *order_list*."""
        assert company is not None
        assert stock_data is not None
        assert vote is not None
        assert portfolio is not None
        assert order_list is not None

        if vote == Vote.HOLD:
            # Nothing to do for a HOLD vote.
            logger.debug(f"{self.get_name()}: Got vote to hold {company}")
            return

        if vote == Vote.BUY:
            # Spend all available cash on this stock.
            stock_price = stock_data.get_last()[-1]
            amount_to_buy = int(portfolio.cash // stock_price)
            logger.debug(f"{self.get_name()}: Got vote to buy {company}: {amount_to_buy} shares a {stock_price}")
            if amount_to_buy > 0:
                order_list.append(Order(OrderType.BUY, company, amount_to_buy))
            return

        # Only SELL remains as a valid vote.
        assert vote == Vote.SELL
        amount_to_sell = portfolio.get_stock(company)
        logger.debug(f"{self.get_name()}: Got vote to sell {company}: {amount_to_sell} shares available")
        if amount_to_sell > 0:
            order_list.append(Order(OrderType.SELL, company, amount_to_sell))
| StarcoderdataPython |
1611647 | <filename>alipay/aop/api/domain/GFAOpenAPIAccountingAcceptance.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class GFAOpenAPIAccountingAcceptance(object):
    """Value object for the GFA open-API accounting-acceptance payload.

    Improvement over the generated original: ``to_alipay_dict`` and
    ``from_alipay_dict`` walk the shared ``_FIELD_NAMES`` tuple instead of
    repeating ~150 lines of per-field copy/paste, with identical behavior
    (same field order, same truthiness-based skipping of unset fields, same
    recursive ``to_alipay_dict`` handling for nested model values).
    """

    # Field order matches the original generated code; it also fixes the key
    # order produced by to_alipay_dict.
    _FIELD_NAMES = (
        'amount_map',
        'biz_bill_nos_map',
        'biz_elements',
        'biz_ev_code',
        'biz_pd_code',
        'cnl_ev_code',
        'cnl_pd_code',
        'gmt_service',
        'high_amount_map',
        'inst_code',
        'memo',
        'out_business_no',
        'properties',
        'service_type',
        'sub_out_business_no',
        'system_origin',
        'tnt_inst_id',
    )

    def __init__(self):
        # Every field starts unset (None); unset/falsy fields are omitted from
        # the serialized dict.
        for name in self._FIELD_NAMES:
            setattr(self, '_' + name, None)

    @property
    def amount_map(self):
        return self._amount_map

    @amount_map.setter
    def amount_map(self, value):
        self._amount_map = value

    @property
    def biz_bill_nos_map(self):
        return self._biz_bill_nos_map

    @biz_bill_nos_map.setter
    def biz_bill_nos_map(self, value):
        self._biz_bill_nos_map = value

    @property
    def biz_elements(self):
        return self._biz_elements

    @biz_elements.setter
    def biz_elements(self, value):
        self._biz_elements = value

    @property
    def biz_ev_code(self):
        return self._biz_ev_code

    @biz_ev_code.setter
    def biz_ev_code(self, value):
        self._biz_ev_code = value

    @property
    def biz_pd_code(self):
        return self._biz_pd_code

    @biz_pd_code.setter
    def biz_pd_code(self, value):
        self._biz_pd_code = value

    @property
    def cnl_ev_code(self):
        return self._cnl_ev_code

    @cnl_ev_code.setter
    def cnl_ev_code(self, value):
        self._cnl_ev_code = value

    @property
    def cnl_pd_code(self):
        return self._cnl_pd_code

    @cnl_pd_code.setter
    def cnl_pd_code(self, value):
        self._cnl_pd_code = value

    @property
    def gmt_service(self):
        return self._gmt_service

    @gmt_service.setter
    def gmt_service(self, value):
        self._gmt_service = value

    @property
    def high_amount_map(self):
        return self._high_amount_map

    @high_amount_map.setter
    def high_amount_map(self, value):
        self._high_amount_map = value

    @property
    def inst_code(self):
        return self._inst_code

    @inst_code.setter
    def inst_code(self, value):
        self._inst_code = value

    @property
    def memo(self):
        return self._memo

    @memo.setter
    def memo(self, value):
        self._memo = value

    @property
    def out_business_no(self):
        return self._out_business_no

    @out_business_no.setter
    def out_business_no(self, value):
        self._out_business_no = value

    @property
    def properties(self):
        return self._properties

    @properties.setter
    def properties(self, value):
        self._properties = value

    @property
    def service_type(self):
        return self._service_type

    @service_type.setter
    def service_type(self, value):
        self._service_type = value

    @property
    def sub_out_business_no(self):
        return self._sub_out_business_no

    @sub_out_business_no.setter
    def sub_out_business_no(self, value):
        self._sub_out_business_no = value

    @property
    def system_origin(self):
        return self._system_origin

    @system_origin.setter
    def system_origin(self, value):
        self._system_origin = value

    @property
    def tnt_inst_id(self):
        return self._tnt_inst_id

    @tnt_inst_id.setter
    def tnt_inst_id(self, value):
        self._tnt_inst_id = value

    def to_alipay_dict(self):
        """Serialize to a plain dict.

        Falsy fields are skipped (matching the generated SDK behavior) and
        values that expose their own ``to_alipay_dict`` are serialized
        recursively.
        """
        params = dict()
        for name in self._FIELD_NAMES:
            value = getattr(self, name)
            if value:
                if hasattr(value, 'to_alipay_dict'):
                    params[name] = value.to_alipay_dict()
                else:
                    params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an instance from dict *d*; returns None for a falsy input."""
        if not d:
            return None
        o = GFAOpenAPIAccountingAcceptance()
        for name in GFAOpenAPIAccountingAcceptance._FIELD_NAMES:
            if name in d:
                setattr(o, name, d[name])
        return o
| StarcoderdataPython |
5072243 | import math
def point_distance(point1, point2):
    """
    Returns the distance between two points given as tuples.

    Uses math.hypot, which is clearer than the manual sqrt-of-squares and
    avoids intermediate overflow/underflow for very large or very small
    coordinate differences.
    """
    return math.hypot(point1[0] - point2[0], point1[1] - point2[1])
def multidimensional_distance(point1, point2):
    """
    Return the euclidean distance between two points in multidimensional space
    given as tuples.
    """
    # Dimension mismatch is reported and None is returned, matching the
    # module's print-and-return-None error convention (see quadratic_roots).
    if len(point1) != len(point2):
        print('Points of unequal dimensions')
        return None
    squared_sum = sum((a - b) ** 2 for a, b in zip(point1, point2))
    return math.sqrt(squared_sum)
def find_delta(a, b, c):
    """
    Function that returns the delta (discriminant) of a quadratic equation.

    :raises ValueError: if ``a`` is zero (the equation is not quadratic).
    """
    if a == 0:
        raise ValueError("a is 0! [y = ax^2 + bx + c , a != 0]")
    return b**2 - 4*a*c
def find_vertex(a, b, c):
    """
    Function that returns the vertex of a parabola, given three coefficients.

    :returns: tuple ``(-b/(2a), -delta/(4a))``
    """
    # x-coordinate of the vertex lies on the axis of symmetry.
    x_vertex = -b / (2 * a)
    y_vertex = -(find_delta(a, b, c) / (4 * a))
    return (x_vertex, y_vertex)
def find_focus(a, b, c):
    """
    Function that returns the focus of a parabola, given three coefficients.

    :returns: tuple ``(-b/(2a), (1 - delta)/(4a))``
    """
    # The focus shares the vertex's x-coordinate (axis of symmetry).
    x_focus = -b / (2 * a)
    y_focus = (1 - find_delta(a, b, c)) / (4 * a)
    return (x_focus, y_focus)
def binomial_coefficient(n, k):
    """
    Finds the binomial coefficient, given n and k.

    Equals:
        n! / (k! * (n-k)!)

    (Fix: the original docstring stated the wrong denominator, (n-1)!.)
    """
    # Integer division is exact here: k! * (n-k)! always divides n!.
    return math.factorial(n) // (math.factorial(k) * math.factorial(n - k))
def quadratic_roots(a, b, c):
    """
    Returns a tuple containing the roots of the quadratic equation ax^2+bx+c.
    If the roots are imaginary then an error message is displayed and None is
    returned.
    """
    discriminant = b**2 - 4 * a * c
    # Negative discriminant means complex roots, which this helper does not
    # compute; it reports and bails out instead (module convention).
    if discriminant < 0:
        print("Imaginary roots")
        return None
    root_term = discriminant ** (1/2)
    denominator = 2 * a
    return ((-b + root_term) / denominator, (-b - root_term) / denominator)
# Run the module's embedded doctests when executed as a script.
if __name__ == '__main__':
import doctest
doctest.testmod()
| StarcoderdataPython |
5069680 | import pytest
import numpy as np
import scipy.stats
from maxent_graph.poibin import dc_pb1, dc_pb2, dc_fft_pb
def test_dc_pb():
    """Both divide-and-conquer Poisson-binomial pmfs agree and normalize."""
    # todo: handle empty list case
    for size in (10, 100, 1000, 10_000):
        probs = np.random.rand(size)
        pmf_dc1 = dc_pb1(probs)
        pmf_dc2 = dc_pb2(probs)
        # both DC methods should give same results
        assert np.allclose(pmf_dc1, pmf_dc2)
        # sum of pmf values should be 1
        assert np.sum(pmf_dc1) == pytest.approx(1)
        # fft method should be close to DC
        assert np.allclose(pmf_dc1, dc_fft_pb(probs))
def test_dc_fft():
    """The FFT pmf has the right length and matches Binomial when all p=0.5."""
    for size in (10, 100, 1000, 10_000, 50_000):
        # A pmf over `size` trials has size + 1 support points (0..size).
        assert len(dc_fft_pb(np.random.rand(size))) == size + 1

        # With identical p=0.5 the Poisson binomial reduces to
        # Binomial(size, 0.5); compare within a given error.
        uniform_probs = np.repeat(0.5, size)
        pmf = dc_fft_pb(uniform_probs)
        # Only compare the non-negligible entries: smaller probabilities
        # carry a larger relative error.
        significant = np.argwhere(pmf > 1e-5)
        expected = np.array(
            [scipy.stats.binom.pmf(i, size, 0.5).item() for i in significant]
        )
        assert np.allclose(pmf[significant].ravel(), expected)
| StarcoderdataPython |
1938753 | """
PyXLL Examples: Automation
PyXLL worksheet and menu functions can call back into Excel
using the Excel COM API*.
In addition to the COM API there are a few Excel functions
exposed via PyXLL that allow you to query information about
the current state of Excel without using COM.
Excel uses different security policies for different types
of functions that are registered with it. Depending on
the type of function, you may or may not be able to make
some calls to Excel.
Menu functions and macros are registered as 'commands'.
Commands are free to call back into Excel and make changes to
documents. These are equivalent to the VBA Sub routines.
Worksheet functions are registered as 'functions'. These
are limited in what they can do. You will be able to
call back into Excel to read values, but not change
anything. Most of the Excel functions exposed via PyXLL
will not work in worksheet functions. These are equivalent
to VBA Functions.
There is a third type of function - macro-sheet equivalent
functions. These are worksheet functions that are allowed to
do most things a macro function (command) would be allowed
to do. These shouldn't be used lightly as they may break
the calculation dependencies between cells if not
used carefully.
* Excel COM support was added in Office 2000. If you are
using an earlier version these COM examples won't work.
"""
import pyxll
from pyxll import xl_menu, xl_func, xl_macro
import logging
_log = logging.getLogger(__name__)
#
# Getting the Excel COM object
#
# PyXLL has a function 'xl_app'. This returns the Excel application
# instance either as a win32com.client.Dispatch object or a
# comtypes object (which com package is used may be set in the
# config file). The default is to use win32com.
#
# It is better to use this than
# win32com.client.Dispatch("Excel.Application")
# as it will always be the correct handle - ie the handle
# to the correct instance of Excel.
#
# For more information on win32com see the pywin32 project
# on sourceforge.
#
# The Excel object model is the same from COM as from VBA
# so usually it's straightforward to write something
# in python if you know how to do it in VBA.
#
# For more information about the Excel object model
# see MSDN or the object browser in the Excel VBA editor.
#
from pyxll import xl_app
#
# A simple example of a menu function that modifies
# the contents of the selected range.
#
@xl_menu("win32com test", sub_menu="More Examples")
def win32com_menu_test():
    """Menu command: write some text into the current selection via COM."""
    # Commands may modify the workbook, so setting Selection.Value is allowed.
    xl_app().Selection.Value = "Hello!"
    pyxll.xlcAlert("Some text has been written to the current cell")
#
# Macros can also be used to call back into Excel when
# a control is activated.
#
# These work in the same way as VBA macros, you just assign
# them to the control in Excel by name.
#
@xl_macro
def button_example():
    """Macro assigned to a button: increment the 'button_output' named range.

    Each click reads the current value of the named range and writes it
    back incremented by one.
    """
    xl = xl_app()
    # 'rng' rather than 'range' so the Python builtin is not shadowed
    rng = xl.Range("button_output")
    rng.Value = rng.Value + 1
@xl_macro
def checkbox_example():
    """Macro assigned to a check box: mirror its state into 'checkbox_output'."""
    xl = xl_app()
    # xl.Caller is the name of the control that triggered this macro
    check_box = xl.ActiveSheet.CheckBoxes(xl.Caller)
    if check_box.Value == 1:
        xl.Range("checkbox_output").Value = "CHECKED"
    else:
        xl.Range("checkbox_output").Value = "Click the check box"
@xl_macro
def scrollbar_example():
    """Macro assigned to a scroll bar: copy its value to 'scrollbar_output'."""
    xl = xl_app()
    # The original fetched the xl.Caller COM property, bound it to a local
    # it never used, then fetched the property a second time.  Reuse the
    # local instead.
    caller = xl.Caller
    scrollbar = xl.ActiveSheet.ScrollBars(caller)
    xl.Range("scrollbar_output").Value = scrollbar.Value
#
# Worksheet functions can also call back into Excel.
#
# The function 'async_call' must be used to do the
# actual work of calling back into Excel from another
# thread, otherwise Excel may lock waiting for the function
# to complete before allowing the COM object to modify the
# sheet, which will cause a dead-lock.
#
# To be able to call xlfCaller from the worksheet function,
# the function must be declared as a macro sheet equivalent
# function by passing macro=True to xl_func.
#
# If your function modifies the Excel worksheet it will
# trigger a recalculation so you have to take care not to
# cause an infinite loop.
#
# Accessing the 'address' property of the XLCell returned
# by xlfCaller requires this function to be a macro sheet
# equivalent function.
#
@xl_func(macro=True)
def automation_example(rows, cols, value):
    """copies value to a range of rows x cols below the calling cell

    Declared with macro=True so that xlfCaller().address is available.
    The actual sheet update is deferred via pyxll.async_call because
    modifying cells synchronously from a worksheet function can
    dead-lock some Excel versions (see the comments above in the file).
    """
    # get the address of the calling cell using xlfCaller
    caller = pyxll.xlfCaller()
    address = caller.address
    # the update is done asynchronously so as not to block some
    # versions of Excel by updating the worksheet from a worksheet function
    def update_func():
        xl = xl_app()
        range = xl.Range(address)
        # get the cell below and expand it to rows x cols
        range = xl.Range(range.Resize(2, 1), range.Resize(rows+1, cols))
        # and set the range's value
        range.Value = value
    # kick off the asynchronous call the update function
    pyxll.async_call(update_func)
return address | StarcoderdataPython |
190245 | <reponame>Raahul-Singh/pythia
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pytest
from astropy.io import fits
from pythia.learning.datasets import BaseDataset
from pythia.learning.transforms import *
from torchvision import transforms
PATH = Path(__file__).parent / "test_data/"
@pytest.fixture
def tabular_data():
    # One-row frame: feature column 'X', label column 'y'.
    tabular_data = {'id': 0,
                    'X': 42,
                    'y': 1}
    return pd.DataFrame(tabular_data, index=['id'])
@pytest.fixture
def fits_data():
    # Metadata row pointing at the sample FITS file shipped in test_data/.
    fits_data = {'id': 0,
                 'filename': '20000101_1247_mdiB_1_8809.fits',
                 'noaa': 8809,
                 'flares': 0,
                 'observation_number': 1}
    return pd.DataFrame(fits_data, index=['id'])
@pytest.fixture
def img_data():
    # Metadata row pointing at the sample JPEG shipped in test_data/.
    img_data = {'id': 0,
                'filename': '5397a56aa57caf04c6000001.jpg',
                'noaa': 0,
                'flares': 0,
                'observation_number': 1}
    return pd.DataFrame(img_data, index=['id'])
@pytest.fixture
def composed_transforms(size=(100, 100)):
    # Preprocessing pipeline exercised by test_apply_transforms:
    # NaN removal, rescale to `size`, normalise, add channel axis, to tensor.
    return transforms.Compose([RemoveNaN(), Rescale(size), Normalize(),
                               FixChannel(), ToTensor()])
@pytest.fixture
def X_col():
    # Dataset column that holds the input file name.
    return ['filename']
@pytest.fixture
def y_col():
    # Dataset column that holds the target label.
    return ['flares']
@pytest.fixture
def fits_file():
    # Raw pixel array of the sample FITS file, used as the expected X.
    return fits.getdata(PATH / "20000101_1247_mdiB_1_8809.fits")
@pytest.fixture
def img_file():
    # Raw pixel array of the sample JPEG, used as the expected X.
    return plt.imread(PATH / "5397a56aa57caf04c6000001.jpg")
@pytest.fixture
def default_dataset(fits_data, X_col, y_col):
    # BaseDataset in its default configuration (FITS input, no transforms).
    return BaseDataset(data=fits_data,
                       root_dir=str(PATH) + "/",
                       X_col=X_col,
                       y_col=y_col)
def test_default_dataset(default_dataset, fits_file):
    # One sample; X must equal the raw FITS pixels, y an int64 label.
    assert len(default_dataset) == 1
    assert len(default_dataset[0]) == 2
    X, y = default_dataset[0]
    assert np.array_equal(X, fits_file)
    assert y.dtype == np.int64
def test_img_dataset(img_data, X_col, y_col, img_file):
    # With is_fits=False the dataset should return the raw image pixels.
    dataset = BaseDataset(data=img_data,
                          root_dir=str(PATH) + "/",
                          X_col=X_col,
                          y_col=y_col,
                          is_fits=False)
    assert len(dataset) == 1
    assert len(dataset[0]) == 2
    X, y = dataset[0]
    assert np.array_equal(X, img_file)
    assert y.dtype == np.int64
def test_tabular_dataset(tabular_data):
    # Tabular mode: X and y come straight from the frame's columns.
    dataset = BaseDataset(data=tabular_data,
                          X_col='X',
                          y_col='y',
                          is_fits=False,
                          is_tabular=True)
    assert len(dataset) == 1
    assert len(dataset[0]) == 2
    X, y = dataset[0]
    assert X == tabular_data.iloc[0]['X']
    assert y == tabular_data.iloc[0]['y']
def test_apply_transforms(fits_data, X_col, y_col, composed_transforms):
    # After the pipeline the sample must be a (1, 100, 100) tensor (C, H, W).
    dataset = BaseDataset(data=fits_data,
                          root_dir=str(PATH) + "/",
                          X_col=X_col,
                          y_col=y_col,
                          transform=composed_transforms)
    X, y = dataset[0]
    assert len(X.shape) == 3
    assert X.shape == (1, 100, 100)
| StarcoderdataPython |
3515410 | import os
import time
import asyncio
import redis
import pandas as pd
from bs4 import BeautifulSoup
from multiprocessing import Pool
PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)) + "/dataset"
URI = "http://finance.naver.com/item/sise_day.nhn?code={}&page={}"
r = redis.StrictRedis(host='localhost', port=6379, db=0)
def parse_html(code):
    """Scrape the daily price pages for one stock code into a parquet file.

    Fetches the Naver Finance daily price table page by page (capped by
    get_last_page), renames the Korean headers to English, coerces the
    numeric columns, and writes dataset/<code>.parquet.
    """
    print('Start {}'.format(code))
    dfs = []
    for page in get_last_page(code):
        df = pd.read_html(URI.format(code, page), header=0)[0]
        df = df.rename(columns={
            '날짜': 'date',
            '종가': 'close',
            '전일비': 'diff',
            '시가': 'open',
            '고가': 'high',
            '저가': 'low',
            '거래량': 'volume'
        })
        df['date'] = df['date'].apply(lambda d: str(pd.to_datetime(d)))
        df['name'] = str(code)
        # The table contains padding rows; dropna removes them before the
        # int cast below.
        df = df.dropna()
        df[['close', 'diff', 'open', 'high', 'low', 'volume']] \
            = df[['close', 'diff', 'open', 'high', 'low', 'volume']].astype(int)
        dfs.append(df)
    result = pd.concat(dfs)
    result.to_parquet("{}/{}.parquet".format(PATH, code), engine='pyarrow')
def get_last_page(code):
    """Return the page numbers to fetch for *code*, capped at 30 pages.

    The per-code page count is read from redis (stored by an earlier
    step).  The original returned range(1, min(last_page, 30)), whose
    exclusive stop silently dropped the final page (and yielded only 29
    pages when capped); the +1 makes the capped last page inclusive.
    """
    last_page = int(r.get(code))
    return range(1, min(last_page, 30) + 1)
if __name__ == '__main__':
    # Timed batch scrape of every KOSDAQ code with a 4-process pool.
    # Earlier single-process / asyncio attempts are kept below (commented)
    # with their measured wall-clock times for comparison.
    start_time = time.time()
    code_df = pd.read_parquet("{}/code.parquet".format(PATH), engine='pyarrow')
    code_df = code_df[code_df.market == 'kosdaq']
    codes = code_df.code.tolist()
    # Version1 (67.91s)
    # for each in codes:
    #     print('Starting {}'.format(each))
    #     for page in get_last_page(each):
    #         parse_html(each, URI.format(each, page))
    # Version2 (87.50s)
    # for each in codes:
    #     urls = [(each, URI.format(each, page)) for page in get_last_page(each)]
    #     futures = [parse_html(code, url) for code, url in urls]
    #     loop = asyncio.get_event_loop()
    #     loop.run_until_complete(asyncio.wait(futures))
    # Version3 (26.38s)
    pool = Pool(processes=4)
    pool.map(parse_html, codes)
    print("--- %s seconds ---" % (time.time() - start_time))
| StarcoderdataPython |
348441 | import boto3
import pendulum
from bloop import UUID, BaseModel, Column, Engine
from bloop.ext.pendulum import DateTime
def engine_for_region(region, table_name_template="{table_name}"):
    """Build a bloop Engine backed by DynamoDB + Streams clients in *region*.

    table_name_template lets callers namespace tables per engine, e.g.
    "primary.{table_name}" vs "replica.{table_name}" below.
    """
    dynamodb = boto3.client("dynamodb", region_name=region)
    dynamodbstreams = boto3.client("dynamodbstreams", region_name=region)
    return Engine(
        dynamodb=dynamodb,
        dynamodbstreams=dynamodbstreams,
        table_name_template=table_name_template
    )
primary = engine_for_region("us-west-2", table_name_template="primary.{table_name}")
replica = engine_for_region("us-east-1", table_name_template="replica.{table_name}")
class SomeDataBlob(BaseModel):
    """Example model whose DynamoDB stream carries both new and old images."""
    class Meta:
        # Both images are needed so stream_replicate can distinguish
        # updates (new present) from deletions (only old present).
        stream = {
            "include": {"new", "old"}
        }
    id = Column(UUID, hash_key=True)
    uploaded = Column(DateTime, range_key=True)
primary.bind(SomeDataBlob)
replica.bind(SomeDataBlob)
def scan_replicate():
    """Bulk replicate existing data"""
    # Full table scan on the primary region, saving each item to the replica.
    for obj in primary.scan(SomeDataBlob):
        replica.save(obj)
def stream_replicate():
    """Monitor changes in approximately real-time and replicate them"""
    stream = primary.stream(SomeDataBlob, "trim_horizon")
    next_heartbeat = pendulum.now()
    while True:
        now = pendulum.now()
        if now >= next_heartbeat:
            # Periodic heartbeat every 10 minutes -- presumably keeps the
            # stream's shard iterators from expiring (see bloop stream docs).
            stream.heartbeat()
            next_heartbeat = now.add(minutes=10)
        record = next(stream)
        if record is None:
            # No new event yet; poll again.
            continue
        if record["new"] is not None:
            replica.save(record["new"])
        else:
            # No new image means the item was deleted on the primary.
            replica.delete(record["old"])
| StarcoderdataPython |
1873392 | import PikaStdLib
import W801Device
# Blink demo for a W801 MCU (PikaScript runtime):
#  - PB5 and PB25 configured as pulled-up outputs
#  - PB16 driven with a 5 Hz, 50% duty PWM
#  - main loop toggles PB5 once per second forever
pb5=W801Device.GPIO()
pb25=W801Device.GPIO()
time=W801Device.Time()
pwm = W801Device.PWM()
pb5.init()
pb5.setPin('PB5')
pb5.setMode('out')
pb5.setPull('up')
pb5.enable()
pb25.init()
pb25.setPin('PB25')
pb25.setMode('out')
pb25.setPull('up')
pb25.enable()
##pwm.init()
pwm.setPin('PB16')
pwm.enable()
pwm.setFrequency(5)
pwm.setDuty(50.0)
print('hello world')
while True:
    pb5.low()
    time.sleep_s(1)
    pb5.high()
    time.sleep_s(1)
| StarcoderdataPython |
3302383 | #!/usr/bin/python3
#Code written by
# _ _ __
# ___ | |_ _ __ / | / _| ___
# / __|| __|| '__|| || |_ / _ \
# \__ \| |_ | | | || _|| __/
# |___/ \__||_| |_||_| \___|
#
# Простая реализациия элементарных клеточных автоматов с применением ООП.
# Использование: создаете экземлпяр класса WolframCA с аргументами
# rule - Десятичное представление кода Вольфрама (int от 0 до 255)
# height - Количество итераций автомата (int от 1 до inf)
# width - Количество клеток в одной строке автомата (int от 1 до inf)
# Далее запускаете метод run()
#
# Пример:
#
# rule101 = WolframCA(101, 500, 500)
# rule101.run()
#
# Примечание: правила строятся на случайной конфигурации
import numpy as np
import matplotlib.pyplot as plt
class WolframCA:
    """Elementary (Wolfram) cellular automaton on a finite row of cells.

    rule   -- decimal Wolfram code in [0, 255]
    height -- number of iterations (rows) to simulate
    width  -- number of cells per row

    The automaton starts from a random configuration; cells outside the
    row are treated as permanently dead (0).
    """

    def __init__(self, rule: int, height: int, width: int) -> None:
        self.rule = rule
        self.height = height
        self.width = width
        self.rule_2 = None         # 8-char binary string of the rule (MSB first)
        self.prev_state = None     # row produced by the previous step
        self.initial_state = None  # random starting row

    def set_rule(self) -> str:
        """Return the rule as an 8-character binary string (MSB first)."""
        # format() zero-pads to width 8, replacing the original manual
        # '0' * missing padding of bin(rule).
        return format(self.rule, '08b')

    def get_rule(self) -> None:
        self.rule_2 = self.set_rule()

    def set_initial_state(self) -> np.ndarray:
        """Return a random row of 0/1 cells."""
        return np.random.randint(2, size=self.width)

    def get_initial_state(self) -> None:
        self.initial_state = self.set_initial_state()

    def read_state(self, prev: int, nxt: int, curr: int) -> int:
        """Return the next value of a cell from its (prev, curr, nxt) neighbourhood.

        rule_2 is ordered from pattern 111 (index 0) down to 000 (index 7),
        so the lookup index is 7 minus the neighbourhood read as binary.
        This replaces the original eight-way if/elif chain; int() also
        accepts the float cells numpy rows carry after the first step.
        """
        return int(self.rule_2[7 - int(4 * prev + 2 * curr + nxt)])

    def get_new_state(self, i) -> np.ndarray:
        """Compute the next row (from initial_state when i == 0)."""
        new_state = np.zeros((1, self.width))[0]
        if i == 0:
            self.prev_state = self.initial_state
        for j in range(self.width):
            # Out-of-bounds neighbours are 0; unlike the original, this
            # also works for width == 1 (no prev_state[j+1] IndexError).
            left = self.prev_state[j - 1] if j > 0 else 0
            right = self.prev_state[j + 1] if j < self.width - 1 else 0
            new_state[j] = self.read_state(left, right, self.prev_state[j])
        self.prev_state = new_state
        return new_state

    def draw_config(self, matr) -> None:
        """Render the full evolution as a black/white image."""
        plt.imshow(matr, cmap="Greys", interpolation="nearest")
        plt.show()

    def run(self) -> None:
        """Simulate `height` steps from a random start and display the result."""
        self.get_rule()
        self.get_initial_state()
        config = self.initial_state
        for i in range(self.height):
            new_state = self.get_new_state(i)
            config = np.vstack((config, new_state))
        self.draw_config(config)
if __name__ == "__main__":
    # Demo: rule 30, 300 steps on a 300-cell row (opens a matplotlib window).
    rule101 = WolframCA(30, 300, 300)
    rule101.run()
| StarcoderdataPython |
1821389 | <filename>survol/sources_types/mysql/table/__init__.py
"""
MySql table
"""
import lib_common
from sources_types import mysql as survol_mysql
from sources_types.mysql import instance as survol_mysql_instance
from sources_types.mysql import database as survol_mysql_database
def EntityOntology():
    """Return the ontology of a MySql table: instance, database and table keys."""
    keys = ["Instance", "Database", "Table", ]
    return (keys, )
def MakeUri(instanceName,dbName,tableName):
    # Build the Survol URI of a MySql table from its three ontology keys.
    return lib_common.gUriGen.UriMakeFromDict("mysql/table", { "Instance": instanceName, "Database" : dbName, "Table" : tableName } )
def EntityName(entity_ids_arr):
    """Display name of a table: '<database>.<table>@<instance>'."""
    instance = entity_ids_arr[0]
    database = entity_ids_arr[1]
    table = entity_ids_arr[2]
    return "{0}.{1}@{2}".format(database, table, instance)
def AddInfo(grph,node,entity_ids_arr):
    # Link the table node to its parent MySql instance and database nodes
    # in the RDF graph.
    instanceMySql = entity_ids_arr[0]
    databaseName = entity_ids_arr[1]
    nodeInstance = survol_mysql_instance.MakeUri(instanceMySql)
    nodeDatabase = survol_mysql_database.MakeUri(instanceMySql,databaseName)
    grph.add((node,lib_common.MakeProp("Instance"),nodeInstance))
    grph.add((node,lib_common.MakeProp("Database"),nodeDatabase))
| StarcoderdataPython |
9749679 | <reponame>tranconbv/ironpython-stubs
# encoding: utf-8
# module Wms.RemotingImplementation.Scripting.Remoting calls itself Remoting
# from Wms.RemotingImplementation,Version=1.23.1.0,Culture=neutral,PublicKeyToken=null
# by generator 1.145
# no doc
# no important
from __init__ import *
# no functions
# classes
class BaseRemotingSink(BaseChannelObjectWithProperties):
 # Auto-generated IronPython interop stub (ironpython-stubs): bodies are
 # placeholders documenting the .NET signatures -- do not add logic here.
 # no doc
 def ZZZ(self):
  """hardcoded/mock instance of the class"""
  return BaseRemotingSink()
 instance=ZZZ()
 """hardcoded/returns an instance of the class"""
 def ProcessRequest(self,*args):
  """ ProcessRequest(self: BaseRemotingSink,message: IMessage,headers: ITransportHeaders,stream: Stream,state: object) -> (Stream,object) """
  pass
 def ProcessResponse(self,*args):
  """ ProcessResponse(self: BaseRemotingSink,message: IMessage,headers: ITransportHeaders,stream: Stream,state: object) -> (IMessage,Stream) """
  pass
 def SetNextSink(self,nextSink):
  """ SetNextSink(self: BaseRemotingSink,nextSink: object) """
  pass
 def __getitem__(self,*args):
  """ x.__getitem__(y) <==> x[y] """
  pass
 def __init__(self,*args):
  """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
  pass
 def __iter__(self,*args):
  """ __iter__(self: IEnumerable) -> object """
  pass
 def __setitem__(self,*args):
  """ x.__setitem__(i,y) <==> x[i]= """
  pass
 PerProviderState=property(lambda self: object(),lambda self,v: None,lambda self: None)
class MakePythonTypesSerializeableSink(BaseRemotingSink):
 """ MakePythonTypesSerializeableSink() """
 # Auto-generated IronPython interop stub; see BaseRemotingSink above.
 def ZZZ(self):
  """hardcoded/mock instance of the class"""
  return MakePythonTypesSerializeableSink()
 instance=ZZZ()
 """hardcoded/returns an instance of the class"""
 def ProcessRequest(self,*args):
  """ ProcessRequest(self: BaseRemotingSink,message: IMessage,headers: ITransportHeaders,stream: Stream,state: object) -> (Stream,object) """
  pass
 def ProcessResponse(self,*args):
  """ ProcessResponse(self: MakePythonTypesSerializeableSink,message: IMessage,headers: ITransportHeaders,stream: Stream,state: object) -> (IMessage,Stream) """
  pass
 def __getitem__(self,*args):
  """ x.__getitem__(y) <==> x[y] """
  pass
 def __init__(self,*args):
  """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
  pass
 def __iter__(self,*args):
  """ __iter__(self: IEnumerable) -> object """
  pass
 def __setitem__(self,*args):
  """ x.__setitem__(i,y) <==> x[i]= """
  pass
 PerProviderState=property(lambda self: object(),lambda self,v: None,lambda self: None)
class SinkProviderOf(object):
 """ SinkProviderOf[T]() """
 # Auto-generated IronPython interop stub for the generic sink provider.
 def ZZZ(self):
  """hardcoded/mock instance of the class"""
  return SinkProviderOf()
 instance=ZZZ()
 """hardcoded/returns an instance of the class"""
 def CreateSink(self,channel):
  """ CreateSink(self: SinkProviderOf[T],channel: IChannelReceiver) -> IServerChannelSink """
  pass
 def GetChannelData(self,channelData):
  """ GetChannelData(self: SinkProviderOf[T],channelData: IChannelDataStore) """
  pass
 def __init__(self,*args):
  """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
  pass
 def __repr__(self,*args):
  """ __repr__(self: object) -> str """
  pass
 Next=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """Get: Next(self: SinkProviderOf[T]) -> IServerChannelSinkProvider
Set: Next(self: SinkProviderOf[T])=value
"""
| StarcoderdataPython |
1682599 | <gh_stars>0
# Registry mapping plugin name -> callable, filled by the decorator below.
PLUGINS = dict()
def register(func):
    # Decorator: record func in PLUGINS under its own name, return it unchanged.
    PLUGINS[func.__name__] = func
    return func
@register
def add(a, b):
    # Registered plugin: sum of the two operands.
    return a + b
@register
def multiply(a, b):
    # Registered plugin: product of the two operands.
    return a * b
def operation(func_name, a, b):
    # Dispatch by name through the PLUGINS registry (KeyError if unknown).
    func = PLUGINS[func_name]
    return func(a, b)
print(PLUGINS)
print(operation('add', 2, 3))
print(operation('multiply', 2, 3)) | StarcoderdataPython |
11353101 | for i in '7891561899':
print(i)
print(len('djakaoo'))
print(len("ddddddd"))
| StarcoderdataPython |
126373 | __all__ = ["mi_enb_decoder"]
PACKET_TYPE = {
"0xB0A3": "LTE_PDCP_DL_Cipher_Data_PDU",
"0xB0B3": "LTE_PDCP_UL_Cipher_Data_PDU",
"0xB173": "LTE_PHY_PDSCH_Stat_Indication",
"0xB063": "LTE_MAC_DL_Transport_Block",
"0xB064": "LTE_MAC_UL_Transport_Block",
"0xB092": "LTE_RLC_UL_AM_All_PDU",
"0xB082": "LTE_RLC_DL_AM_All_PDU",
"0xB13C": "LTE_PHY_PUCCH_SR",
}
FUNCTION_NAME = {
"0xB0A3": "handle_pdcp_dl",
"0xB0B3": "handle_pdcp_ul",
"0xB173": "handle_pdsch_stat",
"0xB063": "handle_mac_dl",
"0xB064": "handle_mac_ul",
"0xB092": "handle_rlc_ul",
"0xB082": "handle_rlc_dl",
"0xB13C": "handle_pucch_sr",
}
class mi_enb_decoder:
    """Decoder for one MobileInsight eNB log line of the form
    '[MI] <hex type id> <space-separated fields>'.

    Usage: construct with the raw bytes line, call get_type_id() to
    classify it, then get_content() to parse the type-specific fields
    into a MobileInsight-style nested dict.
    """
    def __init__(self, packet):
        # The line arrives as bytes; keep the decoded text for parsing.
        self.packet = str(packet,'utf-8')
        self.p_type_name = None  # human-readable type, e.g. "LTE_PHY_PUCCH_SR"
        self.p_type = None       # hex id string, e.g. "0xB13C"
        self.content = None      # cached parse result (filled lazily)
    def get_type_id(self):
        """Classify the line; return the type name, or None if unrecognised."""
        # print (type(self.packet))
        try:
            l = self.packet.split(" ")
            # print (l)
            if (l[1] in PACKET_TYPE):
                self.p_type = l[1]
                self.p_type_name = PACKET_TYPE[l[1]]
                return self.p_type_name
        except:
            # NOTE(review): bare except also hides programming errors; an
            # IndexError guard would be more precise.
            return None
    def get_content(self):
        """Dispatch to the per-type handler; -1 if the type is unknown/unset.

        NOTE(review): FUNCTION_NAME maps 0xB063 to 'handle_mac_dl', but no
        such method exists on this class, so those packets raise
        AttributeError here.
        """
        if self.p_type is None:
            return -1
        else:
            method_to_call = getattr(self, FUNCTION_NAME[self.p_type])
            return method_to_call()
    def handle_pdsch_stat(self):
        """Parse LTE_PHY_PDSCH_Stat_Indication (0xB173)."""
        # PDSCH format: [MI] ID FN SFN nRB
        try:
            if self.content is None:
                d = {}
                packet = self.packet
                if packet[-1] == '\n':
                    packet = packet[0:-1]
                l = packet.split(" ")
                d['Records'] = []
                dict_tmp = {}
                dict_tmp['Frame Num'] = int(l[2])
                dict_tmp['Subframe Num'] = int(l[3])
                dict_tmp['Num RBs'] = int(l[4])
                d['Records'].append(dict_tmp)
                self.content = d
        finally:
            # Returning from 'finally' deliberately swallows parse errors,
            # so malformed lines yield None (self.content left unset).
            return self.content
    def handle_pdcp_dl(self):
        """Parse LTE_PDCP_DL_Cipher_Data_PDU (0xB0A3)."""
        # PDCP DL format: [MI] 0xB0A3 FN SFN SN Size
        try:
            if self.content is None:
                d = {}
                packet = self.packet
                if packet[-1] == '\n':
                    packet = packet[0:-1]
                l = packet.split(" ")
                d['Subpackets'] = []
                dict_tmp = {}
                dict_tmp['Sys FN'] = int(l[2])
                dict_tmp['Sub FN'] = int(l[3])
                dict_tmp['SN'] = int(l[4])
                dict_tmp['PDU Size'] = int(l[5])
                d['Subpackets'].append(dict_tmp)
                self.content = d
        finally:
            # See handle_pdsch_stat: return-in-finally makes this best-effort.
            return self.content
    def handle_pdcp_ul(self):
        """Parse LTE_PDCP_UL_Cipher_Data_PDU (0xB0B3).

        The trailing RLC_Mode field named in the format comment is not parsed.
        """
        # PDCP UL format: [MI] 0xB0B3 FN SFN SN Size RLC_Mode
        try:
            if self.content is None:
                d = {}
                packet = self.packet
                if packet[-1] == '\n':
                    packet = packet[0:-1]
                l = packet.split(" ")
                d['Subpackets'] = []
                dict_tmp = {}
                dict_tmp['Sys FN'] = int(l[2])
                dict_tmp['Sub FN'] = int(l[3])
                dict_tmp['SN'] = int(l[4])
                dict_tmp['PDU Size'] = int(l[5])
                d['Subpackets'].append(dict_tmp)
                self.content = d
        finally:
            # See handle_pdsch_stat: return-in-finally makes this best-effort.
            return self.content
    def handle_mac_ul(self):
        """Parse LTE_MAC_UL_Transport_Block (0xB064)."""
        # MAC UL format: [MI] ID FN SFN Grant
        try:
            if self.content is None:
                d = {}
                packet = self.packet
                if packet[-1] == '\n':
                    packet = packet[0:-1]
                l = packet.split(" ")
                d['Subpackets'] = []
                dict_tmp = {}
                dict_tmp['Samples'] = []
                dict_tmp2 = {}
                dict_tmp2['SFN'] = int(l[2])
                dict_tmp2['Sub FN'] = int(l[3])
                dict_tmp2['Grant (bytes)'] = int(l[4])
                dict_tmp['Samples'].append(dict_tmp2)
                d['Subpackets'].append(dict_tmp)
                self.content = d
        finally:
            # See handle_pdsch_stat: return-in-finally makes this best-effort.
            return self.content
    def handle_rlc_ul(self):
        """Parse LTE_RLC_UL_AM_All_PDU (0xB092); None for non-data PDUs."""
        # Format: [MI] 0xB092 SFN [MI] 0xB092 TYPE(1=data) FN SFN BEARER SIZE HDR_SIZE DATA_SIZE
        if self.content is None:
            packet = self.packet
            if packet[-1] == '\n':
                packet = packet[0:-1]
            l = packet.split(" ")
            if l[5] == "0":
                # Control (non-data) PDU: nothing to record.
                return None
            d = {}
            d['Subpackets'] = []
            sub_dict = {}
            record_dict = {}
            record_dict['sys_fn'] = int(l[6])
            record_dict['sub_fn'] = int(l[2])
            record_dict['pdu_bytes'] = int(l[9])
            sub_dict['RLCUL PDUs'] = [record_dict]
            d['Subpackets'].append(sub_dict)
            self.content = d
        return self.content
    def handle_rlc_dl(self):
        # Not implemented: LTE_RLC_DL_AM_All_PDU (0xB082) is recognised but
        # its payload is not parsed (caller receives None).
        pass
    def handle_pucch_sr(self):
        """Parse LTE_PHY_PUCCH_SR (0xB13C)."""
        if self.content is None:
            d = {}
            packet = self.packet
            if packet[-1] == '\n':
                packet = packet[0:-1]
            l = packet.split(" ")
            d['Records'] = []
            record_dict = {}
            record_dict['Frame Num'] = int(l[3])
            record_dict['Subframe Num'] = int(l[5])
            d['Records'].append(record_dict)
            self.content = d
        return self.content
| StarcoderdataPython |
8088441 | #!/usr/bin/env python
import sys
import io
def print_help():
    # print help message
    # (Python 2 file: uses the 'print >> stream' statement syntax.)
    print >> sys.stderr, "Usage: csv_conv.py [Options] <filename>"
    print >> sys.stderr, "Options and arguments:"
    print >> sys.stderr, "    [-h/--help]: Show this message."
    print >> sys.stderr, "    [-s]: Define sperator. Defaults to comma."
    print >> sys.stderr, "    [-q]: Define text qualifier. Defaults to auto detect."
    print >> sys.stderr, "    [-t]: Trim white space at the beginning and end of each field. Defaults to double quote."
    print >> sys.stderr, "    [-z]: Specify timezone for time fields. Defaults to server timezone. Can also be Asia/Chongqing etc."
    print >> sys.stderr, "          For standard timezone names, refer to: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones"
    print >> sys.stderr, "    [-k]: Skip errors and continue parsing following lines."
    print >> sys.stderr, "    <filename>: csv file name."
def get_parameters():
    """Parse command-line options into a dict keyed by flag.

    Defaults: ',' seperator, trim on, skip-errors on.  '-q' is
    deliberately NOT defaulted here: the help text promises the
    qualifier "defaults to auto detect", and the auto-detection branch
    in __main__ only runs when '-q' is absent from the dict, so the
    original pre-set default made that branch dead code.
    """
    args = {
        "-s": ",",
        "-t": True,
        "-k": True
    }
    if len(sys.argv) > 1:
        for i in range(0, len(sys.argv)):
            p = sys.argv[i]
            if len(sys.argv) > i + 1:
                if p == '-s':
                    args['-s'] = sys.argv[i + 1]
                elif p == '-q':
                    val = sys.argv[i + 1]
                    # only ' or " can be used as qualifier
                    if val == "'":
                        args['-q'] = "'"
                    else:
                        args['-q'] = '"'
                elif p == '-t':
                    val = sys.argv[i + 1].lower()
                    if val == 'false':
                        args['-t'] = False
                elif p == '-z':
                    args['-z'] = sys.argv[i + 1]
                elif p == '-k':
                    val = sys.argv[i + 1].lower()
                    if val == 'false':
                        args['-k'] = False
    return args
# Define a state machine to identify csv fields
STATES = {
"qualifier": "qualifier",
"qualifier_close": "qualifier_close",
"seperator": "seperator",
"field": "field",
"field_in_qualifier": "field_in_qualifier",
"end": "end",
"invalid": "invalid"
}
class CSVStateMachine:
    """Line-oriented CSV re-writer driven by the STATES table above.

    feed() consumes one raw input line, recognises its fields using the
    configured seperator/qualifier, and writes them back out as a
    fully-quoted, comma-separated line (decoded as utf8 with a gbk
    fallback, re-encoded to utf8).  Written for Python 2.
    """
    def __init__(self, args, output):
        self.seperator = args['-s']
        self.qualifier = args['-q']
        self.trim = args['-t']
        # self.timezone = args['-z']
        self.skip_error = args['-k']
        self.output = output
        self.state = STATES["qualifier"]
        # input buffer for whole input line.
        self.buff = ""
        # position of character done recognizing
        self.base_pos = 0
        self.fields = []
        self.encoding = 'utf8'
    def feed(self, buff):
        """Parse one raw line and emit it; report (or exit on) parse failure."""
        self.state = STATES["qualifier"]
        self.fields = []
        self.base_pos = 0
        self.buff = buff
        length = len(buff)
        for i in range(0, length):
            self._state_qualifier()
            self._state_qualifier_close()
            self._state_seperator()
            self._state_field()
            self._state_field_in_qualifier()
            if self.state == STATES["end"] or self.state == STATES["invalid"]:
                break
        if self.state == STATES["end"]:
            self._state_end()
        else:
            # NOTE(review): 'line' here is the module-level global set in
            # __main__, not the 'buff' argument -- works only because the
            # caller keeps the two in sync.
            print >> sys.stderr, "Couldn't parse this line: {0}".format(line)
            if not self.skip_error:
                print >> sys.stderr, "-k set to stop on error. exiting..."
                exit()
    def _state_qualifier(self):
        """At a field start: consume an opening qualifier if one is present."""
        if self.state == STATES["qualifier"]:
            # TODO: BOM is not properly handled here. Fix it!
            psbl_qual = self.buff[self.base_pos:self.base_pos + len(self.qualifier)]
            if psbl_qual == self.qualifier and self.qualifier != "":
                # recognized qualifier
                self.base_pos += len(self.qualifier)
                self.state = STATES["field_in_qualifier"]
            else:
                self.state = STATES["field"]
    def _state_qualifier_close(self):
        """After a closing qualifier: move to end-of-line or seperator state."""
        if self.state == STATES["qualifier_close"]:
            self.base_pos += len(self.qualifier)
            i = self.base_pos + 1
            string = self.buff[self.base_pos:i]
            if string in ['\r', '\n', '']:
                self.state = STATES["end"]
            else:
                self.state = STATES["seperator"]
    def _state_seperator(self):
        """Consume the seperator between two fields."""
        if self.state == STATES["seperator"]:
            i = self.base_pos + len(self.seperator)
            psbl_sprt = self.buff[self.base_pos:i]
            if psbl_sprt == self.seperator:
                self.base_pos = i
                self.state = STATES["qualifier"]
            # else:
            # Shouldn't happen since this is handled in "qualifier" state
    def _state_field(self):
        """Scan an unquoted field up to the next seperator or line end.

        Lone qualifier characters found inside the field are escaped by
        doubling them in-place in self.buff so the quoted output stays
        well-formed.
        """
        if self.state == STATES["field"]:
            i = self.base_pos + 1
            psbl_end = self.buff[self.base_pos:i]
            if psbl_end in ['\r', '\n', '']:
                # last field is empty
                self._push_field("")
                self.state = STATES["end"]
            else:
                i = self.base_pos
                while i < len(self.buff):
                    j = i + len(self.seperator)
                    psbl_end = self.buff[i:j]
                    if psbl_end == self.seperator:
                        field = self.buff[self.base_pos:i]
                        self._push_field(field)
                        self.state = STATES["seperator"]
                        self.base_pos = i
                        break
                    else:
                        j = i + 1
                        psbl_end = self.buff[j:j + 1]
                        if psbl_end in ['\r', '\n', '']:
                            # end of line
                            field = self.buff[self.base_pos:j]
                            self._push_field(field)
                            self.state = STATES["end"]
                            break
                        j = i + len(self.qualifier)
                        curr_str = self.buff[i:j]
                        if curr_str == self.qualifier:
                            k = j + len(self.qualifier)
                            after = self.buff[j:k]
                            if after == self.qualifier:
                                i += len(self.qualifier)
                            else:
                                # escape current qualifier by repeat it once
                                self.buff = self.buff[:j] + self.qualifier + self.buff[j:]
                                i += len(self.qualifier)
                    i += 1
    def _state_field_in_qualifier(self):
        """Scan a quoted field, honouring doubled-qualifier escapes."""
        if self.state == STATES["field_in_qualifier"]:
            i = self.base_pos
            while i < len(self.buff):
                j = i + len(self.qualifier)
                curr_str = self.buff[i:j]
                if curr_str == self.qualifier:
                    # closing qualifier detected
                    # if it's followed by seperator, then the field is closed
                    k = j + len(self.seperator)
                    followed_by = self.buff[j:k]
                    followed_one = self.buff[j:j+1]
                    is_followed_by_end = followed_by == self.seperator or followed_one in ['\r', '\n', '']
                    # also try to detect qualifier escape. e.g. "".
                    # however field like "ab"" should not be treated as escape.
                    # depends on if 2nd qualifier is followed by a seperator or line end or EOF.
                    k = j + len(self.qualifier)
                    followed_by = self.buff[j:k]
                    l = k + len(self.seperator)
                    snd_followed_by = self.buff[k:l]
                    snd_followed_one = self.buff[k: k + 1]
                    is_followed_by_qual = followed_by == self.qualifier
                    is_qual_followed_by_end = snd_followed_by == self.seperator or snd_followed_one in ['\r', '\n', '']
                    if is_followed_by_end:
                        # qualifier is followed by seperator or line end or EOF.
                        field = self.buff[self.base_pos:i]
                        self._push_field(field)
                        self.state = STATES["qualifier_close"]
                        self.base_pos = i
                        break
                    elif is_followed_by_qual and not is_qual_followed_by_end:
                        # This is escape, skip the immediate after qualifier and continue
                        i += len(self.qualifier)
                    else:
                        # escape current qualifier by repeat it once
                        self.buff = self.buff[:j] + self.qualifier + self.buff[j:]
                        i += len(self.qualifier)
                i += 1
            if self.state == STATES["field_in_qualifier"]:
                # searched to the end still can't find closing qualifier. something is wrong.
                self.state = STATES["invalid"]
    def _state_end(self):
        """Quote and emit the collected fields: utf8 first, gbk fallback."""
        if self.state == STATES["end"]:
            self.base_pos = len(self.buff)
            line = ",".join(map(lambda f: '"{0}"'.format(f) if f != "" else "", self.fields))
            try:
                line = line.decode(self.encoding)
            except UnicodeDecodeError:
                # Once a line fails utf8, stay on gbk for the rest of the file.
                self.encoding = 'gbk'
                # print >> sys.stderr, line
                line = line.decode(self.encoding)
            line = line.encode(encoding='utf8',errors='strict')
            self.output .write(line)
            self.output.write("\n")
            self.output.flush()
    def _push_field(self, field):
        # TODO: handle time convertion
        if self.trim:
            field = field.strip()
        self.fields.append(field)
    def _detect_time(self):
        # Placeholder for the planned '-z' timezone conversion (see TODO above).
        return
if __name__ == "__main__":
    # Entry point: prefer piped stdin; otherwise open the file named by the
    # last argument.  The first line is read ahead to auto-detect the text
    # qualifier before constructing the state machine.
    if len(sys.argv) == 2 and (sys.argv[1] == "--help" or sys.argv[1] == '-h'):
        print_help()
        exit()
    fs = None
    if not sys.stdin.isatty():
        # stdin preferred
        fs = sys.stdin
    elif len(sys.argv) >= 2:
        # if stdin is not available, try last parameter as file name
        filename = sys.argv[len(sys.argv) - 1]
        try:
            fs = open(filename, 'r')
        except IOError:
            print >> sys.stderr, "File not found or occupied by other process: " + filename
            print_help()
            exit()
    else:
        print >> sys.stderr, "Can't find file to read from."
        print_help()
        exit()
    args = get_parameters()
    line = fs.readline()
    # Auto-detect qualifier from the first character of the first line;
    # only effective when get_parameters() left '-q' unset.
    if not args.has_key('-q'):
        if line.startswith("'"):
            args['-q'] = "'"
        elif line.startswith('"'):
            args['-q'] = '"'
        else:
            args['-q'] = ""
    state_machine = CSVStateMachine(args, sys.stdout)
    while(line != ""):
        state_machine.feed(line)
        line = fs.readline()
| StarcoderdataPython |
6683821 | <reponame>zaind6/Word-Search-Puzzle
# Code for working with word search puzzles
#
# Do not modify the existing code
#
# Complete the tasks below marked by *task*
#
# Before submission, you must complete the following header:
#
# I hear-by decree that all work contained in this file is solely my own
# and that I received no help in the creation of this code.
# I have read and understood the University of Toronto academic code of
# behaviour with regards to plagiarism, and the seriousness of the
# penalties that could be levied as a result of committing plagiarism
# on an assignment.
#
# Name: <NAME>
# MarkUs Login: zafarsy4
PUZZLE1 = '''
glkutqyu
onnkjoaq
uaacdcne
gidiaayu
urznnpaf
ebnnairb
xkybnick
ujvaynak
'''
PUZZLE2 = '''
fgbkizpyjohwsunxqafy
hvanyacknssdlmziwjom
xcvfhsrriasdvexlgrng
lcimqnyichwkmizfujqm
ctsersavkaynxvumoaoe
ciuridromuzojjefsnzw
bmjtuuwgxsdfrrdaiaan
fwrtqtuzoxykwekbtdyb
wmyzglfolqmvafehktdz
shyotiutuvpictelmyvb
vrhvysciipnqbznvxyvy
zsmolxwxnvankucofmph
txqwkcinaedahkyilpct
zlqikfoiijmibhsceohd
enkpqldarperngfavqxd
jqbbcgtnbgqbirifkcin
kfqroocutrhucajtasam
ploibcvsropzkoduuznx
kkkalaubpyikbinxtsyb
vjenqpjwccaupjqhdoaw
'''
def rotate_puzzle(puzzle):
    '''(str) -> str
    Return the puzzle rotated 90 degrees to the left.
    '''
    # Keep only the non-empty, whitespace-trimmed rows.
    rows = []
    for raw_row in puzzle.split('\n'):
        stripped = raw_row.strip()
        if stripped:
            rows.append(stripped)
    # zip(*rows) yields the columns read top-to-bottom; a left rotation
    # reads the rightmost column first, so reverse the column order.
    columns = [''.join(col) for col in zip(*rows)]
    return '\n'.join(reversed(columns))
def lr_occurrences(puzzle, word):
    '''(str, str) -> int
    Return the number of times word is found in puzzle in the
    left-to-right direction only.
    >>> lr_occurrences('xaxy\nyaaa', 'xy')
    1
    '''
    # str.count counts non-overlapping matches; rows are newline-separated,
    # so a word cannot falsely straddle two rows unless it contains '\n'.
    return puzzle.count(word)
def total_occurrences(puzzle, word):
    '''(str, str) -> int
    Return total occurrences of word in puzzle.
    All four directions are counted as occurrences:
    left-to-right, top-to-bottom, right-to-left, and bottom-to-top.
    >>> total_occurrences('xaxy\nyaaa', 'xy')
    2
    '''
    # Step the puzzle through its four orientations and count the
    # left-to-right hits in each one.
    total = 0
    current = puzzle
    for _ in range(4):
        total += lr_occurrences(current, word)
        current = rotate_puzzle(current)
    return total
def in_puzzle_horizontal(puzzle, word):
    '''(str, str) -> (bool)
    >>> in_puzzle_horizontal(PUZZLE1, 'brian')
    False
    >>> in_puzzle_horizontal('zain\npain\nlame', 'ni')
    True
    >>> in_puzzle_horizontal('dan\npan', 'an')
    True
    >>> in_puzzle_horizontal('dan\npan', 'ran')
    False
    Report whether word occurs horizontally in puzzle.
    True only when word appears at least once horizontally (left-right
    and/or right-left) AND never vertically; False otherwise.
    '''
    half_turn = rotate_puzzle(rotate_puzzle(puzzle))
    horizontal = lr_occurrences(puzzle, word) + lr_occurrences(half_turn, word)
    # Every non-horizontal hit counted by total_occurrences is vertical.
    vertical = total_occurrences(puzzle, word) - horizontal
    return horizontal >= 1 and vertical == 0
def in_puzzle_vertical(puzzle, word):
    """(str, str) -> (bool)

    Return True iff word appears vertically in puzzle (top-to-bottom,
    bottom-to-top, or both) and does not appear in either horizontal
    direction.

    >>> in_puzzle_vertical('dan\npan', 'aa')
    True
    >>> in_puzzle_vertical('dan\npan', 'ran')
    False
    """
    # Mirror of in_puzzle_horizontal: count hits in both dimensions,
    # then require vertical-only placement.
    half_turn = rotate_puzzle(rotate_puzzle(puzzle))
    horizontal_hits = (lr_occurrences(puzzle, word) +
                       lr_occurrences(half_turn, word))
    vertical_hits = (lr_occurrences(rotate_puzzle(puzzle), word) +
                     lr_occurrences(rotate_puzzle(half_turn), word))
    return vertical_hits >= 1 and horizontal_hits == 0
def in_puzzle(puzzle, word):
    """(str, str) -> (bool)

    Return True iff word is found at least once in puzzle, searching
    all four directions: left-right, right-left, top-bottom, and
    bottom-top.

    >>> in_puzzle(PUZZLE1, 'brian')
    True
    >>> in_puzzle(PUZZLE1, 'hey')
    False
    """
    # Any positive count over all directions means the word is present.
    return total_occurrences(puzzle, word) > 0
def in_exactly_one_dimension(puzzle, word):
    """(str, str) -> (bool)

    Return True iff word is confined to exactly one dimension of
    puzzle: horizontal (one or both horizontal directions) or vertical
    (one or both vertical directions), but not both and not neither.

    >>> in_exactly_one_dimension('dan\npan', 'an')
    True
    >>> in_exactly_one_dimension('zain\npain', 'zain is no pain')
    False
    """
    # Each helper already excludes the other dimension, so at most one
    # of the two flags can be True; exactly one True flag means the
    # word lives in a single dimension.  Both False covers "absent"
    # and "spans both dimensions".
    flags = (in_puzzle_horizontal(puzzle, word),
             in_puzzle_vertical(puzzle, word))
    return flags.count(True) == 1
def all_horizontal(puzzle, word):
    """(str, str) -> (bool)

    Return True iff every occurrence of word in puzzle is horizontal,
    or word does not occur in puzzle at all (vacuously true).
    Return False if word occurs vertically, whether or not it also
    occurs horizontally.

    >>> all_horizontal('zain\npain', 'zain')
    True
    >>> all_horizontal('dan\npan', 'aa')
    False
    """
    is_horizontal = in_puzzle_horizontal(puzzle, word)
    is_vertical = in_puzzle_vertical(puzzle, word)
    found = in_puzzle(puzzle, word)
    # Any vertical-only placement immediately disqualifies the word.
    if is_vertical:
        return False
    # Remaining cases: horizontally placed (True), absent (vacuously
    # True), or present in both dimensions (neither helper fired ->
    # found is True while is_horizontal is False -> False).
    return is_horizontal or not found
def at_most_one_vertical(puzzle, word):
    """(str, str) -> (bool)

    Return True iff puzzle contains exactly one occurrence of word and
    that single occurrence reads vertically (top-to-bottom or
    bottom-to-top).

    >>> at_most_one_vertical('zain \n pain', 'zp')
    True
    >>> at_most_one_vertical('zain \n pain', 'aa')
    False
    """
    # One hit overall plus a vertical-only placement pins the word to
    # a single vertical occurrence; anything else (absent, repeated,
    # or horizontal) fails.
    hits = total_occurrences(puzzle, word)
    is_vertical = in_puzzle_vertical(puzzle, word)
    return hits == 1 and is_vertical
def do_tasks(puzzle, name):
    """(str, str) -> NoneType

    puzzle is a word search puzzle and name is a word.

    Print, one per line, the number of occurrences of name in puzzle
    for each of the four reading directions, then the total count over
    all directions, and finally whether name occurs horizontally.
    """
    # Each quarter-rotation of the puzzle exposes the next reading
    # direction to a plain left-to-right count, in exactly this order.
    directions = ('left-to-right', 'top-to-bottom',
                  'right-to-left', 'bottom-to-top')
    view = puzzle
    for direction in directions:
        # end='' keeps the count on the same line as its label.
        print('Number of times', name, 'occurs ' + direction + ': ', end='')
        print(lr_occurrences(view, name))
        view = rotate_puzzle(view)
    # Grand total over all four directions.
    print(total_occurrences(puzzle, name))
    # Whether name lies horizontally (and only horizontally) in puzzle.
    print(in_puzzle_horizontal(puzzle, name))
# Driver: run the report for each assigned puzzle/name combination.
do_tasks(PUZZLE1, 'brian')
# *task* 2: call do_tasks on PUZZLE1 and 'nick'.
# Your code should work on 'nick' with no other changes made.
# If it doesn't work, check your code in do_tasks.
# Hint: you shouldn't be using 'brian' anywhere in do_tasks.
do_tasks(PUZZLE1, 'nick')
# *task* 7: call do_tasks on PUZZLE2 (that's a 2!) and 'nick'.
# Your code should work on the bigger puzzle with no changes made to do_tasks.
# If it doesn't work properly, go over your code carefully and fix it.
do_tasks(PUZZLE2, 'nick')
# *task* 9b: print the results of calling in_puzzle on PUZZLE1 and 'nick'.
# Add only one line below. Your code should print only True or False.
print(in_puzzle(PUZZLE1, 'nick'))
# *task* 9c: print the results of calling in_puzzle on PUZZLE2 and 'anya'.
# Add only one line below. Your code should print only True or False.
print(in_puzzle(PUZZLE2, 'anya'))
| StarcoderdataPython |
321850 | <filename>mll/tests/test_receiver_fc.py
import torch
import numpy as np
from mll.recv_models import fc1l_model, fc2l_model
def test_fc1l():
    """Smoke-test FC1LModel: output shape must be (N, types, meanings)."""
    batch = 5
    utt_len = 20
    vocab_size = 4
    embedding_size = 11
    num_meaning_types = 5
    meanings_per_type = 10
    # Random token ids in {0..vocab_size}, shaped (utt_len, batch).
    tokens = np.random.choice(vocab_size + 1, (utt_len, batch), replace=True)
    inputs = torch.from_numpy(tokens)
    model = fc1l_model.FC1LModel(
        embedding_size=embedding_size,
        vocab_size=vocab_size,
        utt_len=utt_len,
        num_meaning_types=num_meaning_types,
        meanings_per_type=meanings_per_type,
    )
    output = model(inputs)
    assert list(output.size()) == [batch, num_meaning_types, meanings_per_type]
def test_fc2l():
    """Smoke-test FC2LModel: output shape must be (N, types, meanings)."""
    N = 5
    utt_len = 20
    vocab_size = 4
    embedding_size = 11
    num_meaning_types = 5
    meanings_per_type = 10
    # Random token ids in {0..vocab_size}, shaped (utt_len, N).
    inputs = torch.from_numpy(
        np.random.choice(vocab_size + 1, (utt_len, N), replace=True))
    # Fixed: the local was previously named `fc1l`, which misleadingly
    # suggested an FC1LModel; it holds an FC2LModel.
    fc2l = fc2l_model.FC2LModel(
        embedding_size=embedding_size, vocab_size=vocab_size,
        utt_len=utt_len, num_meaning_types=num_meaning_types,
        meanings_per_type=meanings_per_type, dropout=0.5)
    output = fc2l(inputs)
    # Debug print removed; the shape assertion is the actual check.
    assert list(output.size()) == [N, num_meaning_types, meanings_per_type]
| StarcoderdataPython |
9752572 | # -*- coding: utf-8 -*-
#
# Copyright 2019 Klimaat
import os
import time
import calendar
import numpy as np
import netrc
import shutil
import requests
import tarfile
from email.utils import parsedate_tz, mktime_tz
from rnlyss.dataset import Dataset
from rnlyss.grid import GaussianGrid
from rnlyss.util import syslog_elapsed_time
try:
    import pygrib
except ImportError:
    # pygrib is required to decode the GRIB2 source files; fail fast at
    # import time rather than deep inside stack()/download().
    raise NotImplementedError("pygrib req'd to read CFS datasets")
class CFSV2(Dataset):
    """
    NCEP Climate Forecast System version 2 (CFSv2) reanalysis dataset,
    served from the NCAR RDA archive (ds094), hourly surface fields on
    a Gaussian grid.

    Class attributes describe the archived variables (``dvars``), the
    time coverage, and the native grid.
    """

    # Dataset variables
    # NB: Due to storage of source files, some positive-only variables may
    # have negative values.
    # Per-entry keys: 'scale'/'offset' quantize each field into int16
    # slabs; 'hour0' is the hour offset of the first record in a month;
    # 'uglpr' marks fields that must be assembled from RDA inventories
    # (see download()); 'converter' is applied to raw GRIB values.
    # fmt: off
    dvars = {
        # Surface geopotential (m)
        'orog': {'role': 'hgt', 'scale': 1, 'constant': True, 'hour0': 0},
        # Land surface mask (0 or 1)
        'lsm': {'role': 'land', 'scale': 1e-2, 'constant': True, 'hour0': 0},
        # Dry bulb temperature @ 2m (K)
        'tmp2m': {'role': 'tas', 'scale': 1e-2, 'offset': 330, 'hour0': 1},
        # Specific humidity @ 2m (kg/kg)
        'q2m': {'role': 'huss', 'scale': 1e-6, 'offset': 0.03, 'hour0': 1},
        # Wind velocity vector (u, v) @ 10m (m/s)
        'wnd10m': {'role': ('uas', 'vas'), 'scale': 1e-2, 'hour0': 1,
                   'vector': True},
        # Surface pressure (Pa)
        'pressfc': {'role': 'ps', 'scale': 1, 'offset': 75000, 'hour0': 0},
        # Downwelling longwave surface radiation (W/m²)
        'dlwsfc': {'role': 'rlds', 'scale': 0.1, 'offset': 0, 'hour0': 1},
        # Downwelling shortwave surface radiation (W/m²)
        'dswsfc': {'role': 'rsds', 'scale': 0.1, 'offset': 0, 'hour0': 1},
        # Downwelling clear sky shortwave surface radiation (W/m²)
        'dcsswsfc': {'role': 'rsdsc', 'scale': 0.1, 'offset': 0, 'hour0': 1,
                     'uglpr': (2, 0, 5, 5, 0)},
        # Downwelling shortwave radiation at top of atmosphere (W/m²)
        'dswtoa': {'role': 'rsdt', 'scale': 0.1, 'offset': 0, 'hour0': 1,
                   'uglpr': (2, 0, 21, 13, 0)},
        # Precipitation rate (kg/m²/s = 1 mm/s = 3600 mm/hr)
        'prate': {'role': 'pr', 'scale': 1/36000, 'hour0': 1},
        # Cloud cover (convert from % to fraction)
        'cldcovtot': {'role': 'clt', 'scale': 1e-4, 'hour0': 1,
                      'converter': lambda x: x/100},
        # Precipitable water (kg/m²)
        'pwat': {'role': 'pwat', 'scale': 1e-2, 'hour0': 0},
        # Surface albedo (convert from % to fraction) (download & assemble)
        'albedo': {'role': 'albedo', 'scale': 1e-4, 'offset': 0, 'hour0': 1,
                   'uglpr': (2, 0, 5, 1, 0), 'converter': lambda x: x/100},
    }
    # fmt:on

    # Time: CFSv2 starts in 2011; None means open-ended (to present).
    years = [2011, None]
    # Hourly records.
    freq = 1

    # Grid: 880 x 1760 Gaussian grid, origin at the north pole.
    grid = GaussianGrid(shape=(880, 1760), origin=(90, 0), delta=(-1, 360 / 1760))

    # CFSv2 RDA dataset
    dataset = "ds094"
def stack(self, dvars=None, years=None, months=None, force=False, **kwargs):
    """
    Fill element HDF with available GRB data.

    Walks the requested variables and copies the monthly GRIB2 files
    previously fetched by ``download`` into the int16 HDF slabs that
    back this dataset.

    Args:
        dvars: variable keys to stack (default: every key in ``self.dvars``).
        years: years to process (default: all; see ``iter_year``).
        months: months to process within each year (default: all).
        force: if True, re-stack slabs that are already full.
        **kwargs: ignored; accepted for interface compatibility.
    """
    if dvars is None:
        dvars = list(self.dvars.keys())
    grb_path = self.get_data_path("grb2")
    for dvar in sorted(dvars):
        # Check dvar
        if dvar not in self:
            print("%s not in dataset... skipping" % dvar)
            continue
        # Optional raw-value converter (e.g. percent -> fraction)
        converter = self.dvars[dvar].get("converter", None)
        # Hour-of-day offset of the first record in each month
        hour0 = self.dvars[dvar].get("hour0", 0)
        # Special case: time-invariant fields are stored as a single frame
        if self.isconstant(dvar):
            with self[dvar] as slab:
                # Create slab if it doesn't exist
                if not slab:
                    slab.create(
                        shape=self.grid.shape,
                        year=self.years[0],
                        freq=0,
                        **self.dvars[dvar]
                    )
                if slab.isfull(0) and not force:
                    print(dvar, "already stacked... skipping")
                    continue
                path = os.path.join(grb_path, dvar + ".grb2")
                if not os.path.isfile(path):
                    print(dvar, "missing... skipping")
                    continue
                # Open GRB; the constant file holds a single message
                grb = pygrib.open(path)
                msg = grb.readline()
                print(msg)
                # NOTE(review): slab.to_int presumably quantizes floats to
                # int16 via the dvar's scale/offset — confirm in slab API.
                slab.fill(
                    0, slab.to_int(np.expand_dims(msg.values, axis=-1), converter)
                )
            continue
        # Loop over request years
        for year in self.iter_year(years):
            # Scalars: one value per grid point
            if self.isscalar(dvar):
                shape = self.grid.shape
            # Vectors: (u, v) component pair per grid point
            if self.isvector(dvar):
                shape = self.grid.shape + (2,)
            with self[dvar, year] as slab:
                if slab:
                    print(dvar, "exists... updating")
                else:
                    print(dvar, "does not exist... creating")
                    slab.create(
                        shape=shape, year=year, freq=self.freq, **self.dvars[dvar]
                    )
                for month in self.iter_month(year, months):
                    start_time = time.time()
                    # First hour index of this month within the year slab
                    i = slab.date2ind(months=month, hours=hour0)
                    nh = slab.month_len(month)
                    if slab.isfull(np.s_[i : (i + nh)]) and not force:
                        print(year, month, "already stacked... skipping")
                        continue
                    fn = self.get_grb2_filename(dvar, year, month)
                    # Open GRB; shouldn't be missing as we glob'd it above
                    try:
                        grb = pygrib.open(fn)
                    except IOError:
                        print(year, month, "missing... skipping")
                        continue
                    if self.isvector(dvar):
                        # Shape of this month
                        # i.e. {nlat} x {nlon} x {x, y components} x {nh}
                        shape = self.grid.shape + (2, nh)
                        # Store entire month; big!
                        X = np.zeros(shape, dtype=np.int16)
                        # Messages come in 6-hour groups; each group has
                        # 6 steps x 2 components = 12 messages.
                        for hexa in range(grb.messages // 12):
                            for step in range(6):
                                # Loop over x- and y- component
                                for comp in range(2):
                                    msg = grb.readline()
                                    print(repr(msg))
                                    # Absolute hour-of-month of this step
                                    h = (msg.day - 1) * 24 + msg.hour + step
                                    if msg.stepType == "instant":
                                        X[:, :, comp, h] = slab.to_int(
                                            msg.values, converter
                                        )
                                    else:
                                        raise NotImplementedError(msg.stepType)
                    else:
                        # Shape of this month; i.e. nlat x nlon x nh values
                        shape = self.grid.shape + (nh,)
                        # Store entire month; big!
                        X = np.zeros(shape, dtype=np.int16)
                        # 6 messages per 6-hour forecast group
                        for hexa in range(grb.messages // 6):
                            values = np.zeros(self.grid.shape)
                            prev_values = np.zeros(self.grid.shape)
                            for step in range(6):
                                msg = grb.readline()
                                print(repr(msg))
                                np.copyto(values, msg.values)
                                h = (msg.day - 1) * 24 + msg.hour + step
                                if msg.stepType == "instant":
                                    X[..., h] = slab.to_int(values, converter)
                                elif msg.stepType == "avg":
                                    # Undo the running mean: recover the
                                    # hourly value from consecutive averages
                                    X[..., h] = slab.to_int(
                                        (step + 1) * values - step * prev_values,
                                        converter,
                                    )
                                    np.copyto(prev_values, values)
                                elif msg.stepType == "accum":
                                    # Undo the running accumulation
                                    X[..., h] = slab.to_int(
                                        values - prev_values, converter
                                    )
                                    np.copyto(prev_values, values)
                                else:
                                    raise NotImplementedError(msg.stepType)
                    # Close the GRB
                    grb.close()
                    # Store it
                    print(year, month, "complete... writing", flush=True)
                    slab.fill(i, X)
                    # Drop the big array before the next month's allocation
                    X = None
                    # Write to syslog
                    syslog_elapsed_time(
                        time.time() - start_time,
                        "%s %s %04d-%02d written." % (str(self), dvar, year, month),
                    )
def download(self, dvars=None, years=None, months=None, **kwargs):
    """
    Download CFSv2 GRB files from the NCAR RDA archive.

    Logs into rda.ucar.edu using credentials from ``~/.netrc``, then
    fetches either complete monthly GRIB2 files or — for variables
    tagged with a 'uglpr' key — assembles the monthly file by pulling
    byte ranges for the matching messages out of the daily tar archives.

    Args:
        dvars: variable keys to fetch (default: every key in ``self.dvars``).
        years: years to process (default: all; see ``iter_year_month``).
        months: months to process within each year (default: all).
        **kwargs: ignored; accepted for interface compatibility.

    Raises:
        SystemExit: on missing credentials, failed login, or an
            unrecoverable partial download.
    """
    if dvars is None:
        # Default is all of them
        dvars = list(self.dvars.keys())
    if not isinstance(dvars, list):
        dvars = [dvars]

    # Establish an authenticated session against the RDA server
    session = requests.Session()
    machine = "rda.ucar.edu"
    auth = netrc.netrc().authenticators(machine)
    if auth is None:
        raise SystemExit("Add rda.ucar.edu credentials to .netrc")
    email, _, passwd = auth
    request = session.post(
        r"https://rda.ucar.edu/cgi-bin/login",
        # FIX: was the anonymization placeholder `<PASSWORD>` (a syntax
        # error); send the password unpacked from netrc above.
        data={"email": email, "password": passwd, "action": "login"},
    )
    if request.status_code != 200:
        raise SystemExit(request.headers)

    def get_file(url, dst):
        """Stream url to dst; skip if the local copy is already current."""
        # Ensure directory exists
        os.makedirs(os.path.dirname(dst), exist_ok=True)
        # Start request
        request = session.get(url, stream=True)
        if request.status_code != 200:
            print("%s unavailable... skipping" % url)
            return False
        content_length = int(request.headers["Content-Length"])
        last_modified = mktime_tz(parsedate_tz(request.headers["Last-Modified"]))
        # Up to date if size and server mtime both match the local file
        if os.path.isfile(dst):
            if os.path.getsize(dst) == content_length:
                if os.path.getmtime(dst) == last_modified:
                    print("%s unchanged... skipping" % url)
                    return False
        print("%s available..." % url, "downloading %d bytes" % content_length)
        try:
            # Stream to file (FIX: close the handle via `with`; it was
            # previously opened inline and leaked)
            with open(dst, "wb") as f:
                shutil.copyfileobj(request.raw, f)
            # Set time on disk to server time
            os.utime(dst, (last_modified, last_modified))
        except BaseException:
            # Problem; delete the partial file and re-raise
            if os.path.isfile(dst):
                print("%s deleted... skipping" % dst)
                os.remove(dst)
            raise
        return True

    def get_partial_file(url, dst, byte_range):
        """Download byte_range of url and append it to dst."""
        # Ensure directory exists
        os.makedirs(os.path.dirname(dst), exist_ok=True)
        # Start request, downloading only within byte range; retry a
        # few times on connection-level failures
        for attempt in range(3):
            try:
                headers = {"Range": "bytes={0}-{1}".format(*byte_range)}
                request = session.get(url, headers=headers, stream=True)
            except BaseException:
                # Retry
                print("%s unavailable... retrying" % url)
                continue
            else:
                # Success
                break
        else:
            # Failure after all retries: a partially assembled dst is
            # useless, so remove it and bail out
            if os.path.isfile(dst):
                os.remove(dst)
                raise SystemExit("%s problem... deleting & exiting" % dst)
            else:
                raise SystemExit("%s problem... exiting" % dst)
        # 206 = Partial Content; anything else means ranges unsupported
        if request.status_code != 206:
            print("%s unavailable... skipping" % url)
            return False
        content_length = int(request.headers["Content-Length"])
        print(
            "%s available..." % url,
            "partially downloading %d bytes" % content_length,
        )
        try:
            # Append to file (FIX: close the handle via `with`)
            with open(dst, "ab") as f:
                shutil.copyfileobj(request.raw, f)
        except BaseException:
            # Delete file at the slightest whiff of trouble
            if os.path.isfile(dst):
                os.remove(dst)
                raise SystemExit("%s problem... deleting & exiting" % dst)
            else:
                raise SystemExit("%s problem... exiting" % dst)
        return True

    def get_constants():
        """Extract the time-invariant fields from one archive tar."""
        # Need to get a single, large TAR from the 6-hourly products
        # ds93.0 (CFSR) and ds94.0 (CFSv2)
        constants = self.constants()
        for dvar in constants:
            fn = os.path.join(self.get_data_path("grb2"), "%s.grb2" % dvar)
            if not os.path.isfile(fn):
                break
        else:
            print("All constants downloaded and extracted")
            return
        if self.dataset == "ds093":
            fn = r"flxf01.gdas.19790101-19790105.tar"
            url = r"https://rda.ucar.edu/data/ds093.0/1979/" + fn
        elif self.dataset == "ds094":
            fn = r"flxf01.gdas.20110101-20110105.tar"
            url = r"https://rda.ucar.edu/data/ds094.0/2011/" + fn
        else:
            # FIX: was `raise NotImplemented(...)` — NotImplemented is a
            # singleton, not an exception, and raising it is a TypeError.
            raise NotImplementedError(self.dataset)
        dst = os.path.join(self.get_data_path("grb2"), fn)
        # Ensure grb directory exists
        os.makedirs(self.get_data_path("grb2"), exist_ok=True)
        # Grab tar
        get_file(url, dst)
        # Open it up and pull out the first GRB member only
        print("Inspecting %s" % fn)
        with tarfile.open(dst, "r") as tar:
            for member in tar:
                fn = os.path.join(self.get_data_path("grb2"), member.name)
                if not os.path.isfile(fn):
                    print("Extracting %s" % fn)
                    # NOTE(review): reads member bytes straight off
                    # tar.fileobj; relies on the stream being positioned
                    # at this member — confirm against tarfile internals.
                    with open(fn, "wb") as f:
                        shutil.copyfileobj(tar.fileobj, f, member.size)
                break
        # Extract each constant message into its own single-message GRB
        with pygrib.open(fn) as grb:
            for msg in grb:
                # Iterate a copy; the list is mutated below
                for dvar in constants[:]:
                    if msg.shortName == dvar:
                        fn = os.path.join(
                            self.get_data_path("grb2", "%s.grb2" % dvar)
                        )
                        with open(fn, "wb") as f:
                            f.write(msg.tostring())
                        constants.remove(dvar)

    def get_hourly_file(dvar, year, month):
        """Fetch the ready-made monthly GRIB2 file for dvar."""
        # Build paths
        dst = self.get_grb2_filename(dvar, year, month)
        url = "https://rda.ucar.edu/data/%s.1/%04d/%s" % (
            self.dataset,
            year,
            os.path.basename(dst),
        )
        # Grab grb2
        get_file(url, dst)

    def get_byte_ranges(invUrl, uglpr, nForecasts=1):
        """
        Download inventory URL and scan for UGLPR.

        Inventory ranges are provided for each specific variable coded as
        U: Product e.g. 0: 1-hour Average, 1: 1-hour Forecast, etc.
        G: Grid: e.g. 0: Gaussian
        L: Level: e.g. 0: Surface, 8: Top-of-atmosphere
        P: Variable: e.g. 59=clear sky (look for 0.4.192)
        R: Process: e.g. 0

        Returns a list of inclusive (first_byte, last_byte) tuples, one
        per matching message, in inventory order.
        """
        request = session.get(invUrl)
        if request.status_code != 200:
            print("%s unavailable... skipping" % invUrl)
            return []
        # Content is gzip'd; extract into str
        content = request.content.decode("utf-8")
        # Build search string(s)
        # If number of forecasts > 1, the U code repeats every 4
        uglprStr = []
        U, G, L, P, R = uglpr
        for f in range(nForecasts):
            uglprStr.append("|%d|%d|%d|%d|%d" % (U + 4 * f, G, L, P, R))
        # Find ranges associated with each message in tar
        byte_ranges = []
        for line in content.splitlines():
            for f in range(nForecasts):
                if uglprStr[f] in line:
                    # Inventory line: offset|length|...codes...
                    fields = line.split("|")
                    offset, length = int(fields[0]), int(fields[1])
                    byte_ranges.append((offset, offset + length - 1))
        return byte_ranges

    def assemble_hourly_file(dvar, year, month, uglpr):
        """Build the monthly GRIB2 for dvar from per-message byte ranges."""
        # Build path
        dst = self.get_grb2_filename(dvar, year, month)
        # Already assembled?
        if os.path.isfile(dst):
            print("%s exists... skipping" % dst)
            return
        # Get inventories and generate a download work list: one
        # (tar URL, byte range) entry per hour of the month
        print("%s..." % dst, "getting server inventory and building download list")
        numDays = calendar.monthrange(year, month)[1]
        work_list = [None] * (24 * numDays)
        stream = self.get_stream(year, month)
        if stream == "gdas":
            # CFSR and CFSv2, the early days: tars cover 5-day spans
            for (d1, d2) in [
                (1, 5),
                (6, 10),
                (11, 15),
                (16, 20),
                (21, 25),
                (26, numDays),
            ]:
                for fHour in range(6):
                    # Build URLs
                    dayRange = "-".join(
                        (
                            "%04d%02d%02d" % (year, month, d1),
                            "%04d%02d%02d" % (year, month, d2),
                        )
                    )
                    fn = "flxf%02d.gdas.%s.tar" % (fHour + 1, dayRange)
                    tarUrl = "https://rda.ucar.edu/data/"
                    tarUrl += "%s.0/%d/%s" % (self.dataset, year, fn)
                    invUrl = "https://rda.ucar.edu/"
                    invUrl += "cgi-bin/datasets/inventory?"
                    invUrl += "df=%04d/%s&" % (year, fn)
                    invUrl += "ds=%s.0&" % self.dataset[2:]
                    invUrl += "type=GrML"
                    # Get ranges for this particular inventory
                    ranges = get_byte_ranges(invUrl, uglpr)
                    if len(ranges):
                        for ir, r in enumerate(ranges):
                            # Absolute hour in month
                            hour = (d1 - 1) * 24 + ir * 6 + fHour
                            # Add to work list
                            work_list[hour] = (tarUrl, r)
        elif stream == "cdas1":
            # CFSv2, the later days: one tar per day, 6 forecasts each
            for day in range(1, numDays + 1):
                # Build URLs
                fn = "cdas1.%04d%02d%02d.sfluxgrbf.tar" % (year, month, day)
                tarUrl = "https://rda.ucar.edu/data/"
                tarUrl += "%s.0/%d/%s" % (self.dataset, year, fn)
                invUrl = "https://rda.ucar.edu/cgi-bin/datasets/inventory?"
                invUrl += "df=%04d/%s&" % (year, fn)
                invUrl += "ds=%s.0&" % self.dataset[2:]
                invUrl += "type=GrML"
                # Get ranges for this particular inventory
                ranges = get_byte_ranges(invUrl, uglpr, nForecasts=6)
                if len(ranges):
                    for ir, r in enumerate(ranges):
                        # Absolute hour in month
                        hour = (day - 1) * 24 + ir
                        # Add to work list
                        work_list[hour] = (tarUrl, r)
        else:
            raise NotImplementedError(stream)
        # Check if complete inventory: a single missing hour means the
        # assembled file would be unusable
        for val in work_list:
            if val is None:
                print("%s incomplete... skipping" % dst)
                if os.path.isfile(dst):
                    os.remove(dst)
                return False
        # Now loop over in hourly order and concatenate to dst
        for tarUrl, tarRange in work_list:
            get_partial_file(tarUrl, dst, tarRange)
        return True

    # Release the sloths...
    for dvar in sorted(dvars):
        if dvar not in self:
            print("%s not in dataset... skipping" % dvar)
            print("available: %r" % self.dvars.keys())
            continue
        print(dvar)
        if self.isconstant(dvar):
            # Constant file
            get_constants()
        else:
            # Hourly file: either ready-made or assembled via 'uglpr'
            uglpr = self.dvars[dvar].get("uglpr", None)
            for year, month in self.iter_year_month(years, months):
                if uglpr is None:
                    get_hourly_file(dvar, year, month)
                else:
                    assemble_hourly_file(dvar, year, month, uglpr)
    return
def get_stream(self, year, month):
    """
    Return the archive stream name ('gdas' or 'cdas1') for year/month.
    """
    # The CFSR archive (ds093) only ever used the gdas stream.
    if self.dataset == "ds093":
        return "gdas"
    # CFSv2 started on gdas and switched to cdas1 in April 2011.
    if year == 2011 and month < 4:
        return "gdas"
    return "cdas1"
def get_grb2_filename(self, dvar, year, month):
    """
    Return the on-disk path of the monthly GRIB2 file for dvar.

    Files are grouped by year and named <dvar>.<stream>.<yyyymm>.grb2.
    """
    stream = self.get_stream(year, month)
    basename = "%s.%s.%04d%02d.grb2" % (dvar.lower(), stream, year, month)
    return os.path.join(self.get_data_path("grb2", "%04d" % year, basename))
def calc_tsi(self, year):
    """
    Calculate CFSR total solar irradiance based on year
    c.f. radiation_astronomy.f
    """
    # Tabulated annual TSI anomalies (W/m²) above a 1360 W/m² base,
    # covering 1979-2006 (presumably the Hoyt/Schatten reconstruction
    # used by CFSR — confirm against radiation_astronomy.f).
    # fmt: off
    dTSI = np.array([
        6.70, 6.70, 6.80, 6.60, 6.20, 6.00, 5.70, 5.70, 5.80, 6.20, 6.50,
        6.50, 6.50, 6.40, 6.00, 5.80, 5.70, 5.70, 5.90, 6.40, 6.70, 6.70,
        6.80, 6.70, 6.30, 6.10, 5.90, 5.70
    ])
    # fmt: on
    # Index of each requested year into the table
    idx = np.asarray(year) - 1979
    # Outside the tabulated span, repeat the nominal 11-year sunspot
    # cycle backward/forward until every index lands in range
    while np.any(idx < 0):
        idx[idx < 0] += 11
    while np.any(idx > 27):
        idx[idx > 27] -= 11
    # Add base
    return 1360.0 + dTSI[idx]
def main():
    """Demonstrate typical queries against the CFSv2 dataset."""
    # Create CFSv2 instance
    C = CFSV2()
    # Extract air temperature at 2m at a given location into a Pandas Series
    # (return the nearest location)
    x = C("tas", 33.640, -84.430)
    print(x.head())
    # The same call but applying bi-linear interpolation of the surrounding
    # 4 grid locations and restricting data to the year 2018.
    y = C("tas", 33.640, -84.430, hgt=313, order=1, years=[2018])
    print(y.head())
    # Calculate the ASHRAE tau coefficients and optionally the fluxes at noon
    tau = C.to_clearsky(33.640, -84.430, years=[2011, 2015], noon_flux=True)
    print(tau)
    # Produces the average monthly (and annual) daily-average all sky radiation
    # for every requested year
    rad = C.to_allsky(lat=33.640, lon=-84.430, years=range(2011, 2015))
    # Which again can be massaged into the required statistics (mean, std)
    print(rad.describe().round(decimals=1))
    # Extract the solar components
    solar = C.solar_split(33.640, -84.430, years=[2018])
    print(solar[12:24])


# Only run the demo when executed as a script, not on import.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.