id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
4858587 | #!/usr/bin/env python3
from setuptools import setup, find_packages
# Package metadata for the acapi2 distribution (Acquia Cloud API v2 client).
setup(
    name='acapi2',
    # Pre-release: first alpha of the 2.x line.
    version='2.0.0-a1',
    url='https://github.com/pmatias/python-acquia-cloud-2',
    # NOTE(review): placeholder download URL ("TBD") — update before release.
    download_url='https://pypi.python.org/pypi/TBD',
    license='MIT',
    author='<NAME>',
    author_email='<EMAIL>',
    description='Acquia Cloud API v2 client.',
    # Install unzipped so packaged data files remain directly accessible.
    zip_safe=False,
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Intended Audience :: Education',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Topic :: Internet',
    ],
    platforms='any',
    # Ship every discovered package except the test suite.
    packages=find_packages(exclude=['tests']),
    include_package_data=True,
    # Pinned runtime dependencies.
    install_requires=[
        'http-hmac-python==2.4.0',
        'requests==2.18.4',
        'requests-cache==0.4.13',
        'setuptools>=18.5'
    ]
)
| StarcoderdataPython |
1709748 | <gh_stars>0
import sys
import math
class IV:
    """Derives the initial hash value for SHA-512/t (FIPS 180-4 5.3.6)."""
    # NOTE(review): the first parameter receives the truncation length t, not
    # an instance — callers invoke this as IV.IV(t) on the class. Confirm.
    def IV(t):
        # FIPS 180-4 5.3.6: start from the SHA-512 IV with every 64-bit word
        # XORed with the constant 0xa5a5a5a5a5a5a5a5.
        H = [int('6a09e667f3bcc908', 16) ^ int('a5a5a5a5a5a5a5a5', 16),
             int('bb67ae8584caa73b', 16) ^ int('a5a5a5a5a5a5a5a5', 16),
             int('3c6ef372fe94f82b', 16) ^ int('a5a5a5a5a5a5a5a5', 16),
             int('a54ff53a5f1d36f1', 16) ^ int('a5a5a5a5a5a5a5a5', 16),
             int('510e527fade682d1', 16) ^ int('a5a5a5a5a5a5a5a5', 16),
             int('9b05688c2b3e6c1f', 16) ^ int('a5a5a5a5a5a5a5a5', 16),
             int('1f83d9abfb41bd6b', 16) ^ int('a5a5a5a5a5a5a5a5', 16),
             int('5be0cd19137e2179', 16) ^ int('a5a5a5a5a5a5a5a5', 16)]
        # Hash the ASCII string "SHA-512/<t>" under the modified IV; the hex
        # digest supplies the words of the SHA-512/t IV.
        t = hash.sha512_for_t('SHA-512/t'.replace('t', str(t)), H)
        H = []
        # Split the hex digest into eight 64-bit words (16 hex chars each).
        # NOTE(review): this assumes the digest string is exactly 128 hex
        # characters — hex() in sha512_for_t can drop leading zeros; verify.
        for i in range(0, 8):
            H.append(int(t[i*16:(i* 16)+16], 16))
        return H
class sched:
    """Message-schedule helpers shared by the SHA family (FIPS 180-4 6.x)."""

    # FIPS-180-4 2.2.2
    # Rotate the w-bit value x right by n bits.
    @staticmethod
    def ROTR(x, n, w=32):
        return ((x >> n) | (x << w - n)) & ((1 << w) - 1)

    # FIPS-180-4 3.2
    # Rotate the w-bit value x LEFT by n bits.  (The previous comment said
    # "right", and the previous body did not forward w to ROTR, so any
    # w != 32 produced a negative-shift error or a wrong result.)
    @staticmethod
    def ROTL(x, n, w=32):
        # Rotating left by n bits equals rotating right by w - n bits.
        return sched.ROTR(x, w - n, w)

    # FIPS-180-4 4.1.2 — small sigma-0 for SHA-224/256 (32-bit words).
    @staticmethod
    def sigma0(x):
        return sched.ROTR(x, 7) ^ sched.ROTR(x, 18) ^ (x >> 3)

    # FIPS-180-4 4.1.2 — small sigma-1 for SHA-224/256 (32-bit words).
    @staticmethod
    def sigma1(x):
        return sched.ROTR(x, 17) ^ sched.ROTR(x, 19) ^ (x >> 10)

    # FIPS-180-4 4.1.2 — small sigma-0 for SHA-384/512 (64-bit words).
    @staticmethod
    def sigma0_sha384(x):
        return sched.ROTR(x, 1, 64) ^ sched.ROTR(x, 8, 64) ^ (x >> 7)

    # FIPS-180-4 4.1.2 — small sigma-1 for SHA-384/512 (64-bit words).
    @staticmethod
    def sigma1_sha384(x):
        return sched.ROTR(x, 19, 64) ^ sched.ROTR(x, 61, 64) ^ (x >> 6)

    # FIPS-180-4 6.1.2
    # Next message-schedule word for SHA-1: first 16 come straight from the
    # block, the rest from the XOR/rotate recurrence.
    @staticmethod
    def MIX_sha160(t, init_words, w=32):
        if t >= 16:
            return sched.ROTL(init_words[t - 3] ^ init_words[t - 8]
                              ^ init_words[t - 14] ^ init_words[t - 16], 1)
        return init_words[t]

    # Next message-schedule word for SHA-224/256 (mod 2^32).
    @staticmethod
    def MIX_sha224(t, init_words, w=32):
        if t >= 16:
            return int((sched.sigma1(init_words[t - 2]) + init_words[t - 7]
                        + sched.sigma0(init_words[t - 15])
                        + init_words[t - 16]) % 2 ** 32)
        return init_words[t]

    # Next message-schedule word for SHA-384/512 (mod 2^64).
    @staticmethod
    def MIX_sha384(t, init_words, w=64):
        if t >= 16:
            return int((sched.sigma1_sha384(init_words[t - 2]) + init_words[t - 7]
                        + sched.sigma0_sha384(init_words[t - 15])
                        + init_words[t - 16]) % 2 ** 64)
        return init_words[t]

    # SHA-512 shares the SHA-384 schedule recurrence exactly; delegate
    # instead of duplicating the arithmetic (the bodies were identical).
    @staticmethod
    def MIX_512(t, init_words, w=64):
        return sched.MIX_sha384(t, init_words, w)

    # FIPS-180-4 6.1.2
    # Full 80-word SHA-1 schedule for one 512-bit block.
    @staticmethod
    def create_schedule_sha160(inital_words):
        W = []
        for t in range(0, 16):
            W.append(sched.MIX_sha160(t, inital_words))
        for t in range(16, 80):
            W.append(sched.MIX_sha160(t, W))
        return W

    # 64-word SHA-224/256 schedule for one 512-bit block.
    @staticmethod
    def create_schedule_sha224(inital_words):
        W = []
        for t in range(0, 16):
            W.append(sched.MIX_sha224(t, inital_words))
        for t in range(16, 64):
            W.append(sched.MIX_sha224(t, W))
        return W

    # 80-word SHA-384 schedule for one 1024-bit block.
    @staticmethod
    def create_schedule_sha384(inital_words):
        W = []
        for t in range(0, 16):
            W.append(sched.MIX_sha384(t, inital_words))
        for t in range(16, 80):
            W.append(sched.MIX_sha384(t, W))
        return W

    # 80-word SHA-512 schedule for one 1024-bit block.
    @staticmethod
    def create_schedule_sha512(inital_words):
        W = []
        for t in range(0, 16):
            W.append(sched.MIX_512(t, inital_words))
        for t in range(16, 80):
            W.append(sched.MIX_512(t, W))
        return W
class ppp:
    """Input converters: turn various message representations into ASCII text."""

    # Plain text needs no conversion; return it untouched.
    def from_str(message):
        return message

    # Decimal string -> the single character with that code point.
    def from_int(message):
        return chr(int(message, 10))

    # Hexadecimal string -> the single character with that code point.
    def from_hex(message):
        return chr(int(message, 16))

    # Binary string -> the single character with that code point.
    def from_bin(message):
        return chr(int(message, 2))

    # Octal string -> the single character with that code point.
    def from_oct(message):
        return chr(int(message, 8))

    # Read a file as raw bytes and map every byte to the character with the
    # same code point (a latin-1-style decoding).
    def from_file(filename):
        with open(filename, 'rb') as handle:
            raw = handle.read()
        return ''.join(chr(byte) for byte in raw)
class prep:
    """Pre-processing (padding and parsing) for the 512-bit-block SHA family."""

    # FIPS-180-4 5.1.1
    # Pad an ASCII message into a bit string whose length is a multiple of
    # block_size: message bits, a single 1-bit, zero fill, then the message
    # length in a lenght_block_size-bit field.
    def padd(message, block_size=512, lenght_block_size=64):
        # Each character becomes its code point rendered as (at least) 8 bits.
        bits = ''.join(bin(ord(ch))[2:].zfill(8) for ch in message)
        # Original message length in bits (recorded before padding).
        msg_len = len(bits)
        length_field = bin(msg_len)[2:].zfill(8)
        # Mandatory separator bit.
        bits += '1'
        # Zero bits needed so the total length is congruent to
        # block_size - lenght_block_size (mod block_size).
        zero_count = ((((block_size - lenght_block_size) % block_size)
                       - (1 + msg_len)) % block_size)
        bits += '0' * zero_count
        # Append the length, widened to the full length-field size.
        return bits + length_field.zfill(lenght_block_size)

    # FIPS-180-4 5.2.1
    # Split a padded bit string into blocks of block_size bits, each parsed
    # into sixteen w-bit integer words.  Returns a list of 16-word lists.
    def parse(message, block_size=512, w=32):
        blocks = []
        block_count = len(message) // block_size
        for b in range(block_count):
            chunk = message[b * block_size:(b + 1) * block_size]
            # Sixteen w-bit words per block, parsed as integers.
            words = [int(chunk[j * w:(j + 1) * w], 2) for j in range(16)]
            blocks.append(words)
        return blocks

    # FIPS-180-4 6.2.1
    # Pre-process a message: pad, then parse into word blocks.
    # Profiles: 0 - sha160 (512-bit blocks, 64-bit length, 32-bit words).
    def prep(message, profile=0):
        block_size, lenght_block_size, w = 512, 64, 32
        if profile == 0:
            block_size, lenght_block_size, w = 512, 64, 32
        padded = prep.padd(message, block_size, lenght_block_size)
        return prep.parse(padded, block_size, w)
class prep_sha384:
    """Pre-processing (padding and parsing) for the 1024-bit-block SHA family
    (SHA-384 and SHA-512)."""

    # FIPS-180-4 5.1.2
    # Pad an ASCII message into a bit string whose length is a multiple of
    # block_size: message bits, a single 1-bit, zero fill, then the message
    # length in a lenght_block_size-bit field.
    def padd(message, block_size=1024, lenght_block_size=128):
        # Each character becomes its code point rendered as (at least) 8 bits.
        bits = ''.join(bin(ord(ch))[2:].zfill(8) for ch in message)
        # Original message length in bits (recorded before padding).
        msg_len = len(bits)
        length_field = bin(msg_len)[2:].zfill(8)
        # Mandatory separator bit.
        bits += '1'
        # Zero bits needed so the total length is congruent to
        # block_size - lenght_block_size (mod block_size).
        zero_count = ((((block_size - lenght_block_size) % block_size)
                       - (1 + msg_len)) % block_size)
        bits += '0' * zero_count
        # Append the length, widened to the full length-field size.
        return bits + length_field.zfill(lenght_block_size)

    # FIPS-180-4 5.2.2
    # Split a padded bit string into blocks of block_size bits, each parsed
    # into sixteen w-bit integer words.  Returns a list of 16-word lists.
    def parse(message, block_size=1024, w=64):
        blocks = []
        block_count = len(message) // block_size
        for b in range(block_count):
            chunk = message[b * block_size:(b + 1) * block_size]
            # Sixteen w-bit words per block, parsed as integers.
            words = [int(chunk[j * w:(j + 1) * w], 2) for j in range(16)]
            blocks.append(words)
        return blocks

    # Pre-process a message for SHA-384/512: pad, then parse into 64-bit
    # word blocks of 1024 bits each.
    def prep(message, profile=0):
        block_size, lenght_block_size, w = 1024, 128, 64
        padded = prep_sha384.padd(message, block_size, lenght_block_size)
        return prep_sha384.parse(padded, block_size, w)
class hash:
# FIPS-180-4 4.2.1
# SHA-1 round constants; starts empty and is filled lazily by hash_sha160.
K_sha160 = []
# FIPS-180-4 5.3.1
# SHA-1 constant initial hash value (five 32-bit words).
H_sha160 = [int('67452301', 16),
            int('efcdab89', 16),
            int('98badcfe', 16),
            int('10325476', 16),
            int('c3d2e1f0', 16)]
K_sha224 = [int('428a2f98', 16),
int('71374491', 16),
int('b5c0fbcf', 16),
int('e9b5dba5', 16),
int('3956c25b', 16),
int('59f111f1', 16),
int('923f82a4', 16),
int('ab1c5ed5', 16),
int('d807aa98', 16),
int('12835b01', 16),
int('243185be', 16),
int('550c7dc3', 16),
int('72be5d74', 16),
int('80deb1fe', 16),
int('9bdc06a7', 16),
int('c19bf174', 16),
int('e49b69c1', 16),
int('efbe4786', 16),
int('0fc19dc6', 16),
int('240ca1cc', 16),
int('2de92c6f', 16),
int('4a7484aa', 16),
int('5cb0a9dc', 16),
int('76f988da', 16),
int('983e5152', 16),
int('a831c66d', 16),
int('b00327c8', 16),
int('bf597fc7', 16),
int('c6e00bf3', 16),
int('d5a79147', 16),
int('06ca6351', 16),
int('14292967', 16),
int('27b70a85', 16),
int('2e1b2138', 16),
int('4d2c6dfc', 16),
int('53380d13', 16),
int('650a7354', 16),
int('766a0abb', 16),
int('81c2c92e', 16),
int('92722c85', 16),
int('a2bfe8a1', 16),
int('a81a664b', 16),
int('c24b8b70', 16),
int('c76c51a3', 16),
int('d192e819', 16),
int('d6990624', 16),
int('f40e3585', 16),
int('106aa070', 16),
int('19a4c116', 16),
int('1e376c08', 16),
int('2748774c', 16),
int('34b0bcb5', 16),
int('391c0cb3', 16),
int('4ed8aa4a', 16),
int('5b9cca4f', 16),
int('682e6ff3', 16),
int('748f82ee', 16),
int('78a5636f', 16),
int('84c87814', 16),
int('8cc70208', 16),
int('90befffa', 16),
int('a4506ceb', 16),
int('bef9a3f7', 16),
int('c67178f2', 16)
]
# FIPS-180-4 5.3.2
# Constant inital hash values
H_sha224 = [int('c1059ed8', 16),
int('367cd507', 16),
int('3070dd17', 16),
int('f70e5939', 16),
int('ffc00b31', 16),
int('68581511', 16),
int('64f98fa7', 16),
int('befa4fa4', 16)]
# FIPS-180-4 4.2.2
# Constant values
K_sha256 = [int('428a2f98', 16),
int('71374491', 16),
int('b5c0fbcf', 16),
int('e9b5dba5', 16),
int('3956c25b', 16),
int('59f111f1', 16),
int('923f82a4', 16),
int('ab1c5ed5', 16),
int('d807aa98', 16),
int('12835b01', 16),
int('243185be', 16),
int('550c7dc3', 16),
int('72be5d74', 16),
int('80deb1fe', 16),
int('9bdc06a7', 16),
int('c19bf174', 16),
int('e49b69c1', 16),
int('efbe4786', 16),
int('0fc19dc6', 16),
int('240ca1cc', 16),
int('2de92c6f', 16),
int('4a7484aa', 16),
int('5cb0a9dc', 16),
int('76f988da', 16),
int('983e5152', 16),
int('a831c66d', 16),
int('b00327c8', 16),
int('bf597fc7', 16),
int('c6e00bf3', 16),
int('d5a79147', 16),
int('06ca6351', 16),
int('14292967', 16),
int('27b70a85', 16),
int('2e1b2138', 16),
int('4d2c6dfc', 16),
int('53380d13', 16),
int('650a7354', 16),
int('766a0abb', 16),
int('81c2c92e', 16),
int('92722c85', 16),
int('a2bfe8a1', 16),
int('a81a664b', 16),
int('c24b8b70', 16),
int('c76c51a3', 16),
int('d192e819', 16),
int('d6990624', 16),
int('f40e3585', 16),
int('106aa070', 16),
int('19a4c116', 16),
int('1e376c08', 16),
int('2748774c', 16),
int('34b0bcb5', 16),
int('391c0cb3', 16),
int('4ed8aa4a', 16),
int('5b9cca4f', 16),
int('682e6ff3', 16),
int('748f82ee', 16),
int('78a5636f', 16),
int('84c87814', 16),
int('8cc70208', 16),
int('90befffa', 16),
int('a4506ceb', 16),
int('bef9a3f7', 16),
int('c67178f2', 16)
]
# FIPS-180-4 5.3.3
# Constant inital hash values
H_sha256 = [int('6a09e667', 16),
int('bb67ae85', 16),
int('3c6ef372', 16),
int('a54ff53a', 16),
int('510e527f', 16),
int('9b05688c', 16),
int('1f83d9ab', 16),
int('5be0cd19', 16)]
# FIPS-180-4 4.2.3
# Constant values
K_sha384 = [int('428a2f98d728ae22', 16),
int('7137449123ef65cd', 16),
int('b5c0fbcfec4d3b2f', 16),
int('e9b5dba58189dbbc', 16),
int('3956c25bf348b538', 16),
int('59f111f1b605d019', 16),
int('923f82a4af194f9b', 16),
int('ab1c5ed5da6d8118', 16),
int('d807aa98a3030242', 16),
int('12835b0145706fbe', 16),
int('243185be4ee4b28c', 16),
int('550c7dc3d5ffb4e2', 16),
int('72be5d74f27b896f', 16),
int('80deb1fe3b1696b1', 16),
int('9bdc06a725c71235', 16),
int('c19bf174cf692694', 16),
int('e49b69c19ef14ad2', 16),
int('efbe4786384f25e3', 16),
int('0fc19dc68b8cd5b5', 16),
int('240ca1cc77ac9c65', 16),
int('2de92c6f592b0275', 16),
int('4a7484aa6ea6e483', 16),
int('5cb0a9dcbd41fbd4', 16),
int('76f988da831153b5', 16),
int('983e5152ee66dfab', 16),
int('a831c66d2db43210', 16),
int('b00327c898fb213f', 16),
int('bf597fc7beef0ee4', 16),
int('c6e00bf33da88fc2', 16),
int('d5a79147930aa725', 16),
int('06ca6351e003826f', 16),
int('142929670a0e6e70', 16),
int('27b70a8546d22ffc', 16),
int('2e1b21385c26c926', 16),
int('4d2c6dfc5ac42aed', 16),
int('53380d139d95b3df', 16),
int('650a73548baf63de', 16),
int('766a0abb3c77b2a8', 16),
int('81c2c92e47edaee6', 16),
int('92722c851482353b', 16),
int('a2bfe8a14cf10364', 16),
int('a81a664bbc423001', 16),
int('c24b8b70d0f89791', 16),
int('c76c51a30654be30', 16),
int('d192e819d6ef5218', 16),
int('d69906245565a910', 16),
int('f40e35855771202a', 16),
int('106aa07032bbd1b8', 16),
int('19a4c116b8d2d0c8', 16),
int('1e376c085141ab53', 16),
int('2748774cdf8eeb99', 16),
int('34b0bcb5e19b48a8', 16),
int('391c0cb3c5c95a63', 16),
int('4ed8aa4ae3418acb', 16),
int('5b9cca4f7763e373', 16),
int('682e6ff3d6b2b8a3', 16),
int('748f82ee5defb2fc', 16),
int('78a5636f43172f60', 16),
int('84c87814a1f0ab72', 16),
int('8cc702081a6439ec', 16),
int('90befffa23631e28', 16),
int('a4506cebde82bde9', 16),
int('bef9a3f7b2c67915', 16),
int('c67178f2e372532b', 16),
int('ca273eceea26619c', 16),
int('d186b8c721c0c207', 16),
int('eada7dd6cde0eb1e', 16),
int('f57d4f7fee6ed178', 16),
int('06f067aa72176fba', 16),
int('0a637dc5a2c898a6', 16),
int('113f9804bef90dae', 16),
int('1b710b35131c471b', 16),
int('28db77f523047d84', 16),
int('32caab7b40c72493', 16),
int('3c9ebe0a15c9bebc', 16),
int('431d67c49c100d4c', 16),
int('4cc5d4becb3e42b6', 16),
int('597f299cfc657e2a', 16),
int('5fcb6fab3ad6faec', 16),
int('6c44198c4a475817', 16)]
# FIPS-180-4 5.3.4
# Constant inital hash values
H_sha384 = [int('cbbb9d5dc1059ed8', 16),
int('629a292a367cd507', 16),
int('9159015a3070dd17', 16),
int('152fecd8f70e5939', 16),
int('67332667ffc00b31', 16),
int('8eb44a8768581511', 16),
int('db0c2e0d64f98fa7', 16),
int('47b5481dbefa4fa4', 16)]
# FIPS-180-4 4.2.3
# Constant values
K_sha512 = [int('428a2f98d728ae22', 16),
int('7137449123ef65cd', 16),
int('b5c0fbcfec4d3b2f', 16),
int('e9b5dba58189dbbc', 16),
int('3956c25bf348b538', 16),
int('59f111f1b605d019', 16),
int('923f82a4af194f9b', 16),
int('ab1c5ed5da6d8118', 16),
int('d807aa98a3030242', 16),
int('12835b0145706fbe', 16),
int('243185be4ee4b28c', 16),
int('550c7dc3d5ffb4e2', 16),
int('72be5d74f27b896f', 16),
int('80deb1fe3b1696b1', 16),
int('9bdc06a725c71235', 16),
int('c19bf174cf692694', 16),
int('e49b69c19ef14ad2', 16),
int('efbe4786384f25e3', 16),
int('0fc19dc68b8cd5b5', 16),
int('240ca1cc77ac9c65', 16),
int('2de92c6f592b0275', 16),
int('4a7484aa6ea6e483', 16),
int('5cb0a9dcbd41fbd4', 16),
int('76f988da831153b5', 16),
int('983e5152ee66dfab', 16),
int('a831c66d2db43210', 16),
int('b00327c898fb213f', 16),
int('bf597fc7beef0ee4', 16),
int('c6e00bf33da88fc2', 16),
int('d5a79147930aa725', 16),
int('06ca6351e003826f', 16),
int('142929670a0e6e70', 16),
int('27b70a8546d22ffc', 16),
int('2e1b21385c26c926', 16),
int('4d2c6dfc5ac42aed', 16),
int('53380d139d95b3df', 16),
int('650a73548baf63de', 16),
int('766a0abb3c77b2a8', 16),
int('81c2c92e47edaee6', 16),
int('92722c851482353b', 16),
int('a2bfe8a14cf10364', 16),
int('a81a664bbc423001', 16),
int('c24b8b70d0f89791', 16),
int('c76c51a30654be30', 16),
int('d192e819d6ef5218', 16),
int('d69906245565a910', 16),
int('f40e35855771202a', 16),
int('106aa07032bbd1b8', 16),
int('19a4c116b8d2d0c8', 16),
int('1e376c085141ab53', 16),
int('2748774cdf8eeb99', 16),
int('34b0bcb5e19b48a8', 16),
int('391c0cb3c5c95a63', 16),
int('4ed8aa4ae3418acb', 16),
int('5b9cca4f7763e373', 16),
int('682e6ff3d6b2b8a3', 16),
int('748f82ee5defb2fc', 16),
int('78a5636f43172f60', 16),
int('84c87814a1f0ab72', 16),
int('8cc702081a6439ec', 16),
int('90befffa23631e28', 16),
int('a4506cebde82bde9', 16),
int('bef9a3f7b2c67915', 16),
int('c67178f2e372532b', 16),
int('ca273eceea26619c', 16),
int('d186b8c721c0c207', 16),
int('eada7dd6cde0eb1e', 16),
int('f57d4f7fee6ed178', 16),
int('06f067aa72176fba', 16),
int('0a637dc5a2c898a6', 16),
int('113f9804bef90dae', 16),
int('1b710b35131c471b', 16),
int('28db77f523047d84', 16),
int('32caab7b40c72493', 16),
int('3c9ebe0a15c9bebc', 16),
int('431d67c49c100d4c', 16),
int('4cc5d4becb3e42b6', 16),
int('597f299cfc657e2a', 16),
int('5fcb6fab3ad6faec', 16),
int('6c44198c4a475817', 16)]
# FIPS-180-4 5.3.5
# Constant initial hash values for SHA-512
H_sha512 = [int('6a09e667f3bcc908', 16),
int('bb67ae8584caa73b', 16),
int('3c6ef372fe94f82b', 16),
int('a54ff53a5f1d36f1', 16),
int('510e527fade682d1', 16),
int('9b05688c2b3e6c1f', 16),
int('1f83d9abfb41bd6b', 16),
int('5be0cd19137e2179', 16)]
# Main entry point: return the SHA-1 ("sha160") hex digest of the input.
# message_format values:
# 0 = ASCII, 1 = Integer (base 10), 2 = Hexadecimal (base 16),
# 3 = Binary (base 2), 4 = Octal (base 8), 5 = From file
@staticmethod
def sha160(message, message_format=0):
    """Return the SHA-1 hex digest of *message* (40 hex characters)."""
    # Pick the input converter for the declared format; unknown values
    # fall back to treating the input as plain ASCII.
    converters = {
        0: ppp.from_str,
        1: ppp.from_int,
        2: ppp.from_hex,
        3: ppp.from_bin,
        4: ppp.from_oct,
        5: ppp.from_file,
    }
    inital_message = converters.get(message_format, ppp.from_str)(message)
    # Pad and split into 512-bit blocks of sixteen 32-bit words.
    blocks = prep.prep(inital_message)
    # Chain of intermediate hash values, seeded with the SHA-1 IV.
    H_sha160 = [hash.get_H_sha160()]
    # FIPS-180-4 6.1.2: expand each block into its schedule and compress,
    # appending the new intermediate value to the chain.
    for i in range(1, len(blocks) + 1):
        schedule = sched.create_schedule_sha160(blocks[i - 1])
        H_sha160.append(hash.hash_sha160(schedule, H_sha160, i))
    # Render the five final 32-bit words as fixed-width hex.
    return ''.join(hex(word)[2:].zfill(8) for word in H_sha160[-1])
@staticmethod
def sha224(message, message_format=0):
    """Return the SHA-224 hex digest of *message* (56 hex characters).

    message_format: 0=ASCII, 1=decimal, 2=hex, 3=binary, 4=octal, 5=file.
    """
    # Pick the input converter; unknown formats fall back to plain ASCII.
    converters = {
        0: ppp.from_str,
        1: ppp.from_int,
        2: ppp.from_hex,
        3: ppp.from_bin,
        4: ppp.from_oct,
        5: ppp.from_file,
    }
    inital_message = converters.get(message_format, ppp.from_str)(message)
    # Pad and split into 512-bit blocks of sixteen 32-bit words.
    blocks = prep.prep(inital_message)
    # Chain of intermediate hash values, seeded with the SHA-224 IV.
    H = [hash.get_H_sha224()]
    # FIPS-180-4 6.2.2: expand each block into its schedule and compress.
    for i in range(1, len(blocks) + 1):
        schedule = sched.create_schedule_sha224(blocks[i - 1])
        H.append(hash.hash_sha224(schedule, H, i))
    # SHA-224 digest keeps only the first seven of the eight final words.
    return ''.join(hex(word)[2:].zfill(8) for word in H[-1][:7])
@staticmethod
def sha256(message, message_format=0):
    """Return the SHA-256 hex digest of *message* (64 hex characters).

    message_format: 0=ASCII, 1=decimal, 2=hex, 3=binary, 4=octal, 5=file.
    """
    # Pick the input converter; unknown formats fall back to plain ASCII.
    converters = {
        0: ppp.from_str,
        1: ppp.from_int,
        2: ppp.from_hex,
        3: ppp.from_bin,
        4: ppp.from_oct,
        5: ppp.from_file,
    }
    inital_message = converters.get(message_format, ppp.from_str)(message)
    # Pad and split into 512-bit blocks of sixteen 32-bit words.
    blocks = prep.prep(inital_message)
    # Chain of intermediate hash values, seeded with the SHA-256 IV.
    H = [hash.get_H_sha256()]
    # SHA-256 shares the schedule and round function with the SHA-224
    # implementation; only the initial hash value differs.
    for i in range(1, len(blocks) + 1):
        schedule = sched.create_schedule_sha224(blocks[i - 1])
        H.append(hash.hash_sha224(schedule, H, i))
    # Render all eight final 32-bit words as fixed-width hex.
    return ''.join(hex(word)[2:].zfill(8) for word in H[-1])
@staticmethod
def sha384(message, message_format=0):
    """Return the SHA-384 hex digest of *message* (96 hex characters).

    message_format: 0=ASCII, 1=decimal, 2=hex, 3=binary, 4=octal, 5=file.
    """
    # Pick the input converter; unknown formats fall back to plain ASCII.
    converters = {
        0: ppp.from_str,
        1: ppp.from_int,
        2: ppp.from_hex,
        3: ppp.from_bin,
        4: ppp.from_oct,
        5: ppp.from_file,
    }
    inital_message = converters.get(message_format, ppp.from_str)(message)
    # Pad and split into 1024-bit blocks of sixteen 64-bit words.
    preproccessed_message = prep_sha384.prep(inital_message)
    # Chain of intermediate hash values, seeded with the SHA-384 IV.
    H = [hash.get_H_sha384()]
    for i in range(1, len(preproccessed_message) + 1):
        schedule = sched.create_schedule_sha384(preproccessed_message[i - 1])
        # Use hash_sha512 for compression: SHA-384 and SHA-512 share the
        # identical round function and constants (K_sha384 == K_sha512),
        # and hash_sha512 keeps all eight chaining words.  hash_sha384
        # returned only six, which raised IndexError on any message
        # longer than one 1024-bit block.
        H.append(hash.hash_sha512(schedule, H, i))
    # SHA-384 digest is the first six final 64-bit words.  zfill(16), not
    # zfill(8) as before: 64-bit words need 16 hex digits, otherwise
    # leading zeros were dropped and the digest came out malformed.
    return ''.join(hex(word)[2:].zfill(16) for word in H[-1][:6])
@staticmethod
def sha512(message, message_format=0):
    """Return the SHA-512 hex digest of *message* (128 hex characters).

    message_format: 0=ASCII, 1=decimal, 2=hex, 3=binary, 4=octal, 5=file.
    """
    # Pick the input converter; unknown formats fall back to plain ASCII.
    converters = {
        0: ppp.from_str,
        1: ppp.from_int,
        2: ppp.from_hex,
        3: ppp.from_bin,
        4: ppp.from_oct,
        5: ppp.from_file,
    }
    inital_message = converters.get(message_format, ppp.from_str)(message)
    # Pad and split into 1024-bit blocks of sixteen 64-bit words.
    preproccessed_message = prep_sha384.prep(inital_message)
    # Chain of intermediate hash values, seeded with the SHA-512 IV.
    H = [hash.get_H_sha512()]
    # FIPS-180-4 6.4.2: expand each block into its schedule and compress.
    for i in range(1, len(preproccessed_message) + 1):
        schedule = sched.create_schedule_sha512(preproccessed_message[i - 1])
        H.append(hash.hash_sha512(schedule, H, i))
    # Render all eight final 64-bit words as fixed-width hex.  zfill(16),
    # not zfill(8) as before: 64-bit words need 16 hex digits, otherwise
    # leading zeros were dropped and the digest came out malformed.
    return ''.join(hex(word)[2:].zfill(16) for word in H[-1])
@staticmethod
def sha512_for_t(message, h, trunc=512, message_format=0):
    """SHA-512 with a caller-supplied IV *h*, truncated to *trunc* bits.

    Used by the SHA-512/t machinery (FIPS 180-4 5.3.6): IV generation
    passes the a5-masked IV here, and sha512_t passes the derived IV.
    Returns the truncated digest as a zero-padded hex string.
    """
    # Pick the input converter; unknown formats fall back to plain ASCII.
    converters = {
        0: ppp.from_str,
        1: ppp.from_int,
        2: ppp.from_hex,
        3: ppp.from_bin,
        4: ppp.from_oct,
        5: ppp.from_file,
    }
    inital_message = converters.get(message_format, ppp.from_str)(message)
    # Pad and split into 1024-bit blocks of sixteen 64-bit words.
    preproccessed_message = prep_sha384.prep(inital_message)
    # The chain starts from the caller-provided IV instead of the standard one.
    H = [h]
    for i in range(1, len(preproccessed_message) + 1):
        schedule = sched.create_schedule_sha512(preproccessed_message[i - 1])
        H.append(hash.hash_sha512(schedule, H, i))
    # Concatenate the final words as a bit string and keep the first
    # trunc bits.
    bits = ''.join(bin(word)[2:].zfill(64) for word in H[-1])
    # zfill restores the leading zeros that hex() drops; without it the
    # result had a variable width and IV.IV's fixed 16-hex-char word
    # slicing could mis-parse the digest.  (Assumes trunc is a multiple
    # of 4, which holds for all standard SHA-512/t values.)
    return hex(int(bits[:trunc], 2))[2:].zfill(trunc // 4)
@staticmethod
def sha512_t(message, t):
    """Return the SHA-512/t hex digest of *message* (FIPS 180-4 5.3.6)."""
    # Derive the t-specific IV, then hash with it, truncating to t bits.
    iv = IV.IV(t)
    return hash.sha512_for_t(message, iv, t)
# FIPS-180-4 2.2.2
# Rotate bits to the right
@staticmethod
def ROTR(x, n, w=32):
return ((x >> n) | (x << w - n)) & ((1 << w) - 1)
# FIPS-180-4 3.2
# Rotate bits to the right
@staticmethod
def ROTL(x, n, w=32):
return sched.ROTR(x, w-n)
# FIPS-180-4 4.1.2
@staticmethod
def SIGMA0(x):
return hash.ROTR(x, 2) ^ hash.ROTR(x, 13) ^ hash.ROTR(x, 22)
# FIPS-180-4 4.1.2
@staticmethod
def SIGMA1(x):
return hash.ROTR(x, 6) ^ hash.ROTR(x, 11) ^ hash.ROTR(x, 25)
# FIPS-180-4 4.1.2
@staticmethod
def SIGMA0_sha384(x):
return hash.ROTR(x, 28, 64) ^ hash.ROTR(x, 34, 64) ^ hash.ROTR(x, 39, 64)
# FIPS-180-4 4.1.2
@staticmethod
def SIGMA1_sha384(x):
return hash.ROTR(x, 14, 64) ^ hash.ROTR(x, 18, 64) ^ hash.ROTR(x, 41, 64)
# FIPS-180-4 4.1.1
@staticmethod
def Ch(x, y, z):
return (x & y) ^ (~x & z)
# FIPS-180-4 4.1.1
@staticmethod
def Maj(x, y, z):
return (x & y) ^ (x & z) ^ (y & z)
# FIPS-180-4 4.1.1
@staticmethod
def Parity(x, y, z):
return x ^ y ^ z
# FIPS-180-4 4.1.1
@staticmethod
def f_sha160(x, y, z, t):
if 0 <= t and t <= 19:
return hash.Ch(x, y, z)
if 20 <= t and t <= 39:
return hash.Parity(x, y, z)
if 40 <= t and t <= 59:
return hash.Maj(x, y, z)
if 60 <= t and t <= 79:
return hash.Parity(x, y, z)
# FIPS-180-4 5.3.x
# Accessors for the (constant) initial hash values of each algorithm.
# NOTE: these return the shared class-level lists, not copies; callers in
# this module only read them.
@staticmethod
def get_H_sha160():
    # SHA-1 initial hash value (FIPS 180-4 5.3.1).
    return hash.H_sha160
@staticmethod
def get_H_sha224():
    # SHA-224 initial hash value (FIPS 180-4 5.3.2).
    return hash.H_sha224
@staticmethod
def get_H_sha256():
    # SHA-256 initial hash value (FIPS 180-4 5.3.3).
    return hash.H_sha256
@staticmethod
def get_H_sha384():
    # SHA-384 initial hash value (FIPS 180-4 5.3.4).
    return hash.H_sha384
@staticmethod
def get_H_sha512():
    # SHA-512 initial hash value (FIPS 180-4 5.3.5).
    return hash.H_sha512
# FIPS-180-4 6.2.2
@staticmethod
def hash_sha160(W, H_sha160, i):
# Set inital hash values from previous (final) hash values
a = H_sha160[i-1][0]
b = H_sha160[i-1][1]
c = H_sha160[i-1][2]
d = H_sha160[i-1][3]
e = H_sha160[i-1][4]
# Iterate 80 times
for t in range(80):
if 0 <= t <= 19:
hash.K_sha160.append(int('5A827999', 16))
elif 20 <= t <= 39:
hash.K_sha160.append(int('6ED9EBA1', 16))
elif 40 <= t <= 59:
hash.K_sha160.append(int('8F1BBCDC', 16))
elif 60 <= t <= 79:
hash.K_sha160.append(int('CA62C1D6', 16))
# Calculate temporary value
T = int((hash.ROTL(a, 5) + hash.f_sha160(b, c, d, t) + e + hash.K_sha160[t] + W[t]) % 2 ** 32)
e = d
d = c
c = hash.ROTL(b, 30)
b = a
a = T
# Calculate final hash values
H0 = (H_sha160[i-1][0] + a) % 2 ** 32
H1 = (H_sha160[i-1][1] + b) % 2 ** 32
H2 = (H_sha160[i-1][2] + c) % 2 ** 32
H3 = (H_sha160[i-1][3] + d) % 2 ** 32
H4 = (H_sha160[i-1][4] + e) % 2 ** 32
# Return final hash values
return [H0, H1, H2, H3, H4]
@staticmethod
def hash_sha224(W, H, i):
# Set inital hash values from previous (final) hash values
a = H[i-1][0]
b = H[i-1][1]
c = H[i-1][2]
d = H[i-1][3]
e = H[i-1][4]
f = H[i-1][5]
g = H[i-1][6]
h = H[i-1][7]
# Iterate 64 times
for t in range(0, 64):
# Calculate temporary value 1
T1 = int((h + hash.SIGMA1(e) + hash.Ch(e, f, g) + hash.K_sha224[t] + W[t]) % 2 ** 32)
# Calculate temporary value 2
T2 = int((hash.SIGMA0(a) + hash.Maj(a, b, c)) % 2 ** 32)
h = g
g = f
f = e
e = int((d + T1) % 2 ** 32)
d = c
c = b
b = a
a = (T1 + T2) % 2 ** 32
# Calculate final hash values
H0 = (H[i-1][0] + a) % 2 ** 32
H1 = (H[i-1][1] + b) % 2 ** 32
H2 = (H[i-1][2] + c) % 2 ** 32
H3 = (H[i-1][3] + d) % 2 ** 32
H4 = (H[i-1][4] + e) % 2 ** 32
H5 = (H[i-1][5] + f) % 2 ** 32
H6 = (H[i-1][6] + g) % 2 ** 32
H7 = (H[i-1][7] + h) % 2 ** 32
# Return final hash values
return [H0, H1, H2, H3, H4, H5, H6, H7]
# SHA-384 block compression.
# NOTE(review): this returns only six words (H0..H5).  Six words match the
# SHA-384 *digest* width, but the chaining value fed into the next block
# must keep all eight — as written, hashing any message longer than one
# 1024-bit block raises IndexError when H[i-1][6] / H[i-1][7] are read on
# the next call.  The fix is to return eight words and truncate only when
# rendering the digest (hash_sha512 implements the identical round
# function and does exactly that).
@staticmethod
def hash_sha384(W, H, i):
    # Set inital hash values from previous (final) hash values
    a = H[i-1][0]
    b = H[i-1][1]
    c = H[i-1][2]
    d = H[i-1][3]
    e = H[i-1][4]
    f = H[i-1][5]
    g = H[i-1][6]
    h = H[i-1][7]
    # Iterate 80 times
    for t in range(0, 80):
        # Calculate temporary value 1
        T1 = int((h + hash.SIGMA1_sha384(e) + hash.Ch(e, f, g) + hash.K_sha384[t] + W[t]) % 2 ** 64)
        # Calculate temporary value 2
        T2 = int((hash.SIGMA0_sha384(a) + hash.Maj(a, b, c)) % 2 ** 64)
        h = g
        g = f
        f = e
        e = int((d + T1) % 2 ** 64)
        d = c
        c = b
        b = a
        a = (T1 + T2) % 2 ** 64
    # Calculate final hash values
    H0 = (H[i-1][0] + a) % 2 ** 64
    H1 = (H[i-1][1] + b) % 2 ** 64
    H2 = (H[i-1][2] + c) % 2 ** 64
    H3 = (H[i-1][3] + d) % 2 ** 64
    H4 = (H[i-1][4] + e) % 2 ** 64
    H5 = (H[i-1][5] + f) % 2 ** 64
    # Return final hash values (truncated to six words — see NOTE above)
    return [H0, H1, H2, H3, H4, H5]
@staticmethod
def hash_sha512(W, H, i):
# Set inital hash values from previous (final) hash values
a = H[i-1][0]
b = H[i-1][1]
c = H[i-1][2]
d = H[i-1][3]
e = H[i-1][4]
f = H[i-1][5]
g = H[i-1][6]
h = H[i-1][7]
# Iterate 80 times
for t in range(0, 80):
# Calculate temporary value 1
T1 = int((h + hash.SIGMA1_sha384(e) + hash.Ch(e, f, g) + hash.K_sha512[t] + W[t]) % 2 ** 64)
# Calculate temporary value 2
T2 = int((hash.SIGMA0_sha384(a) + hash.Maj(a, b, c)) % 2 ** 64)
h = g
g = f
f = e
e = int((d + T1) % 2 ** 64)
d = c
c = b
b = a
a = (T1 + T2) % 2 ** 64
# Calculate final hash values
H0 = (H[i-1][0] + a) % 2 ** 64
H1 = (H[i-1][1] + b) % 2 ** 64
H2 = (H[i-1][2] + c) % 2 ** 64
H3 = (H[i-1][3] + d) % 2 ** 64
H4 = (H[i-1][4] + e) % 2 ** 64
H5 = (H[i-1][5] + f) % 2 ** 64
H6 = (H[i-1][6] + g) % 2 ** 64
H7 = (H[i-1][7] + h) % 2 ** 64
# Return final hash values
return [H0, H1, H2, H3, H4, H5, H6, H7]
| StarcoderdataPython |
1748461 | """
Gentle Hands-On Introduction to Python: course project
"""
import sys
# Skip the package-level re-exports when "-m" appears in the argument list,
# so submodule entry points can run without triggering these relative
# imports.
# NOTE(review): scanning sys.argv for "-m" is a heuristic — the interpreter
# normally consumes its own "-m" flag before populating sys.argv; confirm
# this condition fires when intended.
if not "-m" in sys.argv:
    from .distance import align, edit_distance, needleman_wunsch
    from .assemble import assemble, score
| StarcoderdataPython |
3360706 | <filename>py4e/exercises/exercise_13_1.py<gh_stars>1-10
"""
Extracting Data from XML
In this assignment you will write a Python program somewhat similar to
http://www.py4e.com/code3/geoxml.py. The program will prompt for a URL,
read the XML data from that URL using urllib and then parse and extract
the comment counts from the XML data, compute the sum of the numbers in
the file.
We provide two files for this assignment. One is a sample file where we
give you the sum for your testing and the other is the actual data you
need to process for the assignment.
- Sample data: http://py4e-data.dr-chuck.net/comments_42.xml
(Sum=2553)
- Actual data: http://py4e-data.dr-chuck.net/comments_18422.xml
(Sum ends with 30)
You do not need to save these files to your folder since your program
will read the data directly from the URL. Note: Each student will have
a distinct data url for the assignment - so only use your own data url
for analysis.
Data Format and Approach
The data consists of a number of names and comment counts in XML as
follows:
+---------------------------+
| <comment> |
| <name>Matthias</name> |
| <count>97</count> |
| </comment> |
+---------------------------+
You are to look through all the <comment> tags and find the <count>
values sum the numbers. The closest sample code that shows how to parse
XML is geoxml.py. But since the nesting of the elements in our data is
different than the data we are parsing in that sample code you will
have to make real changes to the code.
To make the code a little simpler, you can use an XPath selector string
to look through the entire tree of XML for any tag named 'count' with
the following line of code:
+-----------------------------------+
| counts = tree.findall('.//count') |
+-----------------------------------+
Take a look at the Python ElementTree documentation and look for the
supported XPath syntax for details. You could also work from the top of
the XML down to the comments node and then loop through the child nodes
of the comments node.
Sample Execution
+---------------------------------------------------------------+
| $ python3 solution.py |
| Enter location: http://py4e-data.dr-chuck.net/comments_42.xml |
| Retrieving http://py4e-data.dr-chuck.net/comments_42.xml |
| Retrieved 4189 characters |
| Count: 50 |
| Sum: 2... |
+---------------------------------------------------------------+
"""
import urllib.request, urllib.parse, urllib.error
import xml.etree.ElementTree as ET
import ssl
URL = "http://py4e-data.dr-chuck.net/comments_18422.xml"
# Course-provided workaround: skip certificate verification so the
# fetch also works behind intercepting proxies. Not production-safe.
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
# Download the XML document and echo progress like the sample run.
uh = urllib.request.urlopen(URL, context=ctx)
data = uh.read()
print("Enter location:", URL)
print('Retrieving', URL)
print('Retrieved', len(data), 'characters')
# Parse the XML and sum the <count> child of every <comment> element.
tree = ET.fromstring(data)
results = tree.findall('comments/comment')
suma = 0
for item in results:
    suma += int(item.find('count').text)
print('Count:', len(results))
print('Sum:', suma)
1980606 | <gh_stars>0
# cports build template for xorg's transset utility.
pkgname = "transset"
pkgver = "1.0.2"
pkgrel = 0
build_style = "gnu_configure"
hostmakedepends = ["pkgconf"]
makedepends = ["libxt-devel"]
pkgdesc = "Sets the transparency of an X window"
maintainer = "q66 <<EMAIL>>"
license = "MIT"
url = "https://xorg.freedesktop.org"
source = f"$(XORG_SITE)/app/{pkgname}-{pkgver}.tar.bz2"
sha256 = "4bac142ee7cfda574893b2f6b4e413dacd88a130c6dca5be1a9958e7c1451b21"
def post_install(self):
    # MIT requires shipping the license text with the package.
    self.install_license("COPYING")
| StarcoderdataPython |
12844454 | <filename>fastquotes/fund/history.py
import json
from datetime import datetime
from typing import Optional
import requests
from ..const import CUSTOM_HEADER
def get_dividend(msg: str) -> Optional[float]:
    """Extract the dividend amount from a fund announcement string.

    The value is taken as the span between the first and the last digit
    in *msg* (so an interior decimal point is kept, e.g. a string
    containing "0.05" yields 0.05), matching the previous scan.

    Returns None when *msg* is empty or contains no digits at all; the
    previous two-pointer scan walked past the string bounds (IndexError)
    in the digit-free case.
    """
    import re
    if not msg:
        return None
    # DOTALL so the span may cross newlines, like the index scan did.
    match = re.search(r"\d(?:.*\d)?", msg, re.DOTALL)
    if match is None:
        return None
    return float(match.group(0))
def fund_history_data(fund_code: str) -> list:
    """Scrape Eastmoney's pingzhongdata JS blob for a fund's NAV history.

    Returns one dict per trading day with the unit NAV, cumulative NAV
    (unit NAV + dividends paid so far), adjusted NAV (dividends
    reinvested), the daily growth rate and that day's dividend.
    """
    url = f"http://fund.eastmoney.com/pingzhongdata/{fund_code}.js"
    text = requests.get(url, headers=CUSTOM_HEADER).text
    # Slice the JSON array assigned to Data_netWorthTrend out of the JS.
    # NOTE(review): the fixed offsets assume the upstream JS layout is
    # stable — verify if parsing starts failing.
    text = text[
        text.find("Data_netWorthTrend") + 21 : text.find("Data_ACWorthTrend") - 15
    ]
    res_list = []
    dividend_sum = 0.0
    growth_rate_factor = 1.0
    for item in json.loads(text):
        dividend = get_dividend(item["unitMoney"])
        unit_nv = item["y"]
        if dividend is not None:
            # Accumulate total payouts and the reinvestment factor that
            # produces the adjusted (复权) NAV series.
            dividend_sum += dividend
            growth_rate_factor *= (unit_nv + dividend) / unit_nv
        res_list.append(
            {
                "日期": datetime.fromtimestamp(item["x"] // 1000).strftime("%Y%m%d"),
                "单位净值": unit_nv,
                "累计净值": unit_nv + dividend_sum,
                "复权净值": unit_nv * growth_rate_factor,
                "日涨幅": item["equityReturn"],
                "分红送配": dividend,
            }
        )
    return res_list
def fund_history_profit_dict(fund_code: str) -> dict:
    """Map each trading date to the daily return computed from the
    adjusted (复权) NAV relative to the previous trading day."""
    history = fund_history_data(fund_code)
    profits = {}
    for prev, curr in zip(history, history[1:]):
        profits[curr["日期"]] = curr["复权净值"] / prev["复权净值"] - 1
    return profits
| StarcoderdataPython |
3519623 | <filename>voidpp_tools/logger_proxy.py<gh_stars>0
class LoggerProxy(object):
    """File-like adapter that forwards writes to a logging callable.

    Useful for redirecting sys.stdout/sys.stderr into a logger; the
    bare newline that ``print`` emits after each call is dropped.
    """

    def __init__(self, level):
        # ``level`` is a callable such as ``logger.info``.
        self.level = level

    def write(self, msg):
        if msg == '\n':
            return
        self.level(msg)

    def flush(self):
        # Nothing is buffered; present only to satisfy the file protocol.
        pass
| StarcoderdataPython |
#!/usr/bin/env python3
from sys import path
path.append('../')
from sherline_lathe.lathe_parting import lathe_parting
import unittest
class test_lathe_parting(unittest.TestCase):
    """Unit tests for sherline_lathe.lathe_parting."""
    def test_set_surface_speed(self):
        # TODO: exercise lathe_parting's surface-speed configuration;
        # currently a placeholder so the suite stays green.
        pass
| StarcoderdataPython |
import pandas as pd
# Gold-lead analysis: does the team ahead in total gold at time t
# (seconds) go on to win? Reads Dota 2 match dump CSVs.
t = 600
df = pd.read_csv('player_time.csv')
# Team 1 = player slots 0-4, team 2 = slots 128-132.
df['team_1_gold'] = (df.gold_t_0 + df.gold_t_1 + df.gold_t_2 +
df.gold_t_3 + df.gold_t_4)
df['team_2_gold'] = (df.gold_t_128 + df.gold_t_129 + df.gold_t_130 +
df.gold_t_131 + df.gold_t_132)
df = df[['match_id', 'times', 'team_1_gold', 'team_2_gold']]
df = df.loc[df.times == t]
# gold_lead: team 1 leads; gold_lead_by: relative margin over team 2.
df['gold_lead'] = list((df.team_1_gold > df.team_2_gold))
df['gold_lead_by'] = (df.team_1_gold - df.team_2_gold) / df.team_2_gold
df = df[['match_id', 'gold_lead', 'gold_lead_by']]
match = pd.read_csv('match.csv')
match = match[['match_id', 'radiant_win']]
# Keep matches whose lead margin exceeds n, then compute how often the
# leading team actually won (count and fraction).
n = 0
gl = pd.merge(df, match)
gl = gl.loc[gl.gold_lead_by > n]
sum(gl.gold_lead == gl.radiant_win)
sum(gl.gold_lead == gl.radiant_win) / len(gl)
| StarcoderdataPython |
# GUI component to "train" the watchdog
from tkinter import *
import dbclient, sys, gui_common
# Script-only module: nothing is exported for import use.
__all__ = []
if __name__ == '__main__':
    Tk().title('搞打卡竞赛,进国家队,拿亚洲金牌!')
    # Ask for the target machine first; blocks until the dialog closes.
    asker = gui_common.IPAsker()
    asker.mainloop()
    if asker.machine_data is None:
        # Dialog closed without a selection — nothing to train.
        sys.exit(0)
    # machine_data[0] is passed to the trainer window.
    # NOTE(review): exact shape of machine_data is defined in gui_common.
    trainer = gui_common.Trainer(asker.machine_data[0])
    trainer.mainloop()
import os, webbrowser, requests, time, sys, sqlite3, random, traceback
import pyinspect as pi
import datetime
import wikipedia
import pywhatkit as kit
import ClointFusion as cf
from pathlib import Path
import subprocess
from PIL import Image
from rich.text import Text
from rich import print
from rich.console import Console
import pyaudio
from rich import pretty
import platform
# Platform identifiers compared against platform.system() below.
windows_os = "windows"
linux_os = "linux"
mac_os = "darwin"
os_name = str(platform.system()).lower()
# Prettier tracebacks / REPL output via pyinspect and rich.
pi.install_traceback(hide_locals=True,relevant_only=True,enable_prompt=True)
pretty.install()
console = Console()
# Spoken command catalogue; latest_queries is the subset highlighted
# as "new" by call_help() and options().
queries = ["current time,","global news,","send whatsapp,","open , minimize , close any application,","Open Gmail,", "play youtube video,","search in google,",'launch zoom meeting,','switch window,','locate on screen,','take selfie,','OCR now,', 'commands,', 'read screen,','help,',]
latest_queries = ['launch zoom meeting,','switch window,','locate on screen,','take selfie,','read screen,',]
def error_try_later():
    """Speak a randomly chosen apology asking the user to retry."""
    apologies = [
        "Whoops, please try again",
        "Mea Culpa, please try again",
        "Sorry, i am experiencing some issues, please try again",
        "Apologies, please try again",
    ]
    cf.text_to_speech(shuffle_return_one_option(apologies))
def shuffle_return_one_option(lst_options=None):
    """Return one randomly chosen option from *lst_options* as a string.

    Fixes three defects of the previous shuffle-and-take-first version:
    the mutable default argument, the in-place mutation of the caller's
    list by ``random.shuffle``, and the IndexError raised for an empty
    selection (now returns "").
    """
    if not lst_options:
        return ""
    return str(random.choice(lst_options))
def _play_sound(music_file_path=""):
    """Play a .wav file synchronously through the default PyAudio device.

    Errors are reported via crash_report + stdout rather than raised,
    so a missing/корrupt file never breaks the voice loop.
    """
    try:
        import wave
        #define stream chunk
        chunk = 1024
        #open a wav format music
        f = wave.open(music_file_path,"rb")
        #instantiate PyAudio
        p = pyaudio.PyAudio()
        #open stream sized from the wav header
        stream = p.open(format = p.get_format_from_width(f.getsampwidth()),
                channels = f.getnchannels(),
                rate = f.getframerate(),
                output = True)
        data = f.readframes(chunk)
        #play stream chunk by chunk until the file is exhausted
        while data:
            stream.write(data)
            data = f.readframes(chunk)
        #stop stream
        stream.stop_stream()
        stream.close()
        #close PyAudio
        p.terminate()
    except Exception as ex:
        cf.selft.crash_report(traceback.format_exception(*sys.exc_info(),limit=None, chain=True))
        print("Unable to play sound" + str(ex))
def play_on_youtube():
    """Ask the user for a video name and play it on YouTube."""
    cf.text_to_speech("OK...")
    cf.text_to_speech("Which video ?")
    requested_video = cf.speech_to_text().lower()
    cf.text_to_speech("Opening YouTube now, please wait a moment...")
    kit.playonyt(requested_video)
def call_Send_WA_MSG():
    """Dictate a WhatsApp message and send it to a spoken mobile number.

    Saying exit/cancel/stop at either prompt aborts the send.
    """
    cf.text_to_speech("OK...")
    cf.text_to_speech("Whats the message ?")
    message_text = cf.speech_to_text().lower()
    if message_text in ("exit", "cancel", "stop"):
        cf.text_to_speech("Sending message is cancelled...")
        return
    cf.text_to_speech("Got it, whom to send, please say mobile number without country code")
    mobile = cf.speech_to_text().lower()
    if mobile in ("exit", "cancel", "stop"):
        cf.text_to_speech("Sending message is cancelled...")
        return
    cf.text_to_speech("Sending message now, please wait a moment")
    kit.sendwhatmsg_instantly(phone_no=f"+91{mobile}", message=str(message_text), wait_time=25, tab_close=True, close_time=5)
def google_search():
    """Ask the user for a query and run it through a Google search."""
    cf.text_to_speech("OK...")
    cf.text_to_speech("What to search ?")
    search_terms = cf.speech_to_text().lower()
    cf.text_to_speech("Searching in Gooogle now, please wait a moment...")
    kit.search(search_terms)
def welcome(nth):
    """Greet the user by time of day and, on early launches, suggest commands.

    nth: launch counter read from the database; launches 1-3 each get a
    suggestion list of a specific size.
    """
    hour = datetime.datetime.now().hour
    if 5 <= hour < 12:
        day_part = " Good Morning ! "
    elif hour < 18:
        day_part = " Good Afternoon !! "
    else:
        day_part = " Good Evening ! "
    salutation = random.choice(["Hey ! ", "Hi ! ", "Hello ! ", "Dear ! "])
    greeting = salutation + str(cf.user_name) + ' !!' + day_part
    assist_choices = ["How can i assist you ?!","What can I do for you?","Feel free to call me for any help!","What something can I do for you?"]
    cf.text_to_speech(greeting + shuffle_return_one_option(assist_choices))
    # (total, latest) sizes for the suggestion list per launch number.
    suggestion_sizes = {1: (3, 2), 2: (5, 5), 3: (5, 1)}
    if nth in suggestion_sizes:
        total, latest = suggestion_sizes[nth]
        suggest(options(total, latest))
def suggest(suggestions):
    """Speak a shuffled list of sample commands, then a quit hint."""
    cf.text_to_speech("Try saying...")
    random.shuffle(suggestions)
    cf.text_to_speech(suggestions)
    quit_options = ['bye', 'quit', 'exit']
    random.shuffle(quit_options)
    cf.text_to_speech('To quit, just say {}'.format(quit_options[0]))
def call_help():
    """Announce and print the full and latest command lists."""
    intro = ["I support these commands","Here are the commands i support currently."]
    cf.text_to_speech(shuffle_return_one_option(intro))
    print("All commands:")
    print(queries)
    newer = ["Try some latest commands:","Try something new:"]
    cf.text_to_speech(shuffle_return_one_option(newer))
    print("Latest commands")
    print(latest_queries)
    print("\n")
def options(total=5, latest=3):
    """Build a shuffled sample of *total* commands, *latest* of them
    drawn from ``latest_queries`` and the rest from the remaining
    ``queries``.

    Fixes the former off-by-one where ``range(latest + 1)`` pulled one
    more "latest" command than requested; slicing also removes the
    IndexError-driven control flow and the potential unhandled
    IndexError when ``remaining`` was shorter than the shortfall.
    """
    remaining = [q for q in queries if q not in latest_queries]
    custom_list = latest_queries[:latest]
    custom_list += remaining[:max(0, total - len(custom_list))]
    random.shuffle(custom_list)
    return custom_list
def trndnews():
    """Fetch top Indian headlines from NewsAPI, print them numbered,
    and offer to read them aloud.

    NOTE(review): the API key is hard-coded here — move it to
    configuration before shipping.
    """
    url = "http://newsapi.org/v2/top-headlines?country=in&apiKey=59ff055b7c754a10a1f8afb4583ef1ab"
    page = requests.get(url).json()
    article = page["articles"]
    results = [ar["title"] for ar in article]
    for i in range(len(results)):
        print(i + 1, results[i])
    cf.text_to_speech("Here are the top trending news....!!")
    cf.text_to_speech("Do yo want me to read!!!")
    reply = cf.speech_to_text().lower()
    reply = str(reply)
    if reply == "yes":
        cf.text_to_speech(results)
    else:
        cf.text_to_speech('ok!!!!')
def capture_photo(ocr=False):
    """Open the Windows Camera app, screenshot the screen, save it as
    Images/Selfie.PNG, then force-close the camera.

    ocr: wait longer (4s vs 1s) before capturing so the frame settles
    enough for OCR.
    """
    try:
        subprocess.run('start microsoft.windows.camera:', shell=True)
        if ocr:
            time.sleep(4)
        else:
            time.sleep(1)
        # Full-screen screenshot; the camera preview is frontmost.
        img=cf.pg.screenshot()
        time.sleep(1)
        img.save(Path(os.path.join(cf.clointfusion_directory, "Images","Selfie.PNG")))
        subprocess.run('Taskkill /IM WindowsCamera.exe /F', shell=True)
    except Exception as ex:
        cf.selft.crash_report(traceback.format_exception(*sys.exc_info(),limit=None, chain=True))
        print("Error in capture_photo " + str(ex))
def call_read_screen():
    """Maximize a named window, screenshot it, OCR the image, and
    optionally read the recognized text aloud."""
    try:
        cf.text_to_speech('Window Name to read?')
        windw_name = cf.speech_to_text().lower() ## takes user cf.speech_to_text
        cf.window_show_desktop()
        cf.window_activate_and_maximize_windows(windw_name)
        time.sleep(2)
        img=cf.pg.screenshot()
        img.save(Path(os.path.join(cf.clointfusion_directory, "Images","Selfie.PNG")))
        # OCR process
        ocr_img_path = Path(os.path.join(cf.clointfusion_directory, "Images","Selfie.PNG"))
        cf.text_to_speech(shuffle_return_one_option(["OK, performing OCR now","Give me a moment","abracadabra","Hang on"]))
        ocr_result = cf.ocr_now(ocr_img_path)
        print(ocr_result)
        cf.text_to_speech("Do you want me to read?")
        yes_no = cf.speech_to_text().lower() ## takes user cf.speech_to_text
        if yes_no in ["yes", "yah", "ok"]:
            cf.text_to_speech(ocr_result)
    except Exception as ex:
        cf.selft.crash_report(traceback.format_exception(*sys.exc_info(),limit=None, chain=True))
        print("Error in capture_photo " + str(ex))
def call_name():
    """Introduce the assistant with a random self-introduction."""
    introductions = ["I am ClointFusion's BOL!", "This is Bol!", "Hi, I am ClointFusion's Bol!","Hey, this is Bol!"]
    cf.text_to_speech(shuffle_return_one_option(introductions))
def call_time():
    """Speak the current local time in 12-hour format."""
    # Local renamed from "time" so the stdlib time module isn't shadowed.
    clock_text = datetime.datetime.now().strftime('%I:%M %p')
    cf.text_to_speech("It's " + str(clock_text))
def call_wiki(query):
    """Speak a two-sentence Wikipedia summary for *query*.

    Falls back to a spoken hint when the lookup fails (disambiguation,
    missing page, network error). The former bare ``except:`` also
    swallowed KeyboardInterrupt/SystemExit; only real errors are
    caught now.
    """
    try:
        cf.text_to_speech(wikipedia.summary(query,2))
    except Exception:
        cf.text_to_speech("Please use a complete word...")
def call_ocr():
    """Photograph an image shown to the webcam, mirror it, OCR it, and
    read the result aloud."""
    try:
        ocr_say=["OK, Let me scan !","OK, Going to scan now","Please show me the image"]
        cf.text_to_speech(shuffle_return_one_option(ocr_say))
        capture_photo(ocr=True)
        ocr_img_path = Path(os.path.join(cf.clointfusion_directory, "Images","Selfie.PNG"))
        # The webcam preview is mirrored; flip back so text reads normally.
        imageObject = Image.open(ocr_img_path)
        corrected_image = imageObject.transpose(Image.FLIP_LEFT_RIGHT)
        corrected_image.save(ocr_img_path)
        cf.text_to_speech(shuffle_return_one_option(["OK, performing OCR now","Give me a moment","abracadabra","Hang on"]))
        ocr_result = cf.ocr_now(ocr_img_path)
        print(ocr_result)
        cf.text_to_speech(ocr_result)
    except Exception as ex:
        cf.selft.crash_report(traceback.format_exception(*sys.exc_info(),limit=None, chain=True))
        print("Error in OCR " + str(ex))
        error_try_later()
def call_camera():
    """Launch the Windows Camera app.

    Tries the shell protocol handler first; ``os.startfile`` is the
    fallback. The former bare ``except:`` is narrowed so Ctrl-C and
    SystemExit are no longer swallowed.
    """
    try:
        subprocess.run('start microsoft.windows.camera:', shell=True)
    except Exception:
        os.startfile('microsoft.windows.camera:')
def call_any_app():
    """Ask which application to open and launch it."""
    cf.text_to_speech('OK, which application to open?')
    requested_app = cf.speech_to_text().lower()
    cf.launch_any_exe_bat_application(requested_app)
def call_switch_wndw():
    """Ask for a window name and bring it to the foreground, maximized."""
    cf.text_to_speech('OK, whats the window name?')
    target_window = cf.speech_to_text().lower()
    cf.window_activate_and_maximize_windows(target_window)
def call_find_on_screen():
    """Ask for text and locate its first occurrence on screen."""
    cf.text_to_speech('OK, what to find ?')
    search_text = cf.speech_to_text().lower()
    cf.find_text_on_screen(searchText=search_text, delay=0.1, occurance=1, isSearchToBeCleared=False)
def call_minimize_wndw():
    """Ask for a window name and minimize it."""
    cf.text_to_speech('OK, which window to minimize?')
    target_window = cf.speech_to_text().lower()
    cf.window_minimize_windows(target_window)
def call_close_app():
    """Ask for an application name and close its window."""
    cf.text_to_speech('OK, which application to close?')
    target_app = cf.speech_to_text().lower()
    cf.window_close_windows(target_app)
def call_take_selfie():
    """Take a webcam selfie and optionally open the saved image."""
    prompts = ["OK, Smile Please !","OK, Please look at the Camera !","OK, Say Cheese !","OK, Sit up straight !"]
    cf.text_to_speech(shuffle_return_one_option(prompts))
    capture_photo()
    cf.text_to_speech("Thanks, I saved your photo. Do you want me to open ?")
    answer = cf.speech_to_text().lower()
    if answer in ("yes", "yah", "ok"):
        cf.launch_any_exe_bat_application(Path(os.path.join(cf.clointfusion_directory, "Images","Selfie.PNG")))
def call_thanks():
    """Reply to a thank-you with a random acknowledgement."""
    replies = ["You're welcome","You're very welcome.","That's all right.","No problem.","No worries.","Don't mention it.","It's my pleasure.","My pleasure.","Glad to help.","Sure!",""]
    cf.text_to_speech(shuffle_return_one_option(replies))
def call_shut_pc():
    """Confirm with the user, then schedule a Windows shutdown."""
    cf.text_to_speech('Do you want to Shutdown ? Are you sure ?')
    confirmation = cf.speech_to_text().lower()
    if confirmation not in ("yes", "yah", "ok"):
        return
    cf.text_to_speech("OK, Shutting down your machine in a minute")
    os.system('shutdown -s')
def call_social_media():
    """Open every ClointFusion social-media page in a new browser tab.

    The ten copy-pasted try/except blocks are collapsed into one loop;
    each URL is still opened independently so one failure does not stop
    the rest, and errors are printed exactly as before.
    """
    #opens all social media links of ClointFusion
    links = [
        "https://www.facebook.com/ClointFusion",
        "https://twitter.com/ClointFusion",
        "https://www.youtube.com/channel/UCIygBtp1y_XEnC71znWEW2w",
        "https://www.linkedin.com/showcase/clointfusion_official",
        "https://www.reddit.com/user/Cloint-Fusion",
        "https://www.instagram.com/clointfusion",
        "https://www.kooapp.com/profile/ClointFusion",
        "https://discord.com/invite/tsMBN4PXKH",
        "https://www.eventbrite.com/e/2-days-event-on-software-bot-rpa-development-with-no-coding-tickets-183070046437",
        "https://internshala.com/internship/detail/python-rpa-automation-software-bot-development-work-from-home-job-internship-at-clointfusion1631715670",
    ]
    for link in links:
        try:
            webbrowser.open_new_tab(link)
        except Exception as ex:
            print("Error in call_social_media = " + str(ex))
def bol_main():
    """Main voice-command loop: listen, keyword-match, dispatch, repeat.

    Each recognized phrase is checked against keyword lists in order
    (so earlier branches win on overlapping phrases); the rich status
    spinner flips between "Listening" and "Processing" around each
    handler. The loop exits only on bye/quit/stop/exit.
    """
    query_num = 5
    with console.status("Listening...\n") as status:
        while True:
            query = cf.speech_to_text().lower() ## takes user cf.speech_to_text
            try:
                if any(x in query for x in ["name","bol"]):
                    call_name()
                elif 'time' in query:
                    call_time()
                elif any(x in query for x in ["help","commands", "list of commands", "what can you do",]):
                    call_help()
                elif 'who is' in query:
                    status.update("Processing...\n")
                    query = query.replace('who is',"")
                    call_wiki(query)
                    status.update("Listening...\n")
                #Send WA MSG
                elif any(x in query for x in ["send whatsapp","whatsapp","whatsapp message"]):
                    status.update("Processing...\n")
                    call_Send_WA_MSG()
                    status.update("Listening...\n")
                #Play YouTube Video
                elif any(x in query for x in ["youtube","play video","video song","youtube video"]):
                    status.update("Processing...\n")
                    play_on_youtube()
                    status.update("Listening...\n")
                #Search in Google
                elif any(x in query for x in ["google search","search in google"]):
                    status.update("Processing...\n")
                    google_search()
                    status.update("Listening...\n")
                #Open gmail
                elif any(x in query for x in ["gmail","email"]):
                    status.update("Processing...\n")
                    webbrowser.open_new_tab("http://mail.google.com")
                    status.update("Listening...\n")
                #open camera
                elif any(x in query for x in ["launch camera","open camera"]):
                    status.update("Processing...\n")
                    call_camera()
                    status.update("Listening...\n")
                ### close camera
                elif any(x in query for x in ["close camera"]):
                    status.update("Processing...\n")
                    subprocess.run('Taskkill /IM WindowsCamera.exe /F', shell=True)
                    status.update("Listening...\n")
                ### news
                elif 'news' in query:
                    status.update("Processing...\n")
                    trndnews()
                    status.update("Listening...\n")
                #Clap
                elif any(x in query for x in ["clap","applause","shout","whistle"]):
                    status.update("Processing...\n")
                    _play_sound((str(Path(os.path.join(cf.clointfusion_directory,"Logo_Icons","Applause.wav")))))
                    status.update("Listening...\n")
                # Exit the assistant with a parting phrase.
                elif any(x in query for x in ["bye","quit","stop","exit"]):
                    exit_say_choices=["Have a good day! ","Have an awesome day!","I hope your day is great!","Today will be the best!","Have a splendid day!","Have a nice day!","Have a pleasant day!"]
                    cf.text_to_speech(shuffle_return_one_option(exit_say_choices))
                    break
                # Launch the DOST companion client found in site-packages.
                elif "dost" in query:
                    try:
                        import subprocess
                        try:
                            import site
                            site_packages_path = next(p for p in site.getsitepackages() if 'site-packages' in p)
                        except:
                            # Fallback: ask a subprocess interpreter where
                            # site-packages lives (e.g. restricted embeds).
                            site_packages_path = subprocess.run('python -c "import os; print(os.path.join(os.path.dirname(os.__file__), \'site-packages\'))"',capture_output=True, text=True).stdout
                        site_packages_path = str(site_packages_path).strip()
                        status.update("Processing...\n")
                        status.stop()
                        cmd = f'python "{site_packages_path}\ClointFusion\DOST_CLIENT.pyw"'
                        os.system(cmd)
                        status.start()
                        status.update("Listening...\n")
                    except Exception as ex:
                        cf.selft.crash_report(traceback.format_exception(*sys.exc_info(),limit=None, chain=True))
                        print("Error in calling dost from bol = " + str(ex))
                elif any(x in query for x in ["open notepad","launch notepad"]):
                    status.update("Processing...\n")
                    cf.launch_any_exe_bat_application("notepad")
                    status.update("Listening...\n")
                elif any(x in query for x in ["open application","launch application","launch app","open app"]):
                    status.update("Processing...\n")
                    call_any_app()
                    status.update("Listening...\n")
                #Switch to window
                elif any(x in query for x in ["switch window","toggle window","activate window","maximize window"]):
                    status.update("Processing...\n")
                    call_switch_wndw()
                    status.update("Listening...\n")
                #Search in window / browser
                elif any(x in query for x in ["find on screen","search on screen", "locate on screen"]):
                    status.update("Processing...\n")
                    call_find_on_screen()
                    status.update("Listening...\n")
                elif any(x in query for x in ["minimize all","minimize window","show desktop"]):
                    status.update("Processing...\n")
                    cf.window_show_desktop()
                    status.update("Listening...\n")
                # NOTE(review): "minimize window" is consumed by the branch
                # above; this branch only fires for "minimize application".
                elif any(x in query for x in ["minimize window","minimize application"]):
                    status.update("Processing...\n")
                    call_minimize_wndw()
                    status.update("Listening...\n")
                elif any(x in query for x in ["close application","close window"]):
                    status.update("Processing...\n")
                    call_close_app()
                    status.update("Listening...\n")
                elif any(x in query for x in ["launch meeting","zoom meeting"]):
                    status.update("Processing...\n")
                    webbrowser.open_new_tab("https://us02web.zoom.us/j/85905538540?pwd=<PASSWORD>")
                    status.update("Listening...\n")
                elif "close google chrome" in query:
                    status.update("Processing...\n")
                    cf.browser_quit_h()
                    status.update("Listening...\n")
                elif any(x in query for x in ["take pic","take selfie","take a pic","take a selfie"]):
                    status.update("Processing...\n")
                    call_take_selfie()
                    status.update("Listening...\n")
                elif any(x in query for x in ["clear screen","clear","clear terminal","clean", "clean terminal","clean screen",]):
                    status.update("Processing...\n")
                    cf.clear_screen()
                    print("ClointFusion Bol is here to help.")
                elif 'ocr' in query:
                    status.update("Processing...\n")
                    call_ocr()
                elif any(x in query for x in ["social media"]):
                    status.update("Processing...\n")
                    call_social_media()
                    status.update("Listening...\n")
                elif any(x in query for x in ["read the screen","read screen","screen to text"]):
                    status.update("Processing...\n")
                    call_read_screen()
                    status.update("Listening...\n")
                elif any(x in query for x in ["thanks","thank you"]):
                    status.update("Processing...\n")
                    call_thanks()
                    status.update("Listening...\n")
                elif any(x in query for x in ["shutdown my","turn off","switch off"]):
                    status.update("Processing...\n")
                    call_shut_pc()
                    status.update("Listening...\n")
                else:
                    # Unrecognized phrase: every 6th miss re-derives a
                    # suggestion list.
                    # NOTE(review): the options() result is discarded here -
                    # probably meant suggest(options(3, 2)).
                    query_num += 1
                    if query_num % 6 == 1:
                        options(3, 2)
            except:
                error_try_later()
# --- startup: open the launch-count database, greet, run the loop ---
config_folder_path = Path(os.path.join(cf.clointfusion_directory, "Config_Files"))
if os_name == windows_os:
    db_file_path = r'{}\BRE_WHM.db'.format(str(config_folder_path))
else:
    db_file_path = cf.folder_create_text_file(config_folder_path, 'BRE_WHM.db', custom=True)
try:
    connct = sqlite3.connect(db_file_path,check_same_thread=False)
    cursr = connct.cursor()
except Exception as ex:
    cf.selft.crash_report(traceback.format_exception(*cf.sys.exc_info(),limit=None, chain=True))
    print("Error in connecting to DB="+str(ex))
# The "bol" column counts prior launches; welcome() varies its
# suggestions based on it, and the counter is bumped afterwards.
data = cursr.execute("SELECT bol from CF_IMP_VALUES where ID = 1")
for row in data:
    run = row[0]
welcome(run)
cursr.execute("UPDATE CF_IMP_VALUES set bol = bol+1 where ID = 1")
connct.commit()
bol_main()
| StarcoderdataPython |
4847465 | <reponame>mitodl/mitxonline
# Generated by Django 3.1.12 on 2021-09-23 14:51
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration altering CoursePage.feature_image's
    field definition (help_text and FK attributes as listed below)."""

    dependencies = [
        ("wagtailimages", "0023_add_choose_permissions"),
        ("cms", "0012_product_description_help_text"),
    ]
    operations = [
        migrations.AlterField(
            model_name="coursepage",
            name="feature_image",
            field=models.ForeignKey(
                blank=True,
                help_text="Image that will be used where the course is featured or linked. (The recommended dimensions for the image are 375x244)",
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                related_name="+",
                to="wagtailimages.image",
            ),
        ),
    ]
| StarcoderdataPython |
# Create your views here.
from collections import OrderedDict
from copy import deepcopy, copy
from datetime import datetime
from humanize import intcomma
import time
import json
import os
from pathlib import Path
import zipfile
from accounts.forms import LogInForm, SignUpForm
from geodata.geodata import GeoData
from django.conf import settings
from django.contrib.auth import authenticate, login
from django.contrib.auth.models import User
from django.contrib.gis.geos import MultiPolygon, Polygon, GEOSGeometry, Point
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.files.temp import NamedTemporaryFile
from django.http import HttpResponse, JsonResponse
from django.shortcuts import render
from django.template import loader
from django.views.decorators.cache import cache_page
from ucsrb.forms import UploadShapefileForm
from ucsrb.models import FocusArea, TreatmentScenario, StreamFlowReading, TreatmentArea
from features.registry import get_feature_by_uid
def accounts_context():
    """Template context for the shared login/registration widgets.

    Single-space strings deliberately blank out the default labels
    while still rendering a space.
    """
    return {
        'form': LogInForm(),
        'login_title': 'Login',
        'login_intro': 'Access your account',
        'registration_form': SignUpForm(),
        'registration_title': ' ',
        'forgot_password_link': 'Forgot Password?',
        'register_link': ' ',
        'help_link': ' ',
    }
def index(request):
    """Render the FSTAT landing page."""
    context = accounts_context()
    context['title'] = 'UCSRB FSTAT'
    template = loader.get_template('ucsrb/index.html')
    return HttpResponse(template.render(context, request))
def home(request):
    """Render the UCSRB home page."""
    context = accounts_context()
    context['title'] = 'UCSRB'
    template = loader.get_template('ucsrb/home.html')
    return HttpResponse(template.render(context, request))
def help(request):
    """Render the terms/glossary page.

    Name shadows the ``help`` builtin but is kept: URLconfs reference it.
    """
    context = accounts_context()
    context['title'] = 'UCSRB Terms Defined'
    template = loader.get_template('ucsrb/help.html')
    return HttpResponse(template.render(context, request))
def methods(request):
    """Render the methods page."""
    context = accounts_context()
    context['title'] = 'UCSRB Methods'
    template = loader.get_template('ucsrb/methods.html')
    return HttpResponse(template.render(context, request))
def app(request):
    """Render the main mapping application with map/API configuration."""
    context = accounts_context()
    context.update({
        'title': 'UCSRB',
        'MAPBOX_TOKEN': settings.MAPBOX_ACCESS_TOKEN,
        'HERE_TOKEN': settings.HERE_API_TOKEN,
        'HERE_APP_CODE': settings.HERE_APP_CODE,
        'MAP_TECH': 'ol4',
        'UPLOAD_FORM': UploadShapefileForm,
    })
    template = loader.get_template('ucsrb/app.html')
    return HttpResponse(template.render(context, request))
# def sandbox(request):
# template = loader.get_template('ucsrb/sandbox.html')
# context = accounts_context()
# context['title'] = 'UCSRB Sandbox'
# return HttpResponse(template.render(context, request))
#
# def sandbox_json(request, id):
# ta_geojson_list = []
# geojson_response = {
# "type": "FeatureCollection",
# "features": []
# }
# tas = TreatmentArea.objects.filter(scenario__pk=id)
# for ta in tas:
# geojson_response['features'].append(json.loads(ta.geojson))
#
# return JsonResponse(geojson_response)
###########################################################
### API Calls #
###########################################################
def get_user_scenario_list(request):
    """Return the requesting user's TreatmentScenarios as a JSON list,
    sorted case-insensitively by name."""
    scenario_dicts = [
        {
            "id": scenario.pk,
            "name": scenario.name,
            "description": scenario.description,
        }
        for scenario in TreatmentScenario.objects.filter(user=request.user)
    ]
    scenario_dicts.sort(key=lambda item: item['name'].lower())
    return JsonResponse(scenario_dicts, safe=False)
def get_json_error_response(error_msg="Error", status_code=500, context=None):
    """Build a JsonResponse describing a failure.

    ``context`` previously defaulted to a shared mutable dict that this
    function mutated on every default-argument call; the default is now
    materialized per call. Passing a dict still mutates it, as before.
    """
    if context is None:
        context = {}
    context['success'] = False
    context['error_msg'] = error_msg
    response = JsonResponse(context)
    response.status_code = status_code
    return response
def build_bbox(minX, minY, maxX, maxY):
    """Return (bbox polygon, bbox center point) for the given extent."""
    corners = ((minX, minY), (minX, maxY), (maxX, maxY), (maxX, minY), (minX, minY))
    center = Point(((minX + maxX) / 2, (minY + maxY) / 2))
    return (Polygon(corners), center)
def get_veg_unit_by_bbox(request):
    """Return the vegetation planning unit under a small bbox as GeoJSON.

    GET param ``bbox_coords[]`` = [minX, minY, maxX, maxY]. When several
    units fall inside the box, the one covering the box center wins;
    otherwise the first match is used.

    Bug fix: the empty-result path previously passed a dict to
    ``json.loads`` (TypeError); it now returns an empty JSON object.
    """
    [minX, minY, maxX, maxY] = [float(x) for x in request.GET.getlist('bbox_coords[]')]
    bbox, bboxCenter = build_bbox(minX, minY, maxX, maxY)
    # Get all veg units that intersect bbox (planning units)
    from .models import VegPlanningUnit
    vegUnits = VegPlanningUnit.objects.filter(geometry__coveredby=bbox)
    # Select center-most veg unit (handle 0)
    retVegUnit = None
    if vegUnits.count() > 1:
        centerVegUnit = VegPlanningUnit.objects.filter(geometry__coveredby=bboxCenter)
        if centerVegUnit.count() == 1:
            retVegUnit = centerVegUnit[0].geometry.geojson
        else:
            retVegUnit = vegUnits[0].geometry.geojson
    elif vegUnits.count() == 1:
        retVegUnit = vegUnits[0].geometry.geojson
    if retVegUnit is None:
        return JsonResponse({})
    return JsonResponse(json.loads(retVegUnit))
def get_pourpoint_by_id(request, id):
    """Return the PourPoint with the given id as {id, geojson} JSON.

    TODO (carried over): also return the pour-point basin polygon and
    its calculated area.
    """
    print('Pour Point ID: %s' % str(id))
    if request.method == 'GET':
        from .models import PourPoint
        point = PourPoint.objects.get(id=float(id))
        payload = '{"id":%s,"geojson": %s}' % (point.pk, point.geometry.geojson)
        return JsonResponse(json.loads(payload))
def get_basin(request):
    """Return the overlapped-pour-point basin FocusArea as {id, geojson}."""
    if request.method == 'GET':
        from .models import FocusArea
        basin = FocusArea.objects.get(
            unit_type='PourPointOverlap',
            unit_id=request.GET['pourPoint'],
        )
        payload = '{"id":%s,"geojson": %s}' % (basin.pk, basin.geometry.geojson)
        return JsonResponse(json.loads(payload))
def create_treatment_areas(request):
    """Split a scenario's dissolved geometry into treatment-area polygons.

    GET param ``scenario`` is a feature UID resolved via the Madrona
    features registry. Errors out (500 JSON) when the dissolved geometry
    contains no polygons, i.e. the drawing covers no forested land.
    """
    if request.method == 'GET':
        scenario_id = request.GET['scenario']
        scenario = get_feature_by_uid(scenario_id)
        final_geometry = copy(scenario.geometry_dissolved)
        # Turns out break_up_multipolygons trashes the input value: copy it!
        split_polys = break_up_multipolygons(copy(final_geometry), [])
        prescription_selection = scenario.prescription_treatment_selection
        context = {}
        # if not scenario.geometry_dissolved.num_geom > 0:
        if len(split_polys) < 1 or final_geometry.num_geom < 1:
            return get_json_error_response('Drawing does not cover any forested land in the Upper Columbia', 500, context)
        return get_scenario_treatment_areas_geojson(scenario, final_geometry, split_polys, prescription_selection, context)
def claim_treatment_area(request):
    """Transfer an anonymous TreatmentScenario to the logged-in user.

    Responds with a JSON status object: 200 on success, 300 when the
    user is not authenticated or the scenario belongs to another user,
    400 when the scenario is missing, 500 on unexpected errors.
    """
    json_response = {
        'status': 'Failed',
        'code': 500,
        'message': 'Unknown.',
    }
    try:
        scenario_id = request.GET['scenario']
        scenario = get_feature_by_uid('ucsrb_treatmentscenario_{}'.format(scenario_id))
        user = request.user
        anon_user = User.objects.get(pk=settings.ANONYMOUS_USER_PK)
        # Happy path: an authenticated user claims an anonymous scenario.
        if user.is_authenticated and scenario and scenario.user == anon_user:
            scenario.user = user
            scenario.save()
        elif not user.is_authenticated:
            json_response['code'] = 300
            json_response['message'] = 'User is not authenticated.'
            return JsonResponse(json_response)
        elif not scenario:
            json_response['code'] = 400
            json_response['message'] = 'Treatment Scenario not found.'
            return JsonResponse(json_response)
        elif not scenario.user == anon_user:
            json_response['code'] = 300
            json_response['message'] = 'Treatment Scenario is owned by another user.'
            return JsonResponse(json_response)
        json_response['status'] = 'Success'
        json_response['code'] = 200
    except Exception as e:
        # Status/code stay Failed/500; only the message carries detail.
        json_response['message'] = '{}.'.format(e)
    return JsonResponse(json_response)
def login_check(request):
    """Report whether the requesting user is authenticated.

    The previous unused ``json_response`` scaffold dict was dead code
    and has been removed; the response payload is unchanged.
    """
    return JsonResponse({
        'status': 'Success',
        'code': 200,
        'message': None,
        'is_authenticated': request.user.is_authenticated,
    })
def save_drawing(request):
    """Create a TreatmentScenario from a user-drawn polygon POST.

    Expects ``drawing`` (GeoJSON), ``name``, ``description`` and an
    optional ``prescription_treatment_selection`` (defaults to "flow").
    """
    if request.method == 'POST':
        drawing_json = request.POST['drawing']
        name = request.POST['name']
        description = request.POST['description']
        prescription = request.POST.get('prescription_treatment_selection', "flow")
        return define_scenario(request, drawing_json, name, description, prescription)
    return get_json_error_response('Unable to save drawing.', 500, {})
def clean_zip_file(tmp_zip_file):
    """Flatten an uploaded shapefile zip so all members sit at the root.

    Returns (zip_file, is_clean): when members contain directory paths
    a new temp zip is rewritten with basenames only, and is_clean is
    True only if a .shp member was seen; zips that were already flat
    are passed through as clean.
    """
    is_clean = False
    zipname = tmp_zip_file.name
    if zipfile.is_zipfile(zipname):
        zip = zipfile.ZipFile(zipname)
        if len([x for x in zip.namelist() if '/' in x]) > 0:
            # Rewrite every member at the archive root (basename only).
            new_tmp_zip = NamedTemporaryFile(mode='w+',delete=True, suffix='.zip')
            outzip = zipfile.ZipFile(new_tmp_zip.name, 'w')
            file_parts = [(x.split('/')[-1], zip.read(x)) for x in zip.namelist()]
            for part in file_parts:
                try:
                    outzip.writestr(part[0],part[1])
                    if '.shp' in part[0]:
                        is_clean=True
                except IndexError as e:
                    # weird error from zipfile.py line 1792 in writestr
                    pass
            outzip.close()
            tmp_zip_file.close()
            tmp_zip_file = new_tmp_zip
        else:
            is_clean = True
    return (tmp_zip_file, is_clean)
def upload_treatment_shapefile(request):
    """Handle a zipped-shapefile upload that defines a treatment area.

    POST: validates UploadShapefileForm, spools the uploaded zip to a temp
    file, flattens nested archive paths (clean_zip_file), reads the union of
    features via GeoData, and hands the resulting GeoJSON to define_scenario.
    GET (or invalid method): renders the upload form modal instead.
    Errors are reported as JSON via get_json_error_response.
    """
    context = {}
    if request.method == 'POST':
        form = UploadShapefileForm(request.POST, request.FILES)
        if form.is_valid():
            # Spool the upload to disk so zipfile / the shapefile reader can
            # work from a real path.
            tmp_zip_file = NamedTemporaryFile(mode='wb+',delete=True, suffix='.zip')
            for chunk in request.FILES['zipped_shapefile'].chunks():
                tmp_zip_file.write(chunk)
            tmp_zip_file.seek(0)
            # clean_zip_file may replace the temp file with a flattened copy;
            # is_clean means a usable .shp was found (or no rewrite was needed).
            (tmp_zip_file, is_clean) = clean_zip_file(tmp_zip_file)
            if is_clean:
                try:
                    projection = request.POST['shp_projection']
                    geodata = GeoData()
                    if projection and len(projection) > 1:
                        geodata.read(tmp_zip_file.name, projection=projection)
                    else:
                        # No usable projection supplied; let GeoData infer it.
                        geodata.read(tmp_zip_file.name)
                    tmp_zip_file.close()
                    featJson = geodata.getUnion(format='geojson', projection='EPSG:3857')
                    scenario_name = request.POST['treatment_name']
                    if len(scenario_name) < 1:
                        # Fall back to the uploaded filename without its extension.
                        scenario_name = '.'.join(request.FILES['zipped_shapefile'].name.split('.')[:-1])
                    description = request.POST['treatment_description']
                    prescription_selection = request.POST['prescription_treatment_selection']
                    return define_scenario(request, featJson, scenario_name, description, prescription_selection)
                except Exception as e:
                    message = "Error when attempting to read provided shapefile: {}".format(e)
                    return get_json_error_response(message, 400, context)
            else:
                message = "Error: Unable to read provided file. Be sure you upload a zipfile (.zip) that contains a .shp, .dbf, etc..."
                return get_json_error_response(message, 400, context)
        else:
            # Surface form validation errors back to the client.
            message = "Errors: "
            for key in form.errors.keys():
                message += "\n %s: %s" % (key, form.errors[key])
            return get_json_error_response(message, 400, context)
    else:
        form = UploadShapefileForm()
    return render(request, 'upload_modal.html', {'UPLOAD_FORM':form})
def break_up_multipolygons(multipolygon, polygon_list=None):
    """Recursively flatten a (Multi)Polygon into a flat list of Polygons.

    Args:
        multipolygon: a geometry exposing ``geom_type`` and, for
            MultiPolygons, ``num_geom`` and ``pop()``. MultiPolygons are
            consumed (emptied) by this call.
        polygon_list: optional list to append Polygons to; a fresh list is
            created when omitted.

    Returns:
        The list containing every component Polygon.
    """
    # Fix: the original used a mutable default argument (polygon_list=[]),
    # which is shared across calls and accumulates results between
    # invocations that omit the argument. Use a None sentinel instead.
    if polygon_list is None:
        polygon_list = []
    if multipolygon.geom_type == 'MultiPolygon':
        if multipolygon.num_geom > 0:
            new_poly = multipolygon.pop()
            polygon_list = break_up_multipolygons(new_poly, polygon_list)
            polygon_list = break_up_multipolygons(multipolygon, polygon_list)
    elif multipolygon.geom_type == 'Polygon':
        polygon_list.append(multipolygon)
    return polygon_list
def define_scenario(request, featJson, scenario_name, description, prescription_selection):
    """Create a TreatmentScenario (and backing FocusArea) from GeoJSON.

    Args:
        request: the originating HttpRequest (used for the user/auth check).
        featJson: GeoJSON FeatureCollection string of drawn/uploaded polygons.
        scenario_name: display name for the new scenario.
        description: user-supplied description.
        prescription_selection: default prescription for the treatment areas.

    Returns a JsonResponse with the scenario id and treatment-area GeoJSON
    (via get_scenario_treatment_areas_geojson), or a JSON error response on
    validation failure (too large, anonymous not allowed, overlap, etc.).
    """
    context = {}
    polys = []
    split_polys = []
    for feature in json.loads(featJson)['features']:
        geos_geom = GEOSGeometry(json.dumps(feature['geometry']))
        # GEOS assumes 4326 when given GeoJSON (by definition this should be true)
        # However, we've always used 3857, even in GeoJSON.
        # Fixing this would be great, but without comprehensive testing, it's safer
        # to perpetuate this breach of standards.
        geos_geom.srid = settings.GEOMETRY_DB_SRID
        polys.append(geos_geom)
    # Flatten any MultiPolygons so every part becomes its own Polygon.
    for poly in polys:
        split_polys = break_up_multipolygons(poly, split_polys)
    geometry = MultiPolygon(split_polys)
    layer = 'Drawing'
    focus_area = FocusArea.objects.create(unit_type=layer, geometry=geometry)
    focus_area.save()
    # EPSG:2163 (US National Atlas Equal Area) gives area in square meters;
    # 4046.86 m^2 per acre.
    focus_area.geometry.transform(2163)
    treatment_acres = int(round(focus_area.geometry.area/4046.86, 0))
    # return geometry to web mercator
    focus_area.geometry.transform(3857)
    if treatment_acres > settings.MAX_TREATMENT_ACRES:
        return get_json_error_response('Treatment is too large ({} acres). Please keep it under {} acres.'.format(intcomma(treatment_acres), intcomma(settings.MAX_TREATMENT_ACRES)))
    user = request.user
    if not user.is_authenticated:
        if settings.ALLOW_ANONYMOUS_DRAW == True:
            # Attribute anonymous work to the designated placeholder account.
            from django.contrib.auth.models import User
            user = User.objects.get(pk=settings.ANONYMOUS_USER_PK)
        else:
            return get_json_error_response('Anonymous Users Not Allowed. Please log in.', 401, context)
    try:
        scenario = TreatmentScenario.objects.create(
            user=user,
            name=scenario_name,
            description=description,
            focus_area=True,
            focus_area_input=focus_area,
            prescription_treatment_selection=prescription_selection
        )
    except:
        # Technically we're testing for psycopg2's InternalError GEOSIntersects TopologyException
        return get_json_error_response('Treatment Areas overlap. Please review your data and start over.', 500, context)
    if not scenario.geometry_dissolved:
        return get_json_error_response('Drawing does not cover any forested land in the Upper Columbia', 500, context)
    final_geometry = scenario.geometry_dissolved
    return get_scenario_treatment_areas_geojson(scenario, final_geometry, split_polys, prescription_selection, context)
def get_scenario_treatment_areas_geojson(scenario, final_geometry, split_polys, prescription_selection, context):
    """Create a TreatmentArea per polygon and return their GeoJSON payload.

    Rejects scenarios whose dissolved geometry covers fewer than
    settings.MIN_TREATMENT_ACRES acres. On success returns a JsonResponse
    with the scenario id, a FeatureCollection of treatment areas, and the
    scenario footprint.
    """
    # EPSG:2163 = US National Atlas Equal Area
    final_geometry.transform(2163)
    # 4046.86 m^2 per acre.
    if final_geometry.area/4046.86 < settings.MIN_TREATMENT_ACRES:
        return get_json_error_response('Treatment does not cover enough forested land to make a difference', 500, context)
    # return geometry to web mercator
    final_geometry.transform(3857)
    tas = []
    for new_ta_geom in split_polys:
        new_ta_geom.transform(3857)
        new_ta = TreatmentArea.objects.create(
            scenario=scenario,
            prescription_treatment_selection=prescription_selection,
            geometry=new_ta_geom
        )
        tas.append(new_ta)
    ta_geojson_list = []
    for ta in tas:
        ta_geojson_list.append(ta.geojson)
    # Each ta.geojson is already a serialized Feature; splice them into a
    # FeatureCollection by string concatenation.
    geojson_response = '{"type": "FeatureCollection","features": [%s]}' % ', '.join(ta_geojson_list)
    return JsonResponse(json.loads('{"id":%s,"geojson": %s,"footprint": %s}' % (scenario.pk, geojson_response, scenario.geometry_dissolved.geojson)))
def set_treatment_prescriptions(request):
    """Apply prescription selections to TreatmentAreas (POST, JSON body).

    Expects a body of the form
    ``{"treatment_prescriptions": [{"id": ..., "prescription": ...}, ...]}``.
    Only the owning user (or the shared anonymous account) may update an
    area; the first permission failure aborts with code 300. Returns a JSON
    status payload with counts of records sent and updated.
    """
    json_response = {
        'status': 'Failed',
        'code': 500,
        'message': 'Unknown',
        'records_updated': 0,
        'records_sent': -9999
    }
    if request.method=="POST":
        try:
            received_json_data = json.loads(request.body.decode("utf-8"))
        except Exception as e:
            json_response['message'] = "Unable to read posted data. {}".format(e)
            return JsonResponse(json_response)
        if not 'treatment_prescriptions' in received_json_data.keys():
            json_response['code'] = 400
            json_response['message'] = "Required 'treatment_prescriptions' key not found in supplied JSON."
            return JsonResponse(json_response)
        json_response['records_sent'] = len(received_json_data['treatment_prescriptions'])
        for treatment in received_json_data['treatment_prescriptions']:
            ta = TreatmentArea.objects.get(pk=int(treatment['id']))
            # check treatment's Scenario's owner matches user
            if ta.scenario.user == request.user or (request.user.is_anonymous and ta.scenario.user.pk == settings.ANONYMOUS_USER_PK):
                ta.prescription_treatment_selection = treatment['prescription']
                ta.save()
                json_response['records_updated'] = json_response['records_updated']+1
            else:
                # Abort on the first permission failure; earlier updates in
                # this request have already been saved.
                json_response['code'] = 300
                json_response['message'] = "User does not have permission to update TreatmentArea with ID: {}".format(treatment['id'])
                return JsonResponse(json_response)
        if json_response['records_updated'] > 0 and json_response['records_updated'] == json_response['records_sent']:
            # return success
            json_response['status'] = 'Success'
            json_response['code'] = 200
            json_response['message'] = "Successfully updated all TreatmentAreas"
        elif json_response['records_updated'] > 0:
            json_response['message'] = "Unknown Error: Not all records could be updated."
        elif json_response['records_updated'] == json_response['records_sent']:
            # 0 updated == 0 sent: the supplied list was empty.
            json_response['code'] = 400
            json_response['message'] = "0 records supplied for updated"
        else:
            json_response['message'] = "Unknown Error Occurred"
    else:
        json_response['code'] = 400
        json_response['message'] = 'Request Denied: Requests must be of type "POST".'
    return JsonResponse(json_response)
'''
Take a point in 3857 and return the feature at that point for a given FocusArea type
Primarily developed as a failsafe for not having pour point basin data.
'''
def get_focus_area_at(request):
    """Return the FocusArea of the requested layer type containing a point.

    GET params: ``point[]`` (x, y pair) and ``layer`` (unit_type). Responds
    with ``{"id": ..., "geojson": ...}``. For non-GET requests both fields
    are None (the original crashed with an AttributeError on the fallback
    dict in that case).
    """
    focus_area_id = None
    focus_area_geojson = None
    if request.method == 'GET':
        from .models import FocusArea
        point = request.GET.getlist('point[]')
        pointGeom = Point( (float(point[0]), float(point[1])))
        layer = request.GET['layer']
        focus_area = FocusArea.objects.get(unit_type=layer, geometry__intersects=pointGeom)
        focus_area_id = focus_area.unit_id
        # Parse the DB-provided GeoJSON so it nests as an object, matching
        # the original string-interpolated payload without the risk of
        # hand-building invalid JSON when unit_id is not numeric.
        focus_area_geojson = json.loads(focus_area.geometry.geojson)
    return JsonResponse({"id": focus_area_id, "geojson": focus_area_geojson})
def get_focus_area(request):
    """Return a FocusArea looked up by layer type and unit id.

    GET params: ``id`` (unit_id) and ``layer`` (unit_type, matched
    upper-cased). Responds with ``{"id": ..., "geojson": ...}``. For non-GET
    requests both fields are None (the original crashed with an
    AttributeError on the fallback dict in that case).
    """
    focus_area_pk = None
    focus_area_geojson = None
    if request.method == 'GET':
        from .models import FocusArea
        unit_id = request.GET['id']
        layer = request.GET['layer']
        focus_area = FocusArea.objects.get(unit_type=layer.upper(), unit_id=unit_id)
        focus_area_pk = focus_area.pk
        # Build the response as a dict instead of interpolating raw strings
        # into JSON, which could produce an unparsable payload.
        focus_area_geojson = json.loads(focus_area.geometry.geojson)
    return JsonResponse({"id": focus_area_pk, "geojson": focus_area_geojson})
# NEEDS:
# pourpoint_id
### RDH - actually, we need to determine this from a given treatment scenario
### --- get all discrete ppt basins that intersect the treatment
### --- for each, get all downstream ppts
### --- consolidate all lists (including initial ppts) into a single unique list
def get_downstream_pour_points(request):
    """List pour points downstream of the requested pour point.

    NOTE: the topology lookup is not implemented yet, so ``downstream_ids``
    stays empty and the response is currently always an empty list.
    """
    from ucsrb.models import PourPoint, FocusArea
    pourpoint_id = request.GET.get('pourpoint_id')
    downstream_ids = []
    # TODO: get topology lookup strategy
    downstream_ppts = [
        {
            'name': FocusArea.objects.get(unit_id=ppt_id, unit_type='PourPointDiscrete').description,
            'id': ppt_id,
            'geometry': PourPoint.objects.get(pk=ppt_id).geometry.json,
        }
        for ppt_id in downstream_ids
    ]
    return JsonResponse(downstream_ppts, safe=False)
def sort_output(flow_output):
    """Convert nested ``{rx: {treatment: {timestep: flow}}}`` mappings into
    chronologically sorted lists of ``{'timestep', 'flow'}`` records.

    The sentinel key ``'records_available'`` is skipped; treatments with no
    timestep entries are dropped from the result entirely.
    """
    def _parse_timestep(stamp):
        return datetime.strptime(stamp, "%m.%d.%Y-%H:%M:%S")

    ordered = OrderedDict({})
    for rx, treatments in flow_output.items():
        ordered[rx] = {}
        for treatment, readings in treatments.items():
            stamps = [key for key in readings if key != 'records_available']
            stamps.sort(key=_parse_timestep)
            if stamps:
                ordered[rx][treatment] = [
                    {'timestep': stamp, 'flow': readings[stamp]}
                    for stamp in stamps
                ]
    return ordered
def get_results_delta(flow_output):
    """Subtract the untreated baseline from every series in ``flow_output``.

    Accepts either raw ``{year: {treatment: {timestep: flow}}}`` mappings or
    the already-sorted list form produced by sort_output; returns
    sorted-list form in both cases.

    NOTE(review): if the inner value is neither dict nor list this falls
    through and implicitly returns None — confirm callers never hit that.
    """
    if type(flow_output) == OrderedDict:
        # while OrderedDict seems appropriate, the logic is written for an object with a list.
        # Rather than having to write and maintain two pieces of code to do the
        # same job, just convert it:
        out_dict = json.loads(json.dumps(flow_output))
    else:
        out_dict = deepcopy(flow_output)
    if type(out_dict[settings.NORMAL_YEAR_LABEL][settings.TREATED_LABEL]) in [dict, OrderedDict]:
        # flow_results
        for weather_year in out_dict.keys():
            for timestep in out_dict[weather_year][settings.TREATED_LABEL].keys():
                baseflow = flow_output[weather_year][settings.UNTREATED_LABEL][timestep]
                for rx in out_dict[weather_year].keys():
                    # be sure not to process the 'records_available' key:
                    if timestep in out_dict[weather_year][rx].keys():
                        out_dict[weather_year][rx][timestep] -= baseflow
        return sort_output(out_dict)
    elif type(out_dict[settings.NORMAL_YEAR_LABEL][settings.TREATED_LABEL]) == list:
        # previously-deltaed data
        for weather_year in out_dict.keys():
            for treatment in out_dict[weather_year].keys():
                for index, timestep in enumerate(out_dict[weather_year][treatment]):
                    # Testing has shown that this logic is sound - chronological order is maintained across treatment.
                    baseflow = flow_output[weather_year][settings.UNTREATED_LABEL][index]['flow']
                    out_dict[weather_year][treatment][index]['flow'] -= baseflow
        return out_dict
def get_results_xd_low(flow_output, sorted_results, days):
    """Compute the rolling x-day low flow for every series.

    Args:
        flow_output: nested ``{year: {treatment: {timestep: flow}}}`` dict,
            deep-copied to serve as the output structure template.
        sorted_results: the same data in sorted-list form (from sort_output).
        days: size of the rolling-minimum window, in days.

    Returns:
        A ``(sorted_low_flows, sept_median_x_day_low)`` tuple: the rolling
        lows in sorted-list form, plus the September median of the rolling
        lows per series.

    NOTE(review): statistics.median raises StatisticsError on an empty list,
    so a series with no September entries would fail — confirm inputs always
    span September.
    """
    from copy import deepcopy
    from statistics import median
    out_dict = deepcopy(flow_output)
    sept_median_x_day_low = {}
    for rx in sorted_results.keys():
        sept_median_x_day_low[rx] = {}
        for treatment in sorted_results[rx].keys():
            sept_list = []
            for index, treatment_result in enumerate(sorted_results[rx][treatment]):
                timestep = treatment_result['timestep']
                time_object = datetime.strptime(timestep, "%m.%d.%Y-%H:%M:%S")
                # Number of reporting timesteps that make up an x-day window.
                x_day_timestep_count = int(days*(24/settings.TIME_STEP_REPORTING))
                if index < x_day_timestep_count:
                    # Not enough history yet: fall back to the first full window.
                    flows = [x['flow'] for x in sorted_results[rx][treatment][0:x_day_timestep_count]]
                else:
                    flows = [x['flow'] for x in sorted_results[rx][treatment][index-(x_day_timestep_count-1):index+1]]
                low_flow = min(float(x) for x in flows)
                out_dict[rx][treatment][timestep] = low_flow
                if time_object.month == 9:
                    sept_list.append(low_flow)
            sept_median_x_day_low[rx][treatment] = median(sept_list)
    return (sort_output(out_dict), sept_median_x_day_low)
def get_results_xd_mean(flow_output, sorted_results, days):
    """Compute the rolling x-day mean flow for every series.

    Mirrors get_results_xd_low but averages the window instead of taking
    its minimum. ``flow_output`` supplies the output structure template;
    ``sorted_results`` is the sorted-list form; ``days`` sets the window
    size. Returns the rolling means in sorted-list form.
    """
    from copy import deepcopy
    out_dict = deepcopy(flow_output)
    for rx in sorted_results.keys():
        for treatment in sorted_results[rx].keys():
            for index, treatment_result in enumerate(sorted_results[rx][treatment]):
                timestep = treatment_result['timestep']
                # Number of reporting timesteps that make up an x-day window.
                x_day_timestep_count = int(days*(24/settings.TIME_STEP_REPORTING))
                if index < x_day_timestep_count:
                    # Not enough history yet: fall back to the first full window.
                    flows = [x['flow'] for x in sorted_results[rx][treatment][0:x_day_timestep_count]]
                else:
                    flows = [x['flow'] for x in sorted_results[rx][treatment][index-(x_day_timestep_count-1):index+1]]
                mean_flow = sum(flows)/float(len(flows))
                out_dict[rx][treatment][timestep] = mean_flow
    return sort_output(out_dict)
def parse_flow_results(overlap_basin, treatment):
    """Collect baseline and treated streamflow series for a basin segment.

    For each configured weather year, pulls StreamFlowReading rows for the
    basin's segment (baseline and treated), folds raw model timesteps into
    reporting timesteps (ft^3/s), and tallies annual water volume plus the
    September average flow per series.

    Returns:
        ``{year: {'flow_output': OrderedDict, 'annual_water_volume': dict,
        'sept_avg_flow': dict}}``.
    """
    flow_results = {}
    # Number of raw model timesteps folded into one reporting timestep.
    steps_to_aggregate = settings.TIME_STEP_REPORTING/settings.TIME_STEP_HOURS
    for model_year in settings.MODEL_YEARS.keys():
        output_dict = OrderedDict({})
        annual_water_volume = {}
        sept_avg_flow = {}
        flow_results[model_year] = {}
        flow_data_tuples = []
        # We only draw the 'untreated baseline' year, not 'untreated wet/dry' years.
        # if model_year == settings.NORMAL_YEAR_LABEL:
        baseline_readings = StreamFlowReading.objects.filter(
            segment_id=overlap_basin.unit_id,
            is_baseline=True,
            time__gte=settings.MODEL_YEARS[model_year]['start'],
            time__lte=settings.MODEL_YEARS[model_year]['end'],
        )
        flow_data_tuples.append((settings.UNTREATED_LABEL, baseline_readings))
        # else:
        #     flow_data_tuples.append((settings.UNTREATED_LABEL, []))
        treated_readings = StreamFlowReading.objects.filter(
            segment_id=overlap_basin.unit_id,
            treatment=treatment,
            time__gte=settings.MODEL_YEARS[model_year]['start'],
            time__lte=settings.MODEL_YEARS[model_year]['end'],
        )
        # flow_data_tuples.append((model_year, treated_readings))
        flow_data_tuples.append((settings.TREATED_LABEL, treated_readings))
        for (treatment_type, readings_data) in flow_data_tuples:
            aggregate_volume = 0
            sept_flow = 0
            sept_records = 0
            annual_water_volume[treatment_type] = 0
            output_dict[treatment_type] = OrderedDict({})
            record_count = 0
            try:
                readings_data = readings_data.order_by('time')
            except AttributeError as e:
                # we use empty lists when no query was made.
                pass
            for index, reading in enumerate(list(readings_data)):
                record_count += 1
                time_object = reading.time
                # Get volume of flow for timestep in Cubic Feet
                timestep_volume = reading.value * 35.3147 * settings.TIME_STEP_HOURS # readings are in m^3/hr
                aggregate_volume += timestep_volume
                annual_water_volume[treatment_type] = annual_water_volume[treatment_type] + timestep_volume
                if index%steps_to_aggregate == 0:
                    output_dict[treatment_type][reading.timestamp] = aggregate_volume/settings.TIME_STEP_REPORTING/60/60 #get ft^3/s
                    aggregate_volume = 0
                if time_object.month == 9:
                    # Track September separately for the low-flow summary.
                    sept_flow += timestep_volume/settings.TIME_STEP_HOURS/60/60
                    sept_records += 1
            if sept_records > 0:
                sept_avg_flow[treatment_type] = str(round(sept_flow/sept_records, 2))
            else:
                sept_avg_flow[treatment_type] = 'unknown'
            if record_count > 0:
                output_dict[treatment_type]['records_available'] = True
            else:
                output_dict[treatment_type]['records_available'] = False
        flow_results[model_year] = {
            'flow_output': output_dict,
            'annual_water_volume': annual_water_volume,
            'sept_avg_flow': sept_avg_flow
        }
    return flow_results
def get_float_change_as_rounded_string(rx_val, baseline):
    """Return ``rx_val - baseline`` rounded to 2 places as a string,
    prefixing positive changes with an explicit '+'."""
    delta = float(rx_val) - float(baseline)
    rendered = str(round(delta, 2))
    return "+" + rendered if delta > 0 else rendered
def absolute_chart(chart_data):
    """Assemble chart series of absolute flows: the untreated baseline from
    the normal weather year plus the treated series for each weather year.

    Key order (untreated, dry, wet, normal) is preserved for the client.
    """
    series_spec = [
        (settings.UNTREATED_LABEL, settings.NORMAL_YEAR_LABEL, settings.UNTREATED_LABEL),
        (settings.DRY_YEAR_LABEL, settings.DRY_YEAR_LABEL, settings.TREATED_LABEL),
        (settings.WET_YEAR_LABEL, settings.WET_YEAR_LABEL, settings.TREATED_LABEL),
        (settings.NORMAL_YEAR_LABEL, settings.NORMAL_YEAR_LABEL, settings.TREATED_LABEL),
    ]
    return {
        out_key: chart_data[year][treatment]
        for (out_key, year, treatment) in series_spec
    }
def delta_chart(chart_data):
    """Assemble chart series of flow deltas: the treated series only, one
    per weather year (dry, wet, normal — order preserved)."""
    treated = settings.TREATED_LABEL
    out_chart = {}
    for year in (settings.DRY_YEAR_LABEL, settings.WET_YEAR_LABEL, settings.NORMAL_YEAR_LABEL):
        out_chart[year] = chart_data[year][treated]
    return out_chart
# NEEDS:
# pourpoint_id
# treatment_id
# @cache_page(60 * 60) # 1 hour of caching
def get_hydro_results_by_pour_point_id(request):
    """Build the hydrologic results report for a pour point and treatment.

    GET params: ``pourpoint_id`` and ``treatment_id``. Gathers baseline and
    treated streamflow series for the normal/wet/dry weather years, derives
    delta, rolling-low, and rolling-mean series, and returns a JsonResponse
    with summary tables ('Summary'), chart series ('charts'), and the
    overlap-basin geometry.
    """
    from ucsrb.models import TreatmentScenario, FocusArea, PourPoint, VegPlanningUnit
    import csv
    import time
    import os
    # start = datetime.now()
    # previous_stamp = datetime.now()
    # checkpoint = 0
    # #1
    # checkpoint += 1
    # print("Checkpoint %d: total - %d, step - %d" % (checkpoint, (datetime.now()-start).total_seconds(), (datetime.now()-previous_stamp).total_seconds()))
    # previous_stamp = datetime.now()
    # Get pourpoint_id from request or API
    pourpoint_id = request.GET.get('pourpoint_id')
    ppt = PourPoint.objects.get(id=pourpoint_id)
    # Get treatment_id from request or API
    treatment_id = request.GET.get('treatment_id')
    treatment = TreatmentScenario.objects.get(pk=treatment_id)
    overlap_basin = FocusArea.objects.filter(unit_type='PourPointOverlap', unit_id=pourpoint_id)[0]
    # RDH 09/03/2018
    # Some of the data I need is at the Overlapping Ppt Basin level, while some is aggregated to
    # the PourPointBasin, which I am discovering was calculated to the Discrete Ppt Basins.
    # Since the Discrete Ppt basins and the Overlapping ppt basins DO NOT MATCH, you will see
    # a lot of workarounds in this section.
    # If the two layers are made to match in the future this could be MUCH simpler.
    upslope_ppts = [x.id for x in PourPoint.objects.filter(geometry__intersects=overlap_basin.geometry)]
    # NOTE(review): pourpoint_id comes from GET as a string while
    # upslope_ppts holds model ids, so this membership test may always fail
    # and append the string form — confirm intended.
    if pourpoint_id not in upslope_ppts:
        upslope_ppts.append(pourpoint_id)
    # drainage_basins = FocusArea.objects.filter(unit_id__in=upslope_ppts, unit_type="PourPointOverlap")
    # basin_acres = sum([x.area for x in drainage_basins])
    overlap_geometry = overlap_basin.geometry
    # EPSG:2163 (equal-area) for acreage; 4046.86 m^2 per acre.
    overlap_geometry.transform(2163)
    basin_acres = round(overlap_geometry.area/4046.86, 2)
    # return geometry to web mercator
    overlap_geometry.transform(3857)
    est_type = 'Modeled'
    impute_id = ppt.id
    flow_results = parse_flow_results(overlap_basin, treatment)
    flow_output = {
        settings.NORMAL_YEAR_LABEL: flow_results[settings.NORMAL_YEAR_LABEL]['flow_output'],
        settings.WET_YEAR_LABEL: flow_results[settings.WET_YEAR_LABEL]['flow_output'],
        settings.DRY_YEAR_LABEL: flow_results[settings.DRY_YEAR_LABEL]['flow_output']
    }
    avg_flow_results = {}
    # Baseline water yield (bas_char)
    # Cubic Feet per year (annual volume) / Square Feet (basin area) * 12 (inches/foot) = x inches/year
    baseline_water_yield = {}
    absolute_results = {}
    # delta flow
    delta_results = {}
    seven_d_low_results = {}
    sept_median_7_day_low = {}
    # 1-day low-flow
    one_d_low_results = {}
    sept_median_1_day_low = {}
    seven_d_mean_results = {}
    one_d_mean_results = {}
    delta_1_d_low_results = {}
    delta_1_d_mean_results = {}
    delta_7_d_low_results = {}
    delta_7_d_mean_results = {}
    for weather_year in flow_results.keys():
        baseline_water_yield[weather_year] = str(round(flow_results[weather_year]['annual_water_volume'][settings.UNTREATED_LABEL]/(basin_acres*43560)*12, 2))
    absolute_results = sort_output(flow_output)
    delta_results = get_results_delta(flow_output)
    (seven_d_low_results, sept_median_7_day_low) = get_results_xd_low(flow_output, absolute_results, 7)
    (one_d_low_results, sept_median_1_day_low) = get_results_xd_low(flow_output, absolute_results, 1)
    seven_d_mean_results = get_results_xd_mean(flow_output, absolute_results, 7)
    one_d_mean_results = get_results_xd_mean(flow_output, absolute_results, 1)
    delta_1_d_low_results = get_results_delta(one_d_low_results)
    delta_1_d_mean_results = get_results_delta(one_d_mean_results)
    delta_7_d_low_results = get_results_delta(seven_d_low_results)
    delta_7_d_mean_results = get_results_delta(seven_d_mean_results)
    # Average annual flow in CFS: annual volume (ft^3) over seconds per year.
    for weather_year in flow_results.keys():
        avg_flow_results[weather_year] = {}
        for treatment_type in [settings.UNTREATED_LABEL, settings.TREATED_LABEL]:
            avg_flow_results[weather_year][treatment_type] = str(round(flow_results[weather_year]['annual_water_volume'][treatment_type]/(365*24*60*60), 2))
    charts = [
        {'title': 'Absolute Flow Rate','data': absolute_chart(absolute_results)},
        {'title': 'Seven Day Low Flow','data': absolute_chart(seven_d_low_results)},
        {'title': 'Seven Day Mean Flow','data': absolute_chart(seven_d_mean_results)},
        {'title': 'One Day Low Flow','data': absolute_chart(one_d_low_results)},
        {'title': 'One Day Mean Flow','data': absolute_chart(one_d_mean_results)},
        # {'title': 'Change in Flow Rate','data': delta_chart(delta_results)},
        # {'title': 'Change in 7 Day Low Flow Rate','data': delta_chart(delta_7_d_low_results)},
        # {'title': 'Change in 7 Day Mean Flow Rate','data': delta_chart(delta_7_d_mean_results)},
        # {'title': 'Change in 1 Day Low Flow Rate','data': delta_chart(delta_1_d_low_results)},
        # {'title': 'Change in 1 Day Mean Flow Rate','data': delta_chart(delta_1_d_mean_results)},
        {'title': 'Change in Flow Rate','data': absolute_chart(delta_results)},
        {'title': 'Change in 7 Day Low Flow Rate','data': absolute_chart(delta_7_d_low_results)},
        {'title': 'Change in 7 Day Mean Flow Rate','data': absolute_chart(delta_7_d_mean_results)},
        {'title': 'Change in 1 Day Low Flow Rate','data': absolute_chart(delta_1_d_low_results)},
        {'title': 'Change in 1 Day Mean Flow Rate','data': absolute_chart(delta_1_d_mean_results)},
    ]
    bas_char_data = []
    bas_char_data.append({
        'key': 'Total area upslope of this gauging station',
        'value': basin_acres,
        'unit': 'acres',
        'help': '\'Upslope\' means \'all area that drains water to this point.\''
    })
    vus = VegPlanningUnit.objects.filter(dwnstream_ppt_id__in=upslope_ppts)
    acres_forested = int(sum([x.acres for x in vus]))
    bas_char_data.append({
        'key': 'Total forested area upslope',
        'value': acres_forested,
        'unit': 'acres',
        'help': '\'Upslope\' means \'all area that drains water to this point.\''
    })
    # bas_char_data.append({'key': 'Percent Forested', 'value': int(acres_forested/basin_acres*100), 'unit': '%' })
    bas_char_data.append({'key': 'Baseline water yield', 'value': baseline_water_yield[settings.NORMAL_YEAR_LABEL], 'unit': 'inches/year' })
    bas_char_data.append({'key': 'Baseline average annual flow', 'value': avg_flow_results[settings.NORMAL_YEAR_LABEL][settings.UNTREATED_LABEL], 'unit': 'CFS' })
    bas_char_data.append({'key': 'Baseline September mean flow', 'value': flow_results[settings.NORMAL_YEAR_LABEL]['sept_avg_flow'][settings.UNTREATED_LABEL], 'unit': 'CFS' })
    # NOTE(review): weather_year here is whatever the last loop iteration
    # left bound; a 'Baseline' row probably intends NORMAL_YEAR_LABEL — verify.
    bas_char_data.append({'key': 'Baseline September median 7 day avg low flow', 'value': round(sept_median_7_day_low[weather_year][settings.UNTREATED_LABEL], 2), 'unit': 'CFS' })
    hydro_char_data = []
    hydro_char_data.append({'key': '<b>Change in average annual flow from proposed management</b>', 'value': '', 'unit': '' })
    # for weather_year in [x for x in avg_flow_results.keys() if not x == settings.UNTREATED_LABEL]:
    for weather_year in flow_results.keys():
        treatment_type_change = get_float_change_as_rounded_string(avg_flow_results[weather_year][settings.TREATED_LABEL],avg_flow_results[weather_year][settings.UNTREATED_LABEL])
        hydro_char_data.append({'key': '&nbsp;&nbsp;&nbsp;&nbsp;- %s' % weather_year, 'value': treatment_type_change, 'unit': 'CFS' }) #Baseline annl flow - 50 annl flow
    hydro_char_data.append({'key': '<b>Change in average September flow from proposed management </b>', 'value': '', 'unit': '' })
    # for weather_year in [x for x in sept_avg_flow.keys() if not x == settings.UNTREATED_LABEL]:
    for weather_year in flow_results.keys():
        if flow_results[weather_year]['flow_output'][settings.UNTREATED_LABEL]['records_available'] and flow_results[weather_year]['flow_output'][settings.TREATED_LABEL]['records_available']:
            treatment_type_sept_avg_change = get_float_change_as_rounded_string(flow_results[weather_year]['sept_avg_flow'][settings.TREATED_LABEL],flow_results[weather_year]['sept_avg_flow'][settings.UNTREATED_LABEL])
        else:
            treatment_type_sept_avg_change = 'Data not yet available'
        hydro_char_data.append({'key': '&nbsp;&nbsp;&nbsp;&nbsp;- %s' % weather_year, 'value': treatment_type_sept_avg_change, 'unit': 'CFS' })
    hydro_char_data.append({
        'key': '<b>Change in Sept. 7-day low flow from proposed management </b>',
        'value': '',
        'unit': '',
        # 'help': 'These values represent the difference between a {}, untreated year and various years that have had the proposed management applied'.format(settings.NORMAL_YEAR_LABEL)
    })
    # for weather_year in [x for x in sept_median_7_day_low.keys() if not x == settings.UNTREATED_LABEL]:
    for weather_year in flow_results.keys():
        # treatment_type_sept_7_day_low_diff = get_float_change_as_rounded_string(sept_median_7_day_low[weather_year],sept_median_7_day_low[settings.UNTREATED_LABEL])
        treatment_type_sept_7_day_low_diff = get_float_change_as_rounded_string(sept_median_7_day_low[weather_year][settings.TREATED_LABEL],sept_median_7_day_low[weather_year][settings.UNTREATED_LABEL])
        # NOTE(review): the '&<KEY>;' fragment below looks like a corrupted
        # HTML entity (likely '&nbsp;' indentation, as in the rows above) — verify.
        hydro_char_data.append({'key': '&<KEY>;- %s' % weather_year, 'value': treatment_type_sept_7_day_low_diff, 'unit': 'CFS' })
    prop_mgmt_data = []
    basin_veg_units = treatment.veg_units.filter(geometry__intersects=overlap_basin.geometry) #within may be more accurate, but slower
    treatment_acres = sum([x.acres for x in basin_veg_units])
    prop_mgmt_data.append({'key': 'Total forested area in proposed treatment', 'value': int(treatment_acres), 'unit': 'acres' })
    summary_reports = []
    summary_reports.append({'title': 'Basin Characteristics','data': bas_char_data})
    summary_reports.append({'title': 'Hydrologic Characteristics','data': hydro_char_data})
    summary_reports.append({'title': 'Proposed Management','data': prop_mgmt_data})
    results = [
        {
            'type': 'Summary',
            'reports': summary_reports
        },
        {
            'type': 'charts',
            'reports' : charts
        }
    ]
    return JsonResponse({
        'results': results, # TODO: Support 3 years in Hydro reports
        'basin': overlap_basin.geometry.json
    })
@cache_page(60 * 60) # 1 hour of caching
def get_results_by_scenario_id(request):
    """Return aggregate treatment results, pour points, and geometries for
    the TreatmentScenario named by the ``id`` GET parameter.

    Triggers model runs (``set_report``) for any weather year that can run,
    unless ``export`` is requested. Responds with the scenario summary,
    aggregate results, reportable pour points, the input focus area, and
    the treatment-area GeoJSON.
    """
    from ucsrb.models import TreatmentScenario, FocusArea, PourPoint
    scenario_id = request.GET.get('id')
    export = request.GET.get('export')
    try:
        treatment = get_feature_by_uid(scenario_id)
    except Exception:
        # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt are
        # no longer swallowed.
        return get_json_error_response('Treatment with given ID (%s) does not exist' % scenario_id, 500, {})
    veg_units = treatment.veg_units
    impacted_pourpoint_ids = list(set([x.dwnstream_ppt_id for x in veg_units]))
    intermediate_downstream_ppts = PourPoint.objects.filter(id__in=impacted_pourpoint_ids)
    viable_reporting_ppt_ids = [x.id for x in intermediate_downstream_ppts]
    overlap_basins = FocusArea.objects.filter(unit_type='PourPointOverlap', unit_id__in=viable_reporting_ppt_ids)
    # Keep only basins that contain every impacted pour point.
    for ppt in intermediate_downstream_ppts:
        overlap_basins = overlap_basins.filter(geometry__intersects=ppt.geometry)
    reportable_ppts = list(set(viable_reporting_ppt_ids).intersection(impacted_pourpoint_ids))
    try:
        # Smallest basin containing all impacted pour points.
        containing_basin = sorted(overlap_basins, key=lambda x: x.geometry.area)[0]
        reportable_ppts.append(containing_basin.unit_id)
    except Exception:
        # In case there are no reportable downstream ppts.
        # (Narrowed from a bare 'except:'.)
        pass
    downstream_ppts = PourPoint.objects.filter(id__in=reportable_ppts)
    if export:
        print("Export %s" % export)
    else:
        if treatment.job_can_run(settings.NORMAL_YEAR_LABEL) or treatment.job_can_run(settings.DRY_YEAR_LABEL) or treatment.job_can_run(settings.WET_YEAR_LABEL):
            treatment.set_report()
    if treatment.aggregate_report is None or len(treatment.aggregate_report) == 0:
        # Refresh the record in case set_report populated it.
        treatment = get_feature_by_uid(scenario_id)
    # draw/upload seems to have aggregate_report as a string, while
    # filter wizard sets/gets it as object. This is bad, but for now,
    # we'll just cast to a string.
    # SECURITY NOTE: eval() on stored report data executes arbitrary code if
    # the stored value is ever attacker-controlled; prefer
    # json.loads/ast.literal_eval once the storage format is unified.
    aggregate_results = eval(str(treatment.aggregate_report))
    return_json = {
        'scenario': {
            'name': treatment.name,
            'acres': aggregate_results['total_acres']
        },
        'aggregate_results': aggregate_results['results_list'],
        'pourpoints': [ {'id': x.pk, 'name': '', 'geometry': json.loads(x.geometry.json) } for x in downstream_ppts ],
        'focus_area': json.loads(treatment.focus_area_input.geometry.json),
        'treatment_areas': json.loads(treatment.treatment_areas_geojson())
    }
    return JsonResponse(return_json)
def get_results_by_state(request):
    """Placeholder endpoint for state-level results (not implemented yet)."""
    payload = {'response': 'TODO :('}
    return JsonResponse(payload)
def get_last_flow_line(flow_outfile):
    """Return the second-to-last line of a file (decoded as text).

    The final line may still be mid-write by the model process, so the scan
    walks backwards past two newlines and reads the line that follows.
    Quick 'get last line' approach modified from Trasp:
    https://stackoverflow.com/a/54278929
    """
    with open(flow_outfile, 'rb') as fh:
        fh.seek(-2, os.SEEK_END)
        seen_newline = False
        byte = fh.read(1)
        # Stop once we are sitting on a newline AND have already passed one:
        # that positions us at the start of the second-to-last line.
        while not (byte == b'\n' and seen_newline):
            if byte == b'\n':
                seen_newline = True
            fh.seek(-2, os.SEEK_CUR)
            byte = fh.read(1)
        return fh.readline().decode()
def get_status_by_scenario_id(request):
    """Report per-weather-year model-run progress for a TreatmentScenario.

    Looks up the scenario by the ``id`` GET parameter and, for each model
    year, inspects the job status plus the DHSVM run's on-disk output files
    to estimate progress through four phases: Queued, Modelling, Importing,
    Complete. Failed jobs are restarted via ``set_report``. Returns a
    JsonResponse keyed by weather-year label.
    """
    from ucsrb.models import TreatmentScenario, FocusArea, PourPoint
    scenario_id = request.GET.get('id')
    try:
        treatment = get_feature_by_uid(scenario_id)
    except:
        return get_json_error_response('Treatment with given ID (%s) does not exist' % scenario_id, 500, {})
    weather_year_results = {
        settings.NORMAL_YEAR_LABEL: None,
        settings.WET_YEAR_LABEL: None,
        settings.DRY_YEAR_LABEL: None
    }
    for weather_year in settings.MODEL_YEARS.keys():
        # Default status record for this weather year.
        weather_year_results[weather_year] = {
            'progress': None,
            'model_progress': 0,
            'import_progress': 0,
            'task_status': 'Initializing (1/4)',
            'error': 'None',
            'last_line': '',
            'age': None
        }
        if treatment.job_status(weather_year) == 'None':
            weather_year_results[weather_year]['task_status'] = 'Queued (0/4)'
        if treatment.job_status(weather_year) == 'FAILURE':
            weather_year_results[weather_year]['task_status'] = 'Failure. Restarting...'
            treatment.set_report()
        elif treatment.job_status(weather_year) != 'SUCCESS':
            # Attempt to re-run the job - if job is too new, it won't restart, just continue
            weather_year_results[weather_year]['progress'] = 0
            # check out /tmp/runs/run_{id}/output/Streamflow.only
            flow_outfile = "/tmp/runs/run_{}_{}/output/Streamflow.Only".format(treatment.id, weather_year)
            if Path(flow_outfile).exists():
                try:
                    # Estimate model progress from the timestamp of the most
                    # recently completed streamflow record.
                    weather_year_results[weather_year]['last_line'] = get_last_flow_line(flow_outfile)
                    [month, day, year] = weather_year_results[weather_year]['last_line'].split('-')[0].split('.')[0:3]
                    model_time = weather_year_results[weather_year]['last_line'].split(' ')[0].split('-')[1]
                    model_progress_date = datetime.strptime("{}.{}.{}-{}".format(month, day, year, model_time), "%m.%d.%Y-%H:%M:%S")
                    # model_year_type = settings.MODEL_YEAR_LOOKUP[str(year)]
                    model_year = settings.MODEL_YEARS[weather_year]
                    total_time = model_year['end'] - model_year['start']
                    completed_time = model_progress_date - model_year['start']
                    weather_year_results[weather_year]['model_progress'] = (completed_time.total_seconds()/total_time.total_seconds())*100*settings.MODEL_PROGRESS_FACTOR #50% of the progress is modelling, so /2
                    weather_year_results[weather_year]['task_status'] = 'Modelling (2/4)'
                except (ValueError, IndexError) as e:
                    # Streamflow file doesn't have 2 complete entries yet.
                    weather_year_results[weather_year]['error'] = str(e)
                    print(e)
                    pass
                except OSError as e:
                    # Streamflow file empty
                    pass
            import_status_file = "/tmp/runs/run_{}_{}/output/dhsvm_status.log".format(treatment.id, weather_year)
            if Path(import_status_file).exists():
                weather_year_results[weather_year]['task_status'] = 'Importing (3/4)'
                with open(import_status_file, 'r') as f:
                    # The status log's last line holds the import percentage.
                    inlines=f.readlines()
                    weather_year_results[weather_year]['import_progress'] = int(inlines[-1])*settings.IMPORT_PROGRESS_FACTOR
            weather_year_results[weather_year]['progress'] = round(weather_year_results[weather_year]['model_progress']) + round(weather_year_results[weather_year]['import_progress'])
        else:
            weather_year_results[weather_year]['task_status'] = 'Complete'
            weather_year_results[weather_year]['progress'] = 100
        try:
            weather_year_results[weather_year]['age'] = treatment.job_age(weather_year).total_seconds()
        except AttributeError as e:
            # job_age returned None (no recorded job); treat as brand new.
            weather_year_results[weather_year]['age'] = 0
    return JsonResponse(weather_year_results)
'''
'''
def run_filter_query(filters):
    """Build a VegPlanningUnit queryset from a dict of UI filter values.

    Returns a ``(queryset, notes)`` tuple; ``notes`` is a list of user-facing
    messages (non-empty only when no focus area was supplied, in which case
    the queryset is intentionally empty).

    Fixes: indentation restored (source arrived whitespace-stripped);
    ``in d.keys()`` tightened to ``in d``.
    """
    from ucsrb.models import VegPlanningUnit, FocusArea, PourPoint
    # TODO: This would be nicer if it generically knew how to filter fields
    # by name, and what kinds of filters they were. For now, hard code.
    notes = []
    filter_dict = {}
    exclude_dicts = []
    if 'focus_area' in filters and 'focus_area_input' in filters and filters['focus_area']:
        focus_area = FocusArea.objects.get(pk=filters['focus_area_input'])
        veg_unit_type_field = settings.FOCUS_AREA_FIELD_ID_LOOKUP[focus_area.unit_type]
        if veg_unit_type_field:
            if veg_unit_type_field == 'dwnstream_ppt_id':
                # Match every pour point covered by the focus area, plus the
                # focus area's own unit id.
                discrete_basin_ids = [x.id for x in PourPoint.objects.filter(geometry__coveredby=focus_area.geometry)]
                if focus_area.unit_id not in discrete_basin_ids:
                    discrete_basin_ids.append(focus_area.unit_id)
                filter_dict['dwnstream_ppt_id__in'] = discrete_basin_ids
            else:
                filter_dict[veg_unit_type_field] = focus_area.unit_id
        else:
            filter_dict['geometry__intersects'] = focus_area.geometry
    else:
        notes = ['Please Filter By Focus Area']
        query = VegPlanningUnit.objects.filter(pk=None)  # deliberately empty
        return (query, notes)
    if 'private_own' in filters and filters['private_own']:
        exclude_dicts.append({'pub_priv_own__icontains': 'private'})  # real value is 'Private land'
    if 'pub_priv_own' in filters and filters['pub_priv_own']:
        if 'pub_priv_own_input' in filters:
            filter_dict['pub_priv_own__iexact'] = filters['pub_priv_own_input']
    if 'lsr_percent' in filters and filters['lsr_percent']:
        filter_dict['lsr_percent__lt'] = settings.LSR_THRESHOLD
    if 'has_critical_habitat' in filters and filters['has_critical_habitat']:
        filter_dict['percent_critical_habitat__lt'] = settings.CRIT_HAB_THRESHOLD
        exclude_dicts.append({'has_critical_habitat': True})
    # if 'area' in filters.keys() and filters['area']:
    #     # RDH 1/8/18: filter(geometry__area_range(...)) does not seem available.
    #     # query = query.filter(geometry__area__range=(filters['area_min'], filters['area_max']))
    #
    #     # RDH 1/9/18: Why can't we use the model's 'Run Filters' function?
    #     # RDH 1/26/18: Because the model object doesn't exist yet.
    #     pu_ids = [pu.pk for pu in query if pu.geometry.area <= float(filters['area_max']) and pu.geometry.area >= float(filters['area_min'])]
    #     query = (query.filter(pk__in=pu_ids))
    # if 'percent_roadless' in filters.keys() and filters['percent_roadless']:
    #     filter_dict['percent_roadless__lt'] = settings.ROADLESS_THRESHOLD
    if 'road_distance' in filters and filters['road_distance']:
        if 'road_distance_max' in filters:
            filter_dict['road_distance__lte'] = float(filters['road_distance_max'])
    if 'percent_wetland' in filters and filters['percent_wetland']:
        filter_dict['percent_wetland__lt'] = settings.WETLAND_THRESHOLD
    if 'percent_riparian' in filters and filters['percent_riparian']:
        filter_dict['percent_riparian__lt'] = settings.RIPARIAN_THRESHOLD
    if 'slope' in filters and filters['slope']:
        if 'slope_max' in filters:
            filter_dict['slope__lte'] = float(filters['slope_max'])
    if 'percent_fractional_coverage' in filters and filters['percent_fractional_coverage']:
        if 'percent_fractional_coverage_min' in filters:
            filter_dict['percent_fractional_coverage__gte'] = float(filters['percent_fractional_coverage_min'])
        if 'percent_fractional_coverage_max' in filters:
            filter_dict['percent_fractional_coverage__lte'] = float(filters['percent_fractional_coverage_max'])
    if 'percent_high_fire_risk_area' in filters and filters['percent_high_fire_risk_area']:
        filter_dict['percent_high_fire_risk_area__gt'] = settings.FIRE_RISK_THRESHOLD
    # topo_height_class_majority codes:
    # 11 and 21 = ridgetops
    # 12 and 22 = north facing slopes
    # 13 and 23 = south facing slopes
    # 14 and 24 = valley bottoms
    # 15 and 25 = east and west facing slopes
    if 'has_burned' in filters and filters['has_burned']:
        exclude_dicts.append({'has_burned': True})
    if 'has_wilderness_area' in filters and filters['has_wilderness_area']:
        exclude_dicts.append({'has_wilderness_area': True})
    exclusion_list = []
    if 'landform_type' in filters and filters['landform_type']:
        if 'landform_type_checkboxes_0' not in filters:
            if 'landform_type_include_north' not in filters or not filters['landform_type_include_north']:
                exclusion_list += [12, 22]
        if 'landform_type_checkboxes_1' not in filters:
            if 'landform_type_include_south' not in filters or not filters['landform_type_include_south']:
                exclusion_list += [13, 23]
        if 'landform_type_checkboxes_2' not in filters:
            if 'landform_type_include_ridgetop' not in filters or not filters['landform_type_include_ridgetop']:
                exclusion_list += [11, 21]
        if 'landform_type_checkboxes_3' not in filters:
            if 'landform_type_include_floors' not in filters or not filters['landform_type_include_floors']:
                exclusion_list += [14, 24]
        if 'landform_type_checkboxes_4' not in filters:
            if 'landform_type_include_east_west' not in filters or not filters['landform_type_include_east_west']:
                exclusion_list += [15, 25]
        if len(exclusion_list) > 0:
            exclude_dicts.append({'topo_height_class_majority__in': exclusion_list})
    query = VegPlanningUnit.objects.filter(**filter_dict)
    # We want all exclusions in 'exclude_dicts' to be applied independently,
    # not only excluding items that match all of them at once.
    for exclude_dict in exclude_dicts:
        query = query.exclude(**exclude_dict)
    return (query, notes)
def parse_filter_checkboxes(request):
    """Normalize landform checkbox GET params into boolean filter flags.

    Each ``landform_type_checkboxes_N`` GET value ('true'/'false') is mapped
    to a boolean ``landform_type_include_*`` key; a missing checkbox means
    unchecked (False). All other GET items pass through unchanged.
    """
    filter_dict = dict(request.GET.items())
    landform_checkboxes = {
        'landform_type_checkboxes_0': 'landform_type_include_north',
        'landform_type_checkboxes_1': 'landform_type_include_south',
        'landform_type_checkboxes_2': 'landform_type_include_ridgetop',
        'landform_type_checkboxes_3': 'landform_type_include_floors',
        'landform_type_checkboxes_4': 'landform_type_include_east_west',
    }
    for checkbox_key, flag_key in landform_checkboxes.items():
        # Only the literal string 'true' counts as checked; absent or any
        # other value (e.g. 'false') yields False — same as the original
        # if/else ladder, collapsed into one expression.
        filter_dict[flag_key] = filter_dict.get(checkbox_key) == 'true'
    return filter_dict
'''
'''
@cache_page(60 * 60)  # 1 hour of caching
def get_filter_count(request, query=False, notes=None):
    """Return the filtered planning units' total acreage as plain text.

    When *query* is not supplied, it is derived from the request's filter
    parameters. BUG FIX: the original used a mutable default ``notes=[]``
    (shared across calls); ``None`` is the safe equivalent here since
    *notes* is only ever (re)assigned.
    """
    if not query:
        filter_dict = parse_filter_checkboxes(request)
        (query, notes) = run_filter_query(filter_dict)
    area_acres = sum(pu.acres for pu in query)
    return HttpResponse("%d acres" % int(area_acres), status=200)
'''
'''
@cache_page(60 * 60)  # 1 hour of caching
def get_filter_results(request, query=False, notes=None):
    """Delegate to scenarios.get_filter_results, adding total acreage.

    BUG FIX: mutable default ``notes=[]`` replaced with ``None`` plus an
    explicit reset, so the default list is never shared between calls.
    """
    if notes is None:
        notes = []
    if not query:
        filter_dict = parse_filter_checkboxes(request)
        (query, notes) = run_filter_query(filter_dict)
    area_acres = sum(pu.acres for pu in query)
    from scenarios import views as scenarioViews
    return scenarioViews.get_filter_results(request, query, notes, {'area_acres': area_acres})
@cache_page(60 * 60)  # 1 hour of caching
def get_planningunits(request):
    """Serialize every VegPlanningUnit (geometry as WKT plus attributes) as JSON."""
    from ucsrb.models import VegPlanningUnit
    from json import dumps
    # Renamed from ``json`` — the original shadowed the stdlib module name.
    features = []
    # planningunits = PlanningUnit.objects.filter(avg_depth__lt=0.0, min_wind_speed_rev__isnull=False)
    planningunits = VegPlanningUnit.objects.all()
    for p_unit in planningunits:
        features.append({
            'id': p_unit.pk,
            'wkt': p_unit.geometry.wkt,
            'acres': p_unit.acres,
            'huc_2_id': p_unit.huc_2_id,
            'huc_4_id': p_unit.huc_4_id,
            'huc_6_id': p_unit.huc_6_id,
            'huc_8_id': p_unit.huc_8_id,
            'huc_10_id': p_unit.huc_10_id,
            'huc_12_id': p_unit.huc_12_id,
            'pub_priv_own': p_unit.pub_priv_own,
            'lsr_percent': p_unit.lsr_percent,
            'has_critical_habitat': p_unit.has_critical_habitat,
            'percent_critical_habitat': p_unit.percent_critical_habitat,
            # 'percent_roadless': p_unit.percent_roadless,
            'percent_wetland': p_unit.percent_wetland,
            'percent_riparian': p_unit.percent_riparian,
            'slope': p_unit.slope,
            'road_distance': p_unit.road_distance,
            'percent_fractional_coverage': p_unit.percent_fractional_coverage,
            'percent_high_fire_risk_area': p_unit.percent_high_fire_risk_area,
            'mgmt_alloc_code': p_unit.mgmt_alloc_code,
            'mgmt_description': p_unit.mgmt_description,
            'mgmt_unit_id': p_unit.mgmt_unit_id,
            'dwnstream_ppt_id': p_unit.dwnstream_ppt_id,
            'topo_height_class_majority': p_unit.topo_height_class_majority,
            'has_burned': p_unit.has_burned,
            'has_wilderness_area': p_unit.has_wilderness_area
        })
    return HttpResponse(dumps(features))
def get_scenarios(request, scenario_model='treatmentscenario'):
    """Thin wrapper: list scenarios of *scenario_model* for the 'ucsrb' app."""
    from scenarios.views import get_scenarios as scenarios_get_scenarios
    return scenarios_get_scenarios(request, scenario_model, 'ucsrb')
def demo(request, template='ucsrb/demo.html'):
    """Thin wrapper: render the shared scenarios demo view with our template."""
    from scenarios import views as scenarios_views
    return scenarios_views.demo(request, template)
| StarcoderdataPython |
3549859 | <gh_stars>0
# Copyright (c) 2019. Partners HealthCare, Harvard Medical School’s
# Department of Biomedical Informatics, <NAME>
#
# Developed by <NAME>, based on contributions by:
# <NAME> and other members of Division of Genetics,
# Brigham and Women's Hospital
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import boto3
import sortedcontainers
import vcf as pyvcf
from utils.case_utils import parse_all_fam_files, get_trios_for_family, \
get_bam_patterns
# S3 bucket and key prefix under which per-sample WES BAM files are stored;
# object keys are built as PREFIX + sample + '/' + file_name below.
BUCKET = "udn-joint-calling"
PREFIX = "cases/wes/"
def download_bams_for_trios(metadata: str, vcf_file: str, key: str, secret: str, dest: str):
    """Download BAM files from S3 for every complete trio present in the VCF.

    Args:
        metadata: path to the collection of family (fam) files.
        vcf_file: jointly-called VCF; families whose samples are not all
            present in it are skipped.
        key/secret: AWS credentials.
        dest: existing local destination directory.

    A file already present locally is re-downloaded only when its size
    differs from the corresponding S3 object's size.
    """
    s3 = boto3.client('s3', aws_access_key_id=key, aws_secret_access_key=secret)
    families = parse_all_fam_files(metadata)
    # families = {k: families[k] for k in ["udn0028", "udn0013"]}
    vcf_reader = pyvcf.Reader(filename=vcf_file)
    files = sortedcontainers.SortedDict()
    patterns = get_bam_patterns()
    for name in families:
        family = families[name]
        if not all([s in vcf_reader.samples for s in family]):
            continue  # incomplete family in the VCF — skip it
        trios = get_trios_for_family(family)
        for trio in trios.values():
            for sample in trio:
                for p in patterns:
                    file_name = p.format(sample=sample)
                    object_name = PREFIX + sample + '/' + file_name
                    files[file_name] = object_name
    print(files)
    print(len(files))
    if not os.path.isdir(dest):
        print("ERROR: destination directory does not exist {}".format(dest))
        return
    for f in files:
        object_name = files[f]
        target = os.path.join(dest, f)
        if os.path.exists(target):
            # Compare local size against the S3 object size to decide whether
            # the existing file can be kept.
            response = s3.list_objects(Bucket=BUCKET, Prefix=object_name)
            size = None
            for obj in response["Contents"]:
                if obj["Key"] != object_name:
                    continue
                size = obj["Size"]
                break
            file_size = os.path.getsize(target)
            if file_size == size:
                print("Info: {} already exists, skipping".format(target))
                continue
            print("Warning: {} already exists but has different size".format(target))
        print("Downloading: {} ==> {}".format(object_name, target))
        try:
            s3.download_file(BUCKET, object_name, target)
        except Exception as e:
            # Best-effort: report and carry on with the remaining files.
            print("ERROR: " + str(e))
    print("All Done.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Run BGM variant callers")
    parser.add_argument("-i", "--input", "--vcf", dest="vcf",
                        help="Input VCF file, required. Use jointly called VCF "
                             "for better results",
                        required=True)
    parser.add_argument("-f", "--families",
                        help="Path to collection of Family (fam) files, required",
                        required=True)
    parser.add_argument("--key", help="aws_access_key_id, required", required=False)
    parser.add_argument("--secret", help="aws_secret_access_key, required", required=False)
    parser.add_argument("--dest", help="Destination directory, required", required=True)
    args = parser.parse_args()
    print(args)
    if args.key:
        key = args.key
        secret = args.secret
    else:
        # Fall back to the [default] profile of ~/.aws/credentials.
        key = None
        secret = None
        configuration = "default"
        section = '[' + configuration + ']'
        f = os.path.join(os.path.expanduser('~'), ".aws", "credentials")
        with open(f) as credentials:
            # Tiny state machine: 1 = looking for the section header,
            # 2 = inside the section, 3 = reached the next section.
            state = 1
            for line in credentials:
                if state == 1:
                    if line.strip() == section:
                        state = 2
                    continue
                if state == 2:
                    text = line.strip()
                    if text.startswith('['):
                        state = 3
                        break
                    if "aws_access_key_id" in text:
                        # Split on the first '=' only, consistent with the
                        # secret below (values may themselves contain '=').
                        key = text.split('=', 1)[1].strip()
                    elif "aws_secret_access_key" in text:
                        secret = text.split('=', 1)[1].strip()
    download_bams_for_trios(args.families, args.vcf, key, secret, args.dest)
| StarcoderdataPython |
12840039 | <filename>App/Graphic/app.py
"""
This is the module containing the graphical user interface for
my Newegg tracker application
"""
from tkinter import *
from tkinter import ttk
import webbrowser
from PIL import ImageTk, Image #Tkinter's image management is outdated
# Root window: dark theme, fixed initial size.
root = Tk()
root.config(bg="#2D2D2D")
root.title("Newegg tracker by Joey-Boivin on GitHub")
root.geometry("1050x900")
# Outer frame hosts a canvas plus a vertical scrollbar so the item list scrolls.
main_frame = Frame(root)
main_frame.pack(fill=BOTH, expand=1)
my_canvas = Canvas(main_frame)
my_canvas.pack(side=LEFT, fill=BOTH, expand=1)
my_scrollbar = ttk.Scrollbar(main_frame, orient=VERTICAL, command=my_canvas.yview)
my_scrollbar.pack(side=RIGHT, fill=Y)
my_canvas.configure(bg='#2D2D2D', yscrollcommand=my_scrollbar.set)
# Keep the scrollable region in sync with the canvas contents on resize.
my_canvas.bind('<Configure>', lambda e: my_canvas.configure(scrollregion=my_canvas.bbox('all')))
# Inner frame actually holds the widgets; it is embedded in the canvas window.
second_frame = Frame(my_canvas)
second_frame.config(bg='#2D2D2D')
my_canvas.create_window((0,0), window=second_frame, anchor='nw')
class Application:
    """
    Graphical user interface listing tracked Newegg items.

    ``data`` is read as::

        {'number-of-items': N,
         'items': {tag: {'img-token': ..., 'history': {date: price, ...},
                         'metadata': {...}, 'product-name': ...}, ...}}

    (shape inferred from the lookups below — confirm against the caller).
    """

    def __init__(self, data: dict):
        self.data = data
        icons, price_widgets, name_widgets, meta_widgets, button_widgets = self.create_widgets()
        self.show_widgets(icons, price_widgets, name_widgets, meta_widgets, button_widgets)

    def create_widgets(self):
        """
        Creates all the widgets for the gui, including icons, name labels,
        metadata about the items, and a "show on Newegg" button.
        """
        icons = []
        price_widgets = []
        name_widgets = []
        meta_widgets = []
        newegg_button_widgets = []
        for tag, _data in self.data['items'].items():
            path = f'./Graphic/Images/{_data["img-token"]}.png'
            # Keep a reference to each PhotoImage — tkinter does not.
            img = ImageTk.PhotoImage(Image.open(path).resize((100, 100)))
            icons.append(img)
            price = list(_data['history'].values())[-1]  # most recent price
            price_widgets.append(Label(second_frame, text=price, bg='#2D2D2D', fg='white'))
            metadata = _data['metadata']
            display = ""
            if metadata:
                for meta_key, meta_value in metadata.items():
                    display += str(meta_key) + ': ' + str(meta_value)
                    # NOTE(review): newline appended per entry when there is
                    # more than one — placement inferred from the stripped
                    # source indentation; confirm.
                    if len(metadata.items()) > 1:
                        display += '\n'
            meta_widgets.append(Label(second_frame, text=display, bg='#2D2D2D', fg='white'))
            name_widgets.append(Label(second_frame, text=_data['product-name'], bg='#2D2D2D', fg='white'))
            # Bind ``tag`` as a default argument so each button opens its own item.
            newegg_button_widgets.append(
                Button(second_frame, text='See on Newegg.ca', bg='Black', fg='white',
                       command=lambda tag=tag: self.show_on_newegg(tag)))
        return icons, price_widgets, name_widgets, meta_widgets, newegg_button_widgets

    def show_widgets(self, icons: list, price_widgets: list,
                     name_widgets: list, meta_widgets: list, button_widgets: list):
        """Grid one row per item, then enter the Tk main loop (blocks)."""
        for i in range(int(self.data['number-of-items'])):
            panel = Label(second_frame, image=icons[i])
            panel.grid(row=i, column=0, padx='50', pady='10')
            name_widgets[i].grid(row=i, column=1, padx='50', pady='10')
            price_widgets[i].grid(row=i, column=2, padx='50', pady='10')
            meta_widgets[i].grid(row=i, column=3, padx='50', pady='10')
            button_widgets[i].grid(row=i, column=4, padx='40', pady='10')
        root.mainloop()

    @staticmethod
    def show_on_newegg(tag: str):
        """
        Opens a new tab on Newegg.ca with the tracked item.
        """
        webbrowser.open_new(f'www.newegg.ca/{tag}')
| StarcoderdataPython |
5172057 | <filename>src/bda/calendar/base/converter.py<gh_stars>1-10
from DateTime import DateTime
from math import floor
import datetime
import pytz
ONEDAYINMILLIS = 86400000.0  # milliseconds per day: 24 * 60 * 60 * 1000
def dt2DT(dt):
    """Convert Python's datetime to Zope's DateTime.

    Acts timezone-aware: the ISO representation (including any offset) is
    handed to DateTime unchanged. DateTime inputs pass through as-is.
    """
    if isinstance(dt, DateTime):
        return dt
    return DateTime(dt.isoformat())
def DT2dt(DT):
    """Convert Zope's DateTime to Python's datetime.

    Acts timezone-neutral: the outcome is normalized to UTC. datetime inputs
    pass through as-is. Fractional seconds are truncated.

    Fix: locals renamed from ``min``/``sec`` — ``min`` shadowed the builtin.
    """
    if isinstance(DT, datetime.datetime):
        return DT
    DTutc = DT.toZone('UTC')
    # seconds in DateTime.parts() is a float, so truncate to int
    year, month, day, hour, minute, second = DTutc.parts()[:6]
    second = int(second)
    utc = pytz.timezone('UTC')
    return datetime.datetime(year, month, day, hour, minute, second, tzinfo=utc)
def dt2UTCString(dt):
    """Build a string from the timetuple as UTC.

    Format: 'Y-M-D-H-M-S' with unpadded integer fields; inverse of
    ``dtFromUTCString``.
    """
    dt = dt.astimezone(pytz.timezone('UTC'))
    return '-'.join(['%d' % v for v in dt.timetuple()[:6]])
def dtFromUTCString(utcstr):
    """Build a UTC datetime from a 'Y-M-D-H-M-S' timetuple string.

    Inverse of ``dt2UTCString``.
    """
    y, m, d, h, mi, s = utcstr.split('-')
    return datetime.datetime(int(y), int(m), int(d), int(h), int(mi), int(s),
                             tzinfo=pytz.timezone('UTC'))
def dt2epochday(dt):
    """Number of days since epoch.

    Timezone gets a problem here: we normalize to the start of the day
    (``earliestTime``) so the same calendar day maps to the same day number,
    e.g. 2008-05-01T00:00:00+02:00 (CEST).
    """
    DT = dt2DT(dt)  # if possible replace this one and call in next line
    days = DT.earliestTime().millis() / ONEDAYINMILLIS
    return floor(days)
| StarcoderdataPython |
11214951 | <gh_stars>0
import logging
from flask import jsonify, request
from flask_jwt_extended import create_access_token, jwt_required
from api import db_api
from api.notifier import notifier_blueprint
from api.notifier.models import Contact, User
from api.notifier.login_utils import verify_pass
from api.notifier.telegram_utils import send_telegram_message
##########
# LOGIN
@notifier_blueprint.route("/login", methods=["POST"])
def login():
logging.debug('🔐 Login')
username = request.json.get("username", None)
password = request.json.get("password", None)
user = User.query.filter_by(username=username).first()
if verify_pass(password, user.password):
logging.debug('🔓 Login successful for : ' + username)
access_token = create_access_token(identity=username)
return jsonify(access_token=access_token)
else:
logging.error('🚨 Login error for : ' + username)
return jsonify({"msg": "Error during the login"}), 401
## Create user
@notifier_blueprint.route('/create_user', methods=["POST"])
@jwt_required()
def create_user():
    """Create a new application user (JWT-protected)."""
    logging.info('👤 Create user')
    # Renamed locals: the original reused ``user`` for both the username
    # string and the User model instance.
    username = request.json.get("username", None)
    passwd = request.json.get("password", None)
    # NOTE(review): original password argument was redacted upstream; stored
    # as given here — confirm hashing happens in the User model.
    user = User(username=username, password=passwd)
    db_api.session.add(user)
    db_api.session.commit()
    return jsonify({"msg": "👤 User created"}), 200
##########################
# INIT
@notifier_blueprint.route('/init')
def init():
    """Seed the database with the default admin user.

    NOTE(review): endpoint has no auth guard and the password literal was
    redacted upstream ('<PASSWORD>') — replace before deploying.
    """
    logging.info('🌟 INITIALIZATION DATA')
    user = User(username='admin', password='<PASSWORD>')
    db_api.session.add(user)
    db_api.session.commit()
    return jsonify({"msg": "🌟 Init data successful"}), 200
##############
# CONTACT MGMT
## Add contact
@notifier_blueprint.route('/add_contact', methods=["POST"])
@jwt_required()
def add_contact():
    """Register a new Telegram contact (JWT-protected)."""
    logging.debug('📇 Creating new contact')
    id_telegram = request.json.get("telegram_id", None)
    contact = Contact(telegram_id=id_telegram)
    db_api.session.add(contact)
    db_api.session.commit()
    return jsonify({"msg": "📇 Contact created successfully"}), 200
## Get all contacts
@notifier_blueprint.route('/contacts', methods=["GET"])
@jwt_required()
def get_contacts():
    """List all registered contacts as {id, telegram_id} dicts."""
    logging.debug('📇 Get list contacts')
    list_contacts = [
        {'id': contact.id, 'telegram_id': contact.telegram_id}
        for contact in Contact.query.all()
    ]
    return jsonify({"msg": "Success", "list_contacts": list_contacts}), 200
## Delete contact
@notifier_blueprint.route('/delete_contact', methods=["POST"])
@jwt_required()
def delete_contact():
    """Delete a contact by primary key (no-op if the id does not exist)."""
    logging.debug('📇 Delete contact')
    id_contact = request.json.get("id", None)
    Contact.query.filter_by(id=id_contact).delete()
    db_api.session.commit()
    return jsonify({"msg": "Success"}), 200
##########
# NOTIFIER
## Send notification to all contacts
@notifier_blueprint.route('/send_notification', methods=["POST"])
@jwt_required()
def sent_notification():
    """Send a Telegram message to every registered contact.

    NOTE: the 'sent_notification' spelling is kept — it is the Flask endpoint
    name (used by url_for), so renaming would break external references.
    """
    logging.debug('📨 Sending notification')
    message = request.json.get("message", None)
    for contact in Contact.query.all():
        send_telegram_message(contact, message)
    return jsonify({"msg": '📨 Message sent !'}), 200
| StarcoderdataPython |
333404 | from torndb import *
from toto.exceptions import *
from toto.session import *
from time import time, mktime
from datetime import datetime
from dbconnection import DBConnection
from uuid import uuid4
import toto.secret as secret
import base64
import uuid
import hmac
import hashlib
import random
import string
class MySQLdbSession(TotoSession):
    """Toto session stored in MySQL; account rows back the ``account`` table."""
    _account = None

    class MySQLdbAccount(TotoAccount):
        """Lazy property access to the session owner's ``account`` row."""

        def __init__(self, session):
            super(MySQLdbSession.MySQLdbAccount, self).__init__(session)
            self._properties['account_id'] = session.account_id

        def _load_property(self, *args):
            return self._session._db.get('select ' + ', '.join(args) + ' from account where account_id = %s', self._session.account_id)

        def _save_property(self, *args):
            self._session._db.execute('update account set ' + ', '.join(['%s = %%s' % k for k in args]) + ' where account_id = %s', *([self[k] for k in args] + [self._session.account_id,]))

        def __setitem__(self, key, value):
            # account_id is immutable — silently ignore attempts to change it.
            if key != 'account_id':
                super(MySQLdbSession.MySQLdbAccount, self).__setitem__(key, value)

    def __init__(self, db, session_data, session_cache=None):
        super(MySQLdbSession, self).__init__(db, session_data, session_cache)
        self.account_id = session_data['account_id']

    def get_account(self):
        """Return (and memoize) the account wrapper for this session."""
        if not self._account:
            self._account = MySQLdbSession.MySQLdbAccount(self)
        return self._account

    def session_data(self):
        """Serializable snapshot of this session's fields."""
        return {'user_id': self.user_id, 'expires': self.expires, 'session_id': self.session_id, 'state': TotoSession.dumps(self.state), 'account_id': self.account_id}

    def refresh(self):
        """Reload this session's fields from the cache or the database.

        BUG FIX: the original referenced an undefined local ``session_id``
        and ``self.db`` (the attribute is ``_db`` everywhere else in this
        class); both are corrected here.
        """
        session_data = self._refresh_cache() or self._db.get("select session.session_id, session.expires, session.state, account.user_id, account.account_id from session join account on account.account_id = session.account_id where session.session_id = %s", self.session_id)
        self.__init__(self._db, session_data, self._session_cache)

    def save(self):
        """Persist the (possibly mutated) session state."""
        if not self._verified:
            raise TotoException(ERROR_NOT_AUTHORIZED, "Not authorized")
        if not self._save_cache():
            self._db.execute("update session set state = %s where session_id = %s", TotoSession.dumps(self.state), self.session_id)
class MySQLdbConnection(DBConnection):
    """Toto DBConnection backed by MySQL (torndb), with optional session cache."""

    def create_tables(self, database):
        """Create the ``account`` and ``session`` tables if missing.

        account_id is a UUID (binary(16)) or auto-increment int depending on
        ``self.uuid_account_id``.
        """
        if not self.db.get('''show tables like "account"'''):
            self.db.execute(''.join([
                '''create table if not exists `account` (''',
                self.uuid_account_id and '''`account_id` binary(16) not null,''' or '''`account_id` int(8) unsigned not null auto_increment,''',
                '''`password` char(48) default null,
                `user_id` varchar(191) not null,
                primary key (`account_id`),
                unique key `user_id_unique` (`user_id`),
                index `user_id_password` (`user_id`, `password`)
                )''']))
        if not self.db.get('''show tables like "session"'''):
            self.db.execute(''.join([
                '''create table if not exists `session` (
                `session_id` char(22) not null,''',
                self.uuid_account_id and '''`account_id` binary(16) not null,''' or '''`account_id` int(8) unsigned not null,''',
                '''`expires` double not null,
                `state` blob,
                primary key (`session_id`),
                index (`expires`),
                foreign key (`account_id`) references `account`(`account_id`)
                )''']))

    def __init__(self, host, database, username, password, session_ttl=24*60*60*365, anon_session_ttl=24*60*60, session_renew=0, anon_session_renew=0, uuid_account_id=False):
        self.db = Connection(host, database, username, password)
        self.uuid_account_id = uuid_account_id
        self.create_tables(database)
        self.session_ttl = session_ttl
        self.anon_session_ttl = anon_session_ttl or self.session_ttl
        self.session_renew = session_renew or self.session_ttl
        self.anon_session_renew = anon_session_renew or self.anon_session_ttl

    def create_account(self, user_id, password, additional_values=None, **values):
        """Insert a new account row; raises if the user ID is empty or taken.

        BUG FIX: ``additional_values`` had a mutable default ``{}`` that was
        mutated via ``.pop`` — the shared default dict could accumulate state
        across calls. A fresh copy is taken instead.
        """
        if not user_id:
            raise TotoException(ERROR_INVALID_USER_ID, "Invalid user ID.")
        user_id = user_id.lower()
        if self.db.get("select account_id from account where user_id = %s", user_id):
            raise TotoException(ERROR_USER_ID_EXISTS, "User ID already in use.")
        additional_values = dict(additional_values) if additional_values else {}
        additional_values.pop('account_id', None)
        values.update(additional_values)
        values['user_id'] = user_id
        # NOTE(review): original stored-value expression was redacted
        # upstream; reconstructed from the identical pattern used in
        # change_password/generate_password below.
        values['password'] = secret.password_hash(password)
        if self.uuid_account_id:
            values['account_id'] = uuid4().bytes
        self.db.execute("insert into account (" + ', '.join([k for k in values]) + ") values (" + ','.join(['%s' for k in values]) + ")", *[values[k] for k in values])

    def _load_uncached_data(self, session_id):
        """Fetch unexpired session + account data for *session_id* from MySQL."""
        return self.db.get("select session.session_id, session.expires, session.state, account.user_id, account.account_id from session join account on account.account_id = session.account_id where session.session_id = %s and session.expires > %s", session_id, time())

    def create_session(self, user_id=None, password=None, verify_password=True):
        """Create a new session, authenticating when *user_id* is given.

        NOTE(review): with an empty user_id, ``account`` is falsy and the
        ``account['account_id']`` lookups below fail — anonymous sessions
        appear unsupported despite the anon TTL settings; confirm intent.
        """
        if not user_id:
            user_id = ''
        user_id = user_id.lower()
        account = user_id and self.db.get("select account_id, password from account where user_id = %s", user_id)
        if user_id and (not account or (verify_password and not secret.verify_password(password, account['password']))):
            raise TotoException(ERROR_USER_NOT_FOUND, "Invalid user ID or password")
        session_id = MySQLdbSession.generate_id()
        expires = time() + (user_id and self.session_ttl or self.anon_session_ttl)
        session_data = {'user_id': user_id, 'expires': expires, 'session_id': session_id, 'account_id': account['account_id']}
        if not self._cache_session_data(session_data):
            # Opportunistically purge this account's expired sessions.
            self.db.execute("delete from session where account_id = %s and expires <= %s", account['account_id'], time())
            self.db.execute("insert into session (account_id, expires, session_id) values (%s, %s, %s)", account['account_id'], expires, session_id)
        session = MySQLdbSession(self.db, session_data, self._session_cache)
        session._verified = True
        return session

    def retrieve_session(self, session_id, hmac_data=None, data=None):
        """Load a session, verify the request HMAC, and slide its expiry."""
        session_data = self._load_session_data(session_id)
        if not session_data:
            return None
        user_id = session_data['user_id']
        if user_id and data and hmac_data != base64.b64encode(hmac.new(str(user_id), data, hashlib.sha1).digest()):
            raise TotoException(ERROR_INVALID_HMAC, "Invalid HMAC")
        expires = time() + (user_id and self.session_renew or self.anon_session_renew)
        if session_data['expires'] < expires:
            session_data['expires'] = expires
            # Persist the extended expiry (nesting inferred from the
            # stripped source indentation — confirm).
            if not self._cache_session_data(session_data):
                self.db.execute("update session set expires = %s where session_id = %s", session_data['expires'], session_id)
        session = MySQLdbSession(self.db, session_data, self._session_cache)
        session._verified = True
        return session

    def remove_session(self, session_id):
        """Delete one session from the cache or the database."""
        if self._session_cache:
            self._session_cache.remove_session(session_id)
        else:
            self.db.execute("delete from session where session_id = %s", session_id)

    def clear_sessions(self, user_id):
        """Delete every session belonging to *user_id*."""
        user_id = user_id.lower()
        self.db.execute("delete from session using session join account on account.account_id = session.account_id where account.user_id = %s", user_id)

    def change_password(self, user_id, password, new_password):
        """Verify *password* and replace it with *new_password*; drops sessions."""
        user_id = user_id.lower()
        account = self.db.get("select account_id, user_id, password from account where user_id = %s", user_id)
        if not account or not secret.verify_password(password, account['password']):
            raise TotoException(ERROR_USER_NOT_FOUND, "Invalid user ID or password")
        self.db.execute("update account set password = %s where account_id = %s", secret.password_hash(new_password), account['account_id'])
        self.clear_sessions(user_id)

    def generate_password(self, user_id):
        """Reset *user_id*'s password to a random 10-char value and return it.

        NOTE(review): uses ``random`` — not cryptographically secure; the
        ``secrets`` module would be stronger.
        """
        user_id = user_id.lower()
        account = self.db.get("select account_id, user_id from account where user_id = %s", user_id)
        if not account:
            raise TotoException(ERROR_USER_NOT_FOUND, "Invalid user ID")
        pass_chars = string.ascii_letters + string.digits
        # ``range`` replaces py2-only ``xrange``; identical behavior here.
        new_password = ''.join([random.choice(pass_chars) for x in range(10)])
        self.db.execute("update account set password = %s where account_id = %s", secret.password_hash(new_password), account['account_id'])
        self.clear_sessions(user_id)
        return new_password
| StarcoderdataPython |
1909007 | import random
from random import randrange
import json
class Player:
    """A randomly generated player: name, race, and three combat stats.

    ``overallOfPlayer`` is the arithmetic mean of stamina, deffence and atack
    (attribute spellings kept — they are part of the public interface and the
    JSON output).
    """

    def __init__(self, name, race, stamina, deffence, atack):
        self.nameOfPlayer = name
        self.raceOfPlayer = race
        self.staminaOfPlayer = stamina
        self.deffenceOfPlayer = deffence
        self.atackOfPlayer = atack
        self.overallOfPlayer = (stamina + deffence + atack) / 3

    def toJson(self):
        """Return a plain-dict representation suitable for json.dumps."""
        return {'nameOfPlayer': self.nameOfPlayer, 'raceOfPlayer': self.raceOfPlayer,
                'playerStamina': self.staminaOfPlayer, 'playerDeffence': self.deffenceOfPlayer,
                'playerAtack': self.atackOfPlayer, 'overallOfPlayer': self.overallOfPlayer}
def nameGenrator():
    """Pick a random 'First Last' name into module-global ``nameOfPlayer``.

    NOTE: the 'nameGenrator' spelling is kept — call sites below use it.
    """
    global nameOfPlayer
    first_names = ("Elijea", "Jacser", "Kayen", "Sylas", "Henry", "Trilla", "Missaos", "Obum")
    last_names = ("Ridwaan", "Mudric", "Arkaan", "Tetholin", "Jarnoli", "Madroot", "Groot", "Tony")
    nameOfPlayer = random.choice(first_names) + " " + random.choice(last_names)


nameOfPlayer = ""  # overwritten by nameGenrator()
def raceGenerator():
    """Pick a random race into module-global ``raceOfPlayer``."""
    global raceOfPlayer
    races = ("Human", "Asgardian", "Frost Giant", "Flora Colossus", "Kree", "Centaurian", "The Sovereign")
    raceOfPlayer = random.choice(races)


raceOfPlayer = ""  # overwritten by raceGenerator()
def statsGenerator():
    """Roll random stamina/deffence/atack stats (30..99) into module globals."""
    global playerStamina
    global playerDeffence
    global playerAtack
    playerStamina = randrange(30, 100)
    playerDeffence = randrange(30, 100)
    playerAtack = randrange(30, 100)


playerStamina = 0   # overwritten by statsGenerator()
playerDeffence = 0
playerAtack = 0
# Per-race tallies, incremented once per randomly generated player below.
humanRace = 0
asgardianRace = 0
frostGiantRace = 0
kreeRace = 0
floraColossusRace = 0
centaurianRace = 0
theSoverign = 0  # spelling kept as-is; referenced below by this name
def _generate_random_player():
    """Roll a fresh random Player and tally its race in the module counters.

    Replaces seven identical copy-pasted stanzas; the observable result is
    the same — one Player per call, and exactly one race counter bumped.
    """
    global humanRace, asgardianRace, frostGiantRace, floraColossusRace
    global centaurianRace, theSoverign, kreeRace
    nameGenrator()
    statsGenerator()
    raceGenerator()
    player = Player(nameOfPlayer, raceOfPlayer, playerStamina, playerDeffence, playerAtack)
    if raceOfPlayer == "Human":
        humanRace += 1
    elif raceOfPlayer == "Asgardian":
        asgardianRace += 1
    elif raceOfPlayer == "Frost Giant":
        frostGiantRace += 1
    elif raceOfPlayer == "Flora Colossus":
        floraColossusRace += 1
    elif raceOfPlayer == "Centaurian":
        centaurianRace += 1
    elif raceOfPlayer == "The Sovereign":
        theSoverign += 1
    elif raceOfPlayer == "Kree":
        kreeRace += 1
    return player


# Module-level names preserved for any downstream consumers.
random_player1 = _generate_random_player()
random_player2 = _generate_random_player()
random_player3 = _generate_random_player()
random_player4 = _generate_random_player()
random_player5 = _generate_random_player()
random_player6 = _generate_random_player()
random_player7 = _generate_random_player()
| StarcoderdataPython |
1728739 | <filename>src/gui/components/table/table.py<gh_stars>0
"""
This is the table that is used in the main part table
"""
from tkinter import Entry, Frame
class Table(Frame):
    """
    Main table structure for the parts information section
    Methods:
    + add_entry = add an entry with no label
    + remove_entry = remove entry with no label
    + remove_last = remove the last row of table
    + add_item = add an entry with a label
    - unrender = remove table from view
    - render = show table
    Attributes:
    + rows
    + row
    """
    def __init__(self, master, debug=False):
        # debug: when True, errors in remove_entry are printed instead of silenced.
        self.debug = debug
        Frame.__init__(self, master)
        # Let the entry column (column 1) absorb horizontal resizing.
        self.columnconfigure(1, weight=1)
        # rows holds only the Entry widgets (NOT the labels created by add_item).
        self.rows = []
        # Next grid row used by add_item; starts at 1.
        self.row = 1
    def add_entry(self):
        """add entry with no label"""
        entry = Entry(self)
        self.rows.append(entry)
        # Re-grid everything so the new entry appears in order.
        self.__unrender()
        self.__render()
    def remove_entry(self, ind):
        """remove specified entry by index"""
        self.__unrender()
        try:
            self.rows.pop(ind)
        except IndexError as e:
            # Out-of-range indices are ignored (reported only in debug mode).
            if self.debug:
                print(f"error in Table - remove_entry \n{e}")
        self.__render()
    def remove_last(self):
        """remove last item of table"""
        self.remove_entry(-1)
    def add_item(self, title):
        """add item with label"""
        # The "label" is a disabled Entry so it visually matches the table cells.
        label = Entry(self, width=23, font=("Arial", 10), justify="center")
        label.insert(0, title)
        label.configure(state="disabled")
        entry = Entry(self)
        label.grid(row=self.row, column=0, padx=(10, 0), sticky="nsew")
        entry.grid(row=self.row, column=1, padx=(0, 10), sticky="nsew")
        # NOTE(review): only the entry is tracked; the label widget is never
        # stored, so __unrender/__render (and remove_*) leave labels behind
        # and re-grid entries with columnspan=2 over them -- confirm intended.
        self.rows.append(entry)
        self.row += 1
    def __unrender(self):
        # Remove every tracked entry from the grid (labels are not tracked).
        for i in self.rows:
            i.grid_forget()
    def __render(self):
        index = 0
        for entry in self.rows:
            # NOTE(review): "stick" relies on Tcl's unique-prefix abbreviation
            # of the "sticky" grid option -- consider normalizing to sticky=.
            entry.grid(row=index, column=0, columnspan=2, padx=10, stick="nsew")
            index += 1
| StarcoderdataPython |
6622807 | '''
A module for hash unordered elements
'''
from typing import Union
from collections import OrderedDict
import hashlib
import json
import warnings
class UnorderedSha256:
    '''
    Using SHA256 on unordered elements.

    Each element's SHA-256 digest is folded byte-wise (mod 256) into a
    32-byte accumulator, so the final digest is independent of the order
    in which elements are supplied.
    '''

    def __init__(self):
        # One byte-sized accumulator cell per SHA-256 digest byte.
        self.result = [0] * 32

    def update_data(self, data: Union[bytes, bytearray, memoryview]):
        '''Hash *data* with SHA-256 and fold the digest into the accumulator.'''
        digest = hashlib.sha256(data).digest()
        self.update_hash(digest)

    def update_hash(self, hashvalue):
        '''Fold an existing 32-byte SHA-256 digest (bytes) into the accumulator.'''
        # bytes already iterate as ints; no intermediate list() is needed.
        for i, bit in enumerate(hashvalue):
            self.result[i] = (self.result[i] + bit) & 0xFF

    def digest(self) -> bytes:
        '''Return the accumulated unordered hashvalue as raw bytes.'''
        return bytes(self.result)

    def hexdigest(self) -> str:
        '''Return the accumulated unordered hashvalue as a hex string.'''
        return bytes(self.result).hex()
def dumps_json(obj) -> bytes:
    '''Generate identifying bytes for *obj* via JSON (scalars via str()).'''
    scalar_types = (str, int, float, bool)
    if isinstance(obj, scalar_types):
        encoded = str(obj)
    else:
        encoded = json.dumps(obj, sort_keys=True)
    return encoded.encode('utf-8')
def dumps(obj) -> bytes:
    '''Generate bytes to identify the object by repr of its normalized form.'''
    normalized = convert_obj(obj)
    return simple_dumps(normalized)


def simple_dumps(obj) -> bytes:
    '''repr-serialize *obj* to UTF-8 bytes.'''
    return repr(obj).encode('utf-8')


def convert_obj(obj):
    '''Normalize *obj* into a repr-stable structure (type tag + ordered items).'''
    # OrderedDict must be checked before the generic dispatch: it is a dict
    # subclass and would otherwise match the plain-dict handler first.
    if isinstance(obj, OrderedDict):
        return convert_ordered_dict(obj)
    for cls in special_type_processing_functions:
        if isinstance(obj, cls):
            return special_type_processing_functions[cls](obj)
    if not isinstance(obj, common_types):
        warnings.warn("It's unsupported to dumps a %s object. The result may not be expected." % type(obj).__name__)
    return obj


def convert_dict(obj):
    # Plain dicts are sorted by key so equal dicts dump identically.
    pairs = sorted(obj.items())
    return type(obj), [(convert_obj(key), convert_obj(value)) for key, value in pairs]


def convert_ordered_dict(obj):
    # OrderedDict keeps its insertion order (order is significant).
    return type(obj), [(convert_obj(key), convert_obj(value)) for key, value in obj.items()]


def convert_ordered_iterable(obj):
    converted = [convert_obj(element) for element in obj]
    return type(obj), converted


def convert_unordered_iterable(obj):
    # Sets/frozensets have no stable iteration order; sort before dumping.
    converted = [convert_obj(element) for element in sorted(obj)]
    return type(obj), converted


special_type_processing_functions = {
    tuple: convert_ordered_iterable,
    list: convert_ordered_iterable,
    set: convert_unordered_iterable,
    frozenset: convert_unordered_iterable,
    dict: convert_dict,
    OrderedDict: convert_ordered_dict
}

common_types = (str, int, float, bytes, bytearray, bool, type, type(None))
| StarcoderdataPython |
3365681 | <reponame>fanglab/6mASCOPE<filename>SLpackage/private/pacbio/pythonpkgs/pysiv2/lib/python2.7/site-packages/pysiv2/custom/test_ccs.py
"""
Test output of Circular Consensus Sequence (CCS) pipelines.
"""
# XXX currently the pipeline output is defined as the pbreports report, which
# has the various statistics pre-calculated. It is difficult to get at the
# final dataset directly, since the file ID will be different depending on
# whether the pipeline was run in chunked mode or not.
import subprocess
import tempfile
import unittest
import logging
import shutil
import json
import csv
import os.path as op
import os
import sys
import pysam
from pbcommand.models import FileTypes
from pbcore.io import ConsensusReadSet, FastaReader, FastaWriter, IndexedBamReader, SubreadSet
from pbsmrtpipe.testkit.core.test_task_options import LoadResolvedToolContractMixin
from pysiv2.custom.base import (TestReportStatistics, TestValuesLoader)
from pysiv2.utils import FastxStats
from pysiv2.custom import utils as u
from pysiv2.io.datastore import DataStore
MIN_CCS_MEAN_ACCURACY = 0.90
NRECORDS_MAX_ITER = 100
log = logging.getLogger(__name__)
class TestCCS(TestReportStatistics, LoadResolvedToolContractMixin):
    """
    Test output of the ``ccs`` report from ``pbreports``, plus additional
    validity tests on CCS output files.
    """
    # Identifiers tying this suite to the 'ccs' pbreports report, and the
    # metric ids read from it by the base class.
    REPORT_ID = "ccs"
    TEST_ID = "ccs"
    METRIC_IDS = [
        "total_number_of_ccs_bases",
        "mean_ccs_readlength",
        "number_of_ccs_reads",
        "mean_ccs_num_passes",
    ]
    @classmethod
    def _get_output_file(cls, file_type):
        # Locate the zipped bam2fasta/bam2fastq export in the datastore by
        # matching the ZIP file type and the extension embedded in the file id.
        for file_id, file_info in cls.datastore.get_file_dict().iteritems():
            if (file_info.file_type_id == FileTypes.ZIP.file_type_id and
                file_type.ext in file_info.file_id):
                return file_info.path
        raise IOError("bam2{f} zipped file not found".format(
            f=file_type.ext))
    @classmethod
    def getMetrics(cls):
        super(TestCCS, cls).getMetrics()
        # Find the input SubreadSet: prefer the one recorded in the datastore,
        # fall back to the pipeline entrypoint.
        subread_set = None
        for f_id, f_info in cls.datastore.get_file_dict().iteritems():
            if f_info.file_type_id == FileTypes.DS_SUBREADS.file_type_id:
                subread_set = f_info.path
                break
        if subread_set is None:
            subread_set = cls.entrypoints.data.get("eid_subread", None)
        # Record barcoding state and the BarcodeSet of the first external
        # resource (only the first is inspected).
        with SubreadSet(subread_set) as subreads:
            cls.is_barcoded = subreads.isBarcoded
            for ext_res in subreads.externalResources:
                cls.barcode_set = ext_res.barcodes
                break
        # The final (non-chunked) ConsensusReadSet produced by the pipeline.
        for file_id, file_info in cls.datastore.get_file_dict().iteritems():
            if file_info.is_chunked:
                continue
            if file_info.file_type_id == FileTypes.DS_CCS.file_type_id:
                cls.final_ccs_file = file_info.path
                break
        # Pull the exported FASTA/FASTQ archives and fold their summary
        # statistics into the metric dict under per-format keys.
        fastq_file = fasta_file = None
        fastq_file = cls._get_output_file(FileTypes.FASTQ)
        fasta_file = cls._get_output_file(FileTypes.FASTA)
        assert not None in [fastq_file, fasta_file]
        for file_type, seq_file in zip([FileTypes.FASTQ, FileTypes.FASTA],
                                       [fastq_file, fasta_file]):
            stats = FastxStats(seq_file, file_type,
                               is_barcoded=cls.is_barcoded).get_stats()
            cls.metric_dict.update({
                '{i}_total_number_of_ccs_bases'.format(i=file_type.ext): stats['sum'],
                '{i}_mean_ccs_readlength'.format(i=file_type.ext): stats['avg'],
                '{i}_number_of_ccs_reads'.format(i=file_type.ext): stats['num'],
            })
        cls.loadRtcs() # from LoadResolvedToolContractMixin
    def _compare_fastx_output(self, file_type):
        # Compare the per-format ('fasta'/'fastq') metrics gathered in
        # getMetrics against the expected values for the base metric names.
        for metric in ["total_number_of_ccs_bases",
                       "mean_ccs_readlength",
                       "number_of_ccs_reads"]:
            for expected, OP in self._expected_values_and_operators(metric):
                key = "%s_%s" % (file_type, metric)
                value = self.metric_dict[key]
                eqn = "%s .%s. %s" % (value, OP.__name__, expected)
                logging.info("Comparing values of %s: %s" % (metric, eqn))
                self.assertTrue(OP(value, expected),
                                "FAILED {i}: ! {e}".format(i=metric, e=eqn))
    def test_ccs_fastq_output(self):
        """
        Check that the CCS dataset and Fastq file have the same basic stats
        """
        self._compare_fastx_output("fastq")
    def test_ccs_fasta_output(self):
        """
        Check that the CCS dataset and FASTA file have the same basic stats
        """
        self._compare_fastx_output("fasta")
    @unittest.skip("DISABLED")
    def test_validity_ccs_accuracy(self):
        """
        check that ccs accuracy is greater than a preset threshold. this can
        be specified in the 'ccs' section of test_values.json, otherwise the
        permissive default value of 0.90 will be used.
        """
        with ConsensusReadSet(self.final_ccs_file) as ds:
            values_sum = n_values = 0
            for rr in ds.resourceReaders():
                values_sum += rr.readQual.sum()
                n_values += len(rr)
            # XXX see BamAlignment.readScore docstring for explanation
            readScore = values_sum / n_values
        vmin = MIN_CCS_MEAN_ACCURACY
        if "min_ccs_mean_accuracy" in self.expected_values:
            vmin = self.expected_values["min_ccs_mean_accuracy"]
        self.assertGreater(readScore, vmin)
    def test_ccs_bam_index(self):
        """
        Test that the output includes .pbi index file(s).
        """
        with ConsensusReadSet(self.final_ccs_file) as ds:
            ds.assertIndexed()
    def test_ccs_barcoding_propagation(self):
        """
        Test that any BarcodeSet defined as an external resource of the
        subreads BAM file(s) in the input SubreadSet is also an external
        resource of the output ConsensusReadSet.
        """
        if self.is_barcoded:
            with ConsensusReadSet(self.final_ccs_file) as ccs:
                self.assertTrue(ccs.isBarcoded)
                for ext_res_out in ccs.externalResources:
                    self.assertEqual(self.barcode_set,
                                     ext_res_out.barcodes)
                if "barcodes" in self.expected_values:
                    barcodes = set()
                    for bam in ccs.resourceReaders():
                        if len(bam) == 0:
                            continue
                        # CCS reads are symmetric: forward and reverse barcode
                        # indices must match for every record.
                        bc_eq = bam.pbi.bcForward == bam.pbi.bcReverse
                        self.assertTrue(bc_eq.all())
                        barcodes.update(set(list(bam.pbi.bcForward)))
                    self.assertEqual(sorted(list(barcodes)),
                                     self.expected_values["barcodes"])
        else:
            raise unittest.SkipTest("SubreadSet was not barcoded, skipping")
    def test_ccs_report_barcode_table(self):
        """
        Check for barcoding table in CCS report (if input was barcoded).
        """
        if self.is_barcoded:
            bc_table = self.report.tables[1]
            self.assertEqual(bc_table.id, "ccs_barcodes")
            for col in bc_table.columns:
                if col.id == "number_of_ccs_reads":
                    self.assertTrue(all([x>0 for x in col.values]))
                    break
            else:
                self.fail("Can't find column number_of_ccs_reads")
        else:
            raise unittest.SkipTest("SubreadSet was not barcoded, skipping")
    def test_ccs_bam_np_is_at_least_npasses(self):
        """
        Check that the number of passes of each read in the ConsensusReadSet
        output is at least equal to the minimum specified in the resolved
        tool contract.
        """
        nchecked = nskipped = 0
        for rtc in self.resolved_tool_contracts:
            if rtc.task.task_id == "pbccs.tasks.ccs":
                min_passes = rtc.task.options["pbccs.task_options.min_passes"]
                with ConsensusReadSet(rtc.task.output_files[0]) as ccs:
                    for bam in ccs.resourceReaders():
                        # Large BAMs are skipped to keep the test fast.
                        if len(bam) > NRECORDS_MAX_ITER:
                            nskipped += 1
                        else:
                            for rec in bam:
                                np = rec.peer.opt("np")
                                self.assertTrue(np >= min_passes,
                                                "{r} has np {n} < {e}".format(r=rec.qName,
                                                                              n=np,
                                                                              e=min_passes))
                                nchecked += 1
        if nchecked == 0:
            if nskipped == 0:
                raise unittest.SkipTest("No CCS BAM files found")
            else:
                raise unittest.SkipTest("File size over limit - 'np' not checked")
| StarcoderdataPython |
24261 | <reponame>megansimwei/pydex
from matplotlib import pyplot as plt
import numpy as np
class TrellisPlotter:
    """Trellis (small-multiples) plotter for 4-column data.

    Columns 0/1 of ``data`` are plotted inside each panel; columns 2/3 are
    binned into ``intervals[0] x intervals[1]`` groups, one panel per group.
    The top row and right column of the figure are thin "outer axes" that
    show which bin each panel corresponds to.
    """

    def __init__(self):
        self.cmap = None
        self.colorbar_label_rotation = None
        self.colobar_tick_fontsize = None
        self.grouped_fun = None
        self.fun = None
        self.data = None
        self.data_sets = None
        self.intervals = None
        # options
        self.figsize = None
        self.constrained_layout = False
        self.marker = "o"
        self.markersize = None
        self.markeralpha = None
        self.n_xticks = 3
        self.n_yticks = 3
        self.xspace = 0.3
        self.yspace = 0.3
        self.xlabel = ""
        self.ylabel = ""
        self.xticks = None
        self.yticks = None
        self.xticklabels = None
        self.yticklabels = None
        self.oaxis_size = 0.20
        self.oaxis_n_xticks = 3
        self.oaxis_n_yticks = 3
        self.oaxis_xticks = None
        self.oaxis_yticks = None
        self.oaxis_xticklabels = None
        self.oaxis_yticklabels = None
        self.oaxis_bar_transparency = 0.6
        self.oaxis_xlabel = ""
        self.oaxis_ylabel = ""
        self.n_colorbar_ticks = 3
        # computed
        self.bounds = None
        self.bins = None
        self.group_bins = None
        self.n_groups = None
        self.grouped_data = None
        # private
        self._multiple_data_sets = None

    def initialize(self):
        """Validate ``data``, ``intervals``, and ``fun``; derive group count.

        Raises:
            SyntaxError: on any malformed input (kept for API compatibility).
        """
        if isinstance(self.data, list):
            self._multiple_data_sets = True
        else:
            self._multiple_data_sets = False
        # check data's validity
        if self._multiple_data_sets:
            if not np.all([isinstance(datum, np.ndarray) for datum in self.data]):
                raise SyntaxError("All data sets must be a numpy array.")
            if not np.all([datum.ndim == 2 for datum in self.data]):
                raise SyntaxError("All data sets must be a 2D-array.")
            if not np.all([
                datum.shape[1] == self.data[0].shape[1]
                for datum in self.data
            ]):
                raise SyntaxError(
                    "Dimensions of points in the different data sets are inconsistent"
                )
        else:
            if not isinstance(self.data, np.ndarray):
                raise SyntaxError("Data must be a numpy array.")
            if self.data.ndim != 2:
                raise SyntaxError("Data must be a 2D-array.")
        # check interval's validity
        if not isinstance(self.intervals, np.ndarray):
            raise SyntaxError("Intervals must be a numpy array.")
        if self.intervals.ndim != 1:
            raise SyntaxError("Intervals must be a 1D-array.")
        # check if interval agrees with given data (two columns are plotted,
        # the rest are grouped)
        if self._multiple_data_sets:
            if self.intervals.shape[0] != (self.data[0].shape[1] - 2):
                raise SyntaxError("Dimensions in given interval and data does not agree.")
        else:
            if self.intervals.shape[0] != (self.data.shape[1] - 2):
                raise SyntaxError("Dimensions in given interval and data does not agree.")
        self.n_groups = np.prod(self.intervals)
        if self._multiple_data_sets:
            self.data_sets = self.data
        if self.fun is not None:
            # NOTE(review): with multiple data sets self.data is a list here,
            # so the shape check below would fail -- fun with multiple data
            # sets is effectively unsupported; confirm before relying on it.
            if not isinstance(self.fun, np.ndarray):
                raise SyntaxError("Function values must be a numpy array.")
            if self.fun.ndim != 1:
                raise SyntaxError(f"Function values must be 1D array")
            if self.fun.size != self.data.shape[0]:
                raise SyntaxError(f"Length of function values and given data points "
                                  f"are inconsistent.")
        return None

    def scatter(self):
        """Render the trellis scatter figure and block on ``plt.show()``."""
        self.initialize()
        if not self._multiple_data_sets:
            self.classify_data()
            # Extra thin column (right) and row (top) host the outer axes.
            width_ratios = np.ones(self.intervals[1] + 1)
            width_ratios[-1] = self.oaxis_size
            height_ratios = np.ones(self.intervals[0] + 1)
            height_ratios[0] = self.oaxis_size
            fig, axes = plt.subplots(
                nrows=self.intervals[0]+1,
                ncols=self.intervals[1]+1,
                gridspec_kw={
                    "wspace": self.xspace,
                    "hspace": self.yspace,
                    "width_ratios": width_ratios,
                    "height_ratios": height_ratios,
                },
                figsize=self.figsize,
                constrained_layout=self.constrained_layout
            )
            for pos, axis in np.ndenumerate(axes):
                r, c = pos
                # top-right corner cell is unused
                if r == 0 and c == self.intervals[1]:
                    fig.delaxes(axis)
                # horizontal outer axis (top row): shows the column-bin extent
                elif r == 0 and c != self.intervals[1]:
                    # handle limits
                    axis.set_xlim([self.bounds[3, 0], self.bounds[3, 1]])
                    axis.set_ylim([0, 1])
                    # handle ticks
                    axis.set_yticks([])
                    axis.xaxis.tick_top()
                    if c % 2 == 0:
                        self.oaxis_xticks = np.linspace(
                            self.bounds[3, 0],
                            self.bounds[3, 1],
                            self.oaxis_n_xticks
                        )
                        axis.set_xticks(self.oaxis_xticks)
                        if self.oaxis_xticklabels is None:
                            self.oaxis_xticklabels = [
                                f"{tick:.2f}" for tick in self.oaxis_xticks
                            ]
                        axis.xaxis.set_ticklabels(self.oaxis_xticklabels)
                    else:
                        axis.set_xticks([])
                    # draw bar marking this column's bin
                    axis.fill_between(
                        x=[
                            self.group_bins[0, c, 1, 0],
                            self.group_bins[0, c, 1, 1],
                        ],
                        y1=[1, 1],
                        y2=[0, 0],
                        facecolor="gray",
                        alpha=1 - self.oaxis_bar_transparency
                    )
                    # add label on every other column
                    if c % 2 == 1:
                        axis.annotate(
                            s=self.oaxis_xlabel,
                            xy=(np.mean(self.bounds[3, :]), 0.5),
                            ha="center",
                            va="center",
                        )
                # vertical outer axis (right column): shows the row-bin extent
                elif r != 0 and c == self.intervals[1]:
                    axis.set_xlim([0, 1])
                    axis.set_ylim([self.bounds[2, 0], self.bounds[2, 1]])
                    # handle ticks
                    axis.set_xticks([])
                    axis.yaxis.tick_right()
                    if r % 2 == 0:
                        if self.oaxis_yticks is None:
                            self.oaxis_yticks = np.linspace(
                                self.bounds[2, 0], self.bounds[2, 1], self.oaxis_n_yticks
                            )
                        axis.set_yticks(self.oaxis_yticks)
                        if self.oaxis_yticklabels is None:
                            self.oaxis_yticklabels = [f"{tick:.2f}"
                                                      for tick in self.oaxis_yticks]
                        axis.yaxis.set_ticklabels(self.oaxis_yticklabels)
                    else:
                        axis.set_yticks([])
                    # draw bar marking this row's bin
                    axis.fill_between(
                        x=[0, 1],
                        y1=[
                            self.group_bins[r-1, 0, 0, 1],
                            self.group_bins[r-1, 0, 0, 1],
                        ],
                        y2=[
                            self.group_bins[r-1, 0, 0, 0],
                            self.group_bins[r-1, 0, 0, 0],
                        ],
                        facecolor="gray",
                        alpha=1 - self.oaxis_bar_transparency
                    )
                    # add label on every other row
                    if r % 2 == 0:
                        axis.annotate(
                            s=self.oaxis_ylabel,
                            xy=(0.50, np.mean(self.bounds[2, :])),
                            verticalalignment="center",
                            horizontalalignment="center",
                            rotation=270
                        )
                # inner panel: scatter of columns 0/1 for this group
                elif r != 0 and c != self.intervals[1]:
                    axis.scatter(
                        self.grouped_data[r-1, c, :, 0],
                        self.grouped_data[r-1, c, :, 1],
                        marker=self.marker,
                        s=self.markersize,
                        alpha=self.markeralpha,
                    )
                    # pad the limits by 10% of the data range
                    axis.set_xlim([
                        self.bounds[0, 0] - 0.10 * (self.bounds[0, 1] - self.bounds[0, 0]),
                        self.bounds[0, 1] + 0.10 * (self.bounds[0, 1] - self.bounds[0, 0]),
                    ])
                    axis.set_ylim([
                        self.bounds[1, 0] - 0.10 * (self.bounds[1, 1] - self.bounds[1, 0]),
                        self.bounds[1, 1] + 0.10 * (self.bounds[1, 1] - self.bounds[1, 0]),
                    ])
                    # ticks only on the bottom row / left column, every other panel
                    if c % 2 == 0 and r == self.intervals[0]:
                        if self.xticks is None:
                            self.xticks = np.linspace(
                                self.bounds[0, 0], self.bounds[0, 1], self.n_xticks
                            )
                        axis.set_xticks(self.xticks)
                        if self.xticklabels is None:
                            self.xticklabels = [f"{ticks:.2f}" for ticks in self.xticks]
                        axis.xaxis.set_ticklabels(self.xticklabels)
                    else:
                        axis.set_xticks([])
                    if r % 2 == 0 and c == 0:
                        if self.yticks is None:
                            self.yticks = np.linspace(
                                self.bounds[1, 0], self.bounds[1, 1], self.n_yticks
                            )
                        axis.set_yticks(self.yticks)
                        if self.yticklabels is None:
                            self.yticklabels = [f"{ticks:.2f}" for ticks in self.yticks]
                        axis.yaxis.set_ticklabels(self.yticklabels)
                    else:
                        axis.set_yticks([])
                    if c % 2 == 1 and r == self.intervals[0]:
                        axis.set_xlabel(self.xlabel)
                    if r % 2 == 1 and c == 0:
                        axis.set_ylabel(self.ylabel)
            figManager = plt.get_current_fig_manager()
            figManager.window.showMaximized()
            plt.show()
        else:
            # One full figure per data set.
            for d_set in self.data_sets:
                self.data = d_set
                self.scatter()
        return None

    def contour(self, fun=None, levels=None, scatter_data=False):
        """Render the trellis contour figure of ``fun`` over the panels.

        Args:
            fun: optional 1D array of function values (one per data point);
                overrides ``self.fun`` when given.
            levels: passed through to ``tricontourf``.
            scatter_data: when True, overlay the data points in white.
        """
        if fun is not None:
            self.fun = fun
        self.initialize()
        if not self._multiple_data_sets:
            self.classify_data()
            width_ratios = np.ones(self.intervals[1] + 1)
            width_ratios[-1] = self.oaxis_size
            height_ratios = np.ones(self.intervals[0] + 1)
            height_ratios[0] = self.oaxis_size
            fig, axes = plt.subplots(
                nrows=self.intervals[0] + 1,
                ncols=self.intervals[1] + 1,
                gridspec_kw={
                    "wspace": self.xspace,
                    "hspace": self.yspace,
                    "width_ratios": width_ratios,
                    "height_ratios": height_ratios,
                },
                figsize=self.figsize,
                constrained_layout=self.constrained_layout,
            )
            fig.subplots_adjust(
                top=0.95,
                bottom=0.05,
                left=0.05,
                right=0.95,
                hspace=0.2,
                wspace=0.2
            )
            for pos, axis in np.ndenumerate(axes):
                r, c = pos
                # top-right corner cell is unused
                if r == 0 and c == self.intervals[1]:
                    fig.delaxes(axis)
                # horizontal outer axis (top row)
                elif r == 0 and c != self.intervals[1]:
                    # handle limits
                    axis.set_xlim([self.bounds[3, 0], self.bounds[3, 1]])
                    axis.set_ylim([0, 1])
                    # handle ticks
                    axis.set_yticks([])
                    axis.xaxis.tick_top()
                    if c % 2 == 0:
                        self.oaxis_xticks = np.linspace(
                            self.bounds[3, 0],
                            self.bounds[3, 1],
                            self.oaxis_n_xticks
                        )
                        axis.set_xticks(self.oaxis_xticks)
                        if self.oaxis_xticklabels is None:
                            self.oaxis_xticklabels = [
                                f"{tick:.2f}" for tick in self.oaxis_xticks
                            ]
                        axis.xaxis.set_ticklabels(self.oaxis_xticklabels)
                    else:
                        axis.set_xticks([])
                    # draw bar marking this column's bin
                    axis.fill_between(
                        x=[
                            self.group_bins[0, c, 1, 0],
                            self.group_bins[0, c, 1, 1],
                        ],
                        y1=[1, 1],
                        y2=[0, 0],
                        facecolor="gray",
                        alpha=1 - self.oaxis_bar_transparency
                    )
                    # add label on every other column
                    if c % 2 == 1:
                        axis.annotate(
                            s=self.oaxis_xlabel,
                            xy=(np.mean(self.bounds[3, :]), 0.5),
                            ha="center",
                            va="center",
                        )
                # vertical outer axis (right column)
                elif r != 0 and c == self.intervals[1]:
                    axis.set_xlim([0, 1])
                    axis.set_ylim([self.bounds[2, 0], self.bounds[2, 1]])
                    # handle ticks
                    axis.set_xticks([])
                    axis.yaxis.tick_right()
                    if r % 2 == 0:
                        if self.oaxis_yticks is None:
                            self.oaxis_yticks = np.linspace(
                                self.bounds[2, 0], self.bounds[2, 1], self.oaxis_n_yticks
                            )
                        axis.set_yticks(self.oaxis_yticks)
                        if self.oaxis_yticklabels is None:
                            self.oaxis_yticklabels = [f"{tick:.2f}"
                                                      for tick in self.oaxis_yticks]
                        axis.yaxis.set_ticklabels(self.oaxis_yticklabels)
                    else:
                        axis.set_yticks([])
                    # draw bar marking this row's bin
                    axis.fill_between(
                        x=[0, 1],
                        y1=[
                            self.group_bins[r - 1, 0, 0, 1],
                            self.group_bins[r - 1, 0, 0, 1],
                        ],
                        y2=[
                            self.group_bins[r - 1, 0, 0, 0],
                            self.group_bins[r - 1, 0, 0, 0],
                        ],
                        facecolor="gray",
                        alpha=1 - self.oaxis_bar_transparency
                    )
                    # add label on every other row
                    if r % 2 == 0:
                        axis.annotate(
                            s=self.oaxis_ylabel,
                            xy=(0.50, np.mean(self.bounds[2, :])),
                            verticalalignment="center",
                            horizontalalignment="center",
                            rotation=270
                        )
                # inner panel: filled contour of fun over this group's points
                elif r != 0 and c != self.intervals[1]:
                    contourf = axis.tricontourf(
                        self.grouped_data[r - 1, c, :, 0][~np.isnan(self.grouped_data[r-1, c, :, 0])],
                        self.grouped_data[r - 1, c, :, 1][~np.isnan(self.grouped_data[r-1, c, :, 1])],
                        self.grouped_fun[r - 1, c, :][~np.isnan(self.grouped_fun[r - 1, c, :])],
                        levels=levels,
                        cmap=self.cmap,
                    )
                    if scatter_data:
                        axis.scatter(
                            self.grouped_data[r - 1, c, :, 0],
                            self.grouped_data[r - 1, c, :, 1],
                            alpha=self.markeralpha,
                            marker="o",
                            c="white",
                            s=self.markersize,
                        )
                    # pad the limits by 10% of the data range
                    axis.set_xlim([
                        self.bounds[0, 0] - 0.10 * (
                                self.bounds[0, 1] - self.bounds[0, 0]),
                        self.bounds[0, 1] + 0.10 * (
                                self.bounds[0, 1] - self.bounds[0, 0]),
                    ])
                    axis.set_ylim([
                        self.bounds[1, 0] - 0.10 * (
                                self.bounds[1, 1] - self.bounds[1, 0]),
                        self.bounds[1, 1] + 0.10 * (
                                self.bounds[1, 1] - self.bounds[1, 0]),
                    ])
                    # ticks only on the bottom row / left column, every other panel
                    if c % 2 == 0 and r == self.intervals[0]:
                        if self.xticks is None:
                            self.xticks = np.linspace(
                                self.bounds[0, 0], self.bounds[0, 1], self.n_xticks
                            )
                        axis.set_xticks(self.xticks)
                        if self.xticklabels is None:
                            self.xticklabels = [f"{ticks:.2f}" for ticks in self.xticks]
                        axis.xaxis.set_ticklabels(self.xticklabels)
                    else:
                        axis.set_xticks([])
                    if r % 2 == 0 and c == 0:
                        if self.yticks is None:
                            self.yticks = np.linspace(
                                self.bounds[1, 0], self.bounds[1, 1], self.n_yticks
                            )
                        axis.set_yticks(self.yticks)
                        if self.yticklabels is None:
                            self.yticklabels = [f"{ticks:.2f}" for ticks in self.yticks]
                        axis.yaxis.set_ticklabels(self.yticklabels)
                    else:
                        axis.set_yticks([])
                    if c % 2 == 1 and r == self.intervals[0]:
                        axis.set_xlabel(self.xlabel)
                    if r % 2 == 1 and c == 0:
                        axis.set_ylabel(self.ylabel)
                    # per-panel colorbar spanning this group's value range
                    colorbar_ticks = np.linspace(
                        np.nanmin(self.grouped_fun[r-1, c, :]),
                        np.nanmax(self.grouped_fun[r-1, c, :]),
                        self.n_colorbar_ticks,
                    )
                    colorbar = fig.colorbar(
                        contourf,
                        ax=axis,
                        shrink=1.0,
                        orientation="vertical",
                        pad=0.05,
                        fraction=0.15,
                        ticks=colorbar_ticks,
                    )
                    colorbar.ax.tick_params(
                        labelsize=self.colobar_tick_fontsize,
                        labelrotation=self.colorbar_label_rotation,
                    )
            figManager = plt.get_current_fig_manager()
            figManager.window.showMaximized()
            plt.show()
        else:
            for d_set in self.data_sets:
                self.data = d_set
                # BUGFIX: this previously recursed into self.scatter(), so a
                # list of data sets was scatter-plotted instead of contoured.
                self.contour(levels=levels, scatter_data=scatter_data)
        return None

    def get_bounds(self):
        """Compute and return column-wise [min, max] bounds (NaN-ignoring)."""
        self.bounds = np.array(
            [np.nanmin(self.data, axis=0), np.nanmax(self.data, axis=0)]
        ).T
        return self.bounds

    def get_bins(self):
        """Build bin edges for the grouping columns (2 and 3) and the
        per-panel [row-bin, col-bin] bounds in ``group_bins``."""
        self.bins = []
        for d, bound in enumerate(self.bounds):
            if d > 1:
                self.bins.append(np.linspace(bound[0], bound[1], self.intervals[d-2]+1))
        self.group_bins = np.empty(shape=(self.intervals[0], self.intervals[1], 2, 2))
        for r in range(self.intervals[0]):
            for c in range(self.intervals[1]):
                self.group_bins[r, c, :, :] = np.array([
                    [self.bins[0][r], self.bins[0][r+1]],
                    [self.bins[1][c], self.bins[1][c+1]]
                ])
        # flip so row 0 of the figure corresponds to the highest bin
        self.group_bins = np.flip(self.group_bins, axis=0)
        return self.group_bins

    def classify_data(self):
        """Assign every data point (and its fun value) to its panel.

        ``grouped_data[r, c]`` holds the points whose grouping columns fall
        inside panel (r, c)'s bins; unassigned slots stay NaN.
        """
        self.get_bounds()
        self.get_bins()
        self.grouped_data = np.full((
            self.intervals[0],
            self.intervals[1],
            self.data.shape[0],
            self.data.shape[1],
        ), fill_value=np.nan)
        if self.fun is not None:
            self.grouped_fun = np.full((
                self.intervals[0],
                self.intervals[1],
                self.data.shape[0],
            ), fill_value=np.nan)
        for r in range(self.intervals[0]):
            for c in range(self.intervals[1]):
                for p, datum in enumerate(self.data):
                    # inclusive on both edges, so boundary points can land in
                    # two adjacent panels
                    check1 = datum[2] >= self.group_bins[r, c, 0, 0]
                    check2 = datum[2] <= self.group_bins[r, c, 0, 1]
                    check3 = datum[3] >= self.group_bins[r, c, 1, 0]
                    check4 = datum[3] <= self.group_bins[r, c, 1, 1]
                    if np.all([check1, check2, check3, check4]):
                        self.grouped_data[r, c, p, :] = datum
                        if self.fun is not None:
                            self.grouped_fun[r, c, p] = self.fun[p]
        return self.grouped_data

    def add_data(self, data):
        """Append another data set; turns ``data`` into a list on second call."""
        if self.data is None:
            self.data = data
        else:
            self.data = [self.data, data]
if __name__ == "__main__":
    # Demo: trellis-contour a simple scalar function over a 4D grid.
    def fun(x):
        return x[0] ** 2 + x[1] ** 2 + x[2] ** 2 + x[3] ** 2
    # def fun(x):
    #     return x[0] + x[1] + x[2] + x[3]
    # def fun(x):
    #     return x[0] ** 3 + x[1] ** 3 + x[2] ** 3 + x[3] ** 3
    # def fun(x):
    #     return np.sin(x[0]) + np.sin(x[1]) + np.sin(x[2]) + np.sin(x[3])
    # def fun(x):
    #     return x[0] ** 4 + x[1] ** 4 + x[2] ** 4 + x[3] ** 4
    plotter1 = TrellisPlotter()
    # Imaginary step in np.mgrid means "number of points", not step size.
    reso = 5j
    multiplier = 2
    x1, x2, x3, x4 = np.mgrid[
        -1:1:reso*multiplier,
        -1:1:reso*multiplier,
        -1:1:reso,
        -1:1:reso
    ]
    # Flatten the grid into an (n_points, 4) array of samples.
    plotter1.data = np.array([x1.flatten(), x2.flatten(), x3.flatten(), x4.flatten()]).T
    plotter1.fun = fun(plotter1.data.T)
    # 5x5 trellis of panels over the last two columns.
    plotter1.intervals = np.array([5, 5])
    plotter1.markeralpha = 0.10
    plotter1.markersize = 5
    plotter1.n_colorbar_ticks = 4
    plotter1.cmap = plt.get_cmap("inferno")
    plotter1.contour(levels=10, scatter_data=True)
| StarcoderdataPython |
3582548 | #
# Copyright (c) 2016-2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# noinspection PyUnresolvedReferences
from ceph_manager.i18n import _
from ceph_manager.i18n import _LW
# noinspection PyUnresolvedReferences
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
class CephManagerException(Exception):
    """Base class for Ceph manager errors.

    Subclasses override ``message`` with a printf-style template; keyword
    arguments passed to the constructor fill the template's named fields.
    """
    message = _("An unknown exception occurred.")

    def __init__(self, message=None, **kwargs):
        self.kwargs = kwargs
        if not message:
            # %-formatting raises TypeError on bad substitution types and
            # KeyError when a named placeholder is missing from kwargs;
            # both must fall back to the raw class-level message.
            try:
                message = self.message % kwargs
            except (TypeError, KeyError):
                LOG.warn(_LW('Exception in string format operation'))
                for name, value in kwargs.items():
                    LOG.error("%s: %s" % (name, value))
                # at least get the core message out if something happened
                message = self.message
        super(CephManagerException, self).__init__(message)
# Specific Ceph manager failure modes.  Each subclass only supplies a
# %-style ``message`` template that the base class formats with the
# keyword arguments passed at raise time.
class CephPoolSetQuotaFailure(CephManagerException):
    message = _("Error seting the OSD pool "
                "quota %(name)s for %(pool)s to "
                "%(value)s") + ": %(reason)s"
class CephPoolGetQuotaFailure(CephManagerException):
    message = _("Error geting the OSD pool quota for "
                "%(pool)s") + ": %(reason)s"
class CephPoolCreateFailure(CephManagerException):
    message = _("Creating OSD pool %(name)s failed: %(reason)s")
class CephPoolDeleteFailure(CephManagerException):
    message = _("Deleting OSD pool %(name)s failed: %(reason)s")
class CephPoolRulesetFailure(CephManagerException):
    message = _("Assigning crush ruleset to OSD "
                "pool %(name)s failed: %(reason)s")
class CephPoolSetParamFailure(CephManagerException):
    message = _("Cannot set Ceph OSD pool parameter: "
                "pool_name=%(pool_name)s, param=%(param)s, value=%(value)s. "
                "Reason: %(reason)s")
class CephPoolGetParamFailure(CephManagerException):
    message = _("Cannot get Ceph OSD pool parameter: "
                "pool_name=%(pool_name)s, param=%(param)s. "
                "Reason: %(reason)s")
class CephSetKeyFailure(CephManagerException):
    message = _("Error setting the Ceph flag "
                "'%(flag)s' %(extra)s: "
                "response=%(response_status_code)s:%(response_reason)s, "
                "status=%(status)s, output=%(output)s")
class CephApiFailure(CephManagerException):
    message = _("API failure: "
                "call=%(call)s, reason=%(reason)s")
| StarcoderdataPython |
12839428 | <gh_stars>1-10
from __future__ import absolute_import
from django.utils.translation import ugettext_lazy as _
from permissions.models import PermissionNamespace, Permission
# Permission namespace under which all scheduler permissions are registered.
namespace = PermissionNamespace('scheduler', _(u'Scheduler'))
# Permission guarding read access to the interval job list.
PERMISSION_VIEW_JOB_LIST = Permission.objects.register(namespace, 'jobs_list', _(u'View the interval job list'))
| StarcoderdataPython |
9691128 | <filename>Query_two.py
"""
SELECT ID FROM EMPLOYEE e1
WHERE e1.department = 'department35' and
not exists (SELECT * FROM EMPLOYEE e2 WHERE e2.Department = e1.Department
and e2.salary>e1.salary and
exists (SELECT * from COURSE c Where c.EmpID=e2.ID));
"""
import time
import query_output_formatter
# Implement loop method
# Current implement equal to simple department match
def Q2(employee_table, course_table, department_id, flag):
    """Nested-loop evaluation of the query: IDs of employees in
    *department_id* for whom no better-paid colleague in the same
    department has taken a course (correlated NOT EXISTS).

    Results are printed via query_output_formatter when *flag* is true;
    the elapsed time is always printed.  Returns None.
    """
    result_set = []
    start_time = time.time()
    for tuple_e1 in employee_table:
        if tuple_e1.Department == department_id:
            outer_not_exist_flag = True
            for tuple_e2 in employee_table:
                if tuple_e2.Department == tuple_e1.Department and int(tuple_e2.Salary) > int(tuple_e1.Salary):
                    for tuple_c in course_table:
                        if tuple_c.EmpID == tuple_e2.ID:
                            # exists return True, e2 be selected
                            outer_not_exist_flag = False
                            break
                if not outer_not_exist_flag:
                    # NOT EXISTS already failed for e1 -- no need to keep
                    # scanning the remaining e2 rows (same results, less work).
                    break
            if outer_not_exist_flag:
                result_set.append(tuple_e1.ID)
    end_time = time.time()
    query_time = end_time - start_time
    if flag:
        query_output_formatter.formatted_output(result_set)
    print("Time spent on Q2: " + str(query_time))
# In canonical method (B+tree)
# Really slow by now, may need further improvement
def Q2C(employee_table, course_tree, department_id, flag):
    """Canonical (B+ tree) variant of Q2: course membership of a colleague
    is tested via ``course_tree.search(ID)`` instead of scanning the
    course table.  Prints results/timing like Q2; returns None.
    """
    result_set = []
    start_time = time.time()
    for tuple_e1 in employee_table:
        if tuple_e1.Department == department_id:
            outer_not_exist_flag = True
            for tuple_e2 in employee_table:
                if tuple_e2.Department == tuple_e1.Department and int(tuple_e2.Salary) > int(tuple_e1.Salary):
                    # idiom: "is not None" instead of "not ... is None"
                    if course_tree.search(tuple_e2.ID) is not None:
                        # exists return True, e2 be selected
                        outer_not_exist_flag = False
                        break
            if outer_not_exist_flag:
                result_set.append(tuple_e1.ID)
    end_time = time.time()
    query_time = end_time - start_time
    if flag:
        query_output_formatter.formatted_output(result_set)
    print("Time spent on Q2C: " + str(query_time))
# Optimized method: multi-column indexing, with optimized exists
# (doesn't scan whole course table, break once found matched record)
def Q2O(employee_tree, course_tree, department_id, flag):
    """Optimized variant of Q2 using multi-column indexing:
    Employee(Department) lookups via *employee_tree* and Course(EmpID)
    lookups via *course_tree*.  Prints results/timing like Q2; returns None.
    """
    result_set = []
    start_time = time.time()
    # Search on first tree on Employee(department)
    for tuple_e1 in employee_tree.search(department_id):
        outer_not_exist_flag = True
        # Handle "and" operation in the outer NOT EXISTS command
        for tuple_e2 in employee_tree.search(tuple_e1.Department):
            # Check the salary
            if int(tuple_e1.Salary) < int(tuple_e2.Salary):
                # Search crossing on second tree on course(ID); EXISTS is
                # satisfied by any hit (idiom: "is not None").
                if course_tree.search(tuple_e2.ID) is not None:
                    # So outer NOT EXISTS returns False
                    outer_not_exist_flag = False
                    # One qualifying e2 is enough -- stop scanning (the
                    # original kept looping; results are identical).
                    break
        if outer_not_exist_flag:
            result_set.append(tuple_e1.ID)
    end_time = time.time()
    query_time = end_time - start_time
    if flag:
        query_output_formatter.formatted_output(result_set)
    print("Time spent on Q2O: " + str(query_time))
| StarcoderdataPython |
5101424 | <reponame>dangell7/xrpl-py
"""High-level methods to obtain information about accounts."""
import asyncio
from typing import Dict, Union
from xrpl.asyncio.account import main
from xrpl.clients.sync_client import SyncClient
from xrpl.models.response import Response
def does_account_exist(address: str, client: SyncClient) -> bool:
    """
    Query the ledger for whether the account exists.

    Args:
        address: the account to query.
        client: the network client used to make network calls.

    Returns:
        Whether the account exists on the ledger.

    Raises:
        XRPLRequestFailureException: if the transaction fails.
    """
    # Drive the shared async implementation to completion synchronously.
    coroutine = main.does_account_exist(address, client)
    return asyncio.run(coroutine)
def get_next_valid_seq_number(address: str, client: SyncClient) -> int:
    """
    Query the ledger for the next available sequence number for an account.

    Args:
        address: the account to query.
        client: the network client used to make network calls.

    Returns:
        The next valid sequence number for the address.
    """
    # Drive the shared async implementation to completion synchronously.
    coroutine = main.get_next_valid_seq_number(address, client)
    return asyncio.run(coroutine)
def get_balance(address: str, client: SyncClient) -> int:
    """
    Query the ledger for the balance of the given account.

    Args:
        address: the account to query.
        client: the network client used to make network calls.

    Returns:
        The balance of the address.
    """
    # Drive the shared async implementation to completion synchronously.
    coroutine = main.get_balance(address, client)
    return asyncio.run(coroutine)
def get_account_root(address: str, client: SyncClient) -> Dict[str, Union[int, str]]:
    """
    Query the ledger for the AccountRoot object associated with a given address.

    Args:
        address: the account to query.
        client: the network client used to make network calls.

    Returns:
        The AccountRoot dictionary for the address.
    """
    # Drive the shared async implementation to completion synchronously.
    coroutine = main.get_account_root(address, client)
    return asyncio.run(coroutine)
def get_account_info(address: str, client: SyncClient) -> Response:
    """
    Query the ledger for the account_info of a given address.

    Args:
        address: the account to query.
        client: the network client used to make network calls.

    Returns:
        The account info for the address.

    Raises:
        XRPLRequestFailureException: if the rippled API call fails.
    """
    # Synchronous facade over the shared async implementation.
    response = asyncio.run(main.get_account_info(address, client))
    return response
def get_account_lines(address: str, client: SyncClient) -> Response:
    """
    Query the ledger for the trust lines of a given address.

    Args:
        address: the account to query.
        client: the network client used to make network calls.

    Returns:
        The account lines for the address.

    Raises:
        XRPLRequestFailureException: if the rippled API call fails.
    """
    # Synchronous facade over the shared async implementation.
    response = asyncio.run(main.get_account_lines(address, client))
    return response
| StarcoderdataPython |
import flask
# Blueprint for the auth module; its templates live in this package's
# ``templates`` folder.
blueprint = flask.Blueprint('auth', __name__, template_folder='templates')
# Imported for side effects only: the routes module attaches view functions
# to ``blueprint``, so this import must come *after* the blueprint exists
# (a deliberate circular-import workaround common in Flask apps).
import ruddock.modules.auth.routes
| StarcoderdataPython |
87989 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
class ActivationEvent(object):
    """Fired when a user activates their account."""

    def __init__(self, request, user):
        # Keep both the triggering request and the affected user so that
        # subscribers can inspect either one.
        self.request, self.user = request, user
class LoginEvent(object):
    """Fired when a user logs in."""

    def __init__(self, request, user):
        # Request that performed the login plus the authenticated user.
        self.request, self.user = request, user
class LogoutEvent(object):
    """Fired when a user logs out; only the request is known at that point."""

    def __init__(self, request):
        self.request = request
class PasswordResetEvent(object):
    """Fired when a user resets their password."""

    def __init__(self, request, user):
        # Request that triggered the reset plus the user whose password changed.
        self.request, self.user = request, user
| StarcoderdataPython |
9674297 | <gh_stars>0
import gkeepapi
import re
import schoolopy
import time
import yaml
def main():
    """
    Bring Schoology posts into Google Keep as notes.

    Reads credentials from ``config.yaml``, pulls the recent Schoology feed,
    and creates (or refreshes) one Keep note per new/updated post. The
    timestamp of the previous run is persisted in ``data.txt`` so only posts
    (or comments) newer than that are processed.

    Returns:
        Number of posts newly converted.
    """
    # Retrieve credentials and settings from the local YAML config.
    with open('config.yaml', 'r') as file:
        config = yaml.load(file, Loader=yaml.FullLoader)
    school_url = config['school_url']
    limit = config['num_posts']
    sc = schoolopy.Schoology(schoolopy.Auth(config['s_key'],
                                            config['s_secret']))
    count = 0
    keep = gkeepapi.Keep()
    keep.login(config['g_email'], config['g_password'])
    # Lazily wrap the raw feed entries; NOTE(review): sc._get is a private
    # schoolopy API -- confirm it stays available across library upgrades.
    feed = (schoolopy.Update(raw)
            for raw in sc._get(
                f'recent?with_attachments=1&&limit={limit}')['update'])
    # Retrieve the timestamp of the previous run.
    try:
        with open('data.txt', 'r') as f:
            last_ran = int(f.read())
    # Script has never been run before: treat everything as new.
    except FileNotFoundError:
        last_ran = 1
    # Store the new timestamp immediately (before processing).
    with open('data.txt', 'w') as f:
        f.write(str(int(time.time())))
    for post in feed:
        modified = False
        comments = None
        body = ''
        if post.num_comments > 0:
            # Fetch comments if the post is in a group...
            if post.realm == "group":
                comments = sc.get_group_update_comments(post.id,
                                                        post.group_id)
            # ...or in a course section.
            elif post.realm == "section":
                comments = sc.get_section_update_comments(post.id,
                                                          post.section_id)
            else:
                continue
            # The note has already been added on a previous run...
            if post.created < last_ran:
                for comment in comments:
                    # ...but a new comment has appeared since then.
                    if comment.created >= last_ran:
                        modified = True
                        break
        if post.created >= last_ran or modified:
            # Delete non-breaking spaces and carriage returns.
            body = post.body.replace(u'\r', '').replace(u'\xa0', '') + '\n\n'
            # Collapse any run of newlines into a single empty line.
            body = re.sub(r'\n+', '\n\n', body)
            # The note title is the display name of the post's author.
            title = sc.get_user(post.uid).name_display
            if hasattr(post, 'attachments'):
                attachments = post.attachments
                # Embeds/videos cannot be inlined; link to the original post.
                if 'embeds' in attachments or 'videos' in attachments:
                    body += ("An embed or video is attached:\n"
                             f"https://{school_url}/group/"
                             f"{post.group_id}/update/{post.id}\n\n")
                # Include attached hyperlinks verbatim.
                if 'links' in attachments:
                    for link in attachments['links']['link']:
                        body += f"{link['title']} (link)\n{link['url']}\n\n"
                if 'files' in attachments:
                    for file in attachments['files']['file']:
                        # converted_type == 3 appears to mark images;
                        # link to the lightbox preview for those.
                        if file['converted_type'] == 3:
                            body += (f"{file['title']} (image)\n"
                                     f"https://{school_url}/attachment/"
                                     f"{file['id']}/image/"
                                     "lightbox_preview\n\n")
                        # All other files get a docviewer link.
                        else:
                            body += (f"{file['title']} (file)\n"
                                     f"https://{school_url}/attachment/"
                                     f"{file['id']}/docviewer\n\n")
            # Append comments, if any, to the bottom of the note body.
            body_comment = ''
            if post.num_comments > 0:
                for comment in comments:
                    body_comment += (
                        f"{sc.get_user(comment.uid).name_display} "
                        f"(comment)\n{comment.comment}\n\n")
            # Existing note with a new comment: update it in place and
            # bring it out of the archive (match is by note body text).
            if modified:
                old_notes = list(keep.find(query=body.strip('\n')))
                if len(old_notes) == 1:
                    old_note = old_notes[0]
                    body = (body + body_comment).strip(u'\n')
                    old_note.text = body
                    old_note.archived = False
            else:
                body = (body + body_comment).strip(u'\n')
                note = keep.createNote(title, body)
                # Label the note with the group/course the post was made in.
                if post.realm == 'group':
                    group_name = sc.get_group(post.group_id).title
                elif post.realm == 'section':
                    group_name = sc.get_section(post.section_id).course_title
                else:
                    group_name = "Unkown"
                label = keep.findLabel(group_name)
                # Reuse the existing label if one exists...
                if label:
                    note.labels.add(label)
                # ...otherwise create it.
                else:
                    note.labels.add(keep.createLabel(group_name))
            count += 1
    keep.sync()
    return count
# Script entry point: run the sync once and report how many posts were added.
if __name__ == '__main__':
    print("Reading posts...")
    print(f"Added {main()} new posts")
| StarcoderdataPython |
6596700 | # -*- coding: utf-8 -*-
import random
from tkinter import *
# Payoff matrix: schema[player_choice][computer_choice] gives the round result
# from the player's point of view (2 = player wins, 1 = draw, 0 = computer
# wins). A dictionary lookup avoids a long if/else chain over all 9 pairings.
schema={
"rock":{"rock":1,"paper":0,"scissors":2},
"paper":{"rock":2,"paper":1,"scissors":0},
"scissors":{"rock":0,"paper":2,"scissors":1}
}
# Running totals shown in the score labels; updated by outcome_handler().
comp_score=0
player_score=0
#functions
def outcome_handler(user_choice):
    """Play one round against a random computer move and refresh the GUI.

    Updates the choice labels, the outcome label and the running score
    labels/globals according to the schema payoff matrix
    (2 = player wins, 1 = draw, 0 = computer wins).
    """
    global comp_score
    global player_score

    # Computer picks one of the three moves uniformly at random.
    moves = ["rock", "paper", "scissors"]
    computer_choice = moves[random.randint(0, 2)]
    result = schema[user_choice][computer_choice]

    # Show both choices before announcing the outcome.
    Player_Choice_Label.config(fg="green", text=f"Player choice : {user_choice}")
    Computer_Choice_Label.config(fg="red", text=f"Computer choice : {computer_choice}")

    if result == 2:
        # Player wins the round.
        player_score += 2
        Player_Score_Label.config(text=f"Player : {player_score}")
        Outcome_Label.config(fg="blue", bg="skyblue", text="Player-Won")
    elif result == 1:
        # Draw: both sides receive a point.
        player_score += 1
        comp_score += 1
        Player_Score_Label.config(text=f"Player : {player_score}")
        Outcome_Label.config(fg="blue", bg="skyblue", text="Draw")
        Computer_Score_Label.config(text=f"Computer : {comp_score}")
    elif result == 0:
        # Computer wins the round.
        comp_score += 2
        Outcome_Label.config(fg="blue", bg="skyblue", text="Computer-Won")
        Computer_Score_Label.config(text=f"Computer : {comp_score}")
#main application window
master=Tk()
master.title("RPS")
#static header labels (title + prompt)
Label(master,text="Rock , Paper , Scissors",font=("Calibri",15)).grid(row=0,sticky=N,pady=10,padx=200)
Label(master,text="Please Select an option",font=("Calibri",12)).grid(row=2,sticky=N)
Player_Score_Label=Label(master,text="Player : 0",font=("Calibri",12)) #running player score
Player_Score_Label.grid(row=3,sticky=W)
Computer_Score_Label=Label(master,text="Computer : 0",font=("Calibri",12)) #running computer score
Computer_Score_Label.grid(row=3,sticky=E)
#labels refreshed each round with the two chosen moves
Player_Choice_Label=Label(master,font=("Calibri",12))
Player_Choice_Label.grid(row=5,sticky=W)
Computer_Choice_Label=Label(master,font=("Calibri",12))
Computer_Choice_Label.grid(row=5,sticky=E)
#label announcing the round outcome (Player-Won / Draw / Computer-Won)
Outcome_Label=Label(master,font=("Calibri",12))
Outcome_Label.grid(row=5,sticky=N,pady=10)
#one button per possible move; each plays a round via outcome_handler()
Button(master,text="Rock",width=17,command=lambda:outcome_handler("rock")).grid(row=6,sticky=W,padx=10,pady=10)
Button(master,text="Paper",width=17,command=lambda:outcome_handler("paper")).grid(row=6,sticky=N,pady=10)
Button(master,text="Scissors",width=17,command=lambda:outcome_handler("scissors")).grid(row=6,sticky=E,padx=10,pady=10)
#empty spacer label; NOTE(review): gridded at row=5, not below the buttons --
#confirm the intended placement
Label(master).grid(row=5)
master.mainloop()
| StarcoderdataPython |
9677127 | """
Merge compatible PUDL datapackages and load the result into an SQLite DB.
This script merges a set of compatible PUDL datapackages into a single
tabular datapackage, and then loads that package into the PUDL SQLite DB
The input datapackages must all have been produced in the same ETL run, and
share the same ``datapkg-bundle-uuid`` value. Any data sources (e.g. ferc1,
eia923) that appear in more than one of the datapackages to be merged must
also share identical ETL parameters (years, tables, states, etc.), allowing
easy deduplication of resources.
Having the ability to load only a subset of the datapackages resulting from an
ETL run into the SQLite database is helpful because larger datasets are much
easier to work with via columnar datastores like Apache Parquet -- loading all
of EPA CEMS into SQLite can take more than 24 hours. PUDL also provides a
separate epacems_to_parquet script that can be used to generate a Parquet
dataset that is partitioned by state and year, which can be read directly into
pandas or dask dataframes, for use in conjunction with the other PUDL data that
is stored in the SQLite DB.
"""
import argparse
import logging
import pathlib
import sys
import tempfile
import coloredlogs
import datapackage
import sqlalchemy as sa
from tableschema import exceptions
import pudl
from pudl.convert.merge_datapkgs import merge_datapkgs
logger = logging.getLogger(__name__)
def datapkg_to_sqlite(sqlite_url, out_path, clobber=False, fkeys=False):
    """
    Load a PUDL datapackage into a sqlite database.

    Args:
        sqlite_url (str): An SQLite database connection URL.
        out_path (path-like): Path to the base directory of the datapackage
            to be loaded into SQLite. Must contain the datapackage.json file.
        clobber (bool): If True, replace an existing PUDL DB if it exists. If
            False (the default), fail if an existing PUDL DB is found.
        fkeys (bool): If true, tell SQLite to check foreign key constraints
            for the records that are being loaded. Left off by default.

    Returns:
        None
    """
    # Using SQL Alchemy event hooks to enable the foreign key checking pragma
    # within SQLite for all subsequent database connections. See these pages for
    # additional documentation on how this stuff works:
    # https://docs.sqlalchemy.org/en/13/core/event.html
    # https://docs.sqlalchemy.org/en/13/dialects/sqlite.html#foreign-key-support
    if fkeys:
        logger.info("Enforcing foreign key constraints in SQLite3")

        @sa.event.listens_for(sa.engine.Engine, "connect")
        def _set_sqlite_pragma(dbapi_connection, connection_record):
            # Only SQLite connections understand this pragma; other DBAPI
            # connections pass through untouched.
            from sqlite3 import Connection as SQLite3Connection
            if isinstance(dbapi_connection, SQLite3Connection):
                cursor = dbapi_connection.cursor()
                cursor.execute("PRAGMA foreign_keys=ON;")
                cursor.close()
    # Prepare the SQLite engine for the initial wipe.
    pudl_engine = sa.create_engine(sqlite_url)
    logger.info("Dropping the current PUDL DB, if it exists.")
    try:
        # Drop everything so we can start from a clean slate.
        pudl.helpers.drop_tables(pudl_engine, clobber=clobber)
    except sa.exc.OperationalError:
        # No pre-existing DB to drop; that's fine.
        pass
    # Start anew with a fresh engine.
    pudl_engine = sa.create_engine(sqlite_url)
    # Grab the merged datapackage metadata file:
    pkg = datapackage.DataPackage(
        descriptor=str(pathlib.Path(out_path, 'datapackage.json')))
    # We want to grab the dictionary of columns that need autoincrement id cols.
    try:
        autoincrement = pkg.descriptor['autoincrement']
    # In case there are no autoincrement columns in the metadata..
    except KeyError:
        autoincrement = {}
    logger.info("Loading merged datapackage into SQLite.")
    logger.info("This could take a while. It might be a good time")
    logger.info("to get a drink of water. Hydrate or die!")
    try:
        # Save the data package into SQL via tableschema's SQL storage backend.
        pkg.save(storage='sql', engine=pudl_engine, merge_groups=True,
                 autoincrement=autoincrement)
    except exceptions.TableSchemaException as exception:
        # Surface schema validation problems instead of a bare traceback.
        logger.error('SQLite conversion failed. See following errors:')
        logger.error(exception.errors)
def parse_command_line(argv):
    """
    Turn a raw ``sys.argv``-style list into parsed options. See the -h option.

    Args:
        argv (str): Command line arguments, including caller filename.

    Returns:
        dict: Dictionary of command line arguments and their parsed values.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "-c", "--clobber",
        action="store_true",
        default=False,
        help="""Overwrite the existing PUDL sqlite database if it exists. Otherwise,
        the existence of a pre-existing database will cause the conversion to fail.""")
    parser.add_argument(
        "-k", "--fkeys",
        action="store_true",
        default=False,
        help="""Enforce foreign-key constraints within SQLite.""")
    parser.add_argument(
        "in_paths",
        nargs="+",
        help="""A list of paths to the datapackage.json files containing the
        metadata for the PUDL tabular data packages to be merged and
        potentially loaded into an SQLite database.""")
    # Skip argv[0] (the program name), as sys.argv callers expect.
    return parser.parse_args(argv[1:])
def main():
    """Merge PUDL datapackages and save them into an SQLite database.

    Fails fast (before any merging) if a PUDL SQLite DB already exists and
    --clobber was not given, or if any input datapackage path is missing.
    """
    # Display logged output from the PUDL package:
    logger = logging.getLogger(pudl.__name__)
    log_format = '%(asctime)s [%(levelname)8s] %(name)s:%(lineno)s %(message)s'
    coloredlogs.install(fmt=log_format, level='INFO', logger=logger)
    args = parse_command_line(sys.argv)
    pudl_settings = pudl.workspace.setup.get_defaults()
    logger.info(f"pudl_in={pudl_settings['pudl_in']}")
    logger.info(f"pudl_out={pudl_settings['pudl_out']}")
    # Check if there's already a PUDL SQLite DB that we should not clobber.
    # Need to remove the sqlite:/// prefix from the SQLAlchemy URL since
    # what we're checking against is a file path, not a URL.
    if (
        not args.clobber
        and pathlib.Path(pudl_settings["pudl_db"].replace("sqlite:///", "")).exists()
    ):
        raise FileExistsError(
            f"SQLite DB at {pudl_settings['pudl_db']} exists and clobber is False.")
    # Verify that all of the input datapackage descriptors exist.
    dps = []
    for path in args.in_paths:
        if not pathlib.Path(path).exists():
            raise FileNotFoundError(
                f"Input datapackage path {path} does not exist.")
        dps.append(datapackage.DataPackage(descriptor=path))
    logger.info("Merging datapackages.")
    # The merged datapackage only lives long enough to be loaded into SQLite,
    # so stage it in a temporary directory.
    with tempfile.TemporaryDirectory() as tmpdir:
        out_path = pathlib.Path(tmpdir)
        merge_datapkgs(dps, out_path, clobber=args.clobber)
        logger.info("Loading merged datapackage into an SQLite database.")
        datapkg_to_sqlite(
            pudl_settings['pudl_db'],
            out_path,
            clobber=args.clobber,
            fkeys=args.fkeys,
        )
    logger.info("Success! You can connect to the PUDL DB at this URL:")
    logger.info(f"{pudl_settings['pudl_db']}")
| StarcoderdataPython |
3464417 | <reponame>Crystal-girl/Algorithm-interview-lecture-40
"""
leetcode 98
Valid Binary Search Tree
"""
class TreeNode:
    """A binary-tree node holding a value and two child links."""

    def __init__(self, x):
        self.val = x
        # Children are attached later by the tree builder.
        self.left = self.right = None
"""
solution 1
T: O(N)
S: O(N)
"""
def isValidBST(self, root: 'TreeNode') -> bool:
    """Check the BST property via an in-order traversal.

    An in-order walk of a valid BST visits values in strictly increasing
    order, so it suffices to compare each node against its predecessor.
    T: O(N), S: O(N) recursion stack.
    """
    self.prev = None

    def inorder(cur: 'TreeNode') -> bool:
        if cur is None:
            return True
        # Validate the left subtree first (in-order position).
        if not inorder(cur.left):
            return False
        # Predecessor must be strictly smaller.
        if self.prev is not None and self.prev.val >= cur.val:
            return False
        self.prev = cur
        return inorder(cur.right)

    return inorder(root)
"""
solution 2
T: O(N)
S: O(N)
"""
def isValidBST(self, root: 'TreeNode') -> bool:
    """Check the BST property by propagating (lower, upper) value bounds.

    Every node must lie strictly inside the open interval inherited from
    its ancestors. T: O(N), S: O(N) recursion stack.
    """
    def within(node: 'TreeNode', lo=float('-inf'), hi=float('inf')) -> bool:
        if not node:
            return True
        if not (lo < node.val < hi):
            return False
        # Left subtree keeps the lower bound, right keeps the upper one.
        return within(node.left, lo, node.val) and within(node.right, node.val, hi)

    return within(root)
3316650 | #
# Copyright (C) 2015 <NAME>
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
import argparse
from builtins import range
from rdkit.Chem.Scaffolds import MurckoScaffold
from rdkit import Chem
from rdkit.Chem import AllChem
import numpy as np
from numpy import linalg
from pipelines_utils import parameter_utils, utils
from pipelines_utils_rdkit import rdkit_utils
def write_out(mols, count, writer, file_format):
    """Write molecules with *writer* and return the updated output count.

    Args:
        mols: iterable of RDKit molecules; ``None`` entries are skipped.
        count: running number of molecules written so far.
        writer: SDF or JSON writer object exposing ``write()``.
        file_format: either ``'sdf'`` or ``'json'``.

    Returns:
        The updated running count. Only non-None molecules are counted;
        the previous version incremented ``count`` *before* the None check,
        so skipped molecules inflated the reported output count.
    """
    for mol in mols:
        # Skip placeholders for failed molecules before counting them.
        if mol is None:
            continue
        if file_format == 'sdf':
            writer.write(mol)
        elif file_format == 'json':
            writer.write(mol, format='mol')
        count += 1
    return count
def GetBestFitPlane(pts, weights=None):
    """Return the least-squares plane through a set of 3D points.

    Args:
        pts: sequence of 3-vectors (numpy arrays) with point coordinates.
        weights: optional per-point weights; if None, every point gets
            weight 1. (The previous implementation raised NameError for
            non-None weights because the weighted branch was missing.)

    Returns:
        numpy array ``[a, b, c, d]`` describing the plane
        ``a*x + b*y + c*z + d = 0``, where (a, b, c) is the unit
        eigenvector of the (weighted) covariance matrix belonging to the
        smallest eigenvalue, i.e. the direction of least variance.
    """
    if weights is None:
        wSum = len(pts)
        origin = np.sum(pts, 0)
    else:
        # Weighted centroid: sum(w_i * p_i) / sum(w_i).
        weights = np.asarray(weights, dtype=np.double)
        wSum = np.sum(weights)
        origin = np.sum(np.asarray(pts) * weights[:, None], 0)
    origin = origin / wSum

    # Accumulate the (weighted) covariance matrix of the centered points.
    sums = np.zeros((3, 3), np.double)
    for k, pt in enumerate(pts):
        w = 1.0 if weights is None else weights[k]
        dp = pt - origin
        for i in range(3):
            sums[i, i] += w * dp[i] * dp[i]
            for j in range(i + 1, 3):
                sums[i, j] += w * dp[i] * dp[j]
                sums[j, i] += w * dp[i] * dp[j]
    sums /= wSum

    # eigh returns eigenvalues in ascending order; the normal is the
    # eigenvector of the smallest one.
    vals, vects = linalg.eigh(sums)
    order = np.argsort(vals)
    normal = vects[:, order[0]]

    plane = np.zeros((4, ), np.double)
    plane[:3] = normal
    plane[3] = -1 * normal.dot(origin)
    return plane
def PBFRD(mol, confId=-1):
    """Mean distance of a conformer's atoms from their best-fit plane.

    Returns 0 for non-3D conformers, which are flat by construction.
    """
    conf = mol.GetConformer(confId)
    if not conf.Is3D():
        return 0
    coords = np.array([list(conf.GetAtomPosition(idx))
                       for idx in range(mol.GetNumAtoms())])
    plane = GetBestFitPlane(coords)
    # Norm of the plane normal, used to turn dot products into distances.
    denom = np.dot(plane[:3], plane[:3]) ** 0.5
    # Sum the point-to-plane distances, then average.
    res = 0.0
    for pt in coords:
        res += np.abs(pt.dot(plane[:3]) + plane[3])
    res /= denom
    res /= len(coords)
    return res
def PBFev(mol):
    '''Return an array of exit-vector angles (degrees) for this mol.

    The plane of best fit is computed for the molecule's Murcko scaffold
    only; each exit vector's angle against that plane is reported.
    Returns [0] if the conformer is not 3D, and None if no exit vectors
    are found.
    '''
    # Reduce the molecule to its Murcko scaffold.
    murcko = MurckoScaffold.GetScaffoldForMol(mol)
    # Get the PBF plane for the murcko scaffold only.
    confId = -1
    conf = murcko.GetConformer(confId)
    if not conf.Is3D():
        print('This mol is not 3D - all PBFev angles will be 0 degrees')
        return [0]
    pts = np.array([list(conf.GetAtomPosition(i))  # Get atom coordinates
                    for i in range(murcko.GetNumAtoms())])
    # GetBestFitPlane is in the RDKit Contrib directory as part of PBF.
    # The plane is an xyz normal vector plus a c intercept adjustment.
    plane = GetBestFitPlane(pts)
    # Map onto parent structure coords (this func adds exit vectors [*]).
    murckoEv = Chem.ReplaceSidechains(mol, murcko)
    confId = -1  # embed 3D conf object with EVs (atom indices do not change)
    conf = murckoEv.GetConformer(confId)
    # Where [#0] matches exit vector SMILES [*].
    patt = Chem.MolFromSmarts('[#0]-[*]')
    matches = murckoEv.GetSubstructMatches(patt)
    if len(matches) == 0:
        return None
    # Calculate angles between exit vectors and the murcko plane of best fit.
    exitVectors = np.zeros(len(matches))
    denom = np.dot(plane[:3], plane[:3])
    denom = denom**0.5
    for n, match in enumerate(matches):
        # match[0] is the dummy (exit) atom, match[1] its anchor atom.
        evCoords = conf.GetAtomPosition(match[0])
        anchorCoords = conf.GetAtomPosition(match[1])
        v = np.array(((evCoords[0]-anchorCoords[0]),
                      (evCoords[1]-anchorCoords[1]),
                      (evCoords[2]-anchorCoords[2])))
        # Angle between the vector and the plane: arcsin of the normalized
        # projection onto the plane normal; reported as an absolute value.
        angle = np.arcsin((np.dot(v, plane[:3])) /
                          ((denom)*((np.dot(v, v))**0.5)))
        angle = np.abs(np.degrees(angle))
        exitVectors[n] = angle
    return exitVectors
def main():
    """Read molecules, compute PBF exit-vector angles and distances, write out.

    For each input molecule the mean plane distance is stored as the
    ``distance`` property and each exit-vector angle as ``angle_<n>``.
    Molecules that fail (or yield no exit vectors) are counted as errors.
    """
    ### command line args defintions #########################################
    parser = argparse.ArgumentParser(description='Calculate plane of best fit for molecules')
    parameter_utils.add_default_io_args(parser)
    args = parser.parse_args()
    utils.log("PBFEV args: ", args)
    input ,output ,suppl ,writer ,output_base = rdkit_utils.default_open_input_output(args.input, args.informat, args.output, 'PBFEV', args.outformat)
    i=0
    count=0
    errors=0
    out_results = []
    for mol in suppl:
        i +=1
        # NOTE(review): EmbedMolecule is called before the None check below;
        # a None entry from the supplier would raise here -- confirm whether
        # the supplier can yield None and reorder if so.
        AllChem.EmbedMolecule(mol)
        if mol is None:
            errors += 1
            continue
        out_vector = PBFev(mol)
        # No exit vectors found: treat as an error and skip.
        if out_vector is None:
            errors += 1
            continue
        rd = PBFRD(mol)
        mol.SetDoubleProp("distance", rd)
        # One property per exit vector angle: angle_0, angle_1, ...
        for j,angle in enumerate(out_vector):
            mol.SetDoubleProp("angle" + "_" + str(j), angle)
        out_results.append(mol)
    count = write_out(out_results, count, writer, args.outformat)
    utils.log("Handled " + str(i) + " molecules, resulting in "+ str(count)+ " outputs and " + str(errors) + ' errors')
    writer.flush()
    writer.close()
    input.close()
    output.close()
# Script entry point.
if __name__ == "__main__":
    main()
8187920 | <reponame>jtilly/git_root<filename>git_root/__init__.py<gh_stars>10-100
from .git_root import *
| StarcoderdataPython |
1670208 | from .environment import ForagingEnv
| StarcoderdataPython |
6614213 | <reponame>gstavosanchez/tytus
class DB():
    """In-memory registry of databases, each mapping table names to content.

    Return-code conventions (kept from the original API):
        createDatabase: 0 = ok, 1 = invalid name, 2 = already exists
        alterDatabase:  0 = ok, 1 = invalid name, 2 = old missing, 3 = new taken
        dropDatabase:   0 = ok, 1 = invalid name, 2 = not found
    """

    def __init__(self):
        # Maps database name -> {table name: table content}.
        self.dicDB = {}
        # Kept for backward compatibility with external readers; no longer
        # shared between databases (see createDatabase).
        self.dicTB = {}

    #---------------------DATABASE FUNCTIONS----------------------#
    def createDatabase(self, database):
        """Create an empty database named `database`."""
        if not self.identify(database):
            return 1
        if self.searchDB(database):
            return 2
        # Each database gets its OWN table dict. The previous version stored
        # the shared self.dicTB instance here, so tables added to one
        # database silently appeared in every other database as well.
        self.dicDB[database] = {}
        return 0

    def showDatabases(self):
        """Print and return the list of stored database names."""
        keys = list(self.dicDB)
        print(keys)
        return keys

    def alterDatabase(self, databaseOld, databseNew):
        """Rename database `databaseOld` to `databseNew`.

        (The second parameter keeps its original -- misspelled -- name so
        keyword callers remain compatible.)
        """
        if not (self.identify(databaseOld) and self.identify(databseNew)):
            return 1
        if not self.searchDB(databaseOld):
            return 2
        if self.searchDB(databseNew):
            return 3
        # Rebuild the dict so the renamed entry keeps its original position.
        self.dicDB = {databseNew if key == databaseOld else key: value
                      for key, value in self.dicDB.items()}
        return 0

    def dropDatabase(self, database):
        """Delete database `database` and all of its tables."""
        if not self.identify(database):
            return 1
        if not self.searchDB(database):
            return 2
        self.dicDB.pop(database)
        return 0

    #--------------------HELPERS--------------------#
    def identify(self, id):
        """Validate a name against (simplified) SQL identifier rules.

        Accepts names starting with a letter or one of @, _, #, plus
        bracket-quoted names like ``[name]`` that end with ``]``.
        """
        special = ["[", "@", "_", "o", "#"]
        first = id[0]
        if first.isalpha():
            return True
        if first.isdigit():
            return False
        if first in special:
            if first == "[":
                # A bracket-quoted identifier must be closed.
                return id[-1] == "]"
            return True
        # Any other leading character is rejected. (The original fell
        # through and returned None here, which is also falsy.)
        return False

    def searchDB(self, key):
        """Return True if database `key` exists."""
        return key in self.dicDB

    # ---------------------TABLE FUNCTIONS----------------------#
    def addTable(self, key, name, content):
        """Create table `name` with `content` inside database `key`."""
        if not self.searchDB(key):
            print("No existe la base de datos")
            return
        if self.searchTB(key, name):
            print("Ya existe una tabla con ese nombre")
            return
        self.dicDB[key][name] = content

    def searchTB(self, key, name):
        """Return True if table `name` exists in database `key`.

        (The original's negative branch was ``else: False`` -- a bare
        expression -- so it returned None; callers only ever used the
        result in a boolean context, so returning False is compatible.)
        """
        return name in self.dicDB.get(key, {})

    def deleteTB(self, key, name):
        """Remove table `name` from database `key`."""
        self.dicDB.get(key).pop(name)

    def print(self):
        """Dump every database with its tables and nested contents.

        Note: the class originally defined ``print`` twice; only the second
        definition (this nested dump) was ever in effect, so the dead first
        definition was removed.
        """
        for key in self.dicDB.keys():
            print(key + ":")
            for i in self.dicDB.get(key).keys():
                print(" "+i + ":")
                for j in self.dicDB.get(key).get(i).keys():
                    print(" "+j+":")
                    for k in self.dicDB.get(key).get(i).get(j):
                        print(" "+str(k))
| StarcoderdataPython |
6445573 | <filename>findsame/calc.py<gh_stars>1-10
import os
import hashlib
import functools
import itertools
from collections import defaultdict
##from multiprocessing import Pool # same as ProcessPoolExecutor
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
from findsame import common as co
from findsame.parallel import ProcessAndThreadPoolExecutor, \
SequentialPoolExecutor
from findsame.config import cfg
HASHFUNC = hashlib.sha1

def hashsum(x, encoding='utf-8'):
    """Return the hex digest of string *x* under HASHFUNC."""
    data = x.encode(encoding=encoding)
    return HASHFUNC(data).hexdigest()
# Hash of an empty file as returned by hash_file(): the file content is ''
# but the file size is 0 (an int). Since we hash size and content, and
# hashsum('0') != hashsum(''), we must use the former. The encoding doesn't
# matter for '0' since ascii is a subset of e.g. utf-8 up to code point 127.
#
# We have:
#
# empty file
# * filesize = 0,
# * fpr=hashsum('0') -- result of hash_file(Leaf('/path/to/empty_file'))
# empty dir
# * zero files
# * fpr=hashsum('') -- definition
# dirs with N empty files:
# * fpr = hashsum(N times hashsum('0'))
# * so all dirs with the same number of empty files share the same hash
EMPTY_FILE_FPR = hashsum('0')
EMPTY_DIR_FPR = hashsum('')
# Short-lived files that are collected while scanning the file system and
# building the tree can be gone by the time we calculate hashes. In that case
# we could either
# * delete them from the tree
# * return a pre-defined hash
# We do the latter. Hashes of '-1' and '-2' represent the hash of an empty
# file with negative size, which is impossible for existing files, so this
# is a safe sentinel.
#
# Removal from the tree would be trivial for files, but for dirs we'd need to
# recursively delete all dirs and files below, which is much more work.
MISSING_FILE_FPR = hashsum('-1')
MISSING_DIR_FPR = hashsum('-2')
def hash_file(leaf, blocksize=None, use_filesize=True):
    """Return the hex fingerprint of a file's content.

    The file size is mixed into the hash first (unless
    ``use_filesize=False``), before the file's bytes.

    Parameters
    ----------
    leaf : Leaf
    blocksize : int, None
        number of bytes to read per chunk; None reads the whole file at once
    use_filesize : bool
        whether to feed ``leaf.filesize`` into the hash before the content

    Notes
    -----
    Chunked reading idea stolen from:
    http://pythoncentral.io/hashing-files-with-python/ . With
    ``use_filesize=False`` the result matches e.g. ``sha1sum <filename>``.
    """
    hasher = HASHFUNC()
    if use_filesize:
        hasher.update(str(leaf.filesize).encode('ascii'))
    with open(leaf.path, 'rb') as fd:
        while True:
            chunk = fd.read(blocksize)
            if not chunk:
                break
            hasher.update(chunk)
    return hasher.hexdigest()
def hash_file_limit(leaf, blocksize=None, limit=None, use_filesize=True):
    """Like :func:`hash_file`, but hash at most the first `limit` bytes."""
    # The main script performs the same validation as an early-exit check,
    # but this function is also used elsewhere (benchmarks, tests) without
    # pre-validated cfg values, so the checks are repeated here. Timing
    # shows the asserts cost virtually nothing.
    assert blocksize is not None and (blocksize > 0), f"blocksize={blocksize}"
    assert (limit is not None) and (limit > 0), f"limit={limit}"
    bs = min(blocksize, limit)
    assert limit % bs == 0, f"limit={co.size2str(limit)} % bs={co.size2str(bs)} != 0"
    hasher = HASHFUNC()
    if use_filesize:
        hasher.update(str(leaf.filesize).encode('ascii'))
    with open(leaf.path, 'rb') as fd:
        pos = 0
        # Stop at EOF or once `limit` bytes have been consumed.
        while pos != leaf.filesize and pos != limit:
            hasher.update(fd.read(bs))
            pos = fd.tell()
    return hasher.hexdigest()
def split_path(path):
    """//foo/bar/baz -> ['foo', 'bar', 'baz']"""
    parts = path.split('/')
    return [part for part in parts if part]
class Element:
    """Common base for tree leafs (files) and nodes (dirs)."""

    def __init__(self, path=None):
        self.kind = None
        self.path = path

    def __repr__(self):
        return '{}:{}'.format(self.kind, self.path)

    @co.lazyprop
    def fpr(self):
        # Computed once, then cached by the lazyprop decorator.
        val = self._get_fpr()
        co.debug_msg(f"fpr: {self.kind}={self.path} fpr={val}")
        return val

    def _get_fpr(self):
        # Subclasses define how their fingerprint is produced.
        raise NotImplementedError
class Node(Element):
    """Directory node of the Merkle tree.

    A Node's fingerprint is derived from the fingerprints of its child
    leafs and nodes.
    """

    def __init__(self, *args, childs=None, **kwds):
        super().__init__(*args, **kwds)
        self.kind = 'node'
        self.childs = childs

    def add_child(self, child):
        """Append a child Element (Leaf or Node)."""
        self.childs.append(child)

    @staticmethod
    def _merge_fpr(fpr_lst):
        """Hash of a list of fpr strings.

        The fprs are sorted first so the result is independent of the
        order in which children were added. Feeding the sorted fprs to
        one hasher incrementally produces the same digest as hashing
        their concatenation, so this matches the previous
        ``hashsum(''.join(sorted(fpr_lst)))`` implementation, including
        the single-element case.
        """
        # no childs, this happens if
        # * we really have a node (=dir) w/o childs
        # * the dir contains only links .. we currently treat
        #   such a dir as empty since we ignore links
        if not fpr_lst:
            return EMPTY_DIR_FPR
        hasher = HASHFUNC()
        for fpr in sorted(fpr_lst):
            hasher.update(fpr.encode('utf-8'))
        return hasher.hexdigest()

    def _get_fpr(self):
        # Short-lived dirs may vanish between scan and hash time; report
        # the MISSING_DIR_FPR sentinel instead of crashing.
        if os.path.exists(self.path):
            return self._merge_fpr([c.fpr for c in self.childs])
        return MISSING_DIR_FPR
class Leaf(Element):
    """File leaf of the Merkle tree."""

    def __init__(self, *args, fpr_func=hash_file, **kwds):
        super().__init__(*args, **kwds)
        self.kind = 'leaf'
        self.fpr_func = fpr_func
        # Cache the size once at scan time; it also feeds the hash.
        self.filesize = os.path.getsize(self.path)

    def _get_fpr(self):
        # Guard against files deleted since the tree was built.
        if not os.path.exists(self.path):
            return MISSING_FILE_FPR
        return self.fpr_func(self)
class FileDirTree:
    """File (leaf) + dir (node) part of a Merkle tree. No hash calculation
    here.

    May consist of multiple independent sub-graphs (e.g. if data is brought in
    by update()), thus there is no single "top" element which could be used for
    recursive hash calculation (more details in MerkleTree).

    Notes
    -----
    Merkle tree (single graph) with one top node:
    ::

        $ tree test
        test                    # top node
        └── a                   # node
            ├── b               # node
            │   ├── c           # node
            │   │   └── file1   # leaf
            │   ├── file4       # leaf
            │   └── file5       # leaf
            ├── d               # node
            │   └── e           # node
            │       └── file2   # leaf
            └── file3           # leaf

        >>> [(r,d,f) for r,d,f in os.walk('test/')]
        [('test/', ['a'], []),
         ('test/a', ['b', 'd'], ['file3']),
         ('test/a/b', ['c'], ['file5', 'file4']),
         ('test/a/b/c', [], ['file1']),
         ('test/a/d', ['e'], []),
         ('test/a/d/e', [], ['file2'])]
    """
    def __init__(self, dr=None, files=None):
        """Build the tree from either a directory or an explicit file list.

        Exactly one of `dr` and `files` must be given.
        """
        self.dr = dr
        self.files = files
        assert [files, dr].count(None) == 1, "dr or files must be None"
        self.build_tree()

    @staticmethod
    def walk_files(files):
        """Mimic os.walk() given a list of files.

        Example
        -------
        >>> for root,_,files in walk_files(files):
        ...     <here be code>

        Difference to os.walk(): The middle return arg is None and the order is
        not top-down.
        """
        # Group basenames by their containing directory.
        dct = defaultdict(list)
        for fn in files:
            _dn = os.path.dirname(fn)
            dct[os.path.curdir if _dn == '' else _dn].append(os.path.basename(fn))
        for root, files in dct.items():
            yield root,None,files

    def walker(self):
        """Return the (root, dirs, files) iterator matching the input kind."""
        if self.files is not None:
            return self.walk_files(self.files)
        elif self.dr is not None:
            assert os.path.exists(self.dr) and os.path.isdir(self.dr)
            return os.walk(self.dr)
        else:
            raise Exception("files and dr are None")

    def build_tree(self):
        """Construct Merkle tree from all dirs and files in directory
        `self.dr` (or the `files` list). Don't calculate fprs.

        NOTE(review): linking a node to its parent relies on the parent
        having been visited first; os.walk() is top-down so this holds for
        the `dr` case, but walk_files() makes no ordering promise -- confirm
        whether unlinked sub-graphs are acceptable there (the class
        docstring suggests they are).
        """
        self.nodes = {}
        self.leafs = {}
        for root, _, files in self.walker():
            # make sure os.path.dirname() returns the parent dir
            if root.endswith('/'):
                root = root[:-1]
            node = Node(path=root, childs=[])
            for base in files:
                fn = os.path.join(root, base)
                co.debug_msg(f"build_tree: {fn}")
                # isfile(<link>) is True, has to be tested first
                if os.path.islink(fn):
                    co.debug_msg(f"skip link: {fn}")
                elif os.path.isfile(fn):
                    leaf = Leaf(path=fn)
                    node.add_child(leaf)
                    self.leafs[fn] = leaf
                else:
                    co.debug_msg(f"skip unknown path type: {fn}")
            # add node as child to parent node
            #   root        = /foo/bar/baz
            #   parent_root = /foo/bar
            self.nodes[root] = node
            parent_root = os.path.dirname(root)
            if parent_root in self.nodes.keys():
                self.nodes[parent_root].add_child(node)

    def update(self, other):
        """Merge another FileDirTree's nodes and leafs into this one."""
        for name in ['nodes', 'leafs']:
            attr = getattr(self, name)
            attr.update(getattr(other, name))
class MerkleTree:
    """
    In the simplest setting, the tree is a single graph with a top node. In
    that case, a naive serial calculation would just call top.fpr, which would
    trigger recursive hash (fpr) calculations for all nodes and their connected
    leafs, thus populating each tree element (leaf, node) with a fpr value.
    Here, we have two differences.
    (1) We deal with possibly multiple distinct sub-graphs, thus there is no
    single top element. It was a design decision to NOT model this using
    multiple MerkleTree instances (sub-graphs) with a top node each, for
    reasons which will become clear below. For one, we don't need to perform
    complicated graph calculations to separate nodes and leafs into separate
    graphs.
    (2) We calculate leaf fprs in parallel explicitly before node fprs, thus
    leaf fprs are never calculated by recursive node fprs. Therefore, we do not
    need a top node. Calculating leafs in parallel is easy since they are
    independent from one another.
    These two points imply two issues, which are however easily solved:
    calc_node_fprs(): self.node_fprs
    --------------------------------
    node.fpr attribute access triggers recursive fpr calculation in all leafs
    and nodes connected to this node. But since we do not assume the existence
    of a single top node, we need to iterate thru all nodes explicitly. The
    @lazyprop decorator of the fpr attribute (see class Element) makes sure we
    don't calculate anything more than once. Profiling shows that the decorator
    doesn't consume much resources, compared to hash calculation itself.
    calc_leaf_fprs(): share_leafs
    -----------------------------
    Note: This applies ONLY to ProcessPoolExecutor, i.e. multiprocessing, which
    is NOT the fastest parallelization method here (ThreadPoolExecutor, i.e.
    multithreading is better).
    The calculation of self.node_fprs in calc_node_fprs() causes a slowdown
    with ProcessPoolExecutor if we do not assign calculated leaf fprs
    beforehand. This is b/c when calculating node_fprs, we do not operate on
    self.leaf_fprs, which WAS calculated fast in parallel, but on self.tree (a
    FileDirTree instance). This is NOT shared between processes.
    multiprocessing spawns N new processes, each with its own MerkleTree
    object, and each will calculate approximately len(leafs)/N fprs, which are
    then collected in leaf_fprs. Therefore, when we leave the pool context, the
    MerkleTree objects (i.e. self) of each sub-process are deleted, while the
    main process' self.tree object is still empty (no element has an fpr
    attribute value)! Then, the node_fprs calculation in calc_node_fprs()
    triggers a new fpr calculation for the entire tree of the main process all
    over again. We work around that by setting leaf.fpr by hand. Since the main
    process' self.tree is empty, we don't need to test if leaf.fpr is already
    populated (for that, we'd need to extend the @lazyprop decorator anyway).
    some attributes
    ---------------
    leaf_fprs, node_fprs:
        {path1: fprA,
         path2: fprA,
         path3: fprB,
         path4: fprC,
         path5: fprD,
         path6: fprD,
         path7: fprD,
         ...}
    invert_dict(leaf_fprs), invert_dict(node_fprs):
        {fprA: [path1, path2],
         fprB: [path3],
         fprC: [path4],
         fprD: [path5, path6, path7],
         ...}
    """
    def __init__(self, tree):
        """
        Parameters
        ----------
        tree : FileDirTree instance
        """
        self.tree = tree
        self.set_leaf_fpr_func(cfg.limit)
    def calc_fprs(self):
        # Leafs first (in parallel), then nodes; see class docstring for why
        # this ordering matters.
        self.calc_leaf_fprs()
        self.calc_node_fprs()
    def set_leaf_fpr_func(self, limit):
        # Choose the per-file hash function: full-file hash when no byte
        # limit is configured, else a size-limited hash.
        if limit is None:
            leaf_fpr_func = functools.partial(hash_file,
                                              blocksize=cfg.blocksize)
        else:
            leaf_fpr_func = functools.partial(hash_file_limit,
                                              blocksize=cfg.blocksize,
                                              limit=limit,
                                              use_filesize=True)
        for leaf in self.tree.leafs.values():
            leaf.fpr_func = leaf_fpr_func
    # pool.map(lambda kv: (k, v.fpr), ...) in _calc_leaf_fprs() doesn't work,
    # error is "Can't pickle ... lambda ...", same with defining _fpr_worker()
    # inside _calc_leaf_fprs(), need to def it in outer scope
    @staticmethod
    def fpr_worker(leaf):
        # Accessing leaf.fpr triggers (lazy) hash calculation.
        return leaf.path, leaf.fpr
    def calc_leaf_fprs(self):
        # whether we use multiprocessing
        useproc = False
        if cfg.nthreads == 1 and cfg.nprocs == 1:
            # same as
            # self.leaf_fprs = dict((k,v.fpr) for k,v in self.tree.leafs.items())
            # just looks nicer :)
            getpool = SequentialPoolExecutor
        elif cfg.nthreads == 1:
            assert cfg.nprocs > 1
            getpool = lambda: ProcessPoolExecutor(cfg.nprocs)
            useproc = True
        elif cfg.nprocs == 1:
            assert cfg.nthreads > 1
            getpool = lambda: ThreadPoolExecutor(cfg.nthreads)
        else:
            getpool = lambda: ProcessAndThreadPoolExecutor(nprocs=cfg.nprocs,
                                                           nthreads=cfg.nthreads)
            useproc = True
        with getpool() as pool:
            self.leaf_fprs = dict(pool.map(self.fpr_worker,
                                           self.tree.leafs.values(),
                                           chunksize=1))
        # See class docstring: with multiprocessing, the main process' tree
        # is still unpopulated, so copy the results back by hand.
        if useproc and cfg.share_leafs:
            for leaf in self.tree.leafs.values():
                leaf.fpr = self.leaf_fprs[leaf.path]
    def calc_node_fprs(self):
        # node.fpr recursion is cheap here because all leaf fprs are already
        # populated (and @lazyprop caches every intermediate node).
        self.node_fprs = dict((node.path,node.fpr) for node in self.tree.nodes.values())
| StarcoderdataPython |
6440958 | import warnings
from typing import Iterable, Optional, Union, Tuple, List
ArraysetsRef = Union['ArraysetDataReader', Iterable['ArraysetDataReader']]
class GroupedAsets(object):
    """Groups hangar arraysets and validate suitability for usage in dataloaders.

    It can choose a subset of samples in the hangar arraysets by checking the
    list of keys or an index range. :class:`GroupedAsets` does not expect all
    the input hangar arraysets to have same length and same keys. It takes a
    `set.union` of sample names from all the arraysets and `keys` argument if
    passed and hence discard non-common keys while fetching. Based on `keys` or
    `index_range` (ignore `index_range` if `keys` is present) it makes a subset
    of sample names which is then used to fetch the data from hangar arraysets.
    """

    def __init__(self,
                 arraysets: 'ArraysetsRef',
                 keys: Optional[Iterable[Union[int, str]]] = None,
                 index_range: Optional[slice] = None):
        self.arrayset_array = []
        self.arrayset_names = []
        # Final, ordered selection of sample names common to all arraysets.
        # Fixed annotation: this is an optional variable-length tuple.
        self._allowed_samples: Optional[Tuple[Union[str, int], ...]] = None

        if not isinstance(arraysets, (list, tuple, set)):
            arraysets = (arraysets,)
        if len(arraysets) == 0:
            raise ValueError('len(arraysets) cannot == 0')

        aset_lens = set()
        all_aset_keys = []
        for arrayset in arraysets:
            if arrayset.iswriteable is True:
                raise TypeError('Cannot load arraysets opened in `write-enabled` checkout.')
            self.arrayset_array.append(arrayset)
            self.arrayset_names.append(arrayset.name)
            aset_lens.add(len(arrayset))
            all_aset_keys.append(set(arrayset.keys()))
        common_aset_keys = set.intersection(*all_aset_keys)
        if len(aset_lens) > 1:
            warnings.warn('Arraysets do not contain equal num samples', UserWarning)

        if keys:
            keys = set(keys)
            noncommon_keys = keys.difference(common_aset_keys)
            if len(noncommon_keys) > 0:
                raise ValueError(f'Keys: {noncommon_keys} do not exist in all arraysets.')
            self._allowed_samples = tuple(keys)
        elif index_range:
            if not isinstance(index_range, slice):
                raise TypeError(f'type(index_range): {type(index_range)} != slice')
            # need to sort before slicing on index_range, but since sample
            # keys can be mixed int and str type, convert ints to a common
            # '#<int>' string form, sort, then convert back before slicing.
            str_keys = [i if isinstance(i, str) else f'#{i}' for i in common_aset_keys]
            sorted_keys = sorted(str_keys)
            converted_keys = [int(i[1:]) if i.startswith('#') else i for i in sorted_keys]
            self._allowed_samples = tuple(converted_keys)[index_range]
        else:
            self._allowed_samples = tuple(common_aset_keys)

    def get_types(self, converter=None):
        """
        Get dtypes of the all the arraysets in the `GroupedAsets`.

        Parameters
        ----------
        converter : Callable
            A function that takes default dtype (numpy) and convert it to
            another format

        Returns
        -------
        A tuple of types
        """
        types = []
        for aset in self.arrayset_array:
            if converter:
                # Fix: removed leftover debug `print(aset)` from this branch.
                types.append(converter(aset.dtype))
            else:
                types.append(aset.dtype)
        return tuple(types)

    def get_shapes(self, converter=None):
        """
        Get shapes of the all the arraysets in the `GroupedAsets`.

        Parameters
        ----------
        converter : Callable
            A function that takes default shape (numpy) and convert it to
            another format

        Returns
        -------
        A tuple of arrayset shapes, or None if shapes are variable.
        """
        # NOTE(review): only the first arrayset's `variable_shape` flag is
        # consulted here -- confirm all arraysets are guaranteed to agree.
        if self.arrayset_array[0].variable_shape:
            return None
        shapes = []
        for aset in self.arrayset_array:
            if converter:
                shapes.append(converter(aset.shape))
            else:
                shapes.append(aset.shape)
        return tuple(shapes)

    @property
    def sample_names(self):
        """Tuple of sample names selected for loading."""
        return self._allowed_samples
| StarcoderdataPython |
5129272 | import unittest
import numpy as np
import openmdao.api as om
import numpy.testing as npt
import wisdem.commonse.environment as env
from wisdem.commonse import gravity as g
from openmdao.utils.assert_utils import assert_check_partials
npts = 100
myones = np.ones((npts,))
class TestPowerWind(unittest.TestCase):
    """Unit tests for env.PowerWind (power-law wind shear profile)."""
    def setUp(self):
        self.params = {}
        self.unknowns = {}
        self.resid = None
        self.params["shearExp"] = 2.0
        self.params["Uref"] = 5.0
        self.params["zref"] = 3.0
        self.params["z0"] = 0.0
        self.params["z"] = 9.0 * myones
        self.wind = env.PowerWind(nPoints=npts)
    def testRegular(self):
        # expected 45 = 5 * (9/3)**2, consistent with a power-law profile
        # Uref * ((z - z0) / (zref - z0))**shearExp (assumed from values).
        self.wind.compute(self.params, self.unknowns)
        expect = 45.0 * myones
        npt.assert_equal(self.unknowns["U"], expect)
    def testIndex(self):
        # Points below z0 should report zero wind speed.
        self.params["z"][1:] = -1.0
        self.wind.compute(self.params, self.unknowns)
        expect = 45.0 * myones
        expect[1:] = 0.0
        npt.assert_equal(self.unknowns["U"], expect)
    def testZ0(self):
        # Shifting z, zref and z0 by the same offset must not change U.
        self.params["z0"] = 10.0
        self.params["z"] += 10.0
        self.params["zref"] += 10.0
        self.wind.compute(self.params, self.unknowns)
        expect = 45.0 * myones
        npt.assert_equal(self.unknowns["U"], expect)
class TestLinearWaves(unittest.TestCase):
    """Unit tests for env.LinearWaves (linear / Airy wave kinematics)."""
    def setUp(self):
        self.params = {}
        self.unknowns = {}
        self.resid = None
        self.params["rho_water"] = 1e3
        self.params["Hsig_wave"] = 2.0
        self.params["Uc"] = 5.0
        self.params["z_floor"] = -30.0
        self.params["z_surface"] = 0.0
        self.params["z"] = -2.0 * myones
        self.wave = env.LinearWaves(nPoints=npts)
    def testRegular(self):
        # Pick wavenumber k, derive omega from the dispersion relation
        # omega**2 = g*k*tanh(k*D), then set the period accordingly.
        D = np.abs(self.params["z_floor"])
        k = 2.5
        omega = np.sqrt(g * k * np.tanh(k * D))
        self.params["Tsig_wave"] = 2.0 * np.pi / omega
        self.wave.compute(self.params, self.unknowns)
        a = 1.0  # 0.5*Hsig_wave
        z = -2.0
        rho = 1e3
        # Reference values from linear wave theory (current Uc = 5 added to U).
        U_exp = 5 + omega * a * np.cosh(k * (z + D)) / np.sinh(k * D)
        W_exp = -omega * a * np.sinh(k * (z + D)) / np.sinh(k * D)
        V_exp = np.sqrt(U_exp ** 2 + W_exp ** 2)
        A_exp = omega * omega * a * np.cosh(k * (z + D)) / np.sinh(k * D)
        p_exp = -rho * g * (z - a * np.cosh(k * (z + D)) / np.cosh(k * D))
        npt.assert_almost_equal(self.unknowns["U"], U_exp)
        npt.assert_almost_equal(self.unknowns["W"], W_exp)
        npt.assert_almost_equal(self.unknowns["V"], V_exp)
        npt.assert_almost_equal(self.unknowns["A"], A_exp)
        npt.assert_almost_equal(self.unknowns["p"], p_exp)
        # Positive depth input
        self.params["z_floor"] = 30.0
        self.wave.compute(self.params, self.unknowns)
        npt.assert_almost_equal(self.unknowns["U"], U_exp)
        npt.assert_almost_equal(self.unknowns["W"], W_exp)
        npt.assert_almost_equal(self.unknowns["V"], V_exp)
        npt.assert_almost_equal(self.unknowns["A"], A_exp)
        npt.assert_almost_equal(self.unknowns["p"], p_exp)
    def testPositiveZ(self):
        # Above the water surface all kinematics should be zero.
        self.params["Tsig_wave"] = 2.0
        self.params["z"] = 2.0 * myones
        self.wave.compute(self.params, self.unknowns)
        npt.assert_equal(self.unknowns["U"], 0.0)
        npt.assert_equal(self.unknowns["W"], 0.0)
        npt.assert_equal(self.unknowns["V"], 0.0)
        npt.assert_equal(self.unknowns["A"], 0.0)
        npt.assert_equal(self.unknowns["p"], 0.0)
    def testQuiet(self):
        # Zero wave height: only the current (Uc=5) and hydrostatic pressure.
        self.params["Hsig_wave"] = 0.0
        self.params["Tsig_wave"] = 2.0
        self.wave.compute(self.params, self.unknowns)
        p_exp = 2e3 * g
        npt.assert_equal(self.unknowns["U"], 5.0)
        npt.assert_equal(self.unknowns["W"], 0.0)
        npt.assert_equal(self.unknowns["V"], 5.0)
        npt.assert_equal(self.unknowns["A"], 0.0)
        npt.assert_equal(self.unknowns["p"], p_exp)
class TestPowerWindGradients(unittest.TestCase):
    """Check PowerWind analytic partials against finite differences."""
    def test(self):
        z = np.linspace(0.0, 100.0, 20)
        nPoints = len(z)
        prob = om.Problem()
        root = prob.model = om.Group()
        root.add_subsystem("p", env.PowerWind(nPoints=nPoints))
        prob.setup()
        prob["p.Uref"] = 10.0
        prob["p.zref"] = 100.0
        prob["p.z0"] = 0.001  # Fails when z0 = 0, What to do here?
        prob["p.shearExp"] = 0.2
        prob.run_model()
        check = prob.check_partials(out_stream=None, compact_print=True, method="fd")
        assert_check_partials(check)
class TestLogWindGradients(unittest.TestCase):
    """Check LogWind analytic partials against finite differences."""
    def test(self):
        nPoints = 20
        z = np.linspace(0.1, 100.0, nPoints)
        prob = om.Problem()
        root = prob.model = om.Group()
        root.add_subsystem("p", env.LogWind(nPoints=nPoints))
        prob.setup()
        prob["p.Uref"] = 10.0
        prob["p.zref"] = 100.0
        prob["p.z0"] = 0.1  # Fails when z0 = 0
        prob.run_model()
        check = prob.check_partials(out_stream=None, compact_print=True, method="fd")
        assert_check_partials(check)
### These partials are wrong; do not test
# class TestLinearWaveGradients(unittest.TestCase):
#
# def test(self):
#
# z_floor = 0.1
# z_surface = 20.
# z = np.linspace(z_floor, z_surface, 20)
# nPoints = len(z)
#
# prob = om.Problem()
# root = prob.model = om.Group()
# root.add_subsystem('p', env.LinearWaves(nPoints=nPoints))
#
# prob.setup()
#
# prob['p.Uc'] = 7.0
# prob['p.z_floor'] = z_floor
# prob['p.z_surface'] = z_surface
# prob['p.Hsig_wave'] = 10.0
# prob['p.Tsig_wave'] = 2.0
#
# prob.run_model()
#
# check = prob.check_partials(out_stream=None, compact_print=True, method='fd')
#
# assert_check_partials(check)
### The partials are currently not correct, so skip this test
# class TestSoilGradients(unittest.TestCase):
#
# def test(self):
#
# d0 = 10.0
# depth = 30.0
# G = 140e6
# nu = 0.4
#
# prob = om.Problem()
# root = prob.model = om.Group()
# root.add_subsystem('p', env.TowerSoil())
#
# prob.setup()
#
# prob['p.G'] = G
# prob['p.nu'] = nu
# prob['p.d0'] = d0
# prob['p.depth'] = depth
#
# prob.run_model()
#
# check = prob.check_partials(out_stream=None, compact_print=True, method='fd')
#
# assert_check_partials(check)
def suite():
    """Build the test suite for this module.

    Uses ``TestLoader.loadTestsFromTestCase`` instead of the deprecated
    ``unittest.makeSuite`` (removed in Python 3.13); also avoids shadowing
    this function's own name with a local variable.
    """
    loader = unittest.TestLoader()
    test_suite = unittest.TestSuite()
    for case in (TestPowerWind, TestLinearWaves,
                 TestPowerWindGradients, TestLogWindGradients):
        test_suite.addTests(loader.loadTestsFromTestCase(case))
    return test_suite
if __name__ == "__main__":
    # Run the suite directly; process exit status reflects pass/fail for CI.
    result = unittest.TextTestRunner().run(suite())
    if result.wasSuccessful():
        exit(0)
    else:
        exit(1)
| StarcoderdataPython |
4964578 | #!/usr/bin/env python3
"""
Script to collect system-level resource info indluding CPU, memory, disk and netwwork.
"""
import psutil
import subprocess
import os
import sys
import time
import signal
import threading
from desc.sysmon import __version__
def none():
    """Always return ``None`` (default no-op check callback for reporter)."""
    return None
class Notify:
    """Callable flag used to tell the reporter loop when to exit.

    Calling an instance returns the current value; ``set`` updates it.
    The reporter keeps polling while the value is ``None``.
    """

    def __init__(self, val=None):
        self.value = val

    def __call__(self):
        return self.value

    def set(self, val=None):
        self.value = val
class Params:
    """Holds the parameters for reporter(), with class-level defaults."""
    fnam = 'sysmon.csv'
    dt = 10
    check = none
    timeout = 0
    subcom = ''
    dbg = 1
    thr = False
    log = ''
    frqfnam = ''

    # Names of all settable parameters (used by __init__ and update).
    _names = ('fnam', 'dt', 'check', 'timeout', 'subcom',
              'dbg', 'thr', 'log', 'frqfnam')

    def __init__(self):
        """Copy the class-level defaults onto this instance."""
        for name in self._names:
            setattr(self, name, getattr(Params, name))

    def update(self, vals):
        """Set attributes from mapping *vals*; unknown names raise KeyError."""
        for key in vals:
            if key in self._names:
                setattr(self, key, vals[key])
            else:
                raise KeyError(f"Invalid reporter parameter name: {key}")
else: raise KeyError(f"Invalid reporter parameter name: {key}")
def reporter(fnam =Params.fnam, dt =Params.dt, check =Params.check, timeout =Params.timeout,
             subcom =Params.subcom, dbg=Params.dbg, thr=Params.thr, log=Params.log,
             frqfnam=''):
    """
    Report system parameters.
      fnam - Output file name ['sysmon.csv'].
      dt - Polling time interval in seconds [10].
      subcom - If not empty, then subcom is run as a subprocess
               and polling ceases when that process exits [''].
      check - If not None polling ceases when check() returns anything
              except None [None].
      timeout - If nonzero, polling ceases after timeout seconds [0].
      dbg - Log message level: 0=none, 1=minimal, 2=config, 3=every sample
      thr - If true, reporter is run in a thread and this returns immediately.
      log - If non-blank, logging is to this file. Blank means stdout.
      frqfnam - If non-blank, per-cpu CPU freqs are written to this file.
    Returns 0 on normal termination (or the Thread object when thr is true).
    """
    myname = 'sysmon.reporter[' + threading.current_thread().name + ']'
    # Open log file.
    fout = sys.stdout
    if len(log):
        fout = open(log, 'w')
    # Define signal handler in main thread only (signal.signal may only be
    # called from the main thread).
    sigTerm = False
    if threading.current_thread() is threading.main_thread() and not thr:
        print(f"{myname}: Setting handler for SIGTERM")
        def signal_handler(*args):
            nonlocal sigTerm
            if dbg: print(f"{myname}: Received terminate signal.", file=fout, flush=True)
            sigTerm = True
        signal.signal(signal.SIGTERM, signal_handler)  # Or whatever signal
    # Display config.
    if dbg > 1:
        print(f"{myname}: fnam: {fnam}", file=fout)
        print(f"{myname}: dt: {dt}", file=fout)
        print(f"{myname}: subcom: {subcom}", file=fout)
        print(f"{myname}: check: {check}", file=fout)
        print(f"{myname}: timeout: {timeout}", file=fout)
        print(f"{myname}: dbg: {dbg}", file=fout)
        print(f"{myname}: thr: {thr}", file=fout)
        print(f"{myname}: log: {log}", file=fout)
    # Make sure the output directory exists.
    dnam = os.path.dirname(fnam)
    if len(dnam):
        if not os.path.isdir(dnam):
            if dbg > 0: print(f"{myname}: Creating output directory {dnam}", file=fout)
            os.makedirs(dnam)
    # If this is a thread request, create and start the thread.
    if thr:
        if dbg > 0: print(f"{myname}: Starting thread.", file=fout)
        args = (fnam, dt, check, timeout, subcom, dbg, False, log, frqfnam, )
        t = threading.Thread(target=reporter, args=args)
        t.start()
        return t
    # (Fix: removed a duplicated "Starting reporter" message here.)
    if dbg > 0:
        print(f"{myname}: Starting reporter version {__version__}", file=fout)
        print(f"{myname}: Monitor output file is {fnam}", file=fout)
        print(f"{myname}: Log file is {log}", file=fout)
    subproc = None
    # Specify which fields are to be recorded.
    # cpu_time = user + system + idle + ...
    # memory fields are in GB = 2e30 byte
    # Memory and I/0 are all incremental, i.e. the change since the sampling.
    keys = ['time', 'cpu_count', 'cpu_percent']
    # Fix: this entry had been mangled to '<KEY>'; d['cpu_freq'] is recorded
    # below, so the header/field name must be 'cpu_freq'.
    keys += ['cpu_freq']
    keys += ['cpu_user', 'cpu_system', 'cpu_idle', 'cpu_iowait', 'cpu_time']
    keys += ['mem_total', 'mem_available', 'mem_swapfree']  # Should we add swap total, in and out?
    #keys += ['dio_readcount', 'dio_writecount']
    keys += ['dio_readsize', 'dio_writesize']
    keys += ['nio_readsize', 'nio_writesize']
    hdrline = keys[0]
    for key in keys[1:]:
        hdrline += ',' + key
    # When appending to an existing file, require a matching header.
    if os.path.exists(fnam) and os.path.getsize(fnam):
        firstline = next(open(fnam, 'r')).rstrip()
        if firstline != hdrline:
            raise Exception(f"""File {fnam} does not have the expected header.""")
        needheader = False
    else:
        needheader = True
    csv_file = None
    frq_file = None
    needfrqheader = False
    reason = 'of an unexpected error'
    try:
        csv_file = open(fnam, "a")
        if len(frqfnam):
            # Fix: the second branch used to be a plain `if` that always
            # re-opened the file in 'w' mode, truncating existing data.
            if os.path.exists(frqfnam) and os.path.getsize(frqfnam):
                frq_file = open(frqfnam, 'a')
                needfrqheader = False
            else:
                frq_file = open(frqfnam, 'w')
                needfrqheader = True
        if dbg > 2: print(f"{myname}: Reporting...", file=fout)
        if needheader:
            print(hdrline, file=csv_file, flush=True)
            needheader = False
        cptlast = None
        diolast = None
        niolast = None
        time0 = time.time()
        npoll = 0
        while True:
            d = {}
            now = time.time()
            d['time'] = now
            d['cpu_count'] = psutil.cpu_count()
            d['cpu_percent'] = psutil.cpu_percent()
            d['cpu_freq'] = psutil.cpu_freq().current
            freqs = psutil.cpu_freq(True)
            # user, nice, system, idle, iowait, irq, softirq, steal, guest, guest_nice
            cpt = psutil.cpu_times()
            mem = psutil.virtual_memory()
            # Fix: swap stats come from swap_memory(); the original sampled
            # virtual_memory() twice, so mem_swapfree reported RAM free.
            swap = psutil.swap_memory()
            dio = psutil.disk_io_counters()
            nio = psutil.net_io_counters()
            cptsum = sum(cpt)
            if cptlast is None:
                cptlast = cpt
                cptsumlast = cptsum
            d['cpu_user'] = cpt.user - cptlast.user
            d['cpu_system'] = cpt.system - cptlast.system
            d['cpu_idle'] = cpt.idle - cptlast.idle
            d['cpu_iowait'] = cpt.iowait - cptlast.iowait
            d['cpu_time'] = cptsum - cptsumlast
            cptlast = cpt
            cptsumlast = cptsum
            gb = 2**30
            d['mem_total'] = mem.total/gb
            d['mem_available'] = mem.available/gb
            d['mem_swapfree'] = swap.free/gb
            if diolast is None:
                diolast = dio
            d['dio_readcount'] = dio.read_count - diolast.read_count
            d['dio_writecount'] = dio.write_count - diolast.write_count
            d['dio_readsize'] = (dio.read_bytes - diolast.read_bytes)/gb
            d['dio_writesize'] = (dio.write_bytes - diolast.write_bytes)/gb
            diolast = dio
            if niolast is None:
                niolast = nio
            d['nio_readsize'] = (nio.bytes_recv - niolast.bytes_recv)/gb
            d['nio_writesize'] = (nio.bytes_sent - niolast.bytes_sent)/gb
            niolast = nio
            # Write the selected fields.
            sep = ''
            line = ''
            fmt = '.3f'
            for key in keys:
                line = f"""{line}{sep}{d[key]:{fmt}}"""
                sep = ','
            print(line, file=csv_file, flush=True)
            if frq_file is not None:
                if needfrqheader:
                    line = 'time'
                    for icpu in range(len(freqs)):
                        line += f",{icpu}"
                    print(line, file=frq_file, flush=True)
                    needfrqheader = False
                line = f"{d['time']:{fmt}}"
                for frq in freqs:
                    line += f",{frq.current:{fmt}}"
                print(line, file=frq_file, flush=True)
            npoll += 1
            status = None
            if len(subcom):
                # If we are given a subprocess command, make sure that we poll
                # before the process starts and after it ends.
                if subproc is None:
                    subproc = subprocess.Popen(subcom.split())
                    if dbg: print(f"{myname}: Started subprocess {subproc.pid}", file=fout)
                else:
                    if subproc.poll() is not None:
                        status = subproc.returncode
                        reason = f'subprocess terminated with status {subproc.returncode}'
                        break
            checkval = None if check is None else check()
            if dbg > 2: print(f'{myname}: Check returns {checkval}', file=fout)
            if checkval is not None:
                reason = f'check returned {checkval}'
                break
            if timeout > 0 and now - time0 > timeout:
                reason = f'total time exceeded {timeout} sec'
                break
            if sigTerm:
                reason = f'terminate signal was received'
                break
            if dbg > 2: print(f'{myname}: Sleeping...', file=fout, flush=True)
            time.sleep(dt)
    finally:
        # Fix: guard the closes (the original referenced csv_file in finally
        # even when open() had failed) and also close the frequency file.
        if csv_file is not None:
            csv_file.close()
        if frq_file is not None:
            frq_file.close()
    if dbg > 0: print(f"{myname}: Polling terminated because {reason}.", file=fout)
    if dbg > 0: print(f"{myname}: Poll count is {npoll}.", file=fout)
    if dbg > 0: print(f"{myname}: Done.", file=fout)
    if len(log): fout.close()
    return 0
def reporter_from_string(scfg =''):
    # NOTE(review): this looks like an unfinished stub -- it only binds a
    # local name and returns None. Confirm whether it should parse *scfg*
    # into Params and call reporter(), like main_reporter() does.
    myname = 'reporter_from_string'
def main_reporter():
    """
    Main function wrapper for reporter.
    First argument is the configuration to exec, e.g.
      'fnam="syslog.csv";dt=5;timeout=3600'
    Remaining arguments are the subcommand, e.g.
      run-my-jobs arg1 arg2
    The reporter will start logging and continue until the command returns.
    """
    import sys
    print('Running the desc-sysmon-reporter')
    myname = 'main_reporter'
    pars = Params()
    cfg = sys.argv[1] if len(sys.argv) > 1 else ''
    if len(cfg):
        print(f"{myname}: Configuring with '{cfg}'")
        glos = {}
        vals = {}
        # NOTE: exec of a command-line string; only acceptable because the
        # config comes from the invoking user, never from untrusted input.
        exec(cfg, glos, vals)
        pars.update(vals)
    if len(sys.argv) > 2:
        # Fix: assign the subcommand before reporting it, interpolate it into
        # the message (the old code printed the literal 'pars.subcom'), and
        # require argv to actually contain a command (index 2 onward).
        pars.subcom = " ".join(sys.argv[2:])
        print(f"{myname}: Monitored command is '{pars.subcom}'")
    reporter(pars.fnam, pars.dt, pars.check, pars.timeout,
             pars.subcom, pars.dbg, pars.thr, pars.log, pars.frqfnam)
#if __name__ == "__main__":
# main_reporter()
| StarcoderdataPython |
4882459 | <gh_stars>0
from neurons_and_synapses import *
import copy
def generate_memory_synapses(G,idx_start=0,name=''):
    """Wire an 8-neuron memory micro-circuit inside neuron group G.

    Neurons idx_start..idx_start+7 take the roles listed in ``neurons_idx``.
    Returns (synapses_list, nodes_names) where nodes_names maps each role
    name (suffixed with *name*) to its absolute neuron index in G.
    """
    synapses_list = []
    # Role -> relative index; shifted by idx_start below.
    neurons_idx = {'input': 0, 'first': 1, 'last': 2, 'acc': 3, 'acc2': 4, 'ready': 5, 'recall': 6, 'output': 7}
    for _key in neurons_idx:
        neurons_idx[_key] +=idx_start
    #Transmission of the input spike
    synapses_list += [v_synapse(G,G,neurons_idx['input'],neurons_idx['first'],weight=w_e)]
    synapses_list += [v_synapse(G, G, neurons_idx['input'], neurons_idx['last'], weight=w_e*0.5)]
    #inhibition (self-inhibition so 'first' fires only once per input)
    synapses_list += [ v_synapse(G,G,neurons_idx['first'],neurons_idx['first'],weight=-w_e)]
    #accumulation
    synapses_list += [ g_e_synapse(G,G,neurons_idx['first'],neurons_idx['acc'],w_acc)]
    #accumulation_2
    synapses_list += [ g_e_synapse(G, G, neurons_idx['last'], neurons_idx['acc2'], w_acc)]
    #stop accumulation
    synapses_list += [g_e_synapse(G, G, neurons_idx['acc'], neurons_idx['acc2'], -w_acc)]
    # value is ready
    synapses_list += [v_synapse(G, G, neurons_idx['acc'], neurons_idx['ready'], w_e)]
    #empty accumulator
    synapses_list += [ g_e_synapse(G, G, neurons_idx['recall'], neurons_idx['acc2'], w_acc)]
    #1st output spike
    synapses_list += [ v_synapse(G, G, neurons_idx['recall'], neurons_idx['output'], w_e)]
    #2nd output spike
    synapses_list += [ v_synapse(G, G, neurons_idx['acc2'], neurons_idx['output'], w_e)]
    nodes_names = {}
    for _key in neurons_idx:
        nodes_names[_key+name] = neurons_idx[_key]
    return synapses_list,nodes_names
def generate_persistant_memory_synapses(G, idx_start=0, name=''):
    """Build a persistent memory circuit: the basic memory circuit from
    generate_memory_synapses plus feedback synapses that re-write the
    recalled value back into the accumulator."""
    base_synapses, neurons_idx = generate_memory_synapses(G, idx_start=idx_start)
    out = neurons_idx['output']
    feedback = [
        v_synapse(G, G, out, neurons_idx['first'], w_e),
        v_synapse(G, G, out, neurons_idx['last'], w_e / 2),
        v_synapse(G, G, neurons_idx['input'], neurons_idx['acc2'], w_e),
        v_synapse(G, G, neurons_idx['input'], out, -w_e),
    ]
    synapses_list = base_synapses + feedback
    nodes_names = {key + name: idx for key, idx in neurons_idx.items()}
    return synapses_list, nodes_names
if __name__ == '__main__':
    # Demo: store a value via two input spike pairs, recall it twice,
    # and monitor the output neuron (index 7).
    G = neurons(8)
    synapses_list, _ = generate_persistant_memory_synapses(G)
    indices = array([0, 0, 0,0])
    times = array([0,15,500,510]) * ms
    inp = SpikeGeneratorGroup(1, indices, times)
    s = v_synapse(inp, G, i=0, j=0, weight=w_e, delay=0)
    recall = SpikeGeneratorGroup(1, [0,0], [250*ms,900*ms])
    s_recall = v_synapse(recall, G, i=0, j=6, weight=w_e, delay=0)
    M = StateMonitor(G, 'v', record=True)
    S_monitor = SpikeMonitor(G,'v',record=True)
    print('building model')
    net_1 = Network(G,inp,s,*synapses_list,M,recall,s_recall,S_monitor)
    print('starting computation')
    net_1.run(1000*ms)
    print('computation end')
    plot(M.t / ms, M.v[7], 'C0', label='Brian_2')
    # NOTE(review): the expression below computes output spike times but
    # discards the result -- confirm whether a print/assert was intended.
    S_monitor.all_values()['t'][7]/ms
| StarcoderdataPython |
142961 | <gh_stars>0
# Simple integer constants (exercise file).
num, num1, num2, num3 = 10, 10, 20, 30
6660656 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-02-02 14:16
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import softhub.models.Executable
class Migration(migrations.Migration):
    # Auto-generated by Django 1.10: creates the Executable model with an
    # M2M to OperatingSystem and a FK to Version. Generated migrations
    # should not be edited by hand; create a new migration instead.
    dependencies = [
        ('softhub', '0004_version'),
    ]
    operations = [
        migrations.CreateModel(
            name='Executable',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('info', models.CharField(blank=True, max_length=200)),
                ('executable_file', models.FileField(upload_to=softhub.models.Executable.upload_dir)),
                ('release_platform', models.ManyToManyField(to='softhub.OperatingSystem')),
                ('version', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='version_executable', to='softhub.Version')),
            ],
        ),
    ]
| StarcoderdataPython |
1916680 | <reponame>danixeee/textx-gen-syntaxhighlighting
from os.path import dirname, join
from textx import metamodel_from_file
mm_path = dirname(__file__)
def _get_metamodel(file_name):
    """Load a textX metamodel stored next to this module."""
    full_path = join(mm_path, file_name)
    return metamodel_from_file(full_path)
# Metamodel for the syntax-highlighting coloring DSL (coloring.tx).
coloring_mm = _get_metamodel("coloring.tx")
| StarcoderdataPython |
1896684 | #<NAME> 01/04/18
#Exercise 6
#function to output the factorial of a positive integer
# define function
def myfactorial(x):
    """Return x! (the factorial of a non-negative integer x).

    Parameters
    ----------
    x : int
        Non-negative integer; 0! is defined as 1.

    Raises
    ------
    ValueError
        If x is negative (the original silently returned 1).
    """
    if x < 0:
        raise ValueError("factorial is undefined for negative numbers")
    ans = 1
    # multiply ans by every integer from 1 up to and including x
    for i in range(1, x + 1):
        ans *= i
    return ans
# Demo: print a few sample factorials when the script runs.
print("the factorial of 10: ", myfactorial(10))
print("the factorial of 7: ", myfactorial(7))
print("the factorial of 5: ", myfactorial(5))
| StarcoderdataPython |
4813488 | from FreeTAKServer.model.FTSModel.fts_protocol_object import FTSProtocolObject
#######################################################
#
# Mission.py
# Python implementation of the Class Mission
# Generated by Enterprise Architect
# Created on(FTSProtocolObject): 11-Feb-2020 11(FTSProtocolObject):08(FTSProtocolObject):08 AM
# Original author: Corvo
#
#######################################################
from FreeTAKServer.model.FTSModelVariables.MissionVariables import MissionVariables as vars
from FreeTAKServer.model.FTSModel.MissionChanges import MissionChanges
class Mission(FTSProtocolObject):
    """FTS protocol model for a Mission element (type/tool/name/authorUid).

    Follows the FTSModel convention of explicit set*/get* accessors.
    """
    def __init__(self):
        self.type = None
        self.tool = None
        self.name = None
        self.authorUid = None
    @staticmethod
    def ExcheckUpdate(TYPE=vars.ExcheckUpdate().TYPE, TOOL=vars.ExcheckUpdate().TOOL,
                      NAME=vars.ExcheckUpdate().NAME,
                      AUTHORUID=vars.ExcheckUpdate().AUTHORUID, ):
        # Alternate constructor for an Excheck update mission.
        # NOTE(review): the default values above are evaluated once at import
        # time; confirm vars.ExcheckUpdate() carries no per-call state.
        mission = Mission()
        mission.settype(TYPE)
        mission.settool(TOOL)
        mission.setname(NAME)
        mission.setauthorUid(AUTHORUID)
        mission.MissionChanges = MissionChanges.ExcheckUpdate()
        return mission
    def settype(self, type):
        self.type = type
    def gettype(self):
        return self.type
    def settool(self, tool):
        self.tool = tool
    def gettool(self):
        return self.tool
    def setname(self, name):
        self.name = name
    def getname(self):
        return self.name
    def setauthorUid(self, authorUid):
        self.authorUid = authorUid
    def getauthorUid(self):
        return self.authorUid
if __name__ == "__main__":
    # Smoke check: building a default ExcheckUpdate mission must not raise.
    y = Mission.ExcheckUpdate()
6428202 | <reponame>paulhuggett/pstore2<filename>system_tests/broker/broker_kill.py
#!/usr/bin/env python
# ===- system_tests/broker/broker_kill.py ---------------------------------===//
# * _ _ _ _ _ _ *
# * | |__ _ __ ___ | | _____ _ __ | | _(_) | | *
# * | '_ \| '__/ _ \| |/ / _ \ '__| | |/ / | | | *
# * | |_) | | | (_) | < __/ | | <| | | | *
# * |_.__/|_| \___/|_|\_\___|_| |_|\_\_|_|_| *
# * *
# ===----------------------------------------------------------------------===//
#
# Part of the pstore project, under the Apache License v2.0 with LLVM Exceptions.
# See https://github.com/SNSystems/pstore/blob/master/LICENSE.txt for license
# information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# ===----------------------------------------------------------------------===//
from __future__ import print_function
import argparse
import collections
import os.path
import signal
import subprocess
import sys
import time
# Local imports
import common
import timed_process
# Absolute paths to the two pstore binaries exercised by this system test.
ToolPaths = collections.namedtuple('ToolPaths', ['broker', 'poker'])
def get_tool_paths(exe_path):
    """Locate the broker and poker executables under *exe_path*.

    Raises RuntimeError if either binary is missing.
    """
    def locate(tool, label):
        path = os.path.join(exe_path, common.executable(tool))
        if not os.path.exists(path):
            raise RuntimeError('Did not find %s executable at "%s"' % (label, path))
        return path

    return ToolPaths(broker=locate('pstore-brokerd', 'broker'),
                     poker=locate('pstore-broker-poker', 'broker-poker'))
def main(argv):
    """Start the broker, send it a terminate signal, and verify it exits.

    Returns the process exit code (0 on success).
    """
    exit_code = 0
    argv0 = sys.argv[0]
    parser = argparse.ArgumentParser(description='Test the broker by using the poker to fire messages at it.')
    parser.add_argument('exe_path', help='The path of the pstore binaries')
    parser.add_argument('--timeout', help='Process timeout in seconds', type=float,
                        default=timed_process.DEFAULT_PROCESS_TIMEOUT)
    args = parser.parse_args(args=argv)
    paths = get_tool_paths(args.exe_path)
    pipe_path = os.path.join(common.pipe_root_dir(), 'pstore_broker_kill')
    broker_command = [paths.broker, '--pipe-path', pipe_path, '--disable-http']
    print("Popen: ", ' '.join(broker_command), file=sys.stderr)
    # On Windows a new process group is needed so CTRL_BREAK_EVENT can be
    # delivered to the broker without hitting this test process.
    broker_process = timed_process.TimedProcess(args=broker_command,
                                                timeout=args.timeout,
                                                name='broker',
                                                creation_flags=subprocess.CREATE_NEW_PROCESS_GROUP if common.IS_WINDOWS else 0)
    broker_process.start()
    # TODO: this is a crude way to know whether the broker is up: we don't know how long the system
    # will take to start the process before it actually gets to do any work.
    time.sleep(2)  # Wait until the broker is alive.
    broker_process.send_signal(signal.CTRL_BREAK_EVENT if common.IS_WINDOWS else signal.SIGTERM)
    print("Sent SIGTERM. Waiting for broker to exit.", file=sys.stderr)
    broker_process.join()
    print("Broker exited. Done.", file=sys.stderr)
    # NOTE(review): exit_code is never updated after report_error -- confirm
    # whether common.report_error is expected to raise on failure.
    common.report_error(argv0, 'broker', broker_process)
    print(broker_process.output())
    return exit_code
if __name__ == '__main__':
    # Forward all CLI args (minus the program name) and propagate the status.
    sys.exit(main(sys.argv[1:]))
| StarcoderdataPython |
4973566 | <reponame>daichi-yoshikawa/dnnet
# Authors: <NAME> <<EMAIL>>
# License: BSD 3 clause
import dnnet.utils.numcupy as ncp
from dnnet.ext_mathlibs import cp, np
from dnnet.exception import DNNetRuntimeError
from dnnet.layers.layer import Layer
from dnnet.training.weight_initialization import DefaultInitialization
from dnnet.utils.nn_utils import asnumpy
from dnnet.utils.cnn_utils import pad_img, im2col, col2im
class ConvolutionLayer(Layer):
    def __init__(
            self, filter_shape, pad=(0, 0), strides=(1, 1),
            weight_initialization=DefaultInitialization()):
        """Configure a 2-d convolution layer.

        filter_shape : (n_filters, n_rows_filter, n_cols_filter)
        pad          : zero padding (rows, cols) applied in __forward
        strides      : (row, col) step of the sliding window
        NOTE(review): the DefaultInitialization() default is evaluated once
        at import time and shared by all instances -- confirm it is stateless.
        """
        self.filter_shape = filter_shape
        self.pad = pad
        self.strides = strides
        self.weight_initialization = weight_initialization
        # Cached im2col representation of the last forward input (set in
        # __forward, consumed by __backward for the weight gradient).
        self.x = None
    def set_dtype(self, dtype):
        """Set the numeric dtype used for weights and activations."""
        self.dtype = dtype
    def get_type(self):
        """Return this layer's type identifier string."""
        return 'convolution'
    def get_config_str_tail(self):
        """Extend the base config string with filter/pad/stride settings."""
        tail = Layer.get_config_str_tail(self) + ', '
        tail += 'filter: %s, ' % (self.filter_shape,)
        tail += 'pad: %s, ' % (self.pad,)
        tail += 'strides: %s' % (self.strides,)
        return tail
    def set_parent(self, parent):
        """Attach to *parent*, validate input shape, create weights and
        derive this layer's output shape."""
        Layer.set_parent(self, parent)
        self.__check_shape(self.input_shape)
        self.__init_weight(parent)
        self.__set_output_shape()
    def has_weight(self):
        """Convolution layers carry trainable weights."""
        return True
    def forward(self, x):
        """Compute this layer's activation and propagate to the child."""
        self.__forward(x)
        self.child.forward(self.fire)
def backward(self, dy):
self.__backward(dy)
self.parent.backward(self.backfire)
def predict(self, x):
self.__forward(x)
return self.child.predict(self.fire)
def __forward(self, x):
x = cp.array(x)
if len(x.shape) != 4:
msg = 'Convolution layer assumes that input is 4-d array.\n'\
+ ' shape : %s' % str(x.shape)
raise DNNetRuntimeError(msg)
n_batches, _, _, _ = x.shape
n_channels, n_rows, n_cols = self.output_shape
x_pad = pad_img(x, self.pad[0], self.pad[1])
x = im2col(x_pad, self.filter_shape, self.strides)
x = cp.c_[cp.ones((x.shape[0], 1), dtype=self.dtype), x]
fire = cp.dot(x, cp.array(self.w))
fire = fire.reshape(n_batches, n_rows, n_cols, n_channels)
fire = fire.transpose(0, 3, 1, 2)
self.x = asnumpy(x)
self.fire = asnumpy(fire)
def __backward(self, dy):
dy = cp.array(dy)
n_batches, _, _, _ = self.fire.shape
n_channels, n_rows, n_cols = self.input_shape
n_filters, n_rows_filter, n_cols_filter = self.filter_shape
dy = dy.transpose(0, 2, 3, 1).reshape(-1, n_filters)
input_shape = (n_batches, n_channels, n_rows, n_cols)
backfire = np.dot(dy, cp.array(self.w[1:, :]).T)
backfire = col2im(
backfire, input_shape, self.output_shape,
self.filter_shape, self.pad, self.strides, aggregate=True)
if self.pad[0] > 0:
backfire = backfire[:, :, self.pad[0]:-self.pad[0], :]
if self.pad[1] > 0:
backfire = backfire[:, :, :, self.pad[1]:-self.pad[1]]
self.backfire = asnumpy(backfire)
self.dw = asnumpy(self.dtype(1.) / n_batches * cp.dot(cp.array(self.x).T, dy))
def __check_shape(self, shape):
if not isinstance(shape, tuple):
msg = 'Invalid type of shape : ' + type(shape)
raise DNNetRuntimeError(msg)
elif len(shape) != 3:
msg = 'Invalid shape : ' + str(shape)\
+ '\nShape must be (channels, rows, cols).'
raise DNNetRuntimeError(msg)
def __init_weight(self, parent):
n_channels, _, _ = self.input_shape
n_filters, n_rows_filter, n_cols_filter = self.filter_shape
n_rows = n_channels * n_rows_filter * n_cols_filter
n_cols = n_filters
self.w = self.weight_initialization.get(n_rows, n_cols, self).astype(self.dtype)
self.w = np.r_[np.zeros((1, n_cols)), self.w]
self.w = self.w.astype(self.dtype)
self.dw = np.zeros_like(self.w, dtype=self.dtype)
def __set_output_shape(self):
n_channels_in, n_rows_in, n_cols_in = self.input_shape
n_channels_filter, n_rows_filter, n_cols_filter = self.filter_shape
n_channels_out = n_channels_filter
rem_rows = n_rows_in + 2*self.pad[0] - n_rows_filter
rem_rows %= self.strides[0]
rem_cols = n_cols_in + 2*self.pad[1] - n_cols_filter
rem_cols %= self.strides[1]
if (rem_rows > 0) or (rem_cols > 0):
msg = 'Invalid combos of input, filter, pad, and stride.\n'\
+ ' input shape : %s\n' % str(self.input_shape)\
+ ' filter shape : %s\n' % str(self.filter_shape)\
+ ' pad, stride : %s, %s'\
% (str(self.pad), str(self.strides))
raise DNNetRuntimeError(msg)
n_rows_out = n_rows_in + 2*self.pad[0] - n_rows_filter
n_rows_out = n_rows_out // self.strides[0] + 1
n_cols_out = n_cols_in + 2*self.pad[1] - n_cols_filter
n_cols_out = n_cols_out // self.strides[1] + 1
self.output_shape = (n_channels_out, n_rows_out, n_cols_out)
| StarcoderdataPython |
1851671 | <filename>src/spaceone/repository/error/repository.py<gh_stars>1-10
from spaceone.core.error import *
class ERROR_NO_REPOSITORY(ERROR_BASE):
    # Raised when an operation targets a repository that does not exist.
    _message = 'Repository does not exists.'
class ERROR_REPOSITORY_BACKEND(ERROR_BASE):
    # Backend-side failure; '{host}' is interpolated by the error framework.
    _status_code = 'INTERNAL'
    _message = 'Repository backend has problem. ({host})'
class ERROR_AWS_ECR_TOKEN(ERROR_BASE):
    # Raised when fetching an AWS ECR auth token fails (IAM policy issue).
    _message = 'Failed to get ECR TOKEN, check IAM policy'
| StarcoderdataPython |
5070208 | <reponame>ibrahim-elshar/Never-Ending-Learning-Imitation-Based-Methods
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
NUM_POINTS = 300.0
def plot(prefix, rewards):
    """Save reward-sum and reward-average curves (log and linear y-scale)
    as four PNG files named '<prefix>_<sum|avg>_<log|lin>.png'.

    NOTE(review): if ``rewards`` is empty, ``x_gap`` is 0 and np.arange
    raises; if len(rewards) < NUM_POINTS, the int cast of a fractional
    step yields duplicated sample indices -- confirm callers always pass
    long reward lists.
    """
    # Sample ~NUM_POINTS evenly spaced timesteps.
    x_gap = len(rewards) / NUM_POINTS
    x_vals = np.arange(0, len(rewards), x_gap).astype(int)
    rewards = np.array(rewards)
    for name, axis_label, func in \
            [('sum', 'Reward Sum (to date)', points_sum), \
             ('avg', 'Reward Average (next 100)', points_avg)]:
        y_vals = func(rewards, x_vals)
        for logscale in [True, False]:
            if logscale:
                # Only the 'log' variant switches scale; plt.close() below
                # discards the figure, so the next plot starts linear again.
                plt.yscale('log')
            plt.plot(x_vals+1, y_vals)
            plt.xlabel('Timestep')
            plt.ylabel(axis_label)
            plt.grid(which='Both')
            plt.tight_layout()
            plt.savefig(prefix +'_' + name + '_' + ('log' if logscale else 'lin') + '.png')
            plt.close()
def points_sum(rewards, x_vals):
    # Total reward accumulated before each sampled timestep (exclusive).
    return np.array([np.sum(rewards[:stop]) for stop in x_vals])
def points_avg(rewards, x_vals):
    # Mean of (up to) the next 100 rewards after each sampled timestep.
    # The divisor is a fixed 100 and the window is clipped to len-1,
    # exactly mirroring the original behaviour.
    last = len(rewards) - 1
    return np.array(
        [np.sum(rewards[start:min(last, start + 100)]) / 100
         for start in x_vals]
    )
| StarcoderdataPython |
8181835 | <reponame>brianWeng0223/toad
import pytest
import numpy as np
import pandas as pd
from .metrics import KS, KS_bucket, F1, PSI, AUC, matrix
# Deterministic synthetic data shared by every test below: the seeded RNG
# makes the hard-coded expected metric values reproducible.
np.random.seed(1)
feature = np.random.rand(500)
target = np.random.randint(2, size = 500)
base_feature = np.random.rand(500)
test_df = pd.DataFrame({
    'A': np.random.rand(500),
    'B': np.random.rand(500),
})
base_df = pd.DataFrame({
    'A': np.random.rand(500),
    'B': np.random.rand(500),
})
# Absolute tolerance for float comparisons via pytest.approx.
FUZZ_THRESHOLD = 1e-10
# Regression tests: each asserts the exact value produced with seed 1.
def test_KS():
    result = KS(feature, target)
    assert result == 0.05536775661256989
def test_KS_bucket():
    result = KS_bucket(feature, target)
    assert result.loc[4, 'ks'] == -0.028036335090276976
# 'step' bucketing with outlier clipping.
def test_KS_bucket_use_step():
    result = KS_bucket(feature, target, method = 'step', clip_q = 0.01)
    assert result.loc[4, 'ks'] == -0.0422147102645028
# bucket=False returns one row per score instead of binned rows.
def test_KS_bucket_for_all_score():
    result = KS_bucket(feature, target, bucket = False)
    assert len(result) == 500
def test_KS_bucket_return_splits():
    result, splits = KS_bucket(feature, target, return_splits = True)
    assert len(splits) == 9
# Explicit split points produce len(splits)+1 buckets.
def test_KS_bucket_use_split_pointers():
    result = KS_bucket(feature, target, bucket = [0.2, 0.6])
    assert len(result) == 3
def test_KS_bucket_with_lift():
    result = KS_bucket(feature, target)
    assert result.loc[3, 'lift'] == 1.0038610038610036
def test_KS_bucket_with_cum_lift():
    result = KS_bucket(feature, target)
    assert result.loc[3, 'cum_lift'] == 1.003861003861004
def test_F1():
    result, split = F1(feature, target, return_split = True)
    assert result == 0.6844207723035951
def test_F1_split():
    result = F1(feature, target, split = 0.5)
    assert result == 0.51417004048583
def test_AUC():
    result = AUC(feature, target)
    assert result == 0.5038690142424582
def test_AUC_with_curve():
    auc, fpr, tpr, thresholds = AUC(feature, target, return_curve = True)
    assert thresholds[200] == 0.15773006987053328
def test_PSI():
    result = PSI(feature, base_feature, combiner = [0.3, 0.5, 0.7])
    assert result == 0.018630024627491467
# PSI over a DataFrame returns one value per column.
def test_PSI_frame():
    result = PSI(
        test_df,
        base_df,
        combiner = {
            'A': [0.3, 0.5, 0.7],
            'B': [0.4, 0.8],
        },
    )
    assert result['B'] == pytest.approx(0.014528279995858708, FUZZ_THRESHOLD)
def test_PSI_return_frame():
    result, frame = PSI(
        test_df,
        base_df,
        combiner = {
            'A': [0.3, 0.5, 0.7],
            'B': [0.4, 0.8],
        },
        return_frame = True,
    )
    assert frame.loc[4, 'test'] == 0.38
def test_matrix():
    df = matrix(feature, target, splits = 0.5)
    assert df.iloc[0,1] == 133
| StarcoderdataPython |
9753559 | <filename>environment/NFG/twoplayer_twoaction.py
'''
Created on 07/11/2014
@author: <NAME> <<EMAIL>>
'''
from environment import Environment, ENV_SINGLE_STATE
class TwoPlayerTwoAction(Environment):
    """A 2-player, 2-action normal-form game (NFG) environment.

    NOTE(review): this module is Python 2 code (``print`` statement in
    run_step, and ``map()`` is relied upon to return lists in
    __create_env) -- do not port fragments in isolation.
    """
    def __init__(self, game_string):
        super(TwoPlayerTwoAction, self).__init__()
        self.__create_env(game_string)
    def __create_env(self, game_string):
        """Parse 'a1,a2;p00;p01;p10;p11' into the payoff matrix, where each
        pij is a comma-separated pair of integer payoffs."""
        # split the game string
        gs = game_string.split(';')
        # split the set of possible actions
        ac = gs[0].split(',')
        # create the environment data structure
        self._env = {}
        # define the game matrix according to the input game string
        # player 1 plays action ac[0]
        self._env[ac[0]] = {
            ac[0]: map(int, gs[1].split(',')), # player 2 plays action ac[0]
            ac[1]: map(int, gs[2].split(',')) # player 2 plays action ac[1]
        }
        # player 1 plays action ac[1]
        self._env[ac[1]] = {
            ac[0]: map(int, gs[3].split(',')), # player 2 plays action ac[0]
            ac[1]: map(int, gs[4].split(',')) # player 2 plays action ac[1]
        }
    def get_state_actions(self, state=None):
        # The action set is the same in every state (single-state NFG).
        return self._env.keys()
    def run_episode(self):
        # In an NFG, an episode is a single simultaneous move by both players.
        self._episodes += 1
        self.reset_episode()
        self.run_step()
    def run_step(self):
        """Collect both players' actions, compute payoffs, and deliver
        feedback; prints one policy line per episode."""
        self._steps += 1
        self._has_episode_ended = True
        # get the players
        p1,p2 = self._learners.values()
        # get their actions (states are present but not necessary due to the nature, of NFGs)
        #_,a1 = p1.act_last()
        #_,a2 = p2.act_last()
        a1, a2 = self.__run_acts(p1, p2)
        # calculate the reward associated with the actions of the players
        r1,r2 = self.__calc_reward(a1, a2)
        # provide the corresponding payoff
        #p1.feedback_last(r1, ENV_SINGLE_STATE)
        #p2.feedback_last(r2, ENV_SINGLE_STATE)
        self.__run_feedbacks(p1, p2, r1, r2)
        #print '<%s,%s> played <%s,%s> and received <%i,%i>'%(p1.get_name(),p2.get_name(),a1,a2,r1,r2)
        #print '%i\t%f\t%f'%(self._episodes, p1._QTable[ENV_SINGLE_STATE]['C'], p2._QTable[ENV_SINGLE_STATE]['C'])
        #print '%i\t%i\t%i'%(self._episodes, r1, r2)
        print "%i\t%f\t%f"%(self._episodes, p1._policy[ENV_SINGLE_STATE][self._env.keys()[0]], p2._policy[ENV_SINGLE_STATE][self._env.keys()[0]])
    def __run_feedbacks(self, p1, p2, r1, r2):
        # Drive every feedback phase of the learner protocol in order.
        # feedback 1
        p1.feedback1(r1, ENV_SINGLE_STATE)
        p2.feedback1(r2, ENV_SINGLE_STATE)
        # feedback 2
        p1.feedback2(r1, ENV_SINGLE_STATE)
        p2.feedback2(r2, ENV_SINGLE_STATE)
        # feedback 3
        p1.feedback3(r1, ENV_SINGLE_STATE)
        p2.feedback3(r2, ENV_SINGLE_STATE)
        # feedback last
        p1.feedback_last(r1, ENV_SINGLE_STATE)
        p2.feedback_last(r2, ENV_SINGLE_STATE)
    def __run_acts(self, p1, p2):
        # Drive every acting phase; only act_last's action is used.
        # act 1
        _ = p1.act1()
        _ = p2.act1()
        # act 2
        _ = p1.act2()
        _ = p2.act2()
        # act 3
        _ = p1.act3()
        _ = p2.act3()
        # act 4
        _ = p1.act4()
        _ = p2.act4()
        # act last
        _,a1 = p1.act_last()
        _,a2 = p2.act_last()
        return [a1, a2]
    def has_episode_ended(self):
        return self._has_episode_ended
    def __calc_reward(self, action_p1, action_p2):
        # NOTE(review): the first lookup below is a dead statement (its
        # result is discarded); only the return line has effect.
        self._env[action_p1][action_p2]
        return self._env[action_p1][action_p2]
| StarcoderdataPython |
9656399 | <gh_stars>1-10
import unittest
from unittest.mock import MagicMock
from equipment.framework.Queue.AbstractQueue import AbstractQueue
from equipment.framework.Queue.RedisQueue import RedisQueue
from equipment.framework.tests.TestCase import TestCase
class RedisQueueTest(TestCase):
    """Unit tests for RedisQueue; Redis itself is mocked out."""
    def setUp(self):
        super().setUp()
        # Build the queue under test from the app's real config/log services.
        self.queue = RedisQueue(
            config=self.app.config(),
            log=self.app.log(),
        )
    def test_extends_from_abstract_queue(self):
        with self.app.queue.override(self.queue):
            self.assertTrue(
                isinstance(self.app.queue(), AbstractQueue)
            )
    def test_enqueues_methods(self):
        with self.app.queue.override(self.queue):
            def method_test():
                return True
            # Replace the redis connection and rq queue with mocks so no
            # server is needed.
            self.queue.redis = MagicMock()
            self.queue.queue = MagicMock()
            self.assertTrue(
                self.queue.push(method_test)
            )
            # push() must swallow enqueue failures and report False.
            self.queue.queue.enqueue = MagicMock(side_effect=Exception())
            self.assertFalse(
                self.queue.push(method_test)
            )
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
9626261 | <reponame>domfp13/Snowflake-backup
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by <NAME>
import pandas as pd
from pandas import DataFrame
import os, contextlib, json
from pathlib import Path
@contextlib.contextmanager
def this_directory(path: str):
    """Temporarily make *path* the current working directory.

    The previous working directory is always restored on exit, even when
    the body of the ``with`` block raises.

    Args:
        path (str): Directory to switch into for the duration of the block.
    """
    previous = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(previous)
def dump_file_to_pickle(df: DataFrame):
    """Serialize *df* to '../data/data.pkl' (pickle format)."""
    data_dir = Path('..', 'data')
    with this_directory(data_dir):
        df.to_pickle('data.pkl')
def dump_file_to_json(df: DataFrame):
    """Serialize *df* to '../data/ddls.json' using the 'table' orient."""
    data_dir = Path('..', 'data')
    with this_directory(data_dir):
        df.to_json('ddls.json', orient="table")
| StarcoderdataPython |
9665369 | import argparse
import os
import pdb
import numpy as np
import subprocess
import ffmpeg
import time
def main(video_dir):
    """Decode every .mp4 in *video_dir* into a raw RGB numpy array via
    ffmpeg, printing the decoded shape and cumulative elapsed time.

    NOTE(review): decoding whole videos into memory at once -- large files
    may exhaust RAM.
    """
    print('MAIN')
    t1 = time.time()
    videos = os.listdir(video_dir)
    videos = [v for v in videos if v.endswith('.mp4')]
    for videoname in videos:
        video_file = os.path.join(video_dir, videoname)
        # Probe the container for the video stream's dimensions.
        probe = ffmpeg.probe(video_file)
        video_info = next(
            s for s in probe['streams'] if s['codec_type'] == 'video')
        width = int(video_info['width'])
        height = int(video_info['height'])
        # print( '\n\n dimensions', width, height, '\n\n')
        # Pipe raw rgb24 frames to stdout and reshape into (frames, h, w, 3).
        out, _ = ffmpeg.input(video_file).output(
            'pipe:', format='rawvideo', pix_fmt='rgb24', loglevel='panic').run(capture_stdout=True)
        video = np.frombuffer(out, np.uint8).reshape([-1, height, width, 3])
        print('shape', video.shape)
        print('Time taken', time.time()-t1)
if __name__ == '__main__':
    description = 'Helper script for downloading and trimming kinetics videos.'
    p = argparse.ArgumentParser(description=description)
    p.add_argument('video_dir', type=str,
                   help='Video directory where videos are saved.')
    main(**vars(p.parse_args()))
| StarcoderdataPython |
9652324 | # -*- coding: utf-8 -*-
import pytest
from aiohttp.web import Application
from mtpylon.aiohandlers.schema import schema_view_factory
from tests.simpleschema import schema
from tests.test_serializers import json_schema
@pytest.fixture
def cli(loop, aiohttp_client):
    # aiohttp test client for an app exposing the mtpylon schema view.
    schema_view = schema_view_factory(schema)
    app = Application()
    app.router.add_get('/schema', schema_view)
    return loop.run_until_complete(aiohttp_client(app))
async def test_get_schema(cli):
    # The endpoint must serve the serialized schema as JSON with HTTP 200.
    resp = await cli.get('/schema')
    assert resp.status == 200
    schema_data = await resp.json()
    assert schema_data == json_schema
| StarcoderdataPython |
62671 | <reponame>glimpens/EnergyScope
# -*- coding: utf-8 -*-
"""
This script modifies the input data and runs the EnergyScope model.
@author: <NAME>, <NAME>, <NAME>, <NAME>
"""
import yaml
import os
import pandas as pd
import energyscope as es
from print_run_files import print_master_run_file, print_main_run_file
from sys import platform
from energyscope.misc.utils import make_dir
from energyscope.postprocessing.utils import get_total_einv
def load_config(config_fn: str):
    """
    Load the configuration into a dict.

    :param config_fn: configuration file name.
    :return: a dict with the configuration.
    """
    # Load parameters.  Use a context manager so the file handle is closed
    # (the previous ``yaml.load(open(...))`` leaked it).
    with open(config_fn, 'r') as f:
        cfg = yaml.load(f, Loader=yaml.FullLoader)

    # Machine-specific locations of the EnergyScope sources and AMPL binary.
    if platform == "linux":
        cfg['energyscope_dir'] = '/home/jdumas/PycharmProjects/EnergyScope/'
        cfg['AMPL_path'] = '/home/jdumas/PycharmProjects/ampl_linux-intel64/ampl'
    else:
        cfg['energyscope_dir'] = '/Users/dumas/PycharmProjects/EnergyScope/'
        cfg['AMPL_path'] = '/Users/dumas/PycharmProjects/ampl_macos64/ampl'

    # Turn the relative paths from the config file into absolute paths
    # rooted at the EnergyScope directory.
    for param in ['case_studies_dir', 'user_data', 'developer_data', 'temp_dir', 'ES_path', 'step1_output']:
        cfg[param] = os.path.join(cfg['energyscope_dir'], cfg[param])

    return cfg
if __name__ == '__main__':
    # End-to-end driver: prepare .dat inputs, run EnergyScope through AMPL,
    # draw the Sankey diagram, then report GWP and EROI for the case study.
    # Get the current working directory
    cwd = os.getcwd()
    # Print the current working directory
    print("Current working directory: {0}".format(cwd))
    # Load configuration into a dict
    config = load_config(config_fn='config.yaml')
    # Create the temp_dir if it does not exist
    make_dir(config['temp_dir'])
    # Loading data
    all_data = es.import_data(user_data_dir=config['user_data'], developer_data_dir=config['developer_data'])
    # Saving data to .dat files into the config['temp_dir'] directory
    out_path = f"{config['temp_dir']}/ESTD_data.dat"
    es.print_estd(out_path=out_path, data=all_data, import_capacity=config["import_capacity"], gwp_limit=config["GWP_limit"])
    out_path = f"{config['temp_dir']}/ESTD_12TD.dat"
    # WARNING
    if not os.path.isfile(config["step1_output"]):
        print('WARNING: the STEP1 that consists of generating the 12 typical days must be conducted before to compute the TD_of_days.out file located in %s' %(config["step1_output"]))
    es.print_12td(out_path=out_path, time_series=all_data['Time_series'], step1_output_path=config["step1_output"])
    # Print the master.run file
    print_master_run_file(config=config)
    print_main_run_file(config=config)
    # Running EnergyScope
    cs = f"{config['case_studies_dir']}/{config['case_study_name']}"
    run_fn = f"{config['ES_path']}/master.run"
    es.run_energyscope(cs, run_fn, config['AMPL_path'], config['temp_dir'])
    # Example to print the sankey from this script
    output_dir = f"{config['case_studies_dir']}/{config['case_study_name']}/output/"
    es.drawSankey(path=f"{output_dir}/sankey")
    # TODO: check if it is ok to use the GWP_op as limit
    # Get the GWP op
    gwp = pd.read_csv(f"{cs}/output/gwp_breakdown.txt", index_col=0, sep='\t')
    gwp_op_tot = gwp.sum()['GWP_op']
    # Get the EROI
    total_demand = 388 # TWh
    # TODO: check if total demand is 388 TWh
    einv_tot = get_total_einv(cs)/1000 # TWh
    eroi_ini = total_demand/einv_tot
    print('EROI %.2f GWP op MtC02eq %.2f' %(eroi_ini, gwp_op_tot))
    # # LOOP on several GWP maximum values and compute the related EROI
    # eroi_list = []
    # eroi_list.append(eroi_ini)
    # for gwp_limit, cs_name in zip([gwp_op_tot*0.9, gwp_op_tot*0.8],['run_90', 'run_80']):
    #     print('RUN in progess %s' %(cs_name))
    #     # Saving data to .dat files into the config['temp_dir'] directory
    #     out_path = f"{config['temp_dir']}/ESTD_data.dat"
    #     es.print_estd(out_path=out_path, data=all_data, import_capacity=config["import_capacity"], gwp_limit=gwp_limit)
    #     out_path = f"{config['temp_dir']}/ESTD_12TD.dat"
    #     es.print_12td(out_path=out_path, time_series=all_data['Time_series'], step1_output_path=config["step1_output"])
    #
    #     # Running EnergyScope
    #     cs = f"{config['case_studies_dir']}/{cs_name}"
    #     run_fn = f"{config['ES_path']}/master.run"
    #     es.run_energyscope(cs, run_fn, config['AMPL_path'], config['temp_dir'])
    #
    #     # Example to print the sankey from this script
    #     output_dir = f"{config['case_studies_dir']}/{config['case_study_name']}/output/"
    #     es.drawSankey(path=f"{output_dir}/sankey")
    #
    #     # Compute the EROI
    #     einv_temp = get_total_einv(cs) / 1000  # TWh
    #     eroi_temp = total_demand / einv_temp
    #     print('EROI %.2f GWP op MtC02eq %.2f' % (eroi_temp, gwp_limit))
    #     eroi_list.append(eroi_temp)
    #
    # # TODO: plot with EROI vs GWP and save plot
4987549 | <reponame>pgDora56/Mocho<filename>tamagame.py
# coding=utf-8
import discord
import json
import pickle
import time
# Read config.json at import time; 'tamagame' holds the text channel id,
# and the matching voice channel is assumed to be that id + 1 (see TamaGame).
with open("config.json", "r") as f:
    conf = json.load(f)
tg_zatsu_id = conf["tamagame"]
class TamaGame:
    """Tracks a Discord voice channel and announces call durations in the
    paired text channel; state survives restarts via a pickle file."""
    def __init__(self, client):
        # Text channel, and the voice channel assumed to be the next id.
        self.zatsudan = client.get_channel(tg_zatsu_id)
        self.vc = client.get_channel(tg_zatsu_id + 1)
        try:
            # Restore [talking_num, talkstart, sendpng] from the last run.
            with open("pickles/tamagame.pickle", "rb") as f:
                lis = pickle.load(f)
            self.talking_num = lis[0]
            self.talkstart = lis[1]
            self.sendpng = lis[2]
            # await self.member_change()
        # NOTE(review): bare except hides unrelated errors; it is meant to
        # cover a missing/corrupt pickle file -- consider narrowing.
        except:
            # NOTE(review): this counts bots, while get_vc_members() below
            # excludes them -- confirm which is intended.
            self.talking_num = len(self.vc.members)
            self.talkstart = -1
            self.sendpng = None
    async def chat(self, msg):
        # Send a message to the paired text channel.
        await self.zatsudan.send(msg)
    async def member_change(self):
        """Handle a voice-channel membership change: post an image when a
        call starts (0 -> 1 humans) and announce the elapsed time when it
        ends (1 -> 0 humans)."""
        newnum = self.get_vc_members()
        if self.talking_num == newnum:
            return
        if self.talking_num == 0 and newnum == 1:
            self.talkstart = time.time()
            print("Talk start")
            self.sendpng = await self.sendpng if False else await self.zatsudan.send(file=discord.File("tsuwa_in.png"))
        elif self.talking_num == 1 and newnum == 0:
            if self.talkstart == -1:
                print("Time End Error")
            # NOTE(review): when the start image is still posted it is only
            # deleted and no duration is announced -- confirm intended.
            elif self.sendpng != None:
                await self.sendpng.delete()
                self.talkstart = -1
            else:
                # Break the elapsed seconds into h/m/s for the message.
                sec = time.time() - self.talkstart
                m = sec // 60
                sec = sec % 60
                h = m // 60
                m = m % 60
                time_str = f"{int(h)}時間" if h > 0 else ""
                time_str += f"{int(m)}分{int(sec)}秒"
                await self.chat(f"お疲れ様!{time_str}喋ってたよ!(o・∇・o)")
                self.talkstart = -1
        else:
            self.sendpng = None
        self.talking_num = newnum
        self.record()
    def get_vc_members(self):
        # Count only human (non-bot) members in the voice channel.
        human_user_cnt = 0
        for m in self.vc.members:
            if not m.bot: human_user_cnt += 1
        print(f"Human: {human_user_cnt}")
        return human_user_cnt
    def record(self):
        # Persist current state so a restart keeps the in-progress call.
        with open("pickles/tamagame.pickle", "wb") as f:
            pickle.dump(list([self.talking_num, self.talkstart, self.sendpng]), f)
| StarcoderdataPython |
3517257 | <reponame>toptal/license-cop
import pytest
from textwrap import dedent
from test import *
from app.dependency import *
from app.github.repository import *
from app.platforms.python.repository_matcher import *
# Repository fixtures: a Node.js repo (no Python manifests), one using
# requirements files, and one using a Pipfile.
@pytest.fixture
def nodejs_repository():
    return GithubRepository.from_url(
        'https://github.com/browserify/browserify',
        http_compression=False
    )
@pytest.fixture
def requirements_repository():
    return GithubRepository.from_url(
        'https://github.com/alanhamlett/pip-update-requirements',
        http_compression=False
    )
@pytest.fixture
def pipfile_repository():
    return GithubRepository('toptal', 'license-cop', http_compression=False)
@pytest.fixture
def matcher():
    return PythonRepositoryMatcher()
def test_parse_requirements_file():
    # Comments, bare 'wheel', and '-e' editable-install lines must be
    # handled: '-e' lines and comments are skipped, version specifiers are
    # stripped, leaving only package names.
    data = dedent('''\
        pandas~=0.20.2
        httplib2~=0.10.2
        pyOpenSSL~=16.2.0
        google-cloud~=0.23.0
        google-api-python-client~=1.6.2
        grpcio==1.4.0
        oauth2client~=4.1.2
        googleads==6.0.0
        # This is a comment
        luigi==2.6.2
        ruamel.yaml==0.15.18
        newrelic~=172.16.58.3
        -e ./foobar-common/
        -e ./foobar-avro/
        -e ./foobar-api/
        -e ./foobar-csv/
        -e ./foobar-etl/
        -e ./foobar-orchestration/
        -e ./foobar-validation/
        -e ./foobar-chronos/
        wheel
    ''')
    assert parse_requirements_file(data, DependencyKind.DEVELOPMENT) == [
        Dependency.development('pandas'),
        Dependency.development('httplib2'),
        Dependency.development('pyOpenSSL'),
        Dependency.development('google-cloud'),
        Dependency.development('google-api-python-client'),
        Dependency.development('grpcio'),
        Dependency.development('oauth2client'),
        Dependency.development('googleads'),
        Dependency.development('luigi'),
        Dependency.development('ruamel.yaml'),
        Dependency.development('newrelic'),
        Dependency.development('wheel')
    ]
def test_parse_pipfile():
    # [packages] become runtime dependencies, [dev-packages] development
    # ones; extras/git tables and version constraints are discarded.
    data = dedent('''\
        [[source]]
        url = 'https://pypi.python.org/simple'
        verify_ssl = true
        [requires]
        python_version = '2.7'
        [packages]
        requests = { extras = ['socks'] }
        records = '>0.5.0'
        django = { git = 'https://github.com/django/django.git', ref = '1.11.4', editable = true }
        [dev-packages]
        pytest = ">=2.8.0"
        codecov = "*"
        "pytest-httpbin" = "==0.0.7"
        "pytest-mock" = "*"
        "pytest-cov" = "*"
        "pytest-xdist" = "*"
        alabaster = "*"
        "readme-renderer" = "*"
        sphinx = "<=1.5.5"
        pysocks = "*"
        docutils = "*"
        "flake8" = "*"
        tox = "*"
        detox = "*"
        httpbin = "==0.5.0"
    ''')
    (runtime, development) = parse_pipfile(data)
    assert runtime == [
        Dependency.runtime('requests'),
        Dependency.runtime('records'),
        Dependency.runtime('django')
    ]
    assert development == [
        Dependency.development('pytest'),
        Dependency.development('codecov'),
        Dependency.development('pytest-httpbin'),
        Dependency.development('pytest-mock'),
        Dependency.development('pytest-cov'),
        Dependency.development('pytest-xdist'),
        Dependency.development('alabaster'),
        Dependency.development('readme-renderer'),
        Dependency.development('sphinx'),
        Dependency.development('pysocks'),
        Dependency.development('docutils'),
        Dependency.development('flake8'),
        Dependency.development('tox'),
        Dependency.development('detox'),
        Dependency.development('httpbin')
    ]
# The tests below replay recorded GitHub HTTP traffic via VCR cassettes,
# so they run offline and deterministically.
@VCR.use_cassette('python_repository_matcher_match_repository_with_requirements.yaml')
def test_match_repository_with_requirements(matcher, requirements_repository):
    assert matcher.match(requirements_repository) is not None
@VCR.use_cassette('python_repository_matcher_match_repository_with_pipfile.yaml')
def test_match_repository_with_pipfile(matcher, pipfile_repository):
    assert matcher.match(pipfile_repository) is not None
@VCR.use_cassette('python_repository_matcher_mismatch_repository_without_requirements_nor_pipfile.yaml')
def test_mismatch_repository_without_requirements_nor_pipfile(matcher, nodejs_repository):
    # A Node.js repo has no requirements/Pipfile, so there is no match.
    assert matcher.match(nodejs_repository) is None
@VCR.use_cassette('python_repository_matcher_extract_manifest_from_requirements_files.yaml')
def test_extract_manifest_from_requirements_files(matcher, requirements_repository):
    match = matcher.match(requirements_repository)
    manifests = match.manifests
    manifest = manifests[0]
    assert manifest.platform == 'Python'
    assert manifest.repository == requirements_repository
    assert manifest.paths == ['requirements.txt', 'dev-requirements.txt']
    assert manifest.runtime_dependencies == [
        Dependency.runtime('click')
    ]
    assert manifest.development_dependencies == [
        Dependency.development('coverage'),
        Dependency.development('mock'),
        Dependency.development('nose'),
        Dependency.development('nose-capturestderr'),
        Dependency.development('nose-exclude')
    ]
@VCR.use_cassette('python_repository_matcher_extract_manifest_from_pipfile.yaml')
def test_extract_manifest_from_pipfile(matcher, pipfile_repository):
    match = matcher.match(pipfile_repository)
    manifests = match.manifests
    manifest = manifests[0]
    assert manifest.platform == 'Python'
    assert manifest.repository == pipfile_repository
    assert manifest.paths == ['Pipfile']
    assert manifest.runtime_dependencies == [
        Dependency.runtime('requests')
    ]
    assert manifest.development_dependencies == [
        Dependency.development('pytest'),
        Dependency.development('vcrpy'),
        Dependency.development('pytest-mock')
    ]
| StarcoderdataPython |
1740859 | <filename>public/shader/220210_2142.py<gh_stars>0
// NOTE: despite the .py path, this is a GLSL (Shadertoy-style) sound shader.
#define BPM 90.0
const float PI = acos(-1.0);
const float TAU = PI * 2.0;
/* sound common */
// Seconds <-> beats conversion at the fixed BPM above.
float timeToBeat(float t) {return t / 60.0 * BPM;}
float beatToTime(float b) {return b / BPM * 60.0;}
// Unit-phase sine: one full cycle per unit of `phase`.
float sine(float phase) {
    return sin(TAU * phase);
}
// Scale a base value by `p` equal-tempered semitones (2^(p/12)).
float pitch(float p, float t) {
    return pow(2.0, p / 12.0) * t;
}
// Band-limited sawtooth from the first 8 harmonics.
// NOTE(review): the trailing "- 0.5" cancels the leading 1/2, so the net
// value is -(1/PI)*s (a zero-centred saw) -- confirm this is intended.
float saw(float phase) {
    float s = 0.0;
    for (int k=1; k<=8; k++) {
        s += (sin(TAU * float(k) * phase) / float(k));
    }
    return (1.0 / 2.0) - (1.0 / PI) * s - 0.5;
}
// Band-limited square wave from odd harmonics (k < 8).
float square(float phase) {
    float s = 0.0;
    for (int k=1; k<8; k++) {
        s += sin(TAU * (2.0 * float(k) - 1.0) * phase) / (2.0 * float(k) - 1.0);
    }
    return (4.0 / PI) * s;
}
// Shadertoy sound entry point: returns the stereo sample at `time`.
// Left channel: a pitch-swept sine; right: a decaying metronome click
// that alternates 440/880 Hz within each 4-beat bar.
vec2 mainSound(float time){
    float bpm = timeToBeat(time);
    float tempo = sine((mod(bpm, 4.0) >= 1.0 ? 440.0 : 880.0) * time) * exp(-1e2 * fract(bpm));
    float p = abs(sin(bpm * TAU / 8.0)) / beatToTime(bpm);
    float s = sine(pitch(12.0 * p, 110.0) * time) * fract(-bpm);
    return vec2(s, tempo);
}
| StarcoderdataPython |
11365048 | import sys
import torchvision.transforms as transforms
from torch.utils.data import DataLoader, WeightedRandomSampler
sys.path.append('../')
from config.cfg import cfg
from data.datasets import RafFaceDataset, UTKFaceDataset, RafPartDataset, FER2013Dataset
def _emotion_weighted_sampler(dataset, class_weights):
    """Build a WeightedRandomSampler that re-balances emotion classes.

    ``class_weights[label]`` is the sampling weight for emotion ``label``
    (inverse-frequency style constants supplied per dataset).  Labels
    outside ``[0, len(class_weights))`` are reported and skipped, which
    mirrors the original per-dataset weight loops.

    NOTE(review): iterating the dataset loads every sample once just to
    read its label -- slow for image datasets; a label array on the
    dataset would avoid this.
    """
    weights = []
    for sample in dataset:
        label = sample['emotion']
        if 0 <= label < len(class_weights):
            weights.append(class_weights[label])
        else:
            print('label error')
    return WeightedRandomSampler(weights, num_samples=len(dataset), replacement=True)


def load_data(dataset_name):
    """
    Build (trainloader, testloader) for the named dataset.

    :param dataset_name: one of 'RAF-Face', 'UTKFace', 'RAF-Part', 'FER2013'
    :return: tuple of train and test DataLoaders; exits the process on an
        unknown dataset name (original behaviour).
    """
    batch_size = cfg[dataset_name]['batch_size']
    if dataset_name == 'RAF-Face':
        print('loading %s dataset...' % dataset_name)
        train_dataset = RafFaceDataset(train=True, type='basic',
                                       transform=transforms.Compose([
                                           transforms.Resize(224),
                                           transforms.ColorJitter(),
                                           transforms.RandomRotation(30),
                                           transforms.ToTensor(),
                                           transforms.Normalize(
                                               [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
                                           )
                                       ]))
        # Per-class sampling weights for the 7 basic emotions (labels 0-6).
        weighted_random_sampler = _emotion_weighted_sampler(
            train_dataset, [3.68, 16.78, 6.8, 1, 2.42, 6.87, 1.86])
        trainloader = DataLoader(train_dataset, batch_size=batch_size, num_workers=50, pin_memory=True,
                                 sampler=weighted_random_sampler)
        test_dataset = RafFaceDataset(train=False, type='basic',
                                      transform=transforms.Compose([
                                          transforms.Resize(224),
                                          transforms.CenterCrop(224),
                                          transforms.ToTensor(),
                                          transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
                                      ]))
        testloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=50, pin_memory=True)
        return trainloader, testloader
    elif dataset_name == 'UTKFace':
        print('loading %s dataset...' % dataset_name)
        train_dataset = UTKFaceDataset(train=True,
                                       transform=transforms.Compose([
                                           transforms.Resize(224),
                                           transforms.ColorJitter(),
                                           transforms.RandomRotation(30),
                                           transforms.ToTensor(),
                                           transforms.Normalize(
                                               mean=[0.5, 0.5, 0.5],
                                               std=[0.5, 0.5, 0.5])
                                       ]))
        trainloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=50, pin_memory=True)
        test_dataset = UTKFaceDataset(train=False,
                                      transform=transforms.Compose([
                                          transforms.Resize(224),
                                          transforms.ColorJitter(),
                                          transforms.RandomRotation(30),
                                          transforms.ToTensor(),
                                          transforms.Normalize(
                                              mean=[0.5, 0.5, 0.5],
                                              std=[0.5, 0.5, 0.5])
                                      ]))
        testloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=50, pin_memory=True)
        return trainloader, testloader
    elif dataset_name == 'RAF-Part':
        print('loading %s dataset...' % dataset_name)
        train_dataset = RafPartDataset(train=True, type='basic',
                                       transform=transforms.Compose([
                                           transforms.Resize((48, 64)),
                                           transforms.CenterCrop(48),
                                           transforms.ColorJitter(),
                                           transforms.RandomRotation(15),
                                           transforms.ToTensor(),
                                           transforms.Normalize(
                                               [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
                                           )
                                       ]))
        trainloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, drop_last=True, num_workers=50,
                                 pin_memory=True)
        test_dataset = RafPartDataset(train=False, type='basic',
                                      transform=transforms.Compose([
                                          transforms.Resize((48, 64)),
                                          transforms.CenterCrop(48),
                                          transforms.ColorJitter(),
                                          transforms.RandomRotation(15),
                                          transforms.ToTensor(),
                                          transforms.Normalize(
                                              [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
                                          )
                                      ]))
        testloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, drop_last=True, num_workers=50,
                                pin_memory=True)
        return trainloader, testloader
    elif dataset_name == 'FER2013':
        print('loading %s dataset...' % dataset_name)
        train_dataset = FER2013Dataset(train=True,
                                       transform=transforms.Compose([
                                           transforms.Resize(227),
                                           transforms.CenterCrop(224),
                                           transforms.ColorJitter(),
                                           transforms.RandomRotation(15),
                                           transforms.ToTensor(),
                                           transforms.Normalize(
                                               [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
                                           )
                                       ]))
        # Per-class sampling weights for FER2013's 7 emotions (labels 0-6).
        weighted_random_sampler = _emotion_weighted_sampler(
            train_dataset, [1.81, 16.55, 1.76, 1, 1.49, 2.28, 1.45])
        trainloader = DataLoader(train_dataset, batch_size=batch_size, sampler=weighted_random_sampler,
                                 num_workers=50, pin_memory=True)
        test_dataset = FER2013Dataset(train=False,
                                      transform=transforms.Compose([
                                          transforms.Resize(224),
                                          transforms.ColorJitter(),
                                          transforms.RandomRotation(15),
                                          transforms.ToTensor(),
                                          transforms.Normalize(
                                              [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
                                          )
                                      ]))
        testloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, drop_last=False, num_workers=50,
                                pin_memory=True)
        return trainloader, testloader
    else:
        print('Error! Invalid dataset name~')
        sys.exit(0)
| StarcoderdataPython |
3367728 | """
A dictionary to control logging via logging.config.dictConfig
"""
# Configuration dict for logging.config.dictConfig (schema version 1).
# NOTE(review): the 'file' handler is defined but not attached to any
# logger below -- confirm whether it should be added to the root handlers.
loggingDict = {
    'version': 1,
    # Keep loggers that were created before configuration is applied.
    'disable_existing_loggers': False,
    'formatters': {
        'default': {
            'format': '%(levelname)s\t%(process)d '
                      '[%(asctime)s]:\t%(message)s',
            'datefmt': '%m/%d/%Y %H:%M:%S',
        }
    },
    'handlers': {
        # Rotating file handler writing to /tmp/app.log.
        'file': {
            'formatter': 'default',
            'level': 'DEBUG',
            'class': 'logging.handlers.TimedRotatingFileHandler',
            'filename': '/tmp/app.log',
        },
        'console': {
            'formatter': 'default',
            'level': 'DEBUG',
            'class': 'logging.StreamHandler'
        },
    },
    'loggers': {
        # Root logger: console output only, WARN and above.
        '': {
            'handlers': ['console'],
            'level': 'WARN',
            'propagate': True,
        },
    },
}
| StarcoderdataPython |
8102008 | <reponame>bjlittle/ugrid-checks<filename>lib/ugrid_checks/tests/__init__.py
"""
Unittests for the ugrid_checks package.
TODO: definitely a good idea to convert all this to PyTest.
"""
import copy
from pathlib import Path
from subprocess import check_call
from typing import Text, Union
from pytest import fixture
from ugrid_checks.check import (
_VALID_CONNECTIVITY_ROLES,
_VALID_MESHCOORD_ATTRS,
)
from ugrid_checks.nc_dataset_scan import (
NcFileSummary,
NcVariableSummary,
scan_dataset,
)
from ugrid_checks.scan_utils import property_namelist
def cdl_scan(
    cdl: Text, tempdir_path: Path, tempfile_name: Union[Text, None] = None
):
    """
    Create a dataset "scan" :class:`~ugrid_checks.nc_data_scan.NcFileSummary`
    from a CDL string.

    Requires a temporary directory to create temporary intermediate files in.
    Calls 'ncgen' to create a netcdf file from the CDL.

    """
    tempfile_name = tempfile_name or "tmp.cdl"
    temp_nc_file_name = tempfile_name.replace(".cdl", ".nc")
    temp_cdl_path = Path(tempdir_path).resolve() / tempfile_name
    temp_nc_path = Path(tempdir_path).resolve() / temp_nc_file_name
    # Write the CDL text out, then compile it to netCDF-4 with ncgen.
    with open(temp_cdl_path, "w") as tempfile:
        tempfile.write(cdl)
    # shell=True is acceptable here: inputs are test-controlled paths.
    cmd = f"ncgen -k4 -o {temp_nc_path} {temp_cdl_path}"
    check_call(cmd, shell=True)
    scan = scan_dataset(temp_nc_path)
    return scan
@fixture()
def cdl_scanner(tmp_path):
    """
    Pytest fixture providing an object whose ``scan(cdl_string)`` converts a
    CDL string into a dataset "scan"
    :class:`~ugrid_checks.nc_data_scan.NcFileSummary`.

    The conversion needs temporary files, so this fixture embeds pytest's
    'tmp_path' fixture, which decides where those files go.
    """
    class CdlScanner:
        def __init__(self, dir_path):
            self.tmp_path = dir_path

        def scan(self, cdl_string):
            return cdl_scan(cdl=cdl_string, tempdir_path=self.tmp_path)

    return CdlScanner(tmp_path)
#
# Utilities to make duplicated structures within file-scans.
# N.B. these could perhaps be written for more generalised use, but at present
# their only expected usage is in tests.
#
def next_name(name: str) -> str:
    """Convert 'xxx' to 'xxx_2' and 'xxx_<N>' to 'xxx_<N+1>'.

    Fixes two defects of the original: multi-digit suffixes now increment
    correctly ('xxx_10' -> 'xxx_11', not 'xxx_10_2'), and one-character
    names no longer raise IndexError.
    """
    stem, sep, suffix = name.rpartition("_")
    if sep and suffix.isdecimal():
        return f"{stem}_{int(suffix) + 1}"
    return name + "_2"  # NB don't use '_1'
def next_dim(file_scan: NcFileSummary, dim_name: str) -> str:
    """Return the *name* of the 'next-named' dimension after `dim_name`.

    If that dimension does not yet exist in `file_scan`, it is created as a
    deep copy of the original and installed. Only the name is returned,
    suitable for assigning to a variable's dims.
    """
    bumped_name = next_name(dim_name)
    if file_scan.dimensions.get(bumped_name) is None:
        file_scan.dimensions[bumped_name] = copy.deepcopy(
            file_scan.dimensions[dim_name]
        )
    return bumped_name
def next_var(
    file_scan: NcFileSummary, var_name: str, ref_attrs=None
) -> NcVariableSummary:
    """Return the 'next-named' variable, creating it if necessary.

    Creation also 'bumps' the variable's dimensions and any referenced
    variables: those named in the attributes listed in `ref_attrs`, plus
    (always) 'mesh', 'bounds' and 'coordinates'.

    Parameters
    ----------
    file_scan : NcFileSummary
        The file scan to read from and install new variables into.
    var_name : str
        Name of the existing variable to duplicate-from.
    ref_attrs : list of str, optional
        Extra attribute names whose referenced variables should also be
        duplicated. The caller's list is *not* modified.
    """
    new_name = next_name(var_name)
    new_var = file_scan.variables.get(new_name)
    if new_var is None:
        old_var = file_scan.variables[var_name]  # This *should* exist !
        new_var = copy.deepcopy(old_var)
        new_var.name = new_name
        new_var.dimensions = [
            next_dim(file_scan, dimname) for dimname in old_var.dimensions
        ]
        # Shift any simple var refs (for datavar usage).
        # BUG FIX: build a fresh list; the original's 'ref_attrs += [...]'
        # extended the caller's list in place, mutating the argument.
        ref_attrs = list(ref_attrs) if ref_attrs else []
        # always do these ones
        ref_attrs = ref_attrs + ["mesh", "bounds", "coordinates"]
        for attrname in ref_attrs:
            inner_varsattr = old_var.attributes.get(attrname)
            inner_varnames = property_namelist(inner_varsattr)
            if inner_varnames:
                new_names = [
                    next_var(file_scan, inner_name).name
                    for inner_name in inner_varnames
                ]
                new_var.attributes[attrname] = " ".join(new_names)
        file_scan.variables[new_var.name] = new_var
    return new_var
def next_mesh(file_scan: NcFileSummary, mesh_name: str) -> NcVariableSummary:
    """Return the 'next-named' mesh variable, creating it when absent.

    Creation duplicates the mesh's dimensions, coordinate variables and
    connectivities as well. N.B. unlike the checker code itself, this
    assumes the original mesh is complete + consistent.
    """
    bumped_name = next_name(mesh_name)
    bumped_mesh = file_scan.variables.get(bumped_name)
    if bumped_mesh:
        return bumped_mesh
    # Copy the variable, duplicating any coord + connectivity variables too.
    ref_attrs = _VALID_MESHCOORD_ATTRS + _VALID_CONNECTIVITY_ROLES
    bumped_mesh = next_var(file_scan, mesh_name, ref_attrs=ref_attrs)
    # Similarly 'bump' any mesh-dimension attributes.
    for dim_attr in ("face_dimension", "edge_dimension"):
        dim_name = str(bumped_mesh.attributes.get(dim_attr, ""))
        if dim_name:
            bumped_mesh.attributes[dim_attr] = next_name(dim_name)
    return bumped_mesh
| StarcoderdataPython |
1924839 | import unittest
from selenium import webdriver
import page
import secrets
class ShoppingListTest(unittest.TestCase):
    """Browser smoke tests for the shopping-list app, driven through the
    Selenium page objects defined in ``page``.

    unittest runs setUp -> test -> tearDown around every test method, so
    each test gets a fresh browser session, e.g. on a file run:
    setUp -> test_one -> tearDown -> setUp -> test_two -> tearDown.
    """
    def setUp(self):
        # Path to the chromedriver binary in the author's local environment.
        self.driver = webdriver.Chrome("/Users/cody/dev/practice/selenium-trial/chromedriver 2")
        # Open the deployed app.
        self.driver.get("https://arcane-atoll-75350.herokuapp.com/")

    def test_title_matches(self):
        main_page = page.MainPage(self.driver)
        assert main_page.is_title_matches()

    def test_log_in(self):
        main_page = page.MainPage(self.driver)
        main_page.click_log_in_button()
        log_in_page = page.LogInPage(self.driver)
        # BUG FIX: the original instantiated page.EmailFieldElement() /
        # page.PasswordFieldElement() into locals and then immediately
        # rebound those locals to the credential values (and contained the
        # literal placeholder `<PASSWORD>`, a syntax error) -- so nothing
        # was ever typed into the form. Page-object field descriptors take
        # effect when assigned *on the page instance*.
        # NOTE(review): assumes LogInPage declares these descriptors as
        # class attributes -- confirm against page.py.
        log_in_page.email_field_element = secrets.email
        log_in_page.password_field_element = secrets.password
        log_in_page.click_log_in_button()
        home_page = page.HomePage(self.driver)
        assert home_page.is_logged_in()

    def tearDown(self):
        # Close the browser after each test.
        self.driver.quit()
# Run the test suite when executed directly.
if __name__ == "__main__":
    unittest.main()
| StarcoderdataPython |
8006555 | from typing import List
from ..db import get_session
from ..db_models import Chase
from ..error import BabylonServerError
from ..api_models.all_activity import AllActivity
from ..api_models.chase_activity import ChaseActivity
def fetch_all_by_type(activity_type: str) -> dict:
    """
    Fetch every stored activity record for a given type.

    :param activity_type: Type of activity (i.e. "CHASE")
    :raises BabylonServerError: if the activity type is not supported.
    :return: All activity for the type.
    """
    db_session = get_session()
    if activity_type != 'CHASE':
        raise BabylonServerError(f"Given activity type {activity_type} is not supported")
    return _to_all_activity(Chase.get_all_activity(db_session))
def fetch_activity_between_dates(activity_type: str, start_date: str, end_date: str) -> dict:
    """
    Fetch activity whose posting date falls within the given range, for the
    given activity type. The dates refer to the account's posting date (not
    necessarily when the record was added to the DB).

    :param activity_type: Type of activity (i.e. "CHASE").
    :param start_date: (ISO 8601 format) Lower bounded start date.
    :param end_date: (ISO 8601) Upper bounded end date.
    :raises BabylonServerError: if the activity type is not supported.
    :return: Activity objects
    """
    db_session = get_session()
    if activity_type != 'CHASE':
        raise BabylonServerError(f"Given activity type {activity_type} is not supported")
    records = Chase.get_all_within_posted_date(session=db_session,
                                               start_date=start_date,
                                               end_date=end_date)
    return _to_all_activity(records)
def _to_all_activity(chase_activity: List[Chase]) -> dict:
    """
    Convert Chase DB-model rows into the AllActivity API model.

    :param chase_activity: DB rows to convert from.
    :return: Serialized AllActivity payload wrapping the converted rows.
    """
    dumped_rows = [
        ChaseActivity().dump(
            dict(activityId=row.id,
                 details=row.details,
                 postingDate=row.posting_date,
                 description=row.description,
                 amount=row.amount,
                 trxType=row.trx_type,
                 balance=row.balance)
        )
        for row in chase_activity
    ]
    return AllActivity().dump(dict(activity_type="CHASE", activity=dumped_rows))
| StarcoderdataPython |
5199762 | import pyaudio
import zlib
import io
import audioop
import time
import asyncio
import socket
from config.config import Audio
from config.config import Server
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def callback(input_data, frame_count, time_info, status):
    """PyAudio stream callback: ADPCM-compress the captured frame, ship it
    to the transcription server, and echo the input back so the stream
    keeps running.
    """
    # BUG FIX: audioop.lin2adpcm returns (fragment, state), and the state
    # must be fed back into the next call. The original passed None every
    # time, resetting the ADPCM predictor on each buffer and degrading the
    # encoded audio.
    fragment, callback._adpcm_state = audioop.lin2adpcm(
        input_data, 2, callback._adpcm_state)
    get_transcription(fragment)
    return (input_data, pyaudio.paContinue)
# Initial encoder state: None means "fresh encoder" on the first frame.
callback._adpcm_state = None
def record_audio():
    """Capture microphone audio via a callback-driven PyAudio stream and
    print any transcription text the server sends back, until the stream
    stops being active.
    """
    audio = pyaudio.PyAudio()
    mic_stream = audio.open(format=Audio.a_format,
                            channels=Audio.channels,
                            rate=Audio.rate,
                            input=True,
                            frames_per_buffer=Audio.chunk,
                            stream_callback=callback)
    mic_stream.start_stream()
    while mic_stream.is_active():
        time.sleep(0.1)
        # NOTE(review): recvfrom blocks until the server replies -- confirm
        # this is the intended pacing of the loop.
        reply, _addr = sock.recvfrom(1024)
        if reply:
            print(reply.decode())
    mic_stream.stop_stream()
    mic_stream.close()
    audio.terminate()
def get_transcription(data):
    """Deflate-compress an audio fragment and send it to the transcription
    server over the module-level UDP socket.
    """
    payload = zlib.compress(data)
    sock.sendto(payload, (Server.host, Server.port))
# Start capturing from the microphone when run as a script.
if __name__ == '__main__':
    record_audio()
11277769 | ##
# Copyright (c) 2008-2010 Sprymix Inc.
# All rights reserved.
#
# See LICENSE for details.
##
from importkit.yaml import validator
from importkit.yaml.validator.tests.base import SchemaTest, raises, result
class TestPerson(SchemaTest):
    # Validates the YAML document in each test's docstring against the
    # 'person.Schema' YAML schema. The @raises/@result decorators consume
    # the docstring as the test fixture, so those strings are data and
    # must not be edited.
    def setUp(self):
        super().setUp()
        self.schema = self.get_schema('person.Schema')
    # Root must be a sequence: a bare mapping is rejected.
    @raises(validator.SchemaValidationError, 'list expected')
    def test_validator_root_sequence(self):
        """
        name: Yuri
        phone: 416-509-280
        """
    # Phone value fails the schema's pattern (one digit short).
    @raises(validator.SchemaValidationError, 'pattern validation failed')
    def test_validator_pattern(self):
        """
        - name: Yuri
          phone: 416-509-280
        """
    # Name exceeds the exclusive maximum length (range-max-ex).
    @raises(validator.SchemaValidationError, 'range-max-ex validation failed')
    def test_validator_range_max(self):
        """
        - name: "123456789012345678901"
          phone: 416-509-2801
        """
    # Omitted 'sex' is filled in from the schema default ('male').
    @result([{'phone': '416-509-2801', 'name': 'John', 'sex': 'male'}])
    def test_validator_default1(self):
        """
        - name: "John"
          phone: 416-509-2801
        """
    # 'sex' must be one of the enumerated values.
    @raises(validator.SchemaValidationError, 'enum validation failed')
    def test_validator_enum1(self):
        """
        - name: "John"
          phone: 416-509-2801
          sex: unknown
        """
    # 'name' is declared unique; duplicates are rejected.
    @raises(validator.SchemaValidationError, "unique key 'name', value 'Anya' is already used")
    def test_validator_unique_value(self):
        """
        - name: "Anya"
          phone: 416-509-2801
          sex: female
        - name: "Anya"
          phone: 416-509-2801
          sex: female
        """
    # Happy path: two valid entries, defaults applied where omitted.
    @result([{'phone': '416-509-2801', 'name': 'Anya', 'sex': 'female'},
             {'phone': '416-509-2101', 'name': '<NAME>', 'sex': 'male'}])
    def test_validator_person_seq1(self):
        """
        - name: "Anya"
          phone: 416-509-2801
          sex: female
        - name: "<NAME>"
          phone: 416-509-2101
        """
| StarcoderdataPython |
270696 | <reponame>byteskeptical/salt<gh_stars>1-10
# -*- coding: utf-8 -*-
'''
Managing implicit state and baselines using snapshots
=====================================================
.. versionadded:: 2016.11.0
Salt can manage state against explicitly defined state, for example
if your minion state is defined by:
.. code-block:: yaml
/etc/config_file:
file.managed:
- source: salt://configs/myconfig
If someone modifies this file, the next application of the highstate will
allow the admin to correct this deviation and the file will be corrected.
Now, what happens if somebody creates a file ``/etc/new_config_file`` and
deletes ``/etc/important_config_file``? Unless you have an explicit rule, this
change will go unnoticed.
The snapper state module allows you to manage state implicitly, in addition
to explicit rules, in order to define a baseline and iterate with explicit
rules as they show that they work in production.
The workflow is: once you have a working and audited system, you would create
your baseline snapshot (eg. with ``salt tgt snapper.create_snapshot``) and
define in your state this baseline using the identifier of the snapshot
(in this case: 20):
.. code-block:: yaml
my_baseline:
snapper.baseline_snapshot:
- number: 20
- include_diff: False
- ignore:
- /var/log
- /var/cache
Baseline snapshots can also be referenced by tag. The most recent baseline
snapshot is used in case of multiple snapshots with the same tag:
my_baseline_external_storage:
snapper.baseline_snapshot:
- tag: my_custom_baseline_tag
- config: external
- ignore:
- /mnt/tmp_files/
If you have this state, and you haven't done changes to the system since the
snapshot, and you add a user, the state will show you the changes (including
full diffs) to ``/etc/passwd``, ``/etc/shadow``, etc if you call it
with ``test=True`` and will undo all changes if you call it without.
This allows you to add more explicit state knowing that you are starting from a
very well defined state, and that you can audit any change that is not part
of your explicit configuration.
So after you made this your state, you decided to introduce a change in your
configuration:
.. code-block:: yaml
my_baseline:
snapper.baseline_snapshot:
- number: 20
- ignore:
- /var/log
- /var/cache
hosts_entry:
file.blockreplace:
- name: /etc/hosts
- content: 'First line of content'
- append_if_not_found: True
The change in ``/etc/hosts`` will be done after any other change that deviates
from the specified snapshot are reverted. This could be for example,
modifications to the ``/etc/passwd`` file or changes in the ``/etc/hosts``
that could render your the ``hosts_entry`` rule void or dangerous.
Once you take a new snapshot and you update the baseline snapshot number to
include the change in ``/etc/hosts`` the ``hosts_entry`` rule will basically
do nothing. You are free to leave it there for documentation, to ensure that
the change is made in case the snapshot is wrong, but if you remove anything
that comes after the ``snapper.baseline_snapshot`` as it will have no effect;
by the moment the state is evaluated, the baseline state was already applied
and include this change.
.. warning::
Make sure you specify the baseline state before other rules, otherwise
the baseline state will revert all changes if they are not present in
the snapshot.
.. warning::
Do not specify more than one baseline rule as only the last one will
affect the result.
:codeauthor: <NAME>. <<EMAIL>>
:codeauthor: <NAME> <<EMAIL>>
:maturity: new
:platform: Linux
'''
from __future__ import absolute_import, unicode_literals, print_function
import os
def __virtual__():
    '''
    Only load if the snapper module is available in __salt__
    '''
    if 'snapper.diff' in __salt__:
        return 'snapper'
    return False
def _get_baseline_from_tag(config, tag):
    '''
    Returns the last created baseline snapshot marked with `tag`
    '''
    newest = None
    for snap in __salt__['snapper.list_snapshots'](config):
        if snap['userdata'].get("baseline_tag") != tag:
            continue
        if newest is None or newest['timestamp'] < snap['timestamp']:
            newest = snap
    return newest
def baseline_snapshot(name, number=None, tag=None, include_diff=True, config='root', ignore=None):
    '''
    Enforces that no file is modified comparing against a previously
    defined snapshot identified by number.
    number
        Number of selected baseline snapshot.
    tag
        Tag of the selected baseline snapshot. The most recent baseline
        snapshot is used in case of multiple snapshots with the same tag.
        (`tag` and `number` cannot be used at the same time)
    include_diff
        Include a diff in the response (Default: True)
    config
        Snapper config name (Default: root)
    ignore
        List of files to ignore. (Default: None)
    '''
    if not ignore:
        ignore = []
    # Standard salt state return structure.
    ret = {'changes': {},
           'comment': '',
           'name': name,
           'result': True}
    # Exactly one of `number` / `tag` must identify the baseline.
    if number is None and tag is None:
        ret.update({'result': False,
                    'comment': 'Snapshot tag or number must be specified'})
        return ret
    if number and tag:
        ret.update({'result': False,
                    'comment': 'Cannot use snapshot tag and number at the same time'})
        return ret
    # Resolve a tag to its concrete snapshot number.
    if tag:
        snapshot = _get_baseline_from_tag(config, tag)
        if not snapshot:
            ret.update({'result': False,
                        'comment': 'Baseline tag "{0}" not found'.format(tag)})
            return ret
        number = snapshot['id']
    # Deviations between the current state (0) and the baseline snapshot.
    status = __salt__['snapper.status'](
        config, num_pre=0, num_post=number)
    # Drop ignored files, and everything under ignored directories.
    for target in ignore:
        if os.path.isfile(target):
            status.pop(target, None)
        elif os.path.isdir(target):
            for target_file in [target_file for target_file in status.keys() if target_file.startswith(target)]:
                status.pop(target_file, None)
    for file in status:
        # Only include diff for modified files
        if "modified" in status[file]["status"] and include_diff:
            status[file].pop("status")
            status[file].update(__salt__['snapper.diff'](config,
                                                         num_pre=0,
                                                         num_post=number,
                                                         filename=file).get(file, {}))
    if __opts__['test'] and status:
        # Dry run with pending changes: report them, result None per salt
        # convention for "would change".
        ret['changes'] = status
        ret['comment'] = "{0} files changes are set to be undone".format(len(status.keys()))
        ret['result'] = None
    elif __opts__['test'] and not status:
        ret['changes'] = {}
        ret['comment'] = "Nothing to be done"
        ret['result'] = True
    elif not __opts__['test'] and status:
        # Real run: revert every deviating file back to the baseline.
        undo = __salt__['snapper.undo'](config, num_pre=number, num_post=0,
                                        files=status.keys())
        # NOTE(review): 'sumary' is a typo for 'summary', but it is part of
        # the returned changes payload -- renaming it could break consumers
        # of this state's output; confirm before fixing.
        ret['changes']['sumary'] = undo
        ret['changes']['files'] = status
        ret['result'] = True
    else:
        ret['comment'] = "No changes were done"
        ret['result'] = True
    return ret
| StarcoderdataPython |
8170721 | <filename>funcx_sdk/funcx/sdk/client.py
import json
import os
import logging
from inspect import getsource
from globus_sdk import AuthClient
from fair_research_login import NativeClient, JSONTokenStorage
from funcx.sdk.search import SearchHelper, FunctionSearchResults
from funcx.serialize import FuncXSerializer
# from funcx.sdk.utils.futures import FuncXFuture
from funcx.sdk.utils import throttling
from funcx.sdk.utils.batch import Batch
from funcx.utils.errors import MalformedResponse, VersionMismatch, SerializationError, HTTPError
try:
from funcx_endpoint.endpoint import VERSION as ENDPOINT_VERSION
except ModuleNotFoundError:
ENDPOINT_VERSION = None
from funcx.sdk import VERSION as SDK_VERSION
logger = logging.getLogger(__name__)
class FuncXClient(throttling.ThrottledBaseClient):
"""Main class for interacting with the funcX service
Holds helper operations for performing common tasks with the funcX service.
"""
TOKEN_DIR = os.path.expanduser("~/.funcx/credentials")
TOKEN_FILENAME = 'funcx_sdk_tokens.json'
CLIENT_ID = '4cf29807-cf21-49ec-9443-ff9a3fb9f81c'
def __init__(self, http_timeout=None, funcx_home=os.path.join('~', '.funcx'),
force_login=False, fx_authorizer=None, search_authorizer=None,
openid_authorizer=None,
funcx_service_address='https://api.funcx.org/v1',
**kwargs):
""" Initialize the client
Parameters
----------
http_timeout: int
Timeout for any call to service in seconds.
Default is no timeout
force_login: bool
Whether to force a login to get new credentials.
fx_authorizer:class:`GlobusAuthorizer <globus_sdk.authorizers.base.GlobusAuthorizer>`:
A custom authorizer instance to communicate with funcX.
Default: ``None``, will be created.
search_authorizer:class:`GlobusAuthorizer <globus_sdk.authorizers.base.GlobusAuthorizer>`:
A custom authorizer instance to communicate with Globus Search.
Default: ``None``, will be created.
openid_authorizer:class:`GlobusAuthorizer <globus_sdk.authorizers.base.GlobusAuthorizer>`:
A custom authorizer instance to communicate with OpenID.
Default: ``None``, will be created.
funcx_service_address: str
The address of the funcX web service to communicate with.
Default: https://api.funcx.org/v1
Keyword arguments are the same as for BaseClient.
"""
self.func_table = {}
self.ep_registration_path = 'register_endpoint_2'
self.funcx_home = os.path.expanduser(funcx_home)
if not os.path.exists(self.TOKEN_DIR):
os.makedirs(self.TOKEN_DIR)
tokens_filename = os.path.join(self.TOKEN_DIR, self.TOKEN_FILENAME)
self.native_client = NativeClient(client_id=self.CLIENT_ID,
app_name="FuncX SDK",
token_storage=JSONTokenStorage(tokens_filename))
# TODO: if fx_authorizer is given, we still need to get an authorizer for Search
fx_scope = "https://auth.globus.org/scopes/facd7ccc-c5f4-42aa-916b-a0e270e2c2a9/all"
search_scope = "urn:globus:auth:scope:search.api.globus.org:all"
scopes = [fx_scope, search_scope, "openid"]
if not fx_authorizer or not search_authorizer or not openid_authorizer:
self.native_client.login(requested_scopes=scopes,
no_local_server=kwargs.get("no_local_server", True),
no_browser=kwargs.get("no_browser", True),
refresh_tokens=kwargs.get("refresh_tokens", True),
force=force_login)
all_authorizers = self.native_client.get_authorizers_by_scope(requested_scopes=scopes)
fx_authorizer = all_authorizers[fx_scope]
search_authorizer = all_authorizers[search_scope]
openid_authorizer = all_authorizers["openid"]
super(FuncXClient, self).__init__("funcX",
environment='funcx',
authorizer=fx_authorizer,
http_timeout=http_timeout,
base_url=funcx_service_address,
**kwargs)
self.fx_serializer = FuncXSerializer()
authclient = AuthClient(authorizer=openid_authorizer)
user_info = authclient.oauth2_userinfo()
self.searcher = SearchHelper(authorizer=search_authorizer, owner_uuid=user_info['sub'])
self.funcx_service_address = funcx_service_address
def version_check(self):
"""Check this client version meets the service's minimum supported version.
"""
resp = self.get("version", params={"service": "all"})
versions = resp.data
if "min_ep_version" not in versions:
raise VersionMismatch("Failed to retrieve version information from funcX service.")
min_ep_version = versions['min_ep_version']
if ENDPOINT_VERSION is None:
raise VersionMismatch("You do not have the funcx endpoint installed. You can use 'pip install funcx-endpoint'.")
if ENDPOINT_VERSION < min_ep_version:
raise VersionMismatch(f"Your version={ENDPOINT_VERSION} is lower than the "
f"minimum version for an endpoint: {min_ep_version}. Please update.")
def logout(self):
"""Remove credentials from your local system
"""
self.native_client.logout()
def update_table(self, return_msg, task_id):
""" Parses the return message from the service and updates the internal func_tables
Parameters
----------
return_msg : str
Return message received from the funcx service
task_id : str
task id string
"""
if isinstance(return_msg, str):
r_dict = json.loads(return_msg)
else:
r_dict = return_msg
r_status = r_dict.get('status', 'unknown')
status = {'pending': True,
'status': r_status}
if 'result' in r_dict:
try:
r_obj = self.fx_serializer.deserialize(r_dict['result'])
completion_t = r_dict['completion_t']
except Exception:
raise SerializationError("Result Object Deserialization")
else:
status.update({'pending': False,
'result': r_obj,
'completion_t': completion_t})
self.func_table[task_id] = status
elif 'exception' in r_dict:
try:
r_exception = self.fx_serializer.deserialize(r_dict['exception'])
completion_t = r_dict['completion_t']
logger.info(f"Exception : {r_exception}")
except Exception:
raise SerializationError("Task's exception object deserialization")
else:
status.update({'pending': False,
'exception': r_exception,
'completion_t': completion_t})
self.func_table[task_id] = status
return status
def get_task(self, task_id):
"""Get a funcX task.
Parameters
----------
task_id : str
UUID of the task
Returns
-------
dict
Task block containing "status" key.
"""
if task_id in self.func_table:
return self.func_table[task_id]
r = self.get("tasks/{task_id}".format(task_id=task_id))
logger.debug("Response string : {}".format(r))
try:
rets = self.update_table(r.text, task_id)
except Exception as e:
raise e
return rets
def get_result(self, task_id):
""" Get the result of a funcX task
Parameters
----------
task_id: str
UUID of the task
Returns
-------
Result obj: If task completed
Raises
------
Exception obj: Exception due to which the task failed
"""
task = self.get_task(task_id)
if task['pending'] is True:
raise Exception(task['status'])
else:
if 'result' in task:
return task['result']
else:
logger.warning("We have an exception : {}".format(task['exception']))
task['exception'].reraise()
def get_batch_result(self, task_id_list):
""" Request status for a batch of task_ids
"""
assert isinstance(task_id_list, list), "get_batch_status expects a list of task ids"
pending_task_ids = [t for t in task_id_list if t not in self.func_table]
results = {}
if pending_task_ids:
payload = {'task_ids': pending_task_ids}
r = self.post("/batch_status", json_body=payload)
logger.debug("Response string : {}".format(r))
pending_task_ids = set(pending_task_ids)
for task_id in task_id_list:
if task_id in pending_task_ids:
try:
data = r['results'][task_id]
rets = self.update_table(data, task_id)
results[task_id] = rets
except KeyError:
logger.debug("Task {} info was not available in the batch status")
except Exception:
logger.exception("Failure while unpacking results fom get_batch_status")
else:
results[task_id] = self.func_table[task_id]
return results
def run(self, *args, endpoint_id=None, function_id=None, **kwargs):
"""Initiate an invocation
Parameters
----------
*args : Any
Args as specified by the function signature
endpoint_id : uuid str
Endpoint UUID string. Required
function_id : uuid str
Function UUID string. Required
asynchronous : bool
Whether or not to run the function asynchronously
Returns
-------
task_id : str
UUID string that identifies the task
"""
assert endpoint_id is not None, "endpoint_id key-word argument must be set"
assert function_id is not None, "function_id key-word argument must be set"
batch = self.create_batch()
batch.add(*args, endpoint_id=endpoint_id, function_id=function_id, **kwargs)
r = self.batch_run(batch)
"""
Create a future to deal with the result
funcx_future = FuncXFuture(self, task_id, async_poll)
if not asynchronous:
return funcx_future.result()
# Return the result
return funcx_future
"""
return r[0]
def create_batch(self):
"""
Create a Batch instance to handle batch submission in funcX
Parameters
----------
Returns
-------
Batch instance
Status block containing "status" key.
"""
batch = Batch()
return batch
def batch_run(self, batch):
"""Initiate a batch of tasks to funcX
Parameters
----------
batch: a Batch object
Returns
-------
task_ids : a list of UUID strings that identify the tasks
"""
servable_path = 'submit'
assert isinstance(batch, Batch), "Requires a Batch object as input"
assert len(batch.tasks) > 0, "Requires a non-empty batch"
data = batch.prepare()
# Send the data to funcX
r = self.post(servable_path, json_body=data)
if r.http_status != 200:
raise HTTPError(r)
if r.get("status", "Failed") == "Failed":
raise MalformedResponse("FuncX Request failed: {}".format(r.get("reason", "Unknown")))
return r['task_uuids']
def map_run(self, *args, endpoint_id=None, function_id=None, asynchronous=False, **kwargs):
"""Initiate an invocation
Parameters
----------
*args : Any
Args as specified by the function signature
endpoint_id : uuid str
Endpoint UUID string. Required
function_id : uuid str
Function UUID string. Required
asynchronous : bool
Whether or not to run the function asynchronously
Returns
-------
task_id : str
UUID string that identifies the task
"""
servable_path = 'submit_batch'
assert endpoint_id is not None, "endpoint_id key-word argument must be set"
assert function_id is not None, "function_id key-word argument must be set"
ser_kwargs = self.fx_serializer.serialize(kwargs)
batch_payload = []
iterator = args[0]
for arg in iterator:
ser_args = self.fx_serializer.serialize((arg,))
payload = self.fx_serializer.pack_buffers([ser_args, ser_kwargs])
batch_payload.append(payload)
data = {'endpoints': [endpoint_id],
'func': function_id,
'payload': batch_payload,
'is_async': asynchronous}
# Send the data to funcX
r = self.post(servable_path, json_body=data)
if r.http_status != 200:
raise Exception(r)
if r.get("status", "Failed") == "Failed":
raise MalformedResponse("FuncX Request failed: {}".format(r.get("reason", "Unknown")))
return r['task_uuids']
def register_endpoint(self, name, endpoint_uuid, metadata=None, endpoint_version=None):
"""Register an endpoint with the funcX service.
Parameters
----------
name : str
Name of the endpoint
endpoint_uuid : str
The uuid of the endpoint
metadata : dict
endpoint metadata, see default_config example
endpoint_version: str
Version string to be passed to the webService as a compatibility check
Returns
-------
A dict
{'endopoint_id' : <>,
'address' : <>,
'client_ports': <>}
"""
self.version_check()
data = {
"endpoint_name": name,
"endpoint_uuid": endpoint_uuid,
"version": endpoint_version
}
if metadata:
data['meta'] = metadata
r = self.post(self.ep_registration_path, json_body=data)
if r.http_status != 200:
raise HTTPError(r)
# Return the result
return r.data
def get_containers(self, name, description=None):
"""Register a DLHub endpoint with the funcX service and get the containers to launch.
Parameters
----------
name : str
Name of the endpoint
description : str
Description of the endpoint
Returns
-------
int
The port to connect to and a list of containers
"""
registration_path = 'get_containers'
data = {"endpoint_name": name, "description": description}
r = self.post(registration_path, json_body=data)
if r.http_status != 200:
raise HTTPError(r)
# Return the result
return r.data['endpoint_uuid'], r.data['endpoint_containers']
def get_container(self, container_uuid, container_type):
"""Get the details of a container for staging it locally.
Parameters
----------
container_uuid : str
UUID of the container in question
container_type : str
The type of containers that will be used (Singularity, Shifter, Docker)
Returns
-------
dict
The details of the containers to deploy
"""
container_path = f'containers/{container_uuid}/{container_type}'
r = self.get(container_path)
if r.http_status != 200:
raise HTTPError(r)
# Return the result
return r.data['container']
def get_endpoint_status(self, endpoint_uuid):
"""Get the status reports for an endpoint.
Parameters
----------
endpoint_uuid : str
UUID of the endpoint in question
Returns
-------
dict
The details of the endpoint's stats
"""
stats_path = f'endpoints/{endpoint_uuid}/status'
r = self.get(stats_path)
if r.http_status != 200:
raise HTTPError(r)
# Return the result
return r.data
def register_function(self, function, function_name=None, container_uuid=None, description=None,
public=False, group=None, searchable=True):
"""Register a function code with the funcX service.
Parameters
----------
function : Python Function
The function to be registered for remote execution
function_name : str
The entry point (function name) of the function. Default: None
container_uuid : str
Container UUID from registration with funcX
description : str
Description of the file
public : bool
Whether or not the function is publicly accessible. Default = False
group : str
A globus group uuid to share this function with
searchable : bool
If true, the function will be indexed into globus search with the appropriate permissions
Returns
-------
function uuid : str
UUID identifier for the registered function
"""
registration_path = 'register_function'
source_code = ""
try:
source_code = getsource(function)
except OSError:
logger.error("Failed to find source code during function registration.")
serialized_fn = self.fx_serializer.serialize(function)
packed_code = self.fx_serializer.pack_buffers([serialized_fn])
data = {"function_name": function.__name__,
"function_code": packed_code,
"function_source": source_code,
"container_uuid": container_uuid,
"entry_point": function_name if function_name else function.__name__,
"description": description,
"public": public,
"group": group,
"searchable": searchable}
logger.info("Registering function : {}".format(data))
r = self.post(registration_path, json_body=data)
if r.http_status != 200:
raise HTTPError(r)
func_uuid = r.data['function_uuid']
# Return the result
return func_uuid
def update_function(self, func_uuid, function):
pass
def search_function(self, q, offset=0, limit=10, advanced=False):
"""Search for function via the funcX service
Parameters
----------
q : str
free-form query string
offset : int
offset into total results
limit : int
max number of results to return
advanced : bool
allows elastic-search like syntax in query string
Returns
-------
FunctionSearchResults
"""
return self.searcher.search_function(q, offset=offset, limit=limit, advanced=advanced)
def search_endpoint(self, q, scope='all', owner_id=None):
"""
Parameters
----------
q
scope : str
Can be one of {'all', 'my-endpoints', 'shared-with-me'}
owner_id
should be urn like f"urn:globus:auth:identity:{owner_uuid}"
Returns
-------
"""
return self.searcher.search_endpoint(q, scope=scope, owner_id=owner_id)
def register_container(self, location, container_type, name='', description=''):
"""Register a container with the funcX service.
Parameters
----------
location : str
The location of the container (e.g., its docker url). Required
container_type : str
The type of containers that will be used (Singularity, Shifter, Docker). Required
name : str
A name for the container. Default = ''
description : str
A description to associate with the container. Default = ''
Returns
-------
str
The id of the container
"""
container_path = 'containers'
payload = {'name': name, 'location': location, 'description': description, 'type': container_type}
r = self.post(container_path, json_body=payload)
if r.http_status != 200:
raise HTTPError(r)
# Return the result
return r.data['container_id']
    def add_to_whitelist(self, endpoint_id, function_ids):
        """Adds the function(s) to the endpoint's whitelist.

        Parameters
        ----------
        endpoint_id : str
            The uuid of the endpoint
        function_ids : list or str
            A list of function id's to be whitelisted (a single id is also accepted)

        Returns
        -------
        json
            The response of the request

        Raises
        ------
        HTTPError
            If the service responds with a non-200 status.
        """
        req_path = f'endpoints/{endpoint_id}/whitelist'
        # Accept a bare function id as well as a list of ids.
        if not isinstance(function_ids, list):
            function_ids = [function_ids]
        payload = {'func': function_ids}
        r = self.post(req_path, json_body=payload)
        if r.http_status != 200:
            raise HTTPError(r)
        # Return the result
        return r
    def get_whitelist(self, endpoint_id):
        """List the endpoint's whitelist.

        Parameters
        ----------
        endpoint_id : str
            The uuid of the endpoint

        Returns
        -------
        json
            The response of the request

        Raises
        ------
        HTTPError
            If the service responds with a non-200 status.
        """
        req_path = f'endpoints/{endpoint_id}/whitelist'
        r = self.get(req_path)
        if r.http_status != 200:
            raise HTTPError(r)
        # Return the result
        return r
    def delete_from_whitelist(self, endpoint_id, function_ids):
        """Remove functions from the endpoint's whitelist.

        One DELETE request is issued per function id.

        Parameters
        ----------
        endpoint_id : str
            The uuid of the endpoint
        function_ids : list or str
            A list of function id's to be removed (a single id is also accepted)

        Returns
        -------
        list
            The responses of the individual delete requests

        Raises
        ------
        HTTPError
            If any delete responds with a non-200 status.
        """
        if not isinstance(function_ids, list):
            function_ids = [function_ids]
        res = []
        for fid in function_ids:
            req_path = f'endpoints/{endpoint_id}/whitelist/{fid}'
            r = self.delete(req_path)
            if r.http_status != 200:
                raise HTTPError(r)
            res.append(r)
        # Return the result
        return res
| StarcoderdataPython |
9744449 | <reponame>chopinx/file_manager<filename>file_manager/utils.py<gh_stars>0
import datetime
import hashlib
import os
import time
def str_md5sum(my_str, cache_map):
    """Return the hex MD5 digest of *my_str*, memoized in *cache_map*.

    Raises TypeError if *my_str* is not a str (unless already cached).
    """
    cached = cache_map.get(my_str)
    if cached is not None:
        return cached
    if not isinstance(my_str, str):
        raise TypeError
    digest = hashlib.md5(my_str.encode("utf8")).hexdigest()
    cache_map[my_str] = digest
    return digest
def file_md5sum(path, cache_map):
    """MD5 digest of the file at *path*, memoized in *cache_map*.

    When the file's fifth part exceeds 1 MiB, only one 1 MiB block is hashed
    at each ``size // 5`` offset and the file size is mixed into the digest,
    so large files get a fast *sampled* fingerprint rather than a full MD5.

    Raises ValueError if *path* does not exist and TypeError if it is a
    directory (unless already cached).
    """
    cached = cache_map.get(path)
    if cached is not None:
        return cached
    if not os.path.exists(path):
        raise ValueError()
    if os.path.isdir(path):
        raise TypeError()
    digest = hashlib.md5()
    f_size = os.path.getsize(path)
    block_size = 1024 * 1024
    step = f_size // 5
    sample_only = step > block_size
    offset = 0
    with open(path, 'rb') as handle:
        chunk = handle.read(block_size)
        while chunk:
            digest.update(chunk)
            if sample_only:
                # Skip ahead so only five blocks of a large file are hashed.
                offset += step
                handle.seek(offset)
            chunk = handle.read(block_size)
    if sample_only:
        # Mix the size in so sampled files of different lengths differ.
        digest.update(str(f_size).encode("utf8"))
    result = digest.hexdigest()
    cache_map[path] = result
    return result
def dir_md5sum(path, cache_map):
    """Content digest for a directory tree, memoized in *cache_map*.

    The digest is the MD5 of the sorted, '|'-joined digests of every entry
    (recursing via dir_md5sum for subdirectories, file_md5sum for files),
    so two directories with the same contents hash equal regardless of
    entry order. Raises TypeError if *path* is not a directory.
    """
    if cache_map.get(path, None) is not None:
        return cache_map.get(path, None)
    if not os.path.isdir(path):
        raise TypeError()
    md5_list = []
    for sub_path in os.listdir(path):
        abs_path = os.path.join(path, sub_path)
        if os.path.isdir(abs_path):
            md5_list.append(dir_md5sum(abs_path, cache_map))
        else:
            md5_list.append(file_md5sum(abs_path, cache_map))
    # Sorting makes the combined digest independent of listing order.
    md5_list = sorted(md5_list)
    result = str_md5sum("|".join(md5_list), cache_map)
    cache_map[path] = result
    # print(len(cache_map), path, result)
    return result
def group_by_md5(path_md5_map):
    """Invert a ``{path: md5}`` mapping into ``{md5: sorted list of paths}``."""
    grouped = {}
    for path, digest in path_md5_map.items():
        grouped.setdefault(digest, []).append(path)
    return {digest: sorted(paths) for digest, paths in grouped.items()}
def load_all_files(path):
    """Hash every file under *path*.

    Returns a pair ``({abs_path: md5}, {md5: abs_path})``. Duplicate digests
    are reported on stdout, a progress line is printed roughly once per
    gigabyte scanned, and files raising PermissionError are skipped.
    """
    start_time = time.time()
    file_md5_map = {}
    md5_file_map = {}
    scanned = 0
    bytes_seen = 0
    last_report_bytes = 0
    for root, dirs, files in os.walk(path):
        for file_name in files:
            abs_path = os.path.join(root, file_name)
            bytes_seen += os.path.getsize(abs_path)
            try:
                digest = file_md5sum(abs_path, {})
                file_md5_map[abs_path] = digest
                if md5_file_map.get(digest, None) is not None:
                    print("%s is dup with %s" % (abs_path, md5_file_map[digest]))
                md5_file_map[digest] = abs_path
                scanned += 1
                if bytes_seen - last_report_bytes > 1024 * 1024 * 1024:
                    dup_count = len(file_md5_map) - len(md5_file_map)
                    speed = bytes_seen / 1024 / 1024 / (time.time() - start_time)
                    print("%s:%d files has been scanned,%d dup, %.2fG, %.2fMB/s, file_name=%s"
                          % (datetime.datetime.now(), scanned, dup_count,
                             bytes_seen / 1024 / 1024 / 1024, speed, file_name))
                    last_report_bytes = bytes_seen
            except PermissionError as e:
                print(e)
    return file_md5_map, md5_file_map
def got_index(file_map):
    """Invert ``{path: md5}`` into ``{md5: last path seen}``, reporting dups."""
    index = {}
    for file_path, digest in file_map.items():
        previous = index.get(digest)
        if previous is not None:
            print("%s is dup with %s" % (file_path, previous))
        index[digest] = file_path
    print("%d files, got %d md5" % (len(file_map), len(index)))
    return index
def write2csv(path_md5_map, file_name):
    """Write a ``{path: md5}`` map to *file_name* as one 'md5,path,...' line per digest."""
    md5_paths_map = group_by_md5(path_md5_map)
    with open(file_name, "w", encoding="utf-8") as f:
        for md5, paths in md5_paths_map.items():
            f.write("%s,%s\n" % (md5, ",".join(paths)))
def scan_path_to_csv(path):
    """Hash every file and directory under *path* and dump the results.

    Writes directory digests to 'dirs.csv' and file digests to 'files.csv'
    in the current working directory; progress is printed per entry.
    """
    cache = {}
    dir_md5_map = {}
    file_md5_map = {}
    i = 0
    for root, dirs, files in os.walk(path):
        for file_name in files:
            abs_path = os.path.join(root, file_name)
            md5sum = file_md5sum(abs_path, cache)
            i += 1
            print(i, abs_path, md5sum)
            file_md5_map[abs_path] = md5sum
        for dir_name in dirs:
            abs_path = os.path.join(root, dir_name)
            md5sum = dir_md5sum(abs_path, cache)
            i += 1
            print(i, abs_path, md5sum)
            dir_md5_map[abs_path] = md5sum
    write2csv(dir_md5_map, "dirs.csv")
    write2csv(file_md5_map, "files.csv")
def remove_dup(path):
    """Delete duplicate files under *path*, keeping one copy per digest.

    Duplicates are logged to 'dup.txt'. When two files share a digest and
    size, the one with the longer path is removed -- unless it lives under
    the hard-coded protected OneDrive picture folder, which is never deleted.
    DESTRUCTIVE: this removes files on disk.
    """
    start_time = time.time()
    # path = "E:\\test"
    cache = {}
    cnt = 0
    path_list = []
    for root, dirs, files in os.walk(path):
        for file_name in files:
            abs_path = os.path.join(root, file_name)
            path_list.append(abs_path)
            cnt += 1
    print("%d files has been scanned" % cnt)
    path_list = sorted(path_list)
    md5_map = {}
    f = open("dup.txt", "a+", encoding="utf8")
    size_sum = 0
    cnt = 0
    for abs_path in path_list:
        try:
            md5sum = file_md5sum(abs_path, cache)
            new_size = os.path.getsize(abs_path)
            size_sum += new_size
            cnt += 1
            if md5_map.get(md5sum, None) is not None and abs_path != md5_map[md5sum]:
                print("%s is dup with %s" % (abs_path, md5_map[md5sum]))
                f.write("%s is dup with %s\n" % (abs_path, md5_map[md5sum]))
                # Only delete when sizes also match (guards against sampled-digest collisions).
                if new_size == os.path.getsize(md5_map[md5sum]):
                    if len(abs_path) >= len(md5_map[md5sum]) and abs_path.find(
                            "E:\\OneDrive - alumni.hust.edu.cn\\图片") < 0:
                        os.remove(abs_path)
                    elif md5_map[md5sum].find("E:\\OneDrive - alumni.hust.edu.cn\\图片") < 0:
                        os.remove(md5_map[md5sum])
                        md5_map[md5sum] = abs_path
            else:
                md5_map[md5sum] = abs_path
            print("%d: %.2fG has been done,sleep=%.2fMB/s" % (cnt, size_sum / 1024 / 1024 / 1024,
                                                              size_sum / 1024 / 1024 / (time.time() - start_time)))
        except Exception as e:
            # NOTE(review): swallows every error silently -- consider at least logging `e`.
            pass
    f.close()
def remove_dup_2(base_path, new_paths):
    """Delete files under *new_paths* whose digest already exists under *base_path*.

    DESTRUCTIVE: files in *new_paths* are removed; *base_path* is the kept copy.
    """
    _, md5_map = load_all_files(base_path)
    for new_path in new_paths:
        new_file_map, _ = load_all_files(new_path)
        for new_file, md5 in new_file_map.items():
            if md5_map.get(md5, None) is not None:
                try:
                    print("%s is dup with %s" % (new_file, md5_map[md5]))
                    os.remove(new_file)
                except PermissionError as e:
                    # NOTE(review): '2' is an opaque marker; printing `e` would be clearer.
                    print(2)
def remove_empty_dir(path):
    """Recursively delete *path* and any subdirectory that ends up empty."""
    for entry in os.listdir(path):
        child = os.path.join(path, entry)
        if os.path.isdir(child):
            remove_empty_dir(child)
    # Re-list: children may have been removed by the recursion above.
    if not os.listdir(path):
        print("%s is empty" % path)
        os.rmdir(path)
if __name__ == '__main__':
    # Example run: de-duplicate the listed drives against the OneDrive master copy.
    remove_dup_2(base_path="E:\OneDrive - alumni.hust.edu.cn", new_paths=["F:\视频", "F:\百度云盘3", "F:\移动硬盘"])
| StarcoderdataPython |
9728941 | from collections import OrderedDict
from urllib.parse import urlencode
def order_list_api(request, data, page, max_per_page, total):
    """Build a Django-DRF-style paginated response.

    Parameters
    ----------
    request : flask request-like object (scheme, host, path, args)
    data : the page of results to return
    page : 1-based page number
    max_per_page : items per page
    total : total item count

    Returns
    -------
    OrderedDict with 'count', 'next', 'previous' and 'results' keys; the
    next/previous URLs carry over any extra query parameters.
    """
    extra_query = get_urlencode(request)
    seen_so_far = max_per_page * page
    host = "{}://{}".format(request.scheme, request.host)
    next_url = None
    previous_url = None
    if total > seen_so_far:
        next_url = add_urlencode_to_url(
            '{}{}?page={}'.format(host, request.path, page + 1), extra_query)
    if seen_so_far > max_per_page:
        previous_url = add_urlencode_to_url(
            '{}{}?page={}'.format(host, request.path, page - 1), extra_query)
    return OrderedDict([
        ('count', total),
        ('next', next_url),
        ('previous', previous_url),
        ('results', data),
    ])
def get_urlencode(request):
    """Re-encode the request's query string with any 'page' parameter removed."""
    query = {key: request.args.get(key) for key in request.args}
    query.pop('page', None)
    return urlencode(query)
def add_urlencode_to_url(url, url_urlencode):
    """Append an already-encoded query fragment to *url* with '&' (no-op when empty)."""
    if not url_urlencode:
        return url
    return "{}&{}".format(url, url_urlencode)
3232193 | # ActivitySim
# See full license in LICENSE.txt.
import os.path
import logging
import pytest
import pandas as pd
from .. import tracing
from .. import inject
def close_handlers():
    """Reset every logger the logging module knows about.

    Drops all handlers and restores propagation and the NOTSET level so
    each test starts from a clean logging configuration.
    """
    for name in logging.Logger.manager.loggerDict:
        known_logger = logging.getLogger(name)
        known_logger.handlers = []
        known_logger.propagate = True
        known_logger.setLevel(logging.NOTSET)
def teardown_function(func):
    """pytest per-function teardown: reset the orca/inject cache between tests."""
    inject.clear_cache()
    inject.reinject_decorated_tables()
def add_canonical_dirs():
    """Point the injectable configs/output dirs at this test module's folders."""
    inject.clear_cache()
    configs_dir = os.path.join(os.path.dirname(__file__), 'configs')
    inject.add_injectable("configs_dir", configs_dir)
    output_dir = os.path.join(os.path.dirname(__file__), 'output')
    inject.add_injectable("output_dir", output_dir)
def test_config_logger(capsys):
    """config_logger should attach one file handler and route warnings to console,
    info to the log file only."""
    add_canonical_dirs()
    tracing.config_logger()
    logger = logging.getLogger('activitysim')
    file_handlers = [h for h in logger.handlers if type(h) is logging.FileHandler]
    assert len(file_handlers) == 1
    asim_logger_baseFilename = file_handlers[0].baseFilename
    print("handlers:", logger.handlers)
    logger.info('test_config_logger')
    logger.info('log_info')
    logger.warning('log_warn1')
    out, err = capsys.readouterr()
    # don't consume output
    print(out)
    assert "could not find conf file" not in out
    assert 'log_warn1' in out
    assert 'log_info' not in out
    close_handlers()
    # After handlers are closed, further messages must not reach the log file.
    logger = logging.getLogger(__name__)
    logger.warning('log_warn2')
    with open(asim_logger_baseFilename, 'r') as content_file:
        content = content_file.read()
    print(content)
    assert 'log_warn1' in content
    assert 'log_warn2' not in content
def test_print_summary(capsys):
    """print_summary with neither describe nor value_counts should warn and do nothing."""
    add_canonical_dirs()
    tracing.config_logger()
    tracing.print_summary('label', df=pd.DataFrame(), describe=False, value_counts=False)
    out, err = capsys.readouterr()
    # don't consume output
    print(out)
    assert 'print_summary neither value_counts nor describe' in out
    close_handlers()
def test_register_households(capsys):
    """Registering households should warn on a missing index name and on a
    trace_hh_id that is absent from the table."""
    add_canonical_dirs()
    tracing.config_logger()
    df = pd.DataFrame({'zort': ['a', 'b', 'c']}, index=[1, 2, 3])
    inject.add_injectable('traceable_tables', ['households'])
    inject.add_injectable("trace_hh_id", 5)
    tracing.register_traceable_table('households', df)
    out, err = capsys.readouterr()
    # print out # don't consume output
    assert "Can't register table 'households' without index name" in out
    df.index.name = 'household_id'
    tracing.register_traceable_table('households', df)
    out, err = capsys.readouterr()
    # print out # don't consume output
    # should warn that household id not in index
    assert 'trace_hh_id 5 not in dataframe' in out
    close_handlers()
def test_register_tours(capsys):
    """Tours can only be sliced once a parent households table is registered and
    the tours table carries a household_id column."""
    add_canonical_dirs()
    tracing.config_logger()
    inject.add_injectable('traceable_tables', ['households', 'tours'])
    # in case another test injected this
    inject.add_injectable("trace_tours", [])
    inject.add_injectable("trace_hh_id", 3)  # need this or register_traceable_table is a nop
    tours_df = pd.DataFrame({'zort': ['a', 'b', 'c']}, index=[10, 11, 12])
    tours_df.index.name = 'tour_id'
    tracing.register_traceable_table('tours', tours_df)
    out, err = capsys.readouterr()
    assert "can't find a registered table to slice table 'tours' index name 'tour_id'" in out
    inject.add_injectable("trace_hh_id", 3)
    households_df = pd.DataFrame({'dzing': ['a', 'b', 'c']}, index=[1, 2, 3])
    households_df.index.name = 'household_id'
    tracing.register_traceable_table('households', households_df)
    # Still fails: tours has no household_id column to join on yet.
    tracing.register_traceable_table('tours', tours_df)
    out, err = capsys.readouterr()
    # print out # don't consume output
    assert "can't find a registered table to slice table 'tours'" in out
    tours_df['household_id'] = [1, 5, 3]
    tracing.register_traceable_table('tours', tours_df)
    out, err = capsys.readouterr()
    print(out)  # don't consume output
    # should be tracing tour with tour_id 3
    traceable_table_ids = inject.get_injectable('traceable_table_ids')
    assert traceable_table_ids['tours'] == [12]
    close_handlers()
def test_write_csv(capsys):
    """write_csv should warn when given something that is not a DataFrame/Series."""
    add_canonical_dirs()
    tracing.config_logger()
    # should complain if df not a DataFrame or Series
    tracing.write_csv(df='not a df or series', file_name='baddie')
    out, err = capsys.readouterr()
    print(out)  # don't consume output
    assert "unexpected type" in out
    close_handlers()
def test_slice_ids():
    """slice_ids filters by a named column or by index, and raises on a bad column."""
    df = pd.DataFrame({'household_id': [1, 2, 3]}, index=[11, 12, 13])
    # slice by named column
    sliced_df = tracing.slice_ids(df, [1, 3, 6], column='household_id')
    assert len(sliced_df.index) == 2
    # slice by index
    sliced_df = tracing.slice_ids(df, [6, 12], column=None)
    assert len(sliced_df.index) == 1
    # attempt to slice by non-existent column
    with pytest.raises(RuntimeError) as excinfo:
        sliced_df = tracing.slice_ids(df, [5, 6], column='baddie')
    assert "slice_ids slicer column 'baddie' not in dataframe" in str(excinfo.value)
def test_basic(capsys):
    """config_logger(basic=True) must log to console only (no file handler)
    at INFO level."""
    close_handlers()
    configs_dir = os.path.join(os.path.dirname(__file__), 'configs')
    inject.add_injectable("configs_dir", configs_dir)
    output_dir = os.path.join(os.path.dirname(__file__), 'output')
    inject.add_injectable("output_dir", output_dir)
    # remove existing handlers or basicConfig is a NOP
    logging.getLogger().handlers = []
    tracing.config_logger(basic=True)
    logger = logging.getLogger()
    file_handlers = [h for h in logger.handlers if type(h) is logging.FileHandler]
    assert len(file_handlers) == 0
    logger = logging.getLogger('activitysim')
    logger.info('test_basic')
    logger.debug('log_debug')
    logger.info('log_info')
    logger.warning('log_warn')
    out, err = capsys.readouterr()
    # don't consume output
    print(out)
    assert 'log_warn' in out
    assert 'log_info' in out
    assert 'log_debug' not in out
    close_handlers()
| StarcoderdataPython |
6425037 | <filename>townsquare/testing/__init__.py
"""
The testing package provides test boilerplate to avoid
writing the same code in tests. Use it extensively.
"""
from flask_testing import TestCase
from townsquare import TownSquare
from townsquare.db import db
from townsquare.db.data import bootstrap
class TownSquareTestCase(TestCase):
    """Base flask_testing.TestCase that gives each test a fresh, bootstrapped DB."""

    def setUp(self):
        """Create all tables and load bootstrap data before each test."""
        db.create_all()
        bootstrap()
        return super(TownSquareTestCase, self).setUp()

    def tearDown(self):
        """Drop all tables after each test so state never leaks between tests."""
        db.drop_all()
        return super(TownSquareTestCase, self).tearDown()

    def create_app(self):
        """Build the application under the 'testing' configuration."""
        return TownSquare.create_app('testing')
6421007 | import pytest
from share.harvest.base import FetchResult
from share.models import SourceConfig, RawDatum
xml = """
<row xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<APPLICATION_ID>6828756</APPLICATION_ID>
<ACTIVITY>N01</ACTIVITY>
<ADMINISTERING_IC>AG</ADMINISTERING_IC>
<APPLICATION_TYPE>5</APPLICATION_TYPE>
<ARRA_FUNDED xsi:nil="true"/>
<BUDGET_START xsi:nil="true"/>
<BUDGET_END xsi:nil="true"/>
<FOA_NUMBER xsi:nil="true"/>
<FULL_PROJECT_NUM>N01AG062101-013</FULL_PROJECT_NUM>
<FUNDING_ICs xsi:nil="true"/>
<FY>2003</FY>
<NIH_SPENDING_CATS xsi:nil="true"/>
<ORG_CITY xsi:nil="true"/>
<ORG_COUNTRY xsi:nil="true"/>
<ORG_DISTRICT xsi:nil="true"/>
<ORG_DUNS xsi:nil="true"/>
<ORG_DEPT xsi:nil="true"/>
<ORG_FIPS xsi:nil="true"/>
<ORG_STATE xsi:nil="true"/>
<ORG_ZIPCODE xsi:nil="true"/>
<IC_NAME>NATIONAL INSTITUTE ON AGING</IC_NAME>
<ORG_NAME>UNIVERSITY OF PITTSBURGH AT PI</ORG_NAME>
<PIS xsi:nil="true"/>
<PROJECT_TERMS xsi:nil="true"/>
<PROJECT_TITLE>DYNAMICS OF HEALTH,AGING, AND BODY COMPOSITION-260962101</PROJECT_TITLE>
<PROJECT_START xsi:nil="true"/>
<PROJECT_END xsi:nil="true"/>
<PHR xsi:nil="true"/>
<SERIAL_NUMBER>62101</SERIAL_NUMBER>
<STUDY_SECTION xsi:nil="true"/>
<STUDY_SECTION_NAME xsi:nil="true"/>
<SUPPORT_YEAR xsi:nil="true"/>
<SUFFIX xsi:nil="true"/>
<SUBPROJECT_ID xsi:nil="true"/>
<TOTAL_COST xsi:nil="true"/>
<TOTAL_COST_SUB_PROJECT xsi:nil="true"/>
<CORE_PROJECT_NUM>N01AG062101</CORE_PROJECT_NUM>
<CFDA_CODE xsi:nil="true"/>
<PROGRAM_OFFICER_NAME xsi:nil="true"/>
<ED_INST_TYPE xsi:nil="true"/>
<AWARD_NOTICE_DATE xsi:nil="true"/>
</row>
"""
@pytest.mark.django_db
def test_gov_nih_transformer():
    """Transform the module-level NIH ``xml`` payload and inspect the resulting graph."""
    config = SourceConfig.objects.get(label=('gov.nih'))
    transformer = config.get_transformer()
    # Fix: the raw payload is the module-level ``xml`` string; the previous
    # reference to ``data`` was an undefined name (NameError at runtime).
    fetch_result = FetchResult('6828756', xml)
    # fetch_result = FetchResult('http://gov_nih.org/seinet/collections/misc/collprofiles.php?collid=187', data)
    raw_datum = RawDatum.objects.store_data(config, fetch_result)
    graph = transformer.transform(raw_datum)
    dataset = graph.filter_nodes(lambda n: n.type == 'dataset')[0]
    assert dataset.type == 'dataset'
    # NOTE(review): the expected values below describe a SEINet herbarium
    # collection, not the NIH project XML above -- they look copied from
    # another source's test; confirm the intended expectations.
    assert dataset['description'] == 'Sample description'
    assert dataset['title'] == '<NAME> (SRSC)'
    assert dataset['extra']['usage_rights'] == 'CC BY-NC (Attribution-Non-Commercial)'
    assert dataset['extra']['access_rights'] == 'Sul Ross University'
    assert dataset['extra']['collection_statistics'] == {
        "(25%) georeferenced": "1,195",
        "(59%) identified to species": "2,849",
        "(61%) with images": "2,954",
        "families": "104",
        "genera": "361",
        "species": "661",
        "specimen records": "4,868",
        "total taxa (including subsp. and var.)": "762"
    }
    agent_relations = dataset['agent_relations']
    assert len(agent_relations) == 1
    agent = agent_relations[0]['agent']
    assert agent['given_name'] == 'Test'
    assert agent['identifiers'][0]['uri'] == 'mailto:<EMAIL>'
    identifiers = dataset['identifiers']
    assert len(identifiers) == 1
    assert identifiers[0]['uri'] == 'http://gov_nih.org/seinet/collections/misc/collprofiles.php?collid=187'
| StarcoderdataPython |
3219880 | <reponame>pawan7587/Snake_game<gh_stars>0
"""
This module contains a basic wrapper for a window class.
"""
import pygame
class Window:
    """Thin wrapper around a pygame display surface with basic drawing helpers.

    Attributes
    ----------
    background_colour : list(int, int, int)
        RGB colour used by :meth:`clear`.
    screen : pygame.Surface
        The wrapped pygame screen.
    screen_size : tuple(int, int)
        The (width, height) passed to the constructor.
    """
    # pylint: disable=no-member

    def __init__(self, background_colour, width, height):
        """Initialise pygame, open the window and fill it with *background_colour*.

        Parameters
        ----------
        background_colour : list(int, int, int)
            RGB triple (0-255 each) used for clearing.
        width : int
            Window width in pixels.
        height : int
            Window height in pixels.
        """
        pygame.init()
        self.background_colour = background_colour
        self.screen_size = (width, height)
        self.screen = pygame.display.set_mode(self.screen_size)
        pygame.display.set_caption('DV1614 Assignment 2: Snake')
        self.screen.fill(background_colour)

    def draw_rect(self, colour, top_left, size):
        """Draw a filled rectangle.

        Parameters
        ----------
        colour : list(int, int, int)
            RGB triple of the rectangle.
        top_left : tuple(int, int)
            (x, y) of the rectangle's top-left corner.
        size : tuple(int, int)
            (width, height) of the rectangle.
        """
        pygame.draw.rect(self.screen, colour, (top_left, size))

    def clear(self):
        """Fill the whole screen with the constructor's background colour."""
        self.screen.fill(self.background_colour)

    def width(self):
        """Return the window width in pixels."""
        return self.screen_size[0]

    def height(self):
        """Return the window height in pixels."""
        return self.screen_size[1]
4990296 | <filename>cnapy/gui_elements/phase_plane_dialog.py
"""The phase plane plot dialog"""
import matplotlib.pyplot as plt
from qtpy.QtCore import Qt, Signal
from qtpy.QtWidgets import (QCompleter, QDialog, QHBoxLayout, QLabel,
QLineEdit, QPushButton, QVBoxLayout)
import numpy
class CompleterLineEdit(QLineEdit):
    """QLineEdit whose completer restarts after each COMMA, so a
    comma-separated list of ids can be auto-completed item by item."""

    def __init__(self, wordlist, *args):
        QLineEdit.__init__(self, *args)
        self.mycompleter = QCompleter(wordlist)
        self.mycompleter.setCaseSensitivity(Qt.CaseInsensitive)
        self.mycompleter.setWidget(self)
        self.textChanged.connect(self.text_changed)
        self.mycompleter.activated.connect(self.complete_text)

    def text_changed(self, text):
        """Use only the text between the last comma and the cursor as prefix."""
        all_text = text
        text = all_text[:self.cursorPosition()]
        prefix = text.split(',')[-1].strip()
        self.mycompleter.setCompletionPrefix(prefix)
        if prefix != '':
            self.mycompleter.complete()

    def complete_text(self, text):
        """Replace the current (post-comma) prefix with the chosen completion,
        keeping any text after the cursor intact."""
        cursor_pos = self.cursorPosition()
        before_text = self.text()[:cursor_pos]
        after_text = self.text()[cursor_pos:]
        prefix_len = len(before_text.split(',')[-1].strip())
        self.setText(before_text[:cursor_pos - prefix_len] + text + after_text)
        self.setCursorPosition(cursor_pos - prefix_len + len(text))
textChangedX = Signal(str)
class PhasePlaneDialog(QDialog):
    """A dialog to create phase plane plots: for two chosen reactions it plots
    the feasible flux envelope (min/max of y over the feasible range of x)."""

    def __init__(self, appdata):
        QDialog.__init__(self)
        self.setWindowTitle("Phase plane plotting")
        self.appdata = appdata
        completer = QCompleter(
            self.appdata.project.cobra_py_model.reactions.list_attr("id"), self)
        completer.setCaseSensitivity(Qt.CaseInsensitive)
        self.layout = QVBoxLayout()
        l1 = QHBoxLayout()
        t1 = QLabel("Reaction (x-axis):")
        l1.addWidget(t1)
        self.x_axis = CompleterLineEdit(
            self.appdata.project.cobra_py_model.reactions.list_attr("id"), "")
        self.x_axis.setPlaceholderText("Enter reaction Id")
        l1.addWidget(self.x_axis)
        l2 = QHBoxLayout()
        t2 = QLabel("Reaction (y-axis):")
        l2.addWidget(t2)
        self.y_axis = QLineEdit("")
        self.y_axis.setPlaceholderText("Enter reaction Id")
        self.y_axis.setCompleter(completer)
        l2.addWidget(self.y_axis)
        self.layout.addItem(l1)
        self.layout.addItem(l2)
        l3 = QHBoxLayout()
        self.button = QPushButton("Plot")
        self.cancel = QPushButton("Close")
        l3.addWidget(self.button)
        l3.addWidget(self.cancel)
        self.layout.addItem(l3)
        self.setLayout(self.layout)
        # Connecting the signal
        self.cancel.clicked.connect(self.reject)
        self.button.clicked.connect(self.compute)

    def compute(self):
        """Compute and show the phase plane for the two entered reaction ids.

        Silently returns if either id is unknown. For 100 sample points of the
        x-reaction's feasible range, the y-reaction is minimised and maximised
        with x fixed, and the resulting envelope is drawn with matplotlib.
        """
        self.setCursor(Qt.BusyCursor)
        with self.appdata.project.cobra_py_model as model:
            self.appdata.project.load_scenario_into_model(model)
            x_axis = self.x_axis.text()
            y_axis = self.y_axis.text()
            try:
                x_reac_idx = model.reactions.index(x_axis)
                y_reac_idx = model.reactions.index(y_axis)
            except KeyError:
                # Unknown reaction id entered -- nothing to plot.
                return
            points = 100
            # Feasible range of the x-reaction via FBA min/max.
            with model as ppmodel:
                ppmodel.objective = ppmodel.reactions[x_reac_idx]
                ppmodel.objective.direction = 'min'
                x_lb = ppmodel.slim_optimize()
                ppmodel.objective.direction = 'max'
                x_ub = ppmodel.slim_optimize()
            result2 = numpy.zeros((points, 3))
            result2[:, 0] = numpy.linspace(x_lb, x_ub, num=points)
            var = numpy.linspace(x_lb, x_ub, num=points)
            lb = numpy.full(points, numpy.nan)
            ub = numpy.full(points, numpy.nan)
            with model as ppmodel:
                ppmodel.objective = ppmodel.reactions[y_reac_idx]
                for i in range(points):
                    # without second context the original reaction bounds are not restored (?)
                    with ppmodel as ppmodel2:
                        # Pin x to the sample value, then min/max y.
                        ppmodel2.reactions[x_reac_idx].lower_bound = result2[i, 0]
                        ppmodel2.reactions[x_reac_idx].upper_bound = result2[i, 0]
                        ppmodel2.objective.direction = 'min'
                        lb[i] = result2[i, 1] = ppmodel2.slim_optimize()
                        ppmodel2.objective.direction = 'max'
                        ub[i] = result2[i, 2] = ppmodel2.slim_optimize()
            _fig, axes = plt.subplots()
            axes.set_xlabel(model.reactions[x_reac_idx].id)
            axes.set_ylabel(model.reactions[y_reac_idx].id)
            # Trace the lower bound left-to-right, then the upper bound back.
            x = [v for v in var] + [v for v in reversed(var)]
            y = [v for v in lb] + [v for v in reversed(ub)]
            if lb[0] != ub[0]:
                # Close the polygon when the envelope is open at the left edge.
                x.extend([var[0], var[0]])
                y.extend([lb[0], ub[0]])
            plt.plot(x, y)
            plt.show()
        self.appdata.window.centralWidget().show_bottom_of_console()
        self.setCursor(Qt.ArrowCursor)
| StarcoderdataPython |
9613815 | <reponame>mvgiacomello/leetcode-solutions
import unittest
from longest_substring_wo_repeat_chars import solution
class TestSolution(unittest.TestCase):
    """Unit tests for the longest-substring-without-repeating-characters solution."""

    def test_repeat_char_detection(self):
        """repeat_chars flags any repeated character.

        NOTE(review): 'aA' is expected True, so detection appears
        case-insensitive -- consistent with 'abcABCBB' -> 3 below.
        """
        self.assertTrue(solution.repeat_chars('aa'))
        self.assertTrue(solution.repeat_chars('aA'))
        self.assertTrue(solution.repeat_chars('abcabcd'))
        self.assertFalse(solution.repeat_chars('ab'))
        self.assertFalse(solution.repeat_chars('abc'))

    def test_solution(self):
        """length_longest_substring over empty, trivial and LeetCode sample inputs."""
        self.assertEqual(0, solution.length_longest_substring(''))
        self.assertEqual(1, solution.length_longest_substring('a'))
        self.assertEqual(2, solution.length_longest_substring('au'))
        self.assertEqual(1, solution.length_longest_substring('aa'))
        self.assertEqual(2, solution.length_longest_substring('aab'))
        self.assertEqual(3, solution.length_longest_substring('abcabcbb'))
        self.assertEqual(3, solution.length_longest_substring('abcABCBB'))
        self.assertEqual(1, solution.length_longest_substring('bbbbb'))
        self.assertEqual(3, solution.length_longest_substring('pwwkew'))
        self.assertEqual(6, solution.length_longest_substring('mauricio'))
if __name__ == '__main__':
    # Allow running this test module directly without a test runner.
    unittest.main()
| StarcoderdataPython |
4951108 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
import time,sys
from pymongo import MongoClient
from bson.objectid import ObjectId
db = MongoClient('10.168.11.151')['fair_db']
db.authenticate('ipcam','zjjL_3026')
ISOTIMEFORMAT=['%Y-%m-%d %X', '%Y-%m-%d', '%Y%m%d%H%M']
def time_str(t=None, format=0):
    """Format epoch seconds *t* (default: now) as local time using ISOTIMEFORMAT[format]."""
    return time.strftime(ISOTIMEFORMAT[format], time.localtime(t))
# Refund statistics: mark pending WeChat-pay refunds as refunded and print them.
# (Python 2 script -- note the print statements.)
if __name__ == "__main__":
    if len(sys.argv)<3:
        print "usage: python %s <begin_date> <end_date> -list" % sys.argv[0]
        sys.exit(2)
    begin_date = '%s 00:00:00' % sys.argv[1]
    end_date = '%s 23:59:59' % sys.argv[2]
    # '-list' = dry run: only print, do not update order status.
    if len(sys.argv)>3 and sys.argv[3]=='-list':
        just_list = True
    else:
        just_list = False
    condition = {
        'type' : {'$in':['TUAN','SINGLE']},
        'status' : {'$in':['CANCEL_TO_REFUND','FAIL_TO_REFUND']}, # orders awaiting refund
        '$and' : [{'paid_time' : {'$gt' : begin_date}},
                  {'paid_time' : {'$lt' : end_date}}],
    }
    db_order = db.order_app.find(condition, {
        'order_id' : 1,
        'status' : 1,
        'sum_to_refund': 1,
        'paid_time' : 1,
        'pay_type' : 1,
        'type' : 1,
        'due' : 1,
        'uname' : 1,
        'wx_out_trade_no':1,
    }).sort([('paid_time',1)])
    # print '订单号 金额 说明'  # (commented-out header: order id / amount / note)
    for u in db_order:
        # Only WeChat-pay orders are processed.
        if u['pay_type']!='WXPAY':
            continue
        if just_list==False:
            # Update the order status and append a history entry.
            db.order_app.update_one({'_id':u['_id']},{
                '$set' : {'status' : 'REFUND'},
                '$push' : {'history' : (time_str(), 'script', '退款完成')}
            })
        # Prefer the WeChat out-trade number when present.
        if len(u.get('wx_out_trade_no',''))>0:
            order_id = u['wx_out_trade_no'].encode('utf-8')
        else:
            order_id = u['order_id'].encode('utf-8')
        print \
            order_id+' '+ \
            '%.2f'%float(u.get('sum_to_refund',u['due']))+' '+ \
            '系统退款'
21787 | import os
import pytest
import numpy as np
from laserembeddings import Laser
SIMILARITY_TEST = os.getenv('SIMILARITY_TEST')
def test_laser():
    """Smoke test: embedding two English sentences yields a (2, 1024) matrix."""
    with open(Laser.DEFAULT_ENCODER_FILE, 'rb') as f_encoder:
        # Pass the encoder as an open file object (bpe codes by path, no vocab).
        laser = Laser(
            Laser.DEFAULT_BPE_CODES_FILE,
            None,
            f_encoder,
        )
    assert laser.embed_sentences(
        ['hello world!', 'i hope the tests are passing'],
        lang='en').shape == (2, 1024)
def test_similarity(test_data):
    """Compare our embeddings with the original LASER reference embeddings.

    Skipped unless the SIMILARITY_TEST env var is set. Writes a markdown
    report of per-language cosine similarity to report/comparison-with-LASER.md.
    """
    if not SIMILARITY_TEST:
        pytest.skip("SIMILARITY_TEST not set")
    if not test_data:
        raise FileNotFoundError(
            'laserembeddings-test-data.npz is missing, run "python -m laserembeddings download-test-data" to fix that 🔧'
        )
    report = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                          'report', 'comparison-with-LASER.md')
    laser = Laser()
    with open(report, 'w', encoding='utf-8') as f_report:
        f_report.write(
            '# Comparison of the embeddings computed with original LASER with the embeddings computed with this package\n'
        )
        f_report.write(
            '| |language|avg. cosine similarity|min. cosine similarity|\n')
        f_report.write(
            '|-|--------|----------------------|----------------------|\n')
        for lang in test_data['langs']:
            if lang in ('cmn', 'wuu', 'yue', 'zh', 'jpn', 'ja', 'el'):
                # language not supported, ignoring
                continue
            sents = test_data[f'{lang}_sentences']
            orig_embeddings = test_data[f'{lang}_embeddings']
            embeddings = laser.embed_sentences(sents, lang)
            assert embeddings.shape == orig_embeddings.shape
            # Row-wise cosine similarity between reference and our embeddings.
            cosine_similarities = np.sum(
                orig_embeddings * embeddings,
                axis=1) / (np.linalg.norm(orig_embeddings, axis=1) *
                           np.linalg.norm(embeddings, axis=1))
            similarity_mean = np.mean(cosine_similarities)
            similarity_min = np.min(cosine_similarities)
            f_report.write(
                f'|{"✅" if similarity_min > 0.99999 else "⚠️" if similarity_mean > 0.99 else "❌"}|{lang}|{similarity_mean:.5f}|{similarity_min:.5f}|\n'
            )
| StarcoderdataPython |
5145172 | <filename>riberry/app/context/__init__.py
from contextlib import contextmanager
import riberry
from .artifact import Artifact
from .current import ContextCurrent
from .event_registry import EventRegistry, EventRegistryHelper
from .external_task import ExternalTask
from .flow import Flow
from .input_mapping import InputMappings
from .report import Report
from .shared_data import SharedExecutionData
class Context:
    """Aggregates the riberry application context helpers behind one object.

    Each helper (current task, inputs, shared data, flow, artifacts, reports,
    external tasks, event registry) is constructed with a back-reference to
    this context.
    """

    def __init__(self):
        self.current = ContextCurrent(context=self)
        self.input = InputMappings(context=self)
        self.data = SharedExecutionData(context=self)
        self.flow = Flow(context=self)
        self.artifact = Artifact()
        self.report = Report(context=self)
        self.external_task = ExternalTask(context=self)
        self.event_registry = EventRegistry(context=self)
        self.on = EventRegistryHelper(context=self)

    @contextmanager
    def scope(self, root_id, task_id, task_name, stream, category, step):
        """Context manager that scopes ``self.current`` to the given task attributes."""
        with self.current.scope(
            root_id=root_id,
            task_id=task_id,
            task_name=task_name,
            stream=stream,
            category=category,
            step=step,
        ):
            yield

    def spawn(self, form_name, job_name=None, input_values=None, input_files=None, owner=None, execute=True):
        """Create (and by default execute) a new job for *form_name* via riberry's job actions."""
        return riberry.app.actions.jobs.create_job(
            form_name=form_name,
            job_name=job_name,
            input_files=input_files,
            input_values=input_values,
            owner=owner,
            execute=execute,
        )
| StarcoderdataPython |
8110912 | # coding: utf-8
from __future__ import print_function
import sys
import pytest
import xmldiff.main
from lxml import etree
from lxml.builder import E
from benker.builders.formex import FormexBuilder
from benker.cell import Cell
from benker.table import Table
TBL = E.TBL
ROW = E.ROW
P = E.P
@pytest.mark.parametrize(
    'kwargs, attrib',
    [
        ({}, {'COL': u"1"}),
        ({'width': 2}, {'COL': u"1", 'COLSPAN': u"2"}),
        ({'height': 2}, {'COL': u"1", 'ROWSPAN': u"2"}),
        ({'nature': 'body'}, {'COL': u"1", 'TYPE': 'NORMAL'}),
        ({'nature': 'header'}, {'COL': u"1", 'TYPE': 'HEADER'}),
        ({'nature': 'footer'}, {'COL': u"1", 'TYPE': '__GR.NOTES__'}),
        ({'styles': {'vertical-align': "middle"}}, {'COL': u"1"}),
        ({'styles': {'align': "center"}}, {'COL': u"1"}),
        ({'styles': {'background-color': "yellow"}}, {'COL': u"1"}),
    ],
)
def test_build_cell__body(kwargs, attrib):
    """build_cell maps Cell span/nature to CELL attributes; styles are dropped
    when CALS extensions are off."""
    builder = FormexBuilder()
    p_elem = P(u"text")
    cell_x1_y1 = Cell([p_elem], x=1, y=1, **kwargs)
    table = Table([cell_x1_y1])
    builder._table = table
    # -- build the cell
    row_elem = ROW()
    row_y1 = next(iter(table.rows))
    builder.build_cell(row_elem, cell_x1_y1, row_y1)
    # -- check the '<CELL>' attributes
    entry_elem = row_elem[0]  # type: etree._Element
    assert entry_elem.tag == u"CELL"
    assert entry_elem.attrib == attrib
    assert entry_elem[0] == p_elem
@pytest.mark.parametrize(
    'kwargs, attrib',
    [
        ({'styles': {'vertical-align': "middle"}}, {'COL': u"1", 'valign': "middle"}),
        ({'styles': {'align': "center"}}, {'COL': u"1", 'align': "center"}),
        ({'styles': {'background-color': "yellow"}}, {'COL': u"1", 'bgcolor': "yellow"}),
    ],
)
def test_build_cell__use_cals(kwargs, attrib):
    """With use_cals=True, cell styles become CALS attributes on the CELL element."""
    builder = FormexBuilder(use_cals=True, cals_ns=None)
    p_elem = P(u"text")
    cell_x1_y1 = Cell([p_elem], x=1, y=1, **kwargs)
    table = Table([cell_x1_y1])
    builder._table = table
    # -- build the cell
    row_elem = ROW()
    row_y1 = next(iter(table.rows))
    builder.build_cell(row_elem, cell_x1_y1, row_y1)
    # -- check the '<CELL>' attributes
    entry_elem = row_elem[0]  # type: etree._Element
    assert entry_elem.tag == u"CELL"
    assert entry_elem.attrib == attrib
    assert entry_elem[0] == p_elem
@pytest.mark.parametrize(
    'kwargs, attrib',
    [
        ({'nature': 'body'}, {'COL': u"1", 'TYPE': 'NORMAL'}),
        ({'nature': 'header'}, {'COL': u"1"}),
        ({'nature': 'footer'}, {'COL': u"1", 'TYPE': '__GR.NOTES__'}),
    ],
)
def test_build_cell__head(kwargs, attrib):
    """In a header row, only cells whose nature differs from the row get a TYPE."""
    builder = FormexBuilder()
    p_elem = P(u"text")
    cell_x1_y1 = Cell([p_elem], x=1, y=1, **kwargs)
    table = Table([cell_x1_y1])
    builder._table = table
    # -- build the cell
    row_elem = ROW()
    row_y1 = next(iter(table.rows))
    row_y1.nature = "header"
    builder.build_cell(row_elem, cell_x1_y1, row_y1)
    # -- check the '<CELL>' attributes
    entry_elem = row_elem[0]  # type: etree._Element
    assert entry_elem.tag == u"CELL"
    assert entry_elem.attrib == attrib
    assert entry_elem[0] == p_elem
def test_build_title():
    """A one-cell title row becomes ``<TITLE><TI><P>…</P></TI></TITLE>``."""
    table = Table()
    table.rows[1].insert_cell(u"Title", styles={"align": "center"})
    builder = FormexBuilder()
    tbl_elem = TBL()
    builder.build_title(tbl_elem, table.rows[0])
    # -- check the '<TITLE>' attributes
    title_elem = tbl_elem[0]  # type: etree._Element
    xml_parser = etree.XMLParser(remove_blank_text=True)
    # fmt: off
    expected = etree.XML(u"""\
<TITLE>
<TI>
<P>Title</P>
</TI>
</TITLE>""", parser=xml_parser)
    # fmt: on
    diff_list = xmldiff.main.diff_trees(title_elem, expected)
    if diff_list:
        # dump the actual tree to aid debugging a failed diff
        print(etree.tounicode(title_elem, pretty_print=True, with_tail=False), file=sys.stderr)
    assert diff_list == []
def test_build_title__empty():
    """An empty title cell is serialized as an ``<IE/>`` placeholder element."""
    table = Table()
    table.rows[1].insert_cell(None, styles={"align": "center"})
    builder = FormexBuilder()
    tbl_elem = TBL()
    builder.build_title(tbl_elem, table.rows[0])
    # -- check the '<TITLE>' attributes
    title_elem = tbl_elem[0]  # type: etree._Element
    xml_parser = etree.XMLParser(remove_blank_text=True)
    # fmt: off
    expected = etree.XML(u"""\
<TITLE>
<TI>
<IE/>
</TI>
</TITLE>""", parser=xml_parser)
    # fmt: on
    diff_list = xmldiff.main.diff_trees(title_elem, expected)
    if diff_list:
        # dump the actual tree to aid debugging a failed diff
        print(etree.tounicode(title_elem, pretty_print=True, with_tail=False), file=sys.stderr)
    assert diff_list == []
def test_build_title__subtitle():
    """Extra paragraphs after the first go into an ``<STI>`` (subtitle) element."""
    table = Table()
    content = [P(u"TITLE"), P(u"Subtitle 1"), P(u"Subtitle 2")]
    table.rows[1].insert_cell(content, styles={"align": "center"})
    builder = FormexBuilder()
    tbl_elem = TBL()
    builder.build_title(tbl_elem, table.rows[0])
    # -- check the '<TITLE>' attributes
    title_elem = tbl_elem[0]  # type: etree._Element
    xml_parser = etree.XMLParser(remove_blank_text=True)
    # fmt: off
    expected = etree.XML(u"""\
<TITLE>
<TI>
<P>TITLE</P>
</TI>
<STI>
<P>Subtitle 1</P>
<P>Subtitle 2</P>
</STI>
</TITLE>""", parser=xml_parser)
    # fmt: on
    diff_list = xmldiff.main.diff_trees(title_elem, expected)
    if diff_list:
        # dump the actual tree to aid debugging a failed diff
        print(etree.tounicode(title_elem, pretty_print=True, with_tail=False), file=sys.stderr)
    assert diff_list == []
def test_build_tbl():
    """Build a full ``<TBL>/<CORPUS>`` tree with a header row and a 3-row span."""
    # see: formex-4/samples/jo-compl-2002C_061/C_2002061EN.01000403.xml
    table = Table()
    table.rows[1].nature = "header"
    table.rows[1].insert_cell([P(u"Expert group")])
    table.rows[1].insert_cell([P(u"First name and surname of the expert")])
    table.rows[2].insert_cell([P(u"Control of infectious diseases")])
    table.rows[2].insert_cell([P(u"<NAME>")])
    table.rows[3].insert_cell([P(u"Information society")], height=3)
    table.rows[3].insert_cell([P(u"<NAME>")])
    table.rows[4].insert_cell([P(u"<NAME>CARNAÇÃO")])
    table.rows[5].insert_cell([P(u"Berit SVENDSEN")])
    table.rows[6].insert_cell([P(u"Controlled thermonuclear fusion")])
    table.rows[6].insert_cell([P(u"Pekka PIRILÄ")])
    builder = FormexBuilder()
    table_elem = builder.build_tbl(table)
    xml_parser = etree.XMLParser(remove_blank_text=True)
    # fmt: off
    expected = etree.XML(u"""\
<TBL COLS="2" NO.SEQ="0001">
<CORPUS>
<ROW TYPE="HEADER">
<CELL COL="1">
<P>Expert group</P>
</CELL>
<CELL COL="2">
<P>First name and surname of the expert</P>
</CELL>
</ROW>
<ROW>
<CELL COL="1">
<P>Control of infectious diseases</P>
</CELL>
<CELL COL="2">
<P><NAME></P>
</CELL>
</ROW>
<ROW>
<CELL COL="1" ROWSPAN="3">
<P>Information society</P>
</CELL>
<CELL COL="2">
<P><NAME></P>
</CELL>
</ROW>
<ROW>
<CELL COL="2">
<P><NAME></P>
</CELL>
</ROW>
<ROW>
<CELL COL="2">
<P>Berit SVENDSEN</P>
</CELL>
</ROW>
<ROW>
<CELL COL="1">
<P>Controlled thermonuclear fusion</P>
</CELL>
<CELL COL="2">
<P>Pekka PIRILÄ</P>
</CELL>
</ROW>
</CORPUS>
</TBL>""", parser=xml_parser)
    # fmt: on
    # Normalise empty text nodes to None on both trees before diffing.
    for elem in table_elem.xpath("//*"):
        elem.text = elem.text or None
    for elem in expected.xpath("//*"):
        elem.text = elem.text or None
    diff_list = xmldiff.main.diff_trees(table_elem, expected)
    if diff_list:
        # dump the actual tree to aid debugging a failed diff
        print(etree.tounicode(table_elem, pretty_print=True, with_tail=False), file=sys.stderr)
    assert diff_list == []
def test_build_tbl__with_title():
    """With ``detect_titles=True`` a full-width first row becomes the ``<TITLE>``."""
    # see: formex-4/samples/jo-compl-2002C_280/C_2002280EN.01000101.xml
    table = Table()
    table.rows[1].insert_cell([P(u"1 euro =")], width=3, styles={"align": "center"})
    table.rows[2].nature = "header"
    table.rows[2].insert_cell([P()], styles={"x-cell-empty": "true"})
    table.rows[2].insert_cell([P(u"Currency")])
    table.rows[2].insert_cell([P(u"Exchange rate")])
    table.rows[3].insert_cell([P(u"USD")])
    table.rows[3].insert_cell([P(u"US dollar")])
    table.rows[3].insert_cell([P(u"1,0029")])
    table.rows[4].insert_cell([P(u"JPY")])
    table.rows[4].insert_cell([P(u"Japanese yen")])
    table.rows[4].insert_cell([P(u"121,05")])
    builder = FormexBuilder(detect_titles=True)
    table_elem = builder.build_tbl(table)
    xml_parser = etree.XMLParser(remove_blank_text=True)
    # fmt: off
    expected = etree.XML(u"""\
<TBL COLS="3" NO.SEQ="0001">
<TITLE>
<TI>
<P>1 euro =</P>
</TI>
<STI/>
</TITLE>
<CORPUS>
<ROW TYPE="HEADER">
<CELL COL="1">
<IE/>
</CELL>
<CELL COL="2">
<P>Currency</P>
</CELL>
<CELL COL="3">
<P>Exchange rate</P>
</CELL>
</ROW>
<ROW>
<CELL COL="1">
<P>USD</P>
</CELL>
<CELL COL="2">
<P>US dollar</P>
</CELL>
<CELL COL="3">
<P>1,0029</P>
</CELL>
</ROW>
<ROW>
<CELL COL="1">
<P>JPY</P>
</CELL>
<CELL COL="2">
<P>Japanese yen</P>
</CELL>
<CELL COL="3">
<P>121,05</P>
</CELL>
</ROW>
</CORPUS>
</TBL>""", parser=xml_parser)
    # fmt: on
    # Normalise empty text nodes to None on both trees before diffing.
    for elem in table_elem.xpath("//*"):
        elem.text = elem.text or None
    for elem in expected.xpath("//*"):
        elem.text = elem.text or None
    diff_list = xmldiff.main.diff_trees(table_elem, expected)
    if diff_list:
        # dump the actual tree to aid debugging a failed diff
        print(etree.tounicode(table_elem, pretty_print=True, with_tail=False), file=sys.stderr)
    assert diff_list == []
@pytest.mark.parametrize(
    'orient, size, expected',
    [
        ('portrait', (595, 841), {'NO.SEQ': '0001', 'COLS': '1'}),
        ('landscape', (595, 841), {'NO.SEQ': '0001', 'COLS': '1', 'PAGE.SIZE': 'SINGLE.LANDSCAPE'}),
        ('portrait', (841, 595), {'NO.SEQ': '0001', 'COLS': '1'}),
        ('landscape', (841, 595), {'NO.SEQ': '0001', 'COLS': '1', 'PAGE.SIZE': 'SINGLE.LANDSCAPE'}),
        ('portrait', (1190, 841), {'NO.SEQ': '0001', 'COLS': '1', 'PAGE.SIZE': 'DOUBLE.PORTRAIT'}),
        ('landscape', (1190, 841), {'NO.SEQ': '0001', 'COLS': '1', 'PAGE.SIZE': 'DOUBLE.LANDSCAPE'}),
        ('portrait', (841, 1190), {'NO.SEQ': '0001', 'COLS': '1', 'PAGE.SIZE': 'DOUBLE.PORTRAIT'}),
        ('landscape', (841, 1190), {'NO.SEQ': '0001', 'COLS': '1', 'PAGE.SIZE': 'DOUBLE.LANDSCAPE'}),
    ],
)
def test_build_tbl__orient(orient, size, expected):
    """Section orientation/size styles map to the ``PAGE.SIZE`` attribute of ``<TBL>``."""
    builder = FormexBuilder()
    table = Table(styles={'x-sect-orient': orient, 'x-sect-size': size})
    table.rows[1].insert_cell(u"text")
    table_elem = builder.build_tbl(table)
    assert table_elem.attrib == expected
def test_build_tbl__no_seq():
    """The NO.SEQ counter is incremented for each table built by one builder."""
    builder = FormexBuilder()
    built_elements = []
    for cell_text in (u"text1", u"text2"):
        current_table = Table()
        current_table.rows[1].insert_cell(cell_text)
        built_elements.append(builder.build_tbl(current_table))
    first_elem, second_elem = built_elements
    assert first_elem.attrib['NO.SEQ'] == u"0001"
    assert second_elem.attrib['NO.SEQ'] == u"0002"
def test_build_tbl__empty_cell():
    """A cell holding an empty string is serialized as a single <IE/> element."""
    builder = FormexBuilder()
    empty_table = Table()
    empty_table.rows[1].insert_cell(u"")
    tbl_elem = builder.build_tbl(empty_table)
    cell_elem = tbl_elem.xpath('//CELL')[0]
    assert len(cell_elem) == 1
    assert cell_elem[0].tag == 'IE'
def test_build_tbl__use_cals():
    """With ``use_cals=True``, table styles emit CALS attributes and colspecs."""
    # see: formex-4/samples/jo-compl-2002C_061/C_2002061EN.01000403.xml
    table = Table(
        styles={
            "border-top": "solid",
            "border-bottom": "solid",
            "x-sect-orient": "landscape",
            "x-sect-cols": "1",
            "background-color": "blue",
            "width": "180",
        }
    )
    table.rows[1].nature = "header"
    table.rows[1].insert_cell([P(u"Expert group")], styles={"align": "center"})
    table.rows[1].insert_cell([P(u"First name and surname of the expert")], styles={"align": "center"})
    table.rows[2].insert_cell([P(u"Control of infectious diseases")])
    table.rows[2].insert_cell([P(u"<NAME>")])
    builder = FormexBuilder(use_cals=True, cals_ns=None)
    table_elem = builder.build_tbl(table)
    xml_parser = etree.XMLParser(remove_blank_text=True)
    # fmt: off
    expected = etree.XML(u"""\
<TBL NO.SEQ="0001" COLS="2" PAGE.SIZE="SINGLE.LANDSCAPE">
<CORPUS frame="topbot" colsep="0" rowsep="0" orient="land" pgwide="1" bgcolor="blue" width="180.00mm">
<colspec colname="c1" colnum="1"/>
<colspec colname="c2" colnum="2"/>
<ROW TYPE="HEADER">
<CELL COL="1" align="center">
<P>Expert group</P>
</CELL>
<CELL COL="2" align="center">
<P>First name and surname of the expert</P>
</CELL>
</ROW>
<ROW>
<CELL COL="1">
<P>Control of infectious diseases</P>
</CELL>
<CELL COL="2">
<P><NAME></P>
</CELL>
</ROW>
</CORPUS>
</TBL>""", parser=xml_parser)
    # fmt: on
    # Normalise empty text nodes to None on both trees before diffing.
    for elem in table_elem.xpath("//*"):
        elem.text = elem.text or None
    for elem in expected.xpath("//*"):
        elem.text = elem.text or None
    diff_list = xmldiff.main.diff_trees(table_elem, expected)
    if diff_list:
        # dump the actual tree to aid debugging a failed diff
        print(etree.tounicode(table_elem, pretty_print=True, with_tail=False), file=sys.stderr)
    assert diff_list == []
| StarcoderdataPython |
8102907 | <reponame>jqueguiner/ai-api-template<gh_stars>1-10
import os
import requests
import random
import _thread as thread
from uuid import uuid4
import numpy as np
import skimage
from skimage.filters import gaussian
import zipfile
from PIL import Image
import matplotlib.image as mpimg
import cv2
def blur(image, x0, x1, y0, y1, sigma=1, multichannel=True):
    """Return a copy of *image* with the rectangle ``[y0:y1, x0:x1]`` blurred.

    The coordinate pairs may be given in either order; ``sigma`` controls the
    Gaussian blur strength and ``multichannel`` is forwarded to
    :func:`skimage.filters.gaussian` (treat the last axis as channels).
    """
    # Normalise so y0 <= y1 and x0 <= x1 regardless of argument order.
    y0, y1 = min(y0, y1), max(y0, y1)
    x0, x1 = min(x0, x1), max(x0, x1)
    im = image.copy()
    sub_im = im[y0:y1,x0:x1].copy()
    blur_sub_im = gaussian(sub_im, sigma=sigma, multichannel=multichannel)
    # gaussian() returns floats in [0, 1]; rescale back to the 0-255 range.
    blur_sub_im = np.round(255 * blur_sub_im)
    # NOTE(review): assigning floats into the original array relies on numpy's
    # implicit cast -- presumably *image* is a uint8 array; confirm with callers.
    im[y0:y1,x0:x1] = blur_sub_im
    return im
def download(url, filename):
    """Fetch *url* and write the response body to *filename*; return the path."""
    response_body = requests.get(url).content
    output_file = open(filename, 'wb')
    try:
        output_file.write(response_body)
    finally:
        output_file.close()
    return filename
def generate_random_filename(upload_directory, extension):
    """Return a new path inside *upload_directory* with a random UUID4 stem."""
    random_stem = str(uuid4())
    return os.path.join(upload_directory, "{0}.{1}".format(random_stem, extension))
def clean_me(filename):
    """Delete the file at *filename* when it is present; do nothing otherwise."""
    file_is_present = os.path.exists(filename)
    if file_is_present:
        os.remove(filename)
def clean_all(files):
    """Remove every file in the *files* iterable, skipping missing ones."""
    for file_path in files:
        clean_me(file_path)
def create_directory(path):
    """Create the parent directory of *path*, like ``mkdir -p``.

    Replaces the original ``os.system("mkdir -p %s" % ...)`` call: using
    :func:`os.makedirs` is portable, does not spawn a shell, and is immune to
    shell injection through *path*.
    """
    directory = os.path.dirname(path)
    # dirname() is empty for a bare filename; nothing to create then.
    if directory:
        os.makedirs(directory, exist_ok=True)
def get_model_bin(url, output_path):
    """Download *url* to *output_path* with wget unless the file already exists.

    Returns *output_path* in both cases.
    """
    import shlex  # local import: quoting for the shell command below

    if not os.path.exists(output_path):
        create_directory(output_path)
        # Quote both arguments so paths/URLs containing spaces or shell
        # metacharacters cannot break (or inject into) the command line.
        cmd = "wget -O %s %s" % (shlex.quote(output_path), shlex.quote(url))
        os.system(cmd)
    return output_path
#model_list = [(url, output_path), (url, output_path)]
def get_multi_model_bin(model_list):
    """Download several model binaries concurrently.

    Each item of *model_list* is an ``(url, output_path)`` tuple passed to
    :func:`get_model_bin` in its own thread.

    NOTE(review): the threads are fire-and-forget (never joined), so callers
    cannot tell when the downloads finish -- confirm this is intended.
    """
    for m in model_list:
        thread.start_new_thread(get_model_bin, m)
def unzip(path_to_zip_file, directory_to_extract_to='.'):
    """Extract every member of the given zip archive into the target directory."""
    archive = zipfile.ZipFile(path_to_zip_file, 'r')
    try:
        archive.extractall(directory_to_extract_to)
    finally:
        archive.close()
def resize_img_in_folder(path, w, h):
    """Resize every file directly inside *path* to (w, h) and save it as JPEG.

    NOTE(review): output is always written with a ``.jpg`` extension and JPEG
    quality 90 regardless of the input format, and ``Image.ANTIALIAS`` is
    removed in recent Pillow (use ``Image.LANCZOS``) -- confirm the pinned
    Pillow version before upgrading.
    """
    dirs = os.listdir(path)
    for item in dirs:
        # string concatenation assumes *path* ends with a path separator
        if os.path.isfile(path+item):
            im = Image.open(path+item)
            f, e = os.path.splitext(path+item)
            imResize = im.resize((w, h), Image.ANTIALIAS)
            imResize.save(f + '.jpg', 'JPEG', quality=90)
def resize_img(path, w, h):
    """Read the image at *path* and return it resized to width *w*, height *h*.

    Note: OpenCV's ``dsize`` argument is ``(width, height)``, i.e. the reverse
    of the resulting array's ``shape[:2]``.
    """
    img = mpimg.imread(path)
    img = cv2.resize(img, dsize=(w, h))
    return img
| StarcoderdataPython |
4902734 | # -*- coding: utf-8 -*-
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# DO NOT EDIT! This is a generated sample ("Request", "language_classify_text")
# To install the latest published package dependency, execute the following:
# pip install google-cloud-language
# sample-metadata
# title: Classify Content
# description: Classifying Content in a String
# usage: python3 samples/v1/language_classify_text.py [--text_content "That actor on TV makes movies in Hollywood and also stars in a variety of popular new TV shows."]
# [START language_classify_text]
from google.cloud import language_v1
def sample_classify_text(text_content):
    """
    Classifying Content in a String

    Performs a network call to the Google Cloud Natural Language API and
    prints each predicted category with its confidence score.

    Args:
      text_content The text content to analyze. Must include at least 20 words.
    """
    client = language_v1.LanguageServiceClient()
    # text_content = 'That actor on TV makes movies in Hollywood and also stars in a variety of popular new TV shows.'
    # Available types: PLAIN_TEXT, HTML
    type_ = language_v1.Document.Type.PLAIN_TEXT
    # Optional. If not specified, the language is automatically detected.
    # For list of supported languages:
    # https://cloud.google.com/natural-language/docs/languages
    language = "en"
    document = {"content": text_content, "type_": type_, "language": language}
    response = client.classify_text(request = {'document': document})
    # Loop through classified categories returned from the API
    for category in response.categories:
        # Get the name of the category representing the document.
        # See the predefined taxonomy of categories:
        # https://cloud.google.com/natural-language/docs/categories
        print(u"Category name: {}".format(category.name))
        # Get the confidence. Number representing how certain the classifier
        # is that this category represents the provided text.
        print(u"Confidence: {}".format(category.confidence))
# [END language_classify_text]
def main():
    """Parse the ``--text_content`` flag and classify the given text."""
    import argparse

    default_text = (
        "That actor on TV makes movies in Hollywood and also stars in a "
        "variety of popular new TV shows."
    )
    parser = argparse.ArgumentParser()
    parser.add_argument("--text_content", type=str, default=default_text)
    parsed_args = parser.parse_args()
    sample_classify_text(parsed_args.text_content)


if __name__ == "__main__":
    main()
| StarcoderdataPython |
83451 | # -*- coding: utf-8 -*-
"""
Created on Mon Sep 27 13:49:16 2021
@author: luis
"""
# Data pre-processing template #

# Import the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# Load the dataset
dataset = pd.read_csv('Data.csv')  # define the file location

# Index notation: rows before the comma, columns after the comma
X = dataset.iloc[:, :-1].values  # independent variables
y = dataset.iloc[:, 3].values    # dependent variable (to predict)

# Replace the NA / null values
from sklearn.impute import SimpleImputer

# strategy="mean" replaces missing values with the column mean ("median" is
# another option).  NOTE: the legacy ``verbose`` parameter was removed from
# SimpleImputer in scikit-learn 1.3, so it is no longer passed here.
imputer = SimpleImputer(missing_values=np.nan, strategy="mean")
# Fit on (and transform) only the numeric columns that contain missing values
imputer = imputer.fit(X[:, 1:3])
X[:, 1:3] = imputer.transform(X[:, 1:3])

# Encode the categorical data (convert text labels to numbers)
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.compose import ColumnTransformer

# One-hot ("dummy") encoding of the first column of X
labelencoder_X = LabelEncoder()
X[:, 0] = labelencoder_X.fit_transform(X[:, 0])
ct = ColumnTransformer(
    [('one_hot_encoder', OneHotEncoder(categories='auto'), [0])],
    remainder='passthrough')
# BUG FIX: ``np.float`` was removed in NumPy 1.24 -- use the builtin ``float``.
X = np.array(ct.fit_transform(X), dtype=float)
# Label encoding of the target y
labelencoder_y = LabelEncoder()
y = labelencoder_y.fit_transform(y)

# Split the dataset into a training set and a test set
from sklearn.model_selection import train_test_split

# test_size: fraction of the samples held out for testing
# random_state: seed, so the split is reproducible
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

# Feature scaling, so that every variable has a comparable magnitude
from sklearn.preprocessing import StandardScaler

sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
from rest_framework import routers
from invoices.api.viewsets import InvoiceViewSet
from menus.api.viewsets import MenuViewSet
from order_items.api.viewsets import OrderItemViewSet
from orders.api.viewsets import OrderViewSet

# URL namespace used by Django's ``reverse()`` (e.g. ``api:order-list``).
app_name = 'api'

# Register every viewset on one DRF router; ``base_name`` is the prefix for
# the generated URL names.
# NOTE(review): ``base_name`` was renamed to ``basename`` in DRF 3.10 --
# confirm the pinned DRF version before upgrading.
router = routers.DefaultRouter()
router.register(r'orders', OrderViewSet, base_name='order')
router.register(r'order-items', OrderItemViewSet, base_name='order_item')
router.register(r'invoices', InvoiceViewSet, base_name='invoice')
router.register(r'menus', MenuViewSet, base_name='menu')

# Expose the router-generated patterns as this module's urlpatterns.
urlpatterns = router.urls
| StarcoderdataPython |
8007823 | import re
from rest_framework import filters
class TypedCourseSearchBackend(filters.SearchFilter):
    """DRF search backend that switches between course-code and keyword search.

    A query that looks like a course code (e.g. ``CIS 120`` or ``cis-120``)
    is normalised to ``DEPT-NUMBER`` and matched as a prefix of
    ``full_code``; any other query is treated as a keyword search over course
    titles and instructor names.  The caller may force a mode with
    ``?type=course`` / ``?type=keyword``; the default ``auto`` infers it.
    """

    # Optional 1-4 letter department, optional space/pipe/dash separator,
    # optional 1-5 digit course number.
    code_re = re.compile(r"^([A-Za-z]{1,4})?[ |-]?(\d{1,5})?$")

    def infer_search_type(self, query):
        """Guess ``"course"`` or ``"keyword"`` from the shape of *query*."""
        if self.code_re.match(query):
            return "course"
        else:
            return "keyword"

    def get_schema_operation_parameters(self, view):
        """Describe the ``search`` query parameter for the OpenAPI schema."""
        return [
            {
                "name": "search",
                "schema": {"type": "string"},
                "required": False,
                "in": "query",
                "description": "Search query. Can be either a fragment of a course code, or any "
                "keyword/professor name.",
            },
        ]

    @staticmethod
    def get_search_type(request):
        """Return the requested search type (``?type=``), defaulting to ``auto``."""
        return request.GET.get("type", "auto")

    def get_search_terms(self, request):
        """Return the search terms, normalising full course codes to ``DEPT-NUM``."""
        search_type = self.get_search_type(request)
        query = request.query_params.get(self.search_param, "")
        match = self.code_re.match(query)
        # If this is a course query, either by designation or by detection,
        if (
            (
                search_type == "course"
                or (search_type == "auto" and self.infer_search_type(query) == "course")
            )
            and match
            and match.group(1)
            and match.group(2)
        ):
            # only rewrite when BOTH department and number were captured
            query = f"{match.group(1)}-{match.group(2)}"
        return [query]

    def get_search_fields(self, view, request):
        """Pick the model fields to search, based on the (inferred) search type."""
        search_type = self.get_search_type(request)
        if search_type == "auto":
            search_type = self.infer_search_type(request.GET.get("search", ""))
        if search_type == "course":
            return ["^full_code"]  # '^' = starts-with match in DRF SearchFilter
        elif search_type == "keyword":
            return ["title", "sections__instructors__name"]
        else:
            return super().get_search_fields(view, request)
class TypedSectionSearchBackend(filters.SearchFilter):
    """DRF search backend that normalises section queries to ``DEPT-NUM-SECT``."""

    # Department letters, then an optional 3-digit number or 1-3 letter group,
    # then an optional numeric section, with space/dash separators.
    code_re = re.compile(r"^([A-Za-z]+) *[ -]?(\d{3}|[A-Z]{1,3})?[ -]?(\d+)?$")

    def get_search_terms(self, request):
        """Return the query as a single dash-joined section-code fragment.

        Queries that do not match ``code_re`` are passed through unchanged.
        """
        query = request.query_params.get(self.search_param, "")
        match = self.code_re.match(query)
        if match:
            query = match.group(1)
            if match.group(2) is not None:
                query = query + f"-{match.group(2)}"
            if match.group(3) is not None:
                query = query + f"-{match.group(3)}"
        return [query]
# Entry point
import pyfiglet
from app.configs.local import projectName

# Show an ASCII-art banner with the project name on startup.
print(pyfiglet.figlet_format(projectName, font = "slant"))

# Imported after the banner so the banner prints first; importing and
# instantiating the route manager wires up the application's routes.
import core.packages.router.route_manager as route_manager

# NOTE(review): the constructed object is discarded -- presumably the
# constructor has side effects (route registration); confirm.
route_manager.RouteManger()
9756284 | # Ex1 : Write a function named total that takes a list of integers as input,
# and returns the total value of all those integers added together.
def total(lst):
    """Return the sum of all integers in *lst* (0 for an empty list).

    The accumulator is named ``acc`` instead of ``sum`` so the builtin
    :func:`sum` is not shadowed (a defect in the original).
    """
    acc = 0
    for num in lst:
        acc += num
    return acc
# Ex2 : Write a function called count that takes a list of numbers as input
# and returns a count of the number of elements in the list.
def count(lst) :
return len(lst)
| StarcoderdataPython |
8077335 | <gh_stars>0
# -*- coding:utf-8 -*-
# @Time : 2019/6/22 7:55 PM
# @Author : __wutonghe__
from test_plus.test import TestCase
from django.test import RequestFactory
from segmentfault.apps.circle.views import *
class BaseUser(TestCase):
    """Common test fixture: a fresh user plus a Django RequestFactory."""

    def setUp(self):
        self.user = self.make_user()
        # BUG FIX: the original assigned the RequestFactory *class* itself
        # (missing parentheses); tests need an instance to build requests.
        self.factory = RequestFactory()
| StarcoderdataPython |
133798 | <reponame>bearsh/raccoon
from collections import OrderedDict
from copy import deepcopy
import pytest
import raccoon as rc
from raccoon.utils import assert_series_equal
def test_names():
    """index_name / data_name have defaults and are writable attributes."""
    series = rc.Series([1, 2])
    assert series.index_name == 'index'
    assert series.data_name == 'value'
    series.index_name = 'new_index'
    series.data_name = 'data'
    assert series.index_name == 'new_index'
    assert series.data_name == 'data'
def test_default_list():
    """A Series created with no arguments uses plain lists and stays list-backed."""
    def check_list():
        # the internal containers must remain plain lists after every mutation
        assert isinstance(srs.index, list)
        assert isinstance(srs.data, list)

    srs = rc.Series()
    assert isinstance(srs, rc.Series)
    assert srs.data == []
    assert srs.index == []
    assert srs.sort is True
    check_list()
    # add a new row and col
    srs.set_cell(1, 1)
    check_list()
    # add a new row
    srs.set_cell(2, 2)
    check_list()
    # add a complete new row
    srs.set_rows([3], [5])
    check_list()
def test_to_dict():
    """to_dict() supports optional index inclusion and OrderedDict output."""
    srs = rc.Series([1, 2, 3], index=['a', 'b', 'c'], data_name='a')
    # with index
    actual = srs.to_dict(index=True)
    assert actual == {'index': ['a', 'b', 'c'], 'a': [1, 2, 3]}
    # without index
    actual = srs.to_dict(index=False)
    assert actual == {'a': [1, 2, 3]}
    # ordered
    act_order = srs.to_dict(ordered=True)
    expected = OrderedDict([('index', ['a', 'b', 'c']), ('a', [1, 2, 3])])
    assert act_order == expected
def test_print():
    """__repr__, __str__ and print() render the Series as expected."""
    srs = rc.Series([1.0, 2.55, 3.1], data_name='boo', index=['row1', 'row2', 'row3'])
    # __repr__ produces a simple representation
    expected = "object id: %s\ndata:\n[1.0, 2.55, 3.1]\nindex:\n['row1', 'row2', 'row3']\n" % id(srs)
    actual = srs.__repr__()
    assert actual == expected
    # __str__ produces the standard table
    # NOTE(review): tabulate output normally pads columns with runs of spaces;
    # this literal may have lost alignment whitespace in transit -- confirm
    # against the original repository before relying on it.
    expected = 'index boo\n------- -----\nrow1 1\nrow2 2.55\nrow3 3.1'
    actual = srs.__str__()
    assert actual == expected
    # print() method will pass along any argument for the tabulate.tabulate function
    srs.print()
def test_input_data_mutability():
    """The Series copies top-level input data, but inner mutables stay shared."""
    input_data = [[1, 2, 3], [4, 5, 6]]
    # without defining column order
    srs = rc.Series(input_data)
    orig_data = deepcopy(srs.data)
    # change input_data
    input_data[1] = [6, 7, 8]
    assert srs.data != input_data
    assert srs.data == orig_data
    # change an inner index of input data
    input_data.append(99)
    assert srs.data == orig_data
    # Now make an inner element a mutable item, confirm that mutability remains
    input_data = [[1], [2], [3], [4, 5, 6]]
    srs = rc.Series(input_data)
    orig_data = deepcopy(srs.data)
    # changing the input data changes the inner data in Series
    input_data[0].append(11)
    assert srs.data != orig_data
    assert srs.get(0) == [1, 11]
    # changing the entire inner element
    srs[1] = [2, 22]
    assert input_data == [[1, 11], [2], [3], [4, 5, 6]]
    assert srs.data == [[1, 11], [2, 22], [3], [4, 5, 6]]
def test_get_data_mutability():
    """The .data property is a view: mutating its return value corrupts the Series."""
    # the .data method only returns a view, and changes to the return values will corrupt the Series
    srs = rc.Series([1.0, 2.55, 3.1])
    orig_data = deepcopy(srs.data)
    data = srs.data
    # regular Series return a view of data
    data.append(99)
    assert srs.data != orig_data
    assert srs.data == [1.0, 2.55, 3.1, 99]
    # using the get commands returns a shallow copy
    srs = rc.Series([[1], [2], [3]])
    # mutate inner value
    srs[1].append(22)
    # changes the new_df
    assert srs.data == [[1], [2, 22], [3]]
def test_len():
    """len() reflects the number of rows in the Series."""
    empty_series = rc.Series([], [])
    assert len(empty_series) == 0
    filled_series = rc.Series([1.0, 2.55, 3.1], sort=False)
    assert len(filled_series) == 3
def test_equality():
    """equality() compares to a value, optionally restricted by indexes/booleans."""
    srs = rc.Series([1, 2, 1, 2, 1, 1])
    assert srs.sort is True
    assert srs.equality(value=1) == [True, False, True, False, True, True]
    assert srs.equality([1, 2, 3], 2) == [True, False, True]
    assert srs.equality([False, False, False, True, True, True], 1) == [False, True, True]
    # change all 1 to 3
    srs.set(indexes=srs.equality(value=1), values=3)
    assert srs.data == [3, 2, 3, 2, 3, 3]
    srs = rc.Series([1, 2, 1, 2, 1, 1], sort=False)
    assert srs.sort is False
    assert srs.equality(value=1) == [True, False, True, False, True, True]
    assert srs.equality([1, 2, 3], 2) == [True, False, True]
    assert srs.equality([False, False, False, True, True, True], 1) == [False, True, True]
    # not enough booleans to match index len
    with pytest.raises(ValueError):
        srs.equality([True, True], 2)
def test_select_index():
    """select_index() matches plain or tuple indexes, as values or booleans.

    For tuple indexes, a ``None`` element of the compare tuple acts as a
    wildcard for that position.
    """
    # simple index, not sort
    srs = rc.Series([1, 2, 3, 4, 5, 6], index=['a', 'b', 'c', 'd', 'e', 'f'])
    actual = srs.select_index('c', 'value')
    assert actual == ['c']
    actual = srs.select_index('d', 'boolean')
    assert actual == [False, False, False, True, False, False]
    # simple index, sort
    srs = rc.Series([1, 2, 3, 4, 5, 6], index=['a', 'b', 'c', 'd', 'e', 'f'], sort=True)
    actual = srs.select_index('c', 'value')
    assert actual == ['c']
    actual = srs.select_index('d', 'boolean')
    assert actual == [False, False, False, True, False, False]
    with pytest.raises(ValueError):
        srs.select_index('a', 'BAD')
    # simple index, not sort
    srs = rc.Series([1, 2, 3, 4, 5, 6], index=['a', 'b', 'c', 'd', 'e', 'f'])
    actual = srs.select_index('c', 'value')
    assert actual == ['c']
    actual = srs.select_index('d', 'boolean')
    assert actual == [False, False, False, True, False, False]
    # tuple index
    tuples = [('a', 1, 3), ('a', 1, 4), ('a', 2, 3), ('b', 1, 4), ('b', 2, 1), ('b', 3, 3)]
    srs = rc.Series([1, 2, 3, 4, 5, 6], index=tuples)
    compare = ('a', None, None)
    assert srs.select_index(compare) == [True, True, True, False, False, False]
    compare = ('a', None, 3)
    assert srs.select_index(compare, 'boolean') == [True, False, True, False, False, False]
    compare = (None, 2, None)
    assert srs.select_index(compare, 'value') == [('a', 2, 3), ('b', 2, 1)]
    compare = (None, 3, 3)
    assert srs.select_index(compare) == [False, False, False, False, False, True]
    compare = (None, None, 3)
    assert srs.select_index(compare, 'value') == [('a', 1, 3), ('a', 2, 3), ('b', 3, 3)]
    compare = ('a', 1, 4)
    assert srs.select_index(compare, 'value') == [('a', 1, 4)]
    compare = ('a', 100, 99)
    assert srs.select_index(compare, 'value') == []
    compare = (None, None, None)
    assert srs.select_index(compare) == [True] * 6
    srs = rc.Series([1, 2, 3, 4, 5, 6])
    assert srs.select_index(3) == [False, False, False, True, False, False]
    assert srs.select_index(3, 'value') == [3]
def test_isin():
    """isin() returns a boolean list marking membership in the given values."""
    series = rc.Series([1, 2, 3, 4, 5])
    assert series.isin([2, 3, 4]) == [False, True, True, True, False]
    assert series.isin([3]) == [False, False, True, False, False]
    assert series.isin([6, 7]) == [False] * 5
def test_reset_index():
    """reset_index() replaces any custom index (and its name) with 0..n-1."""
    # no index defined
    srs = rc.Series([4, 5, 6])
    srs.reset_index()
    expected = rc.Series([4, 5, 6])
    assert_series_equal(srs, expected)
    # with index and index name defined
    srs = rc.Series([1, 2, 3], index=['x', 'y', 'z'], index_name='jelo')
    srs.reset_index()
    expected = rc.Series([1, 2, 3], [0, 1, 2], sort=False)
    assert_series_equal(srs, expected)
1788693 | import torch
from torch import nn
from torch.nn import init
from torch.nn import functional as F
from transformers import BertModel
from config import Baseline_LSTMConfig
from config import Baseline_BertConfig
from config import Baseline_CNNConfig
from tools import load_bert_vocab_embedding_vec
class Baseline_Model_Bert_Classification(nn.Module):
    """BERT encoder + dropout/linear head for single-sentence classification."""

    def __init__(self, dataset_config):
        super(Baseline_Model_Bert_Classification, self).__init__()
        self.bert_model = BertModel.from_pretrained('bert-base-uncased')
        self.hidden_size = Baseline_BertConfig.hidden_size
        # Freeze the encoder unless fine-tuning is enabled in the config.
        if not Baseline_BertConfig.fine_tuning:
            for param in self.bert_model.parameters():
                param.requires_grad = False
        self.fc = nn.Sequential(
            nn.Dropout(0.5),
            nn.Linear(self.hidden_size, dataset_config.labels_num),
        )
        # Small-std normal init for the classification head.
        for params in self.fc.parameters():
            init.normal_(params, mean=0, std=0.01)

    def forward(self, inputs, inputs_mask):
        """forward

        Args:
            inputs (torch.tensor): [batch, seq_len]
            inputs_mask (torch.tensor): [batch, seq_len]

        Returns:
            logits: [batch, labels_num]
        """
        # Classify from the pooled [CLS] representation.
        encoder, pooled = self.bert_model(inputs,
                                          attention_mask=inputs_mask)[:]
        logits = self.fc(pooled)
        return logits
class Baseline_Model_LSTM_Classification(nn.Module):
    """(Bi)LSTM text classifier with optionally pretrained, frozen embeddings."""

    def __init__(self, dataset_config, bidirectional):
        super(Baseline_Model_LSTM_Classification, self).__init__()
        self.vocab_size = Baseline_LSTMConfig.vocab_size
        self.embedding_size = Baseline_LSTMConfig.embedding_size
        self.hidden_size = Baseline_LSTMConfig.hidden_size
        self.num_layers = Baseline_LSTMConfig.num_layers
        self.bidirectional = bidirectional
        self.embedding_layer = nn.Embedding(self.vocab_size,
                                            self.embedding_size)
        if Baseline_LSTMConfig.using_pretrained:
            # BUG FIX: ``from_pretrained`` is a classmethod that RETURNS a new
            # Embedding; the original called it on the instance and discarded
            # the result, so the pretrained vectors were never installed.
            # ``freeze=True`` also replaces the manual requires_grad = False.
            self.embedding_layer = nn.Embedding.from_pretrained(
                torch.from_numpy(
                    load_bert_vocab_embedding_vec(
                        self.vocab_size, self.embedding_size,
                        Baseline_LSTMConfig.vocab_path,
                        Baseline_LSTMConfig.embedding_path)),
                freeze=True)
        else:
            self.embedding_layer.weight.requires_grad = True
        self.dropout = nn.Dropout(0.5)
        self.encoder = nn.LSTM(input_size=self.embedding_size,
                               hidden_size=self.hidden_size,
                               num_layers=self.num_layers,
                               bidirectional=self.bidirectional,
                               dropout=Baseline_LSTMConfig.dropout)
        # The classifier input doubles for bidirectional output, and doubles
        # again when concatenating head and tail states.
        hidden_size = self.hidden_size
        if self.bidirectional:
            hidden_size = hidden_size * 2
        if Baseline_LSTMConfig.head_tail:
            hidden_size = hidden_size * 2
        self.fc = nn.Sequential(
            nn.Dropout(0.5), nn.Linear(hidden_size, dataset_config.labels_num))
        # Small-std normal init for the classification head.
        for params in self.fc.parameters():
            init.normal_(params, mean=0, std=0.01)

    def forward(self, X):
        """Classify token-id batches.

        Args:
            X (torch.tensor): [batch, seq_len] token ids.

        Returns:
            logits: [batch, labels_num]
        """
        X = self.embedding_layer(X)  # [batch, sen_len, word_dim]
        X = self.dropout(X)
        # X: [sen_len, batch, word_dim] -- nn.LSTM default is seq-first
        X = X.permute(1, 0, 2)
        outputs, hidden = self.encoder(X)  # output, (hidden, memory)
        # outputs [sen_len, batch, hidden]
        # outputs [sen_len, batch, hidden*2] *2 means using bidrectional
        if Baseline_LSTMConfig.head_tail:
            # concatenate first and last time-step outputs
            outputs = torch.cat((outputs[0], outputs[-1]), -1)
        else:
            outputs = outputs[-1]
        outputs = self.fc(outputs)  # [batch, hidden] -> [batch, labels]
        return outputs
class Baseline_Model_CNN_Classification(nn.Module):
    """Multi-kernel-size text CNN, optionally with a second, pretrained
    (frozen) embedding channel concatenated to the trainable one."""

    def __init__(self, dataset_config):
        super(Baseline_Model_CNN_Classification, self).__init__()
        self.vocab_size = Baseline_CNNConfig.vocab_size
        self.embedding_size = Baseline_CNNConfig.embedding_size
        self.word_dim = self.embedding_size
        self.embedding_train = nn.Embedding(self.vocab_size,
                                            self.embedding_size)
        if Baseline_CNNConfig.using_pretrained:
            # BUG FIX: ``from_pretrained`` is a classmethod that RETURNS a new
            # Embedding; the original constructed a random layer and discarded
            # the ``from_pretrained`` result, so the pretrained vectors were
            # never installed.  ``freeze=True`` keeps the weights fixed.
            self.embedding_pre = nn.Embedding.from_pretrained(
                torch.from_numpy(
                    load_bert_vocab_embedding_vec(
                        self.vocab_size, self.embedding_size,
                        Baseline_CNNConfig.vocab_path,
                        Baseline_CNNConfig.embedding_path)),
                freeze=True)
            # Both channels are concatenated along the embedding dimension.
            self.word_dim *= 2
        self.pool = nn.AdaptiveMaxPool1d(output_size=1)
        self.channel_size = [self.word_dim] + Baseline_CNNConfig.channel_size
        self.kernel_size = Baseline_CNNConfig.kernel_size
        # One Conv1d per configured kernel size.
        self.convs = nn.ModuleList()
        for i in range(len(self.kernel_size)):
            self.convs.append(
                nn.Conv1d(in_channels=self.channel_size[i],
                          out_channels=self.channel_size[i + 1],
                          kernel_size=self.kernel_size[i]))
        self.dropout = nn.Dropout(0.5)
        self.fc = nn.Linear(sum(Baseline_CNNConfig.channel_size),
                            dataset_config.labels_num)
        # Small-std normal init for the classification head.
        for params in self.fc.parameters():
            init.normal_(params, mean=0, std=0.01)

    def forward(self, X):
        """Classify token-id batches.

        Args:
            X (torch.tensor): [batch, seq_len] token ids.

        Returns:
            logits: [batch, labels_num]
        """
        if Baseline_CNNConfig.using_pretrained:
            embeddings = torch.cat(
                (
                    self.embedding_train(X),
                    self.embedding_pre(X),
                ), dim=-1)  # [batch, seqlen, word-dim0 + word-dim1]
        else:
            embeddings = self.embedding_train(X)
        embeddings = self.dropout(embeddings)
        embeddings = embeddings.permute(0, 2, 1)  # [batch, dims, seqlen]
        # Max-pool each convolution's feature map over time, then concatenate.
        outs = torch.cat([
            self.pool(F.relu(conv(embeddings))).squeeze(-1)
            for conv in self.convs
        ],
                         dim=1)
        outs = self.dropout(outs)
        logits = self.fc(outs)
        return logits
class Baseline_Model_Bert_Entailment(nn.Module):
    """BERT encoder + linear head for sentence-pair (entailment) classification.

    Identical to the classification baseline except that ``token_type_ids``
    are passed to separate premise from hypothesis.
    """

    def __init__(self, dataset_config):
        super(Baseline_Model_Bert_Entailment, self).__init__()
        self.bert_model = BertModel.from_pretrained('bert-base-uncased')
        self.hidden_size = Baseline_BertConfig.hidden_size
        # Freeze the encoder unless fine-tuning is enabled in the config.
        if not Baseline_BertConfig.fine_tuning:
            for param in self.bert_model.parameters():
                param.requires_grad = False
        self.fc = nn.Sequential(
            nn.Dropout(0.5),
            nn.Linear(self.hidden_size, dataset_config.labels_num),
        )
        # Small-std normal init for the classification head.
        for params in self.fc.parameters():
            init.normal_(params, mean=0, std=0.01)

    def forward(self, inputs, inputs_mask, inputs_type):
        """forward

        Args:
            inputs (torch.tensor): [batch, seq_len]
            inputs_mask (torch.tensor): [batch, seq_len]
            inputs_type (torch.tensor): [batch, seq_len] segment (A/B) ids

        Returns:
            logits: [batch, labels_num]
        """
        # Classify from the pooled [CLS] representation.
        encoder, pooled = self.bert_model(inputs,
                                          attention_mask=inputs_mask,
                                          token_type_ids=inputs_type)[:]
        logits = self.fc(pooled)
        return logits
class Baseline_Model_LSTM_Entailment(nn.Module):
    """Two-tower LSTM baseline for textual entailment.

    Premise and hypothesis are embedded and encoded by separate LSTMs; the
    final hidden states are concatenated and passed through an MLP ending in
    a 3-way softmax.
    """

    def __init__(self, dataset_config, bidirectional):
        super(Baseline_Model_LSTM_Entailment, self).__init__()
        self.hidden_size = Baseline_LSTMConfig.hidden_size
        self.num_layers = Baseline_LSTMConfig.num_layers
        self.vocab_size = Baseline_LSTMConfig.vocab_size
        self.bidirectional = bidirectional
        self.embedding_size = Baseline_LSTMConfig.embedding_size
        if Baseline_LSTMConfig.using_pretrained:
            # BUG FIX: nn.Embedding.from_pretrained is a classmethod that
            # returns a *new* module. The old code called it on an instance
            # and discarded the result, so the pretrained vectors were never
            # actually used. freeze=True preserves the old
            # `weight.requires_grad = False` intent.
            self.embedding_layer = nn.Embedding.from_pretrained(
                torch.from_numpy(
                    load_bert_vocab_embedding_vec(
                        self.vocab_size, self.embedding_size,
                        Baseline_LSTMConfig.vocab_path,
                        Baseline_LSTMConfig.embedding_path)),
                freeze=True)
        else:
            self.embedding_layer = nn.Embedding(self.vocab_size,
                                                self.embedding_size)
        self.premise_encoder = nn.LSTM(input_size=self.embedding_size,
                                       hidden_size=self.hidden_size,
                                       num_layers=self.num_layers,
                                       bidirectional=self.bidirectional,
                                       dropout=Baseline_LSTMConfig.dropout)
        self.hypothesis_encoder = nn.LSTM(input_size=self.embedding_size,
                                          hidden_size=self.hidden_size,
                                          num_layers=self.num_layers,
                                          bidirectional=self.bidirectional,
                                          dropout=Baseline_LSTMConfig.dropout)
        # MLP head: concat of the two final hidden states -> 400 -> 100 -> 3.
        self.layers = nn.Sequential()
        layer_sizes = [2 * self.hidden_size, 400, 100]
        for i in range(len(layer_sizes) - 1):
            layer = nn.Linear(layer_sizes[i], layer_sizes[i + 1])
            self.layers.add_module("layer" + str(i + 1), layer)
            bn = nn.BatchNorm1d(layer_sizes[i + 1], eps=1e-05, momentum=0.1)
            self.layers.add_module("bn" + str(i + 1), bn)
            self.layers.add_module("activation" + str(i + 1), nn.ReLU())
        layer = nn.Linear(layer_sizes[-1], 3)
        self.layers.add_module("layer" + str(len(layer_sizes)), layer)
        # NOTE(review): the head emits probabilities; confirm the training
        # loss expects probabilities (not raw logits via CrossEntropyLoss).
        self.layers.add_module("softmax", nn.Softmax(dim=1))

    def store_grad_norm(self, grad):
        """Backward hook: record the mean L2 norm of *grad* for monitoring."""
        norm = torch.norm(grad, 2, 1)
        self.grad_norm = norm.detach().data.mean()
        return grad

    def forward(self, premise_indices, hypothesis_indices):
        """Return class probabilities [batch, 3] for (premise, hypothesis)."""
        # premise: [batch, sen_len, embedding_size]; the LSTMs are seq-first,
        # hence the permute(1, 0, 2).
        premise = self.embedding_layer(premise_indices)
        output_prem, (hidden_prem, _) = self.premise_encoder(
            premise.permute(1, 0, 2))
        # Keep only the final layer/direction's hidden state:
        # hidden_prem: [batch, hidden_size]
        hidden_prem = hidden_prem[-1]
        if hidden_prem.requires_grad:
            hidden_prem.register_hook(self.store_grad_norm)
        hypothesis = self.embedding_layer(hypothesis_indices)
        output_hypo, (hidden_hypo,
                      _) = self.hypothesis_encoder(hypothesis.permute(1, 0, 2))
        hidden_hypo = hidden_hypo[-1]
        if hidden_hypo.requires_grad:
            hidden_hypo.register_hook(self.store_grad_norm)
        concatenated = torch.cat([hidden_prem, hidden_hypo], 1)
        probs = self.layers(concatenated)
        return probs
| StarcoderdataPython |
# Benchmark the counting of alanine residues in a PDB file
import time

import MDAnalysis as mda

pdb_filepath = "pdbs/1AKE.pdb"
u = mda.Universe(pdb_filepath)


def count():
    """Return the number of alanine (ALA) residues in the loaded universe."""
    return (u.residues.resnames == "ALA").sum()


start = time.time()
count()
elapsed = time.time() - start
# BUG FIX: `print elapsed` is Python 2-only statement syntax and a
# SyntaxError under Python 3; the call form works on both.
print(elapsed)
| StarcoderdataPython |
11372743 | # Copyright (c) 2020 AllSeeingEyeTolledEweSew
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
# OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
import contextlib
import errno
import functools
import io
import logging
import os
import socket as socket_lib
import threading
import time
from typing import cast
from typing import Iterator
from typing import List
from typing import Optional
from typing import Tuple
import pyftpdlib
import pyftpdlib.authorizers
import pyftpdlib.filesystems
import pyftpdlib.handlers
import pyftpdlib.servers
from tvaf import auth
from tvaf import fs
from tvaf import task as task_lib
import tvaf.config as config_lib
_LOG = logging.getLogger(__name__)
def _partialclass(cls, *args, **kwds):
class Wrapped(cls):
__init__ = functools.partialmethod(cls.__init__, *args, **kwds)
return Wrapped
class _FS(pyftpdlib.filesystems.AbstractedFS):
    """Read-only pyftpdlib filesystem backed by a tvaf ``fs.Dir`` tree.

    All paths resolve through the virtual tree rather than the real OS
    filesystem; every mutating operation raises EROFS.
    """

    def __init__(self, *args, root: fs.Dir, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        # Current working directory node within the virtual tree.
        self.cur_dir = root

    def validpath(self, path: str) -> bool:
        # This is used to check whether a path traverses symlinks to escape a
        # home directory. The virtual tree is self-contained, so always allow.
        return True

    def ftp2fs(self, ftppath: str) -> str:
        # There is no OS-path form of a virtual path; normalizing is enough.
        return cast(str, self.ftpnorm(ftppath))

    def fs2ftp(self, fspath: str) -> str:
        return fspath

    # --- Mutating operations: the tree is read-only, so all raise EROFS. ---

    def mkstemp(
        self,
        suffix="",
        prefix="",
        dir=None,
        mode="wb",
    ) -> None:
        raise fs.mkoserror(errno.EROFS)

    def mkdir(self, path: str) -> None:
        raise fs.mkoserror(errno.EROFS)

    def rmdir(self, path: str) -> None:
        raise fs.mkoserror(errno.EROFS)

    def rename(self, src: str, dst: str) -> None:
        raise fs.mkoserror(errno.EROFS)

    def chmod(self, path: str, mode: str) -> None:
        raise fs.mkoserror(errno.EROFS)

    def utime(self, path: str, timeval) -> None:
        raise fs.mkoserror(errno.EROFS)

    def get_user_by_uid(self, uid: int) -> str:
        # Virtual nodes carry no real ownership; present everything as root's.
        return "root"

    def get_group_by_gid(self, gid: int) -> str:
        return "root"

    # --- Traversal helpers over the virtual tree. ---

    def _traverse(self, path: str) -> fs.Node:
        """Resolve *path* relative to the cwd node (follows symlinks)."""
        return self.cur_dir.traverse(path)

    def _ltraverse(self, path: str) -> fs.Node:
        """Resolve *path* without following a trailing symlink."""
        return self.cur_dir.traverse(path, follow_symlinks=False)

    def _traverse_to_dir(self, path: str) -> fs.Dir:
        """Resolve *path* and require the result to be a directory."""
        dir_ = cast(fs.Dir, self._traverse(path))
        if not dir_.is_dir():
            raise fs.mkoserror(errno.ENOTDIR)
        return dir_

    def _traverse_to_link(self, path: str) -> fs.Symlink:
        """Resolve *path* (no follow) and require the result to be a symlink."""
        symlink = cast(fs.Symlink, self._ltraverse(path))
        if not symlink.is_link():
            raise fs.mkoserror(errno.EINVAL)
        return symlink

    def chdir(self, path: str) -> None:
        self.cur_dir = self._traverse_to_dir(path)
        self.cwd = str(self.cur_dir.abspath())

    def open(self, filename: str, mode: str) -> io.BufferedIOBase:
        file_ = cast(fs.File, self._traverse(filename))
        if file_.is_dir():
            raise fs.mkoserror(errno.EISDIR)
        fp = file_.open(mode)
        return fp

    def listdir(self, path: str) -> List[str]:
        dir_ = self._traverse_to_dir(path)
        return [d.name for d in dir_.readdir()]

    def listdirinfo(self, path: str) -> List[str]:
        # Doesn't seem to be used. However, the base class implements it and we
        # don't want to allow access to the filesystem.
        return self.listdir(path)

    def stat(self, path: str) -> os.stat_result:
        return self._traverse(path).stat().os()

    def lstat(self, path: str) -> os.stat_result:
        return self._ltraverse(path).stat().os()

    def readlink(self, path: str) -> str:
        return str(self._traverse_to_link(path).readlink())

    # --- Predicates mirror os.path semantics: any OSError means "no". ---

    def isfile(self, path: str) -> bool:
        try:
            return self._traverse(path).is_file()
        except OSError:
            return False

    def islink(self, path: str) -> bool:
        try:
            return self._ltraverse(path).is_link()
        except OSError:
            return False

    def lexists(self, path: str) -> bool:
        try:
            self._ltraverse(path)
        except OSError:
            return False
        return True

    def isdir(self, path: str) -> bool:
        try:
            return self._traverse(path).is_dir()
        except OSError:
            return False

    def getsize(self, path: str) -> int:
        return self._traverse(path).stat().size

    def getmtime(self, path: str) -> int:
        mtime = self._traverse(path).stat().mtime
        if mtime is not None:
            return mtime
        # Virtual nodes may have no mtime; report "now" as a sane default.
        return int(time.time())

    def realpath(self, path: str) -> str:
        return str(self.cur_dir.realpath(path))
class _Authorizer(pyftpdlib.authorizers.DummyAuthorizer):
    """Authorizer that validates credentials against tvaf's AuthService.

    Every authenticated user shares the read-only permission set and "/" as
    home directory; DummyAuthorizer's static user-table API is deliberately
    unsupported.

    NOTE(review): DummyAuthorizer.__init__ is not called here -- confirm the
    base class keeps no required per-instance state (e.g. a user table).
    """

    def __init__(self, *, auth_service: auth.AuthService) -> None:
        self.auth_service = auth_service

    # --- Static user management: unsupported; users come from AuthService. ---

    def add_user(
        self,
        username: str,
        password: str,
        homedir: str,
        perm: str = "elr",
        msg_login: str = "Login successful.",
        msg_quit: str = "Goodbye.",
    ) -> None:
        raise NotImplementedError

    def add_anonymous(self, homedir: str, **kwargs) -> None:
        raise NotImplementedError

    def remove_user(self, username: str) -> None:
        raise NotImplementedError

    def override_perm(
        self, username: str, directory: str, perm: str, recursive=False
    ) -> None:
        raise NotImplementedError

    def has_user(self, username: str) -> bool:
        raise NotImplementedError

    # --- Per-user queries: identical answers for every user. ---

    def get_msg_login(self, username: str) -> str:
        return "Login successful."

    def get_msg_quit(self, username: str) -> str:
        return "Goodbye."

    def get_home_dir(self, username: str) -> str:
        return "/"

    def has_perm(self, username: str, perm: str, path: str = None) -> bool:
        # Grant only permissions in the base class's read set; anything
        # outside it (write perms) is denied.
        return perm in self.read_perms

    def get_perms(self, username: str) -> str:
        return cast(str, self.read_perms)

    # --- Credential handling delegates to the AuthService. ---

    def validate_authentication(
        self, username: str, password: str, handler
    ) -> None:
        try:
            self.auth_service.auth_password_plain(username, password)
        except auth.AuthenticationFailed as exc:
            # Re-raise as pyftpdlib's own failure type so the handler
            # turns it into the proper FTP error response.
            raise pyftpdlib.authorizers.AuthenticationFailed(exc)

    def impersonate_user(self, username: str, password: str) -> None:
        self.auth_service.push_user(username)

    def terminate_impersonation(self, username: str) -> None:
        self.auth_service.pop_user()
class _FTPHandler(pyftpdlib.handlers.FTPHandler):
    """FTP session handler wired to the virtual fs tree and the AuthService."""

    # pyftpd just tests for existence of fileno, but BytesIO and
    # BufferedTorrentIO expose fileno that raises io.UnsupportedOperation.
    use_sendfile = False

    def __init__(
        self, *args, root: fs.Dir, auth_service: auth.AuthService, **kwargs
    ) -> None:
        super().__init__(*args, **kwargs)
        self.authorizer = _Authorizer(auth_service=auth_service)
        # Bind the root into the FS class up front; pyftpdlib instantiates
        # abstracted_fs itself, once per session.
        self.abstracted_fs = _partialclass(_FS, root=root)
def _create_server(address: Tuple) -> socket_lib.socket:
sock = socket_lib.socket(socket_lib.AF_INET, socket_lib.SOCK_STREAM)
try:
sock.bind(address)
sock.listen()
return sock
except Exception:
sock.close()
raise
class FTPD(task_lib.Task, config_lib.HasConfig):
    """Task that runs a pyftpdlib FTP server and re-binds on config changes.

    The server exposes the virtual fs tree and authenticates against the
    shared AuthService.
    """

    def __init__(
        self,
        *,
        config: config_lib.Config,
        root: fs.Dir,
        auth_service: auth.AuthService
    ) -> None:
        super().__init__(title="FTPD", thread_name="ftpd")
        self._auth_service = auth_service
        self._root = root
        # TODO: fixup typing here
        self._lock: threading.Condition = threading.Condition(  # type: ignore
            threading.RLock()
        )
        # Guarded by _lock: the running server and the address it serves.
        self._server: Optional[pyftpdlib.servers.FTPServer] = None
        self._address: Optional[Tuple] = None
        self.set_config(config)

    @property
    def socket(self) -> Optional[socket_lib.socket]:
        """The listening socket, or None while the server is stopped."""
        with self._lock:
            if self._server is None:
                return None
            return cast(socket_lib.socket, self._server.socket)

    @contextlib.contextmanager
    def stage_config(self, config: config_lib.Config) -> Iterator[None]:
        """Validate *config* before the yield; commit it after.

        A new listening socket is created during staging so an invalid
        address fails early; the server swap happens only on commit.
        """
        config.setdefault("ftp_enabled", True)
        config.setdefault("ftp_bind_address", "localhost")
        config.setdefault("ftp_port", 8821)
        address: Optional[Tuple] = None
        socket: Optional[socket_lib.socket] = None
        # Only parse address and port if enabled
        if config.require_bool("ftp_enabled"):
            address = (
                config.require_str("ftp_bind_address"),
                config.require_int("ftp_port"),
            )
        with self._lock:
            if address != self._address and address is not None:
                socket = _create_server(address)
            yield
            if self._terminated.is_set():
                # NOTE(review): a socket created above is not closed on this
                # path -- confirm whether it can leak on a shutdown race.
                return
            if address == self._address:
                return
            self._address = address
            # Stop the old server before swapping in the new one.
            self._terminate()
            if socket is None:
                return
            handler = _partialclass(
                _FTPHandler, root=self._root, auth_service=self._auth_service
            )
            self._server = pyftpdlib.servers.ThreadedFTPServer(socket, handler)

    def _terminate(self):
        """Shut down the current server (if any) and wake the run loop."""
        with self._lock:
            if self._server is not None:
                self._server.close_all()
                self._server = None
            self._lock.notify_all()

    def _run(self):
        """Run loop: serve while a server exists, block on the condition
        variable while it doesn't. serve_forever runs outside the lock so
        _terminate can acquire it to shut the server down.
        """
        while not self._terminated.is_set():
            with self._lock:
                if self._server is None:
                    self._lock.wait()
                server = self._server
            if server:
                if _LOG.isEnabledFor(logging.INFO):
                    host, port = server.socket.getsockname()
                    _LOG.info("ftp server listening on %s:%s", host, port)
                server.serve_forever()
        _LOG.info("ftp server shut down")
| StarcoderdataPython |
347572 | <reponame>nielshojen/macnamer<filename>macnamer/urls.py
# URL configuration for macnamer.
# NOTE(review): uses the legacy `patterns()` + string-view syntax removed in
# Django 1.10 -- this file requires an older Django.
from django.conf.urls import patterns, include, url

# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()

urlpatterns = patterns('',
    # Examples:
    # url(r'^macnamer/', include('macnamer.foo.urls')),

    # Authentication views provided by django.contrib.auth.
    url(r'^login/$', 'django.contrib.auth.views.login'),
    url(r'^logout/$', 'django.contrib.auth.views.logout_then_login'),
    url(r'^changepassword/$', 'django.contrib.auth.views.password_change'),
    url(r'^changepassword/done/$', 'django.contrib.auth.views.password_change_done'),
    # Everything else is routed to the namer app.
    url(r'^', include('namer.urls')),
    # Uncomment the admin/doc line below to enable admin documentation:
    url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    url(r'^admin/', include(admin.site.urls)),
    #url(r'^$', 'namer.views.index', name='home'),
)
| StarcoderdataPython |
class Error(Exception):
    """Base class for exceptions in this module."""
class DuplicateXMLDocumentError(Exception):
    """Raised when a duplicate XML document is encountered.

    NOTE(review): extends Exception directly rather than this module's Error
    base; kept as-is so existing handlers keep working.
    """

    def __init__(self, *args):
        # args: (error, indication, location) -- note other classes in this
        # module pass (indication, error, location); ordering preserved.
        message = "Error %s: \nIndicates: %s\nIn Location: %s" % (args[0], args[1], args[2])
        print(message)
        self.message = message
        # BUG FIX: without this, str(exc) was empty and .args was lost.
        super().__init__(message)
class UndefinedXMLWriter(Exception):
    """Raised when no XML writer is defined for an operation."""

    def __init__(self, *args):
        # args: (error, indication, location)
        message = "Error %s: \nIndicates: %s\nIn Location: %s" % (args[0], args[1], args[2])
        print(message)
        self.message = message  # kept for callers that inspect .message
        # BUG FIX: propagate the message so str(exc) is meaningful.
        super().__init__(message)
class DatabaseAuthenticationError(Exception):
    """Raised when authentication against the database fails."""

    def __init__(self, *args):
        # args: (error, indication, location)
        message = "Error %s: \nIndicates: %s\nIn Location: %s" % (args[0], args[1], args[2])
        print(message)
        self.message = message
        # BUG FIX: propagate the message so str(exc) is meaningful.
        super().__init__(message)
class SoftwareCompatibilityError(Exception):
    """Raised when a software-version compatibility check fails."""

    def __init__(self, *args):
        # args: (indication, error, location) -- reversed order vs. the
        # classes above; preserved for output compatibility.
        message = "Error %s: \nIndicates: %s\nIn Location: %s" % (args[1], args[0], args[2])
        print(message)
        self.message = message
        # BUG FIX: propagate the message so str(exc) is meaningful.
        super().__init__(message)
class XSDError(Exception):
    """Raised for XSD (XML schema) validation failures."""

    def __init__(self, *args):
        # args: (indication, error, location)
        message = "Error %s: \nIndicates: %s\nIn Location: %s" % (args[1], args[0], args[2])
        print(message)
        self.message = message
        # BUG FIX: propagate the message so str(exc) is meaningful.
        super().__init__(message)
class DBLayerNotFoundError(Exception):
    """Raised when a database layer cannot be found."""

    def __init__(self, *args):
        # args: (indication, error, location)
        message = "Error %s: \nIndicates: %s\nIn Location: %s" % (args[1], args[0], args[2])
        print(message)
        self.message = message
        # BUG FIX: propagate the message so str(exc) is meaningful.
        super().__init__(message)
class VPNFailure(Error):
    """Raised when the VPN connection fails."""

    def __init__(self, *args):
        # args: (indication, error, location)
        message = "Error %s: \nIndicates: %s\nIn Location: %s" % (args[1], args[0], args[2])
        print(message)
        self.message = message
        # BUG FIX: propagate the message so str(exc) is meaningful.
        super().__init__(message)
class FTPUploadFailureError(Error):
    """Raised when an FTP upload fails."""

    def __init__(self, *args):
        # args: (indication, error, location)
        message = "Error %s: \nIndicates: %s\nIn Location: %s" % (args[1], args[0], args[2])
        print(message)
        self.message = message
        # BUG FIX: propagate the message so str(exc) is meaningful.
        super().__init__(message)
class KeyboardInterrupt(Error):
    """Raised when a keyboard interrupt is intercepted.

    NOTE(review): shadows the builtin KeyboardInterrupt. The builtin derives
    from BaseException while this class derives from Exception via Error, so
    `except KeyboardInterrupt` in this module will NOT catch a real Ctrl-C.
    Renaming would break callers, so only flagging here.
    """

    def __init__(self, *args):
        # "Interupt" typo kept: it is runtime output, not a comment.
        print("Intercepted Keyboard Interupt")
        # BUG FIX: give the exception a meaningful str()/args.
        super().__init__("Intercepted Keyboard Interupt")
class FileNotFoundError(Error):
    """Raised when an expected file is missing.

    NOTE(review): shadows the builtin FileNotFoundError (an OSError); code in
    this module catching FileNotFoundError gets this class, not the builtin.
    """

    def __init__(self, *args):
        # args: (indication, error, location)
        message = "Error %s: \nIndicates: %s\nIn Location: %s" % (args[1], args[0], args[2])
        print(message)
        self.message = message
        # BUG FIX: propagate the message so str(exc) is meaningful.
        super().__init__(message)
class DataFormatError(Error):
    """Raised when input data is malformed."""

    def __init__(self, *args):
        # args: (indication, error, location)
        message = "Error %s: \nIndicates: %s\nIn Location: %s" % (args[1], args[0], args[2])
        print(message)
        self.message = message
        # BUG FIX: propagate the message so str(exc) is meaningful.
        super().__init__(message)
class InvalidSSNError(Error):
    """Raised when a social security number fails validation."""

    def __init__(self, *args):
        # args: (indication, error, location)
        message = "Error %s: \nIndicates: %s\nIn Location: %s" % (args[1], args[0], args[2])
        print(message)
        self.message = message
        # BUG FIX: propagate the message so str(exc) is meaningful.
        super().__init__(message)
class EthnicityPickNotFound(Error):
    """Raised when an ethnicity pick-list entry cannot be found."""

    def __init__(self, *args):
        # args: (indication, error, location)
        message = "Error %s: \nIndicates: %s\nIn Location: %s" % (args[1], args[0], args[2])
        print(message)
        self.message = message
        # BUG FIX: propagate the message so str(exc) is meaningful.
        super().__init__(message)
class InputError(Error):
    """Exception raised for errors in the input.

    Attributes:
        expression -- input expression in which the error occurred
        message -- explanation of the error
    """

    def __init__(self, expression, message):
        self.expression = expression
        self.message = message
        # BUG FIX: propagate the message so str(exc) is meaningful.
        super().__init__(message)
#The MIT License
#
#Copyright (c) 2011, Alexandria Consulting LLC
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE. | StarcoderdataPython |
1890806 | <gh_stars>10-100
# ---
# jupyter:
# anaconda-cloud: {}
# jupytext:
# cell_metadata_filter: -all
# formats: ipynb,py:percent
# notebook_metadata_filter: all
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.6.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# language_info:
# codemirror_mode:
# name: ipython
# version: 3
# file_extension: .py
# mimetype: text/x-python
# name: python
# nbconvert_exporter: python
# pygments_lexer: ipython3
# version: 3.8.5
# ---
# %% [markdown]
# Branching GP Regression on synthetic data
# --
#
# *<NAME>, 2017*
#
# Branching GP regression with Gaussian noise on the hematopoiesis data described in the paper "BGP: Gaussian processes for identifying branching dynamics in single cell data".
#
# This notebook shows how to build a BGP model and plot the posterior model fit and posterior branching times.
# %%
import pickle

import numpy as np
import pandas as pd
from matplotlib import pyplot as plt

from BranchedGP import VBHelperFunctions as bplot

plt.style.use("ggplot")
# %matplotlib inline
# %% [markdown]
# ### Load the data
# 1. Monocle has already been run on the data. The first columns contains the state assigned by the DDRTree algorithm to each cell.
# 1. Second column is the gene time.
# 1. All other columns are the 40 genes. The first 10 branch early, then 20 branch late and 10 do not branch.
# %%
datafile = "syntheticdata/synthetic20.csv"
data = pd.read_csv(datafile, index_col=[0])
G = data.shape[1] - 2  # all data - time columns - state column
Y = data.iloc[:, 2:]
# Each gene's true branching location is encoded in the last 3 characters of
# its column name; per the plot title below, 1.1 means "does not branch".
trueBranchingTimes = np.array([float(Y.columns[i][-3:]) for i in range(G)])
# %%
data.head()
# %% [markdown]
# # Plot the data
# %%
f, ax = plt.subplots(5, 8, figsize=(10, 8))  # 40 panels, one per gene
ax = ax.flatten()
for i in range(G):
    # Colour cells by their Monocle/DDRTree state assignment.
    for s in np.unique(data["MonocleState"]):
        idxs = s == data["MonocleState"].values
        ax[i].scatter(data["Time"].loc[idxs], Y.iloc[:, i].loc[idxs])
    ax[i].set_title(Y.columns[i])
    ax[i].set_yticklabels([])
    ax[i].set_xticklabels([])
f.suptitle("Branching genes, location=1.1 indicates no branching")
# %% [markdown]
# # Run the BGP model
# Run script `runsyntheticData.py` to obtain a pickle file with results.
# This script can take ~10 to 20 minutes depending on your hardware.
# It performs a gene-by-gene branch model fitting.
# %% [markdown]
# # Plot BGP posterior fit
# Plot posterior fit.
# %%
r = pickle.load(open("syntheticdata/syntheticDataRun.p", "rb"))
# %%
r.keys()
# %%
# plot fit for a gene
g = 0
GPy = Y.iloc[:, g][:, None]
GPt = data["Time"].values
globalBranching = data["MonocleState"].values.astype(int)
# Posterior mode = search-grid value with the maximum log-likelihood.
bmode = r["Bsearch"][np.argmax(r["gpmodels"][g]["loglik"])]
print("True branching time", trueBranchingTimes[g], "BGP Maximum at b=%.2f" % bmode)
_ = bplot.PlotBGPFit(GPy, GPt, r["Bsearch"], r["gpmodels"][g])
# %% [markdown]
# We can also plot with the predictive uncertainty of the GP.
# The dashed lines are the 95% confidence intervals.
# %%
g = 0
bmode = r["Bsearch"][np.argmax(r["gpmodels"][g]["loglik"])]
pred = r["gpmodels"][g]["prediction"]  # prediction object from GP
_ = bplot.plotBranchModel(
    bmode,
    GPt,
    GPy,
    pred["xtest"],
    pred["mu"],
    pred["var"],
    r["gpmodels"][g]["Phi"],
    fPlotPhi=True,
    fColorBar=True,
    fPlotVar=True,
)
# %% [markdown]
# # Plot posterior
# Plotting the posterior alongside the true branching location.
# %%
fs, ax = plt.subplots(1, 1, figsize=(5, 5))
for g in range(G):
    bmode = r["Bsearch"][np.argmax(r["gpmodels"][g]["loglik"])]
    ax.scatter(bmode, g, s=100, color="b")  # BGP mode
    ax.scatter(trueBranchingTimes[g] + 0.05, g, s=100, color="k")  # True
| StarcoderdataPython |
11276289 | <filename>deep_filters/core/util.py<gh_stars>10-100
import logging
import os
import random
import shelve
import time
from logging.config import fileConfig
import numpy as np
from PIL import Image
from tqdm import *
from deep_filters.core.images import load_array_image, process_image
from deep_filters.core.keras_callbacks import ModelBestCheckpoint
from keras.callbacks import EarlyStopping
from keras.preprocessing.image import load_img, img_to_array
# Configure logging from the project-level config file at import time.
fileConfig('logging.conf')
logger = logging.getLogger('nn')
def predict_base_image(channels, img, model, mode, shape=None):
    """Run *img* through *model* tile-by-tile; thin wrapper kept for API compat.

    Delegates unchanged to predict_base_image_channels_1.
    """
    return predict_base_image_channels_1(channels, img, model, mode, shape=shape)
def predict_base_image_channels_1(channels, img, model, mode, shape=None):
    """Apply the neural *model* to *img* tile-by-tile, returning an RGB image.

    Tiles are sized by the model's output layer (wx x wy); edge tiles are
    zero-padded up to the model's input size. With channels == 3 all channels
    are predicted in one pass; otherwise only the luma channel (mode
    'YCbCr') or each RGB channel separately.

    NOTE(review): *shape* is accepted but unused here -- confirm intent.
    """
    # Tile dimensions come from the final layer's output shape
    # (channel-first layout: index 2 and 3 are the spatial dims).
    wx = model.layers[-1].output_shape[2]
    wy = model.layers[-1].output_shape[3]
    logger.debug('Processing with neural model. Image size: {} {}'.format(wx, wy))
    if mode == 'YCbCr':
        img = img.convert('YCbCr')
    # Work in (channel, x, y) order to match the model input construction.
    img_ar = np.asarray(img, dtype='float32')
    img_ar = img_ar.transpose(2, 1, 0)
    full_time = 0  # cumulative CPU time spent inside model.predict
    for y in tqdm(range(0, img.height, wy)):
        for x in range(0, img.width, wx):
            # valid_x/valid_y: readable extent, clipped to the model input.
            valid_x = model.layers[0].input_shape[2]
            if x + valid_x > img.width:
                valid_x = img.width - x
            valid_y = model.layers[0].input_shape[3]
            if y + valid_y > img.height:
                valid_y = img.height - y
            # valid_x2/valid_y2: writable extent, clipped to the output tile.
            valid_x2 = wx
            if x + valid_x2 > img.width:
                valid_x2 = img.width - x
            valid_y2 = wy
            if y + valid_y2 > img.height:
                valid_y2 = img.height - y
            if channels == 3:
                # All channels predicted in a single pass.
                cropped_input = np.zeros((channels, model.layers[0].input_shape[2], model.layers[0].input_shape[3]), dtype='float32')
                cropped_input[:, :valid_x, :valid_y] = img_ar[:, x:x+valid_x, y:y+valid_y]
                start_time = time.process_time()
                preds = model.predict(np.array([cropped_input]))
                full_time += (time.process_time() - start_time)
                preds = np.clip(preds, 0, 255)  # keep values in 8-bit range
                img_ar[:, x:x+valid_x2, y:y+valid_y2] = preds[0][:, :valid_x, :valid_y]
            else:
                # Single-channel model: luma only for YCbCr, else each of RGB.
                for c in range(0, 1 if mode == 'YCbCr' else 3):
                    cropped_input = np.zeros((1, model.layers[0].input_shape[2], model.layers[0].input_shape[3]), dtype='float32')
                    cropped_input[0, :valid_x, :valid_y] = img_ar[c, x:x+valid_x, y:y+valid_y]
                    start_time = time.process_time()
                    if mode == 'YCbCr':
                        preds = model.predict(cropped_input.reshape((1, 1, cropped_input.shape[1], cropped_input.shape[2])))
                    else:
                        p = cropped_input[0]
                        preds = model.predict(p.reshape((1, 1, p.shape[0], p.shape[1])))
                    full_time += (time.process_time() - start_time)
                    preds = np.clip(preds, 0, 255)
                    img_ar[c, x:x+valid_x2, y:y+valid_y2] = preds[0][0, :valid_x, :valid_y]
    if mode == 'YCbCr':
        # Back to (y, x, channel), rebuild the YCbCr image, convert to RGB.
        result = img_ar.transpose(2, 1, 0).astype("uint8")
        result = Image.fromarray(result[:, :, :], "YCbCr")
        result = result.convert("RGB")
    else:
        img_ar = img_ar.transpose(2, 1, 0)
        result = Image.fromarray(img_ar.astype("uint8"), "RGB")
    logger.debug('End of processing, nn time: {}'.format(full_time))
    return result
def get_size(l):
    """Total element count across all arrays in *l* (sum of their .size)."""
    return sum(item.size for item in l)
def get_images(image_path):
    """Return full paths of the files directly inside *image_path*.

    Only the top directory level is listed (os.walk's first tuple).
    BUG FIX: previously returned an implicit None when the directory did not
    exist or was unreadable; now returns [] so callers can always iterate.
    """
    for _dir_name, _subdir_list, file_list in os.walk(image_path):
        return [os.path.join(image_path, f) for f in file_list]
    return []
def process_learning(img_filter, model, model_name, mode, channels, image_path='imgnet', monitor='val_loss',
                     test_images_path=None, samples=None, epochs=10, shape=None, resize=False, **kwargs):
    """Train *model* on images under *image_path*, checkpointing best weights.

    Weights plus a shelve file tracking progress and best loss are kept
    under ./weights/<model_name>. Training samples are accumulated until
    their total size exceeds samples_size, then fit once and flushed.

    NOTE(review): the *samples* parameter is accepted but never used here.
    """
    try:
        logger.debug('Loading weights')
        model.load_weights('weights/' + model_name)
    # NOTE(review): bare except silently treats any failure (not just a
    # missing file) as "start from scratch" -- consider narrowing.
    except:
        logger.exception('Weights not found, learning from scratch.')
    logger.debug("Model layers:")
    for layer in model.layers:
        logger.debug(layer.output_shape)
    if not os.path.exists('weights'):
        os.makedirs('weights')
    # Shelve persists 'progress' (images seen) and 'best' (best val_loss).
    model_dict = shelve.open('weights/' + model_name + '_storage')
    progress = model_dict.get('progress', 0)
    if not shape:
        # Default kernel = spatial dims of the model's output layer.
        shape = (model.layers[-1].output_shape[2], model.layers[-1].output_shape[3])
    test_images = get_images(test_images_path)
    logger.debug(test_images)
    logger.debug("Kernel size: {}".format(shape))
    X_test, y_test = load_array_image(test_images, mode=mode, kernel=shape, img_filter=img_filter, channels=channels, model=model, resize=resize, **kwargs)
    image_number = 0
    logger.debug('Skipping {} lines'.format(progress))
    logger.debug('Current val_loss {}'.format(model_dict.get('best', np.Inf)))
    random.seed()
    l = []  # accumulated training inputs
    t = []  # accumulated training targets
    save_best = True if monitor == 'val_loss' else False
    check_point = ModelBestCheckpoint('weights/' + model_name, monitor='val_loss', verbose=1, save_best_only=save_best, best=model_dict.get('best', np.Inf))
    samples_size = 50000000  # flush/fit threshold on accumulated array size
    for x in range(0, 100):
        for dirName, subdirList, fileList in os.walk(image_path):
            # with open(image_set_path, "r") as ins:
            for image_response in fileList:
                # NOTE(review): this loop is a no-op -- it does not actually
                # skip the first `progress` images. Confirm intended logic.
                for _ in range(progress):
                    continue
                try:
                    image_number += 1
                    image = image_path + '/' + image_response
                    img = load_img(image)
                    if resize:
                        img = img.resize(shape)
                    process_image(img, shape, l, t, mode=mode, omit_coef=0.1, img_filter=img_filter, channels=channels, model=model, **kwargs)
                    if get_size(l) > samples_size:
                        # Enough samples accumulated: fit once, then flush.
                        X_train = np.array(l)
                        Y_train = np.array(t)
                        early_stopping = EarlyStopping(monitor='val_loss', verbose=1, mode='min', patience=1)
                        callbacks = [check_point]
                        if save_best:
                            callbacks.append(early_stopping)
                        model.fit(X_train, Y_train, verbose=1, batch_size=1, nb_epoch=epochs, validation_data=(X_test, y_test), callbacks=callbacks)
                        l = []
                        t = []
                        logger.debug("Progress: {}. Best loss so far: {}".format(progress, check_point.best))
                        model_dict['best'] = check_point.best
                        model_dict['progress'] = progress
                        image_number = 0
                # NOTE(review): broad catch keeps training alive on bad
                # images; consider logging via logger instead of print.
                except Exception as ex:
                    print(ex)
                    pass
                progress += 1
9798744 | from .bases import ButtonConfig
from .buttons import ActionButton, DropDownButton, HyperlinkButton, WidgetButton
from .enums import ButtonLevel, ButtonType, HyperlinkTarget
| StarcoderdataPython |
1616687 | from ccdc import cassandra
from ccdc import pyccd
from pyspark import sql
def test_options():
    """cassandra.options must expose exactly the expected connection keys."""
    expected_keys = {
        'table',
        'keyspace',
        'spark.cassandra.auth.password',
        'spark.cassandra.auth.username',
        'spark.cassandra.connection.compression',
        'spark.cassandra.connection.host',
        'spark.cassandra.connection.port',
        'spark.cassandra.input.consistency.level',
        'spark.cassandra.output.batch.grouping.buffer.size',
        'spark.cassandra.output.concurrent.writes',
        'spark.cassandra.output.consistency.level',
    }
    assert set(cassandra.options("foo")) == expected_keys
def test_read_write(spark_context, timeseries_rdd):
    """Round-trip two segment rows through Cassandra and verify the cx values.

    NOTE(review): the timeseries_rdd fixture is accepted but unused here --
    it may exist only for its setup side effects; confirm.
    """
    ctx = spark_context
    # create a dataframe from an rdd
    rdd = ctx.parallelize([(100, -100, 200, -200), (300, -300, 400, -400)])
    layers = rdd.map(lambda x: sql.Row(cx=x[0], cy=x[1], px=x[2], py=x[3], sday='1999-01-01', eday='1999-12-31'))
    sctx = sql.SQLContext(ctx)
    dataframe = sctx.createDataFrame(layers)
    # write the dataframe to cassandra. cassandra.write returns NoneType, not a dataframe
    cassandra.write(ctx, dataframe, 'segment')
    # read the table into a dataframe
    read_dataframe = cassandra.read(spark_context, 'segment')
    # Both written rows (cx=100 and cx=300) must come back.
    assert set([i.asDict()["cx"] for i in read_dataframe.collect()]) == set([100, 300])
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.