seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
71712982823 | import requests
from behave import *
from hamcrest import *
@when('Make a get request')
def make_get_request_to_api(context):
    """Issue a GET to the reqres.in users endpoint and verify it succeeded."""
    # Keep the response on the behave context so later steps can inspect it.
    response = requests.get("https://reqres.in/api/users?page=2")
    context.resp = response
    assert_that(response.status_code, equal_to(200))
@then('Check if users list is returned')
def check_user_list(context):
    """Verify the response carries a non-trivial list of users.

    The users live under the 'data' key of the JSON body (the follow-up
    step reads resp.json().get('data')[0]), so measure the length of that
    list rather than the number of top-level keys in the JSON envelope,
    which the previous version did.
    """
    users = context.resp.json().get('data')
    assert_that(len(users), greater_than(5))
@then("Check if user's data is correct")
# NOTE(review): this redefines check_user_list from the previous step,
# shadowing it at module level. behave still registers both steps through
# the decorators, but consider giving this function a distinct name.
def check_user_list(context):
    # Expected id of the first user on page 2 of the reqres.in fixture.
    expected_data={
        "id":7
    }
    # Build a comparable dict from the first element of the 'data' list.
    actual_data={
        "id":context.resp.json().get('data')[0].get("id")
    }
assert_that(expected_data,equal_to(actual_data)) | HarshDevSingh/docker_python_bdd | features/steps/rest_api.py | rest_api.py | py | 631 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 8,
"usage_type": "call"
}
] |
7737385160 | # -*- coding=utf8
import web, random, string
from StringIO import StringIO
from PIL import Image, ImageDraw, ImageFont, ImageFilter
#Number of characters in the CAPTCHA code
vcodeLength = 4
#Pixel size of the generated CAPTCHA image
vcodeSize = (60, 25)
#Background colour, near-white by default
vcodeBgcolor = (238, 238, 238)
#Font colour, blue
vcodeFontcolor = (0, 0, 255)
#Interference-line colour, red
vcodeLinecolor = (255, 0, 0)
#Whether to draw interference lines at all
isDrawLine = True
#Lower/upper bounds on the number of interference lines
vcodeLineNumber = (1, 5)
#Random CAPTCHA string generator
def gen_text():
    """Generate a random CAPTCHA string, store it in the session, return it.

    Candidate characters are the ASCII letters (Python 2 string.letters)
    plus the digits 0-9. The code length now follows the module-wide
    vcodeLength setting instead of a hard-coded 4, so changing vcodeLength
    takes effect here as well.
    """
    source = list(string.letters)
    for index in range(0, 10):
        source.append(str(index))
    validateCode = ''.join(random.sample(source, vcodeLength))
    # Persist the answer so check_validate_code can compare it later.
    web.config._session['validateCode'] = validateCode
    return validateCode
#Draw an interference line
def gen_line(draw, width, height):
    """Draw one random interference line across the CAPTCHA canvas."""
    start_point = (random.randint(0, width), random.randint(0, height))
    end_point = (random.randint(0, width), random.randint(0, height))
    draw.line([start_point, end_point], fill = vcodeLinecolor)
#Render the CAPTCHA image
def gen_code():
    """Render the CAPTCHA image and return it as PNG bytes.

    Generates the random text, draws it, optionally overlays a random
    number of interference lines bounded by vcodeLineNumber, then applies
    an affine warp to make OCR harder.
    """
    width, height = vcodeSize
    image = Image.new('RGBA', (width, height), vcodeBgcolor)
    font = ImageFont.truetype("arial.ttf", 25)
    draw = ImageDraw.Draw(image)
    text = gen_text()
    font_width, font_height = font.getsize(text)
    # NOTE(review): dividing by vcodeLength (4) instead of 2 offsets the
    # text toward the top-left; confirm whether centering was intended.
    draw.text(((width - font_width) / vcodeLength, (height - font_height) / vcodeLength), text, font = font, fill = vcodeFontcolor)
    if isDrawLine:
        # Honor the configured interference-line range instead of always
        # drawing exactly one line (vcodeLineNumber was previously unused).
        for _ in range(random.randint(*vcodeLineNumber)):
            gen_line(draw, width, height)
    image = image.transform((width + 20, height + 10), Image.AFFINE, (1, -0.3, 0, -0.1, 1, 0), Image.BILINEAR)
    #image = image.filter(ImageFilter.EDGE_ENHANCE_MORE)
    out = StringIO()
    image.save(out, 'png', quality=75)
    return out.getvalue()
class index:
    """Serve the landing page, choosing a mobile template for phone agents."""
    def GET(self):
        user_agent = web.ctx.env['HTTP_USER_AGENT'].lower()
        is_mobile = 'android' in user_agent or 'iphone' in user_agent
        template_name = 'indexMobile.html' if is_mobile else 'index.html'
        return web.template.frender(template_name)()
class get_validate_code:
    """Return a freshly rendered CAPTCHA image as a PNG response."""
    def GET(self):
        web.header('Content-Type', 'image/png')
        png_bytes = gen_code()
        return png_bytes
class check_validate_code:
    """Compare the submitted CAPTCHA answer with the one in the session."""
    def GET(self):
        if 'validateCode' in web.config._session:
            # Default the parameter to None so a request that omits
            # validateCode compares unequal instead of raising AttributeError.
            submitted = web.input(validateCode=None).validateCode
            return web.config._session['validateCode'] == submitted
        else:
            # No code was ever issued for this session.
            return None
| kungfucode-rex/jlgjg-admin | server/web/controller/Index_C.py | Index_C.py | py | 2,315 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "string.letters",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "random.sample",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "web.config",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "random.randint",
... |
10180408027 | #!/usr/bin/python3
'''
Core Flask App
'''
from flask import Flask, jsonify, make_response
from models import storage
from api.v1.views import app_views
from os import getenv
app = Flask(__name__)
# Mount all API routes declared on the blueprint.
app.register_blueprint(app_views)
# Treat /path and /path/ as the same route.
app.url_map.strict_slashes = False
@app.teardown_appcontext
def closeStorage(exception):
    """Close the storage engine when the app context is torn down."""
    storage.close()
@app.errorhandler(404)
def pageNotFound404(error):
    """Render the JSON payload the API returns for unknown routes."""
    payload = jsonify({"error": "Not found"})
    return make_response(payload, 404)
if __name__ == "__main__":
    # `or` also covers variables that are set but empty, matching the
    # original unset-then-patch checks.
    host = getenv('HBNB_API_HOST') or '0.0.0.0'
    port = getenv('HBNB_API_PORT') or '5000'
    app.run(host=host, port=port, threaded=True)
| jamesAlhassan/AirBnB_clone_v3 | api/v1/app.py | app.py | py | 746 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "api.v1.views.app_views",
"line_number": 11,
"usage_type": "argument"
},
{
"api_name": "models.storage.close",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "models.st... |
70077277545 | from path import Path
import sys, os
def rec(fname):
    """Open the ALSA mixer for level setup, then record into fname via sox."""
    os.system('alsamixer')
    os.system('sox -t alsa default "{0}"'.format(fname))
    print('playback command: ~$ aplay {0}'.format(fname))
def initialize():
    """Ensure the ./wavs output directory exists and return its path."""
    path = os.path.join(os.getcwd(), 'wavs')
    # exist_ok avoids the check-then-create race of the previous
    # `os.path.exists(path) or os.mkdir(path)` idiom.
    os.makedirs(path, exist_ok=True)
    return path
def normalize(fname):
    """Append a .wav extension when fname has no extension at all."""
    # os.path.splitext removes the need for the third-party path.Path here.
    _, ext = os.path.splitext(fname)
    if not ext:
        fname += '.wav'
    return fname
def sanity_check():
    """Install sox via apt-get if it is not already on the PATH."""
    # Explicit emptiness check instead of the previous try/assert control
    # flow, which would be silently skipped under `python -O`.
    if not list(os.popen('which sox')):
        os.system('sudo apt-get update')
        os.system('sudo apt-get install sox')
def main():
    """CLI entry point: validate the argument, then record to a fresh file."""
    sanity_check()
    if len(sys.argv) < 2:
        print('You must specify an output filename.')
        return
    fname = os.path.join(initialize(), normalize(sys.argv[1]))
    # Refuse to overwrite an existing recording. Guard clauses replace the
    # previous assert-based control flow (asserts vanish under -O).
    if os.path.exists(fname):
        print('{fname} already exists.'.format_map(vars()))
        return
    rec(fname)
if __name__ == '__main__':
main()
| chris-hamberg/system_utils | alsa_record.py | alsa_record.py | py | 1,013 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.system",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 10,
"... |
43111990750 | import aws_cdk as cdk
from constructs import Construct
from aws_cdk import (aws_apigateway as apigateway,
aws_lambda as lambda_,
aws_dynamodb)
class TasksService(Construct):
    """CDK construct wiring a DynamoDB table, a Lambda handler and a REST API."""

    def __init__(self, scope: Construct, id: str):
        super().__init__(scope, id)
        # Task storage, keyed by a string id.
        task_table = aws_dynamodb.Table(
            self,
            "task_table",
            partition_key=aws_dynamodb.Attribute(
                name="id",
                type=aws_dynamodb.AttributeType.STRING
            )
        )
        # Python Lambda implementing the task operations; it learns the
        # table name through its environment.
        task_lambda = lambda_.Function(
            self,
            "TaskLambda",
            runtime=lambda_.Runtime.PYTHON_3_9,
            code=lambda_.Code.from_asset("resources"),
            handler="tasks.main"
        )
        task_lambda.add_environment("TABLE_NAME", task_table.table_name)
        task_table.grant_read_write_data(task_lambda)
        # Expose the Lambda through API Gateway for any HTTP verb.
        tasks_integration = apigateway.LambdaIntegration(task_lambda)
        api = apigateway.RestApi(
            self, "widgets-api",
            rest_api_name="Widget Service",
            description="This service serves widgets."
        )
        api.root.add_method("ANY", tasks_integration)
| basv98/api-dynamodb | tasks/tasks_service.py | tasks_service.py | py | 1,208 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "constructs.Construct",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "constructs.Construct",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "aws_cdk.aws_dynamodb.Table",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "aw... |
37821183076 | import tensorflow as tf
import numpy as np
from tensorflow.python.ops.signal import window_ops
from scipy import stats
import decimal, math
import os, sys
import librosa
import soundfile as sf
import functools
import matplotlib.pyplot as plt
from matplotlib import style
from scipy.special import exp1
import math
class tensor_polar():
    """Polar-form STFT analysis/synthesis helper.

    Argument/s:
        N_d - window duration (samples).
        N_s - window shift (samples).
        K - number of frequency bins.
        f_s - sampling frequency.
    """
    def __init__(self, N_d, N_s, K, f_s):
        self.N_d = N_d
        self.N_s = N_s
        self.K = K
        self.f_s = f_s
        # Non-periodic Hamming window, pre-bound for the STFT calls.
        self.W = functools.partial(window_ops.hamming_window,
            periodic=False)
        self.ten = tf.cast(10.0, tf.float32)
        self.one = tf.cast(1.0, tf.float32)

    def polar_analysis(self, x):
        """Return the short-time magnitude and phase spectrums of waveform x."""
        spectrum = tf.signal.stft(x, self.N_d, self.N_s, self.K,
            window_fn=self.W, pad_end=True)
        magnitude = tf.abs(spectrum)
        phase = tf.math.angle(spectrum)
        return magnitude, phase

    def polar_synthesis(self, STMS, STPS):
        """Reconstruct a waveform from magnitude (STMS) and phase (STPS) spectrums."""
        phase_rotor = tf.exp(1j * tf.cast(STPS, tf.complex64))
        spectrum = tf.cast(STMS, tf.complex64) * phase_rotor
        window_fn = tf.signal.inverse_stft_window_fn(self.N_s, self.W)
        return tf.signal.inverse_stft(spectrum, self.N_d, self.N_s, self.K, window_fn)
def mmse_lsa_np(xi, gamma):
    """Compute the MMSE-LSA gain function (NumPy version).

    Argument/s:
        xi - a priori SNR.
        gamma - a posteriori SNR.
    Returns:
        MMSE-LSA gain function.
    """
    eps = np.finfo(float).eps
    # Guard zeros so exp1 never sees nu == 0 (exp1 diverges at 0).
    safe_xi = np.where(xi == 0, eps, xi)
    safe_gamma = np.where(gamma == 0, eps, gamma)
    v_1 = safe_xi / (1.0 + safe_xi)
    nu = v_1 * safe_gamma
    return v_1 * np.exp(0.5 * exp1(nu))
class mcra(object):
    """Minima-Controlled Recursive Averaging (MCRA) noise tracker.

    Tracks a frequency-smoothed power spectrum S, its minimum over a
    sliding window, derives a speech-presence probability, and recursively
    updates the noise estimate lambda_d; also yields an MMSE-LSA gain.
    The per-frame update order in tracking_noise is load-bearing.
    """
    def __init__(self, alpha_d, alpha_s, alpha_p, lambda_d, frame_L, bin_num, delta, *tupleArg):
        # Recursive-smoothing factors (noise, spectrum, presence prob).
        self.alpha_d = np.expand_dims(alpha_d,0)
        self.alpha_s = np.expand_dims(alpha_s,0)
        self.alpha_p = np.expand_dims(alpha_p,0)
        # Initial noise PSD; normalise to shape (channels, bins).
        if len(lambda_d.shape) == 2:
            self.lambda_d = lambda_d
        elif len(lambda_d.shape) == 1:
            self.lambda_d = np.expand_dims(lambda_d,0)
        self.bin_len = bin_num
        # Banded matrix applying a length-7 Hanning kernel across frequency
        # (only the 5 interior taps are non-zero).
        a = np.hanning(7)
        self.matrix = np.eye(self.bin_len)*a[3] \
            + np.eye(self.bin_len, k=-2)*a[1] + np.eye(self.bin_len, k=2)*a[5] \
            + np.eye(self.bin_len, k=-1)*a[2] + np.eye(self.bin_len, k=1)*a[4]
        self.matrix = np.expand_dims(self.matrix, 0).repeat(self.lambda_d.shape[0], 0)
        # Smoothed spectrum S, windowed minimum S_min, running temp S_tmp.
        self.S = self.S_tmp = self.S_min = np.squeeze(np.matmul(self.matrix, np.expand_dims(self.lambda_d, -1)),-1)
        self.frame_L = frame_L
        # Ratio threshold for the hard speech-presence decision.
        self.delta = np.expand_dims(delta,0)
        self.self_alpha_D_hat = np.expand_dims(alpha_d,0)
        self.speech_present = np.expand_dims(np.zeros(self.bin_len, float),0)
        # A posteriori / a priori SNR state for the decision-directed update.
        self.snr_gammar = np.expand_dims(np.ones(self.bin_len, float)*0.1,0)
        self.snr_xi = np.expand_dims(np.ones(self.bin_len, float)*0.1,0)
        self.alpha_snr = 0.92
        self.G_h=mmse_lsa_np(self.snr_xi, self.snr_gammar)
        # Gain floor used by the OM-LSA combination.
        self.G_min = np.expand_dims(np.ones(self.bin_len, float) * 0.09,0)
    def update_snr_dd(self, pwr):
        """Decision-directed update of the a priori / a posteriori SNRs."""
        snr_gammar_prev = self.snr_gammar
        self.snr_gammar = pwr / self.lambda_d
        self.snr_xi = self.alpha_snr * np.square(self.G_h) * snr_gammar_prev + (1 - self.alpha_snr) * np.maximum(
            self.snr_gammar - 1, 0)
    def update_S(self, pwr):
        """Smooth the power spectrum in frequency, then recursively in time."""
        S_f = np.squeeze(np.matmul(self.matrix, np.expand_dims(pwr,-1)),-1)
        self.S = self.alpha_s * self.S + (1 - self.alpha_s) * S_f
    def tracking_S_win(self, current_frame):
        """Track the minimum of S over a window of frame_L frames."""
        # Restart the search window every frame_L frames; otherwise keep
        # folding the current S into both running minima.
        if current_frame % self.frame_L == 0:
            self.S_min = np.minimum(self.S, self.S_tmp)
            self.S_tmp = self.S
        else:
            self.S_min = np.minimum(self.S, self.S_min)
            self.S_tmp = np.minimum(self.S, self.S_tmp)
    def update_speech_present(self):
        """Smooth the hard S/S_min > delta decision into a probability."""
        S_ratio = self.S/self.S_min
        p = np.array(S_ratio > self.delta).astype(int)
        self.speech_present = self.alpha_p * self.speech_present + (1 - self.alpha_p) * p
    def update_alpha_d(self):
        # Presence-dependent smoothing: update noise slowly where speech
        # is likely (alpha_D_hat -> 1), quickly where it is absent.
        self.alpha_D_hat = self.alpha_d + (1-self.alpha_d)*self.speech_present
    def update_noise(self, pwr):
        """Recursive noise PSD update gated by alpha_D_hat."""
        self.lambda_d = self.alpha_D_hat * self.lambda_d + (1 - self.alpha_D_hat) * pwr
    def update_SNR_GH(self):
        """Refresh the MMSE-LSA gain from the current SNR estimates."""
        self.G_h = mmse_lsa_np(self.snr_xi, self.snr_gammar)
    def tracking_noise(self, pwr, c_frame):
        """Run one full tracking step for power spectrum pwr at frame c_frame."""
        self.update_snr_dd(pwr)
        self.update_S(pwr)
        self.tracking_S_win(c_frame)
        self.update_speech_present()
        self.update_alpha_d()
        self.update_noise(pwr)
        self.update_SNR_GH()
        return np.squeeze(self.lambda_d), np.squeeze(self.G_h), np.squeeze(self.speech_present)
    def mmse_lsa(self, meg, c_frame):
        """Apply the plain MMSE-LSA gain to magnitude spectrum meg."""
        pwr = np.square(meg)
        lambda_d, G, P = self.tracking_noise(pwr, c_frame)
        return np.squeeze(G * meg)
    def omlsa(self, meg, c_frame):
        """OM-LSA: blend the LSA gain with the gain floor by presence prob."""
        pwr = np.square(meg)
        lambda_d, G, P = self.tracking_noise(pwr, c_frame)
        return np.squeeze(np.power(G, P) * np.power(self.G_min, (1 - P)) * meg)
class mcra_2(mcra):
    """MCRA variant with continuous (non-windowed) minimum tracking."""
    def __init__(self, alpha_d, alpha_s, alpha_p, lambda_d, frame_L, fft_len, delta, gamma, beta):
        super().__init__(alpha_d, alpha_s, alpha_p, lambda_d, frame_L, fft_len, delta)
        self.gamma = gamma  # look-back factor for the continuous minimum
        self.beta = beta  # bias-compensation factor
        self.S_minus_one = self.S  # previous frame's smoothed spectrum
    def update_S_2(self,meg):
        """Remember the previous smoothed spectrum, then update it."""
        self.S_minus_one = self.S
        self.update_S(meg)
    def tracking_S_continue(self):
        """Continuous minimum update (no frame_L window)."""
        # Where S dropped to/below the running minimum, adopt S directly;
        # elsewhere decay the minimum toward a bias-compensated spectrum.
        p = np.array(self.S_min < self.S).astype(int)
        p_not = np.array(self.S_min >= self.S).astype(int)
        self.S_min = self.S*p_not+(self.gamma * self.S_min + (1-self.gamma)*(self.S - self.beta * self.S_minus_one)/(1-self.beta))*p
    def tracking_noise(self, pwr, c_frame):
        """One tracking step; same contract as mcra.tracking_noise but
        returns the un-squeezed state arrays."""
        self.update_snr_dd(pwr)
        self.update_S_2(pwr)
        self.tracking_S_continue()
        self.update_speech_present()
        self.update_alpha_d()
        self.update_noise(pwr)
        self.update_SNR_GH()
        return self.lambda_d, self.G_h, self.speech_present
class imcra(mcra):
    """Improved MCRA (IMCRA): two smoothing/minimum stages plus a
    soft a-priori speech-absence probability q."""
    def __init__(self, alpha_d, alpha_s, alpha_p, lambda_d, frame_L, fft_len, delta, beta, b_min, gamma0, gamma1, zeta0):
        super().__init__(alpha_d, alpha_s, alpha_p, lambda_d, frame_L, fft_len, delta)
        self.beta = beta
        self.b_min = b_min  # bias compensation for the minimum statistic
        # Decision thresholds for the rough/fine speech-absence tests.
        self.gamma0 = gamma0
        self.gamma1 = gamma1
        self.zeta0 = zeta0
        # Second-iteration (hat) counterparts of S / S_min / S_tmp.
        self.S_hat = self.S
        self.S_min_hat = self.S_min
        self.S_tmp_hat = self.S_tmp
        self.zero = np.zeros(self.bin_len, float)
        self.ones = np.ones(self.bin_len, float)
        self.gamma1minus1 = self.gamma1 - self.ones
        # Second stage smooths slightly harder over a shorter window.
        self.alpha_s_hat = self.alpha_s * 1.2
        self.frame_L_hat = frame_L * 0.5
    def update_S_hat(self, pwr):
        """Second smoothing pass restricted to bins judged speech-free."""
        gamma_min = pwr/(self.b_min*self.S_min)
        zeta = self.S/(self.b_min*self.S_min)
        # Indicator of bins passing the rough speech-absence test.
        I_tmp = np.array(np.logical_and((gamma_min < self.gamma0), (zeta < self.zeta0))).astype(int)
        win_I = np.matmul(self.matrix, I_tmp)
        a_p = np.array(win_I == self.zero).astype(int)
        a_p_not = np.array(win_I > self.zero).astype(int)
        # Where no neighbour is speech-free, fall back to the previous
        # S_hat (denominator padded with a_p to avoid division by zero).
        denominator = win_I + a_p
        numerator = win_I*pwr + self.S_hat*a_p#_not
        S_f = numerator/denominator
        self.S_hat = self.alpha_s_hat * self.S_hat + (1-self.alpha_s_hat)*S_f
    def tracking_S_win_hat(self, current_frame):
        """Windowed minimum tracking for the second-stage spectrum."""
        if current_frame % self.frame_L_hat == 0:
            self.S_min_hat = np.minimum(self.S_hat, self.S_tmp_hat)
            self.S_tmp_hat = self.S_hat
        else:
            self.S_min_hat = np.minimum(self.S_hat, self.S_min_hat)
            self.S_tmp_hat = np.minimum(self.S_hat, self.S_tmp_hat)
    def update_speech_present(self,pwr):
        """Soft speech-presence probability from the IMCRA q estimate."""
        gamma_min_hat = pwr/(self.b_min*self.S_min_hat)
        zeta_hat = self.S_hat/(self.b_min*self.S_min_hat)
        # a: certain absence; b: transition region scaled linearly below.
        a = np.array(np.logical_and((gamma_min_hat < self.ones),(zeta_hat < self.zeta0))).astype(int)
        b = np.array(np.logical_and((zeta_hat < self.zeta0), np.logical_and((gamma_min_hat < self.gamma1), (gamma_min_hat > self.ones)))).astype(int)
        q = a + b*(self.gamma1-gamma_min_hat)/self.gamma1minus1
        c_x = 1+self.snr_xi
        c_x = np.where(c_x == 0, np.finfo(float).eps, c_x)
        v = np.true_divide(self.snr_xi*self.snr_gammar,c_x)
        # eps guards keep the probability expression free of 0-divisions.
        oneminusq = 1-q
        oneminusq = np.where(oneminusq == 0, np.finfo(float).eps, oneminusq)
        sp_reciprocal = 1+q*(1+self.snr_xi)*np.exp(-v)/oneminusq
        sp_reciprocal = np.where(sp_reciprocal == 0, np.finfo(float).eps, sp_reciprocal)
        self.speech_present = 1/sp_reciprocal
    def tracking_noise(self, pwr, c_frame):
        """One IMCRA step: both smoothing/minimum stages, then noise + gain."""
        self.update_snr_dd(pwr)
        self.update_S(pwr)
        self.tracking_S_win(c_frame)
        self.update_S_hat(pwr)
        self.tracking_S_win_hat(c_frame)
        self.update_speech_present(pwr)
        self.update_alpha_d()
        self.update_noise(pwr)
        self.update_SNR_GH()
        return np.squeeze(self.lambda_d), np.squeeze(self.G_h), np.squeeze(self.speech_present)
''''''
class mcra_tbrr(mcra):
    """MCRA driven by a transient-beam-to-reference ratio (TBRR): two inner
    MCRA trackers (beam and reference channels) feed a hypothesis test that
    yields the a-priori speech-absence probability q_tbrr."""
    def __init__(self, alpha_d, alpha_s, alpha_p, lambda_d, z_b, z_r, frame_L, bin_num, delta, *tupleArg):
        super().__init__(alpha_d, alpha_s, alpha_p, lambda_d, frame_L, bin_num, delta)
        # Independent trackers for the beam output and the reference signal.
        self.mcra_zb = mcra(alpha_d=alpha_d, alpha_s=alpha_s, alpha_p=alpha_p, lambda_d=z_b, frame_L=frame_L, bin_num=bin_num,
            delta=delta)
        self.mcra_zr = mcra(alpha_d=alpha_d, alpha_s=alpha_s, alpha_p=alpha_p, lambda_d=z_r, frame_L=frame_L, bin_num=bin_num,
            delta=delta)
        # Thresholds of the TBRR hypothesis test.
        self.Lambda_0 = 1.67
        self.Lambda_1 = 1.81
        self.gammar_0 = 4.6
        self.gammar_0_minus_1 = 4.6-1
        self.Omega_low = 1
        self.Omega_high = 3
        self.Omega_delta =self.Omega_high - self.Omega_low
        self.betta = 1.47
    def tracking_tbrr(self, pwr_b, pwr_bm, c_frame):
        """Run the channel trackers and derive q_tbrr via masked hypotheses."""
        self.Q_zb,self.G_zb, _ = self.mcra_zb.tracking_noise(pwr_b, c_frame)
        self.Q_zr,self.G_zr, _ = self.mcra_zr.tracking_noise(pwr_bm, c_frame)
        # Smoothed-power to noise ratios for beam and reference channels.
        self.Lambda_y = np.squeeze(self.mcra_zb.S/self.mcra_zb.lambda_d)
        self.Lambda_bm = np.max(self.mcra_zr.S / self.mcra_zr.lambda_d,axis=0)
        # Transient beam-to-reference ratio.
        self.Omega = (self.mcra_zb.S - self.mcra_zb.lambda_d)/np.max(self.mcra_zr.S - self.mcra_zr.lambda_d,axis=0)
        # H0: no transient at all; H1: transient dominated by the reference.
        H0 = np.array(self.Lambda_y <= self.Lambda_0).astype(int)
        H0_not_mask = 1 - H0
        H1_tmp = np.array(self.Lambda_bm <= self.Lambda_1).astype(int)
        H1 = H0_not_mask* H1_tmp
        H1_not_mask = 1 - H1
        # Hr: remaining bins, decided by the Omega/gamma tests below.
        Hr = H0_not_mask * H1_not_mask
        H0t = np.logical_or(np.array(self.Omega < self.Omega_low), np.array(self.snr_gammar < 1)).astype(int)
        H0t_tbrr = H0t * Hr
        H0t_tbrr_not_mask = 1 - H0t_tbrr
        H_tbrr_mask = Hr * H0t_tbrr_not_mask
        H1_tbrr = np.logical_or(np.array(self.Omega > self.Omega_high), np.array(self.snr_gammar > self.gammar_0)).astype(int)
        H1_tbrr_r = H1_tbrr * H_tbrr_mask
        H1_tbrr_r_not_mask = 1 - H1_tbrr_r
        Hr_tbrr_mask = H_tbrr_mask * H1_tbrr_r_not_mask
        # Soft decision in the transition region.
        r_tbrr = np.maximum((self.gammar_0 - self.snr_gammar)/self.gammar_0_minus_1, (self.Omega_high-self.Omega)/self.Omega_delta)
        Hr_tbrr = r_tbrr * Hr_tbrr_mask
        # Combined a-priori speech-absence probability.
        self.q_tbrr = H0+H0t_tbrr+Hr_tbrr
    def update_speech_present(self):
        """Speech-presence probability from q_tbrr (same form as IMCRA)."""
        c_x = 1 + self.snr_xi
        c_x = np.where(c_x == 0, np.finfo(float).eps, c_x)
        v = np.true_divide(self.snr_xi * self.snr_gammar, c_x)
        oneminusq = 1 - self.q_tbrr
        oneminusq = np.where(oneminusq == 0, np.finfo(float).eps, oneminusq)
        sp_reciprocal = 1 + self.q_tbrr * (1 + self.snr_xi) * np.exp(-v) / oneminusq
        sp_reciprocal = np.where(sp_reciprocal == 0, np.finfo(float).eps, sp_reciprocal)
        self.speech_present = 1 / sp_reciprocal
    def tracking_noise(self, pwr, pwr_b, pwr_bm, c_frame):
        """One TBRR-gated noise-tracking step (note the extra channel args)."""
        self.update_snr_dd(pwr)
        #self.update_S(pwr)
        #self.tracking_S_win(c_frame)
        self.tracking_tbrr(pwr_b, pwr_bm, c_frame)
        self.update_speech_present()
        self.update_alpha_d()
        self.update_noise(pwr)
        self.update_SNR_GH()
        return self.lambda_d, self.G_h, self.speech_present
    def omlsa(self, meg, meg_b, meg_bm, c_frame):
        """OM-LSA using beam (meg_b) and reference (meg_bm) magnitudes."""
        pwr = np.square(meg)
        pwr_b = np.square(meg_b)
        pwr_bm = np.square(meg_bm)
        lambda_d, G, P = self.tracking_noise(pwr, pwr_b, pwr_bm,c_frame)
        return np.squeeze(np.power(G, P) * np.power(self.G_min, (1 - P)) * meg)
def ensures_dir(directory: str):
    """Create directory (including parents) when a non-empty path is given."""
    if directory:
        # exist_ok avoids the race between the former exists() test and
        # the subsequent makedirs() call.
        os.makedirs(directory, exist_ok=True)
def expw(x):
    """Experimental table-driven exp(x) prototype (incomplete).

    NOTE(review): computes a table/polynomial approximation of exp(x) into
    `result` and the reference values `golden`/`goldenn`, but ends with
    `pass` and returns None except for the two overflow/underflow early
    returns. Looks like scratch code for verifying the approximation.
    """
    # Precomputed exp(k/16), exp(k/256), exp(k/4096) lookup tables.
    c_exp_x0 = [
        [1.0, 1.0644944589178593, 1.1331484530668263, 1.2062302494209807,
        1.2840254166877414, 1.3668379411737963, 1.4549914146182013, 1.5488302986341331,
        1.6487212707001282, 1.7550546569602985, 1.8682459574322223, 1.988737469582292,
        2.117000016612675, 2.2535347872132085, 2.398875293967098, 2.553589458062927],
        [1.0, 1.0039138893383475, 1.007843097206448, 1.0117876835593316,
        1.0157477085866857, 1.0197232327137742, 1.023714316602358, 1.0277210211516217,
        1.0317434074991028, 1.035781537021624, 1.03983547133623, 1.0439052723011284,
        1.0479910020166328, 1.0520927228261099, 1.056210497316932, 1.0603443883214314],
        [1.0, 1.0002441704297478, 1.0004884004786945, 1.000732690161397,
        1.0009770394924165, 1.0012214484863171, 1.0014659171576668, 1.001710445521037,
        1.0019550335910028, 1.0021996813821428, 1.002444388909039, 1.0026891561862772,
        1.0029339832284467, 1.0031788700501403, 1.0034238166659546, 1.003668823090489]
    ]
    # Clamp far outside the double range.
    if x<-709: return 0
    elif x>709: return 1.7E+308
    # Split x*log2(e) into integer exponent and base-2 fraction.
    s = x * np.log2(np.e)
    integer = np.floor(s)
    decimal = (s - np.floor(s))*np.log(2)
    # Extract three 4-bit table indices from the fraction.
    ep = decimal * 16
    q0 = int(np.floor(ep))
    ep = ep - np.floor(ep)
    ep1 = ep * 16
    q1 = int(np.floor(ep1))
    ep1 = ep1 - np.floor(ep1)
    ep2 = ep1 * 16
    q2 = int(np.floor(ep2))
    ep2 = ep2 - np.floor(ep2)
    # Table product vs. its closed-form counterpart h1 (for comparison).
    h = c_exp_x0[0][q0] * c_exp_x0[1][q1] * c_exp_x0[2][q2]
    h1 = np.exp(q0/16)*np.exp(q1/(16*16))*np.exp(q2/(16*16*16))
    # Degree-4 Taylor polynomial for the residual.
    w = ep2 / 4096
    ew = 1 + w + w * w / 2 + w * w * w / 6 + w * w * w * w / 24
    eww = np.exp(w)
    decimal_final = h * ew
    result = decimal_final * 2**integer
    # Reference values computed directly for comparison in a debugger.
    golden = np.exp(x)
    goldenn = 2**(np.log2(np.e)*x)
    pass
def loge(x):
    """Experimental bitwise log helper (incomplete).

    NOTE(review): walks the low 52 bits of integer x, accumulating the
    corresponding binary fraction into `decimal`, but never returns a
    value — appears to be work-in-progress scaffolding.
    """
    #if x <= 0: return -1.7E+308
    #elif x > 100000000: return 18.420680743952367
    decimal = 0
    shift = 1
    # Bit positions 51..0, most significant first.
    inverselist = np.flipud(np.arange(52))
    for i in inverselist:
        mask = 1 << i
        shift /= 2
        if mask & x:
            decimal += shift
if __name__ == '__main__':
    # Ad-hoc DFT experiments: build a 256-point DFT matrix and plot the
    # spectra of a few test signals. Exploratory/debugging code.
    pi = math.pi
    M = 256
    b = np.exp(1j)
    W = np.exp((2*pi/M)*1j)
    nature = np.arange(M)
    #expw(3.5)
    loge(0x5000000000000)
    # NOTE(review): np.complex is deprecated/removed in modern NumPy.
    DFT_Matrix = np.ones([M,M],np.complex)
    for row in range(M):
        DFT_Matrix[row]=W**(-nature*(row))
    # Plot exp(-x)/x on [0, 8].
    def exp11(x):
        return np.exp(-x)/x
    x = np.linspace(0, 8, 256)
    y = exp11(x)
    plt.plot(x, y)
    plt.show()
    """
    Triangle signal (disabled experiment)
    def triangle_wave(x, c, hc): # amplitude hc, width c, slope hc/2c
        if x >= c / 2:
            r = 0.0
        elif x <= -c / 2:
            r = 0.0
        elif x > -c / 2 and x < 0:
            r = 2 * x / c * hc + hc
        else:
            r = -2 * x / c * hc + hc
        return r
    x = np.linspace(-3, 3, 256)
    y = np.array([triangle_wave(t, 4.0, 1.0) for t in x])
    plt.ylim(-0.2, 1.2)
    plt.plot(x, y)
    plt.show()
    #Y = DFT_Matrix*y
    Y = np.matmul(DFT_Matrix, y)
    y_idx = np.linspace(0, 2*pi, 256)
    plt.plot(y_idx, np.absolute(Y))
    """
    """
    Rectangular pulse signal (disabled experiment)
    def rect_wave(x, c, c0): # rectangle starting at c0 with width c
        if x >= (c + c0):
            r = 0.0
        elif x < c0:
            r = 0.0
        else:
            r = 1
        return r
    x = np.linspace(-2, 4, 256)
    y = np.array([rect_wave(t, 2.0, -1.0) for t in x])
    plt.ylim(-0.2, 4.2)
    plt.plot(x, y)
    plt.show()
    Y = np.matmul(DFT_Matrix, y)
    y_idx = np.linspace(0, 2*pi, 256)
    plt.plot(y_idx, np.absolute(Y))
    """
    # Sine-wave experiment: plot sin(pi/4 * t) and its DFT magnitude.
    from sympy import plot, sin, Symbol
    x = np.linspace(0, 8, 256)
    y = np.array([sin(np.pi/4*t) for t in x])
    plt.ylim(-1.2, 6.2)
    plt.plot(x, y)
    plt.show()
    y=y.astype(np.float64)
    Y = np.matmul(DFT_Matrix, y)
    y_idx = np.linspace(0, 2 * pi, 256)
    plt.plot(y_idx, np.absolute(Y))
    # 128-point frequency response of a rectangular (all-ones) window.
    dd = 128
    nature1 = np.arange(dd)
    H_0 = np.exp(-(2 * pi * nature1 / dd) * 1j)
    W = np.exp((2 * pi / dd) * 1j)
    ranges = range(1, dd)
    H_w = np.zeros(dd,np.complex)
    for omega in nature1:
        #tm = W**(-(nature1*omega))
        #tm = np.exp(-(2 * pi * nature1 / dd + omega) * 1j)
        tm = np.exp(-(2 * pi * nature1 * omega / dd) * 1j)
        H_w[omega] = np.sum(tm)
    abs_H = np.abs(H_w)
    plt.figure(figsize=(20, 10))
    plt.plot(ranges, abs_H[1:], 'b--o', label='H(jw)')
    plt.show()
print("Processing observations...") | golfbears/DeepXi | bak/multiphase.py | multiphase.py | py | 18,079 | python | en | code | null | github-code | 36 | [
{
"api_name": "functools.partial",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "tensorflow.python.ops.signal.window_ops.hamming_window",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.python.ops.signal.window_ops",
"line_number": 28,
... |
21420497711 | import torch
import torch.nn as nn
import torch.nn.functional as F
## Defining the network
# Base channel count of the first encoder level (doubled at each depth).
Hidden_layer = 64
# Spatial / temporal 3-D convolution kernel sizes and paddings.
Conv_kernel = 3
Conv_kerenl_time = 3
Padd_space = 1
Padd_time = 1
# Dropout probability applied after every convolution block.
drop_out_level = 0.15
Bias = True
class Net(nn.Module):
    """3-D U-Net-style residual network: two encoder levels, a bottleneck,
    two decoder levels with skip connections, and a final 1x1x1 projection
    added back onto the input (residual learning)."""
    def __init__(self):
        super(Net,self).__init__()
        #down layer 1 (1 -> Hidden_layer channels)
        self.conv_DL1 = torch.nn.Sequential()
        self.conv_DL1.add_module("Conv_DL1",nn.Conv3d(1,Hidden_layer,(Conv_kernel,Conv_kernel,Conv_kerenl_time), padding = (Padd_space,Padd_space,Padd_time), stride = 1,bias = Bias))
        self.conv_DL1.add_module("BN1_DL1",nn.BatchNorm3d(Hidden_layer))
        self.DropOut1 = nn.Dropout3d(p=drop_out_level,inplace=True)
        self.conv_DL1_v2 = torch.nn.Sequential()
        self.conv_DL1_v2.add_module("Conv_DL1_v2",nn.Conv3d(Hidden_layer,Hidden_layer,(Conv_kernel,Conv_kernel,Conv_kerenl_time), padding = (Padd_space,Padd_space,Padd_time), stride = 1,bias = Bias))
        self.conv_DL1_v2.add_module("BN1_DL1_v2",nn.BatchNorm3d(Hidden_layer))
        self.DropOut2 = nn.Dropout3d(p=drop_out_level,inplace=True)
        # max pooling layer (halves all three dimensions)
        self.conv_MP1 = torch.nn.Sequential()
        self.conv_MP1.add_module("Max Pool 1",nn.MaxPool3d((2,2,2),stride = (2,2,2)))
        #down layer 2 (Hidden_layer -> 2*Hidden_layer channels)
        self.conv_DL2 = torch.nn.Sequential()
        self.conv_DL2.add_module("Conv_DL2",nn.Conv3d(Hidden_layer,Hidden_layer*2,(Conv_kernel,Conv_kernel,Conv_kerenl_time), padding = (Padd_space,Padd_space,Padd_time), stride = 1,bias = Bias))
        self.conv_DL2.add_module("BN1_DL2",nn.BatchNorm3d(Hidden_layer*2))
        self.DropOut3 = nn.Dropout3d(p=drop_out_level,inplace=True)
        self.conv_DL2_v2 = torch.nn.Sequential()
        self.conv_DL2_v2.add_module("Conv_DL2_v2",nn.Conv3d(Hidden_layer*2,Hidden_layer*2,(Conv_kernel,Conv_kernel,Conv_kerenl_time), padding = (Padd_space,Padd_space,Padd_time), stride = 1,bias = Bias))
        self.conv_DL2_v2.add_module("BN1_DL2_v2",nn.BatchNorm3d(Hidden_layer*2))
        self.DropOut4 = nn.Dropout3d(p=drop_out_level,inplace=True)
        # max pooling layer
        self.conv_MP2 = torch.nn.Sequential()
        self.conv_MP2.add_module("Max Pool 2",nn.MaxPool3d((2,2,2),stride = (2,2,2)))
        #bottleneck / down layer 3 (2*Hidden_layer -> 4*Hidden_layer channels)
        self.conv_DL3 = torch.nn.Sequential()
        self.conv_DL3.add_module("Conv_DL3",nn.Conv3d(Hidden_layer*2,Hidden_layer*4,(Conv_kernel,Conv_kernel,Conv_kerenl_time), padding = (Padd_space,Padd_space,Padd_time), stride = 1,bias = Bias))
        self.conv_DL3.add_module("BN1_DL3",nn.BatchNorm3d(Hidden_layer*4))
        self.DropOut5 = nn.Dropout3d(p=drop_out_level,inplace=True)
        self.conv_DL3_v2 = torch.nn.Sequential()
        self.conv_DL3_v2.add_module("Conv_DL3_v2",nn.Conv3d(Hidden_layer*4,Hidden_layer*4,(Conv_kernel,Conv_kernel,Conv_kerenl_time), padding = (Padd_space,Padd_space,Padd_time), stride = 1,bias = Bias))
        self.conv_DL3_v2.add_module("BN1_DL3_v2",nn.BatchNorm3d(Hidden_layer*4))
        self.DropOut6 = nn.Dropout3d(p=drop_out_level,inplace=True)
        # Conv Transpose (upsample back to level-2 resolution)
        self.convT1 = nn.ConvTranspose3d(Hidden_layer*4,Hidden_layer*2,(2,2,2),stride = (2,2,2))
        #up layer 1 (input is concat of skip + upsampled: 4*Hidden_layer)
        self.conv_UP1 = torch.nn.Sequential()
        self.conv_UP1.add_module("Conv_UP1",nn.Conv3d(Hidden_layer*4,Hidden_layer*2,(Conv_kernel,Conv_kernel,Conv_kerenl_time), padding = (Padd_space,Padd_space,Padd_time), stride = 1,bias = Bias))
        self.conv_UP1.add_module("BN1_UP1",nn.BatchNorm3d(Hidden_layer*2))
        self.DropOut7 = nn.Dropout3d(p=drop_out_level,inplace=True)
        self.conv_UP1_v2 = torch.nn.Sequential()
        self.conv_UP1_v2.add_module("Conv_UP1_v2",nn.Conv3d(Hidden_layer*2,Hidden_layer*2,(Conv_kernel,Conv_kernel,Conv_kerenl_time), padding = (Padd_space,Padd_space,Padd_time), stride = 1,bias = Bias))
        self.conv_UP1_v2.add_module("BN1_UP1_v2",nn.BatchNorm3d(Hidden_layer*2))
        self.DropOut8 = nn.Dropout3d(p=drop_out_level,inplace=True)
        # Conv Transpose (upsample back to level-1 resolution)
        self.convT2 = nn.ConvTranspose3d(Hidden_layer*2,Hidden_layer,(2,2,2),stride = (2,2,2))
        #up layer 2 (input is concat of skip + upsampled: 2*Hidden_layer)
        self.conv_UP2 = torch.nn.Sequential()
        self.conv_UP2.add_module("Conv_UP2",nn.Conv3d(Hidden_layer*2,Hidden_layer,(Conv_kernel,Conv_kernel,Conv_kerenl_time), padding = (Padd_space,Padd_space,Padd_time), stride = 1,bias = Bias))
        self.conv_UP2.add_module("BN1_UP2",nn.BatchNorm3d(Hidden_layer))
        self.DropOut9 = nn.Dropout3d(p=drop_out_level,inplace=True)
        self.conv_UP2_v2 = torch.nn.Sequential()
        self.conv_UP2_v2.add_module("Conv_UP2_v2",nn.Conv3d(Hidden_layer,Hidden_layer,(Conv_kernel,Conv_kernel,Conv_kerenl_time), padding = (Padd_space,Padd_space,Padd_time), stride = 1,bias = Bias))
        self.conv_UP2_v2.add_module("BN1_UP2_v2",nn.BatchNorm3d(Hidden_layer))
        self.DropOut10 = nn.Dropout3d(p=drop_out_level,inplace=True)
        #Final layer: 1x1x1 projection back to a single channel
        self.conv_final = torch.nn.Sequential()
        self.conv_final.add_module("Conv Final", nn.Conv3d(Hidden_layer,1,(1,1,1),padding = (0,0,0),stride = 1,bias = Bias))
    def forward(self,x):
        """U-Net forward pass; output is the input plus a learned residual."""
        # Encoder level 1, then pool.
        x_down1 = F.relu(self.DropOut1(self.conv_DL1.forward(x)))
        x_down1_v2 = F.relu(self.DropOut2(self.conv_DL1_v2.forward(x_down1)))
        x_MaxPool = self.conv_MP1.forward(x_down1_v2)
        # Encoder level 2, then pool.
        x_down2 = F.relu(self.DropOut3(self.conv_DL2.forward(x_MaxPool)))
        x_down2_v2 = F.relu(self.DropOut4(self.conv_DL2_v2.forward(x_down2)))
        x_MaxPool_v2 = self.conv_MP2.forward(x_down2_v2)
        # Bottleneck.
        x_down3 = F.relu(self.DropOut5(self.conv_DL3.forward(x_MaxPool_v2)))
        x_down3_v2 = F.relu(self.DropOut6(self.conv_DL3_v2.forward(x_down3)))
        # Decoder level 1: upsample, concat level-2 skip, convolve.
        x_up1_ConvT = self.convT1(x_down3_v2,output_size = x_down2_v2.size())
        x_down2_up1_stack = torch.cat((x_down2_v2,x_up1_ConvT),1)
        x_up1 = F.relu(self.DropOut7(self.conv_UP1.forward(x_down2_up1_stack)))
        x_up1_v2 = F.relu(self.DropOut8(self.conv_UP1_v2.forward(x_up1)))
        # Decoder level 2: upsample, concat level-1 skip, convolve.
        x_up2_ConvT = self.convT2(x_up1_v2,output_size = x_down1_v2.size())
        x_down1_up2_stack = torch.cat((x_down1_v2,x_up2_ConvT),1)
        x_up2 = F.relu(self.DropOut9(self.conv_UP2.forward(x_down1_up2_stack)))
        x_up2_v2 = F.relu(self.DropOut10(self.conv_UP2_v2.forward(x_up2)))
        # Residual connection: add the projected features to the input.
        output = x+self.conv_final.forward(x_up2_v2)
return output | HMS-CardiacMR/DRAPR | InLineIntegration/network_arch.py | network_arch.py | py | 6,361 | python | en | code | 15 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"lin... |
74062226345 | import argparse
from fauxcaml import build
def create_parser():
    """Build the command-line parser for the fauxcamlc compiler."""
    parser = argparse.ArgumentParser(
        prog="fauxcamlc",
        description="Compiles an OCaml source file to an x86-64 executable.",
        epilog="project homepage: https://github.com/eignnx/fauxcaml",
    )
    # Positional input file.
    parser.add_argument(
        "source_file",
        metavar="SRC",
        type=str,
        help="the file to compile",
    )
    # Optional output executable name; None lets the build pick a default.
    parser.add_argument(
        "-o",
        dest="exe_file",
        metavar="EXE",
        type=str,
        default=None,
        help="the name of the executable to create",
    )
    return parser
if __name__ == "__main__":
    # Parse CLI arguments and hand them to the compiler driver.
    cli_args = create_parser().parse_args()
    build.compile_from_source_file(cli_args.source_file, cli_args.exe_file)
| eignnx/fauxcaml | fauxcaml/__main__.py | __main__.py | py | 757 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "fauxcaml.build.compile_from_source_file",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "fauxcaml.build",
"line_number": 35,
"usage_type": "name"
}
] |
18252831721 | from typing import List
class Solution:
    def reconstructQueue(self, people: List[List[int]]) -> List[List[int]]:
        """Rebuild the queue so each person [h, k] has exactly k people of
        height >= h in front of them.

        Sort by height descending (ties broken by k ascending), then insert
        each person at index k: everyone already placed is at least as
        tall, so the k-invariant holds after every insertion. O(n^2) due to
        list inserts. Unlike the previous version, the caller's list is no
        longer sorted in place.
        """
        queue: List[List[int]] = []
        for person in sorted(people, key=lambda p: (-p[0], p[1])):
            queue.insert(person[1], person)
        return queue
solution = Solution()
people = [[7,0],[4,4],[7,1],[5,0],[6,1],[5,2]]
assert solution.reconstructQueue(people) == [[5,0],[7,0],[5,2],[6,1],[4,4],[7,1]], "Should be [[5,0],[7,0],[5,2],[6,1],[4,4],[7,1]]" | hujienan/Jet-Algorithm | leetcode/406. Queue Reconstruction by Height/index.py | index.py | py | 465 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 5,
"usage_type": "name"
}
] |
38380439299 | import matplotlib.pyplot as plt
import numpy as np
import netCDF4
def plot( file,ofile=None):
    """Contour-plot the primary variable of a NetCDF file on a lat/lon grid.

    The variable name is taken from the filename prefix (text before the
    first underscore). Saves to ofile when given, otherwise shows the
    figure interactively. The dataset handle is now always closed.
    """
    nc = netCDF4.Dataset( file )
    try:
        fn = file.rpartition("/")[-1]
        label = fn.split("_")[0]
        var = nc.variables[label]
        long_name = var.long_name
        units = var.units
        # Collapse a leading (e.g. time) dimension to get a 2-D field.
        if len(var.shape) > 2:
            print ( var.dimensions )
            var = var[0,:,:]
        lat = nc.variables["lat"]
        lon = nc.variables["lon"]
        fig = plt.figure(figsize=(6,5))
        left, bottom, width, height = 0.1, 0.1, 0.8, 0.8
        ax = fig.add_axes([left, bottom, width, height])
        X, Y = np.meshgrid(lon, lat )
        cp = plt.contourf(X[:], Y[:], var[:])
        plt.colorbar(cp)
        ax.set_title("%s: %s [%s]" % (label,long_name,units))
        ax.set_xlabel('Longitude')
        ax.set_ylabel('Latitude')
        # `is not None` replaces the `!= None` comparison.
        if ofile is not None:
            plt.savefig( ofile )
        else:
            plt.show()
    finally:
        # Previously the dataset handle was never released.
        nc.close()
if __name__ == "__main__":
    import sys
    # With no arguments, plot the bundled sample file; otherwise take the
    # input path and an optional output image path from the command line.
    if len(sys.argv) == 1:
        plot( "../esgf_fetch/data_files/sftlf_fx_MIROC-ES2L_historical_r1i1p1f2_gn.nc" )
    else:
        source = sys.argv[1]
        target = sys.argv[2] if len(sys.argv) == 3 else None
        plot(source, ofile=target)
| cp4cds/cmip6_range_check_old | scripts/plot2.py | plot2.py | py | 1,090 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "netCDF4.Dataset",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "numpy.meshg... |
7683932011 | # ----------------------------------------------------------------------------------------
# prepare environment (boilerplate)
# import the required packages using their usual aliases
import dash
from dash import dcc, html, Input, Output, State
import dash_bootstrap_components as dbc
import plotly.graph_objects as go
import plotly.express as px
import pandas as pd
import humanize
import os
# read token string with your access mapbox token from a hidden file
# saved in environment's root directory same as where this app.py file is
# if you're using GitHub make sure to add '*.mapbox_token' to your .gitignore file
# to prevent your private credentials from being publicly viewed or uploaded to GitHub
mapbox_access_token = os.environ.get('MAPBOX_ACCESS_TOKEN')
# ----------------------------------------------------------------------------------------
# -- call the data
# -- read the food trade matrix data into pandas from CSV file of 2019 export quantities (exported from analysis in Jupyter Notebook)
# prepared using original dataset FAOSTAT Detailed trade matrix: All Data Normalized from https://fenixservices.fao.org/faostat/static/bulkdownloads/Trade_DetailedTradeMatrix_E_All_Data_(Normalized).zip
# with appended key demographics from FAOSTAT Key dataset (in Jupyter Notebook)
# # full dataset
dffood = pd.read_csv('./data/dffood.csv')
# -- read the 4.5 depth soil organic carbon density (%) measurements pre-filtered for audience China's and U.S.'s food's trade export Reporter Countries (exported from analysis in Jupyter Notebook)
# prepared using original dataset Soil organic carbon density: SOCD5min.zip from http://globalchange.bnu.edu.cn/research/soilw
# with appended country name and ISO3 code from GeoPandas embedded World dataset
dfsoil = pd.read_csv('./data/dfsoil_subUSCN_prod.csv')
# ----------------------------------------------------------------------------------------
# create (instantiate) the app,
# using the Bootstrap MORPH theme, Slate (dark) or Flatly (light) theme or Darkly (its dark counterpart) to align with my llc website in development with Flatly (dadeda.design)
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.MORPH],
meta_tags=[{'name': 'viewport',
# initial-scale is the initial zoom on each device on load
'content': 'width=device-width, initial-scale=1.0, maximum-scale=1.2, minimum-scale=0.5'}]
)
server = app.server
app.title = 'Sustain-Our-Soil-for-Our-Food'
# ----------------------------------------------------------------------------------------
# named variables for the app's layout
navbar = dbc.NavbarSimple(
children=[
dbc.DropdownMenu(
children=[
dbc.DropdownMenuItem("Email", href="mailto:kathryn@dadeda.design?subject=Sustain our Soil for our Food", target='_blank'), # mailto link, github issues, and/or "http://kathrynhurchla.com/", target="_blank"),
# submit a gitHub issue (with options of feature request or bug report active at time of prototype deployment)
dbc.DropdownMenuItem("Submit issues or Ideas", href="https://github.com/khurchla/sustain-our-soil-for-our-food/issues/new/choose", target='_blank'),
# link to gitHub repository for readme caveats, data preparation, or to recreate app/opensource code
dbc.DropdownMenuItem("View source code", href="https://github.com/khurchla/sustain-our-soil-for-our-food", target='_blank')
],
nav=True,
in_navbar=True,
label="Contact",
),
dbc.DropdownMenu(
children=[
# placeholder for Twitter button javascript embed # <a href="https://twitter.com/share?ref_src=twsrc%5Etfw" class="twitter-share-button" data-text="Organic carbon occurs naturally in soil, but whether it presents a threat or a service to humans depends on YOU." data-via="khurchla" data-hashtags="dataviz" data-show-count="false">Tweet</a><script async src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
dbc.DropdownMenuItem("Tweet", href="#"),
# placeholder for popular Chinese social media Weibo share URL: http://service.weibo.com/share/share.php?url=http://example.com&appkey=&title=Organic carbon occurs naturally in soil, but whether it presents a threat or a service to humans depends on YOU.&pic=&ralateUid=&language=zh_cn
dbc.DropdownMenuItem("Weibo", href="#"),
],
nav=True,
in_navbar=True,
label="Share",
),
],
brand='Sustain Our Soil for Our Food',
color='#483628', # "dark", #hex code color matching text in graphs, a dark orange brown; "dark" is MORPH theme option and a dark charcoal
dark=True,
class_name="fixed-top",
)
appSubheading = dbc.Container(
html.Div([
html.H5("Organic carbon occurs naturally in soil, but whether it presents a threat or a service to humans depends on YOU.")
])
)
# # empty card to push the info tooltip to the far right
# controlsSpacer = dbc.CardBody(
# html.Div()
# )
tooltip = dbc.CardFooter(
html.Div(children=[
dbc.Button(
"info",
id="info-toolbar-tooltip",
# class_name="mx-2",
n_clicks=0,
size="sm"
),
dbc.Tooltip(
"Use the in toolbar in the upper right corner of the map to zoom, move around, or reset your view.",
target="info-toolbar-tooltip",
placement="left"
),
],
))
learnMore = html.Div(children=[
dbc.Button("Learn more about soil health, and how you can help.", id="learn-more-button", n_clicks=0, color="link", size="md", class_name="btn btn-link"),
dbc.Modal(children=[
dbc.ModalHeader(dbc.ModalTitle("Take Your Curiosity a Step Further.")),
dbc.ModalBody(children=['Copy these suggested key terms by clicking the paper icon beside them or by selecting and copying them directly from within the text area below, and then paste them into your preferred search engine. There are many excellent resources to learn more on your journey as a soil stakeholder.',
html.Br(),
html.Br(),
dcc.Textarea(
id="search_terms_textarea_id",
value='"soil health" OR "soil carbon" OR "soil organic carbon" OR "regenerative agriculture" OR "regenerative grazing"',
style={"heaight": '100%',
"width": 300,
"overflow": "auto"},
),
dcc.Clipboard(
target_id="search_terms_textarea_id",
title="copy",
style={
"display": "inline-block",
"fontSize": 20,
"color": '#483628',
"verticalAlign": "top"
}
)
]
),
dbc.ModalFooter(
dbc.Button(
"Close", id="learn-more-close", className="ms-auto", n_clicks=0
)
),
],
id="modal",
size="lg",
is_open=False,
centered=True,
style={"color": '#483628'}
)
])
whyCarbon = dbc.Card(
html.Div(children=[
html.H5("Carbon has a superpower.",
style={'text-align': 'left'}
),
html.P("Often called the element or giver of life, carbon is critical to life supporting processes because it can bond to many other elements essentially as a building block of large and complex compounds that make up living things––including soil, and the plants and animals in the food chain. Soil organic carbon is left in the soil by the processes collectively called the Carbon Cycle, which includes both the growth and death of plants, animals, and other organisms.",
style={'text-align': 'left'}
),
html.P("Soil organic carbon (SOC) indicates soil's ability to hold water and nutrients that sustain plants in natural and farming settings. As an indicator of soil's overall organic matter, it also builds soil structure that reduces erosion leading to improved water quality and greater resilience from storms.",
style={'text-align': 'left'}
),
html.P("Including its mineral inorganic carbon parts, our soil holds the largest amount of carbon in Earth's ecosystem, and its release––through mismanagement from a lack of knowledge and the removal of forests and wetlands––is a great risk to increasing carbon dioxide in the atmosphere and speeding up climate change.",
style={'text-align': 'left'}
),
html.P("Whether your food comes from across the globe or your own garden, you have an opportunity to restore and ensure soil health to fill bellies all over the world with nutritious foods for years to come. By learning more, you can have an impact on soil health, and together we may even save the world one plate at a time.",
style={'text-align': 'left'}
)
]),
body=True,
color="light",
class_name="card bg-light mb-3"
)
dropdownReporterCountry = dbc.CardBody(
html.Div(children=[
# add a brief instructive subheading as a label
dbc.Label('Choose a trade partner.', style={'text-align': 'left'}
),
# add a dropdown for audience member using app to select a reporter country (their partner who exports the food they've chosen to their country)
dcc.Dropdown(id='reporter_country_dropdown',
options=[{'label': country, 'value': country}
# series values needed to be sorted first before taking unique to prevent errors
for country in dfsoil['Reporter_Country_name'].sort_values().unique()],
placeholder='Trade Partner',
searchable=True,
clearable=True, # shows an 'X' option to clear selection once selection is made
persistence=True, # True is required to use a persistence_type
persistence_type='session', # remembers dropdown value selection until browser tab is closed (saves after refresh)
multi=False, # do not allow multiple country selections (default); doing so would require more code development in callback function
style={"width": "75%"}
)
])
)
controls = html.Div(children=[
dbc.CardGroup([dropdownReporterCountry, tooltip], class_name="card border-primary bg-light mb-2")
]
)
mapExplorer = dbc.Card([
html.Div(children=[
html.P('Explore how much of the soil where your food comes from is made up of organic carbon.',
className="lead"
),
html.Div(controls),
# # map format without spinner for reference
# html.Div(id='map-socd',
# ),
# add a loading spinner to the map
dbc.Spinner(id='map-socd', size="lg", color="primary", type="border", fullscreen=False
),
]),
html.Br(),
html.Div(children=[
html.P("Dots on the map vary in size by the location's soil organic carbon density (SOCD), which can be understood as how much of the soil is made up of organic carbon, from the ground surface down to 4.5 centimeters deep. These density estimates are by global leading scientists from the available worldwide soil data––collected and mathematically modelled––and are expressed in metric tonnes per hectare (t ha-1), which are equal to about 1,000 kilograms or aproximately 2,205 pounds.",
style={'text-align': 'left'}),
html.P("Read more about carbon's importance in soil below.",
style={'text-align': 'left'}),
html.P(children=[
"Data source: Shangguan, W., Dai, Y., Duan, Q., Liu, B. and Yuan, H., 2014. A Global Soil Data Set for Earth System Modeling. Journal of Advances in Modeling Earth Systems, ",
html.A("6: 249-263.",
href='https://agupubs.onlinelibrary.wiley.com/doi/full/10.1002/2013MS000293',
target='_blank' # opens link in new tab or window
)
],
style={'text-align': 'left'}),
]),
# html.Br()
], body=True)
# --------------------------SOIL BAR graph--------------------------
# take the mean SOCD by grouping soil dataframe by Country and append the mean as a column
dfsoil['SOCDcountryMean'] = dfsoil['Reporter_Country_SOCD_depth4_5'].groupby(dfsoil['Reporter_Country_name']).transform('mean')
# drop the raw SOCD values from the subset of soil data; used in density ranges bar chart
dfsoilMeans = dfsoil.drop_duplicates(subset=['Reporter_Country_name', 'Reporter_Country_continent', 'SOCDcountryMean', 'Reporter_Country_pop_est']).drop(['Reporter_Country_SOCD_depth4_5'], axis=1).sort_values(by=['SOCDcountryMean', 'Reporter_Country_continent', 'Reporter_Country_name'], ascending=(False, True, True))
dfsoilMeansMaxOrder = ['Africa', 'Oceania', 'South America', 'Asia', 'North America', 'Europe']
# make numbers into a more human readable format, e.g., transform 12345591313 to '12.3 billion' for hover info
dfsoilMeans['humanPop'] = dfsoilMeans['Reporter_Country_pop_est'].apply(lambda x: humanize.intword(x))
# make a bar chart showing range of mean by countries, overlay countries within continent group to retain mean y axis levels
rangeSOCDfig = px.bar(dfsoilMeans, x='Reporter_Country_continent', y='SOCDcountryMean', color='SOCDcountryMean', barmode='overlay',
# set bolded title in hover text, and make a list of columns to customize how they appear in hover text
custom_data=['Reporter_Country_name',
'Reporter_Country_continent',
'SOCDcountryMean',
'humanPop'
],
color_continuous_scale=px.colors.sequential.speed, # alternately use turbid for more muted yellows to browns (speed for yellow to green to black scale)
# a better label that will display over color legend
labels={'SOCDcountryMean': 'Avg.<br>SOCD'},
# lower opacity to help see variations of color between countries as means change
opacity=0.20
)
# sort bars by mean SOCD, and suppress redundant axis titles, instead of xaxis={'categoryorder': 'mean ascending'} I pre-sorted the dataframe above, but still force sort here by explicit names
rangeSOCDfig.update_layout(xaxis={'categoryorder': 'array', 'categoryarray': dfsoilMeansMaxOrder},
xaxis_title=None, yaxis_title=None, # removed xaxis_tickangle=-45, # used to angle longer/more xaxis labels
paper_bgcolor='#e8ece8', # next tint variation up from a low tint of #dadeda
plot_bgcolor='#f7f5fc', # violet tone of medium purple to help greens pop forward
yaxis={'gridcolor': '#e8ece8'}, # match grid lines shown to background to appear as showing through
font={'color': '#483628'}) # a dark shade of orange that appears dark brown
rangeSOCDfig.update_traces(
hovertemplate="<br>".join([
"<b>%{customdata[0]} </b><br>", # bolded hover title included, since the separate hover_name is superseced by hovertemplae
"%{customdata[1]}", # Continent value with no label
"Average SOCD: %{customdata[2]:.1f} t ha<sup>−1</sup>", # with html <sup> superscript tag in abbr. metric tonnes per hectare (t ha-1) t ha<sup>−1</sup> formatted to 2 decimals
"Estimated Population (2019): %{customdata[3]} people" # in humanized format
])
)
densityRanges = dbc.Card([
html.Div(children=[
html.H5("Range of Average Soil Organic Carbon Density (SOCD) Worldwide"
),
dcc.Graph(figure=rangeSOCDfig,
id="SOCD-bar-chart",
config={'displayModeBar': True, 'scrollZoom': True}
)
]),
html.Br(),
html.Div(children=[
html.P("Bars show the range of soil organic carbon density on land as a mean average within each country in metric tonnes per hectare (t ha-1), which are equal to about 1,000 kilograms or aproximately 2,205 pounds. Hover over any bar to view details for specific countries.",
style={'text-align': 'left'}),
html.P(children=[
"Data source: Shangguan, W., Dai, Y., Duan, Q., Liu, B. and Yuan, H., 2014. A Global Soil Data Set for Earth System Modeling. Journal of Advances in Modeling Earth Systems, ",
html.A("6: 249-263.",
href='https://agupubs.onlinelibrary.wiley.com/doi/full/10.1002/2013MS000293',
target='_blank' # opens link in new tab or window
)
],
style={'text-align': 'left'}),
]),
html.Br()
], body=True)
# --------------------------FOOD TRADE graph--------------------------
# take the sum total of exported tonnes by grouping food dataframe by Partner (importing) Country and append the sum as a column
dffood['Export_Quantity_Sum'] = dffood['Export_Quantity_2019_Value_tonnes'].groupby(dffood['Partner_Country_name']).transform('sum')
# take the distinct count of exported items by grouping food dataframe by Reporter (exporting) Country and append the count as a column
dffood['Export_Items_Count'] = dffood['Item'].groupby(dffood['Partner_Country_name']).transform('nunique')
# make numbers into a more human readable format, e.g., transform 12345591313 to '12.3 billion' for hover info
dffood['tradeVolume'] = dffood['Export_Quantity_Sum'].apply(lambda x: humanize.intword(x))
# food data scatterplot points
RiskFoodsFig = px.scatter(dffood, x='Export_Items_Count', y='Export_Quantity_Sum', size='Export_Quantity_Sum',
custom_data=['Partner_Country_name', # 'Reporter_Country_name_x',
'Export_Quantity_Sum',
'Export_Items_Count'
]
)
# sort bars by mean SOCD, and suppress redundant axis titles, instead of xaxis={'categoryorder': 'mean ascending'} I pre-sorted the dataframe above, but still force sort here by explicit names
RiskFoodsFig.update_layout(
xaxis_title='Diversity of Foods Imported (How many unique items?)', # Exported (How many unique items?)',
# move yaxis text to title area for readability; add empty line above it so it appears below the plotly toolbar options
title={
'text': 'Volume as Total Quantity of Foods Imported (tonnes)',
'xref': 'container',
},
yaxis_title='', # moved to title attribute for readability
paper_bgcolor='#e8ece8', # next tint variation up from a low tint of #dadeda
plot_bgcolor='#f7f5fc', # violet tone of medium purple to help greens pop forward
yaxis={'gridcolor': '#e8ece8'}, # match grid lines shown to background to appear as showing through
font={'color': '#483628'}) # a dark shade of orange that appears dark brown
RiskFoodsFig.update_traces(
# hard code single point color
marker=dict(
color='#a99e54',
sizemin=10
),
# set bolded title in hover text, and make a list of columns to customize how they appear in hover text
hovertemplate="<br>".join([
"<b>%{customdata[0]} </b><br>", # bolded hover title included, since the separate hover_name is superseced by hovertemplae
"Trade Volume: %{customdata[1]:,} tonnes imported", # %{customdata[2]:,} tonnes exported", # note html tags can be used in string; comma sep formatted; note with tradeVolume use format .1f to 1 decimals
"Trade Diversity: %{customdata[2]:} unique food products imported" # %{customdata[3]:} unique food products exported",
])
)
riskFoods = dbc.Card([
html.Div(children=[
html.H5("Food Security Risk Analysis by Volume & Diversity of Food Trade Reliance"
),
dcc.Graph(figure=RiskFoodsFig,
id="food-quadrant-chart",
config={'displayModeBar': True, 'scrollZoom': True}
)
]),
html.Br(),
html.Div(children=[
html.P("Points show where each country falls in relation to these two major trade metrics as indicators of risk for a country's ability to feed its population. Countries in the upper right corner can generally be understood to be most at risk if food trade lines are affected by decreased production.",
style={'text-align': 'left'}),
html.P("All food products traded between countries are included in the total summary of items imported, in 2019, as measured in metric tonnes (vertical axis showing range with M representing millions of tonnes). While soil organic carbon content is a major factor determining agricultural productivity, those levels are not directly shown in this graph and there are many factors that can lead to trade volatility", # The major grid lines dividing the four sections are set at the median, in other words the middle, of that range of global values as a benchmark to divide high or low in population and trade dependency, in relation to other countries.",
style={'text-align': 'left'}),
html.P(children=["Food and Agriculture Organization of the United Nations. (2020). FAOSTAT Detailed trade matrix: All Data Normalized. ",
html.A('https://www.fao.org/faostat/en/#data/TM',
href='https://www.fao.org/faostat/en/#data/TM',
target="_blank" # opens link in new tab or window
)
],
style={'text-align': 'left'}
)
]),
html.Br()
], body=True)
tab1 = dbc.Tab([densityRanges], label="Density Ranges")
tab2 = dbc.Tab([riskFoods], label="At Risk Foods")
tab3 = dbc.Tab([whyCarbon], label="Why Carbon?")
tabs = dbc.Tabs(children=[tab1, tab2, tab3])
# create the app's layout with the named variables
app.layout = dbc.Container(
[
dbc.Row(
[
dbc.Col(navbar,
width=12)
]
),
dbc.Row(
[
dbc.Col(appSubheading,
width={"size": "auto", "offset": 0},
md={"size": "auto", "offset": 1},
xxl={"size": "auto", "offset": 2}
),
],
justify="left",
style={"padding-top": 95, "padding-bottom": 0}
),
dbc.Row(
[
dbc.Col(mapExplorer,
width={"size": 11, "offset": 0}
)
],
justify="center",
style={"padding-top": 10, "padding-bottom": 25}
),
dbc.Row(
[
dbc.Col(learnMore,
width={'size': 9, 'offset': 2}, md={'size': 5, 'offset': 6}
)
],
style={"padding-top": 10, "padding-bottom": 10}
),
dbc.Row(
[
dbc.Col(html.Br(),
width=12
)
]
),
dbc.Row(
[
dbc.Col(
dbc.Container(
tabs),
width={"size": 11, "offset": 0}
)
],
justify="center",
),
dbc.Row(
html.Div(children=[
html.Br(),
html.Br(),
html.Footer(children=[
html.A(u"\u00A9"+" Kathryn Hurchla 2021",
href="http://kathrynhurchla.com",
target="_blank",
style={'width': '100%', 'display': 'flex', 'align-items': 'center', 'justify-content': 'center'}
),
], className="text-muted",
),
],
),
),
],
fluid=True,
className="dbc"
)
# ----------------------------------------------------------------------------------------
# callback decorators and functions
# connecting the Dropdown values to the graph
# simple selection on country directly
@app.callback(
Output('map-socd', 'children'),
[Input('reporter_country_dropdown', 'value')]
)
def update_selected_reporter_country(selected_reporter_country):
# always make a copy of any dataframe to use in the function
# define the subset of data that matches the selected values from both dropdown(s)
dfsoil_sub = dfsoil
# filter dataframe with geo points for single selection multi=False (default)
dfsoil_sub1 = dfsoil_sub[(dfsoil_sub['Reporter_Country_name'] == selected_reporter_country)]
# create figure variables for the graph object
locations = [go.Scattermapbox(
name='SOCD at Surface Depth to 4.5cm',
lon=dfsoil_sub1['Reporter_Country_lon'],
lat=dfsoil_sub1['Reporter_Country_lat'],
mode='markers',
marker=go.scattermapbox.Marker(
size=dfsoil_sub['Reporter_Country_SOCD_depth4_5'],
# add a sequential color scale based on shades of fuschia #ff00ff
# bright hues range for contrast to map background layer
# to more easily differentiate each separate point on map
color=dfsoil_sub['Reporter_Country_SOCD_depth4_5'],
colorscale='Agsunset_r',
# show a colorbar for this colorscale range
showscale=True,
colorbar=dict(title="SOCD"
),
opacity=0.8, # float or integer range between 0 and 1
),
hovertemplate="Longitude: %{lon}<br>" + "Latitude: %{lat}<br><extra></extra>" # hide secondary tag with empty extra tag
)
]
# add a mapbox image layer below the data
layout = go.Layout(
# commented out uirevision to allow map to reset zoom level to default when selection is changed
# uirevision='foo', # to preserve state of figure/map after callback activated
# match background behind color legend to the page area graph sit on
paper_bgcolor='#e4ebf5', # Morph theme card background color,
font=dict(color='#483628'), # a dark shade of orange that appears dark brown
clickmode='event+select',
hovermode='closest',
hoverdistance=2,
mapbox=dict(
accesstoken=mapbox_access_token,
style='white-bg'
),
autosize=True,
margin=dict(l=0, r=0, t=35, b=0),
mapbox_layers=[
{
'below': 'traces',
'sourcetype': 'raster',
'source': [
"https://basemap.nationalmap.gov/arcgis/rest/services/USGSImageryOnly/MapServer/tile/{z}/{y}/{x}"
]
}
]
)
# Return figure
return dcc.Graph(config={'displayModeBar': True, 'scrollZoom': True},
figure={
'data': locations,
'layout': layout
})
# connect theLearn More button and modal with user interactions
@app.callback(
Output("modal", "is_open"),
[Input("learn-more-button", "n_clicks"), Input("learn-more-close", "n_clicks")],
[State("modal", "is_open")],
)
def toggle_modal(n1, n2, is_open):
if n1 or n2:
return not is_open
return is_open
# ----------------------------------------------------------------------------------------
# run the app
if __name__ == '__main__':
app.run_server(debug=True) # if inside Jupyter Notebook, add use_reloader=False inside parens to turn off reloader
| khurchla/sustain-our-soil-for-our-food-prod | app.py | app.py | py | 29,394 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.environ.get",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
... |
4593625107 | import cv2
import json
import numpy as np
import matplotlib.pyplot as plt
from itertools import count
def put_speed_on_video(mp4_path, pred_text_path, act_text_path):
pred_speed_list = np.around(np.loadtxt(pred_text_path), decimals=1)
act_speed_list = np.around(np.loadtxt(act_text_path), decimals=1)[1:]
video = cv2.VideoCapture(mp4_path)
video.set(1, 1)
font = cv2.FONT_HERSHEY_SIMPLEX
out = cv2.VideoWriter('./docs/demos/demo.mp4', 0x7634706d, 20, (640, 480))
for t in count():
ret, frame = video.read()
if ret == False or t >= len(pred_speed_list):
break
pred_curr_speed = pred_speed_list[t]
act_curr_speed = act_speed_list[t]
cv2.putText(frame,
f'Speed (m/s): {pred_curr_speed}',
(50, 50),
font,
0.7,
(242, 23, 161),
2,
cv2.LINE_4)
cv2.putText(frame,
f'Error: {round(pred_curr_speed - act_curr_speed, 1)}',
(50, 80),
font,
0.7,
(82, 51, 255),
2,
cv2.LINE_4)
out.write(frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
video.release()
out.release()
cv2.destroyAllWindows()
def parse_logs(log_file_path):
train_loss = []
val_loss = []
with open(log_file_path, 'r') as file:
for line in file:
line = line.replace("\'", "\"")
line_dict = json.loads(line)
train_loss.append(line_dict['train_epoch_loss'])
val_loss.append(line_dict['eval_epoch_loss'])
return train_loss, val_loss
def graph_loss(log_file_path_farn, log_file_path_pwc):
farn_train_loss, farn_val_loss = parse_logs(log_file_path_farn)
pwc_train_loss, pwc_val_loss = parse_logs(log_file_path_pwc)
with plt.style.context('seaborn-muted'):
_, ax = plt.subplots(figsize=(20,6))
ax.plot(range(1, len(farn_train_loss)+1), farn_train_loss, alpha=0.7, linewidth=3, label='Farneback Train Loss')
ax.plot(range(1, len(farn_train_loss)+1), farn_val_loss, alpha=0.7, linewidth=3, label='Farneback Eval Loss')
ax.plot(range(1, len(pwc_train_loss)+1), pwc_train_loss, alpha=0.7, linewidth=3, label='PWC Train Loss')
ax.plot(range(1, len(pwc_train_loss)+1), pwc_val_loss, alpha=0.7, linewidth=3, label='PWC Eval Loss')
ax.set_xticks(range(1, len(pwc_train_loss)+1))
ax.set_xlabel('Epochs')
ax.set_ylabel('MSE Loss')
ax.legend()
plt.savefig('./docs/readme_media/loss.png')
if __name__ == '__main__':
put_speed_on_video('./data/train/train.mp4', './docs/demos/pred_test.txt', './data/train/train.txt')
# graph_loss('./training_logs/farneback.log', './training_logs/pwc.log')
| antoninodimaggio/Voof | demo_utils.py | demo_utils.py | py | 2,797 | python | en | code | 65 | github-code | 36 | [
{
"api_name": "numpy.around",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.loadtxt",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.around",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.loadtxt",
"line_number... |
40962236742 | #!/usr/bin/env python
# _*_ coding:utf-8 _*_
import json
from flask import Flask,request
from base.base import *
app = Flask(__name__)
@app.before_request
def before_request():
if request.method == 'POST' and request.form.get("name"):
name=request.form.get("name")
if existfile(name):
if ran(5):
return readcache(name)
elif request.args.get("name"):
name = request.args.get("name")
if existfile(name):
if ran(5):#概率5-->5%的概率更新缓存
return readcache(name),{"Content-Type":"application/json","server":"qq","time":"Hello"}
@app.after_request
def after_request(environ):
if True:#文件缓存
data=environ.data.decode('UTF-8')
if request.method == 'POST' and request.form.get("name"):
name=request.form.get("name")
writecache(name,data)
elif request.args.get("name"):
name = request.args.get("name")
writecache(name,data)
return environ
@app.route('/')
def hello_world():
return '调用方式/api?name=视频名称[GET|POST] return JSON'
@app.route('/api',methods=['GET','POST'])
def api():
if request.method=='POST':
name=request.form.get("name")
data=json.dumps(run(name))
return data
else:
name = request.args.get("name")
data=run(name)
jsondata = json.dumps(data)
return jsondata,{"Content-Type":"application/json","server":"qq"}
if __name__ == '__main__':
app.run(host='127.0.0.1',port=8080,threaded=True,debug=True)
#app.run(host='0.0.0.0', port=8081, threaded=True, debug=False) | 1185714392/moviesearch | app.py | app.py | py | 1,710 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "flask.request.form... |
29572532106 | """ Notes
-Need to have opencv built with gstreamer support
print(cv2.getBuildInformation())
-Set Xavier to max power:
    (do it manually, or provide the sudo password as script arg -p PASSWORD)
sudo nvpmodel -m 0
sudo jetson_clocks
-JTOP - helpful activity monitor
sudo apt-get install python3-pip -y
sudo python3 -m pip install --upgrade pip
sudo pip3 install -U jetson-stats
sudo reboot
-testcard output to MUX - NVENC chip should light up
gst-launch-1.0 videotestsrc ! video/x-raw ! nvvidconv ! nvv4l2h264enc maxperf-enable=1 ! h264parse ! flvmux streamable=true ! queue ! rtmpsink location='rtmp://global-live.mux.com:5222/app/51bc0427-ad29-2909-4979-11ee335d2b53'
-to read
https://github.com/dusty-nv/jetson-inference/blob/master/docs/aux-image.md
https://github.com/Fuell-ai/acm/blob/jetcam_bits/jetcam/functions/nvidia_gpu_buff_share.py
"""
import cv2
import time
import math
import vpi
import numpy as np
from contextlib import contextmanager
import math
from jetson_inference import detectNet
import jetson_utils
import threading
import queue
import copy
import json
from datetime import datetime
import subprocess
import argparse
@contextmanager
def time_it(comment):
    """Context manager that prints the wall-clock time spent inside its body.

    Parameters
    ----------
    comment : str
        Label printed in front of the elapsed time, e.g. "VC: detectnet".

    The elapsed time is reported in milliseconds with 3 decimal places.
    """
    tic = time.perf_counter()
    try:
        yield
    finally:
        # finally guarantees the timing line is printed even if the body raises
        toc = time.perf_counter()
        print(f"{comment}: {1000*(toc - tic):.3f}ms")
def gstreamer_out():
    """Return the GStreamer pipeline string used by cv2.VideoWriter.

    appsrc receives raw RGBA frames from OpenCV; nvvidconv/nvv4l2h264enc
    route encoding through the Jetson hardware encoder; the leaky queue
    (default depth 5) drops stale frames downstream so rtmpsink never
    lags behind the live source (sync=false).

    NOTE(review): not tested with real cameras.
    MUX playback ID https://stream.mux.com/vL9SJU61FSv8sSQR01F6ajKI702WeK2pXRuLVtw25zquo.m3u8
    """
    stages = (
        "appsrc",
        "videoconvert",
        "video/x-raw, framerate=(fraction)25/1, format=RGBA",
        "nvvidconv",
        "nvv4l2h264enc",
        "h264parse",
        "flvmux",
        "queue leaky=downstream",
        "rtmpsink location=rtmp://global-live.mux.com:5222/app/"
        "eb27591f-6aa1-aaf9-8be8-978237205f5a sync=false",
    )
    return " ! ".join(stages)
def detector_cuda(inbox, outbox, ID):
    """Worker loop: pull CUDA images off *inbox*, run SSD detection, push JSON.

    Parameters
    ----------
    inbox : queue.Queue
        Source of ``jetson.utils.cudaImage`` frames from the capture thread.
    outbox : queue.Queue
        Sink for JSON-encoded detection dicts (one message per processed frame).
    ID : str
        Label used to tag this worker's timing printouts.

    Runs forever; intended as the target of a worker thread.
    """
    # custom-trained SSD-MobileNet (VOC-style ONNX export); low threshold so
    # weak detections survive for downstream filtering
    net = detectNet(model="/home/jetcam/mb1025_voc1501/ssd-mobilenet.onnx",
                    input_blob="input_0",
                    output_cvg="scores",
                    output_bbox="boxes",
                    threshold=0.1)
    # example of loading the stock network instead:
    # net = detectNet(
    #     "ssd-mobilenet-v2",
    #     threshold=0.3)
    cuda_buff = None
    while True:
        if inbox.empty():
            print(f"{ID}: Waiting for image")
            time.sleep(0.02)
            continue

        # blocking call here to see how long it takes to pop image off queue
        with time_it(f"{ID}: get object off queue"):
            # image type is 'jetson.utils.cudaImage'
            cuda_obj = inbox.get(block=True)

        if cuda_buff is None:
            # lazily allocate a mapped GPU buffer matching the first frame;
            # assumes all later frames share the same geometry/format — TODO confirm
            with time_it(f"{ID} create GPU buffer (once only):"):
                cuda_buff = jetson_utils.cudaAllocMapped(
                    width=cuda_obj.width,
                    height=cuda_obj.height,
                    format=cuda_obj.format)

        with time_it(f"{ID}::::::::::: total time :::::::"):
            # copy image or something goes weird — detect on our own buffer
            with time_it(f"{ID} copy GPU buffer:"):
                jetson_utils.cudaMemcpy(cuda_buff, cuda_obj)

            with time_it(f"{ID} detectnet"):
                detections = net.Detect(cuda_buff)

            with time_it(f"{ID}: feedback dects"):
                # Flatten each <detectNet.Detection object> into a plain dict
                # so the result set is JSON-serialisable, e.g.
                # {'ClassID': 2, 'Left': 555.9, 'Top': 181.1, 'Right': 759.4,
                #  'Bottom': 324.6, 'Confidence': 0.17, 'index': '21'}
                # (a fresh dict is built per detection, so no deepcopy needed)
                all_dects = {}
                for index, deect in enumerate(detections):
                    all_dects[index] = {
                        "ClassID": deect.ClassID,
                        "Left": deect.Left,
                        "Top": deect.Top,
                        "Right": deect.Right,
                        "Bottom": deect.Bottom,
                        "Confidence": deect.Confidence,
                        "index": str(index),
                    }
                output = json.dumps(all_dects)
                # drop the message if the consumer hasn't taken the previous
                # one yet; detections are only useful while fresh
                if outbox.empty():
                    outbox.put(output)
def main_videocap():
    """Main producer loop: capture frames, warp them on the GPU with VPI,
    hand copies to the detector worker thread, overlay detections and
    stream the annotated frames out through a GStreamer pipeline.

    NOTE(review): runs forever; intended as the process entry point.
    """
    # Bounded queues decouple this loop from the detector thread; maxsize=3
    # keeps latency bounded if one side stalls.
    _in_box = queue.Queue(maxsize=3)
    _dects_box = queue.Queue(maxsize=3)
    workers = []
    for id in range(0, 1):
        workers.append(threading.Thread(
            target=detector_cuda,
            args=(_in_box, _dects_box, f"IF{id}", )))
        workers[-1].start()
    input_size = (1920, 1080)  # (3840, 2160)
    output_size = (1920, 1080)
    file_path = "/home/jetcam/tensorrt_hello/jetson-inference/data/images/humans_0.jpg"
    # Pre-resize the test image to the working resolution and save a copy
    # that videoSource can loop over.
    img_people = cv2.imread(file_path)
    img_people = cv2.resize(img_people, input_size)
    file_path_save = file_path.replace(".jpg", "_copy.jpg")
    cv2.imwrite(file_path_save, img_people)
    # more args etc
    # https://github.com/dusty-nv/jetson-inference/blob/master/docs/aux-streaming.md#source-code
    # videosource returns its own GPU buffer so don't have to
    # define one with cudalloc
    input = jetson_utils.videoSource(file_path_save, ["--loop=-1"])
    img_people = input.Capture(format='rgb8')
    #img_people = resize(img_people, input_size)
    # set up parallel process streams
    streamLeft = vpi.Stream()
    streamRight = vpi.Stream()
    # using gstreamer instead of FFMPEG, Nvidia doesn't
    # support FFMPEG 100% for hardware dec/enc
    #ensure opencv is built with gstreamer support
    out_stream = cv2.VideoWriter(
        filename=gstreamer_out(),
        apiPreference=cv2.CAP_GSTREAMER,
        fourcc=0,
        fps=25.0,
        frameSize=output_size)
    # not in loop while pulling from images - disc read time
    input_img_1 = input.Capture(format='rgb8')
    input_img_2 = input.Capture(format='rgb8')
    cnt = 0
    while True:
        cnt += 1
        # time-based moving transform
        hom = np.array([
            [1, (math.sin(cnt / 10)), 0],
            [0, 1, 0],
            [0, 0, 1]])
        print("------")
        with time_it("VC: upload to GPU (2)"):
            with vpi.Backend.CUDA:
                # upload image into GPU
                with streamLeft:
                    frame1 = vpi.asimage(input_img_1)  # .convert(vpi.Format.RGB8)
                with streamRight:
                    frame2 = vpi.asimage(input_img_2)  # .convert(vpi.Format.RGB8)
        with time_it("VC: perp processing & sync (2)"):
            with vpi.Backend.CUDA:
                # VIC processor can be used here - need to convert
                # image to correct format (NVIDIA VPI doc page)
                # but not much performance gain
                # if we run out of GPU it will be useful
                # https://docs.nvidia.com/vpi/algo_persp_warp.html#algo_persp_warp_perf
                with streamLeft:
                    frame1 = frame1.perspwarp(hom)
                with streamRight:
                    frame2 = frame2.perspwarp(hom)
                # wait for GPU streams to finish their tasks
                streamLeft.sync()
                streamRight.sync()
        result_dict = None
        if _dects_box.empty() is False:
            with time_it("VC: get detections off queue"):
                try:
                    result_dict = _dects_box.get(block=False)
                    result_dict = json.loads(result_dict)
                except queue.Empty:
                    # another consumer may have drained the queue between
                    # empty() and get(); just carry on without detections
                    pass
        with time_it("VC: output GPU to CPU (1)"):
            # lock GPU memory to pull out buffer
            # here it is assumed the payload is
            # 1080p
            with frame1.rlock_cpu() as data:
                img_copy = data.copy()
        if _in_box.empty():
            with time_it("VC: put image on queue (2)"):
                _in_box.put(input_img_1)
                _in_box.put(input_img_2)
        #time.sleep(1000)
        with time_it("VC: draw on rectangles"):
            ts = str(datetime.now().strftime("%H:%M:%S"))
            cv2.putText(
                img_copy,
                ts,
                (80, 80),
                cv2.FONT_HERSHEY_SIMPLEX,
                fontScale=3,
                color=(255, 0, 0),
                thickness=4)
            if result_dict is not None:
                print(f"detections found?{len(result_dict.values())}")
                for dect in result_dict.values():
                    print(dect)
                    cv2.rectangle(
                        img_copy,
                        (int(dect["Left"]), int(dect["Top"])),
                        (int(dect["Right"]), int(dect["Bottom"])),
                        (255, 0, 0),
                        3)
        with time_it("VC: output to mux"):
            #print(img_copy.shape)
            out_stream.write(img_copy)
def xavier_power_settings(sudo_pass):
    """Switch the Jetson Xavier into its high-power profile.

    Runs `nvpmodel -m 8` and `jetson_clocks` via `sudo -S`, then queries the
    active power mode and raises if it is not a 20W profile.
    NOTE(review): piping the password through echo is obviously not secure -
    this is for quick and dirty testing only.
    """
    def run_as_sudo(cmd_words):
        # Feed the password into `sudo -S` over a pipe and return stdout.
        feeder = subprocess.Popen(['echo', sudo_pass], stdout=subprocess.PIPE)
        runner = subprocess.Popen(['sudo', '-S'] + cmd_words,
                                  stdin=feeder.stdout,
                                  stdout=subprocess.PIPE)
        return runner.stdout.read().decode()

    for raw_command in ['sudo nvpmodel -m 8', 'sudo jetson_clocks']:
        words = raw_command.split()
        print("command", words)
        print(run_as_sudo(words))
        time.sleep(2)
    print("checking power mode")
    capture = run_as_sudo('sudo nvpmodel -q'.split())
    print(capture)
    if '20W' not in capture:
        raise Exception("XAVIER not in max power mode - try again with correct sudo pass")
if __name__ == '__main__':
    # CLI entry point: the sudo password is required so the script can push
    # the Jetson into its high-power profile before streaming starts.
    parser = argparse.ArgumentParser(
        description="",
        formatter_class=argparse.RawTextHelpFormatter,
    )
    parser.add_argument(
        '-sudopassword',
        help='sudo password to enable power settings',
        required=True)
    args = parser.parse_args()
    # Raises early if the power mode could not be applied.
    xavier_power_settings(sudo_pass=args.sudopassword)
    main_videocap()
| LiellPlane/DJI_UE4_poc | Source/lumotag/mobilenet_inference_tidied.py | mobilenet_inference_tidied.py | py | 10,889 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "time.perf_counter",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "time.perf_counter",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "contextlib.contextmanager",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "jetson_... |
30467610617 | import collections
class Solution:
    def accountsMerge(self, accounts: 'list[list[str]]') -> 'list[list[str]]':
        """Merge accounts that share at least one email address.

        Builds an undirected graph whose nodes are (name, email) pairs,
        linking consecutive emails of each account, then collects connected
        components with a BFS.  Each merged account is returned as
        [name, sorted emails...].

        BUGFIX: the original annotated the signature with ``List[List[str]]``
        but never imported ``typing.List``, so defining the class raised
        NameError; quoted builtin generics keep the type info without the
        import.
        """
        node_to_neighbor = {}
        # Build the graph: chaining each email to the next one is enough to
        # make all emails of one account a single component.
        for account in accounts:
            name = account[0]
            for i in range(1, len(account)):
                cur = UGNode(name, account[i])
                node_to_neighbor.setdefault(cur, [])
                if i == len(account) - 1:
                    continue
                after = UGNode(name, account[i + 1])
                node_to_neighbor[cur].append(after)
                node_to_neighbor.setdefault(after, []).append(cur)
        result = []
        visited = set()
        # Walk each unvisited component and emit [name, e1, e2, ...].
        for start in node_to_neighbor:  # only keys needed, not .items()
            if start in visited:
                continue
            group = self.bfs(node_to_neighbor, start, visited)
            result.append([start.name] + sorted(group))
        return result

    def bfs(self, graph, start, visited):
        """Return the set of emails reachable from `start`; marks `visited`."""
        emails = set()
        queue = collections.deque([start])
        while queue:
            cur = queue.popleft()
            if cur in visited:
                continue
            visited.add(cur)
            emails.add(cur.email)
            for neighbor in graph.get(cur, []):
                queue.append(neighbor)
        return emails


class UGNode:
    """Hashable (name, email) node for the undirected account graph."""

    def __init__(self, name, email):
        self.name = name
        self.email = email

    def __hash__(self):
        return hash((self.name, self.email))

    def __eq__(self, other):
        return (self.name, self.email) == (other.name, other.email)
| dundunmao/LeetCode2019 | 721. Accounts Merge.py | 721. Accounts Merge.py | py | 2,068 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.deque",
"line_number": 43,
"usage_type": "call"
}
] |
11784656842 | from aiogram import Dispatcher
from aiogram.types import Message
from database.database import GAME_USERS
from lexicon.lexicon_ru import LEXICON_RU
async def send_reverse_answer(message: Message):
    """Echo the user's text reversed, unless the user is in the middle of a game."""
    user_id = message.from_user.id
    currently_playing = user_id in GAME_USERS and GAME_USERS[user_id]['in_game']
    if currently_playing:
        await message.reply(text=LEXICON_RU['we_are_playing'])
    else:
        await message.reply('\n'.join([message.text[::-1], LEXICON_RU['smile']]))
def register_other_handlers(dp: Dispatcher):
    # Fallback handler: registered without filters, so it catches every
    # message no earlier handler claimed.
    dp.register_message_handler(send_reverse_answer)
| faralost/ichiraku-telegram-bot | handlers/other_handlers.py | other_handlers.py | py | 662 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "aiogram.types.Message",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "database.database.GAME_USERS",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "lexicon.lexicon_ru.LEXICON_RU",
"line_number": 10,
"usage_type": "name"
},
{
"ap... |
5547127979 | """
Voting 12/04/2022.
1. Refund previous depositor' spending to finance multisig 0x48F300bD3C52c7dA6aAbDE4B683dEB27d38B9ABb
with 254.684812629886507249 stETH.
2. Fund depositor bot multisig 0x5181d5D56Af4f823b96FE05f062D7a09761a5a53 with 130 stETH.
Vote passed & executed on Apr-15-2022 05:34:30 PM +UTC, block #14591317.
TX URL: https://etherscan.io/tx/0x3b5fff376df823e26857e68a468e161a8ed818afd29410983f0680c2d18042f5
"""
import time
from typing import (Dict, Tuple, Optional)
from brownie.network.transaction import TransactionReceipt
from utils.voting import confirm_vote_script, create_vote
from utils.finance import make_steth_payout
from utils.evm_script import encode_call_script
from utils.config import (
get_deployer_account,
get_is_live
)
def start_vote(
        tx_params: Dict[str, str],
        silent: bool = False
) -> Tuple[int, Optional[TransactionReceipt]]:
    """Prepare and run voting."""
    vote_items = [
        # 1. Refund previous depositor' spending to finance multisig
        # 0x48F300bD3C52c7dA6aAbDE4B683dEB27d38B9ABb with 254.684812629886507249 stETH.
        make_steth_payout(
            target_address='0x48F300bD3C52c7dA6aAbDE4B683dEB27d38B9ABb',
            steth_in_wei=254_684_812_629_886_507_249,
            reference='Refund depositor\'s spending'
        ),
        # 2. Fund dedicated depositor multisig
        # 0x5181d5D56Af4f823b96FE05f062D7a09761a5a53 with 130 stETH.
        make_steth_payout(
            target_address='0x5181d5D56Af4f823b96FE05f062D7a09761a5a53',
            steth_in_wei=130 * (10 ** 18),
            reference='Fund depositor bot multisig'
        ),
    ]
    encoded_call_script = encode_call_script(vote_items)
    # Bail out with the (falsy) confirmation result if the operator rejects
    # the script; only then actually create the on-chain vote.
    confirmed = confirm_vote_script(encoded_call_script, silent)
    if not confirmed:
        return confirmed
    return create_vote(
        vote_desc=(
            'Omnibus vote: '
            '1) Refund previous depositor\' spending to finance multisig with 254.684812629886507249 stETH; '
            '2) Fund depositor bot multisig 0x5181d5D56Af4f823b96FE05f062D7a09761a5a53 with 130 stETH.'
        ),
        evm_script=encoded_call_script,
        tx_params=tx_params
    )
def main():
    """Entry point: assemble tx parameters and kick off the vote."""
    tx_params = {'from': get_deployer_account()}
    if get_is_live():
        # Live network: pin the fee settings explicitly.
        tx_params['max_fee'] = '300 gwei'
        tx_params['priority_fee'] = '2 gwei'
    vote_id, _ = start_vote(tx_params=tx_params)
    if vote_id >= 0:
        print(f'Vote created: {vote_id}.')
    time.sleep(5)  # hack for waiting thread #2.
| lidofinance/scripts | archive/scripts/vote_2022_04_12.py | vote_2022_04_12.py | py | 2,423 | python | en | code | 14 | github-code | 36 | [
{
"api_name": "typing.Dict",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "utils.evm_script.encode_call_script",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "utils.finance.make_steth_payout",
"line_number": 36,
"usage_type": "call"
},
{
"a... |
43046784986 | from datetime import datetime
from discord.ext import commands
import discord
from discordbot.errors import ErrorMessage
class UserInfo(commands.Cog):
    """Commands exposing public information about Discord accounts."""

    def __init__(self, bot):
        self.bot = bot
        self.color = 0xffffff

    @commands.command(
        brief="Erhalte Benutzerinfos",
        description="Erhalte den Standardavatar, Avatar und das Alter eines Discordaccounts",
        aliases=["avatar", "defaultavatar", "accountage"],
        help="Benutze /userinfo <User> und du erhältst Informationen über diesen Discord Account",
        usage="<User>"
    )
    async def userinfo(self, ctx, user: discord.User):
        """Send an embed with ID, creation date/age and avatar of `user`."""
        from datetime import timezone  # local import: file only imports `datetime`
        # BUGFIX: discord.py 1.x exposes `created_at` as a *naive UTC*
        # datetime.  Subtracting it from the local-time `datetime.now()`
        # skewed the account age by the host's UTC offset, and
        # `datetime.timestamp()` interpreted it as local time, shifting the
        # <t:...> Discord timestamp as well.
        age = datetime.utcnow() - user.created_at
        created_epoch = int(user.created_at.replace(tzinfo=timezone.utc).timestamp())
        await ctx.sendEmbed(
            title="Benutzerinformationen",
            description=f"Infos über den Benutzer {user.mention}",
            fields=[
                ("ID", str(user.id)),
                ("Account erstellt am", f"<t:{created_epoch}>"),
                ("Account erstellt vor", f"{age.days} Tag(en)"),
                ("Standardavatar", f"[{user.default_avatar}]({user.default_avatar_url})"),
            ],
            inline=False,
            thumbnailurl=str(user.avatar_url))

    @commands.command(
        brief='Stalke musikhörende Leute',
        description='Erhalte Links zu dem Song, welcher jemand gerade hört',
        aliases=[],
        help="Benutze /usersong <Member> um den Song zu erhalten",
        usage="<Member>"
    )
    @commands.guild_only()
    async def usersong(self, ctx, member: discord.Member):
        """Send an embed describing the Spotify song `member` is listening to.

        Raises ErrorMessage when the member listens to nothing, or to a
        listening activity that is not a proper Spotify track.
        """
        found = False
        for activity in member.activities:
            if str(activity.type) == "ActivityType.listening":
                try:
                    await ctx.sendEmbed(title="Spotify Song", fields=[
                        ("Titel", activity.title),
                        ("Künstler", activity.artist),
                        ("Link", ("[Spotify](https://open.spotify.com/track/" + activity.track_id + ")")),
                        ("Benutzer", member.mention)])
                except AttributeError:
                    # Non-Spotify listening activities lack title/artist/track_id.
                    raise ErrorMessage(
                        message="Scheinbar hört dieser Benutzer keinen richtigen Song.")
                found = True
        if not found:
            raise ErrorMessage(message="Dieser Benutzer hört keinen Song!")
def setup(bot):
    # discord.py extension entry point: invoked by bot.load_extension().
    bot.add_cog(UserInfo(bot))
| AlexeiSur/bot12345 | discordbot/botcmds/userinfo.py | userinfo.py | py | 2,397 | python | de | code | 0 | github-code | 36 | [
{
"api_name": "discord.ext.commands.Cog",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "discord.User",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "da... |
9475668450 | """API related fixtures."""
from contextlib import contextmanager
from typing import Any, Callable, ContextManager, Generator
from uuid import uuid4
import pytest
import respx
from fastapi.testclient import TestClient
from httpx import Request, Response
from python_scaffold import api, settings
@pytest.fixture(scope="session")
def test_client() -> TestClient:
    """Session-wide test client of the service.

    [Read here for more](https://fastapi.tiangolo.com/tutorial/testing/)
    """
    client = TestClient(api.app)
    return client
@pytest.fixture()
def mock_api_auth() -> Callable[[Response | None], ContextManager[dict[str, respx.Route]]]:
    """Mock API for the auth API.

    Yields a context-manager factory; inside the context the auth endpoint is
    intercepted by respx and the registered route is exposed for assertions.
    """

    @contextmanager
    def _mock_api_auth(custom_response: Response | None = None) -> Generator[dict[str, respx.Route], None, None]:
        def _dynamic_message_response(request: Request) -> Response:
            # Replay the caller-supplied response if any ...
            if custom_response:
                return custom_response
            # ... otherwise answer a successful auth with a random token.
            return Response(201, json={"access_token": uuid4().hex})

        route_auth = respx.post(url=settings.external_api_auth_url, name="auth").mock(
            side_effect=_dynamic_message_response
        )
        yield {"auth": route_auth}

    return _mock_api_auth
@pytest.fixture()
def example_message() -> str:
    """A plain example message shared by the message-API mocks."""
    message = "Hi i am a example message."
    return message
@pytest.fixture()
async def mock_api_messages(example_message: str) -> Callable[..., ContextManager[dict[str, respx.Route]]]:
    """Mock an external message API.

    Yields a context-manager factory; inside the context, GET requests to the
    external base URL are answered per message id with either a custom
    payload, the default example message, or a 404.
    """

    @contextmanager
    def _mock_api_messages(
        messages: list[dict[str, Any]] | None = None
    ) -> Generator[dict[str, respx.Route], None, None]:
        _default_messageid = "0" * 8

        def _dynamic_message_response(request: Request) -> Response:
            # The message id is the last path segment of the requested URL.
            request_url_id = str(request.url).split("/")[-1]
            if not request_url_id:
                return Response(403, json={"details": "Error in request: no ID was given"})
            message = example_message
            if messages_ids_to_respond_custom_msg:
                # BUGFIX: the original indexed [0] into a filtered list and
                # crashed with IndexError when no entry matched the requested
                # id; fall back to "" so the 404 branch answers instead.
                message = next(
                    (
                        msg.get("base_message", example_message)
                        for msg in messages_ids_to_respond_custom_msg
                        if msg.get("messageid", _default_messageid) == request_url_id
                    ),
                    "",
                )
            if not message:
                return Response(404, json={"details": "Error in request: no MSCONS with this ID exists."})
            return Response(200, json=[{"edifact": message}])

        # Only messages flagged `compacted` get a custom payload.
        messages_ids_to_respond_custom_msg = (
            [message for message in messages if bool(message["compacted"])] if messages else []
        )
        route_messages = respx.get(
            url=settings.external_api_base_url, path__startswith="/", name="get_some_messages"
        ).mock(side_effect=_dynamic_message_response)
        yield {"messages": route_messages}

    return _mock_api_messages
| IronicUsername/python-scaffold | python-scaffold/tests/test_python_scaffold/fixtures/api.py | api.py | py | 2,931 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "fastapi.testclient.TestClient",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "python_scaffold.api.app",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "python_scaffold.api",
"line_number": 20,
"usage_type": "name"
},
{
"ap... |
19033663462 | """OS identification method using netflows -- User-Agent
This module contains implementation of UserAgent class which is a method for OS
identification using User-Agent technique.
"""
import structlog
class UserAgent:
    """UserAgent OS identification technique.

    Aggregates, per source address, the relative frequency of each operating
    system string found in the flows' HTTP User-Agent fields.
    """

    # Windows NT version string -> marketing name.
    WIN_MAP = {
        'Windows 10.0': 'Windows 10',
        'Windows 6.3': 'Windows 8.1',
        'Windows 6.2': 'Windows 8',
        'Windows 6.1': 'Windows 7',
        'Windows 6.0': 'Windows Vista',
        'Windows 5.2': 'Windows XP Professional x64',
        'Windows 5.1': 'Windows XP',
        'Windows 5.0': 'Windows 2000',
    }

    # Android API level -> OS version (levels below 10 intentionally omitted).
    API_MAP = {
        'Android 10': 'Android 2.3',
        'Android 11': 'Android 3.0',
        'Android 12': 'Android 3.1',
        'Android 13': 'Android 3.2',
        'Android 14': 'Android 4.0',
        'Android 15': 'Android 4.0',
        'Android 16': 'Android 4.1',
        'Android 17': 'Android 4.2',
        'Android 18': 'Android 4.3',
        'Android 19': 'Android 4.4',
        'Android 21': 'Android 5.0',
        'Android 22': 'Android 5.1',
        'Android 23': 'Android 6.0',
        'Android 24': 'Android 7.0',
        'Android 25': 'Android 7.1',
        'Android 26': 'Android 8.0',
        'Android 27': 'Android 8.1',
        'Android 28': 'Android 9',
    }

    @classmethod
    def convert_win(cls, os_name):
        """Map a raw 'Windows <NT version>' string to its marketing name."""
        return cls.WIN_MAP.get(os_name, os_name)

    @classmethod
    def convert_api(cls, os_name):
        """Map an 'Android <API level>' string to the OS version string."""
        return cls.API_MAP.get(os_name, os_name)

    def __init__(self, logger=structlog.get_logger()):
        self.logger = logger.bind(method="useragent")

    def run(self, flows):
        """Run the method on given flows.

        :param flows: flows to process
        :return: dict mapping source IP -> {os name: relative frequency}
        """
        self.logger.info("Method start")
        result = {}
        for flow in flows:
            try:
                if "sa" not in flow:
                    continue
                src_addr = flow["sa"]
                label = flow["hos"]
                # Read eagerly so a missing key skips the flow via KeyError.
                major = flow["hosmaj"]
                minor = flow["hosmin"]
                counts = result.get(src_addr, {})
                if label != "N/A":
                    if major != "N/A":
                        label += " " + major
                        if minor != "N/A":
                            label += "." + minor
                    label = self.convert_api(self.convert_win(label))
                    counts[label] = counts.get(label, 0) + 1
                if counts:
                    result[src_addr] = counts
            except KeyError as e:
                self.logger.warning('Flow is missing a necessary key!', key=str(e))
            except Exception as e:
                self.logger.warning('Exception while processing flow!', exception=str(e), flow=str(flow))
        # Normalise raw counts into relative frequencies per source address.
        for src_addr, counts in result.items():
            total = sum(counts.values())
            for label in counts:
                counts[label] /= total
        self.logger.info("Method finish")
        return result
| CSIRT-MU/CRUSOE | crusoe_observe/OS-parser-component/osrest/method/useragent.py | useragent.py | py | 4,053 | python | en | code | 9 | github-code | 36 | [
{
"api_name": "structlog.get_logger",
"line_number": 70,
"usage_type": "call"
}
] |
33512505076 | # -*- coding: utf8 -*-
from collective.contact.core.behaviors import IRelatedOrganizations
from collective.contact.core.testing import INTEGRATION
from ecreall.helpers.testing.base import BaseTest
from z3c.relationfield.relation import RelationValue
from zope.component import getUtility
from zope.interface import alsoProvides
from zope.intid.interfaces import IIntIds
import unittest
class TestSearch(unittest.TestCase, BaseTest):
    """Tests related organizations."""

    layer = INTEGRATION

    def setUp(self):
        # Bind shortcut references into the fixture's directory tree.
        super(TestSearch, self).setUp()
        self.portal = self.layer['portal']
        self.mydirectory = self.portal['mydirectory']
        self.armeedeterre = self.mydirectory['armeedeterre']
        self.corpsa = self.armeedeterre['corpsa']
        self.divisionalpha = self.corpsa['divisionalpha']
        self.divisionbeta = self.corpsa['divisionbeta']

    def test_related_searchable_text(self):
        # The SearchableText index of an organization must also contain the
        # words of its related organizations after a reindex.
        pc = self.portal.portal_catalog
        index = pc._catalog.getIndex("SearchableText")
        rid = pc(UID=self.divisionalpha.UID())[0].getRID()
        indexed = index.getEntryForObject(rid, default=[])
        # Baseline: only the object's own (ancestor-prefixed) words.
        self.assertListEqual(indexed, ['armee', 'de', 'terre', 'corps', 'a', 'division', 'alpha'])
        intids = getUtility(IIntIds)
        # Mark divisionalpha as carrying related organizations and relate it
        # to divisionbeta via a z3c.relationfield RelationValue.
        alsoProvides(self.divisionalpha, IRelatedOrganizations)
        self.divisionalpha.related_organizations = [
            RelationValue(intids.getId(self.divisionbeta)),
        ]
        self.divisionalpha.reindexObject()
        indexed = index.getEntryForObject(rid, default=[])
        # After reindexing, the related organization's words precede the
        # object's own words in the index entry.
        self.assertListEqual(indexed, ['armee', 'de', 'terre', 'corps', 'a', 'division', 'beta', 'armee', 'de',
                                       'terre', 'corps', 'a', 'division', 'alpha'])
| collective/collective.contact.core | src/collective/contact/core/tests/test_related.py | test_related.py | py | 1,764 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "ecreall.helpers.testing.base.BaseTest",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "collective.contact.core.testing.INTEGRATION",
"line_number": 17,
"usage_type... |
27281502055 | """project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Import the include() function: from django.conf.urls import url, include
3. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import url, include
from django.conf.urls.static import static
from django.contrib import admin
from project import settings
# Root URL configuration; static() appends MEDIA_URL serving for development.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'', include('apps.base.urls', namespace='base')),
    url(r'^legoteka/', include('apps.legoteka.urls', namespace='legoteka')),
    url(r'^humanitarian_aid/', include('apps.aid.urls', namespace='humanitarian_aid')),
    url(r'^library/', include('apps.library.urls', namespace='library')),
    url(r'^financial/', include('apps.financial.urls', namespace='financial')),
    url(r'^api/', include('api.urls')),
    # NOTE(review): string view references like the one below were removed in
    # Django 1.10 - fine on 1.9, but must become a callable before upgrading.
    url(r'^pages/(\S+)/$', 'apps.pages.views.pages_detail', name='pages_detail'),
    url(r'^redactor/', include('redactor.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| mikha1lov/headway | project/urls.py | urls.py | py | 1,515 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.conf.urls.url",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 23,
"usage_type": "name"
},
{
"api_name... |
24835307446 | import aiologger
from aiologger.handlers.streams import AsyncStreamHandler
from aiologger.handlers.files import AsyncFileHandler
import logging
class MyFormatter(logging.Formatter):
    """Formatter emitting "<created> - <logger name> - <LEVEL> - <message>"."""

    def format(self, record):
        # BUGFIX: use getMessage() so lazy %-style arguments
        # (logger.info("x=%s", x)) are interpolated; record.msg is the raw,
        # unformatted template string.
        return f"{record.created} - {record.name} - {record.levelname} - {record.getMessage()}"
def setup_async_logger():
    """Build the application's aiologger Logger writing to stdout and my_app.log."""
    logger = aiologger.Logger.with_default_handlers(
        name="my_app",
        level=logging.DEBUG
    )
    # NOTE(review): with_default_handlers() already attaches stream handlers;
    # adding the two handlers below presumably duplicates console output -
    # verify and consider aiologger.Logger(name=..., level=...) instead.
    console_handler = AsyncStreamHandler(level=logging.DEBUG)
    file_handler = AsyncFileHandler(
        filename="my_app.log",
        mode="a",  # append across restarts
        encoding="utf-8"
    )
    # Share a single formatter instance between both handlers.
    formatter = MyFormatter()
    console_handler.formatter = formatter
    file_handler.formatter = formatter
    logger.add_handler(console_handler)
    logger.add_handler(file_handler)
    return logger
| bucin98/fast_api_coin_price | app/get_logger.py | get_logger.py | py | 830 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.Formatter",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "aiologger.Logger.with_default_handlers",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "aiologger.Logger",
"line_number": 13,
"usage_type": "attribute"
},
{
... |
26804643066 | # -*- coding: utf-8 -*-
import pymysql
import itertools
if __name__ == "__main__":
    # NOTE(review): this guard sits ABOVE the function definitions, so running
    # the module raises NameError - pre_deal is not defined yet at this point.
    # It should be moved to the bottom of the file.
    pre_deal()
#search all of the entities from db and remove duplicated entries.
def pre_deal():
db = pymysql.connect("localhost", "root", "302485", "imdb", charset='utf8')
cursor = db.cursor()
search_sql = """search identifiers from imdb_entity"""
try:
cursor.execute(search_sql)
except Exception as e:
db.rollback()
print(str(e))
finally:
cursor.close()
db.close()
identifiers = cursor.fetchall()
identify_groups = []
for identify in identifiers:
split_identfify = identify.split(",")
identify_groups.append(split_identfify)
identify_groups.sort()
id_distincts = itertools.groupby(identify_groups)
return id_distincts
#search relationships between identifiers
def get_relation(identifiers):
count = len(identifiers)
triples = []
for i in range(0, count):
for j in range(i + 1, count):
triple_one = get_triple(identifiers[i], identifiers[j])
triple_two = get_triple(identifiers[j], identifiers[i])
if(triple_one!=''):
triples.append(triple_one)
if(triple_two!=''):
triples.append(triple_two)
return triples
def get_triple(identfier_one, identifier_two):
    """Query the local Wikidata SPARQL endpoint for a predicate that links
    two items.

    Returns the predicate's local name (e.g. 'P31'), or '' when the two
    entities are not directly related.
    """
    import requests  # BUGFIX: `requests` was used but never imported in this module
    url = 'http://192.168.0.196:9999/bigdata/namespace/wdq/sparql'
    query = """
    SELECT ?item_one ?predicate ?item_two
    WHERE
    {
      ?item_one ?predicate ?item_two.
      BIND(%s AS ?item_one).
      BIND(%s AS ?item_two).
      SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE]". }
    }
    """ % (identfier_one, identifier_two)
    r = requests.get(url, params={'format': 'json', 'query': query})
    data = r.json()
    identifier = ''
    bindings = data['results']['bindings']
    if bindings:
        # BUGFIX: the SELECT binds ?item_one/?predicate/?item_two - there is
        # no 'item' variable, so the original lookup raised KeyError.  The
        # relation between the two entities is the predicate.
        identifier = bindings[0]['predicate']['value'].split('/')[-1]
    return identifier
| LYunCoder/imdb_analysis | subgraph_wikidata/construct_relations.py | construct_relations.py | py | 2,079 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pymysql.connect",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "itertools.groupby",
"line_number": 29,
"usage_type": "call"
}
] |
19027441425 | from django.db import models
class SocialNetwork(models.Model):
"""Social Network model definitions"""
DEFAULT_SOCIALNETWORKS = (
(0, 'FaceBook'),
(1, 'Instagram'),
(2, 'Linkedin'),
(3, 'Twitter'),
(4, 'YouTube'),
)
title = models.CharField(
verbose_name='Rede Social',
max_length=50,
choices=DEFAULT_SOCIALNETWORKS
)
url = models.URLField(
unique=True, null=True, blank=True,
verbose_name='URL do Perfil',
help_text='Link do Perfil na rede social escolhida.',
)
class Meta:
ordering = ['title']
verbose_name = 'Rede Social'
verbose_name_plural = 'Redes Sociais'
def __str__(self):
return self.title
| ag-castro/brazil-ongs-mapping | ressonantes/core/models/social_network.py | social_network.py | py | 762 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "django.db.models.Model",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": ... |
19565250442 | from django.db import models
from users.models import User
from .validators import validate_year
class Category(models.Model):
    """Category of a title (e.g. film, book)."""

    # Human-readable category name.
    name = models.CharField(max_length=256)
    # URL-safe unique identifier used in API routes.
    slug = models.SlugField(max_length=50, unique=True)

    def __str__(self):
        return self.name

    class Meta:
        ordering = ('name',)
class Genre(models.Model):
    """Genre a title may belong to (many-to-many with Title)."""

    # Human-readable genre name.
    name = models.CharField(max_length=256)
    # URL-safe unique identifier used in API routes.
    slug = models.SlugField(max_length=50, unique=True)

    def __str__(self):
        return self.name

    class Meta:
        ordering = ('name',)
class Title(models.Model):
    """A reviewable work (film, book, ...)."""

    name = models.CharField(max_length=256)
    # Release year; validate_year presumably bounds the value
    # (see validators.validate_year - not visible here).
    year = models.IntegerField(validators=[validate_year])
    description = models.TextField(
        null=True,
        blank=True
    )
    # A title may carry several genres.
    genre = models.ManyToManyField(Genre)
    # Deleting a category keeps its titles; the FK just becomes NULL.
    category = models.ForeignKey(
        Category, on_delete=models.SET_NULL,
        null=True
    )

    def __str__(self):
        return self.name

    class Meta:
        ordering = ('name',)
class Review(models.Model):
    """User review of a title; one review per (author, title) pair."""

    # Deleting a title cascades to its reviews.
    title = models.ForeignKey(
        Title,
        on_delete=models.CASCADE,
        related_name='reviews'
    )
    text = models.TextField(
        'Текст отзыва'
    )
    # 1..10 rating; zip builds identical (value, label) choice pairs.
    score = models.IntegerField(
        'Оценка',
        choices=list(zip(range(1, 11), range(1, 11))),
    )
    author = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='reviews'
    )
    pub_date = models.DateTimeField(
        auto_now_add=True,
        db_index=True
    )

    class Meta:
        # A user may review each title at most once.
        constraints = [
            models.UniqueConstraint(
                fields=['author', 'title'],
                name='unique_author_title'
            )
        ]

    def __str__(self):
        return self.text
class Comment(models.Model):
    """User comment attached to a review."""

    # Deleting a review cascades to its comments.
    review = models.ForeignKey(
        Review,
        on_delete=models.CASCADE,
        related_name='comments'
    )
    text = models.TextField(
        'Текст комментария'
    )
    author = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='comments'
    )
    pub_date = models.DateTimeField(
        auto_now_add=True,
        db_index=True
    )

    class Meta:
        # Oldest comments first.
        ordering = ('pub_date',)

    def __str__(self):
        return self.text
| Daniil-lev/infra_sp2 | api_yamdb/reviews/models.py | models.py | py | 2,321 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "django.db.models.Model",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "... |
23728224660 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from StyleFrame import StyleFrame, utils
# read excel file to usable numpy arrays
def load_multispectral_data(excel_file):
df = pd.read_excel(excel_file, 'Multispectral Image')
nir = df[0:20].iloc[:, 1:].to_numpy()
red = df[22:42].iloc[:, 1:].to_numpy()
return nir, red
def load_training_data(excel_file):
    # Read the training sheet with cell styles preserved, translate each
    # cell's background colour into a class label, and return the 20x20
    # label grid (label column dropped).
    # NOTE(review): np.str was removed in NumPy >= 1.24; use dtype=str when
    # upgrading.
    sf = StyleFrame.read_excel(excel_file, sheet_name='Training Samples', read_style=True, use_openpyxl_styles=False)
    return StyleFrame(sf.applymap(get_classes_from_colors)).data_df[0:20].iloc[:, 1:].to_numpy(dtype=np.str)
def get_classes_from_colors(cell):
    """Map a styled cell's background colour to its training class label."""
    background = cell.style.bg_color
    if background in {utils.colors.green, 'FF92D050'}:
        return 'vegetation'
    if background in {utils.colors.red, 'FFFF0000'}:
        return 'bare ground'
    if background in {utils.colors.blue, '3275c8'}:
        return 'water'
    return 'unclassified'
# calculate ndvi and set invalid values (zero division) to zero
def calculate_ndvi(nir, red):
ndvi = np.divide(nir - red, nir + red)
return np.nan_to_num(ndvi)
def plot_histogram(ndvi, interval, filename):
    """Plot and save a histogram of NDVI values bucketed by `interval` over [-1, 1)."""
    bin_edges = np.arange(-1, 1, interval)
    # Count values falling into each half-open bin [edge, edge + interval).
    counts = [np.logical_and(edge <= ndvi, ndvi < edge + interval).sum()
              for edge in bin_edges]
    plt.bar(bin_edges, counts, width=interval, align='edge', edgecolor='white', color='grey')
    plt.title('Histogram of NDVI values')
    plt.xlabel('Range of NDVI values')
    plt.ylabel('Amount of values within range')
    plt.savefig(filename)
def plot_scatter(red, nir, colors='Grey'):
    # Scatter red vs NIR values onto the current figure; the caller is
    # responsible for saving/showing and closing the figure, so repeated
    # calls layer multiple classes onto one plot.
    plt.scatter(red, nir, c=colors)
    plt.title('Relationship of red and near-infrared channels')
    plt.xlabel('Red channel')
    plt.ylabel('Near-infrared channel')
def mv_cov_covinv(ar1, ar2):
    """Per-class statistics for a 2-band sample (e.g. red & NIR pixels).

    Returns a dict with the mean vector (shape (2,)), the 2x2 covariance
    matrix of the two bands and its pseudo-inverse.
    """
    obs_vectors = np.ma.vstack((ar1, ar2)).T  # shape (n_samples, 2)
    mean_vector = np.mean(obs_vectors, axis=0)
    # BUGFIX: np.cov treats rows as variables by default, which turned the
    # (n, 2) sample matrix into an n x n matrix; rowvar=False yields the
    # intended 2x2 band covariance matching the 2-element mean vector.
    covariance_matrix = np.cov(obs_vectors, rowvar=False)
    # pinv tolerates a singular covariance (e.g. perfectly correlated bands).
    covariance_matrix_inv = np.linalg.pinv(covariance_matrix)
    return {'mean vector': mean_vector,
            'covariance matrix': covariance_matrix,
            'inverse covariance matrix': covariance_matrix_inv}
def minimum_distance_to_mean(vec, means):
    """Return (1-based index of the nearest class mean, its Euclidean distance)."""
    distances = [np.linalg.norm(vec - mean) for mean in means]
    nearest = np.argmin(distances)
    return (nearest + 1, distances[nearest])
if __name__ == '__main__':
    ### assignment 1
    fn = './Multispectral Classification.xlsx'
    # NOTE(review): the workbook is parsed twice here; a single call would do.
    nir, red = load_multispectral_data(fn)[0], load_multispectral_data(fn)[1]
    ndvi = calculate_ndvi(nir, red)
    fig = plt.figure(figsize=(6, 3.2))
    ax = fig.add_subplot(111)
    plt.imshow(ndvi)
    plt.colorbar(orientation='vertical')
    plt.title('NDVI values of 20x20 area')
    plot_histogram(ndvi, 0.2, 'histogram.jpg')
    ### assignment 2
    plot_scatter(red, nir)
    plt.savefig('scatter.jpg')
    plt.close()
    ### assignment 3
    training_classes = load_training_data(fn)
    # get masks for each class (inverted: True where NOT that class)
    water_mask = np.isin(training_classes, 'water', invert=True)
    bg_mask = np.isin(training_classes, 'bare ground', invert=True)
    veg_mask = np.isin(training_classes, 'vegetation', invert=True)
    unc_mask = np.isin(training_classes, 'unclassified', invert=True)
    # plot each class with a different color (~mask selects class members)
    plot_scatter(red[~unc_mask], nir[~unc_mask], colors='lightgrey')
    plot_scatter(red[~water_mask], nir[~water_mask], colors='blue')
    plot_scatter(red[~bg_mask], nir[~bg_mask], colors='red')
    plot_scatter(red[~veg_mask], nir[~veg_mask], colors='green')
    plt.savefig('scatter_f.jpg')
    plt.close()
    ### assignment 5
    # compute mean vector, covariance matrix and inverse of covariance matrix
    water_stats = mv_cov_covinv(red[~water_mask], nir[~water_mask])
    bg_stats = mv_cov_covinv(red[~bg_mask], nir[~bg_mask])
    veg_stats = mv_cov_covinv(red[~veg_mask], nir[~veg_mask])
    ### assignment 6
    # Classify every pixel by the nearest class mean:
    # 1 = vegetation, 2 = bare ground, 3 = water.
    obs_vecs = np.array((red, nir)).T
    means = (veg_stats['mean vector'], bg_stats['mean vector'], water_stats['mean vector'])
    classified = np.zeros(ndvi.shape)
    distances = np.zeros(ndvi.shape)
    for i in range(len(obs_vecs)):
        for j in range(len(obs_vecs[i])):
            pixel_class = minimum_distance_to_mean(obs_vecs[i][j], means)
            classified[j][i] = pixel_class[0]
            distances[j][i] = pixel_class[1]
    # threshold distance values: None becomes NaN in a float array, so
    # outliers (> 2 std) end up unclassified
    classified[distances > 2 * np.std(distances)] = None
    # write to excel file
    df = pd.DataFrame(classified)
    df.to_excel('classified.xlsx', index=False)
    plt.imshow(classified)
    plt.show()
    colors = {0: 'lightgrey', 1: 'green', 2: 'red', 3: 'blue'}
    # assignment 7
    for i in range(0, 4):
        plt.scatter(red[classified == i], nir[classified == i], c=colors[i])
        in_class = ndvi[classified == i]
        # drop per-class NDVI outliers (> 1 std from the class mean)
        in_class[abs(in_class - np.mean(in_class)) > np.std(in_class)] = None
        ndvi_range = (np.nanmin(in_class), np.nanmax(in_class))
        plt.show()
| maxvanschendel/Geomatics | GEO1001/assignment_5.py | assignment_5.py | py | 5,084 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_excel",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "StyleFrame.StyleFrame.read_excel",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "StyleFrame.StyleFrame",
"line_number": 18,
"usage_type": "name"
},
{
"api_name"... |
21301309349 | from celery.decorators import task
from tracker import celery_app
from api.models import User, Tracker
from core_listing_scraper import get_current_listings, make_dict
from mailgun_email_api.mailgun_email_api import send_confirmation_message, send_email_for_new_or_updated_listings
@task(name='create_tracker')
def create_tracker(user_email, results_page_url):
    """Create a tracker for a craigslist results page and email the user
    the listings found at creation time."""
    user, _created = User.objects.get_or_create(email=user_email)
    user.save()
    current_listings = get_current_listings(results_page_url)
    new_tracker = Tracker(user=user,
                          results_page_url=results_page_url,
                          listings=current_listings)
    new_tracker.save()
    # Initial email contains the listings present right now
    send_confirmation_message(user_email, results_page_url, current_listings)
@celery_app.task(name='api.update_trackers')
def update_trackers():
    """Periodic task: refresh every user's trackers and email any new or
    changed listings."""
    for user in User.objects.all():
        for tracker in user.tracker_set.all():
            url = tracker.results_page_url
            latest_listings = get_current_listings(url)
            changes = get_new_or_updated_listings(tracker.listings,
                                                  latest_listings)
            if changes:
                send_email_for_new_or_updated_listings(user.email, url, changes)
            # Persist the fresh snapshot for the next comparison
            tracker.listings = latest_listings
            tracker.save()
def get_new_or_updated_listings(outdated_listings, current_listings):
    """Return a dict of listings (keyed by craigslist id) that are either
    brand new or have changed since the last snapshot."""
    changed = {}
    for craig_id, current_listing in current_listings.iteritems():
        previous = outdated_listings.get(craig_id)
        # Short-circuit: only compare fields when a previous entry exists
        if listing_did_not_exist(previous) or \
                listing_has_been_updated(previous, current_listing):
            changed[craig_id] = make_dict(current_listing)
    return changed
def listing_has_been_updated(outdated_listing, current_listing):
    """True if any tracked field of the listing differs between snapshots."""
    tracked_fields = ('title', 'price', 'absolute_url', 'last_modified_at')
    return any(outdated_listing.get(field) != current_listing.get(field)
               for field in tracked_fields)
def listing_did_not_exist(outdated_listing):
    """True if there was no previous snapshot of this listing.

    Uses ``is None`` (identity) rather than ``== None``: equality can be
    hijacked by a custom ``__eq__`` and is flagged by PEP 8 (E711).
    """
    return outdated_listing is None
| brianleungwh/tracker | api/tasks.py | tasks.py | py | 2,626 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "api.models.User.objects.get_or_create",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "api.models.User.objects",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "api.models.User",
"line_number": 10,
"usage_type": "name"
},
{
... |
29976190660 | #!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as pl
from numba import autojit
import time
import sys
@autojit
def stochastic(t, eta, amplitude, frequency):
    """
    Create time series of stochastic oscillations for a given damping rate
    (eta), amplitude and frequency. From De Ridder et al. 2006
    Usage:
    t - array: time stamps given in units of seconds
    eta - float: damping rate
    amplitude - float: amplitude of oscillations
    frequency - float: frequency of oscillations
    Author: grd349
    Edited by jsk389
    """
    # Compute cadence from time stamps
    dt = (t.max()-t.min()) / float(len(t))
    # Compute time between kicks for a given damping rate
    dtkick = 1.0 / eta / 100.0
    # If time between kicks is less than cadence set equal to cadence
    if dtkick < dt:
        dtkick = dt
    # Standard deviation of white noise component
    sigmae = amplitude * np.sqrt(eta * dtkick)
    # Number of excitation "kicks" spanning the time series
    N_noise = np.round((t.max() - t.min()) / dtkick + 1).astype(int)
    # Compute white noise components (sine and cosine quadratures)
    bnoise = sigmae * np.random.randn(N_noise)
    cnoise = sigmae * np.random.randn(N_noise)
    bn, cn = np.zeros(N_noise), np.zeros(N_noise)
    # Amplitudes
    # AR(1) recursion: each kick amplitude decays by exp(-eta*dtkick)
    # plus a fresh white-noise excitation.
    # NOTE(review): at i=0 the recursion reads bn[-1]/cn[-1] (the last,
    # still-zero element), so the first sample has no decayed history --
    # presumably intentional zero initial condition; confirm.
    coeff = np.exp(-eta * dtkick)
    for i in range(N_noise):
        bn[i] = coeff * bn[i-1] + bnoise[i]
        cn[i] = coeff * cn[i-1] + cnoise[i]
    # Generate signal
    N_time = len(t)
    output = np.zeros(N_time)
    # Index of the most recent kick for each time stamp
    n = np.floor(t / dtkick).astype(int)
    #output = np.exp(-eta * (t - (n*dtkick))) * (\
    #        bn * np.sin(2.0*np.pi*frequency*t) + \
    #        cn * np.cos(2.0*np.pi*frequency*t))
    # Damped oscillation between kicks: exponential decay since the last
    # kick times the two quadrature components
    for i in range(N_time):
        first = bn[n[i]] * np.sin(2.0 * np.pi * frequency * t[i])
        second = cn[n[i]] * np.cos(2.0 * np.pi * frequency * t[i])
        output[i] = np.exp(-eta * (t[i] - (n[i] * dtkick))) * \
                    (first + second)
    return output
@autojit
def lorentzian(t, linewidth, amplitude, frequency):
    """
    Generate stochastically-excited oscillations described by the parameters
    of the corresponding Lorentzian profile seen in the power spectrum.

    The damping rate is related to the Lorentzian linewidth through
    eta = linewidth * pi.

    Usage:
    t - array: time stamps
    linewidth - float: linewidth of Lorentzian profile (Hz)
    amplitude - float: amplitude of Lorentzian
    frequency - float: central frequency of Lorentzian (Hz)
    """
    damping_rate = np.pi * linewidth
    return stochastic(t, damping_rate, amplitude, frequency)
if __name__=="__main__":
    # Run quick example
    cadence = 40.0
    days = 100.0 * 1.0 * 73.0
    # np.linspace requires an integer number of samples; the original passed
    # a float, which raises TypeError on modern numpy
    npts = int(days * 24.0 * 3600.0 / cadence)
    linewidth = 1.0e-6
    amplitude = 100.0
    frequency = 200e-6
    t = np.linspace(0, npts*cadence, npts)
    s = time.time()
    y = lorentzian(t, linewidth, amplitude, frequency)
    # Use str.format so the placeholders are substituted; the original passed
    # the values as extra print() arguments and printed the raw template
    print("Time taken for dataset of length {0} days is {1} s".format(
        int(days), time.time()-s))
| jsk389/Stochastic-Simulations | Oscillations/oscillations.py | oscillations.py | py | 3,024 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "numpy.sqrt",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.round",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.random.randn",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_nu... |
23458756442 | import logging
import os.path
import schedule
import time
import threading
import requests
import ip_provider
SERVER_ADDRESS = "http://{}:8080".format(os.getenv("KIOSK_SERVER", "localhost"))
CONNECTOR_SERVICE_ADDRESS = "/kiosksConnector"
AUTHENTICATION_HEADER_KEY = "Authentication"
SERVICE_CALL_INTERVAL_IN_SECONDS = 30
SENT_FROM_IP_HEADER_KEY = 'X-From-Ip'
def worker_job(jwt, controller_service_port):
    """Register the periodic keep-alive call and run the scheduler loop
    forever (intended to run on a background thread)."""
    schedule.every(SERVICE_CALL_INTERVAL_IN_SECONDS).seconds.do(
        call_create_method,
        jwt=jwt,
        controller_service_port=controller_service_port)
    while True:
        schedule.run_pending()
        time.sleep(1)
def start_status_update_worker(jwt, controller_service_port):
    """Launch `worker_job` on a background thread."""
    worker = threading.Thread(target=worker_job,
                              args=(jwt, controller_service_port))
    worker.start()
def call_create_method(jwt, controller_service_port):
    """
    POST a keep-alive update to the kiosk connector service.

    Sends the JWT and this worker's ip:port so the server can record the
    kiosk as online.  Connection failures are logged and swallowed so the
    periodic scheduler keeps running.
    """
    try:
        # Context manager guarantees the session is closed even when the
        # request raises -- the previous version leaked the session on
        # ConnectionError because close() was only reached on success
        with requests.Session() as session:
            session.headers.update({AUTHENTICATION_HEADER_KEY: jwt})
            session.headers.update({
                SENT_FROM_IP_HEADER_KEY:
                    ip_provider.get_ip() + ":" + str(controller_service_port)})
            response = session.post(SERVER_ADDRESS + CONNECTOR_SERVICE_ADDRESS)
            if response.status_code != 202:
                logging.warning(
                    "Error status code returned while updating last online time, status code {}".format(
                        response.status_code))
            else:
                logging.info("Updating last online time finished successfully")
    except requests.exceptions.ConnectionError:
        logging.warning("Connection error while updating last online time")
| z13z/Kiosks | kiosk-worker/alive.py | alive.py | py | 1,645 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.getenv",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "schedule.every",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "schedule.run_pending",
"lin... |
11399653224 |
"""
Fits, etc. to extracted spectra
"""
import os
import time
import warnings
import numpy as np
import scipy.ndimage as nd
from scipy.optimize import nnls
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
from matplotlib.gridspec import GridSpec
import astropy.io.fits as pyfits
from grizli import utils
utils.set_warnings()
# Pre-compute 3.3 micron PAH emission templates on a fixed log-wavelength
# grid; used as optional line components in template fitting.  Failure is
# non-fatal: the module still imports with an empty template dictionary.
try:
    import eazy
    wave = np.exp(np.arange(np.log(2.4), np.log(4.5), 1./4000))*1.e4
    _temp = utils.pah33(wave)
    PAH_TEMPLATES = {}
    for t in _temp:
        # Skip the 3.47 micron component
        if '3.47' in t:
            continue
        _tp = _temp[t]
        PAH_TEMPLATES[t] = eazy.templates.Template(name=t, arrays=(_tp.wave, _tp.flux))
# NOTE(review): bare `except` also swallows KeyboardInterrupt/SystemExit;
# `except Exception` would be safer here
except:
    print('Failed to initialize PAH_TEMPLATES')
    PAH_TEMPLATES = {}
import grizli.utils_c
import astropy.units as u
import eazy.igm
igm = eazy.igm.Inoue14()
from . import drizzle
from . import utils as msautils
# Global multiplicative factor applied to spectrum uncertainties
SCALE_UNCERTAINTY = 1.0
# try:
#     from prospect.utils.smoothing import smoothspec
# except (FileNotFoundError, TypeError):
#     if 'SPS_HOME' not in os.environ:
#         sps_home = 'xxxxdummyxxxx' #os.path.dirname(__file__)
#         print(f'msaexp: setting environment variable SPS_HOME={sps_home} '
#         'to be able to import prospect.utils.smoothing')
#         os.environ['SPS_HOME'] = sps_home
# Use FFT-based convolution in the sedpy/prospector smoothing helpers
FFTSMOOTH = False
__all__ = ["fit_redshift", "fit_redshift_grid", "plot_spectrum",
           "read_spectrum", "calc_uncertainty_scale",
           "SpectrumSampler"]
def test():
    """
    Development scratch routine: reload the resampling modules, fit the
    'highO32' line complex plus spline continuum to a local example spectrum
    over a small redshift grid, and rebuild the best-fit model.

    Depends on local data files ('macsj0647_1169.v1.spec.fits',
    'templates/sfhz/fsps_4590.fits') and module reload side effects; not
    part of the public API.
    """
    from importlib import reload
    import msaexp.spectrum
    from tqdm import tqdm
    import msaexp.resample_numba
    from grizli import utils
    # Reload twice so cross-module references pick up the fresh modules
    reload(msaexp.resample_numba); reload(msaexp.spectrum)
    reload(msaexp.resample_numba); reload(msaexp.spectrum)
    from msaexp.spectrum import SpectrumSampler
    import eazy.templates
    self = SpectrumSampler('macsj0647_1169.v1.spec.fits')
    t = eazy.templates.Template('templates/sfhz/fsps_4590.fits')
    z = 4.2418
    res = self.resample_eazy_template(t, z=z)
    line = self.resample_eazy_template(t, z=z)
    # Line wavelengths / ratios tables from grizli
    lw, lr = utils.get_line_wavelengths()
    k = 'highO32'
    # Redshift grid centered on the nominal redshift
    zg = np.linspace(z-0.1, z+0.1, 256)
    chi2 = zg*0.
    # Spline continuum (13 df) and low-order line-scaling splines (3 df)
    bspl = self.bspline_array(nspline=13, log=True)
    bspl2 = self.bspline_array(nspline=3, log=True)
    scale_disp = 1.2
    velocity_sigma = 100
    for i, zi in tqdm(enumerate(zg)):
        # NOTE(review): `fast_emission_line` does not accept `nsig` --
        # this call would raise TypeError; `emission_line` was likely meant
        lines = [self.fast_emission_line(w*(1+zi)/1.e4,
                                         line_flux=r,
                                         scale_disp=scale_disp,
                                         velocity_sigma=velocity_sigma,
                                         nsig=4)
                 for w, r in zip(lw[k], lr[k])]
        # Design matrix: summed line complex scaled by bspl2 + continuum bspl
        A = np.vstack([np.array(lines).sum(axis=0)*bspl2] + [bspl])
        Ax = (A / self.spec['full_err'])
        yx = self.spec['flux'] / self.spec['full_err']
        # Weighted least squares on the valid pixels
        x = np.linalg.lstsq(Ax[:,self.valid].T, yx[self.valid].data, rcond=None)
        model = A.T.dot(x[0])
        resid = (self.spec['flux'] - model)/self.spec['full_err']
        chi2[i] = (resid[self.valid]**2).sum()
    # Rebuild the model at the chi2-minimum redshift
    zi = zg[np.argmin(chi2)]
    lines = [self.fast_emission_line(w*(1+zi)/1.e4,
                                     line_flux=r,
                                     scale_disp=scale_disp,
                                     velocity_sigma=velocity_sigma,
                                     nsig=4)
             for w, r in zip(lw[k], lr[k])]
    A = np.vstack([np.array(lines).sum(axis=0)*bspl2] + [bspl])
    Ax = (A / self.spec['full_err'])
    yx = self.spec['flux'] / self.spec['full_err']
    x = np.linalg.lstsq(Ax[:,self.valid].T, yx[self.valid].data, rcond=None)
    model = A.T.dot(x[0])
class SpectrumSampler(object):
    """
    Helper object for sampling templates onto the wavelength grid of an
    observed spectrum
    """
    # 1D spectrum table, populated in `initialize_spec`
    spec = {}
    # Observed wavelengths, microns
    spec_wobs = None
    # Tabulated spectral resolution R = lambda/dlambda, FWHM
    spec_R_fwhm = None
    # Boolean mask of valid 1D data
    valid = None

    def __init__(self, spec_input, **kwargs):
        """
        Helper functions for sampling templates onto the wavelength grid
        of an observed spectrum

        Parameters
        ----------
        spec_input : str, `~astropy.io.fits.HDUList`
            - `str` : spectrum filename, usually `[root].spec.fits`
            - `~astropy.io.fits.HDUList` : FITS data

        Attributes
        ----------
        resample_func : func
            Template resampling function, from
            `msaexp.resample_template_numba.msaexp.resample_numba` if possible and
            `msaexp.resample.resample_template` otherwise
        sample_line_func : func
            Emission line function, from
            `msaexp.resample_template_numba.msaexp.sample_gaussian_line_numba` if
            possible and `msaexp.resample.sample_line_func` otherwise
        spec : `~astropy.table.Table`
            1D spectrum table from the `SPEC1D HDU of ``file``
        spec_wobs : array-like
            Observed wavelengths, microns
        spec_R_fwhm : array-like
            Tabulated spectral resolution `R = lambda / dlambda`, assumed to be
            defined as FWHM
        valid : array-like
            Boolean array of valid 1D data
        """
        # Prefer the numba-accelerated implementations when available
        try:
            from .resample_numba import resample_template_numba as resample_func
            from .resample_numba import sample_gaussian_line_numba as sample_line_func
        except ImportError:
            from .resample import resample_template as resample_func
            from .resample import sample_gaussian_line as sample_line_func

        self.resample_func = resample_func
        self.sample_line_func = sample_line_func

        self.initialize_spec(spec_input)
        self.initialize_emission_line()

    def __getitem__(self, key):
        """
        Return column of the `spec` table
        """
        return self.spec[key]

    @property
    def meta(self):
        """
        Metadata of `spec` table
        """
        # NOTE: a second, redundant definition of this property that shadowed
        # this one was removed
        return self.spec.meta

    def initialize_emission_line(self, nsamp=64):
        """
        Initialize normalized delta-function template used by
        `emission_line`: a (2*nsamp+1)-point grid spanning +/-10% around
        unit wavelength with unit integrated flux
        """
        self.xline = np.linspace(-nsamp, nsamp, 2*nsamp+1)/nsamp*0.1+1
        self.yline = self.xline*0.
        self.yline[nsamp] = 1
        # Normalize to unit integral
        self.yline /= np.trapz(self.yline, self.xline)

    def initialize_spec(self, spec_input, **kwargs):
        """
        Read spectrum data and initialize attributes

        Parameters
        ----------
        spec_input : str, `~astropy.io.fits.HDUList`
            Filename, usually `[root].spec.fits`, or open FITS data
        kwargs : dict
            Keyword arguments passed to `msaexp.spectrum.read_spectrum`
        """
        self.spec_input = spec_input

        # `file` is only set when reading from a filename
        if isinstance(spec_input, str):
            self.file = spec_input
        else:
            self.file = None

        self.spec = read_spectrum(spec_input, **kwargs)
        self.spec_wobs = self.spec['wave'].astype(np.float32)
        self.spec_R_fwhm = self.spec['R'].astype(np.float32)
        self.valid = np.isfinite(self.spec['flux']/self.spec['full_err'])

    def resample_eazy_template(self, template, z=0, scale_disp=1.0, velocity_sigma=100., fnu=True, nsig=4):
        """
        Smooth and resample an `eazy.templates.Template` object onto the observed
        wavelength grid of a spectrum

        Parameters
        ----------
        template : `eazy.templates.Template`
            Template object
        z : float
            Redshift
        scale_disp : float
            Factor multiplied to the tabulated spectral resolution before sampling
        velocity_sigma : float
            Gaussian velocity broadening factor, km/s
        fnu : bool
            Return resampled template in f-nu flux densities
        nsig : int
            Number of standard deviations to sample for the convolution

        Returns
        -------
        res : array-like
            Template flux density smoothed and resampled at the spectrum wavelengths
        """
        templ_wobs = template.wave.astype(np.float32)*(1+z)/1.e4
        if fnu:
            templ_flux = template.flux_fnu(z=z).astype(np.float32)
        else:
            templ_flux = template.flux_flam(z=z).astype(np.float32)

        res = self.resample_func(self.spec_wobs,
                                 self.spec_R_fwhm*scale_disp,
                                 templ_wobs,
                                 templ_flux,
                                 velocity_sigma=velocity_sigma,
                                 nsig=nsig)
        return res

    def emission_line(self, line_um, line_flux=1, scale_disp=1.0, velocity_sigma=100., nsig=4):
        """
        Make an emission line template - *deprecated in favor of*
        `~msaexp.spectrum.SpectrumSampler.fast_emission_line`

        Parameters
        ----------
        line_um : float
            Line center, microns
        line_flux : float
            Line normalization
        scale_disp : float
            Factor by which to scale the tabulated resolution FWHM curve
        velocity_sigma : float
            Velocity sigma width in km/s
        nsig : int
            Number of sigmas of the convolution kernel to sample

        Returns
        -------
        res : array-like
            Gaussian emission line sampled at the spectrum wavelengths
        """
        # Resample the normalized delta function centered at `line_um`
        res = self.resample_func(self.spec_wobs,
                                 self.spec_R_fwhm*scale_disp,
                                 self.xline*line_um,
                                 self.yline,
                                 velocity_sigma=velocity_sigma,
                                 nsig=nsig)
        return res*line_flux/line_um

    def fast_emission_line(self, line_um, line_flux=1, scale_disp=1.0, velocity_sigma=100.):
        """
        Make an emission line template with numerically correct pixel integration
        function

        Parameters
        ----------
        line_um : float
            Line center, microns
        line_flux : float
            Line normalization
        scale_disp : float
            Factor by which to scale the tabulated resolution FWHM curve
        velocity_sigma : float
            Velocity sigma width in km/s

        Returns
        -------
        res : array-like
            Gaussian emission line sampled at the spectrum wavelengths
        """
        res = self.sample_line_func(self.spec_wobs,
                                    self.spec_R_fwhm*scale_disp,
                                    line_um,
                                    line_flux=line_flux,
                                    velocity_sigma=velocity_sigma,
                                    )
        return res

    def bspline_array(self, nspline=13, log=False, get_matrix=True):
        """
        Initialize bspline templates for continuum fits

        Parameters
        ----------
        nspline : int
            Number of spline functions to sample across the wavelength range
        log : bool
            Sample in log(wavelength)
        get_matrix : bool
            If true, return array data.  Otherwise, return template objects

        Returns
        -------
        bspl : array-like
            bspline data, depending on ``get_matrix``
        """
        if get_matrix:
            bspl = utils.bspline_templates(wave=self.spec_wobs*1.e4,
                                           degree=3,
                                           df=nspline,
                                           log=log,
                                           get_matrix=get_matrix
                                           )
            # Transpose so splines are along the first axis
            bspl = bspl.T
        else:
            bspl = utils.bspline_templates(wave=self.spec_wobs*1.e4,
                                           degree=3,
                                           df=nspline,
                                           log=log,
                                           get_matrix=get_matrix
                                           )
        return bspl

    def redo_1d_extraction(self, **kwargs):
        """
        Redo 1D extraction from 2D arrays with `msaexp.drizzle.make_optimal_extraction`

        Parameters
        ----------
        kwargs : dict
            Keyword arguments passed to `msaexp.drizzle.make_optimal_extraction`

        Returns
        -------
        output : `~msaexp.spectrum.SpectrumSampler`
            A new `~msaexp.spectrum.SpectrumSampler` object

        Examples
        --------
        .. plot::
            :include-source:

            # Compare 1D extractions
            from msaexp import spectrum
            import matplotlib.pyplot as plt
            sp = spectrum.SpectrumSampler('https://s3.amazonaws.com/msaexp-nirspec/extractions/ceers-ddt-v1/ceers-ddt-v1_prism-clear_2750_1598.spec.fits')
            fig, axes = plt.subplots(2,1,figsize=(8,5), sharex=True, sharey=True)
            # Boxcar extraction, center pixel +/- 2 pix
            ax = axes[0]
            new = sp.redo_1d_extraction(ap_radius=2, bkg_offset=-6)
            ax.plot(sp['wave'], sp['flux'], alpha=0.5, label='Original optimal extraction')
            ax.plot(new['wave'], new['aper_flux'], alpha=0.5, label='Boxcar, y = 23 ± 2')
            ax.grid()
            ax.legend()
            # Extractions above and below the center
            ax = axes[1]
            low = sp.redo_1d_extraction(ap_center=21, ap_radius=1)
            hi = sp.redo_1d_extraction(ap_center=25, ap_radius=1)
            ax.plot(low['wave'], low['aper_flux']*1.5, alpha=0.5, label='Below, y = 21 ± 1', color='b')
            ax.plot(hi['wave'], hi['aper_flux']*3, alpha=0.5, label='Above, y = 25 ± 1', color='r')
            ax.set_xlim(0.9, 5.3)
            ax.grid()
            ax.legend()
            ax.set_xlabel(r'$\lambda$')
            for ax in axes:
                ax.set_ylabel(r'$\mu\mathrm{Jy}$')
            fig.tight_layout(pad=1)
        """
        if isinstance(self.spec_input, pyfits.HDUList):
            out_hdul = drizzle.extract_from_hdul(self.spec_input, **kwargs)
        else:
            with pyfits.open(self.file) as hdul:
                out_hdul = drizzle.extract_from_hdul(hdul, **kwargs)

        output = SpectrumSampler(out_hdul)
        return output

    def drizzled_hdu_figure(self, **kwargs):
        """
        Run `msaexp.utils.drizzled_hdu_figure` on array data

        Parameters
        ----------
        kwargs : dict
            Keyword arguments passed to `msaexp.utils.drizzled_hdu_figure`

        Returns
        -------
        fig : `~matplotlib.figure.Figure`
            Spectrum figure
        """
        if isinstance(self.spec_input, pyfits.HDUList):
            fig = msautils.drizzled_hdu_figure(self.spec_input, **kwargs)
        else:
            with pyfits.open(self.file) as hdul:
                fig = msautils.drizzled_hdu_figure(hdul, **kwargs)

        return fig
def smooth_template_disp_eazy(templ, wobs_um, disp, z, velocity_fwhm=80, scale_disp=1.3, flambda=True, with_igm=True):
    """
    Smooth a template with a wavelength-dependent dispersion function.
    *NB:* Not identical to the preferred
    `~msaexp.spectrum.SpectrumSampler.resample_eazy_template`
    Parameters
    ----------
    templ : `eazy.template.Template`
        Template object
    wobs_um : array-like
        Target observed-frame wavelengths, microns
    disp : table
        NIRSpec dispersion table with columns ``WAVELENGTH``, ``R``
    z : float
        Target redshift
    velocity_fwhm : float
        Velocity dispersion FWHM, km/s
    scale_disp : float
        Scale factor applied to ``disp['R']``
    flambda : bool
        Return smoothed template in units of f_lambda or f_nu.
    with_igm : bool
        NOTE(review): accepted for signature compatibility with the other
        ``smooth_template_disp*`` functions but not used here -- presumably
        IGM absorption is handled inside `templ.to_observed_frame`; confirm.
    Returns
    -------
    tsmooth : array-like
        Template convolved with spectral resolution + velocity dispersion.
        Same length as `wobs_um`
    """
    # Total FWHM in km/s: velocity and instrumental terms in quadrature
    dv = np.sqrt(velocity_fwhm**2 + (3.e5/disp['R']/scale_disp)**2)
    disp_ang = disp['WAVELENGTH']*1.e4
    # Gaussian sigma of the LSF in Angstroms
    dlam_ang = disp_ang*dv/3.e5/2.35
    def _lsf(wave):
        # LSF sigma (Angstroms) interpolated at `wave`, clipped at the
        # endpoints of the dispersion table
        return np.interp(wave,
                         disp_ang,
                         dlam_ang,
                         left=dlam_ang[0], right=dlam_ang[-1],
                         )
    if hasattr(wobs_um,'value'):
        # Strip units from a Quantity-like input
        wobs_ang = wobs_um.value*1.e4
    else:
        wobs_ang = wobs_um*1.e4
    flux_model = templ.to_observed_frame(z=z,
                                         lsf_func=_lsf,
                                         clip_wavelengths=None,
                                         wavelengths=wobs_ang,
                                         smoothspec_kwargs={'fftsmooth':FFTSMOOTH},
                                         )
    if flambda:
        flux_model = np.squeeze(flux_model.flux_flam())
    else:
        flux_model = np.squeeze(flux_model.flux_fnu())
    return flux_model
def smooth_template_disp_sedpy(templ, wobs_um, disp, z, velocity_fwhm=80, scale_disp=1.3, flambda=True, with_igm=True):
    """
    Smooth a template with a wavelength-dependent dispersion function using
    the `sedpy`/`prospector` LSF smoothing function

    Parameters
    ----------
    templ : `eazy.template.Template`
        Template object
    wobs_um : array-like
        Target observed-frame wavelengths, microns
    disp : table
        NIRSpec dispersion table with columns ``WAVELENGTH``, ``R``
    z : float
        Target redshift
    velocity_fwhm : float
        Velocity dispersion FWHM, km/s
    scale_disp : float
        Scale factor applied to ``disp['R']``
    flambda : bool
        Return smoothed template in units of f_lambda or f_nu.
    with_igm : bool
        Apply IGM absorption from the template object

    Returns
    -------
    tsmooth : array-like
        Template convolved with spectral resolution + velocity dispersion.
        Same length as `wobs_um`
    """
    from sedpy.smoothing import smoothspec

    # Observed-frame template wavelengths, Angstroms
    wave_obs = templ.wave*(1+z)

    # Template flux in the requested units
    if flambda:
        flux_obs = templ.flux_flam(z=z)
    else:
        flux_obs = templ.flux_fnu(z=z)

    if with_igm:
        flux_obs = flux_obs * templ.igm_absorption(z)

    # Keep a slightly padded window around the output wavelength range
    in_window = (wave_obs > wobs_um[0]*1.e4*0.95)
    in_window &= (wave_obs < wobs_um[-1]*1.e4*1.05)
    wave_obs = wave_obs[in_window]
    flux_obs = flux_obs[in_window]

    # Instrumental resolution at the template wavelengths, clipped at the
    # endpoints of the dispersion table
    res_curve = np.interp(wave_obs, disp['WAVELENGTH']*1.e4, disp['R'],
                          left=disp['R'][0], right=disp['R'][-1])*scale_disp

    # Total FWHM in km/s: velocity + instrumental terms in quadrature
    dv = np.sqrt(velocity_fwhm**2 + (3.e5/res_curve)**2)

    # Gaussian sigma of the LSF in Angstroms
    sigma_ang = wave_obs*dv/3.e5/2.35

    def _lsf(wave):
        return np.interp(wave, wave_obs, sigma_ang)

    return smoothspec(wave_obs, flux_obs,
                      smoothtype='lsf', lsf=_lsf,
                      outwave=wobs_um*1.e4,
                      fftsmooth=FFTSMOOTH,
                      )
def smooth_template_disp(templ, wobs_um, disp, z, velocity_fwhm=80, scale_disp=1.3, flambda=True, with_igm=True):
    """
    Smooth a template with a wavelength-dependent dispersion function by
    explicit Gaussian integration over the template grid

    Parameters
    ----------
    templ : `eazy.template.Template`
        Template object
    wobs_um : array-like
        Target observed-frame wavelengths, microns
    disp : table
        NIRSpec dispersion table with columns ``WAVELENGTH``, ``R``
    z : float
        Target redshift
    velocity_fwhm : float
        Velocity dispersion FWHM, km/s
    scale_disp : float
        Scale factor applied to ``disp['R']``
    flambda : bool
        Return smoothed template in units of f_lambda or f_nu.
    with_igm : bool
        Apply IGM absorption from the template object

    Returns
    -------
    tsmooth : array-like
        Template convolved with spectral resolution + velocity dispersion.
        Same length as `wobs_um`
    """
    # Observed-frame template wavelengths, microns
    wave_t = templ.wave*(1+z)/1.e4

    flux_t = templ.flux_flam(z=z) if flambda else templ.flux_fnu(z=z)

    if with_igm:
        flux_t = flux_t * templ.igm_absorption(z)

    # Instrumental resolution at the template wavelengths
    res_t = np.interp(wave_t, disp['WAVELENGTH'], disp['R'])*scale_disp

    # FWHM in microns: instrumental + velocity components in quadrature
    fwhm_t = np.sqrt((wave_t/res_t)**2 + (velocity_fwhm/3.e5*wave_t)**2)

    # Don't let sigma fall below half the local template grid spacing
    sigma_t = np.maximum(fwhm_t/2.35, 0.5*np.gradient(wave_t))

    # Pairwise offsets between output pixels and template samples
    dx = wobs_um[:,np.newaxis] - wave_t[np.newaxis,:]
    kernel = 1./np.sqrt(2*np.pi*sigma_t**2)*np.exp(-dx**2/2/sigma_t**2)

    # Integrate the Gaussian-weighted template for each output pixel
    return np.trapz(kernel*flux_t, x=wave_t, axis=1)
# Default backend used for dispersion smoothing of templates
SMOOTH_TEMPLATE_DISP_FUNC = smooth_template_disp_eazy
def fit_redshift(file='jw02767005001-02-clear-prism-nrs2-2767_11027.spec.fits', z0=[0.2, 10], zstep=None, eazy_templates=None, nspline=None, scale_disp=1.3, vel_width=100, Rline=None, is_prism=False, use_full_dispersion=False, ranges=None, sys_err=0.02, **kwargs):
    """
    Fit spectrum for the redshift
    Parameters
    ----------
    file : str
        Spectrum filename
    z0 : (float, float)
        Redshift range
    zstep : (float, float)
        Step sizes in `dz/(1+z)`
    eazy_templates : list, None
        List of `eazy.templates.Template` objects.  If not provided, just use
        dummy spline continuum and emission line templates
    nspline : int
        Number of splines to use for dummy continuum
    scale_disp : float
        Scale factor of nominal dispersion files, i.e., `scale_disp > 1`
        *increases* the spectral resolution
    vel_width : float
        Velocity width the emission line templates
    Rline : float
        Original spectral resolution used to sample the line templates
    is_prism : bool
        Is the spectrum from the prism?
    use_full_dispersion : bool
        Convolve `eazy_templates` with the full wavelength-dependent
        dispersion function
    ranges : list of tuples
        Wavelength ranges for the subplots
    sys_err : float
        Systematic uncertainty added in quadrature with nominal uncertainties
    kwargs : dict
        Additional keyword arguments passed through to `read_spectrum`,
        `fit_redshift_grid` and `plot_spectrum`
    Returns
    -------
    fig : Figure
        Diagnostic figure
    sp : `~astropy.table.Table`
        A copy of the 1D spectrum as fit with additional columns describing the
        best-fit templates
    data : dict
        Fit metadata
    """
    import yaml

    # Limit float precision in the output YAML files
    def float_representer(dumper, value):
        text = '{0:.6f}'.format(value)
        return dumper.represent_scalar(u'tag:yaml.org,2002:float', text)

    yaml.add_representer(float, float_representer)

    #is_prism |= ('clear' in file)
    spec = read_spectrum(file, sys_err=sys_err, **kwargs)
    is_prism |= spec.grating in ['prism']

    # Output file root derived from the input filename
    if 'spec.fits' in file:
        froot = file.split('.spec.fits')[0]
    else:
        froot = file.split('.fits')[0]

    # Default grid steps: coarse first pass, fine second pass
    if zstep is None:
        if (is_prism):
            step0 = 0.002
            step1 = 0.0001
        else:
            step0 = 0.001
            step1 = 0.00002
    else:
        step0, step1 = zstep

    if Rline is None:
        if is_prism:
            Rline = 1000
        else:
            Rline = 5000

    # First pass
    zgrid = utils.log_zgrid(z0, step0)
    zg0, chi0 = fit_redshift_grid(file, zgrid=zgrid,
                                  line_complexes=False,
                                  vel_width=vel_width,
                                  scale_disp=scale_disp,
                                  eazy_templates=eazy_templates,
                                  Rline=Rline,
                                  use_full_dispersion=use_full_dispersion,
                                  sys_err=sys_err,
                                  **kwargs)

    zbest0 = zg0[np.argmin(chi0)]

    # Second pass: refined grid around the coarse minimum
    zgrid = utils.log_zgrid(zbest0 + np.array([-0.005, 0.005])*(1+zbest0),
                            step1)
    zg1, chi1 = fit_redshift_grid(file, zgrid=zgrid,
                                  line_complexes=False,
                                  vel_width=vel_width,
                                  scale_disp=scale_disp,
                                  eazy_templates=eazy_templates,
                                  Rline=Rline,
                                  use_full_dispersion=use_full_dispersion,
                                  sys_err=sys_err,
                                  **kwargs)

    zbest = zg1[np.argmin(chi1)]

    # Chi-squared vs. redshift diagnostic figure
    fz, az = plt.subplots(1,1,figsize=(6,4))
    az.plot(zg0, chi0)
    az.plot(zg1, chi1)
    az.set_ylim(chi1.min()-50, chi1.min() + 10**2)
    az.grid()
    az.set_xlabel('redshift')
    az.set_ylabel(r'$\chi^2$')
    az.set_title(os.path.basename(file))
    fz.tight_layout(pad=1)
    fz.savefig(froot+'.chi2.png')

    # Default subplot wavelength ranges and spline counts by mode
    if is_prism:
        if ranges is None:
            ranges = [(3427, 5308), (6250, 9700)]
        if nspline is None:
            nspline = 41
    else:
        if ranges is None:
            ranges = [(3680, 4400), (4861-50, 5008+50), (6490, 6760)]
        if nspline is None:
            nspline = 23

    fig, sp, data = plot_spectrum(file, z=zbest, show_cont=True,
                              draws=100, nspline=nspline,
                              figsize=(16, 8), vel_width=vel_width,
                              ranges=ranges, Rline=Rline,
                              scale_disp=scale_disp,
                              eazy_templates=eazy_templates,
                              use_full_dispersion=use_full_dispersion,
                              sys_err=sys_err,
                              **kwargs)

    # Also run a spline-only fit for comparison when templates were used
    if eazy_templates is not None:
        spl_fig, sp2, spl_data = plot_spectrum(file, z=zbest, show_cont=True,
                                  draws=100, nspline=nspline,
                                  figsize=(16, 8), vel_width=vel_width,
                                  ranges=ranges, Rline=Rline,
                                  scale_disp=scale_disp,
                                  eazy_templates=None,
                                  use_full_dispersion=use_full_dispersion,
                                  sys_err=sys_err,
                                  **kwargs)

        # Carry spline-fit products into the main metadata with `spl_` prefix
        for k in ['coeffs', 'covar', 'model', 'mline', 'fullchi2', 'contchi2']:
            if k in spl_data:
                data[f'spl_{k}'] = spl_data[k]

        spl_fig.savefig(froot+'.spl.png')

        sp['spl_model'] = sp2['model']

    sp['wave'].unit = u.micron
    sp['flux'].unit = u.microJansky

    sp.write(froot+'.spec.zfit.fits', overwrite=True)

    # Chi-squared grids saved separately
    zdata = {}
    zdata['zg0'] = zg0.tolist()
    zdata['chi0'] = chi0.tolist()
    zdata['zg1'] = zg1.tolist()
    zdata['chi1'] = chi1.tolist()

    data['dchi2'] = float(np.nanmedian(chi0) - np.nanmin(chi0))

    # Strip non-serializable / bulky entries before writing YAML
    for k in ['templates','spl_covar','covar']:
        if k in data:
            _ = data.pop(k)

    with open(froot+'.zfit.yaml', 'w') as fp:
        yaml.dump(zdata, stream=fp)

    with open(froot+'.yaml', 'w') as fp:
        yaml.dump(data, stream=fp)

    fig.savefig(froot+'.zfit.png')

    return fig, sp, data
# Names of hydrogen recombination lines (Balmer, Paschen, Brackett series)
# matching the keys of the grizli line wavelength tables
H_RECOMBINATION_LINES = ['Ha+NII', 'Ha','Hb','Hg','Hd',
                         'PaA','PaB','PaG','PaD','Pa8',
                         'BrA','BrB','BrG','BrD']
def make_templates(sampler, z, bspl={}, eazy_templates=None, vel_width=100, broad_width=4000, broad_lines=[], scale_disp=1.3, use_full_dispersion=False, disp=None, grating='prism', halpha_prism=['Ha+NII'], oiii=['OIII'], o4363=[], sii=['SII'], lorentz=False, with_pah=True, **kwargs):
    """
    Generate fitting templates
    Parameters
    ----------
    sampler : `~msaexp.spectrum.SpectrumSampler`
        Spectrum object that provides the observed wavelength grid, the
        valid-pixel mask and the line/template resampling functions
    z : float
        Redshift
    bspl : dict
        Spline templates for dummy continuum
    eazy_templates : list
        Optional list of `eazy.templates.Template` template objects to use in
        place of the spline + line templates
    vel_width : float
        Velocity width of the individual emission line templates
    broad_width : float
        Velocity width used for lines listed in ``broad_lines``
    broad_lines : list
        Line names to model with ``broad_width`` instead of ``vel_width``
    halpha_prism : ['Ha+NII'], ['Ha','NII']
        Line template names to use for Halpha and [NII], i.e., ``['Ha+NII']``
        fits with a fixed line ratio and `['Ha','NII']` fits them separately
        but with a fixed line ratio 6548:6584 = 1:3
    oiii : ['OIII'], ['OIII-4959','OIII-5007']
        Similar for [OIII]4959+5007, ``['OIII']`` fits as a doublet with fixed
        ratio 4959:5007 = 1:2.98 and ``['OIII-4949', 'OIII-5007']`` fits them
        independently.
    o4363 : [] or ['OIII-4363']
        How to fit [OIII]4363.
    sii : ['SII'], ['SII-6717','SII-6731']
        [SII] doublet
    lorentz : bool
        Use Lorentzian profile for lines
    with_pah : bool
        Include the precomputed 3.3 micron PAH templates when they fall in
        the observed wavelength range
    Returns
    -------
    templates : list
        List of the computed template objects
    tline : array
        Boolean list of which templates are line components
    _A : (NT, NWAVE) array
        Design matrix of templates interpolated at `wobs`
    """
    from grizli import utils

    wobs = sampler.spec_wobs
    wrest = wobs/(1+z)*1.e4
    wmask = sampler.valid

    # Wavelength limits of the valid data, microns
    wmin = wobs[wmask].min()
    wmax = wobs[wmask].max()

    templates = []
    tline = []

    if eazy_templates is None:
        # Spline continuum + individual emission line templates
        lw, lr = utils.get_line_wavelengths()

        _A = [bspl*1]
        for i in range(bspl.shape[0]):
            templates.append(f'spl {i}')
            tline.append(False)

        #templates = {}
        #for k in bspl:
        #    templates[k] = bspl[k]

        # templates = {}
        # Line lists depend on the grating and redshift
        if grating in ['prism']:
            hlines = ['Hb', 'Hg', 'Hd']

            if z > 4:
                # Lines resolved at bluer observed wavelengths
                oiii = ['OIII-4959','OIII-5007']
                hene = ['HeII-4687', 'NeIII-3867','HeI-3889']
                o4363 = ['OIII-4363']
            else:
                #oiii = ['OIII']
                hene = ['HeI-3889']
                #o4363 = []

            #sii = ['SII']
            #sii = ['SII-6717', 'SII-6731']
            hlines += halpha_prism + ['NeIII-3968']
            fuv = ['OIII-1663']
            oii_7320 = ['OII-7325']
            extra = []
        else:
            hlines = ['Hb', 'Hg', 'Hd','H8','H9', 'H10', 'H11', 'H12']
            hene = ['HeII-4687', 'NeIII-3867']
            o4363 = ['OIII-4363']
            oiii = ['OIII-4959','OIII-5007']
            sii = ['SII-6717', 'SII-6731']
            hlines += ['Ha', 'NII-6549', 'NII-6584']
            hlines += ['H7', 'NeIII-3968']
            fuv = ['OIII-1663', 'HeII-1640', 'CIV-1549']
            oii_7320 = ['OII-7323', 'OII-7332']
            extra = ['HeI-6680', 'SIII-6314']

        # Select lines that fall within the valid wavelength range
        line_names = []
        line_waves = []

        for l in [*hlines, *oiii, *o4363, 'OII',
                  *hene,
                  *sii,
                  *oii_7320,
                  'ArIII-7138', 'ArIII-7753', 'SIII-9068', 'SIII-9531',
                  'OI-6302', 'PaD', 'PaG', 'PaB', 'PaA', 'HeI-1083',
                  'BrA','BrB','BrG','BrD','PfB','PfG','PfD','PfE',
                  'Pa8','Pa9','Pa10',
                  'HeI-5877',
                  *fuv,
                  'CIII-1906', 'NIII-1750', 'Lya',
                  'MgII', 'NeV-3346', 'NeVI-3426',
                  'HeI-7065', 'HeI-8446',
                  *extra
                  ]:
            if l not in lw:
                continue

            lwi = lw[l][0]*(1+z)

            if lwi < wmin*1.e4:
                continue

            if lwi > wmax*1.e4:
                continue

            line_names.append(l)
            line_waves.append(lwi)

        # Build templates in order of increasing wavelength
        so = np.argsort(line_waves)
        line_waves = np.array(line_waves)[so]

        for iline in so:
            l = line_names[iline]
            lwi = lw[l][0]*(1+z)

            if lwi < wmin*1.e4:
                continue

            if lwi > wmax*1.e4:
                continue

            # print(l, lwi, disp_r)
            name = f'line {l}'

            # Sum the components of multiplets weighted by their line ratios
            for i, (lwi0, lri) in enumerate(zip(lw[l], lr[l])):
                lwi = lwi0*(1+z)/1.e4
                if l in broad_lines:
                    vel_i = broad_width
                else:
                    vel_i = vel_width

                line_i = sampler.fast_emission_line(lwi,
                                                    line_flux=lri/np.sum(lr[l]),
                                                    scale_disp=scale_disp,
                                                    velocity_sigma=vel_i,)
                if i == 0:
                    line_0 = line_i
                else:
                    line_0 += line_i

            _A.append(line_0/1.e4)
            templates.append(name)
            tline.append(True)

        if with_pah:
            # Include PAH 3.3um templates if the feature is in range
            xpah = 3.3*(1+z)
            if ((xpah > wmin) & (xpah < wmax)) | (0):
                for t in PAH_TEMPLATES:
                    tp = PAH_TEMPLATES[t]
                    tflam = sampler.resample_eazy_template(tp,
                                                   z=z,
                                                   velocity_sigma=vel_width,
                                                   scale_disp=scale_disp,
                                                   fnu=False)
                    _A.append(tflam)
                    templates.append(t)
                    tline.append(True)

        _A = np.vstack(_A)

        # Apply IGM attenuation to all templates
        # NOTE(review): `ll` is computed but never used -- candidate for
        # removal.  Also assumes `wobs` exposes a `.value` attribute
        # (Quantity-like); confirm against `SpectrumSampler.spec_wobs`.
        ll = wobs.value*1.e4/(1+z) < 1215.6
        igmz = igm.full_IGM(z, wobs.value*1.e4)
        _A *= np.maximum(igmz, 0.01)
    else:
        if isinstance(eazy_templates[0], dict) & (len(eazy_templates) == 2):
            # lw, lr dicts
            lw, lr = eazy_templates

            _A = [bspl*1]
            for i in range(bspl.shape[0]):
                templates.append(f'spl {i}')
                tline.append(False)

            # One template per entry of the supplied line list
            for l in lw:
                name = f'line {l}'
                line_0 = None
                for i, (lwi0, lri) in enumerate(zip(lw[l], lr[l])):
                    lwi = lwi0*(1+z)/1.e4
                    if lwi < wmin:
                        continue
                    elif lwi > wmax:
                        continue

                    if l in broad_lines:
                        vel_i = broad_width
                    else:
                        vel_i = vel_width

                    line_i = sampler.fast_emission_line(lwi,
                                                        line_flux=lri/np.sum(lr[l]),
                                                        scale_disp=scale_disp,
                                                        velocity_sigma=vel_i,)
                    if line_0 is None:
                        line_0 = line_i
                    else:
                        line_0 += line_i

                # Only keep the line if at least one component was in range
                if line_0 is not None:
                    _A.append(line_0/1.e4)
                    templates.append(name)
                    tline.append(True)

            _A = np.vstack(_A)

            # NOTE(review): `ll` unused here as well (see above)
            ll = wobs.value*1.e4/(1+z) < 1215.6
            igmz = igm.full_IGM(z, wobs.value*1.e4)
            _A *= np.maximum(igmz, 0.01)

        elif len(eazy_templates) == 1:
            # Scale single template by spline
            t = eazy_templates[0]
            for i in range(bspl.shape[0]):
                templates.append(f'{t.name} spl {i}')
                tline.append(False)

            tflam = sampler.resample_eazy_template(t,
                                                   z=z,
                                                   velocity_sigma=vel_width,
                                                   scale_disp=scale_disp,
                                                   fnu=False)

            _A = np.vstack([bspl*tflam])

            # NOTE(review): `ll` unused here as well (see above)
            ll = wobs.value*1.e4/(1+z) < 1215.6
            igmz = igm.full_IGM(z, wobs.value*1.e4)
            _A *= np.maximum(igmz, 0.01)
        else:
            # One design-matrix row per supplied template; no line components
            templates = []
            tline = []
            _A = []
            for i, t in enumerate(eazy_templates):
                tflam = sampler.resample_eazy_template(t,
                                                       z=z,
                                                       velocity_sigma=vel_width,
                                                       scale_disp=scale_disp,
                                                       fnu=False)

                _A.append(tflam)
                templates.append(t.name)
                tline.append(False)

            _A = np.vstack(_A)

    return templates, np.array(tline), _A
def old_make_templates(wobs, z, wfull, wmask=None, bspl={}, eazy_templates=None, vel_width=100, broad_width=4000, broad_lines=[], scale_disp=1.3, use_full_dispersion=False, disp=None, grating='prism', halpha_prism=['Ha+NII'], oiii=['OIII'], o4363=[], sii=['SII'], lorentz=False, **kwargs):
    """
    Generate fitting templates
    wobs : array
        Observed-frame wavelengths of the spectrum to fit, microns
    z : float
        Redshift
    wfull : array
        Full wavelength array of the templates
    wmask : array-like
        Boolean mask on `wobs` for valid data
    bspl : dict
        Spline templates for dummy continuum
    eazy_templates : list
        Optional list of `eazy.templates.Template` template objects to use in
        place of the spline + line templates
    vel_width : float
        Velocity width of the individual emission line templates
    halpha_prism : ['Ha+NII'], ['Ha','NII']
        Line template names to use for Halpha and [NII], i.e., ``['Ha+NII']``
        fits with a fixed line ratio and `['Ha','NII']` fits them separately
        but with a fixed line ratio 6548:6584 = 1:3
    oiii : ['OIII'], ['OIII-4959','OIII-5007']
        Similar for [OIII]4959+5007, ``['OIII']`` fits as a doublet with fixed
        ratio 4959:5007 = 1:2.98 and ``['OIII-4949', 'OIII-5007']`` fits them
        independently.
    o4363 : [] or ['OIII-4363']
        How to fit [OIII]4363.
    sii : ['SII'], ['SII-6717','SII-6731']
        [SII] doublet
    lorentz : bool
        Use Lorentzian profile for lines
    Returns
    -------
    templates : list
        List of the computed template objects
    tline : array
        Boolean list of which templates are line components
    _A : (NT, NWAVE) array
        Design matrix of templates interpolated at `wobs`
    """
    from grizli import utils
    # Rest-frame line wavelengths (Angstroms) and relative intensity ratios
    lw, lr = utils.get_line_wavelengths()
    wrest = wobs/(1+z)*1.e4
    if wmask is None:
        wmask = np.isfinite(wobs)
    # Wavelength coverage of the valid data, microns
    wmin = wobs[wmask].min()
    wmax = wobs[wmask].max()
    if eazy_templates is None:
        # Spline continuum + individual Gaussian/Lorentzian line templates
        templates = {}
        for k in bspl:
            templates[k] = bspl[k]
        # templates = {}
        if grating in ['prism']:
            # Low-resolution prism: blended line complexes unless the lines
            # are sufficiently separated at this redshift
            hlines = ['Hb', 'Hg', 'Hd']
            if z > 4:
                oiii = ['OIII-4959','OIII-5007']
                hene = ['HeII-4687', 'NeIII-3867','HeI-3889']
                o4363 = ['OIII-4363']
            else:
                #oiii = ['OIII']
                hene = ['HeI-3889']
                #o4363 = []
                #sii = ['SII']
                #sii = ['SII-6717', 'SII-6731']
            hlines += halpha_prism + ['NeIII-3968']
            fuv = ['OIII-1663']
            oii_7320 = ['OII-7325']
            extra = []
        else:
            # Higher-resolution gratings: fit doublet components separately
            hlines = ['Hb', 'Hg', 'Hd','H8','H9', 'H10', 'H11', 'H12']
            hene = ['HeII-4687', 'NeIII-3867']
            o4363 = ['OIII-4363']
            oiii = ['OIII-4959','OIII-5007']
            sii = ['SII-6717', 'SII-6731']
            hlines += ['Ha', 'NII-6549', 'NII-6584']
            hlines += ['H7', 'NeIII-3968']
            fuv = ['OIII-1663', 'HeII-1640', 'CIV-1549']
            oii_7320 = ['OII-7323', 'OII-7332']
            extra = ['HeI-6680', 'SIII-6314']
        for l in [*hlines, *oiii, *o4363, 'OII',
                  *hene,
                  *sii,
                  *oii_7320,
                  'ArIII-7138', 'ArIII-7753', 'SIII-9068', 'SIII-9531',
                  'OI-6302', 'PaD', 'PaG', 'PaB', 'PaA', 'HeI-1083',
                  'BrA','BrB','BrG','BrD','PfB','PfG','PfD','PfE',
                  'Pa8','Pa9','Pa10',
                  'HeI-5877',
                  *fuv,
                  'CIII-1906', 'NIII-1750', 'Lya',
                  'MgII', 'NeV-3346', 'NeVI-3426',
                  'HeI-7065', 'HeI-8446',
                  *extra
                  ]:
            if l not in lw:
                continue
            # Skip lines that fall outside the valid observed bandpass
            lwi = lw[l][0]*(1+z)
            if lwi < wmin*1.e4:
                continue
            if lwi > wmax*1.e4:
                continue
            # print(l, lwi, disp_r)
            name = f'line {l}'
            # Sum multi-component complexes into a single template with
            # the fixed intensity ratios from `lr`
            for i, (lwi0, lri) in enumerate(zip(lw[l], lr[l])):
                lwi = lwi0*(1+z)
                # Instrumental resolution at this wavelength
                disp_r = np.interp(lwi/1.e4, disp['WAVELENGTH'],
                                   disp['R'])*scale_disp
                if l in broad_lines:
                    vel_i = broad_width
                else:
                    vel_i = vel_width
                # Instrumental + intrinsic velocity widths added in quadrature
                fwhm_ang = np.sqrt((lwi/disp_r)**2 + (vel_i/3.e5*lwi)**2)
                # print(f'Add component: {l} {lwi0} {lri}')
                if i == 0:
                    templates[name] = utils.SpectrumTemplate(wave=wfull,
                                                             flux=None,
                                                             central_wave=lwi,
                                                             fwhm=fwhm_ang,
                                                             name=name,
                                                             lorentz=lorentz)
                    templates[name].flux *= lri/np.sum(lr[l])
                else:
                    templates[name].flux += utils.SpectrumTemplate(wave=wfull,
                                                                   flux=None,
                                                                   central_wave=lwi,
                                                                   fwhm=fwhm_ang,
                                                                   lorentz=lorentz,
                                                                   name=name).flux*lri/np.sum(lr[l])
        # Resample template set onto the observed wavelength grid
        _, _A, tline = utils.array_templates(templates,
                                             max_R=10000,
                                             wave=wobs.astype(float)*1.e4,
                                             apply_igm=False)
        # NOTE(review): `ll` is computed but never used
        ll = wobs.value*1.e4/(1+z) < 1215.6
        # Attenuate templates by the IGM transmission (floored at 1%)
        igmz = igm.full_IGM(z, wobs.value*1.e4)
        _A *= np.maximum(igmz, 0.01)
    else:
        templates = {}
        if use_full_dispersion:
            # Smooth each eazy template with the full wavelength-dependent
            # instrumental dispersion
            _A = []
            tline = np.zeros(len(eazy_templates), dtype=bool)
            for i, t in enumerate(eazy_templates):
                templates[t.name] = 0.
                tflam = SMOOTH_TEMPLATE_DISP_FUNC(t,
                                                  wobs,
                                                  disp,
                                                  z,
                                                  velocity_fwhm=vel_width,
                                                  scale_disp=scale_disp,
                                                  flambda=True)
                _A.append(tflam)
                tline[i] = t.name.startswith('line ')
            _A = np.array(_A)
        else:
            for t in eazy_templates:
                tflam = t.flux_flam(z=z)
                templates[t.name] = utils.SpectrumTemplate(wave=t.wave,
                                                           flux=tflam, name=t.name)
            # ToDo: smooth with dispersion
            _, _A, tline = utils.array_templates(templates,
                                                 max_R=10000,
                                                 wave=wrest,
                                                 z=z, apply_igm=True)
            # Light smoothing in lieu of the full dispersion convolution
            for i in range(len(templates)):
                _A[i,:] = nd.gaussian_filter(_A[i,:], 0.5)
    return templates, tline, _A
def fit_redshift_grid(file='jw02767005001-02-clear-prism-nrs2-2767_11027.spec.fits', zgrid=None, vel_width=100, bkg=None, scale_disp=1.3, nspline=27, line_complexes=True, Rline=1000, eazy_templates=None, use_full_dispersion=True, sys_err=0.02, use_aper_columns=False, **kwargs):
    """
    Fit redshifts on a grid
    Parameters
    ----------
    zgrid : array
        Redshifts to fit
    others : see `msaexp.spectrum.fit_redshift`
    Returns
    -------
    zgrid : array
        Copy of `zgrid`
    chi2 : array
        Chi-squared of the template fits at redshifts from `zgrid`
    """
    import time
    import os
    from tqdm import tqdm
    import astropy.io.fits as pyfits
    import numpy as np
    from grizli import utils
    import grizli.utils_c
    import astropy.units as u
    import eazy.igm
    import matplotlib.pyplot as plt
    #spec = read_spectrum(file, sys_err=sys_err)
    sampler = SpectrumSampler(file, **kwargs)
    spec = sampler.spec
    # Prefer aperture-extraction columns when requested and available;
    # `use_aper_columns > 1` additionally applies the aperture correction
    if (use_aper_columns > 0) & ('aper_flux' in spec.colnames):
        if ('aper_corr' in spec.colnames) & (use_aper_columns > 1):
            ap_corr = spec['aper_corr']*1
        else:
            ap_corr = 1
        flam = spec['aper_flux']*spec['to_flam']*ap_corr
        eflam = spec['aper_full_err']*spec['to_flam']*ap_corr
    else:
        flam = spec['flux']*spec['to_flam']
        eflam = spec['full_err']*spec['to_flam']
    wobs = spec['wave']
    mask = spec['valid']
    # Invalid pixels are excluded from the fit and blanked for plotting
    flam[~mask] = np.nan
    eflam[~mask] = np.nan
    #spline = utils.bspline_templates(wave=spec['wave']*1.e4, degree=3, df=nspline)
    bspl = sampler.bspline_array(nspline=nspline, get_matrix=True)
    # NOTE(review): assumes `zgrid` is an array; the default None would fail here
    chi2 = zgrid*0.
    #bspl = utils.bspline_templates(wave=spec['wave']*1.e4, degree=3, df=nspline) #, log=True)
    # w0 = utils.log_zgrid([spec['wave'].min()*1.e4,
    #                  spec['wave'].max()*1.e4], 1./Rline)
    for iz, z in tqdm(enumerate(zgrid)):
        templates, tline, _A = make_templates(sampler, z,
                                              bspl=bspl,
                                              eazy_templates=eazy_templates,
                                              vel_width=vel_width,
                                              scale_disp=scale_disp,
                                              use_full_dispersion=use_full_dispersion,
                                              disp=spec.disp,
                                              grating=spec.grating,
                                              **kwargs,
                                              )
        # Only fit templates with nonzero coverage in the valid pixels
        okt = _A[:,mask].sum(axis=1) > 0
        # Inverse-variance weighted design matrix and data vector
        _Ax = _A[okt,:]/eflam
        _yx = flam/eflam
        # Spline+line templates can go negative -> least squares; physical
        # eazy templates are constrained non-negative
        if eazy_templates is None:
            _x = np.linalg.lstsq(_Ax[:,mask].T,
                                 _yx[mask], rcond=None)
        else:
            _x = nnls(_Ax[:,mask].T, _yx[mask])
        coeffs = np.zeros(_A.shape[0])
        coeffs[okt] = _x[0]
        _model = _A.T.dot(coeffs)
        chi = (flam - _model) / eflam
        chi2_i = (chi[mask]**2).sum()
        # print(z, chi2_i)
        chi2[iz] = chi2_i
    return zgrid, chi2
def calc_uncertainty_scale(file=None, data=None, method='bfgs', order=3, update=True, verbose=True, init=(1, 3), **kwargs):
    """
    Compute a polynomial scaling of the spectrum uncertainties. The procedure is to fit for
    coefficients of a polynomial multiplied to the `err` array of the spectrum such that
    `(flux - model)/(err*scl)` residuals are `N(0,1)`
    Parameters
    ----------
    file : str
        Spectrum filename
    data : tuple
        Precomputed outputs from `msaexp.spectrum.plot_spectrum`
    method : str
        Optimization method for `scipy.optimize.minimize`
    order : int
        Degree of the correction polynomial
    update : bool
        Update the global `msaexp.spectrum.SCALE_UNCERTAINTY` array with the fit result
    verbose : bool
        Print status messages
    init : (float, float)
        Masking for the fit initialization
    kwargs : dict
        Keyword arguments for `msaexp.spectrum.plot_spectrum` if `data` not specified
    Returns
    -------
    spec : `~astropy.table.Table`
        The spectrum as fit
    escale : array
        The wavelength-dependent scaling of the uncertainties
    res : object
        Output from `scipy.optimize.minimize`
    """
    from scipy.stats import norm
    from scipy.optimize import minimize
    global SCALE_UNCERTAINTY
    # Reset to unity so that reads of the global during the fit are neutral
    SCALE_UNCERTAINTY = 1.0
    if data is None:
        spec, spl, _ = plot_spectrum(file=file, eazy_templates=None,
                                     get_spl_templates=True,
                                     **kwargs
                                     )
    else:
        spec, spl = data
    # Valid pixels: positive uncertainties, nonzero finite fluxes
    ok = (spec['err'] > 0) & (spec['flux'] != 0)
    ok &= np.isfinite(spec['err']+spec['flux'])
    if init is not None:
        # Initialization pass: scale errors by init[0], fit the spline model
        # and clip residual outliers beyond init[1] sigma
        err = init[0]*spec['err']
        if 'escale' in spec.colnames:
            err *= spec['escale']
        err = np.sqrt(err**2 + (0.02*spec['flux'])**2)
        _Ax = spl/err
        _yx = spec['flux']/err
        _x = np.linalg.lstsq(_Ax[:,ok].T, _yx[ok], rcond=None)
        _model = spl.T.dot(_x[0])
        ok &= np.abs((spec['flux']-_model)/err) < init[1]
    def objfun_scale_uncertainties(c):
        # Negative log-likelihood of residuals under N(0, err) with the
        # uncertainty rescaled by 10**polyval(c, wave); refits the spline
        # model at each evaluation
        err = 10**np.polyval(c, spec['wave'])*spec['err']
        if 'escale' in spec.colnames:
            err *= spec['escale']
        # 2% systematic floor added in quadrature
        err = np.sqrt(err**2 + (0.02*spec['flux'])**2)
        _Ax = spl/err
        _yx = spec['flux']/err
        _x = np.linalg.lstsq(_Ax[:,ok].T, _yx[ok], rcond=None)
        _model = spl.T.dot(_x[0])
        lnp = norm.logpdf((spec['flux']-_model)[ok],
                          loc=_model[ok]*0.,
                          scale=err[ok]).sum()
        if verbose > 1:
            print(c, lnp)
        return -lnp/2.
    # objfun_scale_uncertainties([0.0])
    c0 = np.zeros(order+1)
    #c0[-1] = np.log10(3)
    res = minimize(objfun_scale_uncertainties, c0, method=method)
    if update:
        if verbose:
            print('Set SCALE_UNCERTAINTY: ', res.x)
        # Store polynomial coefficients for subsequent read_spectrum calls
        SCALE_UNCERTAINTY = res.x
    return spec, 10**np.polyval(res.x, spec['wave']), res
def setup_spectrum(file, **kwargs):
    """
    Deprecated alias, use `msaexp.spectrum.read_spectrum`
    """
    # Kept for backwards compatibility; simply forwards all arguments.
    spec = read_spectrum(file, **kwargs)
    return spec
def read_spectrum(inp, spectrum_extension='SPEC1D', sys_err=0.02, err_mask=(10,0.5), err_median_filter=[11, 0.8], **kwargs):
    """
    Read a spectrum and apply flux and/or uncertainty scaling
    Flux scaling `corr` is applied if there are `POLY[i]` keywords in the spectrum
    metadata, with
    .. code-block:: python
        :dedent:
        >>> coeffs = [header[f'POLY{i}'] for i in range(order+1)]
        >>> corr = np.polyval(coeffs, np.log(spec['wave']*1.e4))
    Parameters
    ----------
    inp : str or `~astropy.io.fits.HDUList`
        Fits filename of a file that includes a `~astropy.io.fits.BinTableHDU` table of
        an extracted spectrum. Alternatively, can be an `~astropy.io.fits.HDUList`
        itself
    spectrum_extension : str
        Extension name of 1D spectrum in file or HDUList input
    sys_err : float
        Systematic uncertainty added in quadrature with `err` array
    err_mask : float, float or None
        Mask pixels where ``err < np.percentile(err[err > 0], err_mask[0])*err_mask[1]``
    err_median_filter : int, float or None
        Mask pixels where
        ``err < nd.median_filter(err, err_median_filter[0])*err_median_filter[1]``
    Returns
    -------
    spec : `~astropy.table.Table`
        Spectrum table. Existing columns in `file` should be
        - ``wave`` : observed-frame wavelength, microns
        - ``flux`` : flux density, `~astropy.units.microJansky`
        - ``err`` : Uncertainty on ```flux```
        Columns calculated here are
        - ``corr`` : flux scaling
        - ``escale`` : extra scaling of uncertainties
        - ``full_err`` : Full uncertainty including `sys_err`
        - ``R`` : spectral resolution
        - ``valid`` : Data are valid
    """
    global SCALE_UNCERTAINTY
    import scipy.ndimage as nd
    # Resolve the input to a table, preferring the named spectrum extension
    if isinstance(inp, str):
        if 'fits' in inp:
            with pyfits.open(inp) as hdul:
                if spectrum_extension in hdul:
                    spec = utils.read_catalog(hdul[spectrum_extension])
                else:
                    spec = utils.read_catalog(inp)
        else:
            spec = utils.read_catalog(inp)
    elif isinstance(inp, pyfits.HDUList):
        if spectrum_extension in inp:
            spec = utils.read_catalog(inp[spectrum_extension])
        else:
            msg = f'{spectrum_extension} extension not found in HDUList input'
            raise ValueError(msg)
    else:
        spec = utils.read_catalog(inp)
    # Optional polynomial flux correction stored in POLY[i] header keywords
    if 'POLY0' in spec.meta:
        pc = []
        for pi in range(10):
            if f'POLY{pi}' in spec.meta:
                pc.append(spec.meta[f'POLY{pi}'])
        corr = np.polyval(pc, np.log(spec['wave']*1.e4))
        spec['flux'] *= corr
        spec['err'] *= corr
        spec['corr'] = corr
    else:
        spec['corr'] = 1.
    # Extra uncertainty scaling: either polynomial coefficients (short array),
    # a per-pixel array matching the spectrum, or a scalar
    if 'escale' not in spec.colnames:
        if hasattr(SCALE_UNCERTAINTY,'__len__'):
            if len(SCALE_UNCERTAINTY) < 6:
                spec['escale'] = 10**np.polyval(SCALE_UNCERTAINTY, spec['wave'])
            elif len(SCALE_UNCERTAINTY) == len(spec):
                spec['escale'] = SCALE_UNCERTAINTY
            else:
                # Global array matches neither a polynomial nor this spectrum's
                # sampling; fall back to unity rather than leaving 'escale'
                # undefined, which would raise KeyError below
                spec['escale'] = 1.
        else:
            spec['escale'] = SCALE_UNCERTAINTY
            # print('xx scale scalar', SCALE_UNCERTAINTY)
    # Replace masked-array fills with zeros so arithmetic below is safe
    for c in ['flux','err']:
        if hasattr(spec[c], 'filled'):
            spec[c] = spec[c].filled(0)
    valid = np.isfinite(spec['flux']+spec['err'])
    valid &= spec['err'] > 0
    valid &= spec['flux'] != 0
    # Mask suspiciously small uncertainties relative to the global percentile
    if (valid.sum() > 0) & (err_mask is not None):
        _min_err = np.nanpercentile(spec['err'][valid], err_mask[0])*err_mask[1]
        valid &= spec['err'] > _min_err
    # ... and relative to the local running median
    if err_median_filter is not None:
        med = nd.median_filter(spec['err'][valid], err_median_filter[0])
        medi = np.interp(spec['wave'], spec['wave'][valid], med, left=0, right=0)
        valid &= spec['err'] > err_median_filter[1]*medi
    # Full uncertainty: scaled error plus fractional systematic in quadrature
    spec['full_err'] = np.sqrt((spec['err']*spec['escale'])**2 +
                               (sys_err*spec['flux'])**2)
    if 'aper_err' in spec.colnames:
        spec['aper_full_err'] = np.sqrt((spec['aper_err']*spec['escale'])**2 +
                                        (sys_err*spec['aper_flux'])**2)
    spec.meta['sys_err'] = sys_err
    spec['full_err'][~valid] = 0
    spec['flux'][~valid] = 0.
    spec['err'][~valid] = 0.
    spec['valid'] = valid
    # Attach instrument dispersion curve for this grating
    grating = spec.meta['GRATING'].lower()
    _filter = spec.meta['FILTER'].lower()
    _data_path = os.path.dirname(__file__)
    disp = utils.read_catalog(f'{_data_path}/data/jwst_nirspec_{grating}_disp.fits')
    spec.disp = disp
    spec['R'] = np.interp(spec['wave'], disp['WAVELENGTH'], disp['R'],
                          left=disp['R'][0], right=disp['R'][-1])
    spec.grating = grating
    spec.filter = _filter
    # Conversion factor from the table flux unit to f-lambda cgs
    flam_unit = 1.e-20*u.erg/u.second/u.cm**2/u.Angstrom
    um = spec['wave'].unit
    if um is None:
        um = u.micron
    spec.equiv = u.spectral_density(spec['wave'].data*um)
    spec['to_flam'] = (1*spec['flux'].unit).to(flam_unit, equivalencies=spec.equiv).value
    spec.meta['flamunit'] = flam_unit.unit
    spec.meta['fluxunit'] = spec['flux'].unit
    spec.meta['waveunit'] = spec['wave'].unit
    # Strip units from the working columns; units preserved in metadata
    spec['wave'] = spec['wave'].value
    spec['flux'] = spec['flux'].value
    spec['err'] = spec['err'].value
    return spec
def plot_spectrum(inp='jw02767005001-02-clear-prism-nrs2-2767_11027.spec.fits', z=9.505, vel_width=100, bkg=None, scale_disp=1.3, nspline=27, show_cont=True, draws=100, figsize=(16, 8), ranges=[(3650, 4980)], Rline=1000, full_log=False, write=False, eazy_templates=None, use_full_dispersion=True, get_spl_templates=False, scale_uncertainty_kwargs=None, plot_unit=None, spline_single=True, sys_err=0.02, return_fit_results=False, use_aper_columns=False, label=None, **kwargs):
    """
    Make a diagnostic figure: fit templates at a fixed redshift `z`, print the
    derived line fluxes and equivalent widths, and plot the data with the
    continuum + line models over the requested rest-frame `ranges` plus the
    full bandpass.
    Parameters
    ----------
    inp : str, `~astropy.io.fits.HDUList` or `SpectrumSampler`
        Spectrum file, HDU list, or an already-initialized sampler
    z : float
        Redshift at which the templates are evaluated
    ranges : list of (float, float)
        Rest-frame wavelength (Angstrom) windows for the zoom panels; an
        empty list produces a single full-bandpass panel
    return_fit_results : bool
        Just return the fit results -
        ``templates, coeffs, flam, eflam, _model, mask, full_chi2``
    Returns
    -------
    fig : `~matplotlib.figure.Figure`
    spec : `~astropy.table.Table`
        Spectrum with added ``model`` / ``mline`` columns and fit metadata
    data : dict
        JSON-serializable summary of the fit
    """
    global SCALE_UNCERTAINTY
    lw, lr = utils.get_line_wavelengths()
    if isinstance(inp, str):
        sampler = SpectrumSampler(inp, **kwargs)
        file = inp
    elif isinstance(inp, pyfits.HDUList):
        sampler = SpectrumSampler(inp, **kwargs)
        file = None
    else:
        file = None
        sampler = inp
    if (label is None) & (file is not None):
        label = os.path.basename(file)
    spec = sampler.spec
    # Aperture columns when requested; use_aper_columns > 1 also applies
    # the aperture correction
    if (use_aper_columns > 0) & ('aper_flux' in spec.colnames):
        if ('aper_corr' in spec.colnames) & (use_aper_columns > 1):
            ap_corr = spec['aper_corr']*1
        else:
            ap_corr = 1
        flam = spec['aper_flux']*spec['to_flam']*ap_corr
        eflam = spec['aper_full_err']*spec['to_flam']*ap_corr
    else:
        flam = spec['flux']*spec['to_flam']
        eflam = spec['full_err']*spec['to_flam']
    wrest = spec['wave']/(1+z)*1.e4
    wobs = spec['wave']
    mask = spec['valid']
    flam[~mask] = np.nan
    eflam[~mask] = np.nan
    bspl = sampler.bspline_array(nspline=nspline, get_matrix=True)
    templates, tline, _A = make_templates(sampler, z,
                                          bspl=bspl,
                                          eazy_templates=eazy_templates,
                                          vel_width=vel_width,
                                          scale_disp=scale_disp,
                                          use_full_dispersion=use_full_dispersion,
                                          disp=spec.disp,
                                          grating=spec.grating,
                                          **kwargs,
                                          )
    # Optionally rescale the uncertainties using the current template matrix
    if scale_uncertainty_kwargs is not None:
        _, escl, _ = calc_uncertainty_scale(file=None,
                                            data=(spec, _A),
                                            **scale_uncertainty_kwargs)
        eflam *= escl
        spec['escale'] *= escl
    # Fit only templates with coverage in valid pixels; inverse-variance weight
    okt = _A[:,mask].sum(axis=1) > 0
    _Ax = _A[okt,:]/eflam
    _yx = flam/eflam
    if eazy_templates is None:
        _x = np.linalg.lstsq(_Ax[:,mask].T,
                             _yx[mask], rcond=None)
    else:
        _x = nnls(_Ax[:,mask].T, _yx[mask])
    coeffs = np.zeros(_A.shape[0])
    coeffs[okt] = _x[0]
    _model = _A.T.dot(coeffs)
    # Split model into line and continuum components
    _mline = _A.T.dot(coeffs*tline)
    _mcont = _model - _mline
    full_chi2 = ((flam - _model)**2/eflam**2)[mask].sum()
    cont_chi2 = ((flam - _mcont)**2/eflam**2)[mask].sum()
    if return_fit_results:
        return templates, coeffs, flam, eflam, _model, mask, full_chi2
    # Covariance of the fitted coefficients; fall back to identity if the
    # normal matrix is singular
    try:
        oktemp = okt & (coeffs != 0)
        AxT = (_A[oktemp,:]/eflam)[:,mask].T
        covar_i = utils.safe_invert(np.dot(AxT.T, AxT))
        covar = utils.fill_masked_covar(covar_i, oktemp)
        covard = np.sqrt(covar.diagonal())
        has_covar = True
    except Exception:
        has_covar = False
        covard = coeffs*0.
        N = len(templates)
        covar = np.eye(N, N)
    print(f'\n# line flux err\n# flux x 10^-20 erg/s/cm2')
    if label is not None:
        print(f'# {label}')
    print(f'# z = {z:.5f}\n# {time.ctime()}')
    cdict = {}
    eqwidth = {}
    for i, t in enumerate(templates):
        cdict[t] = [float(coeffs[i]), float(covard[i])]
        if t.startswith('line '):
            lk = t.split()[-1]
            # Equivalent width:
            # coeffs, line fluxes are in units of 1e-20 erg/s/cm2
            # _mcont, continuum model is in units of 1-e20 erg/s/cm2/A
            # so observed-frame equivalent width is roughly
            # eqwi = coeffs[i] / _mcont[ wave_obs[i] ]
            if lk in lw:
                lwi = lw[lk][0]*(1+z)/1.e4
                continuum_i = np.interp(lwi, spec['wave'], _mcont)
                eqwi = coeffs[i]/continuum_i
            else:
                eqwi = np.nan
            eqwidth[t] = eqwi
            print(f'{t:>20} {coeffs[i]:8.1f} ± {covard[i]:8.1f} (EW={eqwi:9.1f})')
    if 'srcra' not in spec.meta:
        spec.meta['srcra'] = 0.0
        spec.meta['srcdec'] = 0.0
        spec.meta['srcname'] = 'unknown'
    spec['model'] = _model/spec['to_flam']
    spec['mline'] = _mline/spec['to_flam']
    # JSON-serializable summary of the fit
    data = {'z': float(z),
            'file':file,
            'label':label,
            'ra': float(spec.meta['srcra']),
            'dec': float(spec.meta['srcdec']),
            'name': str(spec.meta['srcname']),
            'wmin':float(spec['wave'][mask].min()),
            'wmax':float(spec['wave'][mask].max()),
            'coeffs':cdict,
            'covar':covar.tolist(),
            'wave': [float(m) for m in spec['wave']],
            'flux': [float(m) for m in spec['flux']],
            'err': [float(m) for m in spec['err']],
            'escale': [float(m) for m in spec['escale']],
            'model': [float(m) for m in _model/spec['to_flam']],
            'mline':[float(m) for m in _mline/spec['to_flam']],
            'templates':templates,
            'dof': int(mask.sum()),
            'fullchi2': float(full_chi2),
            'contchi2': float(cont_chi2),
            'eqwidth': eqwidth,
            }
    for k in ['z','wmin','wmax','dof','fullchi2','contchi2']:
        spec.meta[k] = data[k]
    #fig, axes = plt.subplots(len(ranges)+1,1,figsize=figsize)
    # Top row: one zoom panel per rest-frame range; bottom row: full spectrum.
    # With no ranges, make a single full-bandpass axis.
    if len(ranges) > 0:
        fig = plt.figure(figsize=figsize, constrained_layout=True)
        gs = GridSpec(2, len(ranges), figure=fig)
        axes = []
        for i, _ra in enumerate(ranges):
            axes.append(fig.add_subplot(gs[0,i]))
        axes.append(fig.add_subplot(gs[1,:]))
    else:
        fig, ax = plt.subplots(1,1,figsize=figsize)
        axes = [ax]
    # Scaled spline continuum components; hide negligible pieces
    _Acont = (_A.T*coeffs)[mask,:][:,:nspline]
    _Acont[_Acont < 0.001*_Acont.max()] = np.nan
    # Random realizations of the model from the coefficient covariance
    if (draws is not None) & has_covar:
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            mu = np.random.multivariate_normal(coeffs[oktemp], covar_i, size=draws)
        #print('draws', draws, mu.shape, _A.shape)
        mdraws = _A[oktemp,:].T.dot(mu.T)
    else:
        mdraws = None
    if plot_unit is not None:
        unit_conv = (1*spec.meta['flamunit']).to(plot_unit,
                                                 equivalencies=spec.equiv).value
    else:
        unit_conv = np.ones(len(wobs))
    for ax in axes:
        if 1:
            ax.errorbar(wobs, flam*unit_conv, eflam*unit_conv,
                        marker='None', linestyle='None',
                        alpha=0.5, color='k', ecolor='k', zorder=100)
        ax.step(wobs, flam*unit_conv, color='k', where='mid', lw=1, alpha=0.8)
        # ax.set_xlim(3500, 5100)
        #ax.plot(_[1]['templz']/(1+z), _[1]['templf'])
        ax.step(wobs[mask], (_mcont*unit_conv)[mask],
                color='pink', alpha=0.8, where='mid')
        ax.step(wobs[mask], (_model*unit_conv)[mask],
                color='r', alpha=0.8, where='mid')
        cc = utils.MPL_COLORS
        # Shade the locations of prominent emission lines at this redshift
        for w, c in zip([3727, 4980, 6565, 9070, 9530, 1.094e4, 1.282e4,
                         1.875e4],
                        [cc['purple'], cc['b'], cc['g'], 'darkred', 'darkred',
                         cc['pink'], cc['pink'], cc['pink']]):
            wz = w*(1+z)/1.e4
            dw = 70*(1+z)/1.e4
            ax.fill_between([wz-dw, wz+dw], [0,0], [100,100],
                            color=c, alpha=0.07, zorder=-100)
        if mdraws is not None:
            ax.step(wobs[mask], (mdraws.T*unit_conv).T[mask,:],
                    color='r', alpha=np.maximum(1./draws, 0.02), zorder=-100, where='mid')
        if show_cont:
            ax.plot(wobs[mask], (_Acont.T*unit_conv[mask]).T,
                    color='olive', alpha=0.3)
        ax.fill_between(ax.get_xlim(), [-100, -100], [0, 0], color='0.8',
                        alpha=0.5, zorder=-1)
        # Shade blueward of Lyman-alpha
        ax.fill_betweenx([0, 100], [0,0], [1215.67*(1+z)/1.e4]*2,
                         color=utils.MPL_COLORS['orange'], alpha=0.2,
                         zorder=-1)
        ax.grid()
    # axes[0].set_xlim(1000, 2500)
    # ym = 0.15; axes[0].set_ylim(-0.1*ym, ym)
    for i, r in enumerate(ranges):
        axes[i].set_xlim(*[ri*(1+z)/1.e4 for ri in r])
        # print('xxx', r)
    # Limits for the full-bandpass panel by blocking filter
    if spec.filter == 'clear':
        axes[-1].set_xlim(0.6, 5.29)
        axes[-1].xaxis.set_minor_locator(MultipleLocator(0.1))
        axes[-1].xaxis.set_major_locator(MultipleLocator(0.5))
    elif spec.filter == 'f070lp':
        axes[-1].set_xlim(0.69, 1.31)
        axes[-1].xaxis.set_minor_locator(MultipleLocator(0.02))
    elif spec.filter == 'f100lp':
        axes[-1].set_xlim(0.99, 1.91)
        axes[-1].xaxis.set_minor_locator(MultipleLocator(0.02))
        axes[-1].xaxis.set_major_locator(MultipleLocator(0.1))
    elif spec.filter == 'f170lp':
        axes[-1].set_xlim(1.69, 3.21)
    elif spec.filter == 'f290lp':
        axes[-1].set_xlim(2.89, 5.31)
    else:
        axes[-1].set_xlim(wrest[mask].min(), wrest[mask].max())
    # Fixed typo: 'wavelenth' -> 'wavelength'
    axes[-1].set_xlabel(f'obs wavelength, z = {z:.5f}')
    #axes[0].set_title(os.path.basename(file))
    # Per-panel y limits from the model, avoiding the strongest lines
    for ax in axes:
        xl = ax.get_xlim()
        ok = wobs > xl[0]
        ok &= wobs < xl[1]
        ok &= np.abs(wrest-5008) > 100
        ok &= np.abs(wrest-6564) > 100
        ok &= mask
        if ok.sum() == 0:
            ax.set_visible(False)
            continue
        ymax = np.maximum((_model*unit_conv)[ok].max(), 10*np.median((eflam*unit_conv)[ok]))
        ymin = np.minimum(-0.1*ymax, -3*np.median((eflam*unit_conv)[ok]))
        ax.set_ylim(ymin, ymax*1.3)
        # print(xl, ymax)
        if ok.sum() > 0:
            if (np.nanmax((flam/eflam)[ok]) > 20) & (full_log):
                ax.set_ylim(0.005*ymax, ymax*5)
                ax.semilogy()
    # Bug fix: this previously tested `len(axes) > 0`, which is always True,
    # so the `gs` branch ran even when `ranges` was empty and `gs` undefined
    # (NameError). Only the multi-panel layout has a GridSpec.
    if len(ranges) > 0:
        gs.tight_layout(fig, pad=0.8)
    else:
        fig.tight_layout(pad=0.8)
    if label is not None:
        fig.text(0.015*12./12, 0.005, f'{label}',
                 ha='left', va='bottom',
                 transform=fig.transFigure, fontsize=8)
    fig.text(1-0.015*12./12, 0.005, time.ctime(),
             ha='right', va='bottom',
             transform=fig.transFigure, fontsize=6)
    return fig, spec, data
| gbrammer/msaexp | msaexp/spectrum.py | spectrum.py | py | 68,700 | python | en | code | 17 | github-code | 36 | [
{
"api_name": "grizli.utils.set_warnings",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "grizli.utils",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "numpy.exp",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"... |
30321707707 | from unittest.mock import patch
import unittest
import os
import uuid
from datetime import datetime
import cmr.util.common as com
# ******************************************************************************
class TestSearch(unittest.TestCase):
    """Unit tests for the `cmr.util.common` helper functions"""
    # **********************************************************************
    # Util methods
    # **********************************************************************
    # Tests
    def test_conj(self):
        """Test the conj function"""
        self.assertEqual([3, 4], com.conj(None, [3, 4]), 'src was None')
        self.assertEqual([1, 2, 3, 4], com.conj([1, 2], [3, 4]), 'good src, lists')
        self.assertEqual((4, 3, 1, 2), com.conj((1, 2), (3, 4)), 'good src, tuples')
        self.assertEqual({'a': 'A', 'b': 'B'}, com.conj({'a':'A'}, {'b':'B'}), 'good src, dict')
    def test_always(self):
        """Test the always function"""
        # Wrong or missing inputs normalize to an empty dict by default
        self.assertEqual({}, com.always("wrong type"), 'wrong thing')
        self.assertEqual({}, com.always([]), 'wrong type')
        self.assertEqual({}, com.always({}), 'same type')
        self.assertEqual({'a':'b'}, com.always({'a':'b'}), 'populated dict, assumed')
        self.assertEqual({'a':'b'}, com.always({'a':'b'}, otype=dict), 'populated dict')
        self.assertEqual(['a', 'b'], com.always(['a','b'], otype=list), 'populated list')
        self.assertEqual((1,2,3), com.always((1,2,3), otype=tuple), 'populated tuple')
        self.assertEqual((1,2,3), com.always((1,2,3), tuple), 'populated tuple, positional')
        self.assertEqual('', com.always(None, str), 'not populated string, positional')
        self.assertEqual('', com.always('', str), 'empty string, positional')
        self.assertEqual('text', com.always('text', str), 'populated string, positional')
        # None use cases
        self.assertEqual({}, com.always(None), 'assumed, none, dict')
        self.assertEqual({}, com.always(None, otype=dict), 'None, dict')
        self.assertEqual([], com.always(None, otype=list), 'None, list')
        self.assertEqual((), com.always(None, otype=tuple), 'None, tuple')
        self.assertEqual((), com.always(None, tuple), 'None, tuple, positional')
    def test_drop_key_safely(self):
        """Test that values can be dropped safely"""
        def tester (expected, src, key, msg):
            return self.assertEqual(expected, com.drop_key_safely(src, key), msg)
        tester({}, {}, "Not existing", "Empty dictionary")
        tester({"key":"value"}, {"key": "value"}, "not found", "wrong key, no drop")
        tester({}, {"key":"value"}, "key", "drop found key")
    def test_write_read_round_trip(self):
        """
        Test the read and write functions by doing a full round trip test. Save
        some text to a temp file, then read it back, testing both functions at once
        """
        # Random path and payload so parallel runs cannot collide
        path = "/tmp/" + str(uuid.uuid4())
        expected = str(uuid.uuid4())
        com.write_file(path, expected)
        actual = com.read_file(path)
        os.remove(path) # cleanup now
        self.assertEqual(expected, actual, "Write-Read round trip")
    def test_execute_command(self):
        """Execute will run any command, test that it behaves as expected"""
        def tester (expected, given, msg):
            return self.assertEqual(expected, com.execute_command(given), msg)
        tester("", "true", "Test a single command response")
        tester("_result_", ["printf", '_%s_', 'result'], "Test a command with properties")
    @patch('cmr.util.common.execute_command')
    def test_security_call(self, execute_command_mock):
        """
        test that the code will call an external command and respond as expected
        """
        # Surrounding whitespace should be stripped from the command output
        execute_command_mock.return_value = " response info "
        self.assertEqual("response info", com.call_security("account", "service"), "Good response")
        execute_command_mock.return_value = None
        try:
            com.call_security("account", "service")
        except TypeError as err:
            self.assertEqual('account not found in keychain', str(err), "Bad response")
    def test_help_format_lambda(self):
        """Test that the lambda function performs as expected"""
        cmd = com.help_format_lambda()
        self.assertTrue("str(object='') -> str" in cmd("str", ""))
    def test_mask_string(self):
        """Test that the mask_string function will clean out sensitive info"""
        def tester(expected, given, msg):
            return self.assertEqual(expected, com.mask_string(given), msg)
        tester("", None, "None sent")
        tester("", "", "No Letters")
        tester("0", "0", "One letter")
        tester("01", "01", "Two Letters")
        tester("0*2", "012", "Three Letters")
        tester('EDL-U123********34567890', 'EDL-U12345678901234567890', "Real example")
    def test_mask_dictionary(self):
        """Test that the mask_dictionary function will clean out sensitive info"""
        data = {'ignore': 'this',
            'token': '012345687', 'cmr-token': 'EDL-U12345678901234567890'}
        expected1 = {'ignore': 'this',
            'token': '012345687', 'cmr-token': 'EDL-U123********34567890'}
        expected2 = {'ignore': 'this',
            'token': '012***687', 'cmr-token': 'EDL-U12345678901234567890'}
        expected3 = {'ignore': 'this',
            'token': '012345687', 'cmr-token': 'EDL-U12345678901234567890'}
        expected4 = {'ignore': 'this',
            'token': '012***687', 'cmr-token': 'EDL-U123********34567890'}
        # Masking accepts a single key or a list; only exact key matches mask
        self.assertEqual(expected1, com.mask_dictionary(data, 'cmr-token'))
        self.assertEqual(expected1, com.mask_dictionary(data, ['cmr-token']))
        self.assertEqual(expected2, com.mask_dictionary(data, 'token'))
        self.assertEqual(expected2, com.mask_dictionary(data, ['token']))
        self.assertEqual(expected3, com.mask_dictionary(data, 'cmr'))
        self.assertEqual(expected3, com.mask_dictionary(data, ['cmr']))
        self.assertEqual(expected4, com.mask_dictionary(data, ['token', 'cmr-token']))
        self.assertEqual(data, com.mask_dictionary(data, ''))
        self.assertEqual(data, com.mask_dictionary(data, []))
    def test_now(self):
        """
        The now function is provided to allow tests to patch it for returning a
        fixed time. This function should normally return the same value as
        datetime.now(). Test that the value is within 1 second of a direct call
        to datetime.now()
        """
        actual = datetime.now().timestamp()
        managed = com.now().timestamp()
        dif = managed - actual
        self.assertTrue(dif < 1.0, "time returned should be close to the real thing")
| nasa/eo-metadata-tools | CMR/python/test/cmr/util/test_common.py | test_common.py | py | 6,735 | python | en | code | 25 | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "cmr.util.common.conj",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "cmr.util.common",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "cmr.util.... |
9044071393 | """Sensor platform for Ambrogio Robot."""
from __future__ import annotations
from homeassistant.core import HomeAssistant
from homeassistant.const import (
ATTR_LOCATION,
ATTR_LATITUDE,
ATTR_LONGITUDE,
)
from homeassistant.components.device_tracker import SOURCE_TYPE_GPS
from homeassistant.components.device_tracker.config_entry import TrackerEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.helpers.entity import EntityDescription
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .const import (
DOMAIN,
)
from .coordinator import AmbrogioDataUpdateCoordinator
from .entity import AmbrogioRobotEntity
# Descriptions of the tracker entities created per robot; currently a single
# GPS "location" tracker (see `async_setup_entry`).
ENTITY_DESCRIPTIONS = (
    EntityDescription(
        key="location",
        name="Robot Location",
        icon="mdi:robot-mower",
        translation_key="location",
    ),
)
async def async_setup_entry(
    hass: HomeAssistant, entry: ConfigEntry, async_add_devices: AddEntitiesCallback
):
    """Set up one device-tracker entity per configured robot."""
    coordinator: AmbrogioDataUpdateCoordinator = hass.data[DOMAIN][entry.entry_id]
    trackers = []
    for imei, name in coordinator.robots.items():
        for description in ENTITY_DESCRIPTIONS:
            trackers.append(
                AmbrogioRobotDeviceTracker(
                    coordinator=coordinator,
                    entity_description=description,
                    robot_imei=imei,
                    robot_name=name,
                )
            )
    # Refresh entity state before the entities are first added
    async_add_devices(trackers, update_before_add=True)
class AmbrogioRobotDeviceTracker(AmbrogioRobotEntity, TrackerEntity):
    """GPS device tracker for a single Ambrogio robot mower."""
    def __init__(
        self,
        coordinator: AmbrogioDataUpdateCoordinator,
        entity_description: EntityDescription,
        robot_imei: str,
        robot_name: str,
    ) -> None:
        """Initialize the tracker for one robot (identified by IMEI)."""
        super().__init__(
            coordinator=coordinator,
            robot_imei=robot_imei,
            robot_name=robot_name,
            entity_type="device_tracker",
            entity_key=entity_description.key,
        )
        self.entity_description = entity_description
    @property
    def latitude(self) -> float | None:
        """Return latitude value of the device."""
        # NOTE(review): a coordinate of exactly 0.0 is falsy and would be
        # reported as None here — confirm whether that is intended
        location = self._get_attribute(ATTR_LOCATION, {}).get(ATTR_LATITUDE, None)
        return location if location else None
    @property
    def longitude(self) -> float | None:
        """Return longitude value of the device."""
        # NOTE(review): same 0.0-is-falsy caveat as `latitude`
        location = self._get_attribute(ATTR_LOCATION, {}).get(ATTR_LONGITUDE, None)
        return location if location else None
    @property
    def source_type(self):
        """Return the source type, eg gps or router, of the device."""
        return SOURCE_TYPE_GPS
    @property
    def device_class(self):
        """Return Device Class."""
        return None
| sHedC/homeassistant-ambrogio | custom_components/ambrogio_robot/device_tracker.py | device_tracker.py | py | 2,892 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "homeassistant.helpers.entity.EntityDescription",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "homeassistant.core.HomeAssistant",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "homeassistant.config_entries.ConfigEntry",
"line_number": 33,... |
37349410877 | """Test all electron density for right interpretation of coreholes"""
import pytest
from ase.build import molecule
from ase.units import Bohr
from gpaw import GPAW, PoissonSolver
from gpaw.mixer import Mixer
from gpaw.test import gen
@pytest.mark.later
def test_aed_with_corehole_li():
    """Compare number of electrons for different channels with corehole"""
    # Li setup with a core hole (name 'fch1s', corehole=(1, 0, 1) —
    # presumably n=1, l=0, occupation 1; confirm against gpaw.test.gen).
    li_setup = gen('Li', name='fch1s', corehole=(1, 0, 1), xcname='PBE')
    grf = 1  # grid refinement factor passed to get_all_electron_density
    atoms = molecule('Li2')
    atoms.center(vacuum=2.5)
    # charge=-1 compensates the electron removed by the core hole, so the
    # dimer should still integrate to 6 electrons in total.
    calc = GPAW(xc='PBE',
                mixer=Mixer(),
                setups={0: li_setup},
                charge=-1,
                poissonsolver=PoissonSolver('fd'))
    atoms.calc = calc
    atoms.get_potential_energy()
    n_sg = calc.get_all_electron_density(gridrefinement=grf)
    # (Bohr / grf)**3 converts the grid volume element to the units the
    # density is expressed in (per-Angstrom^3 — TODO confirm).
    ne_sz = calc.density.gd.integrate(
        n_sg, global_integral=False) * (Bohr / grf)**3
    assert ne_sz == pytest.approx(6.0, abs=1e-5)
    # Spin-polarized rerun: each spin channel must hold 3 electrons.
    atoms.set_initial_magnetic_moments([0.66, .34])
    calc = calc.new(spinpol=True)
    atoms.calc = calc
    atoms.get_potential_energy()
    for sz in range(2):
        n_sg = calc.get_all_electron_density(spin=sz, gridrefinement=grf)
        ne_sz = calc.density.gd.integrate(
            n_sg, global_integral=False) * (Bohr / grf)**3
        assert ne_sz == pytest.approx(3.0, abs=1e-5)
| f-fathurrahman/ffr-learns-gpaw | my_gpaw/test/corehole/test_li2.py | test_li2.py | py | 1,336 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "gpaw.test.gen",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "ase.build.molecule",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "gpaw.GPAW",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "gpaw.mixer.Mixer",
"li... |
70003678825 | from discord.ext import commands
import logging, traceback, discord
from collections import Counter
import datetime
import asyncio, aioredis
import os, sys, time
import random
from multiprocessing import Queue
from queue import Empty as EmptyQueue
import json
import hashlib
import config
import rethinkdb as r
import aiohttp
# ANSI color indices; added to 30 they form an SGR foreground color code.
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
RESET_SEQ = "\033[0m"
COLOR_SEQ = "\033[1;%dm"
BOLD_SEQ = "\033[1m"
TIME_SEQ = COLOR_SEQ % (30 + MAGENTA)
NAME_SEQ = COLOR_SEQ % (30 + CYAN)
# Log-record layout; the $-placeholders are expanded by formatter_message().
FORMAT = "[$TIME_SEQ%(asctime)-3s$RESET]" \
         "[$NAME_SEQ$BOLD%(name)-2s$RESET]" \
         "[%(levelname)-1s]" \
         "[%(message)s]" \
         "[($BOLD%(filename)s$RESET:%(lineno)d)]"
def formatter_message(message: str, colored: bool = True) -> str:
    """Expand the $RESET/$BOLD/$TIME_SEQ/$NAME_SEQ markers in *message*.

    When *colored* is True the markers become their ANSI escape sequences;
    otherwise every marker is stripped so plain-text output carries no
    leftover placeholders.
    """
    replacements = {
        "$RESET": RESET_SEQ if colored else "",
        "$BOLD": BOLD_SEQ if colored else "",
        # BUG FIX: these two markers were previously left in the string when
        # colored=False, leaking literal "$TIME_SEQ"/"$NAME_SEQ" into logs.
        "$TIME_SEQ": TIME_SEQ if colored else "",
        "$NAME_SEQ": NAME_SEQ if colored else "",
    }
    for placeholder, value in replacements.items():
        message = message.replace(placeholder, value)
    return message
class ColoredFormatter(logging.Formatter):
    """logging.Formatter that wraps the level name and message in ANSI colors.

    NOTE(review): format() mutates record.levelname and record.msg in place,
    so every later handler formatting the same LogRecord also sees the
    colored text — confirm this is intended before sharing the formatter.
    """
    def __init__(self, msg, use_color=True):
        """Store format string *msg*; *use_color* toggles ANSI coloring."""
        logging.Formatter.__init__(self, msg)
        self.use_color = use_color
    def format(self, record):
        """Colorize the record, then defer to the stock Formatter."""
        level_name = record.levelname
        if self.use_color and level_name in COLORS:
            level_name_color = COLOR_SEQ % (30 + COLORS[level_name]) + level_name + RESET_SEQ
            record.levelname = level_name_color
        message = record.msg
        # The message body is always rendered in blue for known levels.
        if self.use_color and level_name in COLORS:
            message_color = COLOR_SEQ % (30 + BLUE) + message + RESET_SEQ
            record.msg = message_color
        return logging.Formatter.format(self, record)
class ColoredLogger(logging.Logger):
    """Logger subclass whose instances default to the INFO level."""

    def __init__(self, name):
        """Create a logger called *name* at logging.INFO severity."""
        super().__init__(name, logging.INFO)
# Map log-level name -> ANSI color index used by ColoredFormatter.
COLORS = {
    'WARNING': YELLOW,
    'INFO': BLUE,
    'DEBUG': WHITE,
    'CRITICAL': YELLOW,
    'ERROR': RED
}
# Root logger: colored console output; file handlers are added on linux.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
color_format = formatter_message(FORMAT, True)
logging.setLoggerClass(ColoredLogger)
color_formatter = ColoredFormatter(color_format)
console = logging.StreamHandler()
console.setFormatter(color_formatter)
logger.addHandler(console)
# Dedicated logger for per-command usage records.
commandLog = logging.getLogger("commandLog")
commandLog.setLevel(logging.INFO)
if sys.platform == "linux":
    # One log file per process start, named after the UTC start time.
    file = logging.FileHandler(filename=f'logs/{datetime.datetime.utcnow()}.log', encoding='utf-8', mode='w')
    file.setFormatter(color_formatter)
    logger.addHandler(file)
    file = logging.FileHandler(filename="logs/{}-commands.log".format(datetime.datetime.utcnow()), encoding="utf-8", mode="w")
    file.setFormatter(color_formatter)
    commandLog.addHandler(file)
async def _prefix_callable(bot, msg):
    """Resolve command prefixes for *msg*'s author: custom prefix + defaults."""
    stored = await bot.redis.get(f"{msg.author.id}-prefix")
    if stored:
        prefixes = [stored.decode("utf8"), "n!", "N!"]
    else:
        prefixes = ['n!', 'N!']
    return commands.when_mentioned_or(*prefixes)(bot, msg)
class NekoBot(commands.AutoShardedBot):
    """Sharded Discord bot process; one NekoBot instance per shard group."""
    def __init__(self, instance, instances, shard_count, shard_ids, pipe, ipc_queue: Queue, **kwargs):
        """Configure the bot, connect redis/rethinkdb, load modules and run.

        NOTE: self.run() at the end of __init__ blocks until the bot stops.
        """
        super().__init__(command_prefix=_prefix_callable,
                         description="NekoBot",
                         pm_help=None,
                         shard_ids=shard_ids,
                         shard_count=shard_count,
                         status=discord.Status.idle,
                         fetch_offline_members=False,
                         max_messages=kwargs.get("max_messages", 105),
                         help_attrs={"hidden": True})
        self.counter = Counter()
        self.command_usage = Counter()
        self.instance = instance
        self.instances = instances
        self.pipe = pipe
        self.ipc_queue = ipc_queue
        self.shard_ids = shard_ids
        # Database connections are established asynchronously on the loop.
        async def _init_redis():
            self.redis = await aioredis.create_redis(address=("localhost", 6379), loop=self.loop)
        async def _init_rethink():
            r.set_loop_type("asyncio")
            self.r_conn = await r.connect(host="localhost",
                                          db="nekobot")
        self.loop.create_task(_init_rethink())
        self.loop.create_task(_init_redis())
        # Load every .py file in ./modules as an extension; failures are
        # logged but do not abort startup.
        for file in os.listdir("modules"):
            if file.endswith(".py"):
                name = file[:-3]
                try:
                    self.load_extension(f"modules.{name}")
                except:  # noqa: E722 — intentionally broad, keeps startup alive
                    logger.warning("Failed to load {}.".format(name))
                    traceback.print_exc()
        self.loop.create_task(self.ipc())
        self.run()
    async def ipc(self):
        """Poll the multiprocessing queue every 30s for load/unload/reload ops."""
        while True:
            try:
                data = self.ipc_queue.get_nowait()
                if data:
                    # Messages are JSON: {"op": <action>, "d": <module name>}.
                    data = json.loads(data)
                    if data["op"] == "reload":
                        self.unload_extension("modules.{}".format(data["d"]))
                        self.load_extension("modules.{}".format(data["d"]))
                        logger.info("Reloaded {}".format(data["d"]))
                    elif data["op"] == "load":
                        self.load_extension("modules.{}".format(data["d"]))
                        logger.info("Loaded {}".format(data["d"]))
                    elif data["op"] == "unload":
                        self.unload_extension("modules.{}".format(data["d"]))
                        logger.info("Unloaded {}".format(data["d"]))
            except EmptyQueue:
                pass
            except Exception as e:
                logger.error("IPC Failed, {}".format(e))
            await asyncio.sleep(30)
    async def get_language(self, ctx):
        """Return the author's configured language, or None for the default."""
        data = await self.redis.get("%s-lang" % ctx.author.id)
        if not data:
            return None
        dec = data.decode("utf8")
        # "english" is the default; delete the redundant key and fall through.
        if dec == "english":
            await self.redis.delete("%s-lang" % ctx.author.id)
            return None
        return dec
    async def on_command_error(self, context, exception):
        """Silently swallow unknown-command errors; everything else is ignored too."""
        if isinstance(exception, commands.CommandNotFound):
            return
    async def on_command_completion(self, ctx):
        """Log how long the command took, using the timestamp saved by on_command."""
        data = await self.redis.get("{}:{}:{}".format(ctx.author.id, ctx.channel.id, ctx.message.id))
        if data:
            completion = int(time.time()) - int(data)
            commandLog.info("{} executed {} in {}s".format(ctx.author.id, ctx.command.name, completion))
            if completion >= 30:
                commandLog.warning("{} took over 30 seconds to execute".format(ctx.command.name))
    async def on_command(self, ctx):
        """Count command usage and store a start timestamp (1h TTL) in redis."""
        self.counter["commands_used"] += 1
        self.command_usage[ctx.command.name] += 1
        await self.redis.incr(ctx.command.name)
        await self.redis.set("{}:{}:{}".format(ctx.author.id, ctx.channel.id, ctx.message.id), int(time.time()), expire=3600)
    async def send_cmd_help(self, ctx):
        """Send formatted help pages for the invoked (sub)command."""
        if ctx.invoked_subcommand:
            pages = await self.formatter.format_help_for(ctx, ctx.invoked_subcommand)
            for page in pages:
                await ctx.send(page)
        else:
            pages = await self.formatter.format_help_for(ctx, ctx.command)
            for page in pages:
                await ctx.send(page)
    async def __level_handler(self, message):
        """Randomly award XP (global and per-guild) for qualifying messages.

        Name-mangled (private); called only from on_message below.
        """
        if not isinstance(message.channel, discord.TextChannel):
            return
        if message.content == "" or not len(message.content) > 5:
            return
        # ~1/15 chance per message of a global-XP roll.
        if random.randint(1, 15) == 1:
            author = message.author
            user_data = await r.table("levelSystem").get(str(author.id)).run(self.r_conn)
            if not user_data:
                data = {
                    "id": str(author.id),
                    "xp": 0,
                    "lastxp": "0",
                    "blacklisted": False,
                    "lastxptimes": []
                }
                return await r.table("levelSystem").insert(data).run(self.r_conn)
            if user_data.get("blacklisted", False):
                return
            # XP can be earned at most once every 120 seconds.
            if (int(time.time()) - int(user_data["lastxp"])) >= 120:
                lastxptimes = user_data["lastxptimes"]
                lastxptimes.append(str(int(time.time())))
                xp = user_data["xp"] + random.randint(1, 30)
                data = {
                    "xp": xp,
                    "lastxp": str(int(time.time())),
                    "lastxptimes": lastxptimes
                }
                await r.table("levelSystem").get(str(author.id)).update(data).run(self.r_conn)
        # Otherwise, independent ~1/15 chance of a per-guild-XP roll.
        elif random.randint(1, 15) == 1:
            guildXP = await r.table("guildXP").get(str(message.guild.id)).run(self.r_conn)
            if not guildXP or not guildXP.get(str(message.author.id)):
                data = {
                    str(message.author.id): {
                        "lastxp": str(int(time.time())),
                        "xp": 0
                    }
                }
                if not guildXP:
                    data["id"] = str(message.guild.id)
                return await r.table("guildXP").get(str(message.guild.id)).update(data).run(self.r_conn)
            if (int(time.time()) - int(guildXP.get(str(message.author.id))["lastxp"])) >= 120:
                xp = guildXP.get(str(message.author.id))["xp"] + random.randint(1, 30)
                data = {
                    str(message.author.id): {
                        "xp": xp,
                        "lastxp": str(int(time.time()))
                    }
                }
                await r.table("guildXP").get(str(message.guild.id)).update(data).run(self.r_conn)
    async def on_message(self, message):
        """Count messages, ignore bots, then run commands and the XP handler."""
        self.counter["messages_read"] += 1
        if message.author.bot:
            return
        await self.process_commands(message)
        await self.__level_handler(message)
    async def close(self):
        """Close database connections before the normal bot shutdown."""
        self.r_conn.close()
        self.redis.close()
        await super().close()
    async def on_ready(self):
        """Announce readiness and start the periodic stats-publishing loop.

        NOTE(review): the while-loop below runs inside on_ready and only
        yields via asyncio.sleep — presumably intentional, but it keeps this
        handler alive for the bot's whole lifetime.
        """
        if not hasattr(self, "uptime"):
            self.uptime = datetime.datetime.utcnow()
        async with aiohttp.ClientSession() as cs:
            await cs.post(config.status_smh, json={
                "content": "instance {} ready smh".format(self.instance)
            })
        print(" _ _ _ \n"
              " | | | | | | \n"
              " _ __ ___| | _____ | |__ ___ | |_ \n"
              " | '_ \ / _ \ |/ / _ \| '_ \ / _ \| __|\n"
              " | | | | __/ < (_) | |_) | (_) | |_ \n"
              " |_| |_|\___|_|\_\___/|_.__/ \___/ \__|\n"
              " \n"
              " ")
        logger.info("Ready OwO")
        logger.info(f"Shards: {self.shard_count}")
        logger.info(f"Servers {len(self.guilds)}")
        logger.info(f"Instance {self.instance}")
        logger.info(f"Users {len(set(self.get_all_members()))}")
        await self.change_presence(status=discord.Status.idle)
        if not hasattr(self, "instancePoster"):
            self.instancePoster = True
            # Publish per-instance stats to redis every 5 minutes.
            while self.instancePoster:
                await self.redis.set("instance%s-guilds" % self.instance, len(self.guilds))
                await self.redis.set("instance%s-users" % self.instance, sum([x.member_count for x in self.guilds]))
                await self.redis.set("instance%s-messages" % self.instance, self.counter["messages_read"])
                await self.redis.set("instance%s-commands" % self.instance, self.counter["commands_used"])
                await self.redis.set("instance%s-channels" % self.instance, len(set(self.get_all_channels())))
                logger.info(f"Updated Instance {self.instance}'s Guild Count with {len(self.guilds)}")
                await asyncio.sleep(300)
    def run(self):
        """Start the bot with the token from config."""
        super().run(config.token)
| harumaki4649/nekobot | shardedBot.py | shardedBot.py | py | 12,244 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.Formatter",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "logging.Formatter.__init__",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "logging.Formatter",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name... |
24706362299 | import pygame
from Snake import Snake
from Segment import Segment
class Player(Snake):
def __init__(self,x,y,w,h,filePath, winDims):
super().__init__(x,y,w,h,filePath)
self.winDims = winDims
def update(self,orbs,snakes):
self.calculateDirection()
return super().update(snakes)
def calculateDirection(self):
mousePos = pygame.mouse.get_pos()
worldPos = (mousePos[0] - self.winDims[0] / 2 + self.rect.x, mousePos[1] - self.winDims[1] / 2 + self.rect.y)
self.direction = [worldPos[0] - self.rect.x, worldPos[1] - self.rect.y]
length = (self.direction[0] ** 2 + self.direction[1] ** 2) ** (1 / 2)
self.direction = [self.direction[0] / length, self.direction[1] / length] | MCK144/Slither.io | Player.py | Player.py | py | 777 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "Snake.Snake",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "pygame.mouse.get_pos",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pygame.mouse",
"line_number": 15,
"usage_type": "attribute"
}
] |
29144047296 | #!/usr/bin/python3
import numpy as np
from scipy.io import loadmat
from func import displayData, nnCostFunction, sigmoidGradient, randInitializeWeights,\
checkNNGradients, fmin_nn, fmin_nn1, predict
import matplotlib.pyplot as plt
def main():
    """Run Coursera ML exercise 4: a 3-layer neural net on 20x20 digit images.

    Loads data and pretrained weights, checks cost/gradient implementations,
    trains the network, then visualizes weights and reports accuracy.
    """
    # Setup the parameters you will use for this exercise
    input_layer_size = 400 # mnist dataset 20x20
    hidden_layer_size = 25
    num_labels = 10
    ## Part 1: Loading and Visualizing Data
    print("Loading and Visualizing Data ...")
    dat = loadmat("./ex4data1.mat")
    X = dat['X']
    y = dat['y']
    m = X.shape[0]
    # Randomly select 100 data points to display
    rand_indices = np.random.permutation(m)
    sel = X[rand_indices[:100], :]
    displayData(sel)
    ## Part 2: Loading Parameters
    # Load the weights into variables Theta1 and Theta2
    dat1 = loadmat("./ex4weights.mat")
    Theta1 = dat1["Theta1"]
    Theta2 = dat1["Theta2"]
    # Unroll parameters
    nn_params = np.vstack([Theta1.reshape(-1, 1), Theta2.reshape(-1, 1)])
    ## Part 3: Compute Cost (Feedforward)
    print("\nFeedforward Using Neural Network ...")
    # Weight regularization parameter
    lmbd = 0
    J, _ = nnCostFunction(nn_params, input_layer_size, hidden_layer_size, num_labels, X, y, lmbd)
    print("Cost at parameters (loaded from ex4weights): {}\n\
(this value should be about 0.2877629)".format(J))
    ## Part 4: Implement Regularization
    print("\nChecking Cost Function (w/ Regularization) ...")
    lmbd = 1
    J, _ = nnCostFunction(nn_params, input_layer_size, hidden_layer_size, num_labels, X, y, lmbd)
    print("Cost at parameters (loaded from ex4weights): {}\n\
(this value should be about 0.383770)".format(J))
    ## Part 5: Sigmoid Gradient
    print("\nEvaluationg sigmoid gradient...")
    g = sigmoidGradient(np.array([-1, -0.5, 0, 0.5, 1]))
    print("Sigmoid gradient evaluated at [-1, -0.5, 0, 0.5, 1]:")
    print(g)
    print("\n")
    ## Part 6: Initializing Parameters
    print("\nInitializing Neural Network Parameters ...")
    # initial_Theta1 = randInitializeWeights(input_layer_size, hidden_layer_size)
    # initial_Theta2 = randInitializeWeights(hidden_layer_size, num_labels)
    # Unroll parameters
    # initial_nn_params = np.vstack([initial_Theta1.reshape(-1, 1), initial_Theta2.reshape(-1, 1)])
    ## Part 7: Implement Backpropagation
    print("\nChecking Backpropagation...")
    checkNNGradients()
    ## Part 8: Implement Regularization
    print("\nChecking Backpropagation (w/ Regularization) ...")
    # Check gradients by running checkNNGradients
    lmbd = 3
    checkNNGradients(lmbd)
    # Also output the costFunction debugging values
    debug_J, _ = nnCostFunction(nn_params, input_layer_size, hidden_layer_size, num_labels, X, y, lmbd)
    print("\n\nCost at (fixed) debugging parameters (w/ lambda = {}): {}"\
        "\n(for lambda = 3, this value should be about 0.576051)\n".format(lmbd, debug_J))
    ## Part 8: Training NN
    print("\nTraining Neural Network...")
    lmbd = 1 # TODO optimize() can't not work with regularization now, should be 1 here
    nn_params, _ = fmin_nn1(input_layer_size, hidden_layer_size, num_labels, X, y, lmbd)
    # Re-roll the flat parameter vector back into the two weight matrices.
    Theta1 = nn_params[:hidden_layer_size*(input_layer_size+1)].reshape(hidden_layer_size, (input_layer_size+1))
    Theta2 = nn_params[hidden_layer_size*(input_layer_size+1):].reshape(num_labels, (hidden_layer_size+1))
    ## Part 9: Visualize Weights
    print("\nVisualizing Neural Network ...")
    displayData(Theta1[:, 1:])
    ## Part 10: Implement Predict
    pred = predict(Theta1, Theta2, X)
    pred[pred==0] = 10 # label 10 is set to 0 in the nn model
    print("\nTraining Set Accuracy: {}".format(np.mean(np.double(pred == y.ravel())) * 100))
    plt.show()
# Run the exercise pipeline only when executed as a script.
if __name__ == "__main__":
    main()
| rossihwang/Coursera_ML_homework_with_python | week5/ex4.py | ex4.py | py | 3,839 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "scipy.io.loadmat",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.random.permutation",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "func.disp... |
40186857847 | # import community
import numpy as np
import networkx as nx
import matplotlib as mpl
from matplotlib.pyplot import imshow
from matplotlib import pyplot as plt
import matplotlib.image as mpimg
import pygraphviz
from networkx.drawing.nx_agraph import write_dot, graphviz_layout
import random
import pydoc
from ds import McmcTree as Tree
from utils import ColorPrint as _
import sys
sys.path.append("..")
from datasets.synthetic.generator import TreeGenerator
# Global matplotlib font settings for all figures below.
font = {'weight' : 'normal',
        'size' : 24}
mpl.rc('font', **font)
### load random data
# Synthetic-dataset parameters: M genes x N cells with error rates
# alpha/beta and missing-data rate MR (semantics per TreeGenerator —
# TODO confirm ZETA/Gamma meaning against the generator).
M = 20
N = 40
ZETA = 1
Gamma = 0.15
alpha = 0.01
beta = 0.01
MR = 0.005
tg = TreeGenerator(
    M = M,
    N = N,
    ZETA = ZETA,
    Gamma = Gamma,
    alpha = alpha,
    beta = beta,
    MR = MR,
)
# gt_* hold the ground truth; D is the observed (noisy) mutation matrix.
(gt_E, gt_D, D, gt_T) = tg.generate()
gensNames = list( str(i) for i in range(M) )
print(gensNames)
C_num = D.shape[1]
G_num = D.shape[0]
_.print_warn( 'There is {} cells and {} mutations at {} genes in this dataset.'.format(C_num, G_num, len(gensNames)) )
# ### fill missed data
# def tf(m,c):
#     os = len(np.where(D[:,c]==1.))*1.
#     zs = len(np.where(D[:,c]==0.))*1.
#     return 1. if np.random.rand() < os/(os+zs) else 0.
# for m in range(G_num):
#     for c in range(C_num):
#         if D[m,c] == 3.:
#             D[m,c] = tf(m,c)
### Run
dl = list(d for d in D)
# The tree root is the node with no incoming edges in the ground-truth graph.
root = [n for n,d in gt_T.in_degree() if d==0][0]
print('ROOT:', root)
T = Tree(gensNames, D, data_list=dl, root=str(root), alpha=alpha, beta=beta)
T.set_ground_truth(gt_D, gt_E, gt_T=gt_T)
T.randomize()
T.plot_best_T('initial T')
# T.plot('T0')
# Iterate the MCMC chain for up to 1000 steps; T.next() presumably returns
# True when the chain should stop — confirm against McmcTree.
for i in range(1000):
    if T.next():
        break
T.plot_all_results()
| afshinbigboy/itmt | src/test.py | test.py | py | 1,675 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.rc",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "datasets.synthetic.genera... |
74572928422 | from flask import Flask,render_template
from os import path
from flask_misaka import markdown,Misaka
from LocalStorageBackend import folderlist
##### this is the Template jinja stuff for the webpage
# Flask app renders its Jinja templates from the ./views folder.
app = Flask(__name__, template_folder="views")
### need this line for the Misaka markdown rendering
# NOTE(review): fenced_code="true" passes a truthy *string*, not bool True —
# effective, but confirm before normalising to a bool.
Misaka(app,fenced_code="true")
def get_namespaceslocal(filepath):
    """Return the folder entries under *filepath* as a list."""
    return list(folderlist(filepath))
@app.route('/') # to list all the module namespaces
def index():
    """Render the landing page listing every module namespace."""
    base_path = f'./v1/modules/'
    entries = get_namespaceslocal(base_path)
    return render_template('index.html', modules=entries, filepath=base_path)
@app.route('/v1/modules/<namespace>/', methods=['GET']) # list all the modules in a namespace
def namespaceselect(namespace):
    """List every module that lives under *namespace*."""
    target = f'./v1/modules/{namespace}'
    entries = get_namespaceslocal(target)
    return render_template('namespace.html', modules=entries, filepath=target)
@app.route('/v1/modules/<namespace>/<name>/', methods=['GET']) # list the providers of a particular module
def moduleselect(namespace,name):
    """List the providers available for module *name* in *namespace*."""
    target = f'./v1/modules/{namespace}/{name}'
    entries = get_namespaceslocal(target)
    return render_template('modules.html', modules=entries, filepath=target)
@app.route('/v1/modules/<namespace>/<name>/<provider>/', methods=['GET']) # list the versions of a module for a given provider
def providerselect(namespace,name,provider):
    """List the versions published for a module/provider pair."""
    target = f'./v1/modules/{namespace}/{name}/{provider}'
    entries = get_namespaceslocal(target)
    return render_template('provider.html', modules=entries, filepath=target)
#Renders the Readme.md from the verion folder
@app.route('/v1/modules/<namespace>/<name>/<provider>/<version>/', methods=['GET'])
def load_readme(namespace, name,provider,version):
    """Render the readme.md stored in the given module-version folder."""
    # SECURITY: the URL segments are untrusted and are joined into a
    # filesystem path. Flask's default converter already excludes '/', so
    # '..' components are the remaining traversal vector — reject them.
    parts = (namespace, name, provider, version)
    if any(part in ('', '.', '..') for part in parts):
        return 'Not found', 404
    filepath = f'./v1/modules/{namespace}/{name}/{provider}/{version}'
    with open(f'{filepath}/readme.md', 'r') as f:
        content = f.read()
    return render_template("readme.html",text=content, title=f'Readme for {namespace}/{name}/{provider}/{version}')
| gabrielmccoll/Simple-Terraform-Registry | LocalStorageGUI.py | LocalStorageGUI.py | py | 2,087 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "flask_misaka.Misaka",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "LocalStorageBackend.folderlist",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "flask.re... |
24527321437 | import shutil
import tempfile
from ..models import Post, User, Comment
from django.conf import settings
from django.test import Client, TestCase, override_settings
from django.urls import reverse
from django.core.files.uploadedfile import SimpleUploadedFile
# Throw-away directory for media files created during the tests.
TEMP_MEDIA_ROOT = tempfile.mkdtemp(dir=settings.BASE_DIR)
@override_settings(MEDIA_ROOT=TEMP_MEDIA_ROOT)
class TestPostForm(TestCase):
    """Form-level tests for creating, commenting on and editing posts."""
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.post_text = 'Test Text PostForm'
        cls.user_name = 'PostForm'
        cls.user = User.objects.create_user(username=cls.user_name)
        cls.first_post = Post.objects.create(
            text=cls.post_text,
            author=cls.user,
        )
    def setUp(self):
        # Fresh clients per test: one anonymous, one logged in as the author.
        self.guest_client = Client()
        self.authorized_client = Client()
        self.authorized_client.force_login(self.user)
    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        # Remove the temporary MEDIA_ROOT together with any uploaded images.
        shutil.rmtree(TEMP_MEDIA_ROOT, ignore_errors=True)
    def test_create_post(self):
        """Check the post-creation form."""
        # Minimal valid 1x1 GIF payload for the image upload field.
        small_gif = (
            b'\x47\x49\x46\x38\x39\x61\x02\x00'
            b'\x01\x00\x80\x00\x00\x00\x00\x00'
            b'\xFF\xFF\xFF\x21\xF9\x04\x00\x00'
            b'\x00\x00\x00\x2C\x00\x00\x00\x00'
            b'\x02\x00\x01\x00\x00\x02\x02\x0C'
            b'\x0A\x00\x3B'
        )
        uploaded = SimpleUploadedFile(
            name='small.gif',
            content=small_gif,
            content_type='image/gif'
        )
        posts_count = Post.objects.count()
        Comment.objects.create(
            post=self.first_post,
            author=self.user,
            text='test text com'
        )
        form_post = {
            'text': 'TEXT',
            'author': self.user,
            'image': uploaded,
        }
        response = self.authorized_client.post(
            reverse('posts:post_create'),
            data=form_post,
            follow=True
        )
        self.assertRedirects(response, reverse('posts:profile', kwargs={
            'username': self.user_name
        }))
        self.assertEqual(Post.objects.count(), posts_count + 1)
        self.assertTrue(
            Post.objects.filter(text='TEXT', image='posts/small.gif').exists()
        )
        self.assertTrue(
            Comment.objects.filter(text='test text com').exists()
        )
    def test_edit_post(self):
        """Check the post-editing form."""
        form_data = {
            'text': 'test_text',
            'author': self.user
        }
        # NOTE(review): post_id '1' assumes the fixture post gets pk=1 in a
        # fresh test database — brittle; confirm cls.first_post.id instead.
        response = self.authorized_client.post(
            reverse(
                'posts:post_edit',
                kwargs={'post_id': '1'}),
            data=form_data,
            follow=True
        )
        self.assertRedirects(
            response,
            reverse(
                'posts:post_detail',
                kwargs={'post_id': '1'}
            ))
        self.assertTrue(Post.objects.filter(text='test_text'))
| Gabrie1002/hw05_final | yatube/posts/tests/test_forms.py | test_forms.py | py | 3,092 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "tempfile.mkdtemp",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.BASE_DIR",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 11,
"usage_type": "name"
},
{
"api_name... |
22524801892 | from .extentions import (
login_manager,
db,
moment,
bootstrap,
avatarTeam,
avatarUser,
coverPost,
imgTeam,
coverUser,
commonImage,
ckeditor,
nav,
mail
)
from flask_uploads import patch_request_class, configure_uploads
from .config import config
from flask import Flask, redirect, url_for, render_template, flash
from flask_login import current_user
from .tools.photo import resize
import logging
def create_app(config_name):
    """
    Create and configure the Flask application instance,
    initialise the extensions and register the blueprints.
    :param config_name: key into the config mapping selecting a Config class
    :return: flask application instance
    """
    #create instance
    app = Flask(__name__)
    app.config.from_object(config[config_name])
    config[config_name].init_app(app)
    #initial extention with flask instance
    db.init_app(app)
    moment.init_app(app)
    bootstrap.init_app(app)
    login_manager.init_app(app)
    ckeditor.init_app(app)
    nav.init_app(app)
    mail.init_app(app)
    # image upload config
    configure_uploads(app, (avatarUser, avatarTeam, coverPost, imgTeam, coverUser, commonImage))
    patch_request_class(app, 10*1024*1024)
    #register blueprint
    from .auth import auth
    app.register_blueprint(auth)
    from .user import user
    app.register_blueprint(user)
    from .admin import admin
    app.register_blueprint(admin)
    from .team import team
    app.register_blueprint(team)
    from .pay import pay
    app.register_blueprint(pay)
    # logger: append DEBUG-level records to flask.log
    handler = logging.FileHandler('flask.log', encoding='UTF-8')
    handler.setLevel(logging.DEBUG)
    logging_format = logging.Formatter(
        '%(asctime)s - %(levelname)s - %(filename)s - %(funcName)s - %(lineno)s - %(message)s')
    handler.setFormatter(logging_format)
    app.logger.addHandler(handler)
    @app.context_processor
    def inject_vars():
        # Expose shared lookup tables to every template rendering context.
        from .models.activity import registration_way, volunteer_type, RegistrationWay
        from .models.tools import province
        return dict(RegistrationWay=registration_way, Province=province, VolunteerType=volunteer_type,Registration_Way=RegistrationWay)
    #add main router
    @app.route('/')
    def index():
        # Home page: carousel activities, outdoor-type collection, panels
        # and a sample of up to 10 teams.
        from .models.activity import Activity
        from .models.outdoorType import OutdoorType
        carousel_items = Activity.get_activities_home()
        collection = OutdoorType.show_list()
        activities = Activity.get_activities_home_panel()
        from .models.team import Team
        teams = Team.query.limit(10).all()
        return render_template('home.html',
                               carousel_items = carousel_items,
                               collection=collection,
                               activities=activities,
                               teams=teams)
    @app.route('/invest', methods=['GET', 'POST'])
    def invest():
        # Investment-demand form; saves a Demand record on valid submit.
        from .models.demand import Demand
        from .forms.demand import DemandForm
        form = DemandForm()
        if form.validate_on_submit():
            demand = Demand(company=form.company.data,
                            contact = form.contact.data,
                            phone = form.phone.data,
                            image = form.image.data,
                            brand = form.brand.data,
                            product = form.product.data,
                            market = form.market.data,
                            other = form.other.data)
            if current_user.is_authenticated:
                demand.user_id = current_user.id
            # NOTE(review): no explicit db.session.commit() here — presumably
            # a commit happens elsewhere (teardown/autocommit); confirm.
            db.session.add(demand)
            flash('您已经提交了您的需求,稍后会与您联系')
            return redirect(url_for('invest'))
        return render_template('invest.html', form=form)
    # ----------------- ckeditor image upload -----------
    @app.route('/ckupload/', methods=['POST'])
    def ckupload():
        from flask import request, make_response
        from .tools.string_tools import get_rnd_filename_w_ext
        import os
        error = ''
        url = ''
        callback = request.args.get("CKEditorFuncNum")
        if request.method == 'POST' and 'upload' in request.files:
            fileobj = request.files['upload']
            rnd_name = get_rnd_filename_w_ext(fileobj.filename)
            filepath = os.path.join(app.static_folder,'images', 'upload', rnd_name)
            # Check that the target directory exists; create it if missing.
            dirname = os.path.dirname(filepath)
            if not os.path.exists(dirname):
                try:
                    os.makedirs(dirname)
                except:  # noqa: E722 — any makedirs failure is reported to CKEditor
                    error = 'ERROR_CREATE_DIR'
            elif not os.access(dirname, os.W_OK):
                error = 'ERROR_DIR_NOT_WRITEABLE'
            if not error:
                #fileobj.save(filepath)
                # No upload-size limit here, but images are resized to <=1200px.
                resize(fileobj, filepath, 1200)
                url = url_for('static', filename='%s/%s' % ('images/upload/', rnd_name))
        else:
            error = 'post error'
        res = """<script type="text/javascript">
            window.parent.CKEDITOR.tools.callFunction(%s, '%s', '%s');
</script>""" % (callback, url, error)
        response = make_response(res)
        response.headers["Content-Type"] = "text/html"
        return response
    #--------- error handlers --------------
    @app.errorhandler(404)
    def page_not_fount(e):
        return render_template('404.html'), 404
    @app.errorhandler(500)
    def internal_server_error(e):
        app.logger.exception('error 500:%s', e)
        app.logger.error(e)
        return render_template('500.html', e=e), 500
    @app.errorhandler(403)
    def internal_server_error(e):
        return render_template('403.html'), 403
    @app.errorhandler(413)
    def internal_server_error(e):
        return render_template('413.html'), 413
    return app
| Honglin-Li/TravelPlatform | app/__init__.py | __init__.py | py | 6,069 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "config.config",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "config.config",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "extentions.db.init_app",
... |
37229157121 | import pygame
from player import *
from blocks import *
from pyganim import *
# window
# window
WIN_WIDTH = 800 # width of the created window
WIN_HEIGHT = 640 # height
DISPLAY = (WIN_WIDTH, WIN_HEIGHT) # group width and height into one variable
BACKGROUND_COLOR = (0, 64, 0)
NAME = "Battle of one"
ANIMATION_DELAY = 0.1 # delay between animation frames
def main():
    """Initialise pygame, build the tile level and run the main game loop."""
    pygame.init() # mandatory PyGame initialisation
    screen = pygame.display.set_mode(DISPLAY) # create the window
    pygame.display.set_caption(NAME) # window caption
    surf = pygame.Surface(DISPLAY)
    surf.fill(BACKGROUND_COLOR)
    hero = Player(55, 55) # create the hero at (x, y) coordinates
    left = right = False # standing still by default
    up = False
    entities = pygame.sprite.Group() # every drawable object
    platforms = [] # everything we can collide with or stand on
    entities.add(hero)
    # '_' marks a platform tile; every row is 25 tiles wide.
    # NOTE(review): interior spacing of this map was normalised to the
    # 25-column grid implied by the border rows — verify platform positions.
    level = ["_________________________",
             "_                       _",
             "_                       _",
             "_                       _",
             "_                       _",
             "_                       _",
             "_                       _",
             "_                   _____",
             "_                       _",
             "_                       _",
             "_              _        _",
             "_        ____           _",
             "_                       _",
             "_          _            _",
             "_   __                  _",
             "_                       _",
             "_      _________        _",
             "_                       _",
             "_                       _",
             "_________________________"]
    timer = pygame.time.Clock()
    x = y = 0 # tile coordinates
    for row in level:
        for col in row:
            if col == "_":
                platform = Platform(x, y)
                entities.add(platform)
                platforms.append(platform)
            x = x + PLATFORM_WIDTH # platform blocks are laid out a block-width apart
        y = y + PLATFORM_HEIGHT # same for the height
        x = 0 # each new row starts from zero
    while 1: # main game loop
        timer.tick(60) #fps = 60
        for e in pygame.event.get():
            # (removed an unused pygame.key.get_pressed() call here)
            if e.type == KEYDOWN and e.key == K_UP:
                up = True
            if e.type == KEYUP and e.key == K_UP:
                up = False
            if e.type == KEYDOWN and e.key == K_LEFT:
                left = True
            if e.type == KEYDOWN and e.key == K_RIGHT:
                right = True
            if e.type == KEYUP and e.key == K_RIGHT:
                right = False
            if e.type == KEYUP and e.key == K_LEFT:
                left = False
            if e.type == pygame.QUIT:
                exit()
        screen.blit(surf, (0, 0)) # redraw the background every iteration
        hero.update(left, right, up, platforms) # movement
        entities.draw(screen) # draw everything
        pygame.display.update() # flush all changes to the screen
# Start the game only when executed as a script.
if __name__ == "__main__":
    main()
| Cruciano/Totsuka-Blade | game.py | game.py | py | 3,608 | python | ru | code | 0 | github-code | 36 | [
{
"api_name": "pygame.init",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "pygame.displa... |
29701799964 | import os
import pandas as pd
import pandas.util.testing as pdt
import pytest
import six
@pytest.fixture
def sj_out_tab(tmpdir):
    """Write a small STAR SJ.out.tab fixture file and return its path.

    Columns follow STAR's SJ.out.tab layout: chromosome, intron start/stop,
    strand, motif code, annotated flag, unique reads, multimapping reads,
    max overhang (confirm against the STAR manual).
    """
    s = """chr1 76 299 1 2 1 0 1 39
chr1 201 299 1 1 1 0 1 10
chr1 201 249 1 1 0 0 1 22
chr1 201 799 1 1 1 19 20 43
chr1 201 799 1 1 0 8 15 41
chr1 155832 164262 1 1 1 61 3 46
chr1 156087 156200 1 1 0 1 14 44
chr1 329977 334128 1 1 1 0 2 14
chr1 569184 569583 1 1 0 0 1 17
chr1 655581 659737 1 1 1 0 2 14
chr1 661725 662046 1 1 0 0 1 22
chr1 668587 671992 1 1 0 0 4 28
"""
    # Round-trip through pandas so the fixture is written tab-separated.
    df = pd.read_table(six.StringIO(s), header=None, sep='\s+')
    filename = '{0}/SJ.out.tab'.format(tmpdir)
    df.to_csv(filename, index=False, header=False, sep='\t')
    return filename
def test_read_sj_out_tab(sj_out_tab, simulated_unprocessed):
    """Parsing SJ.out.tab matches the stored ground-truth table."""
    from outrigger.io.star import read_sj_out_tab
    parsed = read_sj_out_tab(sj_out_tab)
    truth_csv = os.path.join(simulated_unprocessed, 'true_splice_junctions.csv')
    expected = pd.read_csv(truth_csv)
    assert (parsed.junction_start < parsed.junction_stop).all()
    pdt.assert_frame_equal(parsed, expected)
def test_int_to_intron_motif():
from outrigger.io.star import int_to_junction_motif
ints = [0, 1, 2, 3, 4, 5, 6]
test = [int_to_junction_motif(i) for i in ints]
true = ['non-canonical', 'GT/AG', 'GT/AG', 'GC/AG', 'GC/AG', 'AT/AC',
'AT/AC']
assert test == true
@pytest.fixture
def splice_junction_csv(ignore_multimapping, tasic2016_intermediate):
"""Different file depending on whether multimapping is True"""
template = os.path.join(tasic2016_intermediate,
'index', 'star',
'splice_junctions_ignore_multimapping{}.csv')
return template.format(str(ignore_multimapping))
def test_read_multiple_sj_out_tab(sj_filenames, ignore_multimapping,
splice_junction_csv):
from outrigger.io.star import read_multiple_sj_out_tab
from outrigger.common import READS
# Read csv file and convert to numeric
true = pd.read_csv(splice_junction_csv)
true = true.convert_objects()
test = read_multiple_sj_out_tab(
sj_filenames, ignore_multimapping=ignore_multimapping)
assert READS in test
pdt.assert_frame_equal(test, true)
def test_make_metadata(tasic2016_intermediate, junction_reads):
from outrigger.io.star import make_metadata
csv = os.path.join(tasic2016_intermediate, 'junction_metadata.csv')
true = pd.read_csv(csv)
test = make_metadata(junction_reads)
pdt.assert_frame_equal(test, true)
| YeoLab/outrigger | outrigger/tests/io/test_star.py | test_star.py | py | 2,952 | python | en | code | 60 | github-code | 36 | [
{
"api_name": "pandas.read_table",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "six.StringIO",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "outrigger.io.star.r... |
35753678574 | import xlsxwriter
workbook = xlsxwriter.Workbook("1.xlsx")
mySheet = workbook.add_worksheet()
mySheet.write("A1", "t_value")
mySheet.write("B1", "y1_value")
mySheet.write("C1", "y2_value")
t = 0
t1 = 0
y2 = 0
t1_value = []
y1_value = []
y2_value = []
while int(t) != 2:
t += 0.1
y1 = (5 * t) + ((2 * t) ** 2)
t = ("{:.2f}".format(t))
t1_value.append(t)
y1_value.append(y1)
t = float(t)
print(y1)
j = 0
for i in range(2, len(t1_value)+2):
mySheet.write(("A"+str(i)), t1_value[j])
j += 1
j = 0
for i in range(2, len(y1_value)+2):
mySheet.write("B"+str(i), y1_value[j])
j += 1
while int(t1) != 2:
t1 += 0.1
y2 = 30 + (10 * t1) - ((5 * t1) ** 2)
t1 = ("{:.2f}".format(t1))
y2_value.append(y2)
t1 = float(t1)
j = 0
for i in range(2, len(y2_value)+2):
mySheet.write(("C"+str(i)), y2_value[j])
j += 1
workbook.close()
| toni7891/magshimimHW_10grade | selfProjects/physics/phisycaProg1.py | phisycaProg1.py | py | 893 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "xlsxwriter.Workbook",
"line_number": 3,
"usage_type": "call"
}
] |
12829538864 | import json
class Participant:
# custom classes must be converted to dictionary or list to be serializable
def __init__(
self,
points=0,
total_points=0,
problems_solved=0,
easy=0,
medium=0,
hard=0,
won=0,
first=0,
) -> None:
self.points = points
self.total_points = total_points
self.problems_solved = problems_solved # also days committed
self.easy = easy
self.medium = medium
self.hard = hard
self.won = won
self.first = first
def toJSON(self):
return json.dumps(self, default=lambda x: x.__dict__, sort_keys=True, indent=2)
def to_string(self):
result = "\nTotal Points: " + str(self.total_points)
result += "\nProblems Solved: " + str(self.problems_solved)
result += "\nEasy Problems Solved: " + str(self.easy)
result += "\nMedium Problems Solved: " + str(self.medium)
result += "\nHard Problems Solved: " + str(self.hard)
result += "\nCompetitions Won: " + str(self.won)
result += "\nFirst Submissions: " + str(self.first)
return result
def get_points(self):
return self.points
def clear_points(self):
self.points = 0
def update_win(self):
self.won += 1
def update_stats(self, difficulty: str, points_recieved: int, was_first: bool):
if difficulty == "hard":
self.hard += 1
elif difficulty == "med":
self.medium += 1
elif difficulty == "easy":
self.easy += 1
self.points += points_recieved
self.problems_solved += 1
self.total_points += points_recieved
self.first += 1 if was_first else 0
"""
def test_stats(self, setting: str, point_amount: int):
if (setting == "first"):
self.first += point_amount
if setting == "hard":
self.hard += point_amount
elif setting == "med":
self.medium += point_amount
elif setting == "easy":
self.easy += point_amount
elif setting == "point":
self.total_points += point_amount
elif setting == "prob":
self.problems_solved += point_amount
elif setting == "win":
self.won += point_amount
"""
def get_badge_title(self):
PROBLEM_THRESHOLD = 20
POINT_THRESHOLD = 100
DIFFICULTY_PERCENTAGE_THRESHOLD = 45
PERCENT_TOTAL = lambda amount: (amount / self.problems_solved) * 100
badge_title = "No badge... Do some problems to earn a badge!"
if self.problems_solved < PROBLEM_THRESHOLD:
return badge_title
easy_percentage = PERCENT_TOTAL(self.easy)
medium_percentage = PERCENT_TOTAL(self.medium)
hard_percentage = PERCENT_TOTAL(self.hard)
if self.won >= PROBLEM_THRESHOLD:
badge_title = "🥇 *Standing on the shoulder of giants. And your hard work.*"
elif self.first >= PROBLEM_THRESHOLD:
badge_title = (
"💨 *Well, would you look at the time. Lack there of obviously.*"
)
elif hard_percentage >= DIFFICULTY_PERCENTAGE_THRESHOLD:
badge_title = "🏆 *The highest honor. Not using Stack Overflow.*"
elif medium_percentage >= DIFFICULTY_PERCENTAGE_THRESHOLD:
badge_title = "🍪 *Here's a cookie for all your efforts.*"
elif easy_percentage >= DIFFICULTY_PERCENTAGE_THRESHOLD:
badge_title = "🐒 *If rock and monke, then create fire.*"
elif self.total_points >= POINT_THRESHOLD:
badge_title = "🦾 *Point King*"
elif self.problems_solved >= PROBLEM_THRESHOLD:
badge_title = (
"👨🌾 *Living the simple life. Eat. Solve a programming problem. Sleep.*"
)
return badge_title
| misslame/BroncoderBot | participant_data_handling/participant.py | participant.py | py | 3,927 | python | en | code | 9 | github-code | 36 | [
{
"api_name": "json.dumps",
"line_number": 28,
"usage_type": "call"
}
] |
37218399811 | from os import getenv
from minio import Minio
def get_s3_client():
endpoint = "{host}:{port}".format(
host = getenv("MINIO_HOST", "127.0.0.1"),
port = getenv("MINIO_PORT", "9000")
)
access_key = getenv("MINIO_ACCESS_KEY", "minioadmin")
secret_key = getenv("MINIO_SECRET_KEY", "minioadmin")
return Minio(
endpoint,
access_key,
secret_key,
secure = False
)
| parledoct/qbestdocks | src/common/resources/s3.py | s3.py | py | 441 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.getenv",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 10,
"us... |
185118540 | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 10 13:55:54 2021
@author: 44797
"""
from collections import Counter
import collections
class Solution:
def frequencySort(self, nums):
nums_count = collections.OrderedDict(sorted(Counter(nums).items(), key=lambda x: x[0], reverse=True))
output = []
sorted_count = dict(sorted(nums_count.items(), key=lambda x: x[1]))
for i in sorted_count:
for _ in range(nums_count[i]):
output.append(i)
return output
nums = [1,1,2,2,2,3]
sol = Solution().frequencySort(nums)
print(sol) | sicsempatyrannis/Hackarank-Leetcode | Frequency sort.py | Frequency sort.py | py | 642 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.OrderedDict",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 11,
"usage_type": "call"
}
] |
31422423491 | from __future__ import barry_as_FLUFL, print_function, division
__version__ = '0.1'
__author__ = 'Maryam Najafian'
"""
* Implementing Part of Speech (POS) tagging
* Using RNN in Tensorflow
structure: Embedding --> GRU --> Dense
* INPUTs are one hot encoded words and OUTPUTs are tags
* Measure F1-score and accuracy
* Note: in TF (unlike Theano) all sequences should have equal length
Anything that is shorter than the longest sequence is 0 padded
You can think of your data as a NXTXD
* N samples
* samples of length T
* D is the dimensionality of each word vector
This allows us to process our data in batches
which is more difficult in Theano where you are
going to have variable length sequences
"""
from builtins import range
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import os
import sys
sys.path.append(os.path.abspath('..'))
from pos_baseline import get_data
from sklearn.utils import shuffle
from util import init_weight
from datetime import datetime
from sklearn.metrics import f1_score
from tensorflow.contrib.rnn import static_rnn as get_rnn_output
from tensorflow.contrib.rnn import BasicRNNCell, GRUCell
import config
def get_data(split_sequences=False):
train_text = config.CHUNKING_DATA + '/train.txt'
test_text = config.CHUNKING_DATA + '/test.txt'
if not os.path.exists(config.CHUNKING_DATA):
print("Please create a folder in your local directory called 'chunking'")
print("train.txt and test.txt should be stored in there.")
print("Please check the comments to get the download link.")
exit()
elif not os.path.exists(train_text):
print("train.txt is not in chunking/train.txt")
print("Please check the comments to get the download link.")
exit()
elif not os.path.exists(test_text):
print("test.txt is not in chunking/test.txt")
print("Please check the comments to get the download link.")
exit()
word2idx = {}
tag2idx = {}
# unlike the Theano version of this code the index starts
# from 1 because TF needs all input seq. to be the same size
# and it does 0 padding and 0 is a special number that we can't
# use for anything else
word_idx = 1
tag_idx = 1
# X/Ytrain:each element is a sample and each sample is a list containing word or tag indeces
Xtrain = []
Ytrain = []
# currentX/Y: contain a list of words and tags in the current sentence
currentX = []
currentY = []
# each line contains one word and one tag
# each sentence is separated by a blank line
for line in open(train_text):
line = line.rstrip()
if line: # check if the line is blank
r = line.split()
word, tag, _ = r
if word not in word2idx:
word2idx[word] = word_idx
word_idx += 1
currentX.append(word2idx[word])
if tag not in tag2idx:
tag2idx[tag] = tag_idx
tag_idx += 1
currentY.append(tag2idx[tag])
elif split_sequences: # add the whole list as a sample
Xtrain.append(currentX)
Ytrain.append(currentY)
currentX = []
currentY = []
if not split_sequences:
Xtrain = currentX
Ytrain = currentY
# load and score test data
Xtest = []
Ytest = []
currentX = []
currentY = []
for line in open(test_text):
line = line.rstrip()
if line:
r = line.split()
word, tag, _ = r
if word in word2idx:
currentX.append(word2idx[word])
else:
currentX.append(word_idx) # use this as unknown
currentY.append(tag2idx[tag])
elif split_sequences:
Xtest.append(currentX)
Ytest.append(currentY)
currentX = []
currentY = []
if not split_sequences:
Xtest = currentX
Ytest = currentY
return Xtrain, Ytrain, Xtest, Ytest, word2idx
def flatten(l): # we need to flatten our data which is a list of lists
return [item for sublist in l for item in sublist]
# get the data
Xtrain, Ytrain, Xtest, Ytest, word2idx = get_data(split_sequences=True)
V = len(word2idx) + 2 # vocab size (+1 for including an index for unknown, +1 for starting from 1 rather than 0)
K = len(set(flatten(Ytrain)) | set(
flatten(Ytest))) + 1 # num classes (assumption no unknown index, +1 for starting from 1 rather than 0)
# training config
epochs = 20
learning_rate = 1e-2
mu = 0.99
batch_size = 32
hidden_layer_size = 10
embedding_dim = 10
sequence_length = max(len(x) for x in Xtrain + Xtest)
# pad sequences
Xtrain = tf.keras.preprocessing.sequence.pad_sequences(Xtrain, maxlen=sequence_length)
Ytrain = tf.keras.preprocessing.sequence.pad_sequences(Ytrain, maxlen=sequence_length)
Xtest = tf.keras.preprocessing.sequence.pad_sequences(Xtest, maxlen=sequence_length)
Ytest = tf.keras.preprocessing.sequence.pad_sequences(Ytest, maxlen=sequence_length)
print("Xtrain.shape:", Xtrain.shape)
print("Ytrain.shape:", Ytrain.shape)
# inputs
inputs = tf.placeholder(tf.int32, shape=(None, sequence_length))
targets = tf.placeholder(tf.int32, shape=(None, sequence_length))
num_samples = tf.shape(inputs)[0] # useful for later
# word embedding matrix
We = np.random.randn(V, embedding_dim).astype(np.float32)
# weight and bias of the final dense layer (output layer)
Wo = init_weight(hidden_layer_size, K).astype(np.float32)
bo = np.zeros(K).astype(np.float32)
# make them tensorflow variables
tfWe = tf.Variable(We)
tfWo = tf.Variable(Wo)
tfbo = tf.Variable(bo)
# make the rnn unit
rnn_unit = GRUCell(num_units=hidden_layer_size, activation=tf.nn.relu)
# pass the inputs through the embedding layer to get the output and from that build the cost
x = tf.nn.embedding_lookup(tfWe, inputs)
# TF doesn'tt like 3D objects, so unsttack converts x from a tensor of shape N x T x M
# into a list of length T, where each element is a tensor of shape N x M
x = tf.unstack(x, sequence_length, 1)
# get the rnn output
outputs, states = get_rnn_output(rnn_unit, x, dtype=tf.float32)
# outputs are now of size (T, N, M)
# so make it (N, T, M)
outputs = tf.transpose(outputs, (1, 0, 2))
outputs = tf.reshape(outputs, (sequence_length * num_samples, hidden_layer_size)) # NT x M
# final dense layer
logits = tf.matmul(outputs, tfWo) + tfbo # we need to flatten our data because matmul only works on 2D tensor
# objects: NT x K
predictions = tf.argmax(logits, 1)
predict_op = tf.reshape(predictions, (num_samples, sequence_length))
labels_flat = tf.reshape(targets, [-1])
# we need to flatten our data because cross entropy cost function only works on 2D tensor objects and doesn't like
# 3D tensors
cost_op = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits,
labels=labels_flat
)
)
train_op = tf.train.AdamOptimizer(learning_rate).minimize(cost_op)
# init stuff
sess = tf.InteractiveSession()
init = tf.global_variables_initializer()
sess.run(init)
# training loop
costs = []
n_batches = len(Ytrain) // batch_size
for i in range(epochs):
n_total = 0
n_correct = 0
t0 = datetime.now()
Xtrain, Ytrain = shuffle(Xtrain, Ytrain)
cost = 0
for j in range(n_batches):
x = Xtrain[j * batch_size:(j + 1) * batch_size]
y = Ytrain[j * batch_size:(j + 1) * batch_size]
# get the cost, predictions, and perform a gradient descent step
c, p, _ = sess.run(
(cost_op, predict_op, train_op),
feed_dict={inputs: x, targets: y})
cost += c
# calculate the accuracy
for yi, pi in zip(y, p):
# we don't care about the padded entries so ignore them because
# 0 means padding so we filter out all the enteries where the target is 0
yii = yi[yi > 0]
pii = pi[yi > 0]
n_correct += np.sum(yii == pii)
n_total += len(yii)
# print stuff out periodically
if j % 10 == 0:
sys.stdout.write(
"j/N: %d/%d correct rate so far: %f, cost so far: %f\r" %
(j, n_batches, float(n_correct) / n_total, cost)
)
sys.stdout.flush()
# get test acc. too
p = sess.run(predict_op, feed_dict={inputs: Xtest, targets: Ytest})
n_test_correct = 0
n_test_total = 0
for yi, pi in zip(Ytest, p):
yii = yi[yi > 0]
pii = pi[yi > 0]
n_test_correct += np.sum(yii == pii)
n_test_total += len(yii)
test_acc = float(n_test_correct) / n_test_total
print(
"i:", i, "cost:", "%.4f" % cost,
"train acc:", "%.4f" % (float(n_correct) / n_total),
"test acc:", "%.4f" % test_acc,
"time for epoch:", (datetime.now() - t0)
)
costs.append(cost)
plt.plot(costs)
plt.show()
| MaryamNajafian/Tea_Maryam_NLP | Code/pos_tf.py | pos_tf.py | py | 8,941 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_num... |
72749247463 | import json
import gamestate
from enum import Enum
from typing import List, Dict
import city
import items
import time
from main_menu import GAME_WIDTH, dotted_line, empty_line, print_in_the_middle, print_left_indented, write_over, \
go_up_and_clear, yes_no_selection, clear_screen, informScreen
from narration import narration, left_narration
import puzzles
class GameAction(gamestate.GameState):
def __init__(self, game_state: gamestate.GameState):
self.game_state = game_state
@property
# Get map_arr
def map_arr(self) -> [city.District]:
return self.game_state._map_arr
# Set map_arr
def set_map_arr(self, map_arr: [city.District]):
self.game_state._map_arr = map_arr
@property
# Return turns remaining
def turns_remaining(self) -> int:
return self.game_state._turns_remaining
# Decrement turns remaining
def decrement_turns_remaining(self) -> None:
self.game_state._turns_remaining -= 1
@property
# Return current location
def current_location(self) -> str:
return self.game_state._current_location
# Check if lair has been discovered
def lair_discovered(self) -> bool:
return self.game_state._current_location == self.game_state._lair_location and self.game_state._vision_orb == True
# Change location
def change_location(self, new_location: str) -> int:
valid_location = False
new_location = new_location.lower()
if new_location in gamestate.District.__members__:
valid_location = True
if valid_location:
self.game_state._current_location = gamestate.District[new_location].name
return 0
else:
return 1
# Check legendary items collected
def check_legendary(self) -> [str]:
legendary_status = [None] * 4
legend_list = [ (self.game_state._vision_orb, "Vision Orb"),
(self.game_state._strength_orb, "Strength Orb"),
(self.game_state._vitality_orb, "Vitality Orb"),
(self.game_state._magic_sword, "Magic Sword") ]
for i in range(len(legend_list)):
if legend_list[i][0]:
if self.check_inventory_by_name(legend_list[i][1]):
legendary_status[i] = "On Hand"
else:
legendary_status[i] = "Found "
else:
legendary_status[i] = "Unknown"
return legendary_status
@property
# Return inventory
def current_inventory(self) -> List[items.Item]:
return self.game_state._current_inventory
# Check if there's space in inventory
def space_in_inventory(self) -> bool:
return len(self.current_inventory) < gamestate.MAX_INVENTORY
# Add item to inventory
def add_to_inventory(self, new_item: items.Item) -> int:
valid_item = True
if len(self.game_state._current_inventory) >= gamestate.MAX_INVENTORY:
valid_item = False
elif (True): # TODO: validate item
pass
if valid_item:
self.game_state._current_inventory.append(new_item)
return 0
else:
return 1
# Remove item from inventory
def remove_from_inventory(self, item_to_remove: items.Item) -> int:
if item_to_remove in self.game_state._current_inventory:
self.game_state._current_inventory.remove(item_to_remove)
return 0
else:
return 1
# Check if item exists in inventory
def check_inventory(self, item: items.Item) -> bool:
if item in self.game_state._current_inventory:
return True
return False
# Check if item exists in inventory by name
def check_inventory_by_name(self, item_name: str) -> bool:
for i in range(len(self.game_state._current_inventory)):
if item_name == self.game_state._current_inventory[i].name:
return True
return False
# Get item from inventory by name
def get_item_from_inventory_by_name(self, item_name: str) -> items.Item:
for item in self.game_state._current_inventory:
if item.name.lower() == item_name.lower():
return item
# Remove item from inventory
def remove_item_from_inventory(self, item: items.Item):
self.game_state._current_inventory.remove(item)
# Remove item from inventory by name
def remove_item_from_inventory_by_name(self, item_name: str):
item_index = None
for i in range(len(self.game_state._current_inventory)):
if self.game_state._current_inventory[i].name.lower() == item_name.lower():
item_index = i
break
del self.game_state._current_inventory[item_index]
# Get item from uncollected_legendary_items array by name
def get_item_from_uncollected_legendary_items(self, item_name: str) -> items.Item:
for item in self.game_state.uncollected_legendary_items:
if item.name == item_name:
return item
# Remove item from uncollected_legendary_items array by name
def remove_item_from_uncollected_legendary_items(self, item_name: str):
item_index = None
for i in range(len(self.game_state.uncollected_legendary_items)):
if self.game_state.uncollected_legendary_items[i].name == item_name:
item_index = i
break
del self.game_state.uncollected_legendary_items[item_index]
@property
# Return obtained clues in an ascending order by clue_id
def obtained_clues(self) -> [str]:
return self.game_state._obtained_clues
# Add clue to obtained clues
def add_to_obtained_clues(self, clue_text: str):
self.game_state._obtained_clues.append(clue_text)
# Check if district has been visited
def check_visited(self, district_name: str) -> bool:
district_name = district_name.lower()
if district_name in gamestate.District.__members__:
proper_name = gamestate.District[district_name].name
return self.game_state._visited[proper_name]
else:
raise ValueError("A bad district_name was supplied.")
# Change district to visited
def change_visited(self, district_name: str) -> int:
district_name = district_name.lower()
if district_name in gamestate.District.__members__:
proper_name = gamestate.District[district_name].name
self.game_state._visited[proper_name] = True
return 0
else:
return 1
def enter_lair_confirmation(self) -> int:
msg1 = "Are you sure you want to continue into the Lair?"
msg2 = "Once you've entered, there's no going back!"
clear_screen()
dotted_line(GAME_WIDTH)
empty_line(1)
print_in_the_middle(GAME_WIDTH, msg1)
print_in_the_middle(GAME_WIDTH, msg2)
empty_line(1)
dotted_line(GAME_WIDTH)
selection = yes_no_selection(input("Yes/No >>> "))
return selection
def narration_screen(self, narr):
clear_screen()
dotted_line(GAME_WIDTH)
empty_line(2)
narration(narr, GAME_WIDTH)
empty_line(2)
dotted_line(GAME_WIDTH)
input("Press [Enter] to continue...")
clear_screen()
# Dr. Crime's lair final game sequence
def final_game_sequence(self) -> str:
number_of_tries = 8
story1 = "You've entered the lair and encountered Dr. Crime. There are " + str(len(self.game_state.boss_puzzles))+ " puzzles " \
"you must solve. You must answer all puzzles correctly in order to defeat Dr. Crime and win the game. " \
"And you are only be allowed " + str(number_of_tries) + " wrong answer tries."
wrong_narr = "Dr. Crime says, 'You are foolish to think you can outsmart me.'"
right1 = "Dr. Crime says, 'That was a lucky guess. Let's see how you do on this next one.'"
right2 = "Dr. Crime says, 'Well, you're smarter than you look. Fine, you won't be able to solve this next one.'"
right3 = "Dr. Crime says, 'Arghhhh, who do you think you are?! You most definitely will not get this next one.'"
right4 = "As you raise up your Magic Sword, Dr. Crime's eyes glisten with fear. You quickly drop the sword, letting" \
" the weight cut Dr. Crime. You rest easy knowing Dr. Crime can no longer poison the city."
# Check all legendary items are in user's inventory to allow user to proceed
legendary_items_status = self.check_legendary()
for status in legendary_items_status:
if status != "On Hand":
informScreen("You need all 4 Legendary items in your inventory to proceed!")
return ""
# Check if user wishes to proceed
if self.enter_lair_confirmation() == 2: # User chooses 'no'
return ""
# Play all boss puzzles
self.narration_screen(story1)
status, number_of_tries = self.game_state.boss_puzzles[0].play_boss_puzzle(number_of_tries)
if status == False:
self.narration_screen(wrong_narr)
return "losegame"
self.narration_screen(right1)
status, number_of_tries = self.game_state.boss_puzzles[1].play_boss_puzzle(number_of_tries)
if status == False:
self.narration_screen(wrong_narr)
return "losegame"
self.narration_screen(right2)
status, number_of_tries = self.game_state.boss_puzzles[2].play_boss_puzzle(number_of_tries)
if status == False:
self.narration_screen(wrong_narr)
return "losegame"
self.narration_screen(right3)
status, number_of_tries = self.game_state.boss_puzzles[3].play_boss_puzzle(number_of_tries)
if status == False:
self.narration_screen(wrong_narr)
return "losegame"
self.narration_screen(right4)
return "wingame"
| farbill/capricornus | gameaction.py | gameaction.py | py | 10,119 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "gamestate.GameState",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "gamestate.GameState",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "city.District",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "... |
28212815026 | from django.shortcuts import get_object_or_404, render, redirect
from core.models import Item
from django.contrib.auth import login, logout, authenticate
from django.contrib.auth.models import User
import api.views as api
from core.forms import ItemCreateForm, UserCreateForm, UserLoginForm, UserUpdateForm
from django.contrib import messages
def index_view(request, q=None):
item_list = Item.objects.all()
if request.method == "POST":
q = request.POST.get("q")
messages.add_message(
request, messages.INFO, f"Showing search results containing: `{q}`"
)
item_list = Item.objects.filter(name__icontains=q)
context = {
"item_list": item_list,
}
return render(request, "index.html", context=context)
def user_register_view(request):
form = UserCreateForm(request.POST or None)
if request.method == "POST":
if form.is_valid():
user = form.save()
login(request, user)
messages.add_message(
request, messages.SUCCESS, "User was created successfully"
)
return redirect("core:index")
else:
messages.add_message(request, messages.ERROR, "Invalid Inputs.")
return redirect("core:user_register")
if request.user.is_authenticated:
return redirect("core:user_details", request.user.pk)
context = {"form": form, "type": "register"}
return render(request, "user/user_create_update.html", context=context)
def user_list_view(request):
user_list = User.objects.all()
context = {"user_list": user_list}
return render(request, "user/user_list.html", context=context)
def user_details_view(request, user_id: int):
user = get_object_or_404(User, pk=user_id)
context = {"user": user}
return render(request, "user/user_details.html", context=context)
def user_login_view(request):
if request.user.is_authenticated:
return redirect("core:user_details", request.user.pk)
form = UserLoginForm(request.POST or None)
if request.method == "POST":
if form.is_valid():
# username = form.cleaned_data["username"]
# password = form.cleaned_data["password"]
# user = authenticate(username=username, password=password)
user = authenticate(**form.cleaned_data)
if user is not None:
login(request, user)
messages.add_message(request, messages.SUCCESS, "You have logged in.")
return redirect("core:index")
else:
messages.add_message(request, messages.ERROR, "Invalid Credentials.")
context = {"form": form}
return render(request, "user/user_login.html", context=context)
def user_update_view(request):
if not request.user.is_authenticated:
messages.add_message(request, messages.ERROR, "You have to log in first.")
return redirect("core:user_login")
form = UserUpdateForm(request.POST or None)
if request.method == "POST":
user = get_object_or_404(User, pk=request.user.pk)
if form.is_valid():
new_data = {
"first_name": form.cleaned_data.get("first_name"),
"last_name": form.cleaned_data.get("last_name"),
"username": form.cleaned_data.get("username"),
"email": form.cleaned_data.get("email"),
}
password = form.cleaned_data.get("password")
for key, val in new_data.items():
if val:
print(f"{key}: {val} was eddited")
setattr(user, key, val)
if password:
user.set_password(password)
user.save()
logout(request)
login(request, user)
messages.add_message(
request, messages.SUCCESS, "Updated user data successfu<lly."
)
return redirect("core:user_details", request.user.pk)
else:
messages.add_message(request, messages.ERROR, "Invalid inputs!")
context = {"form": form, "type": "update"}
return render(request, "user/user_create_update.html", context=context)
def user_logout_view(request):
if request.method == "POST":
logout(request)
messages.add_message(request, messages.INFO, "You have been logged out.")
return redirect("core:index")
return render(request, "user/user_logout.html")
def item_create_view(request):
if not request.user.is_authenticated:
return redirect("core:user_login")
form = ItemCreateForm(request.POST, request.FILES or None)
if request.method == "POST":
print(request.FILES)
if form.is_valid():
print(form.cleaned_data)
item = Item(**form.cleaned_data)
item.user = request.user
item.save()
messages.add_message(request, messages.SUCCESS, "Item was Created.")
return redirect("core:index")
else:
messages.add_message(
request, messages.ERROR, "Invalid inputs for the Item."
)
context = {"form": form}
return render(request, "item/item_create.html", context=context)
def item_details_view(request, item_id: int):
item = get_object_or_404(Item, pk=item_id)
context = {"item": item}
return render(request, "item/item_details.html", context=context)
def item_delete_view(request, item_id: int):
if not request.user.is_authenticated:
messages.add_message(request, messages.ERROR, "You should login first.")
return redirect("core:user_login")
item = get_object_or_404(Item, pk=item_id)
if request.user != item.user:
messages.add_message(
request, messages.ERROR, "You can only delete items you own."
)
return redirect("core:index")
if request.method == "POST":
item.delete()
messages.add_message(
request, messages.SUCCESS, "Item was deleted successfully."
)
return redirect("core:index")
context = {"item": item}
return render(request, "item/item_delete.html", context=context)
def item_buy_view(request, item_id: int):
item = get_object_or_404(Item, pk=item_id)
if request.method == "POST":
res = api.pay_for_item(item.price)
if res.status_code != 200:
messages.add_message(request, messages.ERROR, "Something went wrong!")
return redirect("core:item_buy", {"item_id", item_id})
item.delete()
messages.add_message(request, messages.SUCCESS, "Item was bought successfully!")
return redirect("core:index")
return render(request, "item/item_buy.html", {"item": item})
def user_item_list_view(request, user_id: int):
item_list = Item.objects.all().filter(user__pk=user_id)
context = {
"item_list": item_list,
}
messages.add_message(
request, messages.INFO, f"Showing items owned by: {request.user.username}"
)
return render(request, "index.html", context=context)
| HomayoonAlimohammadi/divar | divar-clone/core/views.py | views.py | py | 7,084 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "core.models.Item.objects.all",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "core.models.Item.objects",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "core.models.Item",
"line_number": 11,
"usage_type": "name"
},
{
"api_n... |
8290193445 | from lxml import etree as ET
def parse_params_xmlfile(params_xml_file):
parameter = dict()
tree = ET.parse(params_xml_file)
root = tree.getroot()
global_parameter = root.find('global')
parameter['rpn_nms_thresh'] = float(global_parameter.find('rpn_nms_thresh').text)
parameter['rpn_fg_iou_thresh'] = float(global_parameter.find('rpn_fg_iou_thresh').text)
parameter['rpn_bg_iou_thresh'] = float(global_parameter.find('rpn_bg_iou_thresh').text)
parameter['box_nms_thresh'] = float(global_parameter.find('box_nms_thresh').text)
parameter['box_fg_iou_thresh'] = float(global_parameter.find('box_fg_iou_thresh').text)
parameter['box_bg_iou_thresh'] = float(global_parameter.find('box_bg_iou_thresh').text)
input_parameter = root.find('train')
parameter['batchsize'] = int(input_parameter.find('batchsize').text)
parameter['num_workers'] = int(input_parameter.find('num_workers').text)
parameter['learning_rate'] = float(input_parameter.find('learning_rate').text)
parameter['backbone'] = input_parameter.find('backbone').text
parameter['test_dir'] = input_parameter.find('test_dir').text
parameter['resume_from'] = input_parameter.find('resume_from').text
parameter['test_ratio'] = float(input_parameter.find('test_ratio').text) if input_parameter.find('test_ratio').text else None
parameter['save_log_path'] = input_parameter.find('save_log_path').text
return parameter
def parse_params_xmlfile_test(params_xml_file):
parameter = dict()
from lxml import etree as ET
tree = ET.parse(params_xml_file)
root = tree.getroot()
global_parameter = root.find('global')
parameter['rpn_nms_thresh'] = float(global_parameter.find('rpn_nms_thresh').text)
parameter['rpn_fg_iou_thresh'] = float(global_parameter.find('rpn_fg_iou_thresh').text)
parameter['rpn_bg_iou_thresh'] = float(global_parameter.find('rpn_bg_iou_thresh').text)
parameter['box_nms_thresh'] = float(global_parameter.find('box_nms_thresh').text)
parameter['box_fg_iou_thresh'] = float(global_parameter.find('box_fg_iou_thresh').text)
parameter['box_bg_iou_thresh'] = float(global_parameter.find('box_bg_iou_thresh').text)
input_parameter = root.find('test')
parameter['batchsize'] = int(input_parameter.find('batchsize').text)
parameter['num_workers'] = int(input_parameter.find('num_workers').text)
parameter['gap'] = int(input_parameter.find('gap').text)
parameter['backbone'] = input_parameter.find('backbone').text
parameter['mask_threshold'] = float(input_parameter.find('mask_threshold').text)
parameter['nms_name'] = input_parameter.find('nms_name').text
parameter['iou_threshold'] = float(input_parameter.find('iou_threshold').text)
parameter['score_threshold'] = float(input_parameter.find('score_threshold').text)
return parameter | PauliKarl/shipdet | shipdet/datasets/parse.py | parse.py | py | 2,919 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "lxml.etree.parse",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "lxml.etree",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "lxml.etree.parse",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "lxml.etree",
"line_num... |
22340501918 | import os
import json
import requests
import sys
import readline
# Constants
URL = "https://api.perplexity.ai/chat/completions"
HEADERS = {
"accept": "text/event-stream",
"content-type": "application/json",
"authorization": f"Bearer {os.getenv('PERPLEXITY_API_KEY')}"
}
def get_input(prompt):
try:
# Use readline for input (for TTY)
return input(prompt)
except EOFError:
return None
def stream_request(messages):
last_printed = "" # Variable to keep track of the last printed message
payload = {
"model": "pplx-70b-chat-alpha",
"messages": messages,
"stream": True
}
with requests.post(URL, headers=HEADERS, json=payload, stream=True) as response:
response.raise_for_status()
sys.stdout.write("Assistant: ")
for line in response.iter_lines():
if line:
decoded_line = line.decode('utf-8').replace('data: ', '')
try:
data = json.loads(decoded_line)
current_content = data['choices'][0]['message']['content']
if current_content != last_printed: # Update only if there is new content
new_content = current_content[len(last_printed):]
if new_content: # Only update if new content is not empty
sys.stdout.write(new_content)
sys.stdout.flush() # Flush the buffer to immediately print the new content
last_printed = current_content
except json.JSONDecodeError:
continue
print() # Print a new line after full response is received
def main():
print("Perplexity Chat Bot")
print("-------------------")
print("Type 'exit' to end the chat.")
while True:
user_input = get_input("You: ")
if user_input is None or user_input.lower().strip() == 'exit':
print("Goodbye!")
break
messages = [
{
"role": "system",
"content": "Be precise and concise."
},
{
"role": "user",
"content": user_input
}
]
stream_request(messages)
if __name__ == "__main__":
main()
| piercecohen1/pplx-api-streaming | pplxchat.py | pplxchat.py | py | 2,331 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.getenv",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "sys.stdout.write",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_numbe... |
73685012263 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler,PolynomialFeatures
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, Binarizer
from sklearn import linear_model
from sklearn.metrics import mean_squared_error,r2_score
train = pd.read_csv('train_NIR5Yl1.csv')
train.head()
train.drop(['ID','Username'],axis=1,inplace=True)
bn = Binarizer(threshold=5)
pd_watched = bn.transform([train['Answers']])[0]
train['pd_watched'] = pd_watched
le = LabelEncoder()
train['Tag'] = le.fit_transform(train['Tag'])
print(train.head())
X=train.drop('Upvotes',axis=1)
y=train['Upvotes']
std=StandardScaler()
X_scaled=pd.DataFrame(std.fit_transform(X),columns=X.columns,index=X.index)
ts = 0.24
rs = 205
X_train, X_val, y_train, y_val = train_test_split(X_scaled, y, test_size=ts, random_state=rs)
print(X_train.head())
print(X_val.head())
poly_reg=PolynomialFeatures(degree=4,include_bias=True,interaction_only=False)
X_poly_train = poly_reg.fit_transform(X_train)
X_poly_train = pd.DataFrame(X_poly_train)
X_poly_val = poly_reg.fit_transform(X_val)
X_poly_val = pd.DataFrame(X_poly_val)
alp = 0.027
lin_reg_1 = linear_model.LassoLars(alpha=alp,max_iter=150)
lin_reg_1.fit(X_poly_train,y_train)
pred_train = lin_reg_1.predict(X_poly_train)
print('Train R2:',r2_score(y_train, pred_train))
print('Train RMSE:',np.sqrt(mean_squared_error(y_train, pred_train)))
pred_val = lin_reg_1.predict(X_poly_val)
print('Val R2:',r2_score(y_val, pred_val))
print('Val RMSE:',np.sqrt(mean_squared_error(y_val, pred_val)))
test = pd.read_csv('test_8i3B3FC.csv')
ID = test['ID']
test.drop(['ID','Username'],axis=1,inplace=True)
test['Tag'] = le.fit_transform(test['Tag'])
pd_watched = bn.transform([test['Answers']])[0]
test['pd_watched'] = pd_watched
test_scaled=pd.DataFrame(std.fit_transform(test),columns=test.columns,index=test.index)
test_poly = poly_reg.fit_transform(test_scaled)
test_poly = pd.DataFrame(test_poly)
test_pred = lin_reg_1.predict(test_poly)
test_pred = abs(test_pred)
ans = pd.DataFrame({'ID' : ID, 'Upvotes' : test_pred})
sub = ans.sort_values(by=['ID'])
print(sub)
file_name = '5-lasso__ts_{}__rs_{}__alpha_{}.csv'.format(ts,rs,alp)
sub.to_csv(file_name, index=False)
| smittal1995/Upvote-count | lasso.py | lasso.py | py | 2,390 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.Binarizer",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.LabelEncoder",
"line_number": 20,
"usage_type": "call"
},
{
... |
34084783842 | from pathlib import Path
from brownie import Strategy, accounts, config, network, project, web3
from brownie.network.gas.strategies import GasNowStrategy
from brownie.network import gas_price
from eth_utils import is_checksum_address
API_VERSION = config["dependencies"][0].split("@")[-1]
Vault = project.load(
Path.home() / ".brownie" / "packages" / config["dependencies"][0]
).Vault
#1INCH token
WANT_TOKEN = "0x111111111117dC0aa78b770fA6A738034120C302"
STRATEGIST_ADDR = "0xAa9E20bAb58d013220D632874e9Fe44F8F971e4d"
#Deployer as governance
GOVERNANCE = STRATEGIST_ADDR
#Rewards to deployer,we can change it to yearn governance after approval
REWARDS = STRATEGIST_ADDR
#Set gas price as fast
gas_price(62 * 1e9)
def get_address(msg: str) -> str:
while True:
val = input(msg)
if is_checksum_address(val):
return val
else:
addr = web3.ens.address(val)
if addr:
print(f"Found ENS '{val}' [{addr}]")
return addr
print(f"I'm sorry, but '{val}' is not a checksummed address or ENS")
def main():
print(f"You are using the '{network.show_active()}' network")
dev = accounts.load("dev")
print(f"You are using: 'dev' [{dev.address}]")
if input("Is there a Vault for this strategy already? y/[N]: ").lower() == "y":
vault = Vault.at(get_address("Deployed Vault: "))
assert vault.apiVersion() == API_VERSION
else:
#Deploy vault
vault = Vault.deploy({"from": dev})
vault.initialize(
WANT_TOKEN,#OneInch token as want token
GOVERNANCE,#governance
REWARDS,#rewards
"",#nameoverride
"",#symboloverride
{"from": dev}
)
print(API_VERSION)
assert vault.apiVersion() == API_VERSION
print(
f"""
Strategy Parameters
api: {API_VERSION}
token: {vault.token()}
name: '{vault.name()}'
symbol: '{vault.symbol()}'
"""
)
if input("Deploy Strategy? [y]/n: ").lower() == "n":
strategy = Strategy.at(get_address("Deployed Strategy: "))
else:
strategy = Strategy.deploy(vault, {"from": dev}, publish_source=True)
#add strat to vault
vault.addStrategy(strategy, 10_000, 0, 0, {"from": dev})
#Set deposit limit to 5000 1INCH tokens
vault.setDepositLimit(5000 * 1e18)
| akshaynexus/BoringDAOStrats | scripts/deploy.py | deploy.py | py | 2,398 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "brownie.config",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "brownie.project.load",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "brownie.project",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "pathlib.Path.home"... |
34023322627 | import PIL.Image
import os
def resize_image(image_path, new_width, new_height):
"""Resizes an image without changing its dimensions.
Args:
image_path: The path to the image file.
new_width: The new width of the image.
new_height: The new height of the image.
Returns:
The resized image.
"""
image = PIL.Image.open(image_path)
width, height = image.size
aspect_ratio = width / height
new_width = int(new_width * aspect_ratio)
new_height = int(new_height * aspect_ratio)
resized_image = image.thumbnail((new_width, new_height), PIL.Image.ANTIALIAS)
return image
def crop_Image(img_path, save_path):
image = PIL.Image.open(img_path)
image.crop((0, 0, image.width, image.width)).save(save_path)
if __name__ == "__main__":
H = 300
W = 300
resized_image = resize_image("res/thumbnail/2104007_ete_21.png", 300, 300).save(
"res/thumbnail/2104007_ete_21.png"
)
# resized_image.save("resized_image.jpg")
for img in os.listdir("res/thumbnail"):
image_path = f"res/thumbnail/{img}"
# resize_image(image_path, W, H).save(image_path)
crop_Image(image_path, image_path)
print(f"Cropped {image_path} and saved at {image_path}")
| dev5h/ete21 | resize_thumbnails.py | resize_thumbnails.py | py | 1,264 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PIL.Image.Image.open",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "PIL.Image.Image",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "PIL.Image.Image",... |
74918582185 | # -*- coding: utf-8 -*-
import datetime
from dateutil import rrule
from odoo import models, fields, api, _
from odoo.exceptions import UserError
class Loan(models.Model):
_name = "hr.loan"
_description = 'Employee Loans'
@api.model
def _default_currency(self):
return self.env.user.company_id.currency_id.id
employee_id = fields.Many2one('hr.employee', string="Employee", readonly=True,
states={'draft': [('readonly', False)]})
date_from = fields.Date('From Date', readonly=True, states={'draft': [('readonly', False)]})
date_to = fields.Date('To Date', readonly=True, states={'draft': [('readonly', False)]})
currency_id = fields.Many2one('res.currency', default=_default_currency, string="Currency", readonly=True,
states={'draft': [('readonly', False)]})
amount_total = fields.Monetary(string="Total Loan Amount", readonly=True, states={'draft': [('readonly', False)]})
amount_deduct = fields.Monetary(string="Deduction Amount", readonly=True, states={'draft': [('readonly', False)]})
type = fields.Selection([('sss', 'SSS'), ('hdmf', 'HDMF'), ('other', 'OTHER')], string='Type', readonly=True,
states={'draft': [('readonly', False)]})
amount_total_deducted = fields.Monetary(string="Total Deducted Amount", readonly=True,
states={'draft': [('readonly', False)]})
state = fields.Selection([('draft', 'Draft'), ('open', 'In Progress'), ('done', 'Done')], string="Status",
default="draft", store=True)
@api.one
def _compute_state(self):
if self.amount_total_deducted >= self.amount_total:
self.state = 'done'
@api.multi
def action_open(self):
self.write({'state': 'open'})
@api.multi
def unlink(self):
for loan in self:
if loan.state in ['open', 'done']:
raise UserError(_('Deleting of open or paid loans is not allowed.'))
return super(Loan, self).unlink()
@api.multi
def name_get(self):
result = []
for loan in self:
amount_str = 0.0
if loan.currency_id.position == 'before':
amount_str = loan.currency_id.symbol + ' ' + str(loan.amount_total)
if loan.currency_id.position == 'after':
amount_str = str(loan.amount_total) + ' ' + loan.currency_id.symbol
result.append((loan.id, "[%s] %s" % (amount_str, loan.employee_id.name)))
return result
class TripTemplate(models.Model):
_name = "ibas_hris.trip_template"
_description = 'TRIP TEMPLATE'
@api.model
def _default_currency(self):
return self.env.user.company_id.currency_id.id
name = fields.Char('Name', compute="_compute_name", store=True)
loc_from = fields.Char('From Location', required=True)
loc_to = fields.Char('To Location', required=True)
currency_id = fields.Many2one('res.currency', default=_default_currency, string="Currency")
amount = fields.Monetary(string="Amount", required=True)
@api.depends('loc_from', 'loc_to')
def _compute_name(self):
self.name = (self.loc_from or '') + ' -> ' + (self.loc_to or '')
class Trip(models.Model):
_name = "ibas_hris.trip"
_description = 'TRIPS'
@api.model
def _default_currency(self):
return self.env.user.company_id.currency_id.id
date = fields.Date('Date', required=True)
trip_template_id = fields.Many2one('ibas_hris.trip_template', string='Template')
loc_from = fields.Char('From Location', required=True)
loc_to = fields.Char('To Location', required=True)
currency_id = fields.Many2one('res.currency', default=_default_currency, string="Currency")
amount = fields.Monetary(string="Amount", required=True)
employee_id = fields.Many2one('hr.employee', string="Employee", required=True)
@api.multi
def name_get(self):
result = []
for trip in self:
result.append((trip.id, "[%s] %s" % (trip.employee_id.name, (trip.loc_from or '') + ' -> ' + (trip.loc_to or ''))))
return result
@api.onchange('trip_template_id')
def _onchange_trip_template_id(self):
if self.trip_template_id:
self.loc_from = self.trip_template_id.loc_from
self.loc_to = self.trip_template_id.loc_to
self.amount = self.trip_template_id.amount
class Employee(models.Model):
_inherit = 'hr.employee'
loan_ids = fields.One2many('hr.loan', 'employee_id', string='Loans')
trip_ids = fields.One2many('ibas_hris.trip', 'employee_id', string='Trips')
@api.model
def _current_year_avg_net_pay(self, current_payslip=None):
date_from = datetime.date.today().strftime('%Y-01-01')
date_to = datetime.date.today().strftime('%Y-12-31')
payslips = self.env['hr.payslip'].search(
[('employee_id', '=', self.id), ('date_from', '>=', date_from), ('date_from', '<=', date_to),
('id', '!=', current_payslip.id)])
lines = payslips.mapped('line_ids').filtered(lambda r: r.code == 'NETPAY')
return sum(lines.mapped('total'))
class Payslip(models.Model):
_inherit = 'hr.payslip'
deduct_sss = fields.Boolean('Deduct SSS')
deduct_philhealth = fields.Boolean('Deduct Philhealth')
deduct_hdmf = fields.Boolean('Deduct HDMF')
generate_backpay = fields.Boolean('Generate 13 th Month Pay / BackPay')
@api.model
def get_worked_day_lines(self, contracts, date_from, date_to):
res = super(Payslip, self).get_worked_day_lines(contracts, date_from, date_to)
att_obj = self.env['hr.attendance']
contract = self.contract_id
employee = self.employee_id
resource_calendar_id = employee.work_sched or contract.resource_calendar_id
attendances = att_obj.search(
[('employee_id', '=', contract.employee_id.id), ('check_in', '>=', date_from), ('check_in', '<=', date_to)])
# HR-2, 3, 5, 6, 7, 8, 9, 10
late_in_float = 0.0
undertime_minutes = 0.0
regular_holiday_worked_hours = 0.0
special_holiday_worked_hours = 0.0
restday_regular_holiday_worked_hours = 0.0
restday_special_holiday_worked_hours = 0.0
actual_worked_hours = 0.0
restday_hours = 0.0
for att in attendances:
if att.is_workday:
if att.is_tardy:
late_in_float += att.late_in_float
if att.is_undertime:
undertime_minutes += att.undertime_minutes
if att.is_regular:
regular_holiday_worked_hours += att.worked_hours < 8 and att.worked_hours or 8
if att.is_special:
special_holiday_worked_hours += att.worked_hours < 8 and att.worked_hours or 8
if not att.is_workday:
if att.is_regular:
restday_regular_holiday_worked_hours += att.worked_hours < 8 and att.worked_hours or 8
if att.is_special:
restday_special_holiday_worked_hours += att.worked_hours < 8 and att.worked_hours or 8
restday_hours += att.worked_hours < 8 and att.worked_hours or 8
actual_worked_hours += att.worked_hours < 8 and att.worked_hours or 8
# HR-4
absences = 0
for day in rrule.rrule(rrule.DAILY, dtstart=fields.Datetime.from_string(date_from),
until=fields.Datetime.from_string(date_to).replace(hour=23, minute=59, second=59,
microsecond=999999)):
if not attendances.filtered(lambda r: str(day) <= r.check_in <= str(
day.replace(hour=23, minute=59, second=59, microsecond=999999)) and r.is_workday):
work_hours = employee.get_day_work_hours_count(day, calendar=resource_calendar_id)
if work_hours:
holiday = self.env['ibas_hris.holiday'].search([('date', '=', day.date())])
if not holiday:
absences += 1
# HR-5
overtimes = self.env['ibas_hris.ot'].search(
[('state', '=', 'approved'), ('overtime_from', '>=', date_from + ' 00:00:00'),
('overtime_from', '<=', date_to + ' 23:59:59'), ('employee_id', '=', employee.id)])
regular_ot_minutes = 0.0
restday_ot_minutes = 0.0
regular_holiday_ot_minutes = 0.0
special_holiday_ot_minutes = 0.0
regular_holiday_restday_ot_minutes = 0.0
special_holiday_restday_ot_minutes = 0.0
for ot in overtimes:
ot_day = fields.Datetime.from_string(date_from).date()
ot_day_work_hours = employee.get_day_work_hours_count(ot_day, calendar=resource_calendar_id)
ot_day_holiday = self.env['ibas_hris.holiday'].search([('date', '=', ot_day)])
if ot_day_work_hours and not ot_day_holiday: # Regular Overtime
regular_ot_minutes = + ot.ot_minutes
elif not ot_day_work_hours and not ot_day_holiday: # Restday Overtime
restday_ot_minutes = + ot.ot_minutes
if ot_day_work_hours and ot_day_holiday and ot_day_holiday.holiday_type == 'regular': # Regular Holiday Overtime
regular_holiday_ot_minutes = + ot.ot_minutes
if ot_day_work_hours and ot_day_holiday and ot_day_holiday.holiday_type == 'special': # Special Holiday Overtime
special_holiday_ot_minutes = + ot.ot_minutes
if not ot_day_work_hours and ot_day_holiday and ot_day_holiday.holiday_type == 'regular': # Regular Holiday Restday Overtime
regular_holiday_restday_ot_minutes = + ot.ot_minutes
if not ot_day_work_hours and ot_day_holiday and ot_day_holiday.holiday_type == 'special': # Special Holiday Restday Overtime
special_holiday_restday_ot_minutes = + ot.ot_minutes
res.extend([
{
'name': _("Lates"), # HR-2
'sequence': 1,
'code': 'LATE',
'number_of_days': (late_in_float / 60.00) / 8.00,
'number_of_hours': (late_in_float / 60.00),
'contract_id': contract.id,
}, {
'name': _("UNDERTIME"), # HR-3
'sequence': 2,
'code': 'UNDERTIME',
'number_of_days': (undertime_minutes / 60.00) / 8.00,
'number_of_hours': (undertime_minutes / 60.00),
'contract_id': contract.id,
}, {
'name': _("ABSENT"), # HR-4
'sequence': 3,
'code': 'ABSENT',
'number_of_days': absences,
'number_of_hours': absences * 8.00,
'contract_id': contract.id,
}, {
'name': _("Overtime"), # HR-5 (a)
'sequence': 4,
'code': 'OT',
'number_of_days': (regular_ot_minutes / 60) / 8,
'number_of_hours': regular_ot_minutes / 60,
'contract_id': contract.id,
}, {
'name': _("Restday Overtime"), # HR-5 (b)
'sequence': 4,
'code': 'RDOT',
'number_of_days': (restday_ot_minutes / 60) / 8,
'number_of_hours': restday_ot_minutes / 60,
'contract_id': contract.id,
}, {
'name': _("Regular Holiday Overtime"), # HR-5 (c)
'sequence': 4,
'code': 'RHOT',
'number_of_days': (regular_holiday_ot_minutes / 60) / 8,
'number_of_hours': regular_holiday_ot_minutes / 60,
'contract_id': contract.id,
}, {
'name': _("Special Holiday Overtime"), # HR-5 (d)
'sequence': 4,
'code': 'SHOT',
'number_of_days': (special_holiday_ot_minutes / 60) / 8,
'number_of_hours': special_holiday_ot_minutes / 60,
'contract_id': contract.id,
}, {
'name': _("Restday Regular Holiday Overtime"), # HR-5 (e)
'sequence': 4,
'code': 'RDRHOT',
'number_of_days': (regular_holiday_restday_ot_minutes / 60) / 8,
'number_of_hours': regular_holiday_restday_ot_minutes / 60,
'contract_id': contract.id,
}, {
'name': _("Restday Special Holiday Overtime"), # HR-5 (f)
'sequence': 4,
'code': 'RDSHOT',
'number_of_days': (special_holiday_restday_ot_minutes / 60) / 8,
'number_of_hours': special_holiday_restday_ot_minutes / 60,
'contract_id': contract.id,
}, {
'name': _("Regular Holiday"), # HR-6
'sequence': 5,
'code': 'RH',
'number_of_days': regular_holiday_worked_hours / 8,
'number_of_hours': regular_holiday_worked_hours,
'contract_id': contract.id,
}, {
'name': _("Special Holiday"), # HR-7
'sequence': 6,
'code': 'SH',
'number_of_days': special_holiday_worked_hours / 8,
'number_of_hours': special_holiday_worked_hours,
'contract_id': contract.id,
}, {
'name': _("Restday Regular Holiday"), # HR-8
'sequence': 7,
'code': 'RDRH',
'number_of_days': restday_regular_holiday_worked_hours / 8,
'number_of_hours': restday_regular_holiday_worked_hours,
'contract_id': contract.id,
}, {
'name': _("Actual Days Worked"), # HR-9
'sequence': 8,
'code': 'NORMWD',
'number_of_days': actual_worked_hours / 8,
'number_of_hours': actual_worked_hours,
'contract_id': contract.id,
}, {
'name': _("Restday Special Holiday"), # HR-10
'sequence': 9,
'code': 'RDSH',
'number_of_days': restday_special_holiday_worked_hours / 8,
'number_of_hours': restday_special_holiday_worked_hours,
'contract_id': contract.id,
}, {
'name': _("Restday"), # HR-10
'sequence': 10,
'code': 'RD',
'number_of_days': restday_hours / 8,
'number_of_hours': restday_hours,
'contract_id': contract.id,
}
])
return res
@api.multi
def action_payslip_done(self):
res = super(Payslip, self).action_payslip_done()
for rec in self:
for l in rec.line_ids:
if l.code == 'SSSLOAN':
loan = rec.employee_id.loan_ids.filtered(lambda r: r.state == 'open' and r.type == 'sss')
loan and loan[0].write({'amount_total_deducted': loan.amount_total_deducted + l.total})
loan and loan._compute_state()
if l.code == 'HDMFLOAN':
loan = rec.employee_id.loan_ids.filtered(lambda r: r.state == 'open' and r.type == 'hdmf')
loan and loan[0].write({'amount_total_deducted': loan.amount_total_deducted + l.total})
loan and loan._compute_state()
if l.code == 'OTHLOAN':
loan = rec.employee_id.loan_ids.filtered(lambda r: r.state == 'open' and r.type == 'other')
loan and loan[0].write({'amount_total_deducted': loan.amount_total_deducted + l.total})
loan and loan._compute_state()
return res
| lawrence24/ndms-1 | ibas_payroll/models/models.py | models.py | py | 15,973 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "odoo.models.Model",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "odoo.models",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "odoo.api.model",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "odoo.api",
... |
9601977492 | from pydub import AudioSegment
import glob
from PIL import Image, ImageDraw
import os
import multiprocessing
import tqdm
import json
import numpy as np
in_path = '/Volumes/AGENTCASHEW/sound-effects-output/'
def process_clip(wave_file_name):
print(wave_file_name)
if os.path.isdir(wave_file_name+'/waveform'):
return None
meta = json.load(open(wave_file_name+'/meta.json'))
print(meta)
audio = AudioSegment.from_file(wave_file_name+'/audio.mp3')
data = np.fromstring(audio._data, np.int16)
fs = audio.frame_rate
BARS = 600
BAR_HEIGHT = 120
LINE_WIDTH = 1
length = len(data)
RATIO = length/BARS
count = 0
maximum_item = 0
max_array = []
highest_line = 0
for d in data:
if count < RATIO:
count = count + 1
if abs(d) > maximum_item:
maximum_item = abs(d)
else:
max_array.append(maximum_item)
if maximum_item > highest_line:
highest_line = maximum_item
maximum_item = 0
count = 1
line_ratio = highest_line/BAR_HEIGHT
print(meta['type'],len(max_array))
# each tick is x number of milliseconds
tick = int(meta['length']/len(max_array))
print('tick is',tick)
im = Image.new('RGBA', (BARS * LINE_WIDTH, BAR_HEIGHT), (255, 255, 255, 0))
draw = ImageDraw.Draw(im)
current_x = 1
for item in max_array:
item_height = item/line_ratio
current_y = (BAR_HEIGHT - item_height)/2
draw.line((current_x, current_y, current_x, current_y + item_height), fill=(158, 158, 158), width=0)
current_x = current_x + LINE_WIDTH
os.mkdir(wave_file_name+'/waveform')
current_x = 1
for idx, item in enumerate(max_array):
item_height = item/line_ratio
current_y = (BAR_HEIGHT - item_height)/2
draw.line((current_x, current_y, current_x, current_y + item_height), fill=(255, 87, 34), width=0)
current_x = current_x + LINE_WIDTH
im.save(f"{wave_file_name}/waveform/{idx}.png")
the_pool = multiprocessing.Pool(8)
path, dirs, files = os.walk(in_path).__next__()
for result in tqdm.tqdm(the_pool.imap_unordered(process_clip, glob.iglob(in_path+'*')), total=len(files)):
pass
| thisismattmiller/sound-effect-bot | build_waveform_frames.py | build_waveform_frames.py | py | 2,125 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.isdir",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pydub.AudioSegment.from_file",
... |
756466471 | import argparse
import time
from utils import load_weights, read_mnist, preprocessing_data
from sklearn.metrics import classification_report
from my_svm import MySvm
def parse_args():
path_to_x_test = 'samples/t10k-images-idx3-ubyte.gz'
path_to_y_test = 'samples/t10k-labels-idx1-ubyte.gz'
path_to_model = 'samples/my_model'
parser = argparse.ArgumentParser()
parser.add_argument('-x', '--x_test_dir', default=path_to_x_test,
help=f'path to the file with the testing sample\'s records, '
f'default: {path_to_x_test}')
parser.add_argument('-y', '--y_test_dir', default=path_to_y_test,
help=f'path to the file with the testing sample\'s labels, '
f'default: {path_to_y_test}')
parser.add_argument('-m', '--model_input_dir', default=path_to_model,
help='path to the file for loading model, '
f'default: {path_to_model}')
parser.add_argument('-k', '--kernel', default='poly',
help='kernel function: \'linear\' or \'poly\', default: \'poly\'')
return parser.parse_args()
def main():
args = parse_args()
path_to_x_test = args.x_test_dir
path_to_y_test = args.y_test_dir
path_to_model = args.model_input_dir
kernel = args.kernel
X_original = read_mnist(path_to_x_test)
X_test, image_shape = preprocessing_data(X_original)
y_test = read_mnist(path_to_y_test)
weights = load_weights(path_to_model)
clf = MySvm(kernel_type=kernel, image_shape=image_shape)
clf.load_weights(weights)
predict_labels = clf.predict(X_test)
print('Metrics on the test data:\n')
print(classification_report(y_test, predict_labels, digits=4))
if __name__ == "__main__":
start_time = time.time()
main()
exec_time = time.time() - start_time
print(f'\n\nExecution time: {exec_time//60:5.0f} min, {exec_time%60:1.3} sec\n')
| albellov/mrg_mlcourse_module1 | predict.py | predict.py | py | 1,990 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "utils.read_mnist",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "utils.preprocessing_data",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "uti... |
20306864607 | #!/usr/bin/python
'''
Filename: distance.py
Contributors: Todd Boone II, Jackson Brietzke, Jonah Woods, Andrew Zolintakis, Frank Longo, Peter Awori
Description: Enables the CanCan application to retrieve distance
information from Google's Distance Matrix API.
Modules
Imported: requests
difflib
creating_materials (created by us)
Imported By: gui.py
References: https://developers.google.com/maps/documentation/distance-matrix/intro
http://docs.python-requests.org/en/latest/api/
'''
import requests
import difflib
import creating_materials
GOOGLE_DISTANCE_API_URL = 'https://maps.googleapis.com/maps/api/distancematrix/json?'
API_KEY = 'AIzaSyC6ELq9yvgnhnmnnMhfmfPHRBQ6KVjSfMY'
# Initialize recycling locations
recyclingLocations = creating_materials.create_locations_df()
# Map GUI category names to creating_materials material name
def categorySwitcher(category):
switcher={
'Aluminum':'Scrap Metals',
'Battery':'Batteries',
'Computers':'Computers',
'E-Cycling':'Electronics',
'Glass':'Construction',
'Mobile':'Mobile Phones',
'Paper':'Household',
'Plastic':'Plastic',
'Tires':'Tires',
'Waste':'Construction'
}
return switcher.get(category,"")
# Retrieve full Google Distance Matrix API Response
def getDistanceInfo(origin, destination):
'''
Add necessary params to params dict
Paramters:
{origin} - starting point for calculating travel distance and time
{destination} - finishing point for calculating travel distance and time
{units} - specify unit system, options: 'imperial' or 'metric'(default)
{key} - API_KEY
'''
params = {
'origins': origin,
'destinations': destination,
'units': 'imperial',
'key': API_KEY
}
# Make the API request and store response, else print error and exit
try:
response = requests.get(GOOGLE_DISTANCE_API_URL, params=params)
distanceResponse = response.json()
except requests.exceptions.RequestException as e:
print(e)
sys.exit(1)
return distanceResponse
# Retrieve the list of destination addresses
def getAddress(distanceResponse):
address = []
# Retrieve miles from response
try:
for currentAddress in distanceResponse['destination_addresses']:
address.append(currentAddress)
except:
if distanceResponse['status'] == 'ZERO_RESULTS':
error = 'The distance could not be calculated. Try a different address.'
return error
return address
# Retrieve the list of miles in between origin and destination
def getMiles(distanceResponse):
distance = []
# Retrieve miles from response
try:
for element in distanceResponse['rows'][0]['elements']:
for key, val in element['distance'].items():
if key == 'text':
distance.append(val)
except:
if distanceResponse['rows'][0]['elements'][0]['status'] == 'ZERO_RESULTS':
error = 'The miles could not be calculated. Try a different address.'
return error
return distance
# Retrieve the list of duration times in between origin and destination
def getDuration(distanceResponse):
duration = []
# Retrieve duration from response
try:
for element in distanceResponse['rows'][0]['elements']:
for key, val in element['duration'].items():
if key == 'text':
duration.append(val)
except:
if distanceResponse['rows'][0]['elements'][0]['status'] == 'ZERO_RESULTS':
error = 'The duration could not be calculated. Try a different address.'
return error
return duration
# Retrieve the list of duration values in between origin and destination
def getDurationValue(distanceResponse):
durationValue = []
# Retrieve duration from response
try:
for element in distanceResponse['rows'][0]['elements']:
for key, val in element['duration'].items():
if key == 'value':
durationValue.append(val)
except:
if distanceResponse['rows'][0]['elements'][0]['status'] == 'ZERO_RESULTS':
error = 'The duration value could not be calculated. Try a different address.'
return error
return durationValue
# Get a dictionary of closest location
def getClosestLocation(origin, destination):
closestIndex = ''
# Retrieve Distance Response
distanceResponse = getDistanceInfo(origin, destination)
# Get lists of corresponding addresses, miles, duration, and duration values
address = getAddress(distanceResponse)
miles = getMiles(distanceResponse)
duration = getDuration(distanceResponse)
durationValue = getDurationValue(distanceResponse)
# Find the index of closest address
closestIndex = durationValue.index(min(durationValue))
# Create a dictionary that holds informatiion about the closest location
closestLocation = {
'address': address[closestIndex],
'miles': miles[closestIndex],
'duration': duration[closestIndex]
}
return closestLocation
# Get a full dictionary that represents closest info to display on application
def getClosestAppropriateLocation(origin='Heinz College', material = ''):
    '''
    Retrieve the closest location that can accept the specified material.

    Args:
        origin: free-form start address understood by the Distance Matrix API.
        material: GUI category name; translated via categorySwitcher().

    Returns:
        dict with 'address', 'miles', 'duration' and 'name' of the closest
        facility accepting the material.
    '''
    # Map GUI category names to creating_materials material name
    material = categorySwitcher(material)
    # Retrieve and format list of all appropriate locations
    appropriateLocations = creating_materials.find_locations_that_accept_material(recyclingLocations, material)
    listOfAddresses = []
    for locations in appropriateLocations:
        listOfAddresses.append(locations['location_address'])
    formattedListOfAddresses = "|".join(listOfAddresses) # format for Google Distance Matrix API
    '''
    Get the closest appropriate location in the following format:
    closestAppropriateLocationDict = {
        'address': address[closestIndex],
        'miles': miles[closestIndex],
        'duration': duration[closestIndex]
    }
    '''
    closestAppropriateLocationDict = getClosestLocation(origin, formattedListOfAddresses)
    # Append the name of the place at the matching address.
    # NOTE(review): difflib.get_close_matches may return an empty list when the
    # API-normalised address differs too much from our stored ones -- the [0]
    # below would then raise IndexError; verify against real responses.
    for place in appropriateLocations:
        if place['location_address'] == difflib.get_close_matches(closestAppropriateLocationDict['address'], listOfAddresses)[0]:
            closestAppropriateLocationDict['name'] = place['location_name']
    return closestAppropriateLocationDict
if __name__ == "__main__":
    '''
    Testing getClosestAppropriateLocation() functionality
    '''
    # Interactive smoke test: asks for an address and reports the nearest
    # facility that accepts batteries.
    print("Enter an address. We will find the closest facility to you that can accept Batteries.\n")
    origin = input('Enter an origin address: ')
    material = "Batteries"
    closestAppropriateLocationDict = getClosestAppropriateLocation(origin, material)
    # .get() is used so missing keys print as "None" instead of raising.
    print("Name: " + str(closestAppropriateLocationDict.get('name')))
    print("Address: " + str(closestAppropriateLocationDict.get('address')))
    print("Miles: " + str(closestAppropriateLocationDict.get('miles')))
    print("Duration: " + str(closestAppropriateLocationDict.get('duration')))
    # End Testing getClosestAppropriateLocation() functionality
| toddbooneii/cancan | distance.py | distance.py | py | 6,739 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "creating_materials.create_locations_df",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "requests.exceptions",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_... |
42246125387 | """
Module with class that wraps OpenCV based detectors and descriptors.
Allows performing Non-Maximum suppression based on keypoints response, top-response keypoints filtering,
descriptors normalization.
"""
from typing import Union, Iterable, Tuple, Optional
import cv2
import numpy as np
from scipy.spatial import KDTree
class OpenCVFeatures:
    """Wrapper around an OpenCV detector/descriptor (cv2.Feature2D).

    Adds response-based non-maximum suppression, top-K keypoint filtering,
    optional (Root)SIFT-style descriptor normalization and conversion of
    keypoints to Local Affine Frames.
    """

    def __init__(self, features: cv2.Feature2D, max_keypoints: int = -1,
                 nms_diameter: float = 9., normalize_desc: bool = True, root_norm: bool = True,
                 laf_scale_mr_size: Optional[float] = 6.0):
        """
        Args:
            features: OpenCV detector/descriptor instance
            max_keypoints: keep at most this many keypoints; negative keeps all
            nms_diameter: NMS suppression diameter in pixels (radius = half of it)
            normalize_desc: whether to normalize descriptors after description
            root_norm: RootSIFT-like normalization when True, plain L2 otherwise
            laf_scale_mr_size: multiplier applied to keypoint size when
                building local affine frames
        """
        self.features = features
        self.max_keypoints = max_keypoints
        self.nms_radius = nms_diameter / 2
        self.normalize_desc = normalize_desc
        self.root_norm = root_norm
        self.laf_scale_mr_size = laf_scale_mr_size

    @staticmethod
    def normalize_descriptors(descriptors: np.ndarray, root_norm: bool = True) -> np.ndarray:
        """
        Normalize descriptors.
        If root_norm=True apply RootSIFT-like normalization, else regular L2 normalization.
        Args:
            descriptors: array (N, 128) with unnormalized descriptors
            root_norm: boolean flag indicating whether to apply RootSIFT-like normalization
        Returns:
            descriptors: array (N, 128) with normalized descriptors
        """
        descriptors = descriptors.astype(np.float32)
        if root_norm:
            # L1 normalize
            norm = np.linalg.norm(descriptors, ord=1, axis=1, keepdims=True)
            descriptors /= norm
            # take square root of descriptors
            descriptors = np.sqrt(descriptors)
        else:
            # L2 normalize
            norm = np.linalg.norm(descriptors, ord=2, axis=1, keepdims=True)
            descriptors /= norm
        return descriptors

    @staticmethod
    def lafs_from_opencv_kpts(kpts: Iterable[cv2.KeyPoint],
                              mr_size: float = 6.0,
                              with_resp: bool = False) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
        """
        Convert OpenCV keypoint to Local Affine Frames.
        Adapted from kornia_moons for numpy arrays.
        https://github.com/ducha-aiki/kornia_moons/blob/6aa7bdbe1879303bd9bf35494b383e4f959a1135/kornia_moons/feature.py#L60
        Args:
            kpts: iterable of OpenCV keypoints; NOTE it is iterated several
                times below, so a one-shot iterator will not work here
            mr_size: multiplier for keypoint size
            with_resp: flag indicating whether to return responses
        Returns:
            lafs: array (N, 2, 3) of local affine frames made from keypoints
            responses (optional): array (N,) of responses corresponding to lafs
        """
        xy = np.array([k.pt for k in kpts], dtype=np.float32)
        scales = np.array([mr_size * k.size for k in kpts], dtype=np.float32)
        angles = np.array([k.angle for k in kpts], dtype=np.float32)
        # if angles are not set, make them 0
        if np.allclose(angles, -1.):
            angles = np.zeros_like(scales, dtype=np.float32)
        angles = np.deg2rad(-angles)
        n = xy.shape[0]
        lafs = np.empty((n, 2, 3), dtype=np.float32)
        # last column holds the keypoint position
        lafs[:, :, 2] = xy
        # the 2x2 part is a scaled rotation matrix built from size and angle
        s_cos_t = scales * np.cos(angles)
        s_sin_t = scales * np.sin(angles)
        lafs[:, 0, 0] = s_cos_t
        lafs[:, 0, 1] = s_sin_t
        lafs[:, 1, 0] = -s_sin_t
        lafs[:, 1, 1] = s_cos_t
        if with_resp:
            resp = np.array([k.response for k in kpts], dtype=np.float32)
            return lafs, resp
        else:
            return lafs

    def detect_and_compute(self, image: np.array) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
        """
        Detect keypoint with OpenCV-based detector and apply OpenCV-based description.
        Args:
            image: array representation of grayscale image of uint8 data type
        Returns:
            lafs: array (N, 2, 3) of local affine frames created from detected keypoints
            scores: array (N,) of corresponding detector responses
            descriptors: array (N, 128) of descriptors
        """
        kpts, scores, descriptors = detect_kpts_opencv(self.features, image, self.nms_radius, self.max_keypoints,
                                                       describe=True)
        lafs = self.lafs_from_opencv_kpts(kpts, mr_size=self.laf_scale_mr_size, with_resp=False)
        if self.normalize_desc:
            descriptors = self.normalize_descriptors(descriptors, self.root_norm)
        return lafs, scores, descriptors

    def __repr__(self):
        return f'OpenCVFeatures(features={type(self.features)})'
def detect_kpts_opencv(features: cv2.Feature2D, image: np.ndarray, nms_radius: float, max_keypoints: int,
                       describe: bool = False) -> Union[Tuple[np.ndarray, np.ndarray],
                                                        Tuple[np.ndarray, np.ndarray, np.ndarray]]:
    """
    Detect keypoints using OpenCV Detector. Optionally, perform NMS and filter top-response keypoints.
    Optionally perform description.
    Args:
        features: OpenCV based keypoints detector and descriptor
        image: Grayscale image of uint8 data type
        nms_radius: radius of non-maximum suppression. If not positive, skip nms
        max_keypoints: maximum number of keypoints to keep based on response. If negative, keep all
        describe: flag indicating whether to simultaneously compute descriptors
    Returns:
        kpts: 1D array of detected cv2.KeyPoint
        responses: array of the corresponding detector responses
        descriptors: (only when describe=True) array of descriptors
    """
    if describe:
        kpts, descriptors = features.detectAndCompute(image, None)
    else:
        kpts = features.detect(image, None)
    kpts = np.array(kpts)
    responses = np.array([k.response for k in kpts], dtype=np.float32)
    kpts_pt = np.array([k.pt for k in kpts], dtype=np.float32)
    if nms_radius > 0:
        nms_mask = nms_keypoints(kpts_pt, responses, nms_radius)
    else:
        nms_mask = np.ones((kpts_pt.shape[0],), dtype=bool)
    responses = responses[nms_mask]
    kpts = kpts[nms_mask]
    if max_keypoints > 0:
        # argpartition is O(n); kth is clamped so it stays valid when fewer
        # keypoints remain than requested.
        top_score_idx = np.argpartition(-responses, min(max_keypoints, len(responses) - 1))[:max_keypoints]
    else:
        # select all (Ellipsis indexing returns the full array)
        top_score_idx = ...
    if describe:
        return kpts[top_score_idx], responses[top_score_idx], descriptors[nms_mask][top_score_idx]
    else:
        return kpts[top_score_idx], responses[top_score_idx]
def nms_keypoints(kpts: np.ndarray, responses: np.ndarray, radius: float) -> np.ndarray:
    """Greedy non-maximum suppression over keypoint locations.

    Keypoints are visited in order of decreasing response; every kept point
    suppresses all not-yet-visited points within ``radius`` of it.

    Args:
        kpts: array (N, 2) of keypoint coordinates
        responses: array (N,) of detector responses
        radius: suppression radius

    Returns:
        boolean mask (N,) that is True for the surviving keypoints
    """
    # TODO: add approximate tree
    tree = KDTree(kpts)
    suppressed = set()
    keep = np.zeros((kpts.shape[0],), dtype=bool)
    for idx in np.argsort(-responses):
        # already suppressed by a stronger neighbor
        if idx in suppressed:
            continue
        keep[idx] = True
        # query_ball_point includes the point itself
        suppressed.update(tree.query_ball_point(kpts[idx], r=radius))
    return keep
| ucuapps/OpenGlue | models/features/opencv/base.py | base.py | py | 6,977 | python | en | code | 304 | github-code | 36 | [
{
"api_name": "cv2.Feature2D",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "typing.Optional",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "numpy.ndarray",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "numpy.float32",... |
32882318710 | # -*- coding: utf-8 -*-
"""
Created on Sun Jun 28 21:01:17 2020
@author: Hemakshi Pandey
"""
# NLP with a BAG OF WORDS model using a SUPPORT VECTOR MACHINE
## Importing the libraries
import numpy as np
# NumPy is a python library used for working with arrays.
import pandas as pd
# Pandas is used for data analysis and manipulation in tabular form.
import nltk
# NLTK is a leading platform for building Python programs to work with human language data.
import pickle
# pickle serialises/deserialises python objects (used below to save the models).
import re
# This module provides regular expression matching operations
from nltk.corpus import stopwords
nltk.download('stopwords')
# Stop words are filtered out as useless data during pre-processing.
nltk.download('wordnet')
wnlem = nltk.WordNetLemmatizer()
# Lemmatization, unlike Stemming, reduces the inflected words properly ensuring that the root word belongs to the language.
nltk.download('punkt')
# Punkt splits text into sentences using an unsupervised model.
"""## Importing the dataset"""
dataset = pd.read_csv('Final_IPC_label_data.csv') # This data contains the labelled definitions of IPC 302,307 and 376
dataset.head() # The head() function is used to get the first n rows.
"""## Cleaning the texts"""
corpus = [] # cleaned definitions accumulate here
# NOTE(review): the row count 578 is hard-coded -- presumably the number of
# rows in the CSV; confirm, or derive it from len(dataset) instead.
for i in range(0, 578): # the loop for traversing through the rows
    definition = re.sub('[^a-zA-Z]', ' ', dataset['Definition'][i]) # keep letters only
    definition = definition.lower() # converts that into lower case (normalization and cleaning)
    definition = definition.split() # split into a list of words
    wnlem = nltk.WordNetLemmatizer() # NOTE: re-created every iteration; the module-level instance would do
    all_stopwords = stopwords.words('english') # words to drop
    definition = [wnlem.lemmatize(word) for word in definition if not word in set(all_stopwords)] # lemmatize and drop stop words
    definition = ' '.join(definition) # re-join words with single spaces
    corpus.append(definition) # filtered definition are added to the list
print(corpus)
"""## Creating the Bag of Words model"""
from sklearn.feature_extraction.text import CountVectorizer # Convert a collection of text words to a matrix of token counts
cv = CountVectorizer( max_features = 620)
# CountVectorizer converts raw text to a numerical vector representation of
# words, directly usable as features for classification and clustering.
X = cv.fit_transform(corpus).toarray() # one-step fit + transform
# fit learns the vocabulary from the corpus; transform maps each document
# onto that vocabulary's count vector.
y = dataset.iloc[:, -1].values
len(X[0])
"""## Splitting the dataset into the Training set and Test set"""
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state = 0)
"""## Training the Support Vector Machine model on the Training set"""
from sklearn.svm import SVC
classifier = SVC(kernel = 'linear', random_state = 0)
classifier.fit(X_train, y_train)
"""## Predicting the Test set results"""
y_pred = classifier.predict(X_test)
print(np.concatenate((y_pred.reshape(len(y_pred),1), y_test.reshape(len(y_test),1)),1))
"""## Making the Confusion Matrix"""
from sklearn.metrics import confusion_matrix, accuracy_score
cm = confusion_matrix(y_test, y_pred)
print(cm)
accuracy_score(y_test, y_pred)
# Saving our classifier
with open('C:/DEPLOYMENT/SVMclassifier.pkl','wb') as model_SVM_pkl:
    pickle.dump(classifier,model_SVM_pkl)
# Saving the BAG OF WORDS model
with open('C:/DEPLOYMENT/bagofwordsmodel.pkl','wb') as model_BOW_pkl:
    pickle.dump(cv,model_BOW_pkl)
| hemakshi1234/NCRB_Automatic-IPC-Section-classification | flask_NLP_predict_train.py | flask_NLP_predict_train.py | py | 4,502 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "nltk.download",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "nltk.download",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "nltk.WordNetLemmatizer",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "nltk.download",
... |
16748045789 | import os
import pickle
from pathlib import Path
import numpy as np
import pandas as pd
import sklearn
import xgboost as xgb
class CapacityPredictionModel:
    """XGBoost multi-class classifier wrapper for capacity prediction.

    Bundles training, prediction (labels + probabilities), evaluation and
    pickle-based persistence of the underlying model and class definitions.
    """

    def __init__(self, classes=None, hyper_params=None):
        """Create the model.

        Args:
            classes: definition of the classification classes (stored as-is).
            hyper_params: dict of XGBoost hyper-parameters; when None a
                default configuration is used.
        """
        if hyper_params is None:
            # default hyper-parameters
            hyper_params = {
                "objective": "multi:softprob",
                "learning_rate": 0.01,
                # Fixed from the misspelled "col_sample_bytree", which
                # XGBoost does not recognise as a parameter.
                "colsample_bytree": 0.85,
                "max_depth": 3,
                "n_estimators": 256,
                "verbosity": 0,
            }
        # Bug fix: previously a caller-supplied ``hyper_params`` was ignored
        # and ``self.hyper_params`` stayed unset, crashing on the line below.
        self.hyper_params = hyper_params
        # definition of classification classes
        self.classes = classes
        # Bug fix: ``XGBClassifier(kwargs=...)`` passed the dict as a single
        # literal keyword named "kwargs", silently dropping every parameter;
        # they must be expanded with ``**``.
        self.model = xgb.XGBClassifier(**self.hyper_params)

    def train(self, train_x, train_y, val_x, val_y):
        """Fit the model, tracking the loss on both train and validation sets."""
        self.model.fit(
            train_x,
            train_y,
            eval_set=[(train_x, train_y), (val_x, val_y)],
            verbose=False,
        )

    def predict(self, x):
        """Predict labels and class probabilities for ``x``.

        Returns:
            (y_pred, y_pred_prob): predicted labels as a pandas Series and a
            DataFrame of class probabilities, both indexed like ``x``.
        """
        # best iteration = lowest multi-class logloss on the validation set
        best_iter = int(
            np.argmin(self.model.evals_result()["validation_1"]["mlogloss"])
        )
        # predict classes
        y_pred = self.model.predict(x, ntree_limit=best_iter)
        y_pred = pd.DataFrame(y_pred.flatten(), index=x.index)[0]
        # predict probabilities
        y_pred_prob = self.model.predict_proba(x)
        y_pred_prob = pd.DataFrame(y_pred_prob, index=x.index)
        return y_pred, y_pred_prob

    def evaluate(self, x, y_true):
        """Return weighted F1 and accuracy of the predictions on ``x``."""
        scores = {}
        # predict on x
        y_pred, _ = self.predict(x)
        # compute f1 score (restricted to the labels actually predicted)
        scores["f1"] = sklearn.metrics.f1_score(
            y_true.values,
            y_pred.values.flatten(),
            average="weighted",
            labels=np.unique(y_pred.values.flatten()),
        )
        # compute accuracy score
        scores["accuracy"] = sklearn.metrics.accuracy_score(
            y_true.values, y_pred.values.flatten()
        )
        return scores

    def save(self, directory):
        """Persist model and classes as pickles inside ``directory``."""
        # Accept both str and Path; the "/" joins below require a Path.
        directory = Path(directory)
        if not os.path.exists(directory):
            os.makedirs(directory, exist_ok=True)
        # save model
        with open(directory / "model.pkl", "wb") as fh:
            pickle.dump(self.model, fh)
        # save classes
        with open(directory / "classes.pkl", "wb") as fh:
            pickle.dump(self.classes, fh)

    def load(self, directory):
        """Restore model and classes from pickles inside ``directory``."""
        model_file = Path(directory) / "model.pkl"
        with open(model_file, "rb") as fh:
            self.model = pickle.load(fh)
        classes_file = Path(directory) / "classes.pkl"
        with open(classes_file, "rb") as fh:
            self.classes = pickle.load(fh)
| AlexisMignon/openstf | openstf/model/capacity/model.py | model.py | py | 2,790 | python | en | code | null | github-code | 36 | [
{
"api_name": "xgboost.XGBClassifier",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.argmin",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame"... |
10410217579 | import json
import random
from pykafka import KafkaClient
from datetime import datetime
import time
from faker import Faker
CONS_KAFKA_TOPIC = "test-demand3"
CONS_KAFKA_SERVER = "localhost:9092"
#creating instances of Kafka variables
kafka_client = KafkaClient(CONS_KAFKA_SERVER)
kafka_topic = kafka_client.topics[CONS_KAFKA_TOPIC]
producer = kafka_topic.get_producer()
consumer = kafka_topic.get_simple_consumer()
#initializing necessary variables
captain_data = {}
user_data = {}
id = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]
age = [21,20,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40]
fake = Faker()
#making a list of latitudes and longitudes taken from geojson and stored in demand_supply.json
with open('demand_supply.json') as f:
json_array = json.load(f)
coordinates = json_array['coordinates']
#generates captain data and produces to the demand_supply topic every 1 minute
def gen_captain_data():
    """Produce 50 fake 'captain' events to the Kafka topic as JSON messages."""
    i = 0
    while i<50:
        # NOTE: the module-level ``captain_data`` dict is reused and mutated
        # on every iteration; it is serialised before the next overwrite, so
        # each produced message is complete.
        captain_data['capId'] = random.choice(id)
        captain_data['name'] = fake.name()
        captain_data['email'] = fake.email()
        captain_data['age'] = random.choice(age)
        captain_data['event-type'] = 'captain'
        # random coordinate pair from the GeoJSON-derived list.
        # NOTE(review): GeoJSON conventionally stores [longitude, latitude];
        # here index 0 is used as 'lat' -- verify the order in the source file.
        coordinate = random.choice(coordinates)
        captain_data['lat'] = coordinate[0]
        captain_data['long'] = coordinate[1]
        captain_data['timestamp'] = str(datetime.utcnow())
        mssg = json.dumps(captain_data)
        producer.produce(mssg.encode('ascii'))
        i += 1
        #time.sleep(4)
#generates user data and produces to the demand_supply topic every 2 minutes
def gen_user_data():
    """Produce 40 fake 'user' events to the Kafka topic as JSON messages."""
    j = 0
    while j<40:
        # NOTE: the module-level ``user_data`` dict is reused and mutated on
        # every iteration; it is serialised before the next overwrite.
        user_data['userId'] = random.choice(id)
        user_data['name'] = fake.name()
        user_data['email'] = fake.email()
        user_data['age'] = random.choice(age)
        user_data['event-type'] = 'user'
        # random coordinate pair from the GeoJSON-derived list.
        # NOTE(review): GeoJSON conventionally stores [longitude, latitude];
        # here index 0 is used as 'lat' -- verify the order in the source file.
        coordinate = random.choice(coordinates)
        user_data['lat'] = coordinate[0]
        user_data['long'] = coordinate[1]
        user_data['timestamp'] = str(datetime.utcnow())
        msg = json.dumps(user_data)
        producer.produce(msg.encode('ascii'))
        j += 1
        #time.sleep(10)
if __name__ == '__main__':
    # Produce both event streams, then echo everything read back from the
    # topic (the consumer loop blocks waiting for new messages).
    gen_captain_data()
    gen_user_data()
    for message in consumer:
        print(f"{message.offset}: {message.value}")
| ayushmanadhikari/kafka-basics | pykafka-dir/demand_supply.py | demand_supply.py | py | 2,341 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pykafka.KafkaClient",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "faker.Faker",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_n... |
9200352322 | import torch
print("\n---First example---")
x = torch.ones(2, 2, requires_grad=True)
y = x + 2
z = y * y * 3
out = z.mean()
out.backward()
print("x.grad:", x.grad)
# # ----- ----- ----- -----
# # alternative: comment previous backward() and x.grad references
# print("x.grad alternative:", torch.autograd.grad(outputs=out, inputs=x))
# # ----- ----- ----- -----
# ----- ----- ----- -----
# Neural network example
# ----- ----- ----- -----
print("\n---Neural network example---")
x = torch.ones(8) # input tensor
y = torch.zeros(10) # expected output
W = torch.randn(8, 10, requires_grad=True) # weights
b = torch.randn(10, requires_grad=True) # bias vector
z = torch.matmul(x, W)+b # output
loss = torch.nn.functional.binary_cross_entropy_with_logits(z, y)
loss.backward()
# print(W.grad) #OK
print("b.grad:", b.grad) #OK
# x and y were created without requires_grad, so their .grad stays None
print("x.grad:",x.grad)
print("y.grad:",y.grad)
# print(z.grad) # WARNING: z is a non-leaf tensor, its .grad is not retained
# print(loss.grad) # WARNING
# ----- ----- ----- -----
# Vector-Jacobian example #1
# ----- ----- ----- -----
print("\n---Vector-Jacobian example #1---")
x = torch.rand(3, requires_grad=True)
y = x + 2
# y.backward() <---
# RuntimeError: grad can be implicitly
# created only for scalar outputs
# try ---> y.backward(v) where v is any tensor of length 3
# v = torch.rand(3)
v = torch.tensor([1.,2,3])
y.backward(v)
print("x.grad:", x.grad)
# # ----- ----- ----- -----
# # alternative: comment previous backward() and x.grad references
# print("x.grad alternative:",torch.autograd.grad(outputs=y, inputs=x, grad_outputs=v))
# # ----- ----- ----- -----
# ----- ----- ----- -----
# Vector-Jacobian example #2
# ----- ----- ----- -----
print("\n---Vector-Jacobian example #2---")
x = torch.tensor([1., 2], requires_grad=True)
print('x:', x)
y = torch.empty(3)
y[0] = x[0]**2
y[1] = x[0]**2 + 5*x[1]**2
y[2] = 3*x[1]
print('y:', y)
v = torch.tensor([1., 1, 1,])
y.backward(v)
print('x.grad:', x.grad)
# ----- ----- ----- -----
# General case example (header fixed: it previously repeated "example #2")
# ----- ----- ----- -----
print("\n---General case example---")
x = torch.tensor([[1.,2,3],[4,5,6]], requires_grad=True)
y = torch.log(x)
# y is a 2x3 tensor obtained by taking logarithm entry-wise (fixed: was "2x2")
v = torch.tensor([[3.,2,0],[4,0,1]], requires_grad=True)
# v is not a 1D tensor!
y.backward(v)
print("x.grad:", x.grad) # returns dl/dx, as evaluated by "matrix-Jacobian" product v * dy/dx
# therefore we can interpret v as a matrix dl/dy
# for which the chain rule expression dl/dx = dl/dy * dy/dx holds. | antonio-f/pytorch_backward_function | backward_examples.py | backward_examples.py | py | 2,586 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.ones",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "torch.ones",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "torch.randn",
"line_number": 28,... |
769211687 | import glob
from music21 import converter, instrument, note, chord
def get_notes():
    """Get all the notes and chords from the midi files in the rammstein/ directory.

    (Docstring fixed: it previously claimed ./midi_songs, but the glob below
    reads rammstein/*.mid*.)

    Returns:
        flat list of [pitch, duration] pairs collected from every parsed file.
    """
    notes = []
    for file in glob.glob("rammstein/*.mid*"):
        midi = converter.parse(file)
        print("Parsing %s" % file)
        notes_to_parse = None
        try:
            # file has instrument parts
            s2 = instrument.partitionByInstrument(midi)
            notes_to_parse = s2.parts[0].recurse()
        except Exception:
            # Narrowed from a bare ``except:``. Fallback: file has notes in a
            # flat structure.
            notes_to_parse = midi.flat.notes
        notes.extend(parse_notes(notes_to_parse))
    return notes
def parse_notes(notes_to_parse):
    """Convert music21 elements into [pitch, duration] pairs.

    Notes, chords and rests are handled; every other element type is skipped.
    """
    handlers = (
        (note.Note, parse_note),
        (chord.Chord, parse_chord),
        (note.Rest, parse_rest),
    )
    parsed = []
    for element in notes_to_parse:
        # first matching handler wins (Note is checked before Rest, as before)
        for element_type, handler in handlers:
            if isinstance(element, element_type):
                parsed.append(handler(element))
                break
    return parsed
def parse_note(element):
    """Return [pitch, duration] for a single note element."""
    return [str(element.pitch), element.duration.type]
def parse_chord(element):
    """Return [pitch, duration] for a chord; pitches are joined with '.'."""
    joined_pitches = '.'.join(str(n.pitch) for n in element.notes)
    return [joined_pitches, element.duration.type]
def parse_rest(element):
pitch = element.name
duration = element.duration.type
return [pitch, duration] | tanelxen/riff-composer | get_notes.py | get_notes.py | py | 1,486 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "glob.glob",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "music21.converter.parse",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "music21.converter",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "music21.instrument.p... |
6035629289 | import cv2
import numpy as np
from math import exp, pow
FILENAME = "testbaby"  # base name of the input images (<FILENAME>.jpg / <FILENAME>seeded.jpg)
SIZE = 200  # images are resized to SIZE x SIZE pixels
OBJCOLOR, BKGCOLOR = (0, 0, 255), (0, 255, 0)  # OpenCV BGR: object = red, background = green
SOURCE, SINK = -2, -1  # presumably sentinel node ids for the flow graph -- confirm
def read_cuts(filename, image):
    """Color ``image`` in place according to the min-cut partition in ``filename``.

    Expected file layout: line 0 appears to hold the max-flow value (parsed
    but unused here -- TODO confirm), line 1 holds one character per pixel
    ('0' = sink side, anything else = source side) in row-major order over a
    SIZE x SIZE grid.
    """
    with open(filename, "r") as f:
        lines = f.readlines()
    mf = int(lines[0])  # validates the first line is an integer
    idx = 0
    for char in lines[1]:
        if idx >= SIZE*SIZE:
            break
        r, c = idx // SIZE, idx % SIZE
        idx += 1
        if char == '0':
            # attached to sink -> red; uses the module constant instead of
            # re-hard-coding the same (0, 0, 255) tuple
            image[r][c] = OBJCOLOR
        else:
            # attached to source -> green (was hard-coded (0, 255, 0))
            image[r][c] = BKGCOLOR
# Load the raw image (grayscale) and its seeded counterpart (color).
image = cv2.imread("{}.jpg".format(FILENAME), cv2.IMREAD_GRAYSCALE)
image = cv2.resize(image, (SIZE, SIZE))
seeded_image = cv2.imread("{}seeded.jpg".format(FILENAME), cv2.IMREAD_COLOR)
seeded_image = cv2.resize(seeded_image, (SIZE, SIZE), interpolation=cv2.INTER_NEAREST)
unresized_seeded = cv2.resize(seeded_image, (SIZE*10, SIZE*10), interpolation=cv2.INTER_NEAREST)
# NOTE(review): ``graph`` is allocated but never used below -- leftover?
V = image.size + 2
graph = np.zeros((V, V), dtype="int32")
# NOTE(review): the ``.format(FILENAME)`` call is a no-op (the literal has no
# placeholder); read_cuts also returns None, so ``cuts`` is always None.
cuts = read_cuts("graph_output.txt".format(FILENAME), seeded_image)
cv2.imshow("image", image)
cv2.imshow("seeded image", seeded_image)
cv2.imshow("unresized seeded image", unresized_seeded)
cv2.waitKey(0)
| 2022tgoel/6.854-Final-Project | cut_renderer.py | cut_renderer.py | py | 1,292 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv2.imread",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "cv2.IMREAD_GRAYSCALE",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "cv2.resize",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"lin... |
74230073703 | import asyncio
import os
from agconnect.common_server import AGCClient
from agconnect.common_server import CredentialParser
from agconnect.cloud_function import AGConnectFunction
AGCClient.initialize("real_cli",
credential=CredentialParser.to_credential(
(os.path.join(os.path.dirname(__file__), '[PATH]/agconnect_credentials.json'))))
agcFunction = AGConnectFunction.get_instance()
async def my_handler_test():
    """Smoke-test the 'callback' cloud function with a string and a buffer payload."""
    value = agcFunction.wrap("callback", "$latest")
    # 20000 -- presumably milliseconds (20 s); confirm against the SDK docs.
    value.set_timeout(20000)
    test_str = "test s string"
    res = await value.call(test_str)
    print(f"res: {res.get_value()}")
    # also exercise a binary payload
    buf = memoryview(bytearray(10))
    res3 = await value.call(buf)
    print(f"res2: {res3.get_value()}")
async def my_handler():
    """Call the 'callback' cloud function and assert the expected response."""
    good_res = {'simple': 'example'}
    test_str = "test s string"
    res = await agcFunction.wrap("callback", "$latest").call(test_str)
    print(f"res: {res.get_value()}")
    # NOTE(review): ``assert`` is stripped under ``python -O``; acceptable here
    # since this is demo/test code.
    assert res.get_value() == good_res
# NOTE(review): asyncio.get_event_loop() outside a running loop is deprecated
# since Python 3.10; asyncio.run(my_handler_test()) is the modern equivalent.
loop = asyncio.get_event_loop()
loop.run_until_complete(my_handler_test())
| AppGalleryConnect/agc-server-demos-python | cloudfunction/main.py | main.py | py | 1,075 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "agconnect.common_server.AGCClient.initialize",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "agconnect.common_server.AGCClient",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "agconnect.common_server.CredentialParser.to_credential",
"line_n... |
20319120410 | import flask
import flask_redis
import flask_socketio
import time
import threading
import json
redis_store = flask_redis.FlaskRedis()
socketio = flask_socketio.SocketIO()
def get_data_for_hashtag(tag):
    # Return up to the first 1001 entries (indices 0..1000 inclusive) of the
    # Redis list stored under ``tag``.
    return redis_store.lrange(tag, 0, 1000)
def broadcast_thread():
    """Periodically push the newest data point of every category to socket clients."""
    while True:
        # sleeping for 200ms between broadcast rounds (comment fixed: the old
        # one said 50ms, but time.sleep(0.2) is 200ms)
        time.sleep(0.2)
        # get all keys for datapoints:
        keys = redis_store.keys(pattern="points-*")
        for k in keys:
            # key layout is "points-<category>"
            category = k.decode("utf-8").partition('-')[2]
            val = redis_store.lindex(k, 0)
            socketio.emit('points', {"p": float(val)}, namespace="/{}".format(category))
def broadcast_mentions():
    """Every 2 seconds pop one queued mention payload per category and emit it."""
    while True:
        time.sleep(2)
        keys = redis_store.keys(pattern="mentions-*")
        for k in keys:
            # key layout is "mentions-<category>"
            category = k.decode("utf-8").partition('-')[2]
            if redis_store.llen(k) == 0:
                continue
            element = redis_store.lpop(k)
            try:
                jelement = json.loads(element)
            except ValueError:
                # skip payloads that are not valid JSON
                continue
            # Fixed the dead ``'mentions'.format(k)`` call: the literal has no
            # placeholder, so it always evaluated to plain 'mentions'.
            socketio.emit('mentions', jelement, namespace="/{}".format(category))
def create_app():
    """Build the Flask app and start both daemon broadcast workers."""
    app = flask.Flask(__name__)
    redis_store.init_app(app)
    socketio.init_app(app)
    # Start one background daemon thread per broadcaster.
    for worker_fn in (broadcast_thread, broadcast_mentions):
        worker = threading.Thread(target=worker_fn)
        worker.daemon = True
        worker.start()
    return app
app = create_app()
@app.route("/")
def line():
    """Render the index page with the data series for both hashtags."""
    context = {
        side: {"category": tag, "data": get_data_for_hashtag(tag)}
        for side, tag in (("left", "ichackupper"), ("right", "ichacklower"))
    }
    return flask.render_template("index.html", **context)
| thelinerocks/lineweb | app.py | app.py | py | 1,855 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask_redis.FlaskRedis",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "flask_socketio.SocketIO",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "time.sleep",
... |
72447111784 | from base64 import b64decode
import invoicegen.settings
import settings.helper
from django.contrib.auth.decorators import login_required, permission_required
from django.core.files.base import ContentFile
from django.http import JsonResponse
from django.views import View
from django.shortcuts import *
from django.utils import timezone
from django.utils.crypto import get_random_string
from django_tables2 import RequestConfig
from .tables import AgreementTable, AgreementTextTable
from .helper import replace_text
from .forms import AgreementForm, AgreementTextForm
from .models import *
@login_required
@permission_required('agreements.view_agreement')
def agreement_index(request):
    """Show the overview table of all agreements."""
    table = AgreementTable(Agreement.objects.all())
    RequestConfig(request).configure(table)
    return render(request, 'agreements/agreements.html', {'agreements': table})
@login_required
@permission_required('agreements.view_agreementtext')
def agreementtext_index(request):
    """Show the overview table of all model agreement texts."""
    table = AgreementTextTable(AgreementText.objects.all())
    RequestConfig(request).configure(table)
    context = {'model_agreements': table}
    return render(request, 'agreements/agreementtext/agreementtext_index.html', context)
class AddAgreement(View):
    """Create a new agreement.

    GET renders the empty form together with the open articles; POST
    validates, persists the agreement and redirects to step two where the
    template variables are filled in.
    """

    def post(self, request):
        agreement = Agreement()
        agreement_form = AgreementForm(request.POST, instance=agreement)
        if not agreement_form.is_valid():
            # Bug fix: re-render with the same context as GET (the error path
            # previously omitted ``articles``, which GET always supplies).
            return render(request, 'agreements/new_edit_agreement.html',
                          {'toast': 'Formulier onjuist ingevuld',
                           'form': agreement_form,
                           'articles': Product.objects.filter(done=False)})
        data = agreement_form.cleaned_data
        agreement_form.save(commit=False)
        agreement.created = timezone.now()
        # Random slug used as an unguessable public signing URL.
        agreement.url = get_random_string(length=32)
        agreement.company = data['company']
        # First save so the instance has a primary key before the M2M add.
        agreement.save()
        for article in data['article_concerned']:
            agreement.article_concerned.add(article)
        agreement.save()
        request.session['toast'] = 'Overeenkomst toegevoegd'
        return redirect(reverse('new_agreement_step_two', kwargs={'agreement_id': agreement.id}))

    def get(self, request):
        form = AgreementForm()
        articles = Product.objects.filter(done=False)
        return render(request, 'agreements/new_edit_agreement.html', {'form': form, 'articles': articles})
class AddAgreementStepTwo(View):
    """Second step of agreement creation: fill in the template variables."""

    def post(self, request, agreement_id):
        """Substitute the posted variable values into the agreement text."""
        agreement = Agreement.objects.get(id=agreement_id)
        substitutions = {}
        for variable in self.agreement_variables(agreement_id).all():
            # Every variable's field is read (missing ones raise, as before);
            # only named variables end up in the substitution map.
            value = request.POST['variable{}'.format(variable.id)]
            if variable.name:
                substitutions[variable.name] = value
        agreement.agreement_text_copy = replace_text(
            agreement.agreement_text.text,
            agreement.article_concerned.all(),
            agreement.company,
            substitutions,
        )
        agreement.save()
        request.session['toast'] = 'Overeenkomst toegevoegd'
        return redirect(reverse('agreement_index'))

    def get(self, request, agreement_id):
        """Render the variable-entry form for the given agreement."""
        context = {
            'variables': self.agreement_variables(agreement_id),
            'agreement_id': agreement_id,
        }
        return render(request, 'agreements/new_agreement_step_two.html', context)

    def agreement_variables(self, agreement_id):
        """Return the related manager of variables of the agreement's text."""
        return Agreement.objects.get(id=agreement_id).agreement_text.variables
def view_agreement(request, url):
    """Public page for viewing/signing an agreement, looked up by its URL slug.

    Two non-model attributes are attached for the template: the absolute
    signing URL and the contractor's full name.
    """
    agreement = Agreement.objects.get(url=url)
    agreement.complete_url = 'https://' + invoicegen.settings.ALLOWED_HOSTS[
        0] + '/overeenkomsten/ondertekenen/' + agreement.url
    agreement.full_name = settings.helper.get_user_fullname()
    # Bug fix: only GET requests were answered before; any other method made
    # the view return None, which Django rejects with a ValueError.
    return render(request, 'agreements/view_sign_agreement.html', {'agreement': agreement})
@login_required
@permission_required('agreements.change_agreement')
def sign_agreement_contractor(request, url):
    """Store the contractor's signature for the agreement behind ``url``.

    Expects a POST with ``signee_name`` and ``signature`` (a base64 data URL
    produced by the signature pad). Answers with JSON: ``{'success': True}``
    on success, or ``{'error': ...}`` otherwise.
    """
    agreement = Agreement.objects.get(url=url)
    if request.method == 'POST':
        if ('signature' in request.POST and 'signee_name' in request.POST
                and request.POST['signee_name'].strip()
                # Bug fix: the original tested ``signee_name`` twice and never
                # validated that the signature itself was non-empty.
                and request.POST['signature'].strip()):
            # Data URL, presumably "data:image/png;base64,<payload>": the
            # payload after the comma is decoded.
            image_data = request.POST['signature'].split(',')
            image_data = b64decode(image_data[1])
            now = timezone.now()
            file_name = 'signature-of-' + request.POST['signee_name'] + '-at-' + str(now) + '.png'
            agreement.signature_file_contractor = ContentFile(image_data, file_name)
            agreement.signed_by_contractor_at = now
            agreement.signed_by_contractor = True
            agreement.save()
            # Note: the original also assigned ``agreement.complete_url`` here
            # after saving; the value was never used, so it was removed.
            return JsonResponse({'success': True})
        return JsonResponse({'error': 'Naam of handtekening ontbreekt'})
    # Bug fix: non-POST requests previously returned None, which Django rejects.
    return JsonResponse({'error': 'Naam of handtekening ontbreekt'})
def sign_agreement_client(request, url):
    """Store the client's signature for the agreement at *url*.

    NOTE(review): unlike the contractor view this endpoint has no auth
    decorators -- presumably deliberate so clients can sign via the shared
    URL; confirm.
    """
    agreement = Agreement.objects.get(url=url)
    if request.method == 'POST':
        # Require both fields to be present *and* non-empty.  The original
        # condition tested signee_name twice and never validated signature.
        if ('signature' in request.POST and 'signee_name' in request.POST
                and request.POST['signature'].strip()
                and request.POST['signee_name'].strip()):
            # Data URI looks like "data:image/png;base64,...." -- keep payload only.
            image_data = request.POST['signature'].split(',')
            image_data = b64decode(image_data[1])
            now = timezone.now()
            file_name = 'signature-of-' + request.POST['signee_name'] + '-at-' + str(now) + '.png'
            agreement.signature_file_client = ContentFile(image_data, file_name)
            agreement.signed_by_client_at = now
            agreement.signed_by_client = True
            agreement.save()
            return JsonResponse({'success': True})
        else:
            return JsonResponse({'error': 'Naam of handtekening ontbreekt'})
def send_push_notification_signed_agreement():
    """Placeholder: notify interested parties when an agreement is signed (not implemented)."""
    pass
@login_required
@permission_required('agreements.delete_agreement')
def delete_agreement(request, agreement_id=-1):
    """Delete the agreement with id *agreement_id* and redirect to the index.

    Sets a session toast describing success or failure.
    """
    try:
        agreement_to_delete = Agreement.objects.get(id=agreement_id)
        agreement_to_delete.delete()
        request.session['toast'] = 'Overeenkomst verwijderd'
        return redirect(reverse('agreement_index'))
    except Exception:
        # A bare ``except:`` also swallowed SystemExit/KeyboardInterrupt;
        # catching Exception keeps the "lookup failed" fallback without that.
        request.session['toast'] = 'Verwijderen mislukt'
        return redirect(reverse('agreement_index'))
@login_required
@permission_required('agreements.delete_agreementtext')
def delete_model_agreement(request, model_agreement_text_id=-1):
    """Delete the model agreement text with the given id and redirect to its index.

    Sets a session toast describing success or failure.
    """
    try:
        agreement_text_to_delete = AgreementText.objects.get(id=model_agreement_text_id)
        agreement_text_to_delete.delete()
        request.session['toast'] = 'Modelvereenkomst verwijderd'
        return redirect(reverse('agreementtext_index'))
    except Exception:
        # A bare ``except:`` also swallowed SystemExit/KeyboardInterrupt;
        # catching Exception keeps the "lookup failed" fallback without that.
        request.session['toast'] = 'Verwijderen mislukt'
        return redirect(reverse('agreementtext_index'))
class EditAgreementText(View):
    """Form handler (GET/POST) for editing an existing model agreement text."""

    def post(self, request, model_agreement_id):
        """Validate and save the edited text, attaching any extra variables."""
        text = AgreementText.objects.get(id=model_agreement_id)
        form = AgreementTextForm(request.POST, instance=text)
        if not form.is_valid():
            return render(request, 'agreements/agreementtext/edit_agreementtext.html',
                          {'form': form, 'edit': True, 'error': form.errors,
                           'model_agreement_id': text.id})
        form.save()
        text.variables.add(*get_extra_variables(request))
        text.save()
        return redirect(reverse('agreementtext_index'))

    def get(self, request, model_agreement_id):
        """Render the edit form pre-filled with the existing text."""
        text = AgreementText.objects.get(id=model_agreement_id)
        return render(request, 'agreements/agreementtext/edit_agreementtext.html',
                      {'form': AgreementTextForm(instance=text),
                       'model_agreement_id': text.id})
class AddAgreementText(View):
    """Form handler (GET/POST) for creating a new model agreement text."""

    def post(self, request):
        """Validate and persist a new text, attaching any extra variables."""
        new_text = AgreementText()
        form = AgreementTextForm(request.POST, instance=new_text)
        if not form.is_valid():
            return render(request, 'agreements/agreementtext/new_agreementtext.html',
                          {'toast': 'Formulier onjuist ingevuld', 'form': form,
                           'error': form.errors})
        form.save(commit=False)
        new_text.edited_at = timezone.now()
        new_text.save()
        new_text.variables.add(*get_extra_variables(request))
        new_text.save()
        request.session['toast'] = 'Modelovereenkomst toegevoegd'
        return redirect(reverse('agreementtext_index'))

    def get(self, request):
        """Render an empty creation form."""
        return render(request, 'agreements/agreementtext/new_agreementtext.html',
                      {'form': AgreementTextForm()})
def get_extra_variables(request):
    """Create AgreementTextVariable rows from numbered POST fields.

    Reads ``var_name1``/``desc1``, ``var_name2``/``desc2``, ... until a
    ``var_nameN`` key is missing, saving one variable per pair.  Returns the
    saved variables, or an empty list when ``var_name1`` is blank.
    """
    name = request.POST['var_name1']
    if name == '':
        return []
    saved = []
    index = 1
    while name is not None:
        description = request.POST['desc' + str(index)]
        variable = AgreementTextVariable(name=name, description=description)
        variable.save()
        saved.append(variable)
        index += 1
        name = request.POST.get('var_name' + str(index))  # None when the field is absent
    return saved
return [] | jlmdegoede/Invoicegen | agreements/views.py | views.py | py | 9,931 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tables.AgreementTable",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "django_tables2.RequestConfig",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 20,
"usage_type": "name... |
2252814008 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/01/22 10:18
# @Author : zc
# @File : get_htmlText.py
import requests
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.by import By
from PIL import Image
# Scrape course descriptions (paragraph text) from itest.info.
url = "http://www.itest.info/courses"
soup = BeautifulSoup(requests.get(url).text,'html.parser')
for courses in soup.find_all('p'):
    print(courses.text)
    print("\r")

# Scrape hot-topic and topic-link titles (with URLs) from v2ex.com.
url = "https://www.v2ex.com"
v2ex = BeautifulSoup(requests.get(url).text,'html.parser')
for span in v2ex.find_all('span',class_='item_hot_topic_title'):
    print(span.find('a').text,span.find('a')['href'])
for title in v2ex.find_all("a",class_="topic-link"):
    print(title.text,url+title["href"])

# Scrape images from jandan.net; a browser user-agent avoids bot blocking.
headers = {
    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36'
}
def download_file(url):
    """Download *url* into the local img directory and return the saved path."""
    print('Downloading %s' % url)
    local_filename = url.split('/')[-1]
    img_path = "/Users/zhangc/Desktop/GitTest/project_Buger_2/Python爬虫/img/" + local_filename
    print(local_filename)
    # Stream the body and close the connection deterministically; the original
    # leaked the response object and flushed the file after every 1 KiB chunk.
    with requests.get(url, stream=True, headers=headers) as r:
        with open(img_path, 'wb') as f:
            for chunk in r.iter_content(chunk_size=1024):
                if chunk:
                    f.write(chunk)
    return img_path
# Fetch the jandan.net drawings listing page for image extraction below.
url = 'http://jandan.net/drawings'
soup = BeautifulSoup(requests.get(url, headers=headers).text, 'html.parser')
def valid_img(src):
    """Return True for jpg image URLs hosted on sinaimg.cn."""
    return '.sinaimg.cn' in src and src.endswith('jpg')
# Download every matching image; protocol-relative src values get "http:" prepended.
for img in soup.find_all('img', src=valid_img):
    src = img['src']
    if not src.startswith('http'):
        src = 'http:' + src
    download_file(src)

# Zhihu trending: scrape special-card titles from the explore page.
headers ={
    "user-agent":"user-agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36"
}
url = "https://www.zhihu.com/explore"
zhihu = BeautifulSoup(requests.get(url,headers=headers).text,"html.parser")
for title in zhihu.find_all('a',class_="ExploreSpecialCard-contentTitle"):
    print(title.text)

# Same page scraped via Selenium (renders JS-driven content).
url = "https://www.zhihu.com/explore"
driver = webdriver.Chrome("/Users/zhangc/Desktop/GitTest/project_Buger_2/poium测试库/tools/chromedriver")
driver.get(url)
info = driver.find_element(By.CSS_SELECTOR,"div.ExploreHomePage-specials")
for title in info.find_elements(By.CSS_SELECTOR,"div.ExploreHomePage-specialCard > div.ExploreSpecialCard-contentList > div.ExploreSpecialCard-contentItem > a.ExploreSpecialCard-contentTitle"):
print(title.text,title.get_attribute('href')) | Owen-ET/project_Buger_2 | Python爬虫/get_htmlText.py | get_htmlText.py | py | 2,735 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "bs4.BeautifulSoup",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "requests.get",
"... |
33329719512 | #!/bin/env python
import json
import helpers
if __name__ == '__main__':
    # Build a GEOID -> county-name lookup from the source GeoJSON.
    root_dir = helpers.root_dir()

    path = "%s/sources/counties.geojson" % root_dir
    print("loading %s" % path)
    # Context managers close the handles, and text mode is required:
    # json.dump writes str, so a 'wb' handle fails on Python 3.
    with open(path, 'r') as f:
        geojson = json.load(f)

    data = {}
    for feature in geojson["features"]:
        props = feature["properties"]
        data[props["GEOID"]] = props["NAME"]

    path = "%s/data/counties.json" % root_dir
    print("saving %s" % path)
    with open(path, 'w') as f:
        json.dump(data, f, sort_keys=True)
| knightmirnj/acluedtool | counties.py | counties.py | py | 524 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "helpers.root_dir",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 28,
"usage_type": "call"
}
] |
23443224445 | from .utils import display_table, read_sold, str_to_date
def show_sold(start_date, end_date):
    """Display every sale whose date falls within [start_date, end_date]."""
    begin = str_to_date(start_date)
    end = str_to_date(end_date)
    header, rows = read_sold()
    # The sale date is stored in the last column of each row.
    in_range = [row for row in rows if begin <= str_to_date(row[-1]) <= end]
    display_table(f"Sold Products (from {begin} to {end})", header, in_range)
| sndr157/Inventory | modules/sold.py | sold.py | py | 459 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "utils.str_to_date",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "utils.read_sold",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "utils.str_to_date",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "utils.display_table"... |
32676816163 | import requests
import sys
import urllib3
from requests_toolbelt.utils import dump
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
proxies = {'http': 'http://127.0.0.1:8080', 'https': 'http://127.0.0.1:8080'}
def exploit_sqli(url, payload):
    """Send the injection payload to the category filter and report success.

    Success is inferred from ".svg" appearing in the response body --
    presumably product icons shown when rows come back; verify per target.
    """
    target = url + 'filter?category=' + payload
    response = requests.get(target, verify=False, proxies=proxies)
    # Dump the full request/response pair for inspection.
    print(dump.dump_all(response).decode('utf-8'))
    return ".svg" in response.text
if __name__ == "__main__":
    # CLI: <url> <payload>, e.g.  www.example.com "1=1"
    try:
        url = sys.argv[1].strip()
        payload = sys.argv[2].strip()
    except IndexError:
        print("[-] Usage: %s <url> <payload>" % sys.argv[0])
        print('[-] Example: %s www.example.com "1=1"' % sys.argv[0])
        sys.exit(-1)
    if exploit_sqli(url, payload):
        print("[+] SQL injection successful!")
    else:
        print("[-] SQL injection unsuccessful!")
| marcomania/Web-Security-Academy-Series | sql-injection/lab-01/sqli-lab-01.py | sqli-lab-01.py | py | 934 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "urllib3.disable_warnings",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "urllib3.exceptions",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "requests.get",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "requests_... |
70516415785 | import os
import os.path
import sys
from pyspark import SparkContext
from pyspark.mllib.recommendation import ALS
from numpy import array
if __name__ == "__main__":
    # NOTE: Python 2 script -- it uses the `print >> stream` statement below.
    data_file = '/spark/data/als.data'
    if len(sys.argv) == 1:
        print >> sys.stderr, "Usage: filtering.py <master>"
        exit(-1)
    else:
        sc = SparkContext(sys.argv[1], "Collaborative Filtering")
        data = sc.textFile(data_file)
        # Each input line is comma-separated numbers -> numpy array of floats.
        ratings = data.map(lambda line: array([float(x) for x in line.split(',')]))

        # Build the recommendation model using Alternating Least Squares
        model = ALS.train(ratings, 1, 20)

        # Evaluate the model on training data: predict every (user, product)
        # pair seen in training, then compare against the known ratings.
        testdata = ratings.map(lambda p: (int(p[0]), int(p[1])))
        predictions = model.predictAll(testdata).map(lambda r: ((r[0], r[1]), r[2]))
        ratesAndPreds = ratings.map(lambda r: ((r[0], r[1]), r[2])).join(predictions)
        MSE = ratesAndPreds.map(lambda r: (r[1][0] - r[1][1])**2).reduce(lambda x, y: x + y)/ratesAndPreds.count()
        print("Mean Squared Error = " + str(MSE))
| jhorey/ferry | ferry/data/dockerfiles/spark/filtering.py | filtering.py | py | 1,072 | python | en | code | 253 | github-code | 36 | [
{
"api_name": "sys.argv",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "pyspark.SparkContext",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"li... |
15636919766 | import pybullet as p
import time
import pybullet_data
import math
import numpy as np
physicsClient = p.connect(p.GUI)#or p.DIRECT for non-graphical version
p.setAdditionalSearchPath(pybullet_data.getDataPath()) #optionally
p.setGravity(0,0,-10)
planeId = p.loadURDF("plane.urdf")

# Spawn the excavator slightly above the ground plane, unrotated.
startPos = [0, 0, 1.4054411813121799]
startOrientation = p.getQuaternionFromEuler([0,0,0])
boxId = p.loadURDF("aba_excavator/excavator.urdf",startPos, startOrientation)

# Drive the arm joints with constant velocity targets for 1000 steps at
# 240 Hz real-time pacing (~4.2 simulated seconds).
for i in range(1000):
    p.setJointMotorControl2(boxId, 1 , p.VELOCITY_CONTROL, targetVelocity = 0)
    p.setJointMotorControl2(boxId, 2 , p.VELOCITY_CONTROL, targetVelocity = 0.4, force= 250_000)
    p.setJointMotorControl2(boxId, 3 , p.VELOCITY_CONTROL, targetVelocity = 0.1, force= 250_000)
    p.setJointMotorControl2(boxId, 4 , p.VELOCITY_CONTROL, targetVelocity = 0.1)
    # (linkWorldPosition,
    # linkWorldOrientation,
    # localInertialFramePosition,
    # localInertialFrameOrientation,
    # worldLinkFramePosition,
    # worldLinkFrameOrientation,
    # worldLinkLinearVelocity,
    # worldLinkAngularVelocity) = p.getLinkState(boxId,4, computeLinkVelocity=1, computeForwardKinematics=1)
    # print(linkWorldPosition)
    p.stepSimulation()
    time.sleep(1.0/240.)

# getJointStates returns one state tuple per joint; element [0] is the joint
# position (angle) per the pybullet getJointState API -- confirm if upgraded.
theta0, theta1, theta2, theta3 = p.getJointStates(boxId, [1,2,3,4])
print(theta0[0], theta1[0], theta2[0], theta3[0])
p.disconnect()
| cencencendi/excabot | coba.py | coba.py | py | 1,438 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pybullet.connect",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pybullet.GUI",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "pybullet.setAdditionalSearchPath",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pybu... |
70415685225 | import sys
from io import StringIO
from unittest import mock, TestCase
from unittest.mock import call, patch
from bs4 import BeautifulSoup
import ffq.ffq as ffq
from tests.mixins import TestMixin
from ffq.main import main
from ffq import __version__
class TestFfq(TestMixin, TestCase):
def test_validate_accessions(self):
SEARCH_TYPES = (
"SRR",
"ERR",
"DRR",
"SRP",
"ERP",
"DRP",
"SRX",
"GSE",
"GSM",
"DOI",
)
self.assertEqual(
[
{
"accession": "SRR244234",
"prefix": "SRR",
"valid": True,
"error": None,
},
{
"accession": "SRT44322",
"prefix": "UNKNOWN",
"valid": False,
"error": None,
},
{
"accession": "10.1016/J.CELL.2018.06.052",
"prefix": "DOI",
"valid": True,
"error": None,
},
{
"accession": "ASA10.1016/J.CELL.2018.06.052",
"prefix": "UNKNOWN", # TODO better DOI error handling
"valid": False,
"error": None,
},
{
"accession": "GSM12345",
"prefix": "GSM",
"valid": True,
"error": None,
},
{
"accession": "GSE567890",
"prefix": "GSE",
"valid": True,
"error": None,
},
],
ffq.validate_accessions(
[
"SRR244234",
"SRT44322",
"10.1016/j.cell.2018.06.052",
"ASA10.1016/j.cell.2018.06.052",
"GSM12345",
"GSE567890",
],
SEARCH_TYPES,
),
)
def test_parse_run(self):
self.maxDiff = None
with mock.patch(
"ffq.ffq.get_files_metadata_from_run"
) as get_files_metadata_from_run, mock.patch(
"ffq.ffq.ncbi_fetch_fasta"
) as ncbi_fetch_fasta, mock.patch(
"ffq.ffq.parse_ncbi_fetch_fasta"
) as parse_ncbi_fetch_fasta:
with open(self.run_path, "r") as f:
soup = BeautifulSoup(f.read(), "xml")
get_files_metadata_from_run.return_value = []
ncbi_fetch_fasta.return_value = []
parse_ncbi_fetch_fasta.return_value = []
self.assertEqual(
{
"accession": "SRR8426358",
"experiment": "SRX5234128",
"study": "SRP178136",
"sample": "SRS4237519",
"title": "Illumina HiSeq 4000 paired end sequencing; GSM3557675: old_Dropseq_1; Mus musculus; RNA-Seq",
"attributes": {
"ENA-SPOT-COUNT": 109256158,
"ENA-BASE-COUNT": 21984096610,
"ENA-FIRST-PUBLIC": "2019-01-27",
"ENA-LAST-UPDATE": "2019-01-27",
},
"files": {"aws": [], "ftp": [], "gcp": [], "ncbi": []},
},
ffq.parse_run(soup),
)
def test_parse_run_bam(self):
with open(self.run2_path, "r") as f:
soup = BeautifulSoup(f.read(), "xml")
self.maxDiff = None
self.assertEqual(
{
"accession": "SRR6835844",
"attributes": {
"ENA-BASE-COUNT": 12398988240,
"ENA-FIRST-PUBLIC": "2018-03-30",
"ENA-LAST-UPDATE": "2018-03-30",
"ENA-SPOT-COUNT": 137766536,
"assembly": "mm10",
"dangling_references": "treat_as_unmapped",
},
"experiment": "SRX3791763",
"files": {
"ftp": [
{
"accession": "SRR6835844",
"filename": "10X_P4_0.bam",
"filetype": "bam",
"filesize": 17093057664,
"filenumber": 1,
"md5": "5355fe6a07155026085ce46631268ab1",
"urltype": "ftp",
"url": "ftp://ftp.sra.ebi.ac.uk/vol1/SRA653/SRA653146/bam/10X_P4_0.bam",
}
],
"aws": [
{
"accession": "SRR6835844",
"filename": "10X_P4_0.bam.1",
"filetype": "bam",
"filesize": None,
"filenumber": 1,
"md5": None,
"urltype": "aws",
"url": "https://sra-pub-src-1.s3.amazonaws.com/SRR6835844/10X_P4_0.bam.1",
},
{
"accession": "SRR6835844",
"filename": "SRR6835844",
"filenumber": 1,
"filesize": None,
"filetype": "sra",
"md5": None,
"url": "https://sra-pub-run-odp.s3.amazonaws.com/sra/SRR6835844/SRR6835844",
"urltype": "aws",
},
],
"gcp": [
{
"accession": "SRR6835844",
"filename": "10X_P4_0.bam.1",
"filetype": "bam",
"filesize": None,
"filenumber": 1,
"md5": None,
"urltype": "gcp",
"url": "gs://sra-pub-src-1/SRR6835844/10X_P4_0.bam.1",
},
{
"accession": "SRR6835844",
"filename": "SRR6835844.1",
"filenumber": 1,
"filesize": None,
"filetype": "sra",
"md5": None,
"url": "gs://sra-pub-crun-7/SRR6835844/SRR6835844.1",
"urltype": "gcp",
},
],
"ncbi": [],
},
"sample": "SRS3044236",
"study": "SRP131661",
"title": "Illumina NovaSeq 6000 sequencing; GSM3040890: library 10X_P4_0; Mus musculus; RNA-Seq",
},
ffq.parse_run(soup),
)
def test_parse_sample(self):
with open(self.sample_path, "r") as f:
soup = BeautifulSoup(f.read(), "xml")
self.assertEqual(
{
"accession": "SRS4237519",
"title": "old_Dropseq_1",
"organism": "Mus musculus",
"attributes": {
"source_name": "Whole lung",
"tissue": "Whole lung",
"age": "24 months",
"number of cells": "799",
"ENA-SPOT-COUNT": 109256158,
"ENA-BASE-COUNT": 21984096610,
"ENA-FIRST-PUBLIC": "2019-01-11",
"ENA-LAST-UPDATE": "2019-01-11",
},
"experiments": "SRX5234128",
},
ffq.parse_sample(soup),
)
def test_parse_experiment_with_run(self):
with open(self.experiment_path, "r") as f:
soup = BeautifulSoup(f.read(), "xml")
self.maxDiff = None
self.assertEqual(
{
"accession": "SRX3517583",
"instrument": "HiSeq X Ten",
"platform": "ILLUMINA",
"runs": {
"SRR6425163": {
"accession": "SRR6425163",
"attributes": {
"ENA-BASE-COUNT": 74994708900,
"ENA-FIRST-PUBLIC": "2017-12-30",
"ENA-LAST-UPDATE": "2017-12-30",
"ENA-SPOT-COUNT": 249982363,
},
"experiment": "SRX3517583",
"files": {
"aws": [
{
"accession": "SRR6425163",
"filename": "J2_S1_L001_R1_001.fastq.gz",
"filenumber": 1,
"filesize": None,
"filetype": "fastq",
"md5": None,
"url": "s3://sra-pub-src-6/SRR6425163/J2_S1_L001_R1_001.fastq.gz",
"urltype": "aws",
},
{
"accession": "SRR6425163",
"filename": "J2_S1_L001_R2_001.fastq.gz",
"filenumber": 2,
"filesize": None,
"filetype": "fastq",
"md5": None,
"url": "s3://sra-pub-src-6/SRR6425163/J2_S1_L001_R2_001.fastq.gz",
"urltype": "aws",
},
{
"accession": "SRR6425163",
"filename": "SRR6425163",
"filenumber": 1,
"filesize": None,
"filetype": "sra",
"md5": None,
"url": "https://sra-pub-run-odp.s3.amazonaws.com/sra/SRR6425163/SRR6425163",
"urltype": "aws",
},
],
"ftp": [
{
"accession": "SRR6425163",
"filename": "SRR6425163_1.fastq.gz",
"filenumber": 1,
"filesize": 21858866426,
"filetype": "fastq",
"md5": "2dcf9ae4cfb30ec0aaf06edf0e3ca49a",
"url": "ftp://ftp.sra.ebi.ac.uk/vol1/fastq/SRR642/003/SRR6425163/SRR6425163_1.fastq.gz",
"urltype": "ftp",
},
{
"accession": "SRR6425163",
"filename": "SRR6425163_2.fastq.gz",
"filenumber": 2,
"filesize": 22946392178,
"filetype": "fastq",
"md5": "1d0703967a2331527a3aebf97a3f1c32",
"url": "ftp://ftp.sra.ebi.ac.uk/vol1/fastq/SRR642/003/SRR6425163/SRR6425163_2.fastq.gz",
"urltype": "ftp",
},
],
"gcp": [
{
"accession": "SRR6425163",
"filename": "J2_S1_L001_R1_001.fastq.gz",
"filenumber": 1,
"filesize": None,
"filetype": "fastq",
"md5": None,
"url": "gs://sra-pub-src-6/SRR6425163/J2_S1_L001_R1_001.fastq.gz",
"urltype": "gcp",
},
{
"accession": "SRR6425163",
"filename": "J2_S1_L001_R2_001.fastq.gz",
"filenumber": 2,
"filesize": None,
"filetype": "fastq",
"md5": None,
"url": "gs://sra-pub-src-6/SRR6425163/J2_S1_L001_R2_001.fastq.gz",
"urltype": "gcp",
},
{
"accession": "SRR6425163",
"filename": "SRR6425163.1",
"filenumber": 1,
"filesize": None,
"filetype": "sra",
"md5": None,
"url": "gs://sra-pub-crun-7/SRR6425163/SRR6425163.1",
"urltype": "gcp",
},
],
"ncbi": [],
},
"sample": "SRS2792433",
"study": "SRP127624",
"title": "HiSeq X Ten paired end sequencing; GSM2905292: BMPa-1; Homo sapiens; RNA-Seq",
}
},
"title": "HiSeq X Ten paired end sequencing; GSM2905292: BMPa-1; Homo sapiens; RNA-Seq",
},
ffq.parse_experiment_with_run(soup, 10),
)
def test_parse_study(self):
with open(self.study_path, "r") as f:
soup = BeautifulSoup(f.read(), "xml")
self.assertEqual(
{
"accession": "SRP178136",
"title": "Multi-modal analysis of the aging mouse lung at cellular resolution",
"abstract": "A) Whole lung tissue from 24 months (n=7) "
"and 3 months old (n=8) mice was dissociated and single-cell "
"mRNAseq libraries generated with Drop-Seq. B) Bulk RNA-seq "
"data was generated from whole mouse lung tissue of old (n=3) "
"and young (n=3) samples. C) Bulk RNA-seq data was generated "
"from flow-sorted macrophages from old (n=7) and young (n=5) "
"mice and flow-sorted epithelial cells from old (n=4) and "
"young (n=4) mice. Overall design: Integration of bulk RNA-seq "
"from whole mouse lung tissue and bulk RNA-seq from flow-sorted "
"lung macrophages and epithelial cells was used to validate results "
"obtained from single cell RNA-seq of whole lung tissue.",
"accession": "SRP178136",
},
ffq.parse_study(soup),
)
def test_gse_search_json(self):
with open(self.gse_search_path, "r") as f:
soup = BeautifulSoup(f.read(), "html.parser")
self.assertEqual(
{"accession": "GSE93374", "geo_id": "200093374"},
ffq.parse_gse_search(soup),
)
def test_gse_summary_json(self):
with open(self.gse_summary_path, "r") as f:
soup = BeautifulSoup(f.read(), "html.parser")
self.assertEqual({"accession": "SRP096361"}, ffq.parse_gse_summary(soup))
def test_ffq_gse(self):
# Need to figure out how to add for loop test for adding individual runs
with mock.patch(
"ffq.ffq.get_gse_search_json"
) as get_gse_search_json, mock.patch(
"ffq.ffq.parse_gse_search"
) as parse_gse_search, mock.patch(
"ffq.ffq.gse_to_gsms"
) as gse_to_gsms, mock.patch(
"ffq.ffq.ffq_gsm"
) as ffq_gsm, mock.patch(
"ffq.ffq.geo_to_suppl"
) as geo_to_suppl:
parse_gse_search.return_value = {"accession": "GSE1", "geo_id": "GEOID1"}
gse_to_gsms.return_value = ["GSM_1", "GSM_2"]
geo_to_suppl.return_value = {
"filename": "file",
"size": "size",
"url": "url",
}
ffq_gsm.side_effect = [
{"accession": "GSM1"},
{"accession": "GSM2"},
"test",
"test",
]
self.assertEqual(
{
"accession": "GSE1",
"supplementary_files": {
"filename": "file",
"size": "size",
"url": "url",
},
"geo_samples": {
"GSM1": {"accession": "GSM1"},
"GSM2": {"accession": "GSM2"},
},
},
ffq.ffq_gse("GSE1"),
)
get_gse_search_json.assert_called_once_with("GSE1")
gse_to_gsms.assert_called_once_with("GSE1")
ffq_gsm.assert_has_calls([call("GSM_1", None), call("GSM_2", None)])
def test_ffq_gsm(self):
# Need to figure out how to add for loop test for adding individual runs
with mock.patch(
"ffq.ffq.get_gsm_search_json"
) as get_gsm_search_json, mock.patch(
"ffq.ffq.geo_to_suppl"
) as geo_to_suppl, mock.patch(
"ffq.ffq.gsm_to_platform"
) as gsm_to_platform, mock.patch(
"ffq.ffq.gsm_id_to_srs"
) as gsm_id_to_srs, mock.patch(
"ffq.ffq.ffq_sample"
) as ffq_sample:
get_gsm_search_json.return_value = {"accession": "GSM1", "geo_id": "GSMID1"}
geo_to_suppl.return_value = {"supplementary_files": "supp"}
gsm_to_platform.return_value = {"platform": "platform"}
gsm_id_to_srs.return_value = "SRS1"
ffq_sample.return_value = {"accession": "SRS1"}
self.assertEqual(
{
"accession": "GSM1",
"supplementary_files": {"supplementary_files": "supp"},
"platform": "platform",
"samples": {"SRS1": {"accession": "SRS1"}},
},
ffq.ffq_gsm("GSM1"),
)
get_gsm_search_json.assert_called_once_with("GSM1")
geo_to_suppl.assert_called_once_with("GSM1", "GSM")
gsm_to_platform.assert_called_once_with("GSM1")
gsm_id_to_srs.assert_called_once_with("GSMID1")
ffq_sample.assert_called_once_with("SRS1", None)
def test_ffq_run(self):
with mock.patch("ffq.ffq.get_xml") as get_xml, mock.patch(
"ffq.ffq.parse_run"
) as parse_run:
run = mock.MagicMock()
parse_run.return_value = run
self.assertEqual(run, ffq.ffq_run("SRR8426358"))
get_xml.assert_called_once_with("SRR8426358")
def test_ffq_study(self):
with mock.patch("ffq.ffq.get_xml") as get_xml, mock.patch(
"ffq.ffq.parse_study"
) as parse_study, mock.patch("ffq.ffq.ffq_sample") as ffq_sample, mock.patch(
"ffq.ffq.get_samples_from_study"
) as get_samples_from_study:
parse_study.return_value = {"study": "study_id"}
get_samples_from_study.return_value = ["sample_id1", "sample_id2"]
ffq_sample.side_effect = [{"accession": "id1"}, {"accession": "id2"}]
self.assertEqual(
{
"study": "study_id",
"samples": {
"id1": {"accession": "id1"},
"id2": {"accession": "id2"},
},
},
ffq.ffq_study("SRP226764"),
)
get_xml.assert_called_once_with("SRP226764")
self.assertEqual(2, ffq_sample.call_count)
ffq_sample.assert_has_calls(
[call("sample_id1", None), call("sample_id2", None)]
)
def test_ffq_experiment(self):
with mock.patch("ffq.ffq.get_xml") as get_xml, mock.patch(
"ffq.ffq.parse_experiment_with_run"
) as parse_experiment_with_run:
parse_experiment_with_run.return_value = {
"experiments": "experiment",
"runs": {"run": "run"},
}
self.assertEqual(
{"experiments": "experiment", "runs": {"run": "run"}},
ffq.ffq_experiment("SRX7048194"),
)
get_xml.assert_called_once_with("SRX7048194")
# Do one per accession, simply asserting equal to the expected list of links.
# def test_ffq_links_gse_ftp(self):
# self.maxDiff = None
# capturedOutput = io.StringIO()
# sys.stdout = capturedOutput
# ffq.ffq_links([('GSE', 'GSE112570')], 'ftp')
# sys.stdout = sys.__stdout__
# self.assertEqual(
# capturedOutput.getvalue(),
# (
# 'accession\tfiletype\tfilenumber\tlink\n'
# 'GSM3073088\t\tbam\t1\tftp://ftp.sra.ebi.ac.uk/vol1/SRA678/SRA678017/bam/H17w_K1.bam\n' # noqa
# 'GSM3073089\t\tbam\t1\tftp://ftp.sra.ebi.ac.uk/vol1/SRA678/SRA678017/bam/H17w_K2.bam\n' # noqa
# )
# )
# def test_ffq_links_srs_ftp(self):
# capturedOutput = io.StringIO() # Create StringIO object
# sys.stdout = capturedOutput # and redirect stdout.
# ffq.ffq_links([('SRS', 'SRS4629239')], 'ftp') # Call function.
# sys.stdout = sys.__stdout__
# self.assertEqual(
# capturedOutput.getvalue(),
# 'ftp://ftp.sra.ebi.ac.uk/vol1/fastq/SRR890/000/SRR8903510/SRR8903510.fastq.gz '
# )
# def test_ffq_links_gsm_aws(self):
# capturedOutput = io.StringIO()
# sys.stdout = capturedOutput
# ffq.ffq_links([('GSM', 'GSM3396164')], 'AWS')
# sys.stdout = sys.__stdout__
# self.assertEqual(
# capturedOutput.getvalue(),
# 'https://sra-pub-src-1.s3.amazonaws.com/SRR7881402/possorted_genome_bam_Ck.bam.1 '
# )
# def test_ffq_links_srr_gcp(self):
# capturedOutput = io.StringIO()
# sys.stdout = capturedOutput
# ffq.ffq_links([('SRR', 'SRR8327928')], 'GCP')
# sys.stdout = sys.__stdout__
# self.assertEqual(
# capturedOutput.getvalue(),
# 'gs://sra-pub-src-1/SRR8327928/PDX110_possorted_genome_bam.bam.1 '
# )
# def test_ffq_links_srx_ncbi(self):
# capturedOutput = io.StringIO()
# sys.stdout = capturedOutput
# ffq.ffq_links([('SRX', 'SRX4063411')], 'NCBI')
# sys.stdout = sys.__stdout__
# self.assertEqual(
# capturedOutput.getvalue(),
# 'https://sra-downloadb.be-md.ncbi.nlm.nih.gov/sos2/sra-pub-run-13/SRR7142647/SRR7142647.1 '
# )
def test_ffq_doi(self):
with mock.patch("ffq.ffq.get_doi") as get_doi, mock.patch(
"ffq.ffq.search_ena_title"
) as search_ena_title, mock.patch("ffq.ffq.ffq_study") as ffq_study:
get_doi.return_value = {"title": ["title"]}
search_ena_title.return_value = ["SRP1"]
self.assertEqual([ffq_study.return_value], ffq.ffq_doi("doi"))
get_doi.assert_called_once_with("doi")
search_ena_title.assert_called_once_with("title")
ffq_study.assert_called_once_with("SRP1", None)
def test_ffq_doi_no_title(self):
with mock.patch("ffq.ffq.get_doi") as get_doi, mock.patch(
"ffq.ffq.search_ena_title"
) as search_ena_title, mock.patch(
"ffq.ffq.ncbi_search"
) as ncbi_search, mock.patch(
"ffq.ffq.ncbi_link"
) as ncbi_link, mock.patch(
"ffq.ffq.geo_ids_to_gses"
) as geo_ids_to_gses, mock.patch(
"ffq.ffq.ffq_gse"
) as ffq_gse:
get_doi.return_value = {"title": ["title"]}
search_ena_title.return_value = []
ncbi_search.return_value = ["PMID1"]
ncbi_link.return_value = ["GEOID1"]
geo_ids_to_gses.return_value = ["GSE1"]
self.assertEqual([ffq_gse.return_value], ffq.ffq_doi("doi"))
get_doi.assert_called_once_with("doi")
search_ena_title.assert_called_once_with("title")
ncbi_search.assert_called_once_with("pubmed", "doi")
ncbi_link.assert_called_once_with("pubmed", "gds", "PMID1")
geo_ids_to_gses.assert_called_once_with(["GEOID1"])
ffq_gse.assert_called_once_with("GSE1")
def test_ffq_doi_no_geo(self):
with mock.patch("ffq.ffq.get_doi") as get_doi, mock.patch(
"ffq.ffq.search_ena_title"
) as search_ena_title, mock.patch(
"ffq.ffq.ncbi_search"
) as ncbi_search, mock.patch(
"ffq.ffq.ncbi_link"
) as ncbi_link, mock.patch(
"ffq.ffq.sra_ids_to_srrs"
) as sra_ids_to_srrs, mock.patch(
"ffq.ffq.ffq_run"
) as ffq_run:
get_doi.return_value = {"title": ["title"]}
search_ena_title.return_value = []
ncbi_search.return_value = ["PMID1"]
ncbi_link.side_effect = [[], ["SRA1"]]
sra_ids_to_srrs.return_value = ["SRR1"]
ffq_run.return_value = {"accession": "SRR1", "study": {"accession": "SRP1"}}
self.assertEqual(
[
{
"accession": "SRP1",
"runs": {
"SRR1": {
"accession": "SRR1",
"study": {"accession": "SRP1"},
}
},
}
],
ffq.ffq_doi("doi"),
)
get_doi.assert_called_once_with("doi")
search_ena_title.assert_called_once_with("title")
ncbi_search.assert_called_once_with("pubmed", "doi")
self.assertEqual(2, ncbi_link.call_count)
ncbi_link.assert_has_calls(
[
call("pubmed", "gds", "PMID1"),
call("pubmed", "sra", "PMID1"),
]
)
sra_ids_to_srrs.assert_called_once_with(["SRA1"])
ffq_run.assert_called_once_with("SRR1")
def test_version_string(self):
with patch("sys.argv", ["main", "--version"]):
out = StringIO()
sys.stdout = out
try:
main()
except SystemExit:
pass
output = out.getvalue()
self.assertEqual(output, f"main {__version__}\n")
def test_split_output(self):
# test the functionality of --split ensuring the output file is created
# and is a valid ffq json file
import tempfile
import json
import os
tempdir = tempfile.mkdtemp()
with patch("sys.argv", ["main", "--split", "-o", tempdir, "SRR1581006"]):
out = StringIO()
sys.stdout = out
try:
main()
except SystemExit:
pass
output = out.getvalue()
# Test that the STDOUT is empty (an not "null")
self.assertEqual(output, "")
# Test the output JSON file
file_json = json.load(open(os.path.join(tempdir, "SRR1581006.json")))
self.assertEqual(file_json["accession"], "SRR1581006")
| pachterlab/ffq | tests/test_ffq.py | test_ffq.py | py | 28,319 | python | en | code | 494 | github-code | 36 | [
{
"api_name": "tests.mixins.TestMixin",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "unittest.TestCase",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "ffq.ffq.validate_accessions",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "... |
28981509521 | import json
import traceback
from tendrl.commons.utils import log_utils as logger
from tendrl.monitoring_integration.grafana import constants
from tendrl.monitoring_integration.grafana import dashboard_utils
from tendrl.monitoring_integration.grafana import datasource
from tendrl.monitoring_integration.grafana import exceptions
from tendrl.monitoring_integration.grafana import grafana_org_utils
from tendrl.monitoring_integration.grafana import utils
def upload_default_dashboards():
dashboards = []
NS.config.data["credentials"] = utils.get_credentials()
try:
main_org_id = grafana_org_utils.get_org_id(constants.MAIN_ORG)
if main_org_id:
response = grafana_org_utils.switch_context(
json.loads(main_org_id)["id"]
)
except (exceptions.ConnectionFailedException, KeyError) as ex:
msg = (json.loads(main_org_id)).get(
"message", "Cannot connect to grafana")
logger.log("error", NS.get("publisher_id", None),
{'message': msg})
raise ex
title = []
# create datasource
datasource.create()
dashboards = dashboard_utils.get_all_dashboards()
for dashboard_json in dashboards:
title.append(dashboard_json["uri"].split('/')[1])
for dashboard_json in NS.config.data["dashboards"]:
if dashboard_json in title:
msg = '\n' + "Dashboard " + str(dashboard_json) + \
" already exists" + '\n'
logger.log("debug", NS.get("publisher_id", None),
{'message': msg})
continue
response = dashboard_utils.create_dashboard(dashboard_json)
if response.status_code == 200:
msg = '\n' + "Dashboard " + str(dashboard_json) + \
" uploaded successfully" + '\n'
logger.log("debug", NS.get("publisher_id", None),
{'message': msg})
else:
msg = ("Dashboard {0} upload failed. Error code: {1} ,"
"Error message: " + "{2} ").format(
str(dashboard_json),
str(response.status_code),
str(get_message_from_response(response)))
logger.log("debug", NS.get("publisher_id", None),
{'message': msg})
try:
dashboard_json = dashboard_utils.get_dashboard(
NS.config.data["home_dashboard"])
if 'dashboard' in dashboard_json:
dashboard_id = dashboard_json.get('dashboard').get('id')
response = dashboard_utils.set_home_dashboard(dashboard_id)
response = dashboard_utils.set_home_dashboard(dashboard_id)
if response.status_code == 200:
msg = '\n' + "Dashboard " + \
str(NS.config.data["home_dashboard"]) + \
" is set as home dashboard" + '\n'
logger.log("debug", NS.get("publisher_id", None),
{'message': msg})
else:
msg = '\n' + str(dashboard_json.get('message')) + '\n'
logger.log("debug", NS.get("publisher_id", None),
{'message': msg})
except exceptions.ConnectionFailedException as ex:
traceback.print_exc()
logger.log("error", NS.get("publisher_id", None),
{'message': str(ex)})
raise exceptions.ConnectionFailedException
def get_message_from_response(response_data):
message = ""
try:
if isinstance(json.loads(response_data.content), list):
message = str(json.loads(response_data.content)[0]["message"])
else:
message = str(json.loads(response_data.content)["message"])
except (AttributeError, KeyError):
pass
return message
| Tendrl/monitoring-integration | tendrl/monitoring_integration/grafana/dashboard.py | dashboard.py | py | 3,810 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "tendrl.monitoring_integration.grafana.utils.get_credentials",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "tendrl.monitoring_integration.grafana.utils",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "tendrl.monitoring_integration.grafana.gra... |
24517536 | from itertools import count
import sys
def input(): return sys.stdin.readline().rstrip()
n = int(input())
nums = list(map(int, input().split()))
q = int(input())
lNums = list(map(int, input().split()))
mx = max(max(nums), max(lNums))
dp = [0] * (mx+1)
for a in nums:
dp[a] += 1
for i in range(2, mx+1):
for j in count(1):
if j*j > i: break
if i % j == 0:
dp[i] += dp[j]
if j*j != i and j != 1:
dp[i] += dp[i//j]
print(*(dp[i] for i in lNums))
# 해설에 적힌 코드. 엄청난 테크닉이다...! | kmgyu/baekJoonPractice | Arena solvedAC/2023 arena 1/g.py | g.py | py | 567 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.stdin.readline",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "itertools.count",
"line_number": 16,
"usage_type": "call"
}
] |
25994684161 | import discord
from discord.ext import commands
import asyncio
import random
import datetime
import traceback
import os, sys
class Game(commands.Cog, name='一息ゲームコマンド'):
def __init__(self, bot):
self.bot = bot
@commands.command()
async def mine(self, ctx):
""" 14x14のマインスイーパを生成するぞ! """
bomb_list = []
num_dict = { 0 : '0⃣', 1 : '1⃣', 2 : '2⃣', 3 : '3⃣', 4 : '4⃣', 5 : '5⃣', 6 : '6⃣', 7 : '7⃣', 8 : '8⃣', 9 : '9⃣'}
search_list = ((-1, -1), (0, -1), (1, -1),
(-1, 0), (1, 0),
(-1, 1), (0, 1), (1, 1))
X = 14
Y = 14
# ボム生成
for y in range(Y):
bomb_list.append([9 if random.randint(0, 4) == 1 else 0 for i in range(X)])
# ボム位置の把握
for y in range(Y):
for x in range(X):
count = 0
if bomb_list[y][x] != 9:
for s_ptr in search_list:
tmp_x = x + s_ptr[0]
tmp_y = y + s_ptr[1]
if 0 <= tmp_x < X and 0 <= tmp_y < Y:
if bomb_list[tmp_y][tmp_x] == 9:
count += 1
bomb_list[y][x] = count
# 文字列に変換
mine_data = ''
for bomb_ptr in bomb_list:
#print(bomb_ptr)
for bomb in bomb_ptr:
if bomb == 9:
mine_data += '||#⃣||'
else:
mine_data += '||'+ num_dict[bomb] + '||'
mine_data += '\r\n'
mine_txt = await ctx.send(mine_data)
await mine_txt.add_reaction('😰')
# 答え合わせ
def check(reaction, user):
emoji = str(reaction.emoji)
if user.bot == True: # botは無視
pass
else:
return emoji == '😰'
while not self.bot.is_closed():
try:
reaction, user = await self.bot.wait_for('reaction_add', timeout=600, check=check)
except asyncio.TimeoutError:
await mine_txt.add_reaction('😪')
break
else:
if ctx.author.id != user.id:
continue
mine_data = ''
for bomb_ptr in bomb_list:
#print(bomb_ptr)
for bomb in bomb_ptr:
if bomb == 9:
mine_data += '||#⃣||'
else:
mine_data += num_dict[bomb]
mine_data += '\r\n'
await mine_txt.edit(content=mine_data)
await mine_txt.add_reaction('😪')
break
@commands.command()
async def slot(self, ctx):
"""スロットを回すぞ!"""
def make_slot_txt(s):
txt = '**'
for i in range(0, 3):
txt += '['+ s[i][0] +'] ['+ s[i][1] +'] ['+ s[i][2] +']\r\n'
return txt + '**'
def set_slot(s, item, x):
r = random.randint(0, 8)
for i in range(0, 3):
s[i][x] = item[r]
r += 1
if r > 8: r = 0
return s
s = [['㊙️', '㊙️', '㊙️'], ['㊙️', '㊙️', '㊙️'], ['㊙️', '㊙️', '㊙️']]
item = ['7⃣', '🔔', '🍉', '🍌', '🍋', '🍊', '🍒', '🍇', '🎰']
num = { '0⃣' : 0, '1⃣' : 1, '2⃣' : 2 }
slot_txt = await ctx.send(make_slot_txt(s))
await slot_txt.add_reaction('0⃣')
await slot_txt.add_reaction('1⃣')
await slot_txt.add_reaction('2⃣')
def check(reaction, user):
emoji = str(reaction.emoji)
if user.bot == True: # botは無視
pass
else:
return emoji == '0⃣' or emoji == '1⃣' or emoji == '2⃣' or emoji == '🔄'
cnt = 0
index_list = []
while not self.bot.is_closed():
try:
reaction, user = await self.bot.wait_for('reaction_add', timeout=60, check=check)
except asyncio.TimeoutError:
await slot_txt.add_reaction('😪')
break
else:
if ctx.author.id != user.id:
continue
if str(reaction.emoji) == '🔄':
index_list = list()
cnt = 0
s = [['㊙️', '㊙️', '㊙️'], ['㊙️', '㊙️', '㊙️'], ['㊙️', '㊙️', '㊙️']]
await slot_txt.edit(content=make_slot_txt(s))
continue
cnt += 1
index = num[str(reaction.emoji)]
if index not in index_list:
index_list.append(index)
s = set_slot(s, item, index)
await slot_txt.edit(content=make_slot_txt(s))
if cnt >= 3:
await slot_txt.add_reaction('🔄')
def setup(bot):
bot.add_cog(Game(bot)) | hirosuke-pi/DiscordBot | progracat/mods/game/main.py | main.py | py | 5,289 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "discord.ext.commands.Cog",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "random.randint",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "asy... |
23211155458 | from math import fabs
from os.path import split
from re import sub
from utils.tools import addWordsToJieba, splitSentence
import ujson
import os
from utils.config import DATASET
import jieba
from io import BytesIO, StringIO
attraction_db_path = "attraction_db.json"
hotel_db_path = "hotel_db.json"
metro_db_path = "metro_db.json"
restaurant_db_path = "restaurant_db.json"
taxi_db_path = "taxi_db.json"
EntityIndex = 0
AttrsDictIndex = 1
#SPO_index satified MEMTOKEN
SUBJECT_INDEX=0
PREDICATE_INDEX=1
OBJECT_INDEX=2
"""
(subject-predicate-object(predicateInfo))
(entity-predicate-predicateInfo)
(subject-name-entity)
name is kind of predicate
entity is object
"""
SUBJECT_KEY = "领域"
ENTITIES_KEY = "名称"
SUBJECTS = ["景点", "酒店", "餐馆", "地铁", "出租"]
def getDictfromDataBase(filepath: str):
abspath = os.path.join(os.getcwd(), "data", DATASET, "database", filepath)
database_dict = None
with open(abspath,encoding='utf-8') as f:
database_dict = ujson.load(f)
return database_dict
# equals
# attraction_db = getDictfromDataBase(attraction_db_path)
# hotel_db = getDictfromDataBase(hotel_db_path)
# metro_db = getDictfromDataBase(metro_db_path)
# restaurant_db = getDictfromDataBase(restaurant_db_path)
# taxi_db = getDictfromDataBase()
dbs = [getDictfromDataBase(path) for path in iter((
attraction_db_path, hotel_db_path, metro_db_path, restaurant_db_path, taxi_db_path))]
# ChooseDataBaseBySubjectName = {SUBJECTS[i]: db for i,db in enumerate(dbs)}
ChooseDataBaseBySubjectName = dict()
for i, each in enumerate(SUBJECTS):
ChooseDataBaseBySubjectName.setdefault(each,dbs[i])
PREDICATES = {}
PREDICATES = {eachSubject: [key for key in ChooseDataBaseBySubjectName[
eachSubject][0][AttrsDictIndex].keys()] for eachSubject in SUBJECTS}
# for eachSubject in SUBJECTS:
# database = ChooseDataBaseBySubjectName[]
ENTITIES = []
ENTITIES_belongs_SUBJECTS={}
def initPredicate(dbs: tuple):
for eachSubject in SUBJECTS:
database = ChooseDataBaseBySubjectName[eachSubject]
attrsObj = database[0][AttrsDictIndex]
PREDICATES.setdefault(eachSubject,[])
for key in attrsObj.keys():
PREDICATES[eachSubject].append(key)
def initEntitiesAndEntities_belongs(dbs: tuple):
for index , database in enumerate(dbs):
for item in database:
ent = item[EntityIndex]
ENTITIES.append(ent)
ENTITIES_belongs_SUBJECTS.setdefault(ent,SUBJECTS[index])
initPredicate(dbs)
initEntitiesAndEntities_belongs(dbs)
# 避免jieba将数据集词拆分
# 读入却分词无效,jieba背锅
# dict_path = os.path.join(os.getcwd(), 'data', 'crossWOZ', 'dict.txt')
# if os.path.isfile(dict_path):
# with open(dict_path, "r+", encoding="utf8") as file:
# for each in SUBJECTS:
# file.writelines(' 3 n \n'.join(PREDICATES[each]))
# file.writelines(' 3 n \n'.join(SUBJECTS))
# file.writelines(' 3 n \n'.join(ENTITIES))
# jieba.load_userdict(file)
for each in SUBJECTS:
addWordsToJieba(PREDICATES[each])
addWordsToJieba(SUBJECTS)
addWordsToJieba(ENTITIES)
# def getSubjectByEntityThroughDBs(dbs: tuple, ent: str) -> str:
# for database in dbs:
# for item in database:
# if item[EntityIndex] is ent:
# return item[AttrsDictIndex][SUBJECT_KEY]
# return None
def getSubjectByEntity(ent: str) -> str:
return ENTITIES_belongs_SUBJECTS[ent]
def getAttrsByEntityThroughDBs(dbs: tuple, ent: str) -> dict:
for database in dbs:
for item in database:
if item[EntityIndex] is ent:
return item[AttrsDictIndex]
return None
def getAttrsByEntity(ent: str) -> dict:
database = ChooseDataBaseBySubjectName[ENTITIES_belongs_SUBJECTS[ent]]
for item in database:
if item[EntityIndex] == ent:
return item[AttrsDictIndex]
return None
def getEntitesBySPO(subject: str, predicate: str, predicateInfo: str):
database = ChooseDataBaseBySubjectName[subject]
entities = []
# entities = [item[EntityIndex] if item[AttrsDictIndex][predicate] is predicateInfo else None for item in database]
for item in database:
if item[AttrsDictIndex][predicate] is predicateInfo:
entities.append(item[EntityIndex])
return entities if len(entities)>0 else None
def getEntitesBySubject(subject: str)->list:
ents = []
for item in ChooseDataBaseBySubjectName[subject]:
ents.append(item[EntityIndex])
return ents if len(ents) else None
def getEntityAttrs(ent:str):
database = ChooseDataBaseBySubjectName[ENTITIES_belongs_SUBJECTS[ent]]
for item in database:
if item[EntityIndex] is ent:
return item[AttrsDictIndex]
def getEntitesAttrsBySubjectAndPredicate(subject: str, predicate: str)->dict:
database = ChooseDataBaseBySubjectName[subject]
# ENTITIES_Attrs = {item[EntityIndex]: {key: item[AttrsDictIndex][key]
# for key in item[AttrsDictIndex].keys()} if item is predicate else None for item in database}
ENTITIES_Attrs = {}
for item in database:
for key in item[AttrsDictIndex].keys():
if key is predicate:
ENTITIES_Attrs.setdefault(item[EntityIndex],item[AttrsDictIndex])
return ENTITIES_Attrs if len(ENTITIES_Attrs) else None
# def getEntitiesBySubjectAndInformPredicate(subject: str, predicate: str,inform_predicate) -> dict:
# database = ChooseDataBaseBySubjectName[subject]
# ENTITIES = []
# for item in database:
# if item[AttrsDictIndex][predicate] is inform_predicate:
# ENTITIES.append(item[EntityIndex])
# return ENTITIES if len(ENTITIES) else None
def findEntities(splitWords:list):
ents = []
for word in splitWords:
if ENTITIES.__contains__(word):
ents.append(word)
return ents if len(ents) else None
def findPredicatesBySubject(splitWords:list,subject:str):
predicates=[]
for word in splitWords:
if PREDICATES[subject].__contains__(word):
predicates.append(word)
return predicates if len(predicates) else None
def findPredicatesByEnt(splitWords:list,ent:str):
predicates = []
for word in splitWords:
if PREDICATES[ENTITIES_belongs_SUBJECTS[ent]].__contains__(word):
predicates.append(word)
return predicates if len(predicates) else None
def findSubjects(splitWords:list):
subjects = []
for word in splitWords:
if SUBJECTS.__contains__(word):
subjects.append(subjects)
return subjects if len(subjects) else None
def compareInfoEqual(wordlist, keys):
for word in wordlist:
for key in keys:
if word is key:
return True
return False
def wordListFindRequestPredicateInfo(wordlist, old_ents)->dict:
result =None
userWants = {}
subjects = findSubjects(wordlist)
inform_predicate = [findPredicatesBySubject(wordlist,subject) for subject in subjects]
ents = findEntities(wordlist)
if ents is None:
ents = old_ents
# if subjects:
# ents = getEntitesBySubject()
# for ent in ents:
# ents_info_list.append(ent)
if ents and inform_predicate:
userWants.setdefault(inform_predicate, [])
for ent in ents:
attrs = getAttrsByEntity(ent)
for word in wordlist:
for key, val in enumerate(attrs):
if word is val:
userWants[inform_predicate].append(ent[inform_predicate])
elif subjects and inform_predicate:
# user need ent
if ents:
userWants.setdefault(ENTITIES_KEY,[])
for ent in ents:
# attrs = getAttrsByEntity(ent)
predicates = PREDICATES[ENTITIES_belongs_SUBJECTS(ent)]
if compareInfoEqual(wordlist, predicates):
userWants[ENTITIES_KEY].append(ent)
else:
ents = getEntitesBySubject(
subjects)
userWants.setdefault(ENTITIES_KEY, ents)
return userWants if len(userWants) else None
def getPredicateInfoByEntityThroughDBs(dbs: tuple, ent: str, predicate: str) -> str:
for database in dbs:
for item in database:
if item[EntityIndex] is ent:
return item[AttrsDictIndex][predicate]
return None
def generateAllSPO(user_split_words,sys_answer_sentence=None):
SPO_list = []
contains_entities = []
if sys_answer_sentence:
for word in splitSentence(sys_answer_sentence):
if word in ENTITIES:
contains_entities.append(word)
for word in user_split_words:
if word in ENTITIES:
contains_entities.append(word)
for word in contains_entities:
database = ChooseDataBaseBySubjectName[ENTITIES_belongs_SUBJECTS[word]]
for item in database:
if item[EntityIndex] == word:
for predicate,object in item[AttrsDictIndex].items():
if isinstance(object,list):
for slice in object:
SPO_list.append([word,predicate,slice]) # tuple
elif object is not None:
SPO_list.append([word,predicate,object])
return SPO_list
def patternSubject(wordList):
for index , word in enumerate(wordList):
if word in SUBJECTS:
return word
return None
def patternPredicateWithSubject(wordList,subject):
for index, word in enumerate(wordList):
if word in subject:
return PREDICATES[subject]
return None
def patternEntity(wordList):
for index , word in enumerate(wordList):
if word in ENTITIES:
return word
return None
| LOST0LOSER/End-To-End-Dialog-System | utils/DataBase.py | DataBase.py | py | 9,837 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "utils.config.DATASET",
"line_number": 42,
"usage_type": "argument"
},
{
"api_name": "os.path",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"l... |
12741468324 | from telegram import ReplyKeyboardMarkup, ReplyKeyboardRemove, ParseMode
from telegram.ext import ConversationHandler
import random
def anketa_random_start(update, context):
update.message.reply_text(
f'Вы выбрали случайный фильм. Нажмите на кнопку "Получить фильм" и подождите немного, пока его подберу',
reply_markup=ReplyKeyboardMarkup(
[["Получить фильм"]],
one_time_keyboard=True,
resize_keyboard=True
)
)
return 'anketa_random_result'
movies_list = ['Красотка','Зеленая миля','Бетмен: начало','Форрест Гамп','Перл Харбор','Храброе сердце','Девчата']
def anketa_random_result(update, context):
random_movie = movies_list[random.randint(0,len(movies_list)-1)]
update.message.reply_text(
f'Ваш случайный фильм: {random_movie}. \nМожете попросить меня подобрать другой случайный фильм',
reply_markup=ReplyKeyboardMarkup([['Подобрать другой случайный фильм', 'Я нашел нужный фильм']],
one_time_keyboard=True,
resize_keyboard=True
)
)
movies_list.remove(random_movie)
return 'final_random'
def other_random(update, context):
if len(movies_list) > 0:
other_random_movie = movies_list[random.randint(0,len(movies_list)-1)]
update.message.reply_text(
f'Ваш следующий рандомный фильм: {other_random_movie}',
reply_markup=ReplyKeyboardMarkup([['Подобрать другой случайный фильм', 'Я нашел нужный фильм']],
one_time_keyboard=True,
resize_keyboard=True
)
)
movies_list.remove(other_random_movie)
return 'final_random'
else:
update.message.reply_text(
f'У меня закончились фильмы, вы маньяк', reply_markup=ReplyKeyboardMarkup([['Вернуться в начало']],
one_time_keyboard=True,
resize_keyboard=True
)
)
return ConversationHandler.END
def final_random(update, context):
update.message.reply_text(
f'Рад был помочь!', reply_markup=ReplyKeyboardMarkup([['Вернуться в начало']],
one_time_keyboard=True,
resize_keyboard=True
)
)
return ConversationHandler.END
def anketa_dontknow_random(update, context):
update.message.reply_text('Я вас не понимаю')
| bezrezen/kino_bot | anketa_random.py | anketa_random.py | py | 2,846 | python | ru | code | 1 | github-code | 36 | [
{
"api_name": "telegram.ReplyKeyboardMarkup",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "telegram.ReplyKeyboardMarkup",
"line_number": 25,
"usage_type": "call"
},
{
"api_name"... |
10513629088 | #import getopt
import sys
#import ast
import json
import formatingDataSetProximity as formating
import enumerateTrackersProximity as et
import distancesProximity as distances
import visualisationProximity as vis
from datetime import datetime
from time import gmtime, strftime
import pandas as pd
def main():
# intimate, personal, social, public
#personal validate distances (0.46-1.2m)
proxemic='intimate'
proxemic2='intimate'
patientIDDevice=''
#folderData='/Users/13371327/Documents/Gloria/2020/RulesApp/obs-rules/server/routes/localisation/data';
folderData = 'server/routes/localisation/data'
#print(folderData);
roles = {}
coordinates={}
centeredRole=''
A= json.loads(str(sys.argv[1]))
B= json.loads(str(sys.argv[2]))
C= json.loads(str(sys.argv[3]))
# GETTING PARAMETERS FROM NODE
#ID rule
idRule = A[0]['id']
#TYPE OF GRAPH
typeOfGraph = A[0]['value_of_mag']
spetialSim=''
if typeOfGraph == 'Priority':
spetialSim='barchar'
if typeOfGraph == 'All':
typeOfGraph='full'
else:
typeOfGraph='role-centered'
#PHASES
myFormat = '%Y-%m-%d %I:%M:%S'
phase1 = B[0]['time_action']
phase2 = B[1]['time_action']
#print('dates in the python script: ', phase1, phase2)
#phase1 = datetime.strptime(phase1.split('.')[0], myFormat)
#phase2 = datetime.strptime(phase2.split('.')[0], myFormat)
#print('dates in the python script AFTER : ', phase1, phase2)
#CENTERED ROLE
if typeOfGraph == 'role-centered':
#print('The value of the center role: ', A[0]['value_of_mag'])
if(A[0]['value_of_mag'] is None or A[0]['value_of_mag']== '' or A[0]['value_of_mag']== 'null'):
centeredRole='11111'
else:
centeredRole= A[0]['value_of_mag']
else:
centeredRole=0
# ROLES
#print('centeredRole value: ', centeredRole)
#7 is the patient role according to the web tool
for x in range(len(C)):
if (C[x]['id_object']) == 7:
patientIDDevice = C[x]['serial']
patientcoordinates = C[x]['coordinates']
if(centeredRole=='11111'):
roles[x] = C[x]['name'] + ',' + '11111'
else:
roles[x] = C[x]['name'] + ',' + '11111'
#print('Here is the patient information: ',patientIDDevice, patientcoordinates, roles[x])
else:
roles[x] = C[x]['name'] + ',' + C[x]['serial']
#print(roles[x])
#print('After the loop: ',patientIDDevice)
# WHICH SESSION
session = A[0]['id_session']
file = folderData + '/' + str(session) + '.json'
#print(A, B, str(sys.argv[3]));
#print(typeOfGraph, phase1, phase2, centeredRole, len(C), roles, session);
# Reminder: to know who the patient is, use the roles dictionary
#print(typeOfGraph, phase1, phase2, centeredRole, len(C), roles, session);
if(spetialSim=='barchar'):
#print('Here we are about to generate a barchar')
D = json.loads(str(sys.argv[4]))
#COORDINATES
for x in range(len(D)):
coordinates[x] = D[x]['coordinates']
#print('This is the first group of coordinates: ', D[0]["coordinates"], D[0]["name"])
createBarChar(file, session, coordinates,proxemic, phase1, phase2, idRule, patientIDDevice)
else:
initAnalisis(file, centeredRole, proxemic, proxemic2, phase1, phase2, roles, typeOfGraph, session, idRule, patientIDDevice, patientcoordinates)
def initAnalisis(file, centeredRole, proxemic,proxemic2, phase1, phase2, roles, typeOfGraph, session, idRule, patientIDDevice, patientcoordinates):
#READ DATA
df = formating.readingDataJson(file,session)
#print('Alll the variables I want to know: ',centeredRole, patientcoordinates, patientIDDevice);
if ((not(patientIDDevice is None)) & (patientIDDevice != '')) & (typeOfGraph=='full'):
query = 'tracker !=' + patientIDDevice
df = df.query(query)
if (typeOfGraph=='role-centered'):
# Add the patient info into the dataFrame
if(not(patientcoordinates is None)) & (centeredRole=='11111'):
#create a small dataFrame with the patient info
#the tagId is 0000
#print('Good the patient coordinate and the centered role is patient', centeredRole, patientcoordinates)
start = df['timestamp'].iloc[0]
# last value
end = df['timestamp'].iloc[-1]
dfPatient= formating.creatingTimestampColumns(start, end, patientcoordinates, session)
#Concat the new dataFrame with the one that was read in the first line
frames = [dfPatient, df]
df = pd.concat(frames, sort=True)
df = df.reset_index()
#print(df);
elif (patientcoordinates is None):
response = {"message": 'none', "path": 'none', "messageError": 'Please set the patient coordinate or the role serial tracker'}
json_RESPONSE = json.dumps(response)
print(json_RESPONSE)
#FORMATING
#session = session;
#FILTER DATA ACCORDING TO PHASES
df1= formating.nameTrackers(df, roles)
#print(df.loc[df['tracker'] == 26689])
#print(df1.Role.unique())
#print(df1)
#GET NUMBER OF TRACKERS
n = et.numberTrackers(df1)
#print ('number of trackers', n)
#print (roles)
#print ('BEFORE FILTERING: ',len(df.index))
#FILTERING PER PHASE
#df = formating.asign_phases(df, phase1, phase2)
df, toSend = formating.filteringPhases(df1, phase1, phase2)
#Total of seconds
#print('This is the data number of rows: ',len(df.index))
totalSeconds = len(df.index)
if df.empty:
#print('No matching rows: ', toSend);
df, toSend= formating.filteringPhasesAdding(df1, phase1, phase2)
if df.empty:
df, toSend = formating.filteringPhasesMinosTimeZone(df1, phase1, phase2)
if df.empty:
df, toSend = formating.filteringPhasesMinosTimeZone1(df1, phase1, phase2)
#print(toSend)
#print(df, toSend)
#print('This is the data filtered dataframe: ',df.Role.unique(), df)
# Call the function that enumerates trackers
df_trackers = et.enumerate_trackers(df)
#print('df_trackers: $$$$$',df_trackers)
df = et.asignEnumTrackers(df, df_trackers)
#print('Assign enum trackers: $$$$$',df)
# HERE I NEED TO KNOW HOW MANY SECONDS THIS SECTION OF THE SIMULATION LAST
#print ('AFTER FILTERING: ',len(df.index))
# WHICH TRACKER IS THE SELECTED ROLE, returns the enum tracker
#print('Here is the center role value: ',centeredRole)
centeredRole = formating.roleNum(df, df_trackers, centeredRole)
#print('Enum for the selected role in the miedle: $$$$$', centeredRole)
## DISTANCES
# To run the calculation of distances it requires the number of trackers and the dataset
df_distancesBetTrackers = distances.distancesBetweenTrackers(df, n)
#print('Distances between trackers: $$$$$', df_distancesBetTrackers)
#print(df_distancesBetTrackers.head(10))
# The next steep is to asign proxemic labels according to the distances
df_proxemic_labels, prox_labels = distances.proxemicsLabels(df_distancesBetTrackers, n)
#print('Labels according to the distance: $$$$$', df_proxemic_labels, prox_labels)
#print(df_proxemic_labels, prox_labels)
# Agregate the proxemic labels per session
df = vis.aggregateLabels(df_proxemic_labels, prox_labels)
#print('Agregation of the proxemic labels', df.head(5))
if (typeOfGraph == 'full'):
#print(df.head(10))
filterProxemic = vis.filterPL(df, proxemic, proxemic2, role=0)
# trackers_names = vis.nameTrackers(df, listRoles)
#df_trackers_ordered = vis.orderTrackers(centeredRole, df_trackers)
trackers_names = vis.nameTrackers(df_trackers, roles)
#trackers_names = vis.nameTrackers(df_trackers, roles)
#filterProxemic = vis.filterPL(df, proxemic,proxemic2, role=0)
graph, message = vis.generateFullGraph(filterProxemic, trackers_names)
name = vis.visualiseGraph1(graph, session, 'porcentages', proxemic, idRule)
response = {"message": message, "path": name, "messageError": "none"}
json_RESPONSE = json.dumps(response)
print(json_RESPONSE)
# Indicators of centrality
#print('GRAPH DEGREE: ', vis.graphDegree(graph))
#print('VERTEX 1 DEGREE: ', vis.vertexDegree(1, graph))
#print('EDGE DEGREE: ', vis.edgeBetweennes(graph))
#print('VERTEX DEGREE: ', vis.vertexBetweennes(graph))
#print('LARGEST BETWEENESS: ', vis.largestBetweeness(graph, 'tracker'))
#print('PAGE RANK: ', vis.pageRabk(graph))
#print('PERSONALISE PAGE RANK: ', vis.PpageRabk(graph, 'proxLabel'))
else:
# Filtering data according to proxemic label of interest and the role
filterProxemic = vis.filterPL(df, proxemic, proxemic2, centeredRole)
#totalSeconds = len(filterProxemic.index)
#print('Filter the data according to the proxemic label: ',filterProxemic)
# Once we have the proxemic labels we can try to plot the SN
df_trackers_ordered = vis.orderTrackers(centeredRole, df_trackers)
#print(df_trackers_ordered)
trackers_names = vis.nameTrackers(df_trackers_ordered, roles)
#print('NAME TRACKERS: @@@@ ',trackers_names)
#print('ORDERED TRACKERS: @@@@ ', df_trackers_ordered)
# VISUALISE
# visualise normalized data and porcentages
dfnorm = vis.normalizedata(filterProxemic)
#print(dfnorm)
graph, message = vis.graphDefinition(dfnorm, trackers_names, 'porcentages')
#print(graph)
name = vis.visualiseGraph1(graph, session, 'porcentages', proxemic, idRule)
response = {"message":message, "path":name, "messageError": "none"}
json_RESPONSE = json.dumps(response)
print(json_RESPONSE)
def createBarChar(file, session, coordinates,proxemic, phase1, phase2, idRule, patientIDDevice):
#Read the file
df1 = formating.readingDataJson(file, session)
#Remove the patient' data from the dataFrame, if it was tracked
#print('Patient ID device', patientIDDevice)
#print(df1.head(10), df1.tracker.unique(), phase1, phase2)
if (patientIDDevice!='') & (not(patientIDDevice is None)):
query='tracker !=' + patientIDDevice
df1 = df1.query(query)
#FilterDataSet
df, toSend = formating.filteringPhases(df1, phase1, phase2)
if df.empty:
# print('No matching rows: ', toSend);
df, toSend = formating.filteringPhasesAdding(df1, phase1, phase2)
if df.empty:
df, toSend = formating.filteringPhasesMinosTimeZone(df1, phase1, phase2)
if df.empty:
df, toSend = formating.filteringPhasesMinosTimeZone1(df1, phase1, phase2)
#print(toSend)
#print(df.tracker.unique(), toSend, df)
#print('This is the data number of rows: ',len(df.index))
#Calculate distancesRolesAndBeds
df = distances.calculateDistancesRolesToBeds(df, coordinates)
#Were they in intimate proxemity with the patient asign label?
numberOfPatients = len(coordinates)
#print('The number of patients is: ', numberOfPatients);
# careful with this functions of do you want to validate different distances. works only for intimate and personal
df = distances.asignProximityLabel(df, numberOfPatients)
#Agregate values according to the proximity of each patient Create a summary
# bed 1: %, bed 2: %, bed 3: %
itemsPlot, message, indexMax=distances.aggregateProximity(df, proxemic, numberOfPatients)
name = vis.plotBarChart(itemsPlot, session, idRule, indexMax)
response = {"message": message, "path": name, "messageError": "none"}
json_RESPONSE = json.dumps(response)
print(json_RESPONSE)
if __name__ == "__main__":
# execute only if run as a script
main()
| Teamwork-Analytics/obs-rules | server/routes/localisation/ProximityLocalisation.py | ProximityLocalisation.py | py | 10,895 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "json.loads",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 26,... |
40107696527 | import numpy as np
import cv2 as cv
flower2 = "../mysamples/flower2.jpg"
# flower2 = "/home/mmni/projects/opencv-python/mysamples/flower2.jpg"
img = cv.imread(flower2)
someflowers = img[2000:2200, 2300:2500]
# someflowers = img[200:400, 600:800]
img[100:300, 200:400] = someflowers
cv.imshow("flowers", img)
cv.imshow("flowers some", someflowers)
cv.waitKey(150000)
cv.destroyAllWindows()
exit(0) | ekim197711/opencv-python | core/part-of-image.py | part-of-image.py | py | 400 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv2.imread",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 14,
... |
36374178276 | from ibm_watson import TextToSpeechV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from playsound import playsound
import json
from watson_developer_cloud import VisualRecognitionV3
import json
import ibm_boto3
from ibm_botocore.client import Config, ClientError
visual_recognition = VisualRecognitionV3(
'2018-03-19',
iam_apikey='9txnOj7i6F1b8kxKdiIO96GYI7V_xxjE3v34uB_a1ERp')
authenticator = IAMAuthenticator('ZmfQSpS-m85wNBln69v_ojQDkFIlhJMIrQP3w5Y3hegP')
text_to_speech = TextToSpeechV1(
authenticator=authenticator
)
text_to_speech.set_service_url('https://api.au-syd.text-to-speech.watson.cloud.ibm.com/instances/3e6111c0-3fec-4fe0-92d2-61e9250fc06b')
with open('./food.jpg', 'rb') as image_file:
classes = visual_recognition.classify(
image_file,
threshold='0.6',
classifier_ids='food').get_result()
print(json.dumps(classes, indent=2))
speak=json.loads(json.dumps(classes))
x=speak['images']
for i in x:
for j in i['classifiers']:
k=j['classes']
for l in k:
m=l['class']
print(m)
with open('task.mp3', 'wb') as audio_file:
audio_file.write(
text_to_speech.synthesize(
m,
voice='en-US_AllisonVoice',
accept='audio/mp3'
).get_result().content)
playsound('task.mp3')
# Constants for IBM COS values
COS_ENDPOINT = "https://s3.jp-tok.cloud-object-storage.appdomain.cloud" # Current list avaiable at https://control.cloud-object-storage.cloud.ibm.com/v2/endpoints
COS_API_KEY_ID = "Rz4Bn5WfJ3NHLyoF3rQesiKjG6lXo-k8vnVBm3-rm_2z" # eg "W00YiRnLW4a3fTjMB-odB-2ySfTrFBIQQWanc--P3byk"
COS_AUTH_ENDPOINT = "https://iam.cloud.ibm.com/identity/token"
COS_RESOURCE_CRN = "crn:v1:bluemix:public:cloud-object-storage:global:a/d27055cdf70a4c8a82a0891135504b4c:be3efa61-d84f-4161-b654-255da6f7b06f::" # eg "crn:v1:bluemix:public:cloud-object-storage:global:a/3bf0d9003abfb5d29761c3e97696b71c:d6f04d83-6c4f-4a62-a165-696756d63903::"
# Create resource
cos = ibm_boto3.resource("s3",
ibm_api_key_id=COS_API_KEY_ID,
ibm_service_instance_id=COS_RESOURCE_CRN,
ibm_auth_endpoint=COS_AUTH_ENDPOINT,
config=Config(signature_version="oauth"),
endpoint_url=COS_ENDPOINT
)
def multi_part_upload(bucket_name, item_name, file_path):
    """Upload a local file to an IBM COS bucket as ``item_name``.

    Uses the module-level ``cos`` resource. Files larger than 15 MB are sent
    as a multi-part upload in 5 MB chunks; smaller files go up in one piece.
    Errors are reported to stdout rather than raised.
    """
    try:
        print("Starting file transfer for {0} to bucket: {1}\n".format(item_name, bucket_name))
        # Multi-part kicks in above 15 MB, splitting the stream into 5 MB parts.
        chunk_bytes = 5 * 1024 * 1024
        multipart_cutoff = 15 * 1024 * 1024
        transfer_config = ibm_boto3.s3.transfer.TransferConfig(
            multipart_threshold=multipart_cutoff,
            multipart_chunksize=chunk_bytes,
        )
        # upload_fileobj performs the multi-part upload automatically when needed.
        with open(file_path, "rb") as file_data:
            cos.Object(bucket_name, item_name).upload_fileobj(
                Fileobj=file_data,
                Config=transfer_config,
            )
        print("Transfer for {0} Complete!\n".format(item_name))
    except ClientError as be:
        print("CLIENT ERROR: {0}\n".format(be))
    except Exception as e:
        print("Unable to complete multi-part upload: {0}".format(e))
multi_part_upload("mohammadansari2", "ansari.mp3", "task.mp3")
| Ansari369/IoT-projects | taskapp.py | taskapp.py | py | 3,478 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "watson_developer_cloud.VisualRecognitionV3",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "ibm_cloud_sdk_core.authenticators.IAMAuthenticator",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "ibm_watson.TextToSpeechV1",
"line_number": 16,
... |
6811791128 | #!/usr/bin/env python3
from random import random
from z3 import *
import numpy as np
import time
from math import *
from statistics import *
from random_lib import *
from matplotlib import pyplot as plt
from matplotlib.patches import Rectangle
from collections import defaultdict
import heapq
import faulthandler
faulthandler.enable()
# bad code goes here
import sys
sys.setrecursionlimit(10**6)
vis_clusters = []
unexp_clusters = []
class Vis_Cluster:
    """Group the global ``visible_cells`` (grid value 0.5) into 4-connected
    clusters via depth-first search.

    Side effect: rebuilds the module-global ``vis_clusters`` list, where each
    entry is an np.array of (x, y) coordinates of one cluster.
    NOTE(review): ``traverse`` is recursive, so the largest cluster is bounded
    by the interpreter recursion limit raised at module import.
    """
    def __init__(self,m,n):
        # Get the dimensions of the grid
        self.rows = m
        self.cols = n
        # Per-cell visited flags, indexed [row][col].
        self.visited_map = np.zeros((m,n), dtype=bool)
        global vis_clusters
        vis_clusters = []
        # Maps "x+y" key of each visible cell to its cluster index.
        self.cell2cluster = dict()
        # Number of cells in each cluster, parallel to vis_clusters.
        self.vis_cells_per_cluster = []
    def traverse(self,r, c):
        """Depth-first flood fill from (row r, col c) into self.component."""
        # Check if the current cell is out of bounds or has already been visited
        if r < 0 or r >= self.rows or c < 0 or c >= self.cols or self.visited_map[r][c]:
            return
        # Only visible cells (value 0.5) belong to a cluster.
        if map[r][c] != 0.5:
            return
        # Mark the current cell as visited
        self.visited_map[r][c] = True
        self.component.append((c,r))
        (x,y) = (c,r)
        key = str(x)+'+'+str(y)
        self.cell2cluster[key] = len(vis_clusters)
        # Recursively traverse the 4-connected neighbors of the current cell
        self.traverse(r + 1, c) # next row
        self.traverse(r - 1, c) # previous row
        self.traverse(r, c + 1) # next column
        self.traverse(r, c - 1) # previous column
    def make_clusters(self):
        """Build every cluster; return (cells-per-cluster array, cell->cluster map)."""
        for (x,y) in visible_cells:
            (r,c) = (y,x)
            # Skip cells that have already been visited
            if self.visited_map[r][c]:
                continue
            # Initialize a new connected component as a list of coordinates
            self.component = []
            # Traverse the connected component and add the coordinates of each cell to the list
            self.traverse(r, c)
            # Add the connected component to the list of components
            vis_clusters.append(np.array(self.component))
            self.vis_cells_per_cluster.append(len(self.component))
        return np.array(self.vis_cells_per_cluster),self.cell2cluster
class Unexp_Cluster:
    """Group unexplored cells (grid value 0.0) into 4-connected clusters via
    breadth-first search, rebuilding the module-global ``unexp_clusters`` list.
    """
    def __init__(self,m,n):
        # Get the dimensions of the grid
        self.rows = m
        self.cols = n
        global unexp_clusters
        unexp_clusters = []
        # Per-cell visited flags, indexed [row][col].
        self.visited = np.zeros((m,n), dtype=bool)
        # Maps "x+y" key of each unexplored cell to its cluster index.
        self.cell2cluster = dict()
        self.cells_per_cluster = []
    def bfs(self,r, c):
        """Flood-fill from (r, c); return (component, count of cells visited).

        NOTE(review): ``component`` only ever contains the seed cell — cells
        discovered during the BFS are counted but never appended, so the
        coordinate lists stored in ``unexp_clusters`` are incomplete. Only the
        counts and the cell->cluster map appear to be consumed elsewhere in
        this file; confirm before relying on the coordinates.
        """
        component = []
        count = 0
        # Create a queue to store the cells to visit
        component.append((c,r))
        queue = [(r,c)]
        while queue:
            r, c = queue.pop(0)
            (x,y) = (c,r)
            if not self.visited[r][c]:
                count+=1
                key = str(x)+'+'+str(y)
                self.cell2cluster[key] = len(unexp_clusters)
                self.visited[r][c] = True
                # Check the four neighboring cells
                for r_, c_ in [(r-1, c), (r+1, c), (r, c-1), (r, c+1)]:
                    if 0 <= r_ < len(map) and 0 <= c_ < len(map[0]) and map[r_][c_] == 0.0:
                        queue.append((r_, c_))
        return component,count
    def make_clusters(self):
        """Scan the grid; return (cells-per-cluster array, cell->cluster map)."""
        for r in range(self.rows):
            for c in range(self.cols):
                # Skip cells that have already been visited
                if map[r][c] == 0.0 and not self.visited[r][c]:
                    # Traverse the connected component and add the coordinates of each cell to the list
                    component,count = self.bfs(r, c)
                    # Add the connected component to the list of components
                    unexp_clusters.append(np.array(component))
                    self.cells_per_cluster.append(count)
        return np.array(self.cells_per_cluster),self.cell2cluster
class SMT:
    def __init__(self,R,T,Dx,Dy,vis_dist,n_neighbors):
        """Set up the z3 optimizer state for R robots planning T steps on a
        Dx x Dy grid.

        X/Y hold per-robot, per-step integer coordinates; C and Re hold the
        per-step cost and reward terms combined later into the objective.
        """
        self.s = Optimize()
        self.R = R  # number of robots
        self.T = T  # planning horizon (time steps)
        self.Dx = Dx  # grid width
        self.Dy = Dy  # grid height
        self.m = len(map[0])
        self.n = len(map)
        self.vis_dist = vis_dist  # visibility / reachability radius per step
        self.n_neighbors = n_neighbors  # k used for nearest-visible-cell scoring
        self.W_Cost = 1000 # 100
        self.W_Reward = 1000 # 10
        self.WR_Cov = max(Dx,Dy)*(10+0.1) # 300
        self.WR_Vis = max(Dx,Dy)*(10+0.1) # 300
        # Per-robot list of cells reachable within vis_dist of its position.
        self.reachable_cells = [[] for i in range(R)]
        # Decision variables: coordinates per robot per time step.
        self.X = [[Int("x%s_%s" % (i, j)) for j in range(T)] for i in range(R)]
        self.Y = [[Int("y%s_%s" % (i, j)) for j in range(T)] for i in range(R)]
        # Per-transition cost and reward variables (T-1 transitions).
        self.C = [[Real("c_%s_%s" % (i, j)) for j in range(T-1)] for i in range(R)]
        self.Re = [[Real("re_%s_%s" % (i, j)) for j in range(T-1)] for i in range(R)]
        self.total_cost = Real('total_cost')
        self.total_reward = Real('total_reward')
        self.Obj_func = Real('Obj_func')
def new_assign_Bots2Clusters(self,bot_loc):
nc = len(vis_clusters)
nr = self.R
self.clusters_assigned = [[] for c in range(nc)]
self.near_b2c = [[] for c in range(nc)]
self.far_b2c = [[] for c in range(nc)]
self.bots_assigned = [[] for r in range(nr)]
self.D = np.zeros((nc,nr))
self.D_min_idx = np.zeros((nc,nr))
count_clusters_unassigned = nc
count_bots_unassigned = nr
for r in range(nr):
c_near = []
(rx,ry) = bot_loc[r]
for c in range(nc):
dist = abs(rx-vis_clusters[c][:,0]) + abs(ry-vis_clusters[c][:,1])
idxs = dist.argsort()[:self.n_neighbors]
d_min = dist[idxs[0]]
#d_avg = np.average(dist[idxs])
self.D_min_idx[c][r] = idxs[0]
self.D[c][r] = d_min
if d_min==1:
c_near.append(c)
if len(c_near)>0:
c_near_cells = np.array(self.vis_cells_per_cluster[c_near])
idxs = c_near_cells.argsort()
c_near_min = c_near[idxs[0]]
c_near_max = c_near[idxs[len(idxs)-1]]
c = 0
if len(self.clusters_assigned[c_near_min]) == 0:
c = c_near_min
else:
c = c_near_max
self.clusters_assigned[c].append(r)
self.near_b2c[c].append(r)
self.bots_assigned[r].append(c)
count_bots_unassigned += -1
count_clusters_unassigned += -1
Idx_sorted =self.D.argsort(axis=None)
# Assigning island of visible cells which have zero assigned robots
for idx in Idx_sorted:
c = idx // nr
r = idx % nr
if len(self.clusters_assigned[c]) == 0 and len(self.bots_assigned[r]) == 0 and self.inbetween_vis(r,c)==0:
self.clusters_assigned[c].append(r)
self.far_b2c[c].append(r)
self.bots_assigned[r].append(c)
count_bots_unassigned += -1
count_clusters_unassigned += -1
self.bots_per_cluster = np.array([ len(bots) for bots in self.clusters_assigned])
self.cells_per_cluster = self.vis_cells_per_cluster + self.unexp_cells_per_cluster
cells_per_bot = np.array([self.cells_per_cluster[c]/ self.bots_per_cluster[c] if self.bots_per_cluster[c]>0 else 0 for c in range(nc)])
bots_unassigned = []
for r in range(self.R) :
if len(self.bots_assigned[r]) == 0:
bots_unassigned.append(r)
while not(count_bots_unassigned == 0):
c = np.argmax(cells_per_bot)
dist = self.D[c][bots_unassigned]
idxs = dist.argsort()
assigned = False
for i in idxs :
r = bots_unassigned[i]
if (self.inbetween_vis(r,c)==0 and self.cells_2_cover(c,r)>0 ):
bots_unassigned.pop(i)
self.clusters_assigned[c].append(r)
self.bots_assigned[r].append(c)
self.bots_per_cluster[c]+=1
cells_per_bot[c] = self.cells_per_cluster[c]/self.bots_per_cluster[c]
count_bots_unassigned += -1
assigned = True
break
if(assigned == False):
cells_per_bot[c] = 0
cls_canot_be_assigned_count = 0
for c in range(nc):
if cells_per_bot[c] == 0:
cls_canot_be_assigned_count+=1
if cls_canot_be_assigned_count == nc:
break
#print(cls_canot_be_assigned)
self.bots_per_cluster = np.array([ len(bots) for bots in self.clusters_assigned])
self.cells_per_cluster = self.vis_cells_per_cluster + self.unexp_cells_per_cluster
cells_per_bot = np.array([self.cells_per_cluster[c]/ self.bots_per_cluster[c] if self.bots_per_cluster[c]>0 else 0 for c in range(nc)])
bots_unassigned = []
for r in range(self.R) :
if len(self.bots_assigned[r]) == 0:
bots_unassigned.append(r)
while (count_bots_unassigned != 0):
c = np.argmax(cells_per_bot)
dist = self.D[c][bots_unassigned]
idxs = dist.argsort()
assigned = False
for i in idxs :
r = bots_unassigned[i]
if (self.inbetween_vis(r,c)==0 ):
bots_unassigned.pop(i)
self.clusters_assigned[c].append(r)
self.bots_assigned[r].append(c)
self.bots_per_cluster[c]+=1
cells_per_bot[c] = self.cells_per_cluster[c]/self.bots_per_cluster[c]
count_bots_unassigned += -1
assigned = True
break
if(assigned == False):
cells_per_bot[c] = 0
def cells_2_cover(self,c,r):
near_bots = len(self.near_b2c[c])
cells_2_cover = self.cells_per_cluster[c]
new_far_b2c = self.far_b2c[c] + [r]
dist = self.D[c][new_far_b2c]
idxs = np.argsort(dist)
for i in idxs :
if(i==0):
r1 = new_far_b2c[i]
cells_2_cover -= (self.D[c][r1]-1)*(near_bots)
else:
r1 = new_far_b2c[i-1]
r2 = new_far_b2c[i]
cells_2_cover -= (self.D[c][r2]-self.D[c][r1])*(near_bots+i)
return cells_2_cover
def new2_assign_Bots2Clusters(self,bot_loc):
nc = len(vis_clusters)
nr = self.R
self.clusters_assigned = [[] for c in range(nc)]
self.near_b2c = [[] for c in range(nc)]
self.far_b2c = [[] for c in range(nc)]
self.bots_assigned = [[] for r in range(nr)]
D = np.zeros((nc,nr))
self.D_min_idx = np.zeros((nc,nr))
count_clusters_unassigned = nc
count_bots_unassigned = nr
for r in range(nr):
c_near = []
(rx,ry) = bot_loc[r]
for c in range(nc):
dist = abs(rx-vis_clusters[c][:,0]) + abs(ry-vis_clusters[c][:,1])
idxs = dist.argsort()[:self.n_neighbors]
d_min = dist[idxs[0]]
#d_avg = np.average(dist[idxs])
self.D_min_idx[c][r] = idxs[0]
self.D[c][r] = d_min
if d_min==1:
c_near.append(c)
if len(c_near)>0:
c_near_cells = np.array(self.vis_cells_per_cluster[c_near])
idxs = c_near_cells.argsort()
c_near_min = c_near[idxs[0]]
c_near_max = c_near[idxs[len(idxs)-1]]
c = 0
if len(self.clusters_assigned[c_near_min]) == 0:
c = c_near_min
else:
c = c_near_max
self.clusters_assigned[c].append(r)
self.near_b2c[c].append(r)
self.bots_assigned[r].append(c)
count_bots_unassigned += -1
count_clusters_unassigned += -1
Idx_sorted = D.argsort(axis=None)
# Assigning island of visible cells which have zero assigned robots
for idx in Idx_sorted:
c = idx // nr
r = idx % nr
if len(self.clusters_assigned[c]) == 0 and len(self.bots_assigned[r]) == 0 and self.inbetween_vis(r,c)==0:
self.clusters_assigned[c].append(r)
self.far_b2c[c].append(r)
self.bots_assigned[r].append(c)
count_bots_unassigned += -1
count_clusters_unassigned += -1
# If any bots left , then assign according to the algo
cells_per_cluster = self.vis_cells_per_cluster+self.unexp_cells_per_cluster
bots_unassigned = []
for r in range(nr) :
if len(self.bots_assigned[r]) == 0:
bots_unassigned.append(r)
D_ = np.zeros((nc,nr))
for c in range(nc):
for r in range(nr):
if self.inbetween_vis(r,c)==0:
D_[c][r]= self.D[c][r]-1
else:
D_[c][r] = 1000000
while (count_bots_unassigned != 0):
nr = len(bots_unassigned)
cells_2_cover = np.zeros((nc,nr))
for c in range(nc):
near_bots = len(self.near_b2c[c])
for ri in range(nr):
r = bots_unassigned[ri]
new_far_b2c = self.far_b2c[c] + [r]
dist = D_[c][new_far_b2c]
idx = np.argsort(dist)
cells_2_cover[c][ri] = cells_per_cluster[c]
for i in idx :
if(i==0):
r = new_far_b2c[i]
cells_2_cover[c][ri] -= D_[c][r]*(near_bots)
else:
r1 = new_far_b2c[i-1]
r2 = new_far_b2c[i]
cells_2_cover[c][ri] -= (D_[c][r2]-D_[c][r1])*(near_bots+i)
Idx_sorted_ = cells_2_cover.argsort(axis=None)
idx = len(Idx_sorted_)-1
c = idx // nr
i = idx % nr
r = bots_unassigned[i]
bots_unassigned.pop(i)
self.clusters_assigned[c].append(r)
self.far_b2c[c].append(r)
self.bots_assigned[r].append(c)
count_bots_unassigned += -1
count_clusters_unassigned += -1
def inbetween_vis(self,r,c):
vis_array = copy.copy(visible_cells)
vis_array = np.array(vis_array)
(rx,ry) = self.bot_loc[r]
(vx,vy) = vis_clusters[c][int(self.D_min_idx[c][r])]
dx = rx-vx
dy = ry-vy
dx_array = rx-vis_array[:,0]
dy_array = ry-vis_array[:,1]
filtered_idxs = (dx*dx_array >= 0) & (dy*dy_array >= 0) & (abs(dx_array)+abs(dy_array) < abs(dx)+abs(dy))
if(dx==0):
filtered_idxs = filtered_idxs & (dx_array==0)
elif(dy==0):
filtered_idxs = filtered_idxs & (dy_array==0)
vis_array = vis_array[filtered_idxs]
return len(vis_array)
def shortest_distance(self,r,c):
grid, uav_pos, cluster = map, self.bot_loc[r],vis_clusters[c]
# Create an adjacency list representation of the grid
adj_list = defaultdict(list)
for i in range(len(grid)):
for j in range(len(grid[i])):
if grid[i][j] in cluster:
if i > 0 and grid[i-1][j] != -1:
adj_list[(i, j)].append((i-1, j))
if i < len(grid) - 1 and grid[i+1][j] != -1:
adj_list[(i, j)].append((i+1, j))
if j > 0 and grid[i][j-1] != -1:
adj_list[(i, j)].append((i, j-1))
if j < len(grid[i]) - 1 and grid[i][j+1] != -1:
adj_list[(i, j)].append((i, j+1))
# Initialize the distance and visited arrays
dist = {(i, j): float('inf') for i in range(len(grid)) for j in range(len(grid[i]))}
visited = {(i, j): False for i in range(len(grid)) for j in range(len(grid[i]))}
dist[uav_pos] = 0
# Create a priority queue to store the nodes to visit
pq = []
heapq.heappush(pq, (0, uav_pos))
while pq:
current_dist, current_node = heapq.heappop(pq)
if visited[current_node]:
continue
visited[current_node] = True
# Update the distances of the neighboring nodes
for neighbor in adj_list[current_node]:
if dist[neighbor] > current_dist + 1:
dist[neighbor] = current_dist + 1
heapq.heappush(pq, (dist[neighbor], neighbor))
# Find the minimum distance from the UAV to the cluster
min_dist = float('inf')
for cell in cluster:
if dist[cell] < min_dist:
min_dist = dist[cell]
return min_dist
def unexplored_per_cluster(self):
unexp_cells_per_cluster = [0 for c in range(len(vis_clusters))]
unexp_cluster_per_cluster = [[] for c in range(len(vis_clusters))]
d = 1
for c in range(len(vis_clusters)):
for cell in vis_clusters[c]:
(vx,vy) = cell
xl = int (max (0, vx-d))
xh = int (min (self.Dx, vx+d+1))
yl = int (max (0, vy-d))
yh = int (min (self.Dy, vy+d+1))
for x in range (xl, xh):
for y in range (yl, yh):
if map[y][x] == 0.0:
key = str(x)+'+'+str(y)
unexp_c = self.unexpcell2cluster[key]
if unexp_c not in unexp_cluster_per_cluster[c]:
unexp_cluster_per_cluster.append(unexp_c)
unexp_cells_per_cluster[c] += self.unexp_cells_per_unexpcluster[unexp_c]
return np.array(unexp_cells_per_cluster)
def make_and_assign_clusters(self,bot_loc):
self.bot_loc = bot_loc
self.vis_cells_per_cluster, self.viscell2cluster = Vis_Cluster(self.m,self.n).make_clusters()
self.unexp_cells_per_unexpcluster,self.unexpcell2cluster = Unexp_Cluster(self.m,self.n).make_clusters()
self.unexp_cells_per_cluster = self.unexplored_per_cluster()
self.new_assign_Bots2Clusters(bot_loc)
def init_bot_loc(self,bot_loc):
self.bot_loc = bot_loc
for r in range (self.R):
(x,y) = bot_loc[r]
self.s.add (And (self.X[r][0] == int (x), self.Y[r][0] == int (y))) # Assign the initial x and y coordinates
self.collect_reachables(r,self.vis_dist) # collect other reachable locations available
##########
def collect_reachables(self,r,d):
(rx,ry) = self.bot_loc[r]
xl = int (max (0, rx-d))
xh = int (min (self.Dx, rx+d+1))
yl = int (max (0, ry-d))
yh = int (min (self.Dy, ry+d+1))
for x in range (xl, xh):
for y in range (yl, yh):
# Collect all reachable cells from visible cells according to path length T (reachability)
if ((map[y][x] == 0.5 or map[y][x] == 1.0) and (abs (x - rx) + abs (y - ry) <= d) ) :
self.reachable_cells[r].append((x,y))
def motion_primitive(self):
for r in range (self.R):
for t in range (self.T-1):
self.s.add (And (self.P[r][t] <= 4, self.P[r][t] >= 0)) # Only 5 motion primitives are allowed
self.s.add (Or (self.C[r][t] == 1 , self.C[r][t] == 3 )) # Only 2 cost values are allowed
# For robot r at time t , If we choose an allowed value of P then the corresponding cost and next state allowed is defined
self.s.add(Implies(self.P[r][t] == 0, And(self.X[r][t+1] == self.X[r][t], self.Y[r][t+1] == self.Y[r][t], self.C[r][t] == 3))) # same
self.s.add(Implies(self.P[r][t] == 1, And(self.X[r][t+1] == self.X[r][t]+1,self.Y[r][t+1] == self.Y[r][t], self.C[r][t] == 1))) # right
self.s.add(Implies(self.P[r][t] == 2, And(self.X[r][t+1] == self.X[r][t]-1,self.Y[r][t+1] == self.Y[r][t], self.C[r][t] == 1))) # left
self.s.add(Implies(self.P[r][t] == 3, And(self.X[r][t+1] == self.X[r][t], self.Y[r][t+1] == self.Y[r][t]+1,self.C[r][t] == 1))) # up
self.s.add(Implies(self.P[r][t] == 4, And(self.X[r][t+1] == self.X[r][t], self.Y[r][t+1] == self.Y[r][t]-1,self.C[r][t] == 1))) # down
def action_cost(self,current_loc,next_loc):
same_cell_cost = 3
different_cell_cost = 1
if current_loc == next_loc:
return same_cell_cost
else:
return different_cell_cost
##########
def reward(self,r,current_loc,next_loc):
(nx,ny) = (int(next_loc[0]),int(next_loc[1]))
if(map[ny][nx] == 0.5):
key = str(nx)+'+'+str(ny)
c = self.viscell2cluster[key]
cov = self.surroundings(next_loc,1)
# return (cov + 1/len(vis_clusters[c]))*self.WR_Vis #cell_age[ny][nx]+
if(self.bots_assigned[r][0]==c):
return (cov + 1/len(vis_clusters[c]))*self.WR_Vis #cell_age[ny][nx]+
else:
return -1000
elif(map[ny][nx] == 1.0):
return self.near(r,current_loc, next_loc)*self.WR_Cov
else:
return -1000
def near(self,r,current_loc,next_loc):
(nx,ny) = (next_loc[0],next_loc[1])
(rx,ry) = (current_loc[0],current_loc[1])
# if(len(self.visible_cells)==0):
# return 0
# visible_cells = np.array (self.visible_cells)
if(len(vis_clusters[self.bots_assigned[r][0]])==0):
return 0
np_visible_cells = np.array (vis_clusters[self.bots_assigned[r][0]])
dist = abs (np_visible_cells[:,0] - rx) + abs (np_visible_cells[:,1] - ry)
idxs = dist.argsort ()[:self.n_neighbors]
safe_visible_cells = np_visible_cells[idxs]
k = len(safe_visible_cells)
total_d = 0
for loc in safe_visible_cells:
d = abs (loc[0] - nx) + abs (loc[1] - ny)
total_d += d
return k/total_d
##########
# def near(self,r,current_loc,next_loc):
# (nx,ny) = (next_loc[0],next_loc[1])
# (rx,ry) = (current_loc[0],current_loc[1])
# total_w_d = 0
# key = str(rx)+'+'+str(ry)
# k = len(self.nearest_vis_cells.get(key))
# Total_W = 0
# for loc in self.nearest_vis_cells[key]:
# d = abs (loc[0] - nx) + abs (loc[1] - ny)
# (x,y) = (loc[0],loc[1])
# key = str(x)+'+'+str(y)
# common_ratio = self.R/self.vis_common.get(key)
# w = pow(common_ratio,3)
# Total_W += w
# total_w_d += d*w
# return Total_W/total_w_d
##########
def Visible_cells_common_count(self):
self.vis_common = dict({})
self.nearest_vis_cells = dict({})
for r in range(self.R):
(rx,ry) = self.bot_loc[r]
key = str(rx)+'+'+str(ry)
self.nearest_vis_cells[key] = []
if(len(vis_clusters[self.bots_assigned[r][0]])==0):
return 0
visible_cells = np.array (vis_clusters[self.bots_assigned[r][0]])
dist = abs (visible_cells[:,0] - rx) + abs (visible_cells[:,1] - ry)
idxs = dist.argsort ()[:self.n_neighbors]
self.nearest_vis_cells[key] = visible_cells[idxs]
for cell in self.nearest_vis_cells[key]:
(x,y) = (cell[0],cell[1])
key = str(x)+'+'+str(y)
if self.vis_common.get(key) == None :
self.vis_common[key] = 1
else:
self.vis_common[key] += 1
def collision_avoidance(self):
for t in range(self.T-1):
for r1 in range (self.R):
for r2 in range (r1+1,self.R):
# Both x and y coordinates of r1 & r2 at time t+1 cannot be equal
self.s.add (Not( And(self.X[r1][t+1] == self.X[r2][t+1], self.Y[r1][t+1] == self.Y[r2][t+1])))
# Head on collision or Swaping position collision
self.s.add(Not (And(And(self.X[r1][t+1] == self.X[r2][t],self.Y[r1][t+1] == self.Y[r2][t]),And(self.X[r2][t+1] == self.X[r1][t],self.Y[r2][t+1] == self.Y[r1][t]))))
def d_bots(self,r1,r2):
(x1,y1) = self.bot_loc[r1]
(x2,y2) = self.bot_loc[r2]
return abs(x1-x2)+abs(y1-y2)
def new_collision_avoidance(self):
for t in range(self.T-1):
for r1 in range (self.R):
for r2 in range(r1+1,self.R):
# Both x and y coordinates of r1 & r2 at time t cannot be equal
if(self.d_bots(r1,r2)<=2):
self.s.add (Not( And(self.X[r1][t+1] == self.X[r2][t+1], self.Y[r1][t+1] == self.Y[r2][t+1])))
# Head on collision or Swaping position collision
if(self.d_bots(r1,r2)==1):
self.s.add(Not (And(And(self.X[r1][t+1] == self.X[r2][t],self.Y[r1][t+1] == self.Y[r2][t]),And(self.X[r2][t+1] == self.X[r1][t],self.Y[r2][t+1] == self.Y[r1][t]))))
def obstacle_avoidance(self,obst_loc):
self.obst_loc = obst_loc
for r in range (self.R):
for t in range (1,self.T):
for obst in obst_loc:
# Both the x & y coordinates of r at time t cannot be equal to that of obstacle coordinates
self.s.add (Not( And (self.X[r][t] == obst[0], self.Y[r][t] == obst[1])))
# stay within the grid bounds
self.s.add (And (self.X[r][t] < self.Dx, self.X[r][t] >= 0))
self.s.add (And (self.Y[r][t] < self.Dy, self.Y[r][t] >= 0))
def visit_reachable_cells(self):
#self.Visible_cells_common_count()
for r in range (self.R):
for t in range (self.T-1):
# A robot r at time t must choose a cell from all the reachable cells
self.s.add (Or ([And (self.X[r][t+1] == x, self.Y[r][t+1] == y) for (x,y) in self.reachable_cells[r]]))
curr = self.bot_loc[r]
for next in self.reachable_cells[r]:
cx,cy = curr
nx,ny = next
self.s.add(Implies(And(And (self.X[r][t] == int(cx), self.Y[r][t] == int(cy)),And (self.X[r][t+1] == int(nx), self.Y[r][t+1] == int(ny))),And(self.Re[r][t] == self.reward(r,[cx,cy],[nx,ny]),self.C[r][t] == self.action_cost([cx,cy],[nx,ny]))))
#self.s.add(Implies(Or(Not(And (self.X[r][t] == int(cx), self.Y[r][t] == int(cy))),Not(And (self.X[r][t+1] == int(nx), self.Y[r][t+1] == int(ny)))),self.Re[r][t] == -1000))
def check_smt(self):
TC = []
TR = []
for r in range(self.R):
TC+= self.C[r]
TR+= self.Re[r]
self.total_cost = Sum(TC)
self.total_reward = Sum(TR)
self.s.add(self.Obj_func == self.W_Cost*self.total_cost - self.W_Reward*self.total_reward)
h = self.s.minimize(self.Obj_func)
check = str(self.s.check())
return check
def add_visible_cells(self,loc,d):
(rx,ry) = loc
xl = int (max (0, rx-d))
xh = int (min (self.Dx, rx+d+1))
yl = int (max (0, ry-d))
yh = int (min (self.Dy, ry+d+1))
for x in range (xl, xh):
for y in range (yl, yh): # For another condition to select visible cells
if (map[y][x] == 0.0): # and d < self.vis_dist):
self.new_visible_cells.append((x,y))
map[y][x] = 0.5
def return_all_vars(self):
global visible_cells
#global Unexp_cells
model = self.s.model()
next_start_loc = []
current_traj = []
self.new_visible_cells = []
covered_visible_cells = []
count = 0
for r in range(self.R):
bot_traj = []
for t in range(self.T):
rx = int (str (model[self.X[r][t]]))
ry = int (str (model[self.Y[r][t]]))
if map[ry][rx] == 0.5 :
covered_visible_cells.append((rx,ry))
#cell_age[ry][rx] = 0
count+=1
if(t>0):
bot_traj.append((rx,ry))
self.add_visible_cells([rx,ry],self.vis_dist)
if(t==self.T-1):
next_start_loc.append((rx,ry))
map[ry][rx] = 1.0
current_traj.append(bot_traj)
filtered_cells = []
for cell in visible_cells:
if cell not in covered_visible_cells:
filtered_cells.append(cell)
visible_cells = filtered_cells + self.new_visible_cells
# new_unexp_cells = []
# for cell in Unexp_cells:
# if cell not in self.new_visible_cells:
# new_unexp_cells.append(cell)
# Unexp_cells = new_unexp_cells
return next_start_loc,current_traj,count
def surroundings(self,loc,d=1):
(vx,vy) = (int(loc[0]),int(loc[1]))
cov = 0
vis = 0
n = self.Dx
m = self.Dy
for x in range (vx-d, vx+d+1):
for y in range (vy-d, vy+d+1):
if (x==vx and y==vy):
continue
if(x<0 or y<0 or x>=n or y>=m):
cov+=1
else:
if map[y][x]==1:
cov+=1
return cov
#------------------------------------------------------------------------------------------------------------------------
def Init_visible_cells(init_loc, Dx, Dy, d):
    """Mark every unexplored cell (0.0) within Chebyshev distance ``d`` of a
    robot start position as visible (0.5) and rebuild the module-global
    ``visible_cells`` list of their (x, y) coordinates."""
    global map
    global visible_cells
    visible_cells = []
    for (rx, ry) in init_loc:
        # Clamp the (2d+1)-square window around the robot to the grid bounds.
        x_lo, x_hi = int(max(0, rx - d)), int(min(Dx, rx + d + 1))
        y_lo, y_hi = int(max(0, ry - d)), int(min(Dy, ry + d + 1))
        for x in range(x_lo, x_hi):
            for y in range(y_lo, y_hi):
                if map[y][x] == 0.0:
                    visible_cells.append((x, y))
                    map[y][x] = 0.5
# def update_age():
# for cell in visible_cells:
# (x,y) = cell
# cell_age[y][x]+=1
def make_map(R,Dx,Dy,init_pos,obst_pos):
    """(Re)build the global occupancy grid and return it.

    Cell values: 0.0 unexplored, 1.0 covered (robot start cells), -1.0
    obstacle. ``R`` is unused but kept for call-site compatibility. The grid
    is also returned (previously only the global was set) so callers and
    tests need not rely on the module-level ``map``.
    """
    global map
    map = np.full((Dy, Dx), 0.0)   # note: shape is (rows=Dy, cols=Dx)
    for (x, y) in init_pos:
        map[y][x] = 1.0            # robot start cells count as covered
    for (x, y) in obst_pos:
        map[y][x] = -1.0           # obstacles
    return map
def main(R,T,Dx,Dy,plots_dir,wp_dir,init_pos,obst_pos,vis = False):
    """Receding-horizon coverage loop: repeatedly solve a T-step SMT plan for
    R robots on a Dx x Dy grid until every reachable cell is covered (or the
    solver returns unsat).

    Waypoints for robot r are appended to <wp_dir>/robot_r as "y x" lines.
    Returns (number of horizons solved, total solver wall-clock time in s).
    """
    # num_obst = int (Dx*Dy/10)
    # obst_pos = [] # random_obst_positions (Dx, Dy, num_obst)
    make_map(R,Dx,Dy,init_pos,obst_pos)
    vis_dist = 1
    cells_need_to_be_covered = Dx*Dy-len(obst_pos)
    cells_covered = R  # start cells are marked covered by make_map
    n_neighbors = 5
    Init_visible_cells(init_pos,Dx,Dy,vis_dist)
    #global cell_age
    # cell_age = np.full ((Dy,Dx), 0)
    # update_age()
    # One waypoint file per robot, seeded with its start position.
    # NOTE(review): handles are flushed each write but never closed.
    files = []
    for r in range (R):
        filename = 'robot_' + str (r)
        filepath = os.path.join(wp_dir, filename)
        f = open (filepath, 'w+')
        files.append (f)
        x,y = init_pos[r]
        s = str (y) + " " + str (x) + "\n"
        files[r].write (s)
        files[r].flush ()
    k=0  # horizon counter
    total_time = 0  # cumulative solve time (seconds)
    while True:
        # Done once everything is covered and no visible frontier remains.
        if cells_covered>=cells_need_to_be_covered and len(visible_cells)==0:
            break
        k+=1
        tic = time.time()
        smt = SMT(R,T,Dx,Dy,vis_dist,n_neighbors)
        smt.make_and_assign_clusters(init_pos)
        toc1 = time.time()
        smt.init_bot_loc(init_pos)
        #smt.motion_primitive()
        smt.new_collision_avoidance()
        #smt.obstacle_avoidance(obst_pos)
        toc2 = time.time()
        smt.visit_reachable_cells()
        toc3 = time.time()
        if(smt.check_smt()=='unsat'):
            break
        tocF = time.time()
        dt1 = round(toc1 - tic,3)   # clustering / robot-to-cluster assignment
        dt2 = round(toc2 - toc1,3)  # initial-location + collision constraints
        dt3 = round(toc3 - toc2,3)  # reachability constraints
        dt4 = round(tocF - toc3,3)  # SMT solve
        dt = round(tocF - tic,3)
        total_time+= dt
        init_pos,current_traj,count = smt.return_all_vars()
        cells_covered+=count
        no_visible = len(visible_cells)
        # NOTE(review): obstacle cells are not subtracted here, so the
        # "Uncovered" figure includes obstacles when obst_pos is non-empty.
        no_uncovered_cells = Dx*Dy -cells_covered-no_visible
        print("For horizon {} : Total time taken : {} sec , Total cells covered : {} , Visible cells : {} , Uncovered cells : {}\n".format(k,dt,cells_covered,no_visible,no_uncovered_cells))
        if dt<60:
            print(" Total time taken : {} sec, cluster : {} sec, collision : {} sec, visit_reach : {} sec, SMT_check : {} sec\n".format(dt,dt1,dt2,dt3,dt4))
        else:
            # NOTE(review): visit_reach prints dt3/60 but is labelled "sec".
            print(" Total time taken : {} min, cluster : {} min, collision : {} min, visit_reach : {} sec, SMT_check : {} min\n".format(dt/60,dt1/60,dt2/60,dt3/60,dt4/60))
        #update_age()
        # Append this horizon's waypoints for each robot.
        for r in range(R):
            for loc in current_traj[r]:
                x,y = loc
                s = str (y) + " " + str (x) + "\n"
                files[r].write (s)
                files[r].flush ()
    if (cells_covered<cells_need_to_be_covered):
        print("SMT not Satisfied")
    print("Total no of horizons needed : {} \n".format(k))
    if total_time<60:
        print("Total Time taken : {} sec\n".format(total_time))
    else:
        print("Total Time taken : {} min\n".format(total_time/60))
    return k,round(total_time,3)
import argparse

# ------------------------------------------------------------------ CLI ----
parser = argparse.ArgumentParser()
parser.add_argument('-r', dest='num_robots', type=int, help='Number of robots')
parser.add_argument('-d', dest='dimension', type=int, help='Size of workspace')
parser.add_argument('-t', dest='tests', type=int, help='No of tests')
parser.add_argument('-it', default=1, dest='init_test', type=int, help='Initial test location')
parser.add_argument('-v', dest='vis', type=int, help='should visualize or not')
parser.add_argument('-f', dest='filename', type=str, help='Name of the file to save')
args = parser.parse_args()
D = int(args.dimension)
R = int(args.num_robots)
it = int(args.init_test)
filename = str(D)+'x'+str(D)+'_'+str(R)+'bots'
init_loc_file = "INITIAL_LOCATIONS-"+str(1)
# BUG FIX: honour a user-supplied output directory name. The old test
# `args.filename == ""` was False for a real -f value (so it was ignored)
# and True only for an empty string (which wiped the default name).
if args.filename:
    filename = args.filename
if not os.path.isdir (filename):
    os.mkdir (filename)
Dx = D
Dy = D
T = 2  # planning horizon per SMT call
do_test = 0
vis = False
if args.vis == 1:
    vis = True
if(args.tests):
    do_test = args.tests
if (do_test==0):
    # Single run: directory TEST<it> with plots/ and WPts/ subfolders.
    test_dir = os.path.join(filename,'TEST'+str(it))
    if not os.path.isdir (test_dir):
        os.mkdir (test_dir)
    plots_dir = os.path.join(test_dir, 'plots')
    if not os.path.isdir (plots_dir):
        os.mkdir (plots_dir)
    wp_dir = os.path.join(test_dir, 'WPts')
    if not os.path.isdir (wp_dir):
        os.mkdir (wp_dir)
    path = os.path.join(init_loc_file,str(D)+'x'+str(D)+'_'+str(R),'TEST-'+str(it),'robot_init_locs')
    init_pos = []
    obst_pos = []
    # Initial locations are stored one "y x" pair per robot.
    with open(path,'r') as file:
        for r in range(R):
            NewLine = file.readline()
            y,x = int (NewLine.split(' ')[0]), int (NewLine.split(' ')[1])
            init_pos.append((x,y))
    k,total_time = main(R,T,Dx,Dy,plots_dir,wp_dir,init_pos,obst_pos,vis)
#######################################
else:
    # Batch mode: run `tests` independent trials and report mean / stdev.
    tests = do_test
    Avg_k = 0
    Avg_time = 0
    K = []
    Time = []
    for i in range(tests):
        print("TEST : ",i+1)
        test_dir = os.path.join(filename,'TEST'+str(i+1))
        if not os.path.isdir (test_dir):
            os.mkdir (test_dir)
        plots_dir = os.path.join(test_dir, 'plots')
        if not os.path.isdir (plots_dir):
            os.mkdir (plots_dir)
        wp_dir = os.path.join(test_dir, 'WPts')
        if not os.path.isdir (wp_dir):
            os.mkdir (wp_dir)
        path = os.path.join(init_loc_file,str(D)+'x'+str(D)+'_'+str(R),'TEST-'+str(i+1),'robot_init_locs')
        init_pos = []
        with open(path,'r') as file:
            for r in range(R):
                NewLine = file.readline()
                y,x = int (NewLine.split(' ')[0]), int (NewLine.split(' ')[1])
                init_pos.append((x,y))
        k,total_time = main(R,T,Dx,Dy,plots_dir,wp_dir,init_pos,obst_pos=[])
        K.append(k)
        Time.append(total_time)
    for i in range(tests):
        if (Time[i]< 60):
            print("TEST {} --------------> No of horizons : {} ,Computation time : {} sec\n".format(i+1,K[i],Time[i]))
        else:
            print("TEST {} --------------> No of horizons : {} ,Computation time : {} min\n".format(i+1,K[i],Time[i]/60))
    Avg_k = mean(K)
    Avg_time = mean(Time)
    sd_k = stdev(K)
    sd_time = stdev(Time)
    print("For {}x{} grid & {} robots in {} tests ------>>>> Average no of horizons needed : {} , Standard Deviation : {}\n".format(D,D,R,tests,Avg_k,sd_k))
    if (Avg_time< 60):
        print("For {}x{} grid & {} robots in {} tests ------>>>> Average no of horizons needed : {} sec, Standard Deviation : {} sec\n".format(D,D,R,tests,Avg_time,sd_time))
    else:
        print("For {}x{} grid & {} robots in {} tests ------>>>> Average Computation time needed : {} min, Standard Deviation : {} sec\n".format(D,D,R,tests,Avg_time/60,sd_time))
| Luckykantnayak/uav-project-2 | lucky_smt_v5.py | lucky_smt_v5.py | py | 38,555 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "faulthandler.enable",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sys.setrecursionlimit",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.array",
... |
14919660857 | #!/usr/bin/env python
# Brocapi RQ Worker
__copyright__ = """
Copyright 2017 FireEye, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__license__ = "Apache 2.0"
import glob
import logging
import os
import subprocess
import brocapi_syslog
# Bro log types that carry operational/diagnostic data rather than network
# events; process_job skips these instead of forwarding them to syslog.
TYPE_BLACKLIST = [
"capture_loss",
"stats",
"loaded_scripts",
"packet_filter"
]
def process_job(job_uuid, job_tag, pcaps, bro_bin,
bro_processing_dir, syslog_host, syslog_port,
syslog_proto, syslog_prefix):
logging.info("Received job: %s", job_uuid)
bro_log_dir = bro_processing_dir + job_uuid + "/logs/bro/"
logging.info("Moving into Bro log dir: %s", bro_log_dir)
os.chdir(bro_log_dir)
for pcap in pcaps:
pcap_path = bro_processing_dir + job_uuid + '/pcaps/' + pcap
logging.debug("Calling bro for pcap %s as part of job %s", pcap_path, job_uuid)
try:
subprocess.call([
bro_bin,
"-C",
"-r",
pcap_path,
"local"])
except Exception as e:
logging.error("Bro processing failed for pcap %s", pcap)
logging.error(e)
# Get all the relevant bro logs in the dir
bro_logs = glob.glob('*.log')
logging.debug("Found bro logs: %s", str(bro_logs))
if len(bro_logs) == 0:
logging.error("No bro logs present for job %s", job_uuid)
return False
# Connect to syslog server
logging.debug("Creating a syslog broker socket to %s:%s over %s for job %s", syslog_host, syslog_port, syslog_proto, job_uuid)
broker_socket = brocapi_syslog.connect_syslog(syslog_host, syslog_port, syslog_proto)
if not broker_socket:
return False
# Loop through all log types
for _log in bro_logs:
logging.debug("Processing log %s for job %s", _log, job_uuid)
bro_type = _log.split(".")[0]
if bro_type in TYPE_BLACKLIST:
logging.debug("Skipping blacklisted type %s for job %s", bro_type, job_uuid)
continue
syslog_program = syslog_prefix % bro_type
# handle every line in the log file
with open(_log) as bro_file:
for line in bro_file:
if line.startswith("#"):
continue
if job_tag is None:
job_tag = "brocapi"
syslog_message = brocapi_syslog.format_syslog_message(job_tag, syslog_program, line)
broker_socket.send(syslog_message)
# close out the socket
broker_socket.close()
| fireeye/brocapi | brocapi/brocapi_worker.py | brocapi_worker.py | py | 3,053 | python | en | code | 27 | github-code | 36 | [
{
"api_name": "logging.info",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number":... |
72166992744 | import datetime
def get_period(start_day: str, n_days: int) -> list:
''' get the list of string dates from <start_date> <n_days> backwards '''
datelst = [datetime.datetime.strptime(start_day, '%Y-%m-%d') - datetime.timedelta(days=x) for x in range(n_days)]
datelst = [x.strftime('%Y-%m-%d') for x in datelst]
return datelst
def convert_datetime(df, sin_cos=False):
start_time = time.time()
sh = df.shape
print("datetime conversion started...")
df['hour'] = df.created_ts.apply(get_hour)
df['weekday'] = df.created_ts.apply(get_weekday)
df['day'] = df.created_ts.apply(get_day)
if sin_cos:
df = sin_cos_encoding(df, 'hour', 24)
df = sin_cos_encoding(df, 'weekday', 7)
df = sin_cos_encoding(df, 'day', 30)
tests.test_df_shape(sh, 3*2, df.shape)
else:
tests.test_df_shape(sh, 3, df.shape)
print(f"datetime conversion completed, time : {int(time.time() - start_time)}s")
return df
def dt_string_converter(df, dt_column, fmt="datetime"):
'''convert string to datetime & vice versa,
fmt: [datetime/string]'''
if all([fmt == "datetime", df[dt_column].dtype == "object"]):
df[dt_column] = df[dt_column].apply(lambda v: datetime.datetime.strptime(v, "%Y-%m-%d %H:%M:%S"))
if all([fmt == "string", df[dt_column].dtype == "<M8[ns]"]):
df[dt_column] = df[dt_column].apply(lambda v: datetime.datetime.strftime(v, "%Y-%m-%d %H:%M:%S"))
try:
assert df[dt_column].dtype == {"datetime":"<M8[ns]", "string":"object"}[fmt]
except AssertionError:
print(f"datetime string converter failed")
return df
| qCircuit/unos_scripts | datetime.py | datetime.py | py | 1,675 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime.strptime",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "datetime.timedelta",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "dat... |
4778228979 | from pathlib import Path
import re
import subprocess
import numpy as np
import pytest
from transformer_engine.paddle.fp8 import is_fp8_available
test_root = Path(__file__).resolve().parent
is_fp8_supported, reason = is_fp8_available()
@pytest.mark.skipif(not is_fp8_supported, reason=reason)
@pytest.mark.parametrize('use_reentrant', [False, True])
def test_transformer_encoder_recompute(use_reentrant):
"""
Test TransformerLayer encoder recompute
"""
rtol = 1e-5
atol = 1e-5
def launch_subprocess_and_check_output(enable_recompute):
"""Launch training in subprocess and check output"""
try:
cmd = [
'python',
str(test_root / 'recompute_tests' / 'recompute_transformer_encoder.py'),
str(int(enable_recompute)),
str(int(use_reentrant))
]
result = subprocess.check_output(cmd, stderr=subprocess.STDOUT, universal_newlines=True)
print(result)
loss_match = re.search(r'Loss:\s+(-?\d+\.\d+)', result)
memory_match = re.search(r'Peak memory:\s+(\d+)', result)
loss_value = float(loss_match.group(1))
memory_value = int(memory_match.group(1))
return loss_value, memory_value
except subprocess.CalledProcessError as e:
raise ValueError(f"Subprocess failed with error: {e}") from e
loss_recompute, peak_memory_recompute = launch_subprocess_and_check_output(True)
loss_ref, peak_memory_ref = launch_subprocess_and_check_output(False)
assert peak_memory_recompute < peak_memory_ref
np.testing.assert_allclose(loss_recompute, loss_ref, rtol=rtol, atol=atol)
| NVIDIA/TransformerEngine | tests/paddle/test_recompute.py | test_recompute.py | py | 1,707 | python | en | code | 1,056 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "transformer_engine.paddle.fp8.is_fp8_available",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "subprocess.check_output",
"line_number": 32,
"usage_type": "call"
},
{
... |
30488234848 | from eth_abi.codec import (
ABICodec,
)
from eth_utils import (
add_0x_prefix,
apply_to_return_value,
from_wei,
is_address,
is_checksum_address,
keccak as eth_utils_keccak,
remove_0x_prefix,
to_bytes,
to_checksum_address,
to_int,
to_text,
to_wei,
)
from hexbytes import (
HexBytes,
)
from typing import Any, cast, Dict, List, Optional, Sequence, TYPE_CHECKING
from eth_typing import HexStr, Primitives
from eth_typing.abi import TypeStr
from eth_utils import (
combomethod,
)
from ens import ENS
from web3._utils.abi import (
build_default_registry,
build_strict_registry,
map_abi_data,
)
from web3._utils.decorators import (
deprecated_for,
)
from web3._utils.empty import (
empty,
)
from web3._utils.encoding import (
hex_encode_abi_type,
to_hex,
to_json,
)
from web3._utils.rpc_abi import (
RPC,
)
from web3._utils.module import (
attach_modules,
)
from web3._utils.normalizers import (
abi_ens_resolver,
)
from web3.eth import (
Eth,
)
from web3.geth import (
Geth,
GethAdmin,
GethMiner,
GethPersonal,
GethShh,
GethTxPool,
)
from web3.iban import (
Iban,
)
from web3.manager import (
RequestManager as DefaultRequestManager,
)
from web3.net import (
Net,
)
from web3.parity import (
Parity,
ParityPersonal,
ParityShh,
)
from web3.providers import (
BaseProvider,
)
from web3.providers.eth_tester import (
EthereumTesterProvider,
)
from web3.providers.ipc import (
IPCProvider,
)
from web3.providers.rpc import (
HTTPProvider,
)
from web3.providers.websocket import (
WebsocketProvider,
)
from web3.testing import (
Testing,
)
from web3.types import ( # noqa: F401
Middleware,
MiddlewareOnion,
)
from web3.version import (
Version,
)
if TYPE_CHECKING:
from web3.pm import PM # noqa: F401
def get_default_modules() -> Dict[str, Sequence[Any]]:
return {
"eth": (Eth,),
"net": (Net,),
"version": (Version,),
"parity": (Parity, {
"personal": (ParityPersonal,),
"shh": (ParityShh,),
}),
"geth": (Geth, {
"admin": (GethAdmin,),
"miner": (GethMiner,),
"personal": (GethPersonal,),
"shh": (GethShh,),
"txpool": (GethTxPool,),
}),
"testing": (Testing,),
}
class Web3:
# Providers
HTTPProvider = HTTPProvider
IPCProvider = IPCProvider
EthereumTesterProvider = EthereumTesterProvider
WebsocketProvider = WebsocketProvider
# Managers
RequestManager = DefaultRequestManager
# Iban
Iban = Iban
# Encoding and Decoding
toBytes = staticmethod(to_bytes)
toInt = staticmethod(to_int)
toHex = staticmethod(to_hex)
toText = staticmethod(to_text)
toJSON = staticmethod(to_json)
# Currency Utility
toWei = staticmethod(to_wei)
fromWei = staticmethod(from_wei)
# Address Utility
isAddress = staticmethod(is_address)
isChecksumAddress = staticmethod(is_checksum_address)
toChecksumAddress = staticmethod(to_checksum_address)
# mypy Types
eth: Eth
parity: Parity
geth: Geth
net: Net
def __init__(
self,
provider: Optional[BaseProvider] = None,
middlewares: Optional[Sequence[Any]] = None,
modules: Optional[Dict[str, Sequence[Any]]] = None,
ens: ENS = cast(ENS, empty)
) -> None:
self.manager = self.RequestManager(self, provider, middlewares)
if modules is None:
modules = get_default_modules()
attach_modules(self, modules)
self.codec = ABICodec(build_default_registry())
self.ens = ens
@property
def middleware_onion(self) -> MiddlewareOnion:
return self.manager.middleware_onion
@property
def provider(self) -> BaseProvider:
return self.manager.provider
@provider.setter
def provider(self, provider: BaseProvider) -> None:
self.manager.provider = provider
@property
def clientVersion(self) -> str:
return self.manager.request_blocking(RPC.web3_clientVersion, [])
@property
def api(self) -> str:
from web3 import __version__
return __version__
@staticmethod
@deprecated_for("keccak")
@apply_to_return_value(HexBytes)
def sha3(primitive: Optional[Primitives] = None, text: Optional[str] = None,
hexstr: Optional[HexStr] = None) -> bytes:
return Web3.keccak(primitive, text, hexstr)
@staticmethod
@apply_to_return_value(HexBytes)
def keccak(primitive: Optional[Primitives] = None, text: Optional[str] = None,
hexstr: Optional[HexStr] = None) -> bytes:
if isinstance(primitive, (bytes, int, type(None))):
input_bytes = to_bytes(primitive, hexstr=hexstr, text=text)
return eth_utils_keccak(input_bytes)
raise TypeError(
"You called keccak with first arg %r and keywords %r. You must call it with one of "
"these approaches: keccak(text='txt'), keccak(hexstr='0x747874'), "
"keccak(b'\\x74\\x78\\x74'), or keccak(0x747874)." % (
primitive,
{'text': text, 'hexstr': hexstr}
)
)
@combomethod
@deprecated_for("solidityKeccak")
def soliditySha3(cls, abi_types: List[TypeStr], values: List[Any]) -> bytes:
return cls.solidityKeccak(abi_types, values)
@combomethod
def solidityKeccak(cls, abi_types: List[TypeStr], values: List[Any]) -> bytes:
"""
Executes keccak256 exactly as Solidity does.
Takes list of abi_types as inputs -- `[uint24, int8[], bool]`
and list of corresponding values -- `[20, [-1, 5, 0], True]`
"""
if len(abi_types) != len(values):
raise ValueError(
"Length mismatch between provided abi types and values. Got "
"{0} types and {1} values.".format(len(abi_types), len(values))
)
if isinstance(cls, type):
w3 = None
else:
w3 = cls
normalized_values = map_abi_data([abi_ens_resolver(w3)], abi_types, values)
hex_string = add_0x_prefix(HexStr(''.join(
remove_0x_prefix(hex_encode_abi_type(abi_type, value))
for abi_type, value
in zip(abi_types, normalized_values)
)))
return cls.keccak(hexstr=hex_string)
def isConnected(self) -> bool:
return self.provider.isConnected()
def is_encodable(self, _type: TypeStr, value: Any) -> bool:
return self.codec.is_encodable(_type, value)
@property
def ens(self) -> ENS:
if self._ens is cast(ENS, empty):
return ENS.fromWeb3(self)
else:
return self._ens
@ens.setter
def ens(self, new_ens: ENS) -> None:
self._ens = new_ens
@property
def pm(self) -> "PM":
if hasattr(self, '_pm'):
# ignored b/c property is dynamically set via enable_unstable_package_management_api
return self._pm # type: ignore
else:
raise AttributeError(
"The Package Management feature is disabled by default until "
"its API stabilizes. To use these features, please enable them by running "
"`w3.enable_unstable_package_management_api()` and try again."
)
def enable_unstable_package_management_api(self) -> None:
from web3.pm import PM # noqa: F811
if not hasattr(self, '_pm'):
PM.attach(self, '_pm')
def enable_strict_bytes_type_checking(self) -> None:
self.codec = ABICodec(build_strict_registry())
| MLY0813/FlashSwapForCofixAndUni | FlashSwapForCofixAndUni/venv/lib/python3.9/site-packages/web3/main.py | main.py | py | 7,774 | python | en | code | 70 | github-code | 36 | [
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "web3.eth.Eth",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "web3.net.Net",
"line_number": 113,
"usage_type": "name"
},
{
"api_name": "web3.version.Versio... |
8436779783 | # Given an array of positive numbers and a positive number ‘k,’ find the maximum sum of any contiguous subarray of size ‘k’.
def find_max_sum(arr, k):
sum = 0
max_sum = 0
for i in range(len(arr)):
sum += arr[i]
if i > k-1:
sum -= arr[i-k]
max_sum = max(max_sum, sum)
return max_sum
#Given an array of positive numbers and a positive number ‘S,’ find the length of the smallest contiguous subarray whose sum is greater than or equal to ‘S’. Return 0 if no such subarray exists.
def smallest_subarray_with_sum(arr, target):
left = 0
min_length = len(arr)
for i in range(len(arr)):
sum += arr[i]
while sum > target:
min_length = min(min_length, i - left + 1)
sum -= arr[left]
left += 1
return min_length
from collections import Counter
# Given a string, find the length of the longest substring in it with no more than K distinct characters.
def longest_substring_with_k_distinct_characters(s, k):
left = 0
char_count = Counter()
distinct = 0
max_length = 0
for i, c in enumerate(s):
if char_count[c] == 0:
distinct += 1
char_count[c] += 1
while distinct > k:
char_count[s[left]] -= 1
if char_count[s[left]] == 0:
del char_count[s[left]]
distinct -= 1
left += 1
max_length = max(max_length, i - left + 1)
return max_length
# Given a string, find the length of the longest substring, which has no repeating characters.
def longest_length_with_unique_characters(s):
char_count = {}
left = 0
max_l = 0
for i, c in enumerate(s):
if c in char_count:
if char_count[c] >= left:
left = char_count[c] + 1
max_l = max(i - left + 1, max_l)
char_count[c] = i
return max(max_l, len(s) - left)
# Longest Substring with Same Letters after Replacement
# Given a string with lowercase letters only, if you are allowed to replace no more than ‘k’ letters with any letter,
# find the length of the longest substring having the same letters after replacement.
def find_longest_substring_with_same_characters_after_k_replacements(s, k):
# find window that has k characters that are not the character with max count
char_count = {}
max_count = 0
l = 0
max_l = 0
for i, c in enumerate(s):
if c in char_count:
char_count[c] += 1
if char_count[c] > max_count:
max_count = char_count[c]
else:
char_count[c] = 1
while i - l - max_count > k:
char_count[s[l]] -= 1
if char_count[s[l]] == 0:
del char_count[s[l]]
l += 1
max_l = max(max_l, i-l+1)
return max_l
# Given an array containing 0s and 1s, if you are allowed to replace no more than ‘k’ 0s with 1s, find the length of the longest contiguous subarray having all 1s.
def find_length_of_array_having_ones_with_k_replacements(arr, k):
max_l = 0
left = 0
ones_counter = 0
zeros = 0
for i, n in enumerate(arr):
if n == 1:
ones_counter+=1
else:
zeros += 1
while i - left - ones_counter > k:
if arr[left] == 1:
ones_counter -= 1
left += 1
max_l = max(max_l, i - left + 1)
return max_l
def permutation_in_a_string(s, perm):
p_count = Counter(perm)
s_count = Counter()
for i, c in enumerate(s):
s_count[c] += 1
if i >= len(perm)-1:
s_count[i-len(perm)] -=1
if s_count[i-len(perm)] == 0:
del s_count[i-len(perm)]
if s_count == p_count:
return True
import math
def min_window_substring(s, t):
t_char_count = Counter(t)
keys_to_cover = len(t)
left = 0
min_length = math.inf
start, end = -1, -1
for i, c in enumerate(s):
if c in t_char_count:
t_char_count[c] -= 1
keys_to_cover -= 1
if t_char_count[c] == 0:
del t_char_count[c]
while keys_to_cover == 0:
if i -left +1 < min_length:
min_length = min(min_length, i - left + 1)
start = left
end = i
if s[left] in t_char_count:
t_char_count[s[left]] += 1
keys_to_cover += 1
left += 1
return s[start:end]
def check_if_word_concatenation_of_substrings(s, words):
words_count = Counter(words)
words_to_cover = len(words)
unit_size = len(words[0])
res = []
for i in range(0, len(s) - words_to_cover * unit_size +1):
substr = s[i:i+unit_size]
print("start checking at index ", i, substr)
if substr in words_count:
j = i
mapper = Counter(words)
words_to_cover = len(words)
print("before while loop: ")
while True:
print(s[j:j+unit_size])
print(mapper)
if s[j:j+unit_size] in mapper:
mapper[s[j:j+unit_size]] -= 1
words_to_cover -= 1
if mapper[s[j:j+unit_size]] == 0:
del mapper[s[j:j+unit_size]]
if words_to_cover == 0:
res.append(i)
else:
break
print("after while loop: ", mapper, "\n****")
j += unit_size
return res
if __name__ == '__main__':
print(check_if_word_concatenation_of_substrings("wordgoodgoodgoodbestword", ["word","good","best","good"]))
print(check_if_word_concatenation_of_substrings("bagfoxcat", ["cat", "fox"]))
print(check_if_word_concatenation_of_substrings("barfoothefoobarman", ["foo", "the"]))
print(check_if_word_concatenation_of_substrings("barfoofoobarthefoobarman", ["bar","foo","the"])) | kashyapa/coding-problems | april19th/sliding-window/sliding_window.py | sliding_window.py | py | 5,992 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.Counter",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "collect... |
8438985423 | from django.shortcuts import render, get_object_or_404
from django.views import View
from proyectofinal.models import Jedi
from proyectofinal.forms import Buscar, JediForm
from django.urls import reverse_lazy
from django.views.generic import DetailView, ListView, CreateView, DeleteView, UpdateView
#Create your views here.
def pasar_path(request, id):
return id
def home(request):
return render(request, "proyectofinal/home.html")
def mostrarjedis(request):
lista_jedis = Jedi.objects.all()
return render(request, 'proyectofinal/jedis.html', {'lista_jedis': lista_jedis})
class ListaJedis(ListView):
model = Jedi
class DetalleJedi(DetailView):
model = Jedi
class NuevoJedi(CreateView):
model = Jedi
success_url = reverse_lazy("jedis-panel")
fields = ['nombre','numero_jedi', 'titulo', 'color_sable']
class BorrarJedi(DeleteView):
model = Jedi
success_url = reverse_lazy("jedis-panel")
class JediActualizar(UpdateView):
template_name = 'proyectofinal/jedi_update.html'
model = Jedi
success_url = reverse_lazy("jedis-panel")
fields = ['nombre','numero_jedi', 'titulo', 'color_sable']
class BuscarJedi(View):
form_class = Buscar
template_name = 'proyectofinal/buscar.html'
initial = {"nombre":""}
def get(self, request):
form = self.form_class(initial=self.initial)
return render(request, self.template_name, {'form':form})
def post(self, request):
form = self.form_class(request.POST)
if form.is_valid():
nombre = form.cleaned_data.get("nombre")
lista_jedis = Jedi.objects.filter(nombre__icontains=nombre).all()
form = self.form_class(initial=self.initial)
return render(request, self.template_name, {'form':form,
'lista_jedis':lista_jedis})
return render(request, self.template_name, {"form": form})
""" class AltaJedi(View):
form_class = JediForm
template_name = 'proyectofinal/alta_jedi.html'
initial = {'nombre':'','numero_jedi':'', 'titulo':'', 'color_sable':''}
def get(self, request):
form = self.form_class(initial=self.initial)
return render(request, self.template_name, {'form':form})
def post(self, request):
form = self.form_class(request.POST)
if form.is_valid():
form.save()
msg_exito = f"Se cargó con éxito al nuevo integrante del Sindicato Jedi, {form.cleaned_data.get('nombre')}"
form = self.form_class(initial=self.initial)
return render(request, self.template_name, {'form':form,
'msg_exito':msg_exito})
return render(request, self.template_name, {"form": form}) """
"""class ActualizarJedi(View):
form_class = JediForm
template_name = 'proyectofinal/actualizar_jedi.html'
initial = {'nombre':'','numero_jedi':'', 'titulo':'', 'color_sable':''}
# prestar atención ahora el method get recibe un parametro pk == primaryKey == identificador único
def get(self, request, pk):
jedi = get_object_or_404(Jedi, pk=pk)
form = self.form_class(instance=jedi)
return render(request, self.template_name, {'form':form,'jedi': jedi})
# prestar atención ahora el method post recibe un parametro pk == primaryKey == identificador único
def post(self, request, pk):
jedi = get_object_or_404(Jedi, pk=pk)
form = self.form_class(request.POST ,instance=jedi)
if form.is_valid():
form.save()
msg_exito = f"Se actualizó con éxito el integrante {form.cleaned_data.get('nombre')}"
form = self.form_class(initial=self.initial)
return render(request, self.template_name, {'form':form,
'jedi': jedi,
'msg_exito': msg_exito})
return render(request, self.template_name, {"form": form})"""
"""class BorrarJedi(View):
template_name = 'proyectofinal/jedis.html'
def get(self, request, pk):
jedi = get_object_or_404(Jedi, pk=pk)
jedi.delete()
lista_jedis = Jedi.objects.all()
return render(request, self.template_name, {'lista_jedis': lista_jedis})"""
| matiaslopez9411/proyecto-final | proyectofinal/views.py | views.py | py | 4,289 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.shortcuts.render",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "proyectofinal.models.Jedi.objects.all",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "proyectofinal.models.Jedi.objects",
"line_number": 16,
"usage_type": "attri... |
73819276905 | import sys
import argparse
from pathlib import Path
base_dir = Path(__file__).resolve().parents[1]
sys.path.append(str(base_dir))
from utils import txt2iob
from transformers import BertJapaneseTokenizer
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Train BERT')
parser.add_argument('--path', type=str, help='data path')
parser.add_argument('--output_path', type=str, help='data path')
parser.add_argument('--tag', default=None, help='valid tag list : C,M')
args = parser.parse_args()
tag = args.tag.split(",") if args.tag is not None else None
tokenizer = BertJapaneseTokenizer.from_pretrained("bert-base-japanese-char")
with open(args.path, 'r') as f:
lines = [line for line in f.read().split('\n') if line != '']
output = '\n\n'.join(['\n'.join(['\t'.join(t) for t in line]) for line in txt2iob.doc2iob(lines, format=tokenizer.tokenize, tag_list=tag, bert=True)])
with open(args.output_path, 'w') as f:
f.write(output)
| ujiuji1259/NER | BERT/iob_for_bert.py | iob_for_bert.py | py | 1,010 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "sys.path.append",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
... |
21014356290 | # -*- coding:utf-8 -*-
# This file is part of Pyoro (A Python fan game).
#
# Metawars is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Metawars is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Metawars. If not, see <https://www.gnu.org/licenses/>
"""
Provide useful functions on pygame.surface.Surface.
Created on 18/08/2018.
"""
import pygame
__author__ = "RedbeanGit"
__repo__ = "https://github.com/RedbeanGit/Pyoro"
def resize_image(image, new_size):
"""
Resize a pygame surface by stretching its pixels.
:type image: pygame.surface.Surface
:param image: The surface to resize.
:type new_size: (tuple)
:param new_size: A (w, h) tuple where w and h are both integers.
:rtype: pygame.surface.Surface
:returns: A new pygame surface resized from the given one.
"""
if len(new_size) != 2:
return image
new_size = (int(new_size[0]), int(new_size[1]))
return pygame.transform.scale(image, new_size)
def invert_image(image, vertical, horizontal):
"""
Flip a pygame surface vertically and / or horizontally.
:type image: pygame.surface.Surface
:param image: The surface to flip.
:type vertical: bool
:param vertical: If True, flip the surface vertically.
:type horizontal: bool
:param horizontal: If True, flip the surface horizontally.
:rtype: pygame.surface.Surface
:returns: A new pygame surface flipped from the given one.
"""
return pygame.transform.flip(image, vertical, horizontal)
def stretch_image(image, new_size, border_size):
"""
Try to stretch a pygame surface without deforming it. This technique is
inspired by Android 9-patch. Only the center and borders of the image
can stretch, leaving the corners and the thickness of the borders
intact.
:type image: pygame.surface.Surface
:param image: The surface to resize.
:type new_size: (tuple)
:param new_size: A (w, h) tuple where w and h are both integers.
:type border_size: int
:param border_size: The thickness of the borders (kept after the
operation).
:rtype: pygame.surface.Surface
:returns: A new pygame surface resized from the given one.
"""
if len(new_size) != 2:
return image
new_size = (int(new_size[0]), int(new_size[1]))
if border_size <= new_size[0] / 2 and border_size <= new_size[1] / 2:
border_size = int(border_size)
else:
border_size = min(new_size) // 2
if image.get_alpha is None:
back = pygame.Surface(new_size).convert()
else:
back = pygame.Surface(new_size).convert_alpha()
side_length = (
image.get_size()[0] - border_size * 2,
image.get_size()[1] - border_size * 2,
)
new_side_length = (new_size[0] - border_size * 2, new_size[1] - border_size * 2)
back.blit(image.subsurface((0, 0), (border_size, border_size)).copy(), (0, 0))
back.blit(
pygame.transform.scale(
image.subsurface((border_size, 0), (side_length[0], border_size)).copy(),
(new_side_length[0], border_size),
),
(border_size, 0),
)
back.blit(
image.subsurface(
(side_length[0] + border_size, 0), (border_size, border_size)
).copy(),
(new_side_length[0] + border_size, 0),
)
back.blit(
pygame.transform.scale(
image.subsurface((0, border_size), (border_size, side_length[1])).copy(),
(border_size, new_side_length[1]),
),
(0, border_size),
)
back.blit(
pygame.transform.scale(
image.subsurface(
(border_size, border_size), (side_length[0], side_length[1])
),
(new_side_length[0], new_side_length[1]),
),
(border_size, border_size),
)
back.blit(
pygame.transform.scale(
image.subsurface(
(side_length[0] + border_size, border_size),
(border_size, side_length[1]),
).copy(),
(border_size, new_side_length[1]),
),
(new_side_length[0] + border_size, border_size),
)
back.blit(
image.subsurface(
(0, side_length[1] + border_size), (border_size, border_size)
).copy(),
(0, new_side_length[1] + border_size),
)
back.blit(
pygame.transform.scale(
image.subsurface(
(border_size, side_length[1] + border_size),
(side_length[0], border_size),
).copy(),
(new_side_length[0], border_size),
),
(border_size, new_side_length[1] + border_size),
)
back.blit(
image.subsurface(
(side_length[0] + border_size, side_length[1] + border_size),
(border_size, border_size),
).copy(),
(new_side_length[0] + border_size, new_side_length[1] + border_size),
)
return back
| RedbeanGit/Pyoro | src/gui/image_transformer.py | image_transformer.py | py | 5,427 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "pygame.transform.scale",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "pygame.transform",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "pygame.transform.flip",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "py... |
70563793064 | import os
import pickle
import numpy as np
# Modified from smplx code for FLAME
import torch
import torch.nn as nn
import torch.nn.functional as F
from pytorch3d.transforms import rotation_6d_to_matrix, matrix_to_rotation_6d
from skimage.io import imread
from loguru import logger
from flame.lbs import lbs
I = matrix_to_rotation_6d(torch.eye(3)[None].cuda())
def to_tensor(array, dtype=torch.float32):
if 'torch.tensor' not in str(type(array)):
return torch.tensor(array, dtype=dtype)
def to_np(array, dtype=np.float32):
if 'scipy.sparse' in str(type(array)):
array = array.todense()
return np.array(array, dtype=dtype)
class Struct(object):
def __init__(self, **kwargs):
for key, val in kwargs.items():
setattr(self, key, val)
def rot_mat_to_euler(rot_mats):
# Calculates rotation matrix to euler angles
# Careful for extreme cases of eular angles like [0.0, pi, 0.0]
sy = torch.sqrt(rot_mats[:, 0, 0] * rot_mats[:, 0, 0] +
rot_mats[:, 1, 0] * rot_mats[:, 1, 0])
return torch.atan2(-rot_mats[:, 2, 0], sy)
class FLAME(nn.Module):
"""
borrowed from https://github.com/soubhiksanyal/FLAME_PyTorch/blob/master/FLAME.py
Given FLAME parameters for shape, pose, and expression, this class generates a differentiable FLAME function
which outputs the a mesh and 2D/3D facial landmarks
"""
def __init__(self, config):
super(FLAME, self).__init__()
logger.info(f"[FLAME] Creating the 3DMM from {config.flame_geom_path}")
with open(config.flame_geom_path, 'rb') as f:
ss = pickle.load(f, encoding='latin1')
flame_model = Struct(**ss)
self.dtype = torch.float32
self.register_buffer('faces', to_tensor(to_np(flame_model.f, dtype=np.int64), dtype=torch.long))
# The vertices of the template model
self.register_buffer('v_template', to_tensor(to_np(flame_model.v_template), dtype=self.dtype))
# The shape components and expression
shapedirs = to_tensor(to_np(flame_model.shapedirs), dtype=self.dtype)
shapedirs = torch.cat([shapedirs[:, :, :config.num_shape_params], shapedirs[:, :, 300:300 + config.num_exp_params]], 2)
self.register_buffer('shapedirs', shapedirs)
# The pose components
num_pose_basis = flame_model.posedirs.shape[-1]
posedirs = np.reshape(flame_model.posedirs, [-1, num_pose_basis]).T
self.register_buffer('posedirs', to_tensor(to_np(posedirs), dtype=self.dtype))
#
self.register_buffer('J_regressor', to_tensor(to_np(flame_model.J_regressor), dtype=self.dtype))
parents = to_tensor(to_np(flame_model.kintree_table[0])).long();
parents[0] = -1
self.register_buffer('parents', parents)
self.register_buffer('lbs_weights', to_tensor(to_np(flame_model.weights), dtype=self.dtype))
self.register_buffer('l_eyelid', torch.from_numpy(np.load(f'{os.path.abspath(os.path.dirname(__file__))}/blendshapes/l_eyelid.npy')).to(self.dtype)[None])
self.register_buffer('r_eyelid', torch.from_numpy(np.load(f'{os.path.abspath(os.path.dirname(__file__))}/blendshapes/r_eyelid.npy')).to(self.dtype)[None])
# Register default parameters
self._register_default_params('neck_pose_params', 6)
self._register_default_params('jaw_pose_params', 6)
self._register_default_params('eye_pose_params', 12)
self._register_default_params('shape_params', config.num_shape_params)
self._register_default_params('expression_params', config.num_exp_params)
# Static and Dynamic Landmark embeddings for FLAME
mediapipe_lmk_embedding = np.load('flame/mediapipe/mediapipe_landmark_embedding.npz', allow_pickle=True, encoding='latin1')
lmk_embeddings = np.load(config.flame_lmk_path, allow_pickle=True, encoding='latin1')
lmk_embeddings = lmk_embeddings[()]
self.mediapipe_idx = mediapipe_lmk_embedding['landmark_indices'].astype(int)
self.register_buffer('mp_lmk_faces_idx', torch.from_numpy(mediapipe_lmk_embedding['lmk_face_idx'].astype(int)).to(torch.int64))
self.register_buffer('mp_lmk_bary_coords', torch.from_numpy(mediapipe_lmk_embedding['lmk_b_coords']).to(self.dtype).float())
self.register_buffer('lmk_faces_idx', torch.from_numpy(lmk_embeddings['static_lmk_faces_idx'].astype(int)).to(torch.int64))
self.register_buffer('lmk_bary_coords', torch.from_numpy(lmk_embeddings['static_lmk_bary_coords']).to(self.dtype).float())
self.register_buffer('dynamic_lmk_faces_idx', torch.from_numpy(np.array(lmk_embeddings['dynamic_lmk_faces_idx']).astype(int)).to(torch.int64))
self.register_buffer('dynamic_lmk_bary_coords', torch.from_numpy(np.array(lmk_embeddings['dynamic_lmk_bary_coords'])).to(self.dtype).float())
neck_kin_chain = []
NECK_IDX = 1
curr_idx = torch.tensor(NECK_IDX, dtype=torch.long)
while curr_idx != -1:
neck_kin_chain.append(curr_idx)
curr_idx = self.parents[curr_idx]
self.register_buffer('neck_kin_chain', torch.stack(neck_kin_chain))
    def _find_dynamic_lmk_idx_and_bcoords(self, vertices, pose, dynamic_lmk_faces_idx,
                                          dynamic_lmk_b_coords,
                                          neck_kin_chain, cameras, dtype=torch.float32):
        """
        Selects the face contour depending on the relative position of the head
        Input:
            vertices: N X num_of_vertices X 3
            pose: N X full pose (6D rotation representation per joint)
            dynamic_lmk_faces_idx: The list of contour face indexes
            dynamic_lmk_b_coords: The list of contour barycentric weights
            neck_kin_chain: The tree to consider for the relative rotation
            cameras: N X 3 X 3 rotation applied on top of the accumulated neck
                rotation (per-view landmark sliding, see inline comment below)
            dtype: Data type
        return:
            The contour face indexes and the corresponding barycentric weights
        """
        batch_size = vertices.shape[0]
        # Gather the 6D poses of every joint along the neck kinematic chain.
        aa_pose = torch.index_select(pose.view(batch_size, -1, 6), 1, neck_kin_chain)
        rot_mats = rotation_6d_to_matrix(aa_pose.view(-1, 6)).view([batch_size, -1, 3, 3])
        # Accumulate the chain rotations into a single head-relative rotation.
        rel_rot_mat = torch.eye(3, device=vertices.device, dtype=dtype).unsqueeze_(dim=0).expand(batch_size, -1, -1)
        for idx in range(len(neck_kin_chain)):
            rel_rot_mat = torch.bmm(rot_mats[:, idx], rel_rot_mat)
        rel_rot_mat = cameras @ rel_rot_mat # Cameras flips z and x, plus multiview needs different lmk sliding per view
        # Yaw angle in degrees (clamped to max 39); indexes the contour table.
        y_rot_angle = torch.round(torch.clamp(-rot_mat_to_euler(rel_rot_mat) * 180.0 / np.pi, max=39)).to(dtype=torch.long)
        neg_mask = y_rot_angle.lt(0).to(dtype=torch.long)
        mask = y_rot_angle.lt(-39).to(dtype=torch.long)
        # Map negative yaw onto the second half of the lookup table
        # (angles below -39 saturate at index 78).
        neg_vals = mask * 78 + (1 - mask) * (39 - y_rot_angle)
        y_rot_angle = (neg_mask * neg_vals + (1 - neg_mask) * y_rot_angle)
        # Select the per-angle contour faces and barycentric weights.
        dyn_lmk_faces_idx = torch.index_select(dynamic_lmk_faces_idx, 0, y_rot_angle)
        dyn_lmk_b_coords = torch.index_select(dynamic_lmk_b_coords, 0, y_rot_angle)
        return dyn_lmk_faces_idx, dyn_lmk_b_coords
    def _vertices2landmarks(self, vertices, faces, lmk_faces_idx, lmk_bary_coords):
        """
        Calculates landmarks by barycentric interpolation
        Input:
            vertices: torch.tensor NxVx3, dtype = torch.float32
                The tensor of input vertices
            faces: torch.tensor (N*F)x3, dtype = torch.long
                The faces of the mesh
            lmk_faces_idx: torch.tensor N X L, dtype = torch.long
                The tensor with the indices of the faces used to calculate the
                landmarks.
            lmk_bary_coords: torch.tensor N X L X 3, dtype = torch.float32
                The tensor of barycentric coordinates that are used to interpolate
                the landmarks
        Returns:
            landmarks: torch.tensor NxLx3, dtype = torch.float32
                The coordinates of the landmarks for each mesh in the batch
        """
        # Extract the indices of the vertices for each face
        # NxLx3
        batch_size, num_verts = vertices.shape[:2]
        device = vertices.device
        lmk_faces = torch.index_select(faces, 0, lmk_faces_idx.view(-1).to(torch.long)).view(batch_size, -1, 3)
        # Offset each batch element's vertex ids so they index into the
        # flattened (N*V, 3) vertex buffer below.
        lmk_faces += torch.arange(batch_size, dtype=torch.long, device=device).view(-1, 1, 1) * num_verts
        lmk_vertices = vertices.view(-1, 3)[lmk_faces].view(batch_size, -1, 3, 3)
        # Barycentric-weighted sum of the three triangle corners per landmark.
        landmarks = torch.einsum('blfi,blf->bli', [lmk_vertices, lmk_bary_coords])
        return landmarks
    def forward(self, shape_params, cameras, trans_params=None, rot_params=None, neck_pose_params=None, jaw_pose_params=None, eye_pose_params=None, expression_params=None, eyelid_params=None):
        """
        Input:
            trans_params: N X 3 global translation
            rot_params: N X 6 global rotation (6D representation) around the root joint of the kinematic tree (rotation is NOT around the origin!)
            neck_pose_params (optional): N X 6 rotation of the head vertices around the neck joint
            jaw_pose_params (optional): N X 6 rotation of the jaw
            eye_pose_params (optional): N X 12 rotations of left (parameters [0:6]) and right eyeball (parameters [6:12])
            shape_params (optional): N X number of shape parameters
            expression_params (optional): N X number of expression parameters
            eyelid_params (optional): N X 2 eyelid blendshape weights (column 0 left, column 1 right)
            cameras: per-view rotations forwarded to the dynamic contour selection
        return:
            vertices: N X V X 3
            lmk68: N X 68 landmarks X 3
            mp: N X MediaPipe landmarks X 3
        """
        batch_size = shape_params.shape[0]
        # Identity rotation in 6D representation; default for all pose params.
        # NOTE(review): .cuda() hard-codes GPU execution throughout this
        # method — confirm CPU operation is intentionally unsupported.
        I = matrix_to_rotation_6d(torch.cat([torch.eye(3)[None]] * batch_size, dim=0).cuda())
        if trans_params is None:
            trans_params = torch.zeros(batch_size, 3).cuda()
        if rot_params is None:
            rot_params = I.clone()
        if neck_pose_params is None:
            neck_pose_params = I.clone()
        if jaw_pose_params is None:
            jaw_pose_params = I.clone()
        if eye_pose_params is None:
            # Two eyeballs -> two stacked 6D identity rotations.
            eye_pose_params = torch.cat([I.clone()] * 2, dim=1)
        if shape_params is None:
            shape_params = self.shape_params.expand(batch_size, -1)
        if expression_params is None:
            expression_params = self.expression_params.expand(batch_size, -1)
        # Concatenate identity shape and expression parameters
        betas = torch.cat([shape_params, expression_params], dim=1)
        # The pose vector contains global rotation, and neck, jaw, and eyeball rotations
        full_pose = torch.cat([rot_params, neck_pose_params, jaw_pose_params, eye_pose_params], dim=1)
        # FLAME models shape and expression deformations as vertex offset from the mean face in 'zero pose', called v_template
        template_vertices = self.v_template.unsqueeze(0).expand(batch_size, -1, -1)
        # Use linear blendskinning to model pose roations
        vertices, _ = lbs(betas, full_pose, template_vertices,
                          self.shapedirs, self.posedirs,
                          self.J_regressor, self.parents,
                          self.lbs_weights, dtype=self.dtype)
        # Additive eyelid blendshapes (right uses weight column 1, left column 0).
        if eyelid_params is not None:
            vertices = vertices + self.r_eyelid.expand(batch_size, -1, -1) * eyelid_params[:, 1:2, None]
            vertices = vertices + self.l_eyelid.expand(batch_size, -1, -1) * eyelid_params[:, 0:1, None]
        # 68-point set: static embedding plus pose-dependent face contour.
        lmk_faces_idx = self.lmk_faces_idx.unsqueeze(dim=0).expand(batch_size, -1).contiguous()
        lmk_bary_coords = self.lmk_bary_coords.unsqueeze(dim=0).expand(batch_size, -1, -1).contiguous()
        dyn_lmk_faces_idx, dyn_lmk_bary_coords = self._find_dynamic_lmk_idx_and_bcoords(
            vertices, full_pose, self.dynamic_lmk_faces_idx,
            self.dynamic_lmk_bary_coords,
            self.neck_kin_chain, cameras, dtype=self.dtype)
        lmk_faces_idx = torch.cat([dyn_lmk_faces_idx, lmk_faces_idx], 1)
        lmk_bary_coords = torch.cat([dyn_lmk_bary_coords, lmk_bary_coords], 1)
        lmk68 = self._vertices2landmarks(vertices, self.faces, lmk_faces_idx, lmk_bary_coords)
        # MediaPipe landmark set from its own static embedding.
        mp_lmk_faces_idx = self.mp_lmk_faces_idx.unsqueeze(dim=0).expand(batch_size, -1).contiguous()
        mp_lmk_bary_coords = self.mp_lmk_bary_coords.unsqueeze(dim=0).expand(batch_size, -1, -1).contiguous()
        mp = self._vertices2landmarks(vertices, self.faces, mp_lmk_faces_idx, mp_lmk_bary_coords)
        # Apply the global translation to the vertices and both landmark sets.
        vertices = vertices + trans_params.unsqueeze(dim=1)
        lmk68 = lmk68 + trans_params.unsqueeze(dim=1)
        mp = mp + trans_params.unsqueeze(dim=1)
        return vertices, lmk68, mp
    def _register_default_params(self, param_fname, dim):
        """Register a frozen (non-trainable) all-zero parameter named *param_fname* of shape (1, dim)."""
        default_params = torch.zeros([1, dim], dtype=self.dtype, requires_grad=False)
        self.register_parameter(param_fname, nn.Parameter(default_params, requires_grad=False))
class FLAMETex(nn.Module):
    """
    Linear PCA texture model for FLAME.

    Chooses between the native FLAME texture space and a BFM-derived one
    based on the keys found in the npz file at config.tex_space_path. If a
    per-actor texture.png exists, it is used verbatim instead of the model.
    """
    def __init__(self, config):
        super(FLAMETex, self).__init__()
        tex_space = np.load(config.tex_space_path)
        # FLAME texture
        if 'tex_dir' in tex_space.files:
            mu_key = 'mean'
            pc_key = 'tex_dir'
            n_pc = 200
            scale = 1
        # BFM to FLAME texture
        else:
            mu_key = 'MU'
            pc_key = 'PC'
            n_pc = 199
            # presumably rescales the BFM basis to the 0..255 range used by
            # the FLAME branch — TODO confirm against the stored npz values.
            scale = 255.0
        texture_mean = tex_space[mu_key].reshape(1, -1)
        texture_basis = tex_space[pc_key].reshape(-1, n_pc)
        # Keep only the first config.tex_params principal components.
        n_tex = config.tex_params
        texture_mean = torch.from_numpy(texture_mean).float()[None, ...] * scale
        texture_basis = torch.from_numpy(texture_basis[:, :n_tex]).float()[None, ...] * scale
        # Optional fixed texture loaded from disk; overrides the PCA model.
        self.texture = None
        self.register_buffer('texture_mean', texture_mean)
        self.register_buffer('texture_basis', texture_basis)
        self.image_size = config.image_size
        self.check_texture(config)
    def check_texture(self, config):
        """Load <config.actor>/texture.png as the fixed texture if present."""
        path = os.path.join(config.actor, 'texture.png')
        if os.path.exists(path):
            # imread comes from an import outside this excerpt; RGB channels
            # only, normalized to [0, 1].
            self.texture = torch.from_numpy(imread(path)).permute(2, 0, 1).cuda()[None, 0:3, :, :] / 255.0
    def forward(self, texcode):
        """Decode a texture code into an image resized to self.image_size.

        Returns the fixed on-disk texture when one was loaded, otherwise
        mean + basis @ texcode reshaped to 512x512 and channel-swapped.
        """
        if self.texture is not None:
            return F.interpolate(self.texture, self.image_size, mode='bilinear')
        # Linear texture model: mean plus weighted principal components.
        texture = self.texture_mean + (self.texture_basis * texcode[:, None, :]).sum(-1)
        texture = texture.reshape(texcode.shape[0], 512, 512, 3).permute(0, 3, 1, 2)
        texture = F.interpolate(texture, self.image_size, mode='bilinear')
        # Swap channel order (BGR <-> RGB).
        texture = texture[:, [2, 1, 0], :, :]
        return texture / 255.
| Zielon/metrical-tracker | flame/FLAME.py | FLAME.py | py | 14,729 | python | en | code | 188 | github-code | 36 | [
{
"api_name": "pytorch3d.transforms.matrix_to_rotation_6d",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.eye",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.float32",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name"... |
32920662032 | # -*- coding: utf-8 -*-
import copy
from typing import List
from flowlauncher import FlowLauncher
from plugin.templates import *
from plugin.devtoys import *
class Main(FlowLauncher):
    """Flow Launcher entry point that surfaces DevToys tools as search results."""

    def __init__(self):
        # The result queue must be a per-instance attribute: a class-level
        # list would be shared by every Main instance and accumulate stale
        # results across invocations. It must exist before
        # FlowLauncher.__init__ runs, because that call dispatches the
        # incoming JSON-RPC request (which may invoke query()).
        self.messages_queue = []
        super().__init__()

    def sendNormalMess(self, title: str, subtitle: str):
        """Append a plain (non-actionable) result entry to the queue."""
        message = copy.deepcopy(RESULT_TEMPLATE)
        message["Title"] = title
        message["SubTitle"] = subtitle
        self.messages_queue.append(message)

    def sendActionMess(self, title: str, subtitle: str, icopath: str, method: str, value: List):
        """Append a result entry that invokes *method* with *value* when selected.

        An empty icopath keeps the template's default icon.
        """
        # information
        message = copy.deepcopy(RESULT_TEMPLATE)
        message["Title"] = title
        message["SubTitle"] = subtitle
        if icopath != "":
            message["IcoPath"] = icopath
        # action
        action = copy.deepcopy(ACTION_TEMPLATE)
        action["JsonRPCAction"]["method"] = method
        action["JsonRPCAction"]["parameters"] = value
        message.update(action)
        self.messages_queue.append(message)

    def query(self, param: str) -> List[dict]:
        """Return result entries for DevToys tools whose key or display name contains the query."""
        q = param.strip().lower()
        for tool in DEVTOYS_TOOLS:
            key = tool["tool"]
            name = tool["name"]
            icon = tool["icon"] if "icon" in tool else ""
            if q in key.lower() or q in name.lower():
                self.sendActionMess(name, key, icon, "startDevtoysTool", [key])
        return self.messages_queue

    def startDevtoysTool(self, tool):
        """JSON-RPC action target: launch the given DevToys tool."""
        startTool(tool)
| umi-uyura/Flow.Launcher.Plugin.DevToysLauncher | plugin/ui.py | ui.py | py | 1,462 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "flowlauncher.FlowLauncher",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "copy.deepcopy",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "copy.deepcopy",
... |
30993271589 | from django.urls import path
from myproject.apps.board import views
# URL routes for the board app; all views live in myproject.apps.board.views.
urlpatterns = [
    # path('boards/', views.boards, name='all_boards'),
    # Board list (class-based view replacing the function view above).
    path('boards/', views.BoardsView.as_view(), name='all_boards'),
    # topic
    path('board/<int:pk>/topics', views.topics, name='all_topics'),
    path('board/<int:pk>/topics/new', views.new_topic, name='new_topic'),
    # post
    path('board/<int:pk>/topic/<int:topic_pk>/posts', views.posts, name='all_posts'),
    path('board/<int:pk>/topic/<int:topic_pk>/posts/new', views.new_post, name='new_post'),
    # path('board/<int:pk>/topic/<int:topic_pk>/posts/edit',views.PostUpdateView.as_view())
    # url(r'^board/(?P<pk>\d+)/topics/(?P<topic_pk>\d+)/posts/(?P<post_pk>\d+)/edit/$',
    #     boards_views.PostUpdateView.as_view(), name='edit_post'),
]
| SunA0/django_learn | myproject/apps/board/urls.py | urls.py | py | 811 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "myproject.apps.board.views.BoardsView.as_view",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "myproject.apps.board.views.BoardsView",
"line_number": 6,
"usage_type": "att... |
73412902183 | from flask import Flask, jsonify, redirect
import feedparser
app = Flask(__name__)
# Parse an RSS feed and flatten every item's title and link into one list:
# [title1, link1, title2, link2, ...].
def getHeadlines( rss_url ):
    parsed = feedparser.parse(rss_url)
    results = []
    for entry in parsed['items']:
        results.extend((entry['title'], entry['link']))
    return results
@app.route('/', methods=['GET'])
def home():
    # Landing page; the HTML below is the literal response body.
    return '''<h1>Welcome to News Feeder API</h1>
<p>A prototype API for national and international news feed getter.</p>'''
@app.route('/resources/documentation', methods=['GET'])
def documentation():
    """Redirect (303 See Other) to the hosted SwaggerHub API documentation."""
    docs_url = 'https://app.swaggerhub.com/apis/daffaadevvv/NewsFeederAPI/1.0.0'
    return redirect(docs_url, code=303)
@app.route('/resources/news/internasional', methods=['GET'])
def indexinter():
    """Return combined [title, link, ...] entries from international RSS feeds as JSON."""
    # Feed label -> RSS URL; the labels are informational only.
    news_int_urls = {
        'rtnews': 'https://www.rt.com/rss/',
        'googlenews': 'https://news.google.com/news/rss/?hl=en&ned=us&gl=US'
    }
    all_headlines = []
    # Only the URLs are needed, so iterate .values() instead of .items()
    # with an unused key; the leftover debug print was removed.
    for url in news_int_urls.values():
        all_headlines.extend(getHeadlines(url))
    return jsonify(all_headlines)
@app.route('/resources/news/dalamnegeri', methods=['GET'])
def indexnat():
    """Return combined [title, link, ...] entries from national (Indonesian) RSS feeds as JSON."""
    # Feed label -> RSS URL; the labels are informational only.
    news_nat_urls = {
        'republikanews': 'https://www.republika.co.id/rss',
        'detiknews': 'http://rss.detik.com/index.php/detikcom'
    }
    all_headlines = []
    # Only the URLs are needed, so iterate .values() instead of .items()
    # with an unused key; the leftover debug print was removed.
    for url in news_nat_urls.values():
        all_headlines.extend(getHeadlines(url))
    return jsonify(all_headlines)
if __name__ == '__main__':
    # Development server only — debug mode must not be enabled in production.
    app.run(debug = True)
{
"api_name": "flask.Flask",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "feedparser.parse",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "flask.redirect",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_... |
28295137025 | from mmsystem import Goldbeter_1995
from ssystem import SSystem
from sigmoidal import Sigmoidal
import matplotlib.pyplot as plt
import numpy as np
# Reference mechanistic model; its simulation below is commented out, so the
# instance is currently unused.
mm_model = Goldbeter_1995()
# Number of integration steps and step size shared by every run below.
steps = 50
delta = 0.01
#states, velocities = mm_model.run(state=initial_state, velocity=initial_velocity, delta=0.1, steps=3)
#for i in range(states.shape[1]):
#    plt.plot(states[:,i], label="MM X {}".format(i+1))
# Ground-truth 4-variable S-system ("trainer") with hand-picked kinetic
# orders (g, h) and rate constants (alpha, beta).
trainer = SSystem(n_vars=4)
trainer.g = np.array([[0, 0, -0.8, 0], [0.5, 0, 0, 0], [0, 0.75, 0, 0], [0.5, 0, 0, 0]])
trainer.h = np.array([[0.5, 0, 0, 0], [0, 0.75, 0, 0], [0, 0, 0.5, 0.2], [0, 0, 0, 0.8]])
trainer.alpha = np.array([12., 8., 3., 2.])
trainer.beta = np.array([10., 3., 5., 6.])
all_states = []
all_velocities = []
# Sample random initial conditions until one finite (NaN-free) trajectory is
# collected; with the `< 1` bound this gathers exactly one valid run.
while len(all_states) < 1:
    initial_state = np.random.random(4)
    initial_velocity = np.random.random(4)
    states, velocities = trainer.run(state=initial_state, velocity=initial_velocity, delta=delta, steps=steps)
    if not np.any(np.isnan(states)) and not np.any(np.isnan(velocities)):
        all_states.append(states)
        all_velocities.append(velocities)
all_states = np.vstack(all_states)
all_velocities = np.vstack(all_velocities)
# Plot each state variable of the trainer trajectory.
for i in range(states.shape[1]):
    plt.plot(states[:,i], label="Trainer X {}".format(i+1))
#ssystem = SSystem(n_vars=4)
#ssystem.solve(all_states, all_velocities, iterations=1)
#states, velocities = ssystem.run(state=initial_state, velocity=initial_velocity, delta=delta, steps=steps)
#for i in range(states.shape[1]):
#    plt.plot(states[:,i], label="S-Sys X {}".format(i+1))
# Fit the sigmoidal (neural-network) model to the trainer's data, then
# re-simulate from the same initial condition for visual comparison.
nnsystem = Sigmoidal(n_vars=4)
nnsystem.solve(all_states, all_velocities)
states, velocities = nnsystem.run(state=initial_state, velocity=initial_velocity, delta=delta, steps=steps)
for i in range(states.shape[1]):
    plt.plot(states[:,i], label="S-Sys X {}".format(i+1))
plt.legend()
plt.show()
| warut-vijit/modelsel | main.py | main.py | py | 1,856 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "mmsystem.Goldbeter_1995",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "ssystem.SSystem",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.array",
... |
18795434468 | import networkx as nx
import matplotlib.pyplot as plt
import plotly.express as px
import webbrowser
import folium
from graph import *
from node import *
def isNodeValid(nodeName, graph):
    # A name is valid iff some node in the graph carries exactly that name.
    return any(node.name == nodeName for node in graph.nodeList)
def findNodeByName(nodeName, graph):
    # First node whose name matches; None when absent, matching the
    # original implementation's implicit fall-through return.
    return next((node for node in graph.nodeList if node.name == nodeName), None)
def aStar(startName, goalName, graph):
    """A* search from startName to goalName over graph.

    Uses the haversine distance to the goal as the heuristic h(n). Returns a
    Node whose .path attribute holds the node sequence of the found route.
    NOTE(review): if the queue empties without reaching the goal the function
    returns None implicitly — confirm callers handle an unreachable goal.
    """
    start = findNodeByName(startName, graph)
    start.path = [start]
    goal = findNodeByName(goalName, graph)
    queue = []
    queue.append(start)
    while len(queue) > 0:
        # Pop the node with the lowest f(n) (the queue is sorted below).
        current = queue.pop(0)
        # Check if current node is goal
        if current == goal:
            return current
        listNewNode = []
        for neighbor in current.neighbors:
            # h(n): haversine distance to goal; g(n): cost so far plus edge.
            hn = neighbor.calculateHaversine(goal)
            gn = current.gn + current.calculateHaversine(neighbor)
            fn = hn + gn
            # Wrap the neighbor in a fresh Node carrying the extended path.
            newNode = Node(current.name + " -> " + neighbor.name, neighbor.x, neighbor.y)
            newNode.path = current.path + [neighbor]
            newNode.setValue(gn, hn, fn)
            # Drop nodes already on this path from the neighbor list so the
            # search does not double back along its own route.
            newNode.neighbors = neighbor.removeNeighbor(newNode.path)
            listNewNode.append(newNode)
            # h(n) == 0 means this neighbor is the goal itself.
            if hn == 0:
                return newNode
        # Prepend the expansions, then keep the whole queue ordered by f(n).
        queue = listNewNode + queue
        queue.sort(key=lambda x: x.fn)
def displayGraph(graph, result = Node()):
    """Draw the graph with networkx/matplotlib; edges on result.path are red.

    Edge labels show the haversine distance rounded to two decimals.
    NOTE(review): Node() as a default argument is evaluated once at import
    time and shared across calls; it is only read here (result.path), but a
    None default would be the safer idiom — confirm Node().path is empty.
    """
    g = nx.Graph()
    for node in graph.nodeList:
        g.add_node(node.name)
        for neighbor in node.neighbors:
            # Solution-path edges in red, all others in black.
            if neighbor in result.path and node in result.path:
                g.add_edge(node.name, neighbor.name, color='r', weight= round(node.calculateHaversine(neighbor), 2))
            else:
                g.add_edge(node.name, neighbor.name, color='black', weight= round(node.calculateHaversine(neighbor), 2))
    pos = nx.spring_layout(g)
    edges,colors = zip(*nx.get_edge_attributes(g, 'color').items())
    nx.draw(g, pos, edgelist=edges, edge_color=colors, with_labels = True, font_weight = 'bold')
    edge_weight = nx.get_edge_attributes(g, 'weight')
    nx.draw_networkx_edge_labels(g, pos, edge_labels = edge_weight)
    plt.show()
def displayMap(graph, start, goal, result, name):
    """Render the graph on a folium map, save it as <name>.html and open it.

    The start marker is red, the goal marker green, other nodes default
    blue; edges on result.path are drawn red, the rest blue, with the
    haversine distance as the popup text.
    """
    # Center the map on the start node. (An unused goalNode lookup was
    # removed: the goal is matched by name inside the loop below.)
    startNode = graph.findNodeByName(start)
    m = folium.Map(location=[startNode.x, startNode.y], zoom_start=50)
    for node in graph.nodeList:
        if node.name == start:
            folium.Marker([node.x, node.y], popup=node.name, icon=folium.Icon(color="red")).add_to(m)
        elif node.name == goal:
            folium.Marker([node.x, node.y], popup=node.name, icon=folium.Icon(color="green")).add_to(m)
        else:
            folium.Marker([node.x, node.y], popup=node.name).add_to(m)
        for neighbor in node.neighbors:
            distance = node.calculateHaversine(neighbor)
            # Solution-path edges in red, all others in blue.
            if neighbor in result.path and node in result.path:
                folium.PolyLine(locations=[[node.x, node.y], [neighbor.x, neighbor.y]], color="red", weight=2.5, opacity=1, popup= str(distance)).add_to(m)
            else:
                folium.PolyLine(locations=[[node.x, node.y], [neighbor.x, neighbor.y]], color="blue", weight=2.5, opacity=1, popup= str(distance)).add_to(m)
    name += ".html"
    m.save(name)
    webbrowser.open_new_tab(name)
| febryanarota/Tucil-3-IF2122 | src/aStar.py | aStar.py | py | 3,930 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "graph.nodeList",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "graph.nodeList",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "networkx.Graph",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "graph.nodeList... |
8309674493 | from bson import ObjectId
# noinspection PyProtectedMember
from motor.motor_asyncio import AsyncIOMotorCollection
from task_tracker_backend.dataclasses import UserData
from task_tracker_backend.task_factory import TaskFactory
class User:
    """Thin wrapper around a user document in MongoDB (Motor async driver)."""
    def __init__(
        self, users_collection: AsyncIOMotorCollection, tasks_collection: AsyncIOMotorCollection, _id: ObjectId
    ) -> None:
        # Name-mangled attributes keep the collections and id private.
        self.__id = _id
        self.__users_collection = users_collection
        self.__tasks_collection = tasks_collection
    @property
    def id(self) -> ObjectId:
        """MongoDB ObjectId identifying this user."""
        return self.__id
    @property
    def task_factory(self) -> TaskFactory:
        """Factory for this user's tasks; a new instance is built on each access."""
        return TaskFactory(self.__tasks_collection, self.__id)
    @property
    async def data(self) -> UserData:
        # Async property: callers must write `await user.data`. The
        # projection excludes the internal _id and the password hash.
        return UserData.parse_obj(await self.__users_collection.find_one({'_id': self.__id}, {'_id': 0, 'password': 0}))
    async def update_data(self, data: UserData) -> None:
        """Persist the given UserData fields onto this user's document via $set."""
        await self.__users_collection.update_one({'_id': self.__id}, {'$set': data.dict()})
| smthngslv/task-tracker-backend | task_tracker_backend/user.py | user.py | py | 1,031 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "motor.motor_asyncio.AsyncIOMotorCollection",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "bson.ObjectId",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "bson.ObjectId",
"line_number": 18,
"usage_type": "name"
},
{
"api_name":... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.