id stringlengths 2 8 | text stringlengths 16 264k | dataset_id stringclasses 1 value |
|---|---|---|
1887619 | import json
from django.http import HttpResponse
from django.conf import settings
def render_json(data, code=200):
    """Serialize *data* into a JSON HTTP response envelope.

    The body has the shape ``{"code": <code>, "data": <data>}``.

    :param data: JSON-serializable payload placed under the ``data`` key.
    :param code: application status code echoed under ``code``; 200 (the
        default) means success.
    :return: a Django ``HttpResponse`` carrying the JSON body.
    """
    result = {
        'data': data,
        'code': code,
    }
    if settings.DEBUG:
        # Pretty-print in development so responses are easy to read.
        json_str = json.dumps(
            result,
            indent=4,
            ensure_ascii=False,  # keep non-ASCII text readable, not \uXXXX-escaped
            sort_keys=True,
        )
    else:
        # Compact output for production.
        json_str = json.dumps(result, ensure_ascii=False)
    # BUG FIX: declare the JSON MIME type explicitly; Django's HttpResponse
    # defaults to text/html, which mislabels this JSON payload.
    return HttpResponse(json_str, content_type='application/json')
| StarcoderdataPython |
3575729 | <gh_stars>1-10
# Copyright 2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from .hydrabus import Hydrabus
class Utils:
    """Wrapper exposing the general-purpose utility commands of hydrafw.

    :param port: The port name
    :type port: str
    """

    def __init__(self, port=""):
        self._logger = logging.getLogger(__name__)
        self._hydrabus = Hydrabus(port)
        self._hydrabus.flush_input()

    @property
    def adc(self):
        """Sample the ADC once.

        :return: ADC value (10 bits)
        :rtype: int
        """
        self._hydrabus.write(b"\x14")
        raw = self._hydrabus.read(2)
        return int.from_bytes(raw, byteorder="big")

    def continuous_adc(self):
        """Stream ADC samples until interrupted with Ctrl-C."""
        try:
            self._hydrabus.write(b"\x15")
            while True:
                _ = self._hydrabus.read(2)
        except KeyboardInterrupt:
            self._hydrabus.write(b"\x00")
            self._hydrabus.reset()
            return True

    def frequency(self):
        """Measure the frequency input.

        :return: (frequency, duty cycle)
        :rtype: tuple
        """
        self._hydrabus.write(b"\x16")
        raw_freq = self._hydrabus.read(4)
        raw_duty = self._hydrabus.read(4)
        return (
            int.from_bytes(raw_freq, byteorder="little"),
            int.from_bytes(raw_duty, byteorder="little"),
        )

    def close(self):
        """Leave BBIO mode and close the channel, resetting Hydrabus."""
        self._hydrabus.exit_bbio()
        self._hydrabus.close()
| StarcoderdataPython |
11218347 | from __future__ import absolute_import
from __future__ import unicode_literals
import time
from functools import wraps
from celery.task import periodic_task
from corehq.util.datadog import statsd, datadog_logger
from corehq.util.decorators import ContextDecorator
from corehq.util.soft_assert import soft_assert
from corehq.util.datadog.utils import bucket_value
from corehq.util.timer import TimingContext
def datadog_gauge_task(name, fn, run_every, enforce_prefix='commcare'):
    """Register a periodic celery task that reports ``fn()`` as a datadog gauge.

    To publish a gauge on a schedule based on the result of a function,
    add to your app's tasks.py::

        my_calculation = datadog_gauge_task('my.datadog.metric', my_calculation_function,
                                            run_every=crontab(minute=0))
    """
    _enforce_prefix(name, enforce_prefix)
    gauge = _DatadogGauge(name, fn, run_every)
    return gauge.periodic_task()
def datadog_histogram(name, value, enforce_prefix='commcare', tags=None):
    """Record *value* in the datadog histogram *name*.

    Tracks the statistical distribution of a set of values over a statsd
    flush period; datadog actually submits this as multiple metrics.
    """
    _datadog_record(statsd.histogram, name, value, enforce_prefix, tags)
def datadog_gauge(name, value, enforce_prefix='commcare', tags=None):
    """Submit *value* as a datadog GAUGE metric.

    Each point in the stored timeseries is the last gauge value submitted
    for the metric during that statsd flush period.
    """
    _datadog_record(statsd.gauge, name, value, enforce_prefix, tags)
def datadog_counter(name, value=1, enforce_prefix='commcare', tags=None):
    """Increment the event counter *name* by *value* (default 1).

    Stored as a RATE type in the datadog web application: each point in the
    timeseries is a time-normalized delta of the counter over the statsd
    flush period.
    """
    _datadog_record(statsd.increment, name, value, enforce_prefix, tags)
def _datadog_record(fn, name, value, enforce_prefix='commcare', tags=None):
    """Validate the metric prefix, then submit via *fn*, swallowing errors.

    Metric submission must never take down the caller, so any failure is
    logged and suppressed.
    """
    _enforce_prefix(name, enforce_prefix)
    try:
        fn(name, value, tags=tags)
    except Exception:
        datadog_logger.exception('Unable to record Datadog stats')
def datadog_bucket_timer(metric, tags, timing_buckets, callback=None):
    """Create a context manager that times its body and reports to datadog.

    The reported counter carries a 'duration' tag naming the predefined
    timing bucket the measurement fell into (see ``bucket_value``).

    Example::

        timer = datadog_bucket_timer('commcare.some.special.metric', tags=[
            'type:{}'.format(type),
        ], timing_buckets=(.001, .01, .1, 1, 10, 100))
        with timer:
            some_special_thing()

    This produces a datadog counter metric whose 'duration' tag takes one of
    lt_0.001, lt_0.01, lt_0.1, lt_001, lt_010, lt_100, or over_100.

    :param metric: Name of the datadog metric (must start with 'commcare.')
    :param tags: Datadog tags to include
    :param timing_buckets: sequence of numbers representing time thresholds, in seconds
    :return: A context manager that performs the timing and sends the metric
    """
    timer = TimingContext()
    original_stop = timer.stop

    def _stop_and_report(name=None):
        # Run the real stop first so timer.duration is final, then report.
        original_stop(name)
        if callback:
            callback(timer.duration)
        duration_tag = 'duration:%s' % bucket_value(timer.duration, timing_buckets, 's')
        datadog_counter(metric, tags=list(tags) + [duration_tag])

    # Patch the instance so exiting the context triggers the report.
    timer.stop = _stop_and_report
    return timer
class _DatadogGauge(object):
    """Pairs a metric name with a producer function and a celery schedule."""

    def __init__(self, name, fn, run_every):
        self.name = name
        self.fn = fn
        self.run_every = run_every

    def periodic_task(self):
        """Build the periodic celery task that publishes the gauge."""
        @periodic_task(serializer='pickle', queue='background_queue', run_every=self.run_every,
                       acks_late=True, ignore_result=True)
        @wraps(self.fn)
        def inner(*args, **kwargs):
            statsd.gauge(self.name, self.fn(*args, **kwargs))

        return inner
def _enforce_prefix(name, prefix):
    """Soft-assert that *name* starts with *prefix* (skipped when falsy)."""
    message = (
        "Did you mean to call your gauge 'commcare.{}'? "
        "If you're sure you want to forgo the prefix, you can "
        "pass enforce_prefix=None".format(name))
    has_prefix = not prefix or name.split('.')[0] == prefix
    soft_assert(fail_if_debug=True).call(has_prefix, message)
class datadog_track_errors(ContextDecorator):
    """Record success or failure of a code block in datadog.

    E.g. this logs to commcare.myfunction.succeeded when the function
    completes normally, and to commcare.myfunction.failed when it raises::

        @datadog_track_errors('myfunction')
        def myfunction():
            pass
    """

    def __init__(self, name, duration_buckets=None):
        self.succeeded_name = "commcare.{}.succeeded".format(name)
        self.failed_name = "commcare.{}.failed".format(name)
        self.duration_buckets = duration_buckets
        self.timer_start = None

    def __enter__(self):
        # Only pay for timing when duration bucketing was requested.
        if self.duration_buckets:
            self.timer_start = time.time()

    def __exit__(self, exc_type, exc_val, exc_tb):
        tags = None
        if self.duration_buckets:
            elapsed = time.time() - self.timer_start
            bucket = bucket_value(elapsed, self.duration_buckets, unit='s')
            tags = ['duration:{}'.format(bucket)]
        counter_name = self.failed_name if exc_type else self.succeeded_name
        datadog_counter(counter_name, tags=tags)
| StarcoderdataPython |
58976 | """
qoc - a directory for the main package
"""
from .core import (evolve_lindblad_discrete,
grape_lindblad_discrete,
evolve_schroedinger_discrete,
grape_schroedinger_discrete,)
# Public API of the qoc package: exactly the four solvers imported from .core.
__all__ = [
"evolve_lindblad_discrete",
"grape_lindblad_discrete",
"evolve_schroedinger_discrete",
"grape_schroedinger_discrete",
]
| StarcoderdataPython |
6699306 | <gh_stars>100-1000
import logging
import datetime
from directory_utilities import validate_or_make_directory
# One log file per day, named after today's date, under ../logs/.
date = "{:%Y-%m-%d}".format(datetime.datetime.now())
log_file_string = "../logs/{}.log".format(date)
# Ensure the logs directory exists before basicConfig tries to open the file.
validate_or_make_directory(log_file_string)
logging.basicConfig(filename=log_file_string, level=logging.WARNING, format="%(asctime)s - %(levelname)s: %(message)s",
datefmt="%Y/%m/%d %I:%M:%S %p")
# NOTE(review): this binds the *module* object, not a Logger instance; calls
# like logger.warning(...) work, but consider logging.getLogger(__name__)
# instead -- verify no importer relies on module-only attributes first.
logger = logging
| StarcoderdataPython |
8121340 | <gh_stars>0
from socket import socket,AF_INET,SOCK_DGRAM
import sys, random, traceback
from datetime import datetime
from threading import Thread
from time import sleep
# Unreliable UDP relay between Alice and Bob. Usage:
#   relay.py <my_port> <alice_ip> <alice_port> <mode>
# mode 1 = forward everything; 2 = randomly drop; 3 = randomly delay; 4 = both.
MY_PORT = int(sys.argv[1])
ALICE_IP = sys.argv[2]
ALICE_PORT = int(sys.argv[3])
ALICE_ADDR = (ALICE_IP, ALICE_PORT)
MODE = int(sys.argv[4])

s = socket(AF_INET, SOCK_DGRAM)
s.bind(('', MY_PORT))


def send(data, addr):
    """Forward *data* to the peer opposite the sender *addr*."""
    if addr != ALICE_ADDR:
        s.sendto(data, ALICE_ADDR)
        print(f'Forwarded {data} from {addr} to {ALICE_ADDR}')
    else:
        s.sendto(data, BOB_ADDR)
        print(f'Forwarded {data} from {addr} to {BOB_ADDR}')


def delayed(data, addr):
    """Sleep up to 5 seconds, then forward (run on a worker thread)."""
    delay = random.randrange(5000) / 1000
    print(f'Delaying! for {delay} seconds...')
    sleep(delay)
    send(data, addr)


random.seed(int(datetime.now().strftime("%f")))

# 100 means the behavior never triggers (randrange(100) < 100 / > 100).
DROP_RATE = 100
DELAY_RATE = 100
if MODE == 1:
    print('Playing nice')
if MODE in (2, 4):
    DROP_RATE = random.randrange(99)
    print(f'Dropping {100-DROP_RATE}%')
if MODE in (3, 4):
    DELAY_RATE = random.randrange(100)
    print(f'Delaying {100-DELAY_RATE}%')

# First datagram establishes Bob's address; oversized (>100B) packets drop.
data, BOB_ADDR = s.recvfrom(101)
addr = BOB_ADDR
while True:
    try:
        drop = random.randrange(100)
        if len(data) <= 100 and drop < DROP_RATE:
            print(f'Phiiiii, no drop.... {drop}')
            delay = random.randrange(100)
            if delay > DELAY_RATE:
                print(f'Good night.... {delay} {DELAY_RATE}')
                worker = Thread(target=delayed, args=(data, addr,))
                worker.start()
            else:
                print(f'Yay, no sleep.... {delay} {DELAY_RATE}')
                send(data, addr)
        elif len(data) > 100:
            print('Dropped! too big...')
        else:
            print('Dropped! randomly... ', str(drop))
    except Exception:
        traceback.print_exc()
    finally:
        data, addr = s.recvfrom(100)
| StarcoderdataPython |
6699642 | import os
import igem_wikisync as sync
# Run iGEM WikiSync with its configuration taken entirely from environment
# variables; any unset variable is passed through as None.
sync.run(
team = os.environ.get('WIKISYNC_TEAM'),
src_dir = os.environ.get('WIKISYNC_SOURCE'),
build_dir = os.environ.get('WIKISYNC_BUILD'),
poster_mode = os.environ.get('WIKISYNC_POSTER')  # NOTE(review): raw string from env, not a bool -- confirm sync.run accepts that
)
| StarcoderdataPython |
1902150 | <reponame>254Davidhashisoma/blog
import unittest
from app.models import Post, User, Comment
class TestPost(unittest.TestCase):
    """Instantiation tests for the User, Post and Comment models."""

    def setUp(self):
        # BUG FIX: the original assigned ``self.user_Collins`` but every later
        # reference used ``self.user_Hashisoma`` (and one used the undefined
        # ``self.user_George``), raising AttributeError. Unified on
        # ``user_Hashisoma`` to match the data and the assertions.
        self.user_Hashisoma = User(first_name="David",
                                   last_name="Hashisoma",
                                   username="@Hashi",
                                   password="<PASSWORD>",
                                   email="<EMAIL>")
        self.new_post = Post(post_title="Test Title",
                             post_content="This is a great move. I love blogging!",
                             user_id=self.user_Hashisoma.id)
        self.new_comment = Comment(comment="Great one!",
                                   post_id=self.new_post.id,
                                   user_id=self.user_Hashisoma.id)

    def test_instance(self):
        """The fixtures are instances of their respective model classes."""
        self.assertTrue(isinstance(self.user_Hashisoma, User))
        self.assertTrue(isinstance(self.new_post, Post))
        self.assertTrue(isinstance(self.new_comment, Comment))
#test posting change qoutes | StarcoderdataPython |
1900237 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-12-12 00:18
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Generated by Django 1.11.1: restrict Workshops.session to 1-3 (default 1)."""

    dependencies = [
        ('registration', '0024_auto_20171210_1808'),
    ]

    operations = [
        migrations.AlterField(
            model_name='workshops',
            name='session',
            field=models.IntegerField(choices=[(1, '1'), (2, '2'), (3, '3')], default=1),
        ),
    ]
| StarcoderdataPython |
# Read the problem input. n and m are the advertised sizes (parsed but not
# used below -- the code relies on the actual token counts instead).
n, m = input().split()
# The sequence of tokens to score.
arr = list(input().split())
# A: values worth +1 happiness each; B: values worth -1 each (see happiness()).
A = set(input().split())
B = set(input().split())
def happiness(array, happy_set, unhappy_set):
    """Return the happiness score of *array*.

    Each element found in *happy_set* contributes +1 and each element found
    in *unhappy_set* contributes -1; *happy_set* wins if an element is in
    both. Elements in neither set contribute 0.
    """
    score = 0
    for item in array:
        # Idiomatic membership tests instead of calling __contains__ directly.
        if item in happy_set:
            score += 1
        elif item in unhappy_set:
            score -= 1
    return score
print(happiness(arr, A, B))
| StarcoderdataPython |
5025015 | <filename>ex0020.py<gh_stars>0
# Exercise: read four student names and print a random presentation order.
# (Removed a commented-out triple-quoted duplicate of the input block.)
import random

aluno1 = input("nome do primeiro aluno ")
aluno2 = input("nome do aluno dois ")
aluno3 = input("nome do terceiro aluno ")
aluno4 = input("nome do quarto aluno")

lista = [aluno4, aluno3, aluno2, aluno1]
# BUG FIX: random.choices(lista) draws a single element (with replacement),
# so "ordem" was a one-name list; random.sample with k=len(lista) yields a
# full shuffled ordering of all four students, as the exercise intends.
ordem = random.sample(lista, k=len(lista))
print("A ordem das apresentações será: {}".format(ordem))
| StarcoderdataPython |
1925047 | import h5py
import matplotlib.pyplot as plt
import numpy as np
import os
import os.path
import tensorflow as tf
from keras.backend import floatx
from keras.layers import Conv1D, Conv2D, Dense
from keras.layers.core import Flatten, Reshape
from keras.models import load_model, Sequential
from keras import optimizers
from scipy import io, signal
from sys import argv, exit
tf.logging.set_verbosity(tf.logging.WARN)
# NOTE(review): this second call overrides the WARN level set on the previous
# line, so the effective TF Python-side verbosity is INFO.
tf.logging.set_verbosity(tf.logging.INFO)
# Quiet TensorFlow's C++ startup logging (level '2' -- presumably suppresses
# INFO and WARNING messages; verify against the TF docs for this version).
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
######################################################
def main():
# Train a depth-prediction model from raw audio, or load a saved one, then
# evaluate it on the test split. (Python 2 print statements throughout.)
# files
model_file = 'model_ball_rawA.h5'
sets_file = 'ball_data2_sets.h5'
if not os.path.isfile(model_file):
print "building model..."
path = os.getcwd()+'/'
with h5py.File(path+sets_file, 'r') as sets:
# Inputs scaled by 1/32000 (int16 audio range); targets are log(1+depth)
# flattened to 192 values (a 12x16 depth map).
x_train = sets['train_da'][:]/32000
y_train = np.log(1+sets['train_depths'][:].reshape(-1, 192))
x_test = sets['test_da'][:]/32000
y_test = np.log(1+sets['test_depths'][:].reshape(-1, 192))
model = build_and_train_model(x_train, y_train, model_file)
else:
print "loading model..."
path = os.getcwd()+'/'
with h5py.File(path+sets_file, 'r') as sets:
x_test = sets['test_da'][:]/32000
y_test = np.log(1+sets['test_depths'][:].reshape(-1, 192))
# The custom loss must be supplied by name for deserialization.
model = load_model(model_file, custom_objects={'adjusted_mse':adjusted_mse})
# NOTE(review): run_model has no return statement, so loss is always None.
loss = run_model(model, x_test, y_test)
######################################################
######################################################
def build_and_train_model(x_train, y_train, model_file):
# Build the Conv1D -> Conv2D -> Dense network, train it for one epoch, save
# the loss history and the model to disk, and return the compiled model.
net = Sequential()
# 1-D convolution over the raw waveform, stride 26 downsampling.
net.add(Conv1D(32, (256),
strides=(26),
activation='relu',
input_shape=x_train.shape[1:]))
conv_output_size = net.layers[0].compute_output_shape(x_train.shape)[1]
# Reshape the (time, channels) output into a 2-D "image" for Conv2D layers.
net.add(Reshape((conv_output_size,32,1)))
net.add(Conv2D(128, (5,5), activation='relu'))
net.add(Conv2D(128, (5,5), strides=(1,1), activation='relu'))
net.add(Conv2D(32, (5,5), strides=(2,2), activation='relu'))
net.add(Flatten())
net.add(Dense(600, activation='relu'))
net.add(Dense(600, activation='relu'))
net.add(Dense(300, activation='relu'))
# 192 linear outputs = the flattened 12x16 log-depth map.
net.add(Dense(192, activation='linear'))
net.compile(optimizer='adam', loss=adjusted_mse)
print "finished compiling"
hist = net.fit(x_train, y_train, validation_split=0.0, epochs=1, batch_size=32)
# Persist per-epoch training loss next to the model ([:-3] strips ".h5").
with h5py.File(model_file[:-3]+'_loss_history.h5', 'w') as lh:
lh.create_dataset('losses', data=hist.history['loss'])
print "loss history saved as '"+model_file[:-3]+"_loss_history.h5'"
net.save(model_file)
print "model saved as '%s'" %model_file
return net
######################################################
def run_model(net, x_test, y_test):
# Evaluate the model on the test split and display error visualizations.
# NOTE(review): despite the caller assigning its result, this function
# returns None -- consider returning ``loss``.
predictions = net.predict(x_test)
loss = net.evaluate(x_test, y_test)
print "\nTEST LOSS:", loss
# np.exp(y)-1 undoes the log(1+depth) transform applied in main().
view_average_error(np.exp(y_test)-1,np.exp(predictions)-1)
for i in range(100, 2000, 110):
view_depth_maps(i, np.exp(y_test)-1, np.exp(predictions)-1)
#####################################################
def adjusted_mse(y_true, y_pred):
# MSE variant that excludes masked target entries (where y_true is 0) by
# substituting the prediction for them, zeroing their error contribution,
# and averaging only over the remaining entries.
# NOTE(review): np.all with no axis reduces over the *entire* array, making
# the mask a single boolean rather than per-element (y_true != 0) -- verify
# this behaves as intended with symbolic tensors.
ok_entries = np.all(y_true)
ok_entries = tf.cast(ok_entries, bool)
safe_targets = tf.where(ok_entries, y_true, y_pred)
sqr = tf.square(y_pred - safe_targets)
valid = tf.cast(ok_entries, floatx())
num_ok = tf.reduce_sum(valid, axis=-1) # count OK entries
num_ok = tf.maximum(num_ok, tf.ones_like(num_ok)) # avoid divide by zero
return tf.reduce_sum(sqr, axis=-1) / num_ok
#####################################################
def view_average_error(ytrue, ypred):
# Show the per-pixel prediction error averaged over all test samples,
# reshaped to the 12x16 depth-map layout, clipped to +/-3 standard
# deviations around the mean for display.
# NOTE(review): the error is signed (pred - true), so the "Absolute" in the
# title is misleading.
error = np.reshape(ypred-ytrue, (-1,12,16))
avg_error = np.mean(error, axis=0)
stdev = np.std(avg_error)
avg_val = np.mean(avg_error)
rng = (avg_val-(3*stdev),avg_val+(3*stdev))
error_map = plt.imshow(avg_error, clim=rng, cmap="Greys", interpolation='none')
plt.title("Absolute Average Error")
plt.show()
#####################################################
def view_depth_maps(index, ytrue, ypred):
# Display true depth, predicted depth, and error maps in pages of 10
# samples (3 panels per sample).
# NOTE(review): the ``index`` parameter is overwritten by ``index = i + j``
# before ever being read, so the argument passed by run_model is unused.
all_error = ypred-ytrue
avg_error = np.mean(all_error)
stdev = np.std(all_error)
# Shared display range for all error panels: mean +/- 3 sigma.
rng = (avg_error-(3*stdev),avg_error+(3*stdev))
for i in range(0, ytrue.shape[0], 50):
for j in range(10):
index = i + j
true = np.reshape(ytrue[index], (12,16))
pred = np.reshape(ypred[index], (12,16))
error = pred - true
ax1 = plt.subplot(10,3,j*3 + 1)
true_map = plt.imshow(true, clim=(500, 2000), interpolation='none')
ax1.set_title("True Depth")
ax2 = plt.subplot(10,3,j*3 + 2)
pred_map = plt.imshow(pred, clim=(500, 2000), interpolation='none')
ax2.set_title("Predicted Depth")
ax3 = plt.subplot(10,3,j*3 + 3)
error_map = plt.imshow(error, clim=rng, cmap="Greys", interpolation='none')
# NOTE(review): the panel shows the signed error, not its square,
# despite the "Squared Error Map" title.
ax3.set_title("Squared Error Map")
plt.show()
#####################################################
main()
| StarcoderdataPython |
1601949 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 22 11:24:01 2021
@author: ja17375
"""
import pygmt
import numpy as np
import pandas as pd
import xarray as xr
import netCDF4 as nc
def plot_forte_gmt():
    """Plot SKS/SKKS splitting-pair measurements for the Africa/Mediterranean
    region over an S40RTS shear-velocity slice, and save the figure as EPS.

    The TX2008 flow-model data is still loaded (its vector overlay was
    disabled by the author); removed the commented-out overlay code.

    BUG FIX: ``fig.basemap`` referenced the undefined name ``africa_me``
    (NameError at runtime); the region variable is ``africa_med``.
    """
    # TX2008 flow model: per the indexing below, column 1 = lat, 2 = lon,
    # 3 = radial, 4 = colatitudinal, 5 = longitudinal velocity components.
    tx2008 = np.loadtxt('/Users/ja17375/SWSTomo/ForteModels/Flow_Models/TX2008/forteV2_1deg_150km.txt')
    shp = (181, 361)
    dg = 15
    lat = tx2008[:, 1].reshape(shp)
    lon = tx2008[:, 2].reshape(shp)
    Ur = tx2008[:, 3].reshape(shp)
    Utheta = tx2008[:, 4].reshape(shp) * -1  # theta is colat so invert
    Uphi = tx2008[:, 5].reshape(shp)
    # Subsample mask: one node every ``dg`` degrees (for the flow overlay).
    hzdeg = ((lat % dg == 0) & (lon % dg == 0))
    # Cast Ur (radial velocity) into xarray for pyGMT.
    U_grid = xr.DataArray(data=np.flipud(Ur),
                          coords=[('latitude', np.linspace(-90, 90, 181),
                                   {'units': 'degrees_north'}),
                                  ('longitude', np.linspace(-180, 180, 361),
                                   {'units': 'degrees_east'})],
                          )
    fig = pygmt.Figure()
    # Candidate regions [lon_min, lon_max, lat_min, lat_max]; africa_med used.
    africa_med = [-25, 80, -5, 60]
    easia = [60, 150, 10, 70]
    epac = [-170, -80, 10, 65]
    proj = "M15c"
    gproj = "Ks12c"
    fig.basemap(region=africa_med, projection=proj, frame="afg")
    # S40RTS background image and its colorbar.
    fig.grdimage(grid='/Users/ja17375/DiscrePy/Data/S40RTS/S40RTS_2800km.grd',
                 cmap='/Users/ja17375/DiscrePy/Data/S40RTS/S40RTS.cpt')
    fig.colorbar(frame=['a0.5', 'x+l"dVs (%)"' ],
                 cmap='/Users/ja17375/DiscrePy/Data/S40RTS/S40RTS.cpt')
    fig.coast(shorelines=True)
    fig.plot(x=130, y=20, direction=[[0], [1]], style='v0c', color='black', pen='1p')
    data = pd.read_csv('~/DiscrePy/Sheba/Results/Combined/Filt_05Hz/Combined_goodQ.pairs',
                       delim_whitespace=True)
    for i, row in data.iterrows():
        # Join the SKS and SKKS piercing points of each pair with a line.
        fig.plot(x=[row['SKS_PP_LON'], row['SKKS_PP_LON']],
                 y=[row['SKS_PP_LAT'], row['SKKS_PP_LAT']],
                 pen="1p,black")
        if (row['Q_SKS'] >= 0.5):
            # Plot split SKS - black circle plus a fast-direction vector
            # scaled by half the delay time.
            fig.plot(x=row['SKS_PP_LON'],
                     y=row['SKS_PP_LAT'],
                     style='c0.15c', color='black', pen='black')
            vec = np.array([[row['SKS_PP_LON'], row['SKS_PP_LAT'], row['FAST_SKS'], row['TLAG_SKS']*0.5],
                            [row['SKS_PP_LON'], row['SKS_PP_LAT'], row['FAST_SKS']+180, row['TLAG_SKS']*0.5]])
            fig.plot(data=vec, style='v0c', color='black', pen='0.75p')
        elif (row['Q_SKS'] <= -0.5):
            # Q_SKS <= -0.5: white circle, no vector.
            fig.plot(x=row['SKS_PP_LON'],
                     y=row['SKS_PP_LAT'],
                     style='c0.15c', color='white', pen='black')
        else:
            print('Bad Q for SKS')
        if (row['Q_SKKS'] >= 0.5):
            # Plot split SKKS - black diamond plus a fast-direction vector.
            fig.plot(x=row['SKKS_PP_LON'],
                     y=row['SKKS_PP_LAT'],
                     style='d0.15c', color='black', pen='black')
            vec = np.array([[row['SKKS_PP_LON'], row['SKKS_PP_LAT'], row['FAST_SKKS'], row['TLAG_SKKS']*0.5],
                            [row['SKKS_PP_LON'], row['SKKS_PP_LAT'], row['FAST_SKKS']+180, row['TLAG_SKKS']*0.5]])
            fig.plot(data=vec, style='v0c', color='black', pen='0.75p')
        elif (row['Q_SKKS'] <= -0.5):
            # Q_SKKS <= -0.5: white diamond, no vector.
            fig.plot(x=row['SKKS_PP_LON'],
                     y=row['SKKS_PP_LAT'],
                     style='d0.15c', color='white', pen='black')
    fig.savefig('/Users/ja17375/Documents/Thesis-enclosing/Thesis/chapters/chapter02/Figs/Africa_Med_SKS_SKKS_onS40RTS.eps',
                crop=True, show=True)
    # fig.show(method='external')
def plot_flament(dpath='/Users/ja17375/SWSTomo/FlamentModel',extent='epac'):
# Plot the Flament C3 flow-model velocities at 2677 km depth with SKS/SKKS
# splitting measurements overlain, for the Africa/Mediterranean region.
# NOTE(review): the ``extent`` parameter is never used -- the region is
# hard-coded to africa_med below.
nc_vx = nc.Dataset(f'{dpath}/C3-vx-000Ma-2677km.grd')
nc_vy = nc.Dataset(f'{dpath}/C3-vy-000Ma-2677km.grd')
nc_vz = nc.Dataset(f'{dpath}/C3-vz-000Ma-2677km.grd')
vel_conv = 4.9e-4 # converts velocity to cm/year (from N. Flament - see model README.txt)
Utheta = nc_vx['z'][:] * vel_conv *-1 #theta is colat so invert
Uphi = nc_vy['z'][:] * vel_conv # longitudl velocity
Ur = nc_vz['z'][:] * vel_conv # radial velocity
lon, lat = np.meshgrid(nc_vx['lon'][:], nc_vx['lat'][:])
dg = 15
# Subsample mask: one node every ``dg`` degrees for the flow arrows.
hzdeg = ((lat % dg == 0) & (lon % dg == 0))
# Radial velocity as an xarray grid for pyGMT's grdimage.
U_grid = xr.DataArray(data=np.flipud(Ur),
coords=[('latitude', np.linspace(-90,90,181),
{'units': 'degrees_north'}),
('longitude', np.linspace(-180,180,361),
{'units': 'degrees_east'})],
)
fig = pygmt.Figure()
africa_med = [25,70,-5,50]
fig.basemap(region=africa_med, projection="Ks12c", frame="afg",)
fig.grdimage(grid=U_grid)
fig.coast(shorelines=True)
# Horizontal flow arrows at the subsampled nodes: angle from the two
# horizontal components, length proportional to horizontal speed.
flow_ang = np.rad2deg(np.arctan2(np.ravel(Utheta[hzdeg]), np.ravel(Uphi[hzdeg])))
flow_len = np.sqrt(np.ravel(Utheta[hzdeg])**2 + np.ravel(Uphi[hzdeg])**2)
# NOTE(review): the row count 325 is hard-coded to the global 15-degree
# grid -- verify it matches hzdeg.sum() if dg or the grids change.
flow_data = np.zeros((325, 4))
flow_data[:,0] = lon[hzdeg]
flow_data[:,1] = lat[hzdeg]
flow_data[:,2] = flow_ang
flow_data[:,3] = flow_len *0.1
fig.plot(data=flow_data, style = 'v0.2c+e', color='black', pen='1p')
# Second pass with the direction flipped 180 degrees (headless vectors).
flow_data[:,2] = flow_data[:,2] + 180
fig.plot(data=flow_data, style = 'v0c', color='black', pen='1p')
data = pd.read_csv('~/DiscrePy/Sheba/Results/Combined/Filt_05Hz/Combined_goodQ.pairs', delim_whitespace=True)
for i, row in data.iterrows():
# Join each SKS/SKKS piercing-point pair with a line.
fig.plot(x=[row['SKS_PP_LON'], row['SKKS_PP_LON']],
y=[row['SKS_PP_LAT'], row['SKKS_PP_LAT']],
pen="1p,black")
if (row['Q_SKS'] >= 0.5):
#Plot split SKS - black circle
fig.plot(x=row['SKS_PP_LON'],
y=row['SKS_PP_LAT'],
style='c0.15c', color='black', pen='black')
vec = np.array([[row['SKS_PP_LON'], row['SKS_PP_LAT'], row['FAST_SKS'], row['TLAG_SKS']*0.25],
[row['SKS_PP_LON'], row['SKS_PP_LAT'], row['FAST_SKS']+180, row['TLAG_SKS']*0.25]])
fig.plot(data=vec, style = 'v0c', color='black', pen='0.75p')
elif (row['Q_SKS'] <= -0.5):
# Q_SKS <= -0.5: white circle, no vector.
fig.plot(x=row['SKS_PP_LON'],
y=row['SKS_PP_LAT'],
style='c0.15c', color='white', pen='black')
else:
print('Bad Q for SKS')
if (row['Q_SKKS'] >= 0.5):
#Plot split SKKS - black circle
fig.plot(x=row['SKKS_PP_LON'],
y=row['SKKS_PP_LAT'],
style='d0.15c', color='black', pen='black')
vec = np.array([[row['SKKS_PP_LON'], row['SKKS_PP_LAT'], row['FAST_SKKS'], row['TLAG_SKKS']*0.25],
[row['SKKS_PP_LON'], row['SKKS_PP_LAT'], row['FAST_SKKS']+180, row['TLAG_SKKS']*0.25]])
fig.plot(data=vec, style = 'v0c', color='black', pen='0.75p')
elif (row['Q_SKKS'] <= -0.5):
# Q_SKKS <= -0.5: white diamond, no vector.
fig.plot(x=row['SKKS_PP_LON'],
y=row['SKKS_PP_LAT'],
style='d0.15c', color='white', pen='black')
fig.show(method='external')
if __name__ == '__main__':
plot_forte_gmt() | StarcoderdataPython |
9711170 | import os.path
# Dataset layout on disk (train images, train masks, test images).
DATA_DIR = './data'
TRAIN_DIR = os.path.join(DATA_DIR, 'train_hq')
TRAIN_MASK_DIR = os.path.join(DATA_DIR, 'train_masks')
TEST_DIR = os.path.join(DATA_DIR, 'test_hq')
# CSV files listing which image ids belong to the train / validation splits.
TRAIN_IMAGESET_PATH = os.path.join(DATA_DIR, 'train.csv')
VAL_IMAGESET_PATH = os.path.join(DATA_DIR, 'val.csv')
# Predicted-probability maps are written under OUTPUT_DIR/PROBS_DIR_NAME.
OUTPUT_DIR = './output'
PROBS_DIR_NAME = 'probs'
# Images are of size 1918 * 1280
img_size = (1280, 1918) # (height, width)
# Factorizations relevant to choosing network strides/padding:
# 1918 = 2 * 7 * 137
# 1920 = 2^7 * 3 * 5
# 1280 = 2^8 * 5
| StarcoderdataPython |
5060895 | __author__ = 'Matt'
import math
import cmath
from UI.network.DataTransferProtocol import sendData
import UI.WheelComputation as WheelComputation
from MathHelpers import *
import numpy
class DriveControl:
def __init__(self, x, y, size, data, data_client):
# Lay out the drive-control widget: a square path area (with a thin
# activate strip above it), a throttle strip to its right, and the rover's
# wheel positions taken from WheelComputation.
# x, y: top-left corner of the widget on the canvas; size: widget side
# length; data: UI state (provides the *_articulation_angle fields read in
# draw()); data_client: network client handle (stored, unused here).
self.x = x
self.y = y
self.size = size
# Path area is the widget minus the strip reserved for the side boxes.
self.path_area_size = self.size - self.size/self.SIDE_BOX_RATIO
self.half_size = self.path_area_size/2
self.robot_center_radius = self.path_area_size/self.ROBOT_DOT_RADIUS_RATIO
self.path_dot_radius = self.path_area_size/self.PATH_DOT_RADIUS_RATIO
# A Rectangle defining two corners of the path area on the UI
self.path_area = (x, y+self.path_area_size/self.SIDE_BOX_RATIO, x+size, y+size)
# Where the robot is centered on the UI
self.robot_center = (self.path_area[0]+self.half_size, self.path_area[1]+self.half_size)
self.throttle_center = (x+self.path_area_size, y + size/self.SIDE_BOX_RATIO + self.half_size)
self.current_control = self.NO_AREA_SELECTED
self.ui_data = data
self.dataClient = data_client
self.button_active = True
# Offset of the arc (turn) center relative to the robot center; (0, 0)
# means rotate in place when not driving forward.
self.radius_offset_x = 0
self.radius_offset_y = 0
self.go_forward = True
# 0 is stop, -100 is full reverse, 100 is full forward
self.throttle = 0
# Per-wheel (x, y) positions relative to the robot center, from the
# shared WheelComputation module (fl/fr = front, ml/mr = mid, rl/rr = rear).
self.fl_pos = (WheelComputation.FL_WHEEL_POS[0],
WheelComputation.FL_WHEEL_POS[1])
self.fr_pos = (WheelComputation.FR_WHEEL_POS[0],
WheelComputation.FR_WHEEL_POS[1])
self.ml_pos = (WheelComputation.ML_WHEEL_POS[0],
WheelComputation.ML_WHEEL_POS[1])
self.mr_pos = (WheelComputation.MR_WHEEL_POS[0],
WheelComputation.MR_WHEEL_POS[1])
self.rl_pos = (WheelComputation.RL_WHEEL_POS[0],
WheelComputation.RL_WHEEL_POS[1])
self.rr_pos = (WheelComputation.RR_WHEEL_POS[0],
WheelComputation.RR_WHEEL_POS[1])
# Wheel outline polygon (rotated per-wheel in draw_wheel2).
self.wheel_matrix = WheelComputation.WHEEL_MATRIX
return
# Which sub-area of the widget the user is currently interacting with.
NO_AREA_SELECTED = -1
PATH_AREA_SELECTED = 0
THROTTLE_AREA_SELECTED = 1
ACTIVATE_AREA_SELECTED = 2
SCALE = 1
# Fraction denominators used for layout: the side boxes take 1/SIDE_BOX_RATIO
# of the widget, and the dot radii are path_area_size divided by the ratios.
SIDE_BOX_RATIO = 18
ROBOT_DOT_RADIUS_RATIO = 75
PATH_DOT_RADIUS_RATIO = 150
WHEEL_DOT_RADIUS_RATIO = 300
def draw(self, canvas):
# Render the whole widget onto the Tk canvas: guides, the six wheels, the
# forward/arc indicator, the planned path, the throttle bar, and the
# activate button.
# Arc (turn) center = robot center plus the user-dragged offset.
arc_center = (self.robot_center[0] + self.radius_offset_x, self.robot_center[1] + self.radius_offset_y)
# Draw the snap to objects
canvas.create_line(self.x, self.robot_center[1], self.x+self.path_area_size, self.robot_center[1], fill="grey")
canvas.create_oval(self.robot_center[0]-self.robot_center_radius,
self.robot_center[1]-self.robot_center_radius,
self.robot_center[0]+self.robot_center_radius,
self.robot_center[1]+self.robot_center_radius, fill="grey")
# Draw the wheels
self.draw_wheel2(canvas, (self.robot_center[0]+self.fl_pos[0], self.robot_center[1]+self.fl_pos[1]),
arc_center,
self.path_area_size, self.ui_data.fl_articulation_angle)
self.draw_wheel2(canvas, (self.robot_center[0]+self.ml_pos[0], self.robot_center[1]+self.ml_pos[1]),
arc_center, self.path_area_size, self.ui_data.ml_articulation_angle)
self.draw_wheel2(canvas, (self.robot_center[0]+self.rl_pos[0], self.robot_center[1]+self.rl_pos[1]),
arc_center,
self.path_area_size, self.ui_data.rl_articulation_angle)
self.draw_wheel2(canvas, (self.robot_center[0]+self.fr_pos[0], self.robot_center[1]+self.fr_pos[1]),
arc_center,
self.path_area_size, self.ui_data.fr_articulation_angle)
self.draw_wheel2(canvas, (self.robot_center[0]+self.mr_pos[0], self.robot_center[1]+self.mr_pos[1]),
arc_center,
self.path_area_size, self.ui_data.mr_articulation_angle)
self.draw_wheel2(canvas, (self.robot_center[0]+self.rr_pos[0], self.robot_center[1]+self.rr_pos[1]),
arc_center,
self.path_area_size, self.ui_data.rr_articulation_angle)
# Top strip: purple while in forward mode, grey (with a visible arc-center
# dot) while in arc/rotate mode.
if self.go_forward:
canvas.create_rectangle(self.x, self.y, self.path_area_size, self.path_area_size/self.SIDE_BOX_RATIO,
fill="purple")
# canvas.create_oval(arcCenter[0]-self.pathDotRadius, arcCenter[1]-self.pathDotRadius,
# arcCenter[0]+self.pathDotRadius, arcCenter[1]+self.pathDotRadius,
# fill="grey")
else:
canvas.create_rectangle(self.x, self.y, self.path_area_size, self.path_area_size/self.SIDE_BOX_RATIO,
fill="grey")
canvas.create_oval(arc_center[0]-self.path_dot_radius, arc_center[1]-self.path_dot_radius,
arc_center[0]+self.path_dot_radius, arc_center[1]+self.path_dot_radius,
fill="black")
self.draw_path(canvas, arc_center)
# Draw throttle area
throttle_left = self.x+self.path_area_size
throttle_right = self.x+self.size
canvas.create_rectangle(throttle_left, self.y+self.size/self.SIDE_BOX_RATIO,
throttle_right, self.y+self.size,
fill="grey")
# Purple bar grows up from center for forward throttle, down for reverse.
throttle_top = self.throttle_center[1]-2
throttle_bottom = self.throttle_center[1]+2
if self.throttle > 0:
throttle_top = self.throttle_center[1] + self.half_size * self.throttle/-100
elif self.throttle < 0:
throttle_bottom = self.throttle_center[1] + self.half_size*self.throttle/-100
canvas.create_rectangle(throttle_left, throttle_top, throttle_right, throttle_bottom, fill="purple")
# Draw Activate Area
if not self.button_active:
active_color = "red"
else:
active_color = "green"
canvas.create_rectangle(self.x+self.path_area_size, self.y,
self.x+self.size, self.y+self.size/self.SIDE_BOX_RATIO,
fill=active_color)
return
def draw_path(self, canvas, arc_center):
# Draw the planned drive path. Three cases: straight forward drive,
# rotation in place (arc center == robot center), or an arc around
# arc_center. The purple portion is proportional to the throttle, and a
# purple dot marks where the robot will stop.
# Driving forward
if self.go_forward:
# Straight vertical path; end position scales with throttle (negative
# throttle places the end below center).
path_end_pos = self.y+self.size/self.SIDE_BOX_RATIO+self.half_size+self.half_size*self.throttle/-100
canvas.create_line(self.robot_center[0], self.y+self.size/self.SIDE_BOX_RATIO,
self.x+self.path_area_size/2, self.y+self.size)
canvas.create_line(self.robot_center[0], self.y+self.size/self.SIDE_BOX_RATIO+self.half_size,
self.x+self.path_area_size/2, path_end_pos,
fill="purple", width=2)
canvas.create_oval(self.robot_center[0]-self.path_dot_radius, path_end_pos-self.path_dot_radius,
self.robot_center[0]+self.path_dot_radius, path_end_pos+self.path_dot_radius,
fill="purple")
# canvas.create_oval(arcCenter[0]-self.arcCenterRadius, arcCenter[1]-self.arcCenterRadius,
# arcCenter[0]+self.arcCenterRadius, arcCenter[1]+self.arcCenterRadius,
# fill="grey")
# Rotation
elif self.robot_center[0] == arc_center[0] and self.robot_center[1] == arc_center[1]:
# In-place rotation: fixed-radius circle, arc sweep proportional to
# throttle, starting from the top (90 degrees).
radius = self.half_size/4
arc_degree_length = float(359)*self.throttle/100
canvas.create_oval(arc_center[0]-radius, arc_center[1]-radius,
arc_center[0]+radius, arc_center[1]+radius,
fill=None)
canvas.create_arc(arc_center[0]-radius, arc_center[1]-radius,
arc_center[0]+radius, arc_center[1]+radius,
fill=None, style="arc", outline="purple", width=2,
start=90, extent=arc_degree_length)
# End marker on the circle (canvas y grows downward, hence the *-1).
path_end_pos_x = arc_center[0]+cmath.cos((arc_degree_length+90)/180*cmath.pi.real).real*radius
path_end_pos_y = arc_center[1]+cmath.sin((arc_degree_length+90)/180*cmath.pi.real).real*radius*-1
canvas.create_oval(path_end_pos_x-self.path_dot_radius, path_end_pos_y-self.path_dot_radius,
path_end_pos_x+self.path_dot_radius, path_end_pos_y+self.path_dot_radius,
fill="purple")
# Arcing
else:
# Compute the radius of the arc
radius = dist(arc_center[0], arc_center[1], self.robot_center[0], self.robot_center[1])
# Draw the circle that the arc falls on
canvas.create_oval(arc_center[0]-radius, arc_center[1]-radius,
arc_center[0]+radius, arc_center[1]+radius,
fill=None)
theta = 0.0
# Adjacent is the length of line adjacent to theta
# Hypotenuse is our radius
# Theta is the interior angle around the point of rotation
# Quadrant tests locate the robot on the circle relative to arc_center.
# Top Right Quadrant
if arc_center[0] > self.robot_center[0] and arc_center[1] <= self.robot_center[1]:
adjacent = arc_center[0] - self.robot_center[0]
theta = 180 + (cmath.acos(float(adjacent)/float(radius)).real/cmath.pi.real*180).real
# Top Left Quadrant
if arc_center[0] < self.robot_center[0] and arc_center[1] < self.robot_center[1]:
adjacent = self.robot_center[0] - arc_center[0]
theta = 360 - (cmath.acos(float(adjacent)/float(radius)).real/cmath.pi.real*180).real
# Bottom Left Quadrant
if arc_center[0] < self.robot_center[0] and arc_center[1] > self.robot_center[1]:
adjacent = self.robot_center[1] - arc_center[1]
theta = (cmath.acos(float(adjacent)/float(radius)).real/cmath.pi.real*180).real-90
# Bottom Right Quadrant
if arc_center[0] > self.robot_center[0] and arc_center[1] > self.robot_center[1]:
adjacent = arc_center[1] - self.robot_center[1]
theta = (cmath.acos(float(adjacent)/float(radius)).real/cmath.pi.real*180).real+90
# We want forward throttle to always move the robot forward. This enforces that behavior
if arc_center[0] > self.robot_center[0]:
throttlePathMod = -1
arc_degree_length = 360 - (float(359)*self.throttle/100 - theta)
else:
throttlePathMod = 1
arc_degree_length = 360 - (float(359)*self.throttle/100*-1 - theta)
# The purple arc to represent actual drive distance around the circle
canvas.create_arc(arc_center[0]-radius, arc_center[1]-radius, arc_center[0]+radius, arc_center[1]+radius,
start=theta, extent=359*self.throttle/100*throttlePathMod,
fill=None, style="arc", outline="purple", width=2)
# The position that the robot will stop at, the end of the arc
path_end_pos_x = arc_center[0]+cmath.cos(deg2rad(arc_degree_length)).real*radius
path_end_pos_y = arc_center[1]+cmath.sin(deg2rad(arc_degree_length)).real*radius*-1
# Draw a marker to show the position that the robot will stop at
canvas.create_oval(path_end_pos_x-self.path_dot_radius,
path_end_pos_y-self.path_dot_radius,
path_end_pos_x+self.path_dot_radius,
path_end_pos_y+self.path_dot_radius,
fill="purple")
return
    def draw_wheel(self, canvas, wheel_pos, arc_center_pos, size, theta):
        """Draw one wheel as a thick line segment rotated to *theta* degrees.

        canvas         -- Tk Canvas to draw on
        wheel_pos      -- (x, y) canvas position of the wheel's center
        arc_center_pos -- (x, y) center of the turning circle (dotted guide line)
        size           -- overall widget size; wheel dimensions scale from it
        theta          -- wheel heading in degrees
        """
        # Radius of the dot to draw at the wheel
        wheel_dot_radius = size/self.WHEEL_DOT_RADIUS_RATIO
        # Draw a dot at the center of the wheel
        canvas.create_oval(wheel_pos[0]-wheel_dot_radius, wheel_pos[1]-wheel_dot_radius,
                           wheel_pos[0]+wheel_dot_radius, wheel_pos[1]+wheel_dot_radius,
                           fill="black")
        # Created a dotted line from the wheel to the center of the circle it will be driving around
        # (only when turning; in go-forward mode there is no turning circle)
        if not self.go_forward:
            canvas.create_line(wheel_pos[0], wheel_pos[1],
                               arc_center_pos[0], arc_center_pos[1],
                               dash=(1, 1))
        # Half-extent of the wheel segment along its heading (cmath used for the
        # trig; only the real part is meaningful)
        dx = size/self.WHEEL_LENGTH_RATIO*cmath.cos(deg2rad(theta)).real
        dy = size/self.WHEEL_LENGTH_RATIO*cmath.sin(deg2rad(theta)).real
        # Draw the wheel line
        canvas.create_line(wheel_pos[0]-dx, wheel_pos[1]-dy,
                           wheel_pos[0]+dx, wheel_pos[1]+dy,
                           width=size/self.WHEEL_WIDTH_RATIO)
        return
def draw_wheel2(self, canvas, wheel_pos, arc_center_pos, size, theta):
# theta = theta + 90
rot = numpy.matrix(
[[math.cos(deg2rad(theta)), -math.sin(deg2rad(theta))],
[math.sin(deg2rad(theta)), math.cos(deg2rad(theta))]]
)
rotated = self.wheel_matrix.dot(rot)
canvas.create_polygon(wheel_pos[0]+rotated[0].item(0), wheel_pos[1]+rotated[0].item(1),
wheel_pos[0]+rotated[1].item(0), wheel_pos[1]+rotated[1].item(1),
wheel_pos[0]+rotated[2].item(0), wheel_pos[1]+rotated[2].item(1),
wheel_pos[0]+rotated[3].item(0), wheel_pos[1]+rotated[3].item(1),
wheel_pos[0]+rotated[4].item(0), wheel_pos[1]+rotated[4].item(1),
wheel_pos[0]+rotated[5].item(0), wheel_pos[1]+rotated[5].item(1),
wheel_pos[0]+rotated[6].item(0), wheel_pos[1]+rotated[6].item(1),
wheel_pos[0]+rotated[7].item(0), wheel_pos[1]+rotated[7].item(1))
# Created a dotted line from the wheel to the center of the circle it will be driving around
if not self.go_forward:
canvas.create_line(wheel_pos[0], wheel_pos[1],
arc_center_pos[0], arc_center_pos[1],
dash=(1, 1))
# Draw a dot at the center of the wheel
canvas.create_oval(wheel_pos[0]-2, wheel_pos[1]-2,
wheel_pos[0]+2, wheel_pos[1]+2,
fill="grey", outline=None)
return
def on_mouse_press(self, event):
# Mark which area the user first clicked in
if event.x < self.x + self.path_area_size:
self.current_control = self.PATH_AREA_SELECTED
elif event.y < self.y+self.path_area_size/self.SIDE_BOX_RATIO:
self.current_control = self.ACTIVATE_AREA_SELECTED
else:
self.current_control = self.THROTTLE_AREA_SELECTED
return
    # Internal Tk event callback (bound by the framework) — not part of the public API; do not call directly.
def on_mouse_motion(self, event):
# If the user first clicked in the path definition area
if self.current_control == self.PATH_AREA_SELECTED:
# if the cursor is in the top box area, set the wheels to go forward mode
self.go_forward = event.y < self.y+self.path_area_size/self.SIDE_BOX_RATIO
# If we are going forward then all wheels should be pointing in this direction
if self.go_forward:
self.ui_data.fl_articulation_angle = 180
self.ui_data.fr_articulation_angle = 0
self.ui_data.ml_articulation_angle = 180
self.ui_data.mr_articulation_angle = 0
self.ui_data.rl_articulation_angle = 180
self.ui_data.rr_articulation_angle = 0
return
# If the cursor leaves the path definition area then don't do anything
if event.x > self.x + self.path_area_size:
return
# If the user gets close enough to the robot center point then snap to the robot center
if dist(event.x, event.y, self.robot_center[0], self.robot_center[1]) < self.path_area_size/self.ROBOT_DOT_RADIUS_RATIO:
self.radius_offset_x = 0
self.radius_offset_y = 0
# If the user gets close enough to the center (horizontal) line, then snap to it
elif dist(0, event.y, 0, self.robot_center[1]) < self.size/75:
self.radius_offset_x = event.x - self.robot_center[0]
self.radius_offset_y = 0
# Otherwise we just use the coordinates of the cursor
else:
self.radius_offset_x = event.x - self.robot_center[0]
self.radius_offset_y = event.y - self.robot_center[1]
arc_center_pos = (self.radius_offset_x, self.radius_offset_y)
# Compute the angle and speed of each of the articulation joints/wheels
self.ui_data.fl_articulation_angle = 360 - WheelComputation.calc_articulation_angle(self.fl_pos, arc_center_pos, self.go_forward) + 180 - 360
self.ui_data.fl_drive_speed = WheelComputation.calc_wheel_speed(self.fl_pos, arc_center_pos, self.go_forward)
print self.ui_data.fl_articulation_angle
self.ui_data.fr_articulation_angle = 360 - WheelComputation.calc_articulation_angle(self.fr_pos, arc_center_pos, self.go_forward) - 360
self.ui_data.fr_drive_speed = WheelComputation.calc_wheel_speed(self.fr_pos, arc_center_pos, self.go_forward)
self.ui_data.ml_articulation_angle = 360 - WheelComputation.calc_articulation_angle(self.ml_pos, arc_center_pos, self.go_forward) + 180 - 360
self.ui_data.ml_drive_speed = WheelComputation.calc_wheel_speed(self.ml_pos, arc_center_pos, self.go_forward)
self.ui_data.mr_articulation_angle = 360 - WheelComputation.calc_articulation_angle(self.mr_pos, arc_center_pos, self.go_forward) - 360
self.ui_data.mr_drive_speed = WheelComputation.calc_wheel_speed(self.mr_pos, arc_center_pos, self.go_forward)
self.ui_data.rl_articulation_angle = 360 - WheelComputation.calc_articulation_angle(self.rl_pos, arc_center_pos, self.go_forward) + 180 - 360
self.ui_data.rl_drive_speed = WheelComputation.calc_wheel_speed(self.rl_pos, arc_center_pos, self.go_forward)
self.ui_data.rr_articulation_angle = 360 - WheelComputation.calc_articulation_angle(self.rr_pos, arc_center_pos, self.go_forward) - 360
self.ui_data.rr_drive_speed = WheelComputation.calc_wheel_speed(self.rr_pos, arc_center_pos, self.go_forward)
# Normalize all speeds to that they are between 0 and 1
WheelComputation.normalize_wheel_speeds(self.ui_data)
# If the user first clicked in the throttle area
if self.current_control == self.THROTTLE_AREA_SELECTED:
# If the user moves the cursor out of the throttle area then don't do anything
if event.y > self.size or event.y < self.size/self.SIDE_BOX_RATIO:
return
# Compute the throttle value
self.throttle = (self.throttle_center[1] - event.y)*100/self.half_size
return
    def on_mouse_release(self, event):
        """Tk button-release handler for the activate/e-stop button.

        Only acts when both the initial press (self.current_control) and the
        release landed inside the activate box; always clears current_control.
        """
        # If the user is releasing the mouse in the activate area and they first clicked in the activate area then
        # send a command to the robot
        # activate the button
        if event.x > self.x + self.path_area_size and event.y < self.y+self.path_area_size/self.SIDE_BOX_RATIO:
            if self.current_control == self.ACTIVATE_AREA_SELECTED:
                if self.button_active:
                    self.button_active = False
                    sendData(self.dataClient.socket, self.ui_data)
                else:
                    self.button_active = True
                    # NOTE(review): entering the active state sends a one-shot
                    # e_stop=True command and then clears the flag locally —
                    # confirm this button-state/e-stop mapping is intentional.
                    self.ui_data.e_stop = True
                    sendData(self.dataClient.socket, self.ui_data)
                    self.ui_data.e_stop = False
        self.current_control = self.NO_AREA_SELECTED
        return
| StarcoderdataPython |
4858143 | <gh_stars>10-100
"""Test groupfinder."""
import pytest
from pyramid_fullauth.auth import groupfinder
@pytest.mark.parametrize(
    ["is_admin", "is_active", "groups"],
    [
        (True, True, ["s:superadmin", "s:user"]),
        (True, False, ["s:superadmin", "s:inactive"]),
        (False, True, ["s:user"]),
        (False, False, ["s:inactive"]),
    ],
)
def test_groupfinder(web_request, db_session, user, is_admin, is_active, groups):
    """groupfinder returns the special groups matching the user's admin/active flags.

    web_request, db_session and user are pytest fixtures supplied by the
    project's conftest (not visible in this module).
    """
    # Re-attach the fixture user to this test's session before mutating it.
    user = db_session.merge(user)
    user.is_admin = is_admin
    user.is_active = is_active
    web_request.user = user
    assigned_groups = groupfinder(user.id, web_request)
    assert groups == assigned_groups
| StarcoderdataPython |
3508329 | <filename>Yandex_contest_HW/YC_HW2/task_2F.py
from _collections import deque  # kept from original (CPython's private C module backing collections)


def solve(n, k, p):
    """Return the winner of the front-of-line game played on the values in p.

    The first two elements repeatedly play: the larger value wins, the loser
    moves to the back of the line, and the winner defends the front spot.
    The first value to win k games in a row wins overall; once the maximum
    value reaches the front it can never lose, so it wins regardless of k.

    The original code credited wins to the wrong element after popping the
    front (list indices shift after pop(0)); this version tracks the current
    front element's consecutive-win streak directly.

    p is modified in place; n == len(p) and is accepted for interface parity
    with the input format.
    """
    best = max(p)
    streak = 0  # consecutive wins of the current front element p[0]
    while p[0] != best and streak < k:
        if p[0] > p[1]:
            # Front element defends: challenger goes to the back.
            p.append(p.pop(1))
            streak += 1
        else:
            # Challenger takes over the front with one win on its streak.
            p.append(p.pop(0))
            streak = 1
    return p[0]


if __name__ == "__main__":
    n, k = [int(x) for x in input().split()]
    p = list(map(int, input().split()))
    print(solve(n, k, p))
| StarcoderdataPython |
6681007 | <reponame>rjt-gupta/USHUAIA
from django.contrib import admin
from django.urls import path
from django.conf.urls import url
from music.views import album_detail, song_detail, artist_detail, playlist_detail, create_artist, create_album, create_song, add_to_playlist, delete_song
from music.views import delete_song_playlist, delete_album, delete_artist
# Route table: every URL maps straight onto a function-based view from
# music.views.  (The original built several patterns from adjacent string
# literals, e.g. 'music/songs/' + '<int:album_id>'; concatenation yields
# exactly the single literals used below, so behavior is unchanged.)
urlpatterns = [
    path('admin/', admin.site.urls),
    path('music/', artist_detail),
    path('music/songs/<int:album_id>', song_detail),
    path('music/album/<int:artist_id>', album_detail),
    path('music/playlist', playlist_detail),
    path('music/create_artist', create_artist),
    path('music/create_album/<int:artist_id>', create_album),
    path('music/create_song/<int:album_id>', create_song),
    path('music/add_to_playlist/', add_to_playlist),
    path('music/delete_song/<int:song_id>', delete_song),
    path('music/delete_song_playlist/<int:play_id>', delete_song_playlist),
    path('music/delete_album/<int:album_id>', delete_album),
    path('music/delete_artist/<int:artist_id>', delete_artist),
]
| StarcoderdataPython |
3230391 | <filename>examiner/exam_test_case.py
"""
Overriding TestCase for exam tool.
"""
import re
import unittest
import importlib
from examiner.exceptions import TestFuncNameError, TestClassNameError
import examiner.helper_functions as hf
class ExamTestCase(unittest.TestCase):
    """
    Override methods to help customize outputs of testcases.

    Each assert* wrapper records the student's value and the expected value
    (student_answer / correct_answer) before delegating to the matching
    unittest.TestCase assertion, so a runner can show both sides of a
    failed comparison.
    """
    # Maps a TestCase class name like "Test1Assignment" to assignment "Assignment".
    ASSIGNMENT_REGEX = r"\.Test[0-9]?([A-Z].+)\)"
    # Maps a test function name like "test_a_some_name" to display name "some_name".
    TEST_NAME_REGEX = r"test(_[a-z])?_(\w+)"
    # NOTE(review): class-level mutable list — shared by all subclasses unless
    # they rebind it; confirm that sharing is intended.
    USER_TAGS = []
    link_to_assignment = ""

    def __init__(self, *args, **kwargs):
        """Initialize reporting fields and derive assignment/test names."""
        super().__init__(*args, **kwargs)
        self.assignment = ""
        self.test_name = ""
        self.student_answer = ""
        self.correct_answer = ""
        # When True, set_answers() stores the student answer as plain/cleaned
        # text instead of repr().
        self.norepr = False
        self._set_test_name_and_assignment()

    def _set_test_name_and_assignment(self):
        """
        Extract Assignment from TestCase name.
        Extract test name from test function name.
        Format testname and assignment text and assign to test object.
        Raises TestClassNameError/TestFuncNameError when the names do not
        follow the required structure.
        """
        test_string = str(self)
        try:
            self.assignment = re.search(self.ASSIGNMENT_REGEX, test_string).group(1)
        except AttributeError as e:
            raise TestClassNameError(
                "Class name for TestCase should follow the structure 'Test[<number>]<words>'. Got '" + test_string + "'"
            ) from e
        try:
            self.test_name = re.search(self.TEST_NAME_REGEX, test_string).group(2).replace("_", " ")
        except AttributeError as e:
            raise TestFuncNameError(
                "Test function name should follow the structure 'test[_<letter>]_<name>' Got '" + test_string + "'"
            ) from e

    def set_answers(self, student_answer, correct_answer):
        """
        Record the student's answer and the correct answer for reporting.

        Both are stored via repr(); when self.norepr is set, the student
        answer is instead stored as cleaned/plain text (the correct answer
        stays repr()'d).
        """
        self.student_answer = repr(student_answer)
        self.correct_answer = repr(correct_answer)
        if self.norepr:
            if isinstance(student_answer, str):
                self.student_answer = hf.clean_str(student_answer)
            else:
                self.student_answer = str(student_answer)

    def assertEqual(self, first, second, msg=None):
        """
        Check if first is equal to second. Save correct and student answer as to variables.
        First comes from student
        """
        self.set_answers(first, second)
        super().assertEqual(first, second, msg)

    def assertIn(self, member, container, msg=None):
        """
        Check if value in container. Save correct and student answer as to variables.
        Container comes from student
        """
        self.set_answers(container, member)
        super().assertIn(member, container, msg)

    def assertFalse(self, expr, msg=None):
        """
        Check that the expression is False.
        Save correct and student answer as to variables.
        """
        self.set_answers(expr, False)
        super().assertFalse(expr, msg)

    def assertTrue(self, expr, msg=None):
        """
        Check that the expression is true.
        Save correct and student answer as to variables.
        """
        self.set_answers(expr, True)
        super().assertTrue(expr, msg)

    def assertNotIn(self, member, container, msg=None):
        """
        Check that member is not in container (container comes from student).
        Save correct and student answer as to variables.
        """
        self.set_answers(container, member)
        super().assertNotIn(member, container, msg)

    def assertModule(self, module, module_path=None, msg=None):
        """
        Check that module can be imported, either as a standard import
        (module_path is None) or from an explicit path via the helper module.
        Save correct and student answer as to variables.
        """
        self.set_answers(module_path, module)
        if module_path is None:
            if importlib.util.find_spec(module) is None:
                msg = self._formatMessage(msg, f"{module} not as standard import")
                raise self.failureException(msg)
        else:
            try:
                hf.import_module(module_path, module)
            except FileNotFoundError as e:
                msg = self._formatMessage(msg, f"{module} not found in path {module_path}")
                raise self.failureException(msg) from e

    def assertAttribute(self, obj, attr, msg=None):
        """
        Check that object has attribute.
        Save correct and student answer as to variables.
        """
        self.set_answers(obj, attr)
        try:
            getattr(obj, attr)
        except AttributeError as e:
            msg = self._formatMessage(msg, f"attribute {attr} not found in object {obj}")
            raise self.failureException(msg) from e

    def assertRaises(self, expected_exception, *args, **kwargs):
        """
        assertRaises is a context and therefore we need to return it
        """
        self.set_answers("", expected_exception)
        return super().assertRaises(expected_exception, *args, **kwargs)

    def assertCountEqual(self, first, second, msg=None):
        """Asserts that two iterables have the same elements, the same number of
        times, without regard to order.

            self.assertEqual(Counter(list(first)),
                             Counter(list(second)))

        Example:
            - [0, 1, 1] and [1, 0, 1] compare equal.
            - [0, 0, 1] and [0, 1] compare unequal.

        """
        self.set_answers(first, second)
        super().assertCountEqual(first, second, msg)

    def assertOrder(self, order, container, msg=None):
        """
        Check that the elements of order appear at strictly increasing indexes
        in container, i.e. in the same relative order.
        Save correct and student answer as to variables.
        """
        self.set_answers(container, order)
        try:
            # list.index raises ValueError for missing elements; we raise it
            # ourselves when two elements appear out of order, so both cases
            # share one failure path.
            for i in range(len(order)-1):
                if not container.index(order[i]) < container.index(order[i+1]):
                    raise ValueError
        except ValueError as e:
            msg = self._formatMessage(msg, f"Index of elemnts in {order} don't appear in correct order in {container}")
            raise self.failureException(msg) from e
| StarcoderdataPython |
4966611 | <filename>resumos/cursos/cs50/all-challenges/labs/birthdays/application.py
import os
from cs50 import SQL
from flask import Flask, flash, jsonify, redirect, render_template, request, session
# Configure application
app = Flask(__name__)

# Ensure templates are auto-reloaded (development convenience; avoid in production)
app.config["TEMPLATES_AUTO_RELOAD"] = True

# Configure CS50 Library to use SQLite database
# birthdays table: id, name, month, and day
db = SQL("sqlite:///birthdays.db")
@app.route("/", methods=["GET", "POST"])
def index():
if request.method == "POST":
name = request.form["name"]
month = request.form["month"]
day = request.form["day"]
# TODO: Add the user's entry into the database
if not name or not month or not day:
print("Fields Error")
return redirect("/")
db.execute("INSERT INTO birthdays (name, month,day) VALUES (?, ?, ?)", name, month, day)
print(name, month, day)
return redirect("/")
else:
# TODO: Display the entries in the database on index.html
rows = db.execute("SELECT name, day, month FROM birthdays")
return render_template("index.html", birthdays = rows)
| StarcoderdataPython |
6518459 | from unittest import TestCase, mock
from usecase.extract_datasets_infos_from_database import (
extract_gtfs_datasets_infos_from_database,
extract_gbfs_datasets_infos_from_database,
extract_previous_sha1_hashes,
extract_source_infos,
)
from utilities.constants import (
CLAIMS,
MAINSNAK,
DATAVALUE,
VALUE,
LABELS,
ENGLISH,
STABLE_URL_PROP,
SHA1_HASH_PROP,
GTFS_CATALOG_OF_SOURCES_CODE,
GBFS_CATALOG_OF_SOURCES_CODE,
)
class TestExtractDatabaseSha1(TestCase):
    """Unit tests for extract_previous_sha1_hashes()."""

    @mock.patch("usecase.extract_datasets_infos_from_database.os.environ")
    @mock.patch("usecase.extract_datasets_infos_from_database.wbi_core.ItemEngine")
    @mock.patch(
        "usecase.extract_datasets_infos_from_database.extract_dataset_version_codes"
    )
    def test_extract_database_sha1_with_existing_entity_codes_should_return_sha1_dict(
        self, mock_versions_extractor, mock_item_engine, mock_env
    ):
        # The SHA1 property id is read from the (mocked) environment.
        test_env = {
            SHA1_HASH_PROP: "test_sha1_prop",
        }
        mock_env.__getitem__.side_effect = test_env.__getitem__
        mock_versions_extractor.return_value = {"Q81"}
        test_entity = ["Q80"]
        test_sha1 = {"sha1_hash"}
        mock_item_engine.return_value.get_json_representation.return_value = {
            CLAIMS: {"test_sha1_prop": [{MAINSNAK: {DATAVALUE: {VALUE: "sha1_hash"}}}]}
        }
        under_test = extract_previous_sha1_hashes(test_entity)
        self.assertEqual(under_test, test_sha1)

    @mock.patch("usecase.extract_datasets_infos_from_database.os.environ")
    @mock.patch("usecase.extract_datasets_infos_from_database.wbi_core.ItemEngine")
    @mock.patch(
        "usecase.extract_datasets_infos_from_database.extract_dataset_version_codes"
    )
    def test_extract_database_sha1_with_None_sha1(
        self, mock_versions_extractor, mock_item_engine, mock_env
    ):
        # A claim whose value is None must be ignored, yielding an empty set.
        test_env = {
            SHA1_HASH_PROP: "test_sha1_prop",
        }
        mock_env.__getitem__.side_effect = test_env.__getitem__
        mock_versions_extractor.return_value = {"Q81"}
        test_entity = ["Q80"]
        mock_item_engine.return_value.get_json_representation.return_value = {
            CLAIMS: {"test_sha1_prop": [{MAINSNAK: {DATAVALUE: {VALUE: None}}}]}
        }
        under_test = extract_previous_sha1_hashes(test_entity)
        self.assertEqual(under_test, set())

    @mock.patch(
        "usecase.extract_datasets_infos_from_database.extract_dataset_version_codes"
    )
    def test_extract_database_sha1_with_non_existing_entity_should_return_empty_sha1_dict(
        self, mock_versions_extractor
    ):
        # No version codes for the entity means no hashes to collect.
        test_entity = ["mock"]
        test_sha1 = set()
        mock_versions_extractor.return_value = set()
        under_test = extract_previous_sha1_hashes(test_entity)
        self.assertEqual(under_test, test_sha1)
class TestExtractInfosTest(TestCase):
    """Unit tests for extract_source_infos()."""

    @mock.patch("usecase.extract_datasets_infos_from_database.os.environ")
    @mock.patch("usecase.extract_datasets_infos_from_database.wbi_core.ItemEngine")
    def test_extract_source_infos_with_default_parameters_should_return_dataset_infos(
        self, mock_item_engine, mock_env
    ):
        # The stable-URL property id is read from the (mocked) environment.
        test_env = {
            STABLE_URL_PROP: "test_url_prop",
        }
        mock_env.__getitem__.side_effect = test_env.__getitem__
        # Fake entity carrying one stable-URL claim and an English label.
        mock_item_engine.return_value.get_json_representation.return_value = {
            CLAIMS: {
                "test_url_prop": [
                    {
                        MAINSNAK: {
                            DATAVALUE: {
                                VALUE: "http://www.stl.laval.qc.ca/opendata/GTF_STL.zip"
                            }
                        }
                    }
                ],
            },
            LABELS: {ENGLISH: {VALUE: "test_name"}},
        }
        (
            under_test_url,
            under_test_name,
        ) = extract_source_infos("Q82")
        self.assertEqual(
            under_test_url, "http://www.stl.laval.qc.ca/opendata/GTF_STL.zip"
        )
        self.assertEqual(under_test_name, "test_name")
class TestExtractDatasetsInfosFromDatabase(TestCase):
    """Unit tests for the GTFS/GBFS dataset-info extraction entry points."""

    @mock.patch("usecase.extract_datasets_infos_from_database.os.environ")
    @mock.patch(
        "usecase.extract_datasets_infos_from_database.extract_source_entity_codes"
    )
    @mock.patch("usecase.extract_datasets_infos_from_database.extract_source_infos")
    @mock.patch(
        "usecase.extract_datasets_infos_from_database.extract_previous_sha1_hashes"
    )
    def test_extract_gtfs_with_valid_parameters_should_return_dataset_infos(
        self,
        mock_sha1_extractor,
        mock_source_infos_extractor,
        mock_entity_codes_extractor,
        mock_env,
    ):
        # One source entity -> one DatasetInfo combining url, name and hashes.
        test_env = {
            GTFS_CATALOG_OF_SOURCES_CODE: "test_gtfs_catalog",
        }
        mock_env.__getitem__.side_effect = test_env.__getitem__
        mock_entity_codes_extractor.return_value = ["Q80"]
        mock_source_infos_extractor.return_value = "test_url", "test_name"
        mock_sha1_extractor.return_value = {"test_sha1_hash"}
        under_test = extract_gtfs_datasets_infos_from_database()
        self.assertEqual(len(under_test), 1)
        under_test_dataset_info = under_test[0]
        self.assertEqual(under_test_dataset_info.url, "test_url")
        self.assertEqual(under_test_dataset_info.source_name, "test_name")
        self.assertEqual(
            under_test_dataset_info.previous_sha1_hashes, {"test_sha1_hash"}
        )

    @mock.patch("usecase.extract_datasets_infos_from_database.os.environ")
    @mock.patch(
        "usecase.extract_datasets_infos_from_database.extract_source_entity_codes"
    )
    @mock.patch("usecase.extract_datasets_infos_from_database.extract_source_infos")
    @mock.patch(
        "usecase.extract_datasets_infos_from_database.extract_previous_sha1_hashes"
    )
    def test_extract_gbfs_with_valid_parameters_should_return_dataset_infos(
        self,
        mock_sha1_extractor,
        mock_source_infos_extractor,
        mock_entity_codes_extractor,
        mock_env,
    ):
        # Mirror of the GTFS case using the GBFS catalog environment variable.
        test_env = {
            GBFS_CATALOG_OF_SOURCES_CODE: "test_gbfs_catalog",
        }
        mock_env.__getitem__.side_effect = test_env.__getitem__
        mock_entity_codes_extractor.return_value = ["Q80"]
        mock_source_infos_extractor.return_value = "test_url", "test_name"
        mock_sha1_extractor.return_value = {"test_sha1_hash"}
        under_test = extract_gbfs_datasets_infos_from_database()
        self.assertEqual(len(under_test), 1)
        under_test_dataset_info = under_test[0]
        self.assertEqual(under_test_dataset_info.url, "test_url")
        self.assertEqual(under_test_dataset_info.source_name, "test_name")
        self.assertEqual(
            under_test_dataset_info.previous_sha1_hashes, {"test_sha1_hash"}
        )
| StarcoderdataPython |
3430831 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'qt/configuredialog.ui'
#
# Created: Thu Jun 25 09:17:51 2015
# by: pyside-uic 0.2.15 running on PySide 1.2.2
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
class Ui_ConfigureDialog(object):
    """Widget scaffolding for ConfigureDialog, generated by pyside-uic.

    Do not edit this class by hand — regenerate it from
    qt/configuredialog.ui instead (see the warning in the file header).
    """

    def setupUi(self, ConfigureDialog):
        """Build the dialog's widget tree and wire the OK/Cancel button signals."""
        ConfigureDialog.setObjectName("ConfigureDialog")
        ConfigureDialog.resize(418, 303)
        self.gridLayout = QtGui.QGridLayout(ConfigureDialog)
        self.gridLayout.setObjectName("gridLayout")
        self.buttonBox = QtGui.QDialogButtonBox(ConfigureDialog)
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
        self.buttonBox.setObjectName("buttonBox")
        self.gridLayout.addWidget(self.buttonBox, 2, 0, 1, 1)
        self.configGroupBox = QtGui.QGroupBox(ConfigureDialog)
        self.configGroupBox.setTitle("")
        self.configGroupBox.setObjectName("configGroupBox")
        self.formLayout = QtGui.QFormLayout(self.configGroupBox)
        self.formLayout.setObjectName("formLayout")
        self.label0 = QtGui.QLabel(self.configGroupBox)
        self.label0.setObjectName("label0")
        self.formLayout.setWidget(0, QtGui.QFormLayout.LabelRole, self.label0)
        self.lineEditIdentifier = QtGui.QLineEdit(self.configGroupBox)
        self.lineEditIdentifier.setObjectName("lineEditIdentifier")
        self.formLayout.setWidget(0, QtGui.QFormLayout.FieldRole, self.lineEditIdentifier)
        self.checkBoxDefaultLocation = QtGui.QCheckBox(self.configGroupBox)
        self.checkBoxDefaultLocation.setChecked(True)
        self.checkBoxDefaultLocation.setObjectName("checkBoxDefaultLocation")
        self.formLayout.setWidget(1, QtGui.QFormLayout.FieldRole, self.checkBoxDefaultLocation)
        self.label0_2 = QtGui.QLabel(self.configGroupBox)
        self.label0_2.setObjectName("label0_2")
        self.formLayout.setWidget(2, QtGui.QFormLayout.LabelRole, self.label0_2)
        self.horizontalLayout = QtGui.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.lineEditOutputLocation = QtGui.QLineEdit(self.configGroupBox)
        self.lineEditOutputLocation.setObjectName("lineEditOutputLocation")
        self.horizontalLayout.addWidget(self.lineEditOutputLocation)
        self.pushButtonOutputLocation = QtGui.QPushButton(self.configGroupBox)
        self.pushButtonOutputLocation.setObjectName("pushButtonOutputLocation")
        self.horizontalLayout.addWidget(self.pushButtonOutputLocation)
        self.formLayout.setLayout(2, QtGui.QFormLayout.FieldRole, self.horizontalLayout)
        self.gridLayout.addWidget(self.configGroupBox, 1, 0, 1, 1)

        self.retranslateUi(ConfigureDialog)
        QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL("accepted()"), ConfigureDialog.accept)
        QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL("rejected()"), ConfigureDialog.reject)
        QtCore.QMetaObject.connectSlotsByName(ConfigureDialog)

    def retranslateUi(self, ConfigureDialog):
        """Apply the translated user-visible strings to the dialog's widgets."""
        ConfigureDialog.setWindowTitle(QtGui.QApplication.translate("ConfigureDialog", "ConfigureDialog", None, QtGui.QApplication.UnicodeUTF8))
        self.label0.setText(QtGui.QApplication.translate("ConfigureDialog", "identifier: ", None, QtGui.QApplication.UnicodeUTF8))
        self.checkBoxDefaultLocation.setText(QtGui.QApplication.translate("ConfigureDialog", "Use output default location", None, QtGui.QApplication.UnicodeUTF8))
        self.label0_2.setText(QtGui.QApplication.translate("ConfigureDialog", "Output: ", None, QtGui.QApplication.UnicodeUTF8))
        self.pushButtonOutputLocation.setText(QtGui.QApplication.translate("ConfigureDialog", "...", None, QtGui.QApplication.UnicodeUTF8))
| StarcoderdataPython |
11268625 | # -*- coding: utf-8 -*-
"""Tests for sandwich robust covariance estimation
see also in regression for cov_hac compared to Gretl and
sandbox.panel test_random_panel for comparing cov_cluster, cov_hac_panel and
cov_white
Created on Sat Dec 17 08:39:16 2011
Author: <NAME>
"""
import numpy as np
from numpy.testing import assert_almost_equal, assert_allclose
from statsmodels.regression.linear_model import OLS
from statsmodels.tools.tools import add_constant
import statsmodels.stats.sandwich_covariance as sw
def test_cov_cluster_2groups():
    """Compare one- and two-way cluster-robust standard errors against
    Petersen's published reference values.

    Requires Petersen's test_data.txt shipped next to this module:
    http://www.kellogg.northwestern.edu/faculty/petersen
    .../htm/papers/se/test_data.txt
    """
    import os
    cur_dir = os.path.abspath(os.path.dirname(__file__))
    fpath = os.path.join(cur_dir, "test_data.txt")
    pet = np.genfromtxt(fpath)
    endog = pet[:, -1]
    group = pet[:, 0].astype(int)  # firm identifier
    time = pet[:, 1].astype(int)   # year identifier

    exog = add_constant(pet[:, 2])
    res = OLS(endog, exog).fit()

    cov01, covg, covt = sw.cov_cluster_2groups(res, group, group2=time)

    # Reference number from Petersen
    # http://www.kellogg.northwestern.edu/faculty/petersen/htm
    # .../papers/se/test_data.htm
    bse_petw = [0.0284, 0.0284]   # White (heteroskedasticity-robust)
    bse_pet0 = [0.0670, 0.0506]   # clustered by firm
    bse_pet1 = [0.0234, 0.0334]   # clustered by year
    bse_pet01 = [0.0651, 0.0536]  # clustered by firm and year
    bse_0 = sw.se_cov(covg)
    bse_1 = sw.se_cov(covt)
    bse_01 = sw.se_cov(cov01)

    assert_almost_equal(bse_petw, res.HC0_se, decimal=4)
    assert_almost_equal(bse_0, bse_pet0, decimal=4)
    assert_almost_equal(bse_1, bse_pet1, decimal=4)
    assert_almost_equal(bse_01, bse_pet01, decimal=4)
def test_hac_simple():
    """Compare HAC (Newey-West) covariances against R sandwich/NeweyWest output."""
    from statsmodels.datasets import macrodata
    d2 = macrodata.load_pandas().data
    # growth rates of gdp and investment, plus the lagged real interest rate
    g_gdp = 400 * np.diff(np.log(d2['realgdp'].values))
    g_inv = 400 * np.diff(np.log(d2['realinv'].values))
    exogg = add_constant(np.c_[g_gdp, d2['realint'][:-1].values])
    res_olsg = OLS(g_inv, exogg).fit()

    # > NeweyWest(fm, lag = 4, prewhite = FALSE, sandwich = TRUE,
    #             verbose=TRUE, adjust=TRUE)
    # Lag truncation parameter chosen: 4
    # (Intercept) ggdp lint
    cov1_r = [
        [+1.40643899878678802, -0.3180328707083329709, -0.060621111216488610],
        [-0.31803287070833292, 0.1097308348999818661, +0.000395311760301478],
        [-0.06062111121648865, 0.0003953117603014895, +0.087511528912470993]
    ]

    # > NeweyWest(fm, lag = 4, prewhite = FALSE, sandwich = TRUE,
    #             verbose=TRUE, adjust=FALSE)
    # Lag truncation parameter chosen: 4
    # (Intercept) ggdp lint
    cov2_r = [
        [+1.3855512908840137, -0.313309610252268500, -0.059720797683570477],
        [-0.3133096102522685, +0.108101169035130618, +0.000389440793564339],
        [-0.0597207976835705, +0.000389440793564336, +0.086211852740503622]
    ]

    cov1 = sw.cov_hac_simple(res_olsg, nlags=4, use_correction=True)
    se1 = sw.se_cov(cov1)
    cov2 = sw.cov_hac_simple(res_olsg, nlags=4, use_correction=False)
    se2 = sw.se_cov(cov2)
    # assert_allclose with default tolerances, chosen after an exact-match
    # failure observed with NumPy 1.23
    assert_allclose(cov1, cov1_r)
    assert_allclose(cov2, cov2_r)
    assert_allclose(np.sqrt(np.diag(cov1_r)), se1)
    assert_allclose(np.sqrt(np.diag(cov2_r)), se2)

    # compare default for nlags
    cov3 = sw.cov_hac_simple(res_olsg, use_correction=False)
    cov4 = sw.cov_hac_simple(res_olsg, nlags=4, use_correction=False)
    assert_allclose(cov3, cov4)
| StarcoderdataPython |
1994144 | # -*- coding: utf-8 -*-
"""
1947. Maximum Compatibility Score Sum
https://leetcode.com/problems/maximum-compatibility-score-sum/
Example 1:
Input: students = [[1,1,0],[1,0,1],[0,0,1]], mentors = [[1,0,0],[0,0,1],[1,1,0]]
Output: 8
Explanation: We assign students to mentors in the following way:
- student 0 to mentor 2 with a compatibility score of 3.
- student 1 to mentor 0 with a compatibility score of 2.
- student 2 to mentor 1 with a compatibility score of 3.
The compatibility score sum is 3 + 2 + 3 = 8.
Example 2:
Input: students = [[0,0],[0,0],[0,0]], mentors = [[1,1],[1,1],[1,1]]
Output: 0
Explanation: The compatibility score of any student-mentor pair is 0.
"""
from typing import List
class Solution:
    """LC 1947: assign each student a distinct mentor maximizing summed compatibility."""

    def maxCompatibilitySum1(self, students: List[List[int]], mentors: List[List[int]]) -> int:
        """Brute-force DFS that copies the assignment path at every branch.

        M: len(students) / N: len(students[0])
        TC: O(M! * M * N) / SC: O(M^2) for the copied path lists

        (Rewritten to avoid the original's mutable default arguments on the
        nested dfs and the self.ans accumulator.)
        """
        best = 0

        def dfs(s_idx: int, assigned: List[int], scores: int) -> None:
            nonlocal best
            if s_idx == len(students):
                # Every student placed: keep the best total seen so far.
                best = max(best, scores)
                return
            for m_idx in range(len(students)):
                if m_idx not in assigned:  # O(M) membership on a list
                    score = sum(s == m for s, m in zip(students[s_idx], mentors[m_idx]))
                    dfs(s_idx + 1, assigned + [m_idx], scores + score)

        dfs(0, [], 0)
        return best

    def maxCompatibilitySum2(self, students: List[List[int]], mentors: List[List[int]]) -> int:
        """Backtracking DFS that mutates a shared 'used' set in place.

        M: len(students) / N: len(students[0])
        TC: O(M! * N) / SC: O(M)

        (Uses a set instead of the original list so the membership test is
        O(1), matching this variant's O(M)-space/backtracking intent.)
        """
        best = 0
        used = set()

        def dfs(s_idx: int, scores: int) -> None:
            nonlocal best
            if s_idx == len(students):
                best = max(best, scores)
                return
            for m_idx in range(len(students)):
                if m_idx not in used:  # O(1) membership on a set
                    score = sum(s == m for s, m in zip(students[s_idx], mentors[m_idx]))
                    used.add(m_idx)
                    dfs(s_idx + 1, scores + score)
                    used.discard(m_idx)

        dfs(0, 0)
        return best
| StarcoderdataPython |
12808182 | <reponame>moocowmoo/paywall
#!/usr/bin/python3
"""CGI smoke test for Keybase saltpack signature verification via bmdjson."""
import cgi
import cgitb
import datetime
import json
import os
import re
import requests
import subprocess
import sys
import time

from bmdjson import check_address


def print_signature_report(signature):
    """Verify *signature* with check_address and print its fields sorted by key.

    Keys sort alphabetically (sig_addr, sig_by, sig_good), which is the
    display order the original author wanted; dict keys are unique so a
    plain sorted() over items() is equivalent to sorting on the key alone.
    """
    result = check_address(signature)
    for key, value in sorted(result.items()):
        print("[" + str(key) + "] = ", value)
    print()


print("Content-Type: text/plain\n")
print("testing keybase")
print()

print("PASS:")
print_signature_report("BEGIN KEYBASE SALTPACK SIGNED MESSAGE. kXR7VktZdyH7rvq v5weRa0zkSjiJmm 8dzt8BnSF7QPfAy AmWtlYORgWXP5hk aXmzZHPBPoIRpYD qsXcl0JX7RT65NS KLnnW8kwG9ujBNt r2bd6GNLnp4xVMr btCVAG2TMDpNhVf yXSbZmzQDnE6mIM Y4oS4YGVbw244Je Bc7lmO6225Gu6tj HgIwRnLz975GBZU Bc3GLDyRpvTEGXr AzRtx0gMk2FzHxf 2oimZKG. END KEYBASE SALTPACK SIGNED MESSAGE.")

print("FAIL: Bad String")
print_signature_report("BEGIN KEYBASE SALTPACK SIGNED MESSAGE. kXR7VktZdy27rvq v5weRa0zkDL3e9k D1e7HgTLY1WFWdi UfZI1s56lquWUJu lBvdIblMbFGwTGa M9oYSI9cU7KjGW9 2JOGghIjQX3Fqw5 xsvEpPo9pEuA25J Ut0J0Fur0C3F8oZ n50PAvVWVmb0iEP 5MNUBEMHMo5DTtF OhK66v3FFwu0qJe 8R35q5A5ycevVsR pdaOBQQ1VGcNIlF 9YU6a0Wi5kd85JH rjSupUZ. END KEYBASE SALTPACK SIGNED MESSAGE.")

print("end.")
| StarcoderdataPython |
3257737 | <gh_stars>10-100
# coding: utf-8
"""
Automated Tool for Optimized Modelling (ATOM)
Author: Mavs
Description: Unit tests for training.py
"""
# Standard packages
import pytest
# Own modules
from atom.training import (
DirectClassifier,
DirectRegressor,
SuccessiveHalvingClassifier,
SuccessiveHalvingRegressor,
TrainSizingClassifier,
TrainSizingRegressor,
)
from .utils import bin_train, bin_test, class_train, class_test, reg_train, reg_test
# Test trainers ============================================== >>
def test_infer_task():
    """Assert that the correct task is inferred from the data."""
    binary_trainer = DirectClassifier("LR")
    binary_trainer.run(bin_train, bin_test)
    assert binary_trainer.task == "binary classification"

    multiclass_trainer = DirectClassifier("LR")
    multiclass_trainer.run(class_train, class_test)
    assert multiclass_trainer.task == "multiclass classification"

    regression_trainer = DirectRegressor("LGB")
    regression_trainer.run(reg_train, reg_test)
    assert regression_trainer.task == "regression"
def test_sh_skip_runs_below_zero():
    """Assert that an error is raised if skip_runs < 0."""
    halving = SuccessiveHalvingRegressor(models="OLS", skip_runs=-1)
    with pytest.raises(ValueError):
        halving.run(reg_train, reg_test)


def test_sh_skip_runs_too_large():
    """Assert that an error is raised if skip_runs >= n_runs."""
    halving = SuccessiveHalvingRegressor(models=["OLS", "BR"], skip_runs=2)
    with pytest.raises(ValueError):
        halving.run(reg_train, reg_test)
def test_models_are_restored():
    """Assert that the models attributes are all restored after fitting."""
    halving = SuccessiveHalvingRegressor(["Tree", "RF", "AdaB", "LGB"], random_state=1)
    halving.run(reg_train, reg_test)
    assert "Tree" not in halving._models  # the original model entry is dropped
    for model_name in ("Tree4", "RF2", "AdaB1"):
        assert model_name in halving.models
def test_ts_int_train_sizes():
    """Assert that an integer train_sizes produces evenly spaced subsets."""
    sizing = TrainSizingClassifier("Tree", train_sizes=5, random_state=1)
    sizing.run(reg_train, reg_test)
    assert len(sizing.tree02.train) == 61
    assert len(sizing.tree06.train) == 185


def test_ts_different_train_sizes_types():
    """Assert that train_sizes accepts a mix of fractions and absolute counts."""
    sizing = TrainSizingClassifier("Tree", train_sizes=[0.2, 200], random_state=1)
    sizing.run(reg_train, reg_test)
    assert len(sizing.tree02.train) == 61
    assert len(sizing.tree065.train) == 200
# Test goals ======================================================= >>
def test_goals_trainers():
    """Assert that the goal of every Trainer class is set correctly."""
    assert DirectClassifier("LR").goal == "class"
    assert DirectRegressor("OLS").goal == "reg"


def test_goals_successive_halving():
    """Assert that the goal of every SuccessiveHalving class is set correctly."""
    assert SuccessiveHalvingClassifier("LR").goal == "class"
    assert SuccessiveHalvingRegressor("OLS").goal == "reg"


def test_goals_train_sizing():
    """Assert that the goal of every TrainSizing class is set correctly."""
    assert TrainSizingClassifier("LR").goal == "class"
    assert TrainSizingRegressor("OLS").goal == "reg"
| StarcoderdataPython |
4999720 | <filename>voicemd/models/long_filter_cnn.py
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
# from voicemd.utils.hp_utils import check_and_log_hp
logger = logging.getLogger(__name__)
class LongFilterCNN(nn.Module):
    """Binary classifier over spectrogram-like input (batch, 1, 80, time).

    A tall 2-D convolution spanning all 80 frequency bins collapses the
    frequency axis, after which the network operates purely in 1-D along
    time before a fully connected classifier head.
    """

    def __init__(self, hyper_params):
        super(LongFilterCNN, self).__init__()
        self.hyper_params = hyper_params
        # Tall filter: 80 bins high, 3 frames wide -> frequency axis becomes 1.
        self.conv2d = nn.Sequential(
            nn.Conv2d(1, 64, (80, 3), 1),
            nn.ReLU(),
        )
        conv1d_layers = [
            nn.Conv1d(64, 64, 3, 1),
            nn.Conv1d(64, 32, 3, 1),
            nn.ReLU(),
            nn.MaxPool1d(2),
            nn.Conv1d(32, 32, 3, 1),
            nn.Conv1d(32, 32, 3, 1),
            nn.ReLU(),
            nn.MaxPool1d(2),
        ]
        self.conv1d = nn.Sequential(*conv1d_layers)
        # 1920 = 32 channels * 60 time steps; fixes the expected input width.
        self.classifier = nn.Sequential(
            nn.Linear(1920, 256),
            nn.ReLU(),
            nn.Linear(256, 128),
            nn.ReLU(),
            nn.Linear(128, 2),
        )

    def forward(self, x):
        features = self.conv2d(x)
        # Drop the collapsed frequency dimension: (B, 64, 1, T') -> (B, 64, T').
        features = torch.squeeze(features, dim=2)
        features = self.conv1d(features)
        features = torch.flatten(features, 1)
        return self.classifier(features)
| StarcoderdataPython |
6456651 | <reponame>akx/upcloud-python-api
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from upcloud_api import Tag
def create_cluster(manager, cluster):
    """Create every server described in *cluster* and wait for each to start.

    Returns the manager's current server list.
    """
    for name in cluster:
        manager.create_server(cluster[name])
    for name in cluster:
        cluster[name].ensure_started()
    return manager.get_servers()
def firewall_test(manager, firewall_rules, server):
    """Run tests on firewall rules.

    Adds and removes a single rule, then configures and clears a full set.
    Assumes *firewall_rules* contains exactly two rules -- TODO confirm
    with the caller.
    """
    # add 1 rule and remove it
    server.add_firewall_rule(firewall_rules[0])
    fs = server.get_firewall_rules()
    assert len(fs) == 1
    fs[0].destroy()
    fs = server.get_firewall_rules()
    assert len(fs) == 0
    # add several rules and remove them
    server.configure_firewall(firewall_rules)
    fs = server.get_firewall_rules()
    assert len(fs) == 2
    for f in fs:
        # Always delete position 1 (f itself is unused): rule positions
        # presumably renumber after each delete, so repeatedly removing the
        # first remaining rule clears them all -- verify against the API.
        manager.delete_firewall_rule(server.uuid, 1)
    fs = server.get_firewall_rules()
    assert len(fs) == 0
def server_test(manager, server):
    """Run tests on a server instance.

    Resizes CPU/memory, attaches an extra IP and a new 10 GB storage,
    starts the server, then re-syncs from the API and verifies the changes.
    The expected counts (3 storages, 4 IPs) assume the server starts with
    2 storage devices and 3 IP addresses -- TODO confirm with the fixture.
    """
    server.populate()
    server.core_number = '3'
    server.memory_amount = '1024'
    server.save()
    server.add_ip()
    storage = manager.create_storage(size=10, tier='maxiops', zone='uk-lon1')
    server.add_storage(storage)
    server.start()
    # sync new info from API and assert the changes from above have happened
    server.populate()
    assert server.core_number == '3'
    assert server.memory_amount == '1024'
    assert len(server.storage_devices) == 3
    assert len(server.ip_addresses) == 4
    server.ensure_started()
def tag_servers_test(manager, tags, cluster):
    """Run tests on tags.

    Creates all tags, assigns them to the cluster's servers, then verifies
    that exactly one server can be fetched by the 'testlb' tag.
    """
    # create tags
    for t in tags:
        manager.create_tag(str(t))
    cluster['web1'].add_tags(['testweb'])
    cluster['web2'].add_tags(['testweb'])
    # NOTE(review): 'lb' gets tags[1] (per the old comment, 'db') while 'db'
    # gets 'testlb' -- this looks swapped; confirm it is intentional.
    cluster['lb'].add_tags([tags[1]])  # tags[1] is 'db'
    cluster['db'].add_tags(['testlb'])
    fetched_servers = manager.get_servers(tags_has_one=['testlb'])
    assert len(fetched_servers) == 1
    assert fetched_servers[0].tags[0] == 'testlb'
| StarcoderdataPython |
6542274 | # Auto generated by generator.py. Delete this line if you make modification.
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
# XPath selectors used to extract product fields from a detail page; empty
# strings mean the field is not extracted for this site.
XPATH = {
    'name' : "//div[@class='s_product_center']/div[@class='s_hot_right']/div[@class='s_hot_name']/font",
    'price' : "//div[@class='Detail_Right']/ul/li[2]/strong/font",
    'category' : "",
    'description' : "//div[@class='s_product_content']/div[@class='panes']/div",
    'images' : "//meta[@property='og:image']/@content",
    'canonical' : "",
    'base_url' : "//base/@href",
    'brand' : ""
}
# Spider identity and crawl scope.
name = 'techcity.vn'
allowed_domains = ['techcity.vn']
start_urls = ['http://techcity.vn']
tracking_url = ''
# Sitemap crawling is effectively disabled (empty URL/rule lists).
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = []
# Product pages go to parse_item; other internal links are followed via parse.
rules = [
    Rule(LinkExtractor(allow=['/Products/']), 'parse_item'),
    Rule(LinkExtractor(allow=['/[a-zA-Z0-9-]+/']), 'parse'),
    #Rule(LinkExtractor(), 'parse_item_and_links'),
]
| StarcoderdataPython |
3468579 | <filename>B2G/gecko/testing/marionette/update-smoketests/smoketest.py
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import json
import os
import subprocess
import sys
import tempfile
import threading
import zipfile
from ConfigParser import ConfigParser
# Resolve the marionette client directory relative to this script so the
# update tests can be located regardless of the current working directory.
this_dir = os.path.abspath(os.path.dirname(__file__))
marionette_dir = os.path.dirname(this_dir)
marionette_client_dir = os.path.join(marionette_dir, 'client', 'marionette')
def find_b2g():
    """Return a B2GInstance from the marionette client package.

    Appends the client directory to sys.path as a side effect so the
    deferred import below can succeed.
    """
    sys.path.append(marionette_client_dir)
    from b2ginstance import B2GInstance
    return B2GInstance()
class DictObject(dict):
    """Dict whose keys are also readable as attributes, recursively.

    Nested plain dicts are wrapped in DictObject on access, so chained
    attribute lookups like ``cfg.devices.serials`` work.
    """

    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)

    def __getitem__(self, key):
        raw = dict.__getitem__(self, key)
        return DictObject(raw) if isinstance(raw, dict) else raw
class SmokeTestError(Exception):
    """Base class for all smoketest failures."""


class SmokeTestConfigError(SmokeTestError):
    """Raised for problems found in smoketest-config.json."""

    def __init__(self, message):
        super(SmokeTestConfigError, self).__init__(
            'smoketest-config.json: ' + message)
class SmokeTestConfig(DictObject):
    """Parsed smoketest-config.json with per-build metadata caching.

    Note: uses dict.iteritems(), so this class targets Python 2.
    """

    # Keys that must exist at the top level of the config file.
    TOP_LEVEL_REQUIRED = ('devices', 'public_key', 'private_key')
    # Keys that must exist in every device entry.
    DEVICE_REQUIRED = ('system_fs_type', 'system_location', 'data_fs_type',
                       'data_location', 'sdcard', 'sdcard_recovery',
                       'serials')

    def __init__(self, build_dir):
        """Load and validate smoketest-config.json from *build_dir*."""
        self.top_dir = build_dir
        self.build_data = {}  # cache: device -> build_id -> metadata
        self.flash_template = None
        with open(os.path.join(build_dir, 'smoketest-config.json')) as f:
            DictObject.__init__(self, json.loads(f.read()))
        for required in self.TOP_LEVEL_REQUIRED:
            if required not in self:
                raise SmokeTestConfigError('No "%s" found' % required)
        if len(self.devices) == 0:
            raise SmokeTestConfigError('No devices found')
        for name, device in self.devices.iteritems():
            for required in self.DEVICE_REQUIRED:
                if required not in device:
                    raise SmokeTestConfigError('No "%s" found in device "%s"' % (required, name))

    def get_build_data(self, device, build_id):
        """Return (and cache) build metadata for *device*/*build_id*.

        Reads application.ini and platform.ini out of the build's flash.zip
        to extract version and build-id strings.
        """
        if device in self.build_data:
            if build_id in self.build_data[device]:
                return self.build_data[device][build_id]
        else:
            self.build_data[device] = {}
        build_dir = os.path.join(self.top_dir, device, build_id)
        flash_zip = os.path.join(build_dir, 'flash.zip')
        with zipfile.ZipFile(flash_zip) as zip:
            app_ini = ConfigParser()
            app_ini.readfp(zip.open('system/b2g/application.ini'))
            platform_ini = ConfigParser()
            platform_ini.readfp(zip.open('system/b2g/platform.ini'))
        build_data = self.build_data[device][build_id] = DictObject({
            'app_version': app_ini.get('App', 'version'),
            'app_build_id': app_ini.get('App', 'buildid'),
            'platform_build_id': platform_ini.get('Build', 'buildid'),
            'platform_milestone': platform_ini.get('Build', 'milestone'),
            'complete_mar': os.path.join(build_dir, 'complete.mar'),
            'flash_script': os.path.join(build_dir, 'flash.sh')
        })
        return build_data
class SmokeTestRunner(object):
    """Drives B2G update smoketests: builds partial MARs, finds a device
    over ADB, and runs the marionette update test suite against it.

    Note: uses Python 2 print statements.
    """

    # Seconds to wait for `adb wait-for-device` before trying the next serial.
    DEVICE_TIMEOUT = 30

    def __init__(self, config, b2g, run_dir=None):
        self.config = config
        self.b2g = b2g
        self.run_dir = run_dir or tempfile.mkdtemp()
        update_tools = self.b2g.import_update_tools()
        self.b2g_config = update_tools.B2GConfig()

    def run_b2g_update_test(self, serial, testvars, tests):
        """Invoke the venv'd b2g update test script for one device serial."""
        b2g_update_test = os.path.join(marionette_client_dir,
                                       'venv_b2g_update_test.sh')
        if not tests:
            # Default to the full update-tests manifest.
            tests = [os.path.join(marionette_client_dir, 'tests',
                                  'update-tests.ini')]
        args = ['bash', b2g_update_test, sys.executable,
                '--homedir', self.b2g.homedir,
                '--address', 'localhost:2828',
                '--type', 'b2g+smoketest',
                '--device', serial,
                '--testvars', testvars]
        args.extend(tests)
        print ' '.join(args)
        subprocess.check_call(args)

    def build_testvars(self, device, start_id, finish_id):
        """Create (or reuse) the testvars.json describing a start->finish
        update, generating the partial MAR if it does not exist yet.
        """
        run_dir = os.path.join(self.run_dir, device, start_id, finish_id)
        if not os.path.exists(run_dir):
            os.makedirs(run_dir)
        start_data = self.config.get_build_data(device, start_id)
        finish_data = self.config.get_build_data(device, finish_id)
        partial_mar = os.path.join(run_dir, 'partial.mar')
        if not os.path.exists(partial_mar):
            build_gecko_mar = os.path.join(self.b2g.update_tools,
                                           'build-gecko-mar.py')
            subprocess.check_call([sys.executable, build_gecko_mar,
                                   '--from', start_data.complete_mar,
                                   '--to', finish_data.complete_mar,
                                   partial_mar])
        finish_data['partial_mar'] = partial_mar
        testvars = os.path.join(run_dir, 'testvars.json')
        if not os.path.exists(testvars):
            open(testvars, 'w').write(json.dumps({
                'start': start_data,
                'finish': finish_data
            }))
        return testvars

    def wait_for_device(self, device):
        """Return the first serial of *device* that ADB recognizes, or None.

        `adb wait-for-device` can hang indefinitely, so it is run in a
        helper thread that is abandoned (and the process killed) after
        DEVICE_TIMEOUT seconds.
        """
        for serial in self.config.devices[device].serials:
            proc = subprocess.Popen([self.b2g.adb_path, '-s', serial,
                                     'wait-for-device'])
            def wait_for_adb():
                proc.communicate()
            thread = threading.Thread(target=wait_for_adb)
            thread.start()
            thread.join(self.DEVICE_TIMEOUT)
            if thread.isAlive():
                print >>sys.stderr, '%s device %s is not recognized by ADB, ' \
                    'trying next device' % (device, serial)
                proc.kill()
                thread.join()
                continue
            return serial
        return None

    def run_smoketests_for_device(self, device, start_id, finish_id, tests):
        """Run the update tests for one device, reporting failures on stderr."""
        testvars = self.build_testvars(device, start_id, finish_id)
        serial = self.wait_for_device(device)
        if not serial:
            raise SmokeTestError('No connected serials for device "%s" could ' \
                                 'be found' % device)
        try:
            self.run_b2g_update_test(serial, testvars, tests)
        except subprocess.CalledProcessError:
            # A failing test run is reported, not fatal, so other devices run.
            print >>sys.stderr, 'SMOKETEST-FAIL | START=%s | FINISH=%s | ' \
                                'DEVICE=%s/%s | %s' % (start_id, finish_id,
                                                       device, serial, testvars)

    def run_smoketests(self, build_ids, tests):
        """Test updating from every older build to the newest build id."""
        build_ids.sort()
        latest_build_id = build_ids.pop(-1)
        for build_id in build_ids:
            for device in self.config.devices:
                self.run_smoketests_for_device(device, build_id,
                                               latest_build_id, tests)
| StarcoderdataPython |
12827721 | import sys
import os
# Make the parent directory importable (note: relative to the process CWD,
# not this file -- presumably the scripts are always run from here).
sys.path.append('../')
current_dir = os.path.dirname(os.path.abspath(__file__))
# Location of the optstoic v3 database bundled alongside this package.
DATA_DIR = os.path.normpath(os.path.join(
    current_dir, '../data/', 'optstoic_db_v3'))
| StarcoderdataPython |
8098143 | <filename>torchtime/models/inception.py
import math
from typing import List, Optional, Any
import torch
from torch import nn, jit, Tensor
# Maps a configuration string to the matching torch.nn activation class.
# 'linear' maps to Identity, i.e. no activation is applied.
activations = {
    'relu': nn.ReLU,
    'elu': nn.ELU,
    'leaky_relu': nn.LeakyReLU,
    'sigmoid': nn.Sigmoid,
    'tanh': nn.Tanh,
    'linear': nn.Identity
}
class BasicConv1d(nn.Module):
    """Conv1d -> BatchNorm1d -> activation block with no convolution bias.

    Parameters
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    activation : str or None
        Key into the module-level ``activations`` mapping ('relu', 'elu',
        ...). ``None`` (or 'linear') applies no activation.
    **kwargs
        Forwarded to :class:`torch.nn.Conv1d` (kernel_size, stride, padding, ...).
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        activation: Optional[str],
        **kwargs: Any
    ) -> None:
        super(BasicConv1d, self).__init__()
        # Bias is redundant in front of batch norm, so it is disabled.
        self.conv = nn.Conv1d(in_channels, out_channels, bias=False, **kwargs)
        self.bn = nn.BatchNorm1d(out_channels, eps=0.001)
        # Honor the Optional annotation: None means "no activation"
        # (previously this raised a KeyError).
        if activation is None:
            self.activation = nn.Identity()
        else:
            self.activation = activations[activation]()

    def forward(self, x: Tensor) -> Tensor:
        x = self.conv(x)
        x = self.bn(x)
        x = self.activation(x)
        return x
class Concat(nn.Module):
    """Concatenate a list of tensors along a fixed dimension."""

    def __init__(self, dim=1):
        super(Concat, self).__init__()
        self.dim = dim

    def forward(self, x: List[Tensor]):
        return torch.cat(x, dim=self.dim)

    def __repr__(self):
        return '{}(dim={})'.format(type(self).__name__, self.dim)
class Flatten(nn.Module):
    """Collapse every dimension after the batch dimension."""

    def forward(self, x):
        batch = x.size(0)
        return x.view(batch, -1)
class GAP1d(nn.Module):
    """Global adaptive average pooling followed by flattening."""

    def __init__(self, output_size=1):
        super(GAP1d, self).__init__()
        self.gap = nn.AdaptiveAvgPool1d(output_size)
        self.flatten = Flatten()

    def forward(self, x):
        pooled = self.gap(x)
        return self.flatten(pooled)
class Inception(nn.Module):
    """Inception Module.

    Runs ``n_convolutions`` parallel BasicConv1d branches with halving
    kernel sizes plus a maxpool->1x1-conv branch, then concatenates all
    branches along the channel axis, so the output has
    ``n_filters * (n_convolutions + 1)`` channels and the input's length.
    """

    def __init__(self, n_inputs: int, n_convolutions: int = 3, n_filters: int = 32, kernel_size: int = 39,
                 use_bottleneck: bool = True, bottleneck_size: int = 32, activation: str = 'linear'):
        super(Inception, self).__init__()
        # More branches than log2(kernel_size) would produce degenerate
        # (sub-1) kernels after repeated halving.
        if n_convolutions > math.log2(kernel_size):
            raise AttributeError
        stride = 1  # hard coded, since SAME padding does not support any stride values other than 1
        self.kernel_size = kernel_size
        # self.activation = activations[activation]
        # Optional 1x1 bottleneck reduces channels before the wide convolutions.
        self.bottleneck = nn.Conv1d(in_channels=n_inputs, out_channels=bottleneck_size,
                                    kernel_size=1,
                                    padding='same',
                                    stride=stride,
                                    bias=False) if use_bottleneck else nn.Identity()
        self.conv_layers = nn.ModuleDict()
        for i in range(n_convolutions):
            # Halve the kernel per branch, forcing an odd size so 'same'
            # padding stays symmetric.
            kernel_size_ = self.kernel_size // (2 ** i)
            # kernel_size_ = self.kernel_size - (2 * i)
            if kernel_size_ % 2 == 0:
                kernel_size_ -= 1
            self.conv_layers[f"Conv1D_{i}"] = BasicConv1d(in_channels=bottleneck_size if use_bottleneck else n_inputs,
                                                          out_channels=n_filters,
                                                          activation=activation,
                                                          kernel_size=kernel_size_,
                                                          padding='same',
                                                          stride=stride)
        # Parallel branch: maxpool over the raw input, then a 1x1 conv.
        self.maxpoolconv = nn.Sequential(nn.MaxPool1d(kernel_size=3, stride=stride, padding=1),
                                         nn.Conv1d(in_channels=n_inputs,
                                                   out_channels=n_filters,
                                                   kernel_size=1,
                                                   padding='same',
                                                   stride=stride,
                                                   bias=False))
        self.concat = Concat()
        self.bn = nn.BatchNorm1d(n_filters * (n_convolutions + 1))
        self.relu = nn.ReLU()

    def forward(self, x):
        # The pooling branch sees the raw input; the conv branches see the
        # (possibly bottlenecked) input.
        input_tensor = x
        x = self.bottleneck(x)
        out = self.concat([l(x) for l in self.conv_layers.values()] + [self.maxpoolconv(input_tensor)])
        out = self.bn(out)
        out = self.relu(out)
        return out
class ShortCut(nn.Module):
    """Residual connection: project ``x`` to ``n_filters`` channels with a
    1x1 conv + batch norm, add ``y``, and apply ReLU.
    """

    def __init__(self, in_channels: int, n_filters: int):
        super(ShortCut, self).__init__()
        self.conv = nn.Conv1d(in_channels=in_channels, out_channels=n_filters, kernel_size=1, bias=False)
        self.bn = nn.BatchNorm1d(n_filters)
        self.activation = nn.ReLU()

    def forward(self, x: torch.Tensor, y: torch.Tensor):
        shortcut = self.bn(self.conv(x))
        return self.activation(shortcut + y)
class InceptionTime(nn.Module):
    """InceptionTime model definition.

    Stacks ``depth`` Inception modules; when ``use_residual`` is True every
    third module is followed by a ``ShortCut`` residual connection. The final
    stack's feature map is globally pooled and classified by a linear layer,
    returning raw logits (the loss function is expected to apply softmax).

    ``initialization='glorot_uniform'`` applies Xavier-uniform init to all
    conv and linear weights; any other value keeps PyTorch's defaults.
    """

    def __init__(self, n_inputs: Optional[int] = None, n_classes: Optional[int] = None, use_residual=True,
                 use_bottleneck=True, depth=6, n_convolutions: int = 3, n_filters: int = 32, kernel_size=32,
                 initialization="kaiming_uniform", **kwargs):
        super(InceptionTime, self).__init__()
        # self.blocks alternates between Sequential stacks of Inception
        # modules (even indices) and ShortCut residuals (odd indices);
        # forward() relies on exactly this alternation.
        self.blocks = nn.ModuleList([nn.Sequential()])
        for d in range(depth):
            self.blocks[-1].add_module(f"Inception_{d}",
                                       Inception(n_inputs=n_inputs if d == 0 else n_filters * (n_convolutions + 1),
                                                 use_bottleneck=use_bottleneck,
                                                 n_convolutions=n_convolutions,
                                                 n_filters=n_filters,
                                                 kernel_size=kernel_size,
                                                 **kwargs)
                                       )
            if use_residual and d % 3 == 2:
                # The first residual's input is the raw network input;
                # later residuals take the previous stage's channel count.
                n_in, n_out = n_inputs if d == 2 else n_filters * (
                    n_convolutions + 1), n_filters * (n_convolutions + 1)
                self.blocks.append(ShortCut(n_in, n_out))
                if d < depth - 1:
                    # Start a new Inception stack after each residual.
                    self.blocks.append(nn.Sequential())
        self.gap = GAP1d(1)
        self.fc = nn.Linear(n_filters * (n_convolutions + 1), out_features=n_classes, bias=True)
        if initialization == "glorot_uniform":
            self.apply(self.glorot_uniform_initialization)

    def glorot_uniform_initialization(self, m):
        """Apply Glorot/Xavier uniform init to conv and linear weights.

        Uses the in-place ``xavier_uniform_``; the previously used
        ``nn.init.xavier_uniform`` spelling is deprecated.
        """
        if isinstance(m, (nn.Conv1d, nn.Linear)):
            nn.init.xavier_uniform_(m.weight.data)

    def forward(self, x: torch.Tensor):
        block_in = x
        # Placeholder so block_out is always bound even when no even-indexed
        # block exists (e.g. pathological depth values).
        block_out = torch.ones((2,))
        for i, block in enumerate(self.blocks):
            if i % 2 == 0:
                # Even entries: Inception stacks.
                block_out = block(block_in)
            else:
                # Odd entries: ShortCut residuals feeding the next stack.
                block_in = block(block_in, block_out)
        # NOTE(review): the head pools block_out (the last stack's output),
        # so a trailing residual's addition is not reflected here -- confirm
        # this matches the intended architecture.
        out = self.gap(block_out)
        out = self.fc(out)
        # https://stackoverflow.com/questions/57342987/translating-pytorch-program-into-keras-different-results
        # Logits are calculated by loss function already!!!
        return out
| StarcoderdataPython |
1634070 | """Class definitions for adapting acoustic models"""
from __future__ import annotations
import os
import shutil
from typing import TYPE_CHECKING, Optional
from ..exceptions import KaldiProcessingError
from ..models import AcousticModel
from ..multiprocessing import (
align,
calc_fmllr,
compile_information,
compile_train_graphs,
train_map,
)
from ..utils import log_kaldi_errors
from .base import BaseAligner
if TYPE_CHECKING:
from logging import Logger
from ..config import AlignConfig
from ..corpus import Corpus
from ..dictionary import Dictionary
from ..models import MetaDict
from .pretrained import PretrainedAligner
__all__ = ["AdaptingAligner"]
class AdaptingAligner(BaseAligner):
    """
    Aligner adapts another acoustic model to the current data

    Parameters
    ----------
    corpus : :class:`~montreal_forced_aligner.corpus.base.Corpus`
        Corpus object for the dataset
    dictionary : :class:`~montreal_forced_aligner.dictionary.Dictionary`
        Dictionary object for the pronunciation dictionary
    pretrained_aligner: :class:`~montreal_forced_aligner.aligner.pretrained.PretrainedAligner`
        Pretrained aligner to use as input to training
    align_config : :class:`~montreal_forced_aligner.config.align_config.AlignConfig`
        Configuration for alignment
    temp_directory : str, optional
        Specifies the temporary directory root to save files need for Kaldi.
        If not specified, it will be set to ``~/Documents/MFA``
    debug: bool
        Flag for debug mode, default is False
    verbose: bool
        Flag for verbose mode, default is False
    logger: :class:`~logging.Logger`
        Logger to use
    """

    def __init__(
        self,
        corpus: Corpus,
        dictionary: Dictionary,
        previous_aligner: PretrainedAligner,
        align_config: AlignConfig,
        temp_directory: Optional[str] = None,
        debug: bool = False,
        verbose: bool = False,
        logger: Optional[Logger] = None,
    ):
        self.previous_aligner = previous_aligner
        super().__init__(
            corpus,
            dictionary,
            align_config,
            temp_directory,
            debug,
            verbose,
            logger,
            acoustic_model=self.previous_aligner.acoustic_model,
        )
        self.align_config.data_directory = corpus.split_directory
        log_dir = os.path.join(self.align_directory, "log")
        os.makedirs(log_dir, exist_ok=True)
        self.align_config.logger = self.logger
        self.logger.info("Done with setup!")
        # Flipped to True by adapt(); selects which working directory and
        # model paths are considered current.
        self.training_complete = False
        # Presumably the tau smoothing weight passed to MAP training --
        # confirm against train_map's implementation.
        self.mapping_tau = 20

    def setup(self) -> None:
        """Set up the aligner"""
        super().setup()
        # Produce alignments with the pretrained aligner first; MAP
        # adaptation starts from those alignments.
        self.previous_aligner.align()
        self.acoustic_model.export_model(self.adapt_directory)
        # Rename the exported "final" models to iteration-0 names so the
        # adaptation run treats them as its starting point.
        for f in ["final.mdl", "final.alimdl"]:
            p = os.path.join(self.adapt_directory, f)
            if not os.path.exists(p):
                continue
            os.rename(p, os.path.join(self.adapt_directory, f.replace("final", "0")))

    @property
    def align_directory(self) -> str:
        """Align directory"""
        return os.path.join(self.temp_directory, "adapted_align")

    @property
    def adapt_directory(self) -> str:
        """Adapt directory"""
        return os.path.join(self.temp_directory, "adapt")

    @property
    def working_directory(self) -> str:
        """Current working directory"""
        # Adaptation writes to adapt_directory; once finished, alignment
        # happens in align_directory.
        if self.training_complete:
            return self.align_directory
        return self.adapt_directory

    @property
    def working_log_directory(self) -> str:
        """Current log directory"""
        return os.path.join(self.working_directory, "log")

    @property
    def current_model_path(self):
        """Current acoustic model path"""
        if self.training_complete:
            return os.path.join(self.working_directory, "final.mdl")
        return os.path.join(self.working_directory, "0.mdl")

    @property
    def next_model_path(self):
        """Next iteration's acoustic model path"""
        return os.path.join(self.working_directory, "final.mdl")

    def adapt(self) -> None:
        """Run the adaptation"""
        # Sentinel files: "done" marks a successful previous run (skip),
        # "dirty" marks a failed one.
        done_path = os.path.join(self.adapt_directory, "done")
        dirty_path = os.path.join(self.adapt_directory, "dirty")
        if os.path.exists(done_path):
            self.logger.info("Adapting already done, skipping.")
            return
        try:
            self.logger.info("Adapting pretrained model...")
            train_map(self)
            self.training_complete = True
            # Copy the adapted model files into the alignment directory.
            shutil.copyfile(
                os.path.join(self.adapt_directory, "final.mdl"),
                os.path.join(self.align_directory, "final.mdl"),
            )
            shutil.copyfile(
                os.path.join(self.adapt_directory, "final.occs"),
                os.path.join(self.align_directory, "final.occs"),
            )
            shutil.copyfile(
                os.path.join(self.adapt_directory, "tree"),
                os.path.join(self.align_directory, "tree"),
            )
            if os.path.exists(os.path.join(self.adapt_directory, "final.alimdl")):
                shutil.copyfile(
                    os.path.join(self.adapt_directory, "final.alimdl"),
                    os.path.join(self.align_directory, "final.alimdl"),
                )
            if os.path.exists(os.path.join(self.adapt_directory, "lda.mat")):
                shutil.copyfile(
                    os.path.join(self.adapt_directory, "lda.mat"),
                    os.path.join(self.align_directory, "lda.mat"),
                )
        except Exception as e:
            with open(dirty_path, "w"):
                pass
            if isinstance(e, KaldiProcessingError):
                log_kaldi_errors(e.error_logs, self.logger)
                e.update_log_file(self.logger.handlers[0].baseFilename)
            raise
        with open(done_path, "w"):
            pass

    @property
    def meta(self) -> MetaDict:
        """Acoustic model metadata"""
        from datetime import datetime

        from ..utils import get_mfa_version

        data = {
            "phones": sorted(self.dictionary.nonsil_phones),
            "version": get_mfa_version(),
            "architecture": self.acoustic_model.meta["architecture"],
            "train_date": str(datetime.now()),
            "features": self.previous_aligner.align_config.feature_config.params(),
            "multilingual_ipa": self.dictionary.multilingual_ipa,
        }
        if self.dictionary.multilingual_ipa:
            data["strip_diacritics"] = self.dictionary.strip_diacritics
            data["digraphs"] = self.dictionary.digraphs
        return data

    def save(self, path, root_directory=None) -> None:
        """
        Output an acoustic model and dictionary to the specified path

        Parameters
        ----------
        path : str
            Path to save acoustic model and dictionary
        root_directory : str or None
            Path for root directory of temporary files
        """
        directory, filename = os.path.split(path)
        basename, _ = os.path.splitext(filename)
        acoustic_model = AcousticModel.empty(basename, root_directory=root_directory)
        acoustic_model.add_meta_file(self)
        acoustic_model.add_model(self.align_directory)
        if directory:
            os.makedirs(directory, exist_ok=True)
        basename, _ = os.path.splitext(path)
        acoustic_model.dump(path)

    def align(self, subset: Optional[int] = None) -> None:
        """
        Align using the adapted model

        Parameters
        ----------
        subset: int, optional
            Number of utterances to align in corpus
        """
        # Same done/dirty sentinel convention as adapt().
        done_path = os.path.join(self.align_directory, "done")
        dirty_path = os.path.join(self.align_directory, "dirty")
        if os.path.exists(done_path):
            self.logger.info("Alignment already done, skipping.")
            return
        try:
            log_dir = os.path.join(self.align_directory, "log")
            os.makedirs(log_dir, exist_ok=True)
            compile_train_graphs(self)

            # First pass: speaker-independent alignment.
            self.logger.info("Performing first-pass alignment...")
            self.speaker_independent = True
            align(self)
            unaligned, average_log_like = compile_information(self)
            self.logger.debug(
                f"Prior to SAT, average per frame likelihood (this might not actually mean anything): {average_log_like}"
            )
            # Second pass: speaker-adapted alignment via fMLLR transforms,
            # unless SAT is disabled or transforms already exist.
            if (
                not self.align_config.disable_sat
                and self.previous_aligner.acoustic_model.feature_config.fmllr
                and not os.path.exists(os.path.join(self.align_directory, "trans.0"))
            ):
                self.logger.info("Calculating fMLLR for speaker adaptation...")
                calc_fmllr(self)

                self.speaker_independent = False
                self.logger.info("Performing second-pass alignment...")
                align(self)

            unaligned, average_log_like = compile_information(self)
            self.logger.debug(
                f"Following SAT, average per frame likelihood (this might not actually mean anything): {average_log_like}"
            )

        except Exception as e:
            with open(dirty_path, "w"):
                pass
            if isinstance(e, KaldiProcessingError):
                log_kaldi_errors(e.error_logs, self.logger)
                e.update_log_file(self.logger.handlers[0].baseFilename)
            raise
        with open(done_path, "w"):
            pass
| StarcoderdataPython |
45360 | """
Python module.
""" | StarcoderdataPython |
1698315 | <reponame>tilan7663/lucene-pyparser
import json
from .file_reader import FileReader
from .utils import *
class NormMeta(FileReader):
    """Reader for a Lucene norms-metadata (.nvm) segment file.

    Parses one metadata entry per indexed, non-norm-omitting field and
    stores them in ``self.norms_fields_meta`` keyed by field number.
    """

    extension = ".nvm"

    def __init__(self, segment_info, field_infos):
        super(NormMeta, self).__init__(segment_info)
        self.f = self.get_file_ptr()
        self.field_infos = field_infos

    def parse_norm_meta(self):
        """Parse the header, field entries, and footer, printing a dump.

        Side effect: sets ``self.norms_fields_meta``.
        """
        print("#######################################################")
        print("###################### NORM META ######################")
        print("#######################################################")
        field_infos = self.field_infos
        f = self.f
        f.seek(self.offset)
        print_header(*parse_header(f))
        norms_fields_meta = {}
        while True:
            field_number = read_vint(f)
            # 0xffffffff is the end-of-entries sentinel.
            if field_number == 0xffffffff:
                break
            info = field_infos[field_number]
            # Norms metadata must only exist for indexed fields that do
            # not omit norms; anything else indicates corruption.
            if not(info["index_option_bits"] != INDEX_OPTION_NONE and info["omit_norm"] == False):
                raise Exception("corrupted field types")
            entry = {}
            entry["bytes_per_value"] = read_byte(f)
            entry["offset"] = read_long(f)
            entry["field_name"] = info["field_name"]
            norms_fields_meta[field_number] = entry
        footer_magic, checksum_algo, checksum = parse_footer(f)
        actual_checksum = compute_checksum(f)
        print_footer(footer_magic, checksum_algo, checksum, actual_checksum)
        self.norms_fields_meta = norms_fields_meta

    def __iter__(self):
        """Yield (field_number, entry) pairs; parse_norm_meta must run first."""
        for field_number, entry in self.norms_fields_meta.items():
            yield field_number, entry

    def __str__(self):
        """Pretty-printed JSON dump of the parsed metadata."""
        return json.dumps(self.norms_fields_meta, sort_keys=False, indent=4)
5192972 | from .Circle import Circle
from .Collision import Collision
from .CollisionList import CollisionList
from .Line import Line
from .Object import Object
from .System import System
from .functions import *
| StarcoderdataPython |
1676162 | #!/usr/bin/env python3
# One indentation unit used throughout the generated DOT source.
SINGLE_INDENT = 4 * ' '

# DOT source templates; `{{`/`}}` are literal braces in str.format.
GRAPH = '''\
{typ} {name} {{
{config}
{nodes}
{subgraphs}
{edges}
}}'''
SUBGRAPH = '''
{indent}subgraph {name} {{
{config}
{nodes}
{subgraphs}
{edges}
{indent}}}'''
NODE = '''
{indent}node [{config}]
{indent}{name} [label="{label}"]'''
EDGE = '''
{indent}edge [{config}]
{indent}{name} [label="{label}"]'''


def dict_to_str(the_dict, separator, level):
    """Format *the_dict* as sorted ``key="value"`` entries joined by *separator*.

    Each entry is prefixed by *level* indentation units.
    """
    prefix = level * SINGLE_INDENT
    entries = ('{}{}="{}"'.format(prefix, key, value)
               for key, value in sorted(the_dict.items()))
    return separator.join(entries)
def compile_graph(graph):
    """Render the top-level *graph* dict as DOT source text."""
    for key in ['typ', 'name', 'config', 'nodes', 'subgraphs', 'edges']:
        assert key in graph, 'Error: No %s in the graph.' % key
    nodes = '\n'.join(compile_node(node, level=1)
                      for node in graph['nodes'])
    subgraphs = '\n'.join(compile_subgraph(subgraph, level=1)
                          for subgraph in graph['subgraphs'])
    edges = '\n'.join(compile_edge(edge, level=1)
                      for edge in graph['edges'])
    return GRAPH.format(
        typ=graph['typ'],
        name=graph['name'],
        config=dict_to_str(graph['config'], '\n', level=1),
        nodes=nodes,
        subgraphs=subgraphs,
        edges=edges)
def compile_node(node, level):
    """Render a single ``{name: body}`` node mapping as DOT source."""
    [(name, body)] = node.items()
    for key in ['config', 'label']:
        assert key in body, 'Error: No %s in the node "%s".' % (key, name)
    return NODE.format(
        config=dict_to_str(body['config'], ' ', level=0),
        name=name,
        label=body['label'],
        indent=level * SINGLE_INDENT)
def compile_edge(edge, level):
    """Render a single ``{name: body}`` edge mapping as DOT source."""
    [(name, body)] = edge.items()
    for key in ['config', 'label']:
        assert key in body, 'Error: No %s in the edge "%s".' % (key, name)
    return EDGE.format(
        config=dict_to_str(body['config'], ' ', level=0),
        name=name,
        label=body['label'],
        indent=level * SINGLE_INDENT)
def compile_subgraph(subgraph, level):
    """Render a single ``{name: body}`` subgraph mapping as DOT source,
    recursing into nested subgraphs one indentation level deeper.
    """
    [(name, body)] = subgraph.items()
    for key in ['config', 'nodes', 'subgraphs', 'edges']:
        assert key in body, 'Error: No %s in the subgraph "%s".' % (key, name)
    nodes = '\n'.join(compile_node(node, level=level + 1)
                      for node in body['nodes'])
    nested = '\n'.join(compile_subgraph(sub, level=level + 1)
                       for sub in body['subgraphs'])
    edges = '\n'.join(compile_edge(edge, level=level + 1)
                      for edge in body['edges'])
    return SUBGRAPH.format(
        indent=level * SINGLE_INDENT,
        name=name,
        config=dict_to_str(body['config'], '\n', level=level + 1),
        nodes=nodes,
        subgraphs=nested,
        edges=edges)
def test_backend():
    """Smoke test: compile a representative graph fixture and print the DOT
    output (visual inspection only; no assertions).
    """
    # Fixture exercising every template: top-level config, a standalone
    # node, one cluster subgraph with two nodes and an edge, and a
    # top-level edge.
    graph = dict(
        typ='digraph',
        name='test',
        config=dict(
            rankdir='TD',
            nodesep='1',
            fontname='acme',
            fontsize=28,
            splines='ortho',
            penwidth=2,
            newrank='true',
            compound='true'),
        nodes=[
            {'fizz_buzz': dict(
                label='3 FIZZ BUZZ',
                config=dict(
                    fontname='acme',
                    fontsize=24,
                    fillcolor='white',
                    style='filled,rounded',
                    shape='box',
                    fixedsize='true',
                    width=3,
                    height=1,
                    labelloc='b'))}],
        subgraphs=[
            {'cluster_1': dict(
                config=dict(
                    label='\nBRRRRT',
                    labelloc='t',
                    margin=40,
                    style='filled',
                    fillcolor='lightgrey',
                    clusterrank='none',
                    ranksep='1 equally',
                    color='white'),
                subgraphs=[],
                nodes=[
                    {'foo_bar': dict(
                        label='1 FOO BAR',
                        config=dict(
                            fontname='acme',
                            fontsize=24,
                            fillcolor='white',
                            style='filled,rounded',
                            shape='box',
                            fixedsize='true',
                            width=3,
                            height=1,
                            labelloc='m'))
                    }, {
                    'baz': dict(
                        label='2 BAZ',
                        config=dict(
                            fontname='acme',
                            fontsize=24,
                            fillcolor='white',
                            style='filled,rounded',
                            shape='box',
                            fixedsize='true',
                            width=3,
                            height=1,
                            labelloc='t'))
                    }],
                edges=[
                    {'foobar -> baz': dict(
                        label='',
                        config={})}])
            }
        ],
        edges=[
            {'baz -> fizz_buzz': dict(
                label='',
                config={})}],
    )
    print(compile_graph(graph))


if __name__ == '__main__':
    test_backend()
| StarcoderdataPython |
4813536 | import json
from storyhub.sdk.service.Argument import Argument
from storyhub.sdk.service.HttpOptions import HttpOptions
from storyhub.sdk.service.output.OutputAction import OutputAction
from tests.storyhub.sdk.JsonFixtureHelper import JsonFixtureHelper
# Shared fixture: the parsed dict and its JSON string form, used by all tests.
output_action_fixture = JsonFixtureHelper.load_fixture("output_action_fixture")
output_action_fixture_json = json.dumps(output_action_fixture)
def test_deserialization(mocker):
    """from_json should parse the JSON and delegate to the HttpOptions/Argument builders."""
    mocker.patch.object(json, "loads", return_value=output_action_fixture)
    mocker.patch.object(HttpOptions, "from_dict")
    mocker.patch.object(Argument, "from_dict")

    action = OutputAction.from_json(jsonstr=output_action_fixture_json)
    assert action is not None

    json.loads.assert_called_with(output_action_fixture_json)

    fixture_body = output_action_fixture["output_action"]
    HttpOptions.from_dict.assert_called_with(
        data={"http_options": fixture_body["http"]}
    )
    Argument.from_dict.assert_called_with(
        data={
            "name": "flush",
            "argument": fixture_body["arguments"]["flush"],
        }
    )
def test_serialization(mocker):
    """as_json should serialize via json.dumps, indented unless compact=True."""
    mocker.patch.object(json, "dumps", return_value=output_action_fixture_json)
    action = OutputAction.from_dict(data=output_action_fixture)

    assert action.as_json(compact=True) is not None
    json.dumps.assert_called_with(output_action_fixture, sort_keys=True)

    assert action.as_json() is not None
    json.dumps.assert_called_with(
        output_action_fixture, indent=4, sort_keys=True
    )
def test_getters(mocker):
    """help() should fall back to a default message when no help text exists."""
    # NOTE(review): the `mocker` fixture is unused here; kept for signature parity.
    action = OutputAction.from_json(jsonstr=output_action_fixture_json)
    assert action.help() == "No help available."
| StarcoderdataPython |
8189777 | <reponame>TransRadOnc-HIT/RADIANTS
from nipype.interfaces.base import (
BaseInterface, TraitedSpec, Directory, File,
traits, BaseInterfaceInputSpec, InputMultiPath)
from radiomics import featureextractor
import csv
import os.path as op
class FeatureExtractionInputSpec(BaseInterfaceInputSpec):
    # Input contract for the FeatureExtraction nipype interface.
    parameter_file = File(exists=True, desc='File with all the '
                          'parameters to be used for feature extraction.')
    rois = InputMultiPath(File(exists=True), desc='List of roi to extract the features from.')
    input_image = File(exists=True, desc='Input image.')
    # Basename prefix for the per-ROI CSV outputs.
    outname = traits.Str('Features_pyradiomics', usedefault=True,
                         desc='Output file name.')
class FeatureExtractionOutputSpec(TraitedSpec):
    # One CSV per ROI, produced by FeatureExtraction._run_interface.
    feature_files = InputMultiPath(File(exists=True), desc='CSV file with the radiomics features.')
class FeatureExtraction(BaseInterface):
    """Nipype interface that runs pyradiomics feature extraction.

    For each ROI mask, extracts radiomics features from ``input_image``
    using the settings in ``parameter_file`` and writes one CSV per ROI
    (header row of feature names, single row of values).
    """
    input_spec = FeatureExtractionInputSpec
    output_spec = FeatureExtractionOutputSpec

    def _run_interface(self, runtime):
        rois = self.inputs.rois
        parameter_file = self.inputs.parameter_file
        image = self.inputs.input_image
        # Use os.path.basename instead of splitting on '/' so Windows-style
        # paths are handled too; then strip the (possibly compressed) .nii suffix.
        image_name = op.basename(image).split('.nii')[0]
        out_basename = self.inputs.outname
        self.outfiles = []
        extractor = featureextractor.RadiomicsFeatureExtractor(parameter_file)
        for roi in rois:
            roi_name = op.basename(roi).split('.nii')[0]
            # First two columns identify the image/mask pair the features belong to.
            keys = ['Subject', 'Mask']
            values = [image, roi]
            outname = op.abspath(out_basename+'_'+image_name+'_'+roi_name+'.csv')
            result = extractor.execute(image, roi)
            for k, value in result.items():
                keys.append(k)
                values.append(value)
            with open(outname, 'w') as outfile:
                csvwriter = csv.writer(outfile)
                csvwriter.writerow(keys)
                csvwriter.writerow(values)
            self.outfiles.append(outname)
        return runtime

    def _list_outputs(self):
        """Expose the per-ROI CSV paths produced by ``_run_interface``."""
        outputs = self._outputs().get()
        outputs['feature_files'] = self.outfiles
        return outputs
| StarcoderdataPython |
200952 | <filename>solutions/lowest_common_ancestor_deepest_leaves/solution.py
from collections import deque
from ..utils import TreeNode
def lcaDeepestLeaves(root: TreeNode) -> TreeNode:
    """Given the 'root' of a binary tree, return the lowest common ancestor
    of its deepest leaves.

    NOTE(review): node values are assumed unique -- they serve as dict keys.
    """
    if not root.left and not root.right:
        # A lone leaf is trivially its own LCA.
        return root

    parent_of = {}      # node value -> parent's value
    node_of = {}        # node value -> TreeNode, so we can return a node
    deepest_vals = []   # leaf values on the deepest level seen so far

    # Level-by-level breadth-first walk; each deeper level of leaves
    # overwrites the shallower ones, so `deepest_vals` ends up holding
    # exactly the deepest leaves.
    level = [root]
    while level:
        next_level = []
        leaves_here = []
        for node in level:
            node_of[node.val] = node
            children = [c for c in (node.left, node.right) if c]
            if children:
                for child in children:
                    parent_of[child.val] = node.val
                    next_level.append(child)
            else:
                leaves_here.append(node.val)
        if leaves_here:
            deepest_vals = leaves_here
        level = next_level

    # Climb: replace every candidate by its parent until one ancestor remains.
    ancestors = set(deepest_vals)
    while len(ancestors) > 1:
        ancestors = {parent_of[v] for v in ancestors}
    return node_of[ancestors.pop()]
3407106 | from bluebottle.fsm.effects import TransitionEffect
from bluebottle.fsm.triggers import ModelChangedTrigger
from bluebottle.funding.effects import UpdateFundingAmountsEffect
from bluebottle.funding.models import Funding, PlainPayoutAccount, Donation
from bluebottle.funding.states import FundingStateMachine, PlainPayoutAccountStateMachine
class DeadlineChangedTrigger(ModelChangedTrigger):
    """Effects to run whenever a Funding activity's ``deadline`` changes."""
    field = 'deadline'

    effects = [
        # Re-open/extend a finished-looking campaign when the new deadline is
        # in the future and no payouts have been approved yet.
        TransitionEffect(
            'extend',
            conditions=[
                FundingStateMachine.is_complete,
                FundingStateMachine.is_valid,
                FundingStateMachine.deadline_in_future,
                FundingStateMachine.without_approved_payouts
            ]
        ),
        # Otherwise close the campaign according to how much was raised.
        TransitionEffect(
            'succeed',
            conditions=[
                FundingStateMachine.should_finish,
                FundingStateMachine.target_reached
            ]
        ),
        TransitionEffect(
            'partial',
            conditions=[
                FundingStateMachine.should_finish,
                FundingStateMachine.target_not_reached
            ]
        ),
        TransitionEffect(
            'cancel',
            conditions=[
                FundingStateMachine.should_finish,
                FundingStateMachine.no_donations
            ]
        ),
    ]
class AmountChangedTrigger(ModelChangedTrigger):
    """Effects to run whenever a Funding activity's ``target`` amount changes."""
    field = 'target'

    effects = [
        # Re-evaluate the campaign outcome against the new target.
        TransitionEffect(
            'succeed',
            conditions=[FundingStateMachine.should_finish, FundingStateMachine.target_reached]
        ),
        TransitionEffect(
            'partial',
            conditions=[FundingStateMachine.should_finish, FundingStateMachine.target_not_reached]
        ),
        TransitionEffect(
            'cancel',
            conditions=[FundingStateMachine.should_finish, FundingStateMachine.no_donations]
        ),
    ]
class MatchingAmountChangedTrigger(AmountChangedTrigger):
    # Same finish-effects as AmountChangedTrigger, but watching the matched amount.
    field = 'amount_matching'


# Register the triggers on the Funding model.
Funding.triggers = [DeadlineChangedTrigger, MatchingAmountChangedTrigger, AmountChangedTrigger]
class AccountReviewedTrigger(ModelChangedTrigger):
    """Effects to run when a plain payout account's ``reviewed`` flag changes."""
    field = 'reviewed'

    effects = [
        TransitionEffect(
            'verify',
            conditions=[PlainPayoutAccountStateMachine.is_reviewed]
        ),
        TransitionEffect(
            'reject',
            conditions=[PlainPayoutAccountStateMachine.is_unreviewed]
        ),
    ]


# Register the trigger on the payout-account model.
PlainPayoutAccount.triggers = [AccountReviewedTrigger]
class DonationAmountChangedTrigger(ModelChangedTrigger):
    """Keep the funding totals in sync when a donation's payout amount changes."""
    field = 'payout_amount'

    effects = [
        UpdateFundingAmountsEffect
    ]


# Register the trigger on the Donation model.
Donation.triggers = [DonationAmountChangedTrigger]
| StarcoderdataPython |
6573405 | <gh_stars>1-10
# Remove every tweet listed in trained_data.txt from each paris_*.txt chunk,
# rewriting the chunk files in place.
files = ["0-500k", "500k-1M", "1M-1.5M", "1.5M-2M", "2M-2-5M", "2.5M-3M",
         "3M-3-5M", "3.5M-4M", "4M-4-5M", "4.5M-5M", "5M-5-5M", "5.5M-6M",
         "6M-6-5M", "6.5M-7M", "7M-7-5M"]

for i in range(len(files)):
    print("lendo {}".format(files[i]))
    with open('paris_' + files[i] + '.txt', 'r') as _f:
        filedata = _f.read()
    count = 0
    with open('trained_data.txt', 'r') as tweets:
        for tweet in tweets:
            count += 1
            print("fazendo substituicao numero {}".format(count))
            filedata = filedata.replace(tweet, '')
    print("escrevendo em {}".format(files[i]))
    # BUG FIX: the chunk file must be opened for writing ('w'); the original
    # opened it with 'r', so _g.write() raised io.UnsupportedOperation.
    with open('paris_' + files[i] + '.txt', 'w') as _g:
        _g.write(filedata)
| StarcoderdataPython |
1778643 | print("*********** BOOLEAN OPERATORS EXAMPLES ************")
"""
and
***************************
TRUE and TRUE -> TRUE
TRUE and FALSE -> FALSE
FALSE and FALSE -> FALSE
***************************
or
***************************
TRUE or TRUE -> TRUE
TRUE or FALSE -> TRUE
FALSE or FALSE -> FALSE
***************************
not
***************************
NOT TRUE -> FALSE
NOT FALSE -> TRUE
"""
print("**** AND OPERATOR ****")
# `and` is True only when both operands are True.
print((10 == 10) and (10 >= 10))
print((10 == 10) and (10 > 10))
print((10 != 10) and (10 > 10))

print("**** OR OPERATOR ****")
# `or` is True when at least one operand is True.
print((10 == 10) or (10 >= 10))
print((10 == 10) or (10 > 10))
print((10 != 10) or (10 > 10))

print("**** NOT OPERATOR ****")
# `not` inverts its operand.
print(not (10 == 11))
print(not (10 == 10))
| StarcoderdataPython |
8110762 | <reponame>mcflugen/rafem
#! /usr/bin/env python
from setuptools import find_packages, setup
import versioneer
# Package metadata for the rafem distribution.
setup(
    name="rafem",
    version=versioneer.get_version(),  # version derived from VCS tags via versioneer
    author="<NAME>",
    author_email="<EMAIL>",
    # NOTE(review): "Flooplain" looks like a typo for "Floodplain" -- confirm
    # before changing published metadata.
    description="River Avulsion Flooplain Evolution Model",
    long_description=open("README.rst", encoding="utf-8").read(),
    classifiers=[
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Topic :: Scientific/Engineering :: Physics",
    ],
    url="https://github.com/katmratliff/avulsion-bmi",
    license="MIT",
    install_requires=open("requirements.txt", "r").read().splitlines(),
    packages=find_packages(),
    # Console entry point: `rafem` invokes rafem.main:rafem.
    entry_points={"console_scripts": ["rafem=rafem.main:rafem"]},
    cmdclass=versioneer.get_cmdclass(),
)
| StarcoderdataPython |
6501982 | <reponame>schmouk/ArcheryVideoTraining<filename>src/Utils/periodical_thread.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (c) 2021 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
#=============================================================================
from threading import Event, Thread
import time
from src.Utils.Scheduling import Scheduler
#=============================================================================
class PeriodicalThread(Thread):
    """A thread whose `process()` method is invoked at a fixed period.

    Subclasses implement `process()`; `run()` calls it every `period_s`
    seconds (measured against the start time, so drift does not accumulate)
    until `process()` returns False or `stop()` is called.

    :param period_s: float
        A fractional count of seconds. Defines the period of time that
        separates two calls to the processing method of this thread.
        Must be > 0 for the thread to run.
    :param thread_name: str
        The name of this thread. May be left unset, in which case the
        underlying platform defines one by default. Defaults to None.
    """

    def __init__(self, period_s: float, thread_name: str = None) -> None:
        '''Constructor.'''
        super().__init__(name=thread_name)
        self.period_s = period_s
        # NOTE(review): `stop_event` is created but never consulted by `run()`;
        # stopping is done via the `keep_on` flag (see `stop()`).
        self.stop_event = Event()

    def finalize_run_loop(self):
        '''Finalization step after exiting the running loop.

        May be overwritten in inheriting classes to release any allocated
        resource once processing has completed. Does nothing here.
        '''
        pass

    def initialize_run_loop(self):
        '''Initialization step before entering the running loop.

        May be overwritten in inheriting classes to initialize/allocate any
        resource before processing takes place. Does nothing here.
        '''
        pass

    def is_ok(self) -> bool:
        '''Returns True if the period for this periodical thread is valid (> 0).'''
        return self.period_s > 0.0

    def process(self) -> bool:
        '''The processing core of this periodical thread.

        This method MUST BE IMPLEMENTED in inheriting classes.

        Returns:
            True if processing is to be kept on, or False if this thread
            must be definitively stopped.

        Raises:
            NotImplementedError: method 'process()' is not implemented
                in the inheriting class.
        '''
        raise NotImplementedError( f"method 'process()' is not implemented in class '{self.__class__.__name__}'" )

    def run(self) -> None:
        '''The looping method of this thread.

        Calls `process()` once per period; sleeps away whatever time is left
        of each period. (A dead pre-computation of `next_time` at the top of
        the loop body was removed.)
        '''
        if self.is_ok():
            self.initialize_run_loop()
            self.set_start_time()
            loops_count = 0
            self.keep_on = True
            with Scheduler(3):
                while self.keep_on:
                    # Calls the processing core of this periodical thread.
                    if not self.process():
                        break
                    # Schedule relative to the start time so timing drift
                    # does not accumulate across iterations.
                    loops_count += 1
                    next_time = loops_count * self.period_s + self.start_time
                    wait_time = next_time - time.perf_counter()
                    if wait_time > 0:
                        time.sleep(wait_time)
            self.finalize_run_loop()

    def set_start_time(self) -> None:
        '''Sets the reference start time for this periodical processing.'''
        self.start_time = time.perf_counter()

    def stop(self) -> None:
        '''Definitively stops this thread (takes effect before the next iteration).'''
        self.keep_on = False
#===== end of src.Utils.periodical_thread =====#
| StarcoderdataPython |
1841299 | <filename>quadratic_mc.py
import os, sys
import glob
import platform
import math
import numpy as np
import torch
from cffi import FFI
class QuadraticMarchingCubes:
    """Python wrapper around the native quadratic marching-cubes library.

    Loads the shared library built under ``build/`` via cffi, using the
    declarations exported in ``Src/exported_routines.h``.
    """

    def __init__(self):
        self.ffi = FFI()
        # Feed the exported declarations to cffi, skipping any
        # preprocessor-guarded (#if ... #endif) sections of the header.
        with open("Src/exported_routines.h") as header:
            header_str = header.read()
        cstr = ""
        ignore = False
        for line in header_str.splitlines():
            if line.startswith("#if"):
                ignore = True
            if not ignore:
                cstr += line
            if line.startswith("#end"):
                ignore = False
        self.ffi.cdef(cstr)

        # Resolve the platform-specific shared-library name, preferring the
        # release build and falling back to the debug ('d'-suffixed) one.
        correctWorkingDirectory = os.getcwd()
        libname_start = correctWorkingDirectory + "/build/libquadratic_iso"
        if platform.system() == "Darwin":
            if os.path.exists(libname_start + ".dylib"):
                libname = libname_start + ".dylib"
            else:
                libname = libname_start + "d.dylib"
        elif platform.system() == "Windows":
            libname = correctWorkingDirectory + "/build/Debug/quadratic_iso.dll"
        else:
            if os.path.exists(libname_start + ".so"):
                libname = libname_start + ".so"
            else:
                libname = libname_start + "d.so"
        self.isosurf = self.ffi.dlopen(libname)
        # (A no-op `os.chdir(os.getcwd())` was removed here.)
        print(self.isosurf)

    def run(self, isovalue, np_sdf_data, dim, path=None):
        """Extract the isosurface at *isovalue* from a dim^3 SDF volume.

        Returns (vertices, triangles) as float32 (N, 3) / int32 (M, 3)
        arrays; optionally writes a Wavefront OBJ file to *path*.
        """
        # Allocate the maximum possible amount of memory used for buffers
        # passed into the C++ code.
        np_tris = np.zeros((dim*dim*dim, 3), dtype=np.int32)
        np_verts = np.zeros((dim*dim*dim, 3), dtype=np.float32)
        ffi_vert_count = self.ffi.new("int*")
        ffi_tri_count = self.ffi.new("int*")

        # Run the C++ code.
        self.isosurf.run_quadratic_mc(
            self.ffi.cast("int", dim),
            self.ffi.cast("float*", np_sdf_data.ctypes.data),
            self.ffi.cast("float", isovalue),
            self.ffi.cast("float*", np_verts.ctypes.data), ffi_vert_count,
            self.ffi.cast("int*", np_tris.ctypes.data), ffi_tri_count)

        # Trim off unused memory.
        np_verts = np_verts[:ffi_vert_count[0], :]
        np_tris = np_tris[:ffi_tri_count[0], :]

        if path is not None:
            # OBJ faces are 1-indexed, hence the +1 below.
            with open(path, "w") as f:
                f.write("# OBJ file\n")
                for i in range(ffi_vert_count[0]):
                    f.write(f"v {np_verts[i,0]:3.4f} {np_verts[i,1]:3.4f} {np_verts[i,2]:3.4f}\n")
                for i in range(ffi_tri_count[0]):
                    f.write(f"f {np_tris[i,0]+1:d} {np_tris[i,1]+1:d} {np_tris[i,2]+1:d}\n")
        return np_verts, np_tris
if __name__ == "__main__":
import test_sdf
dim = 32
sdf = test_sdf.TestSDF(dim)
qmc = QuadraticMarchingCubes()
qmc.run(0.0, sdf.data.numpy(), dim) | StarcoderdataPython |
3249038 | from lightweaver.fal import Falc82
from lightweaver.rh_atoms import H_6_atom, H_6_CRD_atom, H_3_atom, C_atom, O_atom, OI_ord_atom, Si_atom, Al_atom, CaII_atom, Fe_atom, FeI_atom, He_9_atom, He_atom, He_large_atom, MgII_atom, N_atom, Na_atom, S_atom
import lightweaver as lw
import numpy as np
import scipy.interpolate as interp
from astropy.convolution import Box1DKernel
from astropy.convolution import convolve
from astropy.io import fits
from enum import IntEnum
from mpi4py import MPI
from tqdm import tqdm
import pickle
import argparse
class tags(IntEnum):
    """MPI message tags exchanged between the master and the workers."""
    READY = 0  # worker requests a task
    DONE = 1   # worker returns a result
    EXIT = 2   # worker has shut down
    START = 3  # master dispatches a task
def smooth(sig, width):
    """Box-filter *sig* with a window of *width* samples."""
    kernel = Box1DKernel(width)
    return convolve(sig, kernel)
def iterate_ctx_crd(ctx, Nscatter=10, NmaxIter=500):
    """Iterate the CRD context to convergence (or at most NmaxIter sweeps).

    The first *Nscatter* iterations are pure formal solutions; statistical
    equilibrium updates start afterwards.  Converged when both the change in
    the radiation field and in the populations drop below fixed thresholds.
    """
    for iteration in range(NmaxIter):
        dJ = ctx.formal_sol_gamma_matrices(verbose=False)
        if iteration < Nscatter:
            continue
        delta = ctx.stat_equil(printUpdate=False)
        if dJ < 3e-3 and delta < 1e-3:
            return
def synth_spectrum(atmos, depthData=False, Nthreads=1, conserveCharge=False):
    """Build a Lightweaver context for *atmos* with Ca II in NLTE and converge it."""
    atmos.quadrature(5)

    radSet = lw.RadiativeSet([H_6_atom(),
                              C_atom(),
                              OI_ord_atom(), Si_atom(), Al_atom(),
                              CaII_atom(),
                              Fe_atom(),
                              He_9_atom(),
                              MgII_atom(), N_atom(), Na_atom(), S_atom()
                              ])
    radSet.set_active('Ca')

    wavelengthGrid = radSet.compute_wavelength_grid()
    eqPops = radSet.compute_eq_pops(atmos)
    ctx = lw.Context(atmos, wavelengthGrid, eqPops, Nthreads=Nthreads, conserveCharge=conserveCharge)
    if depthData:
        ctx.depthData.fill = True
    iterate_ctx_crd(ctx)

    # One final formal solution with refreshed LTE/H-minus populations.
    eqPops.update_lte_atoms_Hmin_pops(atmos, quiet=True)
    ctx.formal_sol_gamma_matrices(verbose=False)
    return ctx
def master_work(filename, write_frequency=1):
    """MPI master: dispatch atmosphere columns to workers and gather results.

    Results are checkpointed to ``<filename>_*.pk`` pickles every
    *write_frequency* completed tasks, and written in full at the end.
    (The output paths previously contained a corrupted literal instead of
    using the *filename* argument; they now interpolate it.)
    """
    task_index = 0
    num_workers = size - 1
    closed_workers = 0

    fmodel = fits.open('/net/drogon/scratch1/aasensio/3dcubes/Enhanced_network_385_tau_from_RH_01_tau8.fits')
    bifrost = fmodel[0].data[:].astype('<f8').reshape((11, 96, -1))

    n = bifrost.shape[-1]
    log_departure_list = [None] * n
    T_list = [None] * n
    tau_list = [None] * n
    vturb_list = [None] * n
    cmass = [None]

    tasks = [0] * n  # 0 = pending, 1 = dispatched, -1 = failed
    pointer = 0

    print("*** Master starting with {0} workers".format(num_workers))
    with tqdm(initial=pointer, total=n, ncols=140) as pbar:
        while closed_workers < num_workers:
            dataReceived = comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
            source = status.Get_source()
            tag = status.Get_tag()

            if tag == tags.READY:
                # Worker is idle: hand it the next pending column, or tell it to exit.
                try:
                    task_index = tasks.index(0)
                    tau500 = np.ascontiguousarray(10.0**bifrost[0, ::-1, task_index])
                    T = np.ascontiguousarray(bifrost[1, ::-1, task_index])
                    vlos = np.ascontiguousarray(bifrost[5, ::-1, task_index]) / 100.0   # m/s
                    vturb = np.ascontiguousarray(bifrost[3, ::-1, task_index]) / 100.0  # m/s
                    dataToSend = {'index': task_index, 'tau500': tau500, 'T': T, 'vlos': vlos, 'vturb': vturb}
                    comm.send(dataToSend, dest=source, tag=tags.START)
                    tasks[task_index] = 1
                    pbar.set_postfix(sent=f'{task_index}->{source}')
                except ValueError:
                    # tasks.index(0) failed: nothing left to dispatch.
                    comm.send(None, dest=source, tag=tags.EXIT)

            elif tag == tags.DONE:
                index = dataReceived['index']
                success = dataReceived['success']
                if not success:
                    tasks[index] = -1  # mark failed so it is not re-dispatched
                else:
                    log_departure_list[index] = dataReceived['log_departure']
                    T_list[index] = dataReceived['T']
                    tau_list[index] = dataReceived['tau']
                    vturb_list[index] = dataReceived['vturb']
                    cmass = dataReceived['cmass']
                pbar.update(1)

            elif tag == tags.EXIT:
                print(" * MASTER : worker {0} exited.".format(source))
                closed_workers += 1

            # Periodic checkpoint of everything gathered so far.
            # NOTE(review): the slices use the last dispatched task_index, as
            # in the original -- confirm this partial-save semantic is wanted.
            if pbar.n % write_frequency == 0:
                with open(f'{filename}_logdeparture.pk', 'wb') as filehandle:
                    pickle.dump(log_departure_list[0:task_index], filehandle)
                with open(f'{filename}_T.pk', 'wb') as filehandle:
                    pickle.dump(T_list[0:task_index], filehandle)
                with open(f'{filename}_vturb.pk', 'wb') as filehandle:
                    pickle.dump(vturb_list[0:task_index], filehandle)
                with open(f'{filename}_tau.pk', 'wb') as filehandle:
                    pickle.dump(tau_list[0:task_index], filehandle)
                with open(f'{filename}_cmass.pk', 'wb') as filehandle:
                    pickle.dump(cmass, filehandle)

    print("Master finishing")
    with open(f'{filename}_cmass.pk', 'wb') as filehandle:
        pickle.dump(cmass, filehandle)
    with open(f'{filename}_logdeparture.pk', 'wb') as filehandle:
        pickle.dump(log_departure_list, filehandle)
    with open(f'{filename}_T.pk', 'wb') as filehandle:
        pickle.dump(T_list, filehandle)
    with open(f'{filename}_vturb.pk', 'wb') as filehandle:
        pickle.dump(vturb_list, filehandle)
    with open(f'{filename}_tau.pk', 'wb') as filehandle:
        pickle.dump(tau_list, filehandle)
def slave_work(rank):
    """MPI worker loop: request tasks, synthesize each column, return results.

    On synthesis failure the reply carries ``success=0`` with None payload
    fields (previously those names could be undefined on the first failing
    task, raising NameError when building the reply).
    """
    while True:
        comm.send(None, dest=0, tag=tags.READY)
        dataReceived = comm.recv(source=0, tag=MPI.ANY_TAG, status=status)
        tag = status.Get_tag()

        if tag == tags.START:
            # Unpack the column sent by the master.
            task_index = dataReceived['index']
            tau500 = dataReceived['tau500']
            T = dataReceived['T']
            vlos = dataReceived['vlos']
            vturb = dataReceived['vturb']

            success = 1
            temperature = log_departure = tau = cmass = None
            try:
                atmos = lw.Atmosphere.make_1d(scale=lw.ScaleType.Tau500, depthScale=tau500,
                                              temperature=T, vlos=vlos, vturb=vturb, verbose=False)
                ctx = synth_spectrum(atmos, depthData=True, conserveCharge=False)
                tau = atmos.tauRef
                cmass = atmos.cmass
                temperature = atmos.temperature
                vturb = atmos.vturb
                log_departure = np.log10(ctx.activeAtoms[0].n / ctx.activeAtoms[0].nStar)
            except Exception:
                success = 0

            dataToSend = {'index': task_index, 'T': temperature, 'log_departure': log_departure,
                          'tau': tau, 'cmass': cmass, 'vturb': vturb, 'success': success}
            comm.send(dataToSend, dest=0, tag=tags.DONE)
        elif tag == tags.EXIT:
            break

    comm.send(None, dest=0, tag=tags.EXIT)
if (__name__ == '__main__'):
    # Initializations and preliminaries
    comm = MPI.COMM_WORLD  # get MPI communicator object
    size = comm.size       # total number of processes
    rank = comm.rank       # rank of this process
    status = MPI.Status()  # get MPI status object

    print(f"Node {rank}/{size} active", flush=True)

    if rank == 0:
        # Rank 0 parses the CLI options and acts as the master.
        parser = argparse.ArgumentParser(description='Generate synthetic models and solve NLTE problem')
        parser.add_argument('--f', '--freq', default=1, type=int, metavar='FREQ', help='Frequency of model write')
        parsed = vars(parser.parse_args())

        master_work('bifrost', write_frequency=parsed['f'])
    else:
        slave_work(rank)
| StarcoderdataPython |
1808821 | from setuptools import setup, find_packages
version = '0.1'

# Package metadata for the ckanext-ldap CKAN extension.
setup(
    name='ckanext-ldap',
    version=version,
    description="CKAN plugin to provide LDAP authentication",
    url='https://github.com/NaturalHistoryMuseum/ckanext-ldap',
    packages=find_packages(),
    # Namespace packages let multiple ckanext-* distributions share `ckanext`.
    namespace_packages=['ckanext', 'ckanext.ldap'],
    # Registers the CKAN plugin and the paster command provided by this extension.
    entry_points="""
[ckan.plugins]
ldap = ckanext.ldap.plugin:LdapPlugin
[paste.paster_command]
ldap=ckanext.ldap.commands.ldap:LDAPCommand
""",
    include_package_data=True,
)
| StarcoderdataPython |
8181639 | <reponame>rikeshi/galaxy
from galaxy.jobs import runners
def test_default_specs():
    """recheck_missing_job_retries must coerce to an integer >= 0."""
    params = runners.RunnerParams(specs=runners.BaseJobRunner.DEFAULT_SPECS,
                                  params=dict(recheck_missing_job_retries="1"))
    assert params.recheck_missing_job_retries == 1
    assert params["recheck_missing_job_retries"] == 1

    raised = False
    try:
        runners.RunnerParams(specs=runners.BaseJobRunner.DEFAULT_SPECS,
                             params=dict(recheck_missing_job_retries=-1))
    except Exception:
        raised = True
    assert raised
def test_missing_parameter():
    """Unknown parameter names are rejected with the unknown-parameter message."""
    caught = None
    try:
        runners.RunnerParams(specs={}, params=dict(foo="bar"))
    except Exception as e:
        caught = e
    assert str(caught) == runners.JOB_RUNNER_PARAMETER_UNKNOWN_MESSAGE % "foo"
def test_invalid_parameter():
    """A value rejected by the spec's validator yields the validation-failed message."""
    caught = None
    try:
        # NOTE(review): the spec key 'defualt' looks like a typo for 'default';
        # left untouched because validation fails before any default applies.
        runners.RunnerParams(specs=dict(foo=dict(valid=lambda x: x != "bar", defualt="baz")),
                             params=dict(foo="bar"))
    except Exception as e:
        caught = e
    assert str(caught) == runners.JOB_RUNNER_PARAMETER_VALIDATION_FAILED_MESSAGE % "foo"
def test_map_problem():
    """A crashing map function yields the map-problem message with name and value."""
    caught = None
    try:
        runners.RunnerParams(specs=dict(foo=dict(map=lambda x: 1 / 0, default="baz")),
                             params=dict(foo="bar"))
    except Exception as e:
        caught = e
    assert str(caught) == runners.JOB_RUNNER_PARAMETER_MAP_PROBLEM_MESSAGE % ("foo", "bar")
def test_param_default():
    """A missing parameter falls back to the spec's default, via key or attribute."""
    runner_params = runners.RunnerParams(specs=dict(foo=dict(default="baz")), params={})
    assert runner_params.foo == "baz"
    assert runner_params["foo"] == "baz"
| StarcoderdataPython |
12852447 | <reponame>ranigb/Set-Tree<filename>exps/jets/top_quark_gbdt.py
import os
import numpy as np
import argparse
import logging
import random
import pickle
from pprint import pformat
from exps.data import ParticleNetDataset
from settree.set_data import SetDataset, OPERATIONS, merge_init_datasets
import exps.eval_utils as eval
from exps.eval_utils import create_logger
# Root directory of the pre-processed top-quark tagging dataset (.awkd files).
data_root = '/home/royhir/projects/data/physics/top_quark/proc'
def pre_process(dataset, limit=None):
    """Convert a ParticleNet-style dataset into per-jet variable-length records.

    Randomly samples up to *limit* jets, trims each jet's zero-padded tail,
    and concatenates points/features/mask into one (n_rows, n_cols) array
    per jet.  Jets with no non-zero point rows are silently dropped, as in
    the original (the bare ``except`` is now narrowed to the ValueError that
    ``max()`` raises on an empty index array).

    Returns:
        (records, labels): list of 2-D arrays and the matching 1-D label array.
    """
    x = dataset.X
    y = dataset.y
    if limit is None:
        limit = len(y)
    inds = random.sample(range(len(y)), limit)

    x_points = x['points'].take(inds, axis=0)
    x_features = x['features'].take(inds, axis=0)
    x_mask = x['mask'].take(inds, axis=0)
    labels = y.take(inds, axis=0).argmax(1)  # one-hot -> class index

    records = []
    ys = []
    for p, f, m, label in zip(x_points, x_features, x_mask, labels):
        try:
            # Index of the last row containing any non-zero point coordinate.
            # NOTE(review): the slices below use p[:m_row], which excludes the
            # row at index m_row itself -- confirm the off-by-one is intended.
            m_row = np.where(p.any(axis=1))[0].max()
        except ValueError:
            # All-zero jet (no particles): skip it.
            continue
        records.append(np.concatenate((p[:m_row, :], f[:m_row, :], m[:m_row, :]), axis=1))
        ys.append(label)
    return records, np.array(ys)
def get_top_quark_datset(train=None, val=None, test=None):
    """Load the three awkd splits, pre-process them, and wrap them as SetDatasets.

    The optional arguments cap the number of sampled jets per split.
    """
    raw_train = ParticleNetDataset(os.path.join(data_root, 'train_file_0.awkd'), data_format='channel_last')
    raw_val = ParticleNetDataset(os.path.join(data_root, 'val_file_0.awkd'), data_format='channel_last')
    raw_test = ParticleNetDataset(os.path.join(data_root, 'test_file_0.awkd'), data_format='channel_last')
    logging.info('Loaded raw data')

    train_records, train_y = pre_process(raw_train, limit=train)
    val_records, val_y = pre_process(raw_val, limit=val)
    test_records, test_y = pre_process(raw_test, limit=test)
    logging.info('Finish pre-processing')
    logging.info('train: {} val: {} test: {}'.format(len(train_y), len(val_y), len(test_y)))

    return SetDataset(records=train_records, is_init=True), train_y, \
           SetDataset(records=val_records, is_init=True), val_y, \
           SetDataset(records=test_records, is_init=True), test_y
if __name__ == '__main__':
    # CLI: experiment name, dataset split sizes, and attention-set options.
    parser = argparse.ArgumentParser()
    parser.add_argument("--exp_name", type=str, default='test')
    parser.add_argument("--splits", type=int, nargs="+", default=[1200000, 400000, 400000])
    parser.add_argument("--attention_set_limit", type=int, default=6)
    parser.add_argument("--use_attention_set", action='store_true')
    parser.add_argument('--save', action='store_true')
    parser.add_argument("--log", action='store_true')
    args = parser.parse_args()

    # Fix seeds for reproducible sampling in pre_process().
    np.random.seed(42)
    random.seed(42)

    log_dir = os.path.join(os.path.abspath('__file__' + '/../'), 'outputs', 'top_quark')
    create_logger(log_dir=log_dir,
                  log_name=args.exp_name,
                  dump=args.log)
    logging.info(args)

    train, val, test = args.splits
    ds_train, y_train, ds_val, y_val, ds_test, y_test = get_top_quark_datset(train, val, test)

    # Hyper-parameters shared by the GBDT variants configured below.
    shared_gbdt_params = {'n_estimators': 50,
                          'learning_rate': 0.1,
                          'max_depth': 8,
                          'max_features': None,
                          'subsample': 0.5,
                          'criterion': 'mse',
                          'early_stopping_rounds': 5,
                          'random_state': 42}
    logging.info('Shared params:\n' + pformat(shared_gbdt_params))

    # Set-GBDT (attention-set) configuration.
    set_params = {'n_estimators': shared_gbdt_params['n_estimators'],
                  'operations': OPERATIONS,
                  'splitter': 'sklearn',
                  'use_attention_set': True,
                  'use_attention_set_comp': False,
                  'attention_set_limit': args.attention_set_limit,
                  'max_depth': shared_gbdt_params['max_depth'],
                  'max_features': shared_gbdt_params['max_features'],
                  'subsample': shared_gbdt_params['subsample'],
                  'random_state': shared_gbdt_params['random_state'],
                  'save_path': None,
                  'validation_fraction': 0.25,
                  'tol': 1e-4,
                  'n_iter_no_change': shared_gbdt_params['early_stopping_rounds'],
                  'verbose': 3}

    # NOTE(review): sklearn_params is configured but not used below -- confirm
    # whether the sklearn baseline was intentionally disabled.
    sklearn_params = {'n_estimators': shared_gbdt_params['n_estimators'],
                      'criterion': 'mse',
                      'learning_rate': shared_gbdt_params['learning_rate'],
                      'max_depth': shared_gbdt_params['max_depth'],
                      'max_features': shared_gbdt_params['max_features'],
                      'subsample': shared_gbdt_params['subsample'],
                      'validation_fraction': 0.25,
                      'tol': 1e-4,
                      'n_iter_no_change': shared_gbdt_params['early_stopping_rounds'],
                      'random_state': shared_gbdt_params['random_state']}

    xgboost_params = {#'tree_method': 'gpu_hist',
                      #'gpu_id': 7,
                      #'objective': 'binary:logistic',
                      'max_depth': shared_gbdt_params['max_depth'],
                      'n_jobs': 10,
                      'eval_metric': ['error'],
                      'learning_rate': shared_gbdt_params['learning_rate'],
                      'n_estimators': shared_gbdt_params['n_estimators'],
                      'colsample_bytree': shared_gbdt_params['max_features'],
                      'subsample': shared_gbdt_params['subsample'],
                      'reg_lambda': 0,
                      'verbosity': 0,
                      'random_state': shared_gbdt_params['random_state'],
                      'seed': shared_gbdt_params['random_state']}

    # Flatten the set datasets into fixed-size feature vectors for xgboost.
    x_train, x_test, x_val = eval.flatten_datasets(ds_train, ds_test,
                                                   operations_list=set_params['operations'],
                                                   ds_val=ds_val)
    xgboost_gbtd = eval.train_and_predict_xgboost(xgboost_params,
                                                  x_train, y_train,
                                                  x_test, y_test,
                                                  val_x=None, val_y=None,
                                                  early_stopping_rounds=None)

    # Train the set-GBDT on train+val combined.
    ds_train_val = merge_init_datasets(ds_train, ds_val)
    set_gbtd = eval.train_and_predict_set_gbdt(set_params,
                                               ds_train_val, np.concatenate([y_train, y_val]),
                                               ds_test, y_test,
                                               resume=None)

    if args.save:
        pkl_filename = os.path.join(log_dir, '{}_model.pkl'.format(args.exp_name))
        with open(pkl_filename, 'wb') as file:
| StarcoderdataPython |
def array_max_consecutive_sum_short(a, k):
    """Maximum sum over all length-k windows of `a`, via a running window sum."""
    window = sum(a[:k])
    best = window
    # Slide the window: drop the element leaving it, add the one entering it.
    for leaving, entering in zip(a, a[k:]):
        window += entering - leaving
        best = max(best, window)
    return best
def array_max_consecutive_sum(a, k):
    """Maximum sum of k consecutive elements of `a`.

    Rewritten as an O(n) sliding window (matching its sibling
    ``array_max_consecutive_sum_short``); the previous version rebuilt each
    window from scratch, which its own comment flagged as undesirably O(n*k).
    """
    current = sum(a[:k])
    best = current
    for i in range(len(a) - k):
        current += a[i + k] - a[i]
        best = max(best, current)
    return best
if __name__ == '__main__':
    # Demo: the best sum of 2 consecutive elements of arr is 8 (3 + 5).
    arr = [2, 3, 5, 1, 6]
    k = 2
    print(array_max_consecutive_sum(arr, k))
| StarcoderdataPython |
1922366 | <filename>model_zoo/research/hpc/ocean_model/src/oa_operator.py
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""stencil operations kernel"""
import mindspore.nn as nn
from mindspore.ops import operations as P
class axb_kernel(nn.Cell):
    """Backward average along axis 0: out[i] = 0.5 * (x[i] + x[i-1]), zero-padded at i = 0."""
    def __init__(self):
        super().__init__()
        self.pad = P.Pad(((1, 0), (0, 0), (0, 0)))
        self.slice = P.Slice()
        self.shape = P.Shape()

    def construct(self, x):
        # Zero-pad in front of axis 0, then slice back to the original shape
        # so element i of `shifted` is x[i-1] (0 at the boundary).
        shifted = self.slice(self.pad(x), (0, 0, 0), self.shape(x))
        return 0.5 * (x + shifted)
class ayb_kernel(nn.Cell):
    """Backward average along axis 1: out[j] = 0.5 * (x[j] + x[j-1]), zero-padded at j = 0."""
    def __init__(self):
        super().__init__()
        self.pad = P.Pad(((0, 0), (1, 0), (0, 0)))
        self.slice = P.Slice()
        self.shape = P.Shape()

    def construct(self, x):
        # `shifted` holds x shifted by one along axis 1 (0 at the boundary).
        shifted = self.slice(self.pad(x), (0, 0, 0), self.shape(x))
        return 0.5 * (x + shifted)
class azb_kernel(nn.Cell):
    """Backward average along axis 2: out[k] = 0.5 * (x[k] + x[k-1]), zero-padded at k = 0."""
    def __init__(self):
        super().__init__()
        self.pad = P.Pad(((0, 0), (0, 0), (1, 0)))
        self.slice = P.Slice()
        self.shape = P.Shape()

    def construct(self, x):
        # `shifted` holds x shifted by one along axis 2 (0 at the boundary).
        shifted = self.slice(self.pad(x), (0, 0, 0), self.shape(x))
        return 0.5 * (x + shifted)
class axf_kernel(nn.Cell):
    """Forward average along axis 0: out[i] = 0.5 * (x[i] + x[i+1]), zero-padded at the end."""
    def __init__(self):
        super().__init__()
        self.pad = P.Pad(((0, 1), (0, 0), (0, 0)))
        self.slice = P.Slice()
        self.shape = P.Shape()

    def construct(self, x):
        # Pad behind axis 0 and slice starting at index 1, so element i of
        # `shifted` is x[i+1] (0 at the boundary).
        shifted = self.slice(self.pad(x), (1, 0, 0), self.shape(x))
        return 0.5 * (x + shifted)
class ayf_kernel(nn.Cell):
    """Forward average along axis 1: out[:, j] = 0.5 * (x[:, j] + x[:, j+1]),
    with the out-of-range neighbour treated as zero."""
    def __init__(self):
        super(ayf_kernel, self).__init__()
        # Append one zero plane on axis 1.
        self.pad = P.Pad(((0, 0), (0, 1), (0, 0)))
        self.slice = P.Slice()
        self.shape = P.Shape()
    def construct(self, x):
        x1 = self.pad(x)
        x_shape = self.shape(x)
        # Slice from offset 1 on axis 1: x1[:, j] == x[:, j+1].
        x1 = self.slice(x1, (0, 1, 0), x_shape)
        out = 0.5 * (x + x1)
        return out
class azf_kernel(nn.Cell):
    """Forward average along axis 2: out[..., k] = 0.5 * (x[..., k] + x[..., k+1]),
    with the out-of-range neighbour treated as zero."""
    def __init__(self):
        super(azf_kernel, self).__init__()
        # Append one zero plane on axis 2.
        self.pad = P.Pad(((0, 0), (0, 0), (0, 1)))
        self.slice = P.Slice()
        self.shape = P.Shape()
    def construct(self, x):
        x1 = self.pad(x)
        x_shape = self.shape(x)
        # Slice from offset 1 on axis 2: x1[..., k] == x[..., k+1].
        x1 = self.slice(x1, (0, 0, 1), x_shape)
        out = 0.5 * (x + x1)
        return out
class dxb_kernel(nn.Cell):
    """Backward difference along axis 0: out[i] = x[i] - x[i-1], with the
    out-of-range neighbour x[-1] treated as zero."""
    def __init__(self):
        super(dxb_kernel, self).__init__()
        # Prepend one zero plane on axis 0.
        self.pad = P.Pad(((1, 0), (0, 0), (0, 0)))
        self.slice = P.Slice()
        self.shape = P.Shape()
    def construct(self, x):
        x1 = self.pad(x)
        x_shape = self.shape(x)
        # Crop back to x's shape: x1[i] == x[i-1].
        x1 = self.slice(x1, (0, 0, 0), x_shape)
        x = x - x1
        return x
class dxf_kernel(nn.Cell):
    """Forward difference along axis 0: out[i] = x[i+1] - x[i], with the
    out-of-range neighbour x[n] treated as zero."""
    def __init__(self):
        super(dxf_kernel, self).__init__()
        # Append one zero plane on axis 0.
        self.pad = P.Pad(((0, 1), (0, 0), (0, 0)))
        self.slice = P.Slice()
        self.shape = P.Shape()
    def construct(self, x):
        x1 = self.pad(x)
        x_shape = self.shape(x)
        # Slice from offset 1: x1[i] == x[i+1].
        x1 = self.slice(x1, (1, 0, 0), x_shape)
        x = x1 - x
        return x
class dyb_kernel(nn.Cell):
    """Backward difference along axis 1: out[:, j] = x[:, j] - x[:, j-1], with
    the out-of-range neighbour treated as zero."""
    def __init__(self):
        super(dyb_kernel, self).__init__()
        # Prepend one zero plane on axis 1.
        self.pad = P.Pad(((0, 0), (1, 0), (0, 0)))
        self.slice = P.Slice()
        self.shape = P.Shape()
    def construct(self, x):
        x1 = self.pad(x)
        x_shape = self.shape(x)
        # Crop back to x's shape: x1[:, j] == x[:, j-1].
        x1 = self.slice(x1, (0, 0, 0), x_shape)
        x = x - x1
        return x
class dyf_kernel(nn.Cell):
    """Forward difference along axis 1: out[:, j] = x[:, j+1] - x[:, j], with
    the out-of-range neighbour treated as zero."""
    def __init__(self):
        super(dyf_kernel, self).__init__()
        # Append one zero plane on axis 1.
        self.pad = P.Pad(((0, 0), (0, 1), (0, 0)))
        self.slice = P.Slice()
        self.shape = P.Shape()
    def construct(self, x):
        x1 = self.pad(x)
        x_shape = self.shape(x)
        # Slice from offset 1 on axis 1: x1[:, j] == x[:, j+1].
        x1 = self.slice(x1, (0, 1, 0), x_shape)
        x = x1 - x
        return x
class dzb_kernel(nn.Cell):
    """Backward difference along axis 2: out[..., k] = x[..., k] - x[..., k-1],
    with the out-of-range neighbour treated as zero."""
    def __init__(self):
        super(dzb_kernel, self).__init__()
        # Prepend one zero plane on axis 2.
        self.pad = P.Pad(((0, 0), (0, 0), (1, 0)))
        self.slice = P.Slice()
        self.shape = P.Shape()
    def construct(self, x):
        x1 = self.pad(x)
        x_shape = self.shape(x)
        # Crop back to x's shape: x1[..., k] == x[..., k-1].
        x1 = self.slice(x1, (0, 0, 0), x_shape)
        x = x - x1
        return x
class dzf_kernel(nn.Cell):
    """Forward difference along axis 2: out[..., k] = x[..., k+1] - x[..., k],
    with the out-of-range neighbour treated as zero."""
    def __init__(self):
        super(dzf_kernel, self).__init__()
        # Append one zero plane on axis 2.
        self.pad = P.Pad(((0, 0), (0, 0), (0, 1)))
        self.slice = P.Slice()
        self.shape = P.Shape()
    def construct(self, x):
        x1 = self.pad(x)
        x_shape = self.shape(x)
        # Slice from offset 1 on axis 2: x1[..., k] == x[..., k+1].
        x1 = self.slice(x1, (0, 0, 1), x_shape)
        x = x1 - x
        return x
| StarcoderdataPython |
12833047 | import torch
def accuracy(logits, y):
    """Fraction of predictions whose argmax over the last axis equals `y`."""
    predictions = torch.argmax(logits, dim=-1)
    hits = (predictions == y).float()
    return torch.mean(hits)
def loss(logits, y):
    """Mean cross-entropy between raw class logits and integer targets `y`."""
    cross_entropy = torch.nn.functional.cross_entropy
    return cross_entropy(input=logits, target=y)
def print_metrics(metrics):
    """Print every metric in `metrics` as "name: value", one per line."""
    for name in metrics:
        print(f"{name}: {metrics[name]}")
| StarcoderdataPython |
20112 | <gh_stars>0
# coding=utf-8
from __future__ import unicode_literals
from tornado.testing import AsyncTestCase
from apps.core.models import (ModelBase,
_get_master_engine,
_get_slave_engine)
from tornado.options import options
from apps.core.urlutils import urlpattens
from apps.auth.views import LoginHandler
from apps.views import IndexHandler
from apps.core.datastruct import QueryDict, lru_cache
from simplejson import loads
from tornado.testing import AsyncHTTPTestCase, gen_test
from apps.core.httpclient import (RESTfulAsyncClient, SessionClient)
from apps.core.crypto import get_random_string
from tornado.web import URLSpec
import re
from tornado.web import Application
from apps.core.cache.base import CacheBase, cache as cache_proxy
from tornado.gen import sleep
from mock import patch
from apps.core.timezone import now
from concurrent.futures import ThreadPoolExecutor
import thread
# This way the real database will not be wiped.
options.testing = True  # Crucial, so it is set directly instead of mocked; base_url is patched further below.
class EngineTest(AsyncTestCase):
    """Verify that both the master and the slave SQL engines point at the
    sqlite test database (driver ``pysqlite``) for the whole test lifecycle."""
    # Lazily-built list of mock.patch contexts entered in setUp.
    contexts = None
    def setUp(self):
        if self.contexts is None:
            self.contexts = []
        # Force the SQL connection string to an in-memory sqlite URL.
        o = patch.object(options.mockable(), 'sql_connection',
                         b"sqlite:///")
        self.contexts.append(o)
        for context in self.contexts:
            context.__enter__()
        super(EngineTest, self).setUp()
        engine = _get_master_engine()
        self.assertEqual(str(engine.url), options.test_db)
        self.assertEqual(engine.driver, "pysqlite")
        engine = _get_slave_engine()
        self.assertEqual(str(engine.url), options.test_db)
        self.assertEqual(engine.driver, "pysqlite")
    def tearDown(self):
        # Re-check the engines so a test cannot silently swap the connection.
        engine = _get_master_engine()
        self.assertEqual(str(engine.url), options.test_db)
        self.assertEqual(engine.driver, "pysqlite")
        engine = _get_slave_engine()
        self.assertEqual(str(engine.url), options.test_db)
        self.assertEqual(engine.driver, "pysqlite")
        for context in self.contexts:
            context.__exit__()
        super(EngineTest, self).tearDown()
    def test_engine(self):
        engine = _get_master_engine()
        self.assertEqual(str(engine.url), options.test_db)
        self.assertEqual(engine.driver, "pysqlite")
        engine = _get_slave_engine()
        self.assertEqual(str(engine.url), options.test_db)
        self.assertEqual(engine.driver, "pysqlite")
class BaseTestCase(EngineTest):
    """Base case for HTTP tests: patches ``base_url`` and creates/drops the
    full schema on the sqlite test engine around every test."""
    contexts = None
    @staticmethod
    def _parse_cookie(cookie_line):
        # Keep only the "name=value" part of a Set-Cookie header.
        return cookie_line.split(";")[0]
    def reverse_url(self, url_name, *args):
        # Absolute URL for a named route of the app under test.
        return self.get_url(self._app.reverse_url(url_name, *args))
    def setUp(self):
        if self.contexts is None:
            self.contexts = []
        o = patch.object(options.mockable(), 'base_url',
                         b"/")
        self.contexts.append(o)
        super(BaseTestCase, self).setUp()
        engine = _get_master_engine()
        self.assertEqual(engine.driver, "pysqlite")
        ModelBase.metadata.create_all(engine)
    def tearDown(self):
        # An in-memory sqlite database would not need dropping; this is mainly
        # for the case of a local database file.
        engine = _get_master_engine()
        self.assertEqual(str(engine.url), options.test_db)
        self.assertEqual(engine.driver, "pysqlite")
        engine = _get_slave_engine()
        self.assertEqual(str(engine.url), options.test_db)
        self.assertEqual(engine.driver, "pysqlite")
        ModelBase.metadata.drop_all(engine)
        super(BaseTestCase, self).tearDown()
class UrlTestCase(BaseTestCase, AsyncHTTPTestCase):
    """Exercise ``urlpattens``: prefixing, reverse lookup and list addition."""
    def get_app(self):
        url = urlpattens('test',
                         [
                             ("/login/", LoginHandler, None, "login"),
                             ("/callback", LoginHandler, None, "callback"),
                         ]
                         )
        return Application(url)
    def test_reverse(self):
        # Named routes are namespaced as "<prefix>:<name>".
        self.assertEqual(self._app.reverse_url("test:login"), "/test/login/")
    def test_urlpatten_with_prefex(self):
        url = urlpattens('user',
                         [
                             ("/login/", LoginHandler),
                             ("/callback", LoginHandler),
                         ]
                         )
        root_url = [(r"/", IndexHandler)]
        new_urls = root_url + url
        self.assertEqual(new_urls[2].regex,
                         re.compile(r"/user/callback$"))
    def test_urlpatten_without_prefex(self):
        url = urlpattens('',
                         [
                             ("/login/", LoginHandler),
                             ("/callback", LoginHandler),
                         ]
                         )
        root_url = [(r"/", IndexHandler)]
        new_urls = root_url + url
        self.assertEqual(new_urls[1].regex,
                         URLSpec(r"/login/", LoginHandler).regex)
    def test_urlpatten_radd(self):
        url = urlpattens('',
                         [
                             ("/login/", LoginHandler),
                             ("/callback", LoginHandler),
                         ]
                         )
        root_url = [(r"/", IndexHandler)]
        new_urls = url + root_url  # reversed operand order exercises __radd__
        self.assertEqual(new_urls[0].regex,
                         URLSpec(r"/login/", LoginHandler).regex)
class DataStructTestCase(EngineTest):
    """Tests for ``QueryDict.urlencode`` and the ``lru_cache`` decorator."""
    def test_urlencode_safe(self):
        q = QueryDict({})
        q['next'] = '/a&b/'
        self.assertEqual(q.urlencode(), 'next=%2Fa%26b%2F')
        # Characters passed as "safe" are kept unescaped.
        self.assertEqual(q.urlencode("/"), 'next=/a%26b/')
    def test_urlencode_unicode(self):
        q = QueryDict({})
        q['next'] = '啊'
        self.assertEqual(q.urlencode(), 'next=%E5%95%8A')
    def test_urlencode_list(self):
        q = QueryDict({})
        q['next'] = ['1', "2"]
        self.assertEqual(q.urlencode(), 'next=1&next=2')
    def test_lru(self):
        store = dict(zip("abcd", range(4)))
        @lru_cache(2)
        def somefunc(arg):
            return store[arg]
        # Two cold lookups: both miss.
        self.assertEqual(somefunc("a"), 0)
        self.assertEqual(somefunc("b"), 1)
        cache_info = somefunc.cache_info()
        self.assertEqual(cache_info.misses, 2)
        self.assertEqual(cache_info.hits, 0)
        # Repeat lookups hit the cache.
        self.assertEqual(somefunc("a"), 0)
        self.assertEqual(somefunc("b"), 1)
        cache_info = somefunc.cache_info()
        self.assertEqual(cache_info.misses, 2)
        self.assertEqual(cache_info.hits, 2)
        # cache_clear resets both the entries and the statistics.
        somefunc.cache_clear()
        self.assertEqual(somefunc("a"), 0)
        self.assertEqual(somefunc("b"), 1)
        cache_info = somefunc.cache_info()
        self.assertEqual(cache_info.misses, 2)
        self.assertEqual(cache_info.hits, 0)
        # New keys beyond maxsize=2 keep missing.
        self.assertEqual(somefunc("c"), 2)
        self.assertEqual(somefunc("d"), 3)
        cache_info = somefunc.cache_info()
        self.assertEqual(cache_info.misses, 4)
        self.assertEqual(cache_info.hits, 0)
    def test_lru_nosize(self):
        store = dict(zip("abcd", range(4)))
        @lru_cache(None)
        def somefunc(arg):
            return store[arg]
        self.assertEqual(somefunc("a"), 0)
        self.assertEqual(somefunc("b"), 1)
        cache_info = somefunc.cache_info()
        self.assertEqual(cache_info.misses, 2)
        self.assertEqual(cache_info.hits, 0)
class ClientTestCase(BaseTestCase, AsyncHTTPTestCase):
    """Round-trip GET/POST/PUT/DELETE through ``RESTfulAsyncClient`` against
    the real application's ``/api/`` echo endpoint."""
    def get_app(self):
        from main import make_app
        return make_app()
    def test_get(self):
        client = RESTfulAsyncClient()
        url = self.get_url("/api/")
        client.get(url,
                   {"a": "b", "c": 1},
                   callback=self.stop)
        response = self.wait()
        response = loads(response.body)["data"]
        # Query-string values always come back as strings.
        self.assertItemsEqual(response['query'], {
            "a": "b",
            "c": "1"
        })
    def test_post(self):
        client = RESTfulAsyncClient()
        url = self.get_url("/api/")
        response = client.post(url,
                               {"a": "b", "c": ["1", 3, 4]},
                               callback=self.stop)
        # self.assertEqual()
        response = self.wait()
        response = loads(response.body)["data"]
        self.assertItemsEqual(response['form'], {
            "a": "b",
            "c": ["1", 3, 4]
        })
    def test_put(self):
        client = RESTfulAsyncClient()
        url = self.get_url("/api/")
        response = client.put(url,
                              {"a": "b", "c": ["1", 3, 4]},
                              callback=self.stop)
        response = self.wait()
        response = loads(response.body)["data"]
        self.assertItemsEqual(response['form'], {
            "a": "b",
            "c": ["1", 3, 4]
        })
    def test_delete(self):
        client = RESTfulAsyncClient()
        url = self.get_url("/api/")
        response = client.delete(url,
                                 callback=self.stop)
        response = self.wait()
        self.assertEqual(response.code, 200)
class TestCrypto(EngineTest):
    """``get_random_string`` honours the requested length and is non-repeating."""
    def test_random(self):
        self.assertEqual(len(get_random_string(12)), 12)
        self.assertEqual(len(get_random_string(20)), 20)
        self.assertNotEqual(get_random_string(12), get_random_string(12))
class TestTimeUtils(EngineTest):
    """``now()`` must return a timezone-aware datetime."""
    def test_now(self):
        dt = now()
        self.assertIsNotNone(dt.tzinfo)
class TestSessionClient(AsyncHTTPTestCase, BaseTestCase):
    """``SessionClient`` behaves as a singleton and persists cookies between
    requests to the same application."""
    def get_app(self):
        from main import make_app
        return make_app()
    def get_http_client(self):
        return SessionClient(io_loop=self.io_loop)
    def test_single_instance(self):
        new_client = SessionClient(io_loop=self.io_loop)
        self.assertEqual(id(new_client), id(self.http_client))
        self.assertEqual(id(new_client.cookiejar),
                         id(self.http_client.cookiejar))
    def test_session(self):
        url = self.get_url("/api/")
        self.http_client.get(url, callback=self.stop)
        response = self.wait()
        # The first request receives a Set-Cookie header.
        self.assertEquals(response.code, 200)
        self.assertIn("Set-Cookie", response.headers)
        url = self.get_url("/api/")
        self.http_client.get(url, callback=self.stop)
        response = self.wait()
        # The second response no longer sets a cookie; the client sends it back.
        self.assertNotIn("Set-Cookie", response.headers)
        self.assertIn("cookie", response.request.headers)
        # External requests slow the suite down, so this part is not tested:
        # self.http_client.get("http://httpbin.org/get",
        #                      callback=self.stop)
        # response = self.wait()
        # self.assertNotIn("cookie", response.request.headers)
class MemoryCacheTestCase(EngineTest):
    """In-memory cache backend: get/set, LRU eviction, per-entry timeouts and
    the module-level ``cache`` proxy."""
    contexts = None
    def setUp(self):
        if self.contexts is None:
            self.contexts = []
        o = patch.object(options.mockable(), 'cache_engine',
                         "apps.core.cache.memory.MemoryCache")
        self.contexts.append(o)
        super(MemoryCacheTestCase, self).setUp()
    @gen_test
    def test_get(self):
        CacheBase.configure(
            "apps.core.cache.memory.MemoryCache", io_loop=self.io_loop)
        cache = CacheBase(self.io_loop)
        value = yield cache.get("key_not_exist")
        self.assertEqual(value, None)
    @gen_test
    def test_set(self):
        CacheBase.configure(
            "apps.core.cache.memory.MemoryCache", io_loop=self.io_loop)
        cache = CacheBase(self.io_loop)
        yield cache.set("somekey", 1)
        value = yield cache.get("somekey")
        self.assertEqual(value, 1)
    @gen_test
    def test_size_set(self):
        CacheBase.configure(
            "apps.core.cache.memory.MemoryCache", io_loop=self.io_loop,
            defaults={"max_size": 2})
        cache = CacheBase()
        # With max_size=2 the third insert must evict the oldest entry.
        yield cache.set("somekey", 1)
        yield cache.set("somekey2", 2)
        yield cache.set("somekey3", 3)
        value = yield cache.get("somekey")
        self.assertEqual(value, None)
    @gen_test
    def test_size_lru(self):
        CacheBase.configure(
            "apps.core.cache.memory.MemoryCache", io_loop=self.io_loop,
            defaults={"max_size": 2})
        cache = CacheBase()
        yield cache.set("somekey", 1)
        yield cache.set("somekey2", 2)
        # yield cache.set("somekey3", 3)
        # Reading "somekey" marks it recently used ...
        value = yield cache.get("somekey")
        self.assertEqual(value, 1)
        yield cache.set("somekey3", 3)  # ... so "somekey2" is the one evicted
        value = yield cache.get("somekey")
        self.assertEqual(value, 1)
        value = yield cache.get("somekey2")
        self.assertEqual(value, None)
    @gen_test
    def test_timeout(self):
        CacheBase.configure(
            "apps.core.cache.memory.MemoryCache", io_loop=self.io_loop,
            defaults={"max_size": 2})
        cache = CacheBase()
        # Entries expire after their per-entry timeout (seconds).
        yield cache.set("somekey", 1, 1)
        yield cache.set("somekey2", 2, 2)
        yield sleep(2)
        self.assertNotIn("somekey", cache._cache)
        self.assertNotIn("somekey", cache)
    @gen_test
    def test_proxy(self):
        o = patch.object(options.mockable(),
                         'cache_options',
                         {"max_size": 2})
        o.__enter__()
        self.contexts.append(o)
        o = patch.object(options.mockable(),
                         'cache_engine',
                         "apps.core.cache.memory.MemoryCache")
        o.__enter__()
        self.contexts.append(o)
        yield cache_proxy.set("somekey", 1, 1)
        yield cache_proxy.set("somekey2", 2, 2)
        yield sleep(2)
        self.assertNotIn("somekey", cache_proxy._cache)
        self.assertNotIn("somekey", cache_proxy)
class A(object):
    """Trivial value holder used by the Redis cache round-trip tests."""
    def __init__(self, arg):
        self.arg = arg
class RedisCacheTest(BaseTestCase):
    """Redis cache backend: round-trips of plain values, custom objects, dicts
    and binary payloads."""
    # TODO: how should tearDown clean the Redis keys up?
    @gen_test
    def test_get(self):
        CacheBase.configure("apps.core.cache.redis.RedisCache",
                            defaults=options.cache_options)
        cache = CacheBase(self.io_loop)
        value = yield cache.get("key_not_exist")
        self.assertEqual(value, None)
    @gen_test
    def test_set(self):
        CacheBase.configure("apps.core.cache.redis.RedisCache",
                            defaults=options.cache_options)
        cache = CacheBase(self.io_loop)
        yield cache.set("testkey", "value")
        value = yield cache.get("testkey",)
        self.assertEqual(value, "value")
        yield cache.delete("testkey")
        value = yield cache.get("testkey",)
        self.assertEqual(value, None)
    @gen_test
    def test_set_object(self):
        CacheBase.configure("apps.core.cache.redis.RedisCache",
                            defaults=options.cache_options)
        cache = CacheBase(self.io_loop)
        # Arbitrary Python objects must survive serialization.
        obj = A(123123)
        yield cache.set("testkey", obj)
        value = yield cache.get("testkey",)
        self.assertEqual(isinstance(value, A), True)
        self.assertEqual(value.arg, 123123)
        yield cache.delete("testkey")
        value = yield cache.get("testkey",)
        self.assertEqual(value, None)
    @gen_test
    def test_set_dict(self):
        CacheBase.configure("apps.core.cache.redis.RedisCache",
                            defaults=options.cache_options)
        cache = CacheBase(self.io_loop)
        obj = {"asd": 123, "zxc": "qwe"}
        yield cache.set("testkey", obj)
        value = yield cache.get("testkey",)
        self.assertEqual(isinstance(value, dict), True)
        self.assertItemsEqual(value, {"asd": 123, "zxc": "qwe"})
        yield cache.delete("testkey")
        value = yield cache.get("testkey",)
        self.assertEqual(value, None)
    @gen_test
    def test_bin(self):
        CacheBase.configure("apps.core.cache.redis.RedisCache",
                            defaults=options.cache_options)
        cache = CacheBase(self.io_loop)
        # Text values must come back as unicode ...
        obj = {"asd": 123, "zxc": u"啊"}
        yield cache.set("testkey", obj)
        value = yield cache.get("testkey",)
        self.assertItemsEqual(value, {"asd": 123, "zxc": u"啊"})
        self.assertTrue(isinstance(value["zxc"], unicode))
        # ... and raw bytes must come back unmodified.
        obj = {"asd": 123, "zxc": b"\x00\x01\x02"}
        yield cache.set("testkey2", obj)
        value = yield cache.get("testkey2",)
        self.assertTrue(isinstance(value["zxc"], bytes))
        self.assertEquals(value["zxc"], b"\x00\x01\x02")
class ExecutorTestCase(EngineTest):
    """Work submitted to a ThreadPoolExecutor runs on worker threads, not on
    the test's main thread."""
    def user_pow(self, *args):
        # Must execute off the thread that started the pool.
        self.assertNotEqual(thread.get_ident(), self.father_id)
        return pow(*args)
    def test_thread_db(self):
        self.father_id = thread.get_ident()
        with ThreadPoolExecutor(max_workers=4) as exectors:
            future = exectors.map(self.user_pow, range(5), range(5))
            self.assertItemsEqual(list(future), [1, 1, 4, 27, 256])
| StarcoderdataPython |
3491630 | from flask_jsonvalidator import (
JSONValidator,
StringValidator
)
class ReplyValidator(JSONValidator):
    """Payload schema for creating a reply: both fields are required strings."""
    validators = {
        "content" : StringValidator(nullable=False),
        "report_public_id" : StringValidator(nullable=False),
    }
class ReplyEditValidator(JSONValidator):
    """Payload schema for editing a reply: only the content may change."""
    validators = {
        "content" : StringValidator(nullable=False)
    }
| StarcoderdataPython |
11396970 | <reponame>kyper999/SmartHome-Demo2
# -*- coding: utf-8 -*-
"""
Base task class
"""
from celery.utils.log import get_task_logger
class BaseTask(object):
    """
    Base class for Celery task
    """
    # NOTE(review): purpose unclear from this file — presumably the error/class
    # associated with a failed run; confirm against subclasses.
    Error = None
    # Human-readable description, used in the task-entry log message.
    Desc = "Base task Object"
    def run(self):
        # Subclasses override this with the actual task body.
        pass
    @property
    def log(self):
        # Celery task logger named "<module>.<ClassName>".
        return get_task_logger('%s.%s' % (__name__, self.__class__.__name__))
class TaskException(Exception):
    """Generic exception raised for task failures."""
    pass
def task_entry(app):
    """Run a BaseTask instance, logging the start and any failure, then re-raise."""
    try:
        app.log.info("Ready to proceed task: %s." % app.Desc)
        app.run()
    except Exception, e:
        app.log.error(e.message)
        # NOTE(review): in Python 2 `raise e` discards the original traceback;
        # a bare `raise` would preserve it — confirm before changing.
        raise e
| StarcoderdataPython |
4855706 | ## @package presence.py
# Used to check whether a user is online
from flask import request, jsonify, Response, session, redirect, make_response
from flask.views import MethodView
from sqlalchemy import and_, or_
from backend import db, app
from backend.database.models import Presence, User
import gevent, json
# Module-level snapshot of online users, shared by every call to event_stream().
presence_list = []


## Checks regularly whether users have gone online or went offline
def event_stream():
    """Build one SSE "data:" frame describing currently-online users.

    NOTE(review): despite the ``while True``, the function returns on the
    first iteration, so each call yields a single snapshot rather than a
    continuous stream — confirm this is intended.
    """
    while True:
        # Throttle between database checks (currently 1 second).
        gevent.sleep(1)
        # Get users who are either online in the game or web
        user_presences = Presence.query.join(Presence.user).filter(or_(Presence.game_online == True, Presence.web_online == True)).all()
        if user_presences is None:
            return "data: []\n\n"
        # Put all new presence users into the presence list
        for new_data in user_presences:
            if not any(init_data['username'] == new_data.user.username for init_data in presence_list):
                # if new_data.user.username not in init_data (presence_list):
                jsonData = {'id':new_data.id,'username': new_data.user.username,'first_name': new_data.user.first_name, 'last_name': new_data.user.last_name, 'web_online': new_data.web_online, 'game_online': new_data.game_online}
                presence_list.append(jsonData)
            # Now this is check specifically if a user has changed their web or game online status
            else:
                for temp_data in presence_list:
                    if(temp_data['username'] == new_data.user.username):
                        temp_data['web_online'] = new_data.web_online
                        temp_data['game_online'] = new_data.game_online
        # This deletes a user from presence list if he has gone offline
        # NOTE(review): removing from presence_list while iterating it can
        # skip entries — verify with multiple simultaneous disconnects.
        for it_data in presence_list:
            if not any(init_data.user.username == it_data['username'] for init_data in user_presences):
                presence_list.remove(it_data)
        # return jsonify(data = presence_list)
        return "data: %s\n\n" % json.dumps(presence_list)
@app.route('/stream')
def stream():
    """Server-sent-events endpoint delivering the presence snapshot."""
    return Response(event_stream(), mimetype="text/event-stream")
## Used to get all users which are present
class PresenceOnlineApi(MethodView):
    """GET: list every user who is online on the web or in the game."""
    def get(self):
        presence_list = []
        user_presences = Presence.query.join(Presence.user).filter(or_(Presence.game_online == True, Presence.web_online == True)).all()
        if user_presences is None:
            return jsonify(**{'success': False}), 401
        for data in user_presences:
            # NOTE(review): the local name `json` shadows the imported module
            # inside this loop — consider renaming.
            json = {'username': data.user.username,'first_name': data.user.first_name, 'last_name': data.user.last_name, 'web_online': data.web_online, 'game_online': data.game_online}
            presence_list.append(json)
        return jsonify(results = presence_list)
# Register the presence listing view (GET only) under /api/presence/online/.
presence_online_view = PresenceOnlineApi.as_view('presence_online_api')
app.add_url_rule('/api/presence/online/', view_func=presence_online_view, methods=['GET']) | StarcoderdataPython |
6415620 | <reponame>CityPulse/CP_Resourcemanagement<gh_stars>1-10
import threading
import Queue
from time import sleep
from virtualisation.misc.log import Log as L
__author__ = '<NAME> (<EMAIL>)'
class StoppableThread(threading.Thread):
    """A Thread with cooperative cancellation.

    Calling stop() merely raises a flag; the thread body is expected to poll
    stopped() regularly and exit on its own.
    """

    def __init__(self):
        super(StoppableThread, self).__init__()
        self._stop = threading.Event()

    def stop(self):
        """Request that the thread terminate."""
        self._stop.set()

    def stopped(self):
        """Return True once stop() has been called."""
        return self._stop.is_set()
class QueueThread(StoppableThread):
    """Worker thread that drains a FIFO queue, invoking a handler per item."""
    # def __init__(self, maxsize=100, handler=None, timeout=0.1):
    def __init__(self, handler=None, timeout=0.01):
        """
        :param handler: A 1 argument function, which is called asynchronously when an item in the queue is available.
            The argument for the function is a queue item added before.
        :param timeout: Float number how much delay (in seconds) between checks if an item in the queue is ready.
        :return:
        """
        super(QueueThread, self).__init__()
        # self.queue = Queue.Queue(maxsize)
        self.queue = Queue.Queue()  # unbounded, so put() below never blocks
        self.handler = handler
        self.timeout = timeout
    def add(self, item):
        # full() can only be True for a bounded queue; kept as a diagnostic.
        if self.queue.full():
            L.d2("trying to add something into a full queue:", item)
        self.queue.put(item, True)
        L.d2("QueueThread size:", self.queue.qsize())
    def run(self):
        if not self.handler:
            raise Exception("No handler set!")
        # Poll loop: sleep, then drain everything queued, until stop() is called.
        # NOTE(review): an exception raised by the handler terminates the
        # thread — confirm whether items should instead be skipped.
        while True:
            sleep(self.timeout)
            if not self.stopped():
                while not self.queue.empty():
                    self.handler(self.queue.get())
                    self.queue.task_done()
            else:
                break
    def getQueueSize(self):
        # Approximate number of pending items.
        return self.queue.qsize()
| StarcoderdataPython |
6671656 | <reponame>igormilovanovic/python-data-viz-cookbook<gh_stars>10-100
import numpy
import matplotlib.pyplot as plt
def _get_mask(t, t1, t2, lvl_pos, lvl_neg):
if t1 >= t2:
raise ValueError("t1 must be less than t2")
return numpy.where(numpy.logical_and(t > t1, t < t2), lvl_pos, lvl_neg)
def generate_signal(t):
    """Compose the demo signal over time axis `t`.

    It is the sum of a continuous 100 Hz tone, a louder 200 Hz tone that is
    gated on only during the 2-4 s and 14-15 s windows, and Gaussian noise.
    """
    base_tone = numpy.sin(2 * numpy.pi * 100 * t)
    high_tone = 2 * numpy.sin(2 * numpy.pi * 200 * t)
    # Gate masks are 1.0 inside their interval and 0.0 outside; the two
    # intervals do not overlap, so summing them keeps the gate in {0, 1}.
    gate = _get_mask(t, 2, 4, 1.0, 0.0) + _get_mask(t, 14, 15, 1.0, 0.0)
    noise = 0.02 * numpy.random.randn(len(t))
    return base_tone + high_tone * gate + noise
if __name__ == '__main__':
    # 1 ms sampling step => 1000 Hz sampling frequency over a 20 s window.
    step = 0.001
    sampling_freq=1000
    t = numpy.arange(0.0, 20.0, step)
    y = generate_signal(t)
    # we can visualize this now
    # in time
    ax1 = plt.subplot(211)
    plt.plot(t, y)
    # and in frequency
    plt.subplot(212)
    plt.specgram(y, NFFT=1024, noverlap=900,
                 Fs=sampling_freq, cmap=plt.cm.gist_heat)
    plt.show()
| StarcoderdataPython |
9789147 | <reponame>ketgo/quantum-computing
from .circuit import QuantumCircuit
from .functions import Hybrid, HybridFunction
| StarcoderdataPython |
4946814 | from a10sdk.common.A10BaseClass import A10BaseClass
class List(A10BaseClass):
    """Entry element of the cm-ut list-enum-1 list.

    This class does not support CRUD operations directly; operate through the
    parent object.  Fields a1/a2, b1/b2 and c1/c2/c3 belong to the "a", "b"
    and "c" entry choices respectively; `entry` selects among "a"/"b"/"c".
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.b_key = "list"
        self.DeviceProxy = ""
        # Every data field defaults to the empty string until overridden.
        for field in ("a1", "a2", "b1", "b2", "c2", "c3", "entry", "c1"):
            setattr(self, field, "")
        # Apply any caller-supplied overrides verbatim.
        for keys, value in kwargs.items():
            setattr(self, keys, value)
class ListEnum1(A10BaseClass):
    """Unit-test CM list with different entry types (parent class).

    Supports CRUD operations through the REST endpoint
    ``/axapi/v3/cm-ut/list-enum-1``; the `list` attribute holds `List` items.
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.required = []
        self.b_key = "list-enum-1"
        self.a10_url = "/axapi/v3/cm-ut/list-enum-1"
        self.DeviceProxy = ""
        self.A10WW_list = []
        # Apply any caller-supplied overrides verbatim.
        for keys, value in kwargs.items():
            setattr(self, keys, value)
| StarcoderdataPython |
1682972 | <filename>starter_code/migrations/versions/c0de0819f9f0_.py
"""empty message
Revision ID: c0de0819f9f0
Revises: c3880377ac48
Create Date: 2020-02-04 15:45:02.049082
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
# This migration follows c3880377ac48 in the revision chain.
revision = 'c0de0819f9f0'
down_revision = 'c3880377ac48'
branch_labels = None
depends_on = None
def upgrade():
    """Drop the ``type`` columns from Artist and Shows (destructive: data in
    those columns is lost)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('Artist', 'type')
    op.drop_column('Shows', 'type')
    # ### end Alembic commands ###
def downgrade():
    """Restore the ``type`` columns on Shows and Artist (values are not
    recovered, only the nullable VARCHAR(120) columns)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('Shows', sa.Column('type', sa.VARCHAR(length=120), autoincrement=False, nullable=True))
    op.add_column('Artist', sa.Column('type', sa.VARCHAR(length=120), autoincrement=False, nullable=True))
    # ### end Alembic commands ###
| StarcoderdataPython |
154991 | from django.shortcuts import render, get_object_or_404
from rest_framework import status
from django.http import HttpResponse, JsonResponse
# importing the models
from .models import CarBrands, Employees, EmployeeDesignations, Snippet, Persons, PersonTasks
# importing a APIView class based views from rest_framwork
from rest_framework.views import APIView
from rest_framework.response import Response
# for serializing the objects into json form
from rest_framework.renderers import JSONRenderer
from rest_framework.parsers import JSONParser
from .serializer import * # importing the serializer for each models
from rest_framework.decorators import api_view
# Create your views here.
# creating a view for the Snippet model
class Snippet_list(APIView):
    '''List all snippets, create a new snippet, or update/delete one by pk.'''

    def get(self, request):
        """Return every snippet."""
        snipets = Snippet.objects.all()
        seriliazer_class = SnippetSerializerA(snipets, many=True)
        return Response({"Snippet_details": seriliazer_class.data})

    def post(self, request):  # for adding a snippet
        """Create a new snippet; 201 on success, 400 with errors otherwise."""
        seriliazer_class = SnippetSerializerA(data=request.data)
        if seriliazer_class.is_valid():
            seriliazer_class.save()
            return Response({"status": "snippet sucessfully created"}, status=201)
        # Fix: validation errors previously came back with an implicit 200.
        return Response(seriliazer_class.errors, status=status.HTTP_400_BAD_REQUEST)

    def put(self, request, pk):
        """Replace the snippet identified by ``pk``."""
        try:
            snippet = Snippet.objects.get(pk=pk)
        except Snippet.DoesNotExist:
            # Fix: an unknown pk previously raised an unhandled DoesNotExist
            # (HTTP 500); return 404 like delete() does.
            return Response(status=status.HTTP_404_NOT_FOUND)
        serializer_class = SnippetSerializerA(snippet, request.data)
        if serializer_class.is_valid():
            serializer_class.save()
            return Response("existing snipet of id {} is modified".format(serializer_class.data["id"]))
        return Response(serializer_class.errors, status=status.HTTP_400_BAD_REQUEST)

    def delete(self, request, pk):
        """Delete the snippet identified by ``pk``; 404 when it does not exist."""
        try:
            snippet = Snippet.objects.get(pk=pk)
            snippet.delete()
            return Response("snnipet with id {} is deleted".format(pk))
        except Snippet.DoesNotExist:
            return Response(status=status.HTTP_404_NOT_FOUND)
# creating a view for rendering a data of the Car Brands Model
class CarBrand(APIView):
    """List all car brands and create new ones."""

    def get(self, request):  # for getting a list of all the car models
        # Fix: renamed the local (was `CarBrand`, shadowing this class).
        brands = CarBrands.objects.all()
        serialize_class = CarBrandsSerializer(brands, many=True)
        return Response({"CarBrands": serialize_class.data})

    def post(self, request):  # for adding a new car brand
        serializer = CarBrandsSerializer(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response({"status": 2, "message": "Sucessfully created"})
        # Fix: the original returned the literal string "errors" (not the
        # serializer's errors) and an implicit 200 status.
        return Response({"status": 0, "error-message": serializer.errors},
                        status=status.HTTP_400_BAD_REQUEST)
class CarBrandDetails(APIView):
    """Retrieve a single car brand by primary key."""

    def get(self, request, pk):
        # Fix: a missing pk previously raised an unhandled DoesNotExist
        # (HTTP 500); get_object_or_404 (already imported) turns it into 404.
        state = get_object_or_404(CarBrands, pk=pk)
        serializer = CarBrandsSerializer(state)
        return Response(serializer.data)
class FetchCar(APIView):
    """Search car brands whose name contains the given substring."""

    def get(self, request, alpha_bet):
        matches = CarBrands.objects.filter(brandname__icontains=alpha_bet)
        # Guard clause instead of if/else: bail out early when nothing matched.
        if len(matches) == 0:
            return Response({"Oops!": "No such car brand is available"})
        serializer_class = CarBrandsSerializer(matches, many=True)
        return Response(serializer_class.data)
class Employeeslist(APIView):
    """List only the names of all employees."""

    def get(self, request):
        all_employees = Employees.objects.all()
        serialize_class = EmployeeNameSerializer(all_employees, many=True)
        return Response(serialize_class.data)
# view for the employees details and for adding a new employee
class Employeesdetails(APIView):
    """List employee details and create new employees."""

    def get(self, request):
        Employee = Employees.objects.all()
        serialize_class = EmpDetailSerializer(Employee, many=True)
        return Response(serialize_class.data)

    def post(self, request):
        serializer_class = EmpDetailSerializer(data=request.data)
        if serializer_class.is_valid():
            serializer_class.save()
            return Response("New employee {} is added".format(serializer_class.data["employee_name"]))
        # Fix: the original passed raise_exception=True, which made this error
        # branch unreachable; return the errors explicitly with a 400.
        return Response(serializer_class.errors, status=status.HTTP_400_BAD_REQUEST)
# view for the list of all the designation and for adding a new employee designation
class DesignationList(APIView):
    """List the distinct designations and create new ones."""

    def get(self, request):
        desination = EmployeeDesignations.objects.values(
            "designation_name").distinct()
        serialize_class = EmployeeDesignationsSerializer(desination, many=True)
        return Response({"list of all the designations that this company has": serialize_class.data})

    def post(self, request):
        serializer_class = EmployeeDesignationsSerializer(data=request.data)
        if serializer_class.is_valid():
            serializer_class.save()
            return Response("New designation {} is added".format(serializer_class.data["designation"]))
        # Fix: invalid input previously fell off the end of the method and
        # returned an implicit None, crashing the response layer.
        return Response(serializer_class.errors, status=status.HTTP_400_BAD_REQUEST)
class UserTasklist(APIView):
    """List every person together with their tasks."""

    def get(self, request):
        people = Persons.objects.all()
        serializer_class = PersonSerializer(people, many=True)
        return Response({"person tasks": serializer_class.data})
| StarcoderdataPython |
325879 | <reponame>sabidib/hikari
# -*- coding: utf-8 -*-
# Copyright (c) 2020 Nekokatt
# Copyright (c) 2021 davfsa
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import asyncio
import contextlib
import datetime
import platform
import aiohttp
import mock
import pytest
from hikari import _about
from hikari import config
from hikari import errors
from hikari import intents
from hikari import presences
from hikari import undefined
from hikari.impl import shard
from hikari.internal import aio
from hikari.internal import time
from tests.hikari import client_session_stub
from tests.hikari import hikari_test_helpers
def test_log_filterer():
    """The generated filterer redacts every occurrence of the token."""
    redact = shard._log_filterer("TOKEN")
    result = redact("this log contains the TOKEN and it should get removed and the TOKEN here too")
    expected = (
        "this log contains the **REDACTED TOKEN** and it should get removed and the **REDACTED TOKEN** here too"
    )
    assert result == expected
@pytest.fixture()
def http_settings():
    # Strict mock of the HTTP settings object; spec_set makes access to any
    # attribute not on config.HTTPSettings raise, keeping tests honest.
    return mock.Mock(spec_set=config.HTTPSettings)
@pytest.fixture()
def proxy_settings():
    # Strict mock of the proxy settings object; spec_set restricts attribute
    # access to config.ProxySettings' real API.
    return mock.Mock(spec_set=config.ProxySettings)
@pytest.mark.asyncio()
class TestGatewayTransport:
    """Unit tests for ``shard._GatewayTransport``, the low-level websocket wrapper."""

    @pytest.fixture()
    def transport_impl(self):
        # Patch out aiohttp's __init__ so a transport can be constructed
        # without a live websocket; logger/filterer are stubbed for assertions.
        with mock.patch.object(aiohttp.ClientWebSocketResponse, "__init__"):
            transport = shard._GatewayTransport()
            transport.logger = mock.Mock(isEnabledFor=mock.Mock(return_value=True))
            transport.log_filterer = mock.Mock()
            yield transport
    def test__init__calls_super(self):
        with mock.patch.object(aiohttp.ClientWebSocketResponse, "__init__") as init:
            shard._GatewayTransport("arg1", "arg2", some_kwarg="kwarg1")
        init.assert_called_once_with("arg1", "arg2", some_kwarg="kwarg1")
    async def test_send_close_when_not_closed_nor_closing_logs(self, transport_impl):
        transport_impl.sent_close = False
        with mock.patch.object(aiohttp.ClientWebSocketResponse, "close", new=mock.Mock()) as close:
            with mock.patch.object(asyncio, "wait_for", return_value=mock.AsyncMock()) as wait_for:
                assert await transport_impl.send_close(code=1234, message=b"some message") is wait_for.return_value
        # Close must be bounded by a 5 second timeout.
        wait_for.assert_awaited_once_with(close.return_value, timeout=5)
        close.assert_called_once_with(code=1234, message=b"some message")
    async def test_send_close_when_TimeoutError(self, transport_impl):
        transport_impl.sent_close = False
        with mock.patch.object(aiohttp.ClientWebSocketResponse, "close", side_effect=asyncio.TimeoutError) as close:
            # A timeout while closing is swallowed and reported as False.
            assert await transport_impl.send_close(code=1234, message=b"some message") is False
        close.assert_called_once_with(code=1234, message=b"some message")
    @pytest.mark.parametrize("trace", [True, False])
    async def test_receive_json(self, transport_impl, trace):
        transport_impl._receive_and_check = mock.AsyncMock(return_value="{'json_response': null}")
        transport_impl.logger.isEnabledFor.return_value = trace
        mock_loads = mock.Mock(return_value={"json_response": None})
        assert await transport_impl.receive_json(loads=mock_loads, timeout=69) == {"json_response": None}
        transport_impl._receive_and_check.assert_awaited_once_with(69)
        mock_loads.assert_called_once_with("{'json_response': null}")
    @pytest.mark.parametrize("trace", [True, False])
    async def test_send_json(self, transport_impl, trace):
        transport_impl.send_str = mock.AsyncMock()
        transport_impl.logger.isEnabledFor.return_value = trace
        mock_dumps = mock.Mock(return_value="{'json_send': null}")
        await transport_impl.send_json({"json_send": None}, 420, dumps=mock_dumps)
        transport_impl.send_str.assert_awaited_once_with("{'json_send': null}", 420)
        mock_dumps.assert_called_once_with({"json_send": None})
    class StubResponse:
        """Minimal stand-in for an aiohttp websocket message."""
        def __init__(
            self,
            *,
            type=None,
            data=None,
            extra=None,
        ):
            self.type = type
            self.data = data
            self.extra = extra
    # Close codes in this range are considered transient, so the shard should
    # be allowed to reconnect.
    @pytest.mark.parametrize(
        "code",
        [
            *range(3990, 4000),
            errors.ShardCloseCode.DECODE_ERROR,
            errors.ShardCloseCode.INVALID_SEQ,
            errors.ShardCloseCode.UNKNOWN_ERROR,
            errors.ShardCloseCode.SESSION_TIMEOUT,
            errors.ShardCloseCode.RATE_LIMITED,
        ],
    )
    async def test__receive_and_check_when_message_type_is_CLOSE_and_should_reconnect(self, code, transport_impl):
        stub_response = self.StubResponse(type=aiohttp.WSMsgType.CLOSE, extra="some error extra", data=code)
        transport_impl.receive = mock.AsyncMock(return_value=stub_response)
        with pytest.raises(errors.GatewayServerClosedConnectionError) as exinfo:
            await transport_impl._receive_and_check(10)
        exception = exinfo.value
        assert exception.reason == "some error extra"
        assert exception.code == int(code)
        assert exception.can_reconnect is True
        transport_impl.receive.assert_awaited_once_with(10)
    @pytest.mark.parametrize(
        "code",
        [*range(4010, 4020), 5000],
    )
    async def test__receive_and_check_when_message_type_is_CLOSE_and_should_not_reconnect(self, code, transport_impl):
        stub_response = self.StubResponse(type=aiohttp.WSMsgType.CLOSE, extra="dont reconnect", data=code)
        transport_impl.receive = mock.AsyncMock(return_value=stub_response)
        with pytest.raises(errors.GatewayServerClosedConnectionError) as exinfo:
            await transport_impl._receive_and_check(10)
        exception = exinfo.value
        assert exception.reason == "dont reconnect"
        assert exception.code == int(code)
        assert exception.can_reconnect is False
        transport_impl.receive.assert_awaited_once_with(10)
    async def test__receive_and_check_when_message_type_is_CLOSING(self, transport_impl):
        stub_response = self.StubResponse(type=aiohttp.WSMsgType.CLOSING)
        transport_impl.receive = mock.AsyncMock(return_value=stub_response)
        with pytest.raises(errors.GatewayError, match="Socket has closed"):
            await transport_impl._receive_and_check(10)
        transport_impl.receive.assert_awaited_once_with(10)
    async def test__receive_and_check_when_message_type_is_CLOSED(self, transport_impl):
        stub_response = self.StubResponse(type=aiohttp.WSMsgType.CLOSED)
        transport_impl.receive = mock.AsyncMock(return_value=stub_response)
        with pytest.raises(errors.GatewayError, match="Socket has closed"):
            await transport_impl._receive_and_check(10)
        transport_impl.receive.assert_awaited_once_with(10)
    async def test__receive_and_check_when_message_type_is_BINARY(self, transport_impl):
        # Binary frames are buffered until the zlib flush suffix
        # (b"\x00\x00\xff\xff") arrives, then decompressed in one call.
        response1 = self.StubResponse(type=aiohttp.WSMsgType.BINARY, data=b"some")
        response2 = self.StubResponse(type=aiohttp.WSMsgType.BINARY, data=b"data")
        response3 = self.StubResponse(type=aiohttp.WSMsgType.BINARY, data=b"\x00\x00\xff\xff")
        transport_impl.receive = mock.AsyncMock(side_effect=[response1, response2, response3])
        transport_impl.zlib = mock.Mock(decompress=mock.Mock(return_value=b"utf-8 encoded bytes"))
        assert await transport_impl._receive_and_check(10) == "utf-8 encoded bytes"
        transport_impl.receive.assert_awaited_with(10)
        transport_impl.zlib.decompress.assert_called_once_with(bytearray(b"somedata\x00\x00\xff\xff"))
    async def test__receive_and_check_when_buff_but_next_is_not_BINARY(self, transport_impl):
        response1 = self.StubResponse(type=aiohttp.WSMsgType.BINARY, data=b"some")
        response2 = self.StubResponse(type=aiohttp.WSMsgType.TEXT)
        transport_impl.receive = mock.AsyncMock(side_effect=[response1, response2])
        with pytest.raises(errors.GatewayError, match="Unexpected message type received TEXT, expected BINARY"):
            await transport_impl._receive_and_check(10)
        transport_impl.receive.assert_awaited_with(10)
    async def test__receive_and_check_when_message_type_is_TEXT(self, transport_impl):
        transport_impl.receive = mock.AsyncMock(
            return_value=self.StubResponse(type=aiohttp.WSMsgType.TEXT, data="some text")
        )
        assert await transport_impl._receive_and_check(10) == "some text"
        transport_impl.receive.assert_awaited_once_with(10)
    async def test__receive_and_check_when_message_type_is_unknown(self, transport_impl):
        transport_impl.receive = mock.AsyncMock(return_value=self.StubResponse(type=aiohttp.WSMsgType.ERROR))
        transport_impl.exception = mock.Mock(return_value=Exception)
        with pytest.raises(errors.GatewayError, match="Unexpected websocket exception from gateway"):
            await transport_impl._receive_and_check(10)
        transport_impl.receive.assert_awaited_once_with(10)
    async def test_connect_yields_websocket(self, http_settings, proxy_settings):
        # Happy path: connect() builds the session/connector/timeout with the
        # configured settings and yields the websocket as a context manager.
        class MockWS(hikari_test_helpers.AsyncContextManagerMock, shard._GatewayTransport):
            closed = True
            send_close = mock.AsyncMock()
            sent_close = False
            def __init__(self):
                pass
        mock_websocket = MockWS()
        mock_client_session = hikari_test_helpers.AsyncContextManagerMock()
        mock_client_session.ws_connect = mock.MagicMock(return_value=mock_websocket)
        stack = contextlib.ExitStack()
        sleep = stack.enter_context(mock.patch.object(asyncio, "sleep"))
        client_session = stack.enter_context(
            mock.patch.object(aiohttp, "ClientSession", return_value=mock_client_session)
        )
        tcp_connector = stack.enter_context(mock.patch.object(aiohttp, "TCPConnector"))
        client_timeout = stack.enter_context(mock.patch.object(aiohttp, "ClientTimeout"))
        logger = mock.Mock()
        log_filterer = mock.Mock()
        with stack:
            async with shard._GatewayTransport.connect(
                http_settings=http_settings,
                proxy_settings=proxy_settings,
                logger=logger,
                url="https://some.url",
                log_filterer=log_filterer,
            ) as ws:
                assert ws.logger is logger
        tcp_connector.assert_called_once_with(
            limit=1,
            ttl_dns_cache=10,
            use_dns_cache=False,
            ssl=http_settings.ssl,
            enable_cleanup_closed=http_settings.enable_cleanup_closed,
            force_close=http_settings.force_close_transports,
        )
        client_timeout.assert_called_once_with(
            total=http_settings.timeouts.total,
            connect=http_settings.timeouts.acquire_and_connect,
            sock_read=http_settings.timeouts.request_socket_read,
            sock_connect=http_settings.timeouts.request_socket_connect,
        )
        client_session.assert_called_once_with(
            connector=tcp_connector(),
            connector_owner=True,
            raise_for_status=True,
            timeout=client_timeout(),
            trust_env=proxy_settings.trust_env,
            version=aiohttp.HttpVersion11,
            ws_response_class=shard._GatewayTransport,
        )
        mock_client_session.ws_connect.assert_called_once_with(
            max_msg_size=0,
            proxy=proxy_settings.url,
            proxy_headers=proxy_settings.headers,
            url="https://some.url",
        )
        mock_client_session.assert_used_once()
        mock_websocket.assert_used_once()
        sleep.assert_awaited_once_with(0.25)
    async def test_connect_when_gateway_error_after_connecting(self, http_settings, proxy_settings):
        # A GatewayError raised inside the context closes the socket with
        # UNEXPECTED_CONDITION and propagates the original error.
        class MockWS(hikari_test_helpers.AsyncContextManagerMock, shard._GatewayTransport):
            closed = False
            sent_close = False
            send_close = mock.AsyncMock()
            def __init__(self):
                pass
        mock_websocket = MockWS()
        mock_client_session = hikari_test_helpers.AsyncContextManagerMock()
        mock_client_session.ws_connect = mock.MagicMock(return_value=mock_websocket)
        stack = contextlib.ExitStack()
        sleep = stack.enter_context(mock.patch.object(asyncio, "sleep"))
        stack.enter_context(mock.patch.object(aiohttp, "ClientSession", return_value=mock_client_session))
        stack.enter_context(mock.patch.object(aiohttp, "TCPConnector"))
        stack.enter_context(mock.patch.object(aiohttp, "ClientTimeout"))
        stack.enter_context(pytest.raises(errors.GatewayError, match="some reason"))
        logger = mock.Mock()
        log_filterer = mock.Mock()
        with stack:
            async with shard._GatewayTransport.connect(
                http_settings=http_settings,
                proxy_settings=proxy_settings,
                logger=logger,
                url="https://some.url",
                log_filterer=log_filterer,
            ):
                raise errors.GatewayError("some reason")
        mock_websocket.send_close.assert_awaited_once_with(
            code=errors.ShardCloseCode.UNEXPECTED_CONDITION, message=b"unexpected fatal client error :-("
        )
        sleep.assert_awaited_once_with(0.25)
        mock_client_session.assert_used_once()
        mock_websocket.assert_used_once()
    async def test_connect_when_unexpected_error_after_connecting(self, http_settings, proxy_settings):
        # Non-gateway exceptions are wrapped into a GatewayError describing
        # the original exception type and message.
        class MockWS(hikari_test_helpers.AsyncContextManagerMock, shard._GatewayTransport):
            closed = False
            send_close = mock.AsyncMock()
            sent_close = False
            def __init__(self):
                pass
        mock_websocket = MockWS()
        mock_client_session = hikari_test_helpers.AsyncContextManagerMock()
        mock_client_session.ws_connect = mock.MagicMock(return_value=mock_websocket)
        stack = contextlib.ExitStack()
        sleep = stack.enter_context(mock.patch.object(asyncio, "sleep"))
        stack.enter_context(mock.patch.object(aiohttp, "ClientSession", return_value=mock_client_session))
        stack.enter_context(mock.patch.object(aiohttp, "TCPConnector"))
        stack.enter_context(mock.patch.object(aiohttp, "ClientTimeout"))
        stack.enter_context(pytest.raises(errors.GatewayError, match="Unexpected ValueError: testing"))
        logger = mock.Mock()
        log_filterer = mock.Mock()
        with stack:
            async with shard._GatewayTransport.connect(
                http_settings=http_settings,
                proxy_settings=proxy_settings,
                logger=logger,
                url="https://some.url",
                log_filterer=log_filterer,
            ):
                raise ValueError("testing")
        mock_websocket.send_close.assert_awaited_once_with(
            code=errors.ShardCloseCode.UNEXPECTED_CONDITION, message=b"unexpected fatal client error :-("
        )
        sleep.assert_awaited_once_with(0.25)
        mock_client_session.assert_used_once()
        mock_websocket.assert_used_once()
    async def test_connect_when_no_error_and_not_closing(self, http_settings, proxy_settings):
        # Clean exit while not shutting down sends the special resume close
        # code so the session can be resumed later.
        class MockWS(hikari_test_helpers.AsyncContextManagerMock, shard._GatewayTransport):
            closed = False
            _closing = False
            sent_close = False
            send_close = mock.AsyncMock()
            def __init__(self):
                pass
        mock_websocket = MockWS()
        mock_client_session = hikari_test_helpers.AsyncContextManagerMock()
        mock_client_session.ws_connect = mock.MagicMock(return_value=mock_websocket)
        stack = contextlib.ExitStack()
        sleep = stack.enter_context(mock.patch.object(asyncio, "sleep"))
        stack.enter_context(mock.patch.object(aiohttp, "ClientSession", return_value=mock_client_session))
        stack.enter_context(mock.patch.object(aiohttp, "TCPConnector"))
        stack.enter_context(mock.patch.object(aiohttp, "ClientTimeout"))
        logger = mock.Mock()
        log_filterer = mock.Mock()
        with stack:
            async with shard._GatewayTransport.connect(
                http_settings=http_settings,
                proxy_settings=proxy_settings,
                logger=logger,
                url="https://some.url",
                log_filterer=log_filterer,
            ):
                pass
        mock_websocket.send_close.assert_awaited_once_with(
            code=shard._RESUME_CLOSE_CODE, message=b"client is shutting down"
        )
        sleep.assert_awaited_once_with(0.25)
        mock_client_session.assert_used_once()
        mock_websocket.assert_used_once()
    async def test_connect_when_no_error_and_closing(self, http_settings, proxy_settings):
        # If the transport is already closing, no extra close is sent.
        class MockWS(hikari_test_helpers.AsyncContextManagerMock, shard._GatewayTransport):
            closed = False
            _closing = True
            close = mock.AsyncMock()
            def __init__(self):
                pass
        mock_websocket = MockWS()
        mock_client_session = hikari_test_helpers.AsyncContextManagerMock()
        mock_client_session.ws_connect = mock.MagicMock(return_value=mock_websocket)
        stack = contextlib.ExitStack()
        sleep = stack.enter_context(mock.patch.object(asyncio, "sleep"))
        stack.enter_context(mock.patch.object(aiohttp, "ClientSession", return_value=mock_client_session))
        stack.enter_context(mock.patch.object(aiohttp, "TCPConnector"))
        stack.enter_context(mock.patch.object(aiohttp, "ClientTimeout"))
        logger = mock.Mock()
        log_filterer = mock.Mock()
        with stack:
            async with shard._GatewayTransport.connect(
                http_settings=http_settings,
                proxy_settings=proxy_settings,
                logger=logger,
                url="https://some.url",
                log_filterer=log_filterer,
            ):
                pass
        mock_websocket.close.assert_not_called()
        sleep.assert_awaited_once_with(0.25)
        mock_client_session.assert_used_once()
        mock_websocket.assert_used_once()
    async def test_connect_when_error_connecting(self, http_settings, proxy_settings):
        # Connection-level failures surface as GatewayConnectionError.
        mock_client_session = hikari_test_helpers.AsyncContextManagerMock()
        mock_client_session.ws_connect = mock.MagicMock(side_effect=aiohttp.ClientConnectionError("some error"))
        stack = contextlib.ExitStack()
        sleep = stack.enter_context(mock.patch.object(asyncio, "sleep"))
        stack.enter_context(mock.patch.object(aiohttp, "ClientSession", return_value=mock_client_session))
        stack.enter_context(mock.patch.object(aiohttp, "TCPConnector"))
        stack.enter_context(mock.patch.object(aiohttp, "ClientTimeout"))
        stack.enter_context(
            pytest.raises(errors.GatewayConnectionError, match=r"Failed to connect to server: 'some error'")
        )
        logger = mock.Mock()
        log_filterer = mock.Mock()
        with stack:
            async with shard._GatewayTransport.connect(
                http_settings=http_settings,
                proxy_settings=proxy_settings,
                logger=logger,
                url="https://some.url",
                log_filterer=log_filterer,
            ):
                pass
        sleep.assert_awaited_once_with(0.25)
        mock_client_session.assert_used_once()
    async def test_connect_when_handshake_error_with_unknown_reason(self, http_settings, proxy_settings):
        mock_client_session = hikari_test_helpers.AsyncContextManagerMock()
        mock_client_session.ws_connect = mock.MagicMock(
            side_effect=aiohttp.WSServerHandshakeError(
                status=123, message="some error", request_info=None, history=None
            )
        )
        stack = contextlib.ExitStack()
        sleep = stack.enter_context(mock.patch.object(asyncio, "sleep"))
        stack.enter_context(mock.patch.object(aiohttp, "ClientSession", return_value=mock_client_session))
        stack.enter_context(mock.patch.object(aiohttp, "TCPConnector"))
        stack.enter_context(mock.patch.object(aiohttp, "ClientTimeout"))
        stack.enter_context(
            pytest.raises(
                errors.GatewayConnectionError,
                match=(
                    r'Failed to connect to server: "WSServerHandshakeError\(None, None, status=123, message=\'some error\'\)"'
                ),
            )
        )
        logger = mock.Mock()
        log_filterer = mock.Mock()
        with stack:
            async with shard._GatewayTransport.connect(
                http_settings=http_settings,
                proxy_settings=proxy_settings,
                logger=logger,
                url="https://some.url",
                log_filterer=log_filterer,
            ):
                pass
        sleep.assert_awaited_once_with(0.25)
        mock_client_session.assert_used_once()
    async def test_connect_when_handshake_error_with_known_reason(self, http_settings, proxy_settings):
        mock_client_session = hikari_test_helpers.AsyncContextManagerMock()
        mock_client_session.ws_connect = mock.MagicMock(
            side_effect=aiohttp.WSServerHandshakeError(
                status=500, message="some error", request_info=None, history=None
            )
        )
        stack = contextlib.ExitStack()
        sleep = stack.enter_context(mock.patch.object(asyncio, "sleep"))
        stack.enter_context(mock.patch.object(aiohttp, "ClientSession", return_value=mock_client_session))
        stack.enter_context(mock.patch.object(aiohttp, "TCPConnector"))
        stack.enter_context(mock.patch.object(aiohttp, "ClientTimeout"))
        stack.enter_context(
            pytest.raises(
                errors.GatewayConnectionError,
                match=(
                    r'Failed to connect to server: "WSServerHandshakeError\(None, None, status=500, message=\'some error\'\)"'
                ),
            )
        )
        logger = mock.Mock()
        log_filterer = mock.Mock()
        with stack:
            async with shard._GatewayTransport.connect(
                http_settings=http_settings,
                proxy_settings=proxy_settings,
                logger=logger,
                url="https://some.url",
                log_filterer=log_filterer,
            ):
                pass
        sleep.assert_awaited_once_with(0.25)
        mock_client_session.assert_used_once()
@pytest.mark.asyncio()
class TestGatewayShardImpl:
    # Fixtures: a stubbed aiohttp client session, an unslotted shard subclass
    # (so tests can freely monkey-patch attributes), and a ready-made client.
    @pytest.fixture()
    def client_session(self):
        stub = client_session_stub.ClientSessionStub()
        with mock.patch.object(aiohttp, "ClientSession", new=stub):
            yield stub
    @pytest.fixture(scope="module")
    def unslotted_client_type(self):
        return hikari_test_helpers.mock_class_namespace(shard.GatewayShardImpl, slots_=False)
    @pytest.fixture()
    def client(self, http_settings, proxy_settings, unslotted_client_type):
        return unslotted_client_type(
            event_manager=mock.Mock(),
            event_factory=mock.Mock(),
            url="wss://gateway.discord.gg",
            intents=intents.Intents.ALL,
            token="lol",
            http_settings=http_settings,
            proxy_settings=proxy_settings,
        )
    # Constructor behavior: URL query-string assembly and the unsupported
    # ETF data format.
    @pytest.mark.parametrize(
        ("compression", "expect"),
        [
            (None, f"v={shard._VERSION}&encoding=json"),
            ("transport_zlib_stream", f"v={shard._VERSION}&encoding=json&compress=zlib-stream"),
        ],
    )
    def test__init__sets_url_is_correct_json(self, compression, expect, http_settings, proxy_settings):
        g = shard.GatewayShardImpl(
            event_manager=mock.Mock(),
            event_factory=mock.Mock(),
            http_settings=http_settings,
            proxy_settings=proxy_settings,
            intents=intents.Intents.ALL,
            url="wss://gaytewhuy.discord.meh",
            data_format="json",
            compression=compression,
            token="12345",
        )
        assert g._url == f"wss://gaytewhuy.discord.meh?{expect}"
    def test_using_etf_is_unsupported(self, http_settings, proxy_settings):
        with pytest.raises(NotImplementedError, match="Unsupported gateway data format: etf"):
            shard.GatewayShardImpl(
                event_manager=mock.Mock(),
                event_factory=mock.Mock(),
                http_settings=http_settings,
                proxy_settings=proxy_settings,
                token=mock.Mock(),
                url="wss://erlpack-is-broken-lol.discord.meh",
                intents=intents.Intents.ALL,
                data_format="etf",
                compression=True,
            )
    # Simple property accessors and the alive-state guard.
    def test_heartbeat_latency_property(self, client):
        client._heartbeat_latency = 420
        assert client.heartbeat_latency == 420
    def test_id_property(self, client):
        client._shard_id = 101
        assert client.id == 101
    def test_intents_property(self, client):
        intents = object()
        client._intents = intents
        assert client.intents is intents
    def test_is_alive_property(self, client):
        client._run_task = None
        assert client.is_alive is False
    @pytest.mark.asyncio()
    async def test_is_alive_property_with_active_future(self, client):
        client._run_task = asyncio.get_running_loop().create_future()
        assert client.is_alive is True
    @pytest.mark.asyncio()
    async def test_is_alive_property_with_finished_future(self, client):
        client._run_task = aio.completed_future()
        assert client.is_alive is False
    def test_shard_count_property(self, client):
        client._shard_count = 69
        assert client.shard_count == 69
    def test_shard__check_if_alive_when_not_alive(self, client):
        with mock.patch.object(shard.GatewayShardImpl, "is_alive", new=False):
            with pytest.raises(errors.ComponentStateConflictError):
                client._check_if_alive()
    def test_shard__check_if_alive_when_alive(self, client):
        with mock.patch.object(shard.GatewayShardImpl, "is_alive", new=True):
            client._check_if_alive()
    # close(): idempotent when already closing; otherwise sends GOING_AWAY on
    # the websocket (if any) and closes the rate limiters.
    async def test_close_when_closing_event_set(self, client):
        client._closing_event = mock.Mock(is_set=mock.Mock(return_value=True))
        client._closed_event = mock.Mock(wait=mock.AsyncMock())
        client._send_close = mock.Mock()
        client._chunking_rate_limit = mock.Mock()
        client._total_rate_limit = mock.Mock()
        await client.close()
        client._closing_event.set.assert_not_called()
        client._send_close.assert_not_called()
        client._chunking_rate_limit.close.assert_not_called()
        client._total_rate_limit.close.assert_not_called()
        client._closed_event.wait.assert_awaited_once_with()
    async def test_close_when_closing_event_not_set(self, client):
        client._closing_event = mock.Mock(is_set=mock.Mock(return_value=False))
        client._closed_event = mock.Mock(wait=mock.AsyncMock())
        client._ws = mock.Mock(send_close=mock.AsyncMock())
        client._chunking_rate_limit = mock.Mock()
        client._total_rate_limit = mock.Mock()
        await client.close()
        client._closing_event.set.assert_called_once_with()
        client._ws.send_close.assert_awaited_once_with(
            code=errors.ShardCloseCode.GOING_AWAY, message=b"shard disconnecting"
        )
        client._chunking_rate_limit.close.assert_called_once_with()
        client._total_rate_limit.close.assert_called_once_with()
        client._closed_event.wait.assert_awaited_once_with()
    async def test_close_when_closing_event_not_set_and_ws_is_None(self, client):
        client._closing_event = mock.Mock(is_set=mock.Mock(return_value=False))
        client._closed_event = mock.Mock(wait=mock.AsyncMock())
        client._ws = None
        client._chunking_rate_limit = mock.Mock()
        client._total_rate_limit = mock.Mock()
        await client.close()
        client._closing_event.set.assert_called_once_with()
        client._chunking_rate_limit.close.assert_called_once_with()
        client._total_rate_limit.close.assert_called_once_with()
        client._closed_event.wait.assert_awaited_once_with()
    # get_user_id() / _get_ws() / join(): accessor guards and waiting.
    async def test_when__user_id_is_None(self, client):
        client._handshake_completed = mock.Mock(wait=mock.AsyncMock())
        client._user_id = None
        with pytest.raises(RuntimeError):
            assert await client.get_user_id()
    async def test_when__user_id_is_not_None(self, client):
        client._handshake_completed = mock.Mock(wait=mock.AsyncMock())
        client._user_id = 123
        assert await client.get_user_id() == 123
    def test__get_ws_when_active(self, client):
        mock_ws = client._ws = object()
        assert client._get_ws() is mock_ws
    def test__get_ws_when_inactive(self, client):
        client._ws = None
        with pytest.raises(errors.ComponentStateConflictError):
            client._get_ws()
    async def test_join(self, client):
        client._closed_event = mock.Mock(wait=mock.AsyncMock())
        await client.join()
        client._closed_event.wait.assert_awaited_once_with()
    # request_guild_members(): intent checks, argument validation, and the
    # opcode 8 payload it ultimately sends.
    async def test_request_guild_members_when_no_query_and_no_limit_and_GUILD_MEMBERS_not_enabled(self, client):
        client._check_if_alive = mock.Mock()
        client._intents = intents.Intents.GUILD_INTEGRATIONS
        with pytest.raises(errors.MissingIntentError):
            await client.request_guild_members(123, query="", limit=0)
        client._check_if_alive.assert_called_once_with()
    async def test_request_guild_members_when_presences_and_GUILD_PRESENCES_not_enabled(self, client):
        client._check_if_alive = mock.Mock()
        client._intents = intents.Intents.GUILD_INTEGRATIONS
        with pytest.raises(errors.MissingIntentError):
            await client.request_guild_members(123, query="test", limit=1, include_presences=True)
        client._check_if_alive.assert_called_once_with()
    async def test_request_guild_members_when_presences_false_and_GUILD_PRESENCES_not_enabled(self, client):
        client._check_if_alive = mock.Mock()
        client._intents = intents.Intents.GUILD_INTEGRATIONS
        client._send_json = mock.AsyncMock()
        await client.request_guild_members(123, query="test", limit=1, include_presences=False)
        client._send_json.assert_awaited_once_with(
            {
                "op": 8,
                "d": {"guild_id": "123", "query": "test", "presences": False, "limit": 1},
            }
        )
        client._check_if_alive.assert_called_once_with()
    @pytest.mark.parametrize("kwargs", [{"query": "some query"}, {"limit": 1}])
    async def test_request_guild_members_when_specifiying_users_with_limit_or_query(self, client, kwargs):
        client._check_if_alive = mock.Mock()
        client._intents = intents.Intents.GUILD_INTEGRATIONS
        with pytest.raises(ValueError, match="Cannot specify limit/query with users"):
            await client.request_guild_members(123, users=[], **kwargs)
        client._check_if_alive.assert_called_once_with()
    @pytest.mark.parametrize("limit", [-1, 101])
    async def test_request_guild_members_when_limit_under_0_or_over_100(self, client, limit):
        client._check_if_alive = mock.Mock()
        client._intents = intents.Intents.ALL
        with pytest.raises(ValueError, match="'limit' must be between 0 and 100, both inclusive"):
            await client.request_guild_members(123, limit=limit)
        client._check_if_alive.assert_called_once_with()
    async def test_request_guild_members_when_users_over_100(self, client):
        client._check_if_alive = mock.Mock()
        client._intents = intents.Intents.ALL
        with pytest.raises(ValueError, match="'users' is limited to 100 users"):
            await client.request_guild_members(123, users=range(101))
        client._check_if_alive.assert_called_once_with()
    async def test_request_guild_members_when_nonce_over_32_chars(self, client):
        client._check_if_alive = mock.Mock()
        client._intents = intents.Intents.ALL
        with pytest.raises(ValueError, match="'nonce' can be no longer than 32 byte characters long."):
            await client.request_guild_members(123, nonce="x" * 33)
        client._check_if_alive.assert_called_once_with()
    @pytest.mark.parametrize("include_presences", [True, False])
    async def test_request_guild_members(self, client, include_presences):
        client._intents = intents.Intents.ALL
        client._check_if_alive = mock.Mock()
        client._send_json = mock.AsyncMock()
        await client.request_guild_members(123, include_presences=include_presences)
        client._send_json.assert_awaited_once_with(
            {
                "op": 8,
                "d": {"guild_id": "123", "query": "", "presences": include_presences, "limit": 0},
            }
        )
        client._check_if_alive.assert_called_once_with()
    # start(): races the run task against the handshake-completed waiter and
    # reacts depending on which finishes first.
    async def test_start_when_already_running(self, client):
        client._run_task = object()
        with pytest.raises(errors.ComponentStateConflictError):
            await client.start()
    async def test_start_when_shard_closed_before_starting(self, client):
        client._run_task = None
        client._shard_id = 20
        client._run = mock.Mock()
        client._handshake_completed = mock.Mock(wait=mock.Mock())
        run_task = mock.Mock()
        waiter = mock.Mock()
        stack = contextlib.ExitStack()
        create_task = stack.enter_context(mock.patch.object(asyncio, "create_task", side_effect=[run_task, waiter]))
        wait = stack.enter_context(mock.patch.object(asyncio, "wait", return_value=([run_task], [waiter])))
        stack.enter_context(
            pytest.raises(asyncio.CancelledError, match="shard 20 was closed before it could start successfully")
        )
        with stack:
            await client.start()
        assert client._run_task is None
        assert create_task.call_count == 2
        create_task.has_call(mock.call(client._run(), name="run shard 20"))
        create_task.has_call(mock.call(client._handshake_completed.wait(), name="wait for shard 20 to start"))
        run_task.result.assert_called_once_with()
        waiter.cancel.assert_called_once_with()
        wait.assert_awaited_once_with((waiter, run_task), return_when=asyncio.FIRST_COMPLETED)
    async def test_start(self, client):
        client._run_task = None
        client._shard_id = 20
        client._run = mock.Mock()
        client._handshake_completed = mock.Mock(wait=mock.Mock())
        run_task = mock.Mock()
        waiter = mock.Mock()
        with mock.patch.object(asyncio, "create_task", side_effect=[run_task, waiter]) as create_task:
            with mock.patch.object(asyncio, "wait", return_value=([waiter], [run_task])) as wait:
                await client.start()
        assert client._run_task == run_task
        assert create_task.call_count == 2
        create_task.has_call(mock.call(client._run(), name="run shard 20"))
        create_task.has_call(mock.call(client._handshake_completed.wait(), name="wait for shard 20 to start"))
        run_task.result.assert_not_called()
        waiter.cancel.assert_called_once_with()
        wait.assert_awaited_once_with((waiter, run_task), return_when=asyncio.FIRST_COMPLETED)
    # update_presence() / update_voice_state(): verify opcodes 3 and 4 and
    # their serialized payloads.
    async def test_update_presence(self, client):
        client._check_if_alive = mock.Mock()
        presence_payload = object()
        client._serialize_and_store_presence_payload = mock.Mock(return_value=presence_payload)
        client._send_json = mock.AsyncMock()
        await client.update_presence(
            idle_since=datetime.datetime.now(),
            afk=True,
            status=presences.Status.IDLE,
            activity=None,
        )
        client._send_json.assert_awaited_once_with({"op": 3, "d": presence_payload})
        client._check_if_alive.assert_called_once_with()
    async def test_update_voice_state(self, client):
        client._check_if_alive = mock.Mock()
        client._send_json = mock.AsyncMock()
        payload = {
            "guild_id": "123456",
            "channel_id": "6969420",
            "self_mute": False,
            "self_deaf": True,
        }
        await client.update_voice_state(123456, 6969420, self_mute=False, self_deaf=True)
        client._send_json.assert_awaited_once_with({"op": 4, "d": payload})
    async def test_update_voice_state_without_optionals(self, client):
        client._check_if_alive = mock.Mock()
        client._send_json = mock.AsyncMock()
        payload = {"guild_id": "123456", "channel_id": "6969420"}
        await client.update_voice_state(123456, 6969420)
        client._send_json.assert_awaited_once_with({"op": 4, "d": payload})
def test_dispatch_when_READY(self, client):
client._seq = 0
client._session_id = 0
client._user_id = 0
client._logger = mock.Mock()
client._handshake_completed = mock.Mock()
client._event_manager = mock.Mock()
pl = {
"session_id": 101,
"user": {"id": 123, "username": "hikari", "discriminator": "5863"},
"guilds": [
{"id": "123"},
{"id": "456"},
{"id": "789"},
],
"v": 8,
}
client._dispatch(
"READY",
10,
pl,
)
assert client._seq == 10
assert client._session_id == 101
assert client._user_id == 123
client._logger.info.assert_called_once_with(
"shard is ready: %s guilds, %s (%s), session %r on v%s gateway",
3,
"hikari#5863",
123,
101,
8,
)
client._handshake_completed.set.assert_called_once_with()
client._event_manager.consume_raw_event.assert_called_once_with(
"READY",
client,
pl,
)
def test__dipatch_when_RESUME(self, client):
client._seq = 0
client._session_id = 123
client._logger = mock.Mock()
client._handshake_completed = mock.Mock()
client._event_manager = mock.Mock()
client._dispatch("RESUME", 10, {})
assert client._seq == 10
client._logger.info.assert_called_once_with("shard has resumed [session:%s, seq:%s]", 123, 10)
client._handshake_completed.set.assert_called_once_with()
client._event_manager.consume_raw_event.assert_called_once_with("RESUME", client, {})
def test__dipatch(self, client):
client._logger = mock.Mock()
client._handshake_completed = mock.Mock()
client._event_manager = mock.Mock()
client._dispatch("EVENT NAME", 10, {"payload": None})
client._logger.info.assert_not_called()
client._logger.debug.assert_not_called()
client._handshake_completed.set.assert_not_called()
client._event_manager.consume_raw_event.assert_called_once_with("EVENT NAME", client, {"payload": None})
    async def test__dispatch_for_unknown_event(self, client):
        # NOTE(review): declared async but awaits nothing (_dispatch is called
        # synchronously) — presumably kept for the async test runner; confirm.
        # When the event manager raises LookupError for an unknown event, the
        # shard should swallow it and log at debug level only.
        client._logger = mock.Mock()
        client._handshake_completed = mock.Mock()
        client._event_manager = mock.Mock(consume_raw_event=mock.Mock(side_effect=LookupError))
        client._dispatch("UNEXISTING_EVENT", 10, {"payload": None})
        client._logger.info.assert_not_called()
        client._handshake_completed.set.assert_not_called()
        client._event_manager.consume_raw_event.assert_called_once_with("UNEXISTING_EVENT", client, {"payload": None})
        client._logger.debug.assert_called_once_with(
            "ignoring unknown event %s:\n %r", "UNEXISTING_EVENT", {"payload": None}
        )
    async def test__identify(self, client):
        # IDENTIFY (op 2) must embed the token, intents, shard pair and the
        # serialized presence, with $os/$browser/$device built from the
        # (patched) platform, aiohttp and library version strings.
        client._token = "token"
        client._intents = intents.Intents.ALL
        client._large_threshold = 123
        client._shard_id = 0
        client._shard_count = 1
        client._serialize_and_store_presence_payload = mock.Mock(return_value={"presence": "payload"})
        client._send_json = mock.AsyncMock()
        # Pin every environment-derived value so the expected payload is exact.
        stack = contextlib.ExitStack()
        stack.enter_context(mock.patch.object(platform, "system", return_value="Potato PC"))
        stack.enter_context(mock.patch.object(platform, "architecture", return_value=["ARM64"]))
        stack.enter_context(mock.patch.object(aiohttp, "__version__", new="v0.0.1"))
        stack.enter_context(mock.patch.object(_about, "__version__", new="v1.0.0"))
        with stack:
            await client._identify()
        expected_json = {
            "op": 2,
            "d": {
                "token": "token",
                "compress": False,
                "large_threshold": 123,
                "properties": {
                    "$os": "Potato PC ARM64",
                    "$browser": "aiohttp v0.0.1",
                    "$device": "hikari v1.0.0",
                },
                "shard": [0, 1],
                "intents": 32767,  # Intents.ALL as a raw bitfield
                "presence": {"presence": "payload"},
            },
        }
        client._send_json.assert_awaited_once_with(expected_json)
    @hikari_test_helpers.timeout()
    async def test__heartbeat(self, client):
        # Normal heartbeat cycle: the first wait_for times out (interval
        # elapsed, keep beating) and the second returns (close requested).
        # A False return presumably means "not zombied" — confirm against
        # the _heartbeat implementation.
        client._last_heartbeat_sent = 5
        client._logger = mock.Mock()
        client._closing_event = mock.Mock(is_set=mock.Mock(return_value=False))
        client._closed_event = mock.Mock(is_set=mock.Mock(return_value=False))
        client._send_heartbeat = mock.AsyncMock()
        with mock.patch.object(time, "monotonic", return_value=10):
            with mock.patch.object(asyncio, "wait_for", side_effect=[asyncio.TimeoutError, None]) as wait_for:
                assert await client._heartbeat(20) is False
        wait_for.assert_awaited_with(client._closing_event.wait(), timeout=20)
@hikari_test_helpers.timeout()
async def test__heartbeat_when_zombie(self, client):
client._last_heartbeat_sent = 10
client._logger = mock.Mock()
with mock.patch.object(time, "monotonic", return_value=5):
with mock.patch.object(asyncio, "wait_for") as wait_for:
assert await client._heartbeat(20) is True
wait_for.assert_not_called()
async def test__resume(self, client):
client._token = "token"
client._seq = 123
client._session_id = 456
client._send_json = mock.AsyncMock()
await client._resume()
expected_json = {
"op": 6,
"d": {"token": "token", "seq": 123, "session_id": 456},
}
client._send_json.assert_awaited_once_with(expected_json)
    @pytest.mark.skip("TODO")
    async def test__run(self, client):
        # Placeholder: coverage for the _run entry point not yet written.
        ...
    @pytest.mark.skip("TODO")
    async def test__run_once(self, client):
        # Placeholder: coverage for a single _run_once iteration not yet written.
        ...
async def test__send_heartbeat(self, client):
client._send_json = mock.AsyncMock()
client._last_heartbeat_sent = 0
client._seq = 10
with mock.patch.object(time, "monotonic", return_value=200):
await client._send_heartbeat()
client._send_json.assert_awaited_once_with({"op": 1, "d": 10})
assert client._last_heartbeat_sent == 200
    def test__serialize_activity_when_activity_is_None(self, client):
        # A missing activity serialises to None, not an empty mapping.
        assert client._serialize_activity(None) is None
    def test__serialize_activity_when_activity_is_not_None(self, client):
        # Activity serialises to a plain dict; type is coerced ("0" -> 0).
        activity = mock.Mock(type="0", url="https://some.url")
        activity.name = "<NAME>"  # This has to be set separate because if not, its set as the mock's name
        assert client._serialize_activity(activity) == {"name": "<NAME>", "type": 0, "url": "https://some.url"}
    @pytest.mark.parametrize("idle_since", [datetime.datetime.now(), None])
    @pytest.mark.parametrize("afk", [True, False])
    @pytest.mark.parametrize(
        "status",
        [presences.Status.DO_NOT_DISTURB, presences.Status.IDLE, presences.Status.ONLINE, presences.Status.OFFLINE],
    )
    @pytest.mark.parametrize("activity", [presences.Activity(name="foo"), None])
    def test__serialize_and_store_presence_payload_when_all_args_undefined(
        self, client, idle_since, afk, status, activity
    ):
        # With no explicit arguments, the payload must be built from the
        # values already stored on the client. The expected dict below is
        # recomputed per parametrized combination.
        client._activity = activity
        client._idle_since = idle_since
        client._is_afk = afk
        client._status = status
        actual_result = client._serialize_and_store_presence_payload()
        if activity is not undefined.UNDEFINED and activity is not None:
            expected_activity = {
                "name": activity.name,
                "type": activity.type,
                "url": activity.url,
            }
        else:
            expected_activity = None
        # OFFLINE is presented to the gateway as "invisible".
        if status == presences.Status.OFFLINE:
            expected_status = "invisible"
        else:
            expected_status = status.value
        expected_result = {
            "game": expected_activity,
            # "since" is epoch milliseconds, or None when never idle.
            "since": int(idle_since.timestamp() * 1_000) if idle_since is not None else None,
            "afk": afk if afk is not undefined.UNDEFINED else False,
            "status": expected_status,
        }
        assert expected_result == actual_result
    @pytest.mark.parametrize("idle_since", [datetime.datetime.now(), None])
    @pytest.mark.parametrize("afk", [True, False])
    @pytest.mark.parametrize(
        "status",
        [presences.Status.DO_NOT_DISTURB, presences.Status.IDLE, presences.Status.ONLINE, presences.Status.OFFLINE],
    )
    @pytest.mark.parametrize("activity", [presences.Activity(name="foo"), None])
    def test__serialize_and_store_presence_payload_sets_state(self, client, idle_since, afk, status, activity):
        # Explicitly passed presence arguments must be stored back on the
        # client so later no-arg serialisations reuse them.
        client._serialize_and_store_presence_payload(idle_since=idle_since, afk=afk, status=status, activity=activity)
        assert client._activity == activity
        assert client._idle_since == idle_since
        assert client._is_afk == afk
        assert client._status == status
    def test__serialize_datetime_when_datetime_is_None(self, client):
        # None passes through unchanged.
        assert client._serialize_datetime(None) is None
    def test__serialize_datetime_when_datetime_is_not_None(self, client):
        # Datetimes serialise to epoch milliseconds (2004-11-22T00:00Z).
        dt = datetime.datetime(2004, 11, 22, tzinfo=datetime.timezone.utc)
        assert client._serialize_datetime(dt) == 1101081600000
| StarcoderdataPython |
11200999 | # load general packages and functions
# (none)
# load program-specific functions
from parameters.constants import constants as C
import gnn.mpnn
import util
# defines the models with parameters from `constants.py`
def initialize_model():
    """Initialize the model to be trained. Possible model: "GGNN".

    All hyperparameters are read from `parameters.constants.constants` (C).

    Returns
    -------
    model (modules.SummationMPNN or modules.AggregationMPNN or
           modules.EdgeMPNN) : Neural net model, moved to the CUDA device.

    Raises
    ------
    NotImplementedError
        If `C.model` names anything other than "GGNN".
    """
    try:
        hidden_node_features = C.hidden_node_features
    except AttributeError:  # raised for EMN model only
        hidden_node_features = None

    # NOTE(review): the original also read C.edge_emb_hidden_dim into a local
    # that was never used; removed as dead code (the GGNN ctor never takes it).

    if C.model == "GGNN":
        net = gnn.mpnn.GGNN(
            f_add_elems=C.dim_f_add_p1,
            edge_features=C.dim_edges[2],
            enn_depth=C.enn_depth,
            enn_dropout_p=C.enn_dropout_p,
            enn_hidden_dim=C.enn_hidden_dim,
            mlp1_depth=C.mlp1_depth,
            mlp1_dropout_p=C.mlp1_dropout_p,
            mlp1_hidden_dim=C.mlp1_hidden_dim,
            mlp2_depth=C.mlp2_depth,
            mlp2_dropout_p=C.mlp2_dropout_p,
            mlp2_hidden_dim=C.mlp2_hidden_dim,
            gather_att_depth=C.gather_att_depth,
            gather_att_dropout_p=C.gather_att_dropout_p,
            gather_att_hidden_dim=C.gather_att_hidden_dim,
            gather_width=C.gather_width,
            gather_emb_depth=C.gather_emb_depth,
            gather_emb_dropout_p=C.gather_emb_dropout_p,
            gather_emb_hidden_dim=C.gather_emb_hidden_dim,
            hidden_node_features=hidden_node_features,
            initialization=C.weights_initialization,
            message_passes=C.message_passes,
            message_size=C.message_size,
            n_nodes_largest_graph=C.max_n_nodes,
            node_features=C.dim_nodes[1],
        )
    else:
        raise NotImplementedError("Model is not defined.")

    # Hard-coded CUDA placement: training requires a GPU host.
    net = net.to("cuda", non_blocking=True)

    return net
| StarcoderdataPython |
11218012 | import os
import sys
import logging.config
import yaml
import requests
import time
from datetime import datetime
#--------------------importing process configs and setting logger---------------------
sys.path.insert(1, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import properties.config as config
with open(config.log_config, 'r') as stream:
log_config = yaml.load(stream, Loader=yaml.FullLoader)
logging.config.dictConfig(log_config)
logger = logging.getLogger('BitcoinTrackerTask')
#-------------------------------------------------------------------------------
def get_latest_bitcoin_price():
    """Fetch the current bitcoin price in USD from the configured API.

    Returns
    -------
    float
        Latest price in US dollars.
    """
    response = requests.get(config.BITCOIN_API_URL)
    response_json = response.json()
    # BUG FIX: removed leftover debug output (pprint of the raw body and a
    # print of the parsed JSON) that polluted stdout on every poll.
    # API returns a list of coins; the first entry is bitcoin.
    # Convert the price to a floating point number
    return float(response_json[0]['price_usd'])
def post_ifttt_webhook(event, value):
    """Trigger the named IFTTT webhook event, sending *value* as 'value1'."""
    # Build the event-specific webhook URL and its JSON payload.
    webhook_url = config.IFTTT_WEBHOOKS_URL.format(event)
    payload = {'value1': value}
    # Fire the HTTP POST request at the IFTTT service.
    requests.post(webhook_url, json=payload)
def main():
    """Poll the bitcoin price forever.

    Sends a 'bitcoin_price_emergency' webhook whenever the price drops below
    the configured threshold, and a 'bitcoin_price_update' digest every five
    samples. Runs until interrupted (Ctrl-C).
    """
    logger.info("Starting main task")
    bitcoin_history = []
    try:
        while True:
            price = get_latest_bitcoin_price()
            date = datetime.now()
            bitcoin_history.append({'date': date, 'price': price})

            # Send an emergency notification
            if price < config.BITCOIN_PRICE_THRESHOLD:
                post_ifttt_webhook('bitcoin_price_emergency', price)

            # Once we have 5 items in our bitcoin_history send an update
            if len(bitcoin_history) == 5:
                post_ifttt_webhook('bitcoin_price_update',
                                   format_bitcoin_history(bitcoin_history))
                # Reset the history
                bitcoin_history = []

            # Sleep for the configured interval between polls
            time.sleep(config.app_sleep)
    except KeyboardInterrupt:
        # BUG FIX: the "Finished main task" log sat after an infinite loop in
        # the original and was unreachable; log it when the loop is stopped.
        logger.info("Finished main task")
def format_bitcoin_history(bitcoin_history):
    """Render price samples as an HTML snippet, one <br>-separated row each.

    Each entry must be a dict with a 'date' (datetime) and a 'price' key;
    a rendered row looks like: 24.02.2018 15:09: $<b>10123.4</b>
    """
    # <b> bolds the price; rows are joined by <br> to form one HTML string.
    return '<br>'.join(
        '{}: $<b>{}</b>'.format(entry['date'].strftime('%d.%m.%Y %H:%M'),
                                entry['price'])
        for entry in bitcoin_history
    )
if __name__ == '__main__':
main()
| StarcoderdataPython |
294015 | #!/usr/bin/env python
"""
lib.py
General ROMS utils
Written by <NAME> on 05/24/13
Copyright (c)2010--2021 University of Hawaii under the MIT-License.
"""
import numpy as np
import seapy
from seapy.lib import default_epoch, secs2day
import netCDF4
from warnings import warn
fields = {"zeta": {"grid": "rho", "dims": 2},
"ubar": {"grid": "u", "dims": 2, "rotate": "vbar"},
"vbar": {"grid": "v", "dims": 2, "rotate": "ubar"},
"u": {"grid": "u", "dims": 3, "rotate": "v"},
"v": {"grid": "v", "dims": 3, "rotate": "u"},
"temp": {"grid": "rho", "dims": 3},
"salt": {"grid": "rho", "dims": 3}}
ids = {1: "zeta", 2: "ubar", 3: "vbar", 4: "u", 5: "v", 6: "temp", 7: "salt"}
def stretching(vstretching=2, theta_s=2, theta_b=0.1, hc=100, s_rho=10,
               w_grid=False):
    """
    Compute the stretching function for ROMS

    Parameters
    ----------
    vstretching : int, optional
        stretching algorithm type (1 through 5)
    theta_s: float, optional
        value of surface theta
    theta_b: float, optional
        value of bottom theta
    hc: int, optional
        critical depth
    s_rho: int, optional
        number of s-levels
    w_grid: bool, optional
        solve stretching on the w-grid

    Returns
    -------
    s, cs: array
        s-coordinate values and the stretching curve evaluated at them

    Raises
    ------
    ValueError
        if vstretching is not between 1 and 5
    """
    ds = 1.0 / s_rho
    # w-levels sit at the layer interfaces; rho-levels at layer midpoints.
    if w_grid:
        lev = np.arange(1, s_rho + 1)
    else:
        lev = np.arange(1, s_rho + 1) - 0.5
    s = (lev - s_rho) * ds

    # NOTE(review): removed the original's dead `pass` statements; no other
    # behavior change.
    if vstretching == 1:
        if theta_s > 0:
            ptheta = np.sinh(theta_s * s) / np.sinh(theta_s)
            rtheta = np.tanh(theta_s * (s + 0.5)) / \
                (2.0 * np.tanh(0.5 * theta_s)) - 0.5
            cs = (1.0 - theta_b) * ptheta + theta_b * rtheta
        else:
            cs = s
    elif vstretching == 2:
        if theta_s > 0:
            csur = (1.0 - np.cosh(theta_s * s)) / (np.cosh(theta_s) - 1.0)
            if theta_b > 0:
                cbot = -1.0 + np.sinh(theta_b * (s + 1.0)) / np.sinh(theta_b)
                # Blend the surface and bottom curves across the column.
                weight = (s + 1.0) * (1.0 - s)
                cs = weight * csur + (1.0 - weight) * cbot
            else:
                cs = csur
        else:
            cs = s
    elif vstretching == 3:
        if theta_s > 0:
            exp_s = theta_s
            exp_b = theta_b
            alpha = 3
            cbot = np.log(np.cosh(alpha * (s + 1.0)**exp_b)) / \
                np.log(np.cosh(alpha)) - 1.0
            csur = -np.log(np.cosh(alpha * np.abs(s)**exp_s)) / \
                np.log(np.cosh(alpha))
            weight = (1.0 - np.tanh(alpha * (s + .5))) / 2.0
            cs = weight * cbot + (1.0 - weight) * csur
        else:
            cs = s
    elif vstretching == 4:
        if theta_s > 0:
            csur = (1.0 - np.cosh(theta_s * s)) / (np.cosh(theta_s) - 1.0)
        else:
            csur = -(s * s)
        if theta_b > 0:
            cs = (np.exp(theta_b * csur) - 1.0) / (1.0 - np.exp(-theta_b))
        else:
            cs = s
    elif vstretching == 5:
        # This scheme redefines s itself with a quadratic law before curving.
        s = -(lev * lev - 2 * lev * s_rho + lev + s_rho * s_rho - s_rho) / \
            (1.0 * s_rho * s_rho - s_rho) - \
            0.01 * (lev * lev - lev * s_rho) / (1.0 - s_rho)
        if theta_s > 0:
            csur = (1.0 - np.cosh(theta_s * s)) / (np.cosh(theta_s) - 1)
        else:
            csur = -(s * s)
        if theta_b > 0:
            cs = (np.exp(theta_b * (csur + 1.0)) - 1.0) / \
                (np.exp(theta_b) - 1.0) - 1.0
        else:
            cs = csur
    else:
        raise ValueError("stretching value must be between 1 and 5")

    return s, cs
def depth(vtransform=1, h=None, hc=100, scoord=None,
          stretching=None, zeta=0, w_grid=False):
    """
    Solve the depth of the given bathymetry in s-levels.

    Parameters
    ----------
    vtransform : int, optional
        transform algorithm type (1 or 2)
    h: array, optional
        value of bottom depths
    hc: int, optional
        critical depth
    scoord: array
        s coordinates from the stretching method
    stretching: array
        stretching values from the stretching method
    zeta: array
        sea surface height to add to bottom
    w_grid: bool, optional
        solve depths on the w-grid

    Returns
    -------
    z: ndarray,
        depth of grid cells
    """
    if h is None or scoord is None or stretching is None:
        raise AttributeError("you must supply h, scoord, and stretching")
    if scoord.size != stretching.size:
        raise ValueError(
            "the stretching and scoord arrays must be the same size")

    n_levels = scoord.size
    h_inverse = 1 / h
    h = np.asanyarray(h)
    # The w-grid output carries one extra level: rho values shift up one
    # slot and the bottom slot is filled with -h at the end.
    offset = 1 if w_grid else 0
    z = np.zeros(np.hstack((n_levels + offset, h.shape)))

    if vtransform == 1:
        coef = hc * (scoord - stretching)
        for k in range(n_levels):
            z0 = coef[k] + stretching[k] * h
            z[k + offset, :] = z0 + zeta * (1.0 + z0 * h_inverse)
    elif vtransform == 2:
        coef = 1 / (hc + h)
        for k in range(n_levels):
            numer = hc * scoord[k] + h * stretching[k]
            z[k + offset, :] = zeta + (zeta + h) * coef * numer
    else:
        raise ValueError("transform value must be between 1 and 2")

    if w_grid:
        z[0, :] = -h
    return z
def thickness(vtransform=1, h=None, hc=100, scoord=None,
              stretching=None, zeta=0):
    """
    Get the thickness of the grid cells for the given sigma-parameters.

    Parameters
    ----------
    vtransform : int, optional
        transform algorithm type (1 or 2), passed through to depth()
    h: array, optional
        value of bottom depths
    hc: int, optional
        critical depth
    scoord: array
        s coordinates from the stretching method
    stretching: array
        stretching values from the stretching method
    zeta: array
        sea surface height to add to bottom

    Returns
    -------
    hz : array,
        thickness of each s-layer (one fewer level than the w-grid depths)
    """
    # Get the w-coordinate depths and return the differenc
    z_w = depth(vtransform, h, hc, scoord, stretching, zeta, True)
    # Layer thickness = spacing between consecutive w-level interfaces.
    return z_w[1:, :, :] - z_w[0:-1, :, :]
def gen_boundary_region(shp, north=None, east=None, west=None, south=None,
                        kind='linear'):
    """
    Generate a masked field varying from 1 at the boundary to 0 in the
    middle along each of the specified boundaries. This is used to create
    nudging and sponge fields to save into the respective ROMS files.

    Parameters
    ----------
    shp : tuple,
        The shape of the grid to use
    north : int, optional,
        The size of the region in the north boundary
    south : int, optional,
        The size of the region in the south boundary
    east : int, optional,
        The size of the region in the east boundary
    west : int, optional,
        The size of the region in the west boundary
    kind : string, optional,
        The type of transition:
           'linear' (default)
           'cosine'

    Returns
    -------
    fld : np.ma.array,
        array containing boundary values ranging from 0 to 1. masked values
        were not set by the routine, but the fill_value is set to 0.
    """
    fld = np.ma.zeros(shp, fill_value=0)
    fld[:] = np.ma.masked

    # Requested transition width for each boundary.
    # BUG FIX: the original fetched these via locals(), which silently breaks
    # under any parameter rename/refactor; use an explicit mapping instead.
    widths = {"north": north, "south": south, "east": east, "west": west}

    # Per boundary: (length of the edge, at_far_end, needs_transpose)
    dirs = {"north": (shp[1], True, True),
            "south": (shp[1], False, True),
            "east": (shp[0], True, False),
            "west": (shp[0], False, False)}

    for d in dirs:
        # Set the factor to generate the values
        nx = widths[d]
        if nx is None or nx == 0:
            continue
        x = np.arange(nx)
        if kind == "cosine":
            x = np.cos(np.pi / (2.0 * nx) * x)[::-1]
        else:
            x = 1.0 / nx * x
        x = np.tile(x[::-1], [dirs[d][0], 1])

        # If the boundary is the end, flip it
        sl = np.array([slice(None, None, None), slice(None, nx, None)])
        if dirs[d][1]:
            x = np.fliplr(x)
            sl[1] = slice(-nx, None, None)

        # If the dimensions are rotated, transpose
        if dirs[d][2]:
            x = np.transpose(x)
            sl = sl[::-1]
        sl = (sl[0], sl[1])
        # Overlapping corners keep the larger (closer-to-boundary) value.
        fld[sl] = np.maximum(fld.filled()[sl], x)
    return fld
def _get_calendar(var):
"""
Get the proper calendar string from a netcdf file
Parameters
----------
var : netCDF4.variable
Returns
-------
calendar type: string,
The type of calendar system used
convert : bool
True if the calendar needs to be converted to datetime
"""
# Set up the mapping for calendars
default = 1
calendar_conv = [False, False, False,
True, True, True, False, False, False]
calendar_types = ['standard', 'gregorian', 'proleptic_gregorian', 'noleap',
'julian', 'all_leap', '365_day', '366_day', '360_day']
cals = {v: v for v in calendar_types}
cals['gregorian_proleptic'] = 'proleptic_gregorian'
# Load the calendar type. If it is incorrectly specified (*cough* ROMS),
# change it
for cal in ('calendar', 'calendar_type'):
if hasattr(var, cal):
cal = cals.get(str(getattr(var, cal)).lower(),
calendar_types[default])
return cal, calendar_conv[calendar_types == cal]
return calendar_types[default], calendar_conv[default]
def date2num(dates, nc, tvar=None):
    """
    Convert the datetime vector to number for the given netcdf files
    considering the units and the calendar type used. This is a wrapper to the
    netCDF4.date2num function to account for calendar strangeness in ROMS

    Parameters
    ----------
    dates : array of datetime.datetime
        Values to convert
    nc : netCDF4.Dataset,
        netcdf input file
    tvar : string, optional
        time variable to load. If not specified, it will find the
        time variable from predefined

    Returns
    -------
    ndarray,
        Array of values in the correct units/calendar of the netCDF file
    """
    time_name = tvar or get_timevar(nc)
    time_var = nc.variables[time_name]
    # Resolve the (possibly mis-specified) calendar before converting.
    calendar, _ = _get_calendar(time_var)
    return netCDF4.date2num(dates, time_var.units, calendar=calendar)
def num2date(nc, tvar=None, records=None, as_datetime=True, epoch=None):
    """
    Load the time vector from a netCDF file as a datetime array, accounting
    for units and the calendar type used. This is a wrapper to the
    netCDF4.num2date function to account for calendar strangeness in ROMS

    Parameters
    ----------
    nc : netCDF4.Dataset,
        netcdf input file
    tvar : string, optional
        time variable to load. If not specified, it will find the
        time variable from predefined
    records : array or slice, optional
        the indices of records to load
    as_datetime : boolean, optional
        convert the result to an array of datetimes [default]
    epoch : datetime.datetime, optional
        if you would like the values relative to an epoch, then
        specify the epoch to remove.

    Returns
    -------
    ndarray,
        Array of datetimes if no epoch is supplied. If epoch, array
        is in days since epoch
    """
    import datetime

    # Default to loading every record.
    records = records if records is not None else np.s_[:]
    tvar = tvar if tvar else get_timevar(nc)
    if tvar not in nc.variables:
        warn(f"{nc.filepath()} does not have a recognizable time dimension.")
        return list()
    calendar, convert = _get_calendar(nc.variables[tvar])
    # Load the times
    times = np.atleast_1d(netCDF4.num2date(nc.variables[tvar][records],
                                           nc.variables[tvar].units,
                                           calendar=calendar))
    # If we don't have datetime instances, convert to datetime if we can
    # (netCDF4 may hand back cftime objects; round-trip through strftime).
    if (as_datetime or convert) and \
        (not isinstance(times[0], datetime.datetime)
            and times[0].datetime_compatible):
        times = np.array([datetime.datetime.strptime(
            t.strftime('%Y-%m-%d %H:%M:%S'),
            '%Y-%m-%d %H:%M:%S') for t in
            times])
    if not epoch:
        return times
    else:
        # secs2day converts the total_seconds offset into fractional days.
        return np.asarray([(t - epoch).total_seconds() * secs2day for t in times])
def get_timevar(nc):
    """
    Find the appropriate time variable (bry_time, ocean_time, etc.) from a
    given netcdf file

    Parameters
    ----------
    nc : netCDF4.Dataset netcdf input file

    Returns
    -------
    time: string, or None when no known time variable exists
    """
    # Candidates are checked in priority order.
    candidates = ("ocean_time", "time", "bry_time", "wind_time",
                  "clim_time", "frc_time", "zeta_time")
    return next((name for name in candidates if name in nc.variables), None)
def get_reftime(nc, epoch=default_epoch):
    """
    Given a ROMS netCDF4 file, return the reference time for the file. This
    is the timebase of the record dimension in the format:
    "<units> since <reftime>"

    Parameters
    ----------
    nc : netCDF4 dataset
        Input ROMS file
    epoch_str : string, optional
        If lacking units, use this string as the units

    Returns
    -------
    timebase : datetime
        datetime of the origin for the file
    time : string
        name of variable used to generate the base (None if default)
    """
    try:
        time_name = get_timevar(nc)
        calendar, _ = _get_calendar(nc.variables[time_name])
        # Value 0 in the file's own units/calendar is, by definition,
        # the reference time.
        return netCDF4.num2date(0, nc.variables[time_name].units,
                                calendar=calendar), time_name
    except AttributeError:
        # No usable units attribute: fall back to the supplied epoch.
        return epoch, None
def omega(grid, u, v, zeta=0, scale=True, work=False):
    """
    Compute the vertical velocity on s-grid.

    Parameters
    ----------
    grid : seapy.model.grid,
        The grid to use for the calculations
    u : ndarray,
        The u-field in time
    v : ndarray,
        The v-field in time
    zeta : ndarray, optional,
        The zeta-field in time
    scale : bool, optional,
        If [True], return omega in [m s**-1];
        If False, return omega in [m**3 s**-1]
    work : bool, optional,
        If True, return the work arrays:
            z_r : ndarray,
              Depth on rho-grid (time-varying if zeta != 0)
            z_w : ndarray,
              Depth on w-grid (time-varying if zeta != 0)
            thick_u : ndarray
              Thickness of the u-grid
            thick_v : ndarray
              Thickness of the v-grid
        If False, return only omega

    Returns
    -------
    omega : ndarray,
        Vertical Velocity on s-grid
    """
    grid=seapy.model.asgrid(grid)
    u=np.ma.array(u)
    v=np.ma.array(v)
    zeta=np.ma.array(zeta)
    # Check the sizes: pad leading axes so u/v are 4D (time, k, eta, xi)
    # and zeta is 3D (time, eta, xi).
    while u.ndim < 4:
        u=u[np.newaxis, ...]
    while v.ndim < 4:
        v=v[np.newaxis, ...]
    while zeta.ndim < 3:
        zeta=zeta[np.newaxis, ...]
    # Get the model grid parameters for the given thickness
    thick_u=u * 0
    thick_v=v * 0
    z_r=np.ma.zeros((u.shape[0], u.shape[1], zeta.shape[1], zeta.shape[2]))
    z_w=np.ma.zeros((u.shape[0], u.shape[1] + 1,
                     zeta.shape[1], zeta.shape[2]))
    # Per time step: recompute time-varying depths (zeta moves the surface)
    # and derive the u/v cell thicknesses from the w-level spacing.
    for i in range(zeta.shape[0]):
        s_w, cs_w=seapy.roms.stretching(
            grid.vstretching, grid.theta_s, grid.theta_b, grid.hc,
            grid.n, w_grid=True)
        z_r[i, ...]=seapy.roms.depth(grid.vtransform, grid.h, grid.hc,
                                     s_w, cs_w, zeta=zeta[i, ...],
                                     w_grid=False)
        z_w[i, ...]=seapy.roms.depth(grid.vtransform, grid.h, grid.hc,
                                     s_w, cs_w, zeta=zeta[i, ...],
                                     w_grid=True)
        thick_rho=np.squeeze(z_w[i, 1:, :, :] - z_w[i, :-1, :, :])
        thick_u[i, ...]=seapy.model.rho2u(thick_rho)
        thick_v[i, ...]=seapy.model.rho2v(thick_rho)
    # Values beyond 5e4 mark undefined/land cells — mask them out.
    z_r[z_r > 50000]=np.ma.masked
    z_w[z_w > 50000]=np.ma.masked
    # Compute W (omega): Huon/Hvom are the volume fluxes through the u/v
    # cell faces (velocity * thickness * face width).
    Huon=u * thick_u * seapy.model.rho2u(grid.dn)
    Hvom=v * thick_v * seapy.model.rho2v(grid.dm)
    W=z_w * 0
    # Vertically integrate the horizontal flux divergence from the bottom up.
    for k in range(grid.n):
        W[:, k + 1, :-2, :-2]=W[:, k, :-2, :-2] - \
            (Huon[:, k, 1:-1, 1:] - Huon[:, k, 1:-1, :-1]
             + Hvom[:, k, 1:, 1:-1] - Hvom[:, k, :-1, 1:-1])
    # Remove the depth-proportional share of the residual surface divergence
    # so W vanishes exactly at the free surface.
    wrk=W[:, -1:, :, :] / (z_w[:, -1:, :, :] - z_w[:, 0:1, :, :])
    W[:, :-1, :, :]=W[:, :-1, :, :] - wrk * \
        (z_w[:, :-1, :, :] - z_w[:, 0:1, :, :])
    W[:, -1, :, :]=0
    if scale:
        # Convert volume flux [m**3 s**-1] to velocity [m s**-1].
        W *= grid.pn * grid.pm
    if work:
        return W, z_r, z_w, thick_u, thick_v
    else:
        return W
def wvelocity(grid, u, v, zeta=0):
    """
    Compute "true" vertical velocity

    Parameters
    ----------
    grid : seapy.model.grid,
        The grid to use for the calculations
    u : ndarray,
        The u-field in time
    v : ndarray,
        The v-field in time
    zeta : ndarray, optional,
        The zeta-field in time

    Returns
    -------
    w : ndarray,
        Vertical Velocity
    """
    grid = seapy.model.asgrid(grid)
    u = np.ma.array(u)
    v = np.ma.array(v)
    zeta = np.ma.array(zeta)
    # Check the sizes: pad leading axes so u/v are 4D and zeta is 3D.
    while u.ndim < 4:
        u = u[np.newaxis, ...]
    while v.ndim < 4:
        v = v[np.newaxis, ...]
    while zeta.ndim < 3:
        zeta = zeta[np.newaxis, ...]

    # Get omega (s-coordinate vertical velocity) plus the work arrays.
    W, z_r, z_w, thick_u, thick_v = omega(grid, u, v, zeta, scale=True,
                                          work=True)

    # Compute quasi-horizontal motions (Ui + Vj)*GRAD s(z)
    vert = z_r * 0
    # U-contribution
    wrk = u * (z_r[:, :, :, 1:] - z_r[:, :, :, :-1]) * \
        (grid.pm[:, 1:] - grid.pm[:, :-1])
    vert[:, :, :, 1:-1] = 0.25 * (wrk[:, :, :, :-1] + wrk[:, :, :, 1:])
    # V-contribution
    wrk = v * (z_r[:, :, 1:, :] - z_r[:, :, :-1, :]) * \
        (grid.pn[1:, :] - grid.pn[:-1, :])
    vert[:, :, 1:-1, :] += 0.25 * (wrk[:, :, :-1, :] + wrk[:, :, 1:, :])

    # Compute barotropic velocity [ERROR IN FORMULATION RIGHT NOW]
    # NOTE(review): the barotropic correction below is intentionally zeroed
    # out (the original assignment is kept commented) until the formulation
    # is fixed; wrk therefore contributes nothing at present.
    wrk = np.zeros((vert.shape[0], vert.shape[2], vert.shape[3]))
    ubar = np.sum(u * thick_u, axis=1) / np.sum(thick_u, axis=1)
    vbar = np.sum(v * thick_v, axis=1) / np.sum(thick_v, axis=1)
    # wrk[:, 1:-1, 1:-1] = (ubar[:, 1:-1, :-1] - ubar[:, 1:-1, 1:] +
    #                      vbar[:, :-1, 1:-1] - vbar[:, 1:, 1:-1])

    # Shift vert from rho to w: fourth-order interpolation in the interior,
    # one-sided extrapolation at the top and bottom boundaries.
    wvel = z_w * 0
    # First two layers
    slope = (z_r[:, 0, :, :] - z_w[:, 0, :, :]) / \
        (z_r[:, 1, :, :] - z_r[:, 0, :, :])
    wvel[:, 0, :, :] = 0.375 * (vert[:, 0, :, :] - slope *
                                (vert[:, 1, :, :] - vert[:, 0, :, :])) + \
        0.75 * vert[:, 0, :, :] - \
        0.125 * vert[:, 1, :, :]
    wvel[:, 1, :, :] = W[:, 1, :, :] + wrk + \
        0.375 * vert[:, 0, :, :] + \
        0.75 * vert[:, 1, :, :] - 0.125 * vert[:, 2, :, :]
    # Middle of the grid
    wvel[:, 2:-2, :, :] = W[:, 2:-2, :, :] + \
        wrk[:, np.newaxis, :, :] + \
        0.5625 * (vert[:, 1:-2, :, :] + vert[:, 2:-1, :, :]) - \
        0.0625 * (vert[:, :-3, :, :] + vert[:, 3:, :, :])
    # Upper two layers
    slope = (z_w[:, -1, :, :] - z_r[:, -1, :, :]) / \
        (z_r[:, -1, :, :] - z_r[:, -2, :, :])
    wvel[:, -1, :, :] = wrk + 0.375 * (vert[:, -1, :, :] + slope *
                                       (vert[:, -1, :, :] - vert[:, -2, :, :])) + \
        0.75 * vert[:, -1, :, :] - \
        0.0625 * vert[:, -2, :, :]
    wvel[:, -2, :, :] = W[:, -2, :, :] + 0.375 * vert[:, -1, :, :] + \
        wrk + 0.75 * vert[:, -2, :, :] - \
        0.125 * vert[:, -3, :, :]

    # No gradient at the boundaries
    wvel[:, :, 0, :] = wvel[:, :, 1, :]
    wvel[:, :, -2:, :] = wvel[:, :, -3:-2, :]
    wvel[:, :, :, 0] = wvel[:, :, :, 1]
    wvel[:, :, :, -2:] = wvel[:, :, :, -3:-2]

    return wvel
    # BUG FIX: removed the stray `pass` that followed the return in the
    # original — it was dead code.
| StarcoderdataPython |
6560469 | # Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
"""Content - a MIME-like Content object."""
__all__ = [
'attach_file',
'Content',
'content_from_file',
'content_from_stream',
'json_content',
'text_content',
'TracebackContent',
]
import codecs
import inspect
import json
import os
import sys
from extras import try_import
# To let setup.py work, make this a conditional import.
traceback = try_import('traceback2')
from testtools.compat import (
_b,
_u,
istext,
str_is_unicode,
)
from testtools.content_type import ContentType, JSON, UTF8_TEXT
functools = try_import('functools')
_join_b = _b("").join
DEFAULT_CHUNK_SIZE = 4096
STDOUT_LINE = '\nStdout:\n%s'
STDERR_LINE = '\nStderr:\n%s'
def _iter_chunks(stream, chunk_size, seek_offset=None, seek_whence=0):
"""Read 'stream' in chunks of 'chunk_size'.
:param stream: A file-like object to read from.
:param chunk_size: The size of each read from 'stream'.
:param seek_offset: If non-None, seek before iterating.
:param seek_whence: Pass through to the seek call, if seeking.
"""
if seek_offset is not None:
stream.seek(seek_offset, seek_whence)
chunk = stream.read(chunk_size)
while chunk:
yield chunk
chunk = stream.read(chunk_size)
class Content(object):
    """A MIME-like Content object.

    'Content' objects can be serialised to bytes using the iter_bytes method.
    If the 'Content-Type' is recognised by other code, they are welcome to
    look for richer contents that mere byte serialisation - for example in
    memory object graphs etc. However, such code MUST be prepared to receive
    a generic 'Content' object that has been reconstructed from a byte stream.

    :ivar content_type: The content type of this Content.
    """

    def __init__(self, content_type, get_bytes):
        """Create a ContentType."""
        # Reject missing collaborators up front so failure surfaces at
        # construction rather than at (much later) serialisation time.
        if None in (content_type, get_bytes):
            raise ValueError("None not permitted in %r, %r" % (
                content_type, get_bytes))
        self.content_type = content_type
        # Zero-argument callable returning an iterable of byte chunks; this
        # keeps serialisation lazy until iter_bytes() is called.
        self._get_bytes = get_bytes

    def __eq__(self, other):
        # NOTE(review): this fully materialises both payloads; also, defining
        # __eq__ without __hash__ makes instances unhashable on Python 3 —
        # presumably intentional, confirm before using Content as a dict key.
        return (self.content_type == other.content_type and
                _join_b(self.iter_bytes()) == _join_b(other.iter_bytes()))

    def as_text(self):
        """Return all of the content as text.

        This is only valid where ``iter_text`` is. It will load all of the
        content into memory. Where this is a concern, use ``iter_text``
        instead.
        """
        return _u('').join(self.iter_text())

    def iter_bytes(self):
        """Iterate over bytestrings of the serialised content."""
        return self._get_bytes()

    def iter_text(self):
        """Iterate over the text of the serialised content.

        This is only valid for text MIME types, and will use ISO-8859-1 if
        no charset parameter is present in the MIME type. (This is somewhat
        arbitrary, but consistent with RFC2617 3.7.1).

        :raises ValueError: If the content type is not text/\\*.
        """
        if self.content_type.type != "text":
            raise ValueError("Not a text type %r" % self.content_type)
        return self._iter_text()

    def _iter_text(self):
        """Worker for iter_text - does the decoding."""
        encoding = self.content_type.parameters.get('charset', 'ISO-8859-1')
        # Incremental decoding copes with multi-byte sequences that are
        # split across chunk boundaries.
        decoder = codecs.getincrementaldecoder(encoding)()
        for bytes in self.iter_bytes():
            yield decoder.decode(bytes)
        # Flush any trailing partial sequence held by the decoder.
        final = decoder.decode(_b(''), True)
        if final:
            yield final

    def __repr__(self):
        return "<Content type=%r, value=%r>" % (
            self.content_type, _join_b(self.iter_bytes()))
class StackLinesContent(Content):
    """Content object for stack lines.

    This adapts a list of "preprocessed" stack lines into a 'Content' object.
    The stack lines are most likely produced from ``traceback.extract_stack``
    or ``traceback.extract_tb``.

    text/x-traceback;language=python is used for the mime type, in order to
    provide room for other languages to format their tracebacks differently.
    """

    # Whether or not to hide layers of the stack trace that are
    # unittest/testtools internal code.  Defaults to True since the
    # system-under-test is rarely unittest or testtools.
    HIDE_INTERNAL_STACK = True

    def __init__(self, stack_lines, prefix_content="", postfix_content=""):
        """Create a StackLinesContent for ``stack_lines``.

        :param stack_lines: A list of preprocessed stack lines, probably
            obtained by calling ``traceback.extract_stack`` or
            ``traceback.extract_tb``.
        :param prefix_content: If specified, a unicode string to prepend to the
            text content.
        :param postfix_content: If specified, a unicode string to append to the
            text content.
        """
        content_type = ContentType('text', 'x-traceback',
                                   {"language": "python", "charset": "utf8"})
        value = prefix_content + \
            self._stack_lines_to_unicode(stack_lines) + \
            postfix_content
        # The payload is fixed at construction; the lambda just re-yields it.
        super(StackLinesContent, self).__init__(
            content_type, lambda: [value.encode("utf8")])

    def _stack_lines_to_unicode(self, stack_lines):
        """Converts a list of pre-processed stack lines into a unicode string.
        """
        msg_lines = traceback.format_list(stack_lines)
        return _u('').join(msg_lines)
class TracebackContent(Content):
    """Content object for tracebacks.

    This adapts an exc_info tuple to the 'Content' interface.
    'text/x-traceback;language=python' is used for the mime type, in order to
    provide room for other languages to format their tracebacks differently.
    """

    def __init__(self, err, test, capture_locals=False):
        """Create a TracebackContent for ``err``.

        :param err: An exc_info error tuple.
        :param test: A test object used to obtain failureException.
        :param capture_locals: If true, show locals in the traceback.
        """
        if err is None:
            raise ValueError("err may not be None")

        exctype, value, tb = err
        # Skip test runner traceback levels (frames whose module globals
        # carry the '__unittest' marker).
        if StackLinesContent.HIDE_INTERNAL_STACK:
            while tb and '__unittest' in tb.tb_frame.f_globals:
                tb = tb.tb_next

        limit = None
        # Disabled due to https://bugs.launchpad.net/testtools/+bug/1188420
        # (the `False and` guard below keeps this branch permanently off).
        if (False
            and StackLinesContent.HIDE_INTERNAL_STACK
            and test.failureException
            and isinstance(value, test.failureException)):
            # Skip assert*() traceback levels
            limit = 0
            while tb and not self._is_relevant_tb_level(tb):
                limit += 1
                tb = tb.tb_next

        stack_lines = list(traceback.TracebackException(exctype, value, tb,
            limit=limit, capture_locals=capture_locals).format())
        content_type = ContentType('text', 'x-traceback',
                                   {"language": "python", "charset": "utf8"})

        super(TracebackContent, self).__init__(
            content_type, lambda: [x.encode('utf8') for x in stack_lines])
def StacktraceContent(prefix_content="", postfix_content=""):
    """Content object for stack traces.

    This function will create and return a 'Content' object that contains a
    stack trace.

    The mime type is set to 'text/x-traceback;language=python', so other
    languages can format their stack traces differently.

    :param prefix_content: A unicode string to add before the stack lines.
    :param postfix_content: A unicode string to add after the stack lines.
    """
    stack = traceback.walk_stack(None)

    def filter_stack(stack):
        # The two next() calls below assume this exact call depth: the
        # generator's own frame and StacktraceContent's frame are the first
        # two items yielded and must not appear in the reported trace.
        # Discard the filter_stack frame.
        next(stack)
        # Discard the StacktraceContent frame.
        next(stack)
        for f, f_lineno in stack:
            if StackLinesContent.HIDE_INTERNAL_STACK:
                # Stop at the first unittest-internal frame; everything
                # above it is runner machinery.
                if '__unittest' in f.f_globals:
                    return
            yield f, f_lineno

    extract = traceback.StackSummary.extract(filter_stack(stack))
    # walk_stack yields innermost-first; reverse for conventional
    # outermost-first display.
    extract.reverse()
    return StackLinesContent(extract, prefix_content, postfix_content)
def json_content(json_data):
    """Create a Content object holding ``json_data`` serialised as JSON."""
    payload = json.dumps(json_data)
    if str_is_unicode:
        # On Python 3 json.dumps returns a native str, but the Content
        # payload must be bytes.
        payload = payload.encode('utf8')
    return Content(JSON, lambda: [payload])
def text_content(text):
    """Build a Content object from a short piece of text.

    Useful for adding details which are plain short strings.

    :raises TypeError: if ``text`` is not a text string.
    """
    if istext(text):
        return Content(UTF8_TEXT, lambda: [text.encode('utf8')])
    raise TypeError(
        "text_content must be given text, not '%s'." % type(text).__name__
    )
def maybe_wrap(wrapper, func):
    """Copy ``func``'s metadata onto ``wrapper`` when functools is available."""
    if functools is None:
        return wrapper
    # update_wrapper mutates and returns ``wrapper``.
    return functools.update_wrapper(wrapper, func)
def content_from_file(path, content_type=None, chunk_size=DEFAULT_CHUNK_SIZE,
                      buffer_now=False, seek_offset=None, seek_whence=0):
    """Create a Content object backed by a file on disk.

    Unless ``buffer_now`` is True, the file is only opened and read when
    ``iter_bytes`` is called on the returned object.

    :param path: The path to the file to be used as content.
    :param content_type: The type of content; defaults to UTF8-encoded
        text/plain when None.
    :param chunk_size: Size of chunks read from the file
        (``DEFAULT_CHUNK_SIZE`` by default).
    :param buffer_now: If True, read the whole file immediately and keep it
        in memory instead of reading at serialisation time.
    :param seek_offset: If non-None, seek within the stream before reading.
    :param seek_whence: Passed to ``stream.seek()`` when seeking.
    """
    effective_type = UTF8_TEXT if content_type is None else content_type

    def read_chunks():
        # Opened lazily, so the file need not exist until the content is
        # serialised (unless buffer_now forced an immediate read).
        with open(path, 'rb') as stream:
            for piece in _iter_chunks(stream,
                                      chunk_size,
                                      seek_offset,
                                      seek_whence):
                yield piece

    return content_from_reader(read_chunks, effective_type, buffer_now)
def content_from_stream(stream, content_type=None,
                        chunk_size=DEFAULT_CHUNK_SIZE, buffer_now=False,
                        seek_offset=None, seek_whence=0):
    """Create a Content object backed by a file-like stream.

    Unless ``buffer_now`` is True, the stream is only read when
    ``iter_bytes`` is called. The stream is never closed by this function
    or by the returned 'Content' object.

    :param stream: A file-like object to read the content from.
    :param content_type: The type of content; defaults to UTF8-encoded
        text/plain when None.
    :param chunk_size: Size of chunks read from the stream
        (``DEFAULT_CHUNK_SIZE`` by default).
    :param buffer_now: If True, read the stream immediately; otherwise read
        at serialisation time. Defaults to False.
    :param seek_offset: If non-None, seek within the stream before reading.
    :param seek_whence: Passed to ``stream.seek()`` when seeking.
    """
    effective_type = UTF8_TEXT if content_type is None else content_type

    def read_chunks():
        return _iter_chunks(stream, chunk_size, seek_offset, seek_whence)

    return content_from_reader(read_chunks, effective_type, buffer_now)
def content_from_reader(reader, content_type, buffer_now):
    """Create a Content object that obtains its bytes from ``reader``.

    :param reader: A callback returning an iterable of bytestrings.
    :param content_type: The content type to create; defaults to
        UTF8-encoded text/plain when None.
    :param buffer_now: If True the reader is evaluated immediately and the
        chunks are buffered in memory.
    """
    if content_type is None:
        content_type = UTF8_TEXT
    if not buffer_now:
        return Content(content_type, reader)
    # Evaluate the reader right away and serve the buffered chunks later.
    buffered = list(reader())
    return Content(content_type, lambda: buffered)
def attach_file(detailed, path, name=None, content_type=None,
                chunk_size=DEFAULT_CHUNK_SIZE, buffer_now=True):
    """Attach a file to this test as a detail.

    This is a convenience method wrapping around ``addDetail``.

    Note that by default the contents of the file will be read immediately. If
    ``buffer_now`` is False, then the file *must* exist when the test result is
    called with the results of this test, after the test has been torn down.

    :param detailed: An object with details (i.e. providing ``addDetail``).
    :param path: The path to the file to attach.
    :param name: The name to give to the detail for the attached file.
        Defaults to the file's base name.
    :param content_type: The content type of the file. If not provided,
        defaults to UTF8-encoded text/plain.
    :param chunk_size: The size of chunks to read from the file. Defaults
        to something sensible.
    :param buffer_now: If False the file content is read when the content
        object is evaluated rather than when attach_file is called.
        Note that this may be after any cleanups that obj_with_details has, so
        if the file is a temporary file disabling buffer_now may cause the file
        to be read after it is deleted. To handle those cases, using
        attach_file as a cleanup is recommended because it guarantees a
        sequence for when the attach_file call is made::

            detailed.addCleanup(attach_file, detailed, 'foo.txt')
    """
    if name is None:
        name = os.path.basename(path)
    content_object = content_from_file(
        path, content_type, chunk_size, buffer_now)
    detailed.addDetail(name, content_object)
| StarcoderdataPython |
4845797 | # Generated by Django 3.2.4 on 2021-08-15 08:21
from django.db import migrations, models
import src.users.services.image_services
class Migration(migrations.Migration):
dependencies = [
("users", "0021_alter_contact_user"),
]
operations = [
migrations.RemoveField(
model_name="user",
name="is_verified",
),
migrations.AlterField(
model_name="user",
name="avatar",
field=models.ImageField(
blank=True,
null=True,
upload_to=src.users.services.image_services.UploadToPathAndRename(
"images/users/avatars"
),
),
),
]
| StarcoderdataPython |
105689 | <filename>demo/sensors/button.py<gh_stars>10-100
# Demo: wait for the hub's left button press, logging start/finish markers.
# spikedev libraries
from spikedev.button import ButtonLeft
from spikedev.logging import log_msg

log_msg("start")

# Block until the left button is pressed, with a 5000 timeout
# (presumably milliseconds — TODO confirm against spikedev docs).
btn = ButtonLeft()
btn.wait_for_pressed(5000)

log_msg("finish")
| StarcoderdataPython |
8182611 | import argparse
import logging
import os
import sys
import appdirs
import hangups.auth
from slackups.server import Server
def runit():
    """Entry point: authenticate with Hangouts and start the IRC gateway."""
    logging.basicConfig(level=logging.INFO, stream=sys.stdout)
    # hangups is chatty at INFO; only surface its warnings.
    logging.getLogger('hangups').setLevel(logging.WARNING)

    # Reuse the hangups cookie cache location so auth is shared with the
    # hangups client itself; prompts on stdin when no valid cookies exist.
    dirs = appdirs.AppDirs('hangups', 'hangups')
    default_cookies_path = os.path.join(dirs.user_cache_dir, 'cookies.json')
    cookies = hangups.auth.get_auth_stdin(default_cookies_path)

    parser = argparse.ArgumentParser(description='IRC Gateway for Hangouts')
    parser.add_argument('--address', help='bind address', default='127.0.0.1')
    parser.add_argument('--port', help='bind port', default=6667, type=int)
    parser.add_argument('--ascii-smileys', action='store_true',
                        help='display smileys in ascii')
    args = parser.parse_args()

    # Blocks serving IRC clients until interrupted.
    Server(cookies, args.ascii_smileys).run(args.address, args.port)


if __name__ == '__main__':
    runit()
| StarcoderdataPython |
8138574 | <reponame>scottwedge/OpenStack-Stein
# Copyright (c) 2015 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from oslo_log import log as logging
import six
from six.moves import range
from congress.datalog import base
from congress.datalog import builtin
from congress.datalog import compile
from congress.datalog import unify
from congress.datalog import utility
LOG = logging.getLogger(__name__)
class TopDownTheory(base.Theory):
    """Class that holds the Top-Down evaluation routines.

    Classes will inherit from this class if they want to import and specialize
    those routines.
    """

    class TopDownContext(object):
        """Struct for storing the search state of top-down evaluation."""

        def __init__(self, literals, literal_index, binding, context, theory,
                     depth):
            # Remaining goal literals; LITERAL_INDEX points at the one
            # currently being proven.
            self.literals = literals
            self.literal_index = literal_index
            self.binding = binding
            # Parent context: resumed once every literal here is proven.
            self.previous = context
            self.theory = theory   # a theory object, not just its name
            self.depth = depth     # nesting depth, used only for trace output

        def __str__(self):
            return (
                "TopDownContext<literals={}, literal_index={}, binding={}, "
                "previous={}, theory={}, depth={}>").format(
                "[" + ",".join([str(x) for x in self.literals]) + "]",
                str(self.literal_index), str(self.binding),
                str(self.previous), self.theory.name, str(self.depth))

    class TopDownResult(object):
        """Stores a single result for top-down-evaluation."""

        def __init__(self, binding, support):
            self.binding = binding
            self.support = support  # for abduction

        def __str__(self):
            return "TopDownResult(binding={}, support={})".format(
                unify.binding_str(self.binding), utility.iterstr(self.support))

    class TopDownCaller(object):
        """Struct for info about the original caller of top-down evaluation.

        VARIABLES is the list of variables (from the initial query)
        that we want bindings for.
        BINDING is the initially empty BiUnifier.
        FIND_ALL controls whether just the first or all answers are found.
        ANSWERS is populated by top-down evaluation: it is the list of
        VARIABLES instances that the search process proved true.
        """

        def __init__(self, variables, binding, theory,
                     find_all=True, save=None):
            # an iterable of variable objects
            self.variables = variables
            # a bi-unifier
            self.binding = binding
            # the top-level theory (for included theories)
            self.theory = theory
            # a boolean
            self.find_all = find_all
            # The results of top-down-eval: a list of TopDownResults
            self.results = []
            # a Function that takes a compile.Literal and a unifier and
            # returns T iff that literal under the unifier should be
            # saved as part of an abductive explanation
            self.save = save
            # A variable used to store explanations as they are constructed
            self.support = []

        def __str__(self):
            return (
                "TopDownCaller<variables={}, binding={}, find_all={}, "
                "results={}, save={}, support={}>".format(
                    utility.iterstr(self.variables), str(self.binding),
                    str(self.find_all), utility.iterstr(self.results),
                    repr(self.save), utility.iterstr(self.support)))

    #########################################
    # External interface

    def __init__(self, name=None, abbr=None, theories=None, schema=None,
                 desc=None, owner=None):
        """Initialize the theory; INCLUDES starts empty."""
        super(TopDownTheory, self).__init__(
            name=name, abbr=abbr, theories=theories, schema=schema,
            desc=desc, owner=owner)
        self.includes = []

    def select(self, query, find_all=True):
        """Return list of instances of QUERY that are true.

        If FIND_ALL is False, the return list has at most 1 element.
        """
        assert compile.is_datalog(query), "Query must be atom/rule"
        if compile.is_atom(query):
            literals = [query]
        else:
            literals = query.body
        # Because our output is instances of QUERY, need all the variables
        # in QUERY.
        bindings = self.top_down_evaluation(query.variables(), literals,
                                            find_all=find_all)
        # LOG.debug("Top_down_evaluation returned: %s", bindings)
        if len(bindings) > 0:
            self.log(query.tablename(), "Found answer %s",
                     "[" + ",".join([str(query.plug(x))
                                     for x in bindings]) + "]")
        return [query.plug(x) for x in bindings]

    def explain(self, query, tablenames, find_all=True):
        """Return list of instances of QUERY that are true.

        Same as select except stores instances of TABLENAMES
        that participated in each proof. If QUERY is an atom,
        returns list of rules with QUERY in the head and
        the stored instances of TABLENAMES in the body; if QUERY is
        a rule, the rules returned have QUERY's head in the head
        and the stored instances of TABLENAMES in the body.
        """
        # This is different than abduction because instead of replacing
        # a proof attempt with saving a literal, we want to save a literal
        # after a successful proof attempt.
        assert False, "Not yet implemented"

    def abduce(self, query, tablenames, find_all=True):
        """Compute additional literals.

        Computes additional literals that if true would make
        (some instance of) QUERY true. Returns a list of rules
        where the head represents an instance of the QUERY and
        the body is the collection of literals that must be true
        in order to make that instance true. If QUERY is a rule,
        each result is an instance of the head of that rule, and
        the computed literals if true make the body of that rule
        (and hence the head) true. If FIND_ALL is true, the
        return list has at most one element.
        Limitation: every negative literal relevant to a proof of
        QUERY is unconditionally true, i.e. no literals are saved
        when proving a negative literal is true.
        """
        assert compile.is_datalog(query), "abduce requires a formula"
        if compile.is_atom(query):
            literals = [query]
            output = query
        else:
            literals = query.body
            output = query.head
        # We need all the variables we will be using in the output, which
        # here is just the head of QUERY (or QUERY itself if it is an atom)
        abductions = self.top_down_abduction(
            output.variables(), literals, find_all=find_all,
            save=lambda lit, binding: lit.tablename() in tablenames)
        results = [compile.Rule(output.plug(abd.binding), abd.support)
                   for abd in abductions]
        self.log(query.tablename(), "abduction result:")
        self.log(query.tablename(), "\n".join([str(x) for x in results]))
        return results

    def consequences(self, filter=None, table_theories=None):
        """Return all the true instances of any table in this theory."""
        # find all table, theory pairs defined in this theory
        if table_theories is None:
            table_theories = set()
            for key in self.rules.keys():
                table_theories |= set([(rule.head.table.table,
                                        rule.head.table.service)
                                       for rule in self.rules.get_rules(key)])
        results = set()
        # create queries: need table names and arities
        # TODO(thinrichs): arity computation will need to ignore
        # modals once we start using insert[p(x)] instead of p+(x)
        for (table, theory) in table_theories:
            if filter is None or filter(table):
                tablename = compile.Tablename(table, theory)
                arity = self.arity(tablename)
                # Build an all-variables query x0, x1, ... of the right arity
                # so select() returns every true instance of the table.
                vs = []
                for i in range(0, arity):
                    vs.append("x" + str(i))
                vs = [compile.Variable(var) for var in vs]
                tablename = table
                if theory:
                    tablename = theory + ":" + tablename
                query = compile.Literal(tablename, vs)
                results |= set(self.select(query))
        return results

    def top_down_evaluation(self, variables, literals,
                            binding=None, find_all=True):
        """Compute bindings.

        Compute all bindings of VARIABLES that make LITERALS
        true according to the theory (after applying the unifier BINDING).
        If FIND_ALL is False, stops after finding one such binding.
        Returns a list of dictionary bindings.
        """
        # LOG.debug("CALL: top_down_evaluation(vars=%s, literals=%s, "
        #           "binding=%s)",
        #           ";".join(str(x) for x in variables),
        #           ";".join(str(x) for x in literals),
        #           str(binding))
        results = self.top_down_abduction(variables, literals,
                                          binding=binding, find_all=find_all,
                                          save=None)
        # LOG.debug("EXIT: top_down_evaluation(vars=%s, literals=%s, "
        #           "binding=%s) returned %s",
        #           iterstr(variables), iterstr(literals),
        #           str(binding), iterstr(results))
        return [x.binding for x in results]

    def top_down_abduction(self, variables, literals, binding=None,
                           find_all=True, save=None):
        """Compute bindings.

        Compute all bindings of VARIABLES that make LITERALS
        true according to the theory (after applying the
        unifier BINDING), if we add some number of additional
        literals. Note: will not save any literals that are
        needed to prove a negated literal since the results
        would not make sense. Returns a list of TopDownResults.
        """
        if binding is None:
            binding = self.new_bi_unifier()
        caller = self.TopDownCaller(variables, binding, self,
                                    find_all=find_all, save=save)
        if len(literals) == 0:
            # Empty goal list is trivially true: record the current binding.
            self._top_down_finish(None, caller)
        else:
            # Note: must use same unifier in CALLER and CONTEXT
            context = self.TopDownContext(literals, 0, binding, None, self, 0)
            self._top_down_eval(context, caller)
        return list(set(caller.results))

    #########################################
    # Internal implementation

    def _top_down_eval(self, context, caller):
        """Compute instances.

        Compute all instances of LITERALS (from LITERAL_INDEX and above)
        that are true according to the theory (after applying the
        unifier BINDING to LITERALS).
        Returns True if done searching and False otherwise.
        """
        # no recursive rules, ever; this style of algorithm will not terminate
        lit = context.literals[context.literal_index]
        # LOG.debug("CALL: %s._top_down_eval(%s, %s)",
        #           self.name, context, caller)

        # abduction: instead of proving LIT, assume it and record it as
        # part of the explanation being constructed.
        if caller.save is not None and caller.save(lit, context.binding):
            self._print_call(lit, context.binding, context.depth)
            # save lit and binding--binding may not be fully flushed out
            # when we save (or ever for that matter)
            caller.support.append((lit, context.binding))
            self._print_save(lit, context.binding, context.depth)
            success = self._top_down_finish(context, caller)
            caller.support.pop()  # pop in either case
            if success:
                return True
            else:
                self._print_fail(lit, context.binding, context.depth)
                return False

        # regular processing
        if lit.is_negated():
            # LOG.debug("%s is negated", lit)
            # recurse on the negation of the literal (negation as failure)
            plugged = lit.plug(context.binding)
            assert plugged.is_ground(), (
                "Negated literal not ground when evaluated: " +
                str(plugged))
            self._print_call(lit, context.binding, context.depth)
            new_context = self.TopDownContext(
                [lit.complement()], 0, context.binding, None,
                self, context.depth + 1)
            new_caller = self.TopDownCaller(caller.variables, caller.binding,
                                            caller.theory, find_all=False,
                                            save=None)
            # Make sure new_caller has find_all=False, so we stop as soon
            # as we can.
            # Ensure save=None so that abduction does not save anything.
            # Saving while performing NAF makes no sense.
            self._top_down_eval(new_context, new_caller)
            if len(new_caller.results) > 0:
                # The positive literal succeeded, so the negation fails.
                self._print_fail(lit, context.binding, context.depth)
                return False  # not done searching, b/c we failed
            else:
                # don't need bindings b/c LIT must be ground
                return self._top_down_finish(context, caller, redo=False)
        elif lit.tablename() == 'true':
            self._print_call(lit, context.binding, context.depth)
            return self._top_down_finish(context, caller, redo=False)
        elif lit.tablename() == 'false':
            self._print_fail(lit, context.binding, context.depth)
            return False
        elif lit.is_builtin():
            return self._top_down_builtin(context, caller)
        elif (self.theories is not None and
              lit.table.service is not None and
              lit.table.modal is None and  # not a modal
              lit.table.service != self.name and
              not lit.is_update()):  # not a pseudo-modal
            # Literal names a table in another policy: delegate.
            return self._top_down_module(context, caller)
        else:
            return self._top_down_truth(context, caller)

    def _top_down_builtin(self, context, caller):
        """Evaluate a table with a builtin semantics.

        Returns True if done searching and False otherwise.
        """
        lit = context.literals[context.literal_index]
        self._print_call(lit, context.binding, context.depth)
        built = builtin.builtin_registry.builtin(lit.table)
        # copy arguments into variables
        # PLUGGED is an instance of compile.Literal
        plugged = lit.plug(context.binding)
        # PLUGGED.arguments is a list of compile.Term
        # create args for function
        args = []
        for i in range(0, built.num_inputs):
            # save builtins with unbound vars during evaluation
            if not plugged.arguments[i].is_object() and caller.save:
                # save lit and binding--binding may not be fully flushed out
                # when we save (or ever for that matter)
                caller.support.append((lit, context.binding))
                self._print_save(lit, context.binding, context.depth)
                success = self._top_down_finish(context, caller)
                caller.support.pop()  # pop in either case
                if success:
                    return True
                else:
                    self._print_fail(lit, context.binding, context.depth)
                    return False
            # NOTE(review): the message below formats ``builtin.num_inputs``
            # (the module), not ``built.num_inputs`` — looks like a latent
            # typo that would only surface when this assert fires; confirm.
            assert plugged.arguments[i].is_object(), (
                ("Builtins must be evaluated only after their "
                 "inputs are ground: {} with num-inputs {}".format(
                     str(plugged), builtin.num_inputs)))
            args.append(plugged.arguments[i].name)
        # evaluate builtin: must return number, string, or iterable
        # of numbers/strings
        try:
            result = built.code(*args)
        except Exception as e:
            errmsg = "Error in builtin: " + str(e)
            self._print_note(lit, context.binding, context.depth, errmsg)
            self._print_fail(lit, context.binding, context.depth)
            return False

        # self._print_note(lit, context.binding, context.depth,
        #                  "Result: " + str(result))
        success = None
        undo = []
        if built.num_outputs > 0:
            # with return values, local success means we can bind
            # the results to the return value arguments
            if (isinstance(result,
                           (six.integer_types, float, six.string_types))):
                result = [result]
            # Turn result into normal objects
            result = [compile.Term.create_from_python(x) for x in result]
            # adjust binding list
            unifier = self.new_bi_unifier()
            undo = unify.bi_unify_lists(result,
                                        unifier,
                                        lit.arguments[built.num_inputs:],
                                        context.binding)
            success = undo is not None
        else:
            # without return values, local success means
            # result was True according to Python
            success = bool(result)

        if not success:
            self._print_fail(lit, context.binding, context.depth)
            unify.undo_all(undo)
            return False

        # otherwise, try to finish proof. If success, return True
        if self._top_down_finish(context, caller, redo=False):
            unify.undo_all(undo)
            return True
        # if fail, return False.
        else:
            unify.undo_all(undo)
            self._print_fail(lit, context.binding, context.depth)
            return False

    def _top_down_module(self, context, caller):
        """Move to another theory and continue evaluation."""
        # LOG.debug("%s._top_down_module(%s)", self.name, context)
        lit = context.literals[context.literal_index]
        if lit.table.service not in self.theories:
            self._print_call(lit, context.binding, context.depth)
            errmsg = "No such policy: %s" % lit.table.service
            self._print_note(lit, context.binding, context.depth, errmsg)
            self._print_fail(lit, context.binding, context.depth)
            return False
        return self.theories[lit.table.service]._top_down_eval(context, caller)

    def _top_down_truth(self, context, caller):
        """Top down evaluation.

        Do top-down evaluation over the root theory at which
        the call was made and all the included theories.
        """
        # return self._top_down_th(context, caller)
        return self._top_down_includes(context, caller)

    def _top_down_includes(self, context, caller):
        """Top-down evaluation of all the theories included in this theory."""
        is_true = self._top_down_th(context, caller)
        if is_true and not caller.find_all:
            return True
        for th in self.includes:
            is_true = th._top_down_includes(context, caller)
            if is_true and not caller.find_all:
                return True
        return False

    def _top_down_th(self, context, caller):
        """Top-down evaluation for the rules in self."""
        # LOG.debug("%s._top_down_th(%s)", self.name, context)
        lit = context.literals[context.literal_index]
        self._print_call(lit, context.binding, context.depth)

        for rule in self.head_index(lit.table.table,
                                    lit.plug(context.binding)):
            unifier = self.new_bi_unifier()
            self._print_note(lit, context.binding, context.depth,
                             "Trying %s" % rule)
            # Prefer to bind vars in rule head
            undo = self.bi_unify(self.head(rule), unifier, lit,
                                 context.binding, self.name)
            if undo is None:  # no unifier
                continue
            if len(self.body(rule)) == 0:
                # Fact (empty body): the head unification alone suffices.
                if self._top_down_finish(context, caller):
                    unify.undo_all(undo)
                    if not caller.find_all:
                        return True
                else:
                    unify.undo_all(undo)
            else:
                # Rule: push its body as a new goal list and recurse.
                new_context = self.TopDownContext(
                    rule.body, 0, unifier, context, self, context.depth + 1)
                if self._top_down_eval(new_context, caller):
                    unify.undo_all(undo)
                    if not caller.find_all:
                        return True
                else:
                    unify.undo_all(undo)
        self._print_fail(lit, context.binding, context.depth)
        return False

    def _top_down_finish(self, context, caller, redo=True):
        """Helper function.

        This is called once top_down successfully completes
        a proof for a literal. Handles (i) continuing search
        for those literals still requiring proofs within CONTEXT,
        (ii) adding solutions to CALLER once all needed proofs have
        been found, and (iii) printing out Redo/Exit during tracing.
        Returns True if the search is finished and False otherwise.
        Temporary, transparent modification of CONTEXT.
        """
        if context is None:
            # Found an answer; now store it
            if caller is not None:
                # flatten bindings and store before we undo
                # copy caller.support and store before we undo
                binding = {}
                for var in caller.variables:
                    binding[var] = caller.binding.apply(var)
                result = self.TopDownResult(
                    binding, [support[0].plug(support[1], caller=caller)
                              for support in caller.support])
                caller.results.append(result)
            return True
        else:
            self._print_exit(context.literals[context.literal_index],
                             context.binding, context.depth)
            # continue the search
            if context.literal_index < len(context.literals) - 1:
                context.literal_index += 1
                finished = context.theory._top_down_eval(context, caller)
                context.literal_index -= 1  # in case answer is False
            else:
                finished = self._top_down_finish(context.previous, caller)
            # return search result (after printing a Redo if failure)
            if redo and (not finished or caller.find_all):
                self._print_redo(context.literals[context.literal_index],
                                 context.binding, context.depth)
            return finished

    # Tracing helpers: emit Prolog-style Call/Exit/Save/Fail/Redo/Note
    # lines through self.log, indented by proof depth.

    def _print_call(self, literal, binding, depth):
        msg = "{}Call: %s".format("| " * depth)
        self.log(literal.tablename(), msg, literal.plug(binding))

    def _print_exit(self, literal, binding, depth):
        msg = "{}Exit: %s".format("| " * depth)
        self.log(literal.tablename(), msg, literal.plug(binding))

    def _print_save(self, literal, binding, depth):
        msg = "{}Save: %s".format("| " * depth)
        self.log(literal.tablename(), msg, literal.plug(binding))

    def _print_fail(self, literal, binding, depth):
        msg = "{}Fail: %s".format("| " * depth)
        self.log(literal.tablename(), msg, literal.plug(binding))
        return False

    def _print_redo(self, literal, binding, depth):
        msg = "{}Redo: %s".format("| " * depth)
        self.log(literal.tablename(), msg, literal.plug(binding))
        return False

    def _print_note(self, literal, binding, depth, msg):
        self.log(literal.tablename(), "{}Note: {}".format("| " * depth,
                                                          msg))

    #########################################
    # Routines for specialization

    @classmethod
    def new_bi_unifier(cls, dictionary=None):
        """Return a unifier compatible with unify.bi_unify."""
        return unify.BiUnifier(dictionary=dictionary)
        # lambda (index):
        #     compile.Variable("x" + str(index)), dictionary=dictionary)

    def defined_tablenames(self):
        """Returns list of table names defined in/written to this theory."""
        raise NotImplementedError

    def head_index(self, table, match_literal=None):
        """Return head index.

        This routine must return all the formulas pertinent for
        top-down evaluation when a literal with TABLE is at the top
        of the stack.
        """
        raise NotImplementedError

    def head(self, formula):
        """Given the output from head_index(), return the formula head.

        Given a FORMULA, return the thing to unify against.
        Usually, FORMULA is a compile.Rule, but it could be anything
        returned by HEAD_INDEX.
        """
        raise NotImplementedError

    def body(self, formula):
        """Return formula body.

        Given a FORMULA, return a list of things to push onto the
        top-down eval stack.
        """
        raise NotImplementedError

    def bi_unify(self, head, unifier1, body_element, unifier2, theoryname):
        """Unify atoms.

        Given something returned by self.head HEAD and an element in
        the return of self.body BODY_ELEMENT, modify UNIFIER1 and UNIFIER2
        so that HEAD.plug(UNIFIER1) == BODY_ELEMENT.plug(UNIFIER2).
        Returns changes that can be undone via unify.undo-all.
        THEORYNAME is the name of the theory for HEAD.
        """
        return unify.bi_unify_atoms(head, unifier1, body_element, unifier2,
                                    theoryname)

    #########################################
    # Routines for unknowns

    def instances(self, rule, possibilities=None):
        """Return the set of groundings of RULE (see _instances)."""
        results = set([])
        possibilities = possibilities or []
        self._instances(rule, 0, self.new_bi_unifier(), results, possibilities)
        return results

    def _instances(self, rule, index, binding, results, possibilities):
        """Return all instances of the given RULE without evaluating builtins.

        Assumes self.head_index returns rules with empty bodies.
        """
        if index >= len(rule.body):
            # All body literals handled: record this grounding of the rule.
            results.add(rule.plug(binding))
            return
        lit = rule.body[index]
        self._print_call(lit, binding, 0)
        # if already ground or a builtin, go to the next literal
        if (lit.is_ground() or lit.is_builtin()):
            self._instances(rule, index + 1, binding, results, possibilities)
            return
        # Otherwise, find instances in this theory
        if lit.tablename() in possibilities:
            options = possibilities[lit.tablename()]
        else:
            options = self.head_index(lit.tablename(), lit.plug(binding))
        for data in options:
            self._print_note(lit, binding, 0, "Trying: %s" % repr(data))
            undo = unify.match_atoms(lit, binding, self.head(data))
            if undo is None:  # no unifier
                continue
            self._print_exit(lit, binding, 0)
            # recurse on the rest of the literals in the rule
            self._instances(rule, index + 1, binding, results, possibilities)
            if undo is not None:
                unify.undo_all(undo)
            self._print_redo(lit, binding, 0)
        self._print_fail(lit, binding, 0)
4809597 | <filename>cpu_cores/common.py
# This file is part of cpu_cores released under the MIT license.
# See the LICENSE file for more information.
import sys
class CPUCoresCounter(object):
    """Base class for platform-specific physical CPU core counting.

    Subclasses implement ``_count`` to populate the two cached counts;
    use ``factory()`` to obtain the right subclass for the platform.
    """

    platform = None
    _physical_cores_count = None
    _physical_processors_count = None

    def _count(self, *args, **kwargs):
        # Platform subclasses must implement the actual counting logic.
        raise NotImplementedError()

    @classmethod
    def factory(cls, force_platform=None):
        """Return a counter for the current (or forced) platform.

        :raises NotImplementedError: for unsupported platforms.
        """
        cls.platform = (force_platform if force_platform is not None
                        else sys.platform)
        if cls.platform.startswith('darwin'):
            from cpu_cores.darwin import DarwinCPUCoresCounter
            return DarwinCPUCoresCounter()
        if cls.platform.startswith('linux'):
            from cpu_cores.linux import LinuxCPUCoresCounter
            return LinuxCPUCoresCounter()
        raise NotImplementedError("unsupported platform type [%s]" %
                                  cls.platform)

    def _check_counting_or_do_it(self):
        # Lazily populate both cached counts on first access.
        if (self._physical_processors_count is None or
                self._physical_cores_count is None):
            self._count()

    def get_physical_cores_count(self):
        """Return the number of physical CPU cores (cached)."""
        self._check_counting_or_do_it()
        return self._physical_cores_count

    def get_physical_processors_count(self):
        """Return the number of physical processors (cached)."""
        self._check_counting_or_do_it()
        return self._physical_processors_count
| StarcoderdataPython |
9772230 | """
Image Match 의 ImageSignature 모듈로 이미지 시그니쳐 벡터를 추출하는 모듈입니다.
"""
import os
import numpy as np
from image_match.goldberg import ImageSignature
from tqdm import tqdm
from config import *
def extract_signature():
    """Compute Goldberg image signatures for every image in IMG_DIR.

    Writes the sorted list of image paths to ``DATA_DIR/IMG_PATHS`` (one per
    line) and the stacked signature matrix, shape ``(n, sig_length)``, to
    ``DATA_DIR/SIGNATURES`` as a ``.npy`` file. Row i of the matrix
    corresponds to line i of the path list. All the upper-case names come
    from ``config`` via the star import at the top of the file.
    """
    # Bug fix: the path-list file lives in DATA_DIR, so the directory must
    # exist *before* the first write (the original only created it at the
    # very end, after the path list had already been written).
    if not os.path.exists(DATA_DIR):
        os.makedirs(DATA_DIR)

    # Sort first so the saved path list lines up row-for-row with the
    # signature matrix.
    filenames = sorted(os.listdir(IMG_DIR))
    img_paths = [os.path.join(IMG_DIR, name) for name in filenames
                 if name.endswith(IMG_EXT)]

    with open(os.path.join(DATA_DIR, IMG_PATHS), 'w') as f:
        f.writelines(line + "\n" for line in img_paths)

    # init a signature generator
    gis = ImageSignature()

    # Collect per-image signatures, then stack once at the end — avoids the
    # O(n^2) cost of np.concatenate inside the loop. dtype=float matches the
    # float64 matrix the original concatenation produced.
    sigs = [gis.generate_signature(p) for p in tqdm(img_paths)]
    if sigs:
        signatures = np.asarray(sigs, dtype=float)
    else:
        signatures = np.empty((0, gis.sig_length))

    # save signatures to npy file
    np.save(os.path.join(DATA_DIR, SIGNATURES), signatures)


if __name__ == '__main__':
    extract_signature()
| StarcoderdataPython |
374312 | import xml.etree.ElementTree as ET
import re
import os
########################################################
# this is meant to be run from the docs folder
# if running manually, cd docs first
########################################################
# Parse every task XML file under ../xml/tasks into a plain-python dictionary
# (name, category, description fields, parameters and subparameter
# constraints) and collect the dictionaries in ``tasklist``.
tasknames = os.listdir('../xml/tasks')
# loop through each task xml webpage and parse the xml to python dictionaries
tasklist = []
for ii, task in enumerate(tasknames):
    with open('../xml/tasks/'+task, 'r') as fid:
        xmlstring = fid.read()
    xmlroot = ET.fromstring(xmlstring)
    # files whose root tag carries no XML namespace are not task definitions
    if '}' not in xmlroot.tag:
        print('### skipping ' + task)
        continue
    # nps = the '{namespace}' prefix ElementTree prepends to every tag
    nps = xmlroot.tag[:xmlroot.tag.rindex('}')+1]
    troot = xmlroot.find(nps+'task') # xml root of the task
    # initialize task dictionary (td); category keeps only its first token
    td = {'name': troot.attrib['name'], 'category': troot.attrib['category'].split(',')[0].split('/')[0].split(' ')[0]}
    # copy every direct child element (except <params>) as a text field
    td.update(dict([(ee.tag.replace(nps, ''), ee.text) for ee in list(troot) if ee.tag not in [nps+'params']]))
    # fix bad category
    if td['category'] == 'import':
        td['category'] = 'data'
    # parameters
    if troot.find(nps+'input') is not None:
        iroot = troot.find(nps+'input')
        td['params'] = {}
        td['subparams'] = {}
        for param in iroot.findall(nps + 'param'):
            pd = param.attrib
            pd['shortdescription'] = '' if param.find(nps+'shortdescription') is None else param.find(nps+'shortdescription').text
            pd['description'] = '' if param.find(nps+'description') is None else param.find(nps+'description').text
            # overwrite param type with limittype if present
            if (param.find(nps+'any') is not None) and ('limittypes' in param.find(nps+'any').attrib):
                pd['type'] = ', '.join(param.find(nps+'any').attrib['limittypes'].split(' '))
            elif (param.find(nps + 'any') is not None) and ('type' in param.find(nps + 'any').attrib):
                pd['type'] = ', '.join(param.find(nps + 'any').attrib['type'].split(' '))
            # overwrite param type with value type if it is still 'any', also store value itself as default
            if param.find(nps + 'value') is not None:
                if ('type' in param.find(nps+'value').attrib) and (pd['type'] == 'any'):
                    pd['type'] = param.find(nps+'value').attrib['type']
                pd['value'] = param.find(nps + 'value').text
                # list-valued defaults are rendered as python list literals
                if (len(list(param.find(nps + 'value'))) > 0) and ('string' in pd['type']):
                    pd['value'] = '['+', '.join(['\''+ee.text+'\'' if ee.text is not None else '\'\'' for ee in list(param.find(nps + 'value'))])+']'
                elif len(list(param.find(nps + 'value'))) > 0:
                    pd['value'] = '['+', '.join([ee.text if ee.text is not None else '\'\'' for ee in list(param.find(nps + 'value'))])+']'
                elif ('array' in pd['type'].split(',')[0].lower()) and (not pd['type'].startswith('[')):
                    pd['value'] = '[' + pd['value'] + ']' if pd['value'] is not None else '[\'\']'
            # store parameter dictionary under key equal to parameter name
            td['params'][param.attrib['name']] = pd
        # subparameter constraints
        if iroot.find(nps + 'constraints') is not None:
            for parent in list(iroot.find(nps + 'constraints')):
                param = parent.attrib['param']
                for condition in list(parent): # equals, notequals
                    condstr = condition.tag.replace(nps,'').replace('notequals','!=').replace('equals','=')
                    paramstr = "%s %s %s" % (param, condstr, condition.attrib['value'] if len(condition.attrib['value']) > 0 else '\'\'')
                    cd = {} # condition dictionary
                    for sub in list(condition):
                        if sub.tag.replace(nps,'') == 'description': continue
                        cd[sub.attrib['param']] = ['' if ee.text is None else ee.text for ee in sub.findall(nps+'value')]
                    td['subparams'][paramstr] = cd
    tasklist += [td]
####################################################################
# now we have all the tasks in an array of dictionaries
# for each one, create a python function stub,
# write the parameters to docstring format
# and marry up the Plone description page to the bottom
# helper function to return a string of type and default value for a given parameter
def ParamSpec(param, task_dict=None):
    """Return the ``name_ (type=default)`` fragment for one task parameter.

    param: parameter name to look up.
    task_dict: the task dictionary to read from.  Defaults to the
    module-level ``task`` loop variable, which keeps the original
    global-based call sites (``ParamSpec(param)``) working unchanged while
    making the helper usable and testable with an explicit dictionary.
    """
    td = task if task_dict is None else task_dict
    pd = td['params'][param]
    # multiple allowed types are rendered as a {a, b} set
    ptype = '{%s}'%pd['type'] if len(pd['type'].split(', ')) > 1 else pd['type']
    proto = '%s_ (%s=\'\')' % (param, ptype)
    # must exist params don't have default values
    if ('mustexist' in pd) and (pd['mustexist'] == 'true'):
        proto = '%s_ (%s)' % (param, ptype)
    elif ('value' in pd) and (pd['value'] is not None):
        # string-like defaults are quoted, everything else is verbatim
        if ('string' in pd['type'].split(', ')) or ('variant' in ptype):
            proto = '%s_ (%s=\'%s\')' % (param, ptype, pd['value'].strip())
        else:
            proto = '%s_ (%s=%s)' % (param, ptype, pd['value'].strip())
    return proto
# clean out old data
# fix: use shutil/os instead of shelling out with os.system('rm -fr' /
# 'mkdir'), which is unix-only and ignores failures silently
import shutil
if os.path.exists('../casatasks'):
    shutil.rmtree('../casatasks')
os.makedirs('../casatasks')
# For each parsed task that has a matching .rst description page, write a
# python stub module under ../casatasks/<category>/<name>.py whose docstring
# combines the generated parameter listing with the rst content.
for task in tasklist:
    # grab rst description page if it exists, otherwise skip this task
    rst = ''
    if os.path.exists('tasks/task_' + task['name'] + '.rst'):
        with open('tasks/task_' + task['name'] + '.rst', 'r') as fid:
            rst = fid.read()
    else:
        continue
    if not os.path.exists('../casatasks/'+task['category']):
        os.system('mkdir ../casatasks/'+task['category'])
    # change image links so relative media paths resolve from the stub location
    rst = re.sub('(\.\. \|.*?\| image:: )_apimedia/(\S*)\s*?\n', r'\1../../tasks/_apimedia/\2\n', rst, flags=re.DOTALL)
    rst = re.sub('(\.\. figure:: )_apimedia/(\S*)\s*?\n', r'\1../../tasks/_apimedia/\2\n', rst, flags=re.DOTALL)
    # add this task to the __init__.py
    with open('../casatasks/'+task['category']+'/'+'__init__.py', 'a') as fid:
        fid.write('from .' + task['name'] + ' import *\n')
    # write the python stub function
    with open('../casatasks/'+task['category']+'/'+task['name']+'.py', 'w') as fid:
        fid.write('#\n# stub function definition file for docstring parsing\n#\n\n')
        # build the function prototype, start with params that have no default
        proto = [pp for pp in task['params'] if ('mustexist' in task['params'][pp]) and (task['params'][pp]['mustexist']=='true')]
        proto = ', '.join(proto) + ', ' if len(proto) > 0 else ''
        for param in task['params'].keys():
            # must exist params don't have default values
            if ('mustexist' not in task['params'][param]) or (task['params'][param]['mustexist'] == 'false'):
                # reuse the '=default' tail of the ParamSpec rendering
                proto += '%s%s, ' % (param, ParamSpec(param)[ParamSpec(param).rindex('='):-1])
        fid.write('def %s(%s):\n r"""\n' % (task['name'], proto[:-2]))
        # populate function description
        if 'shortdescription' in task.keys():
            fid.write(task['shortdescription']+'\n\n')
        elif 'description' in task.keys():
            fid.write(re.sub('\s+', ' ', task['description'].strip(), flags=re.DOTALL) + '\n\n')
        else:
            fid.write(' \n\n')
        # populate a horizontal toc nav bar
        fid.write('[' + '] ['.join(['`%s`_' % section for section in ['Description', 'Examples', 'Development', 'Details']]) + ']\n\n')
        # populate function parameters
        fid.write('\nParameters\n')
        for param in task['params'].keys():
            # skip subparameters for now, they are handled below for each regular parameter
            if ('subparam' in task['params'][param]) and (task['params'][param]['subparam'].lower() == 'true'):
                continue
            fid.write(' - %s' % ParamSpec(param))
            if ('shortdescription' in task['params'][param].keys()) and (task['params'][param]['shortdescription'] is not None):
                if len(task['params'][param]['shortdescription'].strip()) > 0:
                    fid.write(' - %s' % task['params'][param]['shortdescription'])
            fid.write('\n')
            # populate function subparameters (if any); constraint keys look
            # like "<param> = <value>" or "<param> != <value>"
            subparmkeys = [ee for ee in task['subparams'].keys() if ee.startswith(param + ' =') or ee.startswith(param + ' !=')]
            for paramstr in subparmkeys:
                if len(task['subparams'][paramstr]) > 0:
                    fid.write('\n .. raw:: html\n\n <details><summary><i> %s </i></summary>\n\n' % paramstr)
                # grab each subparam from the main param section and write it out
                for subparam in task['subparams'][paramstr].keys():
                    if subparam not in task['params']: continue
                    fid.write(' - %s' % ParamSpec(subparam))
                    if ('shortdescription' in task['params'][subparam].keys()) and (task['params'][subparam]['shortdescription'] is not None):
                        if len(task['params'][subparam]['shortdescription'].strip()) > 0:
                            fid.write(' - %s' % task['params'][subparam]['shortdescription'])
                    fid.write('\n')
                if len(task['subparams'][paramstr]) > 0:
                    fid.write('\n .. raw:: html\n\n </details>\n')
        # marry up the Plone content to the bottom Notes section
        fid.write('\n\n' + rst + '\n\n')
        # add long descriptions of each parameter as footnotes at the bottom
        fid.write('.. _Details:\n\n')
        fid.write('\nParameter Details\n Detailed descriptions of each function parameter\n\n')
        for param in task['params'].keys():
            if ('description' in task['params'][param].keys()) and (task['params'][param]['description'] is not None):
                fid.write('.. _%s:\n\n' % param)
                fid.write('| ``%s`` - ' % ParamSpec(param).replace('_ ', ' '))
                fid.write('%s\n\n' % re.sub('\n+', '\n| ', task['params'][param]['description'].strip(), flags=re.DOTALL))
        # close docstring stub
        fid.write('\n """\n pass\n')
class MedianFinder:
    """Running-median container using the classic two-heap technique.

    ``min_heap`` holds the upper half of the numbers; ``max_heap`` holds the
    lower half negated (heapq only provides a min-heap).  Invariant:
    len(min_heap) == len(max_heap) or len(min_heap) == len(max_heap) + 1.

    heappush/heappop/heappushpop are expected from ``from heapq import *``
    in the surrounding environment (LeetCode-style snippet).
    """

    def __init__(self):
        # fix: the original annotated these as ``List[int]`` without importing
        # typing.List; annotations on attribute targets are evaluated at
        # runtime, so the first MedianFinder() raised NameError.
        self.min_heap = []  # upper half of the values (min-heap)
        self.max_heap = []  # lower half, stored negated (min-heap as max-heap)

    def addNum(self, num: int) -> None:
        """Insert ``num`` in O(log n)."""
        # Route through the upper heap: its smallest element migrates down.
        heappush(self.max_heap, -heappushpop(self.min_heap, num))
        # Rebalance so the upper heap never falls behind the lower one.
        if len(self.max_heap) > len(self.min_heap):
            heappush(self.min_heap, -heappop(self.max_heap))

    def findMedian(self) -> float:
        """Return the median of all numbers added so far, in O(1)."""
        has_even_count = len(self.max_heap) == len(self.min_heap)
        if has_even_count:
            return (-self.max_heap[0] + self.min_heap[0])/2.0
        return float(self.min_heap[0])
# Your MedianFinder object will be instantiated and called as such:
# obj = MedianFinder()
# obj.addNum(num)
# param_2 = obj.findMedian()
| StarcoderdataPython |
1707408 | <filename>qiling/qiling/extensions/coverage/formats/base.py
#!/usr/bin/env python3
#
# Cross Platform and Multi Architecture Advanced Binary Emulation Framework
#
from abc import ABC, abstractmethod
class QlBaseCoverage(ABC):
    """
    An abstract base class for concrete code coverage collectors.
    To add support for a new coverage format, just derive from this class and implement
    all the methods marked with the @abstractmethod decorator.
    """
    def __init__(self):
        super().__init__()
    # NOTE(review): stacking @property on @staticmethod is unusual; it acts
    # here as an "abstract class attribute" marker — subclasses are expected
    # to shadow FORMAT_NAME with a plain class attribute.  Accessing it on an
    # instance of this base class would not behave like a normal property;
    # confirm before refactoring.
    @property
    @staticmethod
    @abstractmethod
    def FORMAT_NAME():
        raise NotImplementedError
    @abstractmethod
    def activate(self):
        # begin collecting coverage data
        pass
    @abstractmethod
    def deactivate(self):
        # stop collecting coverage data
        pass
    @abstractmethod
    def dump_coverage(self, coverage_file):
        # write the collected coverage to ``coverage_file`` in this format
        pass
11369904 | <gh_stars>0
"""Module to parse the settings file.
This file first reads the default settings file, and then optionally reads a
settings file in the working directory to override any of the settings.
"""
import json
import os
# Figure out the root directory for our package.
dirname = os.path.dirname
package_root_directory = dirname(dirname(os.path.abspath(__file__)))

# First, load the default settings.
# fix: the original used ``json.load(open(path))``, leaking the file handle;
# use context managers and an explicit encoding (JSON files are UTF-8).
default_path = os.path.join(package_root_directory, 'avocado_settings.json')
with open(default_path, encoding='utf-8') as f:
    settings = json.load(f)

# Next, override with user settings from the working directory (if any).
user_path = os.path.join(os.getcwd(), settings['user_settings_file'])
try:
    with open(user_path, encoding='utf-8') as f:
        user_settings = json.load(f)
except FileNotFoundError:
    # No user settings available. Just use the defaults.
    pass
else:
    settings.update(user_settings)
| StarcoderdataPython |
1804329 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 20 12:06:21 2017
@author: cuijiaxu
"""
import numpy as np
import scipy.io
import scipy.stats
import pylab as pl
from plotshadedErrorBar import plotshadedErrorBar2
from matplotlib.ticker import FuncFormatter
def getconvcurve(data):
    """Return the running best (cumulative maximum) of column 1 of ``data``.

    data: 2-D array-like whose second column holds objective values.
    Returns a float array ``curve`` with ``curve[i] = max(data[:i+1, 1])``.
    Equivalent to the original per-row ``max`` loop but computed in a single
    O(n) pass instead of O(n^2).
    """
    values = np.asarray(data, dtype=float)[:, 1]
    return np.maximum.accumulate(values)
def plot_one_convcurve(data,ax,color):
    # Plot one convergence curve (running best of column 1) against the
    # iteration counter in column 0; returns the Line2D handle.
    a,=ax.plot(data[:,0],getconvcurve(data),color = color)
    return a
def plot_multi_convcurves(data_list,max_x,ax,color,linestyle,max_y=None):
    """Plot the mean convergence curve over several runs with a 2-sigma band.

    Each run in data_list is padded (with its final best value) or truncated
    so all runs span exactly max_x evaluations before averaging.  When max_y
    is given, the upper band is clipped at max_y.
    """
    data_list_truncated=[]
    for data in data_list:
        data1=np.zeros(2*max_x).reshape(max_x,2)
        if data.shape[0]<max_x:
            # shorter runs are padded with their overall best value
            data1[:,1]=np.array(list(data[:,1])+[max(data[:,1]) for i in range(max_x-len(data[:,1]))])
        else:
            data1[:,1]=data[0:max_x,1]
        data_list_truncated.append(data1)
    curves=[]
    for data_ in data_list_truncated:
        #print data_.shape
        curves.append(getconvcurve(data_))
    curves=np.array(curves)
    dis_mean=np.mean(curves,axis=0)
    # 2-sigma half-width of the shaded band
    dis_var=2.0*np.std(curves,axis=0)
    #print dis_mean.shape,dis_var.shape
    if max_y==None:
        # NOTE(review): only plotshadedErrorBar2 is imported at the top of
        # this file — this branch raises NameError as written; confirm the
        # intended import of plotshadedErrorBar.
        return plotshadedErrorBar(np.linspace(1,len(dis_mean),len(dis_mean)).reshape(1,len(dis_mean)),dis_mean.reshape(1,len(dis_mean)),dis_var.reshape(1,len(dis_mean)),ax,color,linestyle)
    else:
        dis_up=dis_mean+dis_var
        # clip the upper band at max_y
        dis_up[dis_up>max_y]=max_y
        dis_down=dis_mean-dis_var
        return plotshadedErrorBar2(np.linspace(1,len(dis_mean),len(dis_mean)).reshape(1,len(dis_mean)),dis_mean.reshape(1,len(dis_mean)),dis_up.reshape(1,len(dis_mean)),dis_down.reshape(1,len(dis_mean)),ax,color,linestyle)
| StarcoderdataPython |
4961290 | <reponame>git2samus/praw
"""PRAW exception classes.
Includes two main exceptions: :class:`.APIException` for when something goes
wrong on the server side, and :class:`.ClientException` when something goes
wrong on the client side. Both of these classes extend :class:`.PRAWException`.
"""
from typing import Optional
class PRAWException(Exception):
    """The base PRAW Exception that all other exception classes extend."""


class APIException(PRAWException):
    """Indicate exceptions that involve responses from Reddit's API."""

    def __init__(self, error_type: str, message: str, field: Optional[str]):
        """Initialize an instance of APIException.

        :param error_type: The error type set on Reddit's end.
        :param message: The associated message for the error.
        :param field: The input field associated with the error if available.
        """
        # f-strings instead of str.format (same rendered message, idiomatic
        # for the python-3-only, annotated style of this module)
        error_str = f"{error_type}: '{message}'"
        if field:
            error_str += f" on field '{field}'"
        super().__init__(error_str)
        self.error_type = error_type
        self.message = message
        self.field = field


class ClientException(PRAWException):
    """Indicate exceptions that don't involve interaction with Reddit's API."""


class DuplicateReplaceException(ClientException):
    """Indicate exceptions that involve the replacement of MoreComments."""

    def __init__(self):
        """Initialize the class."""
        super().__init__(
            "A duplicate comment has been detected. Are you attempting to call"
            " ``replace_more_comments`` more than once?"
        )


class WebSocketException(ClientException):
    """Indicate exceptions caused by use of WebSockets."""

    def __init__(self, message: str, exception: Exception):
        """Initialize a WebSocketException.

        :param message: The exception message.
        :param exception: The exception thrown by the websocket library,
            kept on ``original_exception`` for callers to inspect.
        """
        super().__init__(message)
        self.original_exception = exception
| StarcoderdataPython |
11351676 | import torch
import numpy as np
import yaml
from . import networks
from . import math_utils
from ...common.utils import pprint_dict
def _combine(accel_pred, metric_pred, jacobians, extra_metrics):
B = []
C = []
for i in range(len(jacobians)):
jacobian = jacobians[i]
metric = metric_pred[i]
accel = accel_pred[i]
B.append(np.linalg.multi_dot((jacobian.T, metric, accel)))
C.append(np.linalg.multi_dot((jacobian.T, metric, jacobian)))
for jacobian, metric, ddx in extra_metrics:
B.append(np.linalg.multi_dot((jacobian.T, metric, ddx)))
C.append(np.linalg.multi_dot((jacobian.T, metric, jacobian)))
B = np.sum(B, axis=0)
C = np.sum(C, axis=0)
accel = np.dot(np.linalg.pinv(C), B)
return accel
def make_nets(specs, device):
    """Instantiate one network per entry of ``specs`` and move it to ``device``.

    Each spec maps a name to {'class': <class name looked up in the
    ``networks`` module>, 'net_kwargs': optional constructor kwargs}.
    Returns a {name: network} dict.
    """
    built = {}
    for net_name, spec in specs.items():
        net_cls = getattr(networks, spec['class'])
        kwargs = spec.get('net_kwargs', {})
        built[net_name] = net_cls(**kwargs).to(device)
    return built
class NeuralRMP(object):
    """Neural Riemannian-Motion-Policy model loaded from a checkpoint.

    Wraps the encoder/regressor networks saved in ``weights_file`` and
    exposes :meth:`compute_optimal_accel`, which combines the predicted
    per-control-point accelerations/metrics with externally supplied terms.
    """
    def __init__(self, weights_file, n_control_points=4, device='cuda'):
        state_dict = torch.load(weights_file, map_location='cpu')
        print('loaded %s' % weights_file)
        # global training-time arguments stored alongside the weights
        g = state_dict.get('global_args', {})
        print('global args:')
        print(pprint_dict(g))
        self.g = g
        # model_spec is either an inline dict or a path to a yaml spec file
        # NOTE(review): yaml.load without an explicit Loader and open()
        # without close here — confirm whether this matters for your usage.
        if isinstance(g.model_spec, dict):
            nets = make_nets(g.model_spec, device)
        else:
            nets = make_nets(yaml.load(open(g.model_spec).read()), device)
        for name, net in nets.items():
            net.load_state_dict(state_dict['nets'][name])
            # inference mode: disable dropout/batchnorm updates
            net.train(False)
        self.device = device
        self.nets = nets
        self.n_control_points = n_control_points
        # last predictions, kept for inspection by callers
        self.control_point_accels = None
        self.control_point_metrics = None
    def _run_model(self, img, wp_local, vel_local, angular_vel):
        # Forward pass through the encoders + regressor, without gradients.
        # assumes the inputs carry a leading batch dimension of 1 — TODO confirm
        with torch.no_grad():
            nets = self.nets
            img_th = torch.as_tensor(img, dtype=torch.float32, device=self.device)
            wp_th = torch.as_tensor(wp_local, dtype=torch.float32, device=self.device)
            vel_th = torch.as_tensor(vel_local, dtype=torch.float32, device=self.device)
            angular_vel_th = torch.as_tensor(angular_vel, dtype=torch.float32, device=self.device)
            img_feature = nets['img_encoder'](img_th)
            goal_feature = nets['wp_encoder'](wp_th)
            vel_feature = nets['vel_encoder'](vel_th)
            angular_vel_feature = nets['angular_vel_encoder'](angular_vel_th)
            feature = torch.cat([img_feature, goal_feature, vel_feature, angular_vel_feature],
                                dim=-1)
            pred_accel, pred_metric = nets['rmp_regressor'](feature)
            # undo the output scaling applied during training; one 2-vector
            # acceleration and one 2x2 metric per control point
            pred_accel = pred_accel.data.cpu().numpy().reshape((self.n_control_points, 2)) / self.g.accel_output_scale
            pred_metric = pred_metric.data.cpu().numpy().reshape((self.n_control_points, 2, 2)) / self.g.metric_output_scale
            # optionally map log-space metric predictions back to PSD matrices
            if self.g.get('log_metric', False):
                pred_metric = np.array([math_utils.exp_psd_matrix(m) for m in pred_metric], np.float32)
            return pred_accel, pred_metric
    def compute_optimal_accel(
            self, img, wp_local, vel_local, angular_vel, jacobians, extra_metrics):
        '''
        Predict per-control-point accelerations/metrics and combine them
        (together with the provided extra terms) into one optimal
        acceleration via the metric-weighted least squares in ``_combine``.

        :param extra_metrics: a list of tuples (jacobian, metric, acceleration) to be combined.
        :return: the combined acceleration vector.
        '''
        accel_pred, metric_pred = self._run_model(
            img[None, :].astype(np.float32),
            wp_local[None, :].astype(np.float32) * self.g.waypoint_scale,
            vel_local[None, :].astype(np.float32),
            np.atleast_1d(angular_vel)[None, :].astype(np.float32))
        # stash raw predictions for callers that want to inspect them
        self.control_point_accels = accel_pred
        self.control_point_metrics = metric_pred
        return _combine(accel_pred, metric_pred, jacobians, extra_metrics)
| StarcoderdataPython |
6528402 | <filename>test/tello/test_protocol.py
from unittest.mock import Mock
from tello.tello_protocol import TelloProtocol
def test_connection_made():
    # connection_made must immediately send the stored command bytes
    tello_protocol = TelloProtocol("test command", Mock())
    tello_protocol.connection_made(Mock())
    tello_protocol.transport.sendto.assert_called_once_with(b"test command")
def test_datagram_received():
    # receiving any datagram closes the transport (one command, one reply)
    tello_protocol = TelloProtocol("test command", Mock())
    tello_protocol.connection_made(Mock())
    tello_protocol.datagram_received(b"test response", ("localhost", 9999))
    tello_protocol.transport.close.assert_called_once()
def test_error_received():
    # smoke test: error_received must not raise
    # NOTE(review): this test has no assertion — it only checks for crashes
    tello_protocol = TelloProtocol("test command", Mock())
    tello_protocol.error_received(Exception("test error"))
def test_connection_lost():
    # while the done-future is still pending, connection_lost resolves it with True
    done_callback = Mock()
    done_callback.cancelled.return_value = False
    tello_protocol = TelloProtocol("test command", done_callback)
    tello_protocol.connection_lost(None)
    tello_protocol.done_callback.set_result.assert_called_once_with(True)
def test_connection_lost_timeout():
    # if the done-future was already cancelled (timeout), it must not be resolved
    done_callback = Mock()
    done_callback.cancelled.return_value = True
    tello_protocol = TelloProtocol("test command", done_callback)
    tello_protocol.connection_lost(None)
    tello_protocol.done_callback.set_result.assert_not_called()
| StarcoderdataPython |
11394289 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
#encoding: utf-8
import os
def ParseFilePath(url, id):
    """Derive the output (folder, filepath) for a netease news article URL.

    url: article URL of the form http://host/<year>/<MMDD>/<idx>/<page>;
    id: stock id used as the per-stock output folder name (note: parameter
    name shadows the ``id`` builtin, kept for caller compatibility).
    Returns ``filepath == 'xxx'`` as a sentinel for pages without a year or
    for keyword-search result pages, which the pipeline skips.
    """
    # user should change this folder path
    outfolder = "e:\\data\\FinTech\\News\\Stocks\\%s" % id
    components = url.split("/")
    year = components[3]
    monthday = components[4]
    month = monthday[:2]
    day = monthday[2:]
    idx = components[5]
    page = idx + "_" + components[6]
    #folder = outfolder + "\\%s_%s_%s_" % (year, month, day)
    folder = outfolder
    # fix: use boolean ``or`` instead of bitwise ``|`` on the comparisons
    if (year == '') or ('keywords' in page):
        filepath = 'xxx'
    else:
        filepath = folder + "\\%s_%s_%s_%s.txt" % (year, month, day, page)
    # '?' is not a legal filename character on Windows ('xxx' never has one)
    filepath = filepath.replace('?', '_')
    return (folder, filepath)
class Stock163Pipeline(object):
    """Scrapy pipeline that persists netease stock-news items to disk."""
    def process_item(self, item, spider):
        # only handle items from the stocknews spider; pass everything else on
        if spider.name != "stocknews": return item
        if item.get("news_thread", None) is None: return item
        url = item['news_url']
        # keyword-search result pages are not articles; pass them through
        if 'keywords' in url:
            return item
        folder, filepath = ParseFilePath(url, spider.stock_id)
        spider.counter = spider.counter+1
        counterfilepath = folder+"\\counter.txt"
        # on a single machine there is virtually no risk of a race condition
        if not os.path.exists(folder):
            os.makedirs(folder)
        #print(filepath, counterfilepath)
        #print(spider.stats)
        # persist the running article counter for this stock
        fo = open(counterfilepath, "w", encoding="UTF-8")
        fo.write(str(spider.counter))
        fo.close()
        # 'xxx' is the sentinel for "do not save this page"
        if (filepath!='xxx'):
            fo = open(filepath, 'w', encoding='utf-8')
            fo.write(str(dict(item)))
            fo.close()
        # NOTE(review): scrapy convention is to return the item so later
        # pipelines can run; returning None drops it — confirm intentional
        return None
| StarcoderdataPython |
11258571 | import sys
import time
from lib.tracing import init_tracer
from opentracing_instrumentation.request_context import get_current_span, span_in_context
def say_hello(hello_to):
    # Root span of the trace, tagged with the greeting target; span_in_context
    # makes it the implicit parent of the spans created by the helpers below.
    with tracer.start_span('say-hello') as span:
        span.set_tag('hello-to', hello_to)
        with span_in_context(span):
            hello_str = format_string(hello_to)
            print_hello(hello_str)
def format_string(hello_to):
    # Format the greeting inside a child span of the current context span.
    root_span = get_current_span()
    with tracer.start_span('format', child_of=root_span) as span:
        hello_str = 'Hello, %s!' % hello_to
        span.log_kv({'event': 'string-format', 'value': hello_str})
        return hello_str
def print_hello(hello_str):
    # Print the greeting inside a child span of the current context span.
    root_span = get_current_span()
    with tracer.start_span('println', child_of=root_span) as span:
        print(hello_str)
        span.log_kv({'event': 'println'})
# main: expects exactly one CLI argument — the name to greet
assert len(sys.argv) == 2
tracer = init_tracer('hello-world')
hello_to = sys.argv[1]
say_hello(hello_to)
# yield to IOLoop to flush the spans before the tracer shuts down
time.sleep(2)
tracer.close()
| StarcoderdataPython |
5134379 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from django.contrib import admin
from .models import Favorite
class FavoriteAdmin(admin.ModelAdmin):
    # columns shown in the admin change-list for Favorite
    list_display = ('product','user',)
# expose Favorite in the Django admin with the customized options above
admin.site.register(Favorite,FavoriteAdmin)
4817185 | <reponame>CesarArroyo09/light_geodesics_thesis
#!/usr/bin/env python
#Import packages
import numpy as np
import matplotlib.pyplot as plt
#Open data files: geodesic solutions for three spacetime configurations
# (hard-coded absolute paths from the author's machine)
f1 = open('/home/cesar/light_geodesics_thesis/frw/geodesic_solution_gamma_zero.dat','r')
f2 = open('/home/cesar/light_geodesics_thesis/perturbed_minkowski/geodesic_solution_minkowski_hernquist_poster.dat','r')
f3 = open('/home/cesar/light_geodesics_thesis/frw/geodesic_solution_poster.dat','r')
#Read file
lines1 = f1.readlines()
lines2 = f2.readlines()
lines3 = f3.readlines()
#Close file
f1.close()
f2.close()
f3.close()
#Variables to store information (columns extracted from the .dat rows)
radiusfrw = []
difftfrw = []
difftfrwfrw = []
radiusmink = []
difftmink = []
radiusisw = []
isw =[]
#Scan rows: whitespace-separated columns; indices 2/7/8/10 pick radius and
# the fractional-energy / ISW columns of the respective solution files
for line in lines1:
    p = line.split()
    radiusfrw.append(float(p[2]))
    difftfrw.append(float(p[7]))
    difftfrwfrw.append(float(p[8]))
for line in lines2:
    p = line.split()
    radiusmink.append(float(p[2]))
    difftmink.append(float(p[7]))
for line in lines3:
    p = line.split()
    radiusisw.append(float(p[2]))
    isw.append(float(p[10]))
# keep only the ISW samples beyond radius 4000 kpc/h
radiusisw2 = []
isw2 = []
length = len(radiusisw)
for i in range(length):
    if(radiusisw[i] > 4000.0):
        radiusisw2.append(radiusisw[i])
        isw2.append(isw[i])
# absolute value so the ISW curve can be drawn on a log scale below
iswabs = np.abs(isw2)
# convert the python lists to numpy arrays for plotting
radiusfrwg = np.array(radiusfrw)
difftfrwg = np.array(difftfrw)
difftfrwfrwg = np.array(difftfrwfrw)
radiusminkg = np.array(radiusmink)
difftminkg = np.array(difftmink)
radiusisw2g = np.array(radiusisw2)
isw2g = np.array(iswabs)
#Plot data: figure 1 — FRW with/without the static Hernquist perturbation
plt.figure(1)
plt.xlabel('Radius (kpc/h)', fontsize = 16)
plt.ylabel(r'$\Delta E/E$', fontsize = 20)
#plt.title('Fractional changes in energy for FRW with static Hernquist perturbation')
plt.plot(radiusfrwg,difftfrwg,'r-',label='FRW with static perturbation', linewidth = 2.5)
plt.plot(radiusfrwg,difftfrwfrwg,'k--',label='FRW', linewidth = 2.5)
plt.legend(loc='best')
plt.show()
# figure 2 — perturbed Minkowski spacetime
plt.figure(2)
plt.xlabel('Radius (kpc/h)', fontsize = 16)
plt.ylabel(r'$\Delta E/E$', fontsize = 20)
#plt.title(r'Fractional change in energy for Minkowski with Hernquist perturbation')
plt.plot(radiusminkg, difftminkg, 'b-', label='Minkowski with static perturbation', linewidth = 2.5)
plt.legend(loc='best')
plt.show()
# figure 3 — |ISW temperature fluctuation| on a log scale
# NOTE(review): 'fluctuaction' in the displayed title is a typo in the
# rendered figure text
plt.figure(3)
plt.xlabel('Radius (kpc/h)')
plt.title('Temperature fluctuaction for FRW spacetime with Hernquist sphere')
plt.yscale('log')
plt.plot(radiusisw2g, isw2g, 'g-')
plt.show()
| StarcoderdataPython |
1896068 | <gh_stars>0
import torch
import torch.nn as nn
from src.lap_solvers.sinkhorn import Sinkhorn
from src.feature_align import feature_align
from src.gconv import Siamese_ChannelIndependentConv #, Siamese_GconvEdgeDPP, Siamese_GconvEdgeOri
from models.PCA.affinity_layer import Affinity
from src.lap_solvers.hungarian import hungarian
from src.utils.config import cfg
from src.backbone import *
# WARNING: eval() executes arbitrary code — cfg.BACKBONE must come only from
# trusted configuration files (it names a backbone class imported above).
CNN = eval(cfg.BACKBONE)
class Net(CNN):
    """CIE graph-matching network: channel-independent GNN layers over two
    graphs, per-layer affinity + Sinkhorn normalization, with a cross-graph
    embedding exchange at the second-to-last layer."""
    def __init__(self):
        super(Net, self).__init__()
        self.sinkhorn = Sinkhorn(max_iter=cfg.CIE.SK_ITER_NUM, epsilon=cfg.CIE.SK_EPSILON, tau=cfg.CIE.SK_TAU)
        self.l2norm = nn.LocalResponseNorm(cfg.CIE.FEATURE_CHANNEL * 2, alpha=cfg.CIE.FEATURE_CHANNEL * 2, beta=0.5, k=0)
        self.gnn_layer = cfg.CIE.GNN_LAYER # number of GNN layers
        for i in range(self.gnn_layer):
            # first layer consumes raw CNN features; later layers consume GNN features
            if i == 0:
                gnn_layer = Siamese_ChannelIndependentConv(cfg.CIE.FEATURE_CHANNEL * 2, cfg.CIE.GNN_FEAT, 1)
            else:
                gnn_layer = Siamese_ChannelIndependentConv(cfg.CIE.GNN_FEAT, cfg.CIE.GNN_FEAT, cfg.CIE.GNN_FEAT)
            self.add_module('gnn_layer_{}'.format(i), gnn_layer)
            self.add_module('affinity_{}'.format(i), Affinity(cfg.CIE.GNN_FEAT))
            if i == self.gnn_layer - 2:  # only second last layer will have cross-graph module
                self.add_module('cross_graph_{}'.format(i), nn.Linear(cfg.CIE.GNN_FEAT * 2, cfg.CIE.GNN_FEAT))
                self.add_module('cross_graph_edge_{}'.format(i), nn.Linear(cfg.CIE.GNN_FEAT * 2, cfg.CIE.GNN_FEAT))
        self.rescale = cfg.PROBLEM.RESCALE
    def forward(self, data_dict, **kwargs):
        """Run matching on a pair of graphs packed in ``data_dict``; adds
        'ds_mat' (doubly-stochastic matrix) and 'perm_mat' (hungarian
        discretization) to the dict and returns it."""
        if 'images' in data_dict:
            # real image data
            src, tgt = data_dict['images']
            P_src, P_tgt = data_dict['Ps']
            ns_src, ns_tgt = data_dict['ns']
            G_src, G_tgt = data_dict['Gs']
            H_src, H_tgt = data_dict['Hs']
            # extract feature
            src_node = self.node_layers(src)
            src_edge = self.edge_layers(src_node)
            tgt_node = self.node_layers(tgt)
            tgt_edge = self.edge_layers(tgt_node)
            # feature normalization
            src_node = self.l2norm(src_node)
            src_edge = self.l2norm(src_edge)
            tgt_node = self.l2norm(tgt_node)
            tgt_edge = self.l2norm(tgt_edge)
            # arrange features: interpolate CNN feature maps at the keypoints
            U_src = feature_align(src_node, P_src, ns_src, self.rescale)
            F_src = feature_align(src_edge, P_src, ns_src, self.rescale)
            U_tgt = feature_align(tgt_node, P_tgt, ns_tgt, self.rescale)
            F_tgt = feature_align(tgt_edge, P_tgt, ns_tgt, self.rescale)
        elif 'features' in data_dict:
            # synthetic data: first half of the channels are node features,
            # second half edge features
            src, tgt = data_dict['features']
            ns_src, ns_tgt = data_dict['ns']
            G_src, G_tgt = data_dict['Gs']
            H_src, H_tgt = data_dict['Hs']
            U_src = src[:, :src.shape[1] // 2, :]
            F_src = src[:, src.shape[1] // 2:, :]
            U_tgt = tgt[:, :tgt.shape[1] // 2, :]
            F_tgt = tgt[:, tgt.shape[1] // 2:, :]
        else:
            raise ValueError('Unknown data type for this model.')
        # NOTE(review): P_src/P_tgt are only unpacked in the 'images' branch
        # but are used unconditionally below — the 'features' path would raise
        # NameError as written; confirm intended behavior.
        # pairwise keypoint distances -> gaussian edge weights
        P_src_dis = (P_src.unsqueeze(1) - P_src.unsqueeze(2))
        P_src_dis = torch.norm(P_src_dis, p=2, dim=3).detach()
        P_tgt_dis = (P_tgt.unsqueeze(1) - P_tgt.unsqueeze(2))
        P_tgt_dis = torch.norm(P_tgt_dis, p=2, dim=3).detach()
        Q_src = torch.exp(-P_src_dis / self.rescale[0])
        Q_tgt = torch.exp(-P_tgt_dis / self.rescale[0])
        emb_edge1 = Q_src.unsqueeze(-1)
        emb_edge2 = Q_tgt.unsqueeze(-1)
        # adjacency matrices
        A_src = torch.bmm(G_src, H_src.transpose(1, 2))
        A_tgt = torch.bmm(G_tgt, H_tgt.transpose(1, 2))
        # U_src, F_src are features at different scales
        emb1, emb2 = torch.cat((U_src, F_src), dim=1).transpose(1, 2), torch.cat((U_tgt, F_tgt), dim=1).transpose(1, 2)
        ss = []
        for i in range(self.gnn_layer):
            gnn_layer = getattr(self, 'gnn_layer_{}'.format(i))
            # during forward process, the network structure will not change
            emb1, emb2, emb_edge1, emb_edge2 = gnn_layer([A_src, emb1, emb_edge1], [A_tgt, emb2, emb_edge2])
            affinity = getattr(self, 'affinity_{}'.format(i))
            s = affinity(emb1, emb2) # xAx^T
            s = self.sinkhorn(s, ns_src, ns_tgt)
            ss.append(s)
            if i == self.gnn_layer - 2:
                # exchange node embeddings between the two graphs, weighted by
                # the current soft matching s
                cross_graph = getattr(self, 'cross_graph_{}'.format(i))
                new_emb1 = cross_graph(torch.cat((emb1, torch.bmm(s, emb2)), dim=-1))
                new_emb2 = cross_graph(torch.cat((emb2, torch.bmm(s.transpose(1, 2), emb1)), dim=-1))
                emb1 = new_emb1
                emb2 = new_emb2
                # edge cross embedding
                '''
                cross_graph_edge = getattr(self, 'cross_graph_edge_{}'.format(i))
                emb_edge1 = emb_edge1.permute(0, 3, 1, 2)
                emb_edge2 = emb_edge2.permute(0, 3, 1, 2)
                s = s.unsqueeze(1)
                new_emb_edge1 = cross_graph_edge(torch.cat((emb_edge1, torch.matmul(torch.matmul(s, emb_edge2), s.transpose(2, 3))), dim=1).permute(0, 2, 3, 1))
                new_emb_edge2 = cross_graph_edge(torch.cat((emb_edge2, torch.matmul(torch.matmul(s.transpose(2, 3), emb_edge1), s)), dim=1).permute(0, 2, 3, 1))
                emb_edge1 = new_emb_edge1
                emb_edge2 = new_emb_edge2
                '''
        data_dict.update({
            'ds_mat': ss[-1],
            'perm_mat': hungarian(ss[-1], ns_src, ns_tgt)
        })
        return data_dict
5180084 | <gh_stars>0
import os
import matplotlib.pyplot as plt
from stable_baselines3.common.callbacks import BaseCallback
from stable_baselines3.common.results_plotter import load_results, ts2xy
import numpy as np
class SaveOnBestTrainingRewardCallback(BaseCallback):
    """
    Callback for saving a model (the check is done every ``check_freq`` steps)
    based on the training reward (in practice, we recommend using ``EvalCallback``).
    :param check_freq: (int)
    :param log_dir: (str) Path to the folder where the model will be saved.
      It must contains the file created by the ``Monitor`` wrapper.
    :param verbose: (int)
    """
    def __init__(self, check_freq: int, log_dir: str, verbose=1):
        super(SaveOnBestTrainingRewardCallback, self).__init__(verbose)
        self.check_freq = check_freq
        self.log_dir = log_dir
        self.save_path = os.path.join(log_dir, 'best_model')
        # best mean reward seen so far; starts at -inf so the first check saves
        self.best_mean_reward = -np.inf
    def _on_step(self) -> bool:
        # called by stable-baselines3 after every environment step
        if self.n_calls % self.check_freq == 0:
            # Retrieve training reward
            x, y = ts2xy(load_results(self.log_dir), 'timesteps')
            if len(x) > 0:
                # Mean training reward over the last 100 episodes
                mean_reward = np.mean(y[-100:])
                if self.verbose > 0:
                    print(f"Num timesteps: {self.num_timesteps}")
                    print(
                        f"Best mean reward: {self.best_mean_reward:.2f} - Last mean reward per episode: {mean_reward:.2f}")
                # New best model, you could save the agent here
                if mean_reward > self.best_mean_reward:
                    self.best_mean_reward = mean_reward
                    # Example for saving best model
                    if self.verbose > 0:
                        print(f"Saving new best model to {self.save_path}.zip")
                    self.model.save(self.save_path)
        # returning True keeps training running
        return True
def plot_results(log_folder, title='Learning Curve'):
    """Plot a smoothed episode-return curve from a Monitor log directory.

    log_folder: directory containing the files written by the ``Monitor``
    wrapper (read with stable-baselines3's ``load_results``).
    title: figure title.  Fixes: the original ignored this parameter
    (hard-coding 'smoothed returns') and called ``load_results`` twice.
    """
    from scipy.signal import savgol_filter

    results = load_results(log_folder)
    R = results['r']
    T = results['t']
    _w = 7
    # savgol_filter requires an odd window length
    window = len(R) // _w
    _window_size = window if window % 2 != 0 else window + 1
    filtered = savgol_filter(R, _window_size, 1)
    plt.title(title)
    plt.ylabel('Returns')
    plt.xlabel('time step')
    plt.plot(T, filtered)
    plt.grid()
    plt.show()
6444111 | # coding=utf-8
# the path of the Doxyfile
# NOTE(review): machine-specific absolute path — adjust per checkout
DOXFILE_PATH = '/Users/niels/Documents/repositories/json/doc/Doxyfile'
| StarcoderdataPython |
346911 | import json
import os
import pandas
def filter_word(input_word):
    """Return ``input_word`` with every character that is not an ASCII letter
    removed (preserving order and case).

    Fix: builds the result with ``str.join`` over a set-membership test
    instead of quadratic ``+=`` concatenation with an O(n) string search.
    """
    letters = set('zxcvbnmasdfghjklqwertyuiopZXCVBNMASDFGHJKLQWERTYUIOP')
    return ''.join(c for c in input_word if c in letters)
def counting_pairs_from_yelp_parsed_data(parsed_data, verb_nsubj_amod_dict, verb_dobj_amod_dict, verb_nsubj_dict,
                                         verb_dobj_dict, noun_amod_dict):
    """Tally dependency-relation co-occurrences from Yelp parses.

    parsed_data: list of sentences; each sentence is a list of sub-sentences;
    each sub-sentence is a list of triples
    ((head_idx, head_word), relation, (dep_idx, dep_word)).
    All ``*_dict`` arguments are nested ``{word: {word: count}}`` counters
    and are mutated in place.  Refactor: the six copies of the
    create-if-missing/increment boilerplate are collapsed into one helper.
    """
    def _bump(table, outer, inner):
        # increment table[outer][inner], creating missing levels
        bucket = table.setdefault(outer, {})
        bucket[inner] = bucket.get(inner, 0) + 1

    for i, sentence in enumerate(parsed_data):
        if i % 10000 == 0:
            print('We have counted:', i, '/', len(parsed_data))
        for subsentence in sentence:
            for pair in subsentence:
                relation = pair[1]
                if relation == 'amod':
                    # adjective modifying a noun
                    _bump(noun_amod_dict, pair[0][1], pair[2][1])
                if relation == 'nsubj':
                    tmp_verb = pair[0][1]
                    _bump(verb_nsubj_dict, tmp_verb, pair[2][1])
                    # adjectives attached to the subject noun (matched by index)
                    for tmp_pair in subsentence:
                        if tmp_pair[1] == 'amod' and tmp_pair[0][0] == pair[2][0]:
                            _bump(verb_nsubj_amod_dict, tmp_verb, tmp_pair[2][1])
                if relation == 'dobj':
                    tmp_verb = pair[0][1]
                    _bump(verb_dobj_dict, tmp_verb, pair[2][1])
                    # adjectives attached to the direct-object noun
                    for tmp_pair in subsentence:
                        if tmp_pair[1] == 'amod' and tmp_pair[0][0] == pair[2][0]:
                            _bump(verb_dobj_amod_dict, tmp_verb, tmp_pair[2][1])
def counting_pairs_from_wiki_parsed_data(parsed_data, verb_nsubj_amod_dict, verb_dobj_amod_dict, verb_nsubj_dict,
                                         verb_dobj_dict, noun_amod_dict):
    """Accumulate dependency-pair co-occurrence counts from parsed Wikipedia data.

    Unlike the Yelp variant, each element of *parsed_data* is a *flat* list
    of dependency triples ((head_id, head_word), relation, (dep_id, dep_word)).
    The five counter dicts ({outer: {inner: count}}) are updated in place.
    """
    def _inc(counter, outer, inner):
        # Increment counter[outer][inner], creating missing levels first.
        counter.setdefault(outer, {}).setdefault(inner, 0)
        counter[outer][inner] += 1

    for i, sentence in enumerate(parsed_data):
        if i % 10000 == 0:
            print('We have counted:', i, '/', len(parsed_data))
        for pair in sentence:
            if pair[1] == 'amod':
                # adjective modifying a noun: count (noun, adjective)
                _inc(noun_amod_dict, pair[0][1], pair[2][1])
            if pair[1] == 'nsubj':
                tmp_verb = pair[0][1]
                _inc(verb_nsubj_dict, tmp_verb, pair[2][1])
                # adjectives attached to this subject token (matched by token id)
                for tmp_pair in sentence:
                    if tmp_pair[1] == 'amod' and tmp_pair[0][0] == pair[2][0]:
                        _inc(verb_nsubj_amod_dict, tmp_verb, tmp_pair[2][1])
            if pair[1] == 'dobj':
                tmp_verb = pair[0][1]
                _inc(verb_dobj_dict, tmp_verb, pair[2][1])
                # adjectives attached to this direct-object token
                for tmp_pair in sentence:
                    if tmp_pair[1] == 'amod' and tmp_pair[0][0] == pair[2][0]:
                        _inc(verb_dobj_amod_dict, tmp_verb, tmp_pair[2][1])
# Accumulate dependency-pair counts over every parsed Yelp file, checkpointing
# the five counter dicts (and the list of already-processed files) to JSON
# after each file so the job can resume where it stopped.
print('Start to count the yelp dataset')
# tmp_file_name = '/home/data/corpora/wikipedia/stanford_enhanced++_parsed_data/1000000.json'
verb_nsubj_amod_dict = dict()
verb_dobj_amod_dict = dict()
verb_nsubj_dict = dict()
verb_dobj_dict = dict()
noun_amod_dict = dict()
# Resume each counter from its checkpoint file when present.
if os.path.isfile('verb_nsubj_amod_dict_yelp.json'):
    with open('verb_nsubj_amod_dict_yelp.json', 'r') as f:
        verb_nsubj_amod_dict = json.load(f)
if os.path.isfile('verb_dobj_amod_dict_yelp.json'):
    with open('verb_dobj_amod_dict_yelp.json', 'r') as f:
        verb_dobj_amod_dict = json.load(f)
if os.path.isfile('verb_nsubj_dict_yelp.json'):
    with open('verb_nsubj_dict_yelp.json', 'r') as f:
        verb_nsubj_dict = json.load(f)
if os.path.isfile('verb_dobj_dict_yelp.json'):
    with open('verb_dobj_dict_yelp.json', 'r') as f:
        verb_dobj_dict = json.load(f)
if os.path.isfile('noun_amod_dict_yelp.json'):
    with open('noun_amod_dict_yelp.json', 'r') as f:
        noun_amod_dict = json.load(f)
# List of input files that were already fully counted.
if os.path.isfile('counted_yelp_file.json'):
    with open('counted_yelp_file.json', 'r') as f:
        counted_yelp_file = json.load(f)
else:
    counted_yelp_file = list()
yelp_folder_location = '/home/data/corpora/YELP/parsed_yelp_data_with_stanford/'
for f_name in os.listdir(yelp_folder_location):
    tmp_file_name = yelp_folder_location + f_name
    if tmp_file_name in counted_yelp_file:
        print('We have counted this file')
        continue
    print('We are working on file:', tmp_file_name)
    sampled_data = list()
    with open(tmp_file_name, 'r') as original_f:
        sampled_data = json.load(original_f)
    counting_pairs_from_yelp_parsed_data(sampled_data, verb_nsubj_amod_dict, verb_dobj_amod_dict, verb_nsubj_dict,
                                         verb_dobj_dict, noun_amod_dict)
    counted_yelp_file.append(tmp_file_name)
    # Checkpoint every counter (and the processed-file list) after each file.
    with open('verb_dobj_dict_yelp.json', 'w') as f:
        json.dump(verb_dobj_dict, f)
    with open('verb_nsubj_dict_yelp.json', 'w') as f:
        json.dump(verb_nsubj_dict, f)
    with open('verb_dobj_amod_dict_yelp.json', 'w') as f:
        json.dump(verb_dobj_amod_dict, f)
    with open('verb_nsubj_amod_dict_yelp.json', 'w') as f:
        json.dump(verb_nsubj_amod_dict, f)
    with open('noun_amod_dict_yelp.json', 'w') as f:
        json.dump(noun_amod_dict, f)
    with open('counted_yelp_file.json', 'w') as f:
        json.dump(counted_yelp_file, f)
print('end')
| StarcoderdataPython |
1858243 | """ Nothing to see here """
import sys
__version__ = "0.3"
__uri__ = 'https://github.com/garbled1/pybalboa'
__title__ = "pybalboa"
__description__ = 'Interface Library for Balboa Spa'
__doc__ = __description__ + " <" + __uri__ + ">"
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__license__ = "Apache 2.0"
__copyright__ = "Copyright (c) 2019 <NAME>"
from .balboa import *
from . import clients
from . import homie
from . import messages
if __name__ == '__main__': print(__version__)
| StarcoderdataPython |
3428897 | <gh_stars>0
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-14 20:30
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated Django migration for ``ac_seguridad``.

    Renames capitalised field names to lowercase on the ``alerta``,
    ``ocurre_a`` and ``ocurre_en`` models; no column types change.
    """
    dependencies = [
        ('ac_seguridad', '0004_auto_20170808_1926'),
    ]
    operations = [
        migrations.RenameField(
            model_name='alerta',
            old_name='Numero_alertas',
            new_name='numero_alertas',
        ),
        migrations.RenameField(
            model_name='alerta',
            old_name='Tipo',
            new_name='tipo',
        ),
        migrations.RenameField(
            model_name='ocurre_a',
            old_name='Cedula_usuarios_en_alertas',
            new_name='cedula_usuarios_en_alertas',
        ),
        migrations.RenameField(
            model_name='ocurre_a',
            old_name='Fecha_alertas',
            new_name='fecha_alertas',
        ),
        migrations.RenameField(
            model_name='ocurre_a',
            old_name='Numero_alertas',
            new_name='numero_alertas',
        ),
        migrations.RenameField(
            model_name='ocurre_en',
            old_name='Fecha_alertas',
            new_name='fecha_alertas',
        ),
        migrations.RenameField(
            model_name='ocurre_en',
            old_name='Numero_alertas',
            new_name='numero_alertas',
        ),
        migrations.RenameField(
            model_name='ocurre_en',
            old_name='RIF',
            new_name='rif',
        ),
    ]
| StarcoderdataPython |
1617693 | from .coordattention import CoordAttention, H_Sigmoid, H_Swish
from .involution import Involution
from .identity import Identity
from .droppath import DropPath, droppath
| StarcoderdataPython |
1680750 | <gh_stars>0
from fractions import Fraction as Frac
def probs(n, m):
    """Return [win, draw, loss] probabilities (exact Fractions) for a dice
    race in which the opponent still needs *n* steps and we still need *m*
    steps; each round both players advance by an independent fair d6 roll.

    Bug fix: the recursive weighting previously used ``Frac(1/36)`` — a
    Fraction built from the *float* 1/36, which is not exactly 1/36 — so the
    probabilities no longer summed to 1.  It now uses ``Frac(1, 36)``.
    """
    win = Frac()
    draw = Frac()
    loss = Frac()
    for i in range(1, 7):       # opponent's roll this round
        for j in range(1, 7):   # our roll this round
            if n - i <= 0:
                # Opponent finishes this round: compare the two margins.
                if n - i - (m - j) < 0:
                    loss += Frac(1, 36)
                elif n - i - (m - j) > 0:
                    win += Frac(1, 36)
                elif n - i - (m - j) == 0:
                    draw += Frac(1, 36)
                else:
                    print("Error: difference neither above, below, or equal to 0")
            else:  # n-i>0 ie the opponent has not reached the end
                if m - j <= 0:
                    # We finished while the opponent did not.
                    win += Frac(1, 36)
                else:  # m-j>0 ie we have not reached the end
                    # Neither finished: recurse on the remaining steps.
                    cascade = probs(n - i, m - j)
                    win += Frac(1, 36) * cascade[0]
                    draw += Frac(1, 36) * cascade[1]
                    loss += Frac(1, 36) * cascade[2]
    return [win, draw, loss]
def decompose(a):
    """Print the win/draw/loss probabilities in *a* to four decimal places."""
    win, draw, loss = a[0], a[1], a[2]
    print("Win: %.4f" % win)
    print("Draw: %.4f" % draw)
    print("Loss: %.4f" % loss)
def profit(a):
    """Print the expected payout for the [win, draw, loss] probabilities in
    *a*, with payouts of 20, 15 and 10 respectively."""
    win, draw, loss = a[0], a[1], a[2]
    expected = win * 20 + draw * 15 + loss * 10
    print("Profit: %.4f" % expected)
def calculate(n, m):
    """Compute the outcome distribution for a race of n (opponent) vs m
    (player) remaining steps, then print it and its expected payout."""
    dist = probs(n, m)
    decompose(dist)
    profit(dist)
| StarcoderdataPython |
3348715 | <gh_stars>0
# coding: utf-8
"""
Test suite for performance profiling and testing with isolated HTTP WEB Server in a standalone process
"""
from app01.app01_imp import app
from multiprocessing import Process
from time import sleep
import socket
import unittest
from urllib3 import HTTPConnectionPool
import os
import timeit
import logging
import json
app.config["TESTING"] = True
logging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s', datefmt='%m/%d/%Y %I:%M:%S %p', level=logging.DEBUG)
def run_server(**kwargs):
    """Child-process entry point: run the Flask app with the given host/port
    keyword arguments (blocks until the process is terminated)."""
    app.run(**kwargs)
class PerfTestCase(unittest.TestCase):
    """
    Tests multiple requests to a standalone HTTP server process
    """
    @classmethod
    def setUpClass(cls):
        # Resolve the test resources directory relative to this file.
        cls.path = os.path.dirname(__file__)
        cls.resources_path = cls.path + "/resources/"
        # Bind to port 0 so the OS picks a free ephemeral port, then release
        # it for the server process to reuse.
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.bind(('localhost', 0))
        cls.port = sock.getsockname()[1]
        sock.close()
        with app.app_context():
            # Run the app in a separate OS process so timings are not skewed
            # by the measuring process itself.
            cls.server = Process(target=run_server, kwargs={'host': 'localhost', 'port': cls.port})
            cls.server.start()
        # Give the server time to come up before issuing requests.
        sleep(5)
    def test_form_upload_sample_01(self):
        """
        Multiple Upload Form calls to isolated HTTP Server process
        """
        pool = HTTPConnectionPool('localhost', port=self.port, maxsize=1)
        def proc():
            # One multipart file-upload request; the server must answer 200.
            response = pool.request('POST', '/', fields=dict(file=('file.txt', data),))
            self.assertEqual(response.status, 200)
        with open(self.resources_path + 'oracle.txt', 'r', encoding='utf-8') as f:
            data = f.read()
        # timeit reports the wall-clock time of 100 sequential uploads.
        print(timeit.timeit(proc, number=100))
    def test_api_convert_sample_01(self):
        """
        Multiple API convert calls to isolated HTTP Server process
        """
        pool = HTTPConnectionPool('localhost', port=self.port, maxsize=1)
        def proc():
            # One JSON conversion request; the server must answer 200.
            response = pool.request(
                'POST',
                '/api/convert',
                headers={'Content-Type': 'application/json'},
                body=json.dumps({'text': data})
            )
            self.assertEqual(response.status, 200)
        with open(self.resources_path + 'oracle.txt', 'r', encoding='utf-8') as f:
            data = f.read()
        # timeit reports the wall-clock time of 100 sequential requests.
        print(timeit.timeit(proc, number=100))
    @classmethod
    def tearDownClass(cls):
        # Stop the server process and reap it.
        cls.server.terminate()
        cls.server.join()
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
1963160 | <reponame>k-manish2001/1901CB23_2021<filename>proj2/home/admin.py
from django.contrib import admin
from home.models import Index
# Expose the Index model in the Django admin interface.
admin.site.register(Index)
# Register your models here.
| StarcoderdataPython |
3221109 | <reponame>qx-teo/covidcast-indicators
"""Tests for running the signal generation functions."""
import pandas as pd
import numpy as np
from delphi_hhs_facilities.generate_signals import generate_signal, sum_cols
class TestGenerateSignals:
    """Unit tests for ``generate_signal`` and ``sum_cols``."""

    def test_generate_signals(self):
        # Two value columns plus geo/timestamp; the last row ("z") has only
        # NaN values and does not appear in the expected output.
        test_input = pd.DataFrame(
            {"a": [1, 2, 3, 4, np.nan],
             "b": [2, np.nan, 4, 6, np.nan],
             "geo_id": ["x", "x", "x", "y", "z"],
             "timestamp": [pd.Timestamp("20200201"),
                           pd.Timestamp("20200201"),
                           pd.Timestamp("20200202"),
                           pd.Timestamp("20200203"),
                           pd.Timestamp("20200204")]
             })
        # The -1 offset shifts every output timestamp back by one day
        # relative to the input dates.
        test_output = generate_signal(test_input, ["a", "b"], sum_cols, -1)
        expected = pd.DataFrame(
            {"timestamp": [pd.Timestamp("20200131"),
                           pd.Timestamp("20200201"),
                           pd.Timestamp("20200202")],
             "geo_id": ["x", "x", "y"],
             "val": [5., 7., 10.],
             "se": [np.nan]*3,
             "sample_size": [np.nan]*3
             })
        pd.testing.assert_frame_equal(test_output, expected)

    def test_sum_cols(self):
        # Per the expected series, NaNs are treated as 0 in the sum unless
        # every summand at a position is NaN.
        test_input = [
            pd.Series([1, 2, 3, np.nan, np.nan]),
            pd.Series([np.nan, 3, 6, 9, np.nan])
        ]
        test_output = sum_cols(test_input)
        expected = pd.Series([1, 5, 9, 9, np.nan])
        pd.testing.assert_series_equal(test_output, expected)
| StarcoderdataPython |
8010503 | import pandas as pd
import json
import sys
import os
def load_curr(path):
    """Load a curriculum CSV from *path*; the 'index' column becomes the index."""
    return pd.read_csv(path, sep=',', quotechar='"', index_col='index')
def create_curr():
    """Return a new, empty curriculum DataFrame indexed by 'index'."""
    columns = ['index', 'hours', 'title', 'link', 'content']
    return pd.DataFrame(columns=columns).set_index('index')
def save_curr(df, path):
    """Write the curriculum *df* back to *path* as CSV (index included)."""
    df.to_csv(path, sep=',', quotechar='"')
def add_curr(df, hours, title, link, content):
    """
    Add a new row to the curriculum and return the extended DataFrame.

    :df: pd.DataFrame indexed by 'index'
    :hours: str of float, course duration
    :title: str, title of course
    :link: str, link to course
    :content: str, content category
    """
    # New rows get the next free integer index (0 for an empty curriculum).
    index = max(df.index) + 1 if len(df) else 0
    row = pd.DataFrame(
        [[index, float(hours), title, link, content]],
        columns=['index', 'hours', 'title', 'link', 'content']).set_index('index')
    # DataFrame.append was removed in pandas 2.0; pd.concat is the
    # supported equivalent.
    return pd.concat([df, row])
def remove_curr(df, index):
    """
    Remove the row whose index equals *index* and return the result.

    :df: pd.DataFrame
    :index: str of integer
    """
    victim = int(index)
    return df.drop(index=victim)
def move_curr(df, index, dir):
    """
    Move the row with index *index* one position up or down.

    :df: pd.DataFrame
    :index: str of int
    :dir: one of ['up', 'down']; anything else raises ValueError
    """
    order = list(df.index)
    pos = order.index(int(index))
    if dir == 'up':
        if pos == 0:
            # already first: nothing to do
            return df
        order[pos - 1], order[pos] = order[pos], order[pos - 1]
    elif dir == 'down':
        if pos == len(df) - 1:
            # already last: nothing to do
            return df
        order[pos], order[pos + 1] = order[pos + 1], order[pos]
    else:
        raise ValueError('Invalid dir')
    return df.reindex(order)
def get_curr(df, printOut=True):
    """Serialise each curriculum row as a dict including its index.

    With printOut=True each row is printed as a JSON line (returns None);
    otherwise the list of row dicts is returned.
    """
    rows = []
    for idx, row in df.iterrows():
        record = dict(row)
        record['index'] = idx
        if printOut:
            print(json.dumps(record))
        else:
            rows.append(record)
    if not printOut:
        return rows
def path_curr(name):
    """Return the on-disk CSV path for curriculum *name* (under ./curricula)."""
    here = os.path.split(__file__)[0]
    return os.path.join(here, 'curricula', f'{name}.csv')
if __name__ == "__main__":
cmd, name = sys.argv[1:3]
cmd = cmd.strip("'")
name = name.strip("'")
path = path_curr(name)
if not cmd == 'create':
df = load_curr(path)
if cmd == 'create':
df = create_curr()
elif cmd == 'get':
get_curr(df)
elif cmd == 'add':
df = add_curr(df, *[x.strip("'") for x in sys.argv[3:]])
elif cmd == 'remove':
df = remove_curr(df, *[x.strip("'") for x in sys.argv[3:]])
elif cmd == 'move':
df = move_curr(df, *[x.strip("'") for x in sys.argv[3:]])
else:
raise ValueError('Unkown Command')
if cmd != 'get':
save_curr(df, path) | StarcoderdataPython |
3516281 | <filename>HowmanySimulationAreSufficient.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: <EMAIL>
"""
import os
import glob
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn import preprocessing
import seaborn as sns
import matplotlib.pyplot as plt
import random
from sklearn import preprocessing
def Mean_Val_Con(D_strain):
    """Convergence study: plot the running mean of *D_strain* against the
    number of simulations, together with the overall mean of those running
    means and a symmetric confidence band (Z = 2.576, i.e. two-sided 99%).

    Returns the matplotlib ``plt`` module and a DataFrame of running means.
    """
    Mean_Val=[]
    N=D_strain.size
    for i in range(N-1):
        # print(D_strain[0:i+1])
        # mean of the first i+1 samples
        Mean=D_strain[0:i+1].mean()
        Mean_Val.append(Mean)
    Mean_Val=pd.DataFrame(Mean_Val)
    MM=Mean_Val.mean()
    Sx=Mean_Val.std()
    Z=2.576
    # lower / upper confidence bounds around the grand mean
    MMU=MM + -Z*Sx/np.sqrt(N)
    MMO=MM + Z*Sx/np.sqrt(N)
    plt.figure()
    plt.plot(range(N-1),Mean_Val)
    plt.axhline(y=MM.item(), xmin=0, xmax=N-1, linewidth=1, color = 'y',label='Mean value')
    plt.axhline(y=MMU.item(), xmin=0, xmax=N-1, linewidth=1, color = 'r',linestyle='dashed',label='1-percentile errorbound')
    plt.axhline(y=MMO.item(), xmin=0, xmax=N-1, linewidth=1, color = 'r',linestyle='dashed')
    plt.ylabel(str(D_strain.name))
    plt.xlabel('Number of Simulation')
    plt.title('Mean Value with Number of Simulation')
    plt.legend()
    return plt , Mean_Val
def STD_Val_Con(D_strain):
    """Convergence study: plot the running standard deviation of *D_strain*
    against the number of simulations, with its mean and a 99% confidence
    band (Z = 2.576).

    Bug fix: the running statistic previously called ``.mean()``, making
    this function a duplicate of ``Mean_Val_Con``; the ``i == 0`` skip
    (the std of a single sample is undefined) shows ``.std()`` was intended.

    Returns the matplotlib ``plt`` module and a DataFrame of running stds.
    """
    STD_Val = []
    N = D_strain.size
    for i in range(N - 1):
        if i == 0:
            # std needs at least two samples; skip the first prefix
            pass
        else:
            STD = D_strain[0:i + 1].std()
            STD_Val.append(STD)
    STD_Val = pd.DataFrame(STD_Val)
    MM = STD_Val.mean()
    Sx = STD_Val.std()
    Z = 2.576
    MMU = MM + -Z * Sx / np.sqrt(N)
    MMO = MM + Z * Sx / np.sqrt(N)
    plt.figure()
    plt.plot(range(N - 2), STD_Val)
    plt.axhline(y=MM.item(), xmin=0, xmax=N - 1, linewidth=1, color='y')
    plt.axhline(y=MMU.item(), xmin=0, xmax=N - 1, linewidth=1, color='r', linestyle='dashed')
    plt.axhline(y=MMO.item(), xmin=0, xmax=N - 1, linewidth=1, color='r', linestyle='dashed')
    plt.ylabel(str(D_strain.name))
    plt.xlabel('Number of Simulation')
    plt.title('Mean Value of STD with Number of Simulation')
    return plt, STD_Val
def KeyToDelete(np_file):
    """Drop (in place) every entry of *np_file* whose value does not hold
    exactly 28 statistics, then return the same dict."""
    doomed = [key for key in np_file if len(np_file[key]) != 28]
    for key in doomed:
        del np_file[key]
    return np_file
def Convert2Pandas(np_file):
    """Build a DataFrame from *np_file* (dict of equal-length records) and
    transpose it so each record becomes a row."""
    frame = pd.DataFrame(np_file)
    return frame.T
def Convert2Pandas2(np_file):
    """Wrap *np_file* in a DataFrame and give its 28 positional columns
    their descriptive statistics names."""
    names = ['Orientation STD', 'Length STD', 'Diameter STD', 'SVF Mean X', 'SVF STD X',
             'SVF Mean Y', 'SVF STD Y', 'SVF Mean Z', 'SVF STD Z', 'Realsurface',
             'Cont_Area_X_Mean', 'Cont_Area_X_STD', 'Cont_Area_Y_Mean', 'Cont_Area_Y_STD',
             'Cont_Area_Z_Mean', 'Cont_Area_Z_STD', 'Cont_Area_Normal_X_Mean',
             'Cont_Area_Normal_X_STD', 'Cont_Area_Normal_Y_Mean', 'Cont_Area_Normal_Y_STD',
             'Cont_Area_Normal_Z_Mean', 'Cont_Area_Normal_Z_STD', 'Cont_Area_Size_Mean',
             'Cont_Area_Size_STD', 'Strain to failure', 'Maximal Stress',
             'Thoughness', 'Initial Effective Stiffness']
    frame = pd.DataFrame(np_file)
    return frame.rename(columns=dict(enumerate(names)))
def correlation_matrix(df):
    """Render the correlation matrix of *df* (``df.corr()`` is applied) as a
    colour-mapped image with a colorbar and show it.

    NOTE(review): ``plt.xlabel``/``plt.ylabel`` receive the whole label
    *list*; ``xticks``/``yticks`` were probably intended — confirm.
    """
    from matplotlib import pyplot as plt
    from matplotlib import cm as cm
    fig = plt.figure()
    # 30-step discretised 'jet' colormap
    cmap = cm.get_cmap('jet', 30)
    cax = plt.imshow(df.corr(), interpolation="nearest", cmap=cmap)
    plt.title('Abalone Feature Correlation')
    plt.grid(True)
    labels=['Orientation STD', 'Length STD','Diameter STD','SVF Mean X','SVF STD X',
        'SVF Mean Y', 'SVF STD Y', 'SVF Mean Z', 'SVF STD Z', 'Realsurface', 'Cont_Area_X_Mean', 'Cont_Area_X_STD',
        'Cont_Area_Y_Mean', 'Cont_Area_Y_STD', 'Cont_Area_Z_Mean', 'Cont_Area_Z_STD', 'Cont_Area_Normal_X_Mean',
        'Cont_Area_Normal_X_STD', 'Cont_Area_Normal_Y_Mean', 'Cont_Area_Normal_Y_STD',
        'Cont_Area_Normal_Z_Mean', 'Cont_Area_Normal_Z_STD', 'Cont_Area_Size_Mean',
        'Cont_Area_Size_STD', 'Strain to failure', 'Maximal Stress',
        'Thoughness', 'Initial Effective Stiffness']
    plt.xlabel(labels)
    plt.ylabel(labels)
    fig.colorbar(cax)
    plt.show()
# Load the pre-computed statistics dict (one 28-value record per simulation),
# convert it to a DataFrame, drop rows containing NaNs, and pull out the four
# response series used by the analyses below.
np_file=np.load('dict_statab_final_10_03_20.npy',allow_pickle=True).item()
df=Convert2Pandas(np_file)
df=df.astype(float)
df=df[~np.isnan(df).any(axis=1)]
D_strain = df['Strain to failure']
D_stress = df['Maximal Stress']
D_Thoughness = df['Thoughness']
Stiffness =df['Initial Effective Stiffness']
## Some plottings
##############################How Many Simulation Convergence #####################################
#a1,M1=Mean_Val_Con(D_strain)
#a1.savefig('MeanStrain-NOS.png')
#a2,M2=Mean_Val_DS=Mean_Val_Con(D_Thoughness)
#a2.savefig('MeanThoughness-NOS.png')
#a3,M3=Mean_Val_DT=Mean_Val_Con(D_stress)
#a3.savefig('MeanStress-NOS.png')
#a4,M4=Mean_Val_Con(Stiffness)
#a4.savefig('MeanStiffness-NOS.png')
#STD_Val_Con(D_strain)
#STD_Val_Con(D_Thoughness)
#STD_Val_Con(D_stress)
#STD_Val_Con(Stiffness)
##############################How Many Simulation Convergence #####################################
#######################################Correlation#################################
#df_cleand=df.drop(columns=['Realsurface','SVF Mean X','SVF Mean Z','SVF Mean Y', 'SVF STD Y', 'SVF STD X'
# ,'SVF STD Z','Cont_Area_Normal_Y_STD','Cont_Area_Normal_Z_Mean','Cont_Area_Normal_X_STD','Cont_Area_Z_Mean','Cont_Area_Size_STD',],axis=1)
#df_cleaned_corr=df_cleand.corr(method='pearson')
#df_cleaned_corr=df.corr(method='pearson')
#correlation_matrix(df_cleaned_corr)
#############################################save##
#df.to_pickle('./df.pkl')
###################################################################################
#x = df.values #returns a numpy array
#x_scaled=preprocessing.scale(x)
#min_max_scaler = preprocessing.MinMaxScaler()
#x_scaled = min_max_scaler.fit_transform(x)
#df_scaled = pd.DataFrame(x_scaled,columns=['Orientation STD','Length STD','Diameter STD','SVF Mean X','SVF STD X',
# 'SVF Mean Y','SVF STD Y','SVF Mean Z','SVF STD Z','Realsurface','Cont_Area_X_Mean','Cont_Area_X_STD',
# 'Cont_Area_Y_Mean','Cont_Area_Y_STD','Cont_Area_Z_Mean','Cont_Area_Z_STD', 'Cont_Area_Normal_X_Mean',
# 'Cont_Area_Normal_X_STD','Cont_Area_Normal_Y_Mean','Cont_Area_Normal_Y_STD',
# 'Cont_Area_Normal_Z_Mean','Cont_Area_Normal_Z_STD','Cont_Area_Size_Mean',
# 'Cont_Area_Size_STD','Strain to failure','Maximal Stress',
# 'Thoughness','Initial Effective Stiffness'])
#df_scaled_corr = df_scaled.corr
#
#g = sns.pairplot(df_scaled, vars=['Strain to failure','Maximal Stress',
# 'Thoughness','Initial Effective Stiffness'],hue='Orientation STD',kind='reg' )
#g = sns.pairplot(df_scaled, vars=['Strain to failure','Maximal Stress',
# 'Thoughness','Initial Effective Stiffness'],hue='Orientation STD',kind='reg' )
#fig, axs = plt.subplots(nrows=2, ncols=2)
#f, (ax1, ax2) = plt.subplots(2)
#import importlib
#importlib.reload(plt); importlib.reload(sns)
#sns.set_style("white")
#g1=sns.jointplot('Orientation STD','Strain to failure', data=df, kind="reg",joint_kws = {'scatter_kws':dict(alpha=0.2)})
#g2=sns.jointplot('Orientation STD','Initial Effective Stiffness', data=df, kind="reg",joint_kws = {'scatter_kws':dict(alpha=0.2)})
#
#g3=sns.jointplot('Orientation STD','Thoughness', data=df, kind="reg",joint_kws = {'scatter_kws':dict(alpha=0.2)})
#
#g4=sns.jointplot('Orientation STD','Maximal Stress', data=df, kind="reg",joint_kws = {'scatter_kws':dict(alpha=0.2)})
#plt.close('all')
#
#g1=sns.jointplot('Length STD','Strain to failure', data=df, kind="reg")
#g2=sns.jointplot('Length STD','Initial Effective Stiffness', data=df, kind="reg")
#g3=sns.jointplot('Length STD','Thoughness', data=df, kind="reg")
#g4=sns.jointplot('Length STD','Maximal Stress', data=df, kind="reg")
##
#plt.close('all')
#g1=sns.jointplot('Diameter STD','Strain to failure', data=df, kind="reg")
#g2=sns.jointplot('Diameter STD','Initial Effective Stiffness', data=df, kind="reg")
#g3=sns.jointplot('Diameter STD','Thoughness', data=df, kind="reg")
#g4=sns.jointplot('Diameter STD','Maximal Stress', data=df, kind="reg")
#plt.close('all')
##
#g1=sns.jointplot('Cont_Area_Size_Mean','Strain to failure', data=df, kind="reg",joint_kws = {'scatter_kws':dict(alpha=0.2)})
#
#g2=sns.jointplot('Cont_Area_Size_Mean','Initial Effective Stiffness', data=df, kind="reg",joint_kws = {'scatter_kws':dict(alpha=0.2)})
#
#g3=sns.jointplot('Cont_Area_Size_Mean','Thoughness', data=df, kind="reg",joint_kws = {'scatter_kws':dict(alpha=0.2)})
#
#g4=sns.jointplot('Cont_Area_Size_Mean','Maximal Stress', data=df, kind="reg",joint_kws = {'scatter_kws':dict(alpha=0.2)})
#plt.close('all')
#g1=sns.jointplot('Cont_Area_Z_STD','Strain to failure', data=df, kind="reg")
#
#g2=sns.jointplot('Cont_Area_Z_STD','Initial Effective Stiffness', data=df, kind="reg")
#
#g3=sns.jointplot('Cont_Area_Z_STD','Thoughness', data=df, kind="reg")
#
#g4=sns.jointplot('Cont_Area_Z_STD','Maximal Stress', data=df, kind="reg")
#g1=sns.jointplot('Cont_Area_Normal_Z_Mean','Strain to failure', data=df, kind="reg",joint_kws = {'scatter_kws':dict(alpha=0.2)})
#
#g2=sns.jointplot('Cont_Area_Normal_Z_Mean','Initial Effective Stiffness', data=df, kind="reg",joint_kws = {'scatter_kws':dict(alpha=0.2)})
#
#g3=sns.jointplot('Cont_Area_Normal_Z_Mean','Thoughness', data=df, kind="reg",joint_kws = {'scatter_kws':dict(alpha=0.2)})
#
#g4=sns.jointplot('Cont_Area_Normal_Z_Mean','Maximal Stress', data=df, kind="reg",joint_kws = {'scatter_kws':dict(alpha=0.2)})
#plt.close('all')
#g = sns.pairplot(df, vars=['Orientation STD', 'Initial Effective Stiffness'],hue='Orientation STD')
#g1 = sns.pairplot(df, vars=['Length STD', 'Initial Effective Stiffness'],hue='Length STD')
#g2 = sns.pairplot(df, vars=['Diameter STD', 'Initial Effective Stiffness'],hue='Diameter STD')
#g = sns.pairplot(df, vars=['Orientation STD','Thoughness'],hue='Length STD')
#g = sns.pairplot(df, vars=['Orientation STD','Maximal Stress'],hue='Length STD')
#g = sns.pairplot(df, vars=['Orientation STD','Strain to failure'],hue='Length STD')
#g = sns.pairplot(df, vars=['Length STD', 'Strain to failure'])
#g = sns.pairplot(df, vars=['Diameter STD', 'Strain to failure'])
| StarcoderdataPython |
9672398 | # -*- coding:utf-8 -*-
import os
import time
import six
import eventlet
import cPickle
import contextlib
import mysql
import mysql.connector
from simpleutil.config import cfg
from simpleutil.log import log as logging
from simpleutil.utils.systemutils import ExitBySIG
from simpleutil.utils.systemutils import UnExceptExit
from simpleservice.ormdb.tools.backup import mysqldump
from simpleservice.ormdb.tools.backup import mysqlload
from simpleflow.utils.storage_utils import build_session
from simpleflow.api import load
from simpleflow.task import Task
from simpleflow.types import failure
from simpleflow.patterns import linear_flow as lf
from simpleflow.patterns import unordered_flow as uf
from goperation.manager.rpc.agent import sqlite
from simpleflow.storage.middleware import LogBook
from simpleflow.storage import Connection
from simpleflow.engines.engine import ParallelActionEngine
from goperation.utils import safe_fork
from goperation.manager import common as manager_common
from gogamechen3 import common
from gogamechen3.api import exceptions
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
SWALLOW = 'SWALLOW'
DUMPING = 'DUMPING'
SWALLOWED = 'SWALLOWED'
INSERT = 'INSERT'
FINISHED = 'FINISHED'
def sqlfile(entity):
    # Name of the per-entity dump file: '<gameserver objtype>-db-<entity>.sql'.
    return '%s-db-%d.sql' % (common.GAMESERVER, entity)
@contextlib.contextmanager
def dbconnect(host, port, user, passwd, schema,
              raise_on_warnings=True):
    """Context manager yielding a MySQL connection to *schema*; the
    connection is always closed on exit, even on error."""
    if not schema:
        raise ValueError('Schema is none?')
    conn = mysql.connector.connect(user=user, passwd=passwd,
                                   host=host, port=port,
                                   database=schema,
                                   raise_on_warnings=raise_on_warnings)
    try:
        yield conn
    finally:
        conn.close()
def cleandb(host, port, user, passwd, schema):
    """Drop every table in *schema* on the given MySQL server."""
    # BUG FIX: the passwd argument had been replaced by a '<PASSWORD>'
    # redaction placeholder, which is not valid Python.
    with dbconnect(host=host, port=port,
                   user=user, passwd=passwd,
                   schema=schema) as conn:
        cursor = conn.cursor()
        cursor.execute('show tables')
        tables = cursor.fetchall()
        for table in tables:
            cursor.execute('drop table %s' % table[0])
        # cursor.fetchall()
        cursor.close()
class Swallow(Task):
    """Taskflow task: ask the manager to 'swallow' one source entity into the
    merge identified by *uuid*, and provide the databases to be dumped as
    the flow result ``db_<entity>``."""
    def __init__(self, uuid, steps, entity, endpoint):
        self.endpoint = endpoint
        self.entity = entity
        self.stpes = steps
        self.uuid = uuid
        super(Swallow, self).__init__(name='swallow_%d' % entity, provides='db_%d' % entity)
    def execute(self, entity, timeout):
        # Only act while this entity is still in the SWALLOW/DUMPING stage;
        # later stages mean the swallow RPC already succeeded in a prior run.
        step = self.stpes[self.entity]
        if step in (DUMPING, SWALLOW):
            with self.endpoint.mlock:
                result = self.endpoint.client.swallow_entity(self.entity, self.uuid, entity)
            if result.get('resultcode') != manager_common.RESULT_SUCCESS or not result.get('data'):
                LOG.error('Swallow success, but can not find database from result')
                return None
            data = result.get('data')
            databases = data[0].get('databases')
            if not databases:
                LOG.error('Swallow success, databases is empty')
                return None
            # Advance the checkpointed state so a restart resumes at DUMPING.
            self.stpes[self.entity] = DUMPING
            return databases
        return None
class DumpData(Task):
    """Taskflow task: dump one swallowed entity's game data into the merge
    working directory with mysqldump, and create a schema-only init.sql the
    first time any dump task gets there."""

    # Tables never merged from any entity (logs, rankings, transient state).
    NODUMPTABLES = [
        'battlefield_log_lowfight',
        'limit_level',
        'mining_area',
        'pay_censoring',
        'player_censoring',
        'quick_report',
        'pvp_arena_pet_rank',
        'var_world',
        'pvp_cupmatch_fight_log',
        'oper_record_plot',
        'timer_boss',
        'pvp_arena_rank',
        'pve_campaign_log',
    ]

    # Tables whose rows are kept from a single entity only.
    DUMPONLYONE = [
        'var_world'
    ]

    def __init__(self, uuid, steps, entity,
                 endpoint=None,
                 skip_only_one=True):
        self.entity = entity
        self.stpes = steps
        self.uuid = uuid
        self.endpoint = endpoint
        # When False, the DUMPONLYONE tables are kept in this entity's dump.
        self.skip_only_one = skip_only_one
        super(DumpData, self).__init__(name='dump_%d' % entity,
                                       rebind=['mergeroot', 'dtimeout', 'db_%d' % entity])

    def _ext_args(self, schema):
        # mysqldump flags: -t (skip CREATE TABLE), -c (complete INSERT
        # statements), plus one --ignore-table per excluded table.
        extargs = ['-t', '-c']
        nodumps = (self.NODUMPTABLES + self.DUMPONLYONE) if self.skip_only_one else self.NODUMPTABLES
        for table in nodumps:
            extargs.append('--ignore-table=%s.%s' % (schema, table))
        return extargs

    @staticmethod
    def _prepare_database(databases):
        # Only the game data database takes part in the merge.
        return databases[common.DATADB]

    def execute(self, root, timeout, databases):
        """Dump the entity database that is to be merged.

        If init.sql does not exist yet, also dump a schema-only init.sql
        (tables and routines, no data).
        """
        step = self.stpes[self.entity]
        if step == DUMPING:
            _file = os.path.join(root, sqlfile(self.entity))
            if os.path.exists(_file):
                # Dump already produced by an earlier (resumed) run.
                return
            database = DumpData._prepare_database(databases)
            try:
                mysqldump(_file,
                          database.get('host'), database.get('port'),
                          database.get('user'), database.get('passwd'),
                          database.get('schema'),
                          character_set=None,
                          extargs=self._ext_args(database.get('schema')),
                          logfile=None, callable=safe_fork,
                          timeout=timeout)
            except (ExitBySIG, UnExceptExit):
                LOG.error('Dump database of entity %d fail' % self.entity)
                if os.path.exists(_file):
                    try:
                        os.remove(_file)
                    # BUG FIX: was (OSError, OSError); IOError was intended.
                    except (OSError, IOError):
                        # BUG FIX: was '%d' % _file, which raises TypeError
                        # because _file is a string path.
                        LOG.error('Try remove file %s fail!' % _file)
                        raise exceptions.MergeException('Remove error file %s fail' % _file)
            else:
                self.stpes[self.entity] = SWALLOWED
            # Create the schema-only init file once, under the endpoint lock
            # so concurrent dump tasks do not race (double-checked).
            initfile = os.path.join(root, 'init.sql')
            if not os.path.exists(initfile):
                try:
                    with self.endpoint.mlock:
                        if not os.path.exists(initfile):
                            LOG.info('Dump init sql from entity %d, schema %s' % (self.entity, database.get('schema')))
                            mysqldump(initfile,
                                      database.get('host'), database.get('port'),
                                      database.get('user'), database.get('passwd'),
                                      database.get('schema'),
                                      character_set=None, extargs=['-R', '-d'],
                                      logfile=None, callable=safe_fork,
                                      timeout=timeout)
                except (ExitBySIG, UnExceptExit):
                    if os.path.exists(initfile):
                        try:
                            os.remove(initfile)
                        # BUG FIX: was (OSError, OSError); IOError intended.
                        except (OSError, IOError):
                            LOG.error('Try remove init sql file fail!')
class Swallowed(Task):
    """Taskflow task: confirm that the swallow of one source entity finished
    and fold its area list into the locally known target entity."""
    def __init__(self, uuid, steps, entity, endpoint):
        self.endpoint = endpoint
        self.entity = entity
        self.stpes = steps
        self.uuid = uuid
        super(Swallowed, self).__init__(name='swallowed_%d' % entity)
    def execute(self, entity, timeout):
        step = self.stpes[self.entity]
        if step == SWALLOWED:
            with self.endpoint.mlock:
                result = self.endpoint.client.swallowed_entity(self.entity, self.uuid, entity)
            try:
                if result.get('resultcode') != manager_common.RESULT_SUCCESS or not result.get('data'):
                    LOG.error('Swallowed success, but can not find areas from result')
                    return None
                data = result.get('data')
                areas = data[0].get('areas')
                if not areas:
                    raise KeyError('Not areas found')
            except KeyError as e:
                LOG.error('Get areas fail %s' % e.message)
            else:
                self.stpes[self.entity] = INSERT
                # Wait up to ~15s for the target entity to become known
                # locally. NOTE(review): the loop has no break once the
                # entity appears — confirm whether that is intended.
                for i in range(5):
                    if entity not in self.endpoint.konwn_appentitys:
                        eventlet.sleep(3)
                try:
                    self.endpoint.konwn_appentitys[entity]['areas'].extend(areas)
                except KeyError:
                    raise exceptions.MergeException('Target entity %d not in konwn appentitys' % entity)
                LOG.debug('Extend new areas of konwn appentitys success')
class SafeCleanDb(Task):
    """Taskflow task: back up the merge-target database (normally still
    empty), then drop all of its tables so the merged data loads cleanly."""
    def __init__(self):
        super(SafeCleanDb, self).__init__(name='cleandb')
    def execute(self, root, database):
        """Back up the database before cleaning; the backup is normally empty."""
        LOG.debug('Try backup database before clean')
        safebak = os.path.join(root, 'safebak.%d.gz' % time.time())
        # back up database
        mysqldump(safebak,
                  database.get('host'), database.get('port'),
                  database.get('user'), database.get('passwd'),
                  database.get('schema'),
                  character_set=None, extargs=['-R'],
                  logfile=None, callable=safe_fork,
                  timeout=15)
        LOG.debug('Backup database before clean success, try clean it')
        # drop all tables.  BUG FIX: the passwd lookup had been replaced by a
        # '<PASSWORD>' redaction placeholder, which is not valid Python.
        cleandb(host=database.get('host'), port=database.get('port'),
                user=database.get('user'), passwd=database.get('passwd'),
                schema=database.get('schema'))
class InitDb(Task):
    """Taskflow task: load init.sql (schema + routines) into the cleaned
    merge-target database, then apply optional pre.sql fixes."""
    def __init__(self):
        super(InitDb, self).__init__(name='initdb')
    @staticmethod
    def _predo(root, database):
        """Apply pre.sql to the target database when the file exists
        (ad-hoc fixes to run before the per-entity inserts)."""
        prefile = os.path.join(root, 'pre.sql')
        if os.path.exists(prefile):
            mysqlload(prefile,
                      database.get('host'), database.get('port'),
                      database.get('user'), database.get('passwd'),
                      database.get('schema'),
                      character_set=None, extargs=None,
                      logfile=None, callable=safe_fork,
                      timeout=30)
    def execute(self, timeline, root, database):
        """Load init.sql into the database, then run the optional pre.sql."""
        LOG.debug('Try init databases')
        initfile = os.path.join(root, 'init.sql')
        logfile = os.path.join(root, 'initdb.err.%d.log' % timeline)
        mysqlload(initfile,
                  database.get('host'), database.get('port'),
                  database.get('user'), database.get('passwd'),
                  database.get('schema'),
                  character_set=None, extargs=None,
                  logfile=logfile, callable=safe_fork,
                  timeout=15)
        LOG.debug('Init databases success, try call pre.sql')
        # Load succeeded, so the error log is discarded.
        os.remove(logfile)
        self._predo(root, database)
class InserDb(Task):
    """Taskflow task: load one entity's dump file into the merge-target
    database; any failure raises the shared stop mark so sibling inserts
    abort instead of continuing into a half-merged database."""
    def __init__(self, entity, stoper):
        self.entity = entity
        # stoper is a one-element mutable sequence shared by all insert
        # tasks; a truthy first element tells every task to abort.
        self.stoper = stoper
        super(InserDb, self).__init__(name='insert-%d' % entity)
    def execute(self, timeline, root, database, timeout):
        if self.stoper[0]:
            raise exceptions.MergeException('Stop mark is true')
        _file = os.path.join(root, sqlfile(self.entity))
        logfile = os.path.join(root, 'insert-%d.err.%d.log' % (self.entity, timeline))
        LOG.info('Insert database of entity %d, sql file %s' % (self.entity, _file))
        mysqlload(_file,
                  database.get('host'), database.get('port'),
                  database.get('user'), database.get('passwd'),
                  database.get('schema'),
                  character_set=None, extargs=None,
                  logfile=logfile, callable=safe_fork,
                  timeout=timeout)
        LOG.info('Insert database of entity %d success' % self.entity)
        # Load succeeded, so the error log is discarded.
        os.remove(logfile)
    def revert(self, result, database, **kwargs):
        """On insert failure, raise the shared stop mark so the remaining
        insert tasks abort."""
        if isinstance(result, failure.Failure):
            if not self.stoper[0]:
                LOG.warning('Insert database of entity %d fail' % self.entity)
                self.stoper[0] = 1
            else:
                LOG.warning('Insert database of entity %d get stop mark' % self.entity)
class PostDo(Task):
    """Taskflow task: run post-merge fix-ups (post.sql) against the merged
    database."""
    def __init__(self, uuid, endpoint):
        self.uuid = uuid
        self.endpoint = endpoint
        super(PostDo, self).__init__(name='postdo')
    @staticmethod
    def _postdo(root, database):
        """Apply post.sql after the merge; a default post.sql is generated
        when none exists."""
        postfile = os.path.join(root, 'post.sql')
        if not os.path.exists(postfile):
            # Default clean-up statements applied after every merge.
            with open(postfile, 'w') as f:
                f.write('delete from var_player where `key` = 100;\n')
                f.write('update guilds set is_change_name = 0;\n')
        if os.path.exists(postfile):
            mysqlload(postfile,
                      database.get('host'), database.get('port'),
                      database.get('user'), database.get('passwd'),
                      database.get('schema'),
                      character_set=None, extargs=None,
                      logfile=None, callable=safe_fork,
                      timeout=30)
    def execute(self, root, database):
        """Run the post-merge SQL; log with traceback and re-raise on failure."""
        try:
            self._postdo(root, database)
        except Exception:
            LOG.exception('Post databse execute fail')
            raise
def create_merge(appendpoint, uuid, entitys, middleware, opentime, chiefs):
    """Initialize a merge working directory and kick off the merge.

    Creates ``merge-<uuid>`` under the endpoint backup path, records every
    source entity at the SWALLOW step in a pickled ``steps.dat``, then hands
    control to merge_entitys. Refuses to start when a steps file already
    exists, since that means a merge is in progress (or finished) for this
    uuid.
    """
    mergeroot = os.path.join(appendpoint.endpoint_backup, 'merge-%s' % uuid)
    if not os.path.exists(mergeroot):
        os.makedirs(mergeroot)
    stepsfile = os.path.join(mergeroot, 'steps.dat')
    if os.path.exists(stepsfile):
        raise exceptions.MergeException('Steps file exist, can not merge')
    data = {
        'opentime': opentime,
        'chiefs': chiefs,
        'steps': dict((_entity, SWALLOW) for _entity in entitys),
    }
    with open(stepsfile, 'wb') as f:
        cPickle.dump(data, f)
    merge_entitys(appendpoint, uuid, middleware.entity, middleware.databases)
def merge_entitys(appendpoint, uuid, entity, databases):
    """Run (or resume) the merge pipeline recorded in steps.dat.

    Phase 1 (prepare): every entity not yet at INSERT runs the
    Swallow -> DumpData -> Swallowed flow (unordered across entities) to
    pull and dump its data; the updated step map is persisted afterwards.
    Phase 2 (merge): clean and re-init the target database, insert every
    entity's dump sequentially, then run the post-merge SQL. On full
    success all steps become FINISHED, the merge is reported via
    ``appendpoint.client.finish_merge`` and the new config is flushed.

    Re-invoking after a crash resumes from the per-entity step recorded in
    steps.dat; an all-FINISHED file short-circuits to the finish/flush.
    """
    datadb = databases[common.DATADB]
    mergepath = 'merge-%s' % uuid
    mergeroot = os.path.join(appendpoint.endpoint_backup, mergepath)
    stepsfile = os.path.join(mergeroot, 'steps.dat')
    initfile = os.path.join(mergeroot, 'init.sql')
    if not os.path.exists(stepsfile):
        raise exceptions.MergeException('Steps file not exist')
    with open(stepsfile, 'rb') as f:
        data = cPickle.load(f)
    steps = data['steps']
    prepares = []
    for _entity, step in six.iteritems(steps):
        # When a post-sql error had no real impact, an operator may set the
        # steps to FINISHED by hand so a re-run skips the merge entirely.
        if step == FINISHED:
            # FINISHED must be all-or-nothing across entities.
            for _step in six.itervalues(steps):
                if _step != FINISHED:
                    raise exceptions.MergeException('Steps is finish?')
            appendpoint.client.finish_merge(uuid)
            appendpoint.flush_config(entity, databases,
                                     opentime=data['opentime'],
                                     chiefs=data['chiefs'])
            return
        if step != INSERT:
            prepares.append(_entity)
    if prepares:
        # Bug fix: min() was evaluated before this emptiness guard, raising
        # ValueError whenever every entity was already at INSERT (the
        # resume-after-prepare case). Only compute it when needed.
        mini_entity = min(prepares)
        name = 'prepare-merge-at-%d' % int(time.time())
        book = LogBook(name=name)
        store = dict(timeout=5, dtimeout=600, mergeroot=mergeroot, entity=entity)
        taskflow_session = build_session('sqlite:///%s' % os.path.join(mergeroot, '%s.db' % name))
        connection = Connection(taskflow_session)
        prepare_uflow = uf.Flow(name)
        for _entity in prepares:
            entity_flow = lf.Flow('prepare-%d' % _entity)
            entity_flow.add(Swallow(uuid, steps, _entity, appendpoint))
            # Only the smallest entity keeps its full dump untouched; the
            # others pass True here.
            entity_flow.add(DumpData(uuid, steps, _entity, appendpoint, _entity != mini_entity))
            entity_flow.add(Swallowed(uuid, steps, _entity, appendpoint))
            prepare_uflow.add(entity_flow)
        engine = load(connection, prepare_uflow, store=store,
                      book=book, engine_cls=ParallelActionEngine,
                      max_workers=4)
        try:
            engine.run()
        except Exception as e:
            if LOG.isEnabledFor(logging.DEBUG):
                LOG.exception('Prepare merge task execute fail')
            raise exceptions.MergeException('Prepare merge task execute fail, %s %s' % (e.__class__.__name__, str(e)))
        finally:
            connection.session = None
            taskflow_session.close()
        # Persist the step map updated by the prepare tasks.
        with open(stepsfile, 'wb') as f:
            cPickle.dump(data, f)
    # Sanity-check phase 1 results before touching the target database.
    for _entity, step in six.iteritems(steps):
        if step != INSERT:
            raise exceptions.MergeException('Some step not on %s' % INSERT)
        if not os.path.exists(os.path.join(mergeroot, sqlfile(_entity))):
            raise exceptions.MergeException('Entity %d sql file not exist' % _entity)
    if not os.path.exists(initfile):
        LOG.error('Init database file not exist')
        raise exceptions.MergeException('Init database file not exist')
    LOG.info('Prepare merge success, try merge database')
    now = int(time.time())
    name = 'merge-at-%d' % now
    book = LogBook(name=name)
    store = dict(timeout=1800, root=mergeroot, database=datadb, timeline=now)
    taskflow_session = build_session('sqlite:///%s' % os.path.join(mergeroot, '%s.db' % name))
    connection = Connection(taskflow_session)
    merge_flow = lf.Flow('merge-to')
    merge_flow.add(SafeCleanDb())
    merge_flow.add(InitDb())
    insert_lflow = lf.Flow('insert-db')
    # Shared abort flag for the insert tasks (see InserDb).
    stoper = [0]
    for _entity in steps:
        insert_lflow.add(InserDb(_entity, stoper))
    merge_flow.add(insert_lflow)
    merge_flow.add(PostDo(uuid, appendpoint))
    engine = load(connection, merge_flow, store=store,
                  book=book, engine_cls=ParallelActionEngine,
                  max_workers=4)
    try:
        engine.run()
    except Exception as e:
        if LOG.isEnabledFor(logging.DEBUG):
            LOG.exception('Merge database task execute fail')
        raise exceptions.MergeException('Merge database task execute fail, %s %s' % (e.__class__.__name__, str(e)))
    else:
        # Everything succeeded: mark all steps FINISHED, persist, report.
        for _entity in steps:
            steps[_entity] = FINISHED
        with open(stepsfile, 'wb') as f:
            cPickle.dump(data, f)
        appendpoint.client.finish_merge(uuid)
        appendpoint.flush_config(entity, databases,
                                 opentime=data['opentime'],
                                 chiefs=data['chiefs'])
        LOG.info('Merge task %s all finish' % uuid)
    finally:
        connection.session = None
        taskflow_session.close()
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.